From b4fedf221800d0997af0a9a67f5fde949de431ca Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 7 Nov 2024 09:05:54 +0000
Subject: [PATCH] Bump github.com/tektoncd/chains from 0.22.2 to 0.23.0

Bumps [github.com/tektoncd/chains](https://github.com/tektoncd/chains) from 0.22.2 to 0.23.0.
- [Release notes](https://github.com/tektoncd/chains/releases)
- [Changelog](https://github.com/tektoncd/chains/blob/main/releases.md)
- [Commits](https://github.com/tektoncd/chains/compare/v0.22.2...v0.23.0)

---
updated-dependencies:
- dependency-name: github.com/tektoncd/chains
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
---
 go.mod | 109 +-
 go.sum | 251 +-
 vendor/cel.dev/expr/.bazelversion | 2 +
 vendor/cel.dev/expr/.gitattributes | 2 +
 vendor/cel.dev/expr/.gitignore | 1 +
 vendor/cel.dev/expr/BUILD.bazel | 34 +
 vendor/cel.dev/expr/CODE_OF_CONDUCT.md | 25 +
 vendor/cel.dev/expr/CONTRIBUTING.md | 32 +
 vendor/cel.dev/expr/GOVERNANCE.md | 43 +
 vendor/cel.dev/expr/LICENSE | 202 +
 vendor/cel.dev/expr/MAINTAINERS.md | 13 +
 vendor/cel.dev/expr/README.md | 65 +
 vendor/cel.dev/expr/WORKSPACE | 145 +
 vendor/cel.dev/expr/checked.pb.go | 1432 ++
 vendor/cel.dev/expr/cloudbuild.yaml | 9 +
 vendor/cel.dev/expr/eval.pb.go | 490 +
 vendor/cel.dev/expr/explain.pb.go | 236 +
 vendor/cel.dev/expr/regen_go_proto.sh | 9 +
 .../expr/regen_go_proto_canonical_protos.sh | 10 +
 vendor/cel.dev/expr/syntax.pb.go | 1633 +++
 vendor/cel.dev/expr/value.pb.go | 653 +
 vendor/cloud.google.com/go/auth/CHANGES.md | 15 +
 vendor/cloud.google.com/go/auth/auth.go | 17 +-
 .../go/auth/httptransport/httptransport.go | 3 +-
 .../go/auth/internal/internal.go | 2 +-
 .../transport/cert/enterprise_cert.go | 8 +-
 .../internal/transport/cert/workload_cert.go | 5 +-
 .../go/auth/oauth2adapt/CHANGES.md | 7 +
 .../go/auth/oauth2adapt/oauth2adapt.go | 41 +-
 vendor/cloud.google.com/go/monitoring/LICENSE | 202 +
 .../apiv3/v2/alert_policy_client.go | 399 +
 .../go/monitoring/apiv3/v2/auxiliary.go | 682 +
 .../go/monitoring/apiv3/v2/auxiliary_go123.go | 112 +
 .../go/monitoring/apiv3/v2/doc.go | 124 +
 .../monitoring/apiv3/v2/gapic_metadata.json | 336 +
 .../go/monitoring/apiv3/v2/group_client.go | 466 +
 .../go/monitoring/apiv3/v2/metric_client.go | 578 +
 .../apiv3/v2/monitoringpb/alert.pb.go | 2432 +++
 .../apiv3/v2/monitoringpb/alert_service.pb.go | 1045 ++
 .../apiv3/v2/monitoringpb/common.pb.go | 1165 ++
 .../v2/monitoringpb/dropped_labels.pb.go | 197 +
 .../apiv3/v2/monitoringpb/group.pb.go | 265 +
 .../apiv3/v2/monitoringpb/group_service.pb.go | 1319 ++
 .../apiv3/v2/monitoringpb/metric.pb.go | 1194 ++
 .../v2/monitoringpb/metric_service.pb.go | 2502 ++++
 .../v2/monitoringpb/mutation_record.pb.go | 192 +
 .../apiv3/v2/monitoringpb/notification.pb.go | 647 +
 .../monitoringpb/notification_service.pb.go | 2002 +++
 .../apiv3/v2/monitoringpb/query_service.pb.go | 212 +
 .../apiv3/v2/monitoringpb/service.pb.go | 3107 ++++
 .../v2/monitoringpb/service_service.pb.go | 1796 +++
 .../apiv3/v2/monitoringpb/snooze.pb.go | 315 +
 .../v2/monitoringpb/snooze_service.pb.go | 867 ++
 .../apiv3/v2/monitoringpb/span_context.pb.go | 188 +
 .../apiv3/v2/monitoringpb/uptime.pb.go | 2726 ++++
 .../v2/monitoringpb/uptime_service.pb.go | 1226 ++
 .../apiv3/v2/notification_channel_client.go | 618 +
 .../go/monitoring/apiv3/v2/query_client.go | 233 +
 .../apiv3/v2/service_monitoring_client.go | 565 +
 .../go/monitoring/apiv3/v2/snooze_client.go | 343 +
 .../apiv3/v2/uptime_check_client.go | 
450 + .../go/monitoring/apiv3/v2/version.go | 23 + .../go/monitoring/internal/version.go | 18 + vendor/cloud.google.com/go/storage/CHANGES.md | 77 + vendor/cloud.google.com/go/storage/bucket.go | 6 + vendor/cloud.google.com/go/storage/client.go | 4 +- vendor/cloud.google.com/go/storage/doc.go | 65 +- .../go/storage/dynamic_delay.go | 237 + .../go/storage/experimental/experimental.go | 75 + .../go/storage/grpc_client.go | 834 +- vendor/cloud.google.com/go/storage/grpc_dp.go | 22 + .../go/storage/grpc_metrics.go | 281 + vendor/cloud.google.com/go/storage/hmac.go | 22 +- .../go/storage/http_client.go | 146 +- .../go/storage/internal/apiv2/auxiliary.go | 94 - .../storage/internal/apiv2/auxiliary_go123.go | 38 + .../go/storage/internal/apiv2/doc.go | 18 +- .../internal/apiv2/gapic_metadata.json | 50 - .../storage/internal/apiv2/storage_client.go | 535 +- .../internal/apiv2/storagepb/storage.pb.go | 5757 +++----- .../go/storage/internal/experimental.go | 32 + .../go/storage/internal/version.go | 2 +- vendor/cloud.google.com/go/storage/invoke.go | 34 +- .../go/storage/notifications.go | 36 +- vendor/cloud.google.com/go/storage/option.go | 153 +- vendor/cloud.google.com/go/storage/reader.go | 16 +- vendor/cloud.google.com/go/storage/storage.go | 9 +- .../azure-sdk-for-go/sdk/azcore/CHANGELOG.md | 15 + .../azcore/arm/runtime/policy_bearer_token.go | 48 +- .../azure-sdk-for-go/sdk/azcore/errors.go | 3 + .../internal/exported/response_error.go | 38 +- .../sdk/azcore/internal/shared/constants.go | 2 +- .../sdk/azcore/policy/policy.go | 23 +- .../sdk/azcore/runtime/policy_bearer_token.go | 136 +- .../sdk/azcore/runtime/policy_retry.go | 28 +- .../sdk/azidentity/BREAKING_CHANGES.md | 10 + .../sdk/azidentity/CHANGELOG.md | 47 + .../azure-sdk-for-go/sdk/azidentity/README.md | 11 +- .../sdk/azidentity/TOKEN_CACHING.MD | 50 +- .../sdk/azidentity/TROUBLESHOOTING.md | 2 +- .../sdk/azidentity/assets.json | 2 +- .../sdk/azidentity/authentication_record.go | 18 +- .../sdk/azidentity/azidentity.go | 10 +- .../azidentity/azure_pipelines_credential.go | 31 +- .../azidentity/chained_token_credential.go | 16 +- .../azure-sdk-for-go/sdk/azidentity/ci.yml | 15 +- .../azidentity/client_assertion_credential.go | 16 +- .../client_certificate_credential.go | 18 +- .../azidentity/client_secret_credential.go | 14 +- .../sdk/azidentity/confidential_client.go | 16 +- .../azidentity/default_azure_credential.go | 11 +- .../sdk/azidentity/device_code_credential.go | 38 +- .../azure-sdk-for-go/sdk/azidentity/errors.go | 38 +- .../sdk/azidentity/go.work.sum | 60 - .../interactive_browser_credential.go | 38 +- .../sdk/azidentity/internal/cache.go | 86 + .../sdk/azidentity/internal/exported.go | 18 - .../sdk/azidentity/internal/internal.go | 31 - .../sdk/azidentity/managed_identity_client.go | 150 +- .../azidentity/managed_identity_credential.go | 52 +- .../sdk/azidentity/public_client.go | 21 +- .../sdk/azidentity/test-resources-post.ps1 | 26 +- .../sdk/azidentity/test-resources.bicep | 9 + .../username_password_credential.go | 28 +- .../sdk/azidentity/version.go | 2 +- .../sdk/azidentity/workload_identity.go | 10 + .../detectors/gcp/LICENSE | 202 + .../detectors/gcp/README.md | 3 + .../detectors/gcp/app_engine.go | 76 + .../detectors/gcp/bms.go | 55 + .../detectors/gcp/detector.go | 102 + .../detectors/gcp/faas.go | 105 + .../detectors/gcp/gce.go | 75 + .../detectors/gcp/gke.go | 70 + .../exporter/metric/LICENSE | 202 + .../exporter/metric/README.md | 37 + .../exporter/metric/cloudmonitoring.go | 49 + 
.../exporter/metric/constants.go | 97 + .../exporter/metric/error.go | 32 + .../exporter/metric/metric.go | 890 ++ .../exporter/metric/option.go | 201 + .../exporter/metric/version.go | 21 + .../internal/resourcemapping/LICENSE | 202 + .../resourcemapping/resourcemapping.go | 286 + vendor/github.com/IBM/sarama/Dockerfile.kafka | 2 +- .../github.com/IBM/sarama/async_producer.go | 4 +- vendor/github.com/IBM/sarama/broker.go | 7 +- vendor/github.com/IBM/sarama/config.go | 4 +- .../github.com/IBM/sarama/docker-compose.yml | 10 +- .../github.com/IBM/sarama/offset_manager.go | 28 +- .../IBM/sarama/transaction_manager.go | 2 +- .../aws-sdk-go-v2/aws/go_module_metadata.go | 2 +- .../aws/middleware/private/metrics/metrics.go | 320 - .../aws/middleware/request_id_retriever.go | 4 + .../aws/middleware/user_agent.go | 1 + .../aws/retry/attempt_metrics.go | 51 + .../aws/aws-sdk-go-v2/aws/retry/middleware.go | 51 +- .../aws-sdk-go-v2/aws/signer/v4/middleware.go | 5 + .../aws/transport/http/client.go | 36 +- .../aws/aws-sdk-go-v2/config/CHANGELOG.md | 40 + .../config/go_module_metadata.go | 2 +- .../config/resolve_credentials.go | 15 +- .../aws-sdk-go-v2/credentials/CHANGELOG.md | 36 + .../credentials/go_module_metadata.go | 2 +- .../feature/ec2/imds/CHANGELOG.md | 16 + .../feature/ec2/imds/go_module_metadata.go | 2 +- .../internal/configsources/CHANGELOG.md | 16 + .../configsources/go_module_metadata.go | 2 +- .../endpoints/awsrulesfn/partitions.json | 2 +- .../internal/endpoints/v2/CHANGELOG.md | 16 + .../endpoints/v2/go_module_metadata.go | 2 +- .../internal/accept-encoding/CHANGELOG.md | 8 + .../accept-encoding/go_module_metadata.go | 2 +- .../internal/presigned-url/CHANGELOG.md | 17 + .../presigned-url/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/service/kms/CHANGELOG.md | 38 + .../aws-sdk-go-v2/service/kms/api_client.go | 351 +- .../service/kms/api_op_CancelKeyDeletion.go | 15 + .../kms/api_op_ConnectCustomKeyStore.go | 15 + .../service/kms/api_op_CreateAlias.go | 15 + .../kms/api_op_CreateCustomKeyStore.go | 15 + .../service/kms/api_op_CreateGrant.go | 15 + .../service/kms/api_op_CreateKey.go | 15 + .../service/kms/api_op_Decrypt.go | 15 + .../service/kms/api_op_DeleteAlias.go | 15 + .../kms/api_op_DeleteCustomKeyStore.go | 15 + .../kms/api_op_DeleteImportedKeyMaterial.go | 15 + .../service/kms/api_op_DeriveSharedSecret.go | 15 + .../kms/api_op_DescribeCustomKeyStores.go | 15 + .../service/kms/api_op_DescribeKey.go | 15 + .../service/kms/api_op_DisableKey.go | 15 + .../service/kms/api_op_DisableKeyRotation.go | 15 + .../kms/api_op_DisconnectCustomKeyStore.go | 15 + .../service/kms/api_op_EnableKey.go | 15 + .../service/kms/api_op_EnableKeyRotation.go | 15 + .../service/kms/api_op_Encrypt.go | 15 + .../service/kms/api_op_GenerateDataKey.go | 15 + .../service/kms/api_op_GenerateDataKeyPair.go | 15 + ..._op_GenerateDataKeyPairWithoutPlaintext.go | 15 + .../api_op_GenerateDataKeyWithoutPlaintext.go | 15 + .../service/kms/api_op_GenerateMac.go | 15 + .../service/kms/api_op_GenerateRandom.go | 15 + .../service/kms/api_op_GetKeyPolicy.go | 15 + .../kms/api_op_GetKeyRotationStatus.go | 15 + .../kms/api_op_GetParametersForImport.go | 15 + .../service/kms/api_op_GetPublicKey.go | 15 + .../service/kms/api_op_ImportKeyMaterial.go | 15 + .../service/kms/api_op_ListAliases.go | 15 + .../service/kms/api_op_ListGrants.go | 15 + .../service/kms/api_op_ListKeyPolicies.go | 15 + .../service/kms/api_op_ListKeyRotations.go | 15 + .../service/kms/api_op_ListKeys.go | 15 + .../service/kms/api_op_ListResourceTags.go | 
15 + .../service/kms/api_op_ListRetirableGrants.go | 15 + .../service/kms/api_op_PutKeyPolicy.go | 15 + .../service/kms/api_op_ReEncrypt.go | 15 + .../service/kms/api_op_ReplicateKey.go | 15 + .../service/kms/api_op_RetireGrant.go | 15 + .../service/kms/api_op_RevokeGrant.go | 15 + .../service/kms/api_op_RotateKeyOnDemand.go | 15 + .../service/kms/api_op_ScheduleKeyDeletion.go | 15 + .../aws-sdk-go-v2/service/kms/api_op_Sign.go | 15 + .../service/kms/api_op_TagResource.go | 15 + .../service/kms/api_op_UntagResource.go | 15 + .../service/kms/api_op_UpdateAlias.go | 15 + .../kms/api_op_UpdateCustomKeyStore.go | 15 + .../kms/api_op_UpdateKeyDescription.go | 15 + .../service/kms/api_op_UpdatePrimaryRegion.go | 15 + .../service/kms/api_op_Verify.go | 15 + .../service/kms/api_op_VerifyMac.go | 15 + .../aws/aws-sdk-go-v2/service/kms/auth.go | 35 +- .../service/kms/deserializers.go | 213 + .../aws-sdk-go-v2/service/kms/endpoints.go | 16 +- .../service/kms/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/service/kms/options.go | 11 +- .../aws-sdk-go-v2/service/kms/serializers.go | 319 + .../aws-sdk-go-v2/service/sso/CHANGELOG.md | 38 + .../aws-sdk-go-v2/service/sso/api_client.go | 351 +- .../service/sso/api_op_GetRoleCredentials.go | 15 + .../service/sso/api_op_ListAccountRoles.go | 15 + .../service/sso/api_op_ListAccounts.go | 15 + .../service/sso/api_op_Logout.go | 15 + .../aws/aws-sdk-go-v2/service/sso/auth.go | 35 +- .../service/sso/deserializers.go | 21 + .../aws-sdk-go-v2/service/sso/endpoints.go | 16 +- .../service/sso/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/service/sso/options.go | 11 +- .../aws-sdk-go-v2/service/sso/serializers.go | 33 +- .../service/ssooidc/CHANGELOG.md | 38 + .../service/ssooidc/api_client.go | 351 +- .../service/ssooidc/api_op_CreateToken.go | 15 + .../ssooidc/api_op_CreateTokenWithIAM.go | 15 + .../service/ssooidc/api_op_RegisterClient.go | 15 + .../api_op_StartDeviceAuthorization.go | 15 + .../aws/aws-sdk-go-v2/service/ssooidc/auth.go | 35 +- .../service/ssooidc/deserializers.go | 21 + .../service/ssooidc/endpoints.go | 16 +- .../service/ssooidc/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/service/ssooidc/options.go | 11 +- .../service/ssooidc/serializers.go | 25 + .../aws-sdk-go-v2/service/sts/CHANGELOG.md | 38 + .../aws-sdk-go-v2/service/sts/api_client.go | 351 +- .../service/sts/api_op_AssumeRole.go | 15 + .../service/sts/api_op_AssumeRoleWithSAML.go | 15 + .../sts/api_op_AssumeRoleWithWebIdentity.go | 15 + .../sts/api_op_DecodeAuthorizationMessage.go | 15 + .../service/sts/api_op_GetAccessKeyInfo.go | 15 + .../service/sts/api_op_GetCallerIdentity.go | 15 + .../service/sts/api_op_GetFederationToken.go | 15 + .../service/sts/api_op_GetSessionToken.go | 15 + .../aws/aws-sdk-go-v2/service/sts/auth.go | 35 +- .../service/sts/deserializers.go | 33 + .../aws-sdk-go-v2/service/sts/endpoints.go | 16 +- .../service/sts/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/service/sts/options.go | 11 +- .../aws-sdk-go-v2/service/sts/serializers.go | 49 + vendor/github.com/aws/smithy-go/CHANGELOG.md | 28 + .../aws/smithy-go/go_module_metadata.go | 2 +- .../aws/smithy-go/metrics/metrics.go | 136 + .../github.com/aws/smithy-go/metrics/nop.go | 67 + .../aws/smithy-go/middleware/context.go | 41 + vendor/github.com/aws/smithy-go/properties.go | 19 +- .../aws/smithy-go/tracing/context.go | 96 + .../github.com/aws/smithy-go/tracing/nop.go | 32 + .../aws/smithy-go/tracing/tracing.go | 95 + .../aws/smithy-go/transport/http/client.go | 45 +- 
.../aws/smithy-go/transport/http/metrics.go | 184 + .../cenkalti/backoff/v3/.travis.yml | 10 - .../cenkalti/backoff/{v3 => v4}/.gitignore | 3 + .../cenkalti/backoff/{v3 => v4}/LICENSE | 0 .../cenkalti/backoff/{v3 => v4}/README.md | 13 +- .../cenkalti/backoff/{v3 => v4}/backoff.go | 0 .../cenkalti/backoff/{v3 => v4}/context.go | 6 +- .../backoff/{v3 => v4}/exponential.go | 76 +- .../cenkalti/backoff/{v3 => v4}/retry.go | 70 +- .../cenkalti/backoff/{v3 => v4}/ticker.go | 3 + .../cenkalti/backoff/{v3 => v4}/timer.go | 0 .../cenkalti/backoff/{v3 => v4}/tries.go | 3 + vendor/github.com/cncf/xds/go/LICENSE | 201 + .../xds/go/udpa/annotations/migrate.pb.go | 411 + .../udpa/annotations/migrate.pb.validate.go | 350 + .../xds/go/udpa/annotations/security.pb.go | 196 + .../udpa/annotations/security.pb.validate.go | 142 + .../xds/go/udpa/annotations/sensitive.pb.go | 93 + .../udpa/annotations/sensitive.pb.validate.go | 36 + .../cncf/xds/go/udpa/annotations/status.pb.go | 253 + .../go/udpa/annotations/status.pb.validate.go | 140 + .../xds/go/udpa/annotations/versioning.pb.go | 179 + .../annotations/versioning.pb.validate.go | 140 + .../xds/go/udpa/type/v1/typed_struct.pb.go | 164 + .../udpa/type/v1/typed_struct.pb.validate.go | 166 + .../xds/go/xds/annotations/v3/migrate.pb.go | 412 + .../xds/annotations/v3/migrate.pb.validate.go | 350 + .../xds/go/xds/annotations/v3/security.pb.go | 197 + .../annotations/v3/security.pb.validate.go | 142 + .../xds/go/xds/annotations/v3/sensitive.pb.go | 93 + .../annotations/v3/sensitive.pb.validate.go | 36 + .../xds/go/xds/annotations/v3/status.pb.go | 495 + .../xds/annotations/v3/status.pb.validate.go | 452 + .../go/xds/annotations/v3/versioning.pb.go | 179 + .../annotations/v3/versioning.pb.validate.go | 140 + .../cncf/xds/go/xds/core/v3/authority.pb.go | 153 + .../go/xds/core/v3/authority.pb.validate.go | 146 + .../cncf/xds/go/xds/core/v3/cidr.pb.go | 172 + .../xds/go/xds/core/v3/cidr.pb.validate.go | 161 + .../xds/go/xds/core/v3/collection_entry.pb.go | 297 + .../core/v3/collection_entry.pb.validate.go | 383 + .../xds/go/xds/core/v3/context_params.pb.go | 160 + .../xds/core/v3/context_params.pb.validate.go | 138 + .../cncf/xds/go/xds/core/v3/extension.pb.go | 167 + .../go/xds/core/v3/extension.pb.validate.go | 164 + .../cncf/xds/go/xds/core/v3/resource.pb.go | 182 + .../go/xds/core/v3/resource.pb.validate.go | 195 + .../xds/go/xds/core/v3/resource_locator.pb.go | 406 + .../core/v3/resource_locator.pb.validate.go | 439 + .../xds/go/xds/core/v3/resource_name.pb.go | 190 + .../xds/core/v3/resource_name.pb.validate.go | 179 + .../xds/data/orca/v3/orca_load_report.pb.go | 272 + .../orca/v3/orca_load_report.pb.validate.go | 225 + .../xds/go/xds/service/orca/v3/orca.pb.go | 182 + .../xds/service/orca/v3/orca.pb.validate.go | 167 + .../go/xds/service/orca/v3/orca_grpc.pb.go | 135 + .../cncf/xds/go/xds/type/matcher/v3/cel.pb.go | 168 + .../go/xds/type/matcher/v3/cel.pb.validate.go | 177 + .../xds/go/xds/type/matcher/v3/domain.pb.go | 242 + .../xds/type/matcher/v3/domain.pb.validate.go | 315 + .../go/xds/type/matcher/v3/http_inputs.pb.go | 140 + .../matcher/v3/http_inputs.pb.validate.go | 139 + .../cncf/xds/go/xds/type/matcher/v3/ip.pb.go | 256 + .../go/xds/type/matcher/v3/ip.pb.validate.go | 347 + .../xds/go/xds/type/matcher/v3/matcher.pb.go | 1056 ++ .../type/matcher/v3/matcher.pb.validate.go | 1913 +++ .../xds/go/xds/type/matcher/v3/range.pb.go | 539 + .../xds/type/matcher/v3/range.pb.validate.go | 975 ++ .../xds/go/xds/type/matcher/v3/regex.pb.go | 242 + 
.../xds/type/matcher/v3/regex.pb.validate.go | 317 + .../xds/go/xds/type/matcher/v3/string.pb.go | 353 + .../xds/type/matcher/v3/string.pb.validate.go | 481 + .../cncf/xds/go/xds/type/v3/cel.pb.go | 330 + .../xds/go/xds/type/v3/cel.pb.validate.go | 450 + .../cncf/xds/go/xds/type/v3/range.pb.go | 298 + .../xds/go/xds/type/v3/range.pb.validate.go | 345 + .../xds/go/xds/type/v3/typed_struct.pb.go | 163 + .../xds/type/v3/typed_struct.pb.validate.go | 166 + .../envoyproxy/go-control-plane/LICENSE | 201 + .../envoy/admin/v3/certs.pb.go | 607 + .../envoy/admin/v3/certs.pb.validate.go | 870 ++ .../envoy/admin/v3/certs_vtproto.pb.go | 504 + .../envoy/admin/v3/clusters.pb.go | 744 + .../envoy/admin/v3/clusters.pb.validate.go | 803 + .../envoy/admin/v3/clusters_vtproto.pb.go | 656 + .../envoy/admin/v3/config_dump.pb.go | 642 + .../envoy/admin/v3/config_dump.pb.validate.go | 893 ++ .../envoy/admin/v3/config_dump_shared.pb.go | 2242 +++ .../v3/config_dump_shared.pb.validate.go | 3435 +++++ .../admin/v3/config_dump_shared_vtproto.pb.go | 1715 +++ .../envoy/admin/v3/config_dump_vtproto.pb.go | 466 + .../envoy/admin/v3/init_dump.pb.go | 241 + .../envoy/admin/v3/init_dump.pb.validate.go | 281 + .../envoy/admin/v3/init_dump_vtproto.pb.go | 149 + .../envoy/admin/v3/listeners.pb.go | 268 + .../envoy/admin/v3/listeners.pb.validate.go | 335 + .../envoy/admin/v3/listeners_vtproto.pb.go | 203 + .../envoy/admin/v3/memory.pb.go | 228 + .../envoy/admin/v3/memory.pb.validate.go | 147 + .../envoy/admin/v3/memory_vtproto.pb.go | 110 + .../envoy/admin/v3/metrics.pb.go | 234 + .../envoy/admin/v3/metrics.pb.validate.go | 142 + .../envoy/admin/v3/metrics_vtproto.pb.go | 89 + .../envoy/admin/v3/mutex_stats.pb.go | 191 + .../envoy/admin/v3/mutex_stats.pb.validate.go | 142 + .../envoy/admin/v3/mutex_stats_vtproto.pb.go | 86 + .../envoy/admin/v3/server_info.pb.go | 975 ++ .../envoy/admin/v3/server_info.pb.validate.go | 509 + .../envoy/admin/v3/server_info_vtproto.pb.go | 671 + .../go-control-plane/envoy/admin/v3/tap.pb.go | 182 + .../envoy/admin/v3/tap.pb.validate.go | 187 + .../envoy/admin/v3/tap_vtproto.pb.go | 106 + .../envoy/annotations/deprecation.pb.go | 159 + .../annotations/deprecation.pb.validate.go | 37 + .../envoy/annotations/resource.pb.go | 179 + .../envoy/annotations/resource.pb.validate.go | 141 + .../envoy/annotations/resource_vtproto.pb.go | 73 + .../envoy/config/accesslog/v3/accesslog.pb.go | 1924 +++ .../accesslog/v3/accesslog.pb.validate.go | 2773 ++++ .../accesslog/v3/accesslog_vtproto.pb.go | 1751 +++ .../envoy/config/bootstrap/v3/bootstrap.pb.go | 3310 +++++ .../bootstrap/v3/bootstrap.pb.validate.go | 4501 ++++++ .../bootstrap/v3/bootstrap_vtproto.pb.go | 3128 ++++ .../config/cluster/v3/circuit_breaker.pb.go | 507 + .../cluster/v3/circuit_breaker.pb.validate.go | 662 + .../cluster/v3/circuit_breaker_vtproto.pb.go | 337 + .../envoy/config/cluster/v3/cluster.pb.go | 4635 ++++++ .../config/cluster/v3/cluster.pb.validate.go | 4942 +++++++ .../config/cluster/v3/cluster_vtproto.pb.go | 3439 +++++ .../envoy/config/cluster/v3/filter.pb.go | 207 + .../config/cluster/v3/filter.pb.validate.go | 204 + .../config/cluster/v3/filter_vtproto.pb.go | 121 + .../config/cluster/v3/outlier_detection.pb.go | 641 + .../v3/outlier_detection.pb.validate.go | 717 + .../v3/outlier_detection_vtproto.pb.go | 456 + .../config/common/matcher/v3/matcher.pb.go | 1778 +++ .../common/matcher/v3/matcher.pb.validate.go | 3044 ++++ .../common/matcher/v3/matcher_vtproto.pb.go | 2035 +++ .../envoy/config/core/v3/address.pb.go | 1110 ++ 
.../config/core/v3/address.pb.validate.go | 1479 ++ .../config/core/v3/address_vtproto.pb.go | 859 ++ .../envoy/config/core/v3/backoff.pb.go | 193 + .../config/core/v3/backoff.pb.validate.go | 208 + .../config/core/v3/backoff_vtproto.pb.go | 91 + .../envoy/config/core/v3/base.pb.go | 3242 ++++ .../envoy/config/core/v3/base.pb.validate.go | 4164 ++++++ .../envoy/config/core/v3/base_vtproto.pb.go | 2497 ++++ .../envoy/config/core/v3/config_source.pb.go | 1194 ++ .../core/v3/config_source.pb.validate.go | 1345 ++ .../core/v3/config_source_vtproto.pb.go | 831 ++ .../config/core/v3/event_service_config.pb.go | 200 + .../v3/event_service_config.pb.validate.go | 197 + .../v3/event_service_config_vtproto.pb.go | 110 + .../envoy/config/core/v3/extension.pb.go | 185 + .../config/core/v3/extension.pb.validate.go | 165 + .../config/core/v3/extension_vtproto.pb.go | 88 + .../config/core/v3/grpc_method_list.pb.go | 246 + .../core/v3/grpc_method_list.pb.validate.go | 295 + .../core/v3/grpc_method_list_vtproto.pb.go | 149 + .../envoy/config/core/v3/grpc_service.pb.go | 1814 +++ .../core/v3/grpc_service.pb.validate.go | 2579 ++++ .../config/core/v3/grpc_service_vtproto.pb.go | 1648 +++ .../envoy/config/core/v3/health_check.pb.go | 1705 +++ .../core/v3/health_check.pb.validate.go | 2291 +++ .../config/core/v3/health_check_vtproto.pb.go | 1384 ++ .../envoy/config/core/v3/http_service.pb.go | 194 + .../core/v3/http_service.pb.validate.go | 210 + .../config/core/v3/http_service_vtproto.pb.go | 94 + .../envoy/config/core/v3/http_uri.pb.go | 236 + .../config/core/v3/http_uri.pb.validate.go | 228 + .../config/core/v3/http_uri_vtproto.pb.go | 123 + .../envoy/config/core/v3/protocol.pb.go | 2453 ++++ .../config/core/v3/protocol.pb.validate.go | 3015 ++++ .../config/core/v3/protocol_vtproto.pb.go | 1718 +++ .../envoy/config/core/v3/proxy_protocol.pb.go | 370 + .../core/v3/proxy_protocol.pb.validate.go | 291 + .../core/v3/proxy_protocol_vtproto.pb.go | 162 + .../envoy/config/core/v3/resolver.pb.go | 265 + .../config/core/v3/resolver.pb.validate.go | 319 + .../config/core/v3/resolver_vtproto.pb.go | 163 + .../envoy/config/core/v3/socket_option.pb.go | 615 + .../core/v3/socket_option.pb.validate.go | 728 + .../core/v3/socket_option_vtproto.pb.go | 391 + .../core/v3/substitution_format_string.pb.go | 439 + .../substitution_format_string.pb.validate.go | 445 + .../substitution_format_string_vtproto.pb.go | 298 + .../config/core/v3/udp_socket_config.pb.go | 190 + .../core/v3/udp_socket_config.pb.validate.go | 181 + .../core/v3/udp_socket_config_vtproto.pb.go | 91 + .../envoy/config/endpoint/v3/endpoint.pb.go | 508 + .../endpoint/v3/endpoint.pb.validate.go | 589 + .../endpoint/v3/endpoint_components.pb.go | 1004 ++ .../v3/endpoint_components.pb.validate.go | 1322 ++ .../v3/endpoint_components_vtproto.pb.go | 896 ++ .../config/endpoint/v3/endpoint_vtproto.pb.go | 331 + .../config/endpoint/v3/load_report.pb.go | 978 ++ .../endpoint/v3/load_report.pb.validate.go | 1094 ++ .../endpoint/v3/load_report_vtproto.pb.go | 696 + .../config/listener/v3/api_listener.pb.go | 179 + .../listener/v3/api_listener.pb.validate.go | 165 + .../listener/v3/api_listener_vtproto.pb.go | 77 + .../envoy/config/listener/v3/listener.pb.go | 1593 ++ .../listener/v3/listener.pb.validate.go | 2030 +++ .../listener/v3/listener_components.pb.go | 1353 ++ .../v3/listener_components.pb.validate.go | 1644 +++ .../v3/listener_components_vtproto.pb.go | 1213 ++ .../config/listener/v3/listener_vtproto.pb.go | 1297 ++ .../config/listener/v3/quic_config.pb.go | 
380 + .../listener/v3/quic_config.pb.validate.go | 444 + .../listener/v3/quic_config_vtproto.pb.go | 345 + .../listener/v3/udp_listener_config.pb.go | 283 + .../v3/udp_listener_config.pb.validate.go | 328 + .../v3/udp_listener_config_vtproto.pb.go | 184 + .../config/metrics/v3/metrics_service.pb.go | 324 + .../metrics/v3/metrics_service.pb.validate.go | 236 + .../metrics/v3/metrics_service_vtproto.pb.go | 139 + .../envoy/config/metrics/v3/stats.pb.go | 1185 ++ .../config/metrics/v3/stats.pb.validate.go | 1394 ++ .../config/metrics/v3/stats_vtproto.pb.go | 976 ++ .../envoy/config/overload/v3/overload.pb.go | 1186 ++ .../overload/v3/overload.pb.validate.go | 1741 +++ .../config/overload/v3/overload_vtproto.pb.go | 938 ++ .../envoy/config/rbac/v3/rbac.pb.go | 1788 +++ .../envoy/config/rbac/v3/rbac.pb.validate.go | 2509 ++++ .../envoy/config/rbac/v3/rbac_vtproto.pb.go | 2103 +++ .../envoy/config/route/v3/route.pb.go | 573 + .../config/route/v3/route.pb.validate.go | 693 + .../config/route/v3/route_components.pb.go | 9078 ++++++++++++ .../route/v3/route_components.pb.validate.go | 12236 ++++++++++++++++ .../route/v3/route_components_vtproto.pb.go | 8302 +++++++++++ .../envoy/config/route/v3/route_vtproto.pb.go | 474 + .../envoy/config/route/v3/scoped_route.pb.go | 464 + .../route/v3/scoped_route.pb.validate.go | 505 + .../route/v3/scoped_route_vtproto.pb.go | 263 + .../envoy/config/tap/v3/common.pb.go | 1642 +++ .../envoy/config/tap/v3/common.pb.validate.go | 2419 +++ .../envoy/config/tap/v3/common_vtproto.pb.go | 1591 ++ .../envoy/config/trace/v3/datadog.pb.go | 290 + .../config/trace/v3/datadog.pb.validate.go | 321 + .../config/trace/v3/datadog_vtproto.pb.go | 167 + .../envoy/config/trace/v3/dynamic_ot.pb.go | 204 + .../config/trace/v3/dynamic_ot.pb.validate.go | 177 + .../config/trace/v3/dynamic_ot_vtproto.pb.go | 88 + .../envoy/config/trace/v3/http_tracer.pb.go | 293 + .../trace/v3/http_tracer.pb.validate.go | 320 + .../config/trace/v3/http_tracer_vtproto.pb.go | 178 + .../envoy/config/trace/v3/lightstep.pb.go | 294 + .../config/trace/v3/lightstep.pb.validate.go | 195 + .../config/trace/v3/lightstep_vtproto.pb.go | 145 + .../envoy/config/trace/v3/opencensus.pb.go | 489 + .../config/trace/v3/opencensus.pb.validate.go | 240 + .../config/trace/v3/opencensus_vtproto.pb.go | 311 + .../envoy/config/trace/v3/opentelemetry.pb.go | 258 + .../trace/v3/opentelemetry.pb.validate.go | 262 + .../trace/v3/opentelemetry_vtproto.pb.go | 206 + .../envoy/config/trace/v3/service.pb.go | 174 + .../config/trace/v3/service.pb.validate.go | 179 + .../config/trace/v3/service_vtproto.pb.go | 95 + .../envoy/config/trace/v3/skywalking.pb.go | 341 + .../config/trace/v3/skywalking.pb.validate.go | 355 + .../config/trace/v3/skywalking_vtproto.pb.go | 224 + .../envoy/config/trace/v3/trace.pb.go | 99 + .../config/trace/v3/trace.pb.validate.go | 37 + .../envoy/config/trace/v3/xray.pb.go | 310 + .../envoy/config/trace/v3/xray.pb.validate.go | 367 + .../envoy/config/trace/v3/xray_vtproto.pb.go | 221 + .../envoy/config/trace/v3/zipkin.pb.go | 360 + .../config/trace/v3/zipkin.pb.validate.go | 195 + .../config/trace/v3/zipkin_vtproto.pb.go | 144 + .../envoy/data/accesslog/v3/accesslog.pb.go | 2599 ++++ .../accesslog/v3/accesslog.pb.validate.go | 2257 +++ .../data/accesslog/v3/accesslog_vtproto.pb.go | 2040 +++ .../clusters/aggregate/v3/cluster.pb.go | 174 + .../aggregate/v3/cluster.pb.validate.go | 148 + .../aggregate/v3/cluster_vtproto.pb.go | 77 + .../filters/common/fault/v3/fault.pb.go | 622 + 
.../common/fault/v3/fault.pb.validate.go | 812 + .../common/fault/v3/fault_vtproto.pb.go | 491 + .../filters/http/fault/v3/fault.pb.go | 655 + .../http/fault/v3/fault.pb.validate.go | 658 + .../filters/http/fault/v3/fault_vtproto.pb.go | 546 + .../filters/http/rbac/v3/rbac.pb.go | 355 + .../filters/http/rbac/v3/rbac.pb.validate.go | 385 + .../filters/http/rbac/v3/rbac_vtproto.pb.go | 283 + .../filters/http/router/v3/router.pb.go | 469 + .../http/router/v3/router.pb.validate.go | 428 + .../http/router/v3/router_vtproto.pb.go | 302 + .../v3/http_connection_manager.pb.go | 4304 ++++++ .../v3/http_connection_manager.pb.validate.go | 4812 ++++++ .../v3/http_connection_manager_vtproto.pb.go | 3470 +++++ .../v3/client_side_weighted_round_robin.pb.go | 303 + ...t_side_weighted_round_robin.pb.validate.go | 300 + ...nt_side_weighted_round_robin_vtproto.pb.go | 148 + .../common/v3/common.pb.go | 617 + .../common/v3/common.pb.validate.go | 814 + .../common/v3/common_vtproto.pb.go | 492 + .../least_request/v3/least_request.pb.go | 383 + .../v3/least_request.pb.validate.go | 278 + .../v3/least_request_vtproto.pb.go | 196 + .../pick_first/v3/pick_first.pb.go | 169 + .../pick_first/v3/pick_first.pb.validate.go | 138 + .../pick_first/v3/pick_first_vtproto.pb.go | 74 + .../ring_hash/v3/ring_hash.pb.go | 393 + .../ring_hash/v3/ring_hash.pb.validate.go | 252 + .../ring_hash/v3/ring_hash_vtproto.pb.go | 191 + .../wrr_locality/v3/wrr_locality.pb.go | 181 + .../v3/wrr_locality.pb.validate.go | 176 + .../v3/wrr_locality_vtproto.pb.go | 95 + .../rbac/audit_loggers/stream/v3/stream.pb.go | 153 + .../stream/v3/stream.pb.validate.go | 137 + .../stream/v3/stream_vtproto.pb.go | 61 + .../transport_sockets/tls/v3/cert.pb.go | 89 + .../tls/v3/cert.pb.validate.go | 37 + .../transport_sockets/tls/v3/common.pb.go | 1689 +++ .../tls/v3/common.pb.validate.go | 1648 +++ .../tls/v3/common_vtproto.pb.go | 1155 ++ .../transport_sockets/tls/v3/secret.pb.go | 444 + .../tls/v3/secret.pb.validate.go | 575 + .../tls/v3/secret_vtproto.pb.go | 415 + .../transport_sockets/tls/v3/tls.pb.go | 1545 ++ .../tls/v3/tls.pb.validate.go | 1902 +++ .../tls/v3/tls_spiffe_validator_config.pb.go | 286 + ...tls_spiffe_validator_config.pb.validate.go | 329 + .../tls_spiffe_validator_config_vtproto.pb.go | 167 + .../tls/v3/tls_vtproto.pb.go | 1265 ++ .../envoy/service/discovery/v3/ads.pb.go | 182 + .../service/discovery/v3/ads.pb.validate.go | 136 + .../envoy/service/discovery/v3/ads_grpc.pb.go | 210 + .../service/discovery/v3/ads_vtproto.pb.go | 61 + .../service/discovery/v3/discovery.pb.go | 1693 +++ .../discovery/v3/discovery.pb.validate.go | 2139 +++ .../discovery/v3/discovery_vtproto.pb.go | 1546 ++ .../envoy/service/load_stats/v3/lrs.pb.go | 325 + .../service/load_stats/v3/lrs.pb.validate.go | 335 + .../service/load_stats/v3/lrs_grpc.pb.go | 197 + .../service/load_stats/v3/lrs_vtproto.pb.go | 230 + .../envoy/service/status/v3/csds.pb.go | 985 ++ .../service/status/v3/csds.pb.validate.go | 1057 ++ .../envoy/service/status/v3/csds_grpc.pb.go | 177 + .../service/status/v3/csds_vtproto.pb.go | 866 ++ .../envoy/type/http/v3/cookie.pb.go | 189 + .../envoy/type/http/v3/cookie.pb.validate.go | 178 + .../envoy/type/http/v3/cookie_vtproto.pb.go | 99 + .../type/http/v3/path_transformation.pb.go | 403 + .../v3/path_transformation.pb.validate.go | 595 + .../http/v3/path_transformation_vtproto.pb.go | 300 + .../envoy/type/matcher/v3/filter_state.pb.go | 203 + .../matcher/v3/filter_state.pb.validate.go | 208 + .../matcher/v3/filter_state_vtproto.pb.go | 121 + 
.../envoy/type/matcher/v3/http_inputs.pb.go | 452 + .../matcher/v3/http_inputs.pb.validate.go | 615 + .../type/matcher/v3/http_inputs_vtproto.pb.go | 289 + .../envoy/type/matcher/v3/metadata.pb.go | 303 + .../type/matcher/v3/metadata.pb.validate.go | 378 + .../type/matcher/v3/metadata_vtproto.pb.go | 195 + .../envoy/type/matcher/v3/node.pb.go | 189 + .../envoy/type/matcher/v3/node.pb.validate.go | 199 + .../envoy/type/matcher/v3/node_vtproto.pb.go | 94 + .../envoy/type/matcher/v3/number.pb.go | 213 + .../type/matcher/v3/number.pb.validate.go | 208 + .../type/matcher/v3/number_vtproto.pb.go | 160 + .../envoy/type/matcher/v3/path.pb.go | 197 + .../envoy/type/matcher/v3/path.pb.validate.go | 205 + .../envoy/type/matcher/v3/path_vtproto.pb.go | 110 + .../envoy/type/matcher/v3/regex.pb.go | 415 + .../type/matcher/v3/regex.pb.validate.go | 479 + .../envoy/type/matcher/v3/regex_vtproto.pb.go | 246 + .../type/matcher/v3/status_code_input.pb.go | 204 + .../v3/status_code_input.pb.validate.go | 247 + .../v3/status_code_input_vtproto.pb.go | 104 + .../envoy/type/matcher/v3/string.pb.go | 400 + .../type/matcher/v3/string.pb.validate.go | 482 + .../type/matcher/v3/string_vtproto.pb.go | 370 + .../envoy/type/matcher/v3/struct.pb.go | 329 + .../type/matcher/v3/struct.pb.validate.go | 364 + .../type/matcher/v3/struct_vtproto.pb.go | 171 + .../envoy/type/matcher/v3/value.pb.go | 552 + .../type/matcher/v3/value.pb.validate.go | 791 + .../envoy/type/matcher/v3/value_vtproto.pb.go | 545 + .../envoy/type/metadata/v3/metadata.pb.go | 683 + .../type/metadata/v3/metadata.pb.validate.go | 1025 ++ .../type/metadata/v3/metadata_vtproto.pb.go | 563 + .../envoy/type/tracing/v3/custom_tag.pb.go | 609 + .../type/tracing/v3/custom_tag.pb.validate.go | 847 ++ .../type/tracing/v3/custom_tag_vtproto.pb.go | 556 + .../envoy/type/v3/hash_policy.pb.go | 333 + .../envoy/type/v3/hash_policy.pb.validate.go | 451 + .../envoy/type/v3/hash_policy_vtproto.pb.go | 251 + .../go-control-plane/envoy/type/v3/http.pb.go | 144 + .../envoy/type/v3/http.pb.validate.go | 37 + .../envoy/type/v3/http_status.pb.go | 458 + .../envoy/type/v3/http_status.pb.validate.go | 162 + .../envoy/type/v3/http_status_vtproto.pb.go | 70 + .../envoy/type/v3/percent.pb.go | 316 + .../envoy/type/v3/percent.pb.validate.go | 261 + .../envoy/type/v3/percent_vtproto.pb.go | 132 + .../envoy/type/v3/range.pb.go | 324 + .../envoy/type/v3/range.pb.validate.go | 346 + .../envoy/type/v3/range_vtproto.pb.go | 200 + .../envoy/type/v3/ratelimit_strategy.pb.go | 407 + .../type/v3/ratelimit_strategy.pb.validate.go | 381 + .../type/v3/ratelimit_strategy_vtproto.pb.go | 241 + .../envoy/type/v3/ratelimit_unit.pb.go | 165 + .../type/v3/ratelimit_unit.pb.validate.go | 37 + .../envoy/type/v3/semantic_version.pb.go | 181 + .../type/v3/semantic_version.pb.validate.go | 143 + .../type/v3/semantic_version_vtproto.pb.go | 86 + .../envoy/type/v3/token_bucket.pb.go | 204 + .../envoy/type/v3/token_bucket.pb.validate.go | 203 + .../envoy/type/v3/token_bucket_vtproto.pb.go | 100 + .../envoyproxy/protoc-gen-validate/LICENSE | 202 + .../protoc-gen-validate/validate/BUILD | 74 + .../protoc-gen-validate/validate/validate.h | 183 + .../validate/validate.pb.go | 4106 ++++++ .../validate/validate.proto | 862 ++ .../github.com/fsnotify/fsnotify/.cirrus.yml | 7 +- .../fsnotify/fsnotify/.editorconfig | 12 - .../fsnotify/fsnotify/.gitattributes | 1 - .../github.com/fsnotify/fsnotify/.gitignore | 3 + .../github.com/fsnotify/fsnotify/CHANGELOG.md | 34 +- .../fsnotify/fsnotify/CONTRIBUTING.md | 120 +- 
.../fsnotify/fsnotify/backend_fen.go | 324 +- .../fsnotify/fsnotify/backend_inotify.go | 594 +- .../fsnotify/fsnotify/backend_kqueue.go | 747 +- .../fsnotify/fsnotify/backend_other.go | 204 +- .../fsnotify/fsnotify/backend_windows.go | 305 +- .../github.com/fsnotify/fsnotify/fsnotify.go | 368 +- .../fsnotify/fsnotify/internal/darwin.go | 39 + .../fsnotify/internal/debug_darwin.go | 57 + .../fsnotify/internal/debug_dragonfly.go | 33 + .../fsnotify/internal/debug_freebsd.go | 42 + .../fsnotify/internal/debug_kqueue.go | 32 + .../fsnotify/fsnotify/internal/debug_linux.go | 56 + .../fsnotify/internal/debug_netbsd.go | 25 + .../fsnotify/internal/debug_openbsd.go | 28 + .../fsnotify/internal/debug_solaris.go | 45 + .../fsnotify/internal/debug_windows.go | 40 + .../fsnotify/fsnotify/internal/freebsd.go | 31 + .../fsnotify/fsnotify/internal/internal.go | 2 + .../fsnotify/fsnotify/internal/unix.go | 31 + .../fsnotify/fsnotify/internal/unix2.go | 7 + .../fsnotify/fsnotify/internal/windows.go | 41 + vendor/github.com/fsnotify/fsnotify/mkdoc.zsh | 259 - .../fsnotify/fsnotify/system_bsd.go | 1 - .../fsnotify/fsnotify/system_darwin.go | 1 - .../github.com/fxamacker/cbor/v2/.gitignore | 12 + .../fxamacker/cbor/v2/.golangci.yml | 104 + .../fxamacker/cbor/v2/CODE_OF_CONDUCT.md | 133 + .../fxamacker/cbor/v2/CONTRIBUTING.md | 41 + vendor/github.com/fxamacker/cbor/v2/LICENSE | 21 + vendor/github.com/fxamacker/cbor/v2/README.md | 691 + .../github.com/fxamacker/cbor/v2/SECURITY.md | 7 + .../fxamacker/cbor/v2/bytestring.go | 63 + vendor/github.com/fxamacker/cbor/v2/cache.go | 363 + vendor/github.com/fxamacker/cbor/v2/common.go | 182 + vendor/github.com/fxamacker/cbor/v2/decode.go | 3187 ++++ .../github.com/fxamacker/cbor/v2/diagnose.go | 724 + vendor/github.com/fxamacker/cbor/v2/doc.go | 129 + vendor/github.com/fxamacker/cbor/v2/encode.go | 1989 +++ .../fxamacker/cbor/v2/encode_map.go | 94 + .../fxamacker/cbor/v2/encode_map_go117.go | 60 + .../fxamacker/cbor/v2/simplevalue.go | 69 + vendor/github.com/fxamacker/cbor/v2/stream.go | 277 + .../fxamacker/cbor/v2/structfields.go | 260 + vendor/github.com/fxamacker/cbor/v2/tag.go | 299 + vendor/github.com/fxamacker/cbor/v2/valid.go | 394 + .../github.com/hashicorp/vault/api/client.go | 28 +- .../hashicorp/vault/api/lifetime_watcher.go | 2 +- .../github.com/hashicorp/vault/api/request.go | 5 +- .../hashicorp/vault/api/response.go | 3 +- .../github.com/hashicorp/vault/api/secret.go | 4 + .../hashicorp/vault/api/sudo_paths.go | 1 + .../hashicorp/vault/api/sys_raft.go | 2 +- .../jellydator/ttlcache/v3/README.md | 3 +- .../jellydator/ttlcache/v3/cache.go | 107 +- .../pelletier/go-toml/v2/.goreleaser.yaml | 1 + .../github.com/pelletier/go-toml/v2/README.md | 2 +- .../pelletier/go-toml/v2/marshaler.go | 24 +- .../pelletier/go-toml/v2/unmarshaler.go | 45 +- .../github.com/planetscale/vtprotobuf/LICENSE | 29 + .../vtprotobuf/protohelpers/protohelpers.go | 122 + .../types/known/anypb/any_vtproto.pb.go | 389 + .../known/durationpb/duration_vtproto.pb.go | 317 + .../types/known/emptypb/empty_vtproto.pb.go | 207 + .../types/known/structpb/struct_vtproto.pb.go | 2004 +++ .../known/timestamppb/timestamp_vtproto.pb.go | 317 + .../known/wrapperspb/wrappers_vtproto.pb.go | 2240 +++ .../sigstore/pkg/oauthflow/interactive.go | 2 +- .../sigstore/pkg/signature/kms/gcp/client.go | 5 +- .../pkg/signature/kms/hashivault/client.go | 20 +- .../sigstore/sigstore/pkg/tuf/client.go | 1 + .../go-spiffe/v2/workloadapi/backoff.go | 47 +- .../spiffe/go-spiffe/v2/workloadapi/client.go | 19 +- 
.../go-spiffe/v2/workloadapi/jwtsource.go | 22 +- .../spiffe/go-spiffe/v2/workloadapi/option.go | 45 +- .../chains/pkg/chains/storage/docdb/docdb.go | 150 +- .../tektoncd/chains/pkg/config/config.go | 21 +- vendor/github.com/x448/float16/.travis.yml | 13 + vendor/github.com/x448/float16/LICENSE | 22 + vendor/github.com/x448/float16/README.md | 133 + vendor/github.com/x448/float16/float16.go | 302 + vendor/github.com/youmark/pkcs8/.travis.yml | 14 - .../bson/bsoncodec/default_value_decoders.go | 4 +- .../mongo-driver/bson/bsoncodec/uint_codec.go | 8 +- .../bson/bsonrw/extjson_wrappers.go | 4 +- .../mongo-driver/bson/bsonrw/value_reader.go | 12 +- .../mongo-driver/bson/raw_value.go | 8 +- .../mongo-driver/bson/registry.go | 18 +- .../mongo-driver/internal/logger/io_sink.go | 7 +- .../mongo-driver/mongo/change_stream.go | 6 + .../mongo-driver/mongo/client.go | 4 - .../mongo-driver/mongo/collection.go | 14 +- .../mongo-driver/mongo/database.go | 2 +- .../mongo/options/clientoptions.go | 15 +- .../mongo/options/mongooptions.go | 2 +- .../mongo/options/searchindexoptions.go | 7 + .../mongo-driver/mongo/search_index_view.go | 43 +- .../mongo/writeconcern/writeconcern.go | 2 +- .../mongo-driver/version/version.go | 2 +- .../mongo-driver/x/bsonx/bsoncore/bsoncore.go | 40 +- .../mongo-driver/x/bsonx/bsoncore/doc.go | 23 +- .../mongo-driver/x/mongo/driver/DESIGN.md | 27 - .../x/mongo/driver/auth/creds/doc.go | 14 + .../mongo-driver/x/mongo/driver/auth/doc.go | 21 +- .../x/mongo/driver/compression.go | 21 +- .../x/mongo/driver/connstring/connstring.go | 7 + .../mongo-driver/x/mongo/driver/dns/dns.go | 7 + .../mongo-driver/x/mongo/driver/driver.go | 7 + .../mongocrypt/mongocrypt_not_enabled.go | 7 + .../x/mongo/driver/mongocrypt/options/doc.go | 14 + .../mongo-driver/x/mongo/driver/ocsp/ocsp.go | 7 + .../mongo-driver/x/mongo/driver/operation.go | 21 +- .../driver/operation/create_search_indexes.go | 42 +- .../x/mongo/driver/operation/doc.go | 14 + .../driver/operation/drop_search_index.go | 37 +- .../driver/operation/update_search_index.go | 39 +- .../x/mongo/driver/session/doc.go | 14 + .../x/mongo/driver/topology/connection.go | 4 +- .../x/mongo/driver/topology/rtt_monitor.go | 6 - .../x/mongo/driver/topology/server.go | 9 +- .../x/mongo/driver/topology/topology.go | 17 +- .../x/mongo/driver/wiremessage/wiremessage.go | 40 +- .../contrib/detectors/gcp/LICENSE | 201 + .../contrib/detectors/gcp/README.md | 76 + .../contrib/detectors/gcp/cloud-function.go | 61 + .../contrib/detectors/gcp/cloud-run.go | 114 + .../contrib/detectors/gcp/detector.go | 153 + .../contrib/detectors/gcp/gce.go | 100 + .../contrib/detectors/gcp/gke.go | 69 + .../contrib/detectors/gcp/types.go | 33 + .../contrib/detectors/gcp/version.go | 17 + vendor/go.opentelemetry.io/otel/sdk/LICENSE | 201 + vendor/go.opentelemetry.io/otel/sdk/README.md | 3 + .../otel/sdk/instrumentation/README.md | 3 + .../otel/sdk/instrumentation/doc.go | 13 + .../otel/sdk/instrumentation/library.go | 9 + .../otel/sdk/instrumentation/scope.go | 15 + .../otel/sdk/internal/x/README.md | 46 + .../otel/sdk/internal/x/x.go | 66 + .../otel/sdk/metric/LICENSE | 201 + .../otel/sdk/metric/README.md | 3 + .../otel/sdk/metric/aggregation.go | 189 + .../otel/sdk/metric/cache.go | 83 + .../otel/sdk/metric/config.go | 137 + .../otel/sdk/metric/doc.go | 47 + .../otel/sdk/metric/env.go | 39 + .../otel/sdk/metric/exemplar.go | 81 + .../otel/sdk/metric/exporter.go | 77 + .../otel/sdk/metric/instrument.go | 347 + .../otel/sdk/metric/instrumentkind_string.go | 30 + 
.../metric/internal/aggregate/aggregate.go | 154 + .../otel/sdk/metric/internal/aggregate/doc.go | 7 + .../sdk/metric/internal/aggregate/exemplar.go | 42 + .../aggregate/exponential_histogram.go | 444 + .../metric/internal/aggregate/histogram.go | 233 + .../metric/internal/aggregate/lastvalue.go | 162 + .../sdk/metric/internal/aggregate/limit.go | 42 + .../otel/sdk/metric/internal/aggregate/sum.go | 238 + .../otel/sdk/metric/internal/exemplar/doc.go | 6 + .../otel/sdk/metric/internal/exemplar/drop.go | 23 + .../sdk/metric/internal/exemplar/exemplar.go | 29 + .../sdk/metric/internal/exemplar/filter.go | 29 + .../internal/exemplar/filtered_reservoir.go | 49 + .../otel/sdk/metric/internal/exemplar/hist.go | 46 + .../otel/sdk/metric/internal/exemplar/rand.go | 191 + .../sdk/metric/internal/exemplar/reservoir.go | 32 + .../sdk/metric/internal/exemplar/storage.go | 95 + .../sdk/metric/internal/exemplar/value.go | 58 + .../otel/sdk/metric/internal/reuse_slice.go | 13 + .../otel/sdk/metric/internal/x/README.md | 112 + .../otel/sdk/metric/internal/x/x.go | 85 + .../otel/sdk/metric/manual_reader.go | 203 + .../otel/sdk/metric/meter.go | 729 + .../otel/sdk/metric/metricdata/README.md | 3 + .../otel/sdk/metric/metricdata/data.go | 296 + .../otel/sdk/metric/metricdata/temporality.go | 30 + .../metric/metricdata/temporality_string.go | 25 + .../otel/sdk/metric/periodic_reader.go | 370 + .../otel/sdk/metric/pipeline.go | 655 + .../otel/sdk/metric/provider.go | 143 + .../otel/sdk/metric/reader.go | 189 + .../otel/sdk/metric/version.go | 9 + .../otel/sdk/metric/view.go | 117 + .../otel/sdk/resource/README.md | 3 + .../otel/sdk/resource/auto.go | 118 + .../otel/sdk/resource/builtin.go | 118 + .../otel/sdk/resource/config.go | 195 + .../otel/sdk/resource/container.go | 89 + .../otel/sdk/resource/doc.go | 20 + .../otel/sdk/resource/env.go | 95 + .../otel/sdk/resource/host_id.go | 109 + .../otel/sdk/resource/host_id_bsd.go | 12 + .../otel/sdk/resource/host_id_darwin.go | 8 + .../otel/sdk/resource/host_id_exec.go | 18 + .../otel/sdk/resource/host_id_linux.go | 11 + .../otel/sdk/resource/host_id_readfile.go | 17 + .../otel/sdk/resource/host_id_unsupported.go | 19 + .../otel/sdk/resource/host_id_windows.go | 37 + .../otel/sdk/resource/os.go | 89 + .../otel/sdk/resource/os_release_darwin.go | 91 + .../otel/sdk/resource/os_release_unix.go | 143 + .../otel/sdk/resource/os_unix.go | 79 + .../otel/sdk/resource/os_unsupported.go | 15 + .../otel/sdk/resource/os_windows.go | 90 + .../otel/sdk/resource/process.go | 173 + .../otel/sdk/resource/resource.go | 294 + .../go.opentelemetry.io/otel/sdk/version.go | 9 + .../otel/semconv/v1.24.0/README.md | 3 + .../otel/semconv/v1.24.0/attribute_group.go | 4387 ++++++ .../otel/semconv/v1.24.0/doc.go | 9 + .../otel/semconv/v1.24.0/event.go | 200 + .../otel/semconv/v1.24.0/exception.go | 9 + .../otel/semconv/v1.24.0/metric.go | 1071 ++ .../otel/semconv/v1.24.0/resource.go | 2545 ++++ .../otel/semconv/v1.24.0/schema.go | 9 + .../otel/semconv/v1.24.0/trace.go | 1323 ++ vendor/gocloud.dev/aws/aws.go | 108 +- .../gocloud.dev/docstore/awsdynamodb/codec.go | 1 - .../docstore/awsdynamodb/dynamo.go | 8 +- .../gocloud.dev/docstore/awsdynamodb/query.go | 26 +- vendor/gocloud.dev/docstore/driver/driver.go | 1 - .../docstore/gcpfirestore/codec.go | 2 + .../gocloud.dev/docstore/gcpfirestore/fs.go | 5 +- .../docstore/gcpfirestore/query.go | 14 +- vendor/gocloud.dev/docstore/query.go | 16 +- vendor/gocloud.dev/internal/retry/retry.go | 3 +- .../internal/useragent/useragent.go | 2 +- 
.../gocloud.dev/pubsub/kafkapubsub/kafka.go | 18 +- vendor/gocloud.dev/pubsub/pubsub.go | 80 +- vendor/golang.org/x/exp/LICENSE | 4 +- vendor/golang.org/x/xerrors/LICENSE | 4 +- .../google.golang.org/api/internal/version.go | 2 +- .../api/distribution/distribution.pb.go | 892 ++ .../genproto/googleapis/api/label/label.pb.go | 249 + .../googleapis/api/metric/metric.pb.go | 771 + .../api/monitoredres/monitored_resource.pb.go | 476 + .../type/calendarperiod/calendar_period.pb.go | 189 + .../grpc/authz/audit/audit_logger.go | 127 + .../grpc/authz/audit/stdout/stdout_logger.go | 110 + .../balancer/leastrequest/leastrequest.go | 183 + .../grpc/balancer/rls/balancer.go | 705 + .../grpc/balancer/rls/cache.go | 383 + .../grpc/balancer/rls/child_policy.go | 109 + .../grpc/balancer/rls/config.go | 311 + .../grpc/balancer/rls/control_channel.go | 220 + .../rls/internal/adaptive/adaptive.go | 131 + .../rls/internal/adaptive/lookback.go | 84 + .../balancer/rls/internal/keys/builder.go | 267 + .../grpc/balancer/rls/picker.go | 397 + .../balancer/weightedroundrobin/balancer.go | 636 + .../balancer/weightedroundrobin/config.go | 59 + .../weightedroundrobin/internal/internal.go | 44 + .../balancer/weightedroundrobin/logging.go | 34 + .../balancer/weightedroundrobin/scheduler.go | 146 + .../weightedroundrobin/weightedroundrobin.go | 69 + .../grpc/balancer/weightedtarget/logging.go | 34 + .../weightedaggregator/aggregator.go | 309 + .../balancer/weightedtarget/weightedtarget.go | 194 + .../weightedtarget/weightedtarget_config.go | 49 + .../tls/certprovider/distributor.go | 114 + .../tls/certprovider/pemfile/builder.go | 96 + .../tls/certprovider/pemfile/watcher.go | 260 + .../credentials/tls/certprovider/provider.go | 109 + .../credentials/tls/certprovider/store.go | 196 + .../grpc/encoding/gzip/gzip.go | 132 + .../grpc/internal/admin/admin.go | 60 + .../grpc/internal/balancer/nop/nop.go | 62 + .../internal/balancergroup/balancergroup.go | 613 + .../balancergroup/balancerstateaggregator.go | 37 + .../grpc/internal/cache/timeoutCache.go | 151 + .../credentials/xds/handshake_info.go | 297 + .../grpc/internal/hierarchy/hierarchy.go | 112 + .../internal/proto/grpc_lookup_v1/rls.pb.go | 387 + .../proto/grpc_lookup_v1/rls_config.pb.go | 949 ++ .../proto/grpc_lookup_v1/rls_grpc.pb.go | 137 + .../grpc/internal/wrr/edf.go | 92 + .../grpc/internal/wrr/random.go | 84 + .../grpc/internal/wrr/wrr.go | 32 + .../grpc/internal/xds/bootstrap/bootstrap.go | 828 ++ .../grpc/internal/xds/bootstrap/logging.go | 28 + .../grpc/internal/xds/bootstrap/template.go | 47 + .../internal/xds/bootstrap/tlscreds/bundle.go | 138 + .../internal/xds/matcher/matcher_header.go | 274 + .../internal/xds/matcher/string_matcher.go | 184 + .../grpc/internal/xds/rbac/converter.go | 101 + .../grpc/internal/xds/rbac/matchers.go | 432 + .../grpc/internal/xds/rbac/rbac_engine.go | 316 + .../grpc/orca/call_metrics.go | 196 + .../grpc/orca/internal/internal.go | 71 + vendor/google.golang.org/grpc/orca/orca.go | 57 + .../google.golang.org/grpc/orca/producer.go | 223 + .../grpc/orca/server_metrics.go | 352 + vendor/google.golang.org/grpc/orca/service.go | 163 + .../grpc/stats/opentelemetry/LICENSE | 202 + .../stats/opentelemetry/client_metrics.go | 277 + .../opentelemetry/internal/pluginoption.go | 41 + .../grpc/stats/opentelemetry/opentelemetry.go | 390 + .../stats/opentelemetry/server_metrics.go | 278 + .../grpc/xds/bootstrap/bootstrap.go | 66 + .../grpc/xds/bootstrap/credentials.go | 70 + .../google.golang.org/grpc/xds/csds/csds.go | 108 + 
.../grpc/xds/googledirectpath/googlec2p.go | 160 + .../grpc/xds/googledirectpath/utils.go | 101 + .../grpc/xds/internal/balancer/balancer.go | 31 + .../balancer/cdsbalancer/cdsbalancer.go | 671 + .../balancer/cdsbalancer/cluster_watcher.go | 55 + .../internal/balancer/cdsbalancer/logging.go | 34 + .../balancer/clusterimpl/clusterimpl.go | 480 + .../internal/balancer/clusterimpl/config.go | 69 + .../internal/balancer/clusterimpl/logging.go | 34 + .../internal/balancer/clusterimpl/picker.go | 206 + .../clustermanager/balancerstateaggregator.go | 196 + .../balancer/clustermanager/clustermanager.go | 205 + .../balancer/clustermanager/config.go | 46 + .../balancer/clustermanager/picker.go | 70 + .../clusterresolver/clusterresolver.go | 400 + .../balancer/clusterresolver/config.go | 160 + .../balancer/clusterresolver/configbuilder.go | 291 + .../configbuilder_childname.go | 88 + .../balancer/clusterresolver/logging.go | 34 + .../clusterresolver/resource_resolver.go | 321 + .../clusterresolver/resource_resolver_dns.go | 188 + .../clusterresolver/resource_resolver_eds.go | 144 + .../balancer/loadstore/load_store_wrapper.go | 120 + .../balancer/outlierdetection/balancer.go | 918 ++ .../balancer/outlierdetection/callcounter.go | 66 + .../balancer/outlierdetection/config.go | 240 + .../balancer/outlierdetection/logging.go | 34 + .../outlierdetection/subconn_wrapper.go | 74 + .../internal/balancer/priority/balancer.go | 289 + .../balancer/priority/balancer_child.go | 173 + .../balancer/priority/balancer_priority.go | 210 + .../xds/internal/balancer/priority/config.go | 67 + .../balancer/priority/ignore_resolve_now.go | 58 + .../xds/internal/balancer/priority/logging.go | 34 + .../xds/internal/balancer/priority/utils.go | 31 + .../xds/internal/balancer/ringhash/config.go | 70 + .../xds/internal/balancer/ringhash/logging.go | 38 + .../xds/internal/balancer/ringhash/picker.go | 161 + .../xds/internal/balancer/ringhash/ring.go | 182 + .../internal/balancer/ringhash/ringhash.go | 527 + .../xds/internal/balancer/ringhash/util.go | 40 + .../internal/balancer/wrrlocality/balancer.go | 201 + .../internal/balancer/wrrlocality/logging.go | 34 + .../clusterspecifier/cluster_specifier.go | 72 + .../xds/internal/clusterspecifier/rls/rls.go | 105 + .../xds/internal/httpfilter/fault/fault.go | 300 + .../xds/internal/httpfilter/httpfilter.go | 108 + .../grpc/xds/internal/httpfilter/rbac/rbac.go | 214 + .../xds/internal/httpfilter/router/router.go | 113 + .../grpc/xds/internal/internal.go | 97 + .../internal/resolver/internal/internal.go | 30 + .../grpc/xds/internal/resolver/logging.go | 34 + .../xds/internal/resolver/serviceconfig.go | 348 + .../xds/internal/resolver/watch_service.go | 92 + .../xds/internal/resolver/xds_resolver.go | 570 + .../grpc/xds/internal/server/conn_wrapper.go | 189 + .../xds/internal/server/listener_wrapper.go | 454 + .../grpc/xds/internal/server/rds_handler.go | 191 + .../grpc/xds/internal/xdsclient/attributes.go | 36 + .../grpc/xds/internal/xdsclient/authority.go | 703 + .../grpc/xds/internal/xdsclient/client.go | 52 + .../grpc/xds/internal/xdsclient/client_new.go | 158 + .../internal/xdsclient/client_refcounted.go | 104 + .../grpc/xds/internal/xdsclient/clientimpl.go | 101 + .../xdsclient/clientimpl_authority.go | 145 + .../xds/internal/xdsclient/clientimpl_dump.go | 53 + .../xdsclient/clientimpl_loadreport.go | 47 + .../internal/xdsclient/clientimpl_watchers.go | 111 + .../internal/xdsclient/internal/internal.go | 25 + .../xds/internal/xdsclient/load/reporter.go | 27 + 
.../grpc/xds/internal/xdsclient/load/store.go | 441 + .../grpc/xds/internal/xdsclient/logging.go | 40 + .../internal/xdsclient/requests_counter.go | 107 + .../xdsclient/transport/internal/internal.go | 25 + .../xdsclient/transport/loadreport.go | 259 + .../internal/xdsclient/transport/transport.go | 704 + .../xdslbregistry/converter/converter.go | 255 + .../xdsclient/xdslbregistry/xdslbregistry.go | 84 + .../xdsresource/cluster_resource_type.go | 153 + .../xdsresource/endpoints_resource_type.go | 149 + .../internal/xdsclient/xdsresource/errors.go | 66 + .../xdsclient/xdsresource/filter_chain.go | 895 ++ .../xdsresource/listener_resource_type.go | 186 + .../internal/xdsclient/xdsresource/logging.go | 28 + .../internal/xdsclient/xdsresource/matcher.go | 280 + .../xdsclient/xdsresource/matcher_path.go | 102 + .../internal/xdsclient/xdsresource/name.go | 127 + .../xdsclient/xdsresource/resource_type.go | 174 + .../xdsresource/route_config_resource_type.go | 150 + .../internal/xdsclient/xdsresource/type.go | 135 + .../xdsclient/xdsresource/type_cds.go | 81 + .../xdsclient/xdsresource/type_eds.go | 75 + .../xdsclient/xdsresource/type_lds.go | 79 + .../xdsclient/xdsresource/type_rds.go | 248 + .../xdsclient/xdsresource/unmarshal_cds.go | 699 + .../xdsclient/xdsresource/unmarshal_eds.go | 176 + .../xdsclient/xdsresource/unmarshal_lds.go | 277 + .../xdsclient/xdsresource/unmarshal_rds.go | 436 + .../xdsclient/xdsresource/version/version.go | 41 + vendor/google.golang.org/grpc/xds/server.go | 321 + .../grpc/xds/server_options.go | 76 + vendor/google.golang.org/grpc/xds/xds.go | 99 + .../gopkg.in/evanphx/json-patch.v4/.gitignore | 6 + vendor/gopkg.in/evanphx/json-patch.v4/LICENSE | 25 + .../gopkg.in/evanphx/json-patch.v4/README.md | 317 + .../gopkg.in/evanphx/json-patch.v4/errors.go | 38 + .../gopkg.in/evanphx/json-patch.v4/merge.go | 389 + .../gopkg.in/evanphx/json-patch.v4/patch.go | 851 ++ .../api/admissionregistration/v1/doc.go | 1 + .../admissionregistration/v1/generated.pb.go | 6011 ++++++-- .../admissionregistration/v1/generated.proto | 583 +- .../api/admissionregistration/v1/register.go | 4 + .../api/admissionregistration/v1/types.go | 610 +- .../v1/types_swagger_doc_generated.go | 178 +- .../v1/zz_generated.deepcopy.go | 432 + .../v1/zz_generated.prerelease-lifecycle.go | 70 + .../v1alpha1/generated.pb.go | 235 +- .../v1alpha1/generated.proto | 18 +- .../v1beta1/generated.pb.go | 309 +- .../v1beta1/generated.proto | 53 +- .../admissionregistration/v1beta1/types.go | 15 +- .../v1beta1/types_swagger_doc_generated.go | 4 +- vendor/k8s.io/api/apidiscovery/v2/doc.go | 23 + .../api/apidiscovery/v2/generated.pb.go | 1742 +++ .../api/apidiscovery/v2/generated.proto | 156 + vendor/k8s.io/api/apidiscovery/v2/register.go | 56 + vendor/k8s.io/api/apidiscovery/v2/types.go | 157 + .../apidiscovery/v2/zz_generated.deepcopy.go | 190 + .../v2/zz_generated.prerelease-lifecycle.go | 34 + .../api/apidiscovery/v2beta1/generated.pb.go | 113 +- .../api/apidiscovery/v2beta1/generated.proto | 10 +- .../v1alpha1/generated.pb.go | 119 +- .../v1alpha1/generated.proto | 7 +- .../api/apiserverinternal/v1alpha1/types.go | 1 - vendor/k8s.io/api/apps/v1/doc.go | 1 + vendor/k8s.io/api/apps/v1/generated.pb.go | 341 +- vendor/k8s.io/api/apps/v1/generated.proto | 76 +- vendor/k8s.io/api/apps/v1/types.go | 26 +- .../apps/v1/types_swagger_doc_generated.go | 4 +- .../v1/zz_generated.prerelease-lifecycle.go | 82 + .../k8s.io/api/apps/v1beta1/generated.pb.go | 299 +- .../k8s.io/api/apps/v1beta1/generated.proto | 47 +- 
vendor/k8s.io/api/apps/v1beta1/types.go | 9 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../k8s.io/api/apps/v1beta2/generated.pb.go | 365 +- .../k8s.io/api/apps/v1beta2/generated.proto | 75 +- vendor/k8s.io/api/apps/v1beta2/types.go | 13 +- .../v1beta2/types_swagger_doc_generated.go | 2 +- vendor/k8s.io/api/authentication/v1/doc.go | 1 + .../api/authentication/v1/generated.pb.go | 154 +- .../api/authentication/v1/generated.proto | 12 +- vendor/k8s.io/api/authentication/v1/types.go | 7 + .../v1/zz_generated.prerelease-lifecycle.go | 40 + .../authentication/v1alpha1/generated.pb.go | 59 +- .../authentication/v1alpha1/generated.proto | 4 +- .../authentication/v1beta1/generated.pb.go | 117 +- .../authentication/v1beta1/generated.proto | 9 +- .../api/authentication/v1beta1/types.go | 3 + vendor/k8s.io/api/authorization/v1/doc.go | 2 +- .../api/authorization/v1/generated.pb.go | 737 +- .../api/authorization/v1/generated.proto | 85 +- vendor/k8s.io/api/authorization/v1/types.go | 79 + .../v1/types_swagger_doc_generated.go | 38 +- .../authorization/v1/zz_generated.deepcopy.go | 61 +- .../v1/zz_generated.prerelease-lifecycle.go | 46 + .../api/authorization/v1beta1/generated.pb.go | 294 +- .../api/authorization/v1beta1/generated.proto | 26 +- .../k8s.io/api/authorization/v1beta1/types.go | 16 + .../v1beta1/types_swagger_doc_generated.go | 18 +- .../v1beta1/zz_generated.deepcopy.go | 15 +- vendor/k8s.io/api/autoscaling/v1/doc.go | 1 + .../k8s.io/api/autoscaling/v1/generated.pb.go | 255 +- .../k8s.io/api/autoscaling/v1/generated.proto | 50 +- vendor/k8s.io/api/autoscaling/v1/types.go | 3 + .../v1/zz_generated.prerelease-lifecycle.go | 40 + vendor/k8s.io/api/autoscaling/v2/doc.go | 1 + .../k8s.io/api/autoscaling/v2/generated.pb.go | 243 +- .../k8s.io/api/autoscaling/v2/generated.proto | 18 +- vendor/k8s.io/api/autoscaling/v2/types.go | 2 + .../v2/zz_generated.prerelease-lifecycle.go | 34 + .../api/autoscaling/v2beta1/generated.pb.go | 243 +- .../api/autoscaling/v2beta1/generated.proto | 51 +- .../k8s.io/api/autoscaling/v2beta1/types.go | 3 + .../api/autoscaling/v2beta2/generated.pb.go | 255 +- .../api/autoscaling/v2beta2/generated.proto | 22 +- .../k8s.io/api/autoscaling/v2beta2/types.go | 4 + vendor/k8s.io/api/batch/v1/doc.go | 2 +- vendor/k8s.io/api/batch/v1/generated.pb.go | 747 +- vendor/k8s.io/api/batch/v1/generated.proto | 131 +- vendor/k8s.io/api/batch/v1/types.go | 123 +- .../batch/v1/types_swagger_doc_generated.go | 41 +- .../api/batch/v1/zz_generated.deepcopy.go | 59 + .../v1/zz_generated.prerelease-lifecycle.go | 46 + .../k8s.io/api/batch/v1beta1/generated.pb.go | 115 +- .../k8s.io/api/batch/v1beta1/generated.proto | 14 +- vendor/k8s.io/api/certificates/v1/doc.go | 2 +- .../api/certificates/v1/generated.pb.go | 133 +- .../api/certificates/v1/generated.proto | 8 +- vendor/k8s.io/api/certificates/v1/types.go | 2 + .../v1/zz_generated.prerelease-lifecycle.go | 34 + .../api/certificates/v1alpha1/generated.pb.go | 74 +- .../api/certificates/v1alpha1/generated.proto | 4 +- .../api/certificates/v1beta1/generated.pb.go | 135 +- .../api/certificates/v1beta1/generated.proto | 8 +- vendor/k8s.io/api/coordination/v1/doc.go | 1 + .../api/coordination/v1/generated.pb.go | 179 +- .../api/coordination/v1/generated.proto | 26 +- vendor/k8s.io/api/coordination/v1/types.go | 30 +- .../v1/types_swagger_doc_generated.go | 6 +- .../coordination/v1/zz_generated.deepcopy.go | 10 + .../v1/zz_generated.prerelease-lifecycle.go | 34 + .../k8s.io/api/coordination/v1alpha1/doc.go | 24 + 
.../api/coordination/v1alpha1/generated.pb.go | 1036 ++ .../api/coordination/v1alpha1/generated.proto | 105 + .../api/coordination/v1alpha1/register.go | 53 + .../k8s.io/api/coordination/v1alpha1/types.go | 100 + .../v1alpha1/types_swagger_doc_generated.go | 64 + .../v1alpha1/zz_generated.deepcopy.go | 116 + .../zz_generated.prerelease-lifecycle.go | 58 + .../api/coordination/v1beta1/generated.pb.go | 182 +- .../api/coordination/v1beta1/generated.proto | 23 +- .../k8s.io/api/coordination/v1beta1/types.go | 13 + .../v1beta1/types_swagger_doc_generated.go | 4 +- .../v1beta1/zz_generated.deepcopy.go | 11 + .../api/core/v1/annotation_key_constants.go | 21 +- vendor/k8s.io/api/core/v1/doc.go | 2 + vendor/k8s.io/api/core/v1/generated.pb.go | 7210 +++++---- vendor/k8s.io/api/core/v1/generated.proto | 728 +- vendor/k8s.io/api/core/v1/types.go | 666 +- .../core/v1/types_swagger_doc_generated.go | 238 +- .../api/core/v1/zz_generated.deepcopy.go | 300 +- .../v1/zz_generated.prerelease-lifecycle.go | 274 + vendor/k8s.io/api/discovery/v1/doc.go | 1 + .../k8s.io/api/discovery/v1/generated.pb.go | 137 +- .../k8s.io/api/discovery/v1/generated.proto | 6 +- vendor/k8s.io/api/discovery/v1/types.go | 2 + .../v1/zz_generated.prerelease-lifecycle.go | 34 + .../api/discovery/v1beta1/generated.pb.go | 135 +- .../api/discovery/v1beta1/generated.proto | 6 +- vendor/k8s.io/api/events/v1/doc.go | 2 +- vendor/k8s.io/api/events/v1/generated.pb.go | 111 +- vendor/k8s.io/api/events/v1/generated.proto | 18 +- vendor/k8s.io/api/events/v1/types.go | 2 + .../v1/zz_generated.prerelease-lifecycle.go | 34 + .../k8s.io/api/events/v1beta1/generated.pb.go | 111 +- .../k8s.io/api/events/v1beta1/generated.proto | 18 +- .../api/extensions/v1beta1/generated.pb.go | 447 +- .../api/extensions/v1beta1/generated.proto | 81 +- vendor/k8s.io/api/extensions/v1beta1/types.go | 21 +- vendor/k8s.io/api/flowcontrol/v1/doc.go | 1 + .../k8s.io/api/flowcontrol/v1/generated.pb.go | 257 +- .../k8s.io/api/flowcontrol/v1/generated.proto | 12 +- vendor/k8s.io/api/flowcontrol/v1/types.go | 4 + .../v1/zz_generated.prerelease-lifecycle.go | 46 + .../api/flowcontrol/v1beta1/generated.pb.go | 249 +- .../api/flowcontrol/v1beta1/generated.proto | 12 +- .../api/flowcontrol/v1beta2/generated.pb.go | 261 +- .../api/flowcontrol/v1beta2/generated.proto | 12 +- .../api/flowcontrol/v1beta3/generated.pb.go | 259 +- .../api/flowcontrol/v1beta3/generated.proto | 12 +- vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go | 23 + .../api/imagepolicy/v1alpha1/generated.pb.go | 1374 ++ .../api/imagepolicy/v1alpha1/generated.proto | 89 + .../api/imagepolicy/v1alpha1/register.go | 51 + .../k8s.io/api/imagepolicy/v1alpha1/types.go | 83 + .../v1alpha1/types_swagger_doc_generated.go | 72 + .../v1alpha1/zz_generated.deepcopy.go | 121 + vendor/k8s.io/api/networking/v1/doc.go | 1 + .../k8s.io/api/networking/v1/generated.pb.go | 275 +- .../k8s.io/api/networking/v1/generated.proto | 32 +- vendor/k8s.io/api/networking/v1/types.go | 18 +- .../v1/zz_generated.prerelease-lifecycle.go | 58 + .../api/networking/v1alpha1/generated.pb.go | 109 +- .../api/networking/v1alpha1/generated.proto | 14 +- .../k8s.io/api/networking/v1alpha1/types.go | 4 + .../v1alpha1/types_swagger_doc_generated.go | 2 +- .../api/networking/v1beta1/generated.pb.go | 3212 +++- .../api/networking/v1beta1/generated.proto | 126 +- .../k8s.io/api/networking/v1beta1/register.go | 4 + vendor/k8s.io/api/networking/v1beta1/types.go | 137 +- .../v1beta1/types_swagger_doc_generated.go | 80 + .../networking/v1beta1/well_known_labels.go 
| 33 + .../v1beta1/zz_generated.deepcopy.go | 203 + .../zz_generated.prerelease-lifecycle.go | 72 + vendor/k8s.io/api/node/v1/doc.go | 2 +- vendor/k8s.io/api/node/v1/generated.pb.go | 99 +- vendor/k8s.io/api/node/v1/generated.proto | 8 +- vendor/k8s.io/api/node/v1/types.go | 2 + .../v1/zz_generated.prerelease-lifecycle.go | 34 + .../k8s.io/api/node/v1alpha1/generated.pb.go | 103 +- .../k8s.io/api/node/v1alpha1/generated.proto | 8 +- .../k8s.io/api/node/v1beta1/generated.pb.go | 99 +- .../k8s.io/api/node/v1beta1/generated.proto | 8 +- vendor/k8s.io/api/policy/v1/doc.go | 1 + vendor/k8s.io/api/policy/v1/generated.pb.go | 125 +- vendor/k8s.io/api/policy/v1/generated.proto | 18 +- vendor/k8s.io/api/policy/v1/types.go | 3 + .../v1/zz_generated.prerelease-lifecycle.go | 40 + .../k8s.io/api/policy/v1beta1/generated.pb.go | 125 +- .../k8s.io/api/policy/v1beta1/generated.proto | 18 +- vendor/k8s.io/api/rbac/v1/doc.go | 2 +- vendor/k8s.io/api/rbac/v1/generated.pb.go | 137 +- vendor/k8s.io/api/rbac/v1/generated.proto | 28 +- vendor/k8s.io/api/rbac/v1/types.go | 20 +- .../v1/zz_generated.prerelease-lifecycle.go | 70 + .../k8s.io/api/rbac/v1alpha1/generated.pb.go | 141 +- .../k8s.io/api/rbac/v1alpha1/generated.proto | 28 +- vendor/k8s.io/api/rbac/v1alpha1/types.go | 12 +- .../k8s.io/api/rbac/v1beta1/generated.pb.go | 137 +- .../k8s.io/api/rbac/v1beta1/generated.proto | 28 +- vendor/k8s.io/api/rbac/v1beta1/types.go | 12 +- .../api/resource/v1alpha2/generated.pb.go | 4817 ------ .../api/resource/v1alpha2/generated.proto | 400 - vendor/k8s.io/api/resource/v1alpha2/types.go | 462 - .../v1alpha2/types_swagger_doc_generated.go | 232 - .../v1alpha2/zz_generated.deepcopy.go | 498 - .../resource/{v1alpha2 => v1alpha3}/doc.go | 4 +- .../api/resource/v1alpha3/generated.pb.go | 8987 ++++++++++++ .../api/resource/v1alpha3/generated.proto | 912 ++ .../{v1alpha2 => v1alpha3}/register.go | 13 +- vendor/k8s.io/api/resource/v1alpha3/types.go | 1048 ++ .../v1alpha3/types_swagger_doc_generated.go | 404 + .../v1alpha3/zz_generated.deepcopy.go | 927 ++ vendor/k8s.io/api/scheduling/v1/doc.go | 2 +- .../k8s.io/api/scheduling/v1/generated.pb.go | 73 +- .../k8s.io/api/scheduling/v1/generated.proto | 4 +- vendor/k8s.io/api/scheduling/v1/types.go | 2 + .../v1/zz_generated.prerelease-lifecycle.go | 34 + .../api/scheduling/v1alpha1/generated.pb.go | 71 +- .../api/scheduling/v1alpha1/generated.proto | 4 +- .../api/scheduling/v1beta1/generated.pb.go | 73 +- .../api/scheduling/v1beta1/generated.proto | 4 +- vendor/k8s.io/api/storage/v1/doc.go | 1 + vendor/k8s.io/api/storage/v1/generated.pb.go | 259 +- vendor/k8s.io/api/storage/v1/generated.proto | 42 +- vendor/k8s.io/api/storage/v1/types.go | 20 +- .../storage/v1/types_swagger_doc_generated.go | 4 +- .../v1/zz_generated.prerelease-lifecycle.go | 82 + .../api/storage/v1alpha1/generated.pb.go | 160 +- .../api/storage/v1alpha1/generated.proto | 24 +- vendor/k8s.io/api/storage/v1alpha1/types.go | 2 - .../api/storage/v1beta1/generated.pb.go | 866 +- .../api/storage/v1beta1/generated.proto | 79 +- vendor/k8s.io/api/storage/v1beta1/register.go | 3 + vendor/k8s.io/api/storage/v1beta1/types.go | 59 +- .../v1beta1/types_swagger_doc_generated.go | 21 + .../storage/v1beta1/zz_generated.deepcopy.go | 66 + .../zz_generated.prerelease-lifecycle.go | 36 + .../api/storagemigration/v1alpha1/doc.go | 23 + .../storagemigration/v1alpha1/generated.pb.go | 1688 +++ .../storagemigration/v1alpha1/generated.proto | 127 + .../api/storagemigration/v1alpha1/register.go | 58 + 
.../api/storagemigration/v1alpha1/types.go | 131 + .../v1alpha1/types_swagger_doc_generated.go | 95 + .../v1alpha1/zz_generated.deepcopy.go | 160 + .../zz_generated.prerelease-lifecycle.go | 58 + .../meta/testrestmapper/test_restmapper.go | 165 + .../pkg/api/resource/generated.pb.go | 43 +- .../apimachinery/pkg/api/resource/quantity.go | 29 + .../apimachinery/pkg/api/validation/OWNERS | 11 + .../internalversion/validation/validation.go | 76 + .../pkg/apis/meta/v1/controller_ref.go | 13 +- .../pkg/apis/meta/v1/generated.pb.go | 708 +- .../pkg/apis/meta/v1/generated.proto | 45 +- .../apimachinery/pkg/apis/meta/v1/helpers.go | 83 +- .../pkg/apis/meta/v1/micro_time.go | 28 + .../apimachinery/pkg/apis/meta/v1/time.go | 29 + .../apimachinery/pkg/apis/meta/v1/types.go | 62 + .../meta/v1/types_swagger_doc_generated.go | 11 + .../pkg/apis/meta/v1/validation/validation.go | 41 +- .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 21 + .../pkg/apis/meta/v1beta1/generated.pb.go | 45 +- .../pkg/apis/meta/v1beta1/generated.proto | 4 +- .../apimachinery/pkg/labels/selector.go | 20 + .../apimachinery/pkg/runtime/extension.go | 100 +- .../apimachinery/pkg/runtime/generated.pb.go | 65 +- .../k8s.io/apimachinery/pkg/runtime/helper.go | 12 +- .../pkg/runtime/schema/generated.pb.go | 31 +- .../runtime/serializer/cbor/direct/direct.go | 36 + .../serializer/cbor/internal/modes/buffers.go | 65 + .../serializer/cbor/internal/modes/custom.go | 422 + .../serializer/cbor/internal/modes/decode.go | 158 + .../cbor/internal/modes/diagnostic.go | 36 + .../serializer/cbor/internal/modes/encode.go | 155 + .../k8s.io/apimachinery/pkg/runtime/types.go | 1 + .../apimachinery/pkg/util/framer/framer.go | 18 +- .../pkg/util/intstr/generated.pb.go | 47 +- .../apimachinery/pkg/util/intstr/intstr.go | 26 + .../pkg/util/managedfields/node.yaml | 2 +- .../apimachinery/pkg/util/runtime/runtime.go | 135 +- .../k8s.io/apimachinery/pkg/util/sets/doc.go | 2 +- .../apimachinery/pkg/util/sets/ordered.go | 53 - .../k8s.io/apimachinery/pkg/util/sets/set.go | 17 +- .../pkg/util/strategicpatch/patch.go | 4 + .../apimachinery/pkg/util/validation/OWNERS | 11 + .../pkg/util/validation/validation.go | 56 +- .../apimachinery/pkg/util/version/version.go | 112 + vendor/k8s.io/apimachinery/pkg/watch/watch.go | 40 +- .../client-go/applyconfigurations/OWNERS | 5 + .../v1/auditannotation.go | 48 + .../v1/expressionwarning.go | 48 + .../v1/matchcondition.go | 4 +- .../v1/matchresources.go | 90 + .../v1/mutatingwebhook.go | 4 +- .../v1/mutatingwebhookconfiguration.go | 10 +- .../v1/namedrulewithoperations.go | 94 + .../admissionregistration/v1/paramkind.go | 48 + .../admissionregistration/v1/paramref.go | 71 + .../admissionregistration/v1/rule.go | 4 +- .../v1/rulewithoperations.go | 4 +- .../v1/servicereference.go | 4 +- .../admissionregistration/v1/typechecking.go | 44 + .../v1/validatingadmissionpolicy.go | 262 + .../v1/validatingadmissionpolicybinding.go | 253 + .../validatingadmissionpolicybindingspec.go | 72 + .../v1/validatingadmissionpolicyspec.go | 117 + .../v1/validatingadmissionpolicystatus.go | 66 + .../v1/validatingwebhook.go | 4 +- .../v1/validatingwebhookconfiguration.go | 10 +- .../admissionregistration/v1/validation.go | 70 + .../admissionregistration/v1/variable.go | 48 + .../v1/webhookclientconfig.go | 4 +- .../v1alpha1/auditannotation.go | 4 +- .../v1alpha1/expressionwarning.go | 4 +- .../v1alpha1/matchcondition.go | 4 +- .../v1alpha1/matchresources.go | 4 +- .../v1alpha1/namedrulewithoperations.go | 4 +- .../v1alpha1/paramkind.go | 4 +- 
.../v1alpha1/paramref.go | 4 +- .../v1alpha1/typechecking.go | 4 +- .../v1alpha1/validatingadmissionpolicy.go | 10 +- .../validatingadmissionpolicybinding.go | 10 +- .../validatingadmissionpolicybindingspec.go | 4 +- .../v1alpha1/validatingadmissionpolicyspec.go | 4 +- .../validatingadmissionpolicystatus.go | 4 +- .../v1alpha1/validation.go | 4 +- .../v1alpha1/variable.go | 4 +- .../v1beta1/auditannotation.go | 4 +- .../v1beta1/expressionwarning.go | 4 +- .../v1beta1/matchcondition.go | 4 +- .../v1beta1/matchresources.go | 4 +- .../v1beta1/mutatingwebhook.go | 4 +- .../v1beta1/mutatingwebhookconfiguration.go | 10 +- .../v1beta1/namedrulewithoperations.go | 4 +- .../v1beta1/paramkind.go | 4 +- .../admissionregistration/v1beta1/paramref.go | 4 +- .../v1beta1/servicereference.go | 4 +- .../v1beta1/typechecking.go | 4 +- .../v1beta1/validatingadmissionpolicy.go | 10 +- .../validatingadmissionpolicybinding.go | 10 +- .../validatingadmissionpolicybindingspec.go | 4 +- .../v1beta1/validatingadmissionpolicyspec.go | 4 +- .../validatingadmissionpolicystatus.go | 4 +- .../v1beta1/validatingwebhook.go | 4 +- .../v1beta1/validatingwebhookconfiguration.go | 10 +- .../v1beta1/validation.go | 4 +- .../admissionregistration/v1beta1/variable.go | 4 +- .../v1beta1/webhookclientconfig.go | 4 +- .../v1alpha1/serverstorageversion.go | 4 +- .../v1alpha1/storageversion.go | 10 +- .../v1alpha1/storageversioncondition.go | 4 +- .../v1alpha1/storageversionstatus.go | 4 +- .../apps/v1/controllerrevision.go | 10 +- .../applyconfigurations/apps/v1/daemonset.go | 10 +- .../apps/v1/daemonsetcondition.go | 4 +- .../apps/v1/daemonsetspec.go | 4 +- .../apps/v1/daemonsetstatus.go | 4 +- .../apps/v1/daemonsetupdatestrategy.go | 4 +- .../applyconfigurations/apps/v1/deployment.go | 10 +- .../apps/v1/deploymentcondition.go | 4 +- .../apps/v1/deploymentspec.go | 4 +- .../apps/v1/deploymentstatus.go | 4 +- .../apps/v1/deploymentstrategy.go | 4 +- .../applyconfigurations/apps/v1/replicaset.go | 10 +- .../apps/v1/replicasetcondition.go | 4 +- .../apps/v1/replicasetspec.go | 4 +- .../apps/v1/replicasetstatus.go | 4 +- .../apps/v1/rollingupdatedaemonset.go | 4 +- .../apps/v1/rollingupdatedeployment.go | 4 +- .../v1/rollingupdatestatefulsetstrategy.go | 4 +- .../apps/v1/statefulset.go | 10 +- .../apps/v1/statefulsetcondition.go | 4 +- .../apps/v1/statefulsetordinals.go | 4 +- ...setpersistentvolumeclaimretentionpolicy.go | 4 +- .../apps/v1/statefulsetspec.go | 4 +- .../apps/v1/statefulsetstatus.go | 4 +- .../apps/v1/statefulsetupdatestrategy.go | 4 +- .../apps/v1beta1/controllerrevision.go | 10 +- .../apps/v1beta1/deployment.go | 10 +- .../apps/v1beta1/deploymentcondition.go | 4 +- .../apps/v1beta1/deploymentspec.go | 4 +- .../apps/v1beta1/deploymentstatus.go | 4 +- .../apps/v1beta1/deploymentstrategy.go | 4 +- .../apps/v1beta1/rollbackconfig.go | 4 +- .../apps/v1beta1/rollingupdatedeployment.go | 4 +- .../rollingupdatestatefulsetstrategy.go | 4 +- .../apps/v1beta1/statefulset.go | 10 +- .../apps/v1beta1/statefulsetcondition.go | 4 +- .../apps/v1beta1/statefulsetordinals.go | 4 +- ...setpersistentvolumeclaimretentionpolicy.go | 4 +- .../apps/v1beta1/statefulsetspec.go | 4 +- .../apps/v1beta1/statefulsetstatus.go | 4 +- .../apps/v1beta1/statefulsetupdatestrategy.go | 4 +- .../apps/v1beta2/controllerrevision.go | 10 +- .../apps/v1beta2/daemonset.go | 10 +- .../apps/v1beta2/daemonsetcondition.go | 4 +- .../apps/v1beta2/daemonsetspec.go | 4 +- .../apps/v1beta2/daemonsetstatus.go | 4 +- .../apps/v1beta2/daemonsetupdatestrategy.go | 4 +- 
.../apps/v1beta2/deployment.go | 10 +- .../apps/v1beta2/deploymentcondition.go | 4 +- .../apps/v1beta2/deploymentspec.go | 4 +- .../apps/v1beta2/deploymentstatus.go | 4 +- .../apps/v1beta2/deploymentstrategy.go | 4 +- .../apps/v1beta2/replicaset.go | 10 +- .../apps/v1beta2/replicasetcondition.go | 4 +- .../apps/v1beta2/replicasetspec.go | 4 +- .../apps/v1beta2/replicasetstatus.go | 4 +- .../apps/v1beta2/rollingupdatedaemonset.go | 4 +- .../apps/v1beta2/rollingupdatedeployment.go | 4 +- .../rollingupdatestatefulsetstrategy.go | 4 +- .../applyconfigurations/apps/v1beta2/scale.go | 10 +- .../apps/v1beta2/statefulset.go | 10 +- .../apps/v1beta2/statefulsetcondition.go | 4 +- .../apps/v1beta2/statefulsetordinals.go | 4 +- ...setpersistentvolumeclaimretentionpolicy.go | 4 +- .../apps/v1beta2/statefulsetspec.go | 4 +- .../apps/v1beta2/statefulsetstatus.go | 4 +- .../apps/v1beta2/statefulsetupdatestrategy.go | 4 +- .../v1/crossversionobjectreference.go | 4 +- .../autoscaling/v1/horizontalpodautoscaler.go | 10 +- .../v1/horizontalpodautoscalerspec.go | 4 +- .../v1/horizontalpodautoscalerstatus.go | 4 +- .../autoscaling/v1/scale.go | 10 +- .../autoscaling/v1/scalespec.go | 4 +- .../autoscaling/v1/scalestatus.go | 4 +- .../v2/containerresourcemetricsource.go | 4 +- .../v2/containerresourcemetricstatus.go | 4 +- .../v2/crossversionobjectreference.go | 4 +- .../autoscaling/v2/externalmetricsource.go | 4 +- .../autoscaling/v2/externalmetricstatus.go | 4 +- .../autoscaling/v2/horizontalpodautoscaler.go | 10 +- .../v2/horizontalpodautoscalerbehavior.go | 4 +- .../v2/horizontalpodautoscalercondition.go | 4 +- .../v2/horizontalpodautoscalerspec.go | 4 +- .../v2/horizontalpodautoscalerstatus.go | 4 +- .../autoscaling/v2/hpascalingpolicy.go | 4 +- .../autoscaling/v2/hpascalingrules.go | 4 +- .../autoscaling/v2/metricidentifier.go | 4 +- .../autoscaling/v2/metricspec.go | 4 +- .../autoscaling/v2/metricstatus.go | 4 +- .../autoscaling/v2/metrictarget.go | 4 +- .../autoscaling/v2/metricvaluestatus.go | 4 +- .../autoscaling/v2/objectmetricsource.go | 4 +- .../autoscaling/v2/objectmetricstatus.go | 4 +- .../autoscaling/v2/podsmetricsource.go | 4 +- .../autoscaling/v2/podsmetricstatus.go | 4 +- .../autoscaling/v2/resourcemetricsource.go | 4 +- .../autoscaling/v2/resourcemetricstatus.go | 4 +- .../v2beta1/containerresourcemetricsource.go | 4 +- .../v2beta1/containerresourcemetricstatus.go | 4 +- .../v2beta1/crossversionobjectreference.go | 4 +- .../v2beta1/externalmetricsource.go | 4 +- .../v2beta1/externalmetricstatus.go | 4 +- .../v2beta1/horizontalpodautoscaler.go | 10 +- .../horizontalpodautoscalercondition.go | 4 +- .../v2beta1/horizontalpodautoscalerspec.go | 4 +- .../v2beta1/horizontalpodautoscalerstatus.go | 4 +- .../autoscaling/v2beta1/metricspec.go | 4 +- .../autoscaling/v2beta1/metricstatus.go | 4 +- .../autoscaling/v2beta1/objectmetricsource.go | 4 +- .../autoscaling/v2beta1/objectmetricstatus.go | 4 +- .../autoscaling/v2beta1/podsmetricsource.go | 4 +- .../autoscaling/v2beta1/podsmetricstatus.go | 4 +- .../v2beta1/resourcemetricsource.go | 4 +- .../v2beta1/resourcemetricstatus.go | 4 +- .../v2beta2/containerresourcemetricsource.go | 4 +- .../v2beta2/containerresourcemetricstatus.go | 4 +- .../v2beta2/crossversionobjectreference.go | 4 +- .../v2beta2/externalmetricsource.go | 4 +- .../v2beta2/externalmetricstatus.go | 4 +- .../v2beta2/horizontalpodautoscaler.go | 10 +- .../horizontalpodautoscalerbehavior.go | 4 +- .../horizontalpodautoscalercondition.go | 4 +- .../v2beta2/horizontalpodautoscalerspec.go | 
4 +- .../v2beta2/horizontalpodautoscalerstatus.go | 4 +- .../autoscaling/v2beta2/hpascalingpolicy.go | 4 +- .../autoscaling/v2beta2/hpascalingrules.go | 4 +- .../autoscaling/v2beta2/metricidentifier.go | 4 +- .../autoscaling/v2beta2/metricspec.go | 4 +- .../autoscaling/v2beta2/metricstatus.go | 4 +- .../autoscaling/v2beta2/metrictarget.go | 4 +- .../autoscaling/v2beta2/metricvaluestatus.go | 4 +- .../autoscaling/v2beta2/objectmetricsource.go | 4 +- .../autoscaling/v2beta2/objectmetricstatus.go | 4 +- .../autoscaling/v2beta2/podsmetricsource.go | 4 +- .../autoscaling/v2beta2/podsmetricstatus.go | 4 +- .../v2beta2/resourcemetricsource.go | 4 +- .../v2beta2/resourcemetricstatus.go | 4 +- .../applyconfigurations/batch/v1/cronjob.go | 10 +- .../batch/v1/cronjobspec.go | 4 +- .../batch/v1/cronjobstatus.go | 4 +- .../applyconfigurations/batch/v1/job.go | 10 +- .../batch/v1/jobcondition.go | 4 +- .../applyconfigurations/batch/v1/jobspec.go | 22 +- .../applyconfigurations/batch/v1/jobstatus.go | 4 +- .../batch/v1/jobtemplatespec.go | 10 +- .../batch/v1/podfailurepolicy.go | 4 +- .../podfailurepolicyonexitcodesrequirement.go | 4 +- .../podfailurepolicyonpodconditionspattern.go | 4 +- .../batch/v1/podfailurepolicyrule.go | 4 +- .../batch/v1/successpolicy.go | 44 + .../batch/v1/successpolicyrule.go | 48 + .../batch/v1/uncountedterminatedpods.go | 4 +- .../batch/v1beta1/cronjob.go | 10 +- .../batch/v1beta1/cronjobspec.go | 4 +- .../batch/v1beta1/cronjobstatus.go | 4 +- .../batch/v1beta1/jobtemplatespec.go | 10 +- .../v1/certificatesigningrequest.go | 10 +- .../v1/certificatesigningrequestcondition.go | 4 +- .../v1/certificatesigningrequestspec.go | 4 +- .../v1/certificatesigningrequeststatus.go | 4 +- .../v1alpha1/clustertrustbundle.go | 10 +- .../v1alpha1/clustertrustbundlespec.go | 4 +- .../v1beta1/certificatesigningrequest.go | 10 +- .../certificatesigningrequestcondition.go | 4 +- .../v1beta1/certificatesigningrequestspec.go | 4 +- .../certificatesigningrequeststatus.go | 4 +- .../coordination/v1/lease.go | 10 +- .../coordination/v1/leasespec.go | 33 +- .../coordination/v1alpha1/leasecandidate.go | 255 + .../v1alpha1/leasecandidatespec.go | 91 + .../coordination/v1beta1/lease.go | 10 +- .../coordination/v1beta1/leasespec.go | 33 +- .../applyconfigurations/core/v1/affinity.go | 4 +- .../core/v1/apparmorprofile.go | 52 + .../core/v1/attachedvolume.go | 4 +- .../v1/awselasticblockstorevolumesource.go | 4 +- .../core/v1/azurediskvolumesource.go | 4 +- .../v1/azurefilepersistentvolumesource.go | 4 +- .../core/v1/azurefilevolumesource.go | 4 +- .../core/v1/capabilities.go | 4 +- .../core/v1/cephfspersistentvolumesource.go | 4 +- .../core/v1/cephfsvolumesource.go | 4 +- .../core/v1/cinderpersistentvolumesource.go | 4 +- .../core/v1/cindervolumesource.go | 4 +- .../core/v1/claimsource.go | 48 - .../core/v1/clientipconfig.go | 4 +- .../core/v1/clustertrustbundleprojection.go | 4 +- .../core/v1/componentcondition.go | 4 +- .../core/v1/componentstatus.go | 10 +- .../applyconfigurations/core/v1/configmap.go | 10 +- .../core/v1/configmapenvsource.go | 4 +- .../core/v1/configmapkeyselector.go | 4 +- .../core/v1/configmapnodeconfigsource.go | 4 +- .../core/v1/configmapprojection.go | 4 +- .../core/v1/configmapvolumesource.go | 4 +- .../applyconfigurations/core/v1/container.go | 4 +- .../core/v1/containerimage.go | 4 +- .../core/v1/containerport.go | 4 +- .../core/v1/containerresizepolicy.go | 4 +- .../core/v1/containerstate.go | 4 +- .../core/v1/containerstaterunning.go | 4 +- 
.../core/v1/containerstateterminated.go | 4 +- .../core/v1/containerstatewaiting.go | 4 +- .../core/v1/containerstatus.go | 67 +- .../core/v1/containeruser.go | 39 + .../core/v1/csipersistentvolumesource.go | 4 +- .../core/v1/csivolumesource.go | 4 +- .../core/v1/daemonendpoint.go | 4 +- .../core/v1/downwardapiprojection.go | 4 +- .../core/v1/downwardapivolumefile.go | 4 +- .../core/v1/downwardapivolumesource.go | 4 +- .../core/v1/emptydirvolumesource.go | 4 +- .../core/v1/endpointaddress.go | 4 +- .../core/v1/endpointport.go | 4 +- .../applyconfigurations/core/v1/endpoints.go | 10 +- .../core/v1/endpointsubset.go | 4 +- .../core/v1/envfromsource.go | 4 +- .../applyconfigurations/core/v1/envvar.go | 4 +- .../core/v1/envvarsource.go | 4 +- .../core/v1/ephemeralcontainer.go | 4 +- .../core/v1/ephemeralcontainercommon.go | 4 +- .../core/v1/ephemeralvolumesource.go | 4 +- .../applyconfigurations/core/v1/event.go | 10 +- .../core/v1/eventseries.go | 4 +- .../core/v1/eventsource.go | 4 +- .../applyconfigurations/core/v1/execaction.go | 4 +- .../core/v1/fcvolumesource.go | 4 +- .../core/v1/flexpersistentvolumesource.go | 4 +- .../core/v1/flexvolumesource.go | 4 +- .../core/v1/flockervolumesource.go | 4 +- .../core/v1/gcepersistentdiskvolumesource.go | 4 +- .../core/v1/gitrepovolumesource.go | 4 +- .../v1/glusterfspersistentvolumesource.go | 4 +- .../core/v1/glusterfsvolumesource.go | 4 +- .../applyconfigurations/core/v1/grpcaction.go | 4 +- .../applyconfigurations/core/v1/hostalias.go | 4 +- .../applyconfigurations/core/v1/hostip.go | 4 +- .../core/v1/hostpathvolumesource.go | 4 +- .../core/v1/httpgetaction.go | 4 +- .../applyconfigurations/core/v1/httpheader.go | 4 +- .../core/v1/imagevolumesource.go | 52 + .../core/v1/iscsipersistentvolumesource.go | 4 +- .../core/v1/iscsivolumesource.go | 4 +- .../applyconfigurations/core/v1/keytopath.go | 4 +- .../applyconfigurations/core/v1/lifecycle.go | 4 +- .../core/v1/lifecyclehandler.go | 4 +- .../applyconfigurations/core/v1/limitrange.go | 10 +- .../core/v1/limitrangeitem.go | 4 +- .../core/v1/limitrangespec.go | 4 +- .../core/v1/linuxcontaineruser.go | 59 + .../core/v1/loadbalanceringress.go | 4 +- .../core/v1/loadbalancerstatus.go | 4 +- .../core/v1/localobjectreference.go | 4 +- .../core/v1/localvolumesource.go | 4 +- .../core/v1/modifyvolumestatus.go | 4 +- .../applyconfigurations/core/v1/namespace.go | 10 +- .../core/v1/namespacecondition.go | 4 +- .../core/v1/namespacespec.go | 4 +- .../core/v1/namespacestatus.go | 4 +- .../core/v1/nfsvolumesource.go | 4 +- .../applyconfigurations/core/v1/node.go | 10 +- .../core/v1/nodeaddress.go | 4 +- .../core/v1/nodeaffinity.go | 4 +- .../core/v1/nodecondition.go | 4 +- .../core/v1/nodeconfigsource.go | 4 +- .../core/v1/nodeconfigstatus.go | 4 +- .../core/v1/nodedaemonendpoints.go | 4 +- .../core/v1/nodefeatures.go | 39 + .../core/v1/noderuntimehandler.go | 48 + .../core/v1/noderuntimehandlerfeatures.go | 48 + .../core/v1/nodeselector.go | 4 +- .../core/v1/nodeselectorrequirement.go | 4 +- .../core/v1/nodeselectorterm.go | 4 +- .../applyconfigurations/core/v1/nodespec.go | 4 +- .../applyconfigurations/core/v1/nodestatus.go | 27 +- .../core/v1/nodesysteminfo.go | 4 +- .../core/v1/objectfieldselector.go | 4 +- .../core/v1/objectreference.go | 4 +- .../core/v1/persistentvolume.go | 10 +- .../core/v1/persistentvolumeclaim.go | 10 +- .../core/v1/persistentvolumeclaimcondition.go | 4 +- .../core/v1/persistentvolumeclaimspec.go | 4 +- .../core/v1/persistentvolumeclaimstatus.go | 4 +- 
.../core/v1/persistentvolumeclaimtemplate.go | 10 +- .../v1/persistentvolumeclaimvolumesource.go | 4 +- .../core/v1/persistentvolumesource.go | 4 +- .../core/v1/persistentvolumespec.go | 4 +- .../core/v1/persistentvolumestatus.go | 4 +- .../v1/photonpersistentdiskvolumesource.go | 4 +- .../applyconfigurations/core/v1/pod.go | 10 +- .../core/v1/podaffinity.go | 4 +- .../core/v1/podaffinityterm.go | 4 +- .../core/v1/podantiaffinity.go | 4 +- .../core/v1/podcondition.go | 4 +- .../core/v1/poddnsconfig.go | 4 +- .../core/v1/poddnsconfigoption.go | 4 +- .../applyconfigurations/core/v1/podip.go | 4 +- .../applyconfigurations/core/v1/podos.go | 4 +- .../core/v1/podreadinessgate.go | 4 +- .../core/v1/podresourceclaim.go | 25 +- .../core/v1/podresourceclaimstatus.go | 4 +- .../core/v1/podschedulinggate.go | 4 +- .../core/v1/podsecuritycontext.go | 42 +- .../applyconfigurations/core/v1/podspec.go | 4 +- .../applyconfigurations/core/v1/podstatus.go | 4 +- .../core/v1/podtemplate.go | 10 +- .../core/v1/podtemplatespec.go | 10 +- .../applyconfigurations/core/v1/portstatus.go | 4 +- .../core/v1/portworxvolumesource.go | 4 +- .../core/v1/preferredschedulingterm.go | 4 +- .../applyconfigurations/core/v1/probe.go | 4 +- .../core/v1/probehandler.go | 4 +- .../core/v1/projectedvolumesource.go | 4 +- .../core/v1/quobytevolumesource.go | 4 +- .../core/v1/rbdpersistentvolumesource.go | 4 +- .../core/v1/rbdvolumesource.go | 4 +- .../core/v1/replicationcontroller.go | 10 +- .../core/v1/replicationcontrollercondition.go | 4 +- .../core/v1/replicationcontrollerspec.go | 4 +- .../core/v1/replicationcontrollerstatus.go | 4 +- .../core/v1/resourceclaim.go | 15 +- .../core/v1/resourcefieldselector.go | 4 +- .../core/v1/resourcehealth.go | 52 + .../core/v1/resourcequota.go | 10 +- .../core/v1/resourcequotaspec.go | 4 +- .../core/v1/resourcequotastatus.go | 4 +- .../core/v1/resourcerequirements.go | 4 +- .../core/v1/resourcestatus.go | 57 + .../core/v1/scaleiopersistentvolumesource.go | 4 +- .../core/v1/scaleiovolumesource.go | 4 +- .../v1/scopedresourceselectorrequirement.go | 4 +- .../core/v1/scopeselector.go | 4 +- .../core/v1/seccompprofile.go | 4 +- .../applyconfigurations/core/v1/secret.go | 10 +- .../core/v1/secretenvsource.go | 4 +- .../core/v1/secretkeyselector.go | 4 +- .../core/v1/secretprojection.go | 4 +- .../core/v1/secretreference.go | 4 +- .../core/v1/secretvolumesource.go | 4 +- .../core/v1/securitycontext.go | 13 +- .../core/v1/selinuxoptions.go | 4 +- .../applyconfigurations/core/v1/service.go | 10 +- .../core/v1/serviceaccount.go | 10 +- .../core/v1/serviceaccounttokenprojection.go | 4 +- .../core/v1/serviceport.go | 4 +- .../core/v1/servicespec.go | 13 +- .../core/v1/servicestatus.go | 4 +- .../core/v1/sessionaffinityconfig.go | 4 +- .../core/v1/sleepaction.go | 4 +- .../v1/storageospersistentvolumesource.go | 4 +- .../core/v1/storageosvolumesource.go | 4 +- .../applyconfigurations/core/v1/sysctl.go | 4 +- .../applyconfigurations/core/v1/taint.go | 4 +- .../core/v1/tcpsocketaction.go | 4 +- .../applyconfigurations/core/v1/toleration.go | 4 +- .../v1/topologyselectorlabelrequirement.go | 4 +- .../core/v1/topologyselectorterm.go | 4 +- .../core/v1/topologyspreadconstraint.go | 4 +- .../core/v1/typedlocalobjectreference.go | 4 +- .../core/v1/typedobjectreference.go | 4 +- .../applyconfigurations/core/v1/volume.go | 12 +- .../core/v1/volumedevice.go | 4 +- .../core/v1/volumemount.go | 25 +- .../core/v1/volumemountstatus.go | 70 + .../core/v1/volumenodeaffinity.go | 4 +- .../core/v1/volumeprojection.go | 
4 +- .../core/v1/volumeresourcerequirements.go | 4 +- .../core/v1/volumesource.go | 13 +- .../core/v1/vspherevirtualdiskvolumesource.go | 4 +- .../core/v1/weightedpodaffinityterm.go | 4 +- .../core/v1/windowssecuritycontextoptions.go | 4 +- .../discovery/v1/endpoint.go | 4 +- .../discovery/v1/endpointconditions.go | 4 +- .../discovery/v1/endpointhints.go | 4 +- .../discovery/v1/endpointport.go | 4 +- .../discovery/v1/endpointslice.go | 10 +- .../discovery/v1/forzone.go | 4 +- .../discovery/v1beta1/endpoint.go | 4 +- .../discovery/v1beta1/endpointconditions.go | 4 +- .../discovery/v1beta1/endpointhints.go | 4 +- .../discovery/v1beta1/endpointport.go | 4 +- .../discovery/v1beta1/endpointslice.go | 10 +- .../discovery/v1beta1/forzone.go | 4 +- .../client-go/applyconfigurations/doc.go | 151 + .../applyconfigurations/events/v1/event.go | 10 +- .../events/v1/eventseries.go | 4 +- .../events/v1beta1/event.go | 10 +- .../events/v1beta1/eventseries.go | 4 +- .../extensions/v1beta1/daemonset.go | 10 +- .../extensions/v1beta1/daemonsetcondition.go | 4 +- .../extensions/v1beta1/daemonsetspec.go | 4 +- .../extensions/v1beta1/daemonsetstatus.go | 4 +- .../v1beta1/daemonsetupdatestrategy.go | 4 +- .../extensions/v1beta1/deployment.go | 10 +- .../extensions/v1beta1/deploymentcondition.go | 4 +- .../extensions/v1beta1/deploymentspec.go | 4 +- .../extensions/v1beta1/deploymentstatus.go | 4 +- .../extensions/v1beta1/deploymentstrategy.go | 4 +- .../extensions/v1beta1/httpingresspath.go | 4 +- .../v1beta1/httpingressrulevalue.go | 4 +- .../extensions/v1beta1/ingress.go | 10 +- .../extensions/v1beta1/ingressbackend.go | 4 +- .../v1beta1/ingressloadbalanceringress.go | 4 +- .../v1beta1/ingressloadbalancerstatus.go | 4 +- .../extensions/v1beta1/ingressportstatus.go | 4 +- .../extensions/v1beta1/ingressrule.go | 6 +- .../extensions/v1beta1/ingressrulevalue.go | 4 +- .../extensions/v1beta1/ingressspec.go | 4 +- .../extensions/v1beta1/ingressstatus.go | 4 +- .../extensions/v1beta1/ingresstls.go | 4 +- .../extensions/v1beta1/ipblock.go | 4 +- .../extensions/v1beta1/networkpolicy.go | 10 +- .../v1beta1/networkpolicyegressrule.go | 4 +- .../v1beta1/networkpolicyingressrule.go | 4 +- .../extensions/v1beta1/networkpolicypeer.go | 4 +- .../extensions/v1beta1/networkpolicyport.go | 4 +- .../extensions/v1beta1/networkpolicyspec.go | 4 +- .../extensions/v1beta1/replicaset.go | 10 +- .../extensions/v1beta1/replicasetcondition.go | 4 +- .../extensions/v1beta1/replicasetspec.go | 4 +- .../extensions/v1beta1/replicasetstatus.go | 4 +- .../extensions/v1beta1/rollbackconfig.go | 4 +- .../v1beta1/rollingupdatedaemonset.go | 4 +- .../v1beta1/rollingupdatedeployment.go | 4 +- .../extensions/v1beta1/scale.go | 10 +- .../v1/exemptprioritylevelconfiguration.go | 4 +- .../flowcontrol/v1/flowdistinguishermethod.go | 4 +- .../flowcontrol/v1/flowschema.go | 10 +- .../flowcontrol/v1/flowschemacondition.go | 4 +- .../flowcontrol/v1/flowschemaspec.go | 4 +- .../flowcontrol/v1/flowschemastatus.go | 4 +- .../flowcontrol/v1/groupsubject.go | 4 +- .../v1/limitedprioritylevelconfiguration.go | 4 +- .../flowcontrol/v1/limitresponse.go | 4 +- .../flowcontrol/v1/nonresourcepolicyrule.go | 4 +- .../flowcontrol/v1/policyruleswithsubjects.go | 4 +- .../v1/prioritylevelconfiguration.go | 10 +- .../v1/prioritylevelconfigurationcondition.go | 4 +- .../v1/prioritylevelconfigurationreference.go | 4 +- .../v1/prioritylevelconfigurationspec.go | 4 +- .../v1/prioritylevelconfigurationstatus.go | 4 +- .../flowcontrol/v1/queuingconfiguration.go | 4 +- 
.../flowcontrol/v1/resourcepolicyrule.go | 4 +- .../flowcontrol/v1/serviceaccountsubject.go | 4 +- .../flowcontrol/v1/subject.go | 4 +- .../flowcontrol/v1/usersubject.go | 4 +- .../exemptprioritylevelconfiguration.go | 4 +- .../v1beta1/flowdistinguishermethod.go | 4 +- .../flowcontrol/v1beta1/flowschema.go | 10 +- .../v1beta1/flowschemacondition.go | 4 +- .../flowcontrol/v1beta1/flowschemaspec.go | 4 +- .../flowcontrol/v1beta1/flowschemastatus.go | 4 +- .../flowcontrol/v1beta1/groupsubject.go | 4 +- .../limitedprioritylevelconfiguration.go | 4 +- .../flowcontrol/v1beta1/limitresponse.go | 4 +- .../v1beta1/nonresourcepolicyrule.go | 4 +- .../v1beta1/policyruleswithsubjects.go | 4 +- .../v1beta1/prioritylevelconfiguration.go | 10 +- .../prioritylevelconfigurationcondition.go | 4 +- .../prioritylevelconfigurationreference.go | 4 +- .../v1beta1/prioritylevelconfigurationspec.go | 4 +- .../prioritylevelconfigurationstatus.go | 4 +- .../v1beta1/queuingconfiguration.go | 4 +- .../flowcontrol/v1beta1/resourcepolicyrule.go | 4 +- .../v1beta1/serviceaccountsubject.go | 4 +- .../flowcontrol/v1beta1/subject.go | 4 +- .../flowcontrol/v1beta1/usersubject.go | 4 +- .../exemptprioritylevelconfiguration.go | 4 +- .../v1beta2/flowdistinguishermethod.go | 4 +- .../flowcontrol/v1beta2/flowschema.go | 10 +- .../v1beta2/flowschemacondition.go | 4 +- .../flowcontrol/v1beta2/flowschemaspec.go | 4 +- .../flowcontrol/v1beta2/flowschemastatus.go | 4 +- .../flowcontrol/v1beta2/groupsubject.go | 4 +- .../limitedprioritylevelconfiguration.go | 4 +- .../flowcontrol/v1beta2/limitresponse.go | 4 +- .../v1beta2/nonresourcepolicyrule.go | 4 +- .../v1beta2/policyruleswithsubjects.go | 4 +- .../v1beta2/prioritylevelconfiguration.go | 10 +- .../prioritylevelconfigurationcondition.go | 4 +- .../prioritylevelconfigurationreference.go | 4 +- .../v1beta2/prioritylevelconfigurationspec.go | 4 +- .../prioritylevelconfigurationstatus.go | 4 +- .../v1beta2/queuingconfiguration.go | 4 +- .../flowcontrol/v1beta2/resourcepolicyrule.go | 4 +- .../v1beta2/serviceaccountsubject.go | 4 +- .../flowcontrol/v1beta2/subject.go | 4 +- .../flowcontrol/v1beta2/usersubject.go | 4 +- .../exemptprioritylevelconfiguration.go | 4 +- .../v1beta3/flowdistinguishermethod.go | 4 +- .../flowcontrol/v1beta3/flowschema.go | 10 +- .../v1beta3/flowschemacondition.go | 4 +- .../flowcontrol/v1beta3/flowschemaspec.go | 4 +- .../flowcontrol/v1beta3/flowschemastatus.go | 4 +- .../flowcontrol/v1beta3/groupsubject.go | 4 +- .../limitedprioritylevelconfiguration.go | 4 +- .../flowcontrol/v1beta3/limitresponse.go | 4 +- .../v1beta3/nonresourcepolicyrule.go | 4 +- .../v1beta3/policyruleswithsubjects.go | 4 +- .../v1beta3/prioritylevelconfiguration.go | 10 +- .../prioritylevelconfigurationcondition.go | 4 +- .../prioritylevelconfigurationreference.go | 4 +- .../v1beta3/prioritylevelconfigurationspec.go | 4 +- .../prioritylevelconfigurationstatus.go | 4 +- .../v1beta3/queuingconfiguration.go | 4 +- .../flowcontrol/v1beta3/resourcepolicyrule.go | 4 +- .../v1beta3/serviceaccountsubject.go | 4 +- .../flowcontrol/v1beta3/subject.go | 4 +- .../flowcontrol/v1beta3/usersubject.go | 4 +- .../imagepolicy/v1alpha1/imagereview.go | 262 + .../v1alpha1/imagereviewcontainerspec.go | 39 + .../imagepolicy/v1alpha1/imagereviewspec.go | 68 + .../imagepolicy/v1alpha1/imagereviewstatus.go | 63 + .../applyconfigurations/internal/internal.go | 1136 +- .../applyconfigurations/meta/v1/condition.go | 4 +- .../meta/v1/deleteoptions.go | 4 +- .../meta/v1/labelselector.go | 4 +- 
.../meta/v1/labelselectorrequirement.go | 4 +- .../meta/v1/managedfieldsentry.go | 4 +- .../applyconfigurations/meta/v1/objectmeta.go | 9 +- .../meta/v1/ownerreference.go | 4 +- .../meta/v1/preconditions.go | 4 +- .../applyconfigurations/meta/v1/typemeta.go | 4 +- .../networking/v1/httpingresspath.go | 4 +- .../networking/v1/httpingressrulevalue.go | 4 +- .../networking/v1/ingress.go | 10 +- .../networking/v1/ingressbackend.go | 4 +- .../networking/v1/ingressclass.go | 10 +- .../v1/ingressclassparametersreference.go | 4 +- .../networking/v1/ingressclassspec.go | 4 +- .../v1/ingressloadbalanceringress.go | 4 +- .../v1/ingressloadbalancerstatus.go | 4 +- .../networking/v1/ingressportstatus.go | 4 +- .../networking/v1/ingressrule.go | 6 +- .../networking/v1/ingressrulevalue.go | 4 +- .../networking/v1/ingressservicebackend.go | 4 +- .../networking/v1/ingressspec.go | 4 +- .../networking/v1/ingressstatus.go | 4 +- .../networking/v1/ingresstls.go | 4 +- .../networking/v1/ipblock.go | 4 +- .../networking/v1/networkpolicy.go | 10 +- .../networking/v1/networkpolicyegressrule.go | 4 +- .../networking/v1/networkpolicyingressrule.go | 4 +- .../networking/v1/networkpolicypeer.go | 4 +- .../networking/v1/networkpolicyport.go | 4 +- .../networking/v1/networkpolicyspec.go | 4 +- .../networking/v1/servicebackendport.go | 4 +- .../networking/v1alpha1/ipaddress.go | 10 +- .../networking/v1alpha1/ipaddressspec.go | 4 +- .../networking/v1alpha1/parentreference.go | 4 +- .../networking/v1alpha1/servicecidr.go | 10 +- .../networking/v1alpha1/servicecidrspec.go | 4 +- .../networking/v1alpha1/servicecidrstatus.go | 4 +- .../networking/v1beta1/httpingresspath.go | 4 +- .../v1beta1/httpingressrulevalue.go | 4 +- .../networking/v1beta1/ingress.go | 10 +- .../networking/v1beta1/ingressbackend.go | 4 +- .../networking/v1beta1/ingressclass.go | 10 +- .../ingressclassparametersreference.go | 4 +- .../networking/v1beta1/ingressclassspec.go | 4 +- .../v1beta1/ingressloadbalanceringress.go | 4 +- .../v1beta1/ingressloadbalancerstatus.go | 4 +- .../networking/v1beta1/ingressportstatus.go | 4 +- .../networking/v1beta1/ingressrule.go | 6 +- .../networking/v1beta1/ingressrulevalue.go | 4 +- .../networking/v1beta1/ingressspec.go | 4 +- .../networking/v1beta1/ingressstatus.go | 4 +- .../networking/v1beta1/ingresstls.go | 4 +- .../networking/v1beta1/ipaddress.go | 253 + .../networking/v1beta1/ipaddressspec.go | 39 + .../v1beta1/parentreference.go} | 50 +- .../networking/v1beta1/servicecidr.go | 262 + .../networking/v1beta1/servicecidrspec.go | 41 + .../networking/v1beta1/servicecidrstatus.go | 48 + .../applyconfigurations/node/v1/overhead.go | 4 +- .../node/v1/runtimeclass.go | 10 +- .../applyconfigurations/node/v1/scheduling.go | 4 +- .../node/v1alpha1/overhead.go | 4 +- .../node/v1alpha1/runtimeclass.go | 10 +- .../node/v1alpha1/runtimeclassspec.go | 4 +- .../node/v1alpha1/scheduling.go | 4 +- .../node/v1beta1/overhead.go | 4 +- .../node/v1beta1/runtimeclass.go | 10 +- .../node/v1beta1/scheduling.go | 4 +- .../applyconfigurations/policy/v1/eviction.go | 10 +- .../policy/v1/poddisruptionbudget.go | 10 +- .../policy/v1/poddisruptionbudgetspec.go | 4 +- .../policy/v1/poddisruptionbudgetstatus.go | 4 +- .../policy/v1beta1/eviction.go | 10 +- .../policy/v1beta1/poddisruptionbudget.go | 10 +- .../policy/v1beta1/poddisruptionbudgetspec.go | 4 +- .../v1beta1/poddisruptionbudgetstatus.go | 4 +- .../rbac/v1/aggregationrule.go | 4 +- .../rbac/v1/clusterrole.go | 10 +- .../rbac/v1/clusterrolebinding.go | 10 +- 
.../applyconfigurations/rbac/v1/policyrule.go | 4 +- .../applyconfigurations/rbac/v1/role.go | 10 +- .../rbac/v1/rolebinding.go | 10 +- .../applyconfigurations/rbac/v1/roleref.go | 4 +- .../applyconfigurations/rbac/v1/subject.go | 4 +- .../rbac/v1alpha1/aggregationrule.go | 4 +- .../rbac/v1alpha1/clusterrole.go | 10 +- .../rbac/v1alpha1/clusterrolebinding.go | 10 +- .../rbac/v1alpha1/policyrule.go | 4 +- .../applyconfigurations/rbac/v1alpha1/role.go | 10 +- .../rbac/v1alpha1/rolebinding.go | 10 +- .../rbac/v1alpha1/roleref.go | 4 +- .../rbac/v1alpha1/subject.go | 4 +- .../rbac/v1beta1/aggregationrule.go | 4 +- .../rbac/v1beta1/clusterrole.go | 10 +- .../rbac/v1beta1/clusterrolebinding.go | 10 +- .../rbac/v1beta1/policyrule.go | 4 +- .../applyconfigurations/rbac/v1beta1/role.go | 10 +- .../rbac/v1beta1/rolebinding.go | 10 +- .../rbac/v1beta1/roleref.go | 4 +- .../rbac/v1beta1/subject.go | 4 +- .../resource/v1alpha2/allocationresult.go | 66 - .../resourceclaimparametersreference.go | 57 - .../resource/v1alpha2/resourceclaimspec.go | 61 - .../resource/v1alpha2/resourcehandle.go | 48 - .../resource/v1alpha3/allocationresult.go | 61 + .../resource/v1alpha3/basicdevice.go | 65 + .../resource/v1alpha3/celdeviceselector.go | 39 + .../resource/v1alpha3/device.go | 48 + .../v1alpha3/deviceallocationconfiguration.go | 63 + .../v1alpha3/deviceallocationresult.go | 58 + .../resource/v1alpha3/deviceattribute.go | 66 + .../resource/v1alpha3/deviceclaim.go | 72 + .../v1alpha3/deviceclaimconfiguration.go | 50 + .../resource/v1alpha3/deviceclass.go | 253 + .../v1alpha3/deviceclassconfiguration.go | 39 + .../resource/v1alpha3/deviceclassspec.go | 71 + .../resource/v1alpha3/deviceconfiguration.go | 39 + .../resource/v1alpha3/deviceconstraint.go | 54 + .../resource/v1alpha3/devicerequest.go | 93 + .../v1alpha3/devicerequestallocationresult.go | 66 + .../resource/v1alpha3/deviceselector.go | 39 + .../v1alpha3/opaquedeviceconfiguration.go | 52 + .../podschedulingcontext.go | 26 +- .../podschedulingcontextspec.go | 6 +- .../podschedulingcontextstatus.go | 6 +- .../{v1alpha2 => v1alpha3}/resourceclaim.go | 26 +- .../resourceclaimconsumerreference.go | 6 +- .../resourceclaimschedulingstatus.go | 6 +- .../resource/v1alpha3/resourceclaimspec.go | 48 + .../resourceclaimstatus.go | 15 +- .../resourceclaimtemplate.go | 26 +- .../resourceclaimtemplatespec.go | 12 +- .../resource/v1alpha3/resourcepool.go | 57 + .../resourceslice.go} | 113 +- .../resource/v1alpha3/resourceslicespec.go | 93 + .../scheduling/v1/priorityclass.go | 10 +- .../scheduling/v1alpha1/priorityclass.go | 10 +- .../scheduling/v1beta1/priorityclass.go | 10 +- .../storage/v1/csidriver.go | 10 +- .../storage/v1/csidriverspec.go | 4 +- .../applyconfigurations/storage/v1/csinode.go | 10 +- .../storage/v1/csinodedriver.go | 4 +- .../storage/v1/csinodespec.go | 4 +- .../storage/v1/csistoragecapacity.go | 10 +- .../storage/v1/storageclass.go | 10 +- .../storage/v1/tokenrequest.go | 4 +- .../storage/v1/volumeattachment.go | 10 +- .../storage/v1/volumeattachmentsource.go | 4 +- .../storage/v1/volumeattachmentspec.go | 4 +- .../storage/v1/volumeattachmentstatus.go | 4 +- .../storage/v1/volumeerror.go | 4 +- .../storage/v1/volumenoderesources.go | 4 +- .../storage/v1alpha1/csistoragecapacity.go | 10 +- .../storage/v1alpha1/volumeattachment.go | 10 +- .../v1alpha1/volumeattachmentsource.go | 4 +- .../storage/v1alpha1/volumeattachmentspec.go | 4 +- .../v1alpha1/volumeattachmentstatus.go | 4 +- .../storage/v1alpha1/volumeattributesclass.go | 10 +- 
.../storage/v1alpha1/volumeerror.go | 4 +- .../storage/v1beta1/csidriver.go | 10 +- .../storage/v1beta1/csidriverspec.go | 4 +- .../storage/v1beta1/csinode.go | 10 +- .../storage/v1beta1/csinodedriver.go | 4 +- .../storage/v1beta1/csinodespec.go | 4 +- .../storage/v1beta1/csistoragecapacity.go | 10 +- .../storage/v1beta1/storageclass.go | 10 +- .../storage/v1beta1/tokenrequest.go | 4 +- .../storage/v1beta1/volumeattachment.go | 10 +- .../storage/v1beta1/volumeattachmentsource.go | 4 +- .../storage/v1beta1/volumeattachmentspec.go | 4 +- .../storage/v1beta1/volumeattachmentstatus.go | 4 +- .../storage/v1beta1/volumeattributesclass.go | 268 + .../storage/v1beta1/volumeerror.go | 4 +- .../storage/v1beta1/volumenoderesources.go | 4 +- .../v1alpha1/groupversionresource.go | 57 + .../v1alpha1/migrationcondition.go | 81 + .../v1alpha1/storageversionmigration.go | 262 + .../v1alpha1/storageversionmigrationspec.go | 48 + .../v1alpha1/storageversionmigrationstatus.go | 53 + .../client-go/applyconfigurations/utils.go | 1740 +++ .../discovery/aggregated_discovery.go | 124 +- .../client-go/discovery/discovery_client.go | 29 +- .../client-go/discovery/fake/discovery.go | 12 +- vendor/k8s.io/client-go/dynamic/simple.go | 43 + vendor/k8s.io/client-go/features/envvar.go | 188 + vendor/k8s.io/client-go/features/features.go | 143 + .../client-go/features/known_features.go | 54 + vendor/k8s.io/client-go/gentype/type.go | 360 + .../admissionregistration/v1/interface.go | 14 + .../v1/validatingadmissionpolicy.go | 89 + .../v1/validatingadmissionpolicybinding.go | 89 + .../informers/coordination/interface.go | 8 + .../coordination/v1alpha1/interface.go | 45 + .../coordination/v1alpha1/leasecandidate.go | 90 + vendor/k8s.io/client-go/informers/doc.go | 2 +- vendor/k8s.io/client-go/informers/factory.go | 7 + vendor/k8s.io/client-go/informers/generic.go | 42 +- .../informers/networking/v1beta1/interface.go | 14 + .../informers/networking/v1beta1/ipaddress.go | 89 + .../networking/v1beta1/servicecidr.go | 89 + .../client-go/informers/resource/interface.go | 12 +- .../resource/v1alpha3/deviceclass.go | 89 + .../{v1alpha2 => v1alpha3}/interface.go | 19 +- .../podschedulingcontext.go | 20 +- .../{v1alpha2 => v1alpha3}/resourceclaim.go | 20 +- .../resourceclaimtemplate.go | 20 +- .../resourceslice.go} | 44 +- .../informers/storage/v1beta1/interface.go | 7 + .../storage/v1beta1/volumeattributesclass.go | 89 + .../informers/storagemigration/interface.go | 46 + .../storagemigration/v1alpha1/interface.go | 45 + .../v1alpha1/storageversionmigration.go | 89 + .../k8s.io/client-go/kubernetes/clientset.go | 42 +- vendor/k8s.io/client-go/kubernetes/doc.go | 2 +- .../kubernetes/fake/clientset_generated.go | 63 +- .../client-go/kubernetes/fake/register.go | 8 +- .../client-go/kubernetes/scheme/register.go | 8 +- .../v1/admissionregistration_client.go | 10 + .../fake/fake_admissionregistration_client.go | 8 + .../fake/fake_mutatingwebhookconfiguration.go | 34 +- .../v1/fake/fake_validatingadmissionpolicy.go | 186 + .../fake_validatingadmissionpolicybinding.go | 151 + .../fake_validatingwebhookconfiguration.go | 34 +- .../v1/generated_expansion.go | 4 + .../v1/mutatingwebhookconfiguration.go | 146 +- .../v1/validatingadmissionpolicy.go | 73 + .../v1/validatingadmissionpolicybinding.go | 69 + .../v1/validatingwebhookconfiguration.go | 146 +- .../fake/fake_validatingadmissionpolicy.go | 46 +- .../fake_validatingadmissionpolicybinding.go | 34 +- .../v1alpha1/validatingadmissionpolicy.go | 192 +- .../validatingadmissionpolicybinding.go | 
148 +- .../fake/fake_mutatingwebhookconfiguration.go | 34 +- .../fake/fake_validatingadmissionpolicy.go | 46 +- .../fake_validatingadmissionpolicybinding.go | 34 +- .../fake_validatingwebhookconfiguration.go | 34 +- .../v1beta1/mutatingwebhookconfiguration.go | 146 +- .../v1beta1/validatingadmissionpolicy.go | 192 +- .../validatingadmissionpolicybinding.go | 148 +- .../v1beta1/validatingwebhookconfiguration.go | 148 +- .../v1alpha1/fake/fake_storageversion.go | 46 +- .../v1alpha1/storageversion.go | 192 +- .../typed/apps/v1/controllerrevision.go | 157 +- .../kubernetes/typed/apps/v1/daemonset.go | 205 +- .../kubernetes/typed/apps/v1/deployment.go | 215 +- .../apps/v1/fake/fake_controllerrevision.go | 34 +- .../typed/apps/v1/fake/fake_daemonset.go | 46 +- .../typed/apps/v1/fake/fake_deployment.go | 61 +- .../typed/apps/v1/fake/fake_replicaset.go | 61 +- .../typed/apps/v1/fake/fake_statefulset.go | 61 +- .../kubernetes/typed/apps/v1/replicaset.go | 215 +- .../kubernetes/typed/apps/v1/statefulset.go | 215 +- .../typed/apps/v1beta1/controllerrevision.go | 157 +- .../typed/apps/v1beta1/deployment.go | 205 +- .../v1beta1/fake/fake_controllerrevision.go | 34 +- .../apps/v1beta1/fake/fake_deployment.go | 46 +- .../apps/v1beta1/fake/fake_statefulset.go | 46 +- .../typed/apps/v1beta1/statefulset.go | 205 +- .../typed/apps/v1beta2/controllerrevision.go | 157 +- .../typed/apps/v1beta2/daemonset.go | 205 +- .../typed/apps/v1beta2/deployment.go | 205 +- .../v1beta2/fake/fake_controllerrevision.go | 34 +- .../typed/apps/v1beta2/fake/fake_daemonset.go | 46 +- .../apps/v1beta2/fake/fake_deployment.go | 46 +- .../apps/v1beta2/fake/fake_replicaset.go | 46 +- .../apps/v1beta2/fake/fake_statefulset.go | 61 +- .../typed/apps/v1beta2/replicaset.go | 205 +- .../typed/apps/v1beta2/statefulset.go | 215 +- .../v1/fake/fake_selfsubjectreview.go | 5 +- .../v1/fake/fake_tokenreview.go | 5 +- .../authentication/v1/selfsubjectreview.go | 23 +- .../typed/authentication/v1/tokenreview.go | 23 +- .../v1alpha1/fake/fake_selfsubjectreview.go | 5 +- .../v1alpha1/selfsubjectreview.go | 23 +- .../v1beta1/fake/fake_selfsubjectreview.go | 5 +- .../v1beta1/fake/fake_tokenreview.go | 5 +- .../v1beta1/selfsubjectreview.go | 23 +- .../authentication/v1beta1/tokenreview.go | 23 +- .../v1/fake/fake_localsubjectaccessreview.go | 5 +- .../v1/fake/fake_selfsubjectaccessreview.go | 5 +- .../v1/fake/fake_selfsubjectrulesreview.go | 5 +- .../v1/fake/fake_subjectaccessreview.go | 5 +- .../v1/localsubjectaccessreview.go | 26 +- .../v1/selfsubjectaccessreview.go | 23 +- .../v1/selfsubjectrulesreview.go | 23 +- .../authorization/v1/subjectaccessreview.go | 23 +- .../fake/fake_localsubjectaccessreview.go | 5 +- .../fake/fake_selfsubjectaccessreview.go | 5 +- .../fake/fake_selfsubjectrulesreview.go | 5 +- .../v1beta1/fake/fake_subjectaccessreview.go | 5 +- .../v1beta1/localsubjectaccessreview.go | 26 +- .../v1beta1/selfsubjectaccessreview.go | 23 +- .../v1beta1/selfsubjectrulesreview.go | 23 +- .../v1beta1/subjectaccessreview.go | 23 +- .../v1/fake/fake_horizontalpodautoscaler.go | 46 +- .../autoscaling/v1/horizontalpodautoscaler.go | 205 +- .../v2/fake/fake_horizontalpodautoscaler.go | 46 +- .../autoscaling/v2/horizontalpodautoscaler.go | 205 +- .../fake/fake_horizontalpodautoscaler.go | 46 +- .../v2beta1/horizontalpodautoscaler.go | 205 +- .../fake/fake_horizontalpodautoscaler.go | 46 +- .../v2beta2/horizontalpodautoscaler.go | 205 +- .../kubernetes/typed/batch/v1/cronjob.go | 205 +- .../typed/batch/v1/fake/fake_cronjob.go | 46 +- 
.../typed/batch/v1/fake/fake_job.go | 46 +- .../kubernetes/typed/batch/v1/job.go | 205 +- .../kubernetes/typed/batch/v1beta1/cronjob.go | 205 +- .../typed/batch/v1beta1/fake/fake_cronjob.go | 46 +- .../v1/certificatesigningrequest.go | 194 +- .../v1/fake/fake_certificatesigningrequest.go | 51 +- .../v1alpha1/clustertrustbundle.go | 146 +- .../v1alpha1/fake/fake_clustertrustbundle.go | 34 +- .../v1beta1/certificatesigningrequest.go | 192 +- .../certificatesigningrequest_expansion.go | 2 +- .../fake/fake_certificatesigningrequest.go | 46 +- .../typed/coordination/v1/fake/fake_lease.go | 34 +- .../kubernetes/typed/coordination/v1/lease.go | 157 +- .../v1alpha1/coordination_client.go | 107 + .../v1alpha2 => coordination/v1alpha1}/doc.go | 2 +- .../v1alpha1}/fake/doc.go | 0 .../v1alpha1/fake/fake_coordination_client.go | 40 + .../v1alpha1/fake/fake_leasecandidate.go | 160 + .../v1alpha1/generated_expansion.go | 21 + .../coordination/v1alpha1/leasecandidate.go | 69 + .../coordination/v1beta1/fake/fake_lease.go | 34 +- .../typed/coordination/v1beta1/lease.go | 157 +- .../typed/core/v1/componentstatus.go | 146 +- .../kubernetes/typed/core/v1/configmap.go | 157 +- .../kubernetes/typed/core/v1/endpoints.go | 157 +- .../kubernetes/typed/core/v1/event.go | 157 +- .../typed/core/v1/event_expansion.go | 22 +- .../core/v1/fake/fake_componentstatus.go | 34 +- .../typed/core/v1/fake/fake_configmap.go | 34 +- .../typed/core/v1/fake/fake_endpoints.go | 34 +- .../typed/core/v1/fake/fake_event.go | 34 +- .../typed/core/v1/fake/fake_limitrange.go | 34 +- .../typed/core/v1/fake/fake_namespace.go | 44 +- .../typed/core/v1/fake/fake_node.go | 46 +- .../core/v1/fake/fake_persistentvolume.go | 46 +- .../v1/fake/fake_persistentvolumeclaim.go | 46 +- .../kubernetes/typed/core/v1/fake/fake_pod.go | 51 +- .../typed/core/v1/fake/fake_podtemplate.go | 34 +- .../v1/fake/fake_replicationcontroller.go | 56 +- .../typed/core/v1/fake/fake_resourcequota.go | 46 +- .../typed/core/v1/fake/fake_secret.go | 34 +- .../typed/core/v1/fake/fake_service.go | 44 +- .../typed/core/v1/fake/fake_serviceaccount.go | 39 +- .../kubernetes/typed/core/v1/limitrange.go | 157 +- .../kubernetes/typed/core/v1/namespace.go | 177 +- .../typed/core/v1/namespace_expansion.go | 2 +- .../kubernetes/typed/core/v1/node.go | 192 +- .../typed/core/v1/node_expansion.go | 2 +- .../typed/core/v1/persistentvolume.go | 192 +- .../typed/core/v1/persistentvolumeclaim.go | 205 +- .../client-go/kubernetes/typed/core/v1/pod.go | 209 +- .../kubernetes/typed/core/v1/pod_expansion.go | 14 +- .../kubernetes/typed/core/v1/podtemplate.go | 157 +- .../typed/core/v1/replicationcontroller.go | 213 +- .../kubernetes/typed/core/v1/resourcequota.go | 205 +- .../kubernetes/typed/core/v1/secret.go | 157 +- .../kubernetes/typed/core/v1/service.go | 189 +- .../typed/core/v1/service_expansion.go | 4 +- .../typed/core/v1/serviceaccount.go | 161 +- .../typed/discovery/v1/endpointslice.go | 157 +- .../discovery/v1/fake/fake_endpointslice.go | 34 +- .../typed/discovery/v1beta1/endpointslice.go | 157 +- .../v1beta1/fake/fake_endpointslice.go | 34 +- .../kubernetes/typed/events/v1/event.go | 157 +- .../typed/events/v1/fake/fake_event.go | 34 +- .../kubernetes/typed/events/v1beta1/event.go | 157 +- .../typed/events/v1beta1/event_expansion.go | 18 +- .../typed/events/v1beta1/fake/fake_event.go | 34 +- .../typed/extensions/v1beta1/daemonset.go | 205 +- .../typed/extensions/v1beta1/deployment.go | 215 +- .../v1beta1/deployment_expansion.go | 2 +- .../extensions/v1beta1/fake/fake_daemonset.go | 
46 +- .../v1beta1/fake/fake_deployment.go | 61 +- .../extensions/v1beta1/fake/fake_ingress.go | 46 +- .../v1beta1/fake/fake_networkpolicy.go | 34 +- .../v1beta1/fake/fake_replicaset.go | 61 +- .../typed/extensions/v1beta1/ingress.go | 205 +- .../typed/extensions/v1beta1/networkpolicy.go | 157 +- .../typed/extensions/v1beta1/replicaset.go | 215 +- .../flowcontrol/v1/fake/fake_flowschema.go | 46 +- .../fake/fake_prioritylevelconfiguration.go | 46 +- .../typed/flowcontrol/v1/flowschema.go | 192 +- .../v1/prioritylevelconfiguration.go | 192 +- .../v1beta1/fake/fake_flowschema.go | 46 +- .../fake/fake_prioritylevelconfiguration.go | 46 +- .../typed/flowcontrol/v1beta1/flowschema.go | 192 +- .../v1beta1/prioritylevelconfiguration.go | 192 +- .../v1beta2/fake/fake_flowschema.go | 46 +- .../fake/fake_prioritylevelconfiguration.go | 46 +- .../typed/flowcontrol/v1beta2/flowschema.go | 192 +- .../v1beta2/prioritylevelconfiguration.go | 192 +- .../v1beta3/fake/fake_flowschema.go | 46 +- .../fake/fake_prioritylevelconfiguration.go | 46 +- .../typed/flowcontrol/v1beta3/flowschema.go | 192 +- .../v1beta3/prioritylevelconfiguration.go | 192 +- .../typed/networking/v1/fake/fake_ingress.go | 46 +- .../networking/v1/fake/fake_ingressclass.go | 34 +- .../networking/v1/fake/fake_networkpolicy.go | 34 +- .../kubernetes/typed/networking/v1/ingress.go | 205 +- .../typed/networking/v1/ingressclass.go | 146 +- .../typed/networking/v1/networkpolicy.go | 157 +- .../v1alpha1/fake/fake_ipaddress.go | 34 +- .../v1alpha1/fake/fake_servicecidr.go | 46 +- .../typed/networking/v1alpha1/ipaddress.go | 146 +- .../typed/networking/v1alpha1/servicecidr.go | 192 +- .../networking/v1beta1/fake/fake_ingress.go | 46 +- .../v1beta1/fake/fake_ingressclass.go | 34 +- .../networking/v1beta1/fake/fake_ipaddress.go | 151 + .../v1beta1/fake/fake_networking_client.go | 8 + .../v1beta1/fake/fake_servicecidr.go | 186 + .../networking/v1beta1/generated_expansion.go | 4 + .../typed/networking/v1beta1/ingress.go | 205 +- .../typed/networking/v1beta1/ingressclass.go | 146 +- .../typed/networking/v1beta1/ipaddress.go | 69 + .../networking/v1beta1/networking_client.go | 10 + .../typed/networking/v1beta1/servicecidr.go | 73 + .../typed/node/v1/fake/fake_runtimeclass.go | 34 +- .../kubernetes/typed/node/v1/runtimeclass.go | 146 +- .../node/v1alpha1/fake/fake_runtimeclass.go | 34 +- .../typed/node/v1alpha1/runtimeclass.go | 146 +- .../node/v1beta1/fake/fake_runtimeclass.go | 34 +- .../typed/node/v1beta1/runtimeclass.go | 146 +- .../kubernetes/typed/policy/v1/eviction.go | 15 +- .../typed/policy/v1/eviction_expansion.go | 2 +- .../v1/fake/fake_poddisruptionbudget.go | 46 +- .../typed/policy/v1/poddisruptionbudget.go | 205 +- .../typed/policy/v1beta1/eviction.go | 15 +- .../policy/v1beta1/eviction_expansion.go | 2 +- .../v1beta1/fake/fake_poddisruptionbudget.go | 46 +- .../policy/v1beta1/poddisruptionbudget.go | 205 +- .../kubernetes/typed/rbac/v1/clusterrole.go | 146 +- .../typed/rbac/v1/clusterrolebinding.go | 146 +- .../typed/rbac/v1/fake/fake_clusterrole.go | 34 +- .../rbac/v1/fake/fake_clusterrolebinding.go | 34 +- .../typed/rbac/v1/fake/fake_role.go | 34 +- .../typed/rbac/v1/fake/fake_rolebinding.go | 34 +- .../kubernetes/typed/rbac/v1/role.go | 157 +- .../kubernetes/typed/rbac/v1/rolebinding.go | 157 +- .../typed/rbac/v1alpha1/clusterrole.go | 146 +- .../typed/rbac/v1alpha1/clusterrolebinding.go | 146 +- .../rbac/v1alpha1/fake/fake_clusterrole.go | 34 +- .../v1alpha1/fake/fake_clusterrolebinding.go | 34 +- 
.../typed/rbac/v1alpha1/fake/fake_role.go | 34 +- .../rbac/v1alpha1/fake/fake_rolebinding.go | 34 +- .../kubernetes/typed/rbac/v1alpha1/role.go | 157 +- .../typed/rbac/v1alpha1/rolebinding.go | 157 +- .../typed/rbac/v1beta1/clusterrole.go | 146 +- .../typed/rbac/v1beta1/clusterrolebinding.go | 146 +- .../rbac/v1beta1/fake/fake_clusterrole.go | 34 +- .../v1beta1/fake/fake_clusterrolebinding.go | 34 +- .../typed/rbac/v1beta1/fake/fake_role.go | 34 +- .../rbac/v1beta1/fake/fake_rolebinding.go | 34 +- .../kubernetes/typed/rbac/v1beta1/role.go | 157 +- .../typed/rbac/v1beta1/rolebinding.go | 157 +- .../v1alpha2/fake/fake_resourceclass.go | 145 - .../resource/v1alpha2/podschedulingcontext.go | 256 - .../typed/resource/v1alpha2/resourceclaim.go | 256 - .../v1alpha2/resourceclaimtemplate.go | 208 - .../typed/resource/v1alpha2/resourceclass.go | 197 - .../typed/resource/v1alpha3/deviceclass.go | 69 + .../kubernetes/typed/resource/v1alpha3/doc.go | 20 + .../typed/resource/v1alpha3/fake/doc.go | 20 + .../v1alpha3/fake/fake_deviceclass.go | 151 + .../fake/fake_podschedulingcontext.go | 92 +- .../fake/fake_resource_client.go | 20 +- .../fake/fake_resourceclaim.go | 92 +- .../fake/fake_resourceclaimtemplate.go | 74 +- .../v1alpha3/fake/fake_resourceslice.go | 151 + .../generated_expansion.go | 6 +- .../resource/v1alpha3/podschedulingcontext.go | 73 + .../{v1alpha2 => v1alpha3}/resource_client.go | 51 +- .../typed/resource/v1alpha3/resourceclaim.go | 73 + .../v1alpha3/resourceclaimtemplate.go | 69 + .../typed/resource/v1alpha3/resourceslice.go | 69 + .../scheduling/v1/fake/fake_priorityclass.go | 34 +- .../typed/scheduling/v1/priorityclass.go | 146 +- .../v1alpha1/fake/fake_priorityclass.go | 34 +- .../scheduling/v1alpha1/priorityclass.go | 146 +- .../v1beta1/fake/fake_priorityclass.go | 34 +- .../typed/scheduling/v1beta1/priorityclass.go | 146 +- .../kubernetes/typed/storage/v1/csidriver.go | 146 +- .../kubernetes/typed/storage/v1/csinode.go | 146 +- .../typed/storage/v1/csistoragecapacity.go | 157 +- .../typed/storage/v1/fake/fake_csidriver.go | 34 +- .../typed/storage/v1/fake/fake_csinode.go | 34 +- .../v1/fake/fake_csistoragecapacity.go | 34 +- .../storage/v1/fake/fake_storageclass.go | 34 +- .../storage/v1/fake/fake_volumeattachment.go | 46 +- .../typed/storage/v1/storageclass.go | 146 +- .../typed/storage/v1/volumeattachment.go | 192 +- .../storage/v1alpha1/csistoragecapacity.go | 157 +- .../v1alpha1/fake/fake_csistoragecapacity.go | 34 +- .../v1alpha1/fake/fake_volumeattachment.go | 46 +- .../fake/fake_volumeattributesclass.go | 34 +- .../storage/v1alpha1/volumeattachment.go | 192 +- .../storage/v1alpha1/volumeattributesclass.go | 146 +- .../typed/storage/v1beta1/csidriver.go | 146 +- .../typed/storage/v1beta1/csinode.go | 146 +- .../storage/v1beta1/csistoragecapacity.go | 157 +- .../storage/v1beta1/fake/fake_csidriver.go | 34 +- .../storage/v1beta1/fake/fake_csinode.go | 34 +- .../v1beta1/fake/fake_csistoragecapacity.go | 34 +- .../v1beta1/fake/fake_storage_client.go | 4 + .../storage/v1beta1/fake/fake_storageclass.go | 34 +- .../v1beta1/fake/fake_volumeattachment.go | 46 +- .../fake/fake_volumeattributesclass.go | 151 + .../storage/v1beta1/generated_expansion.go | 2 + .../typed/storage/v1beta1/storage_client.go | 5 + .../typed/storage/v1beta1/storageclass.go | 146 +- .../typed/storage/v1beta1/volumeattachment.go | 192 +- .../storage/v1beta1/volumeattributesclass.go | 69 + .../typed/storagemigration/v1alpha1/doc.go | 20 + .../storagemigration/v1alpha1/fake/doc.go | 20 + 
.../fake/fake_storagemigration_client.go | 40 + .../fake/fake_storageversionmigration.go | 186 + .../v1alpha1/generated_expansion.go | 21 + .../v1alpha1/storagemigration_client.go | 107 + .../v1alpha1/storageversionmigration.go | 73 + .../v1/expansion_generated.go | 8 + .../v1/mutatingwebhookconfiguration.go | 26 +- .../v1/validatingadmissionpolicy.go | 48 + .../v1/validatingadmissionpolicybinding.go | 48 + .../v1/validatingwebhookconfiguration.go | 26 +- .../v1alpha1/validatingadmissionpolicy.go | 26 +- .../validatingadmissionpolicybinding.go | 26 +- .../v1beta1/mutatingwebhookconfiguration.go | 26 +- .../v1beta1/validatingadmissionpolicy.go | 26 +- .../validatingadmissionpolicybinding.go | 26 +- .../v1beta1/validatingwebhookconfiguration.go | 26 +- .../v1alpha1/storageversion.go | 26 +- .../listers/apps/v1/controllerrevision.go | 39 +- .../client-go/listers/apps/v1/daemonset.go | 39 +- .../client-go/listers/apps/v1/deployment.go | 39 +- .../client-go/listers/apps/v1/replicaset.go | 39 +- .../client-go/listers/apps/v1/statefulset.go | 39 +- .../apps/v1beta1/controllerrevision.go | 39 +- .../listers/apps/v1beta1/deployment.go | 39 +- .../listers/apps/v1beta1/statefulset.go | 39 +- .../apps/v1beta2/controllerrevision.go | 39 +- .../listers/apps/v1beta2/daemonset.go | 39 +- .../listers/apps/v1beta2/deployment.go | 39 +- .../listers/apps/v1beta2/replicaset.go | 39 +- .../listers/apps/v1beta2/statefulset.go | 39 +- .../autoscaling/v1/horizontalpodautoscaler.go | 39 +- .../autoscaling/v2/horizontalpodautoscaler.go | 39 +- .../v2beta1/horizontalpodautoscaler.go | 39 +- .../v2beta2/horizontalpodautoscaler.go | 39 +- .../client-go/listers/batch/v1/cronjob.go | 39 +- .../k8s.io/client-go/listers/batch/v1/job.go | 39 +- .../listers/batch/v1beta1/cronjob.go | 39 +- .../v1/certificatesigningrequest.go | 26 +- .../v1alpha1/clustertrustbundle.go | 26 +- .../v1beta1/certificatesigningrequest.go | 26 +- .../listers/coordination/v1/lease.go | 39 +- .../v1alpha1/expansion_generated.go | 27 + .../coordination/v1alpha1/leasecandidate.go | 70 + .../listers/coordination/v1beta1/lease.go | 39 +- .../listers/core/v1/componentstatus.go | 26 +- .../client-go/listers/core/v1/configmap.go | 39 +- .../client-go/listers/core/v1/endpoints.go | 39 +- .../k8s.io/client-go/listers/core/v1/event.go | 39 +- .../client-go/listers/core/v1/limitrange.go | 39 +- .../client-go/listers/core/v1/namespace.go | 26 +- .../k8s.io/client-go/listers/core/v1/node.go | 26 +- .../listers/core/v1/persistentvolume.go | 26 +- .../listers/core/v1/persistentvolumeclaim.go | 39 +- .../k8s.io/client-go/listers/core/v1/pod.go | 39 +- .../client-go/listers/core/v1/podtemplate.go | 39 +- .../listers/core/v1/replicationcontroller.go | 39 +- .../listers/core/v1/resourcequota.go | 39 +- .../client-go/listers/core/v1/secret.go | 39 +- .../client-go/listers/core/v1/service.go | 39 +- .../listers/core/v1/serviceaccount.go | 39 +- .../listers/discovery/v1/endpointslice.go | 39 +- .../discovery/v1beta1/endpointslice.go | 39 +- vendor/k8s.io/client-go/listers/doc.go | 18 + .../client-go/listers/events/v1/event.go | 39 +- .../client-go/listers/events/v1beta1/event.go | 39 +- .../listers/extensions/v1beta1/daemonset.go | 39 +- .../listers/extensions/v1beta1/deployment.go | 39 +- .../listers/extensions/v1beta1/ingress.go | 39 +- .../extensions/v1beta1/networkpolicy.go | 39 +- .../listers/extensions/v1beta1/replicaset.go | 39 +- .../listers/flowcontrol/v1/flowschema.go | 26 +- .../v1/prioritylevelconfiguration.go | 26 +- 
.../listers/flowcontrol/v1beta1/flowschema.go | 26 +- .../v1beta1/prioritylevelconfiguration.go | 26 +- .../listers/flowcontrol/v1beta2/flowschema.go | 26 +- .../v1beta2/prioritylevelconfiguration.go | 26 +- .../listers/flowcontrol/v1beta3/flowschema.go | 26 +- .../v1beta3/prioritylevelconfiguration.go | 26 +- .../client-go/listers/generic_helpers.go | 72 + .../listers/networking/v1/ingress.go | 39 +- .../listers/networking/v1/ingressclass.go | 26 +- .../listers/networking/v1/networkpolicy.go | 39 +- .../listers/networking/v1alpha1/ipaddress.go | 26 +- .../networking/v1alpha1/servicecidr.go | 26 +- .../networking/v1beta1/expansion_generated.go | 8 + .../listers/networking/v1beta1/ingress.go | 39 +- .../networking/v1beta1/ingressclass.go | 26 +- .../listers/networking/v1beta1/ipaddress.go | 48 + .../listers/networking/v1beta1/servicecidr.go | 48 + .../client-go/listers/node/v1/runtimeclass.go | 26 +- .../listers/node/v1alpha1/runtimeclass.go | 26 +- .../listers/node/v1beta1/runtimeclass.go | 26 +- .../client-go/listers/policy/v1/eviction.go | 39 +- .../listers/policy/v1/poddisruptionbudget.go | 39 +- .../listers/policy/v1beta1/eviction.go | 39 +- .../policy/v1beta1/poddisruptionbudget.go | 39 +- .../client-go/listers/rbac/v1/clusterrole.go | 26 +- .../listers/rbac/v1/clusterrolebinding.go | 26 +- .../k8s.io/client-go/listers/rbac/v1/role.go | 39 +- .../client-go/listers/rbac/v1/rolebinding.go | 39 +- .../listers/rbac/v1alpha1/clusterrole.go | 26 +- .../rbac/v1alpha1/clusterrolebinding.go | 26 +- .../client-go/listers/rbac/v1alpha1/role.go | 39 +- .../listers/rbac/v1alpha1/rolebinding.go | 39 +- .../listers/rbac/v1beta1/clusterrole.go | 26 +- .../rbac/v1beta1/clusterrolebinding.go | 26 +- .../client-go/listers/rbac/v1beta1/role.go | 39 +- .../listers/rbac/v1beta1/rolebinding.go | 39 +- .../resource/v1alpha2/resourceclass.go | 68 - .../listers/resource/v1alpha3/deviceclass.go | 48 + .../expansion_generated.go | 12 +- .../podschedulingcontext.go | 49 +- .../{v1alpha2 => v1alpha3}/resourceclaim.go | 49 +- .../resourceclaimtemplate.go | 49 +- .../resource/v1alpha3/resourceslice.go | 48 + .../listers/scheduling/v1/priorityclass.go | 26 +- .../scheduling/v1alpha1/priorityclass.go | 26 +- .../scheduling/v1beta1/priorityclass.go | 26 +- .../client-go/listers/storage/v1/csidriver.go | 26 +- .../client-go/listers/storage/v1/csinode.go | 26 +- .../listers/storage/v1/csistoragecapacity.go | 39 +- .../listers/storage/v1/storageclass.go | 26 +- .../listers/storage/v1/volumeattachment.go | 26 +- .../storage/v1alpha1/csistoragecapacity.go | 39 +- .../storage/v1alpha1/volumeattachment.go | 26 +- .../storage/v1alpha1/volumeattributesclass.go | 26 +- .../listers/storage/v1beta1/csidriver.go | 26 +- .../listers/storage/v1beta1/csinode.go | 26 +- .../storage/v1beta1/csistoragecapacity.go | 39 +- .../storage/v1beta1/expansion_generated.go | 4 + .../listers/storage/v1beta1/storageclass.go | 26 +- .../storage/v1beta1/volumeattachment.go | 26 +- .../storage/v1beta1/volumeattributesclass.go | 48 + .../v1alpha1/expansion_generated.go | 23 + .../v1alpha1/storageversionmigration.go | 48 + .../pkg/client/auth/plugins_providers.go | 3 - vendor/k8s.io/client-go/rest/request.go | 146 + vendor/k8s.io/client-go/rest/watch/decoder.go | 2 +- .../k8s.io/client-go/restmapper/shortcut.go | 2 +- vendor/k8s.io/client-go/testing/actions.go | 235 +- vendor/k8s.io/client-go/testing/fixture.go | 670 +- .../client-go/tools/cache/controller.go | 142 +- .../client-go/tools/cache/delta_fifo.go | 53 +- 
vendor/k8s.io/client-go/tools/cache/index.go | 3 +- .../k8s.io/client-go/tools/cache/listers.go | 6 +- .../k8s.io/client-go/tools/cache/listwatch.go | 4 + .../k8s.io/client-go/tools/cache/reflector.go | 265 +- .../reflector_data_consistency_detector.go | 94 +- .../client-go/tools/cache/shared_informer.go | 10 +- .../tools/cache/thread_safe_store.go | 92 +- .../client-go/tools/clientcmd/api/doc.go | 2 +- .../client-go/tools/clientcmd/api/helpers.go | 5 +- .../client-go/tools/clientcmd/api/v1/doc.go | 2 +- .../tools/clientcmd/client_config.go | 49 +- .../client-go/tools/clientcmd/config.go | 3 +- .../tools/leaderelection/leaderelection.go | 125 +- .../tools/leaderelection/leasecandidate.go | 202 + .../client-go/tools/leaderelection/metrics.go | 30 +- .../leaderelection/resourcelock/interface.go | 17 +- .../leaderelection/resourcelock/leaselock.go | 15 +- vendor/k8s.io/client-go/tools/record/event.go | 23 +- .../client-go/transport/cert_rotation.go | 7 +- .../data_consistency_detector.go | 146 + .../list_data_consistency_detector.go | 70 + .../watch_list_data_consistency_detector.go | 54 + .../client-go/util/flowcontrol/backoff.go | 3 +- .../client-go/util/watchlist/watch_list.go | 82 + .../util/workqueue/default_rate_limiters.go | 139 +- .../util/workqueue/delaying_queue.go | 66 +- .../k8s.io/client-go/util/workqueue/queue.go | 153 +- .../util/workqueue/rate_limiting_queue.go | 64 +- vendor/k8s.io/klog/v2/klog.go | 76 +- .../k8s.io/kube-openapi/pkg/common/common.go | 3 + .../k8s.io/kube-openapi/pkg/schemaconv/smd.go | 3 - vendor/k8s.io/utils/integer/integer.go | 73 - vendor/k8s.io/utils/net/multi_listen.go | 195 + vendor/modules.txt | 347 +- 2679 files changed, 473775 insertions(+), 51507 deletions(-) create mode 100644 vendor/cel.dev/expr/.bazelversion create mode 100644 vendor/cel.dev/expr/.gitattributes create mode 100644 vendor/cel.dev/expr/.gitignore create mode 100644 vendor/cel.dev/expr/BUILD.bazel create mode 100644 vendor/cel.dev/expr/CODE_OF_CONDUCT.md create mode 100644 vendor/cel.dev/expr/CONTRIBUTING.md create mode 100644 vendor/cel.dev/expr/GOVERNANCE.md create mode 100644 vendor/cel.dev/expr/LICENSE create mode 100644 vendor/cel.dev/expr/MAINTAINERS.md create mode 100644 vendor/cel.dev/expr/README.md create mode 100644 vendor/cel.dev/expr/WORKSPACE create mode 100644 vendor/cel.dev/expr/checked.pb.go create mode 100644 vendor/cel.dev/expr/cloudbuild.yaml create mode 100644 vendor/cel.dev/expr/eval.pb.go create mode 100644 vendor/cel.dev/expr/explain.pb.go create mode 100644 vendor/cel.dev/expr/regen_go_proto.sh create mode 100644 vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh create mode 100644 vendor/cel.dev/expr/syntax.pb.go create mode 100644 vendor/cel.dev/expr/value.pb.go create mode 100644 vendor/cloud.google.com/go/monitoring/LICENSE create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary_go123.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/gapic_metadata.json create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go create mode 100644 
vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/v2/version.go create mode 100644 vendor/cloud.google.com/go/monitoring/internal/version.go create mode 100644 vendor/cloud.google.com/go/storage/dynamic_delay.go create mode 100644 vendor/cloud.google.com/go/storage/experimental/experimental.go create mode 100644 vendor/cloud.google.com/go/storage/grpc_dp.go create mode 100644 vendor/cloud.google.com/go/storage/grpc_metrics.go create mode 100644 vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go create mode 100644 vendor/cloud.google.com/go/storage/internal/experimental.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/cache.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go create mode 100644 vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/LICENSE create mode 100644 vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/README.md create mode 100644 
vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go create mode 100644 vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go create mode 100644 vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go create mode 100644 vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go create mode 100644 vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go create mode 100644 vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go create mode 100644 vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/LICENSE create mode 100644 vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/README.md create mode 100644 vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/cloudmonitoring.go create mode 100644 vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/constants.go create mode 100644 vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/error.go create mode 100644 vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/metric.go create mode 100644 vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/option.go create mode 100644 vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/version.go create mode 100644 vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/LICENSE create mode 100644 vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/resourcemapping.go delete mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/retry/attempt_metrics.go create mode 100644 vendor/github.com/aws/smithy-go/metrics/metrics.go create mode 100644 vendor/github.com/aws/smithy-go/metrics/nop.go create mode 100644 vendor/github.com/aws/smithy-go/middleware/context.go create mode 100644 vendor/github.com/aws/smithy-go/tracing/context.go create mode 100644 vendor/github.com/aws/smithy-go/tracing/nop.go create mode 100644 vendor/github.com/aws/smithy-go/tracing/tracing.go create mode 100644 vendor/github.com/aws/smithy-go/transport/http/metrics.go delete mode 100644 vendor/github.com/cenkalti/backoff/v3/.travis.yml rename vendor/github.com/cenkalti/backoff/{v3 => v4}/.gitignore (94%) rename vendor/github.com/cenkalti/backoff/{v3 => v4}/LICENSE (100%) rename vendor/github.com/cenkalti/backoff/{v3 => v4}/README.md (67%) rename vendor/github.com/cenkalti/backoff/{v3 => v4}/backoff.go (100%) rename vendor/github.com/cenkalti/backoff/{v3 => v4}/context.go (86%) rename vendor/github.com/cenkalti/backoff/{v3 => v4}/exponential.go (67%) rename vendor/github.com/cenkalti/backoff/{v3 => v4}/retry.go (54%) rename vendor/github.com/cenkalti/backoff/{v3 => v4}/ticker.go (97%) rename vendor/github.com/cenkalti/backoff/{v3 => v4}/timer.go (100%) rename vendor/github.com/cenkalti/backoff/{v3 => v4}/tries.go (94%) create mode 100644 vendor/github.com/cncf/xds/go/LICENSE create mode 100644 vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go create mode 100644 vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go create mode 100644 
vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go create mode 100644 vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go create mode 100644 vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go create mode 100644 vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.go create mode 100644 vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca_grpc.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.go create mode 100644 
vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.validate.go create mode 100644 vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.go create mode 100644 vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/LICENSE create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.go create mode 100644 
vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.validate.go create mode 100644 
vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.go create mode 100644 
vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.validate.go create mode 100644 
vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload_vtproto.pb.go 
create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.validate.go create mode 100644 
vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.go create mode 100644 
vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.go create mode 
100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads_grpc.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs_grpc.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.validate.go create mode 100644 
vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds_grpc.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.go create mode 
100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.go create mode 100644 
vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.validate.go create mode 100644 vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket_vtproto.pb.go create mode 100644 vendor/github.com/envoyproxy/protoc-gen-validate/LICENSE create mode 100644 vendor/github.com/envoyproxy/protoc-gen-validate/validate/BUILD create mode 100644 vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.h create mode 100644 vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.pb.go create mode 100644 vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.proto delete mode 100644 vendor/github.com/fsnotify/fsnotify/.editorconfig delete mode 100644 vendor/github.com/fsnotify/fsnotify/.gitattributes create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/darwin.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/freebsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/internal.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/unix.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/unix2.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/windows.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/mkdoc.zsh create mode 100644 vendor/github.com/fxamacker/cbor/v2/.gitignore create mode 100644 vendor/github.com/fxamacker/cbor/v2/.golangci.yml create mode 100644 vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md create mode 100644 vendor/github.com/fxamacker/cbor/v2/LICENSE create mode 100644 vendor/github.com/fxamacker/cbor/v2/README.md create mode 100644 vendor/github.com/fxamacker/cbor/v2/SECURITY.md create mode 100644 vendor/github.com/fxamacker/cbor/v2/bytestring.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/cache.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/common.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/decode.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/diagnose.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/doc.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/encode.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/encode_map.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/simplevalue.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/stream.go create mode 100644 
vendor/github.com/fxamacker/cbor/v2/structfields.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/tag.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/valid.go create mode 100644 vendor/github.com/planetscale/vtprotobuf/LICENSE create mode 100644 vendor/github.com/planetscale/vtprotobuf/protohelpers/protohelpers.go create mode 100644 vendor/github.com/planetscale/vtprotobuf/types/known/anypb/any_vtproto.pb.go create mode 100644 vendor/github.com/planetscale/vtprotobuf/types/known/durationpb/duration_vtproto.pb.go create mode 100644 vendor/github.com/planetscale/vtprotobuf/types/known/emptypb/empty_vtproto.pb.go create mode 100644 vendor/github.com/planetscale/vtprotobuf/types/known/structpb/struct_vtproto.pb.go create mode 100644 vendor/github.com/planetscale/vtprotobuf/types/known/timestamppb/timestamp_vtproto.pb.go create mode 100644 vendor/github.com/planetscale/vtprotobuf/types/known/wrapperspb/wrappers_vtproto.pb.go create mode 100644 vendor/github.com/x448/float16/.travis.yml create mode 100644 vendor/github.com/x448/float16/LICENSE create mode 100644 vendor/github.com/x448/float16/README.md create mode 100644 vendor/github.com/x448/float16/float16.go delete mode 100644 vendor/github.com/youmark/pkcs8/.travis.yml delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/DESIGN.md create mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/doc.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/doc.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/doc.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/doc.go create mode 100644 vendor/go.opentelemetry.io/contrib/detectors/gcp/LICENSE create mode 100644 vendor/go.opentelemetry.io/contrib/detectors/gcp/README.md create mode 100644 vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-function.go create mode 100644 vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-run.go create mode 100644 vendor/go.opentelemetry.io/contrib/detectors/gcp/detector.go create mode 100644 vendor/go.opentelemetry.io/contrib/detectors/gcp/gce.go create mode 100644 vendor/go.opentelemetry.io/contrib/detectors/gcp/gke.go create mode 100644 vendor/go.opentelemetry.io/contrib/detectors/gcp/types.go create mode 100644 vendor/go.opentelemetry.io/contrib/detectors/gcp/version.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/LICENSE create mode 100644 vendor/go.opentelemetry.io/otel/sdk/README.md create mode 100644 vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md create mode 100644 vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md create mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/README.md create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/cache.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/config.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/env.go create mode 100644 
vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/meter.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/README.md create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/provider.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/reader.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/version.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/view.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/README.md create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/auto.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/config.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/container.go create mode 100644 
vendor/go.opentelemetry.io/otel/sdk/resource/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/env.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/process.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/resource.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/version.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go create mode 100644 vendor/google.golang.org/grpc/authz/audit/audit_logger.go create mode 100644 vendor/google.golang.org/grpc/authz/audit/stdout/stdout_logger.go create mode 100644 vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go create mode 100644 vendor/google.golang.org/grpc/balancer/rls/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/rls/cache.go create mode 100644 vendor/google.golang.org/grpc/balancer/rls/child_policy.go create mode 100644 vendor/google.golang.org/grpc/balancer/rls/config.go create mode 100644 vendor/google.golang.org/grpc/balancer/rls/control_channel.go create mode 100644 vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/adaptive.go create mode 100644 vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/lookback.go create mode 100644 vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go create 
mode 100644 vendor/google.golang.org/grpc/balancer/rls/picker.go create mode 100644 vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/weightedroundrobin/config.go create mode 100644 vendor/google.golang.org/grpc/balancer/weightedroundrobin/internal/internal.go create mode 100644 vendor/google.golang.org/grpc/balancer/weightedroundrobin/logging.go create mode 100644 vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go create mode 100644 vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go create mode 100644 vendor/google.golang.org/grpc/balancer/weightedtarget/logging.go create mode 100644 vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go create mode 100644 vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go create mode 100644 vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget_config.go create mode 100644 vendor/google.golang.org/grpc/credentials/tls/certprovider/distributor.go create mode 100644 vendor/google.golang.org/grpc/credentials/tls/certprovider/pemfile/builder.go create mode 100644 vendor/google.golang.org/grpc/credentials/tls/certprovider/pemfile/watcher.go create mode 100644 vendor/google.golang.org/grpc/credentials/tls/certprovider/provider.go create mode 100644 vendor/google.golang.org/grpc/credentials/tls/certprovider/store.go create mode 100644 vendor/google.golang.org/grpc/encoding/gzip/gzip.go create mode 100644 vendor/google.golang.org/grpc/internal/admin/admin.go create mode 100644 vendor/google.golang.org/grpc/internal/balancer/nop/nop.go create mode 100644 vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go create mode 100644 vendor/google.golang.org/grpc/internal/balancergroup/balancerstateaggregator.go create mode 100644 vendor/google.golang.org/grpc/internal/cache/timeoutCache.go create mode 100644 vendor/google.golang.org/grpc/internal/credentials/xds/handshake_info.go create mode 100644 vendor/google.golang.org/grpc/internal/hierarchy/hierarchy.go create mode 100644 vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go create mode 100644 vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go create mode 100644 vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go create mode 100644 vendor/google.golang.org/grpc/internal/wrr/edf.go create mode 100644 vendor/google.golang.org/grpc/internal/wrr/random.go create mode 100644 vendor/google.golang.org/grpc/internal/wrr/wrr.go create mode 100644 vendor/google.golang.org/grpc/internal/xds/bootstrap/bootstrap.go create mode 100644 vendor/google.golang.org/grpc/internal/xds/bootstrap/logging.go create mode 100644 vendor/google.golang.org/grpc/internal/xds/bootstrap/template.go create mode 100644 vendor/google.golang.org/grpc/internal/xds/bootstrap/tlscreds/bundle.go create mode 100644 vendor/google.golang.org/grpc/internal/xds/matcher/matcher_header.go create mode 100644 vendor/google.golang.org/grpc/internal/xds/matcher/string_matcher.go create mode 100644 vendor/google.golang.org/grpc/internal/xds/rbac/converter.go create mode 100644 vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go create mode 100644 vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go create mode 100644 vendor/google.golang.org/grpc/orca/call_metrics.go create mode 100644 vendor/google.golang.org/grpc/orca/internal/internal.go create mode 100644 
vendor/google.golang.org/grpc/orca/orca.go create mode 100644 vendor/google.golang.org/grpc/orca/producer.go create mode 100644 vendor/google.golang.org/grpc/orca/server_metrics.go create mode 100644 vendor/google.golang.org/grpc/orca/service.go create mode 100644 vendor/google.golang.org/grpc/stats/opentelemetry/LICENSE create mode 100644 vendor/google.golang.org/grpc/stats/opentelemetry/client_metrics.go create mode 100644 vendor/google.golang.org/grpc/stats/opentelemetry/internal/pluginoption.go create mode 100644 vendor/google.golang.org/grpc/stats/opentelemetry/opentelemetry.go create mode 100644 vendor/google.golang.org/grpc/stats/opentelemetry/server_metrics.go create mode 100644 vendor/google.golang.org/grpc/xds/bootstrap/bootstrap.go create mode 100644 vendor/google.golang.org/grpc/xds/bootstrap/credentials.go create mode 100644 vendor/google.golang.org/grpc/xds/csds/csds.go create mode 100644 vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go create mode 100644 vendor/google.golang.org/grpc/xds/googledirectpath/utils.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/balancer.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/logging.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/config.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/logging.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/balancerstateaggregator.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/config.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/picker.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/config.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder_childname.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/logging.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/callcounter.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/config.go create mode 100644 
vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/logging.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_child.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/priority/config.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/priority/ignore_resolve_now.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/priority/logging.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/priority/utils.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/config.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/logging.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/util.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/logging.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/clusterspecifier/cluster_specifier.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/clusterspecifier/rls/rls.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/httpfilter/httpfilter.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/httpfilter/router/router.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/internal.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/resolver/internal/internal.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/resolver/logging.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/attributes.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go create mode 100644 
vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/load/reporter.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/logging.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/requests_counter.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter/converter.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/xdslbregistry.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/errors.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/filter_chain.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/logging.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher_path.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/name.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_cds.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_lds.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_rds.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_lds.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_rds.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version/version.go create mode 100644 vendor/google.golang.org/grpc/xds/server.go create mode 100644 vendor/google.golang.org/grpc/xds/server_options.go create mode 100644 vendor/google.golang.org/grpc/xds/xds.go create mode 100644 
vendor/gopkg.in/evanphx/json-patch.v4/.gitignore create mode 100644 vendor/gopkg.in/evanphx/json-patch.v4/LICENSE create mode 100644 vendor/gopkg.in/evanphx/json-patch.v4/README.md create mode 100644 vendor/gopkg.in/evanphx/json-patch.v4/errors.go create mode 100644 vendor/gopkg.in/evanphx/json-patch.v4/merge.go create mode 100644 vendor/gopkg.in/evanphx/json-patch.v4/patch.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2/doc.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2/generated.pb.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2/generated.proto create mode 100644 vendor/k8s.io/api/apidiscovery/v2/register.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2/types.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/apps/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/authentication/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/authorization/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/autoscaling/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/autoscaling/v2/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/batch/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/certificates/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/coordination/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/coordination/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/coordination/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/coordination/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/coordination/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/coordination/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/coordination/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/coordination/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/coordination/v1alpha1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/core/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/discovery/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/events/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/well_known_labels.go create mode 100644 vendor/k8s.io/api/node/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/policy/v1/zz_generated.prerelease-lifecycle.go create mode 100644 
vendor/k8s.io/api/rbac/v1/zz_generated.prerelease-lifecycle.go delete mode 100644 vendor/k8s.io/api/resource/v1alpha2/generated.pb.go delete mode 100644 vendor/k8s.io/api/resource/v1alpha2/generated.proto delete mode 100644 vendor/k8s.io/api/resource/v1alpha2/types.go delete mode 100644 vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go rename vendor/k8s.io/api/resource/{v1alpha2 => v1alpha3}/doc.go (84%) create mode 100644 vendor/k8s.io/api/resource/v1alpha3/generated.pb.go create mode 100644 vendor/k8s.io/api/resource/v1alpha3/generated.proto rename vendor/k8s.io/api/resource/{v1alpha2 => v1alpha3}/register.go (92%) create mode 100644 vendor/k8s.io/api/resource/v1alpha3/types.go create mode 100644 vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/scheduling/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/storage/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/storagemigration/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/storagemigration/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/storagemigration/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/storagemigration/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/storagemigration/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/storagemigration/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/storagemigration/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/storagemigration/v1alpha1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper/test_restmapper.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/ordered.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/OWNERS create mode 100644 vendor/k8s.io/client-go/applyconfigurations/OWNERS create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/auditannotation.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/expressionwarning.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramkind.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go create mode 100644 
vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/typechecking.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybindingspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicyspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicystatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/variable.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicy.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicyrule.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidate.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidatespec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/claimsource.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/containeruser.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/linuxcontaineruser.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/nodefeatures.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandler.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandlerfeatures.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/doc.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereview.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewcontainerspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddressspec.go rename vendor/k8s.io/client-go/applyconfigurations/{resource/v1alpha2/resourceclassparametersreference.go => networking/v1beta1/parentreference.go} (51%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrstatus.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go delete mode 100644 
vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/celdeviceselector.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/device.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationresult.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceattribute.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaim.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaimconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclass.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconstraint.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequest.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequestallocationresult.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceselector.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/opaquedeviceconfiguration.go rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha2 => v1alpha3}/podschedulingcontext.go (93%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha2 => v1alpha3}/podschedulingcontextspec.go (87%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha2 => v1alpha3}/podschedulingcontextstatus.go (84%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha2 => v1alpha3}/resourceclaim.go (93%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha2 => v1alpha3}/resourceclaimconsumerreference.go (94%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha2 => v1alpha3}/resourceclaimschedulingstatus.go (86%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha2 => v1alpha3}/resourceclaimstatus.go (77%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha2 => v1alpha3}/resourceclaimtemplate.go (93%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha2 => v1alpha3}/resourceclaimtemplatespec.go (94%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourcepool.go rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha2/resourceclass.go => v1alpha3/resourceslice.go} (61%) create mode 100644 
vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslicespec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/groupversionresource.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/utils.go create mode 100644 vendor/k8s.io/client-go/features/envvar.go create mode 100644 vendor/k8s.io/client-go/features/features.go create mode 100644 vendor/k8s.io/client-go/features/known_features.go create mode 100644 vendor/k8s.io/client-go/gentype/type.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/informers/coordination/v1alpha1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/coordination/v1alpha1/leasecandidate.go create mode 100644 vendor/k8s.io/client-go/informers/networking/v1beta1/ipaddress.go create mode 100644 vendor/k8s.io/client-go/informers/networking/v1beta1/servicecidr.go create mode 100644 vendor/k8s.io/client-go/informers/resource/v1alpha3/deviceclass.go rename vendor/k8s.io/client-go/informers/resource/{v1alpha2 => v1alpha3}/interface.go (80%) rename vendor/k8s.io/client-go/informers/resource/{v1alpha2 => v1alpha3}/podschedulingcontext.go (86%) rename vendor/k8s.io/client-go/informers/resource/{v1alpha2 => v1alpha3}/resourceclaim.go (85%) rename vendor/k8s.io/client-go/informers/resource/{v1alpha2 => v1alpha3}/resourceclaimtemplate.go (86%) rename vendor/k8s.io/client-go/informers/resource/{v1alpha2/resourceclass.go => v1alpha3/resourceslice.go} (64%) create mode 100644 vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattributesclass.go create mode 100644 vendor/k8s.io/client-go/informers/storagemigration/interface.go create mode 100644 vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/storageversionmigration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/coordination_client.go rename vendor/k8s.io/client-go/kubernetes/typed/{resource/v1alpha2 => coordination/v1alpha1}/doc.go (97%) rename vendor/k8s.io/client-go/kubernetes/typed/{resource/v1alpha2 => coordination/v1alpha1}/fake/doc.go (100%) create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_coordination_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_leasecandidate.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/leasecandidate.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ipaddress.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_servicecidr.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ipaddress.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/servicecidr.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclass.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_deviceclass.go rename vendor/k8s.io/client-go/kubernetes/typed/resource/{v1alpha2 => v1alpha3}/fake/fake_podschedulingcontext.go (56%) rename vendor/k8s.io/client-go/kubernetes/typed/resource/{v1alpha2 => v1alpha3}/fake/fake_resource_client.go (59%) rename vendor/k8s.io/client-go/kubernetes/typed/resource/{v1alpha2 => v1alpha3}/fake/fake_resourceclaim.go (57%) rename vendor/k8s.io/client-go/kubernetes/typed/resource/{v1alpha2 => v1alpha3}/fake/fake_resourceclaimtemplate.go (58%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceslice.go rename vendor/k8s.io/client-go/kubernetes/typed/resource/{v1alpha2 => v1alpha3}/generated_expansion.go (88%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/podschedulingcontext.go rename vendor/k8s.io/client-go/kubernetes/typed/resource/{v1alpha2 => v1alpha3}/resource_client.go (65%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattributesclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattributesclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storagemigration_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storageversionmigration.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storagemigration_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/listers/coordination/v1alpha1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/coordination/v1alpha1/leasecandidate.go create mode 100644 vendor/k8s.io/client-go/listers/doc.go create mode 100644 vendor/k8s.io/client-go/listers/generic_helpers.go create mode 100644 vendor/k8s.io/client-go/listers/networking/v1beta1/ipaddress.go create mode 100644 vendor/k8s.io/client-go/listers/networking/v1beta1/servicecidr.go delete mode 100644 vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclass.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1alpha3/deviceclass.go rename vendor/k8s.io/client-go/listers/resource/{v1alpha2 => v1alpha3}/expansion_generated.go (85%) rename vendor/k8s.io/client-go/listers/resource/{v1alpha2 => v1alpha3}/podschedulingcontext.go (59%) rename vendor/k8s.io/client-go/listers/resource/{v1alpha2 => v1alpha3}/resourceclaim.go (58%) rename vendor/k8s.io/client-go/listers/resource/{v1alpha2 => v1alpha3}/resourceclaimtemplate.go (59%) create mode 100644 vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceslice.go create mode 100644 vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattributesclass.go create mode 100644 vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/storageversionmigration.go create mode 100644 vendor/k8s.io/client-go/tools/leaderelection/leasecandidate.go create mode 100644 vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go create mode 100644 vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go create mode 100644 vendor/k8s.io/client-go/util/consistencydetector/watch_list_data_consistency_detector.go create mode 100644 vendor/k8s.io/client-go/util/watchlist/watch_list.go delete mode 100644 vendor/k8s.io/utils/integer/integer.go create mode 100644 vendor/k8s.io/utils/net/multi_listen.go diff --git a/go.mod b/go.mod index 5358217c2f..fcb1274322 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,7 @@ module github.com/tektoncd/cli go 1.22.7 +toolchain go1.22.9 require ( github.com/AlecAivazis/survey/v2 v2.3.7 @@ -20,10 +21,10 @@ require ( github.com/mitchellh/go-homedir v1.1.0 github.com/pkg/errors v0.9.1 github.com/sigstore/cosign/v2 v2.4.1 - github.com/sigstore/sigstore v1.8.9 + github.com/sigstore/sigstore v1.8.10 github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 - github.com/tektoncd/chains v0.22.2 + github.com/tektoncd/chains v0.23.0 github.com/tektoncd/hub v1.19.0 github.com/tektoncd/pipeline v0.65.1 github.com/tektoncd/plumbing v0.0.0-20230907180608-5625252a2de1 @@ -35,10 +36,10 @@ require ( golang.org/x/term v0.25.0 gotest.tools v2.2.0+incompatible gotest.tools/v3 v3.5.1 - k8s.io/api v0.29.10 - k8s.io/apimachinery v0.29.10 + k8s.io/api v0.31.2 + k8s.io/apimachinery v0.31.2 k8s.io/cli-runtime v0.29.10 - k8s.io/client-go v0.29.10 + k8s.io/client-go v0.31.2 knative.dev/pkg 
v0.0.0-20240416145024-0f34a8815650 sigs.k8s.io/yaml v1.4.0 ) @@ -46,22 +47,24 @@ require ( replace github.com/alibabacloud-go/cr-20160607 => github.com/vdemeester/cr-20160607 v1.0.1 require ( + cel.dev/expr v0.16.1 // indirect cloud.google.com/go v0.116.0 // indirect - cloud.google.com/go/auth v0.9.8 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect + cloud.google.com/go/auth v0.10.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect cloud.google.com/go/compute/metadata v0.5.2 // indirect cloud.google.com/go/firestore v1.17.0 // indirect cloud.google.com/go/iam v1.2.1 // indirect cloud.google.com/go/kms v1.20.0 // indirect cloud.google.com/go/longrunning v0.6.1 // indirect - cloud.google.com/go/storage v1.43.0 // indirect + cloud.google.com/go/monitoring v1.21.1 // indirect + cloud.google.com/go/storage v1.46.0 // indirect contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d // indirect contrib.go.opencensus.io/exporter/prometheus v0.4.2 // indirect filippo.io/edwards25519 v1.1.0 // indirect github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 // indirect github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.15.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 // indirect @@ -75,7 +78,10 @@ require ( github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect - github.com/IBM/sarama v1.43.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect + github.com/IBM/sarama v1.43.3 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/ProtonMail/go-crypto v1.0.0 // indirect github.com/ThalesIgnite/crypto11 v1.2.5 // indirect @@ -93,32 +99,33 @@ require ( github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/aws/aws-sdk-go v1.55.5 // indirect - github.com/aws/aws-sdk-go-v2 v1.30.5 // indirect - github.com/aws/aws-sdk-go-v2/config v1.27.33 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.32 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 // indirect + github.com/aws/aws-sdk-go-v2 v1.32.2 // indirect + github.com/aws/aws-sdk-go-v2/config v1.27.43 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.41 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect 
github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2 // indirect github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 // indirect - github.com/aws/aws-sdk-go-v2/service/kms v1.35.7 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 // indirect - github.com/aws/smithy-go v1.20.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2 // indirect + github.com/aws/aws-sdk-go-v2/service/kms v1.37.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.2 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.32.2 // indirect + github.com/aws/smithy-go v1.22.0 // indirect github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blendle/zapdriver v1.3.1 // indirect - github.com/cenkalti/backoff/v3 v3.2.2 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 // indirect github.com/clbanning/mxj/v2 v2.7.0 // indirect github.com/cloudevents/sdk-go/v2 v2.15.2 // indirect github.com/cloudflare/circl v1.3.7 // indirect + github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be // indirect github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect github.com/coreos/go-oidc/v3 v3.11.0 // indirect @@ -130,14 +137,17 @@ require ( github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/docker-credential-helpers v0.8.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/eapache/go-resiliency v1.6.0 // indirect + github.com/eapache/go-resiliency v1.7.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/envoyproxy/go-control-plane v0.13.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/gdamore/encoding v1.0.0 // indirect github.com/gdamore/tcell/v2 v2.6.0 // indirect github.com/go-chi/chi v4.1.2+incompatible // indirect @@ -195,7 +205,7 @@ require ( github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/hcl v1.0.1-vault-5 // indirect - github.com/hashicorp/vault/api v1.14.0 // indirect + github.com/hashicorp/vault/api v1.15.0 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/in-toto/attestation v1.1.0 // indirect github.com/in-toto/in-toto-golang v0.9.1-0.20240317085821-8e2966059a09 // indirect @@ 
-206,7 +216,7 @@ require ( github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect - github.com/jellydator/ttlcache/v3 v3.2.0 // indirect + github.com/jellydator/ttlcache/v3 v3.3.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/joho/godotenv v1.5.1 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -241,10 +251,11 @@ require ( github.com/opencontainers/image-spec v1.1.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/openzipkin/zipkin-go v0.4.2 // indirect - github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/prometheus/client_golang v1.20.2 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.55.0 // indirect @@ -263,10 +274,10 @@ require ( github.com/sigstore/fulcio v1.6.3 // indirect github.com/sigstore/protobuf-specs v0.3.2 // indirect github.com/sigstore/rekor v1.3.6 // indirect - github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.8 // indirect - github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.8 // indirect - github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.8 // indirect - github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.8 // indirect + github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.10 // indirect + github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.10 // indirect + github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.10 // indirect + github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.10 // indirect github.com/sigstore/timestamp-authority v1.2.2 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect @@ -274,7 +285,7 @@ require ( github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect github.com/spf13/viper v1.19.0 // indirect - github.com/spiffe/go-spiffe/v2 v2.3.0 // indirect + github.com/spiffe/go-spiffe/v2 v2.4.0 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect @@ -287,27 +298,31 @@ require ( github.com/tjfoc/gmsm v1.4.1 // indirect github.com/transparency-dev/merkle v0.0.2 // indirect github.com/vbatts/tar-split v0.11.5 // indirect + github.com/x448/float16 v0.8.4 // indirect github.com/xanzy/go-gitlab v0.109.0 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/scram v1.1.2 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect - github.com/youmark/pkcs8 v0.0.0-20240424034433-3c2c7870ae76 // indirect + github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect github.com/zeebo/errs v1.3.0 // indirect - go.mongodb.org/mongo-driver v1.15.0 // indirect + go.mongodb.org/mongo-driver v1.16.1 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.29.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect 
go.opentelemetry.io/otel v1.29.0 // indirect go.opentelemetry.io/otel/metric v1.29.0 // indirect + go.opentelemetry.io/otel/sdk v1.29.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.29.0 // indirect go.opentelemetry.io/otel/trace v1.29.0 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.step.sm/crypto v0.51.2 // indirect goa.design/goa/v3 v3.19.1 // indirect - gocloud.dev v0.37.0 // indirect - gocloud.dev/docstore/mongodocstore v0.37.1-0.20240501181211-d8b9c9401f18 // indirect - gocloud.dev/pubsub/kafkapubsub v0.37.0 // indirect + gocloud.dev v0.40.0 // indirect + gocloud.dev/docstore/mongodocstore v0.40.0 // indirect + gocloud.dev/pubsub/kafkapubsub v0.40.0 // indirect golang.org/x/crypto v0.28.0 // indirect - golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect + golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e // indirect golang.org/x/mod v0.21.0 // indirect golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect @@ -315,22 +330,24 @@ require ( golang.org/x/sys v0.26.0 // indirect golang.org/x/text v0.19.0 // indirect golang.org/x/time v0.7.0 // indirect - golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect + golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/api v0.201.0 // indirect + google.golang.org/api v0.203.0 // indirect google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect google.golang.org/grpc v1.67.1 // indirect + google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a // indirect google.golang.org/protobuf v1.35.1 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.29.2 // indirect - k8s.io/klog/v2 v2.120.1 // indirect - k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect - k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect knative.dev/eventing v0.30.1-0.20220407170245-58865afba92c // indirect knative.dev/networking v0.0.0-20231017124814-2a7676e912b7 // indirect knative.dev/serving v0.39.0 // indirect diff --git a/go.sum b/go.sum index c71c9490d7..5531cfd160 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,5 @@ +cel.dev/expr v0.16.1 h1:NR0+oFYzR1CqLFhTAqg3ql59G9VfN8fKq1TCHJ6gq1g= +cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -28,10 +30,10 @@ cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Ud cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= -cloud.google.com/go/auth v0.9.8 h1:+CSJ0Gw9iVeSENVCKJoLHhdUykDgXSc4Qn+gu2BRtR8= 
-cloud.google.com/go/auth v0.9.8/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= -cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= -cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= +cloud.google.com/go/auth v0.10.0 h1:tWlkvFAh+wwTOzXIjrwM64karR1iTBZ/GRr0S/DULYo= +cloud.google.com/go/auth v0.10.0/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk= +cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -49,8 +51,12 @@ cloud.google.com/go/iam v1.2.1 h1:QFct02HRb7H12J/3utj0qf5tobFh9V4vR6h9eX5EBRU= cloud.google.com/go/iam v1.2.1/go.mod h1:3VUIJDPpwT6p/amXRC5GY8fCCh70lxPygguVtI0Z4/g= cloud.google.com/go/kms v1.20.0 h1:uKUvjGqbBlI96xGE669hcVnEMw1Px/Mvfa62dhM5UrY= cloud.google.com/go/kms v1.20.0/go.mod h1:/dMbFF1tLLFnQV44AoI2GlotbjowyUfgVwezxW291fM= +cloud.google.com/go/logging v1.11.0 h1:v3ktVzXMV7CwHq1MBF65wcqLMA7i+z3YxbUsoK7mOKs= +cloud.google.com/go/logging v1.11.0/go.mod h1:5LDiJC/RxTt+fHc1LAt20R9TKiUTReDg6RuuFOZ67+A= cloud.google.com/go/longrunning v0.6.1 h1:lOLTFxYpr8hcRtcwWir5ITh1PAKUD/sG2lKrTSYjyMc= cloud.google.com/go/longrunning v0.6.1/go.mod h1:nHISoOZpBcmlwbJmiVk5oDRz0qG/ZxPynEGs1iZ79s0= +cloud.google.com/go/monitoring v1.21.1 h1:zWtbIoBMnU5LP9A/fz8LmWMGHpk4skdfeiaa66QdFGc= +cloud.google.com/go/monitoring v1.21.1/go.mod h1:Rj++LKrlht9uBi8+Eb530dIrzG/cU/lB8mt+lbeFK1c= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -63,8 +69,10 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.18.2/go.mod h1:AiIj7BWXyhO5gGVmYJ+S8tbkCx3yb0IMjua8Aw4naVM= -cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= -cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= +cloud.google.com/go/storage v1.46.0 h1:OTXISBpFd8KaA2ClT3K3oRk8UGOcTHtrZ1bW88xKiic= +cloud.google.com/go/storage v1.46.0/go.mod h1:lM+gMAW91EfXIeMTBmixRsKL/XCxysytoAgduVikjMk= +cloud.google.com/go/trace v1.11.1 h1:UNqdP+HYYtnm6lb91aNA5JQ0X14GnxkABGlfz2PzPew= +cloud.google.com/go/trace v1.11.1/go.mod h1:IQKNQuBzH72EGaXEodKlNJrWykGZxet2zgjtS60OtjA= code.gitea.io/sdk/gitea v0.18.0 h1:+zZrwVmujIrgobt6wVBWCqITz6bn1aBjnCUHmpZrerI= code.gitea.io/sdk/gitea v0.18.0/go.mod h1:IG9xZJoltDNeDSW0qiF2Vqx5orMWa7OhVWrjvrd5NpI= contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d h1:LblfooH1lKOpp1hIhukktmSAxFkqMPFk9KR6iZ0MJNI= @@ -92,10 +100,12 @@ github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0/go.mod h1:tlqp9mUGbsP+0z3Q+c0Q5MgSdq/OMwQhm5bffR3Q3ss= github.com/Azure/azure-sdk-for-go 
v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.15.0 h1:eXzkOEXbSTOa7cJ7EqeCVi/OFi/ppDrUtQuttCWy74c= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.15.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0 h1:DRiANoJTiW6obBQe3SqZizkuV1PEgfiiGivmVocDy64= @@ -131,12 +141,22 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/IBM/sarama v1.43.0 h1:YFFDn8mMI2QL0wOrG0J2sFoVIAFl7hS9JQi2YZsXtJc= -github.com/IBM/sarama v1.43.0/go.mod h1:zlE6HEbC/SMQ9mhEYaF7nNLYOUyrs0obySKCckWP9BM= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 h1:pB2F2JKCj1Znmp2rwxxt1J0Fg0wezTMgWYk5Mpbi1kg= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1/go.mod h1:jyqM3eLpJ3IbIFDTKVz2rF9T/xWGW0rIriGwnz8l9Tk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1 h1:oTX4vsorBZo/Zdum6OKPA4o7544hm6smoRv1QjpTwGo= 
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1/go.mod h1:0wEl7vrAD8mehJyohS9HZy+WyEOaQO2mJx86Cvh93kM= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 h1:8nn+rsCvTq9axyEh382S0PFLBeaFwNsT43IrPWzctRU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1/go.mod h1:viRWSEhtMZqz1rhwmOVKkWl6SwmVowfL9O2YR5gI2PE= +github.com/IBM/sarama v1.43.3 h1:Yj6L2IaNvb2mRBop39N7mmJAHBVY3dTPncr3qGVkxPA= +github.com/IBM/sarama v1.43.3/go.mod h1:FVIRaLrhK3Cla/9FfRF5X9Zua2KpS3SYIXxhac1H+FQ= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Microsoft/hcsshim v0.11.7 h1:vl/nj3Bar/CvJSYo7gIQPyRWc9f3c6IeSNavBTSZNZQ= @@ -227,41 +247,41 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/aws/aws-sdk-go-v2 v1.21.2/go.mod h1:ErQhvNuEMhJjweavOYhxVkn2RUx7kQXVATHrjKtxIpM= -github.com/aws/aws-sdk-go-v2 v1.30.5 h1:mWSRTwQAb0aLE17dSzztCVJWI9+cRMgqebndjwDyK0g= -github.com/aws/aws-sdk-go-v2 v1.30.5/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= -github.com/aws/aws-sdk-go-v2/config v1.27.33 h1:Nof9o/MsmH4oa0s2q9a0k7tMz5x/Yj5k06lDODWz3BU= -github.com/aws/aws-sdk-go-v2/config v1.27.33/go.mod h1:kEqdYzRb8dd8Sy2pOdEbExTTF5v7ozEXX0McgPE7xks= -github.com/aws/aws-sdk-go-v2/credentials v1.17.32 h1:7Cxhp/BnT2RcGy4VisJ9miUPecY+lyE9I8JvcZofn9I= -github.com/aws/aws-sdk-go-v2/credentials v1.17.32/go.mod h1:P5/QMF3/DCHbXGEGkdbilXHsyTBX5D3HSwcrSc9p20I= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 h1:pfQ2sqNpMVK6xz2RbqLEL0GH87JOwSxPV2rzm8Zsb74= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13/go.mod h1:NG7RXPUlqfsCLLFfi0+IpKN4sCB9D9fw/qTaSB+xRoU= +github.com/aws/aws-sdk-go-v2 v1.32.2 h1:AkNLZEyYMLnx/Q/mSKkcMqwNFXMAvFto9bNsHqcTduI= +github.com/aws/aws-sdk-go-v2 v1.32.2/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= +github.com/aws/aws-sdk-go-v2/config v1.27.43 h1:p33fDDihFC390dhhuv8nOmX419wjOSDQRb+USt20RrU= +github.com/aws/aws-sdk-go-v2/config v1.27.43/go.mod h1:pYhbtvg1siOOg8h5an77rXle9tVG8T+BWLWAo7cOukc= +github.com/aws/aws-sdk-go-v2/credentials v1.17.41 h1:7gXo+Axmp+R4Z+AK8YFQO0ZV3L0gizGINCOWxSLY9W8= +github.com/aws/aws-sdk-go-v2/credentials v1.17.41/go.mod h1:u4Eb8d3394YLubphT4jLEwN1rLNq2wFOlT6OuxFwPzU= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 h1:TMH3f/SCAWdNtXXVPPu5D6wrr4G5hI1rAxbcocKfC7Q= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17/go.mod h1:1ZRXLdTpzdJb9fwTMXiLipENRxkGMTn1sfKexGllQCw= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43/go.mod h1:auo+PiyLl0n1l8A0e8RIeR8tOzYPfZZH/JNlrJ8igTQ= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 h1:pI7Bzt0BJtYA0N/JEC6B8fJ4RBrEMi1LBrkMdFYNSnQ= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17/go.mod h1:Dh5zzJYMtxfIjYW+/evjQ8uj2OyR/ve2KROHGHlSFqE= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 h1:UAsR3xA31QGf79WzpG/ixT9FZvQlh5HY1NRqSHBNOCk= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21/go.mod h1:JNr43NFf5L9YaG3eKTm7HQzls9J+A9YYcGI5Quh1r2Y= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37/go.mod h1:Qe+2KtKml+FEsQF/DHmDV+xjtche/hwoF75EG4UlHW8= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 
h1:Mqr/V5gvrhA2gvgnF42Zh5iMiQNcOYthFYwCyrnuWlc= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17/go.mod h1:aLJpZlCmjE+V+KtN1q1uyZkfnUWpQGpbsn89XPKyzfU= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 h1:6jZVETqmYCadGFvrYEQfC5fAQmlo80CeL5psbno6r0s= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21/go.mod h1:1SR0GbLlnN3QUmYaflZNiH1ql+1qrSiB2vwcJ+4UM60= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2 h1:y6LX9GUoEA3mO0qpFl1ZQHj1rFyPWVphlzebiSt2tKE= github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2/go.mod h1:Q0LcmaN/Qr8+4aSBrdrXXePqoX0eOuYpJLbYpilmWnA= github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2 h1:PpbXaecV3sLAS6rjQiaKw4/jyq3Z8gNzmoJupHAoBp0= github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2/go.mod h1:fUHpGXr4DrXkEDpGAjClPsviWf+Bszeb0daKE0blxv8= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 h1:KypMCbLPPHEmf9DgMGw51jMj77VfGPAN2Kv4cfhlfgI= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 h1:rfprUlsdzgl7ZL2KlXiUAoJnI/VxfHCvDFr2QDFj6u4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19/go.mod h1:SCWkEdRq8/7EK60NcvvQ6NXKuTcchAD4ROAsC37VEZE= -github.com/aws/aws-sdk-go-v2/service/kms v1.35.7 h1:v0D1LeMkA/X+JHAZWERrr+sUGOt8KrCZKnJA6KszkcE= -github.com/aws/aws-sdk-go-v2/service/kms v1.35.7/go.mod h1:K9lwD0Rsx9+NSaJKsdAdlDK4b2G4KKOEve9PzHxPoMI= -github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 h1:pIaGg+08llrP7Q5aiz9ICWbY8cqhTkyy+0SHvfzQpTc= -github.com/aws/aws-sdk-go-v2/service/sso v1.22.7/go.mod h1:eEygMHnTKH/3kNp9Jr1n3PdejuSNcgwLe1dWgQtO0VQ= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 h1:/Cfdu0XV3mONYKaOt1Gr0k1KvQzkzPyiKUdlWJqy+J4= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7/go.mod h1:bCbAxKDqNvkHxRaIMnyVPXPo+OaPRwvmgzMxbz1VKSA= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 h1:NKTa1eqZYw8tiHSRGpP0VtTdub/8KNk8sDkNPFaOKDE= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.7/go.mod h1:NXi1dIAGteSaRLqYgarlhP/Ij0cFT+qmCwiJqWh/U5o= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 h1:TToQNkvGguu209puTojY/ozlqy2d/SFNcoLIqTFi42g= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0/go.mod h1:0jp+ltwkf+SwG2fm/PKo8t4y8pJSgOCO4D8Lz3k0aHQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2 h1:s7NA1SOw8q/5c0wr8477yOPp0z+uBaXBnLE0XYb0POA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2/go.mod h1:fnjjWyAW/Pj5HYOxl9LJqWtEwS7W2qgcRLWP+uWbss0= +github.com/aws/aws-sdk-go-v2/service/kms v1.37.2 h1:tfBABi5R6aSZlhgTWHxL+opYUDOnIGoNcJLwVYv0jLM= +github.com/aws/aws-sdk-go-v2/service/kms v1.37.2/go.mod h1:dZYFcQwuoh+cLOlFnZItijZptmyDhRIkOKWFO1CfzV8= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.2 h1:bSYXVyUzoTHoKalBmwaZxs97HU9DWWI3ehHSAMa7xOk= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.2/go.mod h1:skMqY7JElusiOUjMJMOv1jJsP7YUg7DrhgqZZWuzu1U= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2 h1:AhmO1fHINP9vFYUE0LHzCWg/LfUWUF+zFPEcY9QXb7o= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2/go.mod h1:o8aQygT2+MVP0NaV6kbdE1YnnIM8RRVQzoeUH45GOdI= +github.com/aws/aws-sdk-go-v2/service/sts v1.32.2 h1:CiS7i0+FUe+/YY1GvIBLLrR/XNGZ4CtM1Ll0XavNuVo= +github.com/aws/aws-sdk-go-v2/service/sts 
v1.32.2/go.mod h1:HtaiBI8CjYoNVde8arShXb94UbQQi9L4EMr6D+xGBwo= github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4= -github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM= +github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8 h1:SoFYaT9UyGkR0+nogNyD/Lj+bsixB+SNuAS4ABlEs6M= github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8/go.mod h1:2JF49jcDOrLStIXN/j/K1EKRq8a8R2qRnlZA6/o/c7c= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= @@ -291,8 +311,6 @@ github.com/buildkite/roko v1.2.0 h1:hbNURz//dQqNl6Eo9awjQOVOZwSDJ8VEbBDxSfT9rGQ= github.com/buildkite/roko v1.2.0/go.mod h1:23R9e6nHxgedznkwwfmqZ6+0VJZJZ2Sg/uVcp2cP46I= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/c2h5oh/datasize v0.0.0-20171227191756-4eba002a5eae/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= -github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= -github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -333,6 +351,8 @@ github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cockroachdb/apd/v3 v3.2.1 h1:U+8j7t0axsIgvQUqthuNm82HIrYXodOV2iWLWtEaIwg= github.com/cockroachdb/apd/v3 v3.2.1/go.mod h1:klXJcjp+FffLTHlhIG69tezTDvdP065naDsHzKhYSqc= github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= @@ -390,6 +410,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-gk v0.0.0-20140819190930-201884a44051/go.mod h1:qm+vckxRlDt0aOla0RYJJVeqHZlWfOm2UIxHaqPB46E= github.com/dgryski/go-gk v0.0.0-20200319235926-a69029f61654/go.mod h1:qm+vckxRlDt0aOla0RYJJVeqHZlWfOm2UIxHaqPB46E= github.com/dgryski/go-lttb v0.0.0-20180810165845-318fcdf10a77/go.mod h1:Va5MyIzkU0rAM92tn3hb3Anb7oz7KcnixF49+2wOMe4= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/digitorus/pkcs7 
v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE= @@ -413,8 +435,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= -github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= +github.com/eapache/go-resiliency v1.7.0 h1:n3NRTnBn5N0Cbi/IeOHuQn9s2UwVUH7Ga0ZWcP+9JTA= +github.com/eapache/go-resiliency v1.7.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= @@ -438,7 +460,11 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= @@ -462,8 +488,10 @@ github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7z github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= 
github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko= github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= github.com/gdamore/tcell/v2 v2.6.0 h1:OKbluoP9VYmJwZwq/iLb4BxwKcwGthaa1YNBJIyCySg= @@ -554,7 +582,8 @@ github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh4 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/gobuffalo/flect v0.2.4/go.mod h1:1ZyCLIbg0YD7sDkzvFdPoOydPtD8y9JQnrOROolUcM8= @@ -671,8 +700,8 @@ github.com/google/go-github/v55 v55.0.0/go.mod h1:JLahOTA1DnXzhxEymmFF5PP2tSS9JV github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= -github.com/google/go-replayers/grpcreplay v1.1.0 h1:S5+I3zYyZ+GQz68OfbURDdt/+cSMqCK1wrvNx7WBzTE= -github.com/google/go-replayers/grpcreplay v1.1.0/go.mod h1:qzAvJ8/wi57zq7gWqaE6AwLM6miiXUQwP1S+I9icmhk= +github.com/google/go-replayers/grpcreplay v1.3.0 h1:1Keyy0m1sIpqstQmgz307zhiJ1pV4uIlFds5weTmxbo= +github.com/google/go-replayers/grpcreplay v1.3.0/go.mod h1:v6NgKtkijC0d3e3RW8il6Sy5sqRVUwoQa4mHOGEy8DI= github.com/google/go-replayers/httpreplay v1.2.0 h1:VM1wEyyjaoU53BwrOnaf9VhAyQQEEioJvFYxYcLRKzk= github.com/google/go-replayers/httpreplay v1.2.0/go.mod h1:WahEFFZZ7a1P4VM1qEeHy+tME4bwyqPcwWbNlUI1Mcg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -702,8 +731,8 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0= -github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= @@ -814,8 +843,8 @@ github.com/hashicorp/logutils v1.0.0/go.mod 
h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/vault/api v1.14.0 h1:Ah3CFLixD5jmjusOgm8grfN9M0d+Y8fVR2SW0K6pJLU= -github.com/hashicorp/vault/api v1.14.0/go.mod h1:pV9YLxBGSz+cItFDd8Ii4G17waWOQ32zVjMWHe/cOqk= +github.com/hashicorp/vault/api v1.15.0 h1:O24FYQCWwhwKnF7CuSqP30S51rTV7vz1iACXE/pj5DA= +github.com/hashicorp/vault/api v1.15.0/go.mod h1:+5YTO09JGn0u+b6ySD/LLVf8WkJCPLAL2Vkmrn2+CM8= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68= github.com/hinshun/vt10x v0.0.0-20220228203356-1ab2cad5fd82 h1:uf1FmugJNeFovjWtxD7FSPWQXdi0KuKnZfvN4CFUAtA= github.com/hinshun/vt10x v0.0.0-20220228203356-1ab2cad5fd82/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68= @@ -855,8 +884,8 @@ github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZ github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY= github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= -github.com/jellydator/ttlcache/v3 v3.2.0 h1:6lqVJ8X3ZaUwvzENqPAobDsXNExfUJd61u++uW8a3LE= -github.com/jellydator/ttlcache/v3 v3.2.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= +github.com/jellydator/ttlcache/v3 v3.3.0 h1:BdoC9cE81qXfrxeb9eoJi9dWrdhSuwXMAnHTbnBm4Wc= +github.com/jellydator/ttlcache/v3 v3.3.0/go.mod h1:bj2/e0l4jRnQdrnSTaGTsh4GSXvMjQcy41i7th0GVGw= github.com/jenkins-x/go-scm v1.14.37 h1:Tq59JXyg5p4iuvIKf6+EA+Yzgxgpn/yG/yfM1mL8DDg= github.com/jenkins-x/go-scm v1.14.37/go.mod h1:MRLj/i0mhpMtqwwZV+x78SkEB8mx9rv3ebdRg9WunS8= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -891,6 +920,8 @@ github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dv github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs= +github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -982,8 +1013,9 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/spdystream 
v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8= +github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= @@ -1031,8 +1063,8 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= -github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -1042,8 +1074,8 @@ github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuK github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= -github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= +github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= github.com/open-policy-agent/opa v0.68.0 h1:Jl3U2vXRjwk7JrHmS19U3HZO5qxQRinQbJ2eCJYSqJQ= github.com/open-policy-agent/opa v0.68.0/go.mod h1:5E5SvaPwTpwt2WM177I9Z3eT7qUpmOGjk1ZdHs+TZ4w= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -1063,8 +1095,8 @@ github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtP github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.0-beta.2/go.mod h1:+X+aW6gUj6Hda43TeYHVCIvYNG/jqY/8ZFXAeXXHl+Q= -github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= -github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= @@ -1082,6 +1114,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -1134,6 +1168,8 @@ github.com/rabbitmq/amqp091-go v1.1.0/go.mod h1:ogQDLSOACsLPsIq0NpbtiifNZi2YOz0V github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= +github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= github.com/rickb777/date v1.13.0/go.mod h1:GZf3LoGnxPWjX+/1TXOuzHefZFDovTyNLHDMd3qH70k= github.com/rickb777/plural v1.2.1/go.mod h1:j058+3M5QQFgcZZ2oKIOekcygoZUL8gKW5yRO14BuAw= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= @@ -1185,18 +1221,18 @@ github.com/sigstore/protobuf-specs v0.3.2 h1:nCVARCN+fHjlNCk3ThNXwrZRqIommIeNKWw github.com/sigstore/protobuf-specs v0.3.2/go.mod h1:RZ0uOdJR4OB3tLQeAyWoJFbNCBFrPQdcokntde4zRBA= github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8= github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc= -github.com/sigstore/sigstore v1.8.9 h1:NiUZIVWywgYuVTxXmRoTT4O4QAGiTEKup4N1wdxFadk= -github.com/sigstore/sigstore v1.8.9/go.mod h1:d9ZAbNDs8JJfxJrYmulaTazU3Pwr8uLL9+mii4BNR3w= +github.com/sigstore/sigstore v1.8.10 h1:r4t+TYzJlG9JdFxMy+um9GZhZ2N1hBTyTex0AHEZxFs= +github.com/sigstore/sigstore v1.8.10/go.mod h1:BekjqxS5ZtHNJC4u3Q3Stvfx2eyisbW/lUZzmPU2u4A= github.com/sigstore/sigstore-go v0.6.1 h1:tGkkv1oDIER+QYU5MrjqlttQOVDWfSkmYwMqkJhB/cg= github.com/sigstore/sigstore-go v0.6.1/go.mod h1:Xe5GHmUeACRFbomUWzVkf/xYCn8xVifb9DgqJrV2dIw= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.8 h1:2zHmUvaYCwV6LVeTo+OAkTm8ykOGzA9uFlAjwDPAUWM= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.8/go.mod h1:OEhheBplZinUsm7W9BupafztVZV3ldkAxEHbpAeC0Pk= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.8 h1:RKk4Z+qMaLORUdT7zntwMqKiYAej1VQlCswg0S7xNSY= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.8/go.mod h1:dMJdlBWKHMu2xf0wIKpbo7+QfG+RzVkBB3nHP8EMM5o= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.8 h1:89Xtxj8oqZt3UlSpCP4wApFvnQ2Z/dgowW5QOVhQigI= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.8/go.mod h1:Wa4xn/H3pU/yW/6tHiMXTpObBtBSGC5q29KYFEPKN6o= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.8 h1:Zte3Oogkd8m+nu2oK3yHtGmN++TZWh2Lm6q2iSprT1M= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.8/go.mod h1:j00crVw6ki4/WViXflw0zWgNALrAzZT+GbIK8v7Xlz4= 
+github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.10 h1:e5GfVngPjGap/N3ODefayt7vKIPS1/v3hWLZ9+4MrN4= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.10/go.mod h1:HOr3AdFPKdND2FNl/sUD5ZifPl1OMJvrbf9xIaaWcus= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.10 h1:9tZEpfIL/ewAG9G87AHe3aVoy8Ujos2F1qLfCckX6jQ= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.10/go.mod h1:VnIAcitund62R45ezK/dtUeEhuRtB3LsAgJ8m0H34zc= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.10 h1:Xre51HdjIIaVo5ox5zyL+6h0tkrx7Ke9Neh7fLmmZK0= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.10/go.mod h1:VNfdklQDbyGJog8S7apdxiEfmYmCkKyxrsCL9xprkTY= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.10 h1:HjfjL3x3dP2kaGqQHVog974cTcKfzFaGjfZyLQ9KXrg= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.10/go.mod h1:jaeEjkTW1p3gUyPjz9lTcT4TydCs208FoyAwIs6bIT4= github.com/sigstore/timestamp-authority v1.2.2 h1:X4qyutnCQqJ0apMewFyx+3t7Tws00JQ/JonBiu3QvLE= github.com/sigstore/timestamp-authority v1.2.2/go.mod h1:nEah4Eq4wpliDjlY342rXclGSO7Kb9hoRrl9tqLW13A= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -1245,8 +1281,8 @@ github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5q github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= -github.com/spiffe/go-spiffe/v2 v2.3.0 h1:g2jYNb/PDMB8I7mBGL2Zuq/Ur6hUhoroxGQFyD6tTj8= -github.com/spiffe/go-spiffe/v2 v2.3.0/go.mod h1:Oxsaio7DBgSNqhAO9i/9tLClaVlfRok7zvJnTV8ZyIY= +github.com/spiffe/go-spiffe/v2 v2.4.0 h1:j/FynG7hi2azrBG5cvjRcnQ4sux/VNj8FAVc99Fl66c= +github.com/spiffe/go-spiffe/v2 v2.4.0/go.mod h1:m5qJ1hGzjxjtrkGHZupoXHo/FDWwCB1MdSyBzfHugx0= github.com/spiffe/spire-api-sdk v1.10.0 h1:QFZ8fucWhbV4y4TKWKLxGc7SNrCLThn2t5qpVNIiiRY= github.com/spiffe/spire-api-sdk v1.10.0/go.mod h1:4uuhFlN6KBWjACRP3xXwrOTNnvaLp1zJs8Lribtr4fI= github.com/sqs/goreturns v0.0.0-20181028201513-538ac6014518/go.mod h1:CKI4AZ4XmGV240rTHfO0hfE83S6/a3/Q1siZJ/vXf7A= @@ -1272,7 +1308,6 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc= @@ -1283,8 +1318,8 @@ github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDd github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= -github.com/tektoncd/chains v0.22.2 h1:OjlEUyVw1kWBewVrZ3GRYipN8w8cLU2nMCCz0lZMG9Y= -github.com/tektoncd/chains v0.22.2/go.mod h1:5FsO4gIKUIlJ4ohmmMXep0GPMWN1oEwRLXiETmU7XhY= 
+github.com/tektoncd/chains v0.23.0 h1:zRhzSUlAPxL/dUH0AXWcW8AtJbF2FrqM2yTyR3J8A5U= +github.com/tektoncd/chains v0.23.0/go.mod h1:Bhyomb/vr0cnI4+VJmjeukRCyti4XGWb9GPWSttAFIs= github.com/tektoncd/hub v1.19.0 h1:UXl/jPB9uoa4ULyh64SN1VuKMMIZvXq6bkRGI8ATU+g= github.com/tektoncd/hub v1.19.0/go.mod h1:JJvxZ37nPygZOaUgDIQddihWnD+hSKExPs261FXmttc= github.com/tektoncd/pipeline v0.65.1 h1:7Ee/nqG+QWE25NGzwKZdFE0p5COb/aljfDysUFv8+0o= @@ -1327,6 +1362,8 @@ github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8ok github.com/vdemeester/cr-20160607 v1.0.1 h1:nHyI7BZNR04QFtgItJFVAr8SLeoVIFd8co+DODxnPKE= github.com/vdemeester/cr-20160607 v1.0.1/go.mod h1:QHeKZtZ3F3FOE+/uIXCBAp8POwnUYekpLwr1dtQa5r0= github.com/wavesoftware/go-ensure v1.0.0/go.mod h1:K2UAFSwMTvpiRGay/M3aEYYuurcR8S4A6HkQlJPV8k4= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xanzy/go-gitlab v0.109.0 h1:RcRme5w8VpLXTSTTMZdVoQWY37qTJWg+gwdQl4aAttE= github.com/xanzy/go-gitlab v0.109.0/go.mod h1:wKNKh3GkYDMOsGmnfuX+ITCmDuSDWFO0G+C4AygL9RY= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= @@ -1349,8 +1386,8 @@ github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg= github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= -github.com/youmark/pkcs8 v0.0.0-20240424034433-3c2c7870ae76 h1:tBiBTKHnIjovYoLX/TPkcf+OjqqKGQrPtGT3Foz+Pgo= -github.com/youmark/pkcs8 v0.0.0-20240424034433-3c2c7870ae76/go.mod h1:SQliXeA7Dhkt//vS29v3zpbEwoa+zb2Cn5xj5uO4K5U= +github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM= +github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI= github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ= github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns= github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= @@ -1383,8 +1420,8 @@ go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lL go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= -go.mongodb.org/mongo-driver v1.15.0 h1:rJCKC8eEliewXjZGf0ddURtl7tTVy1TK3bfl0gkUSLc= -go.mongodb.org/mongo-driver v1.15.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= +go.mongodb.org/mongo-driver v1.16.1 h1:rIVLL3q0IHM39dvE+z2ulZLp9ENZKThVfuvN/IiN4l8= +go.mongodb.org/mongo-driver v1.16.1/go.mod h1:oB6AhJQvFQL4LEHyXi6aJzQJtBiTQHiAd83l0GdFaiw= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -1395,6 +1432,8 @@ go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod 
h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= +go.opentelemetry.io/contrib/detectors/gcp v1.29.0 h1:TiaiXB4DpGD3sdzNlYQxruQngn5Apwzi1X0DRhuGvDQ= +go.opentelemetry.io/contrib/detectors/gcp v1.29.0/go.mod h1:GW2aWZNwR2ZxDLdv8OyC2G8zkRoQBuURgV7RPQgcPoU= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= @@ -1419,6 +1458,8 @@ go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHy go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= +go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY= +go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= @@ -1449,12 +1490,12 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= goa.design/goa/v3 v3.19.1 h1:jpV3LEy7YANzPMwm++Lu17RoThRJgXrPxdEM0A1nlOE= goa.design/goa/v3 v3.19.1/go.mod h1:astNE9ube0YCxqq7DQkt1MtLxB/b3kRPEFkEZovcO2I= -gocloud.dev v0.37.0 h1:XF1rN6R0qZI/9DYjN16Uy0durAmSlf58DHOcb28GPro= -gocloud.dev v0.37.0/go.mod h1:7/O4kqdInCNsc6LqgmuFnS0GRew4XNNYWpA44yQnwco= -gocloud.dev/docstore/mongodocstore v0.37.1-0.20240501181211-d8b9c9401f18 h1:71QcVqNRzyPI+f3v38XK+TjrLusNUFIYA7Wro48aX2I= -gocloud.dev/docstore/mongodocstore v0.37.1-0.20240501181211-d8b9c9401f18/go.mod h1:IN/uj8zLRQ0sZ5UffdERqoB5MT7DsmyMQtZDht9cknQ= -gocloud.dev/pubsub/kafkapubsub v0.37.0 h1:rH122Q2COVNhAGRyid/FHY164LAZhss9V5DiIKQ5Gos= -gocloud.dev/pubsub/kafkapubsub v0.37.0/go.mod h1:y0a+Rv5JvNuVyv1qGic2m1bDoy0ZArWLqcxm/1WxuB8= +gocloud.dev v0.40.0 h1:f8LgP+4WDqOG/RXoUcyLpeIAGOcAbZrZbDQCUee10ng= +gocloud.dev v0.40.0/go.mod h1:drz+VyYNBvrMTW0KZiBAYEdl8lbNZx+OQ7oQvdrFmSQ= +gocloud.dev/docstore/mongodocstore v0.40.0 h1:KTwP9Wr3PNPxN3bItxtK/RHiF4D6cTYjE5GDkPKcJrI= +gocloud.dev/docstore/mongodocstore v0.40.0/go.mod h1:wjrLAUAoAKA1jbteT4nP36Gy5egPAHvwBfAJFUH9YKI= +gocloud.dev/pubsub/kafkapubsub v0.40.0 h1:6gfRtOUd/ztKAQLYPUl7xFIVre9okcvKUp0CMaMSY18= +gocloud.dev/pubsub/kafkapubsub v0.40.0/go.mod h1:/03R9nFjSFfjM/dPVnQdtbAQL4InkE7832zhvE1x034= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1497,8 +1538,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod 
h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY= -golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= +golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e h1:I88y4caeGeuDQxgdoFPUq097j7kNfw6uvuiNxUBfcBk= +golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1858,8 +1899,8 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 h1:LLhsEBxRTBLuKlQxFBYUOU8xyFgXv6cOTp2HASDlsDk= +golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= @@ -1898,8 +1939,8 @@ google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.201.0 h1:+7AD9JNM3tREtawRMu8sOjSbb8VYcYXJG/2eEOmfDu0= -google.golang.org/api v0.201.0/go.mod h1:HVY0FCHVs89xIW9fzf/pBvOEm+OolHa86G/txFezyq4= +google.golang.org/api v0.203.0 h1:SrEeuwU3S11Wlscsn+LA1kb/Y5xT8uggJSkIhD08NAU= +google.golang.org/api v0.203.0/go.mod h1:BuOVyCSYEPwJb3npWvDnNmFI92f3GeRnHNkETneT3SI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2008,6 +2049,8 @@ google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ5 google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a 
h1:UIpYSuWdWHSzjwcAFRLjKcPXFZVVLXGEM23W+NWqipw= +google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a/go.mod h1:9i1T9n4ZinTUZGgzENMi8MDDgbGC5mqTS75JAv6xN3A= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -2033,6 +2076,8 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/h2non/gock.v1 v1.1.2 h1:jBbHXgGBK/AoPVfJh5x4r/WxIrElvbLel8TCZkkZJoY= gopkg.in/h2non/gock.v1 v1.1.2/go.mod h1:n7UGz/ckNChHiK05rDoiC4MYSunEC/lyaUm2WWaDva0= @@ -2080,23 +2125,23 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.23.4/go.mod h1:i77F4JfyNNrhOjZF7OwwNJS5Y1S9dpwvb9iYRYRczfI= k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8= -k8s.io/api v0.29.10 h1:Fao3HOxccbGRC1HZtXD+Y41xJhP0tEToVo5W7EEUBm0= -k8s.io/api v0.29.10/go.mod h1:rF0sRh64w1hMNAVGh4YYniSxODyHye3GLmymAbWBDvY= +k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0= +k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk= k8s.io/apiextensions-apiserver v0.23.4/go.mod h1:TWYAKymJx7nLMxWCgWm2RYGXHrGlVZnxIlGnvtfYu+g= k8s.io/apiextensions-apiserver v0.29.2 h1:UK3xB5lOWSnhaCk0RFZ0LUacPZz9RY4wi/yt2Iu+btg= k8s.io/apiextensions-apiserver v0.29.2/go.mod h1:aLfYjpA5p3OwtqNXQFkhJ56TB+spV8Gc4wfMhUA3/b8= k8s.io/apimachinery v0.19.7/go.mod h1:6sRbGRAVY5DOCuZwB5XkqguBqpqLU6q/kOaOdk29z6Q= k8s.io/apimachinery v0.23.4/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= -k8s.io/apimachinery v0.29.10 h1:57OLNqOJUgp5KlRRY3JOBFOTTa5Rt/LVkmKiiN2cvaQ= -k8s.io/apimachinery v0.29.10/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= +k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw= +k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/apiserver v0.23.4/go.mod h1:A6l/ZcNtxGfPSqbFDoxxOjEjSKBaQmE+UTveOmMkpNc= k8s.io/cli-runtime v0.29.10 h1:iX8bo6jmxvmoq046YX/Ry+s3Vc21sfxmYzjKLfEaP6M= k8s.io/cli-runtime v0.29.10/go.mod h1:sGE4CX6FM600SEX8h/tPS0WGFYkTH+W5oU9eyp5Xtpc= k8s.io/client-go v0.23.4/go.mod h1:PKnIL4pqLuvYUK1WU7RLTMYKPiIh7MYShLshtRY9cj0= k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4= -k8s.io/client-go v0.29.10 h1:hPmG1pmKslRhmCIzVd90sA58B0sJwNwduNgXFWsFqhI= -k8s.io/client-go v0.29.10/go.mod h1:gnMCQiRXGL9K0VtlW8gTkhzptGrHm2BJ4qBbujNemc4= +k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc= +k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs= 
k8s.io/code-generator v0.23.4/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= k8s.io/code-generator v0.23.5/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= k8s.io/component-base v0.23.4/go.mod h1:8o3Gg8i2vnUXGPOwciiYlkSaZT+p+7gA9Scoz8y4W4E= @@ -2111,17 +2156,17 @@ k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.40.2-0.20220227211518-7ea6d6adb645/go.mod h1:N3kgBtsFxMb4nQ0eBDgbHEt/dtxBuTkSFQ+7K5OUoz4= k8s.io/klog/v2 v2.60.1-0.20220317184644-43cc75f9ae89/go.mod h1:N3kgBtsFxMb4nQ0eBDgbHEt/dtxBuTkSFQ+7K5OUoz4= -k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= -k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak= -k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= knative.dev/eventing v0.30.1-0.20220407170245-58865afba92c h1:5QvWwe3uB7BefHGMg6hC05YVi+zr1kGK7DVMFquqJMI= knative.dev/eventing v0.30.1-0.20220407170245-58865afba92c/go.mod h1:6hU8ayB9weJ6cqZejOjZiHkqy1PuNA2I9T32hWj2laI= knative.dev/hack v0.0.0-20220224013837-e1785985d364/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= diff --git a/vendor/cel.dev/expr/.bazelversion b/vendor/cel.dev/expr/.bazelversion new file mode 100644 index 0000000000..579c9d21e7 --- /dev/null +++ b/vendor/cel.dev/expr/.bazelversion @@ -0,0 +1,2 @@ +6.4.0 +# Keep this pinned version in parity with cel-go diff --git a/vendor/cel.dev/expr/.gitattributes b/vendor/cel.dev/expr/.gitattributes new file mode 100644 index 0000000000..3de1ec213a --- /dev/null +++ b/vendor/cel.dev/expr/.gitattributes @@ -0,0 +1,2 @@ +*.pb.go linguist-generated=true +*.pb.go -diff -merge diff --git a/vendor/cel.dev/expr/.gitignore b/vendor/cel.dev/expr/.gitignore new file mode 100644 index 0000000000..ac51a054d2 --- /dev/null +++ b/vendor/cel.dev/expr/.gitignore @@ -0,0 +1 @@ +bazel-* diff --git a/vendor/cel.dev/expr/BUILD.bazel b/vendor/cel.dev/expr/BUILD.bazel new file mode 100644 index 0000000000..0bbe9ed773 --- /dev/null +++ b/vendor/cel.dev/expr/BUILD.bazel @@ 
-0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +go_library( + name = "expr", + srcs = [ + "checked.pb.go", + "eval.pb.go", + "explain.pb.go", + "syntax.pb.go", + "value.pb.go", + ], + importpath = "cel.dev/expr", + visibility = ["//visibility:public"], + deps = [ + "//proto/cel/expr:google_rpc_status_go_proto", + "@org_golang_google_protobuf//reflect/protoreflect", + "@org_golang_google_protobuf//runtime/protoimpl", + "@org_golang_google_protobuf//types/known/anypb", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/emptypb", + "@org_golang_google_protobuf//types/known/structpb", + "@org_golang_google_protobuf//types/known/timestamppb", + ], +) + +alias( + name = "go_default_library", + actual = ":expr", + visibility = ["//visibility:public"], +) diff --git a/vendor/cel.dev/expr/CODE_OF_CONDUCT.md b/vendor/cel.dev/expr/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..59908e2d8e --- /dev/null +++ b/vendor/cel.dev/expr/CODE_OF_CONDUCT.md @@ -0,0 +1,25 @@ +# Contributor Code of Conduct +## Version 0.1.1 (adapted from 0.3b-angular) + +As contributors and maintainers of the Common Expression Language +(CEL) project, we pledge to respect everyone who contributes by +posting issues, updating documentation, submitting pull requests, +providing feedback in comments, and any other activities. + +Communication through any of CEL's channels (GitHub, Gitter, IRC, +mailing lists, Google+, Twitter, etc.) must be constructive and never +resort to personal attacks, trolling, public or private harassment, +insults, or other unprofessional conduct. + +We promise to extend courtesy and respect to everyone involved in this +project regardless of gender, gender identity, sexual orientation, +disability, age, race, ethnicity, religion, or level of experience. We +expect anyone contributing to the project to do the same. + +If any member of the community violates this code of conduct, the +maintainers of the CEL project may take action, removing issues, +comments, and PRs or blocking accounts as deemed appropriate. + +If you are subject to or witness unacceptable behavior, or have any +other concerns, please email us at +[cel-conduct@google.com](mailto:cel-conduct@google.com). diff --git a/vendor/cel.dev/expr/CONTRIBUTING.md b/vendor/cel.dev/expr/CONTRIBUTING.md new file mode 100644 index 0000000000..8f5fd5c31f --- /dev/null +++ b/vendor/cel.dev/expr/CONTRIBUTING.md @@ -0,0 +1,32 @@ +# How to Contribute + +We'd love to accept your patches and contributions to this project. There are a +few guidelines you need to follow. + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement. You (or your employer) retain the copyright to your contribution, +this simply gives us permission to use and redistribute your contributions as +part of the project. Head over to to see +your current agreements on file or to sign a new one. + +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code reviews + +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. Consult +[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more +information on using pull requests. 
+ +## What to expect from maintainers + +Expect maintainers to respond to new issues or pull requests within a week. +For outstanding and ongoing issues and particularly for long-running +pull requests, expect the maintainers to review within a week of a +contributor asking for a new review. There is no commitment to resolution -- +merging or closing a pull request, or fixing or closing an issue -- because some +issues will require more discussion than others. diff --git a/vendor/cel.dev/expr/GOVERNANCE.md b/vendor/cel.dev/expr/GOVERNANCE.md new file mode 100644 index 0000000000..0a525bc17d --- /dev/null +++ b/vendor/cel.dev/expr/GOVERNANCE.md @@ -0,0 +1,43 @@ +# Project Governance + +This document defines the governance process for the CEL language. CEL is +Google-developed, but openly governed. Major contributors to the CEL +specification and its corresponding implementations constitute the CEL +Language Council. New members may be added by a unanimous vote of the +Council. + +The MAINTAINERS.md file lists the members of the CEL Language Council, and +unofficially indicates the "areas of expertise" of each member with respect +to the publicly available CEL repos. + +## Code Changes + +Code changes must follow the standard pull request (PR) model documented in the +CONTRIBUTING.md for each CEL repo. All fixes and features must be reviewed by a +maintainer. The maintainer reserves the right to request that any feature +request (FR) or PR be reviewed by the language council. + +## Syntax and Semantic Changes + +Syntactic and semantic changes must be reviewed by the CEL Language Council. +Maintainers may also request language council review at their discretion. + +The review process is as follows: + +- Create a Feature Request in the CEL-Spec repo. The feature description will + serve as an abstract for the detailed design document. +- Co-develop a design document with the Language Council. +- Once the proposer gives the design document approval, the document will be + linked to the FR in the CEL-Spec repo and opened for comments to members of + the cel-lang-discuss@googlegroups.com. +- The Language Council will review the design doc at the next council meeting + (once every three weeks) and the council decision included in the document. + +If the proposal is approved, the spec will be updated by a maintainer (if +applicable) and a rationale will be included in the CEL-Spec wiki to ensure +future developers may follow CEL's growth and direction over time. + +Approved proposals may be implemented by the proposer or by the maintainers as +the parties see fit. At the discretion of the maintainer, changes from the +approved design are permitted during implementation if they improve the user +experience and clarity of the feature. diff --git a/vendor/cel.dev/expr/LICENSE b/vendor/cel.dev/expr/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/cel.dev/expr/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/cel.dev/expr/MAINTAINERS.md b/vendor/cel.dev/expr/MAINTAINERS.md new file mode 100644 index 0000000000..1ed2eb8ab3 --- /dev/null +++ b/vendor/cel.dev/expr/MAINTAINERS.md @@ -0,0 +1,13 @@ +# CEL Language Council + +| Name | Company | Area of Expertise | +|-----------------|--------------|-------------------| +| Alfred Fuller | Facebook | cel-cpp, cel-spec | +| Jim Larson | Google | cel-go, cel-spec | +| Matthais Blume | Google | cel-spec | +| Tristan Swadell | Google | cel-go, cel-spec | + +## Emeritus + +* Sanjay Ghemawat (Google) +* Wolfgang Grieskamp (Facebook) diff --git a/vendor/cel.dev/expr/README.md b/vendor/cel.dev/expr/README.md new file mode 100644 index 0000000000..2da1e7f2fa --- /dev/null +++ b/vendor/cel.dev/expr/README.md @@ -0,0 +1,65 @@ +# Common Expression Language + +The Common Expression Language (CEL) implements common semantics for expression +evaluation, enabling different applications to more easily interoperate. + +Key Applications + +* Security policy: organizations have complex infrastructure and need common + tooling to reason about the system as a whole +* Protocols: expressions are a useful data type and require interoperability + across programming languages and platforms. + + +Guiding philosophy: + +1. Keep it small & fast. + * CEL evaluates in linear time, is mutation free, and not Turing-complete. + This limitation is a feature of the language design, which allows the + implementation to evaluate orders of magnitude faster than equivalently + sandboxed JavaScript. +2. Make it extensible. + * CEL is designed to be embedded in applications, and allows for + extensibility via its context which allows for functions and data to be + provided by the software that embeds it. +3. Developer-friendly. + * The language is approachable to developers. The initial spec was based + on the experience of developing Firebase Rules and usability testing + many prior iterations. + * The library itself and accompanying toolings should be easy to adopt by + teams that seek to integrate CEL into their platforms. + +The required components of a system that supports CEL are: + +* The textual representation of an expression as written by a developer. It is + of similar syntax to expressions in C/C++/Java/JavaScript +* A binary representation of an expression. It is an abstract syntax tree + (AST). +* A compiler library that converts the textual representation to the binary + representation. This can be done ahead of time (in the control plane) or + just before evaluation (in the data plane). +* A context containing one or more typed variables, often protobuf messages. + Most use-cases will use `attribute_context.proto` +* An evaluator library that takes the binary format in the context and + produces a result, usually a Boolean. + +Example of boolean conditions and object construction: + +``` c +// Condition +account.balance >= transaction.withdrawal + || (account.overdraftProtection + && account.overdraftLimit >= transaction.withdrawal - account.balance) + +// Object construction +common.GeoPoint{ latitude: 10.0, longitude: -5.5 } +``` + +For more detail, see: + +* [Introduction](doc/intro.md) +* [Language Definition](doc/langdef.md) + +Released under the [Apache License](LICENSE). + +Disclaimer: This is not an official Google product. 
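The vendored README above outlines the pieces of a CEL deployment: a textual expression, a compiler that produces a checked AST (the binary form carried by the cel.dev/expr protos), a typed context of variables, and an evaluator. For orientation only, here is a minimal Go sketch of that flow using the sibling github.com/google/cel-go library; it is not part of this vendored cel.dev/expr package, and the variable names and sample balances are invented for illustration.

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	// Declare the typed context the README describes: variables the
	// expression may reference, modeled here as string->dyn maps.
	env, err := cel.NewEnv(
		cel.Variable("account", cel.MapType(cel.StringType, cel.DynType)),
		cel.Variable("transaction", cel.MapType(cel.StringType, cel.DynType)),
	)
	if err != nil {
		panic(err)
	}

	// Compile the textual expression into a checked AST.
	ast, iss := env.Compile(`account.balance >= transaction.withdrawal`)
	if iss.Err() != nil {
		panic(iss.Err())
	}

	// Build an evaluator and run it against concrete (made-up) values.
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{
		"account":     map[string]any{"balance": 500.0},
		"transaction": map[string]any{"withdrawal": 100.0},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // true
}
```

This is a sketch under the assumption that the embedding application uses cel-go for compilation and evaluation; the compile step can equally be done ahead of time in a control plane and only the checked AST shipped to the data plane, as the README notes.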
diff --git a/vendor/cel.dev/expr/WORKSPACE b/vendor/cel.dev/expr/WORKSPACE new file mode 100644 index 0000000000..bb4c469adb --- /dev/null +++ b/vendor/cel.dev/expr/WORKSPACE @@ -0,0 +1,145 @@ +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +http_archive( + name = "io_bazel_rules_go", + sha256 = "099a9fb96a376ccbbb7d291ed4ecbdfd42f6bc822ab77ae6f1b5cb9e914e94fa", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip", + "https://github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip", + ], +) + +http_archive( + name = "bazel_gazelle", + sha256 = "ecba0f04f96b4960a5b250c8e8eeec42281035970aa8852dda73098274d14a1d", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz", + "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz", + ], +) + +http_archive( + name = "rules_proto", + sha256 = "e017528fd1c91c5a33f15493e3a398181a9e821a804eb7ff5acdd1d2d6c2b18d", + strip_prefix = "rules_proto-4.0.0-3.20.0", + urls = [ + "https://github.com/bazelbuild/rules_proto/archive/refs/tags/4.0.0-3.20.0.tar.gz", + ], +) + +# googleapis as of 05/26/2023 +http_archive( + name = "com_google_googleapis", + strip_prefix = "googleapis-07c27163ac591955d736f3057b1619ece66f5b99", + sha256 = "bd8e735d881fb829751ecb1a77038dda4a8d274c45490cb9fcf004583ee10571", + urls = [ + "https://github.com/googleapis/googleapis/archive/07c27163ac591955d736f3057b1619ece66f5b99.tar.gz", + ], +) + +# protobuf +http_archive( + name = "com_google_protobuf", + sha256 = "8242327e5df8c80ba49e4165250b8f79a76bd11765facefaaecfca7747dc8da2", + strip_prefix = "protobuf-3.21.5", + urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.21.5.zip"], +) + +# googletest +http_archive( + name = "com_google_googletest", + urls = ["https://github.com/google/googletest/archive/master.zip"], + strip_prefix = "googletest-master", +) + +# gflags +http_archive( + name = "com_github_gflags_gflags", + sha256 = "6e16c8bc91b1310a44f3965e616383dbda48f83e8c1eaa2370a215057b00cabe", + strip_prefix = "gflags-77592648e3f3be87d6c7123eb81cbad75f9aef5a", + urls = [ + "https://mirror.bazel.build/github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz", + "https://github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz", + ], +) + +# glog +http_archive( + name = "com_google_glog", + sha256 = "1ee310e5d0a19b9d584a855000434bb724aa744745d5b8ab1855c85bff8a8e21", + strip_prefix = "glog-028d37889a1e80e8a07da1b8945ac706259e5fd8", + urls = [ + "https://mirror.bazel.build/github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz", + "https://github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz", + ], +) + +# absl +http_archive( + name = "com_google_absl", + strip_prefix = "abseil-cpp-master", + urls = ["https://github.com/abseil/abseil-cpp/archive/master.zip"], +) + +load("@io_bazel_rules_go//go:deps.bzl", "go_rules_dependencies", "go_register_toolchains") +load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository") +load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_language") +load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies", "rules_proto_toolchains") +load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps") + +switched_rules_by_language( + name = "com_google_googleapis_imports", + cc 
= True, +) + +# Do *not* call *_dependencies(), etc, yet. See comment at the end. + +# Generated Google APIs protos for Golang +# Generated Google APIs protos for Golang 05/25/2023 +go_repository( + name = "org_golang_google_genproto_googleapis_api", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/genproto/googleapis/api", + sum = "h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ=", + version = "v0.0.0-20230525234035-dd9d682886f9", +) + +# Generated Google APIs protos for Golang 05/25/2023 +go_repository( + name = "org_golang_google_genproto_googleapis_rpc", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/genproto/googleapis/rpc", + sum = "h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM=", + version = "v0.0.0-20230525234030-28d5490b6b19", +) + +# gRPC deps +go_repository( + name = "org_golang_google_grpc", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/grpc", + tag = "v1.49.0", +) + +go_repository( + name = "org_golang_x_net", + importpath = "golang.org/x/net", + sum = "h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=", + version = "v0.0.0-20190311183353-d8887717615a", +) + +go_repository( + name = "org_golang_x_text", + importpath = "golang.org/x/text", + sum = "h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=", + version = "v0.3.2", +) + +# Run the dependencies at the end. These will silently try to import some +# of the above repositories but at different versions, so ours must come first. +go_rules_dependencies() +go_register_toolchains(version = "1.19.1") +gazelle_dependencies() +rules_proto_dependencies() +rules_proto_toolchains() +protobuf_deps() diff --git a/vendor/cel.dev/expr/checked.pb.go b/vendor/cel.dev/expr/checked.pb.go new file mode 100644 index 0000000000..bb225c8ab3 --- /dev/null +++ b/vendor/cel.dev/expr/checked.pb.go @@ -0,0 +1,1432 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/checked.proto + +package expr + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Type_PrimitiveType int32 + +const ( + Type_PRIMITIVE_TYPE_UNSPECIFIED Type_PrimitiveType = 0 + Type_BOOL Type_PrimitiveType = 1 + Type_INT64 Type_PrimitiveType = 2 + Type_UINT64 Type_PrimitiveType = 3 + Type_DOUBLE Type_PrimitiveType = 4 + Type_STRING Type_PrimitiveType = 5 + Type_BYTES Type_PrimitiveType = 6 +) + +// Enum value maps for Type_PrimitiveType. 
+var ( + Type_PrimitiveType_name = map[int32]string{ + 0: "PRIMITIVE_TYPE_UNSPECIFIED", + 1: "BOOL", + 2: "INT64", + 3: "UINT64", + 4: "DOUBLE", + 5: "STRING", + 6: "BYTES", + } + Type_PrimitiveType_value = map[string]int32{ + "PRIMITIVE_TYPE_UNSPECIFIED": 0, + "BOOL": 1, + "INT64": 2, + "UINT64": 3, + "DOUBLE": 4, + "STRING": 5, + "BYTES": 6, + } +) + +func (x Type_PrimitiveType) Enum() *Type_PrimitiveType { + p := new(Type_PrimitiveType) + *p = x + return p +} + +func (x Type_PrimitiveType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Type_PrimitiveType) Descriptor() protoreflect.EnumDescriptor { + return file_cel_expr_checked_proto_enumTypes[0].Descriptor() +} + +func (Type_PrimitiveType) Type() protoreflect.EnumType { + return &file_cel_expr_checked_proto_enumTypes[0] +} + +func (x Type_PrimitiveType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Type_PrimitiveType.Descriptor instead. +func (Type_PrimitiveType) EnumDescriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0} +} + +type Type_WellKnownType int32 + +const ( + Type_WELL_KNOWN_TYPE_UNSPECIFIED Type_WellKnownType = 0 + Type_ANY Type_WellKnownType = 1 + Type_TIMESTAMP Type_WellKnownType = 2 + Type_DURATION Type_WellKnownType = 3 +) + +// Enum value maps for Type_WellKnownType. +var ( + Type_WellKnownType_name = map[int32]string{ + 0: "WELL_KNOWN_TYPE_UNSPECIFIED", + 1: "ANY", + 2: "TIMESTAMP", + 3: "DURATION", + } + Type_WellKnownType_value = map[string]int32{ + "WELL_KNOWN_TYPE_UNSPECIFIED": 0, + "ANY": 1, + "TIMESTAMP": 2, + "DURATION": 3, + } +) + +func (x Type_WellKnownType) Enum() *Type_WellKnownType { + p := new(Type_WellKnownType) + *p = x + return p +} + +func (x Type_WellKnownType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Type_WellKnownType) Descriptor() protoreflect.EnumDescriptor { + return file_cel_expr_checked_proto_enumTypes[1].Descriptor() +} + +func (Type_WellKnownType) Type() protoreflect.EnumType { + return &file_cel_expr_checked_proto_enumTypes[1] +} + +func (x Type_WellKnownType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Type_WellKnownType.Descriptor instead. 
+func (Type_WellKnownType) EnumDescriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1} +} + +type CheckedExpr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ReferenceMap map[int64]*Reference `protobuf:"bytes,2,rep,name=reference_map,json=referenceMap,proto3" json:"reference_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TypeMap map[int64]*Type `protobuf:"bytes,3,rep,name=type_map,json=typeMap,proto3" json:"type_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + SourceInfo *SourceInfo `protobuf:"bytes,5,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"` + ExprVersion string `protobuf:"bytes,6,opt,name=expr_version,json=exprVersion,proto3" json:"expr_version,omitempty"` + Expr *Expr `protobuf:"bytes,4,opt,name=expr,proto3" json:"expr,omitempty"` +} + +func (x *CheckedExpr) Reset() { + *x = CheckedExpr{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CheckedExpr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CheckedExpr) ProtoMessage() {} + +func (x *CheckedExpr) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckedExpr.ProtoReflect.Descriptor instead. +func (*CheckedExpr) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{0} +} + +func (x *CheckedExpr) GetReferenceMap() map[int64]*Reference { + if x != nil { + return x.ReferenceMap + } + return nil +} + +func (x *CheckedExpr) GetTypeMap() map[int64]*Type { + if x != nil { + return x.TypeMap + } + return nil +} + +func (x *CheckedExpr) GetSourceInfo() *SourceInfo { + if x != nil { + return x.SourceInfo + } + return nil +} + +func (x *CheckedExpr) GetExprVersion() string { + if x != nil { + return x.ExprVersion + } + return "" +} + +func (x *CheckedExpr) GetExpr() *Expr { + if x != nil { + return x.Expr + } + return nil +} + +type Type struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to TypeKind: + // + // *Type_Dyn + // *Type_Null + // *Type_Primitive + // *Type_Wrapper + // *Type_WellKnown + // *Type_ListType_ + // *Type_MapType_ + // *Type_Function + // *Type_MessageType + // *Type_TypeParam + // *Type_Type + // *Type_Error + // *Type_AbstractType_ + TypeKind isType_TypeKind `protobuf_oneof:"type_kind"` +} + +func (x *Type) Reset() { + *x = Type{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type) ProtoMessage() {} + +func (x *Type) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// 
Deprecated: Use Type.ProtoReflect.Descriptor instead. +func (*Type) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1} +} + +func (m *Type) GetTypeKind() isType_TypeKind { + if m != nil { + return m.TypeKind + } + return nil +} + +func (x *Type) GetDyn() *emptypb.Empty { + if x, ok := x.GetTypeKind().(*Type_Dyn); ok { + return x.Dyn + } + return nil +} + +func (x *Type) GetNull() structpb.NullValue { + if x, ok := x.GetTypeKind().(*Type_Null); ok { + return x.Null + } + return structpb.NullValue(0) +} + +func (x *Type) GetPrimitive() Type_PrimitiveType { + if x, ok := x.GetTypeKind().(*Type_Primitive); ok { + return x.Primitive + } + return Type_PRIMITIVE_TYPE_UNSPECIFIED +} + +func (x *Type) GetWrapper() Type_PrimitiveType { + if x, ok := x.GetTypeKind().(*Type_Wrapper); ok { + return x.Wrapper + } + return Type_PRIMITIVE_TYPE_UNSPECIFIED +} + +func (x *Type) GetWellKnown() Type_WellKnownType { + if x, ok := x.GetTypeKind().(*Type_WellKnown); ok { + return x.WellKnown + } + return Type_WELL_KNOWN_TYPE_UNSPECIFIED +} + +func (x *Type) GetListType() *Type_ListType { + if x, ok := x.GetTypeKind().(*Type_ListType_); ok { + return x.ListType + } + return nil +} + +func (x *Type) GetMapType() *Type_MapType { + if x, ok := x.GetTypeKind().(*Type_MapType_); ok { + return x.MapType + } + return nil +} + +func (x *Type) GetFunction() *Type_FunctionType { + if x, ok := x.GetTypeKind().(*Type_Function); ok { + return x.Function + } + return nil +} + +func (x *Type) GetMessageType() string { + if x, ok := x.GetTypeKind().(*Type_MessageType); ok { + return x.MessageType + } + return "" +} + +func (x *Type) GetTypeParam() string { + if x, ok := x.GetTypeKind().(*Type_TypeParam); ok { + return x.TypeParam + } + return "" +} + +func (x *Type) GetType() *Type { + if x, ok := x.GetTypeKind().(*Type_Type); ok { + return x.Type + } + return nil +} + +func (x *Type) GetError() *emptypb.Empty { + if x, ok := x.GetTypeKind().(*Type_Error); ok { + return x.Error + } + return nil +} + +func (x *Type) GetAbstractType() *Type_AbstractType { + if x, ok := x.GetTypeKind().(*Type_AbstractType_); ok { + return x.AbstractType + } + return nil +} + +type isType_TypeKind interface { + isType_TypeKind() +} + +type Type_Dyn struct { + Dyn *emptypb.Empty `protobuf:"bytes,1,opt,name=dyn,proto3,oneof"` +} + +type Type_Null struct { + Null structpb.NullValue `protobuf:"varint,2,opt,name=null,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Type_Primitive struct { + Primitive Type_PrimitiveType `protobuf:"varint,3,opt,name=primitive,proto3,enum=cel.expr.Type_PrimitiveType,oneof"` +} + +type Type_Wrapper struct { + Wrapper Type_PrimitiveType `protobuf:"varint,4,opt,name=wrapper,proto3,enum=cel.expr.Type_PrimitiveType,oneof"` +} + +type Type_WellKnown struct { + WellKnown Type_WellKnownType `protobuf:"varint,5,opt,name=well_known,json=wellKnown,proto3,enum=cel.expr.Type_WellKnownType,oneof"` +} + +type Type_ListType_ struct { + ListType *Type_ListType `protobuf:"bytes,6,opt,name=list_type,json=listType,proto3,oneof"` +} + +type Type_MapType_ struct { + MapType *Type_MapType `protobuf:"bytes,7,opt,name=map_type,json=mapType,proto3,oneof"` +} + +type Type_Function struct { + Function *Type_FunctionType `protobuf:"bytes,8,opt,name=function,proto3,oneof"` +} + +type Type_MessageType struct { + MessageType string `protobuf:"bytes,9,opt,name=message_type,json=messageType,proto3,oneof"` +} + +type Type_TypeParam struct { + TypeParam string 
`protobuf:"bytes,10,opt,name=type_param,json=typeParam,proto3,oneof"` +} + +type Type_Type struct { + Type *Type `protobuf:"bytes,11,opt,name=type,proto3,oneof"` +} + +type Type_Error struct { + Error *emptypb.Empty `protobuf:"bytes,12,opt,name=error,proto3,oneof"` +} + +type Type_AbstractType_ struct { + AbstractType *Type_AbstractType `protobuf:"bytes,14,opt,name=abstract_type,json=abstractType,proto3,oneof"` +} + +func (*Type_Dyn) isType_TypeKind() {} + +func (*Type_Null) isType_TypeKind() {} + +func (*Type_Primitive) isType_TypeKind() {} + +func (*Type_Wrapper) isType_TypeKind() {} + +func (*Type_WellKnown) isType_TypeKind() {} + +func (*Type_ListType_) isType_TypeKind() {} + +func (*Type_MapType_) isType_TypeKind() {} + +func (*Type_Function) isType_TypeKind() {} + +func (*Type_MessageType) isType_TypeKind() {} + +func (*Type_TypeParam) isType_TypeKind() {} + +func (*Type_Type) isType_TypeKind() {} + +func (*Type_Error) isType_TypeKind() {} + +func (*Type_AbstractType_) isType_TypeKind() {} + +type Decl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to DeclKind: + // + // *Decl_Ident + // *Decl_Function + DeclKind isDecl_DeclKind `protobuf_oneof:"decl_kind"` +} + +func (x *Decl) Reset() { + *x = Decl{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl) ProtoMessage() {} + +func (x *Decl) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl.ProtoReflect.Descriptor instead. 
+func (*Decl) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{2} +} + +func (x *Decl) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *Decl) GetDeclKind() isDecl_DeclKind { + if m != nil { + return m.DeclKind + } + return nil +} + +func (x *Decl) GetIdent() *Decl_IdentDecl { + if x, ok := x.GetDeclKind().(*Decl_Ident); ok { + return x.Ident + } + return nil +} + +func (x *Decl) GetFunction() *Decl_FunctionDecl { + if x, ok := x.GetDeclKind().(*Decl_Function); ok { + return x.Function + } + return nil +} + +type isDecl_DeclKind interface { + isDecl_DeclKind() +} + +type Decl_Ident struct { + Ident *Decl_IdentDecl `protobuf:"bytes,2,opt,name=ident,proto3,oneof"` +} + +type Decl_Function struct { + Function *Decl_FunctionDecl `protobuf:"bytes,3,opt,name=function,proto3,oneof"` +} + +func (*Decl_Ident) isDecl_DeclKind() {} + +func (*Decl_Function) isDecl_DeclKind() {} + +type Reference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + OverloadId []string `protobuf:"bytes,3,rep,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"` + Value *Constant `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Reference) Reset() { + *x = Reference{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Reference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Reference) ProtoMessage() {} + +func (x *Reference) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Reference.ProtoReflect.Descriptor instead. +func (*Reference) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{3} +} + +func (x *Reference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Reference) GetOverloadId() []string { + if x != nil { + return x.OverloadId + } + return nil +} + +func (x *Reference) GetValue() *Constant { + if x != nil { + return x.Value + } + return nil +} + +type Type_ListType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ElemType *Type `protobuf:"bytes,1,opt,name=elem_type,json=elemType,proto3" json:"elem_type,omitempty"` +} + +func (x *Type_ListType) Reset() { + *x = Type_ListType{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_ListType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_ListType) ProtoMessage() {} + +func (x *Type_ListType) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_ListType.ProtoReflect.Descriptor instead. 
+func (*Type_ListType) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *Type_ListType) GetElemType() *Type { + if x != nil { + return x.ElemType + } + return nil +} + +type Type_MapType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + KeyType *Type `protobuf:"bytes,1,opt,name=key_type,json=keyType,proto3" json:"key_type,omitempty"` + ValueType *Type `protobuf:"bytes,2,opt,name=value_type,json=valueType,proto3" json:"value_type,omitempty"` +} + +func (x *Type_MapType) Reset() { + *x = Type_MapType{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_MapType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_MapType) ProtoMessage() {} + +func (x *Type_MapType) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_MapType.ProtoReflect.Descriptor instead. +func (*Type_MapType) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *Type_MapType) GetKeyType() *Type { + if x != nil { + return x.KeyType + } + return nil +} + +func (x *Type_MapType) GetValueType() *Type { + if x != nil { + return x.ValueType + } + return nil +} + +type Type_FunctionType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ResultType *Type `protobuf:"bytes,1,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"` + ArgTypes []*Type `protobuf:"bytes,2,rep,name=arg_types,json=argTypes,proto3" json:"arg_types,omitempty"` +} + +func (x *Type_FunctionType) Reset() { + *x = Type_FunctionType{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_FunctionType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_FunctionType) ProtoMessage() {} + +func (x *Type_FunctionType) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_FunctionType.ProtoReflect.Descriptor instead. 
+func (*Type_FunctionType) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 2} +} + +func (x *Type_FunctionType) GetResultType() *Type { + if x != nil { + return x.ResultType + } + return nil +} + +func (x *Type_FunctionType) GetArgTypes() []*Type { + if x != nil { + return x.ArgTypes + } + return nil +} + +type Type_AbstractType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + ParameterTypes []*Type `protobuf:"bytes,2,rep,name=parameter_types,json=parameterTypes,proto3" json:"parameter_types,omitempty"` +} + +func (x *Type_AbstractType) Reset() { + *x = Type_AbstractType{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_AbstractType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_AbstractType) ProtoMessage() {} + +func (x *Type_AbstractType) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_AbstractType.ProtoReflect.Descriptor instead. +func (*Type_AbstractType) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 3} +} + +func (x *Type_AbstractType) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Type_AbstractType) GetParameterTypes() []*Type { + if x != nil { + return x.ParameterTypes + } + return nil +} + +type Decl_IdentDecl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type *Type `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Value *Constant `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Doc string `protobuf:"bytes,3,opt,name=doc,proto3" json:"doc,omitempty"` +} + +func (x *Decl_IdentDecl) Reset() { + *x = Decl_IdentDecl{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl_IdentDecl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl_IdentDecl) ProtoMessage() {} + +func (x *Decl_IdentDecl) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl_IdentDecl.ProtoReflect.Descriptor instead. 
+func (*Decl_IdentDecl) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *Decl_IdentDecl) GetType() *Type { + if x != nil { + return x.Type + } + return nil +} + +func (x *Decl_IdentDecl) GetValue() *Constant { + if x != nil { + return x.Value + } + return nil +} + +func (x *Decl_IdentDecl) GetDoc() string { + if x != nil { + return x.Doc + } + return "" +} + +type Decl_FunctionDecl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Overloads []*Decl_FunctionDecl_Overload `protobuf:"bytes,1,rep,name=overloads,proto3" json:"overloads,omitempty"` +} + +func (x *Decl_FunctionDecl) Reset() { + *x = Decl_FunctionDecl{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl_FunctionDecl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl_FunctionDecl) ProtoMessage() {} + +func (x *Decl_FunctionDecl) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl_FunctionDecl.ProtoReflect.Descriptor instead. +func (*Decl_FunctionDecl) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1} +} + +func (x *Decl_FunctionDecl) GetOverloads() []*Decl_FunctionDecl_Overload { + if x != nil { + return x.Overloads + } + return nil +} + +type Decl_FunctionDecl_Overload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OverloadId string `protobuf:"bytes,1,opt,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"` + Params []*Type `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty"` + TypeParams []string `protobuf:"bytes,3,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"` + ResultType *Type `protobuf:"bytes,4,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"` + IsInstanceFunction bool `protobuf:"varint,5,opt,name=is_instance_function,json=isInstanceFunction,proto3" json:"is_instance_function,omitempty"` + Doc string `protobuf:"bytes,6,opt,name=doc,proto3" json:"doc,omitempty"` +} + +func (x *Decl_FunctionDecl_Overload) Reset() { + *x = Decl_FunctionDecl_Overload{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl_FunctionDecl_Overload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl_FunctionDecl_Overload) ProtoMessage() {} + +func (x *Decl_FunctionDecl_Overload) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl_FunctionDecl_Overload.ProtoReflect.Descriptor instead. 
+func (*Decl_FunctionDecl_Overload) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1, 0} +} + +func (x *Decl_FunctionDecl_Overload) GetOverloadId() string { + if x != nil { + return x.OverloadId + } + return "" +} + +func (x *Decl_FunctionDecl_Overload) GetParams() []*Type { + if x != nil { + return x.Params + } + return nil +} + +func (x *Decl_FunctionDecl_Overload) GetTypeParams() []string { + if x != nil { + return x.TypeParams + } + return nil +} + +func (x *Decl_FunctionDecl_Overload) GetResultType() *Type { + if x != nil { + return x.ResultType + } + return nil +} + +func (x *Decl_FunctionDecl_Overload) GetIsInstanceFunction() bool { + if x != nil { + return x.IsInstanceFunction + } + return false +} + +func (x *Decl_FunctionDecl_Overload) GetDoc() string { + if x != nil { + return x.Doc + } + return "" +} + +var File_cel_expr_checked_proto protoreflect.FileDescriptor + +var file_cel_expr_checked_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x1a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, + 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xba, 0x03, 0x0a, 0x0b, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, + 0x45, 0x78, 0x70, 0x72, 0x12, 0x4c, 0x0a, 0x0d, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, + 0x70, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, + 0x61, 0x70, 0x12, 0x3d, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, + 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x4d, 0x61, + 0x70, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x72, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x65, 0x78, 0x70, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x65, + 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x1a, + 0x54, 0x0a, 0x11, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45, + 0x6e, 0x74, 0x72, 
0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4a, 0x0a, 0x0c, 0x54, 0x79, 0x70, 0x65, 0x4d, 0x61, 0x70, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0xe6, 0x09, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x03, 0x64, 0x79, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, + 0x00, 0x52, 0x03, 0x64, 0x79, 0x6e, 0x12, 0x30, 0x0a, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x48, 0x00, 0x52, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x12, 0x3c, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6d, + 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, + 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x70, 0x72, 0x69, + 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, + 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, + 0x12, 0x3d, 0x0a, 0x0a, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x54, 0x79, 0x70, 0x65, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x77, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x12, + 0x36, 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x6c, + 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x61, 0x70, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, + 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x08, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 
0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0a, + 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x12, 0x24, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x12, 0x42, 0x0a, 0x0d, 0x61, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x62, 0x73, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x62, 0x73, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x37, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x65, 0x6c, 0x65, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x54, 0x79, 0x70, 0x65, + 0x1a, 0x63, 0x0a, 0x07, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x6b, + 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x6c, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x61, 0x72, 0x67, 0x54, 0x79, + 0x70, 0x65, 0x73, 0x1a, 0x5b, 0x0a, 0x0c, 0x41, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 
0x2e, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x0e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x73, + 0x22, 0x73, 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x52, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x49, + 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, + 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x59, + 0x54, 0x45, 0x53, 0x10, 0x06, 0x22, 0x56, 0x0a, 0x0d, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, + 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x57, 0x45, 0x4c, 0x4c, 0x5f, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59, 0x10, 0x01, + 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x02, 0x12, + 0x0c, 0x0a, 0x08, 0x44, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x42, 0x0b, 0x0a, + 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc2, 0x04, 0x0a, 0x04, 0x44, + 0x65, 0x63, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63, 0x6c, + 0x48, 0x00, 0x52, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x08, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x63, 0x6c, 0x48, 0x00, 0x52, 0x08, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6b, 0x0a, 0x09, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63, + 0x6c, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6f, + 0x63, 0x1a, 0xbe, 0x02, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x63, 0x6c, 0x12, 0x42, 0x0a, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x63, 0x6c, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x09, 0x6f, 0x76, 0x65, + 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x1a, 0xe9, 0x01, 0x0a, 0x08, 0x4f, 0x76, 0x65, 0x72, 
0x6c, + 0x6f, 0x61, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, + 0x61, 0x64, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x1f, 0x0a, 0x0b, + 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2f, 0x0a, + 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30, + 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, + 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, + 0x6f, 0x63, 0x42, 0x0b, 0x0a, 0x09, 0x64, 0x65, 0x63, 0x6c, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, + 0x6a, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x49, + 0x64, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2c, 0x0a, 0x0c, 0x64, + 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x44, 0x65, 0x63, + 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, + 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_cel_expr_checked_proto_rawDescOnce sync.Once + file_cel_expr_checked_proto_rawDescData = file_cel_expr_checked_proto_rawDesc +) + +func file_cel_expr_checked_proto_rawDescGZIP() []byte { + file_cel_expr_checked_proto_rawDescOnce.Do(func() { + file_cel_expr_checked_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_checked_proto_rawDescData) + }) + return file_cel_expr_checked_proto_rawDescData +} + +var file_cel_expr_checked_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_cel_expr_checked_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_cel_expr_checked_proto_goTypes = []interface{}{ + (Type_PrimitiveType)(0), // 0: cel.expr.Type.PrimitiveType + (Type_WellKnownType)(0), // 1: cel.expr.Type.WellKnownType + (*CheckedExpr)(nil), // 2: cel.expr.CheckedExpr + (*Type)(nil), // 3: cel.expr.Type + (*Decl)(nil), // 4: cel.expr.Decl + (*Reference)(nil), // 5: cel.expr.Reference + nil, // 6: cel.expr.CheckedExpr.ReferenceMapEntry + nil, // 7: cel.expr.CheckedExpr.TypeMapEntry + (*Type_ListType)(nil), // 8: cel.expr.Type.ListType + 
(*Type_MapType)(nil), // 9: cel.expr.Type.MapType + (*Type_FunctionType)(nil), // 10: cel.expr.Type.FunctionType + (*Type_AbstractType)(nil), // 11: cel.expr.Type.AbstractType + (*Decl_IdentDecl)(nil), // 12: cel.expr.Decl.IdentDecl + (*Decl_FunctionDecl)(nil), // 13: cel.expr.Decl.FunctionDecl + (*Decl_FunctionDecl_Overload)(nil), // 14: cel.expr.Decl.FunctionDecl.Overload + (*SourceInfo)(nil), // 15: cel.expr.SourceInfo + (*Expr)(nil), // 16: cel.expr.Expr + (*emptypb.Empty)(nil), // 17: google.protobuf.Empty + (structpb.NullValue)(0), // 18: google.protobuf.NullValue + (*Constant)(nil), // 19: cel.expr.Constant +} +var file_cel_expr_checked_proto_depIdxs = []int32{ + 6, // 0: cel.expr.CheckedExpr.reference_map:type_name -> cel.expr.CheckedExpr.ReferenceMapEntry + 7, // 1: cel.expr.CheckedExpr.type_map:type_name -> cel.expr.CheckedExpr.TypeMapEntry + 15, // 2: cel.expr.CheckedExpr.source_info:type_name -> cel.expr.SourceInfo + 16, // 3: cel.expr.CheckedExpr.expr:type_name -> cel.expr.Expr + 17, // 4: cel.expr.Type.dyn:type_name -> google.protobuf.Empty + 18, // 5: cel.expr.Type.null:type_name -> google.protobuf.NullValue + 0, // 6: cel.expr.Type.primitive:type_name -> cel.expr.Type.PrimitiveType + 0, // 7: cel.expr.Type.wrapper:type_name -> cel.expr.Type.PrimitiveType + 1, // 8: cel.expr.Type.well_known:type_name -> cel.expr.Type.WellKnownType + 8, // 9: cel.expr.Type.list_type:type_name -> cel.expr.Type.ListType + 9, // 10: cel.expr.Type.map_type:type_name -> cel.expr.Type.MapType + 10, // 11: cel.expr.Type.function:type_name -> cel.expr.Type.FunctionType + 3, // 12: cel.expr.Type.type:type_name -> cel.expr.Type + 17, // 13: cel.expr.Type.error:type_name -> google.protobuf.Empty + 11, // 14: cel.expr.Type.abstract_type:type_name -> cel.expr.Type.AbstractType + 12, // 15: cel.expr.Decl.ident:type_name -> cel.expr.Decl.IdentDecl + 13, // 16: cel.expr.Decl.function:type_name -> cel.expr.Decl.FunctionDecl + 19, // 17: cel.expr.Reference.value:type_name -> cel.expr.Constant + 5, // 18: cel.expr.CheckedExpr.ReferenceMapEntry.value:type_name -> cel.expr.Reference + 3, // 19: cel.expr.CheckedExpr.TypeMapEntry.value:type_name -> cel.expr.Type + 3, // 20: cel.expr.Type.ListType.elem_type:type_name -> cel.expr.Type + 3, // 21: cel.expr.Type.MapType.key_type:type_name -> cel.expr.Type + 3, // 22: cel.expr.Type.MapType.value_type:type_name -> cel.expr.Type + 3, // 23: cel.expr.Type.FunctionType.result_type:type_name -> cel.expr.Type + 3, // 24: cel.expr.Type.FunctionType.arg_types:type_name -> cel.expr.Type + 3, // 25: cel.expr.Type.AbstractType.parameter_types:type_name -> cel.expr.Type + 3, // 26: cel.expr.Decl.IdentDecl.type:type_name -> cel.expr.Type + 19, // 27: cel.expr.Decl.IdentDecl.value:type_name -> cel.expr.Constant + 14, // 28: cel.expr.Decl.FunctionDecl.overloads:type_name -> cel.expr.Decl.FunctionDecl.Overload + 3, // 29: cel.expr.Decl.FunctionDecl.Overload.params:type_name -> cel.expr.Type + 3, // 30: cel.expr.Decl.FunctionDecl.Overload.result_type:type_name -> cel.expr.Type + 31, // [31:31] is the sub-list for method output_type + 31, // [31:31] is the sub-list for method input_type + 31, // [31:31] is the sub-list for extension type_name + 31, // [31:31] is the sub-list for extension extendee + 0, // [0:31] is the sub-list for field type_name +} + +func init() { file_cel_expr_checked_proto_init() } +func file_cel_expr_checked_proto_init() { + if File_cel_expr_checked_proto != nil { + return + } + file_cel_expr_syntax_proto_init() + if !protoimpl.UnsafeEnabled { + 
file_cel_expr_checked_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckedExpr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Reference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_ListType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_MapType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_FunctionType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_AbstractType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl_IdentDecl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl_FunctionDecl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl_FunctionDecl_Overload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cel_expr_checked_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*Type_Dyn)(nil), + (*Type_Null)(nil), + (*Type_Primitive)(nil), + (*Type_Wrapper)(nil), + (*Type_WellKnown)(nil), + (*Type_ListType_)(nil), + (*Type_MapType_)(nil), + (*Type_Function)(nil), + (*Type_MessageType)(nil), + (*Type_TypeParam)(nil), + (*Type_Type)(nil), + (*Type_Error)(nil), + (*Type_AbstractType_)(nil), + } + file_cel_expr_checked_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Decl_Ident)(nil), + (*Decl_Function)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_cel_expr_checked_proto_rawDesc, + NumEnums: 2, + NumMessages: 13, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_checked_proto_goTypes, + DependencyIndexes: file_cel_expr_checked_proto_depIdxs, + EnumInfos: file_cel_expr_checked_proto_enumTypes, + MessageInfos: file_cel_expr_checked_proto_msgTypes, + }.Build() + File_cel_expr_checked_proto = out.File + file_cel_expr_checked_proto_rawDesc = nil + file_cel_expr_checked_proto_goTypes = nil + file_cel_expr_checked_proto_depIdxs = nil +} diff --git a/vendor/cel.dev/expr/cloudbuild.yaml b/vendor/cel.dev/expr/cloudbuild.yaml new file mode 100644 index 0000000000..8a8ea3763f --- /dev/null +++ b/vendor/cel.dev/expr/cloudbuild.yaml @@ -0,0 +1,9 @@ +steps: +- name: 'gcr.io/cloud-builders/bazel:6.4.0' + entrypoint: bazel + args: ['test', '--test_output=errors', '...'] + id: bazel-test + waitFor: ['-'] +timeout: 15m +options: + machineType: 'N1_HIGHCPU_32' diff --git a/vendor/cel.dev/expr/eval.pb.go b/vendor/cel.dev/expr/eval.pb.go new file mode 100644 index 0000000000..8f651f9cc6 --- /dev/null +++ b/vendor/cel.dev/expr/eval.pb.go @@ -0,0 +1,490 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/eval.proto + +package expr + +import ( + status "google.golang.org/genproto/googleapis/rpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type EvalState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"` +} + +func (x *EvalState) Reset() { + *x = EvalState{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EvalState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EvalState) ProtoMessage() {} + +func (x *EvalState) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EvalState.ProtoReflect.Descriptor instead. 
+func (*EvalState) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{0} +} + +func (x *EvalState) GetValues() []*ExprValue { + if x != nil { + return x.Values + } + return nil +} + +func (x *EvalState) GetResults() []*EvalState_Result { + if x != nil { + return x.Results + } + return nil +} + +type ExprValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Kind: + // + // *ExprValue_Value + // *ExprValue_Error + // *ExprValue_Unknown + Kind isExprValue_Kind `protobuf_oneof:"kind"` +} + +func (x *ExprValue) Reset() { + *x = ExprValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExprValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExprValue) ProtoMessage() {} + +func (x *ExprValue) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExprValue.ProtoReflect.Descriptor instead. +func (*ExprValue) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{1} +} + +func (m *ExprValue) GetKind() isExprValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (x *ExprValue) GetValue() *Value { + if x, ok := x.GetKind().(*ExprValue_Value); ok { + return x.Value + } + return nil +} + +func (x *ExprValue) GetError() *ErrorSet { + if x, ok := x.GetKind().(*ExprValue_Error); ok { + return x.Error + } + return nil +} + +func (x *ExprValue) GetUnknown() *UnknownSet { + if x, ok := x.GetKind().(*ExprValue_Unknown); ok { + return x.Unknown + } + return nil +} + +type isExprValue_Kind interface { + isExprValue_Kind() +} + +type ExprValue_Value struct { + Value *Value `protobuf:"bytes,1,opt,name=value,proto3,oneof"` +} + +type ExprValue_Error struct { + Error *ErrorSet `protobuf:"bytes,2,opt,name=error,proto3,oneof"` +} + +type ExprValue_Unknown struct { + Unknown *UnknownSet `protobuf:"bytes,3,opt,name=unknown,proto3,oneof"` +} + +func (*ExprValue_Value) isExprValue_Kind() {} + +func (*ExprValue_Error) isExprValue_Kind() {} + +func (*ExprValue_Unknown) isExprValue_Kind() {} + +type ErrorSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Errors []*status.Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"` +} + +func (x *ErrorSet) Reset() { + *x = ErrorSet{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorSet) ProtoMessage() {} + +func (x *ErrorSet) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorSet.ProtoReflect.Descriptor instead. 
+func (*ErrorSet) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{2} +} + +func (x *ErrorSet) GetErrors() []*status.Status { + if x != nil { + return x.Errors + } + return nil +} + +type UnknownSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"` +} + +func (x *UnknownSet) Reset() { + *x = UnknownSet{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UnknownSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnknownSet) ProtoMessage() {} + +func (x *UnknownSet) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnknownSet.ProtoReflect.Descriptor instead. +func (*UnknownSet) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{3} +} + +func (x *UnknownSet) GetExprs() []int64 { + if x != nil { + return x.Exprs + } + return nil +} + +type EvalState_Result struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"` + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *EvalState_Result) Reset() { + *x = EvalState_Result{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EvalState_Result) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EvalState_Result) ProtoMessage() {} + +func (x *EvalState_Result) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EvalState_Result.ProtoReflect.Descriptor instead. 
+func (*EvalState_Result) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *EvalState_Result) GetExpr() int64 { + if x != nil { + return x.Expr + } + return 0 +} + +func (x *EvalState_Result) GetValue() int64 { + if x != nil { + return x.Value + } + return 0 +} + +var File_cel_expr_eval_proto protoreflect.FileDescriptor + +var file_cel_expr_eval_proto_rawDesc = []byte{ + 0x0a, 0x13, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x1a, + 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, + 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa2, + 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b, 0x0a, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, + 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, + 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x78, 0x70, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, + 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, + 0x22, 0x36, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x06, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c, + 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 
0x45, 0x76, + 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, + 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_cel_expr_eval_proto_rawDescOnce sync.Once + file_cel_expr_eval_proto_rawDescData = file_cel_expr_eval_proto_rawDesc +) + +func file_cel_expr_eval_proto_rawDescGZIP() []byte { + file_cel_expr_eval_proto_rawDescOnce.Do(func() { + file_cel_expr_eval_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_eval_proto_rawDescData) + }) + return file_cel_expr_eval_proto_rawDescData +} + +var file_cel_expr_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_cel_expr_eval_proto_goTypes = []interface{}{ + (*EvalState)(nil), // 0: cel.expr.EvalState + (*ExprValue)(nil), // 1: cel.expr.ExprValue + (*ErrorSet)(nil), // 2: cel.expr.ErrorSet + (*UnknownSet)(nil), // 3: cel.expr.UnknownSet + (*EvalState_Result)(nil), // 4: cel.expr.EvalState.Result + (*Value)(nil), // 5: cel.expr.Value + (*status.Status)(nil), // 6: google.rpc.Status +} +var file_cel_expr_eval_proto_depIdxs = []int32{ + 1, // 0: cel.expr.EvalState.values:type_name -> cel.expr.ExprValue + 4, // 1: cel.expr.EvalState.results:type_name -> cel.expr.EvalState.Result + 5, // 2: cel.expr.ExprValue.value:type_name -> cel.expr.Value + 2, // 3: cel.expr.ExprValue.error:type_name -> cel.expr.ErrorSet + 3, // 4: cel.expr.ExprValue.unknown:type_name -> cel.expr.UnknownSet + 6, // 5: cel.expr.ErrorSet.errors:type_name -> google.rpc.Status + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_cel_expr_eval_proto_init() } +func file_cel_expr_eval_proto_init() { + if File_cel_expr_eval_proto != nil { + return + } + file_cel_expr_value_proto_init() + if !protoimpl.UnsafeEnabled { + file_cel_expr_eval_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EvalState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_eval_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExprValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_eval_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_eval_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UnknownSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_eval_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EvalState_Result); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cel_expr_eval_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*ExprValue_Value)(nil), + (*ExprValue_Error)(nil), + (*ExprValue_Unknown)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + 
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cel_expr_eval_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_eval_proto_goTypes, + DependencyIndexes: file_cel_expr_eval_proto_depIdxs, + MessageInfos: file_cel_expr_eval_proto_msgTypes, + }.Build() + File_cel_expr_eval_proto = out.File + file_cel_expr_eval_proto_rawDesc = nil + file_cel_expr_eval_proto_goTypes = nil + file_cel_expr_eval_proto_depIdxs = nil +} diff --git a/vendor/cel.dev/expr/explain.pb.go b/vendor/cel.dev/expr/explain.pb.go new file mode 100644 index 0000000000..79fd5443b9 --- /dev/null +++ b/vendor/cel.dev/expr/explain.pb.go @@ -0,0 +1,236 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/explain.proto + +package expr + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Deprecated: Do not use. +type Explain struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"` +} + +func (x *Explain) Reset() { + *x = Explain{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_explain_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Explain) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Explain) ProtoMessage() {} + +func (x *Explain) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_explain_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Explain.ProtoReflect.Descriptor instead. 
+func (*Explain) Descriptor() ([]byte, []int) { + return file_cel_expr_explain_proto_rawDescGZIP(), []int{0} +} + +func (x *Explain) GetValues() []*Value { + if x != nil { + return x.Values + } + return nil +} + +func (x *Explain) GetExprSteps() []*Explain_ExprStep { + if x != nil { + return x.ExprSteps + } + return nil +} + +type Explain_ExprStep struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"` +} + +func (x *Explain_ExprStep) Reset() { + *x = Explain_ExprStep{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_explain_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Explain_ExprStep) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Explain_ExprStep) ProtoMessage() {} + +func (x *Explain_ExprStep) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_explain_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Explain_ExprStep.ProtoReflect.Descriptor instead. +func (*Explain_ExprStep) Descriptor() ([]byte, []int) { + return file_cel_expr_explain_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Explain_ExprStep) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *Explain_ExprStep) GetValueIndex() int32 { + if x != nil { + return x.ValueIndex + } + return 0 +} + +var File_cel_expr_explain_proto protoreflect.FileDescriptor + +var file_cel_expr_explain_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x78, 0x70, 0x6c, 0x61, + 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x70, + 0x6c, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x39, 0x0a, + 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, + 0x6c, 0x61, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x52, 0x09, 0x65, + 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x3b, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x72, + 0x53, 0x74, 0x65, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x3a, 0x02, 0x18, 0x01, 0x42, 0x2f, 0x0a, 0x0c, 0x64, 0x65, 0x76, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0c, 0x45, 0x78, 0x70, 0x6c, 0x61, + 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, + 0x65, 0x76, 0x2f, 
0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_cel_expr_explain_proto_rawDescOnce sync.Once + file_cel_expr_explain_proto_rawDescData = file_cel_expr_explain_proto_rawDesc +) + +func file_cel_expr_explain_proto_rawDescGZIP() []byte { + file_cel_expr_explain_proto_rawDescOnce.Do(func() { + file_cel_expr_explain_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_explain_proto_rawDescData) + }) + return file_cel_expr_explain_proto_rawDescData +} + +var file_cel_expr_explain_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_cel_expr_explain_proto_goTypes = []interface{}{ + (*Explain)(nil), // 0: cel.expr.Explain + (*Explain_ExprStep)(nil), // 1: cel.expr.Explain.ExprStep + (*Value)(nil), // 2: cel.expr.Value +} +var file_cel_expr_explain_proto_depIdxs = []int32{ + 2, // 0: cel.expr.Explain.values:type_name -> cel.expr.Value + 1, // 1: cel.expr.Explain.expr_steps:type_name -> cel.expr.Explain.ExprStep + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_cel_expr_explain_proto_init() } +func file_cel_expr_explain_proto_init() { + if File_cel_expr_explain_proto != nil { + return + } + file_cel_expr_value_proto_init() + if !protoimpl.UnsafeEnabled { + file_cel_expr_explain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Explain); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_explain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Explain_ExprStep); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cel_expr_explain_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_explain_proto_goTypes, + DependencyIndexes: file_cel_expr_explain_proto_depIdxs, + MessageInfos: file_cel_expr_explain_proto_msgTypes, + }.Build() + File_cel_expr_explain_proto = out.File + file_cel_expr_explain_proto_rawDesc = nil + file_cel_expr_explain_proto_goTypes = nil + file_cel_expr_explain_proto_depIdxs = nil +} diff --git a/vendor/cel.dev/expr/regen_go_proto.sh b/vendor/cel.dev/expr/regen_go_proto.sh new file mode 100644 index 0000000000..abf2f9788e --- /dev/null +++ b/vendor/cel.dev/expr/regen_go_proto.sh @@ -0,0 +1,9 @@ +#!/bin/sh +bazel build //proto/test/... 
+files=($(bazel aquery 'kind(proto, //proto/...)' | grep Outputs | grep "[.]pb[.]go" | sed 's/Outputs: \[//' | sed 's/\]//' | tr "," "\n")) +for src in ${files[@]}; +do + dst=$(echo $src | sed 's/\(.*\%\/github.com\/google\/cel-spec\/\(.*\)\)/\2/') + echo "copying $dst" + $(cp $src $dst) +done diff --git a/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh b/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh new file mode 100644 index 0000000000..9a13479e40 --- /dev/null +++ b/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +bazel build //proto/cel/expr:all + +rm -vf ./*.pb.go + +files=( $(bazel cquery //proto/cel/expr:expr_go_proto --output=starlark --starlark:expr="'\n'.join([f.path for f in target.output_groups.go_generated_srcs.to_list()])") ) +for src in "${files[@]}"; +do + cp -v "${src}" ./ +done diff --git a/vendor/cel.dev/expr/syntax.pb.go b/vendor/cel.dev/expr/syntax.pb.go new file mode 100644 index 0000000000..48a952872e --- /dev/null +++ b/vendor/cel.dev/expr/syntax.pb.go @@ -0,0 +1,1633 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/syntax.proto + +package expr + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + structpb "google.golang.org/protobuf/types/known/structpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SourceInfo_Extension_Component int32 + +const ( + SourceInfo_Extension_COMPONENT_UNSPECIFIED SourceInfo_Extension_Component = 0 + SourceInfo_Extension_COMPONENT_PARSER SourceInfo_Extension_Component = 1 + SourceInfo_Extension_COMPONENT_TYPE_CHECKER SourceInfo_Extension_Component = 2 + SourceInfo_Extension_COMPONENT_RUNTIME SourceInfo_Extension_Component = 3 +) + +// Enum value maps for SourceInfo_Extension_Component. +var ( + SourceInfo_Extension_Component_name = map[int32]string{ + 0: "COMPONENT_UNSPECIFIED", + 1: "COMPONENT_PARSER", + 2: "COMPONENT_TYPE_CHECKER", + 3: "COMPONENT_RUNTIME", + } + SourceInfo_Extension_Component_value = map[string]int32{ + "COMPONENT_UNSPECIFIED": 0, + "COMPONENT_PARSER": 1, + "COMPONENT_TYPE_CHECKER": 2, + "COMPONENT_RUNTIME": 3, + } +) + +func (x SourceInfo_Extension_Component) Enum() *SourceInfo_Extension_Component { + p := new(SourceInfo_Extension_Component) + *p = x + return p +} + +func (x SourceInfo_Extension_Component) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SourceInfo_Extension_Component) Descriptor() protoreflect.EnumDescriptor { + return file_cel_expr_syntax_proto_enumTypes[0].Descriptor() +} + +func (SourceInfo_Extension_Component) Type() protoreflect.EnumType { + return &file_cel_expr_syntax_proto_enumTypes[0] +} + +func (x SourceInfo_Extension_Component) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SourceInfo_Extension_Component.Descriptor instead. 
+func (SourceInfo_Extension_Component) EnumDescriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0} +} + +type ParsedExpr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Expr *Expr `protobuf:"bytes,2,opt,name=expr,proto3" json:"expr,omitempty"` + SourceInfo *SourceInfo `protobuf:"bytes,3,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"` +} + +func (x *ParsedExpr) Reset() { + *x = ParsedExpr{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ParsedExpr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ParsedExpr) ProtoMessage() {} + +func (x *ParsedExpr) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ParsedExpr.ProtoReflect.Descriptor instead. +func (*ParsedExpr) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{0} +} + +func (x *ParsedExpr) GetExpr() *Expr { + if x != nil { + return x.Expr + } + return nil +} + +func (x *ParsedExpr) GetSourceInfo() *SourceInfo { + if x != nil { + return x.SourceInfo + } + return nil +} + +type Expr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // Types that are assignable to ExprKind: + // + // *Expr_ConstExpr + // *Expr_IdentExpr + // *Expr_SelectExpr + // *Expr_CallExpr + // *Expr_ListExpr + // *Expr_StructExpr + // *Expr_ComprehensionExpr + ExprKind isExpr_ExprKind `protobuf_oneof:"expr_kind"` +} + +func (x *Expr) Reset() { + *x = Expr{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr) ProtoMessage() {} + +func (x *Expr) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr.ProtoReflect.Descriptor instead. 
+func (*Expr) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1} +} + +func (x *Expr) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (m *Expr) GetExprKind() isExpr_ExprKind { + if m != nil { + return m.ExprKind + } + return nil +} + +func (x *Expr) GetConstExpr() *Constant { + if x, ok := x.GetExprKind().(*Expr_ConstExpr); ok { + return x.ConstExpr + } + return nil +} + +func (x *Expr) GetIdentExpr() *Expr_Ident { + if x, ok := x.GetExprKind().(*Expr_IdentExpr); ok { + return x.IdentExpr + } + return nil +} + +func (x *Expr) GetSelectExpr() *Expr_Select { + if x, ok := x.GetExprKind().(*Expr_SelectExpr); ok { + return x.SelectExpr + } + return nil +} + +func (x *Expr) GetCallExpr() *Expr_Call { + if x, ok := x.GetExprKind().(*Expr_CallExpr); ok { + return x.CallExpr + } + return nil +} + +func (x *Expr) GetListExpr() *Expr_CreateList { + if x, ok := x.GetExprKind().(*Expr_ListExpr); ok { + return x.ListExpr + } + return nil +} + +func (x *Expr) GetStructExpr() *Expr_CreateStruct { + if x, ok := x.GetExprKind().(*Expr_StructExpr); ok { + return x.StructExpr + } + return nil +} + +func (x *Expr) GetComprehensionExpr() *Expr_Comprehension { + if x, ok := x.GetExprKind().(*Expr_ComprehensionExpr); ok { + return x.ComprehensionExpr + } + return nil +} + +type isExpr_ExprKind interface { + isExpr_ExprKind() +} + +type Expr_ConstExpr struct { + ConstExpr *Constant `protobuf:"bytes,3,opt,name=const_expr,json=constExpr,proto3,oneof"` +} + +type Expr_IdentExpr struct { + IdentExpr *Expr_Ident `protobuf:"bytes,4,opt,name=ident_expr,json=identExpr,proto3,oneof"` +} + +type Expr_SelectExpr struct { + SelectExpr *Expr_Select `protobuf:"bytes,5,opt,name=select_expr,json=selectExpr,proto3,oneof"` +} + +type Expr_CallExpr struct { + CallExpr *Expr_Call `protobuf:"bytes,6,opt,name=call_expr,json=callExpr,proto3,oneof"` +} + +type Expr_ListExpr struct { + ListExpr *Expr_CreateList `protobuf:"bytes,7,opt,name=list_expr,json=listExpr,proto3,oneof"` +} + +type Expr_StructExpr struct { + StructExpr *Expr_CreateStruct `protobuf:"bytes,8,opt,name=struct_expr,json=structExpr,proto3,oneof"` +} + +type Expr_ComprehensionExpr struct { + ComprehensionExpr *Expr_Comprehension `protobuf:"bytes,9,opt,name=comprehension_expr,json=comprehensionExpr,proto3,oneof"` +} + +func (*Expr_ConstExpr) isExpr_ExprKind() {} + +func (*Expr_IdentExpr) isExpr_ExprKind() {} + +func (*Expr_SelectExpr) isExpr_ExprKind() {} + +func (*Expr_CallExpr) isExpr_ExprKind() {} + +func (*Expr_ListExpr) isExpr_ExprKind() {} + +func (*Expr_StructExpr) isExpr_ExprKind() {} + +func (*Expr_ComprehensionExpr) isExpr_ExprKind() {} + +type Constant struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to ConstantKind: + // + // *Constant_NullValue + // *Constant_BoolValue + // *Constant_Int64Value + // *Constant_Uint64Value + // *Constant_DoubleValue + // *Constant_StringValue + // *Constant_BytesValue + // *Constant_DurationValue + // *Constant_TimestampValue + ConstantKind isConstant_ConstantKind `protobuf_oneof:"constant_kind"` +} + +func (x *Constant) Reset() { + *x = Constant{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Constant) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Constant) ProtoMessage() {} + +func (x *Constant) ProtoReflect() 
protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Constant.ProtoReflect.Descriptor instead. +func (*Constant) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{2} +} + +func (m *Constant) GetConstantKind() isConstant_ConstantKind { + if m != nil { + return m.ConstantKind + } + return nil +} + +func (x *Constant) GetNullValue() structpb.NullValue { + if x, ok := x.GetConstantKind().(*Constant_NullValue); ok { + return x.NullValue + } + return structpb.NullValue(0) +} + +func (x *Constant) GetBoolValue() bool { + if x, ok := x.GetConstantKind().(*Constant_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *Constant) GetInt64Value() int64 { + if x, ok := x.GetConstantKind().(*Constant_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (x *Constant) GetUint64Value() uint64 { + if x, ok := x.GetConstantKind().(*Constant_Uint64Value); ok { + return x.Uint64Value + } + return 0 +} + +func (x *Constant) GetDoubleValue() float64 { + if x, ok := x.GetConstantKind().(*Constant_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (x *Constant) GetStringValue() string { + if x, ok := x.GetConstantKind().(*Constant_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *Constant) GetBytesValue() []byte { + if x, ok := x.GetConstantKind().(*Constant_BytesValue); ok { + return x.BytesValue + } + return nil +} + +// Deprecated: Do not use. +func (x *Constant) GetDurationValue() *durationpb.Duration { + if x, ok := x.GetConstantKind().(*Constant_DurationValue); ok { + return x.DurationValue + } + return nil +} + +// Deprecated: Do not use. +func (x *Constant) GetTimestampValue() *timestamppb.Timestamp { + if x, ok := x.GetConstantKind().(*Constant_TimestampValue); ok { + return x.TimestampValue + } + return nil +} + +type isConstant_ConstantKind interface { + isConstant_ConstantKind() +} + +type Constant_NullValue struct { + NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Constant_BoolValue struct { + BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Constant_Int64Value struct { + Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type Constant_Uint64Value struct { + Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"` +} + +type Constant_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type Constant_StringValue struct { + StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Constant_BytesValue struct { + BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} + +type Constant_DurationValue struct { + // Deprecated: Do not use. + DurationValue *durationpb.Duration `protobuf:"bytes,8,opt,name=duration_value,json=durationValue,proto3,oneof"` +} + +type Constant_TimestampValue struct { + // Deprecated: Do not use. 
+ TimestampValue *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=timestamp_value,json=timestampValue,proto3,oneof"` +} + +func (*Constant_NullValue) isConstant_ConstantKind() {} + +func (*Constant_BoolValue) isConstant_ConstantKind() {} + +func (*Constant_Int64Value) isConstant_ConstantKind() {} + +func (*Constant_Uint64Value) isConstant_ConstantKind() {} + +func (*Constant_DoubleValue) isConstant_ConstantKind() {} + +func (*Constant_StringValue) isConstant_ConstantKind() {} + +func (*Constant_BytesValue) isConstant_ConstantKind() {} + +func (*Constant_DurationValue) isConstant_ConstantKind() {} + +func (*Constant_TimestampValue) isConstant_ConstantKind() {} + +type SourceInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SyntaxVersion string `protobuf:"bytes,1,opt,name=syntax_version,json=syntaxVersion,proto3" json:"syntax_version,omitempty"` + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + LineOffsets []int32 `protobuf:"varint,3,rep,packed,name=line_offsets,json=lineOffsets,proto3" json:"line_offsets,omitempty"` + Positions map[int64]int32 `protobuf:"bytes,4,rep,name=positions,proto3" json:"positions,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + MacroCalls map[int64]*Expr `protobuf:"bytes,5,rep,name=macro_calls,json=macroCalls,proto3" json:"macro_calls,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Extensions []*SourceInfo_Extension `protobuf:"bytes,6,rep,name=extensions,proto3" json:"extensions,omitempty"` +} + +func (x *SourceInfo) Reset() { + *x = SourceInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceInfo) ProtoMessage() {} + +func (x *SourceInfo) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceInfo.ProtoReflect.Descriptor instead. 
+func (*SourceInfo) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3} +} + +func (x *SourceInfo) GetSyntaxVersion() string { + if x != nil { + return x.SyntaxVersion + } + return "" +} + +func (x *SourceInfo) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *SourceInfo) GetLineOffsets() []int32 { + if x != nil { + return x.LineOffsets + } + return nil +} + +func (x *SourceInfo) GetPositions() map[int64]int32 { + if x != nil { + return x.Positions + } + return nil +} + +func (x *SourceInfo) GetMacroCalls() map[int64]*Expr { + if x != nil { + return x.MacroCalls + } + return nil +} + +func (x *SourceInfo) GetExtensions() []*SourceInfo_Extension { + if x != nil { + return x.Extensions + } + return nil +} + +type Expr_Ident struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *Expr_Ident) Reset() { + *x = Expr_Ident{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Ident) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Ident) ProtoMessage() {} + +func (x *Expr_Ident) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Ident.ProtoReflect.Descriptor instead. +func (*Expr_Ident) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *Expr_Ident) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type Expr_Select struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Operand *Expr `protobuf:"bytes,1,opt,name=operand,proto3" json:"operand,omitempty"` + Field string `protobuf:"bytes,2,opt,name=field,proto3" json:"field,omitempty"` + TestOnly bool `protobuf:"varint,3,opt,name=test_only,json=testOnly,proto3" json:"test_only,omitempty"` +} + +func (x *Expr_Select) Reset() { + *x = Expr_Select{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Select) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Select) ProtoMessage() {} + +func (x *Expr_Select) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Select.ProtoReflect.Descriptor instead. 
+func (*Expr_Select) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *Expr_Select) GetOperand() *Expr { + if x != nil { + return x.Operand + } + return nil +} + +func (x *Expr_Select) GetField() string { + if x != nil { + return x.Field + } + return "" +} + +func (x *Expr_Select) GetTestOnly() bool { + if x != nil { + return x.TestOnly + } + return false +} + +type Expr_Call struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Target *Expr `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` + Function string `protobuf:"bytes,2,opt,name=function,proto3" json:"function,omitempty"` + Args []*Expr `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"` +} + +func (x *Expr_Call) Reset() { + *x = Expr_Call{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Call) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Call) ProtoMessage() {} + +func (x *Expr_Call) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Call.ProtoReflect.Descriptor instead. +func (*Expr_Call) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 2} +} + +func (x *Expr_Call) GetTarget() *Expr { + if x != nil { + return x.Target + } + return nil +} + +func (x *Expr_Call) GetFunction() string { + if x != nil { + return x.Function + } + return "" +} + +func (x *Expr_Call) GetArgs() []*Expr { + if x != nil { + return x.Args + } + return nil +} + +type Expr_CreateList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Elements []*Expr `protobuf:"bytes,1,rep,name=elements,proto3" json:"elements,omitempty"` + OptionalIndices []int32 `protobuf:"varint,2,rep,packed,name=optional_indices,json=optionalIndices,proto3" json:"optional_indices,omitempty"` +} + +func (x *Expr_CreateList) Reset() { + *x = Expr_CreateList{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_CreateList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_CreateList) ProtoMessage() {} + +func (x *Expr_CreateList) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_CreateList.ProtoReflect.Descriptor instead. 
+func (*Expr_CreateList) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 3} +} + +func (x *Expr_CreateList) GetElements() []*Expr { + if x != nil { + return x.Elements + } + return nil +} + +func (x *Expr_CreateList) GetOptionalIndices() []int32 { + if x != nil { + return x.OptionalIndices + } + return nil +} + +type Expr_CreateStruct struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MessageName string `protobuf:"bytes,1,opt,name=message_name,json=messageName,proto3" json:"message_name,omitempty"` + Entries []*Expr_CreateStruct_Entry `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries,omitempty"` +} + +func (x *Expr_CreateStruct) Reset() { + *x = Expr_CreateStruct{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_CreateStruct) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_CreateStruct) ProtoMessage() {} + +func (x *Expr_CreateStruct) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_CreateStruct.ProtoReflect.Descriptor instead. +func (*Expr_CreateStruct) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4} +} + +func (x *Expr_CreateStruct) GetMessageName() string { + if x != nil { + return x.MessageName + } + return "" +} + +func (x *Expr_CreateStruct) GetEntries() []*Expr_CreateStruct_Entry { + if x != nil { + return x.Entries + } + return nil +} + +type Expr_Comprehension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"` + IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"` + AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"` + AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"` + LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"` + LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"` + Result *Expr `protobuf:"bytes,7,opt,name=result,proto3" json:"result,omitempty"` +} + +func (x *Expr_Comprehension) Reset() { + *x = Expr_Comprehension{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Comprehension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Comprehension) ProtoMessage() {} + +func (x *Expr_Comprehension) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Comprehension.ProtoReflect.Descriptor instead. 
+func (*Expr_Comprehension) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 5} +} + +func (x *Expr_Comprehension) GetIterVar() string { + if x != nil { + return x.IterVar + } + return "" +} + +func (x *Expr_Comprehension) GetIterRange() *Expr { + if x != nil { + return x.IterRange + } + return nil +} + +func (x *Expr_Comprehension) GetAccuVar() string { + if x != nil { + return x.AccuVar + } + return "" +} + +func (x *Expr_Comprehension) GetAccuInit() *Expr { + if x != nil { + return x.AccuInit + } + return nil +} + +func (x *Expr_Comprehension) GetLoopCondition() *Expr { + if x != nil { + return x.LoopCondition + } + return nil +} + +func (x *Expr_Comprehension) GetLoopStep() *Expr { + if x != nil { + return x.LoopStep + } + return nil +} + +func (x *Expr_Comprehension) GetResult() *Expr { + if x != nil { + return x.Result + } + return nil +} + +type Expr_CreateStruct_Entry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // Types that are assignable to KeyKind: + // + // *Expr_CreateStruct_Entry_FieldKey + // *Expr_CreateStruct_Entry_MapKey + KeyKind isExpr_CreateStruct_Entry_KeyKind `protobuf_oneof:"key_kind"` + Value *Expr `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` + OptionalEntry bool `protobuf:"varint,5,opt,name=optional_entry,json=optionalEntry,proto3" json:"optional_entry,omitempty"` +} + +func (x *Expr_CreateStruct_Entry) Reset() { + *x = Expr_CreateStruct_Entry{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_CreateStruct_Entry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_CreateStruct_Entry) ProtoMessage() {} + +func (x *Expr_CreateStruct_Entry) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_CreateStruct_Entry.ProtoReflect.Descriptor instead. 
+func (*Expr_CreateStruct_Entry) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4, 0} +} + +func (x *Expr_CreateStruct_Entry) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (m *Expr_CreateStruct_Entry) GetKeyKind() isExpr_CreateStruct_Entry_KeyKind { + if m != nil { + return m.KeyKind + } + return nil +} + +func (x *Expr_CreateStruct_Entry) GetFieldKey() string { + if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_FieldKey); ok { + return x.FieldKey + } + return "" +} + +func (x *Expr_CreateStruct_Entry) GetMapKey() *Expr { + if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_MapKey); ok { + return x.MapKey + } + return nil +} + +func (x *Expr_CreateStruct_Entry) GetValue() *Expr { + if x != nil { + return x.Value + } + return nil +} + +func (x *Expr_CreateStruct_Entry) GetOptionalEntry() bool { + if x != nil { + return x.OptionalEntry + } + return false +} + +type isExpr_CreateStruct_Entry_KeyKind interface { + isExpr_CreateStruct_Entry_KeyKind() +} + +type Expr_CreateStruct_Entry_FieldKey struct { + FieldKey string `protobuf:"bytes,2,opt,name=field_key,json=fieldKey,proto3,oneof"` +} + +type Expr_CreateStruct_Entry_MapKey struct { + MapKey *Expr `protobuf:"bytes,3,opt,name=map_key,json=mapKey,proto3,oneof"` +} + +func (*Expr_CreateStruct_Entry_FieldKey) isExpr_CreateStruct_Entry_KeyKind() {} + +func (*Expr_CreateStruct_Entry_MapKey) isExpr_CreateStruct_Entry_KeyKind() {} + +type SourceInfo_Extension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + AffectedComponents []SourceInfo_Extension_Component `protobuf:"varint,2,rep,packed,name=affected_components,json=affectedComponents,proto3,enum=cel.expr.SourceInfo_Extension_Component" json:"affected_components,omitempty"` + Version *SourceInfo_Extension_Version `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *SourceInfo_Extension) Reset() { + *x = SourceInfo_Extension{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceInfo_Extension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceInfo_Extension) ProtoMessage() {} + +func (x *SourceInfo_Extension) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceInfo_Extension.ProtoReflect.Descriptor instead. 
+func (*SourceInfo_Extension) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2} +} + +func (x *SourceInfo_Extension) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *SourceInfo_Extension) GetAffectedComponents() []SourceInfo_Extension_Component { + if x != nil { + return x.AffectedComponents + } + return nil +} + +func (x *SourceInfo_Extension) GetVersion() *SourceInfo_Extension_Version { + if x != nil { + return x.Version + } + return nil +} + +type SourceInfo_Extension_Version struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Major int64 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` + Minor int64 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` +} + +func (x *SourceInfo_Extension_Version) Reset() { + *x = SourceInfo_Extension_Version{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceInfo_Extension_Version) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceInfo_Extension_Version) ProtoMessage() {} + +func (x *SourceInfo_Extension_Version) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceInfo_Extension_Version.ProtoReflect.Descriptor instead. +func (*SourceInfo_Extension_Version) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0} +} + +func (x *SourceInfo_Extension_Version) GetMajor() int64 { + if x != nil { + return x.Major + } + return 0 +} + +func (x *SourceInfo_Extension_Version) GetMinor() int64 { + if x != nil { + return x.Minor + } + return 0 +} + +var File_cel_expr_syntax_proto protoreflect.FileDescriptor + +var file_cel_expr_syntax_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61, + 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x67, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x22, + 0x0a, 0x04, 0x65, 0x78, 0x70, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, + 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, + 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, + 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73, + 0x6f, 0x75, 0x72, 0x63, 
0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xfd, 0x0a, 0x0a, 0x04, 0x45, 0x78, + 0x70, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x33, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x48, 0x00, 0x52, 0x09, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38, + 0x0a, 0x0b, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, + 0x78, 0x70, 0x72, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x32, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, + 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x61, 0x6c, 0x6c, + 0x48, 0x00, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38, 0x0a, 0x09, + 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x6c, 0x69, + 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x3e, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x4d, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, + 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, + 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x48, 0x00, 0x52, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x45, 0x78, 0x70, 0x72, 0x1a, 0x1b, 0x0a, 0x05, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x1a, 0x65, 0x0a, 0x06, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x28, 0x0a, 0x07, + 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x07, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1b, 0x0a, 0x09, + 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x74, 0x65, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 
0x79, 0x1a, 0x6e, 0x0a, 0x04, 0x43, 0x61, 0x6c, + 0x6c, 0x12, 0x26, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, + 0x72, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, + 0x78, 0x70, 0x72, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x1a, 0x63, 0x0a, 0x0a, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x69, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x1a, 0xab, + 0x02, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, + 0x21, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, + 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, + 0xba, 0x01, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x09, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x61, 0x70, + 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, + 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0d, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0xad, 0x02, 0x0a, + 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, + 0x0a, 0x08, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x2d, 0x0a, 0x0a, 0x69, 0x74, 0x65, + 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x0e, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x69, + 0x74, 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x75, + 0x5f, 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x63, 0x75, + 0x56, 0x61, 0x72, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, 0x69, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, 0x69, 0x74, + 0x12, 0x35, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, + 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, 0x43, 0x6f, + 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x09, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, + 0x73, 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, 0x6f, 0x70, + 0x53, 0x74, 0x65, 0x70, 0x12, 0x26, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x45, 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, 0x0a, 0x09, + 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, 0x43, 0x6f, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, + 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, + 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, + 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, + 0x0b, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, + 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 
0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x18, 0x01, + 0x48, 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xac, 0x06, + 0x0a, 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e, + 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x21, 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, + 0x74, 0x73, 0x12, 0x41, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x63, + 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x3e, 0x0a, 0x0a, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x0e, + 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4d, 0x0a, 0x0f, 0x4d, 0x61, + 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 
0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xe0, 0x02, 0x0a, 0x09, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x59, 0x0a, 0x13, 0x61, 0x66, 0x66, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x12, + 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, + 0x74, 0x73, 0x12, 0x40, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, + 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x22, 0x6f, 0x0a, 0x09, 0x43, + 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f, 0x4d, 0x50, + 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, + 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4d, + 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x48, 0x45, 0x43, + 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, + 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x42, 0x2e, 0x0a, 0x0c, + 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0b, 0x53, 0x79, + 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, + 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_cel_expr_syntax_proto_rawDescOnce sync.Once + file_cel_expr_syntax_proto_rawDescData = file_cel_expr_syntax_proto_rawDesc +) + +func file_cel_expr_syntax_proto_rawDescGZIP() []byte { + file_cel_expr_syntax_proto_rawDescOnce.Do(func() { + file_cel_expr_syntax_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_syntax_proto_rawDescData) + }) + return file_cel_expr_syntax_proto_rawDescData +} + +var file_cel_expr_syntax_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_cel_expr_syntax_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_cel_expr_syntax_proto_goTypes = []interface{}{ + (SourceInfo_Extension_Component)(0), // 0: cel.expr.SourceInfo.Extension.Component + (*ParsedExpr)(nil), // 1: cel.expr.ParsedExpr + (*Expr)(nil), // 2: cel.expr.Expr + (*Constant)(nil), // 3: cel.expr.Constant + (*SourceInfo)(nil), // 4: cel.expr.SourceInfo + (*Expr_Ident)(nil), // 5: cel.expr.Expr.Ident + (*Expr_Select)(nil), // 6: cel.expr.Expr.Select + 
(*Expr_Call)(nil), // 7: cel.expr.Expr.Call + (*Expr_CreateList)(nil), // 8: cel.expr.Expr.CreateList + (*Expr_CreateStruct)(nil), // 9: cel.expr.Expr.CreateStruct + (*Expr_Comprehension)(nil), // 10: cel.expr.Expr.Comprehension + (*Expr_CreateStruct_Entry)(nil), // 11: cel.expr.Expr.CreateStruct.Entry + nil, // 12: cel.expr.SourceInfo.PositionsEntry + nil, // 13: cel.expr.SourceInfo.MacroCallsEntry + (*SourceInfo_Extension)(nil), // 14: cel.expr.SourceInfo.Extension + (*SourceInfo_Extension_Version)(nil), // 15: cel.expr.SourceInfo.Extension.Version + (structpb.NullValue)(0), // 16: google.protobuf.NullValue + (*durationpb.Duration)(nil), // 17: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 18: google.protobuf.Timestamp +} +var file_cel_expr_syntax_proto_depIdxs = []int32{ + 2, // 0: cel.expr.ParsedExpr.expr:type_name -> cel.expr.Expr + 4, // 1: cel.expr.ParsedExpr.source_info:type_name -> cel.expr.SourceInfo + 3, // 2: cel.expr.Expr.const_expr:type_name -> cel.expr.Constant + 5, // 3: cel.expr.Expr.ident_expr:type_name -> cel.expr.Expr.Ident + 6, // 4: cel.expr.Expr.select_expr:type_name -> cel.expr.Expr.Select + 7, // 5: cel.expr.Expr.call_expr:type_name -> cel.expr.Expr.Call + 8, // 6: cel.expr.Expr.list_expr:type_name -> cel.expr.Expr.CreateList + 9, // 7: cel.expr.Expr.struct_expr:type_name -> cel.expr.Expr.CreateStruct + 10, // 8: cel.expr.Expr.comprehension_expr:type_name -> cel.expr.Expr.Comprehension + 16, // 9: cel.expr.Constant.null_value:type_name -> google.protobuf.NullValue + 17, // 10: cel.expr.Constant.duration_value:type_name -> google.protobuf.Duration + 18, // 11: cel.expr.Constant.timestamp_value:type_name -> google.protobuf.Timestamp + 12, // 12: cel.expr.SourceInfo.positions:type_name -> cel.expr.SourceInfo.PositionsEntry + 13, // 13: cel.expr.SourceInfo.macro_calls:type_name -> cel.expr.SourceInfo.MacroCallsEntry + 14, // 14: cel.expr.SourceInfo.extensions:type_name -> cel.expr.SourceInfo.Extension + 2, // 15: cel.expr.Expr.Select.operand:type_name -> cel.expr.Expr + 2, // 16: cel.expr.Expr.Call.target:type_name -> cel.expr.Expr + 2, // 17: cel.expr.Expr.Call.args:type_name -> cel.expr.Expr + 2, // 18: cel.expr.Expr.CreateList.elements:type_name -> cel.expr.Expr + 11, // 19: cel.expr.Expr.CreateStruct.entries:type_name -> cel.expr.Expr.CreateStruct.Entry + 2, // 20: cel.expr.Expr.Comprehension.iter_range:type_name -> cel.expr.Expr + 2, // 21: cel.expr.Expr.Comprehension.accu_init:type_name -> cel.expr.Expr + 2, // 22: cel.expr.Expr.Comprehension.loop_condition:type_name -> cel.expr.Expr + 2, // 23: cel.expr.Expr.Comprehension.loop_step:type_name -> cel.expr.Expr + 2, // 24: cel.expr.Expr.Comprehension.result:type_name -> cel.expr.Expr + 2, // 25: cel.expr.Expr.CreateStruct.Entry.map_key:type_name -> cel.expr.Expr + 2, // 26: cel.expr.Expr.CreateStruct.Entry.value:type_name -> cel.expr.Expr + 2, // 27: cel.expr.SourceInfo.MacroCallsEntry.value:type_name -> cel.expr.Expr + 0, // 28: cel.expr.SourceInfo.Extension.affected_components:type_name -> cel.expr.SourceInfo.Extension.Component + 15, // 29: cel.expr.SourceInfo.Extension.version:type_name -> cel.expr.SourceInfo.Extension.Version + 30, // [30:30] is the sub-list for method output_type + 30, // [30:30] is the sub-list for method input_type + 30, // [30:30] is the sub-list for extension type_name + 30, // [30:30] is the sub-list for extension extendee + 0, // [0:30] is the sub-list for field type_name +} + +func init() { file_cel_expr_syntax_proto_init() } +func file_cel_expr_syntax_proto_init() { + 
if File_cel_expr_syntax_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cel_expr_syntax_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ParsedExpr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Constant); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Ident); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Select); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Call); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_CreateList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_CreateStruct); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Comprehension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_CreateStruct_Entry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceInfo_Extension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceInfo_Extension_Version); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cel_expr_syntax_proto_msgTypes[1].OneofWrappers = []interface{}{ + 
(*Expr_ConstExpr)(nil), + (*Expr_IdentExpr)(nil), + (*Expr_SelectExpr)(nil), + (*Expr_CallExpr)(nil), + (*Expr_ListExpr)(nil), + (*Expr_StructExpr)(nil), + (*Expr_ComprehensionExpr)(nil), + } + file_cel_expr_syntax_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Constant_NullValue)(nil), + (*Constant_BoolValue)(nil), + (*Constant_Int64Value)(nil), + (*Constant_Uint64Value)(nil), + (*Constant_DoubleValue)(nil), + (*Constant_StringValue)(nil), + (*Constant_BytesValue)(nil), + (*Constant_DurationValue)(nil), + (*Constant_TimestampValue)(nil), + } + file_cel_expr_syntax_proto_msgTypes[10].OneofWrappers = []interface{}{ + (*Expr_CreateStruct_Entry_FieldKey)(nil), + (*Expr_CreateStruct_Entry_MapKey)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cel_expr_syntax_proto_rawDesc, + NumEnums: 1, + NumMessages: 15, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_syntax_proto_goTypes, + DependencyIndexes: file_cel_expr_syntax_proto_depIdxs, + EnumInfos: file_cel_expr_syntax_proto_enumTypes, + MessageInfos: file_cel_expr_syntax_proto_msgTypes, + }.Build() + File_cel_expr_syntax_proto = out.File + file_cel_expr_syntax_proto_rawDesc = nil + file_cel_expr_syntax_proto_goTypes = nil + file_cel_expr_syntax_proto_depIdxs = nil +} diff --git a/vendor/cel.dev/expr/value.pb.go b/vendor/cel.dev/expr/value.pb.go new file mode 100644 index 0000000000..e5e29228c2 --- /dev/null +++ b/vendor/cel.dev/expr/value.pb.go @@ -0,0 +1,653 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/value.proto + +package expr + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Kind: + // + // *Value_NullValue + // *Value_BoolValue + // *Value_Int64Value + // *Value_Uint64Value + // *Value_DoubleValue + // *Value_StringValue + // *Value_BytesValue + // *Value_EnumValue + // *Value_ObjectValue + // *Value_MapValue + // *Value_ListValue + // *Value_TypeValue + Kind isValue_Kind `protobuf_oneof:"kind"` +} + +func (x *Value) Reset() { + *x = Value{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Value) ProtoMessage() {} + +func (x *Value) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Value.ProtoReflect.Descriptor instead. 
+func (*Value) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{0} +} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (x *Value) GetNullValue() structpb.NullValue { + if x, ok := x.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return structpb.NullValue(0) +} + +func (x *Value) GetBoolValue() bool { + if x, ok := x.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *Value) GetInt64Value() int64 { + if x, ok := x.GetKind().(*Value_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (x *Value) GetUint64Value() uint64 { + if x, ok := x.GetKind().(*Value_Uint64Value); ok { + return x.Uint64Value + } + return 0 +} + +func (x *Value) GetDoubleValue() float64 { + if x, ok := x.GetKind().(*Value_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (x *Value) GetStringValue() string { + if x, ok := x.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *Value) GetBytesValue() []byte { + if x, ok := x.GetKind().(*Value_BytesValue); ok { + return x.BytesValue + } + return nil +} + +func (x *Value) GetEnumValue() *EnumValue { + if x, ok := x.GetKind().(*Value_EnumValue); ok { + return x.EnumValue + } + return nil +} + +func (x *Value) GetObjectValue() *anypb.Any { + if x, ok := x.GetKind().(*Value_ObjectValue); ok { + return x.ObjectValue + } + return nil +} + +func (x *Value) GetMapValue() *MapValue { + if x, ok := x.GetKind().(*Value_MapValue); ok { + return x.MapValue + } + return nil +} + +func (x *Value) GetListValue() *ListValue { + if x, ok := x.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +func (x *Value) GetTypeValue() string { + if x, ok := x.GetKind().(*Value_TypeValue); ok { + return x.TypeValue + } + return "" +} + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Value_Int64Value struct { + Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type Value_Uint64Value struct { + Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"` +} + +type Value_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type Value_StringValue struct { + StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Value_BytesValue struct { + BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} + +type Value_EnumValue struct { + EnumValue *EnumValue `protobuf:"bytes,9,opt,name=enum_value,json=enumValue,proto3,oneof"` +} + +type Value_ObjectValue struct { + ObjectValue *anypb.Any `protobuf:"bytes,10,opt,name=object_value,json=objectValue,proto3,oneof"` +} + +type Value_MapValue struct { + MapValue *MapValue `protobuf:"bytes,11,opt,name=map_value,json=mapValue,proto3,oneof"` +} + +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,12,opt,name=list_value,json=listValue,proto3,oneof"` +} + +type Value_TypeValue struct { + TypeValue string `protobuf:"bytes,15,opt,name=type_value,json=typeValue,proto3,oneof"` +} + +func 
(*Value_NullValue) isValue_Kind() {} + +func (*Value_BoolValue) isValue_Kind() {} + +func (*Value_Int64Value) isValue_Kind() {} + +func (*Value_Uint64Value) isValue_Kind() {} + +func (*Value_DoubleValue) isValue_Kind() {} + +func (*Value_StringValue) isValue_Kind() {} + +func (*Value_BytesValue) isValue_Kind() {} + +func (*Value_EnumValue) isValue_Kind() {} + +func (*Value_ObjectValue) isValue_Kind() {} + +func (*Value_MapValue) isValue_Kind() {} + +func (*Value_ListValue) isValue_Kind() {} + +func (*Value_TypeValue) isValue_Kind() {} + +type EnumValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *EnumValue) Reset() { + *x = EnumValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumValue) ProtoMessage() {} + +func (x *EnumValue) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumValue.ProtoReflect.Descriptor instead. +func (*EnumValue) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{1} +} + +func (x *EnumValue) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *EnumValue) GetValue() int32 { + if x != nil { + return x.Value + } + return 0 +} + +type ListValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +func (x *ListValue) Reset() { + *x = ListValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListValue) ProtoMessage() {} + +func (x *ListValue) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListValue.ProtoReflect.Descriptor instead. 
+func (*ListValue) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{2} +} + +func (x *ListValue) GetValues() []*Value { + if x != nil { + return x.Values + } + return nil +} + +type MapValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` +} + +func (x *MapValue) Reset() { + *x = MapValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MapValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MapValue) ProtoMessage() {} + +func (x *MapValue) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MapValue.ProtoReflect.Descriptor instead. +func (*MapValue) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{3} +} + +func (x *MapValue) GetEntries() []*MapValue_Entry { + if x != nil { + return x.Entries + } + return nil +} + +type MapValue_Entry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *MapValue_Entry) Reset() { + *x = MapValue_Entry{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MapValue_Entry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MapValue_Entry) ProtoMessage() {} + +func (x *MapValue_Entry) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MapValue_Entry.ProtoReflect.Descriptor instead. 
+func (*MapValue_Entry) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *MapValue_Entry) GetKey() *Value { + if x != nil { + return x.Key + } + return nil +} + +func (x *MapValue_Entry) GetValue() *Value { + if x != nil { + return x.Value + } + return nil +} + +var File_cel_expr_value_proto protoreflect.FileDescriptor + +var file_cel_expr_value_proto_rawDesc = []byte{ + 0x0a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x04, 0x0a, 0x05, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x69, + 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75, + 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48, + 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, + 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, + 0x00, 0x52, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0c, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 
0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, + 0x52, 0x08, 0x6d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x6c, 0x69, + 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x1f, 0x0a, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0f, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x09, 0x45, 0x6e, 0x75, + 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x22, 0x34, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, + 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, + 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x51, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x21, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2d, 0x0a, 0x0c, 0x64, 0x65, + 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, + 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_cel_expr_value_proto_rawDescOnce sync.Once + file_cel_expr_value_proto_rawDescData = file_cel_expr_value_proto_rawDesc +) + +func file_cel_expr_value_proto_rawDescGZIP() []byte { + file_cel_expr_value_proto_rawDescOnce.Do(func() { + file_cel_expr_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_value_proto_rawDescData) + }) + return file_cel_expr_value_proto_rawDescData +} + +var file_cel_expr_value_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_cel_expr_value_proto_goTypes = []interface{}{ + (*Value)(nil), // 0: cel.expr.Value + (*EnumValue)(nil), // 1: cel.expr.EnumValue + (*ListValue)(nil), // 2: cel.expr.ListValue + (*MapValue)(nil), // 3: cel.expr.MapValue + (*MapValue_Entry)(nil), // 4: cel.expr.MapValue.Entry + (structpb.NullValue)(0), // 5: google.protobuf.NullValue + (*anypb.Any)(nil), // 6: google.protobuf.Any +} +var 
file_cel_expr_value_proto_depIdxs = []int32{ + 5, // 0: cel.expr.Value.null_value:type_name -> google.protobuf.NullValue + 1, // 1: cel.expr.Value.enum_value:type_name -> cel.expr.EnumValue + 6, // 2: cel.expr.Value.object_value:type_name -> google.protobuf.Any + 3, // 3: cel.expr.Value.map_value:type_name -> cel.expr.MapValue + 2, // 4: cel.expr.Value.list_value:type_name -> cel.expr.ListValue + 0, // 5: cel.expr.ListValue.values:type_name -> cel.expr.Value + 4, // 6: cel.expr.MapValue.entries:type_name -> cel.expr.MapValue.Entry + 0, // 7: cel.expr.MapValue.Entry.key:type_name -> cel.expr.Value + 0, // 8: cel.expr.MapValue.Entry.value:type_name -> cel.expr.Value + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_cel_expr_value_proto_init() } +func file_cel_expr_value_proto_init() { + if File_cel_expr_value_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cel_expr_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Value); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_value_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MapValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_value_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MapValue_Entry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cel_expr_value_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Value_NullValue)(nil), + (*Value_BoolValue)(nil), + (*Value_Int64Value)(nil), + (*Value_Uint64Value)(nil), + (*Value_DoubleValue)(nil), + (*Value_StringValue)(nil), + (*Value_BytesValue)(nil), + (*Value_EnumValue)(nil), + (*Value_ObjectValue)(nil), + (*Value_MapValue)(nil), + (*Value_ListValue)(nil), + (*Value_TypeValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cel_expr_value_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_value_proto_goTypes, + DependencyIndexes: file_cel_expr_value_proto_depIdxs, + MessageInfos: file_cel_expr_value_proto_msgTypes, + }.Build() + File_cel_expr_value_proto = out.File + file_cel_expr_value_proto_rawDesc = nil + file_cel_expr_value_proto_goTypes = nil + file_cel_expr_value_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md index c81df73927..ecdc47daba 100644 --- 
a/vendor/cloud.google.com/go/auth/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/CHANGES.md @@ -1,5 +1,20 @@ # Changelog +## [0.10.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.9...auth/v0.10.0) (2024-10-30) + + +### Features + +* **auth:** Add universe domain support to credentials/impersonate ([#10953](https://github.com/googleapis/google-cloud-go/issues/10953)) ([e06cb64](https://github.com/googleapis/google-cloud-go/commit/e06cb6499f7eda3aef08ab18ff197016f667684b)) + +## [0.9.9](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.8...auth/v0.9.9) (2024-10-22) + + +### Bug Fixes + +* **auth:** Fallback cert lookups for missing files ([#11013](https://github.com/googleapis/google-cloud-go/issues/11013)) ([bd76695](https://github.com/googleapis/google-cloud-go/commit/bd766957ec238b7c40ddbabb369e612dc9b07313)), refs [#10844](https://github.com/googleapis/google-cloud-go/issues/10844) +* **auth:** Replace MDS endpoint universe_domain with universe-domain ([#11000](https://github.com/googleapis/google-cloud-go/issues/11000)) ([6a1586f](https://github.com/googleapis/google-cloud-go/commit/6a1586f2ce9974684affaea84e7b629313b4d114)) + ## [0.9.8](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.7...auth/v0.9.8) (2024-10-09) diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go index 314bd292e3..a7fa84f6f9 100644 --- a/vendor/cloud.google.com/go/auth/auth.go +++ b/vendor/cloud.google.com/go/auth/auth.go @@ -227,9 +227,7 @@ type CredentialsOptions struct { UniverseDomainProvider CredentialsPropertyProvider } -// NewCredentials returns new [Credentials] from the provided options. Most users -// will want to build this object a function from the -// [cloud.google.com/go/auth/credentials] package. +// NewCredentials returns new [Credentials] from the provided options. func NewCredentials(opts *CredentialsOptions) *Credentials { creds := &Credentials{ TokenProvider: opts.TokenProvider, @@ -242,8 +240,8 @@ func NewCredentials(opts *CredentialsOptions) *Credentials { return creds } -// CachedTokenProviderOptions provided options for configuring a -// CachedTokenProvider. +// CachedTokenProviderOptions provides options for configuring a cached +// [TokenProvider]. type CachedTokenProviderOptions struct { // DisableAutoRefresh makes the TokenProvider always return the same token, // even if it is expired. The default is false. Optional. @@ -253,7 +251,7 @@ type CachedTokenProviderOptions struct { // seconds. Optional. ExpireEarly time.Duration // DisableAsyncRefresh configures a synchronous workflow that refreshes - // stale tokens while blocking. The default is false. Optional. + // tokens in a blocking manner. The default is false. Optional. DisableAsyncRefresh bool } @@ -280,12 +278,7 @@ func (ctpo *CachedTokenProviderOptions) blockingRefresh() bool { // NewCachedTokenProvider wraps a [TokenProvider] to cache the tokens returned // by the underlying provider. By default it will refresh tokens asynchronously -// (non-blocking mode) within a window that starts 3 minutes and 45 seconds -// before they expire. The asynchronous (non-blocking) refresh can be changed to -// a synchronous (blocking) refresh using the -// CachedTokenProviderOptions.DisableAsyncRefresh option. The time-before-expiry -// duration can be configured using the CachedTokenProviderOptions.ExpireEarly -// option. +// a few minutes before they expire. 
func NewCachedTokenProvider(tp TokenProvider, opts *CachedTokenProviderOptions) TokenProvider { if ctp, ok := tp.(*cachedTokenProvider); ok { return ctp diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go index 30fedf9562..38e8c99399 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go @@ -155,6 +155,8 @@ type InternalOptions struct { // transport that sets the Authorization header with the value produced by the // provided [cloud.google.com/go/auth.Credentials]. An error is returned only // if client or creds is nil. +// +// This function does not support setting a universe domain value on the client. func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) error { if client == nil || creds == nil { return fmt.Errorf("httptransport: client and tp must not be nil") @@ -173,7 +175,6 @@ func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) er client.Transport = &authTransport{ creds: creds, base: base, - // TODO(quartzmo): Somehow set clientUniverseDomain from impersonate calls. } return nil } diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go index 66a51f19c7..d8c1611918 100644 --- a/vendor/cloud.google.com/go/auth/internal/internal.go +++ b/vendor/cloud.google.com/go/auth/internal/internal.go @@ -200,7 +200,7 @@ func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string var httpGetMetadataUniverseDomain = func(ctx context.Context) (string, error) { ctx, cancel := context.WithTimeout(ctx, 1*time.Second) defer cancel() - return metadata.GetWithContext(ctx, "universe/universe_domain") + return metadata.GetWithContext(ctx, "universe/universe-domain") } func getMetadataUniverseDomain(ctx context.Context) (string, error) { diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go index 3665159161..6c954ae193 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go @@ -16,7 +16,6 @@ package cert import ( "crypto/tls" - "errors" "github.com/googleapis/enterprise-certificate-proxy/client" ) @@ -37,10 +36,9 @@ type ecpSource struct { func NewEnterpriseCertificateProxyProvider(configFilePath string) (Provider, error) { key, err := client.Cred(configFilePath) if err != nil { - if errors.Is(err, client.ErrCredUnavailable) { - return nil, errSourceUnavailable - } - return nil, err + // TODO(codyoss): once this is fixed upstream can handle this error a + // little better here. But be safe for now and assume unavailable. 
+ return nil, errSourceUnavailable } return (&ecpSource{ diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go index e8675bf824..347aaced72 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go @@ -82,10 +82,7 @@ func (s *workloadSource) getClientCertificate(info *tls.CertificateRequestInfo) func getCertAndKeyFiles(configFilePath string) (string, string, error) { jsonFile, err := os.Open(configFilePath) if err != nil { - if errors.Is(err, os.ErrNotExist) { - return "", "", errSourceUnavailable - } - return "", "", err + return "", "", errSourceUnavailable } byteValue, err := io.ReadAll(jsonFile) diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md index 7faf6e0c98..3df9f72cad 100644 --- a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md @@ -1,5 +1,12 @@ # Changelog +## [0.2.5](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.4...auth/oauth2adapt/v0.2.5) (2024-10-30) + + +### Bug Fixes + +* **auth/oauth2adapt:** Convert token metadata where possible ([#11062](https://github.com/googleapis/google-cloud-go/issues/11062)) ([34bf1c1](https://github.com/googleapis/google-cloud-go/commit/34bf1c164465d66745c0cfdf7cd10a8e2da92e52)) + ## [0.2.4](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.3...auth/oauth2adapt/v0.2.4) (2024-08-08) diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go index 9835ac571c..e266b0c9b1 100644 --- a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go @@ -26,6 +26,13 @@ import ( "golang.org/x/oauth2/google" ) +const ( + oauth2TokenSourceKey = "oauth2.google.tokenSource" + oauth2ServiceAccountKey = "oauth2.google.serviceAccount" + authTokenSourceKey = "auth.google.tokenSource" + authServiceAccountKey = "auth.google.serviceAccount" +) + // TokenProviderFromTokenSource converts any [golang.org/x/oauth2.TokenSource] // into a [cloud.google.com/go/auth.TokenProvider]. func TokenProviderFromTokenSource(ts oauth2.TokenSource) auth.TokenProvider { @@ -47,10 +54,21 @@ func (tp *tokenProviderAdapter) Token(context.Context) (*auth.Token, error) { } return nil, err } + // Preserve compute token metadata, for both types of tokens. + metadata := map[string]interface{}{} + if val, ok := tok.Extra(oauth2TokenSourceKey).(string); ok { + metadata[authTokenSourceKey] = val + metadata[oauth2TokenSourceKey] = val + } + if val, ok := tok.Extra(oauth2ServiceAccountKey).(string); ok { + metadata[authServiceAccountKey] = val + metadata[oauth2ServiceAccountKey] = val + } return &auth.Token{ - Value: tok.AccessToken, - Type: tok.Type(), - Expiry: tok.Expiry, + Value: tok.AccessToken, + Type: tok.Type(), + Expiry: tok.Expiry, + Metadata: metadata, }, nil } @@ -76,11 +94,24 @@ func (ts *tokenSourceAdapter) Token() (*oauth2.Token, error) { } return nil, err } - return &oauth2.Token{ + tok2 := &oauth2.Token{ AccessToken: tok.Value, TokenType: tok.Type, Expiry: tok.Expiry, - }, nil + } + // Preserve token metadata. + metadata := tok.Metadata + if metadata != nil { + // Append compute token metadata in converted form. 
+ if val, ok := metadata[authTokenSourceKey].(string); ok && val != "" { + metadata[oauth2TokenSourceKey] = val + } + if val, ok := metadata[authServiceAccountKey].(string); ok && val != "" { + metadata[oauth2ServiceAccountKey] = val + } + tok2 = tok2.WithExtra(metadata) + } + return tok2, nil } // AuthCredentialsFromOauth2Credentials converts a [golang.org/x/oauth2/google.Credentials] diff --git a/vendor/cloud.google.com/go/monitoring/LICENSE b/vendor/cloud.google.com/go/monitoring/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go new file mode 100644 index 0000000000..ae1dd6b9a2 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go @@ -0,0 +1,399 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" +) + +var newAlertPolicyClientHook clientHook + +// AlertPolicyCallOptions contains the retry settings for each method of AlertPolicyClient. 
+type AlertPolicyCallOptions struct { + ListAlertPolicies []gax.CallOption + GetAlertPolicy []gax.CallOption + CreateAlertPolicy []gax.CallOption + DeleteAlertPolicy []gax.CallOption + UpdateAlertPolicy []gax.CallOption +} + +func defaultAlertPolicyGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultAlertPolicyCallOptions() *AlertPolicyCallOptions { + return &AlertPolicyCallOptions{ + ListAlertPolicies: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetAlertPolicy: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateAlertPolicy: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + DeleteAlertPolicy: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateAlertPolicy: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + } +} + +// internalAlertPolicyClient is an interface that defines the methods available from Cloud Monitoring API. +type internalAlertPolicyClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + ListAlertPolicies(context.Context, *monitoringpb.ListAlertPoliciesRequest, ...gax.CallOption) *AlertPolicyIterator + GetAlertPolicy(context.Context, *monitoringpb.GetAlertPolicyRequest, ...gax.CallOption) (*monitoringpb.AlertPolicy, error) + CreateAlertPolicy(context.Context, *monitoringpb.CreateAlertPolicyRequest, ...gax.CallOption) (*monitoringpb.AlertPolicy, error) + DeleteAlertPolicy(context.Context, *monitoringpb.DeleteAlertPolicyRequest, ...gax.CallOption) error + UpdateAlertPolicy(context.Context, *monitoringpb.UpdateAlertPolicyRequest, ...gax.CallOption) (*monitoringpb.AlertPolicy, error) +} + +// AlertPolicyClient is a client for interacting with Cloud Monitoring API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The AlertPolicyService API is used to manage (list, create, delete, +// edit) alert policies in Cloud Monitoring. An alerting policy is +// a description of the conditions under which some aspect of your +// system is considered to be “unhealthy” and the ways to notify +// people or services about this state. 
In addition to using this API, alert +// policies can also be managed through +// Cloud Monitoring (at https://cloud.google.com/monitoring/docs/), +// which can be reached by clicking the “Monitoring” tab in +// Cloud console (at https://console.cloud.google.com/). +type AlertPolicyClient struct { + // The internal transport-dependent client. + internalClient internalAlertPolicyClient + + // The call options for this service. + CallOptions *AlertPolicyCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *AlertPolicyClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *AlertPolicyClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *AlertPolicyClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// ListAlertPolicies lists the existing alerting policies for the workspace. +func (c *AlertPolicyClient) ListAlertPolicies(ctx context.Context, req *monitoringpb.ListAlertPoliciesRequest, opts ...gax.CallOption) *AlertPolicyIterator { + return c.internalClient.ListAlertPolicies(ctx, req, opts...) +} + +// GetAlertPolicy gets a single alerting policy. +func (c *AlertPolicyClient) GetAlertPolicy(ctx context.Context, req *monitoringpb.GetAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + return c.internalClient.GetAlertPolicy(ctx, req, opts...) +} + +// CreateAlertPolicy creates a new alerting policy. +// +// Design your application to single-thread API calls that modify the state of +// alerting policies in a single project. This includes calls to +// CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. +func (c *AlertPolicyClient) CreateAlertPolicy(ctx context.Context, req *monitoringpb.CreateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + return c.internalClient.CreateAlertPolicy(ctx, req, opts...) +} + +// DeleteAlertPolicy deletes an alerting policy. +// +// Design your application to single-thread API calls that modify the state of +// alerting policies in a single project. This includes calls to +// CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. +func (c *AlertPolicyClient) DeleteAlertPolicy(ctx context.Context, req *monitoringpb.DeleteAlertPolicyRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteAlertPolicy(ctx, req, opts...) +} + +// UpdateAlertPolicy updates an alerting policy. You can either replace the entire policy with +// a new one or replace only certain fields in the current alerting policy by +// specifying the fields to be updated via updateMask. Returns the +// updated alerting policy. +// +// Design your application to single-thread API calls that modify the state of +// alerting policies in a single project. This includes calls to +// CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. 
+func (c *AlertPolicyClient) UpdateAlertPolicy(ctx context.Context, req *monitoringpb.UpdateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + return c.internalClient.UpdateAlertPolicy(ctx, req, opts...) +} + +// alertPolicyGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type alertPolicyGRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing AlertPolicyClient + CallOptions **AlertPolicyCallOptions + + // The gRPC API client. + alertPolicyClient monitoringpb.AlertPolicyServiceClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewAlertPolicyClient creates a new alert policy service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// The AlertPolicyService API is used to manage (list, create, delete, +// edit) alert policies in Cloud Monitoring. An alerting policy is +// a description of the conditions under which some aspect of your +// system is considered to be “unhealthy” and the ways to notify +// people or services about this state. In addition to using this API, alert +// policies can also be managed through +// Cloud Monitoring (at https://cloud.google.com/monitoring/docs/), +// which can be reached by clicking the “Monitoring” tab in +// Cloud console (at https://console.cloud.google.com/). +func NewAlertPolicyClient(ctx context.Context, opts ...option.ClientOption) (*AlertPolicyClient, error) { + clientOpts := defaultAlertPolicyGRPCClientOptions() + if newAlertPolicyClientHook != nil { + hookOpts, err := newAlertPolicyClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := AlertPolicyClient{CallOptions: defaultAlertPolicyCallOptions()} + + c := &alertPolicyGRPCClient{ + connPool: connPool, + alertPolicyClient: monitoringpb.NewAlertPolicyServiceClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *alertPolicyGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *alertPolicyGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. 
+func (c *alertPolicyGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *alertPolicyGRPCClient) ListAlertPolicies(ctx context.Context, req *monitoringpb.ListAlertPoliciesRequest, opts ...gax.CallOption) *AlertPolicyIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListAlertPolicies[0:len((*c.CallOptions).ListAlertPolicies):len((*c.CallOptions).ListAlertPolicies)], opts...) + it := &AlertPolicyIterator{} + req = proto.Clone(req).(*monitoringpb.ListAlertPoliciesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.AlertPolicy, string, error) { + resp := &monitoringpb.ListAlertPoliciesResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.ListAlertPolicies(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetAlertPolicies(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *alertPolicyGRPCClient) GetAlertPolicy(ctx context.Context, req *monitoringpb.GetAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetAlertPolicy[0:len((*c.CallOptions).GetAlertPolicy):len((*c.CallOptions).GetAlertPolicy)], opts...) + var resp *monitoringpb.AlertPolicy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.GetAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *alertPolicyGRPCClient) CreateAlertPolicy(ctx context.Context, req *monitoringpb.CreateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateAlertPolicy[0:len((*c.CallOptions).CreateAlertPolicy):len((*c.CallOptions).CreateAlertPolicy)], opts...) + var resp *monitoringpb.AlertPolicy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.CreateAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *alertPolicyGRPCClient) DeleteAlertPolicy(ctx context.Context, req *monitoringpb.DeleteAlertPolicyRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteAlertPolicy[0:len((*c.CallOptions).DeleteAlertPolicy):len((*c.CallOptions).DeleteAlertPolicy)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.alertPolicyClient.DeleteAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *alertPolicyGRPCClient) UpdateAlertPolicy(ctx context.Context, req *monitoringpb.UpdateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "alert_policy.name", url.QueryEscape(req.GetAlertPolicy().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateAlertPolicy[0:len((*c.CallOptions).UpdateAlertPolicy):len((*c.CallOptions).UpdateAlertPolicy)], opts...) + var resp *monitoringpb.AlertPolicy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.UpdateAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary.go new file mode 100644 index 0000000000..4de74e773e --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary.go @@ -0,0 +1,682 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package monitoring + +import ( + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + "google.golang.org/api/iterator" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" +) + +// AlertPolicyIterator manages a stream of *monitoringpb.AlertPolicy. +type AlertPolicyIterator struct { + items []*monitoringpb.AlertPolicy + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. 
+ // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.AlertPolicy, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *AlertPolicyIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *AlertPolicyIterator) Next() (*monitoringpb.AlertPolicy, error) { + var item *monitoringpb.AlertPolicy + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *AlertPolicyIterator) bufLen() int { + return len(it.items) +} + +func (it *AlertPolicyIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// GroupIterator manages a stream of *monitoringpb.Group. +type GroupIterator struct { + items []*monitoringpb.Group + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Group, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *GroupIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *GroupIterator) Next() (*monitoringpb.Group, error) { + var item *monitoringpb.Group + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *GroupIterator) bufLen() int { + return len(it.items) +} + +func (it *GroupIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// MetricDescriptorIterator manages a stream of *metricpb.MetricDescriptor. +type MetricDescriptorIterator struct { + items []*metricpb.MetricDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*metricpb.MetricDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. 
+func (it *MetricDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MetricDescriptorIterator) Next() (*metricpb.MetricDescriptor, error) { + var item *metricpb.MetricDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MetricDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *MetricDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor. +type MonitoredResourceDescriptorIterator struct { + items []*monitoredrespb.MonitoredResourceDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResourceDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MonitoredResourceDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) { + var item *monitoredrespb.MonitoredResourceDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MonitoredResourceDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *MonitoredResourceDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// MonitoredResourceIterator manages a stream of *monitoredrespb.MonitoredResource. +type MonitoredResourceIterator struct { + items []*monitoredrespb.MonitoredResource + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResource, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. 
+func (it *MonitoredResourceIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MonitoredResourceIterator) Next() (*monitoredrespb.MonitoredResource, error) { + var item *monitoredrespb.MonitoredResource + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MonitoredResourceIterator) bufLen() int { + return len(it.items) +} + +func (it *MonitoredResourceIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// NotificationChannelDescriptorIterator manages a stream of *monitoringpb.NotificationChannelDescriptor. +type NotificationChannelDescriptorIterator struct { + items []*monitoringpb.NotificationChannelDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannelDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *NotificationChannelDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *NotificationChannelDescriptorIterator) Next() (*monitoringpb.NotificationChannelDescriptor, error) { + var item *monitoringpb.NotificationChannelDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *NotificationChannelDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *NotificationChannelDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// NotificationChannelIterator manages a stream of *monitoringpb.NotificationChannel. +type NotificationChannelIterator struct { + items []*monitoringpb.NotificationChannel + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannel, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. 
+func (it *NotificationChannelIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *NotificationChannelIterator) Next() (*monitoringpb.NotificationChannel, error) { + var item *monitoringpb.NotificationChannel + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *NotificationChannelIterator) bufLen() int { + return len(it.items) +} + +func (it *NotificationChannelIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// ServiceIterator manages a stream of *monitoringpb.Service. +type ServiceIterator struct { + items []*monitoringpb.Service + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Service, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ServiceIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *ServiceIterator) Next() (*monitoringpb.Service, error) { + var item *monitoringpb.Service + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ServiceIterator) bufLen() int { + return len(it.items) +} + +func (it *ServiceIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// ServiceLevelObjectiveIterator manages a stream of *monitoringpb.ServiceLevelObjective. +type ServiceLevelObjectiveIterator struct { + items []*monitoringpb.ServiceLevelObjective + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.ServiceLevelObjective, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ServiceLevelObjectiveIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. 
Once Next returns Done, all subsequent calls will return Done. +func (it *ServiceLevelObjectiveIterator) Next() (*monitoringpb.ServiceLevelObjective, error) { + var item *monitoringpb.ServiceLevelObjective + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ServiceLevelObjectiveIterator) bufLen() int { + return len(it.items) +} + +func (it *ServiceLevelObjectiveIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// SnoozeIterator manages a stream of *monitoringpb.Snooze. +type SnoozeIterator struct { + items []*monitoringpb.Snooze + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Snooze, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *SnoozeIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *SnoozeIterator) Next() (*monitoringpb.Snooze, error) { + var item *monitoringpb.Snooze + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *SnoozeIterator) bufLen() int { + return len(it.items) +} + +func (it *SnoozeIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// TimeSeriesDataIterator manages a stream of *monitoringpb.TimeSeriesData. +type TimeSeriesDataIterator struct { + items []*monitoringpb.TimeSeriesData + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.TimeSeriesData, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *TimeSeriesDataIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *TimeSeriesDataIterator) Next() (*monitoringpb.TimeSeriesData, error) { + var item *monitoringpb.TimeSeriesData + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *TimeSeriesDataIterator) bufLen() int { + return len(it.items) +} + +func (it *TimeSeriesDataIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// TimeSeriesIterator manages a stream of *monitoringpb.TimeSeries. +type TimeSeriesIterator struct { + items []*monitoringpb.TimeSeries + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.TimeSeries, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *TimeSeriesIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *TimeSeriesIterator) Next() (*monitoringpb.TimeSeries, error) { + var item *monitoringpb.TimeSeries + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *TimeSeriesIterator) bufLen() int { + return len(it.items) +} + +func (it *TimeSeriesIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// UptimeCheckConfigIterator manages a stream of *monitoringpb.UptimeCheckConfig. +type UptimeCheckConfigIterator struct { + items []*monitoringpb.UptimeCheckConfig + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckConfig, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *UptimeCheckConfigIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
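+//
+// Illustrative only (a sketch, not generated text): the usual drain loop,
+// assuming the iterator came from a List call on one of this package's
+// clients.
+//
+//	for {
+//		cfg, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			// TODO: Handle error.
+//			break
+//		}
+//		_ = cfg
+//	}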
+func (it *UptimeCheckConfigIterator) Next() (*monitoringpb.UptimeCheckConfig, error) { + var item *monitoringpb.UptimeCheckConfig + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *UptimeCheckConfigIterator) bufLen() int { + return len(it.items) +} + +func (it *UptimeCheckConfigIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// UptimeCheckIpIterator manages a stream of *monitoringpb.UptimeCheckIp. +type UptimeCheckIpIterator struct { + items []*monitoringpb.UptimeCheckIp + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckIp, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *UptimeCheckIpIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *UptimeCheckIpIterator) Next() (*monitoringpb.UptimeCheckIp, error) { + var item *monitoringpb.UptimeCheckIp + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *UptimeCheckIpIterator) bufLen() int { + return len(it.items) +} + +func (it *UptimeCheckIpIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary_go123.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary_go123.go new file mode 100644 index 0000000000..2982e1f84d --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary_go123.go @@ -0,0 +1,112 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +//go:build go1.23 + +package monitoring + +import ( + "iter" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + "github.com/googleapis/gax-go/v2/iterator" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" +) + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. 
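+//
+// Illustrative only (a sketch, not generated text): with Go 1.23
+// range-over-func support, the sequence can be consumed directly, assuming
+// the iterator came from AlertPolicyClient.ListAlertPolicies.
+//
+//	for policy, err := range it.All() {
+//		if err != nil {
+//			// TODO: Handle error.
+//			break
+//		}
+//		_ = policy
+//	}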
+func (it *AlertPolicyIterator) All() iter.Seq2[*monitoringpb.AlertPolicy, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *GroupIterator) All() iter.Seq2[*monitoringpb.Group, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *MetricDescriptorIterator) All() iter.Seq2[*metricpb.MetricDescriptor, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *MonitoredResourceDescriptorIterator) All() iter.Seq2[*monitoredrespb.MonitoredResourceDescriptor, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *MonitoredResourceIterator) All() iter.Seq2[*monitoredrespb.MonitoredResource, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *NotificationChannelDescriptorIterator) All() iter.Seq2[*monitoringpb.NotificationChannelDescriptor, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *NotificationChannelIterator) All() iter.Seq2[*monitoringpb.NotificationChannel, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *ServiceIterator) All() iter.Seq2[*monitoringpb.Service, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *ServiceLevelObjectiveIterator) All() iter.Seq2[*monitoringpb.ServiceLevelObjective, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *SnoozeIterator) All() iter.Seq2[*monitoringpb.Snooze, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *TimeSeriesDataIterator) All() iter.Seq2[*monitoringpb.TimeSeriesData, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *TimeSeriesIterator) All() iter.Seq2[*monitoringpb.TimeSeries, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *UptimeCheckConfigIterator) All() iter.Seq2[*monitoringpb.UptimeCheckConfig, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. 
+func (it *UptimeCheckIpIterator) All() iter.Seq2[*monitoringpb.UptimeCheckIp, error] { + return iterator.RangeAdapter(it.Next) +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go new file mode 100644 index 0000000000..e8c4036475 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go @@ -0,0 +1,124 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +// Package monitoring is an auto-generated package for the +// Cloud Monitoring API. +// +// Manages your Cloud Monitoring data and configurations. +// +// # General documentation +// +// For information that is relevant for all client libraries please reference +// https://pkg.go.dev/cloud.google.com/go#pkg-overview. Some information on this +// page includes: +// +// - [Authentication and Authorization] +// - [Timeouts and Cancellation] +// - [Testing against Client Libraries] +// - [Debugging Client Libraries] +// - [Inspecting errors] +// +// # Example usage +// +// To get started with this package, create a client. +// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := monitoring.NewAlertPolicyClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// The client will use your default application credentials. Clients should be reused instead of created as needed. +// The methods of Client are safe for concurrent use by multiple goroutines. +// The returned client must be Closed when it is done being used. +// +// # Using the Client +// +// The following is an example of making an API call with the newly created client. +// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := monitoring.NewAlertPolicyClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// req := &monitoringpb.CreateAlertPolicyRequest{ +// // TODO: Fill request struct fields. +// // See https://pkg.go.dev/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb#CreateAlertPolicyRequest. +// } +// resp, err := c.CreateAlertPolicy(ctx, req) +// if err != nil { +// // TODO: Handle error. +// } +// // TODO: Use resp. 
+// _ = resp +// +// # Use of Context +// +// The ctx passed to NewAlertPolicyClient is used for authentication requests and +// for creating the underlying connection, but is not used for subsequent calls. +// Individual methods on the client use the ctx given to them. +// +// To close the open connection, use the Close() method. +// +// [Authentication and Authorization]: https://pkg.go.dev/cloud.google.com/go#hdr-Authentication_and_Authorization +// [Timeouts and Cancellation]: https://pkg.go.dev/cloud.google.com/go#hdr-Timeouts_and_Cancellation +// [Testing against Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Testing +// [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging +// [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors +package monitoring // import "cloud.google.com/go/monitoring/apiv3/v2" + +import ( + "context" + + "google.golang.org/api/option" +) + +// For more information on implementing a client constructor hook, see +// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors. +type clientHookParams struct{} +type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) + +var versionClient string + +func getVersionClient() string { + if versionClient == "" { + return "UNKNOWN" + } + return versionClient +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + "https://www.googleapis.com/auth/monitoring.write", + } +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/gapic_metadata.json b/vendor/cloud.google.com/go/monitoring/apiv3/v2/gapic_metadata.json new file mode 100644 index 0000000000..a33cb6fcf5 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/gapic_metadata.json @@ -0,0 +1,336 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods.", + "language": "go", + "protoPackage": "google.monitoring.v3", + "libraryPackage": "cloud.google.com/go/monitoring/apiv3/v2", + "services": { + "AlertPolicyService": { + "clients": { + "grpc": { + "libraryClient": "AlertPolicyClient", + "rpcs": { + "CreateAlertPolicy": { + "methods": [ + "CreateAlertPolicy" + ] + }, + "DeleteAlertPolicy": { + "methods": [ + "DeleteAlertPolicy" + ] + }, + "GetAlertPolicy": { + "methods": [ + "GetAlertPolicy" + ] + }, + "ListAlertPolicies": { + "methods": [ + "ListAlertPolicies" + ] + }, + "UpdateAlertPolicy": { + "methods": [ + "UpdateAlertPolicy" + ] + } + } + } + } + }, + "GroupService": { + "clients": { + "grpc": { + "libraryClient": "GroupClient", + "rpcs": { + "CreateGroup": { + "methods": [ + "CreateGroup" + ] + }, + "DeleteGroup": { + "methods": [ + "DeleteGroup" + ] + }, + "GetGroup": { + "methods": [ + "GetGroup" + ] + }, + "ListGroupMembers": { + "methods": [ + "ListGroupMembers" + ] + }, + "ListGroups": { + "methods": [ + "ListGroups" + ] + }, + "UpdateGroup": { + "methods": [ + "UpdateGroup" + ] + } + } + } + } + }, + "MetricService": { + "clients": { + "grpc": { + "libraryClient": "MetricClient", + "rpcs": { + "CreateMetricDescriptor": { + "methods": [ + "CreateMetricDescriptor" + ] + }, + "CreateServiceTimeSeries": { + "methods": [ + "CreateServiceTimeSeries" + ] + }, + "CreateTimeSeries": { + "methods": [ + "CreateTimeSeries" 
+ ] + }, + "DeleteMetricDescriptor": { + "methods": [ + "DeleteMetricDescriptor" + ] + }, + "GetMetricDescriptor": { + "methods": [ + "GetMetricDescriptor" + ] + }, + "GetMonitoredResourceDescriptor": { + "methods": [ + "GetMonitoredResourceDescriptor" + ] + }, + "ListMetricDescriptors": { + "methods": [ + "ListMetricDescriptors" + ] + }, + "ListMonitoredResourceDescriptors": { + "methods": [ + "ListMonitoredResourceDescriptors" + ] + }, + "ListTimeSeries": { + "methods": [ + "ListTimeSeries" + ] + } + } + } + } + }, + "NotificationChannelService": { + "clients": { + "grpc": { + "libraryClient": "NotificationChannelClient", + "rpcs": { + "CreateNotificationChannel": { + "methods": [ + "CreateNotificationChannel" + ] + }, + "DeleteNotificationChannel": { + "methods": [ + "DeleteNotificationChannel" + ] + }, + "GetNotificationChannel": { + "methods": [ + "GetNotificationChannel" + ] + }, + "GetNotificationChannelDescriptor": { + "methods": [ + "GetNotificationChannelDescriptor" + ] + }, + "GetNotificationChannelVerificationCode": { + "methods": [ + "GetNotificationChannelVerificationCode" + ] + }, + "ListNotificationChannelDescriptors": { + "methods": [ + "ListNotificationChannelDescriptors" + ] + }, + "ListNotificationChannels": { + "methods": [ + "ListNotificationChannels" + ] + }, + "SendNotificationChannelVerificationCode": { + "methods": [ + "SendNotificationChannelVerificationCode" + ] + }, + "UpdateNotificationChannel": { + "methods": [ + "UpdateNotificationChannel" + ] + }, + "VerifyNotificationChannel": { + "methods": [ + "VerifyNotificationChannel" + ] + } + } + } + } + }, + "QueryService": { + "clients": { + "grpc": { + "libraryClient": "QueryClient", + "rpcs": { + "QueryTimeSeries": { + "methods": [ + "QueryTimeSeries" + ] + } + } + } + } + }, + "ServiceMonitoringService": { + "clients": { + "grpc": { + "libraryClient": "ServiceMonitoringClient", + "rpcs": { + "CreateService": { + "methods": [ + "CreateService" + ] + }, + "CreateServiceLevelObjective": { + "methods": [ + "CreateServiceLevelObjective" + ] + }, + "DeleteService": { + "methods": [ + "DeleteService" + ] + }, + "DeleteServiceLevelObjective": { + "methods": [ + "DeleteServiceLevelObjective" + ] + }, + "GetService": { + "methods": [ + "GetService" + ] + }, + "GetServiceLevelObjective": { + "methods": [ + "GetServiceLevelObjective" + ] + }, + "ListServiceLevelObjectives": { + "methods": [ + "ListServiceLevelObjectives" + ] + }, + "ListServices": { + "methods": [ + "ListServices" + ] + }, + "UpdateService": { + "methods": [ + "UpdateService" + ] + }, + "UpdateServiceLevelObjective": { + "methods": [ + "UpdateServiceLevelObjective" + ] + } + } + } + } + }, + "SnoozeService": { + "clients": { + "grpc": { + "libraryClient": "SnoozeClient", + "rpcs": { + "CreateSnooze": { + "methods": [ + "CreateSnooze" + ] + }, + "GetSnooze": { + "methods": [ + "GetSnooze" + ] + }, + "ListSnoozes": { + "methods": [ + "ListSnoozes" + ] + }, + "UpdateSnooze": { + "methods": [ + "UpdateSnooze" + ] + } + } + } + } + }, + "UptimeCheckService": { + "clients": { + "grpc": { + "libraryClient": "UptimeCheckClient", + "rpcs": { + "CreateUptimeCheckConfig": { + "methods": [ + "CreateUptimeCheckConfig" + ] + }, + "DeleteUptimeCheckConfig": { + "methods": [ + "DeleteUptimeCheckConfig" + ] + }, + "GetUptimeCheckConfig": { + "methods": [ + "GetUptimeCheckConfig" + ] + }, + "ListUptimeCheckConfigs": { + "methods": [ + "ListUptimeCheckConfigs" + ] + }, + "ListUptimeCheckIps": { + "methods": [ + "ListUptimeCheckIps" + ] + }, + "UpdateUptimeCheckConfig": { + 
"methods": [ + "UpdateUptimeCheckConfig" + ] + } + } + } + } + } + } +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go new file mode 100644 index 0000000000..da216081d5 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go @@ -0,0 +1,466 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" +) + +var newGroupClientHook clientHook + +// GroupCallOptions contains the retry settings for each method of GroupClient. +type GroupCallOptions struct { + ListGroups []gax.CallOption + GetGroup []gax.CallOption + CreateGroup []gax.CallOption + UpdateGroup []gax.CallOption + DeleteGroup []gax.CallOption + ListGroupMembers []gax.CallOption +} + +func defaultGroupGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultGroupCallOptions() *GroupCallOptions { + return &GroupCallOptions{ + ListGroups: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetGroup: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateGroup: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + UpdateGroup: []gax.CallOption{ + gax.WithTimeout(180000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + 
Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + DeleteGroup: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListGroupMembers: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + } +} + +// internalGroupClient is an interface that defines the methods available from Cloud Monitoring API. +type internalGroupClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + ListGroups(context.Context, *monitoringpb.ListGroupsRequest, ...gax.CallOption) *GroupIterator + GetGroup(context.Context, *monitoringpb.GetGroupRequest, ...gax.CallOption) (*monitoringpb.Group, error) + CreateGroup(context.Context, *monitoringpb.CreateGroupRequest, ...gax.CallOption) (*monitoringpb.Group, error) + UpdateGroup(context.Context, *monitoringpb.UpdateGroupRequest, ...gax.CallOption) (*monitoringpb.Group, error) + DeleteGroup(context.Context, *monitoringpb.DeleteGroupRequest, ...gax.CallOption) error + ListGroupMembers(context.Context, *monitoringpb.ListGroupMembersRequest, ...gax.CallOption) *MonitoredResourceIterator +} + +// GroupClient is a client for interacting with Cloud Monitoring API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The Group API lets you inspect and manage your +// groups (at #google.monitoring.v3.Group). +// +// A group is a named filter that is used to identify +// a collection of monitored resources. Groups are typically used to +// mirror the physical and/or logical topology of the environment. +// Because group membership is computed dynamically, monitored +// resources that are started in the future are automatically placed +// in matching groups. By using a group to name monitored resources in, +// for example, an alert policy, the target of that alert policy is +// updated automatically as monitored resources are added and removed +// from the infrastructure. +type GroupClient struct { + // The internal transport-dependent client. + internalClient internalGroupClient + + // The call options for this service. + CallOptions *GroupCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *GroupClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *GroupClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *GroupClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// ListGroups lists the existing groups. 
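+//
+// A hedged usage sketch (not generated text; the project name is a
+// placeholder):
+//
+//	it := c.ListGroups(ctx, &monitoringpb.ListGroupsRequest{
+//		Name: "projects/my-project",
+//	})
+//	for {
+//		g, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			// TODO: Handle error.
+//			break
+//		}
+//		_ = g
+//	}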
+func (c *GroupClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest, opts ...gax.CallOption) *GroupIterator { + return c.internalClient.ListGroups(ctx, req, opts...) +} + +// GetGroup gets a single group. +func (c *GroupClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + return c.internalClient.GetGroup(ctx, req, opts...) +} + +// CreateGroup creates a new group. +func (c *GroupClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + return c.internalClient.CreateGroup(ctx, req, opts...) +} + +// UpdateGroup updates an existing group. +// You can change any group attributes except name. +func (c *GroupClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + return c.internalClient.UpdateGroup(ctx, req, opts...) +} + +// DeleteGroup deletes an existing group. +func (c *GroupClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteGroup(ctx, req, opts...) +} + +// ListGroupMembers lists the monitored resources that are members of a group. +func (c *GroupClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest, opts ...gax.CallOption) *MonitoredResourceIterator { + return c.internalClient.ListGroupMembers(ctx, req, opts...) +} + +// groupGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type groupGRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing GroupClient + CallOptions **GroupCallOptions + + // The gRPC API client. + groupClient monitoringpb.GroupServiceClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewGroupClient creates a new group service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// The Group API lets you inspect and manage your +// groups (at #google.monitoring.v3.Group). +// +// A group is a named filter that is used to identify +// a collection of monitored resources. Groups are typically used to +// mirror the physical and/or logical topology of the environment. +// Because group membership is computed dynamically, monitored +// resources that are started in the future are automatically placed +// in matching groups. By using a group to name monitored resources in, +// for example, an alert policy, the target of that alert policy is +// updated automatically as monitored resources are added and removed +// from the infrastructure. +func NewGroupClient(ctx context.Context, opts ...option.ClientOption) (*GroupClient, error) { + clientOpts := defaultGroupGRPCClientOptions() + if newGroupClientHook != nil { + hookOpts, err := newGroupClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) 
+ if err != nil { + return nil, err + } + client := GroupClient{CallOptions: defaultGroupCallOptions()} + + c := &groupGRPCClient{ + connPool: connPool, + groupClient: monitoringpb.NewGroupServiceClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *groupGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *groupGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *groupGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *groupGRPCClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest, opts ...gax.CallOption) *GroupIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListGroups[0:len((*c.CallOptions).ListGroups):len((*c.CallOptions).ListGroups)], opts...) + it := &GroupIterator{} + req = proto.Clone(req).(*monitoringpb.ListGroupsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Group, string, error) { + resp := &monitoringpb.ListGroupsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.ListGroups(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetGroup(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *groupGRPCClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetGroup[0:len((*c.CallOptions).GetGroup):len((*c.CallOptions).GetGroup)], opts...) 
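+	// Explanatory note (added, not generated): the three-index slice above caps
+	// the default options at their length, so appending the per-call opts
+	// allocates a new slice rather than mutating the shared defaults.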
+ var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.GetGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *groupGRPCClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateGroup[0:len((*c.CallOptions).CreateGroup):len((*c.CallOptions).CreateGroup)], opts...) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.CreateGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *groupGRPCClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "group.name", url.QueryEscape(req.GetGroup().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateGroup[0:len((*c.CallOptions).UpdateGroup):len((*c.CallOptions).UpdateGroup)], opts...) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.UpdateGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *groupGRPCClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteGroup[0:len((*c.CallOptions).DeleteGroup):len((*c.CallOptions).DeleteGroup)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.groupClient.DeleteGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *groupGRPCClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest, opts ...gax.CallOption) *MonitoredResourceIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListGroupMembers[0:len((*c.CallOptions).ListGroupMembers):len((*c.CallOptions).ListGroupMembers)], opts...) 
+ it := &MonitoredResourceIterator{} + req = proto.Clone(req).(*monitoringpb.ListGroupMembersRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResource, string, error) { + resp := &monitoringpb.ListGroupMembersResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.ListGroupMembers(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetMembers(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go new file mode 100644 index 0000000000..d43d261d18 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go @@ -0,0 +1,578 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" +) + +var newMetricClientHook clientHook + +// MetricCallOptions contains the retry settings for each method of MetricClient. 
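+//
+// Illustrative only (a sketch, not generated text): the defaults below can be
+// overridden per call by passing additional gax.CallOption values, e.g.
+//
+//	md, err := c.GetMetricDescriptor(ctx, req, gax.WithTimeout(10*time.Second))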
+type MetricCallOptions struct { + ListMonitoredResourceDescriptors []gax.CallOption + GetMonitoredResourceDescriptor []gax.CallOption + ListMetricDescriptors []gax.CallOption + GetMetricDescriptor []gax.CallOption + CreateMetricDescriptor []gax.CallOption + DeleteMetricDescriptor []gax.CallOption + ListTimeSeries []gax.CallOption + CreateTimeSeries []gax.CallOption + CreateServiceTimeSeries []gax.CallOption +} + +func defaultMetricGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultMetricCallOptions() *MetricCallOptions { + return &MetricCallOptions{ + ListMonitoredResourceDescriptors: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetMonitoredResourceDescriptor: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListMetricDescriptors: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetMetricDescriptor: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateMetricDescriptor: []gax.CallOption{ + gax.WithTimeout(12000 * time.Millisecond), + }, + DeleteMetricDescriptor: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListTimeSeries: []gax.CallOption{ + gax.WithTimeout(90000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateTimeSeries: []gax.CallOption{ + gax.WithTimeout(12000 * time.Millisecond), + }, + CreateServiceTimeSeries: []gax.CallOption{}, + } +} + +// internalMetricClient is an interface that defines the methods available from Cloud Monitoring API. 
+type internalMetricClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + ListMonitoredResourceDescriptors(context.Context, *monitoringpb.ListMonitoredResourceDescriptorsRequest, ...gax.CallOption) *MonitoredResourceDescriptorIterator + GetMonitoredResourceDescriptor(context.Context, *monitoringpb.GetMonitoredResourceDescriptorRequest, ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) + ListMetricDescriptors(context.Context, *monitoringpb.ListMetricDescriptorsRequest, ...gax.CallOption) *MetricDescriptorIterator + GetMetricDescriptor(context.Context, *monitoringpb.GetMetricDescriptorRequest, ...gax.CallOption) (*metricpb.MetricDescriptor, error) + CreateMetricDescriptor(context.Context, *monitoringpb.CreateMetricDescriptorRequest, ...gax.CallOption) (*metricpb.MetricDescriptor, error) + DeleteMetricDescriptor(context.Context, *monitoringpb.DeleteMetricDescriptorRequest, ...gax.CallOption) error + ListTimeSeries(context.Context, *monitoringpb.ListTimeSeriesRequest, ...gax.CallOption) *TimeSeriesIterator + CreateTimeSeries(context.Context, *monitoringpb.CreateTimeSeriesRequest, ...gax.CallOption) error + CreateServiceTimeSeries(context.Context, *monitoringpb.CreateTimeSeriesRequest, ...gax.CallOption) error +} + +// MetricClient is a client for interacting with Cloud Monitoring API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// Manages metric descriptors, monitored resource descriptors, and +// time series data. +type MetricClient struct { + // The internal transport-dependent client. + internalClient internalMetricClient + + // The call options for this service. + CallOptions *MetricCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *MetricClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *MetricClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *MetricClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// ListMonitoredResourceDescriptors lists monitored resource descriptors that match a filter. +func (c *MetricClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator { + return c.internalClient.ListMonitoredResourceDescriptors(ctx, req, opts...) +} + +// GetMonitoredResourceDescriptor gets a single monitored resource descriptor. +func (c *MetricClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest, opts ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) { + return c.internalClient.GetMonitoredResourceDescriptor(ctx, req, opts...) +} + +// ListMetricDescriptors lists metric descriptors that match a filter. 
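+//
+// A hedged sketch (not generated text; the name and filter are placeholders):
+//
+//	it := c.ListMetricDescriptors(ctx, &monitoringpb.ListMetricDescriptorsRequest{
+//		Name:   "projects/my-project",
+//		Filter: `metric.type = starts_with("custom.googleapis.com/")`,
+//	})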
+func (c *MetricClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, opts ...gax.CallOption) *MetricDescriptorIterator { + return c.internalClient.ListMetricDescriptors(ctx, req, opts...) +} + +// GetMetricDescriptor gets a single metric descriptor. +func (c *MetricClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { + return c.internalClient.GetMetricDescriptor(ctx, req, opts...) +} + +// CreateMetricDescriptor creates a new metric descriptor. +// The creation is executed asynchronously. +// User-created metric descriptors define +// custom metrics (at https://cloud.google.com/monitoring/custom-metrics). +// The metric descriptor is updated if it already exists, +// except that metric labels are never removed. +func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { + return c.internalClient.CreateMetricDescriptor(ctx, req, opts...) +} + +// DeleteMetricDescriptor deletes a metric descriptor. Only user-created +// custom metrics (at https://cloud.google.com/monitoring/custom-metrics) can be +// deleted. +func (c *MetricClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteMetricDescriptor(ctx, req, opts...) +} + +// ListTimeSeries lists time series that match a filter. +func (c *MetricClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesIterator { + return c.internalClient.ListTimeSeries(ctx, req, opts...) +} + +// CreateTimeSeries creates or adds data to one or more time series. +// The response is empty if all time series in the request were written. +// If any time series could not be written, a corresponding failure message is +// included in the error response. +// This method does not support +// resource locations constraint of an organization +// policy (at https://cloud.google.com/resource-manager/docs/organization-policy/defining-locations#setting_the_organization_policy). +func (c *MetricClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error { + return c.internalClient.CreateTimeSeries(ctx, req, opts...) +} + +// CreateServiceTimeSeries creates or adds data to one or more service time series. A service time +// series is a time series for a metric from a Google Cloud service. The +// response is empty if all time series in the request were written. If any +// time series could not be written, a corresponding failure message is +// included in the error response. This endpoint rejects writes to +// user-defined metrics. +// This method is only for use by Google Cloud services. Use +// projects.timeSeries.create +// instead. +func (c *MetricClient) CreateServiceTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error { + return c.internalClient.CreateServiceTimeSeries(ctx, req, opts...) +} + +// metricGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type metricGRPCClient struct { + // Connection pool of gRPC connections to the service. 
+ connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing MetricClient + CallOptions **MetricCallOptions + + // The gRPC API client. + metricClient monitoringpb.MetricServiceClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewMetricClient creates a new metric service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// Manages metric descriptors, monitored resource descriptors, and +// time series data. +func NewMetricClient(ctx context.Context, opts ...option.ClientOption) (*MetricClient, error) { + clientOpts := defaultMetricGRPCClientOptions() + if newMetricClientHook != nil { + hookOpts, err := newMetricClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := MetricClient{CallOptions: defaultMetricCallOptions()} + + c := &metricGRPCClient{ + connPool: connPool, + metricClient: monitoringpb.NewMetricServiceClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *metricGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *metricGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *metricGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *metricGRPCClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListMonitoredResourceDescriptors[0:len((*c.CallOptions).ListMonitoredResourceDescriptors):len((*c.CallOptions).ListMonitoredResourceDescriptors)], opts...) 
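+	// Note: the three-index slice above (s[0:len(s):len(s)]) caps the capacity of the
+	// defaults slice at its length, so appending the caller-supplied opts allocates a
+	// fresh backing array rather than mutating the shared CallOptions defaults.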
+ it := &MonitoredResourceDescriptorIterator{} + req = proto.Clone(req).(*monitoringpb.ListMonitoredResourceDescriptorsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) { + resp := &monitoringpb.ListMonitoredResourceDescriptorsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.ListMonitoredResourceDescriptors(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetResourceDescriptors(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *metricGRPCClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest, opts ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetMonitoredResourceDescriptor[0:len((*c.CallOptions).GetMonitoredResourceDescriptor):len((*c.CallOptions).GetMonitoredResourceDescriptor)], opts...) + var resp *monitoredrespb.MonitoredResourceDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.GetMonitoredResourceDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *metricGRPCClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, opts ...gax.CallOption) *MetricDescriptorIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListMetricDescriptors[0:len((*c.CallOptions).ListMetricDescriptors):len((*c.CallOptions).ListMetricDescriptors)], opts...) + it := &MetricDescriptorIterator{} + req = proto.Clone(req).(*monitoringpb.ListMetricDescriptorsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*metricpb.MetricDescriptor, string, error) { + resp := &monitoringpb.ListMetricDescriptorsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.ListMetricDescriptors(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetMetricDescriptors(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *metricGRPCClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetMetricDescriptor[0:len((*c.CallOptions).GetMetricDescriptor):len((*c.CallOptions).GetMetricDescriptor)], opts...) + var resp *metricpb.MetricDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.GetMetricDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *metricGRPCClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateMetricDescriptor[0:len((*c.CallOptions).CreateMetricDescriptor):len((*c.CallOptions).CreateMetricDescriptor)], opts...) + var resp *metricpb.MetricDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.CreateMetricDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *metricGRPCClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteMetricDescriptor[0:len((*c.CallOptions).DeleteMetricDescriptor):len((*c.CallOptions).DeleteMetricDescriptor)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.metricClient.DeleteMetricDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *metricGRPCClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListTimeSeries[0:len((*c.CallOptions).ListTimeSeries):len((*c.CallOptions).ListTimeSeries)], opts...) 
+ it := &TimeSeriesIterator{} + req = proto.Clone(req).(*monitoringpb.ListTimeSeriesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.TimeSeries, string, error) { + resp := &monitoringpb.ListTimeSeriesResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.ListTimeSeries(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetTimeSeries(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *metricGRPCClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateTimeSeries[0:len((*c.CallOptions).CreateTimeSeries):len((*c.CallOptions).CreateTimeSeries)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.metricClient.CreateTimeSeries(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *metricGRPCClient) CreateServiceTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateServiceTimeSeries[0:len((*c.CallOptions).CreateServiceTimeSeries):len((*c.CallOptions).CreateServiceTimeSeries)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.metricClient.CreateServiceTimeSeries(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go new file mode 100644 index 0000000000..e7b3595fa6 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go @@ -0,0 +1,2432 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/alert.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + status "google.golang.org/genproto/googleapis/rpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Operators for combining conditions. +type AlertPolicy_ConditionCombinerType int32 + +const ( + // An unspecified combiner. + AlertPolicy_COMBINE_UNSPECIFIED AlertPolicy_ConditionCombinerType = 0 + // Combine conditions using the logical `AND` operator. An + // incident is created only if all the conditions are met + // simultaneously. This combiner is satisfied if all conditions are + // met, even if they are met on completely different resources. + AlertPolicy_AND AlertPolicy_ConditionCombinerType = 1 + // Combine conditions using the logical `OR` operator. An incident + // is created if any of the listed conditions is met. + AlertPolicy_OR AlertPolicy_ConditionCombinerType = 2 + // Combine conditions using logical `AND` operator, but unlike the regular + // `AND` option, an incident is created only if all conditions are met + // simultaneously on at least one resource. + AlertPolicy_AND_WITH_MATCHING_RESOURCE AlertPolicy_ConditionCombinerType = 3 +) + +// Enum value maps for AlertPolicy_ConditionCombinerType. +var ( + AlertPolicy_ConditionCombinerType_name = map[int32]string{ + 0: "COMBINE_UNSPECIFIED", + 1: "AND", + 2: "OR", + 3: "AND_WITH_MATCHING_RESOURCE", + } + AlertPolicy_ConditionCombinerType_value = map[string]int32{ + "COMBINE_UNSPECIFIED": 0, + "AND": 1, + "OR": 2, + "AND_WITH_MATCHING_RESOURCE": 3, + } +) + +func (x AlertPolicy_ConditionCombinerType) Enum() *AlertPolicy_ConditionCombinerType { + p := new(AlertPolicy_ConditionCombinerType) + *p = x + return p +} + +func (x AlertPolicy_ConditionCombinerType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AlertPolicy_ConditionCombinerType) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_alert_proto_enumTypes[0].Descriptor() +} + +func (AlertPolicy_ConditionCombinerType) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_alert_proto_enumTypes[0] +} + +func (x AlertPolicy_ConditionCombinerType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AlertPolicy_ConditionCombinerType.Descriptor instead. +func (AlertPolicy_ConditionCombinerType) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 0} +} + +// An enumeration of possible severity level for an Alert Policy. +type AlertPolicy_Severity int32 + +const ( + // No severity is specified. This is the default value. + AlertPolicy_SEVERITY_UNSPECIFIED AlertPolicy_Severity = 0 + // This is the highest severity level. Use this if the problem could + // cause significant damage or downtime. 
+ AlertPolicy_CRITICAL AlertPolicy_Severity = 1 + // This is the medium severity level. Use this if the problem could + // cause minor damage or downtime. + AlertPolicy_ERROR AlertPolicy_Severity = 2 + // This is the lowest severity level. Use this if the problem is not causing + // any damage or downtime, but could potentially lead to a problem in the + // future. + AlertPolicy_WARNING AlertPolicy_Severity = 3 +) + +// Enum value maps for AlertPolicy_Severity. +var ( + AlertPolicy_Severity_name = map[int32]string{ + 0: "SEVERITY_UNSPECIFIED", + 1: "CRITICAL", + 2: "ERROR", + 3: "WARNING", + } + AlertPolicy_Severity_value = map[string]int32{ + "SEVERITY_UNSPECIFIED": 0, + "CRITICAL": 1, + "ERROR": 2, + "WARNING": 3, + } +) + +func (x AlertPolicy_Severity) Enum() *AlertPolicy_Severity { + p := new(AlertPolicy_Severity) + *p = x + return p +} + +func (x AlertPolicy_Severity) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AlertPolicy_Severity) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_alert_proto_enumTypes[1].Descriptor() +} + +func (AlertPolicy_Severity) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_alert_proto_enumTypes[1] +} + +func (x AlertPolicy_Severity) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AlertPolicy_Severity.Descriptor instead. +func (AlertPolicy_Severity) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1} +} + +// A condition control that determines how metric-threshold conditions +// are evaluated when data stops arriving. +// This control doesn't affect metric-absence policies. +type AlertPolicy_Condition_EvaluationMissingData int32 + +const ( + // An unspecified evaluation missing data option. Equivalent to + // EVALUATION_MISSING_DATA_NO_OP. + AlertPolicy_Condition_EVALUATION_MISSING_DATA_UNSPECIFIED AlertPolicy_Condition_EvaluationMissingData = 0 + // If there is no data to evaluate the condition, then evaluate the + // condition as false. + AlertPolicy_Condition_EVALUATION_MISSING_DATA_INACTIVE AlertPolicy_Condition_EvaluationMissingData = 1 + // If there is no data to evaluate the condition, then evaluate the + // condition as true. + AlertPolicy_Condition_EVALUATION_MISSING_DATA_ACTIVE AlertPolicy_Condition_EvaluationMissingData = 2 + // Do not evaluate the condition to any value if there is no data. + AlertPolicy_Condition_EVALUATION_MISSING_DATA_NO_OP AlertPolicy_Condition_EvaluationMissingData = 3 +) + +// Enum value maps for AlertPolicy_Condition_EvaluationMissingData. 
+var ( + AlertPolicy_Condition_EvaluationMissingData_name = map[int32]string{ + 0: "EVALUATION_MISSING_DATA_UNSPECIFIED", + 1: "EVALUATION_MISSING_DATA_INACTIVE", + 2: "EVALUATION_MISSING_DATA_ACTIVE", + 3: "EVALUATION_MISSING_DATA_NO_OP", + } + AlertPolicy_Condition_EvaluationMissingData_value = map[string]int32{ + "EVALUATION_MISSING_DATA_UNSPECIFIED": 0, + "EVALUATION_MISSING_DATA_INACTIVE": 1, + "EVALUATION_MISSING_DATA_ACTIVE": 2, + "EVALUATION_MISSING_DATA_NO_OP": 3, + } +) + +func (x AlertPolicy_Condition_EvaluationMissingData) Enum() *AlertPolicy_Condition_EvaluationMissingData { + p := new(AlertPolicy_Condition_EvaluationMissingData) + *p = x + return p +} + +func (x AlertPolicy_Condition_EvaluationMissingData) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AlertPolicy_Condition_EvaluationMissingData) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_alert_proto_enumTypes[2].Descriptor() +} + +func (AlertPolicy_Condition_EvaluationMissingData) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_alert_proto_enumTypes[2] +} + +func (x AlertPolicy_Condition_EvaluationMissingData) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AlertPolicy_Condition_EvaluationMissingData.Descriptor instead. +func (AlertPolicy_Condition_EvaluationMissingData) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 0} +} + +// A description of the conditions under which some aspect of your system is +// considered to be "unhealthy" and the ways to notify people or services about +// this state. For an overview of alert policies, see +// [Introduction to Alerting](https://cloud.google.com/monitoring/alerts/). +type AlertPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required if the policy exists. The resource name for this policy. The + // format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + // + // `[ALERT_POLICY_ID]` is assigned by Cloud Monitoring when the policy + // is created. When calling the + // [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy] + // method, do not include the `name` field in the alerting policy passed as + // part of the request. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A short name or phrase used to identify the policy in dashboards, + // notifications, and incidents. To avoid confusion, don't use the same + // display name for multiple policies in the same project. The name is + // limited to 512 Unicode characters. + // + // The convention for the display_name of a PrometheusQueryLanguageCondition + // is "{rule group name}/{alert name}", where the {rule group name} and + // {alert name} should be taken from the corresponding Prometheus + // configuration file. This convention is not enforced. + // In any case the display_name is not a unique key of the AlertPolicy. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Documentation that is included with notifications and incidents related to + // this policy. Best practice is for the documentation to include information + // to help responders understand, mitigate, escalate, and correct the + // underlying problems detected by the alerting policy. 
Notification channels + // that have limited capacity might not show this documentation. + Documentation *AlertPolicy_Documentation `protobuf:"bytes,13,opt,name=documentation,proto3" json:"documentation,omitempty"` + // User-supplied key/value data to be used for organizing and + // identifying the `AlertPolicy` objects. + // + // The field can contain up to 64 entries. Each key and value is limited to + // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and + // values can contain only lowercase letters, numerals, underscores, and + // dashes. Keys must begin with a letter. + // + // Note that Prometheus {alert name} is a + // [valid Prometheus label + // names](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels), + // whereas Prometheus {rule group} is an unrestricted UTF-8 string. + // This means that they cannot be stored as-is in user labels, because + // they may contain characters that are not allowed in user-label values. + UserLabels map[string]string `protobuf:"bytes,16,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // A list of conditions for the policy. The conditions are combined by AND or + // OR according to the `combiner` field. If the combined conditions evaluate + // to true, then an incident is created. A policy can have from one to six + // conditions. + // If `condition_time_series_query_language` is present, it must be the only + // `condition`. + // If `condition_monitoring_query_language` is present, it must be the only + // `condition`. + Conditions []*AlertPolicy_Condition `protobuf:"bytes,12,rep,name=conditions,proto3" json:"conditions,omitempty"` + // How to combine the results of multiple conditions to determine if an + // incident should be opened. + // If `condition_time_series_query_language` is present, this must be + // `COMBINE_UNSPECIFIED`. + Combiner AlertPolicy_ConditionCombinerType `protobuf:"varint,6,opt,name=combiner,proto3,enum=google.monitoring.v3.AlertPolicy_ConditionCombinerType" json:"combiner,omitempty"` + // Whether or not the policy is enabled. On write, the default interpretation + // if unset is that the policy is enabled. On read, clients should not make + // any assumption about the state if it has not been populated. The + // field should always be populated on List and Get operations, unless + // a field projection has been specified that strips it out. + Enabled *wrapperspb.BoolValue `protobuf:"bytes,17,opt,name=enabled,proto3" json:"enabled,omitempty"` + // Read-only description of how the alert policy is invalid. This field is + // only set when the alert policy is invalid. An invalid alert policy will not + // generate incidents. + Validity *status.Status `protobuf:"bytes,18,opt,name=validity,proto3" json:"validity,omitempty"` + // Identifies the notification channels to which notifications should be sent + // when incidents are opened or closed or when new violations occur on + // an already opened incident. Each element of this array corresponds to + // the `name` field in each of the + // [`NotificationChannel`][google.monitoring.v3.NotificationChannel] + // objects that are returned from the [`ListNotificationChannels`] + // [google.monitoring.v3.NotificationChannelService.ListNotificationChannels] + // method. 
The format of the entries in this field is: + // + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + NotificationChannels []string `protobuf:"bytes,14,rep,name=notification_channels,json=notificationChannels,proto3" json:"notification_channels,omitempty"` + // A read-only record of the creation of the alerting policy. If provided + // in a call to create or update, this field will be ignored. + CreationRecord *MutationRecord `protobuf:"bytes,10,opt,name=creation_record,json=creationRecord,proto3" json:"creation_record,omitempty"` + // A read-only record of the most recent change to the alerting policy. If + // provided in a call to create or update, this field will be ignored. + MutationRecord *MutationRecord `protobuf:"bytes,11,opt,name=mutation_record,json=mutationRecord,proto3" json:"mutation_record,omitempty"` + // Control over how this alert policy's notification channels are notified. + AlertStrategy *AlertPolicy_AlertStrategy `protobuf:"bytes,21,opt,name=alert_strategy,json=alertStrategy,proto3" json:"alert_strategy,omitempty"` + // Optional. The severity of an alert policy indicates how important incidents + // generated by that policy are. The severity level will be displayed on the + // Incident detail page and in notifications. + Severity AlertPolicy_Severity `protobuf:"varint,22,opt,name=severity,proto3,enum=google.monitoring.v3.AlertPolicy_Severity" json:"severity,omitempty"` +} + +func (x *AlertPolicy) Reset() { + *x = AlertPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy) ProtoMessage() {} + +func (x *AlertPolicy) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy.ProtoReflect.Descriptor instead. 
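+// A minimal construction sketch for the AlertPolicy message above, as seen from a
+// caller's package; the display names and the threshold condition body are
+// illustrative placeholders:
+//
+//	policy := &monitoringpb.AlertPolicy{
+//		DisplayName: "High request latency",
+//		Combiner:    monitoringpb.AlertPolicy_OR,
+//		Conditions: []*monitoringpb.AlertPolicy_Condition{{
+//			DisplayName: "p99 latency above threshold",
+//			Condition:   &monitoringpb.AlertPolicy_Condition_ConditionThreshold{/* MetricThreshold, see below */},
+//		}},
+//	}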
+func (*AlertPolicy) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0} +} + +func (x *AlertPolicy) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *AlertPolicy) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *AlertPolicy) GetDocumentation() *AlertPolicy_Documentation { + if x != nil { + return x.Documentation + } + return nil +} + +func (x *AlertPolicy) GetUserLabels() map[string]string { + if x != nil { + return x.UserLabels + } + return nil +} + +func (x *AlertPolicy) GetConditions() []*AlertPolicy_Condition { + if x != nil { + return x.Conditions + } + return nil +} + +func (x *AlertPolicy) GetCombiner() AlertPolicy_ConditionCombinerType { + if x != nil { + return x.Combiner + } + return AlertPolicy_COMBINE_UNSPECIFIED +} + +func (x *AlertPolicy) GetEnabled() *wrapperspb.BoolValue { + if x != nil { + return x.Enabled + } + return nil +} + +func (x *AlertPolicy) GetValidity() *status.Status { + if x != nil { + return x.Validity + } + return nil +} + +func (x *AlertPolicy) GetNotificationChannels() []string { + if x != nil { + return x.NotificationChannels + } + return nil +} + +func (x *AlertPolicy) GetCreationRecord() *MutationRecord { + if x != nil { + return x.CreationRecord + } + return nil +} + +func (x *AlertPolicy) GetMutationRecord() *MutationRecord { + if x != nil { + return x.MutationRecord + } + return nil +} + +func (x *AlertPolicy) GetAlertStrategy() *AlertPolicy_AlertStrategy { + if x != nil { + return x.AlertStrategy + } + return nil +} + +func (x *AlertPolicy) GetSeverity() AlertPolicy_Severity { + if x != nil { + return x.Severity + } + return AlertPolicy_SEVERITY_UNSPECIFIED +} + +// Documentation that is included in the notifications and incidents +// pertaining to this policy. +type AlertPolicy_Documentation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The body of the documentation, interpreted according to `mime_type`. + // The content may not exceed 8,192 Unicode characters and may not exceed + // more than 10,240 bytes when encoded in UTF-8 format, whichever is + // smaller. This text can be [templatized by using + // variables](https://cloud.google.com/monitoring/alerts/doc-variables). + Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` + // The format of the `content` field. Presently, only the value + // `"text/markdown"` is supported. See + // [Markdown](https://en.wikipedia.org/wiki/Markdown) for more information. + MimeType string `protobuf:"bytes,2,opt,name=mime_type,json=mimeType,proto3" json:"mime_type,omitempty"` + // Optional. The subject line of the notification. The subject line may not + // exceed 10,240 bytes. In notifications generated by this policy, the + // contents of the subject line after variable expansion will be truncated + // to 255 bytes or shorter at the latest UTF-8 character boundary. The + // 255-byte limit is recommended by [this + // thread](https://stackoverflow.com/questions/1592291/what-is-the-email-subject-length-limit). + // It is both the limit imposed by some third-party ticketing products and + // it is common to define textual fields in databases as VARCHAR(255). + // + // The contents of the subject line can be [templatized by using + // variables](https://cloud.google.com/monitoring/alerts/doc-variables). 
+ // If this field is missing or empty, a default subject line will be + // generated. + Subject string `protobuf:"bytes,3,opt,name=subject,proto3" json:"subject,omitempty"` + // Optional. Links to content such as playbooks, repositories, and other + // resources. This field can contain up to 3 entries. + Links []*AlertPolicy_Documentation_Link `protobuf:"bytes,4,rep,name=links,proto3" json:"links,omitempty"` +} + +func (x *AlertPolicy_Documentation) Reset() { + *x = AlertPolicy_Documentation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Documentation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Documentation) ProtoMessage() {} + +func (x *AlertPolicy_Documentation) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Documentation.ProtoReflect.Descriptor instead. +func (*AlertPolicy_Documentation) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *AlertPolicy_Documentation) GetContent() string { + if x != nil { + return x.Content + } + return "" +} + +func (x *AlertPolicy_Documentation) GetMimeType() string { + if x != nil { + return x.MimeType + } + return "" +} + +func (x *AlertPolicy_Documentation) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + +func (x *AlertPolicy_Documentation) GetLinks() []*AlertPolicy_Documentation_Link { + if x != nil { + return x.Links + } + return nil +} + +// A condition is a true/false test that determines when an alerting policy +// should open an incident. If a condition evaluates to true, it signifies +// that something is wrong. +type AlertPolicy_Condition struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required if the condition exists. The unique resource name for this + // condition. Its format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID] + // + // `[CONDITION_ID]` is assigned by Cloud Monitoring when the + // condition is created as part of a new or updated alerting policy. + // + // When calling the + // [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy] + // method, do not include the `name` field in the conditions of the + // requested alerting policy. Cloud Monitoring creates the + // condition identifiers and includes them in the new policy. + // + // When calling the + // [alertPolicies.update][google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy] + // method to update a policy, including a condition `name` causes the + // existing condition to be updated. Conditions without names are added to + // the updated policy. Existing conditions are deleted if they are not + // updated. + // + // Best practice is to preserve `[CONDITION_ID]` if you make only small + // changes, such as those to condition thresholds, durations, or trigger + // values. Otherwise, treat the change as a new condition and let the + // existing condition be deleted. 
+ Name string `protobuf:"bytes,12,opt,name=name,proto3" json:"name,omitempty"` + // A short name or phrase used to identify the condition in dashboards, + // notifications, and incidents. To avoid confusion, don't use the same + // display name for multiple conditions in the same policy. + DisplayName string `protobuf:"bytes,6,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Only one of the following condition types will be specified. + // + // Types that are assignable to Condition: + // + // *AlertPolicy_Condition_ConditionThreshold + // *AlertPolicy_Condition_ConditionAbsent + // *AlertPolicy_Condition_ConditionMatchedLog + // *AlertPolicy_Condition_ConditionMonitoringQueryLanguage + // *AlertPolicy_Condition_ConditionPrometheusQueryLanguage + Condition isAlertPolicy_Condition_Condition `protobuf_oneof:"condition"` +} + +func (x *AlertPolicy_Condition) Reset() { + *x = AlertPolicy_Condition{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Condition) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Condition) ProtoMessage() {} + +func (x *AlertPolicy_Condition) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Condition.ProtoReflect.Descriptor instead. +func (*AlertPolicy_Condition) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *AlertPolicy_Condition) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *AlertPolicy_Condition) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (m *AlertPolicy_Condition) GetCondition() isAlertPolicy_Condition_Condition { + if m != nil { + return m.Condition + } + return nil +} + +func (x *AlertPolicy_Condition) GetConditionThreshold() *AlertPolicy_Condition_MetricThreshold { + if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionThreshold); ok { + return x.ConditionThreshold + } + return nil +} + +func (x *AlertPolicy_Condition) GetConditionAbsent() *AlertPolicy_Condition_MetricAbsence { + if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionAbsent); ok { + return x.ConditionAbsent + } + return nil +} + +func (x *AlertPolicy_Condition) GetConditionMatchedLog() *AlertPolicy_Condition_LogMatch { + if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionMatchedLog); ok { + return x.ConditionMatchedLog + } + return nil +} + +func (x *AlertPolicy_Condition) GetConditionMonitoringQueryLanguage() *AlertPolicy_Condition_MonitoringQueryLanguageCondition { + if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionMonitoringQueryLanguage); ok { + return x.ConditionMonitoringQueryLanguage + } + return nil +} + +func (x *AlertPolicy_Condition) GetConditionPrometheusQueryLanguage() *AlertPolicy_Condition_PrometheusQueryLanguageCondition { + if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionPrometheusQueryLanguage); ok { + return x.ConditionPrometheusQueryLanguage + } + return nil +} + +type isAlertPolicy_Condition_Condition interface { + isAlertPolicy_Condition_Condition() +} + +type 
AlertPolicy_Condition_ConditionThreshold struct { + // A condition that compares a time series against a threshold. + ConditionThreshold *AlertPolicy_Condition_MetricThreshold `protobuf:"bytes,1,opt,name=condition_threshold,json=conditionThreshold,proto3,oneof"` +} + +type AlertPolicy_Condition_ConditionAbsent struct { + // A condition that checks that a time series continues to + // receive new data points. + ConditionAbsent *AlertPolicy_Condition_MetricAbsence `protobuf:"bytes,2,opt,name=condition_absent,json=conditionAbsent,proto3,oneof"` +} + +type AlertPolicy_Condition_ConditionMatchedLog struct { + // A condition that checks for log messages matching given constraints. If + // set, no other conditions can be present. + ConditionMatchedLog *AlertPolicy_Condition_LogMatch `protobuf:"bytes,20,opt,name=condition_matched_log,json=conditionMatchedLog,proto3,oneof"` +} + +type AlertPolicy_Condition_ConditionMonitoringQueryLanguage struct { + // A condition that uses the Monitoring Query Language to define + // alerts. + ConditionMonitoringQueryLanguage *AlertPolicy_Condition_MonitoringQueryLanguageCondition `protobuf:"bytes,19,opt,name=condition_monitoring_query_language,json=conditionMonitoringQueryLanguage,proto3,oneof"` +} + +type AlertPolicy_Condition_ConditionPrometheusQueryLanguage struct { + // A condition that uses the Prometheus query language to define alerts. + ConditionPrometheusQueryLanguage *AlertPolicy_Condition_PrometheusQueryLanguageCondition `protobuf:"bytes,21,opt,name=condition_prometheus_query_language,json=conditionPrometheusQueryLanguage,proto3,oneof"` +} + +func (*AlertPolicy_Condition_ConditionThreshold) isAlertPolicy_Condition_Condition() {} + +func (*AlertPolicy_Condition_ConditionAbsent) isAlertPolicy_Condition_Condition() {} + +func (*AlertPolicy_Condition_ConditionMatchedLog) isAlertPolicy_Condition_Condition() {} + +func (*AlertPolicy_Condition_ConditionMonitoringQueryLanguage) isAlertPolicy_Condition_Condition() {} + +func (*AlertPolicy_Condition_ConditionPrometheusQueryLanguage) isAlertPolicy_Condition_Condition() {} + +// Control over how the notification channels in `notification_channels` +// are notified when this alert fires. +type AlertPolicy_AlertStrategy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required for alert policies with a `LogMatch` condition. + // + // This limit is not implemented for alert policies that are not log-based. + NotificationRateLimit *AlertPolicy_AlertStrategy_NotificationRateLimit `protobuf:"bytes,1,opt,name=notification_rate_limit,json=notificationRateLimit,proto3" json:"notification_rate_limit,omitempty"` + // If an alert policy that was active has no data for this long, any open + // incidents will close + AutoClose *durationpb.Duration `protobuf:"bytes,3,opt,name=auto_close,json=autoClose,proto3" json:"auto_close,omitempty"` + // Control how notifications will be sent out, on a per-channel basis. 
+ NotificationChannelStrategy []*AlertPolicy_AlertStrategy_NotificationChannelStrategy `protobuf:"bytes,4,rep,name=notification_channel_strategy,json=notificationChannelStrategy,proto3" json:"notification_channel_strategy,omitempty"` +} + +func (x *AlertPolicy_AlertStrategy) Reset() { + *x = AlertPolicy_AlertStrategy{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_AlertStrategy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_AlertStrategy) ProtoMessage() {} + +func (x *AlertPolicy_AlertStrategy) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_AlertStrategy.ProtoReflect.Descriptor instead. +func (*AlertPolicy_AlertStrategy) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 2} +} + +func (x *AlertPolicy_AlertStrategy) GetNotificationRateLimit() *AlertPolicy_AlertStrategy_NotificationRateLimit { + if x != nil { + return x.NotificationRateLimit + } + return nil +} + +func (x *AlertPolicy_AlertStrategy) GetAutoClose() *durationpb.Duration { + if x != nil { + return x.AutoClose + } + return nil +} + +func (x *AlertPolicy_AlertStrategy) GetNotificationChannelStrategy() []*AlertPolicy_AlertStrategy_NotificationChannelStrategy { + if x != nil { + return x.NotificationChannelStrategy + } + return nil +} + +// Links to content such as playbooks, repositories, and other resources. +type AlertPolicy_Documentation_Link struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A short display name for the link. The display name must not be empty + // or exceed 63 characters. Example: "playbook". + DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The url of a webpage. + // A url can be templatized by using variables + // in the path or the query parameters. The total length of a URL should + // not exceed 2083 characters before and after variable expansion. + // Example: "https://my_domain.com/playbook?name=${resource.name}" + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` +} + +func (x *AlertPolicy_Documentation_Link) Reset() { + *x = AlertPolicy_Documentation_Link{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Documentation_Link) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Documentation_Link) ProtoMessage() {} + +func (x *AlertPolicy_Documentation_Link) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Documentation_Link.ProtoReflect.Descriptor instead. 
+func (*AlertPolicy_Documentation_Link) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 0, 0} +} + +func (x *AlertPolicy_Documentation_Link) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *AlertPolicy_Documentation_Link) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +// Specifies how many time series must fail a predicate to trigger a +// condition. If not specified, then a `{count: 1}` trigger is used. +type AlertPolicy_Condition_Trigger struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A type of trigger. + // + // Types that are assignable to Type: + // + // *AlertPolicy_Condition_Trigger_Count + // *AlertPolicy_Condition_Trigger_Percent + Type isAlertPolicy_Condition_Trigger_Type `protobuf_oneof:"type"` +} + +func (x *AlertPolicy_Condition_Trigger) Reset() { + *x = AlertPolicy_Condition_Trigger{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Condition_Trigger) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Condition_Trigger) ProtoMessage() {} + +func (x *AlertPolicy_Condition_Trigger) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Condition_Trigger.ProtoReflect.Descriptor instead. +func (*AlertPolicy_Condition_Trigger) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 0} +} + +func (m *AlertPolicy_Condition_Trigger) GetType() isAlertPolicy_Condition_Trigger_Type { + if m != nil { + return m.Type + } + return nil +} + +func (x *AlertPolicy_Condition_Trigger) GetCount() int32 { + if x, ok := x.GetType().(*AlertPolicy_Condition_Trigger_Count); ok { + return x.Count + } + return 0 +} + +func (x *AlertPolicy_Condition_Trigger) GetPercent() float64 { + if x, ok := x.GetType().(*AlertPolicy_Condition_Trigger_Percent); ok { + return x.Percent + } + return 0 +} + +type isAlertPolicy_Condition_Trigger_Type interface { + isAlertPolicy_Condition_Trigger_Type() +} + +type AlertPolicy_Condition_Trigger_Count struct { + // The absolute number of time series that must fail + // the predicate for the condition to be triggered. + Count int32 `protobuf:"varint,1,opt,name=count,proto3,oneof"` +} + +type AlertPolicy_Condition_Trigger_Percent struct { + // The percentage of time series that must fail the + // predicate for the condition to be triggered. + Percent float64 `protobuf:"fixed64,2,opt,name=percent,proto3,oneof"` +} + +func (*AlertPolicy_Condition_Trigger_Count) isAlertPolicy_Condition_Trigger_Type() {} + +func (*AlertPolicy_Condition_Trigger_Percent) isAlertPolicy_Condition_Trigger_Type() {} + +// A condition type that compares a collection of time series +// against a threshold. +type AlertPolicy_Condition_MetricThreshold struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. 
A + // [filter](https://cloud.google.com/monitoring/api/v3/filters) that + // identifies which time series should be compared with the threshold. + // + // The filter is similar to the one that is specified in the + // [`ListTimeSeries` + // request](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) + // (that call is useful to verify the time series that will be retrieved / + // processed). The filter must specify the metric type and the resource + // type. Optionally, it can specify resource labels and metric labels. + // This field must not exceed 2048 Unicode characters in length. + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // Specifies the alignment of data points in individual time series as + // well as how to combine the retrieved time series together (such as + // when aggregating multiple streams on each resource to a single + // stream for each resource or when aggregating streams across all + // members of a group of resources). Multiple aggregations + // are applied in the order specified. + // + // This field is similar to the one in the [`ListTimeSeries` + // request](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). + // It is advisable to use the `ListTimeSeries` method when debugging this + // field. + Aggregations []*Aggregation `protobuf:"bytes,8,rep,name=aggregations,proto3" json:"aggregations,omitempty"` + // A [filter](https://cloud.google.com/monitoring/api/v3/filters) that + // identifies a time series that should be used as the denominator of a + // ratio that will be compared with the threshold. If a + // `denominator_filter` is specified, the time series specified by the + // `filter` field will be used as the numerator. + // + // The filter must specify the metric type and optionally may contain + // restrictions on resource type, resource labels, and metric labels. + // This field may not exceed 2048 Unicode characters in length. + DenominatorFilter string `protobuf:"bytes,9,opt,name=denominator_filter,json=denominatorFilter,proto3" json:"denominator_filter,omitempty"` + // Specifies the alignment of data points in individual time series + // selected by `denominatorFilter` as + // well as how to combine the retrieved time series together (such as + // when aggregating multiple streams on each resource to a single + // stream for each resource or when aggregating streams across all + // members of a group of resources). + // + // When computing ratios, the `aggregations` and + // `denominator_aggregations` fields must use the same alignment period + // and produce time series that have the same periodicity and labels. + DenominatorAggregations []*Aggregation `protobuf:"bytes,10,rep,name=denominator_aggregations,json=denominatorAggregations,proto3" json:"denominator_aggregations,omitempty"` + // When this field is present, the `MetricThreshold` condition forecasts + // whether the time series is predicted to violate the threshold within + // the `forecast_horizon`. When this field is not set, the + // `MetricThreshold` tests the current value of the timeseries against the + // threshold. + ForecastOptions *AlertPolicy_Condition_MetricThreshold_ForecastOptions `protobuf:"bytes,12,opt,name=forecast_options,json=forecastOptions,proto3" json:"forecast_options,omitempty"` + // The comparison to apply between the time series (indicated by `filter` + // and `aggregation`) and the threshold (indicated by `threshold_value`). 
+ // The comparison is applied on each time series, with the time series + // on the left-hand side and the threshold on the right-hand side. + // + // Only `COMPARISON_LT` and `COMPARISON_GT` are supported currently. + Comparison ComparisonType `protobuf:"varint,4,opt,name=comparison,proto3,enum=google.monitoring.v3.ComparisonType" json:"comparison,omitempty"` + // A value against which to compare the time series. + ThresholdValue float64 `protobuf:"fixed64,5,opt,name=threshold_value,json=thresholdValue,proto3" json:"threshold_value,omitempty"` + // The amount of time that a time series must violate the + // threshold to be considered failing. Currently, only values + // that are a multiple of a minute--e.g., 0, 60, 120, or 300 + // seconds--are supported. If an invalid value is given, an + // error will be returned. When choosing a duration, it is useful to + // keep in mind the frequency of the underlying time series data + // (which may also be affected by any alignments specified in the + // `aggregations` field); a good duration is long enough so that a single + // outlier does not generate spurious alerts, but short enough that + // unhealthy states are detected and alerted on quickly. + Duration *durationpb.Duration `protobuf:"bytes,6,opt,name=duration,proto3" json:"duration,omitempty"` + // The number/percent of time series for which the comparison must hold + // in order for the condition to trigger. If unspecified, then the + // condition will trigger if the comparison is true for any of the + // time series that have been identified by `filter` and `aggregations`, + // or by the ratio, if `denominator_filter` and `denominator_aggregations` + // are specified. + Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,7,opt,name=trigger,proto3" json:"trigger,omitempty"` + // A condition control that determines how metric-threshold conditions + // are evaluated when data stops arriving. To use this control, the value + // of the `duration` field must be greater than or equal to 60 seconds. + EvaluationMissingData AlertPolicy_Condition_EvaluationMissingData `protobuf:"varint,11,opt,name=evaluation_missing_data,json=evaluationMissingData,proto3,enum=google.monitoring.v3.AlertPolicy_Condition_EvaluationMissingData" json:"evaluation_missing_data,omitempty"` +} + +func (x *AlertPolicy_Condition_MetricThreshold) Reset() { + *x = AlertPolicy_Condition_MetricThreshold{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Condition_MetricThreshold) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Condition_MetricThreshold) ProtoMessage() {} + +func (x *AlertPolicy_Condition_MetricThreshold) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Condition_MetricThreshold.ProtoReflect.Descriptor instead. 
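+// A minimal construction sketch for the MetricThreshold condition above; the filter,
+// threshold value, and duration are illustrative placeholders:
+//
+//	threshold := &monitoringpb.AlertPolicy_Condition_MetricThreshold{
+//		Filter:         `metric.type = "compute.googleapis.com/instance/cpu/utilization" AND resource.type = "gce_instance"`,
+//		Comparison:     monitoringpb.ComparisonType_COMPARISON_GT,
+//		ThresholdValue: 0.9,
+//		Duration:       &durationpb.Duration{Seconds: 300},
+//	}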
+func (*AlertPolicy_Condition_MetricThreshold) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 1} +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetAggregations() []*Aggregation { + if x != nil { + return x.Aggregations + } + return nil +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetDenominatorFilter() string { + if x != nil { + return x.DenominatorFilter + } + return "" +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetDenominatorAggregations() []*Aggregation { + if x != nil { + return x.DenominatorAggregations + } + return nil +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetForecastOptions() *AlertPolicy_Condition_MetricThreshold_ForecastOptions { + if x != nil { + return x.ForecastOptions + } + return nil +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetComparison() ComparisonType { + if x != nil { + return x.Comparison + } + return ComparisonType_COMPARISON_UNSPECIFIED +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetThresholdValue() float64 { + if x != nil { + return x.ThresholdValue + } + return 0 +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetDuration() *durationpb.Duration { + if x != nil { + return x.Duration + } + return nil +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetTrigger() *AlertPolicy_Condition_Trigger { + if x != nil { + return x.Trigger + } + return nil +} + +func (x *AlertPolicy_Condition_MetricThreshold) GetEvaluationMissingData() AlertPolicy_Condition_EvaluationMissingData { + if x != nil { + return x.EvaluationMissingData + } + return AlertPolicy_Condition_EVALUATION_MISSING_DATA_UNSPECIFIED +} + +// A condition type that checks that monitored resources +// are reporting data. The configuration defines a metric and +// a set of monitored resources. The predicate is considered in violation +// when a time series for the specified metric of a monitored +// resource does not include any data in the specified `duration`. +type AlertPolicy_Condition_MetricAbsence struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. A + // [filter](https://cloud.google.com/monitoring/api/v3/filters) that + // identifies which time series should be compared with the threshold. + // + // The filter is similar to the one that is specified in the + // [`ListTimeSeries` + // request](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) + // (that call is useful to verify the time series that will be retrieved / + // processed). The filter must specify the metric type and the resource + // type. Optionally, it can specify resource labels and metric labels. + // This field must not exceed 2048 Unicode characters in length. + Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + // Specifies the alignment of data points in individual time series as + // well as how to combine the retrieved time series together (such as + // when aggregating multiple streams on each resource to a single + // stream for each resource or when aggregating streams across all + // members of a group of resources). Multiple aggregations + // are applied in the order specified. + // + // This field is similar to the one in the [`ListTimeSeries` + // request](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). 
+ // It is advisable to use the `ListTimeSeries` method when debugging this + // field. + Aggregations []*Aggregation `protobuf:"bytes,5,rep,name=aggregations,proto3" json:"aggregations,omitempty"` + // The amount of time that a time series must fail to report new + // data to be considered failing. The minimum value of this field + // is 120 seconds. Larger values that are a multiple of a + // minute--for example, 240 or 300 seconds--are supported. + // If an invalid value is given, an + // error will be returned. The `Duration.nanos` field is + // ignored. + Duration *durationpb.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"` + // The number/percent of time series for which the comparison must hold + // in order for the condition to trigger. If unspecified, then the + // condition will trigger if the comparison is true for any of the + // time series that have been identified by `filter` and `aggregations`. + Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,3,opt,name=trigger,proto3" json:"trigger,omitempty"` +} + +func (x *AlertPolicy_Condition_MetricAbsence) Reset() { + *x = AlertPolicy_Condition_MetricAbsence{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Condition_MetricAbsence) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Condition_MetricAbsence) ProtoMessage() {} + +func (x *AlertPolicy_Condition_MetricAbsence) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Condition_MetricAbsence.ProtoReflect.Descriptor instead. +func (*AlertPolicy_Condition_MetricAbsence) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 2} +} + +func (x *AlertPolicy_Condition_MetricAbsence) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *AlertPolicy_Condition_MetricAbsence) GetAggregations() []*Aggregation { + if x != nil { + return x.Aggregations + } + return nil +} + +func (x *AlertPolicy_Condition_MetricAbsence) GetDuration() *durationpb.Duration { + if x != nil { + return x.Duration + } + return nil +} + +func (x *AlertPolicy_Condition_MetricAbsence) GetTrigger() *AlertPolicy_Condition_Trigger { + if x != nil { + return x.Trigger + } + return nil +} + +// A condition type that checks whether a log message in the [scoping +// project](https://cloud.google.com/monitoring/api/v3#project_name) +// satisfies the given filter. Logs from other projects in the metrics +// scope are not evaluated. +type AlertPolicy_Condition_LogMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. A logs-based filter. See [Advanced Logs + // Queries](https://cloud.google.com/logging/docs/view/advanced-queries) + // for how this filter should be constructed. + Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + // Optional. A map from a label key to an extractor expression, which is + // used to extract the value for this label key. 
Each entry in this map is + // a specification for how data should be extracted from log entries that + // match `filter`. Each combination of extracted values is treated as a + // separate rule for the purposes of triggering notifications. Label keys + // and corresponding values can be used in notifications generated by this + // condition. + // + // Please see [the documentation on logs-based metric + // `valueExtractor`s](https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics#LogMetric.FIELDS.value_extractor) + // for syntax and examples. + LabelExtractors map[string]string `protobuf:"bytes,2,rep,name=label_extractors,json=labelExtractors,proto3" json:"label_extractors,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *AlertPolicy_Condition_LogMatch) Reset() { + *x = AlertPolicy_Condition_LogMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Condition_LogMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Condition_LogMatch) ProtoMessage() {} + +func (x *AlertPolicy_Condition_LogMatch) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Condition_LogMatch.ProtoReflect.Descriptor instead. +func (*AlertPolicy_Condition_LogMatch) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 3} +} + +func (x *AlertPolicy_Condition_LogMatch) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *AlertPolicy_Condition_LogMatch) GetLabelExtractors() map[string]string { + if x != nil { + return x.LabelExtractors + } + return nil +} + +// A condition type that allows alert policies to be defined using +// [Monitoring Query Language](https://cloud.google.com/monitoring/mql). +type AlertPolicy_Condition_MonitoringQueryLanguageCondition struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // [Monitoring Query Language](https://cloud.google.com/monitoring/mql) + // query that outputs a boolean stream. + Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + // The amount of time that a time series must violate the + // threshold to be considered failing. Currently, only values + // that are a multiple of a minute--e.g., 0, 60, 120, or 300 + // seconds--are supported. If an invalid value is given, an + // error will be returned. When choosing a duration, it is useful to + // keep in mind the frequency of the underlying time series data + // (which may also be affected by any alignments specified in the + // `aggregations` field); a good duration is long enough so that a single + // outlier does not generate spurious alerts, but short enough that + // unhealthy states are detected and alerted on quickly. + Duration *durationpb.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"` + // The number/percent of time series for which the comparison must hold + // in order for the condition to trigger. 
If unspecified, then the + // condition will trigger if the comparison is true for any of the + // time series that have been identified by `filter` and `aggregations`, + // or by the ratio, if `denominator_filter` and `denominator_aggregations` + // are specified. + Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,3,opt,name=trigger,proto3" json:"trigger,omitempty"` + // A condition control that determines how metric-threshold conditions + // are evaluated when data stops arriving. + EvaluationMissingData AlertPolicy_Condition_EvaluationMissingData `protobuf:"varint,4,opt,name=evaluation_missing_data,json=evaluationMissingData,proto3,enum=google.monitoring.v3.AlertPolicy_Condition_EvaluationMissingData" json:"evaluation_missing_data,omitempty"` +} + +func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) Reset() { + *x = AlertPolicy_Condition_MonitoringQueryLanguageCondition{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Condition_MonitoringQueryLanguageCondition) ProtoMessage() {} + +func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Condition_MonitoringQueryLanguageCondition.ProtoReflect.Descriptor instead. +func (*AlertPolicy_Condition_MonitoringQueryLanguageCondition) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 4} +} + +func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) GetQuery() string { + if x != nil { + return x.Query + } + return "" +} + +func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) GetDuration() *durationpb.Duration { + if x != nil { + return x.Duration + } + return nil +} + +func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) GetTrigger() *AlertPolicy_Condition_Trigger { + if x != nil { + return x.Trigger + } + return nil +} + +func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) GetEvaluationMissingData() AlertPolicy_Condition_EvaluationMissingData { + if x != nil { + return x.EvaluationMissingData + } + return AlertPolicy_Condition_EVALUATION_MISSING_DATA_UNSPECIFIED +} + +// A condition type that allows alert policies to be defined using +// [Prometheus Query Language +// (PromQL)](https://prometheus.io/docs/prometheus/latest/querying/basics/). +// +// The PrometheusQueryLanguageCondition message contains information +// from a Prometheus alerting rule and its associated rule group. +// +// A Prometheus alerting rule is described +// [here](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/). +// The semantics of a Prometheus alerting rule is described +// [here](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#rule). +// +// A Prometheus rule group is described +// [here](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/). 
+// The semantics of a Prometheus rule group is described +// [here](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#rule_group). +// +// Because Cloud Alerting has no representation of a Prometheus rule +// group resource, we must embed the information of the parent rule +// group inside each of the conditions that refer to it. We must also +// update the contents of all Prometheus alerts in case the information +// of their rule group changes. +// +// The PrometheusQueryLanguageCondition protocol buffer combines the +// information of the corresponding rule group and alerting rule. +// The structure of the PrometheusQueryLanguageCondition protocol buffer +// does NOT mimic the structure of the Prometheus rule group and alerting +// rule YAML declarations. The PrometheusQueryLanguageCondition protocol +// buffer may change in the future to support future rule group and/or +// alerting rule features. There are no new such features at the present +// time (2023-06-26). +type AlertPolicy_Condition_PrometheusQueryLanguageCondition struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The PromQL expression to evaluate. Every evaluation cycle + // this expression is evaluated at the current time, and all resultant + // time series become pending/firing alerts. This field must not be empty. + Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + // Optional. Alerts are considered firing once their PromQL expression was + // evaluated to be "true" for this long. + // Alerts whose PromQL expression was not evaluated to be "true" for + // long enough are considered pending. + // Must be a non-negative duration or missing. + // This field is optional. Its default value is zero. + Duration *durationpb.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"` + // Optional. How often this rule should be evaluated. + // Must be a positive multiple of 30 seconds or missing. + // This field is optional. Its default value is 30 seconds. + // If this PrometheusQueryLanguageCondition was generated from a + // Prometheus alerting rule, then this value should be taken from the + // enclosing rule group. + EvaluationInterval *durationpb.Duration `protobuf:"bytes,3,opt,name=evaluation_interval,json=evaluationInterval,proto3" json:"evaluation_interval,omitempty"` + // Optional. Labels to add to or overwrite in the PromQL query result. + // Label names [must be + // valid](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels). + // Label values can be [templatized by using + // variables](https://cloud.google.com/monitoring/alerts/doc-variables). + // The only available variable names are the names of the labels in the + // PromQL result, including "__name__" and "value". "labels" may be empty. + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Optional. The rule group name of this alert in the corresponding + // Prometheus configuration file. + // + // Some external tools may require this field to be populated correctly + // in order to refer to the original Prometheus configuration file. + // The rule group name and the alert name are necessary to update the + // relevant AlertPolicies in case the definition of the rule group changes + // in the future. + // + // This field is optional. 
If this field is not empty, then it must + // contain a valid UTF-8 string. + // This field may not exceed 2048 Unicode characters in length. + RuleGroup string `protobuf:"bytes,5,opt,name=rule_group,json=ruleGroup,proto3" json:"rule_group,omitempty"` + // Optional. The alerting rule name of this alert in the corresponding + // Prometheus configuration file. + // + // Some external tools may require this field to be populated correctly + // in order to refer to the original Prometheus configuration file. + // The rule group name and the alert name are necessary to update the + // relevant AlertPolicies in case the definition of the rule group changes + // in the future. + // + // This field is optional. If this field is not empty, then it must be a + // [valid Prometheus label + // name](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels). + // This field may not exceed 2048 Unicode characters in length. + AlertRule string `protobuf:"bytes,6,opt,name=alert_rule,json=alertRule,proto3" json:"alert_rule,omitempty"` +} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) Reset() { + *x = AlertPolicy_Condition_PrometheusQueryLanguageCondition{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Condition_PrometheusQueryLanguageCondition) ProtoMessage() {} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Condition_PrometheusQueryLanguageCondition.ProtoReflect.Descriptor instead. +func (*AlertPolicy_Condition_PrometheusQueryLanguageCondition) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 5} +} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetQuery() string { + if x != nil { + return x.Query + } + return "" +} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetDuration() *durationpb.Duration { + if x != nil { + return x.Duration + } + return nil +} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetEvaluationInterval() *durationpb.Duration { + if x != nil { + return x.EvaluationInterval + } + return nil +} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetRuleGroup() string { + if x != nil { + return x.RuleGroup + } + return "" +} + +func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetAlertRule() string { + if x != nil { + return x.AlertRule + } + return "" +} + +// Options used when forecasting the time series and testing +// the predicted value against the threshold. +type AlertPolicy_Condition_MetricThreshold_ForecastOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. 
The length of time into the future to forecast whether a + // time series will violate the threshold. If the predicted value is + // found to violate the threshold, and the violation is observed in all + // forecasts made for the configured `duration`, then the time series is + // considered to be failing. + // The forecast horizon can range from 1 hour to 60 hours. + ForecastHorizon *durationpb.Duration `protobuf:"bytes,1,opt,name=forecast_horizon,json=forecastHorizon,proto3" json:"forecast_horizon,omitempty"` +} + +func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) Reset() { + *x = AlertPolicy_Condition_MetricThreshold_ForecastOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_Condition_MetricThreshold_ForecastOptions) ProtoMessage() {} + +func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_Condition_MetricThreshold_ForecastOptions.ProtoReflect.Descriptor instead. +func (*AlertPolicy_Condition_MetricThreshold_ForecastOptions) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 1, 0} +} + +func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) GetForecastHorizon() *durationpb.Duration { + if x != nil { + return x.ForecastHorizon + } + return nil +} + +// Control over the rate of notifications sent to this alert policy's +// notification channels. +type AlertPolicy_AlertStrategy_NotificationRateLimit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Not more than one notification per `period`. + Period *durationpb.Duration `protobuf:"bytes,1,opt,name=period,proto3" json:"period,omitempty"` +} + +func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) Reset() { + *x = AlertPolicy_AlertStrategy_NotificationRateLimit{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_AlertStrategy_NotificationRateLimit) ProtoMessage() {} + +func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_AlertStrategy_NotificationRateLimit.ProtoReflect.Descriptor instead. 
+func (*AlertPolicy_AlertStrategy_NotificationRateLimit) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 2, 0} +} + +func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) GetPeriod() *durationpb.Duration { + if x != nil { + return x.Period + } + return nil +} + +// Control over how the notification channels in `notification_channels` +// are notified when this alert fires, on a per-channel basis. +type AlertPolicy_AlertStrategy_NotificationChannelStrategy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The full REST resource name for the notification channels that these + // settings apply to. Each of these correspond to the name field in one + // of the NotificationChannel objects referenced in the + // notification_channels field of this AlertPolicy. + // The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + NotificationChannelNames []string `protobuf:"bytes,1,rep,name=notification_channel_names,json=notificationChannelNames,proto3" json:"notification_channel_names,omitempty"` + // The frequency at which to send reminder notifications for open + // incidents. + RenotifyInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=renotify_interval,json=renotifyInterval,proto3" json:"renotify_interval,omitempty"` +} + +func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) Reset() { + *x = AlertPolicy_AlertStrategy_NotificationChannelStrategy{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlertPolicy_AlertStrategy_NotificationChannelStrategy) ProtoMessage() {} + +func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlertPolicy_AlertStrategy_NotificationChannelStrategy.ProtoReflect.Descriptor instead. 
+func (*AlertPolicy_AlertStrategy_NotificationChannelStrategy) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 2, 1} +} + +func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) GetNotificationChannelNames() []string { + if x != nil { + return x.NotificationChannelNames + } + return nil +} + +func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) GetRenotifyInterval() *durationpb.Duration { + if x != nil { + return x.RenotifyInterval + } + return nil +} + +var File_google_monitoring_v3_alert_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_alert_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, + 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x75, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x83, 0x2b, 0x0a, + 0x0b, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x55, 0x0a, 0x0d, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x44, 0x6f, + 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x64, 0x6f, 0x63, + 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x52, 0x0a, 0x0b, 0x75, 0x73, + 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x4b, + 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0c, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x53, 0x0a, 0x08, 0x63, + 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x37, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x62, 0x69, 0x6e, + 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x63, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x72, + 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x08, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x69, + 0x74, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x08, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x15, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x18, + 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0x4d, 0x0a, 0x0f, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x75, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0e, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x4d, 0x0a, 0x0f, 0x6d, 0x75, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0e, 0x6d, 0x75, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x56, 0x0a, 0x0e, 0x61, 0x6c, 0x65, + 0x72, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x15, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 
0x65, 0x72, 0x74, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, + 0x67, 0x79, 0x52, 0x0d, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, + 0x79, 0x12, 0x4b, 0x0a, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x16, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x1a, 0xf3, + 0x01, 0x0a, 0x0d, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, + 0x6d, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6d, + 0x69, 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x73, + 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x4f, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, + 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3b, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, + 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x75, 0x72, 0x6c, 0x1a, 0x92, 0x1a, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, + 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, + 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x6e, 0x0a, 0x13, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, + 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, + 0x6f, 0x6c, 0x64, 0x48, 0x00, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x66, 0x0a, 0x10, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 
0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, + 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x62, 0x73, 0x65, 0x6e, + 0x74, 0x12, 0x6a, 0x0a, 0x15, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, + 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x13, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x12, 0x9d, 0x01, + 0x0a, 0x23, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x6c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, + 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x20, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x9d, 0x01, + 0x0a, 0x23, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x6d, + 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x6c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, + 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x20, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x1a, 0x45, 0x0a, + 0x07, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x12, 0x1a, 0x0a, 0x07, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x01, 0x48, 0x00, 0x52, 0x07, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x06, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x1a, 0xc8, 0x06, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, + 0x68, 0x72, 0x65, 
0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x45, 0x0a, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2d, 0x0a, 0x12, + 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, + 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5c, 0x0a, 0x18, 0x64, + 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x67, 0x67, 0x72, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x17, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x41, 0x67, 0x67, + 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x76, 0x0a, 0x10, 0x66, 0x6f, 0x72, + 0x65, 0x63, 0x61, 0x73, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, + 0x2e, 0x46, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x44, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, + 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6d, + 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x68, 0x72, 0x65, 0x73, + 0x68, 0x6f, 0x6c, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x0e, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, + 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 
0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x07, 0x74, + 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x79, 0x0a, 0x17, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x41, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, + 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x52, 0x15, 0x65, 0x76, 0x61, 0x6c, + 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, + 0x61, 0x1a, 0x5c, 0x0a, 0x0f, 0x46, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, + 0x5f, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, + 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x48, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x1a, + 0xf9, 0x01, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x63, + 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x45, + 0x0a, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, + 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x07, + 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, + 0x65, 0x72, 0x52, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x1a, 0xe1, 0x01, 0x0a, 0x08, + 0x4c, 0x6f, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x74, 0x0a, 0x10, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x65, + 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x49, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 
0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x67, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, + 0x63, 0x74, 0x6f, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, + 0xb9, 0x02, 0x0a, 0x20, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x4d, 0x0a, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, + 0x12, 0x79, 0x0a, 0x17, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x41, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, + 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, + 0x44, 0x61, 0x74, 0x61, 0x52, 0x15, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x1a, 0xc4, 0x03, 0x0a, 0x20, + 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x19, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x3a, 0x0a, 0x08, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 
0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4f, 0x0a, 0x13, 0x65, 0x76, 0x61, 0x6c, 0x75, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x12, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x75, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x58, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, + 0x22, 0x0a, 0x0a, 0x72, 0x75, 0x6c, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x72, 0x75, 0x6c, 0x65, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x12, 0x22, 0x0a, 0x0a, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x72, 0x75, 0x6c, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x61, 0x6c, + 0x65, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0xad, 0x01, 0x0a, 0x15, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x12, 0x27, 0x0a, 0x23, + 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, + 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x24, 0x0a, 0x20, 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, + 0x5f, 0x49, 0x4e, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x22, 0x0a, 0x1e, 0x45, + 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, + 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, + 0x21, 0x0a, 0x1d, 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, + 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x4e, 0x4f, 0x5f, 0x4f, 0x50, + 0x10, 0x03, 0x3a, 0x97, 0x02, 0xea, 0x41, 0x93, 0x02, 0x0a, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 
0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x61, 0x6c, + 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, + 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x7d, 0x12, 0x50, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, + 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, + 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x2f, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x7d, 0x12, 0x44, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, + 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x7d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, + 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0x0b, 0x0a, 0x09, + 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcc, 0x04, 0x0a, 0x0d, 0x41, 0x6c, + 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x7d, 0x0a, 0x17, 0x6e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x74, 0x65, + 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, + 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x52, 0x15, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x38, 0x0a, 0x0a, 0x61, 0x75, + 0x74, 0x6f, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x43, + 0x6c, 0x6f, 0x73, 0x65, 0x12, 0x8f, 0x01, 0x0a, 0x1d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x73, 0x74, + 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, + 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, + 0x6c, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x1b, 0x6e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, 0x74, + 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x1a, 
0x4a, 0x0a, 0x15, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, + 0x31, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, + 0x6f, 0x64, 0x1a, 0xa3, 0x01, 0x0a, 0x1b, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, + 0x67, 0x79, 0x12, 0x3c, 0x0a, 0x1a, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x18, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x12, 0x46, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x5f, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, 0x72, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x61, 0x0a, 0x15, 0x43, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x17, 0x0a, 0x13, 0x43, 0x4f, 0x4d, 0x42, 0x49, 0x4e, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x44, + 0x10, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x4f, 0x52, 0x10, 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x41, 0x4e, + 0x44, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x49, 0x4e, 0x47, 0x5f, + 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x03, 0x22, 0x4a, 0x0a, 0x08, 0x53, 0x65, + 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, + 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x49, 0x54, 0x49, 0x43, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x09, + 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x57, 0x41, 0x52, + 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x3a, 0xc9, 0x01, 0xea, 0x41, 0xc5, 0x01, 0x0a, 0x25, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x12, 0x39, 0x6f, 0x72, 0x67, 0x61, 
0x6e, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, + 0x12, 0x2d, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, + 0x72, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x12, + 0x01, 0x2a, 0x42, 0xc5, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, + 0x0a, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, + 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, + 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_alert_proto_rawDescOnce sync.Once + file_google_monitoring_v3_alert_proto_rawDescData = file_google_monitoring_v3_alert_proto_rawDesc +) + +func file_google_monitoring_v3_alert_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_alert_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_alert_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_alert_proto_rawDescData) + }) + return file_google_monitoring_v3_alert_proto_rawDescData +} + +var file_google_monitoring_v3_alert_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_google_monitoring_v3_alert_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_google_monitoring_v3_alert_proto_goTypes = []any{ + (AlertPolicy_ConditionCombinerType)(0), // 0: google.monitoring.v3.AlertPolicy.ConditionCombinerType + (AlertPolicy_Severity)(0), // 1: google.monitoring.v3.AlertPolicy.Severity + (AlertPolicy_Condition_EvaluationMissingData)(0), // 2: google.monitoring.v3.AlertPolicy.Condition.EvaluationMissingData + (*AlertPolicy)(nil), // 3: google.monitoring.v3.AlertPolicy + (*AlertPolicy_Documentation)(nil), // 4: google.monitoring.v3.AlertPolicy.Documentation + (*AlertPolicy_Condition)(nil), // 5: google.monitoring.v3.AlertPolicy.Condition + (*AlertPolicy_AlertStrategy)(nil), // 6: google.monitoring.v3.AlertPolicy.AlertStrategy + nil, // 7: google.monitoring.v3.AlertPolicy.UserLabelsEntry + (*AlertPolicy_Documentation_Link)(nil), // 8: google.monitoring.v3.AlertPolicy.Documentation.Link + (*AlertPolicy_Condition_Trigger)(nil), // 9: google.monitoring.v3.AlertPolicy.Condition.Trigger + 
(*AlertPolicy_Condition_MetricThreshold)(nil), // 10: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold + (*AlertPolicy_Condition_MetricAbsence)(nil), // 11: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence + (*AlertPolicy_Condition_LogMatch)(nil), // 12: google.monitoring.v3.AlertPolicy.Condition.LogMatch + (*AlertPolicy_Condition_MonitoringQueryLanguageCondition)(nil), // 13: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition + (*AlertPolicy_Condition_PrometheusQueryLanguageCondition)(nil), // 14: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition + (*AlertPolicy_Condition_MetricThreshold_ForecastOptions)(nil), // 15: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions + nil, // 16: google.monitoring.v3.AlertPolicy.Condition.LogMatch.LabelExtractorsEntry + nil, // 17: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.LabelsEntry + (*AlertPolicy_AlertStrategy_NotificationRateLimit)(nil), // 18: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit + (*AlertPolicy_AlertStrategy_NotificationChannelStrategy)(nil), // 19: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy + (*wrapperspb.BoolValue)(nil), // 20: google.protobuf.BoolValue + (*status.Status)(nil), // 21: google.rpc.Status + (*MutationRecord)(nil), // 22: google.monitoring.v3.MutationRecord + (*durationpb.Duration)(nil), // 23: google.protobuf.Duration + (*Aggregation)(nil), // 24: google.monitoring.v3.Aggregation + (ComparisonType)(0), // 25: google.monitoring.v3.ComparisonType +} +var file_google_monitoring_v3_alert_proto_depIdxs = []int32{ + 4, // 0: google.monitoring.v3.AlertPolicy.documentation:type_name -> google.monitoring.v3.AlertPolicy.Documentation + 7, // 1: google.monitoring.v3.AlertPolicy.user_labels:type_name -> google.monitoring.v3.AlertPolicy.UserLabelsEntry + 5, // 2: google.monitoring.v3.AlertPolicy.conditions:type_name -> google.monitoring.v3.AlertPolicy.Condition + 0, // 3: google.monitoring.v3.AlertPolicy.combiner:type_name -> google.monitoring.v3.AlertPolicy.ConditionCombinerType + 20, // 4: google.monitoring.v3.AlertPolicy.enabled:type_name -> google.protobuf.BoolValue + 21, // 5: google.monitoring.v3.AlertPolicy.validity:type_name -> google.rpc.Status + 22, // 6: google.monitoring.v3.AlertPolicy.creation_record:type_name -> google.monitoring.v3.MutationRecord + 22, // 7: google.monitoring.v3.AlertPolicy.mutation_record:type_name -> google.monitoring.v3.MutationRecord + 6, // 8: google.monitoring.v3.AlertPolicy.alert_strategy:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy + 1, // 9: google.monitoring.v3.AlertPolicy.severity:type_name -> google.monitoring.v3.AlertPolicy.Severity + 8, // 10: google.monitoring.v3.AlertPolicy.Documentation.links:type_name -> google.monitoring.v3.AlertPolicy.Documentation.Link + 10, // 11: google.monitoring.v3.AlertPolicy.Condition.condition_threshold:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricThreshold + 11, // 12: google.monitoring.v3.AlertPolicy.Condition.condition_absent:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricAbsence + 12, // 13: google.monitoring.v3.AlertPolicy.Condition.condition_matched_log:type_name -> google.monitoring.v3.AlertPolicy.Condition.LogMatch + 13, // 14: google.monitoring.v3.AlertPolicy.Condition.condition_monitoring_query_language:type_name -> google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition + 14, // 15: 
google.monitoring.v3.AlertPolicy.Condition.condition_prometheus_query_language:type_name -> google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition + 18, // 16: google.monitoring.v3.AlertPolicy.AlertStrategy.notification_rate_limit:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit + 23, // 17: google.monitoring.v3.AlertPolicy.AlertStrategy.auto_close:type_name -> google.protobuf.Duration + 19, // 18: google.monitoring.v3.AlertPolicy.AlertStrategy.notification_channel_strategy:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy + 24, // 19: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.aggregations:type_name -> google.monitoring.v3.Aggregation + 24, // 20: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.denominator_aggregations:type_name -> google.monitoring.v3.Aggregation + 15, // 21: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.forecast_options:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions + 25, // 22: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.comparison:type_name -> google.monitoring.v3.ComparisonType + 23, // 23: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.duration:type_name -> google.protobuf.Duration + 9, // 24: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger + 2, // 25: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.evaluation_missing_data:type_name -> google.monitoring.v3.AlertPolicy.Condition.EvaluationMissingData + 24, // 26: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.aggregations:type_name -> google.monitoring.v3.Aggregation + 23, // 27: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.duration:type_name -> google.protobuf.Duration + 9, // 28: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger + 16, // 29: google.monitoring.v3.AlertPolicy.Condition.LogMatch.label_extractors:type_name -> google.monitoring.v3.AlertPolicy.Condition.LogMatch.LabelExtractorsEntry + 23, // 30: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.duration:type_name -> google.protobuf.Duration + 9, // 31: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger + 2, // 32: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.evaluation_missing_data:type_name -> google.monitoring.v3.AlertPolicy.Condition.EvaluationMissingData + 23, // 33: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.duration:type_name -> google.protobuf.Duration + 23, // 34: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.evaluation_interval:type_name -> google.protobuf.Duration + 17, // 35: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.labels:type_name -> google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.LabelsEntry + 23, // 36: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions.forecast_horizon:type_name -> google.protobuf.Duration + 23, // 37: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit.period:type_name -> google.protobuf.Duration + 23, // 38: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy.renotify_interval:type_name -> 
google.protobuf.Duration + 39, // [39:39] is the sub-list for method output_type + 39, // [39:39] is the sub-list for method input_type + 39, // [39:39] is the sub-list for extension type_name + 39, // [39:39] is the sub-list for extension extendee + 0, // [0:39] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_alert_proto_init() } +func file_google_monitoring_v3_alert_proto_init() { + if File_google_monitoring_v3_alert_proto != nil { + return + } + file_google_monitoring_v3_common_proto_init() + file_google_monitoring_v3_mutation_record_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_alert_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Documentation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Condition); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_AlertStrategy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Documentation_Link); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Condition_Trigger); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Condition_MetricThreshold); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Condition_MetricAbsence); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Condition_LogMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Condition_MonitoringQueryLanguageCondition); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Condition_PrometheusQueryLanguageCondition); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_Condition_MetricThreshold_ForecastOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[15].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_AlertStrategy_NotificationRateLimit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_proto_msgTypes[16].Exporter = func(v any, i int) any { + switch v := v.(*AlertPolicy_AlertStrategy_NotificationChannelStrategy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_monitoring_v3_alert_proto_msgTypes[2].OneofWrappers = []any{ + (*AlertPolicy_Condition_ConditionThreshold)(nil), + (*AlertPolicy_Condition_ConditionAbsent)(nil), + (*AlertPolicy_Condition_ConditionMatchedLog)(nil), + (*AlertPolicy_Condition_ConditionMonitoringQueryLanguage)(nil), + (*AlertPolicy_Condition_ConditionPrometheusQueryLanguage)(nil), + } + file_google_monitoring_v3_alert_proto_msgTypes[6].OneofWrappers = []any{ + (*AlertPolicy_Condition_Trigger_Count)(nil), + (*AlertPolicy_Condition_Trigger_Percent)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_alert_proto_rawDesc, + NumEnums: 3, + NumMessages: 17, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_alert_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_alert_proto_depIdxs, + EnumInfos: file_google_monitoring_v3_alert_proto_enumTypes, + MessageInfos: file_google_monitoring_v3_alert_proto_msgTypes, + }.Build() + File_google_monitoring_v3_alert_proto = out.File + file_google_monitoring_v3_alert_proto_rawDesc = nil + file_google_monitoring_v3_alert_proto_goTypes = nil + file_google_monitoring_v3_alert_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go new file mode 100644 index 0000000000..f0e149d16b --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go @@ -0,0 +1,1045 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/alert_service.proto + +package monitoringpb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The protocol for the `CreateAlertPolicy` request. +type CreateAlertPolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) in which + // to create the alerting policy. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + // + // Note that this field names the parent container in which the alerting + // policy will be written, not the name of the created policy. |name| must be + // a host project of a Metrics Scope, otherwise INVALID_ARGUMENT error will + // return. The alerting policy that is returned will have a name that contains + // a normalized representation of this name as a prefix but adds a suffix of + // the form `/alertPolicies/[ALERT_POLICY_ID]`, identifying the policy in the + // container. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Required. The requested alerting policy. You should omit the `name` field + // in this policy. The name will be returned in the new policy, including a + // new `[ALERT_POLICY_ID]` value. + AlertPolicy *AlertPolicy `protobuf:"bytes,2,opt,name=alert_policy,json=alertPolicy,proto3" json:"alert_policy,omitempty"` +} + +func (x *CreateAlertPolicyRequest) Reset() { + *x = CreateAlertPolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateAlertPolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateAlertPolicyRequest) ProtoMessage() {} + +func (x *CreateAlertPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateAlertPolicyRequest.ProtoReflect.Descriptor instead. +func (*CreateAlertPolicyRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{0} +} + +func (x *CreateAlertPolicyRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CreateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy { + if x != nil { + return x.AlertPolicy + } + return nil +} + +// The protocol for the `GetAlertPolicy` request. 
+type GetAlertPolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The alerting policy to retrieve. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetAlertPolicyRequest) Reset() { + *x = GetAlertPolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetAlertPolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetAlertPolicyRequest) ProtoMessage() {} + +func (x *GetAlertPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetAlertPolicyRequest.ProtoReflect.Descriptor instead. +func (*GetAlertPolicyRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{1} +} + +func (x *GetAlertPolicyRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The protocol for the `ListAlertPolicies` request. +type ListAlertPoliciesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) whose + // alert policies are to be listed. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + // + // Note that this field names the parent container in which the alerting + // policies to be listed are stored. To retrieve a single alerting policy + // by name, use the + // [GetAlertPolicy][google.monitoring.v3.AlertPolicyService.GetAlertPolicy] + // operation, instead. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // If provided, this field specifies the criteria that must be met by + // alert policies to be included in the response. + // + // For more details, see [sorting and + // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). + Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` + // A comma-separated list of fields by which to sort the result. Supports + // the same set of field references as the `filter` field. Entries can be + // prefixed with a minus sign to sort by the field in descending order. + // + // For more details, see [sorting and + // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). + OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` + // The maximum number of results to return in a single response. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return more results from the previous method call. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListAlertPoliciesRequest) Reset() { + *x = ListAlertPoliciesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListAlertPoliciesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListAlertPoliciesRequest) ProtoMessage() {} + +func (x *ListAlertPoliciesRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListAlertPoliciesRequest.ProtoReflect.Descriptor instead. +func (*ListAlertPoliciesRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{2} +} + +func (x *ListAlertPoliciesRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListAlertPoliciesRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListAlertPoliciesRequest) GetOrderBy() string { + if x != nil { + return x.OrderBy + } + return "" +} + +func (x *ListAlertPoliciesRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListAlertPoliciesRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The protocol for the `ListAlertPolicies` response. +type ListAlertPoliciesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The returned alert policies. + AlertPolicies []*AlertPolicy `protobuf:"bytes,3,rep,name=alert_policies,json=alertPolicies,proto3" json:"alert_policies,omitempty"` + // If there might be more results than were returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of alert policies in all pages. This number is only an + // estimate, and may change in subsequent pages. https://aip.dev/158 + TotalSize int32 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` +} + +func (x *ListAlertPoliciesResponse) Reset() { + *x = ListAlertPoliciesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListAlertPoliciesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListAlertPoliciesResponse) ProtoMessage() {} + +func (x *ListAlertPoliciesResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListAlertPoliciesResponse.ProtoReflect.Descriptor instead. 
+func (*ListAlertPoliciesResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{3} +} + +func (x *ListAlertPoliciesResponse) GetAlertPolicies() []*AlertPolicy { + if x != nil { + return x.AlertPolicies + } + return nil +} + +func (x *ListAlertPoliciesResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *ListAlertPoliciesResponse) GetTotalSize() int32 { + if x != nil { + return x.TotalSize + } + return 0 +} + +// The protocol for the `UpdateAlertPolicy` request. +type UpdateAlertPolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. A list of alerting policy field names. If this field is not + // empty, each listed field in the existing alerting policy is set to the + // value of the corresponding field in the supplied policy (`alert_policy`), + // or to the field's default value if the field is not in the supplied + // alerting policy. Fields not listed retain their previous value. + // + // Examples of valid field masks include `display_name`, `documentation`, + // `documentation.content`, `documentation.mime_type`, `user_labels`, + // `user_label.nameofkey`, `enabled`, `conditions`, `combiner`, etc. + // + // If this field is empty, then the supplied alerting policy replaces the + // existing policy. It is the same as deleting the existing policy and + // adding the supplied policy, except for the following: + // + // - The new policy will have the same `[ALERT_POLICY_ID]` as the former + // policy. This gives you continuity with the former policy in your + // notifications and incidents. + // - Conditions in the new policy will keep their former `[CONDITION_ID]` if + // the supplied condition includes the `name` field with that + // `[CONDITION_ID]`. If the supplied condition omits the `name` field, + // then a new `[CONDITION_ID]` is created. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // Required. The updated alerting policy or the updated values for the + // fields listed in `update_mask`. + // If `update_mask` is not empty, any fields in this policy that are + // not in `update_mask` are ignored. + AlertPolicy *AlertPolicy `protobuf:"bytes,3,opt,name=alert_policy,json=alertPolicy,proto3" json:"alert_policy,omitempty"` +} + +func (x *UpdateAlertPolicyRequest) Reset() { + *x = UpdateAlertPolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateAlertPolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateAlertPolicyRequest) ProtoMessage() {} + +func (x *UpdateAlertPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateAlertPolicyRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateAlertPolicyRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{4} +} + +func (x *UpdateAlertPolicyRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +func (x *UpdateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy { + if x != nil { + return x.AlertPolicy + } + return nil +} + +// The protocol for the `DeleteAlertPolicy` request. +type DeleteAlertPolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The alerting policy to delete. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + // + // For more information, see [AlertPolicy][google.monitoring.v3.AlertPolicy]. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *DeleteAlertPolicyRequest) Reset() { + *x = DeleteAlertPolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteAlertPolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteAlertPolicyRequest) ProtoMessage() {} + +func (x *DeleteAlertPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteAlertPolicyRequest.ProtoReflect.Descriptor instead. +func (*DeleteAlertPolicyRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{5} +} + +func (x *DeleteAlertPolicyRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +var File_google_monitoring_v3_alert_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_alert_service_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, + 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa8, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, + 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x12, 0x25, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x49, 0x0a, 0x0c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x0b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, + 0x5a, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xcc, 0x01, 0x0a, 0x18, + 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x12, 0x25, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, 0x12, 0x1b, + 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, + 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xac, 0x01, 0x0a, 0x19, 0x4c, + 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0e, 0x61, 0x6c, 0x65, 0x72, + 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x52, 0x0d, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, + 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, + 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, 0xa2, 0x01, 0x0a, 0x18, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, + 0x61, 0x73, 0x6b, 0x12, 0x49, 0x0a, 0x0c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x0b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x5d, + 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, + 0x0a, 0x25, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, + 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x32, 0x9e, 0x08, + 0x0a, 0x12, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0xa8, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, + 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0xda, 0x41, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x12, 0x23, 0x2f, 0x76, 0x33, 0x2f, + 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, + 0x96, 0x01, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x41, 
0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x65, + 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x22, 0x34, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x27, 0x12, 0x25, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xb5, 0x01, 0x0a, 0x11, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, + 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x22, 0x4d, 0xda, 0x41, 0x11, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x61, 0x6c, 0x65, 0x72, 0x74, + 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x33, 0x3a, 0x0c, 0x61, + 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x23, 0x2f, 0x76, 0x33, + 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x12, 0x91, 0x01, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x34, + 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x2a, 0x25, 0x2f, + 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xcb, 0x01, 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, + 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 
0x79, 0x22, 0x63, 0xda, + 0x41, 0x18, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2c, 0x61, 0x6c, + 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x42, + 0x3a, 0x0c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x32, 0x32, + 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, + 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, + 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, + 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x42, 0xcc, + 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x11, 0x41, 0x6c, 0x65, + 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, + 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, + 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_alert_service_proto_rawDescOnce sync.Once + file_google_monitoring_v3_alert_service_proto_rawDescData = file_google_monitoring_v3_alert_service_proto_rawDesc +) + +func file_google_monitoring_v3_alert_service_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_alert_service_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_alert_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_alert_service_proto_rawDescData) + }) + return 
file_google_monitoring_v3_alert_service_proto_rawDescData +} + +var file_google_monitoring_v3_alert_service_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_google_monitoring_v3_alert_service_proto_goTypes = []any{ + (*CreateAlertPolicyRequest)(nil), // 0: google.monitoring.v3.CreateAlertPolicyRequest + (*GetAlertPolicyRequest)(nil), // 1: google.monitoring.v3.GetAlertPolicyRequest + (*ListAlertPoliciesRequest)(nil), // 2: google.monitoring.v3.ListAlertPoliciesRequest + (*ListAlertPoliciesResponse)(nil), // 3: google.monitoring.v3.ListAlertPoliciesResponse + (*UpdateAlertPolicyRequest)(nil), // 4: google.monitoring.v3.UpdateAlertPolicyRequest + (*DeleteAlertPolicyRequest)(nil), // 5: google.monitoring.v3.DeleteAlertPolicyRequest + (*AlertPolicy)(nil), // 6: google.monitoring.v3.AlertPolicy + (*fieldmaskpb.FieldMask)(nil), // 7: google.protobuf.FieldMask + (*emptypb.Empty)(nil), // 8: google.protobuf.Empty +} +var file_google_monitoring_v3_alert_service_proto_depIdxs = []int32{ + 6, // 0: google.monitoring.v3.CreateAlertPolicyRequest.alert_policy:type_name -> google.monitoring.v3.AlertPolicy + 6, // 1: google.monitoring.v3.ListAlertPoliciesResponse.alert_policies:type_name -> google.monitoring.v3.AlertPolicy + 7, // 2: google.monitoring.v3.UpdateAlertPolicyRequest.update_mask:type_name -> google.protobuf.FieldMask + 6, // 3: google.monitoring.v3.UpdateAlertPolicyRequest.alert_policy:type_name -> google.monitoring.v3.AlertPolicy + 2, // 4: google.monitoring.v3.AlertPolicyService.ListAlertPolicies:input_type -> google.monitoring.v3.ListAlertPoliciesRequest + 1, // 5: google.monitoring.v3.AlertPolicyService.GetAlertPolicy:input_type -> google.monitoring.v3.GetAlertPolicyRequest + 0, // 6: google.monitoring.v3.AlertPolicyService.CreateAlertPolicy:input_type -> google.monitoring.v3.CreateAlertPolicyRequest + 5, // 7: google.monitoring.v3.AlertPolicyService.DeleteAlertPolicy:input_type -> google.monitoring.v3.DeleteAlertPolicyRequest + 4, // 8: google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy:input_type -> google.monitoring.v3.UpdateAlertPolicyRequest + 3, // 9: google.monitoring.v3.AlertPolicyService.ListAlertPolicies:output_type -> google.monitoring.v3.ListAlertPoliciesResponse + 6, // 10: google.monitoring.v3.AlertPolicyService.GetAlertPolicy:output_type -> google.monitoring.v3.AlertPolicy + 6, // 11: google.monitoring.v3.AlertPolicyService.CreateAlertPolicy:output_type -> google.monitoring.v3.AlertPolicy + 8, // 12: google.monitoring.v3.AlertPolicyService.DeleteAlertPolicy:output_type -> google.protobuf.Empty + 6, // 13: google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy:output_type -> google.monitoring.v3.AlertPolicy + 9, // [9:14] is the sub-list for method output_type + 4, // [4:9] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_alert_service_proto_init() } +func file_google_monitoring_v3_alert_service_proto_init() { + if File_google_monitoring_v3_alert_service_proto != nil { + return + } + file_google_monitoring_v3_alert_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_alert_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*CreateAlertPolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_monitoring_v3_alert_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*GetAlertPolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ListAlertPoliciesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*ListAlertPoliciesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*UpdateAlertPolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_alert_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*DeleteAlertPolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_alert_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_monitoring_v3_alert_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_alert_service_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_alert_service_proto_msgTypes, + }.Build() + File_google_monitoring_v3_alert_service_proto = out.File + file_google_monitoring_v3_alert_service_proto_rawDesc = nil + file_google_monitoring_v3_alert_service_proto_goTypes = nil + file_google_monitoring_v3_alert_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// AlertPolicyServiceClient is the client API for AlertPolicyService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type AlertPolicyServiceClient interface { + // Lists the existing alerting policies for the workspace. + ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error) + // Gets a single alerting policy. + GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) + // Creates a new alerting policy. + // + // Design your application to single-thread API calls that modify the state of + // alerting policies in a single project. This includes calls to + // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. + CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) + // Deletes an alerting policy. 
+ // + // Design your application to single-thread API calls that modify the state of + // alerting policies in a single project. This includes calls to + // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. + DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Updates an alerting policy. You can either replace the entire policy with + // a new one or replace only certain fields in the current alerting policy by + // specifying the fields to be updated via `updateMask`. Returns the + // updated alerting policy. + // + // Design your application to single-thread API calls that modify the state of + // alerting policies in a single project. This includes calls to + // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. + UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) +} + +type alertPolicyServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewAlertPolicyServiceClient(cc grpc.ClientConnInterface) AlertPolicyServiceClient { + return &alertPolicyServiceClient{cc} +} + +func (c *alertPolicyServiceClient) ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error) { + out := new(ListAlertPoliciesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) { + out := new(AlertPolicy) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) { + out := new(AlertPolicy) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) { + out := new(AlertPolicy) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// AlertPolicyServiceServer is the server API for AlertPolicyService service. +type AlertPolicyServiceServer interface { + // Lists the existing alerting policies for the workspace. + ListAlertPolicies(context.Context, *ListAlertPoliciesRequest) (*ListAlertPoliciesResponse, error) + // Gets a single alerting policy. + GetAlertPolicy(context.Context, *GetAlertPolicyRequest) (*AlertPolicy, error) + // Creates a new alerting policy. + // + // Design your application to single-thread API calls that modify the state of + // alerting policies in a single project. 
This includes calls to + // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. + CreateAlertPolicy(context.Context, *CreateAlertPolicyRequest) (*AlertPolicy, error) + // Deletes an alerting policy. + // + // Design your application to single-thread API calls that modify the state of + // alerting policies in a single project. This includes calls to + // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. + DeleteAlertPolicy(context.Context, *DeleteAlertPolicyRequest) (*emptypb.Empty, error) + // Updates an alerting policy. You can either replace the entire policy with + // a new one or replace only certain fields in the current alerting policy by + // specifying the fields to be updated via `updateMask`. Returns the + // updated alerting policy. + // + // Design your application to single-thread API calls that modify the state of + // alerting policies in a single project. This includes calls to + // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. + UpdateAlertPolicy(context.Context, *UpdateAlertPolicyRequest) (*AlertPolicy, error) +} + +// UnimplementedAlertPolicyServiceServer can be embedded to have forward compatible implementations. +type UnimplementedAlertPolicyServiceServer struct { +} + +func (*UnimplementedAlertPolicyServiceServer) ListAlertPolicies(context.Context, *ListAlertPoliciesRequest) (*ListAlertPoliciesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListAlertPolicies not implemented") +} +func (*UnimplementedAlertPolicyServiceServer) GetAlertPolicy(context.Context, *GetAlertPolicyRequest) (*AlertPolicy, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetAlertPolicy not implemented") +} +func (*UnimplementedAlertPolicyServiceServer) CreateAlertPolicy(context.Context, *CreateAlertPolicyRequest) (*AlertPolicy, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateAlertPolicy not implemented") +} +func (*UnimplementedAlertPolicyServiceServer) DeleteAlertPolicy(context.Context, *DeleteAlertPolicyRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteAlertPolicy not implemented") +} +func (*UnimplementedAlertPolicyServiceServer) UpdateAlertPolicy(context.Context, *UpdateAlertPolicyRequest) (*AlertPolicy, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateAlertPolicy not implemented") +} + +func RegisterAlertPolicyServiceServer(s *grpc.Server, srv AlertPolicyServiceServer) { + s.RegisterService(&_AlertPolicyService_serviceDesc, srv) +} + +func _AlertPolicyService_ListAlertPolicies_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListAlertPoliciesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, req.(*ListAlertPoliciesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_GetAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if 
interceptor == nil { + return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, req.(*GetAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_CreateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, req.(*CreateAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_DeleteAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, req.(*DeleteAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_UpdateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, req.(*UpdateAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _AlertPolicyService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.AlertPolicyService", + HandlerType: (*AlertPolicyServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListAlertPolicies", + Handler: _AlertPolicyService_ListAlertPolicies_Handler, + }, + { + MethodName: "GetAlertPolicy", + Handler: _AlertPolicyService_GetAlertPolicy_Handler, + }, + { + MethodName: "CreateAlertPolicy", + Handler: _AlertPolicyService_CreateAlertPolicy_Handler, + }, + { + MethodName: "DeleteAlertPolicy", + Handler: _AlertPolicyService_DeleteAlertPolicy_Handler, + }, + { + MethodName: "UpdateAlertPolicy", + Handler: _AlertPolicyService_UpdateAlertPolicy_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/alert_service.proto", +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go 
b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go new file mode 100644 index 0000000000..c9aa5a0247 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go @@ -0,0 +1,1165 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/common.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + distribution "google.golang.org/genproto/googleapis/api/distribution" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Specifies an ordering relationship on two arguments, called `left` and +// `right`. +type ComparisonType int32 + +const ( + // No ordering relationship is specified. + ComparisonType_COMPARISON_UNSPECIFIED ComparisonType = 0 + // True if the left argument is greater than the right argument. + ComparisonType_COMPARISON_GT ComparisonType = 1 + // True if the left argument is greater than or equal to the right argument. + ComparisonType_COMPARISON_GE ComparisonType = 2 + // True if the left argument is less than the right argument. + ComparisonType_COMPARISON_LT ComparisonType = 3 + // True if the left argument is less than or equal to the right argument. + ComparisonType_COMPARISON_LE ComparisonType = 4 + // True if the left argument is equal to the right argument. + ComparisonType_COMPARISON_EQ ComparisonType = 5 + // True if the left argument is not equal to the right argument. + ComparisonType_COMPARISON_NE ComparisonType = 6 +) + +// Enum value maps for ComparisonType. 
+var ( + ComparisonType_name = map[int32]string{ + 0: "COMPARISON_UNSPECIFIED", + 1: "COMPARISON_GT", + 2: "COMPARISON_GE", + 3: "COMPARISON_LT", + 4: "COMPARISON_LE", + 5: "COMPARISON_EQ", + 6: "COMPARISON_NE", + } + ComparisonType_value = map[string]int32{ + "COMPARISON_UNSPECIFIED": 0, + "COMPARISON_GT": 1, + "COMPARISON_GE": 2, + "COMPARISON_LT": 3, + "COMPARISON_LE": 4, + "COMPARISON_EQ": 5, + "COMPARISON_NE": 6, + } +) + +func (x ComparisonType) Enum() *ComparisonType { + p := new(ComparisonType) + *p = x + return p +} + +func (x ComparisonType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ComparisonType) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_common_proto_enumTypes[0].Descriptor() +} + +func (ComparisonType) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_common_proto_enumTypes[0] +} + +func (x ComparisonType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ComparisonType.Descriptor instead. +func (ComparisonType) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{0} +} + +// The tier of service for a Metrics Scope. Please see the +// [service tiers +// documentation](https://cloud.google.com/monitoring/workspaces/tiers) for more +// details. +// +// Deprecated: Marked as deprecated in google/monitoring/v3/common.proto. +type ServiceTier int32 + +const ( + // An invalid sentinel value, used to indicate that a tier has not + // been provided explicitly. + ServiceTier_SERVICE_TIER_UNSPECIFIED ServiceTier = 0 + // The Cloud Monitoring Basic tier, a free tier of service that provides basic + // features, a moderate allotment of logs, and access to built-in metrics. + // A number of features are not available in this tier. For more details, + // see [the service tiers + // documentation](https://cloud.google.com/monitoring/workspaces/tiers). + ServiceTier_SERVICE_TIER_BASIC ServiceTier = 1 + // The Cloud Monitoring Premium tier, a higher, more expensive tier of service + // that provides access to all Cloud Monitoring features, lets you use Cloud + // Monitoring with AWS accounts, and has a larger allotments for logs and + // metrics. For more details, see [the service tiers + // documentation](https://cloud.google.com/monitoring/workspaces/tiers). + ServiceTier_SERVICE_TIER_PREMIUM ServiceTier = 2 +) + +// Enum value maps for ServiceTier. +var ( + ServiceTier_name = map[int32]string{ + 0: "SERVICE_TIER_UNSPECIFIED", + 1: "SERVICE_TIER_BASIC", + 2: "SERVICE_TIER_PREMIUM", + } + ServiceTier_value = map[string]int32{ + "SERVICE_TIER_UNSPECIFIED": 0, + "SERVICE_TIER_BASIC": 1, + "SERVICE_TIER_PREMIUM": 2, + } +) + +func (x ServiceTier) Enum() *ServiceTier { + p := new(ServiceTier) + *p = x + return p +} + +func (x ServiceTier) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ServiceTier) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_common_proto_enumTypes[1].Descriptor() +} + +func (ServiceTier) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_common_proto_enumTypes[1] +} + +func (x ServiceTier) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ServiceTier.Descriptor instead. 
+func (ServiceTier) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{1} +} + +// The `Aligner` specifies the operation that will be applied to the data +// points in each alignment period in a time series. Except for +// `ALIGN_NONE`, which specifies that no operation be applied, each alignment +// operation replaces the set of data values in each alignment period with +// a single value: the result of applying the operation to the data values. +// An aligned time series has a single data value at the end of each +// `alignment_period`. +// +// An alignment operation can change the data type of the values, too. For +// example, if you apply a counting operation to boolean values, the data +// `value_type` in the original time series is `BOOLEAN`, but the `value_type` +// in the aligned result is `INT64`. +type Aggregation_Aligner int32 + +const ( + // No alignment. Raw data is returned. Not valid if cross-series reduction + // is requested. The `value_type` of the result is the same as the + // `value_type` of the input. + Aggregation_ALIGN_NONE Aggregation_Aligner = 0 + // Align and convert to + // [DELTA][google.api.MetricDescriptor.MetricKind.DELTA]. + // The output is `delta = y1 - y0`. + // + // This alignment is valid for + // [CUMULATIVE][google.api.MetricDescriptor.MetricKind.CUMULATIVE] and + // `DELTA` metrics. If the selected alignment period results in periods + // with no data, then the aligned value for such a period is created by + // interpolation. The `value_type` of the aligned result is the same as + // the `value_type` of the input. + Aggregation_ALIGN_DELTA Aggregation_Aligner = 1 + // Align and convert to a rate. The result is computed as + // `rate = (y1 - y0)/(t1 - t0)`, or "delta over time". + // Think of this aligner as providing the slope of the line that passes + // through the value at the start and at the end of the `alignment_period`. + // + // This aligner is valid for `CUMULATIVE` + // and `DELTA` metrics with numeric values. If the selected alignment + // period results in periods with no data, then the aligned value for + // such a period is created by interpolation. The output is a `GAUGE` + // metric with `value_type` `DOUBLE`. + // + // If, by "rate", you mean "percentage change", see the + // `ALIGN_PERCENT_CHANGE` aligner instead. + Aggregation_ALIGN_RATE Aggregation_Aligner = 2 + // Align by interpolating between adjacent points around the alignment + // period boundary. This aligner is valid for `GAUGE` metrics with + // numeric values. The `value_type` of the aligned result is the same as the + // `value_type` of the input. + Aggregation_ALIGN_INTERPOLATE Aggregation_Aligner = 3 + // Align by moving the most recent data point before the end of the + // alignment period to the boundary at the end of the alignment + // period. This aligner is valid for `GAUGE` metrics. The `value_type` of + // the aligned result is the same as the `value_type` of the input. + Aggregation_ALIGN_NEXT_OLDER Aggregation_Aligner = 4 + // Align the time series by returning the minimum value in each alignment + // period. This aligner is valid for `GAUGE` and `DELTA` metrics with + // numeric values. The `value_type` of the aligned result is the same as + // the `value_type` of the input. + Aggregation_ALIGN_MIN Aggregation_Aligner = 10 + // Align the time series by returning the maximum value in each alignment + // period. This aligner is valid for `GAUGE` and `DELTA` metrics with + // numeric values. 
The `value_type` of the aligned result is the same as + // the `value_type` of the input. + Aggregation_ALIGN_MAX Aggregation_Aligner = 11 + // Align the time series by returning the mean value in each alignment + // period. This aligner is valid for `GAUGE` and `DELTA` metrics with + // numeric values. The `value_type` of the aligned result is `DOUBLE`. + Aggregation_ALIGN_MEAN Aggregation_Aligner = 12 + // Align the time series by returning the number of values in each alignment + // period. This aligner is valid for `GAUGE` and `DELTA` metrics with + // numeric or Boolean values. The `value_type` of the aligned result is + // `INT64`. + Aggregation_ALIGN_COUNT Aggregation_Aligner = 13 + // Align the time series by returning the sum of the values in each + // alignment period. This aligner is valid for `GAUGE` and `DELTA` + // metrics with numeric and distribution values. The `value_type` of the + // aligned result is the same as the `value_type` of the input. + Aggregation_ALIGN_SUM Aggregation_Aligner = 14 + // Align the time series by returning the standard deviation of the values + // in each alignment period. This aligner is valid for `GAUGE` and + // `DELTA` metrics with numeric values. The `value_type` of the output is + // `DOUBLE`. + Aggregation_ALIGN_STDDEV Aggregation_Aligner = 15 + // Align the time series by returning the number of `True` values in + // each alignment period. This aligner is valid for `GAUGE` metrics with + // Boolean values. The `value_type` of the output is `INT64`. + Aggregation_ALIGN_COUNT_TRUE Aggregation_Aligner = 16 + // Align the time series by returning the number of `False` values in + // each alignment period. This aligner is valid for `GAUGE` metrics with + // Boolean values. The `value_type` of the output is `INT64`. + Aggregation_ALIGN_COUNT_FALSE Aggregation_Aligner = 24 + // Align the time series by returning the ratio of the number of `True` + // values to the total number of values in each alignment period. This + // aligner is valid for `GAUGE` metrics with Boolean values. The output + // value is in the range [0.0, 1.0] and has `value_type` `DOUBLE`. + Aggregation_ALIGN_FRACTION_TRUE Aggregation_Aligner = 17 + // Align the time series by using [percentile + // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting + // data point in each alignment period is the 99th percentile of all data + // points in the period. This aligner is valid for `GAUGE` and `DELTA` + // metrics with distribution values. The output is a `GAUGE` metric with + // `value_type` `DOUBLE`. + Aggregation_ALIGN_PERCENTILE_99 Aggregation_Aligner = 18 + // Align the time series by using [percentile + // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting + // data point in each alignment period is the 95th percentile of all data + // points in the period. This aligner is valid for `GAUGE` and `DELTA` + // metrics with distribution values. The output is a `GAUGE` metric with + // `value_type` `DOUBLE`. + Aggregation_ALIGN_PERCENTILE_95 Aggregation_Aligner = 19 + // Align the time series by using [percentile + // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting + // data point in each alignment period is the 50th percentile of all data + // points in the period. This aligner is valid for `GAUGE` and `DELTA` + // metrics with distribution values. The output is a `GAUGE` metric with + // `value_type` `DOUBLE`. 
+ Aggregation_ALIGN_PERCENTILE_50 Aggregation_Aligner = 20 + // Align the time series by using [percentile + // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting + // data point in each alignment period is the 5th percentile of all data + // points in the period. This aligner is valid for `GAUGE` and `DELTA` + // metrics with distribution values. The output is a `GAUGE` metric with + // `value_type` `DOUBLE`. + Aggregation_ALIGN_PERCENTILE_05 Aggregation_Aligner = 21 + // Align and convert to a percentage change. This aligner is valid for + // `GAUGE` and `DELTA` metrics with numeric values. This alignment returns + // `((current - previous)/previous) * 100`, where the value of `previous` is + // determined based on the `alignment_period`. + // + // If the values of `current` and `previous` are both 0, then the returned + // value is 0. If only `previous` is 0, the returned value is infinity. + // + // A 10-minute moving mean is computed at each point of the alignment period + // prior to the above calculation to smooth the metric and prevent false + // positives from very short-lived spikes. The moving mean is only + // applicable for data whose values are `>= 0`. Any values `< 0` are + // treated as a missing datapoint, and are ignored. While `DELTA` + // metrics are accepted by this alignment, special care should be taken that + // the values for the metric will always be positive. The output is a + // `GAUGE` metric with `value_type` `DOUBLE`. + Aggregation_ALIGN_PERCENT_CHANGE Aggregation_Aligner = 23 +) + +// Enum value maps for Aggregation_Aligner. +var ( + Aggregation_Aligner_name = map[int32]string{ + 0: "ALIGN_NONE", + 1: "ALIGN_DELTA", + 2: "ALIGN_RATE", + 3: "ALIGN_INTERPOLATE", + 4: "ALIGN_NEXT_OLDER", + 10: "ALIGN_MIN", + 11: "ALIGN_MAX", + 12: "ALIGN_MEAN", + 13: "ALIGN_COUNT", + 14: "ALIGN_SUM", + 15: "ALIGN_STDDEV", + 16: "ALIGN_COUNT_TRUE", + 24: "ALIGN_COUNT_FALSE", + 17: "ALIGN_FRACTION_TRUE", + 18: "ALIGN_PERCENTILE_99", + 19: "ALIGN_PERCENTILE_95", + 20: "ALIGN_PERCENTILE_50", + 21: "ALIGN_PERCENTILE_05", + 23: "ALIGN_PERCENT_CHANGE", + } + Aggregation_Aligner_value = map[string]int32{ + "ALIGN_NONE": 0, + "ALIGN_DELTA": 1, + "ALIGN_RATE": 2, + "ALIGN_INTERPOLATE": 3, + "ALIGN_NEXT_OLDER": 4, + "ALIGN_MIN": 10, + "ALIGN_MAX": 11, + "ALIGN_MEAN": 12, + "ALIGN_COUNT": 13, + "ALIGN_SUM": 14, + "ALIGN_STDDEV": 15, + "ALIGN_COUNT_TRUE": 16, + "ALIGN_COUNT_FALSE": 24, + "ALIGN_FRACTION_TRUE": 17, + "ALIGN_PERCENTILE_99": 18, + "ALIGN_PERCENTILE_95": 19, + "ALIGN_PERCENTILE_50": 20, + "ALIGN_PERCENTILE_05": 21, + "ALIGN_PERCENT_CHANGE": 23, + } +) + +func (x Aggregation_Aligner) Enum() *Aggregation_Aligner { + p := new(Aggregation_Aligner) + *p = x + return p +} + +func (x Aggregation_Aligner) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Aggregation_Aligner) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_common_proto_enumTypes[2].Descriptor() +} + +func (Aggregation_Aligner) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_common_proto_enumTypes[2] +} + +func (x Aggregation_Aligner) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Aggregation_Aligner.Descriptor instead. 
+func (Aggregation_Aligner) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{2, 0} +} + +// A Reducer operation describes how to aggregate data points from multiple +// time series into a single time series, where the value of each data point +// in the resulting series is a function of all the already aligned values in +// the input time series. +type Aggregation_Reducer int32 + +const ( + // No cross-time series reduction. The output of the `Aligner` is + // returned. + Aggregation_REDUCE_NONE Aggregation_Reducer = 0 + // Reduce by computing the mean value across time series for each + // alignment period. This reducer is valid for + // [DELTA][google.api.MetricDescriptor.MetricKind.DELTA] and + // [GAUGE][google.api.MetricDescriptor.MetricKind.GAUGE] metrics with + // numeric or distribution values. The `value_type` of the output is + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_REDUCE_MEAN Aggregation_Reducer = 1 + // Reduce by computing the minimum value across time series for each + // alignment period. This reducer is valid for `DELTA` and `GAUGE` metrics + // with numeric values. The `value_type` of the output is the same as the + // `value_type` of the input. + Aggregation_REDUCE_MIN Aggregation_Reducer = 2 + // Reduce by computing the maximum value across time series for each + // alignment period. This reducer is valid for `DELTA` and `GAUGE` metrics + // with numeric values. The `value_type` of the output is the same as the + // `value_type` of the input. + Aggregation_REDUCE_MAX Aggregation_Reducer = 3 + // Reduce by computing the sum across time series for each + // alignment period. This reducer is valid for `DELTA` and `GAUGE` metrics + // with numeric and distribution values. The `value_type` of the output is + // the same as the `value_type` of the input. + Aggregation_REDUCE_SUM Aggregation_Reducer = 4 + // Reduce by computing the standard deviation across time series + // for each alignment period. This reducer is valid for `DELTA` and + // `GAUGE` metrics with numeric or distribution values. The `value_type` + // of the output is `DOUBLE`. + Aggregation_REDUCE_STDDEV Aggregation_Reducer = 5 + // Reduce by computing the number of data points across time series + // for each alignment period. This reducer is valid for `DELTA` and + // `GAUGE` metrics of numeric, Boolean, distribution, and string + // `value_type`. The `value_type` of the output is `INT64`. + Aggregation_REDUCE_COUNT Aggregation_Reducer = 6 + // Reduce by computing the number of `True`-valued data points across time + // series for each alignment period. This reducer is valid for `DELTA` and + // `GAUGE` metrics of Boolean `value_type`. The `value_type` of the output + // is `INT64`. + Aggregation_REDUCE_COUNT_TRUE Aggregation_Reducer = 7 + // Reduce by computing the number of `False`-valued data points across time + // series for each alignment period. This reducer is valid for `DELTA` and + // `GAUGE` metrics of Boolean `value_type`. The `value_type` of the output + // is `INT64`. + Aggregation_REDUCE_COUNT_FALSE Aggregation_Reducer = 15 + // Reduce by computing the ratio of the number of `True`-valued data points + // to the total number of data points for each alignment period. This + // reducer is valid for `DELTA` and `GAUGE` metrics of Boolean `value_type`. + // The output value is in the range [0.0, 1.0] and has `value_type` + // `DOUBLE`. 
+ Aggregation_REDUCE_FRACTION_TRUE Aggregation_Reducer = 8 + // Reduce by computing the [99th + // percentile](https://en.wikipedia.org/wiki/Percentile) of data points + // across time series for each alignment period. This reducer is valid for + // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value + // of the output is `DOUBLE`. + Aggregation_REDUCE_PERCENTILE_99 Aggregation_Reducer = 9 + // Reduce by computing the [95th + // percentile](https://en.wikipedia.org/wiki/Percentile) of data points + // across time series for each alignment period. This reducer is valid for + // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value + // of the output is `DOUBLE`. + Aggregation_REDUCE_PERCENTILE_95 Aggregation_Reducer = 10 + // Reduce by computing the [50th + // percentile](https://en.wikipedia.org/wiki/Percentile) of data points + // across time series for each alignment period. This reducer is valid for + // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value + // of the output is `DOUBLE`. + Aggregation_REDUCE_PERCENTILE_50 Aggregation_Reducer = 11 + // Reduce by computing the [5th + // percentile](https://en.wikipedia.org/wiki/Percentile) of data points + // across time series for each alignment period. This reducer is valid for + // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value + // of the output is `DOUBLE`. + Aggregation_REDUCE_PERCENTILE_05 Aggregation_Reducer = 12 +) + +// Enum value maps for Aggregation_Reducer. +var ( + Aggregation_Reducer_name = map[int32]string{ + 0: "REDUCE_NONE", + 1: "REDUCE_MEAN", + 2: "REDUCE_MIN", + 3: "REDUCE_MAX", + 4: "REDUCE_SUM", + 5: "REDUCE_STDDEV", + 6: "REDUCE_COUNT", + 7: "REDUCE_COUNT_TRUE", + 15: "REDUCE_COUNT_FALSE", + 8: "REDUCE_FRACTION_TRUE", + 9: "REDUCE_PERCENTILE_99", + 10: "REDUCE_PERCENTILE_95", + 11: "REDUCE_PERCENTILE_50", + 12: "REDUCE_PERCENTILE_05", + } + Aggregation_Reducer_value = map[string]int32{ + "REDUCE_NONE": 0, + "REDUCE_MEAN": 1, + "REDUCE_MIN": 2, + "REDUCE_MAX": 3, + "REDUCE_SUM": 4, + "REDUCE_STDDEV": 5, + "REDUCE_COUNT": 6, + "REDUCE_COUNT_TRUE": 7, + "REDUCE_COUNT_FALSE": 15, + "REDUCE_FRACTION_TRUE": 8, + "REDUCE_PERCENTILE_99": 9, + "REDUCE_PERCENTILE_95": 10, + "REDUCE_PERCENTILE_50": 11, + "REDUCE_PERCENTILE_05": 12, + } +) + +func (x Aggregation_Reducer) Enum() *Aggregation_Reducer { + p := new(Aggregation_Reducer) + *p = x + return p +} + +func (x Aggregation_Reducer) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Aggregation_Reducer) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_common_proto_enumTypes[3].Descriptor() +} + +func (Aggregation_Reducer) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_common_proto_enumTypes[3] +} + +func (x Aggregation_Reducer) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Aggregation_Reducer.Descriptor instead. +func (Aggregation_Reducer) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{2, 1} +} + +// A single strongly-typed value. +type TypedValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The typed value field. 
+ // + // Types that are assignable to Value: + // + // *TypedValue_BoolValue + // *TypedValue_Int64Value + // *TypedValue_DoubleValue + // *TypedValue_StringValue + // *TypedValue_DistributionValue + Value isTypedValue_Value `protobuf_oneof:"value"` +} + +func (x *TypedValue) Reset() { + *x = TypedValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypedValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypedValue) ProtoMessage() {} + +func (x *TypedValue) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypedValue.ProtoReflect.Descriptor instead. +func (*TypedValue) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{0} +} + +func (m *TypedValue) GetValue() isTypedValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (x *TypedValue) GetBoolValue() bool { + if x, ok := x.GetValue().(*TypedValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *TypedValue) GetInt64Value() int64 { + if x, ok := x.GetValue().(*TypedValue_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (x *TypedValue) GetDoubleValue() float64 { + if x, ok := x.GetValue().(*TypedValue_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (x *TypedValue) GetStringValue() string { + if x, ok := x.GetValue().(*TypedValue_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *TypedValue) GetDistributionValue() *distribution.Distribution { + if x, ok := x.GetValue().(*TypedValue_DistributionValue); ok { + return x.DistributionValue + } + return nil +} + +type isTypedValue_Value interface { + isTypedValue_Value() +} + +type TypedValue_BoolValue struct { + // A Boolean value: `true` or `false`. + BoolValue bool `protobuf:"varint,1,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type TypedValue_Int64Value struct { + // A 64-bit integer. Its range is approximately ±9.2x1018. + Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type TypedValue_DoubleValue struct { + // A 64-bit double-precision floating-point number. Its magnitude + // is approximately ±10±300 and it has 16 + // significant digits of precision. + DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type TypedValue_StringValue struct { + // A variable-length string value. + StringValue string `protobuf:"bytes,4,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type TypedValue_DistributionValue struct { + // A distribution value. + DistributionValue *distribution.Distribution `protobuf:"bytes,5,opt,name=distribution_value,json=distributionValue,proto3,oneof"` +} + +func (*TypedValue_BoolValue) isTypedValue_Value() {} + +func (*TypedValue_Int64Value) isTypedValue_Value() {} + +func (*TypedValue_DoubleValue) isTypedValue_Value() {} + +func (*TypedValue_StringValue) isTypedValue_Value() {} + +func (*TypedValue_DistributionValue) isTypedValue_Value() {} + +// Describes a time interval: +// +// - Reads: A half-open time interval. 
It includes the end time but +// excludes the start time: `(startTime, endTime]`. The start time +// must be specified, must be earlier than the end time, and should be +// no older than the data retention period for the metric. +// - Writes: A closed time interval. It extends from the start time to the end +// time, +// and includes both: `[startTime, endTime]`. Valid time intervals +// depend on the +// [`MetricKind`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricKind) +// of the metric value. The end time must not be earlier than the start +// time, and the end time must not be more than 25 hours in the past or more +// than five minutes in the future. +// - For `GAUGE` metrics, the `startTime` value is technically optional; if +// no value is specified, the start time defaults to the value of the +// end time, and the interval represents a single point in time. If both +// start and end times are specified, they must be identical. Such an +// interval is valid only for `GAUGE` metrics, which are point-in-time +// measurements. The end time of a new interval must be at least a +// millisecond after the end time of the previous interval. +// - For `DELTA` metrics, the start time and end time must specify a +// non-zero interval, with subsequent points specifying contiguous and +// non-overlapping intervals. For `DELTA` metrics, the start time of +// the next interval must be at least a millisecond after the end time +// of the previous interval. +// - For `CUMULATIVE` metrics, the start time and end time must specify a +// non-zero interval, with subsequent points specifying the same +// start time and increasing end times, until an event resets the +// cumulative value to zero and sets a new start time for the following +// points. The new start time must be at least a millisecond after the +// end time of the previous interval. +// - The start time of a new interval must be at least a millisecond after +// the +// end time of the previous interval because intervals are closed. If the +// start time of a new interval is the same as the end time of the +// previous interval, then data written at the new start time could +// overwrite data written at the previous end time. +type TimeInterval struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The end of the time interval. + EndTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + // Optional. The beginning of the time interval. The default value + // for the start time is the end time. The start time must not be + // later than the end time. 
+ StartTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` +} + +func (x *TimeInterval) Reset() { + *x = TimeInterval{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeInterval) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeInterval) ProtoMessage() {} + +func (x *TimeInterval) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_common_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeInterval.ProtoReflect.Descriptor instead. +func (*TimeInterval) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{1} +} + +func (x *TimeInterval) GetEndTime() *timestamppb.Timestamp { + if x != nil { + return x.EndTime + } + return nil +} + +func (x *TimeInterval) GetStartTime() *timestamppb.Timestamp { + if x != nil { + return x.StartTime + } + return nil +} + +// Describes how to combine multiple time series to provide a different view of +// the data. Aggregation of time series is done in two steps. First, each time +// series in the set is _aligned_ to the same time interval boundaries, then the +// set of time series is optionally _reduced_ in number. +// +// Alignment consists of applying the `per_series_aligner` operation +// to each time series after its data has been divided into regular +// `alignment_period` time intervals. This process takes _all_ of the data +// points in an alignment period, applies a mathematical transformation such as +// averaging, minimum, maximum, delta, etc., and converts them into a single +// data point per period. +// +// Reduction is when the aligned and transformed time series can optionally be +// combined, reducing the number of time series through similar mathematical +// transformations. Reduction involves applying a `cross_series_reducer` to +// all the time series, optionally sorting the time series into subsets with +// `group_by_fields`, and applying the reducer to each subset. +// +// The raw time series data can contain a huge amount of information from +// multiple sources. Alignment and reduction transforms this mass of data into +// a more manageable and representative collection of data, for example "the +// 95% latency across the average of all tasks in a cluster". This +// representative data can be more easily graphed and comprehended, and the +// individual time series data is still available for later drilldown. For more +// details, see [Filtering and +// aggregation](https://cloud.google.com/monitoring/api/v3/aggregation). +type Aggregation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The `alignment_period` specifies a time interval, in seconds, that is used + // to divide the data in all the + // [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + // time. This will be done before the per-series aligner can be applied to + // the data. + // + // The value must be at least 60 seconds. If a per-series + // aligner other than `ALIGN_NONE` is specified, this field is required or an + // error is returned. 
If no per-series aligner is specified, or the aligner + // `ALIGN_NONE` is specified, then this field is ignored. + // + // The maximum value of the `alignment_period` is 104 weeks (2 years) for + // charts, and 90,000 seconds (25 hours) for alerting policies. + AlignmentPeriod *durationpb.Duration `protobuf:"bytes,1,opt,name=alignment_period,json=alignmentPeriod,proto3" json:"alignment_period,omitempty"` + // An `Aligner` describes how to bring the data points in a single + // time series into temporal alignment. Except for `ALIGN_NONE`, all + // alignments cause all the data points in an `alignment_period` to be + // mathematically grouped together, resulting in a single data point for + // each `alignment_period` with end timestamp at the end of the period. + // + // Not all alignment operations may be applied to all time series. The valid + // choices depend on the `metric_kind` and `value_type` of the original time + // series. Alignment can change the `metric_kind` or the `value_type` of + // the time series. + // + // Time series data must be aligned in order to perform cross-time + // series reduction. If `cross_series_reducer` is specified, then + // `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + // and `alignment_period` must be specified; otherwise, an error is + // returned. + PerSeriesAligner Aggregation_Aligner `protobuf:"varint,2,opt,name=per_series_aligner,json=perSeriesAligner,proto3,enum=google.monitoring.v3.Aggregation_Aligner" json:"per_series_aligner,omitempty"` + // The reduction operation to be used to combine time series into a single + // time series, where the value of each data point in the resulting series is + // a function of all the already aligned values in the input time series. + // + // Not all reducer operations can be applied to all time series. The valid + // choices depend on the `metric_kind` and the `value_type` of the original + // time series. Reduction can yield a time series with a different + // `metric_kind` or `value_type` than the input time series. + // + // Time series data must first be aligned (see `per_series_aligner`) in order + // to perform cross-time series reduction. If `cross_series_reducer` is + // specified, then `per_series_aligner` must be specified, and must not be + // `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + // error is returned. + CrossSeriesReducer Aggregation_Reducer `protobuf:"varint,4,opt,name=cross_series_reducer,json=crossSeriesReducer,proto3,enum=google.monitoring.v3.Aggregation_Reducer" json:"cross_series_reducer,omitempty"` + // The set of fields to preserve when `cross_series_reducer` is + // specified. The `group_by_fields` determine how the time series are + // partitioned into subsets prior to applying the aggregation + // operation. Each subset contains time series that have the same + // value for each of the grouping fields. Each individual time + // series is a member of exactly one subset. The + // `cross_series_reducer` is applied to each subset of time series. + // It is not possible to reduce across different resource types, so + // this field implicitly contains `resource.type`. Fields not + // specified in `group_by_fields` are aggregated away. If + // `group_by_fields` is not specified and all the time series have + // the same resource type, then the time series are aggregated into + // a single output time series. If `cross_series_reducer` is not + // defined, this field is ignored. 
+ GroupByFields []string `protobuf:"bytes,5,rep,name=group_by_fields,json=groupByFields,proto3" json:"group_by_fields,omitempty"` +} + +func (x *Aggregation) Reset() { + *x = Aggregation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_common_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Aggregation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Aggregation) ProtoMessage() {} + +func (x *Aggregation) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_common_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Aggregation.ProtoReflect.Descriptor instead. +func (*Aggregation) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{2} +} + +func (x *Aggregation) GetAlignmentPeriod() *durationpb.Duration { + if x != nil { + return x.AlignmentPeriod + } + return nil +} + +func (x *Aggregation) GetPerSeriesAligner() Aggregation_Aligner { + if x != nil { + return x.PerSeriesAligner + } + return Aggregation_ALIGN_NONE +} + +func (x *Aggregation) GetCrossSeriesReducer() Aggregation_Reducer { + if x != nil { + return x.CrossSeriesReducer + } + return Aggregation_REDUCE_NONE +} + +func (x *Aggregation) GetGroupByFields() []string { + if x != nil { + return x.GroupByFields + } + return nil +} + +var File_google_monitoring_v3_common_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_common_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xee, 0x01, 0x0a, 0x0a, 0x54, 0x79, + 0x70, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, + 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, + 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, + 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, + 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 
0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x49, 0x0a, 0x12, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x11, + 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x80, 0x01, 0x0a, 0x0c, 0x54, + 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x35, 0x0a, 0x08, 0x65, + 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x22, 0xf3, 0x07, + 0x0a, 0x0b, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, + 0x10, 0x61, 0x6c, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0f, 0x61, 0x6c, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x12, 0x57, 0x0a, 0x12, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x41, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x52, 0x10, 0x70, 0x65, 0x72, 0x53, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x41, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x14, + 0x63, 0x72, 0x6f, 0x73, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x64, + 0x75, 0x63, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, + 0x64, 0x75, 0x63, 0x65, 0x72, 0x52, 0x12, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x52, 0x65, 0x64, 0x75, 0x63, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0d, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x79, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x73, 0x22, 0x8b, 0x03, 0x0a, 0x07, 0x41, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x12, 0x0e, 0x0a, + 0x0a, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0f, 0x0a, + 0x0b, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x10, 0x01, 0x12, 
0x0e, + 0x0a, 0x0a, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x41, 0x54, 0x45, 0x10, 0x02, 0x12, 0x15, + 0x0a, 0x11, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x50, 0x4f, 0x4c, + 0x41, 0x54, 0x45, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x4e, + 0x45, 0x58, 0x54, 0x5f, 0x4f, 0x4c, 0x44, 0x45, 0x52, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, 0x41, + 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x4d, 0x49, 0x4e, 0x10, 0x0a, 0x12, 0x0d, 0x0a, 0x09, 0x41, 0x4c, + 0x49, 0x47, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x4c, 0x49, + 0x47, 0x4e, 0x5f, 0x4d, 0x45, 0x41, 0x4e, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x41, 0x4c, 0x49, + 0x47, 0x4e, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x41, 0x4c, + 0x49, 0x47, 0x4e, 0x5f, 0x53, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x4c, 0x49, + 0x47, 0x4e, 0x5f, 0x53, 0x54, 0x44, 0x44, 0x45, 0x56, 0x10, 0x0f, 0x12, 0x14, 0x0a, 0x10, 0x41, + 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, + 0x10, 0x12, 0x15, 0x0a, 0x11, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, + 0x5f, 0x46, 0x41, 0x4c, 0x53, 0x45, 0x10, 0x18, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x49, 0x47, + 0x4e, 0x5f, 0x46, 0x52, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, + 0x11, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, + 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x39, 0x39, 0x10, 0x12, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, + 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x39, + 0x35, 0x10, 0x13, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x45, 0x52, + 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x35, 0x30, 0x10, 0x14, 0x12, 0x17, 0x0a, 0x13, + 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, + 0x5f, 0x30, 0x35, 0x10, 0x15, 0x12, 0x18, 0x0a, 0x14, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x50, + 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x48, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x17, 0x22, + 0xb1, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x64, 0x75, 0x63, 0x65, 0x72, 0x12, 0x0f, 0x0a, 0x0b, 0x52, + 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, + 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x4d, 0x45, 0x41, 0x4e, 0x10, 0x01, 0x12, 0x0e, 0x0a, + 0x0a, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x4d, 0x49, 0x4e, 0x10, 0x02, 0x12, 0x0e, 0x0a, + 0x0a, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0x03, 0x12, 0x0e, 0x0a, + 0x0a, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x53, 0x55, 0x4d, 0x10, 0x04, 0x12, 0x11, 0x0a, + 0x0d, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x53, 0x54, 0x44, 0x44, 0x45, 0x56, 0x10, 0x05, + 0x12, 0x10, 0x0a, 0x0c, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, + 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x55, + 0x4e, 0x54, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, 0x07, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x44, + 0x55, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x5f, 0x46, 0x41, 0x4c, 0x53, 0x45, 0x10, + 0x0f, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x46, 0x52, 0x41, 0x43, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, 0x08, 0x12, 0x18, 0x0a, 0x14, 0x52, + 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, + 0x5f, 0x39, 0x39, 
0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, + 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x39, 0x35, 0x10, 0x0a, 0x12, + 0x18, 0x0a, 0x14, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, + 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x35, 0x30, 0x10, 0x0b, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x44, + 0x55, 0x43, 0x45, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x30, + 0x35, 0x10, 0x0c, 0x2a, 0x9e, 0x01, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, + 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, + 0x49, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, + 0x5f, 0x47, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, + 0x53, 0x4f, 0x4e, 0x5f, 0x47, 0x45, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, + 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, 0x5f, 0x4c, 0x54, 0x10, 0x03, 0x12, 0x11, 0x0a, 0x0d, 0x43, + 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x11, + 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, 0x5f, 0x45, 0x51, 0x10, + 0x05, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, 0x5f, + 0x4e, 0x45, 0x10, 0x06, 0x2a, 0x61, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, + 0x69, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x54, + 0x49, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x54, 0x49, 0x45, + 0x52, 0x5f, 0x42, 0x41, 0x53, 0x49, 0x43, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x52, + 0x56, 0x49, 0x43, 0x45, 0x5f, 0x54, 0x49, 0x45, 0x52, 0x5f, 0x50, 0x52, 0x45, 0x4d, 0x49, 0x55, + 0x4d, 0x10, 0x02, 0x1a, 0x02, 0x18, 0x01, 0x42, 0xcd, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xa2, 0x02, 0x04, 0x47, 0x4d, 0x4f, 0x4e, 0xaa, 0x02, 0x1a, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_common_proto_rawDescOnce sync.Once + file_google_monitoring_v3_common_proto_rawDescData = 
file_google_monitoring_v3_common_proto_rawDesc +) + +func file_google_monitoring_v3_common_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_common_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_common_proto_rawDescData) + }) + return file_google_monitoring_v3_common_proto_rawDescData +} + +var file_google_monitoring_v3_common_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_google_monitoring_v3_common_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_google_monitoring_v3_common_proto_goTypes = []any{ + (ComparisonType)(0), // 0: google.monitoring.v3.ComparisonType + (ServiceTier)(0), // 1: google.monitoring.v3.ServiceTier + (Aggregation_Aligner)(0), // 2: google.monitoring.v3.Aggregation.Aligner + (Aggregation_Reducer)(0), // 3: google.monitoring.v3.Aggregation.Reducer + (*TypedValue)(nil), // 4: google.monitoring.v3.TypedValue + (*TimeInterval)(nil), // 5: google.monitoring.v3.TimeInterval + (*Aggregation)(nil), // 6: google.monitoring.v3.Aggregation + (*distribution.Distribution)(nil), // 7: google.api.Distribution + (*timestamppb.Timestamp)(nil), // 8: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 9: google.protobuf.Duration +} +var file_google_monitoring_v3_common_proto_depIdxs = []int32{ + 7, // 0: google.monitoring.v3.TypedValue.distribution_value:type_name -> google.api.Distribution + 8, // 1: google.monitoring.v3.TimeInterval.end_time:type_name -> google.protobuf.Timestamp + 8, // 2: google.monitoring.v3.TimeInterval.start_time:type_name -> google.protobuf.Timestamp + 9, // 3: google.monitoring.v3.Aggregation.alignment_period:type_name -> google.protobuf.Duration + 2, // 4: google.monitoring.v3.Aggregation.per_series_aligner:type_name -> google.monitoring.v3.Aggregation.Aligner + 3, // 5: google.monitoring.v3.Aggregation.cross_series_reducer:type_name -> google.monitoring.v3.Aggregation.Reducer + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_common_proto_init() } +func file_google_monitoring_v3_common_proto_init() { + if File_google_monitoring_v3_common_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_common_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*TypedValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_common_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*TimeInterval); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_common_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*Aggregation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_monitoring_v3_common_proto_msgTypes[0].OneofWrappers = []any{ + (*TypedValue_BoolValue)(nil), + (*TypedValue_Int64Value)(nil), + (*TypedValue_DoubleValue)(nil), + (*TypedValue_StringValue)(nil), + (*TypedValue_DistributionValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + 
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_common_proto_rawDesc, + NumEnums: 4, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_common_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_common_proto_depIdxs, + EnumInfos: file_google_monitoring_v3_common_proto_enumTypes, + MessageInfos: file_google_monitoring_v3_common_proto_msgTypes, + }.Build() + File_google_monitoring_v3_common_proto = out.File + file_google_monitoring_v3_common_proto_rawDesc = nil + file_google_monitoring_v3_common_proto_goTypes = nil + file_google_monitoring_v3_common_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go new file mode 100644 index 0000000000..7b1dc962da --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go @@ -0,0 +1,197 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/dropped_labels.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A set of (label, value) pairs that were removed from a Distribution +// time series during aggregation and then added as an attachment to a +// Distribution.Exemplar. +// +// The full label set for the exemplars is constructed by using the dropped +// pairs in combination with the label values that remain on the aggregated +// Distribution time series. The constructed full label set can be used to +// identify the specific entity, such as the instance or job, which might be +// contributing to a long-tail. However, with dropped labels, the storage +// requirements are reduced because only the aggregated distribution values for +// a large group of time series are stored. +// +// Note that there are no guarantees on ordering of the labels from +// exemplar-to-exemplar and from distribution-to-distribution in the same +// stream, and there may be duplicates. It is up to clients to resolve any +// ambiguities. +type DroppedLabels struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map from label to its value, for all labels dropped in any aggregation. 
+ Label map[string]string `protobuf:"bytes,1,rep,name=label,proto3" json:"label,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *DroppedLabels) Reset() { + *x = DroppedLabels{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_dropped_labels_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DroppedLabels) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DroppedLabels) ProtoMessage() {} + +func (x *DroppedLabels) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_dropped_labels_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DroppedLabels.ProtoReflect.Descriptor instead. +func (*DroppedLabels) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_dropped_labels_proto_rawDescGZIP(), []int{0} +} + +func (x *DroppedLabels) GetLabel() map[string]string { + if x != nil { + return x.Label + } + return nil +} + +var File_google_monitoring_v3_dropped_labels_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_dropped_labels_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x22, 0x8f, 0x01, 0x0a, 0x0d, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x12, 0x44, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, + 0x64, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x1a, 0x38, 0x0a, 0x0a, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x42, 0xcd, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x42, 0x12, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 
0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, + 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_dropped_labels_proto_rawDescOnce sync.Once + file_google_monitoring_v3_dropped_labels_proto_rawDescData = file_google_monitoring_v3_dropped_labels_proto_rawDesc +) + +func file_google_monitoring_v3_dropped_labels_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_dropped_labels_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_dropped_labels_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_dropped_labels_proto_rawDescData) + }) + return file_google_monitoring_v3_dropped_labels_proto_rawDescData +} + +var file_google_monitoring_v3_dropped_labels_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_google_monitoring_v3_dropped_labels_proto_goTypes = []any{ + (*DroppedLabels)(nil), // 0: google.monitoring.v3.DroppedLabels + nil, // 1: google.monitoring.v3.DroppedLabels.LabelEntry +} +var file_google_monitoring_v3_dropped_labels_proto_depIdxs = []int32{ + 1, // 0: google.monitoring.v3.DroppedLabels.label:type_name -> google.monitoring.v3.DroppedLabels.LabelEntry + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_dropped_labels_proto_init() } +func file_google_monitoring_v3_dropped_labels_proto_init() { + if File_google_monitoring_v3_dropped_labels_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_dropped_labels_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*DroppedLabels); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_dropped_labels_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_dropped_labels_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_dropped_labels_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_dropped_labels_proto_msgTypes, + }.Build() + File_google_monitoring_v3_dropped_labels_proto = out.File + file_google_monitoring_v3_dropped_labels_proto_rawDesc = nil + file_google_monitoring_v3_dropped_labels_proto_goTypes = nil + file_google_monitoring_v3_dropped_labels_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go new file mode 100644 index 0000000000..dff27f9d8c --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go @@ -0,0 +1,265 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/group.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The description of a dynamic collection of monitored resources. Each group +// has a filter that is matched against monitored resources and their associated +// metadata. If a group's filter matches an available monitored resource, then +// that resource is a member of that group. Groups can contain any number of +// monitored resources, and each monitored resource can be a member of any +// number of groups. +// +// Groups can be nested in parent-child hierarchies. The `parentName` field +// identifies an optional parent for each group. If a group has a parent, then +// the only monitored resources available to be matched by the group's filter +// are the resources contained in the parent group. In other words, a group +// contains the monitored resources that match its filter and the filters of all +// the group's ancestors. A group without a parent can contain any monitored +// resource. +// +// For example, consider an infrastructure running a set of instances with two +// user-defined tags: `"environment"` and `"role"`. A parent group has a filter, +// `environment="production"`. A child of that parent group has a filter, +// `role="transcoder"`. The parent group contains all instances in the +// production environment, regardless of their roles. The child group contains +// instances that have the transcoder role *and* are in the production +// environment. +// +// The monitored resources contained in a group can change at any moment, +// depending on what resources exist and what filters are associated with the +// group and its ancestors. +type Group struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The name of this group. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + // + // When creating a group, this field is ignored and a new name is created + // consisting of the project specified in the call to `CreateGroup` + // and a unique `[GROUP_ID]` that is generated automatically. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A user-assigned name for this group, used only for display purposes. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The name of the group's parent, if it has one. 
The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + // + // For groups with no parent, `parent_name` is the empty string, `""`. + ParentName string `protobuf:"bytes,3,opt,name=parent_name,json=parentName,proto3" json:"parent_name,omitempty"` + // The filter used to determine which monitored resources belong to this + // group. + Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` + // If true, the members of this group are considered to be a cluster. + // The system can perform additional analysis on groups that are clusters. + IsCluster bool `protobuf:"varint,6,opt,name=is_cluster,json=isCluster,proto3" json:"is_cluster,omitempty"` +} + +func (x *Group) Reset() { + *x = Group{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Group) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Group) ProtoMessage() {} + +func (x *Group) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Group.ProtoReflect.Descriptor instead. +func (*Group) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_proto_rawDescGZIP(), []int{0} +} + +func (x *Group) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Group) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *Group) GetParentName() string { + if x != nil { + return x.ParentName + } + return "" +} + +func (x *Group) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *Group) GetIsCluster() bool { + if x != nil { + return x.IsCluster + } + return false +} + +var File_google_monitoring_v3_group_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_group_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1d, 0x0a, + 0x0a, 0x69, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x52, 
0x09, 0x69, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x3a, 0x99, 0x01, 0xea, + 0x41, 0x95, 0x01, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, + 0x72, 0x6f, 0x75, 0x70, 0x12, 0x21, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, + 0x7b, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x7d, 0x12, 0x2b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x7b, 0x67, 0x72, + 0x6f, 0x75, 0x70, 0x7d, 0x12, 0x1f, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, + 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x7b, 0x67, + 0x72, 0x6f, 0x75, 0x70, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0xc5, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, + 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_group_proto_rawDescOnce sync.Once + file_google_monitoring_v3_group_proto_rawDescData = file_google_monitoring_v3_group_proto_rawDesc +) + +func file_google_monitoring_v3_group_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_group_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_group_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_group_proto_rawDescData) + }) + return file_google_monitoring_v3_group_proto_rawDescData +} + +var file_google_monitoring_v3_group_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_monitoring_v3_group_proto_goTypes = []any{ + (*Group)(nil), // 0: google.monitoring.v3.Group +} +var file_google_monitoring_v3_group_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_group_proto_init() } +func file_google_monitoring_v3_group_proto_init() { + if File_google_monitoring_v3_group_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + 
file_google_monitoring_v3_group_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Group); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_group_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_group_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_group_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_group_proto_msgTypes, + }.Build() + File_google_monitoring_v3_group_proto = out.File + file_google_monitoring_v3_group_proto_rawDesc = nil + file_google_monitoring_v3_group_proto_goTypes = nil + file_google_monitoring_v3_group_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go new file mode 100644 index 0000000000..46747d9064 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go @@ -0,0 +1,1319 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/group_service.proto + +package monitoringpb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The `ListGroup` request. +type ListGroupsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) whose + // groups are to be listed. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` + // An optional filter consisting of a single group name. The filters limit + // the groups returned based on their parent-child relationship with the + // specified group. If no filter is specified, all groups are returned. 
+ // + // Types that are assignable to Filter: + // + // *ListGroupsRequest_ChildrenOfGroup + // *ListGroupsRequest_AncestorsOfGroup + // *ListGroupsRequest_DescendantsOfGroup + Filter isListGroupsRequest_Filter `protobuf_oneof:"filter"` + // A positive number that is the maximum number of results to return. + PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `next_page_token` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,6,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListGroupsRequest) Reset() { + *x = ListGroupsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListGroupsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListGroupsRequest) ProtoMessage() {} + +func (x *ListGroupsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListGroupsRequest.ProtoReflect.Descriptor instead. +func (*ListGroupsRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{0} +} + +func (x *ListGroupsRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *ListGroupsRequest) GetFilter() isListGroupsRequest_Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (x *ListGroupsRequest) GetChildrenOfGroup() string { + if x, ok := x.GetFilter().(*ListGroupsRequest_ChildrenOfGroup); ok { + return x.ChildrenOfGroup + } + return "" +} + +func (x *ListGroupsRequest) GetAncestorsOfGroup() string { + if x, ok := x.GetFilter().(*ListGroupsRequest_AncestorsOfGroup); ok { + return x.AncestorsOfGroup + } + return "" +} + +func (x *ListGroupsRequest) GetDescendantsOfGroup() string { + if x, ok := x.GetFilter().(*ListGroupsRequest_DescendantsOfGroup); ok { + return x.DescendantsOfGroup + } + return "" +} + +func (x *ListGroupsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListGroupsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +type isListGroupsRequest_Filter interface { + isListGroupsRequest_Filter() +} + +type ListGroupsRequest_ChildrenOfGroup struct { + // A group name. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + // + // Returns groups whose `parent_name` field contains the group + // name. If no groups have this parent, the results are empty. + ChildrenOfGroup string `protobuf:"bytes,2,opt,name=children_of_group,json=childrenOfGroup,proto3,oneof"` +} + +type ListGroupsRequest_AncestorsOfGroup struct { + // A group name. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + // + // Returns groups that are ancestors of the specified group. + // The groups are returned in order, starting with the immediate parent and + // ending with the most distant ancestor. 
If the specified group has no + // immediate parent, the results are empty. + AncestorsOfGroup string `protobuf:"bytes,3,opt,name=ancestors_of_group,json=ancestorsOfGroup,proto3,oneof"` +} + +type ListGroupsRequest_DescendantsOfGroup struct { + // A group name. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + // + // Returns the descendants of the specified group. This is a superset of + // the results returned by the `children_of_group` filter, and includes + // children-of-children, and so forth. + DescendantsOfGroup string `protobuf:"bytes,4,opt,name=descendants_of_group,json=descendantsOfGroup,proto3,oneof"` +} + +func (*ListGroupsRequest_ChildrenOfGroup) isListGroupsRequest_Filter() {} + +func (*ListGroupsRequest_AncestorsOfGroup) isListGroupsRequest_Filter() {} + +func (*ListGroupsRequest_DescendantsOfGroup) isListGroupsRequest_Filter() {} + +// The `ListGroups` response. +type ListGroupsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The groups that match the specified filters. + Group []*Group `protobuf:"bytes,1,rep,name=group,proto3" json:"group,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListGroupsResponse) Reset() { + *x = ListGroupsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListGroupsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListGroupsResponse) ProtoMessage() {} + +func (x *ListGroupsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListGroupsResponse.ProtoReflect.Descriptor instead. +func (*ListGroupsResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ListGroupsResponse) GetGroup() []*Group { + if x != nil { + return x.Group + } + return nil +} + +func (x *ListGroupsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The `GetGroup` request. +type GetGroupRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The group to retrieve. 
The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetGroupRequest) Reset() { + *x = GetGroupRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetGroupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetGroupRequest) ProtoMessage() {} + +func (x *GetGroupRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetGroupRequest.ProtoReflect.Descriptor instead. +func (*GetGroupRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{2} +} + +func (x *GetGroupRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `CreateGroup` request. +type CreateGroupRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) in which + // to create the group. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // Required. A group definition. It is an error to define the `name` field + // because the system assigns the name. + Group *Group `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"` + // If true, validate this request but do not create the group. + ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"` +} + +func (x *CreateGroupRequest) Reset() { + *x = CreateGroupRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateGroupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateGroupRequest) ProtoMessage() {} + +func (x *CreateGroupRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateGroupRequest.ProtoReflect.Descriptor instead. +func (*CreateGroupRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{3} +} + +func (x *CreateGroupRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CreateGroupRequest) GetGroup() *Group { + if x != nil { + return x.Group + } + return nil +} + +func (x *CreateGroupRequest) GetValidateOnly() bool { + if x != nil { + return x.ValidateOnly + } + return false +} + +// The `UpdateGroup` request. +type UpdateGroupRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The new definition of the group. 
All fields of the existing + // group, excepting `name`, are replaced with the corresponding fields of this + // group. + Group *Group `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"` + // If true, validate this request but do not update the existing group. + ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"` +} + +func (x *UpdateGroupRequest) Reset() { + *x = UpdateGroupRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateGroupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateGroupRequest) ProtoMessage() {} + +func (x *UpdateGroupRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateGroupRequest.ProtoReflect.Descriptor instead. +func (*UpdateGroupRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{4} +} + +func (x *UpdateGroupRequest) GetGroup() *Group { + if x != nil { + return x.Group + } + return nil +} + +func (x *UpdateGroupRequest) GetValidateOnly() bool { + if x != nil { + return x.ValidateOnly + } + return false +} + +// The `DeleteGroup` request. The default behavior is to be able to delete a +// single group without any descendants. +type DeleteGroupRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The group to delete. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // If this field is true, then the request means to delete a group with all + // its descendants. Otherwise, the request means to delete a group only when + // it has no descendants. The default value is false. + Recursive bool `protobuf:"varint,4,opt,name=recursive,proto3" json:"recursive,omitempty"` +} + +func (x *DeleteGroupRequest) Reset() { + *x = DeleteGroupRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteGroupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteGroupRequest) ProtoMessage() {} + +func (x *DeleteGroupRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteGroupRequest.ProtoReflect.Descriptor instead. +func (*DeleteGroupRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{5} +} + +func (x *DeleteGroupRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *DeleteGroupRequest) GetRecursive() bool { + if x != nil { + return x.Recursive + } + return false +} + +// The `ListGroupMembers` request. 
+type ListGroupMembersRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The group whose members are listed. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` + // A positive number that is the maximum number of results to return. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `next_page_token` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // An optional [list + // filter](https://cloud.google.com/monitoring/api/learn_more#filtering) + // describing the members to be returned. The filter may reference the type, + // labels, and metadata of monitored resources that comprise the group. For + // example, to return only resources representing Compute Engine VM instances, + // use this filter: + // + // `resource.type = "gce_instance"` + Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` + // An optional time interval for which results should be returned. Only + // members that were part of the group during the specified interval are + // included in the response. If no interval is provided then the group + // membership over the last minute is returned. + Interval *TimeInterval `protobuf:"bytes,6,opt,name=interval,proto3" json:"interval,omitempty"` +} + +func (x *ListGroupMembersRequest) Reset() { + *x = ListGroupMembersRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListGroupMembersRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListGroupMembersRequest) ProtoMessage() {} + +func (x *ListGroupMembersRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListGroupMembersRequest.ProtoReflect.Descriptor instead. +func (*ListGroupMembersRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{6} +} + +func (x *ListGroupMembersRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListGroupMembersRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListGroupMembersRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListGroupMembersRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListGroupMembersRequest) GetInterval() *TimeInterval { + if x != nil { + return x.Interval + } + return nil +} + +// The `ListGroupMembers` response. +type ListGroupMembersResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A set of monitored resources in the group. 
+ Members []*monitoredres.MonitoredResource `protobuf:"bytes,1,rep,name=members,proto3" json:"members,omitempty"` + // If there are more results than have been returned, then this field is + // set to a non-empty value. To see the additional results, use that value as + // `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of elements matching this request. + TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` +} + +func (x *ListGroupMembersResponse) Reset() { + *x = ListGroupMembersResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListGroupMembersResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListGroupMembersResponse) ProtoMessage() {} + +func (x *ListGroupMembersResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_group_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListGroupMembersResponse.ProtoReflect.Descriptor instead. +func (*ListGroupMembersResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{7} +} + +func (x *ListGroupMembersResponse) GetMembers() []*monitoredres.MonitoredResource { + if x != nil { + return x.Members + } + return nil +} + +func (x *ListGroupMembersResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *ListGroupMembersResponse) GetTotalSize() int32 { + if x != nil { + return x.TotalSize + } + return 0 +} + +var File_google_monitoring_v3_group_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_group_service_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, + 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x6d, 0x6f, 0x6e, 
0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, + 0x33, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, + 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9a, 0x03, 0x0a, 0x11, 0x4c, + 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x12, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x52, 0x0a, + 0x11, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x5f, 0x6f, 0x66, 0x5f, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x48, 0x00, + 0x52, 0x0f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x4f, 0x66, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x12, 0x54, 0x0a, 0x12, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x5f, 0x6f, + 0x66, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, + 0x41, 0x21, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x48, 0x00, 0x52, 0x10, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, + 0x4f, 0x66, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x58, 0x0a, 0x14, 0x64, 0x65, 0x73, 0x63, 0x65, + 0x6e, 0x64, 0x61, 0x6e, 0x74, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x48, 0x00, 0x52, 0x12, 0x64, + 0x65, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x61, 0x6e, 0x74, 0x73, 0x4f, 0x66, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, + 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x08, 0x0a, + 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x6f, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x47, + 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, + 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, + 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 
0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, + 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x4e, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x47, + 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, + 0x21, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xae, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x21, 0x12, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x05, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x67, + 0x72, 0x6f, 0x75, 0x70, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x71, 0x0a, 0x12, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x36, 0x0a, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x6f, 0x0a, 0x12, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0xea, 0x01, + 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4d, 0x65, 0x6d, 0x62, 0x65, + 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, + 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x08, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0x9a, 0x01, 0x0a, 0x18, 0x4c, + 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, + 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, + 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, + 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, + 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x32, 0x98, 0x08, 0x0a, 0x0c, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x8c, 0x01, 0x0a, 0x0a, 0x4c, 0x69, 0x73, + 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0xda, 0x41, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x2f, 0x76, 0x33, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, + 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x7d, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x2d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x12, 0x1e, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x8e, 0x01, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x38, 0xda, + 0x41, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x25, 0x3a, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x1c, 0x2f, 0x76, 0x33, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, + 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x91, 0x01, 0x0a, 0x0b, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x3b, + 0xda, 0x41, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2d, 0x3a, 0x05, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x1a, 0x24, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x7e, 0x0a, 0x0b, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x2d, 0xda, 0x41, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x2a, 0x1e, 0x2f, 0x76, 0x33, + 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa8, 0x01, 0x0a, 0x10, + 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, + 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, + 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x35, 0xda, 0x41, 0x04, 
0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x28, 0x12, 0x26, + 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6d, + 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, + 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, + 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, + 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65, + 0x61, 0x64, 0x42, 0xcc, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, + 0x11, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, + 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, + 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_group_service_proto_rawDescOnce sync.Once + file_google_monitoring_v3_group_service_proto_rawDescData = file_google_monitoring_v3_group_service_proto_rawDesc +) + +func file_google_monitoring_v3_group_service_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_group_service_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_group_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_group_service_proto_rawDescData) + }) + return file_google_monitoring_v3_group_service_proto_rawDescData +} + +var file_google_monitoring_v3_group_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_google_monitoring_v3_group_service_proto_goTypes = []any{ + (*ListGroupsRequest)(nil), // 0: google.monitoring.v3.ListGroupsRequest + (*ListGroupsResponse)(nil), // 1: 
google.monitoring.v3.ListGroupsResponse + (*GetGroupRequest)(nil), // 2: google.monitoring.v3.GetGroupRequest + (*CreateGroupRequest)(nil), // 3: google.monitoring.v3.CreateGroupRequest + (*UpdateGroupRequest)(nil), // 4: google.monitoring.v3.UpdateGroupRequest + (*DeleteGroupRequest)(nil), // 5: google.monitoring.v3.DeleteGroupRequest + (*ListGroupMembersRequest)(nil), // 6: google.monitoring.v3.ListGroupMembersRequest + (*ListGroupMembersResponse)(nil), // 7: google.monitoring.v3.ListGroupMembersResponse + (*Group)(nil), // 8: google.monitoring.v3.Group + (*TimeInterval)(nil), // 9: google.monitoring.v3.TimeInterval + (*monitoredres.MonitoredResource)(nil), // 10: google.api.MonitoredResource + (*emptypb.Empty)(nil), // 11: google.protobuf.Empty +} +var file_google_monitoring_v3_group_service_proto_depIdxs = []int32{ + 8, // 0: google.monitoring.v3.ListGroupsResponse.group:type_name -> google.monitoring.v3.Group + 8, // 1: google.monitoring.v3.CreateGroupRequest.group:type_name -> google.monitoring.v3.Group + 8, // 2: google.monitoring.v3.UpdateGroupRequest.group:type_name -> google.monitoring.v3.Group + 9, // 3: google.monitoring.v3.ListGroupMembersRequest.interval:type_name -> google.monitoring.v3.TimeInterval + 10, // 4: google.monitoring.v3.ListGroupMembersResponse.members:type_name -> google.api.MonitoredResource + 0, // 5: google.monitoring.v3.GroupService.ListGroups:input_type -> google.monitoring.v3.ListGroupsRequest + 2, // 6: google.monitoring.v3.GroupService.GetGroup:input_type -> google.monitoring.v3.GetGroupRequest + 3, // 7: google.monitoring.v3.GroupService.CreateGroup:input_type -> google.monitoring.v3.CreateGroupRequest + 4, // 8: google.monitoring.v3.GroupService.UpdateGroup:input_type -> google.monitoring.v3.UpdateGroupRequest + 5, // 9: google.monitoring.v3.GroupService.DeleteGroup:input_type -> google.monitoring.v3.DeleteGroupRequest + 6, // 10: google.monitoring.v3.GroupService.ListGroupMembers:input_type -> google.monitoring.v3.ListGroupMembersRequest + 1, // 11: google.monitoring.v3.GroupService.ListGroups:output_type -> google.monitoring.v3.ListGroupsResponse + 8, // 12: google.monitoring.v3.GroupService.GetGroup:output_type -> google.monitoring.v3.Group + 8, // 13: google.monitoring.v3.GroupService.CreateGroup:output_type -> google.monitoring.v3.Group + 8, // 14: google.monitoring.v3.GroupService.UpdateGroup:output_type -> google.monitoring.v3.Group + 11, // 15: google.monitoring.v3.GroupService.DeleteGroup:output_type -> google.protobuf.Empty + 7, // 16: google.monitoring.v3.GroupService.ListGroupMembers:output_type -> google.monitoring.v3.ListGroupMembersResponse + 11, // [11:17] is the sub-list for method output_type + 5, // [5:11] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_group_service_proto_init() } +func file_google_monitoring_v3_group_service_proto_init() { + if File_google_monitoring_v3_group_service_proto != nil { + return + } + file_google_monitoring_v3_common_proto_init() + file_google_monitoring_v3_group_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_group_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*ListGroupsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_monitoring_v3_group_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ListGroupsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_group_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*GetGroupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_group_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*CreateGroupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_group_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*UpdateGroupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_group_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*DeleteGroupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_group_service_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*ListGroupMembersRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_group_service_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*ListGroupMembersResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_monitoring_v3_group_service_proto_msgTypes[0].OneofWrappers = []any{ + (*ListGroupsRequest_ChildrenOfGroup)(nil), + (*ListGroupsRequest_AncestorsOfGroup)(nil), + (*ListGroupsRequest_DescendantsOfGroup)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_group_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_monitoring_v3_group_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_group_service_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_group_service_proto_msgTypes, + }.Build() + File_google_monitoring_v3_group_service_proto = out.File + file_google_monitoring_v3_group_service_proto_rawDesc = nil + file_google_monitoring_v3_group_service_proto_goTypes = nil + file_google_monitoring_v3_group_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// GroupServiceClient is the client API for GroupService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GroupServiceClient interface { + // Lists the existing groups. 
+ ListGroups(ctx context.Context, in *ListGroupsRequest, opts ...grpc.CallOption) (*ListGroupsResponse, error) + // Gets a single group. + GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*Group, error) + // Creates a new group. + CreateGroup(ctx context.Context, in *CreateGroupRequest, opts ...grpc.CallOption) (*Group, error) + // Updates an existing group. + // You can change any group attributes except `name`. + UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error) + // Deletes an existing group. + DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Lists the monitored resources that are members of a group. + ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error) +} + +type groupServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewGroupServiceClient(cc grpc.ClientConnInterface) GroupServiceClient { + return &groupServiceClient{cc} +} + +func (c *groupServiceClient) ListGroups(ctx context.Context, in *ListGroupsRequest, opts ...grpc.CallOption) (*ListGroupsResponse, error) { + out := new(ListGroupsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/ListGroups", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*Group, error) { + out := new(Group) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/GetGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) CreateGroup(ctx context.Context, in *CreateGroupRequest, opts ...grpc.CallOption) (*Group, error) { + out := new(Group) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/CreateGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error) { + out := new(Group) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/UpdateGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/DeleteGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error) { + out := new(ListGroupMembersResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/ListGroupMembers", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// GroupServiceServer is the server API for GroupService service. +type GroupServiceServer interface { + // Lists the existing groups. + ListGroups(context.Context, *ListGroupsRequest) (*ListGroupsResponse, error) + // Gets a single group. + GetGroup(context.Context, *GetGroupRequest) (*Group, error) + // Creates a new group. + CreateGroup(context.Context, *CreateGroupRequest) (*Group, error) + // Updates an existing group. + // You can change any group attributes except `name`. 
+ UpdateGroup(context.Context, *UpdateGroupRequest) (*Group, error) + // Deletes an existing group. + DeleteGroup(context.Context, *DeleteGroupRequest) (*emptypb.Empty, error) + // Lists the monitored resources that are members of a group. + ListGroupMembers(context.Context, *ListGroupMembersRequest) (*ListGroupMembersResponse, error) +} + +// UnimplementedGroupServiceServer can be embedded to have forward compatible implementations. +type UnimplementedGroupServiceServer struct { +} + +func (*UnimplementedGroupServiceServer) ListGroups(context.Context, *ListGroupsRequest) (*ListGroupsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListGroups not implemented") +} +func (*UnimplementedGroupServiceServer) GetGroup(context.Context, *GetGroupRequest) (*Group, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetGroup not implemented") +} +func (*UnimplementedGroupServiceServer) CreateGroup(context.Context, *CreateGroupRequest) (*Group, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateGroup not implemented") +} +func (*UnimplementedGroupServiceServer) UpdateGroup(context.Context, *UpdateGroupRequest) (*Group, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateGroup not implemented") +} +func (*UnimplementedGroupServiceServer) DeleteGroup(context.Context, *DeleteGroupRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteGroup not implemented") +} +func (*UnimplementedGroupServiceServer) ListGroupMembers(context.Context, *ListGroupMembersRequest) (*ListGroupMembersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListGroupMembers not implemented") +} + +func RegisterGroupServiceServer(s *grpc.Server, srv GroupServiceServer) { + s.RegisterService(&_GroupService_serviceDesc, srv) +} + +func _GroupService_ListGroups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListGroupsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).ListGroups(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/ListGroups", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).ListGroups(ctx, req.(*ListGroupsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_GetGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).GetGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/GetGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).GetGroup(ctx, req.(*GetGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_CreateGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).CreateGroup(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/CreateGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).CreateGroup(ctx, req.(*CreateGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_UpdateGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).UpdateGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/UpdateGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).UpdateGroup(ctx, req.(*UpdateGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_DeleteGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).DeleteGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/DeleteGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).DeleteGroup(ctx, req.(*DeleteGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_ListGroupMembers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListGroupMembersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).ListGroupMembers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/ListGroupMembers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).ListGroupMembers(ctx, req.(*ListGroupMembersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _GroupService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.GroupService", + HandlerType: (*GroupServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListGroups", + Handler: _GroupService_ListGroups_Handler, + }, + { + MethodName: "GetGroup", + Handler: _GroupService_GetGroup_Handler, + }, + { + MethodName: "CreateGroup", + Handler: _GroupService_CreateGroup_Handler, + }, + { + MethodName: "UpdateGroup", + Handler: _GroupService_UpdateGroup_Handler, + }, + { + MethodName: "DeleteGroup", + Handler: _GroupService_DeleteGroup_Handler, + }, + { + MethodName: "ListGroupMembers", + Handler: _GroupService_ListGroupMembers_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/group_service.proto", +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go new file mode 100644 index 0000000000..b22c22d07e --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go @@ -0,0 +1,1194 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/metric.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + label "google.golang.org/genproto/googleapis/api/label" + metric "google.golang.org/genproto/googleapis/api/metric" + monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A single data point in a time series. +type Point struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The time interval to which the data point applies. For `GAUGE` metrics, + // the start time is optional, but if it is supplied, it must equal the + // end time. For `DELTA` metrics, the start + // and end time should specify a non-zero interval, with subsequent points + // specifying contiguous and non-overlapping intervals. For `CUMULATIVE` + // metrics, the start and end time should specify a non-zero interval, with + // subsequent points specifying the same start time and increasing end times, + // until an event resets the cumulative value to zero and sets a new start + // time for the following points. + Interval *TimeInterval `protobuf:"bytes,1,opt,name=interval,proto3" json:"interval,omitempty"` + // The value of the data point. + Value *TypedValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Point) Reset() { + *x = Point{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Point) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Point) ProtoMessage() {} + +func (x *Point) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Point.ProtoReflect.Descriptor instead. +func (*Point) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{0} +} + +func (x *Point) GetInterval() *TimeInterval { + if x != nil { + return x.Interval + } + return nil +} + +func (x *Point) GetValue() *TypedValue { + if x != nil { + return x.Value + } + return nil +} + +// A collection of data points that describes the time-varying values +// of a metric. 
A time series is identified by a combination of a +// fully-specified monitored resource and a fully-specified metric. +// This type is used for both listing and creating time series. +type TimeSeries struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The associated metric. A fully-specified metric used to identify the time + // series. + Metric *metric.Metric `protobuf:"bytes,1,opt,name=metric,proto3" json:"metric,omitempty"` + // The associated monitored resource. Custom metrics can use only certain + // monitored resource types in their time series data. For more information, + // see [Monitored resources for custom + // metrics](https://cloud.google.com/monitoring/custom-metrics/creating-metrics#custom-metric-resources). + Resource *monitoredres.MonitoredResource `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"` + // Output only. The associated monitored resource metadata. When reading a + // time series, this field will include metadata labels that are explicitly + // named in the reduction. When creating a time series, this field is ignored. + Metadata *monitoredres.MonitoredResourceMetadata `protobuf:"bytes,7,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The metric kind of the time series. When listing time series, this metric + // kind might be different from the metric kind of the associated metric if + // this time series is an alignment or reduction of other time series. + // + // When creating a time series, this field is optional. If present, it must be + // the same as the metric kind of the associated metric. If the associated + // metric's descriptor must be auto-created, then this field specifies the + // metric kind of the new descriptor and must be either `GAUGE` (the default) + // or `CUMULATIVE`. + MetricKind metric.MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,proto3,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"` + // The value type of the time series. When listing time series, this value + // type might be different from the value type of the associated metric if + // this time series is an alignment or reduction of other time series. + // + // When creating a time series, this field is optional. If present, it must be + // the same as the type of the data in the `points` field. + ValueType metric.MetricDescriptor_ValueType `protobuf:"varint,4,opt,name=value_type,json=valueType,proto3,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"` + // The data points of this time series. When listing time series, points are + // returned in reverse time order. + // + // When creating a time series, this field must contain exactly one point and + // the point's type must be the same as the value type of the associated + // metric. If the associated metric's descriptor must be auto-created, then + // the value type of the descriptor is determined by the point's type, which + // must be `BOOL`, `INT64`, `DOUBLE`, or `DISTRIBUTION`. + Points []*Point `protobuf:"bytes,5,rep,name=points,proto3" json:"points,omitempty"` + // The units in which the metric value is reported. It is only applicable + // if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` + // defines the representation of the stored metric values. 
+ Unit string `protobuf:"bytes,8,opt,name=unit,proto3" json:"unit,omitempty"` +} + +func (x *TimeSeries) Reset() { + *x = TimeSeries{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeSeries) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeSeries) ProtoMessage() {} + +func (x *TimeSeries) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeSeries.ProtoReflect.Descriptor instead. +func (*TimeSeries) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{1} +} + +func (x *TimeSeries) GetMetric() *metric.Metric { + if x != nil { + return x.Metric + } + return nil +} + +func (x *TimeSeries) GetResource() *monitoredres.MonitoredResource { + if x != nil { + return x.Resource + } + return nil +} + +func (x *TimeSeries) GetMetadata() *monitoredres.MonitoredResourceMetadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *TimeSeries) GetMetricKind() metric.MetricDescriptor_MetricKind { + if x != nil { + return x.MetricKind + } + return metric.MetricDescriptor_MetricKind(0) +} + +func (x *TimeSeries) GetValueType() metric.MetricDescriptor_ValueType { + if x != nil { + return x.ValueType + } + return metric.MetricDescriptor_ValueType(0) +} + +func (x *TimeSeries) GetPoints() []*Point { + if x != nil { + return x.Points + } + return nil +} + +func (x *TimeSeries) GetUnit() string { + if x != nil { + return x.Unit + } + return "" +} + +// A descriptor for the labels and points in a time series. +type TimeSeriesDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Descriptors for the labels. + LabelDescriptors []*label.LabelDescriptor `protobuf:"bytes,1,rep,name=label_descriptors,json=labelDescriptors,proto3" json:"label_descriptors,omitempty"` + // Descriptors for the point data value columns. + PointDescriptors []*TimeSeriesDescriptor_ValueDescriptor `protobuf:"bytes,5,rep,name=point_descriptors,json=pointDescriptors,proto3" json:"point_descriptors,omitempty"` +} + +func (x *TimeSeriesDescriptor) Reset() { + *x = TimeSeriesDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeSeriesDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeSeriesDescriptor) ProtoMessage() {} + +func (x *TimeSeriesDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeSeriesDescriptor.ProtoReflect.Descriptor instead. 
+func (*TimeSeriesDescriptor) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{2} +} + +func (x *TimeSeriesDescriptor) GetLabelDescriptors() []*label.LabelDescriptor { + if x != nil { + return x.LabelDescriptors + } + return nil +} + +func (x *TimeSeriesDescriptor) GetPointDescriptors() []*TimeSeriesDescriptor_ValueDescriptor { + if x != nil { + return x.PointDescriptors + } + return nil +} + +// Represents the values of a time series associated with a +// TimeSeriesDescriptor. +type TimeSeriesData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The values of the labels in the time series identifier, given in the same + // order as the `label_descriptors` field of the TimeSeriesDescriptor + // associated with this object. Each value must have a value of the type + // given in the corresponding entry of `label_descriptors`. + LabelValues []*LabelValue `protobuf:"bytes,1,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` + // The points in the time series. + PointData []*TimeSeriesData_PointData `protobuf:"bytes,2,rep,name=point_data,json=pointData,proto3" json:"point_data,omitempty"` +} + +func (x *TimeSeriesData) Reset() { + *x = TimeSeriesData{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeSeriesData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeSeriesData) ProtoMessage() {} + +func (x *TimeSeriesData) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeSeriesData.ProtoReflect.Descriptor instead. +func (*TimeSeriesData) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{3} +} + +func (x *TimeSeriesData) GetLabelValues() []*LabelValue { + if x != nil { + return x.LabelValues + } + return nil +} + +func (x *TimeSeriesData) GetPointData() []*TimeSeriesData_PointData { + if x != nil { + return x.PointData + } + return nil +} + +// A label value. +type LabelValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The label value can be a bool, int64, or string. 
+ // + // Types that are assignable to Value: + // + // *LabelValue_BoolValue + // *LabelValue_Int64Value + // *LabelValue_StringValue + Value isLabelValue_Value `protobuf_oneof:"value"` +} + +func (x *LabelValue) Reset() { + *x = LabelValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LabelValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LabelValue) ProtoMessage() {} + +func (x *LabelValue) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LabelValue.ProtoReflect.Descriptor instead. +func (*LabelValue) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{4} +} + +func (m *LabelValue) GetValue() isLabelValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (x *LabelValue) GetBoolValue() bool { + if x, ok := x.GetValue().(*LabelValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *LabelValue) GetInt64Value() int64 { + if x, ok := x.GetValue().(*LabelValue_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (x *LabelValue) GetStringValue() string { + if x, ok := x.GetValue().(*LabelValue_StringValue); ok { + return x.StringValue + } + return "" +} + +type isLabelValue_Value interface { + isLabelValue_Value() +} + +type LabelValue_BoolValue struct { + // A bool label value. + BoolValue bool `protobuf:"varint,1,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type LabelValue_Int64Value struct { + // An int64 label value. + Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type LabelValue_StringValue struct { + // A string label value. + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +func (*LabelValue_BoolValue) isLabelValue_Value() {} + +func (*LabelValue_Int64Value) isLabelValue_Value() {} + +func (*LabelValue_StringValue) isLabelValue_Value() {} + +// An error associated with a query in the time series query language format. +type QueryError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The location of the time series query language text that this error applies + // to. + Locator *TextLocator `protobuf:"bytes,1,opt,name=locator,proto3" json:"locator,omitempty"` + // The error message. 
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` +} + +func (x *QueryError) Reset() { + *x = QueryError{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryError) ProtoMessage() {} + +func (x *QueryError) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryError.ProtoReflect.Descriptor instead. +func (*QueryError) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{5} +} + +func (x *QueryError) GetLocator() *TextLocator { + if x != nil { + return x.Locator + } + return nil +} + +func (x *QueryError) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +// A locator for text. Indicates a particular part of the text of a request or +// of an object referenced in the request. +// +// For example, suppose the request field `text` contains: +// +// text: "The quick brown fox jumps over the lazy dog." +// +// Then the locator: +// +// source: "text" +// start_position { +// line: 1 +// column: 17 +// } +// end_position { +// line: 1 +// column: 19 +// } +// +// refers to the part of the text: "fox". +type TextLocator struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The source of the text. The source may be a field in the request, in which + // case its format is the format of the + // google.rpc.BadRequest.FieldViolation.field field in + // https://cloud.google.com/apis/design/errors#error_details. It may also be + // be a source other than the request field (e.g. a macro definition + // referenced in the text of the query), in which case this is the name of + // the source (e.g. the macro name). + Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` + // The position of the first byte within the text. + StartPosition *TextLocator_Position `protobuf:"bytes,2,opt,name=start_position,json=startPosition,proto3" json:"start_position,omitempty"` + // The position of the last byte within the text. + EndPosition *TextLocator_Position `protobuf:"bytes,3,opt,name=end_position,json=endPosition,proto3" json:"end_position,omitempty"` + // If `source`, `start_position`, and `end_position` describe a call on + // some object (e.g. a macro in the time series query language text) and a + // location is to be designated in that object's text, `nested_locator` + // identifies the location within that object. + NestedLocator *TextLocator `protobuf:"bytes,4,opt,name=nested_locator,json=nestedLocator,proto3" json:"nested_locator,omitempty"` + // When `nested_locator` is set, this field gives the reason for the nesting. + // Usually, the reason is a macro invocation. In that case, the macro name + // (including the leading '@') signals the location of the macro call + // in the text and a macro argument name (including the leading '$') signals + // the location of the macro argument inside the macro body that got + // substituted away. 
+ NestingReason string `protobuf:"bytes,5,opt,name=nesting_reason,json=nestingReason,proto3" json:"nesting_reason,omitempty"` +} + +func (x *TextLocator) Reset() { + *x = TextLocator{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TextLocator) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TextLocator) ProtoMessage() {} + +func (x *TextLocator) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TextLocator.ProtoReflect.Descriptor instead. +func (*TextLocator) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{6} +} + +func (x *TextLocator) GetSource() string { + if x != nil { + return x.Source + } + return "" +} + +func (x *TextLocator) GetStartPosition() *TextLocator_Position { + if x != nil { + return x.StartPosition + } + return nil +} + +func (x *TextLocator) GetEndPosition() *TextLocator_Position { + if x != nil { + return x.EndPosition + } + return nil +} + +func (x *TextLocator) GetNestedLocator() *TextLocator { + if x != nil { + return x.NestedLocator + } + return nil +} + +func (x *TextLocator) GetNestingReason() string { + if x != nil { + return x.NestingReason + } + return "" +} + +// A descriptor for the value columns in a data point. +type TimeSeriesDescriptor_ValueDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The value key. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The value type. + ValueType metric.MetricDescriptor_ValueType `protobuf:"varint,2,opt,name=value_type,json=valueType,proto3,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"` + // The value stream kind. + MetricKind metric.MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,proto3,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"` + // The unit in which `time_series` point values are reported. `unit` + // follows the UCUM format for units as seen in + // https://unitsofmeasure.org/ucum.html. + // `unit` is only valid if `value_type` is INTEGER, DOUBLE, DISTRIBUTION. 
+ Unit string `protobuf:"bytes,4,opt,name=unit,proto3" json:"unit,omitempty"` +} + +func (x *TimeSeriesDescriptor_ValueDescriptor) Reset() { + *x = TimeSeriesDescriptor_ValueDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeSeriesDescriptor_ValueDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeSeriesDescriptor_ValueDescriptor) ProtoMessage() {} + +func (x *TimeSeriesDescriptor_ValueDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeSeriesDescriptor_ValueDescriptor.ProtoReflect.Descriptor instead. +func (*TimeSeriesDescriptor_ValueDescriptor) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *TimeSeriesDescriptor_ValueDescriptor) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *TimeSeriesDescriptor_ValueDescriptor) GetValueType() metric.MetricDescriptor_ValueType { + if x != nil { + return x.ValueType + } + return metric.MetricDescriptor_ValueType(0) +} + +func (x *TimeSeriesDescriptor_ValueDescriptor) GetMetricKind() metric.MetricDescriptor_MetricKind { + if x != nil { + return x.MetricKind + } + return metric.MetricDescriptor_MetricKind(0) +} + +func (x *TimeSeriesDescriptor_ValueDescriptor) GetUnit() string { + if x != nil { + return x.Unit + } + return "" +} + +// A point's value columns and time interval. Each point has one or more +// point values corresponding to the entries in `point_descriptors` field in +// the TimeSeriesDescriptor associated with this object. +type TimeSeriesData_PointData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The values that make up the point. + Values []*TypedValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + // The time interval associated with the point. + TimeInterval *TimeInterval `protobuf:"bytes,2,opt,name=time_interval,json=timeInterval,proto3" json:"time_interval,omitempty"` +} + +func (x *TimeSeriesData_PointData) Reset() { + *x = TimeSeriesData_PointData{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeSeriesData_PointData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeSeriesData_PointData) ProtoMessage() {} + +func (x *TimeSeriesData_PointData) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeSeriesData_PointData.ProtoReflect.Descriptor instead. 
+func (*TimeSeriesData_PointData) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *TimeSeriesData_PointData) GetValues() []*TypedValue { + if x != nil { + return x.Values + } + return nil +} + +func (x *TimeSeriesData_PointData) GetTimeInterval() *TimeInterval { + if x != nil { + return x.TimeInterval + } + return nil +} + +// The position of a byte within the text. +type TextLocator_Position struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The line, starting with 1, where the byte is positioned. + Line int32 `protobuf:"varint,1,opt,name=line,proto3" json:"line,omitempty"` + // The column within the line, starting with 1, where the byte is + // positioned. This is a byte index even though the text is UTF-8. + Column int32 `protobuf:"varint,2,opt,name=column,proto3" json:"column,omitempty"` +} + +func (x *TextLocator_Position) Reset() { + *x = TextLocator_Position{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TextLocator_Position) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TextLocator_Position) ProtoMessage() {} + +func (x *TextLocator_Position) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TextLocator_Position.ProtoReflect.Descriptor instead. +func (*TextLocator_Position) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *TextLocator_Position) GetLine() int32 { + if x != nil { + return x.Line + } + return 0 +} + +func (x *TextLocator_Position) GetColumn() int32 { + if x != nil { + return x.Column + } + return 0 +} + +var File_google_monitoring_v3_metric_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_metric_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, + 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x7f, 0x0a, 0x05, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3e, 0x0a, 0x08, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x36, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x22, 0x90, 0x03, 0x0a, 0x0a, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x39, + 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, + 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x41, 0x0a, 0x08, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x48, 0x0a, 0x0b, + 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x45, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, + 0x06, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x06, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x22, 0x94, 0x03, 0x0a, 0x14, 0x54, 0x69, 0x6d, 0x65, 0x53, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, + 0x48, 0x0a, 0x11, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x10, 
0x6c, 0x61, 0x62, 0x65, 0x6c, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x67, 0x0a, 0x11, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x52, 0x10, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x73, 0x1a, 0xc8, 0x01, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x45, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x48, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0a, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x22, 0xb5, 0x02, + 0x0a, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, + 0x12, 0x43, 0x0a, 0x0c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x61, + 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x0a, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x2e, + 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x09, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x44, 0x61, 0x74, 0x61, 0x1a, 0x8e, 0x01, 0x0a, 0x09, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x38, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x47, 0x0a, 0x0d, + 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 
0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x49, + 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0x7e, 0x0a, 0x0a, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, + 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x63, 0x0a, 0x0a, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x65, 0x78, 0x74, + 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x07, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, + 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xf0, 0x02, 0x0a, 0x0b, 0x54, + 0x65, 0x78, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x12, 0x51, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x50, 0x6f, + 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x73, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x0c, 0x65, 0x6e, 0x64, 0x5f, 0x70, 0x6f, 0x73, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x50, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x65, 0x6e, 0x64, 0x50, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x0e, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, + 0x0d, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x25, + 0x0a, 0x0e, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x18, 
0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x52, + 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x1a, 0x36, 0x0a, 0x08, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x42, 0xc6, 0x01, + 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, + 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, + 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_metric_proto_rawDescOnce sync.Once + file_google_monitoring_v3_metric_proto_rawDescData = file_google_monitoring_v3_metric_proto_rawDesc +) + +func file_google_monitoring_v3_metric_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_metric_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_metric_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_metric_proto_rawDescData) + }) + return file_google_monitoring_v3_metric_proto_rawDescData +} + +var file_google_monitoring_v3_metric_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_google_monitoring_v3_metric_proto_goTypes = []any{ + (*Point)(nil), // 0: google.monitoring.v3.Point + (*TimeSeries)(nil), // 1: google.monitoring.v3.TimeSeries + (*TimeSeriesDescriptor)(nil), // 2: google.monitoring.v3.TimeSeriesDescriptor + (*TimeSeriesData)(nil), // 3: google.monitoring.v3.TimeSeriesData + (*LabelValue)(nil), // 4: google.monitoring.v3.LabelValue + (*QueryError)(nil), // 5: google.monitoring.v3.QueryError + (*TextLocator)(nil), // 6: google.monitoring.v3.TextLocator + (*TimeSeriesDescriptor_ValueDescriptor)(nil), // 7: google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor + (*TimeSeriesData_PointData)(nil), // 8: google.monitoring.v3.TimeSeriesData.PointData + (*TextLocator_Position)(nil), // 9: google.monitoring.v3.TextLocator.Position + (*TimeInterval)(nil), // 10: google.monitoring.v3.TimeInterval + (*TypedValue)(nil), // 11: google.monitoring.v3.TypedValue + (*metric.Metric)(nil), // 12: google.api.Metric + (*monitoredres.MonitoredResource)(nil), // 13: google.api.MonitoredResource + (*monitoredres.MonitoredResourceMetadata)(nil), // 14: google.api.MonitoredResourceMetadata + (metric.MetricDescriptor_MetricKind)(0), // 15: 
google.api.MetricDescriptor.MetricKind + (metric.MetricDescriptor_ValueType)(0), // 16: google.api.MetricDescriptor.ValueType + (*label.LabelDescriptor)(nil), // 17: google.api.LabelDescriptor +} +var file_google_monitoring_v3_metric_proto_depIdxs = []int32{ + 10, // 0: google.monitoring.v3.Point.interval:type_name -> google.monitoring.v3.TimeInterval + 11, // 1: google.monitoring.v3.Point.value:type_name -> google.monitoring.v3.TypedValue + 12, // 2: google.monitoring.v3.TimeSeries.metric:type_name -> google.api.Metric + 13, // 3: google.monitoring.v3.TimeSeries.resource:type_name -> google.api.MonitoredResource + 14, // 4: google.monitoring.v3.TimeSeries.metadata:type_name -> google.api.MonitoredResourceMetadata + 15, // 5: google.monitoring.v3.TimeSeries.metric_kind:type_name -> google.api.MetricDescriptor.MetricKind + 16, // 6: google.monitoring.v3.TimeSeries.value_type:type_name -> google.api.MetricDescriptor.ValueType + 0, // 7: google.monitoring.v3.TimeSeries.points:type_name -> google.monitoring.v3.Point + 17, // 8: google.monitoring.v3.TimeSeriesDescriptor.label_descriptors:type_name -> google.api.LabelDescriptor + 7, // 9: google.monitoring.v3.TimeSeriesDescriptor.point_descriptors:type_name -> google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor + 4, // 10: google.monitoring.v3.TimeSeriesData.label_values:type_name -> google.monitoring.v3.LabelValue + 8, // 11: google.monitoring.v3.TimeSeriesData.point_data:type_name -> google.monitoring.v3.TimeSeriesData.PointData + 6, // 12: google.monitoring.v3.QueryError.locator:type_name -> google.monitoring.v3.TextLocator + 9, // 13: google.monitoring.v3.TextLocator.start_position:type_name -> google.monitoring.v3.TextLocator.Position + 9, // 14: google.monitoring.v3.TextLocator.end_position:type_name -> google.monitoring.v3.TextLocator.Position + 6, // 15: google.monitoring.v3.TextLocator.nested_locator:type_name -> google.monitoring.v3.TextLocator + 16, // 16: google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor.value_type:type_name -> google.api.MetricDescriptor.ValueType + 15, // 17: google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor.metric_kind:type_name -> google.api.MetricDescriptor.MetricKind + 11, // 18: google.monitoring.v3.TimeSeriesData.PointData.values:type_name -> google.monitoring.v3.TypedValue + 10, // 19: google.monitoring.v3.TimeSeriesData.PointData.time_interval:type_name -> google.monitoring.v3.TimeInterval + 20, // [20:20] is the sub-list for method output_type + 20, // [20:20] is the sub-list for method input_type + 20, // [20:20] is the sub-list for extension type_name + 20, // [20:20] is the sub-list for extension extendee + 0, // [0:20] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_metric_proto_init() } +func file_google_monitoring_v3_metric_proto_init() { + if File_google_monitoring_v3_metric_proto != nil { + return + } + file_google_monitoring_v3_common_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_metric_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Point); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*TimeSeries); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[2].Exporter = func(v any, 
i int) any { + switch v := v.(*TimeSeriesDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*TimeSeriesData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*LabelValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*QueryError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*TextLocator); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*TimeSeriesDescriptor_ValueDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*TimeSeriesData_PointData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*TextLocator_Position); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_monitoring_v3_metric_proto_msgTypes[4].OneofWrappers = []any{ + (*LabelValue_BoolValue)(nil), + (*LabelValue_Int64Value)(nil), + (*LabelValue_StringValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_metric_proto_rawDesc, + NumEnums: 0, + NumMessages: 10, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_metric_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_metric_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_metric_proto_msgTypes, + }.Build() + File_google_monitoring_v3_metric_proto = out.File + file_google_monitoring_v3_metric_proto_rawDesc = nil + file_google_monitoring_v3_metric_proto_goTypes = nil + file_google_monitoring_v3_metric_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go new file mode 100644 index 0000000000..52e1c1e0b9 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go @@ -0,0 +1,2502 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/metric_service.proto + +package monitoringpb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + metric "google.golang.org/genproto/googleapis/api/metric" + monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" + status "google.golang.org/genproto/googleapis/rpc/status" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status1 "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Controls which fields are returned by `ListTimeSeries*`. +type ListTimeSeriesRequest_TimeSeriesView int32 + +const ( + // Returns the identity of the metric(s), the time series, + // and the time series data. + ListTimeSeriesRequest_FULL ListTimeSeriesRequest_TimeSeriesView = 0 + // Returns the identity of the metric and the time series resource, + // but not the time series data. + ListTimeSeriesRequest_HEADERS ListTimeSeriesRequest_TimeSeriesView = 1 +) + +// Enum value maps for ListTimeSeriesRequest_TimeSeriesView. +var ( + ListTimeSeriesRequest_TimeSeriesView_name = map[int32]string{ + 0: "FULL", + 1: "HEADERS", + } + ListTimeSeriesRequest_TimeSeriesView_value = map[string]int32{ + "FULL": 0, + "HEADERS": 1, + } +) + +func (x ListTimeSeriesRequest_TimeSeriesView) Enum() *ListTimeSeriesRequest_TimeSeriesView { + p := new(ListTimeSeriesRequest_TimeSeriesView) + *p = x + return p +} + +func (x ListTimeSeriesRequest_TimeSeriesView) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ListTimeSeriesRequest_TimeSeriesView) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_metric_service_proto_enumTypes[0].Descriptor() +} + +func (ListTimeSeriesRequest_TimeSeriesView) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_metric_service_proto_enumTypes[0] +} + +func (x ListTimeSeriesRequest_TimeSeriesView) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ListTimeSeriesRequest_TimeSeriesView.Descriptor instead. +func (ListTimeSeriesRequest_TimeSeriesView) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{8, 0} +} + +// The `ListMonitoredResourceDescriptors` request. +type ListMonitoredResourceDescriptorsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. 
The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which + // to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // An optional [filter](https://cloud.google.com/monitoring/api/v3/filters) + // describing the descriptors to be returned. The filter can reference the + // descriptor's type and labels. For example, the following filter returns + // only Google Compute Engine descriptors that have an `id` label: + // + // resource.type = starts_with("gce_") AND resource.label:id + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // A positive number that is the maximum number of results to return. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListMonitoredResourceDescriptorsRequest) Reset() { + *x = ListMonitoredResourceDescriptorsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListMonitoredResourceDescriptorsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListMonitoredResourceDescriptorsRequest) ProtoMessage() {} + +func (x *ListMonitoredResourceDescriptorsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListMonitoredResourceDescriptorsRequest.ProtoReflect.Descriptor instead. +func (*ListMonitoredResourceDescriptorsRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{0} +} + +func (x *ListMonitoredResourceDescriptorsRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListMonitoredResourceDescriptorsRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListMonitoredResourceDescriptorsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListMonitoredResourceDescriptorsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The `ListMonitoredResourceDescriptors` response. +type ListMonitoredResourceDescriptorsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The monitored resource descriptors that are available to this project + // and that match `filter`, if present. + ResourceDescriptors []*monitoredres.MonitoredResourceDescriptor `protobuf:"bytes,1,rep,name=resource_descriptors,json=resourceDescriptors,proto3" json:"resource_descriptors,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. 
To see the additional results, + // use that value as `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListMonitoredResourceDescriptorsResponse) Reset() { + *x = ListMonitoredResourceDescriptorsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListMonitoredResourceDescriptorsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListMonitoredResourceDescriptorsResponse) ProtoMessage() {} + +func (x *ListMonitoredResourceDescriptorsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListMonitoredResourceDescriptorsResponse.ProtoReflect.Descriptor instead. +func (*ListMonitoredResourceDescriptorsResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ListMonitoredResourceDescriptorsResponse) GetResourceDescriptors() []*monitoredres.MonitoredResourceDescriptor { + if x != nil { + return x.ResourceDescriptors + } + return nil +} + +func (x *ListMonitoredResourceDescriptorsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The `GetMonitoredResourceDescriptor` request. +type GetMonitoredResourceDescriptorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The monitored resource descriptor to get. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/monitoredResourceDescriptors/[RESOURCE_TYPE] + // + // The `[RESOURCE_TYPE]` is a predefined type, such as + // `cloudsql_database`. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetMonitoredResourceDescriptorRequest) Reset() { + *x = GetMonitoredResourceDescriptorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMonitoredResourceDescriptorRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMonitoredResourceDescriptorRequest) ProtoMessage() {} + +func (x *GetMonitoredResourceDescriptorRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMonitoredResourceDescriptorRequest.ProtoReflect.Descriptor instead. +func (*GetMonitoredResourceDescriptorRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{2} +} + +func (x *GetMonitoredResourceDescriptorRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `ListMetricDescriptors` request. 
+type ListMetricDescriptorsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which + // to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // If this field is empty, all custom and + // system-defined metric descriptors are returned. + // Otherwise, the [filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifies which metric descriptors are to be + // returned. For example, the following filter matches all + // [custom metrics](https://cloud.google.com/monitoring/custom-metrics): + // + // metric.type = starts_with("custom.googleapis.com/") + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // A positive number that is the maximum number of results to return. The + // default and maximum value is 10,000. If a page_size <= 0 or > 10,000 is + // submitted, will instead return a maximum of 10,000 results. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListMetricDescriptorsRequest) Reset() { + *x = ListMetricDescriptorsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListMetricDescriptorsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListMetricDescriptorsRequest) ProtoMessage() {} + +func (x *ListMetricDescriptorsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListMetricDescriptorsRequest.ProtoReflect.Descriptor instead. +func (*ListMetricDescriptorsRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{3} +} + +func (x *ListMetricDescriptorsRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListMetricDescriptorsRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListMetricDescriptorsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListMetricDescriptorsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The `ListMetricDescriptors` response. +type ListMetricDescriptorsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The metric descriptors that are available to the project + // and that match the value of `filter`, if present. 
+ MetricDescriptors []*metric.MetricDescriptor `protobuf:"bytes,1,rep,name=metric_descriptors,json=metricDescriptors,proto3" json:"metric_descriptors,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListMetricDescriptorsResponse) Reset() { + *x = ListMetricDescriptorsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListMetricDescriptorsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListMetricDescriptorsResponse) ProtoMessage() {} + +func (x *ListMetricDescriptorsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListMetricDescriptorsResponse.ProtoReflect.Descriptor instead. +func (*ListMetricDescriptorsResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{4} +} + +func (x *ListMetricDescriptorsResponse) GetMetricDescriptors() []*metric.MetricDescriptor { + if x != nil { + return x.MetricDescriptors + } + return nil +} + +func (x *ListMetricDescriptorsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The `GetMetricDescriptor` request. +type GetMetricDescriptorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The metric descriptor on which to execute the request. The format + // is: + // + // projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] + // + // An example value of `[METRIC_ID]` is + // `"compute.googleapis.com/instance/disk/read_bytes_count"`. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetMetricDescriptorRequest) Reset() { + *x = GetMetricDescriptorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetricDescriptorRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetricDescriptorRequest) ProtoMessage() {} + +func (x *GetMetricDescriptorRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetricDescriptorRequest.ProtoReflect.Descriptor instead. +func (*GetMetricDescriptorRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{5} +} + +func (x *GetMetricDescriptorRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `CreateMetricDescriptor` request. 
+type CreateMetricDescriptorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which + // to execute the request. The format is: + // 4 + // + // projects/[PROJECT_ID_OR_NUMBER] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Required. The new [custom + // metric](https://cloud.google.com/monitoring/custom-metrics) descriptor. + MetricDescriptor *metric.MetricDescriptor `protobuf:"bytes,2,opt,name=metric_descriptor,json=metricDescriptor,proto3" json:"metric_descriptor,omitempty"` +} + +func (x *CreateMetricDescriptorRequest) Reset() { + *x = CreateMetricDescriptorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateMetricDescriptorRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateMetricDescriptorRequest) ProtoMessage() {} + +func (x *CreateMetricDescriptorRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateMetricDescriptorRequest.ProtoReflect.Descriptor instead. +func (*CreateMetricDescriptorRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{6} +} + +func (x *CreateMetricDescriptorRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CreateMetricDescriptorRequest) GetMetricDescriptor() *metric.MetricDescriptor { + if x != nil { + return x.MetricDescriptor + } + return nil +} + +// The `DeleteMetricDescriptor` request. +type DeleteMetricDescriptorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The metric descriptor on which to execute the request. The format + // is: + // + // projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] + // + // An example of `[METRIC_ID]` is: + // `"custom.googleapis.com/my_test_metric"`. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *DeleteMetricDescriptorRequest) Reset() { + *x = DeleteMetricDescriptorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteMetricDescriptorRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteMetricDescriptorRequest) ProtoMessage() {} + +func (x *DeleteMetricDescriptorRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteMetricDescriptorRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteMetricDescriptorRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{7} +} + +func (x *DeleteMetricDescriptorRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `ListTimeSeries` request. +type ListTimeSeriesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name), + // organization or folder on which to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + // organizations/[ORGANIZATION_ID] + // folders/[FOLDER_ID] + Name string `protobuf:"bytes,10,opt,name=name,proto3" json:"name,omitempty"` + // Required. A [monitoring + // filter](https://cloud.google.com/monitoring/api/v3/filters) that specifies + // which time series should be returned. The filter must specify a single + // metric type, and can additionally specify metric labels and other + // information. For example: + // + // metric.type = "compute.googleapis.com/instance/cpu/usage_time" AND + // metric.labels.instance_name = "my-instance-name" + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // Required. The time interval for which results should be returned. Only time + // series that contain data points in the specified interval are included in + // the response. + Interval *TimeInterval `protobuf:"bytes,4,opt,name=interval,proto3" json:"interval,omitempty"` + // Specifies the alignment of data points in individual time series as + // well as how to combine the retrieved time series across specified labels. + // + // By default (if no `aggregation` is explicitly specified), the raw time + // series data is returned. + Aggregation *Aggregation `protobuf:"bytes,5,opt,name=aggregation,proto3" json:"aggregation,omitempty"` + // Apply a second aggregation after `aggregation` is applied. May only be + // specified if `aggregation` is specified. + SecondaryAggregation *Aggregation `protobuf:"bytes,11,opt,name=secondary_aggregation,json=secondaryAggregation,proto3" json:"secondary_aggregation,omitempty"` + // Unsupported: must be left blank. The points in each time series are + // currently returned in reverse time order (most recent to oldest). + OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` + // Required. Specifies which information is returned about the time series. + View ListTimeSeriesRequest_TimeSeriesView `protobuf:"varint,7,opt,name=view,proto3,enum=google.monitoring.v3.ListTimeSeriesRequest_TimeSeriesView" json:"view,omitempty"` + // A positive number that is the maximum number of results to return. If + // `page_size` is empty or more than 100,000 results, the effective + // `page_size` is 100,000 results. If `view` is set to `FULL`, this is the + // maximum number of `Points` returned. If `view` is set to `HEADERS`, this is + // the maximum number of `TimeSeries` returned. + PageSize int32 `protobuf:"varint,8,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. 
+ PageToken string `protobuf:"bytes,9,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListTimeSeriesRequest) Reset() { + *x = ListTimeSeriesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListTimeSeriesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListTimeSeriesRequest) ProtoMessage() {} + +func (x *ListTimeSeriesRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListTimeSeriesRequest.ProtoReflect.Descriptor instead. +func (*ListTimeSeriesRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{8} +} + +func (x *ListTimeSeriesRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListTimeSeriesRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListTimeSeriesRequest) GetInterval() *TimeInterval { + if x != nil { + return x.Interval + } + return nil +} + +func (x *ListTimeSeriesRequest) GetAggregation() *Aggregation { + if x != nil { + return x.Aggregation + } + return nil +} + +func (x *ListTimeSeriesRequest) GetSecondaryAggregation() *Aggregation { + if x != nil { + return x.SecondaryAggregation + } + return nil +} + +func (x *ListTimeSeriesRequest) GetOrderBy() string { + if x != nil { + return x.OrderBy + } + return "" +} + +func (x *ListTimeSeriesRequest) GetView() ListTimeSeriesRequest_TimeSeriesView { + if x != nil { + return x.View + } + return ListTimeSeriesRequest_FULL +} + +func (x *ListTimeSeriesRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListTimeSeriesRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The `ListTimeSeries` response. +type ListTimeSeriesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // One or more time series that match the filter included in the request. + TimeSeries []*TimeSeries `protobuf:"bytes,1,rep,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // Query execution errors that may have caused the time series data returned + // to be incomplete. + ExecutionErrors []*status.Status `protobuf:"bytes,3,rep,name=execution_errors,json=executionErrors,proto3" json:"execution_errors,omitempty"` + // The unit in which all `time_series` point values are reported. `unit` + // follows the UCUM format for units as seen in + // https://unitsofmeasure.org/ucum.html. + // If different `time_series` have different units (for example, because they + // come from different metric types, or a unit is absent), then `unit` will be + // "{not_a_unit}". 
+ Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` +} + +func (x *ListTimeSeriesResponse) Reset() { + *x = ListTimeSeriesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListTimeSeriesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListTimeSeriesResponse) ProtoMessage() {} + +func (x *ListTimeSeriesResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListTimeSeriesResponse.ProtoReflect.Descriptor instead. +func (*ListTimeSeriesResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{9} +} + +func (x *ListTimeSeriesResponse) GetTimeSeries() []*TimeSeries { + if x != nil { + return x.TimeSeries + } + return nil +} + +func (x *ListTimeSeriesResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *ListTimeSeriesResponse) GetExecutionErrors() []*status.Status { + if x != nil { + return x.ExecutionErrors + } + return nil +} + +func (x *ListTimeSeriesResponse) GetUnit() string { + if x != nil { + return x.Unit + } + return "" +} + +// The `CreateTimeSeries` request. +type CreateTimeSeriesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which + // to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Required. The new data to be added to a list of time series. + // Adds at most one data point to each of several time series. The new data + // point must be more recent than any other point in its time series. Each + // `TimeSeries` value must fully specify a unique time series by supplying + // all label values for the metric and the monitored resource. + // + // The maximum number of `TimeSeries` objects per `Create` request is 200. + TimeSeries []*TimeSeries `protobuf:"bytes,2,rep,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` +} + +func (x *CreateTimeSeriesRequest) Reset() { + *x = CreateTimeSeriesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateTimeSeriesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateTimeSeriesRequest) ProtoMessage() {} + +func (x *CreateTimeSeriesRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateTimeSeriesRequest.ProtoReflect.Descriptor instead. 
+func (*CreateTimeSeriesRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{10} +} + +func (x *CreateTimeSeriesRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CreateTimeSeriesRequest) GetTimeSeries() []*TimeSeries { + if x != nil { + return x.TimeSeries + } + return nil +} + +// DEPRECATED. Used to hold per-time-series error status. +type CreateTimeSeriesError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // DEPRECATED. Time series ID that resulted in the `status` error. + // + // Deprecated: Marked as deprecated in google/monitoring/v3/metric_service.proto. + TimeSeries *TimeSeries `protobuf:"bytes,1,opt,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` + // DEPRECATED. The status of the requested write operation for `time_series`. + // + // Deprecated: Marked as deprecated in google/monitoring/v3/metric_service.proto. + Status *status.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` +} + +func (x *CreateTimeSeriesError) Reset() { + *x = CreateTimeSeriesError{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateTimeSeriesError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateTimeSeriesError) ProtoMessage() {} + +func (x *CreateTimeSeriesError) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateTimeSeriesError.ProtoReflect.Descriptor instead. +func (*CreateTimeSeriesError) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{11} +} + +// Deprecated: Marked as deprecated in google/monitoring/v3/metric_service.proto. +func (x *CreateTimeSeriesError) GetTimeSeries() *TimeSeries { + if x != nil { + return x.TimeSeries + } + return nil +} + +// Deprecated: Marked as deprecated in google/monitoring/v3/metric_service.proto. +func (x *CreateTimeSeriesError) GetStatus() *status.Status { + if x != nil { + return x.Status + } + return nil +} + +// Summary of the result of a failed request to write data to a time series. +type CreateTimeSeriesSummary struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of points in the request. + TotalPointCount int32 `protobuf:"varint,1,opt,name=total_point_count,json=totalPointCount,proto3" json:"total_point_count,omitempty"` + // The number of points that were successfully written. + SuccessPointCount int32 `protobuf:"varint,2,opt,name=success_point_count,json=successPointCount,proto3" json:"success_point_count,omitempty"` + // The number of points that failed to be written. Order is not guaranteed. 
+ Errors []*CreateTimeSeriesSummary_Error `protobuf:"bytes,3,rep,name=errors,proto3" json:"errors,omitempty"` +} + +func (x *CreateTimeSeriesSummary) Reset() { + *x = CreateTimeSeriesSummary{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateTimeSeriesSummary) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateTimeSeriesSummary) ProtoMessage() {} + +func (x *CreateTimeSeriesSummary) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateTimeSeriesSummary.ProtoReflect.Descriptor instead. +func (*CreateTimeSeriesSummary) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{12} +} + +func (x *CreateTimeSeriesSummary) GetTotalPointCount() int32 { + if x != nil { + return x.TotalPointCount + } + return 0 +} + +func (x *CreateTimeSeriesSummary) GetSuccessPointCount() int32 { + if x != nil { + return x.SuccessPointCount + } + return 0 +} + +func (x *CreateTimeSeriesSummary) GetErrors() []*CreateTimeSeriesSummary_Error { + if x != nil { + return x.Errors + } + return nil +} + +// The `QueryTimeSeries` request. +type QueryTimeSeriesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which + // to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The query in the [Monitoring Query + // Language](https://cloud.google.com/monitoring/mql/reference) format. + // The default time zone is in UTC. + Query string `protobuf:"bytes,7,opt,name=query,proto3" json:"query,omitempty"` + // A positive number that is the maximum number of time_series_data to return. + PageSize int32 `protobuf:"varint,9,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. 
+ PageToken string `protobuf:"bytes,10,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *QueryTimeSeriesRequest) Reset() { + *x = QueryTimeSeriesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryTimeSeriesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryTimeSeriesRequest) ProtoMessage() {} + +func (x *QueryTimeSeriesRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryTimeSeriesRequest.ProtoReflect.Descriptor instead. +func (*QueryTimeSeriesRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{13} +} + +func (x *QueryTimeSeriesRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *QueryTimeSeriesRequest) GetQuery() string { + if x != nil { + return x.Query + } + return "" +} + +func (x *QueryTimeSeriesRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *QueryTimeSeriesRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The `QueryTimeSeries` response. +type QueryTimeSeriesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The descriptor for the time series data. + TimeSeriesDescriptor *TimeSeriesDescriptor `protobuf:"bytes,8,opt,name=time_series_descriptor,json=timeSeriesDescriptor,proto3" json:"time_series_descriptor,omitempty"` + // The time series data. + TimeSeriesData []*TimeSeriesData `protobuf:"bytes,9,rep,name=time_series_data,json=timeSeriesData,proto3" json:"time_series_data,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, use that value as + // `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,10,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // Query execution errors that may have caused the time series data returned + // to be incomplete. The available data will be available in the + // response. 
+ PartialErrors []*status.Status `protobuf:"bytes,11,rep,name=partial_errors,json=partialErrors,proto3" json:"partial_errors,omitempty"` +} + +func (x *QueryTimeSeriesResponse) Reset() { + *x = QueryTimeSeriesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryTimeSeriesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryTimeSeriesResponse) ProtoMessage() {} + +func (x *QueryTimeSeriesResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryTimeSeriesResponse.ProtoReflect.Descriptor instead. +func (*QueryTimeSeriesResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{14} +} + +func (x *QueryTimeSeriesResponse) GetTimeSeriesDescriptor() *TimeSeriesDescriptor { + if x != nil { + return x.TimeSeriesDescriptor + } + return nil +} + +func (x *QueryTimeSeriesResponse) GetTimeSeriesData() []*TimeSeriesData { + if x != nil { + return x.TimeSeriesData + } + return nil +} + +func (x *QueryTimeSeriesResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *QueryTimeSeriesResponse) GetPartialErrors() []*status.Status { + if x != nil { + return x.PartialErrors + } + return nil +} + +// This is an error detail intended to be used with INVALID_ARGUMENT errors. +type QueryErrorList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Errors in parsing the time series query language text. The number of errors + // in the response may be limited. + Errors []*QueryError `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"` + // A summary of all the errors. + ErrorSummary string `protobuf:"bytes,2,opt,name=error_summary,json=errorSummary,proto3" json:"error_summary,omitempty"` +} + +func (x *QueryErrorList) Reset() { + *x = QueryErrorList{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryErrorList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryErrorList) ProtoMessage() {} + +func (x *QueryErrorList) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryErrorList.ProtoReflect.Descriptor instead. +func (*QueryErrorList) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{15} +} + +func (x *QueryErrorList) GetErrors() []*QueryError { + if x != nil { + return x.Errors + } + return nil +} + +func (x *QueryErrorList) GetErrorSummary() string { + if x != nil { + return x.ErrorSummary + } + return "" +} + +// Detailed information about an error category. 
+type CreateTimeSeriesSummary_Error struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status of the requested write operation. + Status *status.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + // The number of points that couldn't be written because of `status`. + PointCount int32 `protobuf:"varint,2,opt,name=point_count,json=pointCount,proto3" json:"point_count,omitempty"` +} + +func (x *CreateTimeSeriesSummary_Error) Reset() { + *x = CreateTimeSeriesSummary_Error{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateTimeSeriesSummary_Error) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateTimeSeriesSummary_Error) ProtoMessage() {} + +func (x *CreateTimeSeriesSummary_Error) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateTimeSeriesSummary_Error.ProtoReflect.Descriptor instead. +func (*CreateTimeSeriesSummary_Error) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{12, 0} +} + +func (x *CreateTimeSeriesSummary_Error) GetStatus() *status.Status { + if x != nil { + return x.Status + } + return nil +} + +func (x *CreateTimeSeriesSummary_Error) GetPointCount() int32 { + if x != nil { + return x.PointCount + } + return 0 +} + +var File_google_monitoring_v3_metric_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_metric_service_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, + 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 
0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, + 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd0, + 0x01, 0x0a, 0x27, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x51, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x3d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x37, + 0x12, 0x35, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, + 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x22, 0xae, 0x01, 0x0a, 0x28, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, + 0x0a, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x13, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, + 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x22, 0x7a, 0x0a, 0x25, 0x47, 0x65, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x51, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x3d, 0xe0, 0x41, 0x02, 0xfa, 0x41, + 0x37, 0x0a, 0x35, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 
0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xba, + 0x01, 0x0a, 0x1c, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x46, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x32, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x2c, 0x12, 0x2a, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, + 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x94, 0x01, 0x0a, 0x1d, + 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, + 0x12, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x11, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, + 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x22, 0x64, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x46, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x32, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2c, 0x0a, 0x2a, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xb7, 0x01, 0x0a, 0x1d, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x46, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x32, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2c, + 0x12, 0x2a, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 
0x65, 0x12, 0x4e, 0x0a, 0x11, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x10, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x22, 0x67, 0x0a, 0x1d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x46, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x32, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2c, 0x0a, 0x2a, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xad, 0x04, 0x0a, 0x15, + 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x2c, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x26, 0x12, 0x24, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x12, 0x43, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x43, 0x0a, 0x0b, 0x61, 0x67, 0x67, + 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x0b, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x56, + 0x0a, 0x15, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x5f, 0x61, 0x67, 0x67, 0x72, + 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x14, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x41, 0x67, 0x67, 0x72, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, + 0x62, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x42, + 0x79, 0x12, 0x53, 0x0a, 0x04, 0x76, 0x69, 0x65, 0x77, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x56, 0x69, 0x65, 0x77, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x04, 0x76, 0x69, 0x65, 0x77, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x22, 0x27, 0x0a, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, + 0x56, 0x69, 0x65, 0x77, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x00, 0x12, 0x0b, + 0x0a, 0x07, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x53, 0x10, 0x01, 0x22, 0xd6, 0x01, 0x0a, 0x16, + 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x74, + 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, + 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x3d, 0x0a, 0x10, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x0f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, + 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x75, 0x6e, 0x69, 0x74, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x47, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x22, 0x8e, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, + 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x45, 0x72, 0x72, 
0x6f, 0x72, 0x12, 0x45, 0x0a, 0x0b, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x22, 0x98, 0x02, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x2a, + 0x0a, 0x11, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, + 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x73, 0x75, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, + 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x1a, 0x54, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, + 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x0a, 0x0b, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0a, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x88, 0x01, + 0x0a, 0x16, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x19, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1b, 0x0a, 0x09, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, + 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, + 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xae, 0x02, 0x0a, 0x17, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x16, 0x74, 0x69, 0x6d, 0x65, 0x5f, 
0x73, 0x65, 0x72, + 0x69, 0x65, 0x73, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x52, 0x14, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x4e, 0x0a, 0x10, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, + 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x39, + 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, + 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0d, 0x70, 0x61, 0x72, 0x74, + 0x69, 0x61, 0x6c, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x6f, 0x0a, 0x0e, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x06, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x06, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x32, 0xbc, 0x0f, 0x0a, 0x0d, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xe4, 0x01, 0x0a, + 0x20, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x73, 0x12, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x41, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, + 0x32, 
0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x73, 0x12, 0xcc, 0x01, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, + 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x44, 0xda, 0x41, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x12, 0x35, 0x2f, 0x76, 0x33, + 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x2a, + 0x2a, 0x7d, 0x12, 0xb8, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x32, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x29, 0x12, 0x27, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0xa0, 0x01, + 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x39, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 
0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x2a, 0x2a, 0x7d, + 0x12, 0xc8, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x33, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x5b, + 0xda, 0x41, 0x16, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3c, 0x3a, + 0x11, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x22, 0x27, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0xa0, 0x01, 0x0a, 0x16, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x22, 0x39, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x2c, 0x2a, 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x2a, 0x2a, 0x7d, 0x12, 0xfe, + 0x01, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, + 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x90, 0x01, 0xda, + 0x41, 0x19, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2c, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x2c, 0x76, 0x69, 0x65, 0x77, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x6e, 0x5a, 0x27, 0x12, 0x25, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6f, + 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, + 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 
0x73, 0x5a, 0x21, 0x12, 0x1f, 0x2f, 0x76, + 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, + 0x2a, 0x7d, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x20, 0x2f, + 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, + 0x99, 0x01, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, + 0x72, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x3e, 0xda, 0x41, 0x10, + 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x3a, 0x01, 0x2a, 0x22, 0x20, 0x2f, 0x76, 0x33, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, + 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0xae, 0x01, 0x0a, 0x17, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x4c, + 0xda, 0x41, 0x10, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x69, 0x65, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x33, 0x3a, 0x01, 0x2a, 0x22, 0x2e, 0x2f, 0x76, + 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x3a, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0xda, 0x01, 0xca, + 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0xba, 0x01, 0x68, + 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, + 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2c, 0x68, 0x74, 0x74, 0x70, 
0x73, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0x89, 0x08, 0xea, 0x41, 0xf0, 0x01, + 0x0a, 0x2a, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x3b, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, + 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x45, 0x6f, 0x72, 0x67, 0x61, 0x6e, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3d, 0x2a, 0x2a, 0x7d, + 0x12, 0x39, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, + 0x72, 0x7d, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x01, 0x2a, 0x20, 0x01, + 0xea, 0x41, 0xb7, 0x02, 0x0a, 0x35, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x4f, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x59, 0x6f, 0x72, + 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, + 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x4d, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, + 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, + 0x64, 
0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x01, 0x2a, 0x20, 0x01, 0xea, 0x41, 0x51, 0x0a, 0x23, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x57, 0x6f, 0x72, 0x6b, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x12, 0x12, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x12, 0x16, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0xea, + 0x41, 0xb5, 0x01, 0x0a, 0x24, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, + 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2b, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x74, 0x69, + 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x7d, 0x12, 0x35, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x2f, + 0x7b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x7d, 0x12, 0x29, 0x66, + 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, + 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x69, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x7d, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x42, 0x12, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, + 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_metric_service_proto_rawDescOnce sync.Once + file_google_monitoring_v3_metric_service_proto_rawDescData = file_google_monitoring_v3_metric_service_proto_rawDesc +) + +func file_google_monitoring_v3_metric_service_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_metric_service_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_metric_service_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_google_monitoring_v3_metric_service_proto_rawDescData) + }) + return file_google_monitoring_v3_metric_service_proto_rawDescData +} + +var file_google_monitoring_v3_metric_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_monitoring_v3_metric_service_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_google_monitoring_v3_metric_service_proto_goTypes = []any{ + (ListTimeSeriesRequest_TimeSeriesView)(0), // 0: google.monitoring.v3.ListTimeSeriesRequest.TimeSeriesView + (*ListMonitoredResourceDescriptorsRequest)(nil), // 1: google.monitoring.v3.ListMonitoredResourceDescriptorsRequest + (*ListMonitoredResourceDescriptorsResponse)(nil), // 2: google.monitoring.v3.ListMonitoredResourceDescriptorsResponse + (*GetMonitoredResourceDescriptorRequest)(nil), // 3: google.monitoring.v3.GetMonitoredResourceDescriptorRequest + (*ListMetricDescriptorsRequest)(nil), // 4: google.monitoring.v3.ListMetricDescriptorsRequest + (*ListMetricDescriptorsResponse)(nil), // 5: google.monitoring.v3.ListMetricDescriptorsResponse + (*GetMetricDescriptorRequest)(nil), // 6: google.monitoring.v3.GetMetricDescriptorRequest + (*CreateMetricDescriptorRequest)(nil), // 7: google.monitoring.v3.CreateMetricDescriptorRequest + (*DeleteMetricDescriptorRequest)(nil), // 8: google.monitoring.v3.DeleteMetricDescriptorRequest + (*ListTimeSeriesRequest)(nil), // 9: google.monitoring.v3.ListTimeSeriesRequest + (*ListTimeSeriesResponse)(nil), // 10: google.monitoring.v3.ListTimeSeriesResponse + (*CreateTimeSeriesRequest)(nil), // 11: google.monitoring.v3.CreateTimeSeriesRequest + (*CreateTimeSeriesError)(nil), // 12: google.monitoring.v3.CreateTimeSeriesError + (*CreateTimeSeriesSummary)(nil), // 13: google.monitoring.v3.CreateTimeSeriesSummary + (*QueryTimeSeriesRequest)(nil), // 14: google.monitoring.v3.QueryTimeSeriesRequest + (*QueryTimeSeriesResponse)(nil), // 15: google.monitoring.v3.QueryTimeSeriesResponse + (*QueryErrorList)(nil), // 16: google.monitoring.v3.QueryErrorList + (*CreateTimeSeriesSummary_Error)(nil), // 17: google.monitoring.v3.CreateTimeSeriesSummary.Error + (*monitoredres.MonitoredResourceDescriptor)(nil), // 18: google.api.MonitoredResourceDescriptor + (*metric.MetricDescriptor)(nil), // 19: google.api.MetricDescriptor + (*TimeInterval)(nil), // 20: google.monitoring.v3.TimeInterval + (*Aggregation)(nil), // 21: google.monitoring.v3.Aggregation + (*TimeSeries)(nil), // 22: google.monitoring.v3.TimeSeries + (*status.Status)(nil), // 23: google.rpc.Status + (*TimeSeriesDescriptor)(nil), // 24: google.monitoring.v3.TimeSeriesDescriptor + (*TimeSeriesData)(nil), // 25: google.monitoring.v3.TimeSeriesData + (*QueryError)(nil), // 26: google.monitoring.v3.QueryError + (*emptypb.Empty)(nil), // 27: google.protobuf.Empty +} +var file_google_monitoring_v3_metric_service_proto_depIdxs = []int32{ + 18, // 0: google.monitoring.v3.ListMonitoredResourceDescriptorsResponse.resource_descriptors:type_name -> google.api.MonitoredResourceDescriptor + 19, // 1: google.monitoring.v3.ListMetricDescriptorsResponse.metric_descriptors:type_name -> google.api.MetricDescriptor + 19, // 2: google.monitoring.v3.CreateMetricDescriptorRequest.metric_descriptor:type_name -> google.api.MetricDescriptor + 20, // 3: google.monitoring.v3.ListTimeSeriesRequest.interval:type_name -> google.monitoring.v3.TimeInterval + 21, // 4: google.monitoring.v3.ListTimeSeriesRequest.aggregation:type_name -> google.monitoring.v3.Aggregation + 21, // 5: 
google.monitoring.v3.ListTimeSeriesRequest.secondary_aggregation:type_name -> google.monitoring.v3.Aggregation + 0, // 6: google.monitoring.v3.ListTimeSeriesRequest.view:type_name -> google.monitoring.v3.ListTimeSeriesRequest.TimeSeriesView + 22, // 7: google.monitoring.v3.ListTimeSeriesResponse.time_series:type_name -> google.monitoring.v3.TimeSeries + 23, // 8: google.monitoring.v3.ListTimeSeriesResponse.execution_errors:type_name -> google.rpc.Status + 22, // 9: google.monitoring.v3.CreateTimeSeriesRequest.time_series:type_name -> google.monitoring.v3.TimeSeries + 22, // 10: google.monitoring.v3.CreateTimeSeriesError.time_series:type_name -> google.monitoring.v3.TimeSeries + 23, // 11: google.monitoring.v3.CreateTimeSeriesError.status:type_name -> google.rpc.Status + 17, // 12: google.monitoring.v3.CreateTimeSeriesSummary.errors:type_name -> google.monitoring.v3.CreateTimeSeriesSummary.Error + 24, // 13: google.monitoring.v3.QueryTimeSeriesResponse.time_series_descriptor:type_name -> google.monitoring.v3.TimeSeriesDescriptor + 25, // 14: google.monitoring.v3.QueryTimeSeriesResponse.time_series_data:type_name -> google.monitoring.v3.TimeSeriesData + 23, // 15: google.monitoring.v3.QueryTimeSeriesResponse.partial_errors:type_name -> google.rpc.Status + 26, // 16: google.monitoring.v3.QueryErrorList.errors:type_name -> google.monitoring.v3.QueryError + 23, // 17: google.monitoring.v3.CreateTimeSeriesSummary.Error.status:type_name -> google.rpc.Status + 1, // 18: google.monitoring.v3.MetricService.ListMonitoredResourceDescriptors:input_type -> google.monitoring.v3.ListMonitoredResourceDescriptorsRequest + 3, // 19: google.monitoring.v3.MetricService.GetMonitoredResourceDescriptor:input_type -> google.monitoring.v3.GetMonitoredResourceDescriptorRequest + 4, // 20: google.monitoring.v3.MetricService.ListMetricDescriptors:input_type -> google.monitoring.v3.ListMetricDescriptorsRequest + 6, // 21: google.monitoring.v3.MetricService.GetMetricDescriptor:input_type -> google.monitoring.v3.GetMetricDescriptorRequest + 7, // 22: google.monitoring.v3.MetricService.CreateMetricDescriptor:input_type -> google.monitoring.v3.CreateMetricDescriptorRequest + 8, // 23: google.monitoring.v3.MetricService.DeleteMetricDescriptor:input_type -> google.monitoring.v3.DeleteMetricDescriptorRequest + 9, // 24: google.monitoring.v3.MetricService.ListTimeSeries:input_type -> google.monitoring.v3.ListTimeSeriesRequest + 11, // 25: google.monitoring.v3.MetricService.CreateTimeSeries:input_type -> google.monitoring.v3.CreateTimeSeriesRequest + 11, // 26: google.monitoring.v3.MetricService.CreateServiceTimeSeries:input_type -> google.monitoring.v3.CreateTimeSeriesRequest + 2, // 27: google.monitoring.v3.MetricService.ListMonitoredResourceDescriptors:output_type -> google.monitoring.v3.ListMonitoredResourceDescriptorsResponse + 18, // 28: google.monitoring.v3.MetricService.GetMonitoredResourceDescriptor:output_type -> google.api.MonitoredResourceDescriptor + 5, // 29: google.monitoring.v3.MetricService.ListMetricDescriptors:output_type -> google.monitoring.v3.ListMetricDescriptorsResponse + 19, // 30: google.monitoring.v3.MetricService.GetMetricDescriptor:output_type -> google.api.MetricDescriptor + 19, // 31: google.monitoring.v3.MetricService.CreateMetricDescriptor:output_type -> google.api.MetricDescriptor + 27, // 32: google.monitoring.v3.MetricService.DeleteMetricDescriptor:output_type -> google.protobuf.Empty + 10, // 33: google.monitoring.v3.MetricService.ListTimeSeries:output_type -> 
google.monitoring.v3.ListTimeSeriesResponse + 27, // 34: google.monitoring.v3.MetricService.CreateTimeSeries:output_type -> google.protobuf.Empty + 27, // 35: google.monitoring.v3.MetricService.CreateServiceTimeSeries:output_type -> google.protobuf.Empty + 27, // [27:36] is the sub-list for method output_type + 18, // [18:27] is the sub-list for method input_type + 18, // [18:18] is the sub-list for extension type_name + 18, // [18:18] is the sub-list for extension extendee + 0, // [0:18] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_metric_service_proto_init() } +func file_google_monitoring_v3_metric_service_proto_init() { + if File_google_monitoring_v3_metric_service_proto != nil { + return + } + file_google_monitoring_v3_common_proto_init() + file_google_monitoring_v3_metric_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_metric_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*ListMonitoredResourceDescriptorsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ListMonitoredResourceDescriptorsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*GetMonitoredResourceDescriptorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*ListMetricDescriptorsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*ListMetricDescriptorsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*GetMetricDescriptorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*CreateMetricDescriptorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*DeleteMetricDescriptorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*ListTimeSeriesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*ListTimeSeriesResponse); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*CreateTimeSeriesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*CreateTimeSeriesError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*CreateTimeSeriesSummary); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[13].Exporter = func(v any, i int) any { + switch v := v.(*QueryTimeSeriesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[14].Exporter = func(v any, i int) any { + switch v := v.(*QueryTimeSeriesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[15].Exporter = func(v any, i int) any { + switch v := v.(*QueryErrorList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_metric_service_proto_msgTypes[16].Exporter = func(v any, i int) any { + switch v := v.(*CreateTimeSeriesSummary_Error); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_metric_service_proto_rawDesc, + NumEnums: 1, + NumMessages: 17, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_monitoring_v3_metric_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_metric_service_proto_depIdxs, + EnumInfos: file_google_monitoring_v3_metric_service_proto_enumTypes, + MessageInfos: file_google_monitoring_v3_metric_service_proto_msgTypes, + }.Build() + File_google_monitoring_v3_metric_service_proto = out.File + file_google_monitoring_v3_metric_service_proto_rawDesc = nil + file_google_monitoring_v3_metric_service_proto_goTypes = nil + file_google_monitoring_v3_metric_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// MetricServiceClient is the client API for MetricService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MetricServiceClient interface { + // Lists monitored resource descriptors that match a filter. 
+ ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) + // Gets a single monitored resource descriptor. + GetMonitoredResourceDescriptor(ctx context.Context, in *GetMonitoredResourceDescriptorRequest, opts ...grpc.CallOption) (*monitoredres.MonitoredResourceDescriptor, error) + // Lists metric descriptors that match a filter. + ListMetricDescriptors(ctx context.Context, in *ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*ListMetricDescriptorsResponse, error) + // Gets a single metric descriptor. + GetMetricDescriptor(ctx context.Context, in *GetMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) + // Creates a new metric descriptor. + // The creation is executed asynchronously. + // User-created metric descriptors define + // [custom metrics](https://cloud.google.com/monitoring/custom-metrics). + // The metric descriptor is updated if it already exists, + // except that metric labels are never removed. + CreateMetricDescriptor(ctx context.Context, in *CreateMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) + // Deletes a metric descriptor. Only user-created + // [custom metrics](https://cloud.google.com/monitoring/custom-metrics) can be + // deleted. + DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Lists time series that match a filter. + ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error) + // Creates or adds data to one or more time series. + // The response is empty if all time series in the request were written. + // If any time series could not be written, a corresponding failure message is + // included in the error response. + // This method does not support + // [resource locations constraint of an organization + // policy](https://cloud.google.com/resource-manager/docs/organization-policy/defining-locations#setting_the_organization_policy). + CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Creates or adds data to one or more service time series. A service time + // series is a time series for a metric from a Google Cloud service. The + // response is empty if all time series in the request were written. If any + // time series could not be written, a corresponding failure message is + // included in the error response. This endpoint rejects writes to + // user-defined metrics. + // This method is only for use by Google Cloud services. Use + // [projects.timeSeries.create][google.monitoring.v3.MetricService.CreateTimeSeries] + // instead. + CreateServiceTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type metricServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewMetricServiceClient(cc grpc.ClientConnInterface) MetricServiceClient { + return &metricServiceClient{cc} +} + +func (c *metricServiceClient) ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) { + out := new(ListMonitoredResourceDescriptorsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) GetMonitoredResourceDescriptor(ctx context.Context, in *GetMonitoredResourceDescriptorRequest, opts ...grpc.CallOption) (*monitoredres.MonitoredResourceDescriptor, error) { + out := new(monitoredres.MonitoredResourceDescriptor) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) ListMetricDescriptors(ctx context.Context, in *ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*ListMetricDescriptorsResponse, error) { + out := new(ListMetricDescriptorsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListMetricDescriptors", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) GetMetricDescriptor(ctx context.Context, in *GetMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) { + out := new(metric.MetricDescriptor) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/GetMetricDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) CreateMetricDescriptor(ctx context.Context, in *CreateMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) { + out := new(metric.MetricDescriptor) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateMetricDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error) { + out := new(ListTimeSeriesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListTimeSeries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateTimeSeries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) CreateServiceTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateServiceTimeSeries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MetricServiceServer is the server API for MetricService service. +type MetricServiceServer interface { + // Lists monitored resource descriptors that match a filter. + ListMonitoredResourceDescriptors(context.Context, *ListMonitoredResourceDescriptorsRequest) (*ListMonitoredResourceDescriptorsResponse, error) + // Gets a single monitored resource descriptor. 
+ GetMonitoredResourceDescriptor(context.Context, *GetMonitoredResourceDescriptorRequest) (*monitoredres.MonitoredResourceDescriptor, error) + // Lists metric descriptors that match a filter. + ListMetricDescriptors(context.Context, *ListMetricDescriptorsRequest) (*ListMetricDescriptorsResponse, error) + // Gets a single metric descriptor. + GetMetricDescriptor(context.Context, *GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) + // Creates a new metric descriptor. + // The creation is executed asynchronously. + // User-created metric descriptors define + // [custom metrics](https://cloud.google.com/monitoring/custom-metrics). + // The metric descriptor is updated if it already exists, + // except that metric labels are never removed. + CreateMetricDescriptor(context.Context, *CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) + // Deletes a metric descriptor. Only user-created + // [custom metrics](https://cloud.google.com/monitoring/custom-metrics) can be + // deleted. + DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*emptypb.Empty, error) + // Lists time series that match a filter. + ListTimeSeries(context.Context, *ListTimeSeriesRequest) (*ListTimeSeriesResponse, error) + // Creates or adds data to one or more time series. + // The response is empty if all time series in the request were written. + // If any time series could not be written, a corresponding failure message is + // included in the error response. + // This method does not support + // [resource locations constraint of an organization + // policy](https://cloud.google.com/resource-manager/docs/organization-policy/defining-locations#setting_the_organization_policy). + CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*emptypb.Empty, error) + // Creates or adds data to one or more service time series. A service time + // series is a time series for a metric from a Google Cloud service. The + // response is empty if all time series in the request were written. If any + // time series could not be written, a corresponding failure message is + // included in the error response. This endpoint rejects writes to + // user-defined metrics. + // This method is only for use by Google Cloud services. Use + // [projects.timeSeries.create][google.monitoring.v3.MetricService.CreateTimeSeries] + // instead. + CreateServiceTimeSeries(context.Context, *CreateTimeSeriesRequest) (*emptypb.Empty, error) +} + +// UnimplementedMetricServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedMetricServiceServer struct { +} + +func (*UnimplementedMetricServiceServer) ListMonitoredResourceDescriptors(context.Context, *ListMonitoredResourceDescriptorsRequest) (*ListMonitoredResourceDescriptorsResponse, error) { + return nil, status1.Errorf(codes.Unimplemented, "method ListMonitoredResourceDescriptors not implemented") +} +func (*UnimplementedMetricServiceServer) GetMonitoredResourceDescriptor(context.Context, *GetMonitoredResourceDescriptorRequest) (*monitoredres.MonitoredResourceDescriptor, error) { + return nil, status1.Errorf(codes.Unimplemented, "method GetMonitoredResourceDescriptor not implemented") +} +func (*UnimplementedMetricServiceServer) ListMetricDescriptors(context.Context, *ListMetricDescriptorsRequest) (*ListMetricDescriptorsResponse, error) { + return nil, status1.Errorf(codes.Unimplemented, "method ListMetricDescriptors not implemented") +} +func (*UnimplementedMetricServiceServer) GetMetricDescriptor(context.Context, *GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) { + return nil, status1.Errorf(codes.Unimplemented, "method GetMetricDescriptor not implemented") +} +func (*UnimplementedMetricServiceServer) CreateMetricDescriptor(context.Context, *CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) { + return nil, status1.Errorf(codes.Unimplemented, "method CreateMetricDescriptor not implemented") +} +func (*UnimplementedMetricServiceServer) DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*emptypb.Empty, error) { + return nil, status1.Errorf(codes.Unimplemented, "method DeleteMetricDescriptor not implemented") +} +func (*UnimplementedMetricServiceServer) ListTimeSeries(context.Context, *ListTimeSeriesRequest) (*ListTimeSeriesResponse, error) { + return nil, status1.Errorf(codes.Unimplemented, "method ListTimeSeries not implemented") +} +func (*UnimplementedMetricServiceServer) CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*emptypb.Empty, error) { + return nil, status1.Errorf(codes.Unimplemented, "method CreateTimeSeries not implemented") +} +func (*UnimplementedMetricServiceServer) CreateServiceTimeSeries(context.Context, *CreateTimeSeriesRequest) (*emptypb.Empty, error) { + return nil, status1.Errorf(codes.Unimplemented, "method CreateServiceTimeSeries not implemented") +} + +func RegisterMetricServiceServer(s *grpc.Server, srv MetricServiceServer) { + s.RegisterService(&_MetricService_serviceDesc, srv) +} + +func _MetricService_ListMonitoredResourceDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListMonitoredResourceDescriptorsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).ListMonitoredResourceDescriptors(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).ListMonitoredResourceDescriptors(ctx, req.(*ListMonitoredResourceDescriptorsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_GetMonitoredResourceDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetMonitoredResourceDescriptorRequest) + if err := dec(in); err != nil { + return nil, 
err + } + if interceptor == nil { + return srv.(MetricServiceServer).GetMonitoredResourceDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).GetMonitoredResourceDescriptor(ctx, req.(*GetMonitoredResourceDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_ListMetricDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListMetricDescriptorsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).ListMetricDescriptors(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/ListMetricDescriptors", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).ListMetricDescriptors(ctx, req.(*ListMetricDescriptorsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_GetMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetMetricDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).GetMetricDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/GetMetricDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).GetMetricDescriptor(ctx, req.(*GetMetricDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_CreateMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateMetricDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).CreateMetricDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/CreateMetricDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).CreateMetricDescriptor(ctx, req.(*CreateMetricDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_DeleteMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteMetricDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).DeleteMetricDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).DeleteMetricDescriptor(ctx, req.(*DeleteMetricDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_ListTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListTimeSeriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).ListTimeSeries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/ListTimeSeries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).ListTimeSeries(ctx, req.(*ListTimeSeriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_CreateTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateTimeSeriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).CreateTimeSeries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/CreateTimeSeries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).CreateTimeSeries(ctx, req.(*CreateTimeSeriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_CreateServiceTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateTimeSeriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).CreateServiceTimeSeries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/CreateServiceTimeSeries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).CreateServiceTimeSeries(ctx, req.(*CreateTimeSeriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _MetricService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.MetricService", + HandlerType: (*MetricServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListMonitoredResourceDescriptors", + Handler: _MetricService_ListMonitoredResourceDescriptors_Handler, + }, + { + MethodName: "GetMonitoredResourceDescriptor", + Handler: _MetricService_GetMonitoredResourceDescriptor_Handler, + }, + { + MethodName: "ListMetricDescriptors", + Handler: _MetricService_ListMetricDescriptors_Handler, + }, + { + MethodName: "GetMetricDescriptor", + Handler: _MetricService_GetMetricDescriptor_Handler, + }, + { + MethodName: "CreateMetricDescriptor", + Handler: _MetricService_CreateMetricDescriptor_Handler, + }, + { + MethodName: "DeleteMetricDescriptor", + Handler: _MetricService_DeleteMetricDescriptor_Handler, + }, + { + MethodName: "ListTimeSeries", + Handler: _MetricService_ListTimeSeries_Handler, + }, + { + MethodName: "CreateTimeSeries", + Handler: _MetricService_CreateTimeSeries_Handler, + }, + { + MethodName: "CreateServiceTimeSeries", + Handler: _MetricService_CreateServiceTimeSeries_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/metric_service.proto", +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go new file mode 100644 index 0000000000..643b244e4d --- /dev/null +++ 
b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go @@ -0,0 +1,192 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/mutation_record.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Describes a change made to a configuration. +type MutationRecord struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // When the change occurred. + MutateTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=mutate_time,json=mutateTime,proto3" json:"mutate_time,omitempty"` + // The email address of the user making the change. + MutatedBy string `protobuf:"bytes,2,opt,name=mutated_by,json=mutatedBy,proto3" json:"mutated_by,omitempty"` +} + +func (x *MutationRecord) Reset() { + *x = MutationRecord{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_mutation_record_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MutationRecord) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MutationRecord) ProtoMessage() {} + +func (x *MutationRecord) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_mutation_record_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MutationRecord.ProtoReflect.Descriptor instead. 
+func (*MutationRecord) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_mutation_record_proto_rawDescGZIP(), []int{0} +} + +func (x *MutationRecord) GetMutateTime() *timestamppb.Timestamp { + if x != nil { + return x.MutateTime + } + return nil +} + +func (x *MutationRecord) GetMutatedBy() string { + if x != nil { + return x.MutatedBy + } + return "" +} + +var File_google_monitoring_v3_mutation_record_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_mutation_record_proto_rawDesc = []byte{ + 0x0a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x6c, 0x0a, 0x0e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x3b, 0x0a, 0x0b, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x64, 0x42, + 0x79, 0x42, 0xce, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x13, + 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, + 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, + 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_mutation_record_proto_rawDescOnce sync.Once + file_google_monitoring_v3_mutation_record_proto_rawDescData = file_google_monitoring_v3_mutation_record_proto_rawDesc +) + +func file_google_monitoring_v3_mutation_record_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_mutation_record_proto_rawDescOnce.Do(func() { + 
file_google_monitoring_v3_mutation_record_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_mutation_record_proto_rawDescData) + }) + return file_google_monitoring_v3_mutation_record_proto_rawDescData +} + +var file_google_monitoring_v3_mutation_record_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_monitoring_v3_mutation_record_proto_goTypes = []any{ + (*MutationRecord)(nil), // 0: google.monitoring.v3.MutationRecord + (*timestamppb.Timestamp)(nil), // 1: google.protobuf.Timestamp +} +var file_google_monitoring_v3_mutation_record_proto_depIdxs = []int32{ + 1, // 0: google.monitoring.v3.MutationRecord.mutate_time:type_name -> google.protobuf.Timestamp + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_mutation_record_proto_init() } +func file_google_monitoring_v3_mutation_record_proto_init() { + if File_google_monitoring_v3_mutation_record_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_mutation_record_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*MutationRecord); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_mutation_record_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_mutation_record_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_mutation_record_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_mutation_record_proto_msgTypes, + }.Build() + File_google_monitoring_v3_mutation_record_proto = out.File + file_google_monitoring_v3_mutation_record_proto_rawDesc = nil + file_google_monitoring_v3_mutation_record_proto_goTypes = nil + file_google_monitoring_v3_mutation_record_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go new file mode 100644 index 0000000000..603b5bcdde --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go @@ -0,0 +1,647 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/notification.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + api "google.golang.org/genproto/googleapis/api" + _ "google.golang.org/genproto/googleapis/api/annotations" + label "google.golang.org/genproto/googleapis/api/label" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Indicates whether the channel has been verified or not. It is illegal +// to specify this field in a +// [`CreateNotificationChannel`][google.monitoring.v3.NotificationChannelService.CreateNotificationChannel] +// or an +// [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel] +// operation. +type NotificationChannel_VerificationStatus int32 + +const ( + // Sentinel value used to indicate that the state is unknown, omitted, or + // is not applicable (as in the case of channels that neither support + // nor require verification in order to function). + NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED NotificationChannel_VerificationStatus = 0 + // The channel has yet to be verified and requires verification to function. + // Note that this state also applies to the case where the verification + // process has been initiated by sending a verification code but where + // the verification code has not been submitted to complete the process. + NotificationChannel_UNVERIFIED NotificationChannel_VerificationStatus = 1 + // It has been proven that notifications can be received on this + // notification channel and that someone on the project has access + // to messages that are delivered to that channel. + NotificationChannel_VERIFIED NotificationChannel_VerificationStatus = 2 +) + +// Enum value maps for NotificationChannel_VerificationStatus. +var ( + NotificationChannel_VerificationStatus_name = map[int32]string{ + 0: "VERIFICATION_STATUS_UNSPECIFIED", + 1: "UNVERIFIED", + 2: "VERIFIED", + } + NotificationChannel_VerificationStatus_value = map[string]int32{ + "VERIFICATION_STATUS_UNSPECIFIED": 0, + "UNVERIFIED": 1, + "VERIFIED": 2, + } +) + +func (x NotificationChannel_VerificationStatus) Enum() *NotificationChannel_VerificationStatus { + p := new(NotificationChannel_VerificationStatus) + *p = x + return p +} + +func (x NotificationChannel_VerificationStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (NotificationChannel_VerificationStatus) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_notification_proto_enumTypes[0].Descriptor() +} + +func (NotificationChannel_VerificationStatus) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_notification_proto_enumTypes[0] +} + +func (x NotificationChannel_VerificationStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use NotificationChannel_VerificationStatus.Descriptor instead. 
+func (NotificationChannel_VerificationStatus) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_proto_rawDescGZIP(), []int{1, 0} +} + +// A description of a notification channel. The descriptor includes +// the properties of the channel and the set of labels or fields that +// must be specified to configure channels of a given type. +type NotificationChannelDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The full REST resource name for this descriptor. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[TYPE] + // + // In the above, `[TYPE]` is the value of the `type` field. + Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + // The type of notification channel, such as "email" and "sms". To view the + // full list of channels, see + // [Channel + // descriptors](https://cloud.google.com/monitoring/alerts/using-channels-api#ncd). + // Notification channel types are globally unique. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // A human-readable name for the notification channel type. This + // form of the name is suitable for a user interface. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // A human-readable description of the notification channel + // type. The description may include a description of the properties + // of the channel and pointers to external documentation. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // The set of labels that must be defined to identify a particular + // channel of the corresponding type. Each label includes a + // description for how that field should be populated. + Labels []*label.LabelDescriptor `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"` + // The tiers that support this notification channel; the project service tier + // must be one of the supported_tiers. + // + // Deprecated: Marked as deprecated in google/monitoring/v3/notification.proto. + SupportedTiers []ServiceTier `protobuf:"varint,5,rep,packed,name=supported_tiers,json=supportedTiers,proto3,enum=google.monitoring.v3.ServiceTier" json:"supported_tiers,omitempty"` + // The product launch stage for channels of this type. + LaunchStage api.LaunchStage `protobuf:"varint,7,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` +} + +func (x *NotificationChannelDescriptor) Reset() { + *x = NotificationChannelDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotificationChannelDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotificationChannelDescriptor) ProtoMessage() {} + +func (x *NotificationChannelDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotificationChannelDescriptor.ProtoReflect.Descriptor instead. 
+func (*NotificationChannelDescriptor) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_proto_rawDescGZIP(), []int{0} +} + +func (x *NotificationChannelDescriptor) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NotificationChannelDescriptor) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *NotificationChannelDescriptor) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *NotificationChannelDescriptor) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *NotificationChannelDescriptor) GetLabels() []*label.LabelDescriptor { + if x != nil { + return x.Labels + } + return nil +} + +// Deprecated: Marked as deprecated in google/monitoring/v3/notification.proto. +func (x *NotificationChannelDescriptor) GetSupportedTiers() []ServiceTier { + if x != nil { + return x.SupportedTiers + } + return nil +} + +func (x *NotificationChannelDescriptor) GetLaunchStage() api.LaunchStage { + if x != nil { + return x.LaunchStage + } + return api.LaunchStage(0) +} + +// A `NotificationChannel` is a medium through which an alert is +// delivered when a policy violation is detected. Examples of channels +// include email, SMS, and third-party messaging applications. Fields +// containing sensitive information like authentication tokens or +// contact info are only partially populated on retrieval. +type NotificationChannel struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of the notification channel. This field matches the + // value of the + // [NotificationChannelDescriptor.type][google.monitoring.v3.NotificationChannelDescriptor.type] + // field. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The full REST resource name for this channel. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + // + // The `[CHANNEL_ID]` is automatically assigned by the server on creation. + Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + // An optional human-readable name for this notification channel. It is + // recommended that you specify a non-empty and unique name in order to + // make it easier to identify the channels in your project, though this is + // not enforced. The display name is limited to 512 Unicode characters. + DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // An optional human-readable description of this notification channel. This + // description may provide additional details, beyond the display + // name, for the channel. This may not exceed 1024 Unicode characters. + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + // Configuration fields that define the channel and its behavior. The + // permissible and required labels are specified in the + // [NotificationChannelDescriptor.labels][google.monitoring.v3.NotificationChannelDescriptor.labels] + // of the `NotificationChannelDescriptor` corresponding to the `type` field. 
+ Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // User-supplied key/value data that does not need to conform to + // the corresponding `NotificationChannelDescriptor`'s schema, unlike + // the `labels` field. This field is intended to be used for organizing + // and identifying the `NotificationChannel` objects. + // + // The field can contain up to 64 entries. Each key and value is limited to + // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and + // values can contain only lowercase letters, numerals, underscores, and + // dashes. Keys must begin with a letter. + UserLabels map[string]string `protobuf:"bytes,8,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Indicates whether this channel has been verified or not. On a + // [`ListNotificationChannels`][google.monitoring.v3.NotificationChannelService.ListNotificationChannels] + // or + // [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel] + // operation, this field is expected to be populated. + // + // If the value is `UNVERIFIED`, then it indicates that the channel is + // non-functioning (it both requires verification and lacks verification); + // otherwise, it is assumed that the channel works. + // + // If the channel is neither `VERIFIED` nor `UNVERIFIED`, it implies that + // the channel is of a type that does not require verification or that + // this specific channel has been exempted from verification because it was + // created prior to verification being required for channels of this type. + // + // This field cannot be modified using a standard + // [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel] + // operation. To change the value of this field, you must call + // [`VerifyNotificationChannel`][google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel]. + VerificationStatus NotificationChannel_VerificationStatus `protobuf:"varint,9,opt,name=verification_status,json=verificationStatus,proto3,enum=google.monitoring.v3.NotificationChannel_VerificationStatus" json:"verification_status,omitempty"` + // Whether notifications are forwarded to the described channel. This makes + // it possible to disable delivery of notifications to a particular channel + // without removing the channel from all alerting policies that reference + // the channel. This is a more convenient approach when the change is + // temporary and you want to receive notifications from the same set + // of alerting policies on the channel at some point in the future. + Enabled *wrapperspb.BoolValue `protobuf:"bytes,11,opt,name=enabled,proto3" json:"enabled,omitempty"` + // Record of the creation of this channel. + CreationRecord *MutationRecord `protobuf:"bytes,12,opt,name=creation_record,json=creationRecord,proto3" json:"creation_record,omitempty"` + // Records of the modification of this channel. 
+ MutationRecords []*MutationRecord `protobuf:"bytes,13,rep,name=mutation_records,json=mutationRecords,proto3" json:"mutation_records,omitempty"` +} + +func (x *NotificationChannel) Reset() { + *x = NotificationChannel{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotificationChannel) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotificationChannel) ProtoMessage() {} + +func (x *NotificationChannel) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotificationChannel.ProtoReflect.Descriptor instead. +func (*NotificationChannel) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_proto_rawDescGZIP(), []int{1} +} + +func (x *NotificationChannel) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *NotificationChannel) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NotificationChannel) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *NotificationChannel) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *NotificationChannel) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *NotificationChannel) GetUserLabels() map[string]string { + if x != nil { + return x.UserLabels + } + return nil +} + +func (x *NotificationChannel) GetVerificationStatus() NotificationChannel_VerificationStatus { + if x != nil { + return x.VerificationStatus + } + return NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED +} + +func (x *NotificationChannel) GetEnabled() *wrapperspb.BoolValue { + if x != nil { + return x.Enabled + } + return nil +} + +func (x *NotificationChannel) GetCreationRecord() *MutationRecord { + if x != nil { + return x.CreationRecord + } + return nil +} + +func (x *NotificationChannel) GetMutationRecords() []*MutationRecord { + if x != nil { + return x.MutationRecords + } + return nil +} + +var File_google_monitoring_v3_notification_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_notification_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, + 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 
0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x75, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xf0, 0x04, 0x0a, 0x1d, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, + 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x33, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x12, 0x4e, 0x0a, 0x0f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, + 0x64, 0x5f, 0x74, 0x69, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x21, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x69, 0x65, 0x72, + 0x42, 0x02, 0x18, 0x01, 0x52, 0x0e, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x54, + 0x69, 0x65, 0x72, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, + 0x74, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, + 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, + 0x3a, 0xa0, 0x02, 0xea, 0x41, 0x9c, 0x02, 0x0a, 0x37, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x12, 0x46, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x50, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x44, 0x66, 0x6f, 0x6c, 0x64, + 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x68, 0x61, + 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, + 0x12, 0x01, 0x2a, 0x22, 0xc6, 0x08, 0x0a, 0x13, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, + 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, + 0x6e, 0x65, 0x6c, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x5a, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x12, 0x6d, 0x0a, 0x13, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x56, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x12, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x4d, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x4f, 0x0a, 0x10, 0x6d, 0x75, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0x57, 0x0a, 0x12, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x1f, 0x56, 0x45, 0x52, 0x49, + 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, + 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, + 0x08, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x02, 0x3a, 0xfe, 0x01, 0xea, 0x41, + 0xfa, 0x01, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, + 0x6c, 0x12, 0x3e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, + 0x7d, 0x12, 0x48, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, + 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, + 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 
0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x7d, 0x12, 0x3c, 0x66, 0x6f, 0x6c, + 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x6e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, + 0x6c, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0xcc, 0x01, 0x0a, + 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x11, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, + 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, + 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, + 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_notification_proto_rawDescOnce sync.Once + file_google_monitoring_v3_notification_proto_rawDescData = file_google_monitoring_v3_notification_proto_rawDesc +) + +func file_google_monitoring_v3_notification_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_notification_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_notification_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_notification_proto_rawDescData) + }) + return file_google_monitoring_v3_notification_proto_rawDescData +} + +var file_google_monitoring_v3_notification_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_monitoring_v3_notification_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_monitoring_v3_notification_proto_goTypes = []any{ + (NotificationChannel_VerificationStatus)(0), // 0: google.monitoring.v3.NotificationChannel.VerificationStatus + (*NotificationChannelDescriptor)(nil), // 1: google.monitoring.v3.NotificationChannelDescriptor + (*NotificationChannel)(nil), // 2: google.monitoring.v3.NotificationChannel + nil, // 3: google.monitoring.v3.NotificationChannel.LabelsEntry + nil, // 4: google.monitoring.v3.NotificationChannel.UserLabelsEntry + (*label.LabelDescriptor)(nil), // 5: google.api.LabelDescriptor + (ServiceTier)(0), // 6: google.monitoring.v3.ServiceTier + (api.LaunchStage)(0), // 7: google.api.LaunchStage + (*wrapperspb.BoolValue)(nil), // 8: google.protobuf.BoolValue + (*MutationRecord)(nil), // 9: google.monitoring.v3.MutationRecord +} +var file_google_monitoring_v3_notification_proto_depIdxs = []int32{ + 5, // 0: google.monitoring.v3.NotificationChannelDescriptor.labels:type_name -> google.api.LabelDescriptor + 6, // 1: 
google.monitoring.v3.NotificationChannelDescriptor.supported_tiers:type_name -> google.monitoring.v3.ServiceTier + 7, // 2: google.monitoring.v3.NotificationChannelDescriptor.launch_stage:type_name -> google.api.LaunchStage + 3, // 3: google.monitoring.v3.NotificationChannel.labels:type_name -> google.monitoring.v3.NotificationChannel.LabelsEntry + 4, // 4: google.monitoring.v3.NotificationChannel.user_labels:type_name -> google.monitoring.v3.NotificationChannel.UserLabelsEntry + 0, // 5: google.monitoring.v3.NotificationChannel.verification_status:type_name -> google.monitoring.v3.NotificationChannel.VerificationStatus + 8, // 6: google.monitoring.v3.NotificationChannel.enabled:type_name -> google.protobuf.BoolValue + 9, // 7: google.monitoring.v3.NotificationChannel.creation_record:type_name -> google.monitoring.v3.MutationRecord + 9, // 8: google.monitoring.v3.NotificationChannel.mutation_records:type_name -> google.monitoring.v3.MutationRecord + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_notification_proto_init() } +func file_google_monitoring_v3_notification_proto_init() { + if File_google_monitoring_v3_notification_proto != nil { + return + } + file_google_monitoring_v3_common_proto_init() + file_google_monitoring_v3_mutation_record_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_notification_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*NotificationChannelDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*NotificationChannel); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_notification_proto_rawDesc, + NumEnums: 1, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_notification_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_notification_proto_depIdxs, + EnumInfos: file_google_monitoring_v3_notification_proto_enumTypes, + MessageInfos: file_google_monitoring_v3_notification_proto_msgTypes, + }.Build() + File_google_monitoring_v3_notification_proto = out.File + file_google_monitoring_v3_notification_proto_rawDesc = nil + file_google_monitoring_v3_notification_proto_goTypes = nil + file_google_monitoring_v3_notification_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go new file mode 100644 index 0000000000..ac7bafd1f1 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go @@ -0,0 +1,2002 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/notification_service.proto + +package monitoringpb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The `ListNotificationChannelDescriptors` request. +type ListNotificationChannelDescriptorsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The REST resource name of the parent from which to retrieve + // the notification channel descriptors. The expected syntax is: + // + // projects/[PROJECT_ID_OR_NUMBER] + // + // Note that this + // [names](https://cloud.google.com/monitoring/api/v3#project_name) the parent + // container in which to look for the descriptors; to retrieve a single + // descriptor by name, use the + // [GetNotificationChannelDescriptor][google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor] + // operation, instead. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // The maximum number of results to return in a single response. If + // not set to a positive number, a reasonable value will be chosen by the + // service. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If non-empty, `page_token` must contain a value returned as the + // `next_page_token` in a previous response to request the next set + // of results. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListNotificationChannelDescriptorsRequest) Reset() { + *x = ListNotificationChannelDescriptorsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListNotificationChannelDescriptorsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListNotificationChannelDescriptorsRequest) ProtoMessage() {} + +func (x *ListNotificationChannelDescriptorsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListNotificationChannelDescriptorsRequest.ProtoReflect.Descriptor instead. +func (*ListNotificationChannelDescriptorsRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{0} +} + +func (x *ListNotificationChannelDescriptorsRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListNotificationChannelDescriptorsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListNotificationChannelDescriptorsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The `ListNotificationChannelDescriptors` response. +type ListNotificationChannelDescriptorsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The monitored resource descriptors supported for the specified + // project, optionally filtered. + ChannelDescriptors []*NotificationChannelDescriptor `protobuf:"bytes,1,rep,name=channel_descriptors,json=channelDescriptors,proto3" json:"channel_descriptors,omitempty"` + // If not empty, indicates that there may be more results that match + // the request. Use the value in the `page_token` field in a + // subsequent request to fetch the next set of results. If empty, + // all results have been returned. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListNotificationChannelDescriptorsResponse) Reset() { + *x = ListNotificationChannelDescriptorsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListNotificationChannelDescriptorsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListNotificationChannelDescriptorsResponse) ProtoMessage() {} + +func (x *ListNotificationChannelDescriptorsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListNotificationChannelDescriptorsResponse.ProtoReflect.Descriptor instead. 
+func (*ListNotificationChannelDescriptorsResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ListNotificationChannelDescriptorsResponse) GetChannelDescriptors() []*NotificationChannelDescriptor { + if x != nil { + return x.ChannelDescriptors + } + return nil +} + +func (x *ListNotificationChannelDescriptorsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The `GetNotificationChannelDescriptor` response. +type GetNotificationChannelDescriptorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The channel type for which to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[CHANNEL_TYPE] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetNotificationChannelDescriptorRequest) Reset() { + *x = GetNotificationChannelDescriptorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetNotificationChannelDescriptorRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetNotificationChannelDescriptorRequest) ProtoMessage() {} + +func (x *GetNotificationChannelDescriptorRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetNotificationChannelDescriptorRequest.ProtoReflect.Descriptor instead. +func (*GetNotificationChannelDescriptorRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{2} +} + +func (x *GetNotificationChannelDescriptorRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `CreateNotificationChannel` request. +type CreateNotificationChannelRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which + // to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + // + // This names the container into which the channel will be + // written, this does not name the newly created channel. The resulting + // channel's name will have a normalized version of this field as a prefix, + // but will add `/notificationChannels/[CHANNEL_ID]` to identify the channel. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Required. The definition of the `NotificationChannel` to create. 
+ NotificationChannel *NotificationChannel `protobuf:"bytes,2,opt,name=notification_channel,json=notificationChannel,proto3" json:"notification_channel,omitempty"` +} + +func (x *CreateNotificationChannelRequest) Reset() { + *x = CreateNotificationChannelRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateNotificationChannelRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateNotificationChannelRequest) ProtoMessage() {} + +func (x *CreateNotificationChannelRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateNotificationChannelRequest.ProtoReflect.Descriptor instead. +func (*CreateNotificationChannelRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{3} +} + +func (x *CreateNotificationChannelRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CreateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel { + if x != nil { + return x.NotificationChannel + } + return nil +} + +// The `ListNotificationChannels` request. +type ListNotificationChannelsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which + // to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + // + // This names the container + // in which to look for the notification channels; it does not name a + // specific channel. To query a specific channel by REST resource name, use + // the + // [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel] + // operation. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // If provided, this field specifies the criteria that must be met by + // notification channels to be included in the response. + // + // For more details, see [sorting and + // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). + Filter string `protobuf:"bytes,6,opt,name=filter,proto3" json:"filter,omitempty"` + // A comma-separated list of fields by which to sort the result. Supports + // the same set of fields as in `filter`. Entries can be prefixed with + // a minus sign to sort in descending rather than ascending order. + // + // For more details, see [sorting and + // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). + OrderBy string `protobuf:"bytes,7,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` + // The maximum number of results to return in a single response. If + // not set to a positive number, a reasonable value will be chosen by the + // service. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If non-empty, `page_token` must contain a value returned as the + // `next_page_token` in a previous response to request the next set + // of results. 
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListNotificationChannelsRequest) Reset() { + *x = ListNotificationChannelsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListNotificationChannelsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListNotificationChannelsRequest) ProtoMessage() {} + +func (x *ListNotificationChannelsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListNotificationChannelsRequest.ProtoReflect.Descriptor instead. +func (*ListNotificationChannelsRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{4} +} + +func (x *ListNotificationChannelsRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListNotificationChannelsRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListNotificationChannelsRequest) GetOrderBy() string { + if x != nil { + return x.OrderBy + } + return "" +} + +func (x *ListNotificationChannelsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListNotificationChannelsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The `ListNotificationChannels` response. +type ListNotificationChannelsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The notification channels defined for the specified project. + NotificationChannels []*NotificationChannel `protobuf:"bytes,3,rep,name=notification_channels,json=notificationChannels,proto3" json:"notification_channels,omitempty"` + // If not empty, indicates that there may be more results that match + // the request. Use the value in the `page_token` field in a + // subsequent request to fetch the next set of results. If empty, + // all results have been returned. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of notification channels in all pages. This number is only + // an estimate, and may change in subsequent pages. 
https://aip.dev/158 + TotalSize int32 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` +} + +func (x *ListNotificationChannelsResponse) Reset() { + *x = ListNotificationChannelsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListNotificationChannelsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListNotificationChannelsResponse) ProtoMessage() {} + +func (x *ListNotificationChannelsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListNotificationChannelsResponse.ProtoReflect.Descriptor instead. +func (*ListNotificationChannelsResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{5} +} + +func (x *ListNotificationChannelsResponse) GetNotificationChannels() []*NotificationChannel { + if x != nil { + return x.NotificationChannels + } + return nil +} + +func (x *ListNotificationChannelsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *ListNotificationChannelsResponse) GetTotalSize() int32 { + if x != nil { + return x.TotalSize + } + return 0 +} + +// The `GetNotificationChannel` request. +type GetNotificationChannelRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The channel for which to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetNotificationChannelRequest) Reset() { + *x = GetNotificationChannelRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetNotificationChannelRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetNotificationChannelRequest) ProtoMessage() {} + +func (x *GetNotificationChannelRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetNotificationChannelRequest.ProtoReflect.Descriptor instead. +func (*GetNotificationChannelRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{6} +} + +func (x *GetNotificationChannelRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `UpdateNotificationChannel` request. +type UpdateNotificationChannelRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The fields to update. 
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // Required. A description of the changes to be applied to the specified + // notification channel. The description must provide a definition for + // fields to be updated; the names of these fields should also be + // included in the `update_mask`. + NotificationChannel *NotificationChannel `protobuf:"bytes,3,opt,name=notification_channel,json=notificationChannel,proto3" json:"notification_channel,omitempty"` +} + +func (x *UpdateNotificationChannelRequest) Reset() { + *x = UpdateNotificationChannelRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateNotificationChannelRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateNotificationChannelRequest) ProtoMessage() {} + +func (x *UpdateNotificationChannelRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateNotificationChannelRequest.ProtoReflect.Descriptor instead. +func (*UpdateNotificationChannelRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{7} +} + +func (x *UpdateNotificationChannelRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +func (x *UpdateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel { + if x != nil { + return x.NotificationChannel + } + return nil +} + +// The `DeleteNotificationChannel` request. +type DeleteNotificationChannelRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The channel for which to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // If true, the notification channel will be deleted regardless of its + // use in alert policies (the policies will be updated to remove the + // channel). If false, channels that are still referenced by an existing + // alerting policy will fail to be deleted in a delete operation. 
+ Force bool `protobuf:"varint,5,opt,name=force,proto3" json:"force,omitempty"` +} + +func (x *DeleteNotificationChannelRequest) Reset() { + *x = DeleteNotificationChannelRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteNotificationChannelRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteNotificationChannelRequest) ProtoMessage() {} + +func (x *DeleteNotificationChannelRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteNotificationChannelRequest.ProtoReflect.Descriptor instead. +func (*DeleteNotificationChannelRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{8} +} + +func (x *DeleteNotificationChannelRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *DeleteNotificationChannelRequest) GetForce() bool { + if x != nil { + return x.Force + } + return false +} + +// The `SendNotificationChannelVerificationCode` request. +type SendNotificationChannelVerificationCodeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The notification channel to which to send a verification code. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *SendNotificationChannelVerificationCodeRequest) Reset() { + *x = SendNotificationChannelVerificationCodeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SendNotificationChannelVerificationCodeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendNotificationChannelVerificationCodeRequest) ProtoMessage() {} + +func (x *SendNotificationChannelVerificationCodeRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendNotificationChannelVerificationCodeRequest.ProtoReflect.Descriptor instead. +func (*SendNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{9} +} + +func (x *SendNotificationChannelVerificationCodeRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `GetNotificationChannelVerificationCode` request. +type GetNotificationChannelVerificationCodeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The notification channel for which a verification code is to be + // generated and retrieved. This must name a channel that is already verified; + // if the specified channel is not verified, the request will fail. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The desired expiration time. If specified, the API will guarantee that + // the returned code will not be valid after the specified timestamp; + // however, the API cannot guarantee that the returned code will be + // valid for at least as long as the requested time (the API puts an upper + // bound on the amount of time for which a code may be valid). If omitted, + // a default expiration will be used, which may be less than the max + // permissible expiration (so specifying an expiration may extend the + // code's lifetime over omitting an expiration, even though the API does + // impose an upper limit on the maximum expiration that is permitted). + ExpireTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` +} + +func (x *GetNotificationChannelVerificationCodeRequest) Reset() { + *x = GetNotificationChannelVerificationCodeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetNotificationChannelVerificationCodeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetNotificationChannelVerificationCodeRequest) ProtoMessage() {} + +func (x *GetNotificationChannelVerificationCodeRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetNotificationChannelVerificationCodeRequest.ProtoReflect.Descriptor instead. +func (*GetNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{10} +} + +func (x *GetNotificationChannelVerificationCodeRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *GetNotificationChannelVerificationCodeRequest) GetExpireTime() *timestamppb.Timestamp { + if x != nil { + return x.ExpireTime + } + return nil +} + +// The `GetNotificationChannelVerificationCode` request. +type GetNotificationChannelVerificationCodeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The verification code, which may be used to verify other channels + // that have an equivalent identity (i.e. other channels of the same + // type with the same fingerprint such as other email channels with + // the same email address or other sms channels with the same number). + Code string `protobuf:"bytes,1,opt,name=code,proto3" json:"code,omitempty"` + // The expiration time associated with the code that was returned. If + // an expiration was provided in the request, this is the minimum of the + // requested expiration in the request and the max permitted expiration. 
+ ExpireTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` +} + +func (x *GetNotificationChannelVerificationCodeResponse) Reset() { + *x = GetNotificationChannelVerificationCodeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetNotificationChannelVerificationCodeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetNotificationChannelVerificationCodeResponse) ProtoMessage() {} + +func (x *GetNotificationChannelVerificationCodeResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetNotificationChannelVerificationCodeResponse.ProtoReflect.Descriptor instead. +func (*GetNotificationChannelVerificationCodeResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{11} +} + +func (x *GetNotificationChannelVerificationCodeResponse) GetCode() string { + if x != nil { + return x.Code + } + return "" +} + +func (x *GetNotificationChannelVerificationCodeResponse) GetExpireTime() *timestamppb.Timestamp { + if x != nil { + return x.ExpireTime + } + return nil +} + +// The `VerifyNotificationChannel` request. +type VerifyNotificationChannelRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The notification channel to verify. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The verification code that was delivered to the channel as + // a result of invoking the `SendNotificationChannelVerificationCode` API + // method or that was retrieved from a verified channel via + // `GetNotificationChannelVerificationCode`. For example, one might have + // "G-123456" or "TKNZGhhd2EyN3I1MnRnMjRv" (in general, one is only + // guaranteed that the code is valid UTF-8; one should not + // make any assumptions regarding the structure or format of the code). + Code string `protobuf:"bytes,2,opt,name=code,proto3" json:"code,omitempty"` +} + +func (x *VerifyNotificationChannelRequest) Reset() { + *x = VerifyNotificationChannelRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VerifyNotificationChannelRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VerifyNotificationChannelRequest) ProtoMessage() {} + +func (x *VerifyNotificationChannelRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VerifyNotificationChannelRequest.ProtoReflect.Descriptor instead. 
+func (*VerifyNotificationChannelRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{12} +} + +func (x *VerifyNotificationChannelRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *VerifyNotificationChannelRequest) GetCode() string { + if x != nil { + return x.Code + } + return "" +} + +var File_google_monitoring_v3_notification_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_notification_service_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, + 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xbc, 0x01, 0x0a, 0x29, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x53, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x3f, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x39, 0x12, 0x37, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 
0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x22, 0xba, 0x01, 0x0a, 0x2a, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x64, 0x0a, 0x13, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x52, 0x12, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, + 0x7e, 0x0a, 0x27, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x3f, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x39, + 0x0a, 0x37, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, + 0xd0, 0x01, 0x0a, 0x20, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x12, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x61, 0x0a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, 0x6e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, + 0x65, 0x6c, 0x22, 0xdb, 0x01, 0x0a, 0x1f, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 
0x65, 0x6c, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x12, 0x2d, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x72, 0x64, + 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x72, 0x64, + 0x65, 0x72, 0x42, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0xc9, 0x01, 0x0a, 0x20, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x15, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, + 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, + 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, 0x0a, + 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x6a, 0x0a, 0x1d, + 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, + 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc2, 0x01, 0x0a, 0x20, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, + 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x61, 0x0a, 0x14, 0x6e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, + 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, + 0x6e, 0x65, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x83, 0x01, + 0x0a, 0x20, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, + 0x72, 0x63, 0x65, 0x22, 0x7b, 0x0a, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x22, 0xb7, 0x01, 0x0a, 0x2d, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, + 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, + 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x81, 0x01, 0x0a, 0x2e, 0x47, + 0x65, 0x74, 0x4e, 0x6f, 
0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, + 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x6f, 0x64, + 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x86, + 0x01, 0x0a, 0x20, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, + 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x32, 0xea, 0x12, 0x0a, 0x1a, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xec, 0x01, 0x0a, 0x22, 0x4c, 0x69, 0x73, 0x74, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, + 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x3f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x40, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x43, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x36, 0x12, + 0x34, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0xdd, 0x01, 0x0a, 0x20, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 
0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, + 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x45, + 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x36, 0x2f, + 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xc4, 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, + 0x6c, 0x73, 0x12, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, + 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x39, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, + 0x12, 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0xb5, 0x01, 0x0a, + 0x16, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, + 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, + 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x3b, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x12, 0x2c, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, + 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xe4, 0x01, 0x0a, 0x19, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 
0x68, 0x61, 0x6e, 0x6e, + 0x65, 0x6c, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, + 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, + 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x64, 0xda, 0x41, 0x19, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x6e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, + 0x6e, 0x65, 0x6c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x42, 0x3a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, + 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0x83, 0x02, 0x0a, 0x19, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x82, 0x01, 0xda, + 0x41, 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2c, 0x6e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, + 0x65, 0x6c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x59, 0x3a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x32, 0x41, + 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, + 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x19, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, + 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, + 
0x41, 0xda, 0x41, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x2e, 0x2a, 0x2c, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, + 0x2a, 0x7d, 0x12, 0xdc, 0x01, 0x0a, 0x27, 0x53, 0x65, 0x6e, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x44, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x53, 0xda, 0x41, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x46, 0x3a, 0x01, 0x2a, 0x22, 0x41, + 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x6e, + 0x64, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, + 0x65, 0x12, 0x87, 0x02, 0x0a, 0x26, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x43, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x44, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, + 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x45, 0x3a, 0x01, 0x2a, 0x22, 0x40, 0x2f, 0x76, 0x33, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, + 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x12, 0xca, 0x01, 0x0a, 0x19, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x68, 
0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x4a, 0xda, 0x41, + 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x6f, 0x64, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, + 0x3a, 0x01, 0x2a, 0x22, 0x33, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, + 0x7d, 0x3a, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, + 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, + 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x72, 0x65, 0x61, 0x64, 0x42, 0xd3, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x42, 0x18, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, + 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, + 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + 
file_google_monitoring_v3_notification_service_proto_rawDescOnce sync.Once + file_google_monitoring_v3_notification_service_proto_rawDescData = file_google_monitoring_v3_notification_service_proto_rawDesc +) + +func file_google_monitoring_v3_notification_service_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_notification_service_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_notification_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_notification_service_proto_rawDescData) + }) + return file_google_monitoring_v3_notification_service_proto_rawDescData +} + +var file_google_monitoring_v3_notification_service_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_google_monitoring_v3_notification_service_proto_goTypes = []any{ + (*ListNotificationChannelDescriptorsRequest)(nil), // 0: google.monitoring.v3.ListNotificationChannelDescriptorsRequest + (*ListNotificationChannelDescriptorsResponse)(nil), // 1: google.monitoring.v3.ListNotificationChannelDescriptorsResponse + (*GetNotificationChannelDescriptorRequest)(nil), // 2: google.monitoring.v3.GetNotificationChannelDescriptorRequest + (*CreateNotificationChannelRequest)(nil), // 3: google.monitoring.v3.CreateNotificationChannelRequest + (*ListNotificationChannelsRequest)(nil), // 4: google.monitoring.v3.ListNotificationChannelsRequest + (*ListNotificationChannelsResponse)(nil), // 5: google.monitoring.v3.ListNotificationChannelsResponse + (*GetNotificationChannelRequest)(nil), // 6: google.monitoring.v3.GetNotificationChannelRequest + (*UpdateNotificationChannelRequest)(nil), // 7: google.monitoring.v3.UpdateNotificationChannelRequest + (*DeleteNotificationChannelRequest)(nil), // 8: google.monitoring.v3.DeleteNotificationChannelRequest + (*SendNotificationChannelVerificationCodeRequest)(nil), // 9: google.monitoring.v3.SendNotificationChannelVerificationCodeRequest + (*GetNotificationChannelVerificationCodeRequest)(nil), // 10: google.monitoring.v3.GetNotificationChannelVerificationCodeRequest + (*GetNotificationChannelVerificationCodeResponse)(nil), // 11: google.monitoring.v3.GetNotificationChannelVerificationCodeResponse + (*VerifyNotificationChannelRequest)(nil), // 12: google.monitoring.v3.VerifyNotificationChannelRequest + (*NotificationChannelDescriptor)(nil), // 13: google.monitoring.v3.NotificationChannelDescriptor + (*NotificationChannel)(nil), // 14: google.monitoring.v3.NotificationChannel + (*fieldmaskpb.FieldMask)(nil), // 15: google.protobuf.FieldMask + (*timestamppb.Timestamp)(nil), // 16: google.protobuf.Timestamp + (*emptypb.Empty)(nil), // 17: google.protobuf.Empty +} +var file_google_monitoring_v3_notification_service_proto_depIdxs = []int32{ + 13, // 0: google.monitoring.v3.ListNotificationChannelDescriptorsResponse.channel_descriptors:type_name -> google.monitoring.v3.NotificationChannelDescriptor + 14, // 1: google.monitoring.v3.CreateNotificationChannelRequest.notification_channel:type_name -> google.monitoring.v3.NotificationChannel + 14, // 2: google.monitoring.v3.ListNotificationChannelsResponse.notification_channels:type_name -> google.monitoring.v3.NotificationChannel + 15, // 3: google.monitoring.v3.UpdateNotificationChannelRequest.update_mask:type_name -> google.protobuf.FieldMask + 14, // 4: google.monitoring.v3.UpdateNotificationChannelRequest.notification_channel:type_name -> google.monitoring.v3.NotificationChannel + 16, // 5: google.monitoring.v3.GetNotificationChannelVerificationCodeRequest.expire_time:type_name -> google.protobuf.Timestamp + 16, 
// 6: google.monitoring.v3.GetNotificationChannelVerificationCodeResponse.expire_time:type_name -> google.protobuf.Timestamp + 0, // 7: google.monitoring.v3.NotificationChannelService.ListNotificationChannelDescriptors:input_type -> google.monitoring.v3.ListNotificationChannelDescriptorsRequest + 2, // 8: google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor:input_type -> google.monitoring.v3.GetNotificationChannelDescriptorRequest + 4, // 9: google.monitoring.v3.NotificationChannelService.ListNotificationChannels:input_type -> google.monitoring.v3.ListNotificationChannelsRequest + 6, // 10: google.monitoring.v3.NotificationChannelService.GetNotificationChannel:input_type -> google.monitoring.v3.GetNotificationChannelRequest + 3, // 11: google.monitoring.v3.NotificationChannelService.CreateNotificationChannel:input_type -> google.monitoring.v3.CreateNotificationChannelRequest + 7, // 12: google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel:input_type -> google.monitoring.v3.UpdateNotificationChannelRequest + 8, // 13: google.monitoring.v3.NotificationChannelService.DeleteNotificationChannel:input_type -> google.monitoring.v3.DeleteNotificationChannelRequest + 9, // 14: google.monitoring.v3.NotificationChannelService.SendNotificationChannelVerificationCode:input_type -> google.monitoring.v3.SendNotificationChannelVerificationCodeRequest + 10, // 15: google.monitoring.v3.NotificationChannelService.GetNotificationChannelVerificationCode:input_type -> google.monitoring.v3.GetNotificationChannelVerificationCodeRequest + 12, // 16: google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel:input_type -> google.monitoring.v3.VerifyNotificationChannelRequest + 1, // 17: google.monitoring.v3.NotificationChannelService.ListNotificationChannelDescriptors:output_type -> google.monitoring.v3.ListNotificationChannelDescriptorsResponse + 13, // 18: google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor:output_type -> google.monitoring.v3.NotificationChannelDescriptor + 5, // 19: google.monitoring.v3.NotificationChannelService.ListNotificationChannels:output_type -> google.monitoring.v3.ListNotificationChannelsResponse + 14, // 20: google.monitoring.v3.NotificationChannelService.GetNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel + 14, // 21: google.monitoring.v3.NotificationChannelService.CreateNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel + 14, // 22: google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel + 17, // 23: google.monitoring.v3.NotificationChannelService.DeleteNotificationChannel:output_type -> google.protobuf.Empty + 17, // 24: google.monitoring.v3.NotificationChannelService.SendNotificationChannelVerificationCode:output_type -> google.protobuf.Empty + 11, // 25: google.monitoring.v3.NotificationChannelService.GetNotificationChannelVerificationCode:output_type -> google.monitoring.v3.GetNotificationChannelVerificationCodeResponse + 14, // 26: google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel + 17, // [17:27] is the sub-list for method output_type + 7, // [7:17] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { 
file_google_monitoring_v3_notification_service_proto_init() } +func file_google_monitoring_v3_notification_service_proto_init() { + if File_google_monitoring_v3_notification_service_proto != nil { + return + } + file_google_monitoring_v3_notification_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_notification_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*ListNotificationChannelDescriptorsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ListNotificationChannelDescriptorsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*GetNotificationChannelDescriptorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*CreateNotificationChannelRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*ListNotificationChannelsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*ListNotificationChannelsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*GetNotificationChannelRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*UpdateNotificationChannelRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*DeleteNotificationChannelRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*SendNotificationChannelVerificationCodeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*GetNotificationChannelVerificationCodeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_monitoring_v3_notification_service_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*GetNotificationChannelVerificationCodeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_notification_service_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*VerifyNotificationChannelRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_notification_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 13, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_monitoring_v3_notification_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_notification_service_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_notification_service_proto_msgTypes, + }.Build() + File_google_monitoring_v3_notification_service_proto = out.File + file_google_monitoring_v3_notification_service_proto_rawDesc = nil + file_google_monitoring_v3_notification_service_proto_goTypes = nil + file_google_monitoring_v3_notification_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// NotificationChannelServiceClient is the client API for NotificationChannelService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type NotificationChannelServiceClient interface { + // Lists the descriptors for supported channel types. The use of descriptors + // makes it possible for new channel types to be dynamically added. + ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error) + // Gets a single channel descriptor. The descriptor indicates which fields + // are expected / permitted for a notification channel of the given type. + GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error) + // Lists the notification channels that have been created for the project. + // To list the types of notification channels that are supported, use + // the `ListNotificationChannelDescriptors` method. + ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error) + // Gets a single notification channel. The channel includes the relevant + // configuration details with which the channel was created. However, the + // response may truncate or omit passwords, API keys, or other private key + // matter and thus the response may not be 100% identical to the information + // that was supplied in the call to the create method. 
+ GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) + // Creates a new notification channel, representing a single notification + // endpoint such as an email address, SMS number, or PagerDuty service. + // + // Design your application to single-thread API calls that modify the state of + // notification channels in a single project. This includes calls to + // CreateNotificationChannel, DeleteNotificationChannel and + // UpdateNotificationChannel. + CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) + // Updates a notification channel. Fields not specified in the field mask + // remain unchanged. + // + // Design your application to single-thread API calls that modify the state of + // notification channels in a single project. This includes calls to + // CreateNotificationChannel, DeleteNotificationChannel and + // UpdateNotificationChannel. + UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) + // Deletes a notification channel. + // + // Design your application to single-thread API calls that modify the state of + // notification channels in a single project. This includes calls to + // CreateNotificationChannel, DeleteNotificationChannel and + // UpdateNotificationChannel. + DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Causes a verification code to be delivered to the channel. The code + // can then be supplied in `VerifyNotificationChannel` to verify the channel. + SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Requests a verification code for an already verified channel that can then + // be used in a call to VerifyNotificationChannel() on a different channel + // with an equivalent identity in the same or in a different project. This + // makes it possible to copy a channel between projects without requiring + // manual reverification of the channel. If the channel is not in the + // verified state, this method will fail (in other words, this may only be + // used if the SendNotificationChannelVerificationCode and + // VerifyNotificationChannel paths have already been used to put the given + // channel into the verified state). + // + // There is no guarantee that the verification codes returned by this method + // will be of a similar structure or form as the ones that are delivered + // to the channel via SendNotificationChannelVerificationCode; while + // VerifyNotificationChannel() will recognize both the codes delivered via + // SendNotificationChannelVerificationCode() and returned from + // GetNotificationChannelVerificationCode(), it is typically the case that + // the verification codes delivered via + // SendNotificationChannelVerificationCode() will be shorter and also + // have a shorter expiration (e.g. codes such as "G-123456") whereas + // GetVerificationCode() will typically return a much longer, websafe base + // 64 encoded string that has a longer expiration time. 
+ GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error) + // Verifies a `NotificationChannel` by proving receipt of the code + // delivered to the channel as a result of calling + // `SendNotificationChannelVerificationCode`. + VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) +} + +type notificationChannelServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewNotificationChannelServiceClient(cc grpc.ClientConnInterface) NotificationChannelServiceClient { + return &notificationChannelServiceClient{cc} +} + +func (c *notificationChannelServiceClient) ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error) { + out := new(ListNotificationChannelDescriptorsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error) { + out := new(NotificationChannelDescriptor) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error) { + out := new(ListNotificationChannelsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel", in, out, opts...)
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error) { + out := new(GetNotificationChannelVerificationCodeResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// NotificationChannelServiceServer is the server API for NotificationChannelService service. +type NotificationChannelServiceServer interface { + // Lists the descriptors for supported channel types. The use of descriptors + // makes it possible for new channel types to be dynamically added. + ListNotificationChannelDescriptors(context.Context, *ListNotificationChannelDescriptorsRequest) (*ListNotificationChannelDescriptorsResponse, error) + // Gets a single channel descriptor. The descriptor indicates which fields + // are expected / permitted for a notification channel of the given type. + GetNotificationChannelDescriptor(context.Context, *GetNotificationChannelDescriptorRequest) (*NotificationChannelDescriptor, error) + // Lists the notification channels that have been created for the project. + // To list the types of notification channels that are supported, use + // the `ListNotificationChannelDescriptors` method. + ListNotificationChannels(context.Context, *ListNotificationChannelsRequest) (*ListNotificationChannelsResponse, error) + // Gets a single notification channel. The channel includes the relevant + // configuration details with which the channel was created. However, the + // response may truncate or omit passwords, API keys, or other private key + // matter and thus the response may not be 100% identical to the information + // that was supplied in the call to the create method. + GetNotificationChannel(context.Context, *GetNotificationChannelRequest) (*NotificationChannel, error) + // Creates a new notification channel, representing a single notification + // endpoint such as an email address, SMS number, or PagerDuty service. 
+ // + // Design your application to single-thread API calls that modify the state of + // notification channels in a single project. This includes calls to + // CreateNotificationChannel, DeleteNotificationChannel and + // UpdateNotificationChannel. + CreateNotificationChannel(context.Context, *CreateNotificationChannelRequest) (*NotificationChannel, error) + // Updates a notification channel. Fields not specified in the field mask + // remain unchanged. + // + // Design your application to single-thread API calls that modify the state of + // notification channels in a single project. This includes calls to + // CreateNotificationChannel, DeleteNotificationChannel and + // UpdateNotificationChannel. + UpdateNotificationChannel(context.Context, *UpdateNotificationChannelRequest) (*NotificationChannel, error) + // Deletes a notification channel. + // + // Design your application to single-thread API calls that modify the state of + // notification channels in a single project. This includes calls to + // CreateNotificationChannel, DeleteNotificationChannel and + // UpdateNotificationChannel. + DeleteNotificationChannel(context.Context, *DeleteNotificationChannelRequest) (*emptypb.Empty, error) + // Causes a verification code to be delivered to the channel. The code + // can then be supplied in `VerifyNotificationChannel` to verify the channel. + SendNotificationChannelVerificationCode(context.Context, *SendNotificationChannelVerificationCodeRequest) (*emptypb.Empty, error) + // Requests a verification code for an already verified channel that can then + // be used in a call to VerifyNotificationChannel() on a different channel + // with an equivalent identity in the same or in a different project. This + // makes it possible to copy a channel between projects without requiring + // manual reverification of the channel. If the channel is not in the + // verified state, this method will fail (in other words, this may only be + // used if the SendNotificationChannelVerificationCode and + // VerifyNotificationChannel paths have already been used to put the given + // channel into the verified state). + // + // There is no guarantee that the verification codes returned by this method + // will be of a similar structure or form as the ones that are delivered + // to the channel via SendNotificationChannelVerificationCode; while + // VerifyNotificationChannel() will recognize both the codes delivered via + // SendNotificationChannelVerificationCode() and returned from + // GetNotificationChannelVerificationCode(), it is typically the case that + // the verification codes delivered via + // SendNotificationChannelVerificationCode() will be shorter and also + // have a shorter expiration (e.g. codes such as "G-123456") whereas + // GetVerificationCode() will typically return a much longer, websafe base + // 64 encoded string that has a longer expiration time. + GetNotificationChannelVerificationCode(context.Context, *GetNotificationChannelVerificationCodeRequest) (*GetNotificationChannelVerificationCodeResponse, error) + // Verifies a `NotificationChannel` by proving receipt of the code + // delivered to the channel as a result of calling + // `SendNotificationChannelVerificationCode`. + VerifyNotificationChannel(context.Context, *VerifyNotificationChannelRequest) (*NotificationChannel, error) +} + +// UnimplementedNotificationChannelServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedNotificationChannelServiceServer struct { +} + +func (*UnimplementedNotificationChannelServiceServer) ListNotificationChannelDescriptors(context.Context, *ListNotificationChannelDescriptorsRequest) (*ListNotificationChannelDescriptorsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListNotificationChannelDescriptors not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) GetNotificationChannelDescriptor(context.Context, *GetNotificationChannelDescriptorRequest) (*NotificationChannelDescriptor, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetNotificationChannelDescriptor not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) ListNotificationChannels(context.Context, *ListNotificationChannelsRequest) (*ListNotificationChannelsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListNotificationChannels not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) GetNotificationChannel(context.Context, *GetNotificationChannelRequest) (*NotificationChannel, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetNotificationChannel not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) CreateNotificationChannel(context.Context, *CreateNotificationChannelRequest) (*NotificationChannel, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateNotificationChannel not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) UpdateNotificationChannel(context.Context, *UpdateNotificationChannelRequest) (*NotificationChannel, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateNotificationChannel not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) DeleteNotificationChannel(context.Context, *DeleteNotificationChannelRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteNotificationChannel not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) SendNotificationChannelVerificationCode(context.Context, *SendNotificationChannelVerificationCodeRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method SendNotificationChannelVerificationCode not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) GetNotificationChannelVerificationCode(context.Context, *GetNotificationChannelVerificationCodeRequest) (*GetNotificationChannelVerificationCodeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetNotificationChannelVerificationCode not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) VerifyNotificationChannel(context.Context, *VerifyNotificationChannelRequest) (*NotificationChannel, error) { + return nil, status.Errorf(codes.Unimplemented, "method VerifyNotificationChannel not implemented") +} + +func RegisterNotificationChannelServiceServer(s *grpc.Server, srv NotificationChannelServiceServer) { + s.RegisterService(&_NotificationChannelService_serviceDesc, srv) +} + +func _NotificationChannelService_ListNotificationChannelDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNotificationChannelDescriptorsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, in) + } + 
info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, req.(*ListNotificationChannelDescriptorsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_GetNotificationChannelDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNotificationChannelDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, req.(*GetNotificationChannelDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_ListNotificationChannels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNotificationChannelsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, req.(*ListNotificationChannelsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_GetNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, req.(*GetNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_CreateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, req.(*CreateNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_UpdateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, req.(*UpdateNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_DeleteNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, req.(*DeleteNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_SendNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SendNotificationChannelVerificationCodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, req.(*SendNotificationChannelVerificationCodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_GetNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNotificationChannelVerificationCodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, req.(*GetNotificationChannelVerificationCodeRequest)) + } + return interceptor(ctx, in, 
info, handler) +} + +func _NotificationChannelService_VerifyNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VerifyNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, req.(*VerifyNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _NotificationChannelService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.NotificationChannelService", + HandlerType: (*NotificationChannelServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListNotificationChannelDescriptors", + Handler: _NotificationChannelService_ListNotificationChannelDescriptors_Handler, + }, + { + MethodName: "GetNotificationChannelDescriptor", + Handler: _NotificationChannelService_GetNotificationChannelDescriptor_Handler, + }, + { + MethodName: "ListNotificationChannels", + Handler: _NotificationChannelService_ListNotificationChannels_Handler, + }, + { + MethodName: "GetNotificationChannel", + Handler: _NotificationChannelService_GetNotificationChannel_Handler, + }, + { + MethodName: "CreateNotificationChannel", + Handler: _NotificationChannelService_CreateNotificationChannel_Handler, + }, + { + MethodName: "UpdateNotificationChannel", + Handler: _NotificationChannelService_UpdateNotificationChannel_Handler, + }, + { + MethodName: "DeleteNotificationChannel", + Handler: _NotificationChannelService_DeleteNotificationChannel_Handler, + }, + { + MethodName: "SendNotificationChannelVerificationCode", + Handler: _NotificationChannelService_SendNotificationChannelVerificationCode_Handler, + }, + { + MethodName: "GetNotificationChannelVerificationCode", + Handler: _NotificationChannelService_GetNotificationChannelVerificationCode_Handler, + }, + { + MethodName: "VerifyNotificationChannel", + Handler: _NotificationChannelService_VerifyNotificationChannel_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/notification_service.proto", +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go new file mode 100644 index 0000000000..e9bfbd68f5 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go @@ -0,0 +1,212 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/query_service.proto + +package monitoringpb + +import ( + context "context" + reflect "reflect" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var File_google_monitoring_v3_query_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_query_service_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x32, 0xde, 0x02, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0xa1, 0x01, 0x0a, 0x0f, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2b, 0x3a, 0x01, 0x2a, 0x22, + 0x26, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x3a, 0x71, 0x75, 0x65, 0x72, 0x79, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, + 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 
0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, + 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, + 0x65, 0x61, 0x64, 0x42, 0xcc, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x42, 0x11, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, + 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, + 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_google_monitoring_v3_query_service_proto_goTypes = []any{ + (*QueryTimeSeriesRequest)(nil), // 0: google.monitoring.v3.QueryTimeSeriesRequest + (*QueryTimeSeriesResponse)(nil), // 1: google.monitoring.v3.QueryTimeSeriesResponse +} +var file_google_monitoring_v3_query_service_proto_depIdxs = []int32{ + 0, // 0: google.monitoring.v3.QueryService.QueryTimeSeries:input_type -> google.monitoring.v3.QueryTimeSeriesRequest + 1, // 1: google.monitoring.v3.QueryService.QueryTimeSeries:output_type -> google.monitoring.v3.QueryTimeSeriesResponse + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_query_service_proto_init() } +func file_google_monitoring_v3_query_service_proto_init() { + if File_google_monitoring_v3_query_service_proto != nil { + return + } + file_google_monitoring_v3_metric_service_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_query_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_monitoring_v3_query_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_query_service_proto_depIdxs, + }.Build() + File_google_monitoring_v3_query_service_proto = out.File + file_google_monitoring_v3_query_service_proto_rawDesc = nil + file_google_monitoring_v3_query_service_proto_goTypes = nil + 
file_google_monitoring_v3_query_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// QueryServiceClient is the client API for QueryService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryServiceClient interface { + // Queries time series using Monitoring Query Language. + QueryTimeSeries(ctx context.Context, in *QueryTimeSeriesRequest, opts ...grpc.CallOption) (*QueryTimeSeriesResponse, error) +} + +type queryServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewQueryServiceClient(cc grpc.ClientConnInterface) QueryServiceClient { + return &queryServiceClient{cc} +} + +func (c *queryServiceClient) QueryTimeSeries(ctx context.Context, in *QueryTimeSeriesRequest, opts ...grpc.CallOption) (*QueryTimeSeriesResponse, error) { + out := new(QueryTimeSeriesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.QueryService/QueryTimeSeries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServiceServer is the server API for QueryService service. +type QueryServiceServer interface { + // Queries time series using Monitoring Query Language. + QueryTimeSeries(context.Context, *QueryTimeSeriesRequest) (*QueryTimeSeriesResponse, error) +} + +// UnimplementedQueryServiceServer can be embedded to have forward compatible implementations. +type UnimplementedQueryServiceServer struct { +} + +func (*UnimplementedQueryServiceServer) QueryTimeSeries(context.Context, *QueryTimeSeriesRequest) (*QueryTimeSeriesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method QueryTimeSeries not implemented") +} + +func RegisterQueryServiceServer(s *grpc.Server, srv QueryServiceServer) { + s.RegisterService(&_QueryService_serviceDesc, srv) +} + +func _QueryService_QueryTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryTimeSeriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServiceServer).QueryTimeSeries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.QueryService/QueryTimeSeries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServiceServer).QueryTimeSeries(ctx, req.(*QueryTimeSeriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _QueryService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.QueryService", + HandlerType: (*QueryServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "QueryTimeSeries", + Handler: _QueryService_QueryTimeSeries_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/query_service.proto", +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go new file mode 100644 index 0000000000..869a3738c0 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go @@ -0,0 +1,3107 @@ +// Copyright 2024 Google LLC 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/service.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + calendarperiod "google.golang.org/genproto/googleapis/type/calendarperiod" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// `ServiceLevelObjective.View` determines what form of +// `ServiceLevelObjective` is returned from `GetServiceLevelObjective`, +// `ListServiceLevelObjectives`, and `ListServiceLevelObjectiveVersions` RPCs. +type ServiceLevelObjective_View int32 + +const ( + // Same as FULL. + ServiceLevelObjective_VIEW_UNSPECIFIED ServiceLevelObjective_View = 0 + // Return the embedded `ServiceLevelIndicator` in the form in which it was + // defined. If it was defined using a `BasicSli`, return that `BasicSli`. + ServiceLevelObjective_FULL ServiceLevelObjective_View = 2 + // For `ServiceLevelIndicator`s using `BasicSli` articulation, instead + // return the `ServiceLevelIndicator` with its mode of computation fully + // spelled out as a `RequestBasedSli`. For `ServiceLevelIndicator`s using + // `RequestBasedSli` or `WindowsBasedSli`, return the + // `ServiceLevelIndicator` as it was provided. + ServiceLevelObjective_EXPLICIT ServiceLevelObjective_View = 1 +) + +// Enum value maps for ServiceLevelObjective_View. +var ( + ServiceLevelObjective_View_name = map[int32]string{ + 0: "VIEW_UNSPECIFIED", + 2: "FULL", + 1: "EXPLICIT", + } + ServiceLevelObjective_View_value = map[string]int32{ + "VIEW_UNSPECIFIED": 0, + "FULL": 2, + "EXPLICIT": 1, + } +) + +func (x ServiceLevelObjective_View) Enum() *ServiceLevelObjective_View { + p := new(ServiceLevelObjective_View) + *p = x + return p +} + +func (x ServiceLevelObjective_View) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ServiceLevelObjective_View) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_service_proto_enumTypes[0].Descriptor() +} + +func (ServiceLevelObjective_View) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_service_proto_enumTypes[0] +} + +func (x ServiceLevelObjective_View) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ServiceLevelObjective_View.Descriptor instead. 
+func (ServiceLevelObjective_View) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{1, 0} +} + +// A `Service` is a discrete, autonomous, and network-accessible unit, designed +// to solve an individual concern +// ([Wikipedia](https://en.wikipedia.org/wiki/Service-orientation)). In +// Cloud Monitoring, a `Service` acts as the root resource under which +// operational aspects of the service are accessible. +type Service struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier. Resource name for this Service. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Name used for UI elements listing this Service. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // REQUIRED. Service-identifying atoms specifying the underlying service. + // + // Types that are assignable to Identifier: + // + // *Service_Custom_ + // *Service_AppEngine_ + // *Service_CloudEndpoints_ + // *Service_ClusterIstio_ + // *Service_MeshIstio_ + // *Service_IstioCanonicalService_ + // *Service_CloudRun_ + // *Service_GkeNamespace_ + // *Service_GkeWorkload_ + // *Service_GkeService_ + Identifier isService_Identifier `protobuf_oneof:"identifier"` + // Message that contains the service type and service labels of this service + // if it is a basic service. + // Documentation and examples + // [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). + BasicService *Service_BasicService `protobuf:"bytes,19,opt,name=basic_service,json=basicService,proto3" json:"basic_service,omitempty"` + // Configuration for how to query telemetry on a Service. + Telemetry *Service_Telemetry `protobuf:"bytes,13,opt,name=telemetry,proto3" json:"telemetry,omitempty"` + // Labels which have been used to annotate the service. Label keys must start + // with a letter. Label keys and values may contain lowercase letters, + // numbers, underscores, and dashes. Label keys and values have a maximum + // length of 63 characters, and must be less than 128 bytes in size. Up to 64 + // label entries may be stored. For labels which do not have a semantic value, + // the empty string may be supplied for the label value. + UserLabels map[string]string `protobuf:"bytes,14,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Service) Reset() { + *x = Service{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service) ProtoMessage() {} + +func (x *Service) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service.ProtoReflect.Descriptor instead. 
+func (*Service) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0} +} + +func (x *Service) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Service) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (m *Service) GetIdentifier() isService_Identifier { + if m != nil { + return m.Identifier + } + return nil +} + +func (x *Service) GetCustom() *Service_Custom { + if x, ok := x.GetIdentifier().(*Service_Custom_); ok { + return x.Custom + } + return nil +} + +func (x *Service) GetAppEngine() *Service_AppEngine { + if x, ok := x.GetIdentifier().(*Service_AppEngine_); ok { + return x.AppEngine + } + return nil +} + +func (x *Service) GetCloudEndpoints() *Service_CloudEndpoints { + if x, ok := x.GetIdentifier().(*Service_CloudEndpoints_); ok { + return x.CloudEndpoints + } + return nil +} + +func (x *Service) GetClusterIstio() *Service_ClusterIstio { + if x, ok := x.GetIdentifier().(*Service_ClusterIstio_); ok { + return x.ClusterIstio + } + return nil +} + +func (x *Service) GetMeshIstio() *Service_MeshIstio { + if x, ok := x.GetIdentifier().(*Service_MeshIstio_); ok { + return x.MeshIstio + } + return nil +} + +func (x *Service) GetIstioCanonicalService() *Service_IstioCanonicalService { + if x, ok := x.GetIdentifier().(*Service_IstioCanonicalService_); ok { + return x.IstioCanonicalService + } + return nil +} + +func (x *Service) GetCloudRun() *Service_CloudRun { + if x, ok := x.GetIdentifier().(*Service_CloudRun_); ok { + return x.CloudRun + } + return nil +} + +func (x *Service) GetGkeNamespace() *Service_GkeNamespace { + if x, ok := x.GetIdentifier().(*Service_GkeNamespace_); ok { + return x.GkeNamespace + } + return nil +} + +func (x *Service) GetGkeWorkload() *Service_GkeWorkload { + if x, ok := x.GetIdentifier().(*Service_GkeWorkload_); ok { + return x.GkeWorkload + } + return nil +} + +func (x *Service) GetGkeService() *Service_GkeService { + if x, ok := x.GetIdentifier().(*Service_GkeService_); ok { + return x.GkeService + } + return nil +} + +func (x *Service) GetBasicService() *Service_BasicService { + if x != nil { + return x.BasicService + } + return nil +} + +func (x *Service) GetTelemetry() *Service_Telemetry { + if x != nil { + return x.Telemetry + } + return nil +} + +func (x *Service) GetUserLabels() map[string]string { + if x != nil { + return x.UserLabels + } + return nil +} + +type isService_Identifier interface { + isService_Identifier() +} + +type Service_Custom_ struct { + // Custom service type. + Custom *Service_Custom `protobuf:"bytes,6,opt,name=custom,proto3,oneof"` +} + +type Service_AppEngine_ struct { + // Type used for App Engine services. + AppEngine *Service_AppEngine `protobuf:"bytes,7,opt,name=app_engine,json=appEngine,proto3,oneof"` +} + +type Service_CloudEndpoints_ struct { + // Type used for Cloud Endpoints services. + CloudEndpoints *Service_CloudEndpoints `protobuf:"bytes,8,opt,name=cloud_endpoints,json=cloudEndpoints,proto3,oneof"` +} + +type Service_ClusterIstio_ struct { + // Type used for Istio services that live in a Kubernetes cluster. + ClusterIstio *Service_ClusterIstio `protobuf:"bytes,9,opt,name=cluster_istio,json=clusterIstio,proto3,oneof"` +} + +type Service_MeshIstio_ struct { + // Type used for Istio services scoped to an Istio mesh. 
+ MeshIstio *Service_MeshIstio `protobuf:"bytes,10,opt,name=mesh_istio,json=meshIstio,proto3,oneof"` +} + +type Service_IstioCanonicalService_ struct { + // Type used for canonical services scoped to an Istio mesh. + // Metrics for Istio are + // [documented here](https://istio.io/latest/docs/reference/config/metrics/) + IstioCanonicalService *Service_IstioCanonicalService `protobuf:"bytes,11,opt,name=istio_canonical_service,json=istioCanonicalService,proto3,oneof"` +} + +type Service_CloudRun_ struct { + // Type used for Cloud Run services. + CloudRun *Service_CloudRun `protobuf:"bytes,12,opt,name=cloud_run,json=cloudRun,proto3,oneof"` +} + +type Service_GkeNamespace_ struct { + // Type used for GKE Namespaces. + GkeNamespace *Service_GkeNamespace `protobuf:"bytes,15,opt,name=gke_namespace,json=gkeNamespace,proto3,oneof"` +} + +type Service_GkeWorkload_ struct { + // Type used for GKE Workloads. + GkeWorkload *Service_GkeWorkload `protobuf:"bytes,16,opt,name=gke_workload,json=gkeWorkload,proto3,oneof"` +} + +type Service_GkeService_ struct { + // Type used for GKE Services (the Kubernetes concept of a service). + GkeService *Service_GkeService `protobuf:"bytes,17,opt,name=gke_service,json=gkeService,proto3,oneof"` +} + +func (*Service_Custom_) isService_Identifier() {} + +func (*Service_AppEngine_) isService_Identifier() {} + +func (*Service_CloudEndpoints_) isService_Identifier() {} + +func (*Service_ClusterIstio_) isService_Identifier() {} + +func (*Service_MeshIstio_) isService_Identifier() {} + +func (*Service_IstioCanonicalService_) isService_Identifier() {} + +func (*Service_CloudRun_) isService_Identifier() {} + +func (*Service_GkeNamespace_) isService_Identifier() {} + +func (*Service_GkeWorkload_) isService_Identifier() {} + +func (*Service_GkeService_) isService_Identifier() {} + +// A Service-Level Objective (SLO) describes a level of desired good service. It +// consists of a service-level indicator (SLI), a performance goal, and a period +// over which the objective is to be evaluated against that goal. The SLO can +// use SLIs defined in a number of different manners. Typical SLOs might include +// "99% of requests in each rolling week have latency below 200 milliseconds" or +// "99.5% of requests in each calendar month return successfully." +type ServiceLevelObjective struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier. Resource name for this `ServiceLevelObjective`. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Name used for UI elements listing this SLO. + DisplayName string `protobuf:"bytes,11,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The definition of good service, used to measure and calculate the quality + // of the `Service`'s performance with respect to a single aspect of service + // quality. + ServiceLevelIndicator *ServiceLevelIndicator `protobuf:"bytes,3,opt,name=service_level_indicator,json=serviceLevelIndicator,proto3" json:"service_level_indicator,omitempty"` + // The fraction of service that must be good in order for this objective to be + // met. `0 < goal <= 0.999`. + Goal float64 `protobuf:"fixed64,4,opt,name=goal,proto3" json:"goal,omitempty"` + // The time period over which the objective will be evaluated. 
+ // + // Types that are assignable to Period: + // + // *ServiceLevelObjective_RollingPeriod + // *ServiceLevelObjective_CalendarPeriod + Period isServiceLevelObjective_Period `protobuf_oneof:"period"` + // Labels which have been used to annotate the service-level objective. Label + // keys must start with a letter. Label keys and values may contain lowercase + // letters, numbers, underscores, and dashes. Label keys and values have a + // maximum length of 63 characters, and must be less than 128 bytes in size. + // Up to 64 label entries may be stored. For labels which do not have a + // semantic value, the empty string may be supplied for the label value. + UserLabels map[string]string `protobuf:"bytes,12,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ServiceLevelObjective) Reset() { + *x = ServiceLevelObjective{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceLevelObjective) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceLevelObjective) ProtoMessage() {} + +func (x *ServiceLevelObjective) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceLevelObjective.ProtoReflect.Descriptor instead. +func (*ServiceLevelObjective) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ServiceLevelObjective) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ServiceLevelObjective) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *ServiceLevelObjective) GetServiceLevelIndicator() *ServiceLevelIndicator { + if x != nil { + return x.ServiceLevelIndicator + } + return nil +} + +func (x *ServiceLevelObjective) GetGoal() float64 { + if x != nil { + return x.Goal + } + return 0 +} + +func (m *ServiceLevelObjective) GetPeriod() isServiceLevelObjective_Period { + if m != nil { + return m.Period + } + return nil +} + +func (x *ServiceLevelObjective) GetRollingPeriod() *durationpb.Duration { + if x, ok := x.GetPeriod().(*ServiceLevelObjective_RollingPeriod); ok { + return x.RollingPeriod + } + return nil +} + +func (x *ServiceLevelObjective) GetCalendarPeriod() calendarperiod.CalendarPeriod { + if x, ok := x.GetPeriod().(*ServiceLevelObjective_CalendarPeriod); ok { + return x.CalendarPeriod + } + return calendarperiod.CalendarPeriod(0) +} + +func (x *ServiceLevelObjective) GetUserLabels() map[string]string { + if x != nil { + return x.UserLabels + } + return nil +} + +type isServiceLevelObjective_Period interface { + isServiceLevelObjective_Period() +} + +type ServiceLevelObjective_RollingPeriod struct { + // A rolling time period, semantically "in the past ``". + // Must be an integer multiple of 1 day no larger than 30 days. + RollingPeriod *durationpb.Duration `protobuf:"bytes,5,opt,name=rolling_period,json=rollingPeriod,proto3,oneof"` +} + +type ServiceLevelObjective_CalendarPeriod struct { + // A calendar period, semantically "since the start of the current + // ``". 
At this time, only `DAY`, `WEEK`, `FORTNIGHT`, and + // `MONTH` are supported. + CalendarPeriod calendarperiod.CalendarPeriod `protobuf:"varint,6,opt,name=calendar_period,json=calendarPeriod,proto3,enum=google.type.CalendarPeriod,oneof"` +} + +func (*ServiceLevelObjective_RollingPeriod) isServiceLevelObjective_Period() {} + +func (*ServiceLevelObjective_CalendarPeriod) isServiceLevelObjective_Period() {} + +// A Service-Level Indicator (SLI) describes the "performance" of a service. For +// some services, the SLI is well-defined. In such cases, the SLI can be +// described easily by referencing the well-known SLI and providing the needed +// parameters. Alternatively, a "custom" SLI can be defined with a query to the +// underlying metric store. An SLI is defined to be `good_service / +// total_service` over any queried time interval. The value of performance +// always falls into the range `0 <= performance <= 1`. A custom SLI describes +// how to compute this ratio, whether this is by dividing values from a pair of +// time series, cutting a `Distribution` into good and bad counts, or counting +// time windows in which the service complies with a criterion. For separation +// of concerns, a single Service-Level Indicator measures performance for only +// one aspect of service quality, such as fraction of successful queries or +// fast-enough queries. +type ServiceLevelIndicator struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Service level indicators can be grouped by whether the "unit" of service + // being measured is based on counts of good requests or on counts of good + // time windows + // + // Types that are assignable to Type: + // + // *ServiceLevelIndicator_BasicSli + // *ServiceLevelIndicator_RequestBased + // *ServiceLevelIndicator_WindowsBased + Type isServiceLevelIndicator_Type `protobuf_oneof:"type"` +} + +func (x *ServiceLevelIndicator) Reset() { + *x = ServiceLevelIndicator{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceLevelIndicator) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceLevelIndicator) ProtoMessage() {} + +func (x *ServiceLevelIndicator) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceLevelIndicator.ProtoReflect.Descriptor instead. 
+func (*ServiceLevelIndicator) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{2} +} + +func (m *ServiceLevelIndicator) GetType() isServiceLevelIndicator_Type { + if m != nil { + return m.Type + } + return nil +} + +func (x *ServiceLevelIndicator) GetBasicSli() *BasicSli { + if x, ok := x.GetType().(*ServiceLevelIndicator_BasicSli); ok { + return x.BasicSli + } + return nil +} + +func (x *ServiceLevelIndicator) GetRequestBased() *RequestBasedSli { + if x, ok := x.GetType().(*ServiceLevelIndicator_RequestBased); ok { + return x.RequestBased + } + return nil +} + +func (x *ServiceLevelIndicator) GetWindowsBased() *WindowsBasedSli { + if x, ok := x.GetType().(*ServiceLevelIndicator_WindowsBased); ok { + return x.WindowsBased + } + return nil +} + +type isServiceLevelIndicator_Type interface { + isServiceLevelIndicator_Type() +} + +type ServiceLevelIndicator_BasicSli struct { + // Basic SLI on a well-known service type. + BasicSli *BasicSli `protobuf:"bytes,4,opt,name=basic_sli,json=basicSli,proto3,oneof"` +} + +type ServiceLevelIndicator_RequestBased struct { + // Request-based SLIs + RequestBased *RequestBasedSli `protobuf:"bytes,1,opt,name=request_based,json=requestBased,proto3,oneof"` +} + +type ServiceLevelIndicator_WindowsBased struct { + // Windows-based SLIs + WindowsBased *WindowsBasedSli `protobuf:"bytes,2,opt,name=windows_based,json=windowsBased,proto3,oneof"` +} + +func (*ServiceLevelIndicator_BasicSli) isServiceLevelIndicator_Type() {} + +func (*ServiceLevelIndicator_RequestBased) isServiceLevelIndicator_Type() {} + +func (*ServiceLevelIndicator_WindowsBased) isServiceLevelIndicator_Type() {} + +// An SLI measuring performance on a well-known service type. Performance will +// be computed on the basis of pre-defined metrics. The type of the +// `service_resource` determines the metrics to use and the +// `service_resource.labels` and `metric_labels` are used to construct a +// monitoring filter to filter that metric down to just the data relevant to +// this service. +type BasicSli struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // OPTIONAL: The set of RPCs to which this SLI is relevant. Telemetry from + // other methods will not be used to calculate performance for this SLI. If + // omitted, this SLI applies to all the Service's methods. For service types + // that don't support breaking down by method, setting this field will result + // in an error. + Method []string `protobuf:"bytes,7,rep,name=method,proto3" json:"method,omitempty"` + // OPTIONAL: The set of locations to which this SLI is relevant. Telemetry + // from other locations will not be used to calculate performance for this + // SLI. If omitted, this SLI applies to all locations in which the Service has + // activity. For service types that don't support breaking down by location, + // setting this field will result in an error. + Location []string `protobuf:"bytes,8,rep,name=location,proto3" json:"location,omitempty"` + // OPTIONAL: The set of API versions to which this SLI is relevant. Telemetry + // from other API versions will not be used to calculate performance for this + // SLI. If omitted, this SLI applies to all API versions. For service types + // that don't support breaking down by version, setting this field will result + // in an error. 
+ Version []string `protobuf:"bytes,9,rep,name=version,proto3" json:"version,omitempty"` + // This SLI can be evaluated on the basis of availability or latency. + // + // Types that are assignable to SliCriteria: + // + // *BasicSli_Availability + // *BasicSli_Latency + SliCriteria isBasicSli_SliCriteria `protobuf_oneof:"sli_criteria"` +} + +func (x *BasicSli) Reset() { + *x = BasicSli{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BasicSli) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BasicSli) ProtoMessage() {} + +func (x *BasicSli) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BasicSli.ProtoReflect.Descriptor instead. +func (*BasicSli) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{3} +} + +func (x *BasicSli) GetMethod() []string { + if x != nil { + return x.Method + } + return nil +} + +func (x *BasicSli) GetLocation() []string { + if x != nil { + return x.Location + } + return nil +} + +func (x *BasicSli) GetVersion() []string { + if x != nil { + return x.Version + } + return nil +} + +func (m *BasicSli) GetSliCriteria() isBasicSli_SliCriteria { + if m != nil { + return m.SliCriteria + } + return nil +} + +func (x *BasicSli) GetAvailability() *BasicSli_AvailabilityCriteria { + if x, ok := x.GetSliCriteria().(*BasicSli_Availability); ok { + return x.Availability + } + return nil +} + +func (x *BasicSli) GetLatency() *BasicSli_LatencyCriteria { + if x, ok := x.GetSliCriteria().(*BasicSli_Latency); ok { + return x.Latency + } + return nil +} + +type isBasicSli_SliCriteria interface { + isBasicSli_SliCriteria() +} + +type BasicSli_Availability struct { + // Good service is defined to be the count of requests made to this service + // that return successfully. + Availability *BasicSli_AvailabilityCriteria `protobuf:"bytes,2,opt,name=availability,proto3,oneof"` +} + +type BasicSli_Latency struct { + // Good service is defined to be the count of requests made to this service + // that are fast enough with respect to `latency.threshold`. + Latency *BasicSli_LatencyCriteria `protobuf:"bytes,3,opt,name=latency,proto3,oneof"` +} + +func (*BasicSli_Availability) isBasicSli_SliCriteria() {} + +func (*BasicSli_Latency) isBasicSli_SliCriteria() {} + +// Range of numerical values within `min` and `max`. +type Range struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Range minimum. + Min float64 `protobuf:"fixed64,1,opt,name=min,proto3" json:"min,omitempty"` + // Range maximum. 
+ Max float64 `protobuf:"fixed64,2,opt,name=max,proto3" json:"max,omitempty"` +} + +func (x *Range) Reset() { + *x = Range{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Range) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Range) ProtoMessage() {} + +func (x *Range) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Range.ProtoReflect.Descriptor instead. +func (*Range) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{4} +} + +func (x *Range) GetMin() float64 { + if x != nil { + return x.Min + } + return 0 +} + +func (x *Range) GetMax() float64 { + if x != nil { + return x.Max + } + return 0 +} + +// Service Level Indicators for which atomic units of service are counted +// directly. +type RequestBasedSli struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The means to compute a ratio of `good_service` to `total_service`. + // + // Types that are assignable to Method: + // + // *RequestBasedSli_GoodTotalRatio + // *RequestBasedSli_DistributionCut + Method isRequestBasedSli_Method `protobuf_oneof:"method"` +} + +func (x *RequestBasedSli) Reset() { + *x = RequestBasedSli{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestBasedSli) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestBasedSli) ProtoMessage() {} + +func (x *RequestBasedSli) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestBasedSli.ProtoReflect.Descriptor instead. +func (*RequestBasedSli) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{5} +} + +func (m *RequestBasedSli) GetMethod() isRequestBasedSli_Method { + if m != nil { + return m.Method + } + return nil +} + +func (x *RequestBasedSli) GetGoodTotalRatio() *TimeSeriesRatio { + if x, ok := x.GetMethod().(*RequestBasedSli_GoodTotalRatio); ok { + return x.GoodTotalRatio + } + return nil +} + +func (x *RequestBasedSli) GetDistributionCut() *DistributionCut { + if x, ok := x.GetMethod().(*RequestBasedSli_DistributionCut); ok { + return x.DistributionCut + } + return nil +} + +type isRequestBasedSli_Method interface { + isRequestBasedSli_Method() +} + +type RequestBasedSli_GoodTotalRatio struct { + // `good_total_ratio` is used when the ratio of `good_service` to + // `total_service` is computed from two `TimeSeries`. 
+ GoodTotalRatio *TimeSeriesRatio `protobuf:"bytes,1,opt,name=good_total_ratio,json=goodTotalRatio,proto3,oneof"` +} + +type RequestBasedSli_DistributionCut struct { + // `distribution_cut` is used when `good_service` is a count of values + // aggregated in a `Distribution` that fall into a good range. The + // `total_service` is the total count of all values aggregated in the + // `Distribution`. + DistributionCut *DistributionCut `protobuf:"bytes,3,opt,name=distribution_cut,json=distributionCut,proto3,oneof"` +} + +func (*RequestBasedSli_GoodTotalRatio) isRequestBasedSli_Method() {} + +func (*RequestBasedSli_DistributionCut) isRequestBasedSli_Method() {} + +// A `TimeSeriesRatio` specifies two `TimeSeries` to use for computing the +// `good_service / total_service` ratio. The specified `TimeSeries` must have +// `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = +// DELTA` or `MetricKind = CUMULATIVE`. The `TimeSeriesRatio` must specify +// exactly two of good, bad, and total, and the relationship `good_service + +// bad_service = total_service` will be assumed. +type TimeSeriesRatio struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifying a `TimeSeries` quantifying good service provided. Must have + // `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = + // DELTA` or `MetricKind = CUMULATIVE`. + GoodServiceFilter string `protobuf:"bytes,4,opt,name=good_service_filter,json=goodServiceFilter,proto3" json:"good_service_filter,omitempty"` + // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifying a `TimeSeries` quantifying bad service, either demanded service + // that was not provided or demanded service that was of inadequate quality. + // Must have `ValueType = DOUBLE` or `ValueType = INT64` and must have + // `MetricKind = DELTA` or `MetricKind = CUMULATIVE`. + BadServiceFilter string `protobuf:"bytes,5,opt,name=bad_service_filter,json=badServiceFilter,proto3" json:"bad_service_filter,omitempty"` + // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifying a `TimeSeries` quantifying total demanded service. Must have + // `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = + // DELTA` or `MetricKind = CUMULATIVE`. + TotalServiceFilter string `protobuf:"bytes,6,opt,name=total_service_filter,json=totalServiceFilter,proto3" json:"total_service_filter,omitempty"` +} + +func (x *TimeSeriesRatio) Reset() { + *x = TimeSeriesRatio{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeSeriesRatio) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeSeriesRatio) ProtoMessage() {} + +func (x *TimeSeriesRatio) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeSeriesRatio.ProtoReflect.Descriptor instead. 
+func (*TimeSeriesRatio) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{6} +} + +func (x *TimeSeriesRatio) GetGoodServiceFilter() string { + if x != nil { + return x.GoodServiceFilter + } + return "" +} + +func (x *TimeSeriesRatio) GetBadServiceFilter() string { + if x != nil { + return x.BadServiceFilter + } + return "" +} + +func (x *TimeSeriesRatio) GetTotalServiceFilter() string { + if x != nil { + return x.TotalServiceFilter + } + return "" +} + +// A `DistributionCut` defines a `TimeSeries` and thresholds used for measuring +// good service and total service. The `TimeSeries` must have `ValueType = +// DISTRIBUTION` and `MetricKind = DELTA` or `MetricKind = CUMULATIVE`. The +// computed `good_service` will be the estimated count of values in the +// `Distribution` that fall within the specified `min` and `max`. +type DistributionCut struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifying a `TimeSeries` aggregating values. Must have `ValueType = + // DISTRIBUTION` and `MetricKind = DELTA` or `MetricKind = CUMULATIVE`. + DistributionFilter string `protobuf:"bytes,4,opt,name=distribution_filter,json=distributionFilter,proto3" json:"distribution_filter,omitempty"` + // Range of values considered "good." For a one-sided range, set one bound to + // an infinite value. + Range *Range `protobuf:"bytes,5,opt,name=range,proto3" json:"range,omitempty"` +} + +func (x *DistributionCut) Reset() { + *x = DistributionCut{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DistributionCut) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DistributionCut) ProtoMessage() {} + +func (x *DistributionCut) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DistributionCut.ProtoReflect.Descriptor instead. +func (*DistributionCut) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{7} +} + +func (x *DistributionCut) GetDistributionFilter() string { + if x != nil { + return x.DistributionFilter + } + return "" +} + +func (x *DistributionCut) GetRange() *Range { + if x != nil { + return x.Range + } + return nil +} + +// A `WindowsBasedSli` defines `good_service` as the count of time windows for +// which the provided service was of good quality. Criteria for determining +// if service was good are embedded in the `window_criterion`. +type WindowsBasedSli struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The criterion to use for evaluating window goodness. + // + // Types that are assignable to WindowCriterion: + // + // *WindowsBasedSli_GoodBadMetricFilter + // *WindowsBasedSli_GoodTotalRatioThreshold + // *WindowsBasedSli_MetricMeanInRange + // *WindowsBasedSli_MetricSumInRange + WindowCriterion isWindowsBasedSli_WindowCriterion `protobuf_oneof:"window_criterion"` + // Duration over which window quality is evaluated. 
Must be an integer + // fraction of a day and at least `60s`. + WindowPeriod *durationpb.Duration `protobuf:"bytes,4,opt,name=window_period,json=windowPeriod,proto3" json:"window_period,omitempty"` +} + +func (x *WindowsBasedSli) Reset() { + *x = WindowsBasedSli{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WindowsBasedSli) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WindowsBasedSli) ProtoMessage() {} + +func (x *WindowsBasedSli) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WindowsBasedSli.ProtoReflect.Descriptor instead. +func (*WindowsBasedSli) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{8} +} + +func (m *WindowsBasedSli) GetWindowCriterion() isWindowsBasedSli_WindowCriterion { + if m != nil { + return m.WindowCriterion + } + return nil +} + +func (x *WindowsBasedSli) GetGoodBadMetricFilter() string { + if x, ok := x.GetWindowCriterion().(*WindowsBasedSli_GoodBadMetricFilter); ok { + return x.GoodBadMetricFilter + } + return "" +} + +func (x *WindowsBasedSli) GetGoodTotalRatioThreshold() *WindowsBasedSli_PerformanceThreshold { + if x, ok := x.GetWindowCriterion().(*WindowsBasedSli_GoodTotalRatioThreshold); ok { + return x.GoodTotalRatioThreshold + } + return nil +} + +func (x *WindowsBasedSli) GetMetricMeanInRange() *WindowsBasedSli_MetricRange { + if x, ok := x.GetWindowCriterion().(*WindowsBasedSli_MetricMeanInRange); ok { + return x.MetricMeanInRange + } + return nil +} + +func (x *WindowsBasedSli) GetMetricSumInRange() *WindowsBasedSli_MetricRange { + if x, ok := x.GetWindowCriterion().(*WindowsBasedSli_MetricSumInRange); ok { + return x.MetricSumInRange + } + return nil +} + +func (x *WindowsBasedSli) GetWindowPeriod() *durationpb.Duration { + if x != nil { + return x.WindowPeriod + } + return nil +} + +type isWindowsBasedSli_WindowCriterion interface { + isWindowsBasedSli_WindowCriterion() +} + +type WindowsBasedSli_GoodBadMetricFilter struct { + // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifying a `TimeSeries` with `ValueType = BOOL`. The window is good if + // any `true` values appear in the window. + GoodBadMetricFilter string `protobuf:"bytes,5,opt,name=good_bad_metric_filter,json=goodBadMetricFilter,proto3,oneof"` +} + +type WindowsBasedSli_GoodTotalRatioThreshold struct { + // A window is good if its `performance` is high enough. + GoodTotalRatioThreshold *WindowsBasedSli_PerformanceThreshold `protobuf:"bytes,2,opt,name=good_total_ratio_threshold,json=goodTotalRatioThreshold,proto3,oneof"` +} + +type WindowsBasedSli_MetricMeanInRange struct { + // A window is good if the metric's value is in a good range, averaged + // across returned streams. + MetricMeanInRange *WindowsBasedSli_MetricRange `protobuf:"bytes,6,opt,name=metric_mean_in_range,json=metricMeanInRange,proto3,oneof"` +} + +type WindowsBasedSli_MetricSumInRange struct { + // A window is good if the metric's value is in a good range, summed across + // returned streams. 
+ MetricSumInRange *WindowsBasedSli_MetricRange `protobuf:"bytes,7,opt,name=metric_sum_in_range,json=metricSumInRange,proto3,oneof"` +} + +func (*WindowsBasedSli_GoodBadMetricFilter) isWindowsBasedSli_WindowCriterion() {} + +func (*WindowsBasedSli_GoodTotalRatioThreshold) isWindowsBasedSli_WindowCriterion() {} + +func (*WindowsBasedSli_MetricMeanInRange) isWindowsBasedSli_WindowCriterion() {} + +func (*WindowsBasedSli_MetricSumInRange) isWindowsBasedSli_WindowCriterion() {} + +// Use a custom service to designate a service that you want to monitor +// when none of the other service types (like App Engine, Cloud Run, or +// a GKE type) matches your intended service. +type Service_Custom struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Service_Custom) Reset() { + *x = Service_Custom{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_Custom) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_Custom) ProtoMessage() {} + +func (x *Service_Custom) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_Custom.ProtoReflect.Descriptor instead. +func (*Service_Custom) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 0} +} + +// App Engine service. Learn more at https://cloud.google.com/appengine. +type Service_AppEngine struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The ID of the App Engine module underlying this service. Corresponds to + // the `module_id` resource label in the [`gae_app` monitored + // resource](https://cloud.google.com/monitoring/api/resources#tag_gae_app). + ModuleId string `protobuf:"bytes,1,opt,name=module_id,json=moduleId,proto3" json:"module_id,omitempty"` +} + +func (x *Service_AppEngine) Reset() { + *x = Service_AppEngine{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_AppEngine) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_AppEngine) ProtoMessage() {} + +func (x *Service_AppEngine) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_AppEngine.ProtoReflect.Descriptor instead. +func (*Service_AppEngine) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *Service_AppEngine) GetModuleId() string { + if x != nil { + return x.ModuleId + } + return "" +} + +// Cloud Endpoints service. Learn more at https://cloud.google.com/endpoints. 
+type Service_CloudEndpoints struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the Cloud Endpoints service underlying this service. + // Corresponds to the `service` resource label in the [`api` monitored + // resource](https://cloud.google.com/monitoring/api/resources#tag_api). + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` +} + +func (x *Service_CloudEndpoints) Reset() { + *x = Service_CloudEndpoints{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_CloudEndpoints) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_CloudEndpoints) ProtoMessage() {} + +func (x *Service_CloudEndpoints) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_CloudEndpoints.ProtoReflect.Descriptor instead. +func (*Service_CloudEndpoints) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 2} +} + +func (x *Service_CloudEndpoints) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +// Istio service scoped to a single Kubernetes cluster. Learn more at +// https://istio.io. Clusters running OSS Istio will have their services +// ingested as this type. +type Service_ClusterIstio struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The location of the Kubernetes cluster in which this Istio service is + // defined. Corresponds to the `location` resource label in `k8s_cluster` + // resources. + Location string `protobuf:"bytes,1,opt,name=location,proto3" json:"location,omitempty"` + // The name of the Kubernetes cluster in which this Istio service is + // defined. Corresponds to the `cluster_name` resource label in + // `k8s_cluster` resources. + ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + // The namespace of the Istio service underlying this service. Corresponds + // to the `destination_service_namespace` metric label in Istio metrics. + ServiceNamespace string `protobuf:"bytes,3,opt,name=service_namespace,json=serviceNamespace,proto3" json:"service_namespace,omitempty"` + // The name of the Istio service underlying this service. Corresponds to the + // `destination_service_name` metric label in Istio metrics. 
+ ServiceName string `protobuf:"bytes,4,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` +} + +func (x *Service_ClusterIstio) Reset() { + *x = Service_ClusterIstio{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_ClusterIstio) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_ClusterIstio) ProtoMessage() {} + +func (x *Service_ClusterIstio) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_ClusterIstio.ProtoReflect.Descriptor instead. +func (*Service_ClusterIstio) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 3} +} + +func (x *Service_ClusterIstio) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *Service_ClusterIstio) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *Service_ClusterIstio) GetServiceNamespace() string { + if x != nil { + return x.ServiceNamespace + } + return "" +} + +func (x *Service_ClusterIstio) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +// Istio service scoped to an Istio mesh. Anthos clusters running ASM < 1.6.8 +// will have their services ingested as this type. +type Service_MeshIstio struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier for the mesh in which this Istio service is defined. + // Corresponds to the `mesh_uid` metric label in Istio metrics. + MeshUid string `protobuf:"bytes,1,opt,name=mesh_uid,json=meshUid,proto3" json:"mesh_uid,omitempty"` + // The namespace of the Istio service underlying this service. Corresponds + // to the `destination_service_namespace` metric label in Istio metrics. + ServiceNamespace string `protobuf:"bytes,3,opt,name=service_namespace,json=serviceNamespace,proto3" json:"service_namespace,omitempty"` + // The name of the Istio service underlying this service. Corresponds to the + // `destination_service_name` metric label in Istio metrics. + ServiceName string `protobuf:"bytes,4,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` +} + +func (x *Service_MeshIstio) Reset() { + *x = Service_MeshIstio{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_MeshIstio) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_MeshIstio) ProtoMessage() {} + +func (x *Service_MeshIstio) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_MeshIstio.ProtoReflect.Descriptor instead. 
+func (*Service_MeshIstio) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 4} +} + +func (x *Service_MeshIstio) GetMeshUid() string { + if x != nil { + return x.MeshUid + } + return "" +} + +func (x *Service_MeshIstio) GetServiceNamespace() string { + if x != nil { + return x.ServiceNamespace + } + return "" +} + +func (x *Service_MeshIstio) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +// Canonical service scoped to an Istio mesh. Anthos clusters running ASM >= +// 1.6.8 will have their services ingested as this type. +type Service_IstioCanonicalService struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier for the Istio mesh in which this canonical service is defined. + // Corresponds to the `mesh_uid` metric label in + // [Istio metrics](https://cloud.google.com/monitoring/api/metrics_istio). + MeshUid string `protobuf:"bytes,1,opt,name=mesh_uid,json=meshUid,proto3" json:"mesh_uid,omitempty"` + // The namespace of the canonical service underlying this service. + // Corresponds to the `destination_canonical_service_namespace` metric + // label in [Istio + // metrics](https://cloud.google.com/monitoring/api/metrics_istio). + CanonicalServiceNamespace string `protobuf:"bytes,3,opt,name=canonical_service_namespace,json=canonicalServiceNamespace,proto3" json:"canonical_service_namespace,omitempty"` + // The name of the canonical service underlying this service. + // Corresponds to the `destination_canonical_service_name` metric label in + // label in [Istio + // metrics](https://cloud.google.com/monitoring/api/metrics_istio). + CanonicalService string `protobuf:"bytes,4,opt,name=canonical_service,json=canonicalService,proto3" json:"canonical_service,omitempty"` +} + +func (x *Service_IstioCanonicalService) Reset() { + *x = Service_IstioCanonicalService{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_IstioCanonicalService) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_IstioCanonicalService) ProtoMessage() {} + +func (x *Service_IstioCanonicalService) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_IstioCanonicalService.ProtoReflect.Descriptor instead. +func (*Service_IstioCanonicalService) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 5} +} + +func (x *Service_IstioCanonicalService) GetMeshUid() string { + if x != nil { + return x.MeshUid + } + return "" +} + +func (x *Service_IstioCanonicalService) GetCanonicalServiceNamespace() string { + if x != nil { + return x.CanonicalServiceNamespace + } + return "" +} + +func (x *Service_IstioCanonicalService) GetCanonicalService() string { + if x != nil { + return x.CanonicalService + } + return "" +} + +// Cloud Run service. Learn more at https://cloud.google.com/run. +type Service_CloudRun struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the Cloud Run service. 
Corresponds to the `service_name` + // resource label in the [`cloud_run_revision` monitored + // resource](https://cloud.google.com/monitoring/api/resources#tag_cloud_run_revision). + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + // The location the service is run. Corresponds to the `location` + // resource label in the [`cloud_run_revision` monitored + // resource](https://cloud.google.com/monitoring/api/resources#tag_cloud_run_revision). + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` +} + +func (x *Service_CloudRun) Reset() { + *x = Service_CloudRun{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_CloudRun) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_CloudRun) ProtoMessage() {} + +func (x *Service_CloudRun) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_CloudRun.ProtoReflect.Descriptor instead. +func (*Service_CloudRun) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 6} +} + +func (x *Service_CloudRun) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +func (x *Service_CloudRun) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +// GKE Namespace. The field names correspond to the resource metadata labels +// on monitored resources that fall under a namespace (for example, +// `k8s_container` or `k8s_pod`). +type Service_GkeNamespace struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The project this resource lives in. For legacy services + // migrated from the `Custom` type, this may be a distinct project from the + // one parenting the service itself. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + // The location of the parent cluster. This may be a zone or region. + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + // The name of the parent cluster. + ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + // The name of this namespace. 
+ NamespaceName string `protobuf:"bytes,4,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"` +} + +func (x *Service_GkeNamespace) Reset() { + *x = Service_GkeNamespace{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_GkeNamespace) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_GkeNamespace) ProtoMessage() {} + +func (x *Service_GkeNamespace) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_GkeNamespace.ProtoReflect.Descriptor instead. +func (*Service_GkeNamespace) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 7} +} + +func (x *Service_GkeNamespace) GetProjectId() string { + if x != nil { + return x.ProjectId + } + return "" +} + +func (x *Service_GkeNamespace) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *Service_GkeNamespace) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *Service_GkeNamespace) GetNamespaceName() string { + if x != nil { + return x.NamespaceName + } + return "" +} + +// A GKE Workload (Deployment, StatefulSet, etc). The field names correspond +// to the metadata labels on monitored resources that fall under a workload +// (for example, `k8s_container` or `k8s_pod`). +type Service_GkeWorkload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The project this resource lives in. For legacy services + // migrated from the `Custom` type, this may be a distinct project from the + // one parenting the service itself. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + // The location of the parent cluster. This may be a zone or region. + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + // The name of the parent cluster. + ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + // The name of the parent namespace. + NamespaceName string `protobuf:"bytes,4,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"` + // The type of this workload (for example, "Deployment" or "DaemonSet") + TopLevelControllerType string `protobuf:"bytes,5,opt,name=top_level_controller_type,json=topLevelControllerType,proto3" json:"top_level_controller_type,omitempty"` + // The name of this workload. 
+ TopLevelControllerName string `protobuf:"bytes,6,opt,name=top_level_controller_name,json=topLevelControllerName,proto3" json:"top_level_controller_name,omitempty"` +} + +func (x *Service_GkeWorkload) Reset() { + *x = Service_GkeWorkload{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_GkeWorkload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_GkeWorkload) ProtoMessage() {} + +func (x *Service_GkeWorkload) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_GkeWorkload.ProtoReflect.Descriptor instead. +func (*Service_GkeWorkload) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 8} +} + +func (x *Service_GkeWorkload) GetProjectId() string { + if x != nil { + return x.ProjectId + } + return "" +} + +func (x *Service_GkeWorkload) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *Service_GkeWorkload) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *Service_GkeWorkload) GetNamespaceName() string { + if x != nil { + return x.NamespaceName + } + return "" +} + +func (x *Service_GkeWorkload) GetTopLevelControllerType() string { + if x != nil { + return x.TopLevelControllerType + } + return "" +} + +func (x *Service_GkeWorkload) GetTopLevelControllerName() string { + if x != nil { + return x.TopLevelControllerName + } + return "" +} + +// GKE Service. The "service" here represents a +// [Kubernetes service +// object](https://kubernetes.io/docs/concepts/services-networking/service). +// The field names correspond to the resource labels on [`k8s_service` +// monitored +// resources](https://cloud.google.com/monitoring/api/resources#tag_k8s_service). +type Service_GkeService struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The project this resource lives in. For legacy services + // migrated from the `Custom` type, this may be a distinct project from the + // one parenting the service itself. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + // The location of the parent cluster. This may be a zone or region. + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + // The name of the parent cluster. + ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + // The name of the parent namespace. + NamespaceName string `protobuf:"bytes,4,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"` + // The name of this service. 
+ ServiceName string `protobuf:"bytes,5,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` +} + +func (x *Service_GkeService) Reset() { + *x = Service_GkeService{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_GkeService) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_GkeService) ProtoMessage() {} + +func (x *Service_GkeService) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_GkeService.ProtoReflect.Descriptor instead. +func (*Service_GkeService) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 9} +} + +func (x *Service_GkeService) GetProjectId() string { + if x != nil { + return x.ProjectId + } + return "" +} + +func (x *Service_GkeService) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *Service_GkeService) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *Service_GkeService) GetNamespaceName() string { + if x != nil { + return x.NamespaceName + } + return "" +} + +func (x *Service_GkeService) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +// A well-known service type, defined by its service type and service labels. +// Documentation and examples +// [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). +type Service_BasicService struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of service that this basic service defines, e.g. + // APP_ENGINE service type. + // Documentation and valid values + // [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). + ServiceType string `protobuf:"bytes,1,opt,name=service_type,json=serviceType,proto3" json:"service_type,omitempty"` + // Labels that specify the resource that emits the monitoring data which + // is used for SLO reporting of this `Service`. + // Documentation and valid values for given service types + // [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). 
+ ServiceLabels map[string]string `protobuf:"bytes,2,rep,name=service_labels,json=serviceLabels,proto3" json:"service_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Service_BasicService) Reset() { + *x = Service_BasicService{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_BasicService) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_BasicService) ProtoMessage() {} + +func (x *Service_BasicService) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_BasicService.ProtoReflect.Descriptor instead. +func (*Service_BasicService) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 10} +} + +func (x *Service_BasicService) GetServiceType() string { + if x != nil { + return x.ServiceType + } + return "" +} + +func (x *Service_BasicService) GetServiceLabels() map[string]string { + if x != nil { + return x.ServiceLabels + } + return nil +} + +// Configuration for how to query telemetry on a Service. +type Service_Telemetry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The full name of the resource that defines this service. Formatted as + // described in https://cloud.google.com/apis/design/resource_names. + ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` +} + +func (x *Service_Telemetry) Reset() { + *x = Service_Telemetry{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Service_Telemetry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Service_Telemetry) ProtoMessage() {} + +func (x *Service_Telemetry) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Service_Telemetry.ProtoReflect.Descriptor instead. +func (*Service_Telemetry) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 11} +} + +func (x *Service_Telemetry) GetResourceName() string { + if x != nil { + return x.ResourceName + } + return "" +} + +// Future parameters for the availability SLI. 
+type BasicSli_AvailabilityCriteria struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *BasicSli_AvailabilityCriteria) Reset() { + *x = BasicSli_AvailabilityCriteria{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BasicSli_AvailabilityCriteria) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BasicSli_AvailabilityCriteria) ProtoMessage() {} + +func (x *BasicSli_AvailabilityCriteria) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BasicSli_AvailabilityCriteria.ProtoReflect.Descriptor instead. +func (*BasicSli_AvailabilityCriteria) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{3, 0} +} + +// Parameters for a latency threshold SLI. +type BasicSli_LatencyCriteria struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Good service is defined to be the count of requests made to this service + // that return in no more than `threshold`. + Threshold *durationpb.Duration `protobuf:"bytes,3,opt,name=threshold,proto3" json:"threshold,omitempty"` +} + +func (x *BasicSli_LatencyCriteria) Reset() { + *x = BasicSli_LatencyCriteria{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BasicSli_LatencyCriteria) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BasicSli_LatencyCriteria) ProtoMessage() {} + +func (x *BasicSli_LatencyCriteria) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BasicSli_LatencyCriteria.ProtoReflect.Descriptor instead. +func (*BasicSli_LatencyCriteria) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{3, 1} +} + +func (x *BasicSli_LatencyCriteria) GetThreshold() *durationpb.Duration { + if x != nil { + return x.Threshold + } + return nil +} + +// A `PerformanceThreshold` is used when each window is good when that window +// has a sufficiently high `performance`. +type WindowsBasedSli_PerformanceThreshold struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The means, either a request-based SLI or a basic SLI, by which to compute + // performance over a window. + // + // Types that are assignable to Type: + // + // *WindowsBasedSli_PerformanceThreshold_Performance + // *WindowsBasedSli_PerformanceThreshold_BasicSliPerformance + Type isWindowsBasedSli_PerformanceThreshold_Type `protobuf_oneof:"type"` + // If window `performance >= threshold`, the window is counted as good. 
+ Threshold float64 `protobuf:"fixed64,2,opt,name=threshold,proto3" json:"threshold,omitempty"` +} + +func (x *WindowsBasedSli_PerformanceThreshold) Reset() { + *x = WindowsBasedSli_PerformanceThreshold{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WindowsBasedSli_PerformanceThreshold) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WindowsBasedSli_PerformanceThreshold) ProtoMessage() {} + +func (x *WindowsBasedSli_PerformanceThreshold) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WindowsBasedSli_PerformanceThreshold.ProtoReflect.Descriptor instead. +func (*WindowsBasedSli_PerformanceThreshold) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{8, 0} +} + +func (m *WindowsBasedSli_PerformanceThreshold) GetType() isWindowsBasedSli_PerformanceThreshold_Type { + if m != nil { + return m.Type + } + return nil +} + +func (x *WindowsBasedSli_PerformanceThreshold) GetPerformance() *RequestBasedSli { + if x, ok := x.GetType().(*WindowsBasedSli_PerformanceThreshold_Performance); ok { + return x.Performance + } + return nil +} + +func (x *WindowsBasedSli_PerformanceThreshold) GetBasicSliPerformance() *BasicSli { + if x, ok := x.GetType().(*WindowsBasedSli_PerformanceThreshold_BasicSliPerformance); ok { + return x.BasicSliPerformance + } + return nil +} + +func (x *WindowsBasedSli_PerformanceThreshold) GetThreshold() float64 { + if x != nil { + return x.Threshold + } + return 0 +} + +type isWindowsBasedSli_PerformanceThreshold_Type interface { + isWindowsBasedSli_PerformanceThreshold_Type() +} + +type WindowsBasedSli_PerformanceThreshold_Performance struct { + // `RequestBasedSli` to evaluate to judge window quality. + Performance *RequestBasedSli `protobuf:"bytes,1,opt,name=performance,proto3,oneof"` +} + +type WindowsBasedSli_PerformanceThreshold_BasicSliPerformance struct { + // `BasicSli` to evaluate to judge window quality. + BasicSliPerformance *BasicSli `protobuf:"bytes,3,opt,name=basic_sli_performance,json=basicSliPerformance,proto3,oneof"` +} + +func (*WindowsBasedSli_PerformanceThreshold_Performance) isWindowsBasedSli_PerformanceThreshold_Type() { +} + +func (*WindowsBasedSli_PerformanceThreshold_BasicSliPerformance) isWindowsBasedSli_PerformanceThreshold_Type() { +} + +// A `MetricRange` is used when each window is good when the value x of a +// single `TimeSeries` satisfies `range.min <= x <= range.max`. The provided +// `TimeSeries` must have `ValueType = INT64` or `ValueType = DOUBLE` and +// `MetricKind = GAUGE`. +type WindowsBasedSli_MetricRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifying the `TimeSeries` to use for evaluating window quality. + TimeSeries string `protobuf:"bytes,1,opt,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` + // Range of values considered "good." For a one-sided range, set one bound + // to an infinite value. 
+ Range *Range `protobuf:"bytes,4,opt,name=range,proto3" json:"range,omitempty"` +} + +func (x *WindowsBasedSli_MetricRange) Reset() { + *x = WindowsBasedSli_MetricRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WindowsBasedSli_MetricRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WindowsBasedSli_MetricRange) ProtoMessage() {} + +func (x *WindowsBasedSli_MetricRange) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WindowsBasedSli_MetricRange.ProtoReflect.Descriptor instead. +func (*WindowsBasedSli_MetricRange) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{8, 1} +} + +func (x *WindowsBasedSli_MetricRange) GetTimeSeries() string { + if x != nil { + return x.TimeSeries + } + return "" +} + +func (x *WindowsBasedSli_MetricRange) GetRange() *Range { + if x != nil { + return x.Range + } + return nil +} + +var File_google_monitoring_v3_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_service_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, + 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x5f, 0x70, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa4, 0x16, 0x0a, 0x07, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x12, 0x48, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x41, 0x70, 0x70, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x48, 0x00, + 0x52, 0x09, 0x61, 0x70, 0x70, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x57, 0x0a, 0x0f, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x73, 0x12, 0x51, 0x0a, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x69, 0x73, 0x74, 0x69, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x12, 0x48, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x68, 0x5f, + 0x69, 0x73, 0x74, 0x69, 0x6f, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, 0x65, 0x73, 0x68, 0x49, + 0x73, 0x74, 0x69, 0x6f, 0x48, 0x00, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x68, 0x49, 0x73, 0x74, 0x69, + 0x6f, 0x12, 0x6d, 0x0a, 0x17, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x5f, 0x63, 0x61, 0x6e, 0x6f, 0x6e, + 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, 0x15, 0x69, 0x73, 0x74, 0x69, 0x6f, + 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x45, 0x0a, 0x09, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x52, 0x75, 0x6e, 0x48, 0x00, 0x52, 0x08, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x52, 0x75, 0x6e, 0x12, 0x51, 0x0a, 0x0d, 0x67, 0x6b, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6b, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x67, 0x6b, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x4e, 0x0a, 0x0c, 0x67, 0x6b, + 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 
0x61, 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x47, 0x6b, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x00, 0x52, 0x0b, 0x67, + 0x6b, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x4b, 0x0a, 0x0b, 0x67, 0x6b, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, + 0x6b, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x67, 0x6b, 0x65, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4f, 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x69, 0x63, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x42, 0x61, + 0x73, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x69, + 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x09, 0x74, 0x65, 0x6c, 0x65, + 0x6d, 0x65, 0x74, 0x72, 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x54, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x52, 0x09, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x12, + 0x4e, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0e, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, + 0x08, 0x0a, 0x06, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x1a, 0x28, 0x0a, 0x09, 0x41, 0x70, 0x70, + 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6d, 0x6f, 0x64, 0x75, 0x6c, + 0x65, 0x49, 0x64, 0x1a, 0x2a, 0x0a, 0x0e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, + 0x9d, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, 0x6f, + 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x2b, 0x0a, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 
0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, + 0x76, 0x0a, 0x09, 0x4d, 0x65, 0x73, 0x68, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x12, 0x19, 0x0a, 0x08, + 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x6d, 0x65, 0x73, 0x68, 0x55, 0x69, 0x64, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x9f, 0x01, 0x0a, 0x15, 0x49, 0x73, 0x74, 0x69, + 0x6f, 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x68, 0x55, 0x69, 0x64, 0x12, 0x3e, 0x0a, 0x1b, + 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x19, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2b, 0x0a, 0x11, + 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, + 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0x49, 0x0a, 0x08, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x52, 0x75, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x98, 0x01, 0x0a, 0x0c, 0x47, 0x6b, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, + 0x8d, 0x02, 0x0a, 0x0b, 0x47, 0x6b, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, + 
0x22, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x19, 0x74, 0x6f, 0x70, + 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, + 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x74, 0x6f, + 0x70, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x19, 0x74, 0x6f, 0x70, 0x5f, 0x6c, 0x65, 0x76, 0x65, + 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x74, 0x6f, 0x70, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x1a, + 0xb9, 0x01, 0x0a, 0x0a, 0x47, 0x6b, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x22, + 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0xd9, 0x01, 0x0a, 0x0c, + 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x64, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 
0x72, 0x79, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x40, 0x0a, 0x12, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x30, 0x0a, 0x09, 0x54, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, + 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0xa7, 0x01, 0xea, 0x41, 0xa3, 0x01, 0x0a, + 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x12, 0x25, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12, 0x2f, 0x6f, 0x72, 0x67, 0x61, 0x6e, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12, 0x23, 0x66, 0x6f, 0x6c, 0x64, + 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12, + 0x01, 0x2a, 0x42, 0x0c, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x22, 0x82, 0x07, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, + 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x63, 0x0a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x69, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, + 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, + 0x61, 0x74, 0x6f, 0x72, 0x52, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x67, + 0x6f, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 
0x01, 0x52, 0x04, 0x67, 0x6f, 0x61, 0x6c, 0x12, + 0x42, 0x0a, 0x0e, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, + 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x12, 0x46, 0x0a, 0x0f, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x5f, + 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x43, 0x61, 0x6c, 0x65, 0x6e, + 0x64, 0x61, 0x72, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x61, 0x6c, + 0x65, 0x6e, 0x64, 0x61, 0x72, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x5c, 0x0a, 0x0b, 0x75, + 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, + 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x55, 0x73, + 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, + 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, + 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x34, 0x0a, 0x04, 0x56, 0x69, 0x65, 0x77, + 0x12, 0x14, 0x0a, 0x10, 0x56, 0x49, 0x45, 0x57, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x02, + 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x01, 0x3a, 0xca, + 0x02, 0xea, 0x41, 0xc6, 0x02, 0x0a, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x56, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, + 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, 0x12, 0x60, + 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, + 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 
0x69, 0x63, 0x65, 0x5f, + 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, + 0x12, 0x54, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, + 0x72, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, 0x12, 0x01, 0x2a, 0x20, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x70, + 0x65, 0x72, 0x69, 0x6f, 0x64, 0x22, 0xfa, 0x01, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, + 0x3d, 0x0a, 0x09, 0x62, 0x61, 0x73, 0x69, 0x63, 0x5f, 0x73, 0x6c, 0x69, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, + 0x6c, 0x69, 0x48, 0x00, 0x52, 0x08, 0x62, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x12, 0x4c, + 0x0a, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0c, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x12, 0x4c, 0x0a, 0x0d, + 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, + 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0c, 0x77, 0x69, + 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x22, 0xf3, 0x02, 0x0a, 0x08, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x12, + 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, + 0x0c, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, + 0x53, 0x6c, 0x69, 0x2e, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, + 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x76, 0x61, 0x69, + 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x4a, 0x0a, 0x07, 0x6c, 0x61, 0x74, 0x65, + 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 
0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x2e, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x48, 0x00, 0x52, 0x07, 0x6c, 0x61, 0x74, + 0x65, 0x6e, 0x63, 0x79, 0x1a, 0x16, 0x0a, 0x14, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, + 0x6c, 0x69, 0x74, 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x1a, 0x4a, 0x0a, 0x0f, + 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x12, + 0x37, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x74, + 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x0e, 0x0a, 0x0c, 0x73, 0x6c, 0x69, 0x5f, + 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x22, 0x2b, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, + 0x6d, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x03, 0x6d, 0x61, 0x78, 0x22, 0xc2, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x12, 0x51, 0x0a, 0x10, 0x67, 0x6f, 0x6f, + 0x64, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x48, 0x00, 0x52, 0x0e, 0x67, 0x6f, + 0x6f, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x52, 0x0a, 0x10, + 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x75, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x69, + 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, 0x74, 0x48, 0x00, 0x52, + 0x0f, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, 0x74, + 0x42, 0x08, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x22, 0xa1, 0x01, 0x0a, 0x0f, 0x54, + 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x2e, + 0x0a, 0x13, 0x67, 0x6f, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x67, 0x6f, 0x6f, + 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2c, + 0x0a, 0x12, 0x62, 0x61, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x62, 0x61, 0x64, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x30, 0x0a, 0x14, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x74, 0x6f, 0x74, 0x61, + 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x75, + 0x0a, 0x0f, 0x44, 0x69, 
0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, + 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x12, 0x31, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0xa4, 0x06, 0x0a, 0x0f, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, + 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x12, 0x35, 0x0a, 0x16, 0x67, 0x6f, 0x6f, + 0x64, 0x5f, 0x62, 0x61, 0x64, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x13, 0x67, 0x6f, 0x6f, + 0x64, 0x42, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x79, 0x0a, 0x1a, 0x67, 0x6f, 0x6f, 0x64, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, + 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x2e, 0x50, 0x65, 0x72, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, + 0x48, 0x00, 0x52, 0x17, 0x67, 0x6f, 0x6f, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x52, 0x61, 0x74, + 0x69, 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x64, 0x0a, 0x14, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x61, 0x6e, 0x5f, 0x69, 0x6e, 0x5f, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x11, + 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4d, 0x65, 0x61, 0x6e, 0x49, 0x6e, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x62, 0x0a, 0x13, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x75, 0x6d, 0x5f, + 0x69, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, + 0x65, 0x64, 0x53, 0x6c, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x48, 0x00, 0x52, 0x10, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x75, 0x6d, 0x49, 0x6e, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x3e, 0x0a, 0x0d, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, + 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x50, + 0x65, 0x72, 0x69, 0x6f, 0x64, 0x1a, 0xdd, 0x01, 
0x0a, 0x14, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x49, + 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0b, 0x70, 0x65, + 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x54, 0x0a, 0x15, 0x62, 0x61, 0x73, + 0x69, 0x63, 0x5f, 0x73, 0x6c, 0x69, 0x5f, 0x70, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, + 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x13, 0x62, 0x61, 0x73, 0x69, + 0x63, 0x53, 0x6c, 0x69, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x12, + 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x06, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x61, 0x0a, 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x53, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x77, 0x69, 0x6e, 0x64, + 0x6f, 0x77, 0x5f, 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x6f, 0x6e, 0x42, 0xd1, 0x01, 0x0a, + 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, + 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_service_proto_rawDescOnce sync.Once + 
file_google_monitoring_v3_service_proto_rawDescData = file_google_monitoring_v3_service_proto_rawDesc +) + +func file_google_monitoring_v3_service_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_service_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_service_proto_rawDescData) + }) + return file_google_monitoring_v3_service_proto_rawDescData +} + +var file_google_monitoring_v3_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_monitoring_v3_service_proto_msgTypes = make([]protoimpl.MessageInfo, 28) +var file_google_monitoring_v3_service_proto_goTypes = []any{ + (ServiceLevelObjective_View)(0), // 0: google.monitoring.v3.ServiceLevelObjective.View + (*Service)(nil), // 1: google.monitoring.v3.Service + (*ServiceLevelObjective)(nil), // 2: google.monitoring.v3.ServiceLevelObjective + (*ServiceLevelIndicator)(nil), // 3: google.monitoring.v3.ServiceLevelIndicator + (*BasicSli)(nil), // 4: google.monitoring.v3.BasicSli + (*Range)(nil), // 5: google.monitoring.v3.Range + (*RequestBasedSli)(nil), // 6: google.monitoring.v3.RequestBasedSli + (*TimeSeriesRatio)(nil), // 7: google.monitoring.v3.TimeSeriesRatio + (*DistributionCut)(nil), // 8: google.monitoring.v3.DistributionCut + (*WindowsBasedSli)(nil), // 9: google.monitoring.v3.WindowsBasedSli + (*Service_Custom)(nil), // 10: google.monitoring.v3.Service.Custom + (*Service_AppEngine)(nil), // 11: google.monitoring.v3.Service.AppEngine + (*Service_CloudEndpoints)(nil), // 12: google.monitoring.v3.Service.CloudEndpoints + (*Service_ClusterIstio)(nil), // 13: google.monitoring.v3.Service.ClusterIstio + (*Service_MeshIstio)(nil), // 14: google.monitoring.v3.Service.MeshIstio + (*Service_IstioCanonicalService)(nil), // 15: google.monitoring.v3.Service.IstioCanonicalService + (*Service_CloudRun)(nil), // 16: google.monitoring.v3.Service.CloudRun + (*Service_GkeNamespace)(nil), // 17: google.monitoring.v3.Service.GkeNamespace + (*Service_GkeWorkload)(nil), // 18: google.monitoring.v3.Service.GkeWorkload + (*Service_GkeService)(nil), // 19: google.monitoring.v3.Service.GkeService + (*Service_BasicService)(nil), // 20: google.monitoring.v3.Service.BasicService + (*Service_Telemetry)(nil), // 21: google.monitoring.v3.Service.Telemetry + nil, // 22: google.monitoring.v3.Service.UserLabelsEntry + nil, // 23: google.monitoring.v3.Service.BasicService.ServiceLabelsEntry + nil, // 24: google.monitoring.v3.ServiceLevelObjective.UserLabelsEntry + (*BasicSli_AvailabilityCriteria)(nil), // 25: google.monitoring.v3.BasicSli.AvailabilityCriteria + (*BasicSli_LatencyCriteria)(nil), // 26: google.monitoring.v3.BasicSli.LatencyCriteria + (*WindowsBasedSli_PerformanceThreshold)(nil), // 27: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold + (*WindowsBasedSli_MetricRange)(nil), // 28: google.monitoring.v3.WindowsBasedSli.MetricRange + (*durationpb.Duration)(nil), // 29: google.protobuf.Duration + (calendarperiod.CalendarPeriod)(0), // 30: google.type.CalendarPeriod +} +var file_google_monitoring_v3_service_proto_depIdxs = []int32{ + 10, // 0: google.monitoring.v3.Service.custom:type_name -> google.monitoring.v3.Service.Custom + 11, // 1: google.monitoring.v3.Service.app_engine:type_name -> google.monitoring.v3.Service.AppEngine + 12, // 2: google.monitoring.v3.Service.cloud_endpoints:type_name -> google.monitoring.v3.Service.CloudEndpoints + 13, // 3: google.monitoring.v3.Service.cluster_istio:type_name -> 
google.monitoring.v3.Service.ClusterIstio + 14, // 4: google.monitoring.v3.Service.mesh_istio:type_name -> google.monitoring.v3.Service.MeshIstio + 15, // 5: google.monitoring.v3.Service.istio_canonical_service:type_name -> google.monitoring.v3.Service.IstioCanonicalService + 16, // 6: google.monitoring.v3.Service.cloud_run:type_name -> google.monitoring.v3.Service.CloudRun + 17, // 7: google.monitoring.v3.Service.gke_namespace:type_name -> google.monitoring.v3.Service.GkeNamespace + 18, // 8: google.monitoring.v3.Service.gke_workload:type_name -> google.monitoring.v3.Service.GkeWorkload + 19, // 9: google.monitoring.v3.Service.gke_service:type_name -> google.monitoring.v3.Service.GkeService + 20, // 10: google.monitoring.v3.Service.basic_service:type_name -> google.monitoring.v3.Service.BasicService + 21, // 11: google.monitoring.v3.Service.telemetry:type_name -> google.monitoring.v3.Service.Telemetry + 22, // 12: google.monitoring.v3.Service.user_labels:type_name -> google.monitoring.v3.Service.UserLabelsEntry + 3, // 13: google.monitoring.v3.ServiceLevelObjective.service_level_indicator:type_name -> google.monitoring.v3.ServiceLevelIndicator + 29, // 14: google.monitoring.v3.ServiceLevelObjective.rolling_period:type_name -> google.protobuf.Duration + 30, // 15: google.monitoring.v3.ServiceLevelObjective.calendar_period:type_name -> google.type.CalendarPeriod + 24, // 16: google.monitoring.v3.ServiceLevelObjective.user_labels:type_name -> google.monitoring.v3.ServiceLevelObjective.UserLabelsEntry + 4, // 17: google.monitoring.v3.ServiceLevelIndicator.basic_sli:type_name -> google.monitoring.v3.BasicSli + 6, // 18: google.monitoring.v3.ServiceLevelIndicator.request_based:type_name -> google.monitoring.v3.RequestBasedSli + 9, // 19: google.monitoring.v3.ServiceLevelIndicator.windows_based:type_name -> google.monitoring.v3.WindowsBasedSli + 25, // 20: google.monitoring.v3.BasicSli.availability:type_name -> google.monitoring.v3.BasicSli.AvailabilityCriteria + 26, // 21: google.monitoring.v3.BasicSli.latency:type_name -> google.monitoring.v3.BasicSli.LatencyCriteria + 7, // 22: google.monitoring.v3.RequestBasedSli.good_total_ratio:type_name -> google.monitoring.v3.TimeSeriesRatio + 8, // 23: google.monitoring.v3.RequestBasedSli.distribution_cut:type_name -> google.monitoring.v3.DistributionCut + 5, // 24: google.monitoring.v3.DistributionCut.range:type_name -> google.monitoring.v3.Range + 27, // 25: google.monitoring.v3.WindowsBasedSli.good_total_ratio_threshold:type_name -> google.monitoring.v3.WindowsBasedSli.PerformanceThreshold + 28, // 26: google.monitoring.v3.WindowsBasedSli.metric_mean_in_range:type_name -> google.monitoring.v3.WindowsBasedSli.MetricRange + 28, // 27: google.monitoring.v3.WindowsBasedSli.metric_sum_in_range:type_name -> google.monitoring.v3.WindowsBasedSli.MetricRange + 29, // 28: google.monitoring.v3.WindowsBasedSli.window_period:type_name -> google.protobuf.Duration + 23, // 29: google.monitoring.v3.Service.BasicService.service_labels:type_name -> google.monitoring.v3.Service.BasicService.ServiceLabelsEntry + 29, // 30: google.monitoring.v3.BasicSli.LatencyCriteria.threshold:type_name -> google.protobuf.Duration + 6, // 31: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.performance:type_name -> google.monitoring.v3.RequestBasedSli + 4, // 32: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.basic_sli_performance:type_name -> google.monitoring.v3.BasicSli + 5, // 33: google.monitoring.v3.WindowsBasedSli.MetricRange.range:type_name -> 
google.monitoring.v3.Range + 34, // [34:34] is the sub-list for method output_type + 34, // [34:34] is the sub-list for method input_type + 34, // [34:34] is the sub-list for extension type_name + 34, // [34:34] is the sub-list for extension extendee + 0, // [0:34] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_service_proto_init() } +func file_google_monitoring_v3_service_proto_init() { + if File_google_monitoring_v3_service_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Service); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ServiceLevelObjective); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ServiceLevelIndicator); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*BasicSli); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*Range); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*RequestBasedSli); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*TimeSeriesRatio); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*DistributionCut); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*WindowsBasedSli); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*Service_Custom); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*Service_AppEngine); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*Service_CloudEndpoints); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + 
case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*Service_ClusterIstio); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[13].Exporter = func(v any, i int) any { + switch v := v.(*Service_MeshIstio); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[14].Exporter = func(v any, i int) any { + switch v := v.(*Service_IstioCanonicalService); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[15].Exporter = func(v any, i int) any { + switch v := v.(*Service_CloudRun); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[16].Exporter = func(v any, i int) any { + switch v := v.(*Service_GkeNamespace); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[17].Exporter = func(v any, i int) any { + switch v := v.(*Service_GkeWorkload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[18].Exporter = func(v any, i int) any { + switch v := v.(*Service_GkeService); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[19].Exporter = func(v any, i int) any { + switch v := v.(*Service_BasicService); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[20].Exporter = func(v any, i int) any { + switch v := v.(*Service_Telemetry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[24].Exporter = func(v any, i int) any { + switch v := v.(*BasicSli_AvailabilityCriteria); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[25].Exporter = func(v any, i int) any { + switch v := v.(*BasicSli_LatencyCriteria); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[26].Exporter = func(v any, i int) any { + switch v := v.(*WindowsBasedSli_PerformanceThreshold); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_proto_msgTypes[27].Exporter = func(v any, i int) any { + switch v := v.(*WindowsBasedSli_MetricRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + 
file_google_monitoring_v3_service_proto_msgTypes[0].OneofWrappers = []any{ + (*Service_Custom_)(nil), + (*Service_AppEngine_)(nil), + (*Service_CloudEndpoints_)(nil), + (*Service_ClusterIstio_)(nil), + (*Service_MeshIstio_)(nil), + (*Service_IstioCanonicalService_)(nil), + (*Service_CloudRun_)(nil), + (*Service_GkeNamespace_)(nil), + (*Service_GkeWorkload_)(nil), + (*Service_GkeService_)(nil), + } + file_google_monitoring_v3_service_proto_msgTypes[1].OneofWrappers = []any{ + (*ServiceLevelObjective_RollingPeriod)(nil), + (*ServiceLevelObjective_CalendarPeriod)(nil), + } + file_google_monitoring_v3_service_proto_msgTypes[2].OneofWrappers = []any{ + (*ServiceLevelIndicator_BasicSli)(nil), + (*ServiceLevelIndicator_RequestBased)(nil), + (*ServiceLevelIndicator_WindowsBased)(nil), + } + file_google_monitoring_v3_service_proto_msgTypes[3].OneofWrappers = []any{ + (*BasicSli_Availability)(nil), + (*BasicSli_Latency)(nil), + } + file_google_monitoring_v3_service_proto_msgTypes[5].OneofWrappers = []any{ + (*RequestBasedSli_GoodTotalRatio)(nil), + (*RequestBasedSli_DistributionCut)(nil), + } + file_google_monitoring_v3_service_proto_msgTypes[8].OneofWrappers = []any{ + (*WindowsBasedSli_GoodBadMetricFilter)(nil), + (*WindowsBasedSli_GoodTotalRatioThreshold)(nil), + (*WindowsBasedSli_MetricMeanInRange)(nil), + (*WindowsBasedSli_MetricSumInRange)(nil), + } + file_google_monitoring_v3_service_proto_msgTypes[26].OneofWrappers = []any{ + (*WindowsBasedSli_PerformanceThreshold_Performance)(nil), + (*WindowsBasedSli_PerformanceThreshold_BasicSliPerformance)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_service_proto_rawDesc, + NumEnums: 1, + NumMessages: 28, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_service_proto_depIdxs, + EnumInfos: file_google_monitoring_v3_service_proto_enumTypes, + MessageInfos: file_google_monitoring_v3_service_proto_msgTypes, + }.Build() + File_google_monitoring_v3_service_proto = out.File + file_google_monitoring_v3_service_proto_rawDesc = nil + file_google_monitoring_v3_service_proto_goTypes = nil + file_google_monitoring_v3_service_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go new file mode 100644 index 0000000000..15e1f04d6a --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go @@ -0,0 +1,1796 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/service_service.proto + +package monitoringpb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The `CreateService` request. +type CreateServiceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Resource + // [name](https://cloud.google.com/monitoring/api/v3#project_name) of the + // parent Metrics Scope. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. The Service id to use for this Service. If omitted, an id will be + // generated instead. Must match the pattern `[a-z0-9\-]+` + ServiceId string `protobuf:"bytes,3,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + // Required. The `Service` to create. + Service *Service `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"` +} + +func (x *CreateServiceRequest) Reset() { + *x = CreateServiceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateServiceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateServiceRequest) ProtoMessage() {} + +func (x *CreateServiceRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateServiceRequest.ProtoReflect.Descriptor instead. +func (*CreateServiceRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{0} +} + +func (x *CreateServiceRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateServiceRequest) GetServiceId() string { + if x != nil { + return x.ServiceId + } + return "" +} + +func (x *CreateServiceRequest) GetService() *Service { + if x != nil { + return x.Service + } + return nil +} + +// The `GetService` request. +type GetServiceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Resource name of the `Service`. 
The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetServiceRequest) Reset() { + *x = GetServiceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetServiceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetServiceRequest) ProtoMessage() {} + +func (x *GetServiceRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetServiceRequest.ProtoReflect.Descriptor instead. +func (*GetServiceRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{1} +} + +func (x *GetServiceRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `ListServices` request. +type ListServicesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Resource name of the parent containing the listed services, + // either a [project](https://cloud.google.com/monitoring/api/v3#project_name) + // or a Monitoring Metrics Scope. The formats are: + // + // projects/[PROJECT_ID_OR_NUMBER] + // workspaces/[HOST_PROJECT_ID_OR_NUMBER] + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // A filter specifying what `Service`s to return. The filter supports + // filtering on a particular service-identifier type or one of its attributes. + // + // To filter on a particular service-identifier type, the `identifier_case` + // refers to which option in the `identifier` field is populated. For example, + // the filter `identifier_case = "CUSTOM"` would match all services with a + // value for the `custom` field. Valid options include "CUSTOM", "APP_ENGINE", + // "MESH_ISTIO", and the other options listed at + // https://cloud.google.com/monitoring/api/ref_v3/rest/v3/services#Service + // + // To filter on an attribute of a service-identifier type, apply the filter + // name by using the snake case of the service-identifier type and the + // attribute of that service-identifier type, and join the two with a period. + // For example, to filter by the `meshUid` field of the `MeshIstio` + // service-identifier type, you must filter on `mesh_istio.mesh_uid = + // "123"` to match all services with mesh UID "123". Service-identifier types + // and their attributes are described at + // https://cloud.google.com/monitoring/api/ref_v3/rest/v3/services#Service + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // A non-negative number that is the maximum number of results to return. + // When 0, use default page size. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. 
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListServicesRequest) Reset() { + *x = ListServicesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListServicesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServicesRequest) ProtoMessage() {} + +func (x *ListServicesRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServicesRequest.ProtoReflect.Descriptor instead. +func (*ListServicesRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{2} +} + +func (x *ListServicesRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListServicesRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListServicesRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListServicesRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The `ListServices` response. +type ListServicesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The `Service`s matching the specified filter. + Services []*Service `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListServicesResponse) Reset() { + *x = ListServicesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListServicesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServicesResponse) ProtoMessage() {} + +func (x *ListServicesResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServicesResponse.ProtoReflect.Descriptor instead. +func (*ListServicesResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{3} +} + +func (x *ListServicesResponse) GetServices() []*Service { + if x != nil { + return x.Services + } + return nil +} + +func (x *ListServicesResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The `UpdateService` request. 
+type UpdateServiceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The `Service` to draw updates from. + // The given `name` specifies the resource to update. + Service *Service `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + // A set of field paths defining which fields to use for the update. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateServiceRequest) Reset() { + *x = UpdateServiceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateServiceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateServiceRequest) ProtoMessage() {} + +func (x *UpdateServiceRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateServiceRequest.ProtoReflect.Descriptor instead. +func (*UpdateServiceRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{4} +} + +func (x *UpdateServiceRequest) GetService() *Service { + if x != nil { + return x.Service + } + return nil +} + +func (x *UpdateServiceRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// The `DeleteService` request. +type DeleteServiceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Resource name of the `Service` to delete. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *DeleteServiceRequest) Reset() { + *x = DeleteServiceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteServiceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteServiceRequest) ProtoMessage() {} + +func (x *DeleteServiceRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteServiceRequest.ProtoReflect.Descriptor instead. +func (*DeleteServiceRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{5} +} + +func (x *DeleteServiceRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The `CreateServiceLevelObjective` request. +type CreateServiceLevelObjectiveRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Resource name of the parent `Service`. 
The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. The ServiceLevelObjective id to use for this + // ServiceLevelObjective. If omitted, an id will be generated instead. Must + // match the pattern `^[a-zA-Z0-9-_:.]+$` + ServiceLevelObjectiveId string `protobuf:"bytes,3,opt,name=service_level_objective_id,json=serviceLevelObjectiveId,proto3" json:"service_level_objective_id,omitempty"` + // Required. The `ServiceLevelObjective` to create. + // The provided `name` will be respected if no `ServiceLevelObjective` exists + // with this name. + ServiceLevelObjective *ServiceLevelObjective `protobuf:"bytes,2,opt,name=service_level_objective,json=serviceLevelObjective,proto3" json:"service_level_objective,omitempty"` +} + +func (x *CreateServiceLevelObjectiveRequest) Reset() { + *x = CreateServiceLevelObjectiveRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateServiceLevelObjectiveRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateServiceLevelObjectiveRequest) ProtoMessage() {} + +func (x *CreateServiceLevelObjectiveRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateServiceLevelObjectiveRequest.ProtoReflect.Descriptor instead. +func (*CreateServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{6} +} + +func (x *CreateServiceLevelObjectiveRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateServiceLevelObjectiveRequest) GetServiceLevelObjectiveId() string { + if x != nil { + return x.ServiceLevelObjectiveId + } + return "" +} + +func (x *CreateServiceLevelObjectiveRequest) GetServiceLevelObjective() *ServiceLevelObjective { + if x != nil { + return x.ServiceLevelObjective + } + return nil +} + +// The `GetServiceLevelObjective` request. +type GetServiceLevelObjectiveRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Resource name of the `ServiceLevelObjective` to get. The format + // is: + // + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // View of the `ServiceLevelObjective` to return. If `DEFAULT`, return the + // `ServiceLevelObjective` as originally defined. If `EXPLICIT` and the + // `ServiceLevelObjective` is defined in terms of a `BasicSli`, replace the + // `BasicSli` with a `RequestBasedSli` spelling out how the SLI is computed. 
+ View ServiceLevelObjective_View `protobuf:"varint,2,opt,name=view,proto3,enum=google.monitoring.v3.ServiceLevelObjective_View" json:"view,omitempty"` +} + +func (x *GetServiceLevelObjectiveRequest) Reset() { + *x = GetServiceLevelObjectiveRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetServiceLevelObjectiveRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetServiceLevelObjectiveRequest) ProtoMessage() {} + +func (x *GetServiceLevelObjectiveRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetServiceLevelObjectiveRequest.ProtoReflect.Descriptor instead. +func (*GetServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{7} +} + +func (x *GetServiceLevelObjectiveRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *GetServiceLevelObjectiveRequest) GetView() ServiceLevelObjective_View { + if x != nil { + return x.View + } + return ServiceLevelObjective_VIEW_UNSPECIFIED +} + +// The `ListServiceLevelObjectives` request. +type ListServiceLevelObjectivesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Resource name of the parent containing the listed SLOs, either a + // project or a Monitoring Metrics Scope. The formats are: + // + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] + // workspaces/[HOST_PROJECT_ID_OR_NUMBER]/services/- + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // A filter specifying what `ServiceLevelObjective`s to return. + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // A non-negative number that is the maximum number of results to return. + // When 0, use default page size. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // View of the `ServiceLevelObjective`s to return. If `DEFAULT`, return each + // `ServiceLevelObjective` as originally defined. If `EXPLICIT` and the + // `ServiceLevelObjective` is defined in terms of a `BasicSli`, replace the + // `BasicSli` with a `RequestBasedSli` spelling out how the SLI is computed. 
+ View ServiceLevelObjective_View `protobuf:"varint,5,opt,name=view,proto3,enum=google.monitoring.v3.ServiceLevelObjective_View" json:"view,omitempty"` +} + +func (x *ListServiceLevelObjectivesRequest) Reset() { + *x = ListServiceLevelObjectivesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListServiceLevelObjectivesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServiceLevelObjectivesRequest) ProtoMessage() {} + +func (x *ListServiceLevelObjectivesRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServiceLevelObjectivesRequest.ProtoReflect.Descriptor instead. +func (*ListServiceLevelObjectivesRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{8} +} + +func (x *ListServiceLevelObjectivesRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListServiceLevelObjectivesRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListServiceLevelObjectivesRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListServiceLevelObjectivesRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListServiceLevelObjectivesRequest) GetView() ServiceLevelObjective_View { + if x != nil { + return x.View + } + return ServiceLevelObjective_VIEW_UNSPECIFIED +} + +// The `ListServiceLevelObjectives` response. +type ListServiceLevelObjectivesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The `ServiceLevelObjective`s matching the specified filter. + ServiceLevelObjectives []*ServiceLevelObjective `protobuf:"bytes,1,rep,name=service_level_objectives,json=serviceLevelObjectives,proto3" json:"service_level_objectives,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `page_token` in the next call to this method. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListServiceLevelObjectivesResponse) Reset() { + *x = ListServiceLevelObjectivesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListServiceLevelObjectivesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServiceLevelObjectivesResponse) ProtoMessage() {} + +func (x *ListServiceLevelObjectivesResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServiceLevelObjectivesResponse.ProtoReflect.Descriptor instead. +func (*ListServiceLevelObjectivesResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{9} +} + +func (x *ListServiceLevelObjectivesResponse) GetServiceLevelObjectives() []*ServiceLevelObjective { + if x != nil { + return x.ServiceLevelObjectives + } + return nil +} + +func (x *ListServiceLevelObjectivesResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The `UpdateServiceLevelObjective` request. +type UpdateServiceLevelObjectiveRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The `ServiceLevelObjective` to draw updates from. + // The given `name` specifies the resource to update. + ServiceLevelObjective *ServiceLevelObjective `protobuf:"bytes,1,opt,name=service_level_objective,json=serviceLevelObjective,proto3" json:"service_level_objective,omitempty"` + // A set of field paths defining which fields to use for the update. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateServiceLevelObjectiveRequest) Reset() { + *x = UpdateServiceLevelObjectiveRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateServiceLevelObjectiveRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateServiceLevelObjectiveRequest) ProtoMessage() {} + +func (x *UpdateServiceLevelObjectiveRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateServiceLevelObjectiveRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{10} +} + +func (x *UpdateServiceLevelObjectiveRequest) GetServiceLevelObjective() *ServiceLevelObjective { + if x != nil { + return x.ServiceLevelObjective + } + return nil +} + +func (x *UpdateServiceLevelObjectiveRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// The `DeleteServiceLevelObjective` request. +type DeleteServiceLevelObjectiveRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Resource name of the `ServiceLevelObjective` to delete. The + // format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *DeleteServiceLevelObjectiveRequest) Reset() { + *x = DeleteServiceLevelObjectiveRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteServiceLevelObjectiveRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteServiceLevelObjectiveRequest) ProtoMessage() {} + +func (x *DeleteServiceLevelObjectiveRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_service_service_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteServiceLevelObjectiveRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{11} +} + +func (x *DeleteServiceLevelObjectiveRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +var File_google_monitoring_v3_service_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_service_service_proto_rawDesc = []byte{ + 0x0a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, + 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, + 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb6, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x12, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x22, 0x52, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 
0x21, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xac, 0x01, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x23, 0x12, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, + 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x79, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x08, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x08, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, + 0x91, 0x01, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, + 0x61, 0x73, 0x6b, 0x22, 0x55, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, + 0x23, 0x0a, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x8e, 0x02, 0x0a, 0x22, 0x43, + 0x72, 
0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x1a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x49, + 0x64, 0x12, 0x68, 0x0a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, + 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x22, 0xb4, 0x01, 0x0a, 0x1f, + 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x4b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x37, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x31, 0x0a, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x04, + 0x76, 0x69, 0x65, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x56, 0x69, 0x65, 0x77, 0x52, 0x04, 0x76, 0x69, + 0x65, 0x77, 0x22, 0x80, 0x02, 0x0a, 0x21, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, + 0x0a, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x12, 0x1b, 
0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, + 0x44, 0x0a, 0x04, 0x76, 0x69, 0x65, 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x56, 0x69, 0x65, 0x77, 0x52, + 0x04, 0x76, 0x69, 0x65, 0x77, 0x22, 0xb3, 0x01, 0x0a, 0x22, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x65, 0x0a, 0x18, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x16, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, + 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xcb, 0x01, 0x0a, 0x22, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x68, 0x0a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, + 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x3b, 0x0a, 0x0b, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x71, 0x0a, 0x22, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x4b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x37, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x31, 0x0a, 0x2f, 0x6d, 0x6f, 
0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x32, 0xea, 0x0f, 0x0a, + 0x18, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x97, 0x01, 0x0a, 0x0d, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x3b, 0xda, 0x41, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x24, 0x3a, + 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x19, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x2a, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x12, 0x7e, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x28, 0xda, 0x41, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x12, 0x19, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, + 0x61, 0x6d, 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2f, 0x2a, 0x7d, 0x12, 0x91, 0x01, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2a, 0xda, 0x41, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x12, 0x19, 0x2f, 0x76, + 0x33, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x2a, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x22, 0x3c, 0xda, 0x41, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x3a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x32, + 0x21, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6e, 0x61, + 0x6d, 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0x12, 0x7d, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x28, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x2a, 0x19, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a, + 0x7d, 0x12, 0xfa, 0x01, 0x0a, 0x1b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x22, 0x74, 0xda, 0x41, 0x1e, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, + 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x4d, 0x3a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, + 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x22, 0x32, 0x2f, 0x76, 0x33, 0x2f, + 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, + 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x12, 0xc1, + 0x01, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x35, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 
0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x22, + 0x41, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, + 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0x12, 0xd4, 0x01, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x73, 0x12, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x43, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x12, 0x8c, 0x02, 0x0a, 0x1b, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x22, 0x85, 0x01, 0xda, 0x41, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, + 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x65, 0x3a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, + 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x32, 0x4a, 0x2f, 0x76, + 0x33, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, + 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xb2, 0x01, 0x0a, 0x1b, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x53, 0x65, 
0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x41, 0xda, 0x41, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x2a, 0x32, 0x2f, 0x76, 0x33, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x1a, 0xa9, 0x01, + 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, + 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, + 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, + 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x42, 0xd8, 0x01, 0x0a, 0x18, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x1d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, + 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_service_service_proto_rawDescOnce 
sync.Once + file_google_monitoring_v3_service_service_proto_rawDescData = file_google_monitoring_v3_service_service_proto_rawDesc +) + +func file_google_monitoring_v3_service_service_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_service_service_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_service_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_service_service_proto_rawDescData) + }) + return file_google_monitoring_v3_service_service_proto_rawDescData +} + +var file_google_monitoring_v3_service_service_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_google_monitoring_v3_service_service_proto_goTypes = []any{ + (*CreateServiceRequest)(nil), // 0: google.monitoring.v3.CreateServiceRequest + (*GetServiceRequest)(nil), // 1: google.monitoring.v3.GetServiceRequest + (*ListServicesRequest)(nil), // 2: google.monitoring.v3.ListServicesRequest + (*ListServicesResponse)(nil), // 3: google.monitoring.v3.ListServicesResponse + (*UpdateServiceRequest)(nil), // 4: google.monitoring.v3.UpdateServiceRequest + (*DeleteServiceRequest)(nil), // 5: google.monitoring.v3.DeleteServiceRequest + (*CreateServiceLevelObjectiveRequest)(nil), // 6: google.monitoring.v3.CreateServiceLevelObjectiveRequest + (*GetServiceLevelObjectiveRequest)(nil), // 7: google.monitoring.v3.GetServiceLevelObjectiveRequest + (*ListServiceLevelObjectivesRequest)(nil), // 8: google.monitoring.v3.ListServiceLevelObjectivesRequest + (*ListServiceLevelObjectivesResponse)(nil), // 9: google.monitoring.v3.ListServiceLevelObjectivesResponse + (*UpdateServiceLevelObjectiveRequest)(nil), // 10: google.monitoring.v3.UpdateServiceLevelObjectiveRequest + (*DeleteServiceLevelObjectiveRequest)(nil), // 11: google.monitoring.v3.DeleteServiceLevelObjectiveRequest + (*Service)(nil), // 12: google.monitoring.v3.Service + (*fieldmaskpb.FieldMask)(nil), // 13: google.protobuf.FieldMask + (*ServiceLevelObjective)(nil), // 14: google.monitoring.v3.ServiceLevelObjective + (ServiceLevelObjective_View)(0), // 15: google.monitoring.v3.ServiceLevelObjective.View + (*emptypb.Empty)(nil), // 16: google.protobuf.Empty +} +var file_google_monitoring_v3_service_service_proto_depIdxs = []int32{ + 12, // 0: google.monitoring.v3.CreateServiceRequest.service:type_name -> google.monitoring.v3.Service + 12, // 1: google.monitoring.v3.ListServicesResponse.services:type_name -> google.monitoring.v3.Service + 12, // 2: google.monitoring.v3.UpdateServiceRequest.service:type_name -> google.monitoring.v3.Service + 13, // 3: google.monitoring.v3.UpdateServiceRequest.update_mask:type_name -> google.protobuf.FieldMask + 14, // 4: google.monitoring.v3.CreateServiceLevelObjectiveRequest.service_level_objective:type_name -> google.monitoring.v3.ServiceLevelObjective + 15, // 5: google.monitoring.v3.GetServiceLevelObjectiveRequest.view:type_name -> google.monitoring.v3.ServiceLevelObjective.View + 15, // 6: google.monitoring.v3.ListServiceLevelObjectivesRequest.view:type_name -> google.monitoring.v3.ServiceLevelObjective.View + 14, // 7: google.monitoring.v3.ListServiceLevelObjectivesResponse.service_level_objectives:type_name -> google.monitoring.v3.ServiceLevelObjective + 14, // 8: google.monitoring.v3.UpdateServiceLevelObjectiveRequest.service_level_objective:type_name -> google.monitoring.v3.ServiceLevelObjective + 13, // 9: google.monitoring.v3.UpdateServiceLevelObjectiveRequest.update_mask:type_name -> google.protobuf.FieldMask + 0, // 10: google.monitoring.v3.ServiceMonitoringService.CreateService:input_type -> 
google.monitoring.v3.CreateServiceRequest + 1, // 11: google.monitoring.v3.ServiceMonitoringService.GetService:input_type -> google.monitoring.v3.GetServiceRequest + 2, // 12: google.monitoring.v3.ServiceMonitoringService.ListServices:input_type -> google.monitoring.v3.ListServicesRequest + 4, // 13: google.monitoring.v3.ServiceMonitoringService.UpdateService:input_type -> google.monitoring.v3.UpdateServiceRequest + 5, // 14: google.monitoring.v3.ServiceMonitoringService.DeleteService:input_type -> google.monitoring.v3.DeleteServiceRequest + 6, // 15: google.monitoring.v3.ServiceMonitoringService.CreateServiceLevelObjective:input_type -> google.monitoring.v3.CreateServiceLevelObjectiveRequest + 7, // 16: google.monitoring.v3.ServiceMonitoringService.GetServiceLevelObjective:input_type -> google.monitoring.v3.GetServiceLevelObjectiveRequest + 8, // 17: google.monitoring.v3.ServiceMonitoringService.ListServiceLevelObjectives:input_type -> google.monitoring.v3.ListServiceLevelObjectivesRequest + 10, // 18: google.monitoring.v3.ServiceMonitoringService.UpdateServiceLevelObjective:input_type -> google.monitoring.v3.UpdateServiceLevelObjectiveRequest + 11, // 19: google.monitoring.v3.ServiceMonitoringService.DeleteServiceLevelObjective:input_type -> google.monitoring.v3.DeleteServiceLevelObjectiveRequest + 12, // 20: google.monitoring.v3.ServiceMonitoringService.CreateService:output_type -> google.monitoring.v3.Service + 12, // 21: google.monitoring.v3.ServiceMonitoringService.GetService:output_type -> google.monitoring.v3.Service + 3, // 22: google.monitoring.v3.ServiceMonitoringService.ListServices:output_type -> google.monitoring.v3.ListServicesResponse + 12, // 23: google.monitoring.v3.ServiceMonitoringService.UpdateService:output_type -> google.monitoring.v3.Service + 16, // 24: google.monitoring.v3.ServiceMonitoringService.DeleteService:output_type -> google.protobuf.Empty + 14, // 25: google.monitoring.v3.ServiceMonitoringService.CreateServiceLevelObjective:output_type -> google.monitoring.v3.ServiceLevelObjective + 14, // 26: google.monitoring.v3.ServiceMonitoringService.GetServiceLevelObjective:output_type -> google.monitoring.v3.ServiceLevelObjective + 9, // 27: google.monitoring.v3.ServiceMonitoringService.ListServiceLevelObjectives:output_type -> google.monitoring.v3.ListServiceLevelObjectivesResponse + 14, // 28: google.monitoring.v3.ServiceMonitoringService.UpdateServiceLevelObjective:output_type -> google.monitoring.v3.ServiceLevelObjective + 16, // 29: google.monitoring.v3.ServiceMonitoringService.DeleteServiceLevelObjective:output_type -> google.protobuf.Empty + 20, // [20:30] is the sub-list for method output_type + 10, // [10:20] is the sub-list for method input_type + 10, // [10:10] is the sub-list for extension type_name + 10, // [10:10] is the sub-list for extension extendee + 0, // [0:10] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_service_service_proto_init() } +func file_google_monitoring_v3_service_service_proto_init() { + if File_google_monitoring_v3_service_service_proto != nil { + return + } + file_google_monitoring_v3_service_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_service_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*CreateServiceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[1].Exporter = func(v any, i int) 
any { + switch v := v.(*GetServiceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ListServicesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*ListServicesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*UpdateServiceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*DeleteServiceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*CreateServiceLevelObjectiveRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*GetServiceLevelObjectiveRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*ListServiceLevelObjectivesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*ListServiceLevelObjectivesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*UpdateServiceLevelObjectiveRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_service_service_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*DeleteServiceLevelObjectiveRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_service_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 12, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_monitoring_v3_service_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_service_service_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_service_service_proto_msgTypes, + }.Build() + File_google_monitoring_v3_service_service_proto = out.File + 
file_google_monitoring_v3_service_service_proto_rawDesc = nil + file_google_monitoring_v3_service_service_proto_goTypes = nil + file_google_monitoring_v3_service_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// ServiceMonitoringServiceClient is the client API for ServiceMonitoringService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ServiceMonitoringServiceClient interface { + // Create a `Service`. + CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*Service, error) + // Get the named `Service`. + GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*Service, error) + // List `Service`s for this Metrics Scope. + ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) + // Update this `Service`. + UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*Service, error) + // Soft delete this `Service`. + DeleteService(ctx context.Context, in *DeleteServiceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Create a `ServiceLevelObjective` for the given `Service`. + CreateServiceLevelObjective(ctx context.Context, in *CreateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) + // Get a `ServiceLevelObjective` by name. + GetServiceLevelObjective(ctx context.Context, in *GetServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) + // List the `ServiceLevelObjective`s for the given `Service`. + ListServiceLevelObjectives(ctx context.Context, in *ListServiceLevelObjectivesRequest, opts ...grpc.CallOption) (*ListServiceLevelObjectivesResponse, error) + // Update the given `ServiceLevelObjective`. + UpdateServiceLevelObjective(ctx context.Context, in *UpdateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) + // Delete the given `ServiceLevelObjective`. + DeleteServiceLevelObjective(ctx context.Context, in *DeleteServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type serviceMonitoringServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewServiceMonitoringServiceClient(cc grpc.ClientConnInterface) ServiceMonitoringServiceClient { + return &serviceMonitoringServiceClient{cc} +} + +func (c *serviceMonitoringServiceClient) CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*Service, error) { + out := new(Service) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/CreateService", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*Service, error) { + out := new(Service) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/GetService", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) { + out := new(ListServicesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/ListServices", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*Service, error) { + out := new(Service) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/UpdateService", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) DeleteService(ctx context.Context, in *DeleteServiceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/DeleteService", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) CreateServiceLevelObjective(ctx context.Context, in *CreateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) { + out := new(ServiceLevelObjective) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/CreateServiceLevelObjective", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) GetServiceLevelObjective(ctx context.Context, in *GetServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) { + out := new(ServiceLevelObjective) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/GetServiceLevelObjective", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) ListServiceLevelObjectives(ctx context.Context, in *ListServiceLevelObjectivesRequest, opts ...grpc.CallOption) (*ListServiceLevelObjectivesResponse, error) { + out := new(ListServiceLevelObjectivesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/ListServiceLevelObjectives", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) UpdateServiceLevelObjective(ctx context.Context, in *UpdateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) { + out := new(ServiceLevelObjective) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/UpdateServiceLevelObjective", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) DeleteServiceLevelObjective(ctx context.Context, in *DeleteServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/DeleteServiceLevelObjective", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ServiceMonitoringServiceServer is the server API for ServiceMonitoringService service. +type ServiceMonitoringServiceServer interface { + // Create a `Service`. + CreateService(context.Context, *CreateServiceRequest) (*Service, error) + // Get the named `Service`. 
+ GetService(context.Context, *GetServiceRequest) (*Service, error) + // List `Service`s for this Metrics Scope. + ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) + // Update this `Service`. + UpdateService(context.Context, *UpdateServiceRequest) (*Service, error) + // Soft delete this `Service`. + DeleteService(context.Context, *DeleteServiceRequest) (*emptypb.Empty, error) + // Create a `ServiceLevelObjective` for the given `Service`. + CreateServiceLevelObjective(context.Context, *CreateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) + // Get a `ServiceLevelObjective` by name. + GetServiceLevelObjective(context.Context, *GetServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) + // List the `ServiceLevelObjective`s for the given `Service`. + ListServiceLevelObjectives(context.Context, *ListServiceLevelObjectivesRequest) (*ListServiceLevelObjectivesResponse, error) + // Update the given `ServiceLevelObjective`. + UpdateServiceLevelObjective(context.Context, *UpdateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) + // Delete the given `ServiceLevelObjective`. + DeleteServiceLevelObjective(context.Context, *DeleteServiceLevelObjectiveRequest) (*emptypb.Empty, error) +} + +// UnimplementedServiceMonitoringServiceServer can be embedded to have forward compatible implementations. +type UnimplementedServiceMonitoringServiceServer struct { +} + +func (*UnimplementedServiceMonitoringServiceServer) CreateService(context.Context, *CreateServiceRequest) (*Service, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateService not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) GetService(context.Context, *GetServiceRequest) (*Service, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetService not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListServices not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) UpdateService(context.Context, *UpdateServiceRequest) (*Service, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateService not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) DeleteService(context.Context, *DeleteServiceRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteService not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) CreateServiceLevelObjective(context.Context, *CreateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateServiceLevelObjective not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) GetServiceLevelObjective(context.Context, *GetServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetServiceLevelObjective not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) ListServiceLevelObjectives(context.Context, *ListServiceLevelObjectivesRequest) (*ListServiceLevelObjectivesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListServiceLevelObjectives not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) UpdateServiceLevelObjective(context.Context, *UpdateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) { + return nil, 
status.Errorf(codes.Unimplemented, "method UpdateServiceLevelObjective not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) DeleteServiceLevelObjective(context.Context, *DeleteServiceLevelObjectiveRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteServiceLevelObjective not implemented") +} + +func RegisterServiceMonitoringServiceServer(s *grpc.Server, srv ServiceMonitoringServiceServer) { + s.RegisterService(&_ServiceMonitoringService_serviceDesc, srv) +} + +func _ServiceMonitoringService_CreateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).CreateService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/CreateService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).CreateService(ctx, req.(*CreateServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_GetService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).GetService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/GetService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).GetService(ctx, req.(*GetServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_ListServices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListServicesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).ListServices(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/ListServices", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).ListServices(ctx, req.(*ListServicesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_UpdateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).UpdateService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/UpdateService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).UpdateService(ctx, req.(*UpdateServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_DeleteService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).DeleteService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/DeleteService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).DeleteService(ctx, req.(*DeleteServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_CreateServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateServiceLevelObjectiveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).CreateServiceLevelObjective(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/CreateServiceLevelObjective", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).CreateServiceLevelObjective(ctx, req.(*CreateServiceLevelObjectiveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_GetServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServiceLevelObjectiveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).GetServiceLevelObjective(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/GetServiceLevelObjective", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).GetServiceLevelObjective(ctx, req.(*GetServiceLevelObjectiveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_ListServiceLevelObjectives_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListServiceLevelObjectivesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).ListServiceLevelObjectives(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/ListServiceLevelObjectives", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).ListServiceLevelObjectives(ctx, req.(*ListServiceLevelObjectivesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_UpdateServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateServiceLevelObjectiveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).UpdateServiceLevelObjective(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.monitoring.v3.ServiceMonitoringService/UpdateServiceLevelObjective", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).UpdateServiceLevelObjective(ctx, req.(*UpdateServiceLevelObjectiveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_DeleteServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteServiceLevelObjectiveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).DeleteServiceLevelObjective(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/DeleteServiceLevelObjective", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).DeleteServiceLevelObjective(ctx, req.(*DeleteServiceLevelObjectiveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ServiceMonitoringService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.ServiceMonitoringService", + HandlerType: (*ServiceMonitoringServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateService", + Handler: _ServiceMonitoringService_CreateService_Handler, + }, + { + MethodName: "GetService", + Handler: _ServiceMonitoringService_GetService_Handler, + }, + { + MethodName: "ListServices", + Handler: _ServiceMonitoringService_ListServices_Handler, + }, + { + MethodName: "UpdateService", + Handler: _ServiceMonitoringService_UpdateService_Handler, + }, + { + MethodName: "DeleteService", + Handler: _ServiceMonitoringService_DeleteService_Handler, + }, + { + MethodName: "CreateServiceLevelObjective", + Handler: _ServiceMonitoringService_CreateServiceLevelObjective_Handler, + }, + { + MethodName: "GetServiceLevelObjective", + Handler: _ServiceMonitoringService_GetServiceLevelObjective_Handler, + }, + { + MethodName: "ListServiceLevelObjectives", + Handler: _ServiceMonitoringService_ListServiceLevelObjectives_Handler, + }, + { + MethodName: "UpdateServiceLevelObjective", + Handler: _ServiceMonitoringService_UpdateServiceLevelObjective_Handler, + }, + { + MethodName: "DeleteServiceLevelObjective", + Handler: _ServiceMonitoringService_DeleteServiceLevelObjective_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/service_service.proto", +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go new file mode 100644 index 0000000000..ab49868045 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go @@ -0,0 +1,315 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. 
DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/snooze.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A `Snooze` will prevent any alerts from being opened, and close any that +// are already open. The `Snooze` will work on alerts that match the +// criteria defined in the `Snooze`. The `Snooze` will be active from +// `interval.start_time` through `interval.end_time`. +type Snooze struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the `Snooze`. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID] + // + // The ID of the `Snooze` will be generated by the system. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. This defines the criteria for applying the `Snooze`. See + // `Criteria` for more information. + Criteria *Snooze_Criteria `protobuf:"bytes,3,opt,name=criteria,proto3" json:"criteria,omitempty"` + // Required. The `Snooze` will be active from `interval.start_time` through + // `interval.end_time`. + // `interval.start_time` cannot be in the past. There is a 15 second clock + // skew to account for the time it takes for a request to reach the API from + // the UI. + Interval *TimeInterval `protobuf:"bytes,4,opt,name=interval,proto3" json:"interval,omitempty"` + // Required. A display name for the `Snooze`. This can be, at most, 512 + // unicode characters. + DisplayName string `protobuf:"bytes,5,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` +} + +func (x *Snooze) Reset() { + *x = Snooze{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Snooze) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Snooze) ProtoMessage() {} + +func (x *Snooze) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Snooze.ProtoReflect.Descriptor instead. +func (*Snooze) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_proto_rawDescGZIP(), []int{0} +} + +func (x *Snooze) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Snooze) GetCriteria() *Snooze_Criteria { + if x != nil { + return x.Criteria + } + return nil +} + +func (x *Snooze) GetInterval() *TimeInterval { + if x != nil { + return x.Interval + } + return nil +} + +func (x *Snooze) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +// Criteria specific to the `AlertPolicy`s that this `Snooze` applies to. The +// `Snooze` will suppress alerts that come from one of the `AlertPolicy`s +// whose names are supplied. 
+type Snooze_Criteria struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The specific `AlertPolicy` names for the alert that should be snoozed. + // The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID] + // + // There is a limit of 16 policies per snooze. This limit is checked during + // snooze creation. + Policies []string `protobuf:"bytes,1,rep,name=policies,proto3" json:"policies,omitempty"` +} + +func (x *Snooze_Criteria) Reset() { + *x = Snooze_Criteria{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Snooze_Criteria) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Snooze_Criteria) ProtoMessage() {} + +func (x *Snooze_Criteria) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Snooze_Criteria.ProtoReflect.Descriptor instead. +func (*Snooze_Criteria) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Snooze_Criteria) GetPolicies() []string { + if x != nil { + return x.Policies + } + return nil +} + +var File_google_monitoring_v3_snooze_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_snooze_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, + 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf6, 0x02, 0x0a, 0x06, 0x53, 0x6e, 0x6f, + 0x6f, 0x7a, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x08, + 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2e, 0x43, 0x72, 0x69, + 0x74, 0x65, 0x72, 0x69, 0x61, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x63, 0x72, 0x69, 0x74, + 0x65, 0x72, 0x69, 0x61, 0x12, 0x43, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x49, 
0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x26, 0x0a, 0x0c, 0x64, 0x69, 0x73, + 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, + 0x65, 0x1a, 0x52, 0x0a, 0x08, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x12, 0x46, 0x0a, + 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, + 0x2a, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x08, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x69, 0x65, 0x73, 0x3a, 0x4a, 0xea, 0x41, 0x47, 0x0a, 0x20, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x23, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, + 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, + 0x7d, 0x42, 0xc6, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, + 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, + 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, + 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_snooze_proto_rawDescOnce sync.Once + file_google_monitoring_v3_snooze_proto_rawDescData = file_google_monitoring_v3_snooze_proto_rawDesc +) + +func file_google_monitoring_v3_snooze_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_snooze_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_snooze_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_snooze_proto_rawDescData) + }) + return file_google_monitoring_v3_snooze_proto_rawDescData +} + +var file_google_monitoring_v3_snooze_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_google_monitoring_v3_snooze_proto_goTypes = []any{ + (*Snooze)(nil), // 0: google.monitoring.v3.Snooze + (*Snooze_Criteria)(nil), // 1: google.monitoring.v3.Snooze.Criteria + (*TimeInterval)(nil), // 2: google.monitoring.v3.TimeInterval +} +var file_google_monitoring_v3_snooze_proto_depIdxs = 
[]int32{ + 1, // 0: google.monitoring.v3.Snooze.criteria:type_name -> google.monitoring.v3.Snooze.Criteria + 2, // 1: google.monitoring.v3.Snooze.interval:type_name -> google.monitoring.v3.TimeInterval + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_snooze_proto_init() } +func file_google_monitoring_v3_snooze_proto_init() { + if File_google_monitoring_v3_snooze_proto != nil { + return + } + file_google_monitoring_v3_common_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_snooze_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Snooze); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_snooze_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*Snooze_Criteria); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_snooze_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_snooze_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_snooze_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_snooze_proto_msgTypes, + }.Build() + File_google_monitoring_v3_snooze_proto = out.File + file_google_monitoring_v3_snooze_proto_rawDesc = nil + file_google_monitoring_v3_snooze_proto_goTypes = nil + file_google_monitoring_v3_snooze_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go new file mode 100644 index 0000000000..39388a9982 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go @@ -0,0 +1,867 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/snooze_service.proto + +package monitoringpb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The message definition for creating a `Snooze`. Users must provide the body +// of the `Snooze` to be created but must omit the `Snooze` field, `name`. +type CreateSnoozeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) in which + // a `Snooze` should be created. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Required. The `Snooze` to create. Omit the `name` field, as it will be + // filled in by the API. + Snooze *Snooze `protobuf:"bytes,2,opt,name=snooze,proto3" json:"snooze,omitempty"` +} + +func (x *CreateSnoozeRequest) Reset() { + *x = CreateSnoozeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateSnoozeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSnoozeRequest) ProtoMessage() {} + +func (x *CreateSnoozeRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSnoozeRequest.ProtoReflect.Descriptor instead. +func (*CreateSnoozeRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{0} +} + +func (x *CreateSnoozeRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateSnoozeRequest) GetSnooze() *Snooze { + if x != nil { + return x.Snooze + } + return nil +} + +// The message definition for listing `Snooze`s associated with the given +// `parent`, satisfying the optional `filter`. +type ListSnoozesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) whose + // `Snooze`s should be listed. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. Optional filter to restrict results to the given criteria. The + // following fields are supported. 
+ // + // - `interval.start_time` + // - `interval.end_time` + // + // For example: + // + // ``` + // interval.start_time > "2022-03-11T00:00:00-08:00" AND + // interval.end_time < "2022-03-12T00:00:00-08:00" + // ``` + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // Optional. The maximum number of results to return for a single query. The + // server may further constrain the maximum number of results returned in a + // single page. The value should be in the range [1, 1000]. If the value given + // is outside this range, the server will decide the number of results to be + // returned. + PageSize int32 `protobuf:"varint,4,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional. The `next_page_token` from a previous call to + // `ListSnoozesRequest` to get the next page of results. + PageToken string `protobuf:"bytes,5,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListSnoozesRequest) Reset() { + *x = ListSnoozesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListSnoozesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSnoozesRequest) ProtoMessage() {} + +func (x *ListSnoozesRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSnoozesRequest.ProtoReflect.Descriptor instead. +func (*ListSnoozesRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ListSnoozesRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListSnoozesRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListSnoozesRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListSnoozesRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The results of a successful `ListSnoozes` call, containing the matching +// `Snooze`s. +type ListSnoozesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // `Snooze`s matching this list call. + Snoozes []*Snooze `protobuf:"bytes,1,rep,name=snoozes,proto3" json:"snoozes,omitempty"` + // Page token for repeated calls to `ListSnoozes`, to fetch additional pages + // of results. If this is empty or missing, there are no more pages. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListSnoozesResponse) Reset() { + *x = ListSnoozesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListSnoozesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSnoozesResponse) ProtoMessage() {} + +func (x *ListSnoozesResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSnoozesResponse.ProtoReflect.Descriptor instead. +func (*ListSnoozesResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{2} +} + +func (x *ListSnoozesResponse) GetSnoozes() []*Snooze { + if x != nil { + return x.Snoozes + } + return nil +} + +func (x *ListSnoozesResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The message definition for retrieving a `Snooze`. Users must specify the +// field, `name`, which identifies the `Snooze`. +type GetSnoozeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The ID of the `Snooze` to retrieve. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetSnoozeRequest) Reset() { + *x = GetSnoozeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetSnoozeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSnoozeRequest) ProtoMessage() {} + +func (x *GetSnoozeRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSnoozeRequest.ProtoReflect.Descriptor instead. +func (*GetSnoozeRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{3} +} + +func (x *GetSnoozeRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The message definition for updating a `Snooze`. The field, `snooze.name` +// identifies the `Snooze` to be updated. The remainder of `snooze` gives the +// content the `Snooze` in question will be assigned. +// +// What fields can be updated depends on the start time and end time of the +// `Snooze`. +// +// - end time is in the past: These `Snooze`s are considered +// read-only and cannot be updated. +// - start time is in the past and end time is in the future: `display_name` +// and `interval.end_time` can be updated. +// - start time is in the future: `display_name`, `interval.start_time` and +// `interval.end_time` can be updated. 
+type UpdateSnoozeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The `Snooze` to update. Must have the name field present. + Snooze *Snooze `protobuf:"bytes,1,opt,name=snooze,proto3" json:"snooze,omitempty"` + // Required. The fields to update. + // + // For each field listed in `update_mask`: + // + // - If the `Snooze` object supplied in the `UpdateSnoozeRequest` has a + // value for that field, the value of the field in the existing `Snooze` + // will be set to the value of the field in the supplied `Snooze`. + // - If the field does not have a value in the supplied `Snooze`, the field + // in the existing `Snooze` is set to its default value. + // + // Fields not listed retain their existing value. + // + // The following are the field names that are accepted in `update_mask`: + // + // - `display_name` + // - `interval.start_time` + // - `interval.end_time` + // + // That said, the start time and end time of the `Snooze` determines which + // fields can legally be updated. Before attempting an update, users should + // consult the documentation for `UpdateSnoozeRequest`, which talks about + // which fields can be updated. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateSnoozeRequest) Reset() { + *x = UpdateSnoozeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateSnoozeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateSnoozeRequest) ProtoMessage() {} + +func (x *UpdateSnoozeRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateSnoozeRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateSnoozeRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{4} +} + +func (x *UpdateSnoozeRequest) GetSnooze() *Snooze { + if x != nil { + return x.Snooze + } + return nil +} + +func (x *UpdateSnoozeRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +var File_google_monitoring_v3_snooze_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_snooze_service_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, + 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, + 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x92, 0x01, 0x0a, 0x13, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x40, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x22, 0x12, 0x20, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x06, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, + 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x22, 0xb9, + 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x22, 0x12, 0x20, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x66, 
0x69, 0x6c, 0x74, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, + 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, + 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x75, 0x0a, 0x13, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x36, 0x0a, 0x07, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, + 0x52, 0x07, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, + 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x22, 0x0a, 0x20, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x22, 0x92, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, + 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x06, 0x73, + 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, + 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x32, 0x98, 0x06, 0x0a, 0x0d, 0x53, 0x6e, 0x6f, + 0x6f, 0x7a, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x29, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, + 0x6f, 0x6f, 0x7a, 0x65, 0x22, 0x3f, 0xda, 0x41, 0x0d, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, + 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x3a, 0x06, 0x73, 0x6e, + 
0x6f, 0x6f, 0x7a, 0x65, 0x22, 0x1f, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, + 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x94, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, + 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, + 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0xda, 0x41, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x76, 0x33, + 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x81, 0x01, 0x0a, + 0x09, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, + 0x22, 0x2e, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, + 0x1f, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x2f, 0x2a, 0x7d, + 0x12, 0xa4, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, + 0x65, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, + 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x22, 0x4b, 0xda, 0x41, 0x12, 0x73, + 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, + 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x30, 0x3a, 0x06, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x32, + 0x26, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2e, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x6f, + 0x6f, 0x7a, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, + 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, + 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, + 0x65, 0x61, 0x64, 0x42, 0xcd, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x42, 0x12, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, + 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_snooze_service_proto_rawDescOnce sync.Once + file_google_monitoring_v3_snooze_service_proto_rawDescData = file_google_monitoring_v3_snooze_service_proto_rawDesc +) + +func file_google_monitoring_v3_snooze_service_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_snooze_service_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_snooze_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_snooze_service_proto_rawDescData) + }) + return file_google_monitoring_v3_snooze_service_proto_rawDescData +} + +var file_google_monitoring_v3_snooze_service_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_google_monitoring_v3_snooze_service_proto_goTypes = []any{ + (*CreateSnoozeRequest)(nil), // 0: google.monitoring.v3.CreateSnoozeRequest + (*ListSnoozesRequest)(nil), // 1: google.monitoring.v3.ListSnoozesRequest + (*ListSnoozesResponse)(nil), // 2: google.monitoring.v3.ListSnoozesResponse + (*GetSnoozeRequest)(nil), // 3: google.monitoring.v3.GetSnoozeRequest + (*UpdateSnoozeRequest)(nil), // 4: google.monitoring.v3.UpdateSnoozeRequest + (*Snooze)(nil), // 5: google.monitoring.v3.Snooze + (*fieldmaskpb.FieldMask)(nil), // 6: google.protobuf.FieldMask +} +var file_google_monitoring_v3_snooze_service_proto_depIdxs = []int32{ + 5, // 0: google.monitoring.v3.CreateSnoozeRequest.snooze:type_name -> google.monitoring.v3.Snooze + 5, // 1: google.monitoring.v3.ListSnoozesResponse.snoozes:type_name -> google.monitoring.v3.Snooze + 5, // 2: google.monitoring.v3.UpdateSnoozeRequest.snooze:type_name -> google.monitoring.v3.Snooze + 6, // 3: google.monitoring.v3.UpdateSnoozeRequest.update_mask:type_name -> google.protobuf.FieldMask + 0, // 4: 
google.monitoring.v3.SnoozeService.CreateSnooze:input_type -> google.monitoring.v3.CreateSnoozeRequest + 1, // 5: google.monitoring.v3.SnoozeService.ListSnoozes:input_type -> google.monitoring.v3.ListSnoozesRequest + 3, // 6: google.monitoring.v3.SnoozeService.GetSnooze:input_type -> google.monitoring.v3.GetSnoozeRequest + 4, // 7: google.monitoring.v3.SnoozeService.UpdateSnooze:input_type -> google.monitoring.v3.UpdateSnoozeRequest + 5, // 8: google.monitoring.v3.SnoozeService.CreateSnooze:output_type -> google.monitoring.v3.Snooze + 2, // 9: google.monitoring.v3.SnoozeService.ListSnoozes:output_type -> google.monitoring.v3.ListSnoozesResponse + 5, // 10: google.monitoring.v3.SnoozeService.GetSnooze:output_type -> google.monitoring.v3.Snooze + 5, // 11: google.monitoring.v3.SnoozeService.UpdateSnooze:output_type -> google.monitoring.v3.Snooze + 8, // [8:12] is the sub-list for method output_type + 4, // [4:8] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_snooze_service_proto_init() } +func file_google_monitoring_v3_snooze_service_proto_init() { + if File_google_monitoring_v3_snooze_service_proto != nil { + return + } + file_google_monitoring_v3_snooze_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_snooze_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*CreateSnoozeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_snooze_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ListSnoozesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_snooze_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ListSnoozesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_snooze_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*GetSnoozeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_snooze_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*UpdateSnoozeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_snooze_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_monitoring_v3_snooze_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_snooze_service_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_snooze_service_proto_msgTypes, + }.Build() + File_google_monitoring_v3_snooze_service_proto = out.File + file_google_monitoring_v3_snooze_service_proto_rawDesc = nil + file_google_monitoring_v3_snooze_service_proto_goTypes = nil + file_google_monitoring_v3_snooze_service_proto_depIdxs = nil +} + +// Reference imports to suppress 
errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// SnoozeServiceClient is the client API for SnoozeService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type SnoozeServiceClient interface { + // Creates a `Snooze` that will prevent alerts, which match the provided + // criteria, from being opened. The `Snooze` applies for a specific time + // interval. + CreateSnooze(ctx context.Context, in *CreateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) + // Lists the `Snooze`s associated with a project. Can optionally pass in + // `filter`, which specifies predicates to match `Snooze`s. + ListSnoozes(ctx context.Context, in *ListSnoozesRequest, opts ...grpc.CallOption) (*ListSnoozesResponse, error) + // Retrieves a `Snooze` by `name`. + GetSnooze(ctx context.Context, in *GetSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) + // Updates a `Snooze`, identified by its `name`, with the parameters in the + // given `Snooze` object. + UpdateSnooze(ctx context.Context, in *UpdateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) +} + +type snoozeServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewSnoozeServiceClient(cc grpc.ClientConnInterface) SnoozeServiceClient { + return &snoozeServiceClient{cc} +} + +func (c *snoozeServiceClient) CreateSnooze(ctx context.Context, in *CreateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) { + out := new(Snooze) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/CreateSnooze", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snoozeServiceClient) ListSnoozes(ctx context.Context, in *ListSnoozesRequest, opts ...grpc.CallOption) (*ListSnoozesResponse, error) { + out := new(ListSnoozesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/ListSnoozes", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snoozeServiceClient) GetSnooze(ctx context.Context, in *GetSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) { + out := new(Snooze) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/GetSnooze", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snoozeServiceClient) UpdateSnooze(ctx context.Context, in *UpdateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) { + out := new(Snooze) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/UpdateSnooze", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SnoozeServiceServer is the server API for SnoozeService service. +type SnoozeServiceServer interface { + // Creates a `Snooze` that will prevent alerts, which match the provided + // criteria, from being opened. The `Snooze` applies for a specific time + // interval. + CreateSnooze(context.Context, *CreateSnoozeRequest) (*Snooze, error) + // Lists the `Snooze`s associated with a project. Can optionally pass in + // `filter`, which specifies predicates to match `Snooze`s. + ListSnoozes(context.Context, *ListSnoozesRequest) (*ListSnoozesResponse, error) + // Retrieves a `Snooze` by `name`. 
+ GetSnooze(context.Context, *GetSnoozeRequest) (*Snooze, error) + // Updates a `Snooze`, identified by its `name`, with the parameters in the + // given `Snooze` object. + UpdateSnooze(context.Context, *UpdateSnoozeRequest) (*Snooze, error) +} + +// UnimplementedSnoozeServiceServer can be embedded to have forward compatible implementations. +type UnimplementedSnoozeServiceServer struct { +} + +func (*UnimplementedSnoozeServiceServer) CreateSnooze(context.Context, *CreateSnoozeRequest) (*Snooze, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateSnooze not implemented") +} +func (*UnimplementedSnoozeServiceServer) ListSnoozes(context.Context, *ListSnoozesRequest) (*ListSnoozesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListSnoozes not implemented") +} +func (*UnimplementedSnoozeServiceServer) GetSnooze(context.Context, *GetSnoozeRequest) (*Snooze, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSnooze not implemented") +} +func (*UnimplementedSnoozeServiceServer) UpdateSnooze(context.Context, *UpdateSnoozeRequest) (*Snooze, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateSnooze not implemented") +} + +func RegisterSnoozeServiceServer(s *grpc.Server, srv SnoozeServiceServer) { + s.RegisterService(&_SnoozeService_serviceDesc, srv) +} + +func _SnoozeService_CreateSnooze_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSnoozeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnoozeServiceServer).CreateSnooze(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.SnoozeService/CreateSnooze", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnoozeServiceServer).CreateSnooze(ctx, req.(*CreateSnoozeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SnoozeService_ListSnoozes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSnoozesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnoozeServiceServer).ListSnoozes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.SnoozeService/ListSnoozes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnoozeServiceServer).ListSnoozes(ctx, req.(*ListSnoozesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SnoozeService_GetSnooze_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSnoozeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnoozeServiceServer).GetSnooze(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.SnoozeService/GetSnooze", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnoozeServiceServer).GetSnooze(ctx, req.(*GetSnoozeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SnoozeService_UpdateSnooze_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(UpdateSnoozeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnoozeServiceServer).UpdateSnooze(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.SnoozeService/UpdateSnooze", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnoozeServiceServer).UpdateSnooze(ctx, req.(*UpdateSnoozeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _SnoozeService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.SnoozeService", + HandlerType: (*SnoozeServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateSnooze", + Handler: _SnoozeService_CreateSnooze_Handler, + }, + { + MethodName: "ListSnoozes", + Handler: _SnoozeService_ListSnoozes_Handler, + }, + { + MethodName: "GetSnooze", + Handler: _SnoozeService_GetSnooze_Handler, + }, + { + MethodName: "UpdateSnooze", + Handler: _SnoozeService_UpdateSnooze_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/snooze_service.proto", +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go new file mode 100644 index 0000000000..5a55ecc665 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go @@ -0,0 +1,188 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/span_context.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The context of a span. This is attached to an +// [Exemplar][google.api.Distribution.Exemplar] +// in [Distribution][google.api.Distribution] values during aggregation. +// +// It contains the name of a span with format: +// +// projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID] +type SpanContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource name of the span. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID] + // + // `[TRACE_ID]` is a unique identifier for a trace within a project; + // it is a 32-character hexadecimal encoding of a 16-byte array. + // + // `[SPAN_ID]` is a unique identifier for a span within a trace; it + // is a 16-character hexadecimal encoding of an 8-byte array. 
+ SpanName string `protobuf:"bytes,1,opt,name=span_name,json=spanName,proto3" json:"span_name,omitempty"` +} + +func (x *SpanContext) Reset() { + *x = SpanContext{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_span_context_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SpanContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SpanContext) ProtoMessage() {} + +func (x *SpanContext) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_span_context_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SpanContext.ProtoReflect.Descriptor instead. +func (*SpanContext) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_span_context_proto_rawDescGZIP(), []int{0} +} + +func (x *SpanContext) GetSpanName() string { + if x != nil { + return x.SpanName + } + return "" +} + +var File_google_monitoring_v3_span_context_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_span_context_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x22, + 0x2a, 0x0a, 0x0b, 0x53, 0x70, 0x61, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x73, 0x70, 0x61, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0xcb, 0x01, 0x0a, 0x18, + 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x53, 0x70, 0x61, 0x6e, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, + 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, + 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, + 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_google_monitoring_v3_span_context_proto_rawDescOnce sync.Once + file_google_monitoring_v3_span_context_proto_rawDescData = file_google_monitoring_v3_span_context_proto_rawDesc +) + +func file_google_monitoring_v3_span_context_proto_rawDescGZIP() []byte { + 
file_google_monitoring_v3_span_context_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_span_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_span_context_proto_rawDescData) + }) + return file_google_monitoring_v3_span_context_proto_rawDescData +} + +var file_google_monitoring_v3_span_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_monitoring_v3_span_context_proto_goTypes = []any{ + (*SpanContext)(nil), // 0: google.monitoring.v3.SpanContext +} +var file_google_monitoring_v3_span_context_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_span_context_proto_init() } +func file_google_monitoring_v3_span_context_proto_init() { + if File_google_monitoring_v3_span_context_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_span_context_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*SpanContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_span_context_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_span_context_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_span_context_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_span_context_proto_msgTypes, + }.Build() + File_google_monitoring_v3_span_context_proto = out.File + file_google_monitoring_v3_span_context_proto_rawDesc = nil + file_google_monitoring_v3_span_context_proto_goTypes = nil + file_google_monitoring_v3_span_context_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go new file mode 100644 index 0000000000..e0b9e4a385 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go @@ -0,0 +1,2726 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/uptime.proto + +package monitoringpb + +import ( + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The regions from which an Uptime check can be run. +type UptimeCheckRegion int32 + +const ( + // Default value if no region is specified. Will result in Uptime checks + // running from all regions. + UptimeCheckRegion_REGION_UNSPECIFIED UptimeCheckRegion = 0 + // Allows checks to run from locations within the United States of America. + UptimeCheckRegion_USA UptimeCheckRegion = 1 + // Allows checks to run from locations within the continent of Europe. + UptimeCheckRegion_EUROPE UptimeCheckRegion = 2 + // Allows checks to run from locations within the continent of South + // America. + UptimeCheckRegion_SOUTH_AMERICA UptimeCheckRegion = 3 + // Allows checks to run from locations within the Asia Pacific area (ex: + // Singapore). + UptimeCheckRegion_ASIA_PACIFIC UptimeCheckRegion = 4 + // Allows checks to run from locations within the western United States of + // America + UptimeCheckRegion_USA_OREGON UptimeCheckRegion = 5 + // Allows checks to run from locations within the central United States of + // America + UptimeCheckRegion_USA_IOWA UptimeCheckRegion = 6 + // Allows checks to run from locations within the eastern United States of + // America + UptimeCheckRegion_USA_VIRGINIA UptimeCheckRegion = 7 +) + +// Enum value maps for UptimeCheckRegion. +var ( + UptimeCheckRegion_name = map[int32]string{ + 0: "REGION_UNSPECIFIED", + 1: "USA", + 2: "EUROPE", + 3: "SOUTH_AMERICA", + 4: "ASIA_PACIFIC", + 5: "USA_OREGON", + 6: "USA_IOWA", + 7: "USA_VIRGINIA", + } + UptimeCheckRegion_value = map[string]int32{ + "REGION_UNSPECIFIED": 0, + "USA": 1, + "EUROPE": 2, + "SOUTH_AMERICA": 3, + "ASIA_PACIFIC": 4, + "USA_OREGON": 5, + "USA_IOWA": 6, + "USA_VIRGINIA": 7, + } +) + +func (x UptimeCheckRegion) Enum() *UptimeCheckRegion { + p := new(UptimeCheckRegion) + *p = x + return p +} + +func (x UptimeCheckRegion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckRegion) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[0].Descriptor() +} + +func (UptimeCheckRegion) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[0] +} + +func (x UptimeCheckRegion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckRegion.Descriptor instead. +func (UptimeCheckRegion) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{0} +} + +// The supported resource types that can be used as values of +// `group_resource.resource_type`. +// `INSTANCE` includes `gce_instance` and `aws_ec2_instance` resource types. 
+// The resource types `gae_app` and `uptime_url` are not valid here because +// group checks on App Engine modules and URLs are not allowed. +type GroupResourceType int32 + +const ( + // Default value (not valid). + GroupResourceType_RESOURCE_TYPE_UNSPECIFIED GroupResourceType = 0 + // A group of instances from Google Cloud Platform (GCP) or + // Amazon Web Services (AWS). + GroupResourceType_INSTANCE GroupResourceType = 1 + // A group of Amazon ELB load balancers. + GroupResourceType_AWS_ELB_LOAD_BALANCER GroupResourceType = 2 +) + +// Enum value maps for GroupResourceType. +var ( + GroupResourceType_name = map[int32]string{ + 0: "RESOURCE_TYPE_UNSPECIFIED", + 1: "INSTANCE", + 2: "AWS_ELB_LOAD_BALANCER", + } + GroupResourceType_value = map[string]int32{ + "RESOURCE_TYPE_UNSPECIFIED": 0, + "INSTANCE": 1, + "AWS_ELB_LOAD_BALANCER": 2, + } +) + +func (x GroupResourceType) Enum() *GroupResourceType { + p := new(GroupResourceType) + *p = x + return p +} + +func (x GroupResourceType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GroupResourceType) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[1].Descriptor() +} + +func (GroupResourceType) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[1] +} + +func (x GroupResourceType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GroupResourceType.Descriptor instead. +func (GroupResourceType) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1} +} + +// Operational states for an internal checker. +type InternalChecker_State int32 + +const ( + // An internal checker should never be in the unspecified state. + InternalChecker_UNSPECIFIED InternalChecker_State = 0 + // The checker is being created, provisioned, and configured. A checker in + // this state can be returned by `ListInternalCheckers` or + // `GetInternalChecker`, as well as by examining the [long running + // Operation](https://cloud.google.com/apis/design/design_patterns#long_running_operations) + // that created it. + InternalChecker_CREATING InternalChecker_State = 1 + // The checker is running and available for use. A checker in this state + // can be returned by `ListInternalCheckers` or `GetInternalChecker` as + // well as by examining the [long running + // Operation](https://cloud.google.com/apis/design/design_patterns#long_running_operations) + // that created it. + // If a checker is being torn down, it is neither visible nor usable, so + // there is no "deleting" or "down" state. + InternalChecker_RUNNING InternalChecker_State = 2 +) + +// Enum value maps for InternalChecker_State. 
+var ( + InternalChecker_State_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "CREATING", + 2: "RUNNING", + } + InternalChecker_State_value = map[string]int32{ + "UNSPECIFIED": 0, + "CREATING": 1, + "RUNNING": 2, + } +) + +func (x InternalChecker_State) Enum() *InternalChecker_State { + p := new(InternalChecker_State) + *p = x + return p +} + +func (x InternalChecker_State) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (InternalChecker_State) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[2].Descriptor() +} + +func (InternalChecker_State) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[2] +} + +func (x InternalChecker_State) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use InternalChecker_State.Descriptor instead. +func (InternalChecker_State) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{0, 0} +} + +// What kind of checkers are available to be used by the check. +type UptimeCheckConfig_CheckerType int32 + +const ( + // The default checker type. Currently converted to `STATIC_IP_CHECKERS` + // on creation, the default conversion behavior may change in the future. + UptimeCheckConfig_CHECKER_TYPE_UNSPECIFIED UptimeCheckConfig_CheckerType = 0 + // `STATIC_IP_CHECKERS` are used for uptime checks that perform egress + // across the public internet. `STATIC_IP_CHECKERS` use the static IP + // addresses returned by `ListUptimeCheckIps`. + UptimeCheckConfig_STATIC_IP_CHECKERS UptimeCheckConfig_CheckerType = 1 + // `VPC_CHECKERS` are used for uptime checks that perform egress using + // Service Directory and private network access. When using `VPC_CHECKERS`, + // the monitored resource type must be `servicedirectory_service`. + UptimeCheckConfig_VPC_CHECKERS UptimeCheckConfig_CheckerType = 3 +) + +// Enum value maps for UptimeCheckConfig_CheckerType. +var ( + UptimeCheckConfig_CheckerType_name = map[int32]string{ + 0: "CHECKER_TYPE_UNSPECIFIED", + 1: "STATIC_IP_CHECKERS", + 3: "VPC_CHECKERS", + } + UptimeCheckConfig_CheckerType_value = map[string]int32{ + "CHECKER_TYPE_UNSPECIFIED": 0, + "STATIC_IP_CHECKERS": 1, + "VPC_CHECKERS": 3, + } +) + +func (x UptimeCheckConfig_CheckerType) Enum() *UptimeCheckConfig_CheckerType { + p := new(UptimeCheckConfig_CheckerType) + *p = x + return p +} + +func (x UptimeCheckConfig_CheckerType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckConfig_CheckerType) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[3].Descriptor() +} + +func (UptimeCheckConfig_CheckerType) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[3] +} + +func (x UptimeCheckConfig_CheckerType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckConfig_CheckerType.Descriptor instead. +func (UptimeCheckConfig_CheckerType) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 0} +} + +// The HTTP request method options. +type UptimeCheckConfig_HttpCheck_RequestMethod int32 + +const ( + // No request method specified. + UptimeCheckConfig_HttpCheck_METHOD_UNSPECIFIED UptimeCheckConfig_HttpCheck_RequestMethod = 0 + // GET request. 
+ UptimeCheckConfig_HttpCheck_GET UptimeCheckConfig_HttpCheck_RequestMethod = 1 + // POST request. + UptimeCheckConfig_HttpCheck_POST UptimeCheckConfig_HttpCheck_RequestMethod = 2 +) + +// Enum value maps for UptimeCheckConfig_HttpCheck_RequestMethod. +var ( + UptimeCheckConfig_HttpCheck_RequestMethod_name = map[int32]string{ + 0: "METHOD_UNSPECIFIED", + 1: "GET", + 2: "POST", + } + UptimeCheckConfig_HttpCheck_RequestMethod_value = map[string]int32{ + "METHOD_UNSPECIFIED": 0, + "GET": 1, + "POST": 2, + } +) + +func (x UptimeCheckConfig_HttpCheck_RequestMethod) Enum() *UptimeCheckConfig_HttpCheck_RequestMethod { + p := new(UptimeCheckConfig_HttpCheck_RequestMethod) + *p = x + return p +} + +func (x UptimeCheckConfig_HttpCheck_RequestMethod) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckConfig_HttpCheck_RequestMethod) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[4].Descriptor() +} + +func (UptimeCheckConfig_HttpCheck_RequestMethod) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[4] +} + +func (x UptimeCheckConfig_HttpCheck_RequestMethod) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck_RequestMethod.Descriptor instead. +func (UptimeCheckConfig_HttpCheck_RequestMethod) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 0} +} + +// Header options corresponding to the content type of a HTTP request body. +type UptimeCheckConfig_HttpCheck_ContentType int32 + +const ( + // No content type specified. + UptimeCheckConfig_HttpCheck_TYPE_UNSPECIFIED UptimeCheckConfig_HttpCheck_ContentType = 0 + // `body` is in URL-encoded form. Equivalent to setting the `Content-Type` + // to `application/x-www-form-urlencoded` in the HTTP request. + UptimeCheckConfig_HttpCheck_URL_ENCODED UptimeCheckConfig_HttpCheck_ContentType = 1 + // `body` is in `custom_content_type` form. Equivalent to setting the + // `Content-Type` to the contents of `custom_content_type` in the HTTP + // request. + UptimeCheckConfig_HttpCheck_USER_PROVIDED UptimeCheckConfig_HttpCheck_ContentType = 2 +) + +// Enum value maps for UptimeCheckConfig_HttpCheck_ContentType. +var ( + UptimeCheckConfig_HttpCheck_ContentType_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "URL_ENCODED", + 2: "USER_PROVIDED", + } + UptimeCheckConfig_HttpCheck_ContentType_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "URL_ENCODED": 1, + "USER_PROVIDED": 2, + } +) + +func (x UptimeCheckConfig_HttpCheck_ContentType) Enum() *UptimeCheckConfig_HttpCheck_ContentType { + p := new(UptimeCheckConfig_HttpCheck_ContentType) + *p = x + return p +} + +func (x UptimeCheckConfig_HttpCheck_ContentType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckConfig_HttpCheck_ContentType) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[5].Descriptor() +} + +func (UptimeCheckConfig_HttpCheck_ContentType) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[5] +} + +func (x UptimeCheckConfig_HttpCheck_ContentType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck_ContentType.Descriptor instead. 
+func (UptimeCheckConfig_HttpCheck_ContentType) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 1} +} + +// An HTTP status code class. +type UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass int32 + +const ( + // Default value that matches no status codes. + UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_UNSPECIFIED UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 0 + // The class of status codes between 100 and 199. + UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_1XX UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 100 + // The class of status codes between 200 and 299. + UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_2XX UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 200 + // The class of status codes between 300 and 399. + UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_3XX UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 300 + // The class of status codes between 400 and 499. + UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_4XX UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 400 + // The class of status codes between 500 and 599. + UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_5XX UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 500 + // The class of all status codes. + UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_ANY UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 1000 +) + +// Enum value maps for UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass. +var ( + UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_name = map[int32]string{ + 0: "STATUS_CLASS_UNSPECIFIED", + 100: "STATUS_CLASS_1XX", + 200: "STATUS_CLASS_2XX", + 300: "STATUS_CLASS_3XX", + 400: "STATUS_CLASS_4XX", + 500: "STATUS_CLASS_5XX", + 1000: "STATUS_CLASS_ANY", + } + UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_value = map[string]int32{ + "STATUS_CLASS_UNSPECIFIED": 0, + "STATUS_CLASS_1XX": 100, + "STATUS_CLASS_2XX": 200, + "STATUS_CLASS_3XX": 300, + "STATUS_CLASS_4XX": 400, + "STATUS_CLASS_5XX": 500, + "STATUS_CLASS_ANY": 1000, + } +) + +func (x UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) Enum() *UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass { + p := new(UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) + *p = x + return p +} + +func (x UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[6].Descriptor() +} + +func (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[6] +} + +func (x UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass.Descriptor instead. +func (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 1, 0} +} + +// Type of authentication. 
+type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType int32 + +const ( + // Default value, will result in OIDC Authentication. + UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType = 0 + // OIDC Authentication + UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_OIDC_TOKEN UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType = 1 +) + +// Enum value maps for UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType. +var ( + UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType_name = map[int32]string{ + 0: "SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED", + 1: "OIDC_TOKEN", + } + UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType_value = map[string]int32{ + "SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED": 0, + "OIDC_TOKEN": 1, + } +) + +func (x UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Enum() *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType { + p := new(UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) + *p = x + return p +} + +func (x UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[7].Descriptor() +} + +func (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[7] +} + +func (x UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType.Descriptor instead. +func (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 2, 0} +} + +// Options to perform content matching. +type UptimeCheckConfig_ContentMatcher_ContentMatcherOption int32 + +const ( + // No content matcher type specified (maintained for backward + // compatibility, but deprecated for future use). + // Treated as `CONTAINS_STRING`. + UptimeCheckConfig_ContentMatcher_CONTENT_MATCHER_OPTION_UNSPECIFIED UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 0 + // Selects substring matching. The match succeeds if the output contains + // the `content` string. This is the default value for checks without + // a `matcher` option, or where the value of `matcher` is + // `CONTENT_MATCHER_OPTION_UNSPECIFIED`. + UptimeCheckConfig_ContentMatcher_CONTAINS_STRING UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 1 + // Selects negation of substring matching. The match succeeds if the + // output does _NOT_ contain the `content` string. + UptimeCheckConfig_ContentMatcher_NOT_CONTAINS_STRING UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 2 + // Selects regular-expression matching. 
The match succeeds if the output + // matches the regular expression specified in the `content` string. + // Regex matching is only supported for HTTP/HTTPS checks. + UptimeCheckConfig_ContentMatcher_MATCHES_REGEX UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 3 + // Selects negation of regular-expression matching. The match succeeds if + // the output does _NOT_ match the regular expression specified in the + // `content` string. Regex matching is only supported for HTTP/HTTPS + // checks. + UptimeCheckConfig_ContentMatcher_NOT_MATCHES_REGEX UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 4 + // Selects JSONPath matching. See `JsonPathMatcher` for details on when + // the match succeeds. JSONPath matching is only supported for HTTP/HTTPS + // checks. + UptimeCheckConfig_ContentMatcher_MATCHES_JSON_PATH UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 5 + // Selects JSONPath matching. See `JsonPathMatcher` for details on when + // the match succeeds. Succeeds when output does _NOT_ match as specified. + // JSONPath is only supported for HTTP/HTTPS checks. + UptimeCheckConfig_ContentMatcher_NOT_MATCHES_JSON_PATH UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 6 +) + +// Enum value maps for UptimeCheckConfig_ContentMatcher_ContentMatcherOption. +var ( + UptimeCheckConfig_ContentMatcher_ContentMatcherOption_name = map[int32]string{ + 0: "CONTENT_MATCHER_OPTION_UNSPECIFIED", + 1: "CONTAINS_STRING", + 2: "NOT_CONTAINS_STRING", + 3: "MATCHES_REGEX", + 4: "NOT_MATCHES_REGEX", + 5: "MATCHES_JSON_PATH", + 6: "NOT_MATCHES_JSON_PATH", + } + UptimeCheckConfig_ContentMatcher_ContentMatcherOption_value = map[string]int32{ + "CONTENT_MATCHER_OPTION_UNSPECIFIED": 0, + "CONTAINS_STRING": 1, + "NOT_CONTAINS_STRING": 2, + "MATCHES_REGEX": 3, + "NOT_MATCHES_REGEX": 4, + "MATCHES_JSON_PATH": 5, + "NOT_MATCHES_JSON_PATH": 6, + } +) + +func (x UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Enum() *UptimeCheckConfig_ContentMatcher_ContentMatcherOption { + p := new(UptimeCheckConfig_ContentMatcher_ContentMatcherOption) + *p = x + return p +} + +func (x UptimeCheckConfig_ContentMatcher_ContentMatcherOption) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[8].Descriptor() +} + +func (UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[8] +} + +func (x UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckConfig_ContentMatcher_ContentMatcherOption.Descriptor instead. +func (UptimeCheckConfig_ContentMatcher_ContentMatcherOption) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4, 0} +} + +// Options to perform JSONPath content matching. +type UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption int32 + +const ( + // No JSONPath matcher type specified (not valid). + UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JSON_PATH_MATCHER_OPTION_UNSPECIFIED UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption = 0 + // Selects 'exact string' matching. The match succeeds if the content at + // the `json_path` within the output is exactly the same as the + // `content` string. 
+ UptimeCheckConfig_ContentMatcher_JsonPathMatcher_EXACT_MATCH UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption = 1 + // Selects regular-expression matching. The match succeeds if the + // content at the `json_path` within the output matches the regular + // expression specified in the `content` string. + UptimeCheckConfig_ContentMatcher_JsonPathMatcher_REGEX_MATCH UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption = 2 +) + +// Enum value maps for UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption. +var ( + UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption_name = map[int32]string{ + 0: "JSON_PATH_MATCHER_OPTION_UNSPECIFIED", + 1: "EXACT_MATCH", + 2: "REGEX_MATCH", + } + UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption_value = map[string]int32{ + "JSON_PATH_MATCHER_OPTION_UNSPECIFIED": 0, + "EXACT_MATCH": 1, + "REGEX_MATCH": 2, + } +) + +func (x UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Enum() *UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption { + p := new(UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) + *p = x + return p +} + +func (x UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Descriptor() protoreflect.EnumDescriptor { + return file_google_monitoring_v3_uptime_proto_enumTypes[9].Descriptor() +} + +func (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Type() protoreflect.EnumType { + return &file_google_monitoring_v3_uptime_proto_enumTypes[9] +} + +func (x UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption.Descriptor instead. +func (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) EnumDescriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4, 0, 0} +} + +// An internal checker allows Uptime checks to run on private/internal GCP +// resources. +// +// Deprecated: Marked as deprecated in google/monitoring/v3/uptime.proto. +type InternalChecker struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A unique resource name for this InternalChecker. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/internalCheckers/[INTERNAL_CHECKER_ID] + // + // `[PROJECT_ID_OR_NUMBER]` is the Cloud Monitoring Metrics Scope project for + // the Uptime check config associated with the internal checker. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The checker's human-readable name. The display name + // should be unique within a Cloud Monitoring Metrics Scope in order to make + // it easier to identify; however, uniqueness is not enforced. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The [GCP VPC network](https://cloud.google.com/vpc/docs/vpc) where the + // internal resource lives (ex: "default"). + Network string `protobuf:"bytes,3,opt,name=network,proto3" json:"network,omitempty"` + // The GCP zone the Uptime check should egress from. 
Only respected for + // internal Uptime checks, where internal_network is specified. + GcpZone string `protobuf:"bytes,4,opt,name=gcp_zone,json=gcpZone,proto3" json:"gcp_zone,omitempty"` + // The GCP project ID where the internal checker lives. Not necessary + // the same as the Metrics Scope project. + PeerProjectId string `protobuf:"bytes,6,opt,name=peer_project_id,json=peerProjectId,proto3" json:"peer_project_id,omitempty"` + // The current operational state of the internal checker. + State InternalChecker_State `protobuf:"varint,7,opt,name=state,proto3,enum=google.monitoring.v3.InternalChecker_State" json:"state,omitempty"` +} + +func (x *InternalChecker) Reset() { + *x = InternalChecker{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InternalChecker) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InternalChecker) ProtoMessage() {} + +func (x *InternalChecker) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InternalChecker.ProtoReflect.Descriptor instead. +func (*InternalChecker) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{0} +} + +func (x *InternalChecker) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *InternalChecker) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *InternalChecker) GetNetwork() string { + if x != nil { + return x.Network + } + return "" +} + +func (x *InternalChecker) GetGcpZone() string { + if x != nil { + return x.GcpZone + } + return "" +} + +func (x *InternalChecker) GetPeerProjectId() string { + if x != nil { + return x.PeerProjectId + } + return "" +} + +func (x *InternalChecker) GetState() InternalChecker_State { + if x != nil { + return x.State + } + return InternalChecker_UNSPECIFIED +} + +// Describes a Synthetic Monitor to be invoked by Uptime. +type SyntheticMonitorTarget struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies a Synthetic Monitor's execution stack. + // + // Types that are assignable to Target: + // + // *SyntheticMonitorTarget_CloudFunctionV2 + Target isSyntheticMonitorTarget_Target `protobuf_oneof:"target"` +} + +func (x *SyntheticMonitorTarget) Reset() { + *x = SyntheticMonitorTarget{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SyntheticMonitorTarget) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyntheticMonitorTarget) ProtoMessage() {} + +func (x *SyntheticMonitorTarget) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyntheticMonitorTarget.ProtoReflect.Descriptor instead. 
+func (*SyntheticMonitorTarget) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1} +} + +func (m *SyntheticMonitorTarget) GetTarget() isSyntheticMonitorTarget_Target { + if m != nil { + return m.Target + } + return nil +} + +func (x *SyntheticMonitorTarget) GetCloudFunctionV2() *SyntheticMonitorTarget_CloudFunctionV2Target { + if x, ok := x.GetTarget().(*SyntheticMonitorTarget_CloudFunctionV2); ok { + return x.CloudFunctionV2 + } + return nil +} + +type isSyntheticMonitorTarget_Target interface { + isSyntheticMonitorTarget_Target() +} + +type SyntheticMonitorTarget_CloudFunctionV2 struct { + // Target a Synthetic Monitor GCFv2 instance. + CloudFunctionV2 *SyntheticMonitorTarget_CloudFunctionV2Target `protobuf:"bytes,1,opt,name=cloud_function_v2,json=cloudFunctionV2,proto3,oneof"` +} + +func (*SyntheticMonitorTarget_CloudFunctionV2) isSyntheticMonitorTarget_Target() {} + +// This message configures which resources and services to monitor for +// availability. +type UptimeCheckConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier. A unique resource name for this Uptime check configuration. The + // format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] + // + // `[PROJECT_ID_OR_NUMBER]` is the Workspace host project associated with the + // Uptime check. + // + // This field should be omitted when creating the Uptime check configuration; + // on create, the resource name is assigned by the server and included in the + // response. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A human-friendly name for the Uptime check configuration. The display name + // should be unique within a Cloud Monitoring Workspace in order to make it + // easier to identify; however, uniqueness is not enforced. Required. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The resource the check is checking. Required. + // + // Types that are assignable to Resource: + // + // *UptimeCheckConfig_MonitoredResource + // *UptimeCheckConfig_ResourceGroup_ + // *UptimeCheckConfig_SyntheticMonitor + Resource isUptimeCheckConfig_Resource `protobuf_oneof:"resource"` + // The type of Uptime check request. + // + // Types that are assignable to CheckRequestType: + // + // *UptimeCheckConfig_HttpCheck_ + // *UptimeCheckConfig_TcpCheck_ + CheckRequestType isUptimeCheckConfig_CheckRequestType `protobuf_oneof:"check_request_type"` + // How often, in seconds, the Uptime check is performed. + // Currently, the only supported values are `60s` (1 minute), `300s` + // (5 minutes), `600s` (10 minutes), and `900s` (15 minutes). Optional, + // defaults to `60s`. + Period *durationpb.Duration `protobuf:"bytes,7,opt,name=period,proto3" json:"period,omitempty"` + // The maximum amount of time to wait for the request to complete (must be + // between 1 and 60 seconds). Required. + Timeout *durationpb.Duration `protobuf:"bytes,8,opt,name=timeout,proto3" json:"timeout,omitempty"` + // The content that is expected to appear in the data returned by the target + // server against which the check is run. Currently, only the first entry + // in the `content_matchers` list is supported, and additional entries will + // be ignored. This field is optional and should only be specified if a + // content match is required as part of the/ Uptime check. 
+ ContentMatchers []*UptimeCheckConfig_ContentMatcher `protobuf:"bytes,9,rep,name=content_matchers,json=contentMatchers,proto3" json:"content_matchers,omitempty"` + // The type of checkers to use to execute the Uptime check. + CheckerType UptimeCheckConfig_CheckerType `protobuf:"varint,17,opt,name=checker_type,json=checkerType,proto3,enum=google.monitoring.v3.UptimeCheckConfig_CheckerType" json:"checker_type,omitempty"` + // The list of regions from which the check will be run. + // Some regions contain one location, and others contain more than one. + // If this field is specified, enough regions must be provided to include a + // minimum of 3 locations. Not specifying this field will result in Uptime + // checks running from all available regions. + SelectedRegions []UptimeCheckRegion `protobuf:"varint,10,rep,packed,name=selected_regions,json=selectedRegions,proto3,enum=google.monitoring.v3.UptimeCheckRegion" json:"selected_regions,omitempty"` + // If this is `true`, then checks are made only from the 'internal_checkers'. + // If it is `false`, then checks are made only from the 'selected_regions'. + // It is an error to provide 'selected_regions' when is_internal is `true`, + // or to provide 'internal_checkers' when is_internal is `false`. + // + // Deprecated: Marked as deprecated in google/monitoring/v3/uptime.proto. + IsInternal bool `protobuf:"varint,15,opt,name=is_internal,json=isInternal,proto3" json:"is_internal,omitempty"` + // The internal checkers that this check will egress from. If `is_internal` is + // `true` and this list is empty, the check will egress from all the + // InternalCheckers configured for the project that owns this + // `UptimeCheckConfig`. + // + // Deprecated: Marked as deprecated in google/monitoring/v3/uptime.proto. + InternalCheckers []*InternalChecker `protobuf:"bytes,14,rep,name=internal_checkers,json=internalCheckers,proto3" json:"internal_checkers,omitempty"` + // User-supplied key/value data to be used for organizing and + // identifying the `UptimeCheckConfig` objects. + // + // The field can contain up to 64 entries. Each key and value is limited to + // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and + // values can contain only lowercase letters, numerals, underscores, and + // dashes. Keys must begin with a letter. + UserLabels map[string]string `protobuf:"bytes,20,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *UptimeCheckConfig) Reset() { + *x = UptimeCheckConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig) ProtoMessage() {} + +func (x *UptimeCheckConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig.ProtoReflect.Descriptor instead. 
+func (*UptimeCheckConfig) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2} +} + +func (x *UptimeCheckConfig) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *UptimeCheckConfig) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (m *UptimeCheckConfig) GetResource() isUptimeCheckConfig_Resource { + if m != nil { + return m.Resource + } + return nil +} + +func (x *UptimeCheckConfig) GetMonitoredResource() *monitoredres.MonitoredResource { + if x, ok := x.GetResource().(*UptimeCheckConfig_MonitoredResource); ok { + return x.MonitoredResource + } + return nil +} + +func (x *UptimeCheckConfig) GetResourceGroup() *UptimeCheckConfig_ResourceGroup { + if x, ok := x.GetResource().(*UptimeCheckConfig_ResourceGroup_); ok { + return x.ResourceGroup + } + return nil +} + +func (x *UptimeCheckConfig) GetSyntheticMonitor() *SyntheticMonitorTarget { + if x, ok := x.GetResource().(*UptimeCheckConfig_SyntheticMonitor); ok { + return x.SyntheticMonitor + } + return nil +} + +func (m *UptimeCheckConfig) GetCheckRequestType() isUptimeCheckConfig_CheckRequestType { + if m != nil { + return m.CheckRequestType + } + return nil +} + +func (x *UptimeCheckConfig) GetHttpCheck() *UptimeCheckConfig_HttpCheck { + if x, ok := x.GetCheckRequestType().(*UptimeCheckConfig_HttpCheck_); ok { + return x.HttpCheck + } + return nil +} + +func (x *UptimeCheckConfig) GetTcpCheck() *UptimeCheckConfig_TcpCheck { + if x, ok := x.GetCheckRequestType().(*UptimeCheckConfig_TcpCheck_); ok { + return x.TcpCheck + } + return nil +} + +func (x *UptimeCheckConfig) GetPeriod() *durationpb.Duration { + if x != nil { + return x.Period + } + return nil +} + +func (x *UptimeCheckConfig) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout + } + return nil +} + +func (x *UptimeCheckConfig) GetContentMatchers() []*UptimeCheckConfig_ContentMatcher { + if x != nil { + return x.ContentMatchers + } + return nil +} + +func (x *UptimeCheckConfig) GetCheckerType() UptimeCheckConfig_CheckerType { + if x != nil { + return x.CheckerType + } + return UptimeCheckConfig_CHECKER_TYPE_UNSPECIFIED +} + +func (x *UptimeCheckConfig) GetSelectedRegions() []UptimeCheckRegion { + if x != nil { + return x.SelectedRegions + } + return nil +} + +// Deprecated: Marked as deprecated in google/monitoring/v3/uptime.proto. +func (x *UptimeCheckConfig) GetIsInternal() bool { + if x != nil { + return x.IsInternal + } + return false +} + +// Deprecated: Marked as deprecated in google/monitoring/v3/uptime.proto. +func (x *UptimeCheckConfig) GetInternalCheckers() []*InternalChecker { + if x != nil { + return x.InternalCheckers + } + return nil +} + +func (x *UptimeCheckConfig) GetUserLabels() map[string]string { + if x != nil { + return x.UserLabels + } + return nil +} + +type isUptimeCheckConfig_Resource interface { + isUptimeCheckConfig_Resource() +} + +type UptimeCheckConfig_MonitoredResource struct { + // The [monitored + // resource](https://cloud.google.com/monitoring/api/resources) associated + // with the configuration. 
+ // The following monitored resource types are valid for this field: + // + // `uptime_url`, + // `gce_instance`, + // `gae_app`, + // `aws_ec2_instance`, + // `aws_elb_load_balancer` + // `k8s_service` + // `servicedirectory_service` + // `cloud_run_revision` + MonitoredResource *monitoredres.MonitoredResource `protobuf:"bytes,3,opt,name=monitored_resource,json=monitoredResource,proto3,oneof"` +} + +type UptimeCheckConfig_ResourceGroup_ struct { + // The group resource associated with the configuration. + ResourceGroup *UptimeCheckConfig_ResourceGroup `protobuf:"bytes,4,opt,name=resource_group,json=resourceGroup,proto3,oneof"` +} + +type UptimeCheckConfig_SyntheticMonitor struct { + // Specifies a Synthetic Monitor to invoke. + SyntheticMonitor *SyntheticMonitorTarget `protobuf:"bytes,21,opt,name=synthetic_monitor,json=syntheticMonitor,proto3,oneof"` +} + +func (*UptimeCheckConfig_MonitoredResource) isUptimeCheckConfig_Resource() {} + +func (*UptimeCheckConfig_ResourceGroup_) isUptimeCheckConfig_Resource() {} + +func (*UptimeCheckConfig_SyntheticMonitor) isUptimeCheckConfig_Resource() {} + +type isUptimeCheckConfig_CheckRequestType interface { + isUptimeCheckConfig_CheckRequestType() +} + +type UptimeCheckConfig_HttpCheck_ struct { + // Contains information needed to make an HTTP or HTTPS check. + HttpCheck *UptimeCheckConfig_HttpCheck `protobuf:"bytes,5,opt,name=http_check,json=httpCheck,proto3,oneof"` +} + +type UptimeCheckConfig_TcpCheck_ struct { + // Contains information needed to make a TCP check. + TcpCheck *UptimeCheckConfig_TcpCheck `protobuf:"bytes,6,opt,name=tcp_check,json=tcpCheck,proto3,oneof"` +} + +func (*UptimeCheckConfig_HttpCheck_) isUptimeCheckConfig_CheckRequestType() {} + +func (*UptimeCheckConfig_TcpCheck_) isUptimeCheckConfig_CheckRequestType() {} + +// Contains the region, location, and list of IP +// addresses where checkers in the location run from. +type UptimeCheckIp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A broad region category in which the IP address is located. + Region UptimeCheckRegion `protobuf:"varint,1,opt,name=region,proto3,enum=google.monitoring.v3.UptimeCheckRegion" json:"region,omitempty"` + // A more specific location within the region that typically encodes + // a particular city/town/metro (and its containing state/province or country) + // within the broader umbrella region category. + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + // The IP address from which the Uptime check originates. This is a fully + // specified IP address (not an IP address range). Most IP addresses, as of + // this publication, are in IPv4 format; however, one should not rely on the + // IP addresses being in IPv4 format indefinitely, and should support + // interpreting this field in either IPv4 or IPv6 format. 
+ IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` +} + +func (x *UptimeCheckIp) Reset() { + *x = UptimeCheckIp{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckIp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckIp) ProtoMessage() {} + +func (x *UptimeCheckIp) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckIp.ProtoReflect.Descriptor instead. +func (*UptimeCheckIp) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{3} +} + +func (x *UptimeCheckIp) GetRegion() UptimeCheckRegion { + if x != nil { + return x.Region + } + return UptimeCheckRegion_REGION_UNSPECIFIED +} + +func (x *UptimeCheckIp) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *UptimeCheckIp) GetIpAddress() string { + if x != nil { + return x.IpAddress + } + return "" +} + +// A Synthetic Monitor deployed to a Cloud Functions V2 instance. +type SyntheticMonitorTarget_CloudFunctionV2Target struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Fully qualified GCFv2 resource name + // i.e. `projects/{project}/locations/{location}/functions/{function}` + // Required. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Output only. The `cloud_run_revision` Monitored Resource associated with + // the GCFv2. The Synthetic Monitor execution results (metrics, logs, and + // spans) are reported against this Monitored Resource. This field is output + // only. + CloudRunRevision *monitoredres.MonitoredResource `protobuf:"bytes,2,opt,name=cloud_run_revision,json=cloudRunRevision,proto3" json:"cloud_run_revision,omitempty"` +} + +func (x *SyntheticMonitorTarget_CloudFunctionV2Target) Reset() { + *x = SyntheticMonitorTarget_CloudFunctionV2Target{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SyntheticMonitorTarget_CloudFunctionV2Target) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyntheticMonitorTarget_CloudFunctionV2Target) ProtoMessage() {} + +func (x *SyntheticMonitorTarget_CloudFunctionV2Target) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyntheticMonitorTarget_CloudFunctionV2Target.ProtoReflect.Descriptor instead. 
+func (*SyntheticMonitorTarget_CloudFunctionV2Target) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *SyntheticMonitorTarget_CloudFunctionV2Target) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SyntheticMonitorTarget_CloudFunctionV2Target) GetCloudRunRevision() *monitoredres.MonitoredResource { + if x != nil { + return x.CloudRunRevision + } + return nil +} + +// The resource submessage for group checks. It can be used instead of a +// monitored resource, when multiple resources are being monitored. +type UptimeCheckConfig_ResourceGroup struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The group of resources being monitored. Should be only the `[GROUP_ID]`, + // and not the full-path + // `projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]`. + GroupId string `protobuf:"bytes,1,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"` + // The resource type of the group members. + ResourceType GroupResourceType `protobuf:"varint,2,opt,name=resource_type,json=resourceType,proto3,enum=google.monitoring.v3.GroupResourceType" json:"resource_type,omitempty"` +} + +func (x *UptimeCheckConfig_ResourceGroup) Reset() { + *x = UptimeCheckConfig_ResourceGroup{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_ResourceGroup) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_ResourceGroup) ProtoMessage() {} + +func (x *UptimeCheckConfig_ResourceGroup) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_ResourceGroup.ProtoReflect.Descriptor instead. +func (*UptimeCheckConfig_ResourceGroup) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *UptimeCheckConfig_ResourceGroup) GetGroupId() string { + if x != nil { + return x.GroupId + } + return "" +} + +func (x *UptimeCheckConfig_ResourceGroup) GetResourceType() GroupResourceType { + if x != nil { + return x.ResourceType + } + return GroupResourceType_RESOURCE_TYPE_UNSPECIFIED +} + +// Information involved in sending ICMP pings alongside public HTTP/TCP +// checks. For HTTP, the pings are performed for each part of the redirect +// chain. +type UptimeCheckConfig_PingConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Number of ICMP pings. A maximum of 3 ICMP pings is currently supported. 
+ PingsCount int32 `protobuf:"varint,1,opt,name=pings_count,json=pingsCount,proto3" json:"pings_count,omitempty"` +} + +func (x *UptimeCheckConfig_PingConfig) Reset() { + *x = UptimeCheckConfig_PingConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_PingConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_PingConfig) ProtoMessage() {} + +func (x *UptimeCheckConfig_PingConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_PingConfig.ProtoReflect.Descriptor instead. +func (*UptimeCheckConfig_PingConfig) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 1} +} + +func (x *UptimeCheckConfig_PingConfig) GetPingsCount() int32 { + if x != nil { + return x.PingsCount + } + return 0 +} + +// Information involved in an HTTP/HTTPS Uptime check request. +type UptimeCheckConfig_HttpCheck struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The HTTP request method to use for the check. If set to + // `METHOD_UNSPECIFIED` then `request_method` defaults to `GET`. + RequestMethod UptimeCheckConfig_HttpCheck_RequestMethod `protobuf:"varint,8,opt,name=request_method,json=requestMethod,proto3,enum=google.monitoring.v3.UptimeCheckConfig_HttpCheck_RequestMethod" json:"request_method,omitempty"` + // If `true`, use HTTPS instead of HTTP to run the check. + UseSsl bool `protobuf:"varint,1,opt,name=use_ssl,json=useSsl,proto3" json:"use_ssl,omitempty"` + // Optional (defaults to "/"). The path to the page against which to run + // the check. Will be combined with the `host` (specified within the + // `monitored_resource`) and `port` to construct the full URL. If the + // provided path does not begin with "/", a "/" will be prepended + // automatically. + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + // Optional (defaults to 80 when `use_ssl` is `false`, and 443 when + // `use_ssl` is `true`). The TCP port on the HTTP server against which to + // run the check. Will be combined with host (specified within the + // `monitored_resource`) and `path` to construct the full URL. + Port int32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` + // The authentication information. Optional when creating an HTTP check; + // defaults to empty. + // Do not set both `auth_method` and `auth_info`. + AuthInfo *UptimeCheckConfig_HttpCheck_BasicAuthentication `protobuf:"bytes,4,opt,name=auth_info,json=authInfo,proto3" json:"auth_info,omitempty"` + // Boolean specifying whether to encrypt the header information. + // Encryption should be specified for any headers related to authentication + // that you do not wish to be seen when retrieving the configuration. The + // server will be responsible for encrypting the headers. 
+ // On Get/List calls, if `mask_headers` is set to `true` then the headers + // will be obscured with `******.` + MaskHeaders bool `protobuf:"varint,5,opt,name=mask_headers,json=maskHeaders,proto3" json:"mask_headers,omitempty"` + // The list of headers to send as part of the Uptime check request. + // If two headers have the same key and different values, they should + // be entered as a single header, with the value being a comma-separated + // list of all the desired values as described at + // https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31). + // Entering two separate headers with the same key in a Create call will + // cause the first to be overwritten by the second. + // The maximum number of headers allowed is 100. + Headers map[string]string `protobuf:"bytes,6,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The content type header to use for the check. The following + // configurations result in errors: + // 1. Content type is specified in both the `headers` field and the + // `content_type` field. + // 2. Request method is `GET` and `content_type` is not `TYPE_UNSPECIFIED` + // 3. Request method is `POST` and `content_type` is `TYPE_UNSPECIFIED`. + // 4. Request method is `POST` and a "Content-Type" header is provided via + // `headers` field. The `content_type` field should be used instead. + ContentType UptimeCheckConfig_HttpCheck_ContentType `protobuf:"varint,9,opt,name=content_type,json=contentType,proto3,enum=google.monitoring.v3.UptimeCheckConfig_HttpCheck_ContentType" json:"content_type,omitempty"` + // A user provided content type header to use for the check. The invalid + // configurations outlined in the `content_type` field apply to + // `custom_content_type`, as well as the following: + // 1. `content_type` is `URL_ENCODED` and `custom_content_type` is set. + // 2. `content_type` is `USER_PROVIDED` and `custom_content_type` is not + // set. + CustomContentType string `protobuf:"bytes,13,opt,name=custom_content_type,json=customContentType,proto3" json:"custom_content_type,omitempty"` + // Boolean specifying whether to include SSL certificate validation as a + // part of the Uptime check. Only applies to checks where + // `monitored_resource` is set to `uptime_url`. If `use_ssl` is `false`, + // setting `validate_ssl` to `true` has no effect. + ValidateSsl bool `protobuf:"varint,7,opt,name=validate_ssl,json=validateSsl,proto3" json:"validate_ssl,omitempty"` + // The request body associated with the HTTP POST request. If `content_type` + // is `URL_ENCODED`, the body passed in must be URL-encoded. Users can + // provide a `Content-Length` header via the `headers` field or the API will + // do so. If the `request_method` is `GET` and `body` is not empty, the API + // will return an error. The maximum byte size is 1 megabyte. + // + // Note: If client libraries aren't used (which performs the conversion + // automatically) base64 encode your `body` data since the field is of + // `bytes` type. + Body []byte `protobuf:"bytes,10,opt,name=body,proto3" json:"body,omitempty"` + // If present, the check will only pass if the HTTP response status code is + // in this set of status codes. If empty, the HTTP status code will only + // pass if the HTTP status code is 200-299. 
+ AcceptedResponseStatusCodes []*UptimeCheckConfig_HttpCheck_ResponseStatusCode `protobuf:"bytes,11,rep,name=accepted_response_status_codes,json=acceptedResponseStatusCodes,proto3" json:"accepted_response_status_codes,omitempty"` + // Contains information needed to add pings to an HTTP check. + PingConfig *UptimeCheckConfig_PingConfig `protobuf:"bytes,12,opt,name=ping_config,json=pingConfig,proto3" json:"ping_config,omitempty"` + // This field is optional and should be set only by users interested in + // an authenticated uptime check. + // Do not set both `auth_method` and `auth_info`. + // + // Types that are assignable to AuthMethod: + // + // *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ + AuthMethod isUptimeCheckConfig_HttpCheck_AuthMethod `protobuf_oneof:"auth_method"` +} + +func (x *UptimeCheckConfig_HttpCheck) Reset() { + *x = UptimeCheckConfig_HttpCheck{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_HttpCheck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_HttpCheck) ProtoMessage() {} + +func (x *UptimeCheckConfig_HttpCheck) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck.ProtoReflect.Descriptor instead. +func (*UptimeCheckConfig_HttpCheck) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2} +} + +func (x *UptimeCheckConfig_HttpCheck) GetRequestMethod() UptimeCheckConfig_HttpCheck_RequestMethod { + if x != nil { + return x.RequestMethod + } + return UptimeCheckConfig_HttpCheck_METHOD_UNSPECIFIED +} + +func (x *UptimeCheckConfig_HttpCheck) GetUseSsl() bool { + if x != nil { + return x.UseSsl + } + return false +} + +func (x *UptimeCheckConfig_HttpCheck) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *UptimeCheckConfig_HttpCheck) GetPort() int32 { + if x != nil { + return x.Port + } + return 0 +} + +func (x *UptimeCheckConfig_HttpCheck) GetAuthInfo() *UptimeCheckConfig_HttpCheck_BasicAuthentication { + if x != nil { + return x.AuthInfo + } + return nil +} + +func (x *UptimeCheckConfig_HttpCheck) GetMaskHeaders() bool { + if x != nil { + return x.MaskHeaders + } + return false +} + +func (x *UptimeCheckConfig_HttpCheck) GetHeaders() map[string]string { + if x != nil { + return x.Headers + } + return nil +} + +func (x *UptimeCheckConfig_HttpCheck) GetContentType() UptimeCheckConfig_HttpCheck_ContentType { + if x != nil { + return x.ContentType + } + return UptimeCheckConfig_HttpCheck_TYPE_UNSPECIFIED +} + +func (x *UptimeCheckConfig_HttpCheck) GetCustomContentType() string { + if x != nil { + return x.CustomContentType + } + return "" +} + +func (x *UptimeCheckConfig_HttpCheck) GetValidateSsl() bool { + if x != nil { + return x.ValidateSsl + } + return false +} + +func (x *UptimeCheckConfig_HttpCheck) GetBody() []byte { + if x != nil { + return x.Body + } + return nil +} + +func (x *UptimeCheckConfig_HttpCheck) GetAcceptedResponseStatusCodes() []*UptimeCheckConfig_HttpCheck_ResponseStatusCode { + if x != nil { + return x.AcceptedResponseStatusCodes + } + return nil +} + +func (x 
*UptimeCheckConfig_HttpCheck) GetPingConfig() *UptimeCheckConfig_PingConfig { + if x != nil { + return x.PingConfig + } + return nil +} + +func (m *UptimeCheckConfig_HttpCheck) GetAuthMethod() isUptimeCheckConfig_HttpCheck_AuthMethod { + if m != nil { + return m.AuthMethod + } + return nil +} + +func (x *UptimeCheckConfig_HttpCheck) GetServiceAgentAuthentication() *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication { + if x, ok := x.GetAuthMethod().(*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_); ok { + return x.ServiceAgentAuthentication + } + return nil +} + +type isUptimeCheckConfig_HttpCheck_AuthMethod interface { + isUptimeCheckConfig_HttpCheck_AuthMethod() +} + +type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ struct { + // If specified, Uptime will generate and attach an OIDC JWT token for the + // Monitoring service agent service account as an `Authorization` header + // in the HTTP request when probing. + ServiceAgentAuthentication *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication `protobuf:"bytes,14,opt,name=service_agent_authentication,json=serviceAgentAuthentication,proto3,oneof"` +} + +func (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_) isUptimeCheckConfig_HttpCheck_AuthMethod() { +} + +// Information required for a TCP Uptime check request. +type UptimeCheckConfig_TcpCheck struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The TCP port on the server against which to run the check. Will be + // combined with host (specified within the `monitored_resource`) to + // construct the full URL. Required. + Port int32 `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"` + // Contains information needed to add pings to a TCP check. + PingConfig *UptimeCheckConfig_PingConfig `protobuf:"bytes,2,opt,name=ping_config,json=pingConfig,proto3" json:"ping_config,omitempty"` +} + +func (x *UptimeCheckConfig_TcpCheck) Reset() { + *x = UptimeCheckConfig_TcpCheck{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_TcpCheck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_TcpCheck) ProtoMessage() {} + +func (x *UptimeCheckConfig_TcpCheck) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_TcpCheck.ProtoReflect.Descriptor instead. +func (*UptimeCheckConfig_TcpCheck) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 3} +} + +func (x *UptimeCheckConfig_TcpCheck) GetPort() int32 { + if x != nil { + return x.Port + } + return 0 +} + +func (x *UptimeCheckConfig_TcpCheck) GetPingConfig() *UptimeCheckConfig_PingConfig { + if x != nil { + return x.PingConfig + } + return nil +} + +// Optional. Used to perform content matching. This allows matching based on +// substrings and regular expressions, together with their negations. Only the +// first 4 MB of an HTTP or HTTPS check's response (and the first +// 1 MB of a TCP check's response) are examined for purposes of content +// matching. 
+type UptimeCheckConfig_ContentMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // String, regex or JSON content to match. Maximum 1024 bytes. An empty + // `content` string indicates no content matching is to be performed. + Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` + // The type of content matcher that will be applied to the server output, + // compared to the `content` string when the check is run. + Matcher UptimeCheckConfig_ContentMatcher_ContentMatcherOption `protobuf:"varint,2,opt,name=matcher,proto3,enum=google.monitoring.v3.UptimeCheckConfig_ContentMatcher_ContentMatcherOption" json:"matcher,omitempty"` + // Certain `ContentMatcherOption` types require additional information. + // `MATCHES_JSON_PATH` or `NOT_MATCHES_JSON_PATH` require a + // `JsonPathMatcher`; not used for other options. + // + // Types that are assignable to AdditionalMatcherInfo: + // + // *UptimeCheckConfig_ContentMatcher_JsonPathMatcher_ + AdditionalMatcherInfo isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo `protobuf_oneof:"additional_matcher_info"` +} + +func (x *UptimeCheckConfig_ContentMatcher) Reset() { + *x = UptimeCheckConfig_ContentMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_ContentMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_ContentMatcher) ProtoMessage() {} + +func (x *UptimeCheckConfig_ContentMatcher) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_ContentMatcher.ProtoReflect.Descriptor instead. 
+func (*UptimeCheckConfig_ContentMatcher) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4} +} + +func (x *UptimeCheckConfig_ContentMatcher) GetContent() string { + if x != nil { + return x.Content + } + return "" +} + +func (x *UptimeCheckConfig_ContentMatcher) GetMatcher() UptimeCheckConfig_ContentMatcher_ContentMatcherOption { + if x != nil { + return x.Matcher + } + return UptimeCheckConfig_ContentMatcher_CONTENT_MATCHER_OPTION_UNSPECIFIED +} + +func (m *UptimeCheckConfig_ContentMatcher) GetAdditionalMatcherInfo() isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo { + if m != nil { + return m.AdditionalMatcherInfo + } + return nil +} + +func (x *UptimeCheckConfig_ContentMatcher) GetJsonPathMatcher() *UptimeCheckConfig_ContentMatcher_JsonPathMatcher { + if x, ok := x.GetAdditionalMatcherInfo().(*UptimeCheckConfig_ContentMatcher_JsonPathMatcher_); ok { + return x.JsonPathMatcher + } + return nil +} + +type isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo interface { + isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo() +} + +type UptimeCheckConfig_ContentMatcher_JsonPathMatcher_ struct { + // Matcher information for `MATCHES_JSON_PATH` and `NOT_MATCHES_JSON_PATH` + JsonPathMatcher *UptimeCheckConfig_ContentMatcher_JsonPathMatcher `protobuf:"bytes,3,opt,name=json_path_matcher,json=jsonPathMatcher,proto3,oneof"` +} + +func (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher_) isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo() { +} + +// The authentication parameters to provide to the specified resource or +// URL that requires a username and password. Currently, only +// [Basic HTTP authentication](https://tools.ietf.org/html/rfc7617) is +// supported in Uptime checks. +type UptimeCheckConfig_HttpCheck_BasicAuthentication struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The username to use when authenticating with the HTTP server. + Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` + // The password to use when authenticating with the HTTP server. + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` +} + +func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) Reset() { + *x = UptimeCheckConfig_HttpCheck_BasicAuthentication{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoMessage() {} + +func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck_BasicAuthentication.ProtoReflect.Descriptor instead. 
+func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 0} +} + +func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetUsername() string { + if x != nil { + return x.Username + } + return "" +} + +func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetPassword() string { + if x != nil { + return x.Password + } + return "" +} + +// A status to accept. Either a status code class like "2xx", or an integer +// status code like "200". +type UptimeCheckConfig_HttpCheck_ResponseStatusCode struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Either a specific value or a class of status codes. + // + // Types that are assignable to StatusCode: + // + // *UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue + // *UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_ + StatusCode isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode `protobuf_oneof:"status_code"` +} + +func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) Reset() { + *x = UptimeCheckConfig_HttpCheck_ResponseStatusCode{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode) ProtoMessage() {} + +func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck_ResponseStatusCode.ProtoReflect.Descriptor instead. +func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 1} +} + +func (m *UptimeCheckConfig_HttpCheck_ResponseStatusCode) GetStatusCode() isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode { + if m != nil { + return m.StatusCode + } + return nil +} + +func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) GetStatusValue() int32 { + if x, ok := x.GetStatusCode().(*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue); ok { + return x.StatusValue + } + return 0 +} + +func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) GetStatusClass() UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass { + if x, ok := x.GetStatusCode().(*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_); ok { + return x.StatusClass + } + return UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_UNSPECIFIED +} + +type isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode interface { + isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode() +} + +type UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue struct { + // A status code to accept. + StatusValue int32 `protobuf:"varint,1,opt,name=status_value,json=statusValue,proto3,oneof"` +} + +type UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_ struct { + // A class of status codes to accept. 
+ StatusClass UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass `protobuf:"varint,2,opt,name=status_class,json=statusClass,proto3,enum=google.monitoring.v3.UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass,oneof"` +} + +func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue) isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode() { +} + +func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_) isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode() { +} + +// Contains information needed for generating an +// [OpenID Connect +// token](https://developers.google.com/identity/protocols/OpenIDConnect). +// The OIDC token will be generated for the Monitoring service agent service +// account. +type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Type of authentication. + Type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType `protobuf:"varint,1,opt,name=type,proto3,enum=google.monitoring.v3.UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType" json:"type,omitempty"` +} + +func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) Reset() { + *x = UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) ProtoMessage() {} + +func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication.ProtoReflect.Descriptor instead. +func (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 2} +} + +func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) GetType() UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType { + if x != nil { + return x.Type + } + return UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED +} + +// Information needed to perform a JSONPath content match. +// Used for `ContentMatcherOption::MATCHES_JSON_PATH` and +// `ContentMatcherOption::NOT_MATCHES_JSON_PATH`. +type UptimeCheckConfig_ContentMatcher_JsonPathMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // JSONPath within the response output pointing to the expected + // `ContentMatcher::content` to match against. 
+ JsonPath string `protobuf:"bytes,1,opt,name=json_path,json=jsonPath,proto3" json:"json_path,omitempty"` + // The type of JSONPath match that will be applied to the JSON output + // (`ContentMatcher.content`) + JsonMatcher UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption `protobuf:"varint,2,opt,name=json_matcher,json=jsonMatcher,proto3,enum=google.monitoring.v3.UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption" json:"json_matcher,omitempty"` +} + +func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) Reset() { + *x = UptimeCheckConfig_ContentMatcher_JsonPathMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher) ProtoMessage() {} + +func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UptimeCheckConfig_ContentMatcher_JsonPathMatcher.ProtoReflect.Descriptor instead. +func (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4, 0} +} + +func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) GetJsonPath() string { + if x != nil { + return x.JsonPath + } + return "" +} + +func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) GetJsonMatcher() UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption { + if x != nil { + return x.JsonMatcher + } + return UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JSON_PATH_MATCHER_OPTION_UNSPECIFIED +} + +var File_google_monitoring_v3_uptime_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_uptime_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, + 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, + 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa1, 0x02, 0x0a, 0x0f, 0x49, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, + 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, + 0x19, 0x0a, 0x08, 0x67, 0x63, 0x70, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x67, 0x63, 0x70, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x65, + 0x65, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x49, 0x64, 0x12, 0x41, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x33, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, + 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, + 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x3a, 0x02, 0x18, 0x01, 0x22, 0xc4, + 0x02, 0x0a, 0x16, 0x53, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, 0x63, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x70, 0x0a, 0x11, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x32, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x79, 0x6e, 0x74, + 0x68, 0x65, 0x74, 0x69, 0x63, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x54, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x56, 0x32, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x48, 0x00, 0x52, 0x0f, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x32, 0x1a, 0xad, 0x01, 0x0a, 0x15, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x32, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x42, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x2e, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x28, 0x0a, 0x26, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x46, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x50, 0x0a, 0x12, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x10, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x52, 0x75, 0x6e, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x08, 
0x0a, 0x06, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0x94, 0x23, 0x0a, 0x11, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, + 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x12, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x48, 0x00, 0x52, 0x11, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x5e, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x5b, 0x0a, 0x11, 0x73, 0x79, 0x6e, 0x74, 0x68, + 0x65, 0x74, 0x69, 0x63, 0x5f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x18, 0x15, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x79, 0x6e, 0x74, 0x68, 0x65, + 0x74, 0x69, 0x63, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x48, 0x00, 0x52, 0x10, 0x73, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, 0x63, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x12, 0x52, 0x0a, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x68, 0x65, + 0x63, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x01, 0x52, 0x09, 0x68, + 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x4f, 0x0a, 0x09, 0x74, 0x63, 0x70, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x63, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x01, 0x52, + 0x08, 0x74, 0x63, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x31, 0x0a, 0x06, 0x70, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x33, 0x0a, 0x07, + 0x74, 
0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x12, 0x61, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x73, 0x12, 0x56, 0x0a, 0x0c, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x0b, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x52, 0x0a, 0x10, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x52, + 0x0f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x23, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, + 0x0f, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x69, 0x73, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x56, 0x0a, 0x11, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x42, 0x02, 0x18, 0x01, 0x52, 0x10, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x58, 0x0a, + 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x14, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, 0x73, 0x65, 0x72, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, + 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x78, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x49, 0x64, 0x12, 0x4c, 
0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x1a, 0x2d, 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x1f, 0x0a, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x1a, 0xef, 0x0e, 0x0a, 0x09, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x66, + 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x5f, 0x73, 0x73, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x75, 0x73, 0x65, 0x53, 0x73, 0x6c, 0x12, + 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x62, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x42, 0x61, + 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x08, 0x61, 0x75, 0x74, 0x68, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x6d, + 0x61, 0x73, 0x6b, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0b, 0x6d, 0x61, 0x73, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x58, + 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x60, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 
0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x63, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x73, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x73, 0x6c, 0x12, 0x12, 0x0a, + 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, + 0x79, 0x12, 0x89, 0x01, 0x0a, 0x1e, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, + 0x6f, 0x64, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, + 0x52, 0x1b, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x53, 0x0a, + 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x69, 0x6e, 0x67, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x90, 0x01, 0x0a, 0x1c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, + 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x1a, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x4d, 0x0a, 0x13, 0x42, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, + 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, + 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 
0x61, 0x73, 0x73, + 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, + 0x77, 0x6f, 0x72, 0x64, 0x1a, 0xf6, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x75, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x50, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x22, 0xb4, 0x01, 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x54, 0x41, 0x54, 0x55, + 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, + 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x31, 0x58, 0x58, 0x10, 0x64, 0x12, 0x15, 0x0a, 0x10, 0x53, + 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x32, 0x58, 0x58, 0x10, + 0xc8, 0x01, 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, + 0x53, 0x53, 0x5f, 0x33, 0x58, 0x58, 0x10, 0xac, 0x02, 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, + 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x34, 0x58, 0x58, 0x10, 0x90, 0x03, + 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, + 0x5f, 0x35, 0x58, 0x58, 0x10, 0xf4, 0x03, 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, + 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x41, 0x4e, 0x59, 0x10, 0xe8, 0x07, 0x42, 0x0d, + 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x1a, 0x82, 0x02, + 0x0a, 0x1a, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, + 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7f, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x6b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, + 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x63, 0x0a, + 0x1e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, + 0x68, 
0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x31, 0x0a, 0x2d, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x41, 0x47, 0x45, 0x4e, 0x54, + 0x5f, 0x41, 0x55, 0x54, 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4f, 0x49, 0x44, 0x43, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, + 0x10, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3a, + 0x0a, 0x0d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, + 0x16, 0x0a, 0x12, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x54, 0x10, 0x01, + 0x12, 0x08, 0x0a, 0x04, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x02, 0x22, 0x47, 0x0a, 0x0b, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x0f, 0x0a, 0x0b, 0x55, 0x52, 0x4c, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x45, 0x44, 0x10, 0x01, + 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x53, 0x45, 0x52, 0x5f, 0x50, 0x52, 0x4f, 0x56, 0x49, 0x44, 0x45, + 0x44, 0x10, 0x02, 0x42, 0x0d, 0x0a, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x1a, 0x73, 0x0a, 0x08, 0x54, 0x63, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x12, + 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, + 0x72, 0x74, 0x12, 0x53, 0x0a, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x69, 0x6e, + 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x84, 0x06, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x12, 0x65, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x4b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, + 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x74, 0x0a, 0x11, 0x6a, + 0x73, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x46, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4a, + 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, + 0x52, 0x0f, 0x6a, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x1a, 0x94, 0x02, 0x0a, 0x0f, 0x4a, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x50, 0x61, + 0x74, 0x68, 0x12, 0x7f, 0x0a, 0x0c, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x5c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x22, 0x63, 0x0a, 0x15, 0x4a, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x24, + 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x50, 0x41, 0x54, 0x48, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x45, + 0x52, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x58, 0x41, 0x43, 0x54, 0x5f, + 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x47, 0x45, 0x58, + 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x02, 0x22, 0xc8, 0x01, 0x0a, 0x14, 0x43, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x26, 0x0a, 0x22, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x54, 0x5f, 0x4d, 0x41, 0x54, + 0x43, 0x48, 0x45, 0x52, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x43, 0x4f, 0x4e, + 0x54, 0x41, 0x49, 0x4e, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x17, + 0x0a, 0x13, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x53, 0x5f, 0x53, + 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x4d, 0x41, 0x54, 0x43, 0x48, + 0x45, 0x53, 0x5f, 0x52, 0x45, 0x47, 0x45, 0x58, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, + 0x54, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x45, 0x53, 0x5f, 0x52, 0x45, 0x47, 0x45, 0x58, 0x10, + 0x04, 0x12, 0x15, 0x0a, 0x11, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x45, 0x53, 0x5f, 0x4a, 0x53, 0x4f, + 0x4e, 0x5f, 0x50, 0x41, 0x54, 0x48, 0x10, 0x05, 0x12, 0x19, 0x0a, 0x15, 0x4e, 0x4f, 0x54, 0x5f, + 0x4d, 0x41, 0x54, 0x43, 0x48, 0x45, 0x53, 0x5f, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x50, 0x41, 0x54, + 0x48, 0x10, 0x06, 0x42, 0x19, 0x0a, 0x17, 0x61, 0x64, 
0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x1a, 0x3d, + 0x0a, 0x0f, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x55, 0x0a, + 0x0b, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x18, + 0x43, 0x48, 0x45, 0x43, 0x4b, 0x45, 0x52, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, + 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x54, + 0x41, 0x54, 0x49, 0x43, 0x5f, 0x49, 0x50, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x45, 0x52, 0x53, + 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x56, 0x50, 0x43, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x45, + 0x52, 0x53, 0x10, 0x03, 0x3a, 0xf3, 0x01, 0xea, 0x41, 0xef, 0x01, 0x0a, 0x2b, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x75, 0x70, 0x74, + 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, + 0x7b, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x12, 0x45, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x7d, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x12, 0x39, 0x66, 0x6f, + 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x75, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x73, 0x2f, 0x7b, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0x0a, 0x0a, 0x08, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x14, 0x0a, 0x12, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0x8b, 0x01, 0x0a, + 0x0d, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x12, 0x3f, + 0x0a, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, + 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x69, + 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 
0x28, 0x09, 0x52, + 0x09, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2a, 0x95, 0x01, 0x0a, 0x11, 0x55, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x47, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, + 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x53, 0x41, 0x10, + 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x45, 0x55, 0x52, 0x4f, 0x50, 0x45, 0x10, 0x02, 0x12, 0x11, 0x0a, + 0x0d, 0x53, 0x4f, 0x55, 0x54, 0x48, 0x5f, 0x41, 0x4d, 0x45, 0x52, 0x49, 0x43, 0x41, 0x10, 0x03, + 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x53, 0x49, 0x41, 0x5f, 0x50, 0x41, 0x43, 0x49, 0x46, 0x49, 0x43, + 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x53, 0x41, 0x5f, 0x4f, 0x52, 0x45, 0x47, 0x4f, 0x4e, + 0x10, 0x05, 0x12, 0x0c, 0x0a, 0x08, 0x55, 0x53, 0x41, 0x5f, 0x49, 0x4f, 0x57, 0x41, 0x10, 0x06, + 0x12, 0x10, 0x0a, 0x0c, 0x55, 0x53, 0x41, 0x5f, 0x56, 0x49, 0x52, 0x47, 0x49, 0x4e, 0x49, 0x41, + 0x10, 0x07, 0x2a, 0x5b, 0x0a, 0x11, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x19, 0x52, 0x45, 0x53, 0x4f, 0x55, + 0x52, 0x43, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x53, 0x54, 0x41, 0x4e, + 0x43, 0x45, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x57, 0x53, 0x5f, 0x45, 0x4c, 0x42, 0x5f, + 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x45, 0x52, 0x10, 0x02, 0x42, + 0xaf, 0x02, 0xea, 0x41, 0x66, 0x0a, 0x26, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3c, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x7b, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x0a, 0x18, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, + 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, + 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + 
file_google_monitoring_v3_uptime_proto_rawDescOnce sync.Once + file_google_monitoring_v3_uptime_proto_rawDescData = file_google_monitoring_v3_uptime_proto_rawDesc +) + +func file_google_monitoring_v3_uptime_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_uptime_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_uptime_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_uptime_proto_rawDescData) + }) + return file_google_monitoring_v3_uptime_proto_rawDescData +} + +var file_google_monitoring_v3_uptime_proto_enumTypes = make([]protoimpl.EnumInfo, 10) +var file_google_monitoring_v3_uptime_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_google_monitoring_v3_uptime_proto_goTypes = []any{ + (UptimeCheckRegion)(0), // 0: google.monitoring.v3.UptimeCheckRegion + (GroupResourceType)(0), // 1: google.monitoring.v3.GroupResourceType + (InternalChecker_State)(0), // 2: google.monitoring.v3.InternalChecker.State + (UptimeCheckConfig_CheckerType)(0), // 3: google.monitoring.v3.UptimeCheckConfig.CheckerType + (UptimeCheckConfig_HttpCheck_RequestMethod)(0), // 4: google.monitoring.v3.UptimeCheckConfig.HttpCheck.RequestMethod + (UptimeCheckConfig_HttpCheck_ContentType)(0), // 5: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ContentType + (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass)(0), // 6: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.StatusClass + (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType)(0), // 7: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication.ServiceAgentAuthenticationType + (UptimeCheckConfig_ContentMatcher_ContentMatcherOption)(0), // 8: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.ContentMatcherOption + (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption)(0), // 9: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.JsonPathMatcherOption + (*InternalChecker)(nil), // 10: google.monitoring.v3.InternalChecker + (*SyntheticMonitorTarget)(nil), // 11: google.monitoring.v3.SyntheticMonitorTarget + (*UptimeCheckConfig)(nil), // 12: google.monitoring.v3.UptimeCheckConfig + (*UptimeCheckIp)(nil), // 13: google.monitoring.v3.UptimeCheckIp + (*SyntheticMonitorTarget_CloudFunctionV2Target)(nil), // 14: google.monitoring.v3.SyntheticMonitorTarget.CloudFunctionV2Target + (*UptimeCheckConfig_ResourceGroup)(nil), // 15: google.monitoring.v3.UptimeCheckConfig.ResourceGroup + (*UptimeCheckConfig_PingConfig)(nil), // 16: google.monitoring.v3.UptimeCheckConfig.PingConfig + (*UptimeCheckConfig_HttpCheck)(nil), // 17: google.monitoring.v3.UptimeCheckConfig.HttpCheck + (*UptimeCheckConfig_TcpCheck)(nil), // 18: google.monitoring.v3.UptimeCheckConfig.TcpCheck + (*UptimeCheckConfig_ContentMatcher)(nil), // 19: google.monitoring.v3.UptimeCheckConfig.ContentMatcher + nil, // 20: google.monitoring.v3.UptimeCheckConfig.UserLabelsEntry + (*UptimeCheckConfig_HttpCheck_BasicAuthentication)(nil), // 21: google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication + (*UptimeCheckConfig_HttpCheck_ResponseStatusCode)(nil), // 22: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode + (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication)(nil), // 23: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication + nil, // 24: google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry + (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher)(nil), // 25: 
google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher + (*monitoredres.MonitoredResource)(nil), // 26: google.api.MonitoredResource + (*durationpb.Duration)(nil), // 27: google.protobuf.Duration +} +var file_google_monitoring_v3_uptime_proto_depIdxs = []int32{ + 2, // 0: google.monitoring.v3.InternalChecker.state:type_name -> google.monitoring.v3.InternalChecker.State + 14, // 1: google.monitoring.v3.SyntheticMonitorTarget.cloud_function_v2:type_name -> google.monitoring.v3.SyntheticMonitorTarget.CloudFunctionV2Target + 26, // 2: google.monitoring.v3.UptimeCheckConfig.monitored_resource:type_name -> google.api.MonitoredResource + 15, // 3: google.monitoring.v3.UptimeCheckConfig.resource_group:type_name -> google.monitoring.v3.UptimeCheckConfig.ResourceGroup + 11, // 4: google.monitoring.v3.UptimeCheckConfig.synthetic_monitor:type_name -> google.monitoring.v3.SyntheticMonitorTarget + 17, // 5: google.monitoring.v3.UptimeCheckConfig.http_check:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck + 18, // 6: google.monitoring.v3.UptimeCheckConfig.tcp_check:type_name -> google.monitoring.v3.UptimeCheckConfig.TcpCheck + 27, // 7: google.monitoring.v3.UptimeCheckConfig.period:type_name -> google.protobuf.Duration + 27, // 8: google.monitoring.v3.UptimeCheckConfig.timeout:type_name -> google.protobuf.Duration + 19, // 9: google.monitoring.v3.UptimeCheckConfig.content_matchers:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher + 3, // 10: google.monitoring.v3.UptimeCheckConfig.checker_type:type_name -> google.monitoring.v3.UptimeCheckConfig.CheckerType + 0, // 11: google.monitoring.v3.UptimeCheckConfig.selected_regions:type_name -> google.monitoring.v3.UptimeCheckRegion + 10, // 12: google.monitoring.v3.UptimeCheckConfig.internal_checkers:type_name -> google.monitoring.v3.InternalChecker + 20, // 13: google.monitoring.v3.UptimeCheckConfig.user_labels:type_name -> google.monitoring.v3.UptimeCheckConfig.UserLabelsEntry + 0, // 14: google.monitoring.v3.UptimeCheckIp.region:type_name -> google.monitoring.v3.UptimeCheckRegion + 26, // 15: google.monitoring.v3.SyntheticMonitorTarget.CloudFunctionV2Target.cloud_run_revision:type_name -> google.api.MonitoredResource + 1, // 16: google.monitoring.v3.UptimeCheckConfig.ResourceGroup.resource_type:type_name -> google.monitoring.v3.GroupResourceType + 4, // 17: google.monitoring.v3.UptimeCheckConfig.HttpCheck.request_method:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.RequestMethod + 21, // 18: google.monitoring.v3.UptimeCheckConfig.HttpCheck.auth_info:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication + 24, // 19: google.monitoring.v3.UptimeCheckConfig.HttpCheck.headers:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry + 5, // 20: google.monitoring.v3.UptimeCheckConfig.HttpCheck.content_type:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ContentType + 22, // 21: google.monitoring.v3.UptimeCheckConfig.HttpCheck.accepted_response_status_codes:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode + 16, // 22: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ping_config:type_name -> google.monitoring.v3.UptimeCheckConfig.PingConfig + 23, // 23: google.monitoring.v3.UptimeCheckConfig.HttpCheck.service_agent_authentication:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication + 16, // 24: google.monitoring.v3.UptimeCheckConfig.TcpCheck.ping_config:type_name -> 
google.monitoring.v3.UptimeCheckConfig.PingConfig + 8, // 25: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.ContentMatcherOption + 25, // 26: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.json_path_matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher + 6, // 27: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.status_class:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.StatusClass + 7, // 28: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication.type:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication.ServiceAgentAuthenticationType + 9, // 29: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.json_matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.JsonPathMatcherOption + 30, // [30:30] is the sub-list for method output_type + 30, // [30:30] is the sub-list for method input_type + 30, // [30:30] is the sub-list for extension type_name + 30, // [30:30] is the sub-list for extension extendee + 0, // [0:30] is the sub-list for field type_name +} + +func init() { file_google_monitoring_v3_uptime_proto_init() } +func file_google_monitoring_v3_uptime_proto_init() { + if File_google_monitoring_v3_uptime_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_uptime_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*InternalChecker); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*SyntheticMonitorTarget); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckIp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*SyntheticMonitorTarget_CloudFunctionV2Target); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_ResourceGroup); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_PingConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_HttpCheck); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + 
return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_TcpCheck); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_ContentMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_HttpCheck_BasicAuthentication); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_HttpCheck_ResponseStatusCode); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[13].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[15].Exporter = func(v any, i int) any { + switch v := v.(*UptimeCheckConfig_ContentMatcher_JsonPathMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_monitoring_v3_uptime_proto_msgTypes[1].OneofWrappers = []any{ + (*SyntheticMonitorTarget_CloudFunctionV2)(nil), + } + file_google_monitoring_v3_uptime_proto_msgTypes[2].OneofWrappers = []any{ + (*UptimeCheckConfig_MonitoredResource)(nil), + (*UptimeCheckConfig_ResourceGroup_)(nil), + (*UptimeCheckConfig_SyntheticMonitor)(nil), + (*UptimeCheckConfig_HttpCheck_)(nil), + (*UptimeCheckConfig_TcpCheck_)(nil), + } + file_google_monitoring_v3_uptime_proto_msgTypes[7].OneofWrappers = []any{ + (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_)(nil), + } + file_google_monitoring_v3_uptime_proto_msgTypes[9].OneofWrappers = []any{ + (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher_)(nil), + } + file_google_monitoring_v3_uptime_proto_msgTypes[12].OneofWrappers = []any{ + (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue)(nil), + (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_uptime_proto_rawDesc, + NumEnums: 10, + NumMessages: 16, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_monitoring_v3_uptime_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_uptime_proto_depIdxs, + EnumInfos: file_google_monitoring_v3_uptime_proto_enumTypes, + MessageInfos: file_google_monitoring_v3_uptime_proto_msgTypes, + }.Build() + File_google_monitoring_v3_uptime_proto = out.File + file_google_monitoring_v3_uptime_proto_rawDesc = nil + file_google_monitoring_v3_uptime_proto_goTypes = nil + file_google_monitoring_v3_uptime_proto_depIdxs = nil +} diff --git 
a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go new file mode 100644 index 0000000000..d4ba902fb0 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go @@ -0,0 +1,1226 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/monitoring/v3/uptime_service.proto + +package monitoringpb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The protocol for the `ListUptimeCheckConfigs` request. +type ListUptimeCheckConfigsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) whose + // Uptime check configurations are listed. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // If provided, this field specifies the criteria that must be met by + // uptime checks to be included in the response. + // + // For more details, see [Filtering + // syntax](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering#filter_syntax). + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // The maximum number of results to return in a single response. The server + // may further constrain the maximum number of results returned in a single + // page. If the page_size is <=0, the server will decide the number of results + // to be returned. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return more results from the previous method call. 
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListUptimeCheckConfigsRequest) Reset() { + *x = ListUptimeCheckConfigsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListUptimeCheckConfigsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListUptimeCheckConfigsRequest) ProtoMessage() {} + +func (x *ListUptimeCheckConfigsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListUptimeCheckConfigsRequest.ProtoReflect.Descriptor instead. +func (*ListUptimeCheckConfigsRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{0} +} + +func (x *ListUptimeCheckConfigsRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListUptimeCheckConfigsRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListUptimeCheckConfigsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListUptimeCheckConfigsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The protocol for the `ListUptimeCheckConfigs` response. +type ListUptimeCheckConfigsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The returned Uptime check configurations. + UptimeCheckConfigs []*UptimeCheckConfig `protobuf:"bytes,1,rep,name=uptime_check_configs,json=uptimeCheckConfigs,proto3" json:"uptime_check_configs,omitempty"` + // This field represents the pagination token to retrieve the next page of + // results. If the value is empty, it means no further results for the + // request. To retrieve the next page of results, the value of the + // next_page_token is passed to the subsequent List method call (in the + // request message's page_token field). + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of Uptime check configurations for the project, + // irrespective of any pagination. 
+ TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` +} + +func (x *ListUptimeCheckConfigsResponse) Reset() { + *x = ListUptimeCheckConfigsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListUptimeCheckConfigsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListUptimeCheckConfigsResponse) ProtoMessage() {} + +func (x *ListUptimeCheckConfigsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListUptimeCheckConfigsResponse.ProtoReflect.Descriptor instead. +func (*ListUptimeCheckConfigsResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ListUptimeCheckConfigsResponse) GetUptimeCheckConfigs() []*UptimeCheckConfig { + if x != nil { + return x.UptimeCheckConfigs + } + return nil +} + +func (x *ListUptimeCheckConfigsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *ListUptimeCheckConfigsResponse) GetTotalSize() int32 { + if x != nil { + return x.TotalSize + } + return 0 +} + +// The protocol for the `GetUptimeCheckConfig` request. +type GetUptimeCheckConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The Uptime check configuration to retrieve. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetUptimeCheckConfigRequest) Reset() { + *x = GetUptimeCheckConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetUptimeCheckConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetUptimeCheckConfigRequest) ProtoMessage() {} + +func (x *GetUptimeCheckConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetUptimeCheckConfigRequest.ProtoReflect.Descriptor instead. +func (*GetUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{2} +} + +func (x *GetUptimeCheckConfigRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The protocol for the `CreateUptimeCheckConfig` request. +type CreateUptimeCheckConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The + // [project](https://cloud.google.com/monitoring/api/v3#project_name) in which + // to create the Uptime check. 
The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Required. The new Uptime check configuration. + UptimeCheckConfig *UptimeCheckConfig `protobuf:"bytes,2,opt,name=uptime_check_config,json=uptimeCheckConfig,proto3" json:"uptime_check_config,omitempty"` +} + +func (x *CreateUptimeCheckConfigRequest) Reset() { + *x = CreateUptimeCheckConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateUptimeCheckConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateUptimeCheckConfigRequest) ProtoMessage() {} + +func (x *CreateUptimeCheckConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateUptimeCheckConfigRequest.ProtoReflect.Descriptor instead. +func (*CreateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{3} +} + +func (x *CreateUptimeCheckConfigRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateUptimeCheckConfigRequest) GetUptimeCheckConfig() *UptimeCheckConfig { + if x != nil { + return x.UptimeCheckConfig + } + return nil +} + +// The protocol for the `UpdateUptimeCheckConfig` request. +type UpdateUptimeCheckConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. If present, only the listed fields in the current Uptime check + // configuration are updated with values from the new configuration. If this + // field is empty, then the current configuration is completely replaced with + // the new configuration. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // Required. If an `updateMask` has been specified, this field gives + // the values for the set of fields mentioned in the `updateMask`. If an + // `updateMask` has not been given, this Uptime check configuration replaces + // the current configuration. If a field is mentioned in `updateMask` but + // the corresponding field is omitted in this partial Uptime check + // configuration, it has the effect of deleting/clearing the field from the + // configuration on the server. + // + // The following fields can be updated: `display_name`, + // `http_check`, `tcp_check`, `timeout`, `content_matchers`, and + // `selected_regions`. 
+ UptimeCheckConfig *UptimeCheckConfig `protobuf:"bytes,3,opt,name=uptime_check_config,json=uptimeCheckConfig,proto3" json:"uptime_check_config,omitempty"` +} + +func (x *UpdateUptimeCheckConfigRequest) Reset() { + *x = UpdateUptimeCheckConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateUptimeCheckConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateUptimeCheckConfigRequest) ProtoMessage() {} + +func (x *UpdateUptimeCheckConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateUptimeCheckConfigRequest.ProtoReflect.Descriptor instead. +func (*UpdateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{4} +} + +func (x *UpdateUptimeCheckConfigRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +func (x *UpdateUptimeCheckConfigRequest) GetUptimeCheckConfig() *UptimeCheckConfig { + if x != nil { + return x.UptimeCheckConfig + } + return nil +} + +// The protocol for the `DeleteUptimeCheckConfig` request. +type DeleteUptimeCheckConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The Uptime check configuration to delete. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *DeleteUptimeCheckConfigRequest) Reset() { + *x = DeleteUptimeCheckConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteUptimeCheckConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteUptimeCheckConfigRequest) ProtoMessage() {} + +func (x *DeleteUptimeCheckConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteUptimeCheckConfigRequest.ProtoReflect.Descriptor instead. +func (*DeleteUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{5} +} + +func (x *DeleteUptimeCheckConfigRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The protocol for the `ListUptimeCheckIps` request. +type ListUptimeCheckIpsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The maximum number of results to return in a single response. The server + // may further constrain the maximum number of results returned in a single + // page. 
If the page_size is <=0, the server will decide the number of results + // to be returned. + // NOTE: this field is not yet implemented + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return more results from the previous method call. + // NOTE: this field is not yet implemented + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListUptimeCheckIpsRequest) Reset() { + *x = ListUptimeCheckIpsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListUptimeCheckIpsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListUptimeCheckIpsRequest) ProtoMessage() {} + +func (x *ListUptimeCheckIpsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListUptimeCheckIpsRequest.ProtoReflect.Descriptor instead. +func (*ListUptimeCheckIpsRequest) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{6} +} + +func (x *ListUptimeCheckIpsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListUptimeCheckIpsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The protocol for the `ListUptimeCheckIps` response. +type ListUptimeCheckIpsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The returned list of IP addresses (including region and location) that the + // checkers run from. + UptimeCheckIps []*UptimeCheckIp `protobuf:"bytes,1,rep,name=uptime_check_ips,json=uptimeCheckIps,proto3" json:"uptime_check_ips,omitempty"` + // This field represents the pagination token to retrieve the next page of + // results. If the value is empty, it means no further results for the + // request. To retrieve the next page of results, the value of the + // next_page_token is passed to the subsequent List method call (in the + // request message's page_token field). 
+ // NOTE: this field is not yet implemented + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListUptimeCheckIpsResponse) Reset() { + *x = ListUptimeCheckIpsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListUptimeCheckIpsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListUptimeCheckIpsResponse) ProtoMessage() {} + +func (x *ListUptimeCheckIpsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListUptimeCheckIpsResponse.ProtoReflect.Descriptor instead. +func (*ListUptimeCheckIpsResponse) Descriptor() ([]byte, []int) { + return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{7} +} + +func (x *ListUptimeCheckIpsResponse) GetUptimeCheckIps() []*UptimeCheckIp { + if x != nil { + return x.UptimeCheckIps + } + return nil +} + +func (x *ListUptimeCheckIpsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +var File_google_monitoring_v3_uptime_service_proto protoreflect.FileDescriptor + +var file_google_monitoring_v3_uptime_service_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, + 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc0, 0x01, 0x0a, 0x1d, 0x4c, 0x69, 0x73, 0x74, 0x55, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 
0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, + 0x12, 0x2b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x70, 0x74, 0x69, + 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, + 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xc2, 0x01, 0x0a, 0x1e, 0x4c, 0x69, + 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x14, + 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x12, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, + 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x66, + 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xcb, 0x01, 0x0a, 0x1e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, + 0x2d, 0x12, 0x2b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x70, 0x74, + 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x5c, 0x0a, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, + 
0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, + 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x11, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x22, 0xbb, 0x01, 0x0a, 0x1e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x5c, 0x0a, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x11, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x22, 0x69, 0x0a, 0x1e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x70, 0x74, 0x69, + 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x57, 0x0a, + 0x19, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x49, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, + 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, + 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x93, 0x01, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x55, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x10, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x69, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x49, 0x70, 0x52, 0x0e, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x49, 0x70, 0x73, 
0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, + 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, + 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x32, 0xbd, 0x0a, 0x0a, + 0x12, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0xc0, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, + 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x33, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3b, 0xda, 0x41, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x33, 0x2f, + 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x7d, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0xad, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x55, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x39, 0xda, 0x41, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x33, 0x2f, + 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xde, 0x01, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x22, 0x64, 0xda, 0x41, 0x1a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x75, 0x70, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 
0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x41, 0x3a, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x2a, 0x2f, 0x76, 0x33, + 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0xeb, 0x01, 0x0a, 0x17, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x22, 0x71, 0xda, 0x41, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x55, + 0x3a, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x32, 0x3e, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x75, 0x70, 0x74, 0x69, + 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa2, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, + 0x39, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x2a, 0x2a, + 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x12, 0x4c, + 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, + 0x73, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, + 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 
0x73, 0x74, 0x55, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x14, 0x12, 0x12, 0x2f, 0x76, + 0x33, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x73, + 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, + 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, + 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, + 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, + 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, + 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x42, 0xcd, 0x01, 0x0a, + 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x12, 0x55, 0x70, 0x74, 0x69, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, + 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, + 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, + 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_monitoring_v3_uptime_service_proto_rawDescOnce sync.Once + file_google_monitoring_v3_uptime_service_proto_rawDescData = file_google_monitoring_v3_uptime_service_proto_rawDesc +) + +func file_google_monitoring_v3_uptime_service_proto_rawDescGZIP() []byte { + file_google_monitoring_v3_uptime_service_proto_rawDescOnce.Do(func() { + file_google_monitoring_v3_uptime_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_uptime_service_proto_rawDescData) + }) + return file_google_monitoring_v3_uptime_service_proto_rawDescData +} + +var file_google_monitoring_v3_uptime_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_google_monitoring_v3_uptime_service_proto_goTypes = []any{ + (*ListUptimeCheckConfigsRequest)(nil), // 0: google.monitoring.v3.ListUptimeCheckConfigsRequest + 
(*ListUptimeCheckConfigsResponse)(nil), // 1: google.monitoring.v3.ListUptimeCheckConfigsResponse + (*GetUptimeCheckConfigRequest)(nil), // 2: google.monitoring.v3.GetUptimeCheckConfigRequest + (*CreateUptimeCheckConfigRequest)(nil), // 3: google.monitoring.v3.CreateUptimeCheckConfigRequest + (*UpdateUptimeCheckConfigRequest)(nil), // 4: google.monitoring.v3.UpdateUptimeCheckConfigRequest + (*DeleteUptimeCheckConfigRequest)(nil), // 5: google.monitoring.v3.DeleteUptimeCheckConfigRequest + (*ListUptimeCheckIpsRequest)(nil), // 6: google.monitoring.v3.ListUptimeCheckIpsRequest + (*ListUptimeCheckIpsResponse)(nil), // 7: google.monitoring.v3.ListUptimeCheckIpsResponse + (*UptimeCheckConfig)(nil), // 8: google.monitoring.v3.UptimeCheckConfig + (*fieldmaskpb.FieldMask)(nil), // 9: google.protobuf.FieldMask + (*UptimeCheckIp)(nil), // 10: google.monitoring.v3.UptimeCheckIp + (*emptypb.Empty)(nil), // 11: google.protobuf.Empty +} +var file_google_monitoring_v3_uptime_service_proto_depIdxs = []int32{ + 8, // 0: google.monitoring.v3.ListUptimeCheckConfigsResponse.uptime_check_configs:type_name -> google.monitoring.v3.UptimeCheckConfig + 8, // 1: google.monitoring.v3.CreateUptimeCheckConfigRequest.uptime_check_config:type_name -> google.monitoring.v3.UptimeCheckConfig + 9, // 2: google.monitoring.v3.UpdateUptimeCheckConfigRequest.update_mask:type_name -> google.protobuf.FieldMask + 8, // 3: google.monitoring.v3.UpdateUptimeCheckConfigRequest.uptime_check_config:type_name -> google.monitoring.v3.UptimeCheckConfig + 10, // 4: google.monitoring.v3.ListUptimeCheckIpsResponse.uptime_check_ips:type_name -> google.monitoring.v3.UptimeCheckIp + 0, // 5: google.monitoring.v3.UptimeCheckService.ListUptimeCheckConfigs:input_type -> google.monitoring.v3.ListUptimeCheckConfigsRequest + 2, // 6: google.monitoring.v3.UptimeCheckService.GetUptimeCheckConfig:input_type -> google.monitoring.v3.GetUptimeCheckConfigRequest + 3, // 7: google.monitoring.v3.UptimeCheckService.CreateUptimeCheckConfig:input_type -> google.monitoring.v3.CreateUptimeCheckConfigRequest + 4, // 8: google.monitoring.v3.UptimeCheckService.UpdateUptimeCheckConfig:input_type -> google.monitoring.v3.UpdateUptimeCheckConfigRequest + 5, // 9: google.monitoring.v3.UptimeCheckService.DeleteUptimeCheckConfig:input_type -> google.monitoring.v3.DeleteUptimeCheckConfigRequest + 6, // 10: google.monitoring.v3.UptimeCheckService.ListUptimeCheckIps:input_type -> google.monitoring.v3.ListUptimeCheckIpsRequest + 1, // 11: google.monitoring.v3.UptimeCheckService.ListUptimeCheckConfigs:output_type -> google.monitoring.v3.ListUptimeCheckConfigsResponse + 8, // 12: google.monitoring.v3.UptimeCheckService.GetUptimeCheckConfig:output_type -> google.monitoring.v3.UptimeCheckConfig + 8, // 13: google.monitoring.v3.UptimeCheckService.CreateUptimeCheckConfig:output_type -> google.monitoring.v3.UptimeCheckConfig + 8, // 14: google.monitoring.v3.UptimeCheckService.UpdateUptimeCheckConfig:output_type -> google.monitoring.v3.UptimeCheckConfig + 11, // 15: google.monitoring.v3.UptimeCheckService.DeleteUptimeCheckConfig:output_type -> google.protobuf.Empty + 7, // 16: google.monitoring.v3.UptimeCheckService.ListUptimeCheckIps:output_type -> google.monitoring.v3.ListUptimeCheckIpsResponse + 11, // [11:17] is the sub-list for method output_type + 5, // [5:11] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { 
file_google_monitoring_v3_uptime_service_proto_init() } +func file_google_monitoring_v3_uptime_service_proto_init() { + if File_google_monitoring_v3_uptime_service_proto != nil { + return + } + file_google_monitoring_v3_uptime_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_monitoring_v3_uptime_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*ListUptimeCheckConfigsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ListUptimeCheckConfigsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*GetUptimeCheckConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*CreateUptimeCheckConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*UpdateUptimeCheckConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*DeleteUptimeCheckConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_service_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*ListUptimeCheckIpsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_monitoring_v3_uptime_service_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*ListUptimeCheckIpsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_monitoring_v3_uptime_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_monitoring_v3_uptime_service_proto_goTypes, + DependencyIndexes: file_google_monitoring_v3_uptime_service_proto_depIdxs, + MessageInfos: file_google_monitoring_v3_uptime_service_proto_msgTypes, + }.Build() + File_google_monitoring_v3_uptime_service_proto = out.File + file_google_monitoring_v3_uptime_service_proto_rawDesc = nil + file_google_monitoring_v3_uptime_service_proto_goTypes = nil + file_google_monitoring_v3_uptime_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion6 + +// UptimeCheckServiceClient is the client API for UptimeCheckService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type UptimeCheckServiceClient interface { + // Lists the existing valid Uptime check configurations for the project + // (leaving out any invalid configurations). + ListUptimeCheckConfigs(ctx context.Context, in *ListUptimeCheckConfigsRequest, opts ...grpc.CallOption) (*ListUptimeCheckConfigsResponse, error) + // Gets a single Uptime check configuration. + GetUptimeCheckConfig(ctx context.Context, in *GetUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) + // Creates a new Uptime check configuration. + CreateUptimeCheckConfig(ctx context.Context, in *CreateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) + // Updates an Uptime check configuration. You can either replace the entire + // configuration with a new one or replace only certain fields in the current + // configuration by specifying the fields to be updated via `updateMask`. + // Returns the updated configuration. + UpdateUptimeCheckConfig(ctx context.Context, in *UpdateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) + // Deletes an Uptime check configuration. Note that this method will fail + // if the Uptime check configuration is referenced by an alert policy or + // other dependent configs that would be rendered invalid by the deletion. + DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Returns the list of IP addresses that checkers run from + ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error) +} + +type uptimeCheckServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewUptimeCheckServiceClient(cc grpc.ClientConnInterface) UptimeCheckServiceClient { + return &uptimeCheckServiceClient{cc} +} + +func (c *uptimeCheckServiceClient) ListUptimeCheckConfigs(ctx context.Context, in *ListUptimeCheckConfigsRequest, opts ...grpc.CallOption) (*ListUptimeCheckConfigsResponse, error) { + out := new(ListUptimeCheckConfigsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) GetUptimeCheckConfig(ctx context.Context, in *GetUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) { + out := new(UptimeCheckConfig) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) CreateUptimeCheckConfig(ctx context.Context, in *CreateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) { + out := new(UptimeCheckConfig) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) UpdateUptimeCheckConfig(ctx context.Context, in *UpdateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) { + out := new(UptimeCheckConfig) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error) { + out := new(ListUptimeCheckIpsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// UptimeCheckServiceServer is the server API for UptimeCheckService service. +type UptimeCheckServiceServer interface { + // Lists the existing valid Uptime check configurations for the project + // (leaving out any invalid configurations). + ListUptimeCheckConfigs(context.Context, *ListUptimeCheckConfigsRequest) (*ListUptimeCheckConfigsResponse, error) + // Gets a single Uptime check configuration. + GetUptimeCheckConfig(context.Context, *GetUptimeCheckConfigRequest) (*UptimeCheckConfig, error) + // Creates a new Uptime check configuration. + CreateUptimeCheckConfig(context.Context, *CreateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) + // Updates an Uptime check configuration. You can either replace the entire + // configuration with a new one or replace only certain fields in the current + // configuration by specifying the fields to be updated via `updateMask`. + // Returns the updated configuration. + UpdateUptimeCheckConfig(context.Context, *UpdateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) + // Deletes an Uptime check configuration. Note that this method will fail + // if the Uptime check configuration is referenced by an alert policy or + // other dependent configs that would be rendered invalid by the deletion. + DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*emptypb.Empty, error) + // Returns the list of IP addresses that checkers run from + ListUptimeCheckIps(context.Context, *ListUptimeCheckIpsRequest) (*ListUptimeCheckIpsResponse, error) +} + +// UnimplementedUptimeCheckServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedUptimeCheckServiceServer struct { +} + +func (*UnimplementedUptimeCheckServiceServer) ListUptimeCheckConfigs(context.Context, *ListUptimeCheckConfigsRequest) (*ListUptimeCheckConfigsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListUptimeCheckConfigs not implemented") +} +func (*UnimplementedUptimeCheckServiceServer) GetUptimeCheckConfig(context.Context, *GetUptimeCheckConfigRequest) (*UptimeCheckConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetUptimeCheckConfig not implemented") +} +func (*UnimplementedUptimeCheckServiceServer) CreateUptimeCheckConfig(context.Context, *CreateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateUptimeCheckConfig not implemented") +} +func (*UnimplementedUptimeCheckServiceServer) UpdateUptimeCheckConfig(context.Context, *UpdateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateUptimeCheckConfig not implemented") +} +func (*UnimplementedUptimeCheckServiceServer) DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteUptimeCheckConfig not implemented") +} +func (*UnimplementedUptimeCheckServiceServer) ListUptimeCheckIps(context.Context, *ListUptimeCheckIpsRequest) (*ListUptimeCheckIpsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListUptimeCheckIps not implemented") +} + +func RegisterUptimeCheckServiceServer(s *grpc.Server, srv UptimeCheckServiceServer) { + s.RegisterService(&_UptimeCheckService_serviceDesc, srv) +} + +func _UptimeCheckService_ListUptimeCheckConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListUptimeCheckConfigsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).ListUptimeCheckConfigs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).ListUptimeCheckConfigs(ctx, req.(*ListUptimeCheckConfigsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_GetUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).GetUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).GetUptimeCheckConfig(ctx, req.(*GetUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_CreateUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(UptimeCheckServiceServer).CreateUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).CreateUptimeCheckConfig(ctx, req.(*CreateUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_UpdateUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).UpdateUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).UpdateUptimeCheckConfig(ctx, req.(*UpdateUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_DeleteUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).DeleteUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).DeleteUptimeCheckConfig(ctx, req.(*DeleteUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_ListUptimeCheckIps_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListUptimeCheckIpsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).ListUptimeCheckIps(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).ListUptimeCheckIps(ctx, req.(*ListUptimeCheckIpsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _UptimeCheckService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.UptimeCheckService", + HandlerType: (*UptimeCheckServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListUptimeCheckConfigs", + Handler: _UptimeCheckService_ListUptimeCheckConfigs_Handler, + }, + { + MethodName: "GetUptimeCheckConfig", + Handler: _UptimeCheckService_GetUptimeCheckConfig_Handler, + }, + { + MethodName: "CreateUptimeCheckConfig", + Handler: _UptimeCheckService_CreateUptimeCheckConfig_Handler, + }, + { + MethodName: "UpdateUptimeCheckConfig", + Handler: _UptimeCheckService_UpdateUptimeCheckConfig_Handler, + }, + { + MethodName: "DeleteUptimeCheckConfig", + Handler: _UptimeCheckService_DeleteUptimeCheckConfig_Handler, + }, + { + MethodName: "ListUptimeCheckIps", + Handler: 
_UptimeCheckService_ListUptimeCheckIps_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/uptime_service.proto", +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go new file mode 100644 index 0000000000..6f7fe5d7c4 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go @@ -0,0 +1,618 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" +) + +var newNotificationChannelClientHook clientHook + +// NotificationChannelCallOptions contains the retry settings for each method of NotificationChannelClient. +type NotificationChannelCallOptions struct { + ListNotificationChannelDescriptors []gax.CallOption + GetNotificationChannelDescriptor []gax.CallOption + ListNotificationChannels []gax.CallOption + GetNotificationChannel []gax.CallOption + CreateNotificationChannel []gax.CallOption + UpdateNotificationChannel []gax.CallOption + DeleteNotificationChannel []gax.CallOption + SendNotificationChannelVerificationCode []gax.CallOption + GetNotificationChannelVerificationCode []gax.CallOption + VerifyNotificationChannel []gax.CallOption +} + +func defaultNotificationChannelGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultNotificationChannelCallOptions() *NotificationChannelCallOptions { + return &NotificationChannelCallOptions{ + ListNotificationChannelDescriptors: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetNotificationChannelDescriptor: []gax.CallOption{ + gax.WithTimeout(30000 * 
time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListNotificationChannels: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetNotificationChannel: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateNotificationChannel: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + UpdateNotificationChannel: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + DeleteNotificationChannel: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + SendNotificationChannelVerificationCode: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + GetNotificationChannelVerificationCode: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + VerifyNotificationChannel: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + } +} + +// internalNotificationChannelClient is an interface that defines the methods available from Cloud Monitoring API. 
+type internalNotificationChannelClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + ListNotificationChannelDescriptors(context.Context, *monitoringpb.ListNotificationChannelDescriptorsRequest, ...gax.CallOption) *NotificationChannelDescriptorIterator + GetNotificationChannelDescriptor(context.Context, *monitoringpb.GetNotificationChannelDescriptorRequest, ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error) + ListNotificationChannels(context.Context, *monitoringpb.ListNotificationChannelsRequest, ...gax.CallOption) *NotificationChannelIterator + GetNotificationChannel(context.Context, *monitoringpb.GetNotificationChannelRequest, ...gax.CallOption) (*monitoringpb.NotificationChannel, error) + CreateNotificationChannel(context.Context, *monitoringpb.CreateNotificationChannelRequest, ...gax.CallOption) (*monitoringpb.NotificationChannel, error) + UpdateNotificationChannel(context.Context, *monitoringpb.UpdateNotificationChannelRequest, ...gax.CallOption) (*monitoringpb.NotificationChannel, error) + DeleteNotificationChannel(context.Context, *monitoringpb.DeleteNotificationChannelRequest, ...gax.CallOption) error + SendNotificationChannelVerificationCode(context.Context, *monitoringpb.SendNotificationChannelVerificationCodeRequest, ...gax.CallOption) error + GetNotificationChannelVerificationCode(context.Context, *monitoringpb.GetNotificationChannelVerificationCodeRequest, ...gax.CallOption) (*monitoringpb.GetNotificationChannelVerificationCodeResponse, error) + VerifyNotificationChannel(context.Context, *monitoringpb.VerifyNotificationChannelRequest, ...gax.CallOption) (*monitoringpb.NotificationChannel, error) +} + +// NotificationChannelClient is a client for interacting with Cloud Monitoring API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The Notification Channel API provides access to configuration that +// controls how messages related to incidents are sent. +type NotificationChannelClient struct { + // The internal transport-dependent client. + internalClient internalNotificationChannelClient + + // The call options for this service. + CallOptions *NotificationChannelCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *NotificationChannelClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *NotificationChannelClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *NotificationChannelClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// ListNotificationChannelDescriptors lists the descriptors for supported channel types. The use of descriptors +// makes it possible for new channel types to be dynamically added. 
+func (c *NotificationChannelClient) ListNotificationChannelDescriptors(ctx context.Context, req *monitoringpb.ListNotificationChannelDescriptorsRequest, opts ...gax.CallOption) *NotificationChannelDescriptorIterator { + return c.internalClient.ListNotificationChannelDescriptors(ctx, req, opts...) +} + +// GetNotificationChannelDescriptor gets a single channel descriptor. The descriptor indicates which fields +// are expected / permitted for a notification channel of the given type. +func (c *NotificationChannelClient) GetNotificationChannelDescriptor(ctx context.Context, req *monitoringpb.GetNotificationChannelDescriptorRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error) { + return c.internalClient.GetNotificationChannelDescriptor(ctx, req, opts...) +} + +// ListNotificationChannels lists the notification channels that have been created for the project. +// To list the types of notification channels that are supported, use +// the ListNotificationChannelDescriptors method. +func (c *NotificationChannelClient) ListNotificationChannels(ctx context.Context, req *monitoringpb.ListNotificationChannelsRequest, opts ...gax.CallOption) *NotificationChannelIterator { + return c.internalClient.ListNotificationChannels(ctx, req, opts...) +} + +// GetNotificationChannel gets a single notification channel. The channel includes the relevant +// configuration details with which the channel was created. However, the +// response may truncate or omit passwords, API keys, or other private key +// matter and thus the response may not be 100% identical to the information +// that was supplied in the call to the create method. +func (c *NotificationChannelClient) GetNotificationChannel(ctx context.Context, req *monitoringpb.GetNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + return c.internalClient.GetNotificationChannel(ctx, req, opts...) +} + +// CreateNotificationChannel creates a new notification channel, representing a single notification +// endpoint such as an email address, SMS number, or PagerDuty service. +// +// Design your application to single-thread API calls that modify the state of +// notification channels in a single project. This includes calls to +// CreateNotificationChannel, DeleteNotificationChannel and +// UpdateNotificationChannel. +func (c *NotificationChannelClient) CreateNotificationChannel(ctx context.Context, req *monitoringpb.CreateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + return c.internalClient.CreateNotificationChannel(ctx, req, opts...) +} + +// UpdateNotificationChannel updates a notification channel. Fields not specified in the field mask +// remain unchanged. +// +// Design your application to single-thread API calls that modify the state of +// notification channels in a single project. This includes calls to +// CreateNotificationChannel, DeleteNotificationChannel and +// UpdateNotificationChannel. +func (c *NotificationChannelClient) UpdateNotificationChannel(ctx context.Context, req *monitoringpb.UpdateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + return c.internalClient.UpdateNotificationChannel(ctx, req, opts...) +} + +// DeleteNotificationChannel deletes a notification channel. +// +// Design your application to single-thread API calls that modify the state of +// notification channels in a single project. 
This includes calls to +// CreateNotificationChannel, DeleteNotificationChannel and +// UpdateNotificationChannel. +func (c *NotificationChannelClient) DeleteNotificationChannel(ctx context.Context, req *monitoringpb.DeleteNotificationChannelRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteNotificationChannel(ctx, req, opts...) +} + +// SendNotificationChannelVerificationCode causes a verification code to be delivered to the channel. The code +// can then be supplied in VerifyNotificationChannel to verify the channel. +func (c *NotificationChannelClient) SendNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.SendNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) error { + return c.internalClient.SendNotificationChannelVerificationCode(ctx, req, opts...) +} + +// GetNotificationChannelVerificationCode requests a verification code for an already verified channel that can then +// be used in a call to VerifyNotificationChannel() on a different channel +// with an equivalent identity in the same or in a different project. This +// makes it possible to copy a channel between projects without requiring +// manual reverification of the channel. If the channel is not in the +// verified state, this method will fail (in other words, this may only be +// used if the SendNotificationChannelVerificationCode and +// VerifyNotificationChannel paths have already been used to put the given +// channel into the verified state). +// +// There is no guarantee that the verification codes returned by this method +// will be of a similar structure or form as the ones that are delivered +// to the channel via SendNotificationChannelVerificationCode; while +// VerifyNotificationChannel() will recognize both the codes delivered via +// SendNotificationChannelVerificationCode() and returned from +// GetNotificationChannelVerificationCode(), it is typically the case that +// the verification codes delivered via +// SendNotificationChannelVerificationCode() will be shorter and also +// have a shorter expiration (e.g. codes such as “G-123456”) whereas +// GetVerificationCode() will typically return a much longer, websafe base +// 64 encoded string that has a longer expiration time. +func (c *NotificationChannelClient) GetNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.GetNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) (*monitoringpb.GetNotificationChannelVerificationCodeResponse, error) { + return c.internalClient.GetNotificationChannelVerificationCode(ctx, req, opts...) +} + +// VerifyNotificationChannel verifies a NotificationChannel by proving receipt of the code +// delivered to the channel as a result of calling +// SendNotificationChannelVerificationCode. +func (c *NotificationChannelClient) VerifyNotificationChannel(ctx context.Context, req *monitoringpb.VerifyNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + return c.internalClient.VerifyNotificationChannel(ctx, req, opts...) +} + +// notificationChannelGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type notificationChannelGRPCClient struct { + // Connection pool of gRPC connections to the service. 
+ connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing NotificationChannelClient + CallOptions **NotificationChannelCallOptions + + // The gRPC API client. + notificationChannelClient monitoringpb.NotificationChannelServiceClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewNotificationChannelClient creates a new notification channel service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// The Notification Channel API provides access to configuration that +// controls how messages related to incidents are sent. +func NewNotificationChannelClient(ctx context.Context, opts ...option.ClientOption) (*NotificationChannelClient, error) { + clientOpts := defaultNotificationChannelGRPCClientOptions() + if newNotificationChannelClientHook != nil { + hookOpts, err := newNotificationChannelClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := NotificationChannelClient{CallOptions: defaultNotificationChannelCallOptions()} + + c := &notificationChannelGRPCClient{ + connPool: connPool, + notificationChannelClient: monitoringpb.NewNotificationChannelServiceClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *notificationChannelGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *notificationChannelGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *notificationChannelGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *notificationChannelGRPCClient) ListNotificationChannelDescriptors(ctx context.Context, req *monitoringpb.ListNotificationChannelDescriptorsRequest, opts ...gax.CallOption) *NotificationChannelDescriptorIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListNotificationChannelDescriptors[0:len((*c.CallOptions).ListNotificationChannelDescriptors):len((*c.CallOptions).ListNotificationChannelDescriptors)], opts...)
+ it := &NotificationChannelDescriptorIterator{} + req = proto.Clone(req).(*monitoringpb.ListNotificationChannelDescriptorsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannelDescriptor, string, error) { + resp := &monitoringpb.ListNotificationChannelDescriptorsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.ListNotificationChannelDescriptors(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetChannelDescriptors(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *notificationChannelGRPCClient) GetNotificationChannelDescriptor(ctx context.Context, req *monitoringpb.GetNotificationChannelDescriptorRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetNotificationChannelDescriptor[0:len((*c.CallOptions).GetNotificationChannelDescriptor):len((*c.CallOptions).GetNotificationChannelDescriptor)], opts...) + var resp *monitoringpb.NotificationChannelDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.GetNotificationChannelDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *notificationChannelGRPCClient) ListNotificationChannels(ctx context.Context, req *monitoringpb.ListNotificationChannelsRequest, opts ...gax.CallOption) *NotificationChannelIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListNotificationChannels[0:len((*c.CallOptions).ListNotificationChannels):len((*c.CallOptions).ListNotificationChannels)], opts...) + it := &NotificationChannelIterator{} + req = proto.Clone(req).(*monitoringpb.ListNotificationChannelsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannel, string, error) { + resp := &monitoringpb.ListNotificationChannelsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.ListNotificationChannels(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetNotificationChannels(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *notificationChannelGRPCClient) GetNotificationChannel(ctx context.Context, req *monitoringpb.GetNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetNotificationChannel[0:len((*c.CallOptions).GetNotificationChannel):len((*c.CallOptions).GetNotificationChannel)], opts...) + var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.GetNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *notificationChannelGRPCClient) CreateNotificationChannel(ctx context.Context, req *monitoringpb.CreateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateNotificationChannel[0:len((*c.CallOptions).CreateNotificationChannel):len((*c.CallOptions).CreateNotificationChannel)], opts...) + var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.CreateNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *notificationChannelGRPCClient) UpdateNotificationChannel(ctx context.Context, req *monitoringpb.UpdateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "notification_channel.name", url.QueryEscape(req.GetNotificationChannel().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateNotificationChannel[0:len((*c.CallOptions).UpdateNotificationChannel):len((*c.CallOptions).UpdateNotificationChannel)], opts...) + var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.UpdateNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *notificationChannelGRPCClient) DeleteNotificationChannel(ctx context.Context, req *monitoringpb.DeleteNotificationChannelRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteNotificationChannel[0:len((*c.CallOptions).DeleteNotificationChannel):len((*c.CallOptions).DeleteNotificationChannel)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.notificationChannelClient.DeleteNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *notificationChannelGRPCClient) SendNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.SendNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).SendNotificationChannelVerificationCode[0:len((*c.CallOptions).SendNotificationChannelVerificationCode):len((*c.CallOptions).SendNotificationChannelVerificationCode)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.notificationChannelClient.SendNotificationChannelVerificationCode(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *notificationChannelGRPCClient) GetNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.GetNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) (*monitoringpb.GetNotificationChannelVerificationCodeResponse, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetNotificationChannelVerificationCode[0:len((*c.CallOptions).GetNotificationChannelVerificationCode):len((*c.CallOptions).GetNotificationChannelVerificationCode)], opts...) + var resp *monitoringpb.GetNotificationChannelVerificationCodeResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.GetNotificationChannelVerificationCode(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *notificationChannelGRPCClient) VerifyNotificationChannel(ctx context.Context, req *monitoringpb.VerifyNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).VerifyNotificationChannel[0:len((*c.CallOptions).VerifyNotificationChannel):len((*c.CallOptions).VerifyNotificationChannel)], opts...) 
+ var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.VerifyNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go new file mode 100644 index 0000000000..3c111637e1 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go @@ -0,0 +1,233 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + "google.golang.org/grpc" + "google.golang.org/protobuf/proto" +) + +var newQueryClientHook clientHook + +// QueryCallOptions contains the retry settings for each method of QueryClient. +type QueryCallOptions struct { + QueryTimeSeries []gax.CallOption +} + +func defaultQueryGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultQueryCallOptions() *QueryCallOptions { + return &QueryCallOptions{ + QueryTimeSeries: []gax.CallOption{}, + } +} + +// internalQueryClient is an interface that defines the methods available from Cloud Monitoring API. +type internalQueryClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + QueryTimeSeries(context.Context, *monitoringpb.QueryTimeSeriesRequest, ...gax.CallOption) *TimeSeriesDataIterator +} + +// QueryClient is a client for interacting with Cloud Monitoring API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The QueryService API is used to manage time series data in Cloud +// Monitoring. Time series data is a collection of data points that describes +// the time-varying values of a metric. +type QueryClient struct { + // The internal transport-dependent client. 
+ internalClient internalQueryClient + + // The call options for this service. + CallOptions *QueryCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *QueryClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *QueryClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *QueryClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// QueryTimeSeries queries time series using Monitoring Query Language. +func (c *QueryClient) QueryTimeSeries(ctx context.Context, req *monitoringpb.QueryTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesDataIterator { + return c.internalClient.QueryTimeSeries(ctx, req, opts...) +} + +// queryGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type queryGRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing QueryClient + CallOptions **QueryCallOptions + + // The gRPC API client. + queryClient monitoringpb.QueryServiceClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewQueryClient creates a new query service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// The QueryService API is used to manage time series data in Cloud +// Monitoring. Time series data is a collection of data points that describes +// the time-varying values of a metric. +func NewQueryClient(ctx context.Context, opts ...option.ClientOption) (*QueryClient, error) { + clientOpts := defaultQueryGRPCClientOptions() + if newQueryClientHook != nil { + hookOpts, err := newQueryClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := QueryClient{CallOptions: defaultQueryCallOptions()} + + c := &queryGRPCClient{ + connPool: connPool, + queryClient: monitoringpb.NewQueryServiceClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *queryGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *queryGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *queryGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *queryGRPCClient) QueryTimeSeries(ctx context.Context, req *monitoringpb.QueryTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesDataIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).QueryTimeSeries[0:len((*c.CallOptions).QueryTimeSeries):len((*c.CallOptions).QueryTimeSeries)], opts...) + it := &TimeSeriesDataIterator{} + req = proto.Clone(req).(*monitoringpb.QueryTimeSeriesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.TimeSeriesData, string, error) { + resp := &monitoringpb.QueryTimeSeriesResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.queryClient.QueryTimeSeries(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetTimeSeriesData(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go new file mode 100644 index 0000000000..7776c425f9 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go @@ -0,0 +1,565 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. 
+ +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" +) + +var newServiceMonitoringClientHook clientHook + +// ServiceMonitoringCallOptions contains the retry settings for each method of ServiceMonitoringClient. +type ServiceMonitoringCallOptions struct { + CreateService []gax.CallOption + GetService []gax.CallOption + ListServices []gax.CallOption + UpdateService []gax.CallOption + DeleteService []gax.CallOption + CreateServiceLevelObjective []gax.CallOption + GetServiceLevelObjective []gax.CallOption + ListServiceLevelObjectives []gax.CallOption + UpdateServiceLevelObjective []gax.CallOption + DeleteServiceLevelObjective []gax.CallOption +} + +func defaultServiceMonitoringGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultServiceMonitoringCallOptions() *ServiceMonitoringCallOptions { + return &ServiceMonitoringCallOptions{ + CreateService: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + GetService: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListServices: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateService: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + DeleteService: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateServiceLevelObjective: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + GetServiceLevelObjective: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListServiceLevelObjectives: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, 
gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateServiceLevelObjective: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + DeleteServiceLevelObjective: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + } +} + +// internalServiceMonitoringClient is an interface that defines the methods available from Cloud Monitoring API. +type internalServiceMonitoringClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + CreateService(context.Context, *monitoringpb.CreateServiceRequest, ...gax.CallOption) (*monitoringpb.Service, error) + GetService(context.Context, *monitoringpb.GetServiceRequest, ...gax.CallOption) (*monitoringpb.Service, error) + ListServices(context.Context, *monitoringpb.ListServicesRequest, ...gax.CallOption) *ServiceIterator + UpdateService(context.Context, *monitoringpb.UpdateServiceRequest, ...gax.CallOption) (*monitoringpb.Service, error) + DeleteService(context.Context, *monitoringpb.DeleteServiceRequest, ...gax.CallOption) error + CreateServiceLevelObjective(context.Context, *monitoringpb.CreateServiceLevelObjectiveRequest, ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) + GetServiceLevelObjective(context.Context, *monitoringpb.GetServiceLevelObjectiveRequest, ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) + ListServiceLevelObjectives(context.Context, *monitoringpb.ListServiceLevelObjectivesRequest, ...gax.CallOption) *ServiceLevelObjectiveIterator + UpdateServiceLevelObjective(context.Context, *monitoringpb.UpdateServiceLevelObjectiveRequest, ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) + DeleteServiceLevelObjective(context.Context, *monitoringpb.DeleteServiceLevelObjectiveRequest, ...gax.CallOption) error +} + +// ServiceMonitoringClient is a client for interacting with Cloud Monitoring API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The Cloud Monitoring Service-Oriented Monitoring API has endpoints for +// managing and querying aspects of a Metrics Scope’s services. These include +// the Service's monitored resources, its Service-Level Objectives, and a +// taxonomy of categorized Health Metrics. +type ServiceMonitoringClient struct { + // The internal transport-dependent client. + internalClient internalServiceMonitoringClient + + // The call options for this service. + CallOptions *ServiceMonitoringCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ServiceMonitoringClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ServiceMonitoringClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. 
+func (c *ServiceMonitoringClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// CreateService create a Service. +func (c *ServiceMonitoringClient) CreateService(ctx context.Context, req *monitoringpb.CreateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) { + return c.internalClient.CreateService(ctx, req, opts...) +} + +// GetService get the named Service. +func (c *ServiceMonitoringClient) GetService(ctx context.Context, req *monitoringpb.GetServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) { + return c.internalClient.GetService(ctx, req, opts...) +} + +// ListServices list Services for this Metrics Scope. +func (c *ServiceMonitoringClient) ListServices(ctx context.Context, req *monitoringpb.ListServicesRequest, opts ...gax.CallOption) *ServiceIterator { + return c.internalClient.ListServices(ctx, req, opts...) +} + +// UpdateService update this Service. +func (c *ServiceMonitoringClient) UpdateService(ctx context.Context, req *monitoringpb.UpdateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) { + return c.internalClient.UpdateService(ctx, req, opts...) +} + +// DeleteService soft delete this Service. +func (c *ServiceMonitoringClient) DeleteService(ctx context.Context, req *monitoringpb.DeleteServiceRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteService(ctx, req, opts...) +} + +// CreateServiceLevelObjective create a ServiceLevelObjective for the given Service. +func (c *ServiceMonitoringClient) CreateServiceLevelObjective(ctx context.Context, req *monitoringpb.CreateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) { + return c.internalClient.CreateServiceLevelObjective(ctx, req, opts...) +} + +// GetServiceLevelObjective get a ServiceLevelObjective by name. +func (c *ServiceMonitoringClient) GetServiceLevelObjective(ctx context.Context, req *monitoringpb.GetServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) { + return c.internalClient.GetServiceLevelObjective(ctx, req, opts...) +} + +// ListServiceLevelObjectives list the ServiceLevelObjectives for the given Service. +func (c *ServiceMonitoringClient) ListServiceLevelObjectives(ctx context.Context, req *monitoringpb.ListServiceLevelObjectivesRequest, opts ...gax.CallOption) *ServiceLevelObjectiveIterator { + return c.internalClient.ListServiceLevelObjectives(ctx, req, opts...) +} + +// UpdateServiceLevelObjective update the given ServiceLevelObjective. +func (c *ServiceMonitoringClient) UpdateServiceLevelObjective(ctx context.Context, req *monitoringpb.UpdateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) { + return c.internalClient.UpdateServiceLevelObjective(ctx, req, opts...) +} + +// DeleteServiceLevelObjective delete the given ServiceLevelObjective. +func (c *ServiceMonitoringClient) DeleteServiceLevelObjective(ctx context.Context, req *monitoringpb.DeleteServiceLevelObjectiveRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteServiceLevelObjective(ctx, req, opts...) +} + +// serviceMonitoringGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type serviceMonitoringGRPCClient struct { + // Connection pool of gRPC connections to the service. 
+ connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing ServiceMonitoringClient + CallOptions **ServiceMonitoringCallOptions + + // The gRPC API client. + serviceMonitoringClient monitoringpb.ServiceMonitoringServiceClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewServiceMonitoringClient creates a new service monitoring service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// The Cloud Monitoring Service-Oriented Monitoring API has endpoints for +// managing and querying aspects of a Metrics Scope’s services. These include +// the Service's monitored resources, its Service-Level Objectives, and a +// taxonomy of categorized Health Metrics. +func NewServiceMonitoringClient(ctx context.Context, opts ...option.ClientOption) (*ServiceMonitoringClient, error) { + clientOpts := defaultServiceMonitoringGRPCClientOptions() + if newServiceMonitoringClientHook != nil { + hookOpts, err := newServiceMonitoringClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := ServiceMonitoringClient{CallOptions: defaultServiceMonitoringCallOptions()} + + c := &serviceMonitoringGRPCClient{ + connPool: connPool, + serviceMonitoringClient: monitoringpb.NewServiceMonitoringServiceClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *serviceMonitoringGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *serviceMonitoringGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *serviceMonitoringGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *serviceMonitoringGRPCClient) CreateService(ctx context.Context, req *monitoringpb.CreateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateService[0:len((*c.CallOptions).CreateService):len((*c.CallOptions).CreateService)], opts...) + var resp *monitoringpb.Service + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.CreateService(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *serviceMonitoringGRPCClient) GetService(ctx context.Context, req *monitoringpb.GetServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetService[0:len((*c.CallOptions).GetService):len((*c.CallOptions).GetService)], opts...) + var resp *monitoringpb.Service + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.GetService(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *serviceMonitoringGRPCClient) ListServices(ctx context.Context, req *monitoringpb.ListServicesRequest, opts ...gax.CallOption) *ServiceIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListServices[0:len((*c.CallOptions).ListServices):len((*c.CallOptions).ListServices)], opts...) + it := &ServiceIterator{} + req = proto.Clone(req).(*monitoringpb.ListServicesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Service, string, error) { + resp := &monitoringpb.ListServicesResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.ListServices(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetServices(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *serviceMonitoringGRPCClient) UpdateService(ctx context.Context, req *monitoringpb.UpdateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "service.name", url.QueryEscape(req.GetService().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateService[0:len((*c.CallOptions).UpdateService):len((*c.CallOptions).UpdateService)], opts...) + var resp *monitoringpb.Service + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.UpdateService(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *serviceMonitoringGRPCClient) DeleteService(ctx context.Context, req *monitoringpb.DeleteServiceRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteService[0:len((*c.CallOptions).DeleteService):len((*c.CallOptions).DeleteService)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.serviceMonitoringClient.DeleteService(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *serviceMonitoringGRPCClient) CreateServiceLevelObjective(ctx context.Context, req *monitoringpb.CreateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateServiceLevelObjective[0:len((*c.CallOptions).CreateServiceLevelObjective):len((*c.CallOptions).CreateServiceLevelObjective)], opts...) + var resp *monitoringpb.ServiceLevelObjective + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.CreateServiceLevelObjective(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *serviceMonitoringGRPCClient) GetServiceLevelObjective(ctx context.Context, req *monitoringpb.GetServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetServiceLevelObjective[0:len((*c.CallOptions).GetServiceLevelObjective):len((*c.CallOptions).GetServiceLevelObjective)], opts...) + var resp *monitoringpb.ServiceLevelObjective + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.GetServiceLevelObjective(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *serviceMonitoringGRPCClient) ListServiceLevelObjectives(ctx context.Context, req *monitoringpb.ListServiceLevelObjectivesRequest, opts ...gax.CallOption) *ServiceLevelObjectiveIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListServiceLevelObjectives[0:len((*c.CallOptions).ListServiceLevelObjectives):len((*c.CallOptions).ListServiceLevelObjectives)], opts...) 
+ it := &ServiceLevelObjectiveIterator{} + req = proto.Clone(req).(*monitoringpb.ListServiceLevelObjectivesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.ServiceLevelObjective, string, error) { + resp := &monitoringpb.ListServiceLevelObjectivesResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.ListServiceLevelObjectives(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetServiceLevelObjectives(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *serviceMonitoringGRPCClient) UpdateServiceLevelObjective(ctx context.Context, req *monitoringpb.UpdateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "service_level_objective.name", url.QueryEscape(req.GetServiceLevelObjective().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateServiceLevelObjective[0:len((*c.CallOptions).UpdateServiceLevelObjective):len((*c.CallOptions).UpdateServiceLevelObjective)], opts...) + var resp *monitoringpb.ServiceLevelObjective + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.UpdateServiceLevelObjective(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *serviceMonitoringGRPCClient) DeleteServiceLevelObjective(ctx context.Context, req *monitoringpb.DeleteServiceLevelObjectiveRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteServiceLevelObjective[0:len((*c.CallOptions).DeleteServiceLevelObjective):len((*c.CallOptions).DeleteServiceLevelObjective)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.serviceMonitoringClient.DeleteServiceLevelObjective(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go new file mode 100644 index 0000000000..32cad577e3 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go @@ -0,0 +1,343 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" +) + +var newSnoozeClientHook clientHook + +// SnoozeCallOptions contains the retry settings for each method of SnoozeClient. +type SnoozeCallOptions struct { + CreateSnooze []gax.CallOption + ListSnoozes []gax.CallOption + GetSnooze []gax.CallOption + UpdateSnooze []gax.CallOption +} + +func defaultSnoozeGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultSnoozeCallOptions() *SnoozeCallOptions { + return &SnoozeCallOptions{ + CreateSnooze: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + ListSnoozes: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetSnooze: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateSnooze: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + } +} + +// internalSnoozeClient is an interface that defines the methods available from Cloud Monitoring API. +type internalSnoozeClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + CreateSnooze(context.Context, *monitoringpb.CreateSnoozeRequest, ...gax.CallOption) (*monitoringpb.Snooze, error) + ListSnoozes(context.Context, *monitoringpb.ListSnoozesRequest, ...gax.CallOption) *SnoozeIterator + GetSnooze(context.Context, *monitoringpb.GetSnoozeRequest, ...gax.CallOption) (*monitoringpb.Snooze, error) + UpdateSnooze(context.Context, *monitoringpb.UpdateSnoozeRequest, ...gax.CallOption) (*monitoringpb.Snooze, error) +} + +// SnoozeClient is a client for interacting with Cloud Monitoring API. 
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The SnoozeService API is used to temporarily prevent an alert policy from +// generating alerts. A Snooze is a description of the criteria under which one +// or more alert policies should not fire alerts for the specified duration. +type SnoozeClient struct { + // The internal transport-dependent client. + internalClient internalSnoozeClient + + // The call options for this service. + CallOptions *SnoozeCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *SnoozeClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *SnoozeClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *SnoozeClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// CreateSnooze creates a Snooze that will prevent alerts, which match the provided +// criteria, from being opened. The Snooze applies for a specific time +// interval. +func (c *SnoozeClient) CreateSnooze(ctx context.Context, req *monitoringpb.CreateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) { + return c.internalClient.CreateSnooze(ctx, req, opts...) +} + +// ListSnoozes lists the Snoozes associated with a project. Can optionally pass in +// filter, which specifies predicates to match Snoozes. +func (c *SnoozeClient) ListSnoozes(ctx context.Context, req *monitoringpb.ListSnoozesRequest, opts ...gax.CallOption) *SnoozeIterator { + return c.internalClient.ListSnoozes(ctx, req, opts...) +} + +// GetSnooze retrieves a Snooze by name. +func (c *SnoozeClient) GetSnooze(ctx context.Context, req *monitoringpb.GetSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) { + return c.internalClient.GetSnooze(ctx, req, opts...) +} + +// UpdateSnooze updates a Snooze, identified by its name, with the parameters in the +// given Snooze object. +func (c *SnoozeClient) UpdateSnooze(ctx context.Context, req *monitoringpb.UpdateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) { + return c.internalClient.UpdateSnooze(ctx, req, opts...) +} + +// snoozeGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type snoozeGRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing SnoozeClient + CallOptions **SnoozeCallOptions + + // The gRPC API client. + snoozeClient monitoringpb.SnoozeServiceClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewSnoozeClient creates a new snooze service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. 
+// +// The SnoozeService API is used to temporarily prevent an alert policy from +// generating alerts. A Snooze is a description of the criteria under which one +// or more alert policies should not fire alerts for the specified duration. +func NewSnoozeClient(ctx context.Context, opts ...option.ClientOption) (*SnoozeClient, error) { + clientOpts := defaultSnoozeGRPCClientOptions() + if newSnoozeClientHook != nil { + hookOpts, err := newSnoozeClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := SnoozeClient{CallOptions: defaultSnoozeCallOptions()} + + c := &snoozeGRPCClient{ + connPool: connPool, + snoozeClient: monitoringpb.NewSnoozeServiceClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *snoozeGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *snoozeGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *snoozeGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *snoozeGRPCClient) CreateSnooze(ctx context.Context, req *monitoringpb.CreateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateSnooze[0:len((*c.CallOptions).CreateSnooze):len((*c.CallOptions).CreateSnooze)], opts...) + var resp *monitoringpb.Snooze + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.snoozeClient.CreateSnooze(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *snoozeGRPCClient) ListSnoozes(ctx context.Context, req *monitoringpb.ListSnoozesRequest, opts ...gax.CallOption) *SnoozeIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListSnoozes[0:len((*c.CallOptions).ListSnoozes):len((*c.CallOptions).ListSnoozes)], opts...) 
+ it := &SnoozeIterator{} + req = proto.Clone(req).(*monitoringpb.ListSnoozesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Snooze, string, error) { + resp := &monitoringpb.ListSnoozesResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.snoozeClient.ListSnoozes(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetSnoozes(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *snoozeGRPCClient) GetSnooze(ctx context.Context, req *monitoringpb.GetSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetSnooze[0:len((*c.CallOptions).GetSnooze):len((*c.CallOptions).GetSnooze)], opts...) + var resp *monitoringpb.Snooze + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.snoozeClient.GetSnooze(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *snoozeGRPCClient) UpdateSnooze(ctx context.Context, req *monitoringpb.UpdateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "snooze.name", url.QueryEscape(req.GetSnooze().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateSnooze[0:len((*c.CallOptions).UpdateSnooze):len((*c.CallOptions).UpdateSnooze)], opts...) + var resp *monitoringpb.Snooze + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.snoozeClient.UpdateSnooze(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go new file mode 100644 index 0000000000..d381525137 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go @@ -0,0 +1,450 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
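For orientation, a minimal sketch of consuming the SnoozeClient defined above from application code; ListSnoozes returns a SnoozeIterator that is drained with the standard google.golang.org/api/iterator pattern, and the project name is a placeholder:

package main

import (
	"context"
	"fmt"
	"log"

	monitoring "cloud.google.com/go/monitoring/apiv3/v2"
	monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()

	c, err := monitoring.NewSnoozeClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// List every Snooze in the project; the iterator fetches pages lazily
	// using the page-size/page-token plumbing shown in ListSnoozes above.
	it := c.ListSnoozes(ctx, &monitoringpb.ListSnoozesRequest{
		Parent: "projects/my-project",
	})
	for {
		s, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(s.GetName(), s.GetDisplayName())
	}
}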
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" +) + +var newUptimeCheckClientHook clientHook + +// UptimeCheckCallOptions contains the retry settings for each method of UptimeCheckClient. +type UptimeCheckCallOptions struct { + ListUptimeCheckConfigs []gax.CallOption + GetUptimeCheckConfig []gax.CallOption + CreateUptimeCheckConfig []gax.CallOption + UpdateUptimeCheckConfig []gax.CallOption + DeleteUptimeCheckConfig []gax.CallOption + ListUptimeCheckIps []gax.CallOption +} + +func defaultUptimeCheckGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultUptimeCheckCallOptions() *UptimeCheckCallOptions { + return &UptimeCheckCallOptions{ + ListUptimeCheckConfigs: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetUptimeCheckConfig: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateUptimeCheckConfig: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + UpdateUptimeCheckConfig: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + }, + DeleteUptimeCheckConfig: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListUptimeCheckIps: []gax.CallOption{ + gax.WithTimeout(30000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + } +} + +// internalUptimeCheckClient is an interface that defines the methods available from Cloud Monitoring API. 
+type internalUptimeCheckClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + ListUptimeCheckConfigs(context.Context, *monitoringpb.ListUptimeCheckConfigsRequest, ...gax.CallOption) *UptimeCheckConfigIterator + GetUptimeCheckConfig(context.Context, *monitoringpb.GetUptimeCheckConfigRequest, ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) + CreateUptimeCheckConfig(context.Context, *monitoringpb.CreateUptimeCheckConfigRequest, ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) + UpdateUptimeCheckConfig(context.Context, *monitoringpb.UpdateUptimeCheckConfigRequest, ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) + DeleteUptimeCheckConfig(context.Context, *monitoringpb.DeleteUptimeCheckConfigRequest, ...gax.CallOption) error + ListUptimeCheckIps(context.Context, *monitoringpb.ListUptimeCheckIpsRequest, ...gax.CallOption) *UptimeCheckIpIterator +} + +// UptimeCheckClient is a client for interacting with Cloud Monitoring API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// The UptimeCheckService API is used to manage (list, create, delete, edit) +// Uptime check configurations in the Cloud Monitoring product. An Uptime +// check is a piece of configuration that determines which resources and +// services to monitor for availability. These configurations can also be +// configured interactively by navigating to the [Cloud console] +// (https://console.cloud.google.com (at https://console.cloud.google.com)), selecting the appropriate project, +// clicking on “Monitoring” on the left-hand side to navigate to Cloud +// Monitoring, and then clicking on “Uptime”. +type UptimeCheckClient struct { + // The internal transport-dependent client. + internalClient internalUptimeCheckClient + + // The call options for this service. + CallOptions *UptimeCheckCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *UptimeCheckClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *UptimeCheckClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *UptimeCheckClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// ListUptimeCheckConfigs lists the existing valid Uptime check configurations for the project +// (leaving out any invalid configurations). +func (c *UptimeCheckClient) ListUptimeCheckConfigs(ctx context.Context, req *monitoringpb.ListUptimeCheckConfigsRequest, opts ...gax.CallOption) *UptimeCheckConfigIterator { + return c.internalClient.ListUptimeCheckConfigs(ctx, req, opts...) +} + +// GetUptimeCheckConfig gets a single Uptime check configuration. +func (c *UptimeCheckClient) GetUptimeCheckConfig(ctx context.Context, req *monitoringpb.GetUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + return c.internalClient.GetUptimeCheckConfig(ctx, req, opts...) 
+} + +// CreateUptimeCheckConfig creates a new Uptime check configuration. +func (c *UptimeCheckClient) CreateUptimeCheckConfig(ctx context.Context, req *monitoringpb.CreateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + return c.internalClient.CreateUptimeCheckConfig(ctx, req, opts...) +} + +// UpdateUptimeCheckConfig updates an Uptime check configuration. You can either replace the entire +// configuration with a new one or replace only certain fields in the current +// configuration by specifying the fields to be updated via updateMask. +// Returns the updated configuration. +func (c *UptimeCheckClient) UpdateUptimeCheckConfig(ctx context.Context, req *monitoringpb.UpdateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + return c.internalClient.UpdateUptimeCheckConfig(ctx, req, opts...) +} + +// DeleteUptimeCheckConfig deletes an Uptime check configuration. Note that this method will fail +// if the Uptime check configuration is referenced by an alert policy or +// other dependent configs that would be rendered invalid by the deletion. +func (c *UptimeCheckClient) DeleteUptimeCheckConfig(ctx context.Context, req *monitoringpb.DeleteUptimeCheckConfigRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteUptimeCheckConfig(ctx, req, opts...) +} + +// ListUptimeCheckIps returns the list of IP addresses that checkers run from +func (c *UptimeCheckClient) ListUptimeCheckIps(ctx context.Context, req *monitoringpb.ListUptimeCheckIpsRequest, opts ...gax.CallOption) *UptimeCheckIpIterator { + return c.internalClient.ListUptimeCheckIps(ctx, req, opts...) +} + +// uptimeCheckGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type uptimeCheckGRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing UptimeCheckClient + CallOptions **UptimeCheckCallOptions + + // The gRPC API client. + uptimeCheckClient monitoringpb.UptimeCheckServiceClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewUptimeCheckClient creates a new uptime check service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// The UptimeCheckService API is used to manage (list, create, delete, edit) +// Uptime check configurations in the Cloud Monitoring product. An Uptime +// check is a piece of configuration that determines which resources and +// services to monitor for availability. These configurations can also be +// configured interactively by navigating to the [Cloud console] +// (https://console.cloud.google.com (at https://console.cloud.google.com)), selecting the appropriate project, +// clicking on “Monitoring” on the left-hand side to navigate to Cloud +// Monitoring, and then clicking on “Uptime”. +func NewUptimeCheckClient(ctx context.Context, opts ...option.ClientOption) (*UptimeCheckClient, error) { + clientOpts := defaultUptimeCheckGRPCClientOptions() + if newUptimeCheckClientHook != nil { + hookOpts, err := newUptimeCheckClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) 
+ } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := UptimeCheckClient{CallOptions: defaultUptimeCheckCallOptions()} + + c := &uptimeCheckGRPCClient{ + connPool: connPool, + uptimeCheckClient: monitoringpb.NewUptimeCheckServiceClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *uptimeCheckGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *uptimeCheckGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *uptimeCheckGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *uptimeCheckGRPCClient) ListUptimeCheckConfigs(ctx context.Context, req *monitoringpb.ListUptimeCheckConfigsRequest, opts ...gax.CallOption) *UptimeCheckConfigIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListUptimeCheckConfigs[0:len((*c.CallOptions).ListUptimeCheckConfigs):len((*c.CallOptions).ListUptimeCheckConfigs)], opts...) + it := &UptimeCheckConfigIterator{} + req = proto.Clone(req).(*monitoringpb.ListUptimeCheckConfigsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckConfig, string, error) { + resp := &monitoringpb.ListUptimeCheckConfigsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.ListUptimeCheckConfigs(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetUptimeCheckConfigs(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *uptimeCheckGRPCClient) GetUptimeCheckConfig(ctx context.Context, req *monitoringpb.GetUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) 
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetUptimeCheckConfig[0:len((*c.CallOptions).GetUptimeCheckConfig):len((*c.CallOptions).GetUptimeCheckConfig)], opts...) + var resp *monitoringpb.UptimeCheckConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.GetUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *uptimeCheckGRPCClient) CreateUptimeCheckConfig(ctx context.Context, req *monitoringpb.CreateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateUptimeCheckConfig[0:len((*c.CallOptions).CreateUptimeCheckConfig):len((*c.CallOptions).CreateUptimeCheckConfig)], opts...) + var resp *monitoringpb.UptimeCheckConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.CreateUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *uptimeCheckGRPCClient) UpdateUptimeCheckConfig(ctx context.Context, req *monitoringpb.UpdateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "uptime_check_config.name", url.QueryEscape(req.GetUptimeCheckConfig().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateUptimeCheckConfig[0:len((*c.CallOptions).UpdateUptimeCheckConfig):len((*c.CallOptions).UpdateUptimeCheckConfig)], opts...) + var resp *monitoringpb.UptimeCheckConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.UpdateUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *uptimeCheckGRPCClient) DeleteUptimeCheckConfig(ctx context.Context, req *monitoringpb.DeleteUptimeCheckConfigRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteUptimeCheckConfig[0:len((*c.CallOptions).DeleteUptimeCheckConfig):len((*c.CallOptions).DeleteUptimeCheckConfig)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.uptimeCheckClient.DeleteUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *uptimeCheckGRPCClient) ListUptimeCheckIps(ctx context.Context, req *monitoringpb.ListUptimeCheckIpsRequest, opts ...gax.CallOption) *UptimeCheckIpIterator { + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, c.xGoogHeaders...) + opts = append((*c.CallOptions).ListUptimeCheckIps[0:len((*c.CallOptions).ListUptimeCheckIps):len((*c.CallOptions).ListUptimeCheckIps)], opts...) 
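ListUptimeCheckIps is the one method in this client that sends no x-goog-request-params header, because its request carries no resource name, only paging fields. A rough usage sketch (the printed fields assume the usual UptimeCheckIp message and are illustrative only):

package main

import (
	"context"
	"fmt"
	"log"

	monitoring "cloud.google.com/go/monitoring/apiv3/v2"
	monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()

	c, err := monitoring.NewUptimeCheckClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// An empty request lists all checker IP ranges; paging is handled by
	// the iterator.
	it := c.ListUptimeCheckIps(ctx, &monitoringpb.ListUptimeCheckIpsRequest{})
	for {
		ip, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(ip.GetLocation(), ip.GetIpAddress())
	}
}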
+ it := &UptimeCheckIpIterator{} + req = proto.Clone(req).(*monitoringpb.ListUptimeCheckIpsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckIp, string, error) { + resp := &monitoringpb.ListUptimeCheckIpsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.ListUptimeCheckIps(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetUptimeCheckIps(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/version.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/version.go new file mode 100644 index 0000000000..accff0f5e4 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/version.go @@ -0,0 +1,23 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by gapicgen. DO NOT EDIT. + +package monitoring + +import "cloud.google.com/go/monitoring/internal" + +func init() { + versionClient = internal.Version +} diff --git a/vendor/cloud.google.com/go/monitoring/internal/version.go b/vendor/cloud.google.com/go/monitoring/internal/version.go new file mode 100644 index 0000000000..670f0797ee --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/internal/version.go @@ -0,0 +1,18 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +// Version is the current tagged release of the library. 
+const Version = "1.21.1" diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md index e9fb55585b..7b1b063367 100644 --- a/vendor/cloud.google.com/go/storage/CHANGES.md +++ b/vendor/cloud.google.com/go/storage/CHANGES.md @@ -1,6 +1,83 @@ # Changes +## [1.46.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.45.0...storage/v1.46.0) (2024-10-31) + +### Features + +* **storage:** Add grpc metrics experimental options ([#10984](https://github.com/googleapis/google-cloud-go/issues/10984)) ([5b7397b](https://github.com/googleapis/google-cloud-go/commit/5b7397b169176f030049e1511859a883422c774e)) + + +### Bug Fixes + +* **storage:** Skip only specific transport tests. ([#11016](https://github.com/googleapis/google-cloud-go/issues/11016)) ([d40fbff](https://github.com/googleapis/google-cloud-go/commit/d40fbff9c1984aeed0224a4ac93eb95c5af17126)) +* **storage:** Update google.golang.org/api to v0.203.0 ([8bb87d5](https://github.com/googleapis/google-cloud-go/commit/8bb87d56af1cba736e0fe243979723e747e5e11e)) +* **storage:** WARNING: On approximately Dec 1, 2024, an update to Protobuf will change service registration function signatures to use an interface instead of a concrete type in generated .pb.go files. This change is expected to affect very few if any users of this client library. For more information, see https://togithub.com/googleapis/google-cloud-go/issues/11020. ([2b8ca4b](https://github.com/googleapis/google-cloud-go/commit/2b8ca4b4127ce3025c7a21cc7247510e07cc5625)) + + +### Miscellaneous Chores + +* **storage/internal:** Remove notification, service account, and hmac RPCS. These API have been migrated to Storage Control and are available via the JSON API. ([#11008](https://github.com/googleapis/google-cloud-go/issues/11008)) ([e0759f4](https://github.com/googleapis/google-cloud-go/commit/e0759f46639b4c542e5b49e4dc81340d8e123370)) + +## [1.45.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.44.0...storage/v1.45.0) (2024-10-17) + + +### Features + +* **storage/internal:** Adds support for restore token ([70d82fe](https://github.com/googleapis/google-cloud-go/commit/70d82fe93f60f1075298a077ce1616f9ae7e13fe)) +* **storage:** Adding bucket-specific dynamicDelay ([#10987](https://github.com/googleapis/google-cloud-go/issues/10987)) ([a807a7e](https://github.com/googleapis/google-cloud-go/commit/a807a7e7f9fb002374407622c126102c5e61af82)) +* **storage:** Dynamic read request stall timeout ([#10958](https://github.com/googleapis/google-cloud-go/issues/10958)) ([a09f00e](https://github.com/googleapis/google-cloud-go/commit/a09f00eeecac82af98ae769bab284ee58a3a66cb)) + + +### Documentation + +* **storage:** Remove preview wording from NewGRPCClient ([#11002](https://github.com/googleapis/google-cloud-go/issues/11002)) ([40c3a5b](https://github.com/googleapis/google-cloud-go/commit/40c3a5b9c4cd4db2f1695e180419197b6a03ed7f)) + +## [1.44.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.43.0...storage/v1.44.0) (2024-10-03) + + +### Features + +* **storage/dataflux:** Add dataflux interface ([#10748](https://github.com/googleapis/google-cloud-go/issues/10748)) ([cb7b0a1](https://github.com/googleapis/google-cloud-go/commit/cb7b0a1b285de9d4182155a123747419232dd35f)) +* **storage/dataflux:** Add range_splitter [#10748](https://github.com/googleapis/google-cloud-go/issues/10748) ([#10899](https://github.com/googleapis/google-cloud-go/issues/10899)) 
([d49da26](https://github.com/googleapis/google-cloud-go/commit/d49da26be7dc52fad37c392c2876f62b1a5625a2)) +* **storage/dataflux:** Add worksteal algorithm to fast-listing ([#10913](https://github.com/googleapis/google-cloud-go/issues/10913)) ([015b52c](https://github.com/googleapis/google-cloud-go/commit/015b52c345df75408be3edcfda96d37145794f9f)) +* **storage/internal:** Add managed folder to testIamPermissions method ([2f0aec8](https://github.com/googleapis/google-cloud-go/commit/2f0aec894179304d234be6c792d82cf4336b6d0a)) +* **storage/transfermanager:** Add option to StripPrefix on directory download ([#10894](https://github.com/googleapis/google-cloud-go/issues/10894)) ([607534c](https://github.com/googleapis/google-cloud-go/commit/607534cdd5edf2d15d3de891cf6a0b6cbaa7d545)) +* **storage/transfermanager:** Add SkipIfExists option ([#10893](https://github.com/googleapis/google-cloud-go/issues/10893)) ([7daa1bd](https://github.com/googleapis/google-cloud-go/commit/7daa1bdc78844adac80f6378b1f6f2dd415b80a8)) +* **storage/transfermanager:** Checksum full object downloads ([#10569](https://github.com/googleapis/google-cloud-go/issues/10569)) ([c366c90](https://github.com/googleapis/google-cloud-go/commit/c366c908534ef09442f1f3e8a4f74bd545a474fb)) +* **storage:** Add direct google access side-effect imports by default ([#10757](https://github.com/googleapis/google-cloud-go/issues/10757)) ([9ad8324](https://github.com/googleapis/google-cloud-go/commit/9ad83248a7049c82580bc45d9685c329811bce88)) +* **storage:** Add full object checksum to reader.Attrs ([#10538](https://github.com/googleapis/google-cloud-go/issues/10538)) ([245d2ea](https://github.com/googleapis/google-cloud-go/commit/245d2eaddb4862da7c8d1892d5d462bf390adb2b)) +* **storage:** Add support for Go 1.23 iterators ([84461c0](https://github.com/googleapis/google-cloud-go/commit/84461c0ba464ec2f951987ba60030e37c8a8fc18)) +* **storage:** Add update time in bucketAttrs ([#10710](https://github.com/googleapis/google-cloud-go/issues/10710)) ([5f06ae1](https://github.com/googleapis/google-cloud-go/commit/5f06ae1a331c46ded47c96c205b3f1be92d64d29)), refs [#9361](https://github.com/googleapis/google-cloud-go/issues/9361) +* **storage:** GA gRPC client ([#10859](https://github.com/googleapis/google-cloud-go/issues/10859)) ([c7a55a2](https://github.com/googleapis/google-cloud-go/commit/c7a55a26c645905317fe27505d503c338f50ee34)) +* **storage:** Introduce gRPC client-side metrics ([#10639](https://github.com/googleapis/google-cloud-go/issues/10639)) ([437bcb1](https://github.com/googleapis/google-cloud-go/commit/437bcb1e0b514959648eed36ba3963aa4fbeffc8)) +* **storage:** Support IncludeFoldersAsPrefixes for gRPC ([#10767](https://github.com/googleapis/google-cloud-go/issues/10767)) ([65bcc59](https://github.com/googleapis/google-cloud-go/commit/65bcc59a6c0753f8fbd66c8792bc69300e95ec62)) + + +### Bug Fixes + +* **storage/transfermanager:** Correct Attrs.StartOffset for sharded downloads ([#10512](https://github.com/googleapis/google-cloud-go/issues/10512)) ([01a5cbb](https://github.com/googleapis/google-cloud-go/commit/01a5cbba6d9d9f425f045b58fa16d8c85804c29c)) +* **storage:** Add retryalways policy to encryption test ([#10644](https://github.com/googleapis/google-cloud-go/issues/10644)) ([59cfd12](https://github.com/googleapis/google-cloud-go/commit/59cfd12ce5650279c99787da4a273db1e3253c76)), refs [#10567](https://github.com/googleapis/google-cloud-go/issues/10567) +* **storage:** Add unknown host to retriable errors 
([#10619](https://github.com/googleapis/google-cloud-go/issues/10619)) ([4ec0452](https://github.com/googleapis/google-cloud-go/commit/4ec0452a393341b1036ac6e1e7287843f097d978)) +* **storage:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04)) +* **storage:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5)) +* **storage:** Check for grpc NotFound error in HMAC test ([#10645](https://github.com/googleapis/google-cloud-go/issues/10645)) ([3c8e88a](https://github.com/googleapis/google-cloud-go/commit/3c8e88a085bab3142dfff6ef9a8e49c29a5c877d)) +* **storage:** Disable grpc metrics using emulator ([#10870](https://github.com/googleapis/google-cloud-go/issues/10870)) ([35ad73d](https://github.com/googleapis/google-cloud-go/commit/35ad73d3be5485ac592e2ef1ea6c0854f1eff4a0)) +* **storage:** Retry gRPC DEADLINE_EXCEEDED errors ([#10635](https://github.com/googleapis/google-cloud-go/issues/10635)) ([0018415](https://github.com/googleapis/google-cloud-go/commit/0018415295a5fd964b923db6a4785e9eed46a2e2)) +* **storage:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758)) +* **storage:** Update google.golang.org/api to v0.191.0 ([5b32644](https://github.com/googleapis/google-cloud-go/commit/5b32644eb82eb6bd6021f80b4fad471c60fb9d73)) + + +### Performance Improvements + +* **storage:** GRPC zerocopy codec ([#10888](https://github.com/googleapis/google-cloud-go/issues/10888)) ([aeba28f](https://github.com/googleapis/google-cloud-go/commit/aeba28ffffcd82ac5540e45247112bdacc5c530d)) + + +### Documentation + +* **storage/internal:** Clarify possible objectAccessControl roles ([2f0aec8](https://github.com/googleapis/google-cloud-go/commit/2f0aec894179304d234be6c792d82cf4336b6d0a)) +* **storage/internal:** Update dual-region bucket link ([2f0aec8](https://github.com/googleapis/google-cloud-go/commit/2f0aec894179304d234be6c792d82cf4336b6d0a)) + ## [1.43.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.42.0...storage/v1.43.0) (2024-07-03) diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go index d582a60d0e..3eded01783 100644 --- a/vendor/cloud.google.com/go/storage/bucket.go +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -416,6 +416,10 @@ type BucketAttrs struct { // This field is read-only. Created time.Time + // Updated is the time at which the bucket was last modified. + // This field is read-only. + Updated time.Time + // VersioningEnabled reports whether this bucket has versioning enabled. 
VersioningEnabled bool @@ -824,6 +828,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) { DefaultEventBasedHold: b.DefaultEventBasedHold, StorageClass: b.StorageClass, Created: convertTime(b.TimeCreated), + Updated: convertTime(b.Updated), VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled, ACL: toBucketACLRules(b.Acl), DefaultObjectACL: toObjectACLRules(b.DefaultObjectAcl), @@ -861,6 +866,7 @@ func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs { DefaultEventBasedHold: b.GetDefaultEventBasedHold(), StorageClass: b.GetStorageClass(), Created: b.GetCreateTime().AsTime(), + Updated: b.GetUpdateTime().AsTime(), VersioningEnabled: b.GetVersioning().GetEnabled(), ACL: toBucketACLRulesFromProto(b.GetAcl()), DefaultObjectACL: toObjectACLRulesFromProto(b.GetDefaultObjectAcl()), diff --git a/vendor/cloud.google.com/go/storage/client.go b/vendor/cloud.google.com/go/storage/client.go index bbe89276a4..aebba22517 100644 --- a/vendor/cloud.google.com/go/storage/client.go +++ b/vendor/cloud.google.com/go/storage/client.go @@ -122,7 +122,7 @@ type settings struct { gax []gax.CallOption // idempotent indicates if the call is idempotent or not when considering - // if the call should be retired or not. + // if the call should be retried or not. idempotent bool // clientOption is a set of option.ClientOption to be used during client @@ -132,6 +132,8 @@ type settings struct { // userProject is the user project that should be billed for the request. userProject string + + metricsContext *metricsContext } func initSettings(opts ...storageOption) *settings { diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go index c274c762ea..4fcfb73264 100644 --- a/vendor/cloud.google.com/go/storage/doc.go +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -331,14 +331,14 @@ to add a [custom audit logging] header: // Use client as usual with the context and the additional headers will be sent. client.Bucket("my-bucket").Attrs(ctx) -# Experimental gRPC API +# gRPC API -This package includes support for the Cloud Storage gRPC API, which is currently -in preview. This implementation uses gRPC rather than the current JSON & XML -APIs to make requests to Cloud Storage. Kindly contact the Google Cloud Storage gRPC -team at gcs-grpc-contact@google.com with a list of GCS buckets you would like to -allowlist to access this API. The Go Storage gRPC library is not yet generally -available, so it may be subject to breaking changes. +This package includes support for the Cloud Storage gRPC API. The +implementation uses gRPC rather than the Default +JSON & XML APIs to make requests to Cloud Storage. +The Go Storage gRPC client is generally available. +The Notifications, Serivce Account HMAC +and GetServiceAccount RPCs are not supported through the gRPC client. To create a client which will use gRPC, use the alternate constructor: @@ -349,15 +349,43 @@ To create a client which will use gRPC, use the alternate constructor: } // Use client as usual. -If the application is running within GCP, users may get better performance by -enabling Direct Google Access (enabling requests to skip some proxy steps). To enable, -set the environment variable `GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS=true` and add -the following side-effect imports to your application: +Using the gRPC API inside GCP with a bucket in the same region can allow for +[Direct Connectivity] (enabling requests to skip some proxy steps and reducing +response latency). 
A warning is emmitted if gRPC is not used within GCP to +warn that Direct Connectivity could not be initialized. Direct Connectivity +is not required to access the gRPC API. - import ( - _ "google.golang.org/grpc/balancer/rls" - _ "google.golang.org/grpc/xds/googledirectpath" - ) +Dependencies for the gRPC API may slightly increase the size of binaries for +applications depending on this package. If you are not using gRPC, you can use +the build tag `disable_grpc_modules` to opt out of these dependencies and +reduce the binary size. + +The gRPC client emits metrics by default and will export the +gRPC telemetry discussed in [gRFC/66] and [gRFC/78] to +[Google Cloud Monitoring]. The metrics are accessible through Cloud Monitoring +API and you incur no additional cost for publishing the metrics. Google Cloud +Support can use this information to more quickly diagnose problems related to +GCS and gRPC. +Sending this data does not incur any billing charges, and requires minimal +CPU (a single RPC every minute) or memory (a few KiB to batch the +telemetry). + +To access the metrics you can view them through Cloud Monitoring +[metric explorer] with the prefix `storage.googleapis.com/client`. Metrics are emitted +every minute. + +You can disable metrics using the following example when creating a new gRPC +client using [WithDisabledClientMetrics]. + +The metrics exporter uses Cloud Monitoring API which determines +project ID and credentials doing the following: + +* Project ID is determined using OTel Resource Detector for the environment +otherwise it falls back to the project provided by [google.FindCredentials]. + +* Credentials are determined using [Application Default Credentials]. The +principal must have `roles/monitoring.metricWriter` role granted. If not a +logged warning will be emitted. Subsequent are silenced to prevent noisy logs. # Storage Control API @@ -366,6 +394,11 @@ and Managed Folder operations) are supported via the autogenerated Storage Contr client, which is available as a subpackage in this module. See package docs at [cloud.google.com/go/storage/control/apiv2] or reference the [Storage Control API] docs. +[Application Default Credentials]: https://cloud.google.com/docs/authentication/application-default-credentials +[google.FindCredentials]: https://pkg.go.dev/golang.org/x/oauth2/google#FindDefaultCredentials +[gRFC/66]: https://github.com/grpc/proposal/blob/master/A66-otel-stats.md +[gRFC/78]: https://github.com/grpc/proposal/blob/master/A78-grpc-metrics-wrr-pf-xds.md +[Google Cloud Monitoring]: https://cloud.google.com/monitoring/docs [Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam [XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object [Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy @@ -375,5 +408,7 @@ client, which is available as a subpackage in this module. 
See package docs at [IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview [custom audit logging]: https://cloud.google.com/storage/docs/audit-logging#add-custom-metadata [Storage Control API]: https://cloud.google.com/storage/docs/reference/rpc/google.storage.control.v2 +[metric explorer]: https://console.cloud.google.com/projectselector/monitoring/metrics-explorer +[Direct Connectivity]: https://cloud.google.com/vpc-service-controls/docs/set-up-private-connectivity#direct-connectivity */ package storage // import "cloud.google.com/go/storage" diff --git a/vendor/cloud.google.com/go/storage/dynamic_delay.go b/vendor/cloud.google.com/go/storage/dynamic_delay.go new file mode 100644 index 0000000000..5944f515d3 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/dynamic_delay.go @@ -0,0 +1,237 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "fmt" + "math" + "sync" + "time" +) + +// dynamicDelay dynamically calculates the delay at a fixed percentile, based on +// delay samples. +// +// dynamicDelay is goroutine-safe. +type dynamicDelay struct { + increaseFactor float64 + decreaseFactor float64 + minDelay time.Duration + maxDelay time.Duration + value time.Duration + + // Guards the value + mu *sync.RWMutex +} + +// validateDynamicDelayParams ensures, +// targetPercentile is a valid fraction (between 0 and 1). +// increaseRate is a positive number. +// minDelay is less than maxDelay. +func validateDynamicDelayParams(targetPercentile, increaseRate float64, minDelay, maxDelay time.Duration) error { + if targetPercentile < 0 || targetPercentile > 1 { + return fmt.Errorf("invalid targetPercentile (%v): must be within [0, 1]", targetPercentile) + } + if increaseRate <= 0 { + return fmt.Errorf("invalid increaseRate (%v): must be > 0", increaseRate) + } + if minDelay >= maxDelay { + return fmt.Errorf("invalid minDelay (%v) and maxDelay (%v) combination: minDelay must be smaller than maxDelay", minDelay, maxDelay) + } + return nil +} + +// NewDynamicDelay returns a dynamicDelay. +// +// targetPercentile is the desired percentile to be computed. For example, a +// targetPercentile of 0.99 computes the delay at the 99th percentile. Must be +// in the range [0, 1]. +// +// increaseRate (must be > 0) determines how many increase calls it takes for +// Value to double. +// +// initialDelay is the start value of the delay. +// +// decrease can never lower the delay past minDelay, increase can never raise +// the delay past maxDelay. 
+func newDynamicDelay(targetPercentile float64, increaseRate float64, initialDelay, minDelay, maxDelay time.Duration) *dynamicDelay { + if initialDelay < minDelay { + initialDelay = minDelay + } + if initialDelay > maxDelay { + initialDelay = maxDelay + } + + // Compute increaseFactor and decreaseFactor such that: + // (increaseFactor ^ (1 - targetPercentile)) * (decreaseFactor ^ targetPercentile) = 1 + increaseFactor := math.Exp(math.Log(2) / increaseRate) + if increaseFactor < 1.001 { + increaseFactor = 1.001 + } + decreaseFactor := math.Exp(-math.Log(increaseFactor) * (1 - targetPercentile) / targetPercentile) + if decreaseFactor > 0.9999 { + decreaseFactor = 0.9999 + } + + return &dynamicDelay{ + increaseFactor: increaseFactor, + decreaseFactor: decreaseFactor, + minDelay: minDelay, + maxDelay: maxDelay, + value: initialDelay, + mu: &sync.RWMutex{}, + } +} + +func (d *dynamicDelay) unsafeIncrease() { + v := time.Duration(float64(d.value) * d.increaseFactor) + if v > d.maxDelay { + d.value = d.maxDelay + } else { + d.value = v + } +} + +// increase notes that the operation took longer than the delay returned by Value. +func (d *dynamicDelay) increase() { + d.mu.Lock() + defer d.mu.Unlock() + + d.unsafeIncrease() +} + +func (d *dynamicDelay) unsafeDecrease() { + v := time.Duration(float64(d.value) * d.decreaseFactor) + if v < d.minDelay { + d.value = d.minDelay + } else { + d.value = v + } +} + +// decrease notes that the operation completed before the delay returned by getValue. +func (d *dynamicDelay) decrease() { + d.mu.Lock() + defer d.mu.Unlock() + + d.unsafeDecrease() +} + +// update updates the delay value depending on the specified latency. +func (d *dynamicDelay) update(latency time.Duration) { + d.mu.Lock() + defer d.mu.Unlock() + + if latency > d.value { + d.unsafeIncrease() + } else { + d.unsafeDecrease() + } +} + +// getValue returns the desired delay to wait before retry the operation. +func (d *dynamicDelay) getValue() time.Duration { + d.mu.RLock() + defer d.mu.RUnlock() + + return d.value +} + +// printDelay prints the state of delay, helpful in debugging. +func (d *dynamicDelay) printDelay() { + d.mu.RLock() + defer d.mu.RUnlock() + + fmt.Println("IncreaseFactor: ", d.increaseFactor) + fmt.Println("DecreaseFactor: ", d.decreaseFactor) + fmt.Println("MinDelay: ", d.minDelay) + fmt.Println("MaxDelay: ", d.maxDelay) + fmt.Println("Value: ", d.value) +} + +// bucketDelayManager wraps dynamicDelay to provide bucket-specific delays. +type bucketDelayManager struct { + targetPercentile float64 + increaseRate float64 + initialDelay time.Duration + minDelay time.Duration + maxDelay time.Duration + + // delays maps bucket names to their dynamic delay instance. + delays map[string]*dynamicDelay + + // mu guards delays. + mu *sync.RWMutex +} + +// newBucketDelayManager returns a new bucketDelayManager instance. +func newBucketDelayManager(targetPercentile float64, increaseRate float64, initialDelay, minDelay, maxDelay time.Duration) (*bucketDelayManager, error) { + err := validateDynamicDelayParams(targetPercentile, increaseRate, minDelay, maxDelay) + if err != nil { + return nil, err + } + + return &bucketDelayManager{ + targetPercentile: targetPercentile, + increaseRate: increaseRate, + initialDelay: initialDelay, + minDelay: minDelay, + maxDelay: maxDelay, + delays: make(map[string]*dynamicDelay), + mu: &sync.RWMutex{}, + }, nil +} + +// getDelay retrieves the dynamicDelay instance for the given bucket name. 
If no delay +// exists for the bucket, a new one is created with the configured parameters. +func (b *bucketDelayManager) getDelay(bucketName string) *dynamicDelay { + b.mu.RLock() + delay, ok := b.delays[bucketName] + b.mu.RUnlock() + + if !ok { + b.mu.Lock() + defer b.mu.Unlock() + + // Check again, as someone might create b/w the execution of mu.RUnlock() and mu.Lock(). + delay, ok = b.delays[bucketName] + if !ok { + // Create a new dynamicDelay for the bucket if it doesn't exist + delay = newDynamicDelay(b.targetPercentile, b.increaseRate, b.initialDelay, b.minDelay, b.maxDelay) + b.delays[bucketName] = delay + } + } + return delay +} + +// increase notes that the operation took longer than the delay for the given bucket. +func (b *bucketDelayManager) increase(bucketName string) { + b.getDelay(bucketName).increase() +} + +// decrease notes that the operation completed before the delay for the given bucket. +func (b *bucketDelayManager) decrease(bucketName string) { + b.getDelay(bucketName).decrease() +} + +// update updates the delay value for the bucket depending on the specified latency. +func (b *bucketDelayManager) update(bucketName string, latency time.Duration) { + b.getDelay(bucketName).update(latency) +} + +// getValue returns the desired delay to wait before retrying the operation for the given bucket. +func (b *bucketDelayManager) getValue(bucketName string) time.Duration { + return b.getDelay(bucketName).getValue() +} diff --git a/vendor/cloud.google.com/go/storage/experimental/experimental.go b/vendor/cloud.google.com/go/storage/experimental/experimental.go new file mode 100644 index 0000000000..b35de64d39 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/experimental/experimental.go @@ -0,0 +1,75 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package experimental is a collection of experimental features that might +// have some rough edges to them. Housing experimental features in this package +// results in a user accessing these APIs as `experimental.Foo`, thereby making +// it explicit that the feature is experimental and using them in production +// code is at their own risk. +// +// All APIs in this package are experimental. +package experimental + +import ( + "time" + + "cloud.google.com/go/storage/internal" + "go.opentelemetry.io/otel/sdk/metric" + "google.golang.org/api/option" +) + +// WithMetricInterval provides a [option.ClientOption] that may be passed to [storage.NewGRPCClient]. +// It sets how often to emit metrics [metric.WithInterval] when using +// [metric.NewPeriodicReader] +// When using Cloud Monitoring interval must be at minimum 1 [time.Minute]. +func WithMetricInterval(metricInterval time.Duration) option.ClientOption { + return internal.WithMetricInterval.(func(time.Duration) option.ClientOption)(metricInterval) +} + +// WithMetricExporter provides a [option.ClientOption] that may be passed to [storage.NewGRPCClient]. +// Set an alternate client-side metric Exporter to emit metrics through. 
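These options are handed to the storage client constructors like any other option.ClientOption. A hedged usage sketch follows; bucket, project, and timing values are placeholders, and WithReadStallTimeout is the option defined just below.

```go
// Hypothetical wiring of the experimental options into storage clients.
// WithMetricInterval applies to the gRPC client; WithReadStallTimeout
// applies to downloads on the HTTP/XML read path.
package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/storage"
	"cloud.google.com/go/storage/experimental"
)

func main() {
	ctx := context.Background()

	// gRPC client exporting client-side metrics every 2 minutes
	// (Cloud Monitoring requires an interval of at least 1 minute).
	grpcClient, err := storage.NewGRPCClient(ctx,
		experimental.WithMetricInterval(2*time.Minute),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer grpcClient.Close()

	// HTTP client whose XML downloads are retried when no response headers
	// arrive within a dynamically adjusted stall timeout.
	httpClient, err := storage.NewClient(ctx,
		experimental.WithReadStallTimeout(&experimental.ReadStallTimeoutConfig{
			Min:              500 * time.Millisecond,
			TargetPercentile: 0.99,
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer httpClient.Close()
}
```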
+// Must implement [metric.Exporter] +func WithMetricExporter(ex *metric.Exporter) option.ClientOption { + return internal.WithMetricExporter.(func(*metric.Exporter) option.ClientOption)(ex) +} + +// WithReadStallTimeout provides a [option.ClientOption] that may be passed to [storage.NewClient]. +// It enables the client to retry stalled requests when starting a download from +// Cloud Storage. If the timeout elapses with no response from the server, the request +// is automatically retried. +// The timeout is initially set to ReadStallTimeoutConfig.Min. The client tracks +// latency across all read requests from the client for each bucket accessed, and can +// adjust the timeout higher to the target percentile when latency for request to that +// bucket is high. +// Currently, this is supported only for downloads ([storage.NewReader] and +// [storage.NewRangeReader] calls) and only for the XML API. Other read APIs (gRPC & JSON) +// will be supported soon. +func WithReadStallTimeout(rstc *ReadStallTimeoutConfig) option.ClientOption { + return internal.WithReadStallTimeout.(func(config *ReadStallTimeoutConfig) option.ClientOption)(rstc) +} + +// ReadStallTimeoutConfig defines the timeout which is adjusted dynamically based on +// past observed latencies. +type ReadStallTimeoutConfig struct { + // Min is the minimum duration of the timeout. The default value is 500ms. Requests + // taking shorter than this value to return response headers will never time out. + // In general, you should choose a Min value that is greater than the typical value + // for the target percentile. + Min time.Duration + + // TargetPercentile is the percentile to target for the dynamic timeout. The default + // value is 0.99. At the default percentile, at most 1% of requests will be timed out + // and retried. + TargetPercentile float64 +} diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go index d81a17b6b0..4078927054 100644 --- a/vendor/cloud.google.com/go/storage/grpc_client.go +++ b/vendor/cloud.google.com/go/storage/grpc_client.go @@ -16,11 +16,12 @@ package storage import ( "context" - "encoding/base64" + "encoding/binary" "errors" "fmt" "hash/crc32" "io" + "log" "net/url" "os" @@ -36,6 +37,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "google.golang.org/protobuf/encoding/protowire" @@ -95,10 +97,11 @@ func defaultGRPCOptions() []option.ClientOption { option.WithEndpoint(host), option.WithGRPCDialOption(grpc.WithInsecure()), option.WithoutAuthentication(), + WithDisabledClientMetrics(), ) } else { // Only enable DirectPath when the emulator is not being targeted. - defaults = append(defaults, internaloption.EnableDirectPath(true)) + defaults = append(defaults, internaloption.EnableDirectPath(true), internaloption.EnableDirectPathXds()) } return defaults @@ -124,6 +127,15 @@ func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageCl return nil, errors.New("storage: GRPC is incompatible with any option that specifies an API for reads") } + if !config.disableClientMetrics { + // Do not fail client creation if enabling metrics fails. + if metricsContext, err := enableClientMetrics(ctx, s, config); err == nil { + s.metricsContext = metricsContext + s.clientOption = append(s.clientOption, metricsContext.clientOpts...) 
+ } else { + log.Printf("Failed to enable client metrics: %v", err) + } + } g, err := gapic.NewClient(ctx, s.clientOption...) if err != nil { return nil, err @@ -136,26 +148,17 @@ func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageCl } func (c *grpcStorageClient) Close() error { + if c.settings.metricsContext != nil { + c.settings.metricsContext.close() + } return c.raw.Close() } // Top-level methods. +// GetServiceAccount is not supported in the gRPC client. func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) { - s := callSettings(c.settings, opts...) - req := &storagepb.GetServiceAccountRequest{ - Project: toProjectResource(project), - } - var resp *storagepb.ServiceAccount - err := run(ctx, func(ctx context.Context) error { - var err error - resp, err = c.raw.GetServiceAccount(ctx, req, s.gax...) - return err - }, s.retry, s.idempotent) - if err != nil { - return "", err - } - return resp.EmailAddress, err + return "", errMethodNotSupported } func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, enableObjectRetention *bool, opts ...storageOption) (*BucketAttrs, error) { @@ -432,16 +435,12 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q MatchGlob: it.query.MatchGlob, ReadMask: q.toFieldMask(), // a nil Query still results in a "*" FieldMask SoftDeleted: it.query.SoftDeleted, + IncludeFoldersAsPrefixes: it.query.IncludeFoldersAsPrefixes, } if s.userProject != "" { ctx = setUserProjectMetadata(ctx, s.userProject) } fetch := func(pageSize int, pageToken string) (token string, err error) { - // IncludeFoldersAsPrefixes is not supported for gRPC - // TODO: remove this when support is added in the proto. - if it.query.IncludeFoldersAsPrefixes { - return "", status.Errorf(codes.Unimplemented, "storage: IncludeFoldersAsPrefixes is not supported in gRPC") - } var objects []*storagepb.Object var gitr *gapic.ObjectIterator err = run(it.ctx, func(ctx context.Context) error { @@ -959,37 +958,48 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec return r, nil } -// bytesCodec is a grpc codec which permits receiving messages as either -// protobuf messages, or as raw []bytes. -type bytesCodec struct { - encoding.Codec +// Custom codec to be used for unmarshaling ReadObjectResponse messages. +// This is used to avoid a copy of object data in proto.Unmarshal. +type bytesCodecV2 struct { } -func (bytesCodec) Marshal(v any) ([]byte, error) { +var _ encoding.CodecV2 = bytesCodecV2{} + +// Marshal is used to encode messages to send for bytesCodecV2. Since we are only +// using this to send ReadObjectRequest messages we don't need to recycle buffers +// here. +func (bytesCodecV2) Marshal(v any) (mem.BufferSlice, error) { vv, ok := v.(proto.Message) if !ok { return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) } - return proto.Marshal(vv) + var data mem.BufferSlice + buf, err := proto.Marshal(vv) + if err != nil { + return nil, err + } + data = append(data, mem.SliceBuffer(buf)) + return data, nil } -func (bytesCodec) Unmarshal(data []byte, v any) error { +// Unmarshal is used for data received for ReadObjectResponse. We want to preserve +// the mem.BufferSlice in most cases rather than copying and calling proto.Unmarshal. 
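The Unmarshal method below hands the raw mem.BufferSlice to the caller instead of decoding it, so object bytes are never copied at the protobuf layer; in exchange, the caller takes over buffer lifetimes. The sketch below illustrates that contract using only the mem APIs exercised in this patch (BufferSlice, SliceBuffer, Ref, Free, MaterializeToBuffer); it is an illustration of the reference-counting convention, not documentation of the mem package.

```go
// Sketch of the buffer-handling contract relied on by bytesCodecV2: a
// BufferSlice is a list of reference-counted buffers; an additional holder
// takes a reference with Ref(), and every holder eventually calls Free().
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	// Wrap plain byte slices as buffers (no copy).
	data := mem.BufferSlice{
		mem.SliceBuffer([]byte("hello, ")),
		mem.SliceBuffer([]byte("world")),
	}

	// A second holder takes a reference, as bytesCodecV2.Unmarshal does
	// before handing the slice back to the reader.
	data.Ref()

	// For small messages that still need proto.Unmarshal, materialize the
	// slice into one contiguous buffer drawn from the default pool.
	buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
	fmt.Printf("%d bytes: %q\n", data.Len(), buf.ReadOnlyData())
	buf.Free()

	// Each holder releases its reference when done; underlying buffers are
	// recycled once the count reaches zero.
	data.Free()
	data.Free()
}
```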
+func (bytesCodecV2) Unmarshal(data mem.BufferSlice, v any) error { switch v := v.(type) { - case *[]byte: - // If gRPC could recycle the data []byte after unmarshaling (through - // buffer pools), we would need to make a copy here. + case *mem.BufferSlice: *v = data + // Pick up a reference to the data so that it is not freed while decoding. + data.Ref() return nil case proto.Message: - return proto.Unmarshal(data, v) + buf := data.MaterializeToBuffer(mem.DefaultBufferPool()) + return proto.Unmarshal(buf.ReadOnlyData(), v) default: - return fmt.Errorf("can not unmarshal type %T", v) + return fmt.Errorf("cannot unmarshal type %T, want proto.Message or mem.BufferSlice", v) } } -func (bytesCodec) Name() string { - // If this isn't "", then gRPC sets the content-subtype of the call to this - // value and we get errors. +func (bytesCodecV2) Name() string { return "" } @@ -1000,7 +1010,7 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange s := callSettings(c.settings, opts...) s.gax = append(s.gax, gax.WithGRPCOptions( - grpc.ForceCodec(bytesCodec{}), + grpc.ForceCodecV2(bytesCodecV2{}), )) if s.userProject != "" { @@ -1018,8 +1028,6 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange req.Generation = params.gen } - var databuf []byte - // Define a function that initiates a Read with offset and length, assuming // we have already read seen bytes. reopen := func(seen int64) (*readStreamResponse, context.CancelFunc, error) { @@ -1045,18 +1053,19 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange } var stream storagepb.Storage_ReadObjectClient - var msg *storagepb.ReadObjectResponse var err error + var decoder *readResponseDecoder err = run(cc, func(ctx context.Context) error { - stream, err = c.raw.ReadObject(cc, req, s.gax...) + stream, err = c.raw.ReadObject(ctx, req, s.gax...) if err != nil { return err } // Receive the message into databuf as a wire-encoded message so we can // use a custom decoder to avoid an extra copy at the protobuf layer. - err := stream.RecvMsg(&databuf) + databufs := mem.BufferSlice{} + err := stream.RecvMsg(&databufs) // These types of errors show up on the Recv call, rather than the // initialization of the stream via ReadObject above. if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { @@ -1066,22 +1075,26 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange return err } // Use a custom decoder that uses protobuf unmarshalling for all - // fields except the checksummed data. - // Subsequent receives in Read calls will skip all protobuf - // unmarshalling and directly read the content from the gRPC []byte - // response, since only the first call will contain other fields. - msg, err = readFullObjectResponse(databuf) - + // fields except the object data. Object data is handled separately + // to avoid a copy. + decoder = &readResponseDecoder{ + databufs: databufs, + } + err = decoder.readFullObjectResponse() return err }, s.retry, s.idempotent) if err != nil { // Close the stream context we just created to ensure we don't leak // resources. cancel() + // Free any buffers. 
+ if decoder != nil && decoder.databufs != nil { + decoder.databufs.Free() + } return nil, nil, err } - return &readStreamResponse{stream, msg}, cancel, nil + return &readStreamResponse{stream, decoder}, cancel, nil } res, cancel, err := reopen(0) @@ -1091,7 +1104,7 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange // The first message was Recv'd on stream open, use it to populate the // object metadata. - msg := res.response + msg := res.decoder.msg obj := msg.GetMetadata() // This is the size of the entire object, even if only a range was requested. size := obj.GetSize() @@ -1101,9 +1114,11 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange wantCRC uint32 checkCRC bool ) - if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && params.offset == 0 && params.length < 0 { + if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil { + if params.offset == 0 && params.length < 0 { + checkCRC = true + } wantCRC = checksums.GetCrc32C() - checkCRC = true } r = &Reader{ @@ -1115,18 +1130,17 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange LastModified: obj.GetUpdateTime().AsTime(), Metageneration: obj.GetMetageneration(), Generation: obj.GetGeneration(), + CRC32C: wantCRC, }, reader: &gRPCReader{ stream: res.stream, reopen: reopen, cancel: cancel, size: size, - // Store the content from the first Recv in the - // client buffer for reading later. - leftovers: msg.GetChecksummedData().GetContent(), + // Preserve the decoder to read out object data when Read/WriteTo is called. + currMsg: res.decoder, settings: s, zeroRange: params.length == 0, - databuf: databuf, wantCRC: wantCRC, checkCRC: checkCRC, }, @@ -1293,213 +1307,53 @@ func (c *grpcStorageClient) TestIamPermissions(ctx context.Context, resource str return res.Permissions, nil } -// HMAC Key methods. +// HMAC Key methods are not implemented in gRPC client. func (c *grpcStorageClient) GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) { - s := callSettings(c.settings, opts...) - req := &storagepb.GetHmacKeyRequest{ - AccessId: accessID, - Project: toProjectResource(project), - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - var metadata *storagepb.HmacKeyMetadata - err := run(ctx, func(ctx context.Context) error { - var err error - metadata, err = c.raw.GetHmacKey(ctx, req, s.gax...) - return err - }, s.retry, s.idempotent) - if err != nil { - return nil, err - } - return toHMACKeyFromProto(metadata), nil + return nil, errMethodNotSupported } func (c *grpcStorageClient) ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator { - s := callSettings(c.settings, opts...) - req := &storagepb.ListHmacKeysRequest{ - Project: toProjectResource(project), - ServiceAccountEmail: serviceAccountEmail, - ShowDeletedKeys: showDeletedKeys, - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } it := &HMACKeysIterator{ ctx: ctx, - projectID: project, - retry: s.retry, + projectID: "", + retry: nil, } - fetch := func(pageSize int, pageToken string) (token string, err error) { - var hmacKeys []*storagepb.HmacKeyMetadata - err = run(it.ctx, func(ctx context.Context) error { - gitr := c.raw.ListHmacKeys(ctx, req, s.gax...) 
- hmacKeys, token, err = gitr.InternalFetch(pageSize, pageToken) - return err - }, s.retry, s.idempotent) - if err != nil { - return "", err - } - for _, hkmd := range hmacKeys { - hk := toHMACKeyFromProto(hkmd) - it.hmacKeys = append(it.hmacKeys, hk) - } - - return token, nil + fetch := func(_ int, _ string) (token string, err error) { + return "", errMethodNotSupported } it.pageInfo, it.nextFunc = iterator.NewPageInfo( fetch, - func() int { return len(it.hmacKeys) - it.index }, - func() interface{} { - prev := it.hmacKeys - it.hmacKeys = it.hmacKeys[:0] - it.index = 0 - return prev - }) + func() int { return 0 }, + func() interface{} { return nil }, + ) return it } func (c *grpcStorageClient) UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) { - s := callSettings(c.settings, opts...) - hk := &storagepb.HmacKeyMetadata{ - AccessId: accessID, - Project: toProjectResource(project), - ServiceAccountEmail: serviceAccountEmail, - State: string(attrs.State), - Etag: attrs.Etag, - } - var paths []string - fieldMask := &fieldmaskpb.FieldMask{ - Paths: paths, - } - if attrs.State != "" { - fieldMask.Paths = append(fieldMask.Paths, "state") - } - req := &storagepb.UpdateHmacKeyRequest{ - HmacKey: hk, - UpdateMask: fieldMask, - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - var metadata *storagepb.HmacKeyMetadata - err := run(ctx, func(ctx context.Context) error { - var err error - metadata, err = c.raw.UpdateHmacKey(ctx, req, s.gax...) - return err - }, s.retry, s.idempotent) - if err != nil { - return nil, err - } - return toHMACKeyFromProto(metadata), nil + return nil, errMethodNotSupported } func (c *grpcStorageClient) CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) { - s := callSettings(c.settings, opts...) - req := &storagepb.CreateHmacKeyRequest{ - Project: toProjectResource(project), - ServiceAccountEmail: serviceAccountEmail, - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - var res *storagepb.CreateHmacKeyResponse - err := run(ctx, func(ctx context.Context) error { - var err error - res, err = c.raw.CreateHmacKey(ctx, req, s.gax...) - return err - }, s.retry, s.idempotent) - if err != nil { - return nil, err - } - key := toHMACKeyFromProto(res.Metadata) - key.Secret = base64.StdEncoding.EncodeToString(res.SecretKeyBytes) - - return key, nil + return nil, errMethodNotSupported } func (c *grpcStorageClient) DeleteHMACKey(ctx context.Context, project string, accessID string, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - req := &storagepb.DeleteHmacKeyRequest{ - AccessId: accessID, - Project: toProjectResource(project), - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - return run(ctx, func(ctx context.Context) error { - return c.raw.DeleteHmacKey(ctx, req, s.gax...) - }, s.retry, s.idempotent) + return errMethodNotSupported } -// Notification methods. +// Notification methods are not implemented in gRPC client. func (c *grpcStorageClient) ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (n map[string]*Notification, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.ListNotifications") - defer func() { trace.EndSpan(ctx, err) }() - - s := callSettings(c.settings, opts...) 
- if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - req := &storagepb.ListNotificationConfigsRequest{ - Parent: bucketResourceName(globalProjectAlias, bucket), - } - var notifications []*storagepb.NotificationConfig - err = run(ctx, func(ctx context.Context) error { - gitr := c.raw.ListNotificationConfigs(ctx, req, s.gax...) - for { - // PageSize is not set and fallbacks to the API default pageSize of 100. - items, nextPageToken, err := gitr.InternalFetch(int(req.GetPageSize()), req.GetPageToken()) - if err != nil { - return err - } - notifications = append(notifications, items...) - // If there are no more results, nextPageToken is empty and err is nil. - if nextPageToken == "" { - return err - } - req.PageToken = nextPageToken - } - }, s.retry, s.idempotent) - if err != nil { - return nil, err - } - - return notificationsToMapFromProto(notifications), nil + return nil, errMethodNotSupported } func (c *grpcStorageClient) CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (ret *Notification, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.CreateNotification") - defer func() { trace.EndSpan(ctx, err) }() - - s := callSettings(c.settings, opts...) - req := &storagepb.CreateNotificationConfigRequest{ - Parent: bucketResourceName(globalProjectAlias, bucket), - NotificationConfig: toProtoNotification(n), - } - var pbn *storagepb.NotificationConfig - err = run(ctx, func(ctx context.Context) error { - var err error - pbn, err = c.raw.CreateNotificationConfig(ctx, req, s.gax...) - return err - }, s.retry, s.idempotent) - if err != nil { - return nil, err - } - return toNotificationFromProto(pbn), err + return nil, errMethodNotSupported } func (c *grpcStorageClient) DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) (err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.DeleteNotification") - defer func() { trace.EndSpan(ctx, err) }() - - s := callSettings(c.settings, opts...) - req := &storagepb.DeleteNotificationConfigRequest{Name: id} - return run(ctx, func(ctx context.Context) error { - return c.raw.DeleteNotificationConfig(ctx, req, s.gax...) - }, s.retry, s.idempotent) + return errMethodNotSupported } // setUserProjectMetadata appends a project ID to the outgoing Context metadata @@ -1512,8 +1366,8 @@ func setUserProjectMetadata(ctx context.Context, project string) context.Context } type readStreamResponse struct { - stream storagepb.Storage_ReadObjectClient - response *storagepb.ReadObjectResponse + stream storagepb.Storage_ReadObjectClient + decoder *readResponseDecoder } type gRPCReader struct { @@ -1522,7 +1376,7 @@ type gRPCReader struct { stream storagepb.Storage_ReadObjectClient reopen func(seen int64) (*readStreamResponse, context.CancelFunc, error) leftovers []byte - databuf []byte + currMsg *readResponseDecoder // decoder for the current message cancel context.CancelFunc settings *settings checkCRC bool // should we check the CRC? @@ -1565,18 +1419,21 @@ func (r *gRPCReader) Read(p []byte) (int, error) { } var n int - // Read leftovers and return what was available to conform to the Reader + + // If there is data remaining in the current message, return what was + // available to conform to the Reader // interface: https://pkg.go.dev/io#Reader. 
- if len(r.leftovers) > 0 { - n = copy(p, r.leftovers) + if !r.currMsg.done { + n = r.currMsg.readAndUpdateCRC(p, func(b []byte) { + r.updateCRC(b) + }) r.seen += int64(n) - r.updateCRC(p[:n]) - r.leftovers = r.leftovers[n:] return n, nil } // Attempt to Recv the next message on the stream. - content, err := r.recv() + // This will update r.currMsg with the decoder for the new message. + err := r.recv() if err != nil { return 0, err } @@ -1588,16 +1445,11 @@ func (r *gRPCReader) Read(p []byte) (int, error) { // present in the response here. // TODO: Figure out if we need to support decompressive transcoding // https://cloud.google.com/storage/docs/transcoding. - n = copy(p[n:], content) - leftover := len(content) - n - if leftover > 0 { - // Wasn't able to copy all of the data in the message, store for - // future Read calls. - r.leftovers = content[n:] - } - r.seen += int64(n) - r.updateCRC(p[:n]) + n = r.currMsg.readAndUpdateCRC(p, func(b []byte) { + r.updateCRC(b) + }) + r.seen += int64(n) return n, nil } @@ -1624,14 +1476,14 @@ func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) { // Track bytes written during before call. var alreadySeen = r.seen - // Write any leftovers to the stream. There will be some leftovers from the + // Write any already received message to the stream. There will be some leftovers from the // original NewRangeReader call. - if len(r.leftovers) > 0 { - // Write() will write the entire leftovers slice unless there is an error. - written, err := w.Write(r.leftovers) + if r.currMsg != nil && !r.currMsg.done { + written, err := r.currMsg.writeToAndUpdateCRC(w, func(b []byte) { + r.updateCRC(b) + }) r.seen += int64(written) - r.updateCRC(r.leftovers) - r.leftovers = nil + r.currMsg = nil if err != nil { return r.seen - alreadySeen, err } @@ -1642,7 +1494,7 @@ func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) { // Attempt to receive the next message on the stream. // Will terminate with io.EOF once data has all come through. // recv() handles stream reopening and retry logic so no need for retries here. - msg, err := r.recv() + err := r.recv() if err != nil { if err == io.EOF { // We are done; check the checksum if necessary and return. @@ -1658,9 +1510,10 @@ func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) { // present in the response here. // TODO: Figure out if we need to support decompressive transcoding // https://cloud.google.com/storage/docs/transcoding. - written, err := w.Write(msg) + written, err := r.currMsg.writeToAndUpdateCRC(w, func(b []byte) { + r.updateCRC(b) + }) r.seen += int64(written) - r.updateCRC(msg) if err != nil { return r.seen - alreadySeen, err } @@ -1669,12 +1522,13 @@ func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) { } // Close cancels the read stream's context in order for it to be closed and -// collected. +// collected, and frees any currently in use buffers. func (r *gRPCReader) Close() error { if r.cancel != nil { r.cancel() } r.stream = nil + r.currMsg = nil return nil } @@ -1689,8 +1543,9 @@ func (r *gRPCReader) Close() error { // // The last error received is the one that is returned, which could be from // an attempt to reopen the stream. 
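From the caller's point of view the reworked Read/WriteTo path is invisible: downloads still go through storage.Reader, and io.Copy automatically prefers the Reader's WriteTo implementation, which streams data straight out of the received gRPC message buffers. A minimal, hedged usage sketch (bucket and object names are placeholders):

```go
// Hypothetical download using the public API; io.Copy picks up the Reader's
// WriteTo fast path rather than looping over Read.
package main

import (
	"context"
	"io"
	"log"
	"os"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewGRPCClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	r, err := client.Bucket("my-bucket").Object("my-object").NewReader(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatal(err)
	}
}
```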
-func (r *gRPCReader) recv() ([]byte, error) { - err := r.stream.RecvMsg(&r.databuf) +func (r *gRPCReader) recv() error { + databufs := mem.BufferSlice{} + err := r.stream.RecvMsg(&databufs) var shouldRetry = ShouldRetry if r.settings.retry != nil && r.settings.retry.shouldRetry != nil { @@ -1700,16 +1555,16 @@ func (r *gRPCReader) recv() ([]byte, error) { // This will "close" the existing stream and immediately attempt to // reopen the stream, but will backoff if further attempts are necessary. // Reopening the stream Recvs the first message, so if retrying is - // successful, the next logical chunk will be returned. - msg, err := r.reopenStream() - return msg.GetChecksummedData().GetContent(), err + // successful, r.currMsg will be updated to include the new data. + return r.reopenStream() } if err != nil { - return nil, err + return err } - return readObjectResponseContent(r.databuf) + r.currMsg = &readResponseDecoder{databufs: databufs} + return r.currMsg.readFullObjectResponse() } // ReadObjectResponse field and subfield numbers. @@ -1722,21 +1577,297 @@ const ( metadataField = protowire.Number(4) ) -// readObjectResponseContent returns the checksummed_data.content field of a -// ReadObjectResponse message, or an error if the message is invalid. -// This can be used on recvs of objects after the first recv, since only the -// first message will contain non-data fields. -func readObjectResponseContent(b []byte) ([]byte, error) { - checksummedData, err := readProtoBytes(b, checksummedDataField) +// readResponseDecoder is a wrapper on the raw message, used to decode one message +// without copying object data. It also has methods to write out the resulting object +// data to the user application. +type readResponseDecoder struct { + databufs mem.BufferSlice // raw bytes of the message being processed + // Decoding offsets + off uint64 // offset in the messsage relative to the data as a whole + currBuf int // index of the current buffer being processed + currOff uint64 // offset in the current buffer + // Processed data + msg *storagepb.ReadObjectResponse // processed response message with all fields other than object data populated + dataOffsets bufferSliceOffsets // offsets of the object data in the message. + done bool // true if the data has been completely read. +} + +type bufferSliceOffsets struct { + startBuf, endBuf int // indices of start and end buffers of object data in the msg + startOff, endOff uint64 // offsets within these buffers where the data starts and ends. + currBuf int // index of current buffer being read out to the user application. + currOff uint64 // offset of read in current buffer. +} + +// peek ahead 10 bytes from the current offset in the databufs. This will return a +// slice of the current buffer if the bytes are all in one buffer, but will copy +// the bytes into a new buffer if the distance is split across buffers. Use this +// to allow protowire methods to be used to parse tags & fixed values. +// The max length of a varint tag is 10 bytes, see +// https://protobuf.dev/programming-guides/encoding/#varints . Other int types +// are shorter. +func (d *readResponseDecoder) peek() []byte { + b := d.databufs[d.currBuf].ReadOnlyData() + // Check if the tag will fit in the current buffer. If not, copy the next 10 + // bytes into a new buffer to ensure that we can read the tag correctly + // without it being divided between buffers. 
+ tagBuf := b[d.currOff:] + remainingInBuf := len(tagBuf) + // If we have less than 10 bytes remaining and are not in the final buffer, + // copy up to 10 bytes ahead from the next buffer. + if remainingInBuf < binary.MaxVarintLen64 && d.currBuf != len(d.databufs)-1 { + tagBuf = d.copyNextBytes(10) + } + return tagBuf +} + +// Copies up to next n bytes into a new buffer, or fewer if fewer bytes remain in the +// buffers overall. Does not advance offsets. +func (d *readResponseDecoder) copyNextBytes(n int) []byte { + remaining := n + if r := d.databufs.Len() - int(d.off); r < remaining { + remaining = r + } + currBuf := d.currBuf + currOff := d.currOff + var buf []byte + for remaining > 0 { + b := d.databufs[currBuf].ReadOnlyData() + remainingInCurr := len(b[currOff:]) + if remainingInCurr < remaining { + buf = append(buf, b[currOff:]...) + remaining -= remainingInCurr + currBuf++ + currOff = 0 + } else { + buf = append(buf, b[currOff:currOff+uint64(remaining)]...) + remaining = 0 + } + } + return buf +} + +// Advance current buffer & byte offset in the decoding by n bytes. Returns an error if we +// go past the end of the data. +func (d *readResponseDecoder) advanceOffset(n uint64) error { + remaining := n + for remaining > 0 { + remainingInCurr := uint64(d.databufs[d.currBuf].Len()) - d.currOff + if remainingInCurr <= remaining { + remaining -= remainingInCurr + d.currBuf++ + d.currOff = 0 + } else { + d.currOff += remaining + remaining = 0 + } + } + // If we have advanced past the end of the buffers, something went wrong. + if (d.currBuf == len(d.databufs) && d.currOff > 0) || d.currBuf > len(d.databufs) { + return errors.New("decoding: truncated message, cannot advance offset") + } + d.off += n + return nil + +} + +// This copies object data from the message into the buffer and returns the number of +// bytes copied. The data offsets are incremented in the message. The updateCRC +// function is called on the copied bytes. +func (d *readResponseDecoder) readAndUpdateCRC(p []byte, updateCRC func([]byte)) int { + // For a completely empty message, just return 0 + if len(d.databufs) == 0 { + return 0 + } + databuf := d.databufs[d.dataOffsets.currBuf] + startOff := d.dataOffsets.currOff + var b []byte + if d.dataOffsets.currBuf == d.dataOffsets.endBuf { + b = databuf.ReadOnlyData()[startOff:d.dataOffsets.endOff] + } else { + b = databuf.ReadOnlyData()[startOff:] + } + n := copy(p, b) + updateCRC(b[:n]) + d.dataOffsets.currOff += uint64(n) + + // We've read all the data from this message. Free the underlying buffers. 
+ if d.dataOffsets.currBuf == d.dataOffsets.endBuf && d.dataOffsets.currOff == d.dataOffsets.endOff { + d.done = true + d.databufs.Free() + } + // We are at the end of the current buffer + if d.dataOffsets.currBuf != d.dataOffsets.endBuf && d.dataOffsets.currOff == uint64(databuf.Len()) { + d.dataOffsets.currOff = 0 + d.dataOffsets.currBuf++ + } + return n +} + +func (d *readResponseDecoder) writeToAndUpdateCRC(w io.Writer, updateCRC func([]byte)) (int64, error) { + // For a completely empty message, just return 0 + if len(d.databufs) == 0 { + return 0, nil + } + var written int64 + for !d.done { + databuf := d.databufs[d.dataOffsets.currBuf] + startOff := d.dataOffsets.currOff + var b []byte + if d.dataOffsets.currBuf == d.dataOffsets.endBuf { + b = databuf.ReadOnlyData()[startOff:d.dataOffsets.endOff] + } else { + b = databuf.ReadOnlyData()[startOff:] + } + var n int + // Write all remaining data from the current buffer + n, err := w.Write(b) + written += int64(n) + updateCRC(b) + if err != nil { + return written, err + } + d.dataOffsets.currOff = 0 + // We've read all the data from this message. + if d.dataOffsets.currBuf == d.dataOffsets.endBuf { + d.done = true + d.databufs.Free() + } else { + d.dataOffsets.currBuf++ + } + } + return written, nil +} + +// Consume the next available tag in the input data and return the field number and type. +// Advances the relevant offsets in the data. +func (d *readResponseDecoder) consumeTag() (protowire.Number, protowire.Type, error) { + tagBuf := d.peek() + + // Consume the next tag. This will tell us which field is next in the + // buffer, its type, and how much space it takes up. + fieldNum, fieldType, tagLength := protowire.ConsumeTag(tagBuf) + if tagLength < 0 { + return 0, 0, protowire.ParseError(tagLength) + } + // Update the offsets and current buffer depending on the tag length. + if err := d.advanceOffset(uint64(tagLength)); err != nil { + return 0, 0, fmt.Errorf("consuming tag: %w", err) + } + return fieldNum, fieldType, nil +} + +// Consume a varint that represents the length of a bytes field. Return the length of +// the data, and advance the offsets by the length of the varint. +func (d *readResponseDecoder) consumeVarint() (uint64, error) { + tagBuf := d.peek() + + // Consume the next tag. This will tell us which field is next in the + // buffer, its type, and how much space it takes up. + dataLength, tagLength := protowire.ConsumeVarint(tagBuf) + if tagLength < 0 { + return 0, protowire.ParseError(tagLength) + } + + // Update the offsets and current buffer depending on the tag length. + d.advanceOffset(uint64(tagLength)) + return dataLength, nil +} + +func (d *readResponseDecoder) consumeFixed32() (uint32, error) { + valueBuf := d.peek() + + // Consume the next tag. This will tell us which field is next in the + // buffer, its type, and how much space it takes up. + value, tagLength := protowire.ConsumeFixed32(valueBuf) + if tagLength < 0 { + return 0, protowire.ParseError(tagLength) + } + + // Update the offsets and current buffer depending on the tag length. + d.advanceOffset(uint64(tagLength)) + return value, nil +} + +func (d *readResponseDecoder) consumeFixed64() (uint64, error) { + valueBuf := d.peek() + + // Consume the next tag. This will tell us which field is next in the + // buffer, its type, and how much space it takes up. + value, tagLength := protowire.ConsumeFixed64(valueBuf) + if tagLength < 0 { + return 0, protowire.ParseError(tagLength) + } + + // Update the offsets and current buffer depending on the tag length. 
+ d.advanceOffset(uint64(tagLength)) + return value, nil +} + +// Consume any field values up to the end offset provided and don't return anything. +// This is used to skip any values which are not going to be used. +// msgEndOff is indexed in terms of the overall data across all buffers. +func (d *readResponseDecoder) consumeFieldValue(fieldNum protowire.Number, fieldType protowire.Type) error { + // reimplement protowire.ConsumeFieldValue without the extra case for groups (which + // are are complicted and not a thing in proto3). + var err error + switch fieldType { + case protowire.VarintType: + _, err = d.consumeVarint() + case protowire.Fixed32Type: + _, err = d.consumeFixed32() + case protowire.Fixed64Type: + _, err = d.consumeFixed64() + case protowire.BytesType: + _, err = d.consumeBytes() + default: + return fmt.Errorf("unknown field type %v in field %v", fieldType, fieldNum) + } if err != nil { - return b, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData: %v", err) + return fmt.Errorf("consuming field %v of type %v: %w", fieldNum, fieldType, err) } - content, err := readProtoBytes(checksummedData, checksummedDataContentField) + + return nil +} + +// Consume a bytes field from the input. Returns offsets for the data in the buffer slices +// and an error. +func (d *readResponseDecoder) consumeBytes() (bufferSliceOffsets, error) { + // m is the length of the data past the tag. + m, err := d.consumeVarint() if err != nil { - return content, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %v", err) + return bufferSliceOffsets{}, fmt.Errorf("consuming bytes field: %w", err) + } + offsets := bufferSliceOffsets{ + startBuf: d.currBuf, + startOff: d.currOff, + currBuf: d.currBuf, + currOff: d.currOff, } - return content, nil + // Advance offsets to lengths of bytes field and capture where we end. + d.advanceOffset(m) + offsets.endBuf = d.currBuf + offsets.endOff = d.currOff + return offsets, nil +} + +// Consume a bytes field from the input and copy into a new buffer if +// necessary (if the data is split across buffers in databuf). This can be +// used to leverage proto.Unmarshal for small bytes fields (i.e. anything +// except object data). +func (d *readResponseDecoder) consumeBytesCopy() ([]byte, error) { + // m is the length of the bytes data. + m, err := d.consumeVarint() + if err != nil { + return nil, fmt.Errorf("consuming varint: %w", err) + } + // Copy the data into a buffer and advance the offset + b := d.copyNextBytes(int(m)) + if err := d.advanceOffset(m); err != nil { + return nil, fmt.Errorf("advancing offset: %w", err) + } + return b, nil } // readFullObjectResponse returns the ReadObjectResponse that is encoded in the @@ -1746,21 +1877,17 @@ func readObjectResponseContent(b []byte) ([]byte, error) { // This function is essentially identical to proto.Unmarshal, except it aliases // the data in the input []byte. If the proto library adds a feature to // Unmarshal that does that, this function can be dropped. -func readFullObjectResponse(b []byte) (*storagepb.ReadObjectResponse, error) { +func (d *readResponseDecoder) readFullObjectResponse() error { msg := &storagepb.ReadObjectResponse{} // Loop over the entire message, extracting fields as we go. This does not // handle field concatenation, in which the contents of a single field // are split across multiple protobuf tags. - off := 0 - for off < len(b) { - // Consume the next tag. This will tell us which field is next in the - // buffer, its type, and how much space it takes up. 
- fieldNum, fieldType, fieldLength := protowire.ConsumeTag(b[off:]) - if fieldLength < 0 { - return nil, protowire.ParseError(fieldLength) + for d.off < uint64(d.databufs.Len()) { + fieldNum, fieldType, err := d.consumeTag() + if err != nil { + return fmt.Errorf("consuming next tag: %w", err) } - off += fieldLength // Unmarshal the field according to its type. Only fields that are not // nil will be present. @@ -1769,142 +1896,95 @@ func readFullObjectResponse(b []byte) (*storagepb.ReadObjectResponse, error) { // The ChecksummedData field was found. Initialize the struct. msg.ChecksummedData = &storagepb.ChecksummedData{} - // Get the bytes corresponding to the checksummed data. - fieldContent, n := protowire.ConsumeBytes(b[off:]) - if n < 0 { - return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData: %v", protowire.ParseError(n)) + bytesFieldLen, err := d.consumeVarint() + if err != nil { + return fmt.Errorf("consuming bytes: %v", err) } - off += n - - // Get the nested fields. We need to do this manually as it contains - // the object content bytes. - contentOff := 0 - for contentOff < len(fieldContent) { - gotNum, gotTyp, n := protowire.ConsumeTag(fieldContent[contentOff:]) - if n < 0 { - return nil, protowire.ParseError(n) + + var contentEndOff = d.off + bytesFieldLen + for d.off < contentEndOff { + gotNum, gotTyp, err := d.consumeTag() + if err != nil { + return fmt.Errorf("consuming checksummedData tag: %w", err) } - contentOff += n switch { case gotNum == checksummedDataContentField && gotTyp == protowire.BytesType: - // Get the content bytes. - bytes, n := protowire.ConsumeBytes(fieldContent[contentOff:]) - if n < 0 { - return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %v", protowire.ParseError(n)) + // Get the offsets of the content bytes. + d.dataOffsets, err = d.consumeBytes() + if err != nil { + return fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %w", err) } - msg.ChecksummedData.Content = bytes - contentOff += n case gotNum == checksummedDataCRC32CField && gotTyp == protowire.Fixed32Type: - v, n := protowire.ConsumeFixed32(fieldContent[contentOff:]) - if n < 0 { - return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Crc32C: %v", protowire.ParseError(n)) + v, err := d.consumeFixed32() + if err != nil { + return fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Crc32C: %w", err) } msg.ChecksummedData.Crc32C = &v - contentOff += n default: - n = protowire.ConsumeFieldValue(gotNum, gotTyp, fieldContent[contentOff:]) - if n < 0 { - return nil, protowire.ParseError(n) + err := d.consumeFieldValue(gotNum, gotTyp) + if err != nil { + return fmt.Errorf("invalid field in ReadObjectResponse.ChecksummedData: %w", err) } - contentOff += n } } case fieldNum == objectChecksumsField && fieldType == protowire.BytesType: // The field was found. Initialize the struct. msg.ObjectChecksums = &storagepb.ObjectChecksums{} - - // Get the bytes corresponding to the checksums. - bytes, n := protowire.ConsumeBytes(b[off:]) - if n < 0 { - return nil, fmt.Errorf("invalid ReadObjectResponse.ObjectChecksums: %v", protowire.ParseError(n)) + // Consume the bytes and copy them into a single buffer if they are split across buffers. + buf, err := d.consumeBytesCopy() + if err != nil { + return fmt.Errorf("invalid ReadObjectResponse.ObjectChecksums: %v", err) } - off += n - // Unmarshal. 
- if err := proto.Unmarshal(bytes, msg.ObjectChecksums); err != nil { - return nil, err + if err := proto.Unmarshal(buf, msg.ObjectChecksums); err != nil { + return err } case fieldNum == contentRangeField && fieldType == protowire.BytesType: msg.ContentRange = &storagepb.ContentRange{} - - bytes, n := protowire.ConsumeBytes(b[off:]) - if n < 0 { - return nil, fmt.Errorf("invalid ReadObjectResponse.ContentRange: %v", protowire.ParseError(n)) + buf, err := d.consumeBytesCopy() + if err != nil { + return fmt.Errorf("invalid ReadObjectResponse.ContentRange: %v", err) } - off += n - - if err := proto.Unmarshal(bytes, msg.ContentRange); err != nil { - return nil, err + if err := proto.Unmarshal(buf, msg.ContentRange); err != nil { + return err } case fieldNum == metadataField && fieldType == protowire.BytesType: msg.Metadata = &storagepb.Object{} - bytes, n := protowire.ConsumeBytes(b[off:]) - if n < 0 { - return nil, fmt.Errorf("invalid ReadObjectResponse.Metadata: %v", protowire.ParseError(n)) + buf, err := d.consumeBytesCopy() + if err != nil { + return fmt.Errorf("invalid ReadObjectResponse.Metadata: %v", err) } - off += n - if err := proto.Unmarshal(bytes, msg.Metadata); err != nil { - return nil, err + if err := proto.Unmarshal(buf, msg.Metadata); err != nil { + return err } default: - fieldLength = protowire.ConsumeFieldValue(fieldNum, fieldType, b[off:]) - if fieldLength < 0 { - return nil, fmt.Errorf("default: %v", protowire.ParseError(fieldLength)) - } - off += fieldLength - } - } - - return msg, nil -} - -// readProtoBytes returns the contents of the protobuf field with number num -// and type bytes from a wire-encoded message. If the field cannot be found, -// the returned slice will be nil and no error will be returned. -// -// It does not handle field concatenation, in which the contents of a single field -// are split across multiple protobuf tags. Encoded data containing split fields -// of this form is technically permissable, but uncommon. -func readProtoBytes(b []byte, num protowire.Number) ([]byte, error) { - off := 0 - for off < len(b) { - gotNum, gotTyp, n := protowire.ConsumeTag(b[off:]) - if n < 0 { - return nil, protowire.ParseError(n) - } - off += n - if gotNum == num && gotTyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b[off:]) - if n < 0 { - return nil, protowire.ParseError(n) + err := d.consumeFieldValue(fieldNum, fieldType) + if err != nil { + return fmt.Errorf("invalid field in ReadObjectResponse: %w", err) } - return b, nil } - n = protowire.ConsumeFieldValue(gotNum, gotTyp, b[off:]) - if n < 0 { - return nil, protowire.ParseError(n) - } - off += n } - return nil, nil + d.msg = msg + return nil } // reopenStream "closes" the existing stream and attempts to reopen a stream and // sets the Reader's stream and cancelStream properties in the process. -func (r *gRPCReader) reopenStream() (*storagepb.ReadObjectResponse, error) { +func (r *gRPCReader) reopenStream() error { // Close existing stream and initialize new stream with updated offset. 
r.Close() res, cancel, err := r.reopen(r.seen) if err != nil { - return nil, err + return err } r.stream = res.stream + r.currMsg = res.decoder r.cancel = cancel - return res.response, nil + return nil } func newGRPCWriter(c *grpcStorageClient, params *openWriterParams, r io.Reader) *gRPCWriter { diff --git a/vendor/cloud.google.com/go/storage/grpc_dp.go b/vendor/cloud.google.com/go/storage/grpc_dp.go new file mode 100644 index 0000000000..d342273349 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/grpc_dp.go @@ -0,0 +1,22 @@ +//go:build !disable_grpc_modules + +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + _ "google.golang.org/grpc/balancer/rls" + _ "google.golang.org/grpc/xds/googledirectpath" +) diff --git a/vendor/cloud.google.com/go/storage/grpc_metrics.go b/vendor/cloud.google.com/go/storage/grpc_metrics.go new file mode 100644 index 0000000000..149b37807e --- /dev/null +++ b/vendor/cloud.google.com/go/storage/grpc_metrics.go @@ -0,0 +1,281 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
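The readResponseDecoder above walks the ReadObjectResponse wire format by hand with the protowire package instead of calling proto.Unmarshal, so the (potentially large) data field is aliased rather than copied. The standalone sketch below shows the same scanning technique on a flat []byte (the decoder generalizes it to a list of gRPC buffers); the field numbers are invented for the example.

```go
// Sketch of manual wire scanning: walk an encoded message tag by tag and
// alias the bytes of the one field we care about, skipping everything else.
package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	const contentField = protowire.Number(1)

	// Encode a two-field message: field 1 (bytes), field 2 (fixed32).
	var msg []byte
	msg = protowire.AppendTag(msg, contentField, protowire.BytesType)
	msg = protowire.AppendBytes(msg, []byte("object data"))
	msg = protowire.AppendTag(msg, protowire.Number(2), protowire.Fixed32Type)
	msg = protowire.AppendFixed32(msg, 0xCAFE)

	var content []byte // aliases msg, no copy
	for off := 0; off < len(msg); {
		num, typ, n := protowire.ConsumeTag(msg[off:])
		if n < 0 {
			log.Fatal(protowire.ParseError(n))
		}
		off += n
		if num == contentField && typ == protowire.BytesType {
			b, n := protowire.ConsumeBytes(msg[off:])
			if n < 0 {
				log.Fatal(protowire.ParseError(n))
			}
			content, off = b, off+n
			continue
		}
		// Skip any field we do not care about.
		if n := protowire.ConsumeFieldValue(num, typ, msg[off:]); n < 0 {
			log.Fatal(protowire.ParseError(n))
		} else {
			off += n
		}
	}
	fmt.Printf("content field: %q\n", content)
}
```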
+ +package storage + +import ( + "context" + "fmt" + "log" + "strings" + "time" + + mexporter "github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric" + "github.com/google/uuid" + "go.opentelemetry.io/contrib/detectors/gcp" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/resource" + "google.golang.org/api/option" + "google.golang.org/api/transport" + "google.golang.org/grpc" + "google.golang.org/grpc/stats/opentelemetry" +) + +const ( + monitoredResourceName = "storage.googleapis.com/Client" + metricPrefix = "storage.googleapis.com/client/" +) + +func latencyHistogramBoundaries() []float64 { + boundaries := []float64{} + boundary := 0.0 + increment := 0.002 + // 2ms buckets for first 100ms, so we can have higher resolution for uploads and downloads in the 100 KiB range + for i := 0; i < 50; i++ { + boundaries = append(boundaries, boundary) + // increment by 2ms + boundary += increment + } + // For the remaining buckets do 10 10ms, 10 20ms, and so on, up until 5 minutes + for i := 0; i < 150 && boundary < 300; i++ { + boundaries = append(boundaries, boundary) + if i != 0 && i%10 == 0 { + increment *= 2 + } + boundary += increment + } + return boundaries +} + +func sizeHistogramBoundaries() []float64 { + kb := 1024.0 + mb := 1024.0 * kb + gb := 1024.0 * mb + boundaries := []float64{} + boundary := 0.0 + increment := 128 * kb + // 128 KiB increments up to 4MiB, then exponential growth + for len(boundaries) < 200 && boundary <= 16*gb { + boundaries = append(boundaries, boundary) + boundary += increment + if boundary >= 4*mb { + increment *= 2 + } + } + return boundaries +} + +func metricFormatter(m metricdata.Metrics) string { + return metricPrefix + strings.ReplaceAll(string(m.Name), ".", "/") +} + +func gcpAttributeExpectedDefaults() []attribute.KeyValue { + return []attribute.KeyValue{ + {Key: "location", Value: attribute.StringValue("global")}, + {Key: "cloud_platform", Value: attribute.StringValue("unknown")}, + {Key: "host_id", Value: attribute.StringValue("unknown")}} +} + +// Added to help with tests +type preparedResource struct { + projectToUse string + resource *resource.Resource +} + +func newPreparedResource(ctx context.Context, project string, resourceOptions []resource.Option) (*preparedResource, error) { + detectedAttrs, err := resource.New(ctx, resourceOptions...) 
+ if err != nil { + return nil, err + } + preparedResource := &preparedResource{} + s := detectedAttrs.Set() + p, present := s.Value("cloud.account.id") + if present { + preparedResource.projectToUse = p.AsString() + } else { + preparedResource.projectToUse = project + } + updates := []attribute.KeyValue{} + for _, kv := range gcpAttributeExpectedDefaults() { + if val, present := s.Value(kv.Key); !present || val.AsString() == "" { + updates = append(updates, attribute.KeyValue{Key: kv.Key, Value: kv.Value}) + } + } + r, err := resource.New( + ctx, + resource.WithAttributes( + attribute.KeyValue{Key: "gcp.resource_type", Value: attribute.StringValue(monitoredResourceName)}, + attribute.KeyValue{Key: "instance_id", Value: attribute.StringValue(uuid.New().String())}, + attribute.KeyValue{Key: "project_id", Value: attribute.StringValue(project)}, + attribute.KeyValue{Key: "api", Value: attribute.StringValue("grpc")}, + ), + resource.WithAttributes(detectedAttrs.Attributes()...), + // Last duplicate key / value wins + resource.WithAttributes(updates...), + ) + if err != nil { + return nil, err + } + preparedResource.resource = r + return preparedResource, nil +} + +type metricsContext struct { + // client options passed to gRPC channels + clientOpts []option.ClientOption + // instance of metric reader used by gRPC client-side metrics + provider *metric.MeterProvider + // clean func to call when closing gRPC client + close func() +} + +func createHistogramView(name string, boundaries []float64) metric.View { + return metric.NewView(metric.Instrument{ + Name: name, + Kind: metric.InstrumentKindHistogram, + }, metric.Stream{ + Name: name, + Aggregation: metric.AggregationExplicitBucketHistogram{Boundaries: boundaries}, + }) +} + +func newGRPCMetricContext(ctx context.Context, project string, config storageConfig) (*metricsContext, error) { + var exporter metric.Exporter + meterOpts := []metric.Option{} + if config.metricExporter != nil { + exporter = *config.metricExporter + } else { + preparedResource, err := newPreparedResource(ctx, project, []resource.Option{resource.WithDetectors(gcp.NewDetector())}) + if err != nil { + return nil, err + } + meterOpts = append(meterOpts, metric.WithResource(preparedResource.resource)) + // Implementation requires a project, if one is not determined possibly user + // credentials. Then we will fail stating gRPC Metrics require a project-id. + if project == "" && preparedResource.projectToUse == "" { + return nil, fmt.Errorf("google cloud project is required to start client-side metrics") + } + // If projectTouse isn't the same as project provided to Storage client, then + // emit a log stating which project is being used to emit metrics to. + if project != preparedResource.projectToUse { + log.Printf("The Project ID configured for metrics is %s, but the Project ID of the storage client is %s. Make sure that the service account in use has the required metric writing role (roles/monitoring.metricWriter) in the project projectIdToUse or metrics will not be written.", preparedResource.projectToUse, project) + } + meOpts := []mexporter.Option{ + mexporter.WithProjectID(preparedResource.projectToUse), + mexporter.WithMetricDescriptorTypeFormatter(metricFormatter), + mexporter.WithCreateServiceTimeSeries(), + mexporter.WithMonitoredResourceDescription(monitoredResourceName, []string{"project_id", "location", "cloud_platform", "host_id", "instance_id", "api"})} + exporter, err = mexporter.New(meOpts...) 
+ if err != nil { + return nil, err + } + } + // Metric views update histogram boundaries to be relevant to GCS + // otherwise default OTel histogram boundaries are used. + metricViews := []metric.View{ + createHistogramView("grpc.client.attempt.duration", latencyHistogramBoundaries()), + createHistogramView("grpc.client.attempt.rcvd_total_compressed_message_size", sizeHistogramBoundaries()), + createHistogramView("grpc.client.attempt.sent_total_compressed_message_size", sizeHistogramBoundaries()), + } + interval := time.Minute + if config.metricInterval > 0 { + interval = config.metricInterval + } + meterOpts = append(meterOpts, metric.WithReader(metric.NewPeriodicReader(&exporterLogSuppressor{exporter: exporter}, metric.WithInterval(interval))), + metric.WithView(metricViews...)) + provider := metric.NewMeterProvider(meterOpts...) + mo := opentelemetry.MetricsOptions{ + MeterProvider: provider, + Metrics: opentelemetry.DefaultMetrics().Add( + "grpc.lb.wrr.rr_fallback", + "grpc.lb.wrr.endpoint_weight_not_yet_usable", + "grpc.lb.wrr.endpoint_weight_stale", + "grpc.lb.wrr.endpoint_weights", + "grpc.lb.rls.cache_entries", + "grpc.lb.rls.cache_size", + "grpc.lb.rls.default_target_picks", + "grpc.lb.rls.target_picks", + "grpc.lb.rls.failed_picks"), + OptionalLabels: []string{"grpc.lb.locality"}, + } + opts := []option.ClientOption{ + option.WithGRPCDialOption(opentelemetry.DialOption(opentelemetry.Options{MetricsOptions: mo})), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions(grpc.StaticMethodCallOption{})), + } + context := &metricsContext{ + clientOpts: opts, + provider: provider, + close: createShutdown(ctx, provider), + } + return context, nil +} + +func enableClientMetrics(ctx context.Context, s *settings, config storageConfig) (*metricsContext, error) { + var project string + c, err := transport.Creds(ctx, s.clientOption...) + if err == nil { + project = c.ProjectID + } + // Enable client-side metrics for gRPC + metricsContext, err := newGRPCMetricContext(ctx, project, config) + if err != nil { + return nil, fmt.Errorf("gRPC Metrics: %w", err) + } + return metricsContext, nil +} + +func createShutdown(ctx context.Context, provider *metric.MeterProvider) func() { + return func() { + provider.Shutdown(ctx) + } +} + +// Silences permission errors after initial error is emitted to prevent +// chatty logs. +type exporterLogSuppressor struct { + exporter metric.Exporter + emittedFailure bool +} + +// Implements OTel SDK metric.Exporter interface to prevent noisy logs from +// lack of credentials after initial failure. 
+// https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric@v1.28.0#Exporter +func (e *exporterLogSuppressor) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error { + if err := e.exporter.Export(ctx, rm); err != nil && !e.emittedFailure { + if strings.Contains(err.Error(), "PermissionDenied") { + e.emittedFailure = true + return fmt.Errorf("gRPC metrics failed due permission issue: %w", err) + } + return err + } + return nil +} + +func (e *exporterLogSuppressor) Temporality(k metric.InstrumentKind) metricdata.Temporality { + return e.exporter.Temporality(k) +} + +func (e *exporterLogSuppressor) Aggregation(k metric.InstrumentKind) metric.Aggregation { + return e.exporter.Aggregation(k) +} + +func (e *exporterLogSuppressor) ForceFlush(ctx context.Context) error { + return e.exporter.ForceFlush(ctx) +} + +func (e *exporterLogSuppressor) Shutdown(ctx context.Context) error { + return e.exporter.Shutdown(ctx) +} diff --git a/vendor/cloud.google.com/go/storage/hmac.go b/vendor/cloud.google.com/go/storage/hmac.go index f7811a5d14..2387fd33c7 100644 --- a/vendor/cloud.google.com/go/storage/hmac.go +++ b/vendor/cloud.google.com/go/storage/hmac.go @@ -20,7 +20,6 @@ import ( "fmt" "time" - "cloud.google.com/go/storage/internal/apiv2/storagepb" "google.golang.org/api/iterator" raw "google.golang.org/api/storage/v1" ) @@ -103,6 +102,7 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle { // // Options such as UserProjectForHMACKeys can be used to set the // userProject to be billed against for operations. +// Note: gRPC is not supported. func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMACKey, error) { desc := new(hmacKeyDesc) for _, opt := range opts { @@ -118,6 +118,7 @@ func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMAC // Delete invokes an RPC to delete the key referenced by accessID, on Google Cloud Storage. // Only inactive HMAC keys can be deleted. // After deletion, a key cannot be used to authenticate requests. +// Note: gRPC is not supported. func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) error { desc := new(hmacKeyDesc) for _, opt := range opts { @@ -158,23 +159,8 @@ func toHMACKeyFromRaw(hk *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, erro return hmKey, nil } -func toHMACKeyFromProto(pbmd *storagepb.HmacKeyMetadata) *HMACKey { - if pbmd == nil { - return nil - } - - return &HMACKey{ - AccessID: pbmd.GetAccessId(), - ID: pbmd.GetId(), - State: HMACState(pbmd.GetState()), - ProjectID: pbmd.GetProject(), - CreatedTime: convertProtoTime(pbmd.GetCreateTime()), - UpdatedTime: convertProtoTime(pbmd.GetUpdateTime()), - ServiceAccountEmail: pbmd.GetServiceAccountEmail(), - } -} - // CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey. +// Note: gRPC is not supported. func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEmail string, opts ...HMACKeyOption) (*HMACKey, error) { if projectID == "" { return nil, errors.New("storage: expecting a non-blank projectID") @@ -203,6 +189,7 @@ type HMACKeyAttrsToUpdate struct { } // Update mutates the HMACKey referred to by accessID. +// Note: gRPC is not supported. 
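
The hmac.go hunk above stamps "Note: gRPC is not supported" onto the HMAC key methods, so these RPCs have to go through the HTTP/JSON transport. A small usage sketch against the public storage API, using only methods shown in this file; the project and service account names are illustrative.

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	// HMAC key management is only served over HTTP/JSON, per the notes added
	// above, so a plain NewClient is used here.
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Project and service account values are illustrative.
	key, err := client.CreateHMACKey(ctx, "my-project", "sa@my-project.iam.gserviceaccount.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created HMAC key with access ID:", key.AccessID)

	// List the project's keys with the iterator returned by ListHMACKeys.
	it := client.ListHMACKeys(ctx, "my-project")
	for {
		k, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(k.AccessID, k.State)
	}
}
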
func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opts ...HMACKeyOption) (*HMACKey, error) { if au.State != Active && au.State != Inactive { return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive) @@ -237,6 +224,7 @@ type HMACKeysIterator struct { // ListHMACKeys returns an iterator for listing HMACKeys. // // Note: This iterator is not safe for concurrent operations without explicit synchronization. +// Note: gRPC is not supported. func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMACKeyOption) *HMACKeysIterator { desc := new(hmacKeyDesc) for _, opt := range opts { diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go index 0e213a6632..6baf905473 100644 --- a/vendor/cloud.google.com/go/storage/http_client.go +++ b/vendor/cloud.google.com/go/storage/http_client.go @@ -22,6 +22,7 @@ import ( "hash/crc32" "io" "io/ioutil" + "log" "net/http" "net/url" "os" @@ -47,13 +48,14 @@ import ( // httpStorageClient is the HTTP-JSON API implementation of the transport-agnostic // storageClient interface. type httpStorageClient struct { - creds *google.Credentials - hc *http.Client - xmlHost string - raw *raw.Service - scheme string - settings *settings - config *storageConfig + creds *google.Credentials + hc *http.Client + xmlHost string + raw *raw.Service + scheme string + settings *settings + config *storageConfig + dynamicReadReqStallTimeout *bucketDelayManager } // newHTTPStorageClient initializes a new storageClient that uses the HTTP-JSON @@ -128,14 +130,29 @@ func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageCl return nil, fmt.Errorf("supplied endpoint %q is not valid: %w", ep, err) } + var bd *bucketDelayManager + if config.readStallTimeoutConfig != nil { + drrstConfig := config.readStallTimeoutConfig + bd, err = newBucketDelayManager( + drrstConfig.TargetPercentile, + getDynamicReadReqIncreaseRateFromEnv(), + getDynamicReadReqInitialTimeoutSecFromEnv(drrstConfig.Min), + drrstConfig.Min, + defaultDynamicReqdReqMaxTimeout) + if err != nil { + return nil, fmt.Errorf("creating dynamic-delay: %w", err) + } + } + return &httpStorageClient{ - creds: creds, - hc: hc, - xmlHost: u.Host, - raw: rawService, - scheme: u.Scheme, - settings: s, - config: &config, + creds: creds, + hc: hc, + xmlHost: u.Host, + raw: rawService, + scheme: u.Scheme, + settings: s, + config: &config, + dynamicReadReqStallTimeout: bd, }, nil } @@ -857,15 +874,47 @@ func (c *httpStorageClient) newRangeReaderXML(ctx context.Context, params *newRa reopen := readerReopen(ctx, req.Header, params, s, func(ctx context.Context) (*http.Response, error) { - // Set custom headers passed in via the context. This is only required for XML; - // for gRPC & JSON this is handled in the GAPIC and Apiary layers respectively. 
- ctxHeaders := callctx.HeadersFromContext(ctx) - for k, vals := range ctxHeaders { - for _, v := range vals { - req.Header.Set(k, v) + setHeadersFromCtx(ctx, req.Header) + + if c.dynamicReadReqStallTimeout == nil { + return c.hc.Do(req.WithContext(ctx)) + } + + cancelCtx, cancel := context.WithCancel(ctx) + var ( + res *http.Response + err error + ) + + done := make(chan bool) + go func() { + reqStartTime := time.Now() + res, err = c.hc.Do(req.WithContext(cancelCtx)) + if err == nil { + reqLatency := time.Since(reqStartTime) + c.dynamicReadReqStallTimeout.update(params.bucket, reqLatency) + } else if errors.Is(err, context.Canceled) { + // context.Canceled means operation took more than current dynamicTimeout, + // hence should be increased. + c.dynamicReadReqStallTimeout.increase(params.bucket) + } + done <- true + }() + + // Wait until timeout or request is successful. + timer := time.After(c.dynamicReadReqStallTimeout.getValue(params.bucket)) + select { + case <-timer: + log.Printf("stalled read-req cancelled after %fs", c.dynamicReadReqStallTimeout.getValue(params.bucket).Seconds()) + cancel() + err = context.DeadlineExceeded + if res != nil && res.Body != nil { + res.Body.Close() } + case <-done: + cancel = nil } - return c.hc.Do(req.WithContext(ctx)) + return res, err }, func() error { return setConditionsHeaders(req.Header, params.conds) }, func() { req.URL.RawQuery = fmt.Sprintf("generation=%d", params.gen) }) @@ -1422,18 +1471,20 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen } } else { size = res.ContentLength - // Check the CRC iff all of the following hold: - // - We asked for content (length != 0). - // - We got all the content (status != PartialContent). - // - The server sent a CRC header. - // - The Go http stack did not uncompress the file. - // - We were not served compressed data that was uncompressed on download. - // The problem with the last two cases is that the CRC will not match -- GCS - // computes it on the compressed contents, but we compute it on the - // uncompressed contents. - if params.length != 0 && !res.Uncompressed && !uncompressedByServer(res) { - crc, checkCRC = parseCRC32c(res) - } + } + + // Check the CRC iff all of the following hold: + // - We asked for content (length != 0). + // - We got all the content (status != PartialContent). + // - The server sent a CRC header. + // - The Go http stack did not uncompress the file. + // - We were not served compressed data that was uncompressed on download. + // The problem with the last two cases is that the CRC will not match -- GCS + // computes it on the compressed contents, but we compute it on the + // uncompressed contents. + crc, checkCRC = parseCRC32c(res) + if params.length == 0 || res.StatusCode == http.StatusPartialContent || res.Uncompressed || uncompressedByServer(res) { + checkCRC = false } remain := res.ContentLength @@ -1470,6 +1521,8 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen StartOffset: startOffset, Generation: params.gen, Metageneration: metaGen, + CRC32C: crc, + Decompressed: res.Uncompressed || uncompressedByServer(res), } return &Reader{ Attrs: attrs, @@ -1484,3 +1537,30 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen }, }, nil } + +// setHeadersFromCtx sets custom headers passed in via the context on the header, +// replacing any header with the same key (which avoids duplicating invocation headers). 
+// This is only required for XML; for gRPC & JSON requests this is handled in +// the GAPIC and Apiary layers respectively. +func setHeadersFromCtx(ctx context.Context, header http.Header) { + ctxHeaders := callctx.HeadersFromContext(ctx) + for k, vals := range ctxHeaders { + // Merge x-goog-api-client values into a single space-separated value. + if strings.EqualFold(k, xGoogHeaderKey) { + alreadySetValues := header.Values(xGoogHeaderKey) + vals = append(vals, alreadySetValues...) + + if len(vals) > 0 { + xGoogHeader := vals[0] + for _, v := range vals[1:] { + xGoogHeader = strings.Join([]string{xGoogHeader, v}, " ") + } + header.Set(k, xGoogHeader) + } + } else { + for _, v := range vals { + header.Set(k, v) + } + } + } +} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go index 415b2b585b..fc3e783bdc 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go @@ -68,100 +68,6 @@ func (it *BucketIterator) takeBuf() interface{} { return b } -// HmacKeyMetadataIterator manages a stream of *storagepb.HmacKeyMetadata. -type HmacKeyMetadataIterator struct { - items []*storagepb.HmacKeyMetadata - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*storagepb.HmacKeyMetadata, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *HmacKeyMetadataIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *HmacKeyMetadataIterator) Next() (*storagepb.HmacKeyMetadata, error) { - var item *storagepb.HmacKeyMetadata - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *HmacKeyMetadataIterator) bufLen() int { - return len(it.items) -} - -func (it *HmacKeyMetadataIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} - -// NotificationConfigIterator manages a stream of *storagepb.NotificationConfig. -type NotificationConfigIterator struct { - items []*storagepb.NotificationConfig - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. 
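
The http_client.go hunk above races each XML-API read against a per-bucket dynamic stall timeout: the request runs in a goroutine, a timer guards it, and whichever finishes first wins, with cancelled attempts feeding back into the bucketDelayManager. Below is a stripped-down sketch of that race using a fixed timeout in place of the dynamic one; the URL and timeout are illustrative and the per-bucket latency bookkeeping is omitted.

package main

import (
	"context"
	"errors"
	"fmt"
	"log"
	"net/http"
	"time"
)

// doWithStallTimeout issues req and cancels it if no response arrives within
// timeout, mirroring the dynamicReadReqStallTimeout handling above (minus the
// per-bucket latency tracking).
func doWithStallTimeout(ctx context.Context, hc *http.Client, req *http.Request, timeout time.Duration) (*http.Response, error) {
	cancelCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	type result struct {
		res *http.Response
		err error
	}
	done := make(chan result, 1)
	go func() {
		res, err := hc.Do(req.WithContext(cancelCtx))
		done <- result{res, err}
	}()

	select {
	case <-time.After(timeout):
		cancel() // give up on the stalled attempt
		r := <-done
		if r.res != nil && r.res.Body != nil {
			r.res.Body.Close()
		}
		return nil, context.DeadlineExceeded
	case r := <-done:
		return r.res, r.err
	}
}

func main() {
	req, _ := http.NewRequest("GET", "https://example.com/", nil) // illustrative URL
	res, err := doWithStallTimeout(context.Background(), http.DefaultClient, req, 2*time.Second)
	if errors.Is(err, context.DeadlineExceeded) {
		log.Println("read request stalled and was cancelled")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	fmt.Println("status:", res.Status)
}
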
- InternalFetch func(pageSize int, pageToken string) (results []*storagepb.NotificationConfig, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *NotificationConfigIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *NotificationConfigIterator) Next() (*storagepb.NotificationConfig, error) { - var item *storagepb.NotificationConfig - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *NotificationConfigIterator) bufLen() int { - return len(it.items) -} - -func (it *NotificationConfigIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} - // ObjectIterator manages a stream of *storagepb.Object. type ObjectIterator struct { items []*storagepb.Object diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go new file mode 100644 index 0000000000..e6cb160d03 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go @@ -0,0 +1,38 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +//go:build go1.23 + +package storage + +import ( + "iter" + + storagepb "cloud.google.com/go/storage/internal/apiv2/storagepb" + "github.com/googleapis/gax-go/v2/iterator" +) + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *BucketIterator) All() iter.Seq2[*storagepb.Bucket, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *ObjectIterator) All() iter.Seq2[*storagepb.Object, error] { + return iterator.RangeAdapter(it.Next) +} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go index 5e2a8f0ad5..869f3b1fbc 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go @@ -17,19 +17,15 @@ // Package storage is an auto-generated package for the // Cloud Storage API. // -// Stop. This folder is likely not what you are looking for. This folder -// contains protocol buffer definitions for an API only accessible to select -// customers. Customers not participating should not depend on this file. -// Please contact Google Cloud sales if you are interested. Unless told -// otherwise by a Google Cloud representative, do not use or otherwise rely -// on any of the contents of this folder. 
If you would like to use Cloud -// Storage, please consult our official documentation (at +// This folder contains protocol buffer definitions for an API only +// accessible to select customers. Customers not participating should not +// depend on this file. Please contact Google Cloud sales if you are +// interested. Unless told otherwise by a Google Cloud representative, do not +// use or otherwise rely on any of the contents of this folder. If you would +// like to use Cloud Storage, please consult our official documentation (at // https://cloud.google.com/storage/docs/apis) for details on our XML and // JSON APIs, or else consider one of our client libraries (at -// https://cloud.google.com/storage/docs/reference/libraries). This API -// defined in this folder is unreleased and may shut off, break, or fail at -// any time for any users who are not registered as a part of a private -// preview program. +// https://cloud.google.com/storage/docs/reference/libraries). // // # General documentation // diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json index 56256bb2cc..7b0241b4bb 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json @@ -30,31 +30,11 @@ "CreateBucket" ] }, - "CreateHmacKey": { - "methods": [ - "CreateHmacKey" - ] - }, - "CreateNotificationConfig": { - "methods": [ - "CreateNotificationConfig" - ] - }, "DeleteBucket": { "methods": [ "DeleteBucket" ] }, - "DeleteHmacKey": { - "methods": [ - "DeleteHmacKey" - ] - }, - "DeleteNotificationConfig": { - "methods": [ - "DeleteNotificationConfig" - ] - }, "DeleteObject": { "methods": [ "DeleteObject" @@ -65,46 +45,21 @@ "GetBucket" ] }, - "GetHmacKey": { - "methods": [ - "GetHmacKey" - ] - }, "GetIamPolicy": { "methods": [ "GetIamPolicy" ] }, - "GetNotificationConfig": { - "methods": [ - "GetNotificationConfig" - ] - }, "GetObject": { "methods": [ "GetObject" ] }, - "GetServiceAccount": { - "methods": [ - "GetServiceAccount" - ] - }, "ListBuckets": { "methods": [ "ListBuckets" ] }, - "ListHmacKeys": { - "methods": [ - "ListHmacKeys" - ] - }, - "ListNotificationConfigs": { - "methods": [ - "ListNotificationConfigs" - ] - }, "ListObjects": { "methods": [ "ListObjects" @@ -155,11 +110,6 @@ "UpdateBucket" ] }, - "UpdateHmacKey": { - "methods": [ - "UpdateHmacKey" - ] - }, "UpdateObject": { "methods": [ "UpdateObject" diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go index 82ec5db902..7890a6b399 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go @@ -50,10 +50,6 @@ type CallOptions struct { SetIamPolicy []gax.CallOption TestIamPermissions []gax.CallOption UpdateBucket []gax.CallOption - DeleteNotificationConfig []gax.CallOption - GetNotificationConfig []gax.CallOption - CreateNotificationConfig []gax.CallOption - ListNotificationConfigs []gax.CallOption ComposeObject []gax.CallOption DeleteObject []gax.CallOption RestoreObject []gax.CallOption @@ -67,12 +63,6 @@ type CallOptions struct { RewriteObject []gax.CallOption StartResumableWrite []gax.CallOption QueryWriteStatus []gax.CallOption - GetServiceAccount []gax.CallOption - CreateHmacKey []gax.CallOption - DeleteHmacKey []gax.CallOption - GetHmacKey []gax.CallOption - ListHmacKeys 
[]gax.CallOption - UpdateHmacKey []gax.CallOption } func defaultGRPCClientOptions() []option.ClientOption { @@ -84,6 +74,7 @@ func defaultGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://storage.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -208,58 +199,6 @@ func defaultCallOptions() *CallOptions { }) }), }, - DeleteNotificationConfig: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - GetNotificationConfig: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - CreateNotificationConfig: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - ListNotificationConfigs: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, ComposeObject: []gax.CallOption{ gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { @@ -426,84 +365,6 @@ func defaultCallOptions() *CallOptions { }) }), }, - GetServiceAccount: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - CreateHmacKey: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - DeleteHmacKey: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - GetHmacKey: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - ListHmacKeys: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - 
codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - UpdateHmacKey: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, } } @@ -521,10 +382,6 @@ type internalClient interface { SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) UpdateBucket(context.Context, *storagepb.UpdateBucketRequest, ...gax.CallOption) (*storagepb.Bucket, error) - DeleteNotificationConfig(context.Context, *storagepb.DeleteNotificationConfigRequest, ...gax.CallOption) error - GetNotificationConfig(context.Context, *storagepb.GetNotificationConfigRequest, ...gax.CallOption) (*storagepb.NotificationConfig, error) - CreateNotificationConfig(context.Context, *storagepb.CreateNotificationConfigRequest, ...gax.CallOption) (*storagepb.NotificationConfig, error) - ListNotificationConfigs(context.Context, *storagepb.ListNotificationConfigsRequest, ...gax.CallOption) *NotificationConfigIterator ComposeObject(context.Context, *storagepb.ComposeObjectRequest, ...gax.CallOption) (*storagepb.Object, error) DeleteObject(context.Context, *storagepb.DeleteObjectRequest, ...gax.CallOption) error RestoreObject(context.Context, *storagepb.RestoreObjectRequest, ...gax.CallOption) (*storagepb.Object, error) @@ -538,12 +395,6 @@ type internalClient interface { RewriteObject(context.Context, *storagepb.RewriteObjectRequest, ...gax.CallOption) (*storagepb.RewriteResponse, error) StartResumableWrite(context.Context, *storagepb.StartResumableWriteRequest, ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) QueryWriteStatus(context.Context, *storagepb.QueryWriteStatusRequest, ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) - GetServiceAccount(context.Context, *storagepb.GetServiceAccountRequest, ...gax.CallOption) (*storagepb.ServiceAccount, error) - CreateHmacKey(context.Context, *storagepb.CreateHmacKeyRequest, ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) - DeleteHmacKey(context.Context, *storagepb.DeleteHmacKeyRequest, ...gax.CallOption) error - GetHmacKey(context.Context, *storagepb.GetHmacKeyRequest, ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) - ListHmacKeys(context.Context, *storagepb.ListHmacKeysRequest, ...gax.CallOption) *HmacKeyMetadataIterator - UpdateHmacKey(context.Context, *storagepb.UpdateHmacKeyRequest, ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) } // Client is a client for interacting with Cloud Storage API. @@ -641,11 +492,13 @@ func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyReques return c.internalClient.SetIamPolicy(ctx, req, opts...) } -// TestIamPermissions tests a set of permissions on the given bucket or object to see which, if -// any, are held by the caller. +// TestIamPermissions tests a set of permissions on the given bucket, object, or managed folder +// to see which, if any, are held by the caller. // The resource field in the request should be -// projects/_/buckets/{bucket} for a bucket or -// projects/_/buckets/{bucket}/objects/{object} for an object. 
+// projects/_/buckets/{bucket} for a bucket, +// projects/_/buckets/{bucket}/objects/{object} for an object, or +// projects/_/buckets/{bucket}/managedFolders/{managedFolder} +// for a managed folder. func (c *Client) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { return c.internalClient.TestIamPermissions(ctx, req, opts...) } @@ -655,29 +508,6 @@ func (c *Client) UpdateBucket(ctx context.Context, req *storagepb.UpdateBucketRe return c.internalClient.UpdateBucket(ctx, req, opts...) } -// DeleteNotificationConfig permanently deletes a NotificationConfig. -func (c *Client) DeleteNotificationConfig(ctx context.Context, req *storagepb.DeleteNotificationConfigRequest, opts ...gax.CallOption) error { - return c.internalClient.DeleteNotificationConfig(ctx, req, opts...) -} - -// GetNotificationConfig view a NotificationConfig. -func (c *Client) GetNotificationConfig(ctx context.Context, req *storagepb.GetNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) { - return c.internalClient.GetNotificationConfig(ctx, req, opts...) -} - -// CreateNotificationConfig creates a NotificationConfig for a given bucket. -// These NotificationConfigs, when triggered, publish messages to the -// specified Pub/Sub topics. See -// https://cloud.google.com/storage/docs/pubsub-notifications (at https://cloud.google.com/storage/docs/pubsub-notifications). -func (c *Client) CreateNotificationConfig(ctx context.Context, req *storagepb.CreateNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) { - return c.internalClient.CreateNotificationConfig(ctx, req, opts...) -} - -// ListNotificationConfigs retrieves a list of NotificationConfigs for a given bucket. -func (c *Client) ListNotificationConfigs(ctx context.Context, req *storagepb.ListNotificationConfigsRequest, opts ...gax.CallOption) *NotificationConfigIterator { - return c.internalClient.ListNotificationConfigs(ctx, req, opts...) -} - // ComposeObject concatenates a list of existing objects into a new object in the same // bucket. func (c *Client) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { @@ -848,36 +678,6 @@ func (c *Client) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWrite return c.internalClient.QueryWriteStatus(ctx, req, opts...) } -// GetServiceAccount retrieves the name of a project’s Google Cloud Storage service account. -func (c *Client) GetServiceAccount(ctx context.Context, req *storagepb.GetServiceAccountRequest, opts ...gax.CallOption) (*storagepb.ServiceAccount, error) { - return c.internalClient.GetServiceAccount(ctx, req, opts...) -} - -// CreateHmacKey creates a new HMAC key for the given service account. -func (c *Client) CreateHmacKey(ctx context.Context, req *storagepb.CreateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) { - return c.internalClient.CreateHmacKey(ctx, req, opts...) -} - -// DeleteHmacKey deletes a given HMAC key. Key must be in an INACTIVE state. -func (c *Client) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHmacKeyRequest, opts ...gax.CallOption) error { - return c.internalClient.DeleteHmacKey(ctx, req, opts...) -} - -// GetHmacKey gets an existing HMAC key metadata for the given id. 
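
The TestIamPermissions change above adds a managedFolders pattern so that managed-folder resource names still yield a bucket routing header. A short sketch of that extraction, reusing the same regexp plus url.QueryEscape approach; the resource names are illustrative and the named capture group from the generated code is dropped for brevity.

package main

import (
	"fmt"
	"net/url"
	"regexp"
)

// bucketRoutingParam extracts the bucket portion of a resource name and
// returns it as an x-goog-request-params style "bucket=..." pair, following
// the pattern used for buckets, objects, and managed folders above.
func bucketRoutingParam(resource string) (string, bool) {
	re := regexp.MustCompile(`(projects/[^/]+/buckets/[^/]+)(?:/.*)?`)
	m := re.FindStringSubmatch(resource)
	if m == nil || m[1] == "" {
		return "", false
	}
	return "bucket=" + url.QueryEscape(m[1]), true
}

func main() {
	for _, r := range []string{
		"projects/_/buckets/my-bucket",                             // bucket (illustrative)
		"projects/_/buckets/my-bucket/objects/notes.txt",           // object (illustrative)
		"projects/_/buckets/my-bucket/managedFolders/reports/2024", // managed folder (illustrative)
	} {
		if p, ok := bucketRoutingParam(r); ok {
			fmt.Println(p)
		}
	}
}
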
-func (c *Client) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { - return c.internalClient.GetHmacKey(ctx, req, opts...) -} - -// ListHmacKeys lists HMAC keys under a given project with the additional filters provided. -func (c *Client) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKeysRequest, opts ...gax.CallOption) *HmacKeyMetadataIterator { - return c.internalClient.ListHmacKeys(ctx, req, opts...) -} - -// UpdateHmacKey updates a given HMAC key state between ACTIVE and INACTIVE. -func (c *Client) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { - return c.internalClient.UpdateHmacKey(ctx, req, opts...) -} - // gRPCClient is a client for interacting with Cloud Storage API over gRPC transport. // // Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. @@ -1198,6 +998,9 @@ func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamP if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) } + if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)/managedFolders(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) + } for headerName, headerValue := range routingHeadersMap { routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } @@ -1246,138 +1049,6 @@ func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBuck return resp, nil } -func (c *gRPCClient) DeleteNotificationConfig(ctx context.Context, req *storagepb.DeleteNotificationConfigRequest, opts ...gax.CallOption) error { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).DeleteNotificationConfig[0:len((*c.CallOptions).DeleteNotificationConfig):len((*c.CallOptions).DeleteNotificationConfig)], opts...) - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - _, err = c.client.DeleteNotificationConfig(ctx, req, settings.GRPC...) - return err - }, opts...) 
- return err -} - -func (c *gRPCClient) GetNotificationConfig(ctx context.Context, req *storagepb.GetNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).GetNotificationConfig[0:len((*c.CallOptions).GetNotificationConfig):len((*c.CallOptions).GetNotificationConfig)], opts...) - var resp *storagepb.NotificationConfig - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.GetNotificationConfig(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) CreateNotificationConfig(ctx context.Context, req *storagepb.CreateNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).CreateNotificationConfig[0:len((*c.CallOptions).CreateNotificationConfig):len((*c.CallOptions).CreateNotificationConfig)], opts...) - var resp *storagepb.NotificationConfig - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.CreateNotificationConfig(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) ListNotificationConfigs(ctx context.Context, req *storagepb.ListNotificationConfigsRequest, opts ...gax.CallOption) *NotificationConfigIterator { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) 
- opts = append((*c.CallOptions).ListNotificationConfigs[0:len((*c.CallOptions).ListNotificationConfigs):len((*c.CallOptions).ListNotificationConfigs)], opts...) - it := &NotificationConfigIterator{} - req = proto.Clone(req).(*storagepb.ListNotificationConfigsRequest) - it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.NotificationConfig, string, error) { - resp := &storagepb.ListNotificationConfigsResponse{} - if pageToken != "" { - req.PageToken = pageToken - } - if pageSize > math.MaxInt32 { - req.PageSize = math.MaxInt32 - } else if pageSize != 0 { - req.PageSize = int32(pageSize) - } - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.ListNotificationConfigs(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, "", err - } - - it.Response = resp - return resp.GetNotificationConfigs(), resp.GetNextPageToken(), nil - } - fetch := func(pageSize int, pageToken string) (string, error) { - items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) - if err != nil { - return "", err - } - it.items = append(it.items, items...) - return nextPageToken, nil - } - - it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.GetPageSize()) - it.pageInfo.Token = req.GetPageToken() - - return it -} - func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { routingHeaders := "" routingHeadersMap := make(map[string]string) @@ -1731,189 +1402,3 @@ func (c *gRPCClient) QueryWriteStatus(ctx context.Context, req *storagepb.QueryW } return resp, nil } - -func (c *gRPCClient) GetServiceAccount(ctx context.Context, req *storagepb.GetServiceAccountRequest, opts ...gax.CallOption) (*storagepb.ServiceAccount, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { - routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).GetServiceAccount[0:len((*c.CallOptions).GetServiceAccount):len((*c.CallOptions).GetServiceAccount)], opts...) - var resp *storagepb.ServiceAccount - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.GetServiceAccount(ctx, req, settings.GRPC...) - return err - }, opts...) 
- if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) CreateHmacKey(ctx context.Context, req *storagepb.CreateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { - routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).CreateHmacKey[0:len((*c.CallOptions).CreateHmacKey):len((*c.CallOptions).CreateHmacKey)], opts...) - var resp *storagepb.CreateHmacKeyResponse - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.CreateHmacKey(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHmacKeyRequest, opts ...gax.CallOption) error { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { - routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).DeleteHmacKey[0:len((*c.CallOptions).DeleteHmacKey):len((*c.CallOptions).DeleteHmacKey)], opts...) - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - _, err = c.client.DeleteHmacKey(ctx, req, settings.GRPC...) - return err - }, opts...) - return err -} - -func (c *gRPCClient) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { - routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).GetHmacKey[0:len((*c.CallOptions).GetHmacKey):len((*c.CallOptions).GetHmacKey)], opts...) 
- var resp *storagepb.HmacKeyMetadata - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.GetHmacKey(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKeysRequest, opts ...gax.CallOption) *HmacKeyMetadataIterator { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { - routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).ListHmacKeys[0:len((*c.CallOptions).ListHmacKeys):len((*c.CallOptions).ListHmacKeys)], opts...) - it := &HmacKeyMetadataIterator{} - req = proto.Clone(req).(*storagepb.ListHmacKeysRequest) - it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.HmacKeyMetadata, string, error) { - resp := &storagepb.ListHmacKeysResponse{} - if pageToken != "" { - req.PageToken = pageToken - } - if pageSize > math.MaxInt32 { - req.PageSize = math.MaxInt32 - } else if pageSize != 0 { - req.PageSize = int32(pageSize) - } - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.ListHmacKeys(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, "", err - } - - it.Response = resp - return resp.GetHmacKeys(), resp.GetNextPageToken(), nil - } - fetch := func(pageSize int, pageToken string) (string, error) { - items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) - if err != nil { - return "", err - } - it.items = append(it.items, items...) - return nextPageToken, nil - } - - it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.GetPageSize()) - it.pageInfo.Token = req.GetPageToken() - - return it -} - -func (c *gRPCClient) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetHmacKey().GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetHmacKey().GetProject())[1])) > 0 { - routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetHmacKey().GetProject())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).UpdateHmacKey[0:len((*c.CallOptions).UpdateHmacKey):len((*c.CallOptions).UpdateHmacKey)], opts...) 
- var resp *storagepb.HmacKeyMetadata - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.UpdateHmacKey(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go index aeb7512f4a..349200cfc8 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go @@ -177,7 +177,7 @@ func (x ServiceConstants_Values) Number() protoreflect.EnumNumber { // Deprecated: Use ServiceConstants_Values.Descriptor instead. func (ServiceConstants_Values) EnumDescriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29, 0} } // Request message for DeleteBucket. @@ -743,296 +743,6 @@ func (x *UpdateBucketRequest) GetUpdateMask() *fieldmaskpb.FieldMask { return nil } -// Request message for DeleteNotificationConfig. -type DeleteNotificationConfigRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The parent bucket of the NotificationConfig. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *DeleteNotificationConfigRequest) Reset() { - *x = DeleteNotificationConfigRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteNotificationConfigRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteNotificationConfigRequest) ProtoMessage() {} - -func (x *DeleteNotificationConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteNotificationConfigRequest.ProtoReflect.Descriptor instead. -func (*DeleteNotificationConfigRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7} -} - -func (x *DeleteNotificationConfigRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// Request message for GetNotificationConfig. -type GetNotificationConfigRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The parent bucket of the NotificationConfig. 
- // Format: - // `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *GetNotificationConfigRequest) Reset() { - *x = GetNotificationConfigRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetNotificationConfigRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetNotificationConfigRequest) ProtoMessage() {} - -func (x *GetNotificationConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetNotificationConfigRequest.ProtoReflect.Descriptor instead. -func (*GetNotificationConfigRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{8} -} - -func (x *GetNotificationConfigRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// Request message for CreateNotificationConfig. -type CreateNotificationConfigRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The bucket to which this NotificationConfig belongs. - Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // Required. Properties of the NotificationConfig to be inserted. - NotificationConfig *NotificationConfig `protobuf:"bytes,2,opt,name=notification_config,json=notificationConfig,proto3" json:"notification_config,omitempty"` -} - -func (x *CreateNotificationConfigRequest) Reset() { - *x = CreateNotificationConfigRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateNotificationConfigRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateNotificationConfigRequest) ProtoMessage() {} - -func (x *CreateNotificationConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateNotificationConfigRequest.ProtoReflect.Descriptor instead. -func (*CreateNotificationConfigRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{9} -} - -func (x *CreateNotificationConfigRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -func (x *CreateNotificationConfigRequest) GetNotificationConfig() *NotificationConfig { - if x != nil { - return x.NotificationConfig - } - return nil -} - -// Request message for ListNotifications. -type ListNotificationConfigsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Name of a Google Cloud Storage bucket. 
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // The maximum number of NotificationConfigs to return. The service may - // return fewer than this value. The default value is 100. Specifying a value - // above 100 will result in a page_size of 100. - PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // A page token, received from a previous `ListNotificationConfigs` call. - // Provide this to retrieve the subsequent page. - // - // When paginating, all other parameters provided to `ListNotificationConfigs` - // must match the call that provided the page token. - PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` -} - -func (x *ListNotificationConfigsRequest) Reset() { - *x = ListNotificationConfigsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListNotificationConfigsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListNotificationConfigsRequest) ProtoMessage() {} - -func (x *ListNotificationConfigsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListNotificationConfigsRequest.ProtoReflect.Descriptor instead. -func (*ListNotificationConfigsRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{10} -} - -func (x *ListNotificationConfigsRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -func (x *ListNotificationConfigsRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListNotificationConfigsRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -// The result of a call to ListNotificationConfigs -type ListNotificationConfigsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The list of items. - NotificationConfigs []*NotificationConfig `protobuf:"bytes,1,rep,name=notification_configs,json=notificationConfigs,proto3" json:"notification_configs,omitempty"` - // A token, which can be sent as `page_token` to retrieve the next page. - // If this field is omitted, there are no subsequent pages. 
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListNotificationConfigsResponse) Reset() { - *x = ListNotificationConfigsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListNotificationConfigsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListNotificationConfigsResponse) ProtoMessage() {} - -func (x *ListNotificationConfigsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListNotificationConfigsResponse.ProtoReflect.Descriptor instead. -func (*ListNotificationConfigsResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{11} -} - -func (x *ListNotificationConfigsResponse) GetNotificationConfigs() []*NotificationConfig { - if x != nil { - return x.NotificationConfigs - } - return nil -} - -func (x *ListNotificationConfigsResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - // Request message for ComposeObject. type ComposeObjectRequest struct { state protoimpl.MessageState @@ -1069,7 +779,7 @@ type ComposeObjectRequest struct { func (x *ComposeObjectRequest) Reset() { *x = ComposeObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[12] + mi := &file_google_storage_v2_storage_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1082,7 +792,7 @@ func (x *ComposeObjectRequest) String() string { func (*ComposeObjectRequest) ProtoMessage() {} func (x *ComposeObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[12] + mi := &file_google_storage_v2_storage_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1095,7 +805,7 @@ func (x *ComposeObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ComposeObjectRequest.ProtoReflect.Descriptor instead. 
func (*ComposeObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7} } func (x *ComposeObjectRequest) GetDestination() *Object { @@ -1192,7 +902,7 @@ type DeleteObjectRequest struct { func (x *DeleteObjectRequest) Reset() { *x = DeleteObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[13] + mi := &file_google_storage_v2_storage_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1205,7 +915,7 @@ func (x *DeleteObjectRequest) String() string { func (*DeleteObjectRequest) ProtoMessage() {} func (x *DeleteObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[13] + mi := &file_google_storage_v2_storage_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1218,7 +928,7 @@ func (x *DeleteObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteObjectRequest.ProtoReflect.Descriptor instead. func (*DeleteObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{13} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{8} } func (x *DeleteObjectRequest) GetBucket() string { @@ -1290,6 +1000,12 @@ type RestoreObjectRequest struct { Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` // Required. The specific revision of the object to restore. Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` + // Optional. Restore token used to differentiate soft-deleted objects with the + // same name and generation. Only applicable for hierarchical namespace + // buckets. This parameter is optional, and is only required in the rare case + // when there are multiple soft-deleted objects with the same name and + // generation. + RestoreToken string `protobuf:"bytes,11,opt,name=restore_token,json=restoreToken,proto3" json:"restore_token,omitempty"` // Makes the operation conditional on whether the object's current generation // matches the given value. Setting to 0 makes the operation succeed only if // there are no live versions of the object. @@ -1316,7 +1032,7 @@ type RestoreObjectRequest struct { func (x *RestoreObjectRequest) Reset() { *x = RestoreObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[14] + mi := &file_google_storage_v2_storage_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1329,7 +1045,7 @@ func (x *RestoreObjectRequest) String() string { func (*RestoreObjectRequest) ProtoMessage() {} func (x *RestoreObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[14] + mi := &file_google_storage_v2_storage_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1342,7 +1058,7 @@ func (x *RestoreObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RestoreObjectRequest.ProtoReflect.Descriptor instead. 
func (*RestoreObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{14} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{9} } func (x *RestoreObjectRequest) GetBucket() string { @@ -1366,6 +1082,13 @@ func (x *RestoreObjectRequest) GetGeneration() int64 { return 0 } +func (x *RestoreObjectRequest) GetRestoreToken() string { + if x != nil { + return x.RestoreToken + } + return "" +} + func (x *RestoreObjectRequest) GetIfGenerationMatch() int64 { if x != nil && x.IfGenerationMatch != nil { return *x.IfGenerationMatch @@ -1423,7 +1146,7 @@ type CancelResumableWriteRequest struct { func (x *CancelResumableWriteRequest) Reset() { *x = CancelResumableWriteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[15] + mi := &file_google_storage_v2_storage_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1436,7 +1159,7 @@ func (x *CancelResumableWriteRequest) String() string { func (*CancelResumableWriteRequest) ProtoMessage() {} func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[15] + mi := &file_google_storage_v2_storage_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1449,7 +1172,7 @@ func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CancelResumableWriteRequest.ProtoReflect.Descriptor instead. func (*CancelResumableWriteRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{15} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{10} } func (x *CancelResumableWriteRequest) GetUploadId() string { @@ -1470,7 +1193,7 @@ type CancelResumableWriteResponse struct { func (x *CancelResumableWriteResponse) Reset() { *x = CancelResumableWriteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[16] + mi := &file_google_storage_v2_storage_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1483,7 +1206,7 @@ func (x *CancelResumableWriteResponse) String() string { func (*CancelResumableWriteResponse) ProtoMessage() {} func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[16] + mi := &file_google_storage_v2_storage_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1496,7 +1219,7 @@ func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CancelResumableWriteResponse.ProtoReflect.Descriptor instead. func (*CancelResumableWriteResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{11} } // Request message for ReadObject. 
@@ -1558,7 +1281,7 @@ type ReadObjectRequest struct { func (x *ReadObjectRequest) Reset() { *x = ReadObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[17] + mi := &file_google_storage_v2_storage_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1571,7 +1294,7 @@ func (x *ReadObjectRequest) String() string { func (*ReadObjectRequest) ProtoMessage() {} func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[17] + mi := &file_google_storage_v2_storage_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1584,7 +1307,7 @@ func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadObjectRequest.ProtoReflect.Descriptor instead. func (*ReadObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12} } func (x *ReadObjectRequest) GetBucket() string { @@ -1701,12 +1424,18 @@ type GetObjectRequest struct { // metadata.owner. // * may be used to mean "all fields". ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,10,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` + // Optional. Restore token used to differentiate soft-deleted objects with the + // same name and generation. Only applicable for hierarchical namespace + // buckets and if soft_deleted is set to true. This parameter is optional, and + // is only required in the rare case when there are multiple soft-deleted + // objects with the same name and generation. + RestoreToken string `protobuf:"bytes,12,opt,name=restore_token,json=restoreToken,proto3" json:"restore_token,omitempty"` } func (x *GetObjectRequest) Reset() { *x = GetObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[18] + mi := &file_google_storage_v2_storage_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1719,7 +1448,7 @@ func (x *GetObjectRequest) String() string { func (*GetObjectRequest) ProtoMessage() {} func (x *GetObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[18] + mi := &file_google_storage_v2_storage_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1732,7 +1461,7 @@ func (x *GetObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetObjectRequest.ProtoReflect.Descriptor instead. func (*GetObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{13} } func (x *GetObjectRequest) GetBucket() string { @@ -1805,6 +1534,13 @@ func (x *GetObjectRequest) GetReadMask() *fieldmaskpb.FieldMask { return nil } +func (x *GetObjectRequest) GetRestoreToken() string { + if x != nil { + return x.RestoreToken + } + return "" +} + // Response message for ReadObject. 
type ReadObjectResponse struct { state protoimpl.MessageState @@ -1832,7 +1568,7 @@ type ReadObjectResponse struct { func (x *ReadObjectResponse) Reset() { *x = ReadObjectResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[19] + mi := &file_google_storage_v2_storage_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1845,7 +1581,7 @@ func (x *ReadObjectResponse) String() string { func (*ReadObjectResponse) ProtoMessage() {} func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[19] + mi := &file_google_storage_v2_storage_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1858,7 +1594,7 @@ func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadObjectResponse.ProtoReflect.Descriptor instead. func (*ReadObjectResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{14} } func (x *ReadObjectResponse) GetChecksummedData() *ChecksummedData { @@ -1929,7 +1665,7 @@ type WriteObjectSpec struct { func (x *WriteObjectSpec) Reset() { *x = WriteObjectSpec{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[20] + mi := &file_google_storage_v2_storage_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1942,7 +1678,7 @@ func (x *WriteObjectSpec) String() string { func (*WriteObjectSpec) ProtoMessage() {} func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[20] + mi := &file_google_storage_v2_storage_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1955,7 +1691,7 @@ func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteObjectSpec.ProtoReflect.Descriptor instead. func (*WriteObjectSpec) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{15} } func (x *WriteObjectSpec) GetResource() *Object { @@ -2059,7 +1795,7 @@ type WriteObjectRequest struct { func (x *WriteObjectRequest) Reset() { *x = WriteObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[21] + mi := &file_google_storage_v2_storage_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2072,7 +1808,7 @@ func (x *WriteObjectRequest) String() string { func (*WriteObjectRequest) ProtoMessage() {} func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[21] + mi := &file_google_storage_v2_storage_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2085,7 +1821,7 @@ func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteObjectRequest.ProtoReflect.Descriptor instead. 
func (*WriteObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16} } func (m *WriteObjectRequest) GetFirstMessage() isWriteObjectRequest_FirstMessage { @@ -2201,7 +1937,7 @@ type WriteObjectResponse struct { func (x *WriteObjectResponse) Reset() { *x = WriteObjectResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[22] + mi := &file_google_storage_v2_storage_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2214,7 +1950,7 @@ func (x *WriteObjectResponse) String() string { func (*WriteObjectResponse) ProtoMessage() {} func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[22] + mi := &file_google_storage_v2_storage_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2227,7 +1963,7 @@ func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteObjectResponse.ProtoReflect.Descriptor instead. func (*WriteObjectResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17} } func (m *WriteObjectResponse) GetWriteStatus() isWriteObjectResponse_WriteStatus { @@ -2306,8 +2042,7 @@ type BidiWriteObjectRequest struct { Data isBidiWriteObjectRequest_Data `protobuf_oneof:"data"` // Checksums for the complete object. If the checksums computed by the service // don't match the specified checksums the call will fail. May only be - // provided in the first or last request (either with first_message, or - // finish_write set). + // provided in last request (with finish_write set). ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` // For each BidiWriteObjectRequest where state_lookup is `true` or the client // closes the stream, the service will send a BidiWriteObjectResponse @@ -2339,7 +2074,7 @@ type BidiWriteObjectRequest struct { func (x *BidiWriteObjectRequest) Reset() { *x = BidiWriteObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[23] + mi := &file_google_storage_v2_storage_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2352,7 +2087,7 @@ func (x *BidiWriteObjectRequest) String() string { func (*BidiWriteObjectRequest) ProtoMessage() {} func (x *BidiWriteObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[23] + mi := &file_google_storage_v2_storage_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2365,7 +2100,7 @@ func (x *BidiWriteObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BidiWriteObjectRequest.ProtoReflect.Descriptor instead. 
func (*BidiWriteObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18} } func (m *BidiWriteObjectRequest) GetFirstMessage() isBidiWriteObjectRequest_FirstMessage { @@ -2495,7 +2230,7 @@ type BidiWriteObjectResponse struct { func (x *BidiWriteObjectResponse) Reset() { *x = BidiWriteObjectResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[24] + mi := &file_google_storage_v2_storage_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2508,7 +2243,7 @@ func (x *BidiWriteObjectResponse) String() string { func (*BidiWriteObjectResponse) ProtoMessage() {} func (x *BidiWriteObjectResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[24] + mi := &file_google_storage_v2_storage_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2521,7 +2256,7 @@ func (x *BidiWriteObjectResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use BidiWriteObjectResponse.ProtoReflect.Descriptor instead. func (*BidiWriteObjectResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{24} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19} } func (m *BidiWriteObjectResponse) GetWriteStatus() isBidiWriteObjectResponse_WriteStatus { @@ -2630,7 +2365,7 @@ type ListObjectsRequest struct { func (x *ListObjectsRequest) Reset() { *x = ListObjectsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[25] + mi := &file_google_storage_v2_storage_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2643,7 +2378,7 @@ func (x *ListObjectsRequest) String() string { func (*ListObjectsRequest) ProtoMessage() {} func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[25] + mi := &file_google_storage_v2_storage_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2656,7 +2391,7 @@ func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListObjectsRequest.ProtoReflect.Descriptor instead. 
func (*ListObjectsRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20} } func (x *ListObjectsRequest) GetParent() string { @@ -2766,7 +2501,7 @@ type QueryWriteStatusRequest struct { func (x *QueryWriteStatusRequest) Reset() { *x = QueryWriteStatusRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[26] + mi := &file_google_storage_v2_storage_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2779,7 +2514,7 @@ func (x *QueryWriteStatusRequest) String() string { func (*QueryWriteStatusRequest) ProtoMessage() {} func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[26] + mi := &file_google_storage_v2_storage_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2792,7 +2527,7 @@ func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryWriteStatusRequest.ProtoReflect.Descriptor instead. func (*QueryWriteStatusRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21} } func (x *QueryWriteStatusRequest) GetUploadId() string { @@ -2827,7 +2562,7 @@ type QueryWriteStatusResponse struct { func (x *QueryWriteStatusResponse) Reset() { *x = QueryWriteStatusResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[27] + mi := &file_google_storage_v2_storage_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2840,7 +2575,7 @@ func (x *QueryWriteStatusResponse) String() string { func (*QueryWriteStatusResponse) ProtoMessage() {} func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[27] + mi := &file_google_storage_v2_storage_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2853,7 +2588,7 @@ func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryWriteStatusResponse.ProtoReflect.Descriptor instead. 
func (*QueryWriteStatusResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22} } func (m *QueryWriteStatusResponse) GetWriteStatus() isQueryWriteStatusResponse_WriteStatus { @@ -3011,7 +2746,7 @@ type RewriteObjectRequest struct { func (x *RewriteObjectRequest) Reset() { *x = RewriteObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[28] + mi := &file_google_storage_v2_storage_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3024,7 +2759,7 @@ func (x *RewriteObjectRequest) String() string { func (*RewriteObjectRequest) ProtoMessage() {} func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[28] + mi := &file_google_storage_v2_storage_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3037,7 +2772,7 @@ func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RewriteObjectRequest.ProtoReflect.Descriptor instead. func (*RewriteObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23} } func (x *RewriteObjectRequest) GetDestinationName() string { @@ -3227,7 +2962,7 @@ type RewriteResponse struct { func (x *RewriteResponse) Reset() { *x = RewriteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[29] + mi := &file_google_storage_v2_storage_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3240,7 +2975,7 @@ func (x *RewriteResponse) String() string { func (*RewriteResponse) ProtoMessage() {} func (x *RewriteResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[29] + mi := &file_google_storage_v2_storage_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3253,7 +2988,7 @@ func (x *RewriteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RewriteResponse.ProtoReflect.Descriptor instead. 
func (*RewriteResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{24} } func (x *RewriteResponse) GetTotalBytesRewritten() int64 { @@ -3312,7 +3047,7 @@ type StartResumableWriteRequest struct { func (x *StartResumableWriteRequest) Reset() { *x = StartResumableWriteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[30] + mi := &file_google_storage_v2_storage_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3325,7 +3060,7 @@ func (x *StartResumableWriteRequest) String() string { func (*StartResumableWriteRequest) ProtoMessage() {} func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[30] + mi := &file_google_storage_v2_storage_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3338,7 +3073,7 @@ func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StartResumableWriteRequest.ProtoReflect.Descriptor instead. func (*StartResumableWriteRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25} } func (x *StartResumableWriteRequest) GetWriteObjectSpec() *WriteObjectSpec { @@ -3376,7 +3111,7 @@ type StartResumableWriteResponse struct { func (x *StartResumableWriteResponse) Reset() { *x = StartResumableWriteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[31] + mi := &file_google_storage_v2_storage_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3389,7 +3124,7 @@ func (x *StartResumableWriteResponse) String() string { func (*StartResumableWriteResponse) ProtoMessage() {} func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[31] + mi := &file_google_storage_v2_storage_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3402,7 +3137,7 @@ func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StartResumableWriteResponse.ProtoReflect.Descriptor instead. 
func (*StartResumableWriteResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26} } func (x *StartResumableWriteResponse) GetUploadId() string { @@ -3459,7 +3194,7 @@ type UpdateObjectRequest struct { func (x *UpdateObjectRequest) Reset() { *x = UpdateObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[32] + mi := &file_google_storage_v2_storage_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3472,7 +3207,7 @@ func (x *UpdateObjectRequest) String() string { func (*UpdateObjectRequest) ProtoMessage() {} func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[32] + mi := &file_google_storage_v2_storage_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3485,7 +3220,7 @@ func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateObjectRequest.ProtoReflect.Descriptor instead. func (*UpdateObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27} } func (x *UpdateObjectRequest) GetObject() *Object { @@ -3544,87 +3279,40 @@ func (x *UpdateObjectRequest) GetCommonObjectRequestParams() *CommonObjectReques return nil } -// Request message for GetServiceAccount. -type GetServiceAccountRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Project ID, in the format of "projects/{projectIdentifier}". - // {projectIdentifier} can be the project ID or project number. - Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` -} - -func (x *GetServiceAccountRequest) Reset() { - *x = GetServiceAccountRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetServiceAccountRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetServiceAccountRequest) ProtoMessage() {} - -func (x *GetServiceAccountRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetServiceAccountRequest.ProtoReflect.Descriptor instead. -func (*GetServiceAccountRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33} -} - -func (x *GetServiceAccountRequest) GetProject() string { - if x != nil { - return x.Project - } - return "" -} - -// Request message for CreateHmacKey. -type CreateHmacKeyRequest struct { +// Parameters that can be passed to any object request. +type CommonObjectRequestParams struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The project that the HMAC-owning service account lives in, in the - // format of "projects/{projectIdentifier}". 
{projectIdentifier} can be the - // project ID or project number. - Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` - // Required. The service account to create the HMAC for. - ServiceAccountEmail string `protobuf:"bytes,2,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` + // Encryption algorithm used with the Customer-Supplied Encryption Keys + // feature. + EncryptionAlgorithm string `protobuf:"bytes,1,opt,name=encryption_algorithm,json=encryptionAlgorithm,proto3" json:"encryption_algorithm,omitempty"` + // Encryption key used with the Customer-Supplied Encryption Keys feature. + // In raw bytes format (not base64-encoded). + EncryptionKeyBytes []byte `protobuf:"bytes,4,opt,name=encryption_key_bytes,json=encryptionKeyBytes,proto3" json:"encryption_key_bytes,omitempty"` + // SHA256 hash of encryption key used with the Customer-Supplied Encryption + // Keys feature. + EncryptionKeySha256Bytes []byte `protobuf:"bytes,5,opt,name=encryption_key_sha256_bytes,json=encryptionKeySha256Bytes,proto3" json:"encryption_key_sha256_bytes,omitempty"` } -func (x *CreateHmacKeyRequest) Reset() { - *x = CreateHmacKeyRequest{} +func (x *CommonObjectRequestParams) Reset() { + *x = CommonObjectRequestParams{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[34] + mi := &file_google_storage_v2_storage_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *CreateHmacKeyRequest) String() string { +func (x *CommonObjectRequestParams) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CreateHmacKeyRequest) ProtoMessage() {} +func (*CommonObjectRequestParams) ProtoMessage() {} -func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[34] +func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3635,115 +3323,56 @@ func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CreateHmacKeyRequest.ProtoReflect.Descriptor instead. -func (*CreateHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34} -} - -func (x *CreateHmacKeyRequest) GetProject() string { - if x != nil { - return x.Project - } - return "" +// Deprecated: Use CommonObjectRequestParams.ProtoReflect.Descriptor instead. +func (*CommonObjectRequestParams) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28} } -func (x *CreateHmacKeyRequest) GetServiceAccountEmail() string { +func (x *CommonObjectRequestParams) GetEncryptionAlgorithm() string { if x != nil { - return x.ServiceAccountEmail + return x.EncryptionAlgorithm } return "" } -// Create hmac response. The only time the secret for an HMAC will be returned. -type CreateHmacKeyResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Key metadata. - Metadata *HmacKeyMetadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` - // HMAC key secret material. - // In raw bytes format (not base64-encoded). 
- SecretKeyBytes []byte `protobuf:"bytes,3,opt,name=secret_key_bytes,json=secretKeyBytes,proto3" json:"secret_key_bytes,omitempty"` -} - -func (x *CreateHmacKeyResponse) Reset() { - *x = CreateHmacKeyResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateHmacKeyResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateHmacKeyResponse) ProtoMessage() {} - -func (x *CreateHmacKeyResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateHmacKeyResponse.ProtoReflect.Descriptor instead. -func (*CreateHmacKeyResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35} -} - -func (x *CreateHmacKeyResponse) GetMetadata() *HmacKeyMetadata { +func (x *CommonObjectRequestParams) GetEncryptionKeyBytes() []byte { if x != nil { - return x.Metadata + return x.EncryptionKeyBytes } return nil } -func (x *CreateHmacKeyResponse) GetSecretKeyBytes() []byte { +func (x *CommonObjectRequestParams) GetEncryptionKeySha256Bytes() []byte { if x != nil { - return x.SecretKeyBytes + return x.EncryptionKeySha256Bytes } return nil } -// Request object to delete a given HMAC key. -type DeleteHmacKeyRequest struct { +// Shared constants. +type ServiceConstants struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - // Required. The identifying key for the HMAC to delete. - AccessId string `protobuf:"bytes,1,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` - // Required. The project that owns the HMAC key, in the format of - // "projects/{projectIdentifier}". - // {projectIdentifier} can be the project ID or project number. - Project string `protobuf:"bytes,2,opt,name=project,proto3" json:"project,omitempty"` } -func (x *DeleteHmacKeyRequest) Reset() { - *x = DeleteHmacKeyRequest{} +func (x *ServiceConstants) Reset() { + *x = ServiceConstants{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[36] + mi := &file_google_storage_v2_storage_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DeleteHmacKeyRequest) String() string { +func (x *ServiceConstants) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteHmacKeyRequest) ProtoMessage() {} +func (*ServiceConstants) ProtoMessage() {} -func (x *DeleteHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[36] +func (x *ServiceConstants) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3754,424 +3383,29 @@ func (x *DeleteHmacKeyRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteHmacKeyRequest.ProtoReflect.Descriptor instead. 
-func (*DeleteHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36} -} - -func (x *DeleteHmacKeyRequest) GetAccessId() string { - if x != nil { - return x.AccessId - } - return "" -} - -func (x *DeleteHmacKeyRequest) GetProject() string { - if x != nil { - return x.Project - } - return "" +// Deprecated: Use ServiceConstants.ProtoReflect.Descriptor instead. +func (*ServiceConstants) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29} } -// Request object to get metadata on a given HMAC key. -type GetHmacKeyRequest struct { +// A bucket. +type Bucket struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The identifying key for the HMAC to delete. - AccessId string `protobuf:"bytes,1,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` - // Required. The project the HMAC key lies in, in the format of - // "projects/{projectIdentifier}". - // {projectIdentifier} can be the project ID or project number. - Project string `protobuf:"bytes,2,opt,name=project,proto3" json:"project,omitempty"` -} - -func (x *GetHmacKeyRequest) Reset() { - *x = GetHmacKeyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetHmacKeyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetHmacKeyRequest) ProtoMessage() {} - -func (x *GetHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetHmacKeyRequest.ProtoReflect.Descriptor instead. -func (*GetHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37} -} - -func (x *GetHmacKeyRequest) GetAccessId() string { - if x != nil { - return x.AccessId - } - return "" -} - -func (x *GetHmacKeyRequest) GetProject() string { - if x != nil { - return x.Project - } - return "" -} - -// Request to fetch a list of HMAC keys under a given project. -type ListHmacKeysRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The project to list HMAC keys for, in the format of - // "projects/{projectIdentifier}". - // {projectIdentifier} can be the project ID or project number. - Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` - // The maximum number of keys to return. - PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // A previously returned token from ListHmacKeysResponse to get the next page. - PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` - // If set, filters to only return HMAC keys for specified service account. - ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` - // If set, return deleted keys that have not yet been wiped out. 
- ShowDeletedKeys bool `protobuf:"varint,5,opt,name=show_deleted_keys,json=showDeletedKeys,proto3" json:"show_deleted_keys,omitempty"` -} - -func (x *ListHmacKeysRequest) Reset() { - *x = ListHmacKeysRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListHmacKeysRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListHmacKeysRequest) ProtoMessage() {} - -func (x *ListHmacKeysRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[38] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListHmacKeysRequest.ProtoReflect.Descriptor instead. -func (*ListHmacKeysRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38} -} - -func (x *ListHmacKeysRequest) GetProject() string { - if x != nil { - return x.Project - } - return "" -} - -func (x *ListHmacKeysRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListHmacKeysRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -func (x *ListHmacKeysRequest) GetServiceAccountEmail() string { - if x != nil { - return x.ServiceAccountEmail - } - return "" -} - -func (x *ListHmacKeysRequest) GetShowDeletedKeys() bool { - if x != nil { - return x.ShowDeletedKeys - } - return false -} - -// Hmac key list response with next page information. -type ListHmacKeysResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The list of items. - HmacKeys []*HmacKeyMetadata `protobuf:"bytes,1,rep,name=hmac_keys,json=hmacKeys,proto3" json:"hmac_keys,omitempty"` - // The continuation token, used to page through large result sets. Provide - // this value in a subsequent request to return the next page of results. - NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListHmacKeysResponse) Reset() { - *x = ListHmacKeysResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListHmacKeysResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListHmacKeysResponse) ProtoMessage() {} - -func (x *ListHmacKeysResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[39] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListHmacKeysResponse.ProtoReflect.Descriptor instead. -func (*ListHmacKeysResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39} -} - -func (x *ListHmacKeysResponse) GetHmacKeys() []*HmacKeyMetadata { - if x != nil { - return x.HmacKeys - } - return nil -} - -func (x *ListHmacKeysResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// Request object to update an HMAC key state. 
-// HmacKeyMetadata.state is required and the only writable field in -// UpdateHmacKey operation. Specifying fields other than state will result in an -// error. -type UpdateHmacKeyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The HMAC key to update. - // If present, the hmac_key's `id` field will be used to identify the key. - // Otherwise, the hmac_key's access_id and project fields will be used to - // identify the key. - HmacKey *HmacKeyMetadata `protobuf:"bytes,1,opt,name=hmac_key,json=hmacKey,proto3" json:"hmac_key,omitempty"` - // Update mask for hmac_key. - // Not specifying any fields will mean only the `state` field is updated to - // the value specified in `hmac_key`. - UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` -} - -func (x *UpdateHmacKeyRequest) Reset() { - *x = UpdateHmacKeyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateHmacKeyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateHmacKeyRequest) ProtoMessage() {} - -func (x *UpdateHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[40] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateHmacKeyRequest.ProtoReflect.Descriptor instead. -func (*UpdateHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40} -} - -func (x *UpdateHmacKeyRequest) GetHmacKey() *HmacKeyMetadata { - if x != nil { - return x.HmacKey - } - return nil -} - -func (x *UpdateHmacKeyRequest) GetUpdateMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.UpdateMask - } - return nil -} - -// Parameters that can be passed to any object request. -type CommonObjectRequestParams struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Encryption algorithm used with the Customer-Supplied Encryption Keys - // feature. - EncryptionAlgorithm string `protobuf:"bytes,1,opt,name=encryption_algorithm,json=encryptionAlgorithm,proto3" json:"encryption_algorithm,omitempty"` - // Encryption key used with the Customer-Supplied Encryption Keys feature. - // In raw bytes format (not base64-encoded). - EncryptionKeyBytes []byte `protobuf:"bytes,4,opt,name=encryption_key_bytes,json=encryptionKeyBytes,proto3" json:"encryption_key_bytes,omitempty"` - // SHA256 hash of encryption key used with the Customer-Supplied Encryption - // Keys feature. 
- EncryptionKeySha256Bytes []byte `protobuf:"bytes,5,opt,name=encryption_key_sha256_bytes,json=encryptionKeySha256Bytes,proto3" json:"encryption_key_sha256_bytes,omitempty"` -} - -func (x *CommonObjectRequestParams) Reset() { - *x = CommonObjectRequestParams{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CommonObjectRequestParams) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CommonObjectRequestParams) ProtoMessage() {} - -func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[41] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CommonObjectRequestParams.ProtoReflect.Descriptor instead. -func (*CommonObjectRequestParams) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{41} -} - -func (x *CommonObjectRequestParams) GetEncryptionAlgorithm() string { - if x != nil { - return x.EncryptionAlgorithm - } - return "" -} - -func (x *CommonObjectRequestParams) GetEncryptionKeyBytes() []byte { - if x != nil { - return x.EncryptionKeyBytes - } - return nil -} - -func (x *CommonObjectRequestParams) GetEncryptionKeySha256Bytes() []byte { - if x != nil { - return x.EncryptionKeySha256Bytes - } - return nil -} - -// Shared constants. -type ServiceConstants struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *ServiceConstants) Reset() { - *x = ServiceConstants{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[42] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServiceConstants) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceConstants) ProtoMessage() {} - -func (x *ServiceConstants) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[42] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceConstants.ProtoReflect.Descriptor instead. -func (*ServiceConstants) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42} -} - -// A bucket. -type Bucket struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Immutable. The name of the bucket. - // Format: `projects/{project}/buckets/{bucket}` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Output only. The user-chosen part of the bucket name. The `{bucket}` - // portion of the `name` field. For globally unique buckets, this is equal to - // the "bucket name" of other Cloud Storage APIs. Example: "pub". - BucketId string `protobuf:"bytes,2,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` - // The etag of the bucket. - // If included in the metadata of an UpdateBucketRequest, the operation will - // only be performed if the etag matches that of the bucket. 
- Etag string `protobuf:"bytes,29,opt,name=etag,proto3" json:"etag,omitempty"` - // Immutable. The project which owns this bucket, in the format of + // Immutable. The name of the bucket. + // Format: `projects/{project}/buckets/{bucket}` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Output only. The user-chosen part of the bucket name. The `{bucket}` + // portion of the `name` field. For globally unique buckets, this is equal to + // the "bucket name" of other Cloud Storage APIs. Example: "pub". + BucketId string `protobuf:"bytes,2,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` + // The etag of the bucket. + // If included in the metadata of an UpdateBucketRequest, the operation will + // only be performed if the etag matches that of the bucket. + Etag string `protobuf:"bytes,29,opt,name=etag,proto3" json:"etag,omitempty"` + // Immutable. The project which owns this bucket, in the format of // "projects/{projectIdentifier}". // {projectIdentifier} can be the project ID or project number. Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"` @@ -4268,7 +3502,8 @@ type Bucket struct { // Reserved for future use. SatisfiesPzs bool `protobuf:"varint,25,opt,name=satisfies_pzs,json=satisfiesPzs,proto3" json:"satisfies_pzs,omitempty"` // Configuration that, if present, specifies the data placement for a - // [https://cloud.google.com/storage/docs/use-dual-regions][Dual Region]. + // [https://cloud.google.com/storage/docs/locations#location-dr][configurable + // dual-region]. CustomPlacementConfig *Bucket_CustomPlacementConfig `protobuf:"bytes,26,opt,name=custom_placement_config,json=customPlacementConfig,proto3" json:"custom_placement_config,omitempty"` // The bucket's Autoclass configuration. If there is no configuration, the // Autoclass feature will be disabled and have no effect on the bucket. @@ -4285,7 +3520,7 @@ type Bucket struct { func (x *Bucket) Reset() { *x = Bucket{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[43] + mi := &file_google_storage_v2_storage_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4298,7 +3533,7 @@ func (x *Bucket) String() string { func (*Bucket) ProtoMessage() {} func (x *Bucket) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[43] + mi := &file_google_storage_v2_storage_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4311,7 +3546,7 @@ func (x *Bucket) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket.ProtoReflect.Descriptor instead. 
func (*Bucket) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30} } func (x *Bucket) GetName() string { @@ -4574,7 +3809,7 @@ type BucketAccessControl struct { func (x *BucketAccessControl) Reset() { *x = BucketAccessControl{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[44] + mi := &file_google_storage_v2_storage_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4587,7 +3822,7 @@ func (x *BucketAccessControl) String() string { func (*BucketAccessControl) ProtoMessage() {} func (x *BucketAccessControl) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[44] + mi := &file_google_storage_v2_storage_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4600,7 +3835,7 @@ func (x *BucketAccessControl) ProtoReflect() protoreflect.Message { // Deprecated: Use BucketAccessControl.ProtoReflect.Descriptor instead. func (*BucketAccessControl) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{44} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31} } func (x *BucketAccessControl) GetRole() string { @@ -4682,7 +3917,7 @@ type ChecksummedData struct { func (x *ChecksummedData) Reset() { *x = ChecksummedData{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[45] + mi := &file_google_storage_v2_storage_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4695,7 +3930,7 @@ func (x *ChecksummedData) String() string { func (*ChecksummedData) ProtoMessage() {} func (x *ChecksummedData) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[45] + mi := &file_google_storage_v2_storage_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4708,7 +3943,7 @@ func (x *ChecksummedData) ProtoReflect() protoreflect.Message { // Deprecated: Use ChecksummedData.ProtoReflect.Descriptor instead. func (*ChecksummedData) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{45} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32} } func (x *ChecksummedData) GetContent() []byte { @@ -4734,230 +3969,35 @@ type ObjectChecksums struct { // CRC32C digest of the object data. Computed by the Cloud Storage service for // all written objects. // If set in a WriteObjectRequest, service will validate that the stored - // object matches this checksum. - Crc32C *uint32 `protobuf:"fixed32,1,opt,name=crc32c,proto3,oneof" json:"crc32c,omitempty"` - // 128 bit MD5 hash of the object data. - // For more information about using the MD5 hash, see - // [https://cloud.google.com/storage/docs/hashes-etags#json-api][Hashes and - // ETags: Best Practices]. - // Not all objects will provide an MD5 hash. For example, composite objects - // provide only crc32c hashes. 
- // This value is equivalent to running `cat object.txt | openssl md5 -binary` - Md5Hash []byte `protobuf:"bytes,2,opt,name=md5_hash,json=md5Hash,proto3" json:"md5_hash,omitempty"` -} - -func (x *ObjectChecksums) Reset() { - *x = ObjectChecksums{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[46] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ObjectChecksums) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ObjectChecksums) ProtoMessage() {} - -func (x *ObjectChecksums) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[46] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ObjectChecksums.ProtoReflect.Descriptor instead. -func (*ObjectChecksums) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{46} -} - -func (x *ObjectChecksums) GetCrc32C() uint32 { - if x != nil && x.Crc32C != nil { - return *x.Crc32C - } - return 0 -} - -func (x *ObjectChecksums) GetMd5Hash() []byte { - if x != nil { - return x.Md5Hash - } - return nil -} - -// Hmac Key Metadata, which includes all information other than the secret. -type HmacKeyMetadata struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Immutable. Resource name ID of the key in the format - // {projectIdentifier}/{accessId}. - // {projectIdentifier} can be the project ID or project number. - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // Immutable. Globally unique id for keys. - AccessId string `protobuf:"bytes,2,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` - // Immutable. Identifies the project that owns the service account of the - // specified HMAC key, in the format "projects/{projectIdentifier}". - // {projectIdentifier} can be the project ID or project number. - Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"` - // Output only. Email of the service account the key authenticates as. - ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` - // State of the key. One of ACTIVE, INACTIVE, or DELETED. - // Writable, can be updated by UpdateHmacKey operation. - State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state,omitempty"` - // Output only. The creation time of the HMAC key. - CreateTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` - // Output only. The last modification time of the HMAC key metadata. - UpdateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` - // The etag of the HMAC key. 
- Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"` -} - -func (x *HmacKeyMetadata) Reset() { - *x = HmacKeyMetadata{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[47] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HmacKeyMetadata) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HmacKeyMetadata) ProtoMessage() {} - -func (x *HmacKeyMetadata) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[47] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HmacKeyMetadata.ProtoReflect.Descriptor instead. -func (*HmacKeyMetadata) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{47} -} - -func (x *HmacKeyMetadata) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *HmacKeyMetadata) GetAccessId() string { - if x != nil { - return x.AccessId - } - return "" -} - -func (x *HmacKeyMetadata) GetProject() string { - if x != nil { - return x.Project - } - return "" -} - -func (x *HmacKeyMetadata) GetServiceAccountEmail() string { - if x != nil { - return x.ServiceAccountEmail - } - return "" -} - -func (x *HmacKeyMetadata) GetState() string { - if x != nil { - return x.State - } - return "" -} - -func (x *HmacKeyMetadata) GetCreateTime() *timestamppb.Timestamp { - if x != nil { - return x.CreateTime - } - return nil -} - -func (x *HmacKeyMetadata) GetUpdateTime() *timestamppb.Timestamp { - if x != nil { - return x.UpdateTime - } - return nil -} - -func (x *HmacKeyMetadata) GetEtag() string { - if x != nil { - return x.Etag - } - return "" + // object matches this checksum. + Crc32C *uint32 `protobuf:"fixed32,1,opt,name=crc32c,proto3,oneof" json:"crc32c,omitempty"` + // 128 bit MD5 hash of the object data. + // For more information about using the MD5 hash, see + // [https://cloud.google.com/storage/docs/hashes-etags#json-api][Hashes and + // ETags: Best Practices]. + // Not all objects will provide an MD5 hash. For example, composite objects + // provide only crc32c hashes. This value is equivalent to running `cat + // object.txt | openssl md5 -binary` + Md5Hash []byte `protobuf:"bytes,2,opt,name=md5_hash,json=md5Hash,proto3" json:"md5_hash,omitempty"` } -// A directive to publish Pub/Sub notifications upon changes to a bucket. -type NotificationConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The resource name of this NotificationConfig. - // Format: - // `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}` - // The `{project}` portion may be `_` for globally unique buckets. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Required. The Pub/Sub topic to which this subscription publishes. Formatted - // as: - // '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}' - Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` - // The etag of the NotificationConfig. - // If included in the metadata of GetNotificationConfigRequest, the operation - // will only be performed if the etag matches that of the NotificationConfig. 
- Etag string `protobuf:"bytes,7,opt,name=etag,proto3" json:"etag,omitempty"` - // If present, only send notifications about listed event types. If - // empty, sent notifications for all event types. - EventTypes []string `protobuf:"bytes,3,rep,name=event_types,json=eventTypes,proto3" json:"event_types,omitempty"` - // A list of additional attributes to attach to each Pub/Sub - // message published for this NotificationConfig. - CustomAttributes map[string]string `protobuf:"bytes,4,rep,name=custom_attributes,json=customAttributes,proto3" json:"custom_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // If present, only apply this NotificationConfig to object names that - // begin with this prefix. - ObjectNamePrefix string `protobuf:"bytes,5,opt,name=object_name_prefix,json=objectNamePrefix,proto3" json:"object_name_prefix,omitempty"` - // Required. The desired content of the Payload. - PayloadFormat string `protobuf:"bytes,6,opt,name=payload_format,json=payloadFormat,proto3" json:"payload_format,omitempty"` -} - -func (x *NotificationConfig) Reset() { - *x = NotificationConfig{} +func (x *ObjectChecksums) Reset() { + *x = ObjectChecksums{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[48] + mi := &file_google_storage_v2_storage_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *NotificationConfig) String() string { +func (x *ObjectChecksums) String() string { return protoimpl.X.MessageStringOf(x) } -func (*NotificationConfig) ProtoMessage() {} +func (*ObjectChecksums) ProtoMessage() {} -func (x *NotificationConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[48] +func (x *ObjectChecksums) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4968,60 +4008,25 @@ func (x *NotificationConfig) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use NotificationConfig.ProtoReflect.Descriptor instead. -func (*NotificationConfig) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{48} -} - -func (x *NotificationConfig) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NotificationConfig) GetTopic() string { - if x != nil { - return x.Topic - } - return "" -} - -func (x *NotificationConfig) GetEtag() string { - if x != nil { - return x.Etag - } - return "" +// Deprecated: Use ObjectChecksums.ProtoReflect.Descriptor instead. 
+func (*ObjectChecksums) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33} } -func (x *NotificationConfig) GetEventTypes() []string { - if x != nil { - return x.EventTypes +func (x *ObjectChecksums) GetCrc32C() uint32 { + if x != nil && x.Crc32C != nil { + return *x.Crc32C } - return nil + return 0 } -func (x *NotificationConfig) GetCustomAttributes() map[string]string { +func (x *ObjectChecksums) GetMd5Hash() []byte { if x != nil { - return x.CustomAttributes + return x.Md5Hash } return nil } -func (x *NotificationConfig) GetObjectNamePrefix() string { - if x != nil { - return x.ObjectNamePrefix - } - return "" -} - -func (x *NotificationConfig) GetPayloadFormat() string { - if x != nil { - return x.PayloadFormat - } - return "" -} - // Describes the Customer-Supplied Encryption Key mechanism used to store an // Object's data at rest. type CustomerEncryption struct { @@ -5039,7 +4044,7 @@ type CustomerEncryption struct { func (x *CustomerEncryption) Reset() { *x = CustomerEncryption{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[49] + mi := &file_google_storage_v2_storage_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5052,7 +4057,7 @@ func (x *CustomerEncryption) String() string { func (*CustomerEncryption) ProtoMessage() {} func (x *CustomerEncryption) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[49] + mi := &file_google_storage_v2_storage_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5065,7 +4070,7 @@ func (x *CustomerEncryption) ProtoReflect() protoreflect.Message { // Deprecated: Use CustomerEncryption.ProtoReflect.Descriptor instead. func (*CustomerEncryption) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{49} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34} } func (x *CustomerEncryption) GetEncryptionAlgorithm() string { @@ -5106,6 +4111,10 @@ type Object struct { // Immutable. The content generation of this object. Used for object // versioning. Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` + // Output only. Restore token used to differentiate deleted objects with the + // same name and generation. This field is output only, and only set for + // deleted objects in HNS buckets. + RestoreToken *string `protobuf:"bytes,35,opt,name=restore_token,json=restoreToken,proto3,oneof" json:"restore_token,omitempty"` // Output only. The version of the metadata for this generation of this // object. Used for preconditions and for detecting changes in metadata. A // metageneration number is only meaningful in the context of a particular @@ -5148,7 +4157,10 @@ type Object struct { // Components are accumulated by compose operations. ComponentCount int32 `protobuf:"varint,15,opt,name=component_count,json=componentCount,proto3" json:"component_count,omitempty"` // Output only. Hashes for the data part of this object. This field is used - // for output only and will be silently ignored if provided in requests. + // for output only and will be silently ignored if provided in requests. The + // checksums of the complete object regardless of data range. 
If the object is + // downloaded in full, the client should compute one of these checksums over + // the downloaded object and compare it against the value provided here. Checksums *ObjectChecksums `protobuf:"bytes,16,opt,name=checksums,proto3" json:"checksums,omitempty"` // Output only. The modification time of the object metadata. // Set initially to object creation time and then updated whenever any @@ -5214,7 +4226,7 @@ type Object struct { func (x *Object) Reset() { *x = Object{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[50] + mi := &file_google_storage_v2_storage_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5227,7 +4239,7 @@ func (x *Object) String() string { func (*Object) ProtoMessage() {} func (x *Object) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[50] + mi := &file_google_storage_v2_storage_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5240,7 +4252,7 @@ func (x *Object) ProtoReflect() protoreflect.Message { // Deprecated: Use Object.ProtoReflect.Descriptor instead. func (*Object) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{50} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35} } func (x *Object) GetName() string { @@ -5271,6 +4283,13 @@ func (x *Object) GetGeneration() int64 { return 0 } +func (x *Object) GetRestoreToken() string { + if x != nil && x.RestoreToken != nil { + return *x.RestoreToken + } + return "" +} + func (x *Object) GetMetageneration() int64 { if x != nil { return x.Metageneration @@ -5452,7 +4471,10 @@ type ObjectAccessControl struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The access permission for the entity. + // The access permission for the entity. One of the following values: + // * `READER` + // * `WRITER` + // * `OWNER` Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` // The ID of the access-control entry. Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` @@ -5496,7 +4518,7 @@ type ObjectAccessControl struct { func (x *ObjectAccessControl) Reset() { *x = ObjectAccessControl{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[51] + mi := &file_google_storage_v2_storage_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5509,7 +4531,7 @@ func (x *ObjectAccessControl) String() string { func (*ObjectAccessControl) ProtoMessage() {} func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[51] + mi := &file_google_storage_v2_storage_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5522,7 +4544,7 @@ func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message { // Deprecated: Use ObjectAccessControl.ProtoReflect.Descriptor instead. 
func (*ObjectAccessControl) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{51} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36} } func (x *ObjectAccessControl) GetRole() string { @@ -5607,7 +4629,7 @@ type ListObjectsResponse struct { func (x *ListObjectsResponse) Reset() { *x = ListObjectsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[52] + mi := &file_google_storage_v2_storage_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5620,7 +4642,7 @@ func (x *ListObjectsResponse) String() string { func (*ListObjectsResponse) ProtoMessage() {} func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[52] + mi := &file_google_storage_v2_storage_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5633,7 +4655,7 @@ func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListObjectsResponse.ProtoReflect.Descriptor instead. func (*ListObjectsResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{52} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37} } func (x *ListObjectsResponse) GetObjects() []*Object { @@ -5672,7 +4694,7 @@ type ProjectTeam struct { func (x *ProjectTeam) Reset() { *x = ProjectTeam{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[53] + mi := &file_google_storage_v2_storage_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5685,7 +4707,7 @@ func (x *ProjectTeam) String() string { func (*ProjectTeam) ProtoMessage() {} func (x *ProjectTeam) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[53] + mi := &file_google_storage_v2_storage_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5698,7 +4720,7 @@ func (x *ProjectTeam) ProtoReflect() protoreflect.Message { // Deprecated: Use ProjectTeam.ProtoReflect.Descriptor instead. func (*ProjectTeam) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{53} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38} } func (x *ProjectTeam) GetProjectNumber() string { @@ -5715,57 +4737,6 @@ func (x *ProjectTeam) GetTeam() string { return "" } -// A service account, owned by Cloud Storage, which may be used when taking -// action on behalf of a given project, for example to publish Pub/Sub -// notifications or to retrieve security keys. -type ServiceAccount struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The ID of the notification. 
- EmailAddress string `protobuf:"bytes,1,opt,name=email_address,json=emailAddress,proto3" json:"email_address,omitempty"` -} - -func (x *ServiceAccount) Reset() { - *x = ServiceAccount{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[54] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServiceAccount) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceAccount) ProtoMessage() {} - -func (x *ServiceAccount) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[54] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceAccount.ProtoReflect.Descriptor instead. -func (*ServiceAccount) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{54} -} - -func (x *ServiceAccount) GetEmailAddress() string { - if x != nil { - return x.EmailAddress - } - return "" -} - // The owner of a specific resource. type Owner struct { state protoimpl.MessageState @@ -5781,7 +4752,7 @@ type Owner struct { func (x *Owner) Reset() { *x = Owner{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[55] + mi := &file_google_storage_v2_storage_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5794,7 +4765,7 @@ func (x *Owner) String() string { func (*Owner) ProtoMessage() {} func (x *Owner) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[55] + mi := &file_google_storage_v2_storage_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5807,7 +4778,7 @@ func (x *Owner) ProtoReflect() protoreflect.Message { // Deprecated: Use Owner.ProtoReflect.Descriptor instead. func (*Owner) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{55} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39} } func (x *Owner) GetEntity() string { @@ -5841,7 +4812,7 @@ type ContentRange struct { func (x *ContentRange) Reset() { *x = ContentRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[56] + mi := &file_google_storage_v2_storage_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5854,7 +4825,7 @@ func (x *ContentRange) String() string { func (*ContentRange) ProtoMessage() {} func (x *ContentRange) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[56] + mi := &file_google_storage_v2_storage_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5867,7 +4838,7 @@ func (x *ContentRange) ProtoReflect() protoreflect.Message { // Deprecated: Use ContentRange.ProtoReflect.Descriptor instead. 
func (*ContentRange) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{56} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40} } func (x *ContentRange) GetStart() int64 { @@ -5909,7 +4880,7 @@ type ComposeObjectRequest_SourceObject struct { func (x *ComposeObjectRequest_SourceObject) Reset() { *x = ComposeObjectRequest_SourceObject{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[57] + mi := &file_google_storage_v2_storage_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5922,7 +4893,7 @@ func (x *ComposeObjectRequest_SourceObject) String() string { func (*ComposeObjectRequest_SourceObject) ProtoMessage() {} func (x *ComposeObjectRequest_SourceObject) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[57] + mi := &file_google_storage_v2_storage_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5935,7 +4906,7 @@ func (x *ComposeObjectRequest_SourceObject) ProtoReflect() protoreflect.Message // Deprecated: Use ComposeObjectRequest_SourceObject.ProtoReflect.Descriptor instead. func (*ComposeObjectRequest_SourceObject) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7, 0} } func (x *ComposeObjectRequest_SourceObject) GetName() string { @@ -5974,7 +4945,7 @@ type ComposeObjectRequest_SourceObject_ObjectPreconditions struct { func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) Reset() { *x = ComposeObjectRequest_SourceObject_ObjectPreconditions{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[58] + mi := &file_google_storage_v2_storage_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5987,7 +4958,7 @@ func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) String() string func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoMessage() {} func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[58] + mi := &file_google_storage_v2_storage_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6000,7 +4971,7 @@ func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoReflect() p // Deprecated: Use ComposeObjectRequest_SourceObject_ObjectPreconditions.ProtoReflect.Descriptor instead. 
func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12, 0, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7, 0, 0} } func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) GetIfGenerationMatch() int64 { @@ -6023,7 +4994,7 @@ type Bucket_Billing struct { func (x *Bucket_Billing) Reset() { *x = Bucket_Billing{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[59] + mi := &file_google_storage_v2_storage_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6036,7 +5007,7 @@ func (x *Bucket_Billing) String() string { func (*Bucket_Billing) ProtoMessage() {} func (x *Bucket_Billing) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[59] + mi := &file_google_storage_v2_storage_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6049,7 +5020,7 @@ func (x *Bucket_Billing) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Billing.ProtoReflect.Descriptor instead. func (*Bucket_Billing) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 0} } func (x *Bucket_Billing) GetRequesterPays() bool { @@ -6089,7 +5060,7 @@ type Bucket_Cors struct { func (x *Bucket_Cors) Reset() { *x = Bucket_Cors{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[60] + mi := &file_google_storage_v2_storage_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6102,7 +5073,7 @@ func (x *Bucket_Cors) String() string { func (*Bucket_Cors) ProtoMessage() {} func (x *Bucket_Cors) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[60] + mi := &file_google_storage_v2_storage_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6115,7 +5086,7 @@ func (x *Bucket_Cors) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Cors.ProtoReflect.Descriptor instead. 
func (*Bucket_Cors) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 1} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 1} } func (x *Bucket_Cors) GetOrigin() []string { @@ -6160,7 +5131,7 @@ type Bucket_Encryption struct { func (x *Bucket_Encryption) Reset() { *x = Bucket_Encryption{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[61] + mi := &file_google_storage_v2_storage_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6173,7 +5144,7 @@ func (x *Bucket_Encryption) String() string { func (*Bucket_Encryption) ProtoMessage() {} func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[61] + mi := &file_google_storage_v2_storage_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6186,7 +5157,7 @@ func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Encryption.ProtoReflect.Descriptor instead. func (*Bucket_Encryption) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 2} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 2} } func (x *Bucket_Encryption) GetDefaultKmsKey() string { @@ -6212,7 +5183,7 @@ type Bucket_IamConfig struct { func (x *Bucket_IamConfig) Reset() { *x = Bucket_IamConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[62] + mi := &file_google_storage_v2_storage_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6225,7 +5196,7 @@ func (x *Bucket_IamConfig) String() string { func (*Bucket_IamConfig) ProtoMessage() {} func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[62] + mi := &file_google_storage_v2_storage_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6238,7 +5209,7 @@ func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_IamConfig.ProtoReflect.Descriptor instead. 
func (*Bucket_IamConfig) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 3} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 3} } func (x *Bucket_IamConfig) GetUniformBucketLevelAccess() *Bucket_IamConfig_UniformBucketLevelAccess { @@ -6270,7 +5241,7 @@ type Bucket_Lifecycle struct { func (x *Bucket_Lifecycle) Reset() { *x = Bucket_Lifecycle{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[63] + mi := &file_google_storage_v2_storage_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6283,7 +5254,7 @@ func (x *Bucket_Lifecycle) String() string { func (*Bucket_Lifecycle) ProtoMessage() {} func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[63] + mi := &file_google_storage_v2_storage_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6296,7 +5267,7 @@ func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle.ProtoReflect.Descriptor instead. func (*Bucket_Lifecycle) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 4} } func (x *Bucket_Lifecycle) GetRule() []*Bucket_Lifecycle_Rule { @@ -6322,7 +5293,7 @@ type Bucket_Logging struct { func (x *Bucket_Logging) Reset() { *x = Bucket_Logging{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[64] + mi := &file_google_storage_v2_storage_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6335,7 +5306,7 @@ func (x *Bucket_Logging) String() string { func (*Bucket_Logging) ProtoMessage() {} func (x *Bucket_Logging) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[64] + mi := &file_google_storage_v2_storage_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6348,7 +5319,7 @@ func (x *Bucket_Logging) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Logging.ProtoReflect.Descriptor instead. 
func (*Bucket_Logging) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 5} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 5} } func (x *Bucket_Logging) GetLogBucket() string { @@ -6387,7 +5358,7 @@ type Bucket_RetentionPolicy struct { func (x *Bucket_RetentionPolicy) Reset() { *x = Bucket_RetentionPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[65] + mi := &file_google_storage_v2_storage_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6400,7 +5371,7 @@ func (x *Bucket_RetentionPolicy) String() string { func (*Bucket_RetentionPolicy) ProtoMessage() {} func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[65] + mi := &file_google_storage_v2_storage_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6413,7 +5384,7 @@ func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_RetentionPolicy.ProtoReflect.Descriptor instead. func (*Bucket_RetentionPolicy) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 6} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 6} } func (x *Bucket_RetentionPolicy) GetEffectiveTime() *timestamppb.Timestamp { @@ -6454,7 +5425,7 @@ type Bucket_SoftDeletePolicy struct { func (x *Bucket_SoftDeletePolicy) Reset() { *x = Bucket_SoftDeletePolicy{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[66] + mi := &file_google_storage_v2_storage_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6467,7 +5438,7 @@ func (x *Bucket_SoftDeletePolicy) String() string { func (*Bucket_SoftDeletePolicy) ProtoMessage() {} func (x *Bucket_SoftDeletePolicy) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[66] + mi := &file_google_storage_v2_storage_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6480,7 +5451,7 @@ func (x *Bucket_SoftDeletePolicy) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_SoftDeletePolicy.ProtoReflect.Descriptor instead. 
func (*Bucket_SoftDeletePolicy) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 7} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 7} } func (x *Bucket_SoftDeletePolicy) GetRetentionDuration() *durationpb.Duration { @@ -6512,7 +5483,7 @@ type Bucket_Versioning struct { func (x *Bucket_Versioning) Reset() { *x = Bucket_Versioning{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[67] + mi := &file_google_storage_v2_storage_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6525,7 +5496,7 @@ func (x *Bucket_Versioning) String() string { func (*Bucket_Versioning) ProtoMessage() {} func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[67] + mi := &file_google_storage_v2_storage_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6538,7 +5509,7 @@ func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Versioning.ProtoReflect.Descriptor instead. func (*Bucket_Versioning) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 8} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 8} } func (x *Bucket_Versioning) GetEnabled() bool { @@ -6572,7 +5543,7 @@ type Bucket_Website struct { func (x *Bucket_Website) Reset() { *x = Bucket_Website{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[68] + mi := &file_google_storage_v2_storage_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6585,7 +5556,7 @@ func (x *Bucket_Website) String() string { func (*Bucket_Website) ProtoMessage() {} func (x *Bucket_Website) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[68] + mi := &file_google_storage_v2_storage_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6598,7 +5569,7 @@ func (x *Bucket_Website) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Website.ProtoReflect.Descriptor instead. 
func (*Bucket_Website) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 9} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 9} } func (x *Bucket_Website) GetMainPageSuffix() string { @@ -6630,7 +5601,7 @@ type Bucket_CustomPlacementConfig struct { func (x *Bucket_CustomPlacementConfig) Reset() { *x = Bucket_CustomPlacementConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[69] + mi := &file_google_storage_v2_storage_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6643,7 +5614,7 @@ func (x *Bucket_CustomPlacementConfig) String() string { func (*Bucket_CustomPlacementConfig) ProtoMessage() {} func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[69] + mi := &file_google_storage_v2_storage_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6656,7 +5627,7 @@ func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_CustomPlacementConfig.ProtoReflect.Descriptor instead. func (*Bucket_CustomPlacementConfig) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 10} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 10} } func (x *Bucket_CustomPlacementConfig) GetDataLocations() []string { @@ -6691,7 +5662,7 @@ type Bucket_Autoclass struct { func (x *Bucket_Autoclass) Reset() { *x = Bucket_Autoclass{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[70] + mi := &file_google_storage_v2_storage_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6704,7 +5675,7 @@ func (x *Bucket_Autoclass) String() string { func (*Bucket_Autoclass) ProtoMessage() {} func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[70] + mi := &file_google_storage_v2_storage_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6717,7 +5688,7 @@ func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Autoclass.ProtoReflect.Descriptor instead. 
func (*Bucket_Autoclass) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 11} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 11} } func (x *Bucket_Autoclass) GetEnabled() bool { @@ -6761,7 +5732,7 @@ type Bucket_HierarchicalNamespace struct { func (x *Bucket_HierarchicalNamespace) Reset() { *x = Bucket_HierarchicalNamespace{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[71] + mi := &file_google_storage_v2_storage_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6774,7 +5745,7 @@ func (x *Bucket_HierarchicalNamespace) String() string { func (*Bucket_HierarchicalNamespace) ProtoMessage() {} func (x *Bucket_HierarchicalNamespace) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[71] + mi := &file_google_storage_v2_storage_proto_msgTypes[55] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6787,7 +5758,7 @@ func (x *Bucket_HierarchicalNamespace) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_HierarchicalNamespace.ProtoReflect.Descriptor instead. func (*Bucket_HierarchicalNamespace) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 12} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 12} } func (x *Bucket_HierarchicalNamespace) GetEnabled() bool { @@ -6816,7 +5787,7 @@ type Bucket_IamConfig_UniformBucketLevelAccess struct { func (x *Bucket_IamConfig_UniformBucketLevelAccess) Reset() { *x = Bucket_IamConfig_UniformBucketLevelAccess{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[73] + mi := &file_google_storage_v2_storage_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6829,7 +5800,7 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) String() string { func (*Bucket_IamConfig_UniformBucketLevelAccess) ProtoMessage() {} func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[73] + mi := &file_google_storage_v2_storage_proto_msgTypes[57] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6842,7 +5813,7 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect. // Deprecated: Use Bucket_IamConfig_UniformBucketLevelAccess.ProtoReflect.Descriptor instead. 
func (*Bucket_IamConfig_UniformBucketLevelAccess) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 3, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 3, 0} } func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetEnabled() bool { @@ -6875,7 +5846,7 @@ type Bucket_Lifecycle_Rule struct { func (x *Bucket_Lifecycle_Rule) Reset() { *x = Bucket_Lifecycle_Rule{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[74] + mi := &file_google_storage_v2_storage_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6888,7 +5859,7 @@ func (x *Bucket_Lifecycle_Rule) String() string { func (*Bucket_Lifecycle_Rule) ProtoMessage() {} func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[74] + mi := &file_google_storage_v2_storage_proto_msgTypes[58] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6901,7 +5872,7 @@ func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle_Rule.ProtoReflect.Descriptor instead. func (*Bucket_Lifecycle_Rule) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 4, 0} } func (x *Bucket_Lifecycle_Rule) GetAction() *Bucket_Lifecycle_Rule_Action { @@ -6935,7 +5906,7 @@ type Bucket_Lifecycle_Rule_Action struct { func (x *Bucket_Lifecycle_Rule_Action) Reset() { *x = Bucket_Lifecycle_Rule_Action{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[75] + mi := &file_google_storage_v2_storage_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6948,7 +5919,7 @@ func (x *Bucket_Lifecycle_Rule_Action) String() string { func (*Bucket_Lifecycle_Rule_Action) ProtoMessage() {} func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[75] + mi := &file_google_storage_v2_storage_proto_msgTypes[59] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6961,7 +5932,7 @@ func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle_Rule_Action.ProtoReflect.Descriptor instead. 
func (*Bucket_Lifecycle_Rule_Action) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 4, 0, 0} } func (x *Bucket_Lifecycle_Rule_Action) GetType() string { @@ -7033,7 +6004,7 @@ type Bucket_Lifecycle_Rule_Condition struct { func (x *Bucket_Lifecycle_Rule_Condition) Reset() { *x = Bucket_Lifecycle_Rule_Condition{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[76] + mi := &file_google_storage_v2_storage_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -7046,7 +6017,7 @@ func (x *Bucket_Lifecycle_Rule_Condition) String() string { func (*Bucket_Lifecycle_Rule_Condition) ProtoMessage() {} func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[76] + mi := &file_google_storage_v2_storage_proto_msgTypes[60] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7059,7 +6030,7 @@ func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle_Rule_Condition.ProtoReflect.Descriptor instead. func (*Bucket_Lifecycle_Rule_Condition) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0, 1} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 4, 0, 1} } func (x *Bucket_Lifecycle_Rule_Condition) GetAgeDays() int32 { @@ -7242,386 +6213,385 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, - 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x22, 0x9e, 0x01, 0x0a, 0x20, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x22, 0xb6, 0x03, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 
0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, - 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, - 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, - 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, - 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, - 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, - 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x1a, - 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, - 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x68, 0x0a, 0x1f, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, - 0xfa, 0x41, 0x2b, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x65, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, - 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x1f, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x49, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x12, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x5b, 0x0a, 0x13, 0x6e, 0x6f, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x12, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xa7, 0x01, 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74, - 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x06, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa, - 0x41, 0x2b, 0x12, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, - 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, - 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x22, 0xa3, 0x01, 0x0a, 0x1f, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, - 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, - 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xc3, 0x07, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70, - 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 
0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, - 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, - 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, - 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, - 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, - 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, - 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, - 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, - 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, - 0x75, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 
0x73, 0x75, 0x6d, 0x73, 0x1a, 0xa8, - 0x02, 0x0a, 0x0c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x14, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, - 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x13, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x62, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, - 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x13, - 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, - 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xe2, 0x04, - 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, - 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 
0x74, 0x63, - 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, - 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, - 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x22, 0xa9, 0x05, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, - 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, - 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, - 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, - 0x65, 0x6e, 
0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, - 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, - 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, - 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, - 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x2b, - 0x0a, 0x0f, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63, - 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x04, 0x52, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x63, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, - 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, - 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, - 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, - 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x63, - 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x6c, 0x22, 0x3f, - 0x0a, 0x1b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, - 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, - 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, - 0x1e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, - 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 
0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0xec, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0x9e, 0x01, 0x0a, 0x20, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x66, 0x73, - 0x65, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69, - 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, - 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x22, 0xb6, 0x03, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 
0x88, 0x01, 0x01, 0x12, 0x42, + 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, - 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, + 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, + 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, + 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, + 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x1a, + 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xc3, 0x07, 0x0a, 0x14, 0x43, + 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 
0x76, + 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, + 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, + 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, + 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, + 0x4b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x61, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, - 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, - 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, - 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, + 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4f, 0x62, 0x6a, 
0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, + 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, + 0x73, 0x1a, 0xa8, 0x02, 0x0a, 0x0c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x14, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, + 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x62, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, + 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14, + 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xe4, - 0x05, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 
0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, - 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, - 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, - 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x15, 0x69, 0x66, 0x4d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x22, 0xe2, 0x04, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, + 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, + 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x18, 0x69, 0x66, - 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, - 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 
0x71, 0x75, 0x65, - 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, - 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x05, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, - 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x73, 0x6f, 0x66, 0x74, 0x5f, - 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, + 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd3, 0x05, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, + 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, + 0x06, 
0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x28, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x72, 0x65, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, + 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, + 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, + 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, + 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x2b, 0x0a, 0x0f, 0x63, + 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x08, 0x48, 0x04, 0x52, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x41, 0x63, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 
0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, - 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, - 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x10, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, - 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x44, 0x0a, 0x0d, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x6d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8c, 0x04, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3a, 0x0a, 0x08, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, - 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x63, 0x6f, 0x70, 0x79, + 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x6c, 0x22, 0x3f, 0x0a, 0x1b, 0x43, + 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x1e, 0x0a, 0x1c, + 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 
0x61, 0x62, 0x6c, 0x65, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xec, 0x05, 0x0a, + 0x11, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, + 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, + 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, + 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, - 0x12, 0x24, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, - 0x69, 0x7a, 0x65, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, - 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, - 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x22, 0xf8, 0x03, 0x0a, 0x12, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, + 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x04, + 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, + 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, + 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, + 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x8e, 0x06, 0x0a, 0x10, + 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, + 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0c, + 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 
0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x08, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x01, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x14, 0x69, 0x66, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, + 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x05, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, + 0x88, 0x01, 0x01, 0x12, 0x28, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, + 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x0f, 0x0a, + 0x0d, 0x5f, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x42, 0x16, + 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 
0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, + 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, + 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x02, 0x0a, + 0x12, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, + 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, + 0x61, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, + 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, + 0x73, 0x12, 0x44, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, + 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8c, + 0x04, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, + 0x65, 0x63, 0x12, 0x3a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, + 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, + 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, + 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 
0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, + 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, + 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, + 0x0c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x22, 0xf8, 0x03, + 0x0a, 0x12, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, + 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, + 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, + 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x4f, 0x0a, + 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x0f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, + 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x73, 
0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, + 0x0c, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, + 0x0f, 0x0a, 0x0d, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x87, 0x01, 0x0a, 0x13, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, + 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x22, 0xb5, 0x04, 0x0a, 0x16, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, @@ -7642,834 +6612,662 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, - 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x66, 0x69, - 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 
0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, 0x72, 0x73, - 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, - 0x61, 0x22, 0x87, 0x01, 0x0a, 0x13, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, - 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, - 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, - 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, - 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xb5, 0x04, 0x0a, 0x16, - 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, - 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, - 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, - 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, - 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, - 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, - 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, - 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, - 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 
0x70, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, - 0x75, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69, - 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, - 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, - 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, - 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, - 0x61, 0x74, 0x61, 0x22, 0x8b, 0x01, 0x0a, 0x17, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, - 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x22, 0xe3, 0x04, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, - 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, - 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, - 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, - 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, - 0x72, 0x12, 0x3c, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x72, 0x61, - 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, - 0x61, 0x69, 
0x6c, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, - 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, - 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, - 0x01, 0x12, 0x34, 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, - 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x01, 0x52, 0x12, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, - 0x69, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x11, 0x6c, 0x65, 0x78, 0x69, 0x63, - 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, - 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, - 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x42, - 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x64, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x66, 0x6f, 0x6c, - 0x64, 0x65, 0x72, 0x73, 0x5f, 0x61, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, - 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x18, 0x69, 0x6e, 0x63, - 0x6c, 0x75, 0x64, 0x65, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x41, 0x73, 0x50, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x67, - 0x6c, 0x6f, 0x62, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x47, 0x6c, 0x6f, 0x62, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, - 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, - 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 
0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, - 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, - 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, + 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x75, + 0x73, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x12, + 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8b, 0x01, 0x0a, 0x17, 0x42, + 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, + 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, + 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, + 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xe3, 0x04, 0x0a, 0x12, 0x4c, 0x69, 0x73, + 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, + 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, + 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, + 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, + 0x75, 0x64, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, + 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, + 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, + 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, + 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x12, 0x34, 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, + 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x12, 0x6c, 0x65, 0x78, 0x69, + 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, + 0x0a, 0x11, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, + 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, + 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, + 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, + 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x73, 0x6f, 0x66, + 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x6e, 0x63, 0x6c, + 0x75, 0x64, 0x65, 0x5f, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x61, 0x73, 0x5f, 0x70, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x46, 0x6f, 0x6c, 0x64, 0x65, + 0x72, 0x73, 0x41, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x0a, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x47, 0x6c, 0x6f, 0x62, + 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaa, + 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x1c, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 
0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, + 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, + 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xb5, 0x0e, 0x0a, 0x14, 0x52, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, + 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x11, 0x64, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, + 0x56, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, + 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, + 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x22, 0xb5, 0x0e, 0x0a, 0x14, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x10, - 0x64, 0x65, 
0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f, - 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x57, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, - 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x56, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, - 0x1b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x11, 0x64, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, - 0x12, 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, - 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0c, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, - 0x61, 0x63, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, - 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, - 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 
0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, - 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x17, 0x69, 0x66, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x48, - 0x05, 0x52, 0x1a, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, - 0x12, 0x48, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, 0x1b, 0x69, 0x66, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4f, 0x0a, 0x22, 0x69, 0x66, - 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 
0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, + 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3c, 0x0a, 0x1a, + 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, + 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, + 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, + 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, + 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x1a, + 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x04, 0x52, 0x17, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x47, + 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x1c, 0x6d, - 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, - 0x65, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 
0x72, 0x69, - 0x74, 0x74, 0x65, 0x6e, 0x50, 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x47, 0x0a, 0x20, 0x63, + 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x48, 0x05, 0x52, 0x1a, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x06, 0x52, 0x1b, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, + 0x01, 0x12, 0x4f, 0x0a, 0x22, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x48, 0x07, 0x52, + 0x1e, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x1c, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, + 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, + 0x6c, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x50, 0x65, 0x72, 0x43, 0x61, + 0x6c, 0x6c, 0x12, 0x47, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, + 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x63, 0x6f, + 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, - 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, - 0x69, 0x74, 0x68, 0x6d, 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, - 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c, - 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x27, - 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, - 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x63, - 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, - 0x73, 0x12, 
0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, - 0x73, 0x75, 0x6d, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, - 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, - 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, - 0x1d, 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, - 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, - 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x01, 0x0a, 0x0f, - 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, - 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, - 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, - 0x74, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, - 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x53, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 
0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x35, 0x0a, - 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x22, 0xaf, 0x02, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, - 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, - 0x65, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, + 0x15, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x27, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, + 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x16, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, + 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, + 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, - 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 
0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, - 0x49, 0x64, 0x22, 0x87, 0x05, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, - 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, + 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, + 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x25, 0x0a, 0x23, 0x5f, + 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, - 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, - 0x65, 
0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, - 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, - 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, - 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x6d, 0x0a, - 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, + 0x63, 0x68, 0x22, 0xd6, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, + 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xaf, 0x02, 0x0a, 0x1a, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, + 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 
0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, + 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x22, 0x3a, 0x0a, + 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, + 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x87, 0x05, 0x0a, 0x13, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x36, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, + 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, + 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, + 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, + 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, + 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, + 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, + 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, + 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, + 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x22, 0xbf, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, - 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, - 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x69, 0x0a, 0x18, - 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, - 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x9e, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 
0x20, 0x01, 0x28, - 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x81, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, - 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x73, 0x65, - 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a, - 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, - 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x84, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x48, 0x6d, - 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, - 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, - 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x80, 0x02, - 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, - 0x63, 
0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, - 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, - 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x68, 0x6f, 0x77, 0x5f, 0x64, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0f, 0x73, 0x68, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, - 0x22, 0x7f, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x09, 0x68, 0x6d, 0x61, 0x63, - 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, - 0x08, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, - 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x22, 0x97, 0x01, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x68, 0x6d, - 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x3b, - 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, - 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xbf, 0x01, 0x0a, 0x19, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, - 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x30, 0x0a, 0x14, - 0x65, 0x6e, 0x63, 0x72, 0x79, 
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x65, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3d, - 0x0a, 0x1b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, - 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x18, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, - 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xca, 0x05, - 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x74, 0x73, 0x22, 0xb5, 0x05, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, - 0x12, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, - 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x14, 0x4d, 0x41, 0x58, 0x5f, 0x52, 0x45, 0x41, - 0x44, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, - 0x80, 0x01, 0x12, 0x1c, 0x0a, 0x15, 0x4d, 0x41, 0x58, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, - 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, - 0x12, 0x19, 0x0a, 0x12, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x53, - 0x49, 0x5a, 0x45, 0x5f, 0x4d, 0x42, 0x10, 0x80, 0x80, 0xc0, 0x02, 0x12, 0x29, 0x0a, 0x24, 0x4d, - 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, - 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x42, 0x59, - 0x54, 0x45, 0x53, 0x10, 0x80, 0x08, 0x12, 0x2a, 0x0a, 0x25, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, - 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, - 0x45, 0x4c, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, - 0x80, 0x20, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, - 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, - 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x40, 0x12, 0x2a, 0x0a, - 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x41, - 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, - 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0xa0, 0x01, 0x12, 0x27, 0x0a, 0x23, 0x4d, 0x41, 0x58, - 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, - 0x4e, 0x46, 0x49, 0x47, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, - 0x10, 0x64, 0x12, 0x22, 0x0a, 0x1e, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x49, 0x46, 0x45, 0x43, 0x59, - 0x43, 0x4c, 0x45, 0x5f, 0x52, 0x55, 0x4c, 0x45, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, - 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x26, 0x0a, 0x22, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, - 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, - 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x53, 0x10, 0x05, 0x12, 0x31, - 0x0a, 0x2c, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, + 0x73, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x12, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, + 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x65, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, + 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xca, 0x05, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x73, 0x22, 0xb5, 0x05, 0x0a, 0x06, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, + 0x14, 0x4d, 0x41, 0x58, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, + 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x1c, 0x0a, 0x15, 0x4d, 0x41, + 0x58, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, + 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x19, 0x0a, 0x12, 0x4d, 0x41, 0x58, 0x5f, + 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x4d, 0x42, 0x10, 0x80, + 0x80, 0xc0, 0x02, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, + 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, + 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x08, 0x12, 0x2a, + 0x0a, 0x25, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, + 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, + 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x20, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, + 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, + 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, + 0x45, 0x53, 0x10, 0x80, 0x40, 0x12, 0x2a, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x55, 0x43, + 0x4b, 0x45, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, + 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0xa0, + 0x01, 0x12, 0x27, 0x0a, 0x23, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x53, 0x5f, 0x50, 0x45, + 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x22, 0x0a, 0x1e, 0x4d, 0x41, + 0x58, 0x5f, 0x4c, 0x49, 0x46, 0x45, 0x43, 0x59, 0x43, 0x4c, 0x45, 0x5f, 0x52, 0x55, 0x4c, 0x45, + 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x26, + 0x0a, 0x22, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, - 0x55, 0x54, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 
0x48, 0x10, 0x80, - 0x02, 0x12, 0x33, 0x0a, 0x2e, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, - 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, - 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, - 0x47, 0x54, 0x48, 0x10, 0x80, 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, - 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x49, 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x55, - 0x4e, 0x54, 0x10, 0x40, 0x12, 0x1f, 0x0a, 0x1b, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, - 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, - 0x47, 0x54, 0x48, 0x10, 0x3f, 0x12, 0x1f, 0x0a, 0x1a, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, - 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, - 0x54, 0x45, 0x53, 0x10, 0x80, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, - 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x49, 0x44, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c, - 0x45, 0x54, 0x45, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, 0x5f, 0x52, 0x45, 0x51, 0x55, - 0x45, 0x53, 0x54, 0x10, 0xe8, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x50, 0x4c, 0x49, 0x54, 0x5f, - 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, - 0x44, 0x41, 0x59, 0x53, 0x10, 0x0e, 0x1a, 0x02, 0x10, 0x01, 0x22, 0xf5, 0x23, 0x0a, 0x06, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, - 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, - 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x65, 0x74, 0x61, 0x67, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, - 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, - 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x1f, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, - 0x12, 0x10, 0x0a, 0x03, 0x72, 0x70, 0x6f, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, - 0x70, 
0x6f, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x54, 0x0a, 0x12, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, - 0x63, 0x6c, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, - 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x52, 0x09, 0x6c, 0x69, 0x66, 0x65, - 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, - 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, - 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a, - 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, - 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x15, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, - 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, - 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x2e, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x52, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, - 0x74, 0x65, 0x12, 0x44, 0x0a, 
0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, - 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67, - 0x69, 0x6e, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6c, 0x6f, - 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x13, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, - 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x3b, 0x0a, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x42, 0x69, 0x6c, - 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, - 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x2e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x0a, 0x69, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x55, 0x54, 0x45, 0x53, 0x10, 0x05, 0x12, 0x31, 0x0a, 0x2c, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, + 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, + 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, + 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x02, 0x12, 0x33, 0x0a, 0x2e, 0x4d, 0x41, 0x58, + 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, + 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x56, + 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x08, 0x12, 0x1c, + 0x0a, 0x18, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x45, 0x4e, 0x54, + 0x52, 0x49, 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x40, 0x12, 0x1f, 0x0a, 0x1b, + 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 
0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, + 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x3f, 0x12, 0x1f, 0x0a, + 0x1a, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, + 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x01, 0x12, 0x2e, + 0x0a, 0x29, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x49, 0x44, 0x53, + 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x4f, 0x42, 0x4a, 0x45, + 0x43, 0x54, 0x53, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0xe8, 0x07, 0x12, 0x1e, + 0x0a, 0x1a, 0x53, 0x50, 0x4c, 0x49, 0x54, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x4d, 0x41, + 0x58, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x41, 0x59, 0x53, 0x10, 0x0e, 0x1a, 0x02, + 0x10, 0x01, 0x22, 0x86, 0x24, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x17, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, + 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x4d, 0x0a, 0x07, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, + 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, + 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, + 0x6c, 0x61, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x70, 0x6f, 0x18, + 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x70, 0x6f, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, + 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x69, 0x61, - 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73, - 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, - 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x50, 0x7a, 0x73, 0x12, 
0x67, 0x0a, 0x17, - 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, + 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, + 0x03, 0x61, 0x63, 0x6c, 0x12, 0x54, 0x0a, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x09, 0x6c, 0x69, + 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, - 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, - 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, - 0x73, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x09, 0x61, - 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x6b, 0x0a, 0x16, 0x68, 0x69, 0x65, 0x72, - 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x2e, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, - 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15, - 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x5d, 0x0a, 0x12, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x1f, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x53, 0x6f, 0x66, - 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, - 0x41, 0x01, 0x52, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x30, 0x0a, 0x07, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, - 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x65, 0x72, 0x50, 0x61, 0x79, 0x73, 0x1a, 0x87, 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12, - 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x06, 
0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, - 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, - 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, - 0x1a, 0x5c, 0x0a, 0x0a, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, - 0x0a, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, - 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, - 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0xb1, - 0x02, 0x0a, 0x09, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1b, - 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c, - 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, - 0x18, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, - 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x75, 0x62, - 0x6c, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x70, 0x75, 0x62, - 0x6c, 0x69, 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x18, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, - 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63, - 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, - 0x6d, 0x65, 0x1a, 0xdb, 0x07, 0x0a, 0x09, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, - 0x12, 0x3c, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, - 0x63, 0x6c, 0x65, 0x2e, 0x52, 
0x75, 0x6c, 0x65, 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x1a, 0x8f, - 0x07, 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, - 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x50, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, - 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x1a, 0x41, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x43, 0x6c, 0x61, 0x73, 0x73, 0x1a, 0xa8, 0x05, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x08, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x07, 0x61, 0x67, 0x65, 0x44, 0x61, 0x79, 0x73, - 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, - 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x0d, - 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1c, 0x0a, - 0x07, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, - 0x52, 0x06, 0x69, 0x73, 0x4c, 0x69, 0x76, 0x65, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x6e, - 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, 0x02, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x4e, 0x65, - 0x77, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x32, - 0x0a, 0x15, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, - 0x73, 0x73, 0x12, 0x38, 0x0a, 0x16, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, - 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x05, 0x48, 0x03, 0x52, 0x13, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x43, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x12, - 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, - 0x72, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 
0x74, 0x65, 0x52, 0x10, 0x63, 0x75, 0x73, - 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x40, 0x0a, - 0x1a, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, - 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x05, 0x48, 0x04, 0x52, 0x17, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x6f, - 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, - 0x47, 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, - 0x74, 0x65, 0x52, 0x14, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, - 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, - 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, - 0x78, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, - 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64, - 0x61, 0x79, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42, - 0x15, 0x0a, 0x13, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, - 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, - 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x1a, 0x54, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, - 0x6f, 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f, - 0x67, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0xbb, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66, - 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, + 0x6c, 0x65, 0x52, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x40, 0x0a, + 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, - 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, - 0x09, 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x08, - 0x52, 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65, - 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xd3, 0x01, 0x0a, 0x10, 0x53, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4d, 0x0a, 0x12, 0x72, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x48, 0x00, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x46, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, - 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x01, 0x52, 0x0d, - 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, - 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x65, 0x66, 0x66, 0x65, - 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, - 0x10, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, - 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67, - 0x65, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66, - 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a, - 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, - 0x64, 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xd6, 0x02, - 0x0a, 0x09, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, + 0xe0, 
0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x32, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x52, 0x04, 0x63, + 0x6f, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, + 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x3d, + 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, + 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, + 0x65, 0x52, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x6f, 0x67, + 0x67, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x33, 0x0a, + 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, + 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x07, 0x62, 0x69, 0x6c, 0x6c, + 0x69, 0x6e, 0x67, 0x18, 0x15, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x62, 0x69, + 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x74, 0x65, 0x6e, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x0a, 0x69, + 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x69, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x23, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, + 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, + 0x73, 0x50, 0x7a, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, + 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, + 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, + 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x12, 0x6b, 0x0a, 0x16, 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x48, 0x69, 0x65, 0x72, + 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15, 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, + 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x5d, 0x0a, + 0x12, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x53, 0x6f, 0x66, 0x74, 
0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x73, 0x6f, 0x66, 0x74, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x30, 0x0a, 0x07, + 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x50, 0x61, 0x79, 0x73, 0x1a, 0x87, + 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, + 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, + 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x41, 0x67, + 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, 0x5c, 0x0a, 0x0a, 0x45, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0xb1, 0x02, 0x0a, 0x09, 0x49, 0x61, 0x6d, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1b, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x5f, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, + 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x18, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x16, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x50, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x18, 0x55, + 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 
0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0xdb, 0x07, 0x0a, 0x09, 0x4c, + 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x3c, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, + 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x1a, 0x8f, 0x07, 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, + 0x47, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, + 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, + 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x41, 0x0a, 0x06, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x1a, 0xa8, 0x05, + 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x08, 0x61, + 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, + 0x07, 0x61, 0x67, 0x65, 0x44, 0x61, 0x79, 0x73, 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0e, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, + 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1c, 0x0a, 0x07, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x06, 0x69, 0x73, 0x4c, 0x69, 0x76, 0x65, + 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, + 0x02, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x4e, 0x65, 0x77, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x32, 0x0a, 0x15, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x74, + 0x6f, 
0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x16, 0x64, 0x61, + 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x48, 0x03, 0x52, 0x13, 0x64, 0x61, + 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, + 0x65, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, + 0x61, 0x74, 0x65, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x42, + 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x40, 0x0a, 0x1a, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, + 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x48, 0x04, 0x52, 0x17, 0x64, 0x61, 0x79, + 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, + 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, + 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x14, 0x6e, 0x6f, 0x6e, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, + 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, 0x0b, + 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, + 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, + 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x19, + 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x64, 0x61, + 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, + 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x54, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x67, + 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, + 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0xbb, + 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67, - 0x67, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x16, 0x74, 0x65, 0x72, 0x6d, 0x69, - 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x74, 0x65, 0x72, 0x6d, 0x69, - 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x88, - 0x01, 0x01, 0x12, 0x70, 0x0a, 0x22, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48, - 0x01, 0x52, 0x1e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, - 0x65, 0x88, 0x01, 0x01, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, - 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x42, - 0x25, 0x0a, 0x23, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x36, 0x0a, 0x15, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, - 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, - 0x1d, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x39, - 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x47, 0xea, 0x41, 0x44, 0x0a, 0x1d, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x7d, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, - 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, - 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, - 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 
0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, - 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, - 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, - 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, - 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x5a, 0x0a, 0x0f, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, - 0x1f, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x42, 0x05, 0xe0, 0x41, 0x01, 0x08, 0x01, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, - 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x07, - 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, - 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x54, 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63, - 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, - 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f, - 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48, - 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xfe, - 0x02, 0x0a, 0x0f, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, - 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, - 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, - 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, - 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 
0x01, 0x28, 0x09, - 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, + 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, + 0x65, 0x64, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, - 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, - 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22, - 0x85, 0x04, 0x0a, 0x12, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, - 0x61, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x1f, - 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, - 0x68, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, - 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2a, 0x0a, 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x1a, 0x43, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, - 0x72, 
0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x7d, 0xea, 0x41, 0x7a, 0x0a, 0x29, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x22, 0x71, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, + 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xd3, 0x01, 0x0a, + 0x10, 0x53, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x12, 0x4d, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, + 0x12, 0x46, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x48, 0x01, 0x52, 0x0d, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x72, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x11, 0x0a, 0x0f, 0x5f, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, + 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67, 0x65, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, + 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, + 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, + 0x6c, 0x61, 0x63, 0x65, 0x6d, 
0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, + 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xd6, 0x02, 0x0a, 0x09, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, + 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x39, 0x0a, 0x16, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x14, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x88, 0x01, 0x01, 0x12, 0x70, 0x0a, 0x22, 0x74, 0x65, + 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, + 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48, 0x01, 0x52, 0x1e, 0x74, 0x65, 0x72, 0x6d, 0x69, + 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x42, 0x19, 0x0a, 0x17, + 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x74, 0x65, 0x72, 0x6d, + 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, + 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x36, + 0x0a, 0x15, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x3a, 0x58, 0xea, 0x41, 0x55, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 
0x7d, 0x2a, 0x07, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x73, 0x32, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x97, 0x02, 0x0a, 0x13, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, + 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, + 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, + 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x5a, 0x0a, 0x0f, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x05, 0xe0, 0x41, 0x01, 0x08, 0x01, + 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, + 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, + 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, + 0x63, 0x22, 0x54, 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x73, 0x75, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, + 0x01, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48, 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, + 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x71, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x6b, 0x65, 0x79, 
0x53, - 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xb6, 0x0d, 0x0a, 0x06, 0x4f, + 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf7, 0x0d, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, @@ -8479,479 +7277,390 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, - 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, - 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, - 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, - 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, - 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, - 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, - 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, - 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 
0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b, - 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, - 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, - 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, - 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, - 0x75, 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, - 0x75, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, - 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, - 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x5a, 0x0a, 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, - 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, - 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x23, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x48, 0x00, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 
0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, + 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, + 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x61, + 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, 0x0a, + 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, + 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 
0x75, + 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, + 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, + 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x5a, 0x0a, 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, - 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, + 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, 0x6d, + 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x68, + 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, 0x6f, + 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, + 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, + 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, + 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x05, + 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, - 0x0a, 0x10, 0x65, 
0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, - 0x6c, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, - 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, + 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, + 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x48, 0x02, 0x52, 0x0e, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x4e, 0x0a, 0x10, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x48, 0x03, 0x52, 0x0e, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, + 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, + 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, + 0x13, 0x0a, 0x11, 0x5f, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, + 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, + 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, + 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, + 0x61, 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, + 0x6d, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, + 0x01, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, + 0x48, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, + 0x0a, 0x0e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x22, 0x3c, 0x0a, 0x05, 0x4f, 0x77, 0x6e, + 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, + 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, + 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 
0x5f, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, + 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0x8c, 0x1c, 0x0a, 0x07, 0x53, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, + 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x6f, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, + 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xab, 0x01, 0x0a, 0x0c, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x58, 0xda, + 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x1e, 0x0a, 0x0e, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x85, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 
0x12, + 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, + 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x33, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x26, 0xda, + 0x41, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, + 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x75, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, + 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x22, 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x8a, 0xd3, 0xe4, + 0x93, 0x02, 0x19, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, + 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x7c, 0x0a, 0x0c, + 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, + 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, + 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x31, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x19, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x96, 0x02, 0x0a, 0x12, 0x54, + 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, + 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, + 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xaa, 0x01, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x8c, 0x01, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, + 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, + 0x75, 0x63, 0x6b, 
0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0x3b, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x12, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, + 0x7d, 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, + 0x2f, 0x2a, 0x2a, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, - 0x65, 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, - 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, - 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x5f, - 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, - 0x41, 0x03, 0x48, 0x01, 0x52, 0x0e, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x4e, 0x0a, 0x10, 0x68, 0x61, 0x72, 0x64, 0x5f, - 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1d, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, - 0x41, 0x03, 0x48, 0x02, 0x52, 0x0e, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, - 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 
0x6c, 0x64, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, 0x6f, - 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x13, - 0x0a, 0x11, 0x5f, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, - 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, - 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, - 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, - 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, - 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, - 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, - 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, - 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, - 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, - 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, - 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, - 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, - 0x0e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6d, - 0x61, 0x69, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 
0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, - 0x3c, 0x0a, 0x05, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, - 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, - 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, - 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0xaa, - 0x27, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x6f, - 0x0a, 0x09, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0xda, 0x41, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, - 0xab, 0x01, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x22, 0x58, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x8a, - 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, - 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x1e, 0x0a, - 0x0e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 
0x12, - 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x85, 0x01, - 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, + 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, 0xda, 0x41, 0x12, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, + 0xe4, 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, + 0x12, 0x7e, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x0a, + 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, + 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, + 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x8d, 0x01, 0x0a, 0x0d, + 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0xda, 0x41, - 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, - 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x52, 
0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x22, 0x26, 0xda, 0x41, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x8a, 0xd3, + 0x32, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x22, 0x38, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x75, 0x0a, 0x0c, 0x47, - 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, - 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, - 0x2a, 0x7d, 0x12, 0x7c, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, - 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x31, 0xda, - 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, - 0x12, 0xd7, 0x01, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, - 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, - 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, - 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0xda, 0x41, - 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 
0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xba, 0x01, 0x0a, 0x14, + 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, + 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, + 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, - 0xda, 0x41, 0x12, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x9f, 0x01, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x22, 0x37, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, - 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 
0x75, 0x63, 0x6b, 0x65, 0x74, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0xa8, 0x01, 0x0a, 0x15, 0x47, 0x65, - 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x37, 0xda, 0x41, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, - 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0xb1, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, + 0x12, 0xa5, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3a, 0xda, 0x41, - 0x1a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, - 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xa8, 0x01, 0x0a, 0x17, 0x4c, 0x69, 0x73, - 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x73, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 
0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, - 0x2a, 0x2a, 0x7d, 0x12, 0x7e, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x48, 0xda, 0x41, + 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, + 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, + 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x39, 0xda, 0x41, + 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, + 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23, - 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, - 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, - 0x62, 0x6a, 0x65, 
0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x8d, - 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x22, 0x38, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xba, - 0x01, 0x0a, 0x14, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, - 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, - 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, - 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, - 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, - 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x09, - 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, - 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 
0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, - 0x2a, 0x2a, 0x7d, 0x12, 0xa5, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, - 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, - 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, + 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x6e, 0x0a, 0x0f, 0x42, 0x69, 0x64, + 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, - 0x39, 0xda, 0x41, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, + 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x84, 0x01, 0x0a, 0x0b, 0x4c, 0x69, + 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 
0x6a, 0x65, 0x63, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x6e, 0x0a, 0x0f, - 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, - 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x84, 0x01, 0x0a, - 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, - 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, - 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x3a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, - 0x01, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, - 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, - 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, - 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, - 0x21, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, - 0x65, 0x63, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, - 
0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, - 0xae, 0x01, 0x0a, 0x10, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, - 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, - 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, - 0x12, 0x80, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x12, 0x95, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, - 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0xda, 0x41, 0x1d, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, - 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x77, 0x0a, 0x0d, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x25, 0xda, - 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x8a, 
0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7d, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, - 0x65, 0x79, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, - 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0xda, 0x41, - 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x12, 0x7c, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, - 0x65, 0x79, 0x73, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, + 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x12, 0x9d, 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, - 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x22, 0x3f, 0xda, 0x41, 0x14, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12, - 0x20, 0x0a, 0x10, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, - 0x7d, 0x1a, 0xa7, 0x02, 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 
0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, - 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, - 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, - 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, - 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, - 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, - 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, - 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, - 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xe2, 0x01, 0xea, 0x41, - 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, - 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, - 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, - 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x42, 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x3e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x3a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, + 0x7b, 0x62, 0x75, 0x63, 
0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x13, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, + 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, + 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, 0x21, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, + 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, + 0x10, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, + 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, + 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x1a, 0xa7, 0x02, + 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, 0x02, 0x68, 0x74, 0x74, + 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, + 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, + 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2c, 0x68, 0x74, 0x74, + 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 
0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, + 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, + 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, + 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xe2, 0x01, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, + 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0c, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, + 0x6f, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x70, 0x62, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -8967,7 +7676,7 @@ func file_google_storage_v2_storage_proto_rawDescGZIP() []byte { } var file_google_storage_v2_storage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 79) +var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 62) var file_google_storage_v2_storage_proto_goTypes = []any{ (ServiceConstants_Values)(0), // 0: google.storage.v2.ServiceConstants.Values (*DeleteBucketRequest)(nil), // 1: google.storage.v2.DeleteBucketRequest @@ -8977,262 +7686,216 @@ var file_google_storage_v2_storage_proto_goTypes = []any{ (*ListBucketsResponse)(nil), // 5: google.storage.v2.ListBucketsResponse (*LockBucketRetentionPolicyRequest)(nil), // 6: google.storage.v2.LockBucketRetentionPolicyRequest (*UpdateBucketRequest)(nil), // 7: google.storage.v2.UpdateBucketRequest - (*DeleteNotificationConfigRequest)(nil), // 8: google.storage.v2.DeleteNotificationConfigRequest - (*GetNotificationConfigRequest)(nil), // 9: google.storage.v2.GetNotificationConfigRequest - (*CreateNotificationConfigRequest)(nil), // 10: google.storage.v2.CreateNotificationConfigRequest - (*ListNotificationConfigsRequest)(nil), // 11: google.storage.v2.ListNotificationConfigsRequest - (*ListNotificationConfigsResponse)(nil), // 12: google.storage.v2.ListNotificationConfigsResponse - (*ComposeObjectRequest)(nil), // 13: google.storage.v2.ComposeObjectRequest - (*DeleteObjectRequest)(nil), // 14: google.storage.v2.DeleteObjectRequest - (*RestoreObjectRequest)(nil), // 15: google.storage.v2.RestoreObjectRequest - 
(*CancelResumableWriteRequest)(nil), // 16: google.storage.v2.CancelResumableWriteRequest - (*CancelResumableWriteResponse)(nil), // 17: google.storage.v2.CancelResumableWriteResponse - (*ReadObjectRequest)(nil), // 18: google.storage.v2.ReadObjectRequest - (*GetObjectRequest)(nil), // 19: google.storage.v2.GetObjectRequest - (*ReadObjectResponse)(nil), // 20: google.storage.v2.ReadObjectResponse - (*WriteObjectSpec)(nil), // 21: google.storage.v2.WriteObjectSpec - (*WriteObjectRequest)(nil), // 22: google.storage.v2.WriteObjectRequest - (*WriteObjectResponse)(nil), // 23: google.storage.v2.WriteObjectResponse - (*BidiWriteObjectRequest)(nil), // 24: google.storage.v2.BidiWriteObjectRequest - (*BidiWriteObjectResponse)(nil), // 25: google.storage.v2.BidiWriteObjectResponse - (*ListObjectsRequest)(nil), // 26: google.storage.v2.ListObjectsRequest - (*QueryWriteStatusRequest)(nil), // 27: google.storage.v2.QueryWriteStatusRequest - (*QueryWriteStatusResponse)(nil), // 28: google.storage.v2.QueryWriteStatusResponse - (*RewriteObjectRequest)(nil), // 29: google.storage.v2.RewriteObjectRequest - (*RewriteResponse)(nil), // 30: google.storage.v2.RewriteResponse - (*StartResumableWriteRequest)(nil), // 31: google.storage.v2.StartResumableWriteRequest - (*StartResumableWriteResponse)(nil), // 32: google.storage.v2.StartResumableWriteResponse - (*UpdateObjectRequest)(nil), // 33: google.storage.v2.UpdateObjectRequest - (*GetServiceAccountRequest)(nil), // 34: google.storage.v2.GetServiceAccountRequest - (*CreateHmacKeyRequest)(nil), // 35: google.storage.v2.CreateHmacKeyRequest - (*CreateHmacKeyResponse)(nil), // 36: google.storage.v2.CreateHmacKeyResponse - (*DeleteHmacKeyRequest)(nil), // 37: google.storage.v2.DeleteHmacKeyRequest - (*GetHmacKeyRequest)(nil), // 38: google.storage.v2.GetHmacKeyRequest - (*ListHmacKeysRequest)(nil), // 39: google.storage.v2.ListHmacKeysRequest - (*ListHmacKeysResponse)(nil), // 40: google.storage.v2.ListHmacKeysResponse - (*UpdateHmacKeyRequest)(nil), // 41: google.storage.v2.UpdateHmacKeyRequest - (*CommonObjectRequestParams)(nil), // 42: google.storage.v2.CommonObjectRequestParams - (*ServiceConstants)(nil), // 43: google.storage.v2.ServiceConstants - (*Bucket)(nil), // 44: google.storage.v2.Bucket - (*BucketAccessControl)(nil), // 45: google.storage.v2.BucketAccessControl - (*ChecksummedData)(nil), // 46: google.storage.v2.ChecksummedData - (*ObjectChecksums)(nil), // 47: google.storage.v2.ObjectChecksums - (*HmacKeyMetadata)(nil), // 48: google.storage.v2.HmacKeyMetadata - (*NotificationConfig)(nil), // 49: google.storage.v2.NotificationConfig - (*CustomerEncryption)(nil), // 50: google.storage.v2.CustomerEncryption - (*Object)(nil), // 51: google.storage.v2.Object - (*ObjectAccessControl)(nil), // 52: google.storage.v2.ObjectAccessControl - (*ListObjectsResponse)(nil), // 53: google.storage.v2.ListObjectsResponse - (*ProjectTeam)(nil), // 54: google.storage.v2.ProjectTeam - (*ServiceAccount)(nil), // 55: google.storage.v2.ServiceAccount - (*Owner)(nil), // 56: google.storage.v2.Owner - (*ContentRange)(nil), // 57: google.storage.v2.ContentRange - (*ComposeObjectRequest_SourceObject)(nil), // 58: google.storage.v2.ComposeObjectRequest.SourceObject - (*ComposeObjectRequest_SourceObject_ObjectPreconditions)(nil), // 59: google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions - (*Bucket_Billing)(nil), // 60: google.storage.v2.Bucket.Billing - (*Bucket_Cors)(nil), // 61: google.storage.v2.Bucket.Cors - (*Bucket_Encryption)(nil), // 62: 
google.storage.v2.Bucket.Encryption - (*Bucket_IamConfig)(nil), // 63: google.storage.v2.Bucket.IamConfig - (*Bucket_Lifecycle)(nil), // 64: google.storage.v2.Bucket.Lifecycle - (*Bucket_Logging)(nil), // 65: google.storage.v2.Bucket.Logging - (*Bucket_RetentionPolicy)(nil), // 66: google.storage.v2.Bucket.RetentionPolicy - (*Bucket_SoftDeletePolicy)(nil), // 67: google.storage.v2.Bucket.SoftDeletePolicy - (*Bucket_Versioning)(nil), // 68: google.storage.v2.Bucket.Versioning - (*Bucket_Website)(nil), // 69: google.storage.v2.Bucket.Website - (*Bucket_CustomPlacementConfig)(nil), // 70: google.storage.v2.Bucket.CustomPlacementConfig - (*Bucket_Autoclass)(nil), // 71: google.storage.v2.Bucket.Autoclass - (*Bucket_HierarchicalNamespace)(nil), // 72: google.storage.v2.Bucket.HierarchicalNamespace - nil, // 73: google.storage.v2.Bucket.LabelsEntry - (*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 74: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess - (*Bucket_Lifecycle_Rule)(nil), // 75: google.storage.v2.Bucket.Lifecycle.Rule - (*Bucket_Lifecycle_Rule_Action)(nil), // 76: google.storage.v2.Bucket.Lifecycle.Rule.Action - (*Bucket_Lifecycle_Rule_Condition)(nil), // 77: google.storage.v2.Bucket.Lifecycle.Rule.Condition - nil, // 78: google.storage.v2.NotificationConfig.CustomAttributesEntry - nil, // 79: google.storage.v2.Object.MetadataEntry - (*fieldmaskpb.FieldMask)(nil), // 80: google.protobuf.FieldMask - (*timestamppb.Timestamp)(nil), // 81: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 82: google.protobuf.Duration - (*date.Date)(nil), // 83: google.type.Date - (*iampb.GetIamPolicyRequest)(nil), // 84: google.iam.v1.GetIamPolicyRequest - (*iampb.SetIamPolicyRequest)(nil), // 85: google.iam.v1.SetIamPolicyRequest - (*iampb.TestIamPermissionsRequest)(nil), // 86: google.iam.v1.TestIamPermissionsRequest - (*emptypb.Empty)(nil), // 87: google.protobuf.Empty - (*iampb.Policy)(nil), // 88: google.iam.v1.Policy - (*iampb.TestIamPermissionsResponse)(nil), // 89: google.iam.v1.TestIamPermissionsResponse + (*ComposeObjectRequest)(nil), // 8: google.storage.v2.ComposeObjectRequest + (*DeleteObjectRequest)(nil), // 9: google.storage.v2.DeleteObjectRequest + (*RestoreObjectRequest)(nil), // 10: google.storage.v2.RestoreObjectRequest + (*CancelResumableWriteRequest)(nil), // 11: google.storage.v2.CancelResumableWriteRequest + (*CancelResumableWriteResponse)(nil), // 12: google.storage.v2.CancelResumableWriteResponse + (*ReadObjectRequest)(nil), // 13: google.storage.v2.ReadObjectRequest + (*GetObjectRequest)(nil), // 14: google.storage.v2.GetObjectRequest + (*ReadObjectResponse)(nil), // 15: google.storage.v2.ReadObjectResponse + (*WriteObjectSpec)(nil), // 16: google.storage.v2.WriteObjectSpec + (*WriteObjectRequest)(nil), // 17: google.storage.v2.WriteObjectRequest + (*WriteObjectResponse)(nil), // 18: google.storage.v2.WriteObjectResponse + (*BidiWriteObjectRequest)(nil), // 19: google.storage.v2.BidiWriteObjectRequest + (*BidiWriteObjectResponse)(nil), // 20: google.storage.v2.BidiWriteObjectResponse + (*ListObjectsRequest)(nil), // 21: google.storage.v2.ListObjectsRequest + (*QueryWriteStatusRequest)(nil), // 22: google.storage.v2.QueryWriteStatusRequest + (*QueryWriteStatusResponse)(nil), // 23: google.storage.v2.QueryWriteStatusResponse + (*RewriteObjectRequest)(nil), // 24: google.storage.v2.RewriteObjectRequest + (*RewriteResponse)(nil), // 25: google.storage.v2.RewriteResponse + (*StartResumableWriteRequest)(nil), // 26: google.storage.v2.StartResumableWriteRequest 
+ (*StartResumableWriteResponse)(nil), // 27: google.storage.v2.StartResumableWriteResponse + (*UpdateObjectRequest)(nil), // 28: google.storage.v2.UpdateObjectRequest + (*CommonObjectRequestParams)(nil), // 29: google.storage.v2.CommonObjectRequestParams + (*ServiceConstants)(nil), // 30: google.storage.v2.ServiceConstants + (*Bucket)(nil), // 31: google.storage.v2.Bucket + (*BucketAccessControl)(nil), // 32: google.storage.v2.BucketAccessControl + (*ChecksummedData)(nil), // 33: google.storage.v2.ChecksummedData + (*ObjectChecksums)(nil), // 34: google.storage.v2.ObjectChecksums + (*CustomerEncryption)(nil), // 35: google.storage.v2.CustomerEncryption + (*Object)(nil), // 36: google.storage.v2.Object + (*ObjectAccessControl)(nil), // 37: google.storage.v2.ObjectAccessControl + (*ListObjectsResponse)(nil), // 38: google.storage.v2.ListObjectsResponse + (*ProjectTeam)(nil), // 39: google.storage.v2.ProjectTeam + (*Owner)(nil), // 40: google.storage.v2.Owner + (*ContentRange)(nil), // 41: google.storage.v2.ContentRange + (*ComposeObjectRequest_SourceObject)(nil), // 42: google.storage.v2.ComposeObjectRequest.SourceObject + (*ComposeObjectRequest_SourceObject_ObjectPreconditions)(nil), // 43: google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + (*Bucket_Billing)(nil), // 44: google.storage.v2.Bucket.Billing + (*Bucket_Cors)(nil), // 45: google.storage.v2.Bucket.Cors + (*Bucket_Encryption)(nil), // 46: google.storage.v2.Bucket.Encryption + (*Bucket_IamConfig)(nil), // 47: google.storage.v2.Bucket.IamConfig + (*Bucket_Lifecycle)(nil), // 48: google.storage.v2.Bucket.Lifecycle + (*Bucket_Logging)(nil), // 49: google.storage.v2.Bucket.Logging + (*Bucket_RetentionPolicy)(nil), // 50: google.storage.v2.Bucket.RetentionPolicy + (*Bucket_SoftDeletePolicy)(nil), // 51: google.storage.v2.Bucket.SoftDeletePolicy + (*Bucket_Versioning)(nil), // 52: google.storage.v2.Bucket.Versioning + (*Bucket_Website)(nil), // 53: google.storage.v2.Bucket.Website + (*Bucket_CustomPlacementConfig)(nil), // 54: google.storage.v2.Bucket.CustomPlacementConfig + (*Bucket_Autoclass)(nil), // 55: google.storage.v2.Bucket.Autoclass + (*Bucket_HierarchicalNamespace)(nil), // 56: google.storage.v2.Bucket.HierarchicalNamespace + nil, // 57: google.storage.v2.Bucket.LabelsEntry + (*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 58: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + (*Bucket_Lifecycle_Rule)(nil), // 59: google.storage.v2.Bucket.Lifecycle.Rule + (*Bucket_Lifecycle_Rule_Action)(nil), // 60: google.storage.v2.Bucket.Lifecycle.Rule.Action + (*Bucket_Lifecycle_Rule_Condition)(nil), // 61: google.storage.v2.Bucket.Lifecycle.Rule.Condition + nil, // 62: google.storage.v2.Object.MetadataEntry + (*fieldmaskpb.FieldMask)(nil), // 63: google.protobuf.FieldMask + (*timestamppb.Timestamp)(nil), // 64: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 65: google.protobuf.Duration + (*date.Date)(nil), // 66: google.type.Date + (*iampb.GetIamPolicyRequest)(nil), // 67: google.iam.v1.GetIamPolicyRequest + (*iampb.SetIamPolicyRequest)(nil), // 68: google.iam.v1.SetIamPolicyRequest + (*iampb.TestIamPermissionsRequest)(nil), // 69: google.iam.v1.TestIamPermissionsRequest + (*emptypb.Empty)(nil), // 70: google.protobuf.Empty + (*iampb.Policy)(nil), // 71: google.iam.v1.Policy + (*iampb.TestIamPermissionsResponse)(nil), // 72: google.iam.v1.TestIamPermissionsResponse } var file_google_storage_v2_storage_proto_depIdxs = []int32{ - 80, // 0: 
google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask - 44, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket - 80, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask - 44, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket - 44, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket - 80, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask - 49, // 6: google.storage.v2.CreateNotificationConfigRequest.notification_config:type_name -> google.storage.v2.NotificationConfig - 49, // 7: google.storage.v2.ListNotificationConfigsResponse.notification_configs:type_name -> google.storage.v2.NotificationConfig - 51, // 8: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object - 58, // 9: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject - 42, // 10: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 47, // 11: google.storage.v2.ComposeObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 42, // 12: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 42, // 13: google.storage.v2.RestoreObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 42, // 14: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 80, // 15: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask - 42, // 16: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 80, // 17: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask - 46, // 18: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData - 47, // 19: google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 57, // 20: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange - 51, // 21: google.storage.v2.ReadObjectResponse.metadata:type_name -> google.storage.v2.Object - 51, // 22: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object - 21, // 23: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec - 46, // 24: google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData - 47, // 25: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 42, // 26: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 51, // 27: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object - 21, // 28: google.storage.v2.BidiWriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec - 46, // 29: google.storage.v2.BidiWriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData - 47, // 30: google.storage.v2.BidiWriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 42, // 31: 
google.storage.v2.BidiWriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 51, // 32: google.storage.v2.BidiWriteObjectResponse.resource:type_name -> google.storage.v2.Object - 80, // 33: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask - 42, // 34: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 51, // 35: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object - 51, // 36: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object - 42, // 37: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 47, // 38: google.storage.v2.RewriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 51, // 39: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object - 21, // 40: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec - 42, // 41: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 47, // 42: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 51, // 43: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object - 80, // 44: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask - 42, // 45: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 48, // 46: google.storage.v2.CreateHmacKeyResponse.metadata:type_name -> google.storage.v2.HmacKeyMetadata - 48, // 47: google.storage.v2.ListHmacKeysResponse.hmac_keys:type_name -> google.storage.v2.HmacKeyMetadata - 48, // 48: google.storage.v2.UpdateHmacKeyRequest.hmac_key:type_name -> google.storage.v2.HmacKeyMetadata - 80, // 49: google.storage.v2.UpdateHmacKeyRequest.update_mask:type_name -> google.protobuf.FieldMask - 45, // 50: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl - 52, // 51: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl - 64, // 52: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle - 81, // 53: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp - 61, // 54: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors - 81, // 55: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp - 73, // 56: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry - 69, // 57: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website - 68, // 58: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning - 65, // 59: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging - 56, // 60: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner - 62, // 61: google.storage.v2.Bucket.encryption:type_name -> google.storage.v2.Bucket.Encryption - 60, // 62: google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing - 66, // 63: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy - 63, // 64: google.storage.v2.Bucket.iam_config:type_name -> 
google.storage.v2.Bucket.IamConfig - 70, // 65: google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig - 71, // 66: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass - 72, // 67: google.storage.v2.Bucket.hierarchical_namespace:type_name -> google.storage.v2.Bucket.HierarchicalNamespace - 67, // 68: google.storage.v2.Bucket.soft_delete_policy:type_name -> google.storage.v2.Bucket.SoftDeletePolicy - 54, // 69: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam - 81, // 70: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp - 81, // 71: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp - 78, // 72: google.storage.v2.NotificationConfig.custom_attributes:type_name -> google.storage.v2.NotificationConfig.CustomAttributesEntry - 52, // 73: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl - 81, // 74: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp - 81, // 75: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp - 47, // 76: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums - 81, // 77: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp - 81, // 78: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp - 81, // 79: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp - 79, // 80: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry - 56, // 81: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner - 50, // 82: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption - 81, // 83: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp - 81, // 84: google.storage.v2.Object.soft_delete_time:type_name -> google.protobuf.Timestamp - 81, // 85: google.storage.v2.Object.hard_delete_time:type_name -> google.protobuf.Timestamp - 54, // 86: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam - 51, // 87: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object - 59, // 88: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions - 74, // 89: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess - 75, // 90: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule - 81, // 91: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp - 82, // 92: google.storage.v2.Bucket.RetentionPolicy.retention_duration:type_name -> google.protobuf.Duration - 82, // 93: google.storage.v2.Bucket.SoftDeletePolicy.retention_duration:type_name -> google.protobuf.Duration - 81, // 94: google.storage.v2.Bucket.SoftDeletePolicy.effective_time:type_name -> google.protobuf.Timestamp - 81, // 95: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp - 81, // 96: google.storage.v2.Bucket.Autoclass.terminal_storage_class_update_time:type_name -> google.protobuf.Timestamp - 81, // 97: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> 
google.protobuf.Timestamp - 76, // 98: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action - 77, // 99: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition - 83, // 100: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date - 83, // 101: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date - 83, // 102: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date - 1, // 103: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest - 2, // 104: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest - 3, // 105: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest - 4, // 106: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest - 6, // 107: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest - 84, // 108: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest - 85, // 109: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest - 86, // 110: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest - 7, // 111: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest - 8, // 112: google.storage.v2.Storage.DeleteNotificationConfig:input_type -> google.storage.v2.DeleteNotificationConfigRequest - 9, // 113: google.storage.v2.Storage.GetNotificationConfig:input_type -> google.storage.v2.GetNotificationConfigRequest - 10, // 114: google.storage.v2.Storage.CreateNotificationConfig:input_type -> google.storage.v2.CreateNotificationConfigRequest - 11, // 115: google.storage.v2.Storage.ListNotificationConfigs:input_type -> google.storage.v2.ListNotificationConfigsRequest - 13, // 116: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest - 14, // 117: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest - 15, // 118: google.storage.v2.Storage.RestoreObject:input_type -> google.storage.v2.RestoreObjectRequest - 16, // 119: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest - 19, // 120: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest - 18, // 121: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest - 33, // 122: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest - 22, // 123: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest - 24, // 124: google.storage.v2.Storage.BidiWriteObject:input_type -> google.storage.v2.BidiWriteObjectRequest - 26, // 125: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest - 29, // 126: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest - 31, // 127: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest - 27, // 128: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest - 34, // 129: google.storage.v2.Storage.GetServiceAccount:input_type -> 
google.storage.v2.GetServiceAccountRequest - 35, // 130: google.storage.v2.Storage.CreateHmacKey:input_type -> google.storage.v2.CreateHmacKeyRequest - 37, // 131: google.storage.v2.Storage.DeleteHmacKey:input_type -> google.storage.v2.DeleteHmacKeyRequest - 38, // 132: google.storage.v2.Storage.GetHmacKey:input_type -> google.storage.v2.GetHmacKeyRequest - 39, // 133: google.storage.v2.Storage.ListHmacKeys:input_type -> google.storage.v2.ListHmacKeysRequest - 41, // 134: google.storage.v2.Storage.UpdateHmacKey:input_type -> google.storage.v2.UpdateHmacKeyRequest - 87, // 135: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty - 44, // 136: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket - 44, // 137: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket - 5, // 138: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse - 44, // 139: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket - 88, // 140: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy - 88, // 141: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy - 89, // 142: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse - 44, // 143: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket - 87, // 144: google.storage.v2.Storage.DeleteNotificationConfig:output_type -> google.protobuf.Empty - 49, // 145: google.storage.v2.Storage.GetNotificationConfig:output_type -> google.storage.v2.NotificationConfig - 49, // 146: google.storage.v2.Storage.CreateNotificationConfig:output_type -> google.storage.v2.NotificationConfig - 12, // 147: google.storage.v2.Storage.ListNotificationConfigs:output_type -> google.storage.v2.ListNotificationConfigsResponse - 51, // 148: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object - 87, // 149: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty - 51, // 150: google.storage.v2.Storage.RestoreObject:output_type -> google.storage.v2.Object - 17, // 151: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse - 51, // 152: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object - 20, // 153: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse - 51, // 154: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object - 23, // 155: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse - 25, // 156: google.storage.v2.Storage.BidiWriteObject:output_type -> google.storage.v2.BidiWriteObjectResponse - 53, // 157: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse - 30, // 158: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse - 32, // 159: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse - 28, // 160: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse - 55, // 161: google.storage.v2.Storage.GetServiceAccount:output_type -> google.storage.v2.ServiceAccount - 36, // 162: google.storage.v2.Storage.CreateHmacKey:output_type -> google.storage.v2.CreateHmacKeyResponse - 87, // 163: google.storage.v2.Storage.DeleteHmacKey:output_type -> 
google.protobuf.Empty - 48, // 164: google.storage.v2.Storage.GetHmacKey:output_type -> google.storage.v2.HmacKeyMetadata - 40, // 165: google.storage.v2.Storage.ListHmacKeys:output_type -> google.storage.v2.ListHmacKeysResponse - 48, // 166: google.storage.v2.Storage.UpdateHmacKey:output_type -> google.storage.v2.HmacKeyMetadata - 135, // [135:167] is the sub-list for method output_type - 103, // [103:135] is the sub-list for method input_type - 103, // [103:103] is the sub-list for extension type_name - 103, // [103:103] is the sub-list for extension extendee - 0, // [0:103] is the sub-list for field type_name + 63, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask + 31, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket + 63, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask + 31, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket + 31, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket + 63, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask + 36, // 6: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object + 42, // 7: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject + 29, // 8: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 34, // 9: google.storage.v2.ComposeObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 29, // 10: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 29, // 11: google.storage.v2.RestoreObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 29, // 12: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 63, // 13: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask + 29, // 14: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 63, // 15: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask + 33, // 16: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 34, // 17: google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 41, // 18: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange + 36, // 19: google.storage.v2.ReadObjectResponse.metadata:type_name -> google.storage.v2.Object + 36, // 20: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object + 16, // 21: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 33, // 22: google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 34, // 23: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 29, // 24: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 36, // 25: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object + 16, // 26: 
google.storage.v2.BidiWriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 33, // 27: google.storage.v2.BidiWriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 34, // 28: google.storage.v2.BidiWriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 29, // 29: google.storage.v2.BidiWriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 36, // 30: google.storage.v2.BidiWriteObjectResponse.resource:type_name -> google.storage.v2.Object + 63, // 31: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask + 29, // 32: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 36, // 33: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object + 36, // 34: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object + 29, // 35: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 34, // 36: google.storage.v2.RewriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 36, // 37: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object + 16, // 38: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 29, // 39: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 34, // 40: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 36, // 41: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object + 63, // 42: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask + 29, // 43: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 32, // 44: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl + 37, // 45: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl + 48, // 46: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle + 64, // 47: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp + 45, // 48: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors + 64, // 49: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp + 57, // 50: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry + 53, // 51: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website + 52, // 52: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning + 49, // 53: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging + 40, // 54: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner + 46, // 55: google.storage.v2.Bucket.encryption:type_name -> google.storage.v2.Bucket.Encryption + 44, // 56: google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing + 50, // 57: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy + 47, // 58: google.storage.v2.Bucket.iam_config:type_name -> google.storage.v2.Bucket.IamConfig + 54, // 59: 
google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig + 55, // 60: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass + 56, // 61: google.storage.v2.Bucket.hierarchical_namespace:type_name -> google.storage.v2.Bucket.HierarchicalNamespace + 51, // 62: google.storage.v2.Bucket.soft_delete_policy:type_name -> google.storage.v2.Bucket.SoftDeletePolicy + 39, // 63: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam + 37, // 64: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl + 64, // 65: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp + 64, // 66: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp + 34, // 67: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums + 64, // 68: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp + 64, // 69: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp + 64, // 70: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp + 62, // 71: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry + 40, // 72: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner + 35, // 73: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption + 64, // 74: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp + 64, // 75: google.storage.v2.Object.soft_delete_time:type_name -> google.protobuf.Timestamp + 64, // 76: google.storage.v2.Object.hard_delete_time:type_name -> google.protobuf.Timestamp + 39, // 77: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam + 36, // 78: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object + 43, // 79: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + 58, // 80: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + 59, // 81: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule + 64, // 82: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp + 65, // 83: google.storage.v2.Bucket.RetentionPolicy.retention_duration:type_name -> google.protobuf.Duration + 65, // 84: google.storage.v2.Bucket.SoftDeletePolicy.retention_duration:type_name -> google.protobuf.Duration + 64, // 85: google.storage.v2.Bucket.SoftDeletePolicy.effective_time:type_name -> google.protobuf.Timestamp + 64, // 86: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp + 64, // 87: google.storage.v2.Bucket.Autoclass.terminal_storage_class_update_time:type_name -> google.protobuf.Timestamp + 64, // 88: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp + 60, // 89: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action + 61, // 90: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition + 66, // 91: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date + 66, // 92: 
google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date + 66, // 93: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date + 1, // 94: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest + 2, // 95: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest + 3, // 96: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest + 4, // 97: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest + 6, // 98: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest + 67, // 99: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest + 68, // 100: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest + 69, // 101: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest + 7, // 102: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest + 8, // 103: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest + 9, // 104: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest + 10, // 105: google.storage.v2.Storage.RestoreObject:input_type -> google.storage.v2.RestoreObjectRequest + 11, // 106: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest + 14, // 107: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest + 13, // 108: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest + 28, // 109: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest + 17, // 110: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest + 19, // 111: google.storage.v2.Storage.BidiWriteObject:input_type -> google.storage.v2.BidiWriteObjectRequest + 21, // 112: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest + 24, // 113: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest + 26, // 114: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest + 22, // 115: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest + 70, // 116: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty + 31, // 117: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket + 31, // 118: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket + 5, // 119: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse + 31, // 120: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket + 71, // 121: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy + 71, // 122: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy + 72, // 123: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse + 31, // 124: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket + 36, // 125: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object + 70, // 126: 
google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty + 36, // 127: google.storage.v2.Storage.RestoreObject:output_type -> google.storage.v2.Object + 12, // 128: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse + 36, // 129: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object + 15, // 130: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse + 36, // 131: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object + 18, // 132: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse + 20, // 133: google.storage.v2.Storage.BidiWriteObject:output_type -> google.storage.v2.BidiWriteObjectResponse + 38, // 134: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse + 25, // 135: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse + 27, // 136: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse + 23, // 137: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse + 116, // [116:138] is the sub-list for method output_type + 94, // [94:116] is the sub-list for method input_type + 94, // [94:94] is the sub-list for extension type_name + 94, // [94:94] is the sub-list for extension extendee + 0, // [0:94] is the sub-list for field type_name } func init() { file_google_storage_v2_storage_proto_init() } @@ -9326,66 +7989,6 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*DeleteNotificationConfigRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*GetNotificationConfigRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*CreateNotificationConfigRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*ListNotificationConfigsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*ListNotificationConfigsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*ComposeObjectRequest); i { case 0: return &v.state @@ -9397,7 +8000,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[13].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*DeleteObjectRequest); i { case 0: return &v.state @@ -9409,7 +8012,7 @@ func 
file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[14].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*RestoreObjectRequest); i { case 0: return &v.state @@ -9421,7 +8024,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[15].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*CancelResumableWriteRequest); i { case 0: return &v.state @@ -9433,7 +8036,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[16].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*CancelResumableWriteResponse); i { case 0: return &v.state @@ -9445,7 +8048,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[17].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*ReadObjectRequest); i { case 0: return &v.state @@ -9457,7 +8060,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[18].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*GetObjectRequest); i { case 0: return &v.state @@ -9469,7 +8072,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[19].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*ReadObjectResponse); i { case 0: return &v.state @@ -9481,7 +8084,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[20].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*WriteObjectSpec); i { case 0: return &v.state @@ -9493,7 +8096,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[21].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*WriteObjectRequest); i { case 0: return &v.state @@ -9505,7 +8108,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[22].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[17].Exporter = func(v any, i int) any { switch v := v.(*WriteObjectResponse); i { case 0: return &v.state @@ -9517,7 +8120,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[23].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[18].Exporter = func(v any, i int) any { switch v := v.(*BidiWriteObjectRequest); i { case 0: return &v.state @@ -9529,7 +8132,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[24].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[19].Exporter = func(v any, i int) any { switch v 
:= v.(*BidiWriteObjectResponse); i { case 0: return &v.state @@ -9541,7 +8144,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[25].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[20].Exporter = func(v any, i int) any { switch v := v.(*ListObjectsRequest); i { case 0: return &v.state @@ -9553,7 +8156,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[26].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[21].Exporter = func(v any, i int) any { switch v := v.(*QueryWriteStatusRequest); i { case 0: return &v.state @@ -9565,7 +8168,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[27].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[22].Exporter = func(v any, i int) any { switch v := v.(*QueryWriteStatusResponse); i { case 0: return &v.state @@ -9577,7 +8180,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[28].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[23].Exporter = func(v any, i int) any { switch v := v.(*RewriteObjectRequest); i { case 0: return &v.state @@ -9589,7 +8192,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[29].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[24].Exporter = func(v any, i int) any { switch v := v.(*RewriteResponse); i { case 0: return &v.state @@ -9601,7 +8204,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[30].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[25].Exporter = func(v any, i int) any { switch v := v.(*StartResumableWriteRequest); i { case 0: return &v.state @@ -9613,7 +8216,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[31].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[26].Exporter = func(v any, i int) any { switch v := v.(*StartResumableWriteResponse); i { case 0: return &v.state @@ -9625,7 +8228,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[32].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[27].Exporter = func(v any, i int) any { switch v := v.(*UpdateObjectRequest); i { case 0: return &v.state @@ -9637,103 +8240,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[33].Exporter = func(v any, i int) any { - switch v := v.(*GetServiceAccountRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[34].Exporter = func(v any, i int) any { - switch v := v.(*CreateHmacKeyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[35].Exporter = func(v any, i int) any { - switch v := v.(*CreateHmacKeyResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[36].Exporter = func(v any, i int) any { - switch v := v.(*DeleteHmacKeyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[37].Exporter = func(v any, i int) any { - switch v := v.(*GetHmacKeyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[38].Exporter = func(v any, i int) any { - switch v := v.(*ListHmacKeysRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[39].Exporter = func(v any, i int) any { - switch v := v.(*ListHmacKeysResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[40].Exporter = func(v any, i int) any { - switch v := v.(*UpdateHmacKeyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[41].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[28].Exporter = func(v any, i int) any { switch v := v.(*CommonObjectRequestParams); i { case 0: return &v.state @@ -9745,7 +8252,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[42].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[29].Exporter = func(v any, i int) any { switch v := v.(*ServiceConstants); i { case 0: return &v.state @@ -9757,7 +8264,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[43].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[30].Exporter = func(v any, i int) any { switch v := v.(*Bucket); i { case 0: return &v.state @@ -9769,7 +8276,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[44].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[31].Exporter = func(v any, i int) any { switch v := v.(*BucketAccessControl); i { case 0: return &v.state @@ -9781,7 +8288,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[45].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[32].Exporter = func(v any, i int) any { switch v := v.(*ChecksummedData); i { case 0: return &v.state @@ -9793,7 +8300,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[46].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[33].Exporter = func(v any, i int) any { switch v := v.(*ObjectChecksums); i { case 0: return &v.state @@ -9805,31 +8312,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[47].Exporter = func(v any, i int) any { - switch v := v.(*HmacKeyMetadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - 
return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[48].Exporter = func(v any, i int) any { - switch v := v.(*NotificationConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[49].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[34].Exporter = func(v any, i int) any { switch v := v.(*CustomerEncryption); i { case 0: return &v.state @@ -9841,7 +8324,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[50].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[35].Exporter = func(v any, i int) any { switch v := v.(*Object); i { case 0: return &v.state @@ -9853,7 +8336,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[51].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[36].Exporter = func(v any, i int) any { switch v := v.(*ObjectAccessControl); i { case 0: return &v.state @@ -9865,20 +8348,8 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[52].Exporter = func(v any, i int) any { - switch v := v.(*ListObjectsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[53].Exporter = func(v any, i int) any { - switch v := v.(*ProjectTeam); i { + file_google_storage_v2_storage_proto_msgTypes[37].Exporter = func(v any, i int) any { + switch v := v.(*ListObjectsResponse); i { case 0: return &v.state case 1: @@ -9889,8 +8360,8 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[54].Exporter = func(v any, i int) any { - switch v := v.(*ServiceAccount); i { + file_google_storage_v2_storage_proto_msgTypes[38].Exporter = func(v any, i int) any { + switch v := v.(*ProjectTeam); i { case 0: return &v.state case 1: @@ -9901,7 +8372,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[55].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[39].Exporter = func(v any, i int) any { switch v := v.(*Owner); i { case 0: return &v.state @@ -9913,7 +8384,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[56].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[40].Exporter = func(v any, i int) any { switch v := v.(*ContentRange); i { case 0: return &v.state @@ -9925,7 +8396,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[57].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[41].Exporter = func(v any, i int) any { switch v := v.(*ComposeObjectRequest_SourceObject); i { case 0: return &v.state @@ -9937,7 +8408,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[58].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[42].Exporter = func(v any, i int) any { switch v := v.(*ComposeObjectRequest_SourceObject_ObjectPreconditions); i { case 0: return &v.state @@ -9949,7 +8420,7 @@ func 
file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[59].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[43].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Billing); i { case 0: return &v.state @@ -9961,7 +8432,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[60].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[44].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Cors); i { case 0: return &v.state @@ -9973,7 +8444,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[61].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[45].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Encryption); i { case 0: return &v.state @@ -9985,7 +8456,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[62].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[46].Exporter = func(v any, i int) any { switch v := v.(*Bucket_IamConfig); i { case 0: return &v.state @@ -9997,7 +8468,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[63].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[47].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Lifecycle); i { case 0: return &v.state @@ -10009,7 +8480,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[64].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[48].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Logging); i { case 0: return &v.state @@ -10021,7 +8492,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[65].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[49].Exporter = func(v any, i int) any { switch v := v.(*Bucket_RetentionPolicy); i { case 0: return &v.state @@ -10033,7 +8504,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[66].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[50].Exporter = func(v any, i int) any { switch v := v.(*Bucket_SoftDeletePolicy); i { case 0: return &v.state @@ -10045,7 +8516,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[67].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[51].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Versioning); i { case 0: return &v.state @@ -10057,7 +8528,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[68].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[52].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Website); i { case 0: return &v.state @@ -10069,7 +8540,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[69].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[53].Exporter = func(v any, i int) any { switch v := 
v.(*Bucket_CustomPlacementConfig); i { case 0: return &v.state @@ -10081,7 +8552,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[70].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[54].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Autoclass); i { case 0: return &v.state @@ -10093,7 +8564,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[71].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[55].Exporter = func(v any, i int) any { switch v := v.(*Bucket_HierarchicalNamespace); i { case 0: return &v.state @@ -10105,7 +8576,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[73].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[57].Exporter = func(v any, i int) any { switch v := v.(*Bucket_IamConfig_UniformBucketLevelAccess); i { case 0: return &v.state @@ -10117,7 +8588,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[74].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[58].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Lifecycle_Rule); i { case 0: return &v.state @@ -10129,7 +8600,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[75].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[59].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Lifecycle_Rule_Action); i { case 0: return &v.state @@ -10141,7 +8612,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[76].Exporter = func(v any, i int) any { + file_google_storage_v2_storage_proto_msgTypes[60].Exporter = func(v any, i int) any { switch v := v.(*Bucket_Lifecycle_Rule_Condition); i { case 0: return &v.state @@ -10158,51 +8629,51 @@ func file_google_storage_v2_storage_proto_init() { file_google_storage_v2_storage_proto_msgTypes[1].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[3].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[6].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[7].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[8].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[9].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[12].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[13].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[14].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[17].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[21].OneofWrappers = []any{ + file_google_storage_v2_storage_proto_msgTypes[15].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[16].OneofWrappers = []any{ (*WriteObjectRequest_UploadId)(nil), (*WriteObjectRequest_WriteObjectSpec)(nil), (*WriteObjectRequest_ChecksummedData)(nil), } - file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []any{ + 
file_google_storage_v2_storage_proto_msgTypes[17].OneofWrappers = []any{ (*WriteObjectResponse_PersistedSize)(nil), (*WriteObjectResponse_Resource)(nil), } - file_google_storage_v2_storage_proto_msgTypes[23].OneofWrappers = []any{ + file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []any{ (*BidiWriteObjectRequest_UploadId)(nil), (*BidiWriteObjectRequest_WriteObjectSpec)(nil), (*BidiWriteObjectRequest_ChecksummedData)(nil), } - file_google_storage_v2_storage_proto_msgTypes[24].OneofWrappers = []any{ + file_google_storage_v2_storage_proto_msgTypes[19].OneofWrappers = []any{ (*BidiWriteObjectResponse_PersistedSize)(nil), (*BidiWriteObjectResponse_Resource)(nil), } - file_google_storage_v2_storage_proto_msgTypes[25].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = []any{ + file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []any{ (*QueryWriteStatusResponse_PersistedSize)(nil), (*QueryWriteStatusResponse_Resource)(nil), } - file_google_storage_v2_storage_proto_msgTypes[28].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[23].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[32].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[45].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[46].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[33].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[35].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[42].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[50].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[58].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[66].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[70].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[76].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[54].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[60].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_storage_v2_storage_proto_rawDesc, NumEnums: 1, - NumMessages: 79, + NumMessages: 62, NumExtensions: 0, NumServices: 1, }, @@ -10247,25 +8718,16 @@ type StorageClient interface { // The `resource` field in the request should be // `projects/_/buckets/{bucket}`. SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) - // Tests a set of permissions on the given bucket or object to see which, if - // any, are held by the caller. + // Tests a set of permissions on the given bucket, object, or managed folder + // to see which, if any, are held by the caller. // The `resource` field in the request should be - // `projects/_/buckets/{bucket}` for a bucket or - // `projects/_/buckets/{bucket}/objects/{object}` for an object. + // `projects/_/buckets/{bucket}` for a bucket, + // `projects/_/buckets/{bucket}/objects/{object}` for an object, or + // `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` + // for a managed folder. 
TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error) // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) - // Permanently deletes a NotificationConfig. - DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // View a NotificationConfig. - GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) - // Creates a NotificationConfig for a given bucket. - // These NotificationConfigs, when triggered, publish messages to the - // specified Pub/Sub topics. See - // https://cloud.google.com/storage/docs/pubsub-notifications. - CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) - // Retrieves a list of NotificationConfigs for a given bucket. - ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error) // Concatenates a list of existing objects into a new object in the same // bucket. ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error) @@ -10393,18 +8855,6 @@ type StorageClient interface { // object name, the sequence of returned `persisted_size` values will be // non-decreasing. QueryWriteStatus(ctx context.Context, in *QueryWriteStatusRequest, opts ...grpc.CallOption) (*QueryWriteStatusResponse, error) - // Retrieves the name of a project's Google Cloud Storage service account. - GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error) - // Creates a new HMAC key for the given service account. - CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error) - // Deletes a given HMAC key. Key must be in an INACTIVE state. - DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Gets an existing HMAC key metadata for the given id. - GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) - // Lists HMAC keys under a given project with the additional filters provided. - ListHmacKeys(ctx context.Context, in *ListHmacKeysRequest, opts ...grpc.CallOption) (*ListHmacKeysResponse, error) - // Updates a given HMAC key state between ACTIVE and INACTIVE. - UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) } type storageClient struct { @@ -10496,42 +8946,6 @@ func (c *storageClient) UpdateBucket(ctx context.Context, in *UpdateBucketReques return out, nil } -func (c *storageClient) DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteNotificationConfig", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) { - out := new(NotificationConfig) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetNotificationConfig", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) { - out := new(NotificationConfig) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateNotificationConfig", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error) { - out := new(ListNotificationConfigsResponse) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListNotificationConfigs", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *storageClient) ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error) { out := new(Object) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ComposeObject", in, out, opts...) @@ -10719,60 +9133,6 @@ func (c *storageClient) QueryWriteStatus(ctx context.Context, in *QueryWriteStat return out, nil } -func (c *storageClient) GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error) { - out := new(ServiceAccount) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetServiceAccount", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error) { - out := new(CreateHmacKeyResponse) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateHmacKey", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteHmacKey", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) { - out := new(HmacKeyMetadata) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetHmacKey", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) ListHmacKeys(ctx context.Context, in *ListHmacKeysRequest, opts ...grpc.CallOption) (*ListHmacKeysResponse, error) { - out := new(ListHmacKeysResponse) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListHmacKeys", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) { - out := new(HmacKeyMetadata) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateHmacKey", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - // StorageServer is the server API for Storage service. 
type StorageServer interface { // Permanently deletes an empty bucket. @@ -10793,25 +9153,16 @@ type StorageServer interface { // The `resource` field in the request should be // `projects/_/buckets/{bucket}`. SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) - // Tests a set of permissions on the given bucket or object to see which, if - // any, are held by the caller. + // Tests a set of permissions on the given bucket, object, or managed folder + // to see which, if any, are held by the caller. // The `resource` field in the request should be - // `projects/_/buckets/{bucket}` for a bucket or - // `projects/_/buckets/{bucket}/objects/{object}` for an object. + // `projects/_/buckets/{bucket}` for a bucket, + // `projects/_/buckets/{bucket}/objects/{object}` for an object, or + // `projects/_/buckets/{bucket}/managedFolders/{managedFolder}` + // for a managed folder. TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) - // Permanently deletes a NotificationConfig. - DeleteNotificationConfig(context.Context, *DeleteNotificationConfigRequest) (*emptypb.Empty, error) - // View a NotificationConfig. - GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error) - // Creates a NotificationConfig for a given bucket. - // These NotificationConfigs, when triggered, publish messages to the - // specified Pub/Sub topics. See - // https://cloud.google.com/storage/docs/pubsub-notifications. - CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error) - // Retrieves a list of NotificationConfigs for a given bucket. - ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error) // Concatenates a list of existing objects into a new object in the same // bucket. ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) @@ -10939,18 +9290,6 @@ type StorageServer interface { // object name, the sequence of returned `persisted_size` values will be // non-decreasing. QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error) - // Retrieves the name of a project's Google Cloud Storage service account. - GetServiceAccount(context.Context, *GetServiceAccountRequest) (*ServiceAccount, error) - // Creates a new HMAC key for the given service account. - CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error) - // Deletes a given HMAC key. Key must be in an INACTIVE state. - DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error) - // Gets an existing HMAC key metadata for the given id. - GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error) - // Lists HMAC keys under a given project with the additional filters provided. - ListHmacKeys(context.Context, *ListHmacKeysRequest) (*ListHmacKeysResponse, error) - // Updates a given HMAC key state between ACTIVE and INACTIVE. - UpdateHmacKey(context.Context, *UpdateHmacKeyRequest) (*HmacKeyMetadata, error) } // UnimplementedStorageServer can be embedded to have forward compatible implementations. 
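The generated code above relies on gRPC's forward-compatibility convention: a server implementation that embeds UnimplementedStorageServer keeps compiling when the service definition gains or loses RPCs (such as the NotificationConfig and HMAC-key methods removed here), because any method it doesn't override falls back to the embedded type and returns codes.Unimplemented. A minimal sketch of that pattern follows; the server type and the GetBucket override are hypothetical, and note that storagepb sits under an internal/ path, so only code inside the storage module can actually import it.

```go
package main

import (
	"context"

	"cloud.google.com/go/storage/internal/apiv2/storagepb"
	"google.golang.org/grpc"
)

// myStorageServer overrides only the RPCs it cares about. Every other method,
// including any the generated code later adds or removes, is served by the
// embedded UnimplementedStorageServer.
type myStorageServer struct {
	storagepb.UnimplementedStorageServer
}

// GetBucket is a trivial example override.
func (s *myStorageServer) GetBucket(ctx context.Context, req *storagepb.GetBucketRequest) (*storagepb.Bucket, error) {
	return &storagepb.Bucket{Name: req.GetName()}, nil
}

func main() {
	s := grpc.NewServer()
	storagepb.RegisterStorageServer(s, &myStorageServer{})
	_ = s // wiring a listener and calling s.Serve is omitted here
}
```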
@@ -10984,18 +9323,6 @@ func (*UnimplementedStorageServer) TestIamPermissions(context.Context, *iampb.Te func (*UnimplementedStorageServer) UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateBucket not implemented") } -func (*UnimplementedStorageServer) DeleteNotificationConfig(context.Context, *DeleteNotificationConfigRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteNotificationConfig not implemented") -} -func (*UnimplementedStorageServer) GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetNotificationConfig not implemented") -} -func (*UnimplementedStorageServer) CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateNotificationConfig not implemented") -} -func (*UnimplementedStorageServer) ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListNotificationConfigs not implemented") -} func (*UnimplementedStorageServer) ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) { return nil, status.Errorf(codes.Unimplemented, "method ComposeObject not implemented") } @@ -11035,24 +9362,6 @@ func (*UnimplementedStorageServer) StartResumableWrite(context.Context, *StartRe func (*UnimplementedStorageServer) QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method QueryWriteStatus not implemented") } -func (*UnimplementedStorageServer) GetServiceAccount(context.Context, *GetServiceAccountRequest) (*ServiceAccount, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetServiceAccount not implemented") -} -func (*UnimplementedStorageServer) CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateHmacKey not implemented") -} -func (*UnimplementedStorageServer) DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteHmacKey not implemented") -} -func (*UnimplementedStorageServer) GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetHmacKey not implemented") -} -func (*UnimplementedStorageServer) ListHmacKeys(context.Context, *ListHmacKeysRequest) (*ListHmacKeysResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListHmacKeys not implemented") -} -func (*UnimplementedStorageServer) UpdateHmacKey(context.Context, *UpdateHmacKeyRequest) (*HmacKeyMetadata, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateHmacKey not implemented") -} func RegisterStorageServer(s *grpc.Server, srv StorageServer) { s.RegisterService(&_Storage_serviceDesc, srv) @@ -11220,78 +9529,6 @@ func _Storage_UpdateBucket_Handler(srv interface{}, ctx context.Context, dec fun return interceptor(ctx, in, info, handler) } -func _Storage_DeleteNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := 
new(DeleteNotificationConfigRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).DeleteNotificationConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/DeleteNotificationConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).DeleteNotificationConfig(ctx, req.(*DeleteNotificationConfigRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_GetNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetNotificationConfigRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).GetNotificationConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/GetNotificationConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).GetNotificationConfig(ctx, req.(*GetNotificationConfigRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_CreateNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateNotificationConfigRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).CreateNotificationConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/CreateNotificationConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).CreateNotificationConfig(ctx, req.(*CreateNotificationConfigRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_ListNotificationConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListNotificationConfigsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).ListNotificationConfigs(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/ListNotificationConfigs", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).ListNotificationConfigs(ctx, req.(*ListNotificationConfigsRequest)) - } - return interceptor(ctx, in, info, handler) -} - func _Storage_ComposeObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ComposeObjectRequest) if err := dec(in); err != nil { @@ -11545,114 +9782,6 @@ func _Storage_QueryWriteStatus_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } -func _Storage_GetServiceAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetServiceAccountRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).GetServiceAccount(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/GetServiceAccount", - } - handler := 
func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).GetServiceAccount(ctx, req.(*GetServiceAccountRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_CreateHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateHmacKeyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).CreateHmacKey(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/CreateHmacKey", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).CreateHmacKey(ctx, req.(*CreateHmacKeyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_DeleteHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteHmacKeyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).DeleteHmacKey(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/DeleteHmacKey", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).DeleteHmacKey(ctx, req.(*DeleteHmacKeyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_GetHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetHmacKeyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).GetHmacKey(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/GetHmacKey", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).GetHmacKey(ctx, req.(*GetHmacKeyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_ListHmacKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListHmacKeysRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).ListHmacKeys(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/ListHmacKeys", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).ListHmacKeys(ctx, req.(*ListHmacKeysRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_UpdateHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateHmacKeyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).UpdateHmacKey(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/UpdateHmacKey", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).UpdateHmacKey(ctx, req.(*UpdateHmacKeyRequest)) - } - return interceptor(ctx, in, info, handler) -} - var _Storage_serviceDesc = grpc.ServiceDesc{ ServiceName: 
"google.storage.v2.Storage", HandlerType: (*StorageServer)(nil), @@ -11693,22 +9822,6 @@ var _Storage_serviceDesc = grpc.ServiceDesc{ MethodName: "UpdateBucket", Handler: _Storage_UpdateBucket_Handler, }, - { - MethodName: "DeleteNotificationConfig", - Handler: _Storage_DeleteNotificationConfig_Handler, - }, - { - MethodName: "GetNotificationConfig", - Handler: _Storage_GetNotificationConfig_Handler, - }, - { - MethodName: "CreateNotificationConfig", - Handler: _Storage_CreateNotificationConfig_Handler, - }, - { - MethodName: "ListNotificationConfigs", - Handler: _Storage_ListNotificationConfigs_Handler, - }, { MethodName: "ComposeObject", Handler: _Storage_ComposeObject_Handler, @@ -11749,30 +9862,6 @@ var _Storage_serviceDesc = grpc.ServiceDesc{ MethodName: "QueryWriteStatus", Handler: _Storage_QueryWriteStatus_Handler, }, - { - MethodName: "GetServiceAccount", - Handler: _Storage_GetServiceAccount_Handler, - }, - { - MethodName: "CreateHmacKey", - Handler: _Storage_CreateHmacKey_Handler, - }, - { - MethodName: "DeleteHmacKey", - Handler: _Storage_DeleteHmacKey_Handler, - }, - { - MethodName: "GetHmacKey", - Handler: _Storage_GetHmacKey_Handler, - }, - { - MethodName: "ListHmacKeys", - Handler: _Storage_ListHmacKeys_Handler, - }, - { - MethodName: "UpdateHmacKey", - Handler: _Storage_UpdateHmacKey_Handler, - }, }, Streams: []grpc.StreamDesc{ { diff --git a/vendor/cloud.google.com/go/storage/internal/experimental.go b/vendor/cloud.google.com/go/storage/internal/experimental.go new file mode 100644 index 0000000000..7db3ff5274 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/internal/experimental.go @@ -0,0 +1,32 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// All options in this package are experimental. + +package internal + +var ( + // WithMetricInterval is a function which is implemented by storage package. + // It sets how often to emit metrics when using NewPeriodicReader and must be + // greater than 1 minute. + WithMetricInterval any // func (*time.Duration) option.ClientOption + + // WithMetricExporter is a function which is implemented by storage package. + // Set an alternate client-side metric Exporter to emit metrics through. + WithMetricExporter any // func (*metric.Exporter) option.ClientOption + + // WithReadStallTimeout is a function which is implemented by storage package. + // It takes ReadStallTimeoutConfig as inputs and returns a option.ClientOption. + WithReadStallTimeout any // func (*ReadStallTimeoutConfig) option.ClientOption +) diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go index e5b2de0917..fc6b11e222 100644 --- a/vendor/cloud.google.com/go/storage/internal/version.go +++ b/vendor/cloud.google.com/go/storage/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "1.43.0" +const Version = "1.46.0" diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go index de57b4bbbb..99783f3df4 100644 --- a/vendor/cloud.google.com/go/storage/invoke.go +++ b/vendor/cloud.google.com/go/storage/invoke.go @@ -74,7 +74,15 @@ func run(ctx context.Context, call func(ctx context.Context) error, retry *retry return true, fmt.Errorf("storage: retry failed after %v attempts; last error: %w", *retry.maxAttempts, err) } attempts++ - return !errorFunc(err), err + retryable := errorFunc(err) + // Explicitly check context cancellation so that we can distinguish between a + // DEADLINE_EXCEEDED error from the server and a user-set context deadline. + // Unfortunately gRPC will codes.DeadlineExceeded (which may be retryable if it's + // sent by the server) in both cases. + if ctxErr := ctx.Err(); errors.Is(ctxErr, context.Canceled) || errors.Is(ctxErr, context.DeadlineExceeded) { + retryable = false + } + return !retryable, err }) } @@ -84,21 +92,7 @@ func setInvocationHeaders(ctx context.Context, invocationID string, attempts int invocationHeader := fmt.Sprintf("gccl-invocation-id/%v gccl-attempt-count/%v", invocationID, attempts) xGoogHeader := strings.Join([]string{invocationHeader, xGoogDefaultHeader}, " ") - // TODO: remove this once the respective transport packages merge xGoogHeader. - // Also remove gl-go at that time, as it will be repeated. - hdrs := callctx.HeadersFromContext(ctx) - for _, v := range hdrs[xGoogHeaderKey] { - xGoogHeader = strings.Join([]string{xGoogHeader, v}, " ") - } - - if hdrs[xGoogHeaderKey] != nil { - // Replace the key instead of adding it, if there was anything to merge with. - hdrs[xGoogHeaderKey] = []string{xGoogHeader} - } else { - // TODO: keep this line when removing the above code. - ctx = callctx.SetHeaders(ctx, xGoogHeaderKey, xGoogHeader) - } - + ctx = callctx.SetHeaders(ctx, xGoogHeaderKey, xGoogHeader) ctx = callctx.SetHeaders(ctx, idempotencyHeaderKey, invocationID) return ctx } @@ -138,14 +132,18 @@ func ShouldRetry(err error) bool { return true } } + case *net.DNSError: + if e.IsTemporary { + return true + } case interface{ Temporary() bool }: if e.Temporary() { return true } } - // UNAVAILABLE, RESOURCE_EXHAUSTED, and INTERNAL codes are all retryable for gRPC. + // UNAVAILABLE, RESOURCE_EXHAUSTED, INTERNAL, and DEADLINE_EXCEEDED codes are all retryable for gRPC. 
if st, ok := status.FromError(err); ok { - if code := st.Code(); code == codes.Unavailable || code == codes.ResourceExhausted || code == codes.Internal { + if code := st.Code(); code == codes.Unavailable || code == codes.ResourceExhausted || code == codes.Internal || code == codes.DeadlineExceeded { return true } } diff --git a/vendor/cloud.google.com/go/storage/notifications.go b/vendor/cloud.google.com/go/storage/notifications.go index 1d6cfdf598..bc15900f01 100644 --- a/vendor/cloud.google.com/go/storage/notifications.go +++ b/vendor/cloud.google.com/go/storage/notifications.go @@ -21,7 +21,6 @@ import ( "regexp" "cloud.google.com/go/internal/trace" - "cloud.google.com/go/storage/internal/apiv2/storagepb" raw "google.golang.org/api/storage/v1" ) @@ -92,30 +91,6 @@ func toNotification(rn *raw.Notification) *Notification { return n } -func toNotificationFromProto(pbn *storagepb.NotificationConfig) *Notification { - n := &Notification{ - ID: pbn.GetName(), - EventTypes: pbn.GetEventTypes(), - ObjectNamePrefix: pbn.GetObjectNamePrefix(), - CustomAttributes: pbn.GetCustomAttributes(), - PayloadFormat: pbn.GetPayloadFormat(), - } - n.TopicProjectID, n.TopicID = parseNotificationTopic(pbn.Topic) - return n -} - -func toProtoNotification(n *Notification) *storagepb.NotificationConfig { - return &storagepb.NotificationConfig{ - Name: n.ID, - Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s", - n.TopicProjectID, n.TopicID), - EventTypes: n.EventTypes, - ObjectNamePrefix: n.ObjectNamePrefix, - CustomAttributes: n.CustomAttributes, - PayloadFormat: n.PayloadFormat, - } -} - var topicRE = regexp.MustCompile(`^//pubsub\.googleapis\.com/projects/([^/]+)/topics/([^/]+)`) // parseNotificationTopic extracts the project and topic IDs from from the full @@ -144,6 +119,7 @@ func toRawNotification(n *Notification) *raw.Notification { // AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID // and PayloadFormat, and must not set its ID. The other fields are all optional. The // returned Notification's ID can be used to refer to it. +// Note: gRPC is not supported. func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification") defer func() { trace.EndSpan(ctx, err) }() @@ -165,6 +141,7 @@ func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (re // Notifications returns all the Notifications configured for this bucket, as a map // indexed by notification ID. +// Note: gRPC is not supported. func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications") defer func() { trace.EndSpan(ctx, err) }() @@ -182,15 +159,8 @@ func notificationsToMap(rns []*raw.Notification) map[string]*Notification { return m } -func notificationsToMapFromProto(ns []*storagepb.NotificationConfig) map[string]*Notification { - m := map[string]*Notification{} - for _, n := range ns { - m[n.Name] = toNotificationFromProto(n) - } - return m -} - // DeleteNotification deletes the notification with the given ID. +// Note: gRPC is not supported. 
func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification") defer func() { trace.EndSpan(ctx, err) }() diff --git a/vendor/cloud.google.com/go/storage/option.go b/vendor/cloud.google.com/go/storage/option.go index debdb0f52d..3b0cf9e718 100644 --- a/vendor/cloud.google.com/go/storage/option.go +++ b/vendor/cloud.google.com/go/storage/option.go @@ -15,15 +15,71 @@ package storage import ( + "os" + "strconv" + "time" + + "cloud.google.com/go/storage/experimental" + storageinternal "cloud.google.com/go/storage/internal" + "go.opentelemetry.io/otel/sdk/metric" "google.golang.org/api/option" "google.golang.org/api/option/internaloption" ) -// storageConfig contains the Storage client option configuration that can be +const ( + dynamicReadReqIncreaseRateEnv = "DYNAMIC_READ_REQ_INCREASE_RATE" + dynamicReadReqInitialTimeoutEnv = "DYNAMIC_READ_REQ_INITIAL_TIMEOUT" + defaultDynamicReadReqIncreaseRate = 15.0 + defaultDynamicReqdReqMaxTimeout = 1 * time.Hour + defaultDynamicReadReqMinTimeout = 500 * time.Millisecond + defaultTargetPercentile = 0.99 +) + +func init() { + // initialize experimental options + storageinternal.WithMetricExporter = withMetricExporter + storageinternal.WithMetricInterval = withMetricInterval + storageinternal.WithReadStallTimeout = withReadStallTimeout +} + +// getDynamicReadReqIncreaseRateFromEnv returns the value set in the env variable. +// It returns defaultDynamicReadReqIncreaseRate if env is not set or the set value is invalid. +func getDynamicReadReqIncreaseRateFromEnv() float64 { + increaseRate := os.Getenv(dynamicReadReqIncreaseRateEnv) + if increaseRate == "" { + return defaultDynamicReadReqIncreaseRate + } + + val, err := strconv.ParseFloat(increaseRate, 64) + if err != nil { + return defaultDynamicReadReqIncreaseRate + } + return val +} + +// getDynamicReadReqInitialTimeoutSecFromEnv returns the value set in the env variable. +// It returns the passed defaultVal if env is not set or the set value is invalid. +func getDynamicReadReqInitialTimeoutSecFromEnv(defaultVal time.Duration) time.Duration { + initialTimeout := os.Getenv(dynamicReadReqInitialTimeoutEnv) + if initialTimeout == "" { + return defaultVal + } + + val, err := time.ParseDuration(initialTimeout) + if err != nil { + return defaultVal + } + return val +} + // set through storageClientOptions. type storageConfig struct { - useJSONforReads bool - readAPIWasSet bool + useJSONforReads bool + readAPIWasSet bool + disableClientMetrics bool + metricExporter *metric.Exporter + metricInterval time.Duration + readStallTimeoutConfig *experimental.ReadStallTimeoutConfig } // newStorageConfig generates a new storageConfig with all the given @@ -78,3 +134,94 @@ func (w *withReadAPI) ApplyStorageOpt(c *storageConfig) { c.useJSONforReads = w.useJSON c.readAPIWasSet = true } + +type withDisabledClientMetrics struct { + internaloption.EmbeddableAdapter + disabledClientMetrics bool +} + +// WithDisabledClientMetrics is an option that may be passed to [NewClient]. +// gRPC metrics are enabled by default in the GCS client and will export the +// gRPC telemetry discussed in [gRFC/66] and [gRFC/78] to +// [Google Cloud Monitoring]. The option is used to disable metrics. +// Google Cloud Support can use this information to more quickly diagnose +// problems related to GCS and gRPC. 
+// Sending this data does not incur any billing charges, and requires minimal +// CPU (a single RPC every few minutes) or memory (a few KiB to batch the +// telemetry). +// +// The default is to enable client metrics. To opt-out of metrics collected use +// this option. +// +// [gRFC/66]: https://github.com/grpc/proposal/blob/master/A66-otel-stats.md +// [gRFC/78]: https://github.com/grpc/proposal/blob/master/A78-grpc-metrics-wrr-pf-xds.md +// [Google Cloud Monitoring]: https://cloud.google.com/monitoring/docs +func WithDisabledClientMetrics() option.ClientOption { + return &withDisabledClientMetrics{disabledClientMetrics: true} +} + +func (w *withDisabledClientMetrics) ApplyStorageOpt(c *storageConfig) { + c.disableClientMetrics = w.disabledClientMetrics +} + +type withMeterOptions struct { + internaloption.EmbeddableAdapter + // set sampling interval + interval time.Duration +} + +func withMetricInterval(interval time.Duration) option.ClientOption { + return &withMeterOptions{interval: interval} +} + +func (w *withMeterOptions) ApplyStorageOpt(c *storageConfig) { + c.metricInterval = w.interval +} + +type withMetricExporterConfig struct { + internaloption.EmbeddableAdapter + // exporter override + metricExporter *metric.Exporter +} + +func withMetricExporter(ex *metric.Exporter) option.ClientOption { + return &withMetricExporterConfig{metricExporter: ex} +} + +func (w *withMetricExporterConfig) ApplyStorageOpt(c *storageConfig) { + c.metricExporter = w.metricExporter +} + +// WithReadStallTimeout is an option that may be passed to [NewClient]. +// It enables the client to retry the stalled read request, happens as part of +// storage.Reader creation. As the name suggest, timeout is adjusted dynamically +// based on past observed read-req latencies. +// +// This is only supported for the read operation and that too for http(XML) client. +// Grpc read-operation will be supported soon. +func withReadStallTimeout(rstc *experimental.ReadStallTimeoutConfig) option.ClientOption { + // TODO (raj-prince): To keep separate dynamicDelay instance for different BucketHandle. + // Currently, dynamicTimeout is kept at the client and hence shared across all the + // BucketHandle, which is not the ideal state. As latency depends on location of VM + // and Bucket, and read latency of different buckets may lie in different range. + // Hence having a separate dynamicTimeout instance at BucketHandle level will + // be better + if rstc.Min == time.Duration(0) { + rstc.Min = defaultDynamicReadReqMinTimeout + } + if rstc.TargetPercentile == 0 { + rstc.TargetPercentile = defaultTargetPercentile + } + return &withReadStallTimeoutConfig{ + readStallTimeoutConfig: rstc, + } +} + +type withReadStallTimeoutConfig struct { + internaloption.EmbeddableAdapter + readStallTimeoutConfig *experimental.ReadStallTimeoutConfig +} + +func (wrstc *withReadStallTimeoutConfig) ApplyStorageOpt(config *storageConfig) { + config.readStallTimeoutConfig = wrstc.readStallTimeoutConfig +} diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go index 6da2432f00..e1d9665928 100644 --- a/vendor/cloud.google.com/go/storage/reader.go +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -65,6 +65,19 @@ type ReaderObjectAttrs struct { // meaningful in the context of a particular generation of a // particular object. Metageneration int64 + + // CRC32C is the CRC32 checksum of the entire object's content using the + // Castagnoli93 polynomial, if available. 
+ CRC32C uint32 + + // Decompressed is true if the object is stored as a gzip file and was + // decompressed when read. + // Objects are automatically decompressed if the object's metadata property + // "Content-Encoding" is set to "gzip" or satisfies decompressive + // transcoding as per https://cloud.google.com/storage/docs/transcoding. + // + // To prevent decompression on reads, use [ObjectHandle.ReadCompressed]. + Decompressed bool } // NewReader creates a new Reader to read the contents of the @@ -91,7 +104,8 @@ func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) { // If the object's metadata property "Content-Encoding" is set to "gzip" or satisfies // decompressive transcoding per https://cloud.google.com/storage/docs/transcoding // that file will be served back whole, regardless of the requested range as -// Google Cloud Storage dictates. +// Google Cloud Storage dictates. If decompressive transcoding occurs, +// [Reader.Attrs.Decompressed] will be true. // // By default, reads are made using the Cloud Storage XML API. We recommend // using the JSON API instead, which can be done by setting [WithJSONReads] diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index b6316fa668..0c11eb82ad 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -214,14 +214,11 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error // NewGRPCClient creates a new Storage client using the gRPC transport and API. // Client methods which have not been implemented in gRPC will return an error. -// In particular, methods for Cloud Pub/Sub notifications are not supported. +// In particular, methods for Cloud Pub/Sub notifications, Service Account HMAC +// keys, and ServiceAccount are not supported. // Using a non-default universe domain is also not supported with the Storage // gRPC client. // -// The storage gRPC API is still in preview and not yet publicly available. -// If you would like to use the API, please first contact your GCP account rep to -// request access. The API may be subject to breaking changes. -// // Clients should be reused instead of created as needed. The methods of Client // are safe for concurrent use by multiple goroutines. // @@ -1695,7 +1692,6 @@ type Query struct { // IncludeFoldersAsPrefixes includes Folders and Managed Folders in the set of // prefixes returned by the query. Only applicable if Delimiter is set to /. - // IncludeFoldersAsPrefixes is not yet implemented in the gRPC API. IncludeFoldersAsPrefixes bool // SoftDeleted indicates whether to list soft-deleted objects. @@ -2350,6 +2346,7 @@ func toProtoChecksums(sendCRC32C bool, attrs *ObjectAttrs) *storagepb.ObjectChec } // ServiceAccount fetches the email address of the given project's Google Cloud Storage service account. +// Note: gRPC is not supported. func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) { o := makeStorageOpts(true, c.retry, "") return c.tc.GetServiceAccount(ctx, projectID, o...) 
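Taken together, the reader changes above mean a caller can now see directly from Reader.Attrs whether decompressive transcoding happened and what the object's stored CRC32C is. A minimal sketch of consulting those fields (bucket and object names are hypothetical, and it assumes default credentials are available to storage.NewClient):

```go
package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx) // HTTP/JSON transport; NewGRPCClient is the gRPC alternative
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Hypothetical bucket and object names.
	r, err := client.Bucket("example-bucket").Object("logs.json.gz").NewReader(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	data, err := io.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}

	// When Decompressed is true the object was transcoded from gzip, so the
	// stored CRC32C refers to the compressed payload and won't match a
	// checksum computed over data.
	fmt.Printf("read %d bytes, decompressed=%v, crc32c=%d\n", len(data), r.Attrs.Decompressed, r.Attrs.CRC32C)
}
```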
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index d13f2e0b35..c03a6ba036 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,20 @@ # Release History +## 1.15.0 (2024-10-14) + +### Features Added + +* `BearerTokenPolicy` handles CAE claims challenges + +### Bugs Fixed + +* Omit the `ResponseError.RawResponse` field from JSON marshaling so instances can be marshaled. +* Fixed an integer overflow in the retry policy. + +### Other Changes + +* Update dependencies. + ## 1.14.0 (2024-08-07) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go index 765fbc6843..8ad3d5400e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go @@ -5,7 +5,6 @@ package runtime import ( "context" - "encoding/base64" "fmt" "net/http" "strings" @@ -66,31 +65,16 @@ func NewBearerTokenPolicy(cred azcore.TokenCredential, opts *armpolicy.BearerTok p.btp = azruntime.NewBearerTokenPolicy(cred, opts.Scopes, &azpolicy.BearerTokenOptions{ InsecureAllowCredentialWithHTTP: opts.InsecureAllowCredentialWithHTTP, AuthorizationHandler: azpolicy.AuthorizationHandler{ - OnChallenge: p.onChallenge, - OnRequest: p.onRequest, + OnRequest: p.onRequest, }, }) return p } -func (b *BearerTokenPolicy) onChallenge(req *azpolicy.Request, res *http.Response, authNZ func(azpolicy.TokenRequestOptions) error) error { - challenge := res.Header.Get(shared.HeaderWWWAuthenticate) - claims, err := parseChallenge(challenge) - if err != nil { - // the challenge contains claims we can't parse - return err - } else if claims != "" { - // request a new token having the specified claims, send the request again - return authNZ(azpolicy.TokenRequestOptions{Claims: claims, EnableCAE: true, Scopes: b.scopes}) - } - // auth challenge didn't include claims, so this is a simple authorization failure - return azruntime.NewResponseError(res) -} - // onRequest authorizes requests with one or more bearer tokens func (b *BearerTokenPolicy) onRequest(req *azpolicy.Request, authNZ func(azpolicy.TokenRequestOptions) error) error { // authorize the request with a token for the primary tenant - err := authNZ(azpolicy.TokenRequestOptions{EnableCAE: true, Scopes: b.scopes}) + err := authNZ(azpolicy.TokenRequestOptions{Scopes: b.scopes}) if err != nil || len(b.auxResources) == 0 { return err } @@ -116,31 +100,3 @@ func (b *BearerTokenPolicy) onRequest(req *azpolicy.Request, authNZ func(azpolic func (b *BearerTokenPolicy) Do(req *azpolicy.Request) (*http.Response, error) { return b.btp.Do(req) } - -// parseChallenge parses claims from an authentication challenge issued by ARM so a client can request a token -// that will satisfy conditional access policies. It returns a non-nil error when the given value contains -// claims it can't parse. If the value contains no claims, it returns an empty string and a nil error. 
-func parseChallenge(wwwAuthenticate string) (string, error) { - claims := "" - var err error - for _, param := range strings.Split(wwwAuthenticate, ",") { - if _, after, found := strings.Cut(param, "claims="); found { - if claims != "" { - // The header contains multiple challenges, at least two of which specify claims. The specs allow this - // but it's unclear what a client should do in this case and there's as yet no concrete example of it. - err = fmt.Errorf("found multiple claims challenges in %q", wwwAuthenticate) - break - } - // trim stuff that would get an error from RawURLEncoding; claims may or may not be padded - claims = strings.Trim(after, `\"=`) - // we don't return this error because it's something unhelpful like "illegal base64 data at input byte 42" - if b, decErr := base64.RawURLEncoding.DecodeString(claims); decErr == nil { - claims = string(b) - } else { - err = fmt.Errorf("failed to parse claims from %q", wwwAuthenticate) - break - } - } - } - return claims, err -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go index 17bd50c673..03cb227d0d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go @@ -11,4 +11,7 @@ import "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" // ResponseError is returned when a request is made to a service and // the service returns a non-success HTTP status code. // Use errors.As() to access this type in the error chain. +// +// When marshaling instances, the RawResponse field will be omitted. +// However, the contents returned by Error() will be preserved. type ResponseError = exported.ResponseError diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go index 08a9545873..8aec256bd0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go @@ -117,12 +117,18 @@ type ResponseError struct { StatusCode int // RawResponse is the underlying HTTP response. - RawResponse *http.Response + RawResponse *http.Response `json:"-"` + + errMsg string } // Error implements the error interface for type ResponseError. // Note that the message contents are not contractual and can change over time. 
func (e *ResponseError) Error() string { + if e.errMsg != "" { + return e.errMsg + } + const separator = "--------------------------------------------------------------------------------" // write the request method and URL with response status code msg := &bytes.Buffer{} @@ -163,5 +169,33 @@ func (e *ResponseError) Error() string { } fmt.Fprintln(msg, separator) - return msg.String() + e.errMsg = msg.String() + return e.errMsg +} + +// internal type used for marshaling/unmarshaling +type responseError struct { + ErrorCode string `json:"errorCode"` + StatusCode int `json:"statusCode"` + ErrorMessage string `json:"errorMessage"` +} + +func (e ResponseError) MarshalJSON() ([]byte, error) { + return json.Marshal(responseError{ + ErrorCode: e.ErrorCode, + StatusCode: e.StatusCode, + ErrorMessage: e.Error(), + }) +} + +func (e *ResponseError) UnmarshalJSON(data []byte) error { + re := responseError{} + if err := json.Unmarshal(data, &re); err != nil { + return err + } + + e.ErrorCode = re.ErrorCode + e.StatusCode = re.StatusCode + e.errMsg = re.ErrorMessage + return nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 7cb8c207e6..c1c8984c60 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -40,5 +40,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.14.0" + Version = "v1.15.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go index 8d98453588..bb37a5efb4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go @@ -161,19 +161,20 @@ type BearerTokenOptions struct { // AuthorizationHandler allows SDK developers to insert custom logic that runs when BearerTokenPolicy must authorize a request. type AuthorizationHandler struct { - // OnRequest is called each time the policy receives a request. Its func parameter authorizes the request with a token - // from the policy's given credential. Implementations that need to perform I/O should use the Request's context, - // available from Request.Raw().Context(). When OnRequest returns an error, the policy propagates that error and doesn't - // send the request. When OnRequest is nil, the policy follows its default behavior, authorizing the request with a - // token from its credential according to its configuration. + // OnRequest provides TokenRequestOptions the policy can use to acquire a token for a request. The policy calls OnRequest + // whenever it needs a token and may call it multiple times for the same request. Its func parameter authorizes the request + // with a token from the policy's credential. Implementations that need to perform I/O should use the Request's context, + // available from Request.Raw().Context(). When OnRequest returns an error, the policy propagates that error and doesn't send + // the request. When OnRequest is nil, the policy follows its default behavior, which is to authorize the request with a token + // from its credential according to its configuration. 
OnRequest func(*Request, func(TokenRequestOptions) error) error - // OnChallenge is called when the policy receives a 401 response, allowing the AuthorizationHandler to re-authorize the - // request according to an authentication challenge (the Response's WWW-Authenticate header). OnChallenge is responsible - // for parsing parameters from the challenge. Its func parameter will authorize the request with a token from the policy's - // given credential. Implementations that need to perform I/O should use the Request's context, available from - // Request.Raw().Context(). When OnChallenge returns nil, the policy will send the request again. When OnChallenge is nil, - // the policy will return any 401 response to the client. + // OnChallenge allows clients to implement custom HTTP authentication challenge handling. BearerTokenPolicy calls it upon + // receiving a 401 response containing multiple Bearer challenges or a challenge BearerTokenPolicy itself can't handle. + // OnChallenge is responsible for parsing challenge(s) (the Response's WWW-Authenticate header) and reauthorizing the + // Request accordingly. Its func argument authorizes the Request with a token from the policy's credential using the given + // TokenRequestOptions. OnChallenge should honor the Request's context, available from Request.Raw().Context(). When + // OnChallenge returns nil, the policy will send the Request again. OnChallenge func(*Request, *http.Response, func(TokenRequestOptions) error) error } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go index cb2a695280..7ed66be380 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go @@ -4,9 +4,12 @@ package runtime import ( + "encoding/base64" "errors" "net/http" + "regexp" "strings" + "sync" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" @@ -17,6 +20,11 @@ import ( ) // BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential. +// It handles [Continuous Access Evaluation] (CAE) challenges. Clients needing to handle +// additional authentication challenges, or needing more control over authorization, should +// provide a [policy.AuthorizationHandler] in [policy.BearerTokenOptions]. +// +// [Continuous Access Evaluation]: https://learn.microsoft.com/entra/identity/conditional-access/concept-continuous-access-evaluation type BearerTokenPolicy struct { // mainResource is the resource to be retreived using the tenant specified in the credential mainResource *temporal.Resource[exported.AccessToken, acquiringResourceState] @@ -51,8 +59,18 @@ func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts * if opts == nil { opts = &policy.BearerTokenOptions{} } + ah := opts.AuthorizationHandler + if ah.OnRequest == nil { + // Set a default OnRequest that simply requests a token with the given scopes. OnChallenge + // doesn't get a default so the policy can use a nil check to determine whether the caller + // provided an implementation. 
+ ah.OnRequest = func(_ *policy.Request, authNZ func(policy.TokenRequestOptions) error) error { + // authNZ sets EnableCAE: true in all cases, no need to duplicate that here + return authNZ(policy.TokenRequestOptions{Scopes: scopes}) + } + } return &BearerTokenPolicy{ - authzHandler: opts.AuthorizationHandler, + authzHandler: ah, cred: cred, scopes: scopes, mainResource: temporal.NewResource(acquire), @@ -63,6 +81,7 @@ func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts * // authenticateAndAuthorize returns a function which authorizes req with a token from the policy's credential func (b *BearerTokenPolicy) authenticateAndAuthorize(req *policy.Request) func(policy.TokenRequestOptions) error { return func(tro policy.TokenRequestOptions) error { + tro.EnableCAE = true as := acquiringResourceState{p: b, req: req, tro: tro} tk, err := b.mainResource.Get(as) if err != nil { @@ -86,12 +105,7 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { return nil, err } - var err error - if b.authzHandler.OnRequest != nil { - err = b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req)) - } else { - err = b.authenticateAndAuthorize(req)(policy.TokenRequestOptions{Scopes: b.scopes}) - } + err := b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req)) if err != nil { return nil, errorinfo.NonRetriableError(err) } @@ -101,17 +115,51 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { return nil, err } + res, err = b.handleChallenge(req, res, false) + return res, err +} + +// handleChallenge handles authentication challenges either directly (for CAE challenges) or by calling +// the AuthorizationHandler. It's a no-op when the response doesn't include an authentication challenge. +// It will recurse at most once, to handle a CAE challenge following a non-CAE challenge handled by the +// AuthorizationHandler. +func (b *BearerTokenPolicy) handleChallenge(req *policy.Request, res *http.Response, recursed bool) (*http.Response, error) { + var err error if res.StatusCode == http.StatusUnauthorized { b.mainResource.Expire() - if res.Header.Get("WWW-Authenticate") != "" && b.authzHandler.OnChallenge != nil { - if err = b.authzHandler.OnChallenge(req, res, b.authenticateAndAuthorize(req)); err == nil { - res, err = req.Next() + if res.Header.Get(shared.HeaderWWWAuthenticate) != "" { + caeChallenge, parseErr := parseCAEChallenge(res) + if parseErr != nil { + return res, parseErr + } + switch { + case caeChallenge != nil: + authNZ := func(tro policy.TokenRequestOptions) error { + // Take the TokenRequestOptions provided by OnRequest and add the challenge claims. The value + // will be empty at time of writing because CAE is the only feature involving claims. If in + // the future some client needs to specify unrelated claims, this function may need to merge + // them with the challenge claims. 
+ tro.Claims = caeChallenge.params["claims"] + return b.authenticateAndAuthorize(req)(tro) + } + err = b.authzHandler.OnRequest(req, authNZ) + if err == nil { + res, err = req.Next() + } + case b.authzHandler.OnChallenge != nil && !recursed: + if err = b.authzHandler.OnChallenge(req, res, b.authenticateAndAuthorize(req)); err == nil { + if res, err = req.Next(); err == nil { + res, err = b.handleChallenge(req, res, true) + } + } else { + // don't retry challenge handling errors + err = errorinfo.NonRetriableError(err) + } + default: + // return the response to the pipeline } } } - if err != nil { - err = errorinfo.NonRetriableError(err) - } return res, err } @@ -121,3 +169,65 @@ func checkHTTPSForAuth(req *policy.Request, allowHTTP bool) error { } return nil } + +// parseCAEChallenge returns a *authChallenge representing Response's CAE challenge (nil when Response has none). +// If Response includes a CAE challenge having invalid claims, it returns a NonRetriableError. +func parseCAEChallenge(res *http.Response) (*authChallenge, error) { + var ( + caeChallenge *authChallenge + err error + ) + for _, c := range parseChallenges(res) { + if c.scheme == "Bearer" { + if claims := c.params["claims"]; claims != "" && c.params["error"] == "insufficient_claims" { + if b, de := base64.StdEncoding.DecodeString(claims); de == nil { + c.params["claims"] = string(b) + caeChallenge = &c + } else { + // don't include the decoding error because it's something + // unhelpful like "illegal base64 data at input byte 42" + err = errorinfo.NonRetriableError(errors.New("authentication challenge contains invalid claims: " + claims)) + } + break + } + } + } + return caeChallenge, err +} + +var ( + challenge, challengeParams *regexp.Regexp + once = &sync.Once{} +) + +type authChallenge struct { + scheme string + params map[string]string +} + +// parseChallenges assumes authentication challenges have quoted parameter values +func parseChallenges(res *http.Response) []authChallenge { + once.Do(func() { + // matches challenges having quoted parameters, capturing scheme and parameters + challenge = regexp.MustCompile(`(?:(\w+) ((?:\w+="[^"]*",?\s*)+))`) + // captures parameter names and values in a match of the above expression + challengeParams = regexp.MustCompile(`(\w+)="([^"]*)"`) + }) + parsed := []authChallenge{} + // WWW-Authenticate can have multiple values, each containing multiple challenges + for _, h := range res.Header.Values(shared.HeaderWWWAuthenticate) { + for _, sm := range challenge.FindAllStringSubmatch(h, -1) { + // sm is [challenge, scheme, params] (see regexp documentation on submatches) + c := authChallenge{ + params: make(map[string]string), + scheme: sm[1], + } + for _, sm := range challengeParams.FindAllStringSubmatch(sm[2], -1) { + // sm is [key="value", key, value] (see regexp documentation on submatches) + c.params[sm[1]] = sm[2] + } + parsed = append(parsed, c) + } + } + return parsed +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go index e15eea8249..4c3a31fea7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go @@ -59,13 +59,33 @@ func setDefaults(o *policy.RetryOptions) { } func calcDelay(o policy.RetryOptions, try int32) time.Duration { // try is >=1; never 0 - delay := time.Duration((1< o.MaxRetryDelay { + delayFloat := float64(delay) * 
jitterMultiplier + if delayFloat > float64(math.MaxInt64) { + // the jitter pushed us over MaxInt64, so just use MaxInt64 + delay = time.Duration(math.MaxInt64) + } else { + delay = time.Duration(delayFloat) + } + + if delay > o.MaxRetryDelay { // MaxRetryDelay is backfilled with non-negative value delay = o.MaxRetryDelay } + return delay } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md new file mode 100644 index 0000000000..ea267e4f41 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md @@ -0,0 +1,10 @@ +# Breaking Changes + +## v1.6.0 + +### Behavioral change to `DefaultAzureCredential` in IMDS managed identity scenarios + +As of `azidentity` v1.6.0, `DefaultAzureCredential` makes a minor behavioral change when it uses IMDS managed +identity. It sends its first request to IMDS without the "Metadata" header, to expedite validating whether the endpoint +is available. This precedes the credential's first token request and is guaranteed to fail with a 400 error. This error +response can appear in logs but doesn't indicate authentication failed. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md index a8c2feb6d4..e35f5ad935 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -1,5 +1,52 @@ # Release History +## 1.8.0 (2024-10-08) + +### Other Changes +* `AzurePipelinesCredential` sets an additional OIDC request header so that it + receives a 401 instead of a 302 after presenting an invalid system access token +* Allow logging of debugging headers for `AzurePipelinesCredential` and include + them in error messages + +## 1.8.0-beta.3 (2024-09-17) + +### Features Added +* Added `ObjectID` type for `ManagedIdentityCredentialOptions.ID` + +### Other Changes +* Removed redundant content from error messages + +## 1.8.0-beta.2 (2024-08-06) + +### Breaking Changes +* `NewManagedIdentityCredential` now returns an error when a user-assigned identity + is specified on a platform whose managed identity API doesn't support that. + `ManagedIdentityCredential.GetToken()` formerly logged a warning in these cases. + Returning an error instead prevents the credential authenticating an unexpected + identity, causing a client to act with unexpected privileges. The affected + platforms are: + * Azure Arc + * Azure ML (when a resource ID is specified; client IDs are supported) + * Cloud Shell + * Service Fabric + +### Other Changes +* If `DefaultAzureCredential` receives a non-JSON response when probing IMDS before + attempting to authenticate a managed identity, it continues to the next credential + in the chain instead of immediately returning an error. + +## 1.8.0-beta.1 (2024-07-17) + +### Features Added +* Restored persistent token caching feature + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.7.0-beta.1 +* Redesigned the persistent caching API. Encryption is now required in all cases + and persistent cache construction is separate from credential construction. + The `PersistentUserAuthentication` example in the package docs has been updated + to demonstrate the new API. 
+ ## 1.7.0 (2024-06-20) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md index 7e201ea2fd..96f30b25cc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md @@ -54,7 +54,7 @@ The `azidentity` module focuses on OAuth authentication with Microsoft Entra ID. ### DefaultAzureCredential -`DefaultAzureCredential` is appropriate for most apps that will be deployed to Azure. It combines common production credentials with development credentials. It attempts to authenticate via the following mechanisms in this order, stopping when one succeeds: +`DefaultAzureCredential` simplifies authentication while developing applications that deploy to Azure by combining credentials used in Azure hosting environments and credentials used in local development. In production, it's better to use a specific credential type so authentication is more predictable and easier to debug. `DefaultAzureCredential` attempts to authenticate via the following mechanisms in this order, stopping when one succeeds: ![DefaultAzureCredential authentication flow](img/mermaidjs/DefaultAzureCredentialAuthFlow.svg) @@ -126,12 +126,17 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) ## Credential Types -### Authenticating Azure Hosted Applications +### Credential chains |Credential|Usage |-|- |[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps |[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials + +### Authenticating Azure-Hosted Applications + +|Credential|Usage +|-|- |[EnvironmentCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)|Authenticate a service principal or user configured by environment variables |[ManagedIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential)|Authenticate the managed identity of an Azure resource |[WorkloadIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#WorkloadIdentityCredential)|Authenticate a workload identity on Kubernetes @@ -158,7 +163,7 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) |Credential|Usage |-|- |[AzureCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureCLICredential)|Authenticate as the user signed in to the Azure CLI -|[`AzureDeveloperCLICredential`](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureDeveloperCLICredential)|Authenticates as the user signed in to the Azure Developer CLI +|[AzureDeveloperCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureDeveloperCLICredential)|Authenticates as the user signed in to the Azure Developer CLI ## Environment Variables diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD index fbaa292204..e0bd09c636 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD @@ -1,57 +1,40 @@ ## Token caching in the Azure Identity client module -*Token caching* is a feature provided by the Azure Identity library that allows apps to: +Token caching helps apps: - Improve their resilience and performance. -- Reduce the number of requests made to Microsoft Entra ID to obtain access tokens. -- Reduce the number of times the user is prompted to authenticate. +- Reduce the number of requests sent to Microsoft Entra ID to obtain access tokens. +- Reduce the number of times users are prompted to authenticate. -When an app needs to access a protected Azure resource, it typically needs to obtain an access token from Entra ID. Obtaining that token involves sending a request to Entra ID and may also involve prompting the user. Entra ID then validates the credentials provided in the request and issues an access token. +When an app needs to access a protected Azure resource, it typically needs to obtain an access token from Entra ID by sending an HTTP request and sometimes prompting a user to authenticate interactively. Credentials with caches (see [the below table](#credentials-supporting-token-caching) for a list) store access tokens either [in memory](#in-memory-token-caching) or, optionally, [on disk](#persistent-token-caching). These credentials return cached tokens whenever possible, to avoid unnecessary token requests or user interaction. Both cache implementations are safe for concurrent use. -Token caching, via the Azure Identity library, allows the app to store this access token [in memory](#in-memory-token-caching), where it's accessible to the current process, or [on disk](#persistent-token-caching) where it can be accessed across application or process invocations. The token can then be retrieved quickly and easily the next time the app needs to access the same resource. The app can avoid making another request to Entra ID, which reduces network traffic and improves resilience. Additionally, in scenarios where the app is authenticating users, token caching also avoids prompting the user each time new tokens are requested. +#### Caching can't be disabled -### In-memory token caching - -*In-memory token caching* is the default option provided by the Azure Identity library. This caching approach allows apps to store access tokens in memory. With in-memory token caching, the library first determines if a valid access token for the requested resource is already stored in memory. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. If a valid token isn't found, the library will automatically acquire a token by sending a request to Entra ID. The in-memory token cache provided by the Azure Identity library is thread-safe. - -**Note:** When Azure Identity library credentials are used with Azure service libraries (for example, Azure Blob Storage), the in-memory token caching is active in the `Pipeline` layer as well. All `TokenCredential` implementations are supported there, including custom implementations external to the Azure Identity library. +Whether a credential caches tokens isn't configurable. If a credential has a cache of either kind, it requests a new token only when it can't provide one from its cache. Azure SDK service clients have an additional, independent layer of in-memory token caching, to prevent redundant token requests. 
This cache works with any credential type, even a custom implementation defined outside the Azure SDK, and can't be disabled. Disabling token caching is therefore impossible when using Azure SDK clients or most `azidentity` credential types. However, in-memory caches can be cleared by constructing new credential and client instances. -#### Caching cannot be disabled +### In-memory token caching -As there are many levels of caching, it's not possible disable in-memory caching. However, the in-memory cache may be cleared by creating a new credential instance. +Credential types that support caching store tokens in memory by default and require no configuration to do so. Each instance of these types has its own cache, and two credential instances never share an in-memory cache. ### Persistent token caching -> Only azidentity v1.5.0-beta versions support persistent token caching +Some credential types support opt-in persistent token caching (see [the below table](#credentials-supporting-token-caching) for a list). This feature enables credentials to store and retrieve tokens across process executions, so an application doesn't need to authenticate every time it runs. -*Persistent disk token caching* is an opt-in feature in the Azure Identity library. The feature allows apps to cache access tokens in an encrypted, persistent storage mechanism. As indicated in the following table, the storage mechanism differs across operating systems. +Persistent caches are encrypted at rest using a mechanism that depends on the operating system: -| Operating system | Storage mechanism | +| Operating system | Encryption facility | |------------------|---------------------------------------| | Linux | kernel key retention service (keyctl) | | macOS | Keychain | -| Windows | DPAPI | - -By default the token cache will protect any data which is persisted using the user data protection APIs available on the current platform. -However, there are cases where no data protection is available, and applications may choose to allow storing the token cache in an unencrypted state by setting `TokenCachePersistenceOptions.AllowUnencryptedStorage` to `true`. This allows a credential to fall back to unencrypted storage if it can't encrypt the cache. However, we do not recommend using this storage method due to its significantly lower security measures. In addition, tokens are not encrypted solely to the current user, which could potentially allow unauthorized access to the cache by individuals with machine access. +| Windows | Data Protection API (DPAPI) | -With persistent disk token caching enabled, the library first determines if a valid access token for the requested resource is already stored in the persistent cache. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. Additionally, the tokens are preserved across app runs, which: - -- Makes the app more resilient to failures. -- Ensures the app can continue to function during an Entra ID outage or disruption. -- Avoids having to prompt users to authenticate each time the process is restarted. - ->IMPORTANT! The token cache contains sensitive data and **MUST** be protected to prevent compromising accounts. All application decisions regarding the persistence of the token cache must consider that a breach of its content will fully compromise all the accounts it contains. 
- -#### Example code - -See the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.6.0-beta.2#pkg-overview) for example code demonstrating how to configure persistent caching and access cached data. +Persistent caching requires encryption. When the required encryption facility is unuseable, or the application is running on an unsupported OS, the persistent cache constructor returns an error. This doesn't mean that authentication is impossible, only that credentials can't persist authentication data and the application will need to reauthenticate the next time it runs. See the [package documentation][example] for example code showing how to configure persistent caching and access cached data. ### Credentials supporting token caching The following table indicates the state of in-memory and persistent caching in each credential type. -**Note:** In-memory caching is activated by default. Persistent token caching needs to be enabled as shown in [this example](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.5.0-beta.1#example-package-PersistentCache). +**Note:** in-memory caching is enabled by default for every type supporting it. Persistent token caching must be enabled explicitly. See the [package documentation][user_example] for an example showing how to do this for credential types authenticating users. For types that authenticate service principals, set the `Cache` field on the constructor's options as shown in [this example][sp_example]. | Credential | In-memory token caching | Persistent token caching | |--------------------------------|---------------------------------------------------------------------|--------------------------| @@ -66,6 +49,9 @@ The following table indicates the state of in-memory and persistent caching in e | `EnvironmentCredential` | Supported | Not Supported | | `InteractiveBrowserCredential` | Supported | Supported | | `ManagedIdentityCredential` | Supported | Not Supported | -| `OnBehalfOfCredential` | Supported | Supported | +| `OnBehalfOfCredential` | Supported | Not Supported | | `UsernamePasswordCredential` | Supported | Supported | | `WorkloadIdentityCredential` | Supported | Supported | + +[sp_example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentServicePrincipalAuthentication +[user_example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentUserAuthentication diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md index 54016a0709..c24f67e84a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -234,7 +234,7 @@ azd auth token --output json --scope https://management.core.windows.net/.defaul |---|---|---| | AADSTS900023: Specified tenant identifier 'some tenant ID' is neither a valid DNS name, nor a valid external domain.|The `tenantID` argument to `NewAzurePipelinesCredential` is incorrect| Verify the tenant ID. It must identify the tenant of the user-assigned managed identity or service principal configured for the service connection.| | No service connection found with identifier |The `serviceConnectionID` argument to `NewAzurePipelinesCredential` is incorrect| Verify the service connection ID. This parameter refers to the `resourceId` of the Azure Service Connection. 
It can also be found in the query string of the service connection's configuration in Azure DevOps. [Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/library/service-endpoints?view=azure-devops&tabs=yaml) has more information about service connections.| -|302 (Found) response from OIDC endpoint|The `systemAccessToken` argument to `NewAzurePipelinesCredential` is incorrect|Check pipeline configuration. This value comes from the predefined variable `System.AccessToken` [as described in Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken).| +|401 (Unauthorized) response from OIDC endpoint|The `systemAccessToken` argument to `NewAzurePipelinesCredential` is incorrect|Check pipeline configuration. This value comes from the predefined variable `System.AccessToken` [as described in Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken).| ## Get additional help diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json index bff0c44dac..045f87acd5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/azidentity", - "Tag": "go/azidentity_087379b475" + "Tag": "go/azidentity_c55452bbf6" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go index ada4d6501d..840a71469c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go @@ -18,10 +18,10 @@ import ( var supportedAuthRecordVersions = []string{"1.0"} -// authenticationRecord is non-secret account information about an authenticated user that user credentials such as +// AuthenticationRecord is non-secret account information about an authenticated user that user credentials such as // [DeviceCodeCredential] and [InteractiveBrowserCredential] can use to access previously cached authentication -// data. Call these credentials' Authenticate method to get an authenticationRecord for a user. -type authenticationRecord struct { +// data. Call these credentials' Authenticate method to get an AuthenticationRecord for a user. +type AuthenticationRecord struct { // Authority is the URL of the authority that issued the token. Authority string `json:"authority"` @@ -42,11 +42,11 @@ type authenticationRecord struct { } // UnmarshalJSON implements json.Unmarshaler for AuthenticationRecord -func (a *authenticationRecord) UnmarshalJSON(b []byte) error { +func (a *AuthenticationRecord) UnmarshalJSON(b []byte) error { // Default unmarshaling is fine but we want to return an error if the record's version isn't supported i.e., we // want to inspect the unmarshalled values before deciding whether to return an error. Unmarshaling a formally // different type enables this by assigning all the fields without recursing into this method. 
- type r authenticationRecord + type r AuthenticationRecord err := json.Unmarshal(b, (*r)(a)) if err != nil { return err @@ -63,7 +63,7 @@ func (a *authenticationRecord) UnmarshalJSON(b []byte) error { } // account returns the AuthenticationRecord as an MSAL Account. The account is zero-valued when the AuthenticationRecord is zero-valued. -func (a *authenticationRecord) account() public.Account { +func (a *AuthenticationRecord) account() public.Account { return public.Account{ Environment: a.Authority, HomeAccountID: a.HomeAccountID, @@ -71,10 +71,10 @@ func (a *authenticationRecord) account() public.Account { } } -func newAuthenticationRecord(ar public.AuthResult) (authenticationRecord, error) { +func newAuthenticationRecord(ar public.AuthResult) (AuthenticationRecord, error) { u, err := url.Parse(ar.IDToken.Issuer) if err != nil { - return authenticationRecord{}, fmt.Errorf("Authenticate expected a URL issuer but got %q", ar.IDToken.Issuer) + return AuthenticationRecord{}, fmt.Errorf("Authenticate expected a URL issuer but got %q", ar.IDToken.Issuer) } tenant := ar.IDToken.TenantID if tenant == "" { @@ -84,7 +84,7 @@ func newAuthenticationRecord(ar public.AuthResult) (authenticationRecord, error) if username == "" { username = ar.IDToken.UPN } - return authenticationRecord{ + return AuthenticationRecord{ Authority: fmt.Sprintf("%s://%s", u.Scheme, u.Host), ClientID: ar.IDToken.Audience, HomeAccountID: ar.Account.HomeAccountID, diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go index b0965036bb..ce55dc658e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -53,8 +53,14 @@ var ( errInvalidTenantID = errors.New("invalid tenantID. You can locate your tenantID by following the instructions listed here: https://learn.microsoft.com/partner-center/find-ids-and-domain-names") ) -// tokenCachePersistenceOptions contains options for persistent token caching -type tokenCachePersistenceOptions = internal.TokenCachePersistenceOptions +// Cache represents a persistent cache that makes authentication data available across processes. +// Construct one with [github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache.New]. This package's +// [persistent user authentication example] shows how to use a persistent cache to reuse user +// logins across application runs. For service principal credential types such as +// [ClientCertificateCredential], simply set the Cache field on the credential options. +// +// [persistent user authentication example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentUserAuthentication +type Cache = internal.Cache // setAuthorityHost initializes the authority host for credentials. Precedence is: // 1. 
cloud.Configuration.ActiveDirectoryAuthorityHost value set by user diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go index 80c1806bb1..a4b8ab6f4d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go @@ -20,6 +20,8 @@ const ( credNameAzurePipelines = "AzurePipelinesCredential" oidcAPIVersion = "7.1" systemOIDCRequestURI = "SYSTEM_OIDCREQUESTURI" + xMsEdgeRef = "x-msedge-ref" + xVssE2eId = "x-vss-e2eid" ) // AzurePipelinesCredential authenticates with workload identity federation in an Azure Pipeline. See @@ -40,6 +42,11 @@ type AzurePipelinesCredentialOptions struct { // application is registered. AdditionallyAllowedTenants []string + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making @@ -81,8 +88,11 @@ func NewAzurePipelinesCredential(tenantID, clientID, serviceConnectionID, system if options == nil { options = &AzurePipelinesCredentialOptions{} } + // these headers are useful to the DevOps team when debugging OIDC error responses + options.ClientOptions.Logging.AllowedHeaders = append(options.ClientOptions.Logging.AllowedHeaders, xMsEdgeRef, xVssE2eId) caco := ClientAssertionCredentialOptions{ AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Cache: options.Cache, ClientOptions: options.ClientOptions, DisableInstanceDiscovery: options.DisableInstanceDiscovery, } @@ -108,33 +118,40 @@ func (a *AzurePipelinesCredential) getAssertion(ctx context.Context) (string, er url := a.oidcURI + "?api-version=" + oidcAPIVersion + "&serviceConnectionId=" + a.connectionID url, err := runtime.EncodeQueryParams(url) if err != nil { - return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't encode OIDC URL: "+err.Error(), nil, nil) + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't encode OIDC URL: "+err.Error(), nil) } req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil) if err != nil { - return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't create OIDC token request: "+err.Error(), nil, nil) + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't create OIDC token request: "+err.Error(), nil) } req.Header.Set("Authorization", "Bearer "+a.systemAccessToken) + // instruct endpoint to return 401 instead of 302, if the system access token is invalid + req.Header.Set("X-TFS-FedAuthRedirect", "Suppress") res, err := doForClient(a.cred.client.azClient, req) if err != nil { - return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't send OIDC token request: "+err.Error(), nil, nil) + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't send OIDC token request: "+err.Error(), nil) } if res.StatusCode != 
http.StatusOK { - msg := res.Status + " response from the OIDC endpoint. Check service connection ID and Pipeline configuration" + msg := res.Status + " response from the OIDC endpoint. Check service connection ID and Pipeline configuration." + for _, h := range []string{xMsEdgeRef, xVssE2eId} { + if v := res.Header.Get(h); v != "" { + msg += fmt.Sprintf("\n%s: %s", h, v) + } + } // include the response because its body, if any, probably contains an error message. // OK responses aren't included with errors because they probably contain secrets - return "", newAuthenticationFailedError(credNameAzurePipelines, msg, res, nil) + return "", newAuthenticationFailedError(credNameAzurePipelines, msg, res) } b, err := runtime.Payload(res) if err != nil { - return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't read OIDC response content: "+err.Error(), nil, nil) + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't read OIDC response content: "+err.Error(), nil) } var r struct { OIDCToken string `json:"oidcToken"` } err = json.Unmarshal(b, &r) if err != nil { - return "", newAuthenticationFailedError(credNameAzurePipelines, "unexpected response from OIDC endpoint", nil, nil) + return "", newAuthenticationFailedError(credNameAzurePipelines, "unexpected response from OIDC endpoint", nil) } return r.OIDCToken, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go index 6c35a941b9..2460f66ec1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go @@ -113,11 +113,19 @@ func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.Token if err != nil { // return credentialUnavailableError iff all sources did so; return AuthenticationFailedError otherwise msg := createChainedErrorMessage(errs) - if errors.As(err, &unavailableErr) { + var authFailedErr *AuthenticationFailedError + switch { + case errors.As(err, &authFailedErr): + err = newAuthenticationFailedError(c.name, msg, authFailedErr.RawResponse) + if af, ok := err.(*AuthenticationFailedError); ok { + // stop Error() printing the response again; it's already in msg + af.omitResponse = true + } + case errors.As(err, &unavailableErr): err = newCredentialUnavailableError(c.name, msg) - } else { + default: res := getResponseFromError(err) - err = newAuthenticationFailedError(c.name, msg, res, err) + err = newAuthenticationFailedError(c.name, msg, res) } } return token, err @@ -126,7 +134,7 @@ func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.Token func createChainedErrorMessage(errs []error) string { msg := "failed to acquire a token.\nAttempted credentials:" for _, err := range errs { - msg += fmt.Sprintf("\n\t%s", err.Error()) + msg += fmt.Sprintf("\n\t%s", strings.ReplaceAll(err.Error(), "\n", "\n\t\t")) } return msg } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml index 4cd8c51447..62c12b5465 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml @@ -26,16 +26,27 @@ extends: parameters: CloudConfig: Public: + ServiceConnection: azure-sdk-tests + SubscriptionConfigurationFilePaths: + - 
eng/common/TestResources/sub-config/AzurePublicMsft.json SubscriptionConfigurations: - $(sub-config-azure-cloud-test-resources) - $(sub-config-identity-test-resources) - EnvVars: - SYSTEM_ACCESSTOKEN: $(System.AccessToken) + EnableRaceDetector: true RunLiveTests: true ServiceDirectory: azidentity UsePipelineProxy: false ${{ if endsWith(variables['Build.DefinitionName'], 'weekly') }}: + PreSteps: + - task: AzureCLI@2 + displayName: Set OIDC token + inputs: + addSpnToEnvironment: true + azureSubscription: azure-sdk-tests + inlineScript: Write-Host "##vso[task.setvariable variable=OIDC_TOKEN;]$($env:idToken)" + scriptLocation: inlineScript + scriptType: pscore MatrixConfigs: - Name: managed_identity_matrix GenerateVMJobs: true diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go index b588750ef3..2307da86f4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go @@ -37,14 +37,16 @@ type ClientAssertionCredentialOptions struct { // application is registered. AdditionallyAllowedTenants []string + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool - - // tokenCachePersistenceOptions enables persistent token caching when not nil. - tokenCachePersistenceOptions *tokenCachePersistenceOptions } // NewClientAssertionCredential constructs a ClientAssertionCredential. The getAssertion function must be thread safe. Pass nil for options to accept defaults. 
@@ -61,10 +63,10 @@ func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(c }, ) msalOpts := confidentialClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, - tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Cache: options.Cache, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, } c, err := newConfidentialClient(tenantID, clientID, credNameAssertion, cred, msalOpts) if err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go index 80cd96b560..9e6bca1c92 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go @@ -31,6 +31,11 @@ type ClientCertificateCredentialOptions struct { // application is registered. AdditionallyAllowedTenants []string + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making @@ -41,9 +46,6 @@ type ClientCertificateCredentialOptions struct { // header of each token request's JWT. This is required for Subject Name/Issuer (SNI) authentication. // Defaults to False. SendCertificateChain bool - - // tokenCachePersistenceOptions enables persistent token caching when not nil. - tokenCachePersistenceOptions *tokenCachePersistenceOptions } // ClientCertificateCredential authenticates a service principal with a certificate. 
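The exported `Cache` field added above to `ClientCertificateCredentialOptions` (and to the other service principal credential options in this patch, such as `ClientSecretCredentialOptions`) replaces the unexported `tokenCachePersistenceOptions`. A minimal usage sketch, not taken from the patch, assuming the companion `azidentity/cache` module's `New` constructor accepts a nilable options pointer (nil for defaults) and returns a `Cache` plus an error; the tenant, client, and secret values are placeholders:

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache"
)

func main() {
	// Assumed signature: cache.New(nil) builds the persistent Cache the new options
	// field accepts; it returns an error when the OS can't encrypt the cache.
	persistentCache, err := cache.New(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Placeholders for a service principal's tenant, client ID, and secret.
	cred, err := azidentity.NewClientSecretCredential(
		"tenant-id", "client-id", "client-secret",
		&azidentity.ClientSecretCredentialOptions{Cache: persistentCache},
	)
	if err != nil {
		log.Fatal(err)
	}
	// Pass cred to an Azure SDK client; tokens it acquires now persist across runs.
	_ = cred
}
```

User-interactive credential types in this patch (for example `DeviceCodeCredentialOptions` and `InteractiveBrowserCredentialOptions`) expose the same `Cache` field alongside the now-exported `AuthenticationRecord`.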
@@ -65,11 +67,11 @@ func NewClientCertificateCredential(tenantID string, clientID string, certs []*x return nil, err } msalOpts := confidentialClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, - SendX5C: options.SendCertificateChain, - tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Cache: options.Cache, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + SendX5C: options.SendCertificateChain, } c, err := newConfidentialClient(tenantID, clientID, credNameCert, cred, msalOpts) if err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go index 9e6772e9b8..f0890fe1ec 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go @@ -32,8 +32,10 @@ type ClientSecretCredentialOptions struct { // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool - // tokenCachePersistenceOptions enables persistent token caching when not nil. - tokenCachePersistenceOptions *tokenCachePersistenceOptions + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache } // ClientSecretCredential authenticates an application with a client secret. @@ -51,10 +53,10 @@ func NewClientSecretCredential(tenantID string, clientID string, clientSecret st return nil, err } msalOpts := confidentialClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, - tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Cache: options.Cache, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, } c, err := newConfidentialClient(tenantID, clientID, credNameSecret, cred, msalOpts) if err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go index 3bd08c685f..7059a510c2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go @@ -29,8 +29,8 @@ type confidentialClientOptions struct { AdditionallyAllowedTenants []string // Assertion for on-behalf-of authentication Assertion string + Cache Cache DisableInstanceDiscovery, SendX5C bool - tokenCachePersistenceOptions *tokenCachePersistenceOptions } // confidentialClient wraps the MSAL confidential client @@ -107,12 +107,12 @@ func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenReque } } if err != nil { - // We could get a credentialUnavailableError from managed identity authentication because in that case the error comes from our code. 
- // We return it directly because it affects the behavior of credential chains. Otherwise, we return AuthenticationFailedError. - var unavailableErr credentialUnavailable - if !errors.As(err, &unavailableErr) { - res := getResponseFromError(err) - err = newAuthenticationFailedError(c.name, err.Error(), res, err) + var ( + authFailedErr *AuthenticationFailedError + unavailableErr credentialUnavailable + ) + if !(errors.As(err, &unavailableErr) || errors.As(err, &authFailedErr)) { + err = newAuthenticationFailedErrorFromMSAL(c.name, err) } } else { msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", c.name, strings.Join(ar.GrantedScopes, ", ")) @@ -145,7 +145,7 @@ func (c *confidentialClient) client(tro policy.TokenRequestOptions) (msalConfide } func (c *confidentialClient) newMSALClient(enableCAE bool) (msalConfidentialClient, error) { - cache, err := internal.NewCache(c.opts.tokenCachePersistenceOptions, enableCAE) + cache, err := internal.ExportReplace(c.opts.Cache, enableCAE) if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go index 551d319946..3cfc0f7bf1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go @@ -36,10 +36,13 @@ type DefaultAzureCredentialOptions struct { TenantID string } -// DefaultAzureCredential is a default credential chain for applications that will deploy to Azure. -// It combines credentials suitable for deployment with credentials suitable for local development. -// It attempts to authenticate with each of these credential types, in the following order, stopping -// when one provides a token: +// DefaultAzureCredential simplifies authentication while developing applications that deploy to Azure by +// combining credentials used in Azure hosting environments and credentials used in local development. In +// production, it's better to use a specific credential type so authentication is more predictable and easier +// to debug. +// +// DefaultAzureCredential attempts to authenticate with each of these credential types, in the following order, +// stopping when one provides a token: // // - [EnvironmentCredential] // - [WorkloadIdentityCredential], if environment variable configuration is set by the Azure workload diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go index cd30bedd5e..53c4c72873 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go @@ -25,18 +25,26 @@ type DeviceCodeCredentialOptions struct { // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. AdditionallyAllowedTenants []string - // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option // to enable the credential to use data from a previous authentication. - authenticationRecord authenticationRecord - - // ClientID is the ID of the application users will authenticate to. - // Defaults to the ID of an Azure development application. 
+ AuthenticationRecord AuthenticationRecord + + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache + + // ClientID is the ID of the application to which users will authenticate. When not set, users + // will authenticate to an Azure development application, which isn't recommended for production + // scenarios. In production, developers should instead register their applications and assign + // appropriate roles. See https://aka.ms/azsdk/identity/AppRegistrationAndRoleAssignment for more + // information. ClientID string - // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. - // When this option is true, GetToken will return authenticationRequiredError when user interaction is necessary + // DisableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. + // When this option is true, GetToken will return AuthenticationRequiredError when user interaction is necessary // to acquire a token. - disableAutomaticAuthentication bool + DisableAutomaticAuthentication bool // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata @@ -49,9 +57,6 @@ type DeviceCodeCredentialOptions struct { // applications. TenantID string - // tokenCachePersistenceOptions enables persistent token caching when not nil. - tokenCachePersistenceOptions *tokenCachePersistenceOptions - // UserPrompt controls how the credential presents authentication instructions. The credential calls // this function with authentication details when it receives a device code. By default, the credential // prints these details to stdout. @@ -101,12 +106,12 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC cp.init() msalOpts := publicClientOptions{ AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants, + Cache: cp.Cache, ClientOptions: cp.ClientOptions, DeviceCodePrompt: cp.UserPrompt, - DisableAutomaticAuthentication: cp.disableAutomaticAuthentication, + DisableAutomaticAuthentication: cp.DisableAutomaticAuthentication, DisableInstanceDiscovery: cp.DisableInstanceDiscovery, - Record: cp.authenticationRecord, - TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions, + Record: cp.AuthenticationRecord, } c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameDeviceCode, msalOpts) if err != nil { @@ -116,8 +121,9 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC return &DeviceCodeCredential{client: c}, nil } -// Authenticate a user via the device code flow. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. -func (c *DeviceCodeCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { +// Authenticate prompts a user to log in via the device code flow. Subsequent +// GetToken calls will automatically use the returned AuthenticationRecord. 
+func (c *DeviceCodeCredential) Authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (AuthenticationRecord, error) { var err error ctx, endSpan := runtime.StartSpan(ctx, credNameDeviceCode+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) defer func() { endSpan(err) }() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go index 35fa01d136..b05cb035a8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go @@ -38,18 +38,30 @@ type AuthenticationFailedError struct { // RawResponse is the HTTP response motivating the error, if available. RawResponse *http.Response - credType string - message string - err error + credType, message string + omitResponse bool } -func newAuthenticationFailedError(credType string, message string, resp *http.Response, err error) error { - return &AuthenticationFailedError{credType: credType, message: message, RawResponse: resp, err: err} +func newAuthenticationFailedError(credType, message string, resp *http.Response) error { + return &AuthenticationFailedError{credType: credType, message: message, RawResponse: resp} +} + +// newAuthenticationFailedErrorFromMSAL creates an AuthenticationFailedError from an MSAL error. +// If the error is an MSAL CallErr, the new error includes an HTTP response and not the MSAL error +// message, because that message is redundant given the response. If the original error isn't a +// CallErr, the returned error incorporates its message. +func newAuthenticationFailedErrorFromMSAL(credType string, err error) error { + msg := "" + res := getResponseFromError(err) + if res == nil { + msg = err.Error() + } + return newAuthenticationFailedError(credType, msg, res) } // Error implements the error interface. Note that the message contents are not contractual and can change over time. func (e *AuthenticationFailedError) Error() string { - if e.RawResponse == nil { + if e.RawResponse == nil || e.omitResponse { return e.credType + ": " + e.message } msg := &bytes.Buffer{} @@ -62,7 +74,7 @@ func (e *AuthenticationFailedError) Error() string { fmt.Fprintln(msg, "Request information not available") } fmt.Fprintln(msg, "--------------------------------------------------------------------------------") - fmt.Fprintf(msg, "RESPONSE %s\n", e.RawResponse.Status) + fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status) fmt.Fprintln(msg, "--------------------------------------------------------------------------------") body, err := runtime.Payload(e.RawResponse) switch { @@ -109,17 +121,17 @@ func (*AuthenticationFailedError) NonRetriable() { var _ errorinfo.NonRetriable = (*AuthenticationFailedError)(nil) -// authenticationRequiredError indicates a credential's Authenticate method must be called to acquire a token +// AuthenticationRequiredError indicates a credential's Authenticate method must be called to acquire a token // because the credential requires user interaction and is configured not to request it automatically. -type authenticationRequiredError struct { +type AuthenticationRequiredError struct { credentialUnavailableError // TokenRequestOptions for the required token. Pass this to the credential's Authenticate method. 
TokenRequestOptions policy.TokenRequestOptions } -func newauthenticationRequiredError(credType string, tro policy.TokenRequestOptions) error { - return &authenticationRequiredError{ +func newAuthenticationRequiredError(credType string, tro policy.TokenRequestOptions) error { + return &AuthenticationRequiredError{ credentialUnavailableError: credentialUnavailableError{ credType + " can't acquire a token without user interaction. Call Authenticate to authenticate a user interactively", }, @@ -128,8 +140,8 @@ func newauthenticationRequiredError(credType string, tro policy.TokenRequestOpti } var ( - _ credentialUnavailable = (*authenticationRequiredError)(nil) - _ errorinfo.NonRetriable = (*authenticationRequiredError)(nil) + _ credentialUnavailable = (*AuthenticationRequiredError)(nil) + _ errorinfo.NonRetriable = (*AuthenticationRequiredError)(nil) ) type credentialUnavailable interface { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum deleted file mode 100644 index c592f283b6..0000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum +++ /dev/null @@ -1,60 +0,0 @@ -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1 h1:ODs3brnqQM99Tq1PffODpAViYv3Bf8zOg464MU7p5ew= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/keybase/dbus v0.0.0-20220506165403-5aa21ea2c23a/go.mod h1:YPNKjjE7Ubp9dTbnWvsP3HT+hYnY6TfXzubYTBeUxc8= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= -golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= -golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go index 056785a8a3..848db16e43 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go @@ -24,18 +24,26 @@ type InteractiveBrowserCredentialOptions struct { // tokens. 
Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. AdditionallyAllowedTenants []string - // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option // to enable the credential to use data from a previous authentication. - authenticationRecord authenticationRecord - - // ClientID is the ID of the application users will authenticate to. - // Defaults to the ID of an Azure development application. + AuthenticationRecord AuthenticationRecord + + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache + + // ClientID is the ID of the application to which users will authenticate. When not set, users + // will authenticate to an Azure development application, which isn't recommended for production + // scenarios. In production, developers should instead register their applications and assign + // appropriate roles. See https://aka.ms/azsdk/identity/AppRegistrationAndRoleAssignment for more + // information. ClientID string - // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. - // When this option is true, GetToken will return authenticationRequiredError when user interaction is necessary + // DisableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. + // When this option is true, GetToken will return AuthenticationRequiredError when user interaction is necessary // to acquire a token. - disableAutomaticAuthentication bool + DisableAutomaticAuthentication bool // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata @@ -54,9 +62,6 @@ type InteractiveBrowserCredentialOptions struct { // TenantID is the Microsoft Entra tenant the credential authenticates in. Defaults to the // "organizations" tenant, which can authenticate work and school accounts. TenantID string - - // tokenCachePersistenceOptions enables persistent token caching when not nil. - tokenCachePersistenceOptions *tokenCachePersistenceOptions } func (o *InteractiveBrowserCredentialOptions) init() { @@ -82,13 +87,13 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption cp.init() msalOpts := publicClientOptions{ AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants, + Cache: cp.Cache, ClientOptions: cp.ClientOptions, - DisableAutomaticAuthentication: cp.disableAutomaticAuthentication, + DisableAutomaticAuthentication: cp.DisableAutomaticAuthentication, DisableInstanceDiscovery: cp.DisableInstanceDiscovery, LoginHint: cp.LoginHint, - Record: cp.authenticationRecord, + Record: cp.AuthenticationRecord, RedirectURL: cp.RedirectURL, - TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions, } c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameBrowser, msalOpts) if err != nil { @@ -97,8 +102,9 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption return &InteractiveBrowserCredential{client: c}, nil } -// Authenticate a user via the default browser. 
Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. -func (c *InteractiveBrowserCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { +// Authenticate opens the default browser so a user can log in. Subsequent +// GetToken calls will automatically use the returned AuthenticationRecord. +func (c *InteractiveBrowserCredential) Authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (AuthenticationRecord, error) { var err error ctx, endSpan := runtime.StartSpan(ctx, credNameBrowser+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) defer func() { endSpan(err) }() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/cache.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/cache.go new file mode 100644 index 0000000000..c0cfe76060 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/cache.go @@ -0,0 +1,86 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package internal + +import ( + "sync" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" +) + +// Cache represents a persistent cache that makes authentication data available across processes. +// Construct one with [github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache.New]. This package's +// [persistent user authentication example] shows how to use a persistent cache to reuse user +// logins across application runs. For service principal credential types such as +// [ClientCertificateCredential], simply set the Cache field on the credential options. +// +// [persistent user authentication example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentUserAuthentication +type Cache struct { + // impl is a pointer so a Cache can carry persistent state across copies + impl *impl +} + +// impl is a Cache's private implementation +type impl struct { + // factory constructs storage implementations + factory func(bool) (cache.ExportReplace, error) + // cae and noCAE are previously constructed storage implementations. CAE + // and non-CAE tokens must be stored separately because MSAL's cache doesn't + // observe token claims. If a single storage implementation held both kinds + // of tokens, it could create a reauthentication or error loop by returning + // a non-CAE token lacking a required claim. + cae, noCAE cache.ExportReplace + // mu synchronizes around cae and noCAE + mu *sync.RWMutex +} + +func (i *impl) exportReplace(cae bool) (cache.ExportReplace, error) { + if i == nil { + // zero-value Cache: return a nil ExportReplace and MSAL will cache in memory + return nil, nil + } + var ( + err error + xr cache.ExportReplace + ) + i.mu.RLock() + xr = i.cae + if !cae { + xr = i.noCAE + } + i.mu.RUnlock() + if xr != nil { + return xr, nil + } + i.mu.Lock() + defer i.mu.Unlock() + if cae { + if i.cae == nil { + if xr, err = i.factory(cae); err == nil { + i.cae = xr + } + } + return i.cae, err + } + if i.noCAE == nil { + if xr, err = i.factory(cae); err == nil { + i.noCAE = xr + } + } + return i.noCAE, err +} + +// NewCache is the constructor for Cache. It takes a factory instead of an instance +// because it doesn't know whether the Cache will store both CAE and non-CAE tokens. 
+func NewCache(factory func(cae bool) (cache.ExportReplace, error)) Cache { + return Cache{&impl{factory: factory, mu: &sync.RWMutex{}}} +} + +// ExportReplace returns an implementation satisfying MSAL's ExportReplace interface. +// It's a function instead of a method on Cache so packages in azidentity and +// azidentity/cache can call it while applications can't. "cae" declares whether the +// caller intends this implementation to store CAE tokens. +func ExportReplace(c Cache, cae bool) (cache.ExportReplace, error) { + return c.impl.exportReplace(cae) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go deleted file mode 100644 index b1b4d5c8bd..0000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go +++ /dev/null @@ -1,18 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package internal - -// TokenCachePersistenceOptions contains options for persistent token caching -type TokenCachePersistenceOptions struct { - // AllowUnencryptedStorage controls whether the cache should fall back to storing its data in plain text - // when encryption isn't possible. Setting this true doesn't disable encryption. The cache always attempts - // encryption before falling back to plaintext storage. - AllowUnencryptedStorage bool - - // Name identifies the cache. Set this to isolate data from other applications. - Name string -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go deleted file mode 100644 index c1498b4644..0000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go +++ /dev/null @@ -1,31 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package internal - -import ( - "errors" - - "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" -) - -var errMissingImport = errors.New("import github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache to enable persistent caching") - -// NewCache constructs a persistent token cache when "o" isn't nil. Applications that intend to -// use a persistent cache must first import the cache module, which will replace this function -// with a platform-specific implementation. -var NewCache = func(o *TokenCachePersistenceOptions, enableCAE bool) (cache.ExportReplace, error) { - if o == nil { - return nil, nil - } - return nil, errMissingImport -} - -// CacheFilePath returns the path to the cache file for the given name. -// Defining it in this package makes it available to azidentity tests. 
-var CacheFilePath = func(name string) (string, error) { - return "", errMissingImport -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go index 6122cc7005..4c657a92ec 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -143,6 +143,9 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag if endpoint, ok := os.LookupEnv(identityEndpoint); ok { if _, ok := os.LookupEnv(identityHeader); ok { if _, ok := os.LookupEnv(identityServerThumbprint); ok { + if options.ID != nil { + return nil, errors.New("the Service Fabric API doesn't support specifying a user-assigned managed identity at runtime") + } env = "Service Fabric" c.endpoint = endpoint c.msiType = msiTypeServiceFabric @@ -152,6 +155,9 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag c.msiType = msiTypeAppService } } else if _, ok := os.LookupEnv(arcIMDSEndpoint); ok { + if options.ID != nil { + return nil, errors.New("the Azure Arc API doesn't support specifying a user-assigned managed identity at runtime") + } env = "Azure Arc" c.endpoint = endpoint c.msiType = msiTypeAzureArc @@ -159,9 +165,15 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag } else if endpoint, ok := os.LookupEnv(msiEndpoint); ok { c.endpoint = endpoint if _, ok := os.LookupEnv(msiSecret); ok { + if options.ID != nil && options.ID.idKind() != miClientID { + return nil, errors.New("the Azure ML API supports specifying a user-assigned managed identity by client ID only") + } env = "Azure ML" c.msiType = msiTypeAzureML } else { + if options.ID != nil { + return nil, errors.New("the Cloud Shell API doesn't support user-assigned managed identities") + } env = "Cloud Shell" c.msiType = msiTypeCloudShell } @@ -207,9 +219,10 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi defer cancel() cx = policy.WithRetryOptions(cx, policy.RetryOptions{MaxRetries: -1}) req, err := azruntime.NewRequest(cx, http.MethodGet, c.endpoint) - if err == nil { - _, err = c.azClient.Pipeline().Do(req) + if err != nil { + return azcore.AccessToken{}, fmt.Errorf("failed to create IMDS probe request: %s", err) } + res, err := c.azClient.Pipeline().Do(req) if err != nil { msg := err.Error() if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { @@ -217,7 +230,16 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi } return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, msg) } - // send normal token requests from now on because something responded + // because IMDS always responds with JSON, assume a non-JSON response is from something else, such + // as a proxy, and return credentialUnavailableError so DefaultAzureCredential continues iterating + b, err := azruntime.Payload(res) + if err != nil { + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, fmt.Sprintf("failed to read IMDS probe response: %s", err)) + } + if !json.Valid(b) { + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, "unexpected response to IMDS probe") + } + // send normal token requests from now on because IMDS responded c.probeIMDS = false } @@ -228,7 +250,7 @@ func (c 
*managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi resp, err := c.azClient.Pipeline().Do(msg) if err != nil { - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil, err) + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil) } if azruntime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { @@ -239,7 +261,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi switch resp.StatusCode { case http.StatusBadRequest: if id != nil { - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp, nil) + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp) } msg := "failed to authenticate a system assigned identity" if body, err := azruntime.Payload(resp); err == nil && len(body) > 0 { @@ -256,7 +278,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi } } - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "authentication failed", resp, nil) + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "", resp) } func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.AccessToken, error) { @@ -284,10 +306,10 @@ func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.Ac if expiresOn, err := strconv.Atoi(v); err == nil { return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(expiresOn), 0).UTC()}, nil } - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "unexpected expires_on value: "+v, res, nil) + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "unexpected expires_on value: "+v, res) default: msg := fmt.Sprintf("unsupported type received in expires_on: %T, %v", v, v) - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, msg, res, nil) + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, msg, res) } } @@ -302,15 +324,15 @@ func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id Manage key, err := c.getAzureArcSecretKey(ctx, scopes) if err != nil { msg := fmt.Sprintf("failed to retreive secret key from the identity endpoint: %v", err) - return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil, err) + return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil) } - return c.createAzureArcAuthRequest(ctx, id, scopes, key) + return c.createAzureArcAuthRequest(ctx, scopes, key) case msiTypeAzureML: return c.createAzureMLAuthRequest(ctx, id, scopes) case msiTypeServiceFabric: - return c.createServiceFabricAuthRequest(ctx, id, scopes) + return c.createServiceFabricAuthRequest(ctx, scopes) case msiTypeCloudShell: - return c.createCloudShellAuthRequest(ctx, id, scopes) + return c.createCloudShellAuthRequest(ctx, scopes) default: return nil, newCredentialUnavailableError(credNameManagedIdentity, "managed identity isn't supported in this environment") } @@ -323,13 +345,16 @@ func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id Ma } request.Raw().Header.Set(headerMetadata, "true") q := request.Raw().URL.Query() - q.Add("api-version", imdsAPIVersion) - q.Add("resource", strings.Join(scopes, " ")) + q.Set("api-version", 
imdsAPIVersion) + q.Set("resource", strings.Join(scopes, " ")) if id != nil { - if id.idKind() == miResourceID { - q.Add(msiResID, id.String()) - } else { - q.Add(qpClientID, id.String()) + switch id.idKind() { + case miClientID: + q.Set(qpClientID, id.String()) + case miObjectID: + q.Set("object_id", id.String()) + case miResourceID: + q.Set(msiResID, id.String()) } } request.Raw().URL.RawQuery = q.Encode() @@ -343,13 +368,16 @@ func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, } request.Raw().Header.Set("X-IDENTITY-HEADER", os.Getenv(identityHeader)) q := request.Raw().URL.Query() - q.Add("api-version", "2019-08-01") - q.Add("resource", scopes[0]) + q.Set("api-version", "2019-08-01") + q.Set("resource", scopes[0]) if id != nil { - if id.idKind() == miResourceID { - q.Add(miResID, id.String()) - } else { - q.Add(qpClientID, id.String()) + switch id.idKind() { + case miClientID: + q.Set(qpClientID, id.String()) + case miObjectID: + q.Set("principal_id", id.String()) + case miResourceID: + q.Set(miResID, id.String()) } } request.Raw().URL.RawQuery = q.Encode() @@ -363,23 +391,24 @@ func (c *managedIdentityClient) createAzureMLAuthRequest(ctx context.Context, id } request.Raw().Header.Set("secret", os.Getenv(msiSecret)) q := request.Raw().URL.Query() - q.Add("api-version", "2017-09-01") - q.Add("resource", strings.Join(scopes, " ")) - q.Add("clientid", os.Getenv(defaultIdentityClientID)) + q.Set("api-version", "2017-09-01") + q.Set("resource", strings.Join(scopes, " ")) + q.Set("clientid", os.Getenv(defaultIdentityClientID)) if id != nil { - if id.idKind() == miResourceID { - log.Write(EventAuthentication, "WARNING: Azure ML doesn't support specifying a managed identity by resource ID") - q.Set("clientid", "") - q.Set(miResID, id.String()) - } else { + switch id.idKind() { + case miClientID: q.Set("clientid", id.String()) + case miObjectID: + return nil, newAuthenticationFailedError(credNameManagedIdentity, "Azure ML doesn't support specifying a managed identity by object ID", nil) + case miResourceID: + return nil, newAuthenticationFailedError(credNameManagedIdentity, "Azure ML doesn't support specifying a managed identity by resource ID", nil) } } request.Raw().URL.RawQuery = q.Encode() return request, nil } -func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { +func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, scopes []string) (*policy.Request, error) { request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { return nil, err @@ -387,16 +416,8 @@ func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Conte q := request.Raw().URL.Query() request.Raw().Header.Set("Accept", "application/json") request.Raw().Header.Set("Secret", os.Getenv(identityHeader)) - q.Add("api-version", serviceFabricAPIVersion) - q.Add("resource", strings.Join(scopes, " ")) - if id != nil { - log.Write(EventAuthentication, "WARNING: Service Fabric doesn't support selecting a user-assigned identity at runtime") - if id.idKind() == miResourceID { - q.Add(miResID, id.String()) - } else { - q.Add(qpClientID, id.String()) - } - } + q.Set("api-version", serviceFabricAPIVersion) + q.Set("resource", strings.Join(scopes, " ")) request.Raw().URL.RawQuery = q.Encode() return request, nil } @@ -409,8 +430,8 @@ func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resour } 
request.Raw().Header.Set(headerMetadata, "true") q := request.Raw().URL.Query() - q.Add("api-version", azureArcAPIVersion) - q.Add("resource", strings.Join(resources, " ")) + q.Set("api-version", azureArcAPIVersion) + q.Set("resource", strings.Join(resources, " ")) request.Raw().URL.RawQuery = q.Encode() // send the initial request to get the short-lived secret key response, err := c.azClient.Pipeline().Do(request) @@ -421,39 +442,39 @@ func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resour // of the secret key file. Any other status code indicates an error in the request. if response.StatusCode != 401 { msg := fmt.Sprintf("expected a 401 response, received %d", response.StatusCode) - return "", newAuthenticationFailedError(credNameManagedIdentity, msg, response, nil) + return "", newAuthenticationFailedError(credNameManagedIdentity, msg, response) } header := response.Header.Get("WWW-Authenticate") if len(header) == 0 { - return "", newAuthenticationFailedError(credNameManagedIdentity, "HIMDS response has no WWW-Authenticate header", nil, nil) + return "", newAuthenticationFailedError(credNameManagedIdentity, "HIMDS response has no WWW-Authenticate header", nil) } // the WWW-Authenticate header is expected in the following format: Basic realm=/some/file/path.key _, p, found := strings.Cut(header, "=") if !found { - return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected WWW-Authenticate header from HIMDS: "+header, nil, nil) + return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected WWW-Authenticate header from HIMDS: "+header, nil) } expected, err := arcKeyDirectory() if err != nil { return "", err } if filepath.Dir(p) != expected || !strings.HasSuffix(p, ".key") { - return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected file path from HIMDS service: "+p, nil, nil) + return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected file path from HIMDS service: "+p, nil) } f, err := os.Stat(p) if err != nil { - return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not stat %q: %v", p, err), nil, nil) + return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not stat %q: %v", p, err), nil) } if s := f.Size(); s > 4096 { - return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("key is too large (%d bytes)", s), nil, nil) + return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("key is too large (%d bytes)", s), nil) } key, err := os.ReadFile(p) if err != nil { - return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not read %q: %v", p, err), nil, nil) + return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not read %q: %v", p, err), nil) } return string(key), nil } -func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, id ManagedIDKind, resources []string, key string) (*policy.Request, error) { +func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, resources []string, key string) (*policy.Request, error) { request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { return nil, err @@ -461,21 +482,13 @@ func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, i request.Raw().Header.Set(headerMetadata, "true") request.Raw().Header.Set("Authorization", fmt.Sprintf("Basic %s", key)) q := request.Raw().URL.Query() - q.Add("api-version", 
azureArcAPIVersion) - q.Add("resource", strings.Join(resources, " ")) - if id != nil { - log.Write(EventAuthentication, "WARNING: Azure Arc doesn't support user-assigned managed identities") - if id.idKind() == miResourceID { - q.Add(miResID, id.String()) - } else { - q.Add(qpClientID, id.String()) - } - } + q.Set("api-version", azureArcAPIVersion) + q.Set("resource", strings.Join(resources, " ")) request.Raw().URL.RawQuery = q.Encode() return request, nil } -func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { +func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, scopes []string) (*policy.Request, error) { request, err := azruntime.NewRequest(ctx, http.MethodPost, c.endpoint) if err != nil { return nil, err @@ -488,14 +501,5 @@ func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, if err := request.SetBody(body, "application/x-www-form-urlencoded"); err != nil { return nil, err } - if id != nil { - log.Write(EventAuthentication, "WARNING: Cloud Shell doesn't support user-assigned managed identities") - q := request.Raw().URL.Query() - if id.idKind() == miResourceID { - q.Add(miResID, id.String()) - } else { - q.Add(qpClientID, id.String()) - } - } return request, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go index 13c043d8e0..1d53579cf3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go @@ -22,8 +22,9 @@ const credNameManagedIdentity = "ManagedIdentityCredential" type managedIdentityIDKind int const ( - miClientID managedIdentityIDKind = 0 - miResourceID managedIdentityIDKind = 1 + miClientID managedIdentityIDKind = iota + miObjectID + miResourceID ) // ManagedIDKind identifies the ID of a managed identity as either a client or resource ID @@ -32,7 +33,12 @@ type ManagedIDKind interface { idKind() managedIdentityIDKind } -// ClientID is the client ID of a user-assigned managed identity. +// ClientID is the client ID of a user-assigned managed identity. [NewManagedIdentityCredential] +// returns an error when a ClientID is specified on the following platforms: +// +// - Azure Arc +// - Cloud Shell +// - Service Fabric type ClientID string func (ClientID) idKind() managedIdentityIDKind { @@ -44,7 +50,31 @@ func (c ClientID) String() string { return string(c) } -// ResourceID is the resource ID of a user-assigned managed identity. +// ObjectID is the object ID of a user-assigned managed identity. [NewManagedIdentityCredential] +// returns an error when an ObjectID is specified on the following platforms: +// +// - Azure Arc +// - Azure ML +// - Cloud Shell +// - Service Fabric +type ObjectID string + +func (ObjectID) idKind() managedIdentityIDKind { + return miObjectID +} + +// String returns the string value of the ID. +func (o ObjectID) String() string { + return string(o) +} + +// ResourceID is the resource ID of a user-assigned managed identity. 
[NewManagedIdentityCredential] +// returns an error when a ResourceID is specified on the following platforms: +// +// - Azure Arc +// - Azure ML +// - Cloud Shell +// - Service Fabric type ResourceID string func (ResourceID) idKind() managedIdentityIDKind { @@ -60,9 +90,10 @@ func (r ResourceID) String() string { type ManagedIdentityCredentialOptions struct { azcore.ClientOptions - // ID is the ID of a managed identity the credential should authenticate. Set this field to use a specific identity - // instead of the hosting environment's default. The value may be the identity's client ID or resource ID, but note that - // some platforms don't accept resource IDs. + // ID of a managed identity the credential should authenticate. Set this field to use a specific identity instead of + // the hosting environment's default. The value may be the identity's client, object, or resource ID. + // NewManagedIdentityCredential returns an error when the hosting environment doesn't support user-assigned managed + // identities, or the specified kind of ID. ID ManagedIDKind // dac indicates whether the credential is part of DefaultAzureCredential. When true, and the environment doesn't have @@ -73,10 +104,11 @@ type ManagedIdentityCredentialOptions struct { dac bool } -// ManagedIdentityCredential authenticates an Azure managed identity in any hosting environment supporting managed identities. +// ManagedIdentityCredential authenticates an [Azure managed identity] in any hosting environment supporting managed identities. // This credential authenticates a system-assigned identity by default. Use ManagedIdentityCredentialOptions.ID to specify a -// user-assigned identity. See Microsoft Entra ID documentation for more information about managed identities: -// https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview +// user-assigned identity. 
+// +// [Azure managed identity]: https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview type ManagedIdentityCredential struct { client *confidentialClient mic *managedIdentityClient diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go index b3d22dbf3c..73363e1c9e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go @@ -30,12 +30,12 @@ type publicClientOptions struct { azcore.ClientOptions AdditionallyAllowedTenants []string + Cache Cache DeviceCodePrompt func(context.Context, DeviceCodeMessage) error DisableAutomaticAuthentication bool DisableInstanceDiscovery bool LoginHint, RedirectURL string - Record authenticationRecord - TokenCachePersistenceOptions *tokenCachePersistenceOptions + Record AuthenticationRecord Username, Password string } @@ -48,7 +48,7 @@ type publicClient struct { host string name string opts publicClientOptions - record authenticationRecord + record AuthenticationRecord azClient *azcore.Client } @@ -107,19 +107,19 @@ func newPublicClient(tenantID, clientID, name string, o publicClientOptions) (*p }, nil } -func (p *publicClient) Authenticate(ctx context.Context, tro *policy.TokenRequestOptions) (authenticationRecord, error) { +func (p *publicClient) Authenticate(ctx context.Context, tro *policy.TokenRequestOptions) (AuthenticationRecord, error) { if tro == nil { tro = &policy.TokenRequestOptions{} } if len(tro.Scopes) == 0 { if p.defaultScope == nil { - return authenticationRecord{}, errScopeRequired + return AuthenticationRecord{}, errScopeRequired } tro.Scopes = p.defaultScope } client, mu, err := p.client(*tro) if err != nil { - return authenticationRecord{}, err + return AuthenticationRecord{}, err } mu.Lock() defer mu.Unlock() @@ -152,7 +152,7 @@ func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOpti return p.token(ar, err) } if p.opts.DisableAutomaticAuthentication { - return azcore.AccessToken{}, newauthenticationRequiredError(p.name, tro) + return azcore.AccessToken{}, newAuthenticationRequiredError(p.name, tro) } at, err := p.reqToken(ctx, client, tro) if err == nil { @@ -222,13 +222,13 @@ func (p *publicClient) client(tro policy.TokenRequestOptions) (msalPublicClient, } func (p *publicClient) newMSALClient(enableCAE bool) (msalPublicClient, error) { - cache, err := internal.NewCache(p.opts.TokenCachePersistenceOptions, enableCAE) + c, err := internal.ExportReplace(p.opts.Cache, enableCAE) if err != nil { return nil, err } o := []public.Option{ public.WithAuthority(runtime.JoinPaths(p.host, p.tenantID)), - public.WithCache(cache), + public.WithCache(c), public.WithHTTPClient(p), } if enableCAE { @@ -244,8 +244,7 @@ func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToke if err == nil { p.record, err = newAuthenticationRecord(ar) } else { - res := getResponseFromError(err) - err = newAuthenticationFailedError(p.name, err.Error(), res, err) + err = newAuthenticationFailedErrorFromMSAL(p.name, err) } return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 index a69bbce34c..1a07fede63 100644 --- 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 @@ -5,7 +5,19 @@ param ( [hashtable] $AdditionalParameters = @{}, - [hashtable] $DeploymentOutputs + [hashtable] $DeploymentOutputs, + + [Parameter(ParameterSetName = 'Provisioner', Mandatory = $true)] + [ValidateNotNullOrEmpty()] + [string] $TenantId, + + [Parameter()] + [ValidatePattern('^[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$')] + [string] $TestApplicationId, + + # Captures any arguments from eng/New-TestResources.ps1 not declared here (no parameter errors). + [Parameter(ValueFromRemainingArguments = $true)] + $RemainingArguments ) $ErrorActionPreference = 'Stop' @@ -16,14 +28,14 @@ if ($CI) { Write-Host "Skipping post-provisioning script because resources weren't deployed" return } - az login --service-principal -u $DeploymentOutputs['AZIDENTITY_CLIENT_ID'] -p $DeploymentOutputs['AZIDENTITY_CLIENT_SECRET'] --tenant $DeploymentOutputs['AZIDENTITY_TENANT_ID'] + az login --federated-token $env:OIDC_TOKEN --service-principal -t $TenantId -u $TestApplicationId az account set --subscription $DeploymentOutputs['AZIDENTITY_SUBSCRIPTION_ID'] } Write-Host "Building container" $image = "$($DeploymentOutputs['AZIDENTITY_ACR_LOGIN_SERVER'])/azidentity-managed-id-test" Set-Content -Path "$PSScriptRoot/Dockerfile" -Value @" -FROM mcr.microsoft.com/oss/go/microsoft/golang:latest as builder +FROM mcr.microsoft.com/oss/go/microsoft/golang:latest AS builder ENV GOARCH=amd64 GOWORK=off COPY . /azidentity WORKDIR /azidentity/testdata/managed-id-test @@ -53,9 +65,11 @@ az container create -g $rg -n $aciName --image $image ` --role "Storage Blob Data Reader" ` --scope $($DeploymentOutputs['AZIDENTITY_STORAGE_ID']) ` -e AZIDENTITY_STORAGE_NAME=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME']) ` - AZIDENTITY_STORAGE_NAME_USER_ASSIGNED=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) ` - AZIDENTITY_USER_ASSIGNED_IDENTITY=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` - FUNCTIONS_CUSTOMHANDLER_PORT=80 + AZIDENTITY_STORAGE_NAME_USER_ASSIGNED=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) ` + AZIDENTITY_USER_ASSIGNED_IDENTITY=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` + AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID']) ` + AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID']) ` + FUNCTIONS_CUSTOMHANDLER_PORT=80 Write-Host "##vso[task.setvariable variable=AZIDENTITY_ACI_NAME;]$aciName" # Azure Functions deployment: copy the Windows binary from the Docker image, deploy it in a zip diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep index 2a21652930..135feb0178 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep @@ -135,6 +135,14 @@ resource azfunc 'Microsoft.Web/sites@2021-03-01' = if (deployResources) { name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY' value: deployResources ? usermgdid.id : null } + { + name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID' + value: deployResources ? usermgdid.properties.clientId : null + } + { + name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID' + value: deployResources ? 
usermgdid.properties.principalId : null + } { name: 'AzureWebJobsStorage' value: 'DefaultEndpointsProtocol=https;AccountName=${deployResources ? sa.name : ''};EndpointSuffix=${deployResources ? environment().suffixes.storage : ''};AccountKey=${deployResources ? sa.listKeys().keys[0].value : ''}' @@ -217,3 +225,4 @@ output AZIDENTITY_STORAGE_NAME_USER_ASSIGNED string = deployResources ? saUserAs output AZIDENTITY_USER_ASSIGNED_IDENTITY string = deployResources ? usermgdid.id : '' output AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID string = deployResources ? usermgdid.properties.clientId : '' output AZIDENTITY_USER_ASSIGNED_IDENTITY_NAME string = deployResources ? usermgdid.name : '' +output AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID string = deployResources ? usermgdid.properties.principalId : '' diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go index 294ed81e95..740abd4709 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go @@ -25,18 +25,20 @@ type UsernamePasswordCredentialOptions struct { // application is registered. AdditionallyAllowedTenants []string - // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option // to enable the credential to use data from a previous authentication. - authenticationRecord authenticationRecord + AuthenticationRecord AuthenticationRecord + + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool - - // tokenCachePersistenceOptions enables persistent token caching when not nil. - tokenCachePersistenceOptions *tokenCachePersistenceOptions } // UsernamePasswordCredential authenticates a user with a password. 
Microsoft doesn't recommend this kind of authentication, @@ -54,13 +56,13 @@ func NewUsernamePasswordCredential(tenantID string, clientID string, username st options = &UsernamePasswordCredentialOptions{} } opts := publicClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, - Password: password, - Record: options.authenticationRecord, - TokenCachePersistenceOptions: options.tokenCachePersistenceOptions, - Username: username, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Cache: options.Cache, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + Password: password, + Record: options.AuthenticationRecord, + Username: username, } c, err := newPublicClient(tenantID, clientID, credNameUserPassword, opts) if err != nil { @@ -70,7 +72,7 @@ func NewUsernamePasswordCredential(tenantID string, clientID string, username st } // Authenticate the user. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. -func (c *UsernamePasswordCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { +func (c *UsernamePasswordCredential) Authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (AuthenticationRecord, error) { var err error ctx, endSpan := runtime.StartSpan(ctx, credNameUserPassword+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) defer func() { endSpan(err) }() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go index 4305b5d3d8..4fa22dcc12 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -14,5 +14,5 @@ const ( module = "github.com/Azure/azure-sdk-for-go/sdk/" + component // Version is the semantic version (see http://semver.org) of this module. - version = "v1.7.0" + version = "v1.8.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go index 3e43e788e9..6fecada2f8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go @@ -39,15 +39,24 @@ type WorkloadIdentityCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string + + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache + // ClientID of the service principal. Defaults to the value of the environment variable AZURE_CLIENT_ID. ClientID string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. 
Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + // TenantID of the service principal. Defaults to the value of the environment variable AZURE_TENANT_ID. TenantID string + // TokenFilePath is the path of a file containing a Kubernetes service account token. Defaults to the value of the // environment variable AZURE_FEDERATED_TOKEN_FILE. TokenFilePath string @@ -81,6 +90,7 @@ func NewWorkloadIdentityCredential(options *WorkloadIdentityCredentialOptions) ( w := WorkloadIdentityCredential{file: file, mtx: &sync.RWMutex{}} caco := ClientAssertionCredentialOptions{ AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Cache: options.Cache, ClientOptions: options.ClientOptions, DisableInstanceDiscovery: options.DisableInstanceDiscovery, } diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/LICENSE b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/README.md b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/README.md new file mode 100644 index 0000000000..9515ee5205 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/README.md @@ -0,0 +1,3 @@ +# GCP Resource detection library + +This is a library intended to be used by Upstream OpenTelemetry resource detectors. It exists within this repository to allow for integration testing of the detection functions in real GCP environments. 
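For reviewers unfamiliar with this newly vendored detector package, here is a minimal, editorial usage sketch (not part of the patch) built only from identifiers the added files declare below — gcp.NewDetector, Detector.CloudPlatform, the Platform constants, GKEClusterName, FaaSName, BareMetalSolutionInstanceID, and ProjectID. It assumes the module is importable at the vendored path shown in the diff.

package main

import (
	"fmt"

	"github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp"
)

func main() {
	// NewDetector and CloudPlatform are defined in the vendored detector.go added by this patch.
	d := gcp.NewDetector()
	switch d.CloudPlatform() {
	case gcp.GKE:
		// GKE-specific attribute from gke.go.
		name, err := d.GKEClusterName()
		fmt.Println("GKE cluster:", name, err)
	case gcp.CloudRun, gcp.CloudRunJob, gcp.CloudFunctions:
		// Serverless attributes from faas.go.
		name, err := d.FaaSName()
		fmt.Println("serverless service:", name, err)
	case gcp.BareMetalSolution:
		// Bare Metal Solution attributes from bms.go.
		id, err := d.BareMetalSolutionInstanceID()
		fmt.Println("BMS instance:", id, err)
	default:
		// Falls back to the project ID from the metadata server (detector.go).
		project, err := d.ProjectID()
		fmt.Println("project:", project, err)
	}
}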
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go new file mode 100644 index 0000000000..0a36807033 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go @@ -0,0 +1,76 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gcp + +const ( + // See https://cloud.google.com/appengine/docs/flexible/python/migrating#modules + // for the environment variables available in GAE environments. + gaeServiceEnv = "GAE_SERVICE" + gaeVersionEnv = "GAE_VERSION" + gaeInstanceEnv = "GAE_INSTANCE" + gaeEnv = "GAE_ENV" + gaeStandard = "standard" +) + +func (d *Detector) onAppEngineStandard() bool { + // See https://cloud.google.com/appengine/docs/standard/go111/runtime#environment_variables. + env, found := d.os.LookupEnv(gaeEnv) + return found && env == gaeStandard +} + +func (d *Detector) onAppEngine() bool { + _, found := d.os.LookupEnv(gaeServiceEnv) + return found +} + +// AppEngineServiceName returns the service name of the app engine service. +func (d *Detector) AppEngineServiceName() (string, error) { + if name, found := d.os.LookupEnv(gaeServiceEnv); found { + return name, nil + } + return "", errEnvVarNotFound +} + +// AppEngineServiceVersion returns the service version of the app engine service. +func (d *Detector) AppEngineServiceVersion() (string, error) { + if version, found := d.os.LookupEnv(gaeVersionEnv); found { + return version, nil + } + return "", errEnvVarNotFound +} + +// AppEngineServiceInstance returns the service instance of the app engine service. +func (d *Detector) AppEngineServiceInstance() (string, error) { + if instanceID, found := d.os.LookupEnv(gaeInstanceEnv); found { + return instanceID, nil + } + return "", errEnvVarNotFound +} + +// AppEngineFlexAvailabilityZoneAndRegion returns the zone and region in which this program is running. +func (d *Detector) AppEngineFlexAvailabilityZoneAndRegion() (string, string, error) { + // The GCE metadata server is available on App Engine Flex. + return d.GCEAvailabilityZoneAndRegion() +} + +// AppEngineStandardAvailabilityZone returns the zone the app engine service is running in. +func (d *Detector) AppEngineStandardAvailabilityZone() (string, error) { + return d.metadata.Zone() +} + +// AppEngineStandardCloudRegion returns the region the app engine service is running in. 
+func (d *Detector) AppEngineStandardCloudRegion() (string, error) { + return d.FaaSCloudRegion() +} diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go new file mode 100644 index 0000000000..d3992a4f7e --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go @@ -0,0 +1,55 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gcp + +const ( + bmsProjectIDEnv = "BMS_PROJECT_ID" + bmsRegionEnv = "BMS_REGION" + bmsInstanceIDEnv = "BMS_INSTANCE_ID" +) + +// onBareMetalSolution checks if the code is running on a Google Cloud Bare Metal Solution (BMS) by verifying +// the presence and non-empty values of BMS_PROJECT_ID, BMS_REGION, and BMS_INSTANCE_ID environment variables. +// For more information on Google Cloud Bare Metal Solution, see: https://cloud.google.com/bare-metal/docs +func (d *Detector) onBareMetalSolution() bool { + projectID, projectIDExists := d.os.LookupEnv(bmsProjectIDEnv) + region, regionExists := d.os.LookupEnv(bmsRegionEnv) + instanceID, instanceIDExists := d.os.LookupEnv(bmsInstanceIDEnv) + return projectIDExists && regionExists && instanceIDExists && projectID != "" && region != "" && instanceID != "" +} + +// BareMetalSolutionInstanceID returns the instance ID from the BMS_INSTANCE_ID environment variable. +func (d *Detector) BareMetalSolutionInstanceID() (string, error) { + if instanceID, found := d.os.LookupEnv(bmsInstanceIDEnv); found { + return instanceID, nil + } + return "", errEnvVarNotFound +} + +// BareMetalSolutionCloudRegion returns the region from the BMS_REGION environment variable. +func (d *Detector) BareMetalSolutionCloudRegion() (string, error) { + if region, found := d.os.LookupEnv(bmsRegionEnv); found { + return region, nil + } + return "", errEnvVarNotFound +} + +// BareMetalSolutionProjectID returns the project ID from the BMS_PROJECT_ID environment variable. +func (d *Detector) BareMetalSolutionProjectID() (string, error) { + if project, found := d.os.LookupEnv(bmsProjectIDEnv); found { + return project, nil + } + return "", errEnvVarNotFound +} diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go new file mode 100644 index 0000000000..2cc62de097 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go @@ -0,0 +1,102 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gcp + +import ( + "errors" + "os" + + "cloud.google.com/go/compute/metadata" +) + +var errEnvVarNotFound = errors.New("environment variable not found") + +// NewDetector returns a *Detector which can get detect the platform, +// and fetch attributes of the platform on which it is running. +func NewDetector() *Detector { + return &Detector{metadata: metadata.NewClient(nil), os: realOSProvider{}} +} + +type Platform int64 + +const ( + UnknownPlatform Platform = iota + GKE + GCE + CloudRun + CloudRunJob + CloudFunctions + AppEngineStandard + AppEngineFlex + BareMetalSolution +) + +// CloudPlatform returns the platform on which this program is running. +func (d *Detector) CloudPlatform() Platform { + switch { + case d.onBareMetalSolution(): + return BareMetalSolution + case d.onGKE(): + return GKE + case d.onCloudFunctions(): + return CloudFunctions + case d.onCloudRun(): + return CloudRun + case d.onCloudRunJob(): + return CloudRunJob + case d.onAppEngineStandard(): + return AppEngineStandard + case d.onAppEngine(): + return AppEngineFlex + case d.onGCE(): + return GCE + } + return UnknownPlatform +} + +// ProjectID returns the ID of the project in which this program is running. +func (d *Detector) ProjectID() (string, error) { + return d.metadata.ProjectID() +} + +// Detector collects resource information for all GCP platforms. +type Detector struct { + metadata metadataProvider + os osProvider +} + +// metadataProvider contains the subset of the metadata.Client functions used +// by this resource Detector to allow testing with a fake implementation. +type metadataProvider interface { + ProjectID() (string, error) + InstanceID() (string, error) + Get(string) (string, error) + InstanceName() (string, error) + Hostname() (string, error) + Zone() (string, error) + InstanceAttributeValue(string) (string, error) +} + +// osProvider contains the subset of the os package functions used by. +type osProvider interface { + LookupEnv(string) (string, bool) +} + +// realOSProvider uses the os package to lookup env vars. +type realOSProvider struct{} + +func (realOSProvider) LookupEnv(env string) (string, bool) { + return os.LookupEnv(env) +} diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go new file mode 100644 index 0000000000..9277608dd6 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go @@ -0,0 +1,105 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package gcp + +import ( + "strings" +) + +const ( + // Cloud Functions env vars: + // https://cloud.google.com/functions/docs/configuring/env-var#newer_runtimes + // + // Cloud Run env vars: + // https://cloud.google.com/run/docs/container-contract#services-env-vars + // + // Cloud Run jobs env vars: + // https://cloud.google.com/run/docs/container-contract#jobs-env-vars + cloudFunctionsTargetEnv = "FUNCTION_TARGET" + cloudRunConfigurationEnv = "K_CONFIGURATION" + cloudRunJobsEnv = "CLOUD_RUN_JOB" + faasServiceEnv = "K_SERVICE" + faasRevisionEnv = "K_REVISION" + cloudRunJobExecutionEnv = "CLOUD_RUN_EXECUTION" + cloudRunJobTaskIndexEnv = "CLOUD_RUN_TASK_INDEX" + regionMetadataAttr = "instance/region" +) + +func (d *Detector) onCloudFunctions() bool { + _, found := d.os.LookupEnv(cloudFunctionsTargetEnv) + return found +} + +func (d *Detector) onCloudRun() bool { + _, found := d.os.LookupEnv(cloudRunConfigurationEnv) + return found +} + +func (d *Detector) onCloudRunJob() bool { + _, found := d.os.LookupEnv(cloudRunJobsEnv) + return found +} + +// FaaSName returns the name of the Cloud Run, Cloud Run jobs or Cloud Functions service. +func (d *Detector) FaaSName() (string, error) { + if name, found := d.os.LookupEnv(faasServiceEnv); found { + return name, nil + } + if name, found := d.os.LookupEnv(cloudRunJobsEnv); found { + return name, nil + } + return "", errEnvVarNotFound +} + +// FaaSVersion returns the revision of the Cloud Run or Cloud Functions service. +func (d *Detector) FaaSVersion() (string, error) { + if version, found := d.os.LookupEnv(faasRevisionEnv); found { + return version, nil + } + return "", errEnvVarNotFound +} + +// CloudRunJobExecution returns the execution id of the Cloud Run jobs. +func (d *Detector) CloudRunJobExecution() (string, error) { + if eid, found := d.os.LookupEnv(cloudRunJobExecutionEnv); found { + return eid, nil + } + return "", errEnvVarNotFound +} + +// CloudRunJobTaskIndex returns the task index for the execution of the Cloud Run jobs. +func (d *Detector) CloudRunJobTaskIndex() (string, error) { + if tidx, found := d.os.LookupEnv(cloudRunJobTaskIndexEnv); found { + return tidx, nil + } + return "", errEnvVarNotFound +} + +// FaaSID returns the instance id of the Cloud Run or Cloud Function. +func (d *Detector) FaaSID() (string, error) { + return d.metadata.InstanceID() +} + +// FaaSCloudRegion detects region from the metadata server. +// It is in the format /projects//regions/. +// +// https://cloud.google.com/run/docs/reference/container-contract#metadata-server +func (d *Detector) FaaSCloudRegion() (string, error) { + region, err := d.metadata.Get(regionMetadataAttr) + if err != nil { + return "", err + } + return region[strings.LastIndex(region, "/")+1:], nil +} diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go new file mode 100644 index 0000000000..37259fc451 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go @@ -0,0 +1,75 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gcp + +import ( + "fmt" + "strings" +) + +// See the available GCE instance metadata: +// https://cloud.google.com/compute/docs/metadata/default-metadata-values#vm_instance_metadata +const machineTypeMetadataAttr = "instance/machine-type" + +func (d *Detector) onGCE() bool { + _, err := d.metadata.Get(machineTypeMetadataAttr) + return err == nil +} + +// GCEHostType returns the machine type of the instance on which this program is running. +func (d *Detector) GCEHostType() (string, error) { + return d.metadata.Get(machineTypeMetadataAttr) +} + +// GCEHostID returns the instance ID of the instance on which this program is running. +func (d *Detector) GCEHostID() (string, error) { + return d.metadata.InstanceID() +} + +// GCEHostName returns the instance name of the instance on which this program is running. +// Recommended to use GCEInstanceName() or GCEInstanceHostname() to more accurately reflect which +// value is returned. +func (d *Detector) GCEHostName() (string, error) { + return d.metadata.InstanceName() +} + +// GCEInstanceName returns the instance name of the instance on which this program is running. +// This is the value visible in the Cloud Console UI, and the prefix for the default hostname +// of the instance as defined by the default internal DNS name (see https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). +func (d *Detector) GCEInstanceName() (string, error) { + return d.metadata.InstanceName() +} + +// GCEInstanceHostname returns the full value of the default or custom hostname of the instance +// on which this program is running. See https://cloud.google.com/compute/docs/instances/custom-hostname-vm. +func (d *Detector) GCEInstanceHostname() (string, error) { + return d.metadata.Hostname() +} + +// GCEAvailabilityZoneAndRegion returns the zone and region in which this program is running. +func (d *Detector) GCEAvailabilityZoneAndRegion() (string, string, error) { + zone, err := d.metadata.Zone() + if err != nil { + return "", "", err + } + if zone == "" { + return "", "", fmt.Errorf("no zone detected from GCE metadata server") + } + splitZone := strings.SplitN(zone, "-", 3) + if len(splitZone) != 3 { + return "", "", fmt.Errorf("zone was not in the expected format: country-region-zone. Got %v", zone) + } + return zone, strings.Join(splitZone[0:2], "-"), nil +} diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go new file mode 100644 index 0000000000..67ed972b23 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go @@ -0,0 +1,70 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gcp + +import ( + "fmt" + "strings" +) + +const ( + // If the kubernetes.default.svc service exists in the cluster, + // then the KUBERNETES_SERVICE_HOST env var will be populated. + // Use this as an indication that we are running on kubernetes. + k8sServiceHostEnv = "KUBERNETES_SERVICE_HOST" + // See the available GKE metadata: + // https://cloud.google.com/kubernetes-engine/docs/concepts/workload-identity#instance_metadata + clusterNameMetadataAttr = "cluster-name" + clusterLocationMetadataAttr = "cluster-location" +) + +func (d *Detector) onGKE() bool { + _, found := d.os.LookupEnv(k8sServiceHostEnv) + return found +} + +// GKEHostID returns the instance ID of the instance on which this program is running. +func (d *Detector) GKEHostID() (string, error) { + return d.GCEHostID() +} + +// GKEClusterName returns the name if the GKE cluster in which this program is running. +func (d *Detector) GKEClusterName() (string, error) { + return d.metadata.InstanceAttributeValue(clusterNameMetadataAttr) +} + +type LocationType int64 + +const ( + UndefinedLocation LocationType = iota + Zone + Region +) + +// GKEAvailabilityZoneOrRegion returns the location of the cluster and whether the cluster is zonal or regional. +func (d *Detector) GKEAvailabilityZoneOrRegion() (string, LocationType, error) { + clusterLocation, err := d.metadata.InstanceAttributeValue(clusterLocationMetadataAttr) + if err != nil { + return "", UndefinedLocation, err + } + switch strings.Count(clusterLocation, "-") { + case 1: + return clusterLocation, Region, nil + case 2: + return clusterLocation, Zone, nil + default: + return "", UndefinedLocation, fmt.Errorf("unrecognized format for cluster location: %v", clusterLocation) + } +} diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/LICENSE b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/README.md b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/README.md
new file mode 100644
index 0000000000..c77d5eb154
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/README.md
@@ -0,0 +1,37 @@
+# OpenTelemetry Google Cloud Monitoring Exporter
+
+[![Docs](https://godoc.org/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric?status.svg)](https://pkg.go.dev/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric)
+[![Apache License][license-image]][license-url]
+
+The OpenTelemetry Google Cloud Monitoring Exporter allows users to send collected metrics to Google Cloud.
+
+[Google Cloud Monitoring](https://cloud.google.com/monitoring) provides visibility into the performance, uptime, and overall health of cloud-powered applications. It collects metrics, events, and metadata from Google Cloud, Amazon Web Services, hosted uptime probes, application instrumentation, and a variety of common application components including Cassandra, Nginx, Apache Web Server, Elasticsearch, and many others. Operations ingests that data and generates insights via dashboards, charts, and alerts. Cloud Monitoring alerting helps you collaborate by integrating with Slack, PagerDuty, and more.
+
+## Setup
+
+Google Cloud Monitoring is a managed service provided by Google Cloud Platform. It requires a "Workspace" to be set up in advance; a guide to creating a new Workspace is available in [the official documentation](https://cloud.google.com/monitoring/workspaces/create).
+
+## Authentication
+
+The Google Cloud Monitoring exporter depends upon [`google.FindDefaultCredentials`](https://pkg.go.dev/golang.org/x/oauth2/google?tab=doc#FindDefaultCredentials), so a service account is detected automatically by default, and a custom credentials file (the so-called `service_account_key.json`) can also be picked up under specific conditions. Quoting from the documentation of `google.FindDefaultCredentials`:
+
+* A JSON file whose path is specified by the `GOOGLE_APPLICATION_CREDENTIALS` environment variable.
+* A JSON file in a location known to the gcloud command-line tool. On Windows, this is `%APPDATA%/gcloud/application_default_credentials.json`. On other systems, `$HOME/.config/gcloud/application_default_credentials.json`.
+
+When running code locally, you may need to specify a Google Project ID in addition to `GOOGLE_APPLICATION_CREDENTIALS`. This is best done using an environment variable (e.g.
`GOOGLE_CLOUD_PROJECT`) and the `metric.WithProjectID` method, e.g.: + +```golang +projectID := os.Getenv("GOOGLE_CLOUD_PROJECT") +opts := []mexporter.Option{ + mexporter.WithProjectID(projectID), +} +``` + +## Useful links + +* For more information on OpenTelemetry, visit: https://opentelemetry.io/ +* For more about OpenTelemetry Go, visit: https://github.com/open-telemetry/opentelemetry-go +* Learn more about Google Cloud Monitoring at https://cloud.google.com/monitoring + +[license-url]: https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/blob/main/LICENSE +[license-image]: https://img.shields.io/badge/license-Apache_2.0-green.svg?style=flat diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/cloudmonitoring.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/cloudmonitoring.go new file mode 100644 index 0000000000..90dfcb344e --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/cloudmonitoring.go @@ -0,0 +1,49 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric + +import ( + "context" + "errors" + "fmt" + + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + + monitoring "cloud.google.com/go/monitoring/apiv3/v2" + "golang.org/x/oauth2/google" +) + +// New creates a new Exporter thats implements metric.Exporter. +func New(opts ...Option) (sdkmetric.Exporter, error) { + o := options{ + context: context.Background(), + resourceAttributeFilter: DefaultResourceAttributesFilter, + } + for _, opt := range opts { + opt(&o) + } + + if o.projectID == "" { + creds, err := google.FindDefaultCredentials(o.context, monitoring.DefaultAuthScopes()...) + if err != nil { + return nil, fmt.Errorf("failed to find Google Cloud credentials: %v", err) + } + if creds.ProjectID == "" { + return nil, errors.New("google cloud monitoring: no project found with application default credentials") + } + o.projectID = creds.ProjectID + } + return newMetricExporter(&o) +} diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/constants.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/constants.go new file mode 100644 index 0000000000..57329a4bdc --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/constants.go @@ -0,0 +1,97 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package metric + +// TODO: remove this file when the constants are ready in the Go SDK + +// Mappings for the well-known OpenTelemetry resource label keys +// to applicable Monitored Resource label keys. +// A uniquely identifying name for the Kubernetes cluster. Kubernetes +// does not have cluster names as an internal concept so this may be +// set to any meaningful value within the environment. For example, +// GKE clusters have a name which can be used for this label. +const ( + // Deprecated: use semconv.CloudProviderKey instead. + CloudKeyProvider = "cloud.provider" + // Deprecated: use semconv.CloudAccountIDKey instead. + CloudKeyAccountID = "cloud.account.id" + // Deprecated: use semconv.CloudRegionKey instead. + CloudKeyRegion = "cloud.region" + // Deprecated: use semconv.CloudAvailabilityZoneKey instead. + CloudKeyZone = "cloud.availability_zone" + + // Deprecated: use semconv.ServiceNamespaceKey instead. + ServiceKeyNamespace = "service.namespace" + // Deprecated: use semconv.ServiceInstanceIDKey instead. + ServiceKeyInstanceID = "service.instance.id" + // Deprecated: use semconv.ServiceNameKey instead. + ServiceKeyName = "service.name" + + // Deprecated: HostType is not needed. + HostType = "host" + // A uniquely identifying name for the host. + // Deprecated: use semconv.HostNameKey instead. + HostKeyName = "host.name" + // A hostname as returned by the 'hostname' command on host machine. + // Deprecated: HostKeyHostName is not needed. + HostKeyHostName = "host.hostname" + // Deprecated: use semconv.HostIDKey instead. + HostKeyID = "host.id" + // Deprecated: use semconv.HostTypeKey instead. + HostKeyType = "host.type" + + // A uniquely identifying name for the Container. + // Deprecated: use semconv.ContainerNameKey instead. + ContainerKeyName = "container.name" + // Deprecated: use semconv.ContainerImageNameKey instead. + ContainerKeyImageName = "container.image.name" + // Deprecated: use semconv.ContainerImageTagKey instead. + ContainerKeyImageTag = "container.image.tag" + + // Cloud Providers + // Deprecated: use semconv.CloudProviderAWS instead. + CloudProviderAWS = "aws" + // Deprecated: use semconv.CloudProviderGCP instead. + CloudProviderGCP = "gcp" + // Deprecated: use semconv.CloudProviderAzure instead. + CloudProviderAZURE = "azure" + + // Deprecated: Use "k8s" instead. This should not be needed. + K8S = "k8s" + // Deprecated: use semconv.K8SClusterNameKey instead. + K8SKeyClusterName = "k8s.cluster.name" + // Deprecated: use semconv.K8SNamespaceNameKey instead. + K8SKeyNamespaceName = "k8s.namespace.name" + // Deprecated: use semconv.K8SPodNameKey instead. + K8SKeyPodName = "k8s.pod.name" + // Deprecated: use semconv.K8SDeploymentNameKey instead. + K8SKeyDeploymentName = "k8s.deployment.name" + + // Monitored Resources types + // Deprecated: Use "k8s_container" instead. + K8SContainer = "k8s_container" + // Deprecated: Use "k8s_node" instead. + K8SNode = "k8s_node" + // Deprecated: Use "k8s_pod" instead. + K8SPod = "k8s_pod" + // Deprecated: Use "k8s_cluster" instead. + K8SCluster = "k8s_cluster" + // Deprecated: Use "gce_instance" instead. + GCEInstance = "gce_instance" + // Deprecated: Use "aws_ec2_instance" instead. + AWSEC2Instance = "aws_ec2_instance" + // Deprecated: Use "generic_task" instead. 
+ GenericTask = "generic_task" +) diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/error.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/error.go new file mode 100644 index 0000000000..974c0af950 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/error.go @@ -0,0 +1,32 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric + +import ( + "errors" + "fmt" +) + +var ( + errBlankProjectID = errors.New("expecting a non-blank ProjectID") +) + +type errUnexpectedAggregationKind struct { + kind string +} + +func (e errUnexpectedAggregationKind) Error() string { + return fmt.Sprintf("the metric kind is unexpected: %v", e.kind) +} diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/metric.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/metric.go new file mode 100644 index 0000000000..ba0012e25a --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/metric.go @@ -0,0 +1,890 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric + +import ( + "bytes" + "context" + "encoding/hex" + "errors" + "fmt" + "math" + "net/url" + "reflect" + "sort" + "strings" + "sync" + "time" + "unicode" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/trace" + + monitoring "cloud.google.com/go/monitoring/apiv3/v2" + "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + "github.com/googleapis/gax-go/v2" + "google.golang.org/api/option" + "google.golang.org/genproto/googleapis/api/distribution" + "google.golang.org/genproto/googleapis/api/label" + googlemetricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + "google.golang.org/grpc" + "google.golang.org/grpc/encoding/gzip" + "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping" +) + +const ( + // The number of timeserieses to send to GCM in a single request. 
This + // is a hard limit in the GCM API, so we never want to exceed 200. + sendBatchSize = 200 + + cloudMonitoringMetricDescriptorNameFormat = "workload.googleapis.com/%s" + platformMappingMonitoredResourceKey = "gcp.resource_type" +) + +// key is used to judge the uniqueness of the record descriptor. +type key struct { + name string + libraryname string +} + +func keyOf(metrics metricdata.Metrics, library instrumentation.Library) key { + return key{ + name: metrics.Name, + libraryname: library.Name, + } +} + +// metricExporter is the implementation of OpenTelemetry metric exporter for +// Google Cloud Monitoring. +type metricExporter struct { + o *options + shutdown chan struct{} + // mdCache is the cache to hold MetricDescriptor to avoid creating duplicate MD. + mdCache map[key]*googlemetricpb.MetricDescriptor + client *monitoring.MetricClient + mdLock sync.RWMutex + shutdownOnce sync.Once +} + +// ForceFlush does nothing, the exporter holds no state. +func (e *metricExporter) ForceFlush(ctx context.Context) error { return ctx.Err() } + +// Shutdown shuts down the client connections. +func (e *metricExporter) Shutdown(ctx context.Context) error { + err := errShutdown + e.shutdownOnce.Do(func() { + close(e.shutdown) + err = errors.Join(ctx.Err(), e.client.Close()) + }) + return err +} + +// newMetricExporter returns an exporter that uploads OTel metric data to Google Cloud Monitoring. +func newMetricExporter(o *options) (*metricExporter, error) { + if strings.TrimSpace(o.projectID) == "" { + return nil, errBlankProjectID + } + + clientOpts := append([]option.ClientOption{option.WithGRPCDialOption(grpc.WithUserAgent(userAgent))}, o.monitoringClientOptions...) + ctx := o.context + if ctx == nil { + ctx = context.Background() + } + client, err := monitoring.NewMetricClient(ctx, clientOpts...) + if err != nil { + return nil, err + } + + if o.compression == "gzip" { + client.CallOptions.GetMetricDescriptor = append(client.CallOptions.GetMetricDescriptor, + gax.WithGRPCOptions(grpc.UseCompressor(gzip.Name))) + client.CallOptions.CreateMetricDescriptor = append(client.CallOptions.CreateMetricDescriptor, + gax.WithGRPCOptions(grpc.UseCompressor(gzip.Name))) + client.CallOptions.CreateTimeSeries = append(client.CallOptions.CreateTimeSeries, + gax.WithGRPCOptions(grpc.UseCompressor(gzip.Name))) + client.CallOptions.CreateServiceTimeSeries = append(client.CallOptions.CreateServiceTimeSeries, + gax.WithGRPCOptions(grpc.UseCompressor(gzip.Name))) + } + + cache := map[key]*googlemetricpb.MetricDescriptor{} + e := &metricExporter{ + o: o, + mdCache: cache, + client: client, + shutdown: make(chan struct{}), + } + return e, nil +} + +var errShutdown = fmt.Errorf("exporter is shutdown") + +// Export exports OpenTelemetry Metrics to Google Cloud Monitoring. +func (me *metricExporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error { + select { + case <-me.shutdown: + return errShutdown + default: + } + + if me.o.destinationProjectQuota { + ctx = metadata.NewOutgoingContext(ctx, metadata.New(map[string]string{"x-goog-user-project": strings.TrimPrefix(me.o.projectID, "projects/")})) + } + return errors.Join( + me.exportMetricDescriptor(ctx, rm), + me.exportTimeSeries(ctx, rm), + ) +} + +// Temporality returns the Temporality to use for an instrument kind. +func (me *metricExporter) Temporality(ik metric.InstrumentKind) metricdata.Temporality { + return metric.DefaultTemporalitySelector(ik) +} + +// Aggregation returns the Aggregation to use for an instrument kind. 
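+// This exporter keeps the SDK defaults, so no custom aggregation is applied for any instrument kind.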
+func (me *metricExporter) Aggregation(ik metric.InstrumentKind) metric.Aggregation { + return metric.DefaultAggregationSelector(ik) +} + +// exportMetricDescriptor create MetricDescriptor from the record +// if the descriptor is not registered in Cloud Monitoring yet. +func (me *metricExporter) exportMetricDescriptor(ctx context.Context, rm *metricdata.ResourceMetrics) error { + // We only send metric descriptors if we're configured *and* we're not sending service timeseries. + if me.o.disableCreateMetricDescriptors { + return nil + } + + me.mdLock.Lock() + defer me.mdLock.Unlock() + mds := make(map[key]*googlemetricpb.MetricDescriptor) + extraLabels := me.extraLabelsFromResource(rm.Resource) + for _, scope := range rm.ScopeMetrics { + for _, metrics := range scope.Metrics { + k := keyOf(metrics, scope.Scope) + + if _, ok := me.mdCache[k]; ok { + continue + } + + if _, localok := mds[k]; !localok { + md := me.recordToMdpb(metrics, extraLabels) + mds[k] = md + } + } + } + + // TODO: This process is synchronous and blocks longer time if records in cps + // have many different descriptors. In the cps.ForEach above, it should spawn + // goroutines to send CreateMetricDescriptorRequest asynchronously in the case + // the descriptor does not exist in global cache (me.mdCache). + // See details in #26. + var errs []error + for kmd, md := range mds { + err := me.createMetricDescriptorIfNeeded(ctx, md) + if err == nil { + me.mdCache[kmd] = md + } + errs = append(errs, err) + } + return errors.Join(errs...) +} + +func (me *metricExporter) createMetricDescriptorIfNeeded(ctx context.Context, md *googlemetricpb.MetricDescriptor) error { + mdReq := &monitoringpb.GetMetricDescriptorRequest{ + Name: fmt.Sprintf("projects/%s/metricDescriptors/%s", me.o.projectID, md.Type), + } + _, err := me.client.GetMetricDescriptor(ctx, mdReq) + if err == nil { + // If the metric descriptor already exists, skip the CreateMetricDescriptor call. + // Metric descriptors cannot be updated without deleting them first, so there + // isn't anything we can do here: + // https://cloud.google.com/monitoring/custom-metrics/creating-metrics#md-modify + return nil + } + req := &monitoringpb.CreateMetricDescriptorRequest{ + Name: fmt.Sprintf("projects/%s", me.o.projectID), + MetricDescriptor: md, + } + _, err = me.client.CreateMetricDescriptor(ctx, req) + return err +} + +// exportTimeSeries create TimeSeries from the records in cps. +// res should be the common resource among all TimeSeries, such as instance id, application name and so on. +func (me *metricExporter) exportTimeSeries(ctx context.Context, rm *metricdata.ResourceMetrics) error { + tss, err := me.recordsToTspbs(rm) + if len(tss) == 0 { + return err + } + + name := fmt.Sprintf("projects/%s", me.o.projectID) + + errs := []error{err} + for i := 0; i < len(tss); i += sendBatchSize { + j := i + sendBatchSize + if j >= len(tss) { + j = len(tss) + } + + // TODO: When this exporter is rewritten, support writing to multiple + // projects based on the "gcp.project.id" resource. + req := &monitoringpb.CreateTimeSeriesRequest{ + Name: name, + TimeSeries: tss[i:j], + } + if me.o.createServiceTimeSeries { + errs = append(errs, me.client.CreateServiceTimeSeries(ctx, req)) + } else { + errs = append(errs, me.client.CreateTimeSeries(ctx, req)) + } + } + + return errors.Join(errs...) 
+} + +func (me *metricExporter) extraLabelsFromResource(res *resource.Resource) *attribute.Set { + set, _ := attribute.NewSetWithFiltered(res.Attributes(), me.o.resourceAttributeFilter) + return &set +} + +// descToMetricType converts descriptor to MetricType proto type. +// Basically this returns default value ("workload.googleapis.com/[metric type]"). +func (me *metricExporter) descToMetricType(desc metricdata.Metrics) string { + if formatter := me.o.metricDescriptorTypeFormatter; formatter != nil { + return formatter(desc) + } + return fmt.Sprintf(cloudMonitoringMetricDescriptorNameFormat, desc.Name) +} + +// metricTypeToDisplayName takes a GCM metric type, like (workload.googleapis.com/MyCoolMetric) and returns the display name. +func metricTypeToDisplayName(mURL string) string { + // strip domain, keep path after domain. + u, err := url.Parse(fmt.Sprintf("metrics://%s", mURL)) + if err != nil || u.Path == "" { + return mURL + } + return strings.TrimLeft(u.Path, "/") +} + +// recordToMdpb extracts data and converts them to googlemetricpb.MetricDescriptor. +func (me *metricExporter) recordToMdpb(metrics metricdata.Metrics, extraLabels *attribute.Set) *googlemetricpb.MetricDescriptor { + name := metrics.Name + typ := me.descToMetricType(metrics) + kind, valueType := recordToMdpbKindType(metrics.Data) + + // Detailed explanations on MetricDescriptor proto is not documented on + // generated Go packages. Refer to the original proto file. + // https://github.com/googleapis/googleapis/blob/50af053/google/api/metric.proto#L33 + return &googlemetricpb.MetricDescriptor{ + Name: name, + DisplayName: metricTypeToDisplayName(typ), + Type: typ, + MetricKind: kind, + ValueType: valueType, + Unit: string(metrics.Unit), + Description: metrics.Description, + Labels: labelDescriptors(metrics, extraLabels), + } +} + +func labelDescriptors(metrics metricdata.Metrics, extraLabels *attribute.Set) []*label.LabelDescriptor { + labels := []*label.LabelDescriptor{} + seenKeys := map[string]struct{}{} + addAttributes := func(attr *attribute.Set) { + iter := attr.Iter() + for iter.Next() { + kv := iter.Attribute() + // Skip keys that have already been set + if _, ok := seenKeys[normalizeLabelKey(string(kv.Key))]; ok { + continue + } + labels = append(labels, &label.LabelDescriptor{ + Key: normalizeLabelKey(string(kv.Key)), + }) + seenKeys[normalizeLabelKey(string(kv.Key))] = struct{}{} + } + } + addAttributes(extraLabels) + switch a := metrics.Data.(type) { + case metricdata.Gauge[int64]: + for _, pt := range a.DataPoints { + addAttributes(&pt.Attributes) + } + case metricdata.Gauge[float64]: + for _, pt := range a.DataPoints { + addAttributes(&pt.Attributes) + } + case metricdata.Sum[int64]: + for _, pt := range a.DataPoints { + addAttributes(&pt.Attributes) + } + case metricdata.Sum[float64]: + for _, pt := range a.DataPoints { + addAttributes(&pt.Attributes) + } + case metricdata.Histogram[float64]: + for _, pt := range a.DataPoints { + addAttributes(&pt.Attributes) + } + case metricdata.Histogram[int64]: + for _, pt := range a.DataPoints { + addAttributes(&pt.Attributes) + } + } + return labels +} + +type attributes struct { + attrs attribute.Set +} + +func (attrs *attributes) GetString(key string) (string, bool) { + value, ok := attrs.attrs.Value(attribute.Key(key)) + return value.AsString(), ok +} + +// resourceToMonitoredResourcepb converts resource in OTel to MonitoredResource +// proto type for Cloud Monitoring. 
+// +// https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.monitoredResourceDescriptors +func (me *metricExporter) resourceToMonitoredResourcepb(res *resource.Resource) *monitoredrespb.MonitoredResource { + platformMrType, platformMappingRequested := res.Set().Value(platformMappingMonitoredResourceKey) + + // check if platform mapping is requested and possible + if platformMappingRequested && platformMrType.AsString() == me.o.monitoredResourceDescription.mrType { + // assemble attributes required to construct this MR + attributeMap := make(map[string]string) + for expectedLabel := range me.o.monitoredResourceDescription.mrLabels { + value, found := res.Set().Value(attribute.Key(expectedLabel)) + if found { + attributeMap[expectedLabel] = value.AsString() + } + } + return &monitoredrespb.MonitoredResource{ + Type: platformMrType.AsString(), + Labels: attributeMap, + } + } + + gmr := resourcemapping.ResourceAttributesToMonitoringMonitoredResource(&attributes{ + attrs: attribute.NewSet(res.Attributes()...), + }) + newLabels := make(map[string]string, len(gmr.Labels)) + for k, v := range gmr.Labels { + newLabels[k] = sanitizeUTF8(v) + } + mr := &monitoredrespb.MonitoredResource{ + Type: gmr.Type, + Labels: newLabels, + } + return mr +} + +// recordToMdpbKindType return the mapping from OTel's record descriptor to +// Cloud Monitoring's MetricKind and ValueType. +func recordToMdpbKindType(a metricdata.Aggregation) (googlemetricpb.MetricDescriptor_MetricKind, googlemetricpb.MetricDescriptor_ValueType) { + switch agg := a.(type) { + case metricdata.Gauge[int64]: + return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_INT64 + case metricdata.Gauge[float64]: + return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DOUBLE + case metricdata.Sum[int64]: + if agg.IsMonotonic { + return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_INT64 + } + return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_INT64 + case metricdata.Sum[float64]: + if agg.IsMonotonic { + return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DOUBLE + } + return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DOUBLE + case metricdata.Histogram[int64], metricdata.Histogram[float64]: + return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DISTRIBUTION + default: + return googlemetricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, googlemetricpb.MetricDescriptor_VALUE_TYPE_UNSPECIFIED + } +} + +// recordToMpb converts data from records to Metric proto type for Cloud Monitoring. +func (me *metricExporter) recordToMpb(metrics metricdata.Metrics, attributes attribute.Set, library instrumentation.Library, extraLabels *attribute.Set) *googlemetricpb.Metric { + me.mdLock.RLock() + defer me.mdLock.RUnlock() + k := keyOf(metrics, library) + md, ok := me.mdCache[k] + if !ok { + md = me.recordToMdpb(metrics, extraLabels) + } + + labels := make(map[string]string) + addAttributes := func(attr *attribute.Set) { + iter := attr.Iter() + for iter.Next() { + kv := iter.Attribute() + labels[normalizeLabelKey(string(kv.Key))] = sanitizeUTF8(kv.Value.Emit()) + } + } + addAttributes(extraLabels) + addAttributes(&attributes) + + return &googlemetricpb.Metric{ + Type: md.Type, + Labels: labels, + } +} + +// recordToTspb converts record to TimeSeries proto type with common resource. +// ref. 
https://cloud.google.com/monitoring/api/ref_v3/rest/v3/TimeSeries +func (me *metricExporter) recordToTspb(m metricdata.Metrics, mr *monitoredrespb.MonitoredResource, library instrumentation.Scope, extraLabels *attribute.Set) ([]*monitoringpb.TimeSeries, error) { + var tss []*monitoringpb.TimeSeries + var errs []error + if m.Data == nil { + return nil, nil + } + switch a := m.Data.(type) { + case metricdata.Gauge[int64]: + for _, point := range a.DataPoints { + ts, err := gaugeToTimeSeries[int64](point, m, mr) + if err != nil { + errs = append(errs, err) + continue + } + ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels) + tss = append(tss, ts) + } + case metricdata.Gauge[float64]: + for _, point := range a.DataPoints { + ts, err := gaugeToTimeSeries[float64](point, m, mr) + if err != nil { + errs = append(errs, err) + continue + } + ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels) + tss = append(tss, ts) + } + case metricdata.Sum[int64]: + for _, point := range a.DataPoints { + var ts *monitoringpb.TimeSeries + var err error + if a.IsMonotonic { + ts, err = sumToTimeSeries[int64](point, m, mr) + } else { + // Send non-monotonic sums as gauges + ts, err = gaugeToTimeSeries[int64](point, m, mr) + } + if err != nil { + errs = append(errs, err) + continue + } + ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels) + tss = append(tss, ts) + } + case metricdata.Sum[float64]: + for _, point := range a.DataPoints { + var ts *monitoringpb.TimeSeries + var err error + if a.IsMonotonic { + ts, err = sumToTimeSeries[float64](point, m, mr) + } else { + // Send non-monotonic sums as gauges + ts, err = gaugeToTimeSeries[float64](point, m, mr) + } + if err != nil { + errs = append(errs, err) + continue + } + ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels) + tss = append(tss, ts) + } + case metricdata.Histogram[int64]: + for _, point := range a.DataPoints { + ts, err := histogramToTimeSeries(point, m, mr, me.o.enableSumOfSquaredDeviation, me.o.projectID) + if err != nil { + errs = append(errs, err) + continue + } + ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels) + tss = append(tss, ts) + } + case metricdata.Histogram[float64]: + for _, point := range a.DataPoints { + ts, err := histogramToTimeSeries(point, m, mr, me.o.enableSumOfSquaredDeviation, me.o.projectID) + if err != nil { + errs = append(errs, err) + continue + } + ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels) + tss = append(tss, ts) + } + case metricdata.ExponentialHistogram[int64]: + for _, point := range a.DataPoints { + ts, err := expHistogramToTimeSeries(point, m, mr, me.o.enableSumOfSquaredDeviation, me.o.projectID) + if err != nil { + errs = append(errs, err) + continue + } + ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels) + tss = append(tss, ts) + } + case metricdata.ExponentialHistogram[float64]: + for _, point := range a.DataPoints { + ts, err := expHistogramToTimeSeries(point, m, mr, me.o.enableSumOfSquaredDeviation, me.o.projectID) + if err != nil { + errs = append(errs, err) + continue + } + ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels) + tss = append(tss, ts) + } + default: + errs = append(errs, errUnexpectedAggregationKind{kind: reflect.TypeOf(m.Data).String()}) + } + return tss, errors.Join(errs...) 
+} + +func (me *metricExporter) recordsToTspbs(rm *metricdata.ResourceMetrics) ([]*monitoringpb.TimeSeries, error) { + mr := me.resourceToMonitoredResourcepb(rm.Resource) + extraLabels := me.extraLabelsFromResource(rm.Resource) + + var ( + tss []*monitoringpb.TimeSeries + errs []error + ) + for _, scope := range rm.ScopeMetrics { + for _, metrics := range scope.Metrics { + ts, err := me.recordToTspb(metrics, mr, scope.Scope, extraLabels) + errs = append(errs, err) + tss = append(tss, ts...) + } + } + + return tss, errors.Join(errs...) +} + +func sanitizeUTF8(s string) string { + return strings.ToValidUTF8(s, "�") +} + +func gaugeToTimeSeries[N int64 | float64](point metricdata.DataPoint[N], metrics metricdata.Metrics, mr *monitoredrespb.MonitoredResource) (*monitoringpb.TimeSeries, error) { + value, valueType := numberDataPointToValue(point) + timestamp := timestamppb.New(point.Time) + if err := timestamp.CheckValid(); err != nil { + return nil, err + } + return &monitoringpb.TimeSeries{ + Resource: mr, + Unit: string(metrics.Unit), + MetricKind: googlemetricpb.MetricDescriptor_GAUGE, + ValueType: valueType, + Points: []*monitoringpb.Point{{ + Interval: &monitoringpb.TimeInterval{ + EndTime: timestamp, + }, + Value: value, + }}, + }, nil +} + +func sumToTimeSeries[N int64 | float64](point metricdata.DataPoint[N], metrics metricdata.Metrics, mr *monitoredrespb.MonitoredResource) (*monitoringpb.TimeSeries, error) { + interval, err := toNonemptyTimeIntervalpb(point.StartTime, point.Time) + if err != nil { + return nil, err + } + value, valueType := numberDataPointToValue[N](point) + return &monitoringpb.TimeSeries{ + Resource: mr, + Unit: string(metrics.Unit), + MetricKind: googlemetricpb.MetricDescriptor_CUMULATIVE, + ValueType: valueType, + Points: []*monitoringpb.Point{{ + Interval: interval, + Value: value, + }}, + }, nil +} + +// TODO(@dashpole): Refactor to pass control-coupling lint check. +// +//nolint:revive +func histogramToTimeSeries[N int64 | float64](point metricdata.HistogramDataPoint[N], metrics metricdata.Metrics, mr *monitoredrespb.MonitoredResource, enableSOSD bool, projectID string) (*monitoringpb.TimeSeries, error) { + interval, err := toNonemptyTimeIntervalpb(point.StartTime, point.Time) + if err != nil { + return nil, err + } + distributionValue := histToDistribution(point, projectID) + if enableSOSD { + setSumOfSquaredDeviation(point, distributionValue) + } + return &monitoringpb.TimeSeries{ + Resource: mr, + Unit: string(metrics.Unit), + MetricKind: googlemetricpb.MetricDescriptor_CUMULATIVE, + ValueType: googlemetricpb.MetricDescriptor_DISTRIBUTION, + Points: []*monitoringpb.Point{{ + Interval: interval, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DistributionValue{ + DistributionValue: distributionValue, + }, + }, + }}, + }, nil +} + +func expHistogramToTimeSeries[N int64 | float64](point metricdata.ExponentialHistogramDataPoint[N], metrics metricdata.Metrics, mr *monitoredrespb.MonitoredResource, enableSOSD bool, projectID string) (*monitoringpb.TimeSeries, error) { + interval, err := toNonemptyTimeIntervalpb(point.StartTime, point.Time) + if err != nil { + return nil, err + } + distributionValue := expHistToDistribution(point, projectID) + // TODO: Implement "setSumOfSquaredDeviationExpHist" for parameter "enableSOSD" functionality. 
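+	// As with explicit-bucket histograms, the exponential histogram is still reported as a CUMULATIVE
+	// DISTRIBUTION point; the exponential bucket layout is encoded in distributionValue by expHistToDistribution below.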
+ return &monitoringpb.TimeSeries{ + Resource: mr, + Unit: string(metrics.Unit), + MetricKind: googlemetricpb.MetricDescriptor_CUMULATIVE, + ValueType: googlemetricpb.MetricDescriptor_DISTRIBUTION, + Points: []*monitoringpb.Point{{ + Interval: interval, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DistributionValue{ + DistributionValue: distributionValue, + }, + }, + }}, + }, nil +} + +func toNonemptyTimeIntervalpb(start, end time.Time) (*monitoringpb.TimeInterval, error) { + // The end time of a new interval must be at least a millisecond after the end time of the + // previous interval, for all non-gauge types. + // https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#timeinterval + if end.Sub(start).Milliseconds() <= 1 { + end = start.Add(time.Millisecond) + } + startpb := timestamppb.New(start) + endpb := timestamppb.New(end) + err := errors.Join( + startpb.CheckValid(), + endpb.CheckValid(), + ) + if err != nil { + return nil, err + } + + return &monitoringpb.TimeInterval{ + StartTime: startpb, + EndTime: endpb, + }, nil +} + +func histToDistribution[N int64 | float64](hist metricdata.HistogramDataPoint[N], projectID string) *distribution.Distribution { + counts := make([]int64, len(hist.BucketCounts)) + for i, v := range hist.BucketCounts { + counts[i] = int64(v) + } + var mean float64 + if !math.IsNaN(float64(hist.Sum)) && hist.Count > 0 { // Avoid divide-by-zero + mean = float64(hist.Sum) / float64(hist.Count) + } + return &distribution.Distribution{ + Count: int64(hist.Count), + Mean: mean, + BucketCounts: counts, + BucketOptions: &distribution.Distribution_BucketOptions{ + Options: &distribution.Distribution_BucketOptions_ExplicitBuckets{ + ExplicitBuckets: &distribution.Distribution_BucketOptions_Explicit{ + Bounds: hist.Bounds, + }, + }, + }, + Exemplars: toDistributionExemplar[N](hist.Exemplars, projectID), + } +} + +func expHistToDistribution[N int64 | float64](hist metricdata.ExponentialHistogramDataPoint[N], projectID string) *distribution.Distribution { + // First calculate underflow bucket with all negatives + zeros. + underflow := hist.ZeroCount + negativeBuckets := hist.NegativeBucket.Counts + for i := 0; i < len(negativeBuckets); i++ { + underflow += negativeBuckets[i] + } + + // Next, pull in remaining buckets. + counts := make([]int64, len(hist.PositiveBucket.Counts)+2) + bucketOptions := &distribution.Distribution_BucketOptions{} + counts[0] = int64(underflow) + positiveBuckets := hist.PositiveBucket.Counts + for i := 0; i < len(positiveBuckets); i++ { + counts[i+1] = int64(positiveBuckets[i]) + } + // Overflow bucket is always empty + counts[len(counts)-1] = 0 + + if len(hist.PositiveBucket.Counts) == 0 { + // We cannot send exponential distributions with no positive buckets, + // instead we send a simple overflow/underflow histogram. 
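+		// With the single explicit bound at 0 set below, all recorded counts (the zero and negative
+		// buckets) stay in the first bucket and the final bucket remains empty.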
+ bucketOptions.Options = &distribution.Distribution_BucketOptions_ExplicitBuckets{ + ExplicitBuckets: &distribution.Distribution_BucketOptions_Explicit{ + Bounds: []float64{0}, + }, + } + } else { + // Exponential histogram + growth := math.Exp2(math.Exp2(-float64(hist.Scale))) + scale := math.Pow(growth, float64(hist.PositiveBucket.Offset)) + bucketOptions.Options = &distribution.Distribution_BucketOptions_ExponentialBuckets{ + ExponentialBuckets: &distribution.Distribution_BucketOptions_Exponential{ + GrowthFactor: growth, + Scale: scale, + NumFiniteBuckets: int32(len(counts) - 2), + }, + } + } + + var mean float64 + if !math.IsNaN(float64(hist.Sum)) && hist.Count > 0 { // Avoid divide-by-zero + mean = float64(hist.Sum) / float64(hist.Count) + } + + return &distribution.Distribution{ + Count: int64(hist.Count), + Mean: mean, + BucketCounts: counts, + BucketOptions: bucketOptions, + Exemplars: toDistributionExemplar[N](hist.Exemplars, projectID), + } +} + +func toDistributionExemplar[N int64 | float64](Exemplars []metricdata.Exemplar[N], projectID string) []*distribution.Distribution_Exemplar { + var exemplars []*distribution.Distribution_Exemplar + for _, e := range Exemplars { + attachments := []*anypb.Any{} + if hasValidSpanContext(e) { + sctx, err := anypb.New(&monitoringpb.SpanContext{ + SpanName: fmt.Sprintf("projects/%s/traces/%s/spans/%s", projectID, hex.EncodeToString(e.TraceID[:]), hex.EncodeToString(e.SpanID[:])), + }) + if err == nil { + attachments = append(attachments, sctx) + } + } + if len(e.FilteredAttributes) > 0 { + attr, err := anypb.New(&monitoringpb.DroppedLabels{ + Label: attributesToLabels(e.FilteredAttributes), + }) + if err == nil { + attachments = append(attachments, attr) + } + } + exemplars = append(exemplars, &distribution.Distribution_Exemplar{ + Value: float64(e.Value), + Timestamp: timestamppb.New(e.Time), + Attachments: attachments, + }) + } + sort.Slice(exemplars, func(i, j int) bool { + return exemplars[i].Value < exemplars[j].Value + }) + return exemplars +} + +func attributesToLabels(attrs []attribute.KeyValue) map[string]string { + labels := make(map[string]string, len(attrs)) + for _, attr := range attrs { + labels[normalizeLabelKey(string(attr.Key))] = sanitizeUTF8(attr.Value.Emit()) + } + return labels +} + +var ( + nilTraceID trace.TraceID + nilSpanID trace.SpanID +) + +func hasValidSpanContext[N int64 | float64](e metricdata.Exemplar[N]) bool { + return !bytes.Equal(e.TraceID[:], nilTraceID[:]) && !bytes.Equal(e.SpanID[:], nilSpanID[:]) +} + +func setSumOfSquaredDeviation[N int64 | float64](hist metricdata.HistogramDataPoint[N], dist *distribution.Distribution) { + var prevBound float64 + // Calculate the sum of squared deviation. + for i := 0; i < len(hist.Bounds); i++ { + // Assume all points in the bucket occur at the middle of the bucket range + middleOfBucket := (prevBound + hist.Bounds[i]) / 2 + dist.SumOfSquaredDeviation += float64(dist.BucketCounts[i]) * (middleOfBucket - dist.Mean) * (middleOfBucket - dist.Mean) + prevBound = hist.Bounds[i] + } + // The infinity bucket is an implicit +Inf bound after the list of explicit bounds. 
+ // Assume points in the infinity bucket are at the top of the previous bucket + middleOfInfBucket := prevBound + if len(dist.BucketCounts) > 0 { + dist.SumOfSquaredDeviation += float64(dist.BucketCounts[len(dist.BucketCounts)-1]) * (middleOfInfBucket - dist.Mean) * (middleOfInfBucket - dist.Mean) + } +} + +func numberDataPointToValue[N int64 | float64]( + point metricdata.DataPoint[N], +) (*monitoringpb.TypedValue, googlemetricpb.MetricDescriptor_ValueType) { + switch v := any(point.Value).(type) { + case int64: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: v, + }}, + googlemetricpb.MetricDescriptor_INT64 + case float64: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: v, + }}, + googlemetricpb.MetricDescriptor_DOUBLE + } + // It is impossible to reach this statement + return nil, googlemetricpb.MetricDescriptor_INT64 +} + +// https://github.com/googleapis/googleapis/blob/c4c562f89acce603fb189679836712d08c7f8584/google/api/metric.proto#L149 +// +// > The label key name must follow: +// > +// > * Only upper and lower-case letters, digits and underscores (_) are +// > allowed. +// > * Label name must start with a letter or digit. +// > * The maximum length of a label name is 100 characters. +// +// Note: this does not truncate if a label is too long. +func normalizeLabelKey(s string) string { + if len(s) == 0 { + return s + } + s = strings.Map(sanitizeRune, s) + if unicode.IsDigit(rune(s[0])) { + s = "key_" + s + } + return s +} + +// converts anything that is not a letter or digit to an underscore. +func sanitizeRune(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) { + return r + } + // Everything else turns into an underscore + return '_' +} diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/option.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/option.go new file mode 100644 index 0000000000..11b96067d5 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/option.go @@ -0,0 +1,201 @@ +// Copyright 2020-2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric + +import ( + "context" + "fmt" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + + apioption "google.golang.org/api/option" +) + +var userAgent = fmt.Sprintf("opentelemetry-go %s; google-cloud-metric-exporter %s", otel.Version(), Version()) + +// MonitoredResourceDescription is the struct which holds information required to map OTel resource to specific +// Google Cloud MonitoredResource. +type MonitoredResourceDescription struct { + mrLabels map[string]struct{} + mrType string +} + +// Option is function type that is passed to the exporter initialization function. 
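+// Options are applied in the order they are passed to New; see the With* helpers below for the available settings.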
+type Option func(*options)
+
+// options is the struct to hold options for metricExporter and its client instance.
+type options struct {
+	// context allows you to provide a custom context for API calls.
+	//
+	// This context will be used several times: first, to create Cloud Monitoring
+	// clients, and then every time a new batch of metrics needs to be uploaded.
+	//
+	// If unset, context.Background() will be used.
+	context context.Context
+	// metricDescriptorTypeFormatter is the custom formatter for the MetricDescriptor.Type.
+	// By default, the format string is "workload.googleapis.com/[metric name]".
+	metricDescriptorTypeFormatter func(metricdata.Metrics) string
+	// resourceAttributeFilter determines which resource attributes to
+	// add to metrics as metric labels. By default, it adds service.name,
+	// service.namespace, and service.instance.id.
+	resourceAttributeFilter attribute.Filter
+	// monitoredResourceDescription sets whether to attempt mapping the OTel Resource to a specific
+	// Google Cloud Monitored Resource. When provided, the exporter attempts to map only to the provided
+	// monitored resource type.
+	monitoredResourceDescription MonitoredResourceDescription
+	// projectID is the identifier of the Cloud Monitoring
+	// project the user is uploading the stats data to.
+	// If not set, this will default to your "Application Default Credentials".
+	// For details see: https://developers.google.com/accounts/docs/application-default-credentials.
+	//
+	// It will be used in the project_id label of a Google Cloud Monitoring monitored
+	// resource if the resource does not inherently belong to a specific
+	// project, e.g. on-premise resource like k8s_container or generic_task.
+	projectID string
+	// compression enables gzip compression on gRPC calls.
+	compression string
+	// monitoringClientOptions are additional options to be passed
+	// to the underlying Stackdriver Monitoring API client.
+	// Optional.
+	monitoringClientOptions []apioption.ClientOption
+	// destinationProjectQuota sets whether the request should use quota from
+	// the destination project for the request.
+	destinationProjectQuota bool
+
+	// disableCreateMetricDescriptors disables automatic MetricDescriptor creation
+	disableCreateMetricDescriptors bool
+
+	// enableSumOfSquaredDeviation enables calculation of an estimated sum of squared
+	// deviation. It isn't correct, so we don't send it by default.
+	enableSumOfSquaredDeviation bool
+
+	// createServiceTimeSeries sets whether to create timeseries using `CreateServiceTimeSeries`.
+	// Implicitly, this sets `disableCreateMetricDescriptors` to true.
+	createServiceTimeSeries bool
+}
+
+// WithProjectID sets the Google Cloud Platform project as projectID.
+// Without using this option, it automatically detects the project ID
+// from the default credential detection process.
+// Please find the detailed order of the default credential detection process in the doc:
+// https://godoc.org/golang.org/x/oauth2/google#FindDefaultCredentials
+func WithProjectID(id string) func(o *options) {
+	return func(o *options) {
+		o.projectID = id
+	}
+}
+
+// WithDestinationProjectQuota enables per-request usage of the destination
+// project's quota. For example, when setting gcp.project.id on a metric.
+func WithDestinationProjectQuota() func(o *options) {
+	return func(o *options) {
+		o.destinationProjectQuota = true
+	}
+}
+
+// WithMonitoringClientOptions adds the options for the Cloud Monitoring client instance.
+// Available options are defined in.
+func WithMonitoringClientOptions(opts ...apioption.ClientOption) func(o *options) {
+	return func(o *options) {
+		o.monitoringClientOptions = append(o.monitoringClientOptions, opts...)
+	}
+}
+
+// WithMetricDescriptorTypeFormatter sets the custom formatter for MetricDescriptor.
+// Note that the format has to follow the convention defined in the official document.
+// The default is "workload.googleapis.com/[metric name]".
+// ref. https://cloud.google.com/monitoring/custom-metrics/creating-metrics#custom_metric_names
+func WithMetricDescriptorTypeFormatter(f func(metricdata.Metrics) string) func(o *options) {
+	return func(o *options) {
+		o.metricDescriptorTypeFormatter = f
+	}
+}
+
+// WithFilteredResourceAttributes determines which resource attributes to
+// add to metrics as metric labels. By default, it adds service.name,
+// service.namespace, and service.instance.id. This is recommended to avoid
+// writing duplicate timeseries against the same monitored resource. Use
+// WithFilteredResourceAttributes(NoAttributes()) to disable the addition of
+// resource attributes to metric labels.
+func WithFilteredResourceAttributes(filter attribute.Filter) func(o *options) {
+	return func(o *options) {
+		o.resourceAttributeFilter = filter
+	}
+}
+
+// DefaultResourceAttributesFilter is the default filter applied to resource
+// attributes.
+func DefaultResourceAttributesFilter(kv attribute.KeyValue) bool {
+	return (kv.Key == semconv.ServiceNameKey ||
+		kv.Key == semconv.ServiceNamespaceKey ||
+		kv.Key == semconv.ServiceInstanceIDKey) && len(kv.Value.AsString()) > 0
+}
+
+// NoAttributes can be passed to WithFilteredResourceAttributes to disable
+// adding resource attributes as metric labels.
+func NoAttributes(attribute.KeyValue) bool {
+	return false
+}
+
+// WithDisableCreateMetricDescriptors will disable the automatic creation of
+// MetricDescriptors when an unknown metric is set to be exported.
+func WithDisableCreateMetricDescriptors() func(o *options) {
+	return func(o *options) {
+		o.disableCreateMetricDescriptors = true
+	}
+}
+
+// WithCompression sets the compression to use for gRPC requests.
+func WithCompression(c string) func(o *options) {
+	return func(o *options) {
+		o.compression = c
+	}
+}
+
+// WithSumOfSquaredDeviation sets the SumOfSquaredDeviation field on histograms.
+// It is an estimate, and is not the actual sum of squared deviations.
+func WithSumOfSquaredDeviation() func(o *options) {
+	return func(o *options) {
+		o.enableSumOfSquaredDeviation = true
+	}
+}
+
+// WithCreateServiceTimeSeries configures the exporter to use `CreateServiceTimeSeries` for creating timeseries.
+// If this is used, metric descriptors are not exported.
+func WithCreateServiceTimeSeries() func(o *options) {
+	return func(o *options) {
+		o.createServiceTimeSeries = true
+		o.disableCreateMetricDescriptors = true
+	}
+}
+
+// WithMonitoredResourceDescription configures the exporter to attempt to map the OpenTelemetry Resource to the provided
+// Google MonitoredResource. The provided mrLabels would be searched for in the OpenTelemetry Resource Attributes and if
+// found, would be included in the MonitoredResource labels.
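+//
+// Illustrative example (the monitored-resource type and label names below are
+// assumptions, not values prescribed by this package):
+//
+//	WithMonitoredResourceDescription("generic_node", []string{"location", "namespace", "node_id"})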
+func WithMonitoredResourceDescription(mrType string, mrLabels []string) func(o *options) { + return func(o *options) { + mrLabelSet := make(map[string]struct{}) + for _, label := range mrLabels { + mrLabelSet[label] = struct{}{} + } + o.monitoredResourceDescription = MonitoredResourceDescription{ + mrType: mrType, + mrLabels: mrLabelSet, + } + } +} diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/version.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/version.go new file mode 100644 index 0000000000..e31119fc12 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/version.go @@ -0,0 +1,21 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric + +// Version is the current release version of the OpenTelemetry +// Operations Metric Exporter in use. +func Version() string { + return "0.48.1" +} diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/LICENSE b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/resourcemapping.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/resourcemapping.go new file mode 100644 index 0000000000..4b5af517fe --- /dev/null +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/resourcemapping.go @@ -0,0 +1,286 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package resourcemapping + +import ( + "strings" + + semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" +) + +const ( + ProjectIDAttributeKey = "gcp.project.id" + + awsAccount = "aws_account" + awsEc2Instance = "aws_ec2_instance" + clusterName = "cluster_name" + containerName = "container_name" + gceInstance = "gce_instance" + genericNode = "generic_node" + genericTask = "generic_task" + instanceID = "instance_id" + job = "job" + k8sCluster = "k8s_cluster" + k8sContainer = "k8s_container" + k8sNode = "k8s_node" + k8sPod = "k8s_pod" + location = "location" + namespace = "namespace" + namespaceName = "namespace_name" + nodeID = "node_id" + nodeName = "node_name" + podName = "pod_name" + region = "region" + taskID = "task_id" + zone = "zone" + gaeInstance = "gae_instance" + gaeApp = "gae_app" + gaeModuleID = "module_id" + gaeVersionID = "version_id" + cloudRunRevision = "cloud_run_revision" + cloudFunction = "cloud_function" + cloudFunctionName = "function_name" + serviceName = "service_name" + configurationName = "configuration_name" + revisionName = "revision_name" + bmsInstance = "baremetalsolution.googleapis.com/Instance" + unknownServicePrefix = "unknown_service" +) + +var ( + // monitoredResourceMappings contains mappings of GCM resource label keys onto mapping config from OTel + // resource for a given monitored resource type. + monitoredResourceMappings = map[string]map[string]struct { + // If none of the otelKeys are present in the Resource, fallback to this literal value + fallbackLiteral string + // OTel resource keys to try and populate the resource label from. For entries with + // multiple OTel resource keys, the keys' values will be coalesced in order until there + // is a non-empty value. 
+ otelKeys []string + }{ + gceInstance: { + zone: {otelKeys: []string{string(semconv.CloudAvailabilityZoneKey)}}, + instanceID: {otelKeys: []string{string(semconv.HostIDKey)}}, + }, + k8sContainer: { + location: {otelKeys: []string{ + string(semconv.CloudAvailabilityZoneKey), + string(semconv.CloudRegionKey), + }}, + clusterName: {otelKeys: []string{string(semconv.K8SClusterNameKey)}}, + namespaceName: {otelKeys: []string{string(semconv.K8SNamespaceNameKey)}}, + podName: {otelKeys: []string{string(semconv.K8SPodNameKey)}}, + containerName: {otelKeys: []string{string(semconv.K8SContainerNameKey)}}, + }, + k8sPod: { + location: {otelKeys: []string{ + string(semconv.CloudAvailabilityZoneKey), + string(semconv.CloudRegionKey), + }}, + clusterName: {otelKeys: []string{string(semconv.K8SClusterNameKey)}}, + namespaceName: {otelKeys: []string{string(semconv.K8SNamespaceNameKey)}}, + podName: {otelKeys: []string{string(semconv.K8SPodNameKey)}}, + }, + k8sNode: { + location: {otelKeys: []string{ + string(semconv.CloudAvailabilityZoneKey), + string(semconv.CloudRegionKey), + }}, + clusterName: {otelKeys: []string{string(semconv.K8SClusterNameKey)}}, + nodeName: {otelKeys: []string{string(semconv.K8SNodeNameKey)}}, + }, + k8sCluster: { + location: {otelKeys: []string{ + string(semconv.CloudAvailabilityZoneKey), + string(semconv.CloudRegionKey), + }}, + clusterName: {otelKeys: []string{string(semconv.K8SClusterNameKey)}}, + }, + gaeInstance: { + location: {otelKeys: []string{ + string(semconv.CloudAvailabilityZoneKey), + string(semconv.CloudRegionKey), + }}, + gaeModuleID: {otelKeys: []string{string(semconv.FaaSNameKey)}}, + gaeVersionID: {otelKeys: []string{string(semconv.FaaSVersionKey)}}, + instanceID: {otelKeys: []string{string(semconv.FaaSInstanceKey)}}, + }, + gaeApp: { + location: {otelKeys: []string{ + string(semconv.CloudAvailabilityZoneKey), + string(semconv.CloudRegionKey), + }}, + gaeModuleID: {otelKeys: []string{string(semconv.FaaSNameKey)}}, + gaeVersionID: {otelKeys: []string{string(semconv.FaaSVersionKey)}}, + }, + awsEc2Instance: { + instanceID: {otelKeys: []string{string(semconv.HostIDKey)}}, + region: { + otelKeys: []string{ + string(semconv.CloudAvailabilityZoneKey), + string(semconv.CloudRegionKey), + }, + }, + awsAccount: {otelKeys: []string{string(semconv.CloudAccountIDKey)}}, + }, + bmsInstance: { + location: {otelKeys: []string{string(semconv.CloudRegionKey)}}, + instanceID: {otelKeys: []string{string(semconv.HostIDKey)}}, + }, + genericTask: { + location: { + otelKeys: []string{ + string(semconv.CloudAvailabilityZoneKey), + string(semconv.CloudRegionKey), + }, + fallbackLiteral: "global", + }, + namespace: {otelKeys: []string{string(semconv.ServiceNamespaceKey)}}, + job: {otelKeys: []string{string(semconv.ServiceNameKey), string(semconv.FaaSNameKey)}}, + taskID: {otelKeys: []string{string(semconv.ServiceInstanceIDKey), string(semconv.FaaSInstanceKey)}}, + }, + genericNode: { + location: { + otelKeys: []string{ + string(semconv.CloudAvailabilityZoneKey), + string(semconv.CloudRegionKey), + }, + fallbackLiteral: "global", + }, + namespace: {otelKeys: []string{string(semconv.ServiceNamespaceKey)}}, + nodeID: {otelKeys: []string{string(semconv.HostIDKey), string(semconv.HostNameKey)}}, + }, + } +) + +// ReadOnlyAttributes is an interface to abstract between pulling attributes from PData library or OTEL SDK. 
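+//
+// A minimal map-backed implementation is enough for tests (illustrative sketch,
+// not part of this package):
+//
+//	type mapAttrs map[string]string
+//
+//	func (m mapAttrs) GetString(k string) (string, bool) { v, ok := m[k]; return v, ok }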
+type ReadOnlyAttributes interface { + GetString(string) (string, bool) +} + +// ResourceAttributesToLoggingMonitoredResource converts from a set of OTEL resource attributes into a +// GCP monitored resource type and label set for Cloud Logging. +// E.g. +// This may output `gce_instance` type with appropriate labels. +func ResourceAttributesToLoggingMonitoredResource(attrs ReadOnlyAttributes) *monitoredrespb.MonitoredResource { + cloudPlatform, _ := attrs.GetString(string(semconv.CloudPlatformKey)) + switch cloudPlatform { + case semconv.CloudPlatformGCPAppEngine.Value.AsString(): + return createMonitoredResource(gaeApp, attrs) + default: + return commonResourceAttributesToMonitoredResource(cloudPlatform, attrs) + } +} + +// ResourceAttributesToMonitoringMonitoredResource converts from a set of OTEL resource attributes into a +// GCP monitored resource type and label set for Cloud Monitoring +// E.g. +// This may output `gce_instance` type with appropriate labels. +func ResourceAttributesToMonitoringMonitoredResource(attrs ReadOnlyAttributes) *monitoredrespb.MonitoredResource { + cloudPlatform, _ := attrs.GetString(string(semconv.CloudPlatformKey)) + switch cloudPlatform { + case semconv.CloudPlatformGCPAppEngine.Value.AsString(): + return createMonitoredResource(gaeInstance, attrs) + default: + return commonResourceAttributesToMonitoredResource(cloudPlatform, attrs) + } +} + +func commonResourceAttributesToMonitoredResource(cloudPlatform string, attrs ReadOnlyAttributes) *monitoredrespb.MonitoredResource { + switch cloudPlatform { + case semconv.CloudPlatformGCPComputeEngine.Value.AsString(): + return createMonitoredResource(gceInstance, attrs) + case semconv.CloudPlatformAWSEC2.Value.AsString(): + return createMonitoredResource(awsEc2Instance, attrs) + // TODO(alex-basinov): replace this string literal with semconv.CloudPlatformGCPBareMetalSolution + // once https://github.com/open-telemetry/semantic-conventions/pull/64 makes its way + // into the semconv module. + case "gcp_bare_metal_solution": + return createMonitoredResource(bmsInstance, attrs) + default: + // if k8s.cluster.name is set, pattern match for various k8s resources. + // this will also match non-cloud k8s platforms like minikube. 
+ if _, ok := attrs.GetString(string(semconv.K8SClusterNameKey)); ok { + // Try for most to least specific k8s_container, k8s_pod, etc + if _, ok := attrs.GetString(string(semconv.K8SContainerNameKey)); ok { + return createMonitoredResource(k8sContainer, attrs) + } else if _, ok := attrs.GetString(string(semconv.K8SPodNameKey)); ok { + return createMonitoredResource(k8sPod, attrs) + } else if _, ok := attrs.GetString(string(semconv.K8SNodeNameKey)); ok { + return createMonitoredResource(k8sNode, attrs) + } else { + return createMonitoredResource(k8sCluster, attrs) + } + } + + // Fallback to generic_task + _, hasServiceName := attrs.GetString(string(semconv.ServiceNameKey)) + _, hasFaaSName := attrs.GetString(string(semconv.FaaSNameKey)) + _, hasServiceInstanceID := attrs.GetString(string(semconv.ServiceInstanceIDKey)) + _, hasFaaSInstance := attrs.GetString(string(semconv.FaaSInstanceKey)) + if (hasServiceName && hasServiceInstanceID) || (hasFaaSInstance && hasFaaSName) { + return createMonitoredResource(genericTask, attrs) + } + + // Everything else fallback to generic_node + return createMonitoredResource(genericNode, attrs) + } +} + +func createMonitoredResource( + monitoredResourceType string, + resourceAttrs ReadOnlyAttributes, +) *monitoredrespb.MonitoredResource { + mappings := monitoredResourceMappings[monitoredResourceType] + mrLabels := make(map[string]string, len(mappings)) + + for mrKey, mappingConfig := range mappings { + mrValue := "" + ok := false + // Coalesce the possible keys in order + for _, otelKey := range mappingConfig.otelKeys { + mrValue, ok = resourceAttrs.GetString(otelKey) + if mrValue != "" && !strings.HasPrefix(mrValue, unknownServicePrefix) { + break + } + } + if mrValue == "" && contains(mappingConfig.otelKeys, string(semconv.ServiceNameKey)) { + // the service name started with unknown_service, and was ignored above + mrValue, ok = resourceAttrs.GetString(string(semconv.ServiceNameKey)) + } + if !ok || mrValue == "" { + mrValue = mappingConfig.fallbackLiteral + } + mrLabels[mrKey] = sanitizeUTF8(mrValue) + } + return &monitoredrespb.MonitoredResource{ + Type: monitoredResourceType, + Labels: mrLabels, + } +} + +func contains(list []string, element string) bool { + for _, item := range list { + if item == element { + return true + } + } + return false +} + +func sanitizeUTF8(s string) string { + return strings.ToValidUTF8(s, "�") +} diff --git a/vendor/github.com/IBM/sarama/Dockerfile.kafka b/vendor/github.com/IBM/sarama/Dockerfile.kafka index 186c2eb186..40f5f333b5 100644 --- a/vendor/github.com/IBM/sarama/Dockerfile.kafka +++ b/vendor/github.com/IBM/sarama/Dockerfile.kafka @@ -1,4 +1,4 @@ -FROM registry.access.redhat.com/ubi8/ubi-minimal:8.8@sha256:b93deceb59a58588d5b16429fc47f98920f84740a1f2ed6454e33275f0701b59 +FROM registry.access.redhat.com/ubi8/ubi-minimal:8.10@sha256:de2a0a20c1c3b39c3de829196de9694d09f97cd18fda1004de855ed2b4c841ba USER root diff --git a/vendor/github.com/IBM/sarama/async_producer.go b/vendor/github.com/IBM/sarama/async_producer.go index f629a6a2e7..a6fa3d4a2e 100644 --- a/vendor/github.com/IBM/sarama/async_producer.go +++ b/vendor/github.com/IBM/sarama/async_producer.go @@ -1101,7 +1101,7 @@ func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceRespo bp.parent.returnSuccesses(pSet.msgs) // Retriable errors case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, - ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: + ErrRequestTimedOut, 
ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend, ErrKafkaStorageError: if bp.parent.conf.Producer.Retry.Max <= 0 { bp.parent.abandonBrokerConnection(bp.broker) bp.parent.returnErrors(pSet.msgs, block.Err) @@ -1134,7 +1134,7 @@ func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceRespo switch block.Err { case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, - ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: + ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend, ErrKafkaStorageError: Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n", bp.broker.ID(), topic, partition, block.Err) if bp.currentRetries[topic] == nil { diff --git a/vendor/github.com/IBM/sarama/broker.go b/vendor/github.com/IBM/sarama/broker.go index 268696cf46..d0d5b87b8b 100644 --- a/vendor/github.com/IBM/sarama/broker.go +++ b/vendor/github.com/IBM/sarama/broker.go @@ -59,7 +59,8 @@ type Broker struct { kerberosAuthenticator GSSAPIKerberosAuth clientSessionReauthenticationTimeMs int64 - throttleTimer *time.Timer + throttleTimer *time.Timer + throttleTimerLock sync.Mutex } // SASLMechanism specifies the SASL mechanism the client uses to authenticate with the broker @@ -1697,6 +1698,8 @@ func (b *Broker) handleThrottledResponse(resp protocolBody) { } func (b *Broker) setThrottle(throttleTime time.Duration) { + b.throttleTimerLock.Lock() + defer b.throttleTimerLock.Unlock() if b.throttleTimer != nil { // if there is an existing timer stop/clear it if !b.throttleTimer.Stop() { @@ -1707,6 +1710,8 @@ func (b *Broker) setThrottle(throttleTime time.Duration) { } func (b *Broker) waitIfThrottled() { + b.throttleTimerLock.Lock() + defer b.throttleTimerLock.Unlock() if b.throttleTimer != nil { DebugLogger.Printf("broker/%d waiting for throttle timer\n", b.ID()) <-b.throttleTimer.C diff --git a/vendor/github.com/IBM/sarama/config.go b/vendor/github.com/IBM/sarama/config.go index ad970a3f08..f2f197887c 100644 --- a/vendor/github.com/IBM/sarama/config.go +++ b/vendor/github.com/IBM/sarama/config.go @@ -387,7 +387,7 @@ type Config struct { // default is 250ms, since 0 causes the consumer to spin when no events are // available. 100-500ms is a reasonable range for most cases. Kafka only // supports precision up to milliseconds; nanoseconds will be truncated. - // Equivalent to the JVM's `fetch.wait.max.ms`. + // Equivalent to the JVM's `fetch.max.wait.ms`. 
MaxWaitTime time.Duration // The maximum amount of time the consumer expects a message takes to @@ -523,7 +523,7 @@ func NewConfig() *Config { c.Metadata.Full = true c.Metadata.AllowAutoTopicCreation = true - c.Producer.MaxMessageBytes = 1000000 + c.Producer.MaxMessageBytes = 1024 * 1024 c.Producer.RequiredAcks = WaitForLocal c.Producer.Timeout = 10 * time.Second c.Producer.Partitioner = NewHashPartitioner diff --git a/vendor/github.com/IBM/sarama/docker-compose.yml b/vendor/github.com/IBM/sarama/docker-compose.yml index e916416d50..204768e320 100644 --- a/vendor/github.com/IBM/sarama/docker-compose.yml +++ b/vendor/github.com/IBM/sarama/docker-compose.yml @@ -1,8 +1,8 @@ -version: '3.9' services: zookeeper-1: hostname: 'zookeeper-1' image: 'docker.io/library/zookeeper:3.6.3' + init: true restart: always environment: ZOO_MY_ID: '1' @@ -15,6 +15,7 @@ services: zookeeper-2: hostname: 'zookeeper-2' image: 'docker.io/library/zookeeper:3.6.3' + init: true restart: always environment: ZOO_MY_ID: '2' @@ -27,6 +28,7 @@ services: zookeeper-3: hostname: 'zookeeper-3' image: 'docker.io/library/zookeeper:3.6.3' + init: true restart: always environment: ZOO_MY_ID: '3' @@ -39,6 +41,7 @@ services: kafka-1: hostname: 'kafka-1' image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + init: true build: context: . dockerfile: Dockerfile.kafka @@ -84,6 +87,7 @@ services: kafka-2: hostname: 'kafka-2' image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + init: true build: context: . dockerfile: Dockerfile.kafka @@ -129,6 +133,7 @@ services: kafka-3: hostname: 'kafka-3' image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + init: true build: context: . dockerfile: Dockerfile.kafka @@ -174,6 +179,7 @@ services: kafka-4: hostname: 'kafka-4' image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + init: true build: context: . dockerfile: Dockerfile.kafka @@ -219,6 +225,7 @@ services: kafka-5: hostname: 'kafka-5' image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + init: true build: context: . dockerfile: Dockerfile.kafka @@ -264,6 +271,7 @@ services: toxiproxy: hostname: 'toxiproxy' image: 'ghcr.io/shopify/toxiproxy:2.4.0' + init: true healthcheck: test: ['CMD', '/toxiproxy-cli', 'l'] interval: 15s diff --git a/vendor/github.com/IBM/sarama/offset_manager.go b/vendor/github.com/IBM/sarama/offset_manager.go index 1bf5459089..2948651272 100644 --- a/vendor/github.com/IBM/sarama/offset_manager.go +++ b/vendor/github.com/IBM/sarama/offset_manager.go @@ -251,18 +251,31 @@ func (om *offsetManager) Commit() { } func (om *offsetManager) flushToBroker() { + broker, err := om.coordinator() + if err != nil { + om.handleError(err) + return + } + + // Care needs to be taken to unlock this. Don't want to defer the unlock as this would + // cause the lock to be held while waiting for the broker to reply. 
+ broker.lock.Lock() req := om.constructRequest() if req == nil { + broker.lock.Unlock() return } + resp, rp, err := sendOffsetCommit(broker, req) + broker.lock.Unlock() - broker, err := om.coordinator() if err != nil { om.handleError(err) + om.releaseCoordinator(broker) + _ = broker.Close() return } - resp, err := broker.CommitOffset(req) + err = handleResponsePromise(req, resp, rp, nil) if err != nil { om.handleError(err) om.releaseCoordinator(broker) @@ -270,9 +283,20 @@ func (om *offsetManager) flushToBroker() { return } + broker.handleThrottledResponse(resp) om.handleResponse(broker, req, resp) } +func sendOffsetCommit(coordinator *Broker, req *OffsetCommitRequest) (*OffsetCommitResponse, *responsePromise, error) { + resp := new(OffsetCommitResponse) + responseHeaderVersion := resp.headerVersion() + promise, err := coordinator.send(req, true, responseHeaderVersion) + if err != nil { + return nil, nil, err + } + return resp, promise, nil +} + func (om *offsetManager) constructRequest() *OffsetCommitRequest { r := &OffsetCommitRequest{ Version: 1, diff --git a/vendor/github.com/IBM/sarama/transaction_manager.go b/vendor/github.com/IBM/sarama/transaction_manager.go index ca7e13dab0..bf20b75e90 100644 --- a/vendor/github.com/IBM/sarama/transaction_manager.go +++ b/vendor/github.com/IBM/sarama/transaction_manager.go @@ -466,7 +466,7 @@ func (t *transactionManager) publishOffsetsToTxn(offsets topicPartitionOffsets, resultOffsets = failedTxn if len(resultOffsets) == 0 { - DebugLogger.Printf("txnmgr/txn-offset-commit [%s] successful txn-offset-commit with group %s %+v\n", + DebugLogger.Printf("txnmgr/txn-offset-commit [%s] successful txn-offset-commit with group %s\n", t.transactionalID, groupId) return resultOffsets, false, nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index fca15f3822..4be90c82c9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.30.5" +const goModuleVersion = "1.32.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go deleted file mode 100644 index 19d6107c46..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go +++ /dev/null @@ -1,320 +0,0 @@ -// Package metrics implements metrics gathering for SDK development purposes. -// -// This package is designated as private and is intended for use only by the -// AWS client runtime. The exported API therein is not considered stable and -// is subject to breaking changes without notice. -package metrics - -import ( - "context" - "encoding/json" - "fmt" - "sync" - "time" - - "github.com/aws/smithy-go/middleware" -) - -const ( - // ServiceIDKey is the key for the service ID metric. - ServiceIDKey = "ServiceId" - // OperationNameKey is the key for the operation name metric. - OperationNameKey = "OperationName" - // ClientRequestIDKey is the key for the client request ID metric. - ClientRequestIDKey = "ClientRequestId" - // APICallDurationKey is the key for the API call duration metric. - APICallDurationKey = "ApiCallDuration" - // APICallSuccessfulKey is the key for the API call successful metric. 
- APICallSuccessfulKey = "ApiCallSuccessful" - // MarshallingDurationKey is the key for the marshalling duration metric. - MarshallingDurationKey = "MarshallingDuration" - // InThroughputKey is the key for the input throughput metric. - InThroughputKey = "InThroughput" - // OutThroughputKey is the key for the output throughput metric. - OutThroughputKey = "OutThroughput" - // RetryCountKey is the key for the retry count metric. - RetryCountKey = "RetryCount" - // HTTPStatusCodeKey is the key for the HTTP status code metric. - HTTPStatusCodeKey = "HttpStatusCode" - // AWSExtendedRequestIDKey is the key for the AWS extended request ID metric. - AWSExtendedRequestIDKey = "AwsExtendedRequestId" - // AWSRequestIDKey is the key for the AWS request ID metric. - AWSRequestIDKey = "AwsRequestId" - // BackoffDelayDurationKey is the key for the backoff delay duration metric. - BackoffDelayDurationKey = "BackoffDelayDuration" - // StreamThroughputKey is the key for the stream throughput metric. - StreamThroughputKey = "Throughput" - // ConcurrencyAcquireDurationKey is the key for the concurrency acquire duration metric. - ConcurrencyAcquireDurationKey = "ConcurrencyAcquireDuration" - // PendingConcurrencyAcquiresKey is the key for the pending concurrency acquires metric. - PendingConcurrencyAcquiresKey = "PendingConcurrencyAcquires" - // SigningDurationKey is the key for the signing duration metric. - SigningDurationKey = "SigningDuration" - // UnmarshallingDurationKey is the key for the unmarshalling duration metric. - UnmarshallingDurationKey = "UnmarshallingDuration" - // TimeToFirstByteKey is the key for the time to first byte metric. - TimeToFirstByteKey = "TimeToFirstByte" - // ServiceCallDurationKey is the key for the service call duration metric. - ServiceCallDurationKey = "ServiceCallDuration" - // EndpointResolutionDurationKey is the key for the endpoint resolution duration metric. - EndpointResolutionDurationKey = "EndpointResolutionDuration" - // AttemptNumberKey is the key for the attempt number metric. - AttemptNumberKey = "AttemptNumber" - // MaxConcurrencyKey is the key for the max concurrency metric. - MaxConcurrencyKey = "MaxConcurrency" - // AvailableConcurrencyKey is the key for the available concurrency metric. - AvailableConcurrencyKey = "AvailableConcurrency" -) - -// MetricPublisher provides the interface to provide custom MetricPublishers. -// PostRequestMetrics will be invoked by the MetricCollection middleware to post request. -// PostStreamMetrics will be invoked by ReadCloserWithMetrics to post stream metrics. -type MetricPublisher interface { - PostRequestMetrics(*MetricData) error - PostStreamMetrics(*MetricData) error -} - -// Serializer provides the interface to provide custom Serializers. -// Serialize will transform any input object in its corresponding string representation. -type Serializer interface { - Serialize(obj interface{}) (string, error) -} - -// DefaultSerializer is an implementation of the Serializer interface. -type DefaultSerializer struct{} - -// Serialize uses the default JSON serializer to obtain the string representation of an object. -func (DefaultSerializer) Serialize(obj interface{}) (string, error) { - bytes, err := json.Marshal(obj) - if err != nil { - return "", err - } - return string(bytes), nil -} - -type metricContextKey struct{} - -// MetricContext contains fields to store metric-related information. 
-type MetricContext struct { - connectionCounter *SharedConnectionCounter - publisher MetricPublisher - data *MetricData -} - -// MetricData stores the collected metric data. -type MetricData struct { - RequestStartTime time.Time - RequestEndTime time.Time - APICallDuration time.Duration - SerializeStartTime time.Time - SerializeEndTime time.Time - MarshallingDuration time.Duration - ResolveEndpointStartTime time.Time - ResolveEndpointEndTime time.Time - EndpointResolutionDuration time.Duration - GetIdentityStartTime time.Time - GetIdentityEndTime time.Time - InThroughput float64 - OutThroughput float64 - RetryCount int - Success uint8 - StatusCode int - ClientRequestID string - ServiceID string - OperationName string - PartitionID string - Region string - UserAgent string - RequestContentLength int64 - Stream StreamMetrics - Attempts []AttemptMetrics -} - -// StreamMetrics stores metrics related to streaming data. -type StreamMetrics struct { - ReadDuration time.Duration - ReadBytes int64 - Throughput float64 -} - -// AttemptMetrics stores metrics related to individual attempts. -type AttemptMetrics struct { - ServiceCallStart time.Time - ServiceCallEnd time.Time - ServiceCallDuration time.Duration - FirstByteTime time.Time - TimeToFirstByte time.Duration - ConnRequestedTime time.Time - ConnObtainedTime time.Time - ConcurrencyAcquireDuration time.Duration - SignStartTime time.Time - SignEndTime time.Time - SigningDuration time.Duration - DeserializeStartTime time.Time - DeserializeEndTime time.Time - UnMarshallingDuration time.Duration - RetryDelay time.Duration - ResponseContentLength int64 - StatusCode int - RequestID string - ExtendedRequestID string - HTTPClient string - MaxConcurrency int - PendingConnectionAcquires int - AvailableConcurrency int - ActiveRequests int - ReusedConnection bool -} - -// Data returns the MetricData associated with the MetricContext. -func (mc *MetricContext) Data() *MetricData { - return mc.data -} - -// ConnectionCounter returns the SharedConnectionCounter associated with the MetricContext. -func (mc *MetricContext) ConnectionCounter() *SharedConnectionCounter { - return mc.connectionCounter -} - -// Publisher returns the MetricPublisher associated with the MetricContext. -func (mc *MetricContext) Publisher() MetricPublisher { - return mc.publisher -} - -// ComputeRequestMetrics calculates and populates derived metrics based on the collected data. -func (md *MetricData) ComputeRequestMetrics() { - - for idx := range md.Attempts { - attempt := &md.Attempts[idx] - attempt.ConcurrencyAcquireDuration = attempt.ConnObtainedTime.Sub(attempt.ConnRequestedTime) - attempt.SigningDuration = attempt.SignEndTime.Sub(attempt.SignStartTime) - attempt.UnMarshallingDuration = attempt.DeserializeEndTime.Sub(attempt.DeserializeStartTime) - attempt.TimeToFirstByte = attempt.FirstByteTime.Sub(attempt.ServiceCallStart) - attempt.ServiceCallDuration = attempt.ServiceCallEnd.Sub(attempt.ServiceCallStart) - } - - md.APICallDuration = md.RequestEndTime.Sub(md.RequestStartTime) - md.MarshallingDuration = md.SerializeEndTime.Sub(md.SerializeStartTime) - md.EndpointResolutionDuration = md.ResolveEndpointEndTime.Sub(md.ResolveEndpointStartTime) - - md.RetryCount = len(md.Attempts) - 1 - - latestAttempt, err := md.LatestAttempt() - - if err != nil { - fmt.Printf("error retrieving attempts data due to: %s. 
Skipping Throughput metrics", err.Error()) - } else { - - md.StatusCode = latestAttempt.StatusCode - - if md.Success == 1 { - if latestAttempt.ResponseContentLength > 0 && latestAttempt.ServiceCallDuration > 0 { - md.InThroughput = float64(latestAttempt.ResponseContentLength) / latestAttempt.ServiceCallDuration.Seconds() - } - if md.RequestContentLength > 0 && latestAttempt.ServiceCallDuration > 0 { - md.OutThroughput = float64(md.RequestContentLength) / latestAttempt.ServiceCallDuration.Seconds() - } - } - } -} - -// LatestAttempt returns the latest attempt metrics. -// It returns an error if no attempts are initialized. -func (md *MetricData) LatestAttempt() (*AttemptMetrics, error) { - if md.Attempts == nil || len(md.Attempts) == 0 { - return nil, fmt.Errorf("no attempts initialized. NewAttempt() should be called first") - } - return &md.Attempts[len(md.Attempts)-1], nil -} - -// NewAttempt initializes new attempt metrics. -func (md *MetricData) NewAttempt() { - if md.Attempts == nil { - md.Attempts = []AttemptMetrics{} - } - md.Attempts = append(md.Attempts, AttemptMetrics{}) -} - -// SharedConnectionCounter is a counter shared across API calls. -type SharedConnectionCounter struct { - mu sync.Mutex - - activeRequests int - pendingConnectionAcquire int -} - -// ActiveRequests returns the count of active requests. -func (cc *SharedConnectionCounter) ActiveRequests() int { - cc.mu.Lock() - defer cc.mu.Unlock() - - return cc.activeRequests -} - -// PendingConnectionAcquire returns the count of pending connection acquires. -func (cc *SharedConnectionCounter) PendingConnectionAcquire() int { - cc.mu.Lock() - defer cc.mu.Unlock() - - return cc.pendingConnectionAcquire -} - -// AddActiveRequest increments the count of active requests. -func (cc *SharedConnectionCounter) AddActiveRequest() { - cc.mu.Lock() - defer cc.mu.Unlock() - - cc.activeRequests++ -} - -// RemoveActiveRequest decrements the count of active requests. -func (cc *SharedConnectionCounter) RemoveActiveRequest() { - cc.mu.Lock() - defer cc.mu.Unlock() - - cc.activeRequests-- -} - -// AddPendingConnectionAcquire increments the count of pending connection acquires. -func (cc *SharedConnectionCounter) AddPendingConnectionAcquire() { - cc.mu.Lock() - defer cc.mu.Unlock() - - cc.pendingConnectionAcquire++ -} - -// RemovePendingConnectionAcquire decrements the count of pending connection acquires. -func (cc *SharedConnectionCounter) RemovePendingConnectionAcquire() { - cc.mu.Lock() - defer cc.mu.Unlock() - - cc.pendingConnectionAcquire-- -} - -// InitMetricContext initializes the metric context with the provided counter and publisher. -// It returns the updated context. -func InitMetricContext( - ctx context.Context, counter *SharedConnectionCounter, publisher MetricPublisher, -) context.Context { - if middleware.GetStackValue(ctx, metricContextKey{}) == nil { - ctx = middleware.WithStackValue(ctx, metricContextKey{}, &MetricContext{ - connectionCounter: counter, - publisher: publisher, - data: &MetricData{ - Attempts: []AttemptMetrics{}, - Stream: StreamMetrics{}, - }, - }) - } - return ctx -} - -// Context returns the metric context from the given context. -// It returns nil if the metric context is not found. 
-func Context(ctx context.Context) *MetricContext { - mctx := middleware.GetStackValue(ctx, metricContextKey{}) - if mctx == nil { - return nil - } - return mctx.(*MetricContext) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go index e7d268c3da..128b60a731 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go @@ -4,6 +4,7 @@ import ( "context" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -45,6 +46,9 @@ func (m *RequestIDRetriever) HandleDeserialize(ctx context.Context, in middlewar if v := resp.Header.Get(h); len(v) != 0 { // set reqID on metadata for successful responses. SetRequestIDMetadata(&metadata, v) + + span, _ := tracing.GetSpan(ctx) + span.SetProperty("aws.request_id", v) break } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go index ff0bc921f1..67aaa02265 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go @@ -85,6 +85,7 @@ const ( UserAgentFeatureS3ExpressBucket = "J" UserAgentFeatureS3AccessGrants = "K" // not yet implemented UserAgentFeatureGZIPRequestCompression = "L" + UserAgentFeatureProtocolRPCV2CBOR = "M" ) // RequestUserAgent is a build middleware that set the User-Agent for the request. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/attempt_metrics.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/attempt_metrics.go new file mode 100644 index 0000000000..bfa5bf7d13 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/attempt_metrics.go @@ -0,0 +1,51 @@ +package retry + +import ( + "context" + + "github.com/aws/smithy-go/metrics" + "github.com/aws/smithy-go/middleware" +) + +type attemptMetrics struct { + Attempts metrics.Int64Counter + Errors metrics.Int64Counter + + AttemptDuration metrics.Float64Histogram +} + +func newAttemptMetrics(meter metrics.Meter) (*attemptMetrics, error) { + m := &attemptMetrics{} + var err error + + m.Attempts, err = meter.Int64Counter("client.call.attempts", func(o *metrics.InstrumentOptions) { + o.UnitLabel = "{attempt}" + o.Description = "The number of attempts for an individual operation" + }) + if err != nil { + return nil, err + } + m.Errors, err = meter.Int64Counter("client.call.errors", func(o *metrics.InstrumentOptions) { + o.UnitLabel = "{error}" + o.Description = "The number of errors for an operation" + }) + if err != nil { + return nil, err + } + m.AttemptDuration, err = meter.Float64Histogram("client.call.attempt_duration", func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = "The time it takes to connect to the service, send the request, and get back HTTP status code and headers (including time queued waiting to be sent)" + }) + if err != nil { + return nil, err + } + + return m, nil +} + +func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { + return func(o *metrics.RecordMetricOptions) { + o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) + o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go 
b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go index b645fbdf13..52d59b04bf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go @@ -8,14 +8,16 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" + "github.com/aws/smithy-go" "github.com/aws/aws-sdk-go-v2/aws" awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/internal/sdk" "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" smithymiddle "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" "github.com/aws/smithy-go/transport/http" ) @@ -38,6 +40,9 @@ type Attempt struct { // attempts are reached. LogAttempts bool + // A Meter instance for recording retry-related metrics. + OperationMeter metrics.Meter + retryer aws.RetryerV2 requestCloner RequestCloner } @@ -55,6 +60,10 @@ func NewAttemptMiddleware(retryer aws.Retryer, requestCloner RequestCloner, optF for _, fn := range optFns { fn(m) } + if m.OperationMeter == nil { + m.OperationMeter = metrics.NopMeterProvider{}.Meter("") + } + return m } @@ -80,6 +89,11 @@ func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeIn maxAttempts := r.retryer.MaxAttempts() releaseRetryToken := nopRelease + retryMetrics, err := newAttemptMetrics(r.OperationMeter) + if err != nil { + return out, metadata, err + } + for { attemptNum++ attemptInput := in @@ -97,7 +111,25 @@ func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeIn ctx = internalcontext.SetAttemptSkewContext(ctx, attemptClockSkew) var attemptResult AttemptResult + + attemptCtx, span := tracing.StartSpan(attemptCtx, "Attempt", func(o *tracing.SpanOptions) { + o.Properties.Set("operation.attempt", attemptNum) + }) + retryMetrics.Attempts.Add(ctx, 1, withOperationMetadata(ctx)) + + start := sdk.NowTime() out, attemptResult, releaseRetryToken, err = r.handleAttempt(attemptCtx, attemptInput, releaseRetryToken, next) + elapsed := sdk.NowTime().Sub(start) + + retryMetrics.AttemptDuration.Record(ctx, float64(elapsed)/1e9, withOperationMetadata(ctx)) + if err != nil { + retryMetrics.Errors.Add(ctx, 1, withOperationMetadata(ctx), func(o *metrics.RecordMetricOptions) { + o.Properties.Set("exception.type", errorType(err)) + }) + } + + span.End() + attemptClockSkew, _ = awsmiddle.GetAttemptSkew(attemptResult.ResponseMetadata) // AttemptResult Retried states that the attempt was not successful, and @@ -238,13 +270,6 @@ func (r *Attempt) handleAttempt( // that time. Potentially early exist if the sleep is canceled via the // context. retryDelay, reqErr := r.retryer.RetryDelay(attemptNum, err) - mctx := metrics.Context(ctx) - if mctx != nil { - attempt, err := mctx.Data().LatestAttempt() - if err != nil { - attempt.RetryDelay = retryDelay - } - } if reqErr != nil { return out, attemptResult, releaseRetryToken, reqErr } @@ -381,3 +406,13 @@ func AddRetryMiddlewares(stack *smithymiddle.Stack, options AddRetryMiddlewaresO } return nil } + +// Determines the value of exception.type for metrics purposes. We prefer an +// API-specific error code, otherwise it's just the Go type for the value. 
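+//
+// For example (illustrative): a smithy.APIError whose ErrorCode() returns
+// "ThrottlingException" is reported as "ThrottlingException", while a plain
+// *url.Error is reported as its Go type, "*url.Error".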
+func errorType(err error) string { + var terr smithy.APIError + if errors.As(err, &terr) { + return terr.ErrorCode() + } + return fmt.Sprintf("%T", err) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go index a9db6433de..a10ee510af 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go @@ -15,6 +15,7 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/internal/sdk" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -161,6 +162,9 @@ func (m *ComputePayloadSHA256) HandleFinalize( return next.HandleFinalize(ctx, in) } + _, span := tracing.StartSpan(ctx, "ComputePayloadSHA256") + defer span.End() + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &HashComputationError{ @@ -186,6 +190,7 @@ func (m *ComputePayloadSHA256) HandleFinalize( ctx = SetPayloadHash(ctx, hex.EncodeToString(hash.Sum(nil))) + span.End() return next.HandleFinalize(ctx, in) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go index 26d90719b2..8d7c35a9ec 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go @@ -1,13 +1,16 @@ package http import ( + "context" "crypto/tls" - "github.com/aws/aws-sdk-go-v2/aws" "net" "net/http" "reflect" "sync" "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/smithy-go/tracing" ) // Defaults for the HTTPTransportBuilder. @@ -179,7 +182,7 @@ func defaultHTTPTransport() *http.Transport { tr := &http.Transport{ Proxy: http.ProxyFromEnvironment, - DialContext: dialer.DialContext, + DialContext: traceDialContext(dialer.DialContext), TLSHandshakeTimeout: DefaultHTTPTransportTLSHandleshakeTimeout, MaxIdleConns: DefaultHTTPTransportMaxIdleConns, MaxIdleConnsPerHost: DefaultHTTPTransportMaxIdleConnsPerHost, @@ -194,6 +197,35 @@ func defaultHTTPTransport() *http.Transport { return tr } +type dialContext func(ctx context.Context, network, addr string) (net.Conn, error) + +func traceDialContext(dc dialContext) dialContext { + return func(ctx context.Context, network, addr string) (net.Conn, error) { + span, _ := tracing.GetSpan(ctx) + span.SetProperty("net.peer.name", addr) + + conn, err := dc(ctx, network, addr) + if err != nil { + return conn, err + } + + raddr := conn.RemoteAddr() + if raddr == nil { + return conn, err + } + + host, port, err := net.SplitHostPort(raddr.String()) + if err != nil { // don't blow up just because we couldn't parse + span.SetProperty("net.peer.addr", raddr.String()) + } else { + span.SetProperty("net.peer.host", host) + span.SetProperty("net.peer.port", port) + } + + return conn, err + } +} + // shallowCopyStruct creates a shallow copy of the passed in source struct, and // returns that copy of the same struct type. 
func shallowCopyStruct(src interface{}) interface{} { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md index 8325c8a0be..51f3b93ac1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -1,3 +1,43 @@ +# v1.27.43 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.42 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.41 (2024-10-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.40 (2024-10-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.39 (2024-09-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.38 (2024-09-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.37 (2024-09-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.36 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.35 (2024-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.34 (2024-09-16) + +* **Bug Fix**: Read `AWS_CONTAINER_CREDENTIALS_FULL_URI` env variable if set when reading a profile with `credential_source`. Also ensure `AWS_CONTAINER_CREDENTIALS_RELATIVE_URI` is always read before it + # v1.27.33 (2024-09-04) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go index 411bda659d..33eedc7e8f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go @@ -3,4 +3,4 @@ package config // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.27.33" +const goModuleVersion = "1.27.43" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go index 89368520f3..7ae252e2e8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go @@ -162,12 +162,12 @@ func resolveCredsFromProfile(ctx context.Context, cfg *aws.Config, envConfig *En // Get credentials from CredentialProcess err = processCredentials(ctx, cfg, sharedConfig, configs) - case len(envConfig.ContainerCredentialsEndpoint) != 0: - err = resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs) - case len(envConfig.ContainerCredentialsRelativePath) != 0: err = resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs) + case len(envConfig.ContainerCredentialsEndpoint) != 0: + err = resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs) + default: err = resolveEC2RoleCredentials(ctx, cfg, configs) } @@ -355,10 +355,13 @@ func resolveCredsFromSource(ctx context.Context, cfg *aws.Config, envConfig *Env cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials} case credSourceECSContainer: - if len(envConfig.ContainerCredentialsRelativePath) == 0 { - return fmt.Errorf("EcsContainer was specified as 
the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set") + if len(envConfig.ContainerCredentialsRelativePath) != 0 { + return resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs) + } + if len(envConfig.ContainerCredentialsEndpoint) != 0 { + return resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs) } - return resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs) + return fmt.Errorf("EcsContainer was specified as the credential_source, but neither 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' or AWS_CONTAINER_CREDENTIALS_FULL_URI' was set") default: return fmt.Errorf("credential_source values must be EcsContainer, Ec2InstanceMetadata, or Environment") diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md index dfb3dc94b7..911a4e7ea3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md @@ -1,3 +1,39 @@ +# v1.17.41 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.40 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.39 (2024-10-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.38 (2024-10-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.37 (2024-09-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.36 (2024-09-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.35 (2024-09-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.34 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.33 (2024-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.17.32 (2024-09-04) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go index e7467a7d98..866fe0b550 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go @@ -3,4 +3,4 @@ package credentials // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.17.32" +const goModuleVersion = "1.17.41" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md index 821fd36539..235e0815e7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md @@ -1,3 +1,19 @@ +# v1.16.17 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.16 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.15 (2024-10-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.14 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.16.13 (2024-09-03) * **Dependency Update**: Updated to the latest SDK module versions diff 
--git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go index 949c9ca46b..fdc8330745 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go @@ -3,4 +3,4 @@ package imds // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.16.13" +const goModuleVersion = "1.16.17" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index f6833e54c2..c0583678da 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,19 @@ +# v1.3.21 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.20 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.19 (2024-10-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.18 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.3.17 (2024-09-03) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index 3bcd95b80c..c7e2a00a2a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.3.17" +const goModuleVersion = "1.3.21" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json index 712d31e3fe..a2f0680888 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json @@ -9,7 +9,7 @@ "supportsDualStack" : true, "supportsFIPS" : true }, - "regionRegex" : "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$", + "regionRegex" : "^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$", "regions" : { "af-south-1" : { "description" : "Africa (Cape Town)" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index a6860c0e23..4f733d01a3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,19 @@ +# v2.6.21 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.20 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.19 (2024-10-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.18 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + # v2.6.17 (2024-09-03) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go 
b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index 5403b821c1..3c4074e0fd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.6.17" +const goModuleVersion = "2.6.21" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md index 56f89df8d5..297618fcce 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md @@ -1,3 +1,11 @@ +# v1.12.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. + +# v1.11.5 (2024-09-20) + +* No change notes available for this release. + # v1.11.4 (2024-08-15) * **Dependency Update**: Bump minimum Go version to 1.21. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go index 47d97ccfb9..4e50b2578e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go @@ -3,4 +3,4 @@ package acceptencoding // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.11.4" +const goModuleVersion = "1.12.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index 1203b556e3..64a3e054ca 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,20 @@ +# v1.12.2 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.1 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.20 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.11.19 (2024-09-03) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index 0259bbfb76..10981a5204 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.11.19" +const goModuleVersion = "1.12.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md index 612bc3c116..ef0edc7271 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md @@ -1,3 +1,41 @@ +# v1.37.2 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.37.1 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.37.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.36.4 (2024-10-03) + +* No change notes available for this release. + +# v1.36.3 (2024-09-27) + +* No change notes available for this release. + +# v1.36.2 (2024-09-25) + +* No change notes available for this release. + +# v1.36.1 (2024-09-23) + +* No change notes available for this release. + +# v1.36.0 (2024-09-20) + +* **Feature**: Add tracing and metrics support to service clients. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.35.8 (2024-09-17) + +* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution. + # v1.35.7 (2024-09-04) * No change notes available for this release. 
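[Editor's note, not part of the patch] The "Add tracing and metrics support to service clients" entry above corresponds to the api_client.go changes that follow: the KMS client gains `MeterProvider` and `TracerProvider` options, which `resolveMeterProvider`/`resolveTracerProvider` default to the smithy-go no-op implementations, and every operation is wrapped in spans plus `client.call.*` histograms. The sketch below is illustrative only and assumes the vendored module versions in this patch; the option fields and no-op providers are taken directly from the diff, while `config.LoadDefaultConfig` and `ListKeys` are ordinary SDK calls used here purely for demonstration. A real deployment would substitute an instrumented provider (for example an OpenTelemetry-backed one) where noted.

```go
// Minimal sketch: constructing a KMS client with the MeterProvider and
// TracerProvider options introduced by this update. The no-op providers set
// here mirror the defaults applied by resolveMeterProvider/resolveTracerProvider
// in the patched api_client.go; an instrumented provider would replace them
// to actually export the new spans and client.call.* metrics.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/smithy-go/metrics"
	"github.com/aws/smithy-go/tracing"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	client := kms.NewFromConfig(cfg, func(o *kms.Options) {
		// Explicitly setting the defaults shown in the patch; swap in a
		// telemetry-backed MeterProvider/TracerProvider to record the
		// per-operation histograms and spans added by this change.
		o.MeterProvider = metrics.NopMeterProvider{}
		o.TracerProvider = &tracing.NopTracerProvider{}
	})

	// Any operation now flows through the span and metric middleware wired
	// up in addOperation*Middlewares below.
	if _, err := client.ListKeys(ctx, &kms.ListKeysInput{}); err != nil {
		log.Printf("ListKeys: %v", err)
	}
}
```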
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go index d059206428..64c4d0f805 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go @@ -4,6 +4,7 @@ package kms import ( "context" + "errors" "fmt" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/defaults" @@ -19,7 +20,9 @@ import ( smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" @@ -30,6 +33,133 @@ import ( const ServiceID = "KMS" const ServiceAPIVersion = "2014-11-01" +type operationMetrics struct { + Duration metrics.Float64Histogram + SerializeDuration metrics.Float64Histogram + ResolveIdentityDuration metrics.Float64Histogram + ResolveEndpointDuration metrics.Float64Histogram + SignRequestDuration metrics.Float64Histogram + DeserializeDuration metrics.Float64Histogram +} + +func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { + switch name { + case "client.call.duration": + return m.Duration + case "client.call.serialization_duration": + return m.SerializeDuration + case "client.call.resolve_identity_duration": + return m.ResolveIdentityDuration + case "client.call.resolve_endpoint_duration": + return m.ResolveEndpointDuration + case "client.call.signing_duration": + return m.SignRequestDuration + case "client.call.deserialization_duration": + return m.DeserializeDuration + default: + panic("unrecognized operation metric") + } +} + +func timeOperationMetric[T any]( + ctx context.Context, metric string, fn func() (T, error), + opts ...metrics.RecordMetricOption, +) (T, error) { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + start := time.Now() + v, err := fn() + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) + return v, err +} + +func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + var ended bool + start := time.Now() + return func() { + if ended { + return + } + ended = true + + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) 
+ } +} + +func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { + return func(o *metrics.RecordMetricOptions) { + o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) + o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) + } +} + +type operationMetricsKey struct{} + +func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { + meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/kms") + om := &operationMetrics{} + + var err error + + om.Duration, err = operationMetricTimer(meter, "client.call.duration", + "Overall call duration (including retries and time to send or receive request and response body)") + if err != nil { + return nil, err + } + om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", + "The time it takes to serialize a message body") + if err != nil { + return nil, err + } + om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", + "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") + if err != nil { + return nil, err + } + om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", + "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") + if err != nil { + return nil, err + } + om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", + "The time it takes to sign a request") + if err != nil { + return nil, err + } + om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", + "The time it takes to deserialize a message body") + if err != nil { + return nil, err + } + + return context.WithValue(parent, operationMetricsKey{}, om), nil +} + +func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { + return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = desc + }) +} + +func getOperationMetrics(ctx context.Context) *operationMetrics { + return ctx.Value(operationMetricsKey{}).(*operationMetrics) +} + +func operationTracer(p tracing.TracerProvider) tracing.Tracer { + return p.Tracer("github.com/aws/aws-sdk-go-v2/service/kms") +} + // Client provides the API client to make operations call for AWS Key Management // Service. 
type Client struct { @@ -57,6 +187,10 @@ func New(options Options, optFns ...func(*Options)) *Client { resolveEndpointResolverV2(&options) + resolveTracerProvider(&options) + + resolveMeterProvider(&options) + resolveAuthSchemeResolver(&options) for _, fn := range optFns { @@ -89,8 +223,15 @@ func (c *Client) Options() Options { return c.options.Copy() } -func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { +func (c *Client) invokeOperation( + ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, +) ( + result interface{}, metadata middleware.Metadata, err error, +) { ctx = middleware.ClearStackValues(ctx) + ctx = middleware.WithServiceID(ctx, ServiceID) + ctx = middleware.WithOperationName(ctx, opID) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) options := c.options.Copy() @@ -114,15 +255,56 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf } } - handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) - result, metadata, err = handler.Handle(ctx, params) + ctx, err = withOperationMetrics(ctx, options.MeterProvider) if err != nil { + return nil, metadata, err + } + + tracer := operationTracer(options.TracerProvider) + spanName := fmt.Sprintf("%s.%s", ServiceID, opID) + + ctx = tracing.WithOperationTracer(ctx, tracer) + + ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) { + o.Kind = tracing.SpanKindClient + o.Properties.Set("rpc.system", "aws-api") + o.Properties.Set("rpc.method", opID) + o.Properties.Set("rpc.service", ServiceID) + }) + endTimer := startMetricTimer(ctx, "client.call.duration") + defer endTimer() + defer span.End() + + handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) { + o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/kms") + }) + decorated := middleware.DecorateHandler(handler, stack) + result, metadata, err = decorated.Handle(ctx, params) + if err != nil { + span.SetProperty("exception.type", fmt.Sprintf("%T", err)) + span.SetProperty("exception.message", err.Error()) + + var aerr smithy.APIError + if errors.As(err, &aerr) { + span.SetProperty("api.error_code", aerr.ErrorCode()) + span.SetProperty("api.error_message", aerr.ErrorMessage()) + span.SetProperty("api.error_fault", aerr.ErrorFault().String()) + } + err = &smithy.OperationError{ ServiceID: ServiceID, OperationName: opID, Err: err, } } + + span.SetProperty("error", err != nil) + if err == nil { + span.SetStatus(tracing.SpanStatusOK) + } else { + span.SetStatus(tracing.SpanStatusError) + } + return result, metadata, err } @@ -160,7 +342,7 @@ func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, o if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { return fmt.Errorf("add ResolveEndpointV2: %v", err) } - if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { + if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil { return fmt.Errorf("add Signing: %w", err) } return nil @@ -238,16 +420,15 @@ func setResolvedDefaultsMode(o *Options) { // NewFromConfig 
returns a new client from the provided config. func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, - AccountIDEndpointMode: cfg.AccountIDEndpointMode, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -435,6 +616,30 @@ func addRawResponseToMetadata(stack *middleware.Stack) error { func addRecordResponseTiming(stack *middleware.Stack) error { return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) } + +func addSpanRetryLoop(stack *middleware.Stack, options Options) error { + return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) +} + +type spanRetryLoop struct { + options Options +} + +func (*spanRetryLoop) ID() string { + return "spanRetryLoop" +} + +func (m *spanRetryLoop) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + middleware.FinalizeOutput, middleware.Metadata, error, +) { + tracer := operationTracer(m.options.TracerProvider) + ctx, span := tracer.StartSpan(ctx, "RetryLoop") + defer span.End() + + return next.HandleFinalize(ctx, in) +} func addStreamingEventsPayload(stack *middleware.Stack) error { return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) } @@ -478,6 +683,7 @@ func addIsPaginatorUserAgent(o *Options) { func addRetry(stack *middleware.Stack, o Options) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() + m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/kms") }) if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { return err @@ -541,25 +747,6 @@ func initializeTimeOffsetResolver(c *Client) { c.timeOffset = new(atomic.Int64) } -func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error { - switch mode { - case aws.AccountIDEndpointModeUnset: - case aws.AccountIDEndpointModePreferred: - case aws.AccountIDEndpointModeDisabled: - case aws.AccountIDEndpointModeRequired: - if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok { - return fmt.Errorf("accountID is required but not set") - } else if ca.Credentials.AccountID == "" { - return fmt.Errorf("accountID is required but not set") - } - // default check in case invalid mode is configured through request config - default: - return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode) - } - - return nil -} - func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { ua, err := getOrAddRequestUserAgent(stack) if err != nil { @@ -575,6 +762,18 @@ func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { return nil } +func resolveTracerProvider(options *Options) { + if options.TracerProvider == nil { + options.TracerProvider = &tracing.NopTracerProvider{} + } +} + +func resolveMeterProvider(options *Options) { 
+ if options.MeterProvider == nil { + options.MeterProvider = metrics.NopMeterProvider{} + } +} + func addRecursionDetection(stack *middleware.Stack) error { return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) } @@ -626,3 +825,89 @@ func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { DisableHTTPS: o.EndpointOptions.DisableHTTPS, }, "ResolveEndpointV2", middleware.After) } + +type spanInitializeStart struct { +} + +func (*spanInitializeStart) ID() string { + return "spanInitializeStart" +} + +func (m *spanInitializeStart) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "Initialize") + + return next.HandleInitialize(ctx, in) +} + +type spanInitializeEnd struct { +} + +func (*spanInitializeEnd) ID() string { + return "spanInitializeEnd" +} + +func (m *spanInitializeEnd) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleInitialize(ctx, in) +} + +type spanBuildRequestStart struct { +} + +func (*spanBuildRequestStart) ID() string { + return "spanBuildRequestStart" +} + +func (m *spanBuildRequestStart) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + middleware.SerializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "BuildRequest") + + return next.HandleSerialize(ctx, in) +} + +type spanBuildRequestEnd struct { +} + +func (*spanBuildRequestEnd) ID() string { + return "spanBuildRequestEnd" +} + +func (m *spanBuildRequestEnd) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + middleware.BuildOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleBuild(ctx, in) +} + +func addSpanInitializeStart(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) +} + +func addSpanInitializeEnd(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) +} + +func addSpanBuildRequestStart(stack *middleware.Stack) error { + return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) +} + +func addSpanBuildRequestEnd(stack *middleware.Stack) error { + return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go index 71c9a0c01b..706c00b301 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go @@ -125,6 +125,9 @@ func (c *Client) addOperationCancelKeyDeletionMiddlewares(stack *middleware.Stac if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -164,6 +167,18 @@ func (c *Client) addOperationCancelKeyDeletionMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + 
return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go index 967eb34fd6..8656b61c86 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go @@ -179,6 +179,9 @@ func (c *Client) addOperationConnectCustomKeyStoreMiddlewares(stack *middleware. if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -218,6 +221,18 @@ func (c *Client) addOperationConnectCustomKeyStoreMiddlewares(stack *middleware. if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go index cca6fb5c9f..84e8a58d48 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go @@ -178,6 +178,9 @@ func (c *Client) addOperationCreateAliasMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -217,6 +220,18 @@ func (c *Client) addOperationCreateAliasMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go index b3e5109cc2..548da759f4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go @@ -327,6 +327,9 @@ func (c *Client) addOperationCreateCustomKeyStoreMiddlewares(stack *middleware.S if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -366,6 +369,18 @@ func (c *Client) addOperationCreateCustomKeyStoreMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != 
nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go index 95754b366c..d30a5e80e3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go @@ -285,6 +285,9 @@ func (c *Client) addOperationCreateGrantMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -324,6 +327,18 @@ func (c *Client) addOperationCreateGrantMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go index 25aa94a074..259c5fce61 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go @@ -533,6 +533,9 @@ func (c *Client) addOperationCreateKeyMiddlewares(stack *middleware.Stack, optio if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -572,6 +575,18 @@ func (c *Client) addOperationCreateKeyMiddlewares(stack *middleware.Stack, optio if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go index 6558f5bb84..93d64f5803 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go @@ -296,6 +296,9 @@ func (c *Client) addOperationDecryptMiddlewares(stack *middleware.Stack, options if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -335,6 +338,18 @@ func (c *Client) addOperationDecryptMiddlewares(stack *middleware.Stack, options if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err 
+ } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go index 4dab0d2532..58af22e36e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go @@ -128,6 +128,9 @@ func (c *Client) addOperationDeleteAliasMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -167,6 +170,18 @@ func (c *Client) addOperationDeleteAliasMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go index bab682962f..b773858b97 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go @@ -145,6 +145,9 @@ func (c *Client) addOperationDeleteCustomKeyStoreMiddlewares(stack *middleware.S if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -184,6 +187,18 @@ func (c *Client) addOperationDeleteCustomKeyStoreMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go index 480dfbcca5..c952f63059 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go @@ -127,6 +127,9 @@ func (c *Client) addOperationDeleteImportedKeyMaterialMiddlewares(stack *middlew if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -166,6 +169,18 @@ func (c *Client) addOperationDeleteImportedKeyMaterialMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeriveSharedSecret.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeriveSharedSecret.go index 1ec4daeaf7..fef5dc4265 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeriveSharedSecret.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeriveSharedSecret.go @@ -298,6 +298,9 @@ func (c *Client) addOperationDeriveSharedSecretMiddlewares(stack *middleware.Sta if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -337,6 +340,18 @@ func (c *Client) addOperationDeriveSharedSecretMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go index 107f19bc7d..438537acd6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go @@ -178,6 +178,9 @@ func (c *Client) addOperationDescribeCustomKeyStoresMiddlewares(stack *middlewar if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -214,6 +217,18 @@ func (c *Client) addOperationDescribeCustomKeyStoresMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go index 67b026c3b8..3ac7fafa05 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go @@ -191,6 +191,9 @@ func (c *Client) addOperationDescribeKeyMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -230,6 +233,18 @@ func (c *Client) addOperationDescribeKeyMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go index 20bfe6cb94..3d247346da 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go @@ -119,6 +119,9 @@ func (c *Client) addOperationDisableKeyMiddlewares(stack *middleware.Stack, opti if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -158,6 +161,18 @@ func (c *Client) addOperationDisableKeyMiddlewares(stack *middleware.Stack, opti if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go index 0a12984ac3..30ee5155f8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go @@ -149,6 +149,9 @@ func (c *Client) addOperationDisableKeyRotationMiddlewares(stack *middleware.Sta if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -188,6 +191,18 @@ func (c *Client) addOperationDisableKeyRotationMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go index 0b563fcbca..55327cb726 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go @@ -134,6 +134,9 @@ func (c *Client) addOperationDisconnectCustomKeyStoreMiddlewares(stack *middlewa if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -173,6 +176,18 @@ func (c *Client) addOperationDisconnectCustomKeyStoreMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go index a792631d21..fc8402f5e0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go @@ -116,6 +116,9 @@ func (c *Client) addOperationEnableKeyMiddlewares(stack *middleware.Stack, optio if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -155,6 +158,18 @@ func (c *Client) addOperationEnableKeyMiddlewares(stack *middleware.Stack, optio if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go index 2b6bae01b4..3236e4970f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go @@ -188,6 +188,9 @@ func (c *Client) addOperationEnableKeyRotationMiddlewares(stack *middleware.Stac if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -227,6 +230,18 @@ func (c *Client) addOperationEnableKeyRotationMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go index 9430f62f8e..ac6031bdf4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go @@ -256,6 +256,9 @@ func (c *Client) addOperationEncryptMiddlewares(stack *middleware.Stack, options if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -295,6 +298,18 @@ func (c *Client) addOperationEncryptMiddlewares(stack *middleware.Stack, options if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go index d030a69437..5cab73d3f9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go @@ -311,6 +311,9 @@ func (c *Client) addOperationGenerateDataKeyMiddlewares(stack *middleware.Stack, if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -350,6 +353,18 @@ func (c *Client) addOperationGenerateDataKeyMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go index 8934cc5d8f..394bac8149 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go @@ -309,6 +309,9 @@ func (c *Client) addOperationGenerateDataKeyPairMiddlewares(stack *middleware.St if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -348,6 +351,18 @@ func (c *Client) addOperationGenerateDataKeyPairMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go index bed654be64..5bbc48fda4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go @@ -236,6 +236,9 @@ func (c *Client) addOperationGenerateDataKeyPairWithoutPlaintextMiddlewares(stac if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -275,6 +278,18 @@ func (c *Client) addOperationGenerateDataKeyPairWithoutPlaintextMiddlewares(stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go index bf13047537..69b9550219 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go @@ -236,6 +236,9 @@ func (c *Client) addOperationGenerateDataKeyWithoutPlaintextMiddlewares(stack *m if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -275,6 +278,18 @@ func (c *Client) addOperationGenerateDataKeyWithoutPlaintextMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go index cec517c2b5..a5505e3331 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go @@ -181,6 +181,9 @@ func (c *Client) addOperationGenerateMacMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -220,6 +223,18 @@ func (c *Client) addOperationGenerateMacMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go index 29aba8fa1c..3ff869365d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go @@ -169,6 +169,9 @@ func (c *Client) addOperationGenerateRandomMiddlewares(stack *middleware.Stack, if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -205,6 +208,18 @@ func (c *Client) addOperationGenerateRandomMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go index ee38bff337..b0368d5347 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go @@ -123,6 +123,9 @@ func (c *Client) addOperationGetKeyPolicyMiddlewares(stack *middleware.Stack, op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -162,6 +165,18 @@ func (c *Client) addOperationGetKeyPolicyMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go index 18d0e207e1..0484bb59c2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go @@ -192,6 +192,9 @@ func (c *Client) addOperationGetKeyRotationStatusMiddlewares(stack *middleware.S if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -231,6 +234,18 @@ func (c *Client) addOperationGetKeyRotationStatusMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go index f1af81f15f..e3ffaca7a8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go @@ -240,6 +240,9 @@ func (c *Client) addOperationGetParametersForImportMiddlewares(stack *middleware if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -279,6 +282,18 @@ func (c *Client) addOperationGetParametersForImportMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go index a32205fe30..245cf4610b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go @@ -231,6 +231,9 @@ func (c *Client) addOperationGetPublicKeyMiddlewares(stack *middleware.Stack, op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -270,6 +273,18 @@ func (c *Client) addOperationGetPublicKeyMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go index 60395169be..ee7037a49f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go @@ -255,6 +255,9 @@ func (c *Client) addOperationImportKeyMaterialMiddlewares(stack *middleware.Stac if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -294,6 +297,18 @@ func (c *Client) addOperationImportKeyMaterialMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go index 67bc9d7aaf..e7f68db3f7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go @@ -166,6 +166,9 @@ func (c *Client) addOperationListAliasesMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -202,6 +205,18 @@ func (c *Client) addOperationListAliasesMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go index 2ee6363dd5..bd9433fa60 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go @@ -172,6 +172,9 @@ func (c *Client) addOperationListGrantsMiddlewares(stack *middleware.Stack, opti if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -211,6 +214,18 @@ func (c *Client) addOperationListGrantsMiddlewares(stack *middleware.Stack, opti if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go index 34522dd1a9..c68867a534 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go @@ -146,6 +146,9 @@ func (c *Client) addOperationListKeyPoliciesMiddlewares(stack *middleware.Stack, if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -185,6 +188,18 @@ func (c *Client) addOperationListKeyPoliciesMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyRotations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyRotations.go index bc3524b325..eed276b15a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyRotations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyRotations.go @@ -154,6 +154,9 @@ func (c *Client) addOperationListKeyRotationsMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -193,6 +196,18 @@ func (c *Client) addOperationListKeyRotationsMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go index 80ed5ba24d..9c3860fcc0 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go @@ -131,6 +131,9 @@ func (c *Client) addOperationListKeysMiddlewares(stack *middleware.Stack, option if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -167,6 +170,18 @@ func (c *Client) addOperationListKeysMiddlewares(stack *middleware.Stack, option if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go index a18dd7f019..7bdf6304c8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go @@ -162,6 +162,9 @@ func (c *Client) addOperationListResourceTagsMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -201,6 +204,18 @@ func (c *Client) addOperationListResourceTagsMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go index 2bc033112b..7fe68d8e21 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go @@ -167,6 +167,9 @@ func (c *Client) addOperationListRetirableGrantsMiddlewares(stack *middleware.St if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -206,6 +209,18 @@ func (c *Client) addOperationListRetirableGrantsMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go index 53c3766900..4957cdee98 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go @@ -178,6 +178,9 @@ func (c *Client) addOperationPutKeyPolicyMiddlewares(stack *middleware.Stack, op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -217,6 +220,18 @@ func (c *Client) addOperationPutKeyPolicyMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go index f65a577fa4..7d6f278ded 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go @@ -335,6 +335,9 @@ func (c *Client) addOperationReEncryptMiddlewares(stack *middleware.Stack, optio if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -374,6 +377,18 @@ func (c *Client) addOperationReEncryptMiddlewares(stack *middleware.Stack, optio if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go index 84fa3d11ef..4c5bb7b179 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go @@ -332,6 +332,9 @@ func (c *Client) addOperationReplicateKeyMiddlewares(stack *middleware.Stack, op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -371,6 +374,18 @@ func (c *Client) addOperationReplicateKeyMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go index 9158b1c1ca..016739eabd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go @@ -150,6 +150,9 @@ func (c *Client) 
addOperationRetireGrantMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -186,6 +189,18 @@ func (c *Client) addOperationRetireGrantMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go index ed45c1596a..c97a332bf2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go @@ -149,6 +149,9 @@ func (c *Client) addOperationRevokeGrantMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -188,6 +191,18 @@ func (c *Client) addOperationRevokeGrantMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RotateKeyOnDemand.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RotateKeyOnDemand.go index ed5d27a060..5424efcf6b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RotateKeyOnDemand.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RotateKeyOnDemand.go @@ -171,6 +171,9 @@ func (c *Client) addOperationRotateKeyOnDemandMiddlewares(stack *middleware.Stac if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -210,6 +213,18 @@ func (c *Client) addOperationRotateKeyOnDemandMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go index 6000bc763f..090468d5e9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go @@ -202,6 +202,9 @@ func (c *Client) addOperationScheduleKeyDeletionMiddlewares(stack *middleware.St if err = 
addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -241,6 +244,18 @@ func (c *Client) addOperationScheduleKeyDeletionMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go index b3b7d732f7..553404e237 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go @@ -259,6 +259,9 @@ func (c *Client) addOperationSignMiddlewares(stack *middleware.Stack, options Op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -298,6 +301,18 @@ func (c *Client) addOperationSignMiddlewares(stack *middleware.Stack, options Op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go index 6d964f39b6..6ec3ee335f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go @@ -160,6 +160,9 @@ func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -199,6 +202,18 @@ func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go index d62a3ae0a5..ee01e6e7c5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go @@ -143,6 +143,9 @@ func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, o if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = 
addClientUserAgent(stack, options); err != nil { return err } @@ -182,6 +185,18 @@ func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, o if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go index fc9ad58571..2238087b27 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go @@ -174,6 +174,9 @@ func (c *Client) addOperationUpdateAliasMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -213,6 +216,18 @@ func (c *Client) addOperationUpdateAliasMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go index a615be4b0b..fa7c84dfd1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go @@ -284,6 +284,9 @@ func (c *Client) addOperationUpdateCustomKeyStoreMiddlewares(stack *middleware.S if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -323,6 +326,18 @@ func (c *Client) addOperationUpdateCustomKeyStoreMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go index 82e9e6e5c7..83ed78459e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go @@ -127,6 +127,9 @@ func (c *Client) addOperationUpdateKeyDescriptionMiddlewares(stack *middleware.S if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ 
-166,6 +169,18 @@ func (c *Client) addOperationUpdateKeyDescriptionMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go index 3792c12db7..e79f816aba 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go @@ -182,6 +182,9 @@ func (c *Client) addOperationUpdatePrimaryRegionMiddlewares(stack *middleware.St if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -221,6 +224,18 @@ func (c *Client) addOperationUpdatePrimaryRegionMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go index 8f3676ae16..4a25ff29ff 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go @@ -247,6 +247,9 @@ func (c *Client) addOperationVerifyMiddlewares(stack *middleware.Stack, options if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -286,6 +289,18 @@ func (c *Client) addOperationVerifyMiddlewares(stack *middleware.Stack, options if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go index 7be101c10f..484906a5b7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go @@ -180,6 +180,9 @@ func (c *Client) addOperationVerifyMacMiddlewares(stack *middleware.Stack, optio if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -219,6 +222,18 @@ func (c *Client) addOperationVerifyMacMiddlewares(stack *middleware.Stack, optio if err = 
addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/auth.go index fea1da697b..f9d4ef0354 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/auth.go @@ -8,7 +8,9 @@ import ( awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" smithy "github.com/aws/smithy-go" smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -145,6 +147,9 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") + defer span.End() + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { @@ -157,6 +162,9 @@ func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in mid } ctx = setResolvedAuthScheme(ctx, scheme) + + span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID()) + span.End() return next.HandleFinalize(ctx, in) } @@ -216,7 +224,10 @@ func (*getIdentityMiddleware) ID() string { func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - rscheme := getResolvedAuthScheme(ctx) + innerCtx, span := tracing.StartSpan(ctx, "GetIdentity") + defer span.End() + + rscheme := getResolvedAuthScheme(innerCtx) if rscheme == nil { return out, metadata, fmt.Errorf("no resolved auth scheme") } @@ -226,12 +237,20 @@ func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middlewar return out, metadata, fmt.Errorf("no identity resolver") } - identity, err := resolver.GetIdentity(ctx, rscheme.IdentityProperties) + identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration", + func() (smithyauth.Identity, error) { + return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties) + }, + func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) if err != nil { return out, metadata, fmt.Errorf("get identity: %w", err) } ctx = setIdentity(ctx, identity) + + span.End() return next.HandleFinalize(ctx, in) } @@ -247,6 +266,7 @@ func getIdentity(ctx context.Context) smithyauth.Identity { } type signRequestMiddleware struct { + options Options } func (*signRequestMiddleware) ID() string { @@ -256,6 +276,9 @@ func (*signRequestMiddleware) ID() string { func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "SignRequest") + defer span.End() + req, ok := 
in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) @@ -276,9 +299,15 @@ func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middlewar return out, metadata, fmt.Errorf("no signer") } - if err := signer.SignRequest(ctx, req, identity, rscheme.SignerProperties); err != nil { + _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) { + return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties) + }, func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) + if err != nil { return out, metadata, fmt.Errorf("sign request: %w", err) } + span.End() return next.HandleFinalize(ctx, in) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go index f5e1a43d4e..acd0c603d0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go @@ -15,6 +15,7 @@ import ( "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" smithytime "github.com/aws/smithy-go/time" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "io" "io/ioutil" @@ -45,6 +46,10 @@ func (m *awsAwsjson11_deserializeOpCancelKeyDeletion) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -164,6 +169,10 @@ func (m *awsAwsjson11_deserializeOpConnectCustomKeyStore) HandleDeserialize(ctx return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -283,6 +292,10 @@ func (m *awsAwsjson11_deserializeOpCreateAlias) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -386,6 +399,10 @@ func (m *awsAwsjson11_deserializeOpCreateCustomKeyStore) HandleDeserialize(ctx c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -541,6 +558,10 @@ func (m *awsAwsjson11_deserializeOpCreateGrant) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() 
+ defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -672,6 +693,10 @@ func (m *awsAwsjson11_deserializeOpCreateKey) HandleDeserialize(ctx context.Cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -815,6 +840,10 @@ func (m *awsAwsjson11_deserializeOpDecrypt) HandleDeserialize(ctx context.Contex return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -952,6 +981,10 @@ func (m *awsAwsjson11_deserializeOpDeleteAlias) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1046,6 +1079,10 @@ func (m *awsAwsjson11_deserializeOpDeleteCustomKeyStore) HandleDeserialize(ctx c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1162,6 +1199,10 @@ func (m *awsAwsjson11_deserializeOpDeleteImportedKeyMaterial) HandleDeserialize( return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1262,6 +1303,10 @@ func (m *awsAwsjson11_deserializeOpDeriveSharedSecret) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1393,6 +1438,10 @@ func (m *awsAwsjson11_deserializeOpDescribeCustomKeyStores) HandleDeserialize(ct return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown 
transport type %T", out.RawResponse)} @@ -1506,6 +1555,10 @@ func (m *awsAwsjson11_deserializeOpDescribeKey) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1622,6 +1675,10 @@ func (m *awsAwsjson11_deserializeOpDisableKey) HandleDeserialize(ctx context.Con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1719,6 +1776,10 @@ func (m *awsAwsjson11_deserializeOpDisableKeyRotation) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1822,6 +1883,10 @@ func (m *awsAwsjson11_deserializeOpDisconnectCustomKeyStore) HandleDeserialize(c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1935,6 +2000,10 @@ func (m *awsAwsjson11_deserializeOpEnableKey) HandleDeserialize(ctx context.Cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2035,6 +2104,10 @@ func (m *awsAwsjson11_deserializeOpEnableKeyRotation) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2138,6 +2211,10 @@ func (m *awsAwsjson11_deserializeOpEncrypt) HandleDeserialize(ctx context.Contex return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2269,6 +2346,10 @@ func (m *awsAwsjson11_deserializeOpGenerateDataKey) HandleDeserialize(ctx contex return out, 
metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2400,6 +2481,10 @@ func (m *awsAwsjson11_deserializeOpGenerateDataKeyPair) HandleDeserialize(ctx co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2534,6 +2619,10 @@ func (m *awsAwsjson11_deserializeOpGenerateDataKeyPairWithoutPlaintext) HandleDe return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2668,6 +2757,10 @@ func (m *awsAwsjson11_deserializeOpGenerateDataKeyWithoutPlaintext) HandleDeseri return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2799,6 +2892,10 @@ func (m *awsAwsjson11_deserializeOpGenerateMac) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2927,6 +3024,10 @@ func (m *awsAwsjson11_deserializeOpGenerateRandom) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3046,6 +3147,10 @@ func (m *awsAwsjson11_deserializeOpGetKeyPolicy) HandleDeserialize(ctx context.C return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3165,6 +3270,10 @@ func (m *awsAwsjson11_deserializeOpGetKeyRotationStatus) HandleDeserialize(ctx c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + 
defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3287,6 +3396,10 @@ func (m *awsAwsjson11_deserializeOpGetParametersForImport) HandleDeserialize(ctx return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3409,6 +3522,10 @@ func (m *awsAwsjson11_deserializeOpGetPublicKey) HandleDeserialize(ctx context.C return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3543,6 +3660,10 @@ func (m *awsAwsjson11_deserializeOpImportKeyMaterial) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3677,6 +3798,10 @@ func (m *awsAwsjson11_deserializeOpListAliases) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3796,6 +3921,10 @@ func (m *awsAwsjson11_deserializeOpListGrants) HandleDeserialize(ctx context.Con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3921,6 +4050,10 @@ func (m *awsAwsjson11_deserializeOpListKeyPolicies) HandleDeserialize(ctx contex return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4040,6 +4173,10 @@ func (m *awsAwsjson11_deserializeOpListKeyRotations) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: 
fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4162,6 +4299,10 @@ func (m *awsAwsjson11_deserializeOpListKeys) HandleDeserialize(ctx context.Conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4275,6 +4416,10 @@ func (m *awsAwsjson11_deserializeOpListResourceTags) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4391,6 +4536,10 @@ func (m *awsAwsjson11_deserializeOpListRetirableGrants) HandleDeserialize(ctx co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4510,6 +4659,10 @@ func (m *awsAwsjson11_deserializeOpPutKeyPolicy) HandleDeserialize(ctx context.C return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4616,6 +4769,10 @@ func (m *awsAwsjson11_deserializeOpReEncrypt) HandleDeserialize(ctx context.Cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4753,6 +4910,10 @@ func (m *awsAwsjson11_deserializeOpReplicateKey) HandleDeserialize(ctx context.C return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4887,6 +5048,10 @@ func (m *awsAwsjson11_deserializeOpRetireGrant) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4993,6 +5158,10 @@ func (m *awsAwsjson11_deserializeOpRevokeGrant) HandleDeserialize(ctx 
context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5096,6 +5265,10 @@ func (m *awsAwsjson11_deserializeOpRotateKeyOnDemand) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5227,6 +5400,10 @@ func (m *awsAwsjson11_deserializeOpScheduleKeyDeletion) HandleDeserialize(ctx co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5346,6 +5523,10 @@ func (m *awsAwsjson11_deserializeOpSign) HandleDeserialize(ctx context.Context, return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5477,6 +5658,10 @@ func (m *awsAwsjson11_deserializeOpTagResource) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5577,6 +5762,10 @@ func (m *awsAwsjson11_deserializeOpUntagResource) HandleDeserialize(ctx context. 
return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5674,6 +5863,10 @@ func (m *awsAwsjson11_deserializeOpUpdateAlias) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5771,6 +5964,10 @@ func (m *awsAwsjson11_deserializeOpUpdateCustomKeyStore) HandleDeserialize(ctx c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5926,6 +6123,10 @@ func (m *awsAwsjson11_deserializeOpUpdateKeyDescription) HandleDeserialize(ctx c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6023,6 +6224,10 @@ func (m *awsAwsjson11_deserializeOpUpdatePrimaryRegion) HandleDeserialize(ctx co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6123,6 +6328,10 @@ func (m *awsAwsjson11_deserializeOpVerify) HandleDeserialize(ctx context.Context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6257,6 +6466,10 @@ func (m *awsAwsjson11_deserializeOpVerifyMac) HandleDeserialize(ctx context.Cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go index 57bd194b65..0d9f88d68b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go @@ -16,6 +16,7 @@ import ( smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" "net/url" @@ -483,14 +484,13 @@ func (*resolveEndpointV2Middleware) ID() string { func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "ResolveEndpoint") + defer span.End() + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { return next.HandleFinalize(ctx, in) } - if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil { - return out, metadata, fmt.Errorf("invalid accountID set: %w", err) - } - req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) @@ -501,11 +501,16 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid } params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) - endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", + func() (smithyendpoints.Endpoint, error) { + return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + }) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) } + span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) + if endpt.URI.RawPath == "" && req.URL.RawPath != "" { endpt.URI.RawPath = endpt.URI.Path } @@ -527,5 +532,6 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid rscheme.SignerProperties.SetAll(&o.SignerProperties) } + span.End() return next.HandleFinalize(ctx, in) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go index 48d8c52bc9..b9c3b883fa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go @@ -3,4 +3,4 @@ package kms // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.35.7" +const goModuleVersion = "1.37.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/options.go index 2402184ddb..e7f5513b48 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/options.go @@ -9,7 +9,9 @@ import ( internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" smithyauth "github.com/aws/smithy-go/auth" "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" ) @@ -24,9 +26,6 @@ type Options struct { // modify this list for per operation behavior. APIOptions []func(*middleware.Stack) error - // Indicates how aws account ID is applied in endpoint2.0 routing - AccountIDEndpointMode aws.AccountIDEndpointMode - // The optional application specific identifier appended to the User-Agent header. 
AppID string @@ -69,6 +68,9 @@ type Options struct { // The logger writer interface to write logging messages to. Logger logging.Logger + // The client meter provider. + MeterProvider metrics.MeterProvider + // The region to send requests to. (Required) Region string @@ -103,6 +105,9 @@ type Options struct { // within your applications. RuntimeEnvironment aws.RuntimeEnvironment + // The client tracer provider. + TracerProvider tracing.TracerProvider + // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved // value was at that point in time. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/serializers.go index 0548a52e0d..1f9dabb9c0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/serializers.go @@ -12,6 +12,7 @@ import ( smithyjson "github.com/aws/smithy-go/encoding/json" "github.com/aws/smithy-go/middleware" smithytime "github.com/aws/smithy-go/time" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "path" ) @@ -26,6 +27,10 @@ func (*awsAwsjson11_serializeOpCancelKeyDeletion) ID() string { func (m *awsAwsjson11_serializeOpCancelKeyDeletion) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -68,6 +73,8 @@ func (m *awsAwsjson11_serializeOpCancelKeyDeletion) HandleSerialize(ctx context. 
} in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -81,6 +88,10 @@ func (*awsAwsjson11_serializeOpConnectCustomKeyStore) ID() string { func (m *awsAwsjson11_serializeOpConnectCustomKeyStore) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -123,6 +134,8 @@ func (m *awsAwsjson11_serializeOpConnectCustomKeyStore) HandleSerialize(ctx cont } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -136,6 +149,10 @@ func (*awsAwsjson11_serializeOpCreateAlias) ID() string { func (m *awsAwsjson11_serializeOpCreateAlias) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -178,6 +195,8 @@ func (m *awsAwsjson11_serializeOpCreateAlias) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -191,6 +210,10 @@ func (*awsAwsjson11_serializeOpCreateCustomKeyStore) ID() string { func (m *awsAwsjson11_serializeOpCreateCustomKeyStore) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -233,6 +256,8 @@ func (m *awsAwsjson11_serializeOpCreateCustomKeyStore) HandleSerialize(ctx conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -246,6 +271,10 @@ func (*awsAwsjson11_serializeOpCreateGrant) ID() string { func (m *awsAwsjson11_serializeOpCreateGrant) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -288,6 +317,8 @@ func (m *awsAwsjson11_serializeOpCreateGrant) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -301,6 +332,10 @@ func (*awsAwsjson11_serializeOpCreateKey) ID() string { func (m 
*awsAwsjson11_serializeOpCreateKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -343,6 +378,8 @@ func (m *awsAwsjson11_serializeOpCreateKey) HandleSerialize(ctx context.Context, } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -356,6 +393,10 @@ func (*awsAwsjson11_serializeOpDecrypt) ID() string { func (m *awsAwsjson11_serializeOpDecrypt) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -398,6 +439,8 @@ func (m *awsAwsjson11_serializeOpDecrypt) HandleSerialize(ctx context.Context, i } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -411,6 +454,10 @@ func (*awsAwsjson11_serializeOpDeleteAlias) ID() string { func (m *awsAwsjson11_serializeOpDeleteAlias) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -453,6 +500,8 @@ func (m *awsAwsjson11_serializeOpDeleteAlias) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -466,6 +515,10 @@ func (*awsAwsjson11_serializeOpDeleteCustomKeyStore) ID() string { func (m *awsAwsjson11_serializeOpDeleteCustomKeyStore) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -508,6 +561,8 @@ func (m *awsAwsjson11_serializeOpDeleteCustomKeyStore) HandleSerialize(ctx conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -521,6 +576,10 @@ func (*awsAwsjson11_serializeOpDeleteImportedKeyMaterial) ID() string { func (m *awsAwsjson11_serializeOpDeleteImportedKeyMaterial) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, 
err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -563,6 +622,8 @@ func (m *awsAwsjson11_serializeOpDeleteImportedKeyMaterial) HandleSerialize(ctx } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -576,6 +637,10 @@ func (*awsAwsjson11_serializeOpDeriveSharedSecret) ID() string { func (m *awsAwsjson11_serializeOpDeriveSharedSecret) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -618,6 +683,8 @@ func (m *awsAwsjson11_serializeOpDeriveSharedSecret) HandleSerialize(ctx context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -631,6 +698,10 @@ func (*awsAwsjson11_serializeOpDescribeCustomKeyStores) ID() string { func (m *awsAwsjson11_serializeOpDescribeCustomKeyStores) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -673,6 +744,8 @@ func (m *awsAwsjson11_serializeOpDescribeCustomKeyStores) HandleSerialize(ctx co } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -686,6 +759,10 @@ func (*awsAwsjson11_serializeOpDescribeKey) ID() string { func (m *awsAwsjson11_serializeOpDescribeKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -728,6 +805,8 @@ func (m *awsAwsjson11_serializeOpDescribeKey) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -741,6 +820,10 @@ func (*awsAwsjson11_serializeOpDisableKey) ID() string { func (m *awsAwsjson11_serializeOpDisableKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := 
in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -783,6 +866,8 @@ func (m *awsAwsjson11_serializeOpDisableKey) HandleSerialize(ctx context.Context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -796,6 +881,10 @@ func (*awsAwsjson11_serializeOpDisableKeyRotation) ID() string { func (m *awsAwsjson11_serializeOpDisableKeyRotation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -838,6 +927,8 @@ func (m *awsAwsjson11_serializeOpDisableKeyRotation) HandleSerialize(ctx context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -851,6 +942,10 @@ func (*awsAwsjson11_serializeOpDisconnectCustomKeyStore) ID() string { func (m *awsAwsjson11_serializeOpDisconnectCustomKeyStore) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -893,6 +988,8 @@ func (m *awsAwsjson11_serializeOpDisconnectCustomKeyStore) HandleSerialize(ctx c } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -906,6 +1003,10 @@ func (*awsAwsjson11_serializeOpEnableKey) ID() string { func (m *awsAwsjson11_serializeOpEnableKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -948,6 +1049,8 @@ func (m *awsAwsjson11_serializeOpEnableKey) HandleSerialize(ctx context.Context, } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -961,6 +1064,10 @@ func (*awsAwsjson11_serializeOpEnableKeyRotation) ID() string { func (m *awsAwsjson11_serializeOpEnableKeyRotation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1003,6 +1110,8 @@ func (m 
*awsAwsjson11_serializeOpEnableKeyRotation) HandleSerialize(ctx context. } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1016,6 +1125,10 @@ func (*awsAwsjson11_serializeOpEncrypt) ID() string { func (m *awsAwsjson11_serializeOpEncrypt) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1058,6 +1171,8 @@ func (m *awsAwsjson11_serializeOpEncrypt) HandleSerialize(ctx context.Context, i } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1071,6 +1186,10 @@ func (*awsAwsjson11_serializeOpGenerateDataKey) ID() string { func (m *awsAwsjson11_serializeOpGenerateDataKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1113,6 +1232,8 @@ func (m *awsAwsjson11_serializeOpGenerateDataKey) HandleSerialize(ctx context.Co } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1126,6 +1247,10 @@ func (*awsAwsjson11_serializeOpGenerateDataKeyPair) ID() string { func (m *awsAwsjson11_serializeOpGenerateDataKeyPair) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1168,6 +1293,8 @@ func (m *awsAwsjson11_serializeOpGenerateDataKeyPair) HandleSerialize(ctx contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1181,6 +1308,10 @@ func (*awsAwsjson11_serializeOpGenerateDataKeyPairWithoutPlaintext) ID() string func (m *awsAwsjson11_serializeOpGenerateDataKeyPairWithoutPlaintext) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1223,6 +1354,8 @@ func (m *awsAwsjson11_serializeOpGenerateDataKeyPairWithoutPlaintext) HandleSeri } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } 
@@ -1236,6 +1369,10 @@ func (*awsAwsjson11_serializeOpGenerateDataKeyWithoutPlaintext) ID() string { func (m *awsAwsjson11_serializeOpGenerateDataKeyWithoutPlaintext) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1278,6 +1415,8 @@ func (m *awsAwsjson11_serializeOpGenerateDataKeyWithoutPlaintext) HandleSerializ } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1291,6 +1430,10 @@ func (*awsAwsjson11_serializeOpGenerateMac) ID() string { func (m *awsAwsjson11_serializeOpGenerateMac) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1333,6 +1476,8 @@ func (m *awsAwsjson11_serializeOpGenerateMac) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1346,6 +1491,10 @@ func (*awsAwsjson11_serializeOpGenerateRandom) ID() string { func (m *awsAwsjson11_serializeOpGenerateRandom) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1388,6 +1537,8 @@ func (m *awsAwsjson11_serializeOpGenerateRandom) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1401,6 +1552,10 @@ func (*awsAwsjson11_serializeOpGetKeyPolicy) ID() string { func (m *awsAwsjson11_serializeOpGetKeyPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1443,6 +1598,8 @@ func (m *awsAwsjson11_serializeOpGetKeyPolicy) HandleSerialize(ctx context.Conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1456,6 +1613,10 @@ func (*awsAwsjson11_serializeOpGetKeyRotationStatus) ID() string { func (m *awsAwsjson11_serializeOpGetKeyRotationStatus) HandleSerialize(ctx 
context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1498,6 +1659,8 @@ func (m *awsAwsjson11_serializeOpGetKeyRotationStatus) HandleSerialize(ctx conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1511,6 +1674,10 @@ func (*awsAwsjson11_serializeOpGetParametersForImport) ID() string { func (m *awsAwsjson11_serializeOpGetParametersForImport) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1553,6 +1720,8 @@ func (m *awsAwsjson11_serializeOpGetParametersForImport) HandleSerialize(ctx con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1566,6 +1735,10 @@ func (*awsAwsjson11_serializeOpGetPublicKey) ID() string { func (m *awsAwsjson11_serializeOpGetPublicKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1608,6 +1781,8 @@ func (m *awsAwsjson11_serializeOpGetPublicKey) HandleSerialize(ctx context.Conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1621,6 +1796,10 @@ func (*awsAwsjson11_serializeOpImportKeyMaterial) ID() string { func (m *awsAwsjson11_serializeOpImportKeyMaterial) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1663,6 +1842,8 @@ func (m *awsAwsjson11_serializeOpImportKeyMaterial) HandleSerialize(ctx context. 
} in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1676,6 +1857,10 @@ func (*awsAwsjson11_serializeOpListAliases) ID() string { func (m *awsAwsjson11_serializeOpListAliases) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1718,6 +1903,8 @@ func (m *awsAwsjson11_serializeOpListAliases) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1731,6 +1918,10 @@ func (*awsAwsjson11_serializeOpListGrants) ID() string { func (m *awsAwsjson11_serializeOpListGrants) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1773,6 +1964,8 @@ func (m *awsAwsjson11_serializeOpListGrants) HandleSerialize(ctx context.Context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1786,6 +1979,10 @@ func (*awsAwsjson11_serializeOpListKeyPolicies) ID() string { func (m *awsAwsjson11_serializeOpListKeyPolicies) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1828,6 +2025,8 @@ func (m *awsAwsjson11_serializeOpListKeyPolicies) HandleSerialize(ctx context.Co } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1841,6 +2040,10 @@ func (*awsAwsjson11_serializeOpListKeyRotations) ID() string { func (m *awsAwsjson11_serializeOpListKeyRotations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1883,6 +2086,8 @@ func (m *awsAwsjson11_serializeOpListKeyRotations) HandleSerialize(ctx context.C } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1896,6 +2101,10 @@ func (*awsAwsjson11_serializeOpListKeys) ID() string { func (m 
*awsAwsjson11_serializeOpListKeys) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1938,6 +2147,8 @@ func (m *awsAwsjson11_serializeOpListKeys) HandleSerialize(ctx context.Context, } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -1951,6 +2162,10 @@ func (*awsAwsjson11_serializeOpListResourceTags) ID() string { func (m *awsAwsjson11_serializeOpListResourceTags) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1993,6 +2208,8 @@ func (m *awsAwsjson11_serializeOpListResourceTags) HandleSerialize(ctx context.C } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2006,6 +2223,10 @@ func (*awsAwsjson11_serializeOpListRetirableGrants) ID() string { func (m *awsAwsjson11_serializeOpListRetirableGrants) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2048,6 +2269,8 @@ func (m *awsAwsjson11_serializeOpListRetirableGrants) HandleSerialize(ctx contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2061,6 +2284,10 @@ func (*awsAwsjson11_serializeOpPutKeyPolicy) ID() string { func (m *awsAwsjson11_serializeOpPutKeyPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2103,6 +2330,8 @@ func (m *awsAwsjson11_serializeOpPutKeyPolicy) HandleSerialize(ctx context.Conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2116,6 +2345,10 @@ func (*awsAwsjson11_serializeOpReEncrypt) ID() string { func (m *awsAwsjson11_serializeOpReEncrypt) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, 
err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2158,6 +2391,8 @@ func (m *awsAwsjson11_serializeOpReEncrypt) HandleSerialize(ctx context.Context, } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2171,6 +2406,10 @@ func (*awsAwsjson11_serializeOpReplicateKey) ID() string { func (m *awsAwsjson11_serializeOpReplicateKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2213,6 +2452,8 @@ func (m *awsAwsjson11_serializeOpReplicateKey) HandleSerialize(ctx context.Conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2226,6 +2467,10 @@ func (*awsAwsjson11_serializeOpRetireGrant) ID() string { func (m *awsAwsjson11_serializeOpRetireGrant) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2268,6 +2513,8 @@ func (m *awsAwsjson11_serializeOpRetireGrant) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2281,6 +2528,10 @@ func (*awsAwsjson11_serializeOpRevokeGrant) ID() string { func (m *awsAwsjson11_serializeOpRevokeGrant) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2323,6 +2574,8 @@ func (m *awsAwsjson11_serializeOpRevokeGrant) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2336,6 +2589,10 @@ func (*awsAwsjson11_serializeOpRotateKeyOnDemand) ID() string { func (m *awsAwsjson11_serializeOpRotateKeyOnDemand) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := 
in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2378,6 +2635,8 @@ func (m *awsAwsjson11_serializeOpRotateKeyOnDemand) HandleSerialize(ctx context. } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2391,6 +2650,10 @@ func (*awsAwsjson11_serializeOpScheduleKeyDeletion) ID() string { func (m *awsAwsjson11_serializeOpScheduleKeyDeletion) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2433,6 +2696,8 @@ func (m *awsAwsjson11_serializeOpScheduleKeyDeletion) HandleSerialize(ctx contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2446,6 +2711,10 @@ func (*awsAwsjson11_serializeOpSign) ID() string { func (m *awsAwsjson11_serializeOpSign) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2488,6 +2757,8 @@ func (m *awsAwsjson11_serializeOpSign) HandleSerialize(ctx context.Context, in m } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2501,6 +2772,10 @@ func (*awsAwsjson11_serializeOpTagResource) ID() string { func (m *awsAwsjson11_serializeOpTagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2543,6 +2818,8 @@ func (m *awsAwsjson11_serializeOpTagResource) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2556,6 +2833,10 @@ func (*awsAwsjson11_serializeOpUntagResource) ID() string { func (m *awsAwsjson11_serializeOpUntagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2598,6 +2879,8 @@ func (m 
*awsAwsjson11_serializeOpUntagResource) HandleSerialize(ctx context.Cont } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2611,6 +2894,10 @@ func (*awsAwsjson11_serializeOpUpdateAlias) ID() string { func (m *awsAwsjson11_serializeOpUpdateAlias) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2653,6 +2940,8 @@ func (m *awsAwsjson11_serializeOpUpdateAlias) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2666,6 +2955,10 @@ func (*awsAwsjson11_serializeOpUpdateCustomKeyStore) ID() string { func (m *awsAwsjson11_serializeOpUpdateCustomKeyStore) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2708,6 +3001,8 @@ func (m *awsAwsjson11_serializeOpUpdateCustomKeyStore) HandleSerialize(ctx conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2721,6 +3016,10 @@ func (*awsAwsjson11_serializeOpUpdateKeyDescription) ID() string { func (m *awsAwsjson11_serializeOpUpdateKeyDescription) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2763,6 +3062,8 @@ func (m *awsAwsjson11_serializeOpUpdateKeyDescription) HandleSerialize(ctx conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2776,6 +3077,10 @@ func (*awsAwsjson11_serializeOpUpdatePrimaryRegion) ID() string { func (m *awsAwsjson11_serializeOpUpdatePrimaryRegion) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2818,6 +3123,8 @@ func (m *awsAwsjson11_serializeOpUpdatePrimaryRegion) HandleSerialize(ctx contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ 
-2831,6 +3138,10 @@ func (*awsAwsjson11_serializeOpVerify) ID() string { func (m *awsAwsjson11_serializeOpVerify) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2873,6 +3184,8 @@ func (m *awsAwsjson11_serializeOpVerify) HandleSerialize(ctx context.Context, in } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -2886,6 +3199,10 @@ func (*awsAwsjson11_serializeOpVerifyMac) ID() string { func (m *awsAwsjson11_serializeOpVerifyMac) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2928,6 +3245,8 @@ func (m *awsAwsjson11_serializeOpVerifyMac) HandleSerialize(ctx context.Context, } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsAwsjson11_serializeDocumentEncryptionContextType(v map[string]string, value smithyjson.Value) error { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md index a72bd91dfb..c4bb87d6d2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -1,3 +1,41 @@ +# v1.24.2 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.1 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.4 (2024-10-03) + +* No change notes available for this release. + +# v1.23.3 (2024-09-27) + +* No change notes available for this release. + +# v1.23.2 (2024-09-25) + +* No change notes available for this release. + +# v1.23.1 (2024-09-23) + +* No change notes available for this release. + +# v1.23.0 (2024-09-20) + +* **Feature**: Add tracing and metrics support to service clients. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.8 (2024-09-17) + +* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution. + # v1.22.7 (2024-09-04) * No change notes available for this release. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go index a06c6e738f..644ee1e058 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go @@ -4,6 +4,7 @@ package sso import ( "context" + "errors" "fmt" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/defaults" @@ -19,7 +20,9 @@ import ( smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" @@ -30,6 +33,133 @@ import ( const ServiceID = "SSO" const ServiceAPIVersion = "2019-06-10" +type operationMetrics struct { + Duration metrics.Float64Histogram + SerializeDuration metrics.Float64Histogram + ResolveIdentityDuration metrics.Float64Histogram + ResolveEndpointDuration metrics.Float64Histogram + SignRequestDuration metrics.Float64Histogram + DeserializeDuration metrics.Float64Histogram +} + +func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { + switch name { + case "client.call.duration": + return m.Duration + case "client.call.serialization_duration": + return m.SerializeDuration + case "client.call.resolve_identity_duration": + return m.ResolveIdentityDuration + case "client.call.resolve_endpoint_duration": + return m.ResolveEndpointDuration + case "client.call.signing_duration": + return m.SignRequestDuration + case "client.call.deserialization_duration": + return m.DeserializeDuration + default: + panic("unrecognized operation metric") + } +} + +func timeOperationMetric[T any]( + ctx context.Context, metric string, fn func() (T, error), + opts ...metrics.RecordMetricOption, +) (T, error) { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + start := time.Now() + v, err := fn() + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) + return v, err +} + +func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + var ended bool + start := time.Now() + return func() { + if ended { + return + } + ended = true + + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) 
+ } +} + +func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { + return func(o *metrics.RecordMetricOptions) { + o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) + o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) + } +} + +type operationMetricsKey struct{} + +func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { + meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/sso") + om := &operationMetrics{} + + var err error + + om.Duration, err = operationMetricTimer(meter, "client.call.duration", + "Overall call duration (including retries and time to send or receive request and response body)") + if err != nil { + return nil, err + } + om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", + "The time it takes to serialize a message body") + if err != nil { + return nil, err + } + om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", + "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") + if err != nil { + return nil, err + } + om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", + "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") + if err != nil { + return nil, err + } + om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", + "The time it takes to sign a request") + if err != nil { + return nil, err + } + om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", + "The time it takes to deserialize a message body") + if err != nil { + return nil, err + } + + return context.WithValue(parent, operationMetricsKey{}, om), nil +} + +func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { + return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = desc + }) +} + +func getOperationMetrics(ctx context.Context) *operationMetrics { + return ctx.Value(operationMetricsKey{}).(*operationMetrics) +} + +func operationTracer(p tracing.TracerProvider) tracing.Tracer { + return p.Tracer("github.com/aws/aws-sdk-go-v2/service/sso") +} + // Client provides the API client to make operations call for AWS Single Sign-On. 
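startMetricTimer above returns a closure that records the elapsed time at most once, which is why the generated serializers and deserializers later in this patch both defer endTimer() and call it explicitly before the happy-path return without double-counting. A standalone sketch of the same guard pattern; the names here are illustrative and not part of the SDK.

package main

import (
	"fmt"
	"time"
)

// newStopwatch mirrors the guard used by startMetricTimer: the returned stop
// function records elapsed time on the first call and is a no-op afterwards,
// so "defer stop()" plus an explicit early stop() records exactly once.
func newStopwatch(record func(seconds float64)) func() {
	var ended bool
	start := time.Now()
	return func() {
		if ended {
			return
		}
		ended = true
		record(time.Since(start).Seconds())
	}
}

func main() {
	stop := newStopwatch(func(s float64) { fmt.Printf("took %.6fs\n", s) })
	defer stop() // safety net; becomes a no-op after the explicit call below

	time.Sleep(10 * time.Millisecond)
	stop() // records once; the deferred call will not record again
}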
type Client struct { options Options @@ -56,6 +186,10 @@ func New(options Options, optFns ...func(*Options)) *Client { resolveEndpointResolverV2(&options) + resolveTracerProvider(&options) + + resolveMeterProvider(&options) + resolveAuthSchemeResolver(&options) for _, fn := range optFns { @@ -88,8 +222,15 @@ func (c *Client) Options() Options { return c.options.Copy() } -func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { +func (c *Client) invokeOperation( + ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, +) ( + result interface{}, metadata middleware.Metadata, err error, +) { ctx = middleware.ClearStackValues(ctx) + ctx = middleware.WithServiceID(ctx, ServiceID) + ctx = middleware.WithOperationName(ctx, opID) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) options := c.options.Copy() @@ -113,15 +254,56 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf } } - handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) - result, metadata, err = handler.Handle(ctx, params) + ctx, err = withOperationMetrics(ctx, options.MeterProvider) if err != nil { + return nil, metadata, err + } + + tracer := operationTracer(options.TracerProvider) + spanName := fmt.Sprintf("%s.%s", ServiceID, opID) + + ctx = tracing.WithOperationTracer(ctx, tracer) + + ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) { + o.Kind = tracing.SpanKindClient + o.Properties.Set("rpc.system", "aws-api") + o.Properties.Set("rpc.method", opID) + o.Properties.Set("rpc.service", ServiceID) + }) + endTimer := startMetricTimer(ctx, "client.call.duration") + defer endTimer() + defer span.End() + + handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) { + o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sso") + }) + decorated := middleware.DecorateHandler(handler, stack) + result, metadata, err = decorated.Handle(ctx, params) + if err != nil { + span.SetProperty("exception.type", fmt.Sprintf("%T", err)) + span.SetProperty("exception.message", err.Error()) + + var aerr smithy.APIError + if errors.As(err, &aerr) { + span.SetProperty("api.error_code", aerr.ErrorCode()) + span.SetProperty("api.error_message", aerr.ErrorMessage()) + span.SetProperty("api.error_fault", aerr.ErrorFault().String()) + } + err = &smithy.OperationError{ ServiceID: ServiceID, OperationName: opID, Err: err, } } + + span.SetProperty("error", err != nil) + if err == nil { + span.SetStatus(tracing.SpanStatusOK) + } else { + span.SetStatus(tracing.SpanStatusError) + } + return result, metadata, err } @@ -159,7 +341,7 @@ func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, o if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { return fmt.Errorf("add ResolveEndpointV2: %v", err) } - if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { + if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil { return fmt.Errorf("add Signing: %w", err) } return nil @@ -237,16 +419,15 @@ func setResolvedDefaultsMode(o *Options) { // 
NewFromConfig returns a new client from the provided config. func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, - AccountIDEndpointMode: cfg.AccountIDEndpointMode, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -434,6 +615,30 @@ func addRawResponseToMetadata(stack *middleware.Stack) error { func addRecordResponseTiming(stack *middleware.Stack) error { return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) } + +func addSpanRetryLoop(stack *middleware.Stack, options Options) error { + return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) +} + +type spanRetryLoop struct { + options Options +} + +func (*spanRetryLoop) ID() string { + return "spanRetryLoop" +} + +func (m *spanRetryLoop) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + middleware.FinalizeOutput, middleware.Metadata, error, +) { + tracer := operationTracer(m.options.TracerProvider) + ctx, span := tracer.StartSpan(ctx, "RetryLoop") + defer span.End() + + return next.HandleFinalize(ctx, in) +} func addStreamingEventsPayload(stack *middleware.Stack) error { return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) } @@ -477,6 +682,7 @@ func addIsPaginatorUserAgent(o *Options) { func addRetry(stack *middleware.Stack, o Options) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() + m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sso") }) if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { return err @@ -540,25 +746,6 @@ func initializeTimeOffsetResolver(c *Client) { c.timeOffset = new(atomic.Int64) } -func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error { - switch mode { - case aws.AccountIDEndpointModeUnset: - case aws.AccountIDEndpointModePreferred: - case aws.AccountIDEndpointModeDisabled: - case aws.AccountIDEndpointModeRequired: - if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok { - return fmt.Errorf("accountID is required but not set") - } else if ca.Credentials.AccountID == "" { - return fmt.Errorf("accountID is required but not set") - } - // default check in case invalid mode is configured through request config - default: - return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode) - } - - return nil -} - func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { ua, err := getOrAddRequestUserAgent(stack) if err != nil { @@ -574,6 +761,18 @@ func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { return nil } +func resolveTracerProvider(options *Options) { + if options.TracerProvider == nil { + options.TracerProvider = &tracing.NopTracerProvider{} + } +} + +func 
resolveMeterProvider(options *Options) { + if options.MeterProvider == nil { + options.MeterProvider = metrics.NopMeterProvider{} + } +} + func addRecursionDetection(stack *middleware.Stack) error { return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) } @@ -625,3 +824,89 @@ func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { DisableHTTPS: o.EndpointOptions.DisableHTTPS, }, "ResolveEndpointV2", middleware.After) } + +type spanInitializeStart struct { +} + +func (*spanInitializeStart) ID() string { + return "spanInitializeStart" +} + +func (m *spanInitializeStart) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "Initialize") + + return next.HandleInitialize(ctx, in) +} + +type spanInitializeEnd struct { +} + +func (*spanInitializeEnd) ID() string { + return "spanInitializeEnd" +} + +func (m *spanInitializeEnd) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleInitialize(ctx, in) +} + +type spanBuildRequestStart struct { +} + +func (*spanBuildRequestStart) ID() string { + return "spanBuildRequestStart" +} + +func (m *spanBuildRequestStart) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + middleware.SerializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "BuildRequest") + + return next.HandleSerialize(ctx, in) +} + +type spanBuildRequestEnd struct { +} + +func (*spanBuildRequestEnd) ID() string { + return "spanBuildRequestEnd" +} + +func (m *spanBuildRequestEnd) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + middleware.BuildOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleBuild(ctx, in) +} + +func addSpanInitializeStart(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) +} + +func addSpanInitializeEnd(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) +} + +func addSpanBuildRequestStart(stack *middleware.Stack) error { + return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) +} + +func addSpanBuildRequestEnd(stack *middleware.Stack) error { + return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go index 5ce00b4961..a656020237 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go @@ -102,6 +102,9 @@ func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Sta if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -141,6 +144,18 @@ func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = 
addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go index f20e3acbfc..315526ef1a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go @@ -107,6 +107,9 @@ func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -146,6 +149,18 @@ func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go index 391b567db9..d867b78a6f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go @@ -106,6 +106,9 @@ func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -145,6 +148,18 @@ func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go index 456e4a3717..434b430852 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go @@ -101,6 +101,9 @@ func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -140,6 +143,18 @@ func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if 
err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go index a93a77cd7f..366963b49f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go @@ -8,7 +8,9 @@ import ( awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" smithy "github.com/aws/smithy-go" smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -169,6 +171,9 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") + defer span.End() + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { @@ -181,6 +186,9 @@ func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in mid } ctx = setResolvedAuthScheme(ctx, scheme) + + span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID()) + span.End() return next.HandleFinalize(ctx, in) } @@ -240,7 +248,10 @@ func (*getIdentityMiddleware) ID() string { func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - rscheme := getResolvedAuthScheme(ctx) + innerCtx, span := tracing.StartSpan(ctx, "GetIdentity") + defer span.End() + + rscheme := getResolvedAuthScheme(innerCtx) if rscheme == nil { return out, metadata, fmt.Errorf("no resolved auth scheme") } @@ -250,12 +261,20 @@ func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middlewar return out, metadata, fmt.Errorf("no identity resolver") } - identity, err := resolver.GetIdentity(ctx, rscheme.IdentityProperties) + identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration", + func() (smithyauth.Identity, error) { + return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties) + }, + func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) if err != nil { return out, metadata, fmt.Errorf("get identity: %w", err) } ctx = setIdentity(ctx, identity) + + span.End() return next.HandleFinalize(ctx, in) } @@ -271,6 +290,7 @@ func getIdentity(ctx context.Context) smithyauth.Identity { } type signRequestMiddleware struct { + options Options } func (*signRequestMiddleware) ID() string { @@ -280,6 +300,9 @@ func (*signRequestMiddleware) ID() string { func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "SignRequest") + defer span.End() + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) @@ -300,9 +323,15 @@ func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middlewar 
return out, metadata, fmt.Errorf("no signer") } - if err := signer.SignRequest(ctx, req, identity, rscheme.SignerProperties); err != nil { + _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) { + return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties) + }, func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) + if err != nil { return out, metadata, fmt.Errorf("sign request: %w", err) } + span.End() return next.HandleFinalize(ctx, in) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go index d6297fa6a1..5f0cce2bf1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go @@ -14,6 +14,7 @@ import ( "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" smithytime "github.com/aws/smithy-go/time" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "io" "io/ioutil" @@ -44,6 +45,10 @@ func (m *awsRestjson1_deserializeOpGetRoleCredentials) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -83,6 +88,7 @@ func (m *awsRestjson1_deserializeOpGetRoleCredentials) HandleDeserialize(ctx con } } + span.End() return out, metadata, err } @@ -200,6 +206,10 @@ func (m *awsRestjson1_deserializeOpListAccountRoles) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -239,6 +249,7 @@ func (m *awsRestjson1_deserializeOpListAccountRoles) HandleDeserialize(ctx conte } } + span.End() return out, metadata, err } @@ -365,6 +376,10 @@ func (m *awsRestjson1_deserializeOpListAccounts) HandleDeserialize(ctx context.C return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -404,6 +419,7 @@ func (m *awsRestjson1_deserializeOpListAccounts) HandleDeserialize(ctx context.C } } + span.End() return out, metadata, err } @@ -530,6 +546,10 @@ func (m *awsRestjson1_deserializeOpLogout) HandleDeserialize(ctx context.Context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -547,6 +567,7 @@ func (m *awsRestjson1_deserializeOpLogout) 
HandleDeserialize(ctx context.Context } } + span.End() return out, metadata, err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go index 75ae283ef8..53c6bc7561 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go @@ -16,6 +16,7 @@ import ( smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" "net/url" @@ -502,14 +503,13 @@ func (*resolveEndpointV2Middleware) ID() string { func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "ResolveEndpoint") + defer span.End() + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { return next.HandleFinalize(ctx, in) } - if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil { - return out, metadata, fmt.Errorf("invalid accountID set: %w", err) - } - req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) @@ -520,11 +520,16 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid } params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) - endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", + func() (smithyendpoints.Endpoint, error) { + return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + }) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) } + span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) + if endpt.URI.RawPath == "" && req.URL.RawPath != "" { endpt.URI.RawPath = endpt.URI.Path } @@ -546,5 +551,6 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid rscheme.SignerProperties.SetAll(&o.SignerProperties) } + span.End() return next.HandleFinalize(ctx, in) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go index 012580e77f..f248e45958 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go @@ -3,4 +3,4 @@ package sso // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.22.7" +const goModuleVersion = "1.24.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go index 0ba182e976..aa744f1594 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go @@ -9,7 +9,9 @@ import ( internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" smithyauth "github.com/aws/smithy-go/auth" "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" ) @@ -24,9 +26,6 @@ type Options struct { // modify this list for per operation behavior. 
APIOptions []func(*middleware.Stack) error - // Indicates how aws account ID is applied in endpoint2.0 routing - AccountIDEndpointMode aws.AccountIDEndpointMode - // The optional application specific identifier appended to the User-Agent header. AppID string @@ -69,6 +68,9 @@ type Options struct { // The logger writer interface to write logging messages to. Logger logging.Logger + // The client meter provider. + MeterProvider metrics.MeterProvider + // The region to send requests to. (Required) Region string @@ -103,6 +105,9 @@ type Options struct { // within your applications. RuntimeEnvironment aws.RuntimeEnvironment + // The client tracer provider. + TracerProvider tracing.TracerProvider + // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved // value was at that point in time. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go index 02e3141156..a7a5b57de0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go @@ -8,6 +8,7 @@ import ( smithy "github.com/aws/smithy-go" "github.com/aws/smithy-go/encoding/httpbinding" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -21,6 +22,10 @@ func (*awsRestjson1_serializeOpGetRoleCredentials) ID() string { func (m *awsRestjson1_serializeOpGetRoleCredentials) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -57,6 +62,8 @@ func (m *awsRestjson1_serializeOpGetRoleCredentials) HandleSerialize(ctx context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(v *GetRoleCredentialsInput, encoder *httpbinding.Encoder) error { @@ -64,7 +71,7 @@ func awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(v *GetRoleCrede return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.AccessToken != nil && len(*v.AccessToken) > 0 { + if v.AccessToken != nil { locationName := "X-Amz-Sso_bearer_token" encoder.SetHeader(locationName).String(*v.AccessToken) } @@ -90,6 +97,10 @@ func (*awsRestjson1_serializeOpListAccountRoles) ID() string { func (m *awsRestjson1_serializeOpListAccountRoles) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -126,6 +137,8 @@ func (m *awsRestjson1_serializeOpListAccountRoles) HandleSerialize(ctx context.C } in.Request = request + endTimer() + 
span.End() return next.HandleSerialize(ctx, in) } func awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(v *ListAccountRolesInput, encoder *httpbinding.Encoder) error { @@ -133,7 +146,7 @@ func awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(v *ListAccountRol return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.AccessToken != nil && len(*v.AccessToken) > 0 { + if v.AccessToken != nil { locationName := "X-Amz-Sso_bearer_token" encoder.SetHeader(locationName).String(*v.AccessToken) } @@ -163,6 +176,10 @@ func (*awsRestjson1_serializeOpListAccounts) ID() string { func (m *awsRestjson1_serializeOpListAccounts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -199,6 +216,8 @@ func (m *awsRestjson1_serializeOpListAccounts) HandleSerialize(ctx context.Conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestjson1_serializeOpHttpBindingsListAccountsInput(v *ListAccountsInput, encoder *httpbinding.Encoder) error { @@ -206,7 +225,7 @@ func awsRestjson1_serializeOpHttpBindingsListAccountsInput(v *ListAccountsInput, return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.AccessToken != nil && len(*v.AccessToken) > 0 { + if v.AccessToken != nil { locationName := "X-Amz-Sso_bearer_token" encoder.SetHeader(locationName).String(*v.AccessToken) } @@ -232,6 +251,10 @@ func (*awsRestjson1_serializeOpLogout) ID() string { func (m *awsRestjson1_serializeOpLogout) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -268,6 +291,8 @@ func (m *awsRestjson1_serializeOpLogout) HandleSerialize(ctx context.Context, in } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestjson1_serializeOpHttpBindingsLogoutInput(v *LogoutInput, encoder *httpbinding.Encoder) error { @@ -275,7 +300,7 @@ func awsRestjson1_serializeOpHttpBindingsLogoutInput(v *LogoutInput, encoder *ht return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.AccessToken != nil && len(*v.AccessToken) > 0 { + if v.AccessToken != nil { locationName := "X-Amz-Sso_bearer_token" encoder.SetHeader(locationName).String(*v.AccessToken) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md index 6924f05836..54c7d83fc1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md @@ -1,3 +1,41 @@ +# v1.28.2 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.1 (2024-10-07) + +* 
**Dependency Update**: Updated to the latest SDK module versions + +# v1.28.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.4 (2024-10-03) + +* No change notes available for this release. + +# v1.27.3 (2024-09-27) + +* No change notes available for this release. + +# v1.27.2 (2024-09-25) + +* No change notes available for this release. + +# v1.27.1 (2024-09-23) + +* No change notes available for this release. + +# v1.27.0 (2024-09-20) + +* **Feature**: Add tracing and metrics support to service clients. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.8 (2024-09-17) + +* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution. + # v1.26.7 (2024-09-04) * No change notes available for this release. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go index 25cd1c0488..0b05bf6c73 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go @@ -4,6 +4,7 @@ package ssooidc import ( "context" + "errors" "fmt" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/defaults" @@ -19,7 +20,9 @@ import ( smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" @@ -30,6 +33,133 @@ import ( const ServiceID = "SSO OIDC" const ServiceAPIVersion = "2019-06-10" +type operationMetrics struct { + Duration metrics.Float64Histogram + SerializeDuration metrics.Float64Histogram + ResolveIdentityDuration metrics.Float64Histogram + ResolveEndpointDuration metrics.Float64Histogram + SignRequestDuration metrics.Float64Histogram + DeserializeDuration metrics.Float64Histogram +} + +func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { + switch name { + case "client.call.duration": + return m.Duration + case "client.call.serialization_duration": + return m.SerializeDuration + case "client.call.resolve_identity_duration": + return m.ResolveIdentityDuration + case "client.call.resolve_endpoint_duration": + return m.ResolveEndpointDuration + case "client.call.signing_duration": + return m.SignRequestDuration + case "client.call.deserialization_duration": + return m.DeserializeDuration + default: + panic("unrecognized operation metric") + } +} + +func timeOperationMetric[T any]( + ctx context.Context, metric string, fn func() (T, error), + opts ...metrics.RecordMetricOption, +) (T, error) { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + start := time.Now() + v, err := fn() + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) + return v, err +} + +func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) 
+ + var ended bool + start := time.Now() + return func() { + if ended { + return + } + ended = true + + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) + } +} + +func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { + return func(o *metrics.RecordMetricOptions) { + o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) + o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) + } +} + +type operationMetricsKey struct{} + +func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { + meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/ssooidc") + om := &operationMetrics{} + + var err error + + om.Duration, err = operationMetricTimer(meter, "client.call.duration", + "Overall call duration (including retries and time to send or receive request and response body)") + if err != nil { + return nil, err + } + om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", + "The time it takes to serialize a message body") + if err != nil { + return nil, err + } + om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", + "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") + if err != nil { + return nil, err + } + om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", + "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") + if err != nil { + return nil, err + } + om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", + "The time it takes to sign a request") + if err != nil { + return nil, err + } + om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", + "The time it takes to deserialize a message body") + if err != nil { + return nil, err + } + + return context.WithValue(parent, operationMetricsKey{}, om), nil +} + +func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { + return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = desc + }) +} + +func getOperationMetrics(ctx context.Context) *operationMetrics { + return ctx.Value(operationMetricsKey{}).(*operationMetrics) +} + +func operationTracer(p tracing.TracerProvider) tracing.Tracer { + return p.Tracer("github.com/aws/aws-sdk-go-v2/service/ssooidc") +} + // Client provides the API client to make operations call for AWS SSO OIDC. 
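The ssooidc helpers above repeat the same context-scoped pattern as the sso client: withOperationMetrics stores the per-operation histograms under an unexported key type, and getOperationMetrics retrieves them later in the middleware chain. A small standalone sketch of that Go idiom; the carrier type and values are illustrative, not the SDK's.

package main

import (
	"context"
	"fmt"
)

// An unexported key type keeps the context value private to this package,
// the same technique operationMetricsKey{} uses in the generated clients.
type carrierKey struct{}

type carrier struct{ name string }

func withCarrier(parent context.Context, c *carrier) context.Context {
	return context.WithValue(parent, carrierKey{}, c)
}

func carrierFrom(ctx context.Context) *carrier {
	// Mirrors getOperationMetrics: the value is assumed present because the
	// same package installed it earlier in the call chain.
	return ctx.Value(carrierKey{}).(*carrier)
}

func main() {
	ctx := withCarrier(context.Background(), &carrier{name: "ssooidc"})
	fmt.Println(carrierFrom(ctx).name)
}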
type Client struct { options Options @@ -56,6 +186,10 @@ func New(options Options, optFns ...func(*Options)) *Client { resolveEndpointResolverV2(&options) + resolveTracerProvider(&options) + + resolveMeterProvider(&options) + resolveAuthSchemeResolver(&options) for _, fn := range optFns { @@ -88,8 +222,15 @@ func (c *Client) Options() Options { return c.options.Copy() } -func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { +func (c *Client) invokeOperation( + ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, +) ( + result interface{}, metadata middleware.Metadata, err error, +) { ctx = middleware.ClearStackValues(ctx) + ctx = middleware.WithServiceID(ctx, ServiceID) + ctx = middleware.WithOperationName(ctx, opID) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) options := c.options.Copy() @@ -113,15 +254,56 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf } } - handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) - result, metadata, err = handler.Handle(ctx, params) + ctx, err = withOperationMetrics(ctx, options.MeterProvider) if err != nil { + return nil, metadata, err + } + + tracer := operationTracer(options.TracerProvider) + spanName := fmt.Sprintf("%s.%s", ServiceID, opID) + + ctx = tracing.WithOperationTracer(ctx, tracer) + + ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) { + o.Kind = tracing.SpanKindClient + o.Properties.Set("rpc.system", "aws-api") + o.Properties.Set("rpc.method", opID) + o.Properties.Set("rpc.service", ServiceID) + }) + endTimer := startMetricTimer(ctx, "client.call.duration") + defer endTimer() + defer span.End() + + handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) { + o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/ssooidc") + }) + decorated := middleware.DecorateHandler(handler, stack) + result, metadata, err = decorated.Handle(ctx, params) + if err != nil { + span.SetProperty("exception.type", fmt.Sprintf("%T", err)) + span.SetProperty("exception.message", err.Error()) + + var aerr smithy.APIError + if errors.As(err, &aerr) { + span.SetProperty("api.error_code", aerr.ErrorCode()) + span.SetProperty("api.error_message", aerr.ErrorMessage()) + span.SetProperty("api.error_fault", aerr.ErrorFault().String()) + } + err = &smithy.OperationError{ ServiceID: ServiceID, OperationName: opID, Err: err, } } + + span.SetProperty("error", err != nil) + if err == nil { + span.SetStatus(tracing.SpanStatusOK) + } else { + span.SetStatus(tracing.SpanStatusError) + } + return result, metadata, err } @@ -159,7 +341,7 @@ func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, o if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { return fmt.Errorf("add ResolveEndpointV2: %v", err) } - if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { + if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil { return fmt.Errorf("add Signing: %w", err) } return nil @@ -237,16 +419,15 @@ func setResolvedDefaultsMode(o *Options) 
{ // NewFromConfig returns a new client from the provided config. func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, - AccountIDEndpointMode: cfg.AccountIDEndpointMode, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -434,6 +615,30 @@ func addRawResponseToMetadata(stack *middleware.Stack) error { func addRecordResponseTiming(stack *middleware.Stack) error { return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) } + +func addSpanRetryLoop(stack *middleware.Stack, options Options) error { + return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) +} + +type spanRetryLoop struct { + options Options +} + +func (*spanRetryLoop) ID() string { + return "spanRetryLoop" +} + +func (m *spanRetryLoop) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + middleware.FinalizeOutput, middleware.Metadata, error, +) { + tracer := operationTracer(m.options.TracerProvider) + ctx, span := tracer.StartSpan(ctx, "RetryLoop") + defer span.End() + + return next.HandleFinalize(ctx, in) +} func addStreamingEventsPayload(stack *middleware.Stack) error { return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) } @@ -477,6 +682,7 @@ func addIsPaginatorUserAgent(o *Options) { func addRetry(stack *middleware.Stack, o Options) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() + m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/ssooidc") }) if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { return err @@ -540,25 +746,6 @@ func initializeTimeOffsetResolver(c *Client) { c.timeOffset = new(atomic.Int64) } -func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error { - switch mode { - case aws.AccountIDEndpointModeUnset: - case aws.AccountIDEndpointModePreferred: - case aws.AccountIDEndpointModeDisabled: - case aws.AccountIDEndpointModeRequired: - if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok { - return fmt.Errorf("accountID is required but not set") - } else if ca.Credentials.AccountID == "" { - return fmt.Errorf("accountID is required but not set") - } - // default check in case invalid mode is configured through request config - default: - return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode) - } - - return nil -} - func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { ua, err := getOrAddRequestUserAgent(stack) if err != nil { @@ -574,6 +761,18 @@ func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { return nil } +func resolveTracerProvider(options *Options) { + if options.TracerProvider == nil { + options.TracerProvider = &tracing.NopTracerProvider{} + } +} + +func 
resolveMeterProvider(options *Options) { + if options.MeterProvider == nil { + options.MeterProvider = metrics.NopMeterProvider{} + } +} + func addRecursionDetection(stack *middleware.Stack) error { return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) } @@ -625,3 +824,89 @@ func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { DisableHTTPS: o.EndpointOptions.DisableHTTPS, }, "ResolveEndpointV2", middleware.After) } + +type spanInitializeStart struct { +} + +func (*spanInitializeStart) ID() string { + return "spanInitializeStart" +} + +func (m *spanInitializeStart) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "Initialize") + + return next.HandleInitialize(ctx, in) +} + +type spanInitializeEnd struct { +} + +func (*spanInitializeEnd) ID() string { + return "spanInitializeEnd" +} + +func (m *spanInitializeEnd) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleInitialize(ctx, in) +} + +type spanBuildRequestStart struct { +} + +func (*spanBuildRequestStart) ID() string { + return "spanBuildRequestStart" +} + +func (m *spanBuildRequestStart) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + middleware.SerializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "BuildRequest") + + return next.HandleSerialize(ctx, in) +} + +type spanBuildRequestEnd struct { +} + +func (*spanBuildRequestEnd) ID() string { + return "spanBuildRequestEnd" +} + +func (m *spanBuildRequestEnd) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + middleware.BuildOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleBuild(ctx, in) +} + +func addSpanInitializeStart(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) +} + +func addSpanInitializeEnd(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) +} + +func addSpanBuildRequestStart(stack *middleware.Stack) error { + return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) +} + +func addSpanBuildRequestEnd(stack *middleware.Stack) error { + return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go index 8b829188eb..5fb8d2ab94 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go @@ -174,6 +174,9 @@ func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -213,6 +216,18 @@ func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = 
addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go index af04c251a2..8abd43690d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go @@ -205,6 +205,9 @@ func (c *Client) addOperationCreateTokenWithIAMMiddlewares(stack *middleware.Sta if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -244,6 +247,18 @@ func (c *Client) addOperationCreateTokenWithIAMMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go index d8c766c989..03a3594be0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go @@ -135,6 +135,9 @@ func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -174,6 +177,18 @@ func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go index 7c2b38ba90..203ca5e67b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go @@ -125,6 +125,9 @@ func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middlewa if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -164,6 +167,18 @@ func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = 
addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go index e6058da813..e4b87f5bc4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go @@ -8,7 +8,9 @@ import ( awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" smithy "github.com/aws/smithy-go" smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -163,6 +165,9 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") + defer span.End() + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { @@ -175,6 +180,9 @@ func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in mid } ctx = setResolvedAuthScheme(ctx, scheme) + + span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID()) + span.End() return next.HandleFinalize(ctx, in) } @@ -234,7 +242,10 @@ func (*getIdentityMiddleware) ID() string { func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - rscheme := getResolvedAuthScheme(ctx) + innerCtx, span := tracing.StartSpan(ctx, "GetIdentity") + defer span.End() + + rscheme := getResolvedAuthScheme(innerCtx) if rscheme == nil { return out, metadata, fmt.Errorf("no resolved auth scheme") } @@ -244,12 +255,20 @@ func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middlewar return out, metadata, fmt.Errorf("no identity resolver") } - identity, err := resolver.GetIdentity(ctx, rscheme.IdentityProperties) + identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration", + func() (smithyauth.Identity, error) { + return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties) + }, + func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) if err != nil { return out, metadata, fmt.Errorf("get identity: %w", err) } ctx = setIdentity(ctx, identity) + + span.End() return next.HandleFinalize(ctx, in) } @@ -265,6 +284,7 @@ func getIdentity(ctx context.Context) smithyauth.Identity { } type signRequestMiddleware struct { + options Options } func (*signRequestMiddleware) ID() string { @@ -274,6 +294,9 @@ func (*signRequestMiddleware) ID() string { func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "SignRequest") + defer span.End() + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, 
fmt.Errorf("unexpected transport type %T", in.Request) @@ -294,9 +317,15 @@ func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middlewar return out, metadata, fmt.Errorf("no signer") } - if err := signer.SignRequest(ctx, req, identity, rscheme.SignerProperties); err != nil { + _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) { + return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties) + }, func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) + if err != nil { return out, metadata, fmt.Errorf("sign request: %w", err) } + span.End() return next.HandleFinalize(ctx, in) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go index 05e8c6b7e5..ae9f145e62 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go @@ -14,6 +14,7 @@ import ( "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" smithytime "github.com/aws/smithy-go/time" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "io" "strings" @@ -43,6 +44,10 @@ func (m *awsRestjson1_deserializeOpCreateToken) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -82,6 +87,7 @@ func (m *awsRestjson1_deserializeOpCreateToken) HandleDeserialize(ctx context.Co } } + span.End() return out, metadata, err } @@ -264,6 +270,10 @@ func (m *awsRestjson1_deserializeOpCreateTokenWithIAM) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -303,6 +313,7 @@ func (m *awsRestjson1_deserializeOpCreateTokenWithIAM) HandleDeserialize(ctx con } } + span.End() return out, metadata, err } @@ -502,6 +513,10 @@ func (m *awsRestjson1_deserializeOpRegisterClient) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -541,6 +556,7 @@ func (m *awsRestjson1_deserializeOpRegisterClient) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -721,6 +737,10 @@ func (m *awsRestjson1_deserializeOpStartDeviceAuthorization) HandleDeserialize(c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, 
metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -760,6 +780,7 @@ func (m *awsRestjson1_deserializeOpStartDeviceAuthorization) HandleDeserialize(c } } + span.End() return out, metadata, err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go index d7099721fe..6feea0c9fe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go @@ -16,6 +16,7 @@ import ( smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" "net/url" @@ -502,14 +503,13 @@ func (*resolveEndpointV2Middleware) ID() string { func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "ResolveEndpoint") + defer span.End() + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { return next.HandleFinalize(ctx, in) } - if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil { - return out, metadata, fmt.Errorf("invalid accountID set: %w", err) - } - req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) @@ -520,11 +520,16 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid } params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) - endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", + func() (smithyendpoints.Endpoint, error) { + return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + }) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) } + span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) + if endpt.URI.RawPath == "" && req.URL.RawPath != "" { endpt.URI.RawPath = endpt.URI.Path } @@ -546,5 +551,6 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid rscheme.SignerProperties.SetAll(&o.SignerProperties) } + span.End() return next.HandleFinalize(ctx, in) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go index 880f8bbeff..ab2907e583 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go @@ -3,4 +3,4 @@ package ssooidc // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.26.7" +const goModuleVersion = "1.28.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go index a012e4cb8d..55dd80d0e0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go @@ -9,7 +9,9 @@ import ( internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" smithyauth "github.com/aws/smithy-go/auth" "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" 
"github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" ) @@ -24,9 +26,6 @@ type Options struct { // modify this list for per operation behavior. APIOptions []func(*middleware.Stack) error - // Indicates how aws account ID is applied in endpoint2.0 routing - AccountIDEndpointMode aws.AccountIDEndpointMode - // The optional application specific identifier appended to the User-Agent header. AppID string @@ -69,6 +68,9 @@ type Options struct { // The logger writer interface to write logging messages to. Logger logging.Logger + // The client meter provider. + MeterProvider metrics.MeterProvider + // The region to send requests to. (Required) Region string @@ -103,6 +105,9 @@ type Options struct { // within your applications. RuntimeEnvironment aws.RuntimeEnvironment + // The client tracer provider. + TracerProvider tracing.TracerProvider + // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved // value was at that point in time. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go index 04411bd616..1ad103d1ed 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go @@ -10,6 +10,7 @@ import ( "github.com/aws/smithy-go/encoding/httpbinding" smithyjson "github.com/aws/smithy-go/encoding/json" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -23,6 +24,10 @@ func (*awsRestjson1_serializeOpCreateToken) ID() string { func (m *awsRestjson1_serializeOpCreateToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -66,6 +71,8 @@ func (m *awsRestjson1_serializeOpCreateToken) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestjson1_serializeOpHttpBindingsCreateTokenInput(v *CreateTokenInput, encoder *httpbinding.Encoder) error { @@ -140,6 +147,10 @@ func (*awsRestjson1_serializeOpCreateTokenWithIAM) ID() string { func (m *awsRestjson1_serializeOpCreateTokenWithIAM) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -183,6 +194,8 @@ func (m *awsRestjson1_serializeOpCreateTokenWithIAM) HandleSerialize(ctx context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func 
awsRestjson1_serializeOpHttpBindingsCreateTokenWithIAMInput(v *CreateTokenWithIAMInput, encoder *httpbinding.Encoder) error { @@ -267,6 +280,10 @@ func (*awsRestjson1_serializeOpRegisterClient) ID() string { func (m *awsRestjson1_serializeOpRegisterClient) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -310,6 +327,8 @@ func (m *awsRestjson1_serializeOpRegisterClient) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestjson1_serializeOpHttpBindingsRegisterClientInput(v *RegisterClientInput, encoder *httpbinding.Encoder) error { @@ -378,6 +397,10 @@ func (*awsRestjson1_serializeOpStartDeviceAuthorization) ID() string { func (m *awsRestjson1_serializeOpStartDeviceAuthorization) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -421,6 +444,8 @@ func (m *awsRestjson1_serializeOpStartDeviceAuthorization) HandleSerialize(ctx c } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestjson1_serializeOpHttpBindingsStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput, encoder *httpbinding.Encoder) error { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md index 9bcf2568ed..06ebd69ea0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -1,3 +1,41 @@ +# v1.32.2 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.1 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.31.4 (2024-10-03) + +* No change notes available for this release. + +# v1.31.3 (2024-09-27) + +* No change notes available for this release. + +# v1.31.2 (2024-09-25) + +* No change notes available for this release. + +# v1.31.1 (2024-09-23) + +* No change notes available for this release. + +# v1.31.0 (2024-09-20) + +* **Feature**: Add tracing and metrics support to service clients. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.8 (2024-09-17) + +* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution. + # v1.30.7 (2024-09-04) * No change notes available for this release. 
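The v1.31.0 and v1.32.0 notes above correspond to the MeterProvider and TracerProvider options introduced in the client diff that follows. As a rough sketch of how a consumer could opt in to that instrumentation, assuming the smithyotelmetrics and smithyoteltracing adapter modules announced in the smithy-go changelog further down (their Adapt helpers are not part of this patch and are an assumption here):

```go
// Sketch: wiring the new MeterProvider/TracerProvider options into an STS
// client. smithyotelmetrics.Adapt and smithyoteltracing.Adapt are assumed
// based on the smithy-go changelog; they are not part of this diff.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/smithy-go/metrics/smithyotelmetrics"
	"github.com/aws/smithy-go/tracing/smithyoteltracing"
	"go.opentelemetry.io/otel"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	client := sts.NewFromConfig(cfg, func(o *sts.Options) {
		// Both fields default to no-op providers (see resolveMeterProvider
		// and resolveTracerProvider below), so instrumentation is opt-in.
		o.MeterProvider = smithyotelmetrics.Adapt(otel.GetMeterProvider())
		o.TracerProvider = smithyoteltracing.Adapt(otel.GetTracerProvider())
	})

	out, err := client.GetCallerIdentity(context.Background(), &sts.GetCallerIdentityInput{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("caller ARN: %s", *out.Arn)
}
```

Clients built without these options keep their previous behavior: the resolveMeterProvider and resolveTracerProvider helpers added below fall back to the no-op implementations that smithy-go now ships.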
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go index acd2b8e7a1..4e678ce2ae 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go @@ -4,6 +4,7 @@ package sts import ( "context" + "errors" "fmt" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/defaults" @@ -22,7 +23,9 @@ import ( smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" @@ -33,6 +36,133 @@ import ( const ServiceID = "STS" const ServiceAPIVersion = "2011-06-15" +type operationMetrics struct { + Duration metrics.Float64Histogram + SerializeDuration metrics.Float64Histogram + ResolveIdentityDuration metrics.Float64Histogram + ResolveEndpointDuration metrics.Float64Histogram + SignRequestDuration metrics.Float64Histogram + DeserializeDuration metrics.Float64Histogram +} + +func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { + switch name { + case "client.call.duration": + return m.Duration + case "client.call.serialization_duration": + return m.SerializeDuration + case "client.call.resolve_identity_duration": + return m.ResolveIdentityDuration + case "client.call.resolve_endpoint_duration": + return m.ResolveEndpointDuration + case "client.call.signing_duration": + return m.SignRequestDuration + case "client.call.deserialization_duration": + return m.DeserializeDuration + default: + panic("unrecognized operation metric") + } +} + +func timeOperationMetric[T any]( + ctx context.Context, metric string, fn func() (T, error), + opts ...metrics.RecordMetricOption, +) (T, error) { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + start := time.Now() + v, err := fn() + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) + return v, err +} + +func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + var ended bool + start := time.Now() + return func() { + if ended { + return + } + ended = true + + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) 
+ } +} + +func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { + return func(o *metrics.RecordMetricOptions) { + o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) + o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) + } +} + +type operationMetricsKey struct{} + +func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { + meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/sts") + om := &operationMetrics{} + + var err error + + om.Duration, err = operationMetricTimer(meter, "client.call.duration", + "Overall call duration (including retries and time to send or receive request and response body)") + if err != nil { + return nil, err + } + om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", + "The time it takes to serialize a message body") + if err != nil { + return nil, err + } + om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", + "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") + if err != nil { + return nil, err + } + om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", + "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") + if err != nil { + return nil, err + } + om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", + "The time it takes to sign a request") + if err != nil { + return nil, err + } + om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", + "The time it takes to deserialize a message body") + if err != nil { + return nil, err + } + + return context.WithValue(parent, operationMetricsKey{}, om), nil +} + +func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { + return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = desc + }) +} + +func getOperationMetrics(ctx context.Context) *operationMetrics { + return ctx.Value(operationMetricsKey{}).(*operationMetrics) +} + +func operationTracer(p tracing.TracerProvider) tracing.Tracer { + return p.Tracer("github.com/aws/aws-sdk-go-v2/service/sts") +} + // Client provides the API client to make operations call for AWS Security Token // Service. 
type Client struct { @@ -60,6 +190,10 @@ func New(options Options, optFns ...func(*Options)) *Client { resolveEndpointResolverV2(&options) + resolveTracerProvider(&options) + + resolveMeterProvider(&options) + resolveAuthSchemeResolver(&options) for _, fn := range optFns { @@ -92,8 +226,15 @@ func (c *Client) Options() Options { return c.options.Copy() } -func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { +func (c *Client) invokeOperation( + ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, +) ( + result interface{}, metadata middleware.Metadata, err error, +) { ctx = middleware.ClearStackValues(ctx) + ctx = middleware.WithServiceID(ctx, ServiceID) + ctx = middleware.WithOperationName(ctx, opID) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) options := c.options.Copy() @@ -117,15 +258,56 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf } } - handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) - result, metadata, err = handler.Handle(ctx, params) + ctx, err = withOperationMetrics(ctx, options.MeterProvider) if err != nil { + return nil, metadata, err + } + + tracer := operationTracer(options.TracerProvider) + spanName := fmt.Sprintf("%s.%s", ServiceID, opID) + + ctx = tracing.WithOperationTracer(ctx, tracer) + + ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) { + o.Kind = tracing.SpanKindClient + o.Properties.Set("rpc.system", "aws-api") + o.Properties.Set("rpc.method", opID) + o.Properties.Set("rpc.service", ServiceID) + }) + endTimer := startMetricTimer(ctx, "client.call.duration") + defer endTimer() + defer span.End() + + handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) { + o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sts") + }) + decorated := middleware.DecorateHandler(handler, stack) + result, metadata, err = decorated.Handle(ctx, params) + if err != nil { + span.SetProperty("exception.type", fmt.Sprintf("%T", err)) + span.SetProperty("exception.message", err.Error()) + + var aerr smithy.APIError + if errors.As(err, &aerr) { + span.SetProperty("api.error_code", aerr.ErrorCode()) + span.SetProperty("api.error_message", aerr.ErrorMessage()) + span.SetProperty("api.error_fault", aerr.ErrorFault().String()) + } + err = &smithy.OperationError{ ServiceID: ServiceID, OperationName: opID, Err: err, } } + + span.SetProperty("error", err != nil) + if err == nil { + span.SetStatus(tracing.SpanStatusOK) + } else { + span.SetStatus(tracing.SpanStatusError) + } + return result, metadata, err } @@ -163,7 +345,7 @@ func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, o if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { return fmt.Errorf("add ResolveEndpointV2: %v", err) } - if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { + if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil { return fmt.Errorf("add Signing: %w", err) } return nil @@ -241,16 +423,15 @@ func setResolvedDefaultsMode(o *Options) { // NewFromConfig 
returns a new client from the provided config. func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, - AccountIDEndpointMode: cfg.AccountIDEndpointMode, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -438,6 +619,30 @@ func addRawResponseToMetadata(stack *middleware.Stack) error { func addRecordResponseTiming(stack *middleware.Stack) error { return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) } + +func addSpanRetryLoop(stack *middleware.Stack, options Options) error { + return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) +} + +type spanRetryLoop struct { + options Options +} + +func (*spanRetryLoop) ID() string { + return "spanRetryLoop" +} + +func (m *spanRetryLoop) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + middleware.FinalizeOutput, middleware.Metadata, error, +) { + tracer := operationTracer(m.options.TracerProvider) + ctx, span := tracer.StartSpan(ctx, "RetryLoop") + defer span.End() + + return next.HandleFinalize(ctx, in) +} func addStreamingEventsPayload(stack *middleware.Stack) error { return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) } @@ -481,6 +686,7 @@ func addIsPaginatorUserAgent(o *Options) { func addRetry(stack *middleware.Stack, o Options) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() + m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sts") }) if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { return err @@ -544,25 +750,6 @@ func initializeTimeOffsetResolver(c *Client) { c.timeOffset = new(atomic.Int64) } -func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error { - switch mode { - case aws.AccountIDEndpointModeUnset: - case aws.AccountIDEndpointModePreferred: - case aws.AccountIDEndpointModeDisabled: - case aws.AccountIDEndpointModeRequired: - if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok { - return fmt.Errorf("accountID is required but not set") - } else if ca.Credentials.AccountID == "" { - return fmt.Errorf("accountID is required but not set") - } - // default check in case invalid mode is configured through request config - default: - return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode) - } - - return nil -} - func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { ua, err := getOrAddRequestUserAgent(stack) if err != nil { @@ -578,6 +765,18 @@ func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { return nil } +func resolveTracerProvider(options *Options) { + if options.TracerProvider == nil { + options.TracerProvider = &tracing.NopTracerProvider{} + } +} + +func resolveMeterProvider(options *Options) { 
+ if options.MeterProvider == nil { + options.MeterProvider = metrics.NopMeterProvider{} + } +} + func addRecursionDetection(stack *middleware.Stack) error { return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) } @@ -777,3 +976,89 @@ func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { DisableHTTPS: o.EndpointOptions.DisableHTTPS, }, "ResolveEndpointV2", middleware.After) } + +type spanInitializeStart struct { +} + +func (*spanInitializeStart) ID() string { + return "spanInitializeStart" +} + +func (m *spanInitializeStart) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "Initialize") + + return next.HandleInitialize(ctx, in) +} + +type spanInitializeEnd struct { +} + +func (*spanInitializeEnd) ID() string { + return "spanInitializeEnd" +} + +func (m *spanInitializeEnd) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleInitialize(ctx, in) +} + +type spanBuildRequestStart struct { +} + +func (*spanBuildRequestStart) ID() string { + return "spanBuildRequestStart" +} + +func (m *spanBuildRequestStart) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + middleware.SerializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "BuildRequest") + + return next.HandleSerialize(ctx, in) +} + +type spanBuildRequestEnd struct { +} + +func (*spanBuildRequestEnd) ID() string { + return "spanBuildRequestEnd" +} + +func (m *spanBuildRequestEnd) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + middleware.BuildOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleBuild(ctx, in) +} + +func addSpanInitializeStart(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) +} + +func addSpanInitializeEnd(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) +} + +func addSpanBuildRequestStart(stack *middleware.Stack) error { + return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) +} + +func addSpanBuildRequestEnd(stack *middleware.Stack) error { + return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go index e74fc8ba9f..be03f017d4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go @@ -445,6 +445,9 @@ func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, opti if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -484,6 +487,18 @@ func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, opti if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = 
addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go index 4c685abd5f..b8b0c095f7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go @@ -385,6 +385,9 @@ func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Sta if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -424,6 +427,18 @@ func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go index 0b5e5a377c..ffe2479f63 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go @@ -396,6 +396,9 @@ func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middlew if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -435,6 +438,18 @@ func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go index b1f14d28ce..a56840e1b0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go @@ -126,6 +126,9 @@ func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middle if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -165,6 +168,18 @@ func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if 
err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go index 3ba00873db..c80b0550b0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go @@ -117,6 +117,9 @@ func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -156,6 +159,18 @@ func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go index abac49ad2f..49304bdaf7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go @@ -108,6 +108,9 @@ func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stac if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -144,6 +147,18 @@ func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go index 2bae67429f..96f59ec63d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go @@ -330,6 +330,9 @@ func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Sta if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -369,6 +372,18 @@ func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + 
if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go index c73316a3c0..0ed9ecbc74 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go @@ -179,6 +179,9 @@ func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -215,6 +218,18 @@ func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go index e842a7f7e8..a90b2b7362 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go @@ -8,7 +8,9 @@ import ( awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" smithy "github.com/aws/smithy-go" smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -157,6 +159,9 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") + defer span.End() + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { @@ -169,6 +174,9 @@ func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in mid } ctx = setResolvedAuthScheme(ctx, scheme) + + span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID()) + span.End() return next.HandleFinalize(ctx, in) } @@ -228,7 +236,10 @@ func (*getIdentityMiddleware) ID() string { func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - rscheme := getResolvedAuthScheme(ctx) + innerCtx, span := tracing.StartSpan(ctx, "GetIdentity") + defer span.End() + + rscheme := getResolvedAuthScheme(innerCtx) if rscheme == nil { return out, metadata, fmt.Errorf("no resolved auth scheme") } @@ -238,12 +249,20 @@ func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middlewar return out, metadata, fmt.Errorf("no identity resolver") } - identity, err := resolver.GetIdentity(ctx, rscheme.IdentityProperties) + identity, err := timeOperationMetric(ctx, 
"client.call.resolve_identity_duration", + func() (smithyauth.Identity, error) { + return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties) + }, + func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) if err != nil { return out, metadata, fmt.Errorf("get identity: %w", err) } ctx = setIdentity(ctx, identity) + + span.End() return next.HandleFinalize(ctx, in) } @@ -259,6 +278,7 @@ func getIdentity(ctx context.Context) smithyauth.Identity { } type signRequestMiddleware struct { + options Options } func (*signRequestMiddleware) ID() string { @@ -268,6 +288,9 @@ func (*signRequestMiddleware) ID() string { func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "SignRequest") + defer span.End() + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) @@ -288,9 +311,15 @@ func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middlewar return out, metadata, fmt.Errorf("no signer") } - if err := signer.SignRequest(ctx, req, identity, rscheme.SignerProperties); err != nil { + _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) { + return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties) + }, func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) + if err != nil { return out, metadata, fmt.Errorf("sign request: %w", err) } + span.End() return next.HandleFinalize(ctx, in) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go index 7e4346ec9f..cf0cc54e2a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go @@ -16,6 +16,7 @@ import ( "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" smithytime "github.com/aws/smithy-go/time" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "io" "strconv" @@ -46,6 +47,10 @@ func (m *awsAwsquery_deserializeOpAssumeRole) HandleDeserialize(ctx context.Cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -163,6 +168,10 @@ func (m *awsAwsquery_deserializeOpAssumeRoleWithSAML) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -286,6 +295,10 @@ func (m *awsAwsquery_deserializeOpAssumeRoleWithWebIdentity) HandleDeserialize(c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + 
defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -412,6 +425,10 @@ func (m *awsAwsquery_deserializeOpDecodeAuthorizationMessage) HandleDeserialize( return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -520,6 +537,10 @@ func (m *awsAwsquery_deserializeOpGetAccessKeyInfo) HandleDeserialize(ctx contex return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -625,6 +646,10 @@ func (m *awsAwsquery_deserializeOpGetCallerIdentity) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -730,6 +755,10 @@ func (m *awsAwsquery_deserializeOpGetFederationToken) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -844,6 +873,10 @@ func (m *awsAwsquery_deserializeOpGetSessionToken) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go index 35305d8976..dca2ce3599 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go @@ -17,6 +17,7 @@ import ( smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" "net/url" @@ -1082,14 +1083,13 @@ func (*resolveEndpointV2Middleware) ID() string { func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "ResolveEndpoint") + defer span.End() + if 
awsmiddleware.GetRequiresLegacyEndpoints(ctx) { return next.HandleFinalize(ctx, in) } - if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil { - return out, metadata, fmt.Errorf("invalid accountID set: %w", err) - } - req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) @@ -1100,11 +1100,16 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid } params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) - endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", + func() (smithyendpoints.Endpoint, error) { + return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + }) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) } + span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) + if endpt.URI.RawPath == "" && req.URL.RawPath != "" { endpt.URI.RawPath = endpt.URI.Path } @@ -1126,5 +1131,6 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid rscheme.SignerProperties.SetAll(&o.SignerProperties) } + span.End() return next.HandleFinalize(ctx, in) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go index c3a970dff4..5e0fa189bd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -3,4 +3,4 @@ package sts // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.30.7" +const goModuleVersion = "1.32.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go index a9a35881af..e1398f3bb8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go @@ -9,7 +9,9 @@ import ( internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" smithyauth "github.com/aws/smithy-go/auth" "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" ) @@ -24,9 +26,6 @@ type Options struct { // modify this list for per operation behavior. APIOptions []func(*middleware.Stack) error - // Indicates how aws account ID is applied in endpoint2.0 routing - AccountIDEndpointMode aws.AccountIDEndpointMode - // The optional application specific identifier appended to the User-Agent header. AppID string @@ -69,6 +68,9 @@ type Options struct { // The logger writer interface to write logging messages to. Logger logging.Logger + // The client meter provider. + MeterProvider metrics.MeterProvider + // The region to send requests to. (Required) Region string @@ -103,6 +105,9 @@ type Options struct { // within your applications. RuntimeEnvironment aws.RuntimeEnvironment + // The client tracer provider. + TracerProvider tracing.TracerProvider + // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved // value was at that point in time. 
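The serializer and deserializer hunks below pair startMetricTimer with both an explicit endTimer() call on the success path and a deferred call for early returns; the helper added in api_client.go guards the returned closure with an ended flag so the duration is recorded exactly once. A minimal stdlib-only sketch of that pattern (names are illustrative, not taken from the SDK):

```go
package main

import (
	"fmt"
	"time"
)

// startTimer returns a closure that records the elapsed time exactly once,
// mirroring the ended-flag guard in the SDK's startMetricTimer helper.
func startTimer(record func(seconds float64)) func() {
	var ended bool
	start := time.Now()
	return func() {
		if ended {
			return
		}
		ended = true
		record(time.Since(start).Seconds())
	}
}

func main() {
	end := startTimer(func(s float64) { fmt.Printf("serialization took %.6fs\n", s) })
	defer end() // no-op if end() already ran below

	time.Sleep(10 * time.Millisecond) // stand-in for serializing a request
	end()                             // explicit call on the success path
}
```

The same guard is why the generated middleware can call endTimer() immediately before handing off to the next handler and still defer it for error paths without double-counting.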
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go index 4c08061c0c..1bcbc82842 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go @@ -11,6 +11,7 @@ import ( smithy "github.com/aws/smithy-go" "github.com/aws/smithy-go/encoding/httpbinding" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "path" ) @@ -25,6 +26,10 @@ func (*awsAwsquery_serializeOpAssumeRole) ID() string { func (m *awsAwsquery_serializeOpAssumeRole) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -76,6 +81,8 @@ func (m *awsAwsquery_serializeOpAssumeRole) HandleSerialize(ctx context.Context, } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -89,6 +96,10 @@ func (*awsAwsquery_serializeOpAssumeRoleWithSAML) ID() string { func (m *awsAwsquery_serializeOpAssumeRoleWithSAML) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -140,6 +151,8 @@ func (m *awsAwsquery_serializeOpAssumeRoleWithSAML) HandleSerialize(ctx context. 
} in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -153,6 +166,10 @@ func (*awsAwsquery_serializeOpAssumeRoleWithWebIdentity) ID() string { func (m *awsAwsquery_serializeOpAssumeRoleWithWebIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -204,6 +221,8 @@ func (m *awsAwsquery_serializeOpAssumeRoleWithWebIdentity) HandleSerialize(ctx c } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -217,6 +236,10 @@ func (*awsAwsquery_serializeOpDecodeAuthorizationMessage) ID() string { func (m *awsAwsquery_serializeOpDecodeAuthorizationMessage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -268,6 +291,8 @@ func (m *awsAwsquery_serializeOpDecodeAuthorizationMessage) HandleSerialize(ctx } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -281,6 +306,10 @@ func (*awsAwsquery_serializeOpGetAccessKeyInfo) ID() string { func (m *awsAwsquery_serializeOpGetAccessKeyInfo) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -332,6 +361,8 @@ func (m *awsAwsquery_serializeOpGetAccessKeyInfo) HandleSerialize(ctx context.Co } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -345,6 +376,10 @@ func (*awsAwsquery_serializeOpGetCallerIdentity) ID() string { func (m *awsAwsquery_serializeOpGetCallerIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -392,6 +427,8 @@ func (m *awsAwsquery_serializeOpGetCallerIdentity) HandleSerialize(ctx context.C } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -405,6 +442,10 @@ func (*awsAwsquery_serializeOpGetFederationToken) ID() 
string { func (m *awsAwsquery_serializeOpGetFederationToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -456,6 +497,8 @@ func (m *awsAwsquery_serializeOpGetFederationToken) HandleSerialize(ctx context. } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -469,6 +512,10 @@ func (*awsAwsquery_serializeOpGetSessionToken) ID() string { func (m *awsAwsquery_serializeOpGetSessionToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -520,6 +567,8 @@ func (m *awsAwsquery_serializeOpGetSessionToken) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsAwsquery_serializeDocumentPolicyDescriptorListType(v []types.PolicyDescriptorType, value query.Value) error { diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md index 96d57df805..c63f18f506 100644 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -1,3 +1,31 @@ +# Release (2024-10-03) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/smithy-go`: v1.22.0 + * **Feature**: Add HTTP client metrics. + +# Release (2024-09-25) + +## Module Highlights +* `github.com/aws/smithy-go/aws-http-auth`: [v1.0.0](aws-http-auth/CHANGELOG.md#v100-2024-09-25) + * **Release**: Initial release of module aws-http-auth, which implements generically consumable SigV4 and SigV4a request signing. + +# Release (2024-09-19) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/smithy-go`: v1.21.0 + * **Feature**: Add tracing and metrics APIs, and builtin instrumentation for both, in generated clients. +* `github.com/aws/smithy-go/metrics/smithyotelmetrics`: [v1.0.0](metrics/smithyotelmetrics/CHANGELOG.md#v100-2024-09-19) + * **Release**: Initial release of `smithyotelmetrics` module, which is used to adapt an OpenTelemetry SDK meter provider to be used with Smithy clients. +* `github.com/aws/smithy-go/tracing/smithyoteltracing`: [v1.0.0](tracing/smithyoteltracing/CHANGELOG.md#v100-2024-09-19) + * **Release**: Initial release of `smithyoteltracing` module, which is used to adapt an OpenTelemetry SDK tracer provider to be used with Smithy clients. 
+ # Release (2024-08-14) ## Module Highlights diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go index 33355b22c8..d7a7627bdc 100644 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go @@ -3,4 +3,4 @@ package smithy // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.20.4" +const goModuleVersion = "1.22.0" diff --git a/vendor/github.com/aws/smithy-go/metrics/metrics.go b/vendor/github.com/aws/smithy-go/metrics/metrics.go new file mode 100644 index 0000000000..c009d9f278 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/metrics/metrics.go @@ -0,0 +1,136 @@ +// Package metrics defines the metrics APIs used by Smithy clients. +package metrics + +import ( + "context" + + "github.com/aws/smithy-go" +) + +// MeterProvider is the entry point for creating a Meter. +type MeterProvider interface { + Meter(scope string, opts ...MeterOption) Meter +} + +// MeterOption applies configuration to a Meter. +type MeterOption func(o *MeterOptions) + +// MeterOptions represents configuration for a Meter. +type MeterOptions struct { + Properties smithy.Properties +} + +// Meter is the entry point for creation of measurement instruments. +type Meter interface { + // integer/synchronous + Int64Counter(name string, opts ...InstrumentOption) (Int64Counter, error) + Int64UpDownCounter(name string, opts ...InstrumentOption) (Int64UpDownCounter, error) + Int64Gauge(name string, opts ...InstrumentOption) (Int64Gauge, error) + Int64Histogram(name string, opts ...InstrumentOption) (Int64Histogram, error) + + // integer/asynchronous + Int64AsyncCounter(name string, callback Int64Callback, opts ...InstrumentOption) (AsyncInstrument, error) + Int64AsyncUpDownCounter(name string, callback Int64Callback, opts ...InstrumentOption) (AsyncInstrument, error) + Int64AsyncGauge(name string, callback Int64Callback, opts ...InstrumentOption) (AsyncInstrument, error) + + // floating-point/synchronous + Float64Counter(name string, opts ...InstrumentOption) (Float64Counter, error) + Float64UpDownCounter(name string, opts ...InstrumentOption) (Float64UpDownCounter, error) + Float64Gauge(name string, opts ...InstrumentOption) (Float64Gauge, error) + Float64Histogram(name string, opts ...InstrumentOption) (Float64Histogram, error) + + // floating-point/asynchronous + Float64AsyncCounter(name string, callback Float64Callback, opts ...InstrumentOption) (AsyncInstrument, error) + Float64AsyncUpDownCounter(name string, callback Float64Callback, opts ...InstrumentOption) (AsyncInstrument, error) + Float64AsyncGauge(name string, callback Float64Callback, opts ...InstrumentOption) (AsyncInstrument, error) +} + +// InstrumentOption applies configuration to an instrument. +type InstrumentOption func(o *InstrumentOptions) + +// InstrumentOptions represents configuration for an instrument. +type InstrumentOptions struct { + UnitLabel string + Description string +} + +// Int64Counter measures a monotonically increasing int64 value. +type Int64Counter interface { + Add(context.Context, int64, ...RecordMetricOption) +} + +// Int64UpDownCounter measures a fluctuating int64 value. +type Int64UpDownCounter interface { + Add(context.Context, int64, ...RecordMetricOption) +} + +// Int64Gauge samples a discrete int64 value. +type Int64Gauge interface { + Sample(context.Context, int64, ...RecordMetricOption) +} + +// Int64Histogram records multiple data points for an int64 value. 
+type Int64Histogram interface { + Record(context.Context, int64, ...RecordMetricOption) +} + +// Float64Counter measures a monotonically increasing float64 value. +type Float64Counter interface { + Add(context.Context, float64, ...RecordMetricOption) +} + +// Float64UpDownCounter measures a fluctuating float64 value. +type Float64UpDownCounter interface { + Add(context.Context, float64, ...RecordMetricOption) +} + +// Float64Gauge samples a discrete float64 value. +type Float64Gauge interface { + Sample(context.Context, float64, ...RecordMetricOption) +} + +// Float64Histogram records multiple data points for an float64 value. +type Float64Histogram interface { + Record(context.Context, float64, ...RecordMetricOption) +} + +// AsyncInstrument is the universal handle returned for creation of all async +// instruments. +// +// Callers use the Stop() API to unregister the callback passed at instrument +// creation. +type AsyncInstrument interface { + Stop() +} + +// Int64Callback describes a function invoked when an async int64 instrument is +// read. +type Int64Callback func(context.Context, Int64Observer) + +// Int64Observer is the interface passed to async int64 instruments. +// +// Callers use the Observe() API of this interface to report metrics to the +// underlying collector. +type Int64Observer interface { + Observe(context.Context, int64, ...RecordMetricOption) +} + +// Float64Callback describes a function invoked when an async float64 +// instrument is read. +type Float64Callback func(context.Context, Float64Observer) + +// Float64Observer is the interface passed to async int64 instruments. +// +// Callers use the Observe() API of this interface to report metrics to the +// underlying collector. +type Float64Observer interface { + Observe(context.Context, float64, ...RecordMetricOption) +} + +// RecordMetricOption applies configuration to a recorded metric. +type RecordMetricOption func(o *RecordMetricOptions) + +// RecordMetricOptions represents configuration for a recorded metric. +type RecordMetricOptions struct { + Properties smithy.Properties +} diff --git a/vendor/github.com/aws/smithy-go/metrics/nop.go b/vendor/github.com/aws/smithy-go/metrics/nop.go new file mode 100644 index 0000000000..fb374e1fb8 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/metrics/nop.go @@ -0,0 +1,67 @@ +package metrics + +import "context" + +// NopMeterProvider is a no-op metrics implementation. +type NopMeterProvider struct{} + +var _ MeterProvider = (*NopMeterProvider)(nil) + +// Meter returns a meter which creates no-op instruments. 
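Illustrative only, not part of the vendored diff: a minimal sketch of how the new smithy-go metrics surface defined above might be consumed, using the no-op provider that ships with the module. The meter scope and instrument name are arbitrary examples.

package main

import (
	"context"

	"github.com/aws/smithy-go/metrics"
)

func main() {
	// Any MeterProvider works here; the no-op provider is the module's built-in fallback.
	var provider metrics.MeterProvider = metrics.NopMeterProvider{}
	meter := provider.Meter("example.scope")

	// Instrument creation follows the Meter interface defined in metrics.go.
	hist, err := meter.Float64Histogram("example.duration", func(o *metrics.InstrumentOptions) {
		o.UnitLabel = "s"
		o.Description = "Example duration histogram."
	})
	if err != nil {
		panic(err)
	}

	// Record a single measurement; with the no-op implementation this does nothing.
	hist.Record(context.Background(), 0.25)
}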
+func (NopMeterProvider) Meter(string, ...MeterOption) Meter { + return nopMeter{} +} + +type nopMeter struct{} + +var _ Meter = (*nopMeter)(nil) + +func (nopMeter) Int64Counter(string, ...InstrumentOption) (Int64Counter, error) { + return nopInstrument[int64]{}, nil +} +func (nopMeter) Int64UpDownCounter(string, ...InstrumentOption) (Int64UpDownCounter, error) { + return nopInstrument[int64]{}, nil +} +func (nopMeter) Int64Gauge(string, ...InstrumentOption) (Int64Gauge, error) { + return nopInstrument[int64]{}, nil +} +func (nopMeter) Int64Histogram(string, ...InstrumentOption) (Int64Histogram, error) { + return nopInstrument[int64]{}, nil +} +func (nopMeter) Int64AsyncCounter(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) { + return nopInstrument[int64]{}, nil +} +func (nopMeter) Int64AsyncUpDownCounter(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) { + return nopInstrument[int64]{}, nil +} +func (nopMeter) Int64AsyncGauge(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) { + return nopInstrument[int64]{}, nil +} +func (nopMeter) Float64Counter(string, ...InstrumentOption) (Float64Counter, error) { + return nopInstrument[float64]{}, nil +} +func (nopMeter) Float64UpDownCounter(string, ...InstrumentOption) (Float64UpDownCounter, error) { + return nopInstrument[float64]{}, nil +} +func (nopMeter) Float64Gauge(string, ...InstrumentOption) (Float64Gauge, error) { + return nopInstrument[float64]{}, nil +} +func (nopMeter) Float64Histogram(string, ...InstrumentOption) (Float64Histogram, error) { + return nopInstrument[float64]{}, nil +} +func (nopMeter) Float64AsyncCounter(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) { + return nopInstrument[float64]{}, nil +} +func (nopMeter) Float64AsyncUpDownCounter(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) { + return nopInstrument[float64]{}, nil +} +func (nopMeter) Float64AsyncGauge(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) { + return nopInstrument[float64]{}, nil +} + +type nopInstrument[N any] struct{} + +func (nopInstrument[N]) Add(context.Context, N, ...RecordMetricOption) {} +func (nopInstrument[N]) Sample(context.Context, N, ...RecordMetricOption) {} +func (nopInstrument[N]) Record(context.Context, N, ...RecordMetricOption) {} +func (nopInstrument[_]) Stop() {} diff --git a/vendor/github.com/aws/smithy-go/middleware/context.go b/vendor/github.com/aws/smithy-go/middleware/context.go new file mode 100644 index 0000000000..f51aa4f04f --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/context.go @@ -0,0 +1,41 @@ +package middleware + +import "context" + +type ( + serviceIDKey struct{} + operationNameKey struct{} +) + +// WithServiceID adds a service ID to the context, scoped to middleware stack +// values. +// +// This API is called in the client runtime when bootstrapping an operation and +// should not typically be used directly. +func WithServiceID(parent context.Context, id string) context.Context { + return WithStackValue(parent, serviceIDKey{}, id) +} + +// GetServiceID retrieves the service ID from the context. This is typically +// the service shape's name from its Smithy model. Service clients for specific +// systems (e.g. AWS SDK) may use an alternate designated value. +func GetServiceID(ctx context.Context) string { + id, _ := GetStackValue(ctx, serviceIDKey{}).(string) + return id +} + +// WithOperationName adds the operation name to the context, scoped to +// middleware stack values. 
+// +// This API is called in the client runtime when bootstrapping an operation and +// should not typically be used directly. +func WithOperationName(parent context.Context, id string) context.Context { + return WithStackValue(parent, operationNameKey{}, id) +} + +// GetOperationName retrieves the operation name from the context. This is +// typically the operation shape's name from its Smithy model. +func GetOperationName(ctx context.Context) string { + name, _ := GetStackValue(ctx, operationNameKey{}).(string) + return name +} diff --git a/vendor/github.com/aws/smithy-go/properties.go b/vendor/github.com/aws/smithy-go/properties.go index c9af66c0ea..68df4c4e0e 100644 --- a/vendor/github.com/aws/smithy-go/properties.go +++ b/vendor/github.com/aws/smithy-go/properties.go @@ -1,9 +1,11 @@ package smithy +import "maps" + // PropertiesReader provides an interface for reading metadata from the // underlying metadata container. type PropertiesReader interface { - Get(key interface{}) interface{} + Get(key any) any } // Properties provides storing and reading metadata values. Keys may be any @@ -12,14 +14,14 @@ type PropertiesReader interface { // The zero value for a Properties instance is ready for reads/writes without // any additional initialization. type Properties struct { - values map[interface{}]interface{} + values map[any]any } // Get attempts to retrieve the value the key points to. Returns nil if the // key was not found. // // Panics if key type is not comparable. -func (m *Properties) Get(key interface{}) interface{} { +func (m *Properties) Get(key any) any { m.lazyInit() return m.values[key] } @@ -28,7 +30,7 @@ func (m *Properties) Get(key interface{}) interface{} { // that key it will be replaced with the new value. // // Panics if the key type is not comparable. -func (m *Properties) Set(key, value interface{}) { +func (m *Properties) Set(key, value any) { m.lazyInit() m.values[key] = value } @@ -36,7 +38,7 @@ func (m *Properties) Set(key, value interface{}) { // Has returns whether the key exists in the metadata. // // Panics if the key type is not comparable. -func (m *Properties) Has(key interface{}) bool { +func (m *Properties) Has(key any) bool { m.lazyInit() _, ok := m.values[key] return ok @@ -55,8 +57,13 @@ func (m *Properties) SetAll(other *Properties) { } } +// Values returns a shallow clone of the property set's values. +func (m *Properties) Values() map[any]any { + return maps.Clone(m.values) +} + func (m *Properties) lazyInit() { if m.values == nil { - m.values = map[interface{}]interface{}{} + m.values = map[any]any{} } } diff --git a/vendor/github.com/aws/smithy-go/tracing/context.go b/vendor/github.com/aws/smithy-go/tracing/context.go new file mode 100644 index 0000000000..a404ed9d37 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/tracing/context.go @@ -0,0 +1,96 @@ +package tracing + +import "context" + +type ( + operationTracerKey struct{} + spanLineageKey struct{} +) + +// GetSpan returns the active trace Span on the context. +// +// The boolean in the return indicates whether a Span was actually in the +// context, but a no-op implementation will be returned if not, so callers +// can generally disregard the boolean unless they wish to explicitly confirm +// presence/absence of a Span. +func GetSpan(ctx context.Context) (Span, bool) { + lineage := getLineage(ctx) + if len(lineage) == 0 { + return nopSpan{}, false + } + + return lineage[len(lineage)-1], true +} + +// WithSpan sets the active trace Span on the context. 
+func WithSpan(parent context.Context, span Span) context.Context { + lineage := getLineage(parent) + if len(lineage) == 0 { + return context.WithValue(parent, spanLineageKey{}, []Span{span}) + } + + lineage = append(lineage, span) + return context.WithValue(parent, spanLineageKey{}, lineage) +} + +// PopSpan pops the current Span off the context, setting the active Span on +// the returned Context back to its parent and returning the REMOVED one. +// +// PopSpan on a context with no active Span will return a no-op instance. +// +// This is mostly necessary for the runtime to manage base trace spans due to +// the wrapped-function nature of the middleware stack. End-users of Smithy +// clients SHOULD NOT generally be using this API. +func PopSpan(parent context.Context) (context.Context, Span) { + lineage := getLineage(parent) + if len(lineage) == 0 { + return parent, nopSpan{} + } + + span := lineage[len(lineage)-1] + lineage = lineage[:len(lineage)-1] + return context.WithValue(parent, spanLineageKey{}, lineage), span +} + +func getLineage(ctx context.Context) []Span { + v := ctx.Value(spanLineageKey{}) + if v == nil { + return nil + } + + return v.([]Span) +} + +// GetOperationTracer returns the embedded operation-scoped Tracer on a +// Context. +// +// The boolean in the return indicates whether a Tracer was actually in the +// context, but a no-op implementation will be returned if not, so callers +// can generally disregard the boolean unless they wish to explicitly confirm +// presence/absence of a Tracer. +func GetOperationTracer(ctx context.Context) (Tracer, bool) { + v := ctx.Value(operationTracerKey{}) + if v == nil { + return nopTracer{}, false + } + + return v.(Tracer), true +} + +// WithOperationTracer returns a child Context embedding the given Tracer. +// +// The runtime will use this embed a scoped tracer for client operations, +// Smithy/SDK client callers DO NOT need to do this explicitly. +func WithOperationTracer(parent context.Context, tracer Tracer) context.Context { + return context.WithValue(parent, operationTracerKey{}, tracer) +} + +// StartSpan is a convenience API for creating tracing Spans from a Context. +// +// StartSpan uses the operation-scoped Tracer, previously stored using +// [WithOperationTracer], to start the Span. If a Tracer has not been embedded +// the returned Span will be a no-op implementation. +func StartSpan(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) { + tracer, _ := GetOperationTracer(ctx) + return tracer.StartSpan(ctx, name, opts...) +} diff --git a/vendor/github.com/aws/smithy-go/tracing/nop.go b/vendor/github.com/aws/smithy-go/tracing/nop.go new file mode 100644 index 0000000000..573d28b1c1 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/tracing/nop.go @@ -0,0 +1,32 @@ +package tracing + +import "context" + +// NopTracerProvider is a no-op tracing implementation. +type NopTracerProvider struct{} + +var _ TracerProvider = (*NopTracerProvider)(nil) + +// Tracer returns a tracer which creates no-op spans. 
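Illustrative only, not part of the vendored diff: a minimal sketch exercising the span-propagation helpers from tracing/context.go with the no-op tracer. The scope and span names are arbitrary examples.

package main

import (
	"context"
	"fmt"

	"github.com/aws/smithy-go/tracing"
)

func main() {
	ctx := context.Background()

	// The client runtime normally embeds the operation-scoped tracer itself.
	tracer := tracing.NopTracerProvider{}.Tracer("example.scope")
	ctx = tracing.WithOperationTracer(ctx, tracer)

	// StartSpan resolves the embedded tracer; the no-op tracer returns a no-op span.
	ctx, span := tracing.StartSpan(ctx, "ExampleOperation")
	defer span.End()

	// The no-op tracer does not store its span on the context, so store it
	// explicitly here to demonstrate the lineage helpers.
	ctx = tracing.WithSpan(ctx, span)
	active, ok := tracing.GetSpan(ctx)
	fmt.Println(active.Name(), ok) // empty name and true with the no-op span

	// PopSpan removes the active span and restores its parent (none here).
	ctx, popped := tracing.PopSpan(ctx)
	_, _ = ctx, popped
}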
+func (NopTracerProvider) Tracer(string, ...TracerOption) Tracer { + return nopTracer{} +} + +type nopTracer struct{} + +var _ Tracer = (*nopTracer)(nil) + +func (nopTracer) StartSpan(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) { + return ctx, nopSpan{} +} + +type nopSpan struct{} + +var _ Span = (*nopSpan)(nil) + +func (nopSpan) Name() string { return "" } +func (nopSpan) Context() SpanContext { return SpanContext{} } +func (nopSpan) AddEvent(string, ...EventOption) {} +func (nopSpan) SetProperty(any, any) {} +func (nopSpan) SetStatus(SpanStatus) {} +func (nopSpan) End() {} diff --git a/vendor/github.com/aws/smithy-go/tracing/tracing.go b/vendor/github.com/aws/smithy-go/tracing/tracing.go new file mode 100644 index 0000000000..089ed3932e --- /dev/null +++ b/vendor/github.com/aws/smithy-go/tracing/tracing.go @@ -0,0 +1,95 @@ +// Package tracing defines tracing APIs to be used by Smithy clients. +package tracing + +import ( + "context" + + "github.com/aws/smithy-go" +) + +// SpanStatus records the "success" state of an observed span. +type SpanStatus int + +// Enumeration of SpanStatus. +const ( + SpanStatusUnset SpanStatus = iota + SpanStatusOK + SpanStatusError +) + +// SpanKind indicates the nature of the work being performed. +type SpanKind int + +// Enumeration of SpanKind. +const ( + SpanKindInternal SpanKind = iota + SpanKindClient + SpanKindServer + SpanKindProducer + SpanKindConsumer +) + +// TracerProvider is the entry point for creating client traces. +type TracerProvider interface { + Tracer(scope string, opts ...TracerOption) Tracer +} + +// TracerOption applies configuration to a tracer. +type TracerOption func(o *TracerOptions) + +// TracerOptions represent configuration for tracers. +type TracerOptions struct { + Properties smithy.Properties +} + +// Tracer is the entry point for creating observed client Spans. +// +// Spans created by tracers propagate by existing on the Context. Consumers of +// the API can use [GetSpan] to pull the active Span from a Context. +// +// Creation of child Spans is implicit through Context persistence. If +// CreateSpan is called with a Context that holds a Span, the result will be a +// child of that Span. +type Tracer interface { + StartSpan(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) +} + +// SpanOption applies configuration to a span. +type SpanOption func(o *SpanOptions) + +// SpanOptions represent configuration for span events. +type SpanOptions struct { + Kind SpanKind + Properties smithy.Properties +} + +// Span records a conceptually individual unit of work that takes place in a +// Smithy client operation. +type Span interface { + Name() string + Context() SpanContext + AddEvent(name string, opts ...EventOption) + SetStatus(status SpanStatus) + SetProperty(k, v any) + End() +} + +// EventOption applies configuration to a span event. +type EventOption func(o *EventOptions) + +// EventOptions represent configuration for span events. +type EventOptions struct { + Properties smithy.Properties +} + +// SpanContext uniquely identifies a Span. +type SpanContext struct { + TraceID string + SpanID string + IsRemote bool +} + +// IsValid is true when a span has nonzero trace and span IDs. 
+func (ctx *SpanContext) IsValid() bool { + return len(ctx.TraceID) != 0 && len(ctx.SpanID) != 0 +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/client.go b/vendor/github.com/aws/smithy-go/transport/http/client.go index e691c69bf4..0fceae81db 100644 --- a/vendor/github.com/aws/smithy-go/transport/http/client.go +++ b/vendor/github.com/aws/smithy-go/transport/http/client.go @@ -6,7 +6,9 @@ import ( "net/http" smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" ) // ClientDo provides the interface for custom HTTP client implementations. @@ -27,13 +29,30 @@ func (fn ClientDoFunc) Do(r *http.Request) (*http.Response, error) { // implementation is http.Client. type ClientHandler struct { client ClientDo + + Meter metrics.Meter // For HTTP client metrics. } // NewClientHandler returns an initialized middleware handler for the client. +// +// Deprecated: Use [NewClientHandlerWithOptions]. func NewClientHandler(client ClientDo) ClientHandler { - return ClientHandler{ + return NewClientHandlerWithOptions(client) +} + +// NewClientHandlerWithOptions returns an initialized middleware handler for the client +// with applied options. +func NewClientHandlerWithOptions(client ClientDo, opts ...func(*ClientHandler)) ClientHandler { + h := ClientHandler{ client: client, } + for _, opt := range opts { + opt(&h) + } + if h.Meter == nil { + h.Meter = metrics.NopMeterProvider{}.Meter("") + } + return h } // Handle implements the middleware Handler interface, that will invoke the @@ -42,6 +61,14 @@ func NewClientHandler(client ClientDo) ClientHandler { func (c ClientHandler) Handle(ctx context.Context, input interface{}) ( out interface{}, metadata middleware.Metadata, err error, ) { + ctx, span := tracing.StartSpan(ctx, "DoHTTPRequest") + defer span.End() + + ctx, client, err := withMetrics(ctx, c.client, c.Meter) + if err != nil { + return nil, metadata, fmt.Errorf("instrument with HTTP metrics: %w", err) + } + req, ok := input.(*Request) if !ok { return nil, metadata, fmt.Errorf("expect Smithy http.Request value as input, got unsupported type %T", input) @@ -52,7 +79,17 @@ func (c ClientHandler) Handle(ctx context.Context, input interface{}) ( return nil, metadata, err } - resp, err := c.client.Do(builtRequest) + span.SetProperty("http.method", req.Method) + span.SetProperty("http.request_content_length", -1) // at least indicate unknown + length, ok, err := req.StreamLength() + if err != nil { + return nil, metadata, err + } + if ok { + span.SetProperty("http.request_content_length", length) + } + + resp, err := client.Do(builtRequest) if resp == nil { // Ensure a http response value is always present to prevent unexpected // panics. 
@@ -79,6 +116,10 @@ func (c ClientHandler) Handle(ctx context.Context, input interface{}) ( _ = builtRequest.Body.Close() } + span.SetProperty("net.protocol.version", fmt.Sprintf("%d.%d", resp.ProtoMajor, resp.ProtoMinor)) + span.SetProperty("http.status_code", resp.StatusCode) + span.SetProperty("http.response_content_length", resp.ContentLength) + return &Response{Response: resp}, metadata, err } diff --git a/vendor/github.com/aws/smithy-go/transport/http/metrics.go b/vendor/github.com/aws/smithy-go/transport/http/metrics.go new file mode 100644 index 0000000000..ab1101394c --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/metrics.go @@ -0,0 +1,184 @@ +package http + +import ( + "context" + "crypto/tls" + "net/http" + "net/http/httptrace" + "time" + + "github.com/aws/smithy-go/metrics" +) + +var now = time.Now + +// withMetrics instruments an HTTP client and context to collect HTTP metrics. +func withMetrics(parent context.Context, client ClientDo, meter metrics.Meter) ( + context.Context, ClientDo, error, +) { + hm, err := newHTTPMetrics(meter) + if err != nil { + return nil, nil, err + } + + ctx := httptrace.WithClientTrace(parent, &httptrace.ClientTrace{ + DNSStart: hm.DNSStart, + ConnectStart: hm.ConnectStart, + TLSHandshakeStart: hm.TLSHandshakeStart, + + GotConn: hm.GotConn(parent), + PutIdleConn: hm.PutIdleConn(parent), + ConnectDone: hm.ConnectDone(parent), + DNSDone: hm.DNSDone(parent), + TLSHandshakeDone: hm.TLSHandshakeDone(parent), + GotFirstResponseByte: hm.GotFirstResponseByte(parent), + }) + return ctx, &timedClientDo{client, hm}, nil +} + +type timedClientDo struct { + ClientDo + hm *httpMetrics +} + +func (c *timedClientDo) Do(r *http.Request) (*http.Response, error) { + c.hm.doStart = now() + resp, err := c.ClientDo.Do(r) + + c.hm.DoRequestDuration.Record(r.Context(), elapsed(c.hm.doStart)) + return resp, err +} + +type httpMetrics struct { + DNSLookupDuration metrics.Float64Histogram // client.http.connections.dns_lookup_duration + ConnectDuration metrics.Float64Histogram // client.http.connections.acquire_duration + TLSHandshakeDuration metrics.Float64Histogram // client.http.connections.tls_handshake_duration + ConnectionUsage metrics.Int64UpDownCounter // client.http.connections.usage + + DoRequestDuration metrics.Float64Histogram // client.http.do_request_duration + TimeToFirstByte metrics.Float64Histogram // client.http.time_to_first_byte + + doStart time.Time + dnsStart time.Time + connectStart time.Time + tlsStart time.Time +} + +func newHTTPMetrics(meter metrics.Meter) (*httpMetrics, error) { + hm := &httpMetrics{} + + var err error + hm.DNSLookupDuration, err = meter.Float64Histogram("client.http.connections.dns_lookup_duration", func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = "The time it takes a request to perform DNS lookup." + }) + if err != nil { + return nil, err + } + hm.ConnectDuration, err = meter.Float64Histogram("client.http.connections.acquire_duration", func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = "The time it takes a request to acquire a connection." + }) + if err != nil { + return nil, err + } + hm.TLSHandshakeDuration, err = meter.Float64Histogram("client.http.connections.tls_handshake_duration", func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = "The time it takes an HTTP request to perform the TLS handshake." 
+ }) + if err != nil { + return nil, err + } + hm.ConnectionUsage, err = meter.Int64UpDownCounter("client.http.connections.usage", func(o *metrics.InstrumentOptions) { + o.UnitLabel = "{connection}" + o.Description = "Current state of connections pool." + }) + if err != nil { + return nil, err + } + hm.DoRequestDuration, err = meter.Float64Histogram("client.http.do_request_duration", func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = "Time spent performing an entire HTTP transaction." + }) + if err != nil { + return nil, err + } + hm.TimeToFirstByte, err = meter.Float64Histogram("client.http.time_to_first_byte", func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = "Time from start of transaction to when the first response byte is available." + }) + if err != nil { + return nil, err + } + + return hm, nil +} + +func (m *httpMetrics) DNSStart(httptrace.DNSStartInfo) { + m.dnsStart = now() +} + +func (m *httpMetrics) ConnectStart(string, string) { + m.connectStart = now() +} + +func (m *httpMetrics) TLSHandshakeStart() { + m.tlsStart = now() +} + +func (m *httpMetrics) GotConn(ctx context.Context) func(httptrace.GotConnInfo) { + return func(httptrace.GotConnInfo) { + m.addConnAcquired(ctx, 1) + } +} + +func (m *httpMetrics) PutIdleConn(ctx context.Context) func(error) { + return func(error) { + m.addConnAcquired(ctx, -1) + } +} + +func (m *httpMetrics) DNSDone(ctx context.Context) func(httptrace.DNSDoneInfo) { + return func(httptrace.DNSDoneInfo) { + m.DNSLookupDuration.Record(ctx, elapsed(m.dnsStart)) + } +} + +func (m *httpMetrics) ConnectDone(ctx context.Context) func(string, string, error) { + return func(string, string, error) { + m.ConnectDuration.Record(ctx, elapsed(m.connectStart)) + } +} + +func (m *httpMetrics) TLSHandshakeDone(ctx context.Context) func(tls.ConnectionState, error) { + return func(tls.ConnectionState, error) { + m.TLSHandshakeDuration.Record(ctx, elapsed(m.tlsStart)) + } +} + +func (m *httpMetrics) GotFirstResponseByte(ctx context.Context) func() { + return func() { + m.TimeToFirstByte.Record(ctx, elapsed(m.doStart)) + } +} + +func (m *httpMetrics) addConnAcquired(ctx context.Context, incr int64) { + m.ConnectionUsage.Add(ctx, incr, func(o *metrics.RecordMetricOptions) { + o.Properties.Set("state", "acquired") + }) +} + +// Not used: it is recommended to track acquired vs idle conn, but we can't +// determine when something is truly idle with the current HTTP client hooks +// available to us. 
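Illustrative only, not part of the vendored diff: a minimal sketch of wiring a meter into the HTTP client handler via the new NewClientHandlerWithOptions constructor. The scope name is an arbitrary example, and the no-op meter stands in for a real provider.

package main

import (
	"net/http"

	"github.com/aws/smithy-go/metrics"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	// *http.Client satisfies the ClientDo interface used by the handler.
	handler := smithyhttp.NewClientHandlerWithOptions(http.DefaultClient, func(h *smithyhttp.ClientHandler) {
		// When left nil, the constructor falls back to the no-op meter.
		h.Meter = metrics.NopMeterProvider{}.Meter("example.scope")
	})
	_ = handler
}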
+func (m *httpMetrics) addConnIdle(ctx context.Context, incr int64) { + m.ConnectionUsage.Add(ctx, incr, func(o *metrics.RecordMetricOptions) { + o.Properties.Set("state", "idle") + }) +} + +func elapsed(start time.Time) float64 { + end := now() + elapsed := end.Sub(start) + return float64(elapsed) / 1e9 +} diff --git a/vendor/github.com/cenkalti/backoff/v3/.travis.yml b/vendor/github.com/cenkalti/backoff/v3/.travis.yml deleted file mode 100644 index 47a6a46ec2..0000000000 --- a/vendor/github.com/cenkalti/backoff/v3/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go: - - 1.7 - - 1.x - - tip -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover -script: - - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/cenkalti/backoff/v3/.gitignore b/vendor/github.com/cenkalti/backoff/v4/.gitignore similarity index 94% rename from vendor/github.com/cenkalti/backoff/v3/.gitignore rename to vendor/github.com/cenkalti/backoff/v4/.gitignore index 00268614f0..50d95c548b 100644 --- a/vendor/github.com/cenkalti/backoff/v3/.gitignore +++ b/vendor/github.com/cenkalti/backoff/v4/.gitignore @@ -20,3 +20,6 @@ _cgo_export.* _testmain.go *.exe + +# IDEs +.idea/ diff --git a/vendor/github.com/cenkalti/backoff/v3/LICENSE b/vendor/github.com/cenkalti/backoff/v4/LICENSE similarity index 100% rename from vendor/github.com/cenkalti/backoff/v3/LICENSE rename to vendor/github.com/cenkalti/backoff/v4/LICENSE diff --git a/vendor/github.com/cenkalti/backoff/v3/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md similarity index 67% rename from vendor/github.com/cenkalti/backoff/v3/README.md rename to vendor/github.com/cenkalti/backoff/v4/README.md index 3673df487f..9433004a28 100644 --- a/vendor/github.com/cenkalti/backoff/v3/README.md +++ b/vendor/github.com/cenkalti/backoff/v4/README.md @@ -1,4 +1,4 @@ -# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] +# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls] This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. @@ -9,10 +9,9 @@ The retries exponentially increase and stop increasing when a certain threshold ## Usage -Import path is `github.com/cenkalti/backoff/v3`. Please note the version part at the end. +Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end. -godoc.org does not support modules yet, -so you can use https://godoc.org/gopkg.in/cenkalti/backoff.v3 to view the documentation. +Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation. ## Contributing @@ -20,14 +19,12 @@ so you can use https://godoc.org/gopkg.in/cenkalti/backoff.v3 to view the docume * Please don't send a PR without opening an issue and discussing it first. * If proposed change is not a common use case, I will probably not accept it. 
-[godoc]: https://godoc.org/github.com/cenkalti/backoff +[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4 [godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png -[travis]: https://travis-ci.org/cenkalti/backoff -[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master [coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master [coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master [google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java [exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff -[advanced example]: https://godoc.org/github.com/cenkalti/backoff#example_ +[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples diff --git a/vendor/github.com/cenkalti/backoff/v3/backoff.go b/vendor/github.com/cenkalti/backoff/v4/backoff.go similarity index 100% rename from vendor/github.com/cenkalti/backoff/v3/backoff.go rename to vendor/github.com/cenkalti/backoff/v4/backoff.go diff --git a/vendor/github.com/cenkalti/backoff/v3/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go similarity index 86% rename from vendor/github.com/cenkalti/backoff/v3/context.go rename to vendor/github.com/cenkalti/backoff/v4/context.go index fcff86c1b3..48482330eb 100644 --- a/vendor/github.com/cenkalti/backoff/v3/context.go +++ b/vendor/github.com/cenkalti/backoff/v4/context.go @@ -57,10 +57,6 @@ func (b *backOffContext) NextBackOff() time.Duration { case <-b.ctx.Done(): return Stop default: + return b.BackOff.NextBackOff() } - next := b.BackOff.NextBackOff() - if deadline, ok := b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next { // nolint: gosimple - return Stop - } - return next } diff --git a/vendor/github.com/cenkalti/backoff/v3/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go similarity index 67% rename from vendor/github.com/cenkalti/backoff/v3/exponential.go rename to vendor/github.com/cenkalti/backoff/v4/exponential.go index cb11cc1d21..aac99f196a 100644 --- a/vendor/github.com/cenkalti/backoff/v3/exponential.go +++ b/vendor/github.com/cenkalti/backoff/v4/exponential.go @@ -56,9 +56,10 @@ type ExponentialBackOff struct { RandomizationFactor float64 Multiplier float64 MaxInterval time.Duration - // After MaxElapsedTime the ExponentialBackOff stops. + // After MaxElapsedTime the ExponentialBackOff returns Stop. // It never stops if MaxElapsedTime == 0. MaxElapsedTime time.Duration + Stop time.Duration Clock Clock currentInterval time.Duration @@ -70,6 +71,9 @@ type Clock interface { Now() time.Time } +// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options. +type ExponentialBackOffOpts func(*ExponentialBackOff) + // Default values for ExponentialBackOff. const ( DefaultInitialInterval = 500 * time.Millisecond @@ -80,19 +84,72 @@ const ( ) // NewExponentialBackOff creates an instance of ExponentialBackOff using default values. 
-func NewExponentialBackOff() *ExponentialBackOff { +func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff { b := &ExponentialBackOff{ InitialInterval: DefaultInitialInterval, RandomizationFactor: DefaultRandomizationFactor, Multiplier: DefaultMultiplier, MaxInterval: DefaultMaxInterval, MaxElapsedTime: DefaultMaxElapsedTime, + Stop: Stop, Clock: SystemClock, } + for _, fn := range opts { + fn(b) + } b.Reset() return b } +// WithInitialInterval sets the initial interval between retries. +func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.InitialInterval = duration + } +} + +// WithRandomizationFactor sets the randomization factor to add jitter to intervals. +func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.RandomizationFactor = randomizationFactor + } +} + +// WithMultiplier sets the multiplier for increasing the interval after each retry. +func WithMultiplier(multiplier float64) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.Multiplier = multiplier + } +} + +// WithMaxInterval sets the maximum interval between retries. +func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.MaxInterval = duration + } +} + +// WithMaxElapsedTime sets the maximum total time for retries. +func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.MaxElapsedTime = duration + } +} + +// WithRetryStopDuration sets the duration after which retries should stop. +func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.Stop = duration + } +} + +// WithClockProvider sets the clock used to measure time. +func WithClockProvider(clock Clock) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.Clock = clock + } +} + type systemClock struct{} func (t systemClock) Now() time.Time { @@ -113,11 +170,13 @@ func (b *ExponentialBackOff) Reset() { // Randomized interval = RetryInterval * (1 ± RandomizationFactor) func (b *ExponentialBackOff) NextBackOff() time.Duration { // Make sure we have not gone over the maximum elapsed time. - if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime { - return Stop + elapsed := b.GetElapsedTime() + next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) + b.incrementCurrentInterval() + if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime { + return b.Stop } - defer b.incrementCurrentInterval() - return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) + return next } // GetElapsedTime returns the elapsed time since an ExponentialBackOff instance @@ -141,8 +200,11 @@ func (b *ExponentialBackOff) incrementCurrentInterval() { } // Returns a random value from the following interval: -// [randomizationFactor * currentInterval, randomizationFactor * currentInterval]. +// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + if randomizationFactor == 0 { + return currentInterval // make sure no randomness is used when randomizationFactor is 0. 
+ } var delta = randomizationFactor * float64(currentInterval) var minInterval = float64(currentInterval) - delta var maxInterval = float64(currentInterval) + delta diff --git a/vendor/github.com/cenkalti/backoff/v3/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go similarity index 54% rename from vendor/github.com/cenkalti/backoff/v3/retry.go rename to vendor/github.com/cenkalti/backoff/v4/retry.go index 6c776ccf8e..b9c0c51cd7 100644 --- a/vendor/github.com/cenkalti/backoff/v3/retry.go +++ b/vendor/github.com/cenkalti/backoff/v4/retry.go @@ -1,11 +1,24 @@ package backoff -import "time" +import ( + "errors" + "time" +) + +// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData(). +// The operation will be retried using a backoff policy if it returns an error. +type OperationWithData[T any] func() (T, error) // An Operation is executing by Retry() or RetryNotify(). // The operation will be retried using a backoff policy if it returns an error. type Operation func() error +func (o Operation) withEmptyData() OperationWithData[struct{}] { + return func() (struct{}, error) { + return struct{}{}, o() + } +} + // Notify is a notify-on-error function. It receives an operation error and // backoff delay if the operation failed (with an error). // @@ -25,18 +38,41 @@ func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) } +// RetryWithData is like Retry but returns data in the response too. +func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) { + return RetryNotifyWithData(o, b, nil) +} + // RetryNotify calls notify function with the error and wait duration // for each failed attempt before sleep. func RetryNotify(operation Operation, b BackOff, notify Notify) error { return RetryNotifyWithTimer(operation, b, notify, nil) } +// RetryNotifyWithData is like RetryNotify but returns data in the response too. +func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) { + return doRetryNotify(operation, b, notify, nil) +} + // RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer // for each failed attempt before sleep. // A default timer that uses system timer is used when nil is passed. func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { - var err error - var next time.Duration + _, err := doRetryNotify(operation.withEmptyData(), b, notify, t) + return err +} + +// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too. 
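Illustrative only, not part of the vendored diff: a minimal sketch of the functional options and the generic RetryWithData helper introduced by the backoff v3 to v4 upgrade. The retried operation is a placeholder, not part of the library.

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	// Functional options configure the ExponentialBackOff policy.
	policy := backoff.NewExponentialBackOff(
		backoff.WithInitialInterval(100*time.Millisecond),
		backoff.WithMaxElapsedTime(2*time.Second),
	)

	// RetryWithData retries the operation under the policy and also returns its value.
	attempts := 0
	result, err := backoff.RetryWithData(func() (string, error) {
		attempts++
		if attempts < 3 {
			return "", errors.New("transient failure")
		}
		return "ok", nil
	}, policy)
	fmt.Println(result, err, attempts)
}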
+func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { + return doRetryNotify(operation, b, notify, t) +} + +func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { + var ( + err error + next time.Duration + res T + ) if t == nil { t = &defaultTimer{} } @@ -49,16 +85,22 @@ func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer b.Reset() for { - if err = operation(); err == nil { - return nil + res, err = operation() + if err == nil { + return res, nil } - if permanent, ok := err.(*PermanentError); ok { - return permanent.Err + var permanent *PermanentError + if errors.As(err, &permanent) { + return res, permanent.Err } if next = b.NextBackOff(); next == Stop { - return err + if cerr := ctx.Err(); cerr != nil { + return res, cerr + } + + return res, err } if notify != nil { @@ -69,7 +111,7 @@ func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer select { case <-ctx.Done(): - return ctx.Err() + return res, ctx.Err() case <-t.C(): } } @@ -88,8 +130,16 @@ func (e *PermanentError) Unwrap() error { return e.Err } +func (e *PermanentError) Is(target error) bool { + _, ok := target.(*PermanentError) + return ok +} + // Permanent wraps the given err in a *PermanentError. -func Permanent(err error) *PermanentError { +func Permanent(err error) error { + if err == nil { + return nil + } return &PermanentError{ Err: err, } diff --git a/vendor/github.com/cenkalti/backoff/v3/ticker.go b/vendor/github.com/cenkalti/backoff/v4/ticker.go similarity index 97% rename from vendor/github.com/cenkalti/backoff/v3/ticker.go rename to vendor/github.com/cenkalti/backoff/v4/ticker.go index ed699e0e30..df9d68bce5 100644 --- a/vendor/github.com/cenkalti/backoff/v3/ticker.go +++ b/vendor/github.com/cenkalti/backoff/v4/ticker.go @@ -33,6 +33,9 @@ func NewTicker(b BackOff) *Ticker { // NewTickerWithTimer returns a new Ticker with a custom timer. // A default timer that uses system timer is used when nil is passed. func NewTickerWithTimer(b BackOff, timer Timer) *Ticker { + if timer == nil { + timer = &defaultTimer{} + } c := make(chan time.Time) t := &Ticker{ C: c, diff --git a/vendor/github.com/cenkalti/backoff/v3/timer.go b/vendor/github.com/cenkalti/backoff/v4/timer.go similarity index 100% rename from vendor/github.com/cenkalti/backoff/v3/timer.go rename to vendor/github.com/cenkalti/backoff/v4/timer.go diff --git a/vendor/github.com/cenkalti/backoff/v3/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go similarity index 94% rename from vendor/github.com/cenkalti/backoff/v3/tries.go rename to vendor/github.com/cenkalti/backoff/v4/tries.go index cfeefd9b76..28d58ca37c 100644 --- a/vendor/github.com/cenkalti/backoff/v3/tries.go +++ b/vendor/github.com/cenkalti/backoff/v4/tries.go @@ -20,6 +20,9 @@ type backOffTries struct { } func (b *backOffTries) NextBackOff() time.Duration { + if b.maxTries == 0 { + return Stop + } if b.maxTries > 0 { if b.maxTries <= b.numTries { return Stop diff --git a/vendor/github.com/cncf/xds/go/LICENSE b/vendor/github.com/cncf/xds/go/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/cncf/xds/go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go new file mode 100644 index 0000000000..0281b3ee58 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go @@ -0,0 +1,411 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: udpa/annotations/migrate.proto + +package annotations + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type MigrateAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Rename string `protobuf:"bytes,1,opt,name=rename,proto3" json:"rename,omitempty"` +} + +func (x *MigrateAnnotation) Reset() { + *x = MigrateAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_udpa_annotations_migrate_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MigrateAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MigrateAnnotation) ProtoMessage() {} + +func (x *MigrateAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_udpa_annotations_migrate_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MigrateAnnotation.ProtoReflect.Descriptor instead. +func (*MigrateAnnotation) Descriptor() ([]byte, []int) { + return file_udpa_annotations_migrate_proto_rawDescGZIP(), []int{0} +} + +func (x *MigrateAnnotation) GetRename() string { + if x != nil { + return x.Rename + } + return "" +} + +type FieldMigrateAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Rename string `protobuf:"bytes,1,opt,name=rename,proto3" json:"rename,omitempty"` + OneofPromotion string `protobuf:"bytes,2,opt,name=oneof_promotion,json=oneofPromotion,proto3" json:"oneof_promotion,omitempty"` +} + +func (x *FieldMigrateAnnotation) Reset() { + *x = FieldMigrateAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_udpa_annotations_migrate_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldMigrateAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldMigrateAnnotation) ProtoMessage() {} + +func (x *FieldMigrateAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_udpa_annotations_migrate_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldMigrateAnnotation.ProtoReflect.Descriptor instead. 
+func (*FieldMigrateAnnotation) Descriptor() ([]byte, []int) { + return file_udpa_annotations_migrate_proto_rawDescGZIP(), []int{1} +} + +func (x *FieldMigrateAnnotation) GetRename() string { + if x != nil { + return x.Rename + } + return "" +} + +func (x *FieldMigrateAnnotation) GetOneofPromotion() string { + if x != nil { + return x.OneofPromotion + } + return "" +} + +type FileMigrateAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MoveToPackage string `protobuf:"bytes,2,opt,name=move_to_package,json=moveToPackage,proto3" json:"move_to_package,omitempty"` +} + +func (x *FileMigrateAnnotation) Reset() { + *x = FileMigrateAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_udpa_annotations_migrate_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileMigrateAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileMigrateAnnotation) ProtoMessage() {} + +func (x *FileMigrateAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_udpa_annotations_migrate_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileMigrateAnnotation.ProtoReflect.Descriptor instead. +func (*FileMigrateAnnotation) Descriptor() ([]byte, []int) { + return file_udpa_annotations_migrate_proto_rawDescGZIP(), []int{2} +} + +func (x *FileMigrateAnnotation) GetMoveToPackage() string { + if x != nil { + return x.MoveToPackage + } + return "" +} + +var file_udpa_annotations_migrate_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*MigrateAnnotation)(nil), + Field: 171962766, + Name: "udpa.annotations.message_migrate", + Tag: "bytes,171962766,opt,name=message_migrate", + Filename: "udpa/annotations/migrate.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*FieldMigrateAnnotation)(nil), + Field: 171962766, + Name: "udpa.annotations.field_migrate", + Tag: "bytes,171962766,opt,name=field_migrate", + Filename: "udpa/annotations/migrate.proto", + }, + { + ExtendedType: (*descriptorpb.EnumOptions)(nil), + ExtensionType: (*MigrateAnnotation)(nil), + Field: 171962766, + Name: "udpa.annotations.enum_migrate", + Tag: "bytes,171962766,opt,name=enum_migrate", + Filename: "udpa/annotations/migrate.proto", + }, + { + ExtendedType: (*descriptorpb.EnumValueOptions)(nil), + ExtensionType: (*MigrateAnnotation)(nil), + Field: 171962766, + Name: "udpa.annotations.enum_value_migrate", + Tag: "bytes,171962766,opt,name=enum_value_migrate", + Filename: "udpa/annotations/migrate.proto", + }, + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*FileMigrateAnnotation)(nil), + Field: 171962766, + Name: "udpa.annotations.file_migrate", + Tag: "bytes,171962766,opt,name=file_migrate", + Filename: "udpa/annotations/migrate.proto", + }, +} + +// Extension fields to descriptorpb.MessageOptions. +var ( + // optional udpa.annotations.MigrateAnnotation message_migrate = 171962766; + E_MessageMigrate = &file_udpa_annotations_migrate_proto_extTypes[0] +) + +// Extension fields to descriptorpb.FieldOptions. 
+var ( + // optional udpa.annotations.FieldMigrateAnnotation field_migrate = 171962766; + E_FieldMigrate = &file_udpa_annotations_migrate_proto_extTypes[1] +) + +// Extension fields to descriptorpb.EnumOptions. +var ( + // optional udpa.annotations.MigrateAnnotation enum_migrate = 171962766; + E_EnumMigrate = &file_udpa_annotations_migrate_proto_extTypes[2] +) + +// Extension fields to descriptorpb.EnumValueOptions. +var ( + // optional udpa.annotations.MigrateAnnotation enum_value_migrate = 171962766; + E_EnumValueMigrate = &file_udpa_annotations_migrate_proto_extTypes[3] +) + +// Extension fields to descriptorpb.FileOptions. +var ( + // optional udpa.annotations.FileMigrateAnnotation file_migrate = 171962766; + E_FileMigrate = &file_udpa_annotations_migrate_proto_extTypes[4] +) + +var File_udpa_annotations_migrate_proto protoreflect.FileDescriptor + +var file_udpa_annotations_migrate_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x10, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x2b, 0x0a, 0x11, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x6e, 0x61, 0x6d, + 0x65, 0x22, 0x59, 0x0a, 0x16, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, + 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x70, 0x72, 0x6f, + 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6f, 0x6e, + 0x65, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3f, 0x0a, 0x15, + 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x74, 0x6f, + 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x6d, 0x6f, 0x76, 0x65, 0x54, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x3a, 0x70, 0x0a, + 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, + 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x8e, 0xe3, 0xff, 0x51, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x75, 0x64, 0x70, + 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x3a, + 0x6f, 0x0a, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, + 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 
0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x8e, 0xe3, 0xff, 0x51, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x75, 0x64, 0x70, 0x61, 0x2e, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, + 0x3a, 0x67, 0x0a, 0x0c, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, + 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x8e, + 0xe3, 0xff, 0x51, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x65, 0x6e, + 0x75, 0x6d, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x3a, 0x77, 0x0a, 0x12, 0x65, 0x6e, 0x75, + 0x6d, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x12, + 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x8e, 0xe3, 0xff, 0x51, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x75, 0x64, + 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x10, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x65, 0x3a, 0x6b, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x8e, 0xe3, 0xff, 0x51, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x75, 0x64, 0x70, 0x61, + 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x69, 0x6c, + 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x42, + 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, + 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_udpa_annotations_migrate_proto_rawDescOnce sync.Once + file_udpa_annotations_migrate_proto_rawDescData = file_udpa_annotations_migrate_proto_rawDesc +) + +func file_udpa_annotations_migrate_proto_rawDescGZIP() []byte { + file_udpa_annotations_migrate_proto_rawDescOnce.Do(func() { + file_udpa_annotations_migrate_proto_rawDescData = protoimpl.X.CompressGZIP(file_udpa_annotations_migrate_proto_rawDescData) + }) + return file_udpa_annotations_migrate_proto_rawDescData +} + +var file_udpa_annotations_migrate_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_udpa_annotations_migrate_proto_goTypes = []interface{}{ + (*MigrateAnnotation)(nil), // 0: 
udpa.annotations.MigrateAnnotation + (*FieldMigrateAnnotation)(nil), // 1: udpa.annotations.FieldMigrateAnnotation + (*FileMigrateAnnotation)(nil), // 2: udpa.annotations.FileMigrateAnnotation + (*descriptorpb.MessageOptions)(nil), // 3: google.protobuf.MessageOptions + (*descriptorpb.FieldOptions)(nil), // 4: google.protobuf.FieldOptions + (*descriptorpb.EnumOptions)(nil), // 5: google.protobuf.EnumOptions + (*descriptorpb.EnumValueOptions)(nil), // 6: google.protobuf.EnumValueOptions + (*descriptorpb.FileOptions)(nil), // 7: google.protobuf.FileOptions +} +var file_udpa_annotations_migrate_proto_depIdxs = []int32{ + 3, // 0: udpa.annotations.message_migrate:extendee -> google.protobuf.MessageOptions + 4, // 1: udpa.annotations.field_migrate:extendee -> google.protobuf.FieldOptions + 5, // 2: udpa.annotations.enum_migrate:extendee -> google.protobuf.EnumOptions + 6, // 3: udpa.annotations.enum_value_migrate:extendee -> google.protobuf.EnumValueOptions + 7, // 4: udpa.annotations.file_migrate:extendee -> google.protobuf.FileOptions + 0, // 5: udpa.annotations.message_migrate:type_name -> udpa.annotations.MigrateAnnotation + 1, // 6: udpa.annotations.field_migrate:type_name -> udpa.annotations.FieldMigrateAnnotation + 0, // 7: udpa.annotations.enum_migrate:type_name -> udpa.annotations.MigrateAnnotation + 0, // 8: udpa.annotations.enum_value_migrate:type_name -> udpa.annotations.MigrateAnnotation + 2, // 9: udpa.annotations.file_migrate:type_name -> udpa.annotations.FileMigrateAnnotation + 10, // [10:10] is the sub-list for method output_type + 10, // [10:10] is the sub-list for method input_type + 5, // [5:10] is the sub-list for extension type_name + 0, // [0:5] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_udpa_annotations_migrate_proto_init() } +func file_udpa_annotations_migrate_proto_init() { + if File_udpa_annotations_migrate_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_udpa_annotations_migrate_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MigrateAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_udpa_annotations_migrate_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldMigrateAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_udpa_annotations_migrate_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileMigrateAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_udpa_annotations_migrate_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 5, + NumServices: 0, + }, + GoTypes: file_udpa_annotations_migrate_proto_goTypes, + DependencyIndexes: file_udpa_annotations_migrate_proto_depIdxs, + MessageInfos: file_udpa_annotations_migrate_proto_msgTypes, + ExtensionInfos: file_udpa_annotations_migrate_proto_extTypes, + }.Build() + File_udpa_annotations_migrate_proto = out.File + file_udpa_annotations_migrate_proto_rawDesc = nil + file_udpa_annotations_migrate_proto_goTypes = nil + file_udpa_annotations_migrate_proto_depIdxs = nil 
+} diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.validate.go new file mode 100644 index 0000000000..38196d5eb0 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.validate.go @@ -0,0 +1,350 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: udpa/annotations/migrate.proto + +package annotations + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on MigrateAnnotation with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *MigrateAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MigrateAnnotationMultiError, or nil if none found. +func (m *MigrateAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *MigrateAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Rename + + if len(errors) > 0 { + return MigrateAnnotationMultiError(errors) + } + + return nil +} + +// MigrateAnnotationMultiError is an error wrapping multiple validation errors +// returned by MigrateAnnotation.ValidateAll() if the designated constraints +// aren't met. +type MigrateAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MigrateAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MigrateAnnotationMultiError) AllErrors() []error { return m } + +// MigrateAnnotationValidationError is the validation error returned by +// MigrateAnnotation.Validate if the designated constraints aren't met. +type MigrateAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MigrateAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MigrateAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MigrateAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MigrateAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e MigrateAnnotationValidationError) ErrorName() string { + return "MigrateAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e MigrateAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMigrateAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MigrateAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MigrateAnnotationValidationError{} + +// Validate checks the field values on FieldMigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FieldMigrateAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FieldMigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FieldMigrateAnnotationMultiError, or nil if none found. +func (m *FieldMigrateAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *FieldMigrateAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Rename + + // no validation rules for OneofPromotion + + if len(errors) > 0 { + return FieldMigrateAnnotationMultiError(errors) + } + + return nil +} + +// FieldMigrateAnnotationMultiError is an error wrapping multiple validation +// errors returned by FieldMigrateAnnotation.ValidateAll() if the designated +// constraints aren't met. +type FieldMigrateAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FieldMigrateAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FieldMigrateAnnotationMultiError) AllErrors() []error { return m } + +// FieldMigrateAnnotationValidationError is the validation error returned by +// FieldMigrateAnnotation.Validate if the designated constraints aren't met. +type FieldMigrateAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FieldMigrateAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FieldMigrateAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FieldMigrateAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FieldMigrateAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e FieldMigrateAnnotationValidationError) ErrorName() string { + return "FieldMigrateAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e FieldMigrateAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFieldMigrateAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FieldMigrateAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FieldMigrateAnnotationValidationError{} + +// Validate checks the field values on FileMigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FileMigrateAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FileMigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FileMigrateAnnotationMultiError, or nil if none found. +func (m *FileMigrateAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *FileMigrateAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for MoveToPackage + + if len(errors) > 0 { + return FileMigrateAnnotationMultiError(errors) + } + + return nil +} + +// FileMigrateAnnotationMultiError is an error wrapping multiple validation +// errors returned by FileMigrateAnnotation.ValidateAll() if the designated +// constraints aren't met. +type FileMigrateAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FileMigrateAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FileMigrateAnnotationMultiError) AllErrors() []error { return m } + +// FileMigrateAnnotationValidationError is the validation error returned by +// FileMigrateAnnotation.Validate if the designated constraints aren't met. +type FileMigrateAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FileMigrateAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FileMigrateAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FileMigrateAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FileMigrateAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e FileMigrateAnnotationValidationError) ErrorName() string { + return "FileMigrateAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e FileMigrateAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFileMigrateAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FileMigrateAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FileMigrateAnnotationValidationError{} diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go new file mode 100644 index 0000000000..cf858bd977 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go @@ -0,0 +1,196 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: udpa/annotations/security.proto + +package annotations + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type FieldSecurityAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ConfigureForUntrustedDownstream bool `protobuf:"varint,1,opt,name=configure_for_untrusted_downstream,json=configureForUntrustedDownstream,proto3" json:"configure_for_untrusted_downstream,omitempty"` + ConfigureForUntrustedUpstream bool `protobuf:"varint,2,opt,name=configure_for_untrusted_upstream,json=configureForUntrustedUpstream,proto3" json:"configure_for_untrusted_upstream,omitempty"` +} + +func (x *FieldSecurityAnnotation) Reset() { + *x = FieldSecurityAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_udpa_annotations_security_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldSecurityAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldSecurityAnnotation) ProtoMessage() {} + +func (x *FieldSecurityAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_udpa_annotations_security_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldSecurityAnnotation.ProtoReflect.Descriptor instead. 
+func (*FieldSecurityAnnotation) Descriptor() ([]byte, []int) { + return file_udpa_annotations_security_proto_rawDescGZIP(), []int{0} +} + +func (x *FieldSecurityAnnotation) GetConfigureForUntrustedDownstream() bool { + if x != nil { + return x.ConfigureForUntrustedDownstream + } + return false +} + +func (x *FieldSecurityAnnotation) GetConfigureForUntrustedUpstream() bool { + if x != nil { + return x.ConfigureForUntrustedUpstream + } + return false +} + +var file_udpa_annotations_security_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*FieldSecurityAnnotation)(nil), + Field: 11122993, + Name: "udpa.annotations.security", + Tag: "bytes,11122993,opt,name=security", + Filename: "udpa/annotations/security.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. +var ( + // optional udpa.annotations.FieldSecurityAnnotation security = 11122993; + E_Security = &file_udpa_annotations_security_proto_extTypes[0] +) + +var File_udpa_annotations_security_proto protoreflect.FileDescriptor + +var file_udpa_annotations_security_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x10, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xaf, 0x01, 0x0a, 0x17, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x4b, 0x0a, 0x22, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x5f, 0x66, 0x6f, + 0x72, 0x5f, 0x75, 0x6e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x6f, 0x77, 0x6e, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x55, 0x6e, 0x74, 0x72, 0x75, 0x73, + 0x74, 0x65, 0x64, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x47, 0x0a, + 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x75, + 0x6e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x65, 0x46, 0x6f, 0x72, 0x55, 0x6e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x55, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x3a, 0x67, 0x0a, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0xb1, 0xf2, 0xa6, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x75, 0x64, 0x70, + 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x41, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x73, 0x65, 0x63, 
0x75, 0x72, 0x69, 0x74, 0x79, 0x42, + 0x31, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x08, 0x01, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, + 0x6f, 0x2f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_udpa_annotations_security_proto_rawDescOnce sync.Once + file_udpa_annotations_security_proto_rawDescData = file_udpa_annotations_security_proto_rawDesc +) + +func file_udpa_annotations_security_proto_rawDescGZIP() []byte { + file_udpa_annotations_security_proto_rawDescOnce.Do(func() { + file_udpa_annotations_security_proto_rawDescData = protoimpl.X.CompressGZIP(file_udpa_annotations_security_proto_rawDescData) + }) + return file_udpa_annotations_security_proto_rawDescData +} + +var file_udpa_annotations_security_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_udpa_annotations_security_proto_goTypes = []interface{}{ + (*FieldSecurityAnnotation)(nil), // 0: udpa.annotations.FieldSecurityAnnotation + (*descriptorpb.FieldOptions)(nil), // 1: google.protobuf.FieldOptions +} +var file_udpa_annotations_security_proto_depIdxs = []int32{ + 1, // 0: udpa.annotations.security:extendee -> google.protobuf.FieldOptions + 0, // 1: udpa.annotations.security:type_name -> udpa.annotations.FieldSecurityAnnotation + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_udpa_annotations_security_proto_init() } +func file_udpa_annotations_security_proto_init() { + if File_udpa_annotations_security_proto != nil { + return + } + file_udpa_annotations_status_proto_init() + if !protoimpl.UnsafeEnabled { + file_udpa_annotations_security_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldSecurityAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_udpa_annotations_security_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_udpa_annotations_security_proto_goTypes, + DependencyIndexes: file_udpa_annotations_security_proto_depIdxs, + MessageInfos: file_udpa_annotations_security_proto_msgTypes, + ExtensionInfos: file_udpa_annotations_security_proto_extTypes, + }.Build() + File_udpa_annotations_security_proto = out.File + file_udpa_annotations_security_proto_rawDesc = nil + file_udpa_annotations_security_proto_goTypes = nil + file_udpa_annotations_security_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.validate.go new file mode 100644 index 0000000000..acc9bd7a12 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.validate.go @@ -0,0 +1,142 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: udpa/annotations/security.proto + +package annotations + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on FieldSecurityAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FieldSecurityAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FieldSecurityAnnotation with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FieldSecurityAnnotationMultiError, or nil if none found. +func (m *FieldSecurityAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *FieldSecurityAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for ConfigureForUntrustedDownstream + + // no validation rules for ConfigureForUntrustedUpstream + + if len(errors) > 0 { + return FieldSecurityAnnotationMultiError(errors) + } + + return nil +} + +// FieldSecurityAnnotationMultiError is an error wrapping multiple validation +// errors returned by FieldSecurityAnnotation.ValidateAll() if the designated +// constraints aren't met. +type FieldSecurityAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FieldSecurityAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FieldSecurityAnnotationMultiError) AllErrors() []error { return m } + +// FieldSecurityAnnotationValidationError is the validation error returned by +// FieldSecurityAnnotation.Validate if the designated constraints aren't met. +type FieldSecurityAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FieldSecurityAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FieldSecurityAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FieldSecurityAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FieldSecurityAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e FieldSecurityAnnotationValidationError) ErrorName() string { + return "FieldSecurityAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e FieldSecurityAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFieldSecurityAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FieldSecurityAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FieldSecurityAnnotationValidationError{} diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go new file mode 100644 index 0000000000..2d5c78dc29 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go @@ -0,0 +1,93 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: udpa/annotations/sensitive.proto + +package annotations + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_udpa_annotations_sensitive_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 76569463, + Name: "udpa.annotations.sensitive", + Tag: "varint,76569463,opt,name=sensitive", + Filename: "udpa/annotations/sensitive.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. 
+var ( + // optional bool sensitive = 76569463; + E_Sensitive = &file_udpa_annotations_sensitive_proto_extTypes[0] +) + +var File_udpa_annotations_sensitive_proto protoreflect.FileDescriptor + +var file_udpa_annotations_sensitive_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x10, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3e, 0x0a, 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, + 0x69, 0x76, 0x65, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0xf7, 0xb6, 0xc1, 0x24, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x65, 0x6e, + 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, + 0x2f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_udpa_annotations_sensitive_proto_goTypes = []interface{}{ + (*descriptorpb.FieldOptions)(nil), // 0: google.protobuf.FieldOptions +} +var file_udpa_annotations_sensitive_proto_depIdxs = []int32{ + 0, // 0: udpa.annotations.sensitive:extendee -> google.protobuf.FieldOptions + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_udpa_annotations_sensitive_proto_init() } +func file_udpa_annotations_sensitive_proto_init() { + if File_udpa_annotations_sensitive_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_udpa_annotations_sensitive_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_udpa_annotations_sensitive_proto_goTypes, + DependencyIndexes: file_udpa_annotations_sensitive_proto_depIdxs, + ExtensionInfos: file_udpa_annotations_sensitive_proto_extTypes, + }.Build() + File_udpa_annotations_sensitive_proto = out.File + file_udpa_annotations_sensitive_proto_rawDesc = nil + file_udpa_annotations_sensitive_proto_goTypes = nil + file_udpa_annotations_sensitive_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.validate.go new file mode 100644 index 0000000000..f3fa61974c --- /dev/null +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.validate.go @@ -0,0 +1,36 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: udpa/annotations/sensitive.proto + +package annotations + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go new file mode 100644 index 0000000000..c96818b17c --- /dev/null +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go @@ -0,0 +1,253 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: udpa/annotations/status.proto + +package annotations + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type PackageVersionStatus int32 + +const ( + PackageVersionStatus_UNKNOWN PackageVersionStatus = 0 + PackageVersionStatus_FROZEN PackageVersionStatus = 1 + PackageVersionStatus_ACTIVE PackageVersionStatus = 2 + PackageVersionStatus_NEXT_MAJOR_VERSION_CANDIDATE PackageVersionStatus = 3 +) + +// Enum value maps for PackageVersionStatus. +var ( + PackageVersionStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "FROZEN", + 2: "ACTIVE", + 3: "NEXT_MAJOR_VERSION_CANDIDATE", + } + PackageVersionStatus_value = map[string]int32{ + "UNKNOWN": 0, + "FROZEN": 1, + "ACTIVE": 2, + "NEXT_MAJOR_VERSION_CANDIDATE": 3, + } +) + +func (x PackageVersionStatus) Enum() *PackageVersionStatus { + p := new(PackageVersionStatus) + *p = x + return p +} + +func (x PackageVersionStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PackageVersionStatus) Descriptor() protoreflect.EnumDescriptor { + return file_udpa_annotations_status_proto_enumTypes[0].Descriptor() +} + +func (PackageVersionStatus) Type() protoreflect.EnumType { + return &file_udpa_annotations_status_proto_enumTypes[0] +} + +func (x PackageVersionStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PackageVersionStatus.Descriptor instead. 
+func (PackageVersionStatus) EnumDescriptor() ([]byte, []int) { + return file_udpa_annotations_status_proto_rawDescGZIP(), []int{0} +} + +type StatusAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + WorkInProgress bool `protobuf:"varint,1,opt,name=work_in_progress,json=workInProgress,proto3" json:"work_in_progress,omitempty"` + PackageVersionStatus PackageVersionStatus `protobuf:"varint,2,opt,name=package_version_status,json=packageVersionStatus,proto3,enum=udpa.annotations.PackageVersionStatus" json:"package_version_status,omitempty"` +} + +func (x *StatusAnnotation) Reset() { + *x = StatusAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_udpa_annotations_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatusAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusAnnotation) ProtoMessage() {} + +func (x *StatusAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_udpa_annotations_status_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusAnnotation.ProtoReflect.Descriptor instead. +func (*StatusAnnotation) Descriptor() ([]byte, []int) { + return file_udpa_annotations_status_proto_rawDescGZIP(), []int{0} +} + +func (x *StatusAnnotation) GetWorkInProgress() bool { + if x != nil { + return x.WorkInProgress + } + return false +} + +func (x *StatusAnnotation) GetPackageVersionStatus() PackageVersionStatus { + if x != nil { + return x.PackageVersionStatus + } + return PackageVersionStatus_UNKNOWN +} + +var file_udpa_annotations_status_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*StatusAnnotation)(nil), + Field: 222707719, + Name: "udpa.annotations.file_status", + Tag: "bytes,222707719,opt,name=file_status", + Filename: "udpa/annotations/status.proto", + }, +} + +// Extension fields to descriptorpb.FileOptions. 
+var ( + // optional udpa.annotations.StatusAnnotation file_status = 222707719; + E_FileStatus = &file_udpa_annotations_status_proto_extTypes[0] +) + +var File_udpa_annotations_status_proto protoreflect.FileDescriptor + +var file_udpa_annotations_status_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x10, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x9a, 0x01, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, + 0x5f, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, + 0x73, 0x73, 0x12, 0x5c, 0x0a, 0x16, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x14, 0x70, 0x61, 0x63, 0x6b, + 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x2a, 0x5d, 0x0a, 0x14, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x52, 0x4f, 0x5a, 0x45, 0x4e, 0x10, + 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, 0x20, 0x0a, + 0x1c, 0x4e, 0x45, 0x58, 0x54, 0x5f, 0x4d, 0x41, 0x4a, 0x4f, 0x52, 0x5f, 0x56, 0x45, 0x52, 0x53, + 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x41, 0x4e, 0x44, 0x49, 0x44, 0x41, 0x54, 0x45, 0x10, 0x03, 0x3a, + 0x64, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x87, 0x80, 0x99, + 0x6a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_udpa_annotations_status_proto_rawDescOnce sync.Once + file_udpa_annotations_status_proto_rawDescData = file_udpa_annotations_status_proto_rawDesc +) + +func file_udpa_annotations_status_proto_rawDescGZIP() []byte { + 
file_udpa_annotations_status_proto_rawDescOnce.Do(func() { + file_udpa_annotations_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_udpa_annotations_status_proto_rawDescData) + }) + return file_udpa_annotations_status_proto_rawDescData +} + +var file_udpa_annotations_status_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_udpa_annotations_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_udpa_annotations_status_proto_goTypes = []interface{}{ + (PackageVersionStatus)(0), // 0: udpa.annotations.PackageVersionStatus + (*StatusAnnotation)(nil), // 1: udpa.annotations.StatusAnnotation + (*descriptorpb.FileOptions)(nil), // 2: google.protobuf.FileOptions +} +var file_udpa_annotations_status_proto_depIdxs = []int32{ + 0, // 0: udpa.annotations.StatusAnnotation.package_version_status:type_name -> udpa.annotations.PackageVersionStatus + 2, // 1: udpa.annotations.file_status:extendee -> google.protobuf.FileOptions + 1, // 2: udpa.annotations.file_status:type_name -> udpa.annotations.StatusAnnotation + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 2, // [2:3] is the sub-list for extension type_name + 1, // [1:2] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_udpa_annotations_status_proto_init() } +func file_udpa_annotations_status_proto_init() { + if File_udpa_annotations_status_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_udpa_annotations_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatusAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_udpa_annotations_status_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_udpa_annotations_status_proto_goTypes, + DependencyIndexes: file_udpa_annotations_status_proto_depIdxs, + EnumInfos: file_udpa_annotations_status_proto_enumTypes, + MessageInfos: file_udpa_annotations_status_proto_msgTypes, + ExtensionInfos: file_udpa_annotations_status_proto_extTypes, + }.Build() + File_udpa_annotations_status_proto = out.File + file_udpa_annotations_status_proto_rawDesc = nil + file_udpa_annotations_status_proto_goTypes = nil + file_udpa_annotations_status_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.validate.go new file mode 100644 index 0000000000..5633a83831 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.validate.go @@ -0,0 +1,140 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: udpa/annotations/status.proto + +package annotations + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on StatusAnnotation with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *StatusAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StatusAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// StatusAnnotationMultiError, or nil if none found. +func (m *StatusAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *StatusAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for WorkInProgress + + // no validation rules for PackageVersionStatus + + if len(errors) > 0 { + return StatusAnnotationMultiError(errors) + } + + return nil +} + +// StatusAnnotationMultiError is an error wrapping multiple validation errors +// returned by StatusAnnotation.ValidateAll() if the designated constraints +// aren't met. +type StatusAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StatusAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StatusAnnotationMultiError) AllErrors() []error { return m } + +// StatusAnnotationValidationError is the validation error returned by +// StatusAnnotation.Validate if the designated constraints aren't met. +type StatusAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StatusAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StatusAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StatusAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StatusAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e StatusAnnotationValidationError) ErrorName() string { return "StatusAnnotationValidationError" } + +// Error satisfies the builtin error interface +func (e StatusAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStatusAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StatusAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StatusAnnotationValidationError{} diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go new file mode 100644 index 0000000000..b3ab9e346b --- /dev/null +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go @@ -0,0 +1,179 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: udpa/annotations/versioning.proto + +package annotations + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type VersioningAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PreviousMessageType string `protobuf:"bytes,1,opt,name=previous_message_type,json=previousMessageType,proto3" json:"previous_message_type,omitempty"` +} + +func (x *VersioningAnnotation) Reset() { + *x = VersioningAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_udpa_annotations_versioning_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VersioningAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VersioningAnnotation) ProtoMessage() {} + +func (x *VersioningAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_udpa_annotations_versioning_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VersioningAnnotation.ProtoReflect.Descriptor instead. +func (*VersioningAnnotation) Descriptor() ([]byte, []int) { + return file_udpa_annotations_versioning_proto_rawDescGZIP(), []int{0} +} + +func (x *VersioningAnnotation) GetPreviousMessageType() string { + if x != nil { + return x.PreviousMessageType + } + return "" +} + +var file_udpa_annotations_versioning_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*VersioningAnnotation)(nil), + Field: 7881811, + Name: "udpa.annotations.versioning", + Tag: "bytes,7881811,opt,name=versioning", + Filename: "udpa/annotations/versioning.proto", + }, +} + +// Extension fields to descriptorpb.MessageOptions. 
+var ( + // optional udpa.annotations.VersioningAnnotation versioning = 7881811; + E_Versioning = &file_udpa_annotations_versioning_proto_extTypes[0] +) + +var File_udpa_annotations_versioning_proto protoreflect.FileDescriptor + +var file_udpa_annotations_versioning_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4a, 0x0a, 0x14, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x32, 0x0a, 0x15, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, + 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x3a, 0x6a, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, + 0x67, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0xd3, 0x88, 0xe1, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x75, 0x64, + 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x42, + 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, + 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_udpa_annotations_versioning_proto_rawDescOnce sync.Once + file_udpa_annotations_versioning_proto_rawDescData = file_udpa_annotations_versioning_proto_rawDesc +) + +func file_udpa_annotations_versioning_proto_rawDescGZIP() []byte { + file_udpa_annotations_versioning_proto_rawDescOnce.Do(func() { + file_udpa_annotations_versioning_proto_rawDescData = protoimpl.X.CompressGZIP(file_udpa_annotations_versioning_proto_rawDescData) + }) + return file_udpa_annotations_versioning_proto_rawDescData +} + +var file_udpa_annotations_versioning_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_udpa_annotations_versioning_proto_goTypes = []interface{}{ + (*VersioningAnnotation)(nil), // 0: udpa.annotations.VersioningAnnotation + (*descriptorpb.MessageOptions)(nil), // 1: google.protobuf.MessageOptions +} +var file_udpa_annotations_versioning_proto_depIdxs = []int32{ + 1, // 0: udpa.annotations.versioning:extendee -> google.protobuf.MessageOptions + 0, // 1: udpa.annotations.versioning:type_name -> udpa.annotations.VersioningAnnotation + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] 
is the sub-list for field type_name +} + +func init() { file_udpa_annotations_versioning_proto_init() } +func file_udpa_annotations_versioning_proto_init() { + if File_udpa_annotations_versioning_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_udpa_annotations_versioning_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VersioningAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_udpa_annotations_versioning_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_udpa_annotations_versioning_proto_goTypes, + DependencyIndexes: file_udpa_annotations_versioning_proto_depIdxs, + MessageInfos: file_udpa_annotations_versioning_proto_msgTypes, + ExtensionInfos: file_udpa_annotations_versioning_proto_extTypes, + }.Build() + File_udpa_annotations_versioning_proto = out.File + file_udpa_annotations_versioning_proto_rawDesc = nil + file_udpa_annotations_versioning_proto_goTypes = nil + file_udpa_annotations_versioning_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.validate.go new file mode 100644 index 0000000000..5fd86baffd --- /dev/null +++ b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.validate.go @@ -0,0 +1,140 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: udpa/annotations/versioning.proto + +package annotations + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on VersioningAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *VersioningAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on VersioningAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// VersioningAnnotationMultiError, or nil if none found. +func (m *VersioningAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *VersioningAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for PreviousMessageType + + if len(errors) > 0 { + return VersioningAnnotationMultiError(errors) + } + + return nil +} + +// VersioningAnnotationMultiError is an error wrapping multiple validation +// errors returned by VersioningAnnotation.ValidateAll() if the designated +// constraints aren't met. +type VersioningAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m VersioningAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m VersioningAnnotationMultiError) AllErrors() []error { return m } + +// VersioningAnnotationValidationError is the validation error returned by +// VersioningAnnotation.Validate if the designated constraints aren't met. +type VersioningAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e VersioningAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e VersioningAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e VersioningAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e VersioningAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e VersioningAnnotationValidationError) ErrorName() string { + return "VersioningAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e VersioningAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sVersioningAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = VersioningAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = VersioningAnnotationValidationError{} diff --git a/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.go b/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.go new file mode 100644 index 0000000000..e8f23f7858 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.go @@ -0,0 +1,164 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: udpa/type/v1/typed_struct.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type TypedStruct struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + Value *structpb.Struct `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *TypedStruct) Reset() { + *x = TypedStruct{} + if protoimpl.UnsafeEnabled { + mi := &file_udpa_type_v1_typed_struct_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypedStruct) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypedStruct) ProtoMessage() {} + +func (x *TypedStruct) ProtoReflect() protoreflect.Message { + mi := &file_udpa_type_v1_typed_struct_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypedStruct.ProtoReflect.Descriptor instead. +func (*TypedStruct) Descriptor() ([]byte, []int) { + return file_udpa_type_v1_typed_struct_proto_rawDescGZIP(), []int{0} +} + +func (x *TypedStruct) GetTypeUrl() string { + if x != nil { + return x.TypeUrl + } + return "" +} + +func (x *TypedStruct) GetValue() *structpb.Struct { + if x != nil { + return x.Value + } + return nil +} + +var File_udpa_type_v1_typed_struct_proto protoreflect.FileDescriptor + +var file_udpa_type_v1_typed_struct_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0c, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x31, 0x1a, + 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x57, 0x0a, + 0x0b, 0x54, 0x79, 0x70, 0x65, 0x64, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x19, 0x0a, 0x08, + 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x57, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x54, 0x79, 0x70, 0x65, 0x64, 0x53, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x23, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, + 0x67, 0x6f, 0x2f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x31, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_udpa_type_v1_typed_struct_proto_rawDescOnce sync.Once + file_udpa_type_v1_typed_struct_proto_rawDescData = file_udpa_type_v1_typed_struct_proto_rawDesc +) + +func file_udpa_type_v1_typed_struct_proto_rawDescGZIP() []byte { + file_udpa_type_v1_typed_struct_proto_rawDescOnce.Do(func() { + 
file_udpa_type_v1_typed_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_udpa_type_v1_typed_struct_proto_rawDescData) + }) + return file_udpa_type_v1_typed_struct_proto_rawDescData +} + +var file_udpa_type_v1_typed_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_udpa_type_v1_typed_struct_proto_goTypes = []interface{}{ + (*TypedStruct)(nil), // 0: udpa.type.v1.TypedStruct + (*structpb.Struct)(nil), // 1: google.protobuf.Struct +} +var file_udpa_type_v1_typed_struct_proto_depIdxs = []int32{ + 1, // 0: udpa.type.v1.TypedStruct.value:type_name -> google.protobuf.Struct + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_udpa_type_v1_typed_struct_proto_init() } +func file_udpa_type_v1_typed_struct_proto_init() { + if File_udpa_type_v1_typed_struct_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_udpa_type_v1_typed_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TypedStruct); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_udpa_type_v1_typed_struct_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_udpa_type_v1_typed_struct_proto_goTypes, + DependencyIndexes: file_udpa_type_v1_typed_struct_proto_depIdxs, + MessageInfos: file_udpa_type_v1_typed_struct_proto_msgTypes, + }.Build() + File_udpa_type_v1_typed_struct_proto = out.File + file_udpa_type_v1_typed_struct_proto_rawDesc = nil + file_udpa_type_v1_typed_struct_proto_goTypes = nil + file_udpa_type_v1_typed_struct_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.validate.go new file mode 100644 index 0000000000..e336fb4a72 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.validate.go @@ -0,0 +1,166 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: udpa/type/v1/typed_struct.proto + +package v1 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on TypedStruct with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *TypedStruct) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TypedStruct with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in TypedStructMultiError, or +// nil if none found. 
+func (m *TypedStruct) ValidateAll() error { + return m.validate(true) +} + +func (m *TypedStruct) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for TypeUrl + + if all { + switch v := interface{}(m.GetValue()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TypedStructValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TypedStructValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValue()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TypedStructValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return TypedStructMultiError(errors) + } + + return nil +} + +// TypedStructMultiError is an error wrapping multiple validation errors +// returned by TypedStruct.ValidateAll() if the designated constraints aren't met. +type TypedStructMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TypedStructMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TypedStructMultiError) AllErrors() []error { return m } + +// TypedStructValidationError is the validation error returned by +// TypedStruct.Validate if the designated constraints aren't met. +type TypedStructValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TypedStructValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TypedStructValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TypedStructValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TypedStructValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TypedStructValidationError) ErrorName() string { return "TypedStructValidationError" } + +// Error satisfies the builtin error interface +func (e TypedStructValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTypedStruct.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TypedStructValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TypedStructValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go new file mode 100644 index 0000000000..705a71e887 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go @@ -0,0 +1,412 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/annotations/v3/migrate.proto + +package v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type MigrateAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Rename string `protobuf:"bytes,1,opt,name=rename,proto3" json:"rename,omitempty"` +} + +func (x *MigrateAnnotation) Reset() { + *x = MigrateAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_annotations_v3_migrate_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MigrateAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MigrateAnnotation) ProtoMessage() {} + +func (x *MigrateAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_xds_annotations_v3_migrate_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MigrateAnnotation.ProtoReflect.Descriptor instead. +func (*MigrateAnnotation) Descriptor() ([]byte, []int) { + return file_xds_annotations_v3_migrate_proto_rawDescGZIP(), []int{0} +} + +func (x *MigrateAnnotation) GetRename() string { + if x != nil { + return x.Rename + } + return "" +} + +type FieldMigrateAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Rename string `protobuf:"bytes,1,opt,name=rename,proto3" json:"rename,omitempty"` + OneofPromotion string `protobuf:"bytes,2,opt,name=oneof_promotion,json=oneofPromotion,proto3" json:"oneof_promotion,omitempty"` +} + +func (x *FieldMigrateAnnotation) Reset() { + *x = FieldMigrateAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_annotations_v3_migrate_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldMigrateAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldMigrateAnnotation) ProtoMessage() {} + +func (x *FieldMigrateAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_xds_annotations_v3_migrate_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldMigrateAnnotation.ProtoReflect.Descriptor instead. 
+func (*FieldMigrateAnnotation) Descriptor() ([]byte, []int) { + return file_xds_annotations_v3_migrate_proto_rawDescGZIP(), []int{1} +} + +func (x *FieldMigrateAnnotation) GetRename() string { + if x != nil { + return x.Rename + } + return "" +} + +func (x *FieldMigrateAnnotation) GetOneofPromotion() string { + if x != nil { + return x.OneofPromotion + } + return "" +} + +type FileMigrateAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MoveToPackage string `protobuf:"bytes,2,opt,name=move_to_package,json=moveToPackage,proto3" json:"move_to_package,omitempty"` +} + +func (x *FileMigrateAnnotation) Reset() { + *x = FileMigrateAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_annotations_v3_migrate_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileMigrateAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileMigrateAnnotation) ProtoMessage() {} + +func (x *FileMigrateAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_xds_annotations_v3_migrate_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileMigrateAnnotation.ProtoReflect.Descriptor instead. +func (*FileMigrateAnnotation) Descriptor() ([]byte, []int) { + return file_xds_annotations_v3_migrate_proto_rawDescGZIP(), []int{2} +} + +func (x *FileMigrateAnnotation) GetMoveToPackage() string { + if x != nil { + return x.MoveToPackage + } + return "" +} + +var file_xds_annotations_v3_migrate_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*MigrateAnnotation)(nil), + Field: 112948430, + Name: "xds.annotations.v3.message_migrate", + Tag: "bytes,112948430,opt,name=message_migrate", + Filename: "xds/annotations/v3/migrate.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*FieldMigrateAnnotation)(nil), + Field: 112948430, + Name: "xds.annotations.v3.field_migrate", + Tag: "bytes,112948430,opt,name=field_migrate", + Filename: "xds/annotations/v3/migrate.proto", + }, + { + ExtendedType: (*descriptorpb.EnumOptions)(nil), + ExtensionType: (*MigrateAnnotation)(nil), + Field: 112948430, + Name: "xds.annotations.v3.enum_migrate", + Tag: "bytes,112948430,opt,name=enum_migrate", + Filename: "xds/annotations/v3/migrate.proto", + }, + { + ExtendedType: (*descriptorpb.EnumValueOptions)(nil), + ExtensionType: (*MigrateAnnotation)(nil), + Field: 112948430, + Name: "xds.annotations.v3.enum_value_migrate", + Tag: "bytes,112948430,opt,name=enum_value_migrate", + Filename: "xds/annotations/v3/migrate.proto", + }, + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*FileMigrateAnnotation)(nil), + Field: 112948430, + Name: "xds.annotations.v3.file_migrate", + Tag: "bytes,112948430,opt,name=file_migrate", + Filename: "xds/annotations/v3/migrate.proto", + }, +} + +// Extension fields to descriptorpb.MessageOptions. +var ( + // optional xds.annotations.v3.MigrateAnnotation message_migrate = 112948430; + E_MessageMigrate = &file_xds_annotations_v3_migrate_proto_extTypes[0] +) + +// Extension fields to descriptorpb.FieldOptions. 
+var ( + // optional xds.annotations.v3.FieldMigrateAnnotation field_migrate = 112948430; + E_FieldMigrate = &file_xds_annotations_v3_migrate_proto_extTypes[1] +) + +// Extension fields to descriptorpb.EnumOptions. +var ( + // optional xds.annotations.v3.MigrateAnnotation enum_migrate = 112948430; + E_EnumMigrate = &file_xds_annotations_v3_migrate_proto_extTypes[2] +) + +// Extension fields to descriptorpb.EnumValueOptions. +var ( + // optional xds.annotations.v3.MigrateAnnotation enum_value_migrate = 112948430; + E_EnumValueMigrate = &file_xds_annotations_v3_migrate_proto_extTypes[3] +) + +// Extension fields to descriptorpb.FileOptions. +var ( + // optional xds.annotations.v3.FileMigrateAnnotation file_migrate = 112948430; + E_FileMigrate = &file_xds_annotations_v3_migrate_proto_extTypes[4] +) + +var File_xds_annotations_v3_migrate_proto protoreflect.FileDescriptor + +var file_xds_annotations_v3_migrate_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x12, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x2b, 0x0a, 0x11, 0x4d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, + 0x06, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x59, 0x0a, 0x16, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, + 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x3f, 0x0a, 0x15, 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x6f, 0x76, + 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x6f, 0x76, 0x65, 0x54, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, + 0x65, 0x3a, 0x72, 0x0a, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6d, 0x69, 0x67, + 0x72, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xce, 0xe9, 0xed, 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x65, 0x3a, 0x71, 0x0a, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x12, 0x1d, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xce, 0xe9, 0xed, 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, + 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, + 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x3a, 0x69, 0x0a, 0x0c, 0x65, 0x6e, 0x75, 0x6d, + 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xce, 0xe9, 0xed, 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x25, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x65, 0x6e, 0x75, 0x6d, 0x4d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x65, 0x3a, 0x79, 0x0a, 0x12, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x12, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xce, 0xe9, 0xed, + 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x65, 0x6e, + 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x3a, 0x6d, + 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x12, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xce, 0xe9, 0xed, + 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x42, 0x2b, 0x5a, + 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, + 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_xds_annotations_v3_migrate_proto_rawDescOnce sync.Once + file_xds_annotations_v3_migrate_proto_rawDescData = file_xds_annotations_v3_migrate_proto_rawDesc +) + +func file_xds_annotations_v3_migrate_proto_rawDescGZIP() []byte { + file_xds_annotations_v3_migrate_proto_rawDescOnce.Do(func() { + file_xds_annotations_v3_migrate_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_annotations_v3_migrate_proto_rawDescData) + }) + return file_xds_annotations_v3_migrate_proto_rawDescData +} + +var file_xds_annotations_v3_migrate_proto_msgTypes = 
make([]protoimpl.MessageInfo, 3) +var file_xds_annotations_v3_migrate_proto_goTypes = []interface{}{ + (*MigrateAnnotation)(nil), // 0: xds.annotations.v3.MigrateAnnotation + (*FieldMigrateAnnotation)(nil), // 1: xds.annotations.v3.FieldMigrateAnnotation + (*FileMigrateAnnotation)(nil), // 2: xds.annotations.v3.FileMigrateAnnotation + (*descriptorpb.MessageOptions)(nil), // 3: google.protobuf.MessageOptions + (*descriptorpb.FieldOptions)(nil), // 4: google.protobuf.FieldOptions + (*descriptorpb.EnumOptions)(nil), // 5: google.protobuf.EnumOptions + (*descriptorpb.EnumValueOptions)(nil), // 6: google.protobuf.EnumValueOptions + (*descriptorpb.FileOptions)(nil), // 7: google.protobuf.FileOptions +} +var file_xds_annotations_v3_migrate_proto_depIdxs = []int32{ + 3, // 0: xds.annotations.v3.message_migrate:extendee -> google.protobuf.MessageOptions + 4, // 1: xds.annotations.v3.field_migrate:extendee -> google.protobuf.FieldOptions + 5, // 2: xds.annotations.v3.enum_migrate:extendee -> google.protobuf.EnumOptions + 6, // 3: xds.annotations.v3.enum_value_migrate:extendee -> google.protobuf.EnumValueOptions + 7, // 4: xds.annotations.v3.file_migrate:extendee -> google.protobuf.FileOptions + 0, // 5: xds.annotations.v3.message_migrate:type_name -> xds.annotations.v3.MigrateAnnotation + 1, // 6: xds.annotations.v3.field_migrate:type_name -> xds.annotations.v3.FieldMigrateAnnotation + 0, // 7: xds.annotations.v3.enum_migrate:type_name -> xds.annotations.v3.MigrateAnnotation + 0, // 8: xds.annotations.v3.enum_value_migrate:type_name -> xds.annotations.v3.MigrateAnnotation + 2, // 9: xds.annotations.v3.file_migrate:type_name -> xds.annotations.v3.FileMigrateAnnotation + 10, // [10:10] is the sub-list for method output_type + 10, // [10:10] is the sub-list for method input_type + 5, // [5:10] is the sub-list for extension type_name + 0, // [0:5] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_xds_annotations_v3_migrate_proto_init() } +func file_xds_annotations_v3_migrate_proto_init() { + if File_xds_annotations_v3_migrate_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_annotations_v3_migrate_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MigrateAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_annotations_v3_migrate_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldMigrateAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_annotations_v3_migrate_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileMigrateAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_annotations_v3_migrate_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 5, + NumServices: 0, + }, + GoTypes: file_xds_annotations_v3_migrate_proto_goTypes, + DependencyIndexes: file_xds_annotations_v3_migrate_proto_depIdxs, + MessageInfos: file_xds_annotations_v3_migrate_proto_msgTypes, + ExtensionInfos: file_xds_annotations_v3_migrate_proto_extTypes, + }.Build() + 
File_xds_annotations_v3_migrate_proto = out.File + file_xds_annotations_v3_migrate_proto_rawDesc = nil + file_xds_annotations_v3_migrate_proto_goTypes = nil + file_xds_annotations_v3_migrate_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.validate.go new file mode 100644 index 0000000000..d57d778247 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.validate.go @@ -0,0 +1,350 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/annotations/v3/migrate.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on MigrateAnnotation with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *MigrateAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MigrateAnnotationMultiError, or nil if none found. +func (m *MigrateAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *MigrateAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Rename + + if len(errors) > 0 { + return MigrateAnnotationMultiError(errors) + } + + return nil +} + +// MigrateAnnotationMultiError is an error wrapping multiple validation errors +// returned by MigrateAnnotation.ValidateAll() if the designated constraints +// aren't met. +type MigrateAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MigrateAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MigrateAnnotationMultiError) AllErrors() []error { return m } + +// MigrateAnnotationValidationError is the validation error returned by +// MigrateAnnotation.Validate if the designated constraints aren't met. +type MigrateAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MigrateAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MigrateAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MigrateAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MigrateAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e MigrateAnnotationValidationError) ErrorName() string { + return "MigrateAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e MigrateAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMigrateAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MigrateAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MigrateAnnotationValidationError{} + +// Validate checks the field values on FieldMigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FieldMigrateAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FieldMigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FieldMigrateAnnotationMultiError, or nil if none found. +func (m *FieldMigrateAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *FieldMigrateAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Rename + + // no validation rules for OneofPromotion + + if len(errors) > 0 { + return FieldMigrateAnnotationMultiError(errors) + } + + return nil +} + +// FieldMigrateAnnotationMultiError is an error wrapping multiple validation +// errors returned by FieldMigrateAnnotation.ValidateAll() if the designated +// constraints aren't met. +type FieldMigrateAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FieldMigrateAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FieldMigrateAnnotationMultiError) AllErrors() []error { return m } + +// FieldMigrateAnnotationValidationError is the validation error returned by +// FieldMigrateAnnotation.Validate if the designated constraints aren't met. +type FieldMigrateAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FieldMigrateAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FieldMigrateAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FieldMigrateAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FieldMigrateAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e FieldMigrateAnnotationValidationError) ErrorName() string { + return "FieldMigrateAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e FieldMigrateAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFieldMigrateAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FieldMigrateAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FieldMigrateAnnotationValidationError{} + +// Validate checks the field values on FileMigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FileMigrateAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FileMigrateAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FileMigrateAnnotationMultiError, or nil if none found. +func (m *FileMigrateAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *FileMigrateAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for MoveToPackage + + if len(errors) > 0 { + return FileMigrateAnnotationMultiError(errors) + } + + return nil +} + +// FileMigrateAnnotationMultiError is an error wrapping multiple validation +// errors returned by FileMigrateAnnotation.ValidateAll() if the designated +// constraints aren't met. +type FileMigrateAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FileMigrateAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FileMigrateAnnotationMultiError) AllErrors() []error { return m } + +// FileMigrateAnnotationValidationError is the validation error returned by +// FileMigrateAnnotation.Validate if the designated constraints aren't met. +type FileMigrateAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FileMigrateAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FileMigrateAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FileMigrateAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FileMigrateAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e FileMigrateAnnotationValidationError) ErrorName() string { + return "FileMigrateAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e FileMigrateAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFileMigrateAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FileMigrateAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FileMigrateAnnotationValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go new file mode 100644 index 0000000000..0278e51658 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go @@ -0,0 +1,197 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/annotations/v3/security.proto + +package v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type FieldSecurityAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ConfigureForUntrustedDownstream bool `protobuf:"varint,1,opt,name=configure_for_untrusted_downstream,json=configureForUntrustedDownstream,proto3" json:"configure_for_untrusted_downstream,omitempty"` + ConfigureForUntrustedUpstream bool `protobuf:"varint,2,opt,name=configure_for_untrusted_upstream,json=configureForUntrustedUpstream,proto3" json:"configure_for_untrusted_upstream,omitempty"` +} + +func (x *FieldSecurityAnnotation) Reset() { + *x = FieldSecurityAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_annotations_v3_security_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldSecurityAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldSecurityAnnotation) ProtoMessage() {} + +func (x *FieldSecurityAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_xds_annotations_v3_security_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldSecurityAnnotation.ProtoReflect.Descriptor instead. 
+func (*FieldSecurityAnnotation) Descriptor() ([]byte, []int) { + return file_xds_annotations_v3_security_proto_rawDescGZIP(), []int{0} +} + +func (x *FieldSecurityAnnotation) GetConfigureForUntrustedDownstream() bool { + if x != nil { + return x.ConfigureForUntrustedDownstream + } + return false +} + +func (x *FieldSecurityAnnotation) GetConfigureForUntrustedUpstream() bool { + if x != nil { + return x.ConfigureForUntrustedUpstream + } + return false +} + +var file_xds_annotations_v3_security_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*FieldSecurityAnnotation)(nil), + Field: 99044135, + Name: "xds.annotations.v3.security", + Tag: "bytes,99044135,opt,name=security", + Filename: "xds/annotations/v3/security.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. +var ( + // optional xds.annotations.v3.FieldSecurityAnnotation security = 99044135; + E_Security = &file_xds_annotations_v3_security_proto_extTypes[0] +) + +var File_xds_annotations_v3_security_proto protoreflect.FileDescriptor + +var file_xds_annotations_v3_security_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xaf, 0x01, 0x0a, 0x17, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x41, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4b, 0x0a, 0x22, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x75, 0x6e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, + 0x64, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x1f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x46, 0x6f, 0x72, + 0x55, 0x6e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x12, 0x47, 0x0a, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, + 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x75, 0x6e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x75, + 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x55, 0x6e, 0x74, 0x72, 0x75, + 0x73, 0x74, 0x65, 0x64, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x3a, 0x69, 0x0a, 0x08, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xa7, 0x96, 0x9d, 0x2f, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2b, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x73, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x42, 0x33, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, + 0x01, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, + 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_annotations_v3_security_proto_rawDescOnce sync.Once + file_xds_annotations_v3_security_proto_rawDescData = file_xds_annotations_v3_security_proto_rawDesc +) + +func file_xds_annotations_v3_security_proto_rawDescGZIP() []byte { + file_xds_annotations_v3_security_proto_rawDescOnce.Do(func() { + file_xds_annotations_v3_security_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_annotations_v3_security_proto_rawDescData) + }) + return file_xds_annotations_v3_security_proto_rawDescData +} + +var file_xds_annotations_v3_security_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_xds_annotations_v3_security_proto_goTypes = []interface{}{ + (*FieldSecurityAnnotation)(nil), // 0: xds.annotations.v3.FieldSecurityAnnotation + (*descriptorpb.FieldOptions)(nil), // 1: google.protobuf.FieldOptions +} +var file_xds_annotations_v3_security_proto_depIdxs = []int32{ + 1, // 0: xds.annotations.v3.security:extendee -> google.protobuf.FieldOptions + 0, // 1: xds.annotations.v3.security:type_name -> xds.annotations.v3.FieldSecurityAnnotation + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_xds_annotations_v3_security_proto_init() } +func file_xds_annotations_v3_security_proto_init() { + if File_xds_annotations_v3_security_proto != nil { + return + } + file_xds_annotations_v3_status_proto_init() + if !protoimpl.UnsafeEnabled { + file_xds_annotations_v3_security_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldSecurityAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_annotations_v3_security_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_xds_annotations_v3_security_proto_goTypes, + DependencyIndexes: file_xds_annotations_v3_security_proto_depIdxs, + MessageInfos: file_xds_annotations_v3_security_proto_msgTypes, + ExtensionInfos: file_xds_annotations_v3_security_proto_extTypes, + }.Build() + File_xds_annotations_v3_security_proto = out.File + file_xds_annotations_v3_security_proto_rawDesc = nil + file_xds_annotations_v3_security_proto_goTypes = nil + file_xds_annotations_v3_security_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.validate.go new file mode 100644 index 0000000000..ac0143f277 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.validate.go @@ -0,0 +1,142 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: xds/annotations/v3/security.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on FieldSecurityAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FieldSecurityAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FieldSecurityAnnotation with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FieldSecurityAnnotationMultiError, or nil if none found. +func (m *FieldSecurityAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *FieldSecurityAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for ConfigureForUntrustedDownstream + + // no validation rules for ConfigureForUntrustedUpstream + + if len(errors) > 0 { + return FieldSecurityAnnotationMultiError(errors) + } + + return nil +} + +// FieldSecurityAnnotationMultiError is an error wrapping multiple validation +// errors returned by FieldSecurityAnnotation.ValidateAll() if the designated +// constraints aren't met. +type FieldSecurityAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FieldSecurityAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FieldSecurityAnnotationMultiError) AllErrors() []error { return m } + +// FieldSecurityAnnotationValidationError is the validation error returned by +// FieldSecurityAnnotation.Validate if the designated constraints aren't met. +type FieldSecurityAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FieldSecurityAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FieldSecurityAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FieldSecurityAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FieldSecurityAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e FieldSecurityAnnotationValidationError) ErrorName() string { + return "FieldSecurityAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e FieldSecurityAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFieldSecurityAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FieldSecurityAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FieldSecurityAnnotationValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go new file mode 100644 index 0000000000..57161aab47 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go @@ -0,0 +1,93 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/annotations/v3/sensitive.proto + +package v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_xds_annotations_v3_sensitive_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 61008053, + Name: "xds.annotations.v3.sensitive", + Tag: "varint,61008053,opt,name=sensitive", + Filename: "xds/annotations/v3/sensitive.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. 
+var ( + // optional bool sensitive = 61008053; + E_Sensitive = &file_xds_annotations_v3_sensitive_proto_extTypes[0] +) + +var File_xds_annotations_v3_sensitive_proto protoreflect.FileDescriptor + +var file_xds_annotations_v3_sensitive_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3e, 0x0a, 0x09, 0x73, 0x65, + 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xb5, 0xd1, 0x8b, 0x1d, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x42, 0x2b, 0x5a, 0x29, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, + 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_xds_annotations_v3_sensitive_proto_goTypes = []interface{}{ + (*descriptorpb.FieldOptions)(nil), // 0: google.protobuf.FieldOptions +} +var file_xds_annotations_v3_sensitive_proto_depIdxs = []int32{ + 0, // 0: xds.annotations.v3.sensitive:extendee -> google.protobuf.FieldOptions + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_xds_annotations_v3_sensitive_proto_init() } +func file_xds_annotations_v3_sensitive_proto_init() { + if File_xds_annotations_v3_sensitive_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_annotations_v3_sensitive_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_xds_annotations_v3_sensitive_proto_goTypes, + DependencyIndexes: file_xds_annotations_v3_sensitive_proto_depIdxs, + ExtensionInfos: file_xds_annotations_v3_sensitive_proto_extTypes, + }.Build() + File_xds_annotations_v3_sensitive_proto = out.File + file_xds_annotations_v3_sensitive_proto_rawDesc = nil + file_xds_annotations_v3_sensitive_proto_goTypes = nil + file_xds_annotations_v3_sensitive_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.validate.go new file mode 100644 index 0000000000..c101d3acc4 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.validate.go @@ -0,0 +1,36 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: xds/annotations/v3/sensitive.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go new file mode 100644 index 0000000000..255d109fc5 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go @@ -0,0 +1,495 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/annotations/v3/status.proto + +package v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type PackageVersionStatus int32 + +const ( + PackageVersionStatus_UNKNOWN PackageVersionStatus = 0 + PackageVersionStatus_FROZEN PackageVersionStatus = 1 + PackageVersionStatus_ACTIVE PackageVersionStatus = 2 + PackageVersionStatus_NEXT_MAJOR_VERSION_CANDIDATE PackageVersionStatus = 3 +) + +// Enum value maps for PackageVersionStatus. +var ( + PackageVersionStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "FROZEN", + 2: "ACTIVE", + 3: "NEXT_MAJOR_VERSION_CANDIDATE", + } + PackageVersionStatus_value = map[string]int32{ + "UNKNOWN": 0, + "FROZEN": 1, + "ACTIVE": 2, + "NEXT_MAJOR_VERSION_CANDIDATE": 3, + } +) + +func (x PackageVersionStatus) Enum() *PackageVersionStatus { + p := new(PackageVersionStatus) + *p = x + return p +} + +func (x PackageVersionStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PackageVersionStatus) Descriptor() protoreflect.EnumDescriptor { + return file_xds_annotations_v3_status_proto_enumTypes[0].Descriptor() +} + +func (PackageVersionStatus) Type() protoreflect.EnumType { + return &file_xds_annotations_v3_status_proto_enumTypes[0] +} + +func (x PackageVersionStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PackageVersionStatus.Descriptor instead. 
+func (PackageVersionStatus) EnumDescriptor() ([]byte, []int) { + return file_xds_annotations_v3_status_proto_rawDescGZIP(), []int{0} +} + +type FileStatusAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + WorkInProgress bool `protobuf:"varint,1,opt,name=work_in_progress,json=workInProgress,proto3" json:"work_in_progress,omitempty"` +} + +func (x *FileStatusAnnotation) Reset() { + *x = FileStatusAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_annotations_v3_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileStatusAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileStatusAnnotation) ProtoMessage() {} + +func (x *FileStatusAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_xds_annotations_v3_status_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileStatusAnnotation.ProtoReflect.Descriptor instead. +func (*FileStatusAnnotation) Descriptor() ([]byte, []int) { + return file_xds_annotations_v3_status_proto_rawDescGZIP(), []int{0} +} + +func (x *FileStatusAnnotation) GetWorkInProgress() bool { + if x != nil { + return x.WorkInProgress + } + return false +} + +type MessageStatusAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + WorkInProgress bool `protobuf:"varint,1,opt,name=work_in_progress,json=workInProgress,proto3" json:"work_in_progress,omitempty"` +} + +func (x *MessageStatusAnnotation) Reset() { + *x = MessageStatusAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_annotations_v3_status_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MessageStatusAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageStatusAnnotation) ProtoMessage() {} + +func (x *MessageStatusAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_xds_annotations_v3_status_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageStatusAnnotation.ProtoReflect.Descriptor instead. 
+func (*MessageStatusAnnotation) Descriptor() ([]byte, []int) { + return file_xds_annotations_v3_status_proto_rawDescGZIP(), []int{1} +} + +func (x *MessageStatusAnnotation) GetWorkInProgress() bool { + if x != nil { + return x.WorkInProgress + } + return false +} + +type FieldStatusAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + WorkInProgress bool `protobuf:"varint,1,opt,name=work_in_progress,json=workInProgress,proto3" json:"work_in_progress,omitempty"` +} + +func (x *FieldStatusAnnotation) Reset() { + *x = FieldStatusAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_annotations_v3_status_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldStatusAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldStatusAnnotation) ProtoMessage() {} + +func (x *FieldStatusAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_xds_annotations_v3_status_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldStatusAnnotation.ProtoReflect.Descriptor instead. +func (*FieldStatusAnnotation) Descriptor() ([]byte, []int) { + return file_xds_annotations_v3_status_proto_rawDescGZIP(), []int{2} +} + +func (x *FieldStatusAnnotation) GetWorkInProgress() bool { + if x != nil { + return x.WorkInProgress + } + return false +} + +type StatusAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + WorkInProgress bool `protobuf:"varint,1,opt,name=work_in_progress,json=workInProgress,proto3" json:"work_in_progress,omitempty"` + PackageVersionStatus PackageVersionStatus `protobuf:"varint,2,opt,name=package_version_status,json=packageVersionStatus,proto3,enum=xds.annotations.v3.PackageVersionStatus" json:"package_version_status,omitempty"` +} + +func (x *StatusAnnotation) Reset() { + *x = StatusAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_annotations_v3_status_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatusAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusAnnotation) ProtoMessage() {} + +func (x *StatusAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_xds_annotations_v3_status_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusAnnotation.ProtoReflect.Descriptor instead. 
+func (*StatusAnnotation) Descriptor() ([]byte, []int) { + return file_xds_annotations_v3_status_proto_rawDescGZIP(), []int{3} +} + +func (x *StatusAnnotation) GetWorkInProgress() bool { + if x != nil { + return x.WorkInProgress + } + return false +} + +func (x *StatusAnnotation) GetPackageVersionStatus() PackageVersionStatus { + if x != nil { + return x.PackageVersionStatus + } + return PackageVersionStatus_UNKNOWN +} + +var file_xds_annotations_v3_status_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*FileStatusAnnotation)(nil), + Field: 226829418, + Name: "xds.annotations.v3.file_status", + Tag: "bytes,226829418,opt,name=file_status", + Filename: "xds/annotations/v3/status.proto", + }, + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*MessageStatusAnnotation)(nil), + Field: 226829418, + Name: "xds.annotations.v3.message_status", + Tag: "bytes,226829418,opt,name=message_status", + Filename: "xds/annotations/v3/status.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*FieldStatusAnnotation)(nil), + Field: 226829418, + Name: "xds.annotations.v3.field_status", + Tag: "bytes,226829418,opt,name=field_status", + Filename: "xds/annotations/v3/status.proto", + }, +} + +// Extension fields to descriptorpb.FileOptions. +var ( + // optional xds.annotations.v3.FileStatusAnnotation file_status = 226829418; + E_FileStatus = &file_xds_annotations_v3_status_proto_extTypes[0] +) + +// Extension fields to descriptorpb.MessageOptions. +var ( + // optional xds.annotations.v3.MessageStatusAnnotation message_status = 226829418; + E_MessageStatus = &file_xds_annotations_v3_status_proto_extTypes[1] +) + +// Extension fields to descriptorpb.FieldOptions. 
+var ( + // optional xds.annotations.v3.FieldStatusAnnotation field_status = 226829418; + E_FieldStatus = &file_xds_annotations_v3_status_proto_extTypes[2] +) + +var File_xds_annotations_v3_status_proto protoreflect.FileDescriptor + +var file_xds_annotations_v3_status_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x40, 0x0a, 0x14, 0x46, 0x69, 0x6c, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x28, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, + 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x49, + 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x22, 0x43, 0x0a, 0x17, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x6e, 0x5f, + 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, + 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x22, 0x41, + 0x0a, 0x15, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x5f, + 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, + 0x73, 0x22, 0x9c, 0x01, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, + 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, + 0x12, 0x5e, 0x0a, 0x16, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x14, 0x70, 0x61, 0x63, 0x6b, + 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x2a, 0x5d, 0x0a, 0x14, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x52, 0x4f, 0x5a, 0x45, 0x4e, 0x10, + 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, 0x20, 0x0a, + 0x1c, 0x4e, 0x45, 0x58, 0x54, 0x5f, 0x4d, 0x41, 0x4a, 0x4f, 0x52, 0x5f, 0x56, 0x45, 0x52, 0x53, + 0x49, 0x4f, 0x4e, 0x5f, 
0x43, 0x41, 0x4e, 0x44, 0x49, 0x44, 0x41, 0x54, 0x45, 0x10, 0x03, 0x3a, + 0x6a, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xea, 0xc8, 0x94, + 0x6c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x76, 0x0a, 0x0e, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xea, + 0xc8, 0x94, 0x6c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x3a, 0x6e, 0x0a, 0x0c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0xea, 0xc8, 0x94, 0x6c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x78, 0x64, + 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x42, 0x2b, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, + 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_annotations_v3_status_proto_rawDescOnce sync.Once + file_xds_annotations_v3_status_proto_rawDescData = file_xds_annotations_v3_status_proto_rawDesc +) + +func file_xds_annotations_v3_status_proto_rawDescGZIP() []byte { + file_xds_annotations_v3_status_proto_rawDescOnce.Do(func() { + file_xds_annotations_v3_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_annotations_v3_status_proto_rawDescData) + }) + return file_xds_annotations_v3_status_proto_rawDescData +} + +var file_xds_annotations_v3_status_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_xds_annotations_v3_status_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_xds_annotations_v3_status_proto_goTypes = []interface{}{ + (PackageVersionStatus)(0), // 0: xds.annotations.v3.PackageVersionStatus + (*FileStatusAnnotation)(nil), // 1: xds.annotations.v3.FileStatusAnnotation + (*MessageStatusAnnotation)(nil), // 2: xds.annotations.v3.MessageStatusAnnotation + (*FieldStatusAnnotation)(nil), // 3: xds.annotations.v3.FieldStatusAnnotation + 
(*StatusAnnotation)(nil), // 4: xds.annotations.v3.StatusAnnotation + (*descriptorpb.FileOptions)(nil), // 5: google.protobuf.FileOptions + (*descriptorpb.MessageOptions)(nil), // 6: google.protobuf.MessageOptions + (*descriptorpb.FieldOptions)(nil), // 7: google.protobuf.FieldOptions +} +var file_xds_annotations_v3_status_proto_depIdxs = []int32{ + 0, // 0: xds.annotations.v3.StatusAnnotation.package_version_status:type_name -> xds.annotations.v3.PackageVersionStatus + 5, // 1: xds.annotations.v3.file_status:extendee -> google.protobuf.FileOptions + 6, // 2: xds.annotations.v3.message_status:extendee -> google.protobuf.MessageOptions + 7, // 3: xds.annotations.v3.field_status:extendee -> google.protobuf.FieldOptions + 1, // 4: xds.annotations.v3.file_status:type_name -> xds.annotations.v3.FileStatusAnnotation + 2, // 5: xds.annotations.v3.message_status:type_name -> xds.annotations.v3.MessageStatusAnnotation + 3, // 6: xds.annotations.v3.field_status:type_name -> xds.annotations.v3.FieldStatusAnnotation + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 4, // [4:7] is the sub-list for extension type_name + 1, // [1:4] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_xds_annotations_v3_status_proto_init() } +func file_xds_annotations_v3_status_proto_init() { + if File_xds_annotations_v3_status_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_annotations_v3_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileStatusAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_annotations_v3_status_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MessageStatusAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_annotations_v3_status_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldStatusAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_annotations_v3_status_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatusAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_annotations_v3_status_proto_rawDesc, + NumEnums: 1, + NumMessages: 4, + NumExtensions: 3, + NumServices: 0, + }, + GoTypes: file_xds_annotations_v3_status_proto_goTypes, + DependencyIndexes: file_xds_annotations_v3_status_proto_depIdxs, + EnumInfos: file_xds_annotations_v3_status_proto_enumTypes, + MessageInfos: file_xds_annotations_v3_status_proto_msgTypes, + ExtensionInfos: file_xds_annotations_v3_status_proto_extTypes, + }.Build() + File_xds_annotations_v3_status_proto = out.File + file_xds_annotations_v3_status_proto_rawDesc = nil + file_xds_annotations_v3_status_proto_goTypes = nil + file_xds_annotations_v3_status_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.validate.go 
b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.validate.go new file mode 100644 index 0000000000..a87dbee8d8 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.validate.go @@ -0,0 +1,452 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/annotations/v3/status.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on FileStatusAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FileStatusAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FileStatusAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FileStatusAnnotationMultiError, or nil if none found. +func (m *FileStatusAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *FileStatusAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for WorkInProgress + + if len(errors) > 0 { + return FileStatusAnnotationMultiError(errors) + } + + return nil +} + +// FileStatusAnnotationMultiError is an error wrapping multiple validation +// errors returned by FileStatusAnnotation.ValidateAll() if the designated +// constraints aren't met. +type FileStatusAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FileStatusAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FileStatusAnnotationMultiError) AllErrors() []error { return m } + +// FileStatusAnnotationValidationError is the validation error returned by +// FileStatusAnnotation.Validate if the designated constraints aren't met. +type FileStatusAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FileStatusAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FileStatusAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FileStatusAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FileStatusAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e FileStatusAnnotationValidationError) ErrorName() string { + return "FileStatusAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e FileStatusAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFileStatusAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FileStatusAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FileStatusAnnotationValidationError{} + +// Validate checks the field values on MessageStatusAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *MessageStatusAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MessageStatusAnnotation with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MessageStatusAnnotationMultiError, or nil if none found. +func (m *MessageStatusAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *MessageStatusAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for WorkInProgress + + if len(errors) > 0 { + return MessageStatusAnnotationMultiError(errors) + } + + return nil +} + +// MessageStatusAnnotationMultiError is an error wrapping multiple validation +// errors returned by MessageStatusAnnotation.ValidateAll() if the designated +// constraints aren't met. +type MessageStatusAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MessageStatusAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MessageStatusAnnotationMultiError) AllErrors() []error { return m } + +// MessageStatusAnnotationValidationError is the validation error returned by +// MessageStatusAnnotation.Validate if the designated constraints aren't met. +type MessageStatusAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MessageStatusAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MessageStatusAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MessageStatusAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MessageStatusAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e MessageStatusAnnotationValidationError) ErrorName() string { + return "MessageStatusAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e MessageStatusAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMessageStatusAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MessageStatusAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MessageStatusAnnotationValidationError{} + +// Validate checks the field values on FieldStatusAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FieldStatusAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FieldStatusAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FieldStatusAnnotationMultiError, or nil if none found. +func (m *FieldStatusAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *FieldStatusAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for WorkInProgress + + if len(errors) > 0 { + return FieldStatusAnnotationMultiError(errors) + } + + return nil +} + +// FieldStatusAnnotationMultiError is an error wrapping multiple validation +// errors returned by FieldStatusAnnotation.ValidateAll() if the designated +// constraints aren't met. +type FieldStatusAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FieldStatusAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FieldStatusAnnotationMultiError) AllErrors() []error { return m } + +// FieldStatusAnnotationValidationError is the validation error returned by +// FieldStatusAnnotation.Validate if the designated constraints aren't met. +type FieldStatusAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FieldStatusAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FieldStatusAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FieldStatusAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FieldStatusAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e FieldStatusAnnotationValidationError) ErrorName() string { + return "FieldStatusAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e FieldStatusAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFieldStatusAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FieldStatusAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FieldStatusAnnotationValidationError{} + +// Validate checks the field values on StatusAnnotation with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *StatusAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StatusAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// StatusAnnotationMultiError, or nil if none found. +func (m *StatusAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *StatusAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for WorkInProgress + + // no validation rules for PackageVersionStatus + + if len(errors) > 0 { + return StatusAnnotationMultiError(errors) + } + + return nil +} + +// StatusAnnotationMultiError is an error wrapping multiple validation errors +// returned by StatusAnnotation.ValidateAll() if the designated constraints +// aren't met. +type StatusAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StatusAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StatusAnnotationMultiError) AllErrors() []error { return m } + +// StatusAnnotationValidationError is the validation error returned by +// StatusAnnotation.Validate if the designated constraints aren't met. +type StatusAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StatusAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StatusAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StatusAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StatusAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e StatusAnnotationValidationError) ErrorName() string { return "StatusAnnotationValidationError" } + +// Error satisfies the builtin error interface +func (e StatusAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStatusAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StatusAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StatusAnnotationValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go new file mode 100644 index 0000000000..2de032f159 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go @@ -0,0 +1,179 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/annotations/v3/versioning.proto + +package v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type VersioningAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PreviousMessageType string `protobuf:"bytes,1,opt,name=previous_message_type,json=previousMessageType,proto3" json:"previous_message_type,omitempty"` +} + +func (x *VersioningAnnotation) Reset() { + *x = VersioningAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_annotations_v3_versioning_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VersioningAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VersioningAnnotation) ProtoMessage() {} + +func (x *VersioningAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_xds_annotations_v3_versioning_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VersioningAnnotation.ProtoReflect.Descriptor instead. +func (*VersioningAnnotation) Descriptor() ([]byte, []int) { + return file_xds_annotations_v3_versioning_proto_rawDescGZIP(), []int{0} +} + +func (x *VersioningAnnotation) GetPreviousMessageType() string { + if x != nil { + return x.PreviousMessageType + } + return "" +} + +var file_xds_annotations_v3_versioning_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*VersioningAnnotation)(nil), + Field: 92389011, + Name: "xds.annotations.v3.versioning", + Tag: "bytes,92389011,opt,name=versioning", + Filename: "xds/annotations/v3/versioning.proto", + }, +} + +// Extension fields to descriptorpb.MessageOptions. 
+var ( + // optional xds.annotations.v3.VersioningAnnotation versioning = 92389011; + E_Versioning = &file_xds_annotations_v3_versioning_proto_extTypes[0] +) + +var File_xds_annotations_v3_versioning_proto protoreflect.FileDescriptor + +var file_xds_annotations_v3_versioning_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4a, 0x0a, 0x14, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x13, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x6c, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x93, 0xfd, 0x86, 0x2c, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x42, 0x2b, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, + 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_annotations_v3_versioning_proto_rawDescOnce sync.Once + file_xds_annotations_v3_versioning_proto_rawDescData = file_xds_annotations_v3_versioning_proto_rawDesc +) + +func file_xds_annotations_v3_versioning_proto_rawDescGZIP() []byte { + file_xds_annotations_v3_versioning_proto_rawDescOnce.Do(func() { + file_xds_annotations_v3_versioning_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_annotations_v3_versioning_proto_rawDescData) + }) + return file_xds_annotations_v3_versioning_proto_rawDescData +} + +var file_xds_annotations_v3_versioning_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_xds_annotations_v3_versioning_proto_goTypes = []interface{}{ + (*VersioningAnnotation)(nil), // 0: xds.annotations.v3.VersioningAnnotation + (*descriptorpb.MessageOptions)(nil), // 1: google.protobuf.MessageOptions +} +var file_xds_annotations_v3_versioning_proto_depIdxs = []int32{ + 1, // 0: xds.annotations.v3.versioning:extendee -> google.protobuf.MessageOptions + 0, // 1: xds.annotations.v3.versioning:type_name -> xds.annotations.v3.VersioningAnnotation + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for 
extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_xds_annotations_v3_versioning_proto_init() } +func file_xds_annotations_v3_versioning_proto_init() { + if File_xds_annotations_v3_versioning_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_annotations_v3_versioning_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VersioningAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_annotations_v3_versioning_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_xds_annotations_v3_versioning_proto_goTypes, + DependencyIndexes: file_xds_annotations_v3_versioning_proto_depIdxs, + MessageInfos: file_xds_annotations_v3_versioning_proto_msgTypes, + ExtensionInfos: file_xds_annotations_v3_versioning_proto_extTypes, + }.Build() + File_xds_annotations_v3_versioning_proto = out.File + file_xds_annotations_v3_versioning_proto_rawDesc = nil + file_xds_annotations_v3_versioning_proto_goTypes = nil + file_xds_annotations_v3_versioning_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.validate.go new file mode 100644 index 0000000000..042c266e13 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.validate.go @@ -0,0 +1,140 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/annotations/v3/versioning.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on VersioningAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *VersioningAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on VersioningAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// VersioningAnnotationMultiError, or nil if none found. +func (m *VersioningAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *VersioningAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for PreviousMessageType + + if len(errors) > 0 { + return VersioningAnnotationMultiError(errors) + } + + return nil +} + +// VersioningAnnotationMultiError is an error wrapping multiple validation +// errors returned by VersioningAnnotation.ValidateAll() if the designated +// constraints aren't met. 
+type VersioningAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m VersioningAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m VersioningAnnotationMultiError) AllErrors() []error { return m } + +// VersioningAnnotationValidationError is the validation error returned by +// VersioningAnnotation.Validate if the designated constraints aren't met. +type VersioningAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e VersioningAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e VersioningAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e VersioningAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e VersioningAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e VersioningAnnotationValidationError) ErrorName() string { + return "VersioningAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e VersioningAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sVersioningAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = VersioningAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = VersioningAnnotationValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go new file mode 100644 index 0000000000..3058286d57 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go @@ -0,0 +1,153 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/core/v3/authority.proto + +package v3 + +import ( + _ "github.com/cncf/xds/go/xds/annotations/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Authority struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *Authority) Reset() { + *x = Authority{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_core_v3_authority_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Authority) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Authority) ProtoMessage() {} + +func (x *Authority) ProtoReflect() protoreflect.Message { + mi := &file_xds_core_v3_authority_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Authority.ProtoReflect.Descriptor instead. +func (*Authority) Descriptor() ([]byte, []int) { + return file_xds_core_v3_authority_proto_rawDescGZIP(), []int{0} +} + +func (x *Authority) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +var File_xds_core_v3_authority_proto protoreflect.FileDescriptor + +var file_xds_core_v3_authority_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x75, + 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, + 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x28, 0x0a, 0x09, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, + 0x79, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x42, 0x56, + 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x42, 0x0e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, + 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_core_v3_authority_proto_rawDescOnce sync.Once + file_xds_core_v3_authority_proto_rawDescData = file_xds_core_v3_authority_proto_rawDesc +) + +func file_xds_core_v3_authority_proto_rawDescGZIP() []byte { + file_xds_core_v3_authority_proto_rawDescOnce.Do(func() { + file_xds_core_v3_authority_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_authority_proto_rawDescData) + }) + return file_xds_core_v3_authority_proto_rawDescData +} + +var file_xds_core_v3_authority_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_xds_core_v3_authority_proto_goTypes = []interface{}{ + (*Authority)(nil), // 0: xds.core.v3.Authority +} +var file_xds_core_v3_authority_proto_depIdxs = []int32{ + 0, // [0:0] is the 
sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_xds_core_v3_authority_proto_init() } +func file_xds_core_v3_authority_proto_init() { + if File_xds_core_v3_authority_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_core_v3_authority_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Authority); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_core_v3_authority_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_core_v3_authority_proto_goTypes, + DependencyIndexes: file_xds_core_v3_authority_proto_depIdxs, + MessageInfos: file_xds_core_v3_authority_proto_msgTypes, + }.Build() + File_xds_core_v3_authority_proto = out.File + file_xds_core_v3_authority_proto_rawDesc = nil + file_xds_core_v3_authority_proto_goTypes = nil + file_xds_core_v3_authority_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.validate.go new file mode 100644 index 0000000000..94317c2af0 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.validate.go @@ -0,0 +1,146 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/core/v3/authority.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Authority with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Authority) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Authority with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in AuthorityMultiError, or nil +// if none found. +func (m *Authority) ValidateAll() error { + return m.validate(true) +} + +func (m *Authority) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := AuthorityValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return AuthorityMultiError(errors) + } + + return nil +} + +// AuthorityMultiError is an error wrapping multiple validation errors returned +// by Authority.ValidateAll() if the designated constraints aren't met. +type AuthorityMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m AuthorityMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AuthorityMultiError) AllErrors() []error { return m } + +// AuthorityValidationError is the validation error returned by +// Authority.Validate if the designated constraints aren't met. +type AuthorityValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AuthorityValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AuthorityValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e AuthorityValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AuthorityValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e AuthorityValidationError) ErrorName() string { return "AuthorityValidationError" } + +// Error satisfies the builtin error interface +func (e AuthorityValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAuthority.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AuthorityValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AuthorityValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go new file mode 100644 index 0000000000..0e339b5899 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go @@ -0,0 +1,172 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/core/v3/cidr.proto + +package v3 + +import ( + _ "github.com/cncf/xds/go/xds/annotations/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CidrRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AddressPrefix string `protobuf:"bytes,1,opt,name=address_prefix,json=addressPrefix,proto3" json:"address_prefix,omitempty"` + PrefixLen *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=prefix_len,json=prefixLen,proto3" json:"prefix_len,omitempty"` +} + +func (x *CidrRange) Reset() { + *x = CidrRange{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_core_v3_cidr_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CidrRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CidrRange) ProtoMessage() {} + +func (x *CidrRange) ProtoReflect() protoreflect.Message { + mi := &file_xds_core_v3_cidr_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CidrRange.ProtoReflect.Descriptor instead. +func (*CidrRange) Descriptor() ([]byte, []int) { + return file_xds_core_v3_cidr_proto_rawDescGZIP(), []int{0} +} + +func (x *CidrRange) GetAddressPrefix() string { + if x != nil { + return x.AddressPrefix + } + return "" +} + +func (x *CidrRange) GetPrefixLen() *wrapperspb.UInt32Value { + if x != nil { + return x.PrefixLen + } + return nil +} + +var File_xds_core_v3_cidr_proto protoreflect.FileDescriptor + +var file_xds_core_v3_cidr_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x69, + 0x64, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0x82, 0x01, 0x0a, 0x09, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x2e, 0x0a, + 0x0e, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0d, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x45, 0x0a, + 0x0a, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, 0x80, 0x01, 0x52, 0x09, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x4c, 0x65, 0x6e, 0x42, 0x56, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, + 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 
0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, + 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_core_v3_cidr_proto_rawDescOnce sync.Once + file_xds_core_v3_cidr_proto_rawDescData = file_xds_core_v3_cidr_proto_rawDesc +) + +func file_xds_core_v3_cidr_proto_rawDescGZIP() []byte { + file_xds_core_v3_cidr_proto_rawDescOnce.Do(func() { + file_xds_core_v3_cidr_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_cidr_proto_rawDescData) + }) + return file_xds_core_v3_cidr_proto_rawDescData +} + +var file_xds_core_v3_cidr_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_xds_core_v3_cidr_proto_goTypes = []interface{}{ + (*CidrRange)(nil), // 0: xds.core.v3.CidrRange + (*wrapperspb.UInt32Value)(nil), // 1: google.protobuf.UInt32Value +} +var file_xds_core_v3_cidr_proto_depIdxs = []int32{ + 1, // 0: xds.core.v3.CidrRange.prefix_len:type_name -> google.protobuf.UInt32Value + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_xds_core_v3_cidr_proto_init() } +func file_xds_core_v3_cidr_proto_init() { + if File_xds_core_v3_cidr_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_core_v3_cidr_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CidrRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_core_v3_cidr_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_core_v3_cidr_proto_goTypes, + DependencyIndexes: file_xds_core_v3_cidr_proto_depIdxs, + MessageInfos: file_xds_core_v3_cidr_proto_msgTypes, + }.Build() + File_xds_core_v3_cidr_proto = out.File + file_xds_core_v3_cidr_proto_rawDesc = nil + file_xds_core_v3_cidr_proto_goTypes = nil + file_xds_core_v3_cidr_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.validate.go new file mode 100644 index 0000000000..43327f56b5 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.validate.go @@ -0,0 +1,161 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/core/v3/cidr.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on CidrRange with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *CidrRange) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CidrRange with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in CidrRangeMultiError, or nil +// if none found. +func (m *CidrRange) ValidateAll() error { + return m.validate(true) +} + +func (m *CidrRange) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetAddressPrefix()) < 1 { + err := CidrRangeValidationError{ + field: "AddressPrefix", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if wrapper := m.GetPrefixLen(); wrapper != nil { + + if wrapper.GetValue() > 128 { + err := CidrRangeValidationError{ + field: "PrefixLen", + reason: "value must be less than or equal to 128", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return CidrRangeMultiError(errors) + } + + return nil +} + +// CidrRangeMultiError is an error wrapping multiple validation errors returned +// by CidrRange.ValidateAll() if the designated constraints aren't met. +type CidrRangeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CidrRangeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CidrRangeMultiError) AllErrors() []error { return m } + +// CidrRangeValidationError is the validation error returned by +// CidrRange.Validate if the designated constraints aren't met. +type CidrRangeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CidrRangeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CidrRangeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CidrRangeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CidrRangeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CidrRangeValidationError) ErrorName() string { return "CidrRangeValidationError" } + +// Error satisfies the builtin error interface +func (e CidrRangeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCidrRange.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CidrRangeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CidrRangeValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go new file mode 100644 index 0000000000..0d45b961bf --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go @@ -0,0 +1,297 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/core/v3/collection_entry.proto + +package v3 + +import ( + _ "github.com/cncf/xds/go/xds/annotations/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CollectionEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to ResourceSpecifier: + // + // *CollectionEntry_Locator + // *CollectionEntry_InlineEntry_ + ResourceSpecifier isCollectionEntry_ResourceSpecifier `protobuf_oneof:"resource_specifier"` +} + +func (x *CollectionEntry) Reset() { + *x = CollectionEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_core_v3_collection_entry_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CollectionEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CollectionEntry) ProtoMessage() {} + +func (x *CollectionEntry) ProtoReflect() protoreflect.Message { + mi := &file_xds_core_v3_collection_entry_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CollectionEntry.ProtoReflect.Descriptor instead. 
+func (*CollectionEntry) Descriptor() ([]byte, []int) { + return file_xds_core_v3_collection_entry_proto_rawDescGZIP(), []int{0} +} + +func (m *CollectionEntry) GetResourceSpecifier() isCollectionEntry_ResourceSpecifier { + if m != nil { + return m.ResourceSpecifier + } + return nil +} + +func (x *CollectionEntry) GetLocator() *ResourceLocator { + if x, ok := x.GetResourceSpecifier().(*CollectionEntry_Locator); ok { + return x.Locator + } + return nil +} + +func (x *CollectionEntry) GetInlineEntry() *CollectionEntry_InlineEntry { + if x, ok := x.GetResourceSpecifier().(*CollectionEntry_InlineEntry_); ok { + return x.InlineEntry + } + return nil +} + +type isCollectionEntry_ResourceSpecifier interface { + isCollectionEntry_ResourceSpecifier() +} + +type CollectionEntry_Locator struct { + Locator *ResourceLocator `protobuf:"bytes,1,opt,name=locator,proto3,oneof"` +} + +type CollectionEntry_InlineEntry_ struct { + InlineEntry *CollectionEntry_InlineEntry `protobuf:"bytes,2,opt,name=inline_entry,json=inlineEntry,proto3,oneof"` +} + +func (*CollectionEntry_Locator) isCollectionEntry_ResourceSpecifier() {} + +func (*CollectionEntry_InlineEntry_) isCollectionEntry_ResourceSpecifier() {} + +type CollectionEntry_InlineEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + Resource *anypb.Any `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` +} + +func (x *CollectionEntry_InlineEntry) Reset() { + *x = CollectionEntry_InlineEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_core_v3_collection_entry_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CollectionEntry_InlineEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CollectionEntry_InlineEntry) ProtoMessage() {} + +func (x *CollectionEntry_InlineEntry) ProtoReflect() protoreflect.Message { + mi := &file_xds_core_v3_collection_entry_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CollectionEntry_InlineEntry.ProtoReflect.Descriptor instead. 
+func (*CollectionEntry_InlineEntry) Descriptor() ([]byte, []int) { + return file_xds_core_v3_collection_entry_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *CollectionEntry_InlineEntry) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CollectionEntry_InlineEntry) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *CollectionEntry_InlineEntry) GetResource() *anypb.Any { + if x != nil { + return x.Resource + } + return nil +} + +var File_xds_core_v3_collection_entry_proto protoreflect.FileDescriptor + +var file_xds_core_v3_collection_entry_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, + 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x78, + 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc3, 0x02, 0x0a, 0x0f, 0x43, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, + 0x0a, 0x07, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x48, 0x00, 0x52, + 0x07, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x4d, 0x0a, 0x0c, 0x69, 0x6e, 0x6c, 0x69, + 0x6e, 0x65, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, + 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x6c, + 0x69, 0x6e, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x69, 0x6e, 0x6c, 0x69, + 0x6e, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x1a, 0x8b, 0x01, 0x0a, 0x0b, 0x49, 0x6e, 0x6c, 0x69, + 0x6e, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x30, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x1c, 0xfa, 0x42, 0x19, 0x72, 0x17, 0x32, 0x15, 0x5e, 0x5b, + 0x30, 0x2d, 0x39, 0x61, 0x2d, 0x7a, 0x41, 0x2d, 0x5a, 0x5f, 0x5c, 0x2d, 0x5c, 0x2e, 0x7e, 0x3a, + 0x5d, 0x2b, 0x24, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x19, 0x0a, 0x12, 0x72, 0x65, 0x73, 
0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, + 0x42, 0x5c, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x42, 0x14, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, + 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_core_v3_collection_entry_proto_rawDescOnce sync.Once + file_xds_core_v3_collection_entry_proto_rawDescData = file_xds_core_v3_collection_entry_proto_rawDesc +) + +func file_xds_core_v3_collection_entry_proto_rawDescGZIP() []byte { + file_xds_core_v3_collection_entry_proto_rawDescOnce.Do(func() { + file_xds_core_v3_collection_entry_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_collection_entry_proto_rawDescData) + }) + return file_xds_core_v3_collection_entry_proto_rawDescData +} + +var file_xds_core_v3_collection_entry_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_xds_core_v3_collection_entry_proto_goTypes = []interface{}{ + (*CollectionEntry)(nil), // 0: xds.core.v3.CollectionEntry + (*CollectionEntry_InlineEntry)(nil), // 1: xds.core.v3.CollectionEntry.InlineEntry + (*ResourceLocator)(nil), // 2: xds.core.v3.ResourceLocator + (*anypb.Any)(nil), // 3: google.protobuf.Any +} +var file_xds_core_v3_collection_entry_proto_depIdxs = []int32{ + 2, // 0: xds.core.v3.CollectionEntry.locator:type_name -> xds.core.v3.ResourceLocator + 1, // 1: xds.core.v3.CollectionEntry.inline_entry:type_name -> xds.core.v3.CollectionEntry.InlineEntry + 3, // 2: xds.core.v3.CollectionEntry.InlineEntry.resource:type_name -> google.protobuf.Any + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_xds_core_v3_collection_entry_proto_init() } +func file_xds_core_v3_collection_entry_proto_init() { + if File_xds_core_v3_collection_entry_proto != nil { + return + } + file_xds_core_v3_resource_locator_proto_init() + if !protoimpl.UnsafeEnabled { + file_xds_core_v3_collection_entry_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_core_v3_collection_entry_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionEntry_InlineEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_xds_core_v3_collection_entry_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*CollectionEntry_Locator)(nil), + (*CollectionEntry_InlineEntry_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_core_v3_collection_entry_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + 
NumServices: 0, + }, + GoTypes: file_xds_core_v3_collection_entry_proto_goTypes, + DependencyIndexes: file_xds_core_v3_collection_entry_proto_depIdxs, + MessageInfos: file_xds_core_v3_collection_entry_proto_msgTypes, + }.Build() + File_xds_core_v3_collection_entry_proto = out.File + file_xds_core_v3_collection_entry_proto_rawDesc = nil + file_xds_core_v3_collection_entry_proto_goTypes = nil + file_xds_core_v3_collection_entry_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.validate.go new file mode 100644 index 0000000000..610990b7fe --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.validate.go @@ -0,0 +1,383 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/core/v3/collection_entry.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on CollectionEntry with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *CollectionEntry) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CollectionEntry with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CollectionEntryMultiError, or nil if none found. 
+func (m *CollectionEntry) ValidateAll() error { + return m.validate(true) +} + +func (m *CollectionEntry) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofResourceSpecifierPresent := false + switch v := m.ResourceSpecifier.(type) { + case *CollectionEntry_Locator: + if v == nil { + err := CollectionEntryValidationError{ + field: "ResourceSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofResourceSpecifierPresent = true + + if all { + switch v := interface{}(m.GetLocator()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CollectionEntryValidationError{ + field: "Locator", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CollectionEntryValidationError{ + field: "Locator", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocator()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CollectionEntryValidationError{ + field: "Locator", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *CollectionEntry_InlineEntry_: + if v == nil { + err := CollectionEntryValidationError{ + field: "ResourceSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofResourceSpecifierPresent = true + + if all { + switch v := interface{}(m.GetInlineEntry()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CollectionEntryValidationError{ + field: "InlineEntry", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CollectionEntryValidationError{ + field: "InlineEntry", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInlineEntry()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CollectionEntryValidationError{ + field: "InlineEntry", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofResourceSpecifierPresent { + err := CollectionEntryValidationError{ + field: "ResourceSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return CollectionEntryMultiError(errors) + } + + return nil +} + +// CollectionEntryMultiError is an error wrapping multiple validation errors +// returned by CollectionEntry.ValidateAll() if the designated constraints +// aren't met. +type CollectionEntryMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CollectionEntryMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CollectionEntryMultiError) AllErrors() []error { return m } + +// CollectionEntryValidationError is the validation error returned by +// CollectionEntry.Validate if the designated constraints aren't met. 
+type CollectionEntryValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CollectionEntryValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CollectionEntryValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CollectionEntryValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CollectionEntryValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CollectionEntryValidationError) ErrorName() string { return "CollectionEntryValidationError" } + +// Error satisfies the builtin error interface +func (e CollectionEntryValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCollectionEntry.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CollectionEntryValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CollectionEntryValidationError{} + +// Validate checks the field values on CollectionEntry_InlineEntry with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CollectionEntry_InlineEntry) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CollectionEntry_InlineEntry with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CollectionEntry_InlineEntryMultiError, or nil if none found. +func (m *CollectionEntry_InlineEntry) ValidateAll() error { + return m.validate(true) +} + +func (m *CollectionEntry_InlineEntry) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if !_CollectionEntry_InlineEntry_Name_Pattern.MatchString(m.GetName()) { + err := CollectionEntry_InlineEntryValidationError{ + field: "Name", + reason: "value does not match regex pattern \"^[0-9a-zA-Z_\\\\-\\\\.~:]+$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for Version + + if all { + switch v := interface{}(m.GetResource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CollectionEntry_InlineEntryValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CollectionEntry_InlineEntryValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CollectionEntry_InlineEntryValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return CollectionEntry_InlineEntryMultiError(errors) + } + + return nil +} + +// CollectionEntry_InlineEntryMultiError is an error wrapping multiple +// validation errors returned by CollectionEntry_InlineEntry.ValidateAll() if +// the designated constraints aren't met. 
+type CollectionEntry_InlineEntryMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CollectionEntry_InlineEntryMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CollectionEntry_InlineEntryMultiError) AllErrors() []error { return m } + +// CollectionEntry_InlineEntryValidationError is the validation error returned +// by CollectionEntry_InlineEntry.Validate if the designated constraints +// aren't met. +type CollectionEntry_InlineEntryValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CollectionEntry_InlineEntryValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CollectionEntry_InlineEntryValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CollectionEntry_InlineEntryValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CollectionEntry_InlineEntryValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CollectionEntry_InlineEntryValidationError) ErrorName() string { + return "CollectionEntry_InlineEntryValidationError" +} + +// Error satisfies the builtin error interface +func (e CollectionEntry_InlineEntryValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCollectionEntry_InlineEntry.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CollectionEntry_InlineEntryValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CollectionEntry_InlineEntryValidationError{} + +var _CollectionEntry_InlineEntry_Name_Pattern = regexp.MustCompile("^[0-9a-zA-Z_\\-\\.~:]+$") diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go new file mode 100644 index 0000000000..714ab43673 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go @@ -0,0 +1,160 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/core/v3/context_params.proto + +package v3 + +import ( + _ "github.com/cncf/xds/go/xds/annotations/v3" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ContextParams struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Params map[string]string `protobuf:"bytes,1,rep,name=params,proto3" json:"params,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ContextParams) Reset() { + *x = ContextParams{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_core_v3_context_params_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ContextParams) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ContextParams) ProtoMessage() {} + +func (x *ContextParams) ProtoReflect() protoreflect.Message { + mi := &file_xds_core_v3_context_params_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContextParams.ProtoReflect.Descriptor instead. +func (*ContextParams) Descriptor() ([]byte, []int) { + return file_xds_core_v3_context_params_proto_rawDescGZIP(), []int{0} +} + +func (x *ContextParams) GetParams() map[string]string { + if x != nil { + return x.Params + } + return nil +} + +var File_xds_core_v3_context_params_proto protoreflect.FileDescriptor + +var file_xds_core_v3_context_params_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, + 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x8a, 0x01, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x12, 0x3e, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x5a, 0xd2, + 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, + 0x12, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, + 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + 
file_xds_core_v3_context_params_proto_rawDescOnce sync.Once + file_xds_core_v3_context_params_proto_rawDescData = file_xds_core_v3_context_params_proto_rawDesc +) + +func file_xds_core_v3_context_params_proto_rawDescGZIP() []byte { + file_xds_core_v3_context_params_proto_rawDescOnce.Do(func() { + file_xds_core_v3_context_params_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_context_params_proto_rawDescData) + }) + return file_xds_core_v3_context_params_proto_rawDescData +} + +var file_xds_core_v3_context_params_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_xds_core_v3_context_params_proto_goTypes = []interface{}{ + (*ContextParams)(nil), // 0: xds.core.v3.ContextParams + nil, // 1: xds.core.v3.ContextParams.ParamsEntry +} +var file_xds_core_v3_context_params_proto_depIdxs = []int32{ + 1, // 0: xds.core.v3.ContextParams.params:type_name -> xds.core.v3.ContextParams.ParamsEntry + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_xds_core_v3_context_params_proto_init() } +func file_xds_core_v3_context_params_proto_init() { + if File_xds_core_v3_context_params_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_core_v3_context_params_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ContextParams); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_core_v3_context_params_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_core_v3_context_params_proto_goTypes, + DependencyIndexes: file_xds_core_v3_context_params_proto_depIdxs, + MessageInfos: file_xds_core_v3_context_params_proto_msgTypes, + }.Build() + File_xds_core_v3_context_params_proto = out.File + file_xds_core_v3_context_params_proto_rawDesc = nil + file_xds_core_v3_context_params_proto_goTypes = nil + file_xds_core_v3_context_params_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.validate.go new file mode 100644 index 0000000000..1c9accaa3a --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.validate.go @@ -0,0 +1,138 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/core/v3/context_params.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ContextParams with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *ContextParams) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ContextParams with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ContextParamsMultiError, or +// nil if none found. +func (m *ContextParams) ValidateAll() error { + return m.validate(true) +} + +func (m *ContextParams) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Params + + if len(errors) > 0 { + return ContextParamsMultiError(errors) + } + + return nil +} + +// ContextParamsMultiError is an error wrapping multiple validation errors +// returned by ContextParams.ValidateAll() if the designated constraints +// aren't met. +type ContextParamsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ContextParamsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ContextParamsMultiError) AllErrors() []error { return m } + +// ContextParamsValidationError is the validation error returned by +// ContextParams.Validate if the designated constraints aren't met. +type ContextParamsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ContextParamsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ContextParamsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ContextParamsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ContextParamsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ContextParamsValidationError) ErrorName() string { return "ContextParamsValidationError" } + +// Error satisfies the builtin error interface +func (e ContextParamsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sContextParams.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ContextParamsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ContextParamsValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go new file mode 100644 index 0000000000..be4ea10c6b --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go @@ -0,0 +1,167 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/core/v3/extension.proto + +package v3 + +import ( + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type TypedExtensionConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + TypedConfig *anypb.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` +} + +func (x *TypedExtensionConfig) Reset() { + *x = TypedExtensionConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_core_v3_extension_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypedExtensionConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypedExtensionConfig) ProtoMessage() {} + +func (x *TypedExtensionConfig) ProtoReflect() protoreflect.Message { + mi := &file_xds_core_v3_extension_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypedExtensionConfig.ProtoReflect.Descriptor instead. +func (*TypedExtensionConfig) Descriptor() ([]byte, []int) { + return file_xds_core_v3_extension_proto_rawDescGZIP(), []int{0} +} + +func (x *TypedExtensionConfig) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *TypedExtensionConfig) GetTypedConfig() *anypb.Any { + if x != nil { + return x.TypedConfig + } + return nil +} + +var File_xds_core_v3_extension_proto protoreflect.FileDescriptor + +var file_xds_core_v3_extension_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, + 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x76, + 0x0a, 0x14, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0xa2, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x4e, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x42, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, + 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var 
( + file_xds_core_v3_extension_proto_rawDescOnce sync.Once + file_xds_core_v3_extension_proto_rawDescData = file_xds_core_v3_extension_proto_rawDesc +) + +func file_xds_core_v3_extension_proto_rawDescGZIP() []byte { + file_xds_core_v3_extension_proto_rawDescOnce.Do(func() { + file_xds_core_v3_extension_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_extension_proto_rawDescData) + }) + return file_xds_core_v3_extension_proto_rawDescData +} + +var file_xds_core_v3_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_xds_core_v3_extension_proto_goTypes = []interface{}{ + (*TypedExtensionConfig)(nil), // 0: xds.core.v3.TypedExtensionConfig + (*anypb.Any)(nil), // 1: google.protobuf.Any +} +var file_xds_core_v3_extension_proto_depIdxs = []int32{ + 1, // 0: xds.core.v3.TypedExtensionConfig.typed_config:type_name -> google.protobuf.Any + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_xds_core_v3_extension_proto_init() } +func file_xds_core_v3_extension_proto_init() { + if File_xds_core_v3_extension_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_core_v3_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TypedExtensionConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_core_v3_extension_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_core_v3_extension_proto_goTypes, + DependencyIndexes: file_xds_core_v3_extension_proto_depIdxs, + MessageInfos: file_xds_core_v3_extension_proto_msgTypes, + }.Build() + File_xds_core_v3_extension_proto = out.File + file_xds_core_v3_extension_proto_rawDesc = nil + file_xds_core_v3_extension_proto_goTypes = nil + file_xds_core_v3_extension_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.validate.go new file mode 100644 index 0000000000..839f3fef79 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.validate.go @@ -0,0 +1,164 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/core/v3/extension.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on TypedExtensionConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *TypedExtensionConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TypedExtensionConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// TypedExtensionConfigMultiError, or nil if none found. +func (m *TypedExtensionConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *TypedExtensionConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := TypedExtensionConfigValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetTypedConfig() == nil { + err := TypedExtensionConfigValidationError{ + field: "TypedConfig", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if a := m.GetTypedConfig(); a != nil { + + } + + if len(errors) > 0 { + return TypedExtensionConfigMultiError(errors) + } + + return nil +} + +// TypedExtensionConfigMultiError is an error wrapping multiple validation +// errors returned by TypedExtensionConfig.ValidateAll() if the designated +// constraints aren't met. +type TypedExtensionConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TypedExtensionConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TypedExtensionConfigMultiError) AllErrors() []error { return m } + +// TypedExtensionConfigValidationError is the validation error returned by +// TypedExtensionConfig.Validate if the designated constraints aren't met. +type TypedExtensionConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TypedExtensionConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TypedExtensionConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TypedExtensionConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TypedExtensionConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TypedExtensionConfigValidationError) ErrorName() string { + return "TypedExtensionConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e TypedExtensionConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTypedExtensionConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TypedExtensionConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TypedExtensionConfigValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go new file mode 100644 index 0000000000..641e3411ac --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go @@ -0,0 +1,182 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/core/v3/resource.proto + +package v3 + +import ( + _ "github.com/cncf/xds/go/xds/annotations/v3" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Resource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *ResourceName `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + Resource *anypb.Any `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` +} + +func (x *Resource) Reset() { + *x = Resource{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_core_v3_resource_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Resource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Resource) ProtoMessage() {} + +func (x *Resource) ProtoReflect() protoreflect.Message { + mi := &file_xds_core_v3_resource_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Resource.ProtoReflect.Descriptor instead. 
+func (*Resource) Descriptor() ([]byte, []int) { + return file_xds_core_v3_resource_proto_rawDescGZIP(), []int{0} +} + +func (x *Resource) GetName() *ResourceName { + if x != nil { + return x.Name + } + return nil +} + +func (x *Resource) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *Resource) GetResource() *anypb.Any { + if x != nil { + return x.Resource + } + return nil +} + +var File_xds_core_v3_resource_proto protoreflect.FileDescriptor + +var file_xds_core_v3_resource_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, + 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, + 0x76, 0x33, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x85, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x2d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x55, + 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, + 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_core_v3_resource_proto_rawDescOnce sync.Once + file_xds_core_v3_resource_proto_rawDescData = file_xds_core_v3_resource_proto_rawDesc +) + +func file_xds_core_v3_resource_proto_rawDescGZIP() []byte { + file_xds_core_v3_resource_proto_rawDescOnce.Do(func() { + file_xds_core_v3_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_resource_proto_rawDescData) + }) + return file_xds_core_v3_resource_proto_rawDescData +} + +var file_xds_core_v3_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_xds_core_v3_resource_proto_goTypes = []interface{}{ + (*Resource)(nil), // 0: xds.core.v3.Resource + (*ResourceName)(nil), // 1: xds.core.v3.ResourceName + (*anypb.Any)(nil), // 2: google.protobuf.Any +} +var 
file_xds_core_v3_resource_proto_depIdxs = []int32{ + 1, // 0: xds.core.v3.Resource.name:type_name -> xds.core.v3.ResourceName + 2, // 1: xds.core.v3.Resource.resource:type_name -> google.protobuf.Any + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_xds_core_v3_resource_proto_init() } +func file_xds_core_v3_resource_proto_init() { + if File_xds_core_v3_resource_proto != nil { + return + } + file_xds_core_v3_resource_name_proto_init() + if !protoimpl.UnsafeEnabled { + file_xds_core_v3_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Resource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_core_v3_resource_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_core_v3_resource_proto_goTypes, + DependencyIndexes: file_xds_core_v3_resource_proto_depIdxs, + MessageInfos: file_xds_core_v3_resource_proto_msgTypes, + }.Build() + File_xds_core_v3_resource_proto = out.File + file_xds_core_v3_resource_proto_rawDesc = nil + file_xds_core_v3_resource_proto_goTypes = nil + file_xds_core_v3_resource_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.validate.go new file mode 100644 index 0000000000..dc972171c9 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.validate.go @@ -0,0 +1,195 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/core/v3/resource.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Resource with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Resource) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Resource with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ResourceMultiError, or nil +// if none found. 
+func (m *Resource) ValidateAll() error { + return m.validate(true) +} + +func (m *Resource) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetName()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Name", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Name", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetName()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceValidationError{ + field: "Name", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Version + + if all { + switch v := interface{}(m.GetResource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ResourceMultiError(errors) + } + + return nil +} + +// ResourceMultiError is an error wrapping multiple validation errors returned +// by Resource.ValidateAll() if the designated constraints aren't met. +type ResourceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceMultiError) AllErrors() []error { return m } + +// ResourceValidationError is the validation error returned by +// Resource.Validate if the designated constraints aren't met. +type ResourceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ResourceValidationError) ErrorName() string { return "ResourceValidationError" } + +// Error satisfies the builtin error interface +func (e ResourceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResource.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go new file mode 100644 index 0000000000..3f99d4beec --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go @@ -0,0 +1,406 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/core/v3/resource_locator.proto + +package v3 + +import ( + _ "github.com/cncf/xds/go/xds/annotations/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ResourceLocator_Scheme int32 + +const ( + ResourceLocator_XDSTP ResourceLocator_Scheme = 0 + ResourceLocator_HTTP ResourceLocator_Scheme = 1 + ResourceLocator_FILE ResourceLocator_Scheme = 2 +) + +// Enum value maps for ResourceLocator_Scheme. +var ( + ResourceLocator_Scheme_name = map[int32]string{ + 0: "XDSTP", + 1: "HTTP", + 2: "FILE", + } + ResourceLocator_Scheme_value = map[string]int32{ + "XDSTP": 0, + "HTTP": 1, + "FILE": 2, + } +) + +func (x ResourceLocator_Scheme) Enum() *ResourceLocator_Scheme { + p := new(ResourceLocator_Scheme) + *p = x + return p +} + +func (x ResourceLocator_Scheme) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ResourceLocator_Scheme) Descriptor() protoreflect.EnumDescriptor { + return file_xds_core_v3_resource_locator_proto_enumTypes[0].Descriptor() +} + +func (ResourceLocator_Scheme) Type() protoreflect.EnumType { + return &file_xds_core_v3_resource_locator_proto_enumTypes[0] +} + +func (x ResourceLocator_Scheme) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ResourceLocator_Scheme.Descriptor instead. 
+func (ResourceLocator_Scheme) EnumDescriptor() ([]byte, []int) { + return file_xds_core_v3_resource_locator_proto_rawDescGZIP(), []int{0, 0} +} + +type ResourceLocator struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Scheme ResourceLocator_Scheme `protobuf:"varint,1,opt,name=scheme,proto3,enum=xds.core.v3.ResourceLocator_Scheme" json:"scheme,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` + ResourceType string `protobuf:"bytes,4,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` + // Types that are assignable to ContextParamSpecifier: + // + // *ResourceLocator_ExactContext + ContextParamSpecifier isResourceLocator_ContextParamSpecifier `protobuf_oneof:"context_param_specifier"` + Directives []*ResourceLocator_Directive `protobuf:"bytes,6,rep,name=directives,proto3" json:"directives,omitempty"` +} + +func (x *ResourceLocator) Reset() { + *x = ResourceLocator{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_core_v3_resource_locator_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceLocator) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceLocator) ProtoMessage() {} + +func (x *ResourceLocator) ProtoReflect() protoreflect.Message { + mi := &file_xds_core_v3_resource_locator_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceLocator.ProtoReflect.Descriptor instead. 
+func (*ResourceLocator) Descriptor() ([]byte, []int) { + return file_xds_core_v3_resource_locator_proto_rawDescGZIP(), []int{0} +} + +func (x *ResourceLocator) GetScheme() ResourceLocator_Scheme { + if x != nil { + return x.Scheme + } + return ResourceLocator_XDSTP +} + +func (x *ResourceLocator) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *ResourceLocator) GetAuthority() string { + if x != nil { + return x.Authority + } + return "" +} + +func (x *ResourceLocator) GetResourceType() string { + if x != nil { + return x.ResourceType + } + return "" +} + +func (m *ResourceLocator) GetContextParamSpecifier() isResourceLocator_ContextParamSpecifier { + if m != nil { + return m.ContextParamSpecifier + } + return nil +} + +func (x *ResourceLocator) GetExactContext() *ContextParams { + if x, ok := x.GetContextParamSpecifier().(*ResourceLocator_ExactContext); ok { + return x.ExactContext + } + return nil +} + +func (x *ResourceLocator) GetDirectives() []*ResourceLocator_Directive { + if x != nil { + return x.Directives + } + return nil +} + +type isResourceLocator_ContextParamSpecifier interface { + isResourceLocator_ContextParamSpecifier() +} + +type ResourceLocator_ExactContext struct { + ExactContext *ContextParams `protobuf:"bytes,5,opt,name=exact_context,json=exactContext,proto3,oneof"` +} + +func (*ResourceLocator_ExactContext) isResourceLocator_ContextParamSpecifier() {} + +type ResourceLocator_Directive struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Directive: + // + // *ResourceLocator_Directive_Alt + // *ResourceLocator_Directive_Entry + Directive isResourceLocator_Directive_Directive `protobuf_oneof:"directive"` +} + +func (x *ResourceLocator_Directive) Reset() { + *x = ResourceLocator_Directive{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_core_v3_resource_locator_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceLocator_Directive) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceLocator_Directive) ProtoMessage() {} + +func (x *ResourceLocator_Directive) ProtoReflect() protoreflect.Message { + mi := &file_xds_core_v3_resource_locator_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceLocator_Directive.ProtoReflect.Descriptor instead. 
+func (*ResourceLocator_Directive) Descriptor() ([]byte, []int) { + return file_xds_core_v3_resource_locator_proto_rawDescGZIP(), []int{0, 0} +} + +func (m *ResourceLocator_Directive) GetDirective() isResourceLocator_Directive_Directive { + if m != nil { + return m.Directive + } + return nil +} + +func (x *ResourceLocator_Directive) GetAlt() *ResourceLocator { + if x, ok := x.GetDirective().(*ResourceLocator_Directive_Alt); ok { + return x.Alt + } + return nil +} + +func (x *ResourceLocator_Directive) GetEntry() string { + if x, ok := x.GetDirective().(*ResourceLocator_Directive_Entry); ok { + return x.Entry + } + return "" +} + +type isResourceLocator_Directive_Directive interface { + isResourceLocator_Directive_Directive() +} + +type ResourceLocator_Directive_Alt struct { + Alt *ResourceLocator `protobuf:"bytes,1,opt,name=alt,proto3,oneof"` +} + +type ResourceLocator_Directive_Entry struct { + Entry string `protobuf:"bytes,2,opt,name=entry,proto3,oneof"` +} + +func (*ResourceLocator_Directive_Alt) isResourceLocator_Directive_Directive() {} + +func (*ResourceLocator_Directive_Entry) isResourceLocator_Directive_Directive() {} + +var File_xds_core_v3_resource_locator_proto protoreflect.FileDescriptor + +var file_xds_core_v3_resource_locator_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8e, 0x04, + 0x0a, 0x0f, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, + 0x72, 0x12, 0x45, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x23, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, + 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x2c, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x0d, 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x78, 0x64, + 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x50, 
0x61, 0x72, 0x61, 0x6d, 0x73, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x78, 0x61, 0x63, 0x74, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x46, 0x0a, 0x0a, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x78, 0x64, + 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x52, 0x0a, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x1a, + 0x88, 0x01, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x30, 0x0a, + 0x03, 0x61, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x78, 0x64, 0x73, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x48, 0x00, 0x52, 0x03, 0x61, 0x6c, 0x74, 0x12, + 0x37, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x1f, + 0xfa, 0x42, 0x1c, 0x72, 0x1a, 0x10, 0x01, 0x32, 0x16, 0x5e, 0x5b, 0x30, 0x2d, 0x39, 0x61, 0x2d, + 0x7a, 0x41, 0x2d, 0x5a, 0x5f, 0x5c, 0x2d, 0x5c, 0x2e, 0x2f, 0x7e, 0x3a, 0x5d, 0x2b, 0x24, 0x48, + 0x00, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x10, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x27, 0x0a, 0x06, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x58, 0x44, 0x53, 0x54, 0x50, 0x10, 0x00, 0x12, + 0x08, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x49, 0x4c, + 0x45, 0x10, 0x02, 0x42, 0x19, 0x0a, 0x17, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0x5c, + 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x42, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, + 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_core_v3_resource_locator_proto_rawDescOnce sync.Once + file_xds_core_v3_resource_locator_proto_rawDescData = file_xds_core_v3_resource_locator_proto_rawDesc +) + +func file_xds_core_v3_resource_locator_proto_rawDescGZIP() []byte { + file_xds_core_v3_resource_locator_proto_rawDescOnce.Do(func() { + file_xds_core_v3_resource_locator_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_resource_locator_proto_rawDescData) + }) + return file_xds_core_v3_resource_locator_proto_rawDescData +} + +var file_xds_core_v3_resource_locator_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_xds_core_v3_resource_locator_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_xds_core_v3_resource_locator_proto_goTypes = []interface{}{ + (ResourceLocator_Scheme)(0), // 0: xds.core.v3.ResourceLocator.Scheme + (*ResourceLocator)(nil), // 1: xds.core.v3.ResourceLocator + (*ResourceLocator_Directive)(nil), // 2: xds.core.v3.ResourceLocator.Directive + (*ContextParams)(nil), // 3: xds.core.v3.ContextParams +} +var 
file_xds_core_v3_resource_locator_proto_depIdxs = []int32{ + 0, // 0: xds.core.v3.ResourceLocator.scheme:type_name -> xds.core.v3.ResourceLocator.Scheme + 3, // 1: xds.core.v3.ResourceLocator.exact_context:type_name -> xds.core.v3.ContextParams + 2, // 2: xds.core.v3.ResourceLocator.directives:type_name -> xds.core.v3.ResourceLocator.Directive + 1, // 3: xds.core.v3.ResourceLocator.Directive.alt:type_name -> xds.core.v3.ResourceLocator + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_xds_core_v3_resource_locator_proto_init() } +func file_xds_core_v3_resource_locator_proto_init() { + if File_xds_core_v3_resource_locator_proto != nil { + return + } + file_xds_core_v3_context_params_proto_init() + if !protoimpl.UnsafeEnabled { + file_xds_core_v3_resource_locator_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceLocator); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_core_v3_resource_locator_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceLocator_Directive); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_xds_core_v3_resource_locator_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*ResourceLocator_ExactContext)(nil), + } + file_xds_core_v3_resource_locator_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*ResourceLocator_Directive_Alt)(nil), + (*ResourceLocator_Directive_Entry)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_core_v3_resource_locator_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_core_v3_resource_locator_proto_goTypes, + DependencyIndexes: file_xds_core_v3_resource_locator_proto_depIdxs, + EnumInfos: file_xds_core_v3_resource_locator_proto_enumTypes, + MessageInfos: file_xds_core_v3_resource_locator_proto_msgTypes, + }.Build() + File_xds_core_v3_resource_locator_proto = out.File + file_xds_core_v3_resource_locator_proto_rawDesc = nil + file_xds_core_v3_resource_locator_proto_goTypes = nil + file_xds_core_v3_resource_locator_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.validate.go new file mode 100644 index 0000000000..1686e98d12 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.validate.go @@ -0,0 +1,439 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: xds/core/v3/resource_locator.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ResourceLocator with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ResourceLocator) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceLocator with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ResourceLocatorMultiError, or nil if none found. +func (m *ResourceLocator) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceLocator) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := ResourceLocator_Scheme_name[int32(m.GetScheme())]; !ok { + err := ResourceLocatorValidationError{ + field: "Scheme", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for Id + + // no validation rules for Authority + + if utf8.RuneCountInString(m.GetResourceType()) < 1 { + err := ResourceLocatorValidationError{ + field: "ResourceType", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetDirectives() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceLocatorValidationError{ + field: fmt.Sprintf("Directives[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceLocatorValidationError{ + field: fmt.Sprintf("Directives[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceLocatorValidationError{ + field: fmt.Sprintf("Directives[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + switch v := m.ContextParamSpecifier.(type) { + case *ResourceLocator_ExactContext: + if v == nil { + err := ResourceLocatorValidationError{ + field: "ContextParamSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetExactContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceLocatorValidationError{ + field: "ExactContext", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceLocatorValidationError{ + field: 
"ExactContext", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExactContext()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceLocatorValidationError{ + field: "ExactContext", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return ResourceLocatorMultiError(errors) + } + + return nil +} + +// ResourceLocatorMultiError is an error wrapping multiple validation errors +// returned by ResourceLocator.ValidateAll() if the designated constraints +// aren't met. +type ResourceLocatorMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceLocatorMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceLocatorMultiError) AllErrors() []error { return m } + +// ResourceLocatorValidationError is the validation error returned by +// ResourceLocator.Validate if the designated constraints aren't met. +type ResourceLocatorValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceLocatorValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceLocatorValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceLocatorValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceLocatorValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResourceLocatorValidationError) ErrorName() string { return "ResourceLocatorValidationError" } + +// Error satisfies the builtin error interface +func (e ResourceLocatorValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourceLocator.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceLocatorValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceLocatorValidationError{} + +// Validate checks the field values on ResourceLocator_Directive with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ResourceLocator_Directive) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceLocator_Directive with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ResourceLocator_DirectiveMultiError, or nil if none found. 
+func (m *ResourceLocator_Directive) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceLocator_Directive) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofDirectivePresent := false + switch v := m.Directive.(type) { + case *ResourceLocator_Directive_Alt: + if v == nil { + err := ResourceLocator_DirectiveValidationError{ + field: "Directive", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofDirectivePresent = true + + if all { + switch v := interface{}(m.GetAlt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceLocator_DirectiveValidationError{ + field: "Alt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceLocator_DirectiveValidationError{ + field: "Alt", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAlt()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceLocator_DirectiveValidationError{ + field: "Alt", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ResourceLocator_Directive_Entry: + if v == nil { + err := ResourceLocator_DirectiveValidationError{ + field: "Directive", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofDirectivePresent = true + + if utf8.RuneCountInString(m.GetEntry()) < 1 { + err := ResourceLocator_DirectiveValidationError{ + field: "Entry", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_ResourceLocator_Directive_Entry_Pattern.MatchString(m.GetEntry()) { + err := ResourceLocator_DirectiveValidationError{ + field: "Entry", + reason: "value does not match regex pattern \"^[0-9a-zA-Z_\\\\-\\\\./~:]+$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + default: + _ = v // ensures v is used + } + if !oneofDirectivePresent { + err := ResourceLocator_DirectiveValidationError{ + field: "Directive", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ResourceLocator_DirectiveMultiError(errors) + } + + return nil +} + +// ResourceLocator_DirectiveMultiError is an error wrapping multiple validation +// errors returned by ResourceLocator_Directive.ValidateAll() if the +// designated constraints aren't met. +type ResourceLocator_DirectiveMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceLocator_DirectiveMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceLocator_DirectiveMultiError) AllErrors() []error { return m } + +// ResourceLocator_DirectiveValidationError is the validation error returned by +// ResourceLocator_Directive.Validate if the designated constraints aren't met. +type ResourceLocator_DirectiveValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e ResourceLocator_DirectiveValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceLocator_DirectiveValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceLocator_DirectiveValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceLocator_DirectiveValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResourceLocator_DirectiveValidationError) ErrorName() string { + return "ResourceLocator_DirectiveValidationError" +} + +// Error satisfies the builtin error interface +func (e ResourceLocator_DirectiveValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourceLocator_Directive.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceLocator_DirectiveValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceLocator_DirectiveValidationError{} + +var _ResourceLocator_Directive_Entry_Pattern = regexp.MustCompile("^[0-9a-zA-Z_\\-\\./~:]+$") diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go new file mode 100644 index 0000000000..3d42818b7a --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go @@ -0,0 +1,190 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/core/v3/resource_name.proto + +package v3 + +import ( + _ "github.com/cncf/xds/go/xds/annotations/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ResourceName struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Authority string `protobuf:"bytes,2,opt,name=authority,proto3" json:"authority,omitempty"` + ResourceType string `protobuf:"bytes,3,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` + Context *ContextParams `protobuf:"bytes,4,opt,name=context,proto3" json:"context,omitempty"` +} + +func (x *ResourceName) Reset() { + *x = ResourceName{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_core_v3_resource_name_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceName) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceName) ProtoMessage() {} + +func (x *ResourceName) ProtoReflect() protoreflect.Message { + mi := &file_xds_core_v3_resource_name_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceName.ProtoReflect.Descriptor instead. +func (*ResourceName) Descriptor() ([]byte, []int) { + return file_xds_core_v3_resource_name_proto_rawDescGZIP(), []int{0} +} + +func (x *ResourceName) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *ResourceName) GetAuthority() string { + if x != nil { + return x.Authority + } + return "" +} + +func (x *ResourceName) GetResourceType() string { + if x != nil { + return x.ResourceType + } + return "" +} + +func (x *ResourceName) GetContext() *ContextParams { + if x != nil { + return x.Context + } + return nil +} + +var File_xds_core_v3_resource_name_proto protoreflect.FileDescriptor + +var file_xds_core_v3_resource_name_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1f, + 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x20, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa0, 0x01, 0x0a, 0x0c, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x2c, 0x0a, 0x0d, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x34, 
0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x42, 0x59, 0xd2, + 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, + 0x11, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_core_v3_resource_name_proto_rawDescOnce sync.Once + file_xds_core_v3_resource_name_proto_rawDescData = file_xds_core_v3_resource_name_proto_rawDesc +) + +func file_xds_core_v3_resource_name_proto_rawDescGZIP() []byte { + file_xds_core_v3_resource_name_proto_rawDescOnce.Do(func() { + file_xds_core_v3_resource_name_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_resource_name_proto_rawDescData) + }) + return file_xds_core_v3_resource_name_proto_rawDescData +} + +var file_xds_core_v3_resource_name_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_xds_core_v3_resource_name_proto_goTypes = []interface{}{ + (*ResourceName)(nil), // 0: xds.core.v3.ResourceName + (*ContextParams)(nil), // 1: xds.core.v3.ContextParams +} +var file_xds_core_v3_resource_name_proto_depIdxs = []int32{ + 1, // 0: xds.core.v3.ResourceName.context:type_name -> xds.core.v3.ContextParams + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_xds_core_v3_resource_name_proto_init() } +func file_xds_core_v3_resource_name_proto_init() { + if File_xds_core_v3_resource_name_proto != nil { + return + } + file_xds_core_v3_context_params_proto_init() + if !protoimpl.UnsafeEnabled { + file_xds_core_v3_resource_name_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceName); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_core_v3_resource_name_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_core_v3_resource_name_proto_goTypes, + DependencyIndexes: file_xds_core_v3_resource_name_proto_depIdxs, + MessageInfos: file_xds_core_v3_resource_name_proto_msgTypes, + }.Build() + File_xds_core_v3_resource_name_proto = out.File + file_xds_core_v3_resource_name_proto_rawDesc = nil + file_xds_core_v3_resource_name_proto_goTypes = nil + file_xds_core_v3_resource_name_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.validate.go new file mode 100644 index 0000000000..270e921bc3 --- /dev/null +++ 
b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.validate.go @@ -0,0 +1,179 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/core/v3/resource_name.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ResourceName with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ResourceName) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceName with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ResourceNameMultiError, or +// nil if none found. +func (m *ResourceName) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceName) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Id + + // no validation rules for Authority + + if utf8.RuneCountInString(m.GetResourceType()) < 1 { + err := ResourceNameValidationError{ + field: "ResourceType", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceNameValidationError{ + field: "Context", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceNameValidationError{ + field: "Context", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetContext()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceNameValidationError{ + field: "Context", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ResourceNameMultiError(errors) + } + + return nil +} + +// ResourceNameMultiError is an error wrapping multiple validation errors +// returned by ResourceName.ValidateAll() if the designated constraints aren't met. +type ResourceNameMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceNameMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceNameMultiError) AllErrors() []error { return m } + +// ResourceNameValidationError is the validation error returned by +// ResourceName.Validate if the designated constraints aren't met. +type ResourceNameValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e ResourceNameValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceNameValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceNameValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceNameValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResourceNameValidationError) ErrorName() string { return "ResourceNameValidationError" } + +// Error satisfies the builtin error interface +func (e ResourceNameValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourceName.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceNameValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceNameValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.go b/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.go new file mode 100644 index 0000000000..74899339b8 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.go @@ -0,0 +1,272 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/data/orca/v3/orca_load_report.proto + +package v3 + +import ( + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type OrcaLoadReport struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CpuUtilization float64 `protobuf:"fixed64,1,opt,name=cpu_utilization,json=cpuUtilization,proto3" json:"cpu_utilization,omitempty"` + MemUtilization float64 `protobuf:"fixed64,2,opt,name=mem_utilization,json=memUtilization,proto3" json:"mem_utilization,omitempty"` + // Deprecated: Marked as deprecated in xds/data/orca/v3/orca_load_report.proto. 
+ Rps uint64 `protobuf:"varint,3,opt,name=rps,proto3" json:"rps,omitempty"` + RequestCost map[string]float64 `protobuf:"bytes,4,rep,name=request_cost,json=requestCost,proto3" json:"request_cost,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` + Utilization map[string]float64 `protobuf:"bytes,5,rep,name=utilization,proto3" json:"utilization,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` + RpsFractional float64 `protobuf:"fixed64,6,opt,name=rps_fractional,json=rpsFractional,proto3" json:"rps_fractional,omitempty"` + Eps float64 `protobuf:"fixed64,7,opt,name=eps,proto3" json:"eps,omitempty"` + NamedMetrics map[string]float64 `protobuf:"bytes,8,rep,name=named_metrics,json=namedMetrics,proto3" json:"named_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` + ApplicationUtilization float64 `protobuf:"fixed64,9,opt,name=application_utilization,json=applicationUtilization,proto3" json:"application_utilization,omitempty"` +} + +func (x *OrcaLoadReport) Reset() { + *x = OrcaLoadReport{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_data_orca_v3_orca_load_report_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OrcaLoadReport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OrcaLoadReport) ProtoMessage() {} + +func (x *OrcaLoadReport) ProtoReflect() protoreflect.Message { + mi := &file_xds_data_orca_v3_orca_load_report_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OrcaLoadReport.ProtoReflect.Descriptor instead. +func (*OrcaLoadReport) Descriptor() ([]byte, []int) { + return file_xds_data_orca_v3_orca_load_report_proto_rawDescGZIP(), []int{0} +} + +func (x *OrcaLoadReport) GetCpuUtilization() float64 { + if x != nil { + return x.CpuUtilization + } + return 0 +} + +func (x *OrcaLoadReport) GetMemUtilization() float64 { + if x != nil { + return x.MemUtilization + } + return 0 +} + +// Deprecated: Marked as deprecated in xds/data/orca/v3/orca_load_report.proto. 
+func (x *OrcaLoadReport) GetRps() uint64 { + if x != nil { + return x.Rps + } + return 0 +} + +func (x *OrcaLoadReport) GetRequestCost() map[string]float64 { + if x != nil { + return x.RequestCost + } + return nil +} + +func (x *OrcaLoadReport) GetUtilization() map[string]float64 { + if x != nil { + return x.Utilization + } + return nil +} + +func (x *OrcaLoadReport) GetRpsFractional() float64 { + if x != nil { + return x.RpsFractional + } + return 0 +} + +func (x *OrcaLoadReport) GetEps() float64 { + if x != nil { + return x.Eps + } + return 0 +} + +func (x *OrcaLoadReport) GetNamedMetrics() map[string]float64 { + if x != nil { + return x.NamedMetrics + } + return nil +} + +func (x *OrcaLoadReport) GetApplicationUtilization() float64 { + if x != nil { + return x.ApplicationUtilization + } + return 0 +} + +var File_xds_data_orca_v3_orca_load_report_proto protoreflect.FileDescriptor + +var file_xds_data_orca_v3_orca_load_report_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x78, 0x64, 0x73, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x2f, + 0x76, 0x33, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, + 0x6f, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x78, 0x64, 0x73, 0x2e, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x1a, 0x17, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa6, 0x06, 0x0a, 0x0e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, + 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x0f, 0x63, 0x70, 0x75, 0x5f, 0x75, + 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, + 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x52, 0x0e, 0x63, 0x70, 0x75, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x65, 0x6d, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, + 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x52, 0x0e, 0x6d, 0x65, 0x6d, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x03, 0x72, 0x70, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x42, + 0x02, 0x18, 0x01, 0x52, 0x03, 0x72, 0x70, 0x73, 0x12, 0x54, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x73, 0x74, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, + 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, + 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x12, 0x71, + 0x0a, 0x0b, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, + 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x1c, 0xfa, 0x42, 0x19, 0x9a, 0x01, 0x16, 0x2a, 0x14, + 0x12, 0x12, 0x19, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0xf0, 0x3f, 0x29, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x52, 0x0b, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x35, 0x0a, 0x0e, 0x72, 0x70, 0x73, 0x5f, 0x66, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, + 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x0d, 0x72, 0x70, 0x73, 0x46, 0x72, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x70, 0x73, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x01, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x29, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x03, 0x65, 0x70, 0x73, 0x12, 0x57, 0x0a, 0x0d, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, + 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, + 0x6f, 0x72, 0x74, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x12, 0x47, 0x0a, 0x17, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x01, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x29, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x52, 0x16, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x3e, 0x0a, 0x10, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10, + 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3f, 0x0a, 0x11, + 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x5d, 0x0a, + 0x1b, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x42, 0x13, 0x4f, 0x72, + 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, + 0x64, 0x61, 0x74, 0x61, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + 
file_xds_data_orca_v3_orca_load_report_proto_rawDescOnce sync.Once + file_xds_data_orca_v3_orca_load_report_proto_rawDescData = file_xds_data_orca_v3_orca_load_report_proto_rawDesc +) + +func file_xds_data_orca_v3_orca_load_report_proto_rawDescGZIP() []byte { + file_xds_data_orca_v3_orca_load_report_proto_rawDescOnce.Do(func() { + file_xds_data_orca_v3_orca_load_report_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_data_orca_v3_orca_load_report_proto_rawDescData) + }) + return file_xds_data_orca_v3_orca_load_report_proto_rawDescData +} + +var file_xds_data_orca_v3_orca_load_report_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_xds_data_orca_v3_orca_load_report_proto_goTypes = []interface{}{ + (*OrcaLoadReport)(nil), // 0: xds.data.orca.v3.OrcaLoadReport + nil, // 1: xds.data.orca.v3.OrcaLoadReport.RequestCostEntry + nil, // 2: xds.data.orca.v3.OrcaLoadReport.UtilizationEntry + nil, // 3: xds.data.orca.v3.OrcaLoadReport.NamedMetricsEntry +} +var file_xds_data_orca_v3_orca_load_report_proto_depIdxs = []int32{ + 1, // 0: xds.data.orca.v3.OrcaLoadReport.request_cost:type_name -> xds.data.orca.v3.OrcaLoadReport.RequestCostEntry + 2, // 1: xds.data.orca.v3.OrcaLoadReport.utilization:type_name -> xds.data.orca.v3.OrcaLoadReport.UtilizationEntry + 3, // 2: xds.data.orca.v3.OrcaLoadReport.named_metrics:type_name -> xds.data.orca.v3.OrcaLoadReport.NamedMetricsEntry + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_xds_data_orca_v3_orca_load_report_proto_init() } +func file_xds_data_orca_v3_orca_load_report_proto_init() { + if File_xds_data_orca_v3_orca_load_report_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_data_orca_v3_orca_load_report_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OrcaLoadReport); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_data_orca_v3_orca_load_report_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_data_orca_v3_orca_load_report_proto_goTypes, + DependencyIndexes: file_xds_data_orca_v3_orca_load_report_proto_depIdxs, + MessageInfos: file_xds_data_orca_v3_orca_load_report_proto_msgTypes, + }.Build() + File_xds_data_orca_v3_orca_load_report_proto = out.File + file_xds_data_orca_v3_orca_load_report_proto_rawDesc = nil + file_xds_data_orca_v3_orca_load_report_proto_goTypes = nil + file_xds_data_orca_v3_orca_load_report_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.validate.go new file mode 100644 index 0000000000..8dd55330ac --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.validate.go @@ -0,0 +1,225 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: xds/data/orca/v3/orca_load_report.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on OrcaLoadReport with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *OrcaLoadReport) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on OrcaLoadReport with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in OrcaLoadReportMultiError, +// or nil if none found. +func (m *OrcaLoadReport) ValidateAll() error { + return m.validate(true) +} + +func (m *OrcaLoadReport) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetCpuUtilization() < 0 { + err := OrcaLoadReportValidationError{ + field: "CpuUtilization", + reason: "value must be greater than or equal to 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + if val := m.GetMemUtilization(); val < 0 || val > 1 { + err := OrcaLoadReportValidationError{ + field: "MemUtilization", + reason: "value must be inside range [0, 1]", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for Rps + + // no validation rules for RequestCost + + { + sorted_keys := make([]string, len(m.GetUtilization())) + i := 0 + for key := range m.GetUtilization() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetUtilization()[key] + _ = val + + // no validation rules for Utilization[key] + + if val := val; val < 0 || val > 1 { + err := OrcaLoadReportValidationError{ + field: fmt.Sprintf("Utilization[%v]", key), + reason: "value must be inside range [0, 1]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if m.GetRpsFractional() < 0 { + err := OrcaLoadReportValidationError{ + field: "RpsFractional", + reason: "value must be greater than or equal to 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetEps() < 0 { + err := OrcaLoadReportValidationError{ + field: "Eps", + reason: "value must be greater than or equal to 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for NamedMetrics + + if m.GetApplicationUtilization() < 0 { + err := OrcaLoadReportValidationError{ + field: "ApplicationUtilization", + reason: "value must be greater than or equal to 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return OrcaLoadReportMultiError(errors) + } + + return nil +} + +// OrcaLoadReportMultiError is an error wrapping multiple validation errors +// returned by OrcaLoadReport.ValidateAll() if the designated constraints +// aren't met. +type OrcaLoadReportMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m OrcaLoadReportMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m OrcaLoadReportMultiError) AllErrors() []error { return m } + +// OrcaLoadReportValidationError is the validation error returned by +// OrcaLoadReport.Validate if the designated constraints aren't met. +type OrcaLoadReportValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e OrcaLoadReportValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e OrcaLoadReportValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e OrcaLoadReportValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e OrcaLoadReportValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e OrcaLoadReportValidationError) ErrorName() string { return "OrcaLoadReportValidationError" } + +// Error satisfies the builtin error interface +func (e OrcaLoadReportValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sOrcaLoadReport.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = OrcaLoadReportValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = OrcaLoadReportValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.go b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.go new file mode 100644 index 0000000000..463f4ed331 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.go @@ -0,0 +1,182 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/service/orca/v3/orca.proto + +package v3 + +import ( + v3 "github.com/cncf/xds/go/xds/data/orca/v3" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type OrcaLoadReportRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ReportInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=report_interval,json=reportInterval,proto3" json:"report_interval,omitempty"` + RequestCostNames []string `protobuf:"bytes,2,rep,name=request_cost_names,json=requestCostNames,proto3" json:"request_cost_names,omitempty"` +} + +func (x *OrcaLoadReportRequest) Reset() { + *x = OrcaLoadReportRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_service_orca_v3_orca_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OrcaLoadReportRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OrcaLoadReportRequest) ProtoMessage() {} + +func (x *OrcaLoadReportRequest) ProtoReflect() protoreflect.Message { + mi := &file_xds_service_orca_v3_orca_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OrcaLoadReportRequest.ProtoReflect.Descriptor instead. +func (*OrcaLoadReportRequest) Descriptor() ([]byte, []int) { + return file_xds_service_orca_v3_orca_proto_rawDescGZIP(), []int{0} +} + +func (x *OrcaLoadReportRequest) GetReportInterval() *durationpb.Duration { + if x != nil { + return x.ReportInterval + } + return nil +} + +func (x *OrcaLoadReportRequest) GetRequestCostNames() []string { + if x != nil { + return x.RequestCostNames + } + return nil +} + +var File_xds_service_orca_v3_orca_proto protoreflect.FileDescriptor + +var file_xds_service_orca_v3_orca_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x78, 0x64, 0x73, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x6f, 0x72, + 0x63, 0x61, 0x2f, 0x76, 0x33, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6f, 0x72, + 0x63, 0x61, 0x2e, 0x76, 0x33, 0x1a, 0x27, 0x78, 0x64, 0x73, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x2f, + 0x6f, 0x72, 0x63, 0x61, 0x2f, 0x76, 0x33, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x5f, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x89, + 0x01, 0x0a, 0x15, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x0f, 0x72, 0x65, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x72, 0x65, + 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x2c, 0x0a, 0x12, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x43, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x32, 0x75, 0x0a, 0x0e, 0x4f, 0x70, + 0x65, 0x6e, 0x52, 0x63, 0x61, 0x53, 
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x63, 0x0a, 0x11, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x72, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x12, 0x2a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, + 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, + 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, + 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x30, + 0x01, 0x42, 0x59, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x78, 0x64, 0x73, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6f, 0x72, 0x63, 0x61, + 0x2e, 0x76, 0x33, 0x42, 0x09, 0x4f, 0x72, 0x63, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, + 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_service_orca_v3_orca_proto_rawDescOnce sync.Once + file_xds_service_orca_v3_orca_proto_rawDescData = file_xds_service_orca_v3_orca_proto_rawDesc +) + +func file_xds_service_orca_v3_orca_proto_rawDescGZIP() []byte { + file_xds_service_orca_v3_orca_proto_rawDescOnce.Do(func() { + file_xds_service_orca_v3_orca_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_service_orca_v3_orca_proto_rawDescData) + }) + return file_xds_service_orca_v3_orca_proto_rawDescData +} + +var file_xds_service_orca_v3_orca_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_xds_service_orca_v3_orca_proto_goTypes = []interface{}{ + (*OrcaLoadReportRequest)(nil), // 0: xds.service.orca.v3.OrcaLoadReportRequest + (*durationpb.Duration)(nil), // 1: google.protobuf.Duration + (*v3.OrcaLoadReport)(nil), // 2: xds.data.orca.v3.OrcaLoadReport +} +var file_xds_service_orca_v3_orca_proto_depIdxs = []int32{ + 1, // 0: xds.service.orca.v3.OrcaLoadReportRequest.report_interval:type_name -> google.protobuf.Duration + 0, // 1: xds.service.orca.v3.OpenRcaService.StreamCoreMetrics:input_type -> xds.service.orca.v3.OrcaLoadReportRequest + 2, // 2: xds.service.orca.v3.OpenRcaService.StreamCoreMetrics:output_type -> xds.data.orca.v3.OrcaLoadReport + 2, // [2:3] is the sub-list for method output_type + 1, // [1:2] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_xds_service_orca_v3_orca_proto_init() } +func file_xds_service_orca_v3_orca_proto_init() { + if File_xds_service_orca_v3_orca_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_service_orca_v3_orca_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OrcaLoadReportRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_service_orca_v3_orca_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: 
file_xds_service_orca_v3_orca_proto_goTypes, + DependencyIndexes: file_xds_service_orca_v3_orca_proto_depIdxs, + MessageInfos: file_xds_service_orca_v3_orca_proto_msgTypes, + }.Build() + File_xds_service_orca_v3_orca_proto = out.File + file_xds_service_orca_v3_orca_proto_rawDesc = nil + file_xds_service_orca_v3_orca_proto_goTypes = nil + file_xds_service_orca_v3_orca_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.validate.go new file mode 100644 index 0000000000..8949e8372b --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.validate.go @@ -0,0 +1,167 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/service/orca/v3/orca.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on OrcaLoadReportRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *OrcaLoadReportRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on OrcaLoadReportRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// OrcaLoadReportRequestMultiError, or nil if none found. +func (m *OrcaLoadReportRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *OrcaLoadReportRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetReportInterval()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OrcaLoadReportRequestValidationError{ + field: "ReportInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OrcaLoadReportRequestValidationError{ + field: "ReportInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetReportInterval()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OrcaLoadReportRequestValidationError{ + field: "ReportInterval", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return OrcaLoadReportRequestMultiError(errors) + } + + return nil +} + +// OrcaLoadReportRequestMultiError is an error wrapping multiple validation +// errors returned by OrcaLoadReportRequest.ValidateAll() if the designated +// constraints aren't met. +type OrcaLoadReportRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m OrcaLoadReportRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m OrcaLoadReportRequestMultiError) AllErrors() []error { return m } + +// OrcaLoadReportRequestValidationError is the validation error returned by +// OrcaLoadReportRequest.Validate if the designated constraints aren't met. +type OrcaLoadReportRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e OrcaLoadReportRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e OrcaLoadReportRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e OrcaLoadReportRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e OrcaLoadReportRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e OrcaLoadReportRequestValidationError) ErrorName() string { + return "OrcaLoadReportRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e OrcaLoadReportRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sOrcaLoadReportRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = OrcaLoadReportRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = OrcaLoadReportRequestValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca_grpc.pb.go b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca_grpc.pb.go new file mode 100644 index 0000000000..6cecac149a --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca_grpc.pb.go @@ -0,0 +1,135 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v5.27.0--rc2 +// source: xds/service/orca/v3/orca.proto + +package v3 + +import ( + context "context" + v3 "github.com/cncf/xds/go/xds/data/orca/v3" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + OpenRcaService_StreamCoreMetrics_FullMethodName = "/xds.service.orca.v3.OpenRcaService/StreamCoreMetrics" +) + +// OpenRcaServiceClient is the client API for OpenRcaService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type OpenRcaServiceClient interface { + StreamCoreMetrics(ctx context.Context, in *OrcaLoadReportRequest, opts ...grpc.CallOption) (OpenRcaService_StreamCoreMetricsClient, error) +} + +type openRcaServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewOpenRcaServiceClient(cc grpc.ClientConnInterface) OpenRcaServiceClient { + return &openRcaServiceClient{cc} +} + +func (c *openRcaServiceClient) StreamCoreMetrics(ctx context.Context, in *OrcaLoadReportRequest, opts ...grpc.CallOption) (OpenRcaService_StreamCoreMetricsClient, error) { + stream, err := c.cc.NewStream(ctx, &OpenRcaService_ServiceDesc.Streams[0], OpenRcaService_StreamCoreMetrics_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &openRcaServiceStreamCoreMetricsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type OpenRcaService_StreamCoreMetricsClient interface { + Recv() (*v3.OrcaLoadReport, error) + grpc.ClientStream +} + +type openRcaServiceStreamCoreMetricsClient struct { + grpc.ClientStream +} + +func (x *openRcaServiceStreamCoreMetricsClient) Recv() (*v3.OrcaLoadReport, error) { + m := new(v3.OrcaLoadReport) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// OpenRcaServiceServer is the server API for OpenRcaService service. +// All implementations should embed UnimplementedOpenRcaServiceServer +// for forward compatibility +type OpenRcaServiceServer interface { + StreamCoreMetrics(*OrcaLoadReportRequest, OpenRcaService_StreamCoreMetricsServer) error +} + +// UnimplementedOpenRcaServiceServer should be embedded to have forward compatible implementations. +type UnimplementedOpenRcaServiceServer struct { +} + +func (UnimplementedOpenRcaServiceServer) StreamCoreMetrics(*OrcaLoadReportRequest, OpenRcaService_StreamCoreMetricsServer) error { + return status.Errorf(codes.Unimplemented, "method StreamCoreMetrics not implemented") +} + +// UnsafeOpenRcaServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to OpenRcaServiceServer will +// result in compilation errors. +type UnsafeOpenRcaServiceServer interface { + mustEmbedUnimplementedOpenRcaServiceServer() +} + +func RegisterOpenRcaServiceServer(s grpc.ServiceRegistrar, srv OpenRcaServiceServer) { + s.RegisterService(&OpenRcaService_ServiceDesc, srv) +} + +func _OpenRcaService_StreamCoreMetrics_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(OrcaLoadReportRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(OpenRcaServiceServer).StreamCoreMetrics(m, &openRcaServiceStreamCoreMetricsServer{stream}) +} + +type OpenRcaService_StreamCoreMetricsServer interface { + Send(*v3.OrcaLoadReport) error + grpc.ServerStream +} + +type openRcaServiceStreamCoreMetricsServer struct { + grpc.ServerStream +} + +func (x *openRcaServiceStreamCoreMetricsServer) Send(m *v3.OrcaLoadReport) error { + return x.ServerStream.SendMsg(m) +} + +// OpenRcaService_ServiceDesc is the grpc.ServiceDesc for OpenRcaService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var OpenRcaService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "xds.service.orca.v3.OpenRcaService", + HandlerType: (*OpenRcaServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamCoreMetrics", + Handler: _OpenRcaService_StreamCoreMetrics_Handler, + ServerStreams: true, + }, + }, + Metadata: "xds/service/orca/v3/orca.proto", +} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.go new file mode 100644 index 0000000000..7299227a3d --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.go @@ -0,0 +1,168 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/type/matcher/v3/cel.proto + +package v3 + +import ( + v3 "github.com/cncf/xds/go/xds/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CelMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ExprMatch *v3.CelExpression `protobuf:"bytes,1,opt,name=expr_match,json=exprMatch,proto3" json:"expr_match,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *CelMatcher) Reset() { + *x = CelMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_cel_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CelMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CelMatcher) ProtoMessage() {} + +func (x *CelMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_cel_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CelMatcher.ProtoReflect.Descriptor instead. 
+func (*CelMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_cel_proto_rawDescGZIP(), []int{0} +} + +func (x *CelMatcher) GetExprMatch() *v3.CelExpression { + if x != nil { + return x.ExprMatch + } + return nil +} + +func (x *CelMatcher) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +var File_xds_type_matcher_v3_cel_proto protoreflect.FileDescriptor + +var file_xds_type_matcher_v3_cel_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x15, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x63, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x73, 0x0a, 0x0a, 0x43, 0x65, 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x65, 0x78, + 0x70, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x58, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x43, 0x65, 0x6c, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, + 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_type_matcher_v3_cel_proto_rawDescOnce sync.Once + file_xds_type_matcher_v3_cel_proto_rawDescData = file_xds_type_matcher_v3_cel_proto_rawDesc +) + +func file_xds_type_matcher_v3_cel_proto_rawDescGZIP() []byte { + file_xds_type_matcher_v3_cel_proto_rawDescOnce.Do(func() { + file_xds_type_matcher_v3_cel_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_cel_proto_rawDescData) + }) + return file_xds_type_matcher_v3_cel_proto_rawDescData +} + +var file_xds_type_matcher_v3_cel_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_xds_type_matcher_v3_cel_proto_goTypes = []interface{}{ + (*CelMatcher)(nil), // 0: xds.type.matcher.v3.CelMatcher + (*v3.CelExpression)(nil), // 1: xds.type.v3.CelExpression +} +var file_xds_type_matcher_v3_cel_proto_depIdxs = []int32{ + 1, // 0: xds.type.matcher.v3.CelMatcher.expr_match:type_name -> xds.type.v3.CelExpression + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list 
for field type_name +} + +func init() { file_xds_type_matcher_v3_cel_proto_init() } +func file_xds_type_matcher_v3_cel_proto_init() { + if File_xds_type_matcher_v3_cel_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_type_matcher_v3_cel_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CelMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_type_matcher_v3_cel_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_type_matcher_v3_cel_proto_goTypes, + DependencyIndexes: file_xds_type_matcher_v3_cel_proto_depIdxs, + MessageInfos: file_xds_type_matcher_v3_cel_proto_msgTypes, + }.Build() + File_xds_type_matcher_v3_cel_proto = out.File + file_xds_type_matcher_v3_cel_proto_rawDesc = nil + file_xds_type_matcher_v3_cel_proto_goTypes = nil + file_xds_type_matcher_v3_cel_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.validate.go new file mode 100644 index 0000000000..091267b0cd --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.validate.go @@ -0,0 +1,177 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/type/matcher/v3/cel.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on CelMatcher with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *CelMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CelMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in CelMatcherMultiError, or +// nil if none found. 
+func (m *CelMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *CelMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetExprMatch() == nil { + err := CelMatcherValidationError{ + field: "ExprMatch", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetExprMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CelMatcherValidationError{ + field: "ExprMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CelMatcherValidationError{ + field: "ExprMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExprMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CelMatcherValidationError{ + field: "ExprMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Description + + if len(errors) > 0 { + return CelMatcherMultiError(errors) + } + + return nil +} + +// CelMatcherMultiError is an error wrapping multiple validation errors +// returned by CelMatcher.ValidateAll() if the designated constraints aren't met. +type CelMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CelMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CelMatcherMultiError) AllErrors() []error { return m } + +// CelMatcherValidationError is the validation error returned by +// CelMatcher.Validate if the designated constraints aren't met. +type CelMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CelMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CelMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CelMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CelMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CelMatcherValidationError) ErrorName() string { return "CelMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e CelMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCelMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CelMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CelMatcherValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.go new file mode 100644 index 0000000000..5f72c8d110 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.go @@ -0,0 +1,242 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/type/matcher/v3/domain.proto + +package v3 + +import ( + _ "github.com/cncf/xds/go/xds/annotations/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ServerNameMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DomainMatchers []*ServerNameMatcher_DomainMatcher `protobuf:"bytes,1,rep,name=domain_matchers,json=domainMatchers,proto3" json:"domain_matchers,omitempty"` +} + +func (x *ServerNameMatcher) Reset() { + *x = ServerNameMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_domain_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerNameMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerNameMatcher) ProtoMessage() {} + +func (x *ServerNameMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_domain_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerNameMatcher.ProtoReflect.Descriptor instead. +func (*ServerNameMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_domain_proto_rawDescGZIP(), []int{0} +} + +func (x *ServerNameMatcher) GetDomainMatchers() []*ServerNameMatcher_DomainMatcher { + if x != nil { + return x.DomainMatchers + } + return nil +} + +type ServerNameMatcher_DomainMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Domains []string `protobuf:"bytes,1,rep,name=domains,proto3" json:"domains,omitempty"` + OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"` +} + +func (x *ServerNameMatcher_DomainMatcher) Reset() { + *x = ServerNameMatcher_DomainMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_domain_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerNameMatcher_DomainMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerNameMatcher_DomainMatcher) ProtoMessage() {} + +func (x *ServerNameMatcher_DomainMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_domain_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerNameMatcher_DomainMatcher.ProtoReflect.Descriptor instead. 
+func (*ServerNameMatcher_DomainMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_domain_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *ServerNameMatcher_DomainMatcher) GetDomains() []string { + if x != nil { + return x.Domains + } + return nil +} + +func (x *ServerNameMatcher_DomainMatcher) GetOnMatch() *Matcher_OnMatch { + if x != nil { + return x.OnMatch + } + return nil +} + +var File_xds_type_matcher_v3_domain_proto protoreflect.FileDescriptor + +var file_xds_type_matcher_v3_domain_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe8, 0x01, 0x0a, 0x11, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, + 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x5d, 0x0a, 0x0f, 0x64, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x44, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0e, 0x64, 0x6f, 0x6d, 0x61, 0x69, + 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x1a, 0x74, 0x0a, 0x0d, 0x44, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x22, 0x0a, 0x07, 0x64, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x3f, + 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, + 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x6e, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x16, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, + 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 
0x2f, 0x76, 0x33, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_type_matcher_v3_domain_proto_rawDescOnce sync.Once + file_xds_type_matcher_v3_domain_proto_rawDescData = file_xds_type_matcher_v3_domain_proto_rawDesc +) + +func file_xds_type_matcher_v3_domain_proto_rawDescGZIP() []byte { + file_xds_type_matcher_v3_domain_proto_rawDescOnce.Do(func() { + file_xds_type_matcher_v3_domain_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_domain_proto_rawDescData) + }) + return file_xds_type_matcher_v3_domain_proto_rawDescData +} + +var file_xds_type_matcher_v3_domain_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_xds_type_matcher_v3_domain_proto_goTypes = []interface{}{ + (*ServerNameMatcher)(nil), // 0: xds.type.matcher.v3.ServerNameMatcher + (*ServerNameMatcher_DomainMatcher)(nil), // 1: xds.type.matcher.v3.ServerNameMatcher.DomainMatcher + (*Matcher_OnMatch)(nil), // 2: xds.type.matcher.v3.Matcher.OnMatch +} +var file_xds_type_matcher_v3_domain_proto_depIdxs = []int32{ + 1, // 0: xds.type.matcher.v3.ServerNameMatcher.domain_matchers:type_name -> xds.type.matcher.v3.ServerNameMatcher.DomainMatcher + 2, // 1: xds.type.matcher.v3.ServerNameMatcher.DomainMatcher.on_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_xds_type_matcher_v3_domain_proto_init() } +func file_xds_type_matcher_v3_domain_proto_init() { + if File_xds_type_matcher_v3_domain_proto != nil { + return + } + file_xds_type_matcher_v3_matcher_proto_init() + if !protoimpl.UnsafeEnabled { + file_xds_type_matcher_v3_domain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerNameMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_domain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerNameMatcher_DomainMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_type_matcher_v3_domain_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_type_matcher_v3_domain_proto_goTypes, + DependencyIndexes: file_xds_type_matcher_v3_domain_proto_depIdxs, + MessageInfos: file_xds_type_matcher_v3_domain_proto_msgTypes, + }.Build() + File_xds_type_matcher_v3_domain_proto = out.File + file_xds_type_matcher_v3_domain_proto_rawDesc = nil + file_xds_type_matcher_v3_domain_proto_goTypes = nil + file_xds_type_matcher_v3_domain_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.validate.go new file mode 100644 index 0000000000..e95bdfa28f --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.validate.go @@ -0,0 +1,315 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: xds/type/matcher/v3/domain.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ServerNameMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ServerNameMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ServerNameMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ServerNameMatcherMultiError, or nil if none found. +func (m *ServerNameMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *ServerNameMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetDomainMatchers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ServerNameMatcherValidationError{ + field: fmt.Sprintf("DomainMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ServerNameMatcherValidationError{ + field: fmt.Sprintf("DomainMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ServerNameMatcherValidationError{ + field: fmt.Sprintf("DomainMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ServerNameMatcherMultiError(errors) + } + + return nil +} + +// ServerNameMatcherMultiError is an error wrapping multiple validation errors +// returned by ServerNameMatcher.ValidateAll() if the designated constraints +// aren't met. +type ServerNameMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ServerNameMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ServerNameMatcherMultiError) AllErrors() []error { return m } + +// ServerNameMatcherValidationError is the validation error returned by +// ServerNameMatcher.Validate if the designated constraints aren't met. +type ServerNameMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ServerNameMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ServerNameMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e ServerNameMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ServerNameMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ServerNameMatcherValidationError) ErrorName() string { + return "ServerNameMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e ServerNameMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sServerNameMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ServerNameMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ServerNameMatcherValidationError{} + +// Validate checks the field values on ServerNameMatcher_DomainMatcher with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ServerNameMatcher_DomainMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ServerNameMatcher_DomainMatcher with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// ServerNameMatcher_DomainMatcherMultiError, or nil if none found. +func (m *ServerNameMatcher_DomainMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *ServerNameMatcher_DomainMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetDomains()) < 1 { + err := ServerNameMatcher_DomainMatcherValidationError{ + field: "Domains", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetOnMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ServerNameMatcher_DomainMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ServerNameMatcher_DomainMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ServerNameMatcher_DomainMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ServerNameMatcher_DomainMatcherMultiError(errors) + } + + return nil +} + +// ServerNameMatcher_DomainMatcherMultiError is an error wrapping multiple +// validation errors returned by ServerNameMatcher_DomainMatcher.ValidateAll() +// if the designated constraints aren't met. +type ServerNameMatcher_DomainMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ServerNameMatcher_DomainMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m ServerNameMatcher_DomainMatcherMultiError) AllErrors() []error { return m } + +// ServerNameMatcher_DomainMatcherValidationError is the validation error +// returned by ServerNameMatcher_DomainMatcher.Validate if the designated +// constraints aren't met. +type ServerNameMatcher_DomainMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ServerNameMatcher_DomainMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ServerNameMatcher_DomainMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ServerNameMatcher_DomainMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ServerNameMatcher_DomainMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ServerNameMatcher_DomainMatcherValidationError) ErrorName() string { + return "ServerNameMatcher_DomainMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e ServerNameMatcher_DomainMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sServerNameMatcher_DomainMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ServerNameMatcher_DomainMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ServerNameMatcher_DomainMatcherValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.go new file mode 100644 index 0000000000..4393bb7e29 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.go @@ -0,0 +1,140 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/type/matcher/v3/http_inputs.proto + +package v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type HttpAttributesCelMatchInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *HttpAttributesCelMatchInput) Reset() { + *x = HttpAttributesCelMatchInput{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_http_inputs_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpAttributesCelMatchInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpAttributesCelMatchInput) ProtoMessage() {} + +func (x *HttpAttributesCelMatchInput) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_http_inputs_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpAttributesCelMatchInput.ProtoReflect.Descriptor instead. +func (*HttpAttributesCelMatchInput) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_http_inputs_proto_rawDescGZIP(), []int{0} +} + +var File_xds_type_matcher_v3_http_inputs_proto protoreflect.FileDescriptor + +var file_xds_type_matcher_v3_http_inputs_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x22, 0x1d, 0x0a, 0x1b, + 0x48, 0x74, 0x74, 0x70, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x65, + 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x42, 0x5f, 0x0a, 0x1e, 0x63, + 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x48, + 0x74, 0x74, 0x70, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, + 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_type_matcher_v3_http_inputs_proto_rawDescOnce sync.Once + file_xds_type_matcher_v3_http_inputs_proto_rawDescData = file_xds_type_matcher_v3_http_inputs_proto_rawDesc +) + +func file_xds_type_matcher_v3_http_inputs_proto_rawDescGZIP() []byte { + file_xds_type_matcher_v3_http_inputs_proto_rawDescOnce.Do(func() { + file_xds_type_matcher_v3_http_inputs_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_http_inputs_proto_rawDescData) + }) + return file_xds_type_matcher_v3_http_inputs_proto_rawDescData +} + +var file_xds_type_matcher_v3_http_inputs_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_xds_type_matcher_v3_http_inputs_proto_goTypes = []interface{}{ + (*HttpAttributesCelMatchInput)(nil), // 0: xds.type.matcher.v3.HttpAttributesCelMatchInput +} +var file_xds_type_matcher_v3_http_inputs_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method 
input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_xds_type_matcher_v3_http_inputs_proto_init() } +func file_xds_type_matcher_v3_http_inputs_proto_init() { + if File_xds_type_matcher_v3_http_inputs_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_type_matcher_v3_http_inputs_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpAttributesCelMatchInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_type_matcher_v3_http_inputs_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_type_matcher_v3_http_inputs_proto_goTypes, + DependencyIndexes: file_xds_type_matcher_v3_http_inputs_proto_depIdxs, + MessageInfos: file_xds_type_matcher_v3_http_inputs_proto_msgTypes, + }.Build() + File_xds_type_matcher_v3_http_inputs_proto = out.File + file_xds_type_matcher_v3_http_inputs_proto_rawDesc = nil + file_xds_type_matcher_v3_http_inputs_proto_goTypes = nil + file_xds_type_matcher_v3_http_inputs_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.validate.go new file mode 100644 index 0000000000..5d87429279 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.validate.go @@ -0,0 +1,139 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/type/matcher/v3/http_inputs.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on HttpAttributesCelMatchInput with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HttpAttributesCelMatchInput) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpAttributesCelMatchInput with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HttpAttributesCelMatchInputMultiError, or nil if none found. +func (m *HttpAttributesCelMatchInput) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpAttributesCelMatchInput) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return HttpAttributesCelMatchInputMultiError(errors) + } + + return nil +} + +// HttpAttributesCelMatchInputMultiError is an error wrapping multiple +// validation errors returned by HttpAttributesCelMatchInput.ValidateAll() if +// the designated constraints aren't met. 
+type HttpAttributesCelMatchInputMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpAttributesCelMatchInputMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpAttributesCelMatchInputMultiError) AllErrors() []error { return m } + +// HttpAttributesCelMatchInputValidationError is the validation error returned +// by HttpAttributesCelMatchInput.Validate if the designated constraints +// aren't met. +type HttpAttributesCelMatchInputValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpAttributesCelMatchInputValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpAttributesCelMatchInputValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpAttributesCelMatchInputValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpAttributesCelMatchInputValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpAttributesCelMatchInputValidationError) ErrorName() string { + return "HttpAttributesCelMatchInputValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpAttributesCelMatchInputValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpAttributesCelMatchInput.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpAttributesCelMatchInputValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpAttributesCelMatchInputValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.go new file mode 100644 index 0000000000..fdb6599461 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.go @@ -0,0 +1,256 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/type/matcher/v3/ip.proto + +package v3 + +import ( + _ "github.com/cncf/xds/go/xds/annotations/v3" + v3 "github.com/cncf/xds/go/xds/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type IPMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RangeMatchers []*IPMatcher_IPRangeMatcher `protobuf:"bytes,1,rep,name=range_matchers,json=rangeMatchers,proto3" json:"range_matchers,omitempty"` +} + +func (x *IPMatcher) Reset() { + *x = IPMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_ip_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IPMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IPMatcher) ProtoMessage() {} + +func (x *IPMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_ip_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IPMatcher.ProtoReflect.Descriptor instead. +func (*IPMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_ip_proto_rawDescGZIP(), []int{0} +} + +func (x *IPMatcher) GetRangeMatchers() []*IPMatcher_IPRangeMatcher { + if x != nil { + return x.RangeMatchers + } + return nil +} + +type IPMatcher_IPRangeMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ranges []*v3.CidrRange `protobuf:"bytes,1,rep,name=ranges,proto3" json:"ranges,omitempty"` + OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"` + Exclusive bool `protobuf:"varint,3,opt,name=exclusive,proto3" json:"exclusive,omitempty"` +} + +func (x *IPMatcher_IPRangeMatcher) Reset() { + *x = IPMatcher_IPRangeMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_ip_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IPMatcher_IPRangeMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IPMatcher_IPRangeMatcher) ProtoMessage() {} + +func (x *IPMatcher_IPRangeMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_ip_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IPMatcher_IPRangeMatcher.ProtoReflect.Descriptor instead. 
+func (*IPMatcher_IPRangeMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_ip_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *IPMatcher_IPRangeMatcher) GetRanges() []*v3.CidrRange { + if x != nil { + return x.Ranges + } + return nil +} + +func (x *IPMatcher_IPRangeMatcher) GetOnMatch() *Matcher_OnMatch { + if x != nil { + return x.OnMatch + } + return nil +} + +func (x *IPMatcher_IPRangeMatcher) GetExclusive() bool { + if x != nil { + return x.Exclusive + } + return false +} + +var File_xds_type_matcher_v3_ip_proto protoreflect.FileDescriptor + +var file_xds_type_matcher_v3_ip_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x69, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, + 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x16, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x63, 0x69, 0x64, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x78, 0x64, + 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, + 0x33, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x02, 0x0a, 0x09, 0x49, 0x50, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x0e, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, + 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x50, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x49, + 0x50, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0d, 0x72, + 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x1a, 0xa9, 0x01, 0x0a, + 0x0e, 0x49, 0x50, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, + 0x38, 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, + 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, + 0x01, 0x52, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, + 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x78, + 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x42, 0x66, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, + 0x08, 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, + 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 
0x65, 0x72, 0x2e, + 0x76, 0x33, 0x42, 0x0e, 0x49, 0x50, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_type_matcher_v3_ip_proto_rawDescOnce sync.Once + file_xds_type_matcher_v3_ip_proto_rawDescData = file_xds_type_matcher_v3_ip_proto_rawDesc +) + +func file_xds_type_matcher_v3_ip_proto_rawDescGZIP() []byte { + file_xds_type_matcher_v3_ip_proto_rawDescOnce.Do(func() { + file_xds_type_matcher_v3_ip_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_ip_proto_rawDescData) + }) + return file_xds_type_matcher_v3_ip_proto_rawDescData +} + +var file_xds_type_matcher_v3_ip_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_xds_type_matcher_v3_ip_proto_goTypes = []interface{}{ + (*IPMatcher)(nil), // 0: xds.type.matcher.v3.IPMatcher + (*IPMatcher_IPRangeMatcher)(nil), // 1: xds.type.matcher.v3.IPMatcher.IPRangeMatcher + (*v3.CidrRange)(nil), // 2: xds.core.v3.CidrRange + (*Matcher_OnMatch)(nil), // 3: xds.type.matcher.v3.Matcher.OnMatch +} +var file_xds_type_matcher_v3_ip_proto_depIdxs = []int32{ + 1, // 0: xds.type.matcher.v3.IPMatcher.range_matchers:type_name -> xds.type.matcher.v3.IPMatcher.IPRangeMatcher + 2, // 1: xds.type.matcher.v3.IPMatcher.IPRangeMatcher.ranges:type_name -> xds.core.v3.CidrRange + 3, // 2: xds.type.matcher.v3.IPMatcher.IPRangeMatcher.on_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_xds_type_matcher_v3_ip_proto_init() } +func file_xds_type_matcher_v3_ip_proto_init() { + if File_xds_type_matcher_v3_ip_proto != nil { + return + } + file_xds_type_matcher_v3_matcher_proto_init() + if !protoimpl.UnsafeEnabled { + file_xds_type_matcher_v3_ip_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IPMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_ip_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IPMatcher_IPRangeMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_type_matcher_v3_ip_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_type_matcher_v3_ip_proto_goTypes, + DependencyIndexes: file_xds_type_matcher_v3_ip_proto_depIdxs, + MessageInfos: file_xds_type_matcher_v3_ip_proto_msgTypes, + }.Build() + File_xds_type_matcher_v3_ip_proto = out.File + file_xds_type_matcher_v3_ip_proto_rawDesc = nil + file_xds_type_matcher_v3_ip_proto_goTypes = nil + file_xds_type_matcher_v3_ip_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.validate.go 
b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.validate.go new file mode 100644 index 0000000000..c1fca03bcb --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.validate.go @@ -0,0 +1,347 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/type/matcher/v3/ip.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on IPMatcher with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *IPMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on IPMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in IPMatcherMultiError, or nil +// if none found. +func (m *IPMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *IPMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetRangeMatchers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, IPMatcherValidationError{ + field: fmt.Sprintf("RangeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, IPMatcherValidationError{ + field: fmt.Sprintf("RangeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return IPMatcherValidationError{ + field: fmt.Sprintf("RangeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return IPMatcherMultiError(errors) + } + + return nil +} + +// IPMatcherMultiError is an error wrapping multiple validation errors returned +// by IPMatcher.ValidateAll() if the designated constraints aren't met. +type IPMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m IPMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m IPMatcherMultiError) AllErrors() []error { return m } + +// IPMatcherValidationError is the validation error returned by +// IPMatcher.Validate if the designated constraints aren't met. +type IPMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e IPMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e IPMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e IPMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e IPMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e IPMatcherValidationError) ErrorName() string { return "IPMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e IPMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sIPMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = IPMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = IPMatcherValidationError{} + +// Validate checks the field values on IPMatcher_IPRangeMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *IPMatcher_IPRangeMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on IPMatcher_IPRangeMatcher with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// IPMatcher_IPRangeMatcherMultiError, or nil if none found. +func (m *IPMatcher_IPRangeMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *IPMatcher_IPRangeMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetRanges()) < 1 { + err := IPMatcher_IPRangeMatcherValidationError{ + field: "Ranges", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRanges() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, IPMatcher_IPRangeMatcherValidationError{ + field: fmt.Sprintf("Ranges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, IPMatcher_IPRangeMatcherValidationError{ + field: fmt.Sprintf("Ranges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return IPMatcher_IPRangeMatcherValidationError{ + field: fmt.Sprintf("Ranges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetOnMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, IPMatcher_IPRangeMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, IPMatcher_IPRangeMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok { + if err := 
v.Validate(); err != nil { + return IPMatcher_IPRangeMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Exclusive + + if len(errors) > 0 { + return IPMatcher_IPRangeMatcherMultiError(errors) + } + + return nil +} + +// IPMatcher_IPRangeMatcherMultiError is an error wrapping multiple validation +// errors returned by IPMatcher_IPRangeMatcher.ValidateAll() if the designated +// constraints aren't met. +type IPMatcher_IPRangeMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m IPMatcher_IPRangeMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m IPMatcher_IPRangeMatcherMultiError) AllErrors() []error { return m } + +// IPMatcher_IPRangeMatcherValidationError is the validation error returned by +// IPMatcher_IPRangeMatcher.Validate if the designated constraints aren't met. +type IPMatcher_IPRangeMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e IPMatcher_IPRangeMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e IPMatcher_IPRangeMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e IPMatcher_IPRangeMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e IPMatcher_IPRangeMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e IPMatcher_IPRangeMatcherValidationError) ErrorName() string { + return "IPMatcher_IPRangeMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e IPMatcher_IPRangeMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sIPMatcher_IPRangeMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = IPMatcher_IPRangeMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = IPMatcher_IPRangeMatcherValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.go new file mode 100644 index 0000000000..d94b03b559 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.go @@ -0,0 +1,1056 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/type/matcher/v3/matcher.proto + +package v3 + +import ( + v3 "github.com/cncf/xds/go/xds/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Matcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to MatcherType: + // + // *Matcher_MatcherList_ + // *Matcher_MatcherTree_ + MatcherType isMatcher_MatcherType `protobuf_oneof:"matcher_type"` + OnNoMatch *Matcher_OnMatch `protobuf:"bytes,3,opt,name=on_no_match,json=onNoMatch,proto3" json:"on_no_match,omitempty"` +} + +func (x *Matcher) Reset() { + *x = Matcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher) ProtoMessage() {} + +func (x *Matcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher.ProtoReflect.Descriptor instead. +func (*Matcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0} +} + +func (m *Matcher) GetMatcherType() isMatcher_MatcherType { + if m != nil { + return m.MatcherType + } + return nil +} + +func (x *Matcher) GetMatcherList() *Matcher_MatcherList { + if x, ok := x.GetMatcherType().(*Matcher_MatcherList_); ok { + return x.MatcherList + } + return nil +} + +func (x *Matcher) GetMatcherTree() *Matcher_MatcherTree { + if x, ok := x.GetMatcherType().(*Matcher_MatcherTree_); ok { + return x.MatcherTree + } + return nil +} + +func (x *Matcher) GetOnNoMatch() *Matcher_OnMatch { + if x != nil { + return x.OnNoMatch + } + return nil +} + +type isMatcher_MatcherType interface { + isMatcher_MatcherType() +} + +type Matcher_MatcherList_ struct { + MatcherList *Matcher_MatcherList `protobuf:"bytes,1,opt,name=matcher_list,json=matcherList,proto3,oneof"` +} + +type Matcher_MatcherTree_ struct { + MatcherTree *Matcher_MatcherTree `protobuf:"bytes,2,opt,name=matcher_tree,json=matcherTree,proto3,oneof"` +} + +func (*Matcher_MatcherList_) isMatcher_MatcherType() {} + +func (*Matcher_MatcherTree_) isMatcher_MatcherType() {} + +type Matcher_OnMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to OnMatch: + // + // *Matcher_OnMatch_Matcher + // *Matcher_OnMatch_Action + OnMatch isMatcher_OnMatch_OnMatch `protobuf_oneof:"on_match"` +} + +func (x *Matcher_OnMatch) Reset() { + *x = Matcher_OnMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_OnMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_OnMatch) ProtoMessage() {} + +func (x *Matcher_OnMatch) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_OnMatch.ProtoReflect.Descriptor instead. 
+func (*Matcher_OnMatch) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 0} +} + +func (m *Matcher_OnMatch) GetOnMatch() isMatcher_OnMatch_OnMatch { + if m != nil { + return m.OnMatch + } + return nil +} + +func (x *Matcher_OnMatch) GetMatcher() *Matcher { + if x, ok := x.GetOnMatch().(*Matcher_OnMatch_Matcher); ok { + return x.Matcher + } + return nil +} + +func (x *Matcher_OnMatch) GetAction() *v3.TypedExtensionConfig { + if x, ok := x.GetOnMatch().(*Matcher_OnMatch_Action); ok { + return x.Action + } + return nil +} + +type isMatcher_OnMatch_OnMatch interface { + isMatcher_OnMatch_OnMatch() +} + +type Matcher_OnMatch_Matcher struct { + Matcher *Matcher `protobuf:"bytes,1,opt,name=matcher,proto3,oneof"` +} + +type Matcher_OnMatch_Action struct { + Action *v3.TypedExtensionConfig `protobuf:"bytes,2,opt,name=action,proto3,oneof"` +} + +func (*Matcher_OnMatch_Matcher) isMatcher_OnMatch_OnMatch() {} + +func (*Matcher_OnMatch_Action) isMatcher_OnMatch_OnMatch() {} + +type Matcher_MatcherList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Matchers []*Matcher_MatcherList_FieldMatcher `protobuf:"bytes,1,rep,name=matchers,proto3" json:"matchers,omitempty"` +} + +func (x *Matcher_MatcherList) Reset() { + *x = Matcher_MatcherList{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherList) ProtoMessage() {} + +func (x *Matcher_MatcherList) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherList.ProtoReflect.Descriptor instead. 
+func (*Matcher_MatcherList) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *Matcher_MatcherList) GetMatchers() []*Matcher_MatcherList_FieldMatcher { + if x != nil { + return x.Matchers + } + return nil +} + +type Matcher_MatcherTree struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Input *v3.TypedExtensionConfig `protobuf:"bytes,1,opt,name=input,proto3" json:"input,omitempty"` + // Types that are assignable to TreeType: + // + // *Matcher_MatcherTree_ExactMatchMap + // *Matcher_MatcherTree_PrefixMatchMap + // *Matcher_MatcherTree_CustomMatch + TreeType isMatcher_MatcherTree_TreeType `protobuf_oneof:"tree_type"` +} + +func (x *Matcher_MatcherTree) Reset() { + *x = Matcher_MatcherTree{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherTree) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherTree) ProtoMessage() {} + +func (x *Matcher_MatcherTree) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherTree.ProtoReflect.Descriptor instead. +func (*Matcher_MatcherTree) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 2} +} + +func (x *Matcher_MatcherTree) GetInput() *v3.TypedExtensionConfig { + if x != nil { + return x.Input + } + return nil +} + +func (m *Matcher_MatcherTree) GetTreeType() isMatcher_MatcherTree_TreeType { + if m != nil { + return m.TreeType + } + return nil +} + +func (x *Matcher_MatcherTree) GetExactMatchMap() *Matcher_MatcherTree_MatchMap { + if x, ok := x.GetTreeType().(*Matcher_MatcherTree_ExactMatchMap); ok { + return x.ExactMatchMap + } + return nil +} + +func (x *Matcher_MatcherTree) GetPrefixMatchMap() *Matcher_MatcherTree_MatchMap { + if x, ok := x.GetTreeType().(*Matcher_MatcherTree_PrefixMatchMap); ok { + return x.PrefixMatchMap + } + return nil +} + +func (x *Matcher_MatcherTree) GetCustomMatch() *v3.TypedExtensionConfig { + if x, ok := x.GetTreeType().(*Matcher_MatcherTree_CustomMatch); ok { + return x.CustomMatch + } + return nil +} + +type isMatcher_MatcherTree_TreeType interface { + isMatcher_MatcherTree_TreeType() +} + +type Matcher_MatcherTree_ExactMatchMap struct { + ExactMatchMap *Matcher_MatcherTree_MatchMap `protobuf:"bytes,2,opt,name=exact_match_map,json=exactMatchMap,proto3,oneof"` +} + +type Matcher_MatcherTree_PrefixMatchMap struct { + PrefixMatchMap *Matcher_MatcherTree_MatchMap `protobuf:"bytes,3,opt,name=prefix_match_map,json=prefixMatchMap,proto3,oneof"` +} + +type Matcher_MatcherTree_CustomMatch struct { + CustomMatch *v3.TypedExtensionConfig `protobuf:"bytes,4,opt,name=custom_match,json=customMatch,proto3,oneof"` +} + +func (*Matcher_MatcherTree_ExactMatchMap) isMatcher_MatcherTree_TreeType() {} + +func (*Matcher_MatcherTree_PrefixMatchMap) isMatcher_MatcherTree_TreeType() {} + +func (*Matcher_MatcherTree_CustomMatch) isMatcher_MatcherTree_TreeType() {} + +type Matcher_MatcherList_Predicate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields 
protoimpl.UnknownFields + + // Types that are assignable to MatchType: + // + // *Matcher_MatcherList_Predicate_SinglePredicate_ + // *Matcher_MatcherList_Predicate_OrMatcher + // *Matcher_MatcherList_Predicate_AndMatcher + // *Matcher_MatcherList_Predicate_NotMatcher + MatchType isMatcher_MatcherList_Predicate_MatchType `protobuf_oneof:"match_type"` +} + +func (x *Matcher_MatcherList_Predicate) Reset() { + *x = Matcher_MatcherList_Predicate{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherList_Predicate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherList_Predicate) ProtoMessage() {} + +func (x *Matcher_MatcherList_Predicate) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherList_Predicate.ProtoReflect.Descriptor instead. +func (*Matcher_MatcherList_Predicate) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1, 0} +} + +func (m *Matcher_MatcherList_Predicate) GetMatchType() isMatcher_MatcherList_Predicate_MatchType { + if m != nil { + return m.MatchType + } + return nil +} + +func (x *Matcher_MatcherList_Predicate) GetSinglePredicate() *Matcher_MatcherList_Predicate_SinglePredicate { + if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_SinglePredicate_); ok { + return x.SinglePredicate + } + return nil +} + +func (x *Matcher_MatcherList_Predicate) GetOrMatcher() *Matcher_MatcherList_Predicate_PredicateList { + if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_OrMatcher); ok { + return x.OrMatcher + } + return nil +} + +func (x *Matcher_MatcherList_Predicate) GetAndMatcher() *Matcher_MatcherList_Predicate_PredicateList { + if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_AndMatcher); ok { + return x.AndMatcher + } + return nil +} + +func (x *Matcher_MatcherList_Predicate) GetNotMatcher() *Matcher_MatcherList_Predicate { + if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_NotMatcher); ok { + return x.NotMatcher + } + return nil +} + +type isMatcher_MatcherList_Predicate_MatchType interface { + isMatcher_MatcherList_Predicate_MatchType() +} + +type Matcher_MatcherList_Predicate_SinglePredicate_ struct { + SinglePredicate *Matcher_MatcherList_Predicate_SinglePredicate `protobuf:"bytes,1,opt,name=single_predicate,json=singlePredicate,proto3,oneof"` +} + +type Matcher_MatcherList_Predicate_OrMatcher struct { + OrMatcher *Matcher_MatcherList_Predicate_PredicateList `protobuf:"bytes,2,opt,name=or_matcher,json=orMatcher,proto3,oneof"` +} + +type Matcher_MatcherList_Predicate_AndMatcher struct { + AndMatcher *Matcher_MatcherList_Predicate_PredicateList `protobuf:"bytes,3,opt,name=and_matcher,json=andMatcher,proto3,oneof"` +} + +type Matcher_MatcherList_Predicate_NotMatcher struct { + NotMatcher *Matcher_MatcherList_Predicate `protobuf:"bytes,4,opt,name=not_matcher,json=notMatcher,proto3,oneof"` +} + +func (*Matcher_MatcherList_Predicate_SinglePredicate_) isMatcher_MatcherList_Predicate_MatchType() {} + +func (*Matcher_MatcherList_Predicate_OrMatcher) isMatcher_MatcherList_Predicate_MatchType() {} + +func 
(*Matcher_MatcherList_Predicate_AndMatcher) isMatcher_MatcherList_Predicate_MatchType() {} + +func (*Matcher_MatcherList_Predicate_NotMatcher) isMatcher_MatcherList_Predicate_MatchType() {} + +type Matcher_MatcherList_FieldMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Predicate *Matcher_MatcherList_Predicate `protobuf:"bytes,1,opt,name=predicate,proto3" json:"predicate,omitempty"` + OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"` +} + +func (x *Matcher_MatcherList_FieldMatcher) Reset() { + *x = Matcher_MatcherList_FieldMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherList_FieldMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherList_FieldMatcher) ProtoMessage() {} + +func (x *Matcher_MatcherList_FieldMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherList_FieldMatcher.ProtoReflect.Descriptor instead. +func (*Matcher_MatcherList_FieldMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1, 1} +} + +func (x *Matcher_MatcherList_FieldMatcher) GetPredicate() *Matcher_MatcherList_Predicate { + if x != nil { + return x.Predicate + } + return nil +} + +func (x *Matcher_MatcherList_FieldMatcher) GetOnMatch() *Matcher_OnMatch { + if x != nil { + return x.OnMatch + } + return nil +} + +type Matcher_MatcherList_Predicate_SinglePredicate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Input *v3.TypedExtensionConfig `protobuf:"bytes,1,opt,name=input,proto3" json:"input,omitempty"` + // Types that are assignable to Matcher: + // + // *Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch + // *Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch + Matcher isMatcher_MatcherList_Predicate_SinglePredicate_Matcher `protobuf_oneof:"matcher"` +} + +func (x *Matcher_MatcherList_Predicate_SinglePredicate) Reset() { + *x = Matcher_MatcherList_Predicate_SinglePredicate{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherList_Predicate_SinglePredicate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherList_Predicate_SinglePredicate) ProtoMessage() {} + +func (x *Matcher_MatcherList_Predicate_SinglePredicate) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherList_Predicate_SinglePredicate.ProtoReflect.Descriptor instead. 
+func (*Matcher_MatcherList_Predicate_SinglePredicate) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1, 0, 0} +} + +func (x *Matcher_MatcherList_Predicate_SinglePredicate) GetInput() *v3.TypedExtensionConfig { + if x != nil { + return x.Input + } + return nil +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate) GetMatcher() isMatcher_MatcherList_Predicate_SinglePredicate_Matcher { + if m != nil { + return m.Matcher + } + return nil +} + +func (x *Matcher_MatcherList_Predicate_SinglePredicate) GetValueMatch() *StringMatcher { + if x, ok := x.GetMatcher().(*Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch); ok { + return x.ValueMatch + } + return nil +} + +func (x *Matcher_MatcherList_Predicate_SinglePredicate) GetCustomMatch() *v3.TypedExtensionConfig { + if x, ok := x.GetMatcher().(*Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch); ok { + return x.CustomMatch + } + return nil +} + +type isMatcher_MatcherList_Predicate_SinglePredicate_Matcher interface { + isMatcher_MatcherList_Predicate_SinglePredicate_Matcher() +} + +type Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch struct { + ValueMatch *StringMatcher `protobuf:"bytes,2,opt,name=value_match,json=valueMatch,proto3,oneof"` +} + +type Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch struct { + CustomMatch *v3.TypedExtensionConfig `protobuf:"bytes,3,opt,name=custom_match,json=customMatch,proto3,oneof"` +} + +func (*Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch) isMatcher_MatcherList_Predicate_SinglePredicate_Matcher() { +} + +func (*Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch) isMatcher_MatcherList_Predicate_SinglePredicate_Matcher() { +} + +type Matcher_MatcherList_Predicate_PredicateList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Predicate []*Matcher_MatcherList_Predicate `protobuf:"bytes,1,rep,name=predicate,proto3" json:"predicate,omitempty"` +} + +func (x *Matcher_MatcherList_Predicate_PredicateList) Reset() { + *x = Matcher_MatcherList_Predicate_PredicateList{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherList_Predicate_PredicateList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherList_Predicate_PredicateList) ProtoMessage() {} + +func (x *Matcher_MatcherList_Predicate_PredicateList) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherList_Predicate_PredicateList.ProtoReflect.Descriptor instead. 
+func (*Matcher_MatcherList_Predicate_PredicateList) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1, 0, 1} +} + +func (x *Matcher_MatcherList_Predicate_PredicateList) GetPredicate() []*Matcher_MatcherList_Predicate { + if x != nil { + return x.Predicate + } + return nil +} + +type Matcher_MatcherTree_MatchMap struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Map map[string]*Matcher_OnMatch `protobuf:"bytes,1,rep,name=map,proto3" json:"map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Matcher_MatcherTree_MatchMap) Reset() { + *x = Matcher_MatcherTree_MatchMap{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherTree_MatchMap) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherTree_MatchMap) ProtoMessage() {} + +func (x *Matcher_MatcherTree_MatchMap) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherTree_MatchMap.ProtoReflect.Descriptor instead. +func (*Matcher_MatcherTree_MatchMap) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 2, 0} +} + +func (x *Matcher_MatcherTree_MatchMap) GetMap() map[string]*Matcher_OnMatch { + if x != nil { + return x.Map + } + return nil +} + +var File_xds_type_matcher_v3_matcher_proto protoreflect.FileDescriptor + +var file_xds_type_matcher_v3_matcher_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1b, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xf6, 0x0f, 0x0a, 0x07, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4d, 0x0a, 0x0c, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x0c, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x28, 0x2e, 0x78, 
0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x09, 0x6f, 0x6e, 0x4e, 0x6f, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x1a, 0x91, 0x01, 0x0a, 0x07, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x38, 0x0a, 0x07, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x12, + 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xb6, 0x08, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x4c, 0x69, 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x73, 0x1a, 0x91, 0x06, 0x0a, 0x09, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, + 0x6f, 0x0a, 0x10, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x78, 0x64, 0x73, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, + 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x69, + 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, + 0x0f, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x12, 0x61, 0x0a, 0x0a, 0x6f, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, + 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 
0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x09, 0x6f, 0x72, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x12, 0x63, 0x0a, 0x0b, 0x61, 0x6e, 0x64, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, + 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x6e, + 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0b, 0x6e, 0x6f, 0x74, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, + 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x48, 0x00, 0x52, 0x0a, 0x6e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x1a, + 0xf3, 0x01, 0x0a, 0x0f, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x12, 0x41, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x45, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x78, 0x64, + 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, + 0x00, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x46, 0x0a, + 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x6b, 0x0a, 0x0d, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x5a, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, + 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x42, 0x11, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 
0x79, 0x70, 0x65, + 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xb5, 0x01, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x5a, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, + 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x12, 0x49, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, + 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0xa9, 0x04, + 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x12, 0x41, 0x0a, + 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, + 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x12, 0x5b, 0x0a, 0x0f, 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, + 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, + 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x0d, + 0x65, 0x78, 0x61, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x12, 0x5d, 0x0a, + 0x10, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6d, 0x61, + 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, + 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x0e, 0x70, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x12, 0x46, 0x0a, 0x0c, + 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x1a, 0xc0, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, + 0x70, 0x12, 0x56, 0x0a, 0x03, 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, + 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 
0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, + 0x70, 0x2e, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x9a, + 0x01, 0x02, 0x08, 0x01, 0x52, 0x03, 0x6d, 0x61, 0x70, 0x1a, 0x5c, 0x0a, 0x08, 0x4d, 0x61, 0x70, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x09, 0x74, 0x72, 0x65, 0x65, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x42, 0x5c, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, + 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_type_matcher_v3_matcher_proto_rawDescOnce sync.Once + file_xds_type_matcher_v3_matcher_proto_rawDescData = file_xds_type_matcher_v3_matcher_proto_rawDesc +) + +func file_xds_type_matcher_v3_matcher_proto_rawDescGZIP() []byte { + file_xds_type_matcher_v3_matcher_proto_rawDescOnce.Do(func() { + file_xds_type_matcher_v3_matcher_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_matcher_proto_rawDescData) + }) + return file_xds_type_matcher_v3_matcher_proto_rawDescData +} + +var file_xds_type_matcher_v3_matcher_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_xds_type_matcher_v3_matcher_proto_goTypes = []interface{}{ + (*Matcher)(nil), // 0: xds.type.matcher.v3.Matcher + (*Matcher_OnMatch)(nil), // 1: xds.type.matcher.v3.Matcher.OnMatch + (*Matcher_MatcherList)(nil), // 2: xds.type.matcher.v3.Matcher.MatcherList + (*Matcher_MatcherTree)(nil), // 3: xds.type.matcher.v3.Matcher.MatcherTree + (*Matcher_MatcherList_Predicate)(nil), // 4: xds.type.matcher.v3.Matcher.MatcherList.Predicate + (*Matcher_MatcherList_FieldMatcher)(nil), // 5: xds.type.matcher.v3.Matcher.MatcherList.FieldMatcher + (*Matcher_MatcherList_Predicate_SinglePredicate)(nil), // 6: xds.type.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate + (*Matcher_MatcherList_Predicate_PredicateList)(nil), // 7: xds.type.matcher.v3.Matcher.MatcherList.Predicate.PredicateList + (*Matcher_MatcherTree_MatchMap)(nil), // 8: xds.type.matcher.v3.Matcher.MatcherTree.MatchMap + nil, // 9: xds.type.matcher.v3.Matcher.MatcherTree.MatchMap.MapEntry + (*v3.TypedExtensionConfig)(nil), // 10: xds.core.v3.TypedExtensionConfig + (*StringMatcher)(nil), // 11: xds.type.matcher.v3.StringMatcher +} +var file_xds_type_matcher_v3_matcher_proto_depIdxs = []int32{ + 2, // 0: 
xds.type.matcher.v3.Matcher.matcher_list:type_name -> xds.type.matcher.v3.Matcher.MatcherList + 3, // 1: xds.type.matcher.v3.Matcher.matcher_tree:type_name -> xds.type.matcher.v3.Matcher.MatcherTree + 1, // 2: xds.type.matcher.v3.Matcher.on_no_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch + 0, // 3: xds.type.matcher.v3.Matcher.OnMatch.matcher:type_name -> xds.type.matcher.v3.Matcher + 10, // 4: xds.type.matcher.v3.Matcher.OnMatch.action:type_name -> xds.core.v3.TypedExtensionConfig + 5, // 5: xds.type.matcher.v3.Matcher.MatcherList.matchers:type_name -> xds.type.matcher.v3.Matcher.MatcherList.FieldMatcher + 10, // 6: xds.type.matcher.v3.Matcher.MatcherTree.input:type_name -> xds.core.v3.TypedExtensionConfig + 8, // 7: xds.type.matcher.v3.Matcher.MatcherTree.exact_match_map:type_name -> xds.type.matcher.v3.Matcher.MatcherTree.MatchMap + 8, // 8: xds.type.matcher.v3.Matcher.MatcherTree.prefix_match_map:type_name -> xds.type.matcher.v3.Matcher.MatcherTree.MatchMap + 10, // 9: xds.type.matcher.v3.Matcher.MatcherTree.custom_match:type_name -> xds.core.v3.TypedExtensionConfig + 6, // 10: xds.type.matcher.v3.Matcher.MatcherList.Predicate.single_predicate:type_name -> xds.type.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate + 7, // 11: xds.type.matcher.v3.Matcher.MatcherList.Predicate.or_matcher:type_name -> xds.type.matcher.v3.Matcher.MatcherList.Predicate.PredicateList + 7, // 12: xds.type.matcher.v3.Matcher.MatcherList.Predicate.and_matcher:type_name -> xds.type.matcher.v3.Matcher.MatcherList.Predicate.PredicateList + 4, // 13: xds.type.matcher.v3.Matcher.MatcherList.Predicate.not_matcher:type_name -> xds.type.matcher.v3.Matcher.MatcherList.Predicate + 4, // 14: xds.type.matcher.v3.Matcher.MatcherList.FieldMatcher.predicate:type_name -> xds.type.matcher.v3.Matcher.MatcherList.Predicate + 1, // 15: xds.type.matcher.v3.Matcher.MatcherList.FieldMatcher.on_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch + 10, // 16: xds.type.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate.input:type_name -> xds.core.v3.TypedExtensionConfig + 11, // 17: xds.type.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate.value_match:type_name -> xds.type.matcher.v3.StringMatcher + 10, // 18: xds.type.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate.custom_match:type_name -> xds.core.v3.TypedExtensionConfig + 4, // 19: xds.type.matcher.v3.Matcher.MatcherList.Predicate.PredicateList.predicate:type_name -> xds.type.matcher.v3.Matcher.MatcherList.Predicate + 9, // 20: xds.type.matcher.v3.Matcher.MatcherTree.MatchMap.map:type_name -> xds.type.matcher.v3.Matcher.MatcherTree.MatchMap.MapEntry + 1, // 21: xds.type.matcher.v3.Matcher.MatcherTree.MatchMap.MapEntry.value:type_name -> xds.type.matcher.v3.Matcher.OnMatch + 22, // [22:22] is the sub-list for method output_type + 22, // [22:22] is the sub-list for method input_type + 22, // [22:22] is the sub-list for extension type_name + 22, // [22:22] is the sub-list for extension extendee + 0, // [0:22] is the sub-list for field type_name +} + +func init() { file_xds_type_matcher_v3_matcher_proto_init() } +func file_xds_type_matcher_v3_matcher_proto_init() { + if File_xds_type_matcher_v3_matcher_proto != nil { + return + } + file_xds_type_matcher_v3_string_proto_init() + if !protoimpl.UnsafeEnabled { + file_xds_type_matcher_v3_matcher_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_OnMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherTree); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherList_Predicate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherList_FieldMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherList_Predicate_SinglePredicate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherList_Predicate_PredicateList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherTree_MatchMap); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Matcher_MatcherList_)(nil), + (*Matcher_MatcherTree_)(nil), + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*Matcher_OnMatch_Matcher)(nil), + (*Matcher_OnMatch_Action)(nil), + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*Matcher_MatcherTree_ExactMatchMap)(nil), + (*Matcher_MatcherTree_PrefixMatchMap)(nil), + (*Matcher_MatcherTree_CustomMatch)(nil), + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[4].OneofWrappers = []interface{}{ + (*Matcher_MatcherList_Predicate_SinglePredicate_)(nil), + (*Matcher_MatcherList_Predicate_OrMatcher)(nil), + (*Matcher_MatcherList_Predicate_AndMatcher)(nil), + (*Matcher_MatcherList_Predicate_NotMatcher)(nil), + } + file_xds_type_matcher_v3_matcher_proto_msgTypes[6].OneofWrappers = []interface{}{ + (*Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch)(nil), + (*Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_xds_type_matcher_v3_matcher_proto_rawDesc, + NumEnums: 0, + NumMessages: 10, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_type_matcher_v3_matcher_proto_goTypes, + DependencyIndexes: file_xds_type_matcher_v3_matcher_proto_depIdxs, + MessageInfos: file_xds_type_matcher_v3_matcher_proto_msgTypes, + }.Build() + File_xds_type_matcher_v3_matcher_proto = out.File + file_xds_type_matcher_v3_matcher_proto_rawDesc = nil + file_xds_type_matcher_v3_matcher_proto_goTypes = nil + file_xds_type_matcher_v3_matcher_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.validate.go new file mode 100644 index 0000000000..60b721f5f5 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.validate.go @@ -0,0 +1,1913 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/type/matcher/v3/matcher.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Matcher with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Matcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in MatcherMultiError, or nil if none found. 
+func (m *Matcher) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetOnNoMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatcherValidationError{ + field: "OnNoMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatcherValidationError{ + field: "OnNoMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOnNoMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatcherValidationError{ + field: "OnNoMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.MatcherType.(type) { + case *Matcher_MatcherList_: + if v == nil { + err := MatcherValidationError{ + field: "MatcherType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetMatcherList()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatcherValidationError{ + field: "MatcherList", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatcherValidationError{ + field: "MatcherList", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatcherList()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatcherValidationError{ + field: "MatcherList", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherTree_: + if v == nil { + err := MatcherValidationError{ + field: "MatcherType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetMatcherTree()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatcherValidationError{ + field: "MatcherTree", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatcherValidationError{ + field: "MatcherTree", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatcherTree()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatcherValidationError{ + field: "MatcherTree", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return MatcherMultiError(errors) + } + + return nil +} + +// MatcherMultiError is an error wrapping multiple validation errors returned +// by Matcher.ValidateAll() if the designated constraints aren't met. +type MatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m MatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MatcherMultiError) AllErrors() []error { return m } + +// MatcherValidationError is the validation error returned by Matcher.Validate +// if the designated constraints aren't met. +type MatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MatcherValidationError) ErrorName() string { return "MatcherValidationError" } + +// Error satisfies the builtin error interface +func (e MatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MatcherValidationError{} + +// Validate checks the field values on Matcher_OnMatch with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *Matcher_OnMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher_OnMatch with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Matcher_OnMatchMultiError, or nil if none found. 
+func (m *Matcher_OnMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_OnMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofOnMatchPresent := false + switch v := m.OnMatch.(type) { + case *Matcher_OnMatch_Matcher: + if v == nil { + err := Matcher_OnMatchValidationError{ + field: "OnMatch", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOnMatchPresent = true + + if all { + switch v := interface{}(m.GetMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_OnMatchValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_OnMatchValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_OnMatchValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_OnMatch_Action: + if v == nil { + err := Matcher_OnMatchValidationError{ + field: "OnMatch", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOnMatchPresent = true + + if all { + switch v := interface{}(m.GetAction()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_OnMatchValidationError{ + field: "Action", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_OnMatchValidationError{ + field: "Action", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAction()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_OnMatchValidationError{ + field: "Action", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofOnMatchPresent { + err := Matcher_OnMatchValidationError{ + field: "OnMatch", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Matcher_OnMatchMultiError(errors) + } + + return nil +} + +// Matcher_OnMatchMultiError is an error wrapping multiple validation errors +// returned by Matcher_OnMatch.ValidateAll() if the designated constraints +// aren't met. +type Matcher_OnMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_OnMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_OnMatchMultiError) AllErrors() []error { return m } + +// Matcher_OnMatchValidationError is the validation error returned by +// Matcher_OnMatch.Validate if the designated constraints aren't met. +type Matcher_OnMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e Matcher_OnMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_OnMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Matcher_OnMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_OnMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_OnMatchValidationError) ErrorName() string { return "Matcher_OnMatchValidationError" } + +// Error satisfies the builtin error interface +func (e Matcher_OnMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_OnMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_OnMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_OnMatchValidationError{} + +// Validate checks the field values on Matcher_MatcherList with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Matcher_MatcherList) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher_MatcherList with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Matcher_MatcherListMultiError, or nil if none found. +func (m *Matcher_MatcherList) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherList) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetMatchers()) < 1 { + err := Matcher_MatcherListValidationError{ + field: "Matchers", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetMatchers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherListValidationError{ + field: fmt.Sprintf("Matchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherListValidationError{ + field: fmt.Sprintf("Matchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherListValidationError{ + field: fmt.Sprintf("Matchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return Matcher_MatcherListMultiError(errors) + } + + return nil +} + +// Matcher_MatcherListMultiError is an error wrapping multiple validation +// errors returned by Matcher_MatcherList.ValidateAll() if the designated +// constraints aren't met. +type Matcher_MatcherListMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m Matcher_MatcherListMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherListMultiError) AllErrors() []error { return m } + +// Matcher_MatcherListValidationError is the validation error returned by +// Matcher_MatcherList.Validate if the designated constraints aren't met. +type Matcher_MatcherListValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_MatcherListValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherListValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Matcher_MatcherListValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherListValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_MatcherListValidationError) ErrorName() string { + return "Matcher_MatcherListValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherListValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherList.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherListValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherListValidationError{} + +// Validate checks the field values on Matcher_MatcherTree with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Matcher_MatcherTree) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher_MatcherTree with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Matcher_MatcherTreeMultiError, or nil if none found. 
+func (m *Matcher_MatcherTree) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherTree) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetInput() == nil { + err := Matcher_MatcherTreeValidationError{ + field: "Input", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetInput()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "Input", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "Input", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInput()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherTreeValidationError{ + field: "Input", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofTreeTypePresent := false + switch v := m.TreeType.(type) { + case *Matcher_MatcherTree_ExactMatchMap: + if v == nil { + err := Matcher_MatcherTreeValidationError{ + field: "TreeType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTreeTypePresent = true + + if all { + switch v := interface{}(m.GetExactMatchMap()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "ExactMatchMap", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "ExactMatchMap", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExactMatchMap()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherTreeValidationError{ + field: "ExactMatchMap", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherTree_PrefixMatchMap: + if v == nil { + err := Matcher_MatcherTreeValidationError{ + field: "TreeType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTreeTypePresent = true + + if all { + switch v := interface{}(m.GetPrefixMatchMap()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "PrefixMatchMap", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "PrefixMatchMap", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPrefixMatchMap()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherTreeValidationError{ + field: "PrefixMatchMap", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherTree_CustomMatch: + if v == nil { + err := 
Matcher_MatcherTreeValidationError{ + field: "TreeType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTreeTypePresent = true + + if all { + switch v := interface{}(m.GetCustomMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "CustomMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "CustomMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCustomMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherTreeValidationError{ + field: "CustomMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofTreeTypePresent { + err := Matcher_MatcherTreeValidationError{ + field: "TreeType", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Matcher_MatcherTreeMultiError(errors) + } + + return nil +} + +// Matcher_MatcherTreeMultiError is an error wrapping multiple validation +// errors returned by Matcher_MatcherTree.ValidateAll() if the designated +// constraints aren't met. +type Matcher_MatcherTreeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherTreeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherTreeMultiError) AllErrors() []error { return m } + +// Matcher_MatcherTreeValidationError is the validation error returned by +// Matcher_MatcherTree.Validate if the designated constraints aren't met. +type Matcher_MatcherTreeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_MatcherTreeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherTreeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Matcher_MatcherTreeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherTreeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Matcher_MatcherTreeValidationError) ErrorName() string { + return "Matcher_MatcherTreeValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherTreeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherTree.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherTreeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherTreeValidationError{} + +// Validate checks the field values on Matcher_MatcherList_Predicate with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Matcher_MatcherList_Predicate) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher_MatcherList_Predicate with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// Matcher_MatcherList_PredicateMultiError, or nil if none found. +func (m *Matcher_MatcherList_Predicate) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherList_Predicate) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofMatchTypePresent := false + switch v := m.MatchType.(type) { + case *Matcher_MatcherList_Predicate_SinglePredicate_: + if v == nil { + err := Matcher_MatcherList_PredicateValidationError{ + field: "MatchType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchTypePresent = true + + if all { + switch v := interface{}(m.GetSinglePredicate()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "SinglePredicate", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "SinglePredicate", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSinglePredicate()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_PredicateValidationError{ + field: "SinglePredicate", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherList_Predicate_OrMatcher: + if v == nil { + err := Matcher_MatcherList_PredicateValidationError{ + field: "MatchType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchTypePresent = true + + if all { + switch v := interface{}(m.GetOrMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "OrMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "OrMatcher", + reason: 
"embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOrMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_PredicateValidationError{ + field: "OrMatcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherList_Predicate_AndMatcher: + if v == nil { + err := Matcher_MatcherList_PredicateValidationError{ + field: "MatchType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchTypePresent = true + + if all { + switch v := interface{}(m.GetAndMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "AndMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "AndMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAndMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_PredicateValidationError{ + field: "AndMatcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherList_Predicate_NotMatcher: + if v == nil { + err := Matcher_MatcherList_PredicateValidationError{ + field: "MatchType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchTypePresent = true + + if all { + switch v := interface{}(m.GetNotMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "NotMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "NotMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNotMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_PredicateValidationError{ + field: "NotMatcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofMatchTypePresent { + err := Matcher_MatcherList_PredicateValidationError{ + field: "MatchType", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Matcher_MatcherList_PredicateMultiError(errors) + } + + return nil +} + +// Matcher_MatcherList_PredicateMultiError is an error wrapping multiple +// validation errors returned by Matcher_MatcherList_Predicate.ValidateAll() +// if the designated constraints aren't met. +type Matcher_MatcherList_PredicateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherList_PredicateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m Matcher_MatcherList_PredicateMultiError) AllErrors() []error { return m } + +// Matcher_MatcherList_PredicateValidationError is the validation error +// returned by Matcher_MatcherList_Predicate.Validate if the designated +// constraints aren't met. +type Matcher_MatcherList_PredicateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_MatcherList_PredicateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherList_PredicateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Matcher_MatcherList_PredicateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherList_PredicateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_MatcherList_PredicateValidationError) ErrorName() string { + return "Matcher_MatcherList_PredicateValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherList_PredicateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherList_Predicate.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherList_PredicateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherList_PredicateValidationError{} + +// Validate checks the field values on Matcher_MatcherList_FieldMatcher with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *Matcher_MatcherList_FieldMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher_MatcherList_FieldMatcher with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// Matcher_MatcherList_FieldMatcherMultiError, or nil if none found. 
+func (m *Matcher_MatcherList_FieldMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherList_FieldMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetPredicate() == nil { + err := Matcher_MatcherList_FieldMatcherValidationError{ + field: "Predicate", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetPredicate()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_FieldMatcherValidationError{ + field: "Predicate", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_FieldMatcherValidationError{ + field: "Predicate", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPredicate()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_FieldMatcherValidationError{ + field: "Predicate", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if m.GetOnMatch() == nil { + err := Matcher_MatcherList_FieldMatcherValidationError{ + field: "OnMatch", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetOnMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_FieldMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_FieldMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_FieldMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Matcher_MatcherList_FieldMatcherMultiError(errors) + } + + return nil +} + +// Matcher_MatcherList_FieldMatcherMultiError is an error wrapping multiple +// validation errors returned by +// Matcher_MatcherList_FieldMatcher.ValidateAll() if the designated +// constraints aren't met. +type Matcher_MatcherList_FieldMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherList_FieldMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherList_FieldMatcherMultiError) AllErrors() []error { return m } + +// Matcher_MatcherList_FieldMatcherValidationError is the validation error +// returned by Matcher_MatcherList_FieldMatcher.Validate if the designated +// constraints aren't met. +type Matcher_MatcherList_FieldMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e Matcher_MatcherList_FieldMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherList_FieldMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Matcher_MatcherList_FieldMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherList_FieldMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_MatcherList_FieldMatcherValidationError) ErrorName() string { + return "Matcher_MatcherList_FieldMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherList_FieldMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherList_FieldMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherList_FieldMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherList_FieldMatcherValidationError{} + +// Validate checks the field values on +// Matcher_MatcherList_Predicate_SinglePredicate with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Matcher_MatcherList_Predicate_SinglePredicate) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Matcher_MatcherList_Predicate_SinglePredicate with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// Matcher_MatcherList_Predicate_SinglePredicateMultiError, or nil if none found. 
+func (m *Matcher_MatcherList_Predicate_SinglePredicate) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetInput() == nil { + err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Input", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetInput()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Input", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Input", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInput()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Input", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofMatcherPresent := false + switch v := m.Matcher.(type) { + case *Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch: + if v == nil { + err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Matcher", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatcherPresent = true + + if all { + switch v := interface{}(m.GetValueMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "ValueMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "ValueMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValueMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "ValueMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch: + if v == nil { + err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Matcher", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatcherPresent = true + + if all { + switch v := interface{}(m.GetCustomMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "CustomMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "CustomMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := 
interface{}(m.GetCustomMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "CustomMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofMatcherPresent { + err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Matcher", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Matcher_MatcherList_Predicate_SinglePredicateMultiError(errors) + } + + return nil +} + +// Matcher_MatcherList_Predicate_SinglePredicateMultiError is an error wrapping +// multiple validation errors returned by +// Matcher_MatcherList_Predicate_SinglePredicate.ValidateAll() if the +// designated constraints aren't met. +type Matcher_MatcherList_Predicate_SinglePredicateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherList_Predicate_SinglePredicateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherList_Predicate_SinglePredicateMultiError) AllErrors() []error { return m } + +// Matcher_MatcherList_Predicate_SinglePredicateValidationError is the +// validation error returned by +// Matcher_MatcherList_Predicate_SinglePredicate.Validate if the designated +// constraints aren't met. +type Matcher_MatcherList_Predicate_SinglePredicateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) ErrorName() string { + return "Matcher_MatcherList_Predicate_SinglePredicateValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherList_Predicate_SinglePredicate.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherList_Predicate_SinglePredicateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherList_Predicate_SinglePredicateValidationError{} + +// Validate checks the field values on +// Matcher_MatcherList_Predicate_PredicateList with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *Matcher_MatcherList_Predicate_PredicateList) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Matcher_MatcherList_Predicate_PredicateList with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// Matcher_MatcherList_Predicate_PredicateListMultiError, or nil if none found. +func (m *Matcher_MatcherList_Predicate_PredicateList) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherList_Predicate_PredicateList) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetPredicate()) < 2 { + err := Matcher_MatcherList_Predicate_PredicateListValidationError{ + field: "Predicate", + reason: "value must contain at least 2 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetPredicate() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_PredicateListValidationError{ + field: fmt.Sprintf("Predicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_PredicateListValidationError{ + field: fmt.Sprintf("Predicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_Predicate_PredicateListValidationError{ + field: fmt.Sprintf("Predicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return Matcher_MatcherList_Predicate_PredicateListMultiError(errors) + } + + return nil +} + +// Matcher_MatcherList_Predicate_PredicateListMultiError is an error wrapping +// multiple validation errors returned by +// Matcher_MatcherList_Predicate_PredicateList.ValidateAll() if the designated +// constraints aren't met. +type Matcher_MatcherList_Predicate_PredicateListMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherList_Predicate_PredicateListMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherList_Predicate_PredicateListMultiError) AllErrors() []error { return m } + +// Matcher_MatcherList_Predicate_PredicateListValidationError is the validation +// error returned by Matcher_MatcherList_Predicate_PredicateList.Validate if +// the designated constraints aren't met. +type Matcher_MatcherList_Predicate_PredicateListValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_MatcherList_Predicate_PredicateListValidationError) ErrorName() string { + return "Matcher_MatcherList_Predicate_PredicateListValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherList_Predicate_PredicateList.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherList_Predicate_PredicateListValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherList_Predicate_PredicateListValidationError{} + +// Validate checks the field values on Matcher_MatcherTree_MatchMap with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Matcher_MatcherTree_MatchMap) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher_MatcherTree_MatchMap with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Matcher_MatcherTree_MatchMapMultiError, or nil if none found. +func (m *Matcher_MatcherTree_MatchMap) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherTree_MatchMap) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetMap()) < 1 { + err := Matcher_MatcherTree_MatchMapValidationError{ + field: "Map", + reason: "value must contain at least 1 pair(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + { + sorted_keys := make([]string, len(m.GetMap())) + i := 0 + for key := range m.GetMap() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetMap()[key] + _ = val + + // no validation rules for Map[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherTree_MatchMapValidationError{ + field: fmt.Sprintf("Map[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherTree_MatchMapValidationError{ + field: fmt.Sprintf("Map[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherTree_MatchMapValidationError{ + field: fmt.Sprintf("Map[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + if len(errors) > 0 { + return Matcher_MatcherTree_MatchMapMultiError(errors) + } + + return nil +} + +// Matcher_MatcherTree_MatchMapMultiError is an error wrapping multiple +// validation 
errors returned by Matcher_MatcherTree_MatchMap.ValidateAll() if +// the designated constraints aren't met. +type Matcher_MatcherTree_MatchMapMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherTree_MatchMapMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherTree_MatchMapMultiError) AllErrors() []error { return m } + +// Matcher_MatcherTree_MatchMapValidationError is the validation error returned +// by Matcher_MatcherTree_MatchMap.Validate if the designated constraints +// aren't met. +type Matcher_MatcherTree_MatchMapValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_MatcherTree_MatchMapValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherTree_MatchMapValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Matcher_MatcherTree_MatchMapValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherTree_MatchMapValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_MatcherTree_MatchMapValidationError) ErrorName() string { + return "Matcher_MatcherTree_MatchMapValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherTree_MatchMapValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherTree_MatchMap.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherTree_MatchMapValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherTree_MatchMapValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.go new file mode 100644 index 0000000000..2861768daa --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.go @@ -0,0 +1,539 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/type/matcher/v3/range.proto + +package v3 + +import ( + v3 "github.com/cncf/xds/go/xds/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Int64RangeMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RangeMatchers []*Int64RangeMatcher_RangeMatcher `protobuf:"bytes,1,rep,name=range_matchers,json=rangeMatchers,proto3" json:"range_matchers,omitempty"` +} + +func (x *Int64RangeMatcher) Reset() { + *x = Int64RangeMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_range_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int64RangeMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int64RangeMatcher) ProtoMessage() {} + +func (x *Int64RangeMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_range_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int64RangeMatcher.ProtoReflect.Descriptor instead. +func (*Int64RangeMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_range_proto_rawDescGZIP(), []int{0} +} + +func (x *Int64RangeMatcher) GetRangeMatchers() []*Int64RangeMatcher_RangeMatcher { + if x != nil { + return x.RangeMatchers + } + return nil +} + +type Int32RangeMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RangeMatchers []*Int32RangeMatcher_RangeMatcher `protobuf:"bytes,1,rep,name=range_matchers,json=rangeMatchers,proto3" json:"range_matchers,omitempty"` +} + +func (x *Int32RangeMatcher) Reset() { + *x = Int32RangeMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_range_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int32RangeMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int32RangeMatcher) ProtoMessage() {} + +func (x *Int32RangeMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_range_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int32RangeMatcher.ProtoReflect.Descriptor instead. 
+func (*Int32RangeMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_range_proto_rawDescGZIP(), []int{1} +} + +func (x *Int32RangeMatcher) GetRangeMatchers() []*Int32RangeMatcher_RangeMatcher { + if x != nil { + return x.RangeMatchers + } + return nil +} + +type DoubleRangeMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RangeMatchers []*DoubleRangeMatcher_RangeMatcher `protobuf:"bytes,1,rep,name=range_matchers,json=rangeMatchers,proto3" json:"range_matchers,omitempty"` +} + +func (x *DoubleRangeMatcher) Reset() { + *x = DoubleRangeMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_range_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DoubleRangeMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DoubleRangeMatcher) ProtoMessage() {} + +func (x *DoubleRangeMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_range_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DoubleRangeMatcher.ProtoReflect.Descriptor instead. +func (*DoubleRangeMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_range_proto_rawDescGZIP(), []int{2} +} + +func (x *DoubleRangeMatcher) GetRangeMatchers() []*DoubleRangeMatcher_RangeMatcher { + if x != nil { + return x.RangeMatchers + } + return nil +} + +type Int64RangeMatcher_RangeMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ranges []*v3.Int64Range `protobuf:"bytes,1,rep,name=ranges,proto3" json:"ranges,omitempty"` + OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"` +} + +func (x *Int64RangeMatcher_RangeMatcher) Reset() { + *x = Int64RangeMatcher_RangeMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_range_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int64RangeMatcher_RangeMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int64RangeMatcher_RangeMatcher) ProtoMessage() {} + +func (x *Int64RangeMatcher_RangeMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_range_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int64RangeMatcher_RangeMatcher.ProtoReflect.Descriptor instead. 
+func (*Int64RangeMatcher_RangeMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_range_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Int64RangeMatcher_RangeMatcher) GetRanges() []*v3.Int64Range { + if x != nil { + return x.Ranges + } + return nil +} + +func (x *Int64RangeMatcher_RangeMatcher) GetOnMatch() *Matcher_OnMatch { + if x != nil { + return x.OnMatch + } + return nil +} + +type Int32RangeMatcher_RangeMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ranges []*v3.Int32Range `protobuf:"bytes,1,rep,name=ranges,proto3" json:"ranges,omitempty"` + OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"` +} + +func (x *Int32RangeMatcher_RangeMatcher) Reset() { + *x = Int32RangeMatcher_RangeMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_range_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int32RangeMatcher_RangeMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int32RangeMatcher_RangeMatcher) ProtoMessage() {} + +func (x *Int32RangeMatcher_RangeMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_range_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int32RangeMatcher_RangeMatcher.ProtoReflect.Descriptor instead. +func (*Int32RangeMatcher_RangeMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_range_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *Int32RangeMatcher_RangeMatcher) GetRanges() []*v3.Int32Range { + if x != nil { + return x.Ranges + } + return nil +} + +func (x *Int32RangeMatcher_RangeMatcher) GetOnMatch() *Matcher_OnMatch { + if x != nil { + return x.OnMatch + } + return nil +} + +type DoubleRangeMatcher_RangeMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ranges []*v3.DoubleRange `protobuf:"bytes,1,rep,name=ranges,proto3" json:"ranges,omitempty"` + OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"` +} + +func (x *DoubleRangeMatcher_RangeMatcher) Reset() { + *x = DoubleRangeMatcher_RangeMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_range_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DoubleRangeMatcher_RangeMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DoubleRangeMatcher_RangeMatcher) ProtoMessage() {} + +func (x *DoubleRangeMatcher_RangeMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_range_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DoubleRangeMatcher_RangeMatcher.ProtoReflect.Descriptor instead. 
+func (*DoubleRangeMatcher_RangeMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_range_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *DoubleRangeMatcher_RangeMatcher) GetRanges() []*v3.DoubleRange { + if x != nil { + return x.Ranges + } + return nil +} + +func (x *DoubleRangeMatcher_RangeMatcher) GetOnMatch() *Matcher_OnMatch { + if x != nil { + return x.OnMatch + } + return nil +} + +var File_xds_type_matcher_v3_range_proto protoreflect.FileDescriptor + +var file_xds_type_matcher_v3_range_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x17, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x21, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xfc, 0x01, 0x0a, 0x11, + 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x12, 0x5a, 0x0a, 0x0e, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x78, 0x64, 0x73, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0d, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x1a, 0x8a, 0x01, + 0x0a, 0x0c, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x39, + 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, + 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, + 0x01, 0x52, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, + 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xfc, 0x01, 0x0a, 0x11, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x12, 0x5a, 0x0a, 0x0e, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 
0x65, 0x72, 0x52, 0x0d, 0x72, + 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x1a, 0x8a, 0x01, 0x0a, + 0x0c, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x39, 0x0a, + 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, + 0x52, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xff, 0x01, 0x0a, 0x12, 0x44, 0x6f, + 0x75, 0x62, 0x6c, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x12, 0x5b, 0x0a, 0x0e, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x44, + 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0d, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x1a, 0x8b, 0x01, + 0x0a, 0x0c, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x3a, + 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x75, + 0x62, 0x6c, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, + 0x08, 0x01, 0x52, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x08, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, + 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x5a, 0x0a, 0x1e, 0x63, + 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, + 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_type_matcher_v3_range_proto_rawDescOnce sync.Once + file_xds_type_matcher_v3_range_proto_rawDescData = file_xds_type_matcher_v3_range_proto_rawDesc +) + +func file_xds_type_matcher_v3_range_proto_rawDescGZIP() []byte { + file_xds_type_matcher_v3_range_proto_rawDescOnce.Do(func() { + file_xds_type_matcher_v3_range_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_range_proto_rawDescData) + }) + return file_xds_type_matcher_v3_range_proto_rawDescData +} + +var file_xds_type_matcher_v3_range_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_xds_type_matcher_v3_range_proto_goTypes = []interface{}{ + (*Int64RangeMatcher)(nil), // 0: xds.type.matcher.v3.Int64RangeMatcher + (*Int32RangeMatcher)(nil), // 1: xds.type.matcher.v3.Int32RangeMatcher + (*DoubleRangeMatcher)(nil), // 2: xds.type.matcher.v3.DoubleRangeMatcher + (*Int64RangeMatcher_RangeMatcher)(nil), // 3: xds.type.matcher.v3.Int64RangeMatcher.RangeMatcher + (*Int32RangeMatcher_RangeMatcher)(nil), // 4: xds.type.matcher.v3.Int32RangeMatcher.RangeMatcher + (*DoubleRangeMatcher_RangeMatcher)(nil), // 5: xds.type.matcher.v3.DoubleRangeMatcher.RangeMatcher + (*v3.Int64Range)(nil), // 6: xds.type.v3.Int64Range + (*Matcher_OnMatch)(nil), // 7: xds.type.matcher.v3.Matcher.OnMatch + (*v3.Int32Range)(nil), // 8: xds.type.v3.Int32Range + (*v3.DoubleRange)(nil), // 9: xds.type.v3.DoubleRange +} +var file_xds_type_matcher_v3_range_proto_depIdxs = []int32{ + 3, // 0: xds.type.matcher.v3.Int64RangeMatcher.range_matchers:type_name -> xds.type.matcher.v3.Int64RangeMatcher.RangeMatcher + 4, // 1: xds.type.matcher.v3.Int32RangeMatcher.range_matchers:type_name -> xds.type.matcher.v3.Int32RangeMatcher.RangeMatcher + 5, // 2: xds.type.matcher.v3.DoubleRangeMatcher.range_matchers:type_name -> xds.type.matcher.v3.DoubleRangeMatcher.RangeMatcher + 6, // 3: xds.type.matcher.v3.Int64RangeMatcher.RangeMatcher.ranges:type_name -> xds.type.v3.Int64Range + 7, // 4: xds.type.matcher.v3.Int64RangeMatcher.RangeMatcher.on_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch + 8, // 5: xds.type.matcher.v3.Int32RangeMatcher.RangeMatcher.ranges:type_name -> xds.type.v3.Int32Range + 7, // 6: xds.type.matcher.v3.Int32RangeMatcher.RangeMatcher.on_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch + 9, // 7: xds.type.matcher.v3.DoubleRangeMatcher.RangeMatcher.ranges:type_name -> xds.type.v3.DoubleRange + 7, // 8: xds.type.matcher.v3.DoubleRangeMatcher.RangeMatcher.on_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_xds_type_matcher_v3_range_proto_init() } +func file_xds_type_matcher_v3_range_proto_init() { + if File_xds_type_matcher_v3_range_proto != nil { + return + } + file_xds_type_matcher_v3_matcher_proto_init() + if !protoimpl.UnsafeEnabled { + file_xds_type_matcher_v3_range_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Int64RangeMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_range_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Int32RangeMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_range_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DoubleRangeMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_xds_type_matcher_v3_range_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Int64RangeMatcher_RangeMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_range_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Int32RangeMatcher_RangeMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_range_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DoubleRangeMatcher_RangeMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_type_matcher_v3_range_proto_rawDesc, + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_type_matcher_v3_range_proto_goTypes, + DependencyIndexes: file_xds_type_matcher_v3_range_proto_depIdxs, + MessageInfos: file_xds_type_matcher_v3_range_proto_msgTypes, + }.Build() + File_xds_type_matcher_v3_range_proto = out.File + file_xds_type_matcher_v3_range_proto_rawDesc = nil + file_xds_type_matcher_v3_range_proto_goTypes = nil + file_xds_type_matcher_v3_range_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.validate.go new file mode 100644 index 0000000000..8cb5986438 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.validate.go @@ -0,0 +1,975 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/type/matcher/v3/range.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Int64RangeMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *Int64RangeMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Int64RangeMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Int64RangeMatcherMultiError, or nil if none found. 
+func (m *Int64RangeMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *Int64RangeMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetRangeMatchers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Int64RangeMatcherValidationError{ + field: fmt.Sprintf("RangeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Int64RangeMatcherValidationError{ + field: fmt.Sprintf("RangeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Int64RangeMatcherValidationError{ + field: fmt.Sprintf("RangeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return Int64RangeMatcherMultiError(errors) + } + + return nil +} + +// Int64RangeMatcherMultiError is an error wrapping multiple validation errors +// returned by Int64RangeMatcher.ValidateAll() if the designated constraints +// aren't met. +type Int64RangeMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Int64RangeMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Int64RangeMatcherMultiError) AllErrors() []error { return m } + +// Int64RangeMatcherValidationError is the validation error returned by +// Int64RangeMatcher.Validate if the designated constraints aren't met. +type Int64RangeMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Int64RangeMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Int64RangeMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Int64RangeMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Int64RangeMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Int64RangeMatcherValidationError) ErrorName() string { + return "Int64RangeMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e Int64RangeMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sInt64RangeMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Int64RangeMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Int64RangeMatcherValidationError{} + +// Validate checks the field values on Int32RangeMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. 
+func (m *Int32RangeMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Int32RangeMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Int32RangeMatcherMultiError, or nil if none found. +func (m *Int32RangeMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *Int32RangeMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetRangeMatchers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Int32RangeMatcherValidationError{ + field: fmt.Sprintf("RangeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Int32RangeMatcherValidationError{ + field: fmt.Sprintf("RangeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Int32RangeMatcherValidationError{ + field: fmt.Sprintf("RangeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return Int32RangeMatcherMultiError(errors) + } + + return nil +} + +// Int32RangeMatcherMultiError is an error wrapping multiple validation errors +// returned by Int32RangeMatcher.ValidateAll() if the designated constraints +// aren't met. +type Int32RangeMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Int32RangeMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Int32RangeMatcherMultiError) AllErrors() []error { return m } + +// Int32RangeMatcherValidationError is the validation error returned by +// Int32RangeMatcher.Validate if the designated constraints aren't met. +type Int32RangeMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Int32RangeMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Int32RangeMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Int32RangeMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Int32RangeMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Int32RangeMatcherValidationError) ErrorName() string { + return "Int32RangeMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e Int32RangeMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sInt32RangeMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Int32RangeMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Int32RangeMatcherValidationError{} + +// Validate checks the field values on DoubleRangeMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DoubleRangeMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DoubleRangeMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DoubleRangeMatcherMultiError, or nil if none found. +func (m *DoubleRangeMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *DoubleRangeMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetRangeMatchers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DoubleRangeMatcherValidationError{ + field: fmt.Sprintf("RangeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DoubleRangeMatcherValidationError{ + field: fmt.Sprintf("RangeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DoubleRangeMatcherValidationError{ + field: fmt.Sprintf("RangeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return DoubleRangeMatcherMultiError(errors) + } + + return nil +} + +// DoubleRangeMatcherMultiError is an error wrapping multiple validation errors +// returned by DoubleRangeMatcher.ValidateAll() if the designated constraints +// aren't met. +type DoubleRangeMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DoubleRangeMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DoubleRangeMatcherMultiError) AllErrors() []error { return m } + +// DoubleRangeMatcherValidationError is the validation error returned by +// DoubleRangeMatcher.Validate if the designated constraints aren't met. +type DoubleRangeMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DoubleRangeMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e DoubleRangeMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DoubleRangeMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DoubleRangeMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DoubleRangeMatcherValidationError) ErrorName() string { + return "DoubleRangeMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e DoubleRangeMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDoubleRangeMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DoubleRangeMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DoubleRangeMatcherValidationError{} + +// Validate checks the field values on Int64RangeMatcher_RangeMatcher with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Int64RangeMatcher_RangeMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Int64RangeMatcher_RangeMatcher with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// Int64RangeMatcher_RangeMatcherMultiError, or nil if none found. +func (m *Int64RangeMatcher_RangeMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *Int64RangeMatcher_RangeMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetRanges()) < 1 { + err := Int64RangeMatcher_RangeMatcherValidationError{ + field: "Ranges", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRanges() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Int64RangeMatcher_RangeMatcherValidationError{ + field: fmt.Sprintf("Ranges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Int64RangeMatcher_RangeMatcherValidationError{ + field: fmt.Sprintf("Ranges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Int64RangeMatcher_RangeMatcherValidationError{ + field: fmt.Sprintf("Ranges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetOnMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Int64RangeMatcher_RangeMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Int64RangeMatcher_RangeMatcherValidationError{ + field: "OnMatch", + reason: "embedded message 
failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Int64RangeMatcher_RangeMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Int64RangeMatcher_RangeMatcherMultiError(errors) + } + + return nil +} + +// Int64RangeMatcher_RangeMatcherMultiError is an error wrapping multiple +// validation errors returned by Int64RangeMatcher_RangeMatcher.ValidateAll() +// if the designated constraints aren't met. +type Int64RangeMatcher_RangeMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Int64RangeMatcher_RangeMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Int64RangeMatcher_RangeMatcherMultiError) AllErrors() []error { return m } + +// Int64RangeMatcher_RangeMatcherValidationError is the validation error +// returned by Int64RangeMatcher_RangeMatcher.Validate if the designated +// constraints aren't met. +type Int64RangeMatcher_RangeMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Int64RangeMatcher_RangeMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Int64RangeMatcher_RangeMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Int64RangeMatcher_RangeMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Int64RangeMatcher_RangeMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Int64RangeMatcher_RangeMatcherValidationError) ErrorName() string { + return "Int64RangeMatcher_RangeMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e Int64RangeMatcher_RangeMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sInt64RangeMatcher_RangeMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Int64RangeMatcher_RangeMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Int64RangeMatcher_RangeMatcherValidationError{} + +// Validate checks the field values on Int32RangeMatcher_RangeMatcher with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Int32RangeMatcher_RangeMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Int32RangeMatcher_RangeMatcher with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// Int32RangeMatcher_RangeMatcherMultiError, or nil if none found. 
+func (m *Int32RangeMatcher_RangeMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *Int32RangeMatcher_RangeMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetRanges()) < 1 { + err := Int32RangeMatcher_RangeMatcherValidationError{ + field: "Ranges", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRanges() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Int32RangeMatcher_RangeMatcherValidationError{ + field: fmt.Sprintf("Ranges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Int32RangeMatcher_RangeMatcherValidationError{ + field: fmt.Sprintf("Ranges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Int32RangeMatcher_RangeMatcherValidationError{ + field: fmt.Sprintf("Ranges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetOnMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Int32RangeMatcher_RangeMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Int32RangeMatcher_RangeMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Int32RangeMatcher_RangeMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Int32RangeMatcher_RangeMatcherMultiError(errors) + } + + return nil +} + +// Int32RangeMatcher_RangeMatcherMultiError is an error wrapping multiple +// validation errors returned by Int32RangeMatcher_RangeMatcher.ValidateAll() +// if the designated constraints aren't met. +type Int32RangeMatcher_RangeMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Int32RangeMatcher_RangeMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Int32RangeMatcher_RangeMatcherMultiError) AllErrors() []error { return m } + +// Int32RangeMatcher_RangeMatcherValidationError is the validation error +// returned by Int32RangeMatcher_RangeMatcher.Validate if the designated +// constraints aren't met. +type Int32RangeMatcher_RangeMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Int32RangeMatcher_RangeMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e Int32RangeMatcher_RangeMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Int32RangeMatcher_RangeMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Int32RangeMatcher_RangeMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Int32RangeMatcher_RangeMatcherValidationError) ErrorName() string { + return "Int32RangeMatcher_RangeMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e Int32RangeMatcher_RangeMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sInt32RangeMatcher_RangeMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Int32RangeMatcher_RangeMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Int32RangeMatcher_RangeMatcherValidationError{} + +// Validate checks the field values on DoubleRangeMatcher_RangeMatcher with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DoubleRangeMatcher_RangeMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DoubleRangeMatcher_RangeMatcher with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// DoubleRangeMatcher_RangeMatcherMultiError, or nil if none found. +func (m *DoubleRangeMatcher_RangeMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *DoubleRangeMatcher_RangeMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetRanges()) < 1 { + err := DoubleRangeMatcher_RangeMatcherValidationError{ + field: "Ranges", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRanges() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DoubleRangeMatcher_RangeMatcherValidationError{ + field: fmt.Sprintf("Ranges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DoubleRangeMatcher_RangeMatcherValidationError{ + field: fmt.Sprintf("Ranges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DoubleRangeMatcher_RangeMatcherValidationError{ + field: fmt.Sprintf("Ranges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetOnMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DoubleRangeMatcher_RangeMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + 
errors = append(errors, DoubleRangeMatcher_RangeMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DoubleRangeMatcher_RangeMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DoubleRangeMatcher_RangeMatcherMultiError(errors) + } + + return nil +} + +// DoubleRangeMatcher_RangeMatcherMultiError is an error wrapping multiple +// validation errors returned by DoubleRangeMatcher_RangeMatcher.ValidateAll() +// if the designated constraints aren't met. +type DoubleRangeMatcher_RangeMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DoubleRangeMatcher_RangeMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DoubleRangeMatcher_RangeMatcherMultiError) AllErrors() []error { return m } + +// DoubleRangeMatcher_RangeMatcherValidationError is the validation error +// returned by DoubleRangeMatcher_RangeMatcher.Validate if the designated +// constraints aren't met. +type DoubleRangeMatcher_RangeMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DoubleRangeMatcher_RangeMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DoubleRangeMatcher_RangeMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DoubleRangeMatcher_RangeMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DoubleRangeMatcher_RangeMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DoubleRangeMatcher_RangeMatcherValidationError) ErrorName() string { + return "DoubleRangeMatcher_RangeMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e DoubleRangeMatcher_RangeMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDoubleRangeMatcher_RangeMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DoubleRangeMatcher_RangeMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DoubleRangeMatcher_RangeMatcherValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.go new file mode 100644 index 0000000000..3dcf303ac2 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.go @@ -0,0 +1,242 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/type/matcher/v3/regex.proto + +package v3 + +import ( + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type RegexMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to EngineType: + // + // *RegexMatcher_GoogleRe2 + EngineType isRegexMatcher_EngineType `protobuf_oneof:"engine_type"` + Regex string `protobuf:"bytes,2,opt,name=regex,proto3" json:"regex,omitempty"` +} + +func (x *RegexMatcher) Reset() { + *x = RegexMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_regex_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RegexMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RegexMatcher) ProtoMessage() {} + +func (x *RegexMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_regex_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RegexMatcher.ProtoReflect.Descriptor instead. +func (*RegexMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_regex_proto_rawDescGZIP(), []int{0} +} + +func (m *RegexMatcher) GetEngineType() isRegexMatcher_EngineType { + if m != nil { + return m.EngineType + } + return nil +} + +func (x *RegexMatcher) GetGoogleRe2() *RegexMatcher_GoogleRE2 { + if x, ok := x.GetEngineType().(*RegexMatcher_GoogleRe2); ok { + return x.GoogleRe2 + } + return nil +} + +func (x *RegexMatcher) GetRegex() string { + if x != nil { + return x.Regex + } + return "" +} + +type isRegexMatcher_EngineType interface { + isRegexMatcher_EngineType() +} + +type RegexMatcher_GoogleRe2 struct { + GoogleRe2 *RegexMatcher_GoogleRE2 `protobuf:"bytes,1,opt,name=google_re2,json=googleRe2,proto3,oneof"` +} + +func (*RegexMatcher_GoogleRe2) isRegexMatcher_EngineType() {} + +type RegexMatcher_GoogleRE2 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RegexMatcher_GoogleRE2) Reset() { + *x = RegexMatcher_GoogleRE2{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_regex_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RegexMatcher_GoogleRE2) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RegexMatcher_GoogleRE2) ProtoMessage() {} + +func (x *RegexMatcher_GoogleRE2) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_regex_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RegexMatcher_GoogleRE2.ProtoReflect.Descriptor instead. 
+func (*RegexMatcher_GoogleRE2) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_regex_proto_rawDescGZIP(), []int{0, 0} +} + +var File_xds_type_matcher_v3_regex_proto protoreflect.FileDescriptor + +var file_xds_type_matcher_v3_regex_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xa6, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x12, 0x56, 0x0a, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x32, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, 0x45, + 0x32, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x09, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, 0x65, 0x32, 0x12, 0x1d, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x65, + 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x1a, 0x0b, 0x0a, 0x09, 0x47, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x52, 0x45, 0x32, 0x42, 0x12, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x5a, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x52, 0x65, 0x67, 0x65, + 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, + 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_type_matcher_v3_regex_proto_rawDescOnce sync.Once + file_xds_type_matcher_v3_regex_proto_rawDescData = file_xds_type_matcher_v3_regex_proto_rawDesc +) + +func file_xds_type_matcher_v3_regex_proto_rawDescGZIP() []byte { + file_xds_type_matcher_v3_regex_proto_rawDescOnce.Do(func() { + file_xds_type_matcher_v3_regex_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_regex_proto_rawDescData) + }) + return file_xds_type_matcher_v3_regex_proto_rawDescData +} + +var file_xds_type_matcher_v3_regex_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_xds_type_matcher_v3_regex_proto_goTypes = []interface{}{ + (*RegexMatcher)(nil), // 0: xds.type.matcher.v3.RegexMatcher + (*RegexMatcher_GoogleRE2)(nil), // 1: xds.type.matcher.v3.RegexMatcher.GoogleRE2 +} +var file_xds_type_matcher_v3_regex_proto_depIdxs = []int32{ + 1, // 0: xds.type.matcher.v3.RegexMatcher.google_re2:type_name -> xds.type.matcher.v3.RegexMatcher.GoogleRE2 + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] 
is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_xds_type_matcher_v3_regex_proto_init() } +func file_xds_type_matcher_v3_regex_proto_init() { + if File_xds_type_matcher_v3_regex_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_type_matcher_v3_regex_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RegexMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_regex_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RegexMatcher_GoogleRE2); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_xds_type_matcher_v3_regex_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*RegexMatcher_GoogleRe2)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_type_matcher_v3_regex_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_type_matcher_v3_regex_proto_goTypes, + DependencyIndexes: file_xds_type_matcher_v3_regex_proto_depIdxs, + MessageInfos: file_xds_type_matcher_v3_regex_proto_msgTypes, + }.Build() + File_xds_type_matcher_v3_regex_proto = out.File + file_xds_type_matcher_v3_regex_proto_rawDesc = nil + file_xds_type_matcher_v3_regex_proto_goTypes = nil + file_xds_type_matcher_v3_regex_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.validate.go new file mode 100644 index 0000000000..8b7682964b --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.validate.go @@ -0,0 +1,317 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/type/matcher/v3/regex.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on RegexMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RegexMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RegexMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RegexMatcherMultiError, or +// nil if none found. 
+func (m *RegexMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *RegexMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetRegex()) < 1 { + err := RegexMatcherValidationError{ + field: "Regex", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + oneofEngineTypePresent := false + switch v := m.EngineType.(type) { + case *RegexMatcher_GoogleRe2: + if v == nil { + err := RegexMatcherValidationError{ + field: "EngineType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofEngineTypePresent = true + + if m.GetGoogleRe2() == nil { + err := RegexMatcherValidationError{ + field: "GoogleRe2", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetGoogleRe2()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RegexMatcherValidationError{ + field: "GoogleRe2", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RegexMatcherValidationError{ + field: "GoogleRe2", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGoogleRe2()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RegexMatcherValidationError{ + field: "GoogleRe2", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofEngineTypePresent { + err := RegexMatcherValidationError{ + field: "EngineType", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RegexMatcherMultiError(errors) + } + + return nil +} + +// RegexMatcherMultiError is an error wrapping multiple validation errors +// returned by RegexMatcher.ValidateAll() if the designated constraints aren't met. +type RegexMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RegexMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RegexMatcherMultiError) AllErrors() []error { return m } + +// RegexMatcherValidationError is the validation error returned by +// RegexMatcher.Validate if the designated constraints aren't met. +type RegexMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RegexMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RegexMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RegexMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RegexMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RegexMatcherValidationError) ErrorName() string { return "RegexMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e RegexMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRegexMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RegexMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RegexMatcherValidationError{} + +// Validate checks the field values on RegexMatcher_GoogleRE2 with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RegexMatcher_GoogleRE2) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RegexMatcher_GoogleRE2 with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RegexMatcher_GoogleRE2MultiError, or nil if none found. +func (m *RegexMatcher_GoogleRE2) ValidateAll() error { + return m.validate(true) +} + +func (m *RegexMatcher_GoogleRE2) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return RegexMatcher_GoogleRE2MultiError(errors) + } + + return nil +} + +// RegexMatcher_GoogleRE2MultiError is an error wrapping multiple validation +// errors returned by RegexMatcher_GoogleRE2.ValidateAll() if the designated +// constraints aren't met. +type RegexMatcher_GoogleRE2MultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RegexMatcher_GoogleRE2MultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RegexMatcher_GoogleRE2MultiError) AllErrors() []error { return m } + +// RegexMatcher_GoogleRE2ValidationError is the validation error returned by +// RegexMatcher_GoogleRE2.Validate if the designated constraints aren't met. +type RegexMatcher_GoogleRE2ValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RegexMatcher_GoogleRE2ValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RegexMatcher_GoogleRE2ValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RegexMatcher_GoogleRE2ValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RegexMatcher_GoogleRE2ValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RegexMatcher_GoogleRE2ValidationError) ErrorName() string { + return "RegexMatcher_GoogleRE2ValidationError" +} + +// Error satisfies the builtin error interface +func (e RegexMatcher_GoogleRE2ValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRegexMatcher_GoogleRE2.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RegexMatcher_GoogleRE2ValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RegexMatcher_GoogleRE2ValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.go new file mode 100644 index 0000000000..f9067918c7 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.go @@ -0,0 +1,353 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/type/matcher/v3/string.proto + +package v3 + +import ( + v3 "github.com/cncf/xds/go/xds/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type StringMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to MatchPattern: + // + // *StringMatcher_Exact + // *StringMatcher_Prefix + // *StringMatcher_Suffix + // *StringMatcher_SafeRegex + // *StringMatcher_Contains + // *StringMatcher_Custom + MatchPattern isStringMatcher_MatchPattern `protobuf_oneof:"match_pattern"` + IgnoreCase bool `protobuf:"varint,6,opt,name=ignore_case,json=ignoreCase,proto3" json:"ignore_case,omitempty"` +} + +func (x *StringMatcher) Reset() { + *x = StringMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_string_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StringMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringMatcher) ProtoMessage() {} + +func (x *StringMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_string_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StringMatcher.ProtoReflect.Descriptor instead. 
+func (*StringMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_string_proto_rawDescGZIP(), []int{0} +} + +func (m *StringMatcher) GetMatchPattern() isStringMatcher_MatchPattern { + if m != nil { + return m.MatchPattern + } + return nil +} + +func (x *StringMatcher) GetExact() string { + if x, ok := x.GetMatchPattern().(*StringMatcher_Exact); ok { + return x.Exact + } + return "" +} + +func (x *StringMatcher) GetPrefix() string { + if x, ok := x.GetMatchPattern().(*StringMatcher_Prefix); ok { + return x.Prefix + } + return "" +} + +func (x *StringMatcher) GetSuffix() string { + if x, ok := x.GetMatchPattern().(*StringMatcher_Suffix); ok { + return x.Suffix + } + return "" +} + +func (x *StringMatcher) GetSafeRegex() *RegexMatcher { + if x, ok := x.GetMatchPattern().(*StringMatcher_SafeRegex); ok { + return x.SafeRegex + } + return nil +} + +func (x *StringMatcher) GetContains() string { + if x, ok := x.GetMatchPattern().(*StringMatcher_Contains); ok { + return x.Contains + } + return "" +} + +func (x *StringMatcher) GetCustom() *v3.TypedExtensionConfig { + if x, ok := x.GetMatchPattern().(*StringMatcher_Custom); ok { + return x.Custom + } + return nil +} + +func (x *StringMatcher) GetIgnoreCase() bool { + if x != nil { + return x.IgnoreCase + } + return false +} + +type isStringMatcher_MatchPattern interface { + isStringMatcher_MatchPattern() +} + +type StringMatcher_Exact struct { + Exact string `protobuf:"bytes,1,opt,name=exact,proto3,oneof"` +} + +type StringMatcher_Prefix struct { + Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3,oneof"` +} + +type StringMatcher_Suffix struct { + Suffix string `protobuf:"bytes,3,opt,name=suffix,proto3,oneof"` +} + +type StringMatcher_SafeRegex struct { + SafeRegex *RegexMatcher `protobuf:"bytes,5,opt,name=safe_regex,json=safeRegex,proto3,oneof"` +} + +type StringMatcher_Contains struct { + Contains string `protobuf:"bytes,7,opt,name=contains,proto3,oneof"` +} + +type StringMatcher_Custom struct { + Custom *v3.TypedExtensionConfig `protobuf:"bytes,8,opt,name=custom,proto3,oneof"` +} + +func (*StringMatcher_Exact) isStringMatcher_MatchPattern() {} + +func (*StringMatcher_Prefix) isStringMatcher_MatchPattern() {} + +func (*StringMatcher_Suffix) isStringMatcher_MatchPattern() {} + +func (*StringMatcher_SafeRegex) isStringMatcher_MatchPattern() {} + +func (*StringMatcher_Contains) isStringMatcher_MatchPattern() {} + +func (*StringMatcher_Custom) isStringMatcher_MatchPattern() {} + +type ListStringMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Patterns []*StringMatcher `protobuf:"bytes,1,rep,name=patterns,proto3" json:"patterns,omitempty"` +} + +func (x *ListStringMatcher) Reset() { + *x = ListStringMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_matcher_v3_string_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListStringMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListStringMatcher) ProtoMessage() {} + +func (x *ListStringMatcher) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_matcher_v3_string_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListStringMatcher.ProtoReflect.Descriptor instead. 
+func (*ListStringMatcher) Descriptor() ([]byte, []int) { + return file_xds_type_matcher_v3_string_proto_rawDescGZIP(), []int{1} +} + +func (x *ListStringMatcher) GetPatterns() []*StringMatcher { + if x != nil { + return x.Patterns + } + return nil +} + +var File_xds_type_matcher_v3_string_proto protoreflect.FileDescriptor + +var file_xds_type_matcher_v3_string_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1b, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd6, + 0x02, 0x0a, 0x0d, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x12, 0x16, 0x0a, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, 0x12, 0x21, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x48, 0x00, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x06, 0x73, + 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x4c, + 0x0a, 0x0a, 0x73, 0x61, 0x66, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, + 0x00, 0x52, 0x09, 0x73, 0x61, 0x66, 0x65, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, 0x25, 0x0a, 0x08, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x73, 0x12, 0x3b, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x61, 0x73, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x61, 0x73, + 0x65, 0x42, 0x14, 0x0a, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, + 0x72, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x5d, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x53, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x08, + 0x70, 0x61, 0x74, 0x74, 
0x65, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x70, 0x61, + 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x42, 0x5b, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, + 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_type_matcher_v3_string_proto_rawDescOnce sync.Once + file_xds_type_matcher_v3_string_proto_rawDescData = file_xds_type_matcher_v3_string_proto_rawDesc +) + +func file_xds_type_matcher_v3_string_proto_rawDescGZIP() []byte { + file_xds_type_matcher_v3_string_proto_rawDescOnce.Do(func() { + file_xds_type_matcher_v3_string_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_string_proto_rawDescData) + }) + return file_xds_type_matcher_v3_string_proto_rawDescData +} + +var file_xds_type_matcher_v3_string_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_xds_type_matcher_v3_string_proto_goTypes = []interface{}{ + (*StringMatcher)(nil), // 0: xds.type.matcher.v3.StringMatcher + (*ListStringMatcher)(nil), // 1: xds.type.matcher.v3.ListStringMatcher + (*RegexMatcher)(nil), // 2: xds.type.matcher.v3.RegexMatcher + (*v3.TypedExtensionConfig)(nil), // 3: xds.core.v3.TypedExtensionConfig +} +var file_xds_type_matcher_v3_string_proto_depIdxs = []int32{ + 2, // 0: xds.type.matcher.v3.StringMatcher.safe_regex:type_name -> xds.type.matcher.v3.RegexMatcher + 3, // 1: xds.type.matcher.v3.StringMatcher.custom:type_name -> xds.core.v3.TypedExtensionConfig + 0, // 2: xds.type.matcher.v3.ListStringMatcher.patterns:type_name -> xds.type.matcher.v3.StringMatcher + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_xds_type_matcher_v3_string_proto_init() } +func file_xds_type_matcher_v3_string_proto_init() { + if File_xds_type_matcher_v3_string_proto != nil { + return + } + file_xds_type_matcher_v3_regex_proto_init() + if !protoimpl.UnsafeEnabled { + file_xds_type_matcher_v3_string_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StringMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_matcher_v3_string_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListStringMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_xds_type_matcher_v3_string_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*StringMatcher_Exact)(nil), + (*StringMatcher_Prefix)(nil), + 
(*StringMatcher_Suffix)(nil), + (*StringMatcher_SafeRegex)(nil), + (*StringMatcher_Contains)(nil), + (*StringMatcher_Custom)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_type_matcher_v3_string_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_type_matcher_v3_string_proto_goTypes, + DependencyIndexes: file_xds_type_matcher_v3_string_proto_depIdxs, + MessageInfos: file_xds_type_matcher_v3_string_proto_msgTypes, + }.Build() + File_xds_type_matcher_v3_string_proto = out.File + file_xds_type_matcher_v3_string_proto_rawDesc = nil + file_xds_type_matcher_v3_string_proto_goTypes = nil + file_xds_type_matcher_v3_string_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.validate.go new file mode 100644 index 0000000000..339d3b631b --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.validate.go @@ -0,0 +1,481 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/type/matcher/v3/string.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on StringMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *StringMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StringMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in StringMatcherMultiError, or +// nil if none found. 
+func (m *StringMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *StringMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for IgnoreCase + + oneofMatchPatternPresent := false + switch v := m.MatchPattern.(type) { + case *StringMatcher_Exact: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + // no validation rules for Exact + case *StringMatcher_Prefix: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if utf8.RuneCountInString(m.GetPrefix()) < 1 { + err := StringMatcherValidationError{ + field: "Prefix", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *StringMatcher_Suffix: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if utf8.RuneCountInString(m.GetSuffix()) < 1 { + err := StringMatcherValidationError{ + field: "Suffix", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *StringMatcher_SafeRegex: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if m.GetSafeRegex() == nil { + err := StringMatcherValidationError{ + field: "SafeRegex", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetSafeRegex()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StringMatcherValidationError{ + field: "SafeRegex", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StringMatcherValidationError{ + field: "SafeRegex", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSafeRegex()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StringMatcherValidationError{ + field: "SafeRegex", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *StringMatcher_Contains: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if utf8.RuneCountInString(m.GetContains()) < 1 { + err := StringMatcherValidationError{ + field: "Contains", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *StringMatcher_Custom: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + 
oneofMatchPatternPresent = true + + if all { + switch v := interface{}(m.GetCustom()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StringMatcherValidationError{ + field: "Custom", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StringMatcherValidationError{ + field: "Custom", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCustom()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StringMatcherValidationError{ + field: "Custom", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofMatchPatternPresent { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return StringMatcherMultiError(errors) + } + + return nil +} + +// StringMatcherMultiError is an error wrapping multiple validation errors +// returned by StringMatcher.ValidateAll() if the designated constraints +// aren't met. +type StringMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StringMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StringMatcherMultiError) AllErrors() []error { return m } + +// StringMatcherValidationError is the validation error returned by +// StringMatcher.Validate if the designated constraints aren't met. +type StringMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StringMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StringMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StringMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StringMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e StringMatcherValidationError) ErrorName() string { return "StringMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e StringMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStringMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StringMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StringMatcherValidationError{} + +// Validate checks the field values on ListStringMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ListStringMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListStringMatcher with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// ListStringMatcherMultiError, or nil if none found. +func (m *ListStringMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *ListStringMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetPatterns()) < 1 { + err := ListStringMatcherValidationError{ + field: "Patterns", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetPatterns() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListStringMatcherValidationError{ + field: fmt.Sprintf("Patterns[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListStringMatcherValidationError{ + field: fmt.Sprintf("Patterns[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListStringMatcherValidationError{ + field: fmt.Sprintf("Patterns[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ListStringMatcherMultiError(errors) + } + + return nil +} + +// ListStringMatcherMultiError is an error wrapping multiple validation errors +// returned by ListStringMatcher.ValidateAll() if the designated constraints +// aren't met. +type ListStringMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListStringMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListStringMatcherMultiError) AllErrors() []error { return m } + +// ListStringMatcherValidationError is the validation error returned by +// ListStringMatcher.Validate if the designated constraints aren't met. +type ListStringMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListStringMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListStringMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListStringMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListStringMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ListStringMatcherValidationError) ErrorName() string { + return "ListStringMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e ListStringMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListStringMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListStringMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListStringMatcherValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.go b/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.go new file mode 100644 index 0000000000..c7d42d4a94 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.go @@ -0,0 +1,330 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/type/v3/cel.proto + +package v3 + +import ( + expr "cel.dev/expr" + _ "github.com/cncf/xds/go/xds/annotations/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + v1alpha1 "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CelExpression struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to ExprSpecifier: + // + // *CelExpression_ParsedExpr + // *CelExpression_CheckedExpr + ExprSpecifier isCelExpression_ExprSpecifier `protobuf_oneof:"expr_specifier"` + CelExprParsed *expr.ParsedExpr `protobuf:"bytes,3,opt,name=cel_expr_parsed,json=celExprParsed,proto3" json:"cel_expr_parsed,omitempty"` + CelExprChecked *expr.CheckedExpr `protobuf:"bytes,4,opt,name=cel_expr_checked,json=celExprChecked,proto3" json:"cel_expr_checked,omitempty"` +} + +func (x *CelExpression) Reset() { + *x = CelExpression{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_v3_cel_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CelExpression) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CelExpression) ProtoMessage() {} + +func (x *CelExpression) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_v3_cel_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CelExpression.ProtoReflect.Descriptor instead. +func (*CelExpression) Descriptor() ([]byte, []int) { + return file_xds_type_v3_cel_proto_rawDescGZIP(), []int{0} +} + +func (m *CelExpression) GetExprSpecifier() isCelExpression_ExprSpecifier { + if m != nil { + return m.ExprSpecifier + } + return nil +} + +// Deprecated: Marked as deprecated in xds/type/v3/cel.proto. 
+func (x *CelExpression) GetParsedExpr() *v1alpha1.ParsedExpr { + if x, ok := x.GetExprSpecifier().(*CelExpression_ParsedExpr); ok { + return x.ParsedExpr + } + return nil +} + +// Deprecated: Marked as deprecated in xds/type/v3/cel.proto. +func (x *CelExpression) GetCheckedExpr() *v1alpha1.CheckedExpr { + if x, ok := x.GetExprSpecifier().(*CelExpression_CheckedExpr); ok { + return x.CheckedExpr + } + return nil +} + +func (x *CelExpression) GetCelExprParsed() *expr.ParsedExpr { + if x != nil { + return x.CelExprParsed + } + return nil +} + +func (x *CelExpression) GetCelExprChecked() *expr.CheckedExpr { + if x != nil { + return x.CelExprChecked + } + return nil +} + +type isCelExpression_ExprSpecifier interface { + isCelExpression_ExprSpecifier() +} + +type CelExpression_ParsedExpr struct { + // Deprecated: Marked as deprecated in xds/type/v3/cel.proto. + ParsedExpr *v1alpha1.ParsedExpr `protobuf:"bytes,1,opt,name=parsed_expr,json=parsedExpr,proto3,oneof"` +} + +type CelExpression_CheckedExpr struct { + // Deprecated: Marked as deprecated in xds/type/v3/cel.proto. + CheckedExpr *v1alpha1.CheckedExpr `protobuf:"bytes,2,opt,name=checked_expr,json=checkedExpr,proto3,oneof"` +} + +func (*CelExpression_ParsedExpr) isCelExpression_ExprSpecifier() {} + +func (*CelExpression_CheckedExpr) isCelExpression_ExprSpecifier() {} + +type CelExtractString struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ExprExtract *CelExpression `protobuf:"bytes,1,opt,name=expr_extract,json=exprExtract,proto3" json:"expr_extract,omitempty"` + DefaultValue *wrapperspb.StringValue `protobuf:"bytes,2,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` +} + +func (x *CelExtractString) Reset() { + *x = CelExtractString{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_v3_cel_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CelExtractString) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CelExtractString) ProtoMessage() {} + +func (x *CelExtractString) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_v3_cel_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CelExtractString.ProtoReflect.Descriptor instead. 
+func (*CelExtractString) Descriptor() ([]byte, []int) { + return file_xds_type_v3_cel_proto_rawDescGZIP(), []int{1} +} + +func (x *CelExtractString) GetExprExtract() *CelExpression { + if x != nil { + return x.ExprExtract + } + return nil +} + +func (x *CelExtractString) GetDefaultValue() *wrapperspb.StringValue { + if x != nil { + return x.DefaultValue + } + return nil +} + +var File_xds_type_v3_cel_proto protoreflect.FileDescriptor + +var file_xds_type_v3_cel_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x65, + 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x26, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x63, 0x65, 0x6c, + 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbd, 0x02, 0x0a, + 0x0d, 0x43, 0x65, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x4b, + 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x73, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, + 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, + 0x0a, 0x70, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x4e, 0x0a, 0x0c, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, + 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0b, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x3c, 0x0a, 0x0f, 0x63, + 0x65, 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x70, 0x61, 0x72, 0x73, 0x65, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x63, 0x65, 0x6c, 0x45, + 0x78, 0x70, 0x72, 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x12, 0x3f, 0x0a, 0x10, 0x63, 0x65, 0x6c, + 0x5f, 0x65, 0x78, 0x70, 
0x72, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0e, 0x63, 0x65, 0x6c, 0x45, + 0x78, 0x70, 0x72, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x42, 0x10, 0x0a, 0x0e, 0x65, 0x78, + 0x70, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x22, 0x9e, 0x01, 0x0a, + 0x10, 0x43, 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x12, 0x47, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x63, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x65, + 0x78, 0x70, 0x72, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x12, 0x41, 0x0a, 0x0d, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x50, 0xd2, + 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, + 0x08, 0x43, 0x65, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, + 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_type_v3_cel_proto_rawDescOnce sync.Once + file_xds_type_v3_cel_proto_rawDescData = file_xds_type_v3_cel_proto_rawDesc +) + +func file_xds_type_v3_cel_proto_rawDescGZIP() []byte { + file_xds_type_v3_cel_proto_rawDescOnce.Do(func() { + file_xds_type_v3_cel_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_v3_cel_proto_rawDescData) + }) + return file_xds_type_v3_cel_proto_rawDescData +} + +var file_xds_type_v3_cel_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_xds_type_v3_cel_proto_goTypes = []interface{}{ + (*CelExpression)(nil), // 0: xds.type.v3.CelExpression + (*CelExtractString)(nil), // 1: xds.type.v3.CelExtractString + (*v1alpha1.ParsedExpr)(nil), // 2: google.api.expr.v1alpha1.ParsedExpr + (*v1alpha1.CheckedExpr)(nil), // 3: google.api.expr.v1alpha1.CheckedExpr + (*expr.ParsedExpr)(nil), // 4: cel.expr.ParsedExpr + (*expr.CheckedExpr)(nil), // 5: cel.expr.CheckedExpr + (*wrapperspb.StringValue)(nil), // 6: google.protobuf.StringValue +} +var file_xds_type_v3_cel_proto_depIdxs = []int32{ + 2, // 0: xds.type.v3.CelExpression.parsed_expr:type_name -> google.api.expr.v1alpha1.ParsedExpr + 3, // 1: xds.type.v3.CelExpression.checked_expr:type_name -> google.api.expr.v1alpha1.CheckedExpr + 4, // 2: xds.type.v3.CelExpression.cel_expr_parsed:type_name -> cel.expr.ParsedExpr + 5, // 3: xds.type.v3.CelExpression.cel_expr_checked:type_name -> cel.expr.CheckedExpr + 0, // 4: xds.type.v3.CelExtractString.expr_extract:type_name -> xds.type.v3.CelExpression + 6, // 5: 
xds.type.v3.CelExtractString.default_value:type_name -> google.protobuf.StringValue + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_xds_type_v3_cel_proto_init() } +func file_xds_type_v3_cel_proto_init() { + if File_xds_type_v3_cel_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_type_v3_cel_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CelExpression); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_v3_cel_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CelExtractString); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_xds_type_v3_cel_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*CelExpression_ParsedExpr)(nil), + (*CelExpression_CheckedExpr)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_type_v3_cel_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_type_v3_cel_proto_goTypes, + DependencyIndexes: file_xds_type_v3_cel_proto_depIdxs, + MessageInfos: file_xds_type_v3_cel_proto_msgTypes, + }.Build() + File_xds_type_v3_cel_proto = out.File + file_xds_type_v3_cel_proto_rawDesc = nil + file_xds_type_v3_cel_proto_goTypes = nil + file_xds_type_v3_cel_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.validate.go new file mode 100644 index 0000000000..0855edee9b --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.validate.go @@ -0,0 +1,450 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/type/v3/cel.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on CelExpression with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *CelExpression) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CelExpression with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in CelExpressionMultiError, or +// nil if none found. 
+func (m *CelExpression) ValidateAll() error { + return m.validate(true) +} + +func (m *CelExpression) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetCelExprParsed()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CelExpressionValidationError{ + field: "CelExprParsed", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CelExpressionValidationError{ + field: "CelExprParsed", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCelExprParsed()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CelExpressionValidationError{ + field: "CelExprParsed", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCelExprChecked()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CelExpressionValidationError{ + field: "CelExprChecked", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CelExpressionValidationError{ + field: "CelExprChecked", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCelExprChecked()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CelExpressionValidationError{ + field: "CelExprChecked", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.ExprSpecifier.(type) { + case *CelExpression_ParsedExpr: + if v == nil { + err := CelExpressionValidationError{ + field: "ExprSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetParsedExpr()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CelExpressionValidationError{ + field: "ParsedExpr", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CelExpressionValidationError{ + field: "ParsedExpr", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetParsedExpr()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CelExpressionValidationError{ + field: "ParsedExpr", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *CelExpression_CheckedExpr: + if v == nil { + err := CelExpressionValidationError{ + field: "ExprSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetCheckedExpr()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CelExpressionValidationError{ + field: "CheckedExpr", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CelExpressionValidationError{ + 
field: "CheckedExpr", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCheckedExpr()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CelExpressionValidationError{ + field: "CheckedExpr", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return CelExpressionMultiError(errors) + } + + return nil +} + +// CelExpressionMultiError is an error wrapping multiple validation errors +// returned by CelExpression.ValidateAll() if the designated constraints +// aren't met. +type CelExpressionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CelExpressionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CelExpressionMultiError) AllErrors() []error { return m } + +// CelExpressionValidationError is the validation error returned by +// CelExpression.Validate if the designated constraints aren't met. +type CelExpressionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CelExpressionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CelExpressionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CelExpressionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CelExpressionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CelExpressionValidationError) ErrorName() string { return "CelExpressionValidationError" } + +// Error satisfies the builtin error interface +func (e CelExpressionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCelExpression.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CelExpressionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CelExpressionValidationError{} + +// Validate checks the field values on CelExtractString with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *CelExtractString) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CelExtractString with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CelExtractStringMultiError, or nil if none found. 
+func (m *CelExtractString) ValidateAll() error { + return m.validate(true) +} + +func (m *CelExtractString) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetExprExtract() == nil { + err := CelExtractStringValidationError{ + field: "ExprExtract", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetExprExtract()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CelExtractStringValidationError{ + field: "ExprExtract", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CelExtractStringValidationError{ + field: "ExprExtract", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExprExtract()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CelExtractStringValidationError{ + field: "ExprExtract", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetDefaultValue()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CelExtractStringValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CelExtractStringValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDefaultValue()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CelExtractStringValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return CelExtractStringMultiError(errors) + } + + return nil +} + +// CelExtractStringMultiError is an error wrapping multiple validation errors +// returned by CelExtractString.ValidateAll() if the designated constraints +// aren't met. +type CelExtractStringMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CelExtractStringMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CelExtractStringMultiError) AllErrors() []error { return m } + +// CelExtractStringValidationError is the validation error returned by +// CelExtractString.Validate if the designated constraints aren't met. +type CelExtractStringValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CelExtractStringValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CelExtractStringValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CelExtractStringValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CelExtractStringValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e CelExtractStringValidationError) ErrorName() string { return "CelExtractStringValidationError" } + +// Error satisfies the builtin error interface +func (e CelExtractStringValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCelExtractString.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CelExtractStringValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CelExtractStringValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.go b/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.go new file mode 100644 index 0000000000..ca9d3e1b7f --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.go @@ -0,0 +1,298 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/type/v3/range.proto + +package v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Int64Range struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` + End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` +} + +func (x *Int64Range) Reset() { + *x = Int64Range{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_v3_range_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int64Range) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int64Range) ProtoMessage() {} + +func (x *Int64Range) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_v3_range_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int64Range.ProtoReflect.Descriptor instead. 
+func (*Int64Range) Descriptor() ([]byte, []int) { + return file_xds_type_v3_range_proto_rawDescGZIP(), []int{0} +} + +func (x *Int64Range) GetStart() int64 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *Int64Range) GetEnd() int64 { + if x != nil { + return x.End + } + return 0 +} + +type Int32Range struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start int32 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` + End int32 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` +} + +func (x *Int32Range) Reset() { + *x = Int32Range{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_v3_range_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int32Range) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int32Range) ProtoMessage() {} + +func (x *Int32Range) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_v3_range_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int32Range.ProtoReflect.Descriptor instead. +func (*Int32Range) Descriptor() ([]byte, []int) { + return file_xds_type_v3_range_proto_rawDescGZIP(), []int{1} +} + +func (x *Int32Range) GetStart() int32 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *Int32Range) GetEnd() int32 { + if x != nil { + return x.End + } + return 0 +} + +type DoubleRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start float64 `protobuf:"fixed64,1,opt,name=start,proto3" json:"start,omitempty"` + End float64 `protobuf:"fixed64,2,opt,name=end,proto3" json:"end,omitempty"` +} + +func (x *DoubleRange) Reset() { + *x = DoubleRange{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_v3_range_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DoubleRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DoubleRange) ProtoMessage() {} + +func (x *DoubleRange) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_v3_range_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DoubleRange.ProtoReflect.Descriptor instead. 
+func (*DoubleRange) Descriptor() ([]byte, []int) { + return file_xds_type_v3_range_proto_rawDescGZIP(), []int{2} +} + +func (x *DoubleRange) GetStart() float64 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *DoubleRange) GetEnd() float64 { + if x != nil { + return x.End + } + return 0 +} + +var File_xds_type_v3_range_proto protoreflect.FileDescriptor + +var file_xds_type_v3_range_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x22, 0x34, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x34, 0x0a, 0x0a, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, + 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x42, 0x4a, 0x0a, 0x16, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, + 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_type_v3_range_proto_rawDescOnce sync.Once + file_xds_type_v3_range_proto_rawDescData = file_xds_type_v3_range_proto_rawDesc +) + +func file_xds_type_v3_range_proto_rawDescGZIP() []byte { + file_xds_type_v3_range_proto_rawDescOnce.Do(func() { + file_xds_type_v3_range_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_v3_range_proto_rawDescData) + }) + return file_xds_type_v3_range_proto_rawDescData +} + +var file_xds_type_v3_range_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_xds_type_v3_range_proto_goTypes = []interface{}{ + (*Int64Range)(nil), // 0: xds.type.v3.Int64Range + (*Int32Range)(nil), // 1: xds.type.v3.Int32Range + (*DoubleRange)(nil), // 2: xds.type.v3.DoubleRange +} +var file_xds_type_v3_range_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_xds_type_v3_range_proto_init() } +func file_xds_type_v3_range_proto_init() { + if File_xds_type_v3_range_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_type_v3_range_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Int64Range); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_v3_range_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Int32Range); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_xds_type_v3_range_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DoubleRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_type_v3_range_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_type_v3_range_proto_goTypes, + DependencyIndexes: file_xds_type_v3_range_proto_depIdxs, + MessageInfos: file_xds_type_v3_range_proto_msgTypes, + }.Build() + File_xds_type_v3_range_proto = out.File + file_xds_type_v3_range_proto_rawDesc = nil + file_xds_type_v3_range_proto_goTypes = nil + file_xds_type_v3_range_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.validate.go new file mode 100644 index 0000000000..ccaf418e5d --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.validate.go @@ -0,0 +1,345 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/type/v3/range.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Int64Range with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Int64Range) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Int64Range with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in Int64RangeMultiError, or +// nil if none found. +func (m *Int64Range) ValidateAll() error { + return m.validate(true) +} + +func (m *Int64Range) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Start + + // no validation rules for End + + if len(errors) > 0 { + return Int64RangeMultiError(errors) + } + + return nil +} + +// Int64RangeMultiError is an error wrapping multiple validation errors +// returned by Int64Range.ValidateAll() if the designated constraints aren't met. +type Int64RangeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Int64RangeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m Int64RangeMultiError) AllErrors() []error { return m } + +// Int64RangeValidationError is the validation error returned by +// Int64Range.Validate if the designated constraints aren't met. +type Int64RangeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Int64RangeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Int64RangeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Int64RangeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Int64RangeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Int64RangeValidationError) ErrorName() string { return "Int64RangeValidationError" } + +// Error satisfies the builtin error interface +func (e Int64RangeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sInt64Range.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Int64RangeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Int64RangeValidationError{} + +// Validate checks the field values on Int32Range with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Int32Range) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Int32Range with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in Int32RangeMultiError, or +// nil if none found. +func (m *Int32Range) ValidateAll() error { + return m.validate(true) +} + +func (m *Int32Range) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Start + + // no validation rules for End + + if len(errors) > 0 { + return Int32RangeMultiError(errors) + } + + return nil +} + +// Int32RangeMultiError is an error wrapping multiple validation errors +// returned by Int32Range.ValidateAll() if the designated constraints aren't met. +type Int32RangeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Int32RangeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Int32RangeMultiError) AllErrors() []error { return m } + +// Int32RangeValidationError is the validation error returned by +// Int32Range.Validate if the designated constraints aren't met. +type Int32RangeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Int32RangeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Int32RangeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Int32RangeValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e Int32RangeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Int32RangeValidationError) ErrorName() string { return "Int32RangeValidationError" } + +// Error satisfies the builtin error interface +func (e Int32RangeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sInt32Range.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Int32RangeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Int32RangeValidationError{} + +// Validate checks the field values on DoubleRange with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *DoubleRange) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DoubleRange with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in DoubleRangeMultiError, or +// nil if none found. +func (m *DoubleRange) ValidateAll() error { + return m.validate(true) +} + +func (m *DoubleRange) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Start + + // no validation rules for End + + if len(errors) > 0 { + return DoubleRangeMultiError(errors) + } + + return nil +} + +// DoubleRangeMultiError is an error wrapping multiple validation errors +// returned by DoubleRange.ValidateAll() if the designated constraints aren't met. +type DoubleRangeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DoubleRangeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DoubleRangeMultiError) AllErrors() []error { return m } + +// DoubleRangeValidationError is the validation error returned by +// DoubleRange.Validate if the designated constraints aren't met. +type DoubleRangeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DoubleRangeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DoubleRangeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DoubleRangeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DoubleRangeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DoubleRangeValidationError) ErrorName() string { return "DoubleRangeValidationError" } + +// Error satisfies the builtin error interface +func (e DoubleRangeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDoubleRange.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DoubleRangeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DoubleRangeValidationError{} diff --git a/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.go b/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.go new file mode 100644 index 0000000000..72ec85ed60 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.go @@ -0,0 +1,163 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.27.0--rc2 +// source: xds/type/v3/typed_struct.proto + +package v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type TypedStruct struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + Value *structpb.Struct `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *TypedStruct) Reset() { + *x = TypedStruct{} + if protoimpl.UnsafeEnabled { + mi := &file_xds_type_v3_typed_struct_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypedStruct) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypedStruct) ProtoMessage() {} + +func (x *TypedStruct) ProtoReflect() protoreflect.Message { + mi := &file_xds_type_v3_typed_struct_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypedStruct.ProtoReflect.Descriptor instead. 
+func (*TypedStruct) Descriptor() ([]byte, []int) { + return file_xds_type_v3_typed_struct_proto_rawDescGZIP(), []int{0} +} + +func (x *TypedStruct) GetTypeUrl() string { + if x != nil { + return x.TypeUrl + } + return "" +} + +func (x *TypedStruct) GetValue() *structpb.Struct { + if x != nil { + return x.Value + } + return nil +} + +var File_xds_type_v3_typed_struct_proto protoreflect.FileDescriptor + +var file_xds_type_v3_typed_struct_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1c, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, + 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x57, 0x0a, 0x0b, 0x54, + 0x79, 0x70, 0x65, 0x64, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, + 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, + 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x50, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x10, + 0x54, 0x79, 0x70, 0x65, 0x64, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, + 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_xds_type_v3_typed_struct_proto_rawDescOnce sync.Once + file_xds_type_v3_typed_struct_proto_rawDescData = file_xds_type_v3_typed_struct_proto_rawDesc +) + +func file_xds_type_v3_typed_struct_proto_rawDescGZIP() []byte { + file_xds_type_v3_typed_struct_proto_rawDescOnce.Do(func() { + file_xds_type_v3_typed_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_v3_typed_struct_proto_rawDescData) + }) + return file_xds_type_v3_typed_struct_proto_rawDescData +} + +var file_xds_type_v3_typed_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_xds_type_v3_typed_struct_proto_goTypes = []interface{}{ + (*TypedStruct)(nil), // 0: xds.type.v3.TypedStruct + (*structpb.Struct)(nil), // 1: google.protobuf.Struct +} +var file_xds_type_v3_typed_struct_proto_depIdxs = []int32{ + 1, // 0: xds.type.v3.TypedStruct.value:type_name -> google.protobuf.Struct + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_xds_type_v3_typed_struct_proto_init() } +func file_xds_type_v3_typed_struct_proto_init() { + if File_xds_type_v3_typed_struct_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_xds_type_v3_typed_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TypedStruct); i { + case 0: + return &v.state + 
case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_xds_type_v3_typed_struct_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_xds_type_v3_typed_struct_proto_goTypes, + DependencyIndexes: file_xds_type_v3_typed_struct_proto_depIdxs, + MessageInfos: file_xds_type_v3_typed_struct_proto_msgTypes, + }.Build() + File_xds_type_v3_typed_struct_proto = out.File + file_xds_type_v3_typed_struct_proto_rawDesc = nil + file_xds_type_v3_typed_struct_proto_goTypes = nil + file_xds_type_v3_typed_struct_proto_depIdxs = nil +} diff --git a/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.validate.go new file mode 100644 index 0000000000..f39bce9066 --- /dev/null +++ b/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.validate.go @@ -0,0 +1,166 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: xds/type/v3/typed_struct.proto + +package v3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on TypedStruct with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *TypedStruct) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TypedStruct with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in TypedStructMultiError, or +// nil if none found. +func (m *TypedStruct) ValidateAll() error { + return m.validate(true) +} + +func (m *TypedStruct) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for TypeUrl + + if all { + switch v := interface{}(m.GetValue()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TypedStructValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TypedStructValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValue()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TypedStructValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return TypedStructMultiError(errors) + } + + return nil +} + +// TypedStructMultiError is an error wrapping multiple validation errors +// returned by TypedStruct.ValidateAll() if the designated constraints aren't met. 
+type TypedStructMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TypedStructMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TypedStructMultiError) AllErrors() []error { return m } + +// TypedStructValidationError is the validation error returned by +// TypedStruct.Validate if the designated constraints aren't met. +type TypedStructValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TypedStructValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TypedStructValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TypedStructValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TypedStructValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TypedStructValidationError) ErrorName() string { return "TypedStructValidationError" } + +// Error satisfies the builtin error interface +func (e TypedStructValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTypedStruct.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TypedStructValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TypedStructValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/LICENSE b/vendor/github.com/envoyproxy/go-control-plane/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.go new file mode 100644 index 0000000000..13d644dba6 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.go @@ -0,0 +1,607 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/admin/v3/certs.proto + +package adminv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Proto representation of certificate details. Admin endpoint uses this wrapper for “/certs“ to +// display certificate information. See :ref:`/certs ` for more +// information. +type Certificates struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // List of certificates known to an Envoy. + Certificates []*Certificate `protobuf:"bytes,1,rep,name=certificates,proto3" json:"certificates,omitempty"` +} + +func (x *Certificates) Reset() { + *x = Certificates{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_certs_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Certificates) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Certificates) ProtoMessage() {} + +func (x *Certificates) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_certs_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Certificates.ProtoReflect.Descriptor instead. +func (*Certificates) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_certs_proto_rawDescGZIP(), []int{0} +} + +func (x *Certificates) GetCertificates() []*Certificate { + if x != nil { + return x.Certificates + } + return nil +} + +type Certificate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Details of CA certificate. + CaCert []*CertificateDetails `protobuf:"bytes,1,rep,name=ca_cert,json=caCert,proto3" json:"ca_cert,omitempty"` + // Details of Certificate Chain + CertChain []*CertificateDetails `protobuf:"bytes,2,rep,name=cert_chain,json=certChain,proto3" json:"cert_chain,omitempty"` +} + +func (x *Certificate) Reset() { + *x = Certificate{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_certs_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Certificate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Certificate) ProtoMessage() {} + +func (x *Certificate) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_certs_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Certificate.ProtoReflect.Descriptor instead. +func (*Certificate) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_certs_proto_rawDescGZIP(), []int{1} +} + +func (x *Certificate) GetCaCert() []*CertificateDetails { + if x != nil { + return x.CaCert + } + return nil +} + +func (x *Certificate) GetCertChain() []*CertificateDetails { + if x != nil { + return x.CertChain + } + return nil +} + +// [#next-free-field: 8] +type CertificateDetails struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Path of the certificate. + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + // Certificate Serial Number. + SerialNumber string `protobuf:"bytes,2,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"` + // List of Subject Alternate names. 
+ SubjectAltNames []*SubjectAlternateName `protobuf:"bytes,3,rep,name=subject_alt_names,json=subjectAltNames,proto3" json:"subject_alt_names,omitempty"` + // Minimum of days until expiration of certificate and it's chain. + DaysUntilExpiration uint64 `protobuf:"varint,4,opt,name=days_until_expiration,json=daysUntilExpiration,proto3" json:"days_until_expiration,omitempty"` + // Indicates the time from which the certificate is valid. + ValidFrom *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=valid_from,json=validFrom,proto3" json:"valid_from,omitempty"` + // Indicates the time at which the certificate expires. + ExpirationTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=expiration_time,json=expirationTime,proto3" json:"expiration_time,omitempty"` + // Details related to the OCSP response associated with this certificate, if any. + OcspDetails *CertificateDetails_OcspDetails `protobuf:"bytes,7,opt,name=ocsp_details,json=ocspDetails,proto3" json:"ocsp_details,omitempty"` +} + +func (x *CertificateDetails) Reset() { + *x = CertificateDetails{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_certs_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CertificateDetails) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CertificateDetails) ProtoMessage() {} + +func (x *CertificateDetails) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_certs_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CertificateDetails.ProtoReflect.Descriptor instead. +func (*CertificateDetails) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_certs_proto_rawDescGZIP(), []int{2} +} + +func (x *CertificateDetails) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *CertificateDetails) GetSerialNumber() string { + if x != nil { + return x.SerialNumber + } + return "" +} + +func (x *CertificateDetails) GetSubjectAltNames() []*SubjectAlternateName { + if x != nil { + return x.SubjectAltNames + } + return nil +} + +func (x *CertificateDetails) GetDaysUntilExpiration() uint64 { + if x != nil { + return x.DaysUntilExpiration + } + return 0 +} + +func (x *CertificateDetails) GetValidFrom() *timestamppb.Timestamp { + if x != nil { + return x.ValidFrom + } + return nil +} + +func (x *CertificateDetails) GetExpirationTime() *timestamppb.Timestamp { + if x != nil { + return x.ExpirationTime + } + return nil +} + +func (x *CertificateDetails) GetOcspDetails() *CertificateDetails_OcspDetails { + if x != nil { + return x.OcspDetails + } + return nil +} + +type SubjectAlternateName struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Subject Alternate Name. 
+ // + // Types that are assignable to Name: + // + // *SubjectAlternateName_Dns + // *SubjectAlternateName_Uri + // *SubjectAlternateName_IpAddress + Name isSubjectAlternateName_Name `protobuf_oneof:"name"` +} + +func (x *SubjectAlternateName) Reset() { + *x = SubjectAlternateName{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_certs_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubjectAlternateName) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubjectAlternateName) ProtoMessage() {} + +func (x *SubjectAlternateName) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_certs_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubjectAlternateName.ProtoReflect.Descriptor instead. +func (*SubjectAlternateName) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_certs_proto_rawDescGZIP(), []int{3} +} + +func (m *SubjectAlternateName) GetName() isSubjectAlternateName_Name { + if m != nil { + return m.Name + } + return nil +} + +func (x *SubjectAlternateName) GetDns() string { + if x, ok := x.GetName().(*SubjectAlternateName_Dns); ok { + return x.Dns + } + return "" +} + +func (x *SubjectAlternateName) GetUri() string { + if x, ok := x.GetName().(*SubjectAlternateName_Uri); ok { + return x.Uri + } + return "" +} + +func (x *SubjectAlternateName) GetIpAddress() string { + if x, ok := x.GetName().(*SubjectAlternateName_IpAddress); ok { + return x.IpAddress + } + return "" +} + +type isSubjectAlternateName_Name interface { + isSubjectAlternateName_Name() +} + +type SubjectAlternateName_Dns struct { + Dns string `protobuf:"bytes,1,opt,name=dns,proto3,oneof"` +} + +type SubjectAlternateName_Uri struct { + Uri string `protobuf:"bytes,2,opt,name=uri,proto3,oneof"` +} + +type SubjectAlternateName_IpAddress struct { + IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress,proto3,oneof"` +} + +func (*SubjectAlternateName_Dns) isSubjectAlternateName_Name() {} + +func (*SubjectAlternateName_Uri) isSubjectAlternateName_Name() {} + +func (*SubjectAlternateName_IpAddress) isSubjectAlternateName_Name() {} + +type CertificateDetails_OcspDetails struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Indicates the time from which the OCSP response is valid. + ValidFrom *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=valid_from,json=validFrom,proto3" json:"valid_from,omitempty"` + // Indicates the time at which the OCSP response expires. 
+ Expiration *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expiration,proto3" json:"expiration,omitempty"` +} + +func (x *CertificateDetails_OcspDetails) Reset() { + *x = CertificateDetails_OcspDetails{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_certs_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CertificateDetails_OcspDetails) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CertificateDetails_OcspDetails) ProtoMessage() {} + +func (x *CertificateDetails_OcspDetails) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_certs_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CertificateDetails_OcspDetails.ProtoReflect.Descriptor instead. +func (*CertificateDetails_OcspDetails) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_certs_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *CertificateDetails_OcspDetails) GetValidFrom() *timestamppb.Timestamp { + if x != nil { + return x.ValidFrom + } + return nil +} + +func (x *CertificateDetails_OcspDetails) GetExpiration() *timestamppb.Timestamp { + if x != nil { + return x.Expiration + } + return nil +} + +var File_envoy_admin_v3_certs_proto protoreflect.FileDescriptor + +var file_envoy_admin_v3_certs_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x2f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0x78, 0x0a, 0x0c, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, + 0x3f, 0x0a, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x52, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, + 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x22, 0xb5, 0x01, 0x0a, 0x0b, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x63, 0x61, 0x5f, + 0x63, 0x65, 0x72, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 
0x61, 0x69, 0x6c, 0x73, 0x52, 0x06, + 0x63, 0x61, 0x43, 0x65, 0x72, 0x74, 0x12, 0x41, 0x0a, 0x0a, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x63, + 0x68, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x09, + 0x63, 0x65, 0x72, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, + 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x22, 0xdc, 0x04, 0x0a, 0x12, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x23, 0x0a, 0x0d, + 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x12, 0x50, 0x0a, 0x11, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x75, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x52, 0x0f, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, + 0x6d, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x75, 0x6e, 0x74, 0x69, + 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x13, 0x64, 0x61, 0x79, 0x73, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x45, 0x78, 0x70, + 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x46, 0x72, + 0x6f, 0x6d, 0x12, 0x43, 0x0a, 0x0f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x6f, 0x63, 0x73, 0x70, 0x5f, + 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x73, 0x2e, 0x4f, 0x63, 0x73, 0x70, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x0b, 0x6f, + 0x63, 0x73, 0x70, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, 0x84, 0x01, 0x0a, 0x0b, 0x4f, + 0x63, 0x73, 0x70, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x39, 0x0a, 0x0a, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x22, 0x98, 0x01, 0x0a, 0x14, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x03, 0x64, 0x6e, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x64, 0x6e, 0x73, 0x12, 0x12, 0x0a, + 0x03, 0x75, 0x72, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x72, + 0x69, 0x12, 0x1f, 0x0a, 0x0a, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, + 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x42, 0x73, 0xba, 0x80, 0xc8, + 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x43, 0x65, 0x72, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_admin_v3_certs_proto_rawDescOnce sync.Once + file_envoy_admin_v3_certs_proto_rawDescData = file_envoy_admin_v3_certs_proto_rawDesc +) + +func file_envoy_admin_v3_certs_proto_rawDescGZIP() []byte { + file_envoy_admin_v3_certs_proto_rawDescOnce.Do(func() { + file_envoy_admin_v3_certs_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_certs_proto_rawDescData) + }) + return file_envoy_admin_v3_certs_proto_rawDescData +} + +var file_envoy_admin_v3_certs_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_envoy_admin_v3_certs_proto_goTypes = []interface{}{ + (*Certificates)(nil), // 0: envoy.admin.v3.Certificates + (*Certificate)(nil), // 1: envoy.admin.v3.Certificate + (*CertificateDetails)(nil), // 2: envoy.admin.v3.CertificateDetails + (*SubjectAlternateName)(nil), // 3: envoy.admin.v3.SubjectAlternateName + (*CertificateDetails_OcspDetails)(nil), // 4: 
envoy.admin.v3.CertificateDetails.OcspDetails + (*timestamppb.Timestamp)(nil), // 5: google.protobuf.Timestamp +} +var file_envoy_admin_v3_certs_proto_depIdxs = []int32{ + 1, // 0: envoy.admin.v3.Certificates.certificates:type_name -> envoy.admin.v3.Certificate + 2, // 1: envoy.admin.v3.Certificate.ca_cert:type_name -> envoy.admin.v3.CertificateDetails + 2, // 2: envoy.admin.v3.Certificate.cert_chain:type_name -> envoy.admin.v3.CertificateDetails + 3, // 3: envoy.admin.v3.CertificateDetails.subject_alt_names:type_name -> envoy.admin.v3.SubjectAlternateName + 5, // 4: envoy.admin.v3.CertificateDetails.valid_from:type_name -> google.protobuf.Timestamp + 5, // 5: envoy.admin.v3.CertificateDetails.expiration_time:type_name -> google.protobuf.Timestamp + 4, // 6: envoy.admin.v3.CertificateDetails.ocsp_details:type_name -> envoy.admin.v3.CertificateDetails.OcspDetails + 5, // 7: envoy.admin.v3.CertificateDetails.OcspDetails.valid_from:type_name -> google.protobuf.Timestamp + 5, // 8: envoy.admin.v3.CertificateDetails.OcspDetails.expiration:type_name -> google.protobuf.Timestamp + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_envoy_admin_v3_certs_proto_init() } +func file_envoy_admin_v3_certs_proto_init() { + if File_envoy_admin_v3_certs_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_admin_v3_certs_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Certificates); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_certs_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Certificate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_certs_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CertificateDetails); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_certs_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubjectAlternateName); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_certs_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CertificateDetails_OcspDetails); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_admin_v3_certs_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*SubjectAlternateName_Dns)(nil), + (*SubjectAlternateName_Uri)(nil), + (*SubjectAlternateName_IpAddress)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_admin_v3_certs_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_admin_v3_certs_proto_goTypes, + DependencyIndexes: file_envoy_admin_v3_certs_proto_depIdxs, + MessageInfos: file_envoy_admin_v3_certs_proto_msgTypes, + }.Build() + 
File_envoy_admin_v3_certs_proto = out.File + file_envoy_admin_v3_certs_proto_rawDesc = nil + file_envoy_admin_v3_certs_proto_goTypes = nil + file_envoy_admin_v3_certs_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.validate.go new file mode 100644 index 0000000000..413895689e --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.validate.go @@ -0,0 +1,870 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/admin/v3/certs.proto + +package adminv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Certificates with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Certificates) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Certificates with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in CertificatesMultiError, or +// nil if none found. +func (m *Certificates) ValidateAll() error { + return m.validate(true) +} + +func (m *Certificates) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetCertificates() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificatesValidationError{ + field: fmt.Sprintf("Certificates[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificatesValidationError{ + field: fmt.Sprintf("Certificates[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificatesValidationError{ + field: fmt.Sprintf("Certificates[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return CertificatesMultiError(errors) + } + + return nil +} + +// CertificatesMultiError is an error wrapping multiple validation errors +// returned by Certificates.ValidateAll() if the designated constraints aren't met. +type CertificatesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CertificatesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m CertificatesMultiError) AllErrors() []error { return m } + +// CertificatesValidationError is the validation error returned by +// Certificates.Validate if the designated constraints aren't met. +type CertificatesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CertificatesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CertificatesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CertificatesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CertificatesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CertificatesValidationError) ErrorName() string { return "CertificatesValidationError" } + +// Error satisfies the builtin error interface +func (e CertificatesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCertificates.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CertificatesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CertificatesValidationError{} + +// Validate checks the field values on Certificate with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Certificate) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Certificate with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in CertificateMultiError, or +// nil if none found. 
+func (m *Certificate) ValidateAll() error { + return m.validate(true) +} + +func (m *Certificate) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetCaCert() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateValidationError{ + field: fmt.Sprintf("CaCert[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateValidationError{ + field: fmt.Sprintf("CaCert[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateValidationError{ + field: fmt.Sprintf("CaCert[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetCertChain() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateValidationError{ + field: fmt.Sprintf("CertChain[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateValidationError{ + field: fmt.Sprintf("CertChain[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateValidationError{ + field: fmt.Sprintf("CertChain[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return CertificateMultiError(errors) + } + + return nil +} + +// CertificateMultiError is an error wrapping multiple validation errors +// returned by Certificate.ValidateAll() if the designated constraints aren't met. +type CertificateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CertificateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CertificateMultiError) AllErrors() []error { return m } + +// CertificateValidationError is the validation error returned by +// Certificate.Validate if the designated constraints aren't met. +type CertificateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CertificateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CertificateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CertificateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CertificateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e CertificateValidationError) ErrorName() string { return "CertificateValidationError" } + +// Error satisfies the builtin error interface +func (e CertificateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCertificate.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CertificateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CertificateValidationError{} + +// Validate checks the field values on CertificateDetails with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CertificateDetails) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CertificateDetails with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CertificateDetailsMultiError, or nil if none found. +func (m *CertificateDetails) ValidateAll() error { + return m.validate(true) +} + +func (m *CertificateDetails) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Path + + // no validation rules for SerialNumber + + for idx, item := range m.GetSubjectAltNames() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateDetailsValidationError{ + field: fmt.Sprintf("SubjectAltNames[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateDetailsValidationError{ + field: fmt.Sprintf("SubjectAltNames[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateDetailsValidationError{ + field: fmt.Sprintf("SubjectAltNames[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for DaysUntilExpiration + + if all { + switch v := interface{}(m.GetValidFrom()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateDetailsValidationError{ + field: "ValidFrom", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateDetailsValidationError{ + field: "ValidFrom", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValidFrom()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateDetailsValidationError{ + field: "ValidFrom", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetExpirationTime()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateDetailsValidationError{ + field: "ExpirationTime", + reason: "embedded 
message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateDetailsValidationError{ + field: "ExpirationTime", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExpirationTime()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateDetailsValidationError{ + field: "ExpirationTime", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetOcspDetails()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateDetailsValidationError{ + field: "OcspDetails", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateDetailsValidationError{ + field: "OcspDetails", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOcspDetails()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateDetailsValidationError{ + field: "OcspDetails", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return CertificateDetailsMultiError(errors) + } + + return nil +} + +// CertificateDetailsMultiError is an error wrapping multiple validation errors +// returned by CertificateDetails.ValidateAll() if the designated constraints +// aren't met. +type CertificateDetailsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CertificateDetailsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CertificateDetailsMultiError) AllErrors() []error { return m } + +// CertificateDetailsValidationError is the validation error returned by +// CertificateDetails.Validate if the designated constraints aren't met. +type CertificateDetailsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CertificateDetailsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CertificateDetailsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CertificateDetailsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CertificateDetailsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e CertificateDetailsValidationError) ErrorName() string { + return "CertificateDetailsValidationError" +} + +// Error satisfies the builtin error interface +func (e CertificateDetailsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCertificateDetails.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CertificateDetailsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CertificateDetailsValidationError{} + +// Validate checks the field values on SubjectAlternateName with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *SubjectAlternateName) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SubjectAlternateName with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SubjectAlternateNameMultiError, or nil if none found. +func (m *SubjectAlternateName) ValidateAll() error { + return m.validate(true) +} + +func (m *SubjectAlternateName) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + switch v := m.Name.(type) { + case *SubjectAlternateName_Dns: + if v == nil { + err := SubjectAlternateNameValidationError{ + field: "Name", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for Dns + case *SubjectAlternateName_Uri: + if v == nil { + err := SubjectAlternateNameValidationError{ + field: "Name", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for Uri + case *SubjectAlternateName_IpAddress: + if v == nil { + err := SubjectAlternateNameValidationError{ + field: "Name", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for IpAddress + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return SubjectAlternateNameMultiError(errors) + } + + return nil +} + +// SubjectAlternateNameMultiError is an error wrapping multiple validation +// errors returned by SubjectAlternateName.ValidateAll() if the designated +// constraints aren't met. +type SubjectAlternateNameMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SubjectAlternateNameMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SubjectAlternateNameMultiError) AllErrors() []error { return m } + +// SubjectAlternateNameValidationError is the validation error returned by +// SubjectAlternateName.Validate if the designated constraints aren't met. +type SubjectAlternateNameValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SubjectAlternateNameValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e SubjectAlternateNameValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SubjectAlternateNameValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SubjectAlternateNameValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SubjectAlternateNameValidationError) ErrorName() string { + return "SubjectAlternateNameValidationError" +} + +// Error satisfies the builtin error interface +func (e SubjectAlternateNameValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSubjectAlternateName.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SubjectAlternateNameValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SubjectAlternateNameValidationError{} + +// Validate checks the field values on CertificateDetails_OcspDetails with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CertificateDetails_OcspDetails) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CertificateDetails_OcspDetails with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// CertificateDetails_OcspDetailsMultiError, or nil if none found. +func (m *CertificateDetails_OcspDetails) ValidateAll() error { + return m.validate(true) +} + +func (m *CertificateDetails_OcspDetails) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetValidFrom()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateDetails_OcspDetailsValidationError{ + field: "ValidFrom", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateDetails_OcspDetailsValidationError{ + field: "ValidFrom", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValidFrom()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateDetails_OcspDetailsValidationError{ + field: "ValidFrom", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetExpiration()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateDetails_OcspDetailsValidationError{ + field: "Expiration", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateDetails_OcspDetailsValidationError{ + field: "Expiration", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExpiration()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateDetails_OcspDetailsValidationError{ + field: "Expiration", + reason: "embedded message failed 
validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return CertificateDetails_OcspDetailsMultiError(errors) + } + + return nil +} + +// CertificateDetails_OcspDetailsMultiError is an error wrapping multiple +// validation errors returned by CertificateDetails_OcspDetails.ValidateAll() +// if the designated constraints aren't met. +type CertificateDetails_OcspDetailsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CertificateDetails_OcspDetailsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CertificateDetails_OcspDetailsMultiError) AllErrors() []error { return m } + +// CertificateDetails_OcspDetailsValidationError is the validation error +// returned by CertificateDetails_OcspDetails.Validate if the designated +// constraints aren't met. +type CertificateDetails_OcspDetailsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CertificateDetails_OcspDetailsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CertificateDetails_OcspDetailsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CertificateDetails_OcspDetailsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CertificateDetails_OcspDetailsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CertificateDetails_OcspDetailsValidationError) ErrorName() string { + return "CertificateDetails_OcspDetailsValidationError" +} + +// Error satisfies the builtin error interface +func (e CertificateDetails_OcspDetailsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCertificateDetails_OcspDetails.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CertificateDetails_OcspDetailsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CertificateDetails_OcspDetailsValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs_vtproto.pb.go new file mode 100644 index 0000000000..3c325787d2 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs_vtproto.pb.go @@ -0,0 +1,504 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/certs.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + timestamppb "github.com/planetscale/vtprotobuf/types/known/timestamppb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Certificates) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Certificates) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Certificates) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Certificates) > 0 { + for iNdEx := len(m.Certificates) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Certificates[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Certificate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Certificate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Certificate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CertChain) > 0 { + for iNdEx := len(m.CertChain) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.CertChain[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.CaCert) > 0 { + for iNdEx := len(m.CaCert) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.CaCert[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CertificateDetails_OcspDetails) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CertificateDetails_OcspDetails) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CertificateDetails_OcspDetails) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Expiration != nil { + size, err := (*timestamppb.Timestamp)(m.Expiration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.ValidFrom != nil { + size, err := (*timestamppb.Timestamp)(m.ValidFrom).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CertificateDetails) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CertificateDetails) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CertificateDetails) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.OcspDetails != nil { + size, err := m.OcspDetails.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.ExpirationTime != nil { + size, err := (*timestamppb.Timestamp)(m.ExpirationTime).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.ValidFrom != nil { + size, err := (*timestamppb.Timestamp)(m.ValidFrom).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.DaysUntilExpiration != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DaysUntilExpiration)) + i-- + dAtA[i] = 0x20 + } + if len(m.SubjectAltNames) > 0 { + for iNdEx := len(m.SubjectAltNames) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.SubjectAltNames[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.SerialNumber) > 0 { + i -= len(m.SerialNumber) + copy(dAtA[i:], m.SerialNumber) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SerialNumber))) + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SubjectAlternateName) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectAlternateName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubjectAlternateName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Name.(*SubjectAlternateName_IpAddress); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Name.(*SubjectAlternateName_Uri); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Name.(*SubjectAlternateName_Dns); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err 
!= nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *SubjectAlternateName_Dns) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubjectAlternateName_Dns) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Dns) + copy(dAtA[i:], m.Dns) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Dns))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *SubjectAlternateName_Uri) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubjectAlternateName_Uri) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Uri) + copy(dAtA[i:], m.Uri) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Uri))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *SubjectAlternateName_IpAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubjectAlternateName_IpAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.IpAddress) + copy(dAtA[i:], m.IpAddress) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.IpAddress))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *Certificates) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Certificates) > 0 { + for _, e := range m.Certificates { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Certificate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.CaCert) > 0 { + for _, e := range m.CaCert { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.CertChain) > 0 { + for _, e := range m.CertChain { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *CertificateDetails_OcspDetails) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValidFrom != nil { + l = (*timestamppb.Timestamp)(m.ValidFrom).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Expiration != nil { + l = (*timestamppb.Timestamp)(m.Expiration).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CertificateDetails) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.SerialNumber) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.SubjectAltNames) > 0 { + for _, e := range m.SubjectAltNames { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.DaysUntilExpiration != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.DaysUntilExpiration)) + } + if m.ValidFrom != nil { + l = (*timestamppb.Timestamp)(m.ValidFrom).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ExpirationTime != nil { + l = (*timestamppb.Timestamp)(m.ExpirationTime).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OcspDetails != nil { + l = m.OcspDetails.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SubjectAlternateName) SizeVT() (n int) { + if m == nil { 
+ return 0 + } + var l int + _ = l + if vtmsg, ok := m.Name.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *SubjectAlternateName_Dns) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Dns) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *SubjectAlternateName_Uri) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Uri) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *SubjectAlternateName_IpAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.IpAddress) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.go new file mode 100644 index 0000000000..06b79187fd --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.go @@ -0,0 +1,744 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/admin/v3/clusters.proto + +package adminv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v31 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v32 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Admin endpoint uses this wrapper for “/clusters“ to display cluster status information. +// See :ref:`/clusters ` for more information. +type Clusters struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Mapping from cluster name to each cluster's status. + ClusterStatuses []*ClusterStatus `protobuf:"bytes,1,rep,name=cluster_statuses,json=clusterStatuses,proto3" json:"cluster_statuses,omitempty"` +} + +func (x *Clusters) Reset() { + *x = Clusters{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_clusters_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Clusters) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Clusters) ProtoMessage() {} + +func (x *Clusters) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_clusters_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Clusters.ProtoReflect.Descriptor instead. +func (*Clusters) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_clusters_proto_rawDescGZIP(), []int{0} +} + +func (x *Clusters) GetClusterStatuses() []*ClusterStatus { + if x != nil { + return x.ClusterStatuses + } + return nil +} + +// Details an individual cluster's current status. 
+// [#next-free-field: 9] +type ClusterStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the cluster. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Denotes whether this cluster was added via API or configured statically. + AddedViaApi bool `protobuf:"varint,2,opt,name=added_via_api,json=addedViaApi,proto3" json:"added_via_api,omitempty"` + // The success rate threshold used in the last interval. + // If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is “false“, all errors: externally and locally generated were used to calculate the threshold. + // If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is “true“, only externally generated errors were used to calculate the threshold. + // The threshold is used to eject hosts based on their success rate. See + // :ref:`Cluster outlier detection ` documentation for details. + // + // Note: this field may be omitted in any of the three following cases: + // + // 1. There were not enough hosts with enough request volume to proceed with success rate based + // outlier ejection. + // 2. The threshold is computed to be < 0 because a negative value implies that there was no + // threshold for that interval. + // 3. Outlier detection is not enabled for this cluster. + SuccessRateEjectionThreshold *v3.Percent `protobuf:"bytes,3,opt,name=success_rate_ejection_threshold,json=successRateEjectionThreshold,proto3" json:"success_rate_ejection_threshold,omitempty"` + // Mapping from host address to the host's current status. + HostStatuses []*HostStatus `protobuf:"bytes,4,rep,name=host_statuses,json=hostStatuses,proto3" json:"host_statuses,omitempty"` + // The success rate threshold used in the last interval when only locally originated failures were + // taken into account and externally originated errors were treated as success. + // This field should be interpreted only when + // :ref:`outlier_detection.split_external_local_origin_errors` + // is “true“. The threshold is used to eject hosts based on their success rate. + // See :ref:`Cluster outlier detection ` documentation for + // details. + // + // Note: this field may be omitted in any of the three following cases: + // + // 1. There were not enough hosts with enough request volume to proceed with success rate based + // outlier ejection. + // 2. The threshold is computed to be < 0 because a negative value implies that there was no + // threshold for that interval. + // 3. Outlier detection is not enabled for this cluster. + LocalOriginSuccessRateEjectionThreshold *v3.Percent `protobuf:"bytes,5,opt,name=local_origin_success_rate_ejection_threshold,json=localOriginSuccessRateEjectionThreshold,proto3" json:"local_origin_success_rate_ejection_threshold,omitempty"` + // :ref:`Circuit breaking ` settings of the cluster. + CircuitBreakers *v31.CircuitBreakers `protobuf:"bytes,6,opt,name=circuit_breakers,json=circuitBreakers,proto3" json:"circuit_breakers,omitempty"` + // Observability name of the cluster. + ObservabilityName string `protobuf:"bytes,7,opt,name=observability_name,json=observabilityName,proto3" json:"observability_name,omitempty"` + // The :ref:`EDS service name ` if the cluster is an EDS cluster. 
+ EdsServiceName string `protobuf:"bytes,8,opt,name=eds_service_name,json=edsServiceName,proto3" json:"eds_service_name,omitempty"` +} + +func (x *ClusterStatus) Reset() { + *x = ClusterStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_clusters_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClusterStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClusterStatus) ProtoMessage() {} + +func (x *ClusterStatus) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_clusters_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClusterStatus.ProtoReflect.Descriptor instead. +func (*ClusterStatus) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_clusters_proto_rawDescGZIP(), []int{1} +} + +func (x *ClusterStatus) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ClusterStatus) GetAddedViaApi() bool { + if x != nil { + return x.AddedViaApi + } + return false +} + +func (x *ClusterStatus) GetSuccessRateEjectionThreshold() *v3.Percent { + if x != nil { + return x.SuccessRateEjectionThreshold + } + return nil +} + +func (x *ClusterStatus) GetHostStatuses() []*HostStatus { + if x != nil { + return x.HostStatuses + } + return nil +} + +func (x *ClusterStatus) GetLocalOriginSuccessRateEjectionThreshold() *v3.Percent { + if x != nil { + return x.LocalOriginSuccessRateEjectionThreshold + } + return nil +} + +func (x *ClusterStatus) GetCircuitBreakers() *v31.CircuitBreakers { + if x != nil { + return x.CircuitBreakers + } + return nil +} + +func (x *ClusterStatus) GetObservabilityName() string { + if x != nil { + return x.ObservabilityName + } + return "" +} + +func (x *ClusterStatus) GetEdsServiceName() string { + if x != nil { + return x.EdsServiceName + } + return "" +} + +// Current state of a particular host. +// [#next-free-field: 10] +type HostStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Address of this host. + Address *v32.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // List of stats specific to this host. + Stats []*SimpleMetric `protobuf:"bytes,2,rep,name=stats,proto3" json:"stats,omitempty"` + // The host's current health status. + HealthStatus *HostHealthStatus `protobuf:"bytes,3,opt,name=health_status,json=healthStatus,proto3" json:"health_status,omitempty"` + // Request success rate for this host over the last calculated interval. + // If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is “false“, all errors: externally and locally generated were used in success rate + // calculation. If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is “true“, only externally generated errors were used in success rate calculation. + // See :ref:`Cluster outlier detection ` documentation for + // details. + // + // Note: the message will not be present if host did not have enough request volume to calculate + // success rate or the cluster did not have enough hosts to run through success rate outlier + // ejection. + SuccessRate *v3.Percent `protobuf:"bytes,4,opt,name=success_rate,json=successRate,proto3" json:"success_rate,omitempty"` + // The host's weight. 
If not configured, the value defaults to 1. + Weight uint32 `protobuf:"varint,5,opt,name=weight,proto3" json:"weight,omitempty"` + // The hostname of the host, if applicable. + Hostname string `protobuf:"bytes,6,opt,name=hostname,proto3" json:"hostname,omitempty"` + // The host's priority. If not configured, the value defaults to 0 (highest priority). + Priority uint32 `protobuf:"varint,7,opt,name=priority,proto3" json:"priority,omitempty"` + // Request success rate for this host over the last calculated + // interval when only locally originated errors are taken into account and externally originated + // errors were treated as success. + // This field should be interpreted only when + // :ref:`outlier_detection.split_external_local_origin_errors` + // is “true“. + // See :ref:`Cluster outlier detection ` documentation for + // details. + // + // Note: the message will not be present if host did not have enough request volume to calculate + // success rate or the cluster did not have enough hosts to run through success rate outlier + // ejection. + LocalOriginSuccessRate *v3.Percent `protobuf:"bytes,8,opt,name=local_origin_success_rate,json=localOriginSuccessRate,proto3" json:"local_origin_success_rate,omitempty"` + // locality of the host. + Locality *v32.Locality `protobuf:"bytes,9,opt,name=locality,proto3" json:"locality,omitempty"` +} + +func (x *HostStatus) Reset() { + *x = HostStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_clusters_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HostStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HostStatus) ProtoMessage() {} + +func (x *HostStatus) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_clusters_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HostStatus.ProtoReflect.Descriptor instead. +func (*HostStatus) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_clusters_proto_rawDescGZIP(), []int{2} +} + +func (x *HostStatus) GetAddress() *v32.Address { + if x != nil { + return x.Address + } + return nil +} + +func (x *HostStatus) GetStats() []*SimpleMetric { + if x != nil { + return x.Stats + } + return nil +} + +func (x *HostStatus) GetHealthStatus() *HostHealthStatus { + if x != nil { + return x.HealthStatus + } + return nil +} + +func (x *HostStatus) GetSuccessRate() *v3.Percent { + if x != nil { + return x.SuccessRate + } + return nil +} + +func (x *HostStatus) GetWeight() uint32 { + if x != nil { + return x.Weight + } + return 0 +} + +func (x *HostStatus) GetHostname() string { + if x != nil { + return x.Hostname + } + return "" +} + +func (x *HostStatus) GetPriority() uint32 { + if x != nil { + return x.Priority + } + return 0 +} + +func (x *HostStatus) GetLocalOriginSuccessRate() *v3.Percent { + if x != nil { + return x.LocalOriginSuccessRate + } + return nil +} + +func (x *HostStatus) GetLocality() *v32.Locality { + if x != nil { + return x.Locality + } + return nil +} + +// Health status for a host. +// [#next-free-field: 9] +type HostHealthStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The host is currently failing active health checks. 
+ FailedActiveHealthCheck bool `protobuf:"varint,1,opt,name=failed_active_health_check,json=failedActiveHealthCheck,proto3" json:"failed_active_health_check,omitempty"` + // The host is currently considered an outlier and has been ejected. + FailedOutlierCheck bool `protobuf:"varint,2,opt,name=failed_outlier_check,json=failedOutlierCheck,proto3" json:"failed_outlier_check,omitempty"` + // The host is currently being marked as degraded through active health checking. + FailedActiveDegradedCheck bool `protobuf:"varint,4,opt,name=failed_active_degraded_check,json=failedActiveDegradedCheck,proto3" json:"failed_active_degraded_check,omitempty"` + // The host has been removed from service discovery, but is being stabilized due to active + // health checking. + PendingDynamicRemoval bool `protobuf:"varint,5,opt,name=pending_dynamic_removal,json=pendingDynamicRemoval,proto3" json:"pending_dynamic_removal,omitempty"` + // The host has not yet been health checked. + PendingActiveHc bool `protobuf:"varint,6,opt,name=pending_active_hc,json=pendingActiveHc,proto3" json:"pending_active_hc,omitempty"` + // The host should be excluded from panic, spillover, etc. calculations because it was explicitly + // taken out of rotation via protocol signal and is not meant to be routed to. + ExcludedViaImmediateHcFail bool `protobuf:"varint,7,opt,name=excluded_via_immediate_hc_fail,json=excludedViaImmediateHcFail,proto3" json:"excluded_via_immediate_hc_fail,omitempty"` + // The host failed active HC due to timeout. + ActiveHcTimeout bool `protobuf:"varint,8,opt,name=active_hc_timeout,json=activeHcTimeout,proto3" json:"active_hc_timeout,omitempty"` + // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported + // here. + // [#comment:TODO(mrice32): pipe through remaining EDS health status possibilities.] + EdsHealthStatus v32.HealthStatus `protobuf:"varint,3,opt,name=eds_health_status,json=edsHealthStatus,proto3,enum=envoy.config.core.v3.HealthStatus" json:"eds_health_status,omitempty"` +} + +func (x *HostHealthStatus) Reset() { + *x = HostHealthStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_clusters_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HostHealthStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HostHealthStatus) ProtoMessage() {} + +func (x *HostHealthStatus) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_clusters_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HostHealthStatus.ProtoReflect.Descriptor instead. 
+func (*HostHealthStatus) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_clusters_proto_rawDescGZIP(), []int{3} +} + +func (x *HostHealthStatus) GetFailedActiveHealthCheck() bool { + if x != nil { + return x.FailedActiveHealthCheck + } + return false +} + +func (x *HostHealthStatus) GetFailedOutlierCheck() bool { + if x != nil { + return x.FailedOutlierCheck + } + return false +} + +func (x *HostHealthStatus) GetFailedActiveDegradedCheck() bool { + if x != nil { + return x.FailedActiveDegradedCheck + } + return false +} + +func (x *HostHealthStatus) GetPendingDynamicRemoval() bool { + if x != nil { + return x.PendingDynamicRemoval + } + return false +} + +func (x *HostHealthStatus) GetPendingActiveHc() bool { + if x != nil { + return x.PendingActiveHc + } + return false +} + +func (x *HostHealthStatus) GetExcludedViaImmediateHcFail() bool { + if x != nil { + return x.ExcludedViaImmediateHcFail + } + return false +} + +func (x *HostHealthStatus) GetActiveHcTimeout() bool { + if x != nil { + return x.ActiveHcTimeout + } + return false +} + +func (x *HostHealthStatus) GetEdsHealthStatus() v32.HealthStatus { + if x != nil { + return x.EdsHealthStatus + } + return v32.HealthStatus(0) +} + +var File_envoy_admin_v3_clusters_proto protoreflect.FileDescriptor + +var file_envoy_admin_v3_clusters_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a, + 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x2f, + 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2d, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x5f, 0x62, + 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, + 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x79, 0x0a, 0x08, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x48, 0x0a, 0x10, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 
0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0f, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x3a, + 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x73, 0x22, 0xb6, 0x04, 0x0a, 0x0d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x61, 0x64, + 0x64, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x61, 0x5f, 0x61, 0x70, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0b, 0x61, 0x64, 0x64, 0x65, 0x64, 0x56, 0x69, 0x61, 0x41, 0x70, 0x69, 0x12, 0x5d, + 0x0a, 0x1f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, + 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, + 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, + 0x1c, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x45, 0x6a, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x3f, 0x0a, + 0x0d, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x0c, 0x68, 0x6f, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x75, + 0x0a, 0x2c, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x73, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x6a, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x27, 0x6c, 0x6f, + 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x52, 0x61, 0x74, 0x65, 0x45, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x68, 0x72, 0x65, + 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x53, 0x0a, 0x10, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, + 0x5f, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, + 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x52, 0x0f, 0x63, 0x69, 0x72, 0x63, 0x75, + 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x6f, 0x62, + 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, + 0x69, 0x6c, 0x69, 0x74, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 
0x10, 0x65, 0x64, 0x73, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x81, 0x04, + 0x0a, 0x0a, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x32, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x12, 0x45, 0x0a, 0x0d, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 0x33, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x39, 0x0a, 0x0c, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0b, + 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x77, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x77, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x51, 0x0a, 0x19, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x73, 0x75, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x16, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, + 0x67, 0x69, 0x6e, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x12, 0x3a, + 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, + 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 
0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x22, 0x93, 0x04, 0x0a, 0x10, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, + 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x66, 0x61, 0x69, 0x6c, + 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x12, 0x30, 0x0a, 0x14, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x6f, 0x75, + 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x12, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x3f, 0x0a, 0x1c, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, + 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x66, 0x61, 0x69, + 0x6c, 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x67, 0x72, 0x61, 0x64, 0x65, + 0x64, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x36, 0x0a, 0x17, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, + 0x67, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x61, + 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x61, 0x6c, 0x12, 0x2a, + 0x0a, 0x11, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x68, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x70, 0x65, 0x6e, 0x64, 0x69, + 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x48, 0x63, 0x12, 0x42, 0x0a, 0x1e, 0x65, 0x78, + 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x61, 0x5f, 0x69, 0x6d, 0x6d, 0x65, 0x64, + 0x69, 0x61, 0x74, 0x65, 0x5f, 0x68, 0x63, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x1a, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x56, 0x69, 0x61, 0x49, + 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x48, 0x63, 0x46, 0x61, 0x69, 0x6c, 0x12, 0x2a, + 0x0a, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x68, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x48, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x4e, 0x0a, 0x11, 0x65, 0x64, + 0x73, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0f, 0x65, 0x64, 0x73, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, + 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x76, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, + 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2e, 0x65, 0x6e, 0x76, 
0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, + 0x0d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_admin_v3_clusters_proto_rawDescOnce sync.Once + file_envoy_admin_v3_clusters_proto_rawDescData = file_envoy_admin_v3_clusters_proto_rawDesc +) + +func file_envoy_admin_v3_clusters_proto_rawDescGZIP() []byte { + file_envoy_admin_v3_clusters_proto_rawDescOnce.Do(func() { + file_envoy_admin_v3_clusters_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_clusters_proto_rawDescData) + }) + return file_envoy_admin_v3_clusters_proto_rawDescData +} + +var file_envoy_admin_v3_clusters_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_envoy_admin_v3_clusters_proto_goTypes = []interface{}{ + (*Clusters)(nil), // 0: envoy.admin.v3.Clusters + (*ClusterStatus)(nil), // 1: envoy.admin.v3.ClusterStatus + (*HostStatus)(nil), // 2: envoy.admin.v3.HostStatus + (*HostHealthStatus)(nil), // 3: envoy.admin.v3.HostHealthStatus + (*v3.Percent)(nil), // 4: envoy.type.v3.Percent + (*v31.CircuitBreakers)(nil), // 5: envoy.config.cluster.v3.CircuitBreakers + (*v32.Address)(nil), // 6: envoy.config.core.v3.Address + (*SimpleMetric)(nil), // 7: envoy.admin.v3.SimpleMetric + (*v32.Locality)(nil), // 8: envoy.config.core.v3.Locality + (v32.HealthStatus)(0), // 9: envoy.config.core.v3.HealthStatus +} +var file_envoy_admin_v3_clusters_proto_depIdxs = []int32{ + 1, // 0: envoy.admin.v3.Clusters.cluster_statuses:type_name -> envoy.admin.v3.ClusterStatus + 4, // 1: envoy.admin.v3.ClusterStatus.success_rate_ejection_threshold:type_name -> envoy.type.v3.Percent + 2, // 2: envoy.admin.v3.ClusterStatus.host_statuses:type_name -> envoy.admin.v3.HostStatus + 4, // 3: envoy.admin.v3.ClusterStatus.local_origin_success_rate_ejection_threshold:type_name -> envoy.type.v3.Percent + 5, // 4: envoy.admin.v3.ClusterStatus.circuit_breakers:type_name -> envoy.config.cluster.v3.CircuitBreakers + 6, // 5: envoy.admin.v3.HostStatus.address:type_name -> envoy.config.core.v3.Address + 7, // 6: envoy.admin.v3.HostStatus.stats:type_name -> envoy.admin.v3.SimpleMetric + 3, // 7: envoy.admin.v3.HostStatus.health_status:type_name -> envoy.admin.v3.HostHealthStatus + 4, // 8: envoy.admin.v3.HostStatus.success_rate:type_name -> envoy.type.v3.Percent + 4, // 9: envoy.admin.v3.HostStatus.local_origin_success_rate:type_name -> envoy.type.v3.Percent + 8, // 10: envoy.admin.v3.HostStatus.locality:type_name -> envoy.config.core.v3.Locality + 9, // 11: envoy.admin.v3.HostHealthStatus.eds_health_status:type_name -> envoy.config.core.v3.HealthStatus + 12, // [12:12] is the sub-list for method output_type + 12, // [12:12] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_envoy_admin_v3_clusters_proto_init() } +func file_envoy_admin_v3_clusters_proto_init() { + if File_envoy_admin_v3_clusters_proto != nil { + return + } + 
file_envoy_admin_v3_metrics_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_admin_v3_clusters_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Clusters); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_clusters_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClusterStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_clusters_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HostStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_clusters_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HostHealthStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_admin_v3_clusters_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_admin_v3_clusters_proto_goTypes, + DependencyIndexes: file_envoy_admin_v3_clusters_proto_depIdxs, + MessageInfos: file_envoy_admin_v3_clusters_proto_msgTypes, + }.Build() + File_envoy_admin_v3_clusters_proto = out.File + file_envoy_admin_v3_clusters_proto_rawDesc = nil + file_envoy_admin_v3_clusters_proto_goTypes = nil + file_envoy_admin_v3_clusters_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.validate.go new file mode 100644 index 0000000000..d7658a09fa --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.validate.go @@ -0,0 +1,803 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/admin/v3/clusters.proto + +package adminv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = v3.HealthStatus(0) +) + +// Validate checks the field values on Clusters with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Clusters) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Clusters with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ClustersMultiError, or nil +// if none found. 
+func (m *Clusters) ValidateAll() error { + return m.validate(true) +} + +func (m *Clusters) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetClusterStatuses() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersValidationError{ + field: fmt.Sprintf("ClusterStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersValidationError{ + field: fmt.Sprintf("ClusterStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClustersValidationError{ + field: fmt.Sprintf("ClusterStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ClustersMultiError(errors) + } + + return nil +} + +// ClustersMultiError is an error wrapping multiple validation errors returned +// by Clusters.ValidateAll() if the designated constraints aren't met. +type ClustersMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClustersMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClustersMultiError) AllErrors() []error { return m } + +// ClustersValidationError is the validation error returned by +// Clusters.Validate if the designated constraints aren't met. +type ClustersValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClustersValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClustersValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClustersValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClustersValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClustersValidationError) ErrorName() string { return "ClustersValidationError" } + +// Error satisfies the builtin error interface +func (e ClustersValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClusters.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClustersValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClustersValidationError{} + +// Validate checks the field values on ClusterStatus with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ClusterStatus) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClusterStatus with the rules defined +// in the proto definition for this message. 
If any rules are violated, the +// result is a list of violation errors wrapped in ClusterStatusMultiError, or +// nil if none found. +func (m *ClusterStatus) ValidateAll() error { + return m.validate(true) +} + +func (m *ClusterStatus) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + // no validation rules for AddedViaApi + + if all { + switch v := interface{}(m.GetSuccessRateEjectionThreshold()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterStatusValidationError{ + field: "SuccessRateEjectionThreshold", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterStatusValidationError{ + field: "SuccessRateEjectionThreshold", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSuccessRateEjectionThreshold()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterStatusValidationError{ + field: "SuccessRateEjectionThreshold", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetHostStatuses() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterStatusValidationError{ + field: fmt.Sprintf("HostStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterStatusValidationError{ + field: fmt.Sprintf("HostStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterStatusValidationError{ + field: fmt.Sprintf("HostStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetLocalOriginSuccessRateEjectionThreshold()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterStatusValidationError{ + field: "LocalOriginSuccessRateEjectionThreshold", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterStatusValidationError{ + field: "LocalOriginSuccessRateEjectionThreshold", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalOriginSuccessRateEjectionThreshold()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterStatusValidationError{ + field: "LocalOriginSuccessRateEjectionThreshold", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCircuitBreakers()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterStatusValidationError{ + field: "CircuitBreakers", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, 
ClusterStatusValidationError{ + field: "CircuitBreakers", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCircuitBreakers()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterStatusValidationError{ + field: "CircuitBreakers", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ObservabilityName + + // no validation rules for EdsServiceName + + if len(errors) > 0 { + return ClusterStatusMultiError(errors) + } + + return nil +} + +// ClusterStatusMultiError is an error wrapping multiple validation errors +// returned by ClusterStatus.ValidateAll() if the designated constraints +// aren't met. +type ClusterStatusMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClusterStatusMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClusterStatusMultiError) AllErrors() []error { return m } + +// ClusterStatusValidationError is the validation error returned by +// ClusterStatus.Validate if the designated constraints aren't met. +type ClusterStatusValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClusterStatusValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClusterStatusValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClusterStatusValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClusterStatusValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClusterStatusValidationError) ErrorName() string { return "ClusterStatusValidationError" } + +// Error satisfies the builtin error interface +func (e ClusterStatusValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClusterStatus.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClusterStatusValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClusterStatusValidationError{} + +// Validate checks the field values on HostStatus with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HostStatus) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HostStatus with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HostStatusMultiError, or +// nil if none found. 
+func (m *HostStatus) ValidateAll() error { + return m.validate(true) +} + +func (m *HostStatus) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HostStatusValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HostStatusValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HostStatusValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetStats() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HostStatusValidationError{ + field: fmt.Sprintf("Stats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HostStatusValidationError{ + field: fmt.Sprintf("Stats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HostStatusValidationError{ + field: fmt.Sprintf("Stats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetHealthStatus()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HostStatusValidationError{ + field: "HealthStatus", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HostStatusValidationError{ + field: "HealthStatus", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHealthStatus()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HostStatusValidationError{ + field: "HealthStatus", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSuccessRate()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HostStatusValidationError{ + field: "SuccessRate", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HostStatusValidationError{ + field: "SuccessRate", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSuccessRate()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HostStatusValidationError{ + field: "SuccessRate", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Weight + + // no validation rules for Hostname + + // no validation rules for Priority + + if all { + 
switch v := interface{}(m.GetLocalOriginSuccessRate()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HostStatusValidationError{ + field: "LocalOriginSuccessRate", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HostStatusValidationError{ + field: "LocalOriginSuccessRate", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalOriginSuccessRate()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HostStatusValidationError{ + field: "LocalOriginSuccessRate", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLocality()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HostStatusValidationError{ + field: "Locality", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HostStatusValidationError{ + field: "Locality", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocality()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HostStatusValidationError{ + field: "Locality", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return HostStatusMultiError(errors) + } + + return nil +} + +// HostStatusMultiError is an error wrapping multiple validation errors +// returned by HostStatus.ValidateAll() if the designated constraints aren't met. +type HostStatusMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HostStatusMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HostStatusMultiError) AllErrors() []error { return m } + +// HostStatusValidationError is the validation error returned by +// HostStatus.Validate if the designated constraints aren't met. +type HostStatusValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HostStatusValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HostStatusValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HostStatusValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HostStatusValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HostStatusValidationError) ErrorName() string { return "HostStatusValidationError" } + +// Error satisfies the builtin error interface +func (e HostStatusValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHostStatus.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HostStatusValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HostStatusValidationError{} + +// Validate checks the field values on HostHealthStatus with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *HostHealthStatus) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HostHealthStatus with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HostHealthStatusMultiError, or nil if none found. +func (m *HostHealthStatus) ValidateAll() error { + return m.validate(true) +} + +func (m *HostHealthStatus) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for FailedActiveHealthCheck + + // no validation rules for FailedOutlierCheck + + // no validation rules for FailedActiveDegradedCheck + + // no validation rules for PendingDynamicRemoval + + // no validation rules for PendingActiveHc + + // no validation rules for ExcludedViaImmediateHcFail + + // no validation rules for ActiveHcTimeout + + // no validation rules for EdsHealthStatus + + if len(errors) > 0 { + return HostHealthStatusMultiError(errors) + } + + return nil +} + +// HostHealthStatusMultiError is an error wrapping multiple validation errors +// returned by HostHealthStatus.ValidateAll() if the designated constraints +// aren't met. +type HostHealthStatusMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HostHealthStatusMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HostHealthStatusMultiError) AllErrors() []error { return m } + +// HostHealthStatusValidationError is the validation error returned by +// HostHealthStatus.Validate if the designated constraints aren't met. +type HostHealthStatusValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HostHealthStatusValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HostHealthStatusValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HostHealthStatusValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HostHealthStatusValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HostHealthStatusValidationError) ErrorName() string { return "HostHealthStatusValidationError" } + +// Error satisfies the builtin error interface +func (e HostHealthStatusValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHostHealthStatus.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HostHealthStatusValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HostHealthStatusValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters_vtproto.pb.go new file mode 100644 index 0000000000..418581107c --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters_vtproto.pb.go @@ -0,0 +1,656 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/clusters.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Clusters) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Clusters) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Clusters) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ClusterStatuses) > 0 { + for iNdEx := len(m.ClusterStatuses) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ClusterStatuses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ClusterStatus) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterStatus) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.EdsServiceName) > 0 { + i -= len(m.EdsServiceName) + copy(dAtA[i:], m.EdsServiceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.EdsServiceName))) + i-- + dAtA[i] = 0x42 + } + if 
len(m.ObservabilityName) > 0 { + i -= len(m.ObservabilityName) + copy(dAtA[i:], m.ObservabilityName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ObservabilityName))) + i-- + dAtA[i] = 0x3a + } + if m.CircuitBreakers != nil { + if vtmsg, ok := interface{}(m.CircuitBreakers).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CircuitBreakers) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + if m.LocalOriginSuccessRateEjectionThreshold != nil { + if vtmsg, ok := interface{}(m.LocalOriginSuccessRateEjectionThreshold).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LocalOriginSuccessRateEjectionThreshold) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + if len(m.HostStatuses) > 0 { + for iNdEx := len(m.HostStatuses) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.HostStatuses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if m.SuccessRateEjectionThreshold != nil { + if vtmsg, ok := interface{}(m.SuccessRateEjectionThreshold).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SuccessRateEjectionThreshold) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.AddedViaApi { + i-- + if m.AddedViaApi { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HostStatus) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostStatus) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HostStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Locality != nil { + if vtmsg, ok := interface{}(m.Locality).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Locality) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + if m.LocalOriginSuccessRate != nil { + if vtmsg, ok := interface{}(m.LocalOriginSuccessRate).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LocalOriginSuccessRate) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + if m.Priority != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x38 + } + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x32 + } + if m.Weight != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Weight)) + i-- + dAtA[i] = 0x28 + } + if m.SuccessRate != nil { + if vtmsg, ok := interface{}(m.SuccessRate).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SuccessRate) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.HealthStatus != nil { + size, err := m.HealthStatus.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Stats) > 0 { + for iNdEx := len(m.Stats) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Stats[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HostHealthStatus) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostHealthStatus) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HostHealthStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + 
copy(dAtA[i:], m.unknownFields) + } + if m.ActiveHcTimeout { + i-- + if m.ActiveHcTimeout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.ExcludedViaImmediateHcFail { + i-- + if m.ExcludedViaImmediateHcFail { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.PendingActiveHc { + i-- + if m.PendingActiveHc { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.PendingDynamicRemoval { + i-- + if m.PendingDynamicRemoval { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.FailedActiveDegradedCheck { + i-- + if m.FailedActiveDegradedCheck { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.EdsHealthStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.EdsHealthStatus)) + i-- + dAtA[i] = 0x18 + } + if m.FailedOutlierCheck { + i-- + if m.FailedOutlierCheck { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.FailedActiveHealthCheck { + i-- + if m.FailedActiveHealthCheck { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Clusters) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ClusterStatuses) > 0 { + for _, e := range m.ClusterStatuses { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ClusterStatus) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AddedViaApi { + n += 2 + } + if m.SuccessRateEjectionThreshold != nil { + if size, ok := interface{}(m.SuccessRateEjectionThreshold).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SuccessRateEjectionThreshold) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.HostStatuses) > 0 { + for _, e := range m.HostStatuses { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.LocalOriginSuccessRateEjectionThreshold != nil { + if size, ok := interface{}(m.LocalOriginSuccessRateEjectionThreshold).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LocalOriginSuccessRateEjectionThreshold) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CircuitBreakers != nil { + if size, ok := interface{}(m.CircuitBreakers).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CircuitBreakers) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ObservabilityName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.EdsServiceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HostStatus) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Stats) > 0 { + for _, e := range m.Stats { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.HealthStatus != nil { + l = m.HealthStatus.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SuccessRate != nil { + if size, ok := interface{}(m.SuccessRate).(interface { + SizeVT() int + }); 
ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SuccessRate) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Weight != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Weight)) + } + l = len(m.Hostname) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Priority != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Priority)) + } + if m.LocalOriginSuccessRate != nil { + if size, ok := interface{}(m.LocalOriginSuccessRate).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LocalOriginSuccessRate) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Locality != nil { + if size, ok := interface{}(m.Locality).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Locality) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HostHealthStatus) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FailedActiveHealthCheck { + n += 2 + } + if m.FailedOutlierCheck { + n += 2 + } + if m.EdsHealthStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.EdsHealthStatus)) + } + if m.FailedActiveDegradedCheck { + n += 2 + } + if m.PendingDynamicRemoval { + n += 2 + } + if m.PendingActiveHc { + n += 2 + } + if m.ExcludedViaImmediateHcFail { + n += 2 + } + if m.ActiveHcTimeout { + n += 2 + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.go new file mode 100644 index 0000000000..ef711d966f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.go @@ -0,0 +1,642 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/admin/v3/config_dump.proto + +package adminv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The :ref:`/config_dump ` admin endpoint uses this wrapper +// message to maintain and serve arbitrary configuration information from any component in Envoy. +type ConfigDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This list is serialized and dumped in its entirety at the + // :ref:`/config_dump ` endpoint. 
+ // + // The following configurations are currently supported and will be dumped in the order given + // below: + // + // * “bootstrap“: :ref:`BootstrapConfigDump ` + // * “clusters“: :ref:`ClustersConfigDump ` + // * “ecds_filter_http“: :ref:`EcdsConfigDump ` + // * “ecds_filter_quic_listener“: :ref:`EcdsConfigDump ` + // * “ecds_filter_tcp_listener“: :ref:`EcdsConfigDump ` + // * “endpoints“: :ref:`EndpointsConfigDump ` + // * “listeners“: :ref:`ListenersConfigDump ` + // * “scoped_routes“: :ref:`ScopedRoutesConfigDump ` + // * “routes“: :ref:`RoutesConfigDump ` + // * “secrets“: :ref:`SecretsConfigDump ` + // + // EDS Configuration will only be dumped by using parameter “?include_eds“ + // + // Currently ECDS is supported in HTTP and listener filters. Note, ECDS configuration for + // either HTTP or listener filter will only be dumped if it is actually configured. + // + // You can filter output with the resource and mask query parameters. + // See :ref:`/config_dump?resource={} `, + // :ref:`/config_dump?mask={} `, + // or :ref:`/config_dump?resource={},mask={} + // ` for more information. + Configs []*anypb.Any `protobuf:"bytes,1,rep,name=configs,proto3" json:"configs,omitempty"` +} + +func (x *ConfigDump) Reset() { + *x = ConfigDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConfigDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConfigDump) ProtoMessage() {} + +func (x *ConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConfigDump.ProtoReflect.Descriptor instead. +func (*ConfigDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{0} +} + +func (x *ConfigDump) GetConfigs() []*anypb.Any { + if x != nil { + return x.Configs + } + return nil +} + +// This message describes the bootstrap configuration that Envoy was started with. This includes +// any CLI overrides that were merged. Bootstrap configuration information can be used to recreate +// the static portions of an Envoy configuration by reusing the output as the bootstrap +// configuration for another Envoy. +type BootstrapConfigDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Bootstrap *v3.Bootstrap `protobuf:"bytes,1,opt,name=bootstrap,proto3" json:"bootstrap,omitempty"` + // The timestamp when the BootstrapConfig was last updated. 
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` +} + +func (x *BootstrapConfigDump) Reset() { + *x = BootstrapConfigDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BootstrapConfigDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BootstrapConfigDump) ProtoMessage() {} + +func (x *BootstrapConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BootstrapConfigDump.ProtoReflect.Descriptor instead. +func (*BootstrapConfigDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{1} +} + +func (x *BootstrapConfigDump) GetBootstrap() *v3.Bootstrap { + if x != nil { + return x.Bootstrap + } + return nil +} + +func (x *BootstrapConfigDump) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +// Envoys SDS implementation fills this message with all secrets fetched dynamically via SDS. +type SecretsConfigDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The statically loaded secrets. + StaticSecrets []*SecretsConfigDump_StaticSecret `protobuf:"bytes,1,rep,name=static_secrets,json=staticSecrets,proto3" json:"static_secrets,omitempty"` + // The dynamically loaded active secrets. These are secrets that are available to service + // clusters or listeners. + DynamicActiveSecrets []*SecretsConfigDump_DynamicSecret `protobuf:"bytes,2,rep,name=dynamic_active_secrets,json=dynamicActiveSecrets,proto3" json:"dynamic_active_secrets,omitempty"` + // The dynamically loaded warming secrets. These are secrets that are currently undergoing + // warming in preparation to service clusters or listeners. + DynamicWarmingSecrets []*SecretsConfigDump_DynamicSecret `protobuf:"bytes,3,rep,name=dynamic_warming_secrets,json=dynamicWarmingSecrets,proto3" json:"dynamic_warming_secrets,omitempty"` +} + +func (x *SecretsConfigDump) Reset() { + *x = SecretsConfigDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecretsConfigDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecretsConfigDump) ProtoMessage() {} + +func (x *SecretsConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecretsConfigDump.ProtoReflect.Descriptor instead. 
+func (*SecretsConfigDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{2} +} + +func (x *SecretsConfigDump) GetStaticSecrets() []*SecretsConfigDump_StaticSecret { + if x != nil { + return x.StaticSecrets + } + return nil +} + +func (x *SecretsConfigDump) GetDynamicActiveSecrets() []*SecretsConfigDump_DynamicSecret { + if x != nil { + return x.DynamicActiveSecrets + } + return nil +} + +func (x *SecretsConfigDump) GetDynamicWarmingSecrets() []*SecretsConfigDump_DynamicSecret { + if x != nil { + return x.DynamicWarmingSecrets + } + return nil +} + +// DynamicSecret contains secret information fetched via SDS. +// [#next-free-field: 7] +type SecretsConfigDump_DynamicSecret struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name assigned to the secret. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // This is the per-resource version information. + VersionInfo string `protobuf:"bytes,2,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The timestamp when the secret was last updated. + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + // The actual secret information. + // Security sensitive information is redacted (replaced with "[redacted]") for + // private keys and passwords in TLS certificates. + Secret *anypb.Any `protobuf:"bytes,4,opt,name=secret,proto3" json:"secret,omitempty"` + // Set if the last update failed, cleared after the next successful update. + // The *error_state* field contains the rejected version of this particular + // resource along with the reason and timestamp. For successfully updated or + // acknowledged resource, this field should be empty. + // [#not-implemented-hide:] + ErrorState *UpdateFailureState `protobuf:"bytes,5,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` + // The client status of this resource. + // [#not-implemented-hide:] + ClientStatus ClientResourceStatus `protobuf:"varint,6,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` +} + +func (x *SecretsConfigDump_DynamicSecret) Reset() { + *x = SecretsConfigDump_DynamicSecret{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecretsConfigDump_DynamicSecret) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecretsConfigDump_DynamicSecret) ProtoMessage() {} + +func (x *SecretsConfigDump_DynamicSecret) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecretsConfigDump_DynamicSecret.ProtoReflect.Descriptor instead. 
+func (*SecretsConfigDump_DynamicSecret) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *SecretsConfigDump_DynamicSecret) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SecretsConfigDump_DynamicSecret) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *SecretsConfigDump_DynamicSecret) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +func (x *SecretsConfigDump_DynamicSecret) GetSecret() *anypb.Any { + if x != nil { + return x.Secret + } + return nil +} + +func (x *SecretsConfigDump_DynamicSecret) GetErrorState() *UpdateFailureState { + if x != nil { + return x.ErrorState + } + return nil +} + +func (x *SecretsConfigDump_DynamicSecret) GetClientStatus() ClientResourceStatus { + if x != nil { + return x.ClientStatus + } + return ClientResourceStatus_UNKNOWN +} + +// StaticSecret specifies statically loaded secret in bootstrap. +type SecretsConfigDump_StaticSecret struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name assigned to the secret. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The timestamp when the secret was last updated. + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + // The actual secret information. + // Security sensitive information is redacted (replaced with "[redacted]") for + // private keys and passwords in TLS certificates. + Secret *anypb.Any `protobuf:"bytes,3,opt,name=secret,proto3" json:"secret,omitempty"` +} + +func (x *SecretsConfigDump_StaticSecret) Reset() { + *x = SecretsConfigDump_StaticSecret{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecretsConfigDump_StaticSecret) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecretsConfigDump_StaticSecret) ProtoMessage() {} + +func (x *SecretsConfigDump_StaticSecret) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecretsConfigDump_StaticSecret.ProtoReflect.Descriptor instead. 
+func (*SecretsConfigDump_StaticSecret) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{2, 1} +} + +func (x *SecretsConfigDump_StaticSecret) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SecretsConfigDump_StaticSecret) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +func (x *SecretsConfigDump_StaticSecret) GetSecret() *anypb.Any { + if x != nil { + return x.Secret + } + return nil +} + +var File_envoy_admin_v3_config_dump_proto protoreflect.FileDescriptor + +var file_envoy_admin_v3_config_dump_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x33, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, + 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x5f, 0x73, + 0x68, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, + 0x72, 0x61, 0x70, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, 0x0a, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, + 0x6d, 0x70, 0x12, 0x2e, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x73, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x22, 0xc8, 0x01, 0x0a, 0x13, 0x42, 0x6f, + 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, + 0x70, 0x12, 0x42, 0x0a, 0x09, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, + 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x74, + 0x73, 0x74, 0x72, 0x61, 0x70, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x64, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x44, 0x75, 0x6d, 0x70, 0x22, 0xb7, 0x07, 0x0a, 0x11, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x55, 0x0a, 0x0e, 0x73, 0x74, + 0x61, 0x74, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x73, 0x12, 0x65, 0x0a, 0x16, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x52, 0x14, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x41, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x64, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x5f, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x15, 0x64, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x57, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x73, 0x1a, 0xff, 0x02, 0x0a, 0x0d, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, + 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, + 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 
0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x1a, 0xca, 0x01, 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x53, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, + 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x06, 0x73, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x3a, 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, + 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x42, 0x78, + 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, + 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 
0x33, + 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_admin_v3_config_dump_proto_rawDescOnce sync.Once + file_envoy_admin_v3_config_dump_proto_rawDescData = file_envoy_admin_v3_config_dump_proto_rawDesc +) + +func file_envoy_admin_v3_config_dump_proto_rawDescGZIP() []byte { + file_envoy_admin_v3_config_dump_proto_rawDescOnce.Do(func() { + file_envoy_admin_v3_config_dump_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_config_dump_proto_rawDescData) + }) + return file_envoy_admin_v3_config_dump_proto_rawDescData +} + +var file_envoy_admin_v3_config_dump_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_envoy_admin_v3_config_dump_proto_goTypes = []interface{}{ + (*ConfigDump)(nil), // 0: envoy.admin.v3.ConfigDump + (*BootstrapConfigDump)(nil), // 1: envoy.admin.v3.BootstrapConfigDump + (*SecretsConfigDump)(nil), // 2: envoy.admin.v3.SecretsConfigDump + (*SecretsConfigDump_DynamicSecret)(nil), // 3: envoy.admin.v3.SecretsConfigDump.DynamicSecret + (*SecretsConfigDump_StaticSecret)(nil), // 4: envoy.admin.v3.SecretsConfigDump.StaticSecret + (*anypb.Any)(nil), // 5: google.protobuf.Any + (*v3.Bootstrap)(nil), // 6: envoy.config.bootstrap.v3.Bootstrap + (*timestamppb.Timestamp)(nil), // 7: google.protobuf.Timestamp + (*UpdateFailureState)(nil), // 8: envoy.admin.v3.UpdateFailureState + (ClientResourceStatus)(0), // 9: envoy.admin.v3.ClientResourceStatus +} +var file_envoy_admin_v3_config_dump_proto_depIdxs = []int32{ + 5, // 0: envoy.admin.v3.ConfigDump.configs:type_name -> google.protobuf.Any + 6, // 1: envoy.admin.v3.BootstrapConfigDump.bootstrap:type_name -> envoy.config.bootstrap.v3.Bootstrap + 7, // 2: envoy.admin.v3.BootstrapConfigDump.last_updated:type_name -> google.protobuf.Timestamp + 4, // 3: envoy.admin.v3.SecretsConfigDump.static_secrets:type_name -> envoy.admin.v3.SecretsConfigDump.StaticSecret + 3, // 4: envoy.admin.v3.SecretsConfigDump.dynamic_active_secrets:type_name -> envoy.admin.v3.SecretsConfigDump.DynamicSecret + 3, // 5: envoy.admin.v3.SecretsConfigDump.dynamic_warming_secrets:type_name -> envoy.admin.v3.SecretsConfigDump.DynamicSecret + 7, // 6: envoy.admin.v3.SecretsConfigDump.DynamicSecret.last_updated:type_name -> google.protobuf.Timestamp + 5, // 7: envoy.admin.v3.SecretsConfigDump.DynamicSecret.secret:type_name -> google.protobuf.Any + 8, // 8: envoy.admin.v3.SecretsConfigDump.DynamicSecret.error_state:type_name -> envoy.admin.v3.UpdateFailureState + 9, // 9: envoy.admin.v3.SecretsConfigDump.DynamicSecret.client_status:type_name -> envoy.admin.v3.ClientResourceStatus + 7, // 10: envoy.admin.v3.SecretsConfigDump.StaticSecret.last_updated:type_name -> google.protobuf.Timestamp + 5, // 11: envoy.admin.v3.SecretsConfigDump.StaticSecret.secret:type_name -> google.protobuf.Any + 12, // [12:12] is the sub-list for method output_type + 12, // [12:12] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_envoy_admin_v3_config_dump_proto_init() } +func file_envoy_admin_v3_config_dump_proto_init() { + if File_envoy_admin_v3_config_dump_proto != nil { + return + } + file_envoy_admin_v3_config_dump_shared_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_admin_v3_config_dump_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConfigDump); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BootstrapConfigDump); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecretsConfigDump); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecretsConfigDump_DynamicSecret); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecretsConfigDump_StaticSecret); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_admin_v3_config_dump_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_admin_v3_config_dump_proto_goTypes, + DependencyIndexes: file_envoy_admin_v3_config_dump_proto_depIdxs, + MessageInfos: file_envoy_admin_v3_config_dump_proto_msgTypes, + }.Build() + File_envoy_admin_v3_config_dump_proto = out.File + file_envoy_admin_v3_config_dump_proto_rawDesc = nil + file_envoy_admin_v3_config_dump_proto_goTypes = nil + file_envoy_admin_v3_config_dump_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.validate.go new file mode 100644 index 0000000000..6f494af0b6 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.validate.go @@ -0,0 +1,893 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/admin/v3/config_dump.proto + +package adminv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ConfigDump with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ConfigDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ConfigDump with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ConfigDumpMultiError, or +// nil if none found. 
+func (m *ConfigDump) ValidateAll() error { + return m.validate(true) +} + +func (m *ConfigDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ConfigDumpValidationError{ + field: fmt.Sprintf("Configs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ConfigDumpValidationError{ + field: fmt.Sprintf("Configs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ConfigDumpValidationError{ + field: fmt.Sprintf("Configs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ConfigDumpMultiError(errors) + } + + return nil +} + +// ConfigDumpMultiError is an error wrapping multiple validation errors +// returned by ConfigDump.ValidateAll() if the designated constraints aren't met. +type ConfigDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ConfigDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ConfigDumpMultiError) AllErrors() []error { return m } + +// ConfigDumpValidationError is the validation error returned by +// ConfigDump.Validate if the designated constraints aren't met. +type ConfigDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ConfigDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ConfigDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ConfigDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ConfigDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ConfigDumpValidationError) ErrorName() string { return "ConfigDumpValidationError" } + +// Error satisfies the builtin error interface +func (e ConfigDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sConfigDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ConfigDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ConfigDumpValidationError{} + +// Validate checks the field values on BootstrapConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *BootstrapConfigDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on BootstrapConfigDump with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// BootstrapConfigDumpMultiError, or nil if none found. +func (m *BootstrapConfigDump) ValidateAll() error { + return m.validate(true) +} + +func (m *BootstrapConfigDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetBootstrap()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapConfigDumpValidationError{ + field: "Bootstrap", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapConfigDumpValidationError{ + field: "Bootstrap", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBootstrap()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapConfigDumpValidationError{ + field: "Bootstrap", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapConfigDumpValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapConfigDumpValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapConfigDumpValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return BootstrapConfigDumpMultiError(errors) + } + + return nil +} + +// BootstrapConfigDumpMultiError is an error wrapping multiple validation +// errors returned by BootstrapConfigDump.ValidateAll() if the designated +// constraints aren't met. +type BootstrapConfigDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m BootstrapConfigDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m BootstrapConfigDumpMultiError) AllErrors() []error { return m } + +// BootstrapConfigDumpValidationError is the validation error returned by +// BootstrapConfigDump.Validate if the designated constraints aren't met. +type BootstrapConfigDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e BootstrapConfigDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e BootstrapConfigDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e BootstrapConfigDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e BootstrapConfigDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e BootstrapConfigDumpValidationError) ErrorName() string { + return "BootstrapConfigDumpValidationError" +} + +// Error satisfies the builtin error interface +func (e BootstrapConfigDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBootstrapConfigDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = BootstrapConfigDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = BootstrapConfigDumpValidationError{} + +// Validate checks the field values on SecretsConfigDump with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *SecretsConfigDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SecretsConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SecretsConfigDumpMultiError, or nil if none found. +func (m *SecretsConfigDump) ValidateAll() error { + return m.validate(true) +} + +func (m *SecretsConfigDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetStaticSecrets() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecretsConfigDumpValidationError{ + field: fmt.Sprintf("StaticSecrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecretsConfigDumpValidationError{ + field: fmt.Sprintf("StaticSecrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretsConfigDumpValidationError{ + field: fmt.Sprintf("StaticSecrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetDynamicActiveSecrets() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecretsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicActiveSecrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecretsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicActiveSecrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicActiveSecrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetDynamicWarmingSecrets() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err 
!= nil { + errors = append(errors, SecretsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicWarmingSecrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecretsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicWarmingSecrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicWarmingSecrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return SecretsConfigDumpMultiError(errors) + } + + return nil +} + +// SecretsConfigDumpMultiError is an error wrapping multiple validation errors +// returned by SecretsConfigDump.ValidateAll() if the designated constraints +// aren't met. +type SecretsConfigDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SecretsConfigDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SecretsConfigDumpMultiError) AllErrors() []error { return m } + +// SecretsConfigDumpValidationError is the validation error returned by +// SecretsConfigDump.Validate if the designated constraints aren't met. +type SecretsConfigDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SecretsConfigDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SecretsConfigDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SecretsConfigDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SecretsConfigDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SecretsConfigDumpValidationError) ErrorName() string { + return "SecretsConfigDumpValidationError" +} + +// Error satisfies the builtin error interface +func (e SecretsConfigDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSecretsConfigDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SecretsConfigDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SecretsConfigDumpValidationError{} + +// Validate checks the field values on SecretsConfigDump_DynamicSecret with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *SecretsConfigDump_DynamicSecret) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SecretsConfigDump_DynamicSecret with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// SecretsConfigDump_DynamicSecretMultiError, or nil if none found. 
+func (m *SecretsConfigDump_DynamicSecret) ValidateAll() error { + return m.validate(true) +} + +func (m *SecretsConfigDump_DynamicSecret) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + // no validation rules for VersionInfo + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretsConfigDump_DynamicSecretValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSecret()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{ + field: "Secret", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{ + field: "Secret", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSecret()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretsConfigDump_DynamicSecretValidationError{ + field: "Secret", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetErrorState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretsConfigDump_DynamicSecretValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ClientStatus + + if len(errors) > 0 { + return SecretsConfigDump_DynamicSecretMultiError(errors) + } + + return nil +} + +// SecretsConfigDump_DynamicSecretMultiError is an error wrapping multiple +// validation errors returned by SecretsConfigDump_DynamicSecret.ValidateAll() +// if the designated constraints aren't met. +type SecretsConfigDump_DynamicSecretMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m SecretsConfigDump_DynamicSecretMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SecretsConfigDump_DynamicSecretMultiError) AllErrors() []error { return m } + +// SecretsConfigDump_DynamicSecretValidationError is the validation error +// returned by SecretsConfigDump_DynamicSecret.Validate if the designated +// constraints aren't met. +type SecretsConfigDump_DynamicSecretValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SecretsConfigDump_DynamicSecretValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SecretsConfigDump_DynamicSecretValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SecretsConfigDump_DynamicSecretValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SecretsConfigDump_DynamicSecretValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SecretsConfigDump_DynamicSecretValidationError) ErrorName() string { + return "SecretsConfigDump_DynamicSecretValidationError" +} + +// Error satisfies the builtin error interface +func (e SecretsConfigDump_DynamicSecretValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSecretsConfigDump_DynamicSecret.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SecretsConfigDump_DynamicSecretValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SecretsConfigDump_DynamicSecretValidationError{} + +// Validate checks the field values on SecretsConfigDump_StaticSecret with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *SecretsConfigDump_StaticSecret) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SecretsConfigDump_StaticSecret with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// SecretsConfigDump_StaticSecretMultiError, or nil if none found. 
+func (m *SecretsConfigDump_StaticSecret) ValidateAll() error { + return m.validate(true) +} + +func (m *SecretsConfigDump_StaticSecret) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecretsConfigDump_StaticSecretValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecretsConfigDump_StaticSecretValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretsConfigDump_StaticSecretValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSecret()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecretsConfigDump_StaticSecretValidationError{ + field: "Secret", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecretsConfigDump_StaticSecretValidationError{ + field: "Secret", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSecret()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretsConfigDump_StaticSecretValidationError{ + field: "Secret", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return SecretsConfigDump_StaticSecretMultiError(errors) + } + + return nil +} + +// SecretsConfigDump_StaticSecretMultiError is an error wrapping multiple +// validation errors returned by SecretsConfigDump_StaticSecret.ValidateAll() +// if the designated constraints aren't met. +type SecretsConfigDump_StaticSecretMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SecretsConfigDump_StaticSecretMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SecretsConfigDump_StaticSecretMultiError) AllErrors() []error { return m } + +// SecretsConfigDump_StaticSecretValidationError is the validation error +// returned by SecretsConfigDump_StaticSecret.Validate if the designated +// constraints aren't met. +type SecretsConfigDump_StaticSecretValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SecretsConfigDump_StaticSecretValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SecretsConfigDump_StaticSecretValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SecretsConfigDump_StaticSecretValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e SecretsConfigDump_StaticSecretValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SecretsConfigDump_StaticSecretValidationError) ErrorName() string { + return "SecretsConfigDump_StaticSecretValidationError" +} + +// Error satisfies the builtin error interface +func (e SecretsConfigDump_StaticSecretValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSecretsConfigDump_StaticSecret.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SecretsConfigDump_StaticSecretValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SecretsConfigDump_StaticSecretValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.go new file mode 100644 index 0000000000..feb0921ae2 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.go @@ -0,0 +1,2242 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/admin/v3/config_dump_shared.proto + +package adminv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Resource status from the view of a xDS client, which tells the synchronization +// status between the xDS client and the xDS server. +type ClientResourceStatus int32 + +const ( + // Resource status is not available/unknown. + ClientResourceStatus_UNKNOWN ClientResourceStatus = 0 + // Client requested this resource but hasn't received any update from management + // server. The client will not fail requests, but will queue them until update + // arrives or the client times out waiting for the resource. + ClientResourceStatus_REQUESTED ClientResourceStatus = 1 + // This resource has been requested by the client but has either not been + // delivered by the server or was previously delivered by the server and then + // subsequently removed from resources provided by the server. For more + // information, please refer to the :ref:`"Knowing When a Requested Resource + // Does Not Exist" ` section. + ClientResourceStatus_DOES_NOT_EXIST ClientResourceStatus = 2 + // Client received this resource and replied with ACK. + ClientResourceStatus_ACKED ClientResourceStatus = 3 + // Client received this resource and replied with NACK. + ClientResourceStatus_NACKED ClientResourceStatus = 4 +) + +// Enum value maps for ClientResourceStatus. 
+var ( + ClientResourceStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "REQUESTED", + 2: "DOES_NOT_EXIST", + 3: "ACKED", + 4: "NACKED", + } + ClientResourceStatus_value = map[string]int32{ + "UNKNOWN": 0, + "REQUESTED": 1, + "DOES_NOT_EXIST": 2, + "ACKED": 3, + "NACKED": 4, + } +) + +func (x ClientResourceStatus) Enum() *ClientResourceStatus { + p := new(ClientResourceStatus) + *p = x + return p +} + +func (x ClientResourceStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClientResourceStatus) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_admin_v3_config_dump_shared_proto_enumTypes[0].Descriptor() +} + +func (ClientResourceStatus) Type() protoreflect.EnumType { + return &file_envoy_admin_v3_config_dump_shared_proto_enumTypes[0] +} + +func (x ClientResourceStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClientResourceStatus.Descriptor instead. +func (ClientResourceStatus) EnumDescriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{0} +} + +type UpdateFailureState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // What the component configuration would have been if the update had succeeded. + // This field may not be populated by xDS clients due to storage overhead. + FailedConfiguration *anypb.Any `protobuf:"bytes,1,opt,name=failed_configuration,json=failedConfiguration,proto3" json:"failed_configuration,omitempty"` + // Time of the latest failed update attempt. + LastUpdateAttempt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_update_attempt,json=lastUpdateAttempt,proto3" json:"last_update_attempt,omitempty"` + // Details about the last failed update attempt. + Details string `protobuf:"bytes,3,opt,name=details,proto3" json:"details,omitempty"` + // This is the version of the rejected resource. + // [#not-implemented-hide:] + VersionInfo string `protobuf:"bytes,4,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` +} + +func (x *UpdateFailureState) Reset() { + *x = UpdateFailureState{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateFailureState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateFailureState) ProtoMessage() {} + +func (x *UpdateFailureState) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateFailureState.ProtoReflect.Descriptor instead. 
+func (*UpdateFailureState) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{0} +} + +func (x *UpdateFailureState) GetFailedConfiguration() *anypb.Any { + if x != nil { + return x.FailedConfiguration + } + return nil +} + +func (x *UpdateFailureState) GetLastUpdateAttempt() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdateAttempt + } + return nil +} + +func (x *UpdateFailureState) GetDetails() string { + if x != nil { + return x.Details + } + return "" +} + +func (x *UpdateFailureState) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +// Envoy's listener manager fills this message with all currently known listeners. Listener +// configuration information can be used to recreate an Envoy configuration by populating all +// listeners as static listeners or by returning them in a LDS response. +type ListenersConfigDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is the :ref:`version_info ` in the + // last processed LDS discovery response. If there are only static bootstrap listeners, this field + // will be "". + VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The statically loaded listener configs. + StaticListeners []*ListenersConfigDump_StaticListener `protobuf:"bytes,2,rep,name=static_listeners,json=staticListeners,proto3" json:"static_listeners,omitempty"` + // State for any warming, active, or draining listeners. + DynamicListeners []*ListenersConfigDump_DynamicListener `protobuf:"bytes,3,rep,name=dynamic_listeners,json=dynamicListeners,proto3" json:"dynamic_listeners,omitempty"` +} + +func (x *ListenersConfigDump) Reset() { + *x = ListenersConfigDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenersConfigDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenersConfigDump) ProtoMessage() {} + +func (x *ListenersConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenersConfigDump.ProtoReflect.Descriptor instead. +func (*ListenersConfigDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{1} +} + +func (x *ListenersConfigDump) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *ListenersConfigDump) GetStaticListeners() []*ListenersConfigDump_StaticListener { + if x != nil { + return x.StaticListeners + } + return nil +} + +func (x *ListenersConfigDump) GetDynamicListeners() []*ListenersConfigDump_DynamicListener { + if x != nil { + return x.DynamicListeners + } + return nil +} + +// Envoy's cluster manager fills this message with all currently known clusters. Cluster +// configuration information can be used to recreate an Envoy configuration by populating all +// clusters as static clusters or by returning them in a CDS response. 
+type ClustersConfigDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is the :ref:`version_info ` in the + // last processed CDS discovery response. If there are only static bootstrap clusters, this field + // will be "". + VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The statically loaded cluster configs. + StaticClusters []*ClustersConfigDump_StaticCluster `protobuf:"bytes,2,rep,name=static_clusters,json=staticClusters,proto3" json:"static_clusters,omitempty"` + // The dynamically loaded active clusters. These are clusters that are available to service + // data plane traffic. + DynamicActiveClusters []*ClustersConfigDump_DynamicCluster `protobuf:"bytes,3,rep,name=dynamic_active_clusters,json=dynamicActiveClusters,proto3" json:"dynamic_active_clusters,omitempty"` + // The dynamically loaded warming clusters. These are clusters that are currently undergoing + // warming in preparation to service data plane traffic. Note that if attempting to recreate an + // Envoy configuration from a configuration dump, the warming clusters should generally be + // discarded. + DynamicWarmingClusters []*ClustersConfigDump_DynamicCluster `protobuf:"bytes,4,rep,name=dynamic_warming_clusters,json=dynamicWarmingClusters,proto3" json:"dynamic_warming_clusters,omitempty"` +} + +func (x *ClustersConfigDump) Reset() { + *x = ClustersConfigDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClustersConfigDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClustersConfigDump) ProtoMessage() {} + +func (x *ClustersConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClustersConfigDump.ProtoReflect.Descriptor instead. +func (*ClustersConfigDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{2} +} + +func (x *ClustersConfigDump) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *ClustersConfigDump) GetStaticClusters() []*ClustersConfigDump_StaticCluster { + if x != nil { + return x.StaticClusters + } + return nil +} + +func (x *ClustersConfigDump) GetDynamicActiveClusters() []*ClustersConfigDump_DynamicCluster { + if x != nil { + return x.DynamicActiveClusters + } + return nil +} + +func (x *ClustersConfigDump) GetDynamicWarmingClusters() []*ClustersConfigDump_DynamicCluster { + if x != nil { + return x.DynamicWarmingClusters + } + return nil +} + +// Envoy's RDS implementation fills this message with all currently loaded routes, as described by +// their RouteConfiguration objects. Static routes that are either defined in the bootstrap configuration +// or defined inline while configuring listeners are separated from those configured dynamically via RDS. +// Route configuration information can be used to recreate an Envoy configuration by populating all routes +// as static routes or by returning them in RDS responses. 
+type RoutesConfigDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The statically loaded route configs. + StaticRouteConfigs []*RoutesConfigDump_StaticRouteConfig `protobuf:"bytes,2,rep,name=static_route_configs,json=staticRouteConfigs,proto3" json:"static_route_configs,omitempty"` + // The dynamically loaded route configs. + DynamicRouteConfigs []*RoutesConfigDump_DynamicRouteConfig `protobuf:"bytes,3,rep,name=dynamic_route_configs,json=dynamicRouteConfigs,proto3" json:"dynamic_route_configs,omitempty"` +} + +func (x *RoutesConfigDump) Reset() { + *x = RoutesConfigDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RoutesConfigDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RoutesConfigDump) ProtoMessage() {} + +func (x *RoutesConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RoutesConfigDump.ProtoReflect.Descriptor instead. +func (*RoutesConfigDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{3} +} + +func (x *RoutesConfigDump) GetStaticRouteConfigs() []*RoutesConfigDump_StaticRouteConfig { + if x != nil { + return x.StaticRouteConfigs + } + return nil +} + +func (x *RoutesConfigDump) GetDynamicRouteConfigs() []*RoutesConfigDump_DynamicRouteConfig { + if x != nil { + return x.DynamicRouteConfigs + } + return nil +} + +// Envoy's scoped RDS implementation fills this message with all currently loaded route +// configuration scopes (defined via ScopedRouteConfigurationsSet protos). This message lists both +// the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the +// dynamically obtained scopes via the SRDS API. +type ScopedRoutesConfigDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The statically loaded scoped route configs. + InlineScopedRouteConfigs []*ScopedRoutesConfigDump_InlineScopedRouteConfigs `protobuf:"bytes,1,rep,name=inline_scoped_route_configs,json=inlineScopedRouteConfigs,proto3" json:"inline_scoped_route_configs,omitempty"` + // The dynamically loaded scoped route configs. 
+ DynamicScopedRouteConfigs []*ScopedRoutesConfigDump_DynamicScopedRouteConfigs `protobuf:"bytes,2,rep,name=dynamic_scoped_route_configs,json=dynamicScopedRouteConfigs,proto3" json:"dynamic_scoped_route_configs,omitempty"` +} + +func (x *ScopedRoutesConfigDump) Reset() { + *x = ScopedRoutesConfigDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRoutesConfigDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRoutesConfigDump) ProtoMessage() {} + +func (x *ScopedRoutesConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRoutesConfigDump.ProtoReflect.Descriptor instead. +func (*ScopedRoutesConfigDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{4} +} + +func (x *ScopedRoutesConfigDump) GetInlineScopedRouteConfigs() []*ScopedRoutesConfigDump_InlineScopedRouteConfigs { + if x != nil { + return x.InlineScopedRouteConfigs + } + return nil +} + +func (x *ScopedRoutesConfigDump) GetDynamicScopedRouteConfigs() []*ScopedRoutesConfigDump_DynamicScopedRouteConfigs { + if x != nil { + return x.DynamicScopedRouteConfigs + } + return nil +} + +// Envoy's admin fill this message with all currently known endpoints. Endpoint +// configuration information can be used to recreate an Envoy configuration by populating all +// endpoints as static endpoints or by returning them in an EDS response. +type EndpointsConfigDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The statically loaded endpoint configs. + StaticEndpointConfigs []*EndpointsConfigDump_StaticEndpointConfig `protobuf:"bytes,2,rep,name=static_endpoint_configs,json=staticEndpointConfigs,proto3" json:"static_endpoint_configs,omitempty"` + // The dynamically loaded endpoint configs. + DynamicEndpointConfigs []*EndpointsConfigDump_DynamicEndpointConfig `protobuf:"bytes,3,rep,name=dynamic_endpoint_configs,json=dynamicEndpointConfigs,proto3" json:"dynamic_endpoint_configs,omitempty"` +} + +func (x *EndpointsConfigDump) Reset() { + *x = EndpointsConfigDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EndpointsConfigDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EndpointsConfigDump) ProtoMessage() {} + +func (x *EndpointsConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EndpointsConfigDump.ProtoReflect.Descriptor instead. 
+func (*EndpointsConfigDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{5} +} + +func (x *EndpointsConfigDump) GetStaticEndpointConfigs() []*EndpointsConfigDump_StaticEndpointConfig { + if x != nil { + return x.StaticEndpointConfigs + } + return nil +} + +func (x *EndpointsConfigDump) GetDynamicEndpointConfigs() []*EndpointsConfigDump_DynamicEndpointConfig { + if x != nil { + return x.DynamicEndpointConfigs + } + return nil +} + +// Envoy's ECDS service fills this message with all currently extension +// configuration. Extension configuration information can be used to recreate +// an Envoy ECDS listener and HTTP filters as static filters or by returning +// them in ECDS response. +type EcdsConfigDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The ECDS filter configs. + EcdsFilters []*EcdsConfigDump_EcdsFilterConfig `protobuf:"bytes,1,rep,name=ecds_filters,json=ecdsFilters,proto3" json:"ecds_filters,omitempty"` +} + +func (x *EcdsConfigDump) Reset() { + *x = EcdsConfigDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EcdsConfigDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EcdsConfigDump) ProtoMessage() {} + +func (x *EcdsConfigDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EcdsConfigDump.ProtoReflect.Descriptor instead. +func (*EcdsConfigDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{6} +} + +func (x *EcdsConfigDump) GetEcdsFilters() []*EcdsConfigDump_EcdsFilterConfig { + if x != nil { + return x.EcdsFilters + } + return nil +} + +// Describes a statically loaded listener. +type ListenersConfigDump_StaticListener struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The listener config. + Listener *anypb.Any `protobuf:"bytes,1,opt,name=listener,proto3" json:"listener,omitempty"` + // The timestamp when the Listener was last successfully updated. 
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` +} + +func (x *ListenersConfigDump_StaticListener) Reset() { + *x = ListenersConfigDump_StaticListener{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenersConfigDump_StaticListener) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenersConfigDump_StaticListener) ProtoMessage() {} + +func (x *ListenersConfigDump_StaticListener) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenersConfigDump_StaticListener.ProtoReflect.Descriptor instead. +func (*ListenersConfigDump_StaticListener) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *ListenersConfigDump_StaticListener) GetListener() *anypb.Any { + if x != nil { + return x.Listener + } + return nil +} + +func (x *ListenersConfigDump_StaticListener) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +type ListenersConfigDump_DynamicListenerState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time + // that the listener was loaded. In the future, discrete per-listener versions may be supported + // by the API. + VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The listener config. + Listener *anypb.Any `protobuf:"bytes,2,opt,name=listener,proto3" json:"listener,omitempty"` + // The timestamp when the Listener was last successfully updated. + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` +} + +func (x *ListenersConfigDump_DynamicListenerState) Reset() { + *x = ListenersConfigDump_DynamicListenerState{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenersConfigDump_DynamicListenerState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenersConfigDump_DynamicListenerState) ProtoMessage() {} + +func (x *ListenersConfigDump_DynamicListenerState) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenersConfigDump_DynamicListenerState.ProtoReflect.Descriptor instead. 
+func (*ListenersConfigDump_DynamicListenerState) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *ListenersConfigDump_DynamicListenerState) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *ListenersConfigDump_DynamicListenerState) GetListener() *anypb.Any { + if x != nil { + return x.Listener + } + return nil +} + +func (x *ListenersConfigDump_DynamicListenerState) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +// Describes a dynamically loaded listener via the LDS API. +// [#next-free-field: 7] +type ListenersConfigDump_DynamicListener struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name or unique id of this listener, pulled from the DynamicListenerState config. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The listener state for any active listener by this name. + // These are listeners that are available to service data plane traffic. + ActiveState *ListenersConfigDump_DynamicListenerState `protobuf:"bytes,2,opt,name=active_state,json=activeState,proto3" json:"active_state,omitempty"` + // The listener state for any warming listener by this name. + // These are listeners that are currently undergoing warming in preparation to service data + // plane traffic. Note that if attempting to recreate an Envoy configuration from a + // configuration dump, the warming listeners should generally be discarded. + WarmingState *ListenersConfigDump_DynamicListenerState `protobuf:"bytes,3,opt,name=warming_state,json=warmingState,proto3" json:"warming_state,omitempty"` + // The listener state for any draining listener by this name. + // These are listeners that are currently undergoing draining in preparation to stop servicing + // data plane traffic. Note that if attempting to recreate an Envoy configuration from a + // configuration dump, the draining listeners should generally be discarded. + DrainingState *ListenersConfigDump_DynamicListenerState `protobuf:"bytes,4,opt,name=draining_state,json=drainingState,proto3" json:"draining_state,omitempty"` + // Set if the last update failed, cleared after the next successful update. + // The “error_state“ field contains the rejected version of this particular + // resource along with the reason and timestamp. For successfully updated or + // acknowledged resource, this field should be empty. + ErrorState *UpdateFailureState `protobuf:"bytes,5,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` + // The client status of this resource. 
+ // [#not-implemented-hide:] + ClientStatus ClientResourceStatus `protobuf:"varint,6,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` +} + +func (x *ListenersConfigDump_DynamicListener) Reset() { + *x = ListenersConfigDump_DynamicListener{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenersConfigDump_DynamicListener) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenersConfigDump_DynamicListener) ProtoMessage() {} + +func (x *ListenersConfigDump_DynamicListener) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenersConfigDump_DynamicListener.ProtoReflect.Descriptor instead. +func (*ListenersConfigDump_DynamicListener) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{1, 2} +} + +func (x *ListenersConfigDump_DynamicListener) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListenersConfigDump_DynamicListener) GetActiveState() *ListenersConfigDump_DynamicListenerState { + if x != nil { + return x.ActiveState + } + return nil +} + +func (x *ListenersConfigDump_DynamicListener) GetWarmingState() *ListenersConfigDump_DynamicListenerState { + if x != nil { + return x.WarmingState + } + return nil +} + +func (x *ListenersConfigDump_DynamicListener) GetDrainingState() *ListenersConfigDump_DynamicListenerState { + if x != nil { + return x.DrainingState + } + return nil +} + +func (x *ListenersConfigDump_DynamicListener) GetErrorState() *UpdateFailureState { + if x != nil { + return x.ErrorState + } + return nil +} + +func (x *ListenersConfigDump_DynamicListener) GetClientStatus() ClientResourceStatus { + if x != nil { + return x.ClientStatus + } + return ClientResourceStatus_UNKNOWN +} + +// Describes a statically loaded cluster. +type ClustersConfigDump_StaticCluster struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The cluster config. + Cluster *anypb.Any `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` + // The timestamp when the Cluster was last updated. 
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` +} + +func (x *ClustersConfigDump_StaticCluster) Reset() { + *x = ClustersConfigDump_StaticCluster{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClustersConfigDump_StaticCluster) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClustersConfigDump_StaticCluster) ProtoMessage() {} + +func (x *ClustersConfigDump_StaticCluster) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClustersConfigDump_StaticCluster.ProtoReflect.Descriptor instead. +func (*ClustersConfigDump_StaticCluster) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *ClustersConfigDump_StaticCluster) GetCluster() *anypb.Any { + if x != nil { + return x.Cluster + } + return nil +} + +func (x *ClustersConfigDump_StaticCluster) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +// Describes a dynamically loaded cluster via the CDS API. +// [#next-free-field: 6] +type ClustersConfigDump_DynamicCluster struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time + // that the cluster was loaded. In the future, discrete per-cluster versions may be supported by + // the API. + VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The cluster config. + Cluster *anypb.Any `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"` + // The timestamp when the Cluster was last updated. + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + // Set if the last update failed, cleared after the next successful update. + // The “error_state“ field contains the rejected version of this particular + // resource along with the reason and timestamp. For successfully updated or + // acknowledged resource, this field should be empty. + // [#not-implemented-hide:] + ErrorState *UpdateFailureState `protobuf:"bytes,4,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` + // The client status of this resource. 
+ // [#not-implemented-hide:] + ClientStatus ClientResourceStatus `protobuf:"varint,5,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` +} + +func (x *ClustersConfigDump_DynamicCluster) Reset() { + *x = ClustersConfigDump_DynamicCluster{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClustersConfigDump_DynamicCluster) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClustersConfigDump_DynamicCluster) ProtoMessage() {} + +func (x *ClustersConfigDump_DynamicCluster) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClustersConfigDump_DynamicCluster.ProtoReflect.Descriptor instead. +func (*ClustersConfigDump_DynamicCluster) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{2, 1} +} + +func (x *ClustersConfigDump_DynamicCluster) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *ClustersConfigDump_DynamicCluster) GetCluster() *anypb.Any { + if x != nil { + return x.Cluster + } + return nil +} + +func (x *ClustersConfigDump_DynamicCluster) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +func (x *ClustersConfigDump_DynamicCluster) GetErrorState() *UpdateFailureState { + if x != nil { + return x.ErrorState + } + return nil +} + +func (x *ClustersConfigDump_DynamicCluster) GetClientStatus() ClientResourceStatus { + if x != nil { + return x.ClientStatus + } + return ClientResourceStatus_UNKNOWN +} + +type RoutesConfigDump_StaticRouteConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The route config. + RouteConfig *anypb.Any `protobuf:"bytes,1,opt,name=route_config,json=routeConfig,proto3" json:"route_config,omitempty"` + // The timestamp when the Route was last updated. + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` +} + +func (x *RoutesConfigDump_StaticRouteConfig) Reset() { + *x = RoutesConfigDump_StaticRouteConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RoutesConfigDump_StaticRouteConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RoutesConfigDump_StaticRouteConfig) ProtoMessage() {} + +func (x *RoutesConfigDump_StaticRouteConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RoutesConfigDump_StaticRouteConfig.ProtoReflect.Descriptor instead. 
+func (*RoutesConfigDump_StaticRouteConfig) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *RoutesConfigDump_StaticRouteConfig) GetRouteConfig() *anypb.Any { + if x != nil { + return x.RouteConfig + } + return nil +} + +func (x *RoutesConfigDump_StaticRouteConfig) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +// [#next-free-field: 6] +type RoutesConfigDump_DynamicRouteConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time that + // the route configuration was loaded. + VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The route config. + RouteConfig *anypb.Any `protobuf:"bytes,2,opt,name=route_config,json=routeConfig,proto3" json:"route_config,omitempty"` + // The timestamp when the Route was last updated. + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + // Set if the last update failed, cleared after the next successful update. + // The “error_state“ field contains the rejected version of this particular + // resource along with the reason and timestamp. For successfully updated or + // acknowledged resource, this field should be empty. + // [#not-implemented-hide:] + ErrorState *UpdateFailureState `protobuf:"bytes,4,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` + // The client status of this resource. + // [#not-implemented-hide:] + ClientStatus ClientResourceStatus `protobuf:"varint,5,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` +} + +func (x *RoutesConfigDump_DynamicRouteConfig) Reset() { + *x = RoutesConfigDump_DynamicRouteConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RoutesConfigDump_DynamicRouteConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RoutesConfigDump_DynamicRouteConfig) ProtoMessage() {} + +func (x *RoutesConfigDump_DynamicRouteConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RoutesConfigDump_DynamicRouteConfig.ProtoReflect.Descriptor instead. 
+func (*RoutesConfigDump_DynamicRouteConfig) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{3, 1} +} + +func (x *RoutesConfigDump_DynamicRouteConfig) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *RoutesConfigDump_DynamicRouteConfig) GetRouteConfig() *anypb.Any { + if x != nil { + return x.RouteConfig + } + return nil +} + +func (x *RoutesConfigDump_DynamicRouteConfig) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +func (x *RoutesConfigDump_DynamicRouteConfig) GetErrorState() *UpdateFailureState { + if x != nil { + return x.ErrorState + } + return nil +} + +func (x *RoutesConfigDump_DynamicRouteConfig) GetClientStatus() ClientResourceStatus { + if x != nil { + return x.ClientStatus + } + return ClientResourceStatus_UNKNOWN +} + +type ScopedRoutesConfigDump_InlineScopedRouteConfigs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name assigned to the scoped route configurations. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The scoped route configurations. + ScopedRouteConfigs []*anypb.Any `protobuf:"bytes,2,rep,name=scoped_route_configs,json=scopedRouteConfigs,proto3" json:"scoped_route_configs,omitempty"` + // The timestamp when the scoped route config set was last updated. + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` +} + +func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) Reset() { + *x = ScopedRoutesConfigDump_InlineScopedRouteConfigs{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRoutesConfigDump_InlineScopedRouteConfigs) ProtoMessage() {} + +func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRoutesConfigDump_InlineScopedRouteConfigs.ProtoReflect.Descriptor instead. +func (*ScopedRoutesConfigDump_InlineScopedRouteConfigs) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) GetScopedRouteConfigs() []*anypb.Any { + if x != nil { + return x.ScopedRouteConfigs + } + return nil +} + +func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +// [#next-free-field: 7] +type ScopedRoutesConfigDump_DynamicScopedRouteConfigs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name assigned to the scoped route configurations. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time that + // the scoped routes configuration was loaded. + VersionInfo string `protobuf:"bytes,2,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The scoped route configurations. + ScopedRouteConfigs []*anypb.Any `protobuf:"bytes,3,rep,name=scoped_route_configs,json=scopedRouteConfigs,proto3" json:"scoped_route_configs,omitempty"` + // The timestamp when the scoped route config set was last updated. + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + // Set if the last update failed, cleared after the next successful update. + // The “error_state“ field contains the rejected version of this particular + // resource along with the reason and timestamp. For successfully updated or + // acknowledged resource, this field should be empty. + // [#not-implemented-hide:] + ErrorState *UpdateFailureState `protobuf:"bytes,5,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` + // The client status of this resource. + // [#not-implemented-hide:] + ClientStatus ClientResourceStatus `protobuf:"varint,6,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` +} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) Reset() { + *x = ScopedRoutesConfigDump_DynamicScopedRouteConfigs{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRoutesConfigDump_DynamicScopedRouteConfigs) ProtoMessage() {} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRoutesConfigDump_DynamicScopedRouteConfigs.ProtoReflect.Descriptor instead. 
+func (*ScopedRoutesConfigDump_DynamicScopedRouteConfigs) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{4, 1} +} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetScopedRouteConfigs() []*anypb.Any { + if x != nil { + return x.ScopedRouteConfigs + } + return nil +} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetErrorState() *UpdateFailureState { + if x != nil { + return x.ErrorState + } + return nil +} + +func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetClientStatus() ClientResourceStatus { + if x != nil { + return x.ClientStatus + } + return ClientResourceStatus_UNKNOWN +} + +type EndpointsConfigDump_StaticEndpointConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The endpoint config. + EndpointConfig *anypb.Any `protobuf:"bytes,1,opt,name=endpoint_config,json=endpointConfig,proto3" json:"endpoint_config,omitempty"` + // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` +} + +func (x *EndpointsConfigDump_StaticEndpointConfig) Reset() { + *x = EndpointsConfigDump_StaticEndpointConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EndpointsConfigDump_StaticEndpointConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EndpointsConfigDump_StaticEndpointConfig) ProtoMessage() {} + +func (x *EndpointsConfigDump_StaticEndpointConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EndpointsConfigDump_StaticEndpointConfig.ProtoReflect.Descriptor instead. +func (*EndpointsConfigDump_StaticEndpointConfig) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{5, 0} +} + +func (x *EndpointsConfigDump_StaticEndpointConfig) GetEndpointConfig() *anypb.Any { + if x != nil { + return x.EndpointConfig + } + return nil +} + +func (x *EndpointsConfigDump_StaticEndpointConfig) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +// [#next-free-field: 6] +type EndpointsConfigDump_DynamicEndpointConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // [#not-implemented-hide:] This is the per-resource version information. This version is currently taken from the + // :ref:`version_info ` field at the time that + // the endpoint configuration was loaded. 
+ VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The endpoint config. + EndpointConfig *anypb.Any `protobuf:"bytes,2,opt,name=endpoint_config,json=endpointConfig,proto3" json:"endpoint_config,omitempty"` + // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + // Set if the last update failed, cleared after the next successful update. + // The ``error_state`` field contains the rejected version of this particular + // resource along with the reason and timestamp. For successfully updated or + // acknowledged resource, this field should be empty. + // [#not-implemented-hide:] + ErrorState *UpdateFailureState `protobuf:"bytes,4,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` + // The client status of this resource. + // [#not-implemented-hide:] + ClientStatus ClientResourceStatus `protobuf:"varint,5,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` +} + +func (x *EndpointsConfigDump_DynamicEndpointConfig) Reset() { + *x = EndpointsConfigDump_DynamicEndpointConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EndpointsConfigDump_DynamicEndpointConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EndpointsConfigDump_DynamicEndpointConfig) ProtoMessage() {} + +func (x *EndpointsConfigDump_DynamicEndpointConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EndpointsConfigDump_DynamicEndpointConfig.ProtoReflect.Descriptor instead. +func (*EndpointsConfigDump_DynamicEndpointConfig) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{5, 1} +} + +func (x *EndpointsConfigDump_DynamicEndpointConfig) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *EndpointsConfigDump_DynamicEndpointConfig) GetEndpointConfig() *anypb.Any { + if x != nil { + return x.EndpointConfig + } + return nil +} + +func (x *EndpointsConfigDump_DynamicEndpointConfig) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +func (x *EndpointsConfigDump_DynamicEndpointConfig) GetErrorState() *UpdateFailureState { + if x != nil { + return x.ErrorState + } + return nil +} + +func (x *EndpointsConfigDump_DynamicEndpointConfig) GetClientStatus() ClientResourceStatus { + if x != nil { + return x.ClientStatus + } + return ClientResourceStatus_UNKNOWN +} + +// [#next-free-field: 6] +type EcdsConfigDump_EcdsFilterConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is the per-resource version information. This version is currently + // taken from the :ref:`version_info + // ` + // field at the time that the ECDS filter was loaded.
+ VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The ECDS filter config. + EcdsFilter *anypb.Any `protobuf:"bytes,2,opt,name=ecds_filter,json=ecdsFilter,proto3" json:"ecds_filter,omitempty"` + // The timestamp when the ECDS filter was last updated. + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + // Set if the last update failed, cleared after the next successful update. + // The ``error_state`` field contains the rejected version of this + // particular resource along with the reason and timestamp. For successfully + // updated or acknowledged resource, this field should be empty. + // [#not-implemented-hide:] + ErrorState *UpdateFailureState `protobuf:"bytes,4,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` + // The client status of this resource. + // [#not-implemented-hide:] + ClientStatus ClientResourceStatus `protobuf:"varint,5,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` +} + +func (x *EcdsConfigDump_EcdsFilterConfig) Reset() { + *x = EcdsConfigDump_EcdsFilterConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EcdsConfigDump_EcdsFilterConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EcdsConfigDump_EcdsFilterConfig) ProtoMessage() {} + +func (x *EcdsConfigDump_EcdsFilterConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EcdsConfigDump_EcdsFilterConfig.ProtoReflect.Descriptor instead.
+func (*EcdsConfigDump_EcdsFilterConfig) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *EcdsConfigDump_EcdsFilterConfig) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *EcdsConfigDump_EcdsFilterConfig) GetEcdsFilter() *anypb.Any { + if x != nil { + return x.EcdsFilter + } + return nil +} + +func (x *EcdsConfigDump_EcdsFilterConfig) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +func (x *EcdsConfigDump_EcdsFilterConfig) GetErrorState() *UpdateFailureState { + if x != nil { + return x.ErrorState + } + return nil +} + +func (x *EcdsConfigDump_EcdsFilterConfig) GetClientStatus() ClientResourceStatus { + if x != nil { + return x.ClientStatus + } + return ClientResourceStatus_UNKNOWN +} + +var File_envoy_admin_v3_config_dump_shared_proto protoreflect.FileDescriptor + +var file_envoy_admin_v3_config_dump_shared_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x5f, 0x73, 0x68, 0x61, + 0x72, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, + 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x95, 0x02, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x47, + 0x0a, 0x14, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, + 0x6e, 0x79, 0x52, 0x13, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x5f, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x74, 0x74, 0x65, + 0x6d, 0x70, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x21, 0x0a, + 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 
0x6e, 0x66, 0x6f, + 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, + 0xf3, 0x09, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x5d, 0x0a, 0x10, 0x73, 0x74, + 0x61, 0x74, 0x69, 0x63, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, + 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x0f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, + 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x12, 0x60, 0x0a, 0x11, 0x64, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x1a, 0xc0, 0x01, 0x0a, 0x0e, + 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, 0x30, + 0x0a, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x3a, + 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x1a, 0xef, + 0x01, 0x0a, 0x14, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x30, 0x0a, 0x08, 0x6c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, + 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, + 0x6e, 0x79, 0x52, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0c, + 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, + 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x3a, 0x43, 0x9a, 0xc5, 0x88, + 0x1e, 0x3e, 0x0a, 0x3c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x1a, 0x92, 0x04, 0x0a, 0x0f, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x5b, 0x0a, 0x0c, 0x61, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, + 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x5d, 0x0a, 0x0d, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, + 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0c, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x5f, 0x0a, 0x0e, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, + 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x24, 0x2e, 0x65, 
0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3e, 0x9a, 0xc5, 0x88, 0x1e, 0x39, 0x0a, 0x37, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x65, 0x72, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x44, 0x75, 0x6d, 0x70, 0x22, 0xca, 0x07, 0x0a, 0x12, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x21, 0x0a, 0x0c, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x59, 0x0a, 0x0f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x74, + 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x69, 0x0a, 0x17, 0x64, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x15, + 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x6b, 0x0a, 0x18, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x5f, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x16, 0x64, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x57, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x73, 0x1a, 0xbb, 0x01, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 
0x6e, 0x79, 0x52, 0x07, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x64, 0x3a, 0x3b, 0x9a, 0xc5, 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, + 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x1a, 0xf0, 0x02, 0x0a, 0x0e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, + 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2e, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, 0x37, 0x0a, 0x35, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, + 0x6d, 0x70, 0x22, 0xdd, 0x06, 0x0a, 0x10, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x64, 0x0a, 0x14, 0x73, 0x74, 
0x61, 0x74, 0x69, + 0x63, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x12, 0x73, 0x74, 0x61, 0x74, 0x69, + 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x67, 0x0a, + 0x15, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x13, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x1a, 0xca, 0x01, 0x0a, 0x11, 0x53, 0x74, 0x61, 0x74, 0x69, + 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, 0x0c, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x64, 0x3a, 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, + 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x1a, 0xff, 0x02, 0x0a, 0x12, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x37, 0x0a, + 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, + 0x64, 
0x61, 0x74, 0x65, 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3e, 0x9a, 0xc5, 0x88, 0x1e, 0x39, 0x0a, 0x37, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, + 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, + 0x6d, 0x70, 0x22, 0x8c, 0x08, 0x0a, 0x16, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x7e, 0x0a, + 0x1b, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x49, 0x6e, 0x6c, 0x69, 0x6e, + 0x65, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x73, 0x52, 0x18, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x63, 0x6f, 0x70, 0x65, + 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x81, 0x01, + 0x0a, 0x1c, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, + 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x19, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x73, 0x1a, 0x81, 0x02, 0x0a, 0x18, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x63, 0x6f, 0x70, + 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x14, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x12, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, + 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, + 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x3a, 0x4a, 0x9a, 0xc5, 0x88, 0x1e, 0x45, + 0x0a, 0x43, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x49, 0x6e, 0x6c, + 0x69, 0x6e, 0x65, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x73, 0x1a, 0xb6, 0x03, 0x0a, 0x19, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x46, 0x0a, 0x14, 0x73, 0x63, + 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x12, + 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x61, + 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 
0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x3a, 0x4b, 0x9a, 0xc5, 0x88, 0x1e, 0x46, 0x0a, 0x44, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x63, + 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x63, 0x6f, 0x70, + 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x3a, 0x31, + 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, + 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, + 0x70, 0x22, 0xde, 0x05, 0x0a, 0x13, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x70, 0x0a, 0x17, 0x73, 0x74, 0x61, + 0x74, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x73, 0x0a, 0x18, 0x64, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x45, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, + 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, + 0x1a, 0x94, 0x01, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0f, 0x65, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, + 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x1a, 0xc8, 0x02, 0x0a, 0x15, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 
0x69, 0x6e, 0x66, + 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3d, 0x0a, 0x0f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x41, 0x6e, 0x79, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x22, 0x89, 0x04, 0x0a, 0x0e, 0x45, 0x63, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x52, 0x0a, 0x0c, 0x65, 0x63, 0x64, 0x73, 0x5f, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x63, 0x64, + 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x45, 0x63, 0x64, 0x73, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x65, 0x63, + 0x64, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x1a, 0xf7, 0x02, 0x0a, 0x10, 0x45, 0x63, + 0x64, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, + 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x35, 0x0a, 0x0b, 0x65, 0x63, 0x64, 0x73, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0a, 0x65, 0x63, + 0x64, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, + 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x5f, 
0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x2e, 0x45, 0x63, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, + 0x6d, 0x70, 0x2e, 0x45, 0x63, 0x64, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x45, 0x63, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2a, 0x5d, + 0x0a, 0x14, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x45, 0x44, + 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x4f, 0x45, 0x53, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x45, + 0x58, 0x49, 0x53, 0x54, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, + 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x04, 0x42, 0x7e, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x15, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, + 0x70, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_admin_v3_config_dump_shared_proto_rawDescOnce sync.Once + file_envoy_admin_v3_config_dump_shared_proto_rawDescData = file_envoy_admin_v3_config_dump_shared_proto_rawDesc +) + +func file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP() []byte { + file_envoy_admin_v3_config_dump_shared_proto_rawDescOnce.Do(func() { + file_envoy_admin_v3_config_dump_shared_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_config_dump_shared_proto_rawDescData) + }) + return file_envoy_admin_v3_config_dump_shared_proto_rawDescData +} + +var file_envoy_admin_v3_config_dump_shared_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var 
file_envoy_admin_v3_config_dump_shared_proto_msgTypes = make([]protoimpl.MessageInfo, 19) +var file_envoy_admin_v3_config_dump_shared_proto_goTypes = []interface{}{ + (ClientResourceStatus)(0), // 0: envoy.admin.v3.ClientResourceStatus + (*UpdateFailureState)(nil), // 1: envoy.admin.v3.UpdateFailureState + (*ListenersConfigDump)(nil), // 2: envoy.admin.v3.ListenersConfigDump + (*ClustersConfigDump)(nil), // 3: envoy.admin.v3.ClustersConfigDump + (*RoutesConfigDump)(nil), // 4: envoy.admin.v3.RoutesConfigDump + (*ScopedRoutesConfigDump)(nil), // 5: envoy.admin.v3.ScopedRoutesConfigDump + (*EndpointsConfigDump)(nil), // 6: envoy.admin.v3.EndpointsConfigDump + (*EcdsConfigDump)(nil), // 7: envoy.admin.v3.EcdsConfigDump + (*ListenersConfigDump_StaticListener)(nil), // 8: envoy.admin.v3.ListenersConfigDump.StaticListener + (*ListenersConfigDump_DynamicListenerState)(nil), // 9: envoy.admin.v3.ListenersConfigDump.DynamicListenerState + (*ListenersConfigDump_DynamicListener)(nil), // 10: envoy.admin.v3.ListenersConfigDump.DynamicListener + (*ClustersConfigDump_StaticCluster)(nil), // 11: envoy.admin.v3.ClustersConfigDump.StaticCluster + (*ClustersConfigDump_DynamicCluster)(nil), // 12: envoy.admin.v3.ClustersConfigDump.DynamicCluster + (*RoutesConfigDump_StaticRouteConfig)(nil), // 13: envoy.admin.v3.RoutesConfigDump.StaticRouteConfig + (*RoutesConfigDump_DynamicRouteConfig)(nil), // 14: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig + (*ScopedRoutesConfigDump_InlineScopedRouteConfigs)(nil), // 15: envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs + (*ScopedRoutesConfigDump_DynamicScopedRouteConfigs)(nil), // 16: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs + (*EndpointsConfigDump_StaticEndpointConfig)(nil), // 17: envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig + (*EndpointsConfigDump_DynamicEndpointConfig)(nil), // 18: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig + (*EcdsConfigDump_EcdsFilterConfig)(nil), // 19: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig + (*anypb.Any)(nil), // 20: google.protobuf.Any + (*timestamppb.Timestamp)(nil), // 21: google.protobuf.Timestamp +} +var file_envoy_admin_v3_config_dump_shared_proto_depIdxs = []int32{ + 20, // 0: envoy.admin.v3.UpdateFailureState.failed_configuration:type_name -> google.protobuf.Any + 21, // 1: envoy.admin.v3.UpdateFailureState.last_update_attempt:type_name -> google.protobuf.Timestamp + 8, // 2: envoy.admin.v3.ListenersConfigDump.static_listeners:type_name -> envoy.admin.v3.ListenersConfigDump.StaticListener + 10, // 3: envoy.admin.v3.ListenersConfigDump.dynamic_listeners:type_name -> envoy.admin.v3.ListenersConfigDump.DynamicListener + 11, // 4: envoy.admin.v3.ClustersConfigDump.static_clusters:type_name -> envoy.admin.v3.ClustersConfigDump.StaticCluster + 12, // 5: envoy.admin.v3.ClustersConfigDump.dynamic_active_clusters:type_name -> envoy.admin.v3.ClustersConfigDump.DynamicCluster + 12, // 6: envoy.admin.v3.ClustersConfigDump.dynamic_warming_clusters:type_name -> envoy.admin.v3.ClustersConfigDump.DynamicCluster + 13, // 7: envoy.admin.v3.RoutesConfigDump.static_route_configs:type_name -> envoy.admin.v3.RoutesConfigDump.StaticRouteConfig + 14, // 8: envoy.admin.v3.RoutesConfigDump.dynamic_route_configs:type_name -> envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig + 15, // 9: envoy.admin.v3.ScopedRoutesConfigDump.inline_scoped_route_configs:type_name -> envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs + 16, // 10: 
envoy.admin.v3.ScopedRoutesConfigDump.dynamic_scoped_route_configs:type_name -> envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs + 17, // 11: envoy.admin.v3.EndpointsConfigDump.static_endpoint_configs:type_name -> envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig + 18, // 12: envoy.admin.v3.EndpointsConfigDump.dynamic_endpoint_configs:type_name -> envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig + 19, // 13: envoy.admin.v3.EcdsConfigDump.ecds_filters:type_name -> envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig + 20, // 14: envoy.admin.v3.ListenersConfigDump.StaticListener.listener:type_name -> google.protobuf.Any + 21, // 15: envoy.admin.v3.ListenersConfigDump.StaticListener.last_updated:type_name -> google.protobuf.Timestamp + 20, // 16: envoy.admin.v3.ListenersConfigDump.DynamicListenerState.listener:type_name -> google.protobuf.Any + 21, // 17: envoy.admin.v3.ListenersConfigDump.DynamicListenerState.last_updated:type_name -> google.protobuf.Timestamp + 9, // 18: envoy.admin.v3.ListenersConfigDump.DynamicListener.active_state:type_name -> envoy.admin.v3.ListenersConfigDump.DynamicListenerState + 9, // 19: envoy.admin.v3.ListenersConfigDump.DynamicListener.warming_state:type_name -> envoy.admin.v3.ListenersConfigDump.DynamicListenerState + 9, // 20: envoy.admin.v3.ListenersConfigDump.DynamicListener.draining_state:type_name -> envoy.admin.v3.ListenersConfigDump.DynamicListenerState + 1, // 21: envoy.admin.v3.ListenersConfigDump.DynamicListener.error_state:type_name -> envoy.admin.v3.UpdateFailureState + 0, // 22: envoy.admin.v3.ListenersConfigDump.DynamicListener.client_status:type_name -> envoy.admin.v3.ClientResourceStatus + 20, // 23: envoy.admin.v3.ClustersConfigDump.StaticCluster.cluster:type_name -> google.protobuf.Any + 21, // 24: envoy.admin.v3.ClustersConfigDump.StaticCluster.last_updated:type_name -> google.protobuf.Timestamp + 20, // 25: envoy.admin.v3.ClustersConfigDump.DynamicCluster.cluster:type_name -> google.protobuf.Any + 21, // 26: envoy.admin.v3.ClustersConfigDump.DynamicCluster.last_updated:type_name -> google.protobuf.Timestamp + 1, // 27: envoy.admin.v3.ClustersConfigDump.DynamicCluster.error_state:type_name -> envoy.admin.v3.UpdateFailureState + 0, // 28: envoy.admin.v3.ClustersConfigDump.DynamicCluster.client_status:type_name -> envoy.admin.v3.ClientResourceStatus + 20, // 29: envoy.admin.v3.RoutesConfigDump.StaticRouteConfig.route_config:type_name -> google.protobuf.Any + 21, // 30: envoy.admin.v3.RoutesConfigDump.StaticRouteConfig.last_updated:type_name -> google.protobuf.Timestamp + 20, // 31: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig.route_config:type_name -> google.protobuf.Any + 21, // 32: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig.last_updated:type_name -> google.protobuf.Timestamp + 1, // 33: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig.error_state:type_name -> envoy.admin.v3.UpdateFailureState + 0, // 34: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig.client_status:type_name -> envoy.admin.v3.ClientResourceStatus + 20, // 35: envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs.scoped_route_configs:type_name -> google.protobuf.Any + 21, // 36: envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs.last_updated:type_name -> google.protobuf.Timestamp + 20, // 37: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs.scoped_route_configs:type_name -> google.protobuf.Any + 21, // 38: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs.last_updated:type_name -> 
google.protobuf.Timestamp + 1, // 39: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs.error_state:type_name -> envoy.admin.v3.UpdateFailureState + 0, // 40: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs.client_status:type_name -> envoy.admin.v3.ClientResourceStatus + 20, // 41: envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig.endpoint_config:type_name -> google.protobuf.Any + 21, // 42: envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig.last_updated:type_name -> google.protobuf.Timestamp + 20, // 43: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig.endpoint_config:type_name -> google.protobuf.Any + 21, // 44: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig.last_updated:type_name -> google.protobuf.Timestamp + 1, // 45: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig.error_state:type_name -> envoy.admin.v3.UpdateFailureState + 0, // 46: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig.client_status:type_name -> envoy.admin.v3.ClientResourceStatus + 20, // 47: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig.ecds_filter:type_name -> google.protobuf.Any + 21, // 48: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig.last_updated:type_name -> google.protobuf.Timestamp + 1, // 49: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig.error_state:type_name -> envoy.admin.v3.UpdateFailureState + 0, // 50: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig.client_status:type_name -> envoy.admin.v3.ClientResourceStatus + 51, // [51:51] is the sub-list for method output_type + 51, // [51:51] is the sub-list for method input_type + 51, // [51:51] is the sub-list for extension type_name + 51, // [51:51] is the sub-list for extension extendee + 0, // [0:51] is the sub-list for field type_name +} + +func init() { file_envoy_admin_v3_config_dump_shared_proto_init() } +func file_envoy_admin_v3_config_dump_shared_proto_init() { + if File_envoy_admin_v3_config_dump_shared_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateFailureState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenersConfigDump); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClustersConfigDump); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RoutesConfigDump); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRoutesConfigDump); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*EndpointsConfigDump); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EcdsConfigDump); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenersConfigDump_StaticListener); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenersConfigDump_DynamicListenerState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenersConfigDump_DynamicListener); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClustersConfigDump_StaticCluster); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClustersConfigDump_DynamicCluster); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RoutesConfigDump_StaticRouteConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RoutesConfigDump_DynamicRouteConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRoutesConfigDump_InlineScopedRouteConfigs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRoutesConfigDump_DynamicScopedRouteConfigs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EndpointsConfigDump_StaticEndpointConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[17].Exporter = func(v interface{}, i 
int) interface{} { + switch v := v.(*EndpointsConfigDump_DynamicEndpointConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_config_dump_shared_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EcdsConfigDump_EcdsFilterConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_admin_v3_config_dump_shared_proto_rawDesc, + NumEnums: 1, + NumMessages: 19, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_admin_v3_config_dump_shared_proto_goTypes, + DependencyIndexes: file_envoy_admin_v3_config_dump_shared_proto_depIdxs, + EnumInfos: file_envoy_admin_v3_config_dump_shared_proto_enumTypes, + MessageInfos: file_envoy_admin_v3_config_dump_shared_proto_msgTypes, + }.Build() + File_envoy_admin_v3_config_dump_shared_proto = out.File + file_envoy_admin_v3_config_dump_shared_proto_rawDesc = nil + file_envoy_admin_v3_config_dump_shared_proto_goTypes = nil + file_envoy_admin_v3_config_dump_shared_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.validate.go new file mode 100644 index 0000000000..dd16990ad4 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.validate.go @@ -0,0 +1,3435 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/admin/v3/config_dump_shared.proto + +package adminv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on UpdateFailureState with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *UpdateFailureState) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UpdateFailureState with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// UpdateFailureStateMultiError, or nil if none found. 
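For orientation, a minimal sketch of how the types wired up by the init() in config_dump_shared.pb.go above are consumed: once the Exporter closures and the protoimpl.TypeBuilder registration have run, the generated messages behave like any other protobuf-go message. The adminv3 import alias and the sample value are assumptions for illustration, not part of this patch.

package main

import (
	"fmt"

	adminv3 "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Round-trip one of the registered config-dump messages through the
	// standard protobuf-go API; the registration above is what makes the
	// type's descriptor and internal state available to the runtime.
	in := &adminv3.ListenersConfigDump{VersionInfo: "example-version"}

	raw, err := proto.Marshal(in)
	if err != nil {
		panic(err)
	}

	out := &adminv3.ListenersConfigDump{}
	if err := proto.Unmarshal(raw, out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetVersionInfo()) // prints "example-version"
}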
+func (m *UpdateFailureState) ValidateAll() error { + return m.validate(true) +} + +func (m *UpdateFailureState) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetFailedConfiguration()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpdateFailureStateValidationError{ + field: "FailedConfiguration", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpdateFailureStateValidationError{ + field: "FailedConfiguration", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFailedConfiguration()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpdateFailureStateValidationError{ + field: "FailedConfiguration", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdateAttempt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpdateFailureStateValidationError{ + field: "LastUpdateAttempt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpdateFailureStateValidationError{ + field: "LastUpdateAttempt", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdateAttempt()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpdateFailureStateValidationError{ + field: "LastUpdateAttempt", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Details + + // no validation rules for VersionInfo + + if len(errors) > 0 { + return UpdateFailureStateMultiError(errors) + } + + return nil +} + +// UpdateFailureStateMultiError is an error wrapping multiple validation errors +// returned by UpdateFailureState.ValidateAll() if the designated constraints +// aren't met. +type UpdateFailureStateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UpdateFailureStateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UpdateFailureStateMultiError) AllErrors() []error { return m } + +// UpdateFailureStateValidationError is the validation error returned by +// UpdateFailureState.Validate if the designated constraints aren't met. +type UpdateFailureStateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UpdateFailureStateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UpdateFailureStateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UpdateFailureStateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UpdateFailureStateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e UpdateFailureStateValidationError) ErrorName() string { + return "UpdateFailureStateValidationError" +} + +// Error satisfies the builtin error interface +func (e UpdateFailureStateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUpdateFailureState.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UpdateFailureStateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UpdateFailureStateValidationError{} + +// Validate checks the field values on ListenersConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ListenersConfigDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListenersConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ListenersConfigDumpMultiError, or nil if none found. +func (m *ListenersConfigDump) ValidateAll() error { + return m.validate(true) +} + +func (m *ListenersConfigDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for VersionInfo + + for idx, item := range m.GetStaticListeners() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDumpValidationError{ + field: fmt.Sprintf("StaticListeners[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDumpValidationError{ + field: fmt.Sprintf("StaticListeners[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDumpValidationError{ + field: fmt.Sprintf("StaticListeners[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetDynamicListeners() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicListeners[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicListeners[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicListeners[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ListenersConfigDumpMultiError(errors) + } + + return nil +} + +// ListenersConfigDumpMultiError is an error wrapping 
multiple validation +// errors returned by ListenersConfigDump.ValidateAll() if the designated +// constraints aren't met. +type ListenersConfigDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenersConfigDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenersConfigDumpMultiError) AllErrors() []error { return m } + +// ListenersConfigDumpValidationError is the validation error returned by +// ListenersConfigDump.Validate if the designated constraints aren't met. +type ListenersConfigDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenersConfigDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenersConfigDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenersConfigDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListenersConfigDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListenersConfigDumpValidationError) ErrorName() string { + return "ListenersConfigDumpValidationError" +} + +// Error satisfies the builtin error interface +func (e ListenersConfigDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListenersConfigDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenersConfigDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenersConfigDumpValidationError{} + +// Validate checks the field values on ClustersConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ClustersConfigDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClustersConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ClustersConfigDumpMultiError, or nil if none found. 
+func (m *ClustersConfigDump) ValidateAll() error { + return m.validate(true) +} + +func (m *ClustersConfigDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for VersionInfo + + for idx, item := range m.GetStaticClusters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersConfigDumpValidationError{ + field: fmt.Sprintf("StaticClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersConfigDumpValidationError{ + field: fmt.Sprintf("StaticClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClustersConfigDumpValidationError{ + field: fmt.Sprintf("StaticClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetDynamicActiveClusters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicActiveClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicActiveClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClustersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicActiveClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetDynamicWarmingClusters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicWarmingClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicWarmingClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClustersConfigDumpValidationError{ + field: fmt.Sprintf("DynamicWarmingClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ClustersConfigDumpMultiError(errors) + } + + return nil +} + +// ClustersConfigDumpMultiError is an error wrapping multiple validation errors +// returned by ClustersConfigDump.ValidateAll() if the designated constraints +// aren't met. +type ClustersConfigDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ClustersConfigDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClustersConfigDumpMultiError) AllErrors() []error { return m } + +// ClustersConfigDumpValidationError is the validation error returned by +// ClustersConfigDump.Validate if the designated constraints aren't met. +type ClustersConfigDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClustersConfigDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClustersConfigDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClustersConfigDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClustersConfigDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClustersConfigDumpValidationError) ErrorName() string { + return "ClustersConfigDumpValidationError" +} + +// Error satisfies the builtin error interface +func (e ClustersConfigDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClustersConfigDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClustersConfigDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClustersConfigDumpValidationError{} + +// Validate checks the field values on RoutesConfigDump with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *RoutesConfigDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RoutesConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RoutesConfigDumpMultiError, or nil if none found. 
+func (m *RoutesConfigDump) ValidateAll() error { + return m.validate(true) +} + +func (m *RoutesConfigDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetStaticRouteConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RoutesConfigDumpValidationError{ + field: fmt.Sprintf("StaticRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RoutesConfigDumpValidationError{ + field: fmt.Sprintf("StaticRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RoutesConfigDumpValidationError{ + field: fmt.Sprintf("StaticRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetDynamicRouteConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RoutesConfigDumpValidationError{ + field: fmt.Sprintf("DynamicRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RoutesConfigDumpValidationError{ + field: fmt.Sprintf("DynamicRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RoutesConfigDumpValidationError{ + field: fmt.Sprintf("DynamicRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return RoutesConfigDumpMultiError(errors) + } + + return nil +} + +// RoutesConfigDumpMultiError is an error wrapping multiple validation errors +// returned by RoutesConfigDump.ValidateAll() if the designated constraints +// aren't met. +type RoutesConfigDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RoutesConfigDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RoutesConfigDumpMultiError) AllErrors() []error { return m } + +// RoutesConfigDumpValidationError is the validation error returned by +// RoutesConfigDump.Validate if the designated constraints aren't met. +type RoutesConfigDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RoutesConfigDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RoutesConfigDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RoutesConfigDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RoutesConfigDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RoutesConfigDumpValidationError) ErrorName() string { return "RoutesConfigDumpValidationError" } + +// Error satisfies the builtin error interface +func (e RoutesConfigDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRoutesConfigDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RoutesConfigDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RoutesConfigDumpValidationError{} + +// Validate checks the field values on ScopedRoutesConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ScopedRoutesConfigDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ScopedRoutesConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ScopedRoutesConfigDumpMultiError, or nil if none found. +func (m *ScopedRoutesConfigDump) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRoutesConfigDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetInlineScopedRouteConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesConfigDumpValidationError{ + field: fmt.Sprintf("InlineScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesConfigDumpValidationError{ + field: fmt.Sprintf("InlineScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesConfigDumpValidationError{ + field: fmt.Sprintf("InlineScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetDynamicScopedRouteConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesConfigDumpValidationError{ + field: fmt.Sprintf("DynamicScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesConfigDumpValidationError{ + field: fmt.Sprintf("DynamicScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesConfigDumpValidationError{ + field: fmt.Sprintf("DynamicScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ScopedRoutesConfigDumpMultiError(errors) + } + + return nil +} + 
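As a usage sketch for the Validate/ValidateAll pair generated above: Validate returns the first violation it hits (or nil), while ValidateAll collects every violation into the corresponding MultiError slice, whose entries expose Field, Reason and Cause. The adminv3 alias and the empty input value are assumptions for illustration, not part of this patch.

package main

import (
	"errors"
	"fmt"

	adminv3 "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
)

func main() {
	// In practice this would be a dump decoded from an Envoy admin endpoint;
	// an empty message simply validates cleanly here.
	dump := &adminv3.ScopedRoutesConfigDump{}

	// Fast path: stop at the first violation.
	if err := dump.Validate(); err != nil {
		fmt.Println("invalid scoped routes dump:", err)
	}

	// Exhaustive path: gather every violation, then inspect each one.
	if err := dump.ValidateAll(); err != nil {
		var multi adminv3.ScopedRoutesConfigDumpMultiError
		if errors.As(err, &multi) {
			for _, violation := range multi.AllErrors() {
				var ve adminv3.ScopedRoutesConfigDumpValidationError
				if errors.As(violation, &ve) {
					fmt.Printf("%s: field=%s reason=%q cause=%v\n",
						ve.ErrorName(), ve.Field(), ve.Reason(), ve.Cause())
				}
			}
		}
	}
}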
+// ScopedRoutesConfigDumpMultiError is an error wrapping multiple validation +// errors returned by ScopedRoutesConfigDump.ValidateAll() if the designated +// constraints aren't met. +type ScopedRoutesConfigDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRoutesConfigDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopedRoutesConfigDumpMultiError) AllErrors() []error { return m } + +// ScopedRoutesConfigDumpValidationError is the validation error returned by +// ScopedRoutesConfigDump.Validate if the designated constraints aren't met. +type ScopedRoutesConfigDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRoutesConfigDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScopedRoutesConfigDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScopedRoutesConfigDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScopedRoutesConfigDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ScopedRoutesConfigDumpValidationError) ErrorName() string { + return "ScopedRoutesConfigDumpValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopedRoutesConfigDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRoutesConfigDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRoutesConfigDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRoutesConfigDumpValidationError{} + +// Validate checks the field values on EndpointsConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *EndpointsConfigDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on EndpointsConfigDump with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// EndpointsConfigDumpMultiError, or nil if none found. 
+func (m *EndpointsConfigDump) ValidateAll() error { + return m.validate(true) +} + +func (m *EndpointsConfigDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetStaticEndpointConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointsConfigDumpValidationError{ + field: fmt.Sprintf("StaticEndpointConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointsConfigDumpValidationError{ + field: fmt.Sprintf("StaticEndpointConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointsConfigDumpValidationError{ + field: fmt.Sprintf("StaticEndpointConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetDynamicEndpointConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicEndpointConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicEndpointConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointsConfigDumpValidationError{ + field: fmt.Sprintf("DynamicEndpointConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return EndpointsConfigDumpMultiError(errors) + } + + return nil +} + +// EndpointsConfigDumpMultiError is an error wrapping multiple validation +// errors returned by EndpointsConfigDump.ValidateAll() if the designated +// constraints aren't met. +type EndpointsConfigDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EndpointsConfigDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EndpointsConfigDumpMultiError) AllErrors() []error { return m } + +// EndpointsConfigDumpValidationError is the validation error returned by +// EndpointsConfigDump.Validate if the designated constraints aren't met. +type EndpointsConfigDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EndpointsConfigDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EndpointsConfigDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EndpointsConfigDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e EndpointsConfigDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e EndpointsConfigDumpValidationError) ErrorName() string { + return "EndpointsConfigDumpValidationError" +} + +// Error satisfies the builtin error interface +func (e EndpointsConfigDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEndpointsConfigDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EndpointsConfigDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EndpointsConfigDumpValidationError{} + +// Validate checks the field values on EcdsConfigDump with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *EcdsConfigDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on EcdsConfigDump with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in EcdsConfigDumpMultiError, +// or nil if none found. +func (m *EcdsConfigDump) ValidateAll() error { + return m.validate(true) +} + +func (m *EcdsConfigDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetEcdsFilters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EcdsConfigDumpValidationError{ + field: fmt.Sprintf("EcdsFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EcdsConfigDumpValidationError{ + field: fmt.Sprintf("EcdsFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EcdsConfigDumpValidationError{ + field: fmt.Sprintf("EcdsFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return EcdsConfigDumpMultiError(errors) + } + + return nil +} + +// EcdsConfigDumpMultiError is an error wrapping multiple validation errors +// returned by EcdsConfigDump.ValidateAll() if the designated constraints +// aren't met. +type EcdsConfigDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EcdsConfigDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EcdsConfigDumpMultiError) AllErrors() []error { return m } + +// EcdsConfigDumpValidationError is the validation error returned by +// EcdsConfigDump.Validate if the designated constraints aren't met. +type EcdsConfigDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EcdsConfigDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e EcdsConfigDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EcdsConfigDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EcdsConfigDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e EcdsConfigDumpValidationError) ErrorName() string { return "EcdsConfigDumpValidationError" } + +// Error satisfies the builtin error interface +func (e EcdsConfigDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEcdsConfigDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EcdsConfigDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EcdsConfigDumpValidationError{} + +// Validate checks the field values on ListenersConfigDump_StaticListener with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *ListenersConfigDump_StaticListener) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListenersConfigDump_StaticListener +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// ListenersConfigDump_StaticListenerMultiError, or nil if none found. +func (m *ListenersConfigDump_StaticListener) ValidateAll() error { + return m.validate(true) +} + +func (m *ListenersConfigDump_StaticListener) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetListener()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDump_StaticListenerValidationError{ + field: "Listener", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDump_StaticListenerValidationError{ + field: "Listener", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetListener()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDump_StaticListenerValidationError{ + field: "Listener", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDump_StaticListenerValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDump_StaticListenerValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDump_StaticListenerValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + 
cause: err, + } + } + } + + if len(errors) > 0 { + return ListenersConfigDump_StaticListenerMultiError(errors) + } + + return nil +} + +// ListenersConfigDump_StaticListenerMultiError is an error wrapping multiple +// validation errors returned by +// ListenersConfigDump_StaticListener.ValidateAll() if the designated +// constraints aren't met. +type ListenersConfigDump_StaticListenerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenersConfigDump_StaticListenerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenersConfigDump_StaticListenerMultiError) AllErrors() []error { return m } + +// ListenersConfigDump_StaticListenerValidationError is the validation error +// returned by ListenersConfigDump_StaticListener.Validate if the designated +// constraints aren't met. +type ListenersConfigDump_StaticListenerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenersConfigDump_StaticListenerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenersConfigDump_StaticListenerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenersConfigDump_StaticListenerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListenersConfigDump_StaticListenerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListenersConfigDump_StaticListenerValidationError) ErrorName() string { + return "ListenersConfigDump_StaticListenerValidationError" +} + +// Error satisfies the builtin error interface +func (e ListenersConfigDump_StaticListenerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListenersConfigDump_StaticListener.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenersConfigDump_StaticListenerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenersConfigDump_StaticListenerValidationError{} + +// Validate checks the field values on ListenersConfigDump_DynamicListenerState +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *ListenersConfigDump_DynamicListenerState) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// ListenersConfigDump_DynamicListenerState with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// ListenersConfigDump_DynamicListenerStateMultiError, or nil if none found. 
+func (m *ListenersConfigDump_DynamicListenerState) ValidateAll() error { + return m.validate(true) +} + +func (m *ListenersConfigDump_DynamicListenerState) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for VersionInfo + + if all { + switch v := interface{}(m.GetListener()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerStateValidationError{ + field: "Listener", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerStateValidationError{ + field: "Listener", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetListener()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDump_DynamicListenerStateValidationError{ + field: "Listener", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerStateValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerStateValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDump_DynamicListenerStateValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ListenersConfigDump_DynamicListenerStateMultiError(errors) + } + + return nil +} + +// ListenersConfigDump_DynamicListenerStateMultiError is an error wrapping +// multiple validation errors returned by +// ListenersConfigDump_DynamicListenerState.ValidateAll() if the designated +// constraints aren't met. +type ListenersConfigDump_DynamicListenerStateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenersConfigDump_DynamicListenerStateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenersConfigDump_DynamicListenerStateMultiError) AllErrors() []error { return m } + +// ListenersConfigDump_DynamicListenerStateValidationError is the validation +// error returned by ListenersConfigDump_DynamicListenerState.Validate if the +// designated constraints aren't met. +type ListenersConfigDump_DynamicListenerStateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenersConfigDump_DynamicListenerStateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenersConfigDump_DynamicListenerStateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e ListenersConfigDump_DynamicListenerStateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListenersConfigDump_DynamicListenerStateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListenersConfigDump_DynamicListenerStateValidationError) ErrorName() string { + return "ListenersConfigDump_DynamicListenerStateValidationError" +} + +// Error satisfies the builtin error interface +func (e ListenersConfigDump_DynamicListenerStateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListenersConfigDump_DynamicListenerState.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenersConfigDump_DynamicListenerStateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenersConfigDump_DynamicListenerStateValidationError{} + +// Validate checks the field values on ListenersConfigDump_DynamicListener with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *ListenersConfigDump_DynamicListener) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListenersConfigDump_DynamicListener +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// ListenersConfigDump_DynamicListenerMultiError, or nil if none found. +func (m *ListenersConfigDump_DynamicListener) ValidateAll() error { + return m.validate(true) +} + +func (m *ListenersConfigDump_DynamicListener) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + if all { + switch v := interface{}(m.GetActiveState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ + field: "ActiveState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ + field: "ActiveState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetActiveState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDump_DynamicListenerValidationError{ + field: "ActiveState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetWarmingState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ + field: "WarmingState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ + field: "WarmingState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWarmingState()).(interface{ Validate() error }); ok { + if err := 
v.Validate(); err != nil { + return ListenersConfigDump_DynamicListenerValidationError{ + field: "WarmingState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetDrainingState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ + field: "DrainingState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ + field: "DrainingState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDrainingState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDump_DynamicListenerValidationError{ + field: "DrainingState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetErrorState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersConfigDump_DynamicListenerValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ClientStatus + + if len(errors) > 0 { + return ListenersConfigDump_DynamicListenerMultiError(errors) + } + + return nil +} + +// ListenersConfigDump_DynamicListenerMultiError is an error wrapping multiple +// validation errors returned by +// ListenersConfigDump_DynamicListener.ValidateAll() if the designated +// constraints aren't met. +type ListenersConfigDump_DynamicListenerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenersConfigDump_DynamicListenerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenersConfigDump_DynamicListenerMultiError) AllErrors() []error { return m } + +// ListenersConfigDump_DynamicListenerValidationError is the validation error +// returned by ListenersConfigDump_DynamicListener.Validate if the designated +// constraints aren't met. +type ListenersConfigDump_DynamicListenerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenersConfigDump_DynamicListenerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenersConfigDump_DynamicListenerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenersConfigDump_DynamicListenerValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e ListenersConfigDump_DynamicListenerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListenersConfigDump_DynamicListenerValidationError) ErrorName() string { + return "ListenersConfigDump_DynamicListenerValidationError" +} + +// Error satisfies the builtin error interface +func (e ListenersConfigDump_DynamicListenerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListenersConfigDump_DynamicListener.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenersConfigDump_DynamicListenerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenersConfigDump_DynamicListenerValidationError{} + +// Validate checks the field values on ClustersConfigDump_StaticCluster with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *ClustersConfigDump_StaticCluster) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClustersConfigDump_StaticCluster with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// ClustersConfigDump_StaticClusterMultiError, or nil if none found. +func (m *ClustersConfigDump_StaticCluster) ValidateAll() error { + return m.validate(true) +} + +func (m *ClustersConfigDump_StaticCluster) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetCluster()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersConfigDump_StaticClusterValidationError{ + field: "Cluster", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersConfigDump_StaticClusterValidationError{ + field: "Cluster", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCluster()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClustersConfigDump_StaticClusterValidationError{ + field: "Cluster", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersConfigDump_StaticClusterValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersConfigDump_StaticClusterValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClustersConfigDump_StaticClusterValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ClustersConfigDump_StaticClusterMultiError(errors) 
+ } + + return nil +} + +// ClustersConfigDump_StaticClusterMultiError is an error wrapping multiple +// validation errors returned by +// ClustersConfigDump_StaticCluster.ValidateAll() if the designated +// constraints aren't met. +type ClustersConfigDump_StaticClusterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClustersConfigDump_StaticClusterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClustersConfigDump_StaticClusterMultiError) AllErrors() []error { return m } + +// ClustersConfigDump_StaticClusterValidationError is the validation error +// returned by ClustersConfigDump_StaticCluster.Validate if the designated +// constraints aren't met. +type ClustersConfigDump_StaticClusterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClustersConfigDump_StaticClusterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClustersConfigDump_StaticClusterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClustersConfigDump_StaticClusterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClustersConfigDump_StaticClusterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClustersConfigDump_StaticClusterValidationError) ErrorName() string { + return "ClustersConfigDump_StaticClusterValidationError" +} + +// Error satisfies the builtin error interface +func (e ClustersConfigDump_StaticClusterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClustersConfigDump_StaticCluster.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClustersConfigDump_StaticClusterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClustersConfigDump_StaticClusterValidationError{} + +// Validate checks the field values on ClustersConfigDump_DynamicCluster with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *ClustersConfigDump_DynamicCluster) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClustersConfigDump_DynamicCluster +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// ClustersConfigDump_DynamicClusterMultiError, or nil if none found. 
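One property worth noting about the ValidationError types in this file: each satisfies the Field/Reason/Key/Cause/ErrorName interface asserted after its definition, but none defines Unwrap, so errors.Is and errors.As will not descend into the embedded message's failure on their own; nested causes have to be followed through Cause() explicitly. A small sketch of that walk, with the local interface and helper names chosen purely for illustration:

package main

import (
	"fmt"

	adminv3 "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
)

// fieldViolation mirrors the accessor set every generated validation error in
// this file is asserted to implement; the name is local to this sketch.
type fieldViolation interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
}

// describe prints one line per nesting level, following Cause() until it
// reaches nil or an error that is not a protoc-gen-validate violation.
func describe(err error) {
	for err != nil {
		fv, ok := err.(fieldViolation)
		if !ok {
			fmt.Println("root cause:", err)
			return
		}
		fmt.Printf("%s: field=%s reason=%s\n", fv.ErrorName(), fv.Field(), fv.Reason())
		err = fv.Cause()
	}
}

func main() {
	// An empty dump validates cleanly, so describe simply receives nil here;
	// with a real, invalid dump it would print the violation chain.
	describe((&adminv3.ClustersConfigDump{}).Validate())
}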
+func (m *ClustersConfigDump_DynamicCluster) ValidateAll() error { + return m.validate(true) +} + +func (m *ClustersConfigDump_DynamicCluster) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for VersionInfo + + if all { + switch v := interface{}(m.GetCluster()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{ + field: "Cluster", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{ + field: "Cluster", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCluster()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClustersConfigDump_DynamicClusterValidationError{ + field: "Cluster", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClustersConfigDump_DynamicClusterValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetErrorState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClustersConfigDump_DynamicClusterValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ClientStatus + + if len(errors) > 0 { + return ClustersConfigDump_DynamicClusterMultiError(errors) + } + + return nil +} + +// ClustersConfigDump_DynamicClusterMultiError is an error wrapping multiple +// validation errors returned by +// ClustersConfigDump_DynamicCluster.ValidateAll() if the designated +// constraints aren't met. +type ClustersConfigDump_DynamicClusterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ClustersConfigDump_DynamicClusterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClustersConfigDump_DynamicClusterMultiError) AllErrors() []error { return m } + +// ClustersConfigDump_DynamicClusterValidationError is the validation error +// returned by ClustersConfigDump_DynamicCluster.Validate if the designated +// constraints aren't met. +type ClustersConfigDump_DynamicClusterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClustersConfigDump_DynamicClusterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClustersConfigDump_DynamicClusterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClustersConfigDump_DynamicClusterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClustersConfigDump_DynamicClusterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClustersConfigDump_DynamicClusterValidationError) ErrorName() string { + return "ClustersConfigDump_DynamicClusterValidationError" +} + +// Error satisfies the builtin error interface +func (e ClustersConfigDump_DynamicClusterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClustersConfigDump_DynamicCluster.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClustersConfigDump_DynamicClusterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClustersConfigDump_DynamicClusterValidationError{} + +// Validate checks the field values on RoutesConfigDump_StaticRouteConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *RoutesConfigDump_StaticRouteConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RoutesConfigDump_StaticRouteConfig +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// RoutesConfigDump_StaticRouteConfigMultiError, or nil if none found. 
+func (m *RoutesConfigDump_StaticRouteConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *RoutesConfigDump_StaticRouteConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetRouteConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RoutesConfigDump_StaticRouteConfigValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RoutesConfigDump_StaticRouteConfigValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRouteConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RoutesConfigDump_StaticRouteConfigValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RoutesConfigDump_StaticRouteConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RoutesConfigDump_StaticRouteConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RoutesConfigDump_StaticRouteConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RoutesConfigDump_StaticRouteConfigMultiError(errors) + } + + return nil +} + +// RoutesConfigDump_StaticRouteConfigMultiError is an error wrapping multiple +// validation errors returned by +// RoutesConfigDump_StaticRouteConfig.ValidateAll() if the designated +// constraints aren't met. +type RoutesConfigDump_StaticRouteConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RoutesConfigDump_StaticRouteConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RoutesConfigDump_StaticRouteConfigMultiError) AllErrors() []error { return m } + +// RoutesConfigDump_StaticRouteConfigValidationError is the validation error +// returned by RoutesConfigDump_StaticRouteConfig.Validate if the designated +// constraints aren't met. +type RoutesConfigDump_StaticRouteConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RoutesConfigDump_StaticRouteConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RoutesConfigDump_StaticRouteConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RoutesConfigDump_StaticRouteConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e RoutesConfigDump_StaticRouteConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RoutesConfigDump_StaticRouteConfigValidationError) ErrorName() string { + return "RoutesConfigDump_StaticRouteConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e RoutesConfigDump_StaticRouteConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRoutesConfigDump_StaticRouteConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RoutesConfigDump_StaticRouteConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RoutesConfigDump_StaticRouteConfigValidationError{} + +// Validate checks the field values on RoutesConfigDump_DynamicRouteConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *RoutesConfigDump_DynamicRouteConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RoutesConfigDump_DynamicRouteConfig +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// RoutesConfigDump_DynamicRouteConfigMultiError, or nil if none found. +func (m *RoutesConfigDump_DynamicRouteConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *RoutesConfigDump_DynamicRouteConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for VersionInfo + + if all { + switch v := interface{}(m.GetRouteConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRouteConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + 
} + } + + if all { + switch v := interface{}(m.GetErrorState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RoutesConfigDump_DynamicRouteConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ClientStatus + + if len(errors) > 0 { + return RoutesConfigDump_DynamicRouteConfigMultiError(errors) + } + + return nil +} + +// RoutesConfigDump_DynamicRouteConfigMultiError is an error wrapping multiple +// validation errors returned by +// RoutesConfigDump_DynamicRouteConfig.ValidateAll() if the designated +// constraints aren't met. +type RoutesConfigDump_DynamicRouteConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RoutesConfigDump_DynamicRouteConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RoutesConfigDump_DynamicRouteConfigMultiError) AllErrors() []error { return m } + +// RoutesConfigDump_DynamicRouteConfigValidationError is the validation error +// returned by RoutesConfigDump_DynamicRouteConfig.Validate if the designated +// constraints aren't met. +type RoutesConfigDump_DynamicRouteConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RoutesConfigDump_DynamicRouteConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RoutesConfigDump_DynamicRouteConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RoutesConfigDump_DynamicRouteConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RoutesConfigDump_DynamicRouteConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RoutesConfigDump_DynamicRouteConfigValidationError) ErrorName() string { + return "RoutesConfigDump_DynamicRouteConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e RoutesConfigDump_DynamicRouteConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRoutesConfigDump_DynamicRouteConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RoutesConfigDump_DynamicRouteConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RoutesConfigDump_DynamicRouteConfigValidationError{} + +// Validate checks the field values on +// ScopedRoutesConfigDump_InlineScopedRouteConfigs with the rules defined in +// the proto definition for this message. 
If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// ScopedRoutesConfigDump_InlineScopedRouteConfigs with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError, or nil if none found. +func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + for idx, item := range m.GetScopedRouteConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{ + field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{ + field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{ + field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError(errors) + } + + return nil +} + +// ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError is an error +// wrapping multiple validation errors returned by +// ScopedRoutesConfigDump_InlineScopedRouteConfigs.ValidateAll() if the +// designated constraints aren't met. +type ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError) AllErrors() []error { return m } + +// ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError is the +// validation error returned by +// ScopedRoutesConfigDump_InlineScopedRouteConfigs.Validate if the designated +// constraints aren't met. +type ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) ErrorName() string { + return "ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRoutesConfigDump_InlineScopedRouteConfigs.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{} + +// Validate checks the field values on +// ScopedRoutesConfigDump_DynamicScopedRouteConfigs with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// ScopedRoutesConfigDump_DynamicScopedRouteConfigs with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError, or nil if none found. 
+func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + // no validation rules for VersionInfo + + for idx, item := range m.GetScopedRouteConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetErrorState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ClientStatus + + if len(errors) > 0 { + return ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError(errors) + } + + return nil +} + +// ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError is an error +// wrapping multiple validation errors returned by +// ScopedRoutesConfigDump_DynamicScopedRouteConfigs.ValidateAll() if 
the +// designated constraints aren't met. +type ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError) AllErrors() []error { return m } + +// ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError is the +// validation error returned by +// ScopedRoutesConfigDump_DynamicScopedRouteConfigs.Validate if the designated +// constraints aren't met. +type ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) ErrorName() string { + return "ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRoutesConfigDump_DynamicScopedRouteConfigs.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{} + +// Validate checks the field values on EndpointsConfigDump_StaticEndpointConfig +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *EndpointsConfigDump_StaticEndpointConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// EndpointsConfigDump_StaticEndpointConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// EndpointsConfigDump_StaticEndpointConfigMultiError, or nil if none found. 
+func (m *EndpointsConfigDump_StaticEndpointConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *EndpointsConfigDump_StaticEndpointConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetEndpointConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointsConfigDump_StaticEndpointConfigValidationError{ + field: "EndpointConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointsConfigDump_StaticEndpointConfigValidationError{ + field: "EndpointConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEndpointConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointsConfigDump_StaticEndpointConfigValidationError{ + field: "EndpointConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointsConfigDump_StaticEndpointConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointsConfigDump_StaticEndpointConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointsConfigDump_StaticEndpointConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return EndpointsConfigDump_StaticEndpointConfigMultiError(errors) + } + + return nil +} + +// EndpointsConfigDump_StaticEndpointConfigMultiError is an error wrapping +// multiple validation errors returned by +// EndpointsConfigDump_StaticEndpointConfig.ValidateAll() if the designated +// constraints aren't met. +type EndpointsConfigDump_StaticEndpointConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EndpointsConfigDump_StaticEndpointConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EndpointsConfigDump_StaticEndpointConfigMultiError) AllErrors() []error { return m } + +// EndpointsConfigDump_StaticEndpointConfigValidationError is the validation +// error returned by EndpointsConfigDump_StaticEndpointConfig.Validate if the +// designated constraints aren't met. +type EndpointsConfigDump_StaticEndpointConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e EndpointsConfigDump_StaticEndpointConfigValidationError) ErrorName() string { + return "EndpointsConfigDump_StaticEndpointConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEndpointsConfigDump_StaticEndpointConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EndpointsConfigDump_StaticEndpointConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EndpointsConfigDump_StaticEndpointConfigValidationError{} + +// Validate checks the field values on +// EndpointsConfigDump_DynamicEndpointConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *EndpointsConfigDump_DynamicEndpointConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// EndpointsConfigDump_DynamicEndpointConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// EndpointsConfigDump_DynamicEndpointConfigMultiError, or nil if none found. +func (m *EndpointsConfigDump_DynamicEndpointConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *EndpointsConfigDump_DynamicEndpointConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for VersionInfo + + if all { + switch v := interface{}(m.GetEndpointConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "EndpointConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "EndpointConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEndpointConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "EndpointConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok 
:= interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetErrorState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointsConfigDump_DynamicEndpointConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ClientStatus + + if len(errors) > 0 { + return EndpointsConfigDump_DynamicEndpointConfigMultiError(errors) + } + + return nil +} + +// EndpointsConfigDump_DynamicEndpointConfigMultiError is an error wrapping +// multiple validation errors returned by +// EndpointsConfigDump_DynamicEndpointConfig.ValidateAll() if the designated +// constraints aren't met. +type EndpointsConfigDump_DynamicEndpointConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EndpointsConfigDump_DynamicEndpointConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EndpointsConfigDump_DynamicEndpointConfigMultiError) AllErrors() []error { return m } + +// EndpointsConfigDump_DynamicEndpointConfigValidationError is the validation +// error returned by EndpointsConfigDump_DynamicEndpointConfig.Validate if the +// designated constraints aren't met. +type EndpointsConfigDump_DynamicEndpointConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) ErrorName() string { + return "EndpointsConfigDump_DynamicEndpointConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEndpointsConfigDump_DynamicEndpointConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EndpointsConfigDump_DynamicEndpointConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EndpointsConfigDump_DynamicEndpointConfigValidationError{} + +// Validate checks the field values on EcdsConfigDump_EcdsFilterConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *EcdsConfigDump_EcdsFilterConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on EcdsConfigDump_EcdsFilterConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// EcdsConfigDump_EcdsFilterConfigMultiError, or nil if none found. +func (m *EcdsConfigDump_EcdsFilterConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *EcdsConfigDump_EcdsFilterConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for VersionInfo + + if all { + switch v := interface{}(m.GetEcdsFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "EcdsFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "EcdsFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEcdsFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "EcdsFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetErrorState()).(type) { + case interface{ ValidateAll() error }: + if err := 
v.ValidateAll(); err != nil { + errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EcdsConfigDump_EcdsFilterConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ClientStatus + + if len(errors) > 0 { + return EcdsConfigDump_EcdsFilterConfigMultiError(errors) + } + + return nil +} + +// EcdsConfigDump_EcdsFilterConfigMultiError is an error wrapping multiple +// validation errors returned by EcdsConfigDump_EcdsFilterConfig.ValidateAll() +// if the designated constraints aren't met. +type EcdsConfigDump_EcdsFilterConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EcdsConfigDump_EcdsFilterConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EcdsConfigDump_EcdsFilterConfigMultiError) AllErrors() []error { return m } + +// EcdsConfigDump_EcdsFilterConfigValidationError is the validation error +// returned by EcdsConfigDump_EcdsFilterConfig.Validate if the designated +// constraints aren't met. +type EcdsConfigDump_EcdsFilterConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EcdsConfigDump_EcdsFilterConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EcdsConfigDump_EcdsFilterConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EcdsConfigDump_EcdsFilterConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EcdsConfigDump_EcdsFilterConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e EcdsConfigDump_EcdsFilterConfigValidationError) ErrorName() string { + return "EcdsConfigDump_EcdsFilterConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e EcdsConfigDump_EcdsFilterConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEcdsConfigDump_EcdsFilterConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EcdsConfigDump_EcdsFilterConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EcdsConfigDump_EcdsFilterConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared_vtproto.pb.go new file mode 100644 index 0000000000..934de8568b --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared_vtproto.pb.go @@ -0,0 +1,1715 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/config_dump_shared.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + timestamppb "github.com/planetscale/vtprotobuf/types/known/timestamppb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *UpdateFailureState) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateFailureState) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UpdateFailureState) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0x22 + } + if len(m.Details) > 0 { + i -= len(m.Details) + copy(dAtA[i:], m.Details) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Details))) + i-- + dAtA[i] = 0x1a + } + if m.LastUpdateAttempt != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdateAttempt).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.FailedConfiguration != nil { + size, err := (*anypb.Any)(m.FailedConfiguration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListenersConfigDump_StaticListener) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenersConfigDump_StaticListener) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenersConfigDump_StaticListener) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Listener != nil { + size, err := (*anypb.Any)(m.Listener).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListenersConfigDump_DynamicListenerState) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenersConfigDump_DynamicListenerState) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenersConfigDump_DynamicListenerState) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == 
nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.Listener != nil { + size, err := (*anypb.Any)(m.Listener).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListenersConfigDump_DynamicListener) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenersConfigDump_DynamicListener) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenersConfigDump_DynamicListener) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x30 + } + if m.ErrorState != nil { + size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.DrainingState != nil { + size, err := m.DrainingState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.WarmingState != nil { + size, err := m.WarmingState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.ActiveState != nil { + size, err := m.ActiveState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListenersConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenersConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenersConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], 
m.unknownFields) + } + if len(m.DynamicListeners) > 0 { + for iNdEx := len(m.DynamicListeners) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DynamicListeners[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.StaticListeners) > 0 { + for iNdEx := len(m.StaticListeners) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.StaticListeners[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClustersConfigDump_StaticCluster) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClustersConfigDump_StaticCluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClustersConfigDump_StaticCluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Cluster != nil { + size, err := (*anypb.Any)(m.Cluster).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClustersConfigDump_DynamicCluster) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClustersConfigDump_DynamicCluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClustersConfigDump_DynamicCluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x28 + } + if m.ErrorState != nil { + size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.Cluster != nil { + size, err := 
(*anypb.Any)(m.Cluster).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClustersConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClustersConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClustersConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DynamicWarmingClusters) > 0 { + for iNdEx := len(m.DynamicWarmingClusters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DynamicWarmingClusters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.DynamicActiveClusters) > 0 { + for iNdEx := len(m.DynamicActiveClusters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DynamicActiveClusters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.StaticClusters) > 0 { + for iNdEx := len(m.StaticClusters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.StaticClusters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RoutesConfigDump_StaticRouteConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoutesConfigDump_StaticRouteConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RoutesConfigDump_StaticRouteConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.RouteConfig != nil { + size, err := (*anypb.Any)(m.RouteConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa 
+ } + return len(dAtA) - i, nil +} + +func (m *RoutesConfigDump_DynamicRouteConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoutesConfigDump_DynamicRouteConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RoutesConfigDump_DynamicRouteConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x28 + } + if m.ErrorState != nil { + size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.RouteConfig != nil { + size, err := (*anypb.Any)(m.RouteConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RoutesConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoutesConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RoutesConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DynamicRouteConfigs) > 0 { + for iNdEx := len(m.DynamicRouteConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DynamicRouteConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.StaticRouteConfigs) > 0 { + for iNdEx := len(m.StaticRouteConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.StaticRouteConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*ScopedRoutesConfigDump_InlineScopedRouteConfigs) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.ScopedRouteConfigs) > 0 { + for iNdEx := len(m.ScopedRouteConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := (*anypb.Any)(m.ScopedRouteConfigs[iNdEx]).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x30 + } + if m.ErrorState != nil { + size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.ScopedRouteConfigs) > 0 { + for iNdEx := len(m.ScopedRouteConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := (*anypb.Any)(m.ScopedRouteConfigs[iNdEx]).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutesConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRoutesConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutesConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DynamicScopedRouteConfigs) > 0 { + for iNdEx := len(m.DynamicScopedRouteConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DynamicScopedRouteConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.InlineScopedRouteConfigs) > 0 { + for iNdEx := len(m.InlineScopedRouteConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.InlineScopedRouteConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *EndpointsConfigDump_StaticEndpointConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointsConfigDump_StaticEndpointConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EndpointsConfigDump_StaticEndpointConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.EndpointConfig != nil { + size, err := (*anypb.Any)(m.EndpointConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EndpointsConfigDump_DynamicEndpointConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointsConfigDump_DynamicEndpointConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EndpointsConfigDump_DynamicEndpointConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x28 + } + if m.ErrorState != nil { + size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if 
err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.EndpointConfig != nil { + size, err := (*anypb.Any)(m.EndpointConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EndpointsConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointsConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EndpointsConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DynamicEndpointConfigs) > 0 { + for iNdEx := len(m.DynamicEndpointConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DynamicEndpointConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.StaticEndpointConfigs) > 0 { + for iNdEx := len(m.StaticEndpointConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.StaticEndpointConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func (m *EcdsConfigDump_EcdsFilterConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EcdsConfigDump_EcdsFilterConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EcdsConfigDump_EcdsFilterConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x28 + } + if m.ErrorState != nil { + size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.EcdsFilter != nil { + size, err := (*anypb.Any)(m.EcdsFilter).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EcdsConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EcdsConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EcdsConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.EcdsFilters) > 0 { + for iNdEx := len(m.EcdsFilters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.EcdsFilters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *UpdateFailureState) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FailedConfiguration != nil { + l = (*anypb.Any)(m.FailedConfiguration).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdateAttempt != nil { + l = (*timestamppb.Timestamp)(m.LastUpdateAttempt).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Details) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ListenersConfigDump_StaticListener) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Listener != nil { + l = (*anypb.Any)(m.Listener).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ListenersConfigDump_DynamicListenerState) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Listener != nil { + l = (*anypb.Any)(m.Listener).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ListenersConfigDump_DynamicListener) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ActiveState != nil { + l = m.ActiveState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.WarmingState != nil { + l = m.WarmingState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DrainingState != nil { + l = 
m.DrainingState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorState != nil { + l = m.ErrorState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + n += len(m.unknownFields) + return n +} + +func (m *ListenersConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.StaticListeners) > 0 { + for _, e := range m.StaticListeners { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicListeners) > 0 { + for _, e := range m.DynamicListeners { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ClustersConfigDump_StaticCluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Cluster != nil { + l = (*anypb.Any)(m.Cluster).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClustersConfigDump_DynamicCluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Cluster != nil { + l = (*anypb.Any)(m.Cluster).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorState != nil { + l = m.ErrorState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClustersConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.StaticClusters) > 0 { + for _, e := range m.StaticClusters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicActiveClusters) > 0 { + for _, e := range m.DynamicActiveClusters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicWarmingClusters) > 0 { + for _, e := range m.DynamicWarmingClusters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RoutesConfigDump_StaticRouteConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RouteConfig != nil { + l = (*anypb.Any)(m.RouteConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RoutesConfigDump_DynamicRouteConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RouteConfig != nil { + l = (*anypb.Any)(m.RouteConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = 
(*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorState != nil { + l = m.ErrorState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + n += len(m.unknownFields) + return n +} + +func (m *RoutesConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.StaticRouteConfigs) > 0 { + for _, e := range m.StaticRouteConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicRouteConfigs) > 0 { + for _, e := range m.DynamicRouteConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ScopedRouteConfigs) > 0 { + for _, e := range m.ScopedRouteConfigs { + l = (*anypb.Any)(e).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ScopedRouteConfigs) > 0 { + for _, e := range m.ScopedRouteConfigs { + l = (*anypb.Any)(e).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorState != nil { + l = m.ErrorState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRoutesConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.InlineScopedRouteConfigs) > 0 { + for _, e := range m.InlineScopedRouteConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicScopedRouteConfigs) > 0 { + for _, e := range m.DynamicScopedRouteConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *EndpointsConfigDump_StaticEndpointConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EndpointConfig != nil { + l = (*anypb.Any)(m.EndpointConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *EndpointsConfigDump_DynamicEndpointConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EndpointConfig != nil { + l = (*anypb.Any)(m.EndpointConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + 
l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorState != nil { + l = m.ErrorState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + n += len(m.unknownFields) + return n +} + +func (m *EndpointsConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.StaticEndpointConfigs) > 0 { + for _, e := range m.StaticEndpointConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicEndpointConfigs) > 0 { + for _, e := range m.DynamicEndpointConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *EcdsConfigDump_EcdsFilterConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EcdsFilter != nil { + l = (*anypb.Any)(m.EcdsFilter).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorState != nil { + l = m.ErrorState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + n += len(m.unknownFields) + return n +} + +func (m *EcdsConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.EcdsFilters) > 0 { + for _, e := range m.EcdsFilters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_vtproto.pb.go new file mode 100644 index 0000000000..78e37eec91 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_vtproto.pb.go @@ -0,0 +1,466 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/config_dump.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + timestamppb "github.com/planetscale/vtprotobuf/types/known/timestamppb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Configs) > 0 { + for iNdEx := len(m.Configs) - 1; iNdEx >= 0; iNdEx-- { + size, err := (*anypb.Any)(m.Configs[iNdEx]).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *BootstrapConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BootstrapConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *BootstrapConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Bootstrap != nil { + if vtmsg, ok := interface{}(m.Bootstrap).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Bootstrap) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SecretsConfigDump_DynamicSecret) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretsConfigDump_DynamicSecret) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SecretsConfigDump_DynamicSecret) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x30 + } + if m.ErrorState != nil { + size, err := 
m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.Secret != nil { + size, err := (*anypb.Any)(m.Secret).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SecretsConfigDump_StaticSecret) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretsConfigDump_StaticSecret) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SecretsConfigDump_StaticSecret) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Secret != nil { + size, err := (*anypb.Any)(m.Secret).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SecretsConfigDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretsConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SecretsConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DynamicWarmingSecrets) > 0 { + for iNdEx := len(m.DynamicWarmingSecrets) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DynamicWarmingSecrets[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.DynamicActiveSecrets) > 0 { + for iNdEx := 
len(m.DynamicActiveSecrets) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DynamicActiveSecrets[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.StaticSecrets) > 0 { + for iNdEx := len(m.StaticSecrets) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.StaticSecrets[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Configs) > 0 { + for _, e := range m.Configs { + l = (*anypb.Any)(e).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *BootstrapConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Bootstrap != nil { + if size, ok := interface{}(m.Bootstrap).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Bootstrap) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SecretsConfigDump_DynamicSecret) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Secret != nil { + l = (*anypb.Any)(m.Secret).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorState != nil { + l = m.ErrorState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + n += len(m.unknownFields) + return n +} + +func (m *SecretsConfigDump_StaticSecret) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Secret != nil { + l = (*anypb.Any)(m.Secret).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SecretsConfigDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.StaticSecrets) > 0 { + for _, e := range m.StaticSecrets { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicActiveSecrets) > 0 { + for _, e := range m.DynamicActiveSecrets { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicWarmingSecrets) > 0 { + for _, e := range m.DynamicWarmingSecrets { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.go new file mode 100644 index 0000000000..388c1de326 --- /dev/null 
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.go @@ -0,0 +1,241 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/admin/v3/init_dump.proto + +package adminv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Dumps of unready targets of envoy init managers. Envoy's admin fills this message with init managers, +// which provides the information of their unready targets. +// The :ref:`/init_dump ` will dump all unready targets information. +type UnreadyTargetsDumps struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // You can choose specific component to dump unready targets with mask query parameter. + // See :ref:`/init_dump?mask={} ` for more information. + // The dumps of unready targets of all init managers. + UnreadyTargetsDumps []*UnreadyTargetsDumps_UnreadyTargetsDump `protobuf:"bytes,1,rep,name=unready_targets_dumps,json=unreadyTargetsDumps,proto3" json:"unready_targets_dumps,omitempty"` +} + +func (x *UnreadyTargetsDumps) Reset() { + *x = UnreadyTargetsDumps{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_init_dump_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UnreadyTargetsDumps) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnreadyTargetsDumps) ProtoMessage() {} + +func (x *UnreadyTargetsDumps) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_init_dump_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnreadyTargetsDumps.ProtoReflect.Descriptor instead. +func (*UnreadyTargetsDumps) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_init_dump_proto_rawDescGZIP(), []int{0} +} + +func (x *UnreadyTargetsDumps) GetUnreadyTargetsDumps() []*UnreadyTargetsDumps_UnreadyTargetsDump { + if x != nil { + return x.UnreadyTargetsDumps + } + return nil +} + +// Message of unready targets information of an init manager. +type UnreadyTargetsDumps_UnreadyTargetsDump struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the init manager. Example: "init_manager_xxx". + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Names of unready targets of the init manager. Example: "target_xxx". 
+ TargetNames []string `protobuf:"bytes,2,rep,name=target_names,json=targetNames,proto3" json:"target_names,omitempty"` +} + +func (x *UnreadyTargetsDumps_UnreadyTargetsDump) Reset() { + *x = UnreadyTargetsDumps_UnreadyTargetsDump{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_init_dump_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UnreadyTargetsDumps_UnreadyTargetsDump) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnreadyTargetsDumps_UnreadyTargetsDump) ProtoMessage() {} + +func (x *UnreadyTargetsDumps_UnreadyTargetsDump) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_init_dump_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnreadyTargetsDumps_UnreadyTargetsDump.ProtoReflect.Descriptor instead. +func (*UnreadyTargetsDumps_UnreadyTargetsDump) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_init_dump_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *UnreadyTargetsDumps_UnreadyTargetsDump) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *UnreadyTargetsDumps_UnreadyTargetsDump) GetTargetNames() []string { + if x != nil { + return x.TargetNames + } + return nil +} + +var File_envoy_admin_v3_init_dump_proto protoreflect.FileDescriptor + +var file_envoy_admin_v3_init_dump_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x2f, 0x69, 0x6e, 0x69, 0x74, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, + 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xce, 0x01, 0x0a, 0x13, 0x55, 0x6e, 0x72, 0x65, 0x61, 0x64, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x73, 0x44, 0x75, 0x6d, 0x70, 0x73, 0x12, 0x6a, 0x0a, 0x15, 0x75, 0x6e, 0x72, 0x65, 0x61, + 0x64, 0x79, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x6e, 0x72, 0x65, 0x61, 0x64, 0x79, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x44, 0x75, 0x6d, 0x70, 0x73, 0x2e, 0x55, 0x6e, 0x72, 0x65, + 0x61, 0x64, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x44, 0x75, 0x6d, 0x70, 0x52, 0x13, + 0x75, 0x6e, 0x72, 0x65, 0x61, 0x64, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x44, 0x75, + 0x6d, 0x70, 0x73, 0x1a, 0x4b, 0x0a, 0x12, 0x55, 0x6e, 0x72, 0x65, 0x61, 0x64, 0x79, 0x54, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x73, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, + 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x42, 0x76, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 
0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x49, 0x6e, 0x69, 0x74, 0x44, 0x75, + 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_admin_v3_init_dump_proto_rawDescOnce sync.Once + file_envoy_admin_v3_init_dump_proto_rawDescData = file_envoy_admin_v3_init_dump_proto_rawDesc +) + +func file_envoy_admin_v3_init_dump_proto_rawDescGZIP() []byte { + file_envoy_admin_v3_init_dump_proto_rawDescOnce.Do(func() { + file_envoy_admin_v3_init_dump_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_init_dump_proto_rawDescData) + }) + return file_envoy_admin_v3_init_dump_proto_rawDescData +} + +var file_envoy_admin_v3_init_dump_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_admin_v3_init_dump_proto_goTypes = []interface{}{ + (*UnreadyTargetsDumps)(nil), // 0: envoy.admin.v3.UnreadyTargetsDumps + (*UnreadyTargetsDumps_UnreadyTargetsDump)(nil), // 1: envoy.admin.v3.UnreadyTargetsDumps.UnreadyTargetsDump +} +var file_envoy_admin_v3_init_dump_proto_depIdxs = []int32{ + 1, // 0: envoy.admin.v3.UnreadyTargetsDumps.unready_targets_dumps:type_name -> envoy.admin.v3.UnreadyTargetsDumps.UnreadyTargetsDump + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_admin_v3_init_dump_proto_init() } +func file_envoy_admin_v3_init_dump_proto_init() { + if File_envoy_admin_v3_init_dump_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_admin_v3_init_dump_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UnreadyTargetsDumps); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_init_dump_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UnreadyTargetsDumps_UnreadyTargetsDump); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_admin_v3_init_dump_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_admin_v3_init_dump_proto_goTypes, + DependencyIndexes: file_envoy_admin_v3_init_dump_proto_depIdxs, + MessageInfos: file_envoy_admin_v3_init_dump_proto_msgTypes, + }.Build() + File_envoy_admin_v3_init_dump_proto = out.File + file_envoy_admin_v3_init_dump_proto_rawDesc = nil + file_envoy_admin_v3_init_dump_proto_goTypes = nil + file_envoy_admin_v3_init_dump_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.validate.go new file mode 100644 index 
0000000000..f746a12648 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.validate.go @@ -0,0 +1,281 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/admin/v3/init_dump.proto + +package adminv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on UnreadyTargetsDumps with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *UnreadyTargetsDumps) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UnreadyTargetsDumps with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// UnreadyTargetsDumpsMultiError, or nil if none found. +func (m *UnreadyTargetsDumps) ValidateAll() error { + return m.validate(true) +} + +func (m *UnreadyTargetsDumps) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetUnreadyTargetsDumps() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UnreadyTargetsDumpsValidationError{ + field: fmt.Sprintf("UnreadyTargetsDumps[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UnreadyTargetsDumpsValidationError{ + field: fmt.Sprintf("UnreadyTargetsDumps[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UnreadyTargetsDumpsValidationError{ + field: fmt.Sprintf("UnreadyTargetsDumps[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return UnreadyTargetsDumpsMultiError(errors) + } + + return nil +} + +// UnreadyTargetsDumpsMultiError is an error wrapping multiple validation +// errors returned by UnreadyTargetsDumps.ValidateAll() if the designated +// constraints aren't met. +type UnreadyTargetsDumpsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UnreadyTargetsDumpsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UnreadyTargetsDumpsMultiError) AllErrors() []error { return m } + +// UnreadyTargetsDumpsValidationError is the validation error returned by +// UnreadyTargetsDumps.Validate if the designated constraints aren't met. +type UnreadyTargetsDumpsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e UnreadyTargetsDumpsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UnreadyTargetsDumpsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UnreadyTargetsDumpsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UnreadyTargetsDumpsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UnreadyTargetsDumpsValidationError) ErrorName() string { + return "UnreadyTargetsDumpsValidationError" +} + +// Error satisfies the builtin error interface +func (e UnreadyTargetsDumpsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUnreadyTargetsDumps.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UnreadyTargetsDumpsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UnreadyTargetsDumpsValidationError{} + +// Validate checks the field values on UnreadyTargetsDumps_UnreadyTargetsDump +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *UnreadyTargetsDumps_UnreadyTargetsDump) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// UnreadyTargetsDumps_UnreadyTargetsDump with the rules defined in the proto +// definition for this message. If any rules are violated, the result is a +// list of violation errors wrapped in +// UnreadyTargetsDumps_UnreadyTargetsDumpMultiError, or nil if none found. +func (m *UnreadyTargetsDumps_UnreadyTargetsDump) ValidateAll() error { + return m.validate(true) +} + +func (m *UnreadyTargetsDumps_UnreadyTargetsDump) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + if len(errors) > 0 { + return UnreadyTargetsDumps_UnreadyTargetsDumpMultiError(errors) + } + + return nil +} + +// UnreadyTargetsDumps_UnreadyTargetsDumpMultiError is an error wrapping +// multiple validation errors returned by +// UnreadyTargetsDumps_UnreadyTargetsDump.ValidateAll() if the designated +// constraints aren't met. +type UnreadyTargetsDumps_UnreadyTargetsDumpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UnreadyTargetsDumps_UnreadyTargetsDumpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UnreadyTargetsDumps_UnreadyTargetsDumpMultiError) AllErrors() []error { return m } + +// UnreadyTargetsDumps_UnreadyTargetsDumpValidationError is the validation +// error returned by UnreadyTargetsDumps_UnreadyTargetsDump.Validate if the +// designated constraints aren't met. +type UnreadyTargetsDumps_UnreadyTargetsDumpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UnreadyTargetsDumps_UnreadyTargetsDumpValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e UnreadyTargetsDumps_UnreadyTargetsDumpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UnreadyTargetsDumps_UnreadyTargetsDumpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UnreadyTargetsDumps_UnreadyTargetsDumpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UnreadyTargetsDumps_UnreadyTargetsDumpValidationError) ErrorName() string { + return "UnreadyTargetsDumps_UnreadyTargetsDumpValidationError" +} + +// Error satisfies the builtin error interface +func (e UnreadyTargetsDumps_UnreadyTargetsDumpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUnreadyTargetsDumps_UnreadyTargetsDump.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UnreadyTargetsDumps_UnreadyTargetsDumpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UnreadyTargetsDumps_UnreadyTargetsDumpValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump_vtproto.pb.go new file mode 100644 index 0000000000..d957042b88 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump_vtproto.pb.go @@ -0,0 +1,149 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/init_dump.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *UnreadyTargetsDumps_UnreadyTargetsDump) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UnreadyTargetsDumps_UnreadyTargetsDump) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UnreadyTargetsDumps_UnreadyTargetsDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TargetNames) > 0 { + for iNdEx := len(m.TargetNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TargetNames[iNdEx]) + copy(dAtA[i:], m.TargetNames[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TargetNames[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UnreadyTargetsDumps) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UnreadyTargetsDumps) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UnreadyTargetsDumps) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.UnreadyTargetsDumps) > 0 { + for iNdEx := len(m.UnreadyTargetsDumps) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.UnreadyTargetsDumps[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *UnreadyTargetsDumps_UnreadyTargetsDump) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.TargetNames) > 0 { + for _, s := range m.TargetNames { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *UnreadyTargetsDumps) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.UnreadyTargetsDumps) > 0 { + for _, e := range m.UnreadyTargetsDumps { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.go new file mode 100644 index 0000000000..ac6015fac5 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.go @@ -0,0 +1,268 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/admin/v3/listeners.proto + +package adminv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Admin endpoint uses this wrapper for “/listeners“ to display listener status information. +// See :ref:`/listeners ` for more information. +type Listeners struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // List of listener statuses. + ListenerStatuses []*ListenerStatus `protobuf:"bytes,1,rep,name=listener_statuses,json=listenerStatuses,proto3" json:"listener_statuses,omitempty"` +} + +func (x *Listeners) Reset() { + *x = Listeners{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_listeners_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Listeners) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Listeners) ProtoMessage() {} + +func (x *Listeners) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_listeners_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Listeners.ProtoReflect.Descriptor instead. +func (*Listeners) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_listeners_proto_rawDescGZIP(), []int{0} +} + +func (x *Listeners) GetListenerStatuses() []*ListenerStatus { + if x != nil { + return x.ListenerStatuses + } + return nil +} + +// Details an individual listener's current status. +type ListenerStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the listener + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The actual local address that the listener is listening on. If a listener was configured + // to listen on port 0, then this address has the port that was allocated by the OS. + LocalAddress *v3.Address `protobuf:"bytes,2,opt,name=local_address,json=localAddress,proto3" json:"local_address,omitempty"` + // The additional addresses the listener is listening on as specified via the :ref:`additional_addresses ` + // configuration. 
+ AdditionalLocalAddresses []*v3.Address `protobuf:"bytes,3,rep,name=additional_local_addresses,json=additionalLocalAddresses,proto3" json:"additional_local_addresses,omitempty"` +} + +func (x *ListenerStatus) Reset() { + *x = ListenerStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_listeners_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenerStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenerStatus) ProtoMessage() {} + +func (x *ListenerStatus) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_listeners_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenerStatus.ProtoReflect.Descriptor instead. +func (*ListenerStatus) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_listeners_proto_rawDescGZIP(), []int{1} +} + +func (x *ListenerStatus) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListenerStatus) GetLocalAddress() *v3.Address { + if x != nil { + return x.LocalAddress + } + return nil +} + +func (x *ListenerStatus) GetAdditionalLocalAddresses() []*v3.Address { + if x != nil { + return x.AdditionalLocalAddresses + } + return nil +} + +var File_envoy_admin_v3_listeners_proto protoreflect.FileDescriptor + +var file_envoy_admin_v3_listeners_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, + 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7e, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x73, 0x12, 0x4b, 0x0a, 0x11, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x10, + 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, + 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x22, 0xf0, 0x01, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, + 0x0d, 0x6c, 
0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x5b, 0x0a, 0x1a, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x52, 0x18, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x3a, 0x29, + 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x77, 0xba, 0x80, 0xc8, 0xd1, 0x06, + 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 0x33, 0x42, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_admin_v3_listeners_proto_rawDescOnce sync.Once + file_envoy_admin_v3_listeners_proto_rawDescData = file_envoy_admin_v3_listeners_proto_rawDesc +) + +func file_envoy_admin_v3_listeners_proto_rawDescGZIP() []byte { + file_envoy_admin_v3_listeners_proto_rawDescOnce.Do(func() { + file_envoy_admin_v3_listeners_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_listeners_proto_rawDescData) + }) + return file_envoy_admin_v3_listeners_proto_rawDescData +} + +var file_envoy_admin_v3_listeners_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_admin_v3_listeners_proto_goTypes = []interface{}{ + (*Listeners)(nil), // 0: envoy.admin.v3.Listeners + (*ListenerStatus)(nil), // 1: envoy.admin.v3.ListenerStatus + (*v3.Address)(nil), // 2: envoy.config.core.v3.Address +} +var file_envoy_admin_v3_listeners_proto_depIdxs = []int32{ + 1, // 0: envoy.admin.v3.Listeners.listener_statuses:type_name -> envoy.admin.v3.ListenerStatus + 2, // 1: envoy.admin.v3.ListenerStatus.local_address:type_name -> envoy.config.core.v3.Address + 2, // 2: envoy.admin.v3.ListenerStatus.additional_local_addresses:type_name -> envoy.config.core.v3.Address + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_envoy_admin_v3_listeners_proto_init() } +func 
file_envoy_admin_v3_listeners_proto_init() { + if File_envoy_admin_v3_listeners_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_admin_v3_listeners_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Listeners); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_listeners_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenerStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_admin_v3_listeners_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_admin_v3_listeners_proto_goTypes, + DependencyIndexes: file_envoy_admin_v3_listeners_proto_depIdxs, + MessageInfos: file_envoy_admin_v3_listeners_proto_msgTypes, + }.Build() + File_envoy_admin_v3_listeners_proto = out.File + file_envoy_admin_v3_listeners_proto_rawDesc = nil + file_envoy_admin_v3_listeners_proto_goTypes = nil + file_envoy_admin_v3_listeners_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.validate.go new file mode 100644 index 0000000000..02cce26391 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.validate.go @@ -0,0 +1,335 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/admin/v3/listeners.proto + +package adminv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Listeners with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Listeners) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Listeners with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ListenersMultiError, or nil +// if none found. 
+func (m *Listeners) ValidateAll() error { + return m.validate(true) +} + +func (m *Listeners) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetListenerStatuses() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenersValidationError{ + field: fmt.Sprintf("ListenerStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenersValidationError{ + field: fmt.Sprintf("ListenerStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenersValidationError{ + field: fmt.Sprintf("ListenerStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ListenersMultiError(errors) + } + + return nil +} + +// ListenersMultiError is an error wrapping multiple validation errors returned +// by Listeners.ValidateAll() if the designated constraints aren't met. +type ListenersMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenersMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenersMultiError) AllErrors() []error { return m } + +// ListenersValidationError is the validation error returned by +// Listeners.Validate if the designated constraints aren't met. +type ListenersValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenersValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenersValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenersValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListenersValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListenersValidationError) ErrorName() string { return "ListenersValidationError" } + +// Error satisfies the builtin error interface +func (e ListenersValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListeners.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenersValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenersValidationError{} + +// Validate checks the field values on ListenerStatus with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ListenerStatus) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListenerStatus with the rules defined +// in the proto definition for this message. 
If any rules are violated, the +// result is a list of violation errors wrapped in ListenerStatusMultiError, +// or nil if none found. +func (m *ListenerStatus) ValidateAll() error { + return m.validate(true) +} + +func (m *ListenerStatus) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + if all { + switch v := interface{}(m.GetLocalAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerStatusValidationError{ + field: "LocalAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerStatusValidationError{ + field: "LocalAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerStatusValidationError{ + field: "LocalAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetAdditionalLocalAddresses() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerStatusValidationError{ + field: fmt.Sprintf("AdditionalLocalAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerStatusValidationError{ + field: fmt.Sprintf("AdditionalLocalAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerStatusValidationError{ + field: fmt.Sprintf("AdditionalLocalAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ListenerStatusMultiError(errors) + } + + return nil +} + +// ListenerStatusMultiError is an error wrapping multiple validation errors +// returned by ListenerStatus.ValidateAll() if the designated constraints +// aren't met. +type ListenerStatusMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenerStatusMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenerStatusMultiError) AllErrors() []error { return m } + +// ListenerStatusValidationError is the validation error returned by +// ListenerStatus.Validate if the designated constraints aren't met. +type ListenerStatusValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenerStatusValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenerStatusValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenerStatusValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListenerStatusValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ListenerStatusValidationError) ErrorName() string { return "ListenerStatusValidationError" } + +// Error satisfies the builtin error interface +func (e ListenerStatusValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListenerStatus.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenerStatusValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenerStatusValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners_vtproto.pb.go new file mode 100644 index 0000000000..816437acfb --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners_vtproto.pb.go @@ -0,0 +1,203 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/listeners.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Listeners) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Listeners) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listeners) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ListenerStatuses) > 0 { + for iNdEx := len(m.ListenerStatuses) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ListenerStatuses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ListenerStatus) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenerStatus) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.AdditionalLocalAddresses) > 0 { + for iNdEx := len(m.AdditionalLocalAddresses) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.AdditionalLocalAddresses[iNdEx]).(interface { + 
MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AdditionalLocalAddresses[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + } + if m.LocalAddress != nil { + if vtmsg, ok := interface{}(m.LocalAddress).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LocalAddress) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Listeners) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ListenerStatuses) > 0 { + for _, e := range m.ListenerStatuses { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ListenerStatus) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LocalAddress != nil { + if size, ok := interface{}(m.LocalAddress).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LocalAddress) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.AdditionalLocalAddresses) > 0 { + for _, e := range m.AdditionalLocalAddresses { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.go new file mode 100644 index 0000000000..32de56ce3b --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.go @@ -0,0 +1,228 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/admin/v3/memory.proto + +package adminv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Proto representation of the internal memory consumption of an Envoy instance. These represent +// values extracted from an internal TCMalloc instance. For more information, see the section of the +// docs entitled ["Generic Tcmalloc Status"](https://gperftools.github.io/gperftools/tcmalloc.html). 
+// [#next-free-field: 7] +type Memory struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of bytes allocated by the heap for Envoy. This is an alias for + // “generic.current_allocated_bytes“. + Allocated uint64 `protobuf:"varint,1,opt,name=allocated,proto3" json:"allocated,omitempty"` + // The number of bytes reserved by the heap but not necessarily allocated. This is an alias for + // “generic.heap_size“. + HeapSize uint64 `protobuf:"varint,2,opt,name=heap_size,json=heapSize,proto3" json:"heap_size,omitempty"` + // The number of bytes in free, unmapped pages in the page heap. These bytes always count towards + // virtual memory usage, and depending on the OS, typically do not count towards physical memory + // usage. This is an alias for “tcmalloc.pageheap_unmapped_bytes“. + PageheapUnmapped uint64 `protobuf:"varint,3,opt,name=pageheap_unmapped,json=pageheapUnmapped,proto3" json:"pageheap_unmapped,omitempty"` + // The number of bytes in free, mapped pages in the page heap. These bytes always count towards + // virtual memory usage, and unless the underlying memory is swapped out by the OS, they also + // count towards physical memory usage. This is an alias for “tcmalloc.pageheap_free_bytes“. + PageheapFree uint64 `protobuf:"varint,4,opt,name=pageheap_free,json=pageheapFree,proto3" json:"pageheap_free,omitempty"` + // The amount of memory used by the TCMalloc thread caches (for small objects). This is an alias + // for “tcmalloc.current_total_thread_cache_bytes“. + TotalThreadCache uint64 `protobuf:"varint,5,opt,name=total_thread_cache,json=totalThreadCache,proto3" json:"total_thread_cache,omitempty"` + // The number of bytes of the physical memory usage by the allocator. This is an alias for + // “generic.total_physical_bytes“. + TotalPhysicalBytes uint64 `protobuf:"varint,6,opt,name=total_physical_bytes,json=totalPhysicalBytes,proto3" json:"total_physical_bytes,omitempty"` +} + +func (x *Memory) Reset() { + *x = Memory{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_memory_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Memory) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Memory) ProtoMessage() {} + +func (x *Memory) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_memory_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Memory.ProtoReflect.Descriptor instead. 
+func (*Memory) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_memory_proto_rawDescGZIP(), []int{0} +} + +func (x *Memory) GetAllocated() uint64 { + if x != nil { + return x.Allocated + } + return 0 +} + +func (x *Memory) GetHeapSize() uint64 { + if x != nil { + return x.HeapSize + } + return 0 +} + +func (x *Memory) GetPageheapUnmapped() uint64 { + if x != nil { + return x.PageheapUnmapped + } + return 0 +} + +func (x *Memory) GetPageheapFree() uint64 { + if x != nil { + return x.PageheapFree + } + return 0 +} + +func (x *Memory) GetTotalThreadCache() uint64 { + if x != nil { + return x.TotalThreadCache + } + return 0 +} + +func (x *Memory) GetTotalPhysicalBytes() uint64 { + if x != nil { + return x.TotalPhysicalBytes + } + return 0 +} + +var File_envoy_admin_v3_memory_proto protoreflect.FileDescriptor + +var file_envoy_admin_v3_memory_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x2f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0x98, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x6c, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x61, + 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x68, 0x65, 0x61, 0x70, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x68, 0x65, 0x61, + 0x70, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x61, 0x67, 0x65, 0x68, 0x65, 0x61, + 0x70, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x10, 0x70, 0x61, 0x67, 0x65, 0x68, 0x65, 0x61, 0x70, 0x55, 0x6e, 0x6d, 0x61, 0x70, 0x70, + 0x65, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x61, 0x67, 0x65, 0x68, 0x65, 0x61, 0x70, 0x5f, 0x66, + 0x72, 0x65, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x70, 0x61, 0x67, 0x65, 0x68, + 0x65, 0x61, 0x70, 0x46, 0x72, 0x65, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, + 0x5f, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, + 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, + 0x68, 0x79, 0x73, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x68, 0x79, 0x73, 0x69, 0x63, + 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x3a, 0x21, 0x9a, 0xc5, 0x88, 0x1e, 0x1c, 0x0a, 0x1a, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x42, 0x74, 0xba, 0x80, 0xc8, 0xd1, + 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x33, 0x42, 
0x0b, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_admin_v3_memory_proto_rawDescOnce sync.Once + file_envoy_admin_v3_memory_proto_rawDescData = file_envoy_admin_v3_memory_proto_rawDesc +) + +func file_envoy_admin_v3_memory_proto_rawDescGZIP() []byte { + file_envoy_admin_v3_memory_proto_rawDescOnce.Do(func() { + file_envoy_admin_v3_memory_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_memory_proto_rawDescData) + }) + return file_envoy_admin_v3_memory_proto_rawDescData +} + +var file_envoy_admin_v3_memory_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_admin_v3_memory_proto_goTypes = []interface{}{ + (*Memory)(nil), // 0: envoy.admin.v3.Memory +} +var file_envoy_admin_v3_memory_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_admin_v3_memory_proto_init() } +func file_envoy_admin_v3_memory_proto_init() { + if File_envoy_admin_v3_memory_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_admin_v3_memory_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Memory); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_admin_v3_memory_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_admin_v3_memory_proto_goTypes, + DependencyIndexes: file_envoy_admin_v3_memory_proto_depIdxs, + MessageInfos: file_envoy_admin_v3_memory_proto_msgTypes, + }.Build() + File_envoy_admin_v3_memory_proto = out.File + file_envoy_admin_v3_memory_proto_rawDesc = nil + file_envoy_admin_v3_memory_proto_goTypes = nil + file_envoy_admin_v3_memory_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.validate.go new file mode 100644 index 0000000000..bcb9c1d201 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.validate.go @@ -0,0 +1,147 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/admin/v3/memory.proto + +package adminv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Memory with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Memory) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Memory with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in MemoryMultiError, or nil if none found. +func (m *Memory) ValidateAll() error { + return m.validate(true) +} + +func (m *Memory) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Allocated + + // no validation rules for HeapSize + + // no validation rules for PageheapUnmapped + + // no validation rules for PageheapFree + + // no validation rules for TotalThreadCache + + // no validation rules for TotalPhysicalBytes + + if len(errors) > 0 { + return MemoryMultiError(errors) + } + + return nil +} + +// MemoryMultiError is an error wrapping multiple validation errors returned by +// Memory.ValidateAll() if the designated constraints aren't met. +type MemoryMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MemoryMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MemoryMultiError) AllErrors() []error { return m } + +// MemoryValidationError is the validation error returned by Memory.Validate if +// the designated constraints aren't met. +type MemoryValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MemoryValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MemoryValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MemoryValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MemoryValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e MemoryValidationError) ErrorName() string { return "MemoryValidationError" } + +// Error satisfies the builtin error interface +func (e MemoryValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMemory.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MemoryValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MemoryValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory_vtproto.pb.go new file mode 100644 index 0000000000..6e3a23688a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory_vtproto.pb.go @@ -0,0 +1,110 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/memory.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Memory) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Memory) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Memory) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TotalPhysicalBytes != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalPhysicalBytes)) + i-- + dAtA[i] = 0x30 + } + if m.TotalThreadCache != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalThreadCache)) + i-- + dAtA[i] = 0x28 + } + if m.PageheapFree != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.PageheapFree)) + i-- + dAtA[i] = 0x20 + } + if m.PageheapUnmapped != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.PageheapUnmapped)) + i-- + dAtA[i] = 0x18 + } + if m.HeapSize != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HeapSize)) + i-- + dAtA[i] = 0x10 + } + if m.Allocated != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Allocated)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Memory) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Allocated != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Allocated)) + } + if m.HeapSize != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HeapSize)) + } + if m.PageheapUnmapped != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.PageheapUnmapped)) + } + if m.PageheapFree != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.PageheapFree)) + } + if m.TotalThreadCache != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalThreadCache)) + } + if m.TotalPhysicalBytes != 0 { + n += 1 + 
protohelpers.SizeOfVarint(uint64(m.TotalPhysicalBytes)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.go new file mode 100644 index 0000000000..3b71895960 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.go @@ -0,0 +1,234 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/admin/v3/metrics.proto + +package adminv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SimpleMetric_Type int32 + +const ( + SimpleMetric_COUNTER SimpleMetric_Type = 0 + SimpleMetric_GAUGE SimpleMetric_Type = 1 +) + +// Enum value maps for SimpleMetric_Type. +var ( + SimpleMetric_Type_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + } + SimpleMetric_Type_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + } +) + +func (x SimpleMetric_Type) Enum() *SimpleMetric_Type { + p := new(SimpleMetric_Type) + *p = x + return p +} + +func (x SimpleMetric_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SimpleMetric_Type) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_admin_v3_metrics_proto_enumTypes[0].Descriptor() +} + +func (SimpleMetric_Type) Type() protoreflect.EnumType { + return &file_envoy_admin_v3_metrics_proto_enumTypes[0] +} + +func (x SimpleMetric_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SimpleMetric_Type.Descriptor instead. +func (SimpleMetric_Type) EnumDescriptor() ([]byte, []int) { + return file_envoy_admin_v3_metrics_proto_rawDescGZIP(), []int{0, 0} +} + +// Proto representation of an Envoy Counter or Gauge value. +type SimpleMetric struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Type of the metric represented. + Type SimpleMetric_Type `protobuf:"varint,1,opt,name=type,proto3,enum=envoy.admin.v3.SimpleMetric_Type" json:"type,omitempty"` + // Current metric value. + Value uint64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` + // Name of the metric. 
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *SimpleMetric) Reset() { + *x = SimpleMetric{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_metrics_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SimpleMetric) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SimpleMetric) ProtoMessage() {} + +func (x *SimpleMetric) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_metrics_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SimpleMetric.ProtoReflect.Descriptor instead. +func (*SimpleMetric) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_metrics_proto_rawDescGZIP(), []int{0} +} + +func (x *SimpleMetric) GetType() SimpleMetric_Type { + if x != nil { + return x.Type + } + return SimpleMetric_COUNTER +} + +func (x *SimpleMetric) GetValue() uint64 { + if x != nil { + return x.Value + } + return 0 +} + +func (x *SimpleMetric) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +var File_envoy_admin_v3_metrics_proto protoreflect.FileDescriptor + +var file_envoy_admin_v3_metrics_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xb8, 0x01, 0x0a, 0x0c, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x12, 0x35, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x22, 0x1e, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, + 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, + 0x10, 0x01, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, + 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x42, 0x75, 0xba, 0x80, 0xc8, + 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x50, 0x72, 
0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_admin_v3_metrics_proto_rawDescOnce sync.Once + file_envoy_admin_v3_metrics_proto_rawDescData = file_envoy_admin_v3_metrics_proto_rawDesc +) + +func file_envoy_admin_v3_metrics_proto_rawDescGZIP() []byte { + file_envoy_admin_v3_metrics_proto_rawDescOnce.Do(func() { + file_envoy_admin_v3_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_metrics_proto_rawDescData) + }) + return file_envoy_admin_v3_metrics_proto_rawDescData +} + +var file_envoy_admin_v3_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_admin_v3_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_admin_v3_metrics_proto_goTypes = []interface{}{ + (SimpleMetric_Type)(0), // 0: envoy.admin.v3.SimpleMetric.Type + (*SimpleMetric)(nil), // 1: envoy.admin.v3.SimpleMetric +} +var file_envoy_admin_v3_metrics_proto_depIdxs = []int32{ + 0, // 0: envoy.admin.v3.SimpleMetric.type:type_name -> envoy.admin.v3.SimpleMetric.Type + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_admin_v3_metrics_proto_init() } +func file_envoy_admin_v3_metrics_proto_init() { + if File_envoy_admin_v3_metrics_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_admin_v3_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SimpleMetric); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_admin_v3_metrics_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_admin_v3_metrics_proto_goTypes, + DependencyIndexes: file_envoy_admin_v3_metrics_proto_depIdxs, + EnumInfos: file_envoy_admin_v3_metrics_proto_enumTypes, + MessageInfos: file_envoy_admin_v3_metrics_proto_msgTypes, + }.Build() + File_envoy_admin_v3_metrics_proto = out.File + file_envoy_admin_v3_metrics_proto_rawDesc = nil + file_envoy_admin_v3_metrics_proto_goTypes = nil + file_envoy_admin_v3_metrics_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.validate.go new file mode 100644 index 0000000000..903d70e199 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.validate.go @@ -0,0 +1,142 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/admin/v3/metrics.proto + +package adminv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on SimpleMetric with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *SimpleMetric) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SimpleMetric with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in SimpleMetricMultiError, or +// nil if none found. +func (m *SimpleMetric) ValidateAll() error { + return m.validate(true) +} + +func (m *SimpleMetric) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Type + + // no validation rules for Value + + // no validation rules for Name + + if len(errors) > 0 { + return SimpleMetricMultiError(errors) + } + + return nil +} + +// SimpleMetricMultiError is an error wrapping multiple validation errors +// returned by SimpleMetric.ValidateAll() if the designated constraints aren't met. +type SimpleMetricMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SimpleMetricMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SimpleMetricMultiError) AllErrors() []error { return m } + +// SimpleMetricValidationError is the validation error returned by +// SimpleMetric.Validate if the designated constraints aren't met. +type SimpleMetricValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SimpleMetricValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SimpleMetricValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SimpleMetricValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SimpleMetricValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e SimpleMetricValidationError) ErrorName() string { return "SimpleMetricValidationError" } + +// Error satisfies the builtin error interface +func (e SimpleMetricValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSimpleMetric.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SimpleMetricValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SimpleMetricValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics_vtproto.pb.go new file mode 100644 index 0000000000..0c09ae0459 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics_vtproto.pb.go @@ -0,0 +1,89 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/metrics.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *SimpleMetric) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SimpleMetric) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SimpleMetric) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + } + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x10 + } + if m.Type != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SimpleMetric) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Type)) + } + if m.Value != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Value)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.go new file mode 100644 index 0000000000..44f35183dd --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.go @@ -0,0 +1,191 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/admin/v3/mutex_stats.proto + +package adminv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Proto representation of the statistics collected upon absl::Mutex contention, if Envoy is run +// under :option:`--enable-mutex-tracing`. For more information, see the “absl::Mutex“ +// [docs](https://abseil.io/about/design/mutex#extra-features). +// +// *NB*: The wait cycles below are measured by “absl::base_internal::CycleClock“, and may not +// correspond to core clock frequency. For more information, see the “CycleClock“ +// [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h). +type MutexStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of individual mutex contentions which have occurred since startup. + NumContentions uint64 `protobuf:"varint,1,opt,name=num_contentions,json=numContentions,proto3" json:"num_contentions,omitempty"` + // The length of the current contention wait cycle. + CurrentWaitCycles uint64 `protobuf:"varint,2,opt,name=current_wait_cycles,json=currentWaitCycles,proto3" json:"current_wait_cycles,omitempty"` + // The lifetime total of all contention wait cycles. + LifetimeWaitCycles uint64 `protobuf:"varint,3,opt,name=lifetime_wait_cycles,json=lifetimeWaitCycles,proto3" json:"lifetime_wait_cycles,omitempty"` +} + +func (x *MutexStats) Reset() { + *x = MutexStats{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_mutex_stats_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MutexStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MutexStats) ProtoMessage() {} + +func (x *MutexStats) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_mutex_stats_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MutexStats.ProtoReflect.Descriptor instead. 
+func (*MutexStats) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_mutex_stats_proto_rawDescGZIP(), []int{0} +} + +func (x *MutexStats) GetNumContentions() uint64 { + if x != nil { + return x.NumContentions + } + return 0 +} + +func (x *MutexStats) GetCurrentWaitCycles() uint64 { + if x != nil { + return x.CurrentWaitCycles + } + return 0 +} + +func (x *MutexStats) GetLifetimeWaitCycles() uint64 { + if x != nil { + return x.LifetimeWaitCycles + } + return 0 +} + +var File_envoy_admin_v3_mutex_stats_proto protoreflect.FileDescriptor + +var file_envoy_admin_v3_mutex_stats_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x2f, 0x6d, 0x75, 0x74, 0x65, 0x78, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbe, 0x01, 0x0a, 0x0a, 0x4d, 0x75, 0x74, 0x65, 0x78, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6e, 0x75, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6e, 0x75, + 0x6d, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2e, 0x0a, 0x13, + 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x63, 0x79, 0x63, + 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x63, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x74, 0x57, 0x61, 0x69, 0x74, 0x43, 0x79, 0x63, 0x6c, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x14, + 0x6c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x63, 0x79, + 0x63, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x12, 0x6c, 0x69, 0x66, 0x65, + 0x74, 0x69, 0x6d, 0x65, 0x57, 0x61, 0x69, 0x74, 0x43, 0x79, 0x63, 0x6c, 0x65, 0x73, 0x3a, 0x25, + 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x75, 0x74, 0x65, 0x78, + 0x53, 0x74, 0x61, 0x74, 0x73, 0x42, 0x78, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, + 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x4d, + 0x75, 0x74, 0x65, 0x78, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_admin_v3_mutex_stats_proto_rawDescOnce sync.Once + file_envoy_admin_v3_mutex_stats_proto_rawDescData = file_envoy_admin_v3_mutex_stats_proto_rawDesc +) + +func file_envoy_admin_v3_mutex_stats_proto_rawDescGZIP() []byte { + 
file_envoy_admin_v3_mutex_stats_proto_rawDescOnce.Do(func() { + file_envoy_admin_v3_mutex_stats_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_mutex_stats_proto_rawDescData) + }) + return file_envoy_admin_v3_mutex_stats_proto_rawDescData +} + +var file_envoy_admin_v3_mutex_stats_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_admin_v3_mutex_stats_proto_goTypes = []interface{}{ + (*MutexStats)(nil), // 0: envoy.admin.v3.MutexStats +} +var file_envoy_admin_v3_mutex_stats_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_admin_v3_mutex_stats_proto_init() } +func file_envoy_admin_v3_mutex_stats_proto_init() { + if File_envoy_admin_v3_mutex_stats_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_admin_v3_mutex_stats_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MutexStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_admin_v3_mutex_stats_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_admin_v3_mutex_stats_proto_goTypes, + DependencyIndexes: file_envoy_admin_v3_mutex_stats_proto_depIdxs, + MessageInfos: file_envoy_admin_v3_mutex_stats_proto_msgTypes, + }.Build() + File_envoy_admin_v3_mutex_stats_proto = out.File + file_envoy_admin_v3_mutex_stats_proto_rawDesc = nil + file_envoy_admin_v3_mutex_stats_proto_goTypes = nil + file_envoy_admin_v3_mutex_stats_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.validate.go new file mode 100644 index 0000000000..236524c54a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.validate.go @@ -0,0 +1,142 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/admin/v3/mutex_stats.proto + +package adminv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on MutexStats with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *MutexStats) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MutexStats with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in MutexStatsMultiError, or +// nil if none found. 
+func (m *MutexStats) ValidateAll() error { + return m.validate(true) +} + +func (m *MutexStats) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for NumContentions + + // no validation rules for CurrentWaitCycles + + // no validation rules for LifetimeWaitCycles + + if len(errors) > 0 { + return MutexStatsMultiError(errors) + } + + return nil +} + +// MutexStatsMultiError is an error wrapping multiple validation errors +// returned by MutexStats.ValidateAll() if the designated constraints aren't met. +type MutexStatsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MutexStatsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MutexStatsMultiError) AllErrors() []error { return m } + +// MutexStatsValidationError is the validation error returned by +// MutexStats.Validate if the designated constraints aren't met. +type MutexStatsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MutexStatsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MutexStatsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MutexStatsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MutexStatsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MutexStatsValidationError) ErrorName() string { return "MutexStatsValidationError" } + +// Error satisfies the builtin error interface +func (e MutexStatsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMutexStats.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MutexStatsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MutexStatsValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats_vtproto.pb.go new file mode 100644 index 0000000000..4318cbc993 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats_vtproto.pb.go @@ -0,0 +1,86 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/mutex_stats.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *MutexStats) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MutexStats) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MutexStats) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LifetimeWaitCycles != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.LifetimeWaitCycles)) + i-- + dAtA[i] = 0x18 + } + if m.CurrentWaitCycles != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CurrentWaitCycles)) + i-- + dAtA[i] = 0x10 + } + if m.NumContentions != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.NumContentions)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MutexStats) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NumContentions != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.NumContentions)) + } + if m.CurrentWaitCycles != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.CurrentWaitCycles)) + } + if m.LifetimeWaitCycles != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.LifetimeWaitCycles)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.go new file mode 100644 index 0000000000..4538ff30f1 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.go @@ -0,0 +1,975 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/admin/v3/server_info.proto + +package adminv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ServerInfo_State int32 + +const ( + // Server is live and serving traffic. + ServerInfo_LIVE ServerInfo_State = 0 + // Server is draining listeners in response to external health checks failing. + ServerInfo_DRAINING ServerInfo_State = 1 + // Server has not yet completed cluster manager initialization. + ServerInfo_PRE_INITIALIZING ServerInfo_State = 2 + // Server is running the cluster manager initialization callbacks (e.g., RDS). + ServerInfo_INITIALIZING ServerInfo_State = 3 +) + +// Enum value maps for ServerInfo_State. 
+var ( + ServerInfo_State_name = map[int32]string{ + 0: "LIVE", + 1: "DRAINING", + 2: "PRE_INITIALIZING", + 3: "INITIALIZING", + } + ServerInfo_State_value = map[string]int32{ + "LIVE": 0, + "DRAINING": 1, + "PRE_INITIALIZING": 2, + "INITIALIZING": 3, + } +) + +func (x ServerInfo_State) Enum() *ServerInfo_State { + p := new(ServerInfo_State) + *p = x + return p +} + +func (x ServerInfo_State) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ServerInfo_State) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_admin_v3_server_info_proto_enumTypes[0].Descriptor() +} + +func (ServerInfo_State) Type() protoreflect.EnumType { + return &file_envoy_admin_v3_server_info_proto_enumTypes[0] +} + +func (x ServerInfo_State) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ServerInfo_State.Descriptor instead. +func (ServerInfo_State) EnumDescriptor() ([]byte, []int) { + return file_envoy_admin_v3_server_info_proto_rawDescGZIP(), []int{0, 0} +} + +type CommandLineOptions_IpVersion int32 + +const ( + CommandLineOptions_v4 CommandLineOptions_IpVersion = 0 + CommandLineOptions_v6 CommandLineOptions_IpVersion = 1 +) + +// Enum value maps for CommandLineOptions_IpVersion. +var ( + CommandLineOptions_IpVersion_name = map[int32]string{ + 0: "v4", + 1: "v6", + } + CommandLineOptions_IpVersion_value = map[string]int32{ + "v4": 0, + "v6": 1, + } +) + +func (x CommandLineOptions_IpVersion) Enum() *CommandLineOptions_IpVersion { + p := new(CommandLineOptions_IpVersion) + *p = x + return p +} + +func (x CommandLineOptions_IpVersion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CommandLineOptions_IpVersion) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_admin_v3_server_info_proto_enumTypes[1].Descriptor() +} + +func (CommandLineOptions_IpVersion) Type() protoreflect.EnumType { + return &file_envoy_admin_v3_server_info_proto_enumTypes[1] +} + +func (x CommandLineOptions_IpVersion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CommandLineOptions_IpVersion.Descriptor instead. +func (CommandLineOptions_IpVersion) EnumDescriptor() ([]byte, []int) { + return file_envoy_admin_v3_server_info_proto_rawDescGZIP(), []int{1, 0} +} + +type CommandLineOptions_Mode int32 + +const ( + // Validate configs and then serve traffic normally. + CommandLineOptions_Serve CommandLineOptions_Mode = 0 + // Validate configs and exit. + CommandLineOptions_Validate CommandLineOptions_Mode = 1 + // Completely load and initialize the config, and then exit without running the listener loop. + CommandLineOptions_InitOnly CommandLineOptions_Mode = 2 +) + +// Enum value maps for CommandLineOptions_Mode. 
+var ( + CommandLineOptions_Mode_name = map[int32]string{ + 0: "Serve", + 1: "Validate", + 2: "InitOnly", + } + CommandLineOptions_Mode_value = map[string]int32{ + "Serve": 0, + "Validate": 1, + "InitOnly": 2, + } +) + +func (x CommandLineOptions_Mode) Enum() *CommandLineOptions_Mode { + p := new(CommandLineOptions_Mode) + *p = x + return p +} + +func (x CommandLineOptions_Mode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CommandLineOptions_Mode) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_admin_v3_server_info_proto_enumTypes[2].Descriptor() +} + +func (CommandLineOptions_Mode) Type() protoreflect.EnumType { + return &file_envoy_admin_v3_server_info_proto_enumTypes[2] +} + +func (x CommandLineOptions_Mode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CommandLineOptions_Mode.Descriptor instead. +func (CommandLineOptions_Mode) EnumDescriptor() ([]byte, []int) { + return file_envoy_admin_v3_server_info_proto_rawDescGZIP(), []int{1, 1} +} + +type CommandLineOptions_DrainStrategy int32 + +const ( + // Gradually discourage connections over the course of the drain period. + CommandLineOptions_Gradual CommandLineOptions_DrainStrategy = 0 + // Discourage all connections for the duration of the drain sequence. + CommandLineOptions_Immediate CommandLineOptions_DrainStrategy = 1 +) + +// Enum value maps for CommandLineOptions_DrainStrategy. +var ( + CommandLineOptions_DrainStrategy_name = map[int32]string{ + 0: "Gradual", + 1: "Immediate", + } + CommandLineOptions_DrainStrategy_value = map[string]int32{ + "Gradual": 0, + "Immediate": 1, + } +) + +func (x CommandLineOptions_DrainStrategy) Enum() *CommandLineOptions_DrainStrategy { + p := new(CommandLineOptions_DrainStrategy) + *p = x + return p +} + +func (x CommandLineOptions_DrainStrategy) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CommandLineOptions_DrainStrategy) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_admin_v3_server_info_proto_enumTypes[3].Descriptor() +} + +func (CommandLineOptions_DrainStrategy) Type() protoreflect.EnumType { + return &file_envoy_admin_v3_server_info_proto_enumTypes[3] +} + +func (x CommandLineOptions_DrainStrategy) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CommandLineOptions_DrainStrategy.Descriptor instead. +func (CommandLineOptions_DrainStrategy) EnumDescriptor() ([]byte, []int) { + return file_envoy_admin_v3_server_info_proto_rawDescGZIP(), []int{1, 2} +} + +// Proto representation of the value returned by /server_info, containing +// server version/server status information. +// [#next-free-field: 8] +type ServerInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Server version. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // State of the server. + State ServerInfo_State `protobuf:"varint,2,opt,name=state,proto3,enum=envoy.admin.v3.ServerInfo_State" json:"state,omitempty"` + // Uptime since current epoch was started. + UptimeCurrentEpoch *durationpb.Duration `protobuf:"bytes,3,opt,name=uptime_current_epoch,json=uptimeCurrentEpoch,proto3" json:"uptime_current_epoch,omitempty"` + // Uptime since the start of the first epoch. 
+ UptimeAllEpochs *durationpb.Duration `protobuf:"bytes,4,opt,name=uptime_all_epochs,json=uptimeAllEpochs,proto3" json:"uptime_all_epochs,omitempty"` + // Hot restart version. + HotRestartVersion string `protobuf:"bytes,5,opt,name=hot_restart_version,json=hotRestartVersion,proto3" json:"hot_restart_version,omitempty"` + // Command line options the server is currently running with. + CommandLineOptions *CommandLineOptions `protobuf:"bytes,6,opt,name=command_line_options,json=commandLineOptions,proto3" json:"command_line_options,omitempty"` + // Populated node identity of this server. + Node *v3.Node `protobuf:"bytes,7,opt,name=node,proto3" json:"node,omitempty"` +} + +func (x *ServerInfo) Reset() { + *x = ServerInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_server_info_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerInfo) ProtoMessage() {} + +func (x *ServerInfo) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_server_info_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerInfo.ProtoReflect.Descriptor instead. +func (*ServerInfo) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_server_info_proto_rawDescGZIP(), []int{0} +} + +func (x *ServerInfo) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *ServerInfo) GetState() ServerInfo_State { + if x != nil { + return x.State + } + return ServerInfo_LIVE +} + +func (x *ServerInfo) GetUptimeCurrentEpoch() *durationpb.Duration { + if x != nil { + return x.UptimeCurrentEpoch + } + return nil +} + +func (x *ServerInfo) GetUptimeAllEpochs() *durationpb.Duration { + if x != nil { + return x.UptimeAllEpochs + } + return nil +} + +func (x *ServerInfo) GetHotRestartVersion() string { + if x != nil { + return x.HotRestartVersion + } + return "" +} + +func (x *ServerInfo) GetCommandLineOptions() *CommandLineOptions { + if x != nil { + return x.CommandLineOptions + } + return nil +} + +func (x *ServerInfo) GetNode() *v3.Node { + if x != nil { + return x.Node + } + return nil +} + +// [#next-free-field: 41] +type CommandLineOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // See :option:`--base-id` for details. + BaseId uint64 `protobuf:"varint,1,opt,name=base_id,json=baseId,proto3" json:"base_id,omitempty"` + // See :option:`--use-dynamic-base-id` for details. + UseDynamicBaseId bool `protobuf:"varint,31,opt,name=use_dynamic_base_id,json=useDynamicBaseId,proto3" json:"use_dynamic_base_id,omitempty"` + // See :option:`--skip-hot-restart-on-no-parent` for details. + SkipHotRestartOnNoParent bool `protobuf:"varint,39,opt,name=skip_hot_restart_on_no_parent,json=skipHotRestartOnNoParent,proto3" json:"skip_hot_restart_on_no_parent,omitempty"` + // See :option:`--skip-hot-restart-parent-stats` for details. + SkipHotRestartParentStats bool `protobuf:"varint,40,opt,name=skip_hot_restart_parent_stats,json=skipHotRestartParentStats,proto3" json:"skip_hot_restart_parent_stats,omitempty"` + // See :option:`--base-id-path` for details. 
+ BaseIdPath string `protobuf:"bytes,32,opt,name=base_id_path,json=baseIdPath,proto3" json:"base_id_path,omitempty"` + // See :option:`--concurrency` for details. + Concurrency uint32 `protobuf:"varint,2,opt,name=concurrency,proto3" json:"concurrency,omitempty"` + // See :option:`--config-path` for details. + ConfigPath string `protobuf:"bytes,3,opt,name=config_path,json=configPath,proto3" json:"config_path,omitempty"` + // See :option:`--config-yaml` for details. + ConfigYaml string `protobuf:"bytes,4,opt,name=config_yaml,json=configYaml,proto3" json:"config_yaml,omitempty"` + // See :option:`--allow-unknown-static-fields` for details. + AllowUnknownStaticFields bool `protobuf:"varint,5,opt,name=allow_unknown_static_fields,json=allowUnknownStaticFields,proto3" json:"allow_unknown_static_fields,omitempty"` + // See :option:`--reject-unknown-dynamic-fields` for details. + RejectUnknownDynamicFields bool `protobuf:"varint,26,opt,name=reject_unknown_dynamic_fields,json=rejectUnknownDynamicFields,proto3" json:"reject_unknown_dynamic_fields,omitempty"` + // See :option:`--ignore-unknown-dynamic-fields` for details. + IgnoreUnknownDynamicFields bool `protobuf:"varint,30,opt,name=ignore_unknown_dynamic_fields,json=ignoreUnknownDynamicFields,proto3" json:"ignore_unknown_dynamic_fields,omitempty"` + // See :option:`--admin-address-path` for details. + AdminAddressPath string `protobuf:"bytes,6,opt,name=admin_address_path,json=adminAddressPath,proto3" json:"admin_address_path,omitempty"` + // See :option:`--local-address-ip-version` for details. + LocalAddressIpVersion CommandLineOptions_IpVersion `protobuf:"varint,7,opt,name=local_address_ip_version,json=localAddressIpVersion,proto3,enum=envoy.admin.v3.CommandLineOptions_IpVersion" json:"local_address_ip_version,omitempty"` + // See :option:`--log-level` for details. + LogLevel string `protobuf:"bytes,8,opt,name=log_level,json=logLevel,proto3" json:"log_level,omitempty"` + // See :option:`--component-log-level` for details. + ComponentLogLevel string `protobuf:"bytes,9,opt,name=component_log_level,json=componentLogLevel,proto3" json:"component_log_level,omitempty"` + // See :option:`--log-format` for details. + LogFormat string `protobuf:"bytes,10,opt,name=log_format,json=logFormat,proto3" json:"log_format,omitempty"` + // See :option:`--log-format-escaped` for details. + LogFormatEscaped bool `protobuf:"varint,27,opt,name=log_format_escaped,json=logFormatEscaped,proto3" json:"log_format_escaped,omitempty"` + // See :option:`--log-path` for details. + LogPath string `protobuf:"bytes,11,opt,name=log_path,json=logPath,proto3" json:"log_path,omitempty"` + // See :option:`--service-cluster` for details. + ServiceCluster string `protobuf:"bytes,13,opt,name=service_cluster,json=serviceCluster,proto3" json:"service_cluster,omitempty"` + // See :option:`--service-node` for details. + ServiceNode string `protobuf:"bytes,14,opt,name=service_node,json=serviceNode,proto3" json:"service_node,omitempty"` + // See :option:`--service-zone` for details. + ServiceZone string `protobuf:"bytes,15,opt,name=service_zone,json=serviceZone,proto3" json:"service_zone,omitempty"` + // See :option:`--file-flush-interval-msec` for details. + FileFlushInterval *durationpb.Duration `protobuf:"bytes,16,opt,name=file_flush_interval,json=fileFlushInterval,proto3" json:"file_flush_interval,omitempty"` + // See :option:`--drain-time-s` for details. 
+ DrainTime *durationpb.Duration `protobuf:"bytes,17,opt,name=drain_time,json=drainTime,proto3" json:"drain_time,omitempty"` + // See :option:`--drain-strategy` for details. + DrainStrategy CommandLineOptions_DrainStrategy `protobuf:"varint,33,opt,name=drain_strategy,json=drainStrategy,proto3,enum=envoy.admin.v3.CommandLineOptions_DrainStrategy" json:"drain_strategy,omitempty"` + // See :option:`--parent-shutdown-time-s` for details. + ParentShutdownTime *durationpb.Duration `protobuf:"bytes,18,opt,name=parent_shutdown_time,json=parentShutdownTime,proto3" json:"parent_shutdown_time,omitempty"` + // See :option:`--mode` for details. + Mode CommandLineOptions_Mode `protobuf:"varint,19,opt,name=mode,proto3,enum=envoy.admin.v3.CommandLineOptions_Mode" json:"mode,omitempty"` + // See :option:`--disable-hot-restart` for details. + DisableHotRestart bool `protobuf:"varint,22,opt,name=disable_hot_restart,json=disableHotRestart,proto3" json:"disable_hot_restart,omitempty"` + // See :option:`--enable-mutex-tracing` for details. + EnableMutexTracing bool `protobuf:"varint,23,opt,name=enable_mutex_tracing,json=enableMutexTracing,proto3" json:"enable_mutex_tracing,omitempty"` + // See :option:`--restart-epoch` for details. + RestartEpoch uint32 `protobuf:"varint,24,opt,name=restart_epoch,json=restartEpoch,proto3" json:"restart_epoch,omitempty"` + // See :option:`--cpuset-threads` for details. + CpusetThreads bool `protobuf:"varint,25,opt,name=cpuset_threads,json=cpusetThreads,proto3" json:"cpuset_threads,omitempty"` + // See :option:`--disable-extensions` for details. + DisabledExtensions []string `protobuf:"bytes,28,rep,name=disabled_extensions,json=disabledExtensions,proto3" json:"disabled_extensions,omitempty"` + // See :option:`--enable-fine-grain-logging` for details. + EnableFineGrainLogging bool `protobuf:"varint,34,opt,name=enable_fine_grain_logging,json=enableFineGrainLogging,proto3" json:"enable_fine_grain_logging,omitempty"` + // See :option:`--socket-path` for details. + SocketPath string `protobuf:"bytes,35,opt,name=socket_path,json=socketPath,proto3" json:"socket_path,omitempty"` + // See :option:`--socket-mode` for details. + SocketMode uint32 `protobuf:"varint,36,opt,name=socket_mode,json=socketMode,proto3" json:"socket_mode,omitempty"` + // See :option:`--enable-core-dump` for details. + EnableCoreDump bool `protobuf:"varint,37,opt,name=enable_core_dump,json=enableCoreDump,proto3" json:"enable_core_dump,omitempty"` + // See :option:`--stats-tag` for details. + StatsTag []string `protobuf:"bytes,38,rep,name=stats_tag,json=statsTag,proto3" json:"stats_tag,omitempty"` +} + +func (x *CommandLineOptions) Reset() { + *x = CommandLineOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_server_info_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommandLineOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommandLineOptions) ProtoMessage() {} + +func (x *CommandLineOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_server_info_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommandLineOptions.ProtoReflect.Descriptor instead. 
+func (*CommandLineOptions) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_server_info_proto_rawDescGZIP(), []int{1} +} + +func (x *CommandLineOptions) GetBaseId() uint64 { + if x != nil { + return x.BaseId + } + return 0 +} + +func (x *CommandLineOptions) GetUseDynamicBaseId() bool { + if x != nil { + return x.UseDynamicBaseId + } + return false +} + +func (x *CommandLineOptions) GetSkipHotRestartOnNoParent() bool { + if x != nil { + return x.SkipHotRestartOnNoParent + } + return false +} + +func (x *CommandLineOptions) GetSkipHotRestartParentStats() bool { + if x != nil { + return x.SkipHotRestartParentStats + } + return false +} + +func (x *CommandLineOptions) GetBaseIdPath() string { + if x != nil { + return x.BaseIdPath + } + return "" +} + +func (x *CommandLineOptions) GetConcurrency() uint32 { + if x != nil { + return x.Concurrency + } + return 0 +} + +func (x *CommandLineOptions) GetConfigPath() string { + if x != nil { + return x.ConfigPath + } + return "" +} + +func (x *CommandLineOptions) GetConfigYaml() string { + if x != nil { + return x.ConfigYaml + } + return "" +} + +func (x *CommandLineOptions) GetAllowUnknownStaticFields() bool { + if x != nil { + return x.AllowUnknownStaticFields + } + return false +} + +func (x *CommandLineOptions) GetRejectUnknownDynamicFields() bool { + if x != nil { + return x.RejectUnknownDynamicFields + } + return false +} + +func (x *CommandLineOptions) GetIgnoreUnknownDynamicFields() bool { + if x != nil { + return x.IgnoreUnknownDynamicFields + } + return false +} + +func (x *CommandLineOptions) GetAdminAddressPath() string { + if x != nil { + return x.AdminAddressPath + } + return "" +} + +func (x *CommandLineOptions) GetLocalAddressIpVersion() CommandLineOptions_IpVersion { + if x != nil { + return x.LocalAddressIpVersion + } + return CommandLineOptions_v4 +} + +func (x *CommandLineOptions) GetLogLevel() string { + if x != nil { + return x.LogLevel + } + return "" +} + +func (x *CommandLineOptions) GetComponentLogLevel() string { + if x != nil { + return x.ComponentLogLevel + } + return "" +} + +func (x *CommandLineOptions) GetLogFormat() string { + if x != nil { + return x.LogFormat + } + return "" +} + +func (x *CommandLineOptions) GetLogFormatEscaped() bool { + if x != nil { + return x.LogFormatEscaped + } + return false +} + +func (x *CommandLineOptions) GetLogPath() string { + if x != nil { + return x.LogPath + } + return "" +} + +func (x *CommandLineOptions) GetServiceCluster() string { + if x != nil { + return x.ServiceCluster + } + return "" +} + +func (x *CommandLineOptions) GetServiceNode() string { + if x != nil { + return x.ServiceNode + } + return "" +} + +func (x *CommandLineOptions) GetServiceZone() string { + if x != nil { + return x.ServiceZone + } + return "" +} + +func (x *CommandLineOptions) GetFileFlushInterval() *durationpb.Duration { + if x != nil { + return x.FileFlushInterval + } + return nil +} + +func (x *CommandLineOptions) GetDrainTime() *durationpb.Duration { + if x != nil { + return x.DrainTime + } + return nil +} + +func (x *CommandLineOptions) GetDrainStrategy() CommandLineOptions_DrainStrategy { + if x != nil { + return x.DrainStrategy + } + return CommandLineOptions_Gradual +} + +func (x *CommandLineOptions) GetParentShutdownTime() *durationpb.Duration { + if x != nil { + return x.ParentShutdownTime + } + return nil +} + +func (x *CommandLineOptions) GetMode() CommandLineOptions_Mode { + if x != nil { + return x.Mode + } + return CommandLineOptions_Serve +} + +func (x *CommandLineOptions) 
GetDisableHotRestart() bool { + if x != nil { + return x.DisableHotRestart + } + return false +} + +func (x *CommandLineOptions) GetEnableMutexTracing() bool { + if x != nil { + return x.EnableMutexTracing + } + return false +} + +func (x *CommandLineOptions) GetRestartEpoch() uint32 { + if x != nil { + return x.RestartEpoch + } + return 0 +} + +func (x *CommandLineOptions) GetCpusetThreads() bool { + if x != nil { + return x.CpusetThreads + } + return false +} + +func (x *CommandLineOptions) GetDisabledExtensions() []string { + if x != nil { + return x.DisabledExtensions + } + return nil +} + +func (x *CommandLineOptions) GetEnableFineGrainLogging() bool { + if x != nil { + return x.EnableFineGrainLogging + } + return false +} + +func (x *CommandLineOptions) GetSocketPath() string { + if x != nil { + return x.SocketPath + } + return "" +} + +func (x *CommandLineOptions) GetSocketMode() uint32 { + if x != nil { + return x.SocketMode + } + return 0 +} + +func (x *CommandLineOptions) GetEnableCoreDump() bool { + if x != nil { + return x.EnableCoreDump + } + return false +} + +func (x *CommandLineOptions) GetStatsTag() []string { + if x != nil { + return x.StatsTag + } + return nil +} + +var File_envoy_admin_v3_server_info_proto protoreflect.FileDescriptor + +var file_envoy_admin_v3_server_info_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x98, 0x04, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, + 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x4b, 0x0a, 0x14, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, + 0x5f, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x12, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x45, 0x70, + 0x6f, 0x63, 0x68, 0x12, 0x45, 0x0a, 0x11, 0x75, 0x70, 0x74, 
0x69, 0x6d, 0x65, 0x5f, 0x61, 0x6c, + 0x6c, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x75, 0x70, 0x74, 0x69, 0x6d, + 0x65, 0x41, 0x6c, 0x6c, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x68, 0x6f, + 0x74, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x54, 0x0a, 0x14, 0x63, 0x6f, + 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x5f, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, + 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x12, 0x63, 0x6f, + 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x2e, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, + 0x22, 0x47, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x49, 0x56, + 0x45, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x52, 0x41, 0x49, 0x4e, 0x49, 0x4e, 0x47, 0x10, + 0x01, 0x12, 0x14, 0x0a, 0x10, 0x50, 0x52, 0x45, 0x5f, 0x49, 0x4e, 0x49, 0x54, 0x49, 0x41, 0x4c, + 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x4e, 0x49, 0x54, 0x49, + 0x41, 0x4c, 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, + 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, + 0x22, 0xde, 0x0f, 0x0a, 0x12, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x62, 0x61, 0x73, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x62, 0x61, 0x73, 0x65, 0x49, 0x64, + 0x12, 0x2d, 0x0a, 0x13, 0x75, 0x73, 0x65, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, + 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x75, + 0x73, 0x65, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x42, 0x61, 0x73, 0x65, 0x49, 0x64, 0x12, + 0x3f, 0x0a, 0x1d, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x68, 0x6f, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x5f, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x18, 0x27, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x73, 0x6b, 0x69, 0x70, 0x48, 0x6f, 0x74, 0x52, + 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4f, 0x6e, 0x4e, 0x6f, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x12, 0x40, 0x0a, 0x1d, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x68, 0x6f, 0x74, 0x5f, 0x72, 0x65, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x73, 0x18, 0x28, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x73, 0x6b, 0x69, 0x70, 0x48, 0x6f, 0x74, + 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 
0x74, 0x61, + 0x74, 0x73, 0x12, 0x20, 0x0a, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x20, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x61, 0x73, 0x65, 0x49, 0x64, + 0x50, 0x61, 0x74, 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x5f, 0x79, 0x61, 0x6d, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x59, 0x61, 0x6d, 0x6c, 0x12, 0x3d, 0x0a, 0x1b, 0x61, 0x6c, 0x6c, 0x6f, + 0x77, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, + 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x69, + 0x63, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x72, 0x65, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, + 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x69, 0x67, + 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x64, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x1e, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x1a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x2c, 0x0a, + 0x12, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x50, 0x61, 0x74, 0x68, 0x12, 0x65, 0x0a, 0x18, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x70, 0x5f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x49, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x15, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x49, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, + 0x2e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x6f, 0x67, + 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x6f, + 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, + 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x0a, 0x20, + 0x01, 0x28, 
0x09, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x2c, + 0x0a, 0x12, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x5f, 0x65, 0x73, 0x63, + 0x61, 0x70, 0x65, 0x64, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x6f, 0x67, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x08, + 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x6c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, + 0x6f, 0x64, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x7a, + 0x6f, 0x6e, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x49, 0x0a, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x66, + 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x10, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, + 0x66, 0x69, 0x6c, 0x65, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x12, 0x38, 0x0a, 0x0a, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x09, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x0e, 0x64, + 0x72, 0x61, 0x69, 0x6e, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x21, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x53, 0x74, 0x72, + 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x0d, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x53, 0x74, 0x72, 0x61, + 0x74, 0x65, 0x67, 0x79, 0x12, 0x4b, 0x0a, 0x14, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, + 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x12, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x54, 0x69, 0x6d, + 0x65, 0x12, 0x3b, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x2e, + 0x0a, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x68, 0x6f, 0x74, 0x5f, 0x72, 0x65, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 
0x16, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x64, 0x69, 0x73, + 0x61, 0x62, 0x6c, 0x65, 0x48, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, + 0x0a, 0x14, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x65, 0x78, 0x5f, 0x74, + 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x75, 0x74, 0x65, 0x78, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, + 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x65, 0x70, 0x6f, 0x63, + 0x68, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x45, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x70, 0x75, 0x73, 0x65, 0x74, 0x5f, + 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x63, + 0x70, 0x75, 0x73, 0x65, 0x74, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x12, 0x2f, 0x0a, 0x13, + 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x1c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x64, 0x69, 0x73, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x39, 0x0a, + 0x19, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6e, 0x65, 0x5f, 0x67, 0x72, 0x61, + 0x69, 0x6e, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x22, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x16, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6e, 0x65, 0x47, 0x72, 0x61, 0x69, + 0x6e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x23, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, + 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x18, 0x25, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x72, 0x65, + 0x44, 0x75, 0x6d, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x74, 0x61, + 0x67, 0x18, 0x26, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x73, 0x54, 0x61, + 0x67, 0x22, 0x1b, 0x0a, 0x09, 0x49, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x06, + 0x0a, 0x02, 0x76, 0x34, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x76, 0x36, 0x10, 0x01, 0x22, 0x2d, + 0x0a, 0x04, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x65, 0x72, 0x76, 0x65, 0x10, + 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x10, 0x01, 0x12, + 0x0c, 0x0a, 0x08, 0x49, 0x6e, 0x69, 0x74, 0x4f, 0x6e, 0x6c, 0x79, 0x10, 0x02, 0x22, 0x2b, 0x0a, + 0x0d, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x0b, + 0x0a, 0x07, 0x47, 0x72, 0x61, 0x64, 0x75, 0x61, 0x6c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x49, + 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x10, 0x01, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, + 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, + 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, + 0x04, 0x08, 0x14, 0x10, 0x15, 0x4a, 0x04, 0x08, 0x15, 0x10, 
0x16, 0x4a, 0x04, 0x08, 0x1d, 0x10, + 0x1e, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x52, 0x10, 0x6d, 0x61, + 0x78, 0x5f, 0x6f, 0x62, 0x6a, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x52, 0x11, + 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x42, 0x78, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_admin_v3_server_info_proto_rawDescOnce sync.Once + file_envoy_admin_v3_server_info_proto_rawDescData = file_envoy_admin_v3_server_info_proto_rawDesc +) + +func file_envoy_admin_v3_server_info_proto_rawDescGZIP() []byte { + file_envoy_admin_v3_server_info_proto_rawDescOnce.Do(func() { + file_envoy_admin_v3_server_info_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_server_info_proto_rawDescData) + }) + return file_envoy_admin_v3_server_info_proto_rawDescData +} + +var file_envoy_admin_v3_server_info_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_envoy_admin_v3_server_info_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_admin_v3_server_info_proto_goTypes = []interface{}{ + (ServerInfo_State)(0), // 0: envoy.admin.v3.ServerInfo.State + (CommandLineOptions_IpVersion)(0), // 1: envoy.admin.v3.CommandLineOptions.IpVersion + (CommandLineOptions_Mode)(0), // 2: envoy.admin.v3.CommandLineOptions.Mode + (CommandLineOptions_DrainStrategy)(0), // 3: envoy.admin.v3.CommandLineOptions.DrainStrategy + (*ServerInfo)(nil), // 4: envoy.admin.v3.ServerInfo + (*CommandLineOptions)(nil), // 5: envoy.admin.v3.CommandLineOptions + (*durationpb.Duration)(nil), // 6: google.protobuf.Duration + (*v3.Node)(nil), // 7: envoy.config.core.v3.Node +} +var file_envoy_admin_v3_server_info_proto_depIdxs = []int32{ + 0, // 0: envoy.admin.v3.ServerInfo.state:type_name -> envoy.admin.v3.ServerInfo.State + 6, // 1: envoy.admin.v3.ServerInfo.uptime_current_epoch:type_name -> google.protobuf.Duration + 6, // 2: envoy.admin.v3.ServerInfo.uptime_all_epochs:type_name -> google.protobuf.Duration + 5, // 3: envoy.admin.v3.ServerInfo.command_line_options:type_name -> envoy.admin.v3.CommandLineOptions + 7, // 4: envoy.admin.v3.ServerInfo.node:type_name -> envoy.config.core.v3.Node + 1, // 5: envoy.admin.v3.CommandLineOptions.local_address_ip_version:type_name -> envoy.admin.v3.CommandLineOptions.IpVersion + 6, // 6: envoy.admin.v3.CommandLineOptions.file_flush_interval:type_name -> google.protobuf.Duration + 6, // 7: envoy.admin.v3.CommandLineOptions.drain_time:type_name -> google.protobuf.Duration + 3, // 8: envoy.admin.v3.CommandLineOptions.drain_strategy:type_name -> envoy.admin.v3.CommandLineOptions.DrainStrategy + 6, // 9: envoy.admin.v3.CommandLineOptions.parent_shutdown_time:type_name -> google.protobuf.Duration + 2, // 10: 
envoy.admin.v3.CommandLineOptions.mode:type_name -> envoy.admin.v3.CommandLineOptions.Mode + 11, // [11:11] is the sub-list for method output_type + 11, // [11:11] is the sub-list for method input_type + 11, // [11:11] is the sub-list for extension type_name + 11, // [11:11] is the sub-list for extension extendee + 0, // [0:11] is the sub-list for field type_name +} + +func init() { file_envoy_admin_v3_server_info_proto_init() } +func file_envoy_admin_v3_server_info_proto_init() { + if File_envoy_admin_v3_server_info_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_admin_v3_server_info_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_admin_v3_server_info_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommandLineOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_admin_v3_server_info_proto_rawDesc, + NumEnums: 4, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_admin_v3_server_info_proto_goTypes, + DependencyIndexes: file_envoy_admin_v3_server_info_proto_depIdxs, + EnumInfos: file_envoy_admin_v3_server_info_proto_enumTypes, + MessageInfos: file_envoy_admin_v3_server_info_proto_msgTypes, + }.Build() + File_envoy_admin_v3_server_info_proto = out.File + file_envoy_admin_v3_server_info_proto_rawDesc = nil + file_envoy_admin_v3_server_info_proto_goTypes = nil + file_envoy_admin_v3_server_info_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.validate.go new file mode 100644 index 0000000000..8db097828b --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.validate.go @@ -0,0 +1,509 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/admin/v3/server_info.proto + +package adminv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ServerInfo with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ServerInfo) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ServerInfo with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ServerInfoMultiError, or +// nil if none found. 
+func (m *ServerInfo) ValidateAll() error { + return m.validate(true) +} + +func (m *ServerInfo) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Version + + // no validation rules for State + + if all { + switch v := interface{}(m.GetUptimeCurrentEpoch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ServerInfoValidationError{ + field: "UptimeCurrentEpoch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ServerInfoValidationError{ + field: "UptimeCurrentEpoch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUptimeCurrentEpoch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ServerInfoValidationError{ + field: "UptimeCurrentEpoch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUptimeAllEpochs()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ServerInfoValidationError{ + field: "UptimeAllEpochs", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ServerInfoValidationError{ + field: "UptimeAllEpochs", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUptimeAllEpochs()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ServerInfoValidationError{ + field: "UptimeAllEpochs", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for HotRestartVersion + + if all { + switch v := interface{}(m.GetCommandLineOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ServerInfoValidationError{ + field: "CommandLineOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ServerInfoValidationError{ + field: "CommandLineOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCommandLineOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ServerInfoValidationError{ + field: "CommandLineOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetNode()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ServerInfoValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ServerInfoValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNode()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ServerInfoValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + 
return ServerInfoMultiError(errors) + } + + return nil +} + +// ServerInfoMultiError is an error wrapping multiple validation errors +// returned by ServerInfo.ValidateAll() if the designated constraints aren't met. +type ServerInfoMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ServerInfoMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ServerInfoMultiError) AllErrors() []error { return m } + +// ServerInfoValidationError is the validation error returned by +// ServerInfo.Validate if the designated constraints aren't met. +type ServerInfoValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ServerInfoValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ServerInfoValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ServerInfoValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ServerInfoValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ServerInfoValidationError) ErrorName() string { return "ServerInfoValidationError" } + +// Error satisfies the builtin error interface +func (e ServerInfoValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sServerInfo.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ServerInfoValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ServerInfoValidationError{} + +// Validate checks the field values on CommandLineOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CommandLineOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CommandLineOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CommandLineOptionsMultiError, or nil if none found. 
+func (m *CommandLineOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *CommandLineOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for BaseId + + // no validation rules for UseDynamicBaseId + + // no validation rules for SkipHotRestartOnNoParent + + // no validation rules for SkipHotRestartParentStats + + // no validation rules for BaseIdPath + + // no validation rules for Concurrency + + // no validation rules for ConfigPath + + // no validation rules for ConfigYaml + + // no validation rules for AllowUnknownStaticFields + + // no validation rules for RejectUnknownDynamicFields + + // no validation rules for IgnoreUnknownDynamicFields + + // no validation rules for AdminAddressPath + + // no validation rules for LocalAddressIpVersion + + // no validation rules for LogLevel + + // no validation rules for ComponentLogLevel + + // no validation rules for LogFormat + + // no validation rules for LogFormatEscaped + + // no validation rules for LogPath + + // no validation rules for ServiceCluster + + // no validation rules for ServiceNode + + // no validation rules for ServiceZone + + if all { + switch v := interface{}(m.GetFileFlushInterval()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommandLineOptionsValidationError{ + field: "FileFlushInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommandLineOptionsValidationError{ + field: "FileFlushInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFileFlushInterval()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommandLineOptionsValidationError{ + field: "FileFlushInterval", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetDrainTime()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommandLineOptionsValidationError{ + field: "DrainTime", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommandLineOptionsValidationError{ + field: "DrainTime", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDrainTime()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommandLineOptionsValidationError{ + field: "DrainTime", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for DrainStrategy + + if all { + switch v := interface{}(m.GetParentShutdownTime()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommandLineOptionsValidationError{ + field: "ParentShutdownTime", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommandLineOptionsValidationError{ + field: "ParentShutdownTime", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetParentShutdownTime()).(interface{ Validate() error }); ok { + if err 
:= v.Validate(); err != nil { + return CommandLineOptionsValidationError{ + field: "ParentShutdownTime", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Mode + + // no validation rules for DisableHotRestart + + // no validation rules for EnableMutexTracing + + // no validation rules for RestartEpoch + + // no validation rules for CpusetThreads + + // no validation rules for EnableFineGrainLogging + + // no validation rules for SocketPath + + // no validation rules for SocketMode + + // no validation rules for EnableCoreDump + + if len(errors) > 0 { + return CommandLineOptionsMultiError(errors) + } + + return nil +} + +// CommandLineOptionsMultiError is an error wrapping multiple validation errors +// returned by CommandLineOptions.ValidateAll() if the designated constraints +// aren't met. +type CommandLineOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CommandLineOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CommandLineOptionsMultiError) AllErrors() []error { return m } + +// CommandLineOptionsValidationError is the validation error returned by +// CommandLineOptions.Validate if the designated constraints aren't met. +type CommandLineOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CommandLineOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CommandLineOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CommandLineOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CommandLineOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CommandLineOptionsValidationError) ErrorName() string { + return "CommandLineOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e CommandLineOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCommandLineOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CommandLineOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CommandLineOptionsValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info_vtproto.pb.go new file mode 100644 index 0000000000..5bf55561e9 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info_vtproto.pb.go @@ -0,0 +1,671 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
+// source: envoy/admin/v3/server_info.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ServerInfo) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServerInfo) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ServerInfo) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Node != nil { + if vtmsg, ok := interface{}(m.Node).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Node) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + if m.CommandLineOptions != nil { + size, err := m.CommandLineOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.HotRestartVersion) > 0 { + i -= len(m.HotRestartVersion) + copy(dAtA[i:], m.HotRestartVersion) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HotRestartVersion))) + i-- + dAtA[i] = 0x2a + } + if m.UptimeAllEpochs != nil { + size, err := (*durationpb.Duration)(m.UptimeAllEpochs).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.UptimeCurrentEpoch != nil { + size, err := (*durationpb.Duration)(m.UptimeCurrentEpoch).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.State != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x10 + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommandLineOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommandLineOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommandLineOptions) 
MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SkipHotRestartParentStats { + i-- + if m.SkipHotRestartParentStats { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc0 + } + if m.SkipHotRestartOnNoParent { + i-- + if m.SkipHotRestartOnNoParent { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb8 + } + if len(m.StatsTag) > 0 { + for iNdEx := len(m.StatsTag) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.StatsTag[iNdEx]) + copy(dAtA[i:], m.StatsTag[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StatsTag[iNdEx]))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb2 + } + } + if m.EnableCoreDump { + i-- + if m.EnableCoreDump { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa8 + } + if m.SocketMode != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.SocketMode)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa0 + } + if len(m.SocketPath) > 0 { + i -= len(m.SocketPath) + copy(dAtA[i:], m.SocketPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SocketPath))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + if m.EnableFineGrainLogging { + i-- + if m.EnableFineGrainLogging { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x90 + } + if m.DrainStrategy != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DrainStrategy)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x88 + } + if len(m.BaseIdPath) > 0 { + i -= len(m.BaseIdPath) + copy(dAtA[i:], m.BaseIdPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.BaseIdPath))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 + } + if m.UseDynamicBaseId { + i-- + if m.UseDynamicBaseId { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf8 + } + if m.IgnoreUnknownDynamicFields { + i-- + if m.IgnoreUnknownDynamicFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf0 + } + if len(m.DisabledExtensions) > 0 { + for iNdEx := len(m.DisabledExtensions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DisabledExtensions[iNdEx]) + copy(dAtA[i:], m.DisabledExtensions[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DisabledExtensions[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + } + if m.LogFormatEscaped { + i-- + if m.LogFormatEscaped { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd8 + } + if m.RejectUnknownDynamicFields { + i-- + if m.RejectUnknownDynamicFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd0 + } + if m.CpusetThreads { + i-- + if m.CpusetThreads { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc8 + } + if m.RestartEpoch != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RestartEpoch)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc0 + } + if m.EnableMutexTracing { + i-- + if m.EnableMutexTracing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb8 + } + if m.DisableHotRestart { + i-- + if m.DisableHotRestart { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb0 + } + if m.Mode != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Mode)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if m.ParentShutdownTime != nil { 
+ size, err := (*durationpb.Duration)(m.ParentShutdownTime).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if m.DrainTime != nil { + size, err := (*durationpb.Duration)(m.DrainTime).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.FileFlushInterval != nil { + size, err := (*durationpb.Duration)(m.FileFlushInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if len(m.ServiceZone) > 0 { + i -= len(m.ServiceZone) + copy(dAtA[i:], m.ServiceZone) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceZone))) + i-- + dAtA[i] = 0x7a + } + if len(m.ServiceNode) > 0 { + i -= len(m.ServiceNode) + copy(dAtA[i:], m.ServiceNode) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceNode))) + i-- + dAtA[i] = 0x72 + } + if len(m.ServiceCluster) > 0 { + i -= len(m.ServiceCluster) + copy(dAtA[i:], m.ServiceCluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceCluster))) + i-- + dAtA[i] = 0x6a + } + if len(m.LogPath) > 0 { + i -= len(m.LogPath) + copy(dAtA[i:], m.LogPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LogPath))) + i-- + dAtA[i] = 0x5a + } + if len(m.LogFormat) > 0 { + i -= len(m.LogFormat) + copy(dAtA[i:], m.LogFormat) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LogFormat))) + i-- + dAtA[i] = 0x52 + } + if len(m.ComponentLogLevel) > 0 { + i -= len(m.ComponentLogLevel) + copy(dAtA[i:], m.ComponentLogLevel) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ComponentLogLevel))) + i-- + dAtA[i] = 0x4a + } + if len(m.LogLevel) > 0 { + i -= len(m.LogLevel) + copy(dAtA[i:], m.LogLevel) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LogLevel))) + i-- + dAtA[i] = 0x42 + } + if m.LocalAddressIpVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.LocalAddressIpVersion)) + i-- + dAtA[i] = 0x38 + } + if len(m.AdminAddressPath) > 0 { + i -= len(m.AdminAddressPath) + copy(dAtA[i:], m.AdminAddressPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AdminAddressPath))) + i-- + dAtA[i] = 0x32 + } + if m.AllowUnknownStaticFields { + i-- + if m.AllowUnknownStaticFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.ConfigYaml) > 0 { + i -= len(m.ConfigYaml) + copy(dAtA[i:], m.ConfigYaml) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ConfigYaml))) + i-- + dAtA[i] = 0x22 + } + if len(m.ConfigPath) > 0 { + i -= len(m.ConfigPath) + copy(dAtA[i:], m.ConfigPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ConfigPath))) + i-- + dAtA[i] = 0x1a + } + if m.Concurrency != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Concurrency)) + i-- + dAtA[i] = 0x10 + } + if m.BaseId != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.BaseId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ServerInfo) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Version) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.State != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.State)) + } + if m.UptimeCurrentEpoch != nil { + l = (*durationpb.Duration)(m.UptimeCurrentEpoch).SizeVT() + 
n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UptimeAllEpochs != nil { + l = (*durationpb.Duration)(m.UptimeAllEpochs).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.HotRestartVersion) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CommandLineOptions != nil { + l = m.CommandLineOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Node != nil { + if size, ok := interface{}(m.Node).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Node) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CommandLineOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BaseId != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.BaseId)) + } + if m.Concurrency != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Concurrency)) + } + l = len(m.ConfigPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ConfigYaml) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AllowUnknownStaticFields { + n += 2 + } + l = len(m.AdminAddressPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LocalAddressIpVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.LocalAddressIpVersion)) + } + l = len(m.LogLevel) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ComponentLogLevel) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.LogFormat) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.LogPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ServiceCluster) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ServiceNode) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ServiceZone) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FileFlushInterval != nil { + l = (*durationpb.Duration)(m.FileFlushInterval).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DrainTime != nil { + l = (*durationpb.Duration)(m.DrainTime).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ParentShutdownTime != nil { + l = (*durationpb.Duration)(m.ParentShutdownTime).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Mode != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.Mode)) + } + if m.DisableHotRestart { + n += 3 + } + if m.EnableMutexTracing { + n += 3 + } + if m.RestartEpoch != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.RestartEpoch)) + } + if m.CpusetThreads { + n += 3 + } + if m.RejectUnknownDynamicFields { + n += 3 + } + if m.LogFormatEscaped { + n += 3 + } + if len(m.DisabledExtensions) > 0 { + for _, s := range m.DisabledExtensions { + l = len(s) + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.IgnoreUnknownDynamicFields { + n += 3 + } + if m.UseDynamicBaseId { + n += 3 + } + l = len(m.BaseIdPath) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DrainStrategy != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.DrainStrategy)) + } + if m.EnableFineGrainLogging { + n += 3 + } + l = len(m.SocketPath) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SocketMode != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.SocketMode)) + } + if m.EnableCoreDump { + n += 3 + } + if 
len(m.StatsTag) > 0 { + for _, s := range m.StatsTag { + l = len(s) + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.SkipHotRestartOnNoParent { + n += 3 + } + if m.SkipHotRestartParentStats { + n += 3 + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.go new file mode 100644 index 0000000000..f30e4500e4 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.go @@ -0,0 +1,182 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/admin/v3/tap.proto + +package adminv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/tap/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The /tap admin request body that is used to configure an active tap session. +type TapRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The opaque configuration ID used to match the configuration to a loaded extension. + // A tap extension configures a similar opaque ID that is used to match. + ConfigId string `protobuf:"bytes,1,opt,name=config_id,json=configId,proto3" json:"config_id,omitempty"` + // The tap configuration to load. + TapConfig *v3.TapConfig `protobuf:"bytes,2,opt,name=tap_config,json=tapConfig,proto3" json:"tap_config,omitempty"` +} + +func (x *TapRequest) Reset() { + *x = TapRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_admin_v3_tap_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TapRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TapRequest) ProtoMessage() {} + +func (x *TapRequest) ProtoReflect() protoreflect.Message { + mi := &file_envoy_admin_v3_tap_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TapRequest.ProtoReflect.Descriptor instead. 
+func (*TapRequest) Descriptor() ([]byte, []int) { + return file_envoy_admin_v3_tap_proto_rawDescGZIP(), []int{0} +} + +func (x *TapRequest) GetConfigId() string { + if x != nil { + return x.ConfigId + } + return "" +} + +func (x *TapRequest) GetTapConfig() *v3.TapConfig { + if x != nil { + return x.TapConfig + } + return nil +} + +var File_envoy_admin_v3_tap_proto protoreflect.FileDescriptor + +var file_envoy_admin_v3_tap_proto_rawDesc = []byte{ + 0x0a, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, + 0x2f, 0x74, 0x61, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x61, 0x70, 0x2f, 0x76, 0x33, 0x2f, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa2, 0x01, 0x0a, 0x0a, 0x54, 0x61, 0x70, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x49, 0x64, 0x12, 0x47, 0x0a, 0x0a, + 0x74, 0x61, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x74, 0x61, 0x70, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x54, 0x61, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x71, 0xba, 0x80, + 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x54, 0x61, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_admin_v3_tap_proto_rawDescOnce sync.Once + file_envoy_admin_v3_tap_proto_rawDescData = file_envoy_admin_v3_tap_proto_rawDesc +) + +func file_envoy_admin_v3_tap_proto_rawDescGZIP() []byte { + file_envoy_admin_v3_tap_proto_rawDescOnce.Do(func() { + file_envoy_admin_v3_tap_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_envoy_admin_v3_tap_proto_rawDescData) + }) + return file_envoy_admin_v3_tap_proto_rawDescData +} + +var file_envoy_admin_v3_tap_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_admin_v3_tap_proto_goTypes = []interface{}{ + (*TapRequest)(nil), // 0: envoy.admin.v3.TapRequest + (*v3.TapConfig)(nil), // 1: envoy.config.tap.v3.TapConfig +} +var file_envoy_admin_v3_tap_proto_depIdxs = []int32{ + 1, // 0: envoy.admin.v3.TapRequest.tap_config:type_name -> envoy.config.tap.v3.TapConfig + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_admin_v3_tap_proto_init() } +func file_envoy_admin_v3_tap_proto_init() { + if File_envoy_admin_v3_tap_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_admin_v3_tap_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TapRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_admin_v3_tap_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_admin_v3_tap_proto_goTypes, + DependencyIndexes: file_envoy_admin_v3_tap_proto_depIdxs, + MessageInfos: file_envoy_admin_v3_tap_proto_msgTypes, + }.Build() + File_envoy_admin_v3_tap_proto = out.File + file_envoy_admin_v3_tap_proto_rawDesc = nil + file_envoy_admin_v3_tap_proto_goTypes = nil + file_envoy_admin_v3_tap_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.validate.go new file mode 100644 index 0000000000..d524f2aefe --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.validate.go @@ -0,0 +1,187 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/admin/v3/tap.proto + +package adminv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on TapRequest with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *TapRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TapRequest with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in TapRequestMultiError, or +// nil if none found. 
+func (m *TapRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *TapRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetConfigId()) < 1 { + err := TapRequestValidationError{ + field: "ConfigId", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetTapConfig() == nil { + err := TapRequestValidationError{ + field: "TapConfig", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTapConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TapRequestValidationError{ + field: "TapConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TapRequestValidationError{ + field: "TapConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTapConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TapRequestValidationError{ + field: "TapConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return TapRequestMultiError(errors) + } + + return nil +} + +// TapRequestMultiError is an error wrapping multiple validation errors +// returned by TapRequest.ValidateAll() if the designated constraints aren't met. +type TapRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TapRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TapRequestMultiError) AllErrors() []error { return m } + +// TapRequestValidationError is the validation error returned by +// TapRequest.Validate if the designated constraints aren't met. +type TapRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TapRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TapRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TapRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TapRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e TapRequestValidationError) ErrorName() string { return "TapRequestValidationError" } + +// Error satisfies the builtin error interface +func (e TapRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTapRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TapRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TapRequestValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap_vtproto.pb.go new file mode 100644 index 0000000000..4524bfb4f7 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap_vtproto.pb.go @@ -0,0 +1,106 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/admin/v3/tap.proto + +package adminv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TapRequest) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TapRequest) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TapRequest) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TapConfig != nil { + if vtmsg, ok := interface{}(m.TapConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TapConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ConfigId) > 0 { + i -= len(m.ConfigId) + copy(dAtA[i:], m.ConfigId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ConfigId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TapRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ConfigId) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TapConfig != nil { + if size, ok := interface{}(m.TapConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TapConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.go 
b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.go new file mode 100644 index 0000000000..258fcfe2fb --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.go @@ -0,0 +1,159 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/annotations/deprecation.proto + +package annotations + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_envoy_annotations_deprecation_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 189503207, + Name: "envoy.annotations.disallowed_by_default", + Tag: "varint,189503207,opt,name=disallowed_by_default", + Filename: "envoy/annotations/deprecation.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 157299826, + Name: "envoy.annotations.deprecated_at_minor_version", + Tag: "bytes,157299826,opt,name=deprecated_at_minor_version", + Filename: "envoy/annotations/deprecation.proto", + }, + { + ExtendedType: (*descriptorpb.EnumValueOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 70100853, + Name: "envoy.annotations.disallowed_by_default_enum", + Tag: "varint,70100853,opt,name=disallowed_by_default_enum", + Filename: "envoy/annotations/deprecation.proto", + }, + { + ExtendedType: (*descriptorpb.EnumValueOptions)(nil), + ExtensionType: (*string)(nil), + Field: 181198657, + Name: "envoy.annotations.deprecated_at_minor_version_enum", + Tag: "bytes,181198657,opt,name=deprecated_at_minor_version_enum", + Filename: "envoy/annotations/deprecation.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. +var ( + // optional bool disallowed_by_default = 189503207; + E_DisallowedByDefault = &file_envoy_annotations_deprecation_proto_extTypes[0] + // The API major and minor version on which the field was deprecated + // (e.g., "3.5" for major version 3 and minor version 5). + // + // optional string deprecated_at_minor_version = 157299826; + E_DeprecatedAtMinorVersion = &file_envoy_annotations_deprecation_proto_extTypes[1] +) + +// Extension fields to descriptorpb.EnumValueOptions. +var ( + // optional bool disallowed_by_default_enum = 70100853; + E_DisallowedByDefaultEnum = &file_envoy_annotations_deprecation_proto_extTypes[2] + // The API major and minor version on which the enum value was deprecated + // (e.g., "3.5" for major version 3 and minor version 5). 
+ // + // optional string deprecated_at_minor_version_enum = 181198657; + E_DeprecatedAtMinorVersionEnum = &file_envoy_annotations_deprecation_proto_extTypes[3] +) + +var File_envoy_annotations_deprecation_proto protoreflect.FileDescriptor + +var file_envoy_annotations_deprecation_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x54, 0x0a, 0x15, 0x64, 0x69, + 0x73, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0xe7, 0xad, 0xae, 0x5a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x42, 0x79, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x3a, 0x5f, 0x0a, 0x1b, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, + 0x74, 0x5f, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf2, + 0xe8, 0x80, 0x4b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x41, 0x74, 0x4d, 0x69, 0x6e, 0x6f, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x3a, 0x61, 0x0a, 0x1a, 0x64, 0x69, 0x73, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, + 0x62, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12, + 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0xf5, 0xce, 0xb6, 0x21, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x64, 0x69, 0x73, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x42, 0x79, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x6c, 0x0a, 0x20, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xbe, 0xb3, 0x56, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x1c, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x41, 0x74, 0x4d, 0x69, 0x6e, 0x6f, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x45, 0x6e, + 0x75, 0x6d, 0x42, 0x3a, 0x5a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 
0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_envoy_annotations_deprecation_proto_goTypes = []interface{}{ + (*descriptorpb.FieldOptions)(nil), // 0: google.protobuf.FieldOptions + (*descriptorpb.EnumValueOptions)(nil), // 1: google.protobuf.EnumValueOptions +} +var file_envoy_annotations_deprecation_proto_depIdxs = []int32{ + 0, // 0: envoy.annotations.disallowed_by_default:extendee -> google.protobuf.FieldOptions + 0, // 1: envoy.annotations.deprecated_at_minor_version:extendee -> google.protobuf.FieldOptions + 1, // 2: envoy.annotations.disallowed_by_default_enum:extendee -> google.protobuf.EnumValueOptions + 1, // 3: envoy.annotations.deprecated_at_minor_version_enum:extendee -> google.protobuf.EnumValueOptions + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 0, // [0:4] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_annotations_deprecation_proto_init() } +func file_envoy_annotations_deprecation_proto_init() { + if File_envoy_annotations_deprecation_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_annotations_deprecation_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 4, + NumServices: 0, + }, + GoTypes: file_envoy_annotations_deprecation_proto_goTypes, + DependencyIndexes: file_envoy_annotations_deprecation_proto_depIdxs, + ExtensionInfos: file_envoy_annotations_deprecation_proto_extTypes, + }.Build() + File_envoy_annotations_deprecation_proto = out.File + file_envoy_annotations_deprecation_proto_rawDesc = nil + file_envoy_annotations_deprecation_proto_goTypes = nil + file_envoy_annotations_deprecation_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.validate.go new file mode 100644 index 0000000000..be58aa5244 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.validate.go @@ -0,0 +1,37 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/annotations/deprecation.proto + +package annotations + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.go new file mode 100644 index 0000000000..828c87c5e3 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.go @@ -0,0 +1,179 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/annotations/resource.proto + +package annotations + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ResourceAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Annotation for xDS services that indicates the fully-qualified Protobuf type for the resource + // type. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` +} + +func (x *ResourceAnnotation) Reset() { + *x = ResourceAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_annotations_resource_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceAnnotation) ProtoMessage() {} + +func (x *ResourceAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_envoy_annotations_resource_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceAnnotation.ProtoReflect.Descriptor instead. +func (*ResourceAnnotation) Descriptor() ([]byte, []int) { + return file_envoy_annotations_resource_proto_rawDescGZIP(), []int{0} +} + +func (x *ResourceAnnotation) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +var file_envoy_annotations_resource_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.ServiceOptions)(nil), + ExtensionType: (*ResourceAnnotation)(nil), + Field: 265073217, + Name: "envoy.annotations.resource", + Tag: "bytes,265073217,opt,name=resource", + Filename: "envoy/annotations/resource.proto", + }, +} + +// Extension fields to descriptorpb.ServiceOptions. 
+var ( + // optional envoy.annotations.ResourceAnnotation resource = 265073217; + E_Resource = &file_envoy_annotations_resource_proto_extTypes[0] +) + +var File_envoy_annotations_resource_proto protoreflect.FileDescriptor + +var file_envoy_annotations_resource_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x11, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x28, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x3a, 0x65, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, + 0xe4, 0xb2, 0x7e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x3a, 0x5a, 0x38, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_annotations_resource_proto_rawDescOnce sync.Once + file_envoy_annotations_resource_proto_rawDescData = file_envoy_annotations_resource_proto_rawDesc +) + +func file_envoy_annotations_resource_proto_rawDescGZIP() []byte { + file_envoy_annotations_resource_proto_rawDescOnce.Do(func() { + file_envoy_annotations_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_annotations_resource_proto_rawDescData) + }) + return file_envoy_annotations_resource_proto_rawDescData +} + +var file_envoy_annotations_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_annotations_resource_proto_goTypes = []interface{}{ + (*ResourceAnnotation)(nil), // 0: envoy.annotations.ResourceAnnotation + (*descriptorpb.ServiceOptions)(nil), // 1: google.protobuf.ServiceOptions +} +var file_envoy_annotations_resource_proto_depIdxs = []int32{ + 1, // 0: envoy.annotations.resource:extendee -> google.protobuf.ServiceOptions + 0, // 1: envoy.annotations.resource:type_name -> envoy.annotations.ResourceAnnotation + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_annotations_resource_proto_init() } +func file_envoy_annotations_resource_proto_init() { + 
if File_envoy_annotations_resource_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_annotations_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_annotations_resource_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_envoy_annotations_resource_proto_goTypes, + DependencyIndexes: file_envoy_annotations_resource_proto_depIdxs, + MessageInfos: file_envoy_annotations_resource_proto_msgTypes, + ExtensionInfos: file_envoy_annotations_resource_proto_extTypes, + }.Build() + File_envoy_annotations_resource_proto = out.File + file_envoy_annotations_resource_proto_rawDesc = nil + file_envoy_annotations_resource_proto_goTypes = nil + file_envoy_annotations_resource_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.validate.go new file mode 100644 index 0000000000..2929a5813b --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.validate.go @@ -0,0 +1,141 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/annotations/resource.proto + +package annotations + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ResourceAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ResourceAnnotation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceAnnotation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ResourceAnnotationMultiError, or nil if none found. +func (m *ResourceAnnotation) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceAnnotation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Type + + if len(errors) > 0 { + return ResourceAnnotationMultiError(errors) + } + + return nil +} + +// ResourceAnnotationMultiError is an error wrapping multiple validation errors +// returned by ResourceAnnotation.ValidateAll() if the designated constraints +// aren't met. +type ResourceAnnotationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ResourceAnnotationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceAnnotationMultiError) AllErrors() []error { return m } + +// ResourceAnnotationValidationError is the validation error returned by +// ResourceAnnotation.Validate if the designated constraints aren't met. +type ResourceAnnotationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceAnnotationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceAnnotationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceAnnotationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceAnnotationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResourceAnnotationValidationError) ErrorName() string { + return "ResourceAnnotationValidationError" +} + +// Error satisfies the builtin error interface +func (e ResourceAnnotationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourceAnnotation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceAnnotationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceAnnotationValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource_vtproto.pb.go new file mode 100644 index 0000000000..324cb09166 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource_vtproto.pb.go @@ -0,0 +1,73 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/annotations/resource.proto + +package annotations + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ResourceAnnotation) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceAnnotation) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ResourceAnnotation) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResourceAnnotation) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.go new file mode 100644 index 0000000000..34996d4975 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.go @@ -0,0 +1,1924 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/accesslog/v3/accesslog.proto + +package accesslogv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v32 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v34 "github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3" + v33 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ComparisonFilter_Op int32 + +const ( + // = + ComparisonFilter_EQ ComparisonFilter_Op = 0 + // >= + ComparisonFilter_GE ComparisonFilter_Op = 1 + // <= + ComparisonFilter_LE ComparisonFilter_Op = 2 +) + +// Enum value maps for ComparisonFilter_Op. 
+var ( + ComparisonFilter_Op_name = map[int32]string{ + 0: "EQ", + 1: "GE", + 2: "LE", + } + ComparisonFilter_Op_value = map[string]int32{ + "EQ": 0, + "GE": 1, + "LE": 2, + } +) + +func (x ComparisonFilter_Op) Enum() *ComparisonFilter_Op { + p := new(ComparisonFilter_Op) + *p = x + return p +} + +func (x ComparisonFilter_Op) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ComparisonFilter_Op) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_accesslog_v3_accesslog_proto_enumTypes[0].Descriptor() +} + +func (ComparisonFilter_Op) Type() protoreflect.EnumType { + return &file_envoy_config_accesslog_v3_accesslog_proto_enumTypes[0] +} + +func (x ComparisonFilter_Op) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ComparisonFilter_Op.Descriptor instead. +func (ComparisonFilter_Op) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{2, 0} +} + +type GrpcStatusFilter_Status int32 + +const ( + GrpcStatusFilter_OK GrpcStatusFilter_Status = 0 + GrpcStatusFilter_CANCELED GrpcStatusFilter_Status = 1 + GrpcStatusFilter_UNKNOWN GrpcStatusFilter_Status = 2 + GrpcStatusFilter_INVALID_ARGUMENT GrpcStatusFilter_Status = 3 + GrpcStatusFilter_DEADLINE_EXCEEDED GrpcStatusFilter_Status = 4 + GrpcStatusFilter_NOT_FOUND GrpcStatusFilter_Status = 5 + GrpcStatusFilter_ALREADY_EXISTS GrpcStatusFilter_Status = 6 + GrpcStatusFilter_PERMISSION_DENIED GrpcStatusFilter_Status = 7 + GrpcStatusFilter_RESOURCE_EXHAUSTED GrpcStatusFilter_Status = 8 + GrpcStatusFilter_FAILED_PRECONDITION GrpcStatusFilter_Status = 9 + GrpcStatusFilter_ABORTED GrpcStatusFilter_Status = 10 + GrpcStatusFilter_OUT_OF_RANGE GrpcStatusFilter_Status = 11 + GrpcStatusFilter_UNIMPLEMENTED GrpcStatusFilter_Status = 12 + GrpcStatusFilter_INTERNAL GrpcStatusFilter_Status = 13 + GrpcStatusFilter_UNAVAILABLE GrpcStatusFilter_Status = 14 + GrpcStatusFilter_DATA_LOSS GrpcStatusFilter_Status = 15 + GrpcStatusFilter_UNAUTHENTICATED GrpcStatusFilter_Status = 16 +) + +// Enum value maps for GrpcStatusFilter_Status. 
+var ( + GrpcStatusFilter_Status_name = map[int32]string{ + 0: "OK", + 1: "CANCELED", + 2: "UNKNOWN", + 3: "INVALID_ARGUMENT", + 4: "DEADLINE_EXCEEDED", + 5: "NOT_FOUND", + 6: "ALREADY_EXISTS", + 7: "PERMISSION_DENIED", + 8: "RESOURCE_EXHAUSTED", + 9: "FAILED_PRECONDITION", + 10: "ABORTED", + 11: "OUT_OF_RANGE", + 12: "UNIMPLEMENTED", + 13: "INTERNAL", + 14: "UNAVAILABLE", + 15: "DATA_LOSS", + 16: "UNAUTHENTICATED", + } + GrpcStatusFilter_Status_value = map[string]int32{ + "OK": 0, + "CANCELED": 1, + "UNKNOWN": 2, + "INVALID_ARGUMENT": 3, + "DEADLINE_EXCEEDED": 4, + "NOT_FOUND": 5, + "ALREADY_EXISTS": 6, + "PERMISSION_DENIED": 7, + "RESOURCE_EXHAUSTED": 8, + "FAILED_PRECONDITION": 9, + "ABORTED": 10, + "OUT_OF_RANGE": 11, + "UNIMPLEMENTED": 12, + "INTERNAL": 13, + "UNAVAILABLE": 14, + "DATA_LOSS": 15, + "UNAUTHENTICATED": 16, + } +) + +func (x GrpcStatusFilter_Status) Enum() *GrpcStatusFilter_Status { + p := new(GrpcStatusFilter_Status) + *p = x + return p +} + +func (x GrpcStatusFilter_Status) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcStatusFilter_Status) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_accesslog_v3_accesslog_proto_enumTypes[1].Descriptor() +} + +func (GrpcStatusFilter_Status) Type() protoreflect.EnumType { + return &file_envoy_config_accesslog_v3_accesslog_proto_enumTypes[1] +} + +func (x GrpcStatusFilter_Status) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcStatusFilter_Status.Descriptor instead. +func (GrpcStatusFilter_Status) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{12, 0} +} + +type AccessLog struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the access log extension configuration. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Filter which is used to determine if the access log needs to be written. + Filter *AccessLogFilter `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // Custom configuration that must be set according to the access logger extension being instantiated. + // [#extension-category: envoy.access_loggers] + // + // Types that are assignable to ConfigType: + // + // *AccessLog_TypedConfig + ConfigType isAccessLog_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *AccessLog) Reset() { + *x = AccessLog{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AccessLog) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AccessLog) ProtoMessage() {} + +func (x *AccessLog) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AccessLog.ProtoReflect.Descriptor instead. 
+func (*AccessLog) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{0} +} + +func (x *AccessLog) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *AccessLog) GetFilter() *AccessLogFilter { + if x != nil { + return x.Filter + } + return nil +} + +func (m *AccessLog) GetConfigType() isAccessLog_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *AccessLog) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*AccessLog_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isAccessLog_ConfigType interface { + isAccessLog_ConfigType() +} + +type AccessLog_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,4,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*AccessLog_TypedConfig) isAccessLog_ConfigType() {} + +// [#next-free-field: 14] +type AccessLogFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to FilterSpecifier: + // + // *AccessLogFilter_StatusCodeFilter + // *AccessLogFilter_DurationFilter + // *AccessLogFilter_NotHealthCheckFilter + // *AccessLogFilter_TraceableFilter + // *AccessLogFilter_RuntimeFilter + // *AccessLogFilter_AndFilter + // *AccessLogFilter_OrFilter + // *AccessLogFilter_HeaderFilter + // *AccessLogFilter_ResponseFlagFilter + // *AccessLogFilter_GrpcStatusFilter + // *AccessLogFilter_ExtensionFilter + // *AccessLogFilter_MetadataFilter + // *AccessLogFilter_LogTypeFilter + FilterSpecifier isAccessLogFilter_FilterSpecifier `protobuf_oneof:"filter_specifier"` +} + +func (x *AccessLogFilter) Reset() { + *x = AccessLogFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AccessLogFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AccessLogFilter) ProtoMessage() {} + +func (x *AccessLogFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AccessLogFilter.ProtoReflect.Descriptor instead. 
+func (*AccessLogFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{1} +} + +func (m *AccessLogFilter) GetFilterSpecifier() isAccessLogFilter_FilterSpecifier { + if m != nil { + return m.FilterSpecifier + } + return nil +} + +func (x *AccessLogFilter) GetStatusCodeFilter() *StatusCodeFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_StatusCodeFilter); ok { + return x.StatusCodeFilter + } + return nil +} + +func (x *AccessLogFilter) GetDurationFilter() *DurationFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_DurationFilter); ok { + return x.DurationFilter + } + return nil +} + +func (x *AccessLogFilter) GetNotHealthCheckFilter() *NotHealthCheckFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_NotHealthCheckFilter); ok { + return x.NotHealthCheckFilter + } + return nil +} + +func (x *AccessLogFilter) GetTraceableFilter() *TraceableFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_TraceableFilter); ok { + return x.TraceableFilter + } + return nil +} + +func (x *AccessLogFilter) GetRuntimeFilter() *RuntimeFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_RuntimeFilter); ok { + return x.RuntimeFilter + } + return nil +} + +func (x *AccessLogFilter) GetAndFilter() *AndFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_AndFilter); ok { + return x.AndFilter + } + return nil +} + +func (x *AccessLogFilter) GetOrFilter() *OrFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_OrFilter); ok { + return x.OrFilter + } + return nil +} + +func (x *AccessLogFilter) GetHeaderFilter() *HeaderFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_HeaderFilter); ok { + return x.HeaderFilter + } + return nil +} + +func (x *AccessLogFilter) GetResponseFlagFilter() *ResponseFlagFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_ResponseFlagFilter); ok { + return x.ResponseFlagFilter + } + return nil +} + +func (x *AccessLogFilter) GetGrpcStatusFilter() *GrpcStatusFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_GrpcStatusFilter); ok { + return x.GrpcStatusFilter + } + return nil +} + +func (x *AccessLogFilter) GetExtensionFilter() *ExtensionFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_ExtensionFilter); ok { + return x.ExtensionFilter + } + return nil +} + +func (x *AccessLogFilter) GetMetadataFilter() *MetadataFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_MetadataFilter); ok { + return x.MetadataFilter + } + return nil +} + +func (x *AccessLogFilter) GetLogTypeFilter() *LogTypeFilter { + if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_LogTypeFilter); ok { + return x.LogTypeFilter + } + return nil +} + +type isAccessLogFilter_FilterSpecifier interface { + isAccessLogFilter_FilterSpecifier() +} + +type AccessLogFilter_StatusCodeFilter struct { + // Status code filter. + StatusCodeFilter *StatusCodeFilter `protobuf:"bytes,1,opt,name=status_code_filter,json=statusCodeFilter,proto3,oneof"` +} + +type AccessLogFilter_DurationFilter struct { + // Duration filter. + DurationFilter *DurationFilter `protobuf:"bytes,2,opt,name=duration_filter,json=durationFilter,proto3,oneof"` +} + +type AccessLogFilter_NotHealthCheckFilter struct { + // Not health check filter. + NotHealthCheckFilter *NotHealthCheckFilter `protobuf:"bytes,3,opt,name=not_health_check_filter,json=notHealthCheckFilter,proto3,oneof"` +} + +type AccessLogFilter_TraceableFilter struct { + // Traceable filter. 
+ TraceableFilter *TraceableFilter `protobuf:"bytes,4,opt,name=traceable_filter,json=traceableFilter,proto3,oneof"` +} + +type AccessLogFilter_RuntimeFilter struct { + // Runtime filter. + RuntimeFilter *RuntimeFilter `protobuf:"bytes,5,opt,name=runtime_filter,json=runtimeFilter,proto3,oneof"` +} + +type AccessLogFilter_AndFilter struct { + // And filter. + AndFilter *AndFilter `protobuf:"bytes,6,opt,name=and_filter,json=andFilter,proto3,oneof"` +} + +type AccessLogFilter_OrFilter struct { + // Or filter. + OrFilter *OrFilter `protobuf:"bytes,7,opt,name=or_filter,json=orFilter,proto3,oneof"` +} + +type AccessLogFilter_HeaderFilter struct { + // Header filter. + HeaderFilter *HeaderFilter `protobuf:"bytes,8,opt,name=header_filter,json=headerFilter,proto3,oneof"` +} + +type AccessLogFilter_ResponseFlagFilter struct { + // Response flag filter. + ResponseFlagFilter *ResponseFlagFilter `protobuf:"bytes,9,opt,name=response_flag_filter,json=responseFlagFilter,proto3,oneof"` +} + +type AccessLogFilter_GrpcStatusFilter struct { + // gRPC status filter. + GrpcStatusFilter *GrpcStatusFilter `protobuf:"bytes,10,opt,name=grpc_status_filter,json=grpcStatusFilter,proto3,oneof"` +} + +type AccessLogFilter_ExtensionFilter struct { + // Extension filter. + // [#extension-category: envoy.access_loggers.extension_filters] + ExtensionFilter *ExtensionFilter `protobuf:"bytes,11,opt,name=extension_filter,json=extensionFilter,proto3,oneof"` +} + +type AccessLogFilter_MetadataFilter struct { + // Metadata Filter + MetadataFilter *MetadataFilter `protobuf:"bytes,12,opt,name=metadata_filter,json=metadataFilter,proto3,oneof"` +} + +type AccessLogFilter_LogTypeFilter struct { + // Log Type Filter + LogTypeFilter *LogTypeFilter `protobuf:"bytes,13,opt,name=log_type_filter,json=logTypeFilter,proto3,oneof"` +} + +func (*AccessLogFilter_StatusCodeFilter) isAccessLogFilter_FilterSpecifier() {} + +func (*AccessLogFilter_DurationFilter) isAccessLogFilter_FilterSpecifier() {} + +func (*AccessLogFilter_NotHealthCheckFilter) isAccessLogFilter_FilterSpecifier() {} + +func (*AccessLogFilter_TraceableFilter) isAccessLogFilter_FilterSpecifier() {} + +func (*AccessLogFilter_RuntimeFilter) isAccessLogFilter_FilterSpecifier() {} + +func (*AccessLogFilter_AndFilter) isAccessLogFilter_FilterSpecifier() {} + +func (*AccessLogFilter_OrFilter) isAccessLogFilter_FilterSpecifier() {} + +func (*AccessLogFilter_HeaderFilter) isAccessLogFilter_FilterSpecifier() {} + +func (*AccessLogFilter_ResponseFlagFilter) isAccessLogFilter_FilterSpecifier() {} + +func (*AccessLogFilter_GrpcStatusFilter) isAccessLogFilter_FilterSpecifier() {} + +func (*AccessLogFilter_ExtensionFilter) isAccessLogFilter_FilterSpecifier() {} + +func (*AccessLogFilter_MetadataFilter) isAccessLogFilter_FilterSpecifier() {} + +func (*AccessLogFilter_LogTypeFilter) isAccessLogFilter_FilterSpecifier() {} + +// Filter on an integer comparison. +type ComparisonFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Comparison operator. + Op ComparisonFilter_Op `protobuf:"varint,1,opt,name=op,proto3,enum=envoy.config.accesslog.v3.ComparisonFilter_Op" json:"op,omitempty"` + // Value to compare against. 
+ Value *v3.RuntimeUInt32 `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *ComparisonFilter) Reset() { + *x = ComparisonFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ComparisonFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ComparisonFilter) ProtoMessage() {} + +func (x *ComparisonFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ComparisonFilter.ProtoReflect.Descriptor instead. +func (*ComparisonFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{2} +} + +func (x *ComparisonFilter) GetOp() ComparisonFilter_Op { + if x != nil { + return x.Op + } + return ComparisonFilter_EQ +} + +func (x *ComparisonFilter) GetValue() *v3.RuntimeUInt32 { + if x != nil { + return x.Value + } + return nil +} + +// Filters on HTTP response/status code. +type StatusCodeFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Comparison. + Comparison *ComparisonFilter `protobuf:"bytes,1,opt,name=comparison,proto3" json:"comparison,omitempty"` +} + +func (x *StatusCodeFilter) Reset() { + *x = StatusCodeFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatusCodeFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusCodeFilter) ProtoMessage() {} + +func (x *StatusCodeFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusCodeFilter.ProtoReflect.Descriptor instead. +func (*StatusCodeFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{3} +} + +func (x *StatusCodeFilter) GetComparison() *ComparisonFilter { + if x != nil { + return x.Comparison + } + return nil +} + +// Filters based on the duration of the request or stream, in milliseconds. +// For end of stream access logs, the total duration of the stream will be used. +// For :ref:`periodic access logs`, +// the duration of the stream at the time of log recording will be used. +type DurationFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Comparison. 
+ Comparison *ComparisonFilter `protobuf:"bytes,1,opt,name=comparison,proto3" json:"comparison,omitempty"` +} + +func (x *DurationFilter) Reset() { + *x = DurationFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DurationFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DurationFilter) ProtoMessage() {} + +func (x *DurationFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DurationFilter.ProtoReflect.Descriptor instead. +func (*DurationFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{4} +} + +func (x *DurationFilter) GetComparison() *ComparisonFilter { + if x != nil { + return x.Comparison + } + return nil +} + +// Filters for requests that are not health check requests. A health check +// request is marked by the health check filter. +type NotHealthCheckFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *NotHealthCheckFilter) Reset() { + *x = NotHealthCheckFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotHealthCheckFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotHealthCheckFilter) ProtoMessage() {} + +func (x *NotHealthCheckFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotHealthCheckFilter.ProtoReflect.Descriptor instead. +func (*NotHealthCheckFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{5} +} + +// Filters for requests that are traceable. See the tracing overview for more +// information on how a request becomes traceable. +type TraceableFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *TraceableFilter) Reset() { + *x = TraceableFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceableFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceableFilter) ProtoMessage() {} + +func (x *TraceableFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceableFilter.ProtoReflect.Descriptor instead. 
+func (*TraceableFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{6} +} + +// Filters for random sampling of requests. +type RuntimeFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Runtime key to get an optional overridden numerator for use in the + // “percent_sampled“ field. If found in runtime, this value will replace the + // default numerator. + RuntimeKey string `protobuf:"bytes,1,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"` + // The default sampling percentage. If not specified, defaults to 0% with + // denominator of 100. + PercentSampled *v31.FractionalPercent `protobuf:"bytes,2,opt,name=percent_sampled,json=percentSampled,proto3" json:"percent_sampled,omitempty"` + // By default, sampling pivots on the header + // :ref:`x-request-id` being + // present. If :ref:`x-request-id` + // is present, the filter will consistently sample across multiple hosts based + // on the runtime key value and the value extracted from + // :ref:`x-request-id`. If it is + // missing, or “use_independent_randomness“ is set to true, the filter will + // randomly sample based on the runtime key value alone. + // “use_independent_randomness“ can be used for logging kill switches within + // complex nested :ref:`AndFilter + // ` and :ref:`OrFilter + // ` blocks that are easier to + // reason about from a probability perspective (i.e., setting to true will + // cause the filter to behave like an independent random variable when + // composed within logical operator filters). + UseIndependentRandomness bool `protobuf:"varint,3,opt,name=use_independent_randomness,json=useIndependentRandomness,proto3" json:"use_independent_randomness,omitempty"` +} + +func (x *RuntimeFilter) Reset() { + *x = RuntimeFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RuntimeFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RuntimeFilter) ProtoMessage() {} + +func (x *RuntimeFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RuntimeFilter.ProtoReflect.Descriptor instead. +func (*RuntimeFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{7} +} + +func (x *RuntimeFilter) GetRuntimeKey() string { + if x != nil { + return x.RuntimeKey + } + return "" +} + +func (x *RuntimeFilter) GetPercentSampled() *v31.FractionalPercent { + if x != nil { + return x.PercentSampled + } + return nil +} + +func (x *RuntimeFilter) GetUseIndependentRandomness() bool { + if x != nil { + return x.UseIndependentRandomness + } + return false +} + +// Performs a logical “and” operation on the result of each filter in filters. +// Filters are evaluated sequentially and if one of them returns false, the +// filter returns false immediately. 
+type AndFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Filters []*AccessLogFilter `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` +} + +func (x *AndFilter) Reset() { + *x = AndFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AndFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AndFilter) ProtoMessage() {} + +func (x *AndFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AndFilter.ProtoReflect.Descriptor instead. +func (*AndFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{8} +} + +func (x *AndFilter) GetFilters() []*AccessLogFilter { + if x != nil { + return x.Filters + } + return nil +} + +// Performs a logical “or” operation on the result of each individual filter. +// Filters are evaluated sequentially and if one of them returns true, the +// filter returns true immediately. +type OrFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Filters []*AccessLogFilter `protobuf:"bytes,2,rep,name=filters,proto3" json:"filters,omitempty"` +} + +func (x *OrFilter) Reset() { + *x = OrFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OrFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OrFilter) ProtoMessage() {} + +func (x *OrFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OrFilter.ProtoReflect.Descriptor instead. +func (*OrFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{9} +} + +func (x *OrFilter) GetFilters() []*AccessLogFilter { + if x != nil { + return x.Filters + } + return nil +} + +// Filters requests based on the presence or value of a request header. +type HeaderFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Only requests with a header which matches the specified HeaderMatcher will + // pass the filter check. 
+ Header *v32.HeaderMatcher `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` +} + +func (x *HeaderFilter) Reset() { + *x = HeaderFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeaderFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderFilter) ProtoMessage() {} + +func (x *HeaderFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderFilter.ProtoReflect.Descriptor instead. +func (*HeaderFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{10} +} + +func (x *HeaderFilter) GetHeader() *v32.HeaderMatcher { + if x != nil { + return x.Header + } + return nil +} + +// Filters requests that received responses with an Envoy response flag set. +// A list of the response flags can be found +// in the access log formatter +// :ref:`documentation`. +type ResponseFlagFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Only responses with the any of the flags listed in this field will be + // logged. This field is optional. If it is not specified, then any response + // flag will pass the filter check. + Flags []string `protobuf:"bytes,1,rep,name=flags,proto3" json:"flags,omitempty"` +} + +func (x *ResponseFlagFilter) Reset() { + *x = ResponseFlagFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseFlagFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponseFlagFilter) ProtoMessage() {} + +func (x *ResponseFlagFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseFlagFilter.ProtoReflect.Descriptor instead. +func (*ResponseFlagFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{11} +} + +func (x *ResponseFlagFilter) GetFlags() []string { + if x != nil { + return x.Flags + } + return nil +} + +// Filters gRPC requests based on their response status. If a gRPC status is not +// provided, the filter will infer the status from the HTTP status code. +type GrpcStatusFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Logs only responses that have any one of the gRPC statuses in this field. + Statuses []GrpcStatusFilter_Status `protobuf:"varint,1,rep,packed,name=statuses,proto3,enum=envoy.config.accesslog.v3.GrpcStatusFilter_Status" json:"statuses,omitempty"` + // If included and set to true, the filter will instead block all responses + // with a gRPC status or inferred gRPC status enumerated in statuses, and + // allow all other responses. 
+ Exclude bool `protobuf:"varint,2,opt,name=exclude,proto3" json:"exclude,omitempty"` +} + +func (x *GrpcStatusFilter) Reset() { + *x = GrpcStatusFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcStatusFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcStatusFilter) ProtoMessage() {} + +func (x *GrpcStatusFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcStatusFilter.ProtoReflect.Descriptor instead. +func (*GrpcStatusFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{12} +} + +func (x *GrpcStatusFilter) GetStatuses() []GrpcStatusFilter_Status { + if x != nil { + return x.Statuses + } + return nil +} + +func (x *GrpcStatusFilter) GetExclude() bool { + if x != nil { + return x.Exclude + } + return false +} + +// Filters based on matching dynamic metadata. +// If the matcher path and key correspond to an existing key in dynamic +// metadata, the request is logged only if the matcher value is equal to the +// metadata value. If the matcher path and key *do not* correspond to an +// existing key in dynamic metadata, the request is logged only if +// match_if_key_not_found is "true" or unset. +type MetadataFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Matcher to check metadata for specified value. For example, to match on the + // access_log_hint metadata, set the filter to "envoy.common" and the path to + // "access_log_hint", and the value to "true". + Matcher *v33.MetadataMatcher `protobuf:"bytes,1,opt,name=matcher,proto3" json:"matcher,omitempty"` + // Default result if the key does not exist in dynamic metadata: if unset or + // true, then log; if false, then don't log. + MatchIfKeyNotFound *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=match_if_key_not_found,json=matchIfKeyNotFound,proto3" json:"match_if_key_not_found,omitempty"` +} + +func (x *MetadataFilter) Reset() { + *x = MetadataFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataFilter) ProtoMessage() {} + +func (x *MetadataFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataFilter.ProtoReflect.Descriptor instead. 
+func (*MetadataFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{13} +} + +func (x *MetadataFilter) GetMatcher() *v33.MetadataMatcher { + if x != nil { + return x.Matcher + } + return nil +} + +func (x *MetadataFilter) GetMatchIfKeyNotFound() *wrapperspb.BoolValue { + if x != nil { + return x.MatchIfKeyNotFound + } + return nil +} + +// Filters based on access log type. +type LogTypeFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Logs only records which their type is one of the types defined in this field. + Types []v34.AccessLogType `protobuf:"varint,1,rep,packed,name=types,proto3,enum=envoy.data.accesslog.v3.AccessLogType" json:"types,omitempty"` + // If this field is set to true, the filter will instead block all records + // with a access log type in types field, and allow all other records. + Exclude bool `protobuf:"varint,2,opt,name=exclude,proto3" json:"exclude,omitempty"` +} + +func (x *LogTypeFilter) Reset() { + *x = LogTypeFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogTypeFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogTypeFilter) ProtoMessage() {} + +func (x *LogTypeFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogTypeFilter.ProtoReflect.Descriptor instead. +func (*LogTypeFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{14} +} + +func (x *LogTypeFilter) GetTypes() []v34.AccessLogType { + if x != nil { + return x.Types + } + return nil +} + +func (x *LogTypeFilter) GetExclude() bool { + if x != nil { + return x.Exclude + } + return false +} + +// Extension filter is statically registered at runtime. +type ExtensionFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the filter implementation to instantiate. The name must + // match a statically registered filter. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Custom configuration that depends on the filter being instantiated. 
+ // + // Types that are assignable to ConfigType: + // + // *ExtensionFilter_TypedConfig + ConfigType isExtensionFilter_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *ExtensionFilter) Reset() { + *x = ExtensionFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionFilter) ProtoMessage() {} + +func (x *ExtensionFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionFilter.ProtoReflect.Descriptor instead. +func (*ExtensionFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{15} +} + +func (x *ExtensionFilter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *ExtensionFilter) GetConfigType() isExtensionFilter_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *ExtensionFilter) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*ExtensionFilter_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isExtensionFilter_ConfigType interface { + isExtensionFilter_ConfigType() +} + +type ExtensionFilter_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*ExtensionFilter_TypedConfig) isExtensionFilter_ConfigType() {} + +var File_envoy_config_accesslog_v3_accesslog_proto protoreflect.FileDescriptor + +var file_envoy_config_accesslog_v3_accesslog_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x64, 0x61, 0x74, + 0x61, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 
0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, + 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xee, 0x01, 0x0a, 0x09, 0x41, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x06, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, + 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x39, 0x0a, + 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, + 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, + 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x32, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x42, 0x0d, 0x0a, 0x0b, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, + 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xca, 0x09, 0x0a, 0x0f, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x12, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, + 0x6f, 0x64, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x0f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x0e, 0x64, 0x75, 0x72, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, + 0x68, 0x0a, 0x17, 0x6e, 0x6f, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x48, 0x00, 0x52, 0x14, 0x6e, 0x6f, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x57, 0x0a, 0x10, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x54, 0x72, 0x61, 0x63, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, + 0x00, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x12, 0x51, 0x0a, 0x0e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x45, 0x0a, 0x0a, 0x61, 0x6e, 0x64, 0x5f, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, + 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, + 0x00, 0x52, 0x09, 0x61, 0x6e, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x09, + 0x6f, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x08, 0x6f, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x4e, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x48, 0x00, 0x52, 0x0c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x61, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x66, 0x6c, 0x61, + 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c, 
0x74, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x12, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x12, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, + 0x67, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x57, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x0f, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, + 0x52, 0x0a, 0x0f, 0x6c, 0x6f, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, 0x6c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x17, 0x0a, 0x10, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xf9, 0x01, 0x0a, 0x10, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, + 0x69, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x02, 0x6f, 0x70, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x2e, 0x4f, 0x70, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, + 0x52, 0x02, 0x6f, 0x70, 0x12, 0x43, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 
0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, + 0x10, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x1c, 0x0a, 0x02, 0x4f, 0x70, 0x12, + 0x06, 0x0a, 0x02, 0x45, 0x51, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x45, 0x10, 0x01, 0x12, + 0x06, 0x0a, 0x02, 0x4c, 0x45, 0x10, 0x02, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x22, 0xa3, 0x01, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, + 0x69, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, + 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, + 0x01, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x3a, 0x38, 0x9a, + 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, + 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x9f, 0x01, 0x0a, 0x0e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0a, 0x63, 0x6f, + 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, + 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, + 0x6e, 0x3a, 0x36, 0x9a, 0xc5, 0x88, 0x1e, 0x31, 0x0a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x54, 0x0a, 0x14, 0x4e, 0x6f, 0x74, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, 0x37, 0x0a, 0x35, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, + 0x4a, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x63, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x72, 0x61, 0x63, + 0x65, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xf9, 0x01, 0x0a, 0x0d, + 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x28, 0x0a, + 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x72, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x0f, 0x70, 0x65, 0x72, 0x63, 0x65, + 0x6e, 0x74, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, + 0x6e, 0x74, 0x52, 0x0e, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, + 0x65, 0x64, 0x12, 0x3c, 0x0a, 0x1a, 0x75, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x70, 0x65, + 0x6e, 0x64, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x6e, 0x65, 0x73, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x75, 0x73, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x70, + 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x6e, 0x65, 0x73, 0x73, + 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, + 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x8e, 0x01, 0x0a, 0x09, 0x41, 0x6e, 0x64, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x07, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, 0x2a, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x41, + 0x6e, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x8c, 0x01, 0x0a, 0x08, 0x4f, 0x72, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x07, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 
0x72, 0x22, 0x8c, 0x01, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xfa, 0x01, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0xa7, 0x01, + 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x90, 0x01, + 0xfa, 0x42, 0x8c, 0x01, 0x92, 0x01, 0x88, 0x01, 0x22, 0x85, 0x01, 0x72, 0x82, 0x01, 0x52, 0x02, + 0x4c, 0x48, 0x52, 0x02, 0x55, 0x48, 0x52, 0x02, 0x55, 0x54, 0x52, 0x02, 0x4c, 0x52, 0x52, 0x02, + 0x55, 0x52, 0x52, 0x02, 0x55, 0x46, 0x52, 0x02, 0x55, 0x43, 0x52, 0x02, 0x55, 0x4f, 0x52, 0x02, + 0x4e, 0x52, 0x52, 0x02, 0x44, 0x49, 0x52, 0x02, 0x46, 0x49, 0x52, 0x02, 0x52, 0x4c, 0x52, 0x04, + 0x55, 0x41, 0x45, 0x58, 0x52, 0x04, 0x52, 0x4c, 0x53, 0x45, 0x52, 0x02, 0x44, 0x43, 0x52, 0x03, + 0x55, 0x52, 0x58, 0x52, 0x02, 0x53, 0x49, 0x52, 0x02, 0x49, 0x48, 0x52, 0x03, 0x44, 0x50, 0x45, + 0x52, 0x05, 0x55, 0x4d, 0x53, 0x44, 0x52, 0x52, 0x04, 0x52, 0x46, 0x43, 0x46, 0x52, 0x04, 0x4e, + 0x46, 0x43, 0x46, 0x52, 0x02, 0x44, 0x54, 0x52, 0x03, 0x55, 0x50, 0x45, 0x52, 0x02, 0x4e, 0x43, + 0x52, 0x02, 0x4f, 0x4d, 0x52, 0x02, 0x44, 0x46, 0x52, 0x02, 0x44, 0x4f, 0x52, 0x02, 0x44, 0x52, + 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x22, 0x80, 0x04, 0x0a, 0x10, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5d, 0x0a, 0x08, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x0d, + 0xfa, 0x42, 0x0a, 0x92, 0x01, 0x07, 0x22, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x08, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, + 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x22, 0xb8, 0x02, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x06, 0x0a, 0x02, + 0x4f, 0x4b, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x45, 0x44, + 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 
0x4f, 0x57, 0x4e, 0x10, 0x02, 0x12, + 0x14, 0x0a, 0x10, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x41, 0x52, 0x47, 0x55, 0x4d, + 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x45, 0x41, 0x44, 0x4c, 0x49, 0x4e, + 0x45, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, + 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, 0x12, 0x12, 0x0a, 0x0e, 0x41, + 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x06, 0x12, + 0x15, 0x0a, 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, + 0x4e, 0x49, 0x45, 0x44, 0x10, 0x07, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, + 0x43, 0x45, 0x5f, 0x45, 0x58, 0x48, 0x41, 0x55, 0x53, 0x54, 0x45, 0x44, 0x10, 0x08, 0x12, 0x17, + 0x0a, 0x13, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x43, 0x4f, 0x4e, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x0b, 0x0a, 0x07, 0x41, 0x42, 0x4f, 0x52, 0x54, + 0x45, 0x44, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, + 0x41, 0x4e, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x49, 0x4d, 0x50, 0x4c, + 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x0c, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x54, + 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x0d, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x41, 0x56, 0x41, + 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x0e, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x41, 0x54, 0x41, + 0x5f, 0x4c, 0x4f, 0x53, 0x53, 0x10, 0x0f, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x41, 0x55, 0x54, + 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x10, 0x3a, 0x38, 0x9a, 0xc5, + 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, + 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xda, 0x01, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x40, 0x0a, 0x07, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x16, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x66, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, + 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, + 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x66, + 0x4b, 0x65, 0x79, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x3a, 0x36, 0x9a, 0xc5, 0x88, + 0x1e, 0x31, 0x0a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x22, 0x76, 0x0a, 0x0d, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x12, 0x4b, 0x0a, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 
0x01, 0x20, + 0x03, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x42, 0x0d, 0xfa, 0x42, 0x0a, + 0x92, 0x01, 0x07, 0x22, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x22, 0xb6, 0x01, 0x0a, 0x0f, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, + 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x37, + 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x42, 0x91, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, + 0x27, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_accesslog_v3_accesslog_proto_rawDescOnce sync.Once + file_envoy_config_accesslog_v3_accesslog_proto_rawDescData = file_envoy_config_accesslog_v3_accesslog_proto_rawDesc +) + +func file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP() []byte { + file_envoy_config_accesslog_v3_accesslog_proto_rawDescOnce.Do(func() { + file_envoy_config_accesslog_v3_accesslog_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_accesslog_v3_accesslog_proto_rawDescData) + }) + return file_envoy_config_accesslog_v3_accesslog_proto_rawDescData +} + +var file_envoy_config_accesslog_v3_accesslog_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_envoy_config_accesslog_v3_accesslog_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_envoy_config_accesslog_v3_accesslog_proto_goTypes = []interface{}{ + (ComparisonFilter_Op)(0), // 0: 
envoy.config.accesslog.v3.ComparisonFilter.Op + (GrpcStatusFilter_Status)(0), // 1: envoy.config.accesslog.v3.GrpcStatusFilter.Status + (*AccessLog)(nil), // 2: envoy.config.accesslog.v3.AccessLog + (*AccessLogFilter)(nil), // 3: envoy.config.accesslog.v3.AccessLogFilter + (*ComparisonFilter)(nil), // 4: envoy.config.accesslog.v3.ComparisonFilter + (*StatusCodeFilter)(nil), // 5: envoy.config.accesslog.v3.StatusCodeFilter + (*DurationFilter)(nil), // 6: envoy.config.accesslog.v3.DurationFilter + (*NotHealthCheckFilter)(nil), // 7: envoy.config.accesslog.v3.NotHealthCheckFilter + (*TraceableFilter)(nil), // 8: envoy.config.accesslog.v3.TraceableFilter + (*RuntimeFilter)(nil), // 9: envoy.config.accesslog.v3.RuntimeFilter + (*AndFilter)(nil), // 10: envoy.config.accesslog.v3.AndFilter + (*OrFilter)(nil), // 11: envoy.config.accesslog.v3.OrFilter + (*HeaderFilter)(nil), // 12: envoy.config.accesslog.v3.HeaderFilter + (*ResponseFlagFilter)(nil), // 13: envoy.config.accesslog.v3.ResponseFlagFilter + (*GrpcStatusFilter)(nil), // 14: envoy.config.accesslog.v3.GrpcStatusFilter + (*MetadataFilter)(nil), // 15: envoy.config.accesslog.v3.MetadataFilter + (*LogTypeFilter)(nil), // 16: envoy.config.accesslog.v3.LogTypeFilter + (*ExtensionFilter)(nil), // 17: envoy.config.accesslog.v3.ExtensionFilter + (*anypb.Any)(nil), // 18: google.protobuf.Any + (*v3.RuntimeUInt32)(nil), // 19: envoy.config.core.v3.RuntimeUInt32 + (*v31.FractionalPercent)(nil), // 20: envoy.type.v3.FractionalPercent + (*v32.HeaderMatcher)(nil), // 21: envoy.config.route.v3.HeaderMatcher + (*v33.MetadataMatcher)(nil), // 22: envoy.type.matcher.v3.MetadataMatcher + (*wrapperspb.BoolValue)(nil), // 23: google.protobuf.BoolValue + (v34.AccessLogType)(0), // 24: envoy.data.accesslog.v3.AccessLogType +} +var file_envoy_config_accesslog_v3_accesslog_proto_depIdxs = []int32{ + 3, // 0: envoy.config.accesslog.v3.AccessLog.filter:type_name -> envoy.config.accesslog.v3.AccessLogFilter + 18, // 1: envoy.config.accesslog.v3.AccessLog.typed_config:type_name -> google.protobuf.Any + 5, // 2: envoy.config.accesslog.v3.AccessLogFilter.status_code_filter:type_name -> envoy.config.accesslog.v3.StatusCodeFilter + 6, // 3: envoy.config.accesslog.v3.AccessLogFilter.duration_filter:type_name -> envoy.config.accesslog.v3.DurationFilter + 7, // 4: envoy.config.accesslog.v3.AccessLogFilter.not_health_check_filter:type_name -> envoy.config.accesslog.v3.NotHealthCheckFilter + 8, // 5: envoy.config.accesslog.v3.AccessLogFilter.traceable_filter:type_name -> envoy.config.accesslog.v3.TraceableFilter + 9, // 6: envoy.config.accesslog.v3.AccessLogFilter.runtime_filter:type_name -> envoy.config.accesslog.v3.RuntimeFilter + 10, // 7: envoy.config.accesslog.v3.AccessLogFilter.and_filter:type_name -> envoy.config.accesslog.v3.AndFilter + 11, // 8: envoy.config.accesslog.v3.AccessLogFilter.or_filter:type_name -> envoy.config.accesslog.v3.OrFilter + 12, // 9: envoy.config.accesslog.v3.AccessLogFilter.header_filter:type_name -> envoy.config.accesslog.v3.HeaderFilter + 13, // 10: envoy.config.accesslog.v3.AccessLogFilter.response_flag_filter:type_name -> envoy.config.accesslog.v3.ResponseFlagFilter + 14, // 11: envoy.config.accesslog.v3.AccessLogFilter.grpc_status_filter:type_name -> envoy.config.accesslog.v3.GrpcStatusFilter + 17, // 12: envoy.config.accesslog.v3.AccessLogFilter.extension_filter:type_name -> envoy.config.accesslog.v3.ExtensionFilter + 15, // 13: envoy.config.accesslog.v3.AccessLogFilter.metadata_filter:type_name -> envoy.config.accesslog.v3.MetadataFilter 
+ 16, // 14: envoy.config.accesslog.v3.AccessLogFilter.log_type_filter:type_name -> envoy.config.accesslog.v3.LogTypeFilter + 0, // 15: envoy.config.accesslog.v3.ComparisonFilter.op:type_name -> envoy.config.accesslog.v3.ComparisonFilter.Op + 19, // 16: envoy.config.accesslog.v3.ComparisonFilter.value:type_name -> envoy.config.core.v3.RuntimeUInt32 + 4, // 17: envoy.config.accesslog.v3.StatusCodeFilter.comparison:type_name -> envoy.config.accesslog.v3.ComparisonFilter + 4, // 18: envoy.config.accesslog.v3.DurationFilter.comparison:type_name -> envoy.config.accesslog.v3.ComparisonFilter + 20, // 19: envoy.config.accesslog.v3.RuntimeFilter.percent_sampled:type_name -> envoy.type.v3.FractionalPercent + 3, // 20: envoy.config.accesslog.v3.AndFilter.filters:type_name -> envoy.config.accesslog.v3.AccessLogFilter + 3, // 21: envoy.config.accesslog.v3.OrFilter.filters:type_name -> envoy.config.accesslog.v3.AccessLogFilter + 21, // 22: envoy.config.accesslog.v3.HeaderFilter.header:type_name -> envoy.config.route.v3.HeaderMatcher + 1, // 23: envoy.config.accesslog.v3.GrpcStatusFilter.statuses:type_name -> envoy.config.accesslog.v3.GrpcStatusFilter.Status + 22, // 24: envoy.config.accesslog.v3.MetadataFilter.matcher:type_name -> envoy.type.matcher.v3.MetadataMatcher + 23, // 25: envoy.config.accesslog.v3.MetadataFilter.match_if_key_not_found:type_name -> google.protobuf.BoolValue + 24, // 26: envoy.config.accesslog.v3.LogTypeFilter.types:type_name -> envoy.data.accesslog.v3.AccessLogType + 18, // 27: envoy.config.accesslog.v3.ExtensionFilter.typed_config:type_name -> google.protobuf.Any + 28, // [28:28] is the sub-list for method output_type + 28, // [28:28] is the sub-list for method input_type + 28, // [28:28] is the sub-list for extension type_name + 28, // [28:28] is the sub-list for extension extendee + 0, // [0:28] is the sub-list for field type_name +} + +func init() { file_envoy_config_accesslog_v3_accesslog_proto_init() } +func file_envoy_config_accesslog_v3_accesslog_proto_init() { + if File_envoy_config_accesslog_v3_accesslog_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AccessLog); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AccessLogFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ComparisonFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatusCodeFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DurationFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NotHealthCheckFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceableFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RuntimeFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AndFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OrFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseFlagFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcStatusFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogTypeFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*AccessLog_TypedConfig)(nil), + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*AccessLogFilter_StatusCodeFilter)(nil), + (*AccessLogFilter_DurationFilter)(nil), + (*AccessLogFilter_NotHealthCheckFilter)(nil), + (*AccessLogFilter_TraceableFilter)(nil), + (*AccessLogFilter_RuntimeFilter)(nil), + 
(*AccessLogFilter_AndFilter)(nil), + (*AccessLogFilter_OrFilter)(nil), + (*AccessLogFilter_HeaderFilter)(nil), + (*AccessLogFilter_ResponseFlagFilter)(nil), + (*AccessLogFilter_GrpcStatusFilter)(nil), + (*AccessLogFilter_ExtensionFilter)(nil), + (*AccessLogFilter_MetadataFilter)(nil), + (*AccessLogFilter_LogTypeFilter)(nil), + } + file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[15].OneofWrappers = []interface{}{ + (*ExtensionFilter_TypedConfig)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_accesslog_v3_accesslog_proto_rawDesc, + NumEnums: 2, + NumMessages: 16, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_accesslog_v3_accesslog_proto_goTypes, + DependencyIndexes: file_envoy_config_accesslog_v3_accesslog_proto_depIdxs, + EnumInfos: file_envoy_config_accesslog_v3_accesslog_proto_enumTypes, + MessageInfos: file_envoy_config_accesslog_v3_accesslog_proto_msgTypes, + }.Build() + File_envoy_config_accesslog_v3_accesslog_proto = out.File + file_envoy_config_accesslog_v3_accesslog_proto_rawDesc = nil + file_envoy_config_accesslog_v3_accesslog_proto_goTypes = nil + file_envoy_config_accesslog_v3_accesslog_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.validate.go new file mode 100644 index 0000000000..746f6f2c4c --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.validate.go @@ -0,0 +1,2773 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/accesslog/v3/accesslog.proto + +package accesslogv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + v3 "github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = v3.AccessLogType(0) +) + +// Validate checks the field values on AccessLog with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *AccessLog) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on AccessLog with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in AccessLogMultiError, or nil +// if none found. 
+func (m *AccessLog) ValidateAll() error { + return m.validate(true) +} + +func (m *AccessLog) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + if all { + switch v := interface{}(m.GetFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogValidationError{ + field: "Filter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogValidationError{ + field: "Filter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogValidationError{ + field: "Filter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.ConfigType.(type) { + case *AccessLog_TypedConfig: + if v == nil { + err := AccessLogValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return AccessLogMultiError(errors) + } + + return nil +} + +// AccessLogMultiError is an error wrapping multiple validation errors returned +// by AccessLog.ValidateAll() if the designated constraints aren't met. +type AccessLogMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AccessLogMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AccessLogMultiError) AllErrors() []error { return m } + +// AccessLogValidationError is the validation error returned by +// AccessLog.Validate if the designated constraints aren't met. +type AccessLogValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AccessLogValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AccessLogValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e AccessLogValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AccessLogValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e AccessLogValidationError) ErrorName() string { return "AccessLogValidationError" } + +// Error satisfies the builtin error interface +func (e AccessLogValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAccessLog.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AccessLogValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AccessLogValidationError{} + +// Validate checks the field values on AccessLogFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *AccessLogFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on AccessLogFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// AccessLogFilterMultiError, or nil if none found. +func (m *AccessLogFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *AccessLogFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofFilterSpecifierPresent := false + switch v := m.FilterSpecifier.(type) { + case *AccessLogFilter_StatusCodeFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetStatusCodeFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "StatusCodeFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "StatusCodeFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStatusCodeFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogFilterValidationError{ + field: "StatusCodeFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AccessLogFilter_DurationFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetDurationFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "DurationFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "DurationFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDurationFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
AccessLogFilterValidationError{ + field: "DurationFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AccessLogFilter_NotHealthCheckFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetNotHealthCheckFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "NotHealthCheckFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "NotHealthCheckFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNotHealthCheckFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogFilterValidationError{ + field: "NotHealthCheckFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AccessLogFilter_TraceableFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetTraceableFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "TraceableFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "TraceableFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTraceableFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogFilterValidationError{ + field: "TraceableFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AccessLogFilter_RuntimeFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetRuntimeFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "RuntimeFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "RuntimeFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRuntimeFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogFilterValidationError{ + field: "RuntimeFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AccessLogFilter_AndFilter: + if v == nil { + err := 
AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetAndFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "AndFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "AndFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAndFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogFilterValidationError{ + field: "AndFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AccessLogFilter_OrFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetOrFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "OrFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "OrFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOrFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogFilterValidationError{ + field: "OrFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AccessLogFilter_HeaderFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetHeaderFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "HeaderFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "HeaderFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeaderFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogFilterValidationError{ + field: "HeaderFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AccessLogFilter_ResponseFlagFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetResponseFlagFilter()).(type) { + case interface{ ValidateAll() 
error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "ResponseFlagFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "ResponseFlagFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResponseFlagFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogFilterValidationError{ + field: "ResponseFlagFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AccessLogFilter_GrpcStatusFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetGrpcStatusFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "GrpcStatusFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "GrpcStatusFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpcStatusFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogFilterValidationError{ + field: "GrpcStatusFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AccessLogFilter_ExtensionFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetExtensionFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "ExtensionFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "ExtensionFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExtensionFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogFilterValidationError{ + field: "ExtensionFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AccessLogFilter_MetadataFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetMetadataFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "MetadataFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case 
interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "MetadataFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadataFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogFilterValidationError{ + field: "MetadataFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AccessLogFilter_LogTypeFilter: + if v == nil { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFilterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetLogTypeFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "LogTypeFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogFilterValidationError{ + field: "LogTypeFilter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLogTypeFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogFilterValidationError{ + field: "LogTypeFilter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofFilterSpecifierPresent { + err := AccessLogFilterValidationError{ + field: "FilterSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return AccessLogFilterMultiError(errors) + } + + return nil +} + +// AccessLogFilterMultiError is an error wrapping multiple validation errors +// returned by AccessLogFilter.ValidateAll() if the designated constraints +// aren't met. +type AccessLogFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AccessLogFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AccessLogFilterMultiError) AllErrors() []error { return m } + +// AccessLogFilterValidationError is the validation error returned by +// AccessLogFilter.Validate if the designated constraints aren't met. +type AccessLogFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AccessLogFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AccessLogFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e AccessLogFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AccessLogFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e AccessLogFilterValidationError) ErrorName() string { return "AccessLogFilterValidationError" } + +// Error satisfies the builtin error interface +func (e AccessLogFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAccessLogFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AccessLogFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AccessLogFilterValidationError{} + +// Validate checks the field values on ComparisonFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ComparisonFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ComparisonFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ComparisonFilterMultiError, or nil if none found. +func (m *ComparisonFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *ComparisonFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := ComparisonFilter_Op_name[int32(m.GetOp())]; !ok { + err := ComparisonFilterValidationError{ + field: "Op", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetValue() == nil { + err := ComparisonFilterValidationError{ + field: "Value", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetValue()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ComparisonFilterValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ComparisonFilterValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValue()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ComparisonFilterValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ComparisonFilterMultiError(errors) + } + + return nil +} + +// ComparisonFilterMultiError is an error wrapping multiple validation errors +// returned by ComparisonFilter.ValidateAll() if the designated constraints +// aren't met. +type ComparisonFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ComparisonFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ComparisonFilterMultiError) AllErrors() []error { return m } + +// ComparisonFilterValidationError is the validation error returned by +// ComparisonFilter.Validate if the designated constraints aren't met. 
+type ComparisonFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ComparisonFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ComparisonFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ComparisonFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ComparisonFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ComparisonFilterValidationError) ErrorName() string { return "ComparisonFilterValidationError" } + +// Error satisfies the builtin error interface +func (e ComparisonFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sComparisonFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ComparisonFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ComparisonFilterValidationError{} + +// Validate checks the field values on StatusCodeFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *StatusCodeFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StatusCodeFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// StatusCodeFilterMultiError, or nil if none found. +func (m *StatusCodeFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *StatusCodeFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetComparison() == nil { + err := StatusCodeFilterValidationError{ + field: "Comparison", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetComparison()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StatusCodeFilterValidationError{ + field: "Comparison", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StatusCodeFilterValidationError{ + field: "Comparison", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetComparison()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StatusCodeFilterValidationError{ + field: "Comparison", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return StatusCodeFilterMultiError(errors) + } + + return nil +} + +// StatusCodeFilterMultiError is an error wrapping multiple validation errors +// returned by StatusCodeFilter.ValidateAll() if the designated constraints +// aren't met. +type StatusCodeFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m StatusCodeFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StatusCodeFilterMultiError) AllErrors() []error { return m } + +// StatusCodeFilterValidationError is the validation error returned by +// StatusCodeFilter.Validate if the designated constraints aren't met. +type StatusCodeFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StatusCodeFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StatusCodeFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StatusCodeFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StatusCodeFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e StatusCodeFilterValidationError) ErrorName() string { return "StatusCodeFilterValidationError" } + +// Error satisfies the builtin error interface +func (e StatusCodeFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStatusCodeFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StatusCodeFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StatusCodeFilterValidationError{} + +// Validate checks the field values on DurationFilter with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *DurationFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DurationFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in DurationFilterMultiError, +// or nil if none found. 
+func (m *DurationFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *DurationFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetComparison() == nil { + err := DurationFilterValidationError{ + field: "Comparison", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetComparison()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DurationFilterValidationError{ + field: "Comparison", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DurationFilterValidationError{ + field: "Comparison", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetComparison()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DurationFilterValidationError{ + field: "Comparison", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DurationFilterMultiError(errors) + } + + return nil +} + +// DurationFilterMultiError is an error wrapping multiple validation errors +// returned by DurationFilter.ValidateAll() if the designated constraints +// aren't met. +type DurationFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DurationFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DurationFilterMultiError) AllErrors() []error { return m } + +// DurationFilterValidationError is the validation error returned by +// DurationFilter.Validate if the designated constraints aren't met. +type DurationFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DurationFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DurationFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DurationFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DurationFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DurationFilterValidationError) ErrorName() string { return "DurationFilterValidationError" } + +// Error satisfies the builtin error interface +func (e DurationFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDurationFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DurationFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DurationFilterValidationError{} + +// Validate checks the field values on NotHealthCheckFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *NotHealthCheckFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on NotHealthCheckFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// NotHealthCheckFilterMultiError, or nil if none found. +func (m *NotHealthCheckFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *NotHealthCheckFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return NotHealthCheckFilterMultiError(errors) + } + + return nil +} + +// NotHealthCheckFilterMultiError is an error wrapping multiple validation +// errors returned by NotHealthCheckFilter.ValidateAll() if the designated +// constraints aren't met. +type NotHealthCheckFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m NotHealthCheckFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m NotHealthCheckFilterMultiError) AllErrors() []error { return m } + +// NotHealthCheckFilterValidationError is the validation error returned by +// NotHealthCheckFilter.Validate if the designated constraints aren't met. +type NotHealthCheckFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e NotHealthCheckFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e NotHealthCheckFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e NotHealthCheckFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e NotHealthCheckFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e NotHealthCheckFilterValidationError) ErrorName() string { + return "NotHealthCheckFilterValidationError" +} + +// Error satisfies the builtin error interface +func (e NotHealthCheckFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sNotHealthCheckFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = NotHealthCheckFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = NotHealthCheckFilterValidationError{} + +// Validate checks the field values on TraceableFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *TraceableFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TraceableFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// TraceableFilterMultiError, or nil if none found. 
+func (m *TraceableFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *TraceableFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return TraceableFilterMultiError(errors) + } + + return nil +} + +// TraceableFilterMultiError is an error wrapping multiple validation errors +// returned by TraceableFilter.ValidateAll() if the designated constraints +// aren't met. +type TraceableFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TraceableFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TraceableFilterMultiError) AllErrors() []error { return m } + +// TraceableFilterValidationError is the validation error returned by +// TraceableFilter.Validate if the designated constraints aren't met. +type TraceableFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TraceableFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TraceableFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TraceableFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TraceableFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TraceableFilterValidationError) ErrorName() string { return "TraceableFilterValidationError" } + +// Error satisfies the builtin error interface +func (e TraceableFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTraceableFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TraceableFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TraceableFilterValidationError{} + +// Validate checks the field values on RuntimeFilter with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RuntimeFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RuntimeFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RuntimeFilterMultiError, or +// nil if none found. 
+func (m *RuntimeFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *RuntimeFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetRuntimeKey()) < 1 { + err := RuntimeFilterValidationError{ + field: "RuntimeKey", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetPercentSampled()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RuntimeFilterValidationError{ + field: "PercentSampled", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RuntimeFilterValidationError{ + field: "PercentSampled", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPercentSampled()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RuntimeFilterValidationError{ + field: "PercentSampled", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for UseIndependentRandomness + + if len(errors) > 0 { + return RuntimeFilterMultiError(errors) + } + + return nil +} + +// RuntimeFilterMultiError is an error wrapping multiple validation errors +// returned by RuntimeFilter.ValidateAll() if the designated constraints +// aren't met. +type RuntimeFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RuntimeFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RuntimeFilterMultiError) AllErrors() []error { return m } + +// RuntimeFilterValidationError is the validation error returned by +// RuntimeFilter.Validate if the designated constraints aren't met. +type RuntimeFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RuntimeFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RuntimeFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RuntimeFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RuntimeFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RuntimeFilterValidationError) ErrorName() string { return "RuntimeFilterValidationError" } + +// Error satisfies the builtin error interface +func (e RuntimeFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRuntimeFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RuntimeFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RuntimeFilterValidationError{} + +// Validate checks the field values on AndFilter with the rules defined in the +// proto definition for this message. 
If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *AndFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on AndFilter with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in AndFilterMultiError, or nil +// if none found. +func (m *AndFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *AndFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetFilters()) < 2 { + err := AndFilterValidationError{ + field: "Filters", + reason: "value must contain at least 2 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetFilters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AndFilterValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AndFilterValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AndFilterValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return AndFilterMultiError(errors) + } + + return nil +} + +// AndFilterMultiError is an error wrapping multiple validation errors returned +// by AndFilter.ValidateAll() if the designated constraints aren't met. +type AndFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AndFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AndFilterMultiError) AllErrors() []error { return m } + +// AndFilterValidationError is the validation error returned by +// AndFilter.Validate if the designated constraints aren't met. +type AndFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AndFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AndFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e AndFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AndFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e AndFilterValidationError) ErrorName() string { return "AndFilterValidationError" } + +// Error satisfies the builtin error interface +func (e AndFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAndFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AndFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AndFilterValidationError{} + +// Validate checks the field values on OrFilter with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *OrFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on OrFilter with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in OrFilterMultiError, or nil +// if none found. +func (m *OrFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *OrFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetFilters()) < 2 { + err := OrFilterValidationError{ + field: "Filters", + reason: "value must contain at least 2 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetFilters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OrFilterValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OrFilterValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OrFilterValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return OrFilterMultiError(errors) + } + + return nil +} + +// OrFilterMultiError is an error wrapping multiple validation errors returned +// by OrFilter.ValidateAll() if the designated constraints aren't met. +type OrFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m OrFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m OrFilterMultiError) AllErrors() []error { return m } + +// OrFilterValidationError is the validation error returned by +// OrFilter.Validate if the designated constraints aren't met. +type OrFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e OrFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e OrFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e OrFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e OrFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e OrFilterValidationError) ErrorName() string { return "OrFilterValidationError" } + +// Error satisfies the builtin error interface +func (e OrFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sOrFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = OrFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = OrFilterValidationError{} + +// Validate checks the field values on HeaderFilter with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HeaderFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HeaderFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HeaderFilterMultiError, or +// nil if none found. +func (m *HeaderFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *HeaderFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetHeader() == nil { + err := HeaderFilterValidationError{ + field: "Header", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetHeader()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HeaderFilterValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HeaderFilterValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeader()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HeaderFilterValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return HeaderFilterMultiError(errors) + } + + return nil +} + +// HeaderFilterMultiError is an error wrapping multiple validation errors +// returned by HeaderFilter.ValidateAll() if the designated constraints aren't met. +type HeaderFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HeaderFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HeaderFilterMultiError) AllErrors() []error { return m } + +// HeaderFilterValidationError is the validation error returned by +// HeaderFilter.Validate if the designated constraints aren't met. 
+type HeaderFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HeaderFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HeaderFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HeaderFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HeaderFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HeaderFilterValidationError) ErrorName() string { return "HeaderFilterValidationError" } + +// Error satisfies the builtin error interface +func (e HeaderFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHeaderFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HeaderFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HeaderFilterValidationError{} + +// Validate checks the field values on ResponseFlagFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ResponseFlagFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResponseFlagFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ResponseFlagFilterMultiError, or nil if none found. +func (m *ResponseFlagFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *ResponseFlagFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetFlags() { + _, _ = idx, item + + if _, ok := _ResponseFlagFilter_Flags_InLookup[item]; !ok { + err := ResponseFlagFilterValidationError{ + field: fmt.Sprintf("Flags[%v]", idx), + reason: "value must be in list [LH UH UT LR UR UF UC UO NR DI FI RL UAEX RLSE DC URX SI IH DPE UMSDR RFCF NFCF DT UPE NC OM DF DO DR]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return ResponseFlagFilterMultiError(errors) + } + + return nil +} + +// ResponseFlagFilterMultiError is an error wrapping multiple validation errors +// returned by ResponseFlagFilter.ValidateAll() if the designated constraints +// aren't met. +type ResponseFlagFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResponseFlagFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResponseFlagFilterMultiError) AllErrors() []error { return m } + +// ResponseFlagFilterValidationError is the validation error returned by +// ResponseFlagFilter.Validate if the designated constraints aren't met. +type ResponseFlagFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResponseFlagFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e ResponseFlagFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResponseFlagFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResponseFlagFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResponseFlagFilterValidationError) ErrorName() string { + return "ResponseFlagFilterValidationError" +} + +// Error satisfies the builtin error interface +func (e ResponseFlagFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResponseFlagFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResponseFlagFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResponseFlagFilterValidationError{} + +var _ResponseFlagFilter_Flags_InLookup = map[string]struct{}{ + "LH": {}, + "UH": {}, + "UT": {}, + "LR": {}, + "UR": {}, + "UF": {}, + "UC": {}, + "UO": {}, + "NR": {}, + "DI": {}, + "FI": {}, + "RL": {}, + "UAEX": {}, + "RLSE": {}, + "DC": {}, + "URX": {}, + "SI": {}, + "IH": {}, + "DPE": {}, + "UMSDR": {}, + "RFCF": {}, + "NFCF": {}, + "DT": {}, + "UPE": {}, + "NC": {}, + "OM": {}, + "DF": {}, + "DO": {}, + "DR": {}, +} + +// Validate checks the field values on GrpcStatusFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *GrpcStatusFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GrpcStatusFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GrpcStatusFilterMultiError, or nil if none found. +func (m *GrpcStatusFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcStatusFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetStatuses() { + _, _ = idx, item + + if _, ok := GrpcStatusFilter_Status_name[int32(item)]; !ok { + err := GrpcStatusFilterValidationError{ + field: fmt.Sprintf("Statuses[%v]", idx), + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + // no validation rules for Exclude + + if len(errors) > 0 { + return GrpcStatusFilterMultiError(errors) + } + + return nil +} + +// GrpcStatusFilterMultiError is an error wrapping multiple validation errors +// returned by GrpcStatusFilter.ValidateAll() if the designated constraints +// aren't met. +type GrpcStatusFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcStatusFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcStatusFilterMultiError) AllErrors() []error { return m } + +// GrpcStatusFilterValidationError is the validation error returned by +// GrpcStatusFilter.Validate if the designated constraints aren't met. 
+type GrpcStatusFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcStatusFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrpcStatusFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrpcStatusFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrpcStatusFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GrpcStatusFilterValidationError) ErrorName() string { return "GrpcStatusFilterValidationError" } + +// Error satisfies the builtin error interface +func (e GrpcStatusFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcStatusFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcStatusFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcStatusFilterValidationError{} + +// Validate checks the field values on MetadataFilter with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *MetadataFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MetadataFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in MetadataFilterMultiError, +// or nil if none found. 
+func (m *MetadataFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *MetadataFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataFilterValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataFilterValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataFilterValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMatchIfKeyNotFound()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataFilterValidationError{ + field: "MatchIfKeyNotFound", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataFilterValidationError{ + field: "MatchIfKeyNotFound", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatchIfKeyNotFound()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataFilterValidationError{ + field: "MatchIfKeyNotFound", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return MetadataFilterMultiError(errors) + } + + return nil +} + +// MetadataFilterMultiError is an error wrapping multiple validation errors +// returned by MetadataFilter.ValidateAll() if the designated constraints +// aren't met. +type MetadataFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MetadataFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MetadataFilterMultiError) AllErrors() []error { return m } + +// MetadataFilterValidationError is the validation error returned by +// MetadataFilter.Validate if the designated constraints aren't met. +type MetadataFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MetadataFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MetadataFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetadataFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MetadataFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e MetadataFilterValidationError) ErrorName() string { return "MetadataFilterValidationError" } + +// Error satisfies the builtin error interface +func (e MetadataFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetadataFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetadataFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetadataFilterValidationError{} + +// Validate checks the field values on LogTypeFilter with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *LogTypeFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LogTypeFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in LogTypeFilterMultiError, or +// nil if none found. +func (m *LogTypeFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *LogTypeFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetTypes() { + _, _ = idx, item + + if _, ok := v3.AccessLogType_name[int32(item)]; !ok { + err := LogTypeFilterValidationError{ + field: fmt.Sprintf("Types[%v]", idx), + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + // no validation rules for Exclude + + if len(errors) > 0 { + return LogTypeFilterMultiError(errors) + } + + return nil +} + +// LogTypeFilterMultiError is an error wrapping multiple validation errors +// returned by LogTypeFilter.ValidateAll() if the designated constraints +// aren't met. +type LogTypeFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LogTypeFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LogTypeFilterMultiError) AllErrors() []error { return m } + +// LogTypeFilterValidationError is the validation error returned by +// LogTypeFilter.Validate if the designated constraints aren't met. +type LogTypeFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LogTypeFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LogTypeFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LogTypeFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LogTypeFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e LogTypeFilterValidationError) ErrorName() string { return "LogTypeFilterValidationError" } + +// Error satisfies the builtin error interface +func (e LogTypeFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLogTypeFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LogTypeFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LogTypeFilterValidationError{} + +// Validate checks the field values on ExtensionFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ExtensionFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ExtensionFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ExtensionFilterMultiError, or nil if none found. +func (m *ExtensionFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *ExtensionFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + switch v := m.ConfigType.(type) { + case *ExtensionFilter_TypedConfig: + if v == nil { + err := ExtensionFilterValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ExtensionFilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ExtensionFilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ExtensionFilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return ExtensionFilterMultiError(errors) + } + + return nil +} + +// ExtensionFilterMultiError is an error wrapping multiple validation errors +// returned by ExtensionFilter.ValidateAll() if the designated constraints +// aren't met. +type ExtensionFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ExtensionFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ExtensionFilterMultiError) AllErrors() []error { return m } + +// ExtensionFilterValidationError is the validation error returned by +// ExtensionFilter.Validate if the designated constraints aren't met. +type ExtensionFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e ExtensionFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ExtensionFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ExtensionFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ExtensionFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ExtensionFilterValidationError) ErrorName() string { return "ExtensionFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ExtensionFilterValidationError) Error() string {
+	cause := ""
+	if e.cause != nil {
+		cause = fmt.Sprintf(" | caused by: %v", e.cause)
+	}
+
+	key := ""
+	if e.key {
+		key = "key for "
+	}
+
+	return fmt.Sprintf(
+		"invalid %sExtensionFilter.%s: %s%s",
+		key,
+		e.field,
+		e.reason,
+		cause)
+}
+
+var _ error = ExtensionFilterValidationError{}
+
+var _ interface {
+	Field() string
+	Reason() string
+	Key() bool
+	Cause() error
+	ErrorName() string
+} = ExtensionFilterValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog_vtproto.pb.go
new file mode 100644
index 0000000000..e75bf014ac
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog_vtproto.pb.go
@@ -0,0 +1,1751 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/config/accesslog/v3/accesslog.proto
+
+package accesslogv3
+
+import (
+	protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+	anypb "github.com/planetscale/vtprotobuf/types/known/anypb"
+	wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb"
+	proto "google.golang.org/protobuf/proto"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *AccessLog) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AccessLog) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLog) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*AccessLog_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Filter != nil { + size, err := m.Filter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AccessLog_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLog_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AccessLogFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_LogTypeFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_MetadataFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_ExtensionFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_GrpcStatusFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_ResponseFlagFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i 
-= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_HeaderFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_OrFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_AndFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_RuntimeFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_TraceableFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_NotHealthCheckFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_DurationFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.FilterSpecifier.(*AccessLogFilter_StatusCodeFilter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *AccessLogFilter_StatusCodeFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_StatusCodeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StatusCodeFilter != nil { + size, err := m.StatusCodeFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_DurationFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_DurationFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DurationFilter != nil { + size, err := m.DurationFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_NotHealthCheckFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_NotHealthCheckFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NotHealthCheckFilter != nil { + size, err := m.NotHealthCheckFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_TraceableFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_TraceableFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TraceableFilter != nil { + size, err := m.TraceableFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_RuntimeFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_RuntimeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RuntimeFilter != nil { + size, err := m.RuntimeFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_AndFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_AndFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AndFilter != nil { + size, err := m.AndFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_OrFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_OrFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrFilter != nil { + size, err := m.OrFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_HeaderFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_HeaderFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HeaderFilter != nil { + size, err := m.HeaderFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_ResponseFlagFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_ResponseFlagFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ResponseFlagFilter != nil { + size, err := m.ResponseFlagFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + 
dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_GrpcStatusFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_GrpcStatusFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GrpcStatusFilter != nil { + size, err := m.GrpcStatusFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_ExtensionFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_ExtensionFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExtensionFilter != nil { + size, err := m.ExtensionFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_MetadataFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_MetadataFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MetadataFilter != nil { + size, err := m.MetadataFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x62 + } + return len(dAtA) - i, nil +} +func (m *AccessLogFilter_LogTypeFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogFilter_LogTypeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LogTypeFilter != nil { + size, err := m.LogTypeFilter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x6a + } + return len(dAtA) - i, nil +} +func (m *ComparisonFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ComparisonFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ComparisonFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Value != nil { + if vtmsg, ok := interface{}(m.Value).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(size)) + } else { + encoded, err := proto.Marshal(m.Value) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.Op != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Op)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StatusCodeFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusCodeFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatusCodeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Comparison != nil { + size, err := m.Comparison.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DurationFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DurationFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DurationFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Comparison != nil { + size, err := m.Comparison.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NotHealthCheckFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NotHealthCheckFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *NotHealthCheckFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *TraceableFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TraceableFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TraceableFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m 
== nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *RuntimeFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.UseIndependentRandomness { + i-- + if m.UseIndependentRandomness { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.PercentSampled != nil { + if vtmsg, ok := interface{}(m.PercentSampled).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.PercentSampled) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.RuntimeKey) > 0 { + i -= len(m.RuntimeKey) + copy(dAtA[i:], m.RuntimeKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RuntimeKey))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AndFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AndFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AndFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Filters) > 0 { + for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Filters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *OrFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OrFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OrFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Filters) > 0 { + for iNdEx := len(m.Filters) - 1; iNdEx >= 
0; iNdEx-- { + size, err := m.Filters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func (m *HeaderFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeaderFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Header != nil { + if vtmsg, ok := interface{}(m.Header).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Header) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResponseFlagFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseFlagFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ResponseFlagFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Flags) > 0 { + for iNdEx := len(m.Flags) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Flags[iNdEx]) + copy(dAtA[i:], m.Flags[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Flags[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GrpcStatusFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcStatusFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcStatusFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Exclude { + i-- + if m.Exclude { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Statuses) > 0 { + var pksize2 int + for _, num := range m.Statuses { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.Statuses { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = 
uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MetadataFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MatchIfKeyNotFound != nil { + size, err := (*wrapperspb.BoolValue)(m.MatchIfKeyNotFound).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Matcher != nil { + if vtmsg, ok := interface{}(m.Matcher).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Matcher) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LogTypeFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogTypeFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LogTypeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Exclude { + i-- + if m.Exclude { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Types) > 0 { + var pksize2 int + for _, num := range m.Types { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.Types { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExtensionFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExtensionFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ExtensionFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*ExtensionFilter_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExtensionFilter_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ExtensionFilter_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *AccessLog) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Filter != nil { + l = m.Filter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *AccessLog_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.FilterSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *AccessLogFilter_StatusCodeFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StatusCodeFilter != nil { + l = m.StatusCodeFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_DurationFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DurationFilter != nil { + l = m.DurationFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_NotHealthCheckFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NotHealthCheckFilter != nil { + l = m.NotHealthCheckFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_TraceableFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TraceableFilter != nil { + l = m.TraceableFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_RuntimeFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RuntimeFilter != nil { + l = m.RuntimeFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_AndFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AndFilter != nil { + l = m.AndFilter.SizeVT() 
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_OrFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrFilter != nil { + l = m.OrFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_HeaderFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HeaderFilter != nil { + l = m.HeaderFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_ResponseFlagFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ResponseFlagFilter != nil { + l = m.ResponseFlagFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_GrpcStatusFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GrpcStatusFilter != nil { + l = m.GrpcStatusFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_ExtensionFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExtensionFilter != nil { + l = m.ExtensionFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_MetadataFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MetadataFilter != nil { + l = m.MetadataFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AccessLogFilter_LogTypeFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LogTypeFilter != nil { + l = m.LogTypeFilter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ComparisonFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Op != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Op)) + } + if m.Value != nil { + if size, ok := interface{}(m.Value).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Value) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *StatusCodeFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Comparison != nil { + l = m.Comparison.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DurationFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Comparison != nil { + l = m.Comparison.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *NotHealthCheckFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *TraceableFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RuntimeFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RuntimeKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PercentSampled != nil { + if size, ok := interface{}(m.PercentSampled).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.PercentSampled) + } + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + if m.UseIndependentRandomness { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *AndFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Filters) > 0 { + for _, e := range m.Filters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *OrFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Filters) > 0 { + for _, e := range m.Filters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *HeaderFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + if size, ok := interface{}(m.Header).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Header) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ResponseFlagFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Flags) > 0 { + for _, s := range m.Flags { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcStatusFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Statuses) > 0 { + l = 0 + for _, e := range m.Statuses { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if m.Exclude { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *MetadataFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Matcher != nil { + if size, ok := interface{}(m.Matcher).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Matcher) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MatchIfKeyNotFound != nil { + l = (*wrapperspb.BoolValue)(m.MatchIfKeyNotFound).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LogTypeFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Types) > 0 { + l = 0 + for _, e := range m.Types { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if m.Exclude { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ExtensionFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ExtensionFilter_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.go new file mode 100644 index 0000000000..893acf21e3 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.go @@ -0,0 +1,3310 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
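(Editor's note, not part of the patch.) The vendored *_vtproto.pb.go code above follows the vtprotobuf pattern: SizeVT computes the encoded size first, then MarshalToSizedBufferVTStrict fills the buffer back to front, writing each length-delimited field as payload, then a varint length prefix, then a one-byte tag (field number << 3 | wire type, e.g. 0xa for field 1, 0x12 for field 2), and packed repeated enums as a single varint-prefixed block. The standalone Go sketch below illustrates that encoding with hand-rolled stand-ins for protohelpers.EncodeVarint / SizeOfVarint; names and values are illustrative only.

package main

import "fmt"

// sizeOfVarint reports how many bytes the varint encoding of v occupies.
func sizeOfVarint(v uint64) int {
	n := 1
	for v >= 1<<7 {
		v >>= 7
		n++
	}
	return n
}

// encodeVarint writes v immediately before offset i in buf (backwards,
// like protohelpers.EncodeVarint in the generated code) and returns the
// new offset, which now points at the first byte of the varint.
func encodeVarint(buf []byte, i int, v uint64) int {
	i -= sizeOfVarint(v)
	j := i
	for v >= 1<<7 {
		buf[j] = uint8(v&0x7f | 0x80)
		v >>= 7
		j++
	}
	buf[j] = uint8(v)
	return i
}

// marshalExample writes one length-delimited bytes field (field 1, tag 0xa)
// and one packed repeated varint field (field 2, tag 0x12) back to front,
// mirroring the MarshalToSizedBufferVTStrict style of the generated code.
func marshalExample(name []byte, statuses []uint64) []byte {
	// Size pass, as SizeVT does: tag + length varint + payload per field.
	packed := 0
	for _, s := range statuses {
		packed += sizeOfVarint(s)
	}
	size := 1 + sizeOfVarint(uint64(len(name))) + len(name)
	size += 1 + sizeOfVarint(uint64(packed)) + packed

	buf := make([]byte, size)
	i := len(buf)

	// Field 2: packed varints, written into the reserved block front to back,
	// then prefixed with the block length and the tag byte.
	i -= packed
	j := i
	for _, s := range statuses {
		for s >= 1<<7 {
			buf[j] = uint8(s&0x7f | 0x80)
			s >>= 7
			j++
		}
		buf[j] = uint8(s)
		j++
	}
	i = encodeVarint(buf, i, uint64(packed))
	i--
	buf[i] = 0x12

	// Field 1: length-delimited bytes (payload, then length, then tag).
	i -= len(name)
	copy(buf[i:], name)
	i = encodeVarint(buf, i, uint64(len(name)))
	i--
	buf[i] = 0xa

	return buf[i:]
}

func main() {
	// Prints: 0a 02 55 48 12 04 00 0e ac 02
	fmt.Printf("% x\n", marshalExample([]byte("UH"), []uint64{0, 14, 300}))
}

Writing back to front is what lets each length prefix be emitted right after its payload size is known, without buffering sub-messages separately; the up-front SizeVT pass only has to produce the total buffer size.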
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/bootstrap/v3/bootstrap.proto + +package bootstrapv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v34 "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3" + v37 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v36 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3" + v33 "github.com/envoyproxy/go-control-plane/envoy/config/overload/v3" + v32 "github.com/envoyproxy/go-control-plane/envoy/config/trace/v3" + v38 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + v35 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The events are fired in this order: KILL, MULTIKILL, MEGAMISS, MISS. +// Within an event type, actions execute in the order they are configured. +// For KILL/MULTIKILL there is a default PANIC that will run after the +// registered actions and kills the process if it wasn't already killed. +// It might be useful to specify several debug actions, and possibly an +// alternate FATAL action. +type Watchdog_WatchdogAction_WatchdogEvent int32 + +const ( + Watchdog_WatchdogAction_UNKNOWN Watchdog_WatchdogAction_WatchdogEvent = 0 + Watchdog_WatchdogAction_KILL Watchdog_WatchdogAction_WatchdogEvent = 1 + Watchdog_WatchdogAction_MULTIKILL Watchdog_WatchdogAction_WatchdogEvent = 2 + Watchdog_WatchdogAction_MEGAMISS Watchdog_WatchdogAction_WatchdogEvent = 3 + Watchdog_WatchdogAction_MISS Watchdog_WatchdogAction_WatchdogEvent = 4 +) + +// Enum value maps for Watchdog_WatchdogAction_WatchdogEvent. 
+var ( + Watchdog_WatchdogAction_WatchdogEvent_name = map[int32]string{ + 0: "UNKNOWN", + 1: "KILL", + 2: "MULTIKILL", + 3: "MEGAMISS", + 4: "MISS", + } + Watchdog_WatchdogAction_WatchdogEvent_value = map[string]int32{ + "UNKNOWN": 0, + "KILL": 1, + "MULTIKILL": 2, + "MEGAMISS": 3, + "MISS": 4, + } +) + +func (x Watchdog_WatchdogAction_WatchdogEvent) Enum() *Watchdog_WatchdogAction_WatchdogEvent { + p := new(Watchdog_WatchdogAction_WatchdogEvent) + *p = x + return p +} + +func (x Watchdog_WatchdogAction_WatchdogEvent) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Watchdog_WatchdogAction_WatchdogEvent) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_bootstrap_v3_bootstrap_proto_enumTypes[0].Descriptor() +} + +func (Watchdog_WatchdogAction_WatchdogEvent) Type() protoreflect.EnumType { + return &file_envoy_config_bootstrap_v3_bootstrap_proto_enumTypes[0] +} + +func (x Watchdog_WatchdogAction_WatchdogEvent) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Watchdog_WatchdogAction_WatchdogEvent.Descriptor instead. +func (Watchdog_WatchdogAction_WatchdogEvent) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{4, 0, 0} +} + +type CustomInlineHeader_InlineHeaderType int32 + +const ( + CustomInlineHeader_REQUEST_HEADER CustomInlineHeader_InlineHeaderType = 0 + CustomInlineHeader_REQUEST_TRAILER CustomInlineHeader_InlineHeaderType = 1 + CustomInlineHeader_RESPONSE_HEADER CustomInlineHeader_InlineHeaderType = 2 + CustomInlineHeader_RESPONSE_TRAILER CustomInlineHeader_InlineHeaderType = 3 +) + +// Enum value maps for CustomInlineHeader_InlineHeaderType. +var ( + CustomInlineHeader_InlineHeaderType_name = map[int32]string{ + 0: "REQUEST_HEADER", + 1: "REQUEST_TRAILER", + 2: "RESPONSE_HEADER", + 3: "RESPONSE_TRAILER", + } + CustomInlineHeader_InlineHeaderType_value = map[string]int32{ + "REQUEST_HEADER": 0, + "REQUEST_TRAILER": 1, + "RESPONSE_HEADER": 2, + "RESPONSE_TRAILER": 3, + } +) + +func (x CustomInlineHeader_InlineHeaderType) Enum() *CustomInlineHeader_InlineHeaderType { + p := new(CustomInlineHeader_InlineHeaderType) + *p = x + return p +} + +func (x CustomInlineHeader_InlineHeaderType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CustomInlineHeader_InlineHeaderType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_bootstrap_v3_bootstrap_proto_enumTypes[1].Descriptor() +} + +func (CustomInlineHeader_InlineHeaderType) Type() protoreflect.EnumType { + return &file_envoy_config_bootstrap_v3_bootstrap_proto_enumTypes[1] +} + +func (x CustomInlineHeader_InlineHeaderType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CustomInlineHeader_InlineHeaderType.Descriptor instead. +func (CustomInlineHeader_InlineHeaderType) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{9, 0} +} + +// Bootstrap :ref:`configuration overview `. +// [#next-free-field: 42] +type Bootstrap struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Node identity to present to the management server and for instance + // identification purposes (e.g. in generated headers). 
+ Node *v3.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + // A list of :ref:`Node ` field names + // that will be included in the context parameters of the effective + // xdstp:// URL that is sent in a discovery request when resource + // locators are used for LDS/CDS. Any non-string field will have its JSON + // encoding set as the context parameter value, with the exception of + // metadata, which will be flattened (see example below). The supported field + // names are: + // - "cluster" + // - "id" + // - "locality.region" + // - "locality.sub_zone" + // - "locality.zone" + // - "metadata" + // - "user_agent_build_version.metadata" + // - "user_agent_build_version.version" + // - "user_agent_name" + // - "user_agent_version" + // + // The node context parameters act as a base layer dictionary for the context + // parameters (i.e. more specific resource specific context parameters will + // override). Field names will be prefixed with “udpa.node.” when included in + // context parameters. + // + // For example, if node_context_params is “["user_agent_name", "metadata"]“, + // the implied context parameters might be:: + // + // node.user_agent_name: "envoy" + // node.metadata.foo: "{\"bar\": \"baz\"}" + // node.metadata.some: "42" + // node.metadata.thing: "\"thing\"" + // + // [#not-implemented-hide:] + NodeContextParams []string `protobuf:"bytes,26,rep,name=node_context_params,json=nodeContextParams,proto3" json:"node_context_params,omitempty"` + // Statically specified resources. + StaticResources *Bootstrap_StaticResources `protobuf:"bytes,2,opt,name=static_resources,json=staticResources,proto3" json:"static_resources,omitempty"` + // xDS configuration sources. + DynamicResources *Bootstrap_DynamicResources `protobuf:"bytes,3,opt,name=dynamic_resources,json=dynamicResources,proto3" json:"dynamic_resources,omitempty"` + // Configuration for the cluster manager which owns all upstream clusters + // within the server. + ClusterManager *ClusterManager `protobuf:"bytes,4,opt,name=cluster_manager,json=clusterManager,proto3" json:"cluster_manager,omitempty"` + // Health discovery service config option. + // (:ref:`core.ApiConfigSource `) + HdsConfig *v3.ApiConfigSource `protobuf:"bytes,14,opt,name=hds_config,json=hdsConfig,proto3" json:"hds_config,omitempty"` + // Optional file system path to search for startup flag files. + FlagsPath string `protobuf:"bytes,5,opt,name=flags_path,json=flagsPath,proto3" json:"flags_path,omitempty"` + // Optional set of stats sinks. + StatsSinks []*v31.StatsSink `protobuf:"bytes,6,rep,name=stats_sinks,json=statsSinks,proto3" json:"stats_sinks,omitempty"` + // Options to control behaviors of deferred creation compatible stats. + DeferredStatOptions *Bootstrap_DeferredStatOptions `protobuf:"bytes,39,opt,name=deferred_stat_options,json=deferredStatOptions,proto3" json:"deferred_stat_options,omitempty"` + // Configuration for internal processing of stats. + StatsConfig *v31.StatsConfig `protobuf:"bytes,13,opt,name=stats_config,json=statsConfig,proto3" json:"stats_config,omitempty"` + // Optional duration between flushes to configured stats sinks. For + // performance reasons Envoy latches counters and only flushes counters and + // gauges at a periodic interval. If not specified the default is 5000ms (5 + // seconds). Only one of “stats_flush_interval“ or “stats_flush_on_admin“ + // can be set. + // Duration must be at least 1ms and at most 5 min. 
+ StatsFlushInterval *durationpb.Duration `protobuf:"bytes,7,opt,name=stats_flush_interval,json=statsFlushInterval,proto3" json:"stats_flush_interval,omitempty"` + // Types that are assignable to StatsFlush: + // + // *Bootstrap_StatsFlushOnAdmin + StatsFlush isBootstrap_StatsFlush `protobuf_oneof:"stats_flush"` + // Optional watchdog configuration. + // This is for a single watchdog configuration for the entire system. + // Deprecated in favor of “watchdogs“ which has finer granularity. + // + // Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto. + Watchdog *Watchdog `protobuf:"bytes,8,opt,name=watchdog,proto3" json:"watchdog,omitempty"` + // Optional watchdogs configuration. + // This is used for specifying different watchdogs for the different subsystems. + // [#extension-category: envoy.guarddog_actions] + Watchdogs *Watchdogs `protobuf:"bytes,27,opt,name=watchdogs,proto3" json:"watchdogs,omitempty"` + // Configuration for an external tracing provider. + // + // .. attention:: + // + // This field has been deprecated in favor of :ref:`HttpConnectionManager.Tracing.provider + // `. + // + // Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto. + Tracing *v32.Tracing `protobuf:"bytes,9,opt,name=tracing,proto3" json:"tracing,omitempty"` + // Configuration for the runtime configuration provider. If not + // specified, a “null” provider will be used which will result in all defaults + // being used. + LayeredRuntime *LayeredRuntime `protobuf:"bytes,17,opt,name=layered_runtime,json=layeredRuntime,proto3" json:"layered_runtime,omitempty"` + // Configuration for the local administration HTTP server. + Admin *Admin `protobuf:"bytes,12,opt,name=admin,proto3" json:"admin,omitempty"` + // Optional overload manager configuration. + OverloadManager *v33.OverloadManager `protobuf:"bytes,15,opt,name=overload_manager,json=overloadManager,proto3" json:"overload_manager,omitempty"` + // Enable :ref:`stats for event dispatcher `, defaults to false. + // Note that this records a value for each iteration of the event loop on every thread. This + // should normally be minimal overhead, but when using + // :ref:`statsd `, it will send each observed value + // over the wire individually because the statsd protocol doesn't have any way to represent a + // histogram summary. Be aware that this can be a very large volume of data. + EnableDispatcherStats bool `protobuf:"varint,16,opt,name=enable_dispatcher_stats,json=enableDispatcherStats,proto3" json:"enable_dispatcher_stats,omitempty"` + // Optional string which will be used in lieu of x-envoy in prefixing headers. + // + // For example, if this string is present and set to X-Foo, then x-envoy-retry-on will be + // transformed into x-foo-retry-on etc. + // + // Note this applies to the headers Envoy will generate, the headers Envoy will sanitize, and the + // headers Envoy will trust for core code and core extensions only. Be VERY careful making + // changes to this string, especially in multi-layer Envoy deployments or deployments using + // extensions which are not upstream. + HeaderPrefix string `protobuf:"bytes,18,opt,name=header_prefix,json=headerPrefix,proto3" json:"header_prefix,omitempty"` + // Optional proxy version which will be used to set the value of :ref:`server.version statistic + // ` if specified. Envoy will not process this value, it will be sent as is to + // :ref:`stats sinks `. 
+ StatsServerVersionOverride *wrapperspb.UInt64Value `protobuf:"bytes,19,opt,name=stats_server_version_override,json=statsServerVersionOverride,proto3" json:"stats_server_version_override,omitempty"` + // Always use TCP queries instead of UDP queries for DNS lookups. + // This may be overridden on a per-cluster basis in cds_config, + // when :ref:`dns_resolvers ` and + // :ref:`use_tcp_for_dns_lookups ` are + // specified. + // This field is deprecated in favor of “dns_resolution_config“ + // which aggregates all of the DNS resolver configuration in a single message. + // + // Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto. + UseTcpForDnsLookups bool `protobuf:"varint,20,opt,name=use_tcp_for_dns_lookups,json=useTcpForDnsLookups,proto3" json:"use_tcp_for_dns_lookups,omitempty"` + // DNS resolution configuration which includes the underlying dns resolver addresses and options. + // This may be overridden on a per-cluster basis in cds_config, when + // :ref:`dns_resolution_config ` + // is specified. + // This field is deprecated in favor of + // :ref:`typed_dns_resolver_config `. + // + // Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto. + DnsResolutionConfig *v3.DnsResolutionConfig `protobuf:"bytes,30,opt,name=dns_resolution_config,json=dnsResolutionConfig,proto3" json:"dns_resolution_config,omitempty"` + // DNS resolver type configuration extension. This extension can be used to configure c-ares, apple, + // or any other DNS resolver types and the related parameters. + // For example, an object of + // :ref:`CaresDnsResolverConfig ` + // can be packed into this “typed_dns_resolver_config“. This configuration replaces the + // :ref:`dns_resolution_config ` + // configuration. + // During the transition period when both “dns_resolution_config“ and “typed_dns_resolver_config“ exists, + // when “typed_dns_resolver_config“ is in place, Envoy will use it and ignore “dns_resolution_config“. + // When “typed_dns_resolver_config“ is missing, the default behavior is in place. + // [#extension-category: envoy.network.dns_resolver] + TypedDnsResolverConfig *v3.TypedExtensionConfig `protobuf:"bytes,31,opt,name=typed_dns_resolver_config,json=typedDnsResolverConfig,proto3" json:"typed_dns_resolver_config,omitempty"` + // Specifies optional bootstrap extensions to be instantiated at startup time. + // Each item contains extension specific configuration. + // [#extension-category: envoy.bootstrap] + BootstrapExtensions []*v3.TypedExtensionConfig `protobuf:"bytes,21,rep,name=bootstrap_extensions,json=bootstrapExtensions,proto3" json:"bootstrap_extensions,omitempty"` + // Specifies optional extensions instantiated at startup time and + // invoked during crash time on the request that caused the crash. + FatalActions []*FatalAction `protobuf:"bytes,28,rep,name=fatal_actions,json=fatalActions,proto3" json:"fatal_actions,omitempty"` + // Configuration sources that will participate in + // xdstp:// URL authority resolution. The algorithm is as + // follows: + // 1. The authority field is taken from the xdstp:// URL, call + // this “resource_authority“. + // 2. “resource_authority“ is compared against the authorities in any peer + // “ConfigSource“. The peer “ConfigSource“ is the configuration source + // message which would have been used unconditionally for resolution + // with opaque resource names. If there is a match with an authority, the + // peer “ConfigSource“ message is used. + // 3. 
“resource_authority“ is compared sequentially with the authorities in + // each configuration source in “config_sources“. The first “ConfigSource“ + // to match wins. + // 4. As a fallback, if no configuration source matches, then + // “default_config_source“ is used. + // 5. If “default_config_source“ is not specified, resolution fails. + // + // [#not-implemented-hide:] + ConfigSources []*v3.ConfigSource `protobuf:"bytes,22,rep,name=config_sources,json=configSources,proto3" json:"config_sources,omitempty"` + // Default configuration source for xdstp:// URLs if all + // other resolution fails. + // [#not-implemented-hide:] + DefaultConfigSource *v3.ConfigSource `protobuf:"bytes,23,opt,name=default_config_source,json=defaultConfigSource,proto3" json:"default_config_source,omitempty"` + // Optional overriding of default socket interface. The value must be the name of one of the + // socket interface factories initialized through a bootstrap extension + DefaultSocketInterface string `protobuf:"bytes,24,opt,name=default_socket_interface,json=defaultSocketInterface,proto3" json:"default_socket_interface,omitempty"` + // Global map of CertificateProvider instances. These instances are referred to by name in the + // :ref:`CommonTlsContext.CertificateProviderInstance.instance_name + // ` + // field. + // [#not-implemented-hide:] + CertificateProviderInstances map[string]*v3.TypedExtensionConfig `protobuf:"bytes,25,rep,name=certificate_provider_instances,json=certificateProviderInstances,proto3" json:"certificate_provider_instances,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Specifies a set of headers that need to be registered as inline header. This configuration + // allows users to customize the inline headers on-demand at Envoy startup without modifying + // Envoy's source code. + // + // Note that the 'set-cookie' header cannot be registered as inline header. + InlineHeaders []*CustomInlineHeader `protobuf:"bytes,32,rep,name=inline_headers,json=inlineHeaders,proto3" json:"inline_headers,omitempty"` + // Optional path to a file with performance tracing data created by "Perfetto" SDK in binary + // ProtoBuf format. The default value is "envoy.pftrace". + PerfTracingFilePath string `protobuf:"bytes,33,opt,name=perf_tracing_file_path,json=perfTracingFilePath,proto3" json:"perf_tracing_file_path,omitempty"` + // Optional overriding of default regex engine. + // If the value is not specified, Google RE2 will be used by default. + // [#extension-category: envoy.regex_engines] + DefaultRegexEngine *v3.TypedExtensionConfig `protobuf:"bytes,34,opt,name=default_regex_engine,json=defaultRegexEngine,proto3" json:"default_regex_engine,omitempty"` + // Optional XdsResourcesDelegate configuration, which allows plugging custom logic into both + // fetch and load events during xDS processing. + // If a value is not specified, no XdsResourcesDelegate will be used. + // TODO(abeyad): Add public-facing documentation. + // [#not-implemented-hide:] + XdsDelegateExtension *v3.TypedExtensionConfig `protobuf:"bytes,35,opt,name=xds_delegate_extension,json=xdsDelegateExtension,proto3" json:"xds_delegate_extension,omitempty"` + // Optional XdsConfigTracker configuration, which allows tracking xDS responses in external components, + // e.g., external tracer or monitor. It provides the process point when receive, ingest, or fail to + // process xDS resources and messages. If a value is not specified, no XdsConfigTracker will be used. + // + // .. 
note:: + // + // There are no in-repo extensions currently, and the :repo:`XdsConfigTracker ` + // interface should be implemented before using. + // See :repo:`xds_config_tracker_integration_test ` + // for an example usage of the interface. + XdsConfigTrackerExtension *v3.TypedExtensionConfig `protobuf:"bytes,36,opt,name=xds_config_tracker_extension,json=xdsConfigTrackerExtension,proto3" json:"xds_config_tracker_extension,omitempty"` + // [#not-implemented-hide:] + // This controls the type of listener manager configured for Envoy. Currently + // Envoy only supports ListenerManager for this field and Envoy Mobile + // supports ApiListenerManager. + ListenerManager *v3.TypedExtensionConfig `protobuf:"bytes,37,opt,name=listener_manager,json=listenerManager,proto3" json:"listener_manager,omitempty"` + // Optional application log configuration. + ApplicationLogConfig *Bootstrap_ApplicationLogConfig `protobuf:"bytes,38,opt,name=application_log_config,json=applicationLogConfig,proto3" json:"application_log_config,omitempty"` + // Optional gRPC async manager config. + GrpcAsyncClientManagerConfig *Bootstrap_GrpcAsyncClientManagerConfig `protobuf:"bytes,40,opt,name=grpc_async_client_manager_config,json=grpcAsyncClientManagerConfig,proto3" json:"grpc_async_client_manager_config,omitempty"` + // Optional configuration for memory allocation manager. + // Memory releasing is only supported for `tcmalloc allocator `_. + MemoryAllocatorManager *MemoryAllocatorManager `protobuf:"bytes,41,opt,name=memory_allocator_manager,json=memoryAllocatorManager,proto3" json:"memory_allocator_manager,omitempty"` +} + +func (x *Bootstrap) Reset() { + *x = Bootstrap{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bootstrap) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bootstrap) ProtoMessage() {} + +func (x *Bootstrap) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bootstrap.ProtoReflect.Descriptor instead. 
+func (*Bootstrap) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0} +} + +func (x *Bootstrap) GetNode() *v3.Node { + if x != nil { + return x.Node + } + return nil +} + +func (x *Bootstrap) GetNodeContextParams() []string { + if x != nil { + return x.NodeContextParams + } + return nil +} + +func (x *Bootstrap) GetStaticResources() *Bootstrap_StaticResources { + if x != nil { + return x.StaticResources + } + return nil +} + +func (x *Bootstrap) GetDynamicResources() *Bootstrap_DynamicResources { + if x != nil { + return x.DynamicResources + } + return nil +} + +func (x *Bootstrap) GetClusterManager() *ClusterManager { + if x != nil { + return x.ClusterManager + } + return nil +} + +func (x *Bootstrap) GetHdsConfig() *v3.ApiConfigSource { + if x != nil { + return x.HdsConfig + } + return nil +} + +func (x *Bootstrap) GetFlagsPath() string { + if x != nil { + return x.FlagsPath + } + return "" +} + +func (x *Bootstrap) GetStatsSinks() []*v31.StatsSink { + if x != nil { + return x.StatsSinks + } + return nil +} + +func (x *Bootstrap) GetDeferredStatOptions() *Bootstrap_DeferredStatOptions { + if x != nil { + return x.DeferredStatOptions + } + return nil +} + +func (x *Bootstrap) GetStatsConfig() *v31.StatsConfig { + if x != nil { + return x.StatsConfig + } + return nil +} + +func (x *Bootstrap) GetStatsFlushInterval() *durationpb.Duration { + if x != nil { + return x.StatsFlushInterval + } + return nil +} + +func (m *Bootstrap) GetStatsFlush() isBootstrap_StatsFlush { + if m != nil { + return m.StatsFlush + } + return nil +} + +func (x *Bootstrap) GetStatsFlushOnAdmin() bool { + if x, ok := x.GetStatsFlush().(*Bootstrap_StatsFlushOnAdmin); ok { + return x.StatsFlushOnAdmin + } + return false +} + +// Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto. +func (x *Bootstrap) GetWatchdog() *Watchdog { + if x != nil { + return x.Watchdog + } + return nil +} + +func (x *Bootstrap) GetWatchdogs() *Watchdogs { + if x != nil { + return x.Watchdogs + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto. +func (x *Bootstrap) GetTracing() *v32.Tracing { + if x != nil { + return x.Tracing + } + return nil +} + +func (x *Bootstrap) GetLayeredRuntime() *LayeredRuntime { + if x != nil { + return x.LayeredRuntime + } + return nil +} + +func (x *Bootstrap) GetAdmin() *Admin { + if x != nil { + return x.Admin + } + return nil +} + +func (x *Bootstrap) GetOverloadManager() *v33.OverloadManager { + if x != nil { + return x.OverloadManager + } + return nil +} + +func (x *Bootstrap) GetEnableDispatcherStats() bool { + if x != nil { + return x.EnableDispatcherStats + } + return false +} + +func (x *Bootstrap) GetHeaderPrefix() string { + if x != nil { + return x.HeaderPrefix + } + return "" +} + +func (x *Bootstrap) GetStatsServerVersionOverride() *wrapperspb.UInt64Value { + if x != nil { + return x.StatsServerVersionOverride + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto. +func (x *Bootstrap) GetUseTcpForDnsLookups() bool { + if x != nil { + return x.UseTcpForDnsLookups + } + return false +} + +// Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto. 
+func (x *Bootstrap) GetDnsResolutionConfig() *v3.DnsResolutionConfig { + if x != nil { + return x.DnsResolutionConfig + } + return nil +} + +func (x *Bootstrap) GetTypedDnsResolverConfig() *v3.TypedExtensionConfig { + if x != nil { + return x.TypedDnsResolverConfig + } + return nil +} + +func (x *Bootstrap) GetBootstrapExtensions() []*v3.TypedExtensionConfig { + if x != nil { + return x.BootstrapExtensions + } + return nil +} + +func (x *Bootstrap) GetFatalActions() []*FatalAction { + if x != nil { + return x.FatalActions + } + return nil +} + +func (x *Bootstrap) GetConfigSources() []*v3.ConfigSource { + if x != nil { + return x.ConfigSources + } + return nil +} + +func (x *Bootstrap) GetDefaultConfigSource() *v3.ConfigSource { + if x != nil { + return x.DefaultConfigSource + } + return nil +} + +func (x *Bootstrap) GetDefaultSocketInterface() string { + if x != nil { + return x.DefaultSocketInterface + } + return "" +} + +func (x *Bootstrap) GetCertificateProviderInstances() map[string]*v3.TypedExtensionConfig { + if x != nil { + return x.CertificateProviderInstances + } + return nil +} + +func (x *Bootstrap) GetInlineHeaders() []*CustomInlineHeader { + if x != nil { + return x.InlineHeaders + } + return nil +} + +func (x *Bootstrap) GetPerfTracingFilePath() string { + if x != nil { + return x.PerfTracingFilePath + } + return "" +} + +func (x *Bootstrap) GetDefaultRegexEngine() *v3.TypedExtensionConfig { + if x != nil { + return x.DefaultRegexEngine + } + return nil +} + +func (x *Bootstrap) GetXdsDelegateExtension() *v3.TypedExtensionConfig { + if x != nil { + return x.XdsDelegateExtension + } + return nil +} + +func (x *Bootstrap) GetXdsConfigTrackerExtension() *v3.TypedExtensionConfig { + if x != nil { + return x.XdsConfigTrackerExtension + } + return nil +} + +func (x *Bootstrap) GetListenerManager() *v3.TypedExtensionConfig { + if x != nil { + return x.ListenerManager + } + return nil +} + +func (x *Bootstrap) GetApplicationLogConfig() *Bootstrap_ApplicationLogConfig { + if x != nil { + return x.ApplicationLogConfig + } + return nil +} + +func (x *Bootstrap) GetGrpcAsyncClientManagerConfig() *Bootstrap_GrpcAsyncClientManagerConfig { + if x != nil { + return x.GrpcAsyncClientManagerConfig + } + return nil +} + +func (x *Bootstrap) GetMemoryAllocatorManager() *MemoryAllocatorManager { + if x != nil { + return x.MemoryAllocatorManager + } + return nil +} + +type isBootstrap_StatsFlush interface { + isBootstrap_StatsFlush() +} + +type Bootstrap_StatsFlushOnAdmin struct { + // Flush stats to sinks only when queried for on the admin interface. If set, + // a flush timer is not created. Only one of “stats_flush_on_admin“ or + // “stats_flush_interval“ can be set. + StatsFlushOnAdmin bool `protobuf:"varint,29,opt,name=stats_flush_on_admin,json=statsFlushOnAdmin,proto3,oneof"` +} + +func (*Bootstrap_StatsFlushOnAdmin) isBootstrap_StatsFlush() {} + +// Administration interface :ref:`operations documentation +// `. +// [#next-free-field: 7] +type Admin struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configuration for :ref:`access logs ` + // emitted by the administration server. + AccessLog []*v34.AccessLog `protobuf:"bytes,5,rep,name=access_log,json=accessLog,proto3" json:"access_log,omitempty"` + // The path to write the access log for the administration server. If no + // access log is desired specify ‘/dev/null’. This is only required if + // :ref:`address ` is set. 
+ // Deprecated in favor of “access_log“ which offers more options. + // + // Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto. + AccessLogPath string `protobuf:"bytes,1,opt,name=access_log_path,json=accessLogPath,proto3" json:"access_log_path,omitempty"` + // The cpu profiler output path for the administration server. If no profile + // path is specified, the default is ‘/var/log/envoy/envoy.prof’. + ProfilePath string `protobuf:"bytes,2,opt,name=profile_path,json=profilePath,proto3" json:"profile_path,omitempty"` + // The TCP address that the administration server will listen on. + // If not specified, Envoy will not start an administration server. + Address *v3.Address `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. + SocketOptions []*v3.SocketOption `protobuf:"bytes,4,rep,name=socket_options,json=socketOptions,proto3" json:"socket_options,omitempty"` + // Indicates whether :ref:`global_downstream_max_connections ` + // should apply to the admin interface or not. + IgnoreGlobalConnLimit bool `protobuf:"varint,6,opt,name=ignore_global_conn_limit,json=ignoreGlobalConnLimit,proto3" json:"ignore_global_conn_limit,omitempty"` +} + +func (x *Admin) Reset() { + *x = Admin{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Admin) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Admin) ProtoMessage() {} + +func (x *Admin) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Admin.ProtoReflect.Descriptor instead. +func (*Admin) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{1} +} + +func (x *Admin) GetAccessLog() []*v34.AccessLog { + if x != nil { + return x.AccessLog + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto. +func (x *Admin) GetAccessLogPath() string { + if x != nil { + return x.AccessLogPath + } + return "" +} + +func (x *Admin) GetProfilePath() string { + if x != nil { + return x.ProfilePath + } + return "" +} + +func (x *Admin) GetAddress() *v3.Address { + if x != nil { + return x.Address + } + return nil +} + +func (x *Admin) GetSocketOptions() []*v3.SocketOption { + if x != nil { + return x.SocketOptions + } + return nil +} + +func (x *Admin) GetIgnoreGlobalConnLimit() bool { + if x != nil { + return x.IgnoreGlobalConnLimit + } + return false +} + +// Cluster manager :ref:`architecture overview `. +// [#next-free-field: 6] +type ClusterManager struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the local cluster (i.e., the cluster that owns the Envoy running + // this configuration). In order to enable :ref:`zone aware routing + // ` this option must be set. + // If “local_cluster_name“ is defined then :ref:`clusters + // ` must be defined in the :ref:`Bootstrap + // static cluster resources + // `. 
This is unrelated to + // the :option:`--service-cluster` option which does not `affect zone aware + // routing `_. + LocalClusterName string `protobuf:"bytes,1,opt,name=local_cluster_name,json=localClusterName,proto3" json:"local_cluster_name,omitempty"` + // Optional global configuration for outlier detection. + OutlierDetection *ClusterManager_OutlierDetection `protobuf:"bytes,2,opt,name=outlier_detection,json=outlierDetection,proto3" json:"outlier_detection,omitempty"` + // Optional configuration used to bind newly established upstream connections. + // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config. + UpstreamBindConfig *v3.BindConfig `protobuf:"bytes,3,opt,name=upstream_bind_config,json=upstreamBindConfig,proto3" json:"upstream_bind_config,omitempty"` + // A management server endpoint to stream load stats to via + // “StreamLoadStats“. This must have :ref:`api_type + // ` :ref:`GRPC + // `. + LoadStatsConfig *v3.ApiConfigSource `protobuf:"bytes,4,opt,name=load_stats_config,json=loadStatsConfig,proto3" json:"load_stats_config,omitempty"` + // Whether the ClusterManager will create clusters on the worker threads + // inline during requests. This will save memory and CPU cycles in cases where + // there are lots of inactive clusters and > 1 worker thread. + EnableDeferredClusterCreation bool `protobuf:"varint,5,opt,name=enable_deferred_cluster_creation,json=enableDeferredClusterCreation,proto3" json:"enable_deferred_cluster_creation,omitempty"` +} + +func (x *ClusterManager) Reset() { + *x = ClusterManager{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClusterManager) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClusterManager) ProtoMessage() {} + +func (x *ClusterManager) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClusterManager.ProtoReflect.Descriptor instead. +func (*ClusterManager) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{2} +} + +func (x *ClusterManager) GetLocalClusterName() string { + if x != nil { + return x.LocalClusterName + } + return "" +} + +func (x *ClusterManager) GetOutlierDetection() *ClusterManager_OutlierDetection { + if x != nil { + return x.OutlierDetection + } + return nil +} + +func (x *ClusterManager) GetUpstreamBindConfig() *v3.BindConfig { + if x != nil { + return x.UpstreamBindConfig + } + return nil +} + +func (x *ClusterManager) GetLoadStatsConfig() *v3.ApiConfigSource { + if x != nil { + return x.LoadStatsConfig + } + return nil +} + +func (x *ClusterManager) GetEnableDeferredClusterCreation() bool { + if x != nil { + return x.EnableDeferredClusterCreation + } + return false +} + +// Allows you to specify different watchdog configs for different subsystems. +// This allows finer tuned policies for the watchdog. If a subsystem is omitted +// the default values for that system will be used. +type Watchdogs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Watchdog for the main thread. 
+ MainThreadWatchdog *Watchdog `protobuf:"bytes,1,opt,name=main_thread_watchdog,json=mainThreadWatchdog,proto3" json:"main_thread_watchdog,omitempty"` + // Watchdog for the worker threads. + WorkerWatchdog *Watchdog `protobuf:"bytes,2,opt,name=worker_watchdog,json=workerWatchdog,proto3" json:"worker_watchdog,omitempty"` +} + +func (x *Watchdogs) Reset() { + *x = Watchdogs{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Watchdogs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Watchdogs) ProtoMessage() {} + +func (x *Watchdogs) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Watchdogs.ProtoReflect.Descriptor instead. +func (*Watchdogs) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{3} +} + +func (x *Watchdogs) GetMainThreadWatchdog() *Watchdog { + if x != nil { + return x.MainThreadWatchdog + } + return nil +} + +func (x *Watchdogs) GetWorkerWatchdog() *Watchdog { + if x != nil { + return x.WorkerWatchdog + } + return nil +} + +// Envoy process watchdog configuration. When configured, this monitors for +// nonresponsive threads and kills the process after the configured thresholds. +// See the :ref:`watchdog documentation ` for more information. +// [#next-free-field: 8] +type Watchdog struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Register actions that will fire on given WatchDog events. + // See “WatchDogAction“ for priority of events. + Actions []*Watchdog_WatchdogAction `protobuf:"bytes,7,rep,name=actions,proto3" json:"actions,omitempty"` + // The duration after which Envoy counts a nonresponsive thread in the + // “watchdog_miss“ statistic. If not specified the default is 200ms. + MissTimeout *durationpb.Duration `protobuf:"bytes,1,opt,name=miss_timeout,json=missTimeout,proto3" json:"miss_timeout,omitempty"` + // The duration after which Envoy counts a nonresponsive thread in the + // “watchdog_mega_miss“ statistic. If not specified the default is + // 1000ms. + MegamissTimeout *durationpb.Duration `protobuf:"bytes,2,opt,name=megamiss_timeout,json=megamissTimeout,proto3" json:"megamiss_timeout,omitempty"` + // If a watched thread has been nonresponsive for this duration, assume a + // programming error and kill the entire Envoy process. Set to 0 to disable + // kill behavior. If not specified the default is 0 (disabled). + KillTimeout *durationpb.Duration `protobuf:"bytes,3,opt,name=kill_timeout,json=killTimeout,proto3" json:"kill_timeout,omitempty"` + // Defines the maximum jitter used to adjust the “kill_timeout“ if “kill_timeout“ is + // enabled. Enabling this feature would help to reduce risk of synchronized + // watchdog kill events across proxies due to external triggers. Set to 0 to + // disable. If not specified the default is 0 (disabled). 
+ MaxKillTimeoutJitter *durationpb.Duration `protobuf:"bytes,6,opt,name=max_kill_timeout_jitter,json=maxKillTimeoutJitter,proto3" json:"max_kill_timeout_jitter,omitempty"` + // If “max(2, ceil(registered_threads * Fraction(*multikill_threshold*)))“ + // threads have been nonresponsive for at least this duration kill the entire + // Envoy process. Set to 0 to disable this behavior. If not specified the + // default is 0 (disabled). + MultikillTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=multikill_timeout,json=multikillTimeout,proto3" json:"multikill_timeout,omitempty"` + // Sets the threshold for “multikill_timeout“ in terms of the percentage of + // nonresponsive threads required for the “multikill_timeout“. + // If not specified the default is 0. + MultikillThreshold *v35.Percent `protobuf:"bytes,5,opt,name=multikill_threshold,json=multikillThreshold,proto3" json:"multikill_threshold,omitempty"` +} + +func (x *Watchdog) Reset() { + *x = Watchdog{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Watchdog) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Watchdog) ProtoMessage() {} + +func (x *Watchdog) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Watchdog.ProtoReflect.Descriptor instead. +func (*Watchdog) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{4} +} + +func (x *Watchdog) GetActions() []*Watchdog_WatchdogAction { + if x != nil { + return x.Actions + } + return nil +} + +func (x *Watchdog) GetMissTimeout() *durationpb.Duration { + if x != nil { + return x.MissTimeout + } + return nil +} + +func (x *Watchdog) GetMegamissTimeout() *durationpb.Duration { + if x != nil { + return x.MegamissTimeout + } + return nil +} + +func (x *Watchdog) GetKillTimeout() *durationpb.Duration { + if x != nil { + return x.KillTimeout + } + return nil +} + +func (x *Watchdog) GetMaxKillTimeoutJitter() *durationpb.Duration { + if x != nil { + return x.MaxKillTimeoutJitter + } + return nil +} + +func (x *Watchdog) GetMultikillTimeout() *durationpb.Duration { + if x != nil { + return x.MultikillTimeout + } + return nil +} + +func (x *Watchdog) GetMultikillThreshold() *v35.Percent { + if x != nil { + return x.MultikillThreshold + } + return nil +} + +// Fatal actions to run while crashing. Actions can be safe (meaning they are +// async-signal safe) or unsafe. We run all safe actions before we run unsafe actions. +// If using an unsafe action that could get stuck or deadlock, it important to +// have an out of band system to terminate the process. +// +// The interface for the extension is “Envoy::Server::Configuration::FatalAction“. +// “FatalAction“ extensions live in the “envoy.extensions.fatal_actions“ API +// namespace. +type FatalAction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Extension specific configuration for the action. It's expected to conform + // to the “Envoy::Server::Configuration::FatalAction“ interface. 
+ Config *v3.TypedExtensionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *FatalAction) Reset() { + *x = FatalAction{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FatalAction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FatalAction) ProtoMessage() {} + +func (x *FatalAction) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FatalAction.ProtoReflect.Descriptor instead. +func (*FatalAction) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{5} +} + +func (x *FatalAction) GetConfig() *v3.TypedExtensionConfig { + if x != nil { + return x.Config + } + return nil +} + +// Runtime :ref:`configuration overview ` (deprecated). +type Runtime struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The implementation assumes that the file system tree is accessed via a + // symbolic link. An atomic link swap is used when a new tree should be + // switched to. This parameter specifies the path to the symbolic link. Envoy + // will watch the location for changes and reload the file system tree when + // they happen. If this parameter is not set, there will be no disk based + // runtime. + SymlinkRoot string `protobuf:"bytes,1,opt,name=symlink_root,json=symlinkRoot,proto3" json:"symlink_root,omitempty"` + // Specifies the subdirectory to load within the root directory. This is + // useful if multiple systems share the same delivery mechanism. Envoy + // configuration elements can be contained in a dedicated subdirectory. + Subdirectory string `protobuf:"bytes,2,opt,name=subdirectory,proto3" json:"subdirectory,omitempty"` + // Specifies an optional subdirectory to load within the root directory. If + // specified and the directory exists, configuration values within this + // directory will override those found in the primary subdirectory. This is + // useful when Envoy is deployed across many different types of servers. + // Sometimes it is useful to have a per service cluster directory for runtime + // configuration. See below for exactly how the override directory is used. + OverrideSubdirectory string `protobuf:"bytes,3,opt,name=override_subdirectory,json=overrideSubdirectory,proto3" json:"override_subdirectory,omitempty"` + // Static base runtime. This will be :ref:`overridden + // ` by other runtime layers, e.g. + // disk or admin. This follows the :ref:`runtime protobuf JSON representation + // encoding `. 
+ Base *structpb.Struct `protobuf:"bytes,4,opt,name=base,proto3" json:"base,omitempty"` +} + +func (x *Runtime) Reset() { + *x = Runtime{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Runtime) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Runtime) ProtoMessage() {} + +func (x *Runtime) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Runtime.ProtoReflect.Descriptor instead. +func (*Runtime) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{6} +} + +func (x *Runtime) GetSymlinkRoot() string { + if x != nil { + return x.SymlinkRoot + } + return "" +} + +func (x *Runtime) GetSubdirectory() string { + if x != nil { + return x.Subdirectory + } + return "" +} + +func (x *Runtime) GetOverrideSubdirectory() string { + if x != nil { + return x.OverrideSubdirectory + } + return "" +} + +func (x *Runtime) GetBase() *structpb.Struct { + if x != nil { + return x.Base + } + return nil +} + +// [#next-free-field: 6] +type RuntimeLayer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Descriptive name for the runtime layer. This is only used for the runtime + // :http:get:`/runtime` output. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to LayerSpecifier: + // + // *RuntimeLayer_StaticLayer + // *RuntimeLayer_DiskLayer_ + // *RuntimeLayer_AdminLayer_ + // *RuntimeLayer_RtdsLayer_ + LayerSpecifier isRuntimeLayer_LayerSpecifier `protobuf_oneof:"layer_specifier"` +} + +func (x *RuntimeLayer) Reset() { + *x = RuntimeLayer{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RuntimeLayer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RuntimeLayer) ProtoMessage() {} + +func (x *RuntimeLayer) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RuntimeLayer.ProtoReflect.Descriptor instead. 
+func (*RuntimeLayer) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{7} +} + +func (x *RuntimeLayer) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *RuntimeLayer) GetLayerSpecifier() isRuntimeLayer_LayerSpecifier { + if m != nil { + return m.LayerSpecifier + } + return nil +} + +func (x *RuntimeLayer) GetStaticLayer() *structpb.Struct { + if x, ok := x.GetLayerSpecifier().(*RuntimeLayer_StaticLayer); ok { + return x.StaticLayer + } + return nil +} + +func (x *RuntimeLayer) GetDiskLayer() *RuntimeLayer_DiskLayer { + if x, ok := x.GetLayerSpecifier().(*RuntimeLayer_DiskLayer_); ok { + return x.DiskLayer + } + return nil +} + +func (x *RuntimeLayer) GetAdminLayer() *RuntimeLayer_AdminLayer { + if x, ok := x.GetLayerSpecifier().(*RuntimeLayer_AdminLayer_); ok { + return x.AdminLayer + } + return nil +} + +func (x *RuntimeLayer) GetRtdsLayer() *RuntimeLayer_RtdsLayer { + if x, ok := x.GetLayerSpecifier().(*RuntimeLayer_RtdsLayer_); ok { + return x.RtdsLayer + } + return nil +} + +type isRuntimeLayer_LayerSpecifier interface { + isRuntimeLayer_LayerSpecifier() +} + +type RuntimeLayer_StaticLayer struct { + // :ref:`Static runtime ` layer. + // This follows the :ref:`runtime protobuf JSON representation encoding + // `. Unlike static xDS resources, this static + // layer is overridable by later layers in the runtime virtual filesystem. + StaticLayer *structpb.Struct `protobuf:"bytes,2,opt,name=static_layer,json=staticLayer,proto3,oneof"` +} + +type RuntimeLayer_DiskLayer_ struct { + DiskLayer *RuntimeLayer_DiskLayer `protobuf:"bytes,3,opt,name=disk_layer,json=diskLayer,proto3,oneof"` +} + +type RuntimeLayer_AdminLayer_ struct { + AdminLayer *RuntimeLayer_AdminLayer `protobuf:"bytes,4,opt,name=admin_layer,json=adminLayer,proto3,oneof"` +} + +type RuntimeLayer_RtdsLayer_ struct { + RtdsLayer *RuntimeLayer_RtdsLayer `protobuf:"bytes,5,opt,name=rtds_layer,json=rtdsLayer,proto3,oneof"` +} + +func (*RuntimeLayer_StaticLayer) isRuntimeLayer_LayerSpecifier() {} + +func (*RuntimeLayer_DiskLayer_) isRuntimeLayer_LayerSpecifier() {} + +func (*RuntimeLayer_AdminLayer_) isRuntimeLayer_LayerSpecifier() {} + +func (*RuntimeLayer_RtdsLayer_) isRuntimeLayer_LayerSpecifier() {} + +// Runtime :ref:`configuration overview `. +type LayeredRuntime struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The :ref:`layers ` of the runtime. This is ordered + // such that later layers in the list overlay earlier entries. + Layers []*RuntimeLayer `protobuf:"bytes,1,rep,name=layers,proto3" json:"layers,omitempty"` +} + +func (x *LayeredRuntime) Reset() { + *x = LayeredRuntime{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LayeredRuntime) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LayeredRuntime) ProtoMessage() {} + +func (x *LayeredRuntime) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LayeredRuntime.ProtoReflect.Descriptor instead. 
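+// Editor's note: an illustrative sketch (not generated code) of a LayeredRuntime
+// in which an admin layer overlays a static base layer, matching the ordering
+// rule described above (later layers overlay earlier entries). The runtime key
+// used below is hypothetical and only for illustration; structpb refers to the
+// google.golang.org/protobuf/types/known/structpb package already imported here:
+//
+//	base, _ := structpb.NewStruct(map[string]interface{}{
+//		"envoy.reloadable_features.example_flag": true, // hypothetical key
+//	})
+//	lr := &LayeredRuntime{
+//		Layers: []*RuntimeLayer{
+//			{Name: "static_layer_0", LayerSpecifier: &RuntimeLayer_StaticLayer{StaticLayer: base}},
+//			{Name: "admin_layer_0", LayerSpecifier: &RuntimeLayer_AdminLayer_{AdminLayer: &RuntimeLayer_AdminLayer{}}},
+//		},
+//	}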
+func (*LayeredRuntime) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{8} +} + +func (x *LayeredRuntime) GetLayers() []*RuntimeLayer { + if x != nil { + return x.Layers + } + return nil +} + +// Used to specify the header that needs to be registered as an inline header. +// +// If request or response contain multiple headers with the same name and the header +// name is registered as an inline header. Then multiple headers will be folded +// into one, and multiple header values will be concatenated by a suitable delimiter. +// The delimiter is generally a comma. +// +// For example, if 'foo' is registered as an inline header, and the headers contains +// the following two headers: +// +// .. code-block:: text +// +// foo: bar +// foo: eep +// +// Then they will eventually be folded into: +// +// .. code-block:: text +// +// foo: bar, eep +// +// Inline headers provide O(1) search performance, but each inline header imposes +// an additional memory overhead on all instances of the corresponding type of +// HeaderMap or TrailerMap. +type CustomInlineHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the header that is expected to be set as the inline header. + InlineHeaderName string `protobuf:"bytes,1,opt,name=inline_header_name,json=inlineHeaderName,proto3" json:"inline_header_name,omitempty"` + // The type of the header that is expected to be set as the inline header. + InlineHeaderType CustomInlineHeader_InlineHeaderType `protobuf:"varint,2,opt,name=inline_header_type,json=inlineHeaderType,proto3,enum=envoy.config.bootstrap.v3.CustomInlineHeader_InlineHeaderType" json:"inline_header_type,omitempty"` +} + +func (x *CustomInlineHeader) Reset() { + *x = CustomInlineHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomInlineHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomInlineHeader) ProtoMessage() {} + +func (x *CustomInlineHeader) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomInlineHeader.ProtoReflect.Descriptor instead. +func (*CustomInlineHeader) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{9} +} + +func (x *CustomInlineHeader) GetInlineHeaderName() string { + if x != nil { + return x.InlineHeaderName + } + return "" +} + +func (x *CustomInlineHeader) GetInlineHeaderType() CustomInlineHeader_InlineHeaderType { + if x != nil { + return x.InlineHeaderType + } + return CustomInlineHeader_REQUEST_HEADER +} + +type MemoryAllocatorManager struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configures tcmalloc to perform background release of free memory in amount of bytes per “memory_release_interval“ interval. + // If equals to “0“, no memory release will occur. Defaults to “0“. 
+ BytesToRelease uint64 `protobuf:"varint,1,opt,name=bytes_to_release,json=bytesToRelease,proto3" json:"bytes_to_release,omitempty"` + // Interval in milliseconds for memory releasing. If specified, during every + // interval Envoy will try to release “bytes_to_release“ of free memory back to operating system for reuse. + // Defaults to 1000 milliseconds. + MemoryReleaseInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=memory_release_interval,json=memoryReleaseInterval,proto3" json:"memory_release_interval,omitempty"` +} + +func (x *MemoryAllocatorManager) Reset() { + *x = MemoryAllocatorManager{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MemoryAllocatorManager) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MemoryAllocatorManager) ProtoMessage() {} + +func (x *MemoryAllocatorManager) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MemoryAllocatorManager.ProtoReflect.Descriptor instead. +func (*MemoryAllocatorManager) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{10} +} + +func (x *MemoryAllocatorManager) GetBytesToRelease() uint64 { + if x != nil { + return x.BytesToRelease + } + return 0 +} + +func (x *MemoryAllocatorManager) GetMemoryReleaseInterval() *durationpb.Duration { + if x != nil { + return x.MemoryReleaseInterval + } + return nil +} + +type Bootstrap_StaticResources struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Static :ref:`Listeners `. These listeners are + // available regardless of LDS configuration. + Listeners []*v36.Listener `protobuf:"bytes,1,rep,name=listeners,proto3" json:"listeners,omitempty"` + // If a network based configuration source is specified for :ref:`cds_config + // `, it's necessary + // to have some initial cluster definitions available to allow Envoy to know + // how to speak to the management server. These cluster definitions may not + // use :ref:`EDS ` (i.e. they should be static + // IP or DNS-based). 
+ Clusters []*v37.Cluster `protobuf:"bytes,2,rep,name=clusters,proto3" json:"clusters,omitempty"` + // These static secrets can be used by :ref:`SdsSecretConfig + // ` + Secrets []*v38.Secret `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty"` +} + +func (x *Bootstrap_StaticResources) Reset() { + *x = Bootstrap_StaticResources{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bootstrap_StaticResources) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bootstrap_StaticResources) ProtoMessage() {} + +func (x *Bootstrap_StaticResources) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bootstrap_StaticResources.ProtoReflect.Descriptor instead. +func (*Bootstrap_StaticResources) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Bootstrap_StaticResources) GetListeners() []*v36.Listener { + if x != nil { + return x.Listeners + } + return nil +} + +func (x *Bootstrap_StaticResources) GetClusters() []*v37.Cluster { + if x != nil { + return x.Clusters + } + return nil +} + +func (x *Bootstrap_StaticResources) GetSecrets() []*v38.Secret { + if x != nil { + return x.Secrets + } + return nil +} + +// [#next-free-field: 7] +type Bootstrap_DynamicResources struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // All :ref:`Listeners ` are provided by a single + // :ref:`LDS ` configuration source. + LdsConfig *v3.ConfigSource `protobuf:"bytes,1,opt,name=lds_config,json=ldsConfig,proto3" json:"lds_config,omitempty"` + // xdstp:// resource locator for listener collection. + // [#not-implemented-hide:] + LdsResourcesLocator string `protobuf:"bytes,5,opt,name=lds_resources_locator,json=ldsResourcesLocator,proto3" json:"lds_resources_locator,omitempty"` + // All post-bootstrap :ref:`Cluster ` definitions are + // provided by a single :ref:`CDS ` + // configuration source. + CdsConfig *v3.ConfigSource `protobuf:"bytes,2,opt,name=cds_config,json=cdsConfig,proto3" json:"cds_config,omitempty"` + // xdstp:// resource locator for cluster collection. + // [#not-implemented-hide:] + CdsResourcesLocator string `protobuf:"bytes,6,opt,name=cds_resources_locator,json=cdsResourcesLocator,proto3" json:"cds_resources_locator,omitempty"` + // A single :ref:`ADS ` source may be optionally + // specified. This must have :ref:`api_type + // ` :ref:`GRPC + // `. Only + // :ref:`ConfigSources ` that have + // the :ref:`ads ` field set will be + // streamed on the ADS channel. 
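+ //
+ // Editor's note: an illustrative sketch (not part of the generated file) of a
+ // DynamicResources block in which LDS and CDS are delivered over the ADS
+ // channel. It assumes the core/v3 package (imported here as v3) exposes
+ // ConfigSource_Ads and AggregatedConfigSource as in upstream go-control-plane,
+ // and that adsSource is a *v3.ApiConfigSource with api_type GRPC built elsewhere:
+ //
+ //	dr := &Bootstrap_DynamicResources{
+ //		LdsConfig: &v3.ConfigSource{ConfigSourceSpecifier: &v3.ConfigSource_Ads{Ads: &v3.AggregatedConfigSource{}}},
+ //		CdsConfig: &v3.ConfigSource{ConfigSourceSpecifier: &v3.ConfigSource_Ads{Ads: &v3.AggregatedConfigSource{}}},
+ //		AdsConfig: adsSource,
+ //	}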
+ AdsConfig *v3.ApiConfigSource `protobuf:"bytes,3,opt,name=ads_config,json=adsConfig,proto3" json:"ads_config,omitempty"` +} + +func (x *Bootstrap_DynamicResources) Reset() { + *x = Bootstrap_DynamicResources{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bootstrap_DynamicResources) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bootstrap_DynamicResources) ProtoMessage() {} + +func (x *Bootstrap_DynamicResources) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bootstrap_DynamicResources.ProtoReflect.Descriptor instead. +func (*Bootstrap_DynamicResources) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *Bootstrap_DynamicResources) GetLdsConfig() *v3.ConfigSource { + if x != nil { + return x.LdsConfig + } + return nil +} + +func (x *Bootstrap_DynamicResources) GetLdsResourcesLocator() string { + if x != nil { + return x.LdsResourcesLocator + } + return "" +} + +func (x *Bootstrap_DynamicResources) GetCdsConfig() *v3.ConfigSource { + if x != nil { + return x.CdsConfig + } + return nil +} + +func (x *Bootstrap_DynamicResources) GetCdsResourcesLocator() string { + if x != nil { + return x.CdsResourcesLocator + } + return "" +} + +func (x *Bootstrap_DynamicResources) GetAdsConfig() *v3.ApiConfigSource { + if x != nil { + return x.AdsConfig + } + return nil +} + +type Bootstrap_ApplicationLogConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional field to set the application logs format. If this field is set, it will override + // the default log format. Setting both this field and :option:`--log-format` command line + // option is not allowed, and will cause a bootstrap error. + LogFormat *Bootstrap_ApplicationLogConfig_LogFormat `protobuf:"bytes,1,opt,name=log_format,json=logFormat,proto3" json:"log_format,omitempty"` +} + +func (x *Bootstrap_ApplicationLogConfig) Reset() { + *x = Bootstrap_ApplicationLogConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bootstrap_ApplicationLogConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bootstrap_ApplicationLogConfig) ProtoMessage() {} + +func (x *Bootstrap_ApplicationLogConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bootstrap_ApplicationLogConfig.ProtoReflect.Descriptor instead. 
+func (*Bootstrap_ApplicationLogConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0, 2} +} + +func (x *Bootstrap_ApplicationLogConfig) GetLogFormat() *Bootstrap_ApplicationLogConfig_LogFormat { + if x != nil { + return x.LogFormat + } + return nil +} + +type Bootstrap_DeferredStatOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // When the flag is enabled, Envoy will lazily initialize a subset of the stats (see below). + // This will save memory and CPU cycles when creating the objects that own these stats, if those + // stats are never referenced throughout the lifetime of the process. However, it will incur additional + // memory overhead for these objects, and a small increase of CPU usage when a at least one of the stats + // is updated for the first time. + // Groups of stats that will be lazily initialized: + // - Cluster traffic stats: a subgroup of the :ref:`cluster statistics ` + // that are used when requests are routed to the cluster. + EnableDeferredCreationStats bool `protobuf:"varint,1,opt,name=enable_deferred_creation_stats,json=enableDeferredCreationStats,proto3" json:"enable_deferred_creation_stats,omitempty"` +} + +func (x *Bootstrap_DeferredStatOptions) Reset() { + *x = Bootstrap_DeferredStatOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bootstrap_DeferredStatOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bootstrap_DeferredStatOptions) ProtoMessage() {} + +func (x *Bootstrap_DeferredStatOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bootstrap_DeferredStatOptions.ProtoReflect.Descriptor instead. +func (*Bootstrap_DeferredStatOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0, 3} +} + +func (x *Bootstrap_DeferredStatOptions) GetEnableDeferredCreationStats() bool { + if x != nil { + return x.EnableDeferredCreationStats + } + return false +} + +type Bootstrap_GrpcAsyncClientManagerConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional field to set the expiration time for the cached gRPC client object. + // The minimal value is 5s and the default is 50s. 
+ MaxCachedEntryIdleDuration *durationpb.Duration `protobuf:"bytes,1,opt,name=max_cached_entry_idle_duration,json=maxCachedEntryIdleDuration,proto3" json:"max_cached_entry_idle_duration,omitempty"` +} + +func (x *Bootstrap_GrpcAsyncClientManagerConfig) Reset() { + *x = Bootstrap_GrpcAsyncClientManagerConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bootstrap_GrpcAsyncClientManagerConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bootstrap_GrpcAsyncClientManagerConfig) ProtoMessage() {} + +func (x *Bootstrap_GrpcAsyncClientManagerConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bootstrap_GrpcAsyncClientManagerConfig.ProtoReflect.Descriptor instead. +func (*Bootstrap_GrpcAsyncClientManagerConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0, 4} +} + +func (x *Bootstrap_GrpcAsyncClientManagerConfig) GetMaxCachedEntryIdleDuration() *durationpb.Duration { + if x != nil { + return x.MaxCachedEntryIdleDuration + } + return nil +} + +type Bootstrap_ApplicationLogConfig_LogFormat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to LogFormat: + // + // *Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat + // *Bootstrap_ApplicationLogConfig_LogFormat_TextFormat + LogFormat isBootstrap_ApplicationLogConfig_LogFormat_LogFormat `protobuf_oneof:"log_format"` +} + +func (x *Bootstrap_ApplicationLogConfig_LogFormat) Reset() { + *x = Bootstrap_ApplicationLogConfig_LogFormat{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bootstrap_ApplicationLogConfig_LogFormat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bootstrap_ApplicationLogConfig_LogFormat) ProtoMessage() {} + +func (x *Bootstrap_ApplicationLogConfig_LogFormat) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bootstrap_ApplicationLogConfig_LogFormat.ProtoReflect.Descriptor instead. 
+func (*Bootstrap_ApplicationLogConfig_LogFormat) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0, 2, 0} +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat) GetLogFormat() isBootstrap_ApplicationLogConfig_LogFormat_LogFormat { + if m != nil { + return m.LogFormat + } + return nil +} + +func (x *Bootstrap_ApplicationLogConfig_LogFormat) GetJsonFormat() *structpb.Struct { + if x, ok := x.GetLogFormat().(*Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat); ok { + return x.JsonFormat + } + return nil +} + +func (x *Bootstrap_ApplicationLogConfig_LogFormat) GetTextFormat() string { + if x, ok := x.GetLogFormat().(*Bootstrap_ApplicationLogConfig_LogFormat_TextFormat); ok { + return x.TextFormat + } + return "" +} + +type isBootstrap_ApplicationLogConfig_LogFormat_LogFormat interface { + isBootstrap_ApplicationLogConfig_LogFormat_LogFormat() +} + +type Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat struct { + // Flush application logs in JSON format. The configured JSON struct can + // support all the format flags specified in the :option:`--log-format` + // command line options section, except for the “%v“ and “%_“ flags. + JsonFormat *structpb.Struct `protobuf:"bytes,1,opt,name=json_format,json=jsonFormat,proto3,oneof"` +} + +type Bootstrap_ApplicationLogConfig_LogFormat_TextFormat struct { + // Flush application log in a format defined by a string. The text format + // can support all the format flags specified in the :option:`--log-format` + // command line option section. + TextFormat string `protobuf:"bytes,2,opt,name=text_format,json=textFormat,proto3,oneof"` +} + +func (*Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat) isBootstrap_ApplicationLogConfig_LogFormat_LogFormat() { +} + +func (*Bootstrap_ApplicationLogConfig_LogFormat_TextFormat) isBootstrap_ApplicationLogConfig_LogFormat_LogFormat() { +} + +type ClusterManager_OutlierDetection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the path to the outlier event log. + EventLogPath string `protobuf:"bytes,1,opt,name=event_log_path,json=eventLogPath,proto3" json:"event_log_path,omitempty"` + // [#not-implemented-hide:] + // The gRPC service for the outlier detection event service. + // If empty, outlier detection events won't be sent to a remote endpoint. + EventService *v3.EventServiceConfig `protobuf:"bytes,2,opt,name=event_service,json=eventService,proto3" json:"event_service,omitempty"` +} + +func (x *ClusterManager_OutlierDetection) Reset() { + *x = ClusterManager_OutlierDetection{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClusterManager_OutlierDetection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClusterManager_OutlierDetection) ProtoMessage() {} + +func (x *ClusterManager_OutlierDetection) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClusterManager_OutlierDetection.ProtoReflect.Descriptor instead. 
+func (*ClusterManager_OutlierDetection) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *ClusterManager_OutlierDetection) GetEventLogPath() string { + if x != nil { + return x.EventLogPath + } + return "" +} + +func (x *ClusterManager_OutlierDetection) GetEventService() *v3.EventServiceConfig { + if x != nil { + return x.EventService + } + return nil +} + +type Watchdog_WatchdogAction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Extension specific configuration for the action. + Config *v3.TypedExtensionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + Event Watchdog_WatchdogAction_WatchdogEvent `protobuf:"varint,2,opt,name=event,proto3,enum=envoy.config.bootstrap.v3.Watchdog_WatchdogAction_WatchdogEvent" json:"event,omitempty"` +} + +func (x *Watchdog_WatchdogAction) Reset() { + *x = Watchdog_WatchdogAction{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Watchdog_WatchdogAction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Watchdog_WatchdogAction) ProtoMessage() {} + +func (x *Watchdog_WatchdogAction) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Watchdog_WatchdogAction.ProtoReflect.Descriptor instead. +func (*Watchdog_WatchdogAction) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *Watchdog_WatchdogAction) GetConfig() *v3.TypedExtensionConfig { + if x != nil { + return x.Config + } + return nil +} + +func (x *Watchdog_WatchdogAction) GetEvent() Watchdog_WatchdogAction_WatchdogEvent { + if x != nil { + return x.Event + } + return Watchdog_WatchdogAction_UNKNOWN +} + +// :ref:`Disk runtime ` layer. +type RuntimeLayer_DiskLayer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The implementation assumes that the file system tree is accessed via a + // symbolic link. An atomic link swap is used when a new tree should be + // switched to. This parameter specifies the path to the symbolic link. + // Envoy will watch the location for changes and reload the file system tree + // when they happen. See documentation on runtime :ref:`atomicity + // ` for further details on how reloads are + // treated. + SymlinkRoot string `protobuf:"bytes,1,opt,name=symlink_root,json=symlinkRoot,proto3" json:"symlink_root,omitempty"` + // Specifies the subdirectory to load within the root directory. This is + // useful if multiple systems share the same delivery mechanism. Envoy + // configuration elements can be contained in a dedicated subdirectory. + Subdirectory string `protobuf:"bytes,3,opt,name=subdirectory,proto3" json:"subdirectory,omitempty"` + // :ref:`Append ` the + // service cluster to the path under symlink root. 
+ AppendServiceCluster bool `protobuf:"varint,2,opt,name=append_service_cluster,json=appendServiceCluster,proto3" json:"append_service_cluster,omitempty"` +} + +func (x *RuntimeLayer_DiskLayer) Reset() { + *x = RuntimeLayer_DiskLayer{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RuntimeLayer_DiskLayer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RuntimeLayer_DiskLayer) ProtoMessage() {} + +func (x *RuntimeLayer_DiskLayer) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RuntimeLayer_DiskLayer.ProtoReflect.Descriptor instead. +func (*RuntimeLayer_DiskLayer) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{7, 0} +} + +func (x *RuntimeLayer_DiskLayer) GetSymlinkRoot() string { + if x != nil { + return x.SymlinkRoot + } + return "" +} + +func (x *RuntimeLayer_DiskLayer) GetSubdirectory() string { + if x != nil { + return x.Subdirectory + } + return "" +} + +func (x *RuntimeLayer_DiskLayer) GetAppendServiceCluster() bool { + if x != nil { + return x.AppendServiceCluster + } + return false +} + +// :ref:`Admin console runtime ` layer. +type RuntimeLayer_AdminLayer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RuntimeLayer_AdminLayer) Reset() { + *x = RuntimeLayer_AdminLayer{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RuntimeLayer_AdminLayer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RuntimeLayer_AdminLayer) ProtoMessage() {} + +func (x *RuntimeLayer_AdminLayer) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RuntimeLayer_AdminLayer.ProtoReflect.Descriptor instead. +func (*RuntimeLayer_AdminLayer) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{7, 1} +} + +// :ref:`Runtime Discovery Service (RTDS) ` layer. +type RuntimeLayer_RtdsLayer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Resource to subscribe to at “rtds_config“ for the RTDS layer. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // RTDS configuration source. 
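+ //
+ // Editor's note: an illustrative sketch (not part of the generated file) of an
+ // RTDS layer that subscribes to a runtime resource over the ADS channel; the
+ // layer/resource name is arbitrary, and ConfigSource_Ads/AggregatedConfigSource
+ // are assumed from the core/v3 package imported here as v3:
+ //
+ //	rtds := &RuntimeLayer_RtdsLayer{
+ //		Name:       "rtds_layer_0",
+ //		RtdsConfig: &v3.ConfigSource{ConfigSourceSpecifier: &v3.ConfigSource_Ads{Ads: &v3.AggregatedConfigSource{}}},
+ //	}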
+ RtdsConfig *v3.ConfigSource `protobuf:"bytes,2,opt,name=rtds_config,json=rtdsConfig,proto3" json:"rtds_config,omitempty"` +} + +func (x *RuntimeLayer_RtdsLayer) Reset() { + *x = RuntimeLayer_RtdsLayer{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RuntimeLayer_RtdsLayer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RuntimeLayer_RtdsLayer) ProtoMessage() {} + +func (x *RuntimeLayer_RtdsLayer) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RuntimeLayer_RtdsLayer.ProtoReflect.Descriptor instead. +func (*RuntimeLayer_RtdsLayer) Descriptor() ([]byte, []int) { + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{7, 2} +} + +func (x *RuntimeLayer_RtdsLayer) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *RuntimeLayer_RtdsLayer) GetRtdsConfig() *v3.ConfigSource { + if x != nil { + return x.RtdsConfig + } + return nil +} + +var File_envoy_config_bootstrap_v3_bootstrap_proto protoreflect.FileDescriptor + +var file_envoy_config_bootstrap_v3_bootstrap_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x62, + 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x6f, 0x6f, 0x74, + 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, + 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x1a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, + 0x33, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, + 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, + 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 
0x33, 0x2f, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x33, 0x2f, + 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, + 0x61, 0x64, 0x2f, 0x76, 0x33, 0x2f, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, + 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x99, 0x24, 0x0a, 0x09, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x12, + 0x2e, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, + 0x2e, 0x0a, 0x13, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x6e, 0x6f, + 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, + 0x5f, 0x0a, 0x10, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, + 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, + 0x0f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x12, 0x62, 0x0a, 0x11, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, + 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, + 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x73, 0x52, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x0f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, + 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x52, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x68, 0x64, 0x73, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x09, 0x68, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1d, + 0x0a, 0x0a, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x50, 0x61, 0x74, 0x68, 0x12, 0x43, 0x0a, + 0x0b, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x6d, 0x65, 0x74, 
0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x73, 0x53, 0x69, 0x6e, 0x6b, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x73, 0x53, 0x69, 0x6e, + 0x6b, 0x73, 0x12, 0x6c, 0x0a, 0x15, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x27, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x6f, + 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x44, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, + 0x53, 0x74, 0x61, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x64, 0x65, 0x66, + 0x65, 0x72, 0x72, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x47, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x73, 0x74, + 0x61, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x71, 0x0a, 0x14, 0x73, 0x74, 0x61, + 0x74, 0x73, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x42, 0x24, 0xfa, 0x42, 0x0e, 0xaa, 0x01, 0x0b, 0x1a, 0x03, 0x08, 0xac, 0x02, 0x32, + 0x04, 0x10, 0xc0, 0x84, 0x3d, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x0d, 0x12, 0x0b, 0x73, 0x74, 0x61, + 0x74, 0x73, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x52, 0x12, 0x73, 0x74, 0x61, 0x74, 0x73, 0x46, + 0x6c, 0x75, 0x73, 0x68, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x3a, 0x0a, 0x14, + 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x6f, 0x6e, 0x5f, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x08, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x6a, + 0x02, 0x08, 0x01, 0x48, 0x00, 0x52, 0x11, 0x73, 0x74, 0x61, 0x74, 0x73, 0x46, 0x6c, 0x75, 0x73, + 0x68, 0x4f, 0x6e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x4c, 0x0a, 0x08, 0x77, 0x61, 0x74, 0x63, + 0x68, 0x64, 0x6f, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, + 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x42, + 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x08, 0x77, 0x61, + 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x12, 0x42, 0x0a, 0x09, 0x77, 0x61, 0x74, 0x63, 0x68, 0x64, + 0x6f, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, + 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x73, 0x52, + 0x09, 0x77, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x73, 0x12, 0x45, 0x0a, 0x07, 0x74, 0x72, + 0x61, 0x63, 0x69, 0x6e, 0x67, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 
0x6e, 0x67, 0x42, 0x0b, 0x92, 0xc7, 0x86, + 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, + 0x67, 0x12, 0x52, 0x0a, 0x0f, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, + 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x52, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x52, 0x0e, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x52, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x05, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, + 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x52, 0x05, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x5f, 0x0a, + 0x10, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, + 0x76, 0x33, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x42, 0x09, 0x8a, 0x93, 0xb7, 0x2a, 0x04, 0x08, 0x01, 0x10, 0x01, 0x52, 0x0f, 0x6f, + 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x36, + 0x0a, 0x17, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x15, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x5f, 0x0a, 0x1d, 0x73, + 0x74, 0x61, 0x74, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x18, 0x13, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x1a, 0x73, 0x74, 0x61, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x41, 0x0a, 0x17, + 0x75, 0x73, 0x65, 0x5f, 0x74, 0x63, 0x70, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x64, 0x6e, 0x73, 0x5f, + 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, + 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x13, 0x75, 0x73, 0x65, 0x54, + 0x63, 0x70, 0x46, 0x6f, 0x72, 0x44, 0x6e, 0x73, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x73, 0x12, + 0x6a, 0x0a, 0x15, 0x64, 0x6e, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 
0x6c, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x13, 0x64, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x65, 0x0a, 0x19, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x5f, 0x64, 0x6e, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, + 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x5d, 0x0a, 0x14, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x5f, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x62, 0x6f, + 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x4b, 0x0a, 0x0d, 0x66, 0x61, 0x74, 0x61, 0x6c, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x1c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, + 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x61, 0x74, 0x61, 0x6c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0c, 0x66, 0x61, 0x74, 0x61, 0x6c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, + 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x56, 0x0a, 0x15, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x13, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x18, 0x18, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x16, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x53, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, 0x8c, 0x01, 0x0a, 0x1e, + 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x19, + 0x20, 
0x03, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, + 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x1c, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x54, 0x0a, 0x0e, 0x69, 0x6e, + 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x20, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x52, 0x0d, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x12, 0x33, 0x0a, 0x16, 0x70, 0x65, 0x72, 0x66, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, + 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x21, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x13, 0x70, 0x65, 0x72, 0x66, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x46, 0x69, 0x6c, + 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x5c, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, 0x22, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x65, 0x67, 0x65, 0x78, 0x45, 0x6e, 0x67, + 0x69, 0x6e, 0x65, 0x12, 0x60, 0x0a, 0x16, 0x78, 0x64, 0x73, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x67, + 0x61, 0x74, 0x65, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x23, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x14, 0x78, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x6b, 0x0a, 0x1c, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x19, 0x78, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x55, 0x0a, 0x10, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x18, 0x25, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x6f, 0x0a, 0x16, 0x61, 0x70, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x26, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, + 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, + 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x89, 0x01, 0x0a, 0x20, 0x67, + 0x72, 0x70, 0x63, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x28, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, + 0x33, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x47, 0x72, 0x70, 0x63, + 0x41, 0x73, 0x79, 0x6e, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1c, 0x67, 0x72, 0x70, 0x63, 0x41, 0x73, + 0x79, 0x6e, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6b, 0x0a, 0x18, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, + 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x18, 0x29, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, + 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x41, 0x6c, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x6f, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x52, 0x16, 0x6d, 0x65, 0x6d, + 0x6f, 0x72, 0x79, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x4d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x1a, 0x9a, 0x02, 0x0a, 0x0f, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x09, + 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x12, 0x3c, 0x0a, 0x08, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x08, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 
0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, + 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x07, 0x73, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, + 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x1a, 0x89, 0x03, 0x0a, 0x10, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x41, 0x0a, 0x0a, 0x6c, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x6c, + 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x6c, 0x64, 0x73, 0x5f, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, + 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x41, 0x0a, 0x0a, + 0x63, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x63, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x32, 0x0a, 0x15, 0x63, 0x64, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, + 0x63, 0x64, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x6f, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x61, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, + 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, + 0x61, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x3b, 0x9a, 0xc5, 0x88, 0x1e, 0x36, + 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, + 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x6f, 0x6f, 0x74, + 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x1a, 0xf9, 0x01, 0x0a, + 0x14, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x62, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 
0x73, 0x74, 0x72, + 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, + 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x09, + 0x6c, 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0x7d, 0x0a, 0x09, 0x4c, 0x6f, 0x67, + 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x3a, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x12, 0x21, 0x0a, 0x0b, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0a, 0x74, 0x65, 0x78, 0x74, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x11, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x5a, 0x0a, 0x13, 0x44, 0x65, 0x66, 0x65, + 0x72, 0x72, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x43, 0x0a, 0x1e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, + 0x65, 0x64, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1b, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x44, + 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x1a, 0x89, 0x01, 0x0a, 0x1c, 0x47, 0x72, 0x70, 0x63, 0x41, 0x73, 0x79, + 0x6e, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x69, 0x0a, 0x1e, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, + 0x32, 0x02, 0x08, 0x05, 0x52, 0x1a, 0x6d, 0x61, 0x78, 0x43, 0x61, 0x63, 0x68, 0x65, 0x64, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x49, 0x64, 0x6c, 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x7b, 0x0a, 0x21, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x40, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x2a, 0x9a, + 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, + 0x42, 
0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x42, 0x0d, 0x0a, 0x0b, 0x73, 0x74, 0x61, + 0x74, 0x73, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x4a, 0x04, + 0x08, 0x0b, 0x10, 0x0c, 0x52, 0x07, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x22, 0x89, 0x03, + 0x0a, 0x05, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, + 0x67, 0x52, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x33, 0x0a, 0x0f, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, + 0x18, 0x01, 0x52, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, + 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, + 0x50, 0x61, 0x74, 0x68, 0x12, 0x37, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x49, 0x0a, + 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x69, 0x67, 0x6e, 0x6f, + 0x72, 0x65, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x69, 0x67, 0x6e, 0x6f, + 0x72, 0x65, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, + 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x22, 0x94, 0x05, 0x0a, 0x0e, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x2c, 0x0a, 0x12, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x67, 0x0a, 0x11, 0x6f, 0x75, + 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x2e, 0x4f, 0x75, 0x74, 0x6c, 
0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x10, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x52, 0x0a, 0x14, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, + 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x12, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x69, 0x6e, + 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x51, 0x0a, 0x11, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0f, 0x6c, 0x6f, 0x61, 0x64, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x47, 0x0a, 0x20, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x65, 0x66, 0x65, + 0x72, 0x72, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0xc9, 0x01, 0x0a, 0x10, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, + 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0e, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x4d, + 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x40, 0x9a, + 0xc5, 0x88, 0x1e, 0x3b, 0x0a, 0x39, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x4f, + 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x3a, + 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, + 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x22, 0xb0, 0x01, 0x0a, 0x09, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x73, 0x12, 0x55, + 0x0a, 0x14, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x61, + 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, + 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, + 0x67, 0x52, 0x12, 0x6d, 0x61, 0x69, 0x6e, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x57, 0x61, 0x74, + 0x63, 0x68, 0x64, 0x6f, 0x67, 0x12, 0x4c, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, + 0x77, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, + 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, + 0x64, 0x6f, 0x67, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x57, 0x61, 0x74, 0x63, 0x68, + 0x64, 0x6f, 0x67, 0x22, 0xba, 0x06, 0x0a, 0x08, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, + 0x12, 0x4c, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, + 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, + 0x0a, 0x0c, 0x6d, 0x69, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0b, 0x6d, 0x69, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x44, 0x0a, 0x10, + 0x6d, 0x65, 0x67, 0x61, 0x6d, 0x69, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x0f, 0x6d, 0x65, 0x67, 0x61, 0x6d, 0x69, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x12, 0x3c, 0x0a, 0x0c, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x6b, 0x69, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x12, 0x5a, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0xaa, 0x01, 0x02, 0x32, 0x00, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x4b, 0x69, 0x6c, 0x6c, 0x54, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x11, + 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x10, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6b, 0x69, 0x6c, 0x6c, 0x54, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x12, 0x47, 0x0a, 0x13, 0x6d, 0x75, 0x6c, 0x74, 0x69, 
0x6b, 0x69, 0x6c, + 0x6c, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x12, 0x6d, 0x75, 0x6c, 0x74, 0x69, + 0x6b, 0x69, 0x6c, 0x6c, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x1a, 0x85, 0x02, + 0x0a, 0x0e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x42, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x60, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, + 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, + 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x4d, 0x0a, 0x0d, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, + 0x6f, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, + 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4b, 0x49, 0x4c, 0x4c, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x4b, 0x49, 0x4c, 0x4c, 0x10, 0x02, 0x12, 0x0c, 0x0a, + 0x08, 0x4d, 0x45, 0x47, 0x41, 0x4d, 0x49, 0x53, 0x53, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x4d, + 0x49, 0x53, 0x53, 0x10, 0x04, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, + 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, + 0x22, 0x51, 0x0a, 0x0b, 0x46, 0x61, 0x74, 0x61, 0x6c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x42, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x22, 0xdc, 0x01, 0x0a, 0x07, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, + 0x21, 0x0a, 0x0c, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x52, 0x6f, + 0x6f, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x33, 0x0a, 0x15, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, + 0x64, 0x65, 0x5f, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53, + 0x75, 
0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x2b, 0x0a, 0x04, 0x62, + 0x61, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x52, 0x04, 0x62, 0x61, 0x73, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, + 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, + 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x22, 0xdb, 0x06, 0x0a, 0x0c, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, + 0x79, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x3c, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x6c, 0x61, 0x79, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, + 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x52, + 0x0a, 0x0a, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x44, 0x69, 0x73, 0x6b, + 0x4c, 0x61, 0x79, 0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x4c, 0x61, 0x79, + 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x61, 0x79, 0x65, + 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, + 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0a, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0a, 0x72, 0x74, 0x64, + 0x73, 0x5f, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, + 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, + 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x52, 0x74, 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, + 0x48, 0x00, 0x52, 0x09, 0x72, 0x74, 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x1a, 0xc1, 0x01, + 0x0a, 0x09, 0x44, 0x69, 0x73, 0x6b, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x73, + 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x22, + 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x79, 0x12, 0x34, 0x0a, 0x16, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x14, 0x61, 
0x70, 0x70, 0x65, 0x6e, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, + 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, + 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x4c, 0x61, 0x79, 0x65, + 0x72, 0x1a, 0x46, 0x0a, 0x0a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x3a, + 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, + 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x41, + 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x1a, 0x9d, 0x01, 0x0a, 0x09, 0x52, 0x74, + 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x0b, 0x72, + 0x74, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x0a, 0x72, 0x74, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, + 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, + 0x52, 0x74, 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, + 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, + 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, + 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x42, 0x16, 0x0a, 0x0f, 0x6c, 0x61, 0x79, 0x65, + 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, + 0x22, 0x82, 0x01, 0x0a, 0x0e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x52, 0x75, 0x6e, 0x74, + 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x06, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x61, + 0x79, 0x65, 0x72, 0x73, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, + 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x52, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x22, 0xb1, 0x02, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x12, + 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, + 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x10, 
0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x76, 0x0a, 0x12, 0x69, 0x6e, 0x6c, + 0x69, 0x6e, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x2e, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x10, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x79, 0x70, + 0x65, 0x22, 0x66, 0x0a, 0x10, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, + 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x52, 0x45, 0x51, + 0x55, 0x45, 0x53, 0x54, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x01, 0x12, 0x13, + 0x0a, 0x0f, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, + 0x52, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x5f, + 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x03, 0x22, 0x95, 0x01, 0x0a, 0x16, 0x4d, 0x65, + 0x6d, 0x6f, 0x72, 0x79, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x4d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x74, 0x6f, + 0x5f, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x12, 0x51, + 0x0a, 0x17, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, + 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x15, 0x6d, 0x65, 0x6d, 0x6f, + 0x72, 0x79, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x42, 0x91, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x27, 0x69, 0x6f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, + 0x61, 0x70, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, + 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x62, 0x6f, 0x6f, + 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2f, 0x76, 0x33, 0x3b, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, + 0x72, 0x61, 0x70, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescOnce sync.Once + file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescData = 
file_envoy_config_bootstrap_v3_bootstrap_proto_rawDesc +) + +func file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP() []byte { + file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescOnce.Do(func() { + file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescData) + }) + return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescData +} + +var file_envoy_config_bootstrap_v3_bootstrap_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes = make([]protoimpl.MessageInfo, 23) +var file_envoy_config_bootstrap_v3_bootstrap_proto_goTypes = []interface{}{ + (Watchdog_WatchdogAction_WatchdogEvent)(0), // 0: envoy.config.bootstrap.v3.Watchdog.WatchdogAction.WatchdogEvent + (CustomInlineHeader_InlineHeaderType)(0), // 1: envoy.config.bootstrap.v3.CustomInlineHeader.InlineHeaderType + (*Bootstrap)(nil), // 2: envoy.config.bootstrap.v3.Bootstrap + (*Admin)(nil), // 3: envoy.config.bootstrap.v3.Admin + (*ClusterManager)(nil), // 4: envoy.config.bootstrap.v3.ClusterManager + (*Watchdogs)(nil), // 5: envoy.config.bootstrap.v3.Watchdogs + (*Watchdog)(nil), // 6: envoy.config.bootstrap.v3.Watchdog + (*FatalAction)(nil), // 7: envoy.config.bootstrap.v3.FatalAction + (*Runtime)(nil), // 8: envoy.config.bootstrap.v3.Runtime + (*RuntimeLayer)(nil), // 9: envoy.config.bootstrap.v3.RuntimeLayer + (*LayeredRuntime)(nil), // 10: envoy.config.bootstrap.v3.LayeredRuntime + (*CustomInlineHeader)(nil), // 11: envoy.config.bootstrap.v3.CustomInlineHeader + (*MemoryAllocatorManager)(nil), // 12: envoy.config.bootstrap.v3.MemoryAllocatorManager + (*Bootstrap_StaticResources)(nil), // 13: envoy.config.bootstrap.v3.Bootstrap.StaticResources + (*Bootstrap_DynamicResources)(nil), // 14: envoy.config.bootstrap.v3.Bootstrap.DynamicResources + (*Bootstrap_ApplicationLogConfig)(nil), // 15: envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig + (*Bootstrap_DeferredStatOptions)(nil), // 16: envoy.config.bootstrap.v3.Bootstrap.DeferredStatOptions + (*Bootstrap_GrpcAsyncClientManagerConfig)(nil), // 17: envoy.config.bootstrap.v3.Bootstrap.GrpcAsyncClientManagerConfig + nil, // 18: envoy.config.bootstrap.v3.Bootstrap.CertificateProviderInstancesEntry + (*Bootstrap_ApplicationLogConfig_LogFormat)(nil), // 19: envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig.LogFormat + (*ClusterManager_OutlierDetection)(nil), // 20: envoy.config.bootstrap.v3.ClusterManager.OutlierDetection + (*Watchdog_WatchdogAction)(nil), // 21: envoy.config.bootstrap.v3.Watchdog.WatchdogAction + (*RuntimeLayer_DiskLayer)(nil), // 22: envoy.config.bootstrap.v3.RuntimeLayer.DiskLayer + (*RuntimeLayer_AdminLayer)(nil), // 23: envoy.config.bootstrap.v3.RuntimeLayer.AdminLayer + (*RuntimeLayer_RtdsLayer)(nil), // 24: envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer + (*v3.Node)(nil), // 25: envoy.config.core.v3.Node + (*v3.ApiConfigSource)(nil), // 26: envoy.config.core.v3.ApiConfigSource + (*v31.StatsSink)(nil), // 27: envoy.config.metrics.v3.StatsSink + (*v31.StatsConfig)(nil), // 28: envoy.config.metrics.v3.StatsConfig + (*durationpb.Duration)(nil), // 29: google.protobuf.Duration + (*v32.Tracing)(nil), // 30: envoy.config.trace.v3.Tracing + (*v33.OverloadManager)(nil), // 31: envoy.config.overload.v3.OverloadManager + (*wrapperspb.UInt64Value)(nil), // 32: google.protobuf.UInt64Value + (*v3.DnsResolutionConfig)(nil), // 33: envoy.config.core.v3.DnsResolutionConfig + (*v3.TypedExtensionConfig)(nil), // 34: 
envoy.config.core.v3.TypedExtensionConfig + (*v3.ConfigSource)(nil), // 35: envoy.config.core.v3.ConfigSource + (*v34.AccessLog)(nil), // 36: envoy.config.accesslog.v3.AccessLog + (*v3.Address)(nil), // 37: envoy.config.core.v3.Address + (*v3.SocketOption)(nil), // 38: envoy.config.core.v3.SocketOption + (*v3.BindConfig)(nil), // 39: envoy.config.core.v3.BindConfig + (*v35.Percent)(nil), // 40: envoy.type.v3.Percent + (*structpb.Struct)(nil), // 41: google.protobuf.Struct + (*v36.Listener)(nil), // 42: envoy.config.listener.v3.Listener + (*v37.Cluster)(nil), // 43: envoy.config.cluster.v3.Cluster + (*v38.Secret)(nil), // 44: envoy.extensions.transport_sockets.tls.v3.Secret + (*v3.EventServiceConfig)(nil), // 45: envoy.config.core.v3.EventServiceConfig +} +var file_envoy_config_bootstrap_v3_bootstrap_proto_depIdxs = []int32{ + 25, // 0: envoy.config.bootstrap.v3.Bootstrap.node:type_name -> envoy.config.core.v3.Node + 13, // 1: envoy.config.bootstrap.v3.Bootstrap.static_resources:type_name -> envoy.config.bootstrap.v3.Bootstrap.StaticResources + 14, // 2: envoy.config.bootstrap.v3.Bootstrap.dynamic_resources:type_name -> envoy.config.bootstrap.v3.Bootstrap.DynamicResources + 4, // 3: envoy.config.bootstrap.v3.Bootstrap.cluster_manager:type_name -> envoy.config.bootstrap.v3.ClusterManager + 26, // 4: envoy.config.bootstrap.v3.Bootstrap.hds_config:type_name -> envoy.config.core.v3.ApiConfigSource + 27, // 5: envoy.config.bootstrap.v3.Bootstrap.stats_sinks:type_name -> envoy.config.metrics.v3.StatsSink + 16, // 6: envoy.config.bootstrap.v3.Bootstrap.deferred_stat_options:type_name -> envoy.config.bootstrap.v3.Bootstrap.DeferredStatOptions + 28, // 7: envoy.config.bootstrap.v3.Bootstrap.stats_config:type_name -> envoy.config.metrics.v3.StatsConfig + 29, // 8: envoy.config.bootstrap.v3.Bootstrap.stats_flush_interval:type_name -> google.protobuf.Duration + 6, // 9: envoy.config.bootstrap.v3.Bootstrap.watchdog:type_name -> envoy.config.bootstrap.v3.Watchdog + 5, // 10: envoy.config.bootstrap.v3.Bootstrap.watchdogs:type_name -> envoy.config.bootstrap.v3.Watchdogs + 30, // 11: envoy.config.bootstrap.v3.Bootstrap.tracing:type_name -> envoy.config.trace.v3.Tracing + 10, // 12: envoy.config.bootstrap.v3.Bootstrap.layered_runtime:type_name -> envoy.config.bootstrap.v3.LayeredRuntime + 3, // 13: envoy.config.bootstrap.v3.Bootstrap.admin:type_name -> envoy.config.bootstrap.v3.Admin + 31, // 14: envoy.config.bootstrap.v3.Bootstrap.overload_manager:type_name -> envoy.config.overload.v3.OverloadManager + 32, // 15: envoy.config.bootstrap.v3.Bootstrap.stats_server_version_override:type_name -> google.protobuf.UInt64Value + 33, // 16: envoy.config.bootstrap.v3.Bootstrap.dns_resolution_config:type_name -> envoy.config.core.v3.DnsResolutionConfig + 34, // 17: envoy.config.bootstrap.v3.Bootstrap.typed_dns_resolver_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 34, // 18: envoy.config.bootstrap.v3.Bootstrap.bootstrap_extensions:type_name -> envoy.config.core.v3.TypedExtensionConfig + 7, // 19: envoy.config.bootstrap.v3.Bootstrap.fatal_actions:type_name -> envoy.config.bootstrap.v3.FatalAction + 35, // 20: envoy.config.bootstrap.v3.Bootstrap.config_sources:type_name -> envoy.config.core.v3.ConfigSource + 35, // 21: envoy.config.bootstrap.v3.Bootstrap.default_config_source:type_name -> envoy.config.core.v3.ConfigSource + 18, // 22: envoy.config.bootstrap.v3.Bootstrap.certificate_provider_instances:type_name -> envoy.config.bootstrap.v3.Bootstrap.CertificateProviderInstancesEntry + 11, // 23: 
envoy.config.bootstrap.v3.Bootstrap.inline_headers:type_name -> envoy.config.bootstrap.v3.CustomInlineHeader + 34, // 24: envoy.config.bootstrap.v3.Bootstrap.default_regex_engine:type_name -> envoy.config.core.v3.TypedExtensionConfig + 34, // 25: envoy.config.bootstrap.v3.Bootstrap.xds_delegate_extension:type_name -> envoy.config.core.v3.TypedExtensionConfig + 34, // 26: envoy.config.bootstrap.v3.Bootstrap.xds_config_tracker_extension:type_name -> envoy.config.core.v3.TypedExtensionConfig + 34, // 27: envoy.config.bootstrap.v3.Bootstrap.listener_manager:type_name -> envoy.config.core.v3.TypedExtensionConfig + 15, // 28: envoy.config.bootstrap.v3.Bootstrap.application_log_config:type_name -> envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig + 17, // 29: envoy.config.bootstrap.v3.Bootstrap.grpc_async_client_manager_config:type_name -> envoy.config.bootstrap.v3.Bootstrap.GrpcAsyncClientManagerConfig + 12, // 30: envoy.config.bootstrap.v3.Bootstrap.memory_allocator_manager:type_name -> envoy.config.bootstrap.v3.MemoryAllocatorManager + 36, // 31: envoy.config.bootstrap.v3.Admin.access_log:type_name -> envoy.config.accesslog.v3.AccessLog + 37, // 32: envoy.config.bootstrap.v3.Admin.address:type_name -> envoy.config.core.v3.Address + 38, // 33: envoy.config.bootstrap.v3.Admin.socket_options:type_name -> envoy.config.core.v3.SocketOption + 20, // 34: envoy.config.bootstrap.v3.ClusterManager.outlier_detection:type_name -> envoy.config.bootstrap.v3.ClusterManager.OutlierDetection + 39, // 35: envoy.config.bootstrap.v3.ClusterManager.upstream_bind_config:type_name -> envoy.config.core.v3.BindConfig + 26, // 36: envoy.config.bootstrap.v3.ClusterManager.load_stats_config:type_name -> envoy.config.core.v3.ApiConfigSource + 6, // 37: envoy.config.bootstrap.v3.Watchdogs.main_thread_watchdog:type_name -> envoy.config.bootstrap.v3.Watchdog + 6, // 38: envoy.config.bootstrap.v3.Watchdogs.worker_watchdog:type_name -> envoy.config.bootstrap.v3.Watchdog + 21, // 39: envoy.config.bootstrap.v3.Watchdog.actions:type_name -> envoy.config.bootstrap.v3.Watchdog.WatchdogAction + 29, // 40: envoy.config.bootstrap.v3.Watchdog.miss_timeout:type_name -> google.protobuf.Duration + 29, // 41: envoy.config.bootstrap.v3.Watchdog.megamiss_timeout:type_name -> google.protobuf.Duration + 29, // 42: envoy.config.bootstrap.v3.Watchdog.kill_timeout:type_name -> google.protobuf.Duration + 29, // 43: envoy.config.bootstrap.v3.Watchdog.max_kill_timeout_jitter:type_name -> google.protobuf.Duration + 29, // 44: envoy.config.bootstrap.v3.Watchdog.multikill_timeout:type_name -> google.protobuf.Duration + 40, // 45: envoy.config.bootstrap.v3.Watchdog.multikill_threshold:type_name -> envoy.type.v3.Percent + 34, // 46: envoy.config.bootstrap.v3.FatalAction.config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 41, // 47: envoy.config.bootstrap.v3.Runtime.base:type_name -> google.protobuf.Struct + 41, // 48: envoy.config.bootstrap.v3.RuntimeLayer.static_layer:type_name -> google.protobuf.Struct + 22, // 49: envoy.config.bootstrap.v3.RuntimeLayer.disk_layer:type_name -> envoy.config.bootstrap.v3.RuntimeLayer.DiskLayer + 23, // 50: envoy.config.bootstrap.v3.RuntimeLayer.admin_layer:type_name -> envoy.config.bootstrap.v3.RuntimeLayer.AdminLayer + 24, // 51: envoy.config.bootstrap.v3.RuntimeLayer.rtds_layer:type_name -> envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer + 9, // 52: envoy.config.bootstrap.v3.LayeredRuntime.layers:type_name -> envoy.config.bootstrap.v3.RuntimeLayer + 1, // 53: 
envoy.config.bootstrap.v3.CustomInlineHeader.inline_header_type:type_name -> envoy.config.bootstrap.v3.CustomInlineHeader.InlineHeaderType + 29, // 54: envoy.config.bootstrap.v3.MemoryAllocatorManager.memory_release_interval:type_name -> google.protobuf.Duration + 42, // 55: envoy.config.bootstrap.v3.Bootstrap.StaticResources.listeners:type_name -> envoy.config.listener.v3.Listener + 43, // 56: envoy.config.bootstrap.v3.Bootstrap.StaticResources.clusters:type_name -> envoy.config.cluster.v3.Cluster + 44, // 57: envoy.config.bootstrap.v3.Bootstrap.StaticResources.secrets:type_name -> envoy.extensions.transport_sockets.tls.v3.Secret + 35, // 58: envoy.config.bootstrap.v3.Bootstrap.DynamicResources.lds_config:type_name -> envoy.config.core.v3.ConfigSource + 35, // 59: envoy.config.bootstrap.v3.Bootstrap.DynamicResources.cds_config:type_name -> envoy.config.core.v3.ConfigSource + 26, // 60: envoy.config.bootstrap.v3.Bootstrap.DynamicResources.ads_config:type_name -> envoy.config.core.v3.ApiConfigSource + 19, // 61: envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig.log_format:type_name -> envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig.LogFormat + 29, // 62: envoy.config.bootstrap.v3.Bootstrap.GrpcAsyncClientManagerConfig.max_cached_entry_idle_duration:type_name -> google.protobuf.Duration + 34, // 63: envoy.config.bootstrap.v3.Bootstrap.CertificateProviderInstancesEntry.value:type_name -> envoy.config.core.v3.TypedExtensionConfig + 41, // 64: envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig.LogFormat.json_format:type_name -> google.protobuf.Struct + 45, // 65: envoy.config.bootstrap.v3.ClusterManager.OutlierDetection.event_service:type_name -> envoy.config.core.v3.EventServiceConfig + 34, // 66: envoy.config.bootstrap.v3.Watchdog.WatchdogAction.config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 0, // 67: envoy.config.bootstrap.v3.Watchdog.WatchdogAction.event:type_name -> envoy.config.bootstrap.v3.Watchdog.WatchdogAction.WatchdogEvent + 35, // 68: envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer.rtds_config:type_name -> envoy.config.core.v3.ConfigSource + 69, // [69:69] is the sub-list for method output_type + 69, // [69:69] is the sub-list for method input_type + 69, // [69:69] is the sub-list for extension type_name + 69, // [69:69] is the sub-list for extension extendee + 0, // [0:69] is the sub-list for field type_name +} + +func init() { file_envoy_config_bootstrap_v3_bootstrap_proto_init() } +func file_envoy_config_bootstrap_v3_bootstrap_proto_init() { + if File_envoy_config_bootstrap_v3_bootstrap_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bootstrap); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Admin); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClusterManager); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[3].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*Watchdogs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Watchdog); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FatalAction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Runtime); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RuntimeLayer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LayeredRuntime); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomInlineHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MemoryAllocatorManager); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bootstrap_StaticResources); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bootstrap_DynamicResources); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bootstrap_ApplicationLogConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bootstrap_DeferredStatOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bootstrap_GrpcAsyncClientManagerConfig); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bootstrap_ApplicationLogConfig_LogFormat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClusterManager_OutlierDetection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Watchdog_WatchdogAction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RuntimeLayer_DiskLayer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RuntimeLayer_AdminLayer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RuntimeLayer_RtdsLayer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Bootstrap_StatsFlushOnAdmin)(nil), + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[7].OneofWrappers = []interface{}{ + (*RuntimeLayer_StaticLayer)(nil), + (*RuntimeLayer_DiskLayer_)(nil), + (*RuntimeLayer_AdminLayer_)(nil), + (*RuntimeLayer_RtdsLayer_)(nil), + } + file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[17].OneofWrappers = []interface{}{ + (*Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat)(nil), + (*Bootstrap_ApplicationLogConfig_LogFormat_TextFormat)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_bootstrap_v3_bootstrap_proto_rawDesc, + NumEnums: 2, + NumMessages: 23, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_bootstrap_v3_bootstrap_proto_goTypes, + DependencyIndexes: file_envoy_config_bootstrap_v3_bootstrap_proto_depIdxs, + EnumInfos: file_envoy_config_bootstrap_v3_bootstrap_proto_enumTypes, + MessageInfos: file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes, + }.Build() + File_envoy_config_bootstrap_v3_bootstrap_proto = out.File + file_envoy_config_bootstrap_v3_bootstrap_proto_rawDesc = nil + file_envoy_config_bootstrap_v3_bootstrap_proto_goTypes = nil + file_envoy_config_bootstrap_v3_bootstrap_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.validate.go new file mode 100644 index 0000000000..55724c0957 --- /dev/null +++ 
b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.validate.go @@ -0,0 +1,4501 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/bootstrap/v3/bootstrap.proto + +package bootstrapv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Bootstrap with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Bootstrap) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Bootstrap with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in BootstrapMultiError, or nil +// if none found. +func (m *Bootstrap) ValidateAll() error { + return m.validate(true) +} + +func (m *Bootstrap) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetNode()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNode()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetStaticResources()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "StaticResources", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "StaticResources", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStaticResources()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "StaticResources", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetDynamicResources()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "DynamicResources", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "DynamicResources", + reason: "embedded message 
failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDynamicResources()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "DynamicResources", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetClusterManager()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "ClusterManager", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "ClusterManager", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetClusterManager()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "ClusterManager", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetHdsConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "HdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "HdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHdsConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "HdsConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for FlagsPath + + for idx, item := range m.GetStatsSinks() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: fmt.Sprintf("StatsSinks[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: fmt.Sprintf("StatsSinks[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: fmt.Sprintf("StatsSinks[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetDeferredStatOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "DeferredStatOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "DeferredStatOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDeferredStatOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
BootstrapValidationError{ + field: "DeferredStatOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetStatsConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "StatsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "StatsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStatsConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "StatsConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if d := m.GetStatsFlushInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = BootstrapValidationError{ + field: "StatsFlushInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + lt := time.Duration(300*time.Second + 0*time.Nanosecond) + gte := time.Duration(0*time.Second + 1000000*time.Nanosecond) + + if dur < gte || dur >= lt { + err := BootstrapValidationError{ + field: "StatsFlushInterval", + reason: "value must be inside range [1ms, 5m0s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if all { + switch v := interface{}(m.GetWatchdog()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "Watchdog", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "Watchdog", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWatchdog()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "Watchdog", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetWatchdogs()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "Watchdogs", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "Watchdogs", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWatchdogs()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "Watchdogs", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTracing()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "Tracing", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, 
BootstrapValidationError{ + field: "Tracing", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTracing()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "Tracing", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLayeredRuntime()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "LayeredRuntime", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "LayeredRuntime", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLayeredRuntime()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "LayeredRuntime", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetAdmin()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "Admin", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "Admin", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAdmin()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "Admin", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetOverloadManager()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "OverloadManager", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "OverloadManager", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOverloadManager()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "OverloadManager", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for EnableDispatcherStats + + // no validation rules for HeaderPrefix + + if all { + switch v := interface{}(m.GetStatsServerVersionOverride()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "StatsServerVersionOverride", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "StatsServerVersionOverride", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStatsServerVersionOverride()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != 
nil { + return BootstrapValidationError{ + field: "StatsServerVersionOverride", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for UseTcpForDnsLookups + + if all { + switch v := interface{}(m.GetDnsResolutionConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "DnsResolutionConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "DnsResolutionConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDnsResolutionConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "DnsResolutionConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTypedDnsResolverConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "TypedDnsResolverConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "TypedDnsResolverConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedDnsResolverConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "TypedDnsResolverConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetBootstrapExtensions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: fmt.Sprintf("BootstrapExtensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: fmt.Sprintf("BootstrapExtensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: fmt.Sprintf("BootstrapExtensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetFatalActions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: fmt.Sprintf("FatalActions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: fmt.Sprintf("FatalActions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + 
return BootstrapValidationError{ + field: fmt.Sprintf("FatalActions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetConfigSources() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: fmt.Sprintf("ConfigSources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: fmt.Sprintf("ConfigSources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: fmt.Sprintf("ConfigSources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetDefaultConfigSource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "DefaultConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "DefaultConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDefaultConfigSource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "DefaultConfigSource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for DefaultSocketInterface + + { + sorted_keys := make([]string, len(m.GetCertificateProviderInstances())) + i := 0 + for key := range m.GetCertificateProviderInstances() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetCertificateProviderInstances()[key] + _ = val + + // no validation rules for CertificateProviderInstances[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: fmt.Sprintf("CertificateProviderInstances[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: fmt.Sprintf("CertificateProviderInstances[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: fmt.Sprintf("CertificateProviderInstances[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + for idx, item := range m.GetInlineHeaders() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: 
fmt.Sprintf("InlineHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: fmt.Sprintf("InlineHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: fmt.Sprintf("InlineHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for PerfTracingFilePath + + if all { + switch v := interface{}(m.GetDefaultRegexEngine()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "DefaultRegexEngine", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "DefaultRegexEngine", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDefaultRegexEngine()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "DefaultRegexEngine", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetXdsDelegateExtension()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "XdsDelegateExtension", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "XdsDelegateExtension", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetXdsDelegateExtension()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "XdsDelegateExtension", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetXdsConfigTrackerExtension()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "XdsConfigTrackerExtension", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "XdsConfigTrackerExtension", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetXdsConfigTrackerExtension()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "XdsConfigTrackerExtension", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetListenerManager()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "ListenerManager", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + 
if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "ListenerManager", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetListenerManager()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "ListenerManager", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetApplicationLogConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "ApplicationLogConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "ApplicationLogConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetApplicationLogConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "ApplicationLogConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetGrpcAsyncClientManagerConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "GrpcAsyncClientManagerConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "GrpcAsyncClientManagerConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpcAsyncClientManagerConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "GrpcAsyncClientManagerConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMemoryAllocatorManager()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "MemoryAllocatorManager", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BootstrapValidationError{ + field: "MemoryAllocatorManager", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMemoryAllocatorManager()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BootstrapValidationError{ + field: "MemoryAllocatorManager", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.StatsFlush.(type) { + case *Bootstrap_StatsFlushOnAdmin: + if v == nil { + err := BootstrapValidationError{ + field: "StatsFlush", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetStatsFlushOnAdmin() != true { + err := BootstrapValidationError{ + field: "StatsFlushOnAdmin", + reason: "value must equal true", + } + if !all { + return err + } + errors = append(errors, err) + } + + default: + _ = v // ensures v is used + } + + if len(errors) 
> 0 { + return BootstrapMultiError(errors) + } + + return nil +} + +// BootstrapMultiError is an error wrapping multiple validation errors returned +// by Bootstrap.ValidateAll() if the designated constraints aren't met. +type BootstrapMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m BootstrapMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m BootstrapMultiError) AllErrors() []error { return m } + +// BootstrapValidationError is the validation error returned by +// Bootstrap.Validate if the designated constraints aren't met. +type BootstrapValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e BootstrapValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e BootstrapValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e BootstrapValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e BootstrapValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e BootstrapValidationError) ErrorName() string { return "BootstrapValidationError" } + +// Error satisfies the builtin error interface +func (e BootstrapValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBootstrap.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = BootstrapValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = BootstrapValidationError{} + +// Validate checks the field values on Admin with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Admin) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Admin with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in AdminMultiError, or nil if none found. 
+func (m *Admin) ValidateAll() error { + return m.validate(true) +} + +func (m *Admin) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetAccessLog() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AdminValidationError{ + field: fmt.Sprintf("AccessLog[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AdminValidationError{ + field: fmt.Sprintf("AccessLog[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AdminValidationError{ + field: fmt.Sprintf("AccessLog[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for AccessLogPath + + // no validation rules for ProfilePath + + if all { + switch v := interface{}(m.GetAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AdminValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AdminValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AdminValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetSocketOptions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AdminValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AdminValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AdminValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for IgnoreGlobalConnLimit + + if len(errors) > 0 { + return AdminMultiError(errors) + } + + return nil +} + +// AdminMultiError is an error wrapping multiple validation errors returned by +// Admin.ValidateAll() if the designated constraints aren't met. +type AdminMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AdminMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m AdminMultiError) AllErrors() []error { return m } + +// AdminValidationError is the validation error returned by Admin.Validate if +// the designated constraints aren't met. +type AdminValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AdminValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AdminValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e AdminValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AdminValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e AdminValidationError) ErrorName() string { return "AdminValidationError" } + +// Error satisfies the builtin error interface +func (e AdminValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAdmin.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AdminValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AdminValidationError{} + +// Validate checks the field values on ClusterManager with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ClusterManager) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClusterManager with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ClusterManagerMultiError, +// or nil if none found. 
+func (m *ClusterManager) ValidateAll() error { + return m.validate(true) +} + +func (m *ClusterManager) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for LocalClusterName + + if all { + switch v := interface{}(m.GetOutlierDetection()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterManagerValidationError{ + field: "OutlierDetection", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterManagerValidationError{ + field: "OutlierDetection", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOutlierDetection()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterManagerValidationError{ + field: "OutlierDetection", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUpstreamBindConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterManagerValidationError{ + field: "UpstreamBindConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterManagerValidationError{ + field: "UpstreamBindConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUpstreamBindConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterManagerValidationError{ + field: "UpstreamBindConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLoadStatsConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterManagerValidationError{ + field: "LoadStatsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterManagerValidationError{ + field: "LoadStatsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLoadStatsConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterManagerValidationError{ + field: "LoadStatsConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for EnableDeferredClusterCreation + + if len(errors) > 0 { + return ClusterManagerMultiError(errors) + } + + return nil +} + +// ClusterManagerMultiError is an error wrapping multiple validation errors +// returned by ClusterManager.ValidateAll() if the designated constraints +// aren't met. +type ClusterManagerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClusterManagerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m ClusterManagerMultiError) AllErrors() []error { return m } + +// ClusterManagerValidationError is the validation error returned by +// ClusterManager.Validate if the designated constraints aren't met. +type ClusterManagerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClusterManagerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClusterManagerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClusterManagerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClusterManagerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClusterManagerValidationError) ErrorName() string { return "ClusterManagerValidationError" } + +// Error satisfies the builtin error interface +func (e ClusterManagerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClusterManager.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClusterManagerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClusterManagerValidationError{} + +// Validate checks the field values on Watchdogs with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Watchdogs) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Watchdogs with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in WatchdogsMultiError, or nil +// if none found. 
+func (m *Watchdogs) ValidateAll() error { + return m.validate(true) +} + +func (m *Watchdogs) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetMainThreadWatchdog()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WatchdogsValidationError{ + field: "MainThreadWatchdog", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WatchdogsValidationError{ + field: "MainThreadWatchdog", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMainThreadWatchdog()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WatchdogsValidationError{ + field: "MainThreadWatchdog", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetWorkerWatchdog()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WatchdogsValidationError{ + field: "WorkerWatchdog", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WatchdogsValidationError{ + field: "WorkerWatchdog", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWorkerWatchdog()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WatchdogsValidationError{ + field: "WorkerWatchdog", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return WatchdogsMultiError(errors) + } + + return nil +} + +// WatchdogsMultiError is an error wrapping multiple validation errors returned +// by Watchdogs.ValidateAll() if the designated constraints aren't met. +type WatchdogsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m WatchdogsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m WatchdogsMultiError) AllErrors() []error { return m } + +// WatchdogsValidationError is the validation error returned by +// Watchdogs.Validate if the designated constraints aren't met. +type WatchdogsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e WatchdogsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e WatchdogsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e WatchdogsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e WatchdogsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e WatchdogsValidationError) ErrorName() string { return "WatchdogsValidationError" } + +// Error satisfies the builtin error interface +func (e WatchdogsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sWatchdogs.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = WatchdogsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = WatchdogsValidationError{} + +// Validate checks the field values on Watchdog with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Watchdog) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Watchdog with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in WatchdogMultiError, or nil +// if none found. +func (m *Watchdog) ValidateAll() error { + return m.validate(true) +} + +func (m *Watchdog) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetActions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WatchdogValidationError{ + field: fmt.Sprintf("Actions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WatchdogValidationError{ + field: fmt.Sprintf("Actions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WatchdogValidationError{ + field: fmt.Sprintf("Actions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetMissTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WatchdogValidationError{ + field: "MissTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WatchdogValidationError{ + field: "MissTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMissTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WatchdogValidationError{ + field: "MissTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMegamissTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WatchdogValidationError{ + field: "MegamissTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WatchdogValidationError{ + field: "MegamissTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else 
if v, ok := interface{}(m.GetMegamissTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WatchdogValidationError{ + field: "MegamissTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetKillTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WatchdogValidationError{ + field: "KillTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WatchdogValidationError{ + field: "KillTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetKillTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WatchdogValidationError{ + field: "KillTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if d := m.GetMaxKillTimeoutJitter(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = WatchdogValidationError{ + field: "MaxKillTimeoutJitter", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gte := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur < gte { + err := WatchdogValidationError{ + field: "MaxKillTimeoutJitter", + reason: "value must be greater than or equal to 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if all { + switch v := interface{}(m.GetMultikillTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WatchdogValidationError{ + field: "MultikillTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WatchdogValidationError{ + field: "MultikillTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMultikillTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WatchdogValidationError{ + field: "MultikillTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMultikillThreshold()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WatchdogValidationError{ + field: "MultikillThreshold", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WatchdogValidationError{ + field: "MultikillThreshold", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMultikillThreshold()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WatchdogValidationError{ + field: "MultikillThreshold", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return WatchdogMultiError(errors) + } + + return nil +} + +// WatchdogMultiError is an error wrapping multiple validation errors returned +// by Watchdog.ValidateAll() if the designated constraints aren't met. 
+type WatchdogMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m WatchdogMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m WatchdogMultiError) AllErrors() []error { return m } + +// WatchdogValidationError is the validation error returned by +// Watchdog.Validate if the designated constraints aren't met. +type WatchdogValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e WatchdogValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e WatchdogValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e WatchdogValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e WatchdogValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e WatchdogValidationError) ErrorName() string { return "WatchdogValidationError" } + +// Error satisfies the builtin error interface +func (e WatchdogValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sWatchdog.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = WatchdogValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = WatchdogValidationError{} + +// Validate checks the field values on FatalAction with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *FatalAction) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FatalAction with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in FatalActionMultiError, or +// nil if none found. +func (m *FatalAction) ValidateAll() error { + return m.validate(true) +} + +func (m *FatalAction) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FatalActionValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FatalActionValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FatalActionValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return FatalActionMultiError(errors) + } + + return nil +} + +// FatalActionMultiError is an error wrapping multiple validation errors +// returned by FatalAction.ValidateAll() if the designated constraints aren't met. 
+type FatalActionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FatalActionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FatalActionMultiError) AllErrors() []error { return m } + +// FatalActionValidationError is the validation error returned by +// FatalAction.Validate if the designated constraints aren't met. +type FatalActionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FatalActionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FatalActionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FatalActionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FatalActionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FatalActionValidationError) ErrorName() string { return "FatalActionValidationError" } + +// Error satisfies the builtin error interface +func (e FatalActionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFatalAction.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FatalActionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FatalActionValidationError{} + +// Validate checks the field values on Runtime with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Runtime) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Runtime with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in RuntimeMultiError, or nil if none found. 
+func (m *Runtime) ValidateAll() error { + return m.validate(true) +} + +func (m *Runtime) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for SymlinkRoot + + // no validation rules for Subdirectory + + // no validation rules for OverrideSubdirectory + + if all { + switch v := interface{}(m.GetBase()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RuntimeValidationError{ + field: "Base", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RuntimeValidationError{ + field: "Base", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBase()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RuntimeValidationError{ + field: "Base", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RuntimeMultiError(errors) + } + + return nil +} + +// RuntimeMultiError is an error wrapping multiple validation errors returned +// by Runtime.ValidateAll() if the designated constraints aren't met. +type RuntimeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RuntimeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RuntimeMultiError) AllErrors() []error { return m } + +// RuntimeValidationError is the validation error returned by Runtime.Validate +// if the designated constraints aren't met. +type RuntimeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RuntimeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RuntimeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RuntimeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RuntimeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RuntimeValidationError) ErrorName() string { return "RuntimeValidationError" } + +// Error satisfies the builtin error interface +func (e RuntimeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRuntime.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RuntimeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RuntimeValidationError{} + +// Validate checks the field values on RuntimeLayer with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RuntimeLayer) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RuntimeLayer with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RuntimeLayerMultiError, or +// nil if none found. 
+func (m *RuntimeLayer) ValidateAll() error { + return m.validate(true) +} + +func (m *RuntimeLayer) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := RuntimeLayerValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + oneofLayerSpecifierPresent := false + switch v := m.LayerSpecifier.(type) { + case *RuntimeLayer_StaticLayer: + if v == nil { + err := RuntimeLayerValidationError{ + field: "LayerSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLayerSpecifierPresent = true + + if all { + switch v := interface{}(m.GetStaticLayer()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RuntimeLayerValidationError{ + field: "StaticLayer", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RuntimeLayerValidationError{ + field: "StaticLayer", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStaticLayer()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RuntimeLayerValidationError{ + field: "StaticLayer", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RuntimeLayer_DiskLayer_: + if v == nil { + err := RuntimeLayerValidationError{ + field: "LayerSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLayerSpecifierPresent = true + + if all { + switch v := interface{}(m.GetDiskLayer()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RuntimeLayerValidationError{ + field: "DiskLayer", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RuntimeLayerValidationError{ + field: "DiskLayer", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDiskLayer()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RuntimeLayerValidationError{ + field: "DiskLayer", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RuntimeLayer_AdminLayer_: + if v == nil { + err := RuntimeLayerValidationError{ + field: "LayerSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLayerSpecifierPresent = true + + if all { + switch v := interface{}(m.GetAdminLayer()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RuntimeLayerValidationError{ + field: "AdminLayer", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RuntimeLayerValidationError{ + field: "AdminLayer", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAdminLayer()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
RuntimeLayerValidationError{ + field: "AdminLayer", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RuntimeLayer_RtdsLayer_: + if v == nil { + err := RuntimeLayerValidationError{ + field: "LayerSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLayerSpecifierPresent = true + + if all { + switch v := interface{}(m.GetRtdsLayer()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RuntimeLayerValidationError{ + field: "RtdsLayer", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RuntimeLayerValidationError{ + field: "RtdsLayer", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRtdsLayer()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RuntimeLayerValidationError{ + field: "RtdsLayer", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofLayerSpecifierPresent { + err := RuntimeLayerValidationError{ + field: "LayerSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RuntimeLayerMultiError(errors) + } + + return nil +} + +// RuntimeLayerMultiError is an error wrapping multiple validation errors +// returned by RuntimeLayer.ValidateAll() if the designated constraints aren't met. +type RuntimeLayerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RuntimeLayerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RuntimeLayerMultiError) AllErrors() []error { return m } + +// RuntimeLayerValidationError is the validation error returned by +// RuntimeLayer.Validate if the designated constraints aren't met. +type RuntimeLayerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RuntimeLayerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RuntimeLayerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RuntimeLayerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RuntimeLayerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RuntimeLayerValidationError) ErrorName() string { return "RuntimeLayerValidationError" } + +// Error satisfies the builtin error interface +func (e RuntimeLayerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRuntimeLayer.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RuntimeLayerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RuntimeLayerValidationError{} + +// Validate checks the field values on LayeredRuntime with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *LayeredRuntime) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LayeredRuntime with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in LayeredRuntimeMultiError, +// or nil if none found. +func (m *LayeredRuntime) ValidateAll() error { + return m.validate(true) +} + +func (m *LayeredRuntime) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetLayers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LayeredRuntimeValidationError{ + field: fmt.Sprintf("Layers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LayeredRuntimeValidationError{ + field: fmt.Sprintf("Layers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LayeredRuntimeValidationError{ + field: fmt.Sprintf("Layers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return LayeredRuntimeMultiError(errors) + } + + return nil +} + +// LayeredRuntimeMultiError is an error wrapping multiple validation errors +// returned by LayeredRuntime.ValidateAll() if the designated constraints +// aren't met. +type LayeredRuntimeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LayeredRuntimeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LayeredRuntimeMultiError) AllErrors() []error { return m } + +// LayeredRuntimeValidationError is the validation error returned by +// LayeredRuntime.Validate if the designated constraints aren't met. +type LayeredRuntimeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LayeredRuntimeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LayeredRuntimeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e LayeredRuntimeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LayeredRuntimeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LayeredRuntimeValidationError) ErrorName() string { return "LayeredRuntimeValidationError" } + +// Error satisfies the builtin error interface +func (e LayeredRuntimeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLayeredRuntime.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LayeredRuntimeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LayeredRuntimeValidationError{} + +// Validate checks the field values on CustomInlineHeader with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CustomInlineHeader) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CustomInlineHeader with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CustomInlineHeaderMultiError, or nil if none found. +func (m *CustomInlineHeader) ValidateAll() error { + return m.validate(true) +} + +func (m *CustomInlineHeader) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetInlineHeaderName()) < 1 { + err := CustomInlineHeaderValidationError{ + field: "InlineHeaderName", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_CustomInlineHeader_InlineHeaderName_Pattern.MatchString(m.GetInlineHeaderName()) { + err := CustomInlineHeaderValidationError{ + field: "InlineHeaderName", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if _, ok := CustomInlineHeader_InlineHeaderType_name[int32(m.GetInlineHeaderType())]; !ok { + err := CustomInlineHeaderValidationError{ + field: "InlineHeaderType", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return CustomInlineHeaderMultiError(errors) + } + + return nil +} + +// CustomInlineHeaderMultiError is an error wrapping multiple validation errors +// returned by CustomInlineHeader.ValidateAll() if the designated constraints +// aren't met. +type CustomInlineHeaderMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CustomInlineHeaderMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CustomInlineHeaderMultiError) AllErrors() []error { return m } + +// CustomInlineHeaderValidationError is the validation error returned by +// CustomInlineHeader.Validate if the designated constraints aren't met. +type CustomInlineHeaderValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e CustomInlineHeaderValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CustomInlineHeaderValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CustomInlineHeaderValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CustomInlineHeaderValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CustomInlineHeaderValidationError) ErrorName() string { + return "CustomInlineHeaderValidationError" +} + +// Error satisfies the builtin error interface +func (e CustomInlineHeaderValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCustomInlineHeader.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CustomInlineHeaderValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CustomInlineHeaderValidationError{} + +var _CustomInlineHeader_InlineHeaderName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on MemoryAllocatorManager with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *MemoryAllocatorManager) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MemoryAllocatorManager with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MemoryAllocatorManagerMultiError, or nil if none found. +func (m *MemoryAllocatorManager) ValidateAll() error { + return m.validate(true) +} + +func (m *MemoryAllocatorManager) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for BytesToRelease + + if all { + switch v := interface{}(m.GetMemoryReleaseInterval()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MemoryAllocatorManagerValidationError{ + field: "MemoryReleaseInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MemoryAllocatorManagerValidationError{ + field: "MemoryReleaseInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMemoryReleaseInterval()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MemoryAllocatorManagerValidationError{ + field: "MemoryReleaseInterval", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return MemoryAllocatorManagerMultiError(errors) + } + + return nil +} + +// MemoryAllocatorManagerMultiError is an error wrapping multiple validation +// errors returned by MemoryAllocatorManager.ValidateAll() if the designated +// constraints aren't met. +type MemoryAllocatorManagerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m MemoryAllocatorManagerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MemoryAllocatorManagerMultiError) AllErrors() []error { return m } + +// MemoryAllocatorManagerValidationError is the validation error returned by +// MemoryAllocatorManager.Validate if the designated constraints aren't met. +type MemoryAllocatorManagerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MemoryAllocatorManagerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MemoryAllocatorManagerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MemoryAllocatorManagerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MemoryAllocatorManagerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MemoryAllocatorManagerValidationError) ErrorName() string { + return "MemoryAllocatorManagerValidationError" +} + +// Error satisfies the builtin error interface +func (e MemoryAllocatorManagerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMemoryAllocatorManager.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MemoryAllocatorManagerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MemoryAllocatorManagerValidationError{} + +// Validate checks the field values on Bootstrap_StaticResources with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Bootstrap_StaticResources) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Bootstrap_StaticResources with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Bootstrap_StaticResourcesMultiError, or nil if none found. 
+func (m *Bootstrap_StaticResources) ValidateAll() error { + return m.validate(true) +} + +func (m *Bootstrap_StaticResources) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetListeners() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Bootstrap_StaticResourcesValidationError{ + field: fmt.Sprintf("Listeners[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Bootstrap_StaticResourcesValidationError{ + field: fmt.Sprintf("Listeners[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Bootstrap_StaticResourcesValidationError{ + field: fmt.Sprintf("Listeners[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetClusters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Bootstrap_StaticResourcesValidationError{ + field: fmt.Sprintf("Clusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Bootstrap_StaticResourcesValidationError{ + field: fmt.Sprintf("Clusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Bootstrap_StaticResourcesValidationError{ + field: fmt.Sprintf("Clusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetSecrets() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Bootstrap_StaticResourcesValidationError{ + field: fmt.Sprintf("Secrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Bootstrap_StaticResourcesValidationError{ + field: fmt.Sprintf("Secrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Bootstrap_StaticResourcesValidationError{ + field: fmt.Sprintf("Secrets[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return Bootstrap_StaticResourcesMultiError(errors) + } + + return nil +} + +// Bootstrap_StaticResourcesMultiError is an error wrapping multiple validation +// errors returned by Bootstrap_StaticResources.ValidateAll() if the +// designated constraints aren't met. +type Bootstrap_StaticResourcesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m Bootstrap_StaticResourcesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Bootstrap_StaticResourcesMultiError) AllErrors() []error { return m } + +// Bootstrap_StaticResourcesValidationError is the validation error returned by +// Bootstrap_StaticResources.Validate if the designated constraints aren't met. +type Bootstrap_StaticResourcesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Bootstrap_StaticResourcesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Bootstrap_StaticResourcesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Bootstrap_StaticResourcesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Bootstrap_StaticResourcesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Bootstrap_StaticResourcesValidationError) ErrorName() string { + return "Bootstrap_StaticResourcesValidationError" +} + +// Error satisfies the builtin error interface +func (e Bootstrap_StaticResourcesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBootstrap_StaticResources.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Bootstrap_StaticResourcesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Bootstrap_StaticResourcesValidationError{} + +// Validate checks the field values on Bootstrap_DynamicResources with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Bootstrap_DynamicResources) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Bootstrap_DynamicResources with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Bootstrap_DynamicResourcesMultiError, or nil if none found. 
+func (m *Bootstrap_DynamicResources) ValidateAll() error { + return m.validate(true) +} + +func (m *Bootstrap_DynamicResources) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetLdsConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Bootstrap_DynamicResourcesValidationError{ + field: "LdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Bootstrap_DynamicResourcesValidationError{ + field: "LdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLdsConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Bootstrap_DynamicResourcesValidationError{ + field: "LdsConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for LdsResourcesLocator + + if all { + switch v := interface{}(m.GetCdsConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Bootstrap_DynamicResourcesValidationError{ + field: "CdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Bootstrap_DynamicResourcesValidationError{ + field: "CdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCdsConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Bootstrap_DynamicResourcesValidationError{ + field: "CdsConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for CdsResourcesLocator + + if all { + switch v := interface{}(m.GetAdsConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Bootstrap_DynamicResourcesValidationError{ + field: "AdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Bootstrap_DynamicResourcesValidationError{ + field: "AdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAdsConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Bootstrap_DynamicResourcesValidationError{ + field: "AdsConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Bootstrap_DynamicResourcesMultiError(errors) + } + + return nil +} + +// Bootstrap_DynamicResourcesMultiError is an error wrapping multiple +// validation errors returned by Bootstrap_DynamicResources.ValidateAll() if +// the designated constraints aren't met. +type Bootstrap_DynamicResourcesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Bootstrap_DynamicResourcesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m Bootstrap_DynamicResourcesMultiError) AllErrors() []error { return m } + +// Bootstrap_DynamicResourcesValidationError is the validation error returned +// by Bootstrap_DynamicResources.Validate if the designated constraints aren't met. +type Bootstrap_DynamicResourcesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Bootstrap_DynamicResourcesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Bootstrap_DynamicResourcesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Bootstrap_DynamicResourcesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Bootstrap_DynamicResourcesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Bootstrap_DynamicResourcesValidationError) ErrorName() string { + return "Bootstrap_DynamicResourcesValidationError" +} + +// Error satisfies the builtin error interface +func (e Bootstrap_DynamicResourcesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBootstrap_DynamicResources.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Bootstrap_DynamicResourcesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Bootstrap_DynamicResourcesValidationError{} + +// Validate checks the field values on Bootstrap_ApplicationLogConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Bootstrap_ApplicationLogConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Bootstrap_ApplicationLogConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// Bootstrap_ApplicationLogConfigMultiError, or nil if none found. 
+func (m *Bootstrap_ApplicationLogConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Bootstrap_ApplicationLogConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetLogFormat()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Bootstrap_ApplicationLogConfigValidationError{ + field: "LogFormat", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Bootstrap_ApplicationLogConfigValidationError{ + field: "LogFormat", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLogFormat()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Bootstrap_ApplicationLogConfigValidationError{ + field: "LogFormat", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Bootstrap_ApplicationLogConfigMultiError(errors) + } + + return nil +} + +// Bootstrap_ApplicationLogConfigMultiError is an error wrapping multiple +// validation errors returned by Bootstrap_ApplicationLogConfig.ValidateAll() +// if the designated constraints aren't met. +type Bootstrap_ApplicationLogConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Bootstrap_ApplicationLogConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Bootstrap_ApplicationLogConfigMultiError) AllErrors() []error { return m } + +// Bootstrap_ApplicationLogConfigValidationError is the validation error +// returned by Bootstrap_ApplicationLogConfig.Validate if the designated +// constraints aren't met. +type Bootstrap_ApplicationLogConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Bootstrap_ApplicationLogConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Bootstrap_ApplicationLogConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Bootstrap_ApplicationLogConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Bootstrap_ApplicationLogConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Bootstrap_ApplicationLogConfigValidationError) ErrorName() string { + return "Bootstrap_ApplicationLogConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Bootstrap_ApplicationLogConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBootstrap_ApplicationLogConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Bootstrap_ApplicationLogConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Bootstrap_ApplicationLogConfigValidationError{} + +// Validate checks the field values on Bootstrap_DeferredStatOptions with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Bootstrap_DeferredStatOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Bootstrap_DeferredStatOptions with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// Bootstrap_DeferredStatOptionsMultiError, or nil if none found. +func (m *Bootstrap_DeferredStatOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *Bootstrap_DeferredStatOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for EnableDeferredCreationStats + + if len(errors) > 0 { + return Bootstrap_DeferredStatOptionsMultiError(errors) + } + + return nil +} + +// Bootstrap_DeferredStatOptionsMultiError is an error wrapping multiple +// validation errors returned by Bootstrap_DeferredStatOptions.ValidateAll() +// if the designated constraints aren't met. +type Bootstrap_DeferredStatOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Bootstrap_DeferredStatOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Bootstrap_DeferredStatOptionsMultiError) AllErrors() []error { return m } + +// Bootstrap_DeferredStatOptionsValidationError is the validation error +// returned by Bootstrap_DeferredStatOptions.Validate if the designated +// constraints aren't met. +type Bootstrap_DeferredStatOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Bootstrap_DeferredStatOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Bootstrap_DeferredStatOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Bootstrap_DeferredStatOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Bootstrap_DeferredStatOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Bootstrap_DeferredStatOptionsValidationError) ErrorName() string { + return "Bootstrap_DeferredStatOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e Bootstrap_DeferredStatOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBootstrap_DeferredStatOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Bootstrap_DeferredStatOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Bootstrap_DeferredStatOptionsValidationError{} + +// Validate checks the field values on Bootstrap_GrpcAsyncClientManagerConfig +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *Bootstrap_GrpcAsyncClientManagerConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Bootstrap_GrpcAsyncClientManagerConfig with the rules defined in the proto +// definition for this message. If any rules are violated, the result is a +// list of violation errors wrapped in +// Bootstrap_GrpcAsyncClientManagerConfigMultiError, or nil if none found. +func (m *Bootstrap_GrpcAsyncClientManagerConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Bootstrap_GrpcAsyncClientManagerConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if d := m.GetMaxCachedEntryIdleDuration(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = Bootstrap_GrpcAsyncClientManagerConfigValidationError{ + field: "MaxCachedEntryIdleDuration", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gte := time.Duration(5*time.Second + 0*time.Nanosecond) + + if dur < gte { + err := Bootstrap_GrpcAsyncClientManagerConfigValidationError{ + field: "MaxCachedEntryIdleDuration", + reason: "value must be greater than or equal to 5s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if len(errors) > 0 { + return Bootstrap_GrpcAsyncClientManagerConfigMultiError(errors) + } + + return nil +} + +// Bootstrap_GrpcAsyncClientManagerConfigMultiError is an error wrapping +// multiple validation errors returned by +// Bootstrap_GrpcAsyncClientManagerConfig.ValidateAll() if the designated +// constraints aren't met. +type Bootstrap_GrpcAsyncClientManagerConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Bootstrap_GrpcAsyncClientManagerConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Bootstrap_GrpcAsyncClientManagerConfigMultiError) AllErrors() []error { return m } + +// Bootstrap_GrpcAsyncClientManagerConfigValidationError is the validation +// error returned by Bootstrap_GrpcAsyncClientManagerConfig.Validate if the +// designated constraints aren't met. +type Bootstrap_GrpcAsyncClientManagerConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e Bootstrap_GrpcAsyncClientManagerConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Bootstrap_GrpcAsyncClientManagerConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Bootstrap_GrpcAsyncClientManagerConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Bootstrap_GrpcAsyncClientManagerConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Bootstrap_GrpcAsyncClientManagerConfigValidationError) ErrorName() string { + return "Bootstrap_GrpcAsyncClientManagerConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Bootstrap_GrpcAsyncClientManagerConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBootstrap_GrpcAsyncClientManagerConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Bootstrap_GrpcAsyncClientManagerConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Bootstrap_GrpcAsyncClientManagerConfigValidationError{} + +// Validate checks the field values on Bootstrap_ApplicationLogConfig_LogFormat +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *Bootstrap_ApplicationLogConfig_LogFormat) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Bootstrap_ApplicationLogConfig_LogFormat with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// Bootstrap_ApplicationLogConfig_LogFormatMultiError, or nil if none found. 
+func (m *Bootstrap_ApplicationLogConfig_LogFormat) ValidateAll() error { + return m.validate(true) +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofLogFormatPresent := false + switch v := m.LogFormat.(type) { + case *Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat: + if v == nil { + err := Bootstrap_ApplicationLogConfig_LogFormatValidationError{ + field: "LogFormat", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLogFormatPresent = true + + if all { + switch v := interface{}(m.GetJsonFormat()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Bootstrap_ApplicationLogConfig_LogFormatValidationError{ + field: "JsonFormat", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Bootstrap_ApplicationLogConfig_LogFormatValidationError{ + field: "JsonFormat", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetJsonFormat()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Bootstrap_ApplicationLogConfig_LogFormatValidationError{ + field: "JsonFormat", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Bootstrap_ApplicationLogConfig_LogFormat_TextFormat: + if v == nil { + err := Bootstrap_ApplicationLogConfig_LogFormatValidationError{ + field: "LogFormat", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLogFormatPresent = true + // no validation rules for TextFormat + default: + _ = v // ensures v is used + } + if !oneofLogFormatPresent { + err := Bootstrap_ApplicationLogConfig_LogFormatValidationError{ + field: "LogFormat", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Bootstrap_ApplicationLogConfig_LogFormatMultiError(errors) + } + + return nil +} + +// Bootstrap_ApplicationLogConfig_LogFormatMultiError is an error wrapping +// multiple validation errors returned by +// Bootstrap_ApplicationLogConfig_LogFormat.ValidateAll() if the designated +// constraints aren't met. +type Bootstrap_ApplicationLogConfig_LogFormatMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Bootstrap_ApplicationLogConfig_LogFormatMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Bootstrap_ApplicationLogConfig_LogFormatMultiError) AllErrors() []error { return m } + +// Bootstrap_ApplicationLogConfig_LogFormatValidationError is the validation +// error returned by Bootstrap_ApplicationLogConfig_LogFormat.Validate if the +// designated constraints aren't met. +type Bootstrap_ApplicationLogConfig_LogFormatValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Bootstrap_ApplicationLogConfig_LogFormatValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e Bootstrap_ApplicationLogConfig_LogFormatValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Bootstrap_ApplicationLogConfig_LogFormatValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Bootstrap_ApplicationLogConfig_LogFormatValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Bootstrap_ApplicationLogConfig_LogFormatValidationError) ErrorName() string { + return "Bootstrap_ApplicationLogConfig_LogFormatValidationError" +} + +// Error satisfies the builtin error interface +func (e Bootstrap_ApplicationLogConfig_LogFormatValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBootstrap_ApplicationLogConfig_LogFormat.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Bootstrap_ApplicationLogConfig_LogFormatValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Bootstrap_ApplicationLogConfig_LogFormatValidationError{} + +// Validate checks the field values on ClusterManager_OutlierDetection with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ClusterManager_OutlierDetection) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClusterManager_OutlierDetection with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// ClusterManager_OutlierDetectionMultiError, or nil if none found. +func (m *ClusterManager_OutlierDetection) ValidateAll() error { + return m.validate(true) +} + +func (m *ClusterManager_OutlierDetection) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for EventLogPath + + if all { + switch v := interface{}(m.GetEventService()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterManager_OutlierDetectionValidationError{ + field: "EventService", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterManager_OutlierDetectionValidationError{ + field: "EventService", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEventService()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterManager_OutlierDetectionValidationError{ + field: "EventService", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ClusterManager_OutlierDetectionMultiError(errors) + } + + return nil +} + +// ClusterManager_OutlierDetectionMultiError is an error wrapping multiple +// validation errors returned by ClusterManager_OutlierDetection.ValidateAll() +// if the designated constraints aren't met. +type ClusterManager_OutlierDetectionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ClusterManager_OutlierDetectionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClusterManager_OutlierDetectionMultiError) AllErrors() []error { return m } + +// ClusterManager_OutlierDetectionValidationError is the validation error +// returned by ClusterManager_OutlierDetection.Validate if the designated +// constraints aren't met. +type ClusterManager_OutlierDetectionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClusterManager_OutlierDetectionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClusterManager_OutlierDetectionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClusterManager_OutlierDetectionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClusterManager_OutlierDetectionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClusterManager_OutlierDetectionValidationError) ErrorName() string { + return "ClusterManager_OutlierDetectionValidationError" +} + +// Error satisfies the builtin error interface +func (e ClusterManager_OutlierDetectionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClusterManager_OutlierDetection.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClusterManager_OutlierDetectionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClusterManager_OutlierDetectionValidationError{} + +// Validate checks the field values on Watchdog_WatchdogAction with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Watchdog_WatchdogAction) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Watchdog_WatchdogAction with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Watchdog_WatchdogActionMultiError, or nil if none found. 
+func (m *Watchdog_WatchdogAction) ValidateAll() error { + return m.validate(true) +} + +func (m *Watchdog_WatchdogAction) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Watchdog_WatchdogActionValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Watchdog_WatchdogActionValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Watchdog_WatchdogActionValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if _, ok := Watchdog_WatchdogAction_WatchdogEvent_name[int32(m.GetEvent())]; !ok { + err := Watchdog_WatchdogActionValidationError{ + field: "Event", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Watchdog_WatchdogActionMultiError(errors) + } + + return nil +} + +// Watchdog_WatchdogActionMultiError is an error wrapping multiple validation +// errors returned by Watchdog_WatchdogAction.ValidateAll() if the designated +// constraints aren't met. +type Watchdog_WatchdogActionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Watchdog_WatchdogActionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Watchdog_WatchdogActionMultiError) AllErrors() []error { return m } + +// Watchdog_WatchdogActionValidationError is the validation error returned by +// Watchdog_WatchdogAction.Validate if the designated constraints aren't met. +type Watchdog_WatchdogActionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Watchdog_WatchdogActionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Watchdog_WatchdogActionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Watchdog_WatchdogActionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Watchdog_WatchdogActionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Watchdog_WatchdogActionValidationError) ErrorName() string { + return "Watchdog_WatchdogActionValidationError" +} + +// Error satisfies the builtin error interface +func (e Watchdog_WatchdogActionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sWatchdog_WatchdogAction.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Watchdog_WatchdogActionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Watchdog_WatchdogActionValidationError{} + +// Validate checks the field values on RuntimeLayer_DiskLayer with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RuntimeLayer_DiskLayer) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RuntimeLayer_DiskLayer with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RuntimeLayer_DiskLayerMultiError, or nil if none found. +func (m *RuntimeLayer_DiskLayer) ValidateAll() error { + return m.validate(true) +} + +func (m *RuntimeLayer_DiskLayer) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for SymlinkRoot + + // no validation rules for Subdirectory + + // no validation rules for AppendServiceCluster + + if len(errors) > 0 { + return RuntimeLayer_DiskLayerMultiError(errors) + } + + return nil +} + +// RuntimeLayer_DiskLayerMultiError is an error wrapping multiple validation +// errors returned by RuntimeLayer_DiskLayer.ValidateAll() if the designated +// constraints aren't met. +type RuntimeLayer_DiskLayerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RuntimeLayer_DiskLayerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RuntimeLayer_DiskLayerMultiError) AllErrors() []error { return m } + +// RuntimeLayer_DiskLayerValidationError is the validation error returned by +// RuntimeLayer_DiskLayer.Validate if the designated constraints aren't met. +type RuntimeLayer_DiskLayerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RuntimeLayer_DiskLayerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RuntimeLayer_DiskLayerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RuntimeLayer_DiskLayerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RuntimeLayer_DiskLayerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RuntimeLayer_DiskLayerValidationError) ErrorName() string { + return "RuntimeLayer_DiskLayerValidationError" +} + +// Error satisfies the builtin error interface +func (e RuntimeLayer_DiskLayerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRuntimeLayer_DiskLayer.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RuntimeLayer_DiskLayerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RuntimeLayer_DiskLayerValidationError{} + +// Validate checks the field values on RuntimeLayer_AdminLayer with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RuntimeLayer_AdminLayer) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RuntimeLayer_AdminLayer with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RuntimeLayer_AdminLayerMultiError, or nil if none found. +func (m *RuntimeLayer_AdminLayer) ValidateAll() error { + return m.validate(true) +} + +func (m *RuntimeLayer_AdminLayer) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return RuntimeLayer_AdminLayerMultiError(errors) + } + + return nil +} + +// RuntimeLayer_AdminLayerMultiError is an error wrapping multiple validation +// errors returned by RuntimeLayer_AdminLayer.ValidateAll() if the designated +// constraints aren't met. +type RuntimeLayer_AdminLayerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RuntimeLayer_AdminLayerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RuntimeLayer_AdminLayerMultiError) AllErrors() []error { return m } + +// RuntimeLayer_AdminLayerValidationError is the validation error returned by +// RuntimeLayer_AdminLayer.Validate if the designated constraints aren't met. +type RuntimeLayer_AdminLayerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RuntimeLayer_AdminLayerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RuntimeLayer_AdminLayerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RuntimeLayer_AdminLayerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RuntimeLayer_AdminLayerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RuntimeLayer_AdminLayerValidationError) ErrorName() string { + return "RuntimeLayer_AdminLayerValidationError" +} + +// Error satisfies the builtin error interface +func (e RuntimeLayer_AdminLayerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRuntimeLayer_AdminLayer.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RuntimeLayer_AdminLayerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RuntimeLayer_AdminLayerValidationError{} + +// Validate checks the field values on RuntimeLayer_RtdsLayer with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RuntimeLayer_RtdsLayer) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RuntimeLayer_RtdsLayer with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RuntimeLayer_RtdsLayerMultiError, or nil if none found. +func (m *RuntimeLayer_RtdsLayer) ValidateAll() error { + return m.validate(true) +} + +func (m *RuntimeLayer_RtdsLayer) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + if all { + switch v := interface{}(m.GetRtdsConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RuntimeLayer_RtdsLayerValidationError{ + field: "RtdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RuntimeLayer_RtdsLayerValidationError{ + field: "RtdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRtdsConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RuntimeLayer_RtdsLayerValidationError{ + field: "RtdsConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RuntimeLayer_RtdsLayerMultiError(errors) + } + + return nil +} + +// RuntimeLayer_RtdsLayerMultiError is an error wrapping multiple validation +// errors returned by RuntimeLayer_RtdsLayer.ValidateAll() if the designated +// constraints aren't met. +type RuntimeLayer_RtdsLayerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RuntimeLayer_RtdsLayerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RuntimeLayer_RtdsLayerMultiError) AllErrors() []error { return m } + +// RuntimeLayer_RtdsLayerValidationError is the validation error returned by +// RuntimeLayer_RtdsLayer.Validate if the designated constraints aren't met. +type RuntimeLayer_RtdsLayerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RuntimeLayer_RtdsLayerValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e RuntimeLayer_RtdsLayerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RuntimeLayer_RtdsLayerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RuntimeLayer_RtdsLayerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RuntimeLayer_RtdsLayerValidationError) ErrorName() string { + return "RuntimeLayer_RtdsLayerValidationError" +} + +// Error satisfies the builtin error interface +func (e RuntimeLayer_RtdsLayerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRuntimeLayer_RtdsLayer.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RuntimeLayer_RtdsLayerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RuntimeLayer_RtdsLayerValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap_vtproto.pb.go new file mode 100644 index 0000000000..51e10e0e08 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap_vtproto.pb.go @@ -0,0 +1,3128 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/bootstrap/v3/bootstrap.proto + +package bootstrapv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Bootstrap_StaticResources) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bootstrap_StaticResources) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_StaticResources) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Secrets) > 0 { + for iNdEx := len(m.Secrets) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Secrets[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Secrets[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Clusters) > 0 { + for iNdEx := len(m.Clusters) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Clusters[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Clusters[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Listeners) > 0 { + for iNdEx := len(m.Listeners) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Listeners[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Listeners[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Bootstrap_DynamicResources) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bootstrap_DynamicResources) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_DynamicResources) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CdsResourcesLocator) > 0 { + i -= len(m.CdsResourcesLocator) + copy(dAtA[i:], m.CdsResourcesLocator) + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CdsResourcesLocator))) + i-- + dAtA[i] = 0x32 + } + if len(m.LdsResourcesLocator) > 0 { + i -= len(m.LdsResourcesLocator) + copy(dAtA[i:], m.LdsResourcesLocator) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LdsResourcesLocator))) + i-- + dAtA[i] = 0x2a + } + if m.AdsConfig != nil { + if vtmsg, ok := interface{}(m.AdsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AdsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.CdsConfig != nil { + if vtmsg, ok := interface{}(m.CdsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CdsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.LdsConfig != nil { + if vtmsg, ok := interface{}(m.LdsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LdsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.LogFormat.(*Bootstrap_ApplicationLogConfig_LogFormat_TextFormat); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.LogFormat.(*Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.JsonFormat != nil { + size, err := 
(*structpb.Struct)(m.JsonFormat).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Bootstrap_ApplicationLogConfig_LogFormat_TextFormat) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat_TextFormat) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.TextFormat) + copy(dAtA[i:], m.TextFormat) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TextFormat))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *Bootstrap_ApplicationLogConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bootstrap_ApplicationLogConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_ApplicationLogConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LogFormat != nil { + size, err := m.LogFormat.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Bootstrap_DeferredStatOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bootstrap_DeferredStatOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_DeferredStatOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.EnableDeferredCreationStats { + i-- + if m.EnableDeferredCreationStats { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Bootstrap_GrpcAsyncClientManagerConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bootstrap_GrpcAsyncClientManagerConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_GrpcAsyncClientManagerConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxCachedEntryIdleDuration != nil { + size, err := 
(*durationpb.Duration)(m.MaxCachedEntryIdleDuration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Bootstrap) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bootstrap) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MemoryAllocatorManager != nil { + size, err := m.MemoryAllocatorManager.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xca + } + if m.GrpcAsyncClientManagerConfig != nil { + size, err := m.GrpcAsyncClientManagerConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc2 + } + if m.DeferredStatOptions != nil { + size, err := m.DeferredStatOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xba + } + if m.ApplicationLogConfig != nil { + size, err := m.ApplicationLogConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb2 + } + if m.ListenerManager != nil { + if vtmsg, ok := interface{}(m.ListenerManager).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ListenerManager) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xaa + } + if m.XdsConfigTrackerExtension != nil { + if vtmsg, ok := interface{}(m.XdsConfigTrackerExtension).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.XdsConfigTrackerExtension) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + if m.XdsDelegateExtension != nil { + if vtmsg, ok := interface{}(m.XdsDelegateExtension).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) 
+ } else { + encoded, err := proto.Marshal(m.XdsDelegateExtension) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + if m.DefaultRegexEngine != nil { + if vtmsg, ok := interface{}(m.DefaultRegexEngine).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DefaultRegexEngine) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + if len(m.PerfTracingFilePath) > 0 { + i -= len(m.PerfTracingFilePath) + copy(dAtA[i:], m.PerfTracingFilePath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PerfTracingFilePath))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a + } + if len(m.InlineHeaders) > 0 { + for iNdEx := len(m.InlineHeaders) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.InlineHeaders[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 + } + } + if m.TypedDnsResolverConfig != nil { + if vtmsg, ok := interface{}(m.TypedDnsResolverConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TypedDnsResolverConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa + } + if m.DnsResolutionConfig != nil { + if vtmsg, ok := interface{}(m.DnsResolutionConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DnsResolutionConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf2 + } + if msg, ok := m.StatsFlush.(*Bootstrap_StatsFlushOnAdmin); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.FatalActions) > 0 { + for iNdEx := len(m.FatalActions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.FatalActions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + } + if m.Watchdogs != nil { + size, err := m.Watchdogs.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } + if len(m.NodeContextParams) > 0 { + for iNdEx := len(m.NodeContextParams) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NodeContextParams[iNdEx]) + copy(dAtA[i:], 
m.NodeContextParams[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.NodeContextParams[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + } + if len(m.CertificateProviderInstances) > 0 { + for k := range m.CertificateProviderInstances { + v := m.CertificateProviderInstances[k] + baseI := i + if vtmsg, ok := interface{}(v).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(v) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } + if len(m.DefaultSocketInterface) > 0 { + i -= len(m.DefaultSocketInterface) + copy(dAtA[i:], m.DefaultSocketInterface) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DefaultSocketInterface))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if m.DefaultConfigSource != nil { + if vtmsg, ok := interface{}(m.DefaultConfigSource).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DefaultConfigSource) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if len(m.ConfigSources) > 0 { + for iNdEx := len(m.ConfigSources) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ConfigSources[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConfigSources[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + } + if len(m.BootstrapExtensions) > 0 { + for iNdEx := len(m.BootstrapExtensions) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.BootstrapExtensions[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.BootstrapExtensions[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + } + if m.UseTcpForDnsLookups { + i-- + if m.UseTcpForDnsLookups { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if m.StatsServerVersionOverride != nil { + size, err := (*wrapperspb.UInt64Value)(m.StatsServerVersionOverride).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if len(m.HeaderPrefix) > 0 { + i -= len(m.HeaderPrefix) + copy(dAtA[i:], m.HeaderPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderPrefix))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if m.LayeredRuntime != nil { + size, err := m.LayeredRuntime.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.EnableDispatcherStats { + i-- + if m.EnableDispatcherStats { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.OverloadManager != nil { + if vtmsg, ok := interface{}(m.OverloadManager).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.OverloadManager) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x7a + } + if m.HdsConfig != nil { + if vtmsg, ok := interface{}(m.HdsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HdsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x72 + } + if m.StatsConfig != nil { + if vtmsg, ok := interface{}(m.StatsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.StatsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + if m.Admin != nil { + size, err := m.Admin.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if m.Tracing != nil { + if vtmsg, ok := interface{}(m.Tracing).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Tracing) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + if m.Watchdog != nil { + size, err := m.Watchdog.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.StatsFlushInterval != nil { + size, err := (*durationpb.Duration)(m.StatsFlushInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 
0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if len(m.StatsSinks) > 0 { + for iNdEx := len(m.StatsSinks) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.StatsSinks[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.StatsSinks[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.FlagsPath) > 0 { + i -= len(m.FlagsPath) + copy(dAtA[i:], m.FlagsPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.FlagsPath))) + i-- + dAtA[i] = 0x2a + } + if m.ClusterManager != nil { + size, err := m.ClusterManager.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.DynamicResources != nil { + size, err := m.DynamicResources.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.StaticResources != nil { + size, err := m.StaticResources.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Node != nil { + if vtmsg, ok := interface{}(m.Node).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Node) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Bootstrap_StatsFlushOnAdmin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Bootstrap_StatsFlushOnAdmin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.StatsFlushOnAdmin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe8 + return len(dAtA) - i, nil +} +func (m *Admin) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Admin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Admin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.IgnoreGlobalConnLimit { + i-- + if m.IgnoreGlobalConnLimit { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.AccessLog) > 0 { + for iNdEx := len(m.AccessLog) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := 
interface{}(m.AccessLog[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AccessLog[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.SocketOptions) > 0 { + for iNdEx := len(m.SocketOptions) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.SocketOptions[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SocketOptions[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + } + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if len(m.ProfilePath) > 0 { + i -= len(m.ProfilePath) + copy(dAtA[i:], m.ProfilePath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ProfilePath))) + i-- + dAtA[i] = 0x12 + } + if len(m.AccessLogPath) > 0 { + i -= len(m.AccessLogPath) + copy(dAtA[i:], m.AccessLogPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AccessLogPath))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterManager_OutlierDetection) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterManager_OutlierDetection) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterManager_OutlierDetection) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.EventService != nil { + if vtmsg, ok := interface{}(m.EventService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.EventService) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.EventLogPath) > 0 { + i -= len(m.EventLogPath) + copy(dAtA[i:], m.EventLogPath) + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(len(m.EventLogPath))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterManager) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterManager) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.EnableDeferredClusterCreation { + i-- + if m.EnableDeferredClusterCreation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.LoadStatsConfig != nil { + if vtmsg, ok := interface{}(m.LoadStatsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LoadStatsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.UpstreamBindConfig != nil { + if vtmsg, ok := interface{}(m.UpstreamBindConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UpstreamBindConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.OutlierDetection != nil { + size, err := m.OutlierDetection.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.LocalClusterName) > 0 { + i -= len(m.LocalClusterName) + copy(dAtA[i:], m.LocalClusterName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LocalClusterName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Watchdogs) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Watchdogs) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Watchdogs) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.WorkerWatchdog != nil { + size, err := m.WorkerWatchdog.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MainThreadWatchdog != nil { + size, err := 
m.MainThreadWatchdog.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Watchdog_WatchdogAction) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Watchdog_WatchdogAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Watchdog_WatchdogAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Event != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Event)) + i-- + dAtA[i] = 0x10 + } + if m.Config != nil { + if vtmsg, ok := interface{}(m.Config).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Config) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Watchdog) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Watchdog) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Watchdog) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Actions) > 0 { + for iNdEx := len(m.Actions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Actions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + } + if m.MaxKillTimeoutJitter != nil { + size, err := (*durationpb.Duration)(m.MaxKillTimeoutJitter).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.MultikillThreshold != nil { + if vtmsg, ok := interface{}(m.MultikillThreshold).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MultikillThreshold) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + if m.MultikillTimeout != nil { + size, err := (*durationpb.Duration)(m.MultikillTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + 
if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.KillTimeout != nil { + size, err := (*durationpb.Duration)(m.KillTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.MegamissTimeout != nil { + size, err := (*durationpb.Duration)(m.MegamissTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MissTimeout != nil { + size, err := (*durationpb.Duration)(m.MissTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FatalAction) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FatalAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FatalAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Config != nil { + if vtmsg, ok := interface{}(m.Config).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Config) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Runtime) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Runtime) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Runtime) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Base != nil { + size, err := (*structpb.Struct)(m.Base).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.OverrideSubdirectory) > 0 { + i -= len(m.OverrideSubdirectory) + copy(dAtA[i:], m.OverrideSubdirectory) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.OverrideSubdirectory))) + i-- + dAtA[i] = 0x1a + } + if len(m.Subdirectory) > 0 { + i -= len(m.Subdirectory) + copy(dAtA[i:], m.Subdirectory) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Subdirectory))) + i-- + dAtA[i] = 0x12 + } + if len(m.SymlinkRoot) > 0 { + i -= len(m.SymlinkRoot) + 
copy(dAtA[i:], m.SymlinkRoot) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SymlinkRoot))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RuntimeLayer_DiskLayer) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeLayer_DiskLayer) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeLayer_DiskLayer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Subdirectory) > 0 { + i -= len(m.Subdirectory) + copy(dAtA[i:], m.Subdirectory) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Subdirectory))) + i-- + dAtA[i] = 0x1a + } + if m.AppendServiceCluster { + i-- + if m.AppendServiceCluster { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.SymlinkRoot) > 0 { + i -= len(m.SymlinkRoot) + copy(dAtA[i:], m.SymlinkRoot) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SymlinkRoot))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RuntimeLayer_AdminLayer) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeLayer_AdminLayer) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeLayer_AdminLayer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *RuntimeLayer_RtdsLayer) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeLayer_RtdsLayer) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeLayer_RtdsLayer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RtdsConfig != nil { + if vtmsg, ok := interface{}(m.RtdsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RtdsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RuntimeLayer) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeLayer) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeLayer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.LayerSpecifier.(*RuntimeLayer_RtdsLayer_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.LayerSpecifier.(*RuntimeLayer_AdminLayer_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.LayerSpecifier.(*RuntimeLayer_DiskLayer_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.LayerSpecifier.(*RuntimeLayer_StaticLayer); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RuntimeLayer_StaticLayer) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeLayer_StaticLayer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StaticLayer != nil { + size, err := (*structpb.Struct)(m.StaticLayer).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *RuntimeLayer_DiskLayer_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeLayer_DiskLayer_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DiskLayer != nil { + size, err := m.DiskLayer.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RuntimeLayer_AdminLayer_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeLayer_AdminLayer_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AdminLayer != nil { + size, err := m.AdminLayer.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *RuntimeLayer_RtdsLayer_) 
MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeLayer_RtdsLayer_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RtdsLayer != nil { + size, err := m.RtdsLayer.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *LayeredRuntime) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LayeredRuntime) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LayeredRuntime) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Layers) > 0 { + for iNdEx := len(m.Layers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Layers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CustomInlineHeader) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomInlineHeader) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomInlineHeader) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.InlineHeaderType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.InlineHeaderType)) + i-- + dAtA[i] = 0x10 + } + if len(m.InlineHeaderName) > 0 { + i -= len(m.InlineHeaderName) + copy(dAtA[i:], m.InlineHeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.InlineHeaderName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MemoryAllocatorManager) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemoryAllocatorManager) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MemoryAllocatorManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MemoryReleaseInterval != nil { + size, err := (*durationpb.Duration)(m.MemoryReleaseInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size 
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.BytesToRelease != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.BytesToRelease)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Bootstrap_StaticResources) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Listeners) > 0 { + for _, e := range m.Listeners { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Clusters) > 0 { + for _, e := range m.Clusters { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Bootstrap_DynamicResources) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LdsConfig != nil { + if size, ok := interface{}(m.LdsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LdsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CdsConfig != nil { + if size, ok := interface{}(m.CdsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CdsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AdsConfig != nil { + if size, ok := interface{}(m.AdsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.AdsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.LdsResourcesLocator) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.CdsResourcesLocator) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.LogFormat.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.JsonFormat != nil { + l = (*structpb.Struct)(m.JsonFormat).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Bootstrap_ApplicationLogConfig_LogFormat_TextFormat) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TextFormat) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *Bootstrap_ApplicationLogConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LogFormat != nil { + l = m.LogFormat.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Bootstrap_DeferredStatOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EnableDeferredCreationStats { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *Bootstrap_GrpcAsyncClientManagerConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if 
m.MaxCachedEntryIdleDuration != nil { + l = (*durationpb.Duration)(m.MaxCachedEntryIdleDuration).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Bootstrap) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Node != nil { + if size, ok := interface{}(m.Node).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Node) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StaticResources != nil { + l = m.StaticResources.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DynamicResources != nil { + l = m.DynamicResources.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClusterManager != nil { + l = m.ClusterManager.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.FlagsPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.StatsSinks) > 0 { + for _, e := range m.StatsSinks { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.StatsFlushInterval != nil { + l = (*durationpb.Duration)(m.StatsFlushInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Watchdog != nil { + l = m.Watchdog.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Tracing != nil { + if size, ok := interface{}(m.Tracing).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Tracing) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Admin != nil { + l = m.Admin.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StatsConfig != nil { + if size, ok := interface{}(m.StatsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.StatsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HdsConfig != nil { + if size, ok := interface{}(m.HdsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.HdsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OverloadManager != nil { + if size, ok := interface{}(m.OverloadManager).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.OverloadManager) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnableDispatcherStats { + n += 3 + } + if m.LayeredRuntime != nil { + l = m.LayeredRuntime.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.HeaderPrefix) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StatsServerVersionOverride != nil { + l = (*wrapperspb.UInt64Value)(m.StatsServerVersionOverride).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UseTcpForDnsLookups { + n += 3 + } + if len(m.BootstrapExtensions) > 0 { + for _, e := range m.BootstrapExtensions { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ConfigSources) > 0 { + for _, e := range m.ConfigSources { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.DefaultConfigSource != nil { + if size, ok := 
interface{}(m.DefaultConfigSource).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DefaultConfigSource) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DefaultSocketInterface) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.CertificateProviderInstances) > 0 { + for k, v := range m.CertificateProviderInstances { + _ = k + _ = v + l = 0 + if v != nil { + if size, ok := interface{}(v).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(v) + } + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 2 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if len(m.NodeContextParams) > 0 { + for _, s := range m.NodeContextParams { + l = len(s) + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Watchdogs != nil { + l = m.Watchdogs.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.FatalActions) > 0 { + for _, e := range m.FatalActions { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if vtmsg, ok := m.StatsFlush.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.DnsResolutionConfig != nil { + if size, ok := interface{}(m.DnsResolutionConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DnsResolutionConfig) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TypedDnsResolverConfig != nil { + if size, ok := interface{}(m.TypedDnsResolverConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TypedDnsResolverConfig) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.InlineHeaders) > 0 { + for _, e := range m.InlineHeaders { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.PerfTracingFilePath) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DefaultRegexEngine != nil { + if size, ok := interface{}(m.DefaultRegexEngine).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DefaultRegexEngine) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.XdsDelegateExtension != nil { + if size, ok := interface{}(m.XdsDelegateExtension).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.XdsDelegateExtension) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.XdsConfigTrackerExtension != nil { + if size, ok := interface{}(m.XdsConfigTrackerExtension).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.XdsConfigTrackerExtension) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ListenerManager != nil { + if size, ok := interface{}(m.ListenerManager).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ListenerManager) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ApplicationLogConfig != nil { + l = m.ApplicationLogConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DeferredStatOptions != nil { + l = m.DeferredStatOptions.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.GrpcAsyncClientManagerConfig != nil { + l = m.GrpcAsyncClientManagerConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MemoryAllocatorManager != nil { + l = 
m.MemoryAllocatorManager.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Bootstrap_StatsFlushOnAdmin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 3 + return n +} +func (m *Admin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AccessLogPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ProfilePath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.SocketOptions) > 0 { + for _, e := range m.SocketOptions { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.AccessLog) > 0 { + for _, e := range m.AccessLog { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.IgnoreGlobalConnLimit { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ClusterManager_OutlierDetection) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.EventLogPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EventService != nil { + if size, ok := interface{}(m.EventService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.EventService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClusterManager) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LocalClusterName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OutlierDetection != nil { + l = m.OutlierDetection.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UpstreamBindConfig != nil { + if size, ok := interface{}(m.UpstreamBindConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UpstreamBindConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LoadStatsConfig != nil { + if size, ok := interface{}(m.LoadStatsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LoadStatsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnableDeferredClusterCreation { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *Watchdogs) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MainThreadWatchdog != nil { + l = m.MainThreadWatchdog.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.WorkerWatchdog != nil { + l = m.WorkerWatchdog.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Watchdog_WatchdogAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Config != nil { + if size, ok := interface{}(m.Config).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Config) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Event != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Event)) + } + n += 
len(m.unknownFields) + return n +} + +func (m *Watchdog) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MissTimeout != nil { + l = (*durationpb.Duration)(m.MissTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MegamissTimeout != nil { + l = (*durationpb.Duration)(m.MegamissTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.KillTimeout != nil { + l = (*durationpb.Duration)(m.KillTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MultikillTimeout != nil { + l = (*durationpb.Duration)(m.MultikillTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MultikillThreshold != nil { + if size, ok := interface{}(m.MultikillThreshold).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MultikillThreshold) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxKillTimeoutJitter != nil { + l = (*durationpb.Duration)(m.MaxKillTimeoutJitter).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Actions) > 0 { + for _, e := range m.Actions { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *FatalAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Config != nil { + if size, ok := interface{}(m.Config).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Config) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Runtime) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SymlinkRoot) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Subdirectory) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.OverrideSubdirectory) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Base != nil { + l = (*structpb.Struct)(m.Base).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RuntimeLayer_DiskLayer) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SymlinkRoot) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AppendServiceCluster { + n += 2 + } + l = len(m.Subdirectory) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RuntimeLayer_AdminLayer) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RuntimeLayer_RtdsLayer) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RtdsConfig != nil { + if size, ok := interface{}(m.RtdsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RtdsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RuntimeLayer) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.LayerSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *RuntimeLayer_StaticLayer) SizeVT() (n int) { + 
if m == nil { + return 0 + } + var l int + _ = l + if m.StaticLayer != nil { + l = (*structpb.Struct)(m.StaticLayer).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RuntimeLayer_DiskLayer_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DiskLayer != nil { + l = m.DiskLayer.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RuntimeLayer_AdminLayer_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AdminLayer != nil { + l = m.AdminLayer.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RuntimeLayer_RtdsLayer_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RtdsLayer != nil { + l = m.RtdsLayer.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *LayeredRuntime) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Layers) > 0 { + for _, e := range m.Layers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *CustomInlineHeader) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.InlineHeaderName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.InlineHeaderType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.InlineHeaderType)) + } + n += len(m.unknownFields) + return n +} + +func (m *MemoryAllocatorManager) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BytesToRelease != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.BytesToRelease)) + } + if m.MemoryReleaseInterval != nil { + l = (*durationpb.Duration)(m.MemoryReleaseInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.go new file mode 100644 index 0000000000..cffadb9d88 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.go @@ -0,0 +1,507 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/cluster/v3/circuit_breaker.proto + +package clusterv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// :ref:`Circuit breaking` settings can be +// specified individually for each defined priority. 
+type CircuitBreakers struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If multiple :ref:`Thresholds` + // are defined with the same :ref:`RoutingPriority`, + // the first one in the list is used. If no Thresholds is defined for a given + // :ref:`RoutingPriority`, the default values + // are used. + Thresholds []*CircuitBreakers_Thresholds `protobuf:"bytes,1,rep,name=thresholds,proto3" json:"thresholds,omitempty"` + // Optional per-host limits which apply to each individual host in a cluster. + // + // .. note:: + // + // currently only the :ref:`max_connections + // ` field is supported for per-host limits. + // + // If multiple per-host :ref:`Thresholds` + // are defined with the same :ref:`RoutingPriority`, + // the first one in the list is used. If no per-host Thresholds are defined for a given + // :ref:`RoutingPriority`, + // the cluster will not have per-host limits. + PerHostThresholds []*CircuitBreakers_Thresholds `protobuf:"bytes,2,rep,name=per_host_thresholds,json=perHostThresholds,proto3" json:"per_host_thresholds,omitempty"` +} + +func (x *CircuitBreakers) Reset() { + *x = CircuitBreakers{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CircuitBreakers) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CircuitBreakers) ProtoMessage() {} + +func (x *CircuitBreakers) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CircuitBreakers.ProtoReflect.Descriptor instead. +func (*CircuitBreakers) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescGZIP(), []int{0} +} + +func (x *CircuitBreakers) GetThresholds() []*CircuitBreakers_Thresholds { + if x != nil { + return x.Thresholds + } + return nil +} + +func (x *CircuitBreakers) GetPerHostThresholds() []*CircuitBreakers_Thresholds { + if x != nil { + return x.PerHostThresholds + } + return nil +} + +// A Thresholds defines CircuitBreaker settings for a +// :ref:`RoutingPriority`. +// [#next-free-field: 9] +type CircuitBreakers_Thresholds struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The :ref:`RoutingPriority` + // the specified CircuitBreaker settings apply to. + Priority v3.RoutingPriority `protobuf:"varint,1,opt,name=priority,proto3,enum=envoy.config.core.v3.RoutingPriority" json:"priority,omitempty"` + // The maximum number of connections that Envoy will make to the upstream + // cluster. If not specified, the default is 1024. + MaxConnections *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=max_connections,json=maxConnections,proto3" json:"max_connections,omitempty"` + // The maximum number of pending requests that Envoy will allow to the + // upstream cluster. If not specified, the default is 1024. + // This limit is applied as a connection limit for non-HTTP traffic. 
+ MaxPendingRequests *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=max_pending_requests,json=maxPendingRequests,proto3" json:"max_pending_requests,omitempty"` + // The maximum number of parallel requests that Envoy will make to the + // upstream cluster. If not specified, the default is 1024. + // This limit does not apply to non-HTTP traffic. + MaxRequests *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=max_requests,json=maxRequests,proto3" json:"max_requests,omitempty"` + // The maximum number of parallel retries that Envoy will allow to the + // upstream cluster. If not specified, the default is 3. + MaxRetries *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=max_retries,json=maxRetries,proto3" json:"max_retries,omitempty"` + // Specifies a limit on concurrent retries in relation to the number of active requests. This + // parameter is optional. + // + // .. note:: + // + // If this field is set, the retry budget will override any configured retry circuit + // breaker. + RetryBudget *CircuitBreakers_Thresholds_RetryBudget `protobuf:"bytes,8,opt,name=retry_budget,json=retryBudget,proto3" json:"retry_budget,omitempty"` + // If track_remaining is true, then stats will be published that expose + // the number of resources remaining until the circuit breakers open. If + // not specified, the default is false. + // + // .. note:: + // + // If a retry budget is used in lieu of the max_retries circuit breaker, + // the remaining retry resources remaining will not be tracked. + TrackRemaining bool `protobuf:"varint,6,opt,name=track_remaining,json=trackRemaining,proto3" json:"track_remaining,omitempty"` + // The maximum number of connection pools per cluster that Envoy will concurrently support at + // once. If not specified, the default is unlimited. Set this for clusters which create a + // large number of connection pools. See + // :ref:`Circuit Breaking ` for + // more details. + MaxConnectionPools *wrapperspb.UInt32Value `protobuf:"bytes,7,opt,name=max_connection_pools,json=maxConnectionPools,proto3" json:"max_connection_pools,omitempty"` +} + +func (x *CircuitBreakers_Thresholds) Reset() { + *x = CircuitBreakers_Thresholds{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CircuitBreakers_Thresholds) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CircuitBreakers_Thresholds) ProtoMessage() {} + +func (x *CircuitBreakers_Thresholds) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CircuitBreakers_Thresholds.ProtoReflect.Descriptor instead. 
+func (*CircuitBreakers_Thresholds) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *CircuitBreakers_Thresholds) GetPriority() v3.RoutingPriority { + if x != nil { + return x.Priority + } + return v3.RoutingPriority(0) +} + +func (x *CircuitBreakers_Thresholds) GetMaxConnections() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxConnections + } + return nil +} + +func (x *CircuitBreakers_Thresholds) GetMaxPendingRequests() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxPendingRequests + } + return nil +} + +func (x *CircuitBreakers_Thresholds) GetMaxRequests() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxRequests + } + return nil +} + +func (x *CircuitBreakers_Thresholds) GetMaxRetries() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxRetries + } + return nil +} + +func (x *CircuitBreakers_Thresholds) GetRetryBudget() *CircuitBreakers_Thresholds_RetryBudget { + if x != nil { + return x.RetryBudget + } + return nil +} + +func (x *CircuitBreakers_Thresholds) GetTrackRemaining() bool { + if x != nil { + return x.TrackRemaining + } + return false +} + +func (x *CircuitBreakers_Thresholds) GetMaxConnectionPools() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxConnectionPools + } + return nil +} + +type CircuitBreakers_Thresholds_RetryBudget struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the limit on concurrent retries as a percentage of the sum of active requests and + // active pending requests. For example, if there are 100 active requests and the + // budget_percent is set to 25, there may be 25 active retries. + // + // This parameter is optional. Defaults to 20%. + BudgetPercent *v31.Percent `protobuf:"bytes,1,opt,name=budget_percent,json=budgetPercent,proto3" json:"budget_percent,omitempty"` + // Specifies the minimum retry concurrency allowed for the retry budget. The limit on the + // number of active retries may never go below this number. + // + // This parameter is optional. Defaults to 3. + MinRetryConcurrency *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=min_retry_concurrency,json=minRetryConcurrency,proto3" json:"min_retry_concurrency,omitempty"` +} + +func (x *CircuitBreakers_Thresholds_RetryBudget) Reset() { + *x = CircuitBreakers_Thresholds_RetryBudget{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CircuitBreakers_Thresholds_RetryBudget) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CircuitBreakers_Thresholds_RetryBudget) ProtoMessage() {} + +func (x *CircuitBreakers_Thresholds_RetryBudget) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CircuitBreakers_Thresholds_RetryBudget.ProtoReflect.Descriptor instead. 
+func (*CircuitBreakers_Thresholds_RetryBudget) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescGZIP(), []int{0, 0, 0} +} + +func (x *CircuitBreakers_Thresholds_RetryBudget) GetBudgetPercent() *v31.Percent { + if x != nil { + return x.BudgetPercent + } + return nil +} + +func (x *CircuitBreakers_Thresholds_RetryBudget) GetMinRetryConcurrency() *wrapperspb.UInt32Value { + if x != nil { + return x.MinRetryConcurrency + } + return nil +} + +var File_envoy_config_cluster_v3_circuit_breaker_proto protoreflect.FileDescriptor + +var file_envoy_config_cluster_v3_circuit_breaker_proto_rawDesc = []byte{ + 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, + 0x74, 0x5f, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x17, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, + 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, + 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0xe5, 0x08, 0x0a, 0x0f, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, + 0x61, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x53, 0x0a, 0x0a, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, + 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, + 0x65, 0x72, 0x73, 0x2e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x52, 0x0a, + 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x12, 0x63, 0x0a, 0x13, 0x70, 0x65, + 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, + 0x73, 0x2e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x52, 0x11, 0x70, 0x65, + 0x72, 0x48, 0x6f, 0x73, 0x74, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x1a, + 0xea, 0x06, 0x0a, 0x0a, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 
0x6f, 0x6c, 0x64, 0x73, 0x12, 0x4b, + 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, + 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, + 0x01, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x45, 0x0a, 0x0f, 0x6d, + 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x4e, 0x0a, 0x14, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, + 0x67, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, + 0x6d, 0x61, 0x78, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x73, 0x12, 0x3f, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x73, 0x12, 0x3d, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x12, 0x62, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x62, 0x75, 0x64, 0x67, + 0x65, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, + 0x72, 0x73, 0x2e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x2e, 0x52, 0x65, + 0x74, 0x72, 0x79, 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, + 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x5f, + 0x72, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0e, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x12, + 0x4e, 0x0a, 0x14, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, 0x6d, 0x61, 0x78, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 
0x73, 0x1a, + 0xe2, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x12, + 0x3d, 0x0a, 0x0e, 0x62, 0x75, 0x64, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, + 0x0d, 0x62, 0x75, 0x64, 0x67, 0x65, 0x74, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x50, + 0x0a, 0x15, 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x13, 0x6d, 0x69, 0x6e, + 0x52, 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, + 0x3a, 0x42, 0x9a, 0xc5, 0x88, 0x1e, 0x3d, 0x0a, 0x3b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x69, + 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x2e, 0x54, 0x68, + 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x75, + 0x64, 0x67, 0x65, 0x74, 0x3a, 0x36, 0x9a, 0xc5, 0x88, 0x1e, 0x31, 0x0a, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x2e, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, + 0x73, 0x2e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x3a, 0x2b, 0x9a, 0xc5, + 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, + 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x42, 0x90, 0x01, 0xba, 0x80, 0xc8, 0xd1, + 0x06, 0x02, 0x10, 0x02, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x13, 0x43, 0x69, 0x72, + 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x48, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, + 0x76, 0x33, 0x3b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescOnce sync.Once + file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescData = file_envoy_config_cluster_v3_circuit_breaker_proto_rawDesc +) + +func file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescGZIP() []byte { + file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescOnce.Do(func() { + file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescData) + }) + return 
file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescData +} + +var file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_envoy_config_cluster_v3_circuit_breaker_proto_goTypes = []interface{}{ + (*CircuitBreakers)(nil), // 0: envoy.config.cluster.v3.CircuitBreakers + (*CircuitBreakers_Thresholds)(nil), // 1: envoy.config.cluster.v3.CircuitBreakers.Thresholds + (*CircuitBreakers_Thresholds_RetryBudget)(nil), // 2: envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget + (v3.RoutingPriority)(0), // 3: envoy.config.core.v3.RoutingPriority + (*wrapperspb.UInt32Value)(nil), // 4: google.protobuf.UInt32Value + (*v31.Percent)(nil), // 5: envoy.type.v3.Percent +} +var file_envoy_config_cluster_v3_circuit_breaker_proto_depIdxs = []int32{ + 1, // 0: envoy.config.cluster.v3.CircuitBreakers.thresholds:type_name -> envoy.config.cluster.v3.CircuitBreakers.Thresholds + 1, // 1: envoy.config.cluster.v3.CircuitBreakers.per_host_thresholds:type_name -> envoy.config.cluster.v3.CircuitBreakers.Thresholds + 3, // 2: envoy.config.cluster.v3.CircuitBreakers.Thresholds.priority:type_name -> envoy.config.core.v3.RoutingPriority + 4, // 3: envoy.config.cluster.v3.CircuitBreakers.Thresholds.max_connections:type_name -> google.protobuf.UInt32Value + 4, // 4: envoy.config.cluster.v3.CircuitBreakers.Thresholds.max_pending_requests:type_name -> google.protobuf.UInt32Value + 4, // 5: envoy.config.cluster.v3.CircuitBreakers.Thresholds.max_requests:type_name -> google.protobuf.UInt32Value + 4, // 6: envoy.config.cluster.v3.CircuitBreakers.Thresholds.max_retries:type_name -> google.protobuf.UInt32Value + 2, // 7: envoy.config.cluster.v3.CircuitBreakers.Thresholds.retry_budget:type_name -> envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget + 4, // 8: envoy.config.cluster.v3.CircuitBreakers.Thresholds.max_connection_pools:type_name -> google.protobuf.UInt32Value + 5, // 9: envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget.budget_percent:type_name -> envoy.type.v3.Percent + 4, // 10: envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget.min_retry_concurrency:type_name -> google.protobuf.UInt32Value + 11, // [11:11] is the sub-list for method output_type + 11, // [11:11] is the sub-list for method input_type + 11, // [11:11] is the sub-list for extension type_name + 11, // [11:11] is the sub-list for extension extendee + 0, // [0:11] is the sub-list for field type_name +} + +func init() { file_envoy_config_cluster_v3_circuit_breaker_proto_init() } +func file_envoy_config_cluster_v3_circuit_breaker_proto_init() { + if File_envoy_config_cluster_v3_circuit_breaker_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CircuitBreakers); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CircuitBreakers_Thresholds); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CircuitBreakers_Thresholds_RetryBudget); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 
2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_cluster_v3_circuit_breaker_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_cluster_v3_circuit_breaker_proto_goTypes, + DependencyIndexes: file_envoy_config_cluster_v3_circuit_breaker_proto_depIdxs, + MessageInfos: file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes, + }.Build() + File_envoy_config_cluster_v3_circuit_breaker_proto = out.File + file_envoy_config_cluster_v3_circuit_breaker_proto_rawDesc = nil + file_envoy_config_cluster_v3_circuit_breaker_proto_goTypes = nil + file_envoy_config_cluster_v3_circuit_breaker_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.validate.go new file mode 100644 index 0000000000..8bf3373beb --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.validate.go @@ -0,0 +1,662 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/cluster/v3/circuit_breaker.proto + +package clusterv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = v3.RoutingPriority(0) +) + +// Validate checks the field values on CircuitBreakers with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *CircuitBreakers) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CircuitBreakers with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CircuitBreakersMultiError, or nil if none found. 
+func (m *CircuitBreakers) ValidateAll() error { + return m.validate(true) +} + +func (m *CircuitBreakers) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetThresholds() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CircuitBreakersValidationError{ + field: fmt.Sprintf("Thresholds[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CircuitBreakersValidationError{ + field: fmt.Sprintf("Thresholds[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CircuitBreakersValidationError{ + field: fmt.Sprintf("Thresholds[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetPerHostThresholds() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CircuitBreakersValidationError{ + field: fmt.Sprintf("PerHostThresholds[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CircuitBreakersValidationError{ + field: fmt.Sprintf("PerHostThresholds[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CircuitBreakersValidationError{ + field: fmt.Sprintf("PerHostThresholds[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return CircuitBreakersMultiError(errors) + } + + return nil +} + +// CircuitBreakersMultiError is an error wrapping multiple validation errors +// returned by CircuitBreakers.ValidateAll() if the designated constraints +// aren't met. +type CircuitBreakersMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CircuitBreakersMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CircuitBreakersMultiError) AllErrors() []error { return m } + +// CircuitBreakersValidationError is the validation error returned by +// CircuitBreakers.Validate if the designated constraints aren't met. +type CircuitBreakersValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CircuitBreakersValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CircuitBreakersValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CircuitBreakersValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CircuitBreakersValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e CircuitBreakersValidationError) ErrorName() string { return "CircuitBreakersValidationError" } + +// Error satisfies the builtin error interface +func (e CircuitBreakersValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCircuitBreakers.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CircuitBreakersValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CircuitBreakersValidationError{} + +// Validate checks the field values on CircuitBreakers_Thresholds with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CircuitBreakers_Thresholds) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CircuitBreakers_Thresholds with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CircuitBreakers_ThresholdsMultiError, or nil if none found. +func (m *CircuitBreakers_Thresholds) ValidateAll() error { + return m.validate(true) +} + +func (m *CircuitBreakers_Thresholds) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := v3.RoutingPriority_name[int32(m.GetPriority())]; !ok { + err := CircuitBreakers_ThresholdsValidationError{ + field: "Priority", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetMaxConnections()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CircuitBreakers_ThresholdsValidationError{ + field: "MaxConnections", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CircuitBreakers_ThresholdsValidationError{ + field: "MaxConnections", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxConnections()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CircuitBreakers_ThresholdsValidationError{ + field: "MaxConnections", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMaxPendingRequests()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CircuitBreakers_ThresholdsValidationError{ + field: "MaxPendingRequests", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CircuitBreakers_ThresholdsValidationError{ + field: "MaxPendingRequests", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxPendingRequests()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CircuitBreakers_ThresholdsValidationError{ + field: "MaxPendingRequests", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMaxRequests()).(type) { + 
case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CircuitBreakers_ThresholdsValidationError{ + field: "MaxRequests", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CircuitBreakers_ThresholdsValidationError{ + field: "MaxRequests", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxRequests()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CircuitBreakers_ThresholdsValidationError{ + field: "MaxRequests", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMaxRetries()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CircuitBreakers_ThresholdsValidationError{ + field: "MaxRetries", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CircuitBreakers_ThresholdsValidationError{ + field: "MaxRetries", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxRetries()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CircuitBreakers_ThresholdsValidationError{ + field: "MaxRetries", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRetryBudget()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CircuitBreakers_ThresholdsValidationError{ + field: "RetryBudget", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CircuitBreakers_ThresholdsValidationError{ + field: "RetryBudget", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryBudget()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CircuitBreakers_ThresholdsValidationError{ + field: "RetryBudget", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for TrackRemaining + + if all { + switch v := interface{}(m.GetMaxConnectionPools()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CircuitBreakers_ThresholdsValidationError{ + field: "MaxConnectionPools", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CircuitBreakers_ThresholdsValidationError{ + field: "MaxConnectionPools", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxConnectionPools()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CircuitBreakers_ThresholdsValidationError{ + field: "MaxConnectionPools", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return CircuitBreakers_ThresholdsMultiError(errors) + } + + return nil +} + +// CircuitBreakers_ThresholdsMultiError is an error wrapping multiple +// validation 
errors returned by CircuitBreakers_Thresholds.ValidateAll() if +// the designated constraints aren't met. +type CircuitBreakers_ThresholdsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CircuitBreakers_ThresholdsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CircuitBreakers_ThresholdsMultiError) AllErrors() []error { return m } + +// CircuitBreakers_ThresholdsValidationError is the validation error returned +// by CircuitBreakers_Thresholds.Validate if the designated constraints aren't met. +type CircuitBreakers_ThresholdsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CircuitBreakers_ThresholdsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CircuitBreakers_ThresholdsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CircuitBreakers_ThresholdsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CircuitBreakers_ThresholdsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CircuitBreakers_ThresholdsValidationError) ErrorName() string { + return "CircuitBreakers_ThresholdsValidationError" +} + +// Error satisfies the builtin error interface +func (e CircuitBreakers_ThresholdsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCircuitBreakers_Thresholds.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CircuitBreakers_ThresholdsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CircuitBreakers_ThresholdsValidationError{} + +// Validate checks the field values on CircuitBreakers_Thresholds_RetryBudget +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *CircuitBreakers_Thresholds_RetryBudget) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// CircuitBreakers_Thresholds_RetryBudget with the rules defined in the proto +// definition for this message. If any rules are violated, the result is a +// list of violation errors wrapped in +// CircuitBreakers_Thresholds_RetryBudgetMultiError, or nil if none found. 
+func (m *CircuitBreakers_Thresholds_RetryBudget) ValidateAll() error { + return m.validate(true) +} + +func (m *CircuitBreakers_Thresholds_RetryBudget) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetBudgetPercent()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CircuitBreakers_Thresholds_RetryBudgetValidationError{ + field: "BudgetPercent", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CircuitBreakers_Thresholds_RetryBudgetValidationError{ + field: "BudgetPercent", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBudgetPercent()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CircuitBreakers_Thresholds_RetryBudgetValidationError{ + field: "BudgetPercent", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMinRetryConcurrency()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CircuitBreakers_Thresholds_RetryBudgetValidationError{ + field: "MinRetryConcurrency", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CircuitBreakers_Thresholds_RetryBudgetValidationError{ + field: "MinRetryConcurrency", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMinRetryConcurrency()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CircuitBreakers_Thresholds_RetryBudgetValidationError{ + field: "MinRetryConcurrency", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return CircuitBreakers_Thresholds_RetryBudgetMultiError(errors) + } + + return nil +} + +// CircuitBreakers_Thresholds_RetryBudgetMultiError is an error wrapping +// multiple validation errors returned by +// CircuitBreakers_Thresholds_RetryBudget.ValidateAll() if the designated +// constraints aren't met. +type CircuitBreakers_Thresholds_RetryBudgetMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CircuitBreakers_Thresholds_RetryBudgetMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CircuitBreakers_Thresholds_RetryBudgetMultiError) AllErrors() []error { return m } + +// CircuitBreakers_Thresholds_RetryBudgetValidationError is the validation +// error returned by CircuitBreakers_Thresholds_RetryBudget.Validate if the +// designated constraints aren't met. +type CircuitBreakers_Thresholds_RetryBudgetValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CircuitBreakers_Thresholds_RetryBudgetValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CircuitBreakers_Thresholds_RetryBudgetValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e CircuitBreakers_Thresholds_RetryBudgetValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CircuitBreakers_Thresholds_RetryBudgetValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CircuitBreakers_Thresholds_RetryBudgetValidationError) ErrorName() string { + return "CircuitBreakers_Thresholds_RetryBudgetValidationError" +} + +// Error satisfies the builtin error interface +func (e CircuitBreakers_Thresholds_RetryBudgetValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCircuitBreakers_Thresholds_RetryBudget.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CircuitBreakers_Thresholds_RetryBudgetValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CircuitBreakers_Thresholds_RetryBudgetValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker_vtproto.pb.go new file mode 100644 index 0000000000..14ca0a1f11 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker_vtproto.pb.go @@ -0,0 +1,337 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/cluster/v3/circuit_breaker.proto + +package clusterv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *CircuitBreakers_Thresholds_RetryBudget) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CircuitBreakers_Thresholds_RetryBudget) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CircuitBreakers_Thresholds_RetryBudget) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MinRetryConcurrency != nil { + size, err := (*wrapperspb.UInt32Value)(m.MinRetryConcurrency).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.BudgetPercent != nil { + if vtmsg, ok := interface{}(m.BudgetPercent).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.BudgetPercent) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CircuitBreakers_Thresholds) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CircuitBreakers_Thresholds) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CircuitBreakers_Thresholds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RetryBudget != nil { + size, err := m.RetryBudget.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.MaxConnectionPools != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxConnectionPools).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.TrackRemaining { + i-- + if m.TrackRemaining { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.MaxRetries != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxRetries).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.MaxRequests != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxRequests).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if 
m.MaxPendingRequests != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxPendingRequests).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.MaxConnections != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxConnections).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Priority != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CircuitBreakers) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CircuitBreakers) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CircuitBreakers) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.PerHostThresholds) > 0 { + for iNdEx := len(m.PerHostThresholds) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.PerHostThresholds[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Thresholds) > 0 { + for iNdEx := len(m.Thresholds) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Thresholds[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CircuitBreakers_Thresholds_RetryBudget) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BudgetPercent != nil { + if size, ok := interface{}(m.BudgetPercent).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.BudgetPercent) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MinRetryConcurrency != nil { + l = (*wrapperspb.UInt32Value)(m.MinRetryConcurrency).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CircuitBreakers_Thresholds) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Priority != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Priority)) + } + if m.MaxConnections != nil { + l = (*wrapperspb.UInt32Value)(m.MaxConnections).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxPendingRequests != nil { + l = (*wrapperspb.UInt32Value)(m.MaxPendingRequests).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxRequests != nil { + l = (*wrapperspb.UInt32Value)(m.MaxRequests).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxRetries != nil { + l = (*wrapperspb.UInt32Value)(m.MaxRetries).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TrackRemaining { + n += 2 + } + if m.MaxConnectionPools != nil { + l = (*wrapperspb.UInt32Value)(m.MaxConnectionPools).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RetryBudget 
!= nil { + l = m.RetryBudget.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CircuitBreakers) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Thresholds) > 0 { + for _, e := range m.Thresholds { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.PerHostThresholds) > 0 { + for _, e := range m.PerHostThresholds { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go new file mode 100644 index 0000000000..53c9afecd6 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go @@ -0,0 +1,4635 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/cluster/v3/cluster.proto + +package clusterv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/cncf/xds/go/xds/core/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v32 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v34 "github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3" + v33 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Refer to :ref:`service discovery type ` +// for an explanation on each type. +type Cluster_DiscoveryType int32 + +const ( + // Refer to the :ref:`static discovery type` + // for an explanation. + Cluster_STATIC Cluster_DiscoveryType = 0 + // Refer to the :ref:`strict DNS discovery + // type` + // for an explanation. + Cluster_STRICT_DNS Cluster_DiscoveryType = 1 + // Refer to the :ref:`logical DNS discovery + // type` + // for an explanation. + Cluster_LOGICAL_DNS Cluster_DiscoveryType = 2 + // Refer to the :ref:`service discovery type` + // for an explanation. + Cluster_EDS Cluster_DiscoveryType = 3 + // Refer to the :ref:`original destination discovery + // type` + // for an explanation. + Cluster_ORIGINAL_DST Cluster_DiscoveryType = 4 +) + +// Enum value maps for Cluster_DiscoveryType. 
+var ( + Cluster_DiscoveryType_name = map[int32]string{ + 0: "STATIC", + 1: "STRICT_DNS", + 2: "LOGICAL_DNS", + 3: "EDS", + 4: "ORIGINAL_DST", + } + Cluster_DiscoveryType_value = map[string]int32{ + "STATIC": 0, + "STRICT_DNS": 1, + "LOGICAL_DNS": 2, + "EDS": 3, + "ORIGINAL_DST": 4, + } +) + +func (x Cluster_DiscoveryType) Enum() *Cluster_DiscoveryType { + p := new(Cluster_DiscoveryType) + *p = x + return p +} + +func (x Cluster_DiscoveryType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Cluster_DiscoveryType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_cluster_v3_cluster_proto_enumTypes[0].Descriptor() +} + +func (Cluster_DiscoveryType) Type() protoreflect.EnumType { + return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[0] +} + +func (x Cluster_DiscoveryType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Cluster_DiscoveryType.Descriptor instead. +func (Cluster_DiscoveryType) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 0} +} + +// Refer to :ref:`load balancer type ` architecture +// overview section for information on each type. +type Cluster_LbPolicy int32 + +const ( + // Refer to the :ref:`round robin load balancing + // policy` + // for an explanation. + Cluster_ROUND_ROBIN Cluster_LbPolicy = 0 + // Refer to the :ref:`least request load balancing + // policy` + // for an explanation. + Cluster_LEAST_REQUEST Cluster_LbPolicy = 1 + // Refer to the :ref:`ring hash load balancing + // policy` + // for an explanation. + Cluster_RING_HASH Cluster_LbPolicy = 2 + // Refer to the :ref:`random load balancing + // policy` + // for an explanation. + Cluster_RANDOM Cluster_LbPolicy = 3 + // Refer to the :ref:`Maglev load balancing policy` + // for an explanation. + Cluster_MAGLEV Cluster_LbPolicy = 5 + // This load balancer type must be specified if the configured cluster provides a cluster + // specific load balancer. Consult the configured cluster's documentation for whether to set + // this option or not. + Cluster_CLUSTER_PROVIDED Cluster_LbPolicy = 6 + // Use the new :ref:`load_balancing_policy + // ` field to determine the LB policy. + // This has been deprecated in favor of using the :ref:`load_balancing_policy + // ` field without + // setting any value in :ref:`lb_policy`. + Cluster_LOAD_BALANCING_POLICY_CONFIG Cluster_LbPolicy = 7 +) + +// Enum value maps for Cluster_LbPolicy. 
+var ( + Cluster_LbPolicy_name = map[int32]string{ + 0: "ROUND_ROBIN", + 1: "LEAST_REQUEST", + 2: "RING_HASH", + 3: "RANDOM", + 5: "MAGLEV", + 6: "CLUSTER_PROVIDED", + 7: "LOAD_BALANCING_POLICY_CONFIG", + } + Cluster_LbPolicy_value = map[string]int32{ + "ROUND_ROBIN": 0, + "LEAST_REQUEST": 1, + "RING_HASH": 2, + "RANDOM": 3, + "MAGLEV": 5, + "CLUSTER_PROVIDED": 6, + "LOAD_BALANCING_POLICY_CONFIG": 7, + } +) + +func (x Cluster_LbPolicy) Enum() *Cluster_LbPolicy { + p := new(Cluster_LbPolicy) + *p = x + return p +} + +func (x Cluster_LbPolicy) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Cluster_LbPolicy) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_cluster_v3_cluster_proto_enumTypes[1].Descriptor() +} + +func (Cluster_LbPolicy) Type() protoreflect.EnumType { + return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[1] +} + +func (x Cluster_LbPolicy) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Cluster_LbPolicy.Descriptor instead. +func (Cluster_LbPolicy) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 1} +} + +// When V4_ONLY is selected, the DNS resolver will only perform a lookup for +// addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will +// only perform a lookup for addresses in the IPv6 family. If AUTO is +// specified, the DNS resolver will first perform a lookup for addresses in +// the IPv6 family and fallback to a lookup for addresses in the IPv4 family. +// This is semantically equivalent to a non-existent V6_PREFERRED option. +// AUTO is a legacy name that is more opaque than +// necessary and will be deprecated in favor of V6_PREFERRED in a future major version of the API. +// If V4_PREFERRED is specified, the DNS resolver will first perform a lookup for addresses in the +// IPv4 family and fallback to a lookup for addresses in the IPv6 family. i.e., the callback +// target will only get v6 addresses if there were NO v4 addresses to return. +// If ALL is specified, the DNS resolver will perform a lookup for both IPv4 and IPv6 families, +// and return all resolved addresses. When this is used, Happy Eyeballs will be enabled for +// upstream connections. Refer to :ref:`Happy Eyeballs Support ` +// for more information. +// For cluster types other than +// :ref:`STRICT_DNS` and +// :ref:`LOGICAL_DNS`, +// this setting is +// ignored. +// [#next-major-version: deprecate AUTO in favor of a V6_PREFERRED option.] +type Cluster_DnsLookupFamily int32 + +const ( + Cluster_AUTO Cluster_DnsLookupFamily = 0 + Cluster_V4_ONLY Cluster_DnsLookupFamily = 1 + Cluster_V6_ONLY Cluster_DnsLookupFamily = 2 + Cluster_V4_PREFERRED Cluster_DnsLookupFamily = 3 + Cluster_ALL Cluster_DnsLookupFamily = 4 +) + +// Enum value maps for Cluster_DnsLookupFamily. 
+var ( + Cluster_DnsLookupFamily_name = map[int32]string{ + 0: "AUTO", + 1: "V4_ONLY", + 2: "V6_ONLY", + 3: "V4_PREFERRED", + 4: "ALL", + } + Cluster_DnsLookupFamily_value = map[string]int32{ + "AUTO": 0, + "V4_ONLY": 1, + "V6_ONLY": 2, + "V4_PREFERRED": 3, + "ALL": 4, + } +) + +func (x Cluster_DnsLookupFamily) Enum() *Cluster_DnsLookupFamily { + p := new(Cluster_DnsLookupFamily) + *p = x + return p +} + +func (x Cluster_DnsLookupFamily) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Cluster_DnsLookupFamily) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_cluster_v3_cluster_proto_enumTypes[2].Descriptor() +} + +func (Cluster_DnsLookupFamily) Type() protoreflect.EnumType { + return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[2] +} + +func (x Cluster_DnsLookupFamily) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Cluster_DnsLookupFamily.Descriptor instead. +func (Cluster_DnsLookupFamily) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 2} +} + +type Cluster_ClusterProtocolSelection int32 + +const ( + // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). + // If :ref:`http2_protocol_options ` are + // present, HTTP2 will be used, otherwise HTTP1.1 will be used. + Cluster_USE_CONFIGURED_PROTOCOL Cluster_ClusterProtocolSelection = 0 + // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. + Cluster_USE_DOWNSTREAM_PROTOCOL Cluster_ClusterProtocolSelection = 1 +) + +// Enum value maps for Cluster_ClusterProtocolSelection. +var ( + Cluster_ClusterProtocolSelection_name = map[int32]string{ + 0: "USE_CONFIGURED_PROTOCOL", + 1: "USE_DOWNSTREAM_PROTOCOL", + } + Cluster_ClusterProtocolSelection_value = map[string]int32{ + "USE_CONFIGURED_PROTOCOL": 0, + "USE_DOWNSTREAM_PROTOCOL": 1, + } +) + +func (x Cluster_ClusterProtocolSelection) Enum() *Cluster_ClusterProtocolSelection { + p := new(Cluster_ClusterProtocolSelection) + *p = x + return p +} + +func (x Cluster_ClusterProtocolSelection) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Cluster_ClusterProtocolSelection) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_cluster_v3_cluster_proto_enumTypes[3].Descriptor() +} + +func (Cluster_ClusterProtocolSelection) Type() protoreflect.EnumType { + return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[3] +} + +func (x Cluster_ClusterProtocolSelection) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Cluster_ClusterProtocolSelection.Descriptor instead. +func (Cluster_ClusterProtocolSelection) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 3} +} + +// If NO_FALLBACK is selected, a result +// equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected, +// any cluster endpoint may be returned (subject to policy, health checks, +// etc). If DEFAULT_SUBSET is selected, load balancing is performed over the +// endpoints matching the values from the default_subset field. 
+type Cluster_LbSubsetConfig_LbSubsetFallbackPolicy int32 + +const ( + Cluster_LbSubsetConfig_NO_FALLBACK Cluster_LbSubsetConfig_LbSubsetFallbackPolicy = 0 + Cluster_LbSubsetConfig_ANY_ENDPOINT Cluster_LbSubsetConfig_LbSubsetFallbackPolicy = 1 + Cluster_LbSubsetConfig_DEFAULT_SUBSET Cluster_LbSubsetConfig_LbSubsetFallbackPolicy = 2 +) + +// Enum value maps for Cluster_LbSubsetConfig_LbSubsetFallbackPolicy. +var ( + Cluster_LbSubsetConfig_LbSubsetFallbackPolicy_name = map[int32]string{ + 0: "NO_FALLBACK", + 1: "ANY_ENDPOINT", + 2: "DEFAULT_SUBSET", + } + Cluster_LbSubsetConfig_LbSubsetFallbackPolicy_value = map[string]int32{ + "NO_FALLBACK": 0, + "ANY_ENDPOINT": 1, + "DEFAULT_SUBSET": 2, + } +) + +func (x Cluster_LbSubsetConfig_LbSubsetFallbackPolicy) Enum() *Cluster_LbSubsetConfig_LbSubsetFallbackPolicy { + p := new(Cluster_LbSubsetConfig_LbSubsetFallbackPolicy) + *p = x + return p +} + +func (x Cluster_LbSubsetConfig_LbSubsetFallbackPolicy) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Cluster_LbSubsetConfig_LbSubsetFallbackPolicy) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_cluster_v3_cluster_proto_enumTypes[4].Descriptor() +} + +func (Cluster_LbSubsetConfig_LbSubsetFallbackPolicy) Type() protoreflect.EnumType { + return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[4] +} + +func (x Cluster_LbSubsetConfig_LbSubsetFallbackPolicy) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Cluster_LbSubsetConfig_LbSubsetFallbackPolicy.Descriptor instead. +func (Cluster_LbSubsetConfig_LbSubsetFallbackPolicy) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 3, 0} +} + +type Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy int32 + +const ( + // No fallback. Route metadata will be used as-is. + Cluster_LbSubsetConfig_METADATA_NO_FALLBACK Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy = 0 + // A special metadata key “fallback_list“ will be used to provide variants of metadata to try. + // Value of “fallback_list“ key has to be a list. Every list element has to be a struct - it will + // be merged with route metadata, overriding keys that appear in both places. + // “fallback_list“ entries will be used in order until a host is found. + // + // “fallback_list“ key itself is removed from metadata before subset load balancing is performed. + // + // Example: + // + // for metadata: + // + // .. code-block:: yaml + // + // version: 1.0 + // fallback_list: + // - version: 2.0 + // hardware: c64 + // - hardware: c32 + // - version: 3.0 + // + // at first, metadata: + // + // .. code-block:: json + // + // {"version": "2.0", "hardware": "c64"} + // + // will be used for load balancing. If no host is found, metadata: + // + // .. code-block:: json + // + // {"version": "1.0", "hardware": "c32"} + // + // is next to try. If it still results in no host, finally metadata: + // + // .. code-block:: json + // + // {"version": "3.0"} + // + // is used. + Cluster_LbSubsetConfig_FALLBACK_LIST Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy = 1 +) + +// Enum value maps for Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy. 
+var ( + Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy_name = map[int32]string{ + 0: "METADATA_NO_FALLBACK", + 1: "FALLBACK_LIST", + } + Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy_value = map[string]int32{ + "METADATA_NO_FALLBACK": 0, + "FALLBACK_LIST": 1, + } +) + +func (x Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) Enum() *Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy { + p := new(Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) + *p = x + return p +} + +func (x Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_cluster_v3_cluster_proto_enumTypes[5].Descriptor() +} + +func (Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) Type() protoreflect.EnumType { + return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[5] +} + +func (x Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy.Descriptor instead. +func (Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 3, 1} +} + +// Allows to override top level fallback policy per selector. +type Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy int32 + +const ( + // If NOT_DEFINED top level config fallback policy is used instead. + Cluster_LbSubsetConfig_LbSubsetSelector_NOT_DEFINED Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy = 0 + // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported. + Cluster_LbSubsetConfig_LbSubsetSelector_NO_FALLBACK Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy = 1 + // If ANY_ENDPOINT is selected, any cluster endpoint may be returned + // (subject to policy, health checks, etc). + Cluster_LbSubsetConfig_LbSubsetSelector_ANY_ENDPOINT Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy = 2 + // If DEFAULT_SUBSET is selected, load balancing is performed over the + // endpoints matching the values from the default_subset field. + Cluster_LbSubsetConfig_LbSubsetSelector_DEFAULT_SUBSET Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy = 3 + // If KEYS_SUBSET is selected, subset selector matching is performed again with metadata + // keys reduced to + // :ref:`fallback_keys_subset`. + // It allows for a fallback to a different, less specific selector if some of the keys of + // the selector are considered optional. + Cluster_LbSubsetConfig_LbSubsetSelector_KEYS_SUBSET Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy = 4 +) + +// Enum value maps for Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy. 
+var ( + Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy_name = map[int32]string{ + 0: "NOT_DEFINED", + 1: "NO_FALLBACK", + 2: "ANY_ENDPOINT", + 3: "DEFAULT_SUBSET", + 4: "KEYS_SUBSET", + } + Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy_value = map[string]int32{ + "NOT_DEFINED": 0, + "NO_FALLBACK": 1, + "ANY_ENDPOINT": 2, + "DEFAULT_SUBSET": 3, + "KEYS_SUBSET": 4, + } +) + +func (x Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy) Enum() *Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy { + p := new(Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy) + *p = x + return p +} + +func (x Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_cluster_v3_cluster_proto_enumTypes[6].Descriptor() +} + +func (Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy) Type() protoreflect.EnumType { + return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[6] +} + +func (x Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy.Descriptor instead. +func (Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 3, 0, 0} +} + +// The hash function used to hash hosts onto the ketama ring. +type Cluster_RingHashLbConfig_HashFunction int32 + +const ( + // Use `xxHash `_, this is the default hash function. + Cluster_RingHashLbConfig_XX_HASH Cluster_RingHashLbConfig_HashFunction = 0 + // Use `MurmurHash2 `_, this is compatible with + // std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled + // on Linux and not macOS. + Cluster_RingHashLbConfig_MURMUR_HASH_2 Cluster_RingHashLbConfig_HashFunction = 1 +) + +// Enum value maps for Cluster_RingHashLbConfig_HashFunction. +var ( + Cluster_RingHashLbConfig_HashFunction_name = map[int32]string{ + 0: "XX_HASH", + 1: "MURMUR_HASH_2", + } + Cluster_RingHashLbConfig_HashFunction_value = map[string]int32{ + "XX_HASH": 0, + "MURMUR_HASH_2": 1, + } +) + +func (x Cluster_RingHashLbConfig_HashFunction) Enum() *Cluster_RingHashLbConfig_HashFunction { + p := new(Cluster_RingHashLbConfig_HashFunction) + *p = x + return p +} + +func (x Cluster_RingHashLbConfig_HashFunction) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Cluster_RingHashLbConfig_HashFunction) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_cluster_v3_cluster_proto_enumTypes[7].Descriptor() +} + +func (Cluster_RingHashLbConfig_HashFunction) Type() protoreflect.EnumType { + return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[7] +} + +func (x Cluster_RingHashLbConfig_HashFunction) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Cluster_RingHashLbConfig_HashFunction.Descriptor instead. 
+func (Cluster_RingHashLbConfig_HashFunction) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 7, 0} +} + +type UpstreamConnectionOptions_FirstAddressFamilyVersion int32 + +const ( + // respect the native ranking of destination ip addresses returned from dns + // resolution + UpstreamConnectionOptions_DEFAULT UpstreamConnectionOptions_FirstAddressFamilyVersion = 0 + UpstreamConnectionOptions_V4 UpstreamConnectionOptions_FirstAddressFamilyVersion = 1 + UpstreamConnectionOptions_V6 UpstreamConnectionOptions_FirstAddressFamilyVersion = 2 +) + +// Enum value maps for UpstreamConnectionOptions_FirstAddressFamilyVersion. +var ( + UpstreamConnectionOptions_FirstAddressFamilyVersion_name = map[int32]string{ + 0: "DEFAULT", + 1: "V4", + 2: "V6", + } + UpstreamConnectionOptions_FirstAddressFamilyVersion_value = map[string]int32{ + "DEFAULT": 0, + "V4": 1, + "V6": 2, + } +) + +func (x UpstreamConnectionOptions_FirstAddressFamilyVersion) Enum() *UpstreamConnectionOptions_FirstAddressFamilyVersion { + p := new(UpstreamConnectionOptions_FirstAddressFamilyVersion) + *p = x + return p +} + +func (x UpstreamConnectionOptions_FirstAddressFamilyVersion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UpstreamConnectionOptions_FirstAddressFamilyVersion) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_cluster_v3_cluster_proto_enumTypes[8].Descriptor() +} + +func (UpstreamConnectionOptions_FirstAddressFamilyVersion) Type() protoreflect.EnumType { + return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[8] +} + +func (x UpstreamConnectionOptions_FirstAddressFamilyVersion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UpstreamConnectionOptions_FirstAddressFamilyVersion.Descriptor instead. +func (UpstreamConnectionOptions_FirstAddressFamilyVersion) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{3, 0} +} + +// Cluster list collections. Entries are “Cluster“ resources or references. +// [#not-implemented-hide:] +type ClusterCollection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entries *v3.CollectionEntry `protobuf:"bytes,1,opt,name=entries,proto3" json:"entries,omitempty"` +} + +func (x *ClusterCollection) Reset() { + *x = ClusterCollection{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClusterCollection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClusterCollection) ProtoMessage() {} + +func (x *ClusterCollection) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClusterCollection.ProtoReflect.Descriptor instead. +func (*ClusterCollection) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{0} +} + +func (x *ClusterCollection) GetEntries() *v3.CollectionEntry { + if x != nil { + return x.Entries + } + return nil +} + +// Configuration for a single upstream cluster. 
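
// Illustrative sketch, not part of the vendored file: every enum generated in
// this file follows the same pattern of typed int32 constants, _name/_value
// maps and a String() method. The round trip below uses only identifiers
// defined above; the import path is the usual go-control-plane one and is an
// assumption of this sketch.
package main

import (
	"fmt"

	clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
)

func main() {
	v := clusterv3.UpstreamConnectionOptions_V6
	fmt.Println(v.String()) // "V6"
	// The _name and _value maps are inverses of each other for defined values.
	name := clusterv3.UpstreamConnectionOptions_FirstAddressFamilyVersion_name[int32(v)]
	back := clusterv3.UpstreamConnectionOptions_FirstAddressFamilyVersion_value[name]
	fmt.Println(name, back == int32(v)) // "V6 true"
}
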
+// [#next-free-field: 58] +type Cluster struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configuration to use different transport sockets for different endpoints. The entry of + // “envoy.transport_socket_match“ in the :ref:`LbEndpoint.Metadata + // ` is used to match against the + // transport sockets as they appear in the list. If a match is not found, the search continues in + // :ref:`LocalityLbEndpoints.Metadata + // `. The first :ref:`match + // ` is used. For example, with + // the following match + // + // .. code-block:: yaml + // + // transport_socket_matches: + // - name: "enableMTLS" + // match: + // acceptMTLS: true + // transport_socket: + // name: envoy.transport_sockets.tls + // config: { ... } # tls socket configuration + // - name: "defaultToPlaintext" + // match: {} + // transport_socket: + // name: envoy.transport_sockets.raw_buffer + // + // Connections to the endpoints whose metadata value under “envoy.transport_socket_match“ + // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. + // + // If a :ref:`socket match ` with empty match + // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" + // socket match in case above. + // + // If an endpoint metadata's value under “envoy.transport_socket_match“ does not match any + // “TransportSocketMatch“, the locality metadata is then checked for a match. Barring any + // matches in the endpoint or locality metadata, the socket configuration fallbacks to use the + // “tls_context“ or “transport_socket“ specified in this cluster. + // + // This field allows gradual and flexible transport socket configuration changes. + // + // The metadata of endpoints in EDS can indicate transport socket capabilities. For example, + // an endpoint's metadata can have two key value pairs as "acceptMTLS": "true", + // "acceptPlaintext": "true". While some other endpoints, only accepting plaintext traffic + // has "acceptPlaintext": "true" metadata information. + // + // Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS + // traffic for endpoints with "acceptMTLS": "true", by adding a corresponding + // “TransportSocketMatch“ in this field. Other client Envoys receive CDS without + // “transport_socket_match“ set, and still send plain text traffic to the same cluster. + // + // This field can be used to specify custom transport socket configurations for health + // checks by adding matching key/value pairs in a health check's + // :ref:`transport socket match criteria ` field. + // + // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.] + TransportSocketMatches []*Cluster_TransportSocketMatch `protobuf:"bytes,43,rep,name=transport_socket_matches,json=transportSocketMatches,proto3" json:"transport_socket_matches,omitempty"` + // Supplies the name of the cluster which must be unique across all clusters. + // The cluster name is used when emitting + // :ref:`statistics ` if :ref:`alt_stat_name + // ` is not provided. + // Any “:“ in the cluster name will be converted to “_“ when emitting statistics. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // An optional alternative to the cluster name to be used for observability. This name is used + // emitting stats for the cluster and access logging the cluster name. 
This will appear as + // additional information in configuration dumps of a cluster's current status as + // :ref:`observability_name ` + // and as an additional tag "upstream_cluster.name" while tracing. Note: Any “:“ in the name + // will be converted to “_“ when emitting statistics. This should not be confused with + // :ref:`Router Filter Header `. + AltStatName string `protobuf:"bytes,28,opt,name=alt_stat_name,json=altStatName,proto3" json:"alt_stat_name,omitempty"` + // Types that are assignable to ClusterDiscoveryType: + // + // *Cluster_Type + // *Cluster_ClusterType + ClusterDiscoveryType isCluster_ClusterDiscoveryType `protobuf_oneof:"cluster_discovery_type"` + // Configuration to use for EDS updates for the Cluster. + EdsClusterConfig *Cluster_EdsClusterConfig `protobuf:"bytes,3,opt,name=eds_cluster_config,json=edsClusterConfig,proto3" json:"eds_cluster_config,omitempty"` + // The timeout for new network connections to hosts in the cluster. + // If not set, a default value of 5s will be used. + ConnectTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=connect_timeout,json=connectTimeout,proto3" json:"connect_timeout,omitempty"` + // Soft limit on size of the cluster’s connections read and write buffers. If + // unspecified, an implementation defined default is applied (1MiB). + PerConnectionBufferLimitBytes *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=per_connection_buffer_limit_bytes,json=perConnectionBufferLimitBytes,proto3" json:"per_connection_buffer_limit_bytes,omitempty"` + // The :ref:`load balancer type ` to use + // when picking a host in the cluster. + LbPolicy Cluster_LbPolicy `protobuf:"varint,6,opt,name=lb_policy,json=lbPolicy,proto3,enum=envoy.config.cluster.v3.Cluster_LbPolicy" json:"lb_policy,omitempty"` + // Setting this is required for specifying members of + // :ref:`STATIC`, + // :ref:`STRICT_DNS` + // or :ref:`LOGICAL_DNS` clusters. + // This field supersedes the “hosts“ field in the v2 API. + // + // .. attention:: + // + // Setting this allows non-EDS cluster types to contain embedded EDS equivalent + // :ref:`endpoint assignments`. + LoadAssignment *v31.ClusterLoadAssignment `protobuf:"bytes,33,opt,name=load_assignment,json=loadAssignment,proto3" json:"load_assignment,omitempty"` + // Optional :ref:`active health checking ` + // configuration for the cluster. If no + // configuration is specified no health checking will be done and all cluster + // members will be considered healthy at all times. + HealthChecks []*v32.HealthCheck `protobuf:"bytes,8,rep,name=health_checks,json=healthChecks,proto3" json:"health_checks,omitempty"` + // Optional maximum requests for a single upstream connection. This parameter + // is respected by both the HTTP/1.1 and HTTP/2 connection pool + // implementations. If not specified, there is no limit. Setting this + // parameter to 1 will effectively disable keep alive. + // + // .. attention:: + // + // This field has been deprecated in favor of the :ref:`max_requests_per_connection ` field. + // + // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. + MaxRequestsPerConnection *wrapperspb.UInt32Value `protobuf:"bytes,9,opt,name=max_requests_per_connection,json=maxRequestsPerConnection,proto3" json:"max_requests_per_connection,omitempty"` + // Optional :ref:`circuit breaking ` for the cluster. 
+ CircuitBreakers *CircuitBreakers `protobuf:"bytes,10,opt,name=circuit_breakers,json=circuitBreakers,proto3" json:"circuit_breakers,omitempty"` + // HTTP protocol options that are applied only to upstream HTTP connections. + // These options apply to all HTTP versions. + // This has been deprecated in favor of + // :ref:`upstream_http_protocol_options ` + // in the :ref:`http_protocol_options ` message. + // upstream_http_protocol_options can be set via the cluster's + // :ref:`extension_protocol_options`. + // See :ref:`upstream_http_protocol_options + // ` + // for example usage. + // + // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. + UpstreamHttpProtocolOptions *v32.UpstreamHttpProtocolOptions `protobuf:"bytes,46,opt,name=upstream_http_protocol_options,json=upstreamHttpProtocolOptions,proto3" json:"upstream_http_protocol_options,omitempty"` + // Additional options when handling HTTP requests upstream. These options will be applicable to + // both HTTP1 and HTTP2 requests. + // This has been deprecated in favor of + // :ref:`common_http_protocol_options ` + // in the :ref:`http_protocol_options ` message. + // common_http_protocol_options can be set via the cluster's + // :ref:`extension_protocol_options`. + // See :ref:`upstream_http_protocol_options + // ` + // for example usage. + // + // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. + CommonHttpProtocolOptions *v32.HttpProtocolOptions `protobuf:"bytes,29,opt,name=common_http_protocol_options,json=commonHttpProtocolOptions,proto3" json:"common_http_protocol_options,omitempty"` + // Additional options when handling HTTP1 requests. + // This has been deprecated in favor of http_protocol_options fields in the + // :ref:`http_protocol_options ` message. + // http_protocol_options can be set via the cluster's + // :ref:`extension_protocol_options`. + // See :ref:`upstream_http_protocol_options + // ` + // for example usage. + // + // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. + HttpProtocolOptions *v32.Http1ProtocolOptions `protobuf:"bytes,13,opt,name=http_protocol_options,json=httpProtocolOptions,proto3" json:"http_protocol_options,omitempty"` + // Even if default HTTP2 protocol options are desired, this field must be + // set so that Envoy will assume that the upstream supports HTTP/2 when + // making new HTTP connection pool connections. Currently, Envoy only + // supports prior knowledge for upstream connections. Even if TLS is used + // with ALPN, “http2_protocol_options“ must be specified. As an aside this allows HTTP/2 + // connections to happen over plain text. + // This has been deprecated in favor of http2_protocol_options fields in the + // :ref:`http_protocol_options ` + // message. http2_protocol_options can be set via the cluster's + // :ref:`extension_protocol_options`. + // See :ref:`upstream_http_protocol_options + // ` + // for example usage. + // + // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. + Http2ProtocolOptions *v32.Http2ProtocolOptions `protobuf:"bytes,14,opt,name=http2_protocol_options,json=http2ProtocolOptions,proto3" json:"http2_protocol_options,omitempty"` + // The extension_protocol_options field is used to provide extension-specific protocol options + // for upstream connections. The key should match the extension filter name, such as + // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on + // specific options. 
+ // [#next-major-version: make this a list of typed extensions.] + TypedExtensionProtocolOptions map[string]*anypb.Any `protobuf:"bytes,36,rep,name=typed_extension_protocol_options,json=typedExtensionProtocolOptions,proto3" json:"typed_extension_protocol_options,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // If the DNS refresh rate is specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this value is used as the cluster’s DNS refresh + // rate. The value configured must be at least 1ms. If this setting is not specified, the + // value defaults to 5000ms. For cluster types other than + // :ref:`STRICT_DNS` + // and :ref:`LOGICAL_DNS` + // this setting is ignored. + DnsRefreshRate *durationpb.Duration `protobuf:"bytes,16,opt,name=dns_refresh_rate,json=dnsRefreshRate,proto3" json:"dns_refresh_rate,omitempty"` + // If the DNS failure refresh rate is specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is + // not specified, the failure refresh rate defaults to the DNS refresh rate. For cluster types + // other than :ref:`STRICT_DNS` and + // :ref:`LOGICAL_DNS` this setting is + // ignored. + DnsFailureRefreshRate *Cluster_RefreshRate `protobuf:"bytes,44,opt,name=dns_failure_refresh_rate,json=dnsFailureRefreshRate,proto3" json:"dns_failure_refresh_rate,omitempty"` + // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, + // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS + // resolution. + RespectDnsTtl bool `protobuf:"varint,39,opt,name=respect_dns_ttl,json=respectDnsTtl,proto3" json:"respect_dns_ttl,omitempty"` + // The DNS IP address resolution policy. If this setting is not specified, the + // value defaults to + // :ref:`AUTO`. + DnsLookupFamily Cluster_DnsLookupFamily `protobuf:"varint,17,opt,name=dns_lookup_family,json=dnsLookupFamily,proto3,enum=envoy.config.cluster.v3.Cluster_DnsLookupFamily" json:"dns_lookup_family,omitempty"` + // If DNS resolvers are specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this value is used to specify the cluster’s dns resolvers. + // If this setting is not specified, the value defaults to the default + // resolver, which uses /etc/resolv.conf for configuration. For cluster types + // other than + // :ref:`STRICT_DNS` + // and :ref:`LOGICAL_DNS` + // this setting is ignored. + // This field is deprecated in favor of “dns_resolution_config“ + // which aggregates all of the DNS resolver configuration in a single message. + // + // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. + DnsResolvers []*v32.Address `protobuf:"bytes,18,rep,name=dns_resolvers,json=dnsResolvers,proto3" json:"dns_resolvers,omitempty"` + // Always use TCP queries instead of UDP queries for DNS lookups. + // This field is deprecated in favor of “dns_resolution_config“ + // which aggregates all of the DNS resolver configuration in a single message. + // + // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. + UseTcpForDnsLookups bool `protobuf:"varint,45,opt,name=use_tcp_for_dns_lookups,json=useTcpForDnsLookups,proto3" json:"use_tcp_for_dns_lookups,omitempty"` + // DNS resolution configuration which includes the underlying dns resolver addresses and options. 
+ // This field is deprecated in favor of + // :ref:`typed_dns_resolver_config `. + // + // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. + DnsResolutionConfig *v32.DnsResolutionConfig `protobuf:"bytes,53,opt,name=dns_resolution_config,json=dnsResolutionConfig,proto3" json:"dns_resolution_config,omitempty"` + // DNS resolver type configuration extension. This extension can be used to configure c-ares, apple, + // or any other DNS resolver types and the related parameters. + // For example, an object of + // :ref:`CaresDnsResolverConfig ` + // can be packed into this “typed_dns_resolver_config“. This configuration replaces the + // :ref:`dns_resolution_config ` + // configuration. + // During the transition period when both “dns_resolution_config“ and “typed_dns_resolver_config“ exists, + // when “typed_dns_resolver_config“ is in place, Envoy will use it and ignore “dns_resolution_config“. + // When “typed_dns_resolver_config“ is missing, the default behavior is in place. + // [#extension-category: envoy.network.dns_resolver] + TypedDnsResolverConfig *v32.TypedExtensionConfig `protobuf:"bytes,55,opt,name=typed_dns_resolver_config,json=typedDnsResolverConfig,proto3" json:"typed_dns_resolver_config,omitempty"` + // Optional configuration for having cluster readiness block on warm-up. Currently, only applicable for + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // or :ref:`Redis Cluster`. + // If true, cluster readiness blocks on warm-up. If false, the cluster will complete + // initialization whether or not warm-up has completed. Defaults to true. + WaitForWarmOnInit *wrapperspb.BoolValue `protobuf:"bytes,54,opt,name=wait_for_warm_on_init,json=waitForWarmOnInit,proto3" json:"wait_for_warm_on_init,omitempty"` + // If specified, outlier detection will be enabled for this upstream cluster. + // Each of the configuration values can be overridden via + // :ref:`runtime values `. + OutlierDetection *OutlierDetection `protobuf:"bytes,19,opt,name=outlier_detection,json=outlierDetection,proto3" json:"outlier_detection,omitempty"` + // The interval for removing stale hosts from a cluster type + // :ref:`ORIGINAL_DST`. + // Hosts are considered stale if they have not been used + // as upstream destinations during this interval. New hosts are added + // to original destination clusters on demand as new connections are + // redirected to Envoy, causing the number of hosts in the cluster to + // grow over time. Hosts that are not stale (they are actively used as + // destinations) are kept in the cluster, which allows connections to + // them remain open, saving the latency that would otherwise be spent + // on opening new connections. If this setting is not specified, the + // value defaults to 5000ms. For cluster types other than + // :ref:`ORIGINAL_DST` + // this setting is ignored. + CleanupInterval *durationpb.Duration `protobuf:"bytes,20,opt,name=cleanup_interval,json=cleanupInterval,proto3" json:"cleanup_interval,omitempty"` + // Optional configuration used to bind newly established upstream connections. + // This overrides any bind_config specified in the bootstrap proto. + // If the address and port are empty, no bind will be performed. + UpstreamBindConfig *v32.BindConfig `protobuf:"bytes,21,opt,name=upstream_bind_config,json=upstreamBindConfig,proto3" json:"upstream_bind_config,omitempty"` + // Configuration for load balancing subsetting. 
+ LbSubsetConfig *Cluster_LbSubsetConfig `protobuf:"bytes,22,opt,name=lb_subset_config,json=lbSubsetConfig,proto3" json:"lb_subset_config,omitempty"` + // Optional configuration for the load balancing algorithm selected by + // LbPolicy. Currently only + // :ref:`RING_HASH`, + // :ref:`MAGLEV` and + // :ref:`LEAST_REQUEST` + // has additional configuration options. + // Specifying ring_hash_lb_config or maglev_lb_config or least_request_lb_config without setting the corresponding + // LbPolicy will generate an error at runtime. + // + // Types that are assignable to LbConfig: + // + // *Cluster_RingHashLbConfig_ + // *Cluster_MaglevLbConfig_ + // *Cluster_OriginalDstLbConfig_ + // *Cluster_LeastRequestLbConfig_ + // *Cluster_RoundRobinLbConfig_ + LbConfig isCluster_LbConfig `protobuf_oneof:"lb_config"` + // Common configuration for all load balancer implementations. + CommonLbConfig *Cluster_CommonLbConfig `protobuf:"bytes,27,opt,name=common_lb_config,json=commonLbConfig,proto3" json:"common_lb_config,omitempty"` + // Optional custom transport socket implementation to use for upstream connections. + // To setup TLS, set a transport socket with name “envoy.transport_sockets.tls“ and + // :ref:`UpstreamTlsContexts ` in the “typed_config“. + // If no transport socket configuration is specified, new connections + // will be set up with plaintext. + TransportSocket *v32.TransportSocket `protobuf:"bytes,24,opt,name=transport_socket,json=transportSocket,proto3" json:"transport_socket,omitempty"` + // The Metadata field can be used to provide additional information about the + // cluster. It can be used for stats, logging, and varying filter behavior. + // Fields should use reverse DNS notation to denote which entity within Envoy + // will need the information. For instance, if the metadata is intended for + // the Router filter, the filter name should be specified as “envoy.filters.http.router“. + Metadata *v32.Metadata `protobuf:"bytes,25,opt,name=metadata,proto3" json:"metadata,omitempty"` + // Determines how Envoy selects the protocol used to speak to upstream hosts. + // This has been deprecated in favor of setting explicit protocol selection + // in the :ref:`http_protocol_options + // ` message. + // http_protocol_options can be set via the cluster's + // :ref:`extension_protocol_options`. + // + // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. + ProtocolSelection Cluster_ClusterProtocolSelection `protobuf:"varint,26,opt,name=protocol_selection,json=protocolSelection,proto3,enum=envoy.config.cluster.v3.Cluster_ClusterProtocolSelection" json:"protocol_selection,omitempty"` + // Optional options for upstream connections. + UpstreamConnectionOptions *UpstreamConnectionOptions `protobuf:"bytes,30,opt,name=upstream_connection_options,json=upstreamConnectionOptions,proto3" json:"upstream_connection_options,omitempty"` + // If an upstream host becomes unhealthy (as determined by the configured health checks + // or outlier detection), immediately close all connections to the failed host. + // + // .. note:: + // + // This is currently only supported for connections created by tcp_proxy. + // + // .. note:: + // + // The current implementation of this feature closes all connections immediately when + // the unhealthy status is detected. If there are a large number of connections open + // to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of + // time exclusively closing these connections, and not processing any other traffic. 
+ CloseConnectionsOnHostHealthFailure bool `protobuf:"varint,31,opt,name=close_connections_on_host_health_failure,json=closeConnectionsOnHostHealthFailure,proto3" json:"close_connections_on_host_health_failure,omitempty"` + // If set to true, Envoy will ignore the health value of a host when processing its removal + // from service discovery. This means that if active health checking is used, Envoy will *not* + // wait for the endpoint to go unhealthy before removing it. + IgnoreHealthOnHostRemoval bool `protobuf:"varint,32,opt,name=ignore_health_on_host_removal,json=ignoreHealthOnHostRemoval,proto3" json:"ignore_health_on_host_removal,omitempty"` + // An (optional) network filter chain, listed in the order the filters should be applied. + // The chain will be applied to all outgoing connections that Envoy makes to the upstream + // servers of this cluster. + Filters []*Filter `protobuf:"bytes,40,rep,name=filters,proto3" json:"filters,omitempty"` + // If this field is set and is supported by the client, it will supersede the value of + // :ref:`lb_policy`. + LoadBalancingPolicy *LoadBalancingPolicy `protobuf:"bytes,41,opt,name=load_balancing_policy,json=loadBalancingPolicy,proto3" json:"load_balancing_policy,omitempty"` + // [#not-implemented-hide:] + // If present, tells the client where to send load reports via LRS. If not present, the + // client will fall back to a client-side default, which may be either (a) don't send any + // load reports or (b) send load reports for all clusters to a single default server + // (which may be configured in the bootstrap file). + // + // Note that if multiple clusters point to the same LRS server, the client may choose to + // create a separate stream for each cluster or it may choose to coalesce the data for + // multiple clusters onto a single stream. Either way, the client must make sure to send + // the data for any given cluster on no more than one stream. + // + // [#next-major-version: In the v3 API, we should consider restructuring this somehow, + // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation + // from the LRS stream here.] + LrsServer *v32.ConfigSource `protobuf:"bytes,42,opt,name=lrs_server,json=lrsServer,proto3" json:"lrs_server,omitempty"` + // [#not-implemented-hide:] + // A list of metric names from ORCA load reports to propagate to LRS. + // + // For map fields in the ORCA proto, the string will be of the form “.“. + // For example, the string “named_metrics.foo“ will mean to look for the key “foo“ in the ORCA + // “named_metrics“ field. + // + // The special map key “*“ means to report all entries in the map (e.g., “named_metrics.*“ means to + // report all entries in the ORCA named_metrics field). Note that this should be used only with trusted + // backends. + // + // The metric names in LRS will follow the same semantics as this field. In other words, if this field + // contains “named_metrics.foo“, then the LRS load report will include the data with that same string + // as the key. + LrsReportEndpointMetrics []string `protobuf:"bytes,57,rep,name=lrs_report_endpoint_metrics,json=lrsReportEndpointMetrics,proto3" json:"lrs_report_endpoint_metrics,omitempty"` + // If track_timeout_budgets is true, the :ref:`timeout budget histograms + // ` will be published for each + // request. These show what percentage of a request's per try and global timeout was used. A value + // of 0 would indicate that none of the timeout was used or that the timeout was infinite. 
A value + // of 100 would indicate that the request took the entirety of the timeout given to it. + // + // .. attention:: + // + // This field has been deprecated in favor of ``timeout_budgets``, part of + // :ref:`track_cluster_stats `. + // + // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. + TrackTimeoutBudgets bool `protobuf:"varint,47,opt,name=track_timeout_budgets,json=trackTimeoutBudgets,proto3" json:"track_timeout_budgets,omitempty"` + // Optional customization and configuration of upstream connection pool, and upstream type. + // + // Currently this field only applies for HTTP traffic but is designed for eventual use for custom + // TCP upstreams. + // + // For HTTP traffic, Envoy will generally take downstream HTTP and send it upstream as upstream + // HTTP, using the http connection pool and the codec from “http2_protocol_options“ + // + // For routes where CONNECT termination is configured, Envoy will take downstream CONNECT + // requests and forward the CONNECT payload upstream over raw TCP using the tcp connection pool. + // + // The default pool used is the generic connection pool which creates the HTTP upstream for most + // HTTP requests, and the TCP upstream if CONNECT termination is configured. + // + // If users desire custom connection pool or upstream behavior, for example terminating + // CONNECT only if a custom filter indicates it is appropriate, the custom factories + // can be registered and configured here. + // [#extension-category: envoy.upstreams] + UpstreamConfig *v32.TypedExtensionConfig `protobuf:"bytes,48,opt,name=upstream_config,json=upstreamConfig,proto3" json:"upstream_config,omitempty"` + // Configuration to track optional cluster stats. + TrackClusterStats *TrackClusterStats `protobuf:"bytes,49,opt,name=track_cluster_stats,json=trackClusterStats,proto3" json:"track_cluster_stats,omitempty"` + // Preconnect configuration for this cluster. + PreconnectPolicy *Cluster_PreconnectPolicy `protobuf:"bytes,50,opt,name=preconnect_policy,json=preconnectPolicy,proto3" json:"preconnect_policy,omitempty"` + // If “connection_pool_per_downstream_connection“ is true, the cluster will use a separate + // connection pool for every downstream connection + ConnectionPoolPerDownstreamConnection bool `protobuf:"varint,51,opt,name=connection_pool_per_downstream_connection,json=connectionPoolPerDownstreamConnection,proto3" json:"connection_pool_per_downstream_connection,omitempty"` +} + +func (x *Cluster) Reset() { + *x = Cluster{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster) ProtoMessage() {} + +func (x *Cluster) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster.ProtoReflect.Descriptor instead. 
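
// Illustrative sketch, not part of the vendored file: a minimal Cluster built
// only from fields declared above. Cluster_STATIC and Cluster_ROUND_ROBIN are
// the getter defaults shown in this hunk; LoadAssignment, which would carry the
// actual STATIC endpoints, is omitted to keep the sketch self-contained. The
// import path is the usual go-control-plane one and is an assumption.
package main

import (
	"time"

	clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func minimalCluster() *clusterv3.Cluster {
	return &clusterv3.Cluster{
		Name: "example_static_cluster",
		// cluster_discovery_type is a oneof; Cluster_Type selects the enum form.
		ClusterDiscoveryType: &clusterv3.Cluster_Type{Type: clusterv3.Cluster_STATIC},
		// The documented default connect timeout is 5s.
		ConnectTimeout: durationpb.New(5 * time.Second),
		// Soft limit on per-connection buffer sizes (1 MiB, the documented default).
		PerConnectionBufferLimitBytes: wrapperspb.UInt32(1 << 20),
		LbPolicy:                      clusterv3.Cluster_ROUND_ROBIN,
		// Optional stats toggles; the TrackClusterStats message is defined later in this file.
		TrackClusterStats: &clusterv3.TrackClusterStats{TimeoutBudgets: true},
	}
}

func main() { _ = minimalCluster() }
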
+func (*Cluster) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1} +} + +func (x *Cluster) GetTransportSocketMatches() []*Cluster_TransportSocketMatch { + if x != nil { + return x.TransportSocketMatches + } + return nil +} + +func (x *Cluster) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Cluster) GetAltStatName() string { + if x != nil { + return x.AltStatName + } + return "" +} + +func (m *Cluster) GetClusterDiscoveryType() isCluster_ClusterDiscoveryType { + if m != nil { + return m.ClusterDiscoveryType + } + return nil +} + +func (x *Cluster) GetType() Cluster_DiscoveryType { + if x, ok := x.GetClusterDiscoveryType().(*Cluster_Type); ok { + return x.Type + } + return Cluster_STATIC +} + +func (x *Cluster) GetClusterType() *Cluster_CustomClusterType { + if x, ok := x.GetClusterDiscoveryType().(*Cluster_ClusterType); ok { + return x.ClusterType + } + return nil +} + +func (x *Cluster) GetEdsClusterConfig() *Cluster_EdsClusterConfig { + if x != nil { + return x.EdsClusterConfig + } + return nil +} + +func (x *Cluster) GetConnectTimeout() *durationpb.Duration { + if x != nil { + return x.ConnectTimeout + } + return nil +} + +func (x *Cluster) GetPerConnectionBufferLimitBytes() *wrapperspb.UInt32Value { + if x != nil { + return x.PerConnectionBufferLimitBytes + } + return nil +} + +func (x *Cluster) GetLbPolicy() Cluster_LbPolicy { + if x != nil { + return x.LbPolicy + } + return Cluster_ROUND_ROBIN +} + +func (x *Cluster) GetLoadAssignment() *v31.ClusterLoadAssignment { + if x != nil { + return x.LoadAssignment + } + return nil +} + +func (x *Cluster) GetHealthChecks() []*v32.HealthCheck { + if x != nil { + return x.HealthChecks + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. +func (x *Cluster) GetMaxRequestsPerConnection() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxRequestsPerConnection + } + return nil +} + +func (x *Cluster) GetCircuitBreakers() *CircuitBreakers { + if x != nil { + return x.CircuitBreakers + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. +func (x *Cluster) GetUpstreamHttpProtocolOptions() *v32.UpstreamHttpProtocolOptions { + if x != nil { + return x.UpstreamHttpProtocolOptions + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. +func (x *Cluster) GetCommonHttpProtocolOptions() *v32.HttpProtocolOptions { + if x != nil { + return x.CommonHttpProtocolOptions + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. +func (x *Cluster) GetHttpProtocolOptions() *v32.Http1ProtocolOptions { + if x != nil { + return x.HttpProtocolOptions + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. 
+func (x *Cluster) GetHttp2ProtocolOptions() *v32.Http2ProtocolOptions { + if x != nil { + return x.Http2ProtocolOptions + } + return nil +} + +func (x *Cluster) GetTypedExtensionProtocolOptions() map[string]*anypb.Any { + if x != nil { + return x.TypedExtensionProtocolOptions + } + return nil +} + +func (x *Cluster) GetDnsRefreshRate() *durationpb.Duration { + if x != nil { + return x.DnsRefreshRate + } + return nil +} + +func (x *Cluster) GetDnsFailureRefreshRate() *Cluster_RefreshRate { + if x != nil { + return x.DnsFailureRefreshRate + } + return nil +} + +func (x *Cluster) GetRespectDnsTtl() bool { + if x != nil { + return x.RespectDnsTtl + } + return false +} + +func (x *Cluster) GetDnsLookupFamily() Cluster_DnsLookupFamily { + if x != nil { + return x.DnsLookupFamily + } + return Cluster_AUTO +} + +// Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. +func (x *Cluster) GetDnsResolvers() []*v32.Address { + if x != nil { + return x.DnsResolvers + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. +func (x *Cluster) GetUseTcpForDnsLookups() bool { + if x != nil { + return x.UseTcpForDnsLookups + } + return false +} + +// Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. +func (x *Cluster) GetDnsResolutionConfig() *v32.DnsResolutionConfig { + if x != nil { + return x.DnsResolutionConfig + } + return nil +} + +func (x *Cluster) GetTypedDnsResolverConfig() *v32.TypedExtensionConfig { + if x != nil { + return x.TypedDnsResolverConfig + } + return nil +} + +func (x *Cluster) GetWaitForWarmOnInit() *wrapperspb.BoolValue { + if x != nil { + return x.WaitForWarmOnInit + } + return nil +} + +func (x *Cluster) GetOutlierDetection() *OutlierDetection { + if x != nil { + return x.OutlierDetection + } + return nil +} + +func (x *Cluster) GetCleanupInterval() *durationpb.Duration { + if x != nil { + return x.CleanupInterval + } + return nil +} + +func (x *Cluster) GetUpstreamBindConfig() *v32.BindConfig { + if x != nil { + return x.UpstreamBindConfig + } + return nil +} + +func (x *Cluster) GetLbSubsetConfig() *Cluster_LbSubsetConfig { + if x != nil { + return x.LbSubsetConfig + } + return nil +} + +func (m *Cluster) GetLbConfig() isCluster_LbConfig { + if m != nil { + return m.LbConfig + } + return nil +} + +func (x *Cluster) GetRingHashLbConfig() *Cluster_RingHashLbConfig { + if x, ok := x.GetLbConfig().(*Cluster_RingHashLbConfig_); ok { + return x.RingHashLbConfig + } + return nil +} + +func (x *Cluster) GetMaglevLbConfig() *Cluster_MaglevLbConfig { + if x, ok := x.GetLbConfig().(*Cluster_MaglevLbConfig_); ok { + return x.MaglevLbConfig + } + return nil +} + +func (x *Cluster) GetOriginalDstLbConfig() *Cluster_OriginalDstLbConfig { + if x, ok := x.GetLbConfig().(*Cluster_OriginalDstLbConfig_); ok { + return x.OriginalDstLbConfig + } + return nil +} + +func (x *Cluster) GetLeastRequestLbConfig() *Cluster_LeastRequestLbConfig { + if x, ok := x.GetLbConfig().(*Cluster_LeastRequestLbConfig_); ok { + return x.LeastRequestLbConfig + } + return nil +} + +func (x *Cluster) GetRoundRobinLbConfig() *Cluster_RoundRobinLbConfig { + if x, ok := x.GetLbConfig().(*Cluster_RoundRobinLbConfig_); ok { + return x.RoundRobinLbConfig + } + return nil +} + +func (x *Cluster) GetCommonLbConfig() *Cluster_CommonLbConfig { + if x != nil { + return x.CommonLbConfig + } + return nil +} + +func (x *Cluster) GetTransportSocket() *v32.TransportSocket { + if x != nil { + return x.TransportSocket + } + return nil +} + +func (x 
*Cluster) GetMetadata() *v32.Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. +func (x *Cluster) GetProtocolSelection() Cluster_ClusterProtocolSelection { + if x != nil { + return x.ProtocolSelection + } + return Cluster_USE_CONFIGURED_PROTOCOL +} + +func (x *Cluster) GetUpstreamConnectionOptions() *UpstreamConnectionOptions { + if x != nil { + return x.UpstreamConnectionOptions + } + return nil +} + +func (x *Cluster) GetCloseConnectionsOnHostHealthFailure() bool { + if x != nil { + return x.CloseConnectionsOnHostHealthFailure + } + return false +} + +func (x *Cluster) GetIgnoreHealthOnHostRemoval() bool { + if x != nil { + return x.IgnoreHealthOnHostRemoval + } + return false +} + +func (x *Cluster) GetFilters() []*Filter { + if x != nil { + return x.Filters + } + return nil +} + +func (x *Cluster) GetLoadBalancingPolicy() *LoadBalancingPolicy { + if x != nil { + return x.LoadBalancingPolicy + } + return nil +} + +func (x *Cluster) GetLrsServer() *v32.ConfigSource { + if x != nil { + return x.LrsServer + } + return nil +} + +func (x *Cluster) GetLrsReportEndpointMetrics() []string { + if x != nil { + return x.LrsReportEndpointMetrics + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto. +func (x *Cluster) GetTrackTimeoutBudgets() bool { + if x != nil { + return x.TrackTimeoutBudgets + } + return false +} + +func (x *Cluster) GetUpstreamConfig() *v32.TypedExtensionConfig { + if x != nil { + return x.UpstreamConfig + } + return nil +} + +func (x *Cluster) GetTrackClusterStats() *TrackClusterStats { + if x != nil { + return x.TrackClusterStats + } + return nil +} + +func (x *Cluster) GetPreconnectPolicy() *Cluster_PreconnectPolicy { + if x != nil { + return x.PreconnectPolicy + } + return nil +} + +func (x *Cluster) GetConnectionPoolPerDownstreamConnection() bool { + if x != nil { + return x.ConnectionPoolPerDownstreamConnection + } + return false +} + +type isCluster_ClusterDiscoveryType interface { + isCluster_ClusterDiscoveryType() +} + +type Cluster_Type struct { + // The :ref:`service discovery type ` + // to use for resolving the cluster. + Type Cluster_DiscoveryType `protobuf:"varint,2,opt,name=type,proto3,enum=envoy.config.cluster.v3.Cluster_DiscoveryType,oneof"` +} + +type Cluster_ClusterType struct { + // The custom cluster type. + ClusterType *Cluster_CustomClusterType `protobuf:"bytes,38,opt,name=cluster_type,json=clusterType,proto3,oneof"` +} + +func (*Cluster_Type) isCluster_ClusterDiscoveryType() {} + +func (*Cluster_ClusterType) isCluster_ClusterDiscoveryType() {} + +type isCluster_LbConfig interface { + isCluster_LbConfig() +} + +type Cluster_RingHashLbConfig_ struct { + // Optional configuration for the Ring Hash load balancing policy. + RingHashLbConfig *Cluster_RingHashLbConfig `protobuf:"bytes,23,opt,name=ring_hash_lb_config,json=ringHashLbConfig,proto3,oneof"` +} + +type Cluster_MaglevLbConfig_ struct { + // Optional configuration for the Maglev load balancing policy. + MaglevLbConfig *Cluster_MaglevLbConfig `protobuf:"bytes,52,opt,name=maglev_lb_config,json=maglevLbConfig,proto3,oneof"` +} + +type Cluster_OriginalDstLbConfig_ struct { + // Optional configuration for the Original Destination load balancing policy. 
+ OriginalDstLbConfig *Cluster_OriginalDstLbConfig `protobuf:"bytes,34,opt,name=original_dst_lb_config,json=originalDstLbConfig,proto3,oneof"` +} + +type Cluster_LeastRequestLbConfig_ struct { + // Optional configuration for the LeastRequest load balancing policy. + LeastRequestLbConfig *Cluster_LeastRequestLbConfig `protobuf:"bytes,37,opt,name=least_request_lb_config,json=leastRequestLbConfig,proto3,oneof"` +} + +type Cluster_RoundRobinLbConfig_ struct { + // Optional configuration for the RoundRobin load balancing policy. + RoundRobinLbConfig *Cluster_RoundRobinLbConfig `protobuf:"bytes,56,opt,name=round_robin_lb_config,json=roundRobinLbConfig,proto3,oneof"` +} + +func (*Cluster_RingHashLbConfig_) isCluster_LbConfig() {} + +func (*Cluster_MaglevLbConfig_) isCluster_LbConfig() {} + +func (*Cluster_OriginalDstLbConfig_) isCluster_LbConfig() {} + +func (*Cluster_LeastRequestLbConfig_) isCluster_LbConfig() {} + +func (*Cluster_RoundRobinLbConfig_) isCluster_LbConfig() {} + +// Extensible load balancing policy configuration. +// +// Every LB policy defined via this mechanism will be identified via a unique name using reverse +// DNS notation. If the policy needs configuration parameters, it must define a message for its +// own configuration, which will be stored in the config field. The name of the policy will tell +// clients which type of message they should expect to see in the config field. +// +// Note that there are cases where it is useful to be able to independently select LB policies +// for choosing a locality and for choosing an endpoint within that locality. For example, a +// given deployment may always use the same policy to choose the locality, but for choosing the +// endpoint within the locality, some clusters may use weighted-round-robin, while others may +// use some sort of session-based balancing. +// +// This can be accomplished via hierarchical LB policies, where the parent LB policy creates a +// child LB policy for each locality. For each request, the parent chooses the locality and then +// delegates to the child policy for that locality to choose the endpoint within the locality. +// +// To facilitate this, the config message for the top-level LB policy may include a field of +// type LoadBalancingPolicy that specifies the child policy. +type LoadBalancingPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Each client will iterate over the list in order and stop at the first policy that it + // supports. This provides a mechanism for starting to use new LB policies that are not yet + // supported by all clients. 
+ Policies []*LoadBalancingPolicy_Policy `protobuf:"bytes,1,rep,name=policies,proto3" json:"policies,omitempty"` +} + +func (x *LoadBalancingPolicy) Reset() { + *x = LoadBalancingPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LoadBalancingPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LoadBalancingPolicy) ProtoMessage() {} + +func (x *LoadBalancingPolicy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LoadBalancingPolicy.ProtoReflect.Descriptor instead. +func (*LoadBalancingPolicy) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{2} +} + +func (x *LoadBalancingPolicy) GetPolicies() []*LoadBalancingPolicy_Policy { + if x != nil { + return x.Policies + } + return nil +} + +type UpstreamConnectionOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. + TcpKeepalive *v32.TcpKeepalive `protobuf:"bytes,1,opt,name=tcp_keepalive,json=tcpKeepalive,proto3" json:"tcp_keepalive,omitempty"` + // If enabled, associates the interface name of the local address with the upstream connection. + // This can be used by extensions during processing of requests. The association mechanism is + // implementation specific. Defaults to false due to performance concerns. + SetLocalInterfaceNameOnUpstreamConnections bool `protobuf:"varint,2,opt,name=set_local_interface_name_on_upstream_connections,json=setLocalInterfaceNameOnUpstreamConnections,proto3" json:"set_local_interface_name_on_upstream_connections,omitempty"` + // Configurations for happy eyeballs algorithm. + // Add configs for first_address_family_version and first_address_family_count + // when sorting destination ip addresses. + HappyEyeballsConfig *UpstreamConnectionOptions_HappyEyeballsConfig `protobuf:"bytes,3,opt,name=happy_eyeballs_config,json=happyEyeballsConfig,proto3" json:"happy_eyeballs_config,omitempty"` +} + +func (x *UpstreamConnectionOptions) Reset() { + *x = UpstreamConnectionOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpstreamConnectionOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpstreamConnectionOptions) ProtoMessage() {} + +func (x *UpstreamConnectionOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpstreamConnectionOptions.ProtoReflect.Descriptor instead. 
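
// Illustrative sketch, not part of the vendored file: the LoadBalancingPolicy
// comment above says a client walks Policies in order and stops at the first
// entry it supports. The isSupported predicate below is hypothetical; a real
// client would inspect each policy's extension config, which is not shown in
// this hunk. Import path assumed as the usual go-control-plane one.
package example

import (
	clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
)

// pickPolicy returns the first supported policy, or nil if none is supported.
func pickPolicy(lbp *clusterv3.LoadBalancingPolicy, isSupported func(*clusterv3.LoadBalancingPolicy_Policy) bool) *clusterv3.LoadBalancingPolicy_Policy {
	for _, p := range lbp.GetPolicies() {
		if isSupported(p) {
			return p
		}
	}
	return nil
}
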
+func (*UpstreamConnectionOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{3} +} + +func (x *UpstreamConnectionOptions) GetTcpKeepalive() *v32.TcpKeepalive { + if x != nil { + return x.TcpKeepalive + } + return nil +} + +func (x *UpstreamConnectionOptions) GetSetLocalInterfaceNameOnUpstreamConnections() bool { + if x != nil { + return x.SetLocalInterfaceNameOnUpstreamConnections + } + return false +} + +func (x *UpstreamConnectionOptions) GetHappyEyeballsConfig() *UpstreamConnectionOptions_HappyEyeballsConfig { + if x != nil { + return x.HappyEyeballsConfig + } + return nil +} + +type TrackClusterStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If timeout_budgets is true, the :ref:`timeout budget histograms + // ` will be published for each + // request. These show what percentage of a request's per try and global timeout was used. A value + // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value + // of 100 would indicate that the request took the entirety of the timeout given to it. + TimeoutBudgets bool `protobuf:"varint,1,opt,name=timeout_budgets,json=timeoutBudgets,proto3" json:"timeout_budgets,omitempty"` + // If request_response_sizes is true, then the :ref:`histograms + // ` tracking header and body sizes + // of requests and responses will be published. + RequestResponseSizes bool `protobuf:"varint,2,opt,name=request_response_sizes,json=requestResponseSizes,proto3" json:"request_response_sizes,omitempty"` + // If true, some stats will be emitted per-endpoint, similar to the stats in admin “/clusters“ + // output. + // + // This does not currently output correct stats during a hot-restart. + // + // This is not currently implemented by all stat sinks. + // + // These stats do not honor filtering or tag extraction rules in :ref:`StatsConfig + // ` (but fixed-value tags are supported). Admin + // endpoint filtering is supported. + // + // This may not be used at the same time as + // :ref:`load_stats_config `. + PerEndpointStats bool `protobuf:"varint,3,opt,name=per_endpoint_stats,json=perEndpointStats,proto3" json:"per_endpoint_stats,omitempty"` +} + +func (x *TrackClusterStats) Reset() { + *x = TrackClusterStats{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TrackClusterStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TrackClusterStats) ProtoMessage() {} + +func (x *TrackClusterStats) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TrackClusterStats.ProtoReflect.Descriptor instead. 
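
// Illustrative sketch, not part of the vendored file: the generated getters
// above guard against nil receivers with an explicit x != nil check, so reads
// on unset messages do not panic. Import path assumed as the usual
// go-control-plane one.
package main

import (
	"fmt"

	clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
)

func main() {
	var opts *clusterv3.UpstreamConnectionOptions // nil: option not configured
	// Both calls are safe and return zero values because every getter
	// checks x != nil before dereferencing.
	fmt.Println(opts.GetSetLocalInterfaceNameOnUpstreamConnections()) // false
	fmt.Println(opts.GetHappyEyeballsConfig() == nil)                 // true
}
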
+func (*TrackClusterStats) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{4} +} + +func (x *TrackClusterStats) GetTimeoutBudgets() bool { + if x != nil { + return x.TimeoutBudgets + } + return false +} + +func (x *TrackClusterStats) GetRequestResponseSizes() bool { + if x != nil { + return x.RequestResponseSizes + } + return false +} + +func (x *TrackClusterStats) GetPerEndpointStats() bool { + if x != nil { + return x.PerEndpointStats + } + return false +} + +// TransportSocketMatch specifies what transport socket config will be used +// when the match conditions are satisfied. +type Cluster_TransportSocketMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the match, used in stats generation. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Optional metadata match criteria. + // The connection to the endpoint with metadata matching what is set in this field + // will use the transport socket configuration specified here. + // The endpoint's metadata entry in “envoy.transport_socket_match“ is used to match + // against the values specified in this field. + Match *structpb.Struct `protobuf:"bytes,2,opt,name=match,proto3" json:"match,omitempty"` + // The configuration of the transport socket. + // [#extension-category: envoy.transport_sockets.upstream] + TransportSocket *v32.TransportSocket `protobuf:"bytes,3,opt,name=transport_socket,json=transportSocket,proto3" json:"transport_socket,omitempty"` +} + +func (x *Cluster_TransportSocketMatch) Reset() { + *x = Cluster_TransportSocketMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_TransportSocketMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_TransportSocketMatch) ProtoMessage() {} + +func (x *Cluster_TransportSocketMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_TransportSocketMatch.ProtoReflect.Descriptor instead. +func (*Cluster_TransportSocketMatch) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *Cluster_TransportSocketMatch) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Cluster_TransportSocketMatch) GetMatch() *structpb.Struct { + if x != nil { + return x.Match + } + return nil +} + +func (x *Cluster_TransportSocketMatch) GetTransportSocket() *v32.TransportSocket { + if x != nil { + return x.TransportSocket + } + return nil +} + +// Extended cluster type. +type Cluster_CustomClusterType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of the cluster to instantiate. The name must match a supported cluster type. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Cluster specific configuration which depends on the cluster being instantiated. + // See the supported cluster for further documentation. 
+ // [#extension-category: envoy.clusters] + TypedConfig *anypb.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` +} + +func (x *Cluster_CustomClusterType) Reset() { + *x = Cluster_CustomClusterType{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_CustomClusterType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_CustomClusterType) ProtoMessage() {} + +func (x *Cluster_CustomClusterType) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_CustomClusterType.ProtoReflect.Descriptor instead. +func (*Cluster_CustomClusterType) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *Cluster_CustomClusterType) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Cluster_CustomClusterType) GetTypedConfig() *anypb.Any { + if x != nil { + return x.TypedConfig + } + return nil +} + +// Only valid when discovery type is EDS. +type Cluster_EdsClusterConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configuration for the source of EDS updates for this Cluster. + EdsConfig *v32.ConfigSource `protobuf:"bytes,1,opt,name=eds_config,json=edsConfig,proto3" json:"eds_config,omitempty"` + // Optional alternative to cluster name to present to EDS. This does not + // have the same restrictions as cluster name, i.e. it may be arbitrary + // length. This may be a xdstp:// URL. + ServiceName string `protobuf:"bytes,2,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` +} + +func (x *Cluster_EdsClusterConfig) Reset() { + *x = Cluster_EdsClusterConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_EdsClusterConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_EdsClusterConfig) ProtoMessage() {} + +func (x *Cluster_EdsClusterConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_EdsClusterConfig.ProtoReflect.Descriptor instead. +func (*Cluster_EdsClusterConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 2} +} + +func (x *Cluster_EdsClusterConfig) GetEdsConfig() *v32.ConfigSource { + if x != nil { + return x.EdsConfig + } + return nil +} + +func (x *Cluster_EdsClusterConfig) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +// Optionally divide the endpoints in this cluster into subsets defined by +// endpoint metadata and selected by route and weighted cluster metadata. 
+// [#next-free-field: 9] +type Cluster_LbSubsetConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The behavior used when no endpoint subset matches the selected route's + // metadata. The value defaults to + // :ref:`NO_FALLBACK`. + FallbackPolicy Cluster_LbSubsetConfig_LbSubsetFallbackPolicy `protobuf:"varint,1,opt,name=fallback_policy,json=fallbackPolicy,proto3,enum=envoy.config.cluster.v3.Cluster_LbSubsetConfig_LbSubsetFallbackPolicy" json:"fallback_policy,omitempty"` + // Specifies the default subset of endpoints used during fallback if + // fallback_policy is + // :ref:`DEFAULT_SUBSET`. + // Each field in default_subset is + // compared to the matching LbEndpoint.Metadata under the “envoy.lb“ + // namespace. It is valid for no hosts to match, in which case the behavior + // is the same as a fallback_policy of + // :ref:`NO_FALLBACK`. + DefaultSubset *structpb.Struct `protobuf:"bytes,2,opt,name=default_subset,json=defaultSubset,proto3" json:"default_subset,omitempty"` + // For each entry, LbEndpoint.Metadata's + // “envoy.lb“ namespace is traversed and a subset is created for each unique + // combination of key and value. For example: + // + // .. code-block:: json + // + // { "subset_selectors": [ + // { "keys": [ "version" ] }, + // { "keys": [ "stage", "hardware_type" ] } + // ]} + // + // A subset is matched when the metadata from the selected route and + // weighted cluster contains the same keys and values as the subset's + // metadata. The same host may appear in multiple subsets. + SubsetSelectors []*Cluster_LbSubsetConfig_LbSubsetSelector `protobuf:"bytes,3,rep,name=subset_selectors,json=subsetSelectors,proto3" json:"subset_selectors,omitempty"` + // If true, routing to subsets will take into account the localities and locality weights of the + // endpoints when making the routing decision. + // + // There are some potential pitfalls associated with enabling this feature, as the resulting + // traffic split after applying both a subset match and locality weights might be undesirable. + // + // Consider for example a situation in which you have 50/50 split across two localities X/Y + // which have 100 hosts each without subsetting. If the subset LB results in X having only 1 + // host selected but Y having 100, then a lot more load is being dumped on the single host in X + // than originally anticipated in the load balancing assignment delivered via EDS. + LocalityWeightAware bool `protobuf:"varint,4,opt,name=locality_weight_aware,json=localityWeightAware,proto3" json:"locality_weight_aware,omitempty"` + // When used with locality_weight_aware, scales the weight of each locality by the ratio + // of hosts in the subset vs hosts in the original subset. This aims to even out the load + // going to an individual locality if said locality is disproportionately affected by the + // subset predicate. + ScaleLocalityWeight bool `protobuf:"varint,5,opt,name=scale_locality_weight,json=scaleLocalityWeight,proto3" json:"scale_locality_weight,omitempty"` + // If true, when a fallback policy is configured and its corresponding subset fails to find + // a host this will cause any host to be selected instead. + // + // This is useful when using the default subset as the fallback policy, given the default + // subset might become empty. With this option enabled, if that happens the LB will attempt + // to select a host from the entire cluster. 
+ PanicModeAny bool `protobuf:"varint,6,opt,name=panic_mode_any,json=panicModeAny,proto3" json:"panic_mode_any,omitempty"` + // If true, metadata specified for a metadata key will be matched against the corresponding + // endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value + // and any of the elements in the list matches the criteria. + ListAsAny bool `protobuf:"varint,7,opt,name=list_as_any,json=listAsAny,proto3" json:"list_as_any,omitempty"` + // Fallback mechanism that allows to try different route metadata until a host is found. + // If load balancing process, including all its mechanisms (like + // :ref:`fallback_policy`) + // fails to select a host, this policy decides if and how the process is repeated using another metadata. + // + // The value defaults to + // :ref:`METADATA_NO_FALLBACK`. + MetadataFallbackPolicy Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy `protobuf:"varint,8,opt,name=metadata_fallback_policy,json=metadataFallbackPolicy,proto3,enum=envoy.config.cluster.v3.Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy" json:"metadata_fallback_policy,omitempty"` +} + +func (x *Cluster_LbSubsetConfig) Reset() { + *x = Cluster_LbSubsetConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_LbSubsetConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_LbSubsetConfig) ProtoMessage() {} + +func (x *Cluster_LbSubsetConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_LbSubsetConfig.ProtoReflect.Descriptor instead. +func (*Cluster_LbSubsetConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 3} +} + +func (x *Cluster_LbSubsetConfig) GetFallbackPolicy() Cluster_LbSubsetConfig_LbSubsetFallbackPolicy { + if x != nil { + return x.FallbackPolicy + } + return Cluster_LbSubsetConfig_NO_FALLBACK +} + +func (x *Cluster_LbSubsetConfig) GetDefaultSubset() *structpb.Struct { + if x != nil { + return x.DefaultSubset + } + return nil +} + +func (x *Cluster_LbSubsetConfig) GetSubsetSelectors() []*Cluster_LbSubsetConfig_LbSubsetSelector { + if x != nil { + return x.SubsetSelectors + } + return nil +} + +func (x *Cluster_LbSubsetConfig) GetLocalityWeightAware() bool { + if x != nil { + return x.LocalityWeightAware + } + return false +} + +func (x *Cluster_LbSubsetConfig) GetScaleLocalityWeight() bool { + if x != nil { + return x.ScaleLocalityWeight + } + return false +} + +func (x *Cluster_LbSubsetConfig) GetPanicModeAny() bool { + if x != nil { + return x.PanicModeAny + } + return false +} + +func (x *Cluster_LbSubsetConfig) GetListAsAny() bool { + if x != nil { + return x.ListAsAny + } + return false +} + +func (x *Cluster_LbSubsetConfig) GetMetadataFallbackPolicy() Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy { + if x != nil { + return x.MetadataFallbackPolicy + } + return Cluster_LbSubsetConfig_METADATA_NO_FALLBACK +} + +// Configuration for :ref:`slow start mode `. 
+type Cluster_SlowStartConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Represents the size of slow start window. + // If set, the newly created host remains in slow start mode starting from its creation time + // for the duration of slow start window. + SlowStartWindow *durationpb.Duration `protobuf:"bytes,1,opt,name=slow_start_window,json=slowStartWindow,proto3" json:"slow_start_window,omitempty"` + // This parameter controls the speed of traffic increase over the slow start window. Defaults to 1.0, + // so that endpoint would get linearly increasing amount of traffic. + // When increasing the value for this parameter, the speed of traffic ramp-up increases non-linearly. + // The value of aggression parameter should be greater than 0.0. + // By tuning the parameter, is possible to achieve polynomial or exponential shape of ramp-up curve. + // + // During slow start window, effective weight of an endpoint would be scaled with time factor and aggression: + // “new_weight = weight * max(min_weight_percent, time_factor ^ (1 / aggression))“, + // where “time_factor=(time_since_start_seconds / slow_start_time_seconds)“. + // + // As time progresses, more and more traffic would be sent to endpoint, which is in slow start window. + // Once host exits slow start, time_factor and aggression no longer affect its weight. + Aggression *v32.RuntimeDouble `protobuf:"bytes,2,opt,name=aggression,proto3" json:"aggression,omitempty"` + // Configures the minimum percentage of origin weight that avoids too small new weight, + // which may cause endpoints in slow start mode receive no traffic in slow start window. + // If not specified, the default is 10%. + MinWeightPercent *v33.Percent `protobuf:"bytes,3,opt,name=min_weight_percent,json=minWeightPercent,proto3" json:"min_weight_percent,omitempty"` +} + +func (x *Cluster_SlowStartConfig) Reset() { + *x = Cluster_SlowStartConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_SlowStartConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_SlowStartConfig) ProtoMessage() {} + +func (x *Cluster_SlowStartConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_SlowStartConfig.ProtoReflect.Descriptor instead. +func (*Cluster_SlowStartConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 4} +} + +func (x *Cluster_SlowStartConfig) GetSlowStartWindow() *durationpb.Duration { + if x != nil { + return x.SlowStartWindow + } + return nil +} + +func (x *Cluster_SlowStartConfig) GetAggression() *v32.RuntimeDouble { + if x != nil { + return x.Aggression + } + return nil +} + +func (x *Cluster_SlowStartConfig) GetMinWeightPercent() *v33.Percent { + if x != nil { + return x.MinWeightPercent + } + return nil +} + +// Specific configuration for the RoundRobin load balancing policy. +type Cluster_RoundRobinLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configuration for slow start mode. 
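The effective-weight formula quoted in the SlowStartConfig comments can be checked numerically. The sketch below is editorial (not part of the vendored file); the helper name and the choice to express min_weight_percent as a fraction are assumptions of this note.

    package main

    import (
        "fmt"
        "math"
    )

    // slowStartWeight applies the formula from the SlowStartConfig comments:
    // new_weight = weight * max(min_weight_percent, time_factor ^ (1 / aggression)),
    // where time_factor = time_since_start / slow_start_window.
    func slowStartWeight(weight, sinceStartSec, windowSec, aggression, minWeightFrac float64) float64 {
        timeFactor := sinceStartSec / windowSec
        scaled := math.Pow(timeFactor, 1/aggression)
        return weight * math.Max(minWeightFrac, scaled)
    }

    func main() {
        // A host half-way through a 60s slow start window, base weight 100,
        // default aggression 1.0 and the documented 10% minimum weight.
        fmt.Println(slowStartWeight(100, 30, 60, 1.0, 0.10)) // 50
        // Aggression > 1 ramps up faster: 0.5^(1/2) ≈ 0.707.
        fmt.Println(slowStartWeight(100, 30, 60, 2.0, 0.10)) // ≈ 70.7
    }

With aggression above 1.0 the ramp-up front-loads traffic, which matches the comment that the curve becomes non-linear as the parameter grows.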
+ // If this configuration is not set, slow start will not be not enabled. + SlowStartConfig *Cluster_SlowStartConfig `protobuf:"bytes,1,opt,name=slow_start_config,json=slowStartConfig,proto3" json:"slow_start_config,omitempty"` +} + +func (x *Cluster_RoundRobinLbConfig) Reset() { + *x = Cluster_RoundRobinLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_RoundRobinLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_RoundRobinLbConfig) ProtoMessage() {} + +func (x *Cluster_RoundRobinLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_RoundRobinLbConfig.ProtoReflect.Descriptor instead. +func (*Cluster_RoundRobinLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 5} +} + +func (x *Cluster_RoundRobinLbConfig) GetSlowStartConfig() *Cluster_SlowStartConfig { + if x != nil { + return x.SlowStartConfig + } + return nil +} + +// Specific configuration for the LeastRequest load balancing policy. +type Cluster_LeastRequestLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of random healthy hosts from which the host with the fewest active requests will + // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. + ChoiceCount *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=choice_count,json=choiceCount,proto3" json:"choice_count,omitempty"` + // The following formula is used to calculate the dynamic weights when hosts have different load + // balancing weights: + // + // “weight = load_balancing_weight / (active_requests + 1)^active_request_bias“ + // + // The larger the active request bias is, the more aggressively active requests will lower the + // effective weight when all host weights are not equal. + // + // “active_request_bias“ must be greater than or equal to 0.0. + // + // When “active_request_bias == 0.0“ the Least Request Load Balancer doesn't consider the number + // of active requests at the time it picks a host and behaves like the Round Robin Load + // Balancer. + // + // When “active_request_bias > 0.0“ the Least Request Load Balancer scales the load balancing + // weight by the number of active requests at the time it does a pick. + // + // The value is cached for performance reasons and refreshed whenever one of the Load Balancer's + // host sets changes, e.g., whenever there is a host membership update or a host load balancing + // weight change. + // + // .. note:: + // + // This setting only takes effect if all host weights are not equal. + ActiveRequestBias *v32.RuntimeDouble `protobuf:"bytes,2,opt,name=active_request_bias,json=activeRequestBias,proto3" json:"active_request_bias,omitempty"` + // Configuration for slow start mode. + // If this configuration is not set, slow start will not be not enabled. 
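The LeastRequestLbConfig comments describe both the default two-choice pick and the dynamic-weight formula. A small self-contained sketch of those two ideas follows; it is not from the vendored code, and the type and function names are illustrative only.

    package main

    import (
        "fmt"
        "math"
        "math/rand"
    )

    type host struct {
        name           string
        lbWeight       float64
        activeRequests int
    }

    // effectiveWeight implements the documented formula:
    // weight = load_balancing_weight / (active_requests + 1)^active_request_bias.
    func effectiveWeight(h host, bias float64) float64 {
        return h.lbWeight / math.Pow(float64(h.activeRequests)+1, bias)
    }

    // pickP2C sketches the default choice_count=2 behaviour: sample hosts at
    // random and keep the one with the fewest active requests.
    func pickP2C(hosts []host, choiceCount int) host {
        best := hosts[rand.Intn(len(hosts))]
        for i := 1; i < choiceCount; i++ {
            if c := hosts[rand.Intn(len(hosts))]; c.activeRequests < best.activeRequests {
                best = c
            }
        }
        return best
    }

    func main() {
        hosts := []host{{"a", 1, 0}, {"b", 1, 9}}
        // bias 0.0 ignores active requests (Round Robin-like behaviour);
        // bias 1.0 de-weights the busy host "b" to 1/10 of its static weight.
        fmt.Println(effectiveWeight(hosts[1], 0.0)) // 1
        fmt.Println(effectiveWeight(hosts[1], 1.0)) // 0.1
        fmt.Println(pickP2C(hosts, 2).name)
    }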
+ SlowStartConfig *Cluster_SlowStartConfig `protobuf:"bytes,3,opt,name=slow_start_config,json=slowStartConfig,proto3" json:"slow_start_config,omitempty"` +} + +func (x *Cluster_LeastRequestLbConfig) Reset() { + *x = Cluster_LeastRequestLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_LeastRequestLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_LeastRequestLbConfig) ProtoMessage() {} + +func (x *Cluster_LeastRequestLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_LeastRequestLbConfig.ProtoReflect.Descriptor instead. +func (*Cluster_LeastRequestLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 6} +} + +func (x *Cluster_LeastRequestLbConfig) GetChoiceCount() *wrapperspb.UInt32Value { + if x != nil { + return x.ChoiceCount + } + return nil +} + +func (x *Cluster_LeastRequestLbConfig) GetActiveRequestBias() *v32.RuntimeDouble { + if x != nil { + return x.ActiveRequestBias + } + return nil +} + +func (x *Cluster_LeastRequestLbConfig) GetSlowStartConfig() *Cluster_SlowStartConfig { + if x != nil { + return x.SlowStartConfig + } + return nil +} + +// Specific configuration for the :ref:`RingHash` +// load balancing policy. +type Cluster_RingHashLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each + // provided host) the better the request distribution will reflect the desired weights. Defaults + // to 1024 entries, and limited to 8M entries. See also + // :ref:`maximum_ring_size`. + MinimumRingSize *wrapperspb.UInt64Value `protobuf:"bytes,1,opt,name=minimum_ring_size,json=minimumRingSize,proto3" json:"minimum_ring_size,omitempty"` + // The hash function used to hash hosts onto the ketama ring. The value defaults to + // :ref:`XX_HASH`. + HashFunction Cluster_RingHashLbConfig_HashFunction `protobuf:"varint,3,opt,name=hash_function,json=hashFunction,proto3,enum=envoy.config.cluster.v3.Cluster_RingHashLbConfig_HashFunction" json:"hash_function,omitempty"` + // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered + // to further constrain resource use. See also + // :ref:`minimum_ring_size`. 
+ MaximumRingSize *wrapperspb.UInt64Value `protobuf:"bytes,4,opt,name=maximum_ring_size,json=maximumRingSize,proto3" json:"maximum_ring_size,omitempty"` +} + +func (x *Cluster_RingHashLbConfig) Reset() { + *x = Cluster_RingHashLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_RingHashLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_RingHashLbConfig) ProtoMessage() {} + +func (x *Cluster_RingHashLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_RingHashLbConfig.ProtoReflect.Descriptor instead. +func (*Cluster_RingHashLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 7} +} + +func (x *Cluster_RingHashLbConfig) GetMinimumRingSize() *wrapperspb.UInt64Value { + if x != nil { + return x.MinimumRingSize + } + return nil +} + +func (x *Cluster_RingHashLbConfig) GetHashFunction() Cluster_RingHashLbConfig_HashFunction { + if x != nil { + return x.HashFunction + } + return Cluster_RingHashLbConfig_XX_HASH +} + +func (x *Cluster_RingHashLbConfig) GetMaximumRingSize() *wrapperspb.UInt64Value { + if x != nil { + return x.MaximumRingSize + } + return nil +} + +// Specific configuration for the :ref:`Maglev` +// load balancing policy. +type Cluster_MaglevLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The table size for Maglev hashing. Maglev aims for "minimal disruption" rather than an absolute guarantee. + // Minimal disruption means that when the set of upstream hosts change, a connection will likely be sent to the same + // upstream as it was before. Increasing the table size reduces the amount of disruption. + // The table size must be prime number limited to 5000011. If it is not specified, the default is 65537. + TableSize *wrapperspb.UInt64Value `protobuf:"bytes,1,opt,name=table_size,json=tableSize,proto3" json:"table_size,omitempty"` +} + +func (x *Cluster_MaglevLbConfig) Reset() { + *x = Cluster_MaglevLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_MaglevLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_MaglevLbConfig) ProtoMessage() {} + +func (x *Cluster_MaglevLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_MaglevLbConfig.ProtoReflect.Descriptor instead. 
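For the two consistent-hashing policies above, a minimal construction sketch may help. The import paths assume the usual go-control-plane layout for these generated packages; that assumption, and the specific sizes chosen, are editorial and not stated in the patch.

    package main

    import (
        clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" // assumed import path
        "google.golang.org/protobuf/types/known/wrapperspb"
    )

    // ringHashAndMaglev populates the two hashing policies described above:
    // ring size bounds for RING_HASH and a prime table size for MAGLEV
    // (65537 is the documented default).
    func ringHashAndMaglev() (*clusterv3.Cluster_RingHashLbConfig, *clusterv3.Cluster_MaglevLbConfig) {
        rh := &clusterv3.Cluster_RingHashLbConfig{
            MinimumRingSize: wrapperspb.UInt64(2048),
            HashFunction:    clusterv3.Cluster_RingHashLbConfig_XX_HASH,
            MaximumRingSize: wrapperspb.UInt64(1 << 20), // well under the 8M cap
        }
        mg := &clusterv3.Cluster_MaglevLbConfig{
            TableSize: wrapperspb.UInt64(65537),
        }
        return rh, mg
    }

    func main() { _, _ = ringHashAndMaglev() }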
+func (*Cluster_MaglevLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 8} +} + +func (x *Cluster_MaglevLbConfig) GetTableSize() *wrapperspb.UInt64Value { + if x != nil { + return x.TableSize + } + return nil +} + +// Specific configuration for the +// :ref:`Original Destination ` +// load balancing policy. +// [#extension: envoy.clusters.original_dst] +type Cluster_OriginalDstLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // When true, a HTTP header can be used to override the original dst address. The default header is + // :ref:`x-envoy-original-dst-host `. + // + // .. attention:: + // + // This header isn't sanitized by default, so enabling this feature allows HTTP clients to + // route traffic to arbitrary hosts and/or ports, which may have serious security + // consequences. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. + UseHttpHeader bool `protobuf:"varint,1,opt,name=use_http_header,json=useHttpHeader,proto3" json:"use_http_header,omitempty"` + // The http header to override destination address if :ref:`use_http_header `. + // is set to true. If the value is empty, :ref:`x-envoy-original-dst-host ` will be used. + HttpHeaderName string `protobuf:"bytes,2,opt,name=http_header_name,json=httpHeaderName,proto3" json:"http_header_name,omitempty"` + // The port to override for the original dst address. This port + // will take precedence over filter state and header override ports + UpstreamPortOverride *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=upstream_port_override,json=upstreamPortOverride,proto3" json:"upstream_port_override,omitempty"` + // The dynamic metadata key to override destination address. + // First the request metadata is considered, then the connection one. + MetadataKey *v34.MetadataKey `protobuf:"bytes,4,opt,name=metadata_key,json=metadataKey,proto3" json:"metadata_key,omitempty"` +} + +func (x *Cluster_OriginalDstLbConfig) Reset() { + *x = Cluster_OriginalDstLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_OriginalDstLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_OriginalDstLbConfig) ProtoMessage() {} + +func (x *Cluster_OriginalDstLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_OriginalDstLbConfig.ProtoReflect.Descriptor instead. 
+func (*Cluster_OriginalDstLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 9} +} + +func (x *Cluster_OriginalDstLbConfig) GetUseHttpHeader() bool { + if x != nil { + return x.UseHttpHeader + } + return false +} + +func (x *Cluster_OriginalDstLbConfig) GetHttpHeaderName() string { + if x != nil { + return x.HttpHeaderName + } + return "" +} + +func (x *Cluster_OriginalDstLbConfig) GetUpstreamPortOverride() *wrapperspb.UInt32Value { + if x != nil { + return x.UpstreamPortOverride + } + return nil +} + +func (x *Cluster_OriginalDstLbConfig) GetMetadataKey() *v34.MetadataKey { + if x != nil { + return x.MetadataKey + } + return nil +} + +// Common configuration for all load balancer implementations. +// [#next-free-field: 9] +type Cluster_CommonLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configures the :ref:`healthy panic threshold `. + // If not specified, the default is 50%. + // To disable panic mode, set to 0%. + // + // .. note:: + // + // The specified percent will be truncated to the nearest 1%. + HealthyPanicThreshold *v33.Percent `protobuf:"bytes,1,opt,name=healthy_panic_threshold,json=healthyPanicThreshold,proto3" json:"healthy_panic_threshold,omitempty"` + // Types that are assignable to LocalityConfigSpecifier: + // + // *Cluster_CommonLbConfig_ZoneAwareLbConfig_ + // *Cluster_CommonLbConfig_LocalityWeightedLbConfig_ + LocalityConfigSpecifier isCluster_CommonLbConfig_LocalityConfigSpecifier `protobuf_oneof:"locality_config_specifier"` + // If set, all health check/weight/metadata updates that happen within this duration will be + // merged and delivered in one shot when the duration expires. The start of the duration is when + // the first update happens. This is useful for big clusters, with potentially noisy deploys + // that might trigger excessive CPU usage due to a constant stream of healthcheck state changes + // or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new + // cluster). Please always keep in mind that the use of sandbox technologies may change this + // behavior. + // + // If this is not set, we default to a merge window of 1000ms. To disable it, set the merge + // window to 0. + // + // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is + // because merging those updates isn't currently safe. See + // https://github.com/envoyproxy/envoy/pull/3941. + UpdateMergeWindow *durationpb.Duration `protobuf:"bytes,4,opt,name=update_merge_window,json=updateMergeWindow,proto3" json:"update_merge_window,omitempty"` + // If set to true, Envoy will :ref:`exclude ` new hosts + // when computing load balancing weights until they have been health checked for the first time. + // This will have no effect unless active health checking is also configured. + IgnoreNewHostsUntilFirstHc bool `protobuf:"varint,5,opt,name=ignore_new_hosts_until_first_hc,json=ignoreNewHostsUntilFirstHc,proto3" json:"ignore_new_hosts_until_first_hc,omitempty"` + // If set to “true“, the cluster manager will drain all existing + // connections to upstream hosts whenever hosts are added or removed from the cluster. 
+ CloseConnectionsOnHostSetChange bool `protobuf:"varint,6,opt,name=close_connections_on_host_set_change,json=closeConnectionsOnHostSetChange,proto3" json:"close_connections_on_host_set_change,omitempty"` + // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) + ConsistentHashingLbConfig *Cluster_CommonLbConfig_ConsistentHashingLbConfig `protobuf:"bytes,7,opt,name=consistent_hashing_lb_config,json=consistentHashingLbConfig,proto3" json:"consistent_hashing_lb_config,omitempty"` + // This controls what hosts are considered valid when using + // :ref:`host overrides `, which is used by some + // filters to modify the load balancing decision. + // + // If this is unset then [UNKNOWN, HEALTHY, DEGRADED] will be applied by default. If this is + // set with an empty set of statuses then host overrides will be ignored by the load balancing. + OverrideHostStatus *v32.HealthStatusSet `protobuf:"bytes,8,opt,name=override_host_status,json=overrideHostStatus,proto3" json:"override_host_status,omitempty"` +} + +func (x *Cluster_CommonLbConfig) Reset() { + *x = Cluster_CommonLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_CommonLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_CommonLbConfig) ProtoMessage() {} + +func (x *Cluster_CommonLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_CommonLbConfig.ProtoReflect.Descriptor instead. 
+func (*Cluster_CommonLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 10} +} + +func (x *Cluster_CommonLbConfig) GetHealthyPanicThreshold() *v33.Percent { + if x != nil { + return x.HealthyPanicThreshold + } + return nil +} + +func (m *Cluster_CommonLbConfig) GetLocalityConfigSpecifier() isCluster_CommonLbConfig_LocalityConfigSpecifier { + if m != nil { + return m.LocalityConfigSpecifier + } + return nil +} + +func (x *Cluster_CommonLbConfig) GetZoneAwareLbConfig() *Cluster_CommonLbConfig_ZoneAwareLbConfig { + if x, ok := x.GetLocalityConfigSpecifier().(*Cluster_CommonLbConfig_ZoneAwareLbConfig_); ok { + return x.ZoneAwareLbConfig + } + return nil +} + +func (x *Cluster_CommonLbConfig) GetLocalityWeightedLbConfig() *Cluster_CommonLbConfig_LocalityWeightedLbConfig { + if x, ok := x.GetLocalityConfigSpecifier().(*Cluster_CommonLbConfig_LocalityWeightedLbConfig_); ok { + return x.LocalityWeightedLbConfig + } + return nil +} + +func (x *Cluster_CommonLbConfig) GetUpdateMergeWindow() *durationpb.Duration { + if x != nil { + return x.UpdateMergeWindow + } + return nil +} + +func (x *Cluster_CommonLbConfig) GetIgnoreNewHostsUntilFirstHc() bool { + if x != nil { + return x.IgnoreNewHostsUntilFirstHc + } + return false +} + +func (x *Cluster_CommonLbConfig) GetCloseConnectionsOnHostSetChange() bool { + if x != nil { + return x.CloseConnectionsOnHostSetChange + } + return false +} + +func (x *Cluster_CommonLbConfig) GetConsistentHashingLbConfig() *Cluster_CommonLbConfig_ConsistentHashingLbConfig { + if x != nil { + return x.ConsistentHashingLbConfig + } + return nil +} + +func (x *Cluster_CommonLbConfig) GetOverrideHostStatus() *v32.HealthStatusSet { + if x != nil { + return x.OverrideHostStatus + } + return nil +} + +type isCluster_CommonLbConfig_LocalityConfigSpecifier interface { + isCluster_CommonLbConfig_LocalityConfigSpecifier() +} + +type Cluster_CommonLbConfig_ZoneAwareLbConfig_ struct { + ZoneAwareLbConfig *Cluster_CommonLbConfig_ZoneAwareLbConfig `protobuf:"bytes,2,opt,name=zone_aware_lb_config,json=zoneAwareLbConfig,proto3,oneof"` +} + +type Cluster_CommonLbConfig_LocalityWeightedLbConfig_ struct { + LocalityWeightedLbConfig *Cluster_CommonLbConfig_LocalityWeightedLbConfig `protobuf:"bytes,3,opt,name=locality_weighted_lb_config,json=localityWeightedLbConfig,proto3,oneof"` +} + +func (*Cluster_CommonLbConfig_ZoneAwareLbConfig_) isCluster_CommonLbConfig_LocalityConfigSpecifier() { +} + +func (*Cluster_CommonLbConfig_LocalityWeightedLbConfig_) isCluster_CommonLbConfig_LocalityConfigSpecifier() { +} + +type Cluster_RefreshRate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the base interval between refreshes. This parameter is required and must be greater + // than zero and less than + // :ref:`max_interval `. + BaseInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=base_interval,json=baseInterval,proto3" json:"base_interval,omitempty"` + // Specifies the maximum interval between refreshes. This parameter is optional, but must be + // greater than or equal to the + // :ref:`base_interval ` if set. The default + // is 10 times the :ref:`base_interval `. 
+ MaxInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=max_interval,json=maxInterval,proto3" json:"max_interval,omitempty"` +} + +func (x *Cluster_RefreshRate) Reset() { + *x = Cluster_RefreshRate{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_RefreshRate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_RefreshRate) ProtoMessage() {} + +func (x *Cluster_RefreshRate) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_RefreshRate.ProtoReflect.Descriptor instead. +func (*Cluster_RefreshRate) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 11} +} + +func (x *Cluster_RefreshRate) GetBaseInterval() *durationpb.Duration { + if x != nil { + return x.BaseInterval + } + return nil +} + +func (x *Cluster_RefreshRate) GetMaxInterval() *durationpb.Duration { + if x != nil { + return x.MaxInterval + } + return nil +} + +type Cluster_PreconnectPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Indicates how many streams (rounded up) can be anticipated per-upstream for each + // incoming stream. This is useful for high-QPS or latency-sensitive services. Preconnecting + // will only be done if the upstream is healthy and the cluster has traffic. + // + // For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be + // established, one for the new incoming stream, and one for a presumed follow-up stream. For + // HTTP/2, only one connection would be established by default as one connection can + // serve both the original and presumed follow-up stream. + // + // In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100 + // active streams, there would be 100 connections in use, and 50 connections preconnected. + // This might be a useful value for something like short lived single-use connections, + // for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection + // termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP + // or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more + // reasonable, where for every 100 connections, 5 preconnected connections would be in the queue + // in case of unexpected disconnects where the connection could not be reused. + // + // If this value is not set, or set explicitly to one, Envoy will fetch as many connections + // as needed to serve streams in flight. This means in steady state if a connection is torn down, + // a subsequent streams will pay an upstream-rtt latency penalty waiting for a new connection. + // + // This is limited somewhat arbitrarily to 3 because preconnecting too aggressively can + // harm latency more than the preconnecting helps. 
+ PerUpstreamPreconnectRatio *wrapperspb.DoubleValue `protobuf:"bytes,1,opt,name=per_upstream_preconnect_ratio,json=perUpstreamPreconnectRatio,proto3" json:"per_upstream_preconnect_ratio,omitempty"` + // Indicates how many streams (rounded up) can be anticipated across a cluster for each + // stream, useful for low QPS services. This is currently supported for a subset of + // deterministic non-hash-based load-balancing algorithms (weighted round robin, random). + // Unlike “per_upstream_preconnect_ratio“ this preconnects across the upstream instances in a + // cluster, doing best effort predictions of what upstream would be picked next and + // pre-establishing a connection. + // + // Preconnecting will be limited to one preconnect per configured upstream in the cluster and will + // only be done if there are healthy upstreams and the cluster has traffic. + // + // For example if preconnecting is set to 2 for a round robin HTTP/2 cluster, on the first + // incoming stream, 2 connections will be preconnected - one to the first upstream for this + // cluster, one to the second on the assumption there will be a follow-up stream. + // + // If this value is not set, or set explicitly to one, Envoy will fetch as many connections + // as needed to serve streams in flight, so during warm up and in steady state if a connection + // is closed (and per_upstream_preconnect_ratio is not set), there will be a latency hit for + // connection establishment. + // + // If both this and preconnect_ratio are set, Envoy will make sure both predicted needs are met, + // basically preconnecting max(predictive-preconnect, per-upstream-preconnect), for each + // upstream. + PredictivePreconnectRatio *wrapperspb.DoubleValue `protobuf:"bytes,2,opt,name=predictive_preconnect_ratio,json=predictivePreconnectRatio,proto3" json:"predictive_preconnect_ratio,omitempty"` +} + +func (x *Cluster_PreconnectPolicy) Reset() { + *x = Cluster_PreconnectPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_PreconnectPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_PreconnectPolicy) ProtoMessage() {} + +func (x *Cluster_PreconnectPolicy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_PreconnectPolicy.ProtoReflect.Descriptor instead. +func (*Cluster_PreconnectPolicy) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 12} +} + +func (x *Cluster_PreconnectPolicy) GetPerUpstreamPreconnectRatio() *wrapperspb.DoubleValue { + if x != nil { + return x.PerUpstreamPreconnectRatio + } + return nil +} + +func (x *Cluster_PreconnectPolicy) GetPredictivePreconnectRatio() *wrapperspb.DoubleValue { + if x != nil { + return x.PredictivePreconnectRatio + } + return nil +} + +// Specifications for subsets. +type Cluster_LbSubsetConfig_LbSubsetSelector struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // List of keys to match with the weighted cluster metadata. 
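The preconnect ratios above reduce to simple arithmetic: for each active stream, Envoy anticipates ratio connections, rounded up. A sketch of that arithmetic (editorial, helper name illustrative) reproduces the worked examples from the comments.

    package main

    import (
        "fmt"
        "math"
    )

    // desiredConnections: steady state with n active non-multiplexed streams and a
    // per_upstream_preconnect_ratio of r wants ceil(n * r) connections in total.
    func desiredConnections(activeStreams int, ratio float64) int {
        return int(math.Ceil(float64(activeStreams) * ratio))
    }

    func main() {
        // Documented example: ratio 1.5 with 100 active streams means
        // 100 connections in use plus 50 preconnected.
        total := desiredConnections(100, 1.5)
        fmt.Println(total, "connections,", total-100, "preconnected") // 150, 50
        // Long-lived traffic example from the comments: 1.05 keeps 5 spare per 100.
        fmt.Println(desiredConnections(100, 1.05) - 100) // 5
    }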
+ Keys []string `protobuf:"bytes,1,rep,name=keys,proto3" json:"keys,omitempty"` + // Selects a mode of operation in which each subset has only one host. This mode uses the same rules for + // choosing a host, but updating hosts is faster, especially for large numbers of hosts. + // + // If a match is found to a host, that host will be used regardless of priority levels. + // + // When this mode is enabled, configurations that contain more than one host with the same metadata value for the single key in “keys“ + // will use only one of the hosts with the given key; no requests will be routed to the others. The cluster gauge + // :ref:`lb_subsets_single_host_per_subset_duplicate` indicates how many duplicates are + // present in the current configuration. + SingleHostPerSubset bool `protobuf:"varint,4,opt,name=single_host_per_subset,json=singleHostPerSubset,proto3" json:"single_host_per_subset,omitempty"` + // The behavior used when no endpoint subset matches the selected route's + // metadata. + FallbackPolicy Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy `protobuf:"varint,2,opt,name=fallback_policy,json=fallbackPolicy,proto3,enum=envoy.config.cluster.v3.Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy" json:"fallback_policy,omitempty"` + // Subset of + // :ref:`keys` used by + // :ref:`KEYS_SUBSET` + // fallback policy. + // It has to be a non empty list if KEYS_SUBSET fallback policy is selected. + // For any other fallback policy the parameter is not used and should not be set. + // Only values also present in + // :ref:`keys` are allowed, but + // “fallback_keys_subset“ cannot be equal to “keys“. + FallbackKeysSubset []string `protobuf:"bytes,3,rep,name=fallback_keys_subset,json=fallbackKeysSubset,proto3" json:"fallback_keys_subset,omitempty"` +} + +func (x *Cluster_LbSubsetConfig_LbSubsetSelector) Reset() { + *x = Cluster_LbSubsetConfig_LbSubsetSelector{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_LbSubsetConfig_LbSubsetSelector) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_LbSubsetConfig_LbSubsetSelector) ProtoMessage() {} + +func (x *Cluster_LbSubsetConfig_LbSubsetSelector) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_LbSubsetConfig_LbSubsetSelector.ProtoReflect.Descriptor instead. 
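The subset_selectors semantics described earlier (one subset per unique combination of the selector's keys taken from each endpoint's "envoy.lb" metadata, matched against the route's metadata) can be illustrated with a small self-contained sketch. It is editorial, treats metadata as plain string maps for brevity, and all names in it are illustrative.

    package main

    import "fmt"

    // hostMeta is a simplified stand-in for an endpoint's "envoy.lb" metadata.
    type hostMeta map[string]string

    // subsetKey builds the lookup key for one selector: the values of the
    // selector's keys, in order, taken from a metadata map.
    func subsetKey(keys []string, meta hostMeta) (string, bool) {
        out := ""
        for _, k := range keys {
            v, ok := meta[k]
            if !ok {
                return "", false // host does not define this key; it joins no such subset
            }
            out += k + "=" + v + ";"
        }
        return out, true
    }

    func main() {
        selector := []string{"stage", "hardware_type"} // one subset_selector entry
        hosts := map[string]hostMeta{
            "a": {"stage": "prod", "hardware_type": "c3"},
            "b": {"stage": "prod", "hardware_type": "c3"},
            "c": {"stage": "canary", "hardware_type": "c3"},
        }

        // Build subsets: one per unique combination of key/value pairs.
        subsets := map[string][]string{}
        for name, meta := range hosts {
            if k, ok := subsetKey(selector, meta); ok {
                subsets[k] = append(subsets[k], name)
            }
        }

        // A route carrying matching metadata selects exactly one subset.
        routeMeta := hostMeta{"stage": "prod", "hardware_type": "c3"}
        k, _ := subsetKey(selector, routeMeta)
        fmt.Println("hosts eligible for this route:", subsets[k]) // a and b (in map order)
    }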
+func (*Cluster_LbSubsetConfig_LbSubsetSelector) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 3, 0} +} + +func (x *Cluster_LbSubsetConfig_LbSubsetSelector) GetKeys() []string { + if x != nil { + return x.Keys + } + return nil +} + +func (x *Cluster_LbSubsetConfig_LbSubsetSelector) GetSingleHostPerSubset() bool { + if x != nil { + return x.SingleHostPerSubset + } + return false +} + +func (x *Cluster_LbSubsetConfig_LbSubsetSelector) GetFallbackPolicy() Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy { + if x != nil { + return x.FallbackPolicy + } + return Cluster_LbSubsetConfig_LbSubsetSelector_NOT_DEFINED +} + +func (x *Cluster_LbSubsetConfig_LbSubsetSelector) GetFallbackKeysSubset() []string { + if x != nil { + return x.FallbackKeysSubset + } + return nil +} + +// Configuration for :ref:`zone aware routing +// `. +type Cluster_CommonLbConfig_ZoneAwareLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configures percentage of requests that will be considered for zone aware routing + // if zone aware routing is configured. If not specified, the default is 100%. + // * :ref:`runtime values `. + // * :ref:`Zone aware routing support `. + RoutingEnabled *v33.Percent `protobuf:"bytes,1,opt,name=routing_enabled,json=routingEnabled,proto3" json:"routing_enabled,omitempty"` + // Configures minimum upstream cluster size required for zone aware routing + // If upstream cluster size is less than specified, zone aware routing is not performed + // even if zone aware routing is configured. If not specified, the default is 6. + // * :ref:`runtime values `. + // * :ref:`Zone aware routing support `. + MinClusterSize *wrapperspb.UInt64Value `protobuf:"bytes,2,opt,name=min_cluster_size,json=minClusterSize,proto3" json:"min_cluster_size,omitempty"` + // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic + // mode`. Instead, the cluster will fail all + // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a + // failing service. + FailTrafficOnPanic bool `protobuf:"varint,3,opt,name=fail_traffic_on_panic,json=failTrafficOnPanic,proto3" json:"fail_traffic_on_panic,omitempty"` +} + +func (x *Cluster_CommonLbConfig_ZoneAwareLbConfig) Reset() { + *x = Cluster_CommonLbConfig_ZoneAwareLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_CommonLbConfig_ZoneAwareLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_CommonLbConfig_ZoneAwareLbConfig) ProtoMessage() {} + +func (x *Cluster_CommonLbConfig_ZoneAwareLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_CommonLbConfig_ZoneAwareLbConfig.ProtoReflect.Descriptor instead. 
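A construction sketch for the zone-aware settings just described: 90% of requests considered for zone-aware routing, a minimum cluster size of 3, and traffic failed outright during panic mode. The import paths are assumed (typical go-control-plane layout) and the values are illustrative, not defaults from the patch.

    package main

    import (
        clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" // assumed import path
        typev3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"              // assumed import path
        "google.golang.org/protobuf/types/known/wrapperspb"
    )

    // zoneAware builds a CommonLbConfig whose locality_config_specifier oneof is
    // set to the zone_aware_lb_config branch.
    func zoneAware() *clusterv3.Cluster_CommonLbConfig {
        return &clusterv3.Cluster_CommonLbConfig{
            LocalityConfigSpecifier: &clusterv3.Cluster_CommonLbConfig_ZoneAwareLbConfig_{
                ZoneAwareLbConfig: &clusterv3.Cluster_CommonLbConfig_ZoneAwareLbConfig{
                    RoutingEnabled:     &typev3.Percent{Value: 90},
                    MinClusterSize:     wrapperspb.UInt64(3),
                    FailTrafficOnPanic: true,
                },
            },
        }
    }

    func main() { _ = zoneAware() }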
+func (*Cluster_CommonLbConfig_ZoneAwareLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 10, 0} +} + +func (x *Cluster_CommonLbConfig_ZoneAwareLbConfig) GetRoutingEnabled() *v33.Percent { + if x != nil { + return x.RoutingEnabled + } + return nil +} + +func (x *Cluster_CommonLbConfig_ZoneAwareLbConfig) GetMinClusterSize() *wrapperspb.UInt64Value { + if x != nil { + return x.MinClusterSize + } + return nil +} + +func (x *Cluster_CommonLbConfig_ZoneAwareLbConfig) GetFailTrafficOnPanic() bool { + if x != nil { + return x.FailTrafficOnPanic + } + return false +} + +// Configuration for :ref:`locality weighted load balancing +// ` +type Cluster_CommonLbConfig_LocalityWeightedLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Cluster_CommonLbConfig_LocalityWeightedLbConfig) Reset() { + *x = Cluster_CommonLbConfig_LocalityWeightedLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_CommonLbConfig_LocalityWeightedLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_CommonLbConfig_LocalityWeightedLbConfig) ProtoMessage() {} + +func (x *Cluster_CommonLbConfig_LocalityWeightedLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_CommonLbConfig_LocalityWeightedLbConfig.ProtoReflect.Descriptor instead. +func (*Cluster_CommonLbConfig_LocalityWeightedLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 10, 1} +} + +// Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) +type Cluster_CommonLbConfig_ConsistentHashingLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If set to “true“, the cluster will use hostname instead of the resolved + // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. + UseHostnameForHashing bool `protobuf:"varint,1,opt,name=use_hostname_for_hashing,json=useHostnameForHashing,proto3" json:"use_hostname_for_hashing,omitempty"` + // Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150 + // no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster. + // If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200. + // Minimum is 100. + // + // Applies to both Ring Hash and Maglev load balancers. + // + // This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified + // “hash_balance_factor“, requests to any upstream host are capped at “hash_balance_factor/100“ times the average number of requests + // across the cluster. 
When a request arrives for an upstream host that is currently serving at its max capacity, linear probing + // is used to identify an eligible host. Further, the linear probe is implemented using a random jump in hosts ring/table to identify + // the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the + // cascading overflow effect when choosing the next host in the ring/table). + // + // If weights are specified on the hosts, they are respected. + // + // This is an O(N) algorithm, unlike other load balancers. Using a lower “hash_balance_factor“ results in more hosts + // being probed, so use a higher value if you require better performance. + HashBalanceFactor *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=hash_balance_factor,json=hashBalanceFactor,proto3" json:"hash_balance_factor,omitempty"` +} + +func (x *Cluster_CommonLbConfig_ConsistentHashingLbConfig) Reset() { + *x = Cluster_CommonLbConfig_ConsistentHashingLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cluster_CommonLbConfig_ConsistentHashingLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cluster_CommonLbConfig_ConsistentHashingLbConfig) ProtoMessage() {} + +func (x *Cluster_CommonLbConfig_ConsistentHashingLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cluster_CommonLbConfig_ConsistentHashingLbConfig.ProtoReflect.Descriptor instead. 
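The hash_balance_factor bound described above is also easy to check with arithmetic. The sketch is editorial; rounding up is an assumption of this note, since the comments do not spell out the exact rounding Envoy applies.

    package main

    import (
        "fmt"
        "math"
    )

    // hostRequestCap: with hash_balance_factor f, no host should carry more than
    // f/100 times the cluster-wide average number of requests.
    func hostRequestCap(totalRequests, hostCount int, hashBalanceFactor uint32) int {
        avg := float64(totalRequests) / float64(hostCount)
        return int(math.Ceil(avg * float64(hashBalanceFactor) / 100))
    }

    func main() {
        // 1000 in-flight requests over 10 hosts averages 100 per host; a factor of
        // 150 caps any single host at 150 before linear probing moves the request on.
        fmt.Println(hostRequestCap(1000, 10, 150)) // 150
    }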
+func (*Cluster_CommonLbConfig_ConsistentHashingLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 10, 2} +} + +func (x *Cluster_CommonLbConfig_ConsistentHashingLbConfig) GetUseHostnameForHashing() bool { + if x != nil { + return x.UseHostnameForHashing + } + return false +} + +func (x *Cluster_CommonLbConfig_ConsistentHashingLbConfig) GetHashBalanceFactor() *wrapperspb.UInt32Value { + if x != nil { + return x.HashBalanceFactor + } + return nil +} + +type LoadBalancingPolicy_Policy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // [#extension-category: envoy.load_balancing_policies] + TypedExtensionConfig *v32.TypedExtensionConfig `protobuf:"bytes,4,opt,name=typed_extension_config,json=typedExtensionConfig,proto3" json:"typed_extension_config,omitempty"` +} + +func (x *LoadBalancingPolicy_Policy) Reset() { + *x = LoadBalancingPolicy_Policy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LoadBalancingPolicy_Policy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LoadBalancingPolicy_Policy) ProtoMessage() {} + +func (x *LoadBalancingPolicy_Policy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LoadBalancingPolicy_Policy.ProtoReflect.Descriptor instead. +func (*LoadBalancingPolicy_Policy) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *LoadBalancingPolicy_Policy) GetTypedExtensionConfig() *v32.TypedExtensionConfig { + if x != nil { + return x.TypedExtensionConfig + } + return nil +} + +type UpstreamConnectionOptions_HappyEyeballsConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specify the IP address family to attempt connection first in happy + // eyeballs algorithm according to RFC8305#section-4. + FirstAddressFamilyVersion UpstreamConnectionOptions_FirstAddressFamilyVersion `protobuf:"varint,1,opt,name=first_address_family_version,json=firstAddressFamilyVersion,proto3,enum=envoy.config.cluster.v3.UpstreamConnectionOptions_FirstAddressFamilyVersion" json:"first_address_family_version,omitempty"` + // Specify the number of addresses of the first_address_family_version being + // attempted for connection before the other address family. 
+ FirstAddressFamilyCount *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=first_address_family_count,json=firstAddressFamilyCount,proto3" json:"first_address_family_count,omitempty"` +} + +func (x *UpstreamConnectionOptions_HappyEyeballsConfig) Reset() { + *x = UpstreamConnectionOptions_HappyEyeballsConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpstreamConnectionOptions_HappyEyeballsConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpstreamConnectionOptions_HappyEyeballsConfig) ProtoMessage() {} + +func (x *UpstreamConnectionOptions_HappyEyeballsConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpstreamConnectionOptions_HappyEyeballsConfig.ProtoReflect.Descriptor instead. +func (*UpstreamConnectionOptions_HappyEyeballsConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *UpstreamConnectionOptions_HappyEyeballsConfig) GetFirstAddressFamilyVersion() UpstreamConnectionOptions_FirstAddressFamilyVersion { + if x != nil { + return x.FirstAddressFamilyVersion + } + return UpstreamConnectionOptions_DEFAULT +} + +func (x *UpstreamConnectionOptions_HappyEyeballsConfig) GetFirstAddressFamilyCount() *wrapperspb.UInt32Value { + if x != nil { + return x.FirstAddressFamilyCount + } + return nil +} + +var File_envoy_config_cluster_v3_cluster_proto protoreflect.FileDescriptor + +var file_envoy_config_cluster_v3_cluster_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x1a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, + 0x74, 0x5f, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6f, + 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, + 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 
0x28, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x65, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2f, 0x76, 0x33, 0x2f, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, + 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 
0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4b, 0x0a, 0x11, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x36, + 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, + 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x22, 0xd6, 0x53, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x12, 0x6f, 0x0a, 0x18, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, + 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x18, 0x2b, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, + 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x16, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x3e, 0x0a, 0x0d, 0x61, 0x6c, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x42, 0x1a, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x14, 0x0a, + 0x12, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x52, 0x0b, 0x61, 0x6c, 0x74, 0x53, 0x74, 0x61, 0x74, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x4e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x57, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x26, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x5f, 0x0a, 0x12, 0x65, 0x64, 0x73, + 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 
0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x45, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, 0x65, 0x64, 0x73, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4c, 0x0a, 0x0f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x6f, 0x0a, 0x21, 0x70, 0x65, 0x72, 0x5f, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, + 0x72, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x10, 0x01, 0x52, 0x1d, 0x70, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x50, 0x0a, 0x09, 0x6c, 0x62, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, + 0x62, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, + 0x01, 0x52, 0x08, 0x6c, 0x62, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x58, 0x0a, 0x0f, 0x6c, + 0x6f, 0x61, 0x64, 0x5f, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x21, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x41, 0x73, 0x73, 0x69, 0x67, + 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x73, 0x73, 0x69, 0x67, + 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x46, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, + 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x12, 0x68, 0x0a, + 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, + 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x18, 0x6d, + 0x61, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x73, 0x50, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x53, 0x0a, 0x10, 0x63, 0x69, 0x72, 0x63, 0x75, + 0x69, 0x74, 0x5f, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x72, 0x63, + 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x52, 0x0f, 0x63, 0x69, 0x72, + 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x83, 0x01, 0x0a, + 0x1e, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x2e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, + 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x1b, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, + 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x77, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x68, 0x74, 0x74, + 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, + 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x6b, 0x0a, 0x15, 0x68, + 0x74, 0x74, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, + 0x30, 0x18, 0x01, 0x52, 0x13, 0x68, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x74, 0x0a, 0x16, 0x68, 0x74, 0x74, 0x70, + 0x32, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x12, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x10, 0x01, 0x92, 0xc7, 0x86, + 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x14, 0x68, 0x74, 0x74, 0x70, 0x32, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 
0x6e, 0x73, 0x12, 0x8c, + 0x01, 0x0a, 0x20, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x24, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, + 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x1d, + 0x74, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x51, 0x0a, + 0x10, 0x64, 0x6e, 0x73, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x72, 0x61, 0x74, + 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x2a, 0x04, 0x10, 0xc0, 0x84, 0x3d, + 0x52, 0x0e, 0x64, 0x6e, 0x73, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, + 0x12, 0x65, 0x0a, 0x18, 0x64, 0x6e, 0x73, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, + 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x2c, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, + 0x52, 0x15, 0x64, 0x6e, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x66, 0x72, + 0x65, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x65, + 0x63, 0x74, 0x5f, 0x64, 0x6e, 0x73, 0x5f, 0x74, 0x74, 0x6c, 0x18, 0x27, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x65, 0x63, 0x74, 0x44, 0x6e, 0x73, 0x54, 0x74, 0x6c, 0x12, + 0x66, 0x0a, 0x11, 0x64, 0x6e, 0x73, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x66, 0x61, + 0x6d, 0x69, 0x6c, 0x79, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x44, 0x6e, 0x73, + 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x64, 0x6e, 0x73, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, + 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x4f, 0x0a, 0x0d, 0x64, 0x6e, 0x73, 0x5f, 0x72, + 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x73, 0x18, 0x12, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x0b, 0x92, + 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0c, 0x64, 0x6e, 0x73, 0x52, + 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x73, 0x12, 0x41, 0x0a, 0x17, 0x75, 0x73, 0x65, 0x5f, + 0x74, 0x63, 0x70, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x64, 0x6e, 0x73, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, + 
0x75, 0x70, 0x73, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x13, 0x75, 0x73, 0x65, 0x54, 0x63, 0x70, 0x46, 0x6f, + 0x72, 0x44, 0x6e, 0x73, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x6a, 0x0a, 0x15, 0x64, + 0x6e, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, + 0x18, 0x01, 0x52, 0x13, 0x64, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x65, 0x0a, 0x19, 0x74, 0x79, 0x70, 0x65, 0x64, + 0x5f, 0x64, 0x6e, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x37, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, 0x74, 0x79, 0x70, 0x65, 0x64, 0x44, 0x6e, 0x73, + 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4c, + 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x77, 0x61, 0x72, 0x6d, 0x5f, + 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x18, 0x36, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x11, 0x77, 0x61, 0x69, 0x74, 0x46, + 0x6f, 0x72, 0x57, 0x61, 0x72, 0x6d, 0x4f, 0x6e, 0x49, 0x6e, 0x69, 0x74, 0x12, 0x56, 0x0a, 0x11, + 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x10, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x10, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x5f, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, + 0x02, 0x2a, 0x00, 0x52, 0x0f, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x12, 0x52, 0x0a, 0x14, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x15, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x12, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x69, + 0x6e, 0x64, 0x43, 0x6f, 
0x6e, 0x66, 0x69, 0x67, 0x12, 0x59, 0x0a, 0x10, 0x6c, 0x62, 0x5f, 0x73, + 0x75, 0x62, 0x73, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x16, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x0e, 0x6c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x62, 0x0a, 0x13, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, 0x62, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x48, 0x01, 0x52, 0x10, 0x72, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, + 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5b, 0x0a, 0x10, 0x6d, 0x61, 0x67, 0x6c, 0x65, + 0x76, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x34, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x67, 0x6c, 0x65, 0x76, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x48, 0x01, 0x52, 0x0e, 0x6d, 0x61, 0x67, 0x6c, 0x65, 0x76, 0x4c, 0x62, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6b, 0x0a, 0x16, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, + 0x5f, 0x64, 0x73, 0x74, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x22, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x44, + 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x01, 0x52, 0x13, 0x6f, 0x72, + 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x44, 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x6e, 0x0a, 0x17, 0x6c, 0x65, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x25, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x01, 0x52, 0x14, 0x6c, 0x65, 0x61, + 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x68, 0x0a, 0x15, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, + 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x38, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x52, 0x6f, 0x75, 0x6e, 0x64, 
0x52, 0x6f, 0x62, 0x69, 0x6e, 0x4c, 0x62, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x01, 0x52, 0x12, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, + 0x62, 0x69, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x59, 0x0a, 0x10, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x50, 0x0a, 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, + 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x12, 0x75, 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x92, 0xc7, 0x86, + 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x11, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x72, 0x0a, 0x1b, 0x75, + 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x19, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x55, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x6f, 0x6e, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x1f, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x23, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x4f, 0x6e, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 
0x6c, 0x74, 0x68, 0x46, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x40, 0x0a, 0x1d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, + 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x6f, 0x6e, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, + 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x61, 0x6c, 0x18, 0x20, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x69, + 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4f, 0x6e, 0x48, 0x6f, 0x73, + 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x61, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x73, 0x18, 0x28, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x73, 0x12, 0x60, 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x29, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x61, + 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0a, 0x6c, 0x72, 0x73, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x6c, + 0x72, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x1b, 0x6c, 0x72, 0x73, 0x5f, + 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, + 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x39, 0x20, 0x03, 0x28, 0x09, 0x52, 0x18, 0x6c, + 0x72, 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x3f, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x63, 0x6b, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x75, 0x64, 0x67, 0x65, 0x74, 0x73, + 0x18, 0x2f, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, + 0x30, 0x18, 0x01, 0x52, 0x13, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x73, 0x12, 0x53, 0x0a, 0x0f, 0x75, 0x70, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x30, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x75, + 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5a, 0x0a, + 0x13, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x73, 0x18, 0x31, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 
0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x11, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x5e, 0x0a, 0x11, 0x70, 0x72, 0x65, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x32, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x10, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x58, 0x0a, 0x29, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x70, 0x65, 0x72, + 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x33, 0x20, 0x01, 0x28, 0x08, 0x52, 0x25, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x50, 0x65, 0x72, 0x44, + 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0xe6, 0x01, 0x0a, 0x14, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1b, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x52, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x50, 0x0a, 0x10, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, + 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0x98, 0x01, 0x0a, + 0x11, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x37, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x74, 0x79, 0x70, + 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, + 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 
0x72, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x1a, 0xa6, 0x01, 0x0a, 0x10, 0x45, 0x64, 0x73, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x0a, + 0x65, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x65, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, + 0x45, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x1a, 0xa4, 0x0a, 0x0a, 0x0e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x79, 0x0a, 0x0f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x46, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, + 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, + 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0e, + 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x3e, + 0x0a, 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x65, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, + 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x12, 0x6b, + 0x0a, 0x10, 0x73, 0x75, 0x62, 0x73, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, + 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, + 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x0f, 0x73, 0x75, 0x62, 0x73, + 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x61, + 0x77, 0x61, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x41, 0x77, 0x61, 0x72, 0x65, 0x12, + 0x32, 0x0a, 0x15, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 
0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, + 0x73, 0x63, 0x61, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x6e, 0x69, 0x63, 0x5f, 0x6d, 0x6f, 0x64, + 0x65, 0x5f, 0x61, 0x6e, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x70, 0x61, 0x6e, + 0x69, 0x63, 0x4d, 0x6f, 0x64, 0x65, 0x41, 0x6e, 0x79, 0x12, 0x1e, 0x0a, 0x0b, 0x6c, 0x69, 0x73, + 0x74, 0x5f, 0x61, 0x73, 0x5f, 0x61, 0x6e, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, + 0x6c, 0x69, 0x73, 0x74, 0x41, 0x73, 0x41, 0x6e, 0x79, 0x12, 0x92, 0x01, 0x0a, 0x18, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x4e, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, + 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, + 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x61, + 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x16, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0xda, + 0x03, 0x0a, 0x10, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x33, 0x0a, 0x16, 0x73, 0x69, 0x6e, 0x67, 0x6c, + 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x65, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x48, + 0x6f, 0x73, 0x74, 0x50, 0x65, 0x72, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x12, 0x92, 0x01, 0x0a, + 0x0f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x5f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, + 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, + 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, + 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, + 0x01, 0x52, 0x0e, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x12, 0x30, 0x0a, 0x14, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x6b, 0x65, + 0x79, 0x73, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x12, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x4b, 0x65, 0x79, 0x73, 0x53, 0x75, 0x62, + 0x73, 0x65, 0x74, 0x22, 0x79, 0x0a, 0x1e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x54, 
0x5f, 0x44, 0x45, 0x46, + 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x5f, 0x46, 0x41, 0x4c, + 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x4e, 0x59, 0x5f, 0x45, + 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x45, 0x46, + 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x45, 0x54, 0x10, 0x03, 0x12, 0x0f, 0x0a, + 0x0b, 0x4b, 0x45, 0x59, 0x53, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x45, 0x54, 0x10, 0x04, 0x3a, 0x3b, + 0x9a, 0xc5, 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, + 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, + 0x73, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x22, 0x4f, 0x0a, 0x16, 0x4c, + 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x5f, 0x46, 0x41, 0x4c, 0x4c, + 0x42, 0x41, 0x43, 0x4b, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x4e, 0x59, 0x5f, 0x45, 0x4e, + 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x45, 0x46, 0x41, + 0x55, 0x4c, 0x54, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x45, 0x54, 0x10, 0x02, 0x22, 0x4d, 0x0a, 0x1e, + 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x18, + 0x0a, 0x14, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x4e, 0x4f, 0x5f, 0x46, 0x41, + 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x41, 0x4c, 0x4c, + 0x42, 0x41, 0x43, 0x4b, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x01, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, + 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xe3, 0x01, 0x0a, 0x0f, 0x53, 0x6c, 0x6f, 0x77, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x45, 0x0a, 0x11, 0x73, + 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x57, 0x69, 0x6e, 0x64, + 0x6f, 0x77, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x67, 0x67, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x0a, 0x61, 0x67, 0x67, + 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x12, 0x6d, 0x69, 0x6e, 0x5f, 0x77, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x10, 0x6d, 0x69, 0x6e, + 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0x72, 0x0a, + 
0x12, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x5c, 0x0a, 0x11, 0x73, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x53, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x1a, 0xc5, 0x02, 0x0a, 0x14, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x48, 0x0a, 0x0c, 0x63, 0x68, + 0x6f, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x02, 0x52, 0x0b, 0x63, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x53, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, + 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x69, 0x61, 0x73, 0x12, 0x5c, 0x0a, 0x11, 0x73, 0x6c, 0x6f, + 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x53, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x91, 0x03, 0x0a, 0x10, 0x52, 0x69, + 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x54, + 0x0a, 0x11, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x32, 0x05, 0x18, 0x80, + 0x80, 0x80, 0x04, 0x52, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x6d, 0x0a, 0x0d, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x69, + 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, + 0x61, 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x68, 0x61, 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x54, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x72, + 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, + 0x07, 0x32, 0x05, 0x18, 0x80, 0x80, 0x80, 0x04, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x2e, 0x0a, 0x0c, 0x48, 0x61, 0x73, + 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x58, 0x58, 0x5f, + 0x48, 0x41, 0x53, 0x48, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4d, 0x55, 0x52, 0x4d, 0x55, 0x52, + 0x5f, 0x48, 0x41, 0x53, 0x48, 0x5f, 0x32, 0x10, 0x01, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, + 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, + 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x1a, 0x59, 0x0a, + 0x0e, 0x4d, 0x61, 0x67, 0x6c, 0x65, 0x76, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x47, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x32, 0x05, 0x18, 0xcb, 0x96, 0xb1, 0x02, 0x52, 0x09, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0xbf, 0x02, 0x0a, 0x13, 0x4f, 0x72, 0x69, + 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x44, 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x26, 0x0a, 0x0f, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x75, 0x73, 0x65, 0x48, 0x74, + 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0e, 0x68, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x5d, 0x0a, 0x16, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, 0x04, 0x18, 0xff, 0xff, 0x03, 0x52, 0x14, 0x75, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x6f, 0x72, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, + 0x65, 0x12, 0x46, 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x33, + 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, + 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x44, + 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xd5, 0x0b, 0x0a, 0x0e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, + 0x17, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x70, 0x61, 0x6e, 0x69, 0x63, 0x5f, 0x74, + 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x15, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x50, + 0x61, 0x6e, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x74, 0x0a, + 0x14, 0x7a, 0x6f, 0x6e, 0x65, 0x5f, 0x61, 0x77, 0x61, 0x72, 0x65, 0x5f, 0x6c, 0x62, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x5a, 0x6f, 0x6e, + 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, + 0x52, 0x11, 0x7a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x89, 0x01, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, + 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x49, 0x0a, 0x13, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x5f, + 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, + 0x65, 0x72, 0x67, 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x43, 0x0a, 0x1f, 0x69, 0x67, + 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x5f, 0x75, + 0x6e, 0x74, 0x69, 0x6c, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x68, 0x63, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x1a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x4e, 
0x65, 0x77, 0x48, 0x6f, + 0x73, 0x74, 0x73, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x46, 0x69, 0x72, 0x73, 0x74, 0x48, 0x63, 0x12, + 0x4d, 0x0a, 0x24, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x6f, 0x6e, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x74, + 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x63, + 0x6c, 0x6f, 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4f, + 0x6e, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x8a, + 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x61, + 0x73, 0x68, 0x69, 0x6e, 0x67, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x19, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, + 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x57, 0x0a, 0x14, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x53, 0x65, 0x74, + 0x52, 0x12, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x1a, 0x8d, 0x02, 0x0a, 0x11, 0x5a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, + 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x0f, 0x72, 0x6f, + 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x46, 0x0a, 0x10, 0x6d, + 0x69, 0x6e, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x31, 0x0a, 0x15, 0x66, 0x61, 0x69, 0x6c, 0x5f, 0x74, 0x72, 0x61, 0x66, + 0x66, 0x69, 0x63, 0x5f, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x6e, 0x69, 0x63, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x12, 0x66, 0x61, 0x69, 0x6c, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x4f, + 0x6e, 0x50, 0x61, 0x6e, 0x69, 0x63, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, 0x37, 0x0a, 0x35, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, + 
0x69, 0x67, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x5f, 0x0a, 0x18, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x3a, 0x43, 0x9a, 0xc5, 0x88, 0x1e, 0x3e, 0x0a, 0x3c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xf1, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, 0x18, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, + 0x61, 0x6d, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x75, 0x73, 0x65, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, + 0x6d, 0x65, 0x46, 0x6f, 0x72, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x55, 0x0a, 0x13, + 0x68, 0x61, 0x73, 0x68, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x61, 0x63, + 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x64, + 0x52, 0x11, 0x68, 0x61, 0x73, 0x68, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x61, 0x63, + 0x74, 0x6f, 0x72, 0x3a, 0x44, 0x9a, 0xc5, 0x88, 0x1e, 0x3f, 0x0a, 0x3d, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, + 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, + 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x1b, 0x0a, 0x19, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x1a, 0xd2, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x61, + 0x74, 0x65, 0x12, 0x4e, 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0xaa, 0x01, 0x08, 0x08, 0x01, 0x2a, 0x04, + 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x12, 0x4a, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 
0x0c, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x2a, 0x04, 0x10, 0xc0, 0x84, + 0x3d, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x3a, 0x27, + 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x72, + 0x65, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x1a, 0x83, 0x02, 0x0a, 0x10, 0x50, 0x72, 0x65, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x78, 0x0a, 0x1d, + 0x70, 0x65, 0x72, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x70, 0x72, 0x65, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + 0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x52, 0x1a, 0x70, 0x65, 0x72, 0x55, + 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x75, 0x0a, 0x1b, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x6f, + 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, + 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xf0, 0x3f, 0x52, 0x19, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, 0x69, 0x76, 0x65, 0x50, 0x72, + 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x1a, 0x66, 0x0a, + 0x22, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x57, 0x0a, 0x0d, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, + 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x41, 0x54, 0x49, 0x43, + 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x53, 0x54, 0x52, 0x49, 0x43, 0x54, 0x5f, 0x44, 0x4e, 0x53, + 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4c, 0x4f, 0x47, 0x49, 0x43, 0x41, 0x4c, 0x5f, 0x44, 0x4e, + 0x53, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x45, 0x44, 0x53, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, + 0x4f, 0x52, 0x49, 0x47, 0x49, 0x4e, 0x41, 0x4c, 0x5f, 0x44, 0x53, 0x54, 0x10, 0x04, 0x22, 0xa4, + 0x01, 0x0a, 0x08, 0x4c, 0x62, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x52, + 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x52, 0x4f, 0x42, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, + 0x4c, 0x45, 0x41, 0x53, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x01, 0x12, + 0x0d, 0x0a, 0x09, 0x52, 0x49, 0x4e, 0x47, 0x5f, 
0x48, 0x41, 0x53, 0x48, 0x10, 0x02, 0x12, 0x0a, + 0x0a, 0x06, 0x52, 0x41, 0x4e, 0x44, 0x4f, 0x4d, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x41, + 0x47, 0x4c, 0x45, 0x56, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45, + 0x52, 0x5f, 0x50, 0x52, 0x4f, 0x56, 0x49, 0x44, 0x45, 0x44, 0x10, 0x06, 0x12, 0x20, 0x0a, 0x1c, + 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x49, 0x4e, 0x47, 0x5f, 0x50, + 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x10, 0x07, 0x22, 0x04, + 0x08, 0x04, 0x10, 0x04, 0x2a, 0x0f, 0x4f, 0x52, 0x49, 0x47, 0x49, 0x4e, 0x41, 0x4c, 0x5f, 0x44, + 0x53, 0x54, 0x5f, 0x4c, 0x42, 0x22, 0x50, 0x0a, 0x0f, 0x44, 0x6e, 0x73, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x55, 0x54, 0x4f, + 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x56, 0x34, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, + 0x0b, 0x0a, 0x07, 0x56, 0x36, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, + 0x56, 0x34, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x45, 0x52, 0x52, 0x45, 0x44, 0x10, 0x03, 0x12, 0x07, + 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x04, 0x22, 0x54, 0x0a, 0x18, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x53, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, + 0x47, 0x55, 0x52, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0x00, + 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x53, 0x45, 0x5f, 0x44, 0x4f, 0x57, 0x4e, 0x53, 0x54, 0x52, 0x45, + 0x41, 0x4d, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0x01, 0x3a, 0x1b, 0x9a, + 0xc5, 0x88, 0x1e, 0x16, 0x0a, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, 0x18, 0x0a, 0x16, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x4a, 0x04, 0x08, + 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x4a, 0x04, 0x08, 0x23, 0x10, 0x24, 0x52, + 0x05, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x52, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x52, 0x1a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, + 0xda, 0x02, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4f, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x08, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x1a, 0xc8, 0x01, 0x0a, 0x06, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x60, 0x0a, 0x16, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 
0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x14, 0x74, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x01, 0x10, + 0x02, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x52, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xbb, 0x05, 0x0a, + 0x19, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x47, 0x0a, 0x0d, 0x74, 0x63, + 0x70, 0x5f, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x63, 0x70, 0x4b, 0x65, 0x65, 0x70, + 0x61, 0x6c, 0x69, 0x76, 0x65, 0x52, 0x0c, 0x74, 0x63, 0x70, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, + 0x69, 0x76, 0x65, 0x12, 0x64, 0x0a, 0x30, 0x73, 0x65, 0x74, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, + 0x6f, 0x6e, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x2a, 0x73, + 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x4f, 0x6e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x7a, 0x0a, 0x15, 0x68, 0x61, 0x70, + 0x70, 0x79, 0x5f, 0x65, 0x79, 0x65, 0x62, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x61, 0x70, + 0x70, 0x79, 0x45, 0x79, 0x65, 0x62, 0x61, 0x6c, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x13, 0x68, 0x61, 0x70, 0x70, 0x79, 0x45, 0x79, 0x65, 0x62, 0x61, 0x6c, 0x6c, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x89, 0x02, 0x0a, 0x13, 0x48, 0x61, 0x70, 0x70, 0x79, 0x45, + 0x79, 0x65, 0x62, 0x61, 0x6c, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x8d, 0x01, + 0x0a, 0x1c, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, + 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 
0x20, 0x01, 0x28, 0x0e, 0x32, 0x4c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x55, + 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x69, 0x72, 0x73, 0x74, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x19, 0x66, 0x69, 0x72, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x62, 0x0a, + 0x1a, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x66, + 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x17, 0x66, 0x69, 0x72, 0x73, 0x74, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x22, 0x38, 0x0a, 0x19, 0x46, 0x69, 0x72, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0b, + 0x0a, 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x56, + 0x34, 0x10, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x56, 0x36, 0x10, 0x02, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, + 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa0, 0x01, 0x0a, 0x11, 0x54, + 0x72, 0x61, 0x63, 0x6b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x75, 0x64, 0x67, + 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x12, + 0x2c, 0x0a, 0x12, 0x70, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x65, 0x72, + 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x42, 0x89, 0x01, + 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, + 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x48, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x2d, 0x70, 0x6c, 
0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_envoy_config_cluster_v3_cluster_proto_rawDescOnce sync.Once + file_envoy_config_cluster_v3_cluster_proto_rawDescData = file_envoy_config_cluster_v3_cluster_proto_rawDesc +) + +func file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP() []byte { + file_envoy_config_cluster_v3_cluster_proto_rawDescOnce.Do(func() { + file_envoy_config_cluster_v3_cluster_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_cluster_v3_cluster_proto_rawDescData) + }) + return file_envoy_config_cluster_v3_cluster_proto_rawDescData +} + +var file_envoy_config_cluster_v3_cluster_proto_enumTypes = make([]protoimpl.EnumInfo, 9) +var file_envoy_config_cluster_v3_cluster_proto_msgTypes = make([]protoimpl.MessageInfo, 25) +var file_envoy_config_cluster_v3_cluster_proto_goTypes = []interface{}{ + (Cluster_DiscoveryType)(0), // 0: envoy.config.cluster.v3.Cluster.DiscoveryType + (Cluster_LbPolicy)(0), // 1: envoy.config.cluster.v3.Cluster.LbPolicy + (Cluster_DnsLookupFamily)(0), // 2: envoy.config.cluster.v3.Cluster.DnsLookupFamily + (Cluster_ClusterProtocolSelection)(0), // 3: envoy.config.cluster.v3.Cluster.ClusterProtocolSelection + (Cluster_LbSubsetConfig_LbSubsetFallbackPolicy)(0), // 4: envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy + (Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy)(0), // 5: envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetMetadataFallbackPolicy + (Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy)(0), // 6: envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.LbSubsetSelectorFallbackPolicy + (Cluster_RingHashLbConfig_HashFunction)(0), // 7: envoy.config.cluster.v3.Cluster.RingHashLbConfig.HashFunction + (UpstreamConnectionOptions_FirstAddressFamilyVersion)(0), // 8: envoy.config.cluster.v3.UpstreamConnectionOptions.FirstAddressFamilyVersion + (*ClusterCollection)(nil), // 9: envoy.config.cluster.v3.ClusterCollection + (*Cluster)(nil), // 10: envoy.config.cluster.v3.Cluster + (*LoadBalancingPolicy)(nil), // 11: envoy.config.cluster.v3.LoadBalancingPolicy + (*UpstreamConnectionOptions)(nil), // 12: envoy.config.cluster.v3.UpstreamConnectionOptions + (*TrackClusterStats)(nil), // 13: envoy.config.cluster.v3.TrackClusterStats + (*Cluster_TransportSocketMatch)(nil), // 14: envoy.config.cluster.v3.Cluster.TransportSocketMatch + (*Cluster_CustomClusterType)(nil), // 15: envoy.config.cluster.v3.Cluster.CustomClusterType + (*Cluster_EdsClusterConfig)(nil), // 16: envoy.config.cluster.v3.Cluster.EdsClusterConfig + (*Cluster_LbSubsetConfig)(nil), // 17: envoy.config.cluster.v3.Cluster.LbSubsetConfig + (*Cluster_SlowStartConfig)(nil), // 18: envoy.config.cluster.v3.Cluster.SlowStartConfig + (*Cluster_RoundRobinLbConfig)(nil), // 19: envoy.config.cluster.v3.Cluster.RoundRobinLbConfig + (*Cluster_LeastRequestLbConfig)(nil), // 20: envoy.config.cluster.v3.Cluster.LeastRequestLbConfig + (*Cluster_RingHashLbConfig)(nil), // 21: envoy.config.cluster.v3.Cluster.RingHashLbConfig + (*Cluster_MaglevLbConfig)(nil), // 22: envoy.config.cluster.v3.Cluster.MaglevLbConfig + (*Cluster_OriginalDstLbConfig)(nil), // 23: envoy.config.cluster.v3.Cluster.OriginalDstLbConfig + (*Cluster_CommonLbConfig)(nil), // 24: envoy.config.cluster.v3.Cluster.CommonLbConfig + 
(*Cluster_RefreshRate)(nil), // 25: envoy.config.cluster.v3.Cluster.RefreshRate + (*Cluster_PreconnectPolicy)(nil), // 26: envoy.config.cluster.v3.Cluster.PreconnectPolicy + nil, // 27: envoy.config.cluster.v3.Cluster.TypedExtensionProtocolOptionsEntry + (*Cluster_LbSubsetConfig_LbSubsetSelector)(nil), // 28: envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector + (*Cluster_CommonLbConfig_ZoneAwareLbConfig)(nil), // 29: envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig + (*Cluster_CommonLbConfig_LocalityWeightedLbConfig)(nil), // 30: envoy.config.cluster.v3.Cluster.CommonLbConfig.LocalityWeightedLbConfig + (*Cluster_CommonLbConfig_ConsistentHashingLbConfig)(nil), // 31: envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig + (*LoadBalancingPolicy_Policy)(nil), // 32: envoy.config.cluster.v3.LoadBalancingPolicy.Policy + (*UpstreamConnectionOptions_HappyEyeballsConfig)(nil), // 33: envoy.config.cluster.v3.UpstreamConnectionOptions.HappyEyeballsConfig + (*v3.CollectionEntry)(nil), // 34: xds.core.v3.CollectionEntry + (*durationpb.Duration)(nil), // 35: google.protobuf.Duration + (*wrapperspb.UInt32Value)(nil), // 36: google.protobuf.UInt32Value + (*v31.ClusterLoadAssignment)(nil), // 37: envoy.config.endpoint.v3.ClusterLoadAssignment + (*v32.HealthCheck)(nil), // 38: envoy.config.core.v3.HealthCheck + (*CircuitBreakers)(nil), // 39: envoy.config.cluster.v3.CircuitBreakers + (*v32.UpstreamHttpProtocolOptions)(nil), // 40: envoy.config.core.v3.UpstreamHttpProtocolOptions + (*v32.HttpProtocolOptions)(nil), // 41: envoy.config.core.v3.HttpProtocolOptions + (*v32.Http1ProtocolOptions)(nil), // 42: envoy.config.core.v3.Http1ProtocolOptions + (*v32.Http2ProtocolOptions)(nil), // 43: envoy.config.core.v3.Http2ProtocolOptions + (*v32.Address)(nil), // 44: envoy.config.core.v3.Address + (*v32.DnsResolutionConfig)(nil), // 45: envoy.config.core.v3.DnsResolutionConfig + (*v32.TypedExtensionConfig)(nil), // 46: envoy.config.core.v3.TypedExtensionConfig + (*wrapperspb.BoolValue)(nil), // 47: google.protobuf.BoolValue + (*OutlierDetection)(nil), // 48: envoy.config.cluster.v3.OutlierDetection + (*v32.BindConfig)(nil), // 49: envoy.config.core.v3.BindConfig + (*v32.TransportSocket)(nil), // 50: envoy.config.core.v3.TransportSocket + (*v32.Metadata)(nil), // 51: envoy.config.core.v3.Metadata + (*Filter)(nil), // 52: envoy.config.cluster.v3.Filter + (*v32.ConfigSource)(nil), // 53: envoy.config.core.v3.ConfigSource + (*v32.TcpKeepalive)(nil), // 54: envoy.config.core.v3.TcpKeepalive + (*structpb.Struct)(nil), // 55: google.protobuf.Struct + (*anypb.Any)(nil), // 56: google.protobuf.Any + (*v32.RuntimeDouble)(nil), // 57: envoy.config.core.v3.RuntimeDouble + (*v33.Percent)(nil), // 58: envoy.type.v3.Percent + (*wrapperspb.UInt64Value)(nil), // 59: google.protobuf.UInt64Value + (*v34.MetadataKey)(nil), // 60: envoy.type.metadata.v3.MetadataKey + (*v32.HealthStatusSet)(nil), // 61: envoy.config.core.v3.HealthStatusSet + (*wrapperspb.DoubleValue)(nil), // 62: google.protobuf.DoubleValue +} +var file_envoy_config_cluster_v3_cluster_proto_depIdxs = []int32{ + 34, // 0: envoy.config.cluster.v3.ClusterCollection.entries:type_name -> xds.core.v3.CollectionEntry + 14, // 1: envoy.config.cluster.v3.Cluster.transport_socket_matches:type_name -> envoy.config.cluster.v3.Cluster.TransportSocketMatch + 0, // 2: envoy.config.cluster.v3.Cluster.type:type_name -> envoy.config.cluster.v3.Cluster.DiscoveryType + 15, // 3: envoy.config.cluster.v3.Cluster.cluster_type:type_name -> 
envoy.config.cluster.v3.Cluster.CustomClusterType + 16, // 4: envoy.config.cluster.v3.Cluster.eds_cluster_config:type_name -> envoy.config.cluster.v3.Cluster.EdsClusterConfig + 35, // 5: envoy.config.cluster.v3.Cluster.connect_timeout:type_name -> google.protobuf.Duration + 36, // 6: envoy.config.cluster.v3.Cluster.per_connection_buffer_limit_bytes:type_name -> google.protobuf.UInt32Value + 1, // 7: envoy.config.cluster.v3.Cluster.lb_policy:type_name -> envoy.config.cluster.v3.Cluster.LbPolicy + 37, // 8: envoy.config.cluster.v3.Cluster.load_assignment:type_name -> envoy.config.endpoint.v3.ClusterLoadAssignment + 38, // 9: envoy.config.cluster.v3.Cluster.health_checks:type_name -> envoy.config.core.v3.HealthCheck + 36, // 10: envoy.config.cluster.v3.Cluster.max_requests_per_connection:type_name -> google.protobuf.UInt32Value + 39, // 11: envoy.config.cluster.v3.Cluster.circuit_breakers:type_name -> envoy.config.cluster.v3.CircuitBreakers + 40, // 12: envoy.config.cluster.v3.Cluster.upstream_http_protocol_options:type_name -> envoy.config.core.v3.UpstreamHttpProtocolOptions + 41, // 13: envoy.config.cluster.v3.Cluster.common_http_protocol_options:type_name -> envoy.config.core.v3.HttpProtocolOptions + 42, // 14: envoy.config.cluster.v3.Cluster.http_protocol_options:type_name -> envoy.config.core.v3.Http1ProtocolOptions + 43, // 15: envoy.config.cluster.v3.Cluster.http2_protocol_options:type_name -> envoy.config.core.v3.Http2ProtocolOptions + 27, // 16: envoy.config.cluster.v3.Cluster.typed_extension_protocol_options:type_name -> envoy.config.cluster.v3.Cluster.TypedExtensionProtocolOptionsEntry + 35, // 17: envoy.config.cluster.v3.Cluster.dns_refresh_rate:type_name -> google.protobuf.Duration + 25, // 18: envoy.config.cluster.v3.Cluster.dns_failure_refresh_rate:type_name -> envoy.config.cluster.v3.Cluster.RefreshRate + 2, // 19: envoy.config.cluster.v3.Cluster.dns_lookup_family:type_name -> envoy.config.cluster.v3.Cluster.DnsLookupFamily + 44, // 20: envoy.config.cluster.v3.Cluster.dns_resolvers:type_name -> envoy.config.core.v3.Address + 45, // 21: envoy.config.cluster.v3.Cluster.dns_resolution_config:type_name -> envoy.config.core.v3.DnsResolutionConfig + 46, // 22: envoy.config.cluster.v3.Cluster.typed_dns_resolver_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 47, // 23: envoy.config.cluster.v3.Cluster.wait_for_warm_on_init:type_name -> google.protobuf.BoolValue + 48, // 24: envoy.config.cluster.v3.Cluster.outlier_detection:type_name -> envoy.config.cluster.v3.OutlierDetection + 35, // 25: envoy.config.cluster.v3.Cluster.cleanup_interval:type_name -> google.protobuf.Duration + 49, // 26: envoy.config.cluster.v3.Cluster.upstream_bind_config:type_name -> envoy.config.core.v3.BindConfig + 17, // 27: envoy.config.cluster.v3.Cluster.lb_subset_config:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig + 21, // 28: envoy.config.cluster.v3.Cluster.ring_hash_lb_config:type_name -> envoy.config.cluster.v3.Cluster.RingHashLbConfig + 22, // 29: envoy.config.cluster.v3.Cluster.maglev_lb_config:type_name -> envoy.config.cluster.v3.Cluster.MaglevLbConfig + 23, // 30: envoy.config.cluster.v3.Cluster.original_dst_lb_config:type_name -> envoy.config.cluster.v3.Cluster.OriginalDstLbConfig + 20, // 31: envoy.config.cluster.v3.Cluster.least_request_lb_config:type_name -> envoy.config.cluster.v3.Cluster.LeastRequestLbConfig + 19, // 32: envoy.config.cluster.v3.Cluster.round_robin_lb_config:type_name -> envoy.config.cluster.v3.Cluster.RoundRobinLbConfig + 24, // 33: 
envoy.config.cluster.v3.Cluster.common_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig + 50, // 34: envoy.config.cluster.v3.Cluster.transport_socket:type_name -> envoy.config.core.v3.TransportSocket + 51, // 35: envoy.config.cluster.v3.Cluster.metadata:type_name -> envoy.config.core.v3.Metadata + 3, // 36: envoy.config.cluster.v3.Cluster.protocol_selection:type_name -> envoy.config.cluster.v3.Cluster.ClusterProtocolSelection + 12, // 37: envoy.config.cluster.v3.Cluster.upstream_connection_options:type_name -> envoy.config.cluster.v3.UpstreamConnectionOptions + 52, // 38: envoy.config.cluster.v3.Cluster.filters:type_name -> envoy.config.cluster.v3.Filter + 11, // 39: envoy.config.cluster.v3.Cluster.load_balancing_policy:type_name -> envoy.config.cluster.v3.LoadBalancingPolicy + 53, // 40: envoy.config.cluster.v3.Cluster.lrs_server:type_name -> envoy.config.core.v3.ConfigSource + 46, // 41: envoy.config.cluster.v3.Cluster.upstream_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 13, // 42: envoy.config.cluster.v3.Cluster.track_cluster_stats:type_name -> envoy.config.cluster.v3.TrackClusterStats + 26, // 43: envoy.config.cluster.v3.Cluster.preconnect_policy:type_name -> envoy.config.cluster.v3.Cluster.PreconnectPolicy + 32, // 44: envoy.config.cluster.v3.LoadBalancingPolicy.policies:type_name -> envoy.config.cluster.v3.LoadBalancingPolicy.Policy + 54, // 45: envoy.config.cluster.v3.UpstreamConnectionOptions.tcp_keepalive:type_name -> envoy.config.core.v3.TcpKeepalive + 33, // 46: envoy.config.cluster.v3.UpstreamConnectionOptions.happy_eyeballs_config:type_name -> envoy.config.cluster.v3.UpstreamConnectionOptions.HappyEyeballsConfig + 55, // 47: envoy.config.cluster.v3.Cluster.TransportSocketMatch.match:type_name -> google.protobuf.Struct + 50, // 48: envoy.config.cluster.v3.Cluster.TransportSocketMatch.transport_socket:type_name -> envoy.config.core.v3.TransportSocket + 56, // 49: envoy.config.cluster.v3.Cluster.CustomClusterType.typed_config:type_name -> google.protobuf.Any + 53, // 50: envoy.config.cluster.v3.Cluster.EdsClusterConfig.eds_config:type_name -> envoy.config.core.v3.ConfigSource + 4, // 51: envoy.config.cluster.v3.Cluster.LbSubsetConfig.fallback_policy:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy + 55, // 52: envoy.config.cluster.v3.Cluster.LbSubsetConfig.default_subset:type_name -> google.protobuf.Struct + 28, // 53: envoy.config.cluster.v3.Cluster.LbSubsetConfig.subset_selectors:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector + 5, // 54: envoy.config.cluster.v3.Cluster.LbSubsetConfig.metadata_fallback_policy:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetMetadataFallbackPolicy + 35, // 55: envoy.config.cluster.v3.Cluster.SlowStartConfig.slow_start_window:type_name -> google.protobuf.Duration + 57, // 56: envoy.config.cluster.v3.Cluster.SlowStartConfig.aggression:type_name -> envoy.config.core.v3.RuntimeDouble + 58, // 57: envoy.config.cluster.v3.Cluster.SlowStartConfig.min_weight_percent:type_name -> envoy.type.v3.Percent + 18, // 58: envoy.config.cluster.v3.Cluster.RoundRobinLbConfig.slow_start_config:type_name -> envoy.config.cluster.v3.Cluster.SlowStartConfig + 36, // 59: envoy.config.cluster.v3.Cluster.LeastRequestLbConfig.choice_count:type_name -> google.protobuf.UInt32Value + 57, // 60: envoy.config.cluster.v3.Cluster.LeastRequestLbConfig.active_request_bias:type_name -> envoy.config.core.v3.RuntimeDouble + 18, // 61: 
envoy.config.cluster.v3.Cluster.LeastRequestLbConfig.slow_start_config:type_name -> envoy.config.cluster.v3.Cluster.SlowStartConfig + 59, // 62: envoy.config.cluster.v3.Cluster.RingHashLbConfig.minimum_ring_size:type_name -> google.protobuf.UInt64Value + 7, // 63: envoy.config.cluster.v3.Cluster.RingHashLbConfig.hash_function:type_name -> envoy.config.cluster.v3.Cluster.RingHashLbConfig.HashFunction + 59, // 64: envoy.config.cluster.v3.Cluster.RingHashLbConfig.maximum_ring_size:type_name -> google.protobuf.UInt64Value + 59, // 65: envoy.config.cluster.v3.Cluster.MaglevLbConfig.table_size:type_name -> google.protobuf.UInt64Value + 36, // 66: envoy.config.cluster.v3.Cluster.OriginalDstLbConfig.upstream_port_override:type_name -> google.protobuf.UInt32Value + 60, // 67: envoy.config.cluster.v3.Cluster.OriginalDstLbConfig.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey + 58, // 68: envoy.config.cluster.v3.Cluster.CommonLbConfig.healthy_panic_threshold:type_name -> envoy.type.v3.Percent + 29, // 69: envoy.config.cluster.v3.Cluster.CommonLbConfig.zone_aware_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig + 30, // 70: envoy.config.cluster.v3.Cluster.CommonLbConfig.locality_weighted_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig.LocalityWeightedLbConfig + 35, // 71: envoy.config.cluster.v3.Cluster.CommonLbConfig.update_merge_window:type_name -> google.protobuf.Duration + 31, // 72: envoy.config.cluster.v3.Cluster.CommonLbConfig.consistent_hashing_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig + 61, // 73: envoy.config.cluster.v3.Cluster.CommonLbConfig.override_host_status:type_name -> envoy.config.core.v3.HealthStatusSet + 35, // 74: envoy.config.cluster.v3.Cluster.RefreshRate.base_interval:type_name -> google.protobuf.Duration + 35, // 75: envoy.config.cluster.v3.Cluster.RefreshRate.max_interval:type_name -> google.protobuf.Duration + 62, // 76: envoy.config.cluster.v3.Cluster.PreconnectPolicy.per_upstream_preconnect_ratio:type_name -> google.protobuf.DoubleValue + 62, // 77: envoy.config.cluster.v3.Cluster.PreconnectPolicy.predictive_preconnect_ratio:type_name -> google.protobuf.DoubleValue + 56, // 78: envoy.config.cluster.v3.Cluster.TypedExtensionProtocolOptionsEntry.value:type_name -> google.protobuf.Any + 6, // 79: envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.fallback_policy:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.LbSubsetSelectorFallbackPolicy + 58, // 80: envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig.routing_enabled:type_name -> envoy.type.v3.Percent + 59, // 81: envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig.min_cluster_size:type_name -> google.protobuf.UInt64Value + 36, // 82: envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig.hash_balance_factor:type_name -> google.protobuf.UInt32Value + 46, // 83: envoy.config.cluster.v3.LoadBalancingPolicy.Policy.typed_extension_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 8, // 84: envoy.config.cluster.v3.UpstreamConnectionOptions.HappyEyeballsConfig.first_address_family_version:type_name -> envoy.config.cluster.v3.UpstreamConnectionOptions.FirstAddressFamilyVersion + 36, // 85: envoy.config.cluster.v3.UpstreamConnectionOptions.HappyEyeballsConfig.first_address_family_count:type_name -> google.protobuf.UInt32Value + 86, // [86:86] is the sub-list for method output_type + 86, // [86:86] is the 
sub-list for method input_type + 86, // [86:86] is the sub-list for extension type_name + 86, // [86:86] is the sub-list for extension extendee + 0, // [0:86] is the sub-list for field type_name +} + +func init() { file_envoy_config_cluster_v3_cluster_proto_init() } +func file_envoy_config_cluster_v3_cluster_proto_init() { + if File_envoy_config_cluster_v3_cluster_proto != nil { + return + } + file_envoy_config_cluster_v3_circuit_breaker_proto_init() + file_envoy_config_cluster_v3_filter_proto_init() + file_envoy_config_cluster_v3_outlier_detection_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_cluster_v3_cluster_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClusterCollection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoadBalancingPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpstreamConnectionOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TrackClusterStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_TransportSocketMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_CustomClusterType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_EdsClusterConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_LbSubsetConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_SlowStartConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*Cluster_RoundRobinLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_LeastRequestLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_RingHashLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_MaglevLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_OriginalDstLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_CommonLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_RefreshRate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_PreconnectPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_LbSubsetConfig_LbSubsetSelector); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_CommonLbConfig_ZoneAwareLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_CommonLbConfig_LocalityWeightedLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cluster_CommonLbConfig_ConsistentHashingLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoadBalancingPolicy_Policy); 
i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpstreamConnectionOptions_HappyEyeballsConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*Cluster_Type)(nil), + (*Cluster_ClusterType)(nil), + (*Cluster_RingHashLbConfig_)(nil), + (*Cluster_MaglevLbConfig_)(nil), + (*Cluster_OriginalDstLbConfig_)(nil), + (*Cluster_LeastRequestLbConfig_)(nil), + (*Cluster_RoundRobinLbConfig_)(nil), + } + file_envoy_config_cluster_v3_cluster_proto_msgTypes[15].OneofWrappers = []interface{}{ + (*Cluster_CommonLbConfig_ZoneAwareLbConfig_)(nil), + (*Cluster_CommonLbConfig_LocalityWeightedLbConfig_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_cluster_v3_cluster_proto_rawDesc, + NumEnums: 9, + NumMessages: 25, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_cluster_v3_cluster_proto_goTypes, + DependencyIndexes: file_envoy_config_cluster_v3_cluster_proto_depIdxs, + EnumInfos: file_envoy_config_cluster_v3_cluster_proto_enumTypes, + MessageInfos: file_envoy_config_cluster_v3_cluster_proto_msgTypes, + }.Build() + File_envoy_config_cluster_v3_cluster_proto = out.File + file_envoy_config_cluster_v3_cluster_proto_rawDesc = nil + file_envoy_config_cluster_v3_cluster_proto_goTypes = nil + file_envoy_config_cluster_v3_cluster_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.validate.go new file mode 100644 index 0000000000..7d33f5e84e --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.validate.go @@ -0,0 +1,4942 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/cluster/v3/cluster.proto + +package clusterv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ClusterCollection with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ClusterCollection) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClusterCollection with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ClusterCollectionMultiError, or nil if none found. 
+func (m *ClusterCollection) ValidateAll() error { + return m.validate(true) +} + +func (m *ClusterCollection) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetEntries()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterCollectionValidationError{ + field: "Entries", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterCollectionValidationError{ + field: "Entries", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEntries()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterCollectionValidationError{ + field: "Entries", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ClusterCollectionMultiError(errors) + } + + return nil +} + +// ClusterCollectionMultiError is an error wrapping multiple validation errors +// returned by ClusterCollection.ValidateAll() if the designated constraints +// aren't met. +type ClusterCollectionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClusterCollectionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClusterCollectionMultiError) AllErrors() []error { return m } + +// ClusterCollectionValidationError is the validation error returned by +// ClusterCollection.Validate if the designated constraints aren't met. +type ClusterCollectionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClusterCollectionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClusterCollectionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClusterCollectionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClusterCollectionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClusterCollectionValidationError) ErrorName() string { + return "ClusterCollectionValidationError" +} + +// Error satisfies the builtin error interface +func (e ClusterCollectionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClusterCollection.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClusterCollectionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClusterCollectionValidationError{} + +// Validate checks the field values on Cluster with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Cluster) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster with the rules defined in the +// proto definition for this message. 
If any rules are violated, the result is +// a list of violation errors wrapped in ClusterMultiError, or nil if none found. +func (m *Cluster) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetTransportSocketMatches() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: fmt.Sprintf("TransportSocketMatches[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: fmt.Sprintf("TransportSocketMatches[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: fmt.Sprintf("TransportSocketMatches[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := ClusterValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for AltStatName + + if all { + switch v := interface{}(m.GetEdsClusterConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "EdsClusterConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "EdsClusterConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEdsClusterConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "EdsClusterConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if d := m.GetConnectTimeout(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = ClusterValidationError{ + field: "ConnectTimeout", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := ClusterValidationError{ + field: "ConnectTimeout", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if all { + switch v := interface{}(m.GetPerConnectionBufferLimitBytes()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "PerConnectionBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "PerConnectionBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPerConnectionBufferLimitBytes()).(interface{ Validate() error }); ok { + if 
err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "PerConnectionBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if _, ok := Cluster_LbPolicy_name[int32(m.GetLbPolicy())]; !ok { + err := ClusterValidationError{ + field: "LbPolicy", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetLoadAssignment()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "LoadAssignment", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "LoadAssignment", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLoadAssignment()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "LoadAssignment", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetHealthChecks() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: fmt.Sprintf("HealthChecks[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: fmt.Sprintf("HealthChecks[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: fmt.Sprintf("HealthChecks[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetMaxRequestsPerConnection()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "MaxRequestsPerConnection", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "MaxRequestsPerConnection", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxRequestsPerConnection()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "MaxRequestsPerConnection", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCircuitBreakers()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "CircuitBreakers", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "CircuitBreakers", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := 
interface{}(m.GetCircuitBreakers()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "CircuitBreakers", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUpstreamHttpProtocolOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "UpstreamHttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "UpstreamHttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUpstreamHttpProtocolOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "UpstreamHttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCommonHttpProtocolOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "CommonHttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "CommonHttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCommonHttpProtocolOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "CommonHttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetHttpProtocolOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "HttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "HttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpProtocolOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "HttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetHttp2ProtocolOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "Http2ProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "Http2ProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttp2ProtocolOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "Http2ProtocolOptions", + reason: 
"embedded message failed validation", + cause: err, + } + } + } + + { + sorted_keys := make([]string, len(m.GetTypedExtensionProtocolOptions())) + i := 0 + for key := range m.GetTypedExtensionProtocolOptions() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetTypedExtensionProtocolOptions()[key] + _ = val + + // no validation rules for TypedExtensionProtocolOptions[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: fmt.Sprintf("TypedExtensionProtocolOptions[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: fmt.Sprintf("TypedExtensionProtocolOptions[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: fmt.Sprintf("TypedExtensionProtocolOptions[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + if d := m.GetDnsRefreshRate(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = ClusterValidationError{ + field: "DnsRefreshRate", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 1000000*time.Nanosecond) + + if dur <= gt { + err := ClusterValidationError{ + field: "DnsRefreshRate", + reason: "value must be greater than 1ms", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if all { + switch v := interface{}(m.GetDnsFailureRefreshRate()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "DnsFailureRefreshRate", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "DnsFailureRefreshRate", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDnsFailureRefreshRate()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "DnsFailureRefreshRate", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for RespectDnsTtl + + if _, ok := Cluster_DnsLookupFamily_name[int32(m.GetDnsLookupFamily())]; !ok { + err := ClusterValidationError{ + field: "DnsLookupFamily", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetDnsResolvers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: fmt.Sprintf("DnsResolvers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors 
= append(errors, ClusterValidationError{ + field: fmt.Sprintf("DnsResolvers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: fmt.Sprintf("DnsResolvers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for UseTcpForDnsLookups + + if all { + switch v := interface{}(m.GetDnsResolutionConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "DnsResolutionConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "DnsResolutionConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDnsResolutionConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "DnsResolutionConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTypedDnsResolverConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "TypedDnsResolverConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "TypedDnsResolverConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedDnsResolverConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "TypedDnsResolverConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetWaitForWarmOnInit()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "WaitForWarmOnInit", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "WaitForWarmOnInit", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWaitForWarmOnInit()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "WaitForWarmOnInit", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetOutlierDetection()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "OutlierDetection", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "OutlierDetection", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := 
interface{}(m.GetOutlierDetection()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "OutlierDetection", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if d := m.GetCleanupInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = ClusterValidationError{ + field: "CleanupInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := ClusterValidationError{ + field: "CleanupInterval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if all { + switch v := interface{}(m.GetUpstreamBindConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "UpstreamBindConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "UpstreamBindConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUpstreamBindConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "UpstreamBindConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLbSubsetConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "LbSubsetConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "LbSubsetConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLbSubsetConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "LbSubsetConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCommonLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "CommonLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "CommonLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCommonLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "CommonLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTransportSocket()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "TransportSocket", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ 
Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "TransportSocket", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTransportSocket()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "TransportSocket", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ProtocolSelection + + if all { + switch v := interface{}(m.GetUpstreamConnectionOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "UpstreamConnectionOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "UpstreamConnectionOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUpstreamConnectionOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "UpstreamConnectionOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for CloseConnectionsOnHostHealthFailure + + // no validation rules for IgnoreHealthOnHostRemoval + + for idx, item := range m.GetFilters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetLoadBalancingPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "LoadBalancingPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = 
append(errors, ClusterValidationError{ + field: "LoadBalancingPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLoadBalancingPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "LoadBalancingPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLrsServer()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "LrsServer", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "LrsServer", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLrsServer()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "LrsServer", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for TrackTimeoutBudgets + + if all { + switch v := interface{}(m.GetUpstreamConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "UpstreamConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "UpstreamConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUpstreamConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "UpstreamConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTrackClusterStats()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "TrackClusterStats", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "TrackClusterStats", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTrackClusterStats()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "TrackClusterStats", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPreconnectPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "PreconnectPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "PreconnectPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPreconnectPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
ClusterValidationError{ + field: "PreconnectPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ConnectionPoolPerDownstreamConnection + + switch v := m.ClusterDiscoveryType.(type) { + case *Cluster_Type: + if v == nil { + err := ClusterValidationError{ + field: "ClusterDiscoveryType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if _, ok := Cluster_DiscoveryType_name[int32(m.GetType())]; !ok { + err := ClusterValidationError{ + field: "Type", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *Cluster_ClusterType: + if v == nil { + err := ClusterValidationError{ + field: "ClusterDiscoveryType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetClusterType()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "ClusterType", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "ClusterType", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetClusterType()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "ClusterType", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + switch v := m.LbConfig.(type) { + case *Cluster_RingHashLbConfig_: + if v == nil { + err := ClusterValidationError{ + field: "LbConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetRingHashLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "RingHashLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "RingHashLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRingHashLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "RingHashLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Cluster_MaglevLbConfig_: + if v == nil { + err := ClusterValidationError{ + field: "LbConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetMaglevLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "MaglevLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "MaglevLbConfig", + reason: "embedded message failed validation", + 
cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaglevLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "MaglevLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Cluster_OriginalDstLbConfig_: + if v == nil { + err := ClusterValidationError{ + field: "LbConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetOriginalDstLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "OriginalDstLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "OriginalDstLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOriginalDstLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "OriginalDstLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Cluster_LeastRequestLbConfig_: + if v == nil { + err := ClusterValidationError{ + field: "LbConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetLeastRequestLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "LeastRequestLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "LeastRequestLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLeastRequestLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "LeastRequestLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Cluster_RoundRobinLbConfig_: + if v == nil { + err := ClusterValidationError{ + field: "LbConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetRoundRobinLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "RoundRobinLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterValidationError{ + field: "RoundRobinLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRoundRobinLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterValidationError{ + field: "RoundRobinLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return ClusterMultiError(errors) + } + + 
return nil +} + +// ClusterMultiError is an error wrapping multiple validation errors returned +// by Cluster.ValidateAll() if the designated constraints aren't met. +type ClusterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClusterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClusterMultiError) AllErrors() []error { return m } + +// ClusterValidationError is the validation error returned by Cluster.Validate +// if the designated constraints aren't met. +type ClusterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClusterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClusterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClusterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClusterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClusterValidationError) ErrorName() string { return "ClusterValidationError" } + +// Error satisfies the builtin error interface +func (e ClusterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClusterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClusterValidationError{} + +// Validate checks the field values on LoadBalancingPolicy with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *LoadBalancingPolicy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LoadBalancingPolicy with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// LoadBalancingPolicyMultiError, or nil if none found. 
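For readers stepping through this generated validation code, the split between `Validate()` (first violation only) and `ValidateAll()` (every violation wrapped in a `*MultiError`) is easiest to see from the caller side. A minimal sketch using the `ClusterMultiError` and `ClusterValidationError` types defined just above; the `clusterv3` import path is an assumption (this looks like the go-control-plane `envoy/config/cluster/v3` validation file), and the snippet is illustrative, not part of the vendored file:

```go
package main

import (
	"errors"
	"fmt"

	clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
)

func main() {
	c := &clusterv3.Cluster{} // empty message; constraints such as the Name length rule will fail

	// Validate stops at the first violation it encounters.
	if err := c.Validate(); err != nil {
		fmt.Println("first violation:", err)
	}

	// ValidateAll collects every violation into a ClusterMultiError.
	if err := c.ValidateAll(); err != nil {
		var multi clusterv3.ClusterMultiError
		if errors.As(err, &multi) {
			for _, verr := range multi.AllErrors() {
				var fieldErr clusterv3.ClusterValidationError
				if errors.As(verr, &fieldErr) {
					fmt.Printf("%s: %s\n", fieldErr.Field(), fieldErr.Reason())
				}
			}
		}
	}
}
```

Embedded-message failures surface the nested error through `Cause()`, which is why the generated code wraps them with reason "embedded message failed validation".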
+func (m *LoadBalancingPolicy) ValidateAll() error { + return m.validate(true) +} + +func (m *LoadBalancingPolicy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetPolicies() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LoadBalancingPolicyValidationError{ + field: fmt.Sprintf("Policies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LoadBalancingPolicyValidationError{ + field: fmt.Sprintf("Policies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LoadBalancingPolicyValidationError{ + field: fmt.Sprintf("Policies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return LoadBalancingPolicyMultiError(errors) + } + + return nil +} + +// LoadBalancingPolicyMultiError is an error wrapping multiple validation +// errors returned by LoadBalancingPolicy.ValidateAll() if the designated +// constraints aren't met. +type LoadBalancingPolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LoadBalancingPolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LoadBalancingPolicyMultiError) AllErrors() []error { return m } + +// LoadBalancingPolicyValidationError is the validation error returned by +// LoadBalancingPolicy.Validate if the designated constraints aren't met. +type LoadBalancingPolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LoadBalancingPolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LoadBalancingPolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LoadBalancingPolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LoadBalancingPolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LoadBalancingPolicyValidationError) ErrorName() string { + return "LoadBalancingPolicyValidationError" +} + +// Error satisfies the builtin error interface +func (e LoadBalancingPolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLoadBalancingPolicy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LoadBalancingPolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LoadBalancingPolicyValidationError{} + +// Validate checks the field values on UpstreamConnectionOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *UpstreamConnectionOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UpstreamConnectionOptions with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// UpstreamConnectionOptionsMultiError, or nil if none found. +func (m *UpstreamConnectionOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *UpstreamConnectionOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetTcpKeepalive()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamConnectionOptionsValidationError{ + field: "TcpKeepalive", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamConnectionOptionsValidationError{ + field: "TcpKeepalive", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTcpKeepalive()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamConnectionOptionsValidationError{ + field: "TcpKeepalive", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for SetLocalInterfaceNameOnUpstreamConnections + + if all { + switch v := interface{}(m.GetHappyEyeballsConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamConnectionOptionsValidationError{ + field: "HappyEyeballsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamConnectionOptionsValidationError{ + field: "HappyEyeballsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHappyEyeballsConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamConnectionOptionsValidationError{ + field: "HappyEyeballsConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return UpstreamConnectionOptionsMultiError(errors) + } + + return nil +} + +// UpstreamConnectionOptionsMultiError is an error wrapping multiple validation +// errors returned by UpstreamConnectionOptions.ValidateAll() if the +// designated constraints aren't met. +type UpstreamConnectionOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UpstreamConnectionOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UpstreamConnectionOptionsMultiError) AllErrors() []error { return m } + +// UpstreamConnectionOptionsValidationError is the validation error returned by +// UpstreamConnectionOptions.Validate if the designated constraints aren't met. +type UpstreamConnectionOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e UpstreamConnectionOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UpstreamConnectionOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UpstreamConnectionOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UpstreamConnectionOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UpstreamConnectionOptionsValidationError) ErrorName() string { + return "UpstreamConnectionOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e UpstreamConnectionOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUpstreamConnectionOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UpstreamConnectionOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UpstreamConnectionOptionsValidationError{} + +// Validate checks the field values on TrackClusterStats with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *TrackClusterStats) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TrackClusterStats with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// TrackClusterStatsMultiError, or nil if none found. +func (m *TrackClusterStats) ValidateAll() error { + return m.validate(true) +} + +func (m *TrackClusterStats) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for TimeoutBudgets + + // no validation rules for RequestResponseSizes + + // no validation rules for PerEndpointStats + + if len(errors) > 0 { + return TrackClusterStatsMultiError(errors) + } + + return nil +} + +// TrackClusterStatsMultiError is an error wrapping multiple validation errors +// returned by TrackClusterStats.ValidateAll() if the designated constraints +// aren't met. +type TrackClusterStatsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TrackClusterStatsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TrackClusterStatsMultiError) AllErrors() []error { return m } + +// TrackClusterStatsValidationError is the validation error returned by +// TrackClusterStats.Validate if the designated constraints aren't met. +type TrackClusterStatsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TrackClusterStatsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TrackClusterStatsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TrackClusterStatsValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e TrackClusterStatsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TrackClusterStatsValidationError) ErrorName() string { + return "TrackClusterStatsValidationError" +} + +// Error satisfies the builtin error interface +func (e TrackClusterStatsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTrackClusterStats.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TrackClusterStatsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TrackClusterStatsValidationError{} + +// Validate checks the field values on Cluster_TransportSocketMatch with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Cluster_TransportSocketMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_TransportSocketMatch with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_TransportSocketMatchMultiError, or nil if none found. +func (m *Cluster_TransportSocketMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_TransportSocketMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := Cluster_TransportSocketMatchValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_TransportSocketMatchValidationError{ + field: "Match", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_TransportSocketMatchValidationError{ + field: "Match", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_TransportSocketMatchValidationError{ + field: "Match", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTransportSocket()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_TransportSocketMatchValidationError{ + field: "TransportSocket", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_TransportSocketMatchValidationError{ + field: "TransportSocket", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTransportSocket()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_TransportSocketMatchValidationError{ + field: "TransportSocket", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if 
len(errors) > 0 { + return Cluster_TransportSocketMatchMultiError(errors) + } + + return nil +} + +// Cluster_TransportSocketMatchMultiError is an error wrapping multiple +// validation errors returned by Cluster_TransportSocketMatch.ValidateAll() if +// the designated constraints aren't met. +type Cluster_TransportSocketMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_TransportSocketMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_TransportSocketMatchMultiError) AllErrors() []error { return m } + +// Cluster_TransportSocketMatchValidationError is the validation error returned +// by Cluster_TransportSocketMatch.Validate if the designated constraints +// aren't met. +type Cluster_TransportSocketMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_TransportSocketMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_TransportSocketMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_TransportSocketMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_TransportSocketMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_TransportSocketMatchValidationError) ErrorName() string { + return "Cluster_TransportSocketMatchValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_TransportSocketMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_TransportSocketMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_TransportSocketMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_TransportSocketMatchValidationError{} + +// Validate checks the field values on Cluster_CustomClusterType with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Cluster_CustomClusterType) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_CustomClusterType with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_CustomClusterTypeMultiError, or nil if none found. 
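The `utf8.RuneCountInString(m.GetName()) < 1` check above enforces the minimum-length rule on `Cluster_TransportSocketMatch.Name`. A short, hedged sketch of how that rule trips from the caller side (same assumed `clusterv3` package alias as before; illustrative only):

```go
package main

import (
	"fmt"

	clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
)

func main() {
	// An empty Name violates the "length must be at least 1 runes" rule checked above.
	m := &clusterv3.Cluster_TransportSocketMatch{Name: ""}

	if err := m.Validate(); err != nil {
		verr := err.(clusterv3.Cluster_TransportSocketMatchValidationError)
		fmt.Println(verr.Field(), "-", verr.Reason()) // Name - value length must be at least 1 runes
	}
}
```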
+func (m *Cluster_CustomClusterType) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_CustomClusterType) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := Cluster_CustomClusterTypeValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_CustomClusterTypeValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_CustomClusterTypeValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_CustomClusterTypeValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Cluster_CustomClusterTypeMultiError(errors) + } + + return nil +} + +// Cluster_CustomClusterTypeMultiError is an error wrapping multiple validation +// errors returned by Cluster_CustomClusterType.ValidateAll() if the +// designated constraints aren't met. +type Cluster_CustomClusterTypeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_CustomClusterTypeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_CustomClusterTypeMultiError) AllErrors() []error { return m } + +// Cluster_CustomClusterTypeValidationError is the validation error returned by +// Cluster_CustomClusterType.Validate if the designated constraints aren't met. +type Cluster_CustomClusterTypeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_CustomClusterTypeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_CustomClusterTypeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_CustomClusterTypeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_CustomClusterTypeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Cluster_CustomClusterTypeValidationError) ErrorName() string { + return "Cluster_CustomClusterTypeValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_CustomClusterTypeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_CustomClusterType.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_CustomClusterTypeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_CustomClusterTypeValidationError{} + +// Validate checks the field values on Cluster_EdsClusterConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Cluster_EdsClusterConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_EdsClusterConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_EdsClusterConfigMultiError, or nil if none found. +func (m *Cluster_EdsClusterConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_EdsClusterConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetEdsConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_EdsClusterConfigValidationError{ + field: "EdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_EdsClusterConfigValidationError{ + field: "EdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEdsConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_EdsClusterConfigValidationError{ + field: "EdsConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ServiceName + + if len(errors) > 0 { + return Cluster_EdsClusterConfigMultiError(errors) + } + + return nil +} + +// Cluster_EdsClusterConfigMultiError is an error wrapping multiple validation +// errors returned by Cluster_EdsClusterConfig.ValidateAll() if the designated +// constraints aren't met. +type Cluster_EdsClusterConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_EdsClusterConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_EdsClusterConfigMultiError) AllErrors() []error { return m } + +// Cluster_EdsClusterConfigValidationError is the validation error returned by +// Cluster_EdsClusterConfig.Validate if the designated constraints aren't met. +type Cluster_EdsClusterConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e Cluster_EdsClusterConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_EdsClusterConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_EdsClusterConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_EdsClusterConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_EdsClusterConfigValidationError) ErrorName() string { + return "Cluster_EdsClusterConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_EdsClusterConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_EdsClusterConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_EdsClusterConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_EdsClusterConfigValidationError{} + +// Validate checks the field values on Cluster_LbSubsetConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Cluster_LbSubsetConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_LbSubsetConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_LbSubsetConfigMultiError, or nil if none found. 
+func (m *Cluster_LbSubsetConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_LbSubsetConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := Cluster_LbSubsetConfig_LbSubsetFallbackPolicy_name[int32(m.GetFallbackPolicy())]; !ok { + err := Cluster_LbSubsetConfigValidationError{ + field: "FallbackPolicy", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetDefaultSubset()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_LbSubsetConfigValidationError{ + field: "DefaultSubset", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_LbSubsetConfigValidationError{ + field: "DefaultSubset", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDefaultSubset()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_LbSubsetConfigValidationError{ + field: "DefaultSubset", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetSubsetSelectors() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_LbSubsetConfigValidationError{ + field: fmt.Sprintf("SubsetSelectors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_LbSubsetConfigValidationError{ + field: fmt.Sprintf("SubsetSelectors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_LbSubsetConfigValidationError{ + field: fmt.Sprintf("SubsetSelectors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for LocalityWeightAware + + // no validation rules for ScaleLocalityWeight + + // no validation rules for PanicModeAny + + // no validation rules for ListAsAny + + if _, ok := Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy_name[int32(m.GetMetadataFallbackPolicy())]; !ok { + err := Cluster_LbSubsetConfigValidationError{ + field: "MetadataFallbackPolicy", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Cluster_LbSubsetConfigMultiError(errors) + } + + return nil +} + +// Cluster_LbSubsetConfigMultiError is an error wrapping multiple validation +// errors returned by Cluster_LbSubsetConfig.ValidateAll() if the designated +// constraints aren't met. +type Cluster_LbSubsetConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_LbSubsetConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m Cluster_LbSubsetConfigMultiError) AllErrors() []error { return m } + +// Cluster_LbSubsetConfigValidationError is the validation error returned by +// Cluster_LbSubsetConfig.Validate if the designated constraints aren't met. +type Cluster_LbSubsetConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_LbSubsetConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_LbSubsetConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_LbSubsetConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_LbSubsetConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_LbSubsetConfigValidationError) ErrorName() string { + return "Cluster_LbSubsetConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_LbSubsetConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_LbSubsetConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_LbSubsetConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_LbSubsetConfigValidationError{} + +// Validate checks the field values on Cluster_SlowStartConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Cluster_SlowStartConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_SlowStartConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_SlowStartConfigMultiError, or nil if none found. 
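The `Cluster_LbSubsetConfig_LbSubsetFallbackPolicy_name[...]` lookup above is the generated enum-membership check: any numeric value outside the defined enum set is rejected. A hedged caller-side sketch (again assuming the `clusterv3` alias; illustrative only):

```go
package main

import (
	"fmt"

	clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
)

func main() {
	// 99 is not a defined Cluster_LbSubsetConfig_LbSubsetFallbackPolicy value,
	// so the enum-membership check above rejects it.
	cfg := &clusterv3.Cluster_LbSubsetConfig{
		FallbackPolicy: clusterv3.Cluster_LbSubsetConfig_LbSubsetFallbackPolicy(99),
	}

	if err := cfg.Validate(); err != nil {
		fmt.Println(err) // reason: "value must be one of the defined enum values"
	}
}
```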
+func (m *Cluster_SlowStartConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_SlowStartConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetSlowStartWindow()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_SlowStartConfigValidationError{ + field: "SlowStartWindow", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_SlowStartConfigValidationError{ + field: "SlowStartWindow", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSlowStartWindow()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_SlowStartConfigValidationError{ + field: "SlowStartWindow", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetAggression()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_SlowStartConfigValidationError{ + field: "Aggression", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_SlowStartConfigValidationError{ + field: "Aggression", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAggression()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_SlowStartConfigValidationError{ + field: "Aggression", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMinWeightPercent()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_SlowStartConfigValidationError{ + field: "MinWeightPercent", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_SlowStartConfigValidationError{ + field: "MinWeightPercent", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMinWeightPercent()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_SlowStartConfigValidationError{ + field: "MinWeightPercent", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Cluster_SlowStartConfigMultiError(errors) + } + + return nil +} + +// Cluster_SlowStartConfigMultiError is an error wrapping multiple validation +// errors returned by Cluster_SlowStartConfig.ValidateAll() if the designated +// constraints aren't met. +type Cluster_SlowStartConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_SlowStartConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m Cluster_SlowStartConfigMultiError) AllErrors() []error { return m } + +// Cluster_SlowStartConfigValidationError is the validation error returned by +// Cluster_SlowStartConfig.Validate if the designated constraints aren't met. +type Cluster_SlowStartConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_SlowStartConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_SlowStartConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_SlowStartConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_SlowStartConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_SlowStartConfigValidationError) ErrorName() string { + return "Cluster_SlowStartConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_SlowStartConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_SlowStartConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_SlowStartConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_SlowStartConfigValidationError{} + +// Validate checks the field values on Cluster_RoundRobinLbConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Cluster_RoundRobinLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_RoundRobinLbConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_RoundRobinLbConfigMultiError, or nil if none found. 
+func (m *Cluster_RoundRobinLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_RoundRobinLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetSlowStartConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_RoundRobinLbConfigValidationError{ + field: "SlowStartConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_RoundRobinLbConfigValidationError{ + field: "SlowStartConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSlowStartConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_RoundRobinLbConfigValidationError{ + field: "SlowStartConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Cluster_RoundRobinLbConfigMultiError(errors) + } + + return nil +} + +// Cluster_RoundRobinLbConfigMultiError is an error wrapping multiple +// validation errors returned by Cluster_RoundRobinLbConfig.ValidateAll() if +// the designated constraints aren't met. +type Cluster_RoundRobinLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_RoundRobinLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_RoundRobinLbConfigMultiError) AllErrors() []error { return m } + +// Cluster_RoundRobinLbConfigValidationError is the validation error returned +// by Cluster_RoundRobinLbConfig.Validate if the designated constraints aren't met. +type Cluster_RoundRobinLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_RoundRobinLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_RoundRobinLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_RoundRobinLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_RoundRobinLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_RoundRobinLbConfigValidationError) ErrorName() string { + return "Cluster_RoundRobinLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_RoundRobinLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_RoundRobinLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_RoundRobinLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_RoundRobinLbConfigValidationError{} + +// Validate checks the field values on Cluster_LeastRequestLbConfig with the +// rules defined in the proto definition for this message. 
If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Cluster_LeastRequestLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_LeastRequestLbConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_LeastRequestLbConfigMultiError, or nil if none found. +func (m *Cluster_LeastRequestLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_LeastRequestLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if wrapper := m.GetChoiceCount(); wrapper != nil { + + if wrapper.GetValue() < 2 { + err := Cluster_LeastRequestLbConfigValidationError{ + field: "ChoiceCount", + reason: "value must be greater than or equal to 2", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetActiveRequestBias()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_LeastRequestLbConfigValidationError{ + field: "ActiveRequestBias", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_LeastRequestLbConfigValidationError{ + field: "ActiveRequestBias", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetActiveRequestBias()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_LeastRequestLbConfigValidationError{ + field: "ActiveRequestBias", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSlowStartConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_LeastRequestLbConfigValidationError{ + field: "SlowStartConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_LeastRequestLbConfigValidationError{ + field: "SlowStartConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSlowStartConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_LeastRequestLbConfigValidationError{ + field: "SlowStartConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Cluster_LeastRequestLbConfigMultiError(errors) + } + + return nil +} + +// Cluster_LeastRequestLbConfigMultiError is an error wrapping multiple +// validation errors returned by Cluster_LeastRequestLbConfig.ValidateAll() if +// the designated constraints aren't met. +type Cluster_LeastRequestLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_LeastRequestLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m Cluster_LeastRequestLbConfigMultiError) AllErrors() []error { return m } + +// Cluster_LeastRequestLbConfigValidationError is the validation error returned +// by Cluster_LeastRequestLbConfig.Validate if the designated constraints +// aren't met. +type Cluster_LeastRequestLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_LeastRequestLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_LeastRequestLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_LeastRequestLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_LeastRequestLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_LeastRequestLbConfigValidationError) ErrorName() string { + return "Cluster_LeastRequestLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_LeastRequestLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_LeastRequestLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_LeastRequestLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_LeastRequestLbConfigValidationError{} + +// Validate checks the field values on Cluster_RingHashLbConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Cluster_RingHashLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_RingHashLbConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_RingHashLbConfigMultiError, or nil if none found. 
+func (m *Cluster_RingHashLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_RingHashLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if wrapper := m.GetMinimumRingSize(); wrapper != nil { + + if wrapper.GetValue() > 8388608 { + err := Cluster_RingHashLbConfigValidationError{ + field: "MinimumRingSize", + reason: "value must be less than or equal to 8388608", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if _, ok := Cluster_RingHashLbConfig_HashFunction_name[int32(m.GetHashFunction())]; !ok { + err := Cluster_RingHashLbConfigValidationError{ + field: "HashFunction", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if wrapper := m.GetMaximumRingSize(); wrapper != nil { + + if wrapper.GetValue() > 8388608 { + err := Cluster_RingHashLbConfigValidationError{ + field: "MaximumRingSize", + reason: "value must be less than or equal to 8388608", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return Cluster_RingHashLbConfigMultiError(errors) + } + + return nil +} + +// Cluster_RingHashLbConfigMultiError is an error wrapping multiple validation +// errors returned by Cluster_RingHashLbConfig.ValidateAll() if the designated +// constraints aren't met. +type Cluster_RingHashLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_RingHashLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_RingHashLbConfigMultiError) AllErrors() []error { return m } + +// Cluster_RingHashLbConfigValidationError is the validation error returned by +// Cluster_RingHashLbConfig.Validate if the designated constraints aren't met. +type Cluster_RingHashLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_RingHashLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_RingHashLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_RingHashLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_RingHashLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_RingHashLbConfigValidationError) ErrorName() string { + return "Cluster_RingHashLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_RingHashLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_RingHashLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_RingHashLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_RingHashLbConfigValidationError{} + +// Validate checks the field values on Cluster_MaglevLbConfig with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Cluster_MaglevLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_MaglevLbConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_MaglevLbConfigMultiError, or nil if none found. +func (m *Cluster_MaglevLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_MaglevLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if wrapper := m.GetTableSize(); wrapper != nil { + + if wrapper.GetValue() > 5000011 { + err := Cluster_MaglevLbConfigValidationError{ + field: "TableSize", + reason: "value must be less than or equal to 5000011", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return Cluster_MaglevLbConfigMultiError(errors) + } + + return nil +} + +// Cluster_MaglevLbConfigMultiError is an error wrapping multiple validation +// errors returned by Cluster_MaglevLbConfig.ValidateAll() if the designated +// constraints aren't met. +type Cluster_MaglevLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_MaglevLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_MaglevLbConfigMultiError) AllErrors() []error { return m } + +// Cluster_MaglevLbConfigValidationError is the validation error returned by +// Cluster_MaglevLbConfig.Validate if the designated constraints aren't met. +type Cluster_MaglevLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_MaglevLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_MaglevLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_MaglevLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_MaglevLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_MaglevLbConfigValidationError) ErrorName() string { + return "Cluster_MaglevLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_MaglevLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_MaglevLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_MaglevLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_MaglevLbConfigValidationError{} + +// Validate checks the field values on Cluster_OriginalDstLbConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *Cluster_OriginalDstLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_OriginalDstLbConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_OriginalDstLbConfigMultiError, or nil if none found. +func (m *Cluster_OriginalDstLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_OriginalDstLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for UseHttpHeader + + // no validation rules for HttpHeaderName + + if wrapper := m.GetUpstreamPortOverride(); wrapper != nil { + + if wrapper.GetValue() > 65535 { + err := Cluster_OriginalDstLbConfigValidationError{ + field: "UpstreamPortOverride", + reason: "value must be less than or equal to 65535", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetMetadataKey()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_OriginalDstLbConfigValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_OriginalDstLbConfigValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadataKey()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_OriginalDstLbConfigValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Cluster_OriginalDstLbConfigMultiError(errors) + } + + return nil +} + +// Cluster_OriginalDstLbConfigMultiError is an error wrapping multiple +// validation errors returned by Cluster_OriginalDstLbConfig.ValidateAll() if +// the designated constraints aren't met. +type Cluster_OriginalDstLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_OriginalDstLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_OriginalDstLbConfigMultiError) AllErrors() []error { return m } + +// Cluster_OriginalDstLbConfigValidationError is the validation error returned +// by Cluster_OriginalDstLbConfig.Validate if the designated constraints +// aren't met. +type Cluster_OriginalDstLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_OriginalDstLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_OriginalDstLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_OriginalDstLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_OriginalDstLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Cluster_OriginalDstLbConfigValidationError) ErrorName() string { + return "Cluster_OriginalDstLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_OriginalDstLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_OriginalDstLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_OriginalDstLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_OriginalDstLbConfigValidationError{} + +// Validate checks the field values on Cluster_CommonLbConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Cluster_CommonLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_CommonLbConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_CommonLbConfigMultiError, or nil if none found. +func (m *Cluster_CommonLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_CommonLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetHealthyPanicThreshold()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_CommonLbConfigValidationError{ + field: "HealthyPanicThreshold", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_CommonLbConfigValidationError{ + field: "HealthyPanicThreshold", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHealthyPanicThreshold()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_CommonLbConfigValidationError{ + field: "HealthyPanicThreshold", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUpdateMergeWindow()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_CommonLbConfigValidationError{ + field: "UpdateMergeWindow", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_CommonLbConfigValidationError{ + field: "UpdateMergeWindow", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUpdateMergeWindow()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_CommonLbConfigValidationError{ + field: "UpdateMergeWindow", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for IgnoreNewHostsUntilFirstHc + + // no validation rules for CloseConnectionsOnHostSetChange + + if all { + switch v := interface{}(m.GetConsistentHashingLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != 
nil { + errors = append(errors, Cluster_CommonLbConfigValidationError{ + field: "ConsistentHashingLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_CommonLbConfigValidationError{ + field: "ConsistentHashingLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConsistentHashingLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_CommonLbConfigValidationError{ + field: "ConsistentHashingLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetOverrideHostStatus()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_CommonLbConfigValidationError{ + field: "OverrideHostStatus", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_CommonLbConfigValidationError{ + field: "OverrideHostStatus", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOverrideHostStatus()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_CommonLbConfigValidationError{ + field: "OverrideHostStatus", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.LocalityConfigSpecifier.(type) { + case *Cluster_CommonLbConfig_ZoneAwareLbConfig_: + if v == nil { + err := Cluster_CommonLbConfigValidationError{ + field: "LocalityConfigSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetZoneAwareLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_CommonLbConfigValidationError{ + field: "ZoneAwareLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_CommonLbConfigValidationError{ + field: "ZoneAwareLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetZoneAwareLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_CommonLbConfigValidationError{ + field: "ZoneAwareLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Cluster_CommonLbConfig_LocalityWeightedLbConfig_: + if v == nil { + err := Cluster_CommonLbConfigValidationError{ + field: "LocalityConfigSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetLocalityWeightedLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_CommonLbConfigValidationError{ + field: "LocalityWeightedLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_CommonLbConfigValidationError{ + field: 
"LocalityWeightedLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalityWeightedLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_CommonLbConfigValidationError{ + field: "LocalityWeightedLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return Cluster_CommonLbConfigMultiError(errors) + } + + return nil +} + +// Cluster_CommonLbConfigMultiError is an error wrapping multiple validation +// errors returned by Cluster_CommonLbConfig.ValidateAll() if the designated +// constraints aren't met. +type Cluster_CommonLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_CommonLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_CommonLbConfigMultiError) AllErrors() []error { return m } + +// Cluster_CommonLbConfigValidationError is the validation error returned by +// Cluster_CommonLbConfig.Validate if the designated constraints aren't met. +type Cluster_CommonLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_CommonLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_CommonLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_CommonLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_CommonLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_CommonLbConfigValidationError) ErrorName() string { + return "Cluster_CommonLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_CommonLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_CommonLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_CommonLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_CommonLbConfigValidationError{} + +// Validate checks the field values on Cluster_RefreshRate with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Cluster_RefreshRate) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_RefreshRate with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_RefreshRateMultiError, or nil if none found. 
+func (m *Cluster_RefreshRate) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_RefreshRate) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetBaseInterval() == nil { + err := Cluster_RefreshRateValidationError{ + field: "BaseInterval", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if d := m.GetBaseInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = Cluster_RefreshRateValidationError{ + field: "BaseInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 1000000*time.Nanosecond) + + if dur <= gt { + err := Cluster_RefreshRateValidationError{ + field: "BaseInterval", + reason: "value must be greater than 1ms", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if d := m.GetMaxInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = Cluster_RefreshRateValidationError{ + field: "MaxInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 1000000*time.Nanosecond) + + if dur <= gt { + err := Cluster_RefreshRateValidationError{ + field: "MaxInterval", + reason: "value must be greater than 1ms", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if len(errors) > 0 { + return Cluster_RefreshRateMultiError(errors) + } + + return nil +} + +// Cluster_RefreshRateMultiError is an error wrapping multiple validation +// errors returned by Cluster_RefreshRate.ValidateAll() if the designated +// constraints aren't met. +type Cluster_RefreshRateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_RefreshRateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_RefreshRateMultiError) AllErrors() []error { return m } + +// Cluster_RefreshRateValidationError is the validation error returned by +// Cluster_RefreshRate.Validate if the designated constraints aren't met. +type Cluster_RefreshRateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_RefreshRateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_RefreshRateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_RefreshRateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_RefreshRateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Cluster_RefreshRateValidationError) ErrorName() string { + return "Cluster_RefreshRateValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_RefreshRateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_RefreshRate.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_RefreshRateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_RefreshRateValidationError{} + +// Validate checks the field values on Cluster_PreconnectPolicy with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Cluster_PreconnectPolicy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cluster_PreconnectPolicy with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Cluster_PreconnectPolicyMultiError, or nil if none found. +func (m *Cluster_PreconnectPolicy) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_PreconnectPolicy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if wrapper := m.GetPerUpstreamPreconnectRatio(); wrapper != nil { + + if val := wrapper.GetValue(); val < 1 || val > 3 { + err := Cluster_PreconnectPolicyValidationError{ + field: "PerUpstreamPreconnectRatio", + reason: "value must be inside range [1, 3]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetPredictivePreconnectRatio(); wrapper != nil { + + if val := wrapper.GetValue(); val < 1 || val > 3 { + err := Cluster_PreconnectPolicyValidationError{ + field: "PredictivePreconnectRatio", + reason: "value must be inside range [1, 3]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return Cluster_PreconnectPolicyMultiError(errors) + } + + return nil +} + +// Cluster_PreconnectPolicyMultiError is an error wrapping multiple validation +// errors returned by Cluster_PreconnectPolicy.ValidateAll() if the designated +// constraints aren't met. +type Cluster_PreconnectPolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_PreconnectPolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_PreconnectPolicyMultiError) AllErrors() []error { return m } + +// Cluster_PreconnectPolicyValidationError is the validation error returned by +// Cluster_PreconnectPolicy.Validate if the designated constraints aren't met. +type Cluster_PreconnectPolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_PreconnectPolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_PreconnectPolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e Cluster_PreconnectPolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_PreconnectPolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_PreconnectPolicyValidationError) ErrorName() string { + return "Cluster_PreconnectPolicyValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_PreconnectPolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_PreconnectPolicy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_PreconnectPolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_PreconnectPolicyValidationError{} + +// Validate checks the field values on Cluster_LbSubsetConfig_LbSubsetSelector +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *Cluster_LbSubsetConfig_LbSubsetSelector) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Cluster_LbSubsetConfig_LbSubsetSelector with the rules defined in the proto +// definition for this message. If any rules are violated, the result is a +// list of violation errors wrapped in +// Cluster_LbSubsetConfig_LbSubsetSelectorMultiError, or nil if none found. +func (m *Cluster_LbSubsetConfig_LbSubsetSelector) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_LbSubsetConfig_LbSubsetSelector) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for SingleHostPerSubset + + if _, ok := Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy_name[int32(m.GetFallbackPolicy())]; !ok { + err := Cluster_LbSubsetConfig_LbSubsetSelectorValidationError{ + field: "FallbackPolicy", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Cluster_LbSubsetConfig_LbSubsetSelectorMultiError(errors) + } + + return nil +} + +// Cluster_LbSubsetConfig_LbSubsetSelectorMultiError is an error wrapping +// multiple validation errors returned by +// Cluster_LbSubsetConfig_LbSubsetSelector.ValidateAll() if the designated +// constraints aren't met. +type Cluster_LbSubsetConfig_LbSubsetSelectorMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_LbSubsetConfig_LbSubsetSelectorMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_LbSubsetConfig_LbSubsetSelectorMultiError) AllErrors() []error { return m } + +// Cluster_LbSubsetConfig_LbSubsetSelectorValidationError is the validation +// error returned by Cluster_LbSubsetConfig_LbSubsetSelector.Validate if the +// designated constraints aren't met. +type Cluster_LbSubsetConfig_LbSubsetSelectorValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e Cluster_LbSubsetConfig_LbSubsetSelectorValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Cluster_LbSubsetConfig_LbSubsetSelectorValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_LbSubsetConfig_LbSubsetSelectorValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_LbSubsetConfig_LbSubsetSelectorValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_LbSubsetConfig_LbSubsetSelectorValidationError) ErrorName() string { + return "Cluster_LbSubsetConfig_LbSubsetSelectorValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_LbSubsetConfig_LbSubsetSelectorValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_LbSubsetConfig_LbSubsetSelector.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_LbSubsetConfig_LbSubsetSelectorValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_LbSubsetConfig_LbSubsetSelectorValidationError{} + +// Validate checks the field values on Cluster_CommonLbConfig_ZoneAwareLbConfig +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Cluster_CommonLbConfig_ZoneAwareLbConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// Cluster_CommonLbConfig_ZoneAwareLbConfigMultiError, or nil if none found. 
+func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetRoutingEnabled()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError{ + field: "RoutingEnabled", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError{ + field: "RoutingEnabled", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRoutingEnabled()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError{ + field: "RoutingEnabled", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMinClusterSize()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError{ + field: "MinClusterSize", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError{ + field: "MinClusterSize", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMinClusterSize()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError{ + field: "MinClusterSize", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for FailTrafficOnPanic + + if len(errors) > 0 { + return Cluster_CommonLbConfig_ZoneAwareLbConfigMultiError(errors) + } + + return nil +} + +// Cluster_CommonLbConfig_ZoneAwareLbConfigMultiError is an error wrapping +// multiple validation errors returned by +// Cluster_CommonLbConfig_ZoneAwareLbConfig.ValidateAll() if the designated +// constraints aren't met. +type Cluster_CommonLbConfig_ZoneAwareLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_CommonLbConfig_ZoneAwareLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_CommonLbConfig_ZoneAwareLbConfigMultiError) AllErrors() []error { return m } + +// Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError is the validation +// error returned by Cluster_CommonLbConfig_ZoneAwareLbConfig.Validate if the +// designated constraints aren't met. +type Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError) ErrorName() string { + return "Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_CommonLbConfig_ZoneAwareLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_CommonLbConfig_ZoneAwareLbConfigValidationError{} + +// Validate checks the field values on +// Cluster_CommonLbConfig_LocalityWeightedLbConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Cluster_CommonLbConfig_LocalityWeightedLbConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// Cluster_CommonLbConfig_LocalityWeightedLbConfigMultiError, or nil if none found. +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return Cluster_CommonLbConfig_LocalityWeightedLbConfigMultiError(errors) + } + + return nil +} + +// Cluster_CommonLbConfig_LocalityWeightedLbConfigMultiError is an error +// wrapping multiple validation errors returned by +// Cluster_CommonLbConfig_LocalityWeightedLbConfig.ValidateAll() if the +// designated constraints aren't met. +type Cluster_CommonLbConfig_LocalityWeightedLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Cluster_CommonLbConfig_LocalityWeightedLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_CommonLbConfig_LocalityWeightedLbConfigMultiError) AllErrors() []error { return m } + +// Cluster_CommonLbConfig_LocalityWeightedLbConfigValidationError is the +// validation error returned by +// Cluster_CommonLbConfig_LocalityWeightedLbConfig.Validate if the designated +// constraints aren't met. +type Cluster_CommonLbConfig_LocalityWeightedLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e Cluster_CommonLbConfig_LocalityWeightedLbConfigValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e Cluster_CommonLbConfig_LocalityWeightedLbConfigValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e Cluster_CommonLbConfig_LocalityWeightedLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Cluster_CommonLbConfig_LocalityWeightedLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_CommonLbConfig_LocalityWeightedLbConfigValidationError) ErrorName() string { + return "Cluster_CommonLbConfig_LocalityWeightedLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_CommonLbConfig_LocalityWeightedLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_CommonLbConfig_LocalityWeightedLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_CommonLbConfig_LocalityWeightedLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_CommonLbConfig_LocalityWeightedLbConfigValidationError{} + +// Validate checks the field values on +// Cluster_CommonLbConfig_ConsistentHashingLbConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Cluster_CommonLbConfig_ConsistentHashingLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Cluster_CommonLbConfig_ConsistentHashingLbConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// Cluster_CommonLbConfig_ConsistentHashingLbConfigMultiError, or nil if none found. +func (m *Cluster_CommonLbConfig_ConsistentHashingLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Cluster_CommonLbConfig_ConsistentHashingLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for UseHostnameForHashing + + if wrapper := m.GetHashBalanceFactor(); wrapper != nil { + + if wrapper.GetValue() < 100 { + err := Cluster_CommonLbConfig_ConsistentHashingLbConfigValidationError{ + field: "HashBalanceFactor", + reason: "value must be greater than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return Cluster_CommonLbConfig_ConsistentHashingLbConfigMultiError(errors) + } + + return nil +} + +// Cluster_CommonLbConfig_ConsistentHashingLbConfigMultiError is an error +// wrapping multiple validation errors returned by +// Cluster_CommonLbConfig_ConsistentHashingLbConfig.ValidateAll() if the +// designated constraints aren't met. +type Cluster_CommonLbConfig_ConsistentHashingLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m Cluster_CommonLbConfig_ConsistentHashingLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Cluster_CommonLbConfig_ConsistentHashingLbConfigMultiError) AllErrors() []error { return m } + +// Cluster_CommonLbConfig_ConsistentHashingLbConfigValidationError is the +// validation error returned by +// Cluster_CommonLbConfig_ConsistentHashingLbConfig.Validate if the designated +// constraints aren't met. +type Cluster_CommonLbConfig_ConsistentHashingLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Cluster_CommonLbConfig_ConsistentHashingLbConfigValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e Cluster_CommonLbConfig_ConsistentHashingLbConfigValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e Cluster_CommonLbConfig_ConsistentHashingLbConfigValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e Cluster_CommonLbConfig_ConsistentHashingLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Cluster_CommonLbConfig_ConsistentHashingLbConfigValidationError) ErrorName() string { + return "Cluster_CommonLbConfig_ConsistentHashingLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Cluster_CommonLbConfig_ConsistentHashingLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCluster_CommonLbConfig_ConsistentHashingLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Cluster_CommonLbConfig_ConsistentHashingLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Cluster_CommonLbConfig_ConsistentHashingLbConfigValidationError{} + +// Validate checks the field values on LoadBalancingPolicy_Policy with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *LoadBalancingPolicy_Policy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LoadBalancingPolicy_Policy with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// LoadBalancingPolicy_PolicyMultiError, or nil if none found. 
+func (m *LoadBalancingPolicy_Policy) ValidateAll() error { + return m.validate(true) +} + +func (m *LoadBalancingPolicy_Policy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetTypedExtensionConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LoadBalancingPolicy_PolicyValidationError{ + field: "TypedExtensionConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LoadBalancingPolicy_PolicyValidationError{ + field: "TypedExtensionConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedExtensionConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LoadBalancingPolicy_PolicyValidationError{ + field: "TypedExtensionConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return LoadBalancingPolicy_PolicyMultiError(errors) + } + + return nil +} + +// LoadBalancingPolicy_PolicyMultiError is an error wrapping multiple +// validation errors returned by LoadBalancingPolicy_Policy.ValidateAll() if +// the designated constraints aren't met. +type LoadBalancingPolicy_PolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LoadBalancingPolicy_PolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LoadBalancingPolicy_PolicyMultiError) AllErrors() []error { return m } + +// LoadBalancingPolicy_PolicyValidationError is the validation error returned +// by LoadBalancingPolicy_Policy.Validate if the designated constraints aren't met. +type LoadBalancingPolicy_PolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LoadBalancingPolicy_PolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LoadBalancingPolicy_PolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LoadBalancingPolicy_PolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LoadBalancingPolicy_PolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LoadBalancingPolicy_PolicyValidationError) ErrorName() string { + return "LoadBalancingPolicy_PolicyValidationError" +} + +// Error satisfies the builtin error interface +func (e LoadBalancingPolicy_PolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLoadBalancingPolicy_Policy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LoadBalancingPolicy_PolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LoadBalancingPolicy_PolicyValidationError{} + +// Validate checks the field values on +// UpstreamConnectionOptions_HappyEyeballsConfig with the rules defined in the +// proto definition for this message. 
If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *UpstreamConnectionOptions_HappyEyeballsConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// UpstreamConnectionOptions_HappyEyeballsConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// UpstreamConnectionOptions_HappyEyeballsConfigMultiError, or nil if none found. +func (m *UpstreamConnectionOptions_HappyEyeballsConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *UpstreamConnectionOptions_HappyEyeballsConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for FirstAddressFamilyVersion + + if wrapper := m.GetFirstAddressFamilyCount(); wrapper != nil { + + if wrapper.GetValue() < 1 { + err := UpstreamConnectionOptions_HappyEyeballsConfigValidationError{ + field: "FirstAddressFamilyCount", + reason: "value must be greater than or equal to 1", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return UpstreamConnectionOptions_HappyEyeballsConfigMultiError(errors) + } + + return nil +} + +// UpstreamConnectionOptions_HappyEyeballsConfigMultiError is an error wrapping +// multiple validation errors returned by +// UpstreamConnectionOptions_HappyEyeballsConfig.ValidateAll() if the +// designated constraints aren't met. +type UpstreamConnectionOptions_HappyEyeballsConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UpstreamConnectionOptions_HappyEyeballsConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UpstreamConnectionOptions_HappyEyeballsConfigMultiError) AllErrors() []error { return m } + +// UpstreamConnectionOptions_HappyEyeballsConfigValidationError is the +// validation error returned by +// UpstreamConnectionOptions_HappyEyeballsConfig.Validate if the designated +// constraints aren't met. +type UpstreamConnectionOptions_HappyEyeballsConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UpstreamConnectionOptions_HappyEyeballsConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UpstreamConnectionOptions_HappyEyeballsConfigValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e UpstreamConnectionOptions_HappyEyeballsConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UpstreamConnectionOptions_HappyEyeballsConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e UpstreamConnectionOptions_HappyEyeballsConfigValidationError) ErrorName() string { + return "UpstreamConnectionOptions_HappyEyeballsConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e UpstreamConnectionOptions_HappyEyeballsConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUpstreamConnectionOptions_HappyEyeballsConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UpstreamConnectionOptions_HappyEyeballsConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UpstreamConnectionOptions_HappyEyeballsConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster_vtproto.pb.go new file mode 100644 index 0000000000..4e2c50887e --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster_vtproto.pb.go @@ -0,0 +1,3439 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/cluster/v3/cluster.proto + +package clusterv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ClusterCollection) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterCollection) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterCollection) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Entries != nil { + if vtmsg, ok := interface{}(m.Entries).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Entries) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_TransportSocketMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_TransportSocketMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_TransportSocketMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TransportSocket != nil { + if vtmsg, ok := interface{}(m.TransportSocket).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TransportSocket) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.Match != nil { + size, err := (*structpb.Struct)(m.Match).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_CustomClusterType) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_CustomClusterType) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_CustomClusterType) 
MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_EdsClusterConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_EdsClusterConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_EdsClusterConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ServiceName) > 0 { + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x12 + } + if m.EdsConfig != nil { + if vtmsg, ok := interface{}(m.EdsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.EdsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_LbSubsetConfig_LbSubsetSelector) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_LbSubsetConfig_LbSubsetSelector) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_LbSubsetConfig_LbSubsetSelector) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SingleHostPerSubset { + i-- + if m.SingleHostPerSubset { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.FallbackKeysSubset) > 0 { + for iNdEx := len(m.FallbackKeysSubset) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.FallbackKeysSubset[iNdEx]) + copy(dAtA[i:], m.FallbackKeysSubset[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.FallbackKeysSubset[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.FallbackPolicy != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.FallbackPolicy)) + i-- + dAtA[i] = 0x10 + } + if len(m.Keys) > 0 { + for iNdEx := len(m.Keys) - 1; iNdEx >= 0; iNdEx-- { 
+ i -= len(m.Keys[iNdEx]) + copy(dAtA[i:], m.Keys[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Keys[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Cluster_LbSubsetConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_LbSubsetConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_LbSubsetConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MetadataFallbackPolicy != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MetadataFallbackPolicy)) + i-- + dAtA[i] = 0x40 + } + if m.ListAsAny { + i-- + if m.ListAsAny { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.PanicModeAny { + i-- + if m.PanicModeAny { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.ScaleLocalityWeight { + i-- + if m.ScaleLocalityWeight { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.LocalityWeightAware { + i-- + if m.LocalityWeightAware { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.SubsetSelectors) > 0 { + for iNdEx := len(m.SubsetSelectors) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.SubsetSelectors[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if m.DefaultSubset != nil { + size, err := (*structpb.Struct)(m.DefaultSubset).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.FallbackPolicy != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.FallbackPolicy)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Cluster_SlowStartConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_SlowStartConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_SlowStartConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MinWeightPercent != nil { + if vtmsg, ok := interface{}(m.MinWeightPercent).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MinWeightPercent) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } 
+ if m.Aggression != nil { + if vtmsg, ok := interface{}(m.Aggression).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Aggression) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.SlowStartWindow != nil { + size, err := (*durationpb.Duration)(m.SlowStartWindow).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_RoundRobinLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_RoundRobinLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_RoundRobinLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SlowStartConfig != nil { + size, err := m.SlowStartConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_LeastRequestLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_LeastRequestLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_LeastRequestLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SlowStartConfig != nil { + size, err := m.SlowStartConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.ActiveRequestBias != nil { + if vtmsg, ok := interface{}(m.ActiveRequestBias).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ActiveRequestBias) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.ChoiceCount != nil { + size, err := (*wrapperspb.UInt32Value)(m.ChoiceCount).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_RingHashLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_RingHashLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_RingHashLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaximumRingSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.MaximumRingSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.HashFunction != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HashFunction)) + i-- + dAtA[i] = 0x18 + } + if m.MinimumRingSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.MinimumRingSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_MaglevLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_MaglevLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_MaglevLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TableSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.TableSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_OriginalDstLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_OriginalDstLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_OriginalDstLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MetadataKey != nil { + if vtmsg, ok := interface{}(m.MetadataKey).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) 
+ } else { + encoded, err := proto.Marshal(m.MetadataKey) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.UpstreamPortOverride != nil { + size, err := (*wrapperspb.UInt32Value)(m.UpstreamPortOverride).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.HttpHeaderName) > 0 { + i -= len(m.HttpHeaderName) + copy(dAtA[i:], m.HttpHeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HttpHeaderName))) + i-- + dAtA[i] = 0x12 + } + if m.UseHttpHeader { + i-- + if m.UseHttpHeader { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.FailTrafficOnPanic { + i-- + if m.FailTrafficOnPanic { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.MinClusterSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.MinClusterSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.RoutingEnabled != nil { + if vtmsg, ok := interface{}(m.RoutingEnabled).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RoutingEnabled) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *Cluster_CommonLbConfig_ConsistentHashingLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == 
nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_CommonLbConfig_ConsistentHashingLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_CommonLbConfig_ConsistentHashingLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.HashBalanceFactor != nil { + size, err := (*wrapperspb.UInt32Value)(m.HashBalanceFactor).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.UseHostnameForHashing { + i-- + if m.UseHostnameForHashing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Cluster_CommonLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_CommonLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_CommonLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.OverrideHostStatus != nil { + if vtmsg, ok := interface{}(m.OverrideHostStatus).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.OverrideHostStatus) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + if m.ConsistentHashingLbConfig != nil { + size, err := m.ConsistentHashingLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.CloseConnectionsOnHostSetChange { + i-- + if m.CloseConnectionsOnHostSetChange { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.IgnoreNewHostsUntilFirstHc { + i-- + if m.IgnoreNewHostsUntilFirstHc { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.UpdateMergeWindow != nil { + size, err := (*durationpb.Duration)(m.UpdateMergeWindow).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if msg, ok := m.LocalityConfigSpecifier.(*Cluster_CommonLbConfig_LocalityWeightedLbConfig_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.LocalityConfigSpecifier.(*Cluster_CommonLbConfig_ZoneAwareLbConfig_); ok { + size, err := 
msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.HealthyPanicThreshold != nil { + if vtmsg, ok := interface{}(m.HealthyPanicThreshold).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HealthyPanicThreshold) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ZoneAwareLbConfig != nil { + size, err := m.ZoneAwareLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LocalityWeightedLbConfig != nil { + size, err := m.LocalityWeightedLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Cluster_RefreshRate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster_RefreshRate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_RefreshRate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxInterval != nil { + size, err := (*durationpb.Duration)(m.MaxInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.BaseInterval != nil { + size, err := (*durationpb.Duration)(m.BaseInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_PreconnectPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } 
+ return dAtA[:n], nil +} + +func (m *Cluster_PreconnectPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_PreconnectPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PredictivePreconnectRatio != nil { + size, err := (*wrapperspb.DoubleValue)(m.PredictivePreconnectRatio).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.PerUpstreamPreconnectRatio != nil { + size, err := (*wrapperspb.DoubleValue)(m.PerUpstreamPreconnectRatio).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.LrsReportEndpointMetrics) > 0 { + for iNdEx := len(m.LrsReportEndpointMetrics) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.LrsReportEndpointMetrics[iNdEx]) + copy(dAtA[i:], m.LrsReportEndpointMetrics[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LrsReportEndpointMetrics[iNdEx]))) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xca + } + } + if msg, ok := m.LbConfig.(*Cluster_RoundRobinLbConfig_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.TypedDnsResolverConfig != nil { + if vtmsg, ok := interface{}(m.TypedDnsResolverConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TypedDnsResolverConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xba + } + if m.WaitForWarmOnInit != nil { + size, err := (*wrapperspb.BoolValue)(m.WaitForWarmOnInit).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xb2 + } + if m.DnsResolutionConfig != nil { + if vtmsg, ok := interface{}(m.DnsResolutionConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DnsResolutionConfig) + 
if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xaa + } + if msg, ok := m.LbConfig.(*Cluster_MaglevLbConfig_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.ConnectionPoolPerDownstreamConnection { + i-- + if m.ConnectionPoolPerDownstreamConnection { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x98 + } + if m.PreconnectPolicy != nil { + size, err := m.PreconnectPolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x92 + } + if m.TrackClusterStats != nil { + size, err := m.TrackClusterStats.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x8a + } + if m.UpstreamConfig != nil { + if vtmsg, ok := interface{}(m.UpstreamConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UpstreamConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x82 + } + if m.TrackTimeoutBudgets { + i-- + if m.TrackTimeoutBudgets { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xf8 + } + if m.UpstreamHttpProtocolOptions != nil { + if vtmsg, ok := interface{}(m.UpstreamHttpProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UpstreamHttpProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xf2 + } + if m.UseTcpForDnsLookups { + i-- + if m.UseTcpForDnsLookups { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xe8 + } + if m.DnsFailureRefreshRate != nil { + size, err := m.DnsFailureRefreshRate.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xe2 + } + if len(m.TransportSocketMatches) > 0 { + for iNdEx := len(m.TransportSocketMatches) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TransportSocketMatches[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xda + } + } + if m.LrsServer != nil { + if vtmsg, ok := interface{}(m.LrsServer).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + 
encoded, err := proto.Marshal(m.LrsServer) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xd2 + } + if m.LoadBalancingPolicy != nil { + size, err := m.LoadBalancingPolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xca + } + if len(m.Filters) > 0 { + for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Filters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc2 + } + } + if m.RespectDnsTtl { + i-- + if m.RespectDnsTtl { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb8 + } + if msg, ok := m.ClusterDiscoveryType.(*Cluster_ClusterType); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.LbConfig.(*Cluster_LeastRequestLbConfig_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.TypedExtensionProtocolOptions) > 0 { + for k := range m.TypedExtensionProtocolOptions { + v := m.TypedExtensionProtocolOptions[k] + baseI := i + size, err := (*anypb.Any)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + } + if msg, ok := m.LbConfig.(*Cluster_OriginalDstLbConfig_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.LoadAssignment != nil { + if vtmsg, ok := interface{}(m.LoadAssignment).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LoadAssignment) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a + } + if m.IgnoreHealthOnHostRemoval { + i-- + if m.IgnoreHealthOnHostRemoval { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x80 + } + if m.CloseConnectionsOnHostHealthFailure { + i-- + if m.CloseConnectionsOnHostHealthFailure { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf8 + } + if m.UpstreamConnectionOptions != nil { + size, err := m.UpstreamConnectionOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf2 + } + if m.CommonHttpProtocolOptions != nil { + if vtmsg, ok := interface{}(m.CommonHttpProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 
0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CommonHttpProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + } + if len(m.AltStatName) > 0 { + i -= len(m.AltStatName) + copy(dAtA[i:], m.AltStatName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AltStatName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + if m.CommonLbConfig != nil { + size, err := m.CommonLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } + if m.ProtocolSelection != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ProtocolSelection)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd0 + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + if m.TransportSocket != nil { + if vtmsg, ok := interface{}(m.TransportSocket).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TransportSocket) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if msg, ok := m.LbConfig.(*Cluster_RingHashLbConfig_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.LbSubsetConfig != nil { + size, err := m.LbSubsetConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if m.UpstreamBindConfig != nil { + if vtmsg, ok := interface{}(m.UpstreamBindConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UpstreamBindConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if m.CleanupInterval != nil { + size, err := (*durationpb.Duration)(m.CleanupInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if m.OutlierDetection != nil { + size, err := m.OutlierDetection.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err 
+ } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if len(m.DnsResolvers) > 0 { + for iNdEx := len(m.DnsResolvers) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.DnsResolvers[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DnsResolvers[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + } + if m.DnsLookupFamily != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DnsLookupFamily)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + if m.DnsRefreshRate != nil { + size, err := (*durationpb.Duration)(m.DnsRefreshRate).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.Http2ProtocolOptions != nil { + if vtmsg, ok := interface{}(m.Http2ProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Http2ProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x72 + } + if m.HttpProtocolOptions != nil { + if vtmsg, ok := interface{}(m.HttpProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HttpProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + if m.CircuitBreakers != nil { + size, err := m.CircuitBreakers.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if m.MaxRequestsPerConnection != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxRequestsPerConnection).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if len(m.HealthChecks) > 0 { + for iNdEx := len(m.HealthChecks) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.HealthChecks[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HealthChecks[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + } + if m.LbPolicy != 0 { + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(m.LbPolicy)) + i-- + dAtA[i] = 0x30 + } + if m.PerConnectionBufferLimitBytes != nil { + size, err := (*wrapperspb.UInt32Value)(m.PerConnectionBufferLimitBytes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.ConnectTimeout != nil { + size, err := (*durationpb.Duration)(m.ConnectTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.EdsClusterConfig != nil { + size, err := m.EdsClusterConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if msg, ok := m.ClusterDiscoveryType.(*Cluster_Type); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cluster_Type) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_Type) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x10 + return len(dAtA) - i, nil +} +func (m *Cluster_RingHashLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_RingHashLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RingHashLbConfig != nil { + size, err := m.RingHashLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + return len(dAtA) - i, nil +} +func (m *Cluster_OriginalDstLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_OriginalDstLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OriginalDstLbConfig != nil { + size, err := m.OriginalDstLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + return len(dAtA) - i, nil +} +func (m *Cluster_LeastRequestLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_LeastRequestLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LeastRequestLbConfig != nil { + size, err := m.LeastRequestLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xaa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2 + 
i-- + dAtA[i] = 0xaa + } + return len(dAtA) - i, nil +} +func (m *Cluster_ClusterType) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_ClusterType) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ClusterType != nil { + size, err := m.ClusterType.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb2 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb2 + } + return len(dAtA) - i, nil +} +func (m *Cluster_MaglevLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_MaglevLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MaglevLbConfig != nil { + size, err := m.MaglevLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xa2 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xa2 + } + return len(dAtA) - i, nil +} +func (m *Cluster_RoundRobinLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cluster_RoundRobinLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RoundRobinLbConfig != nil { + size, err := m.RoundRobinLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xc2 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xc2 + } + return len(dAtA) - i, nil +} +func (m *LoadBalancingPolicy_Policy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LoadBalancingPolicy_Policy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LoadBalancingPolicy_Policy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TypedExtensionConfig != nil { + if vtmsg, ok := interface{}(m.TypedExtensionConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TypedExtensionConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} + +func (m *LoadBalancingPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LoadBalancingPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LoadBalancingPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Policies) > 0 { + for iNdEx := len(m.Policies) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Policies[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *UpstreamConnectionOptions_HappyEyeballsConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpstreamConnectionOptions_HappyEyeballsConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UpstreamConnectionOptions_HappyEyeballsConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.FirstAddressFamilyCount != nil { + size, err := (*wrapperspb.UInt32Value)(m.FirstAddressFamilyCount).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.FirstAddressFamilyVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.FirstAddressFamilyVersion)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UpstreamConnectionOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpstreamConnectionOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UpstreamConnectionOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.HappyEyeballsConfig != nil { + size, err := m.HappyEyeballsConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.SetLocalInterfaceNameOnUpstreamConnections { + i-- + if m.SetLocalInterfaceNameOnUpstreamConnections { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.TcpKeepalive != nil { + if vtmsg, ok := interface{}(m.TcpKeepalive).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(size)) + } else { + encoded, err := proto.Marshal(m.TcpKeepalive) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TrackClusterStats) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TrackClusterStats) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TrackClusterStats) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PerEndpointStats { + i-- + if m.PerEndpointStats { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.RequestResponseSizes { + i-- + if m.RequestResponseSizes { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.TimeoutBudgets { + i-- + if m.TimeoutBudgets { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ClusterCollection) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Entries != nil { + if size, ok := interface{}(m.Entries).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Entries) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_TransportSocketMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Match != nil { + l = (*structpb.Struct)(m.Match).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TransportSocket != nil { + if size, ok := interface{}(m.TransportSocket).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TransportSocket) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_CustomClusterType) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_EdsClusterConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EdsConfig != nil { + if size, ok := interface{}(m.EdsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.EdsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ServiceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_LbSubsetConfig_LbSubsetSelector) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Keys) > 0 { + for _, s := range m.Keys { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.FallbackPolicy != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.FallbackPolicy)) + 
} + if len(m.FallbackKeysSubset) > 0 { + for _, s := range m.FallbackKeysSubset { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.SingleHostPerSubset { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_LbSubsetConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FallbackPolicy != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.FallbackPolicy)) + } + if m.DefaultSubset != nil { + l = (*structpb.Struct)(m.DefaultSubset).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.SubsetSelectors) > 0 { + for _, e := range m.SubsetSelectors { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.LocalityWeightAware { + n += 2 + } + if m.ScaleLocalityWeight { + n += 2 + } + if m.PanicModeAny { + n += 2 + } + if m.ListAsAny { + n += 2 + } + if m.MetadataFallbackPolicy != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MetadataFallbackPolicy)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_SlowStartConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SlowStartWindow != nil { + l = (*durationpb.Duration)(m.SlowStartWindow).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Aggression != nil { + if size, ok := interface{}(m.Aggression).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Aggression) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MinWeightPercent != nil { + if size, ok := interface{}(m.MinWeightPercent).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MinWeightPercent) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_RoundRobinLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SlowStartConfig != nil { + l = m.SlowStartConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_LeastRequestLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ChoiceCount != nil { + l = (*wrapperspb.UInt32Value)(m.ChoiceCount).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ActiveRequestBias != nil { + if size, ok := interface{}(m.ActiveRequestBias).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ActiveRequestBias) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SlowStartConfig != nil { + l = m.SlowStartConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_RingHashLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MinimumRingSize != nil { + l = (*wrapperspb.UInt64Value)(m.MinimumRingSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HashFunction != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HashFunction)) + } + if m.MaximumRingSize != nil { + l = (*wrapperspb.UInt64Value)(m.MaximumRingSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_MaglevLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TableSize != nil { + l = (*wrapperspb.UInt64Value)(m.TableSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += 
len(m.unknownFields) + return n +} + +func (m *Cluster_OriginalDstLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UseHttpHeader { + n += 2 + } + l = len(m.HttpHeaderName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UpstreamPortOverride != nil { + l = (*wrapperspb.UInt32Value)(m.UpstreamPortOverride).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MetadataKey != nil { + if size, ok := interface{}(m.MetadataKey).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MetadataKey) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RoutingEnabled != nil { + if size, ok := interface{}(m.RoutingEnabled).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RoutingEnabled) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MinClusterSize != nil { + l = (*wrapperspb.UInt64Value)(m.MinClusterSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FailTrafficOnPanic { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *Cluster_CommonLbConfig_ConsistentHashingLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UseHostnameForHashing { + n += 2 + } + if m.HashBalanceFactor != nil { + l = (*wrapperspb.UInt32Value)(m.HashBalanceFactor).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_CommonLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HealthyPanicThreshold != nil { + if size, ok := interface{}(m.HealthyPanicThreshold).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.HealthyPanicThreshold) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.LocalityConfigSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.UpdateMergeWindow != nil { + l = (*durationpb.Duration)(m.UpdateMergeWindow).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IgnoreNewHostsUntilFirstHc { + n += 2 + } + if m.CloseConnectionsOnHostSetChange { + n += 2 + } + if m.ConsistentHashingLbConfig != nil { + l = m.ConsistentHashingLbConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OverrideHostStatus != nil { + if size, ok := interface{}(m.OverrideHostStatus).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.OverrideHostStatus) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_CommonLbConfig_ZoneAwareLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ZoneAwareLbConfig != nil { + l = m.ZoneAwareLbConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Cluster_CommonLbConfig_LocalityWeightedLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LocalityWeightedLbConfig != nil { + l = m.LocalityWeightedLbConfig.SizeVT() + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Cluster_RefreshRate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BaseInterval != nil { + l = (*durationpb.Duration)(m.BaseInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxInterval != nil { + l = (*durationpb.Duration)(m.MaxInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_PreconnectPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PerUpstreamPreconnectRatio != nil { + l = (*wrapperspb.DoubleValue)(m.PerUpstreamPreconnectRatio).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PredictivePreconnectRatio != nil { + l = (*wrapperspb.DoubleValue)(m.PredictivePreconnectRatio).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ClusterDiscoveryType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.EdsClusterConfig != nil { + l = m.EdsClusterConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectTimeout != nil { + l = (*durationpb.Duration)(m.ConnectTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PerConnectionBufferLimitBytes != nil { + l = (*wrapperspb.UInt32Value)(m.PerConnectionBufferLimitBytes).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LbPolicy != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.LbPolicy)) + } + if len(m.HealthChecks) > 0 { + for _, e := range m.HealthChecks { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.MaxRequestsPerConnection != nil { + l = (*wrapperspb.UInt32Value)(m.MaxRequestsPerConnection).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CircuitBreakers != nil { + l = m.CircuitBreakers.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HttpProtocolOptions != nil { + if size, ok := interface{}(m.HttpProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.HttpProtocolOptions) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Http2ProtocolOptions != nil { + if size, ok := interface{}(m.Http2ProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Http2ProtocolOptions) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DnsRefreshRate != nil { + l = (*durationpb.Duration)(m.DnsRefreshRate).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DnsLookupFamily != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.DnsLookupFamily)) + } + if len(m.DnsResolvers) > 0 { + for _, e := range m.DnsResolvers { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.OutlierDetection != nil { + l = m.OutlierDetection.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CleanupInterval != nil { + l = (*durationpb.Duration)(m.CleanupInterval).SizeVT() 
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UpstreamBindConfig != nil { + if size, ok := interface{}(m.UpstreamBindConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UpstreamBindConfig) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LbSubsetConfig != nil { + l = m.LbSubsetConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.LbConfig.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.TransportSocket != nil { + if size, ok := interface{}(m.TransportSocket).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TransportSocket) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ProtocolSelection != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.ProtocolSelection)) + } + if m.CommonLbConfig != nil { + l = m.CommonLbConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.AltStatName) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CommonHttpProtocolOptions != nil { + if size, ok := interface{}(m.CommonHttpProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CommonHttpProtocolOptions) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UpstreamConnectionOptions != nil { + l = m.UpstreamConnectionOptions.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CloseConnectionsOnHostHealthFailure { + n += 3 + } + if m.IgnoreHealthOnHostRemoval { + n += 3 + } + if m.LoadAssignment != nil { + if size, ok := interface{}(m.LoadAssignment).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LoadAssignment) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.TypedExtensionProtocolOptions) > 0 { + for k, v := range m.TypedExtensionProtocolOptions { + _ = k + _ = v + l = 0 + if v != nil { + l = (*anypb.Any)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 2 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if m.RespectDnsTtl { + n += 3 + } + if len(m.Filters) > 0 { + for _, e := range m.Filters { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.LoadBalancingPolicy != nil { + l = m.LoadBalancingPolicy.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LrsServer != nil { + if size, ok := interface{}(m.LrsServer).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LrsServer) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.TransportSocketMatches) > 0 { + for _, e := range m.TransportSocketMatches { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.DnsFailureRefreshRate != nil { + l = m.DnsFailureRefreshRate.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UseTcpForDnsLookups { + n += 3 + } + if m.UpstreamHttpProtocolOptions != nil { + if size, ok := interface{}(m.UpstreamHttpProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UpstreamHttpProtocolOptions) + } + n 
+= 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TrackTimeoutBudgets { + n += 3 + } + if m.UpstreamConfig != nil { + if size, ok := interface{}(m.UpstreamConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UpstreamConfig) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TrackClusterStats != nil { + l = m.TrackClusterStats.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PreconnectPolicy != nil { + l = m.PreconnectPolicy.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectionPoolPerDownstreamConnection { + n += 3 + } + if m.DnsResolutionConfig != nil { + if size, ok := interface{}(m.DnsResolutionConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DnsResolutionConfig) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.WaitForWarmOnInit != nil { + l = (*wrapperspb.BoolValue)(m.WaitForWarmOnInit).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TypedDnsResolverConfig != nil { + if size, ok := interface{}(m.TypedDnsResolverConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TypedDnsResolverConfig) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.LrsReportEndpointMetrics) > 0 { + for _, s := range m.LrsReportEndpointMetrics { + l = len(s) + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Cluster_Type) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.Type)) + return n +} +func (m *Cluster_RingHashLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RingHashLbConfig != nil { + l = m.RingHashLbConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *Cluster_OriginalDstLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OriginalDstLbConfig != nil { + l = m.OriginalDstLbConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *Cluster_LeastRequestLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LeastRequestLbConfig != nil { + l = m.LeastRequestLbConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *Cluster_ClusterType) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClusterType != nil { + l = m.ClusterType.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *Cluster_MaglevLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaglevLbConfig != nil { + l = m.MaglevLbConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *Cluster_RoundRobinLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RoundRobinLbConfig != nil { + l = m.RoundRobinLbConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *LoadBalancingPolicy_Policy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedExtensionConfig != nil { + if size, ok := interface{}(m.TypedExtensionConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = 
proto.Size(m.TypedExtensionConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LoadBalancingPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Policies) > 0 { + for _, e := range m.Policies { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *UpstreamConnectionOptions_HappyEyeballsConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FirstAddressFamilyVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.FirstAddressFamilyVersion)) + } + if m.FirstAddressFamilyCount != nil { + l = (*wrapperspb.UInt32Value)(m.FirstAddressFamilyCount).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UpstreamConnectionOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TcpKeepalive != nil { + if size, ok := interface{}(m.TcpKeepalive).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TcpKeepalive) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SetLocalInterfaceNameOnUpstreamConnections { + n += 2 + } + if m.HappyEyeballsConfig != nil { + l = m.HappyEyeballsConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *TrackClusterStats) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TimeoutBudgets { + n += 2 + } + if m.RequestResponseSizes { + n += 2 + } + if m.PerEndpointStats { + n += 2 + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.go new file mode 100644 index 0000000000..42ddbe2bb3 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.go @@ -0,0 +1,207 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/cluster/v3/filter.proto + +package clusterv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Filter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the filter configuration. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. + // Note that Envoy's :ref:`downstream network + // filters ` are not valid upstream network filters. + // Only one of typed_config or config_discovery can be used. 
+ TypedConfig *anypb.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` + // Configuration source specifier for an extension configuration discovery + // service. In case of a failure and without the default configuration, the + // listener closes the connections. + // Only one of typed_config or config_discovery can be used. + ConfigDiscovery *v3.ExtensionConfigSource `protobuf:"bytes,3,opt,name=config_discovery,json=configDiscovery,proto3" json:"config_discovery,omitempty"` +} + +func (x *Filter) Reset() { + *x = Filter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_filter_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Filter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Filter) ProtoMessage() {} + +func (x *Filter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_filter_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Filter.ProtoReflect.Descriptor instead. +func (*Filter) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_filter_proto_rawDescGZIP(), []int{0} +} + +func (x *Filter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Filter) GetTypedConfig() *anypb.Any { + if x != nil { + return x.TypedConfig + } + return nil +} + +func (x *Filter) GetConfigDiscovery() *v3.ExtensionConfigSource { + if x != nil { + return x.ConfigDiscovery + } + return nil +} + +var File_envoy_config_cluster_v3_filter_proto protoreflect.FileDescriptor + +var file_envoy_config_cluster_v3_filter_proto_rawDesc = []byte{ + 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, + 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xda, 0x01, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, + 0x5f, 0x63, 
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x56, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x69, 0x73, 0x63, 0x6f, + 0x76, 0x65, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, + 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x3a, 0x22, 0x9a, 0xc5, 0x88, 0x1e, 0x1d, 0x0a, + 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x88, 0x01, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0b, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_cluster_v3_filter_proto_rawDescOnce sync.Once + file_envoy_config_cluster_v3_filter_proto_rawDescData = file_envoy_config_cluster_v3_filter_proto_rawDesc +) + +func file_envoy_config_cluster_v3_filter_proto_rawDescGZIP() []byte { + file_envoy_config_cluster_v3_filter_proto_rawDescOnce.Do(func() { + file_envoy_config_cluster_v3_filter_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_cluster_v3_filter_proto_rawDescData) + }) + return file_envoy_config_cluster_v3_filter_proto_rawDescData +} + +var file_envoy_config_cluster_v3_filter_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_cluster_v3_filter_proto_goTypes = []interface{}{ + (*Filter)(nil), // 0: envoy.config.cluster.v3.Filter + (*anypb.Any)(nil), // 1: google.protobuf.Any + (*v3.ExtensionConfigSource)(nil), // 2: envoy.config.core.v3.ExtensionConfigSource +} +var file_envoy_config_cluster_v3_filter_proto_depIdxs = []int32{ + 1, // 0: envoy.config.cluster.v3.Filter.typed_config:type_name -> google.protobuf.Any + 2, // 1: envoy.config.cluster.v3.Filter.config_discovery:type_name -> envoy.config.core.v3.ExtensionConfigSource + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_config_cluster_v3_filter_proto_init() } +func file_envoy_config_cluster_v3_filter_proto_init() { + if File_envoy_config_cluster_v3_filter_proto != nil { + 
return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_cluster_v3_filter_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Filter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_cluster_v3_filter_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_cluster_v3_filter_proto_goTypes, + DependencyIndexes: file_envoy_config_cluster_v3_filter_proto_depIdxs, + MessageInfos: file_envoy_config_cluster_v3_filter_proto_msgTypes, + }.Build() + File_envoy_config_cluster_v3_filter_proto = out.File + file_envoy_config_cluster_v3_filter_proto_rawDesc = nil + file_envoy_config_cluster_v3_filter_proto_goTypes = nil + file_envoy_config_cluster_v3_filter_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.validate.go new file mode 100644 index 0000000000..6de8120e08 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter.pb.validate.go @@ -0,0 +1,204 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/cluster/v3/filter.proto + +package clusterv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Filter with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Filter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Filter with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in FilterMultiError, or nil if none found. 
+func (m *Filter) ValidateAll() error { + return m.validate(true) +} + +func (m *Filter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := FilterValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetConfigDiscovery()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterValidationError{ + field: "ConfigDiscovery", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterValidationError{ + field: "ConfigDiscovery", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfigDiscovery()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterValidationError{ + field: "ConfigDiscovery", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return FilterMultiError(errors) + } + + return nil +} + +// FilterMultiError is an error wrapping multiple validation errors returned by +// Filter.ValidateAll() if the designated constraints aren't met. +type FilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FilterMultiError) AllErrors() []error { return m } + +// FilterValidationError is the validation error returned by Filter.Validate if +// the designated constraints aren't met. +type FilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e FilterValidationError) ErrorName() string { return "FilterValidationError" } + +// Error satisfies the builtin error interface +func (e FilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FilterValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter_vtproto.pb.go new file mode 100644 index 0000000000..f253344e6a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/filter_vtproto.pb.go @@ -0,0 +1,121 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/cluster/v3/filter.proto + +package clusterv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Filter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Filter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Filter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ConfigDiscovery != nil { + if vtmsg, ok := interface{}(m.ConfigDiscovery).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConfigDiscovery) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Filter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TypedConfig != nil { + l = 
(*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConfigDiscovery != nil { + if size, ok := interface{}(m.ConfigDiscovery).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ConfigDiscovery) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.go new file mode 100644 index 0000000000..531cbd0efc --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.go @@ -0,0 +1,641 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/cluster/v3/outlier_detection.proto + +package clusterv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// See the :ref:`architecture overview ` for +// more information on outlier detection. +// [#next-free-field: 26] +type OutlierDetection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of consecutive server-side error responses (for HTTP traffic, + // 5xx responses; for TCP traffic, connection failures; for Redis, failure to + // respond PONG; etc.) before a consecutive 5xx ejection occurs. Defaults to 5. + Consecutive_5Xx *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=consecutive_5xx,json=consecutive5xx,proto3" json:"consecutive_5xx,omitempty"` + // The time interval between ejection analysis sweeps. This can result in + // both new ejections as well as hosts being returned to service. Defaults + // to 10000ms or 10s. + Interval *durationpb.Duration `protobuf:"bytes,2,opt,name=interval,proto3" json:"interval,omitempty"` + // The base time that a host is ejected for. The real time is equal to the + // base time multiplied by the number of times the host has been ejected and is + // capped by :ref:`max_ejection_time`. + // Defaults to 30000ms or 30s. + BaseEjectionTime *durationpb.Duration `protobuf:"bytes,3,opt,name=base_ejection_time,json=baseEjectionTime,proto3" json:"base_ejection_time,omitempty"` + // The maximum % of an upstream cluster that can be ejected due to outlier detection. Defaults to 10% . + // Will eject at least one host regardless of the value if :ref:`always_eject_one_host` is enabled. + MaxEjectionPercent *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=max_ejection_percent,json=maxEjectionPercent,proto3" json:"max_ejection_percent,omitempty"` + // The % chance that a host will be actually ejected when an outlier status + // is detected through consecutive 5xx. 
This setting can be used to disable + // ejection or to ramp it up slowly. Defaults to 100. + EnforcingConsecutive_5Xx *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=enforcing_consecutive_5xx,json=enforcingConsecutive5xx,proto3" json:"enforcing_consecutive_5xx,omitempty"` + // The % chance that a host will be actually ejected when an outlier status + // is detected through success rate statistics. This setting can be used to + // disable ejection or to ramp it up slowly. Defaults to 100. + EnforcingSuccessRate *wrapperspb.UInt32Value `protobuf:"bytes,6,opt,name=enforcing_success_rate,json=enforcingSuccessRate,proto3" json:"enforcing_success_rate,omitempty"` + // The number of hosts in a cluster that must have enough request volume to + // detect success rate outliers. If the number of hosts is less than this + // setting, outlier detection via success rate statistics is not performed + // for any host in the cluster. Defaults to 5. + SuccessRateMinimumHosts *wrapperspb.UInt32Value `protobuf:"bytes,7,opt,name=success_rate_minimum_hosts,json=successRateMinimumHosts,proto3" json:"success_rate_minimum_hosts,omitempty"` + // The minimum number of total requests that must be collected in one + // interval (as defined by the interval duration above) to include this host + // in success rate based outlier detection. If the volume is lower than this + // setting, outlier detection via success rate statistics is not performed + // for that host. Defaults to 100. + SuccessRateRequestVolume *wrapperspb.UInt32Value `protobuf:"bytes,8,opt,name=success_rate_request_volume,json=successRateRequestVolume,proto3" json:"success_rate_request_volume,omitempty"` + // This factor is used to determine the ejection threshold for success rate + // outlier ejection. The ejection threshold is the difference between the + // mean success rate, and the product of this factor and the standard + // deviation of the mean success rate: mean - (stdev * + // success_rate_stdev_factor). This factor is divided by a thousand to get a + // double. That is, if the desired factor is 1.9, the runtime value should + // be 1900. Defaults to 1900. + SuccessRateStdevFactor *wrapperspb.UInt32Value `protobuf:"bytes,9,opt,name=success_rate_stdev_factor,json=successRateStdevFactor,proto3" json:"success_rate_stdev_factor,omitempty"` + // The number of consecutive gateway failures (502, 503, 504 status codes) + // before a consecutive gateway failure ejection occurs. Defaults to 5. + ConsecutiveGatewayFailure *wrapperspb.UInt32Value `protobuf:"bytes,10,opt,name=consecutive_gateway_failure,json=consecutiveGatewayFailure,proto3" json:"consecutive_gateway_failure,omitempty"` + // The % chance that a host will be actually ejected when an outlier status + // is detected through consecutive gateway failures. This setting can be + // used to disable ejection or to ramp it up slowly. Defaults to 0. + EnforcingConsecutiveGatewayFailure *wrapperspb.UInt32Value `protobuf:"bytes,11,opt,name=enforcing_consecutive_gateway_failure,json=enforcingConsecutiveGatewayFailure,proto3" json:"enforcing_consecutive_gateway_failure,omitempty"` + // Determines whether to distinguish local origin failures from external errors. If set to true + // the following configuration parameters are taken into account: + // :ref:`consecutive_local_origin_failure`, + // :ref:`enforcing_consecutive_local_origin_failure` + // and + // :ref:`enforcing_local_origin_success_rate`. + // Defaults to false. 
+ SplitExternalLocalOriginErrors bool `protobuf:"varint,12,opt,name=split_external_local_origin_errors,json=splitExternalLocalOriginErrors,proto3" json:"split_external_local_origin_errors,omitempty"` + // The number of consecutive locally originated failures before ejection + // occurs. Defaults to 5. Parameter takes effect only when + // :ref:`split_external_local_origin_errors` + // is set to true. + ConsecutiveLocalOriginFailure *wrapperspb.UInt32Value `protobuf:"bytes,13,opt,name=consecutive_local_origin_failure,json=consecutiveLocalOriginFailure,proto3" json:"consecutive_local_origin_failure,omitempty"` + // The % chance that a host will be actually ejected when an outlier status + // is detected through consecutive locally originated failures. This setting can be + // used to disable ejection or to ramp it up slowly. Defaults to 100. + // Parameter takes effect only when + // :ref:`split_external_local_origin_errors` + // is set to true. + EnforcingConsecutiveLocalOriginFailure *wrapperspb.UInt32Value `protobuf:"bytes,14,opt,name=enforcing_consecutive_local_origin_failure,json=enforcingConsecutiveLocalOriginFailure,proto3" json:"enforcing_consecutive_local_origin_failure,omitempty"` + // The % chance that a host will be actually ejected when an outlier status + // is detected through success rate statistics for locally originated errors. + // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. + // Parameter takes effect only when + // :ref:`split_external_local_origin_errors` + // is set to true. + EnforcingLocalOriginSuccessRate *wrapperspb.UInt32Value `protobuf:"bytes,15,opt,name=enforcing_local_origin_success_rate,json=enforcingLocalOriginSuccessRate,proto3" json:"enforcing_local_origin_success_rate,omitempty"` + // The failure percentage to use when determining failure percentage-based outlier detection. If + // the failure percentage of a given host is greater than or equal to this value, it will be + // ejected. Defaults to 85. + FailurePercentageThreshold *wrapperspb.UInt32Value `protobuf:"bytes,16,opt,name=failure_percentage_threshold,json=failurePercentageThreshold,proto3" json:"failure_percentage_threshold,omitempty"` + // The % chance that a host will be actually ejected when an outlier status is detected through + // failure percentage statistics. This setting can be used to disable ejection or to ramp it up + // slowly. Defaults to 0. + // + // [#next-major-version: setting this without setting failure_percentage_threshold should be + // invalid in v4.] + EnforcingFailurePercentage *wrapperspb.UInt32Value `protobuf:"bytes,17,opt,name=enforcing_failure_percentage,json=enforcingFailurePercentage,proto3" json:"enforcing_failure_percentage,omitempty"` + // The % chance that a host will be actually ejected when an outlier status is detected through + // local-origin failure percentage statistics. This setting can be used to disable ejection or to + // ramp it up slowly. Defaults to 0. + EnforcingFailurePercentageLocalOrigin *wrapperspb.UInt32Value `protobuf:"bytes,18,opt,name=enforcing_failure_percentage_local_origin,json=enforcingFailurePercentageLocalOrigin,proto3" json:"enforcing_failure_percentage_local_origin,omitempty"` + // The minimum number of hosts in a cluster in order to perform failure percentage-based ejection. + // If the total number of hosts in the cluster is less than this value, failure percentage-based + // ejection will not be performed. Defaults to 5. 
+ FailurePercentageMinimumHosts *wrapperspb.UInt32Value `protobuf:"bytes,19,opt,name=failure_percentage_minimum_hosts,json=failurePercentageMinimumHosts,proto3" json:"failure_percentage_minimum_hosts,omitempty"` + // The minimum number of total requests that must be collected in one interval (as defined by the + // interval duration above) to perform failure percentage-based ejection for this host. If the + // volume is lower than this setting, failure percentage-based ejection will not be performed for + // this host. Defaults to 50. + FailurePercentageRequestVolume *wrapperspb.UInt32Value `protobuf:"bytes,20,opt,name=failure_percentage_request_volume,json=failurePercentageRequestVolume,proto3" json:"failure_percentage_request_volume,omitempty"` + // The maximum time that a host is ejected for. See :ref:`base_ejection_time` + // for more information. If not specified, the default value (300000ms or 300s) or + // :ref:`base_ejection_time` value is applied, whatever is larger. + MaxEjectionTime *durationpb.Duration `protobuf:"bytes,21,opt,name=max_ejection_time,json=maxEjectionTime,proto3" json:"max_ejection_time,omitempty"` + // The maximum amount of jitter to add to the ejection time, in order to prevent + // a 'thundering herd' effect where all proxies try to reconnect to host at the same time. + // See :ref:`max_ejection_time_jitter` + // Defaults to 0s. + MaxEjectionTimeJitter *durationpb.Duration `protobuf:"bytes,22,opt,name=max_ejection_time_jitter,json=maxEjectionTimeJitter,proto3" json:"max_ejection_time_jitter,omitempty"` + // If active health checking is enabled and a host is ejected by outlier detection, a successful active health check + // unejects the host by default and considers it as healthy. Unejection also clears all the outlier detection counters. + // To change this default behavior set this config to “false“ where active health checking will not uneject the host. + // Defaults to true. + SuccessfulActiveHealthCheckUnejectHost *wrapperspb.BoolValue `protobuf:"bytes,23,opt,name=successful_active_health_check_uneject_host,json=successfulActiveHealthCheckUnejectHost,proto3" json:"successful_active_health_check_uneject_host,omitempty"` + // Set of host's passive monitors. + // [#not-implemented-hide:] + Monitors []*v3.TypedExtensionConfig `protobuf:"bytes,24,rep,name=monitors,proto3" json:"monitors,omitempty"` + // If enabled, at least one host is ejected regardless of the value of :ref:`max_ejection_percent`. + // Defaults to false. + AlwaysEjectOneHost *wrapperspb.BoolValue `protobuf:"bytes,25,opt,name=always_eject_one_host,json=alwaysEjectOneHost,proto3" json:"always_eject_one_host,omitempty"` +} + +func (x *OutlierDetection) Reset() { + *x = OutlierDetection{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_cluster_v3_outlier_detection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OutlierDetection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OutlierDetection) ProtoMessage() {} + +func (x *OutlierDetection) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_cluster_v3_outlier_detection_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OutlierDetection.ProtoReflect.Descriptor instead. 
+func (*OutlierDetection) Descriptor() ([]byte, []int) { + return file_envoy_config_cluster_v3_outlier_detection_proto_rawDescGZIP(), []int{0} +} + +func (x *OutlierDetection) GetConsecutive_5Xx() *wrapperspb.UInt32Value { + if x != nil { + return x.Consecutive_5Xx + } + return nil +} + +func (x *OutlierDetection) GetInterval() *durationpb.Duration { + if x != nil { + return x.Interval + } + return nil +} + +func (x *OutlierDetection) GetBaseEjectionTime() *durationpb.Duration { + if x != nil { + return x.BaseEjectionTime + } + return nil +} + +func (x *OutlierDetection) GetMaxEjectionPercent() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxEjectionPercent + } + return nil +} + +func (x *OutlierDetection) GetEnforcingConsecutive_5Xx() *wrapperspb.UInt32Value { + if x != nil { + return x.EnforcingConsecutive_5Xx + } + return nil +} + +func (x *OutlierDetection) GetEnforcingSuccessRate() *wrapperspb.UInt32Value { + if x != nil { + return x.EnforcingSuccessRate + } + return nil +} + +func (x *OutlierDetection) GetSuccessRateMinimumHosts() *wrapperspb.UInt32Value { + if x != nil { + return x.SuccessRateMinimumHosts + } + return nil +} + +func (x *OutlierDetection) GetSuccessRateRequestVolume() *wrapperspb.UInt32Value { + if x != nil { + return x.SuccessRateRequestVolume + } + return nil +} + +func (x *OutlierDetection) GetSuccessRateStdevFactor() *wrapperspb.UInt32Value { + if x != nil { + return x.SuccessRateStdevFactor + } + return nil +} + +func (x *OutlierDetection) GetConsecutiveGatewayFailure() *wrapperspb.UInt32Value { + if x != nil { + return x.ConsecutiveGatewayFailure + } + return nil +} + +func (x *OutlierDetection) GetEnforcingConsecutiveGatewayFailure() *wrapperspb.UInt32Value { + if x != nil { + return x.EnforcingConsecutiveGatewayFailure + } + return nil +} + +func (x *OutlierDetection) GetSplitExternalLocalOriginErrors() bool { + if x != nil { + return x.SplitExternalLocalOriginErrors + } + return false +} + +func (x *OutlierDetection) GetConsecutiveLocalOriginFailure() *wrapperspb.UInt32Value { + if x != nil { + return x.ConsecutiveLocalOriginFailure + } + return nil +} + +func (x *OutlierDetection) GetEnforcingConsecutiveLocalOriginFailure() *wrapperspb.UInt32Value { + if x != nil { + return x.EnforcingConsecutiveLocalOriginFailure + } + return nil +} + +func (x *OutlierDetection) GetEnforcingLocalOriginSuccessRate() *wrapperspb.UInt32Value { + if x != nil { + return x.EnforcingLocalOriginSuccessRate + } + return nil +} + +func (x *OutlierDetection) GetFailurePercentageThreshold() *wrapperspb.UInt32Value { + if x != nil { + return x.FailurePercentageThreshold + } + return nil +} + +func (x *OutlierDetection) GetEnforcingFailurePercentage() *wrapperspb.UInt32Value { + if x != nil { + return x.EnforcingFailurePercentage + } + return nil +} + +func (x *OutlierDetection) GetEnforcingFailurePercentageLocalOrigin() *wrapperspb.UInt32Value { + if x != nil { + return x.EnforcingFailurePercentageLocalOrigin + } + return nil +} + +func (x *OutlierDetection) GetFailurePercentageMinimumHosts() *wrapperspb.UInt32Value { + if x != nil { + return x.FailurePercentageMinimumHosts + } + return nil +} + +func (x *OutlierDetection) GetFailurePercentageRequestVolume() *wrapperspb.UInt32Value { + if x != nil { + return x.FailurePercentageRequestVolume + } + return nil +} + +func (x *OutlierDetection) GetMaxEjectionTime() *durationpb.Duration { + if x != nil { + return x.MaxEjectionTime + } + return nil +} + +func (x *OutlierDetection) GetMaxEjectionTimeJitter() *durationpb.Duration { + if 
x != nil { + return x.MaxEjectionTimeJitter + } + return nil +} + +func (x *OutlierDetection) GetSuccessfulActiveHealthCheckUnejectHost() *wrapperspb.BoolValue { + if x != nil { + return x.SuccessfulActiveHealthCheckUnejectHost + } + return nil +} + +func (x *OutlierDetection) GetMonitors() []*v3.TypedExtensionConfig { + if x != nil { + return x.Monitors + } + return nil +} + +func (x *OutlierDetection) GetAlwaysEjectOneHost() *wrapperspb.BoolValue { + if x != nil { + return x.AlwaysEjectOneHost + } + return nil +} + +var File_envoy_config_cluster_v3_outlier_detection_proto protoreflect.FileDescriptor + +var file_envoy_config_cluster_v3_outlier_detection_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, + 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x17, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, + 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa1, 0x13, 0x0a, 0x10, + 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x45, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x35, 0x78, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x76, 0x65, 0x35, 0x78, 0x78, 0x12, 0x3f, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x08, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x51, 0x0a, 0x12, 0x62, 0x61, 0x73, 0x65, + 0x5f, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 
0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x10, 0x62, 0x61, 0x73, 0x65, 0x45, + 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x14, 0x6d, + 0x61, 0x78, 0x5f, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x63, + 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, + 0x52, 0x12, 0x6d, 0x61, 0x78, 0x45, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, + 0x63, 0x65, 0x6e, 0x74, 0x12, 0x61, 0x0a, 0x19, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x35, 0x78, + 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x17, + 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x76, 0x65, 0x35, 0x78, 0x78, 0x12, 0x5b, 0x0a, 0x16, 0x65, 0x6e, 0x66, 0x6f, 0x72, + 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x14, + 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x52, 0x61, 0x74, 0x65, 0x12, 0x59, 0x0a, 0x1a, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, + 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x68, 0x6f, 0x73, + 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x17, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, + 0x61, 0x74, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x12, + 0x5b, 0x0a, 0x1b, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x18, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x19, + 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x64, + 0x65, 0x76, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x16, 0x73, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 
0x74, 0x65, 0x53, 0x74, 0x64, 0x65, 0x76, 0x46, + 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x5c, 0x0a, 0x1b, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x5f, 0x66, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, + 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x19, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, + 0x75, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x46, 0x61, 0x69, 0x6c, + 0x75, 0x72, 0x65, 0x12, 0x78, 0x0a, 0x25, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, + 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x74, + 0x65, 0x77, 0x61, 0x79, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x22, 0x65, 0x6e, 0x66, 0x6f, 0x72, + 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x47, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x4a, 0x0a, + 0x22, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1e, 0x73, 0x70, 0x6c, 0x69, 0x74, + 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, + 0x67, 0x69, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x65, 0x0a, 0x20, 0x63, 0x6f, 0x6e, + 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, + 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x1d, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x4c, 0x6f, + 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, + 0x12, 0x81, 0x01, 0x0a, 0x2a, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x63, + 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x26, 0x65, 0x6e, + 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x76, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x46, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x12, 0x73, 0x0a, 0x23, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x73, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 
0x0f, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x1f, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, + 0x69, 0x6e, 0x67, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x53, 0x75, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x12, 0x67, 0x0a, 0x1c, 0x66, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x5f, + 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x1a, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x50, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, + 0x6c, 0x64, 0x12, 0x67, 0x0a, 0x1c, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x5f, + 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, + 0x67, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, + 0x1a, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, + 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x12, 0x7f, 0x0a, 0x29, 0x65, + 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, + 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x25, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x69, 0x6e, 0x67, + 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, + 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x65, 0x0a, 0x20, + 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, + 0x67, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x73, + 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1d, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x50, 0x65, 0x72, + 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x48, 0x6f, + 0x73, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x21, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x70, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 
0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1e, 0x66, 0x61, + 0x69, 0x6c, 0x75, 0x72, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x4f, 0x0a, 0x11, + 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0f, 0x6d, 0x61, + 0x78, 0x45, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x52, 0x0a, + 0x18, 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x45, + 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x4a, 0x69, 0x74, 0x74, 0x65, + 0x72, 0x12, 0x77, 0x0a, 0x2b, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x5f, + 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x6e, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, + 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x26, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x41, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, + 0x6e, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x46, 0x0a, 0x08, 0x6d, 0x6f, + 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x18, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x08, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x73, 0x12, 0x4d, 0x0a, 0x15, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x5f, 0x65, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x6f, 0x6e, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, 0x61, + 0x6c, 0x77, 0x61, 0x79, 0x73, 0x45, 0x6a, 0x65, 0x63, 0x74, 0x4f, 0x6e, 0x65, 0x48, 0x6f, 0x73, + 0x74, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4f, + 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x92, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x42, 0x15, 0x4f, 
0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_cluster_v3_outlier_detection_proto_rawDescOnce sync.Once + file_envoy_config_cluster_v3_outlier_detection_proto_rawDescData = file_envoy_config_cluster_v3_outlier_detection_proto_rawDesc +) + +func file_envoy_config_cluster_v3_outlier_detection_proto_rawDescGZIP() []byte { + file_envoy_config_cluster_v3_outlier_detection_proto_rawDescOnce.Do(func() { + file_envoy_config_cluster_v3_outlier_detection_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_cluster_v3_outlier_detection_proto_rawDescData) + }) + return file_envoy_config_cluster_v3_outlier_detection_proto_rawDescData +} + +var file_envoy_config_cluster_v3_outlier_detection_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_cluster_v3_outlier_detection_proto_goTypes = []interface{}{ + (*OutlierDetection)(nil), // 0: envoy.config.cluster.v3.OutlierDetection + (*wrapperspb.UInt32Value)(nil), // 1: google.protobuf.UInt32Value + (*durationpb.Duration)(nil), // 2: google.protobuf.Duration + (*wrapperspb.BoolValue)(nil), // 3: google.protobuf.BoolValue + (*v3.TypedExtensionConfig)(nil), // 4: envoy.config.core.v3.TypedExtensionConfig +} +var file_envoy_config_cluster_v3_outlier_detection_proto_depIdxs = []int32{ + 1, // 0: envoy.config.cluster.v3.OutlierDetection.consecutive_5xx:type_name -> google.protobuf.UInt32Value + 2, // 1: envoy.config.cluster.v3.OutlierDetection.interval:type_name -> google.protobuf.Duration + 2, // 2: envoy.config.cluster.v3.OutlierDetection.base_ejection_time:type_name -> google.protobuf.Duration + 1, // 3: envoy.config.cluster.v3.OutlierDetection.max_ejection_percent:type_name -> google.protobuf.UInt32Value + 1, // 4: envoy.config.cluster.v3.OutlierDetection.enforcing_consecutive_5xx:type_name -> google.protobuf.UInt32Value + 1, // 5: envoy.config.cluster.v3.OutlierDetection.enforcing_success_rate:type_name -> google.protobuf.UInt32Value + 1, // 6: envoy.config.cluster.v3.OutlierDetection.success_rate_minimum_hosts:type_name -> google.protobuf.UInt32Value + 1, // 7: envoy.config.cluster.v3.OutlierDetection.success_rate_request_volume:type_name -> google.protobuf.UInt32Value + 1, // 8: envoy.config.cluster.v3.OutlierDetection.success_rate_stdev_factor:type_name -> google.protobuf.UInt32Value + 1, // 9: envoy.config.cluster.v3.OutlierDetection.consecutive_gateway_failure:type_name -> google.protobuf.UInt32Value + 1, // 10: envoy.config.cluster.v3.OutlierDetection.enforcing_consecutive_gateway_failure:type_name -> google.protobuf.UInt32Value + 1, // 11: envoy.config.cluster.v3.OutlierDetection.consecutive_local_origin_failure:type_name -> google.protobuf.UInt32Value + 1, // 12: envoy.config.cluster.v3.OutlierDetection.enforcing_consecutive_local_origin_failure:type_name -> google.protobuf.UInt32Value + 1, // 13: envoy.config.cluster.v3.OutlierDetection.enforcing_local_origin_success_rate:type_name -> google.protobuf.UInt32Value 
+ 1, // 14: envoy.config.cluster.v3.OutlierDetection.failure_percentage_threshold:type_name -> google.protobuf.UInt32Value + 1, // 15: envoy.config.cluster.v3.OutlierDetection.enforcing_failure_percentage:type_name -> google.protobuf.UInt32Value + 1, // 16: envoy.config.cluster.v3.OutlierDetection.enforcing_failure_percentage_local_origin:type_name -> google.protobuf.UInt32Value + 1, // 17: envoy.config.cluster.v3.OutlierDetection.failure_percentage_minimum_hosts:type_name -> google.protobuf.UInt32Value + 1, // 18: envoy.config.cluster.v3.OutlierDetection.failure_percentage_request_volume:type_name -> google.protobuf.UInt32Value + 2, // 19: envoy.config.cluster.v3.OutlierDetection.max_ejection_time:type_name -> google.protobuf.Duration + 2, // 20: envoy.config.cluster.v3.OutlierDetection.max_ejection_time_jitter:type_name -> google.protobuf.Duration + 3, // 21: envoy.config.cluster.v3.OutlierDetection.successful_active_health_check_uneject_host:type_name -> google.protobuf.BoolValue + 4, // 22: envoy.config.cluster.v3.OutlierDetection.monitors:type_name -> envoy.config.core.v3.TypedExtensionConfig + 3, // 23: envoy.config.cluster.v3.OutlierDetection.always_eject_one_host:type_name -> google.protobuf.BoolValue + 24, // [24:24] is the sub-list for method output_type + 24, // [24:24] is the sub-list for method input_type + 24, // [24:24] is the sub-list for extension type_name + 24, // [24:24] is the sub-list for extension extendee + 0, // [0:24] is the sub-list for field type_name +} + +func init() { file_envoy_config_cluster_v3_outlier_detection_proto_init() } +func file_envoy_config_cluster_v3_outlier_detection_proto_init() { + if File_envoy_config_cluster_v3_outlier_detection_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_cluster_v3_outlier_detection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OutlierDetection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_cluster_v3_outlier_detection_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_cluster_v3_outlier_detection_proto_goTypes, + DependencyIndexes: file_envoy_config_cluster_v3_outlier_detection_proto_depIdxs, + MessageInfos: file_envoy_config_cluster_v3_outlier_detection_proto_msgTypes, + }.Build() + File_envoy_config_cluster_v3_outlier_detection_proto = out.File + file_envoy_config_cluster_v3_outlier_detection_proto_rawDesc = nil + file_envoy_config_cluster_v3_outlier_detection_proto_goTypes = nil + file_envoy_config_cluster_v3_outlier_detection_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.validate.go new file mode 100644 index 0000000000..966821457d --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection.pb.validate.go @@ -0,0 +1,717 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/config/cluster/v3/outlier_detection.proto + +package clusterv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on OutlierDetection with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *OutlierDetection) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on OutlierDetection with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// OutlierDetectionMultiError, or nil if none found. +func (m *OutlierDetection) ValidateAll() error { + return m.validate(true) +} + +func (m *OutlierDetection) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetConsecutive_5Xx()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "Consecutive_5Xx", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "Consecutive_5Xx", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConsecutive_5Xx()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: "Consecutive_5Xx", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if d := m.GetInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = OutlierDetectionValidationError{ + field: "Interval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := OutlierDetectionValidationError{ + field: "Interval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if d := m.GetBaseEjectionTime(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = OutlierDetectionValidationError{ + field: "BaseEjectionTime", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := OutlierDetectionValidationError{ + field: "BaseEjectionTime", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if wrapper := m.GetMaxEjectionPercent(); wrapper != nil { + + if wrapper.GetValue() > 100 { + err := OutlierDetectionValidationError{ + field: "MaxEjectionPercent", + reason: "value must be less than or equal to 100", + } + if !all { + 
return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetEnforcingConsecutive_5Xx(); wrapper != nil { + + if wrapper.GetValue() > 100 { + err := OutlierDetectionValidationError{ + field: "EnforcingConsecutive_5Xx", + reason: "value must be less than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetEnforcingSuccessRate(); wrapper != nil { + + if wrapper.GetValue() > 100 { + err := OutlierDetectionValidationError{ + field: "EnforcingSuccessRate", + reason: "value must be less than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetSuccessRateMinimumHosts()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "SuccessRateMinimumHosts", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "SuccessRateMinimumHosts", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSuccessRateMinimumHosts()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: "SuccessRateMinimumHosts", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSuccessRateRequestVolume()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "SuccessRateRequestVolume", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "SuccessRateRequestVolume", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSuccessRateRequestVolume()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: "SuccessRateRequestVolume", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSuccessRateStdevFactor()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "SuccessRateStdevFactor", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "SuccessRateStdevFactor", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSuccessRateStdevFactor()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: "SuccessRateStdevFactor", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetConsecutiveGatewayFailure()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "ConsecutiveGatewayFailure", + reason: 
"embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "ConsecutiveGatewayFailure", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConsecutiveGatewayFailure()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: "ConsecutiveGatewayFailure", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if wrapper := m.GetEnforcingConsecutiveGatewayFailure(); wrapper != nil { + + if wrapper.GetValue() > 100 { + err := OutlierDetectionValidationError{ + field: "EnforcingConsecutiveGatewayFailure", + reason: "value must be less than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + // no validation rules for SplitExternalLocalOriginErrors + + if all { + switch v := interface{}(m.GetConsecutiveLocalOriginFailure()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "ConsecutiveLocalOriginFailure", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "ConsecutiveLocalOriginFailure", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConsecutiveLocalOriginFailure()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: "ConsecutiveLocalOriginFailure", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if wrapper := m.GetEnforcingConsecutiveLocalOriginFailure(); wrapper != nil { + + if wrapper.GetValue() > 100 { + err := OutlierDetectionValidationError{ + field: "EnforcingConsecutiveLocalOriginFailure", + reason: "value must be less than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetEnforcingLocalOriginSuccessRate(); wrapper != nil { + + if wrapper.GetValue() > 100 { + err := OutlierDetectionValidationError{ + field: "EnforcingLocalOriginSuccessRate", + reason: "value must be less than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetFailurePercentageThreshold(); wrapper != nil { + + if wrapper.GetValue() > 100 { + err := OutlierDetectionValidationError{ + field: "FailurePercentageThreshold", + reason: "value must be less than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetEnforcingFailurePercentage(); wrapper != nil { + + if wrapper.GetValue() > 100 { + err := OutlierDetectionValidationError{ + field: "EnforcingFailurePercentage", + reason: "value must be less than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetEnforcingFailurePercentageLocalOrigin(); wrapper != nil { + + if wrapper.GetValue() > 100 { + err := OutlierDetectionValidationError{ + field: "EnforcingFailurePercentageLocalOrigin", + reason: "value must be less than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := 
interface{}(m.GetFailurePercentageMinimumHosts()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "FailurePercentageMinimumHosts", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "FailurePercentageMinimumHosts", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFailurePercentageMinimumHosts()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: "FailurePercentageMinimumHosts", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetFailurePercentageRequestVolume()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "FailurePercentageRequestVolume", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "FailurePercentageRequestVolume", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFailurePercentageRequestVolume()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: "FailurePercentageRequestVolume", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if d := m.GetMaxEjectionTime(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = OutlierDetectionValidationError{ + field: "MaxEjectionTime", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := OutlierDetectionValidationError{ + field: "MaxEjectionTime", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if all { + switch v := interface{}(m.GetMaxEjectionTimeJitter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "MaxEjectionTimeJitter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "MaxEjectionTimeJitter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxEjectionTimeJitter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: "MaxEjectionTimeJitter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSuccessfulActiveHealthCheckUnejectHost()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "SuccessfulActiveHealthCheckUnejectHost", + reason: "embedded message failed validation", 
+ cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "SuccessfulActiveHealthCheckUnejectHost", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSuccessfulActiveHealthCheckUnejectHost()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: "SuccessfulActiveHealthCheckUnejectHost", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetMonitors() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: fmt.Sprintf("Monitors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: fmt.Sprintf("Monitors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: fmt.Sprintf("Monitors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetAlwaysEjectOneHost()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "AlwaysEjectOneHost", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutlierDetectionValidationError{ + field: "AlwaysEjectOneHost", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAlwaysEjectOneHost()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutlierDetectionValidationError{ + field: "AlwaysEjectOneHost", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return OutlierDetectionMultiError(errors) + } + + return nil +} + +// OutlierDetectionMultiError is an error wrapping multiple validation errors +// returned by OutlierDetection.ValidateAll() if the designated constraints +// aren't met. +type OutlierDetectionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m OutlierDetectionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m OutlierDetectionMultiError) AllErrors() []error { return m } + +// OutlierDetectionValidationError is the validation error returned by +// OutlierDetection.Validate if the designated constraints aren't met. +type OutlierDetectionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e OutlierDetectionValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e OutlierDetectionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e OutlierDetectionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e OutlierDetectionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e OutlierDetectionValidationError) ErrorName() string { return "OutlierDetectionValidationError" } + +// Error satisfies the builtin error interface +func (e OutlierDetectionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sOutlierDetection.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = OutlierDetectionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = OutlierDetectionValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection_vtproto.pb.go new file mode 100644 index 0000000000..f837d56bd0 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/outlier_detection_vtproto.pb.go @@ -0,0 +1,456 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/cluster/v3/outlier_detection.proto + +package clusterv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *OutlierDetection) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OutlierDetection) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OutlierDetection) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AlwaysEjectOneHost != nil { + size, err := (*wrapperspb.BoolValue)(m.AlwaysEjectOneHost).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + if len(m.Monitors) > 0 { + for iNdEx := len(m.Monitors) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Monitors[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Monitors[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + } + if m.SuccessfulActiveHealthCheckUnejectHost != nil { + size, err := (*wrapperspb.BoolValue)(m.SuccessfulActiveHealthCheckUnejectHost).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if m.MaxEjectionTimeJitter != nil { + size, err := (*durationpb.Duration)(m.MaxEjectionTimeJitter).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if m.MaxEjectionTime != nil { + size, err := (*durationpb.Duration)(m.MaxEjectionTime).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if m.FailurePercentageRequestVolume != nil { + size, err := (*wrapperspb.UInt32Value)(m.FailurePercentageRequestVolume).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if m.FailurePercentageMinimumHosts != nil { + size, err := (*wrapperspb.UInt32Value)(m.FailurePercentageMinimumHosts).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if m.EnforcingFailurePercentageLocalOrigin != nil { + size, err := (*wrapperspb.UInt32Value)(m.EnforcingFailurePercentageLocalOrigin).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } 
+ if m.EnforcingFailurePercentage != nil { + size, err := (*wrapperspb.UInt32Value)(m.EnforcingFailurePercentage).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.FailurePercentageThreshold != nil { + size, err := (*wrapperspb.UInt32Value)(m.FailurePercentageThreshold).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.EnforcingLocalOriginSuccessRate != nil { + size, err := (*wrapperspb.UInt32Value)(m.EnforcingLocalOriginSuccessRate).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + if m.EnforcingConsecutiveLocalOriginFailure != nil { + size, err := (*wrapperspb.UInt32Value)(m.EnforcingConsecutiveLocalOriginFailure).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if m.ConsecutiveLocalOriginFailure != nil { + size, err := (*wrapperspb.UInt32Value)(m.ConsecutiveLocalOriginFailure).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + if m.SplitExternalLocalOriginErrors { + i-- + if m.SplitExternalLocalOriginErrors { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + } + if m.EnforcingConsecutiveGatewayFailure != nil { + size, err := (*wrapperspb.UInt32Value)(m.EnforcingConsecutiveGatewayFailure).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if m.ConsecutiveGatewayFailure != nil { + size, err := (*wrapperspb.UInt32Value)(m.ConsecutiveGatewayFailure).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if m.SuccessRateStdevFactor != nil { + size, err := (*wrapperspb.UInt32Value)(m.SuccessRateStdevFactor).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if m.SuccessRateRequestVolume != nil { + size, err := (*wrapperspb.UInt32Value)(m.SuccessRateRequestVolume).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.SuccessRateMinimumHosts != nil { + size, err := (*wrapperspb.UInt32Value)(m.SuccessRateMinimumHosts).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.EnforcingSuccessRate != nil { + size, err := (*wrapperspb.UInt32Value)(m.EnforcingSuccessRate).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.EnforcingConsecutive_5Xx != nil { + size, err := (*wrapperspb.UInt32Value)(m.EnforcingConsecutive_5Xx).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.MaxEjectionPercent != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxEjectionPercent).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.BaseEjectionTime != nil { + size, err := (*durationpb.Duration)(m.BaseEjectionTime).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.Interval != nil { + size, err := (*durationpb.Duration)(m.Interval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Consecutive_5Xx != nil { + size, err := (*wrapperspb.UInt32Value)(m.Consecutive_5Xx).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OutlierDetection) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Consecutive_5Xx != nil { + l = (*wrapperspb.UInt32Value)(m.Consecutive_5Xx).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Interval != nil { + l = (*durationpb.Duration)(m.Interval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.BaseEjectionTime != nil { + l = (*durationpb.Duration)(m.BaseEjectionTime).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxEjectionPercent != nil { + l = (*wrapperspb.UInt32Value)(m.MaxEjectionPercent).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnforcingConsecutive_5Xx != nil { + l = (*wrapperspb.UInt32Value)(m.EnforcingConsecutive_5Xx).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnforcingSuccessRate != nil { + l = (*wrapperspb.UInt32Value)(m.EnforcingSuccessRate).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SuccessRateMinimumHosts != nil { + l = (*wrapperspb.UInt32Value)(m.SuccessRateMinimumHosts).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SuccessRateRequestVolume != nil { + l = (*wrapperspb.UInt32Value)(m.SuccessRateRequestVolume).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SuccessRateStdevFactor != nil { + l = (*wrapperspb.UInt32Value)(m.SuccessRateStdevFactor).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConsecutiveGatewayFailure != nil { + l = (*wrapperspb.UInt32Value)(m.ConsecutiveGatewayFailure).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnforcingConsecutiveGatewayFailure != nil { + l = (*wrapperspb.UInt32Value)(m.EnforcingConsecutiveGatewayFailure).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SplitExternalLocalOriginErrors { + n += 2 + } + if m.ConsecutiveLocalOriginFailure != nil { + l = (*wrapperspb.UInt32Value)(m.ConsecutiveLocalOriginFailure).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnforcingConsecutiveLocalOriginFailure != nil { + l = (*wrapperspb.UInt32Value)(m.EnforcingConsecutiveLocalOriginFailure).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnforcingLocalOriginSuccessRate != nil { + l = (*wrapperspb.UInt32Value)(m.EnforcingLocalOriginSuccessRate).SizeVT() 
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FailurePercentageThreshold != nil { + l = (*wrapperspb.UInt32Value)(m.FailurePercentageThreshold).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnforcingFailurePercentage != nil { + l = (*wrapperspb.UInt32Value)(m.EnforcingFailurePercentage).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnforcingFailurePercentageLocalOrigin != nil { + l = (*wrapperspb.UInt32Value)(m.EnforcingFailurePercentageLocalOrigin).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FailurePercentageMinimumHosts != nil { + l = (*wrapperspb.UInt32Value)(m.FailurePercentageMinimumHosts).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FailurePercentageRequestVolume != nil { + l = (*wrapperspb.UInt32Value)(m.FailurePercentageRequestVolume).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxEjectionTime != nil { + l = (*durationpb.Duration)(m.MaxEjectionTime).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxEjectionTimeJitter != nil { + l = (*durationpb.Duration)(m.MaxEjectionTimeJitter).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SuccessfulActiveHealthCheckUnejectHost != nil { + l = (*wrapperspb.BoolValue)(m.SuccessfulActiveHealthCheckUnejectHost).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Monitors) > 0 { + for _, e := range m.Monitors { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.AlwaysEjectOneHost != nil { + l = (*wrapperspb.BoolValue)(m.AlwaysEjectOneHost).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.go new file mode 100644 index 0000000000..659879f9d7 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.go @@ -0,0 +1,1778 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/common/matcher/v3/matcher.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v31 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v32 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A matcher, which may traverse a matching tree in order to result in a match action. +// During matching, the tree will be traversed until a match is found, or if no match +// is found the action specified by the most specific on_no_match will be evaluated. 
+// As an on_no_match might result in another matching tree being evaluated, this process +// might repeat several times until the final OnMatch (or no match) is decided. +// +// .. note:: +// +// Please use the syntactically equivalent :ref:`matching API ` +type Matcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to MatcherType: + // + // *Matcher_MatcherList_ + // *Matcher_MatcherTree_ + MatcherType isMatcher_MatcherType `protobuf_oneof:"matcher_type"` + // Optional OnMatch to use if the matcher failed. + // If specified, the OnMatch is used, and the matcher is considered + // to have matched. + // If not specified, the matcher is considered not to have matched. + OnNoMatch *Matcher_OnMatch `protobuf:"bytes,3,opt,name=on_no_match,json=onNoMatch,proto3" json:"on_no_match,omitempty"` +} + +func (x *Matcher) Reset() { + *x = Matcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher) ProtoMessage() {} + +func (x *Matcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher.ProtoReflect.Descriptor instead. +func (*Matcher) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{0} +} + +func (m *Matcher) GetMatcherType() isMatcher_MatcherType { + if m != nil { + return m.MatcherType + } + return nil +} + +func (x *Matcher) GetMatcherList() *Matcher_MatcherList { + if x, ok := x.GetMatcherType().(*Matcher_MatcherList_); ok { + return x.MatcherList + } + return nil +} + +func (x *Matcher) GetMatcherTree() *Matcher_MatcherTree { + if x, ok := x.GetMatcherType().(*Matcher_MatcherTree_); ok { + return x.MatcherTree + } + return nil +} + +func (x *Matcher) GetOnNoMatch() *Matcher_OnMatch { + if x != nil { + return x.OnNoMatch + } + return nil +} + +type isMatcher_MatcherType interface { + isMatcher_MatcherType() +} + +type Matcher_MatcherList_ struct { + // A linear list of matchers to evaluate. + MatcherList *Matcher_MatcherList `protobuf:"bytes,1,opt,name=matcher_list,json=matcherList,proto3,oneof"` +} + +type Matcher_MatcherTree_ struct { + // A match tree to evaluate. + MatcherTree *Matcher_MatcherTree `protobuf:"bytes,2,opt,name=matcher_tree,json=matcherTree,proto3,oneof"` +} + +func (*Matcher_MatcherList_) isMatcher_MatcherType() {} + +func (*Matcher_MatcherTree_) isMatcher_MatcherType() {} + +// Match configuration. This is a recursive structure which allows complex nested match +// configurations to be built using various logical operators. 
+// [#next-free-field: 11] +type MatchPredicate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Rule: + // + // *MatchPredicate_OrMatch + // *MatchPredicate_AndMatch + // *MatchPredicate_NotMatch + // *MatchPredicate_AnyMatch + // *MatchPredicate_HttpRequestHeadersMatch + // *MatchPredicate_HttpRequestTrailersMatch + // *MatchPredicate_HttpResponseHeadersMatch + // *MatchPredicate_HttpResponseTrailersMatch + // *MatchPredicate_HttpRequestGenericBodyMatch + // *MatchPredicate_HttpResponseGenericBodyMatch + Rule isMatchPredicate_Rule `protobuf_oneof:"rule"` +} + +func (x *MatchPredicate) Reset() { + *x = MatchPredicate{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MatchPredicate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MatchPredicate) ProtoMessage() {} + +func (x *MatchPredicate) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MatchPredicate.ProtoReflect.Descriptor instead. +func (*MatchPredicate) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{1} +} + +func (m *MatchPredicate) GetRule() isMatchPredicate_Rule { + if m != nil { + return m.Rule + } + return nil +} + +func (x *MatchPredicate) GetOrMatch() *MatchPredicate_MatchSet { + if x, ok := x.GetRule().(*MatchPredicate_OrMatch); ok { + return x.OrMatch + } + return nil +} + +func (x *MatchPredicate) GetAndMatch() *MatchPredicate_MatchSet { + if x, ok := x.GetRule().(*MatchPredicate_AndMatch); ok { + return x.AndMatch + } + return nil +} + +func (x *MatchPredicate) GetNotMatch() *MatchPredicate { + if x, ok := x.GetRule().(*MatchPredicate_NotMatch); ok { + return x.NotMatch + } + return nil +} + +func (x *MatchPredicate) GetAnyMatch() bool { + if x, ok := x.GetRule().(*MatchPredicate_AnyMatch); ok { + return x.AnyMatch + } + return false +} + +func (x *MatchPredicate) GetHttpRequestHeadersMatch() *HttpHeadersMatch { + if x, ok := x.GetRule().(*MatchPredicate_HttpRequestHeadersMatch); ok { + return x.HttpRequestHeadersMatch + } + return nil +} + +func (x *MatchPredicate) GetHttpRequestTrailersMatch() *HttpHeadersMatch { + if x, ok := x.GetRule().(*MatchPredicate_HttpRequestTrailersMatch); ok { + return x.HttpRequestTrailersMatch + } + return nil +} + +func (x *MatchPredicate) GetHttpResponseHeadersMatch() *HttpHeadersMatch { + if x, ok := x.GetRule().(*MatchPredicate_HttpResponseHeadersMatch); ok { + return x.HttpResponseHeadersMatch + } + return nil +} + +func (x *MatchPredicate) GetHttpResponseTrailersMatch() *HttpHeadersMatch { + if x, ok := x.GetRule().(*MatchPredicate_HttpResponseTrailersMatch); ok { + return x.HttpResponseTrailersMatch + } + return nil +} + +func (x *MatchPredicate) GetHttpRequestGenericBodyMatch() *HttpGenericBodyMatch { + if x, ok := x.GetRule().(*MatchPredicate_HttpRequestGenericBodyMatch); ok { + return x.HttpRequestGenericBodyMatch + } + return nil +} + +func (x *MatchPredicate) GetHttpResponseGenericBodyMatch() *HttpGenericBodyMatch { + if x, ok := 
x.GetRule().(*MatchPredicate_HttpResponseGenericBodyMatch); ok { + return x.HttpResponseGenericBodyMatch + } + return nil +} + +type isMatchPredicate_Rule interface { + isMatchPredicate_Rule() +} + +type MatchPredicate_OrMatch struct { + // A set that describes a logical OR. If any member of the set matches, the match configuration + // matches. + OrMatch *MatchPredicate_MatchSet `protobuf:"bytes,1,opt,name=or_match,json=orMatch,proto3,oneof"` +} + +type MatchPredicate_AndMatch struct { + // A set that describes a logical AND. If all members of the set match, the match configuration + // matches. + AndMatch *MatchPredicate_MatchSet `protobuf:"bytes,2,opt,name=and_match,json=andMatch,proto3,oneof"` +} + +type MatchPredicate_NotMatch struct { + // A negation match. The match configuration will match if the negated match condition matches. + NotMatch *MatchPredicate `protobuf:"bytes,3,opt,name=not_match,json=notMatch,proto3,oneof"` +} + +type MatchPredicate_AnyMatch struct { + // The match configuration will always match. + AnyMatch bool `protobuf:"varint,4,opt,name=any_match,json=anyMatch,proto3,oneof"` +} + +type MatchPredicate_HttpRequestHeadersMatch struct { + // HTTP request headers match configuration. + HttpRequestHeadersMatch *HttpHeadersMatch `protobuf:"bytes,5,opt,name=http_request_headers_match,json=httpRequestHeadersMatch,proto3,oneof"` +} + +type MatchPredicate_HttpRequestTrailersMatch struct { + // HTTP request trailers match configuration. + HttpRequestTrailersMatch *HttpHeadersMatch `protobuf:"bytes,6,opt,name=http_request_trailers_match,json=httpRequestTrailersMatch,proto3,oneof"` +} + +type MatchPredicate_HttpResponseHeadersMatch struct { + // HTTP response headers match configuration. + HttpResponseHeadersMatch *HttpHeadersMatch `protobuf:"bytes,7,opt,name=http_response_headers_match,json=httpResponseHeadersMatch,proto3,oneof"` +} + +type MatchPredicate_HttpResponseTrailersMatch struct { + // HTTP response trailers match configuration. + HttpResponseTrailersMatch *HttpHeadersMatch `protobuf:"bytes,8,opt,name=http_response_trailers_match,json=httpResponseTrailersMatch,proto3,oneof"` +} + +type MatchPredicate_HttpRequestGenericBodyMatch struct { + // HTTP request generic body match configuration. + HttpRequestGenericBodyMatch *HttpGenericBodyMatch `protobuf:"bytes,9,opt,name=http_request_generic_body_match,json=httpRequestGenericBodyMatch,proto3,oneof"` +} + +type MatchPredicate_HttpResponseGenericBodyMatch struct { + // HTTP response generic body match configuration. + HttpResponseGenericBodyMatch *HttpGenericBodyMatch `protobuf:"bytes,10,opt,name=http_response_generic_body_match,json=httpResponseGenericBodyMatch,proto3,oneof"` +} + +func (*MatchPredicate_OrMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_AndMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_NotMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_AnyMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_HttpRequestHeadersMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_HttpRequestTrailersMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_HttpResponseHeadersMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_HttpResponseTrailersMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_HttpRequestGenericBodyMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_HttpResponseGenericBodyMatch) isMatchPredicate_Rule() {} + +// HTTP headers match configuration. 
+type HttpHeadersMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // HTTP headers to match. + Headers []*v3.HeaderMatcher `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` +} + +func (x *HttpHeadersMatch) Reset() { + *x = HttpHeadersMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpHeadersMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpHeadersMatch) ProtoMessage() {} + +func (x *HttpHeadersMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpHeadersMatch.ProtoReflect.Descriptor instead. +func (*HttpHeadersMatch) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{2} +} + +func (x *HttpHeadersMatch) GetHeaders() []*v3.HeaderMatcher { + if x != nil { + return x.Headers + } + return nil +} + +// HTTP generic body match configuration. +// List of text strings and hex strings to be located in HTTP body. +// All specified strings must be found in the HTTP body for positive match. +// The search may be limited to specified number of bytes from the body start. +// +// .. attention:: +// +// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. +// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified +// to scan only part of the http body. +type HttpGenericBodyMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). + BytesLimit uint32 `protobuf:"varint,1,opt,name=bytes_limit,json=bytesLimit,proto3" json:"bytes_limit,omitempty"` + // List of patterns to match. + Patterns []*HttpGenericBodyMatch_GenericTextMatch `protobuf:"bytes,2,rep,name=patterns,proto3" json:"patterns,omitempty"` +} + +func (x *HttpGenericBodyMatch) Reset() { + *x = HttpGenericBodyMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpGenericBodyMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpGenericBodyMatch) ProtoMessage() {} + +func (x *HttpGenericBodyMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpGenericBodyMatch.ProtoReflect.Descriptor instead. 
+func (*HttpGenericBodyMatch) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{3} +} + +func (x *HttpGenericBodyMatch) GetBytesLimit() uint32 { + if x != nil { + return x.BytesLimit + } + return 0 +} + +func (x *HttpGenericBodyMatch) GetPatterns() []*HttpGenericBodyMatch_GenericTextMatch { + if x != nil { + return x.Patterns + } + return nil +} + +// What to do if a match is successful. +type Matcher_OnMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to OnMatch: + // + // *Matcher_OnMatch_Matcher + // *Matcher_OnMatch_Action + OnMatch isMatcher_OnMatch_OnMatch `protobuf_oneof:"on_match"` +} + +func (x *Matcher_OnMatch) Reset() { + *x = Matcher_OnMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_OnMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_OnMatch) ProtoMessage() {} + +func (x *Matcher_OnMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_OnMatch.ProtoReflect.Descriptor instead. +func (*Matcher_OnMatch) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 0} +} + +func (m *Matcher_OnMatch) GetOnMatch() isMatcher_OnMatch_OnMatch { + if m != nil { + return m.OnMatch + } + return nil +} + +func (x *Matcher_OnMatch) GetMatcher() *Matcher { + if x, ok := x.GetOnMatch().(*Matcher_OnMatch_Matcher); ok { + return x.Matcher + } + return nil +} + +func (x *Matcher_OnMatch) GetAction() *v31.TypedExtensionConfig { + if x, ok := x.GetOnMatch().(*Matcher_OnMatch_Action); ok { + return x.Action + } + return nil +} + +type isMatcher_OnMatch_OnMatch interface { + isMatcher_OnMatch_OnMatch() +} + +type Matcher_OnMatch_Matcher struct { + // Nested matcher to evaluate. + // If the nested matcher does not match and does not specify + // on_no_match, then this matcher is considered not to have + // matched, even if a predicate at this level or above returned + // true. + Matcher *Matcher `protobuf:"bytes,1,opt,name=matcher,proto3,oneof"` +} + +type Matcher_OnMatch_Action struct { + // Protocol-specific action to take. + Action *v31.TypedExtensionConfig `protobuf:"bytes,2,opt,name=action,proto3,oneof"` +} + +func (*Matcher_OnMatch_Matcher) isMatcher_OnMatch_OnMatch() {} + +func (*Matcher_OnMatch_Action) isMatcher_OnMatch_OnMatch() {} + +// A linear list of field matchers. +// The field matchers are evaluated in order, and the first match +// wins. +type Matcher_MatcherList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of matchers. First match wins. 
+ Matchers []*Matcher_MatcherList_FieldMatcher `protobuf:"bytes,1,rep,name=matchers,proto3" json:"matchers,omitempty"` +} + +func (x *Matcher_MatcherList) Reset() { + *x = Matcher_MatcherList{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherList) ProtoMessage() {} + +func (x *Matcher_MatcherList) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherList.ProtoReflect.Descriptor instead. +func (*Matcher_MatcherList) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *Matcher_MatcherList) GetMatchers() []*Matcher_MatcherList_FieldMatcher { + if x != nil { + return x.Matchers + } + return nil +} + +type Matcher_MatcherTree struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Protocol-specific specification of input field to match on. + Input *v31.TypedExtensionConfig `protobuf:"bytes,1,opt,name=input,proto3" json:"input,omitempty"` + // Exact or prefix match maps in which to look up the input value. + // If the lookup succeeds, the match is considered successful, and + // the corresponding OnMatch is used. + // + // Types that are assignable to TreeType: + // + // *Matcher_MatcherTree_ExactMatchMap + // *Matcher_MatcherTree_PrefixMatchMap + // *Matcher_MatcherTree_CustomMatch + TreeType isMatcher_MatcherTree_TreeType `protobuf_oneof:"tree_type"` +} + +func (x *Matcher_MatcherTree) Reset() { + *x = Matcher_MatcherTree{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherTree) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherTree) ProtoMessage() {} + +func (x *Matcher_MatcherTree) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherTree.ProtoReflect.Descriptor instead. 
+func (*Matcher_MatcherTree) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 2} +} + +func (x *Matcher_MatcherTree) GetInput() *v31.TypedExtensionConfig { + if x != nil { + return x.Input + } + return nil +} + +func (m *Matcher_MatcherTree) GetTreeType() isMatcher_MatcherTree_TreeType { + if m != nil { + return m.TreeType + } + return nil +} + +func (x *Matcher_MatcherTree) GetExactMatchMap() *Matcher_MatcherTree_MatchMap { + if x, ok := x.GetTreeType().(*Matcher_MatcherTree_ExactMatchMap); ok { + return x.ExactMatchMap + } + return nil +} + +func (x *Matcher_MatcherTree) GetPrefixMatchMap() *Matcher_MatcherTree_MatchMap { + if x, ok := x.GetTreeType().(*Matcher_MatcherTree_PrefixMatchMap); ok { + return x.PrefixMatchMap + } + return nil +} + +func (x *Matcher_MatcherTree) GetCustomMatch() *v31.TypedExtensionConfig { + if x, ok := x.GetTreeType().(*Matcher_MatcherTree_CustomMatch); ok { + return x.CustomMatch + } + return nil +} + +type isMatcher_MatcherTree_TreeType interface { + isMatcher_MatcherTree_TreeType() +} + +type Matcher_MatcherTree_ExactMatchMap struct { + ExactMatchMap *Matcher_MatcherTree_MatchMap `protobuf:"bytes,2,opt,name=exact_match_map,json=exactMatchMap,proto3,oneof"` +} + +type Matcher_MatcherTree_PrefixMatchMap struct { + // Longest matching prefix wins. + PrefixMatchMap *Matcher_MatcherTree_MatchMap `protobuf:"bytes,3,opt,name=prefix_match_map,json=prefixMatchMap,proto3,oneof"` +} + +type Matcher_MatcherTree_CustomMatch struct { + // Extension for custom matching logic. + CustomMatch *v31.TypedExtensionConfig `protobuf:"bytes,4,opt,name=custom_match,json=customMatch,proto3,oneof"` +} + +func (*Matcher_MatcherTree_ExactMatchMap) isMatcher_MatcherTree_TreeType() {} + +func (*Matcher_MatcherTree_PrefixMatchMap) isMatcher_MatcherTree_TreeType() {} + +func (*Matcher_MatcherTree_CustomMatch) isMatcher_MatcherTree_TreeType() {} + +// Predicate to determine if a match is successful. +type Matcher_MatcherList_Predicate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to MatchType: + // + // *Matcher_MatcherList_Predicate_SinglePredicate_ + // *Matcher_MatcherList_Predicate_OrMatcher + // *Matcher_MatcherList_Predicate_AndMatcher + // *Matcher_MatcherList_Predicate_NotMatcher + MatchType isMatcher_MatcherList_Predicate_MatchType `protobuf_oneof:"match_type"` +} + +func (x *Matcher_MatcherList_Predicate) Reset() { + *x = Matcher_MatcherList_Predicate{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherList_Predicate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherList_Predicate) ProtoMessage() {} + +func (x *Matcher_MatcherList_Predicate) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherList_Predicate.ProtoReflect.Descriptor instead. 
+func (*Matcher_MatcherList_Predicate) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1, 0} +} + +func (m *Matcher_MatcherList_Predicate) GetMatchType() isMatcher_MatcherList_Predicate_MatchType { + if m != nil { + return m.MatchType + } + return nil +} + +func (x *Matcher_MatcherList_Predicate) GetSinglePredicate() *Matcher_MatcherList_Predicate_SinglePredicate { + if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_SinglePredicate_); ok { + return x.SinglePredicate + } + return nil +} + +func (x *Matcher_MatcherList_Predicate) GetOrMatcher() *Matcher_MatcherList_Predicate_PredicateList { + if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_OrMatcher); ok { + return x.OrMatcher + } + return nil +} + +func (x *Matcher_MatcherList_Predicate) GetAndMatcher() *Matcher_MatcherList_Predicate_PredicateList { + if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_AndMatcher); ok { + return x.AndMatcher + } + return nil +} + +func (x *Matcher_MatcherList_Predicate) GetNotMatcher() *Matcher_MatcherList_Predicate { + if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_NotMatcher); ok { + return x.NotMatcher + } + return nil +} + +type isMatcher_MatcherList_Predicate_MatchType interface { + isMatcher_MatcherList_Predicate_MatchType() +} + +type Matcher_MatcherList_Predicate_SinglePredicate_ struct { + // A single predicate to evaluate. + SinglePredicate *Matcher_MatcherList_Predicate_SinglePredicate `protobuf:"bytes,1,opt,name=single_predicate,json=singlePredicate,proto3,oneof"` +} + +type Matcher_MatcherList_Predicate_OrMatcher struct { + // A list of predicates to be OR-ed together. + OrMatcher *Matcher_MatcherList_Predicate_PredicateList `protobuf:"bytes,2,opt,name=or_matcher,json=orMatcher,proto3,oneof"` +} + +type Matcher_MatcherList_Predicate_AndMatcher struct { + // A list of predicates to be AND-ed together. + AndMatcher *Matcher_MatcherList_Predicate_PredicateList `protobuf:"bytes,3,opt,name=and_matcher,json=andMatcher,proto3,oneof"` +} + +type Matcher_MatcherList_Predicate_NotMatcher struct { + // The invert of a predicate + NotMatcher *Matcher_MatcherList_Predicate `protobuf:"bytes,4,opt,name=not_matcher,json=notMatcher,proto3,oneof"` +} + +func (*Matcher_MatcherList_Predicate_SinglePredicate_) isMatcher_MatcherList_Predicate_MatchType() {} + +func (*Matcher_MatcherList_Predicate_OrMatcher) isMatcher_MatcherList_Predicate_MatchType() {} + +func (*Matcher_MatcherList_Predicate_AndMatcher) isMatcher_MatcherList_Predicate_MatchType() {} + +func (*Matcher_MatcherList_Predicate_NotMatcher) isMatcher_MatcherList_Predicate_MatchType() {} + +// An individual matcher. +type Matcher_MatcherList_FieldMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Determines if the match succeeds. + Predicate *Matcher_MatcherList_Predicate `protobuf:"bytes,1,opt,name=predicate,proto3" json:"predicate,omitempty"` + // What to do if the match succeeds. 
+ OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"` +} + +func (x *Matcher_MatcherList_FieldMatcher) Reset() { + *x = Matcher_MatcherList_FieldMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherList_FieldMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherList_FieldMatcher) ProtoMessage() {} + +func (x *Matcher_MatcherList_FieldMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherList_FieldMatcher.ProtoReflect.Descriptor instead. +func (*Matcher_MatcherList_FieldMatcher) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1, 1} +} + +func (x *Matcher_MatcherList_FieldMatcher) GetPredicate() *Matcher_MatcherList_Predicate { + if x != nil { + return x.Predicate + } + return nil +} + +func (x *Matcher_MatcherList_FieldMatcher) GetOnMatch() *Matcher_OnMatch { + if x != nil { + return x.OnMatch + } + return nil +} + +// Predicate for a single input field. +type Matcher_MatcherList_Predicate_SinglePredicate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Protocol-specific specification of input field to match on. + // [#extension-category: envoy.matching.common_inputs] + Input *v31.TypedExtensionConfig `protobuf:"bytes,1,opt,name=input,proto3" json:"input,omitempty"` + // Types that are assignable to Matcher: + // + // *Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch + // *Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch + Matcher isMatcher_MatcherList_Predicate_SinglePredicate_Matcher `protobuf_oneof:"matcher"` +} + +func (x *Matcher_MatcherList_Predicate_SinglePredicate) Reset() { + *x = Matcher_MatcherList_Predicate_SinglePredicate{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherList_Predicate_SinglePredicate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherList_Predicate_SinglePredicate) ProtoMessage() {} + +func (x *Matcher_MatcherList_Predicate_SinglePredicate) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherList_Predicate_SinglePredicate.ProtoReflect.Descriptor instead. 
+func (*Matcher_MatcherList_Predicate_SinglePredicate) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1, 0, 0} +} + +func (x *Matcher_MatcherList_Predicate_SinglePredicate) GetInput() *v31.TypedExtensionConfig { + if x != nil { + return x.Input + } + return nil +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate) GetMatcher() isMatcher_MatcherList_Predicate_SinglePredicate_Matcher { + if m != nil { + return m.Matcher + } + return nil +} + +func (x *Matcher_MatcherList_Predicate_SinglePredicate) GetValueMatch() *v32.StringMatcher { + if x, ok := x.GetMatcher().(*Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch); ok { + return x.ValueMatch + } + return nil +} + +func (x *Matcher_MatcherList_Predicate_SinglePredicate) GetCustomMatch() *v31.TypedExtensionConfig { + if x, ok := x.GetMatcher().(*Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch); ok { + return x.CustomMatch + } + return nil +} + +type isMatcher_MatcherList_Predicate_SinglePredicate_Matcher interface { + isMatcher_MatcherList_Predicate_SinglePredicate_Matcher() +} + +type Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch struct { + // Built-in string matcher. + ValueMatch *v32.StringMatcher `protobuf:"bytes,2,opt,name=value_match,json=valueMatch,proto3,oneof"` +} + +type Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch struct { + // Extension for custom matching logic. + // [#extension-category: envoy.matching.input_matchers] + CustomMatch *v31.TypedExtensionConfig `protobuf:"bytes,3,opt,name=custom_match,json=customMatch,proto3,oneof"` +} + +func (*Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch) isMatcher_MatcherList_Predicate_SinglePredicate_Matcher() { +} + +func (*Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch) isMatcher_MatcherList_Predicate_SinglePredicate_Matcher() { +} + +// A list of two or more matchers. Used to allow using a list within a oneof. +type Matcher_MatcherList_Predicate_PredicateList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Predicate []*Matcher_MatcherList_Predicate `protobuf:"bytes,1,rep,name=predicate,proto3" json:"predicate,omitempty"` +} + +func (x *Matcher_MatcherList_Predicate_PredicateList) Reset() { + *x = Matcher_MatcherList_Predicate_PredicateList{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherList_Predicate_PredicateList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherList_Predicate_PredicateList) ProtoMessage() {} + +func (x *Matcher_MatcherList_Predicate_PredicateList) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherList_Predicate_PredicateList.ProtoReflect.Descriptor instead. 
+func (*Matcher_MatcherList_Predicate_PredicateList) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1, 0, 1} +} + +func (x *Matcher_MatcherList_Predicate_PredicateList) GetPredicate() []*Matcher_MatcherList_Predicate { + if x != nil { + return x.Predicate + } + return nil +} + +// A map of configured matchers. Used to allow using a map within a oneof. +type Matcher_MatcherTree_MatchMap struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Map map[string]*Matcher_OnMatch `protobuf:"bytes,1,rep,name=map,proto3" json:"map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Matcher_MatcherTree_MatchMap) Reset() { + *x = Matcher_MatcherTree_MatchMap{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Matcher_MatcherTree_MatchMap) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Matcher_MatcherTree_MatchMap) ProtoMessage() {} + +func (x *Matcher_MatcherTree_MatchMap) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Matcher_MatcherTree_MatchMap.ProtoReflect.Descriptor instead. +func (*Matcher_MatcherTree_MatchMap) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 2, 0} +} + +func (x *Matcher_MatcherTree_MatchMap) GetMap() map[string]*Matcher_OnMatch { + if x != nil { + return x.Map + } + return nil +} + +// A set of match configurations used for logical operations. +type MatchPredicate_MatchSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of rules that make up the set. + Rules []*MatchPredicate `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"` +} + +func (x *MatchPredicate_MatchSet) Reset() { + *x = MatchPredicate_MatchSet{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MatchPredicate_MatchSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MatchPredicate_MatchSet) ProtoMessage() {} + +func (x *MatchPredicate_MatchSet) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MatchPredicate_MatchSet.ProtoReflect.Descriptor instead. 
+func (*MatchPredicate_MatchSet) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *MatchPredicate_MatchSet) GetRules() []*MatchPredicate { + if x != nil { + return x.Rules + } + return nil +} + +type HttpGenericBodyMatch_GenericTextMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Rule: + // + // *HttpGenericBodyMatch_GenericTextMatch_StringMatch + // *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch + Rule isHttpGenericBodyMatch_GenericTextMatch_Rule `protobuf_oneof:"rule"` +} + +func (x *HttpGenericBodyMatch_GenericTextMatch) Reset() { + *x = HttpGenericBodyMatch_GenericTextMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpGenericBodyMatch_GenericTextMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpGenericBodyMatch_GenericTextMatch) ProtoMessage() {} + +func (x *HttpGenericBodyMatch_GenericTextMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpGenericBodyMatch_GenericTextMatch.ProtoReflect.Descriptor instead. +func (*HttpGenericBodyMatch_GenericTextMatch) Descriptor() ([]byte, []int) { + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP(), []int{3, 0} +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) GetRule() isHttpGenericBodyMatch_GenericTextMatch_Rule { + if m != nil { + return m.Rule + } + return nil +} + +func (x *HttpGenericBodyMatch_GenericTextMatch) GetStringMatch() string { + if x, ok := x.GetRule().(*HttpGenericBodyMatch_GenericTextMatch_StringMatch); ok { + return x.StringMatch + } + return "" +} + +func (x *HttpGenericBodyMatch_GenericTextMatch) GetBinaryMatch() []byte { + if x, ok := x.GetRule().(*HttpGenericBodyMatch_GenericTextMatch_BinaryMatch); ok { + return x.BinaryMatch + } + return nil +} + +type isHttpGenericBodyMatch_GenericTextMatch_Rule interface { + isHttpGenericBodyMatch_GenericTextMatch_Rule() +} + +type HttpGenericBodyMatch_GenericTextMatch_StringMatch struct { + // Text string to be located in HTTP body. + StringMatch string `protobuf:"bytes,1,opt,name=string_match,json=stringMatch,proto3,oneof"` +} + +type HttpGenericBodyMatch_GenericTextMatch_BinaryMatch struct { + // Sequence of bytes to be located in HTTP body. 
+ BinaryMatch []byte `protobuf:"bytes,2,opt,name=binary_match,json=binaryMatch,proto3,oneof"` +} + +func (*HttpGenericBodyMatch_GenericTextMatch_StringMatch) isHttpGenericBodyMatch_GenericTextMatch_Rule() { +} + +func (*HttpGenericBodyMatch_GenericTextMatch_BinaryMatch) isHttpGenericBodyMatch_GenericTextMatch_Rule() { +} + +var File_envoy_config_common_matcher_v3_matcher_proto protoreflect.FileDescriptor + +var file_envoy_config_common_matcher_v3_matcher_proto_rawDesc = []byte{ + 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, + 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x24, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, + 0x11, 0x0a, 0x07, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x58, 0x0a, 0x0c, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x4c, 0x69, 0x73, 0x74, 0x12, 0x58, 0x0a, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, + 0x74, 0x72, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x48, + 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x12, 0x4f, + 0x0a, 0x0b, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 
0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x52, 0x09, 0x6f, 0x6e, 0x4e, 0x6f, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, + 0xa5, 0x01, 0x0a, 0x07, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x43, 0x0a, 0x07, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x12, 0x44, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xa2, 0x09, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x66, 0x0a, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x1a, + 0xdc, 0x06, 0x0a, 0x09, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x7a, 0x0a, + 0x10, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x0f, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, + 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x6c, 0x0a, 0x0a, 0x6f, 0x72, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4b, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, + 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x09, 0x6f, 0x72, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x6e, 0x0a, 0x0b, 0x61, 0x6e, 0x64, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x03, 0x20, 
0x01, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, + 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72, 0x65, 0x64, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x6e, 0x64, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x60, 0x0a, 0x0b, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, + 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x6e, + 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x1a, 0x87, 0x02, 0x0a, 0x0f, 0x53, 0x69, + 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x4a, 0x0a, + 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, + 0x10, 0x01, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x47, 0x0a, 0x0b, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x12, 0x4f, 0x0a, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x03, + 0xf8, 0x42, 0x01, 0x1a, 0x76, 0x0a, 0x0d, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x4c, 0x69, 0x73, 0x74, 0x12, 0x65, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, + 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x11, 
0x0a, 0x0a, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xcb, + 0x01, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, + 0x65, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x70, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x54, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, + 0x02, 0x10, 0x01, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0xe7, 0x04, 0x0a, + 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x12, 0x4a, 0x0a, 0x05, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, + 0x01, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x66, 0x0a, 0x0f, 0x65, 0x78, 0x61, 0x63, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x48, + 0x00, 0x52, 0x0d, 0x65, 0x78, 0x61, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, + 0x12, 0x68, 0x0a, 0x10, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x2e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x0e, 0x70, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x12, 0x4f, 0x0a, 0x0c, 0x63, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, + 0x65, 
0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, + 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0xd6, 0x01, 0x0a, 0x08, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x12, 0x61, 0x0a, 0x03, 0x6d, 0x61, 0x70, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x4d, 0x61, 0x70, 0x2e, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0x9a, 0x01, 0x02, 0x08, 0x01, 0x52, 0x03, 0x6d, 0x61, 0x70, 0x1a, 0x67, 0x0a, 0x08, 0x4d, + 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x45, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x09, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x13, 0x0a, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xe8, 0x08, 0x0a, 0x0e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x54, + 0x0a, 0x08, 0x6f, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x37, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x07, 0x6f, 0x72, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x12, 0x56, 0x0a, 0x09, 0x61, 0x6e, 0x64, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, + 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x74, + 0x48, 0x00, 0x52, 0x08, 0x61, 0x6e, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x4d, 0x0a, 0x09, + 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, + 0x00, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x26, 0x0a, 0x09, 0x61, + 0x6e, 0x79, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x6a, 0x02, 
0x08, 0x01, 0x48, 0x00, 0x52, 0x08, 0x61, 0x6e, 0x79, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x12, 0x6f, 0x0a, 0x1a, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x17, 0x68, 0x74, 0x74, + 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x12, 0x71, 0x0a, 0x1b, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x18, 0x68, + 0x74, 0x74, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, + 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x71, 0x0a, 0x1b, 0x68, 0x74, 0x74, 0x70, 0x5f, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, + 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, + 0x52, 0x18, 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x73, 0x0a, 0x1c, 0x68, 0x74, + 0x74, 0x70, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69, + 0x6c, 0x65, 0x72, 0x73, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x48, 0x00, 0x52, 0x19, 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, + 0x7c, 0x0a, 0x1f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, + 0x52, 0x1b, 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x7e, 0x0a, + 0x20, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, + 0x1c, 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0x5a, 0x0a, + 0x08, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x74, 0x12, 0x4e, 0x0a, 0x05, 0x72, 0x75, 0x6c, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, + 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, + 0x08, 0x02, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x42, 0x0b, 0x0a, 0x04, 0x72, 0x75, 0x6c, + 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x52, 0x0a, 0x10, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3e, 0x0a, 0x07, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0xa1, 0x02, 0x0a, 0x14, 0x48, + 0x74, 0x74, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x12, 0x6b, 0x0a, 0x08, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x54, 0x65, 0x78, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, + 0x73, 0x1a, 0x7b, 0x0a, 0x10, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x54, 0x65, 0x78, 0x74, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x2c, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x12, 0x2c, 0x0a, 0x0c, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 
0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x7a, 0x02, + 0x10, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x42, 0x0b, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x97, + 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x2c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, + 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_common_matcher_v3_matcher_proto_rawDescOnce sync.Once + file_envoy_config_common_matcher_v3_matcher_proto_rawDescData = file_envoy_config_common_matcher_v3_matcher_proto_rawDesc +) + +func file_envoy_config_common_matcher_v3_matcher_proto_rawDescGZIP() []byte { + file_envoy_config_common_matcher_v3_matcher_proto_rawDescOnce.Do(func() { + file_envoy_config_common_matcher_v3_matcher_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_common_matcher_v3_matcher_proto_rawDescData) + }) + return file_envoy_config_common_matcher_v3_matcher_proto_rawDescData +} + +var file_envoy_config_common_matcher_v3_matcher_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_envoy_config_common_matcher_v3_matcher_proto_goTypes = []interface{}{ + (*Matcher)(nil), // 0: envoy.config.common.matcher.v3.Matcher + (*MatchPredicate)(nil), // 1: envoy.config.common.matcher.v3.MatchPredicate + (*HttpHeadersMatch)(nil), // 2: envoy.config.common.matcher.v3.HttpHeadersMatch + (*HttpGenericBodyMatch)(nil), // 3: envoy.config.common.matcher.v3.HttpGenericBodyMatch + (*Matcher_OnMatch)(nil), // 4: envoy.config.common.matcher.v3.Matcher.OnMatch + (*Matcher_MatcherList)(nil), // 5: envoy.config.common.matcher.v3.Matcher.MatcherList + (*Matcher_MatcherTree)(nil), // 6: envoy.config.common.matcher.v3.Matcher.MatcherTree + (*Matcher_MatcherList_Predicate)(nil), // 7: envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate + (*Matcher_MatcherList_FieldMatcher)(nil), // 8: envoy.config.common.matcher.v3.Matcher.MatcherList.FieldMatcher + (*Matcher_MatcherList_Predicate_SinglePredicate)(nil), // 9: envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate + (*Matcher_MatcherList_Predicate_PredicateList)(nil), // 10: envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.PredicateList + (*Matcher_MatcherTree_MatchMap)(nil), // 11: envoy.config.common.matcher.v3.Matcher.MatcherTree.MatchMap + nil, // 12: envoy.config.common.matcher.v3.Matcher.MatcherTree.MatchMap.MapEntry + (*MatchPredicate_MatchSet)(nil), // 13: envoy.config.common.matcher.v3.MatchPredicate.MatchSet + (*HttpGenericBodyMatch_GenericTextMatch)(nil), // 14: 
envoy.config.common.matcher.v3.HttpGenericBodyMatch.GenericTextMatch + (*v3.HeaderMatcher)(nil), // 15: envoy.config.route.v3.HeaderMatcher + (*v31.TypedExtensionConfig)(nil), // 16: envoy.config.core.v3.TypedExtensionConfig + (*v32.StringMatcher)(nil), // 17: envoy.type.matcher.v3.StringMatcher +} +var file_envoy_config_common_matcher_v3_matcher_proto_depIdxs = []int32{ + 5, // 0: envoy.config.common.matcher.v3.Matcher.matcher_list:type_name -> envoy.config.common.matcher.v3.Matcher.MatcherList + 6, // 1: envoy.config.common.matcher.v3.Matcher.matcher_tree:type_name -> envoy.config.common.matcher.v3.Matcher.MatcherTree + 4, // 2: envoy.config.common.matcher.v3.Matcher.on_no_match:type_name -> envoy.config.common.matcher.v3.Matcher.OnMatch + 13, // 3: envoy.config.common.matcher.v3.MatchPredicate.or_match:type_name -> envoy.config.common.matcher.v3.MatchPredicate.MatchSet + 13, // 4: envoy.config.common.matcher.v3.MatchPredicate.and_match:type_name -> envoy.config.common.matcher.v3.MatchPredicate.MatchSet + 1, // 5: envoy.config.common.matcher.v3.MatchPredicate.not_match:type_name -> envoy.config.common.matcher.v3.MatchPredicate + 2, // 6: envoy.config.common.matcher.v3.MatchPredicate.http_request_headers_match:type_name -> envoy.config.common.matcher.v3.HttpHeadersMatch + 2, // 7: envoy.config.common.matcher.v3.MatchPredicate.http_request_trailers_match:type_name -> envoy.config.common.matcher.v3.HttpHeadersMatch + 2, // 8: envoy.config.common.matcher.v3.MatchPredicate.http_response_headers_match:type_name -> envoy.config.common.matcher.v3.HttpHeadersMatch + 2, // 9: envoy.config.common.matcher.v3.MatchPredicate.http_response_trailers_match:type_name -> envoy.config.common.matcher.v3.HttpHeadersMatch + 3, // 10: envoy.config.common.matcher.v3.MatchPredicate.http_request_generic_body_match:type_name -> envoy.config.common.matcher.v3.HttpGenericBodyMatch + 3, // 11: envoy.config.common.matcher.v3.MatchPredicate.http_response_generic_body_match:type_name -> envoy.config.common.matcher.v3.HttpGenericBodyMatch + 15, // 12: envoy.config.common.matcher.v3.HttpHeadersMatch.headers:type_name -> envoy.config.route.v3.HeaderMatcher + 14, // 13: envoy.config.common.matcher.v3.HttpGenericBodyMatch.patterns:type_name -> envoy.config.common.matcher.v3.HttpGenericBodyMatch.GenericTextMatch + 0, // 14: envoy.config.common.matcher.v3.Matcher.OnMatch.matcher:type_name -> envoy.config.common.matcher.v3.Matcher + 16, // 15: envoy.config.common.matcher.v3.Matcher.OnMatch.action:type_name -> envoy.config.core.v3.TypedExtensionConfig + 8, // 16: envoy.config.common.matcher.v3.Matcher.MatcherList.matchers:type_name -> envoy.config.common.matcher.v3.Matcher.MatcherList.FieldMatcher + 16, // 17: envoy.config.common.matcher.v3.Matcher.MatcherTree.input:type_name -> envoy.config.core.v3.TypedExtensionConfig + 11, // 18: envoy.config.common.matcher.v3.Matcher.MatcherTree.exact_match_map:type_name -> envoy.config.common.matcher.v3.Matcher.MatcherTree.MatchMap + 11, // 19: envoy.config.common.matcher.v3.Matcher.MatcherTree.prefix_match_map:type_name -> envoy.config.common.matcher.v3.Matcher.MatcherTree.MatchMap + 16, // 20: envoy.config.common.matcher.v3.Matcher.MatcherTree.custom_match:type_name -> envoy.config.core.v3.TypedExtensionConfig + 9, // 21: envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.single_predicate:type_name -> envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate + 10, // 22: envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.or_matcher:type_name -> 
envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.PredicateList + 10, // 23: envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.and_matcher:type_name -> envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.PredicateList + 7, // 24: envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.not_matcher:type_name -> envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate + 7, // 25: envoy.config.common.matcher.v3.Matcher.MatcherList.FieldMatcher.predicate:type_name -> envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate + 4, // 26: envoy.config.common.matcher.v3.Matcher.MatcherList.FieldMatcher.on_match:type_name -> envoy.config.common.matcher.v3.Matcher.OnMatch + 16, // 27: envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate.input:type_name -> envoy.config.core.v3.TypedExtensionConfig + 17, // 28: envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate.value_match:type_name -> envoy.type.matcher.v3.StringMatcher + 16, // 29: envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate.custom_match:type_name -> envoy.config.core.v3.TypedExtensionConfig + 7, // 30: envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate.PredicateList.predicate:type_name -> envoy.config.common.matcher.v3.Matcher.MatcherList.Predicate + 12, // 31: envoy.config.common.matcher.v3.Matcher.MatcherTree.MatchMap.map:type_name -> envoy.config.common.matcher.v3.Matcher.MatcherTree.MatchMap.MapEntry + 4, // 32: envoy.config.common.matcher.v3.Matcher.MatcherTree.MatchMap.MapEntry.value:type_name -> envoy.config.common.matcher.v3.Matcher.OnMatch + 1, // 33: envoy.config.common.matcher.v3.MatchPredicate.MatchSet.rules:type_name -> envoy.config.common.matcher.v3.MatchPredicate + 34, // [34:34] is the sub-list for method output_type + 34, // [34:34] is the sub-list for method input_type + 34, // [34:34] is the sub-list for extension type_name + 34, // [34:34] is the sub-list for extension extendee + 0, // [0:34] is the sub-list for field type_name +} + +func init() { file_envoy_config_common_matcher_v3_matcher_proto_init() } +func file_envoy_config_common_matcher_v3_matcher_proto_init() { + if File_envoy_config_common_matcher_v3_matcher_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MatchPredicate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpHeadersMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpGenericBodyMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[4].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*Matcher_OnMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherTree); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherList_Predicate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherList_FieldMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherList_Predicate_SinglePredicate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherList_Predicate_PredicateList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Matcher_MatcherTree_MatchMap); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MatchPredicate_MatchSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpGenericBodyMatch_GenericTextMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Matcher_MatcherList_)(nil), + (*Matcher_MatcherTree_)(nil), + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*MatchPredicate_OrMatch)(nil), + (*MatchPredicate_AndMatch)(nil), + (*MatchPredicate_NotMatch)(nil), + (*MatchPredicate_AnyMatch)(nil), + (*MatchPredicate_HttpRequestHeadersMatch)(nil), + (*MatchPredicate_HttpRequestTrailersMatch)(nil), + (*MatchPredicate_HttpResponseHeadersMatch)(nil), + (*MatchPredicate_HttpResponseTrailersMatch)(nil), + (*MatchPredicate_HttpRequestGenericBodyMatch)(nil), + 
(*MatchPredicate_HttpResponseGenericBodyMatch)(nil), + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[4].OneofWrappers = []interface{}{ + (*Matcher_OnMatch_Matcher)(nil), + (*Matcher_OnMatch_Action)(nil), + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[6].OneofWrappers = []interface{}{ + (*Matcher_MatcherTree_ExactMatchMap)(nil), + (*Matcher_MatcherTree_PrefixMatchMap)(nil), + (*Matcher_MatcherTree_CustomMatch)(nil), + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[7].OneofWrappers = []interface{}{ + (*Matcher_MatcherList_Predicate_SinglePredicate_)(nil), + (*Matcher_MatcherList_Predicate_OrMatcher)(nil), + (*Matcher_MatcherList_Predicate_AndMatcher)(nil), + (*Matcher_MatcherList_Predicate_NotMatcher)(nil), + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[9].OneofWrappers = []interface{}{ + (*Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch)(nil), + (*Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch)(nil), + } + file_envoy_config_common_matcher_v3_matcher_proto_msgTypes[14].OneofWrappers = []interface{}{ + (*HttpGenericBodyMatch_GenericTextMatch_StringMatch)(nil), + (*HttpGenericBodyMatch_GenericTextMatch_BinaryMatch)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_common_matcher_v3_matcher_proto_rawDesc, + NumEnums: 0, + NumMessages: 15, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_common_matcher_v3_matcher_proto_goTypes, + DependencyIndexes: file_envoy_config_common_matcher_v3_matcher_proto_depIdxs, + MessageInfos: file_envoy_config_common_matcher_v3_matcher_proto_msgTypes, + }.Build() + File_envoy_config_common_matcher_v3_matcher_proto = out.File + file_envoy_config_common_matcher_v3_matcher_proto_rawDesc = nil + file_envoy_config_common_matcher_v3_matcher_proto_goTypes = nil + file_envoy_config_common_matcher_v3_matcher_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.validate.go new file mode 100644 index 0000000000..88607c30b1 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher.pb.validate.go @@ -0,0 +1,3044 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/common/matcher/v3/matcher.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Matcher with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Matcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher with the rules defined in the +// proto definition for this message. 
If any rules are violated, the result is +// a list of violation errors wrapped in MatcherMultiError, or nil if none found. +func (m *Matcher) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetOnNoMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatcherValidationError{ + field: "OnNoMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatcherValidationError{ + field: "OnNoMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOnNoMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatcherValidationError{ + field: "OnNoMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofMatcherTypePresent := false + switch v := m.MatcherType.(type) { + case *Matcher_MatcherList_: + if v == nil { + err := MatcherValidationError{ + field: "MatcherType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatcherTypePresent = true + + if all { + switch v := interface{}(m.GetMatcherList()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatcherValidationError{ + field: "MatcherList", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatcherValidationError{ + field: "MatcherList", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatcherList()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatcherValidationError{ + field: "MatcherList", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherTree_: + if v == nil { + err := MatcherValidationError{ + field: "MatcherType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatcherTypePresent = true + + if all { + switch v := interface{}(m.GetMatcherTree()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatcherValidationError{ + field: "MatcherTree", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatcherValidationError{ + field: "MatcherTree", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatcherTree()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatcherValidationError{ + field: "MatcherTree", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofMatcherTypePresent { + err := MatcherValidationError{ + field: "MatcherType", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return MatcherMultiError(errors) + } + + return nil +} + +// MatcherMultiError 
is an error wrapping multiple validation errors returned +// by Matcher.ValidateAll() if the designated constraints aren't met. +type MatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MatcherMultiError) AllErrors() []error { return m } + +// MatcherValidationError is the validation error returned by Matcher.Validate +// if the designated constraints aren't met. +type MatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MatcherValidationError) ErrorName() string { return "MatcherValidationError" } + +// Error satisfies the builtin error interface +func (e MatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MatcherValidationError{} + +// Validate checks the field values on MatchPredicate with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *MatchPredicate) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MatchPredicate with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in MatchPredicateMultiError, +// or nil if none found. 
+func (m *MatchPredicate) ValidateAll() error { + return m.validate(true) +} + +func (m *MatchPredicate) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofRulePresent := false + switch v := m.Rule.(type) { + case *MatchPredicate_OrMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetOrMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "OrMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "OrMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOrMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "OrMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_AndMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetAndMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "AndMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "AndMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAndMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "AndMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_NotMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetNotMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "NotMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "NotMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNotMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "NotMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_AnyMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = 
append(errors, err) + } + oneofRulePresent = true + + if m.GetAnyMatch() != true { + err := MatchPredicateValidationError{ + field: "AnyMatch", + reason: "value must equal true", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *MatchPredicate_HttpRequestHeadersMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHttpRequestHeadersMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpRequestHeadersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpRequestHeadersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpRequestHeadersMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "HttpRequestHeadersMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_HttpRequestTrailersMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHttpRequestTrailersMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpRequestTrailersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpRequestTrailersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpRequestTrailersMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "HttpRequestTrailersMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_HttpResponseHeadersMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHttpResponseHeadersMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpResponseHeadersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpResponseHeadersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpResponseHeadersMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: 
"HttpResponseHeadersMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_HttpResponseTrailersMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHttpResponseTrailersMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpResponseTrailersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpResponseTrailersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpResponseTrailersMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "HttpResponseTrailersMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_HttpRequestGenericBodyMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHttpRequestGenericBodyMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpRequestGenericBodyMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpRequestGenericBodyMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpRequestGenericBodyMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "HttpRequestGenericBodyMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_HttpResponseGenericBodyMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHttpResponseGenericBodyMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpResponseGenericBodyMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpResponseGenericBodyMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpResponseGenericBodyMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "HttpResponseGenericBodyMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + 
+ default: + _ = v // ensures v is used + } + if !oneofRulePresent { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return MatchPredicateMultiError(errors) + } + + return nil +} + +// MatchPredicateMultiError is an error wrapping multiple validation errors +// returned by MatchPredicate.ValidateAll() if the designated constraints +// aren't met. +type MatchPredicateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MatchPredicateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MatchPredicateMultiError) AllErrors() []error { return m } + +// MatchPredicateValidationError is the validation error returned by +// MatchPredicate.Validate if the designated constraints aren't met. +type MatchPredicateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MatchPredicateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MatchPredicateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MatchPredicateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MatchPredicateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MatchPredicateValidationError) ErrorName() string { return "MatchPredicateValidationError" } + +// Error satisfies the builtin error interface +func (e MatchPredicateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatchPredicate.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MatchPredicateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MatchPredicateValidationError{} + +// Validate checks the field values on HttpHeadersMatch with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *HttpHeadersMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpHeadersMatch with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HttpHeadersMatchMultiError, or nil if none found. 
+func (m *HttpHeadersMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpHeadersMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetHeaders() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpHeadersMatchValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpHeadersMatchValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpHeadersMatchValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return HttpHeadersMatchMultiError(errors) + } + + return nil +} + +// HttpHeadersMatchMultiError is an error wrapping multiple validation errors +// returned by HttpHeadersMatch.ValidateAll() if the designated constraints +// aren't met. +type HttpHeadersMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpHeadersMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpHeadersMatchMultiError) AllErrors() []error { return m } + +// HttpHeadersMatchValidationError is the validation error returned by +// HttpHeadersMatch.Validate if the designated constraints aren't met. +type HttpHeadersMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpHeadersMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpHeadersMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpHeadersMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpHeadersMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpHeadersMatchValidationError) ErrorName() string { return "HttpHeadersMatchValidationError" } + +// Error satisfies the builtin error interface +func (e HttpHeadersMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpHeadersMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpHeadersMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpHeadersMatchValidationError{} + +// Validate checks the field values on HttpGenericBodyMatch with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *HttpGenericBodyMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpGenericBodyMatch with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HttpGenericBodyMatchMultiError, or nil if none found. +func (m *HttpGenericBodyMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpGenericBodyMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for BytesLimit + + if len(m.GetPatterns()) < 1 { + err := HttpGenericBodyMatchValidationError{ + field: "Patterns", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetPatterns() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpGenericBodyMatchValidationError{ + field: fmt.Sprintf("Patterns[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpGenericBodyMatchValidationError{ + field: fmt.Sprintf("Patterns[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpGenericBodyMatchValidationError{ + field: fmt.Sprintf("Patterns[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return HttpGenericBodyMatchMultiError(errors) + } + + return nil +} + +// HttpGenericBodyMatchMultiError is an error wrapping multiple validation +// errors returned by HttpGenericBodyMatch.ValidateAll() if the designated +// constraints aren't met. +type HttpGenericBodyMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpGenericBodyMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpGenericBodyMatchMultiError) AllErrors() []error { return m } + +// HttpGenericBodyMatchValidationError is the validation error returned by +// HttpGenericBodyMatch.Validate if the designated constraints aren't met. +type HttpGenericBodyMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpGenericBodyMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpGenericBodyMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpGenericBodyMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpGenericBodyMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HttpGenericBodyMatchValidationError) ErrorName() string { + return "HttpGenericBodyMatchValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpGenericBodyMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpGenericBodyMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpGenericBodyMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpGenericBodyMatchValidationError{} + +// Validate checks the field values on Matcher_OnMatch with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *Matcher_OnMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher_OnMatch with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Matcher_OnMatchMultiError, or nil if none found. +func (m *Matcher_OnMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_OnMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofOnMatchPresent := false + switch v := m.OnMatch.(type) { + case *Matcher_OnMatch_Matcher: + if v == nil { + err := Matcher_OnMatchValidationError{ + field: "OnMatch", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOnMatchPresent = true + + if all { + switch v := interface{}(m.GetMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_OnMatchValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_OnMatchValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_OnMatchValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_OnMatch_Action: + if v == nil { + err := Matcher_OnMatchValidationError{ + field: "OnMatch", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOnMatchPresent = true + + if all { + switch v := interface{}(m.GetAction()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_OnMatchValidationError{ + field: "Action", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_OnMatchValidationError{ + field: "Action", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAction()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_OnMatchValidationError{ + field: "Action", + reason: "embedded message 
failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofOnMatchPresent { + err := Matcher_OnMatchValidationError{ + field: "OnMatch", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Matcher_OnMatchMultiError(errors) + } + + return nil +} + +// Matcher_OnMatchMultiError is an error wrapping multiple validation errors +// returned by Matcher_OnMatch.ValidateAll() if the designated constraints +// aren't met. +type Matcher_OnMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_OnMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_OnMatchMultiError) AllErrors() []error { return m } + +// Matcher_OnMatchValidationError is the validation error returned by +// Matcher_OnMatch.Validate if the designated constraints aren't met. +type Matcher_OnMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_OnMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_OnMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Matcher_OnMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_OnMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_OnMatchValidationError) ErrorName() string { return "Matcher_OnMatchValidationError" } + +// Error satisfies the builtin error interface +func (e Matcher_OnMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_OnMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_OnMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_OnMatchValidationError{} + +// Validate checks the field values on Matcher_MatcherList with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Matcher_MatcherList) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher_MatcherList with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Matcher_MatcherListMultiError, or nil if none found. 
+func (m *Matcher_MatcherList) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherList) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetMatchers()) < 1 { + err := Matcher_MatcherListValidationError{ + field: "Matchers", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetMatchers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherListValidationError{ + field: fmt.Sprintf("Matchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherListValidationError{ + field: fmt.Sprintf("Matchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherListValidationError{ + field: fmt.Sprintf("Matchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return Matcher_MatcherListMultiError(errors) + } + + return nil +} + +// Matcher_MatcherListMultiError is an error wrapping multiple validation +// errors returned by Matcher_MatcherList.ValidateAll() if the designated +// constraints aren't met. +type Matcher_MatcherListMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherListMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherListMultiError) AllErrors() []error { return m } + +// Matcher_MatcherListValidationError is the validation error returned by +// Matcher_MatcherList.Validate if the designated constraints aren't met. +type Matcher_MatcherListValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_MatcherListValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherListValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Matcher_MatcherListValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherListValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Matcher_MatcherListValidationError) ErrorName() string { + return "Matcher_MatcherListValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherListValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherList.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherListValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherListValidationError{} + +// Validate checks the field values on Matcher_MatcherTree with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Matcher_MatcherTree) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher_MatcherTree with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Matcher_MatcherTreeMultiError, or nil if none found. +func (m *Matcher_MatcherTree) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherTree) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetInput() == nil { + err := Matcher_MatcherTreeValidationError{ + field: "Input", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetInput()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "Input", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "Input", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInput()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherTreeValidationError{ + field: "Input", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofTreeTypePresent := false + switch v := m.TreeType.(type) { + case *Matcher_MatcherTree_ExactMatchMap: + if v == nil { + err := Matcher_MatcherTreeValidationError{ + field: "TreeType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTreeTypePresent = true + + if all { + switch v := interface{}(m.GetExactMatchMap()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "ExactMatchMap", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "ExactMatchMap", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExactMatchMap()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherTreeValidationError{ + field: "ExactMatchMap", + reason: 
"embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherTree_PrefixMatchMap: + if v == nil { + err := Matcher_MatcherTreeValidationError{ + field: "TreeType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTreeTypePresent = true + + if all { + switch v := interface{}(m.GetPrefixMatchMap()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "PrefixMatchMap", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "PrefixMatchMap", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPrefixMatchMap()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherTreeValidationError{ + field: "PrefixMatchMap", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherTree_CustomMatch: + if v == nil { + err := Matcher_MatcherTreeValidationError{ + field: "TreeType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTreeTypePresent = true + + if all { + switch v := interface{}(m.GetCustomMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "CustomMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherTreeValidationError{ + field: "CustomMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCustomMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherTreeValidationError{ + field: "CustomMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofTreeTypePresent { + err := Matcher_MatcherTreeValidationError{ + field: "TreeType", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Matcher_MatcherTreeMultiError(errors) + } + + return nil +} + +// Matcher_MatcherTreeMultiError is an error wrapping multiple validation +// errors returned by Matcher_MatcherTree.ValidateAll() if the designated +// constraints aren't met. +type Matcher_MatcherTreeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherTreeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherTreeMultiError) AllErrors() []error { return m } + +// Matcher_MatcherTreeValidationError is the validation error returned by +// Matcher_MatcherTree.Validate if the designated constraints aren't met. +type Matcher_MatcherTreeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e Matcher_MatcherTreeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherTreeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Matcher_MatcherTreeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherTreeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_MatcherTreeValidationError) ErrorName() string { + return "Matcher_MatcherTreeValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherTreeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherTree.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherTreeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherTreeValidationError{} + +// Validate checks the field values on Matcher_MatcherList_Predicate with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Matcher_MatcherList_Predicate) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher_MatcherList_Predicate with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// Matcher_MatcherList_PredicateMultiError, or nil if none found. 
+func (m *Matcher_MatcherList_Predicate) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherList_Predicate) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofMatchTypePresent := false + switch v := m.MatchType.(type) { + case *Matcher_MatcherList_Predicate_SinglePredicate_: + if v == nil { + err := Matcher_MatcherList_PredicateValidationError{ + field: "MatchType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchTypePresent = true + + if all { + switch v := interface{}(m.GetSinglePredicate()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "SinglePredicate", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "SinglePredicate", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSinglePredicate()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_PredicateValidationError{ + field: "SinglePredicate", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherList_Predicate_OrMatcher: + if v == nil { + err := Matcher_MatcherList_PredicateValidationError{ + field: "MatchType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchTypePresent = true + + if all { + switch v := interface{}(m.GetOrMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "OrMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "OrMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOrMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_PredicateValidationError{ + field: "OrMatcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherList_Predicate_AndMatcher: + if v == nil { + err := Matcher_MatcherList_PredicateValidationError{ + field: "MatchType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchTypePresent = true + + if all { + switch v := interface{}(m.GetAndMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "AndMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "AndMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAndMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err 
!= nil { + return Matcher_MatcherList_PredicateValidationError{ + field: "AndMatcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherList_Predicate_NotMatcher: + if v == nil { + err := Matcher_MatcherList_PredicateValidationError{ + field: "MatchType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchTypePresent = true + + if all { + switch v := interface{}(m.GetNotMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "NotMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_PredicateValidationError{ + field: "NotMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNotMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_PredicateValidationError{ + field: "NotMatcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofMatchTypePresent { + err := Matcher_MatcherList_PredicateValidationError{ + field: "MatchType", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Matcher_MatcherList_PredicateMultiError(errors) + } + + return nil +} + +// Matcher_MatcherList_PredicateMultiError is an error wrapping multiple +// validation errors returned by Matcher_MatcherList_Predicate.ValidateAll() +// if the designated constraints aren't met. +type Matcher_MatcherList_PredicateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherList_PredicateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherList_PredicateMultiError) AllErrors() []error { return m } + +// Matcher_MatcherList_PredicateValidationError is the validation error +// returned by Matcher_MatcherList_Predicate.Validate if the designated +// constraints aren't met. +type Matcher_MatcherList_PredicateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_MatcherList_PredicateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherList_PredicateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Matcher_MatcherList_PredicateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherList_PredicateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Matcher_MatcherList_PredicateValidationError) ErrorName() string { + return "Matcher_MatcherList_PredicateValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherList_PredicateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherList_Predicate.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherList_PredicateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherList_PredicateValidationError{} + +// Validate checks the field values on Matcher_MatcherList_FieldMatcher with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *Matcher_MatcherList_FieldMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher_MatcherList_FieldMatcher with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// Matcher_MatcherList_FieldMatcherMultiError, or nil if none found. +func (m *Matcher_MatcherList_FieldMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherList_FieldMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetPredicate() == nil { + err := Matcher_MatcherList_FieldMatcherValidationError{ + field: "Predicate", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetPredicate()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_FieldMatcherValidationError{ + field: "Predicate", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_FieldMatcherValidationError{ + field: "Predicate", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPredicate()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_FieldMatcherValidationError{ + field: "Predicate", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if m.GetOnMatch() == nil { + err := Matcher_MatcherList_FieldMatcherValidationError{ + field: "OnMatch", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetOnMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_FieldMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_FieldMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); 
err != nil { + return Matcher_MatcherList_FieldMatcherValidationError{ + field: "OnMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Matcher_MatcherList_FieldMatcherMultiError(errors) + } + + return nil +} + +// Matcher_MatcherList_FieldMatcherMultiError is an error wrapping multiple +// validation errors returned by +// Matcher_MatcherList_FieldMatcher.ValidateAll() if the designated +// constraints aren't met. +type Matcher_MatcherList_FieldMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherList_FieldMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherList_FieldMatcherMultiError) AllErrors() []error { return m } + +// Matcher_MatcherList_FieldMatcherValidationError is the validation error +// returned by Matcher_MatcherList_FieldMatcher.Validate if the designated +// constraints aren't met. +type Matcher_MatcherList_FieldMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_MatcherList_FieldMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherList_FieldMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Matcher_MatcherList_FieldMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherList_FieldMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_MatcherList_FieldMatcherValidationError) ErrorName() string { + return "Matcher_MatcherList_FieldMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherList_FieldMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherList_FieldMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherList_FieldMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherList_FieldMatcherValidationError{} + +// Validate checks the field values on +// Matcher_MatcherList_Predicate_SinglePredicate with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Matcher_MatcherList_Predicate_SinglePredicate) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Matcher_MatcherList_Predicate_SinglePredicate with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// Matcher_MatcherList_Predicate_SinglePredicateMultiError, or nil if none found. 
+func (m *Matcher_MatcherList_Predicate_SinglePredicate) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetInput() == nil { + err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Input", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetInput()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Input", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Input", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInput()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Input", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofMatcherPresent := false + switch v := m.Matcher.(type) { + case *Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch: + if v == nil { + err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Matcher", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatcherPresent = true + + if all { + switch v := interface{}(m.GetValueMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "ValueMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "ValueMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValueMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "ValueMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch: + if v == nil { + err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Matcher", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatcherPresent = true + + if all { + switch v := interface{}(m.GetCustomMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "CustomMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "CustomMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := 
interface{}(m.GetCustomMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "CustomMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofMatcherPresent { + err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{ + field: "Matcher", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Matcher_MatcherList_Predicate_SinglePredicateMultiError(errors) + } + + return nil +} + +// Matcher_MatcherList_Predicate_SinglePredicateMultiError is an error wrapping +// multiple validation errors returned by +// Matcher_MatcherList_Predicate_SinglePredicate.ValidateAll() if the +// designated constraints aren't met. +type Matcher_MatcherList_Predicate_SinglePredicateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherList_Predicate_SinglePredicateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherList_Predicate_SinglePredicateMultiError) AllErrors() []error { return m } + +// Matcher_MatcherList_Predicate_SinglePredicateValidationError is the +// validation error returned by +// Matcher_MatcherList_Predicate_SinglePredicate.Validate if the designated +// constraints aren't met. +type Matcher_MatcherList_Predicate_SinglePredicateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) ErrorName() string { + return "Matcher_MatcherList_Predicate_SinglePredicateValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherList_Predicate_SinglePredicate.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherList_Predicate_SinglePredicateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherList_Predicate_SinglePredicateValidationError{} + +// Validate checks the field values on +// Matcher_MatcherList_Predicate_PredicateList with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *Matcher_MatcherList_Predicate_PredicateList) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Matcher_MatcherList_Predicate_PredicateList with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// Matcher_MatcherList_Predicate_PredicateListMultiError, or nil if none found. +func (m *Matcher_MatcherList_Predicate_PredicateList) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherList_Predicate_PredicateList) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetPredicate()) < 2 { + err := Matcher_MatcherList_Predicate_PredicateListValidationError{ + field: "Predicate", + reason: "value must contain at least 2 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetPredicate() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_PredicateListValidationError{ + field: fmt.Sprintf("Predicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherList_Predicate_PredicateListValidationError{ + field: fmt.Sprintf("Predicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherList_Predicate_PredicateListValidationError{ + field: fmt.Sprintf("Predicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return Matcher_MatcherList_Predicate_PredicateListMultiError(errors) + } + + return nil +} + +// Matcher_MatcherList_Predicate_PredicateListMultiError is an error wrapping +// multiple validation errors returned by +// Matcher_MatcherList_Predicate_PredicateList.ValidateAll() if the designated +// constraints aren't met. +type Matcher_MatcherList_Predicate_PredicateListMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherList_Predicate_PredicateListMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherList_Predicate_PredicateListMultiError) AllErrors() []error { return m } + +// Matcher_MatcherList_Predicate_PredicateListValidationError is the validation +// error returned by Matcher_MatcherList_Predicate_PredicateList.Validate if +// the designated constraints aren't met. +type Matcher_MatcherList_Predicate_PredicateListValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_MatcherList_Predicate_PredicateListValidationError) ErrorName() string { + return "Matcher_MatcherList_Predicate_PredicateListValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherList_Predicate_PredicateList.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherList_Predicate_PredicateListValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherList_Predicate_PredicateListValidationError{} + +// Validate checks the field values on Matcher_MatcherTree_MatchMap with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Matcher_MatcherTree_MatchMap) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Matcher_MatcherTree_MatchMap with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Matcher_MatcherTree_MatchMapMultiError, or nil if none found. +func (m *Matcher_MatcherTree_MatchMap) ValidateAll() error { + return m.validate(true) +} + +func (m *Matcher_MatcherTree_MatchMap) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetMap()) < 1 { + err := Matcher_MatcherTree_MatchMapValidationError{ + field: "Map", + reason: "value must contain at least 1 pair(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + { + sorted_keys := make([]string, len(m.GetMap())) + i := 0 + for key := range m.GetMap() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetMap()[key] + _ = val + + // no validation rules for Map[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Matcher_MatcherTree_MatchMapValidationError{ + field: fmt.Sprintf("Map[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Matcher_MatcherTree_MatchMapValidationError{ + field: fmt.Sprintf("Map[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Matcher_MatcherTree_MatchMapValidationError{ + field: fmt.Sprintf("Map[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + if len(errors) > 0 { + return Matcher_MatcherTree_MatchMapMultiError(errors) + } + + return nil +} + +// Matcher_MatcherTree_MatchMapMultiError is an error wrapping multiple +// validation 
errors returned by Matcher_MatcherTree_MatchMap.ValidateAll() if +// the designated constraints aren't met. +type Matcher_MatcherTree_MatchMapMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Matcher_MatcherTree_MatchMapMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Matcher_MatcherTree_MatchMapMultiError) AllErrors() []error { return m } + +// Matcher_MatcherTree_MatchMapValidationError is the validation error returned +// by Matcher_MatcherTree_MatchMap.Validate if the designated constraints +// aren't met. +type Matcher_MatcherTree_MatchMapValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Matcher_MatcherTree_MatchMapValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Matcher_MatcherTree_MatchMapValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Matcher_MatcherTree_MatchMapValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Matcher_MatcherTree_MatchMapValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Matcher_MatcherTree_MatchMapValidationError) ErrorName() string { + return "Matcher_MatcherTree_MatchMapValidationError" +} + +// Error satisfies the builtin error interface +func (e Matcher_MatcherTree_MatchMapValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatcher_MatcherTree_MatchMap.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Matcher_MatcherTree_MatchMapValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Matcher_MatcherTree_MatchMapValidationError{} + +// Validate checks the field values on MatchPredicate_MatchSet with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *MatchPredicate_MatchSet) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MatchPredicate_MatchSet with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MatchPredicate_MatchSetMultiError, or nil if none found. 
+func (m *MatchPredicate_MatchSet) ValidateAll() error { + return m.validate(true) +} + +func (m *MatchPredicate_MatchSet) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetRules()) < 2 { + err := MatchPredicate_MatchSetValidationError{ + field: "Rules", + reason: "value must contain at least 2 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRules() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicate_MatchSetValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicate_MatchSetValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicate_MatchSetValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return MatchPredicate_MatchSetMultiError(errors) + } + + return nil +} + +// MatchPredicate_MatchSetMultiError is an error wrapping multiple validation +// errors returned by MatchPredicate_MatchSet.ValidateAll() if the designated +// constraints aren't met. +type MatchPredicate_MatchSetMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MatchPredicate_MatchSetMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MatchPredicate_MatchSetMultiError) AllErrors() []error { return m } + +// MatchPredicate_MatchSetValidationError is the validation error returned by +// MatchPredicate_MatchSet.Validate if the designated constraints aren't met. +type MatchPredicate_MatchSetValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MatchPredicate_MatchSetValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MatchPredicate_MatchSetValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MatchPredicate_MatchSetValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MatchPredicate_MatchSetValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e MatchPredicate_MatchSetValidationError) ErrorName() string { + return "MatchPredicate_MatchSetValidationError" +} + +// Error satisfies the builtin error interface +func (e MatchPredicate_MatchSetValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatchPredicate_MatchSet.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MatchPredicate_MatchSetValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MatchPredicate_MatchSetValidationError{} + +// Validate checks the field values on HttpGenericBodyMatch_GenericTextMatch +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *HttpGenericBodyMatch_GenericTextMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpGenericBodyMatch_GenericTextMatch +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// HttpGenericBodyMatch_GenericTextMatchMultiError, or nil if none found. +func (m *HttpGenericBodyMatch_GenericTextMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofRulePresent := false + switch v := m.Rule.(type) { + case *HttpGenericBodyMatch_GenericTextMatch_StringMatch: + if v == nil { + err := HttpGenericBodyMatch_GenericTextMatchValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if utf8.RuneCountInString(m.GetStringMatch()) < 1 { + err := HttpGenericBodyMatch_GenericTextMatchValidationError{ + field: "StringMatch", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch: + if v == nil { + err := HttpGenericBodyMatch_GenericTextMatchValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if len(m.GetBinaryMatch()) < 1 { + err := HttpGenericBodyMatch_GenericTextMatchValidationError{ + field: "BinaryMatch", + reason: "value length must be at least 1 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + default: + _ = v // ensures v is used + } + if !oneofRulePresent { + err := HttpGenericBodyMatch_GenericTextMatchValidationError{ + field: "Rule", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HttpGenericBodyMatch_GenericTextMatchMultiError(errors) + } + + return nil +} + +// HttpGenericBodyMatch_GenericTextMatchMultiError is an error wrapping +// multiple validation errors returned by +// HttpGenericBodyMatch_GenericTextMatch.ValidateAll() if the designated +// constraints aren't met. +type HttpGenericBodyMatch_GenericTextMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m HttpGenericBodyMatch_GenericTextMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpGenericBodyMatch_GenericTextMatchMultiError) AllErrors() []error { return m } + +// HttpGenericBodyMatch_GenericTextMatchValidationError is the validation error +// returned by HttpGenericBodyMatch_GenericTextMatch.Validate if the +// designated constraints aren't met. +type HttpGenericBodyMatch_GenericTextMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpGenericBodyMatch_GenericTextMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpGenericBodyMatch_GenericTextMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpGenericBodyMatch_GenericTextMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpGenericBodyMatch_GenericTextMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpGenericBodyMatch_GenericTextMatchValidationError) ErrorName() string { + return "HttpGenericBodyMatch_GenericTextMatchValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpGenericBodyMatch_GenericTextMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpGenericBodyMatch_GenericTextMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpGenericBodyMatch_GenericTextMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpGenericBodyMatch_GenericTextMatchValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher_vtproto.pb.go new file mode 100644 index 0000000000..431572ef8e --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3/matcher_vtproto.pb.go @@ -0,0 +1,2035 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/common/matcher/v3/matcher.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Matcher_OnMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Matcher_OnMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_OnMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.OnMatch.(*Matcher_OnMatch_Action); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.OnMatch.(*Matcher_OnMatch_Matcher); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Matcher_OnMatch_Matcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_OnMatch_Matcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Matcher != nil { + size, err := m.Matcher.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Matcher_OnMatch_Action) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_OnMatch_Action) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Action != nil { + if vtmsg, ok := interface{}(m.Action).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Action) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherList_Predicate_SinglePredicate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Matcher.(*Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch); ok { + size, err := 
msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Matcher.(*Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Input != nil { + if vtmsg, ok := interface{}(m.Input).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Input) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ValueMatch != nil { + if vtmsg, ok := interface{}(m.ValueMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ValueMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.CustomMatch != nil { + if vtmsg, ok := interface{}(m.CustomMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CustomMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherList_Predicate_PredicateList) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Matcher_MatcherList_Predicate_PredicateList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate_PredicateList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if 
m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Predicate) > 0 { + for iNdEx := len(m.Predicate) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Predicate[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Matcher_MatcherList_Predicate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Matcher_MatcherList_Predicate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.MatchType.(*Matcher_MatcherList_Predicate_NotMatcher); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchType.(*Matcher_MatcherList_Predicate_AndMatcher); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchType.(*Matcher_MatcherList_Predicate_OrMatcher); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchType.(*Matcher_MatcherList_Predicate_SinglePredicate_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SinglePredicate != nil { + size, err := m.SinglePredicate.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherList_Predicate_OrMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate_OrMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrMatcher != nil { + size, err := m.OrMatcher.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherList_Predicate_AndMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate_AndMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if 
m.AndMatcher != nil { + size, err := m.AndMatcher.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherList_Predicate_NotMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_Predicate_NotMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NotMatcher != nil { + size, err := m.NotMatcher.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherList_FieldMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Matcher_MatcherList_FieldMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_FieldMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.OnMatch != nil { + size, err := m.OnMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Predicate != nil { + size, err := m.Predicate.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Matcher_MatcherList) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Matcher_MatcherList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Matchers) > 0 { + for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Matchers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Matcher_MatcherTree_MatchMap) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*Matcher_MatcherTree_MatchMap) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherTree_MatchMap) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Map) > 0 { + for k := range m.Map { + v := m.Map[k] + baseI := i + size, err := v.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Matcher_MatcherTree) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Matcher_MatcherTree) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherTree) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.TreeType.(*Matcher_MatcherTree_CustomMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.TreeType.(*Matcher_MatcherTree_PrefixMatchMap); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.TreeType.(*Matcher_MatcherTree_ExactMatchMap); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Input != nil { + if vtmsg, ok := interface{}(m.Input).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Input) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Matcher_MatcherTree_ExactMatchMap) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherTree_ExactMatchMap) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExactMatchMap != nil { + size, err := m.ExactMatchMap.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherTree_PrefixMatchMap) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m 
*Matcher_MatcherTree_PrefixMatchMap) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PrefixMatchMap != nil { + size, err := m.PrefixMatchMap.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherTree_CustomMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherTree_CustomMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.CustomMatch != nil { + if vtmsg, ok := interface{}(m.CustomMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CustomMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Matcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Matcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.OnNoMatch != nil { + size, err := m.OnNoMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if msg, ok := m.MatcherType.(*Matcher_MatcherTree_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatcherType.(*Matcher_MatcherList_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Matcher_MatcherList_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherList_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MatcherList != nil { + size, err := m.MatcherList.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Matcher_MatcherTree_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Matcher_MatcherTree_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MatcherTree != 
nil { + size, err := m.MatcherTree.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_MatchSet) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchPredicate_MatchSet) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_MatchSet) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Rules[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *MatchPredicate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchPredicate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Rule.(*MatchPredicate_HttpResponseGenericBodyMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpRequestGenericBodyMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpResponseTrailersMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpResponseHeadersMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpRequestTrailersMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpRequestHeadersMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_AnyMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_NotMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_AndMatch); ok { + size, 
err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_OrMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *MatchPredicate_OrMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_OrMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrMatch != nil { + size, err := m.OrMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_AndMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_AndMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AndMatch != nil { + size, err := m.AndMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_NotMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_NotMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NotMatch != nil { + size, err := m.NotMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_AnyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_AnyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.AnyMatch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpRequestHeadersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpRequestHeadersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpRequestHeadersMatch != nil { + size, err := m.HttpRequestHeadersMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpRequestTrailersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpRequestTrailersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpRequestTrailersMatch != nil { + size, err := 
m.HttpRequestTrailersMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpResponseHeadersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpResponseHeadersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpResponseHeadersMatch != nil { + size, err := m.HttpResponseHeadersMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpResponseTrailersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpResponseTrailersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpResponseTrailersMatch != nil { + size, err := m.HttpResponseTrailersMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpRequestGenericBodyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpRequestGenericBodyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpRequestGenericBodyMatch != nil { + size, err := m.HttpRequestGenericBodyMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpResponseGenericBodyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpResponseGenericBodyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpResponseGenericBodyMatch != nil { + size, err := m.HttpResponseGenericBodyMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *HttpHeadersMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpHeadersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpHeadersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + 
i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Headers[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Headers[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Rule.(*HttpGenericBodyMatch_GenericTextMatch_BinaryMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*HttpGenericBodyMatch_GenericTextMatch_StringMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *HttpGenericBodyMatch_GenericTextMatch_StringMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpGenericBodyMatch_GenericTextMatch_StringMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringMatch) + copy(dAtA[i:], m.StringMatch) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StringMatch))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.BinaryMatch) + copy(dAtA[i:], m.BinaryMatch) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.BinaryMatch))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *HttpGenericBodyMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpGenericBodyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpGenericBodyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Patterns) > 0 { + for iNdEx := len(m.Patterns) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Patterns[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.BytesLimit != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.BytesLimit)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Matcher_OnMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.OnMatch.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_OnMatch_Matcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Matcher != nil { + l = m.Matcher.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_OnMatch_Action) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Action != nil { + if size, ok := interface{}(m.Action).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Action) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherList_Predicate_SinglePredicate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Input != nil { + if size, ok := interface{}(m.Input).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Input) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.Matcher.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValueMatch != nil { + if size, ok := interface{}(m.ValueMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ValueMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CustomMatch != nil { + if size, ok := interface{}(m.CustomMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CustomMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherList_Predicate_PredicateList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Predicate) > 0 { + for _, e := range m.Predicate { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_MatcherList_Predicate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.MatchType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_MatcherList_Predicate_SinglePredicate_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SinglePredicate != nil { + l = m.SinglePredicate.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m 
*Matcher_MatcherList_Predicate_OrMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrMatcher != nil { + l = m.OrMatcher.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherList_Predicate_AndMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AndMatcher != nil { + l = m.AndMatcher.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherList_Predicate_NotMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NotMatcher != nil { + l = m.NotMatcher.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherList_FieldMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Predicate != nil { + l = m.Predicate.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OnMatch != nil { + l = m.OnMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_MatcherList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Matchers) > 0 { + for _, e := range m.Matchers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_MatcherTree_MatchMap) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Map) > 0 { + for k, v := range m.Map { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_MatcherTree) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Input != nil { + if size, ok := interface{}(m.Input).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Input) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.TreeType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_MatcherTree_ExactMatchMap) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExactMatchMap != nil { + l = m.ExactMatchMap.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherTree_PrefixMatchMap) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PrefixMatchMap != nil { + l = m.PrefixMatchMap.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherTree_CustomMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CustomMatch != nil { + if size, ok := interface{}(m.CustomMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CustomMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.MatcherType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.OnNoMatch != nil { + l = m.OnNoMatch.SizeVT() + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Matcher_MatcherList_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MatcherList != nil { + l = m.MatcherList.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Matcher_MatcherTree_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MatcherTree != nil { + l = m.MatcherTree.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_MatchSet) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *MatchPredicate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Rule.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *MatchPredicate_OrMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrMatch != nil { + l = m.OrMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_AndMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AndMatch != nil { + l = m.AndMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_NotMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NotMatch != nil { + l = m.NotMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_AnyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *MatchPredicate_HttpRequestHeadersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpRequestHeadersMatch != nil { + l = m.HttpRequestHeadersMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpRequestTrailersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpRequestTrailersMatch != nil { + l = m.HttpRequestTrailersMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpResponseHeadersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpResponseHeadersMatch != nil { + l = m.HttpResponseHeadersMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpResponseTrailersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpResponseTrailersMatch != nil { + l = m.HttpResponseTrailersMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpRequestGenericBodyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpRequestGenericBodyMatch != nil { + l = m.HttpRequestGenericBodyMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpResponseGenericBodyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l 
+ if m.HttpResponseGenericBodyMatch != nil { + l = m.HttpResponseGenericBodyMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HttpHeadersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Headers) > 0 { + for _, e := range m.Headers { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Rule.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *HttpGenericBodyMatch_GenericTextMatch_StringMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.StringMatch) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.BinaryMatch) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HttpGenericBodyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BytesLimit != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.BytesLimit)) + } + if len(m.Patterns) > 0 { + for _, e := range m.Patterns { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.go new file mode 100644 index 0000000000..a0852aa600 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.go @@ -0,0 +1,1110 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/address.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SocketAddress_Protocol int32 + +const ( + SocketAddress_TCP SocketAddress_Protocol = 0 + SocketAddress_UDP SocketAddress_Protocol = 1 +) + +// Enum value maps for SocketAddress_Protocol. 
+var ( + SocketAddress_Protocol_name = map[int32]string{ + 0: "TCP", + 1: "UDP", + } + SocketAddress_Protocol_value = map[string]int32{ + "TCP": 0, + "UDP": 1, + } +) + +func (x SocketAddress_Protocol) Enum() *SocketAddress_Protocol { + p := new(SocketAddress_Protocol) + *p = x + return p +} + +func (x SocketAddress_Protocol) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SocketAddress_Protocol) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_address_proto_enumTypes[0].Descriptor() +} + +func (SocketAddress_Protocol) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_address_proto_enumTypes[0] +} + +func (x SocketAddress_Protocol) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SocketAddress_Protocol.Descriptor instead. +func (SocketAddress_Protocol) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{2, 0} +} + +type Pipe struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unix Domain Socket path. On Linux, paths starting with '@' will use the + // abstract namespace. The starting '@' is replaced by a null byte by Envoy. + // Paths starting with '@' will result in an error in environments other than + // Linux. + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + // The mode for the Pipe. Not applicable for abstract sockets. + Mode uint32 `protobuf:"varint,2,opt,name=mode,proto3" json:"mode,omitempty"` +} + +func (x *Pipe) Reset() { + *x = Pipe{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Pipe) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Pipe) ProtoMessage() {} + +func (x *Pipe) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Pipe.ProtoReflect.Descriptor instead. +func (*Pipe) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{0} +} + +func (x *Pipe) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *Pipe) GetMode() uint32 { + if x != nil { + return x.Mode + } + return 0 +} + +// The address represents an envoy internal listener. +// [#comment: TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30.] +type EnvoyInternalAddress struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to AddressNameSpecifier: + // + // *EnvoyInternalAddress_ServerListenerName + AddressNameSpecifier isEnvoyInternalAddress_AddressNameSpecifier `protobuf_oneof:"address_name_specifier"` + // Specifies an endpoint identifier to distinguish between multiple endpoints for the same internal listener in a + // single upstream pool. Only used in the upstream addresses for tracking changes to individual endpoints. This, for + // example, may be set to the final destination IP for the target internal listener. 
+ EndpointId string `protobuf:"bytes,2,opt,name=endpoint_id,json=endpointId,proto3" json:"endpoint_id,omitempty"` +} + +func (x *EnvoyInternalAddress) Reset() { + *x = EnvoyInternalAddress{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnvoyInternalAddress) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnvoyInternalAddress) ProtoMessage() {} + +func (x *EnvoyInternalAddress) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnvoyInternalAddress.ProtoReflect.Descriptor instead. +func (*EnvoyInternalAddress) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{1} +} + +func (m *EnvoyInternalAddress) GetAddressNameSpecifier() isEnvoyInternalAddress_AddressNameSpecifier { + if m != nil { + return m.AddressNameSpecifier + } + return nil +} + +func (x *EnvoyInternalAddress) GetServerListenerName() string { + if x, ok := x.GetAddressNameSpecifier().(*EnvoyInternalAddress_ServerListenerName); ok { + return x.ServerListenerName + } + return "" +} + +func (x *EnvoyInternalAddress) GetEndpointId() string { + if x != nil { + return x.EndpointId + } + return "" +} + +type isEnvoyInternalAddress_AddressNameSpecifier interface { + isEnvoyInternalAddress_AddressNameSpecifier() +} + +type EnvoyInternalAddress_ServerListenerName struct { + // Specifies the :ref:`name ` of the + // internal listener. + ServerListenerName string `protobuf:"bytes,1,opt,name=server_listener_name,json=serverListenerName,proto3,oneof"` +} + +func (*EnvoyInternalAddress_ServerListenerName) isEnvoyInternalAddress_AddressNameSpecifier() {} + +// [#next-free-field: 7] +type SocketAddress struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Protocol SocketAddress_Protocol `protobuf:"varint,1,opt,name=protocol,proto3,enum=envoy.config.core.v3.SocketAddress_Protocol" json:"protocol,omitempty"` + // The address for this socket. :ref:`Listeners ` will bind + // to the address. An empty address is not allowed. Specify “0.0.0.0“ or “::“ + // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented: + // It is possible to distinguish a Listener address via the prefix/suffix matching + // in :ref:`FilterChainMatch `.] When used + // within an upstream :ref:`BindConfig `, the address + // controls the source address of outbound connections. For :ref:`clusters + // `, the cluster type determines whether the + // address must be an IP (“STATIC“ or “EDS“ clusters) or a hostname resolved by DNS + // (“STRICT_DNS“ or “LOGICAL_DNS“ clusters). Address resolution can be customized + // via :ref:`resolver_name `. + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + // Types that are assignable to PortSpecifier: + // + // *SocketAddress_PortValue + // *SocketAddress_NamedPort + PortSpecifier isSocketAddress_PortSpecifier `protobuf_oneof:"port_specifier"` + // The name of the custom resolver. This must have been registered with Envoy. If + // this is empty, a context dependent default applies. 
If the address is a concrete + // IP address, no resolution will occur. If address is a hostname this + // should be set for resolution other than DNS. Specifying a custom resolver with + // “STRICT_DNS“ or “LOGICAL_DNS“ will generate an error at runtime. + ResolverName string `protobuf:"bytes,5,opt,name=resolver_name,json=resolverName,proto3" json:"resolver_name,omitempty"` + // When binding to an IPv6 address above, this enables `IPv4 compatibility + // `_. Binding to “::“ will + // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into + // IPv6 space as “::FFFF:“. + Ipv4Compat bool `protobuf:"varint,6,opt,name=ipv4_compat,json=ipv4Compat,proto3" json:"ipv4_compat,omitempty"` +} + +func (x *SocketAddress) Reset() { + *x = SocketAddress{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SocketAddress) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SocketAddress) ProtoMessage() {} + +func (x *SocketAddress) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SocketAddress.ProtoReflect.Descriptor instead. +func (*SocketAddress) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{2} +} + +func (x *SocketAddress) GetProtocol() SocketAddress_Protocol { + if x != nil { + return x.Protocol + } + return SocketAddress_TCP +} + +func (x *SocketAddress) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (m *SocketAddress) GetPortSpecifier() isSocketAddress_PortSpecifier { + if m != nil { + return m.PortSpecifier + } + return nil +} + +func (x *SocketAddress) GetPortValue() uint32 { + if x, ok := x.GetPortSpecifier().(*SocketAddress_PortValue); ok { + return x.PortValue + } + return 0 +} + +func (x *SocketAddress) GetNamedPort() string { + if x, ok := x.GetPortSpecifier().(*SocketAddress_NamedPort); ok { + return x.NamedPort + } + return "" +} + +func (x *SocketAddress) GetResolverName() string { + if x != nil { + return x.ResolverName + } + return "" +} + +func (x *SocketAddress) GetIpv4Compat() bool { + if x != nil { + return x.Ipv4Compat + } + return false +} + +type isSocketAddress_PortSpecifier interface { + isSocketAddress_PortSpecifier() +} + +type SocketAddress_PortValue struct { + PortValue uint32 `protobuf:"varint,3,opt,name=port_value,json=portValue,proto3,oneof"` +} + +type SocketAddress_NamedPort struct { + // This is only valid if :ref:`resolver_name + // ` is specified below and the + // named resolver is capable of named port resolution. + NamedPort string `protobuf:"bytes,4,opt,name=named_port,json=namedPort,proto3,oneof"` +} + +func (*SocketAddress_PortValue) isSocketAddress_PortSpecifier() {} + +func (*SocketAddress_NamedPort) isSocketAddress_PortSpecifier() {} + +type TcpKeepalive struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Maximum number of keepalive probes to send without response before deciding + // the connection is dead. Default is to use the OS level configuration (unless + // overridden, Linux defaults to 9.) 
+ KeepaliveProbes *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=keepalive_probes,json=keepaliveProbes,proto3" json:"keepalive_probes,omitempty"` + // The number of seconds a connection needs to be idle before keep-alive probes + // start being sent. Default is to use the OS level configuration (unless + // overridden, Linux defaults to 7200s (i.e., 2 hours.) + KeepaliveTime *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=keepalive_time,json=keepaliveTime,proto3" json:"keepalive_time,omitempty"` + // The number of seconds between keep-alive probes. Default is to use the OS + // level configuration (unless overridden, Linux defaults to 75s.) + KeepaliveInterval *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=keepalive_interval,json=keepaliveInterval,proto3" json:"keepalive_interval,omitempty"` +} + +func (x *TcpKeepalive) Reset() { + *x = TcpKeepalive{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TcpKeepalive) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TcpKeepalive) ProtoMessage() {} + +func (x *TcpKeepalive) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TcpKeepalive.ProtoReflect.Descriptor instead. +func (*TcpKeepalive) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{3} +} + +func (x *TcpKeepalive) GetKeepaliveProbes() *wrapperspb.UInt32Value { + if x != nil { + return x.KeepaliveProbes + } + return nil +} + +func (x *TcpKeepalive) GetKeepaliveTime() *wrapperspb.UInt32Value { + if x != nil { + return x.KeepaliveTime + } + return nil +} + +func (x *TcpKeepalive) GetKeepaliveInterval() *wrapperspb.UInt32Value { + if x != nil { + return x.KeepaliveInterval + } + return nil +} + +type ExtraSourceAddress struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The additional address to bind. + Address *SocketAddress `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. If specified, this will override the + // :ref:`socket_options ` + // in the BindConfig. If specified with no + // :ref:`socket_options ` + // or an empty list of :ref:`socket_options `, + // it means no socket option will apply. 
+ SocketOptions *SocketOptionsOverride `protobuf:"bytes,2,opt,name=socket_options,json=socketOptions,proto3" json:"socket_options,omitempty"` +} + +func (x *ExtraSourceAddress) Reset() { + *x = ExtraSourceAddress{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtraSourceAddress) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtraSourceAddress) ProtoMessage() {} + +func (x *ExtraSourceAddress) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtraSourceAddress.ProtoReflect.Descriptor instead. +func (*ExtraSourceAddress) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{4} +} + +func (x *ExtraSourceAddress) GetAddress() *SocketAddress { + if x != nil { + return x.Address + } + return nil +} + +func (x *ExtraSourceAddress) GetSocketOptions() *SocketOptionsOverride { + if x != nil { + return x.SocketOptions + } + return nil +} + +// [#next-free-field: 7] +type BindConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The address to bind to when creating a socket. + SourceAddress *SocketAddress `protobuf:"bytes,1,opt,name=source_address,json=sourceAddress,proto3" json:"source_address,omitempty"` + // Whether to set the “IP_FREEBIND“ option when creating the socket. When this + // flag is set to true, allows the :ref:`source_address + // ` to be an IP address + // that is not configured on the system running Envoy. When this flag is set + // to false, the option “IP_FREEBIND“ is disabled on the socket. When this + // flag is not set (default), the socket is not modified, i.e. the option is + // neither enabled nor disabled. + Freebind *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=freebind,proto3" json:"freebind,omitempty"` + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. + SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options,json=socketOptions,proto3" json:"socket_options,omitempty"` + // Extra source addresses appended to the address specified in the “source_address“ + // field. This enables to specify multiple source addresses. + // The source address selection is determined by :ref:`local_address_selector + // `. + ExtraSourceAddresses []*ExtraSourceAddress `protobuf:"bytes,5,rep,name=extra_source_addresses,json=extraSourceAddresses,proto3" json:"extra_source_addresses,omitempty"` + // Deprecated by + // :ref:`extra_source_addresses ` + // + // Deprecated: Marked as deprecated in envoy/config/core/v3/address.proto. + AdditionalSourceAddresses []*SocketAddress `protobuf:"bytes,4,rep,name=additional_source_addresses,json=additionalSourceAddresses,proto3" json:"additional_source_addresses,omitempty"` + // Custom local address selector to override the default (i.e. + // :ref:`DefaultLocalAddressSelector + // `). 
+ // [#extension-category: envoy.upstream.local_address_selector] + LocalAddressSelector *TypedExtensionConfig `protobuf:"bytes,6,opt,name=local_address_selector,json=localAddressSelector,proto3" json:"local_address_selector,omitempty"` +} + +func (x *BindConfig) Reset() { + *x = BindConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BindConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BindConfig) ProtoMessage() {} + +func (x *BindConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BindConfig.ProtoReflect.Descriptor instead. +func (*BindConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{5} +} + +func (x *BindConfig) GetSourceAddress() *SocketAddress { + if x != nil { + return x.SourceAddress + } + return nil +} + +func (x *BindConfig) GetFreebind() *wrapperspb.BoolValue { + if x != nil { + return x.Freebind + } + return nil +} + +func (x *BindConfig) GetSocketOptions() []*SocketOption { + if x != nil { + return x.SocketOptions + } + return nil +} + +func (x *BindConfig) GetExtraSourceAddresses() []*ExtraSourceAddress { + if x != nil { + return x.ExtraSourceAddresses + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/core/v3/address.proto. +func (x *BindConfig) GetAdditionalSourceAddresses() []*SocketAddress { + if x != nil { + return x.AdditionalSourceAddresses + } + return nil +} + +func (x *BindConfig) GetLocalAddressSelector() *TypedExtensionConfig { + if x != nil { + return x.LocalAddressSelector + } + return nil +} + +// Addresses specify either a logical or physical address and port, which are +// used to tell Envoy where to bind/listen, connect to upstream and find +// management servers. +type Address struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Address: + // + // *Address_SocketAddress + // *Address_Pipe + // *Address_EnvoyInternalAddress + Address isAddress_Address `protobuf_oneof:"address"` +} + +func (x *Address) Reset() { + *x = Address{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Address) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Address) ProtoMessage() {} + +func (x *Address) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Address.ProtoReflect.Descriptor instead. 
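// Editor's note: an illustrative sketch, not part of the generated file, of a
// BindConfig that sets a primary source address and appends one extra source
// address, mirroring the field comments above. The addresses are placeholders;
// additional_source_addresses is marked deprecated in favour of
// extra_source_addresses, so only the latter is shown.
//
//	bind := &BindConfig{
//		SourceAddress: &SocketAddress{
//			Address:       "10.0.0.1",
//			PortSpecifier: &SocketAddress_PortValue{PortValue: 0},
//		},
//		Freebind: wrapperspb.Bool(true),
//		ExtraSourceAddresses: []*ExtraSourceAddress{
//			{Address: &SocketAddress{
//				Address:       "10.0.0.2",
//				PortSpecifier: &SocketAddress_PortValue{PortValue: 0},
//			}},
//		},
//	}
//	_ = bind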
+func (*Address) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{6} +} + +func (m *Address) GetAddress() isAddress_Address { + if m != nil { + return m.Address + } + return nil +} + +func (x *Address) GetSocketAddress() *SocketAddress { + if x, ok := x.GetAddress().(*Address_SocketAddress); ok { + return x.SocketAddress + } + return nil +} + +func (x *Address) GetPipe() *Pipe { + if x, ok := x.GetAddress().(*Address_Pipe); ok { + return x.Pipe + } + return nil +} + +func (x *Address) GetEnvoyInternalAddress() *EnvoyInternalAddress { + if x, ok := x.GetAddress().(*Address_EnvoyInternalAddress); ok { + return x.EnvoyInternalAddress + } + return nil +} + +type isAddress_Address interface { + isAddress_Address() +} + +type Address_SocketAddress struct { + SocketAddress *SocketAddress `protobuf:"bytes,1,opt,name=socket_address,json=socketAddress,proto3,oneof"` +} + +type Address_Pipe struct { + Pipe *Pipe `protobuf:"bytes,2,opt,name=pipe,proto3,oneof"` +} + +type Address_EnvoyInternalAddress struct { + // Specifies a user-space address handled by :ref:`internal listeners + // `. + EnvoyInternalAddress *EnvoyInternalAddress `protobuf:"bytes,3,opt,name=envoy_internal_address,json=envoyInternalAddress,proto3,oneof"` +} + +func (*Address_SocketAddress) isAddress_Address() {} + +func (*Address_Pipe) isAddress_Address() {} + +func (*Address_EnvoyInternalAddress) isAddress_Address() {} + +// CidrRange specifies an IP Address and a prefix length to construct +// the subnet mask for a `CIDR `_ range. +type CidrRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // IPv4 or IPv6 address, e.g. “192.0.0.0“ or “2001:db8::“. + AddressPrefix string `protobuf:"bytes,1,opt,name=address_prefix,json=addressPrefix,proto3" json:"address_prefix,omitempty"` + // Length of prefix, e.g. 0, 32. Defaults to 0 when unset. + PrefixLen *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=prefix_len,json=prefixLen,proto3" json:"prefix_len,omitempty"` +} + +func (x *CidrRange) Reset() { + *x = CidrRange{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CidrRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CidrRange) ProtoMessage() {} + +func (x *CidrRange) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_address_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CidrRange.ProtoReflect.Descriptor instead. 
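// Editor's note: a sketch, not part of the generated file, of the Address oneof.
// Exactly one wrapper type is assigned to the Address field, and the typed getters
// return nil for the variants that are not set. Values are placeholders.
//
//	a := &Address{
//		Address: &Address_SocketAddress{
//			SocketAddress: &SocketAddress{
//				Address:       "::1",
//				PortSpecifier: &SocketAddress_PortValue{PortValue: 443},
//			},
//		},
//	}
//	_ = a.GetSocketAddress() // non-nil
//	_ = a.GetPipe()          // nil, since the pipe variant is not set
//
//	// A CidrRange such as 10.0.0.0/8 carries its prefix length as a wrapped value:
//	cidr := &CidrRange{AddressPrefix: "10.0.0.0", PrefixLen: wrapperspb.UInt32(8)}
//	_ = cidr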
+func (*CidrRange) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{7} +} + +func (x *CidrRange) GetAddressPrefix() string { + if x != nil { + return x.AddressPrefix + } + return "" +} + +func (x *CidrRange) GetPrefixLen() *wrapperspb.UInt32Value { + if x != nil { + return x.PrefixLen + } + return nil +} + +var File_envoy_config_core_v3_address_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_address_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, + 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, + 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x60, 0x0a, 0x04, 0x50, 0x69, + 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, + 0x1c, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x2a, 0x03, 0x18, 0xff, 0x03, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x3a, 0x1d, 0x9a, + 0xc5, 0x88, 0x1e, 0x18, 0x0a, 0x16, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x69, 0x70, 0x65, 0x22, 0x8a, 0x01, 0x0a, + 0x14, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x65, 0x6e, 
0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x1d, 0x0a, 0x16, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xf6, 0x02, 0x0a, 0x0d, 0x53, 0x6f, + 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x52, 0x0a, 0x08, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, + 0x21, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x12, 0x2a, 0x0a, 0x0a, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, 0x04, 0x18, 0xff, 0xff, + 0x03, 0x48, 0x00, 0x52, 0x09, 0x70, 0x6f, 0x72, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, + 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x70, 0x76, 0x34, 0x5f, 0x63, 0x6f, 0x6d, + 0x70, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x70, 0x76, 0x34, 0x43, + 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x22, 0x1c, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, + 0x50, 0x10, 0x01, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x6f, + 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x15, 0x0a, 0x0e, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, + 0x42, 0x01, 0x22, 0x90, 0x02, 0x0a, 0x0c, 0x54, 0x63, 0x70, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, + 0x69, 0x76, 0x65, 0x12, 0x47, 0x0a, 0x10, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, + 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x6b, 0x65, 0x65, + 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x50, 0x72, 0x6f, 0x62, 0x65, 0x73, 0x12, 0x43, 0x0a, 0x0e, + 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x0d, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x12, 0x4b, 0x0a, 0x12, 0x6b, 
0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x5f, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x11, 0x6b, 0x65, 0x65, + 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x3a, 0x25, + 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x63, 0x70, 0x4b, 0x65, 0x65, 0x70, + 0x61, 0x6c, 0x69, 0x76, 0x65, 0x22, 0xb1, 0x01, 0x0a, 0x12, 0x45, 0x78, 0x74, 0x72, 0x61, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x47, 0x0a, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x52, 0x0a, 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xb4, 0x04, 0x0a, 0x0a, 0x42, 0x69, + 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4a, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x12, 0x36, 0x0a, 0x08, 0x66, 0x72, 0x65, 0x65, 0x62, 0x69, 0x6e, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x08, 0x66, 0x72, 0x65, 0x65, 0x62, 0x69, 0x6e, 0x64, 0x12, 0x49, 0x0a, 0x0e, + 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5e, 0x0a, 0x16, 0x65, 0x78, 0x74, 0x72, 0x61, + 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, + 0x78, 0x74, 0x72, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 
0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x52, 0x14, 0x65, 0x78, 0x74, 0x72, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x70, 0x0a, 0x1b, 0x61, 0x64, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x19, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x60, 0x0a, 0x16, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x14, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x3a, 0x23, 0x9a, 0xc5, 0x88, + 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x22, 0x9f, 0x02, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x4c, 0x0a, 0x0e, + 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x0d, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x04, 0x70, 0x69, + 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x50, 0x69, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 0x70, 0x69, 0x70, 0x65, 0x12, 0x62, 0x0a, 0x16, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x14, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x3a, 0x20, 0x9a, 0xc5, 0x88, 0x1e, 0x1b, 0x0a, 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x42, 0x0e, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x03, 0xf8, + 0x42, 0x01, 0x22, 0xa6, 0x01, 0x0a, 0x09, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 
0x67, 0x65, + 0x12, 0x2e, 0x0a, 0x0e, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x0d, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x12, 0x45, 0x0a, 0x0a, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, 0x80, 0x01, 0x52, 0x09, 0x70, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x4c, 0x65, 0x6e, 0x3a, 0x22, 0x9a, 0xc5, 0x88, 0x1e, 0x1d, 0x0a, 0x1b, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x80, 0x01, 0xba, 0x80, + 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_address_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_address_proto_rawDescData = file_envoy_config_core_v3_address_proto_rawDesc +) + +func file_envoy_config_core_v3_address_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_address_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_address_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_address_proto_rawDescData) + }) + return file_envoy_config_core_v3_address_proto_rawDescData +} + +var file_envoy_config_core_v3_address_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_config_core_v3_address_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_envoy_config_core_v3_address_proto_goTypes = []interface{}{ + (SocketAddress_Protocol)(0), // 0: envoy.config.core.v3.SocketAddress.Protocol + (*Pipe)(nil), // 1: envoy.config.core.v3.Pipe + (*EnvoyInternalAddress)(nil), // 2: envoy.config.core.v3.EnvoyInternalAddress + (*SocketAddress)(nil), // 3: envoy.config.core.v3.SocketAddress + (*TcpKeepalive)(nil), // 4: envoy.config.core.v3.TcpKeepalive + (*ExtraSourceAddress)(nil), // 5: envoy.config.core.v3.ExtraSourceAddress + (*BindConfig)(nil), // 6: envoy.config.core.v3.BindConfig + (*Address)(nil), // 7: envoy.config.core.v3.Address + (*CidrRange)(nil), // 8: envoy.config.core.v3.CidrRange + (*wrapperspb.UInt32Value)(nil), // 9: google.protobuf.UInt32Value + (*SocketOptionsOverride)(nil), // 10: envoy.config.core.v3.SocketOptionsOverride + (*wrapperspb.BoolValue)(nil), // 11: google.protobuf.BoolValue + (*SocketOption)(nil), // 12: envoy.config.core.v3.SocketOption + (*TypedExtensionConfig)(nil), // 13: envoy.config.core.v3.TypedExtensionConfig +} +var 
file_envoy_config_core_v3_address_proto_depIdxs = []int32{ + 0, // 0: envoy.config.core.v3.SocketAddress.protocol:type_name -> envoy.config.core.v3.SocketAddress.Protocol + 9, // 1: envoy.config.core.v3.TcpKeepalive.keepalive_probes:type_name -> google.protobuf.UInt32Value + 9, // 2: envoy.config.core.v3.TcpKeepalive.keepalive_time:type_name -> google.protobuf.UInt32Value + 9, // 3: envoy.config.core.v3.TcpKeepalive.keepalive_interval:type_name -> google.protobuf.UInt32Value + 3, // 4: envoy.config.core.v3.ExtraSourceAddress.address:type_name -> envoy.config.core.v3.SocketAddress + 10, // 5: envoy.config.core.v3.ExtraSourceAddress.socket_options:type_name -> envoy.config.core.v3.SocketOptionsOverride + 3, // 6: envoy.config.core.v3.BindConfig.source_address:type_name -> envoy.config.core.v3.SocketAddress + 11, // 7: envoy.config.core.v3.BindConfig.freebind:type_name -> google.protobuf.BoolValue + 12, // 8: envoy.config.core.v3.BindConfig.socket_options:type_name -> envoy.config.core.v3.SocketOption + 5, // 9: envoy.config.core.v3.BindConfig.extra_source_addresses:type_name -> envoy.config.core.v3.ExtraSourceAddress + 3, // 10: envoy.config.core.v3.BindConfig.additional_source_addresses:type_name -> envoy.config.core.v3.SocketAddress + 13, // 11: envoy.config.core.v3.BindConfig.local_address_selector:type_name -> envoy.config.core.v3.TypedExtensionConfig + 3, // 12: envoy.config.core.v3.Address.socket_address:type_name -> envoy.config.core.v3.SocketAddress + 1, // 13: envoy.config.core.v3.Address.pipe:type_name -> envoy.config.core.v3.Pipe + 2, // 14: envoy.config.core.v3.Address.envoy_internal_address:type_name -> envoy.config.core.v3.EnvoyInternalAddress + 9, // 15: envoy.config.core.v3.CidrRange.prefix_len:type_name -> google.protobuf.UInt32Value + 16, // [16:16] is the sub-list for method output_type + 16, // [16:16] is the sub-list for method input_type + 16, // [16:16] is the sub-list for extension type_name + 16, // [16:16] is the sub-list for extension extendee + 0, // [0:16] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_address_proto_init() } +func file_envoy_config_core_v3_address_proto_init() { + if File_envoy_config_core_v3_address_proto != nil { + return + } + file_envoy_config_core_v3_extension_proto_init() + file_envoy_config_core_v3_socket_option_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_address_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Pipe); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_address_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnvoyInternalAddress); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_address_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SocketAddress); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_address_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TcpKeepalive); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_address_proto_msgTypes[4].Exporter = func(v interface{}, i 
int) interface{} { + switch v := v.(*ExtraSourceAddress); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_address_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BindConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_address_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Address); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_address_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CidrRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_core_v3_address_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*EnvoyInternalAddress_ServerListenerName)(nil), + } + file_envoy_config_core_v3_address_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*SocketAddress_PortValue)(nil), + (*SocketAddress_NamedPort)(nil), + } + file_envoy_config_core_v3_address_proto_msgTypes[6].OneofWrappers = []interface{}{ + (*Address_SocketAddress)(nil), + (*Address_Pipe)(nil), + (*Address_EnvoyInternalAddress)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_address_proto_rawDesc, + NumEnums: 1, + NumMessages: 8, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_address_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_address_proto_depIdxs, + EnumInfos: file_envoy_config_core_v3_address_proto_enumTypes, + MessageInfos: file_envoy_config_core_v3_address_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_address_proto = out.File + file_envoy_config_core_v3_address_proto_rawDesc = nil + file_envoy_config_core_v3_address_proto_goTypes = nil + file_envoy_config_core_v3_address_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.validate.go new file mode 100644 index 0000000000..81dea205cd --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address.pb.validate.go @@ -0,0 +1,1479 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/core/v3/address.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Pipe with the rules defined in the proto +// definition for this message. If any rules are violated, the first error +// encountered is returned, or nil if there are no violations. 
+func (m *Pipe) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Pipe with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in PipeMultiError, or nil if none found. +func (m *Pipe) ValidateAll() error { + return m.validate(true) +} + +func (m *Pipe) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetPath()) < 1 { + err := PipeValidationError{ + field: "Path", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetMode() > 511 { + err := PipeValidationError{ + field: "Mode", + reason: "value must be less than or equal to 511", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return PipeMultiError(errors) + } + + return nil +} + +// PipeMultiError is an error wrapping multiple validation errors returned by +// Pipe.ValidateAll() if the designated constraints aren't met. +type PipeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PipeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PipeMultiError) AllErrors() []error { return m } + +// PipeValidationError is the validation error returned by Pipe.Validate if the +// designated constraints aren't met. +type PipeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PipeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PipeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PipeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PipeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e PipeValidationError) ErrorName() string { return "PipeValidationError" } + +// Error satisfies the builtin error interface +func (e PipeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPipe.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PipeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PipeValidationError{} + +// Validate checks the field values on EnvoyInternalAddress with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *EnvoyInternalAddress) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on EnvoyInternalAddress with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// EnvoyInternalAddressMultiError, or nil if none found. 
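// Editor's note: a sketch, not part of the generated file, showing the difference
// between Validate and ValidateAll as generated above. Validate returns the first
// violation it finds; ValidateAll collects every violation into a *MultiError. The
// Pipe below deliberately violates the "path length must be at least 1 runes" rule.
//
//	p := &Pipe{Path: ""}
//	if err := p.Validate(); err != nil {
//		// err is a PipeValidationError with Field() == "Path"
//	}
//	if err := p.ValidateAll(); err != nil {
//		if merr, ok := err.(PipeMultiError); ok {
//			_ = merr.AllErrors() // every violation, not just the first
//		}
//	}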
+func (m *EnvoyInternalAddress) ValidateAll() error { + return m.validate(true) +} + +func (m *EnvoyInternalAddress) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for EndpointId + + oneofAddressNameSpecifierPresent := false + switch v := m.AddressNameSpecifier.(type) { + case *EnvoyInternalAddress_ServerListenerName: + if v == nil { + err := EnvoyInternalAddressValidationError{ + field: "AddressNameSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofAddressNameSpecifierPresent = true + // no validation rules for ServerListenerName + default: + _ = v // ensures v is used + } + if !oneofAddressNameSpecifierPresent { + err := EnvoyInternalAddressValidationError{ + field: "AddressNameSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return EnvoyInternalAddressMultiError(errors) + } + + return nil +} + +// EnvoyInternalAddressMultiError is an error wrapping multiple validation +// errors returned by EnvoyInternalAddress.ValidateAll() if the designated +// constraints aren't met. +type EnvoyInternalAddressMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EnvoyInternalAddressMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EnvoyInternalAddressMultiError) AllErrors() []error { return m } + +// EnvoyInternalAddressValidationError is the validation error returned by +// EnvoyInternalAddress.Validate if the designated constraints aren't met. +type EnvoyInternalAddressValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EnvoyInternalAddressValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EnvoyInternalAddressValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EnvoyInternalAddressValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EnvoyInternalAddressValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e EnvoyInternalAddressValidationError) ErrorName() string { + return "EnvoyInternalAddressValidationError" +} + +// Error satisfies the builtin error interface +func (e EnvoyInternalAddressValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEnvoyInternalAddress.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EnvoyInternalAddressValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EnvoyInternalAddressValidationError{} + +// Validate checks the field values on SocketAddress with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
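// Editor's note: a sketch, not part of the generated file, of the "oneof is
// required" rule enforced above: an EnvoyInternalAddress with no
// address_name_specifier fails validation, while one carrying a
// server_listener_name passes. The listener name is a placeholder.
//
//	bad := &EnvoyInternalAddress{}
//	_ = bad.Validate() // error: AddressNameSpecifier "value is required"
//
//	good := &EnvoyInternalAddress{
//		AddressNameSpecifier: &EnvoyInternalAddress_ServerListenerName{
//			ServerListenerName: "internal_listener",
//		},
//	}
//	_ = good.Validate() // nil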
+func (m *SocketAddress) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SocketAddress with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in SocketAddressMultiError, or +// nil if none found. +func (m *SocketAddress) ValidateAll() error { + return m.validate(true) +} + +func (m *SocketAddress) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := SocketAddress_Protocol_name[int32(m.GetProtocol())]; !ok { + err := SocketAddressValidationError{ + field: "Protocol", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetAddress()) < 1 { + err := SocketAddressValidationError{ + field: "Address", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for ResolverName + + // no validation rules for Ipv4Compat + + oneofPortSpecifierPresent := false + switch v := m.PortSpecifier.(type) { + case *SocketAddress_PortValue: + if v == nil { + err := SocketAddressValidationError{ + field: "PortSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPortSpecifierPresent = true + + if m.GetPortValue() > 65535 { + err := SocketAddressValidationError{ + field: "PortValue", + reason: "value must be less than or equal to 65535", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *SocketAddress_NamedPort: + if v == nil { + err := SocketAddressValidationError{ + field: "PortSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPortSpecifierPresent = true + // no validation rules for NamedPort + default: + _ = v // ensures v is used + } + if !oneofPortSpecifierPresent { + err := SocketAddressValidationError{ + field: "PortSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return SocketAddressMultiError(errors) + } + + return nil +} + +// SocketAddressMultiError is an error wrapping multiple validation errors +// returned by SocketAddress.ValidateAll() if the designated constraints +// aren't met. +type SocketAddressMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SocketAddressMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SocketAddressMultiError) AllErrors() []error { return m } + +// SocketAddressValidationError is the validation error returned by +// SocketAddress.Validate if the designated constraints aren't met. +type SocketAddressValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SocketAddressValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SocketAddressValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SocketAddressValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
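// Editor's note: a sketch, not part of the generated file, exercising the
// SocketAddress rules generated above: the address must be non-empty, a port
// specifier must be set, and a numeric port may not exceed 65535. Values are
// placeholders.
//
//	sa := &SocketAddress{
//		Address:       "example.internal",
//		PortSpecifier: &SocketAddress_PortValue{PortValue: 70000}, // > 65535
//	}
//	err := sa.ValidateAll()
//	// err wraps a SocketAddressValidationError whose Field() is "PortValue" and
//	// whose Reason() is "value must be less than or equal to 65535".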
+func (e SocketAddressValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SocketAddressValidationError) ErrorName() string { return "SocketAddressValidationError" } + +// Error satisfies the builtin error interface +func (e SocketAddressValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSocketAddress.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SocketAddressValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SocketAddressValidationError{} + +// Validate checks the field values on TcpKeepalive with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *TcpKeepalive) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TcpKeepalive with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in TcpKeepaliveMultiError, or +// nil if none found. +func (m *TcpKeepalive) ValidateAll() error { + return m.validate(true) +} + +func (m *TcpKeepalive) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetKeepaliveProbes()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TcpKeepaliveValidationError{ + field: "KeepaliveProbes", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TcpKeepaliveValidationError{ + field: "KeepaliveProbes", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetKeepaliveProbes()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TcpKeepaliveValidationError{ + field: "KeepaliveProbes", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetKeepaliveTime()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TcpKeepaliveValidationError{ + field: "KeepaliveTime", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TcpKeepaliveValidationError{ + field: "KeepaliveTime", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetKeepaliveTime()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TcpKeepaliveValidationError{ + field: "KeepaliveTime", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetKeepaliveInterval()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TcpKeepaliveValidationError{ + field: "KeepaliveInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, 
TcpKeepaliveValidationError{ + field: "KeepaliveInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetKeepaliveInterval()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TcpKeepaliveValidationError{ + field: "KeepaliveInterval", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return TcpKeepaliveMultiError(errors) + } + + return nil +} + +// TcpKeepaliveMultiError is an error wrapping multiple validation errors +// returned by TcpKeepalive.ValidateAll() if the designated constraints aren't met. +type TcpKeepaliveMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TcpKeepaliveMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TcpKeepaliveMultiError) AllErrors() []error { return m } + +// TcpKeepaliveValidationError is the validation error returned by +// TcpKeepalive.Validate if the designated constraints aren't met. +type TcpKeepaliveValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TcpKeepaliveValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TcpKeepaliveValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TcpKeepaliveValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TcpKeepaliveValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TcpKeepaliveValidationError) ErrorName() string { return "TcpKeepaliveValidationError" } + +// Error satisfies the builtin error interface +func (e TcpKeepaliveValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTcpKeepalive.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TcpKeepaliveValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TcpKeepaliveValidationError{} + +// Validate checks the field values on ExtraSourceAddress with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ExtraSourceAddress) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ExtraSourceAddress with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ExtraSourceAddressMultiError, or nil if none found. 
+func (m *ExtraSourceAddress) ValidateAll() error { + return m.validate(true) +} + +func (m *ExtraSourceAddress) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetAddress() == nil { + err := ExtraSourceAddressValidationError{ + field: "Address", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ExtraSourceAddressValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ExtraSourceAddressValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ExtraSourceAddressValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSocketOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ExtraSourceAddressValidationError{ + field: "SocketOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ExtraSourceAddressValidationError{ + field: "SocketOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSocketOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ExtraSourceAddressValidationError{ + field: "SocketOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ExtraSourceAddressMultiError(errors) + } + + return nil +} + +// ExtraSourceAddressMultiError is an error wrapping multiple validation errors +// returned by ExtraSourceAddress.ValidateAll() if the designated constraints +// aren't met. +type ExtraSourceAddressMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ExtraSourceAddressMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ExtraSourceAddressMultiError) AllErrors() []error { return m } + +// ExtraSourceAddressValidationError is the validation error returned by +// ExtraSourceAddress.Validate if the designated constraints aren't met. +type ExtraSourceAddressValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ExtraSourceAddressValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ExtraSourceAddressValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ExtraSourceAddressValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ExtraSourceAddressValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
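// Editor's note: a sketch, not part of the generated file, of the two rules
// generated above for ExtraSourceAddress: the nested address is required, and,
// when present, it is validated recursively as a SocketAddress. The address
// below is a placeholder.
//
//	_ = (&ExtraSourceAddress{}).Validate() // error: Address "value is required"
//
//	ok := &ExtraSourceAddress{
//		Address: &SocketAddress{
//			Address:       "10.0.0.3",
//			PortSpecifier: &SocketAddress_PortValue{PortValue: 0},
//		},
//	}
//	_ = ok.Validate() // nil; the embedded SocketAddress also passes its own rules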
+func (e ExtraSourceAddressValidationError) ErrorName() string { + return "ExtraSourceAddressValidationError" +} + +// Error satisfies the builtin error interface +func (e ExtraSourceAddressValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sExtraSourceAddress.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ExtraSourceAddressValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ExtraSourceAddressValidationError{} + +// Validate checks the field values on BindConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *BindConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on BindConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in BindConfigMultiError, or +// nil if none found. +func (m *BindConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *BindConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetSourceAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: "SourceAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: "SourceAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSourceAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BindConfigValidationError{ + field: "SourceAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetFreebind()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: "Freebind", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: "Freebind", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFreebind()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BindConfigValidationError{ + field: "Freebind", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetSocketOptions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: 
"embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BindConfigValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetExtraSourceAddresses() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: fmt.Sprintf("ExtraSourceAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: fmt.Sprintf("ExtraSourceAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BindConfigValidationError{ + field: fmt.Sprintf("ExtraSourceAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetAdditionalSourceAddresses() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: fmt.Sprintf("AdditionalSourceAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: fmt.Sprintf("AdditionalSourceAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BindConfigValidationError{ + field: fmt.Sprintf("AdditionalSourceAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetLocalAddressSelector()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: "LocalAddressSelector", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BindConfigValidationError{ + field: "LocalAddressSelector", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalAddressSelector()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BindConfigValidationError{ + field: "LocalAddressSelector", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return BindConfigMultiError(errors) + } + + return nil +} + +// BindConfigMultiError is an error wrapping multiple validation errors +// returned by BindConfig.ValidateAll() if the designated constraints aren't met. +type BindConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m BindConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m BindConfigMultiError) AllErrors() []error { return m } + +// BindConfigValidationError is the validation error returned by +// BindConfig.Validate if the designated constraints aren't met. +type BindConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e BindConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e BindConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e BindConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e BindConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e BindConfigValidationError) ErrorName() string { return "BindConfigValidationError" } + +// Error satisfies the builtin error interface +func (e BindConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBindConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = BindConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = BindConfigValidationError{} + +// Validate checks the field values on Address with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Address) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Address with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in AddressMultiError, or nil if none found. 
+func (m *Address) ValidateAll() error { + return m.validate(true) +} + +func (m *Address) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofAddressPresent := false + switch v := m.Address.(type) { + case *Address_SocketAddress: + if v == nil { + err := AddressValidationError{ + field: "Address", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofAddressPresent = true + + if all { + switch v := interface{}(m.GetSocketAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AddressValidationError{ + field: "SocketAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AddressValidationError{ + field: "SocketAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSocketAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AddressValidationError{ + field: "SocketAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Address_Pipe: + if v == nil { + err := AddressValidationError{ + field: "Address", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofAddressPresent = true + + if all { + switch v := interface{}(m.GetPipe()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AddressValidationError{ + field: "Pipe", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AddressValidationError{ + field: "Pipe", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPipe()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AddressValidationError{ + field: "Pipe", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Address_EnvoyInternalAddress: + if v == nil { + err := AddressValidationError{ + field: "Address", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofAddressPresent = true + + if all { + switch v := interface{}(m.GetEnvoyInternalAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AddressValidationError{ + field: "EnvoyInternalAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AddressValidationError{ + field: "EnvoyInternalAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEnvoyInternalAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AddressValidationError{ + field: "EnvoyInternalAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofAddressPresent { + err := AddressValidationError{ + field: "Address", + reason: "value is required", + } + if !all { + return err + } + errors = 
append(errors, err) + } + + if len(errors) > 0 { + return AddressMultiError(errors) + } + + return nil +} + +// AddressMultiError is an error wrapping multiple validation errors returned +// by Address.ValidateAll() if the designated constraints aren't met. +type AddressMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AddressMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AddressMultiError) AllErrors() []error { return m } + +// AddressValidationError is the validation error returned by Address.Validate +// if the designated constraints aren't met. +type AddressValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AddressValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AddressValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e AddressValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AddressValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e AddressValidationError) ErrorName() string { return "AddressValidationError" } + +// Error satisfies the builtin error interface +func (e AddressValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAddress.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AddressValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AddressValidationError{} + +// Validate checks the field values on CidrRange with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *CidrRange) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CidrRange with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in CidrRangeMultiError, or nil +// if none found. +func (m *CidrRange) ValidateAll() error { + return m.validate(true) +} + +func (m *CidrRange) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetAddressPrefix()) < 1 { + err := CidrRangeValidationError{ + field: "AddressPrefix", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if wrapper := m.GetPrefixLen(); wrapper != nil { + + if wrapper.GetValue() > 128 { + err := CidrRangeValidationError{ + field: "PrefixLen", + reason: "value must be less than or equal to 128", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return CidrRangeMultiError(errors) + } + + return nil +} + +// CidrRangeMultiError is an error wrapping multiple validation errors returned +// by CidrRange.ValidateAll() if the designated constraints aren't met. +type CidrRangeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m CidrRangeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CidrRangeMultiError) AllErrors() []error { return m } + +// CidrRangeValidationError is the validation error returned by +// CidrRange.Validate if the designated constraints aren't met. +type CidrRangeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CidrRangeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CidrRangeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CidrRangeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CidrRangeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CidrRangeValidationError) ErrorName() string { return "CidrRangeValidationError" } + +// Error satisfies the builtin error interface +func (e CidrRangeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCidrRange.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CidrRangeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CidrRangeValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address_vtproto.pb.go new file mode 100644 index 0000000000..cf1777901a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/address_vtproto.pb.go @@ -0,0 +1,859 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/address.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Pipe) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Pipe) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Pipe) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Mode != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Mode)) + i-- + dAtA[i] = 0x10 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EnvoyInternalAddress) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvoyInternalAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EnvoyInternalAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.EndpointId) > 0 { + i -= len(m.EndpointId) + copy(dAtA[i:], m.EndpointId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.EndpointId))) + i-- + dAtA[i] = 0x12 + } + if msg, ok := m.AddressNameSpecifier.(*EnvoyInternalAddress_ServerListenerName); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *EnvoyInternalAddress_ServerListenerName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EnvoyInternalAddress_ServerListenerName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.ServerListenerName) + copy(dAtA[i:], m.ServerListenerName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServerListenerName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *SocketAddress) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SocketAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Ipv4Compat { + i-- + if m.Ipv4Compat { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.ResolverName) > 0 { + i -= len(m.ResolverName) + copy(dAtA[i:], m.ResolverName) + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResolverName))) + i-- + dAtA[i] = 0x2a + } + if msg, ok := m.PortSpecifier.(*SocketAddress_NamedPort); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PortSpecifier.(*SocketAddress_PortValue); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0x12 + } + if m.Protocol != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Protocol)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SocketAddress_PortValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketAddress_PortValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.PortValue)) + i-- + dAtA[i] = 0x18 + return len(dAtA) - i, nil +} +func (m *SocketAddress_NamedPort) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketAddress_NamedPort) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.NamedPort) + copy(dAtA[i:], m.NamedPort) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.NamedPort))) + i-- + dAtA[i] = 0x22 + return len(dAtA) - i, nil +} +func (m *TcpKeepalive) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TcpKeepalive) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TcpKeepalive) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.KeepaliveInterval != nil { + size, err := (*wrapperspb.UInt32Value)(m.KeepaliveInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.KeepaliveTime != nil { + size, err := (*wrapperspb.UInt32Value)(m.KeepaliveTime).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.KeepaliveProbes != nil { + size, err := (*wrapperspb.UInt32Value)(m.KeepaliveProbes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExtraSourceAddress) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExtraSourceAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m 
*ExtraSourceAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SocketOptions != nil { + size, err := m.SocketOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Address != nil { + size, err := m.Address.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BindConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BindConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *BindConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LocalAddressSelector != nil { + size, err := m.LocalAddressSelector.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.ExtraSourceAddresses) > 0 { + for iNdEx := len(m.ExtraSourceAddresses) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ExtraSourceAddresses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.AdditionalSourceAddresses) > 0 { + for iNdEx := len(m.AdditionalSourceAddresses) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.AdditionalSourceAddresses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.SocketOptions) > 0 { + for iNdEx := len(m.SocketOptions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.SocketOptions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if m.Freebind != nil { + size, err := (*wrapperspb.BoolValue)(m.Freebind).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.SourceAddress != nil { + size, err := m.SourceAddress.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Address) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Address) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Address) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Address.(*Address_EnvoyInternalAddress); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Address.(*Address_Pipe); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Address.(*Address_SocketAddress); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Address_SocketAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Address_SocketAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SocketAddress != nil { + size, err := m.SocketAddress.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Address_Pipe) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Address_Pipe) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Pipe != nil { + size, err := m.Pipe.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Address_EnvoyInternalAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Address_EnvoyInternalAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.EnvoyInternalAddress != nil { + size, err := m.EnvoyInternalAddress.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *CidrRange) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CidrRange) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CidrRange) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PrefixLen != nil { + size, err := (*wrapperspb.UInt32Value)(m.PrefixLen).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + 
i-- + dAtA[i] = 0x12 + } + if len(m.AddressPrefix) > 0 { + i -= len(m.AddressPrefix) + copy(dAtA[i:], m.AddressPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AddressPrefix))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Pipe) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Mode != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Mode)) + } + n += len(m.unknownFields) + return n +} + +func (m *EnvoyInternalAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.AddressNameSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + l = len(m.EndpointId) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *EnvoyInternalAddress_ServerListenerName) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ServerListenerName) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *SocketAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Protocol != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Protocol)) + } + l = len(m.Address) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.PortSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + l = len(m.ResolverName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Ipv4Compat { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *SocketAddress_PortValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.PortValue)) + return n +} +func (m *SocketAddress_NamedPort) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.NamedPort) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *TcpKeepalive) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.KeepaliveProbes != nil { + l = (*wrapperspb.UInt32Value)(m.KeepaliveProbes).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.KeepaliveTime != nil { + l = (*wrapperspb.UInt32Value)(m.KeepaliveTime).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.KeepaliveInterval != nil { + l = (*wrapperspb.UInt32Value)(m.KeepaliveInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ExtraSourceAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Address != nil { + l = m.Address.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SocketOptions != nil { + l = m.SocketOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *BindConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SourceAddress != nil { + l = m.SourceAddress.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Freebind != nil { + l = (*wrapperspb.BoolValue)(m.Freebind).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.SocketOptions) > 0 { + for _, e := range m.SocketOptions { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.AdditionalSourceAddresses) > 0 { + for _, e := range m.AdditionalSourceAddresses { + l = e.SizeVT() 
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ExtraSourceAddresses) > 0 { + for _, e := range m.ExtraSourceAddresses { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.LocalAddressSelector != nil { + l = m.LocalAddressSelector.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Address) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Address.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Address_SocketAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SocketAddress != nil { + l = m.SocketAddress.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Address_Pipe) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pipe != nil { + l = m.Pipe.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Address_EnvoyInternalAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EnvoyInternalAddress != nil { + l = m.EnvoyInternalAddress.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CidrRange) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AddressPrefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PrefixLen != nil { + l = (*wrapperspb.UInt32Value)(m.PrefixLen).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.go new file mode 100644 index 0000000000..68841ad157 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.go @@ -0,0 +1,193 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/backoff.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration defining a jittered exponential back off strategy. +type BackoffStrategy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The base interval to be used for the next back off computation. It should + // be greater than zero and less than or equal to :ref:`max_interval + // `. + BaseInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=base_interval,json=baseInterval,proto3" json:"base_interval,omitempty"` + // Specifies the maximum interval between retries. This parameter is optional, + // but must be greater than or equal to the :ref:`base_interval + // ` if set. 
The default + // is 10 times the :ref:`base_interval + // `. + MaxInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=max_interval,json=maxInterval,proto3" json:"max_interval,omitempty"` +} + +func (x *BackoffStrategy) Reset() { + *x = BackoffStrategy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_backoff_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BackoffStrategy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackoffStrategy) ProtoMessage() {} + +func (x *BackoffStrategy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_backoff_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackoffStrategy.ProtoReflect.Descriptor instead. +func (*BackoffStrategy) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_backoff_proto_rawDescGZIP(), []int{0} +} + +func (x *BackoffStrategy) GetBaseInterval() *durationpb.Duration { + if x != nil { + return x.BaseInterval + } + return nil +} + +func (x *BackoffStrategy) GetMaxInterval() *durationpb.Duration { + if x != nil { + return x.MaxInterval + } + return nil +} + +var File_envoy_config_core_v3_backoff_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_backoff_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd3, 0x01, 0x0a, 0x0f, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, + 0x66, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x4e, 0x0a, 0x0d, 0x62, 0x61, 0x73, + 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0e, 0xfa, 0x42, 0x0b, + 0xaa, 0x01, 0x08, 0x08, 0x01, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x0c, 0x62, 0x61, 0x73, + 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x46, 0x0a, 0x0c, 0x6d, 0x61, 0x78, + 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 
0x42, 0x05, 0xaa, + 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x61, 0x63, 0x6b, + 0x6f, 0x66, 0x66, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x42, 0x80, 0x01, 0xba, 0x80, + 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x42, 0x61, 0x63, 0x6b, + 0x6f, 0x66, 0x66, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_backoff_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_backoff_proto_rawDescData = file_envoy_config_core_v3_backoff_proto_rawDesc +) + +func file_envoy_config_core_v3_backoff_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_backoff_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_backoff_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_backoff_proto_rawDescData) + }) + return file_envoy_config_core_v3_backoff_proto_rawDescData +} + +var file_envoy_config_core_v3_backoff_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_core_v3_backoff_proto_goTypes = []interface{}{ + (*BackoffStrategy)(nil), // 0: envoy.config.core.v3.BackoffStrategy + (*durationpb.Duration)(nil), // 1: google.protobuf.Duration +} +var file_envoy_config_core_v3_backoff_proto_depIdxs = []int32{ + 1, // 0: envoy.config.core.v3.BackoffStrategy.base_interval:type_name -> google.protobuf.Duration + 1, // 1: envoy.config.core.v3.BackoffStrategy.max_interval:type_name -> google.protobuf.Duration + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_backoff_proto_init() } +func file_envoy_config_core_v3_backoff_proto_init() { + if File_envoy_config_core_v3_backoff_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_backoff_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BackoffStrategy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_backoff_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_backoff_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_backoff_proto_depIdxs, + MessageInfos: file_envoy_config_core_v3_backoff_proto_msgTypes, + 
}.Build() + File_envoy_config_core_v3_backoff_proto = out.File + file_envoy_config_core_v3_backoff_proto_rawDesc = nil + file_envoy_config_core_v3_backoff_proto_goTypes = nil + file_envoy_config_core_v3_backoff_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.validate.go new file mode 100644 index 0000000000..6c9df76280 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff.pb.validate.go @@ -0,0 +1,208 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/core/v3/backoff.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on BackoffStrategy with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *BackoffStrategy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on BackoffStrategy with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// BackoffStrategyMultiError, or nil if none found. 
+func (m *BackoffStrategy) ValidateAll() error { + return m.validate(true) +} + +func (m *BackoffStrategy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetBaseInterval() == nil { + err := BackoffStrategyValidationError{ + field: "BaseInterval", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if d := m.GetBaseInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = BackoffStrategyValidationError{ + field: "BaseInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gte := time.Duration(0*time.Second + 1000000*time.Nanosecond) + + if dur < gte { + err := BackoffStrategyValidationError{ + field: "BaseInterval", + reason: "value must be greater than or equal to 1ms", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if d := m.GetMaxInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = BackoffStrategyValidationError{ + field: "MaxInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := BackoffStrategyValidationError{ + field: "MaxInterval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if len(errors) > 0 { + return BackoffStrategyMultiError(errors) + } + + return nil +} + +// BackoffStrategyMultiError is an error wrapping multiple validation errors +// returned by BackoffStrategy.ValidateAll() if the designated constraints +// aren't met. +type BackoffStrategyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m BackoffStrategyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m BackoffStrategyMultiError) AllErrors() []error { return m } + +// BackoffStrategyValidationError is the validation error returned by +// BackoffStrategy.Validate if the designated constraints aren't met. +type BackoffStrategyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e BackoffStrategyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e BackoffStrategyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e BackoffStrategyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e BackoffStrategyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e BackoffStrategyValidationError) ErrorName() string { return "BackoffStrategyValidationError" } + +// Error satisfies the builtin error interface +func (e BackoffStrategyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBackoffStrategy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = BackoffStrategyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = BackoffStrategyValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff_vtproto.pb.go new file mode 100644 index 0000000000..3c66ff712a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/backoff_vtproto.pb.go @@ -0,0 +1,91 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/backoff.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *BackoffStrategy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BackoffStrategy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *BackoffStrategy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxInterval != nil { + size, err := (*durationpb.Duration)(m.MaxInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.BaseInterval != nil { + size, err := (*durationpb.Duration)(m.BaseInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BackoffStrategy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BaseInterval != nil { + l = (*durationpb.Duration)(m.BaseInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxInterval != nil { + l = (*durationpb.Duration)(m.MaxInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.go new file mode 100644 index 0000000000..862e09a833 --- /dev/null +++ 
b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.go @@ -0,0 +1,3242 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/base.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v31 "github.com/cncf/xds/go/xds/core/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Envoy supports :ref:`upstream priority routing +// ` both at the route and the virtual +// cluster level. The current priority implementation uses different connection +// pool and circuit breaking settings for each priority level. This means that +// even for HTTP/2 requests, two physical connections will be used to an +// upstream host. In the future Envoy will likely support true HTTP/2 priority +// over a single upstream connection. +type RoutingPriority int32 + +const ( + RoutingPriority_DEFAULT RoutingPriority = 0 + RoutingPriority_HIGH RoutingPriority = 1 +) + +// Enum value maps for RoutingPriority. +var ( + RoutingPriority_name = map[int32]string{ + 0: "DEFAULT", + 1: "HIGH", + } + RoutingPriority_value = map[string]int32{ + "DEFAULT": 0, + "HIGH": 1, + } +) + +func (x RoutingPriority) Enum() *RoutingPriority { + p := new(RoutingPriority) + *p = x + return p +} + +func (x RoutingPriority) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RoutingPriority) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_base_proto_enumTypes[0].Descriptor() +} + +func (RoutingPriority) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_base_proto_enumTypes[0] +} + +func (x RoutingPriority) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RoutingPriority.Descriptor instead. +func (RoutingPriority) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{0} +} + +// HTTP request method. +type RequestMethod int32 + +const ( + RequestMethod_METHOD_UNSPECIFIED RequestMethod = 0 + RequestMethod_GET RequestMethod = 1 + RequestMethod_HEAD RequestMethod = 2 + RequestMethod_POST RequestMethod = 3 + RequestMethod_PUT RequestMethod = 4 + RequestMethod_DELETE RequestMethod = 5 + RequestMethod_CONNECT RequestMethod = 6 + RequestMethod_OPTIONS RequestMethod = 7 + RequestMethod_TRACE RequestMethod = 8 + RequestMethod_PATCH RequestMethod = 9 +) + +// Enum value maps for RequestMethod. 
+var ( + RequestMethod_name = map[int32]string{ + 0: "METHOD_UNSPECIFIED", + 1: "GET", + 2: "HEAD", + 3: "POST", + 4: "PUT", + 5: "DELETE", + 6: "CONNECT", + 7: "OPTIONS", + 8: "TRACE", + 9: "PATCH", + } + RequestMethod_value = map[string]int32{ + "METHOD_UNSPECIFIED": 0, + "GET": 1, + "HEAD": 2, + "POST": 3, + "PUT": 4, + "DELETE": 5, + "CONNECT": 6, + "OPTIONS": 7, + "TRACE": 8, + "PATCH": 9, + } +) + +func (x RequestMethod) Enum() *RequestMethod { + p := new(RequestMethod) + *p = x + return p +} + +func (x RequestMethod) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RequestMethod) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_base_proto_enumTypes[1].Descriptor() +} + +func (RequestMethod) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_base_proto_enumTypes[1] +} + +func (x RequestMethod) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RequestMethod.Descriptor instead. +func (RequestMethod) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{1} +} + +// Identifies the direction of the traffic relative to the local Envoy. +type TrafficDirection int32 + +const ( + // Default option is unspecified. + TrafficDirection_UNSPECIFIED TrafficDirection = 0 + // The transport is used for incoming traffic. + TrafficDirection_INBOUND TrafficDirection = 1 + // The transport is used for outgoing traffic. + TrafficDirection_OUTBOUND TrafficDirection = 2 +) + +// Enum value maps for TrafficDirection. +var ( + TrafficDirection_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "INBOUND", + 2: "OUTBOUND", + } + TrafficDirection_value = map[string]int32{ + "UNSPECIFIED": 0, + "INBOUND": 1, + "OUTBOUND": 2, + } +) + +func (x TrafficDirection) Enum() *TrafficDirection { + p := new(TrafficDirection) + *p = x + return p +} + +func (x TrafficDirection) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TrafficDirection) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_base_proto_enumTypes[2].Descriptor() +} + +func (TrafficDirection) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_base_proto_enumTypes[2] +} + +func (x TrafficDirection) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TrafficDirection.Descriptor instead. +func (TrafficDirection) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{2} +} + +// Describes the supported actions types for key/value pair append action. +type KeyValueAppend_KeyValueAppendAction int32 + +const ( + // If the key already exists, this action will result in the following behavior: + // + // - Comma-concatenated value if multiple values are not allowed. + // - New value added to the list of values if multiple values are allowed. + // + // If the key doesn't exist then this will add pair with specified key and value. + KeyValueAppend_APPEND_IF_EXISTS_OR_ADD KeyValueAppend_KeyValueAppendAction = 0 + // This action will add the key/value pair if it doesn't already exist. If the + // key already exists then this will be a no-op. + KeyValueAppend_ADD_IF_ABSENT KeyValueAppend_KeyValueAppendAction = 1 + // This action will overwrite the specified value by discarding any existing + // values if the key already exists. 
If the key doesn't exist then this will add + // the pair with specified key and value. + KeyValueAppend_OVERWRITE_IF_EXISTS_OR_ADD KeyValueAppend_KeyValueAppendAction = 2 + // This action will overwrite the specified value by discarding any existing + // values if the key already exists. If the key doesn't exist then this will + // be no-op. + KeyValueAppend_OVERWRITE_IF_EXISTS KeyValueAppend_KeyValueAppendAction = 3 +) + +// Enum value maps for KeyValueAppend_KeyValueAppendAction. +var ( + KeyValueAppend_KeyValueAppendAction_name = map[int32]string{ + 0: "APPEND_IF_EXISTS_OR_ADD", + 1: "ADD_IF_ABSENT", + 2: "OVERWRITE_IF_EXISTS_OR_ADD", + 3: "OVERWRITE_IF_EXISTS", + } + KeyValueAppend_KeyValueAppendAction_value = map[string]int32{ + "APPEND_IF_EXISTS_OR_ADD": 0, + "ADD_IF_ABSENT": 1, + "OVERWRITE_IF_EXISTS_OR_ADD": 2, + "OVERWRITE_IF_EXISTS": 3, + } +) + +func (x KeyValueAppend_KeyValueAppendAction) Enum() *KeyValueAppend_KeyValueAppendAction { + p := new(KeyValueAppend_KeyValueAppendAction) + *p = x + return p +} + +func (x KeyValueAppend_KeyValueAppendAction) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (KeyValueAppend_KeyValueAppendAction) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_base_proto_enumTypes[3].Descriptor() +} + +func (KeyValueAppend_KeyValueAppendAction) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_base_proto_enumTypes[3] +} + +func (x KeyValueAppend_KeyValueAppendAction) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use KeyValueAppend_KeyValueAppendAction.Descriptor instead. +func (KeyValueAppend_KeyValueAppendAction) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{10, 0} +} + +// Describes the supported actions types for header append action. +type HeaderValueOption_HeaderAppendAction int32 + +const ( + // If the header already exists, this action will result in: + // + // - Comma-concatenated for predefined inline headers. + // - Duplicate header added in the “HeaderMap“ for other headers. + // + // If the header doesn't exist then this will add new header with specified key and value. + HeaderValueOption_APPEND_IF_EXISTS_OR_ADD HeaderValueOption_HeaderAppendAction = 0 + // This action will add the header if it doesn't already exist. If the header + // already exists then this will be a no-op. + HeaderValueOption_ADD_IF_ABSENT HeaderValueOption_HeaderAppendAction = 1 + // This action will overwrite the specified value by discarding any existing values if + // the header already exists. If the header doesn't exist then this will add the header + // with specified key and value. + HeaderValueOption_OVERWRITE_IF_EXISTS_OR_ADD HeaderValueOption_HeaderAppendAction = 2 + // This action will overwrite the specified value by discarding any existing values if + // the header already exists. If the header doesn't exist then this will be no-op. + HeaderValueOption_OVERWRITE_IF_EXISTS HeaderValueOption_HeaderAppendAction = 3 +) + +// Enum value maps for HeaderValueOption_HeaderAppendAction. 
+var ( + HeaderValueOption_HeaderAppendAction_name = map[int32]string{ + 0: "APPEND_IF_EXISTS_OR_ADD", + 1: "ADD_IF_ABSENT", + 2: "OVERWRITE_IF_EXISTS_OR_ADD", + 3: "OVERWRITE_IF_EXISTS", + } + HeaderValueOption_HeaderAppendAction_value = map[string]int32{ + "APPEND_IF_EXISTS_OR_ADD": 0, + "ADD_IF_ABSENT": 1, + "OVERWRITE_IF_EXISTS_OR_ADD": 2, + "OVERWRITE_IF_EXISTS": 3, + } +) + +func (x HeaderValueOption_HeaderAppendAction) Enum() *HeaderValueOption_HeaderAppendAction { + p := new(HeaderValueOption_HeaderAppendAction) + *p = x + return p +} + +func (x HeaderValueOption_HeaderAppendAction) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HeaderValueOption_HeaderAppendAction) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_base_proto_enumTypes[4].Descriptor() +} + +func (HeaderValueOption_HeaderAppendAction) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_base_proto_enumTypes[4] +} + +func (x HeaderValueOption_HeaderAppendAction) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HeaderValueOption_HeaderAppendAction.Descriptor instead. +func (HeaderValueOption_HeaderAppendAction) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{14, 0} +} + +// Identifies location of where either Envoy runs or where upstream hosts run. +type Locality struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Region this :ref:`zone ` belongs to. + Region string `protobuf:"bytes,1,opt,name=region,proto3" json:"region,omitempty"` + // Defines the local service zone where Envoy is running. Though optional, it + // should be set if discovery service routing is used and the discovery + // service exposes :ref:`zone data `, + // either in this message or via :option:`--service-zone`. The meaning of zone + // is context dependent, e.g. `Availability Zone (AZ) + // `_ + // on AWS, `Zone `_ on + // GCP, etc. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` + // When used for locality of upstream hosts, this field further splits zone + // into smaller chunks of sub-zones so they can be load balanced + // independently. + SubZone string `protobuf:"bytes,3,opt,name=sub_zone,json=subZone,proto3" json:"sub_zone,omitempty"` +} + +func (x *Locality) Reset() { + *x = Locality{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Locality) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Locality) ProtoMessage() {} + +func (x *Locality) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Locality.ProtoReflect.Descriptor instead. 
+func (*Locality) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{0} +} + +func (x *Locality) GetRegion() string { + if x != nil { + return x.Region + } + return "" +} + +func (x *Locality) GetZone() string { + if x != nil { + return x.Zone + } + return "" +} + +func (x *Locality) GetSubZone() string { + if x != nil { + return x.SubZone + } + return "" +} + +// BuildVersion combines SemVer version of extension with free-form build information +// (i.e. 'alpha', 'private-build') as a set of strings. +type BuildVersion struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // SemVer version of extension. + Version *v3.SemanticVersion `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Free-form build information. + // Envoy defines several well known keys in the source/common/version/version.h file + Metadata *structpb.Struct `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *BuildVersion) Reset() { + *x = BuildVersion{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BuildVersion) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BuildVersion) ProtoMessage() {} + +func (x *BuildVersion) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BuildVersion.ProtoReflect.Descriptor instead. +func (*BuildVersion) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{1} +} + +func (x *BuildVersion) GetVersion() *v3.SemanticVersion { + if x != nil { + return x.Version + } + return nil +} + +func (x *BuildVersion) GetMetadata() *structpb.Struct { + if x != nil { + return x.Metadata + } + return nil +} + +// Version and identification for an Envoy extension. +// [#next-free-field: 7] +type Extension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is the name of the Envoy filter as specified in the Envoy + // configuration, e.g. envoy.filters.http.router, com.acme.widget. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Category of the extension. + // Extension category names use reverse DNS notation. For instance "envoy.filters.listener" + // for Envoy's built-in listener filters or "com.acme.filters.http" for HTTP filters from + // acme.com vendor. + // [#comment:TODO(yanavlasov): Link to the doc with existing envoy category names.] + Category string `protobuf:"bytes,2,opt,name=category,proto3" json:"category,omitempty"` + // [#not-implemented-hide:] Type descriptor of extension configuration proto. + // [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.] + // [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.] + // + // Deprecated: Marked as deprecated in envoy/config/core/v3/base.proto. 
+ TypeDescriptor string `protobuf:"bytes,3,opt,name=type_descriptor,json=typeDescriptor,proto3" json:"type_descriptor,omitempty"` + // The version is a property of the extension and maintained independently + // of other extensions and the Envoy API. + // This field is not set when extension did not provide version information. + Version *BuildVersion `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` + // Indicates that the extension is present but was disabled via dynamic configuration. + Disabled bool `protobuf:"varint,5,opt,name=disabled,proto3" json:"disabled,omitempty"` + // Type URLs of extension configuration protos. + TypeUrls []string `protobuf:"bytes,6,rep,name=type_urls,json=typeUrls,proto3" json:"type_urls,omitempty"` +} + +func (x *Extension) Reset() { + *x = Extension{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Extension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Extension) ProtoMessage() {} + +func (x *Extension) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Extension.ProtoReflect.Descriptor instead. +func (*Extension) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{2} +} + +func (x *Extension) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Extension) GetCategory() string { + if x != nil { + return x.Category + } + return "" +} + +// Deprecated: Marked as deprecated in envoy/config/core/v3/base.proto. +func (x *Extension) GetTypeDescriptor() string { + if x != nil { + return x.TypeDescriptor + } + return "" +} + +func (x *Extension) GetVersion() *BuildVersion { + if x != nil { + return x.Version + } + return nil +} + +func (x *Extension) GetDisabled() bool { + if x != nil { + return x.Disabled + } + return false +} + +func (x *Extension) GetTypeUrls() []string { + if x != nil { + return x.TypeUrls + } + return nil +} + +// Identifies a specific Envoy instance. The node identifier is presented to the +// management server, which may use this identifier to distinguish per Envoy +// configuration for serving. +// [#next-free-field: 13] +type Node struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An opaque node identifier for the Envoy node. This also provides the local + // service node name. It should be set if any of the following features are + // used: :ref:`statsd `, :ref:`CDS + // `, and :ref:`HTTP tracing + // `, either in this message or via + // :option:`--service-node`. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Defines the local service cluster name where Envoy is running. Though + // optional, it should be set if any of the following features are used: + // :ref:`statsd `, :ref:`health check cluster + // verification + // `, + // :ref:`runtime override directory `, + // :ref:`user agent addition + // `, + // :ref:`HTTP global rate limiting `, + // :ref:`CDS `, and :ref:`HTTP tracing + // `, either in this message or via + // :option:`--service-cluster`. 
+ Cluster string `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"` + // Opaque metadata extending the node identifier. Envoy will pass this + // directly to the management server. + Metadata *structpb.Struct `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"` + // Map from xDS resource type URL to dynamic context parameters. These may vary at runtime (unlike + // other fields in this message). For example, the xDS client may have a shard identifier that + // changes during the lifetime of the xDS client. In Envoy, this would be achieved by updating the + // dynamic context on the Server::Instance's LocalInfo context provider. The shard ID dynamic + // parameter then appears in this field during future discovery requests. + DynamicParameters map[string]*v31.ContextParams `protobuf:"bytes,12,rep,name=dynamic_parameters,json=dynamicParameters,proto3" json:"dynamic_parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Locality specifying where the Envoy instance is running. + Locality *Locality `protobuf:"bytes,4,opt,name=locality,proto3" json:"locality,omitempty"` + // Free-form string that identifies the entity requesting config. + // E.g. "envoy" or "grpc" + UserAgentName string `protobuf:"bytes,6,opt,name=user_agent_name,json=userAgentName,proto3" json:"user_agent_name,omitempty"` + // Types that are assignable to UserAgentVersionType: + // + // *Node_UserAgentVersion + // *Node_UserAgentBuildVersion + UserAgentVersionType isNode_UserAgentVersionType `protobuf_oneof:"user_agent_version_type"` + // List of extensions and their versions supported by the node. + Extensions []*Extension `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty"` + // Client feature support list. These are well known features described + // in the Envoy API repository for a given major version of an API. Client features + // use reverse DNS naming scheme, for example “com.acme.feature“. + // See :ref:`the list of features ` that xDS client may + // support. + ClientFeatures []string `protobuf:"bytes,10,rep,name=client_features,json=clientFeatures,proto3" json:"client_features,omitempty"` + // Known listening ports on the node as a generic hint to the management server + // for filtering :ref:`listeners ` to be returned. For example, + // if there is a listener bound to port 80, the list can optionally contain the + // SocketAddress “(0.0.0.0,80)“. The field is optional and just a hint. + // + // Deprecated: Marked as deprecated in envoy/config/core/v3/base.proto. + ListeningAddresses []*Address `protobuf:"bytes,11,rep,name=listening_addresses,json=listeningAddresses,proto3" json:"listening_addresses,omitempty"` +} + +func (x *Node) Reset() { + *x = Node{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Node) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Node) ProtoMessage() {} + +func (x *Node) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Node.ProtoReflect.Descriptor instead. 
+func (*Node) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{3} +} + +func (x *Node) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *Node) GetCluster() string { + if x != nil { + return x.Cluster + } + return "" +} + +func (x *Node) GetMetadata() *structpb.Struct { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Node) GetDynamicParameters() map[string]*v31.ContextParams { + if x != nil { + return x.DynamicParameters + } + return nil +} + +func (x *Node) GetLocality() *Locality { + if x != nil { + return x.Locality + } + return nil +} + +func (x *Node) GetUserAgentName() string { + if x != nil { + return x.UserAgentName + } + return "" +} + +func (m *Node) GetUserAgentVersionType() isNode_UserAgentVersionType { + if m != nil { + return m.UserAgentVersionType + } + return nil +} + +func (x *Node) GetUserAgentVersion() string { + if x, ok := x.GetUserAgentVersionType().(*Node_UserAgentVersion); ok { + return x.UserAgentVersion + } + return "" +} + +func (x *Node) GetUserAgentBuildVersion() *BuildVersion { + if x, ok := x.GetUserAgentVersionType().(*Node_UserAgentBuildVersion); ok { + return x.UserAgentBuildVersion + } + return nil +} + +func (x *Node) GetExtensions() []*Extension { + if x != nil { + return x.Extensions + } + return nil +} + +func (x *Node) GetClientFeatures() []string { + if x != nil { + return x.ClientFeatures + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/core/v3/base.proto. +func (x *Node) GetListeningAddresses() []*Address { + if x != nil { + return x.ListeningAddresses + } + return nil +} + +type isNode_UserAgentVersionType interface { + isNode_UserAgentVersionType() +} + +type Node_UserAgentVersion struct { + // Free-form string that identifies the version of the entity requesting config. + // E.g. "1.12.2" or "abcd1234", or "SpecialEnvoyBuild" + UserAgentVersion string `protobuf:"bytes,7,opt,name=user_agent_version,json=userAgentVersion,proto3,oneof"` +} + +type Node_UserAgentBuildVersion struct { + // Structured version of the entity requesting config. + UserAgentBuildVersion *BuildVersion `protobuf:"bytes,8,opt,name=user_agent_build_version,json=userAgentBuildVersion,proto3,oneof"` +} + +func (*Node_UserAgentVersion) isNode_UserAgentVersionType() {} + +func (*Node_UserAgentBuildVersion) isNode_UserAgentVersionType() {} + +// Metadata provides additional inputs to filters based on matched listeners, +// filter chains, routes and endpoints. It is structured as a map, usually from +// filter name (in reverse DNS format) to metadata specific to the filter. Metadata +// key-values for a filter are merged as connection and request handling occurs, +// with later values for the same key overriding earlier values. +// +// An example use of metadata is providing additional values to +// http_connection_manager in the envoy.http_connection_manager.access_log +// namespace. +// +// Another example use of metadata is to per service config info in cluster metadata, which may get +// consumed by multiple filters. +// +// For load balancing, Metadata provides a means to subset cluster endpoints. +// Endpoints have a Metadata object associated and routes contain a Metadata +// object to match against. There are some well defined metadata used today for +// this purpose: +// +// - “{"envoy.lb": {"canary": }}“ This indicates the canary status of an +// endpoint and is also used during header processing +// (x-envoy-upstream-canary) and for stats purposes. 
+// +// [#next-major-version: move to type/metadata/v2] +type Metadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Key is the reverse DNS filter name, e.g. com.acme.widget. The “envoy.*“ + // namespace is reserved for Envoy's built-in filters. + // If both “filter_metadata“ and + // :ref:`typed_filter_metadata ` + // fields are present in the metadata with same keys, + // only “typed_filter_metadata“ field will be parsed. + FilterMetadata map[string]*structpb.Struct `protobuf:"bytes,1,rep,name=filter_metadata,json=filterMetadata,proto3" json:"filter_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Key is the reverse DNS filter name, e.g. com.acme.widget. The “envoy.*“ + // namespace is reserved for Envoy's built-in filters. + // The value is encoded as google.protobuf.Any. + // If both :ref:`filter_metadata ` + // and “typed_filter_metadata“ fields are present in the metadata with same keys, + // only “typed_filter_metadata“ field will be parsed. + TypedFilterMetadata map[string]*anypb.Any `protobuf:"bytes,2,rep,name=typed_filter_metadata,json=typedFilterMetadata,proto3" json:"typed_filter_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Metadata) Reset() { + *x = Metadata{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metadata) ProtoMessage() {} + +func (x *Metadata) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metadata.ProtoReflect.Descriptor instead. +func (*Metadata) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{4} +} + +func (x *Metadata) GetFilterMetadata() map[string]*structpb.Struct { + if x != nil { + return x.FilterMetadata + } + return nil +} + +func (x *Metadata) GetTypedFilterMetadata() map[string]*anypb.Any { + if x != nil { + return x.TypedFilterMetadata + } + return nil +} + +// Runtime derived uint32 with a default when not specified. +type RuntimeUInt32 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Default value if runtime value is not available. + DefaultValue uint32 `protobuf:"varint,2,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + // Runtime key to get value for comparison. This value is used if defined. 
+ RuntimeKey string `protobuf:"bytes,3,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"` +} + +func (x *RuntimeUInt32) Reset() { + *x = RuntimeUInt32{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RuntimeUInt32) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RuntimeUInt32) ProtoMessage() {} + +func (x *RuntimeUInt32) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RuntimeUInt32.ProtoReflect.Descriptor instead. +func (*RuntimeUInt32) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{5} +} + +func (x *RuntimeUInt32) GetDefaultValue() uint32 { + if x != nil { + return x.DefaultValue + } + return 0 +} + +func (x *RuntimeUInt32) GetRuntimeKey() string { + if x != nil { + return x.RuntimeKey + } + return "" +} + +// Runtime derived percentage with a default when not specified. +type RuntimePercent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Default value if runtime value is not available. + DefaultValue *v3.Percent `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + // Runtime key to get value for comparison. This value is used if defined. + RuntimeKey string `protobuf:"bytes,2,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"` +} + +func (x *RuntimePercent) Reset() { + *x = RuntimePercent{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RuntimePercent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RuntimePercent) ProtoMessage() {} + +func (x *RuntimePercent) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RuntimePercent.ProtoReflect.Descriptor instead. +func (*RuntimePercent) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{6} +} + +func (x *RuntimePercent) GetDefaultValue() *v3.Percent { + if x != nil { + return x.DefaultValue + } + return nil +} + +func (x *RuntimePercent) GetRuntimeKey() string { + if x != nil { + return x.RuntimeKey + } + return "" +} + +// Runtime derived double with a default when not specified. +type RuntimeDouble struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Default value if runtime value is not available. + DefaultValue float64 `protobuf:"fixed64,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + // Runtime key to get value for comparison. This value is used if defined. 
+ RuntimeKey string `protobuf:"bytes,2,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"` +} + +func (x *RuntimeDouble) Reset() { + *x = RuntimeDouble{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RuntimeDouble) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RuntimeDouble) ProtoMessage() {} + +func (x *RuntimeDouble) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RuntimeDouble.ProtoReflect.Descriptor instead. +func (*RuntimeDouble) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{7} +} + +func (x *RuntimeDouble) GetDefaultValue() float64 { + if x != nil { + return x.DefaultValue + } + return 0 +} + +func (x *RuntimeDouble) GetRuntimeKey() string { + if x != nil { + return x.RuntimeKey + } + return "" +} + +// Runtime derived bool with a default when not specified. +type RuntimeFeatureFlag struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Default value if runtime value is not available. + DefaultValue *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + // Runtime key to get value for comparison. This value is used if defined. The boolean value must + // be represented via its + // `canonical JSON encoding `_. + RuntimeKey string `protobuf:"bytes,2,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"` +} + +func (x *RuntimeFeatureFlag) Reset() { + *x = RuntimeFeatureFlag{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RuntimeFeatureFlag) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RuntimeFeatureFlag) ProtoMessage() {} + +func (x *RuntimeFeatureFlag) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RuntimeFeatureFlag.ProtoReflect.Descriptor instead. +func (*RuntimeFeatureFlag) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{8} +} + +func (x *RuntimeFeatureFlag) GetDefaultValue() *wrapperspb.BoolValue { + if x != nil { + return x.DefaultValue + } + return nil +} + +func (x *RuntimeFeatureFlag) GetRuntimeKey() string { + if x != nil { + return x.RuntimeKey + } + return "" +} + +type KeyValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The key of the key/value pair. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The value of the key/value pair. 
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *KeyValue) Reset() { + *x = KeyValue{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyValue) ProtoMessage() {} + +func (x *KeyValue) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead. +func (*KeyValue) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{9} +} + +func (x *KeyValue) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *KeyValue) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +// Key/value pair plus option to control append behavior. This is used to specify +// key/value pairs that should be appended to a set of existing key/value pairs. +type KeyValueAppend struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Key/value pair entry that this option to append or overwrite. + Entry *KeyValue `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` + // Describes the action taken to append/overwrite the given value for an existing + // key or to only add this key if it's absent. + Action KeyValueAppend_KeyValueAppendAction `protobuf:"varint,2,opt,name=action,proto3,enum=envoy.config.core.v3.KeyValueAppend_KeyValueAppendAction" json:"action,omitempty"` +} + +func (x *KeyValueAppend) Reset() { + *x = KeyValueAppend{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyValueAppend) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyValueAppend) ProtoMessage() {} + +func (x *KeyValueAppend) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyValueAppend.ProtoReflect.Descriptor instead. +func (*KeyValueAppend) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{10} +} + +func (x *KeyValueAppend) GetEntry() *KeyValue { + if x != nil { + return x.Entry + } + return nil +} + +func (x *KeyValueAppend) GetAction() KeyValueAppend_KeyValueAppendAction { + if x != nil { + return x.Action + } + return KeyValueAppend_APPEND_IF_EXISTS_OR_ADD +} + +// Key/value pair to append or remove. +type KeyValueMutation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Key/value pair to append or overwrite. Only one of “append“ or “remove“ can be set. + Append *KeyValueAppend `protobuf:"bytes,1,opt,name=append,proto3" json:"append,omitempty"` + // Key to remove. Only one of “append“ or “remove“ can be set. 
+ Remove string `protobuf:"bytes,2,opt,name=remove,proto3" json:"remove,omitempty"` +} + +func (x *KeyValueMutation) Reset() { + *x = KeyValueMutation{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyValueMutation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyValueMutation) ProtoMessage() {} + +func (x *KeyValueMutation) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyValueMutation.ProtoReflect.Descriptor instead. +func (*KeyValueMutation) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{11} +} + +func (x *KeyValueMutation) GetAppend() *KeyValueAppend { + if x != nil { + return x.Append + } + return nil +} + +func (x *KeyValueMutation) GetRemove() string { + if x != nil { + return x.Remove + } + return "" +} + +// Query parameter name/value pair. +type QueryParameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The key of the query parameter. Case sensitive. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The value of the query parameter. + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *QueryParameter) Reset() { + *x = QueryParameter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryParameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryParameter) ProtoMessage() {} + +func (x *QueryParameter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryParameter.ProtoReflect.Descriptor instead. +func (*QueryParameter) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{12} +} + +func (x *QueryParameter) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *QueryParameter) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// Header name/value pair. +type HeaderValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Header name. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // Header value. + // + // The same :ref:`format specifier ` as used for + // :ref:`HTTP access logging ` applies here, however + // unknown header values are replaced with the empty string instead of “-“. + // Header value is encoded as string. This does not work for non-utf8 characters. + // Only one of “value“ or “raw_value“ can be set. + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + // Header value is encoded as bytes which can support non-utf8 characters. 
+ // Only one of “value“ or “raw_value“ can be set. + RawValue []byte `protobuf:"bytes,3,opt,name=raw_value,json=rawValue,proto3" json:"raw_value,omitempty"` +} + +func (x *HeaderValue) Reset() { + *x = HeaderValue{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeaderValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderValue) ProtoMessage() {} + +func (x *HeaderValue) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderValue.ProtoReflect.Descriptor instead. +func (*HeaderValue) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{13} +} + +func (x *HeaderValue) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *HeaderValue) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +func (x *HeaderValue) GetRawValue() []byte { + if x != nil { + return x.RawValue + } + return nil +} + +// Header name/value pair plus option to control append behavior. +type HeaderValueOption struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Header name/value pair that this option applies to. + Header *HeaderValue `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // Should the value be appended? If true (default), the value is appended to + // existing values. Otherwise it replaces any existing values. + // This field is deprecated and please use + // :ref:`append_action ` as replacement. + // + // .. note:: + // + // The :ref:`external authorization service ` and + // :ref:`external processor service ` have + // default value (``false``) for this field. + // + // Deprecated: Marked as deprecated in envoy/config/core/v3/base.proto. + Append *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=append,proto3" json:"append,omitempty"` + // Describes the action taken to append/overwrite the given value for an existing header + // or to only add this header if it's absent. + // Value defaults to :ref:`APPEND_IF_EXISTS_OR_ADD + // `. + AppendAction HeaderValueOption_HeaderAppendAction `protobuf:"varint,3,opt,name=append_action,json=appendAction,proto3,enum=envoy.config.core.v3.HeaderValueOption_HeaderAppendAction" json:"append_action,omitempty"` + // Is the header value allowed to be empty? If false (default), custom headers with empty values are dropped, + // otherwise they are added. 
+ KeepEmptyValue bool `protobuf:"varint,4,opt,name=keep_empty_value,json=keepEmptyValue,proto3" json:"keep_empty_value,omitempty"` +} + +func (x *HeaderValueOption) Reset() { + *x = HeaderValueOption{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeaderValueOption) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderValueOption) ProtoMessage() {} + +func (x *HeaderValueOption) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderValueOption.ProtoReflect.Descriptor instead. +func (*HeaderValueOption) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{14} +} + +func (x *HeaderValueOption) GetHeader() *HeaderValue { + if x != nil { + return x.Header + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/core/v3/base.proto. +func (x *HeaderValueOption) GetAppend() *wrapperspb.BoolValue { + if x != nil { + return x.Append + } + return nil +} + +func (x *HeaderValueOption) GetAppendAction() HeaderValueOption_HeaderAppendAction { + if x != nil { + return x.AppendAction + } + return HeaderValueOption_APPEND_IF_EXISTS_OR_ADD +} + +func (x *HeaderValueOption) GetKeepEmptyValue() bool { + if x != nil { + return x.KeepEmptyValue + } + return false +} + +// Wrapper for a set of headers. +type HeaderMap struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Headers []*HeaderValue `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` +} + +func (x *HeaderMap) Reset() { + *x = HeaderMap{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeaderMap) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderMap) ProtoMessage() {} + +func (x *HeaderMap) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderMap.ProtoReflect.Descriptor instead. +func (*HeaderMap) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{15} +} + +func (x *HeaderMap) GetHeaders() []*HeaderValue { + if x != nil { + return x.Headers + } + return nil +} + +// A directory that is watched for changes, e.g. by inotify on Linux. Move/rename +// events inside this directory trigger the watch. +type WatchedDirectory struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Directory path to watch. 
+ Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` +} + +func (x *WatchedDirectory) Reset() { + *x = WatchedDirectory{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WatchedDirectory) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WatchedDirectory) ProtoMessage() {} + +func (x *WatchedDirectory) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WatchedDirectory.ProtoReflect.Descriptor instead. +func (*WatchedDirectory) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{16} +} + +func (x *WatchedDirectory) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +// Data source consisting of a file, an inline value, or an environment variable. +// [#next-free-field: 6] +type DataSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Specifier: + // + // *DataSource_Filename + // *DataSource_InlineBytes + // *DataSource_InlineString + // *DataSource_EnvironmentVariable + Specifier isDataSource_Specifier `protobuf_oneof:"specifier"` + // Watched directory that is watched for file changes. If this is set explicitly, the file + // specified in the “filename“ field will be reloaded when relevant file move events occur. + // + // .. note:: + // + // This field only makes sense when the ``filename`` field is set. + // + // .. note:: + // + // Envoy only updates when the file is replaced by a file move, and not when the file is + // edited in place. + // + // .. note:: + // + // Not all use cases of ``DataSource`` support watching directories. It depends on the + // specific usage of the ``DataSource``. See the documentation of the parent message for + // details. + WatchedDirectory *WatchedDirectory `protobuf:"bytes,5,opt,name=watched_directory,json=watchedDirectory,proto3" json:"watched_directory,omitempty"` +} + +func (x *DataSource) Reset() { + *x = DataSource{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DataSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DataSource) ProtoMessage() {} + +func (x *DataSource) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DataSource.ProtoReflect.Descriptor instead. 
+func (*DataSource) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{17} +} + +func (m *DataSource) GetSpecifier() isDataSource_Specifier { + if m != nil { + return m.Specifier + } + return nil +} + +func (x *DataSource) GetFilename() string { + if x, ok := x.GetSpecifier().(*DataSource_Filename); ok { + return x.Filename + } + return "" +} + +func (x *DataSource) GetInlineBytes() []byte { + if x, ok := x.GetSpecifier().(*DataSource_InlineBytes); ok { + return x.InlineBytes + } + return nil +} + +func (x *DataSource) GetInlineString() string { + if x, ok := x.GetSpecifier().(*DataSource_InlineString); ok { + return x.InlineString + } + return "" +} + +func (x *DataSource) GetEnvironmentVariable() string { + if x, ok := x.GetSpecifier().(*DataSource_EnvironmentVariable); ok { + return x.EnvironmentVariable + } + return "" +} + +func (x *DataSource) GetWatchedDirectory() *WatchedDirectory { + if x != nil { + return x.WatchedDirectory + } + return nil +} + +type isDataSource_Specifier interface { + isDataSource_Specifier() +} + +type DataSource_Filename struct { + // Local filesystem data source. + Filename string `protobuf:"bytes,1,opt,name=filename,proto3,oneof"` +} + +type DataSource_InlineBytes struct { + // Bytes inlined in the configuration. + InlineBytes []byte `protobuf:"bytes,2,opt,name=inline_bytes,json=inlineBytes,proto3,oneof"` +} + +type DataSource_InlineString struct { + // String inlined in the configuration. + InlineString string `protobuf:"bytes,3,opt,name=inline_string,json=inlineString,proto3,oneof"` +} + +type DataSource_EnvironmentVariable struct { + // Environment variable data source. + EnvironmentVariable string `protobuf:"bytes,4,opt,name=environment_variable,json=environmentVariable,proto3,oneof"` +} + +func (*DataSource_Filename) isDataSource_Specifier() {} + +func (*DataSource_InlineBytes) isDataSource_Specifier() {} + +func (*DataSource_InlineString) isDataSource_Specifier() {} + +func (*DataSource_EnvironmentVariable) isDataSource_Specifier() {} + +// The message specifies the retry policy of remote data source when fetching fails. +// [#next-free-field: 7] +type RetryPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies parameters that control :ref:`retry backoff strategy `. + // This parameter is optional, in which case the default base interval is 1000 milliseconds. The + // default maximum interval is 10 times the base interval. + RetryBackOff *BackoffStrategy `protobuf:"bytes,1,opt,name=retry_back_off,json=retryBackOff,proto3" json:"retry_back_off,omitempty"` + // Specifies the allowed number of retries. This parameter is optional and + // defaults to 1. + NumRetries *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=num_retries,json=numRetries,proto3" json:"num_retries,omitempty"` + // For details, see :ref:`retry_on `. + RetryOn string `protobuf:"bytes,3,opt,name=retry_on,json=retryOn,proto3" json:"retry_on,omitempty"` + // For details, see :ref:`retry_priority `. + RetryPriority *RetryPolicy_RetryPriority `protobuf:"bytes,4,opt,name=retry_priority,json=retryPriority,proto3" json:"retry_priority,omitempty"` + // For details, see :ref:`RetryHostPredicate `. + RetryHostPredicate []*RetryPolicy_RetryHostPredicate `protobuf:"bytes,5,rep,name=retry_host_predicate,json=retryHostPredicate,proto3" json:"retry_host_predicate,omitempty"` + // For details, see :ref:`host_selection_retry_max_attempts `. 
+ HostSelectionRetryMaxAttempts int64 `protobuf:"varint,6,opt,name=host_selection_retry_max_attempts,json=hostSelectionRetryMaxAttempts,proto3" json:"host_selection_retry_max_attempts,omitempty"` +} + +func (x *RetryPolicy) Reset() { + *x = RetryPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryPolicy) ProtoMessage() {} + +func (x *RetryPolicy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryPolicy.ProtoReflect.Descriptor instead. +func (*RetryPolicy) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{18} +} + +func (x *RetryPolicy) GetRetryBackOff() *BackoffStrategy { + if x != nil { + return x.RetryBackOff + } + return nil +} + +func (x *RetryPolicy) GetNumRetries() *wrapperspb.UInt32Value { + if x != nil { + return x.NumRetries + } + return nil +} + +func (x *RetryPolicy) GetRetryOn() string { + if x != nil { + return x.RetryOn + } + return "" +} + +func (x *RetryPolicy) GetRetryPriority() *RetryPolicy_RetryPriority { + if x != nil { + return x.RetryPriority + } + return nil +} + +func (x *RetryPolicy) GetRetryHostPredicate() []*RetryPolicy_RetryHostPredicate { + if x != nil { + return x.RetryHostPredicate + } + return nil +} + +func (x *RetryPolicy) GetHostSelectionRetryMaxAttempts() int64 { + if x != nil { + return x.HostSelectionRetryMaxAttempts + } + return 0 +} + +// The message specifies how to fetch data from remote and how to verify it. +type RemoteDataSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The HTTP URI to fetch the remote data. + HttpUri *HttpUri `protobuf:"bytes,1,opt,name=http_uri,json=httpUri,proto3" json:"http_uri,omitempty"` + // SHA256 string for verifying data. + Sha256 string `protobuf:"bytes,2,opt,name=sha256,proto3" json:"sha256,omitempty"` + // Retry policy for fetching remote data. + RetryPolicy *RetryPolicy `protobuf:"bytes,3,opt,name=retry_policy,json=retryPolicy,proto3" json:"retry_policy,omitempty"` +} + +func (x *RemoteDataSource) Reset() { + *x = RemoteDataSource{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RemoteDataSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RemoteDataSource) ProtoMessage() {} + +func (x *RemoteDataSource) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RemoteDataSource.ProtoReflect.Descriptor instead. 
+func (*RemoteDataSource) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{19} +} + +func (x *RemoteDataSource) GetHttpUri() *HttpUri { + if x != nil { + return x.HttpUri + } + return nil +} + +func (x *RemoteDataSource) GetSha256() string { + if x != nil { + return x.Sha256 + } + return "" +} + +func (x *RemoteDataSource) GetRetryPolicy() *RetryPolicy { + if x != nil { + return x.RetryPolicy + } + return nil +} + +// Async data source which support async data fetch. +type AsyncDataSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Specifier: + // + // *AsyncDataSource_Local + // *AsyncDataSource_Remote + Specifier isAsyncDataSource_Specifier `protobuf_oneof:"specifier"` +} + +func (x *AsyncDataSource) Reset() { + *x = AsyncDataSource{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AsyncDataSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AsyncDataSource) ProtoMessage() {} + +func (x *AsyncDataSource) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AsyncDataSource.ProtoReflect.Descriptor instead. +func (*AsyncDataSource) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{20} +} + +func (m *AsyncDataSource) GetSpecifier() isAsyncDataSource_Specifier { + if m != nil { + return m.Specifier + } + return nil +} + +func (x *AsyncDataSource) GetLocal() *DataSource { + if x, ok := x.GetSpecifier().(*AsyncDataSource_Local); ok { + return x.Local + } + return nil +} + +func (x *AsyncDataSource) GetRemote() *RemoteDataSource { + if x, ok := x.GetSpecifier().(*AsyncDataSource_Remote); ok { + return x.Remote + } + return nil +} + +type isAsyncDataSource_Specifier interface { + isAsyncDataSource_Specifier() +} + +type AsyncDataSource_Local struct { + // Local async data source. + Local *DataSource `protobuf:"bytes,1,opt,name=local,proto3,oneof"` +} + +type AsyncDataSource_Remote struct { + // Remote async data source. + Remote *RemoteDataSource `protobuf:"bytes,2,opt,name=remote,proto3,oneof"` +} + +func (*AsyncDataSource_Local) isAsyncDataSource_Specifier() {} + +func (*AsyncDataSource_Remote) isAsyncDataSource_Specifier() {} + +// Configuration for transport socket in :ref:`listeners ` and +// :ref:`clusters `. If the configuration is +// empty, a default transport socket implementation and configuration will be +// chosen based on the platform and existence of tls_context. +type TransportSocket struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the transport socket to instantiate. The name must match a supported transport + // socket implementation. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Implementation specific configuration which depends on the implementation being instantiated. + // See the supported transport socket implementations for further documentation. 
+ // + // Types that are assignable to ConfigType: + // + // *TransportSocket_TypedConfig + ConfigType isTransportSocket_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *TransportSocket) Reset() { + *x = TransportSocket{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransportSocket) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransportSocket) ProtoMessage() {} + +func (x *TransportSocket) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransportSocket.ProtoReflect.Descriptor instead. +func (*TransportSocket) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{21} +} + +func (x *TransportSocket) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *TransportSocket) GetConfigType() isTransportSocket_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *TransportSocket) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*TransportSocket_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isTransportSocket_ConfigType interface { + isTransportSocket_ConfigType() +} + +type TransportSocket_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*TransportSocket_TypedConfig) isTransportSocket_ConfigType() {} + +// Runtime derived FractionalPercent with defaults for when the numerator or denominator is not +// specified via a runtime key. +// +// .. note:: +// +// Parsing of the runtime key's data is implemented such that it may be represented as a +// :ref:`FractionalPercent ` proto represented as JSON/YAML +// and may also be represented as an integer with the assumption that the value is an integral +// percentage out of 100. For instance, a runtime key lookup returning the value "42" would parse +// as a ``FractionalPercent`` whose numerator is 42 and denominator is HUNDRED. +type RuntimeFractionalPercent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Default value if the runtime value's for the numerator/denominator keys are not available. + DefaultValue *v3.FractionalPercent `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + // Runtime key for a YAML representation of a FractionalPercent. 
+ RuntimeKey string `protobuf:"bytes,2,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"` +} + +func (x *RuntimeFractionalPercent) Reset() { + *x = RuntimeFractionalPercent{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RuntimeFractionalPercent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RuntimeFractionalPercent) ProtoMessage() {} + +func (x *RuntimeFractionalPercent) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RuntimeFractionalPercent.ProtoReflect.Descriptor instead. +func (*RuntimeFractionalPercent) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{22} +} + +func (x *RuntimeFractionalPercent) GetDefaultValue() *v3.FractionalPercent { + if x != nil { + return x.DefaultValue + } + return nil +} + +func (x *RuntimeFractionalPercent) GetRuntimeKey() string { + if x != nil { + return x.RuntimeKey + } + return "" +} + +// Identifies a specific ControlPlane instance that Envoy is connected to. +type ControlPlane struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An opaque control plane identifier that uniquely identifies an instance + // of control plane. This can be used to identify which control plane instance, + // the Envoy is connected to. + Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` +} + +func (x *ControlPlane) Reset() { + *x = ControlPlane{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControlPlane) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControlPlane) ProtoMessage() {} + +func (x *ControlPlane) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControlPlane.ProtoReflect.Descriptor instead. +func (*ControlPlane) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{23} +} + +func (x *ControlPlane) GetIdentifier() string { + if x != nil { + return x.Identifier + } + return "" +} + +// See :ref:`RetryPriority `. 
+type RetryPolicy_RetryPriority struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to ConfigType: + // + // *RetryPolicy_RetryPriority_TypedConfig + ConfigType isRetryPolicy_RetryPriority_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *RetryPolicy_RetryPriority) Reset() { + *x = RetryPolicy_RetryPriority{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryPolicy_RetryPriority) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryPolicy_RetryPriority) ProtoMessage() {} + +func (x *RetryPolicy_RetryPriority) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryPolicy_RetryPriority.ProtoReflect.Descriptor instead. +func (*RetryPolicy_RetryPriority) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{18, 0} +} + +func (x *RetryPolicy_RetryPriority) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *RetryPolicy_RetryPriority) GetConfigType() isRetryPolicy_RetryPriority_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *RetryPolicy_RetryPriority) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*RetryPolicy_RetryPriority_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isRetryPolicy_RetryPriority_ConfigType interface { + isRetryPolicy_RetryPriority_ConfigType() +} + +type RetryPolicy_RetryPriority_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*RetryPolicy_RetryPriority_TypedConfig) isRetryPolicy_RetryPriority_ConfigType() {} + +// See :ref:`RetryHostPredicate `. +type RetryPolicy_RetryHostPredicate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to ConfigType: + // + // *RetryPolicy_RetryHostPredicate_TypedConfig + ConfigType isRetryPolicy_RetryHostPredicate_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *RetryPolicy_RetryHostPredicate) Reset() { + *x = RetryPolicy_RetryHostPredicate{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryPolicy_RetryHostPredicate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryPolicy_RetryHostPredicate) ProtoMessage() {} + +func (x *RetryPolicy_RetryHostPredicate) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_base_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryPolicy_RetryHostPredicate.ProtoReflect.Descriptor instead. 
+func (*RetryPolicy_RetryHostPredicate) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{18, 1} +} + +func (x *RetryPolicy_RetryHostPredicate) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *RetryPolicy_RetryHostPredicate) GetConfigType() isRetryPolicy_RetryHostPredicate_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *RetryPolicy_RetryHostPredicate) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*RetryPolicy_RetryHostPredicate_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isRetryPolicy_RetryHostPredicate_ConfigType interface { + isRetryPolicy_RetryHostPredicate_ConfigType() +} + +type RetryPolicy_RetryHostPredicate_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*RetryPolicy_RetryHostPredicate_TypedConfig) isRetryPolicy_RetryHostPredicate_ConfigType() {} + +var File_envoy_config_core_v3_base_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_base_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, 0x69, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, + 0x2f, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 
0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x74, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x16, + 0x0a, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x75, + 0x62, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, + 0x62, 0x5a, 0x6f, 0x6e, 0x65, 0x3a, 0x21, 0x9a, 0xc5, 0x88, 0x1e, 0x1c, 0x0a, 0x1a, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x22, 0xa4, 0x01, 0x0a, 0x0c, 0x42, 0x75, 0x69, + 0x6c, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, + 0x74, 0x69, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, + 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, + 0x8c, 0x02, 0x0a, 0x09, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x34, 0x0a, + 0x0f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, + 0x30, 0x18, 0x01, 0x52, 0x0e, 0x74, 0x79, 0x70, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x12, 0x3c, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x75, 0x69, 0x6c, + 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 
0x69, 0x6f, + 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1b, 0x0a, + 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x73, 0x3a, 0x22, 0x9a, 0xc5, 0x88, 0x1e, + 0x1d, 0x0a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xb2, + 0x06, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x60, 0x0a, 0x12, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x3a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x69, 0x74, 0x79, 0x12, 0x26, 0x0a, 0x0f, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x61, 0x67, 0x65, + 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x75, + 0x73, 0x65, 0x72, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x12, + 0x75, 0x73, 0x65, 0x72, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x10, 0x75, 0x73, 0x65, 0x72, + 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x5d, 0x0a, 0x18, + 0x75, 0x73, 0x65, 0x72, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x15, 0x75, 0x73, 0x65, 0x72, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x42, + 0x75, 0x69, 0x6c, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x0a, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 
0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, + 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5b, 0x0a, 0x13, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x69, + 0x6e, 0x67, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x12, + 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x65, 0x73, 0x1a, 0x60, 0x0a, 0x16, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x30, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x1d, 0x9a, 0xc5, 0x88, 0x1e, 0x18, 0x0a, 0x16, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4e, + 0x6f, 0x64, 0x65, 0x42, 0x19, 0x0a, 0x17, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x61, 0x67, 0x65, 0x6e, + 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, + 0x08, 0x05, 0x10, 0x06, 0x52, 0x0d, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x22, 0xcd, 0x03, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x69, 0x0a, 0x0f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x0c, 0xfa, + 0x42, 0x09, 0x9a, 0x01, 0x06, 0x22, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0e, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x79, 0x0a, 0x15, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0x9a, 0x01, 0x06, 0x22, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x13, 0x74, 0x79, 0x70, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 
0x1a, 0x5a, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x1a, 0x5c, 0x0a, 0x18, 0x54, 0x79, 0x70, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x3a, 0x21, 0x9a, 0xc5, 0x88, 0x1e, 0x1c, 0x0a, 0x1a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x22, 0x86, 0x01, 0x0a, 0x0d, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x55, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x28, 0x0a, 0x0b, 0x72, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, + 0x65, 0x4b, 0x65, 0x79, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x22, 0x77, 0x0a, 0x0e, + 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x3b, + 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0c, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x28, 0x0a, 0x0b, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x22, 0x86, 0x01, 0x0a, 0x0d, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, + 0x65, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x28, 0x0a, 0x0b, + 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x74, + 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x3a, 0x26, 0x9a, 0xc5, 
0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x22, 0xb6, + 0x01, 0x0a, 0x12, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x46, 0x6c, 0x61, 0x67, 0x12, 0x49, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, + 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, + 0x10, 0x01, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x28, 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, + 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, + 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x22, 0x3f, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0x10, 0x01, 0x28, 0x80, 0x80, 0x01, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xae, 0x02, 0x0a, 0x0e, 0x4b, 0x65, 0x79, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x12, 0x3e, 0x0a, 0x05, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, + 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x5b, 0x0a, 0x06, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x41, 0x70, 0x70, 0x65, 0x6e, + 0x64, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, + 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x7f, 0x0a, 0x14, 0x4b, 0x65, 0x79, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x1b, 0x0a, 0x17, 0x41, 0x50, 0x50, 0x45, 0x4e, 0x44, 0x5f, 0x49, 0x46, 0x5f, 0x45, 0x58, + 0x49, 0x53, 0x54, 0x53, 0x5f, 0x4f, 0x52, 0x5f, 0x41, 0x44, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, + 0x0d, 0x41, 0x44, 0x44, 0x5f, 0x49, 0x46, 0x5f, 0x41, 0x42, 0x53, 0x45, 0x4e, 0x54, 0x10, 0x01, + 0x12, 0x1e, 0x0a, 0x1a, 0x4f, 0x56, 0x45, 0x52, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x49, 0x46, + 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x5f, 0x4f, 0x52, 0x5f, 0x41, 0x44, 0x44, 0x10, 0x02, + 0x12, 0x17, 0x0a, 0x13, 0x4f, 0x56, 0x45, 0x52, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 
0x49, 0x46, + 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x03, 0x22, 0x73, 0x0a, 0x10, 0x4b, 0x65, 0x79, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3c, 0x0a, + 0x06, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x41, 0x70, 0x70, + 0x65, 0x6e, 0x64, 0x52, 0x06, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x12, 0x21, 0x0a, 0x06, 0x72, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xfa, 0x42, 0x06, + 0x72, 0x04, 0x28, 0x80, 0x80, 0x01, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x22, 0x41, + 0x0a, 0x0e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x12, 0x19, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0xcd, 0x01, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x23, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x11, + 0xfa, 0x42, 0x0e, 0x72, 0x0c, 0x10, 0x01, 0x28, 0x80, 0x80, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, + 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x37, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x21, 0xfa, 0x42, 0x0c, 0x72, 0x0a, 0x28, 0x80, 0x80, 0x01, + 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x0c, 0x12, 0x0a, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x3a, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x42, 0x1d, 0xfa, 0x42, 0x08, 0x7a, 0x06, 0x10, 0x00, 0x18, 0x80, 0x80, 0x01, 0xf2, + 0x98, 0xfe, 0x8f, 0x05, 0x0c, 0x12, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x52, 0x08, 0x72, 0x61, 0x77, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x24, 0x9a, 0xc5, 0x88, + 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0xd9, 0x03, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x43, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, + 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x06, + 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, + 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, + 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x06, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x12, 0x69, 0x0a, + 0x0d, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 
0x28, 0x0e, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x65, + 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x65, 0x70, + 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0e, 0x6b, 0x65, 0x65, 0x70, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x7d, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x70, 0x70, 0x65, + 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x41, 0x50, 0x50, 0x45, + 0x4e, 0x44, 0x5f, 0x49, 0x46, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x5f, 0x4f, 0x52, 0x5f, + 0x41, 0x44, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x44, 0x44, 0x5f, 0x49, 0x46, 0x5f, + 0x41, 0x42, 0x53, 0x45, 0x4e, 0x54, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x4f, 0x56, 0x45, 0x52, + 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x49, 0x46, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x5f, + 0x4f, 0x52, 0x5f, 0x41, 0x44, 0x44, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x4f, 0x56, 0x45, 0x52, + 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x49, 0x46, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, + 0x03, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6c, 0x0a, + 0x09, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x70, 0x12, 0x3b, 0x0a, 0x07, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x3a, 0x22, 0x9a, 0xc5, 0x88, 0x1e, 0x1d, 0x0a, 0x1b, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x70, 0x22, 0x2f, 0x0a, 0x10, 0x57, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, + 0x1b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0xc9, 0x02, 0x0a, + 0x0a, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x08, 0x66, + 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0b, 0x69, 0x6e, 0x6c, 0x69, + 0x6e, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0d, 0x69, 0x6e, 0x6c, 0x69, 0x6e, + 0x65, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, + 0x52, 0x0c, 0x69, 0x6e, 0x6c, 0x69, 
0x6e, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x3c, + 0x0a, 0x14, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x61, + 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x13, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, + 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x53, 0x0a, 0x11, + 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, + 0x10, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x79, 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x10, 0x0a, 0x09, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xee, 0x05, 0x0a, 0x0b, 0x52, 0x65, 0x74, + 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4b, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x72, + 0x79, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x6f, 0x66, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x53, + 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, + 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x52, 0x0a, 0x0b, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x65, 0x74, + 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, + 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x13, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x0d, + 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x6e, + 0x75, 0x6d, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x5f, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x4f, 0x6e, 0x12, 0x56, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x72, + 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, + 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x72, + 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x66, 0x0a, 0x14, + 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, + 0x74, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 
0x64, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x52, 0x12, 0x72, 0x65, 0x74, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x12, 0x48, 0x0a, 0x21, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x78, + 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x1d, 0x68, 0x6f, 0x73, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x74, 0x72, 0x79, 0x4d, 0x61, 0x78, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x1a, 0x76, + 0x0a, 0x0d, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, + 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, + 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x7b, 0x0a, 0x12, 0x52, 0x65, 0x74, 0x72, 0x79, 0x48, + 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, + 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, + 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xe8, 0x01, 0x0a, 0x10, 0x52, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x42, + 0x0a, 0x08, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x55, 0x72, 0x69, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x68, 0x74, 0x74, 0x70, 0x55, + 0x72, 0x69, 0x12, 0x1f, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x73, 0x68, 0x61, + 0x32, 0x35, 0x36, 0x12, 0x44, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, + 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 
0x1e, 0x24, + 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x0f, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x44, 0x61, + 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, + 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x05, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x12, 0x40, 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x06, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, + 0x73, 0x79, 0x6e, 0x63, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x10, + 0x0a, 0x09, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, + 0x22, 0xb0, 0x01, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, + 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x28, 0x9a, 0xc5, + 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, + 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x22, 0xbf, 0x01, 0x0a, 0x18, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, + 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, + 0x12, 0x4f, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, + 0x02, 0x10, 0x01, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4b, + 0x65, 0x79, 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 
0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, + 0x72, 0x63, 0x65, 0x6e, 0x74, 0x22, 0x55, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x50, 0x6c, 0x61, 0x6e, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x65, 0x2a, 0x28, 0x0a, 0x0f, + 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, + 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, + 0x48, 0x49, 0x47, 0x48, 0x10, 0x01, 0x2a, 0x89, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x16, 0x0a, 0x12, 0x4d, 0x45, 0x54, 0x48, + 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x45, 0x41, + 0x44, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, + 0x03, 0x50, 0x55, 0x54, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, + 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x10, 0x06, 0x12, + 0x0b, 0x0a, 0x07, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, + 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x08, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x41, 0x54, 0x43, 0x48, + 0x10, 0x09, 0x2a, 0x3e, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x42, 0x4f, 0x55, + 0x4e, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x55, 0x54, 0x42, 0x4f, 0x55, 0x4e, 0x44, + 0x10, 0x02, 0x42, 0x7d, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x42, 0x09, 0x42, 0x61, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, + 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_base_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_base_proto_rawDescData = file_envoy_config_core_v3_base_proto_rawDesc +) + +func file_envoy_config_core_v3_base_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_base_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_base_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_base_proto_rawDescData) + }) + return 
file_envoy_config_core_v3_base_proto_rawDescData +} + +var file_envoy_config_core_v3_base_proto_enumTypes = make([]protoimpl.EnumInfo, 5) +var file_envoy_config_core_v3_base_proto_msgTypes = make([]protoimpl.MessageInfo, 29) +var file_envoy_config_core_v3_base_proto_goTypes = []interface{}{ + (RoutingPriority)(0), // 0: envoy.config.core.v3.RoutingPriority + (RequestMethod)(0), // 1: envoy.config.core.v3.RequestMethod + (TrafficDirection)(0), // 2: envoy.config.core.v3.TrafficDirection + (KeyValueAppend_KeyValueAppendAction)(0), // 3: envoy.config.core.v3.KeyValueAppend.KeyValueAppendAction + (HeaderValueOption_HeaderAppendAction)(0), // 4: envoy.config.core.v3.HeaderValueOption.HeaderAppendAction + (*Locality)(nil), // 5: envoy.config.core.v3.Locality + (*BuildVersion)(nil), // 6: envoy.config.core.v3.BuildVersion + (*Extension)(nil), // 7: envoy.config.core.v3.Extension + (*Node)(nil), // 8: envoy.config.core.v3.Node + (*Metadata)(nil), // 9: envoy.config.core.v3.Metadata + (*RuntimeUInt32)(nil), // 10: envoy.config.core.v3.RuntimeUInt32 + (*RuntimePercent)(nil), // 11: envoy.config.core.v3.RuntimePercent + (*RuntimeDouble)(nil), // 12: envoy.config.core.v3.RuntimeDouble + (*RuntimeFeatureFlag)(nil), // 13: envoy.config.core.v3.RuntimeFeatureFlag + (*KeyValue)(nil), // 14: envoy.config.core.v3.KeyValue + (*KeyValueAppend)(nil), // 15: envoy.config.core.v3.KeyValueAppend + (*KeyValueMutation)(nil), // 16: envoy.config.core.v3.KeyValueMutation + (*QueryParameter)(nil), // 17: envoy.config.core.v3.QueryParameter + (*HeaderValue)(nil), // 18: envoy.config.core.v3.HeaderValue + (*HeaderValueOption)(nil), // 19: envoy.config.core.v3.HeaderValueOption + (*HeaderMap)(nil), // 20: envoy.config.core.v3.HeaderMap + (*WatchedDirectory)(nil), // 21: envoy.config.core.v3.WatchedDirectory + (*DataSource)(nil), // 22: envoy.config.core.v3.DataSource + (*RetryPolicy)(nil), // 23: envoy.config.core.v3.RetryPolicy + (*RemoteDataSource)(nil), // 24: envoy.config.core.v3.RemoteDataSource + (*AsyncDataSource)(nil), // 25: envoy.config.core.v3.AsyncDataSource + (*TransportSocket)(nil), // 26: envoy.config.core.v3.TransportSocket + (*RuntimeFractionalPercent)(nil), // 27: envoy.config.core.v3.RuntimeFractionalPercent + (*ControlPlane)(nil), // 28: envoy.config.core.v3.ControlPlane + nil, // 29: envoy.config.core.v3.Node.DynamicParametersEntry + nil, // 30: envoy.config.core.v3.Metadata.FilterMetadataEntry + nil, // 31: envoy.config.core.v3.Metadata.TypedFilterMetadataEntry + (*RetryPolicy_RetryPriority)(nil), // 32: envoy.config.core.v3.RetryPolicy.RetryPriority + (*RetryPolicy_RetryHostPredicate)(nil), // 33: envoy.config.core.v3.RetryPolicy.RetryHostPredicate + (*v3.SemanticVersion)(nil), // 34: envoy.type.v3.SemanticVersion + (*structpb.Struct)(nil), // 35: google.protobuf.Struct + (*Address)(nil), // 36: envoy.config.core.v3.Address + (*v3.Percent)(nil), // 37: envoy.type.v3.Percent + (*wrapperspb.BoolValue)(nil), // 38: google.protobuf.BoolValue + (*BackoffStrategy)(nil), // 39: envoy.config.core.v3.BackoffStrategy + (*wrapperspb.UInt32Value)(nil), // 40: google.protobuf.UInt32Value + (*HttpUri)(nil), // 41: envoy.config.core.v3.HttpUri + (*anypb.Any)(nil), // 42: google.protobuf.Any + (*v3.FractionalPercent)(nil), // 43: envoy.type.v3.FractionalPercent + (*v31.ContextParams)(nil), // 44: xds.core.v3.ContextParams +} +var file_envoy_config_core_v3_base_proto_depIdxs = []int32{ + 34, // 0: envoy.config.core.v3.BuildVersion.version:type_name -> envoy.type.v3.SemanticVersion + 35, // 1: 
envoy.config.core.v3.BuildVersion.metadata:type_name -> google.protobuf.Struct + 6, // 2: envoy.config.core.v3.Extension.version:type_name -> envoy.config.core.v3.BuildVersion + 35, // 3: envoy.config.core.v3.Node.metadata:type_name -> google.protobuf.Struct + 29, // 4: envoy.config.core.v3.Node.dynamic_parameters:type_name -> envoy.config.core.v3.Node.DynamicParametersEntry + 5, // 5: envoy.config.core.v3.Node.locality:type_name -> envoy.config.core.v3.Locality + 6, // 6: envoy.config.core.v3.Node.user_agent_build_version:type_name -> envoy.config.core.v3.BuildVersion + 7, // 7: envoy.config.core.v3.Node.extensions:type_name -> envoy.config.core.v3.Extension + 36, // 8: envoy.config.core.v3.Node.listening_addresses:type_name -> envoy.config.core.v3.Address + 30, // 9: envoy.config.core.v3.Metadata.filter_metadata:type_name -> envoy.config.core.v3.Metadata.FilterMetadataEntry + 31, // 10: envoy.config.core.v3.Metadata.typed_filter_metadata:type_name -> envoy.config.core.v3.Metadata.TypedFilterMetadataEntry + 37, // 11: envoy.config.core.v3.RuntimePercent.default_value:type_name -> envoy.type.v3.Percent + 38, // 12: envoy.config.core.v3.RuntimeFeatureFlag.default_value:type_name -> google.protobuf.BoolValue + 14, // 13: envoy.config.core.v3.KeyValueAppend.entry:type_name -> envoy.config.core.v3.KeyValue + 3, // 14: envoy.config.core.v3.KeyValueAppend.action:type_name -> envoy.config.core.v3.KeyValueAppend.KeyValueAppendAction + 15, // 15: envoy.config.core.v3.KeyValueMutation.append:type_name -> envoy.config.core.v3.KeyValueAppend + 18, // 16: envoy.config.core.v3.HeaderValueOption.header:type_name -> envoy.config.core.v3.HeaderValue + 38, // 17: envoy.config.core.v3.HeaderValueOption.append:type_name -> google.protobuf.BoolValue + 4, // 18: envoy.config.core.v3.HeaderValueOption.append_action:type_name -> envoy.config.core.v3.HeaderValueOption.HeaderAppendAction + 18, // 19: envoy.config.core.v3.HeaderMap.headers:type_name -> envoy.config.core.v3.HeaderValue + 21, // 20: envoy.config.core.v3.DataSource.watched_directory:type_name -> envoy.config.core.v3.WatchedDirectory + 39, // 21: envoy.config.core.v3.RetryPolicy.retry_back_off:type_name -> envoy.config.core.v3.BackoffStrategy + 40, // 22: envoy.config.core.v3.RetryPolicy.num_retries:type_name -> google.protobuf.UInt32Value + 32, // 23: envoy.config.core.v3.RetryPolicy.retry_priority:type_name -> envoy.config.core.v3.RetryPolicy.RetryPriority + 33, // 24: envoy.config.core.v3.RetryPolicy.retry_host_predicate:type_name -> envoy.config.core.v3.RetryPolicy.RetryHostPredicate + 41, // 25: envoy.config.core.v3.RemoteDataSource.http_uri:type_name -> envoy.config.core.v3.HttpUri + 23, // 26: envoy.config.core.v3.RemoteDataSource.retry_policy:type_name -> envoy.config.core.v3.RetryPolicy + 22, // 27: envoy.config.core.v3.AsyncDataSource.local:type_name -> envoy.config.core.v3.DataSource + 24, // 28: envoy.config.core.v3.AsyncDataSource.remote:type_name -> envoy.config.core.v3.RemoteDataSource + 42, // 29: envoy.config.core.v3.TransportSocket.typed_config:type_name -> google.protobuf.Any + 43, // 30: envoy.config.core.v3.RuntimeFractionalPercent.default_value:type_name -> envoy.type.v3.FractionalPercent + 44, // 31: envoy.config.core.v3.Node.DynamicParametersEntry.value:type_name -> xds.core.v3.ContextParams + 35, // 32: envoy.config.core.v3.Metadata.FilterMetadataEntry.value:type_name -> google.protobuf.Struct + 42, // 33: envoy.config.core.v3.Metadata.TypedFilterMetadataEntry.value:type_name -> google.protobuf.Any + 42, // 34: 
envoy.config.core.v3.RetryPolicy.RetryPriority.typed_config:type_name -> google.protobuf.Any + 42, // 35: envoy.config.core.v3.RetryPolicy.RetryHostPredicate.typed_config:type_name -> google.protobuf.Any + 36, // [36:36] is the sub-list for method output_type + 36, // [36:36] is the sub-list for method input_type + 36, // [36:36] is the sub-list for extension type_name + 36, // [36:36] is the sub-list for extension extendee + 0, // [0:36] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_base_proto_init() } +func file_envoy_config_core_v3_base_proto_init() { + if File_envoy_config_core_v3_base_proto != nil { + return + } + file_envoy_config_core_v3_address_proto_init() + file_envoy_config_core_v3_backoff_proto_init() + file_envoy_config_core_v3_http_uri_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_base_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Locality); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BuildVersion); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Extension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Node); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RuntimeUInt32); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RuntimePercent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RuntimeDouble); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RuntimeFeatureFlag); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_envoy_config_core_v3_base_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValueAppend); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValueMutation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderValueOption); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderMap); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WatchedDirectory); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoteDataSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AsyncDataSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransportSocket); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RuntimeFractionalPercent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_envoy_config_core_v3_base_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControlPlane); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryPolicy_RetryPriority); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_base_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryPolicy_RetryHostPredicate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_core_v3_base_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*Node_UserAgentVersion)(nil), + (*Node_UserAgentBuildVersion)(nil), + } + file_envoy_config_core_v3_base_proto_msgTypes[17].OneofWrappers = []interface{}{ + (*DataSource_Filename)(nil), + (*DataSource_InlineBytes)(nil), + (*DataSource_InlineString)(nil), + (*DataSource_EnvironmentVariable)(nil), + } + file_envoy_config_core_v3_base_proto_msgTypes[20].OneofWrappers = []interface{}{ + (*AsyncDataSource_Local)(nil), + (*AsyncDataSource_Remote)(nil), + } + file_envoy_config_core_v3_base_proto_msgTypes[21].OneofWrappers = []interface{}{ + (*TransportSocket_TypedConfig)(nil), + } + file_envoy_config_core_v3_base_proto_msgTypes[27].OneofWrappers = []interface{}{ + (*RetryPolicy_RetryPriority_TypedConfig)(nil), + } + file_envoy_config_core_v3_base_proto_msgTypes[28].OneofWrappers = []interface{}{ + (*RetryPolicy_RetryHostPredicate_TypedConfig)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_base_proto_rawDesc, + NumEnums: 5, + NumMessages: 29, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_base_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_base_proto_depIdxs, + EnumInfos: file_envoy_config_core_v3_base_proto_enumTypes, + MessageInfos: file_envoy_config_core_v3_base_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_base_proto = out.File + file_envoy_config_core_v3_base_proto_rawDesc = nil + file_envoy_config_core_v3_base_proto_goTypes = nil + file_envoy_config_core_v3_base_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.validate.go new file mode 100644 index 0000000000..09d836dac4 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base.pb.validate.go @@ -0,0 +1,4164 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/config/core/v3/base.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Locality with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Locality) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Locality with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in LocalityMultiError, or nil +// if none found. +func (m *Locality) ValidateAll() error { + return m.validate(true) +} + +func (m *Locality) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Region + + // no validation rules for Zone + + // no validation rules for SubZone + + if len(errors) > 0 { + return LocalityMultiError(errors) + } + + return nil +} + +// LocalityMultiError is an error wrapping multiple validation errors returned +// by Locality.ValidateAll() if the designated constraints aren't met. +type LocalityMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LocalityMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LocalityMultiError) AllErrors() []error { return m } + +// LocalityValidationError is the validation error returned by +// Locality.Validate if the designated constraints aren't met. +type LocalityValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LocalityValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LocalityValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LocalityValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LocalityValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LocalityValidationError) ErrorName() string { return "LocalityValidationError" } + +// Error satisfies the builtin error interface +func (e LocalityValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLocality.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LocalityValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LocalityValidationError{} + +// Validate checks the field values on BuildVersion with the rules defined in +// the proto definition for this message. 
If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *BuildVersion) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on BuildVersion with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in BuildVersionMultiError, or +// nil if none found. +func (m *BuildVersion) ValidateAll() error { + return m.validate(true) +} + +func (m *BuildVersion) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetVersion()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BuildVersionValidationError{ + field: "Version", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BuildVersionValidationError{ + field: "Version", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetVersion()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BuildVersionValidationError{ + field: "Version", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BuildVersionValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BuildVersionValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BuildVersionValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return BuildVersionMultiError(errors) + } + + return nil +} + +// BuildVersionMultiError is an error wrapping multiple validation errors +// returned by BuildVersion.ValidateAll() if the designated constraints aren't met. +type BuildVersionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m BuildVersionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m BuildVersionMultiError) AllErrors() []error { return m } + +// BuildVersionValidationError is the validation error returned by +// BuildVersion.Validate if the designated constraints aren't met. +type BuildVersionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e BuildVersionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e BuildVersionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e BuildVersionValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e BuildVersionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e BuildVersionValidationError) ErrorName() string { return "BuildVersionValidationError" } + +// Error satisfies the builtin error interface +func (e BuildVersionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBuildVersion.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = BuildVersionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = BuildVersionValidationError{} + +// Validate checks the field values on Extension with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Extension) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Extension with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ExtensionMultiError, or nil +// if none found. +func (m *Extension) ValidateAll() error { + return m.validate(true) +} + +func (m *Extension) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + // no validation rules for Category + + // no validation rules for TypeDescriptor + + if all { + switch v := interface{}(m.GetVersion()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ExtensionValidationError{ + field: "Version", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ExtensionValidationError{ + field: "Version", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetVersion()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ExtensionValidationError{ + field: "Version", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Disabled + + if len(errors) > 0 { + return ExtensionMultiError(errors) + } + + return nil +} + +// ExtensionMultiError is an error wrapping multiple validation errors returned +// by Extension.ValidateAll() if the designated constraints aren't met. +type ExtensionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ExtensionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ExtensionMultiError) AllErrors() []error { return m } + +// ExtensionValidationError is the validation error returned by +// Extension.Validate if the designated constraints aren't met. +type ExtensionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ExtensionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ExtensionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e ExtensionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ExtensionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ExtensionValidationError) ErrorName() string { return "ExtensionValidationError" } + +// Error satisfies the builtin error interface +func (e ExtensionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sExtension.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ExtensionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ExtensionValidationError{} + +// Validate checks the field values on Node with the rules defined in the proto +// definition for this message. If any rules are violated, the first error +// encountered is returned, or nil if there are no violations. +func (m *Node) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Node with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in NodeMultiError, or nil if none found. +func (m *Node) ValidateAll() error { + return m.validate(true) +} + +func (m *Node) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Id + + // no validation rules for Cluster + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NodeValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NodeValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + { + sorted_keys := make([]string, len(m.GetDynamicParameters())) + i := 0 + for key := range m.GetDynamicParameters() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetDynamicParameters()[key] + _ = val + + // no validation rules for DynamicParameters[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NodeValidationError{ + field: fmt.Sprintf("DynamicParameters[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NodeValidationError{ + field: fmt.Sprintf("DynamicParameters[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeValidationError{ + field: fmt.Sprintf("DynamicParameters[%v]", key), + reason: "embedded message failed validation", + cause: err, + 
} + } + } + + } + } + + if all { + switch v := interface{}(m.GetLocality()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NodeValidationError{ + field: "Locality", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NodeValidationError{ + field: "Locality", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocality()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeValidationError{ + field: "Locality", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for UserAgentName + + for idx, item := range m.GetExtensions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NodeValidationError{ + field: fmt.Sprintf("Extensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NodeValidationError{ + field: fmt.Sprintf("Extensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeValidationError{ + field: fmt.Sprintf("Extensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetListeningAddresses() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NodeValidationError{ + field: fmt.Sprintf("ListeningAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NodeValidationError{ + field: fmt.Sprintf("ListeningAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeValidationError{ + field: fmt.Sprintf("ListeningAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + switch v := m.UserAgentVersionType.(type) { + case *Node_UserAgentVersion: + if v == nil { + err := NodeValidationError{ + field: "UserAgentVersionType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for UserAgentVersion + case *Node_UserAgentBuildVersion: + if v == nil { + err := NodeValidationError{ + field: "UserAgentVersionType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetUserAgentBuildVersion()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NodeValidationError{ + field: "UserAgentBuildVersion", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil 
{ + errors = append(errors, NodeValidationError{ + field: "UserAgentBuildVersion", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUserAgentBuildVersion()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeValidationError{ + field: "UserAgentBuildVersion", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return NodeMultiError(errors) + } + + return nil +} + +// NodeMultiError is an error wrapping multiple validation errors returned by +// Node.ValidateAll() if the designated constraints aren't met. +type NodeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m NodeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m NodeMultiError) AllErrors() []error { return m } + +// NodeValidationError is the validation error returned by Node.Validate if the +// designated constraints aren't met. +type NodeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e NodeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e NodeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e NodeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e NodeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e NodeValidationError) ErrorName() string { return "NodeValidationError" } + +// Error satisfies the builtin error interface +func (e NodeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sNode.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = NodeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = NodeValidationError{} + +// Validate checks the field values on Metadata with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Metadata) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Metadata with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in MetadataMultiError, or nil +// if none found. 
+func (m *Metadata) ValidateAll() error { + return m.validate(true) +} + +func (m *Metadata) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + { + sorted_keys := make([]string, len(m.GetFilterMetadata())) + i := 0 + for key := range m.GetFilterMetadata() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetFilterMetadata()[key] + _ = val + + if utf8.RuneCountInString(key) < 1 { + err := MetadataValidationError{ + field: fmt.Sprintf("FilterMetadata[%v]", key), + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataValidationError{ + field: fmt.Sprintf("FilterMetadata[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataValidationError{ + field: fmt.Sprintf("FilterMetadata[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataValidationError{ + field: fmt.Sprintf("FilterMetadata[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + { + sorted_keys := make([]string, len(m.GetTypedFilterMetadata())) + i := 0 + for key := range m.GetTypedFilterMetadata() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetTypedFilterMetadata()[key] + _ = val + + if utf8.RuneCountInString(key) < 1 { + err := MetadataValidationError{ + field: fmt.Sprintf("TypedFilterMetadata[%v]", key), + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataValidationError{ + field: fmt.Sprintf("TypedFilterMetadata[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataValidationError{ + field: fmt.Sprintf("TypedFilterMetadata[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataValidationError{ + field: fmt.Sprintf("TypedFilterMetadata[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + if len(errors) > 0 { + return MetadataMultiError(errors) + } + + return nil +} + +// MetadataMultiError is an error wrapping multiple validation errors returned +// by Metadata.ValidateAll() if the designated constraints aren't met. +type MetadataMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m MetadataMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MetadataMultiError) AllErrors() []error { return m } + +// MetadataValidationError is the validation error returned by +// Metadata.Validate if the designated constraints aren't met. +type MetadataValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MetadataValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MetadataValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetadataValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MetadataValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MetadataValidationError) ErrorName() string { return "MetadataValidationError" } + +// Error satisfies the builtin error interface +func (e MetadataValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetadata.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetadataValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetadataValidationError{} + +// Validate checks the field values on RuntimeUInt32 with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RuntimeUInt32) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RuntimeUInt32 with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RuntimeUInt32MultiError, or +// nil if none found. +func (m *RuntimeUInt32) ValidateAll() error { + return m.validate(true) +} + +func (m *RuntimeUInt32) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for DefaultValue + + if utf8.RuneCountInString(m.GetRuntimeKey()) < 1 { + err := RuntimeUInt32ValidationError{ + field: "RuntimeKey", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RuntimeUInt32MultiError(errors) + } + + return nil +} + +// RuntimeUInt32MultiError is an error wrapping multiple validation errors +// returned by RuntimeUInt32.ValidateAll() if the designated constraints +// aren't met. +type RuntimeUInt32MultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RuntimeUInt32MultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RuntimeUInt32MultiError) AllErrors() []error { return m } + +// RuntimeUInt32ValidationError is the validation error returned by +// RuntimeUInt32.Validate if the designated constraints aren't met. 
+type RuntimeUInt32ValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RuntimeUInt32ValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RuntimeUInt32ValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RuntimeUInt32ValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RuntimeUInt32ValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RuntimeUInt32ValidationError) ErrorName() string { return "RuntimeUInt32ValidationError" } + +// Error satisfies the builtin error interface +func (e RuntimeUInt32ValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRuntimeUInt32.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RuntimeUInt32ValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RuntimeUInt32ValidationError{} + +// Validate checks the field values on RuntimePercent with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RuntimePercent) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RuntimePercent with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RuntimePercentMultiError, +// or nil if none found. +func (m *RuntimePercent) ValidateAll() error { + return m.validate(true) +} + +func (m *RuntimePercent) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetDefaultValue()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RuntimePercentValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RuntimePercentValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDefaultValue()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RuntimePercentValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if utf8.RuneCountInString(m.GetRuntimeKey()) < 1 { + err := RuntimePercentValidationError{ + field: "RuntimeKey", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RuntimePercentMultiError(errors) + } + + return nil +} + +// RuntimePercentMultiError is an error wrapping multiple validation errors +// returned by RuntimePercent.ValidateAll() if the designated constraints +// aren't met. +type RuntimePercentMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m RuntimePercentMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RuntimePercentMultiError) AllErrors() []error { return m } + +// RuntimePercentValidationError is the validation error returned by +// RuntimePercent.Validate if the designated constraints aren't met. +type RuntimePercentValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RuntimePercentValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RuntimePercentValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RuntimePercentValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RuntimePercentValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RuntimePercentValidationError) ErrorName() string { return "RuntimePercentValidationError" } + +// Error satisfies the builtin error interface +func (e RuntimePercentValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRuntimePercent.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RuntimePercentValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RuntimePercentValidationError{} + +// Validate checks the field values on RuntimeDouble with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RuntimeDouble) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RuntimeDouble with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RuntimeDoubleMultiError, or +// nil if none found. +func (m *RuntimeDouble) ValidateAll() error { + return m.validate(true) +} + +func (m *RuntimeDouble) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for DefaultValue + + if utf8.RuneCountInString(m.GetRuntimeKey()) < 1 { + err := RuntimeDoubleValidationError{ + field: "RuntimeKey", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RuntimeDoubleMultiError(errors) + } + + return nil +} + +// RuntimeDoubleMultiError is an error wrapping multiple validation errors +// returned by RuntimeDouble.ValidateAll() if the designated constraints +// aren't met. +type RuntimeDoubleMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RuntimeDoubleMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RuntimeDoubleMultiError) AllErrors() []error { return m } + +// RuntimeDoubleValidationError is the validation error returned by +// RuntimeDouble.Validate if the designated constraints aren't met. 
+type RuntimeDoubleValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RuntimeDoubleValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RuntimeDoubleValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RuntimeDoubleValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RuntimeDoubleValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RuntimeDoubleValidationError) ErrorName() string { return "RuntimeDoubleValidationError" } + +// Error satisfies the builtin error interface +func (e RuntimeDoubleValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRuntimeDouble.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RuntimeDoubleValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RuntimeDoubleValidationError{} + +// Validate checks the field values on RuntimeFeatureFlag with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RuntimeFeatureFlag) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RuntimeFeatureFlag with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RuntimeFeatureFlagMultiError, or nil if none found. +func (m *RuntimeFeatureFlag) ValidateAll() error { + return m.validate(true) +} + +func (m *RuntimeFeatureFlag) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetDefaultValue() == nil { + err := RuntimeFeatureFlagValidationError{ + field: "DefaultValue", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetDefaultValue()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RuntimeFeatureFlagValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RuntimeFeatureFlagValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDefaultValue()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RuntimeFeatureFlagValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if utf8.RuneCountInString(m.GetRuntimeKey()) < 1 { + err := RuntimeFeatureFlagValidationError{ + field: "RuntimeKey", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RuntimeFeatureFlagMultiError(errors) + } + + return nil +} + +// RuntimeFeatureFlagMultiError is an error wrapping multiple validation errors +// returned by RuntimeFeatureFlag.ValidateAll() if the designated constraints +// aren't met. 
+type RuntimeFeatureFlagMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RuntimeFeatureFlagMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RuntimeFeatureFlagMultiError) AllErrors() []error { return m } + +// RuntimeFeatureFlagValidationError is the validation error returned by +// RuntimeFeatureFlag.Validate if the designated constraints aren't met. +type RuntimeFeatureFlagValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RuntimeFeatureFlagValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RuntimeFeatureFlagValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RuntimeFeatureFlagValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RuntimeFeatureFlagValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RuntimeFeatureFlagValidationError) ErrorName() string { + return "RuntimeFeatureFlagValidationError" +} + +// Error satisfies the builtin error interface +func (e RuntimeFeatureFlagValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRuntimeFeatureFlag.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RuntimeFeatureFlagValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RuntimeFeatureFlagValidationError{} + +// Validate checks the field values on KeyValue with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *KeyValue) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on KeyValue with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in KeyValueMultiError, or nil +// if none found. +func (m *KeyValue) ValidateAll() error { + return m.validate(true) +} + +func (m *KeyValue) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetKey()) < 1 { + err := KeyValueValidationError{ + field: "Key", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetKey()) > 16384 { + err := KeyValueValidationError{ + field: "Key", + reason: "value length must be at most 16384 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for Value + + if len(errors) > 0 { + return KeyValueMultiError(errors) + } + + return nil +} + +// KeyValueMultiError is an error wrapping multiple validation errors returned +// by KeyValue.ValidateAll() if the designated constraints aren't met. +type KeyValueMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m KeyValueMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m KeyValueMultiError) AllErrors() []error { return m } + +// KeyValueValidationError is the validation error returned by +// KeyValue.Validate if the designated constraints aren't met. +type KeyValueValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e KeyValueValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e KeyValueValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e KeyValueValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e KeyValueValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e KeyValueValidationError) ErrorName() string { return "KeyValueValidationError" } + +// Error satisfies the builtin error interface +func (e KeyValueValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sKeyValue.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = KeyValueValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = KeyValueValidationError{} + +// Validate checks the field values on KeyValueAppend with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *KeyValueAppend) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on KeyValueAppend with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in KeyValueAppendMultiError, +// or nil if none found. 
+func (m *KeyValueAppend) ValidateAll() error { + return m.validate(true) +} + +func (m *KeyValueAppend) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetEntry() == nil { + err := KeyValueAppendValidationError{ + field: "Entry", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetEntry()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, KeyValueAppendValidationError{ + field: "Entry", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, KeyValueAppendValidationError{ + field: "Entry", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEntry()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return KeyValueAppendValidationError{ + field: "Entry", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if _, ok := KeyValueAppend_KeyValueAppendAction_name[int32(m.GetAction())]; !ok { + err := KeyValueAppendValidationError{ + field: "Action", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return KeyValueAppendMultiError(errors) + } + + return nil +} + +// KeyValueAppendMultiError is an error wrapping multiple validation errors +// returned by KeyValueAppend.ValidateAll() if the designated constraints +// aren't met. +type KeyValueAppendMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m KeyValueAppendMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m KeyValueAppendMultiError) AllErrors() []error { return m } + +// KeyValueAppendValidationError is the validation error returned by +// KeyValueAppend.Validate if the designated constraints aren't met. +type KeyValueAppendValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e KeyValueAppendValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e KeyValueAppendValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e KeyValueAppendValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e KeyValueAppendValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e KeyValueAppendValidationError) ErrorName() string { return "KeyValueAppendValidationError" } + +// Error satisfies the builtin error interface +func (e KeyValueAppendValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sKeyValueAppend.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = KeyValueAppendValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = KeyValueAppendValidationError{} + +// Validate checks the field values on KeyValueMutation with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *KeyValueMutation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on KeyValueMutation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// KeyValueMutationMultiError, or nil if none found. +func (m *KeyValueMutation) ValidateAll() error { + return m.validate(true) +} + +func (m *KeyValueMutation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetAppend()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, KeyValueMutationValidationError{ + field: "Append", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, KeyValueMutationValidationError{ + field: "Append", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAppend()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return KeyValueMutationValidationError{ + field: "Append", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(m.GetRemove()) > 16384 { + err := KeyValueMutationValidationError{ + field: "Remove", + reason: "value length must be at most 16384 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return KeyValueMutationMultiError(errors) + } + + return nil +} + +// KeyValueMutationMultiError is an error wrapping multiple validation errors +// returned by KeyValueMutation.ValidateAll() if the designated constraints +// aren't met. +type KeyValueMutationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m KeyValueMutationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m KeyValueMutationMultiError) AllErrors() []error { return m } + +// KeyValueMutationValidationError is the validation error returned by +// KeyValueMutation.Validate if the designated constraints aren't met. +type KeyValueMutationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e KeyValueMutationValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e KeyValueMutationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e KeyValueMutationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e KeyValueMutationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e KeyValueMutationValidationError) ErrorName() string { return "KeyValueMutationValidationError" } + +// Error satisfies the builtin error interface +func (e KeyValueMutationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sKeyValueMutation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = KeyValueMutationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = KeyValueMutationValidationError{} + +// Validate checks the field values on QueryParameter with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *QueryParameter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on QueryParameter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in QueryParameterMultiError, +// or nil if none found. +func (m *QueryParameter) ValidateAll() error { + return m.validate(true) +} + +func (m *QueryParameter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetKey()) < 1 { + err := QueryParameterValidationError{ + field: "Key", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for Value + + if len(errors) > 0 { + return QueryParameterMultiError(errors) + } + + return nil +} + +// QueryParameterMultiError is an error wrapping multiple validation errors +// returned by QueryParameter.ValidateAll() if the designated constraints +// aren't met. +type QueryParameterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m QueryParameterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m QueryParameterMultiError) AllErrors() []error { return m } + +// QueryParameterValidationError is the validation error returned by +// QueryParameter.Validate if the designated constraints aren't met. +type QueryParameterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e QueryParameterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e QueryParameterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e QueryParameterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e QueryParameterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e QueryParameterValidationError) ErrorName() string { return "QueryParameterValidationError" } + +// Error satisfies the builtin error interface +func (e QueryParameterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sQueryParameter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = QueryParameterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = QueryParameterValidationError{} + +// Validate checks the field values on HeaderValue with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HeaderValue) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HeaderValue with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HeaderValueMultiError, or +// nil if none found. +func (m *HeaderValue) ValidateAll() error { + return m.validate(true) +} + +func (m *HeaderValue) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetKey()) < 1 { + err := HeaderValueValidationError{ + field: "Key", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetKey()) > 16384 { + err := HeaderValueValidationError{ + field: "Key", + reason: "value length must be at most 16384 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_HeaderValue_Key_Pattern.MatchString(m.GetKey()) { + err := HeaderValueValidationError{ + field: "Key", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetValue()) > 16384 { + err := HeaderValueValidationError{ + field: "Value", + reason: "value length must be at most 16384 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_HeaderValue_Value_Pattern.MatchString(m.GetValue()) { + err := HeaderValueValidationError{ + field: "Value", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if l := len(m.GetRawValue()); l < 0 || l > 16384 { + err := HeaderValueValidationError{ + field: "RawValue", + reason: "value length must be between 0 and 16384 bytes, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HeaderValueMultiError(errors) + } + + return nil +} + +// HeaderValueMultiError is an error wrapping multiple validation errors +// returned by HeaderValue.ValidateAll() if the designated constraints aren't met. +type HeaderValueMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HeaderValueMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m HeaderValueMultiError) AllErrors() []error { return m } + +// HeaderValueValidationError is the validation error returned by +// HeaderValue.Validate if the designated constraints aren't met. +type HeaderValueValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HeaderValueValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HeaderValueValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HeaderValueValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HeaderValueValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HeaderValueValidationError) ErrorName() string { return "HeaderValueValidationError" } + +// Error satisfies the builtin error interface +func (e HeaderValueValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHeaderValue.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HeaderValueValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HeaderValueValidationError{} + +var _HeaderValue_Key_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _HeaderValue_Value_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on HeaderValueOption with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *HeaderValueOption) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HeaderValueOption with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HeaderValueOptionMultiError, or nil if none found. 
+func (m *HeaderValueOption) ValidateAll() error { + return m.validate(true) +} + +func (m *HeaderValueOption) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetHeader() == nil { + err := HeaderValueOptionValidationError{ + field: "Header", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetHeader()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HeaderValueOptionValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HeaderValueOptionValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeader()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HeaderValueOptionValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetAppend()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HeaderValueOptionValidationError{ + field: "Append", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HeaderValueOptionValidationError{ + field: "Append", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAppend()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HeaderValueOptionValidationError{ + field: "Append", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if _, ok := HeaderValueOption_HeaderAppendAction_name[int32(m.GetAppendAction())]; !ok { + err := HeaderValueOptionValidationError{ + field: "AppendAction", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for KeepEmptyValue + + if len(errors) > 0 { + return HeaderValueOptionMultiError(errors) + } + + return nil +} + +// HeaderValueOptionMultiError is an error wrapping multiple validation errors +// returned by HeaderValueOption.ValidateAll() if the designated constraints +// aren't met. +type HeaderValueOptionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HeaderValueOptionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HeaderValueOptionMultiError) AllErrors() []error { return m } + +// HeaderValueOptionValidationError is the validation error returned by +// HeaderValueOption.Validate if the designated constraints aren't met. +type HeaderValueOptionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HeaderValueOptionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HeaderValueOptionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e HeaderValueOptionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HeaderValueOptionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HeaderValueOptionValidationError) ErrorName() string { + return "HeaderValueOptionValidationError" +} + +// Error satisfies the builtin error interface +func (e HeaderValueOptionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHeaderValueOption.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HeaderValueOptionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HeaderValueOptionValidationError{} + +// Validate checks the field values on HeaderMap with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HeaderMap) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HeaderMap with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HeaderMapMultiError, or nil +// if none found. +func (m *HeaderMap) ValidateAll() error { + return m.validate(true) +} + +func (m *HeaderMap) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetHeaders() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HeaderMapValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HeaderMapValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HeaderMapValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return HeaderMapMultiError(errors) + } + + return nil +} + +// HeaderMapMultiError is an error wrapping multiple validation errors returned +// by HeaderMap.ValidateAll() if the designated constraints aren't met. +type HeaderMapMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HeaderMapMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HeaderMapMultiError) AllErrors() []error { return m } + +// HeaderMapValidationError is the validation error returned by +// HeaderMap.Validate if the designated constraints aren't met. +type HeaderMapValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HeaderMapValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e HeaderMapValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HeaderMapValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HeaderMapValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HeaderMapValidationError) ErrorName() string { return "HeaderMapValidationError" } + +// Error satisfies the builtin error interface +func (e HeaderMapValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHeaderMap.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HeaderMapValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HeaderMapValidationError{} + +// Validate checks the field values on WatchedDirectory with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *WatchedDirectory) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on WatchedDirectory with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// WatchedDirectoryMultiError, or nil if none found. +func (m *WatchedDirectory) ValidateAll() error { + return m.validate(true) +} + +func (m *WatchedDirectory) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetPath()) < 1 { + err := WatchedDirectoryValidationError{ + field: "Path", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return WatchedDirectoryMultiError(errors) + } + + return nil +} + +// WatchedDirectoryMultiError is an error wrapping multiple validation errors +// returned by WatchedDirectory.ValidateAll() if the designated constraints +// aren't met. +type WatchedDirectoryMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m WatchedDirectoryMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m WatchedDirectoryMultiError) AllErrors() []error { return m } + +// WatchedDirectoryValidationError is the validation error returned by +// WatchedDirectory.Validate if the designated constraints aren't met. +type WatchedDirectoryValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e WatchedDirectoryValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e WatchedDirectoryValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e WatchedDirectoryValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e WatchedDirectoryValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e WatchedDirectoryValidationError) ErrorName() string { return "WatchedDirectoryValidationError" } + +// Error satisfies the builtin error interface +func (e WatchedDirectoryValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sWatchedDirectory.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = WatchedDirectoryValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = WatchedDirectoryValidationError{} + +// Validate checks the field values on DataSource with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *DataSource) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DataSource with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in DataSourceMultiError, or +// nil if none found. +func (m *DataSource) ValidateAll() error { + return m.validate(true) +} + +func (m *DataSource) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetWatchedDirectory()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DataSourceValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DataSourceValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWatchedDirectory()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DataSourceValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofSpecifierPresent := false + switch v := m.Specifier.(type) { + case *DataSource_Filename: + if v == nil { + err := DataSourceValidationError{ + field: "Specifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofSpecifierPresent = true + + if utf8.RuneCountInString(m.GetFilename()) < 1 { + err := DataSourceValidationError{ + field: "Filename", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *DataSource_InlineBytes: + if v == nil { + err := DataSourceValidationError{ + field: "Specifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofSpecifierPresent = true + // no validation rules for InlineBytes + case *DataSource_InlineString: + if v == nil { + err := DataSourceValidationError{ + field: "Specifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofSpecifierPresent = true + // no validation rules for InlineString + case *DataSource_EnvironmentVariable: + if v == nil { + err := DataSourceValidationError{ + field: "Specifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = 
append(errors, err) + } + oneofSpecifierPresent = true + + if utf8.RuneCountInString(m.GetEnvironmentVariable()) < 1 { + err := DataSourceValidationError{ + field: "EnvironmentVariable", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + default: + _ = v // ensures v is used + } + if !oneofSpecifierPresent { + err := DataSourceValidationError{ + field: "Specifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return DataSourceMultiError(errors) + } + + return nil +} + +// DataSourceMultiError is an error wrapping multiple validation errors +// returned by DataSource.ValidateAll() if the designated constraints aren't met. +type DataSourceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DataSourceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DataSourceMultiError) AllErrors() []error { return m } + +// DataSourceValidationError is the validation error returned by +// DataSource.Validate if the designated constraints aren't met. +type DataSourceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DataSourceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DataSourceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DataSourceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DataSourceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DataSourceValidationError) ErrorName() string { return "DataSourceValidationError" } + +// Error satisfies the builtin error interface +func (e DataSourceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDataSource.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DataSourceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DataSourceValidationError{} + +// Validate checks the field values on RetryPolicy with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RetryPolicy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RetryPolicy with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RetryPolicyMultiError, or +// nil if none found. 
+func (m *RetryPolicy) ValidateAll() error { + return m.validate(true) +} + +func (m *RetryPolicy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetRetryBackOff()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "RetryBackOff", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "RetryBackOff", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryBackOff()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: "RetryBackOff", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetNumRetries()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "NumRetries", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "NumRetries", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNumRetries()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: "NumRetries", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for RetryOn + + if all { + switch v := interface{}(m.GetRetryPriority()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "RetryPriority", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "RetryPriority", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryPriority()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: "RetryPriority", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetRetryHostPredicate() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: fmt.Sprintf("RetryHostPredicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: fmt.Sprintf("RetryHostPredicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: fmt.Sprintf("RetryHostPredicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + 
// no validation rules for HostSelectionRetryMaxAttempts + + if len(errors) > 0 { + return RetryPolicyMultiError(errors) + } + + return nil +} + +// RetryPolicyMultiError is an error wrapping multiple validation errors +// returned by RetryPolicy.ValidateAll() if the designated constraints aren't met. +type RetryPolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RetryPolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RetryPolicyMultiError) AllErrors() []error { return m } + +// RetryPolicyValidationError is the validation error returned by +// RetryPolicy.Validate if the designated constraints aren't met. +type RetryPolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RetryPolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RetryPolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RetryPolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RetryPolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RetryPolicyValidationError) ErrorName() string { return "RetryPolicyValidationError" } + +// Error satisfies the builtin error interface +func (e RetryPolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRetryPolicy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RetryPolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RetryPolicyValidationError{} + +// Validate checks the field values on RemoteDataSource with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *RemoteDataSource) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RemoteDataSource with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RemoteDataSourceMultiError, or nil if none found. 
+func (m *RemoteDataSource) ValidateAll() error { + return m.validate(true) +} + +func (m *RemoteDataSource) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetHttpUri() == nil { + err := RemoteDataSourceValidationError{ + field: "HttpUri", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetHttpUri()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RemoteDataSourceValidationError{ + field: "HttpUri", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RemoteDataSourceValidationError{ + field: "HttpUri", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpUri()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RemoteDataSourceValidationError{ + field: "HttpUri", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if utf8.RuneCountInString(m.GetSha256()) < 1 { + err := RemoteDataSourceValidationError{ + field: "Sha256", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetRetryPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RemoteDataSourceValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RemoteDataSourceValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RemoteDataSourceValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RemoteDataSourceMultiError(errors) + } + + return nil +} + +// RemoteDataSourceMultiError is an error wrapping multiple validation errors +// returned by RemoteDataSource.ValidateAll() if the designated constraints +// aren't met. +type RemoteDataSourceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RemoteDataSourceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RemoteDataSourceMultiError) AllErrors() []error { return m } + +// RemoteDataSourceValidationError is the validation error returned by +// RemoteDataSource.Validate if the designated constraints aren't met. +type RemoteDataSourceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RemoteDataSourceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RemoteDataSourceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RemoteDataSourceValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e RemoteDataSourceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RemoteDataSourceValidationError) ErrorName() string { return "RemoteDataSourceValidationError" } + +// Error satisfies the builtin error interface +func (e RemoteDataSourceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRemoteDataSource.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RemoteDataSourceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RemoteDataSourceValidationError{} + +// Validate checks the field values on AsyncDataSource with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *AsyncDataSource) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on AsyncDataSource with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// AsyncDataSourceMultiError, or nil if none found. +func (m *AsyncDataSource) ValidateAll() error { + return m.validate(true) +} + +func (m *AsyncDataSource) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofSpecifierPresent := false + switch v := m.Specifier.(type) { + case *AsyncDataSource_Local: + if v == nil { + err := AsyncDataSourceValidationError{ + field: "Specifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofSpecifierPresent = true + + if all { + switch v := interface{}(m.GetLocal()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AsyncDataSourceValidationError{ + field: "Local", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AsyncDataSourceValidationError{ + field: "Local", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocal()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AsyncDataSourceValidationError{ + field: "Local", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *AsyncDataSource_Remote: + if v == nil { + err := AsyncDataSourceValidationError{ + field: "Specifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofSpecifierPresent = true + + if all { + switch v := interface{}(m.GetRemote()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AsyncDataSourceValidationError{ + field: "Remote", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AsyncDataSourceValidationError{ + field: "Remote", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRemote()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
AsyncDataSourceValidationError{ + field: "Remote", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofSpecifierPresent { + err := AsyncDataSourceValidationError{ + field: "Specifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return AsyncDataSourceMultiError(errors) + } + + return nil +} + +// AsyncDataSourceMultiError is an error wrapping multiple validation errors +// returned by AsyncDataSource.ValidateAll() if the designated constraints +// aren't met. +type AsyncDataSourceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AsyncDataSourceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AsyncDataSourceMultiError) AllErrors() []error { return m } + +// AsyncDataSourceValidationError is the validation error returned by +// AsyncDataSource.Validate if the designated constraints aren't met. +type AsyncDataSourceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AsyncDataSourceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AsyncDataSourceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e AsyncDataSourceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AsyncDataSourceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e AsyncDataSourceValidationError) ErrorName() string { return "AsyncDataSourceValidationError" } + +// Error satisfies the builtin error interface +func (e AsyncDataSourceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAsyncDataSource.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AsyncDataSourceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AsyncDataSourceValidationError{} + +// Validate checks the field values on TransportSocket with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *TransportSocket) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TransportSocket with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// TransportSocketMultiError, or nil if none found. 
+func (m *TransportSocket) ValidateAll() error { + return m.validate(true) +} + +func (m *TransportSocket) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := TransportSocketValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + switch v := m.ConfigType.(type) { + case *TransportSocket_TypedConfig: + if v == nil { + err := TransportSocketValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TransportSocketValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TransportSocketValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TransportSocketValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return TransportSocketMultiError(errors) + } + + return nil +} + +// TransportSocketMultiError is an error wrapping multiple validation errors +// returned by TransportSocket.ValidateAll() if the designated constraints +// aren't met. +type TransportSocketMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TransportSocketMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TransportSocketMultiError) AllErrors() []error { return m } + +// TransportSocketValidationError is the validation error returned by +// TransportSocket.Validate if the designated constraints aren't met. +type TransportSocketValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TransportSocketValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TransportSocketValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TransportSocketValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TransportSocketValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e TransportSocketValidationError) ErrorName() string { return "TransportSocketValidationError" } + +// Error satisfies the builtin error interface +func (e TransportSocketValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTransportSocket.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TransportSocketValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TransportSocketValidationError{} + +// Validate checks the field values on RuntimeFractionalPercent with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RuntimeFractionalPercent) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RuntimeFractionalPercent with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RuntimeFractionalPercentMultiError, or nil if none found. +func (m *RuntimeFractionalPercent) ValidateAll() error { + return m.validate(true) +} + +func (m *RuntimeFractionalPercent) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetDefaultValue() == nil { + err := RuntimeFractionalPercentValidationError{ + field: "DefaultValue", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetDefaultValue()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RuntimeFractionalPercentValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RuntimeFractionalPercentValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDefaultValue()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RuntimeFractionalPercentValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for RuntimeKey + + if len(errors) > 0 { + return RuntimeFractionalPercentMultiError(errors) + } + + return nil +} + +// RuntimeFractionalPercentMultiError is an error wrapping multiple validation +// errors returned by RuntimeFractionalPercent.ValidateAll() if the designated +// constraints aren't met. +type RuntimeFractionalPercentMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RuntimeFractionalPercentMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RuntimeFractionalPercentMultiError) AllErrors() []error { return m } + +// RuntimeFractionalPercentValidationError is the validation error returned by +// RuntimeFractionalPercent.Validate if the designated constraints aren't met. 
+type RuntimeFractionalPercentValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RuntimeFractionalPercentValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RuntimeFractionalPercentValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RuntimeFractionalPercentValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RuntimeFractionalPercentValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RuntimeFractionalPercentValidationError) ErrorName() string { + return "RuntimeFractionalPercentValidationError" +} + +// Error satisfies the builtin error interface +func (e RuntimeFractionalPercentValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRuntimeFractionalPercent.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RuntimeFractionalPercentValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RuntimeFractionalPercentValidationError{} + +// Validate checks the field values on ControlPlane with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ControlPlane) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ControlPlane with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ControlPlaneMultiError, or +// nil if none found. +func (m *ControlPlane) ValidateAll() error { + return m.validate(true) +} + +func (m *ControlPlane) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Identifier + + if len(errors) > 0 { + return ControlPlaneMultiError(errors) + } + + return nil +} + +// ControlPlaneMultiError is an error wrapping multiple validation errors +// returned by ControlPlane.ValidateAll() if the designated constraints aren't met. +type ControlPlaneMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ControlPlaneMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ControlPlaneMultiError) AllErrors() []error { return m } + +// ControlPlaneValidationError is the validation error returned by +// ControlPlane.Validate if the designated constraints aren't met. +type ControlPlaneValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ControlPlaneValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ControlPlaneValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ControlPlaneValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ControlPlaneValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ControlPlaneValidationError) ErrorName() string { return "ControlPlaneValidationError" } + +// Error satisfies the builtin error interface +func (e ControlPlaneValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sControlPlane.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ControlPlaneValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ControlPlaneValidationError{} + +// Validate checks the field values on RetryPolicy_RetryPriority with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RetryPolicy_RetryPriority) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RetryPolicy_RetryPriority with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RetryPolicy_RetryPriorityMultiError, or nil if none found. +func (m *RetryPolicy_RetryPriority) ValidateAll() error { + return m.validate(true) +} + +func (m *RetryPolicy_RetryPriority) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := RetryPolicy_RetryPriorityValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + switch v := m.ConfigType.(type) { + case *RetryPolicy_RetryPriority_TypedConfig: + if v == nil { + err := RetryPolicy_RetryPriorityValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicy_RetryPriorityValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicy_RetryPriorityValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicy_RetryPriorityValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return RetryPolicy_RetryPriorityMultiError(errors) + } + + return nil +} + +// RetryPolicy_RetryPriorityMultiError is an error wrapping multiple validation +// errors returned by RetryPolicy_RetryPriority.ValidateAll() if the +// designated constraints aren't met. +type RetryPolicy_RetryPriorityMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RetryPolicy_RetryPriorityMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m RetryPolicy_RetryPriorityMultiError) AllErrors() []error { return m } + +// RetryPolicy_RetryPriorityValidationError is the validation error returned by +// RetryPolicy_RetryPriority.Validate if the designated constraints aren't met. +type RetryPolicy_RetryPriorityValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RetryPolicy_RetryPriorityValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RetryPolicy_RetryPriorityValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RetryPolicy_RetryPriorityValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RetryPolicy_RetryPriorityValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RetryPolicy_RetryPriorityValidationError) ErrorName() string { + return "RetryPolicy_RetryPriorityValidationError" +} + +// Error satisfies the builtin error interface +func (e RetryPolicy_RetryPriorityValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRetryPolicy_RetryPriority.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RetryPolicy_RetryPriorityValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RetryPolicy_RetryPriorityValidationError{} + +// Validate checks the field values on RetryPolicy_RetryHostPredicate with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RetryPolicy_RetryHostPredicate) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RetryPolicy_RetryHostPredicate with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// RetryPolicy_RetryHostPredicateMultiError, or nil if none found. 
+func (m *RetryPolicy_RetryHostPredicate) ValidateAll() error { + return m.validate(true) +} + +func (m *RetryPolicy_RetryHostPredicate) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := RetryPolicy_RetryHostPredicateValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + switch v := m.ConfigType.(type) { + case *RetryPolicy_RetryHostPredicate_TypedConfig: + if v == nil { + err := RetryPolicy_RetryHostPredicateValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicy_RetryHostPredicateValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicy_RetryHostPredicateValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicy_RetryHostPredicateValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return RetryPolicy_RetryHostPredicateMultiError(errors) + } + + return nil +} + +// RetryPolicy_RetryHostPredicateMultiError is an error wrapping multiple +// validation errors returned by RetryPolicy_RetryHostPredicate.ValidateAll() +// if the designated constraints aren't met. +type RetryPolicy_RetryHostPredicateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RetryPolicy_RetryHostPredicateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RetryPolicy_RetryHostPredicateMultiError) AllErrors() []error { return m } + +// RetryPolicy_RetryHostPredicateValidationError is the validation error +// returned by RetryPolicy_RetryHostPredicate.Validate if the designated +// constraints aren't met. +type RetryPolicy_RetryHostPredicateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RetryPolicy_RetryHostPredicateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RetryPolicy_RetryHostPredicateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RetryPolicy_RetryHostPredicateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RetryPolicy_RetryHostPredicateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RetryPolicy_RetryHostPredicateValidationError) ErrorName() string { + return "RetryPolicy_RetryHostPredicateValidationError" +} + +// Error satisfies the builtin error interface +func (e RetryPolicy_RetryHostPredicateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRetryPolicy_RetryHostPredicate.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RetryPolicy_RetryHostPredicateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RetryPolicy_RetryHostPredicateValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base_vtproto.pb.go new file mode 100644 index 0000000000..fa4823023f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/base_vtproto.pb.go @@ -0,0 +1,2497 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/base.proto + +package corev3 + +import ( + binary "encoding/binary" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Locality) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Locality) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Locality) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.SubZone) > 0 { + i -= len(m.SubZone) + copy(dAtA[i:], m.SubZone) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SubZone))) + i-- + dAtA[i] = 0x1a + } + if len(m.Zone) > 0 { + i -= len(m.Zone) + copy(dAtA[i:], m.Zone) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Zone))) + i-- + dAtA[i] = 0x12 + } + if len(m.Region) > 0 { + i -= len(m.Region) + copy(dAtA[i:], m.Region) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Region))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BuildVersion) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildVersion) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *BuildVersion) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Metadata != nil { + size, err := (*structpb.Struct)(m.Metadata).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Version != nil { + if vtmsg, ok := interface{}(m.Version).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Version) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Extension) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Extension) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Extension) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TypeUrls) > 0 { + for iNdEx := len(m.TypeUrls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TypeUrls[iNdEx]) + 
copy(dAtA[i:], m.TypeUrls[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrls[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if m.Disabled { + i-- + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.Version != nil { + size, err := m.Version.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.TypeDescriptor) > 0 { + i -= len(m.TypeDescriptor) + copy(dAtA[i:], m.TypeDescriptor) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeDescriptor))) + i-- + dAtA[i] = 0x1a + } + if len(m.Category) > 0 { + i -= len(m.Category) + copy(dAtA[i:], m.Category) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Category))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Node) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Node) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Node) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DynamicParameters) > 0 { + for k := range m.DynamicParameters { + v := m.DynamicParameters[k] + baseI := i + if vtmsg, ok := interface{}(v).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(v) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x62 + } + } + if len(m.ListeningAddresses) > 0 { + for iNdEx := len(m.ListeningAddresses) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ListeningAddresses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + } + if len(m.ClientFeatures) > 0 { + for iNdEx := len(m.ClientFeatures) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ClientFeatures[iNdEx]) + copy(dAtA[i:], m.ClientFeatures[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClientFeatures[iNdEx]))) + i-- + dAtA[i] = 0x52 + } + } + if len(m.Extensions) > 0 { + for iNdEx := len(m.Extensions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Extensions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + } + if msg, ok := m.UserAgentVersionType.(*Node_UserAgentBuildVersion); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + } + if msg, ok := m.UserAgentVersionType.(*Node_UserAgentVersion); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.UserAgentName) > 0 { + i -= len(m.UserAgentName) + copy(dAtA[i:], m.UserAgentName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.UserAgentName))) + i-- + dAtA[i] = 0x32 + } + if m.Locality != nil { + size, err := m.Locality.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.Metadata != nil { + size, err := (*structpb.Struct)(m.Metadata).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Cluster) > 0 { + i -= len(m.Cluster) + copy(dAtA[i:], m.Cluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Cluster))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Node_UserAgentVersion) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Node_UserAgentVersion) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.UserAgentVersion) + copy(dAtA[i:], m.UserAgentVersion) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.UserAgentVersion))) + i-- + dAtA[i] = 0x3a + return len(dAtA) - i, nil +} +func (m *Node_UserAgentBuildVersion) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Node_UserAgentBuildVersion) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.UserAgentBuildVersion != nil { + size, err := m.UserAgentBuildVersion.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *Metadata) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metadata) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Metadata) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TypedFilterMetadata) > 0 { + for k := range m.TypedFilterMetadata { + v := m.TypedFilterMetadata[k] + baseI := i + size, err := (*anypb.Any)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.FilterMetadata) > 0 { + 
for k := range m.FilterMetadata { + v := m.FilterMetadata[k] + baseI := i + size, err := (*structpb.Struct)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RuntimeUInt32) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeUInt32) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeUInt32) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RuntimeKey) > 0 { + i -= len(m.RuntimeKey) + copy(dAtA[i:], m.RuntimeKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RuntimeKey))) + i-- + dAtA[i] = 0x1a + } + if m.DefaultValue != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DefaultValue)) + i-- + dAtA[i] = 0x10 + } + return len(dAtA) - i, nil +} + +func (m *RuntimePercent) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimePercent) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimePercent) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RuntimeKey) > 0 { + i -= len(m.RuntimeKey) + copy(dAtA[i:], m.RuntimeKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RuntimeKey))) + i-- + dAtA[i] = 0x12 + } + if m.DefaultValue != nil { + if vtmsg, ok := interface{}(m.DefaultValue).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DefaultValue) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RuntimeDouble) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeDouble) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeDouble) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RuntimeKey) > 0 { + i -= len(m.RuntimeKey) + copy(dAtA[i:], m.RuntimeKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RuntimeKey))) + i-- + dAtA[i] = 0x12 + } + if m.DefaultValue != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.DefaultValue)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *RuntimeFeatureFlag) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeFeatureFlag) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeFeatureFlag) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RuntimeKey) > 0 { + i -= len(m.RuntimeKey) + copy(dAtA[i:], m.RuntimeKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RuntimeKey))) + i-- + dAtA[i] = 0x12 + } + if m.DefaultValue != nil { + size, err := (*wrapperspb.BoolValue)(m.DefaultValue).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *KeyValue) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *KeyValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *KeyValueAppend) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyValueAppend) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *KeyValueAppend) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Action != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Action)) + i-- + dAtA[i] = 0x10 + } + if m.Entry != nil { + size, err := 
m.Entry.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *KeyValueMutation) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyValueMutation) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *KeyValueMutation) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Remove) > 0 { + i -= len(m.Remove) + copy(dAtA[i:], m.Remove) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Remove))) + i-- + dAtA[i] = 0x12 + } + if m.Append != nil { + size, err := m.Append.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryParameter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParameter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *QueryParameter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HeaderValue) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeaderValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RawValue) > 0 { + i -= len(m.RawValue) + copy(dAtA[i:], m.RawValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RawValue))) + i-- + dAtA[i] = 0x1a + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, 
nil +} + +func (m *HeaderValueOption) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeaderValueOption) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderValueOption) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.KeepEmptyValue { + i-- + if m.KeepEmptyValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.AppendAction != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.AppendAction)) + i-- + dAtA[i] = 0x18 + } + if m.Append != nil { + size, err := (*wrapperspb.BoolValue)(m.Append).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Header != nil { + size, err := m.Header.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HeaderMap) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeaderMap) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMap) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Headers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *WatchedDirectory) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchedDirectory) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *WatchedDirectory) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DataSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err 
!= nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DataSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DataSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.WatchedDirectory != nil { + size, err := m.WatchedDirectory.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if msg, ok := m.Specifier.(*DataSource_EnvironmentVariable); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Specifier.(*DataSource_InlineString); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Specifier.(*DataSource_InlineBytes); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Specifier.(*DataSource_Filename); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *DataSource_Filename) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DataSource_Filename) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Filename) + copy(dAtA[i:], m.Filename) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Filename))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *DataSource_InlineBytes) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DataSource_InlineBytes) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.InlineBytes) + copy(dAtA[i:], m.InlineBytes) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.InlineBytes))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *DataSource_InlineString) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DataSource_InlineString) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.InlineString) + copy(dAtA[i:], m.InlineString) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.InlineString))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *DataSource_EnvironmentVariable) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DataSource_EnvironmentVariable) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.EnvironmentVariable) + copy(dAtA[i:], m.EnvironmentVariable) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.EnvironmentVariable))) + i-- + dAtA[i] = 0x22 + return len(dAtA) - i, nil +} +func (m *RetryPolicy_RetryPriority) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*RetryPolicy_RetryPriority) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryPriority) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*RetryPolicy_RetryPriority_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RetryPolicy_RetryPriority_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryPriority_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *RetryPolicy_RetryHostPredicate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryPolicy_RetryHostPredicate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryHostPredicate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*RetryPolicy_RetryHostPredicate_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RetryPolicy_RetryHostPredicate_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryHostPredicate_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *RetryPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryPolicy) MarshalToVTStrict(dAtA 
[]byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.HostSelectionRetryMaxAttempts != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HostSelectionRetryMaxAttempts)) + i-- + dAtA[i] = 0x30 + } + if len(m.RetryHostPredicate) > 0 { + for iNdEx := len(m.RetryHostPredicate) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RetryHostPredicate[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.RetryPriority != nil { + size, err := m.RetryPriority.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.RetryOn) > 0 { + i -= len(m.RetryOn) + copy(dAtA[i:], m.RetryOn) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RetryOn))) + i-- + dAtA[i] = 0x1a + } + if m.NumRetries != nil { + size, err := (*wrapperspb.UInt32Value)(m.NumRetries).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.RetryBackOff != nil { + size, err := m.RetryBackOff.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemoteDataSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoteDataSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RemoteDataSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RetryPolicy != nil { + size, err := m.RetryPolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Sha256) > 0 { + i -= len(m.Sha256) + copy(dAtA[i:], m.Sha256) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Sha256))) + i-- + dAtA[i] = 0x12 + } + if m.HttpUri != nil { + size, err := m.HttpUri.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AsyncDataSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AsyncDataSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AsyncDataSource) 
MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Specifier.(*AsyncDataSource_Remote); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Specifier.(*AsyncDataSource_Local); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *AsyncDataSource_Local) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AsyncDataSource_Local) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Local != nil { + size, err := m.Local.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *AsyncDataSource_Remote) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AsyncDataSource_Remote) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Remote != nil { + size, err := m.Remote.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *TransportSocket) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TransportSocket) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TransportSocket) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*TransportSocket_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TransportSocket_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TransportSocket_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RuntimeFractionalPercent) MarshalVTStrict() (dAtA []byte, err 
error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeFractionalPercent) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RuntimeFractionalPercent) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RuntimeKey) > 0 { + i -= len(m.RuntimeKey) + copy(dAtA[i:], m.RuntimeKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RuntimeKey))) + i-- + dAtA[i] = 0x12 + } + if m.DefaultValue != nil { + if vtmsg, ok := interface{}(m.DefaultValue).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DefaultValue) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ControlPlane) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControlPlane) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ControlPlane) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Identifier) > 0 { + i -= len(m.Identifier) + copy(dAtA[i:], m.Identifier) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Identifier))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Locality) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Region) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Zone) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.SubZone) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *BuildVersion) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Version != nil { + if size, ok := interface{}(m.Version).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Version) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Metadata != nil { + l = (*structpb.Struct)(m.Metadata).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Extension) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Category) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.TypeDescriptor) + if l > 0 { + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + if m.Version != nil { + l = m.Version.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Disabled { + n += 2 + } + if len(m.TypeUrls) > 0 { + for _, s := range m.TypeUrls { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Node) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Cluster) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Metadata != nil { + l = (*structpb.Struct)(m.Metadata).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Locality != nil { + l = m.Locality.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.UserAgentName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.UserAgentVersionType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if len(m.Extensions) > 0 { + for _, e := range m.Extensions { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ClientFeatures) > 0 { + for _, s := range m.ClientFeatures { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ListeningAddresses) > 0 { + for _, e := range m.ListeningAddresses { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DynamicParameters) > 0 { + for k, v := range m.DynamicParameters { + _ = k + _ = v + l = 0 + if v != nil { + if size, ok := interface{}(v).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(v) + } + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Node_UserAgentVersion) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UserAgentVersion) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *Node_UserAgentBuildVersion) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UserAgentBuildVersion != nil { + l = m.UserAgentBuildVersion.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Metadata) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.FilterMetadata) > 0 { + for k, v := range m.FilterMetadata { + _ = k + _ = v + l = 0 + if v != nil { + l = (*structpb.Struct)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if len(m.TypedFilterMetadata) > 0 { + for k, v := range m.TypedFilterMetadata { + _ = k + _ = v + l = 0 + if v != nil { + l = (*anypb.Any)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RuntimeUInt32) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DefaultValue != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.DefaultValue)) + } + l = len(m.RuntimeKey) + if l > 0 { + n += 1 + 
l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RuntimePercent) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DefaultValue != nil { + if size, ok := interface{}(m.DefaultValue).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DefaultValue) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RuntimeKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RuntimeDouble) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DefaultValue != 0 { + n += 9 + } + l = len(m.RuntimeKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RuntimeFeatureFlag) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DefaultValue != nil { + l = (*wrapperspb.BoolValue)(m.DefaultValue).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RuntimeKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *KeyValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *KeyValueAppend) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Entry != nil { + l = m.Entry.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Action != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Action)) + } + n += len(m.unknownFields) + return n +} + +func (m *KeyValueMutation) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Append != nil { + l = m.Append.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Remove) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *QueryParameter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HeaderValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RawValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HeaderValueOption) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Append != nil { + l = (*wrapperspb.BoolValue)(m.Append).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AppendAction != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.AppendAction)) + } + if m.KeepEmptyValue { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *HeaderMap) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Headers) > 0 { + for _, e := range m.Headers { + l = 
e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *WatchedDirectory) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DataSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Specifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.WatchedDirectory != nil { + l = m.WatchedDirectory.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DataSource_Filename) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Filename) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *DataSource_InlineBytes) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.InlineBytes) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *DataSource_InlineString) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.InlineString) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *DataSource_EnvironmentVariable) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.EnvironmentVariable) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RetryPolicy_RetryPriority) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *RetryPolicy_RetryPriority_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RetryPolicy_RetryHostPredicate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *RetryPolicy_RetryHostPredicate_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RetryPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RetryBackOff != nil { + l = m.RetryBackOff.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.NumRetries != nil { + l = (*wrapperspb.UInt32Value)(m.NumRetries).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RetryOn) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RetryPriority != nil { + l = m.RetryPriority.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RetryHostPredicate) > 0 { + for _, e := range m.RetryHostPredicate { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.HostSelectionRetryMaxAttempts != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HostSelectionRetryMaxAttempts)) + } + n 
+= len(m.unknownFields) + return n +} + +func (m *RemoteDataSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpUri != nil { + l = m.HttpUri.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Sha256) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RetryPolicy != nil { + l = m.RetryPolicy.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *AsyncDataSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Specifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *AsyncDataSource_Local) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Local != nil { + l = m.Local.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *AsyncDataSource_Remote) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Remote != nil { + l = m.Remote.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *TransportSocket) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *TransportSocket_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RuntimeFractionalPercent) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DefaultValue != nil { + if size, ok := interface{}(m.DefaultValue).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DefaultValue) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RuntimeKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ControlPlane) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Identifier) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.go new file mode 100644 index 0000000000..295398b9f1 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.go @@ -0,0 +1,1194 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/config_source.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/cncf/xds/go/xds/core/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// xDS API and non-xDS services version. This is used to describe both resource and transport +// protocol versions (in distinct configuration fields). +type ApiVersion int32 + +const ( + // When not specified, we assume v3; it is the only supported version. + ApiVersion_AUTO ApiVersion = 0 + // Use xDS v2 API. This is no longer supported. + // + // Deprecated: Marked as deprecated in envoy/config/core/v3/config_source.proto. + ApiVersion_V2 ApiVersion = 1 + // Use xDS v3 API. + ApiVersion_V3 ApiVersion = 2 +) + +// Enum value maps for ApiVersion. +var ( + ApiVersion_name = map[int32]string{ + 0: "AUTO", + 1: "V2", + 2: "V3", + } + ApiVersion_value = map[string]int32{ + "AUTO": 0, + "V2": 1, + "V3": 2, + } +) + +func (x ApiVersion) Enum() *ApiVersion { + p := new(ApiVersion) + *p = x + return p +} + +func (x ApiVersion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ApiVersion) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_config_source_proto_enumTypes[0].Descriptor() +} + +func (ApiVersion) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_config_source_proto_enumTypes[0] +} + +func (x ApiVersion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ApiVersion.Descriptor instead. +func (ApiVersion) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{0} +} + +// APIs may be fetched via either REST or gRPC. +type ApiConfigSource_ApiType int32 + +const ( + // Ideally this would be 'reserved 0' but one can't reserve the default + // value. Instead we throw an exception if this is ever used. + // + // Deprecated: Marked as deprecated in envoy/config/core/v3/config_source.proto. + ApiConfigSource_DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE ApiConfigSource_ApiType = 0 + // REST-JSON v2 API. The `canonical JSON encoding + // `_ for + // the v2 protos is used. + ApiConfigSource_REST ApiConfigSource_ApiType = 1 + // SotW gRPC service. + ApiConfigSource_GRPC ApiConfigSource_ApiType = 2 + // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} + // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state + // with every update, the xDS server only sends what has changed since the last update. + ApiConfigSource_DELTA_GRPC ApiConfigSource_ApiType = 3 + // SotW xDS gRPC with ADS. All resources which resolve to this configuration source will be + // multiplexed on a single connection to an ADS endpoint. 
+ // [#not-implemented-hide:] + ApiConfigSource_AGGREGATED_GRPC ApiConfigSource_ApiType = 5 + // Delta xDS gRPC with ADS. All resources which resolve to this configuration source will be + // multiplexed on a single connection to an ADS endpoint. + // [#not-implemented-hide:] + ApiConfigSource_AGGREGATED_DELTA_GRPC ApiConfigSource_ApiType = 6 +) + +// Enum value maps for ApiConfigSource_ApiType. +var ( + ApiConfigSource_ApiType_name = map[int32]string{ + 0: "DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE", + 1: "REST", + 2: "GRPC", + 3: "DELTA_GRPC", + 5: "AGGREGATED_GRPC", + 6: "AGGREGATED_DELTA_GRPC", + } + ApiConfigSource_ApiType_value = map[string]int32{ + "DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE": 0, + "REST": 1, + "GRPC": 2, + "DELTA_GRPC": 3, + "AGGREGATED_GRPC": 5, + "AGGREGATED_DELTA_GRPC": 6, + } +) + +func (x ApiConfigSource_ApiType) Enum() *ApiConfigSource_ApiType { + p := new(ApiConfigSource_ApiType) + *p = x + return p +} + +func (x ApiConfigSource_ApiType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ApiConfigSource_ApiType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_config_source_proto_enumTypes[1].Descriptor() +} + +func (ApiConfigSource_ApiType) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_config_source_proto_enumTypes[1] +} + +func (x ApiConfigSource_ApiType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ApiConfigSource_ApiType.Descriptor instead. +func (ApiConfigSource_ApiType) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{0, 0} +} + +// API configuration source. This identifies the API type and cluster that Envoy +// will use to fetch an xDS API. +// [#next-free-field: 10] +type ApiConfigSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // API type (gRPC, REST, delta gRPC) + ApiType ApiConfigSource_ApiType `protobuf:"varint,1,opt,name=api_type,json=apiType,proto3,enum=envoy.config.core.v3.ApiConfigSource_ApiType" json:"api_type,omitempty"` + // API version for xDS transport protocol. This describes the xDS gRPC/REST + // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. + TransportApiVersion ApiVersion `protobuf:"varint,8,opt,name=transport_api_version,json=transportApiVersion,proto3,enum=envoy.config.core.v3.ApiVersion" json:"transport_api_version,omitempty"` + // Cluster names should be used only with REST. If > 1 + // cluster is defined, clusters will be cycled through if any kind of failure + // occurs. + // + // .. note:: + // + // The cluster with name ``cluster_name`` must be statically defined and its + // type must not be ``EDS``. + ClusterNames []string `protobuf:"bytes,2,rep,name=cluster_names,json=clusterNames,proto3" json:"cluster_names,omitempty"` + // Multiple gRPC services be provided for GRPC. If > 1 cluster is defined, + // services will be cycled through if any kind of failure occurs. + GrpcServices []*GrpcService `protobuf:"bytes,4,rep,name=grpc_services,json=grpcServices,proto3" json:"grpc_services,omitempty"` + // For REST APIs, the delay between successive polls. + RefreshDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=refresh_delay,json=refreshDelay,proto3" json:"refresh_delay,omitempty"` + // For REST APIs, the request timeout. If not set, a default value of 1s will be used. 
+ RequestTimeout *durationpb.Duration `protobuf:"bytes,5,opt,name=request_timeout,json=requestTimeout,proto3" json:"request_timeout,omitempty"` + // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be + // rate limited. + RateLimitSettings *RateLimitSettings `protobuf:"bytes,6,opt,name=rate_limit_settings,json=rateLimitSettings,proto3" json:"rate_limit_settings,omitempty"` + // Skip the node identifier in subsequent discovery requests for streaming gRPC config types. + SetNodeOnFirstMessageOnly bool `protobuf:"varint,7,opt,name=set_node_on_first_message_only,json=setNodeOnFirstMessageOnly,proto3" json:"set_node_on_first_message_only,omitempty"` + // A list of config validators that will be executed when a new update is + // received from the ApiConfigSource. Note that each validator handles a + // specific xDS service type, and only the validators corresponding to the + // type url (in “:ref: DiscoveryResponse“ or “:ref: DeltaDiscoveryResponse“) + // will be invoked. + // If the validator returns false or throws an exception, the config will be rejected by + // the client, and a NACK will be sent. + // [#extension-category: envoy.config.validators] + ConfigValidators []*TypedExtensionConfig `protobuf:"bytes,9,rep,name=config_validators,json=configValidators,proto3" json:"config_validators,omitempty"` +} + +func (x *ApiConfigSource) Reset() { + *x = ApiConfigSource{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApiConfigSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApiConfigSource) ProtoMessage() {} + +func (x *ApiConfigSource) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApiConfigSource.ProtoReflect.Descriptor instead. 
+func (*ApiConfigSource) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{0} +} + +func (x *ApiConfigSource) GetApiType() ApiConfigSource_ApiType { + if x != nil { + return x.ApiType + } + return ApiConfigSource_DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE +} + +func (x *ApiConfigSource) GetTransportApiVersion() ApiVersion { + if x != nil { + return x.TransportApiVersion + } + return ApiVersion_AUTO +} + +func (x *ApiConfigSource) GetClusterNames() []string { + if x != nil { + return x.ClusterNames + } + return nil +} + +func (x *ApiConfigSource) GetGrpcServices() []*GrpcService { + if x != nil { + return x.GrpcServices + } + return nil +} + +func (x *ApiConfigSource) GetRefreshDelay() *durationpb.Duration { + if x != nil { + return x.RefreshDelay + } + return nil +} + +func (x *ApiConfigSource) GetRequestTimeout() *durationpb.Duration { + if x != nil { + return x.RequestTimeout + } + return nil +} + +func (x *ApiConfigSource) GetRateLimitSettings() *RateLimitSettings { + if x != nil { + return x.RateLimitSettings + } + return nil +} + +func (x *ApiConfigSource) GetSetNodeOnFirstMessageOnly() bool { + if x != nil { + return x.SetNodeOnFirstMessageOnly + } + return false +} + +func (x *ApiConfigSource) GetConfigValidators() []*TypedExtensionConfig { + if x != nil { + return x.ConfigValidators + } + return nil +} + +// Aggregated Discovery Service (ADS) options. This is currently empty, but when +// set in :ref:`ConfigSource ` can be used to +// specify that ADS is to be used. +type AggregatedConfigSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *AggregatedConfigSource) Reset() { + *x = AggregatedConfigSource{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AggregatedConfigSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AggregatedConfigSource) ProtoMessage() {} + +func (x *AggregatedConfigSource) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AggregatedConfigSource.ProtoReflect.Descriptor instead. +func (*AggregatedConfigSource) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{1} +} + +// [#not-implemented-hide:] +// Self-referencing config source options. This is currently empty, but when +// set in :ref:`ConfigSource ` can be used to +// specify that other data can be obtained from the same server. +type SelfConfigSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // API version for xDS transport protocol. This describes the xDS gRPC/REST + // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. 
+ TransportApiVersion ApiVersion `protobuf:"varint,1,opt,name=transport_api_version,json=transportApiVersion,proto3,enum=envoy.config.core.v3.ApiVersion" json:"transport_api_version,omitempty"` +} + +func (x *SelfConfigSource) Reset() { + *x = SelfConfigSource{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SelfConfigSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SelfConfigSource) ProtoMessage() {} + +func (x *SelfConfigSource) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SelfConfigSource.ProtoReflect.Descriptor instead. +func (*SelfConfigSource) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{2} +} + +func (x *SelfConfigSource) GetTransportApiVersion() ApiVersion { + if x != nil { + return x.TransportApiVersion + } + return ApiVersion_AUTO +} + +// Rate Limit settings to be applied for discovery requests made by Envoy. +type RateLimitSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a + // default value of 100 will be used. + MaxTokens *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=max_tokens,json=maxTokens,proto3" json:"max_tokens,omitempty"` + // Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens + // per second will be used. The minimal fill rate is once per year. Lower + // fill rates will be set to once per year. + FillRate *wrapperspb.DoubleValue `protobuf:"bytes,2,opt,name=fill_rate,json=fillRate,proto3" json:"fill_rate,omitempty"` +} + +func (x *RateLimitSettings) Reset() { + *x = RateLimitSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimitSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimitSettings) ProtoMessage() {} + +func (x *RateLimitSettings) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimitSettings.ProtoReflect.Descriptor instead. +func (*RateLimitSettings) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{3} +} + +func (x *RateLimitSettings) GetMaxTokens() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxTokens + } + return nil +} + +func (x *RateLimitSettings) GetFillRate() *wrapperspb.DoubleValue { + if x != nil { + return x.FillRate + } + return nil +} + +// Local filesystem path configuration source. 
+type PathConfigSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Path on the filesystem to source and watch for configuration updates. + // When sourcing configuration for a :ref:`secret `, + // the certificate and key files are also watched for updates. + // + // .. note:: + // + // The path to the source must exist at config load time. + // + // .. note:: + // + // If ``watched_directory`` is *not* configured, Envoy will watch the file path for *moves*. + // This is because in general only moves are atomic. The same method of swapping files as is + // demonstrated in the :ref:`runtime documentation ` can be + // used here also. If ``watched_directory`` is configured, no watch will be placed directly on + // this path. Instead, the configured ``watched_directory`` will be used to trigger reloads of + // this path. This is required in certain deployment scenarios. See below for more information. + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + // If configured, this directory will be watched for *moves*. When an entry in this directory is + // moved to, the “path“ will be reloaded. This is required in certain deployment scenarios. + // + // Specifically, if trying to load an xDS resource using a + // `Kubernetes ConfigMap `_, the + // following configuration might be used: + // 1. Store xds.yaml inside a ConfigMap. + // 2. Mount the ConfigMap to “/config_map/xds“ + // 3. Configure path “/config_map/xds/xds.yaml“ + // 4. Configure watched directory “/config_map/xds“ + // + // The above configuration will ensure that Envoy watches the owning directory for moves which is + // required due to how Kubernetes manages ConfigMap symbolic links during atomic updates. + WatchedDirectory *WatchedDirectory `protobuf:"bytes,2,opt,name=watched_directory,json=watchedDirectory,proto3" json:"watched_directory,omitempty"` +} + +func (x *PathConfigSource) Reset() { + *x = PathConfigSource{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PathConfigSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PathConfigSource) ProtoMessage() {} + +func (x *PathConfigSource) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PathConfigSource.ProtoReflect.Descriptor instead. +func (*PathConfigSource) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{4} +} + +func (x *PathConfigSource) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *PathConfigSource) GetWatchedDirectory() *WatchedDirectory { + if x != nil { + return x.WatchedDirectory + } + return nil +} + +// Configuration for :ref:`listeners `, :ref:`clusters +// `, :ref:`routes +// `, :ref:`endpoints +// ` etc. may either be sourced from the +// filesystem or from an xDS API source. Filesystem configs are watched with +// inotify for updates. 
+// [#next-free-field: 9] +type ConfigSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Authorities that this config source may be used for. An authority specified in a xdstp:// URL + // is resolved to a “ConfigSource“ prior to configuration fetch. This field provides the + // association between authority name and configuration source. + // [#not-implemented-hide:] + Authorities []*v3.Authority `protobuf:"bytes,7,rep,name=authorities,proto3" json:"authorities,omitempty"` + // Types that are assignable to ConfigSourceSpecifier: + // + // *ConfigSource_Path + // *ConfigSource_PathConfigSource + // *ConfigSource_ApiConfigSource + // *ConfigSource_Ads + // *ConfigSource_Self + ConfigSourceSpecifier isConfigSource_ConfigSourceSpecifier `protobuf_oneof:"config_source_specifier"` + // When this timeout is specified, Envoy will wait no longer than the specified time for first + // config response on this xDS subscription during the :ref:`initialization process + // `. After reaching the timeout, Envoy will move to the next + // initialization phase, even if the first config is not delivered yet. The timer is activated + // when the xDS API subscription starts, and is disarmed on first config update or on error. 0 + // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another + // timeout applies). The default is 15s. + InitialFetchTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=initial_fetch_timeout,json=initialFetchTimeout,proto3" json:"initial_fetch_timeout,omitempty"` + // API version for xDS resources. This implies the type URLs that the client + // will request for resources and the resource type that the client will in + // turn expect to be delivered. + ResourceApiVersion ApiVersion `protobuf:"varint,6,opt,name=resource_api_version,json=resourceApiVersion,proto3,enum=envoy.config.core.v3.ApiVersion" json:"resource_api_version,omitempty"` +} + +func (x *ConfigSource) Reset() { + *x = ConfigSource{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConfigSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConfigSource) ProtoMessage() {} + +func (x *ConfigSource) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConfigSource.ProtoReflect.Descriptor instead. +func (*ConfigSource) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{5} +} + +func (x *ConfigSource) GetAuthorities() []*v3.Authority { + if x != nil { + return x.Authorities + } + return nil +} + +func (m *ConfigSource) GetConfigSourceSpecifier() isConfigSource_ConfigSourceSpecifier { + if m != nil { + return m.ConfigSourceSpecifier + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/core/v3/config_source.proto. 
+func (x *ConfigSource) GetPath() string { + if x, ok := x.GetConfigSourceSpecifier().(*ConfigSource_Path); ok { + return x.Path + } + return "" +} + +func (x *ConfigSource) GetPathConfigSource() *PathConfigSource { + if x, ok := x.GetConfigSourceSpecifier().(*ConfigSource_PathConfigSource); ok { + return x.PathConfigSource + } + return nil +} + +func (x *ConfigSource) GetApiConfigSource() *ApiConfigSource { + if x, ok := x.GetConfigSourceSpecifier().(*ConfigSource_ApiConfigSource); ok { + return x.ApiConfigSource + } + return nil +} + +func (x *ConfigSource) GetAds() *AggregatedConfigSource { + if x, ok := x.GetConfigSourceSpecifier().(*ConfigSource_Ads); ok { + return x.Ads + } + return nil +} + +func (x *ConfigSource) GetSelf() *SelfConfigSource { + if x, ok := x.GetConfigSourceSpecifier().(*ConfigSource_Self); ok { + return x.Self + } + return nil +} + +func (x *ConfigSource) GetInitialFetchTimeout() *durationpb.Duration { + if x != nil { + return x.InitialFetchTimeout + } + return nil +} + +func (x *ConfigSource) GetResourceApiVersion() ApiVersion { + if x != nil { + return x.ResourceApiVersion + } + return ApiVersion_AUTO +} + +type isConfigSource_ConfigSourceSpecifier interface { + isConfigSource_ConfigSourceSpecifier() +} + +type ConfigSource_Path struct { + // Deprecated in favor of “path_config_source“. Use that field instead. + // + // Deprecated: Marked as deprecated in envoy/config/core/v3/config_source.proto. + Path string `protobuf:"bytes,1,opt,name=path,proto3,oneof"` +} + +type ConfigSource_PathConfigSource struct { + // Local filesystem path configuration source. + PathConfigSource *PathConfigSource `protobuf:"bytes,8,opt,name=path_config_source,json=pathConfigSource,proto3,oneof"` +} + +type ConfigSource_ApiConfigSource struct { + // API configuration source. + ApiConfigSource *ApiConfigSource `protobuf:"bytes,2,opt,name=api_config_source,json=apiConfigSource,proto3,oneof"` +} + +type ConfigSource_Ads struct { + // When set, ADS will be used to fetch resources. The ADS API configuration + // source in the bootstrap configuration is used. + Ads *AggregatedConfigSource `protobuf:"bytes,3,opt,name=ads,proto3,oneof"` +} + +type ConfigSource_Self struct { + // [#not-implemented-hide:] + // When set, the client will access the resources from the same server it got the + // ConfigSource from, although not necessarily from the same stream. This is similar to the + // :ref:`ads` field, except that the client may use a + // different stream to the same server. As a result, this field can be used for things + // like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.) + // LDS to RDS on the same server without requiring the management server to know its name + // or required credentials. + // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since + // this field can implicitly mean to use the same stream in the case where the ConfigSource + // is provided via ADS and the specified data can also be obtained via ADS.] 
+ Self *SelfConfigSource `protobuf:"bytes,5,opt,name=self,proto3,oneof"` +} + +func (*ConfigSource_Path) isConfigSource_ConfigSourceSpecifier() {} + +func (*ConfigSource_PathConfigSource) isConfigSource_ConfigSourceSpecifier() {} + +func (*ConfigSource_ApiConfigSource) isConfigSource_ConfigSourceSpecifier() {} + +func (*ConfigSource_Ads) isConfigSource_ConfigSourceSpecifier() {} + +func (*ConfigSource_Self) isConfigSource_ConfigSourceSpecifier() {} + +// Configuration source specifier for a late-bound extension configuration. The +// parent resource is warmed until all the initial extension configurations are +// received, unless the flag to apply the default configuration is set. +// Subsequent extension updates are atomic on a per-worker basis. Once an +// extension configuration is applied to a request or a connection, it remains +// constant for the duration of processing. If the initial delivery of the +// extension configuration fails, due to a timeout for example, the optional +// default configuration is applied. Without a default configuration, the +// extension is disabled, until an extension configuration is received. The +// behavior of a disabled extension depends on the context. For example, a +// filter chain with a disabled extension filter rejects all incoming streams. +type ExtensionConfigSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ConfigSource *ConfigSource `protobuf:"bytes,1,opt,name=config_source,json=configSource,proto3" json:"config_source,omitempty"` + // Optional default configuration to use as the initial configuration if + // there is a failure to receive the initial extension configuration or if + // “apply_default_config_without_warming“ flag is set. + DefaultConfig *anypb.Any `protobuf:"bytes,2,opt,name=default_config,json=defaultConfig,proto3" json:"default_config,omitempty"` + // Use the default config as the initial configuration without warming and + // waiting for the first discovery response. Requires the default configuration + // to be supplied. + ApplyDefaultConfigWithoutWarming bool `protobuf:"varint,3,opt,name=apply_default_config_without_warming,json=applyDefaultConfigWithoutWarming,proto3" json:"apply_default_config_without_warming,omitempty"` + // A set of permitted extension type URLs. Extension configuration updates are rejected + // if they do not match any type URL in the set. + TypeUrls []string `protobuf:"bytes,4,rep,name=type_urls,json=typeUrls,proto3" json:"type_urls,omitempty"` +} + +func (x *ExtensionConfigSource) Reset() { + *x = ExtensionConfigSource{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionConfigSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionConfigSource) ProtoMessage() {} + +func (x *ExtensionConfigSource) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionConfigSource.ProtoReflect.Descriptor instead. 
+func (*ExtensionConfigSource) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{6} +} + +func (x *ExtensionConfigSource) GetConfigSource() *ConfigSource { + if x != nil { + return x.ConfigSource + } + return nil +} + +func (x *ExtensionConfigSource) GetDefaultConfig() *anypb.Any { + if x != nil { + return x.DefaultConfig + } + return nil +} + +func (x *ExtensionConfigSource) GetApplyDefaultConfigWithoutWarming() bool { + if x != nil { + return x.ApplyDefaultConfigWithoutWarming + } + return false +} + +func (x *ExtensionConfigSource) GetTypeUrls() []string { + if x != nil { + return x.TypeUrls + } + return nil +} + +var File_envoy_config_core_v3_config_source_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_config_source_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, + 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, + 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x78, 0x64, 0x73, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, + 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf4, 0x06, 0x0a, 0x0f, 0x41, 0x70, 0x69, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 
0x12, 0x52, 0x0a, 0x08, + 0x61, 0x70, 0x69, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x41, 0x70, 0x69, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x61, 0x70, 0x69, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x5e, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x61, 0x70, + 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x13, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x46, 0x0a, 0x0d, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, + 0x0c, 0x67, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x3e, 0x0a, + 0x0d, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0c, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x4c, 0x0a, + 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0e, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x57, 0x0a, 0x13, 0x72, + 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x11, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x12, 0x41, 0x0a, 0x1e, 0x73, 0x65, 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65, + 0x5f, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x73, 0x65, + 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x4f, 0x6e, 0x46, 0x69, 0x72, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, + 
0x61, 0x67, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x57, 0x0a, 0x11, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x73, + 0x22, 0x92, 0x01, 0x0a, 0x07, 0x41, 0x70, 0x69, 0x54, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x25, + 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x55, + 0x4e, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x44, 0x4f, 0x5f, 0x4e, 0x4f, + 0x54, 0x5f, 0x55, 0x53, 0x45, 0x10, 0x00, 0x1a, 0x08, 0xa8, 0xf7, 0xb4, 0x8b, 0x02, 0x01, 0x08, + 0x01, 0x12, 0x08, 0x0a, 0x04, 0x52, 0x45, 0x53, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x47, + 0x52, 0x50, 0x43, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x5f, 0x47, + 0x52, 0x50, 0x43, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x41, 0x47, 0x47, 0x52, 0x45, 0x47, 0x41, + 0x54, 0x45, 0x44, 0x5f, 0x47, 0x52, 0x50, 0x43, 0x10, 0x05, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x47, + 0x47, 0x52, 0x45, 0x47, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x5f, 0x47, + 0x52, 0x50, 0x43, 0x10, 0x06, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, + 0x49, 0x0a, 0x16, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, + 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x9d, 0x01, 0x0a, 0x10, 0x53, + 0x65, 0x6c, 0x66, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, + 0x5e, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x61, 0x70, 0x69, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x13, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, + 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x65, 0x6c, 0x66, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xc7, 0x01, 0x0a, 0x11, 0x52, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x12, 0x3b, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 
0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x49, 0x0a, + 0x09, 0x66, 0x69, 0x6c, 0x6c, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0e, + 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x08, + 0x66, 0x69, 0x6c, 0x6c, 0x52, 0x61, 0x74, 0x65, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, + 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x22, 0x84, 0x01, 0x0a, 0x10, 0x50, 0x61, 0x74, 0x68, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, + 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x10, 0x77, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x22, 0x8c, 0x05, 0x0a, 0x0c, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x38, 0x0a, 0x0b, + 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, + 0x01, 0x48, 0x00, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x56, 0x0a, 0x12, 0x70, 0x61, 0x74, + 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, + 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, + 0x10, 0x70, 0x61, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x12, 0x53, 0x0a, 0x11, 0x61, 0x70, 0x69, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0f, 0x61, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x40, 0x0a, 0x03, 0x61, 0x64, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 
0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, + 0x67, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x48, 0x00, 0x52, 0x03, 0x61, 0x64, 0x73, 0x12, 0x3c, 0x0a, 0x04, 0x73, 0x65, 0x6c, 0x66, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, + 0x6c, 0x66, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, + 0x52, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x12, 0x4d, 0x0a, 0x15, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, + 0x6c, 0x5f, 0x66, 0x65, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x46, 0x65, 0x74, 0x63, 0x68, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x5c, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x1e, 0x0a, 0x17, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x9e, 0x02, 0x0a, 0x15, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x51, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0xa2, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x3b, 0x0a, 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x24, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x5f, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x77, 0x69, 0x74, + 0x68, 0x6f, 0x75, 0x74, 0x5f, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x20, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x44, 0x65, 0x66, 
0x61, 0x75, 0x6c, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x57, 0x69, 0x74, 0x68, 0x6f, 0x75, 0x74, 0x57, 0x61, 0x72, + 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x25, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, + 0x01, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x73, 0x2a, 0x33, 0x0a, 0x0a, 0x41, + 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x55, 0x54, + 0x4f, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x02, 0x56, 0x32, 0x10, 0x01, 0x1a, 0x0b, 0x8a, 0xf4, 0x9b, + 0xb3, 0x05, 0x03, 0x33, 0x2e, 0x30, 0x08, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x56, 0x33, 0x10, 0x02, + 0x42, 0x85, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, + 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, + 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_config_source_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_config_source_proto_rawDescData = file_envoy_config_core_v3_config_source_proto_rawDesc +) + +func file_envoy_config_core_v3_config_source_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_config_source_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_config_source_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_config_source_proto_rawDescData) + }) + return file_envoy_config_core_v3_config_source_proto_rawDescData +} + +var file_envoy_config_core_v3_config_source_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_envoy_config_core_v3_config_source_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_envoy_config_core_v3_config_source_proto_goTypes = []interface{}{ + (ApiVersion)(0), // 0: envoy.config.core.v3.ApiVersion + (ApiConfigSource_ApiType)(0), // 1: envoy.config.core.v3.ApiConfigSource.ApiType + (*ApiConfigSource)(nil), // 2: envoy.config.core.v3.ApiConfigSource + (*AggregatedConfigSource)(nil), // 3: envoy.config.core.v3.AggregatedConfigSource + (*SelfConfigSource)(nil), // 4: envoy.config.core.v3.SelfConfigSource + (*RateLimitSettings)(nil), // 5: envoy.config.core.v3.RateLimitSettings + (*PathConfigSource)(nil), // 6: envoy.config.core.v3.PathConfigSource + (*ConfigSource)(nil), // 7: envoy.config.core.v3.ConfigSource + (*ExtensionConfigSource)(nil), // 8: envoy.config.core.v3.ExtensionConfigSource + (*GrpcService)(nil), // 9: envoy.config.core.v3.GrpcService + (*durationpb.Duration)(nil), // 10: google.protobuf.Duration + (*TypedExtensionConfig)(nil), // 11: envoy.config.core.v3.TypedExtensionConfig + (*wrapperspb.UInt32Value)(nil), // 12: google.protobuf.UInt32Value + (*wrapperspb.DoubleValue)(nil), // 13: google.protobuf.DoubleValue + (*WatchedDirectory)(nil), // 14: envoy.config.core.v3.WatchedDirectory + (*v3.Authority)(nil), // 15: 
xds.core.v3.Authority + (*anypb.Any)(nil), // 16: google.protobuf.Any +} +var file_envoy_config_core_v3_config_source_proto_depIdxs = []int32{ + 1, // 0: envoy.config.core.v3.ApiConfigSource.api_type:type_name -> envoy.config.core.v3.ApiConfigSource.ApiType + 0, // 1: envoy.config.core.v3.ApiConfigSource.transport_api_version:type_name -> envoy.config.core.v3.ApiVersion + 9, // 2: envoy.config.core.v3.ApiConfigSource.grpc_services:type_name -> envoy.config.core.v3.GrpcService + 10, // 3: envoy.config.core.v3.ApiConfigSource.refresh_delay:type_name -> google.protobuf.Duration + 10, // 4: envoy.config.core.v3.ApiConfigSource.request_timeout:type_name -> google.protobuf.Duration + 5, // 5: envoy.config.core.v3.ApiConfigSource.rate_limit_settings:type_name -> envoy.config.core.v3.RateLimitSettings + 11, // 6: envoy.config.core.v3.ApiConfigSource.config_validators:type_name -> envoy.config.core.v3.TypedExtensionConfig + 0, // 7: envoy.config.core.v3.SelfConfigSource.transport_api_version:type_name -> envoy.config.core.v3.ApiVersion + 12, // 8: envoy.config.core.v3.RateLimitSettings.max_tokens:type_name -> google.protobuf.UInt32Value + 13, // 9: envoy.config.core.v3.RateLimitSettings.fill_rate:type_name -> google.protobuf.DoubleValue + 14, // 10: envoy.config.core.v3.PathConfigSource.watched_directory:type_name -> envoy.config.core.v3.WatchedDirectory + 15, // 11: envoy.config.core.v3.ConfigSource.authorities:type_name -> xds.core.v3.Authority + 6, // 12: envoy.config.core.v3.ConfigSource.path_config_source:type_name -> envoy.config.core.v3.PathConfigSource + 2, // 13: envoy.config.core.v3.ConfigSource.api_config_source:type_name -> envoy.config.core.v3.ApiConfigSource + 3, // 14: envoy.config.core.v3.ConfigSource.ads:type_name -> envoy.config.core.v3.AggregatedConfigSource + 4, // 15: envoy.config.core.v3.ConfigSource.self:type_name -> envoy.config.core.v3.SelfConfigSource + 10, // 16: envoy.config.core.v3.ConfigSource.initial_fetch_timeout:type_name -> google.protobuf.Duration + 0, // 17: envoy.config.core.v3.ConfigSource.resource_api_version:type_name -> envoy.config.core.v3.ApiVersion + 7, // 18: envoy.config.core.v3.ExtensionConfigSource.config_source:type_name -> envoy.config.core.v3.ConfigSource + 16, // 19: envoy.config.core.v3.ExtensionConfigSource.default_config:type_name -> google.protobuf.Any + 20, // [20:20] is the sub-list for method output_type + 20, // [20:20] is the sub-list for method input_type + 20, // [20:20] is the sub-list for extension type_name + 20, // [20:20] is the sub-list for extension extendee + 0, // [0:20] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_config_source_proto_init() } +func file_envoy_config_core_v3_config_source_proto_init() { + if File_envoy_config_core_v3_config_source_proto != nil { + return + } + file_envoy_config_core_v3_base_proto_init() + file_envoy_config_core_v3_extension_proto_init() + file_envoy_config_core_v3_grpc_service_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_config_source_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApiConfigSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_config_source_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AggregatedConfigSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + file_envoy_config_core_v3_config_source_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SelfConfigSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_config_source_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimitSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_config_source_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathConfigSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_config_source_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConfigSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_config_source_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionConfigSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_core_v3_config_source_proto_msgTypes[5].OneofWrappers = []interface{}{ + (*ConfigSource_Path)(nil), + (*ConfigSource_PathConfigSource)(nil), + (*ConfigSource_ApiConfigSource)(nil), + (*ConfigSource_Ads)(nil), + (*ConfigSource_Self)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_config_source_proto_rawDesc, + NumEnums: 2, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_config_source_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_config_source_proto_depIdxs, + EnumInfos: file_envoy_config_core_v3_config_source_proto_enumTypes, + MessageInfos: file_envoy_config_core_v3_config_source_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_config_source_proto = out.File + file_envoy_config_core_v3_config_source_proto_rawDesc = nil + file_envoy_config_core_v3_config_source_proto_goTypes = nil + file_envoy_config_core_v3_config_source_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.validate.go new file mode 100644 index 0000000000..c2d0b15953 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source.pb.validate.go @@ -0,0 +1,1345 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/config/core/v3/config_source.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ApiConfigSource with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ApiConfigSource) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ApiConfigSource with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ApiConfigSourceMultiError, or nil if none found. +func (m *ApiConfigSource) ValidateAll() error { + return m.validate(true) +} + +func (m *ApiConfigSource) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := ApiConfigSource_ApiType_name[int32(m.GetApiType())]; !ok { + err := ApiConfigSourceValidationError{ + field: "ApiType", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if _, ok := ApiVersion_name[int32(m.GetTransportApiVersion())]; !ok { + err := ApiConfigSourceValidationError{ + field: "TransportApiVersion", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetGrpcServices() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ApiConfigSourceValidationError{ + field: fmt.Sprintf("GrpcServices[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ApiConfigSourceValidationError{ + field: fmt.Sprintf("GrpcServices[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ApiConfigSourceValidationError{ + field: fmt.Sprintf("GrpcServices[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetRefreshDelay()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ApiConfigSourceValidationError{ + field: "RefreshDelay", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ApiConfigSourceValidationError{ + field: "RefreshDelay", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRefreshDelay()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ApiConfigSourceValidationError{ + field: "RefreshDelay", + reason: "embedded message failed 
validation", + cause: err, + } + } + } + + if d := m.GetRequestTimeout(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = ApiConfigSourceValidationError{ + field: "RequestTimeout", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := ApiConfigSourceValidationError{ + field: "RequestTimeout", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if all { + switch v := interface{}(m.GetRateLimitSettings()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ApiConfigSourceValidationError{ + field: "RateLimitSettings", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ApiConfigSourceValidationError{ + field: "RateLimitSettings", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRateLimitSettings()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ApiConfigSourceValidationError{ + field: "RateLimitSettings", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for SetNodeOnFirstMessageOnly + + for idx, item := range m.GetConfigValidators() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ApiConfigSourceValidationError{ + field: fmt.Sprintf("ConfigValidators[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ApiConfigSourceValidationError{ + field: fmt.Sprintf("ConfigValidators[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ApiConfigSourceValidationError{ + field: fmt.Sprintf("ConfigValidators[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ApiConfigSourceMultiError(errors) + } + + return nil +} + +// ApiConfigSourceMultiError is an error wrapping multiple validation errors +// returned by ApiConfigSource.ValidateAll() if the designated constraints +// aren't met. +type ApiConfigSourceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ApiConfigSourceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ApiConfigSourceMultiError) AllErrors() []error { return m } + +// ApiConfigSourceValidationError is the validation error returned by +// ApiConfigSource.Validate if the designated constraints aren't met. +type ApiConfigSourceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ApiConfigSourceValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e ApiConfigSourceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ApiConfigSourceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ApiConfigSourceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ApiConfigSourceValidationError) ErrorName() string { return "ApiConfigSourceValidationError" } + +// Error satisfies the builtin error interface +func (e ApiConfigSourceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sApiConfigSource.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ApiConfigSourceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ApiConfigSourceValidationError{} + +// Validate checks the field values on AggregatedConfigSource with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *AggregatedConfigSource) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on AggregatedConfigSource with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// AggregatedConfigSourceMultiError, or nil if none found. +func (m *AggregatedConfigSource) ValidateAll() error { + return m.validate(true) +} + +func (m *AggregatedConfigSource) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return AggregatedConfigSourceMultiError(errors) + } + + return nil +} + +// AggregatedConfigSourceMultiError is an error wrapping multiple validation +// errors returned by AggregatedConfigSource.ValidateAll() if the designated +// constraints aren't met. +type AggregatedConfigSourceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AggregatedConfigSourceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AggregatedConfigSourceMultiError) AllErrors() []error { return m } + +// AggregatedConfigSourceValidationError is the validation error returned by +// AggregatedConfigSource.Validate if the designated constraints aren't met. +type AggregatedConfigSourceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AggregatedConfigSourceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AggregatedConfigSourceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e AggregatedConfigSourceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AggregatedConfigSourceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e AggregatedConfigSourceValidationError) ErrorName() string { + return "AggregatedConfigSourceValidationError" +} + +// Error satisfies the builtin error interface +func (e AggregatedConfigSourceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAggregatedConfigSource.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AggregatedConfigSourceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AggregatedConfigSourceValidationError{} + +// Validate checks the field values on SelfConfigSource with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *SelfConfigSource) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SelfConfigSource with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SelfConfigSourceMultiError, or nil if none found. +func (m *SelfConfigSource) ValidateAll() error { + return m.validate(true) +} + +func (m *SelfConfigSource) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := ApiVersion_name[int32(m.GetTransportApiVersion())]; !ok { + err := SelfConfigSourceValidationError{ + field: "TransportApiVersion", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return SelfConfigSourceMultiError(errors) + } + + return nil +} + +// SelfConfigSourceMultiError is an error wrapping multiple validation errors +// returned by SelfConfigSource.ValidateAll() if the designated constraints +// aren't met. +type SelfConfigSourceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SelfConfigSourceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SelfConfigSourceMultiError) AllErrors() []error { return m } + +// SelfConfigSourceValidationError is the validation error returned by +// SelfConfigSource.Validate if the designated constraints aren't met. +type SelfConfigSourceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SelfConfigSourceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SelfConfigSourceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SelfConfigSourceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SelfConfigSourceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e SelfConfigSourceValidationError) ErrorName() string { return "SelfConfigSourceValidationError" } + +// Error satisfies the builtin error interface +func (e SelfConfigSourceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSelfConfigSource.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SelfConfigSourceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SelfConfigSourceValidationError{} + +// Validate checks the field values on RateLimitSettings with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *RateLimitSettings) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimitSettings with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RateLimitSettingsMultiError, or nil if none found. +func (m *RateLimitSettings) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimitSettings) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetMaxTokens()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimitSettingsValidationError{ + field: "MaxTokens", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimitSettingsValidationError{ + field: "MaxTokens", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxTokens()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimitSettingsValidationError{ + field: "MaxTokens", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if wrapper := m.GetFillRate(); wrapper != nil { + + if wrapper.GetValue() <= 0 { + err := RateLimitSettingsValidationError{ + field: "FillRate", + reason: "value must be greater than 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return RateLimitSettingsMultiError(errors) + } + + return nil +} + +// RateLimitSettingsMultiError is an error wrapping multiple validation errors +// returned by RateLimitSettings.ValidateAll() if the designated constraints +// aren't met. +type RateLimitSettingsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimitSettingsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimitSettingsMultiError) AllErrors() []error { return m } + +// RateLimitSettingsValidationError is the validation error returned by +// RateLimitSettings.Validate if the designated constraints aren't met. +type RateLimitSettingsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e RateLimitSettingsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimitSettingsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimitSettingsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimitSettingsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimitSettingsValidationError) ErrorName() string { + return "RateLimitSettingsValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimitSettingsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimitSettings.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimitSettingsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimitSettingsValidationError{} + +// Validate checks the field values on PathConfigSource with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *PathConfigSource) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on PathConfigSource with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// PathConfigSourceMultiError, or nil if none found. +func (m *PathConfigSource) ValidateAll() error { + return m.validate(true) +} + +func (m *PathConfigSource) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetPath()) < 1 { + err := PathConfigSourceValidationError{ + field: "Path", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetWatchedDirectory()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PathConfigSourceValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PathConfigSourceValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWatchedDirectory()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PathConfigSourceValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return PathConfigSourceMultiError(errors) + } + + return nil +} + +// PathConfigSourceMultiError is an error wrapping multiple validation errors +// returned by PathConfigSource.ValidateAll() if the designated constraints +// aren't met. +type PathConfigSourceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m PathConfigSourceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PathConfigSourceMultiError) AllErrors() []error { return m } + +// PathConfigSourceValidationError is the validation error returned by +// PathConfigSource.Validate if the designated constraints aren't met. +type PathConfigSourceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PathConfigSourceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PathConfigSourceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PathConfigSourceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PathConfigSourceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e PathConfigSourceValidationError) ErrorName() string { return "PathConfigSourceValidationError" } + +// Error satisfies the builtin error interface +func (e PathConfigSourceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPathConfigSource.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PathConfigSourceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PathConfigSourceValidationError{} + +// Validate checks the field values on ConfigSource with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ConfigSource) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ConfigSource with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ConfigSourceMultiError, or +// nil if none found. 
+func (m *ConfigSource) ValidateAll() error { + return m.validate(true) +} + +func (m *ConfigSource) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetAuthorities() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ConfigSourceValidationError{ + field: fmt.Sprintf("Authorities[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ConfigSourceValidationError{ + field: fmt.Sprintf("Authorities[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ConfigSourceValidationError{ + field: fmt.Sprintf("Authorities[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetInitialFetchTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ConfigSourceValidationError{ + field: "InitialFetchTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ConfigSourceValidationError{ + field: "InitialFetchTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInitialFetchTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ConfigSourceValidationError{ + field: "InitialFetchTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if _, ok := ApiVersion_name[int32(m.GetResourceApiVersion())]; !ok { + err := ConfigSourceValidationError{ + field: "ResourceApiVersion", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + oneofConfigSourceSpecifierPresent := false + switch v := m.ConfigSourceSpecifier.(type) { + case *ConfigSource_Path: + if v == nil { + err := ConfigSourceValidationError{ + field: "ConfigSourceSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigSourceSpecifierPresent = true + // no validation rules for Path + case *ConfigSource_PathConfigSource: + if v == nil { + err := ConfigSourceValidationError{ + field: "ConfigSourceSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigSourceSpecifierPresent = true + + if all { + switch v := interface{}(m.GetPathConfigSource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ConfigSourceValidationError{ + field: "PathConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ConfigSourceValidationError{ + field: "PathConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPathConfigSource()).(interface{ Validate() error }); ok { + if err := v.Validate(); 
err != nil { + return ConfigSourceValidationError{ + field: "PathConfigSource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ConfigSource_ApiConfigSource: + if v == nil { + err := ConfigSourceValidationError{ + field: "ConfigSourceSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigSourceSpecifierPresent = true + + if all { + switch v := interface{}(m.GetApiConfigSource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ConfigSourceValidationError{ + field: "ApiConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ConfigSourceValidationError{ + field: "ApiConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetApiConfigSource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ConfigSourceValidationError{ + field: "ApiConfigSource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ConfigSource_Ads: + if v == nil { + err := ConfigSourceValidationError{ + field: "ConfigSourceSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigSourceSpecifierPresent = true + + if all { + switch v := interface{}(m.GetAds()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ConfigSourceValidationError{ + field: "Ads", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ConfigSourceValidationError{ + field: "Ads", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAds()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ConfigSourceValidationError{ + field: "Ads", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ConfigSource_Self: + if v == nil { + err := ConfigSourceValidationError{ + field: "ConfigSourceSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigSourceSpecifierPresent = true + + if all { + switch v := interface{}(m.GetSelf()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ConfigSourceValidationError{ + field: "Self", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ConfigSourceValidationError{ + field: "Self", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSelf()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ConfigSourceValidationError{ + field: "Self", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofConfigSourceSpecifierPresent { + err := ConfigSourceValidationError{ + field: "ConfigSourceSpecifier", + reason: "value is required", + } + if !all { + return err + } + 
errors = append(errors, err) + } + + if len(errors) > 0 { + return ConfigSourceMultiError(errors) + } + + return nil +} + +// ConfigSourceMultiError is an error wrapping multiple validation errors +// returned by ConfigSource.ValidateAll() if the designated constraints aren't met. +type ConfigSourceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ConfigSourceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ConfigSourceMultiError) AllErrors() []error { return m } + +// ConfigSourceValidationError is the validation error returned by +// ConfigSource.Validate if the designated constraints aren't met. +type ConfigSourceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ConfigSourceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ConfigSourceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ConfigSourceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ConfigSourceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ConfigSourceValidationError) ErrorName() string { return "ConfigSourceValidationError" } + +// Error satisfies the builtin error interface +func (e ConfigSourceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sConfigSource.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ConfigSourceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ConfigSourceValidationError{} + +// Validate checks the field values on ExtensionConfigSource with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ExtensionConfigSource) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ExtensionConfigSource with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ExtensionConfigSourceMultiError, or nil if none found. 
+func (m *ExtensionConfigSource) ValidateAll() error { + return m.validate(true) +} + +func (m *ExtensionConfigSource) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetConfigSource() == nil { + err := ExtensionConfigSourceValidationError{ + field: "ConfigSource", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if a := m.GetConfigSource(); a != nil { + + } + + if all { + switch v := interface{}(m.GetDefaultConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ExtensionConfigSourceValidationError{ + field: "DefaultConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ExtensionConfigSourceValidationError{ + field: "DefaultConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDefaultConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ExtensionConfigSourceValidationError{ + field: "DefaultConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ApplyDefaultConfigWithoutWarming + + if len(m.GetTypeUrls()) < 1 { + err := ExtensionConfigSourceValidationError{ + field: "TypeUrls", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ExtensionConfigSourceMultiError(errors) + } + + return nil +} + +// ExtensionConfigSourceMultiError is an error wrapping multiple validation +// errors returned by ExtensionConfigSource.ValidateAll() if the designated +// constraints aren't met. +type ExtensionConfigSourceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ExtensionConfigSourceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ExtensionConfigSourceMultiError) AllErrors() []error { return m } + +// ExtensionConfigSourceValidationError is the validation error returned by +// ExtensionConfigSource.Validate if the designated constraints aren't met. +type ExtensionConfigSourceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ExtensionConfigSourceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ExtensionConfigSourceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ExtensionConfigSourceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ExtensionConfigSourceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ExtensionConfigSourceValidationError) ErrorName() string { + return "ExtensionConfigSourceValidationError" +} + +// Error satisfies the builtin error interface +func (e ExtensionConfigSourceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sExtensionConfigSource.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ExtensionConfigSourceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ExtensionConfigSourceValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source_vtproto.pb.go new file mode 100644 index 0000000000..ae2fd091de --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/config_source_vtproto.pb.go @@ -0,0 +1,831 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/config_source.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ApiConfigSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ApiConfigSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ApiConfigSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ConfigValidators) > 0 { + for iNdEx := len(m.ConfigValidators) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ConfigValidators[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + } + if m.TransportApiVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TransportApiVersion)) + i-- + dAtA[i] = 0x40 + } + if m.SetNodeOnFirstMessageOnly { + i-- + if m.SetNodeOnFirstMessageOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.RateLimitSettings != nil { + size, err := m.RateLimitSettings.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.RequestTimeout != nil { + size, err := (*durationpb.Duration)(m.RequestTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if len(m.GrpcServices) > 0 { + for iNdEx := len(m.GrpcServices) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.GrpcServices[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if m.RefreshDelay != nil { + size, err := (*durationpb.Duration)(m.RefreshDelay).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.ClusterNames) > 0 { + for iNdEx := len(m.ClusterNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ClusterNames[iNdEx]) + copy(dAtA[i:], m.ClusterNames[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterNames[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.ApiType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ApiType)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *AggregatedConfigSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AggregatedConfigSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AggregatedConfigSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return 
len(dAtA) - i, nil +} + +func (m *SelfConfigSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfConfigSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SelfConfigSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TransportApiVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TransportApiVersion)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RateLimitSettings) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimitSettings) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimitSettings) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.FillRate != nil { + size, err := (*wrapperspb.DoubleValue)(m.FillRate).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MaxTokens != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxTokens).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PathConfigSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PathConfigSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathConfigSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.WatchedDirectory != nil { + size, err := m.WatchedDirectory.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConfigSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigSource) 
MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ConfigSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigSourceSpecifier.(*ConfigSource_PathConfigSource); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Authorities) > 0 { + for iNdEx := len(m.Authorities) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Authorities[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Authorities[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + } + if m.ResourceApiVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ResourceApiVersion)) + i-- + dAtA[i] = 0x30 + } + if msg, ok := m.ConfigSourceSpecifier.(*ConfigSource_Self); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.InitialFetchTimeout != nil { + size, err := (*durationpb.Duration)(m.InitialFetchTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if msg, ok := m.ConfigSourceSpecifier.(*ConfigSource_Ads); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ConfigSourceSpecifier.(*ConfigSource_ApiConfigSource); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ConfigSourceSpecifier.(*ConfigSource_Path); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *ConfigSource_Path) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ConfigSource_Path) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *ConfigSource_ApiConfigSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ConfigSource_ApiConfigSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ApiConfigSource != nil { + size, err := m.ApiConfigSource.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *ConfigSource_Ads) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} 
+ +func (m *ConfigSource_Ads) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Ads != nil { + size, err := m.Ads.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *ConfigSource_Self) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ConfigSource_Self) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Self != nil { + size, err := m.Self.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *ConfigSource_PathConfigSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ConfigSource_PathConfigSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PathConfigSource != nil { + size, err := m.PathConfigSource.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *ExtensionConfigSource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExtensionConfigSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ExtensionConfigSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TypeUrls) > 0 { + for iNdEx := len(m.TypeUrls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TypeUrls[iNdEx]) + copy(dAtA[i:], m.TypeUrls[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrls[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if m.ApplyDefaultConfigWithoutWarming { + i-- + if m.ApplyDefaultConfigWithoutWarming { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.DefaultConfig != nil { + size, err := (*anypb.Any)(m.DefaultConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.ConfigSource != nil { + size, err := m.ConfigSource.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ApiConfigSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ApiType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ApiType)) + } + if len(m.ClusterNames) > 0 { + for _, s := range m.ClusterNames { + l = len(s) + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.RefreshDelay != nil { + l = (*durationpb.Duration)(m.RefreshDelay).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.GrpcServices) > 0 { + for _, e := range m.GrpcServices { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.RequestTimeout != nil { + l = (*durationpb.Duration)(m.RequestTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RateLimitSettings != nil { + l = m.RateLimitSettings.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SetNodeOnFirstMessageOnly { + n += 2 + } + if m.TransportApiVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TransportApiVersion)) + } + if len(m.ConfigValidators) > 0 { + for _, e := range m.ConfigValidators { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *AggregatedConfigSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *SelfConfigSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TransportApiVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TransportApiVersion)) + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimitSettings) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxTokens != nil { + l = (*wrapperspb.UInt32Value)(m.MaxTokens).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FillRate != nil { + l = (*wrapperspb.DoubleValue)(m.FillRate).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *PathConfigSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.WatchedDirectory != nil { + l = m.WatchedDirectory.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ConfigSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.ConfigSourceSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.InitialFetchTimeout != nil { + l = (*durationpb.Duration)(m.InitialFetchTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ResourceApiVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ResourceApiVersion)) + } + if len(m.Authorities) > 0 { + for _, e := range m.Authorities { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ConfigSource_Path) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *ConfigSource_ApiConfigSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ApiConfigSource != nil { + l = m.ApiConfigSource.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ConfigSource_Ads) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Ads != nil { + l = m.Ads.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m 
*ConfigSource_Self) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Self != nil { + l = m.Self.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ConfigSource_PathConfigSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PathConfigSource != nil { + l = m.PathConfigSource.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ExtensionConfigSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConfigSource != nil { + l = m.ConfigSource.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DefaultConfig != nil { + l = (*anypb.Any)(m.DefaultConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ApplyDefaultConfigWithoutWarming { + n += 2 + } + if len(m.TypeUrls) > 0 { + for _, s := range m.TypeUrls { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.go new file mode 100644 index 0000000000..8d995326c1 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.go @@ -0,0 +1,200 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/event_service_config.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// [#not-implemented-hide:] +// Configuration of the event reporting service endpoint. +type EventServiceConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to ConfigSourceSpecifier: + // + // *EventServiceConfig_GrpcService + ConfigSourceSpecifier isEventServiceConfig_ConfigSourceSpecifier `protobuf_oneof:"config_source_specifier"` +} + +func (x *EventServiceConfig) Reset() { + *x = EventServiceConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_event_service_config_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EventServiceConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EventServiceConfig) ProtoMessage() {} + +func (x *EventServiceConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_event_service_config_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EventServiceConfig.ProtoReflect.Descriptor instead. 
+func (*EventServiceConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_event_service_config_proto_rawDescGZIP(), []int{0} +} + +func (m *EventServiceConfig) GetConfigSourceSpecifier() isEventServiceConfig_ConfigSourceSpecifier { + if m != nil { + return m.ConfigSourceSpecifier + } + return nil +} + +func (x *EventServiceConfig) GetGrpcService() *GrpcService { + if x, ok := x.GetConfigSourceSpecifier().(*EventServiceConfig_GrpcService); ok { + return x.GrpcService + } + return nil +} + +type isEventServiceConfig_ConfigSourceSpecifier interface { + isEventServiceConfig_ConfigSourceSpecifier() +} + +type EventServiceConfig_GrpcService struct { + // Specifies the gRPC service that hosts the event reporting service. + GrpcService *GrpcService `protobuf:"bytes,1,opt,name=grpc_service,json=grpcService,proto3,oneof"` +} + +func (*EventServiceConfig_GrpcService) isEventServiceConfig_ConfigSourceSpecifier() {} + +var File_envoy_config_core_v3_event_service_config_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_event_service_config_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, + 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa9, 0x01, 0x0a, 0x12, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x46, 0x0a, 0x0c, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x67, + 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, + 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x1e, 0x0a, 0x17, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x8b, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, + 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 
0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x17, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, + 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_event_service_config_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_event_service_config_proto_rawDescData = file_envoy_config_core_v3_event_service_config_proto_rawDesc +) + +func file_envoy_config_core_v3_event_service_config_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_event_service_config_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_event_service_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_event_service_config_proto_rawDescData) + }) + return file_envoy_config_core_v3_event_service_config_proto_rawDescData +} + +var file_envoy_config_core_v3_event_service_config_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_core_v3_event_service_config_proto_goTypes = []interface{}{ + (*EventServiceConfig)(nil), // 0: envoy.config.core.v3.EventServiceConfig + (*GrpcService)(nil), // 1: envoy.config.core.v3.GrpcService +} +var file_envoy_config_core_v3_event_service_config_proto_depIdxs = []int32{ + 1, // 0: envoy.config.core.v3.EventServiceConfig.grpc_service:type_name -> envoy.config.core.v3.GrpcService + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_event_service_config_proto_init() } +func file_envoy_config_core_v3_event_service_config_proto_init() { + if File_envoy_config_core_v3_event_service_config_proto != nil { + return + } + file_envoy_config_core_v3_grpc_service_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_event_service_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EventServiceConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_core_v3_event_service_config_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*EventServiceConfig_GrpcService)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_event_service_config_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_event_service_config_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_event_service_config_proto_depIdxs, + MessageInfos: file_envoy_config_core_v3_event_service_config_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_event_service_config_proto = out.File + 
file_envoy_config_core_v3_event_service_config_proto_rawDesc = nil + file_envoy_config_core_v3_event_service_config_proto_goTypes = nil + file_envoy_config_core_v3_event_service_config_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.validate.go new file mode 100644 index 0000000000..21f29c8b6f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config.pb.validate.go @@ -0,0 +1,197 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/core/v3/event_service_config.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on EventServiceConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *EventServiceConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on EventServiceConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// EventServiceConfigMultiError, or nil if none found. 
+func (m *EventServiceConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *EventServiceConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofConfigSourceSpecifierPresent := false + switch v := m.ConfigSourceSpecifier.(type) { + case *EventServiceConfig_GrpcService: + if v == nil { + err := EventServiceConfigValidationError{ + field: "ConfigSourceSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigSourceSpecifierPresent = true + + if all { + switch v := interface{}(m.GetGrpcService()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EventServiceConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EventServiceConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpcService()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EventServiceConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofConfigSourceSpecifierPresent { + err := EventServiceConfigValidationError{ + field: "ConfigSourceSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return EventServiceConfigMultiError(errors) + } + + return nil +} + +// EventServiceConfigMultiError is an error wrapping multiple validation errors +// returned by EventServiceConfig.ValidateAll() if the designated constraints +// aren't met. +type EventServiceConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EventServiceConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EventServiceConfigMultiError) AllErrors() []error { return m } + +// EventServiceConfigValidationError is the validation error returned by +// EventServiceConfig.Validate if the designated constraints aren't met. +type EventServiceConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EventServiceConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EventServiceConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EventServiceConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EventServiceConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e EventServiceConfigValidationError) ErrorName() string { + return "EventServiceConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e EventServiceConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEventServiceConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EventServiceConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EventServiceConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config_vtproto.pb.go new file mode 100644 index 0000000000..c8e65c66fe --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/event_service_config_vtproto.pb.go @@ -0,0 +1,110 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/event_service_config.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *EventServiceConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventServiceConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EventServiceConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigSourceSpecifier.(*EventServiceConfig_GrpcService); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *EventServiceConfig_GrpcService) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EventServiceConfig_GrpcService) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GrpcService != nil { + size, err := m.GrpcService.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *EventServiceConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.ConfigSourceSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *EventServiceConfig_GrpcService) SizeVT() (n int) { + if m == nil { + return 0 + } + 
var l int + _ = l + if m.GrpcService != nil { + l = m.GrpcService.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.go new file mode 100644 index 0000000000..81ec41a6e7 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.go @@ -0,0 +1,185 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/extension.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Message type for extension configuration. +// [#next-major-version: revisit all existing typed_config that doesn't use this wrapper.]. +type TypedExtensionConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of an extension. This is not used to select the extension, instead + // it serves the role of an opaque identifier. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The typed config for the extension. The type URL will be used to identify + // the extension. In the case that the type URL is “xds.type.v3.TypedStruct“ + // (or, for historical reasons, “udpa.type.v1.TypedStruct“), the inner type + // URL of “TypedStruct“ will be utilized. See the + // :ref:`extension configuration overview + // ` for further details. + TypedConfig *anypb.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` +} + +func (x *TypedExtensionConfig) Reset() { + *x = TypedExtensionConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_extension_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypedExtensionConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypedExtensionConfig) ProtoMessage() {} + +func (x *TypedExtensionConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_extension_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypedExtensionConfig.ProtoReflect.Descriptor instead. 
+func (*TypedExtensionConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_extension_proto_rawDescGZIP(), []int{0} +} + +func (x *TypedExtensionConfig) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *TypedExtensionConfig) GetTypedConfig() *anypb.Any { + if x != nil { + return x.TypedConfig + } + return nil +} + +var File_envoy_config_core_v3_extension_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_extension_proto_rawDesc = []byte{ + 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0x76, 0x0a, 0x14, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0xa2, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x82, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, + 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, + 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_extension_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_extension_proto_rawDescData = file_envoy_config_core_v3_extension_proto_rawDesc +) + +func file_envoy_config_core_v3_extension_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_extension_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_extension_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_extension_proto_rawDescData) + }) + return file_envoy_config_core_v3_extension_proto_rawDescData +} + +var 
file_envoy_config_core_v3_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_core_v3_extension_proto_goTypes = []interface{}{ + (*TypedExtensionConfig)(nil), // 0: envoy.config.core.v3.TypedExtensionConfig + (*anypb.Any)(nil), // 1: google.protobuf.Any +} +var file_envoy_config_core_v3_extension_proto_depIdxs = []int32{ + 1, // 0: envoy.config.core.v3.TypedExtensionConfig.typed_config:type_name -> google.protobuf.Any + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_extension_proto_init() } +func file_envoy_config_core_v3_extension_proto_init() { + if File_envoy_config_core_v3_extension_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TypedExtensionConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_extension_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_extension_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_extension_proto_depIdxs, + MessageInfos: file_envoy_config_core_v3_extension_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_extension_proto = out.File + file_envoy_config_core_v3_extension_proto_rawDesc = nil + file_envoy_config_core_v3_extension_proto_goTypes = nil + file_envoy_config_core_v3_extension_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.validate.go new file mode 100644 index 0000000000..c0df5946a5 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension.pb.validate.go @@ -0,0 +1,165 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/core/v3/extension.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on TypedExtensionConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *TypedExtensionConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TypedExtensionConfig with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// TypedExtensionConfigMultiError, or nil if none found. +func (m *TypedExtensionConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *TypedExtensionConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := TypedExtensionConfigValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetTypedConfig() == nil { + err := TypedExtensionConfigValidationError{ + field: "TypedConfig", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if a := m.GetTypedConfig(); a != nil { + + } + + if len(errors) > 0 { + return TypedExtensionConfigMultiError(errors) + } + + return nil +} + +// TypedExtensionConfigMultiError is an error wrapping multiple validation +// errors returned by TypedExtensionConfig.ValidateAll() if the designated +// constraints aren't met. +type TypedExtensionConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TypedExtensionConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TypedExtensionConfigMultiError) AllErrors() []error { return m } + +// TypedExtensionConfigValidationError is the validation error returned by +// TypedExtensionConfig.Validate if the designated constraints aren't met. +type TypedExtensionConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TypedExtensionConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TypedExtensionConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TypedExtensionConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TypedExtensionConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TypedExtensionConfigValidationError) ErrorName() string { + return "TypedExtensionConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e TypedExtensionConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTypedExtensionConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TypedExtensionConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TypedExtensionConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension_vtproto.pb.go new file mode 100644 index 0000000000..f81b8b38b0 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/extension_vtproto.pb.go @@ -0,0 +1,88 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
+// source: envoy/config/core/v3/extension.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TypedExtensionConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TypedExtensionConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TypedExtensionConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TypedExtensionConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.go new file mode 100644 index 0000000000..199ac40f04 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.go @@ -0,0 +1,246 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/grpc_method_list.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A list of gRPC methods which can be used as an allowlist, for example. 
+type GrpcMethodList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Services []*GrpcMethodList_Service `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"` +} + +func (x *GrpcMethodList) Reset() { + *x = GrpcMethodList{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_method_list_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcMethodList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcMethodList) ProtoMessage() {} + +func (x *GrpcMethodList) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_method_list_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcMethodList.ProtoReflect.Descriptor instead. +func (*GrpcMethodList) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_method_list_proto_rawDescGZIP(), []int{0} +} + +func (x *GrpcMethodList) GetServices() []*GrpcMethodList_Service { + if x != nil { + return x.Services + } + return nil +} + +type GrpcMethodList_Service struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the gRPC service. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The names of the gRPC methods in this service. + MethodNames []string `protobuf:"bytes,2,rep,name=method_names,json=methodNames,proto3" json:"method_names,omitempty"` +} + +func (x *GrpcMethodList_Service) Reset() { + *x = GrpcMethodList_Service{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_method_list_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcMethodList_Service) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcMethodList_Service) ProtoMessage() {} + +func (x *GrpcMethodList_Service) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_method_list_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcMethodList_Service.ProtoReflect.Descriptor instead. 
+func (*GrpcMethodList_Service) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_method_list_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *GrpcMethodList_Service) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *GrpcMethodList_Service) GetMethodNames() []string { + if x != nil { + return x.MethodNames + } + return nil +} + +var File_envoy_config_core_v3_grpc_method_list_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_grpc_method_list_proto_rawDesc = []byte{ + 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8a, + 0x02, 0x0a, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4c, 0x69, 0x73, + 0x74, 0x12, 0x48, 0x0a, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x1a, 0x84, 0x01, 0x0a, 0x07, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, + 0x01, 0x02, 0x08, 0x01, 0x52, 0x0b, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, + 0x63, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x87, 0x01, 0xba, 0x80, + 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x13, 0x47, 0x72, 0x70, 0x63, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 
0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, + 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_grpc_method_list_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_grpc_method_list_proto_rawDescData = file_envoy_config_core_v3_grpc_method_list_proto_rawDesc +) + +func file_envoy_config_core_v3_grpc_method_list_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_grpc_method_list_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_grpc_method_list_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_grpc_method_list_proto_rawDescData) + }) + return file_envoy_config_core_v3_grpc_method_list_proto_rawDescData +} + +var file_envoy_config_core_v3_grpc_method_list_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_config_core_v3_grpc_method_list_proto_goTypes = []interface{}{ + (*GrpcMethodList)(nil), // 0: envoy.config.core.v3.GrpcMethodList + (*GrpcMethodList_Service)(nil), // 1: envoy.config.core.v3.GrpcMethodList.Service +} +var file_envoy_config_core_v3_grpc_method_list_proto_depIdxs = []int32{ + 1, // 0: envoy.config.core.v3.GrpcMethodList.services:type_name -> envoy.config.core.v3.GrpcMethodList.Service + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_grpc_method_list_proto_init() } +func file_envoy_config_core_v3_grpc_method_list_proto_init() { + if File_envoy_config_core_v3_grpc_method_list_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_grpc_method_list_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcMethodList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_grpc_method_list_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcMethodList_Service); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_grpc_method_list_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_grpc_method_list_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_grpc_method_list_proto_depIdxs, + MessageInfos: file_envoy_config_core_v3_grpc_method_list_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_grpc_method_list_proto = out.File + file_envoy_config_core_v3_grpc_method_list_proto_rawDesc = nil + file_envoy_config_core_v3_grpc_method_list_proto_goTypes = nil + file_envoy_config_core_v3_grpc_method_list_proto_depIdxs = nil +} diff --git 
a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.validate.go new file mode 100644 index 0000000000..994cc4df73 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list.pb.validate.go @@ -0,0 +1,295 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/core/v3/grpc_method_list.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on GrpcMethodList with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *GrpcMethodList) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GrpcMethodList with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in GrpcMethodListMultiError, +// or nil if none found. +func (m *GrpcMethodList) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcMethodList) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetServices() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcMethodListValidationError{ + field: fmt.Sprintf("Services[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcMethodListValidationError{ + field: fmt.Sprintf("Services[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcMethodListValidationError{ + field: fmt.Sprintf("Services[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return GrpcMethodListMultiError(errors) + } + + return nil +} + +// GrpcMethodListMultiError is an error wrapping multiple validation errors +// returned by GrpcMethodList.ValidateAll() if the designated constraints +// aren't met. +type GrpcMethodListMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcMethodListMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcMethodListMultiError) AllErrors() []error { return m } + +// GrpcMethodListValidationError is the validation error returned by +// GrpcMethodList.Validate if the designated constraints aren't met. 
+type GrpcMethodListValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcMethodListValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrpcMethodListValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrpcMethodListValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrpcMethodListValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GrpcMethodListValidationError) ErrorName() string { return "GrpcMethodListValidationError" } + +// Error satisfies the builtin error interface +func (e GrpcMethodListValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcMethodList.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcMethodListValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcMethodListValidationError{} + +// Validate checks the field values on GrpcMethodList_Service with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *GrpcMethodList_Service) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GrpcMethodList_Service with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GrpcMethodList_ServiceMultiError, or nil if none found. +func (m *GrpcMethodList_Service) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcMethodList_Service) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := GrpcMethodList_ServiceValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetMethodNames()) < 1 { + err := GrpcMethodList_ServiceValidationError{ + field: "MethodNames", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return GrpcMethodList_ServiceMultiError(errors) + } + + return nil +} + +// GrpcMethodList_ServiceMultiError is an error wrapping multiple validation +// errors returned by GrpcMethodList_Service.ValidateAll() if the designated +// constraints aren't met. +type GrpcMethodList_ServiceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcMethodList_ServiceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcMethodList_ServiceMultiError) AllErrors() []error { return m } + +// GrpcMethodList_ServiceValidationError is the validation error returned by +// GrpcMethodList_Service.Validate if the designated constraints aren't met. +type GrpcMethodList_ServiceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e GrpcMethodList_ServiceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrpcMethodList_ServiceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrpcMethodList_ServiceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrpcMethodList_ServiceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GrpcMethodList_ServiceValidationError) ErrorName() string { + return "GrpcMethodList_ServiceValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcMethodList_ServiceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcMethodList_Service.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcMethodList_ServiceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcMethodList_ServiceValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list_vtproto.pb.go new file mode 100644 index 0000000000..940531d1d4 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_method_list_vtproto.pb.go @@ -0,0 +1,149 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/grpc_method_list.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *GrpcMethodList_Service) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcMethodList_Service) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcMethodList_Service) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.MethodNames) > 0 { + for iNdEx := len(m.MethodNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MethodNames[iNdEx]) + copy(dAtA[i:], m.MethodNames[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.MethodNames[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcMethodList) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcMethodList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcMethodList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Services) > 0 { + for iNdEx := len(m.Services) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Services[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GrpcMethodList_Service) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.MethodNames) > 0 { + for _, s := range m.MethodNames { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcMethodList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Services) > 0 { + for _, e := range m.Services { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.go new file mode 100644 index 0000000000..3967277f61 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.go @@ -0,0 +1,1814 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/grpc_service.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// gRPC service configuration. This is used by :ref:`ApiConfigSource +// ` and filter configurations. +// [#next-free-field: 7] +type GrpcService struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to TargetSpecifier: + // + // *GrpcService_EnvoyGrpc_ + // *GrpcService_GoogleGrpc_ + TargetSpecifier isGrpcService_TargetSpecifier `protobuf_oneof:"target_specifier"` + // The timeout for the gRPC request. This is the timeout for a specific + // request. + Timeout *durationpb.Duration `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"` + // Additional metadata to include in streams initiated to the GrpcService. This can be used for + // scenarios in which additional ad hoc authorization headers (e.g. “x-foo-bar: baz-key“) are to + // be injected. For more information, including details on header value syntax, see the + // documentation on :ref:`custom request headers + // `. + InitialMetadata []*HeaderValue `protobuf:"bytes,5,rep,name=initial_metadata,json=initialMetadata,proto3" json:"initial_metadata,omitempty"` + // Optional default retry policy for streams toward the service. + // If an async stream doesn't have retry policy configured in its stream options, this retry policy is used. + RetryPolicy *RetryPolicy `protobuf:"bytes,6,opt,name=retry_policy,json=retryPolicy,proto3" json:"retry_policy,omitempty"` +} + +func (x *GrpcService) Reset() { + *x = GrpcService{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcService) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService) ProtoMessage() {} + +func (x *GrpcService) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService.ProtoReflect.Descriptor instead. 
+func (*GrpcService) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0} +} + +func (m *GrpcService) GetTargetSpecifier() isGrpcService_TargetSpecifier { + if m != nil { + return m.TargetSpecifier + } + return nil +} + +func (x *GrpcService) GetEnvoyGrpc() *GrpcService_EnvoyGrpc { + if x, ok := x.GetTargetSpecifier().(*GrpcService_EnvoyGrpc_); ok { + return x.EnvoyGrpc + } + return nil +} + +func (x *GrpcService) GetGoogleGrpc() *GrpcService_GoogleGrpc { + if x, ok := x.GetTargetSpecifier().(*GrpcService_GoogleGrpc_); ok { + return x.GoogleGrpc + } + return nil +} + +func (x *GrpcService) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout + } + return nil +} + +func (x *GrpcService) GetInitialMetadata() []*HeaderValue { + if x != nil { + return x.InitialMetadata + } + return nil +} + +func (x *GrpcService) GetRetryPolicy() *RetryPolicy { + if x != nil { + return x.RetryPolicy + } + return nil +} + +type isGrpcService_TargetSpecifier interface { + isGrpcService_TargetSpecifier() +} + +type GrpcService_EnvoyGrpc_ struct { + // Envoy's in-built gRPC client. + // See the :ref:`gRPC services overview ` + // documentation for discussion on gRPC client selection. + EnvoyGrpc *GrpcService_EnvoyGrpc `protobuf:"bytes,1,opt,name=envoy_grpc,json=envoyGrpc,proto3,oneof"` +} + +type GrpcService_GoogleGrpc_ struct { + // `Google C++ gRPC client `_ + // See the :ref:`gRPC services overview ` + // documentation for discussion on gRPC client selection. + GoogleGrpc *GrpcService_GoogleGrpc `protobuf:"bytes,2,opt,name=google_grpc,json=googleGrpc,proto3,oneof"` +} + +func (*GrpcService_EnvoyGrpc_) isGrpcService_TargetSpecifier() {} + +func (*GrpcService_GoogleGrpc_) isGrpcService_TargetSpecifier() {} + +// [#next-free-field: 6] +type GrpcService_EnvoyGrpc struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the upstream gRPC cluster. SSL credentials will be supplied + // in the :ref:`Cluster ` :ref:`transport_socket + // `. + ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + // The “:authority“ header in the grpc request. If this field is not set, the authority header value will be “cluster_name“. + // Note that this authority does not override the SNI. The SNI is provided by the transport socket of the cluster. + Authority string `protobuf:"bytes,2,opt,name=authority,proto3" json:"authority,omitempty"` + // Indicates the retry policy for re-establishing the gRPC stream + // This field is optional. If max interval is not provided, it will be set to ten times the provided base interval. + // Currently only supported for xDS gRPC streams. + // If not set, xDS gRPC streams default base interval:500ms, maximum interval:30s will be applied. + RetryPolicy *RetryPolicy `protobuf:"bytes,3,opt,name=retry_policy,json=retryPolicy,proto3" json:"retry_policy,omitempty"` + // Maximum gRPC message size that is allowed to be received. + // If a message over this limit is received, the gRPC stream is terminated with the RESOURCE_EXHAUSTED error. + // This limit is applied to individual messages in the streaming response and not the total size of streaming response. + // Defaults to 0, which means unlimited. 
+ MaxReceiveMessageLength *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=max_receive_message_length,json=maxReceiveMessageLength,proto3" json:"max_receive_message_length,omitempty"` + // This provides gRPC client level control over envoy generated headers. + // If false, the header will be sent but it can be overridden by per stream option. + // If true, the header will be removed and can not be overridden by per stream option. + // Default to false. + SkipEnvoyHeaders bool `protobuf:"varint,5,opt,name=skip_envoy_headers,json=skipEnvoyHeaders,proto3" json:"skip_envoy_headers,omitempty"` +} + +func (x *GrpcService_EnvoyGrpc) Reset() { + *x = GrpcService_EnvoyGrpc{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcService_EnvoyGrpc) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService_EnvoyGrpc) ProtoMessage() {} + +func (x *GrpcService_EnvoyGrpc) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService_EnvoyGrpc.ProtoReflect.Descriptor instead. +func (*GrpcService_EnvoyGrpc) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *GrpcService_EnvoyGrpc) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *GrpcService_EnvoyGrpc) GetAuthority() string { + if x != nil { + return x.Authority + } + return "" +} + +func (x *GrpcService_EnvoyGrpc) GetRetryPolicy() *RetryPolicy { + if x != nil { + return x.RetryPolicy + } + return nil +} + +func (x *GrpcService_EnvoyGrpc) GetMaxReceiveMessageLength() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxReceiveMessageLength + } + return nil +} + +func (x *GrpcService_EnvoyGrpc) GetSkipEnvoyHeaders() bool { + if x != nil { + return x.SkipEnvoyHeaders + } + return false +} + +// [#next-free-field: 9] +type GrpcService_GoogleGrpc struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The target URI when using the `Google C++ gRPC client + // `_. SSL credentials will be supplied in + // :ref:`channel_credentials `. + TargetUri string `protobuf:"bytes,1,opt,name=target_uri,json=targetUri,proto3" json:"target_uri,omitempty"` + ChannelCredentials *GrpcService_GoogleGrpc_ChannelCredentials `protobuf:"bytes,2,opt,name=channel_credentials,json=channelCredentials,proto3" json:"channel_credentials,omitempty"` + // A set of call credentials that can be composed with `channel credentials + // `_. + CallCredentials []*GrpcService_GoogleGrpc_CallCredentials `protobuf:"bytes,3,rep,name=call_credentials,json=callCredentials,proto3" json:"call_credentials,omitempty"` + // The human readable prefix to use when emitting statistics for the gRPC + // service. + // + // .. 
csv-table:: + // + // :header: Name, Type, Description + // :widths: 1, 1, 2 + // + // streams_total, Counter, Total number of streams opened + // streams_closed_, Counter, Total streams closed with + StatPrefix string `protobuf:"bytes,4,opt,name=stat_prefix,json=statPrefix,proto3" json:"stat_prefix,omitempty"` + // The name of the Google gRPC credentials factory to use. This must have been registered with + // Envoy. If this is empty, a default credentials factory will be used that sets up channel + // credentials based on other configuration parameters. + CredentialsFactoryName string `protobuf:"bytes,5,opt,name=credentials_factory_name,json=credentialsFactoryName,proto3" json:"credentials_factory_name,omitempty"` + // Additional configuration for site-specific customizations of the Google + // gRPC library. + Config *structpb.Struct `protobuf:"bytes,6,opt,name=config,proto3" json:"config,omitempty"` + // How many bytes each stream can buffer internally. + // If not set an implementation defined default is applied (1MiB). + PerStreamBufferLimitBytes *wrapperspb.UInt32Value `protobuf:"bytes,7,opt,name=per_stream_buffer_limit_bytes,json=perStreamBufferLimitBytes,proto3" json:"per_stream_buffer_limit_bytes,omitempty"` + // Custom channels args. + ChannelArgs *GrpcService_GoogleGrpc_ChannelArgs `protobuf:"bytes,8,opt,name=channel_args,json=channelArgs,proto3" json:"channel_args,omitempty"` +} + +func (x *GrpcService_GoogleGrpc) Reset() { + *x = GrpcService_GoogleGrpc{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcService_GoogleGrpc) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService_GoogleGrpc) ProtoMessage() {} + +func (x *GrpcService_GoogleGrpc) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService_GoogleGrpc.ProtoReflect.Descriptor instead. 
+func (*GrpcService_GoogleGrpc) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *GrpcService_GoogleGrpc) GetTargetUri() string { + if x != nil { + return x.TargetUri + } + return "" +} + +func (x *GrpcService_GoogleGrpc) GetChannelCredentials() *GrpcService_GoogleGrpc_ChannelCredentials { + if x != nil { + return x.ChannelCredentials + } + return nil +} + +func (x *GrpcService_GoogleGrpc) GetCallCredentials() []*GrpcService_GoogleGrpc_CallCredentials { + if x != nil { + return x.CallCredentials + } + return nil +} + +func (x *GrpcService_GoogleGrpc) GetStatPrefix() string { + if x != nil { + return x.StatPrefix + } + return "" +} + +func (x *GrpcService_GoogleGrpc) GetCredentialsFactoryName() string { + if x != nil { + return x.CredentialsFactoryName + } + return "" +} + +func (x *GrpcService_GoogleGrpc) GetConfig() *structpb.Struct { + if x != nil { + return x.Config + } + return nil +} + +func (x *GrpcService_GoogleGrpc) GetPerStreamBufferLimitBytes() *wrapperspb.UInt32Value { + if x != nil { + return x.PerStreamBufferLimitBytes + } + return nil +} + +func (x *GrpcService_GoogleGrpc) GetChannelArgs() *GrpcService_GoogleGrpc_ChannelArgs { + if x != nil { + return x.ChannelArgs + } + return nil +} + +// See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html. +type GrpcService_GoogleGrpc_SslCredentials struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // PEM encoded server root certificates. + RootCerts *DataSource `protobuf:"bytes,1,opt,name=root_certs,json=rootCerts,proto3" json:"root_certs,omitempty"` + // PEM encoded client private key. + PrivateKey *DataSource `protobuf:"bytes,2,opt,name=private_key,json=privateKey,proto3" json:"private_key,omitempty"` + // PEM encoded client certificate chain. + CertChain *DataSource `protobuf:"bytes,3,opt,name=cert_chain,json=certChain,proto3" json:"cert_chain,omitempty"` +} + +func (x *GrpcService_GoogleGrpc_SslCredentials) Reset() { + *x = GrpcService_GoogleGrpc_SslCredentials{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcService_GoogleGrpc_SslCredentials) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService_GoogleGrpc_SslCredentials) ProtoMessage() {} + +func (x *GrpcService_GoogleGrpc_SslCredentials) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService_GoogleGrpc_SslCredentials.ProtoReflect.Descriptor instead. +func (*GrpcService_GoogleGrpc_SslCredentials) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 0} +} + +func (x *GrpcService_GoogleGrpc_SslCredentials) GetRootCerts() *DataSource { + if x != nil { + return x.RootCerts + } + return nil +} + +func (x *GrpcService_GoogleGrpc_SslCredentials) GetPrivateKey() *DataSource { + if x != nil { + return x.PrivateKey + } + return nil +} + +func (x *GrpcService_GoogleGrpc_SslCredentials) GetCertChain() *DataSource { + if x != nil { + return x.CertChain + } + return nil +} + +// Local channel credentials. 
Only UDS is supported for now. +// See https://github.com/grpc/grpc/pull/15909. +type GrpcService_GoogleGrpc_GoogleLocalCredentials struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GrpcService_GoogleGrpc_GoogleLocalCredentials) Reset() { + *x = GrpcService_GoogleGrpc_GoogleLocalCredentials{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcService_GoogleGrpc_GoogleLocalCredentials) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService_GoogleGrpc_GoogleLocalCredentials) ProtoMessage() {} + +func (x *GrpcService_GoogleGrpc_GoogleLocalCredentials) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService_GoogleGrpc_GoogleLocalCredentials.ProtoReflect.Descriptor instead. +func (*GrpcService_GoogleGrpc_GoogleLocalCredentials) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 1} +} + +// See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call +// credential types. +type GrpcService_GoogleGrpc_ChannelCredentials struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to CredentialSpecifier: + // + // *GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials + // *GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault + // *GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials + CredentialSpecifier isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier `protobuf_oneof:"credential_specifier"` +} + +func (x *GrpcService_GoogleGrpc_ChannelCredentials) Reset() { + *x = GrpcService_GoogleGrpc_ChannelCredentials{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcService_GoogleGrpc_ChannelCredentials) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService_GoogleGrpc_ChannelCredentials) ProtoMessage() {} + +func (x *GrpcService_GoogleGrpc_ChannelCredentials) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService_GoogleGrpc_ChannelCredentials.ProtoReflect.Descriptor instead. 
+func (*GrpcService_GoogleGrpc_ChannelCredentials) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 2} +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials) GetCredentialSpecifier() isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier { + if m != nil { + return m.CredentialSpecifier + } + return nil +} + +func (x *GrpcService_GoogleGrpc_ChannelCredentials) GetSslCredentials() *GrpcService_GoogleGrpc_SslCredentials { + if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials); ok { + return x.SslCredentials + } + return nil +} + +func (x *GrpcService_GoogleGrpc_ChannelCredentials) GetGoogleDefault() *emptypb.Empty { + if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault); ok { + return x.GoogleDefault + } + return nil +} + +func (x *GrpcService_GoogleGrpc_ChannelCredentials) GetLocalCredentials() *GrpcService_GoogleGrpc_GoogleLocalCredentials { + if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials); ok { + return x.LocalCredentials + } + return nil +} + +type isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier interface { + isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier() +} + +type GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials struct { + SslCredentials *GrpcService_GoogleGrpc_SslCredentials `protobuf:"bytes,1,opt,name=ssl_credentials,json=sslCredentials,proto3,oneof"` +} + +type GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault struct { + // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 + GoogleDefault *emptypb.Empty `protobuf:"bytes,2,opt,name=google_default,json=googleDefault,proto3,oneof"` +} + +type GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials struct { + LocalCredentials *GrpcService_GoogleGrpc_GoogleLocalCredentials `protobuf:"bytes,3,opt,name=local_credentials,json=localCredentials,proto3,oneof"` +} + +func (*GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials) isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier() { +} + +func (*GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault) isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier() { +} + +func (*GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials) isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier() { +} + +// [#next-free-field: 8] +type GrpcService_GoogleGrpc_CallCredentials struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to CredentialSpecifier: + // + // *GrpcService_GoogleGrpc_CallCredentials_AccessToken + // *GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine + // *GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken + // *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess + // *GrpcService_GoogleGrpc_CallCredentials_GoogleIam + // *GrpcService_GoogleGrpc_CallCredentials_FromPlugin + // *GrpcService_GoogleGrpc_CallCredentials_StsService_ + CredentialSpecifier isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier `protobuf_oneof:"credential_specifier"` +} + +func (x *GrpcService_GoogleGrpc_CallCredentials) Reset() { + *x = GrpcService_GoogleGrpc_CallCredentials{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + 
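The generated oneof wrapper types above (GrpcService_GoogleGrpc_, GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials, GrpcService_GoogleGrpc_CallCredentials_AccessToken, and so on) are how a caller selects the target and credential branches of this configuration in Go. The following sketch is not part of the vendored diff; it assumes the usual go-control-plane import path for this corev3 package, a DataSource filename branch from base.proto in the same package, and hypothetical endpoint and token values, and only illustrates wiring the types together.

// Minimal sketch (assumptions noted above): build a GrpcService that targets
// the Google C++ gRPC client with SSL channel credentials and an access-token
// call credential, then read it back through the nil-safe generated getters.
package main

import (
	"fmt"
	"time"

	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" // assumed import path
	durationpb "google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	svc := &corev3.GrpcService{
		// target_specifier is a oneof: wrapping GoogleGrpc in its generated
		// wrapper struct selects that branch.
		TargetSpecifier: &corev3.GrpcService_GoogleGrpc_{
			GoogleGrpc: &corev3.GrpcService_GoogleGrpc{
				TargetUri:  "auth.example.internal:443", // hypothetical endpoint
				StatPrefix: "example_grpc",
				ChannelCredentials: &corev3.GrpcService_GoogleGrpc_ChannelCredentials{
					CredentialSpecifier: &corev3.GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials{
						SslCredentials: &corev3.GrpcService_GoogleGrpc_SslCredentials{
							// PEM-encoded roots loaded from disk via the DataSource filename branch.
							RootCerts: &corev3.DataSource{
								Specifier: &corev3.DataSource_Filename{Filename: "/etc/ssl/roots.pem"},
							},
						},
					},
				},
				CallCredentials: []*corev3.GrpcService_GoogleGrpc_CallCredentials{
					{
						CredentialSpecifier: &corev3.GrpcService_GoogleGrpc_CallCredentials_AccessToken{
							AccessToken: "example-token", // hypothetical value
						},
					},
				},
			},
		},
		Timeout: durationpb.New(5 * time.Second),
	}

	// Generated getters traverse the oneof safely and return zero values when
	// another branch (or nothing) is set, so no type assertions are needed.
	fmt.Println(svc.GetGoogleGrpc().GetTargetUri())   // "auth.example.internal:443"
	fmt.Println(svc.GetEnvoyGrpc().GetClusterName())  // "" — EnvoyGrpc branch not set
}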
+func (x *GrpcService_GoogleGrpc_CallCredentials) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService_GoogleGrpc_CallCredentials) ProtoMessage() {} + +func (x *GrpcService_GoogleGrpc_CallCredentials) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService_GoogleGrpc_CallCredentials.ProtoReflect.Descriptor instead. +func (*GrpcService_GoogleGrpc_CallCredentials) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 3} +} + +func (m *GrpcService_GoogleGrpc_CallCredentials) GetCredentialSpecifier() isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier { + if m != nil { + return m.CredentialSpecifier + } + return nil +} + +func (x *GrpcService_GoogleGrpc_CallCredentials) GetAccessToken() string { + if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_AccessToken); ok { + return x.AccessToken + } + return "" +} + +func (x *GrpcService_GoogleGrpc_CallCredentials) GetGoogleComputeEngine() *emptypb.Empty { + if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine); ok { + return x.GoogleComputeEngine + } + return nil +} + +func (x *GrpcService_GoogleGrpc_CallCredentials) GetGoogleRefreshToken() string { + if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken); ok { + return x.GoogleRefreshToken + } + return "" +} + +func (x *GrpcService_GoogleGrpc_CallCredentials) GetServiceAccountJwtAccess() *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials { + if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess); ok { + return x.ServiceAccountJwtAccess + } + return nil +} + +func (x *GrpcService_GoogleGrpc_CallCredentials) GetGoogleIam() *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials { + if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_GoogleIam); ok { + return x.GoogleIam + } + return nil +} + +func (x *GrpcService_GoogleGrpc_CallCredentials) GetFromPlugin() *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin { + if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_FromPlugin); ok { + return x.FromPlugin + } + return nil +} + +func (x *GrpcService_GoogleGrpc_CallCredentials) GetStsService() *GrpcService_GoogleGrpc_CallCredentials_StsService { + if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_StsService_); ok { + return x.StsService + } + return nil +} + +type isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier interface { + isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() +} + +type GrpcService_GoogleGrpc_CallCredentials_AccessToken struct { + // Access token credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d. + AccessToken string `protobuf:"bytes,1,opt,name=access_token,json=accessToken,proto3,oneof"` +} + +type GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine struct { + // Google Compute Engine credentials. 
+ // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 + GoogleComputeEngine *emptypb.Empty `protobuf:"bytes,2,opt,name=google_compute_engine,json=googleComputeEngine,proto3,oneof"` +} + +type GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken struct { + // Google refresh token credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c. + GoogleRefreshToken string `protobuf:"bytes,3,opt,name=google_refresh_token,json=googleRefreshToken,proto3,oneof"` +} + +type GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess struct { + // Service Account JWT Access credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa. + ServiceAccountJwtAccess *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials `protobuf:"bytes,4,opt,name=service_account_jwt_access,json=serviceAccountJwtAccess,proto3,oneof"` +} + +type GrpcService_GoogleGrpc_CallCredentials_GoogleIam struct { + // Google IAM credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0. + GoogleIam *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials `protobuf:"bytes,5,opt,name=google_iam,json=googleIam,proto3,oneof"` +} + +type GrpcService_GoogleGrpc_CallCredentials_FromPlugin struct { + // Custom authenticator credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07. + // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms. + FromPlugin *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin `protobuf:"bytes,6,opt,name=from_plugin,json=fromPlugin,proto3,oneof"` +} + +type GrpcService_GoogleGrpc_CallCredentials_StsService_ struct { + // Custom security token service which implements OAuth 2.0 token exchange. + // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 + // See https://github.com/grpc/grpc/pull/19587. + StsService *GrpcService_GoogleGrpc_CallCredentials_StsService `protobuf:"bytes,7,opt,name=sts_service,json=stsService,proto3,oneof"` +} + +func (*GrpcService_GoogleGrpc_CallCredentials_AccessToken) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() { +} + +func (*GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() { +} + +func (*GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() { +} + +func (*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() { +} + +func (*GrpcService_GoogleGrpc_CallCredentials_GoogleIam) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() { +} + +func (*GrpcService_GoogleGrpc_CallCredentials_FromPlugin) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() { +} + +func (*GrpcService_GoogleGrpc_CallCredentials_StsService_) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() { +} + +// Channel arguments. +type GrpcService_GoogleGrpc_ChannelArgs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // See grpc_types.h GRPC_ARG #defines for keys that work here. 
+ Args map[string]*GrpcService_GoogleGrpc_ChannelArgs_Value `protobuf:"bytes,1,rep,name=args,proto3" json:"args,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *GrpcService_GoogleGrpc_ChannelArgs) Reset() { + *x = GrpcService_GoogleGrpc_ChannelArgs{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcService_GoogleGrpc_ChannelArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService_GoogleGrpc_ChannelArgs) ProtoMessage() {} + +func (x *GrpcService_GoogleGrpc_ChannelArgs) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService_GoogleGrpc_ChannelArgs.ProtoReflect.Descriptor instead. +func (*GrpcService_GoogleGrpc_ChannelArgs) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 4} +} + +func (x *GrpcService_GoogleGrpc_ChannelArgs) GetArgs() map[string]*GrpcService_GoogleGrpc_ChannelArgs_Value { + if x != nil { + return x.Args + } + return nil +} + +type GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + JsonKey string `protobuf:"bytes,1,opt,name=json_key,json=jsonKey,proto3" json:"json_key,omitempty"` + TokenLifetimeSeconds uint64 `protobuf:"varint,2,opt,name=token_lifetime_seconds,json=tokenLifetimeSeconds,proto3" json:"token_lifetime_seconds,omitempty"` +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) Reset() { + *x = GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) ProtoMessage() {} + +func (x *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials.ProtoReflect.Descriptor instead. 
+func (*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 3, 0} +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) GetJsonKey() string { + if x != nil { + return x.JsonKey + } + return "" +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) GetTokenLifetimeSeconds() uint64 { + if x != nil { + return x.TokenLifetimeSeconds + } + return 0 +} + +type GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AuthorizationToken string `protobuf:"bytes,1,opt,name=authorization_token,json=authorizationToken,proto3" json:"authorization_token,omitempty"` + AuthoritySelector string `protobuf:"bytes,2,opt,name=authority_selector,json=authoritySelector,proto3" json:"authority_selector,omitempty"` +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) Reset() { + *x = GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) ProtoMessage() {} + +func (x *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials.ProtoReflect.Descriptor instead. 
+func (*GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 3, 1} +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) GetAuthorizationToken() string { + if x != nil { + return x.AuthorizationToken + } + return "" +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) GetAuthoritySelector() string { + if x != nil { + return x.AuthoritySelector + } + return "" +} + +type GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // [#extension-category: envoy.grpc_credentials] + // + // Types that are assignable to ConfigType: + // + // *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig + ConfigType isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) Reset() { + *x = GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) ProtoMessage() {} + +func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin.ProtoReflect.Descriptor instead. 
+func (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 3, 2} +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) GetConfigType() isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType interface { + isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType() +} + +type GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig) isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType() { +} + +// Security token service configuration that allows Google gRPC to +// fetch security token from an OAuth 2.0 authorization server. +// See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and +// https://github.com/grpc/grpc/pull/19587. +// [#next-free-field: 10] +type GrpcService_GoogleGrpc_CallCredentials_StsService struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // URI of the token exchange service that handles token exchange requests. + // [#comment:TODO(asraa): Add URI validation when implemented. Tracked by + // https://github.com/bufbuild/protoc-gen-validate/issues/303] + TokenExchangeServiceUri string `protobuf:"bytes,1,opt,name=token_exchange_service_uri,json=tokenExchangeServiceUri,proto3" json:"token_exchange_service_uri,omitempty"` + // Location of the target service or resource where the client + // intends to use the requested security token. + Resource string `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"` + // Logical name of the target service where the client intends to + // use the requested security token. + Audience string `protobuf:"bytes,3,opt,name=audience,proto3" json:"audience,omitempty"` + // The desired scope of the requested security token in the + // context of the service or resource where the token will be used. + Scope string `protobuf:"bytes,4,opt,name=scope,proto3" json:"scope,omitempty"` + // Type of the requested security token. + RequestedTokenType string `protobuf:"bytes,5,opt,name=requested_token_type,json=requestedTokenType,proto3" json:"requested_token_type,omitempty"` + // The path of subject token, a security token that represents the + // identity of the party on behalf of whom the request is being made. + SubjectTokenPath string `protobuf:"bytes,6,opt,name=subject_token_path,json=subjectTokenPath,proto3" json:"subject_token_path,omitempty"` + // Type of the subject token. 
+ SubjectTokenType string `protobuf:"bytes,7,opt,name=subject_token_type,json=subjectTokenType,proto3" json:"subject_token_type,omitempty"` + // The path of actor token, a security token that represents the identity + // of the acting party. The acting party is authorized to use the + // requested security token and act on behalf of the subject. + ActorTokenPath string `protobuf:"bytes,8,opt,name=actor_token_path,json=actorTokenPath,proto3" json:"actor_token_path,omitempty"` + // Type of the actor token. + ActorTokenType string `protobuf:"bytes,9,opt,name=actor_token_type,json=actorTokenType,proto3" json:"actor_token_type,omitempty"` +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) Reset() { + *x = GrpcService_GoogleGrpc_CallCredentials_StsService{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService_GoogleGrpc_CallCredentials_StsService) ProtoMessage() {} + +func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService_GoogleGrpc_CallCredentials_StsService.ProtoReflect.Descriptor instead. +func (*GrpcService_GoogleGrpc_CallCredentials_StsService) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 3, 3} +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetTokenExchangeServiceUri() string { + if x != nil { + return x.TokenExchangeServiceUri + } + return "" +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetResource() string { + if x != nil { + return x.Resource + } + return "" +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetAudience() string { + if x != nil { + return x.Audience + } + return "" +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetScope() string { + if x != nil { + return x.Scope + } + return "" +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetRequestedTokenType() string { + if x != nil { + return x.RequestedTokenType + } + return "" +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetSubjectTokenPath() string { + if x != nil { + return x.SubjectTokenPath + } + return "" +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetSubjectTokenType() string { + if x != nil { + return x.SubjectTokenType + } + return "" +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetActorTokenPath() string { + if x != nil { + return x.ActorTokenPath + } + return "" +} + +func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetActorTokenType() string { + if x != nil { + return x.ActorTokenType + } + return "" +} + +type GrpcService_GoogleGrpc_ChannelArgs_Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Pointer values are not supported, since they don't make any sense when + // delivered via the API. 
+ // + // Types that are assignable to ValueSpecifier: + // + // *GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue + // *GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue + ValueSpecifier isGrpcService_GoogleGrpc_ChannelArgs_Value_ValueSpecifier `protobuf_oneof:"value_specifier"` +} + +func (x *GrpcService_GoogleGrpc_ChannelArgs_Value) Reset() { + *x = GrpcService_GoogleGrpc_ChannelArgs_Value{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcService_GoogleGrpc_ChannelArgs_Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcService_GoogleGrpc_ChannelArgs_Value) ProtoMessage() {} + +func (x *GrpcService_GoogleGrpc_ChannelArgs_Value) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcService_GoogleGrpc_ChannelArgs_Value.ProtoReflect.Descriptor instead. +func (*GrpcService_GoogleGrpc_ChannelArgs_Value) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 4, 0} +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) GetValueSpecifier() isGrpcService_GoogleGrpc_ChannelArgs_Value_ValueSpecifier { + if m != nil { + return m.ValueSpecifier + } + return nil +} + +func (x *GrpcService_GoogleGrpc_ChannelArgs_Value) GetStringValue() string { + if x, ok := x.GetValueSpecifier().(*GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *GrpcService_GoogleGrpc_ChannelArgs_Value) GetIntValue() int64 { + if x, ok := x.GetValueSpecifier().(*GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue); ok { + return x.IntValue + } + return 0 +} + +type isGrpcService_GoogleGrpc_ChannelArgs_Value_ValueSpecifier interface { + isGrpcService_GoogleGrpc_ChannelArgs_Value_ValueSpecifier() +} + +type GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue struct { + StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue struct { + IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof"` +} + +func (*GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue) isGrpcService_GoogleGrpc_ChannelArgs_Value_ValueSpecifier() { +} + +func (*GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue) isGrpcService_GoogleGrpc_ChannelArgs_Value_ValueSpecifier() { +} + +var File_envoy_config_core_v3_grpc_service_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_grpc_service_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, + 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, + 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, + 0x76, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xa1, 0x23, 0x0a, 0x0b, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x0a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x67, 0x72, 0x70, + 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, + 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x45, 0x6e, 0x76, 0x6f, 0x79, + 0x47, 0x72, 0x70, 0x63, 0x48, 0x00, 0x52, 0x09, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x47, 0x72, 0x70, + 0x63, 0x12, 0x4f, 0x0a, 0x0b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x67, 0x72, 0x70, 0x63, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, + 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x47, 0x72, 0x70, 0x63, 0x48, 0x00, 0x52, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, + 0x70, 0x63, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, + 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x4c, 0x0a, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, + 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x44, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 
0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, + 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0xe7, 0x02, 0x0a, 0x09, + 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x47, 0x72, 0x70, 0x63, 0x12, 0x2a, 0x0a, 0x0c, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, + 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x11, 0xfa, 0x42, 0x0e, 0x72, 0x0c, 0x10, + 0x00, 0x28, 0x80, 0x80, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x09, 0x61, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x44, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, + 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x59, 0x0a, 0x1a, + 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x17, + 0x6d, 0x61, 0x78, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x6b, 0x69, 0x70, 0x5f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x10, 0x73, 0x6b, 0x69, 0x70, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x45, 0x6e, 0x76, 0x6f, + 0x79, 0x47, 0x72, 0x70, 0x63, 0x1a, 0xfa, 0x1c, 0x0a, 0x0a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x47, 0x72, 0x70, 0x63, 0x12, 0x26, 0x0a, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x75, + 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x09, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x55, 0x72, 0x69, 0x12, 0x70, 0x0a, 0x13, + 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x12, 0x63, 0x68, 0x61, 0x6e, + 0x6e, 0x65, 
0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x67, + 0x0a, 0x10, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, + 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x0f, 0x63, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x28, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x5f, + 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x12, 0x38, 0x0a, 0x18, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, + 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x16, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, + 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5e, 0x0a, 0x1d, + 0x70, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, + 0x72, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x19, 0x70, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x75, 0x66, 0x66, + 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x5b, 0x0a, 0x0c, + 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x61, 0x72, 0x67, 0x73, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, + 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x52, 0x0b, 0x63, 0x68, + 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x9d, 0x02, 0x0a, 0x0e, 0x53, 0x73, + 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x3f, 0x0a, 0x0a, + 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x52, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x49, 0x0a, + 0x0b, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 
0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x42, 0x06, 0xb8, 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x52, 0x0a, 0x70, 0x72, + 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x3f, 0x0a, 0x0a, 0x63, 0x65, 0x72, 0x74, + 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, + 0x63, 0x65, 0x72, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x3a, 0x3e, 0x9a, 0xc5, 0x88, 0x1e, 0x39, + 0x0a, 0x37, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x73, 0x6c, 0x43, 0x72, + 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0x60, 0x0a, 0x16, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x3a, 0x46, 0x9a, 0xc5, 0x88, 0x1e, 0x41, 0x0a, 0x3f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, + 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x47, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, + 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0x92, 0x03, 0x0a, 0x12, + 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, + 0x6c, 0x73, 0x12, 0x66, 0x0a, 0x0f, 0x73, 0x73, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x73, 0x6c, 0x43, 0x72, 0x65, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x73, 0x73, 0x6c, 0x43, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x3f, 0x0a, 0x0e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x0d, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x72, 0x0a, 0x11, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, + 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x47, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, + 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x10, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x3a, + 0x42, 0x9a, 0xc5, 0x88, 0x1e, 0x3d, 0x0a, 0x3b, 0x65, 0x6e, 
0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, + 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x42, 0x1b, 0x0a, 0x14, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, + 0x6c, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, + 0x1a, 0x88, 0x0f, 0x0a, 0x0f, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x4c, 0x0a, 0x15, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x5f, 0x65, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x48, 0x00, 0x52, 0x13, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, + 0x65, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x32, 0x0a, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x12, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, + 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x9e, 0x01, 0x0a, 0x1a, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, + 0x6a, 0x77, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x5f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, + 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4a, 0x57, 0x54, + 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x73, 0x48, 0x00, 0x52, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x4a, 0x77, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x72, 0x0a, 0x0a, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x69, 0x61, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x51, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, + 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x09, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x61, 0x6d, + 0x12, 0x7d, 0x0a, 0x0b, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x5a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 
0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, + 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, + 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x43, 0x72, 0x65, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x48, 0x00, 0x52, 0x0a, 0x66, 0x72, 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, + 0x6a, 0x0a, 0x0b, 0x73, 0x74, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, + 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, + 0x6c, 0x73, 0x2e, 0x53, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, + 0x0a, 0x73, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0xd9, 0x01, 0x0a, 0x22, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4a, 0x57, + 0x54, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, + 0x6c, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6a, 0x73, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x34, 0x0a, + 0x16, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x6c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x4c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x3a, 0x62, 0x9a, 0xc5, 0x88, 0x1e, 0x5d, 0x0a, 0x5b, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, + 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x4a, 0x57, 0x54, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x72, 0x65, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0xcc, 0x01, 0x0a, 0x14, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, + 0x12, 0x2f, 0x0a, 0x13, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x3a, 0x54, 0x9a, 0xc5, 0x88, 0x1e, 0x4f, 0x0a, 0x4d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, + 0x65, 0x72, 
0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, + 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x73, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0xea, 0x01, 0x0a, 0x1d, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x72, + 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, + 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x5d, 0x9a, 0xc5, 0x88, 0x1e, 0x58, 0x0a, 0x56, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x72, 0x6f, 0x6d, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x1a, 0xd7, 0x03, 0x0a, 0x0a, 0x53, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x12, 0x3b, 0x0a, 0x1a, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x65, 0x78, 0x63, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x75, 0x72, 0x69, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x78, 0x63, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x72, 0x69, 0x12, + 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x61, + 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, + 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x30, 0x0a, + 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x35, 0x0a, 0x12, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x52, 0x10, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12, 0x35, 0x0a, 0x12, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 
0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x10, 0x73, 0x75, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, + 0x10, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12, 0x28, 0x0a, 0x10, 0x61, 0x63, 0x74, 0x6f, 0x72, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0e, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, + 0x65, 0x3a, 0x4a, 0x9a, 0xc5, 0x88, 0x1e, 0x45, 0x0a, 0x43, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, + 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, + 0x6c, 0x73, 0x2e, 0x53, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x3f, 0x9a, + 0xc5, 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, + 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x42, 0x1b, + 0x0a, 0x14, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xc3, 0x02, 0x0a, 0x0b, + 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x12, 0x56, 0x0a, 0x04, 0x61, + 0x72, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, + 0x72, 0x67, 0x73, 0x2e, 0x41, 0x72, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x61, + 0x72, 0x67, 0x73, 0x1a, 0x63, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, + 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x16, 0x0a, 0x0f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x77, 0x0a, 0x09, 0x41, 0x72, 0x67, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x54, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, + 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 
0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, + 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, + 0x70, 0x63, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, + 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x17, 0x0a, 0x10, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, + 0x01, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x42, 0x84, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, + 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_grpc_service_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_grpc_service_proto_rawDescData = file_envoy_config_core_v3_grpc_service_proto_rawDesc +) + +func file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_grpc_service_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_grpc_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_grpc_service_proto_rawDescData) + }) + return file_envoy_config_core_v3_grpc_service_proto_rawDescData +} + +var file_envoy_config_core_v3_grpc_service_proto_msgTypes = make([]protoimpl.MessageInfo, 14) +var file_envoy_config_core_v3_grpc_service_proto_goTypes = []interface{}{ + (*GrpcService)(nil), // 0: envoy.config.core.v3.GrpcService + (*GrpcService_EnvoyGrpc)(nil), // 1: envoy.config.core.v3.GrpcService.EnvoyGrpc + (*GrpcService_GoogleGrpc)(nil), // 2: envoy.config.core.v3.GrpcService.GoogleGrpc + (*GrpcService_GoogleGrpc_SslCredentials)(nil), // 3: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials + (*GrpcService_GoogleGrpc_GoogleLocalCredentials)(nil), // 4: envoy.config.core.v3.GrpcService.GoogleGrpc.GoogleLocalCredentials + (*GrpcService_GoogleGrpc_ChannelCredentials)(nil), // 5: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials + (*GrpcService_GoogleGrpc_CallCredentials)(nil), // 6: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials + (*GrpcService_GoogleGrpc_ChannelArgs)(nil), // 7: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs + (*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials)(nil), // 8: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.ServiceAccountJWTAccessCredentials + (*GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials)(nil), // 
9: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials + (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin)(nil), // 10: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin + (*GrpcService_GoogleGrpc_CallCredentials_StsService)(nil), // 11: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.StsService + (*GrpcService_GoogleGrpc_ChannelArgs_Value)(nil), // 12: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.Value + nil, // 13: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.ArgsEntry + (*durationpb.Duration)(nil), // 14: google.protobuf.Duration + (*HeaderValue)(nil), // 15: envoy.config.core.v3.HeaderValue + (*RetryPolicy)(nil), // 16: envoy.config.core.v3.RetryPolicy + (*wrapperspb.UInt32Value)(nil), // 17: google.protobuf.UInt32Value + (*structpb.Struct)(nil), // 18: google.protobuf.Struct + (*DataSource)(nil), // 19: envoy.config.core.v3.DataSource + (*emptypb.Empty)(nil), // 20: google.protobuf.Empty + (*anypb.Any)(nil), // 21: google.protobuf.Any +} +var file_envoy_config_core_v3_grpc_service_proto_depIdxs = []int32{ + 1, // 0: envoy.config.core.v3.GrpcService.envoy_grpc:type_name -> envoy.config.core.v3.GrpcService.EnvoyGrpc + 2, // 1: envoy.config.core.v3.GrpcService.google_grpc:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc + 14, // 2: envoy.config.core.v3.GrpcService.timeout:type_name -> google.protobuf.Duration + 15, // 3: envoy.config.core.v3.GrpcService.initial_metadata:type_name -> envoy.config.core.v3.HeaderValue + 16, // 4: envoy.config.core.v3.GrpcService.retry_policy:type_name -> envoy.config.core.v3.RetryPolicy + 16, // 5: envoy.config.core.v3.GrpcService.EnvoyGrpc.retry_policy:type_name -> envoy.config.core.v3.RetryPolicy + 17, // 6: envoy.config.core.v3.GrpcService.EnvoyGrpc.max_receive_message_length:type_name -> google.protobuf.UInt32Value + 5, // 7: envoy.config.core.v3.GrpcService.GoogleGrpc.channel_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials + 6, // 8: envoy.config.core.v3.GrpcService.GoogleGrpc.call_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials + 18, // 9: envoy.config.core.v3.GrpcService.GoogleGrpc.config:type_name -> google.protobuf.Struct + 17, // 10: envoy.config.core.v3.GrpcService.GoogleGrpc.per_stream_buffer_limit_bytes:type_name -> google.protobuf.UInt32Value + 7, // 11: envoy.config.core.v3.GrpcService.GoogleGrpc.channel_args:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs + 19, // 12: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials.root_certs:type_name -> envoy.config.core.v3.DataSource + 19, // 13: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials.private_key:type_name -> envoy.config.core.v3.DataSource + 19, // 14: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials.cert_chain:type_name -> envoy.config.core.v3.DataSource + 3, // 15: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials.ssl_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials + 20, // 16: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials.google_default:type_name -> google.protobuf.Empty + 4, // 17: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials.local_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.GoogleLocalCredentials + 20, // 18: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.google_compute_engine:type_name -> google.protobuf.Empty + 
8, // 19: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.service_account_jwt_access:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.ServiceAccountJWTAccessCredentials + 9, // 20: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.google_iam:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials + 10, // 21: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.from_plugin:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin + 11, // 22: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.sts_service:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.StsService + 13, // 23: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.args:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.ArgsEntry + 21, // 24: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin.typed_config:type_name -> google.protobuf.Any + 12, // 25: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.ArgsEntry.value:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.Value + 26, // [26:26] is the sub-list for method output_type + 26, // [26:26] is the sub-list for method input_type + 26, // [26:26] is the sub-list for extension type_name + 26, // [26:26] is the sub-list for extension extendee + 0, // [0:26] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_grpc_service_proto_init() } +func file_envoy_config_core_v3_grpc_service_proto_init() { + if File_envoy_config_core_v3_grpc_service_proto != nil { + return + } + file_envoy_config_core_v3_base_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_grpc_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService_EnvoyGrpc); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService_GoogleGrpc); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService_GoogleGrpc_SslCredentials); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService_GoogleGrpc_GoogleLocalCredentials); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService_GoogleGrpc_ChannelCredentials); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_envoy_config_core_v3_grpc_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService_GoogleGrpc_CallCredentials); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService_GoogleGrpc_ChannelArgs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService_GoogleGrpc_CallCredentials_StsService); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcService_GoogleGrpc_ChannelArgs_Value); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*GrpcService_EnvoyGrpc_)(nil), + (*GrpcService_GoogleGrpc_)(nil), + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[5].OneofWrappers = []interface{}{ + (*GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials)(nil), + (*GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault)(nil), + (*GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials)(nil), + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[6].OneofWrappers = []interface{}{ + (*GrpcService_GoogleGrpc_CallCredentials_AccessToken)(nil), + (*GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine)(nil), + (*GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken)(nil), + (*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess)(nil), + (*GrpcService_GoogleGrpc_CallCredentials_GoogleIam)(nil), + (*GrpcService_GoogleGrpc_CallCredentials_FromPlugin)(nil), + (*GrpcService_GoogleGrpc_CallCredentials_StsService_)(nil), + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[10].OneofWrappers = []interface{}{ + (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig)(nil), + } + file_envoy_config_core_v3_grpc_service_proto_msgTypes[12].OneofWrappers = []interface{}{ + 
(*GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue)(nil), + (*GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_grpc_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 14, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_grpc_service_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_grpc_service_proto_depIdxs, + MessageInfos: file_envoy_config_core_v3_grpc_service_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_grpc_service_proto = out.File + file_envoy_config_core_v3_grpc_service_proto_rawDesc = nil + file_envoy_config_core_v3_grpc_service_proto_goTypes = nil + file_envoy_config_core_v3_grpc_service_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.validate.go new file mode 100644 index 0000000000..9ef41b0774 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service.pb.validate.go @@ -0,0 +1,2579 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/core/v3/grpc_service.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on GrpcService with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *GrpcService) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GrpcService with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in GrpcServiceMultiError, or +// nil if none found. 
+func (m *GrpcService) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcServiceValidationError{ + field: "Timeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcServiceValidationError{ + field: "Timeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcServiceValidationError{ + field: "Timeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetInitialMetadata() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcServiceValidationError{ + field: fmt.Sprintf("InitialMetadata[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcServiceValidationError{ + field: fmt.Sprintf("InitialMetadata[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcServiceValidationError{ + field: fmt.Sprintf("InitialMetadata[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetRetryPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcServiceValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcServiceValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcServiceValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofTargetSpecifierPresent := false + switch v := m.TargetSpecifier.(type) { + case *GrpcService_EnvoyGrpc_: + if v == nil { + err := GrpcServiceValidationError{ + field: "TargetSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTargetSpecifierPresent = true + + if all { + switch v := interface{}(m.GetEnvoyGrpc()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcServiceValidationError{ + field: "EnvoyGrpc", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcServiceValidationError{ + field: "EnvoyGrpc", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else 
if v, ok := interface{}(m.GetEnvoyGrpc()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcServiceValidationError{ + field: "EnvoyGrpc", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *GrpcService_GoogleGrpc_: + if v == nil { + err := GrpcServiceValidationError{ + field: "TargetSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTargetSpecifierPresent = true + + if all { + switch v := interface{}(m.GetGoogleGrpc()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcServiceValidationError{ + field: "GoogleGrpc", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcServiceValidationError{ + field: "GoogleGrpc", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGoogleGrpc()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcServiceValidationError{ + field: "GoogleGrpc", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofTargetSpecifierPresent { + err := GrpcServiceValidationError{ + field: "TargetSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return GrpcServiceMultiError(errors) + } + + return nil +} + +// GrpcServiceMultiError is an error wrapping multiple validation errors +// returned by GrpcService.ValidateAll() if the designated constraints aren't met. +type GrpcServiceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcServiceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcServiceMultiError) AllErrors() []error { return m } + +// GrpcServiceValidationError is the validation error returned by +// GrpcService.Validate if the designated constraints aren't met. +type GrpcServiceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcServiceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrpcServiceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrpcServiceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrpcServiceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e GrpcServiceValidationError) ErrorName() string { return "GrpcServiceValidationError" } + +// Error satisfies the builtin error interface +func (e GrpcServiceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcService.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcServiceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcServiceValidationError{} + +// Validate checks the field values on GrpcService_EnvoyGrpc with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *GrpcService_EnvoyGrpc) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GrpcService_EnvoyGrpc with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GrpcService_EnvoyGrpcMultiError, or nil if none found. +func (m *GrpcService_EnvoyGrpc) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService_EnvoyGrpc) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetClusterName()) < 1 { + err := GrpcService_EnvoyGrpcValidationError{ + field: "ClusterName", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetAuthority()) < 0 { + err := GrpcService_EnvoyGrpcValidationError{ + field: "Authority", + reason: "value length must be at least 0 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetAuthority()) > 16384 { + err := GrpcService_EnvoyGrpcValidationError{ + field: "Authority", + reason: "value length must be at most 16384 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_GrpcService_EnvoyGrpc_Authority_Pattern.MatchString(m.GetAuthority()) { + err := GrpcService_EnvoyGrpcValidationError{ + field: "Authority", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetRetryPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_EnvoyGrpcValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_EnvoyGrpcValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_EnvoyGrpcValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMaxReceiveMessageLength()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_EnvoyGrpcValidationError{ + field: "MaxReceiveMessageLength", + reason: "embedded message 
failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_EnvoyGrpcValidationError{ + field: "MaxReceiveMessageLength", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxReceiveMessageLength()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_EnvoyGrpcValidationError{ + field: "MaxReceiveMessageLength", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for SkipEnvoyHeaders + + if len(errors) > 0 { + return GrpcService_EnvoyGrpcMultiError(errors) + } + + return nil +} + +// GrpcService_EnvoyGrpcMultiError is an error wrapping multiple validation +// errors returned by GrpcService_EnvoyGrpc.ValidateAll() if the designated +// constraints aren't met. +type GrpcService_EnvoyGrpcMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcService_EnvoyGrpcMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcService_EnvoyGrpcMultiError) AllErrors() []error { return m } + +// GrpcService_EnvoyGrpcValidationError is the validation error returned by +// GrpcService_EnvoyGrpc.Validate if the designated constraints aren't met. +type GrpcService_EnvoyGrpcValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcService_EnvoyGrpcValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrpcService_EnvoyGrpcValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrpcService_EnvoyGrpcValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrpcService_EnvoyGrpcValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GrpcService_EnvoyGrpcValidationError) ErrorName() string { + return "GrpcService_EnvoyGrpcValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcService_EnvoyGrpcValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcService_EnvoyGrpc.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcService_EnvoyGrpcValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcService_EnvoyGrpcValidationError{} + +var _GrpcService_EnvoyGrpc_Authority_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on GrpcService_GoogleGrpc with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *GrpcService_GoogleGrpc) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GrpcService_GoogleGrpc with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GrpcService_GoogleGrpcMultiError, or nil if none found. 
+func (m *GrpcService_GoogleGrpc) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService_GoogleGrpc) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetTargetUri()) < 1 { + err := GrpcService_GoogleGrpcValidationError{ + field: "TargetUri", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetChannelCredentials()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpcValidationError{ + field: "ChannelCredentials", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpcValidationError{ + field: "ChannelCredentials", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetChannelCredentials()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpcValidationError{ + field: "ChannelCredentials", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetCallCredentials() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpcValidationError{ + field: fmt.Sprintf("CallCredentials[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpcValidationError{ + field: fmt.Sprintf("CallCredentials[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpcValidationError{ + field: fmt.Sprintf("CallCredentials[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if utf8.RuneCountInString(m.GetStatPrefix()) < 1 { + err := GrpcService_GoogleGrpcValidationError{ + field: "StatPrefix", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for CredentialsFactoryName + + if all { + switch v := interface{}(m.GetConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpcValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpcValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpcValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPerStreamBufferLimitBytes()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = 
append(errors, GrpcService_GoogleGrpcValidationError{ + field: "PerStreamBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpcValidationError{ + field: "PerStreamBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPerStreamBufferLimitBytes()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpcValidationError{ + field: "PerStreamBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetChannelArgs()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpcValidationError{ + field: "ChannelArgs", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpcValidationError{ + field: "ChannelArgs", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetChannelArgs()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpcValidationError{ + field: "ChannelArgs", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return GrpcService_GoogleGrpcMultiError(errors) + } + + return nil +} + +// GrpcService_GoogleGrpcMultiError is an error wrapping multiple validation +// errors returned by GrpcService_GoogleGrpc.ValidateAll() if the designated +// constraints aren't met. +type GrpcService_GoogleGrpcMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcService_GoogleGrpcMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcService_GoogleGrpcMultiError) AllErrors() []error { return m } + +// GrpcService_GoogleGrpcValidationError is the validation error returned by +// GrpcService_GoogleGrpc.Validate if the designated constraints aren't met. +type GrpcService_GoogleGrpcValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcService_GoogleGrpcValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrpcService_GoogleGrpcValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrpcService_GoogleGrpcValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrpcService_GoogleGrpcValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e GrpcService_GoogleGrpcValidationError) ErrorName() string { + return "GrpcService_GoogleGrpcValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcService_GoogleGrpcValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcService_GoogleGrpc.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcService_GoogleGrpcValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcService_GoogleGrpcValidationError{} + +// Validate checks the field values on GrpcService_GoogleGrpc_SslCredentials +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *GrpcService_GoogleGrpc_SslCredentials) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GrpcService_GoogleGrpc_SslCredentials +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// GrpcService_GoogleGrpc_SslCredentialsMultiError, or nil if none found. +func (m *GrpcService_GoogleGrpc_SslCredentials) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService_GoogleGrpc_SslCredentials) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetRootCerts()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_SslCredentialsValidationError{ + field: "RootCerts", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_SslCredentialsValidationError{ + field: "RootCerts", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRootCerts()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_SslCredentialsValidationError{ + field: "RootCerts", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPrivateKey()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_SslCredentialsValidationError{ + field: "PrivateKey", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_SslCredentialsValidationError{ + field: "PrivateKey", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPrivateKey()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_SslCredentialsValidationError{ + field: "PrivateKey", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCertChain()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, 
GrpcService_GoogleGrpc_SslCredentialsValidationError{ + field: "CertChain", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_SslCredentialsValidationError{ + field: "CertChain", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCertChain()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_SslCredentialsValidationError{ + field: "CertChain", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return GrpcService_GoogleGrpc_SslCredentialsMultiError(errors) + } + + return nil +} + +// GrpcService_GoogleGrpc_SslCredentialsMultiError is an error wrapping +// multiple validation errors returned by +// GrpcService_GoogleGrpc_SslCredentials.ValidateAll() if the designated +// constraints aren't met. +type GrpcService_GoogleGrpc_SslCredentialsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcService_GoogleGrpc_SslCredentialsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcService_GoogleGrpc_SslCredentialsMultiError) AllErrors() []error { return m } + +// GrpcService_GoogleGrpc_SslCredentialsValidationError is the validation error +// returned by GrpcService_GoogleGrpc_SslCredentials.Validate if the +// designated constraints aren't met. +type GrpcService_GoogleGrpc_SslCredentialsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) ErrorName() string { + return "GrpcService_GoogleGrpc_SslCredentialsValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcService_GoogleGrpc_SslCredentials.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcService_GoogleGrpc_SslCredentialsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcService_GoogleGrpc_SslCredentialsValidationError{} + +// Validate checks the field values on +// GrpcService_GoogleGrpc_GoogleLocalCredentials with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *GrpcService_GoogleGrpc_GoogleLocalCredentials) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// GrpcService_GoogleGrpc_GoogleLocalCredentials with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// GrpcService_GoogleGrpc_GoogleLocalCredentialsMultiError, or nil if none found. +func (m *GrpcService_GoogleGrpc_GoogleLocalCredentials) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService_GoogleGrpc_GoogleLocalCredentials) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return GrpcService_GoogleGrpc_GoogleLocalCredentialsMultiError(errors) + } + + return nil +} + +// GrpcService_GoogleGrpc_GoogleLocalCredentialsMultiError is an error wrapping +// multiple validation errors returned by +// GrpcService_GoogleGrpc_GoogleLocalCredentials.ValidateAll() if the +// designated constraints aren't met. +type GrpcService_GoogleGrpc_GoogleLocalCredentialsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcService_GoogleGrpc_GoogleLocalCredentialsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcService_GoogleGrpc_GoogleLocalCredentialsMultiError) AllErrors() []error { return m } + +// GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError is the +// validation error returned by +// GrpcService_GoogleGrpc_GoogleLocalCredentials.Validate if the designated +// constraints aren't met. +type GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) ErrorName() string { + return "GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcService_GoogleGrpc_GoogleLocalCredentials.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError{} + +// Validate checks the field values on +// GrpcService_GoogleGrpc_ChannelCredentials with the rules defined in the +// proto definition for this message. 
If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *GrpcService_GoogleGrpc_ChannelCredentials) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// GrpcService_GoogleGrpc_ChannelCredentials with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// GrpcService_GoogleGrpc_ChannelCredentialsMultiError, or nil if none found. +func (m *GrpcService_GoogleGrpc_ChannelCredentials) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofCredentialSpecifierPresent := false + switch v := m.CredentialSpecifier.(type) { + case *GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials: + if v == nil { + err := GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true + + if all { + switch v := interface{}(m.GetSslCredentials()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "SslCredentials", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "SslCredentials", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSslCredentials()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "SslCredentials", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault: + if v == nil { + err := GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true + + if all { + switch v := interface{}(m.GetGoogleDefault()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "GoogleDefault", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "GoogleDefault", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGoogleDefault()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "GoogleDefault", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials: + if v == nil { + err := GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a 
typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true + + if all { + switch v := interface{}(m.GetLocalCredentials()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "LocalCredentials", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "LocalCredentials", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalCredentials()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "LocalCredentials", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofCredentialSpecifierPresent { + err := GrpcService_GoogleGrpc_ChannelCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return GrpcService_GoogleGrpc_ChannelCredentialsMultiError(errors) + } + + return nil +} + +// GrpcService_GoogleGrpc_ChannelCredentialsMultiError is an error wrapping +// multiple validation errors returned by +// GrpcService_GoogleGrpc_ChannelCredentials.ValidateAll() if the designated +// constraints aren't met. +type GrpcService_GoogleGrpc_ChannelCredentialsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcService_GoogleGrpc_ChannelCredentialsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcService_GoogleGrpc_ChannelCredentialsMultiError) AllErrors() []error { return m } + +// GrpcService_GoogleGrpc_ChannelCredentialsValidationError is the validation +// error returned by GrpcService_GoogleGrpc_ChannelCredentials.Validate if the +// designated constraints aren't met. +type GrpcService_GoogleGrpc_ChannelCredentialsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) ErrorName() string { + return "GrpcService_GoogleGrpc_ChannelCredentialsValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcService_GoogleGrpc_ChannelCredentials.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcService_GoogleGrpc_ChannelCredentialsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcService_GoogleGrpc_ChannelCredentialsValidationError{} + +// Validate checks the field values on GrpcService_GoogleGrpc_CallCredentials +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *GrpcService_GoogleGrpc_CallCredentials) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// GrpcService_GoogleGrpc_CallCredentials with the rules defined in the proto +// definition for this message. If any rules are violated, the result is a +// list of violation errors wrapped in +// GrpcService_GoogleGrpc_CallCredentialsMultiError, or nil if none found. +func (m *GrpcService_GoogleGrpc_CallCredentials) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofCredentialSpecifierPresent := false + switch v := m.CredentialSpecifier.(type) { + case *GrpcService_GoogleGrpc_CallCredentials_AccessToken: + if v == nil { + err := GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true + // no validation rules for AccessToken + case *GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine: + if v == nil { + err := GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true + + if all { + switch v := interface{}(m.GetGoogleComputeEngine()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "GoogleComputeEngine", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "GoogleComputeEngine", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGoogleComputeEngine()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "GoogleComputeEngine", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken: + if v == nil { + err := 
GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true + // no validation rules for GoogleRefreshToken + case *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess: + if v == nil { + err := GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true + + if all { + switch v := interface{}(m.GetServiceAccountJwtAccess()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "ServiceAccountJwtAccess", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "ServiceAccountJwtAccess", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetServiceAccountJwtAccess()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "ServiceAccountJwtAccess", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *GrpcService_GoogleGrpc_CallCredentials_GoogleIam: + if v == nil { + err := GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true + + if all { + switch v := interface{}(m.GetGoogleIam()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "GoogleIam", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "GoogleIam", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGoogleIam()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "GoogleIam", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *GrpcService_GoogleGrpc_CallCredentials_FromPlugin: + if v == nil { + err := GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true + + if all { + switch v := interface{}(m.GetFromPlugin()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "FromPlugin", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, 
GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "FromPlugin", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFromPlugin()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "FromPlugin", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *GrpcService_GoogleGrpc_CallCredentials_StsService_: + if v == nil { + err := GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofCredentialSpecifierPresent = true + + if all { + switch v := interface{}(m.GetStsService()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "StsService", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "StsService", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStsService()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "StsService", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofCredentialSpecifierPresent { + err := GrpcService_GoogleGrpc_CallCredentialsValidationError{ + field: "CredentialSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return GrpcService_GoogleGrpc_CallCredentialsMultiError(errors) + } + + return nil +} + +// GrpcService_GoogleGrpc_CallCredentialsMultiError is an error wrapping +// multiple validation errors returned by +// GrpcService_GoogleGrpc_CallCredentials.ValidateAll() if the designated +// constraints aren't met. +type GrpcService_GoogleGrpc_CallCredentialsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcService_GoogleGrpc_CallCredentialsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcService_GoogleGrpc_CallCredentialsMultiError) AllErrors() []error { return m } + +// GrpcService_GoogleGrpc_CallCredentialsValidationError is the validation +// error returned by GrpcService_GoogleGrpc_CallCredentials.Validate if the +// designated constraints aren't met. +type GrpcService_GoogleGrpc_CallCredentialsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) ErrorName() string { + return "GrpcService_GoogleGrpc_CallCredentialsValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcService_GoogleGrpc_CallCredentials.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcService_GoogleGrpc_CallCredentialsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcService_GoogleGrpc_CallCredentialsValidationError{} + +// Validate checks the field values on GrpcService_GoogleGrpc_ChannelArgs with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *GrpcService_GoogleGrpc_ChannelArgs) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GrpcService_GoogleGrpc_ChannelArgs +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// GrpcService_GoogleGrpc_ChannelArgsMultiError, or nil if none found. +func (m *GrpcService_GoogleGrpc_ChannelArgs) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + { + sorted_keys := make([]string, len(m.GetArgs())) + i := 0 + for key := range m.GetArgs() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetArgs()[key] + _ = val + + // no validation rules for Args[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_ChannelArgsValidationError{ + field: fmt.Sprintf("Args[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_ChannelArgsValidationError{ + field: fmt.Sprintf("Args[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_ChannelArgsValidationError{ + field: fmt.Sprintf("Args[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + if len(errors) > 0 { + return GrpcService_GoogleGrpc_ChannelArgsMultiError(errors) + } + + return nil +} + +// GrpcService_GoogleGrpc_ChannelArgsMultiError is an error wrapping multiple +// validation errors returned by +// GrpcService_GoogleGrpc_ChannelArgs.ValidateAll() if the designated +// constraints aren't met. +type GrpcService_GoogleGrpc_ChannelArgsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m GrpcService_GoogleGrpc_ChannelArgsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcService_GoogleGrpc_ChannelArgsMultiError) AllErrors() []error { return m } + +// GrpcService_GoogleGrpc_ChannelArgsValidationError is the validation error +// returned by GrpcService_GoogleGrpc_ChannelArgs.Validate if the designated +// constraints aren't met. +type GrpcService_GoogleGrpc_ChannelArgsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcService_GoogleGrpc_ChannelArgsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrpcService_GoogleGrpc_ChannelArgsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrpcService_GoogleGrpc_ChannelArgsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrpcService_GoogleGrpc_ChannelArgsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GrpcService_GoogleGrpc_ChannelArgsValidationError) ErrorName() string { + return "GrpcService_GoogleGrpc_ChannelArgsValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcService_GoogleGrpc_ChannelArgsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcService_GoogleGrpc_ChannelArgs.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcService_GoogleGrpc_ChannelArgsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcService_GoogleGrpc_ChannelArgsValidationError{} + +// Validate checks the field values on +// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsMultiError, +// or nil if none found. 
+func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for JsonKey + + // no validation rules for TokenLifetimeSeconds + + if len(errors) > 0 { + return GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsMultiError(errors) + } + + return nil +} + +// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsMultiError +// is an error wrapping multiple validation errors returned by +// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials.ValidateAll() +// if the designated constraints aren't met. +type GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsMultiError) AllErrors() []error { + return m +} + +// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError +// is the validation error returned by +// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials.Validate +// if the designated constraints aren't met. +type GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) Key() bool { + return e.key +} + +// ErrorName returns error name. 
+func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) ErrorName() string { + return "GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError{} + +// Validate checks the field values on +// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsMultiError, or +// nil if none found. +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for AuthorizationToken + + // no validation rules for AuthoritySelector + + if len(errors) > 0 { + return GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsMultiError(errors) + } + + return nil +} + +// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsMultiError is an +// error wrapping multiple validation errors returned by +// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials.ValidateAll() +// if the designated constraints aren't met. +type GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsMultiError) AllErrors() []error { + return m +} + +// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError +// is the validation error returned by +// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials.Validate if the +// designated constraints aren't met. +type GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) Key() bool { + return e.key +} + +// ErrorName returns error name. +func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) ErrorName() string { + return "GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError{} + +// Validate checks the field values on +// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginMultiError, +// or nil if none found. 
+func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + switch v := m.ConfigType.(type) { + case *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig: + if v == nil { + err := GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginMultiError(errors) + } + + return nil +} + +// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginMultiError +// is an error wrapping multiple validation errors returned by +// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin.ValidateAll() +// if the designated constraints aren't met. +type GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginMultiError) AllErrors() []error { + return m +} + +// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError +// is the validation error returned by +// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin.Validate +// if the designated constraints aren't met. +type GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. 
+func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) Key() bool { + return e.key +} + +// ErrorName returns error name. +func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) ErrorName() string { + return "GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{} + +// Validate checks the field values on +// GrpcService_GoogleGrpc_CallCredentials_StsService with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// GrpcService_GoogleGrpc_CallCredentials_StsService with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// GrpcService_GoogleGrpc_CallCredentials_StsServiceMultiError, or nil if none found. +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for TokenExchangeServiceUri + + // no validation rules for Resource + + // no validation rules for Audience + + // no validation rules for Scope + + // no validation rules for RequestedTokenType + + if utf8.RuneCountInString(m.GetSubjectTokenPath()) < 1 { + err := GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError{ + field: "SubjectTokenPath", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetSubjectTokenType()) < 1 { + err := GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError{ + field: "SubjectTokenType", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for ActorTokenPath + + // no validation rules for ActorTokenType + + if len(errors) > 0 { + return GrpcService_GoogleGrpc_CallCredentials_StsServiceMultiError(errors) + } + + return nil +} + +// GrpcService_GoogleGrpc_CallCredentials_StsServiceMultiError is an error +// wrapping multiple validation errors returned by +// GrpcService_GoogleGrpc_CallCredentials_StsService.ValidateAll() if the +// designated constraints aren't met. 
+type GrpcService_GoogleGrpc_CallCredentials_StsServiceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcService_GoogleGrpc_CallCredentials_StsServiceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcService_GoogleGrpc_CallCredentials_StsServiceMultiError) AllErrors() []error { return m } + +// GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError is the +// validation error returned by +// GrpcService_GoogleGrpc_CallCredentials_StsService.Validate if the +// designated constraints aren't met. +type GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) ErrorName() string { + return "GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcService_GoogleGrpc_CallCredentials_StsService.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError{} + +// Validate checks the field values on GrpcService_GoogleGrpc_ChannelArgs_Value +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// GrpcService_GoogleGrpc_ChannelArgs_Value with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// GrpcService_GoogleGrpc_ChannelArgs_ValueMultiError, or nil if none found. 
+func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofValueSpecifierPresent := false + switch v := m.ValueSpecifier.(type) { + case *GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue: + if v == nil { + err := GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError{ + field: "ValueSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofValueSpecifierPresent = true + // no validation rules for StringValue + case *GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue: + if v == nil { + err := GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError{ + field: "ValueSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofValueSpecifierPresent = true + // no validation rules for IntValue + default: + _ = v // ensures v is used + } + if !oneofValueSpecifierPresent { + err := GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError{ + field: "ValueSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return GrpcService_GoogleGrpc_ChannelArgs_ValueMultiError(errors) + } + + return nil +} + +// GrpcService_GoogleGrpc_ChannelArgs_ValueMultiError is an error wrapping +// multiple validation errors returned by +// GrpcService_GoogleGrpc_ChannelArgs_Value.ValidateAll() if the designated +// constraints aren't met. +type GrpcService_GoogleGrpc_ChannelArgs_ValueMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcService_GoogleGrpc_ChannelArgs_ValueMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcService_GoogleGrpc_ChannelArgs_ValueMultiError) AllErrors() []error { return m } + +// GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError is the validation +// error returned by GrpcService_GoogleGrpc_ChannelArgs_Value.Validate if the +// designated constraints aren't met. +type GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError) ErrorName() string {
+	return "GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError) Error() string {
+	cause := ""
+	if e.cause != nil {
+		cause = fmt.Sprintf(" | caused by: %v", e.cause)
+	}
+
+	key := ""
+	if e.key {
+		key = "key for "
+	}
+
+	return fmt.Sprintf(
+		"invalid %sGrpcService_GoogleGrpc_ChannelArgs_Value.%s: %s%s",
+		key,
+		e.field,
+		e.reason,
+		cause)
+}
+
+var _ error = GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError{}
+
+var _ interface {
+	Field() string
+	Reason() string
+	Key() bool
+	Cause() error
+	ErrorName() string
+} = GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service_vtproto.pb.go
new file mode 100644
index 0000000000..90d07efa4a
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/grpc_service_vtproto.pb.go
@@ -0,0 +1,1648 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/config/core/v3/grpc_service.proto
+
+package corev3
+
+import (
+	protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+	anypb "github.com/planetscale/vtprotobuf/types/known/anypb"
+	durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb"
+	emptypb "github.com/planetscale/vtprotobuf/types/known/emptypb"
+	structpb "github.com/planetscale/vtprotobuf/types/known/structpb"
+	wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *GrpcService_EnvoyGrpc) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_EnvoyGrpc) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_EnvoyGrpc) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SkipEnvoyHeaders { + i-- + if m.SkipEnvoyHeaders { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.MaxReceiveMessageLength != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxReceiveMessageLength).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.RetryPolicy != nil { + size, err := m.RetryPolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0x12 + } + if len(m.ClusterName) > 0 { + i -= len(m.ClusterName) + copy(dAtA[i:], m.ClusterName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_SslCredentials) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_SslCredentials) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_SslCredentials) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.CertChain != nil { + size, err := m.CertChain.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.PrivateKey != nil { + size, err := m.PrivateKey.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.RootCerts != nil { + size, err := m.RootCerts.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_GoogleLocalCredentials) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + 
} + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_GoogleLocalCredentials) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_GoogleLocalCredentials) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SslCredentials != nil { + size, err := m.SslCredentials.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GoogleDefault != nil { + size, err := (*emptypb.Empty)(m.GoogleDefault).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + 
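Aside (editorial note, not part of the vendored generated file): every MarshalToSizedBufferVTStrict method in this file follows the same backward-writing pattern — the payload is copied into the tail of a pre-sized buffer, then its varint length and the field's tag byte are prepended in front of it. A minimal, self-contained sketch of that pattern follows; it assumes only the protohelpers.EncodeVarint helper imported by this file (which writes a varint ending just before the given index and returns the new start index). The function name and field number are hypothetical, chosen to mirror how string/bytes fields such as ClusterName or JsonKey are emitted above.

package main

import (
	"fmt"

	protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
)

// marshalBytesField (hypothetical) writes payload as field 1, wire type 2
// (tag byte 0xa), backwards into the tail of a buffer, mirroring the
// generated MarshalToSizedBufferVTStrict methods for bytes/string fields.
func marshalBytesField(payload []byte) []byte {
	// Room for 1 tag byte + at most 10 varint length bytes + the payload.
	dAtA := make([]byte, len(payload)+11)
	i := len(dAtA)
	i -= len(payload)
	copy(dAtA[i:], payload) // payload goes in last, at the very end
	i = protohelpers.EncodeVarint(dAtA, i, uint64(len(payload)))
	i--
	dAtA[i] = 0xa // field 1, length-delimited
	return dAtA[i:]
}

func main() {
	// "hi" encoded as field 1 -> 0a 02 68 69
	fmt.Printf("%% x\n", marshalBytesField([]byte("hi")))
}

Writing back-to-front lets each nested message be emitted before its length prefix without a separate intermediate buffer, which is why the generated code computes the size up front (SizeVT) and then fills the buffer from the end.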
+func (m *GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LocalCredentials != nil { + size, err := m.LocalCredentials.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TokenLifetimeSeconds != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TokenLifetimeSeconds)) + i-- + dAtA[i] = 0x10 + } + if len(m.JsonKey) > 0 { + i -= len(m.JsonKey) + copy(dAtA[i:], m.JsonKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.JsonKey))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.AuthoritySelector) > 0 { + i -= len(m.AuthoritySelector) + copy(dAtA[i:], m.AuthoritySelector) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AuthoritySelector))) + i-- + dAtA[i] = 0x12 + } + if len(m.AuthorizationToken) > 0 { + i -= len(m.AuthorizationToken) + copy(dAtA[i:], m.AuthorizationToken) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AuthorizationToken))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m 
*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ActorTokenType) > 0 { + i -= len(m.ActorTokenType) + copy(dAtA[i:], m.ActorTokenType) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ActorTokenType))) + i-- + dAtA[i] = 0x4a + } + if len(m.ActorTokenPath) > 0 { + i -= len(m.ActorTokenPath) + copy(dAtA[i:], m.ActorTokenPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ActorTokenPath))) + i-- + dAtA[i] = 0x42 + } + if len(m.SubjectTokenType) > 0 { + i -= len(m.SubjectTokenType) + copy(dAtA[i:], m.SubjectTokenType) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SubjectTokenType))) + i-- + dAtA[i] = 0x3a + } + if len(m.SubjectTokenPath) > 0 { + i -= len(m.SubjectTokenPath) + copy(dAtA[i:], m.SubjectTokenPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SubjectTokenPath))) + i-- + dAtA[i] = 0x32 + } + if len(m.RequestedTokenType) > 0 { + i -= len(m.RequestedTokenType) + copy(dAtA[i:], m.RequestedTokenType) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RequestedTokenType))) + i-- + dAtA[i] = 0x2a + } + if len(m.Scope) > 0 { + i -= len(m.Scope) + copy(dAtA[i:], m.Scope) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Scope))) + i-- + dAtA[i] = 0x22 + } + if len(m.Audience) > 0 { + i -= len(m.Audience) + copy(dAtA[i:], m.Audience) + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(len(m.Audience))) + i-- + dAtA[i] = 0x1a + } + if len(m.Resource) > 0 { + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x12 + } + if len(m.TokenExchangeServiceUri) > 0 { + i -= len(m.TokenExchangeServiceUri) + copy(dAtA[i:], m.TokenExchangeServiceUri) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TokenExchangeServiceUri))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_CallCredentials_StsService_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_CallCredentials_FromPlugin); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_CallCredentials_GoogleIam); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.CredentialSpecifier.(*GrpcService_GoogleGrpc_CallCredentials_AccessToken); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_AccessToken) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_AccessToken) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.AccessToken) + copy(dAtA[i:], m.AccessToken) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AccessToken))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine) 
MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GoogleComputeEngine != nil { + size, err := (*emptypb.Empty)(m.GoogleComputeEngine).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.GoogleRefreshToken) + copy(dAtA[i:], m.GoogleRefreshToken) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.GoogleRefreshToken))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ServiceAccountJwtAccess != nil { + size, err := m.ServiceAccountJwtAccess.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIam) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIam) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GoogleIam != nil { + size, err := m.GoogleIam.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_CallCredentials_FromPlugin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_FromPlugin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FromPlugin != nil { + size, err := m.FromPlugin.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StsService != nil { + size, err := m.StsService.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else 
{ + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ValueSpecifier.(*GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ValueSpecifier.(*GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringValue) + copy(dAtA[i:], m.StringValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StringValue))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.IntValue)) + i-- + dAtA[i] = 0x10 + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_ChannelArgs) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Args) > 0 { + for k := range m.Args { + v := m.Args[k] + baseI := i + size, err := v.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_GoogleGrpc) 
MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService_GoogleGrpc) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ChannelArgs != nil { + size, err := m.ChannelArgs.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.PerStreamBufferLimitBytes != nil { + size, err := (*wrapperspb.UInt32Value)(m.PerStreamBufferLimitBytes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.Config != nil { + size, err := (*structpb.Struct)(m.Config).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.CredentialsFactoryName) > 0 { + i -= len(m.CredentialsFactoryName) + copy(dAtA[i:], m.CredentialsFactoryName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CredentialsFactoryName))) + i-- + dAtA[i] = 0x2a + } + if len(m.StatPrefix) > 0 { + i -= len(m.StatPrefix) + copy(dAtA[i:], m.StatPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StatPrefix))) + i-- + dAtA[i] = 0x22 + } + if len(m.CallCredentials) > 0 { + for iNdEx := len(m.CallCredentials) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.CallCredentials[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if m.ChannelCredentials != nil { + size, err := m.ChannelCredentials.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.TargetUri) > 0 { + i -= len(m.TargetUri) + copy(dAtA[i:], m.TargetUri) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TargetUri))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcService) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcService) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RetryPolicy != nil { + size, err := m.RetryPolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.InitialMetadata) > 0 { + 
for iNdEx := len(m.InitialMetadata) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.InitialMetadata[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.Timeout != nil { + size, err := (*durationpb.Duration)(m.Timeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if msg, ok := m.TargetSpecifier.(*GrpcService_GoogleGrpc_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.TargetSpecifier.(*GrpcService_EnvoyGrpc_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *GrpcService_EnvoyGrpc_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_EnvoyGrpc_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.EnvoyGrpc != nil { + size, err := m.EnvoyGrpc.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *GrpcService_GoogleGrpc_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcService_GoogleGrpc_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GoogleGrpc != nil { + size, err := m.GoogleGrpc.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *GrpcService_EnvoyGrpc) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClusterName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Authority) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RetryPolicy != nil { + l = m.RetryPolicy.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxReceiveMessageLength != nil { + l = (*wrapperspb.UInt32Value)(m.MaxReceiveMessageLength).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SkipEnvoyHeaders { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_SslCredentials) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RootCerts != nil { + l = m.RootCerts.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PrivateKey != nil { + l = m.PrivateKey.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CertChain != nil { + l = m.CertChain.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_GoogleLocalCredentials) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if 
vtmsg, ok := m.CredentialSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SslCredentials != nil { + l = m.SslCredentials.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GoogleDefault != nil { + l = (*emptypb.Empty)(m.GoogleDefault).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LocalCredentials != nil { + l = m.LocalCredentials.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.JsonKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TokenLifetimeSeconds != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TokenLifetimeSeconds)) + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AuthorizationToken) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.AuthoritySelector) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TokenExchangeServiceUri) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Resource) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Audience) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Scope) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RequestedTokenType) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.SubjectTokenPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.SubjectTokenType) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ActorTokenPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ActorTokenType) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + 
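Every MarshalToSizedBufferVTStrict in this file follows the same convention: SizeVT computes the exact encoded length, one buffer is allocated, and fields are written back-to-front, each prefixed by a varint length and a one-byte tag. A self-contained sketch of that back-to-front framing for a single length-delimited field (field 1 carrying the string "hello"; all names here are illustrative and not part of the vendored code):

package main

import "fmt"

// putVarintBackward writes v as a protobuf varint ending just before offset
// and returns the new start offset, mirroring what protohelpers.EncodeVarint
// is used for in the generated code above.
func putVarintBackward(buf []byte, offset int, v uint64) int {
	n := 1
	for x := v; x >= 0x80; x >>= 7 {
		n++
	}
	offset -= n
	for i := 0; i < n; i++ {
		b := byte(v & 0x7f)
		v >>= 7
		if i != n-1 {
			b |= 0x80 // continuation bit on every byte but the last
		}
		buf[offset+i] = b
	}
	return offset
}

func main() {
	payload := []byte("hello")
	// 1 tag byte + 1 length byte + payload: the same arithmetic SizeVT performs.
	buf := make([]byte, 1+1+len(payload))
	i := len(buf)
	i -= len(payload)
	copy(buf[i:], payload) // value first ...
	i = putVarintBackward(buf, i, uint64(len(payload)))
	i--
	buf[i] = 0xa // ... then its length, then the tag (field 1, wire type 2)
	fmt.Printf("% x\n", buf) // 0a 05 68 65 6c 6c 6f
}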
+func (m *GrpcService_GoogleGrpc_CallCredentials) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.CredentialSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_CallCredentials_AccessToken) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AccessToken) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GoogleComputeEngine != nil { + l = (*emptypb.Empty)(m.GoogleComputeEngine).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.GoogleRefreshToken) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ServiceAccountJwtAccess != nil { + l = m.ServiceAccountJwtAccess.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIam) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GoogleIam != nil { + l = m.GoogleIam.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_CallCredentials_FromPlugin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FromPlugin != nil { + l = m.FromPlugin.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_CallCredentials_StsService_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StsService != nil { + l = m.StsService.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.ValueSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.StringValue) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.IntValue)) + return n +} +func (m *GrpcService_GoogleGrpc_ChannelArgs) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Args) > 0 { + for k, v := range m.Args { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_GoogleGrpc) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TargetUri) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if 
m.ChannelCredentials != nil { + l = m.ChannelCredentials.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.CallCredentials) > 0 { + for _, e := range m.CallCredentials { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.StatPrefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.CredentialsFactoryName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Config != nil { + l = (*structpb.Struct)(m.Config).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PerStreamBufferLimitBytes != nil { + l = (*wrapperspb.UInt32Value)(m.PerStreamBufferLimitBytes).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ChannelArgs != nil { + l = m.ChannelArgs.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.TargetSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Timeout != nil { + l = (*durationpb.Duration)(m.Timeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.InitialMetadata) > 0 { + for _, e := range m.InitialMetadata { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.RetryPolicy != nil { + l = m.RetryPolicy.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcService_EnvoyGrpc_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EnvoyGrpc != nil { + l = m.EnvoyGrpc.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *GrpcService_GoogleGrpc_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GoogleGrpc != nil { + l = m.GoogleGrpc.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.go new file mode 100644 index 0000000000..96ac5fc632 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.go @@ -0,0 +1,1705 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/health_check.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v31 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Endpoint health status. +type HealthStatus int32 + +const ( + // The health status is not known. This is interpreted by Envoy as “HEALTHY“. + HealthStatus_UNKNOWN HealthStatus = 0 + // Healthy. + HealthStatus_HEALTHY HealthStatus = 1 + // Unhealthy. + HealthStatus_UNHEALTHY HealthStatus = 2 + // Connection draining in progress. E.g., + // ``_ + // or + // ``_. + // This is interpreted by Envoy as “UNHEALTHY“. + HealthStatus_DRAINING HealthStatus = 3 + // Health check timed out. This is part of HDS and is interpreted by Envoy as + // “UNHEALTHY“. + HealthStatus_TIMEOUT HealthStatus = 4 + // Degraded. + HealthStatus_DEGRADED HealthStatus = 5 +) + +// Enum value maps for HealthStatus. +var ( + HealthStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "HEALTHY", + 2: "UNHEALTHY", + 3: "DRAINING", + 4: "TIMEOUT", + 5: "DEGRADED", + } + HealthStatus_value = map[string]int32{ + "UNKNOWN": 0, + "HEALTHY": 1, + "UNHEALTHY": 2, + "DRAINING": 3, + "TIMEOUT": 4, + "DEGRADED": 5, + } +) + +func (x HealthStatus) Enum() *HealthStatus { + p := new(HealthStatus) + *p = x + return p +} + +func (x HealthStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HealthStatus) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_health_check_proto_enumTypes[0].Descriptor() +} + +func (HealthStatus) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_health_check_proto_enumTypes[0] +} + +func (x HealthStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HealthStatus.Descriptor instead. +func (HealthStatus) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{0} +} + +type HealthStatusSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An order-independent set of health status. + Statuses []HealthStatus `protobuf:"varint,1,rep,packed,name=statuses,proto3,enum=envoy.config.core.v3.HealthStatus" json:"statuses,omitempty"` +} + +func (x *HealthStatusSet) Reset() { + *x = HealthStatusSet{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthStatusSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthStatusSet) ProtoMessage() {} + +func (x *HealthStatusSet) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthStatusSet.ProtoReflect.Descriptor instead. +func (*HealthStatusSet) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{0} +} + +func (x *HealthStatusSet) GetStatuses() []HealthStatus { + if x != nil { + return x.Statuses + } + return nil +} + +// [#next-free-field: 27] +type HealthCheck struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The time to wait for a health check response. If the timeout is reached the + // health check attempt will be considered a failure. 
+ Timeout *durationpb.Duration `protobuf:"bytes,1,opt,name=timeout,proto3" json:"timeout,omitempty"` + // The interval between health checks. + Interval *durationpb.Duration `protobuf:"bytes,2,opt,name=interval,proto3" json:"interval,omitempty"` + // An optional jitter amount in milliseconds. If specified, Envoy will start health + // checking after for a random time in ms between 0 and initial_jitter. This only + // applies to the first health check. + InitialJitter *durationpb.Duration `protobuf:"bytes,20,opt,name=initial_jitter,json=initialJitter,proto3" json:"initial_jitter,omitempty"` + // An optional jitter amount in milliseconds. If specified, during every + // interval Envoy will add interval_jitter to the wait time. + IntervalJitter *durationpb.Duration `protobuf:"bytes,3,opt,name=interval_jitter,json=intervalJitter,proto3" json:"interval_jitter,omitempty"` + // An optional jitter amount as a percentage of interval_ms. If specified, + // during every interval Envoy will add “interval_ms“ * + // “interval_jitter_percent“ / 100 to the wait time. + // + // If interval_jitter_ms and interval_jitter_percent are both set, both of + // them will be used to increase the wait time. + IntervalJitterPercent uint32 `protobuf:"varint,18,opt,name=interval_jitter_percent,json=intervalJitterPercent,proto3" json:"interval_jitter_percent,omitempty"` + // The number of unhealthy health checks required before a host is marked + // unhealthy. Note that for “http“ health checking if a host responds with a code not in + // :ref:`expected_statuses ` + // or :ref:`retriable_statuses `, + // this threshold is ignored and the host is considered immediately unhealthy. + UnhealthyThreshold *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=unhealthy_threshold,json=unhealthyThreshold,proto3" json:"unhealthy_threshold,omitempty"` + // The number of healthy health checks required before a host is marked + // healthy. Note that during startup, only a single successful health check is + // required to mark a host healthy. + HealthyThreshold *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=healthy_threshold,json=healthyThreshold,proto3" json:"healthy_threshold,omitempty"` + // [#not-implemented-hide:] Non-serving port for health checking. + AltPort *wrapperspb.UInt32Value `protobuf:"bytes,6,opt,name=alt_port,json=altPort,proto3" json:"alt_port,omitempty"` + // Reuse health check connection between health checks. Default is true. + ReuseConnection *wrapperspb.BoolValue `protobuf:"bytes,7,opt,name=reuse_connection,json=reuseConnection,proto3" json:"reuse_connection,omitempty"` + // Types that are assignable to HealthChecker: + // + // *HealthCheck_HttpHealthCheck_ + // *HealthCheck_TcpHealthCheck_ + // *HealthCheck_GrpcHealthCheck_ + // *HealthCheck_CustomHealthCheck_ + HealthChecker isHealthCheck_HealthChecker `protobuf_oneof:"health_checker"` + // The "no traffic interval" is a special health check interval that is used when a cluster has + // never had traffic routed to it. This lower interval allows cluster information to be kept up to + // date, without sending a potentially large amount of active health checking traffic for no + // reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the + // standard health check interval that is defined. Note that this interval takes precedence over + // any other. + // + // The default value for "no traffic interval" is 60 seconds. 
+ NoTrafficInterval *durationpb.Duration `protobuf:"bytes,12,opt,name=no_traffic_interval,json=noTrafficInterval,proto3" json:"no_traffic_interval,omitempty"` + // The "no traffic healthy interval" is a special health check interval that + // is used for hosts that are currently passing active health checking + // (including new hosts) when the cluster has received no traffic. + // + // This is useful for when we want to send frequent health checks with + // “no_traffic_interval“ but then revert to lower frequency “no_traffic_healthy_interval“ once + // a host in the cluster is marked as healthy. + // + // Once a cluster has been used for traffic routing, Envoy will shift back to using the + // standard health check interval that is defined. + // + // If no_traffic_healthy_interval is not set, it will default to the + // no traffic interval and send that interval regardless of health state. + NoTrafficHealthyInterval *durationpb.Duration `protobuf:"bytes,24,opt,name=no_traffic_healthy_interval,json=noTrafficHealthyInterval,proto3" json:"no_traffic_healthy_interval,omitempty"` + // The "unhealthy interval" is a health check interval that is used for hosts that are marked as + // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the + // standard health check interval that is defined. + // + // The default value for "unhealthy interval" is the same as "interval". + UnhealthyInterval *durationpb.Duration `protobuf:"bytes,14,opt,name=unhealthy_interval,json=unhealthyInterval,proto3" json:"unhealthy_interval,omitempty"` + // The "unhealthy edge interval" is a special health check interval that is used for the first + // health check right after a host is marked as unhealthy. For subsequent health checks + // Envoy will shift back to using either "unhealthy interval" if present or the standard health + // check interval that is defined. + // + // The default value for "unhealthy edge interval" is the same as "unhealthy interval". + UnhealthyEdgeInterval *durationpb.Duration `protobuf:"bytes,15,opt,name=unhealthy_edge_interval,json=unhealthyEdgeInterval,proto3" json:"unhealthy_edge_interval,omitempty"` + // The "healthy edge interval" is a special health check interval that is used for the first + // health check right after a host is marked as healthy. For subsequent health checks + // Envoy will shift back to using the standard health check interval that is defined. + // + // The default value for "healthy edge interval" is the same as the default interval. + HealthyEdgeInterval *durationpb.Duration `protobuf:"bytes,16,opt,name=healthy_edge_interval,json=healthyEdgeInterval,proto3" json:"healthy_edge_interval,omitempty"` + // .. attention:: + // This field is deprecated in favor of the extension + // :ref:`event_logger ` and + // :ref:`event_log_path ` + // in the file sink extension. + // + // Specifies the path to the :ref:`health check event log `. + // + // Deprecated: Marked as deprecated in envoy/config/core/v3/health_check.proto. + EventLogPath string `protobuf:"bytes,17,opt,name=event_log_path,json=eventLogPath,proto3" json:"event_log_path,omitempty"` + // A list of event log sinks to process the health check event. + // [#extension-category: envoy.health_check.event_sinks] + EventLogger []*TypedExtensionConfig `protobuf:"bytes,25,rep,name=event_logger,json=eventLogger,proto3" json:"event_logger,omitempty"` + // [#not-implemented-hide:] + // The gRPC service for the health check event service. 
+ // If empty, health check events won't be sent to a remote endpoint. + EventService *EventServiceConfig `protobuf:"bytes,22,opt,name=event_service,json=eventService,proto3" json:"event_service,omitempty"` + // If set to true, health check failure events will always be logged. If set to false, only the + // initial health check failure event will be logged. + // The default value is false. + AlwaysLogHealthCheckFailures bool `protobuf:"varint,19,opt,name=always_log_health_check_failures,json=alwaysLogHealthCheckFailures,proto3" json:"always_log_health_check_failures,omitempty"` + // If set to true, health check success events will always be logged. If set to false, only host addition event will be logged + // if it is the first successful health check, or if the healthy threshold is reached. + // The default value is false. + AlwaysLogHealthCheckSuccess bool `protobuf:"varint,26,opt,name=always_log_health_check_success,json=alwaysLogHealthCheckSuccess,proto3" json:"always_log_health_check_success,omitempty"` + // This allows overriding the cluster TLS settings, just for health check connections. + TlsOptions *HealthCheck_TlsOptions `protobuf:"bytes,21,opt,name=tls_options,json=tlsOptions,proto3" json:"tls_options,omitempty"` + // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's + // :ref:`tranport socket matches `. + // For example, the following match criteria + // + // .. code-block:: yaml + // + // transport_socket_match_criteria: + // useMTLS: true + // + // Will match the following :ref:`cluster socket match ` + // + // .. code-block:: yaml + // + // transport_socket_matches: + // - name: "useMTLS" + // match: + // useMTLS: true + // transport_socket: + // name: envoy.transport_sockets.tls + // config: { ... } # tls socket configuration + // + // If this field is set, then for health checks it will supersede an entry of “envoy.transport_socket“ in the + // :ref:`LbEndpoint.Metadata `. + // This allows using different transport socket capabilities for health checking versus proxying to the + // endpoint. + // + // If the key/values pairs specified do not match any + // :ref:`transport socket matches `, + // the cluster's :ref:`transport socket ` + // will be used for health check socket configuration. + TransportSocketMatchCriteria *structpb.Struct `protobuf:"bytes,23,opt,name=transport_socket_match_criteria,json=transportSocketMatchCriteria,proto3" json:"transport_socket_match_criteria,omitempty"` +} + +func (x *HealthCheck) Reset() { + *x = HealthCheck{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheck) ProtoMessage() {} + +func (x *HealthCheck) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheck.ProtoReflect.Descriptor instead. 
+func (*HealthCheck) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{1} +} + +func (x *HealthCheck) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout + } + return nil +} + +func (x *HealthCheck) GetInterval() *durationpb.Duration { + if x != nil { + return x.Interval + } + return nil +} + +func (x *HealthCheck) GetInitialJitter() *durationpb.Duration { + if x != nil { + return x.InitialJitter + } + return nil +} + +func (x *HealthCheck) GetIntervalJitter() *durationpb.Duration { + if x != nil { + return x.IntervalJitter + } + return nil +} + +func (x *HealthCheck) GetIntervalJitterPercent() uint32 { + if x != nil { + return x.IntervalJitterPercent + } + return 0 +} + +func (x *HealthCheck) GetUnhealthyThreshold() *wrapperspb.UInt32Value { + if x != nil { + return x.UnhealthyThreshold + } + return nil +} + +func (x *HealthCheck) GetHealthyThreshold() *wrapperspb.UInt32Value { + if x != nil { + return x.HealthyThreshold + } + return nil +} + +func (x *HealthCheck) GetAltPort() *wrapperspb.UInt32Value { + if x != nil { + return x.AltPort + } + return nil +} + +func (x *HealthCheck) GetReuseConnection() *wrapperspb.BoolValue { + if x != nil { + return x.ReuseConnection + } + return nil +} + +func (m *HealthCheck) GetHealthChecker() isHealthCheck_HealthChecker { + if m != nil { + return m.HealthChecker + } + return nil +} + +func (x *HealthCheck) GetHttpHealthCheck() *HealthCheck_HttpHealthCheck { + if x, ok := x.GetHealthChecker().(*HealthCheck_HttpHealthCheck_); ok { + return x.HttpHealthCheck + } + return nil +} + +func (x *HealthCheck) GetTcpHealthCheck() *HealthCheck_TcpHealthCheck { + if x, ok := x.GetHealthChecker().(*HealthCheck_TcpHealthCheck_); ok { + return x.TcpHealthCheck + } + return nil +} + +func (x *HealthCheck) GetGrpcHealthCheck() *HealthCheck_GrpcHealthCheck { + if x, ok := x.GetHealthChecker().(*HealthCheck_GrpcHealthCheck_); ok { + return x.GrpcHealthCheck + } + return nil +} + +func (x *HealthCheck) GetCustomHealthCheck() *HealthCheck_CustomHealthCheck { + if x, ok := x.GetHealthChecker().(*HealthCheck_CustomHealthCheck_); ok { + return x.CustomHealthCheck + } + return nil +} + +func (x *HealthCheck) GetNoTrafficInterval() *durationpb.Duration { + if x != nil { + return x.NoTrafficInterval + } + return nil +} + +func (x *HealthCheck) GetNoTrafficHealthyInterval() *durationpb.Duration { + if x != nil { + return x.NoTrafficHealthyInterval + } + return nil +} + +func (x *HealthCheck) GetUnhealthyInterval() *durationpb.Duration { + if x != nil { + return x.UnhealthyInterval + } + return nil +} + +func (x *HealthCheck) GetUnhealthyEdgeInterval() *durationpb.Duration { + if x != nil { + return x.UnhealthyEdgeInterval + } + return nil +} + +func (x *HealthCheck) GetHealthyEdgeInterval() *durationpb.Duration { + if x != nil { + return x.HealthyEdgeInterval + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/core/v3/health_check.proto. 
+func (x *HealthCheck) GetEventLogPath() string { + if x != nil { + return x.EventLogPath + } + return "" +} + +func (x *HealthCheck) GetEventLogger() []*TypedExtensionConfig { + if x != nil { + return x.EventLogger + } + return nil +} + +func (x *HealthCheck) GetEventService() *EventServiceConfig { + if x != nil { + return x.EventService + } + return nil +} + +func (x *HealthCheck) GetAlwaysLogHealthCheckFailures() bool { + if x != nil { + return x.AlwaysLogHealthCheckFailures + } + return false +} + +func (x *HealthCheck) GetAlwaysLogHealthCheckSuccess() bool { + if x != nil { + return x.AlwaysLogHealthCheckSuccess + } + return false +} + +func (x *HealthCheck) GetTlsOptions() *HealthCheck_TlsOptions { + if x != nil { + return x.TlsOptions + } + return nil +} + +func (x *HealthCheck) GetTransportSocketMatchCriteria() *structpb.Struct { + if x != nil { + return x.TransportSocketMatchCriteria + } + return nil +} + +type isHealthCheck_HealthChecker interface { + isHealthCheck_HealthChecker() +} + +type HealthCheck_HttpHealthCheck_ struct { + // HTTP health check. + HttpHealthCheck *HealthCheck_HttpHealthCheck `protobuf:"bytes,8,opt,name=http_health_check,json=httpHealthCheck,proto3,oneof"` +} + +type HealthCheck_TcpHealthCheck_ struct { + // TCP health check. + TcpHealthCheck *HealthCheck_TcpHealthCheck `protobuf:"bytes,9,opt,name=tcp_health_check,json=tcpHealthCheck,proto3,oneof"` +} + +type HealthCheck_GrpcHealthCheck_ struct { + // gRPC health check. + GrpcHealthCheck *HealthCheck_GrpcHealthCheck `protobuf:"bytes,11,opt,name=grpc_health_check,json=grpcHealthCheck,proto3,oneof"` +} + +type HealthCheck_CustomHealthCheck_ struct { + // Custom health check. + CustomHealthCheck *HealthCheck_CustomHealthCheck `protobuf:"bytes,13,opt,name=custom_health_check,json=customHealthCheck,proto3,oneof"` +} + +func (*HealthCheck_HttpHealthCheck_) isHealthCheck_HealthChecker() {} + +func (*HealthCheck_TcpHealthCheck_) isHealthCheck_HealthChecker() {} + +func (*HealthCheck_GrpcHealthCheck_) isHealthCheck_HealthChecker() {} + +func (*HealthCheck_CustomHealthCheck_) isHealthCheck_HealthChecker() {} + +// Describes the encoding of the payload bytes in the payload. +type HealthCheck_Payload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Payload: + // + // *HealthCheck_Payload_Text + // *HealthCheck_Payload_Binary + Payload isHealthCheck_Payload_Payload `protobuf_oneof:"payload"` +} + +func (x *HealthCheck_Payload) Reset() { + *x = HealthCheck_Payload{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheck_Payload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheck_Payload) ProtoMessage() {} + +func (x *HealthCheck_Payload) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheck_Payload.ProtoReflect.Descriptor instead. 
+func (*HealthCheck_Payload) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{1, 0} +} + +func (m *HealthCheck_Payload) GetPayload() isHealthCheck_Payload_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (x *HealthCheck_Payload) GetText() string { + if x, ok := x.GetPayload().(*HealthCheck_Payload_Text); ok { + return x.Text + } + return "" +} + +func (x *HealthCheck_Payload) GetBinary() []byte { + if x, ok := x.GetPayload().(*HealthCheck_Payload_Binary); ok { + return x.Binary + } + return nil +} + +type isHealthCheck_Payload_Payload interface { + isHealthCheck_Payload_Payload() +} + +type HealthCheck_Payload_Text struct { + // Hex encoded payload. E.g., "000000FF". + Text string `protobuf:"bytes,1,opt,name=text,proto3,oneof"` +} + +type HealthCheck_Payload_Binary struct { + // Binary payload. + Binary []byte `protobuf:"bytes,2,opt,name=binary,proto3,oneof"` +} + +func (*HealthCheck_Payload_Text) isHealthCheck_Payload_Payload() {} + +func (*HealthCheck_Payload_Binary) isHealthCheck_Payload_Payload() {} + +// [#next-free-field: 15] +type HealthCheck_HttpHealthCheck struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The value of the host header in the HTTP health check request. If + // left empty (default value), the name of the cluster this health check is associated + // with will be used. The host header can be customized for a specific endpoint by setting the + // :ref:`hostname ` field. + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // Specifies the HTTP path that will be requested during health checking. For example + // “/healthcheck“. + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + // [#not-implemented-hide:] HTTP specific payload. + Send *HealthCheck_Payload `protobuf:"bytes,3,opt,name=send,proto3" json:"send,omitempty"` + // Specifies a list of HTTP expected responses to match in the first “response_buffer_size“ bytes of the response body. + // If it is set, both the expected response check and status code determine the health check. + // When checking the response, “fuzzy” matching is performed such that each payload block must be found, + // and in the order specified, but not necessarily contiguous. + // + // .. note:: + // + // It is recommended to set ``response_buffer_size`` based on the total Payload size for efficiency. + // The default buffer size is 1024 bytes when it is not set. + Receive []*HealthCheck_Payload `protobuf:"bytes,4,rep,name=receive,proto3" json:"receive,omitempty"` + // Specifies the size of response buffer in bytes that is used to Payload match. + // The default value is 1024. Setting to 0 implies that the Payload will be matched against the entire response. + ResponseBufferSize *wrapperspb.UInt64Value `protobuf:"bytes,14,opt,name=response_buffer_size,json=responseBufferSize,proto3" json:"response_buffer_size,omitempty"` + // Specifies a list of HTTP headers that should be added to each request that is sent to the + // health checked cluster. For more information, including details on header value syntax, see + // the documentation on :ref:`custom request headers + // `. 
+ RequestHeadersToAdd []*HeaderValueOption `protobuf:"bytes,6,rep,name=request_headers_to_add,json=requestHeadersToAdd,proto3" json:"request_headers_to_add,omitempty"` + // Specifies a list of HTTP headers that should be removed from each request that is sent to the + // health checked cluster. + RequestHeadersToRemove []string `protobuf:"bytes,8,rep,name=request_headers_to_remove,json=requestHeadersToRemove,proto3" json:"request_headers_to_remove,omitempty"` + // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default + // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open + // semantics of :ref:`Int64Range `. The start and end of each + // range are required. Only statuses in the range [100, 600) are allowed. + ExpectedStatuses []*v3.Int64Range `protobuf:"bytes,9,rep,name=expected_statuses,json=expectedStatuses,proto3" json:"expected_statuses,omitempty"` + // Specifies a list of HTTP response statuses considered retriable. If provided, responses in this range + // will count towards the configured :ref:`unhealthy_threshold `, + // but will not result in the host being considered immediately unhealthy. Ranges follow half-open semantics of + // :ref:`Int64Range `. The start and end of each range are required. + // Only statuses in the range [100, 600) are allowed. The :ref:`expected_statuses ` + // field takes precedence for any range overlaps with this field i.e. if status code 200 is both retriable and expected, a 200 response will + // be considered a successful health check. By default all responses not in + // :ref:`expected_statuses ` will result in + // the host being considered immediately unhealthy i.e. if status code 200 is expected and there are no configured retriable statuses, any + // non-200 response will result in the host being marked unhealthy. + RetriableStatuses []*v3.Int64Range `protobuf:"bytes,12,rep,name=retriable_statuses,json=retriableStatuses,proto3" json:"retriable_statuses,omitempty"` + // Use specified application protocol for health checks. + CodecClientType v3.CodecClientType `protobuf:"varint,10,opt,name=codec_client_type,json=codecClientType,proto3,enum=envoy.type.v3.CodecClientType" json:"codec_client_type,omitempty"` + // An optional service name parameter which is used to validate the identity of + // the health checked cluster using a :ref:`StringMatcher + // `. See the :ref:`architecture overview + // ` for more information. + ServiceNameMatcher *v31.StringMatcher `protobuf:"bytes,11,opt,name=service_name_matcher,json=serviceNameMatcher,proto3" json:"service_name_matcher,omitempty"` + // HTTP Method that will be used for health checking, default is "GET". + // GET, HEAD, POST, PUT, DELETE, OPTIONS, TRACE, PATCH methods are supported, but making request body is not supported. + // CONNECT method is disallowed because it is not appropriate for health check request. + // If a non-200 response is expected by the method, it needs to be set in :ref:`expected_statuses `. 
+ Method RequestMethod `protobuf:"varint,13,opt,name=method,proto3,enum=envoy.config.core.v3.RequestMethod" json:"method,omitempty"` +} + +func (x *HealthCheck_HttpHealthCheck) Reset() { + *x = HealthCheck_HttpHealthCheck{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheck_HttpHealthCheck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheck_HttpHealthCheck) ProtoMessage() {} + +func (x *HealthCheck_HttpHealthCheck) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheck_HttpHealthCheck.ProtoReflect.Descriptor instead. +func (*HealthCheck_HttpHealthCheck) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *HealthCheck_HttpHealthCheck) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (x *HealthCheck_HttpHealthCheck) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *HealthCheck_HttpHealthCheck) GetSend() *HealthCheck_Payload { + if x != nil { + return x.Send + } + return nil +} + +func (x *HealthCheck_HttpHealthCheck) GetReceive() []*HealthCheck_Payload { + if x != nil { + return x.Receive + } + return nil +} + +func (x *HealthCheck_HttpHealthCheck) GetResponseBufferSize() *wrapperspb.UInt64Value { + if x != nil { + return x.ResponseBufferSize + } + return nil +} + +func (x *HealthCheck_HttpHealthCheck) GetRequestHeadersToAdd() []*HeaderValueOption { + if x != nil { + return x.RequestHeadersToAdd + } + return nil +} + +func (x *HealthCheck_HttpHealthCheck) GetRequestHeadersToRemove() []string { + if x != nil { + return x.RequestHeadersToRemove + } + return nil +} + +func (x *HealthCheck_HttpHealthCheck) GetExpectedStatuses() []*v3.Int64Range { + if x != nil { + return x.ExpectedStatuses + } + return nil +} + +func (x *HealthCheck_HttpHealthCheck) GetRetriableStatuses() []*v3.Int64Range { + if x != nil { + return x.RetriableStatuses + } + return nil +} + +func (x *HealthCheck_HttpHealthCheck) GetCodecClientType() v3.CodecClientType { + if x != nil { + return x.CodecClientType + } + return v3.CodecClientType(0) +} + +func (x *HealthCheck_HttpHealthCheck) GetServiceNameMatcher() *v31.StringMatcher { + if x != nil { + return x.ServiceNameMatcher + } + return nil +} + +func (x *HealthCheck_HttpHealthCheck) GetMethod() RequestMethod { + if x != nil { + return x.Method + } + return RequestMethod_METHOD_UNSPECIFIED +} + +type HealthCheck_TcpHealthCheck struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Empty payloads imply a connect-only health check. + Send *HealthCheck_Payload `protobuf:"bytes,1,opt,name=send,proto3" json:"send,omitempty"` + // When checking the response, “fuzzy” matching is performed such that each + // payload block must be found, and in the order specified, but not + // necessarily contiguous. + Receive []*HealthCheck_Payload `protobuf:"bytes,2,rep,name=receive,proto3" json:"receive,omitempty"` + // When setting this value, it tries to attempt health check request with ProxyProtocol. 
+ // When “send“ is presented, they are sent after preceding ProxyProtocol header. + // Only ProxyProtocol header is sent when “send“ is not presented. + // It allows to use both ProxyProtocol V1 and V2. In V1, it presents L3/L4. In V2, it includes + // LOCAL command and doesn't include L3/L4. + ProxyProtocolConfig *ProxyProtocolConfig `protobuf:"bytes,3,opt,name=proxy_protocol_config,json=proxyProtocolConfig,proto3" json:"proxy_protocol_config,omitempty"` +} + +func (x *HealthCheck_TcpHealthCheck) Reset() { + *x = HealthCheck_TcpHealthCheck{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheck_TcpHealthCheck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheck_TcpHealthCheck) ProtoMessage() {} + +func (x *HealthCheck_TcpHealthCheck) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheck_TcpHealthCheck.ProtoReflect.Descriptor instead. +func (*HealthCheck_TcpHealthCheck) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{1, 2} +} + +func (x *HealthCheck_TcpHealthCheck) GetSend() *HealthCheck_Payload { + if x != nil { + return x.Send + } + return nil +} + +func (x *HealthCheck_TcpHealthCheck) GetReceive() []*HealthCheck_Payload { + if x != nil { + return x.Receive + } + return nil +} + +func (x *HealthCheck_TcpHealthCheck) GetProxyProtocolConfig() *ProxyProtocolConfig { + if x != nil { + return x.ProxyProtocolConfig + } + return nil +} + +type HealthCheck_RedisHealthCheck struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If set, optionally perform “EXISTS “ instead of “PING“. A return value + // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other + // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance + // by setting the specified key to any value and waiting for traffic to drain. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` +} + +func (x *HealthCheck_RedisHealthCheck) Reset() { + *x = HealthCheck_RedisHealthCheck{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheck_RedisHealthCheck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheck_RedisHealthCheck) ProtoMessage() {} + +func (x *HealthCheck_RedisHealthCheck) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheck_RedisHealthCheck.ProtoReflect.Descriptor instead. 
+func (*HealthCheck_RedisHealthCheck) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{1, 3} +} + +func (x *HealthCheck_RedisHealthCheck) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +// `grpc.health.v1.Health +// `_-based +// healthcheck. See `gRPC doc `_ +// for details. +type HealthCheck_GrpcHealthCheck struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An optional service name parameter which will be sent to gRPC service in + // `grpc.health.v1.HealthCheckRequest + // `_. + // message. See `gRPC health-checking overview + // `_ for more information. + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + // The value of the :authority header in the gRPC health check request. If + // left empty (default value), the name of the cluster this health check is associated + // with will be used. The authority header can be customized for a specific endpoint by setting + // the :ref:`hostname ` field. + Authority string `protobuf:"bytes,2,opt,name=authority,proto3" json:"authority,omitempty"` + // Specifies a list of key-value pairs that should be added to the metadata of each GRPC call + // that is sent to the health checked cluster. For more information, including details on header value syntax, + // see the documentation on :ref:`custom request headers + // `. + InitialMetadata []*HeaderValueOption `protobuf:"bytes,3,rep,name=initial_metadata,json=initialMetadata,proto3" json:"initial_metadata,omitempty"` +} + +func (x *HealthCheck_GrpcHealthCheck) Reset() { + *x = HealthCheck_GrpcHealthCheck{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheck_GrpcHealthCheck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheck_GrpcHealthCheck) ProtoMessage() {} + +func (x *HealthCheck_GrpcHealthCheck) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheck_GrpcHealthCheck.ProtoReflect.Descriptor instead. +func (*HealthCheck_GrpcHealthCheck) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{1, 4} +} + +func (x *HealthCheck_GrpcHealthCheck) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +func (x *HealthCheck_GrpcHealthCheck) GetAuthority() string { + if x != nil { + return x.Authority + } + return "" +} + +func (x *HealthCheck_GrpcHealthCheck) GetInitialMetadata() []*HeaderValueOption { + if x != nil { + return x.InitialMetadata + } + return nil +} + +// Custom health check. +type HealthCheck_CustomHealthCheck struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The registered name of the custom health checker. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A custom health checker specific configuration which depends on the custom health checker + // being instantiated. See :api:`envoy/config/health_checker` for reference. 
+ // [#extension-category: envoy.health_checkers] + // + // Types that are assignable to ConfigType: + // + // *HealthCheck_CustomHealthCheck_TypedConfig + ConfigType isHealthCheck_CustomHealthCheck_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *HealthCheck_CustomHealthCheck) Reset() { + *x = HealthCheck_CustomHealthCheck{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheck_CustomHealthCheck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheck_CustomHealthCheck) ProtoMessage() {} + +func (x *HealthCheck_CustomHealthCheck) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheck_CustomHealthCheck.ProtoReflect.Descriptor instead. +func (*HealthCheck_CustomHealthCheck) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{1, 5} +} + +func (x *HealthCheck_CustomHealthCheck) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *HealthCheck_CustomHealthCheck) GetConfigType() isHealthCheck_CustomHealthCheck_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *HealthCheck_CustomHealthCheck) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*HealthCheck_CustomHealthCheck_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isHealthCheck_CustomHealthCheck_ConfigType interface { + isHealthCheck_CustomHealthCheck_ConfigType() +} + +type HealthCheck_CustomHealthCheck_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*HealthCheck_CustomHealthCheck_TypedConfig) isHealthCheck_CustomHealthCheck_ConfigType() {} + +// Health checks occur over the transport socket specified for the cluster. This implies that if a +// cluster is using a TLS-enabled transport socket, the health check will also occur over TLS. +// +// This allows overriding the cluster TLS settings, just for health check connections. +type HealthCheck_TlsOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the ALPN protocols for health check connections. This is useful if the + // corresponding upstream is using ALPN-based :ref:`FilterChainMatch + // ` along with different protocols for health checks + // versus data connections. If empty, no ALPN protocols will be set on health check connections. 
+ AlpnProtocols []string `protobuf:"bytes,1,rep,name=alpn_protocols,json=alpnProtocols,proto3" json:"alpn_protocols,omitempty"` +} + +func (x *HealthCheck_TlsOptions) Reset() { + *x = HealthCheck_TlsOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheck_TlsOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheck_TlsOptions) ProtoMessage() {} + +func (x *HealthCheck_TlsOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheck_TlsOptions.ProtoReflect.Descriptor instead. +func (*HealthCheck_TlsOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{1, 6} +} + +func (x *HealthCheck_TlsOptions) GetAlpnProtocols() []string { + if x != nil { + return x.AlpnProtocols + } + return nil +} + +var File_envoy_config_core_v3_health_check_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_health_check_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, + 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x60, 0x0a, 0x0f, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x53, 0x65, 0x74, 0x12, 0x4d, 0x0a, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x0d, 0xfa, + 0x42, 0x0a, 0x92, 0x01, 0x07, 0x22, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x08, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x22, 0x8c, 0x20, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x3f, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08, 0x01, 0x2a, 0x00, 0x52, 0x07, + 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x41, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08, 0x01, 0x2a, 0x00, + 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x40, 0x0a, 0x0e, 0x69, 0x6e, + 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x14, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x69, + 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0f, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0e, 0x69, 0x6e, 
0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, + 0x12, 0x36, 0x0a, 0x17, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, + 0x74, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x12, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x15, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, + 0x72, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x57, 0x0a, 0x13, 0x75, 0x6e, 0x68, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x12, 0x75, + 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, + 0x64, 0x12, 0x53, 0x0a, 0x11, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x74, 0x68, 0x72, + 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, + 0x01, 0x02, 0x10, 0x01, 0x52, 0x10, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x54, 0x68, 0x72, + 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x61, 0x6c, 0x74, 0x5f, 0x70, 0x6f, + 0x72, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x61, 0x6c, 0x74, 0x50, 0x6f, 0x72, 0x74, 0x12, + 0x45, 0x0a, 0x10, 0x72, 0x65, 0x75, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x75, 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x11, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x68, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x0f, 0x68, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x5c, 0x0a, 0x10, 0x74, 0x63, 0x70, 0x5f, 0x68, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x5f, 0x0a, 0x11, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 
0x63, 0x6b, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x0f, 0x67, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x65, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x11, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x53, 0x0a, + 0x13, 0x6e, 0x6f, 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, + 0x11, 0x6e, 0x6f, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x12, 0x62, 0x0a, 0x1b, 0x6e, 0x6f, 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, + 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x18, 0x6e, 0x6f, + 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x49, 0x6e, + 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x52, 0x0a, 0x12, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0e, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x11, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x5b, 0x0a, 0x17, 0x75, 0x6e, + 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, + 0x52, 0x15, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x45, 0x64, 0x67, 0x65, 0x49, + 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x57, 0x0a, 0x15, 0x68, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x79, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x13, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x79, 0x45, 0x64, 0x67, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x12, 0x31, 0x0a, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, + 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x50, + 0x61, 0x74, 0x68, 0x12, 0x4d, 0x0a, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x6f, 0x67, + 0x67, 0x65, 0x72, 0x18, 0x19, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x67, + 0x65, 0x72, 0x12, 0x4d, 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x46, 0x0a, 0x20, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, + 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x66, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x61, 0x6c, 0x77, + 0x61, 0x79, 0x73, 0x4c, 0x6f, 0x67, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1f, 0x61, 0x6c, 0x77, + 0x61, 0x79, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x1a, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x1b, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x4c, 0x6f, 0x67, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, + 0x4d, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x15, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x0a, 0x74, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5e, + 0x0a, 0x1f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, + 0x61, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x52, 0x1c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x1a, 0x80, + 
0x01, 0x0a, 0x07, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1d, 0x0a, 0x04, 0x74, 0x65, + 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x48, 0x00, 0x52, 0x04, 0x74, 0x65, 0x78, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, + 0x64, 0x42, 0x0e, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x03, 0xf8, 0x42, + 0x01, 0x1a, 0xc6, 0x07, 0x0a, 0x0f, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1c, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x72, 0x03, 0xc0, 0x01, 0x02, 0x52, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x10, 0x01, 0xc0, 0x01, 0x02, 0x52, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x12, 0x3d, 0x0a, 0x04, 0x73, 0x65, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x04, 0x73, 0x65, + 0x6e, 0x64, 0x12, 0x43, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, + 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x12, 0x57, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x32, 0x02, 0x28, 0x00, 0x52, 0x12, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x67, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, + 0x03, 0x10, 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4b, 0x0a, 0x19, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, + 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, 0x42, + 0x0d, 0x92, 0x01, 0x0a, 
0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x16, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, + 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x46, 0x0a, 0x11, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, + 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x10, 0x65, 0x78, + 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x48, + 0x0a, 0x12, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x11, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x54, 0x0a, 0x11, 0x63, 0x6f, 0x64, 0x65, + 0x63, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x63, + 0x6f, 0x64, 0x65, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x56, + 0x0a, 0x14, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x47, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x42, 0x0a, 0xfa, 0x42, 0x07, + 0x82, 0x01, 0x04, 0x10, 0x01, 0x20, 0x06, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x3a, + 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, + 0x08, 0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x52, + 0x09, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x32, 0x1a, 0xa8, 0x02, 0x0a, 0x0e, 0x54, + 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x3d, 0x0a, + 0x04, 0x73, 0x65, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 
0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x04, 0x73, 0x65, 0x6e, 0x64, 0x12, 0x43, 0x0a, 0x07, + 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, + 0x65, 0x12, 0x5d, 0x0a, 0x15, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x1a, 0x5b, 0x0a, 0x10, 0x52, 0x65, 0x64, 0x69, 0x73, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x35, 0x9a, 0xc5, 0x88, + 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x2e, 0x52, 0x65, 0x64, 0x69, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x1a, 0xf4, 0x01, 0x0a, 0x0f, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x09, 0x61, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, + 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x74, 0x79, 0x12, 0x5d, 0x0a, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, + 0xe8, 0x07, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x1a, 0xc0, 0x01, 
0x0a, 0x11, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, + 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, + 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x36, 0x9a, 0xc5, 0x88, 0x1e, 0x31, 0x0a, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x42, + 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, + 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x64, 0x0a, 0x0a, + 0x54, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, + 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x73, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x42, 0x15, 0x0a, 0x0e, 0x68, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, + 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x2a, 0x60, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x01, 0x12, + 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, + 0x0a, 0x08, 0x44, 0x52, 0x41, 0x49, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, + 0x54, 0x49, 0x4d, 0x45, 0x4f, 0x55, 0x54, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, + 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, 0x05, 0x42, 0x84, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, + 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 
0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_health_check_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_health_check_proto_rawDescData = file_envoy_config_core_v3_health_check_proto_rawDesc +) + +func file_envoy_config_core_v3_health_check_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_health_check_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_health_check_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_health_check_proto_rawDescData) + }) + return file_envoy_config_core_v3_health_check_proto_rawDescData +} + +var file_envoy_config_core_v3_health_check_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_config_core_v3_health_check_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_envoy_config_core_v3_health_check_proto_goTypes = []interface{}{ + (HealthStatus)(0), // 0: envoy.config.core.v3.HealthStatus + (*HealthStatusSet)(nil), // 1: envoy.config.core.v3.HealthStatusSet + (*HealthCheck)(nil), // 2: envoy.config.core.v3.HealthCheck + (*HealthCheck_Payload)(nil), // 3: envoy.config.core.v3.HealthCheck.Payload + (*HealthCheck_HttpHealthCheck)(nil), // 4: envoy.config.core.v3.HealthCheck.HttpHealthCheck + (*HealthCheck_TcpHealthCheck)(nil), // 5: envoy.config.core.v3.HealthCheck.TcpHealthCheck + (*HealthCheck_RedisHealthCheck)(nil), // 6: envoy.config.core.v3.HealthCheck.RedisHealthCheck + (*HealthCheck_GrpcHealthCheck)(nil), // 7: envoy.config.core.v3.HealthCheck.GrpcHealthCheck + (*HealthCheck_CustomHealthCheck)(nil), // 8: envoy.config.core.v3.HealthCheck.CustomHealthCheck + (*HealthCheck_TlsOptions)(nil), // 9: envoy.config.core.v3.HealthCheck.TlsOptions + (*durationpb.Duration)(nil), // 10: google.protobuf.Duration + (*wrapperspb.UInt32Value)(nil), // 11: google.protobuf.UInt32Value + (*wrapperspb.BoolValue)(nil), // 12: google.protobuf.BoolValue + (*TypedExtensionConfig)(nil), // 13: envoy.config.core.v3.TypedExtensionConfig + (*EventServiceConfig)(nil), // 14: envoy.config.core.v3.EventServiceConfig + (*structpb.Struct)(nil), // 15: google.protobuf.Struct + (*wrapperspb.UInt64Value)(nil), // 16: google.protobuf.UInt64Value + (*HeaderValueOption)(nil), // 17: envoy.config.core.v3.HeaderValueOption + (*v3.Int64Range)(nil), // 18: envoy.type.v3.Int64Range + (v3.CodecClientType)(0), // 19: envoy.type.v3.CodecClientType + (*v31.StringMatcher)(nil), // 20: envoy.type.matcher.v3.StringMatcher + (RequestMethod)(0), // 21: envoy.config.core.v3.RequestMethod + (*ProxyProtocolConfig)(nil), // 22: envoy.config.core.v3.ProxyProtocolConfig + (*anypb.Any)(nil), // 23: google.protobuf.Any +} +var file_envoy_config_core_v3_health_check_proto_depIdxs = []int32{ + 0, // 0: envoy.config.core.v3.HealthStatusSet.statuses:type_name -> envoy.config.core.v3.HealthStatus + 10, // 1: envoy.config.core.v3.HealthCheck.timeout:type_name -> google.protobuf.Duration + 10, // 2: envoy.config.core.v3.HealthCheck.interval:type_name -> google.protobuf.Duration + 10, // 3: envoy.config.core.v3.HealthCheck.initial_jitter:type_name -> google.protobuf.Duration + 10, // 4: envoy.config.core.v3.HealthCheck.interval_jitter:type_name -> google.protobuf.Duration + 11, // 5: envoy.config.core.v3.HealthCheck.unhealthy_threshold:type_name -> google.protobuf.UInt32Value + 11, // 6: envoy.config.core.v3.HealthCheck.healthy_threshold:type_name -> google.protobuf.UInt32Value + 11, // 7: 
envoy.config.core.v3.HealthCheck.alt_port:type_name -> google.protobuf.UInt32Value + 12, // 8: envoy.config.core.v3.HealthCheck.reuse_connection:type_name -> google.protobuf.BoolValue + 4, // 9: envoy.config.core.v3.HealthCheck.http_health_check:type_name -> envoy.config.core.v3.HealthCheck.HttpHealthCheck + 5, // 10: envoy.config.core.v3.HealthCheck.tcp_health_check:type_name -> envoy.config.core.v3.HealthCheck.TcpHealthCheck + 7, // 11: envoy.config.core.v3.HealthCheck.grpc_health_check:type_name -> envoy.config.core.v3.HealthCheck.GrpcHealthCheck + 8, // 12: envoy.config.core.v3.HealthCheck.custom_health_check:type_name -> envoy.config.core.v3.HealthCheck.CustomHealthCheck + 10, // 13: envoy.config.core.v3.HealthCheck.no_traffic_interval:type_name -> google.protobuf.Duration + 10, // 14: envoy.config.core.v3.HealthCheck.no_traffic_healthy_interval:type_name -> google.protobuf.Duration + 10, // 15: envoy.config.core.v3.HealthCheck.unhealthy_interval:type_name -> google.protobuf.Duration + 10, // 16: envoy.config.core.v3.HealthCheck.unhealthy_edge_interval:type_name -> google.protobuf.Duration + 10, // 17: envoy.config.core.v3.HealthCheck.healthy_edge_interval:type_name -> google.protobuf.Duration + 13, // 18: envoy.config.core.v3.HealthCheck.event_logger:type_name -> envoy.config.core.v3.TypedExtensionConfig + 14, // 19: envoy.config.core.v3.HealthCheck.event_service:type_name -> envoy.config.core.v3.EventServiceConfig + 9, // 20: envoy.config.core.v3.HealthCheck.tls_options:type_name -> envoy.config.core.v3.HealthCheck.TlsOptions + 15, // 21: envoy.config.core.v3.HealthCheck.transport_socket_match_criteria:type_name -> google.protobuf.Struct + 3, // 22: envoy.config.core.v3.HealthCheck.HttpHealthCheck.send:type_name -> envoy.config.core.v3.HealthCheck.Payload + 3, // 23: envoy.config.core.v3.HealthCheck.HttpHealthCheck.receive:type_name -> envoy.config.core.v3.HealthCheck.Payload + 16, // 24: envoy.config.core.v3.HealthCheck.HttpHealthCheck.response_buffer_size:type_name -> google.protobuf.UInt64Value + 17, // 25: envoy.config.core.v3.HealthCheck.HttpHealthCheck.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 18, // 26: envoy.config.core.v3.HealthCheck.HttpHealthCheck.expected_statuses:type_name -> envoy.type.v3.Int64Range + 18, // 27: envoy.config.core.v3.HealthCheck.HttpHealthCheck.retriable_statuses:type_name -> envoy.type.v3.Int64Range + 19, // 28: envoy.config.core.v3.HealthCheck.HttpHealthCheck.codec_client_type:type_name -> envoy.type.v3.CodecClientType + 20, // 29: envoy.config.core.v3.HealthCheck.HttpHealthCheck.service_name_matcher:type_name -> envoy.type.matcher.v3.StringMatcher + 21, // 30: envoy.config.core.v3.HealthCheck.HttpHealthCheck.method:type_name -> envoy.config.core.v3.RequestMethod + 3, // 31: envoy.config.core.v3.HealthCheck.TcpHealthCheck.send:type_name -> envoy.config.core.v3.HealthCheck.Payload + 3, // 32: envoy.config.core.v3.HealthCheck.TcpHealthCheck.receive:type_name -> envoy.config.core.v3.HealthCheck.Payload + 22, // 33: envoy.config.core.v3.HealthCheck.TcpHealthCheck.proxy_protocol_config:type_name -> envoy.config.core.v3.ProxyProtocolConfig + 17, // 34: envoy.config.core.v3.HealthCheck.GrpcHealthCheck.initial_metadata:type_name -> envoy.config.core.v3.HeaderValueOption + 23, // 35: envoy.config.core.v3.HealthCheck.CustomHealthCheck.typed_config:type_name -> google.protobuf.Any + 36, // [36:36] is the sub-list for method output_type + 36, // [36:36] is the sub-list for method input_type + 36, // [36:36] is the sub-list for 
extension type_name + 36, // [36:36] is the sub-list for extension extendee + 0, // [0:36] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_health_check_proto_init() } +func file_envoy_config_core_v3_health_check_proto_init() { + if File_envoy_config_core_v3_health_check_proto != nil { + return + } + file_envoy_config_core_v3_base_proto_init() + file_envoy_config_core_v3_event_service_config_proto_init() + file_envoy_config_core_v3_extension_proto_init() + file_envoy_config_core_v3_proxy_protocol_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_health_check_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthStatusSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_health_check_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthCheck); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_health_check_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthCheck_Payload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_health_check_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthCheck_HttpHealthCheck); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_health_check_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthCheck_TcpHealthCheck); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_health_check_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthCheck_RedisHealthCheck); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_health_check_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthCheck_GrpcHealthCheck); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_health_check_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthCheck_CustomHealthCheck); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_health_check_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthCheck_TlsOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_core_v3_health_check_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*HealthCheck_HttpHealthCheck_)(nil), + (*HealthCheck_TcpHealthCheck_)(nil), + (*HealthCheck_GrpcHealthCheck_)(nil), + (*HealthCheck_CustomHealthCheck_)(nil), + } + file_envoy_config_core_v3_health_check_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*HealthCheck_Payload_Text)(nil), + 
(*HealthCheck_Payload_Binary)(nil), + } + file_envoy_config_core_v3_health_check_proto_msgTypes[7].OneofWrappers = []interface{}{ + (*HealthCheck_CustomHealthCheck_TypedConfig)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_health_check_proto_rawDesc, + NumEnums: 1, + NumMessages: 9, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_health_check_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_health_check_proto_depIdxs, + EnumInfos: file_envoy_config_core_v3_health_check_proto_enumTypes, + MessageInfos: file_envoy_config_core_v3_health_check_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_health_check_proto = out.File + file_envoy_config_core_v3_health_check_proto_rawDesc = nil + file_envoy_config_core_v3_health_check_proto_goTypes = nil + file_envoy_config_core_v3_health_check_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.validate.go new file mode 100644 index 0000000000..707776073f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check.pb.validate.go @@ -0,0 +1,2291 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/core/v3/health_check.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = v3.CodecClientType(0) +) + +// Validate checks the field values on HealthStatusSet with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *HealthStatusSet) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HealthStatusSet with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HealthStatusSetMultiError, or nil if none found. +func (m *HealthStatusSet) ValidateAll() error { + return m.validate(true) +} + +func (m *HealthStatusSet) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetStatuses() { + _, _ = idx, item + + if _, ok := HealthStatus_name[int32(item)]; !ok { + err := HealthStatusSetValidationError{ + field: fmt.Sprintf("Statuses[%v]", idx), + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return HealthStatusSetMultiError(errors) + } + + return nil +} + +// HealthStatusSetMultiError is an error wrapping multiple validation errors +// returned by HealthStatusSet.ValidateAll() if the designated constraints +// aren't met. 
+type HealthStatusSetMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HealthStatusSetMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HealthStatusSetMultiError) AllErrors() []error { return m } + +// HealthStatusSetValidationError is the validation error returned by +// HealthStatusSet.Validate if the designated constraints aren't met. +type HealthStatusSetValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HealthStatusSetValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HealthStatusSetValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HealthStatusSetValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HealthStatusSetValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HealthStatusSetValidationError) ErrorName() string { return "HealthStatusSetValidationError" } + +// Error satisfies the builtin error interface +func (e HealthStatusSetValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHealthStatusSet.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HealthStatusSetValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HealthStatusSetValidationError{} + +// Validate checks the field values on HealthCheck with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HealthCheck) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HealthCheck with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HealthCheckMultiError, or +// nil if none found. 
+func (m *HealthCheck) ValidateAll() error { + return m.validate(true) +} + +func (m *HealthCheck) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetTimeout() == nil { + err := HealthCheckValidationError{ + field: "Timeout", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if d := m.GetTimeout(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = HealthCheckValidationError{ + field: "Timeout", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := HealthCheckValidationError{ + field: "Timeout", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if m.GetInterval() == nil { + err := HealthCheckValidationError{ + field: "Interval", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if d := m.GetInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = HealthCheckValidationError{ + field: "Interval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := HealthCheckValidationError{ + field: "Interval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if all { + switch v := interface{}(m.GetInitialJitter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "InitialJitter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "InitialJitter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInitialJitter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: "InitialJitter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetIntervalJitter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "IntervalJitter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "IntervalJitter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetIntervalJitter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: "IntervalJitter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for IntervalJitterPercent + + if m.GetUnhealthyThreshold() == nil { + err := HealthCheckValidationError{ + field: "UnhealthyThreshold", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v 
:= interface{}(m.GetUnhealthyThreshold()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "UnhealthyThreshold", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "UnhealthyThreshold", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUnhealthyThreshold()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: "UnhealthyThreshold", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if m.GetHealthyThreshold() == nil { + err := HealthCheckValidationError{ + field: "HealthyThreshold", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetHealthyThreshold()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "HealthyThreshold", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "HealthyThreshold", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHealthyThreshold()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: "HealthyThreshold", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetAltPort()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "AltPort", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "AltPort", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAltPort()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: "AltPort", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetReuseConnection()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "ReuseConnection", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "ReuseConnection", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetReuseConnection()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: "ReuseConnection", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if d := m.GetNoTrafficInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = 
HealthCheckValidationError{ + field: "NoTrafficInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := HealthCheckValidationError{ + field: "NoTrafficInterval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if d := m.GetNoTrafficHealthyInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = HealthCheckValidationError{ + field: "NoTrafficHealthyInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := HealthCheckValidationError{ + field: "NoTrafficHealthyInterval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if d := m.GetUnhealthyInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = HealthCheckValidationError{ + field: "UnhealthyInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := HealthCheckValidationError{ + field: "UnhealthyInterval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if d := m.GetUnhealthyEdgeInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = HealthCheckValidationError{ + field: "UnhealthyEdgeInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := HealthCheckValidationError{ + field: "UnhealthyEdgeInterval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if d := m.GetHealthyEdgeInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = HealthCheckValidationError{ + field: "HealthyEdgeInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := HealthCheckValidationError{ + field: "HealthyEdgeInterval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + // no validation rules for EventLogPath + + for idx, item := range m.GetEventLogger() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: fmt.Sprintf("EventLogger[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: fmt.Sprintf("EventLogger[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if 
err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: fmt.Sprintf("EventLogger[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetEventService()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "EventService", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "EventService", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEventService()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: "EventService", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for AlwaysLogHealthCheckFailures + + // no validation rules for AlwaysLogHealthCheckSuccess + + if all { + switch v := interface{}(m.GetTlsOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "TlsOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "TlsOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTlsOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: "TlsOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTransportSocketMatchCriteria()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "TransportSocketMatchCriteria", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "TransportSocketMatchCriteria", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTransportSocketMatchCriteria()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: "TransportSocketMatchCriteria", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofHealthCheckerPresent := false + switch v := m.HealthChecker.(type) { + case *HealthCheck_HttpHealthCheck_: + if v == nil { + err := HealthCheckValidationError{ + field: "HealthChecker", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofHealthCheckerPresent = true + + if all { + switch v := interface{}(m.GetHttpHealthCheck()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "HttpHealthCheck", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, 
HealthCheckValidationError{ + field: "HttpHealthCheck", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpHealthCheck()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: "HttpHealthCheck", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *HealthCheck_TcpHealthCheck_: + if v == nil { + err := HealthCheckValidationError{ + field: "HealthChecker", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofHealthCheckerPresent = true + + if all { + switch v := interface{}(m.GetTcpHealthCheck()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "TcpHealthCheck", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "TcpHealthCheck", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTcpHealthCheck()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: "TcpHealthCheck", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *HealthCheck_GrpcHealthCheck_: + if v == nil { + err := HealthCheckValidationError{ + field: "HealthChecker", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofHealthCheckerPresent = true + + if all { + switch v := interface{}(m.GetGrpcHealthCheck()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "GrpcHealthCheck", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "GrpcHealthCheck", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpcHealthCheck()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheckValidationError{ + field: "GrpcHealthCheck", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *HealthCheck_CustomHealthCheck_: + if v == nil { + err := HealthCheckValidationError{ + field: "HealthChecker", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofHealthCheckerPresent = true + + if all { + switch v := interface{}(m.GetCustomHealthCheck()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "CustomHealthCheck", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheckValidationError{ + field: "CustomHealthCheck", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCustomHealthCheck()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
HealthCheckValidationError{ + field: "CustomHealthCheck", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofHealthCheckerPresent { + err := HealthCheckValidationError{ + field: "HealthChecker", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HealthCheckMultiError(errors) + } + + return nil +} + +// HealthCheckMultiError is an error wrapping multiple validation errors +// returned by HealthCheck.ValidateAll() if the designated constraints aren't met. +type HealthCheckMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HealthCheckMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HealthCheckMultiError) AllErrors() []error { return m } + +// HealthCheckValidationError is the validation error returned by +// HealthCheck.Validate if the designated constraints aren't met. +type HealthCheckValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HealthCheckValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HealthCheckValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HealthCheckValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HealthCheckValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HealthCheckValidationError) ErrorName() string { return "HealthCheckValidationError" } + +// Error satisfies the builtin error interface +func (e HealthCheckValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHealthCheck.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HealthCheckValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HealthCheckValidationError{} + +// Validate checks the field values on HealthCheck_Payload with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HealthCheck_Payload) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HealthCheck_Payload with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HealthCheck_PayloadMultiError, or nil if none found. 
+func (m *HealthCheck_Payload) ValidateAll() error { + return m.validate(true) +} + +func (m *HealthCheck_Payload) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofPayloadPresent := false + switch v := m.Payload.(type) { + case *HealthCheck_Payload_Text: + if v == nil { + err := HealthCheck_PayloadValidationError{ + field: "Payload", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPayloadPresent = true + + if utf8.RuneCountInString(m.GetText()) < 1 { + err := HealthCheck_PayloadValidationError{ + field: "Text", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *HealthCheck_Payload_Binary: + if v == nil { + err := HealthCheck_PayloadValidationError{ + field: "Payload", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPayloadPresent = true + // no validation rules for Binary + default: + _ = v // ensures v is used + } + if !oneofPayloadPresent { + err := HealthCheck_PayloadValidationError{ + field: "Payload", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HealthCheck_PayloadMultiError(errors) + } + + return nil +} + +// HealthCheck_PayloadMultiError is an error wrapping multiple validation +// errors returned by HealthCheck_Payload.ValidateAll() if the designated +// constraints aren't met. +type HealthCheck_PayloadMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HealthCheck_PayloadMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HealthCheck_PayloadMultiError) AllErrors() []error { return m } + +// HealthCheck_PayloadValidationError is the validation error returned by +// HealthCheck_Payload.Validate if the designated constraints aren't met. +type HealthCheck_PayloadValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HealthCheck_PayloadValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HealthCheck_PayloadValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HealthCheck_PayloadValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HealthCheck_PayloadValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HealthCheck_PayloadValidationError) ErrorName() string { + return "HealthCheck_PayloadValidationError" +} + +// Error satisfies the builtin error interface +func (e HealthCheck_PayloadValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHealthCheck_Payload.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HealthCheck_PayloadValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HealthCheck_PayloadValidationError{} + +// Validate checks the field values on HealthCheck_HttpHealthCheck with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HealthCheck_HttpHealthCheck) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HealthCheck_HttpHealthCheck with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HealthCheck_HttpHealthCheckMultiError, or nil if none found. +func (m *HealthCheck_HttpHealthCheck) ValidateAll() error { + return m.validate(true) +} + +func (m *HealthCheck_HttpHealthCheck) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if !_HealthCheck_HttpHealthCheck_Host_Pattern.MatchString(m.GetHost()) { + err := HealthCheck_HttpHealthCheckValidationError{ + field: "Host", + reason: "value does not match regex pattern \"^[^\\x00-\\b\\n-\\x1f\\x7f]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetPath()) < 1 { + err := HealthCheck_HttpHealthCheckValidationError{ + field: "Path", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_HealthCheck_HttpHealthCheck_Path_Pattern.MatchString(m.GetPath()) { + err := HealthCheck_HttpHealthCheckValidationError{ + field: "Path", + reason: "value does not match regex pattern \"^[^\\x00-\\b\\n-\\x1f\\x7f]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetSend()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: "Send", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: "Send", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSend()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheck_HttpHealthCheckValidationError{ + field: "Send", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetReceive() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("Receive[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case 
interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("Receive[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("Receive[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if wrapper := m.GetResponseBufferSize(); wrapper != nil { + + if wrapper.GetValue() < 0 { + err := HealthCheck_HttpHealthCheckValidationError{ + field: "ResponseBufferSize", + reason: "value must be greater than or equal to 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(m.GetRequestHeadersToAdd()) > 1000 { + err := HealthCheck_HttpHealthCheckValidationError{ + field: "RequestHeadersToAdd", + reason: "value must contain no more than 1000 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRequestHeadersToAdd() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetRequestHeadersToRemove() { + _, _ = idx, item + + if !_HealthCheck_HttpHealthCheck_RequestHeadersToRemove_Pattern.MatchString(item) { + err := HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("RequestHeadersToRemove[%v]", idx), + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + for idx, item := range m.GetExpectedStatuses() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("ExpectedStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("ExpectedStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("ExpectedStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetRetriableStatuses() { + _, _ 
= idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("RetriableStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("RetriableStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheck_HttpHealthCheckValidationError{ + field: fmt.Sprintf("RetriableStatuses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if _, ok := v3.CodecClientType_name[int32(m.GetCodecClientType())]; !ok { + err := HealthCheck_HttpHealthCheckValidationError{ + field: "CodecClientType", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetServiceNameMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: "ServiceNameMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheck_HttpHealthCheckValidationError{ + field: "ServiceNameMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetServiceNameMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheck_HttpHealthCheckValidationError{ + field: "ServiceNameMatcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if _, ok := _HealthCheck_HttpHealthCheck_Method_NotInLookup[m.GetMethod()]; ok { + err := HealthCheck_HttpHealthCheckValidationError{ + field: "Method", + reason: "value must not be in list [CONNECT]", + } + if !all { + return err + } + errors = append(errors, err) + } + + if _, ok := RequestMethod_name[int32(m.GetMethod())]; !ok { + err := HealthCheck_HttpHealthCheckValidationError{ + field: "Method", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HealthCheck_HttpHealthCheckMultiError(errors) + } + + return nil +} + +// HealthCheck_HttpHealthCheckMultiError is an error wrapping multiple +// validation errors returned by HealthCheck_HttpHealthCheck.ValidateAll() if +// the designated constraints aren't met. +type HealthCheck_HttpHealthCheckMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HealthCheck_HttpHealthCheckMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HealthCheck_HttpHealthCheckMultiError) AllErrors() []error { return m } + +// HealthCheck_HttpHealthCheckValidationError is the validation error returned +// by HealthCheck_HttpHealthCheck.Validate if the designated constraints +// aren't met. 
+type HealthCheck_HttpHealthCheckValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HealthCheck_HttpHealthCheckValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HealthCheck_HttpHealthCheckValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HealthCheck_HttpHealthCheckValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HealthCheck_HttpHealthCheckValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HealthCheck_HttpHealthCheckValidationError) ErrorName() string { + return "HealthCheck_HttpHealthCheckValidationError" +} + +// Error satisfies the builtin error interface +func (e HealthCheck_HttpHealthCheckValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHealthCheck_HttpHealthCheck.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HealthCheck_HttpHealthCheckValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HealthCheck_HttpHealthCheckValidationError{} + +var _HealthCheck_HttpHealthCheck_Host_Pattern = regexp.MustCompile("^[^\x00-\b\n-\x1f\x7f]*$") + +var _HealthCheck_HttpHealthCheck_Path_Pattern = regexp.MustCompile("^[^\x00-\b\n-\x1f\x7f]*$") + +var _HealthCheck_HttpHealthCheck_RequestHeadersToRemove_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _HealthCheck_HttpHealthCheck_Method_NotInLookup = map[RequestMethod]struct{}{ + 6: {}, +} + +// Validate checks the field values on HealthCheck_TcpHealthCheck with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HealthCheck_TcpHealthCheck) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HealthCheck_TcpHealthCheck with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HealthCheck_TcpHealthCheckMultiError, or nil if none found. 
+func (m *HealthCheck_TcpHealthCheck) ValidateAll() error { + return m.validate(true) +} + +func (m *HealthCheck_TcpHealthCheck) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetSend()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheck_TcpHealthCheckValidationError{ + field: "Send", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheck_TcpHealthCheckValidationError{ + field: "Send", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSend()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheck_TcpHealthCheckValidationError{ + field: "Send", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetReceive() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheck_TcpHealthCheckValidationError{ + field: fmt.Sprintf("Receive[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheck_TcpHealthCheckValidationError{ + field: fmt.Sprintf("Receive[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheck_TcpHealthCheckValidationError{ + field: fmt.Sprintf("Receive[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetProxyProtocolConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheck_TcpHealthCheckValidationError{ + field: "ProxyProtocolConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheck_TcpHealthCheckValidationError{ + field: "ProxyProtocolConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetProxyProtocolConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheck_TcpHealthCheckValidationError{ + field: "ProxyProtocolConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return HealthCheck_TcpHealthCheckMultiError(errors) + } + + return nil +} + +// HealthCheck_TcpHealthCheckMultiError is an error wrapping multiple +// validation errors returned by HealthCheck_TcpHealthCheck.ValidateAll() if +// the designated constraints aren't met. +type HealthCheck_TcpHealthCheckMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HealthCheck_TcpHealthCheckMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m HealthCheck_TcpHealthCheckMultiError) AllErrors() []error { return m } + +// HealthCheck_TcpHealthCheckValidationError is the validation error returned +// by HealthCheck_TcpHealthCheck.Validate if the designated constraints aren't met. +type HealthCheck_TcpHealthCheckValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HealthCheck_TcpHealthCheckValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HealthCheck_TcpHealthCheckValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HealthCheck_TcpHealthCheckValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HealthCheck_TcpHealthCheckValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HealthCheck_TcpHealthCheckValidationError) ErrorName() string { + return "HealthCheck_TcpHealthCheckValidationError" +} + +// Error satisfies the builtin error interface +func (e HealthCheck_TcpHealthCheckValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHealthCheck_TcpHealthCheck.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HealthCheck_TcpHealthCheckValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HealthCheck_TcpHealthCheckValidationError{} + +// Validate checks the field values on HealthCheck_RedisHealthCheck with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HealthCheck_RedisHealthCheck) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HealthCheck_RedisHealthCheck with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HealthCheck_RedisHealthCheckMultiError, or nil if none found. +func (m *HealthCheck_RedisHealthCheck) ValidateAll() error { + return m.validate(true) +} + +func (m *HealthCheck_RedisHealthCheck) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Key + + if len(errors) > 0 { + return HealthCheck_RedisHealthCheckMultiError(errors) + } + + return nil +} + +// HealthCheck_RedisHealthCheckMultiError is an error wrapping multiple +// validation errors returned by HealthCheck_RedisHealthCheck.ValidateAll() if +// the designated constraints aren't met. +type HealthCheck_RedisHealthCheckMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HealthCheck_RedisHealthCheckMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HealthCheck_RedisHealthCheckMultiError) AllErrors() []error { return m } + +// HealthCheck_RedisHealthCheckValidationError is the validation error returned +// by HealthCheck_RedisHealthCheck.Validate if the designated constraints +// aren't met. 
+type HealthCheck_RedisHealthCheckValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HealthCheck_RedisHealthCheckValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HealthCheck_RedisHealthCheckValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HealthCheck_RedisHealthCheckValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HealthCheck_RedisHealthCheckValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HealthCheck_RedisHealthCheckValidationError) ErrorName() string { + return "HealthCheck_RedisHealthCheckValidationError" +} + +// Error satisfies the builtin error interface +func (e HealthCheck_RedisHealthCheckValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHealthCheck_RedisHealthCheck.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HealthCheck_RedisHealthCheckValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HealthCheck_RedisHealthCheckValidationError{} + +// Validate checks the field values on HealthCheck_GrpcHealthCheck with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HealthCheck_GrpcHealthCheck) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HealthCheck_GrpcHealthCheck with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HealthCheck_GrpcHealthCheckMultiError, or nil if none found. 
+func (m *HealthCheck_GrpcHealthCheck) ValidateAll() error { + return m.validate(true) +} + +func (m *HealthCheck_GrpcHealthCheck) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for ServiceName + + if !_HealthCheck_GrpcHealthCheck_Authority_Pattern.MatchString(m.GetAuthority()) { + err := HealthCheck_GrpcHealthCheckValidationError{ + field: "Authority", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetInitialMetadata()) > 1000 { + err := HealthCheck_GrpcHealthCheckValidationError{ + field: "InitialMetadata", + reason: "value must contain no more than 1000 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetInitialMetadata() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheck_GrpcHealthCheckValidationError{ + field: fmt.Sprintf("InitialMetadata[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheck_GrpcHealthCheckValidationError{ + field: fmt.Sprintf("InitialMetadata[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheck_GrpcHealthCheckValidationError{ + field: fmt.Sprintf("InitialMetadata[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return HealthCheck_GrpcHealthCheckMultiError(errors) + } + + return nil +} + +// HealthCheck_GrpcHealthCheckMultiError is an error wrapping multiple +// validation errors returned by HealthCheck_GrpcHealthCheck.ValidateAll() if +// the designated constraints aren't met. +type HealthCheck_GrpcHealthCheckMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HealthCheck_GrpcHealthCheckMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HealthCheck_GrpcHealthCheckMultiError) AllErrors() []error { return m } + +// HealthCheck_GrpcHealthCheckValidationError is the validation error returned +// by HealthCheck_GrpcHealthCheck.Validate if the designated constraints +// aren't met. +type HealthCheck_GrpcHealthCheckValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HealthCheck_GrpcHealthCheckValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HealthCheck_GrpcHealthCheckValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HealthCheck_GrpcHealthCheckValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HealthCheck_GrpcHealthCheckValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HealthCheck_GrpcHealthCheckValidationError) ErrorName() string { + return "HealthCheck_GrpcHealthCheckValidationError" +} + +// Error satisfies the builtin error interface +func (e HealthCheck_GrpcHealthCheckValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHealthCheck_GrpcHealthCheck.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HealthCheck_GrpcHealthCheckValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HealthCheck_GrpcHealthCheckValidationError{} + +var _HealthCheck_GrpcHealthCheck_Authority_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on HealthCheck_CustomHealthCheck with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HealthCheck_CustomHealthCheck) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HealthCheck_CustomHealthCheck with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// HealthCheck_CustomHealthCheckMultiError, or nil if none found. +func (m *HealthCheck_CustomHealthCheck) ValidateAll() error { + return m.validate(true) +} + +func (m *HealthCheck_CustomHealthCheck) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := HealthCheck_CustomHealthCheckValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + switch v := m.ConfigType.(type) { + case *HealthCheck_CustomHealthCheck_TypedConfig: + if v == nil { + err := HealthCheck_CustomHealthCheckValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HealthCheck_CustomHealthCheckValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HealthCheck_CustomHealthCheckValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HealthCheck_CustomHealthCheckValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return HealthCheck_CustomHealthCheckMultiError(errors) + } + + return nil +} + +// HealthCheck_CustomHealthCheckMultiError is an error wrapping multiple +// validation errors returned by HealthCheck_CustomHealthCheck.ValidateAll() +// if the designated constraints aren't met. +type HealthCheck_CustomHealthCheckMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m HealthCheck_CustomHealthCheckMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HealthCheck_CustomHealthCheckMultiError) AllErrors() []error { return m } + +// HealthCheck_CustomHealthCheckValidationError is the validation error +// returned by HealthCheck_CustomHealthCheck.Validate if the designated +// constraints aren't met. +type HealthCheck_CustomHealthCheckValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HealthCheck_CustomHealthCheckValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HealthCheck_CustomHealthCheckValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HealthCheck_CustomHealthCheckValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HealthCheck_CustomHealthCheckValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HealthCheck_CustomHealthCheckValidationError) ErrorName() string { + return "HealthCheck_CustomHealthCheckValidationError" +} + +// Error satisfies the builtin error interface +func (e HealthCheck_CustomHealthCheckValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHealthCheck_CustomHealthCheck.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HealthCheck_CustomHealthCheckValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HealthCheck_CustomHealthCheckValidationError{} + +// Validate checks the field values on HealthCheck_TlsOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HealthCheck_TlsOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HealthCheck_TlsOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HealthCheck_TlsOptionsMultiError, or nil if none found. +func (m *HealthCheck_TlsOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *HealthCheck_TlsOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return HealthCheck_TlsOptionsMultiError(errors) + } + + return nil +} + +// HealthCheck_TlsOptionsMultiError is an error wrapping multiple validation +// errors returned by HealthCheck_TlsOptions.ValidateAll() if the designated +// constraints aren't met. +type HealthCheck_TlsOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HealthCheck_TlsOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m HealthCheck_TlsOptionsMultiError) AllErrors() []error { return m } + +// HealthCheck_TlsOptionsValidationError is the validation error returned by +// HealthCheck_TlsOptions.Validate if the designated constraints aren't met. +type HealthCheck_TlsOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HealthCheck_TlsOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HealthCheck_TlsOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HealthCheck_TlsOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HealthCheck_TlsOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HealthCheck_TlsOptionsValidationError) ErrorName() string { + return "HealthCheck_TlsOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e HealthCheck_TlsOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHealthCheck_TlsOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HealthCheck_TlsOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HealthCheck_TlsOptionsValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check_vtproto.pb.go new file mode 100644 index 0000000000..892ea5e4ee --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/health_check_vtproto.pb.go @@ -0,0 +1,1384 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/health_check.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *HealthStatusSet) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthStatusSet) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthStatusSet) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Statuses) > 0 { + var pksize2 int + for _, num := range m.Statuses { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.Statuses { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck_Payload) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck_Payload) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_Payload) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Payload.(*HealthCheck_Payload_Binary); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Payload.(*HealthCheck_Payload_Text); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck_Payload_Text) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_Payload_Text) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Text) + copy(dAtA[i:], m.Text) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Text))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *HealthCheck_Payload_Binary) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_Payload_Binary) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Binary) + copy(dAtA[i:], m.Binary) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Binary))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *HealthCheck_HttpHealthCheck) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck_HttpHealthCheck) MarshalToVTStrict(dAtA []byte) (int, error) { 
+ size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_HttpHealthCheck) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ResponseBufferSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.ResponseBufferSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if m.Method != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Method)) + i-- + dAtA[i] = 0x68 + } + if len(m.RetriableStatuses) > 0 { + for iNdEx := len(m.RetriableStatuses) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.RetriableStatuses[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RetriableStatuses[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x62 + } + } + if m.ServiceNameMatcher != nil { + if vtmsg, ok := interface{}(m.ServiceNameMatcher).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ServiceNameMatcher) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x5a + } + if m.CodecClientType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CodecClientType)) + i-- + dAtA[i] = 0x50 + } + if len(m.ExpectedStatuses) > 0 { + for iNdEx := len(m.ExpectedStatuses) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ExpectedStatuses[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ExpectedStatuses[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + } + if len(m.RequestHeadersToRemove) > 0 { + for iNdEx := len(m.RequestHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequestHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.RequestHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RequestHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if len(m.RequestHeadersToAdd) > 0 { + for iNdEx := len(m.RequestHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RequestHeadersToAdd[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Receive) > 0 { + for iNdEx := len(m.Receive) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Receive[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if m.Send != nil { + size, err := m.Send.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + } + if len(m.Host) > 0 { + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck_TcpHealthCheck) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck_TcpHealthCheck) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_TcpHealthCheck) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ProxyProtocolConfig != nil { + size, err := m.ProxyProtocolConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Receive) > 0 { + for iNdEx := len(m.Receive) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Receive[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.Send != nil { + size, err := m.Send.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck_RedisHealthCheck) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck_RedisHealthCheck) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_RedisHealthCheck) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck_GrpcHealthCheck) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck_GrpcHealthCheck) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_GrpcHealthCheck) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.InitialMetadata) > 0 { + for iNdEx := len(m.InitialMetadata) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.InitialMetadata[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0x12 + } + if len(m.ServiceName) > 0 { + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck_CustomHealthCheck) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck_CustomHealthCheck) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_CustomHealthCheck) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*HealthCheck_CustomHealthCheck_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck_CustomHealthCheck_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_CustomHealthCheck_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *HealthCheck_TlsOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck_TlsOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_TlsOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.AlpnProtocols) > 0 { + 
for iNdEx := len(m.AlpnProtocols) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AlpnProtocols[iNdEx]) + copy(dAtA[i:], m.AlpnProtocols[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AlpnProtocols[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheck) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AlwaysLogHealthCheckSuccess { + i-- + if m.AlwaysLogHealthCheckSuccess { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd0 + } + if len(m.EventLogger) > 0 { + for iNdEx := len(m.EventLogger) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.EventLogger[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } + if m.NoTrafficHealthyInterval != nil { + size, err := (*durationpb.Duration)(m.NoTrafficHealthyInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if m.TransportSocketMatchCriteria != nil { + size, err := (*structpb.Struct)(m.TransportSocketMatchCriteria).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if m.EventService != nil { + size, err := m.EventService.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if m.TlsOptions != nil { + size, err := m.TlsOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if m.InitialJitter != nil { + size, err := (*durationpb.Duration)(m.InitialJitter).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if m.AlwaysLogHealthCheckFailures { + i-- + if m.AlwaysLogHealthCheckFailures { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if m.IntervalJitterPercent != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.IntervalJitterPercent)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + } + if len(m.EventLogPath) > 0 { + i -= len(m.EventLogPath) + copy(dAtA[i:], m.EventLogPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.EventLogPath))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.HealthyEdgeInterval != nil { + size, err := (*durationpb.Duration)(m.HealthyEdgeInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + 
} + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.UnhealthyEdgeInterval != nil { + size, err := (*durationpb.Duration)(m.UnhealthyEdgeInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + if m.UnhealthyInterval != nil { + size, err := (*durationpb.Duration)(m.UnhealthyInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if msg, ok := m.HealthChecker.(*HealthCheck_CustomHealthCheck_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.NoTrafficInterval != nil { + size, err := (*durationpb.Duration)(m.NoTrafficInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if msg, ok := m.HealthChecker.(*HealthCheck_GrpcHealthCheck_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HealthChecker.(*HealthCheck_TcpHealthCheck_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HealthChecker.(*HealthCheck_HttpHealthCheck_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.ReuseConnection != nil { + size, err := (*wrapperspb.BoolValue)(m.ReuseConnection).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.AltPort != nil { + size, err := (*wrapperspb.UInt32Value)(m.AltPort).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.HealthyThreshold != nil { + size, err := (*wrapperspb.UInt32Value)(m.HealthyThreshold).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.UnhealthyThreshold != nil { + size, err := (*wrapperspb.UInt32Value)(m.UnhealthyThreshold).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.IntervalJitter != nil { + size, err := (*durationpb.Duration)(m.IntervalJitter).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.Interval != nil { + size, err := (*durationpb.Duration)(m.Interval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Timeout != nil { + size, err := (*durationpb.Duration)(m.Timeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HealthCheck_HttpHealthCheck_) MarshalToVTStrict(dAtA []byte) (int, error) { 
+ size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_HttpHealthCheck_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpHealthCheck != nil { + size, err := m.HttpHealthCheck.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *HealthCheck_TcpHealthCheck_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_TcpHealthCheck_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TcpHealthCheck != nil { + size, err := m.TcpHealthCheck.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *HealthCheck_GrpcHealthCheck_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_GrpcHealthCheck_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GrpcHealthCheck != nil { + size, err := m.GrpcHealthCheck.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *HealthCheck_CustomHealthCheck_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HealthCheck_CustomHealthCheck_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.CustomHealthCheck != nil { + size, err := m.CustomHealthCheck.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x6a + } + return len(dAtA) - i, nil +} +func (m *HealthStatusSet) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Statuses) > 0 { + l = 0 + for _, e := range m.Statuses { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck_Payload) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Payload.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck_Payload_Text) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Text) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HealthCheck_Payload_Binary) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Binary) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HealthCheck_HttpHealthCheck) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = 
len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Send != nil { + l = m.Send.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Receive) > 0 { + for _, e := range m.Receive { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RequestHeadersToAdd) > 0 { + for _, e := range m.RequestHeadersToAdd { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RequestHeadersToRemove) > 0 { + for _, s := range m.RequestHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ExpectedStatuses) > 0 { + for _, e := range m.ExpectedStatuses { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.CodecClientType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.CodecClientType)) + } + if m.ServiceNameMatcher != nil { + if size, ok := interface{}(m.ServiceNameMatcher).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ServiceNameMatcher) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RetriableStatuses) > 0 { + for _, e := range m.RetriableStatuses { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Method != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Method)) + } + if m.ResponseBufferSize != nil { + l = (*wrapperspb.UInt64Value)(m.ResponseBufferSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck_TcpHealthCheck) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Send != nil { + l = m.Send.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Receive) > 0 { + for _, e := range m.Receive { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.ProxyProtocolConfig != nil { + l = m.ProxyProtocolConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck_RedisHealthCheck) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck_GrpcHealthCheck) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ServiceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Authority) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.InitialMetadata) > 0 { + for _, e := range m.InitialMetadata { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck_CustomHealthCheck) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck_CustomHealthCheck_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = 
(*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HealthCheck_TlsOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.AlpnProtocols) > 0 { + for _, s := range m.AlpnProtocols { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Timeout != nil { + l = (*durationpb.Duration)(m.Timeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Interval != nil { + l = (*durationpb.Duration)(m.Interval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IntervalJitter != nil { + l = (*durationpb.Duration)(m.IntervalJitter).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UnhealthyThreshold != nil { + l = (*wrapperspb.UInt32Value)(m.UnhealthyThreshold).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HealthyThreshold != nil { + l = (*wrapperspb.UInt32Value)(m.HealthyThreshold).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AltPort != nil { + l = (*wrapperspb.UInt32Value)(m.AltPort).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ReuseConnection != nil { + l = (*wrapperspb.BoolValue)(m.ReuseConnection).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.HealthChecker.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.NoTrafficInterval != nil { + l = (*durationpb.Duration)(m.NoTrafficInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UnhealthyInterval != nil { + l = (*durationpb.Duration)(m.UnhealthyInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UnhealthyEdgeInterval != nil { + l = (*durationpb.Duration)(m.UnhealthyEdgeInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HealthyEdgeInterval != nil { + l = (*durationpb.Duration)(m.HealthyEdgeInterval).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.EventLogPath) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IntervalJitterPercent != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.IntervalJitterPercent)) + } + if m.AlwaysLogHealthCheckFailures { + n += 3 + } + if m.InitialJitter != nil { + l = (*durationpb.Duration)(m.InitialJitter).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TlsOptions != nil { + l = m.TlsOptions.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EventService != nil { + l = m.EventService.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TransportSocketMatchCriteria != nil { + l = (*structpb.Struct)(m.TransportSocketMatchCriteria).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.NoTrafficHealthyInterval != nil { + l = (*durationpb.Duration)(m.NoTrafficHealthyInterval).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.EventLogger) > 0 { + for _, e := range m.EventLogger { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.AlwaysLogHealthCheckSuccess { + n += 3 + } + n += len(m.unknownFields) + return n +} + +func (m *HealthCheck_HttpHealthCheck_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpHealthCheck != nil { + l = 
m.HttpHealthCheck.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HealthCheck_TcpHealthCheck_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TcpHealthCheck != nil { + l = m.TcpHealthCheck.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HealthCheck_GrpcHealthCheck_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GrpcHealthCheck != nil { + l = m.GrpcHealthCheck.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HealthCheck_CustomHealthCheck_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CustomHealthCheck != nil { + l = m.CustomHealthCheck.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service.pb.go new file mode 100644 index 0000000000..ec8d54bb74 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service.pb.go @@ -0,0 +1,194 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/http_service.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// HTTP service configuration. +type HttpService struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The service's HTTP URI. For example: + // + // .. code-block:: yaml + // + // http_uri: + // uri: https://www.myserviceapi.com/v1/data + // cluster: www.myserviceapi.com|443 + HttpUri *HttpUri `protobuf:"bytes,1,opt,name=http_uri,json=httpUri,proto3" json:"http_uri,omitempty"` + // Specifies a list of HTTP headers that should be added to each request + // handled by this virtual host. + RequestHeadersToAdd []*HeaderValueOption `protobuf:"bytes,2,rep,name=request_headers_to_add,json=requestHeadersToAdd,proto3" json:"request_headers_to_add,omitempty"` +} + +func (x *HttpService) Reset() { + *x = HttpService{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_http_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpService) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpService) ProtoMessage() {} + +func (x *HttpService) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_http_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpService.ProtoReflect.Descriptor instead. 
+func (*HttpService) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_http_service_proto_rawDescGZIP(), []int{0} +} + +func (x *HttpService) GetHttpUri() *HttpUri { + if x != nil { + return x.HttpUri + } + return nil +} + +func (x *HttpService) GetRequestHeadersToAdd() []*HeaderValueOption { + if x != nil { + return x.RequestHeadersToAdd + } + return nil +} + +var File_envoy_config_core_v3_http_service_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_http_service_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, + 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, 0x69, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb0, 0x01, + 0x0a, 0x0b, 0x48, 0x74, 0x74, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x38, 0x0a, + 0x08, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x55, 0x72, 0x69, 0x52, 0x07, + 0x68, 0x74, 0x74, 0x70, 0x55, 0x72, 0x69, 0x12, 0x67, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, + 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, + 0x42, 0x84, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, + 0x10, 0x48, 0x74, 0x74, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 
0x76, 0x33, + 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_http_service_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_http_service_proto_rawDescData = file_envoy_config_core_v3_http_service_proto_rawDesc +) + +func file_envoy_config_core_v3_http_service_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_http_service_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_http_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_http_service_proto_rawDescData) + }) + return file_envoy_config_core_v3_http_service_proto_rawDescData +} + +var file_envoy_config_core_v3_http_service_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_core_v3_http_service_proto_goTypes = []interface{}{ + (*HttpService)(nil), // 0: envoy.config.core.v3.HttpService + (*HttpUri)(nil), // 1: envoy.config.core.v3.HttpUri + (*HeaderValueOption)(nil), // 2: envoy.config.core.v3.HeaderValueOption +} +var file_envoy_config_core_v3_http_service_proto_depIdxs = []int32{ + 1, // 0: envoy.config.core.v3.HttpService.http_uri:type_name -> envoy.config.core.v3.HttpUri + 2, // 1: envoy.config.core.v3.HttpService.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_http_service_proto_init() } +func file_envoy_config_core_v3_http_service_proto_init() { + if File_envoy_config_core_v3_http_service_proto != nil { + return + } + file_envoy_config_core_v3_base_proto_init() + file_envoy_config_core_v3_http_uri_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_http_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpService); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_http_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_http_service_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_http_service_proto_depIdxs, + MessageInfos: file_envoy_config_core_v3_http_service_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_http_service_proto = out.File + file_envoy_config_core_v3_http_service_proto_rawDesc = nil + file_envoy_config_core_v3_http_service_proto_goTypes = nil + file_envoy_config_core_v3_http_service_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service.pb.validate.go new file mode 100644 index 0000000000..f1ce3fed0c --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service.pb.validate.go @@ -0,0 +1,210 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/config/core/v3/http_service.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on HttpService with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HttpService) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpService with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HttpServiceMultiError, or +// nil if none found. +func (m *HttpService) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpService) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetHttpUri()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpServiceValidationError{ + field: "HttpUri", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpServiceValidationError{ + field: "HttpUri", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpUri()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpServiceValidationError{ + field: "HttpUri", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(m.GetRequestHeadersToAdd()) > 1000 { + err := HttpServiceValidationError{ + field: "RequestHeadersToAdd", + reason: "value must contain no more than 1000 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRequestHeadersToAdd() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpServiceValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpServiceValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpServiceValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return HttpServiceMultiError(errors) + } + + return nil +} + +// HttpServiceMultiError is an error wrapping multiple validation errors +// returned by HttpService.ValidateAll() if the designated constraints aren't met. 
+type HttpServiceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpServiceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpServiceMultiError) AllErrors() []error { return m } + +// HttpServiceValidationError is the validation error returned by +// HttpService.Validate if the designated constraints aren't met. +type HttpServiceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpServiceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpServiceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpServiceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpServiceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpServiceValidationError) ErrorName() string { return "HttpServiceValidationError" } + +// Error satisfies the builtin error interface +func (e HttpServiceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpService.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpServiceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpServiceValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service_vtproto.pb.go new file mode 100644 index 0000000000..64e0ece670 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_service_vtproto.pb.go @@ -0,0 +1,94 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/http_service.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *HttpService) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpService) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpService) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RequestHeadersToAdd) > 0 { + for iNdEx := len(m.RequestHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RequestHeadersToAdd[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.HttpUri != nil { + size, err := m.HttpUri.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpService) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpUri != nil { + l = m.HttpUri.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RequestHeadersToAdd) > 0 { + for _, e := range m.RequestHeadersToAdd { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.go new file mode 100644 index 0000000000..c1ba4357f5 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.go @@ -0,0 +1,236 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/http_uri.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Envoy external URI descriptor +type HttpUri struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The HTTP server URI. It should be a full FQDN with protocol, host and path. + // + // Example: + // + // .. code-block:: yaml + // + // uri: https://www.googleapis.com/oauth2/v1/certs + Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"` + // Specify how “uri“ is to be fetched. Today, this requires an explicit + // cluster, but in the future we may support dynamic cluster creation or + // inline DNS resolution. See `issue + // `_. 
+ // + // Types that are assignable to HttpUpstreamType: + // + // *HttpUri_Cluster + HttpUpstreamType isHttpUri_HttpUpstreamType `protobuf_oneof:"http_upstream_type"` + // Sets the maximum duration in milliseconds that a response can take to arrive upon request. + Timeout *durationpb.Duration `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"` +} + +func (x *HttpUri) Reset() { + *x = HttpUri{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_http_uri_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpUri) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpUri) ProtoMessage() {} + +func (x *HttpUri) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_http_uri_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpUri.ProtoReflect.Descriptor instead. +func (*HttpUri) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_http_uri_proto_rawDescGZIP(), []int{0} +} + +func (x *HttpUri) GetUri() string { + if x != nil { + return x.Uri + } + return "" +} + +func (m *HttpUri) GetHttpUpstreamType() isHttpUri_HttpUpstreamType { + if m != nil { + return m.HttpUpstreamType + } + return nil +} + +func (x *HttpUri) GetCluster() string { + if x, ok := x.GetHttpUpstreamType().(*HttpUri_Cluster); ok { + return x.Cluster + } + return "" +} + +func (x *HttpUri) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout + } + return nil +} + +type isHttpUri_HttpUpstreamType interface { + isHttpUri_HttpUpstreamType() +} + +type HttpUri_Cluster struct { + // A cluster is created in the Envoy "cluster_manager" config + // section. This field specifies the cluster name. + // + // Example: + // + // .. 
code-block:: yaml + // + // cluster: jwks_cluster + Cluster string `protobuf:"bytes,2,opt,name=cluster,proto3,oneof"` +} + +func (*HttpUri_Cluster) isHttpUri_HttpUpstreamType() {} + +var File_envoy_config_core_v3_http_uri_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_http_uri_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, 0x69, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcf, 0x01, 0x0a, 0x07, 0x48, 0x74, 0x74, 0x70, 0x55, + 0x72, 0x69, 0x12, 0x19, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x23, 0x0a, + 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x12, 0x47, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x12, + 0xfa, 0x42, 0x0f, 0xaa, 0x01, 0x0c, 0x08, 0x01, 0x1a, 0x06, 0x08, 0x80, 0x80, 0x80, 0x80, 0x10, + 0x32, 0x00, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x3a, 0x20, 0x9a, 0xc5, 0x88, + 0x1e, 0x1b, 0x0a, 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x55, 0x72, 0x69, 0x42, 0x19, 0x0a, + 0x12, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x80, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, + 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x48, 0x74, 0x74, 0x70, 0x55, 0x72, 0x69, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, + 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, + 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 
0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_http_uri_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_http_uri_proto_rawDescData = file_envoy_config_core_v3_http_uri_proto_rawDesc +) + +func file_envoy_config_core_v3_http_uri_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_http_uri_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_http_uri_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_http_uri_proto_rawDescData) + }) + return file_envoy_config_core_v3_http_uri_proto_rawDescData +} + +var file_envoy_config_core_v3_http_uri_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_core_v3_http_uri_proto_goTypes = []interface{}{ + (*HttpUri)(nil), // 0: envoy.config.core.v3.HttpUri + (*durationpb.Duration)(nil), // 1: google.protobuf.Duration +} +var file_envoy_config_core_v3_http_uri_proto_depIdxs = []int32{ + 1, // 0: envoy.config.core.v3.HttpUri.timeout:type_name -> google.protobuf.Duration + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_http_uri_proto_init() } +func file_envoy_config_core_v3_http_uri_proto_init() { + if File_envoy_config_core_v3_http_uri_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_http_uri_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpUri); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_core_v3_http_uri_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*HttpUri_Cluster)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_http_uri_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_http_uri_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_http_uri_proto_depIdxs, + MessageInfos: file_envoy_config_core_v3_http_uri_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_http_uri_proto = out.File + file_envoy_config_core_v3_http_uri_proto_rawDesc = nil + file_envoy_config_core_v3_http_uri_proto_goTypes = nil + file_envoy_config_core_v3_http_uri_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.validate.go new file mode 100644 index 0000000000..bbf40c7ce9 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri.pb.validate.go @@ -0,0 +1,228 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/config/core/v3/http_uri.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on HttpUri with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HttpUri) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpUri with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in HttpUriMultiError, or nil if none found. +func (m *HttpUri) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpUri) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetUri()) < 1 { + err := HttpUriValidationError{ + field: "Uri", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetTimeout() == nil { + err := HttpUriValidationError{ + field: "Timeout", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if d := m.GetTimeout(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = HttpUriValidationError{ + field: "Timeout", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + lt := time.Duration(4294967296*time.Second + 0*time.Nanosecond) + gte := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur < gte || dur >= lt { + err := HttpUriValidationError{ + field: "Timeout", + reason: "value must be inside range [0s, 1193046h28m16s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + oneofHttpUpstreamTypePresent := false + switch v := m.HttpUpstreamType.(type) { + case *HttpUri_Cluster: + if v == nil { + err := HttpUriValidationError{ + field: "HttpUpstreamType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofHttpUpstreamTypePresent = true + + if utf8.RuneCountInString(m.GetCluster()) < 1 { + err := HttpUriValidationError{ + field: "Cluster", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + default: + _ = v // ensures v is used + } + if !oneofHttpUpstreamTypePresent { + err := HttpUriValidationError{ + field: "HttpUpstreamType", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HttpUriMultiError(errors) + } + + return nil +} + +// HttpUriMultiError is an error wrapping multiple validation errors returned +// by HttpUri.ValidateAll() if the designated constraints aren't met. +type HttpUriMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m HttpUriMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpUriMultiError) AllErrors() []error { return m } + +// HttpUriValidationError is the validation error returned by HttpUri.Validate +// if the designated constraints aren't met. +type HttpUriValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpUriValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpUriValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpUriValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpUriValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpUriValidationError) ErrorName() string { return "HttpUriValidationError" } + +// Error satisfies the builtin error interface +func (e HttpUriValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpUri.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpUriValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpUriValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri_vtproto.pb.go new file mode 100644 index 0000000000..73cd12f13a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/http_uri_vtproto.pb.go @@ -0,0 +1,123 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/http_uri.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *HttpUri) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpUri) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpUri) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Timeout != nil { + size, err := (*durationpb.Duration)(m.Timeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if msg, ok := m.HttpUpstreamType.(*HttpUri_Cluster); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Uri) > 0 { + i -= len(m.Uri) + copy(dAtA[i:], m.Uri) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Uri))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpUri_Cluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpUri_Cluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Cluster) + copy(dAtA[i:], m.Cluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Cluster))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *HttpUri) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Uri) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.HttpUpstreamType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Timeout != nil { + l = (*durationpb.Duration)(m.Timeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpUri_Cluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Cluster) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go new file mode 100644 index 0000000000..70be28739a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go @@ -0,0 +1,2453 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/protocol.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/cncf/xds/go/xds/annotations/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Action to take when Envoy receives client request with header names containing underscore +// characters. +// Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented +// as a security measure due to systems that treat '_' and '-' as interchangeable. Envoy by default allows client request headers with underscore +// characters. +type HttpProtocolOptions_HeadersWithUnderscoresAction int32 + +const ( + // Allow headers with underscores. This is the default behavior. + HttpProtocolOptions_ALLOW HttpProtocolOptions_HeadersWithUnderscoresAction = 0 + // Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests + // end with the stream reset. The "httpN.requests_rejected_with_underscores_in_headers" counter + // is incremented for each rejected request. + HttpProtocolOptions_REJECT_REQUEST HttpProtocolOptions_HeadersWithUnderscoresAction = 1 + // Drop the client header with name containing underscores. The header is dropped before the filter chain is + // invoked and as such filters will not see dropped headers. The + // "httpN.dropped_headers_with_underscores" is incremented for each dropped header. + HttpProtocolOptions_DROP_HEADER HttpProtocolOptions_HeadersWithUnderscoresAction = 2 +) + +// Enum value maps for HttpProtocolOptions_HeadersWithUnderscoresAction. +var ( + HttpProtocolOptions_HeadersWithUnderscoresAction_name = map[int32]string{ + 0: "ALLOW", + 1: "REJECT_REQUEST", + 2: "DROP_HEADER", + } + HttpProtocolOptions_HeadersWithUnderscoresAction_value = map[string]int32{ + "ALLOW": 0, + "REJECT_REQUEST": 1, + "DROP_HEADER": 2, + } +) + +func (x HttpProtocolOptions_HeadersWithUnderscoresAction) Enum() *HttpProtocolOptions_HeadersWithUnderscoresAction { + p := new(HttpProtocolOptions_HeadersWithUnderscoresAction) + *p = x + return p +} + +func (x HttpProtocolOptions_HeadersWithUnderscoresAction) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HttpProtocolOptions_HeadersWithUnderscoresAction) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_protocol_proto_enumTypes[0].Descriptor() +} + +func (HttpProtocolOptions_HeadersWithUnderscoresAction) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_protocol_proto_enumTypes[0] +} + +func (x HttpProtocolOptions_HeadersWithUnderscoresAction) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HttpProtocolOptions_HeadersWithUnderscoresAction.Descriptor instead. 
+func (HttpProtocolOptions_HeadersWithUnderscoresAction) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{5, 0} +} + +// [#not-implemented-hide:] +type TcpProtocolOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *TcpProtocolOptions) Reset() { + *x = TcpProtocolOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TcpProtocolOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TcpProtocolOptions) ProtoMessage() {} + +func (x *TcpProtocolOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TcpProtocolOptions.ProtoReflect.Descriptor instead. +func (*TcpProtocolOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{0} +} + +// Config for keepalive probes in a QUIC connection. +// Note that QUIC keep-alive probing packets work differently from HTTP/2 keep-alive PINGs in a sense that the probing packet +// itself doesn't timeout waiting for a probing response. Quic has a shorter idle timeout than TCP, so it doesn't rely on such probing to discover dead connections. If the peer fails to respond, the connection will idle timeout eventually. Thus, they are configured differently from :ref:`connection_keepalive `. +type QuicKeepAliveSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The max interval for a connection to send keep-alive probing packets (with PING or PATH_RESPONSE). The value should be smaller than :ref:`connection idle_timeout ` to prevent idle timeout while not less than 1s to avoid throttling the connection or flooding the peer with probes. + // + // If :ref:`initial_interval ` is absent or zero, a client connection will use this value to start probing. + // + // If zero, disable keepalive probing. + // If absent, use the QUICHE default interval to probe. + MaxInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=max_interval,json=maxInterval,proto3" json:"max_interval,omitempty"` + // The interval to send the first few keep-alive probing packets to prevent connection from hitting the idle timeout. Subsequent probes will be sent, each one with an interval exponentially longer than previous one, till it reaches :ref:`max_interval `. And the probes afterwards will always use :ref:`max_interval `. + // + // The value should be smaller than :ref:`connection idle_timeout ` to prevent idle timeout and smaller than max_interval to take effect. + // + // If absent or zero, disable keepalive probing for a server connection. For a client connection, if :ref:`max_interval ` is also zero, do not keepalive, otherwise use max_interval or QUICHE default to probe all the time. 
+ InitialInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=initial_interval,json=initialInterval,proto3" json:"initial_interval,omitempty"` +} + +func (x *QuicKeepAliveSettings) Reset() { + *x = QuicKeepAliveSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QuicKeepAliveSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QuicKeepAliveSettings) ProtoMessage() {} + +func (x *QuicKeepAliveSettings) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QuicKeepAliveSettings.ProtoReflect.Descriptor instead. +func (*QuicKeepAliveSettings) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{1} +} + +func (x *QuicKeepAliveSettings) GetMaxInterval() *durationpb.Duration { + if x != nil { + return x.MaxInterval + } + return nil +} + +func (x *QuicKeepAliveSettings) GetInitialInterval() *durationpb.Duration { + if x != nil { + return x.InitialInterval + } + return nil +} + +// QUIC protocol options which apply to both downstream and upstream connections. +// [#next-free-field: 9] +type QuicProtocolOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Maximum number of streams that the client can negotiate per connection. 100 + // if not specified. + MaxConcurrentStreams *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=max_concurrent_streams,json=maxConcurrentStreams,proto3" json:"max_concurrent_streams,omitempty"` + // `Initial stream-level flow-control receive window + // `_ size. Valid values range from + // 1 to 16777216 (2^24, maximum supported by QUICHE) and defaults to 16777216 (16 * 1024 * 1024). + // + // NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. If configured smaller than it, we will use 16384 instead. + // QUICHE IETF Quic implementation supports 1 bytes window. We only support increasing the default window size now, so it's also the minimum. + // + // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the + // QUIC stream send and receive buffers. Once the buffer reaches this pointer, watermark callbacks will fire to + // stop the flow of data to the stream buffers. + InitialStreamWindowSize *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=initial_stream_window_size,json=initialStreamWindowSize,proto3" json:"initial_stream_window_size,omitempty"` + // Similar to “initial_stream_window_size“, but for connection-level + // flow-control. Valid values rage from 1 to 25165824 (24MB, maximum supported by QUICHE) and defaults + // to 25165824 (24 * 1024 * 1024). + // + // NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. We only support increasing the default + // window size now, so it's also the minimum. + InitialConnectionWindowSize *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=initial_connection_window_size,json=initialConnectionWindowSize,proto3" json:"initial_connection_window_size,omitempty"` + // The number of timeouts that can occur before port migration is triggered for QUIC clients. 
+ // This defaults to 4. If set to 0, port migration will not occur on path degrading. + // Timeout here refers to QUIC internal path degrading timeout mechanism, such as PTO. + // This has no effect on server sessions. + NumTimeoutsToTriggerPortMigration *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=num_timeouts_to_trigger_port_migration,json=numTimeoutsToTriggerPortMigration,proto3" json:"num_timeouts_to_trigger_port_migration,omitempty"` + // Probes the peer at the configured interval to solicit traffic, i.e. ACK or PATH_RESPONSE, from the peer to push back connection idle timeout. + // If absent, use the default keepalive behavior of which a client connection sends PINGs every 15s, and a server connection doesn't do anything. + ConnectionKeepalive *QuicKeepAliveSettings `protobuf:"bytes,5,opt,name=connection_keepalive,json=connectionKeepalive,proto3" json:"connection_keepalive,omitempty"` + // A comma-separated list of strings representing QUIC connection options defined in + // `QUICHE `_ and to be sent by upstream connections. + ConnectionOptions string `protobuf:"bytes,6,opt,name=connection_options,json=connectionOptions,proto3" json:"connection_options,omitempty"` + // A comma-separated list of strings representing QUIC client connection options defined in + // `QUICHE `_ and to be sent by upstream connections. + ClientConnectionOptions string `protobuf:"bytes,7,opt,name=client_connection_options,json=clientConnectionOptions,proto3" json:"client_connection_options,omitempty"` + // The duration that a QUIC connection stays idle before it closes itself. If this field is not present, QUICHE + // default 600s will be applied. + // For internal corporate network, a long timeout is often fine. + // But for client facing network, 30s is usually a good choice. + IdleNetworkTimeout *durationpb.Duration `protobuf:"bytes,8,opt,name=idle_network_timeout,json=idleNetworkTimeout,proto3" json:"idle_network_timeout,omitempty"` +} + +func (x *QuicProtocolOptions) Reset() { + *x = QuicProtocolOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QuicProtocolOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QuicProtocolOptions) ProtoMessage() {} + +func (x *QuicProtocolOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QuicProtocolOptions.ProtoReflect.Descriptor instead. 
+func (*QuicProtocolOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{2} +} + +func (x *QuicProtocolOptions) GetMaxConcurrentStreams() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxConcurrentStreams + } + return nil +} + +func (x *QuicProtocolOptions) GetInitialStreamWindowSize() *wrapperspb.UInt32Value { + if x != nil { + return x.InitialStreamWindowSize + } + return nil +} + +func (x *QuicProtocolOptions) GetInitialConnectionWindowSize() *wrapperspb.UInt32Value { + if x != nil { + return x.InitialConnectionWindowSize + } + return nil +} + +func (x *QuicProtocolOptions) GetNumTimeoutsToTriggerPortMigration() *wrapperspb.UInt32Value { + if x != nil { + return x.NumTimeoutsToTriggerPortMigration + } + return nil +} + +func (x *QuicProtocolOptions) GetConnectionKeepalive() *QuicKeepAliveSettings { + if x != nil { + return x.ConnectionKeepalive + } + return nil +} + +func (x *QuicProtocolOptions) GetConnectionOptions() string { + if x != nil { + return x.ConnectionOptions + } + return "" +} + +func (x *QuicProtocolOptions) GetClientConnectionOptions() string { + if x != nil { + return x.ClientConnectionOptions + } + return "" +} + +func (x *QuicProtocolOptions) GetIdleNetworkTimeout() *durationpb.Duration { + if x != nil { + return x.IdleNetworkTimeout + } + return nil +} + +type UpstreamHttpProtocolOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Set transport socket `SNI `_ for new + // upstream connections based on the downstream HTTP host/authority header or any other arbitrary + // header when :ref:`override_auto_sni_header ` + // is set, as seen by the :ref:`router filter `. + // Does nothing if a filter before the http router filter sets the corresponding metadata. + AutoSni bool `protobuf:"varint,1,opt,name=auto_sni,json=autoSni,proto3" json:"auto_sni,omitempty"` + // Automatic validate upstream presented certificate for new upstream connections based on the + // downstream HTTP host/authority header or any other arbitrary header when :ref:`override_auto_sni_header ` + // is set, as seen by the :ref:`router filter `. + // This field is intended to be set with “auto_sni“ field. + // Does nothing if a filter before the http router filter sets the corresponding metadata. + AutoSanValidation bool `protobuf:"varint,2,opt,name=auto_san_validation,json=autoSanValidation,proto3" json:"auto_san_validation,omitempty"` + // An optional alternative to the host/authority header to be used for setting the SNI value. + // It should be a valid downstream HTTP header, as seen by the + // :ref:`router filter `. + // If unset, host/authority header will be used for populating the SNI. If the specified header + // is not found or the value is empty, host/authority header will be used instead. + // This field is intended to be set with “auto_sni“ and/or “auto_san_validation“ fields. + // If none of these fields are set then setting this would be a no-op. + // Does nothing if a filter before the http router filter sets the corresponding metadata. 
+ OverrideAutoSniHeader string `protobuf:"bytes,3,opt,name=override_auto_sni_header,json=overrideAutoSniHeader,proto3" json:"override_auto_sni_header,omitempty"` +} + +func (x *UpstreamHttpProtocolOptions) Reset() { + *x = UpstreamHttpProtocolOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpstreamHttpProtocolOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpstreamHttpProtocolOptions) ProtoMessage() {} + +func (x *UpstreamHttpProtocolOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpstreamHttpProtocolOptions.ProtoReflect.Descriptor instead. +func (*UpstreamHttpProtocolOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{3} +} + +func (x *UpstreamHttpProtocolOptions) GetAutoSni() bool { + if x != nil { + return x.AutoSni + } + return false +} + +func (x *UpstreamHttpProtocolOptions) GetAutoSanValidation() bool { + if x != nil { + return x.AutoSanValidation + } + return false +} + +func (x *UpstreamHttpProtocolOptions) GetOverrideAutoSniHeader() string { + if x != nil { + return x.OverrideAutoSniHeader + } + return "" +} + +// Configures the alternate protocols cache which tracks alternate protocols that can be used to +// make an HTTP connection to an origin server. See https://tools.ietf.org/html/rfc7838 for +// HTTP Alternative Services and https://datatracker.ietf.org/doc/html/draft-ietf-dnsop-svcb-https-04 +// for the "HTTPS" DNS resource record. +// [#next-free-field: 6] +type AlternateProtocolsCacheOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the cache. Multiple named caches allow independent alternate protocols cache + // configurations to operate within a single Envoy process using different configurations. All + // alternate protocols cache options with the same name *must* be equal in all fields when + // referenced from different configuration components. Configuration will fail to load if this is + // not the case. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The maximum number of entries that the cache will hold. If not specified defaults to 1024. + // + // .. note: + // + // The implementation is approximate and enforced independently on each worker thread, thus + // it is possible for the maximum entries in the cache to go slightly above the configured + // value depending on timing. This is similar to how other circuit breakers work. + MaxEntries *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=max_entries,json=maxEntries,proto3" json:"max_entries,omitempty"` + // Allows configuring a persistent + // :ref:`key value store ` to flush + // alternate protocols entries to disk. + // This function is currently only supported if concurrency is 1 + // Cached entries will take precedence over pre-populated entries below. 
+ KeyValueStoreConfig *TypedExtensionConfig `protobuf:"bytes,3,opt,name=key_value_store_config,json=keyValueStoreConfig,proto3" json:"key_value_store_config,omitempty"` + // Allows pre-populating the cache with entries, as described above. + PrepopulatedEntries []*AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry `protobuf:"bytes,4,rep,name=prepopulated_entries,json=prepopulatedEntries,proto3" json:"prepopulated_entries,omitempty"` + // Optional list of hostnames suffixes for which Alt-Svc entries can be shared. For example, if + // this list contained the value “.c.example.com“, then an Alt-Svc entry for “foo.c.example.com“ + // could be shared with “bar.c.example.com“ but would not be shared with “baz.example.com“. On + // the other hand, if the list contained the value “.example.com“ then all three hosts could share + // Alt-Svc entries. Each entry must start with “.“. If a hostname matches multiple suffixes, the + // first listed suffix will be used. + // + // Since lookup in this list is O(n), it is recommended that the number of suffixes be limited. + // [#not-implemented-hide:] + CanonicalSuffixes []string `protobuf:"bytes,5,rep,name=canonical_suffixes,json=canonicalSuffixes,proto3" json:"canonical_suffixes,omitempty"` +} + +func (x *AlternateProtocolsCacheOptions) Reset() { + *x = AlternateProtocolsCacheOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlternateProtocolsCacheOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlternateProtocolsCacheOptions) ProtoMessage() {} + +func (x *AlternateProtocolsCacheOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlternateProtocolsCacheOptions.ProtoReflect.Descriptor instead. +func (*AlternateProtocolsCacheOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{4} +} + +func (x *AlternateProtocolsCacheOptions) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *AlternateProtocolsCacheOptions) GetMaxEntries() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxEntries + } + return nil +} + +func (x *AlternateProtocolsCacheOptions) GetKeyValueStoreConfig() *TypedExtensionConfig { + if x != nil { + return x.KeyValueStoreConfig + } + return nil +} + +func (x *AlternateProtocolsCacheOptions) GetPrepopulatedEntries() []*AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry { + if x != nil { + return x.PrepopulatedEntries + } + return nil +} + +func (x *AlternateProtocolsCacheOptions) GetCanonicalSuffixes() []string { + if x != nil { + return x.CanonicalSuffixes + } + return nil +} + +// [#next-free-field: 7] +type HttpProtocolOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The idle timeout for connections. The idle timeout is defined as the + // period in which there are no active requests. When the + // idle timeout is reached the connection will be closed. 
If the connection is an HTTP/2 + // downstream connection a drain sequence will occur prior to closing the connection, see + // :ref:`drain_timeout + // `. + // Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive. + // If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0. + // + // .. warning:: + // + // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP + // FIN packets, etc. + // + // If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" + // is configured, this timeout is scaled for downstream connections according to the value for + // :ref:`HTTP_DOWNSTREAM_CONNECTION_IDLE `. + IdleTimeout *durationpb.Duration `protobuf:"bytes,1,opt,name=idle_timeout,json=idleTimeout,proto3" json:"idle_timeout,omitempty"` + // The maximum duration of a connection. The duration is defined as a period since a connection + // was established. If not set, there is no max duration. When max_connection_duration is reached, + // the drain sequence will kick-in. The connection will be closed after the drain timeout period + // if there are no active streams. See :ref:`drain_timeout + // `. + MaxConnectionDuration *durationpb.Duration `protobuf:"bytes,3,opt,name=max_connection_duration,json=maxConnectionDuration,proto3" json:"max_connection_duration,omitempty"` + // The maximum number of headers. If unconfigured, the default + // maximum number of request headers allowed is 100. Requests that exceed this limit will receive + // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2. + MaxHeadersCount *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=max_headers_count,json=maxHeadersCount,proto3" json:"max_headers_count,omitempty"` + // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be + // reset independent of any other timeouts. If not specified, this value is not set. + MaxStreamDuration *durationpb.Duration `protobuf:"bytes,4,opt,name=max_stream_duration,json=maxStreamDuration,proto3" json:"max_stream_duration,omitempty"` + // Action to take when a client request with a header name containing underscore characters is received. + // If this setting is not specified, the value defaults to ALLOW. + // Note: upstream responses are not affected by this setting. + // Note: this only affects client headers. It does not affect headers added + // by Envoy filters and does not have any impact if added to cluster config. + HeadersWithUnderscoresAction HttpProtocolOptions_HeadersWithUnderscoresAction `protobuf:"varint,5,opt,name=headers_with_underscores_action,json=headersWithUnderscoresAction,proto3,enum=envoy.config.core.v3.HttpProtocolOptions_HeadersWithUnderscoresAction" json:"headers_with_underscores_action,omitempty"` + // Optional maximum requests for both upstream and downstream connections. + // If not specified, there is no limit. + // Setting this parameter to 1 will effectively disable keep alive. + // For HTTP/2 and HTTP/3, due to concurrent stream processing, the limit is approximate. 
+ MaxRequestsPerConnection *wrapperspb.UInt32Value `protobuf:"bytes,6,opt,name=max_requests_per_connection,json=maxRequestsPerConnection,proto3" json:"max_requests_per_connection,omitempty"` +} + +func (x *HttpProtocolOptions) Reset() { + *x = HttpProtocolOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpProtocolOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpProtocolOptions) ProtoMessage() {} + +func (x *HttpProtocolOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpProtocolOptions.ProtoReflect.Descriptor instead. +func (*HttpProtocolOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{5} +} + +func (x *HttpProtocolOptions) GetIdleTimeout() *durationpb.Duration { + if x != nil { + return x.IdleTimeout + } + return nil +} + +func (x *HttpProtocolOptions) GetMaxConnectionDuration() *durationpb.Duration { + if x != nil { + return x.MaxConnectionDuration + } + return nil +} + +func (x *HttpProtocolOptions) GetMaxHeadersCount() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxHeadersCount + } + return nil +} + +func (x *HttpProtocolOptions) GetMaxStreamDuration() *durationpb.Duration { + if x != nil { + return x.MaxStreamDuration + } + return nil +} + +func (x *HttpProtocolOptions) GetHeadersWithUnderscoresAction() HttpProtocolOptions_HeadersWithUnderscoresAction { + if x != nil { + return x.HeadersWithUnderscoresAction + } + return HttpProtocolOptions_ALLOW +} + +func (x *HttpProtocolOptions) GetMaxRequestsPerConnection() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxRequestsPerConnection + } + return nil +} + +// [#next-free-field: 11] +type Http1ProtocolOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Handle HTTP requests with absolute URLs in the requests. These requests + // are generally sent by clients to forward/explicit proxies. This allows clients to configure + // envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the + // “http_proxy“ environment variable. + AllowAbsoluteUrl *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=allow_absolute_url,json=allowAbsoluteUrl,proto3" json:"allow_absolute_url,omitempty"` + // Handle incoming HTTP/1.0 and HTTP 0.9 requests. + // This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1 + // style connect logic, dechunking, and handling lack of client host iff + // “default_host_for_http_10“ is configured. + AcceptHttp_10 bool `protobuf:"varint,2,opt,name=accept_http_10,json=acceptHttp10,proto3" json:"accept_http_10,omitempty"` + // A default host for HTTP/1.0 requests. This is highly suggested if “accept_http_10“ is true as + // Envoy does not otherwise support HTTP/1.0 without a Host header. + // This is a no-op if “accept_http_10“ is not true. 
+ DefaultHostForHttp_10 string `protobuf:"bytes,3,opt,name=default_host_for_http_10,json=defaultHostForHttp10,proto3" json:"default_host_for_http_10,omitempty"` + // Describes how the keys for response headers should be formatted. By default, all header keys + // are lower cased. + HeaderKeyFormat *Http1ProtocolOptions_HeaderKeyFormat `protobuf:"bytes,4,opt,name=header_key_format,json=headerKeyFormat,proto3" json:"header_key_format,omitempty"` + // Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers. + // + // .. attention:: + // + // Note that this only happens when Envoy is chunk encoding which occurs when: + // - The request is HTTP/1.1. + // - Is neither a HEAD only request nor a HTTP Upgrade. + // - Not a response to a HEAD request. + // - The content length header is not present. + EnableTrailers bool `protobuf:"varint,5,opt,name=enable_trailers,json=enableTrailers,proto3" json:"enable_trailers,omitempty"` + // Allows Envoy to process requests/responses with both “Content-Length“ and “Transfer-Encoding“ + // headers set. By default such messages are rejected, but if option is enabled - Envoy will + // remove Content-Length header and process message. + // See `RFC7230, sec. 3.3.3 `_ for details. + // + // .. attention:: + // + // Enabling this option might lead to request smuggling vulnerability, especially if traffic + // is proxied via multiple layers of proxies. + // + // [#comment:TODO: This field is ignored when the + // :ref:`header validation configuration ` + // is present.] + AllowChunkedLength bool `protobuf:"varint,6,opt,name=allow_chunked_length,json=allowChunkedLength,proto3" json:"allow_chunked_length,omitempty"` + // Allows invalid HTTP messaging. When this option is false, then Envoy will terminate + // HTTP/1.1 connections upon receiving an invalid HTTP message. However, + // when this option is true, then Envoy will leave the HTTP/1.1 connection + // open where possible. + // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging + // `. + OverrideStreamErrorOnInvalidHttpMessage *wrapperspb.BoolValue `protobuf:"bytes,7,opt,name=override_stream_error_on_invalid_http_message,json=overrideStreamErrorOnInvalidHttpMessage,proto3" json:"override_stream_error_on_invalid_http_message,omitempty"` + // Allows sending fully qualified URLs when proxying the first line of the + // response. By default, Envoy will only send the path components in the first line. + // If this is true, Envoy will create a fully qualified URI composing scheme + // (inferred if not present), host (from the host/:authority header) and path + // (from first line or :path header). + SendFullyQualifiedUrl bool `protobuf:"varint,8,opt,name=send_fully_qualified_url,json=sendFullyQualifiedUrl,proto3" json:"send_fully_qualified_url,omitempty"` + // [#not-implemented-hide:] Hiding so that field can be removed after BalsaParser is rolled out. + // If set, force HTTP/1 parser: BalsaParser if true, http-parser if false. + // If unset, HTTP/1 parser is selected based on + // envoy.reloadable_features.http1_use_balsa_parser. + // See issue #21245. + UseBalsaParser *wrapperspb.BoolValue `protobuf:"bytes,9,opt,name=use_balsa_parser,json=useBalsaParser,proto3" json:"use_balsa_parser,omitempty"` + // [#not-implemented-hide:] Hiding so that field can be removed. 
+ // If true, and BalsaParser is used (either `use_balsa_parser` above is true, + // or `envoy.reloadable_features.http1_use_balsa_parser` is true and + // `use_balsa_parser` is unset), then every non-empty method with only valid + // characters is accepted. Otherwise, methods not on the hard-coded list are + // rejected. + // Once UHV is enabled, this field should be removed, and BalsaParser should + // allow any method. UHV validates the method, rejecting empty string or + // invalid characters, and provides :ref:`restrict_http_methods + // ` + // to reject custom methods. + AllowCustomMethods bool `protobuf:"varint,10,opt,name=allow_custom_methods,json=allowCustomMethods,proto3" json:"allow_custom_methods,omitempty"` +} + +func (x *Http1ProtocolOptions) Reset() { + *x = Http1ProtocolOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Http1ProtocolOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Http1ProtocolOptions) ProtoMessage() {} + +func (x *Http1ProtocolOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Http1ProtocolOptions.ProtoReflect.Descriptor instead. +func (*Http1ProtocolOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{6} +} + +func (x *Http1ProtocolOptions) GetAllowAbsoluteUrl() *wrapperspb.BoolValue { + if x != nil { + return x.AllowAbsoluteUrl + } + return nil +} + +func (x *Http1ProtocolOptions) GetAcceptHttp_10() bool { + if x != nil { + return x.AcceptHttp_10 + } + return false +} + +func (x *Http1ProtocolOptions) GetDefaultHostForHttp_10() string { + if x != nil { + return x.DefaultHostForHttp_10 + } + return "" +} + +func (x *Http1ProtocolOptions) GetHeaderKeyFormat() *Http1ProtocolOptions_HeaderKeyFormat { + if x != nil { + return x.HeaderKeyFormat + } + return nil +} + +func (x *Http1ProtocolOptions) GetEnableTrailers() bool { + if x != nil { + return x.EnableTrailers + } + return false +} + +func (x *Http1ProtocolOptions) GetAllowChunkedLength() bool { + if x != nil { + return x.AllowChunkedLength + } + return false +} + +func (x *Http1ProtocolOptions) GetOverrideStreamErrorOnInvalidHttpMessage() *wrapperspb.BoolValue { + if x != nil { + return x.OverrideStreamErrorOnInvalidHttpMessage + } + return nil +} + +func (x *Http1ProtocolOptions) GetSendFullyQualifiedUrl() bool { + if x != nil { + return x.SendFullyQualifiedUrl + } + return false +} + +func (x *Http1ProtocolOptions) GetUseBalsaParser() *wrapperspb.BoolValue { + if x != nil { + return x.UseBalsaParser + } + return nil +} + +func (x *Http1ProtocolOptions) GetAllowCustomMethods() bool { + if x != nil { + return x.AllowCustomMethods + } + return false +} + +type KeepaliveSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Send HTTP/2 PING frames at this period, in order to test that the connection is still alive. + // If this is zero, interval PINGs will not be sent. 
+ Interval *durationpb.Duration `protobuf:"bytes,1,opt,name=interval,proto3" json:"interval,omitempty"` + // How long to wait for a response to a keepalive PING. If a response is not received within this + // time period, the connection will be aborted. Note that in order to prevent the influence of + // Head-of-line (HOL) blocking the timeout period is extended when *any* frame is received on + // the connection, under the assumption that if a frame is received the connection is healthy. + Timeout *durationpb.Duration `protobuf:"bytes,2,opt,name=timeout,proto3" json:"timeout,omitempty"` + // A random jitter amount as a percentage of interval that will be added to each interval. + // A value of zero means there will be no jitter. + // The default value is 15%. + IntervalJitter *v3.Percent `protobuf:"bytes,3,opt,name=interval_jitter,json=intervalJitter,proto3" json:"interval_jitter,omitempty"` + // If the connection has been idle for this duration, send a HTTP/2 ping ahead + // of new stream creation, to quickly detect dead connections. + // If this is zero, this type of PING will not be sent. + // If an interval ping is outstanding, a second ping will not be sent as the + // interval ping will determine if the connection is dead. + // + // The same feature for HTTP/3 is given by inheritance from QUICHE which uses :ref:`connection idle_timeout ` and the current PTO of the connection to decide whether to probe before sending a new request. + ConnectionIdleInterval *durationpb.Duration `protobuf:"bytes,4,opt,name=connection_idle_interval,json=connectionIdleInterval,proto3" json:"connection_idle_interval,omitempty"` +} + +func (x *KeepaliveSettings) Reset() { + *x = KeepaliveSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeepaliveSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeepaliveSettings) ProtoMessage() {} + +func (x *KeepaliveSettings) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeepaliveSettings.ProtoReflect.Descriptor instead. +func (*KeepaliveSettings) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{7} +} + +func (x *KeepaliveSettings) GetInterval() *durationpb.Duration { + if x != nil { + return x.Interval + } + return nil +} + +func (x *KeepaliveSettings) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout + } + return nil +} + +func (x *KeepaliveSettings) GetIntervalJitter() *v3.Percent { + if x != nil { + return x.IntervalJitter + } + return nil +} + +func (x *KeepaliveSettings) GetConnectionIdleInterval() *durationpb.Duration { + if x != nil { + return x.ConnectionIdleInterval + } + return nil +} + +// [#next-free-field: 17] +type Http2ProtocolOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // `Maximum table size `_ + // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values + // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header + // compression. 
+ HpackTableSize *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=hpack_table_size,json=hpackTableSize,proto3" json:"hpack_table_size,omitempty"` + // `Maximum concurrent streams `_ + // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) + // and defaults to 2147483647. + // + // For upstream connections, this also limits how many streams Envoy will initiate concurrently + // on a single connection. If the limit is reached, Envoy may queue requests or establish + // additional connections (as allowed per circuit breaker limits). + // + // This acts as an upper bound: Envoy will lower the max concurrent streams allowed on a given + // connection based on upstream settings. Config dumps will reflect the configured upper bound, + // not the per-connection negotiated limits. + MaxConcurrentStreams *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=max_concurrent_streams,json=maxConcurrentStreams,proto3" json:"max_concurrent_streams,omitempty"` + // `Initial stream-level flow-control window + // `_ size. Valid values range from 65535 + // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456 + // (256 * 1024 * 1024). + // + // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default + // window size now, so it's also the minimum. + // + // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the + // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to + // stop the flow of data to the codec buffers. + InitialStreamWindowSize *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=initial_stream_window_size,json=initialStreamWindowSize,proto3" json:"initial_stream_window_size,omitempty"` + // Similar to “initial_stream_window_size“, but for connection-level flow-control + // window. Currently, this has the same minimum/maximum/default as “initial_stream_window_size“. + InitialConnectionWindowSize *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=initial_connection_window_size,json=initialConnectionWindowSize,proto3" json:"initial_connection_window_size,omitempty"` + // Allows proxying Websocket and other upgrades over H2 connect. + AllowConnect bool `protobuf:"varint,5,opt,name=allow_connect,json=allowConnect,proto3" json:"allow_connect,omitempty"` + // [#not-implemented-hide:] Hiding until Envoy has full metadata support. + // Still under implementation. DO NOT USE. + // + // Allows sending and receiving HTTP/2 METADATA frames. See [metadata + // docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more + // information. + AllowMetadata bool `protobuf:"varint,6,opt,name=allow_metadata,json=allowMetadata,proto3" json:"allow_metadata,omitempty"` + // Limit the number of pending outbound downstream frames of all types (frames that are waiting to + // be written into the socket). Exceeding this limit triggers flood mitigation and connection is + // terminated. The “http2.outbound_flood“ stat tracks the number of terminated connections due + // to flood mitigation. The default limit is 10000. + MaxOutboundFrames *wrapperspb.UInt32Value `protobuf:"bytes,7,opt,name=max_outbound_frames,json=maxOutboundFrames,proto3" json:"max_outbound_frames,omitempty"` + // Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM, + // preventing high memory utilization when receiving continuous stream of these frames. 
Exceeding + // this limit triggers flood mitigation and connection is terminated. The + // “http2.outbound_control_flood“ stat tracks the number of terminated connections due to flood + // mitigation. The default limit is 1000. + MaxOutboundControlFrames *wrapperspb.UInt32Value `protobuf:"bytes,8,opt,name=max_outbound_control_frames,json=maxOutboundControlFrames,proto3" json:"max_outbound_control_frames,omitempty"` + // Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an + // empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but + // might be a result of a broken HTTP/2 implementation. The `http2.inbound_empty_frames_flood“ + // stat tracks the number of connections terminated due to flood mitigation. + // Setting this to 0 will terminate connection upon receiving first frame with an empty payload + // and no end stream flag. The default limit is 1. + MaxConsecutiveInboundFramesWithEmptyPayload *wrapperspb.UInt32Value `protobuf:"bytes,9,opt,name=max_consecutive_inbound_frames_with_empty_payload,json=maxConsecutiveInboundFramesWithEmptyPayload,proto3" json:"max_consecutive_inbound_frames_with_empty_payload,omitempty"` + // Limit the number of inbound PRIORITY frames allowed per each opened stream. If the number + // of PRIORITY frames received over the lifetime of connection exceeds the value calculated + // using this formula:: + // + // ``max_inbound_priority_frames_per_stream`` * (1 + ``opened_streams``) + // + // the connection is terminated. For downstream connections the “opened_streams“ is incremented when + // Envoy receives complete response headers from the upstream server. For upstream connection the + // “opened_streams“ is incremented when Envoy send the HEADERS frame for a new stream. The + // “http2.inbound_priority_frames_flood“ stat tracks + // the number of connections terminated due to flood mitigation. The default limit is 100. + MaxInboundPriorityFramesPerStream *wrapperspb.UInt32Value `protobuf:"bytes,10,opt,name=max_inbound_priority_frames_per_stream,json=maxInboundPriorityFramesPerStream,proto3" json:"max_inbound_priority_frames_per_stream,omitempty"` + // Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number + // of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated + // using this formula:: + // + // 5 + 2 * (``opened_streams`` + + // ``max_inbound_window_update_frames_per_data_frame_sent`` * ``outbound_data_frames``) + // + // the connection is terminated. For downstream connections the “opened_streams“ is incremented when + // Envoy receives complete response headers from the upstream server. For upstream connections the + // “opened_streams“ is incremented when Envoy sends the HEADERS frame for a new stream. The + // “http2.inbound_priority_frames_flood“ stat tracks the number of connections terminated due to + // flood mitigation. The default max_inbound_window_update_frames_per_data_frame_sent value is 10. + // Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control, + // but more complex implementations that try to estimate available bandwidth require at least 2. 
+ MaxInboundWindowUpdateFramesPerDataFrameSent *wrapperspb.UInt32Value `protobuf:"bytes,11,opt,name=max_inbound_window_update_frames_per_data_frame_sent,json=maxInboundWindowUpdateFramesPerDataFrameSent,proto3" json:"max_inbound_window_update_frames_per_data_frame_sent,omitempty"` + // Allows invalid HTTP messaging and headers. When this option is disabled (default), then + // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, + // when this option is enabled, only the offending stream is terminated. + // + // This is overridden by HCM :ref:`stream_error_on_invalid_http_messaging + // ` + // iff present. + // + // This is deprecated in favor of :ref:`override_stream_error_on_invalid_http_message + // ` + // + // See `RFC7540, sec. 8.1 `_ for details. + // + // Deprecated: Marked as deprecated in envoy/config/core/v3/protocol.proto. + StreamErrorOnInvalidHttpMessaging bool `protobuf:"varint,12,opt,name=stream_error_on_invalid_http_messaging,json=streamErrorOnInvalidHttpMessaging,proto3" json:"stream_error_on_invalid_http_messaging,omitempty"` + // Allows invalid HTTP messaging and headers. When this option is disabled (default), then + // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, + // when this option is enabled, only the offending stream is terminated. + // + // This overrides any HCM :ref:`stream_error_on_invalid_http_messaging + // ` + // + // See `RFC7540, sec. 8.1 `_ for details. + OverrideStreamErrorOnInvalidHttpMessage *wrapperspb.BoolValue `protobuf:"bytes,14,opt,name=override_stream_error_on_invalid_http_message,json=overrideStreamErrorOnInvalidHttpMessage,proto3" json:"override_stream_error_on_invalid_http_message,omitempty"` + // [#not-implemented-hide:] + // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions: + // + // 1. SETTINGS_ENABLE_PUSH (0x2) is not configurable as HTTP/2 server push is not supported by + // Envoy. + // + // 2. SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8) is only configurable through the named field + // 'allow_connect'. + // + // Note that custom parameters specified through this field can not also be set in the + // corresponding named parameters: + // + // .. code-block:: text + // + // ID Field Name + // ---------------- + // 0x1 hpack_table_size + // 0x3 max_concurrent_streams + // 0x4 initial_stream_window_size + // + // Collisions will trigger config validation failure on load/update. Likewise, inconsistencies + // between custom parameters with the same identifier will trigger a failure. + // + // See `IANA HTTP/2 Settings + // `_ for + // standardized identifiers. + CustomSettingsParameters []*Http2ProtocolOptions_SettingsParameter `protobuf:"bytes,13,rep,name=custom_settings_parameters,json=customSettingsParameters,proto3" json:"custom_settings_parameters,omitempty"` + // Send HTTP/2 PING frames to verify that the connection is still healthy. If the remote peer + // does not respond within the configured timeout, the connection will be aborted. + ConnectionKeepalive *KeepaliveSettings `protobuf:"bytes,15,opt,name=connection_keepalive,json=connectionKeepalive,proto3" json:"connection_keepalive,omitempty"` + // [#not-implemented-hide:] Hiding so that the field can be removed after oghttp2 is rolled out. + // If set, force use of a particular HTTP/2 codec: oghttp2 if true, nghttp2 if false. + // If unset, HTTP/2 codec is selected based on envoy.reloadable_features.http2_use_oghttp2. 
+ UseOghttp2Codec *wrapperspb.BoolValue `protobuf:"bytes,16,opt,name=use_oghttp2_codec,json=useOghttp2Codec,proto3" json:"use_oghttp2_codec,omitempty"` +} + +func (x *Http2ProtocolOptions) Reset() { + *x = Http2ProtocolOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Http2ProtocolOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Http2ProtocolOptions) ProtoMessage() {} + +func (x *Http2ProtocolOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Http2ProtocolOptions.ProtoReflect.Descriptor instead. +func (*Http2ProtocolOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{8} +} + +func (x *Http2ProtocolOptions) GetHpackTableSize() *wrapperspb.UInt32Value { + if x != nil { + return x.HpackTableSize + } + return nil +} + +func (x *Http2ProtocolOptions) GetMaxConcurrentStreams() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxConcurrentStreams + } + return nil +} + +func (x *Http2ProtocolOptions) GetInitialStreamWindowSize() *wrapperspb.UInt32Value { + if x != nil { + return x.InitialStreamWindowSize + } + return nil +} + +func (x *Http2ProtocolOptions) GetInitialConnectionWindowSize() *wrapperspb.UInt32Value { + if x != nil { + return x.InitialConnectionWindowSize + } + return nil +} + +func (x *Http2ProtocolOptions) GetAllowConnect() bool { + if x != nil { + return x.AllowConnect + } + return false +} + +func (x *Http2ProtocolOptions) GetAllowMetadata() bool { + if x != nil { + return x.AllowMetadata + } + return false +} + +func (x *Http2ProtocolOptions) GetMaxOutboundFrames() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxOutboundFrames + } + return nil +} + +func (x *Http2ProtocolOptions) GetMaxOutboundControlFrames() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxOutboundControlFrames + } + return nil +} + +func (x *Http2ProtocolOptions) GetMaxConsecutiveInboundFramesWithEmptyPayload() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxConsecutiveInboundFramesWithEmptyPayload + } + return nil +} + +func (x *Http2ProtocolOptions) GetMaxInboundPriorityFramesPerStream() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxInboundPriorityFramesPerStream + } + return nil +} + +func (x *Http2ProtocolOptions) GetMaxInboundWindowUpdateFramesPerDataFrameSent() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxInboundWindowUpdateFramesPerDataFrameSent + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/core/v3/protocol.proto. 
+func (x *Http2ProtocolOptions) GetStreamErrorOnInvalidHttpMessaging() bool { + if x != nil { + return x.StreamErrorOnInvalidHttpMessaging + } + return false +} + +func (x *Http2ProtocolOptions) GetOverrideStreamErrorOnInvalidHttpMessage() *wrapperspb.BoolValue { + if x != nil { + return x.OverrideStreamErrorOnInvalidHttpMessage + } + return nil +} + +func (x *Http2ProtocolOptions) GetCustomSettingsParameters() []*Http2ProtocolOptions_SettingsParameter { + if x != nil { + return x.CustomSettingsParameters + } + return nil +} + +func (x *Http2ProtocolOptions) GetConnectionKeepalive() *KeepaliveSettings { + if x != nil { + return x.ConnectionKeepalive + } + return nil +} + +func (x *Http2ProtocolOptions) GetUseOghttp2Codec() *wrapperspb.BoolValue { + if x != nil { + return x.UseOghttp2Codec + } + return nil +} + +// [#not-implemented-hide:] +type GrpcProtocolOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Http2ProtocolOptions *Http2ProtocolOptions `protobuf:"bytes,1,opt,name=http2_protocol_options,json=http2ProtocolOptions,proto3" json:"http2_protocol_options,omitempty"` +} + +func (x *GrpcProtocolOptions) Reset() { + *x = GrpcProtocolOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcProtocolOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcProtocolOptions) ProtoMessage() {} + +func (x *GrpcProtocolOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcProtocolOptions.ProtoReflect.Descriptor instead. +func (*GrpcProtocolOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{9} +} + +func (x *GrpcProtocolOptions) GetHttp2ProtocolOptions() *Http2ProtocolOptions { + if x != nil { + return x.Http2ProtocolOptions + } + return nil +} + +// A message which allows using HTTP/3. +// [#next-free-field: 7] +type Http3ProtocolOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + QuicProtocolOptions *QuicProtocolOptions `protobuf:"bytes,1,opt,name=quic_protocol_options,json=quicProtocolOptions,proto3" json:"quic_protocol_options,omitempty"` + // Allows invalid HTTP messaging and headers. When this option is disabled (default), then + // the whole HTTP/3 connection is terminated upon receiving invalid HEADERS frame. However, + // when this option is enabled, only the offending stream is terminated. + // + // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging + // `. + OverrideStreamErrorOnInvalidHttpMessage *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=override_stream_error_on_invalid_http_message,json=overrideStreamErrorOnInvalidHttpMessage,proto3" json:"override_stream_error_on_invalid_http_message,omitempty"` + // Allows proxying Websocket and other upgrades over HTTP/3 CONNECT using + // the header mechanisms from the `HTTP/2 extended connect RFC + // `_ + // and settings `proposed for HTTP/3 + // `_ + // Note that HTTP/3 CONNECT is not yet an RFC. 
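+	//
+	// Illustrative example (added for clarity; not part of the upstream Envoy comment): with
+	// extended CONNECT enabled, a WebSocket upgrade is carried as a CONNECT request with a
+	// ``:protocol`` pseudo-header, along the lines of:
+	//
+	//	:method = CONNECT
+	//	:protocol = websocket
+	//	:scheme = https
+	//	:path = /chat
+	//	:authority = example.com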
+ AllowExtendedConnect bool `protobuf:"varint,5,opt,name=allow_extended_connect,json=allowExtendedConnect,proto3" json:"allow_extended_connect,omitempty"` + // [#not-implemented-hide:] Hiding until Envoy has full metadata support. + // Still under implementation. DO NOT USE. + // + // Allows sending and receiving HTTP/3 METADATA frames. See [metadata + // docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more + // information. + AllowMetadata bool `protobuf:"varint,6,opt,name=allow_metadata,json=allowMetadata,proto3" json:"allow_metadata,omitempty"` +} + +func (x *Http3ProtocolOptions) Reset() { + *x = Http3ProtocolOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Http3ProtocolOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Http3ProtocolOptions) ProtoMessage() {} + +func (x *Http3ProtocolOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Http3ProtocolOptions.ProtoReflect.Descriptor instead. +func (*Http3ProtocolOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{10} +} + +func (x *Http3ProtocolOptions) GetQuicProtocolOptions() *QuicProtocolOptions { + if x != nil { + return x.QuicProtocolOptions + } + return nil +} + +func (x *Http3ProtocolOptions) GetOverrideStreamErrorOnInvalidHttpMessage() *wrapperspb.BoolValue { + if x != nil { + return x.OverrideStreamErrorOnInvalidHttpMessage + } + return nil +} + +func (x *Http3ProtocolOptions) GetAllowExtendedConnect() bool { + if x != nil { + return x.AllowExtendedConnect + } + return false +} + +func (x *Http3ProtocolOptions) GetAllowMetadata() bool { + if x != nil { + return x.AllowMetadata + } + return false +} + +// A message to control transformations to the :scheme header +type SchemeHeaderTransformation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Transformation: + // + // *SchemeHeaderTransformation_SchemeToOverwrite + Transformation isSchemeHeaderTransformation_Transformation `protobuf_oneof:"transformation"` + // Set the Scheme header to match the upstream transport protocol. For example, should a + // request be sent to the upstream over TLS, the scheme header will be set to "https". Should the + // request be sent over plaintext, the scheme header will be set to "http". + // If scheme_to_overwrite is set, this field is not used. 
+ MatchUpstream bool `protobuf:"varint,2,opt,name=match_upstream,json=matchUpstream,proto3" json:"match_upstream,omitempty"` +} + +func (x *SchemeHeaderTransformation) Reset() { + *x = SchemeHeaderTransformation{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SchemeHeaderTransformation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SchemeHeaderTransformation) ProtoMessage() {} + +func (x *SchemeHeaderTransformation) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SchemeHeaderTransformation.ProtoReflect.Descriptor instead. +func (*SchemeHeaderTransformation) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{11} +} + +func (m *SchemeHeaderTransformation) GetTransformation() isSchemeHeaderTransformation_Transformation { + if m != nil { + return m.Transformation + } + return nil +} + +func (x *SchemeHeaderTransformation) GetSchemeToOverwrite() string { + if x, ok := x.GetTransformation().(*SchemeHeaderTransformation_SchemeToOverwrite); ok { + return x.SchemeToOverwrite + } + return "" +} + +func (x *SchemeHeaderTransformation) GetMatchUpstream() bool { + if x != nil { + return x.MatchUpstream + } + return false +} + +type isSchemeHeaderTransformation_Transformation interface { + isSchemeHeaderTransformation_Transformation() +} + +type SchemeHeaderTransformation_SchemeToOverwrite struct { + // Overwrite any Scheme header with the contents of this string. + // If set, takes precedence over match_upstream. + SchemeToOverwrite string `protobuf:"bytes,1,opt,name=scheme_to_overwrite,json=schemeToOverwrite,proto3,oneof"` +} + +func (*SchemeHeaderTransformation_SchemeToOverwrite) isSchemeHeaderTransformation_Transformation() {} + +// Allows pre-populating the cache with HTTP/3 alternate protocols entries with a 7 day lifetime. +// This will cause Envoy to attempt HTTP/3 to those upstreams, even if the upstreams have not +// advertised HTTP/3 support. These entries will be overwritten by alt-svc +// response headers or cached values. +// As with regular cached entries, if the origin response would result in clearing an existing +// alternate protocol cache entry, pre-populated entries will also be cleared. +// Adding a cache entry with hostname=foo.com port=123 is the equivalent of getting +// response headers +// alt-svc: h3=:"123"; ma=86400" in a response to a request to foo.com:123 +type AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The host name for the alternate protocol entry. + Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` + // The port for the alternate protocol entry. 
+ Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` +} + +func (x *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) Reset() { + *x = AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) ProtoMessage() {} + +func (x *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry.ProtoReflect.Descriptor instead. +func (*AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) GetHostname() string { + if x != nil { + return x.Hostname + } + return "" +} + +func (x *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) GetPort() uint32 { + if x != nil { + return x.Port + } + return 0 +} + +// [#next-free-field: 9] +type Http1ProtocolOptions_HeaderKeyFormat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to HeaderFormat: + // + // *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_ + // *Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter + HeaderFormat isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat `protobuf_oneof:"header_format"` +} + +func (x *Http1ProtocolOptions_HeaderKeyFormat) Reset() { + *x = Http1ProtocolOptions_HeaderKeyFormat{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Http1ProtocolOptions_HeaderKeyFormat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Http1ProtocolOptions_HeaderKeyFormat) ProtoMessage() {} + +func (x *Http1ProtocolOptions_HeaderKeyFormat) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Http1ProtocolOptions_HeaderKeyFormat.ProtoReflect.Descriptor instead. 
+func (*Http1ProtocolOptions_HeaderKeyFormat) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{6, 0} +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat) GetHeaderFormat() isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat { + if m != nil { + return m.HeaderFormat + } + return nil +} + +func (x *Http1ProtocolOptions_HeaderKeyFormat) GetProperCaseWords() *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords { + if x, ok := x.GetHeaderFormat().(*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_); ok { + return x.ProperCaseWords + } + return nil +} + +func (x *Http1ProtocolOptions_HeaderKeyFormat) GetStatefulFormatter() *TypedExtensionConfig { + if x, ok := x.GetHeaderFormat().(*Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter); ok { + return x.StatefulFormatter + } + return nil +} + +type isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat interface { + isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat() +} + +type Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_ struct { + // Formats the header by proper casing words: the first character and any character following + // a special character will be capitalized if it's an alpha character. For example, + // "content-type" becomes "Content-Type", and "foo$b#$are" becomes "Foo$B#$Are". + // Note that while this results in most headers following conventional casing, certain headers + // are not covered. For example, the "TE" header will be formatted as "Te". + ProperCaseWords *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords `protobuf:"bytes,1,opt,name=proper_case_words,json=properCaseWords,proto3,oneof"` +} + +type Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter struct { + // Configuration for stateful formatter extensions that allow using received headers to + // affect the output of encoding headers. E.g., preserving case during proxying. + // [#extension-category: envoy.http.stateful_header_formatters] + StatefulFormatter *TypedExtensionConfig `protobuf:"bytes,8,opt,name=stateful_formatter,json=statefulFormatter,proto3,oneof"` +} + +func (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_) isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat() { +} + +func (*Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter) isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat() { +} + +type Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) Reset() { + *x = Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) ProtoMessage() {} + +func (x *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords.ProtoReflect.Descriptor instead. 
+func (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{6, 0, 0} +} + +// Defines a parameter to be sent in the SETTINGS frame. +// See `RFC7540, sec. 6.5.1 `_ for details. +type Http2ProtocolOptions_SettingsParameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The 16 bit parameter identifier. + Identifier *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` + // The 32 bit parameter value. + Value *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Http2ProtocolOptions_SettingsParameter) Reset() { + *x = Http2ProtocolOptions_SettingsParameter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Http2ProtocolOptions_SettingsParameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Http2ProtocolOptions_SettingsParameter) ProtoMessage() {} + +func (x *Http2ProtocolOptions_SettingsParameter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Http2ProtocolOptions_SettingsParameter.ProtoReflect.Descriptor instead. +func (*Http2ProtocolOptions_SettingsParameter) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{8, 0} +} + +func (x *Http2ProtocolOptions_SettingsParameter) GetIdentifier() *wrapperspb.UInt32Value { + if x != nil { + return x.Identifier + } + return nil +} + +func (x *Http2ProtocolOptions_SettingsParameter) GetValue() *wrapperspb.UInt32Value { + if x != nil { + return x.Value + } + return nil +} + +var File_envoy_config_core_v3_protocol_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_protocol_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x24, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, + 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, + 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x33, 0x2f, 0x73, 
0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0x41, 0x0a, 0x12, 0x54, 0x63, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x54, 0x63, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x22, 0xb7, 0x01, 0x0a, 0x15, 0x51, 0x75, 0x69, 0x63, 0x4b, 0x65, 0x65, 0x70, 0x41, + 0x6c, 0x69, 0x76, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x4a, 0x0a, 0x0c, + 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, + 0x42, 0x09, 0xaa, 0x01, 0x06, 0x22, 0x00, 0x32, 0x02, 0x08, 0x01, 0x52, 0x0b, 0x6d, 0x61, 0x78, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x52, 0x0a, 0x10, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x61, 0x6c, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, + 0x42, 0x09, 0xaa, 0x01, 0x06, 0x22, 0x00, 0x32, 0x02, 0x08, 0x01, 0x52, 0x0f, 0x69, 0x6e, 0x69, + 0x74, 0x69, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0xf1, 0x05, 0x0a, + 0x13, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5b, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x14, 0x6d, 0x61, 0x78, + 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x73, 0x12, 0x67, 0x0a, 0x1a, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x42, 0x0c, 0xfa, 0x42, 0x09, 
0x2a, 0x07, 0x18, 0x80, 0x80, 0x80, 0x08, 0x28, + 0x01, 0x52, 0x17, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x6f, 0x0a, 0x1e, 0x69, 0x6e, + 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x0c, 0xfa, 0x42, 0x09, 0x2a, 0x07, 0x18, 0x80, 0x80, 0x80, 0x0c, 0x28, 0x01, 0x52, 0x1b, + 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x7a, 0x0a, 0x26, 0x6e, + 0x75, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x74, + 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, 0x04, + 0x18, 0x05, 0x28, 0x00, 0x52, 0x21, 0x6e, 0x75, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x73, 0x54, 0x6f, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x4d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5e, 0x0a, 0x14, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x69, + 0x63, 0x4b, 0x65, 0x65, 0x70, 0x41, 0x6c, 0x69, 0x76, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x52, 0x13, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, + 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x5c, 0x0a, 0x14, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x6e, 0x65, 0x74, 0x77, 0x6f, + 0x72, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0xfa, 0x42, 0x0c, + 0xaa, 0x01, 0x09, 0x22, 0x03, 0x08, 0xd8, 0x04, 0x32, 0x02, 0x08, 0x01, 0x52, 0x12, 0x69, 0x64, + 0x6c, 0x65, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x22, 0xe4, 0x01, 0x0a, 0x1b, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 
0x6d, 0x48, 0x74, 0x74, + 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x19, 0x0a, 0x08, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x6e, 0x69, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x07, 0x61, 0x75, 0x74, 0x6f, 0x53, 0x6e, 0x69, 0x12, 0x2e, 0x0a, 0x13, 0x61, + 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x61, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x61, 0x75, 0x74, 0x6f, 0x53, 0x61, + 0x6e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x18, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x6e, 0x69, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, + 0x42, 0x08, 0x72, 0x06, 0xd0, 0x01, 0x01, 0xc0, 0x01, 0x01, 0x52, 0x15, 0x6f, 0x76, 0x65, 0x72, + 0x72, 0x69, 0x64, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x53, 0x6e, 0x69, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x55, 0x70, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x86, 0x04, 0x0a, 0x1e, 0x41, 0x6c, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, + 0x63, 0x68, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x65, + 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, + 0x02, 0x20, 0x00, 0x52, 0x0a, 0x6d, 0x61, 0x78, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, + 0x5f, 0x0a, 0x16, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6b, 0x65, 0x79, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x84, 0x01, 0x0a, 0x14, 0x70, 0x72, 0x65, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, + 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x51, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x13, 0x70, 0x72, 0x65, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, + 
0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x61, 0x6e, 0x6f, 0x6e, + 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x11, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x75, + 0x66, 0x66, 0x69, 0x78, 0x65, 0x73, 0x1a, 0x68, 0x0a, 0x1c, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x68, + 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x27, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xd0, + 0x01, 0x01, 0xc0, 0x01, 0x01, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x1f, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0b, 0xfa, + 0x42, 0x08, 0x2a, 0x06, 0x10, 0xff, 0xff, 0x03, 0x20, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, + 0x22, 0xaf, 0x05, 0x0a, 0x13, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x0c, 0x69, 0x64, 0x6c, 0x65, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x69, 0x64, 0x6c, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x51, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x11, 0x6d, 0x61, 0x78, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x0f, 0x6d, 0x61, 0x78, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x49, 0x0a, 0x13, + 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x8d, 0x01, 0x0a, 0x1f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x75, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, + 0x6f, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x46, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 
0x57, 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, + 0x72, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1c, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x57, 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, + 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4e, 0x0a, 0x1c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x57, + 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x00, 0x12, + 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, + 0x54, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x48, 0x45, 0x41, 0x44, + 0x45, 0x52, 0x10, 0x02, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, + 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x22, 0x93, 0x09, 0x0a, 0x14, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x48, 0x0a, 0x12, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x62, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x65, 0x5f, 0x75, 0x72, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x62, 0x73, 0x6f, 0x6c, 0x75, + 0x74, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x5f, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x31, 0x30, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, + 0x63, 0x63, 0x65, 0x70, 0x74, 0x48, 0x74, 0x74, 0x70, 0x31, 0x30, 0x12, 0x36, 0x0a, 0x18, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x31, 0x30, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x46, 0x6f, 0x72, 0x48, 0x74, 0x74, + 0x70, 0x31, 0x30, 0x12, 0x66, 0x0a, 0x11, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6b, 0x65, + 0x79, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x0f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x72, 
0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x72, 0x61, 0x69, + 0x6c, 0x65, 0x72, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x68, + 0x75, 0x6e, 0x6b, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x65, 0x64, + 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x7a, 0x0a, 0x2d, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, + 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, + 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x27, 0x6f, 0x76, 0x65, 0x72, 0x72, + 0x69, 0x64, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, + 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x65, 0x6e, 0x64, 0x5f, 0x66, 0x75, 0x6c, 0x6c, 0x79, + 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x73, 0x65, 0x6e, 0x64, 0x46, 0x75, 0x6c, 0x6c, 0x79, 0x51, + 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x64, 0x55, 0x72, 0x6c, 0x12, 0x4e, 0x0a, 0x10, 0x75, + 0x73, 0x65, 0x5f, 0x62, 0x61, 0x6c, 0x73, 0x61, 0x5f, 0x70, 0x61, 0x72, 0x73, 0x65, 0x72, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x0e, 0x75, 0x73, 0x65, + 0x42, 0x61, 0x6c, 0x73, 0x61, 0x50, 0x61, 0x72, 0x73, 0x65, 0x72, 0x12, 0x3a, 0x0a, 0x14, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, + 0x02, 0x08, 0x01, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x1a, 0x9f, 0x03, 0x0a, 0x0f, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x78, 0x0a, 0x11, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x73, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x64, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, + 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, 0x6f, 0x72, + 0x64, 0x73, 0x48, 0x00, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, + 0x57, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x5b, 0x0a, 0x12, 0x73, 0x74, 0x61, 0x74, 0x65, 0x66, 0x75, + 0x6c, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 
0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, + 0x11, 0x73, 0x74, 0x61, 0x74, 0x65, 0x66, 0x75, 0x6c, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74, + 0x65, 0x72, 0x1a, 0x60, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, + 0x57, 0x6f, 0x72, 0x64, 0x73, 0x3a, 0x4d, 0x9a, 0xc5, 0x88, 0x1e, 0x48, 0x0a, 0x46, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, + 0x6f, 0x72, 0x64, 0x73, 0x3a, 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, + 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x42, 0x14, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x66, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, + 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xc1, 0x02, 0x0a, 0x11, 0x4b, 0x65, 0x65, + 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, + 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, + 0xaa, 0x01, 0x06, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x12, 0x43, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x0e, 0xfa, 0x42, 0x0b, 0xaa, 0x01, 0x08, 0x08, 0x01, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, + 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3f, 0x0a, 0x0f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x61, 0x0a, 0x18, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x32, 0x04, + 
0x10, 0xc0, 0x84, 0x3d, 0x52, 0x16, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x49, 0x64, 0x6c, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0xd0, 0x0e, 0x0a, + 0x14, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x10, 0x68, 0x70, 0x61, 0x63, 0x6b, 0x5f, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x68, + 0x70, 0x61, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x61, 0x0a, + 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0d, 0xfa, 0x42, 0x0a, + 0x2a, 0x08, 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, 0x01, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x43, + 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, + 0x12, 0x6a, 0x0a, 0x1a, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x2a, 0x0a, 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, + 0xff, 0xff, 0x03, 0x52, 0x17, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x72, 0x0a, 0x1e, + 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x2a, 0x0a, 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, + 0xff, 0xff, 0x03, 0x52, 0x1b, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x55, 0x0a, 0x13, + 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x66, 0x72, 0x61, + 0x6d, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x33, 0x32, 0x56, 0x61, 
0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, + 0x52, 0x11, 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x46, 0x72, 0x61, + 0x6d, 0x65, 0x73, 0x12, 0x64, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x62, 0x6f, + 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x5f, 0x66, 0x72, 0x61, 0x6d, + 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, + 0x18, 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x84, 0x01, 0x0a, 0x31, 0x6d, 0x61, + 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, + 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x77, 0x69, 0x74, + 0x68, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x2b, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x76, 0x65, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, + 0x57, 0x69, 0x74, 0x68, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x12, 0x6f, 0x0a, 0x26, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, + 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, + 0x70, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x21, + 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, + 0x74, 0x79, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x12, 0x91, 0x01, 0x0a, 0x34, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, + 0x64, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, + 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, + 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x2c, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x62, 0x6f, + 0x75, 0x6e, 0x64, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, + 0x72, 0x61, 0x6d, 0x65, 0x73, 0x50, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x61, 0x6d, + 0x65, 0x53, 0x65, 0x6e, 0x74, 0x12, 0x5e, 0x0a, 0x26, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 
0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, + 0x18, 0x01, 0x52, 0x21, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, + 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x7a, 0x0a, 0x2d, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, + 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, + 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, + 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x27, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, + 0x64, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, + 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x7a, 0x0a, 0x1a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x73, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, + 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, + 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x52, 0x18, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x5a, 0x0a, + 0x14, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x65, 0x70, + 0x61, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x52, 0x13, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x12, 0x50, 0x0a, 0x11, 0x75, 0x73, 0x65, + 0x5f, 0x6f, 0x67, 0x68, 0x74, 0x74, 0x70, 0x32, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x0f, 0x75, 0x73, 0x65, 0x4f, + 0x67, 0x68, 0x74, 0x74, 0x70, 0x32, 0x43, 0x6f, 0x64, 0x65, 0x63, 0x1a, 0xe2, 0x01, 0x0a, 0x11, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x12, 0x4e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x2a, 0x06, 0x18, + 0xff, 0xff, 0x03, 0x28, 0x00, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x12, 0x3c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x3f, 0x9a, 0xc5, 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, + 0xa5, 0x01, 0x0a, 0x13, 0x47, 0x72, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x60, 0x0a, 0x16, 0x68, 0x74, 0x74, 0x70, 0x32, + 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, + 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x14, 0x68, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, + 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xd8, 0x02, 0x0a, 0x14, 0x48, 0x74, 0x74, 0x70, + 0x33, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x5d, 0x0a, 0x15, 0x71, 0x75, 0x69, 0x63, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x71, 0x75, 0x69, 0x63, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x7a, 0x0a, 0x2d, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x27, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3e, 0x0a, 0x16, 0x61, + 
0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x42, 0x08, 0xd2, 0xc6, 0xa4, + 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x64, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x22, 0x9b, 0x01, 0x0a, 0x1a, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x44, 0x0a, 0x13, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x6f, + 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x12, + 0xfa, 0x42, 0x0f, 0x72, 0x0d, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, 0x52, 0x05, 0x68, 0x74, 0x74, + 0x70, 0x73, 0x48, 0x00, 0x52, 0x11, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x54, 0x6f, 0x4f, 0x76, + 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x10, + 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x42, 0x81, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, + 0x0d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, + 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_protocol_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_protocol_proto_rawDescData = file_envoy_config_core_v3_protocol_proto_rawDesc +) + +func file_envoy_config_core_v3_protocol_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_protocol_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_protocol_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_protocol_proto_rawDescData) + }) + return file_envoy_config_core_v3_protocol_proto_rawDescData +} + +var file_envoy_config_core_v3_protocol_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_config_core_v3_protocol_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_envoy_config_core_v3_protocol_proto_goTypes = []interface{}{ + (HttpProtocolOptions_HeadersWithUnderscoresAction)(0), // 0: envoy.config.core.v3.HttpProtocolOptions.HeadersWithUnderscoresAction + (*TcpProtocolOptions)(nil), // 1: envoy.config.core.v3.TcpProtocolOptions + (*QuicKeepAliveSettings)(nil), // 2: envoy.config.core.v3.QuicKeepAliveSettings + (*QuicProtocolOptions)(nil), // 3: 
envoy.config.core.v3.QuicProtocolOptions + (*UpstreamHttpProtocolOptions)(nil), // 4: envoy.config.core.v3.UpstreamHttpProtocolOptions + (*AlternateProtocolsCacheOptions)(nil), // 5: envoy.config.core.v3.AlternateProtocolsCacheOptions + (*HttpProtocolOptions)(nil), // 6: envoy.config.core.v3.HttpProtocolOptions + (*Http1ProtocolOptions)(nil), // 7: envoy.config.core.v3.Http1ProtocolOptions + (*KeepaliveSettings)(nil), // 8: envoy.config.core.v3.KeepaliveSettings + (*Http2ProtocolOptions)(nil), // 9: envoy.config.core.v3.Http2ProtocolOptions + (*GrpcProtocolOptions)(nil), // 10: envoy.config.core.v3.GrpcProtocolOptions + (*Http3ProtocolOptions)(nil), // 11: envoy.config.core.v3.Http3ProtocolOptions + (*SchemeHeaderTransformation)(nil), // 12: envoy.config.core.v3.SchemeHeaderTransformation + (*AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry)(nil), // 13: envoy.config.core.v3.AlternateProtocolsCacheOptions.AlternateProtocolsCacheEntry + (*Http1ProtocolOptions_HeaderKeyFormat)(nil), // 14: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat + (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords)(nil), // 15: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords + (*Http2ProtocolOptions_SettingsParameter)(nil), // 16: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter + (*durationpb.Duration)(nil), // 17: google.protobuf.Duration + (*wrapperspb.UInt32Value)(nil), // 18: google.protobuf.UInt32Value + (*TypedExtensionConfig)(nil), // 19: envoy.config.core.v3.TypedExtensionConfig + (*wrapperspb.BoolValue)(nil), // 20: google.protobuf.BoolValue + (*v3.Percent)(nil), // 21: envoy.type.v3.Percent +} +var file_envoy_config_core_v3_protocol_proto_depIdxs = []int32{ + 17, // 0: envoy.config.core.v3.QuicKeepAliveSettings.max_interval:type_name -> google.protobuf.Duration + 17, // 1: envoy.config.core.v3.QuicKeepAliveSettings.initial_interval:type_name -> google.protobuf.Duration + 18, // 2: envoy.config.core.v3.QuicProtocolOptions.max_concurrent_streams:type_name -> google.protobuf.UInt32Value + 18, // 3: envoy.config.core.v3.QuicProtocolOptions.initial_stream_window_size:type_name -> google.protobuf.UInt32Value + 18, // 4: envoy.config.core.v3.QuicProtocolOptions.initial_connection_window_size:type_name -> google.protobuf.UInt32Value + 18, // 5: envoy.config.core.v3.QuicProtocolOptions.num_timeouts_to_trigger_port_migration:type_name -> google.protobuf.UInt32Value + 2, // 6: envoy.config.core.v3.QuicProtocolOptions.connection_keepalive:type_name -> envoy.config.core.v3.QuicKeepAliveSettings + 17, // 7: envoy.config.core.v3.QuicProtocolOptions.idle_network_timeout:type_name -> google.protobuf.Duration + 18, // 8: envoy.config.core.v3.AlternateProtocolsCacheOptions.max_entries:type_name -> google.protobuf.UInt32Value + 19, // 9: envoy.config.core.v3.AlternateProtocolsCacheOptions.key_value_store_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 13, // 10: envoy.config.core.v3.AlternateProtocolsCacheOptions.prepopulated_entries:type_name -> envoy.config.core.v3.AlternateProtocolsCacheOptions.AlternateProtocolsCacheEntry + 17, // 11: envoy.config.core.v3.HttpProtocolOptions.idle_timeout:type_name -> google.protobuf.Duration + 17, // 12: envoy.config.core.v3.HttpProtocolOptions.max_connection_duration:type_name -> google.protobuf.Duration + 18, // 13: envoy.config.core.v3.HttpProtocolOptions.max_headers_count:type_name -> google.protobuf.UInt32Value + 17, // 14: envoy.config.core.v3.HttpProtocolOptions.max_stream_duration:type_name -> 
google.protobuf.Duration + 0, // 15: envoy.config.core.v3.HttpProtocolOptions.headers_with_underscores_action:type_name -> envoy.config.core.v3.HttpProtocolOptions.HeadersWithUnderscoresAction + 18, // 16: envoy.config.core.v3.HttpProtocolOptions.max_requests_per_connection:type_name -> google.protobuf.UInt32Value + 20, // 17: envoy.config.core.v3.Http1ProtocolOptions.allow_absolute_url:type_name -> google.protobuf.BoolValue + 14, // 18: envoy.config.core.v3.Http1ProtocolOptions.header_key_format:type_name -> envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat + 20, // 19: envoy.config.core.v3.Http1ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue + 20, // 20: envoy.config.core.v3.Http1ProtocolOptions.use_balsa_parser:type_name -> google.protobuf.BoolValue + 17, // 21: envoy.config.core.v3.KeepaliveSettings.interval:type_name -> google.protobuf.Duration + 17, // 22: envoy.config.core.v3.KeepaliveSettings.timeout:type_name -> google.protobuf.Duration + 21, // 23: envoy.config.core.v3.KeepaliveSettings.interval_jitter:type_name -> envoy.type.v3.Percent + 17, // 24: envoy.config.core.v3.KeepaliveSettings.connection_idle_interval:type_name -> google.protobuf.Duration + 18, // 25: envoy.config.core.v3.Http2ProtocolOptions.hpack_table_size:type_name -> google.protobuf.UInt32Value + 18, // 26: envoy.config.core.v3.Http2ProtocolOptions.max_concurrent_streams:type_name -> google.protobuf.UInt32Value + 18, // 27: envoy.config.core.v3.Http2ProtocolOptions.initial_stream_window_size:type_name -> google.protobuf.UInt32Value + 18, // 28: envoy.config.core.v3.Http2ProtocolOptions.initial_connection_window_size:type_name -> google.protobuf.UInt32Value + 18, // 29: envoy.config.core.v3.Http2ProtocolOptions.max_outbound_frames:type_name -> google.protobuf.UInt32Value + 18, // 30: envoy.config.core.v3.Http2ProtocolOptions.max_outbound_control_frames:type_name -> google.protobuf.UInt32Value + 18, // 31: envoy.config.core.v3.Http2ProtocolOptions.max_consecutive_inbound_frames_with_empty_payload:type_name -> google.protobuf.UInt32Value + 18, // 32: envoy.config.core.v3.Http2ProtocolOptions.max_inbound_priority_frames_per_stream:type_name -> google.protobuf.UInt32Value + 18, // 33: envoy.config.core.v3.Http2ProtocolOptions.max_inbound_window_update_frames_per_data_frame_sent:type_name -> google.protobuf.UInt32Value + 20, // 34: envoy.config.core.v3.Http2ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue + 16, // 35: envoy.config.core.v3.Http2ProtocolOptions.custom_settings_parameters:type_name -> envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter + 8, // 36: envoy.config.core.v3.Http2ProtocolOptions.connection_keepalive:type_name -> envoy.config.core.v3.KeepaliveSettings + 20, // 37: envoy.config.core.v3.Http2ProtocolOptions.use_oghttp2_codec:type_name -> google.protobuf.BoolValue + 9, // 38: envoy.config.core.v3.GrpcProtocolOptions.http2_protocol_options:type_name -> envoy.config.core.v3.Http2ProtocolOptions + 3, // 39: envoy.config.core.v3.Http3ProtocolOptions.quic_protocol_options:type_name -> envoy.config.core.v3.QuicProtocolOptions + 20, // 40: envoy.config.core.v3.Http3ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue + 15, // 41: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.proper_case_words:type_name -> envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords + 19, // 42: 
envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.stateful_formatter:type_name -> envoy.config.core.v3.TypedExtensionConfig + 18, // 43: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter.identifier:type_name -> google.protobuf.UInt32Value + 18, // 44: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter.value:type_name -> google.protobuf.UInt32Value + 45, // [45:45] is the sub-list for method output_type + 45, // [45:45] is the sub-list for method input_type + 45, // [45:45] is the sub-list for extension type_name + 45, // [45:45] is the sub-list for extension extendee + 0, // [0:45] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_protocol_proto_init() } +func file_envoy_config_core_v3_protocol_proto_init() { + if File_envoy_config_core_v3_protocol_proto != nil { + return + } + file_envoy_config_core_v3_extension_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_protocol_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TcpProtocolOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QuicKeepAliveSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QuicProtocolOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpstreamHttpProtocolOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AlternateProtocolsCacheOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpProtocolOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Http1ProtocolOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeepaliveSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Http2ProtocolOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[9].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*GrpcProtocolOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Http3ProtocolOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SchemeHeaderTransformation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Http1ProtocolOptions_HeaderKeyFormat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Http2ProtocolOptions_SettingsParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_core_v3_protocol_proto_msgTypes[11].OneofWrappers = []interface{}{ + (*SchemeHeaderTransformation_SchemeToOverwrite)(nil), + } + file_envoy_config_core_v3_protocol_proto_msgTypes[13].OneofWrappers = []interface{}{ + (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_)(nil), + (*Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_protocol_proto_rawDesc, + NumEnums: 1, + NumMessages: 16, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_protocol_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_protocol_proto_depIdxs, + EnumInfos: file_envoy_config_core_v3_protocol_proto_enumTypes, + MessageInfos: file_envoy_config_core_v3_protocol_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_protocol_proto = out.File + file_envoy_config_core_v3_protocol_proto_rawDesc = nil + file_envoy_config_core_v3_protocol_proto_goTypes = nil + file_envoy_config_core_v3_protocol_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go new file mode 100644 index 0000000000..d0d0be643e --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go @@ -0,0 +1,3015 @@ +//go:build !disable_pgv +// Code generated 
by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/core/v3/protocol.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on TcpProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *TcpProtocolOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TcpProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// TcpProtocolOptionsMultiError, or nil if none found. +func (m *TcpProtocolOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *TcpProtocolOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return TcpProtocolOptionsMultiError(errors) + } + + return nil +} + +// TcpProtocolOptionsMultiError is an error wrapping multiple validation errors +// returned by TcpProtocolOptions.ValidateAll() if the designated constraints +// aren't met. +type TcpProtocolOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TcpProtocolOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TcpProtocolOptionsMultiError) AllErrors() []error { return m } + +// TcpProtocolOptionsValidationError is the validation error returned by +// TcpProtocolOptions.Validate if the designated constraints aren't met. +type TcpProtocolOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TcpProtocolOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TcpProtocolOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TcpProtocolOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TcpProtocolOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e TcpProtocolOptionsValidationError) ErrorName() string { + return "TcpProtocolOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e TcpProtocolOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTcpProtocolOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TcpProtocolOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TcpProtocolOptionsValidationError{} + +// Validate checks the field values on QuicKeepAliveSettings with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *QuicKeepAliveSettings) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on QuicKeepAliveSettings with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// QuicKeepAliveSettingsMultiError, or nil if none found. +func (m *QuicKeepAliveSettings) ValidateAll() error { + return m.validate(true) +} + +func (m *QuicKeepAliveSettings) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if d := m.GetMaxInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = QuicKeepAliveSettingsValidationError{ + field: "MaxInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + lte := time.Duration(0*time.Second + 0*time.Nanosecond) + gte := time.Duration(1*time.Second + 0*time.Nanosecond) + + if dur > lte && dur < gte { + err := QuicKeepAliveSettingsValidationError{ + field: "MaxInterval", + reason: "value must be outside range (0s, 1s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if d := m.GetInitialInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = QuicKeepAliveSettingsValidationError{ + field: "InitialInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + lte := time.Duration(0*time.Second + 0*time.Nanosecond) + gte := time.Duration(1*time.Second + 0*time.Nanosecond) + + if dur > lte && dur < gte { + err := QuicKeepAliveSettingsValidationError{ + field: "InitialInterval", + reason: "value must be outside range (0s, 1s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if len(errors) > 0 { + return QuicKeepAliveSettingsMultiError(errors) + } + + return nil +} + +// QuicKeepAliveSettingsMultiError is an error wrapping multiple validation +// errors returned by QuicKeepAliveSettings.ValidateAll() if the designated +// constraints aren't met. +type QuicKeepAliveSettingsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m QuicKeepAliveSettingsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m QuicKeepAliveSettingsMultiError) AllErrors() []error { return m } + +// QuicKeepAliveSettingsValidationError is the validation error returned by +// QuicKeepAliveSettings.Validate if the designated constraints aren't met. +type QuicKeepAliveSettingsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e QuicKeepAliveSettingsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e QuicKeepAliveSettingsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e QuicKeepAliveSettingsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e QuicKeepAliveSettingsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e QuicKeepAliveSettingsValidationError) ErrorName() string { + return "QuicKeepAliveSettingsValidationError" +} + +// Error satisfies the builtin error interface +func (e QuicKeepAliveSettingsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sQuicKeepAliveSettings.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = QuicKeepAliveSettingsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = QuicKeepAliveSettingsValidationError{} + +// Validate checks the field values on QuicProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *QuicProtocolOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on QuicProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// QuicProtocolOptionsMultiError, or nil if none found. 
+func (m *QuicProtocolOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *QuicProtocolOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if wrapper := m.GetMaxConcurrentStreams(); wrapper != nil { + + if wrapper.GetValue() < 1 { + err := QuicProtocolOptionsValidationError{ + field: "MaxConcurrentStreams", + reason: "value must be greater than or equal to 1", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetInitialStreamWindowSize(); wrapper != nil { + + if val := wrapper.GetValue(); val < 1 || val > 16777216 { + err := QuicProtocolOptionsValidationError{ + field: "InitialStreamWindowSize", + reason: "value must be inside range [1, 16777216]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetInitialConnectionWindowSize(); wrapper != nil { + + if val := wrapper.GetValue(); val < 1 || val > 25165824 { + err := QuicProtocolOptionsValidationError{ + field: "InitialConnectionWindowSize", + reason: "value must be inside range [1, 25165824]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetNumTimeoutsToTriggerPortMigration(); wrapper != nil { + + if val := wrapper.GetValue(); val < 0 || val > 5 { + err := QuicProtocolOptionsValidationError{ + field: "NumTimeoutsToTriggerPortMigration", + reason: "value must be inside range [0, 5]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetConnectionKeepalive()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "ConnectionKeepalive", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "ConnectionKeepalive", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConnectionKeepalive()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "ConnectionKeepalive", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ConnectionOptions + + // no validation rules for ClientConnectionOptions + + if d := m.GetIdleNetworkTimeout(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = QuicProtocolOptionsValidationError{ + field: "IdleNetworkTimeout", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + lte := time.Duration(600*time.Second + 0*time.Nanosecond) + gte := time.Duration(1*time.Second + 0*time.Nanosecond) + + if dur < gte || dur > lte { + err := QuicProtocolOptionsValidationError{ + field: "IdleNetworkTimeout", + reason: "value must be inside range [1s, 10m0s]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if len(errors) > 0 { + return QuicProtocolOptionsMultiError(errors) + } + + return nil +} + +// QuicProtocolOptionsMultiError is an error wrapping multiple validation +// errors returned by QuicProtocolOptions.ValidateAll() if the designated +// constraints aren't met. 
+type QuicProtocolOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m QuicProtocolOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m QuicProtocolOptionsMultiError) AllErrors() []error { return m } + +// QuicProtocolOptionsValidationError is the validation error returned by +// QuicProtocolOptions.Validate if the designated constraints aren't met. +type QuicProtocolOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e QuicProtocolOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e QuicProtocolOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e QuicProtocolOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e QuicProtocolOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e QuicProtocolOptionsValidationError) ErrorName() string { + return "QuicProtocolOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e QuicProtocolOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sQuicProtocolOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = QuicProtocolOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = QuicProtocolOptionsValidationError{} + +// Validate checks the field values on UpstreamHttpProtocolOptions with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *UpstreamHttpProtocolOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UpstreamHttpProtocolOptions with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// UpstreamHttpProtocolOptionsMultiError, or nil if none found. +func (m *UpstreamHttpProtocolOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *UpstreamHttpProtocolOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for AutoSni + + // no validation rules for AutoSanValidation + + if m.GetOverrideAutoSniHeader() != "" { + + if !_UpstreamHttpProtocolOptions_OverrideAutoSniHeader_Pattern.MatchString(m.GetOverrideAutoSniHeader()) { + err := UpstreamHttpProtocolOptionsValidationError{ + field: "OverrideAutoSniHeader", + reason: "value does not match regex pattern \"^:?[0-9a-zA-Z!#$%&'*+-.^_|~`]+$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return UpstreamHttpProtocolOptionsMultiError(errors) + } + + return nil +} + +// UpstreamHttpProtocolOptionsMultiError is an error wrapping multiple +// validation errors returned by UpstreamHttpProtocolOptions.ValidateAll() if +// the designated constraints aren't met. 
+type UpstreamHttpProtocolOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UpstreamHttpProtocolOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UpstreamHttpProtocolOptionsMultiError) AllErrors() []error { return m } + +// UpstreamHttpProtocolOptionsValidationError is the validation error returned +// by UpstreamHttpProtocolOptions.Validate if the designated constraints +// aren't met. +type UpstreamHttpProtocolOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UpstreamHttpProtocolOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UpstreamHttpProtocolOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UpstreamHttpProtocolOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UpstreamHttpProtocolOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UpstreamHttpProtocolOptionsValidationError) ErrorName() string { + return "UpstreamHttpProtocolOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e UpstreamHttpProtocolOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUpstreamHttpProtocolOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UpstreamHttpProtocolOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UpstreamHttpProtocolOptionsValidationError{} + +var _UpstreamHttpProtocolOptions_OverrideAutoSniHeader_Pattern = regexp.MustCompile("^:?[0-9a-zA-Z!#$%&'*+-.^_|~`]+$") + +// Validate checks the field values on AlternateProtocolsCacheOptions with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *AlternateProtocolsCacheOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on AlternateProtocolsCacheOptions with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// AlternateProtocolsCacheOptionsMultiError, or nil if none found. 
+func (m *AlternateProtocolsCacheOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *AlternateProtocolsCacheOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := AlternateProtocolsCacheOptionsValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if wrapper := m.GetMaxEntries(); wrapper != nil { + + if wrapper.GetValue() <= 0 { + err := AlternateProtocolsCacheOptionsValidationError{ + field: "MaxEntries", + reason: "value must be greater than 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetKeyValueStoreConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AlternateProtocolsCacheOptionsValidationError{ + field: "KeyValueStoreConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AlternateProtocolsCacheOptionsValidationError{ + field: "KeyValueStoreConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetKeyValueStoreConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AlternateProtocolsCacheOptionsValidationError{ + field: "KeyValueStoreConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetPrepopulatedEntries() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AlternateProtocolsCacheOptionsValidationError{ + field: fmt.Sprintf("PrepopulatedEntries[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AlternateProtocolsCacheOptionsValidationError{ + field: fmt.Sprintf("PrepopulatedEntries[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AlternateProtocolsCacheOptionsValidationError{ + field: fmt.Sprintf("PrepopulatedEntries[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return AlternateProtocolsCacheOptionsMultiError(errors) + } + + return nil +} + +// AlternateProtocolsCacheOptionsMultiError is an error wrapping multiple +// validation errors returned by AlternateProtocolsCacheOptions.ValidateAll() +// if the designated constraints aren't met. +type AlternateProtocolsCacheOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AlternateProtocolsCacheOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m AlternateProtocolsCacheOptionsMultiError) AllErrors() []error { return m } + +// AlternateProtocolsCacheOptionsValidationError is the validation error +// returned by AlternateProtocolsCacheOptions.Validate if the designated +// constraints aren't met. +type AlternateProtocolsCacheOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AlternateProtocolsCacheOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AlternateProtocolsCacheOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e AlternateProtocolsCacheOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AlternateProtocolsCacheOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e AlternateProtocolsCacheOptionsValidationError) ErrorName() string { + return "AlternateProtocolsCacheOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e AlternateProtocolsCacheOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAlternateProtocolsCacheOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AlternateProtocolsCacheOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AlternateProtocolsCacheOptionsValidationError{} + +// Validate checks the field values on HttpProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HttpProtocolOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HttpProtocolOptionsMultiError, or nil if none found. 
+func (m *HttpProtocolOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpProtocolOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetIdleTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpProtocolOptionsValidationError{ + field: "IdleTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpProtocolOptionsValidationError{ + field: "IdleTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetIdleTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpProtocolOptionsValidationError{ + field: "IdleTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMaxConnectionDuration()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpProtocolOptionsValidationError{ + field: "MaxConnectionDuration", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpProtocolOptionsValidationError{ + field: "MaxConnectionDuration", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxConnectionDuration()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpProtocolOptionsValidationError{ + field: "MaxConnectionDuration", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if wrapper := m.GetMaxHeadersCount(); wrapper != nil { + + if wrapper.GetValue() < 1 { + err := HttpProtocolOptionsValidationError{ + field: "MaxHeadersCount", + reason: "value must be greater than or equal to 1", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetMaxStreamDuration()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpProtocolOptionsValidationError{ + field: "MaxStreamDuration", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpProtocolOptionsValidationError{ + field: "MaxStreamDuration", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxStreamDuration()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpProtocolOptionsValidationError{ + field: "MaxStreamDuration", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for HeadersWithUnderscoresAction + + if all { + switch v := interface{}(m.GetMaxRequestsPerConnection()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpProtocolOptionsValidationError{ + field: "MaxRequestsPerConnection", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, 
HttpProtocolOptionsValidationError{ + field: "MaxRequestsPerConnection", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxRequestsPerConnection()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpProtocolOptionsValidationError{ + field: "MaxRequestsPerConnection", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return HttpProtocolOptionsMultiError(errors) + } + + return nil +} + +// HttpProtocolOptionsMultiError is an error wrapping multiple validation +// errors returned by HttpProtocolOptions.ValidateAll() if the designated +// constraints aren't met. +type HttpProtocolOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpProtocolOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpProtocolOptionsMultiError) AllErrors() []error { return m } + +// HttpProtocolOptionsValidationError is the validation error returned by +// HttpProtocolOptions.Validate if the designated constraints aren't met. +type HttpProtocolOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpProtocolOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpProtocolOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpProtocolOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpProtocolOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpProtocolOptionsValidationError) ErrorName() string { + return "HttpProtocolOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpProtocolOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpProtocolOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpProtocolOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpProtocolOptionsValidationError{} + +// Validate checks the field values on Http1ProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Http1ProtocolOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Http1ProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Http1ProtocolOptionsMultiError, or nil if none found. 
+func (m *Http1ProtocolOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *Http1ProtocolOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetAllowAbsoluteUrl()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http1ProtocolOptionsValidationError{ + field: "AllowAbsoluteUrl", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http1ProtocolOptionsValidationError{ + field: "AllowAbsoluteUrl", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAllowAbsoluteUrl()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http1ProtocolOptionsValidationError{ + field: "AllowAbsoluteUrl", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for AcceptHttp_10 + + // no validation rules for DefaultHostForHttp_10 + + if all { + switch v := interface{}(m.GetHeaderKeyFormat()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http1ProtocolOptionsValidationError{ + field: "HeaderKeyFormat", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http1ProtocolOptionsValidationError{ + field: "HeaderKeyFormat", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeaderKeyFormat()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http1ProtocolOptionsValidationError{ + field: "HeaderKeyFormat", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for EnableTrailers + + // no validation rules for AllowChunkedLength + + if all { + switch v := interface{}(m.GetOverrideStreamErrorOnInvalidHttpMessage()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http1ProtocolOptionsValidationError{ + field: "OverrideStreamErrorOnInvalidHttpMessage", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http1ProtocolOptionsValidationError{ + field: "OverrideStreamErrorOnInvalidHttpMessage", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOverrideStreamErrorOnInvalidHttpMessage()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http1ProtocolOptionsValidationError{ + field: "OverrideStreamErrorOnInvalidHttpMessage", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for SendFullyQualifiedUrl + + if all { + switch v := interface{}(m.GetUseBalsaParser()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http1ProtocolOptionsValidationError{ + field: "UseBalsaParser", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http1ProtocolOptionsValidationError{ + 
field: "UseBalsaParser", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUseBalsaParser()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http1ProtocolOptionsValidationError{ + field: "UseBalsaParser", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for AllowCustomMethods + + if len(errors) > 0 { + return Http1ProtocolOptionsMultiError(errors) + } + + return nil +} + +// Http1ProtocolOptionsMultiError is an error wrapping multiple validation +// errors returned by Http1ProtocolOptions.ValidateAll() if the designated +// constraints aren't met. +type Http1ProtocolOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Http1ProtocolOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Http1ProtocolOptionsMultiError) AllErrors() []error { return m } + +// Http1ProtocolOptionsValidationError is the validation error returned by +// Http1ProtocolOptions.Validate if the designated constraints aren't met. +type Http1ProtocolOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Http1ProtocolOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Http1ProtocolOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Http1ProtocolOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Http1ProtocolOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Http1ProtocolOptionsValidationError) ErrorName() string { + return "Http1ProtocolOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e Http1ProtocolOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttp1ProtocolOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Http1ProtocolOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Http1ProtocolOptionsValidationError{} + +// Validate checks the field values on KeepaliveSettings with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *KeepaliveSettings) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on KeepaliveSettings with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// KeepaliveSettingsMultiError, or nil if none found. 
+func (m *KeepaliveSettings) ValidateAll() error { + return m.validate(true) +} + +func (m *KeepaliveSettings) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if d := m.GetInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = KeepaliveSettingsValidationError{ + field: "Interval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gte := time.Duration(0*time.Second + 1000000*time.Nanosecond) + + if dur < gte { + err := KeepaliveSettingsValidationError{ + field: "Interval", + reason: "value must be greater than or equal to 1ms", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if m.GetTimeout() == nil { + err := KeepaliveSettingsValidationError{ + field: "Timeout", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if d := m.GetTimeout(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = KeepaliveSettingsValidationError{ + field: "Timeout", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gte := time.Duration(0*time.Second + 1000000*time.Nanosecond) + + if dur < gte { + err := KeepaliveSettingsValidationError{ + field: "Timeout", + reason: "value must be greater than or equal to 1ms", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if all { + switch v := interface{}(m.GetIntervalJitter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, KeepaliveSettingsValidationError{ + field: "IntervalJitter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, KeepaliveSettingsValidationError{ + field: "IntervalJitter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetIntervalJitter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return KeepaliveSettingsValidationError{ + field: "IntervalJitter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if d := m.GetConnectionIdleInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = KeepaliveSettingsValidationError{ + field: "ConnectionIdleInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gte := time.Duration(0*time.Second + 1000000*time.Nanosecond) + + if dur < gte { + err := KeepaliveSettingsValidationError{ + field: "ConnectionIdleInterval", + reason: "value must be greater than or equal to 1ms", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if len(errors) > 0 { + return KeepaliveSettingsMultiError(errors) + } + + return nil +} + +// KeepaliveSettingsMultiError is an error wrapping multiple validation errors +// returned by KeepaliveSettings.ValidateAll() if the designated constraints +// aren't met. +type KeepaliveSettingsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m KeepaliveSettingsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m KeepaliveSettingsMultiError) AllErrors() []error { return m } + +// KeepaliveSettingsValidationError is the validation error returned by +// KeepaliveSettings.Validate if the designated constraints aren't met. +type KeepaliveSettingsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e KeepaliveSettingsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e KeepaliveSettingsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e KeepaliveSettingsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e KeepaliveSettingsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e KeepaliveSettingsValidationError) ErrorName() string { + return "KeepaliveSettingsValidationError" +} + +// Error satisfies the builtin error interface +func (e KeepaliveSettingsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sKeepaliveSettings.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = KeepaliveSettingsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = KeepaliveSettingsValidationError{} + +// Validate checks the field values on Http2ProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Http2ProtocolOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Http2ProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Http2ProtocolOptionsMultiError, or nil if none found. 
+func (m *Http2ProtocolOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *Http2ProtocolOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetHpackTableSize()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "HpackTableSize", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "HpackTableSize", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHpackTableSize()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http2ProtocolOptionsValidationError{ + field: "HpackTableSize", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if wrapper := m.GetMaxConcurrentStreams(); wrapper != nil { + + if val := wrapper.GetValue(); val < 1 || val > 2147483647 { + err := Http2ProtocolOptionsValidationError{ + field: "MaxConcurrentStreams", + reason: "value must be inside range [1, 2147483647]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetInitialStreamWindowSize(); wrapper != nil { + + if val := wrapper.GetValue(); val < 65535 || val > 2147483647 { + err := Http2ProtocolOptionsValidationError{ + field: "InitialStreamWindowSize", + reason: "value must be inside range [65535, 2147483647]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetInitialConnectionWindowSize(); wrapper != nil { + + if val := wrapper.GetValue(); val < 65535 || val > 2147483647 { + err := Http2ProtocolOptionsValidationError{ + field: "InitialConnectionWindowSize", + reason: "value must be inside range [65535, 2147483647]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + // no validation rules for AllowConnect + + // no validation rules for AllowMetadata + + if wrapper := m.GetMaxOutboundFrames(); wrapper != nil { + + if wrapper.GetValue() < 1 { + err := Http2ProtocolOptionsValidationError{ + field: "MaxOutboundFrames", + reason: "value must be greater than or equal to 1", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetMaxOutboundControlFrames(); wrapper != nil { + + if wrapper.GetValue() < 1 { + err := Http2ProtocolOptionsValidationError{ + field: "MaxOutboundControlFrames", + reason: "value must be greater than or equal to 1", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetMaxConsecutiveInboundFramesWithEmptyPayload()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "MaxConsecutiveInboundFramesWithEmptyPayload", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "MaxConsecutiveInboundFramesWithEmptyPayload", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxConsecutiveInboundFramesWithEmptyPayload()).(interface{ Validate() error }); ok { + if 
err := v.Validate(); err != nil { + return Http2ProtocolOptionsValidationError{ + field: "MaxConsecutiveInboundFramesWithEmptyPayload", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMaxInboundPriorityFramesPerStream()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "MaxInboundPriorityFramesPerStream", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "MaxInboundPriorityFramesPerStream", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxInboundPriorityFramesPerStream()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http2ProtocolOptionsValidationError{ + field: "MaxInboundPriorityFramesPerStream", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if wrapper := m.GetMaxInboundWindowUpdateFramesPerDataFrameSent(); wrapper != nil { + + if wrapper.GetValue() < 1 { + err := Http2ProtocolOptionsValidationError{ + field: "MaxInboundWindowUpdateFramesPerDataFrameSent", + reason: "value must be greater than or equal to 1", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + // no validation rules for StreamErrorOnInvalidHttpMessaging + + if all { + switch v := interface{}(m.GetOverrideStreamErrorOnInvalidHttpMessage()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "OverrideStreamErrorOnInvalidHttpMessage", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "OverrideStreamErrorOnInvalidHttpMessage", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOverrideStreamErrorOnInvalidHttpMessage()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http2ProtocolOptionsValidationError{ + field: "OverrideStreamErrorOnInvalidHttpMessage", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetCustomSettingsParameters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: fmt.Sprintf("CustomSettingsParameters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: fmt.Sprintf("CustomSettingsParameters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http2ProtocolOptionsValidationError{ + field: fmt.Sprintf("CustomSettingsParameters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := 
interface{}(m.GetConnectionKeepalive()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "ConnectionKeepalive", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "ConnectionKeepalive", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConnectionKeepalive()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http2ProtocolOptionsValidationError{ + field: "ConnectionKeepalive", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUseOghttp2Codec()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "UseOghttp2Codec", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http2ProtocolOptionsValidationError{ + field: "UseOghttp2Codec", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUseOghttp2Codec()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http2ProtocolOptionsValidationError{ + field: "UseOghttp2Codec", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Http2ProtocolOptionsMultiError(errors) + } + + return nil +} + +// Http2ProtocolOptionsMultiError is an error wrapping multiple validation +// errors returned by Http2ProtocolOptions.ValidateAll() if the designated +// constraints aren't met. +type Http2ProtocolOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Http2ProtocolOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Http2ProtocolOptionsMultiError) AllErrors() []error { return m } + +// Http2ProtocolOptionsValidationError is the validation error returned by +// Http2ProtocolOptions.Validate if the designated constraints aren't met. +type Http2ProtocolOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Http2ProtocolOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Http2ProtocolOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Http2ProtocolOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Http2ProtocolOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Http2ProtocolOptionsValidationError) ErrorName() string { + return "Http2ProtocolOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e Http2ProtocolOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttp2ProtocolOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Http2ProtocolOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Http2ProtocolOptionsValidationError{} + +// Validate checks the field values on GrpcProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *GrpcProtocolOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GrpcProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GrpcProtocolOptionsMultiError, or nil if none found. +func (m *GrpcProtocolOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *GrpcProtocolOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetHttp2ProtocolOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrpcProtocolOptionsValidationError{ + field: "Http2ProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrpcProtocolOptionsValidationError{ + field: "Http2ProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttp2ProtocolOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrpcProtocolOptionsValidationError{ + field: "Http2ProtocolOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return GrpcProtocolOptionsMultiError(errors) + } + + return nil +} + +// GrpcProtocolOptionsMultiError is an error wrapping multiple validation +// errors returned by GrpcProtocolOptions.ValidateAll() if the designated +// constraints aren't met. +type GrpcProtocolOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrpcProtocolOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrpcProtocolOptionsMultiError) AllErrors() []error { return m } + +// GrpcProtocolOptionsValidationError is the validation error returned by +// GrpcProtocolOptions.Validate if the designated constraints aren't met. +type GrpcProtocolOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrpcProtocolOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e GrpcProtocolOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrpcProtocolOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrpcProtocolOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GrpcProtocolOptionsValidationError) ErrorName() string { + return "GrpcProtocolOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e GrpcProtocolOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrpcProtocolOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrpcProtocolOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrpcProtocolOptionsValidationError{} + +// Validate checks the field values on Http3ProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Http3ProtocolOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Http3ProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Http3ProtocolOptionsMultiError, or nil if none found. +func (m *Http3ProtocolOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *Http3ProtocolOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetQuicProtocolOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http3ProtocolOptionsValidationError{ + field: "QuicProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http3ProtocolOptionsValidationError{ + field: "QuicProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetQuicProtocolOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http3ProtocolOptionsValidationError{ + field: "QuicProtocolOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetOverrideStreamErrorOnInvalidHttpMessage()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http3ProtocolOptionsValidationError{ + field: "OverrideStreamErrorOnInvalidHttpMessage", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http3ProtocolOptionsValidationError{ + field: "OverrideStreamErrorOnInvalidHttpMessage", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOverrideStreamErrorOnInvalidHttpMessage()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http3ProtocolOptionsValidationError{ + field: 
"OverrideStreamErrorOnInvalidHttpMessage", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for AllowExtendedConnect + + // no validation rules for AllowMetadata + + if len(errors) > 0 { + return Http3ProtocolOptionsMultiError(errors) + } + + return nil +} + +// Http3ProtocolOptionsMultiError is an error wrapping multiple validation +// errors returned by Http3ProtocolOptions.ValidateAll() if the designated +// constraints aren't met. +type Http3ProtocolOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Http3ProtocolOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Http3ProtocolOptionsMultiError) AllErrors() []error { return m } + +// Http3ProtocolOptionsValidationError is the validation error returned by +// Http3ProtocolOptions.Validate if the designated constraints aren't met. +type Http3ProtocolOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Http3ProtocolOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Http3ProtocolOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Http3ProtocolOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Http3ProtocolOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Http3ProtocolOptionsValidationError) ErrorName() string { + return "Http3ProtocolOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e Http3ProtocolOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttp3ProtocolOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Http3ProtocolOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Http3ProtocolOptionsValidationError{} + +// Validate checks the field values on SchemeHeaderTransformation with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *SchemeHeaderTransformation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SchemeHeaderTransformation with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SchemeHeaderTransformationMultiError, or nil if none found. 
+func (m *SchemeHeaderTransformation) ValidateAll() error { + return m.validate(true) +} + +func (m *SchemeHeaderTransformation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for MatchUpstream + + switch v := m.Transformation.(type) { + case *SchemeHeaderTransformation_SchemeToOverwrite: + if v == nil { + err := SchemeHeaderTransformationValidationError{ + field: "Transformation", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if _, ok := _SchemeHeaderTransformation_SchemeToOverwrite_InLookup[m.GetSchemeToOverwrite()]; !ok { + err := SchemeHeaderTransformationValidationError{ + field: "SchemeToOverwrite", + reason: "value must be in list [http https]", + } + if !all { + return err + } + errors = append(errors, err) + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return SchemeHeaderTransformationMultiError(errors) + } + + return nil +} + +// SchemeHeaderTransformationMultiError is an error wrapping multiple +// validation errors returned by SchemeHeaderTransformation.ValidateAll() if +// the designated constraints aren't met. +type SchemeHeaderTransformationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SchemeHeaderTransformationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SchemeHeaderTransformationMultiError) AllErrors() []error { return m } + +// SchemeHeaderTransformationValidationError is the validation error returned +// by SchemeHeaderTransformation.Validate if the designated constraints aren't met. +type SchemeHeaderTransformationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SchemeHeaderTransformationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SchemeHeaderTransformationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SchemeHeaderTransformationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SchemeHeaderTransformationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SchemeHeaderTransformationValidationError) ErrorName() string { + return "SchemeHeaderTransformationValidationError" +} + +// Error satisfies the builtin error interface +func (e SchemeHeaderTransformationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSchemeHeaderTransformation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SchemeHeaderTransformationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SchemeHeaderTransformationValidationError{} + +var _SchemeHeaderTransformation_SchemeToOverwrite_InLookup = map[string]struct{}{ + "http": {}, + "https": {}, +} + +// Validate checks the field values on +// AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryMultiError, or +// nil if none found. +func (m *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) ValidateAll() error { + return m.validate(true) +} + +func (m *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetHostname() != "" { + + if !_AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry_Hostname_Pattern.MatchString(m.GetHostname()) { + err := AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError{ + field: "Hostname", + reason: "value does not match regex pattern \"^:?[0-9a-zA-Z!#$%&'*+-.^_|~`]+$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if val := m.GetPort(); val <= 0 || val >= 65535 { + err := AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError{ + field: "Port", + reason: "value must be inside range (0, 65535)", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryMultiError(errors) + } + + return nil +} + +// AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryMultiError is an +// error wrapping multiple validation errors returned by +// AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry.ValidateAll() +// if the designated constraints aren't met. +type AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryMultiError) AllErrors() []error { + return m +} + +// AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError +// is the validation error returned by +// AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry.Validate if the +// designated constraints aren't met. +type AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. 
+func (e AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError) Key() bool { + return e.key +} + +// ErrorName returns error name. +func (e AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError) ErrorName() string { + return "AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError" +} + +// Error satisfies the builtin error interface +func (e AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError{} + +var _AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry_Hostname_Pattern = regexp.MustCompile("^:?[0-9a-zA-Z!#$%&'*+-.^_|~`]+$") + +// Validate checks the field values on Http1ProtocolOptions_HeaderKeyFormat +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *Http1ProtocolOptions_HeaderKeyFormat) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Http1ProtocolOptions_HeaderKeyFormat +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// Http1ProtocolOptions_HeaderKeyFormatMultiError, or nil if none found. 
+func (m *Http1ProtocolOptions_HeaderKeyFormat) ValidateAll() error { + return m.validate(true) +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofHeaderFormatPresent := false + switch v := m.HeaderFormat.(type) { + case *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_: + if v == nil { + err := Http1ProtocolOptions_HeaderKeyFormatValidationError{ + field: "HeaderFormat", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofHeaderFormatPresent = true + + if all { + switch v := interface{}(m.GetProperCaseWords()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http1ProtocolOptions_HeaderKeyFormatValidationError{ + field: "ProperCaseWords", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http1ProtocolOptions_HeaderKeyFormatValidationError{ + field: "ProperCaseWords", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetProperCaseWords()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http1ProtocolOptions_HeaderKeyFormatValidationError{ + field: "ProperCaseWords", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter: + if v == nil { + err := Http1ProtocolOptions_HeaderKeyFormatValidationError{ + field: "HeaderFormat", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofHeaderFormatPresent = true + + if all { + switch v := interface{}(m.GetStatefulFormatter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http1ProtocolOptions_HeaderKeyFormatValidationError{ + field: "StatefulFormatter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http1ProtocolOptions_HeaderKeyFormatValidationError{ + field: "StatefulFormatter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStatefulFormatter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http1ProtocolOptions_HeaderKeyFormatValidationError{ + field: "StatefulFormatter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofHeaderFormatPresent { + err := Http1ProtocolOptions_HeaderKeyFormatValidationError{ + field: "HeaderFormat", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Http1ProtocolOptions_HeaderKeyFormatMultiError(errors) + } + + return nil +} + +// Http1ProtocolOptions_HeaderKeyFormatMultiError is an error wrapping multiple +// validation errors returned by +// Http1ProtocolOptions_HeaderKeyFormat.ValidateAll() if the designated +// constraints aren't met. +type Http1ProtocolOptions_HeaderKeyFormatMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m Http1ProtocolOptions_HeaderKeyFormatMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Http1ProtocolOptions_HeaderKeyFormatMultiError) AllErrors() []error { return m } + +// Http1ProtocolOptions_HeaderKeyFormatValidationError is the validation error +// returned by Http1ProtocolOptions_HeaderKeyFormat.Validate if the designated +// constraints aren't met. +type Http1ProtocolOptions_HeaderKeyFormatValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) ErrorName() string { + return "Http1ProtocolOptions_HeaderKeyFormatValidationError" +} + +// Error satisfies the builtin error interface +func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttp1ProtocolOptions_HeaderKeyFormat.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Http1ProtocolOptions_HeaderKeyFormatValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Http1ProtocolOptions_HeaderKeyFormatValidationError{} + +// Validate checks the field values on +// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsMultiError, or nil if +// none found. +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) ValidateAll() error { + return m.validate(true) +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsMultiError(errors) + } + + return nil +} + +// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsMultiError is an error +// wrapping multiple validation errors returned by +// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords.ValidateAll() if the +// designated constraints aren't met. +type Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsMultiError) AllErrors() []error { return m } + +// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError is the +// validation error returned by +// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords.Validate if the +// designated constraints aren't met. +type Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) ErrorName() string { + return "Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError" +} + +// Error satisfies the builtin error interface +func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttp1ProtocolOptions_HeaderKeyFormat_ProperCaseWords.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError{} + +// Validate checks the field values on Http2ProtocolOptions_SettingsParameter +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *Http2ProtocolOptions_SettingsParameter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Http2ProtocolOptions_SettingsParameter with the rules defined in the proto +// definition for this message. If any rules are violated, the result is a +// list of violation errors wrapped in +// Http2ProtocolOptions_SettingsParameterMultiError, or nil if none found. 
+func (m *Http2ProtocolOptions_SettingsParameter) ValidateAll() error { + return m.validate(true) +} + +func (m *Http2ProtocolOptions_SettingsParameter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if wrapper := m.GetIdentifier(); wrapper != nil { + + if val := wrapper.GetValue(); val < 0 || val > 65535 { + err := Http2ProtocolOptions_SettingsParameterValidationError{ + field: "Identifier", + reason: "value must be inside range [0, 65535]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } else { + err := Http2ProtocolOptions_SettingsParameterValidationError{ + field: "Identifier", + reason: "value is required and must not be nil.", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetValue() == nil { + err := Http2ProtocolOptions_SettingsParameterValidationError{ + field: "Value", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetValue()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Http2ProtocolOptions_SettingsParameterValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Http2ProtocolOptions_SettingsParameterValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValue()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Http2ProtocolOptions_SettingsParameterValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Http2ProtocolOptions_SettingsParameterMultiError(errors) + } + + return nil +} + +// Http2ProtocolOptions_SettingsParameterMultiError is an error wrapping +// multiple validation errors returned by +// Http2ProtocolOptions_SettingsParameter.ValidateAll() if the designated +// constraints aren't met. +type Http2ProtocolOptions_SettingsParameterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Http2ProtocolOptions_SettingsParameterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Http2ProtocolOptions_SettingsParameterMultiError) AllErrors() []error { return m } + +// Http2ProtocolOptions_SettingsParameterValidationError is the validation +// error returned by Http2ProtocolOptions_SettingsParameter.Validate if the +// designated constraints aren't met. +type Http2ProtocolOptions_SettingsParameterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Http2ProtocolOptions_SettingsParameterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Http2ProtocolOptions_SettingsParameterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Http2ProtocolOptions_SettingsParameterValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e Http2ProtocolOptions_SettingsParameterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Http2ProtocolOptions_SettingsParameterValidationError) ErrorName() string { + return "Http2ProtocolOptions_SettingsParameterValidationError" +} + +// Error satisfies the builtin error interface +func (e Http2ProtocolOptions_SettingsParameterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttp2ProtocolOptions_SettingsParameter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Http2ProtocolOptions_SettingsParameterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Http2ProtocolOptions_SettingsParameterValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol_vtproto.pb.go new file mode 100644 index 0000000000..131aa9d32d --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol_vtproto.pb.go @@ -0,0 +1,1718 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/protocol.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TcpProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TcpProtocolOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TcpProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *QuicKeepAliveSettings) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QuicKeepAliveSettings) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *QuicKeepAliveSettings) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.InitialInterval != nil { + size, err := (*durationpb.Duration)(m.InitialInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MaxInterval != nil { + size, err := (*durationpb.Duration)(m.MaxInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QuicProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QuicProtocolOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *QuicProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.IdleNetworkTimeout != nil { + size, err := (*durationpb.Duration)(m.IdleNetworkTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if len(m.ClientConnectionOptions) > 0 { + i -= len(m.ClientConnectionOptions) + copy(dAtA[i:], m.ClientConnectionOptions) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClientConnectionOptions))) + i-- + dAtA[i] = 0x3a + } + if len(m.ConnectionOptions) > 0 { + i -= len(m.ConnectionOptions) + copy(dAtA[i:], m.ConnectionOptions) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ConnectionOptions))) + i-- + dAtA[i] = 0x32 + } + if m.ConnectionKeepalive != nil { + size, err := 
m.ConnectionKeepalive.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.NumTimeoutsToTriggerPortMigration != nil { + size, err := (*wrapperspb.UInt32Value)(m.NumTimeoutsToTriggerPortMigration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.InitialConnectionWindowSize != nil { + size, err := (*wrapperspb.UInt32Value)(m.InitialConnectionWindowSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.InitialStreamWindowSize != nil { + size, err := (*wrapperspb.UInt32Value)(m.InitialStreamWindowSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MaxConcurrentStreams != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxConcurrentStreams).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpstreamHttpProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpstreamHttpProtocolOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UpstreamHttpProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.OverrideAutoSniHeader) > 0 { + i -= len(m.OverrideAutoSniHeader) + copy(dAtA[i:], m.OverrideAutoSniHeader) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.OverrideAutoSniHeader))) + i-- + dAtA[i] = 0x1a + } + if m.AutoSanValidation { + i-- + if m.AutoSanValidation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.AutoSni { + i-- + if m.AutoSni { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Port != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x10 + } + if len(m.Hostname) > 0 { + i 
-= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AlternateProtocolsCacheOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlternateProtocolsCacheOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AlternateProtocolsCacheOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CanonicalSuffixes) > 0 { + for iNdEx := len(m.CanonicalSuffixes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.CanonicalSuffixes[iNdEx]) + copy(dAtA[i:], m.CanonicalSuffixes[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CanonicalSuffixes[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.PrepopulatedEntries) > 0 { + for iNdEx := len(m.PrepopulatedEntries) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.PrepopulatedEntries[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if m.KeyValueStoreConfig != nil { + size, err := m.KeyValueStoreConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.MaxEntries != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxEntries).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpProtocolOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxRequestsPerConnection != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxRequestsPerConnection).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.HeadersWithUnderscoresAction != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HeadersWithUnderscoresAction)) + i-- + dAtA[i] = 0x28 + } + if m.MaxStreamDuration != nil { + size, err := (*durationpb.Duration)(m.MaxStreamDuration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.MaxConnectionDuration != nil { + size, err := (*durationpb.Duration)(m.MaxConnectionDuration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.MaxHeadersCount != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxHeadersCount).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.IdleTimeout != nil { + size, err := (*durationpb.Duration)(m.IdleTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.HeaderFormat.(*Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HeaderFormat.(*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ProperCaseWords != nil { + size, err := m.ProperCaseWords.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) 
+ i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StatefulFormatter != nil { + size, err := m.StatefulFormatter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *Http1ProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Http1ProtocolOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Http1ProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AllowCustomMethods { + i-- + if m.AllowCustomMethods { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.UseBalsaParser != nil { + size, err := (*wrapperspb.BoolValue)(m.UseBalsaParser).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if m.SendFullyQualifiedUrl { + i-- + if m.SendFullyQualifiedUrl { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.OverrideStreamErrorOnInvalidHttpMessage != nil { + size, err := (*wrapperspb.BoolValue)(m.OverrideStreamErrorOnInvalidHttpMessage).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.AllowChunkedLength { + i-- + if m.AllowChunkedLength { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.EnableTrailers { + i-- + if m.EnableTrailers { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.HeaderKeyFormat != nil { + size, err := m.HeaderKeyFormat.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.DefaultHostForHttp_10) > 0 { + i -= len(m.DefaultHostForHttp_10) + copy(dAtA[i:], m.DefaultHostForHttp_10) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DefaultHostForHttp_10))) + i-- + dAtA[i] = 0x1a + } + if m.AcceptHttp_10 { + i-- + if m.AcceptHttp_10 { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.AllowAbsoluteUrl != nil { + size, err := (*wrapperspb.BoolValue)(m.AllowAbsoluteUrl).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *KeepaliveSettings) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + 
dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeepaliveSettings) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *KeepaliveSettings) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ConnectionIdleInterval != nil { + size, err := (*durationpb.Duration)(m.ConnectionIdleInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.IntervalJitter != nil { + if vtmsg, ok := interface{}(m.IntervalJitter).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.IntervalJitter) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.Timeout != nil { + size, err := (*durationpb.Duration)(m.Timeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Interval != nil { + size, err := (*durationpb.Duration)(m.Interval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Http2ProtocolOptions_SettingsParameter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Http2ProtocolOptions_SettingsParameter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Http2ProtocolOptions_SettingsParameter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Value != nil { + size, err := (*wrapperspb.UInt32Value)(m.Value).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Identifier != nil { + size, err := (*wrapperspb.UInt32Value)(m.Identifier).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Http2ProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Http2ProtocolOptions) 
MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Http2ProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.UseOghttp2Codec != nil { + size, err := (*wrapperspb.BoolValue)(m.UseOghttp2Codec).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.ConnectionKeepalive != nil { + size, err := m.ConnectionKeepalive.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + if m.OverrideStreamErrorOnInvalidHttpMessage != nil { + size, err := (*wrapperspb.BoolValue)(m.OverrideStreamErrorOnInvalidHttpMessage).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if len(m.CustomSettingsParameters) > 0 { + for iNdEx := len(m.CustomSettingsParameters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.CustomSettingsParameters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + } + if m.StreamErrorOnInvalidHttpMessaging { + i-- + if m.StreamErrorOnInvalidHttpMessaging { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + } + if m.MaxInboundWindowUpdateFramesPerDataFrameSent != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxInboundWindowUpdateFramesPerDataFrameSent).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if m.MaxInboundPriorityFramesPerStream != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxInboundPriorityFramesPerStream).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if m.MaxConsecutiveInboundFramesWithEmptyPayload != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxConsecutiveInboundFramesWithEmptyPayload).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if m.MaxOutboundControlFrames != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxOutboundControlFrames).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.MaxOutboundFrames != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxOutboundFrames).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.AllowMetadata { + i-- + if m.AllowMetadata { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.AllowConnect { + i-- + if m.AllowConnect { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.InitialConnectionWindowSize != nil { + size, err := 
(*wrapperspb.UInt32Value)(m.InitialConnectionWindowSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.InitialStreamWindowSize != nil { + size, err := (*wrapperspb.UInt32Value)(m.InitialStreamWindowSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.MaxConcurrentStreams != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxConcurrentStreams).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.HpackTableSize != nil { + size, err := (*wrapperspb.UInt32Value)(m.HpackTableSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GrpcProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GrpcProtocolOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GrpcProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Http2ProtocolOptions != nil { + size, err := m.Http2ProtocolOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Http3ProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Http3ProtocolOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Http3ProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AllowMetadata { + i-- + if m.AllowMetadata { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.AllowExtendedConnect { + i-- + if m.AllowExtendedConnect { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.OverrideStreamErrorOnInvalidHttpMessage != nil { + size, err := (*wrapperspb.BoolValue)(m.OverrideStreamErrorOnInvalidHttpMessage).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.QuicProtocolOptions != nil { + size, err := m.QuicProtocolOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) 
+ i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SchemeHeaderTransformation) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SchemeHeaderTransformation) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SchemeHeaderTransformation) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MatchUpstream { + i-- + if m.MatchUpstream { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if msg, ok := m.Transformation.(*SchemeHeaderTransformation_SchemeToOverwrite); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *SchemeHeaderTransformation_SchemeToOverwrite) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SchemeHeaderTransformation_SchemeToOverwrite) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.SchemeToOverwrite) + copy(dAtA[i:], m.SchemeToOverwrite) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SchemeToOverwrite))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *TcpProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *QuicKeepAliveSettings) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxInterval != nil { + l = (*durationpb.Duration)(m.MaxInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.InitialInterval != nil { + l = (*durationpb.Duration)(m.InitialInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *QuicProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxConcurrentStreams != nil { + l = (*wrapperspb.UInt32Value)(m.MaxConcurrentStreams).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.InitialStreamWindowSize != nil { + l = (*wrapperspb.UInt32Value)(m.InitialStreamWindowSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.InitialConnectionWindowSize != nil { + l = (*wrapperspb.UInt32Value)(m.InitialConnectionWindowSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.NumTimeoutsToTriggerPortMigration != nil { + l = (*wrapperspb.UInt32Value)(m.NumTimeoutsToTriggerPortMigration).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectionKeepalive != nil { + l = m.ConnectionKeepalive.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ConnectionOptions) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ClientConnectionOptions) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IdleNetworkTimeout != nil { + l = (*durationpb.Duration)(m.IdleNetworkTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m 
*UpstreamHttpProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AutoSni { + n += 2 + } + if m.AutoSanValidation { + n += 2 + } + l = len(m.OverrideAutoSniHeader) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hostname) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Port != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Port)) + } + n += len(m.unknownFields) + return n +} + +func (m *AlternateProtocolsCacheOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxEntries != nil { + l = (*wrapperspb.UInt32Value)(m.MaxEntries).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.KeyValueStoreConfig != nil { + l = m.KeyValueStoreConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.PrepopulatedEntries) > 0 { + for _, e := range m.PrepopulatedEntries { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.CanonicalSuffixes) > 0 { + for _, s := range m.CanonicalSuffixes { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *HttpProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IdleTimeout != nil { + l = (*durationpb.Duration)(m.IdleTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxHeadersCount != nil { + l = (*wrapperspb.UInt32Value)(m.MaxHeadersCount).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxConnectionDuration != nil { + l = (*durationpb.Duration)(m.MaxConnectionDuration).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxStreamDuration != nil { + l = (*durationpb.Duration)(m.MaxStreamDuration).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HeadersWithUnderscoresAction != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HeadersWithUnderscoresAction)) + } + if m.MaxRequestsPerConnection != nil { + l = (*wrapperspb.UInt32Value)(m.MaxRequestsPerConnection).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.HeaderFormat.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ProperCaseWords != nil { + l = m.ProperCaseWords.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StatefulFormatter != nil { + l = m.StatefulFormatter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m 
*Http1ProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AllowAbsoluteUrl != nil { + l = (*wrapperspb.BoolValue)(m.AllowAbsoluteUrl).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AcceptHttp_10 { + n += 2 + } + l = len(m.DefaultHostForHttp_10) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HeaderKeyFormat != nil { + l = m.HeaderKeyFormat.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnableTrailers { + n += 2 + } + if m.AllowChunkedLength { + n += 2 + } + if m.OverrideStreamErrorOnInvalidHttpMessage != nil { + l = (*wrapperspb.BoolValue)(m.OverrideStreamErrorOnInvalidHttpMessage).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SendFullyQualifiedUrl { + n += 2 + } + if m.UseBalsaParser != nil { + l = (*wrapperspb.BoolValue)(m.UseBalsaParser).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AllowCustomMethods { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *KeepaliveSettings) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Interval != nil { + l = (*durationpb.Duration)(m.Interval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Timeout != nil { + l = (*durationpb.Duration)(m.Timeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IntervalJitter != nil { + if size, ok := interface{}(m.IntervalJitter).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.IntervalJitter) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectionIdleInterval != nil { + l = (*durationpb.Duration)(m.ConnectionIdleInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Http2ProtocolOptions_SettingsParameter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Identifier != nil { + l = (*wrapperspb.UInt32Value)(m.Identifier).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Value != nil { + l = (*wrapperspb.UInt32Value)(m.Value).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Http2ProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HpackTableSize != nil { + l = (*wrapperspb.UInt32Value)(m.HpackTableSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxConcurrentStreams != nil { + l = (*wrapperspb.UInt32Value)(m.MaxConcurrentStreams).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.InitialStreamWindowSize != nil { + l = (*wrapperspb.UInt32Value)(m.InitialStreamWindowSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.InitialConnectionWindowSize != nil { + l = (*wrapperspb.UInt32Value)(m.InitialConnectionWindowSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AllowConnect { + n += 2 + } + if m.AllowMetadata { + n += 2 + } + if m.MaxOutboundFrames != nil { + l = (*wrapperspb.UInt32Value)(m.MaxOutboundFrames).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxOutboundControlFrames != nil { + l = (*wrapperspb.UInt32Value)(m.MaxOutboundControlFrames).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxConsecutiveInboundFramesWithEmptyPayload != nil { + l = 
(*wrapperspb.UInt32Value)(m.MaxConsecutiveInboundFramesWithEmptyPayload).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxInboundPriorityFramesPerStream != nil { + l = (*wrapperspb.UInt32Value)(m.MaxInboundPriorityFramesPerStream).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxInboundWindowUpdateFramesPerDataFrameSent != nil { + l = (*wrapperspb.UInt32Value)(m.MaxInboundWindowUpdateFramesPerDataFrameSent).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StreamErrorOnInvalidHttpMessaging { + n += 2 + } + if len(m.CustomSettingsParameters) > 0 { + for _, e := range m.CustomSettingsParameters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.OverrideStreamErrorOnInvalidHttpMessage != nil { + l = (*wrapperspb.BoolValue)(m.OverrideStreamErrorOnInvalidHttpMessage).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectionKeepalive != nil { + l = m.ConnectionKeepalive.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UseOghttp2Codec != nil { + l = (*wrapperspb.BoolValue)(m.UseOghttp2Codec).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GrpcProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Http2ProtocolOptions != nil { + l = m.Http2ProtocolOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Http3ProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.QuicProtocolOptions != nil { + l = m.QuicProtocolOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OverrideStreamErrorOnInvalidHttpMessage != nil { + l = (*wrapperspb.BoolValue)(m.OverrideStreamErrorOnInvalidHttpMessage).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AllowExtendedConnect { + n += 2 + } + if m.AllowMetadata { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *SchemeHeaderTransformation) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Transformation.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.MatchUpstream { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *SchemeHeaderTransformation_SchemeToOverwrite) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SchemeToOverwrite) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.go new file mode 100644 index 0000000000..43e7d77061 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.go @@ -0,0 +1,370 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/proxy_protocol.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ProxyProtocolPassThroughTLVs_PassTLVsMatchType int32 + +const ( + // Pass all TLVs. + ProxyProtocolPassThroughTLVs_INCLUDE_ALL ProxyProtocolPassThroughTLVs_PassTLVsMatchType = 0 + // Pass specific TLVs defined in tlv_type. + ProxyProtocolPassThroughTLVs_INCLUDE ProxyProtocolPassThroughTLVs_PassTLVsMatchType = 1 +) + +// Enum value maps for ProxyProtocolPassThroughTLVs_PassTLVsMatchType. +var ( + ProxyProtocolPassThroughTLVs_PassTLVsMatchType_name = map[int32]string{ + 0: "INCLUDE_ALL", + 1: "INCLUDE", + } + ProxyProtocolPassThroughTLVs_PassTLVsMatchType_value = map[string]int32{ + "INCLUDE_ALL": 0, + "INCLUDE": 1, + } +) + +func (x ProxyProtocolPassThroughTLVs_PassTLVsMatchType) Enum() *ProxyProtocolPassThroughTLVs_PassTLVsMatchType { + p := new(ProxyProtocolPassThroughTLVs_PassTLVsMatchType) + *p = x + return p +} + +func (x ProxyProtocolPassThroughTLVs_PassTLVsMatchType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ProxyProtocolPassThroughTLVs_PassTLVsMatchType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_proxy_protocol_proto_enumTypes[0].Descriptor() +} + +func (ProxyProtocolPassThroughTLVs_PassTLVsMatchType) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_proxy_protocol_proto_enumTypes[0] +} + +func (x ProxyProtocolPassThroughTLVs_PassTLVsMatchType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ProxyProtocolPassThroughTLVs_PassTLVsMatchType.Descriptor instead. +func (ProxyProtocolPassThroughTLVs_PassTLVsMatchType) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_proxy_protocol_proto_rawDescGZIP(), []int{0, 0} +} + +type ProxyProtocolConfig_Version int32 + +const ( + // PROXY protocol version 1. Human readable format. + ProxyProtocolConfig_V1 ProxyProtocolConfig_Version = 0 + // PROXY protocol version 2. Binary format. + ProxyProtocolConfig_V2 ProxyProtocolConfig_Version = 1 +) + +// Enum value maps for ProxyProtocolConfig_Version. +var ( + ProxyProtocolConfig_Version_name = map[int32]string{ + 0: "V1", + 1: "V2", + } + ProxyProtocolConfig_Version_value = map[string]int32{ + "V1": 0, + "V2": 1, + } +) + +func (x ProxyProtocolConfig_Version) Enum() *ProxyProtocolConfig_Version { + p := new(ProxyProtocolConfig_Version) + *p = x + return p +} + +func (x ProxyProtocolConfig_Version) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ProxyProtocolConfig_Version) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_proxy_protocol_proto_enumTypes[1].Descriptor() +} + +func (ProxyProtocolConfig_Version) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_proxy_protocol_proto_enumTypes[1] +} + +func (x ProxyProtocolConfig_Version) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ProxyProtocolConfig_Version.Descriptor instead. +func (ProxyProtocolConfig_Version) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_proxy_protocol_proto_rawDescGZIP(), []int{1, 0} +} + +type ProxyProtocolPassThroughTLVs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The strategy to pass through TLVs. 
Default is INCLUDE_ALL. + // If INCLUDE_ALL is set, all TLVs will be passed through no matter the tlv_type field. + MatchType ProxyProtocolPassThroughTLVs_PassTLVsMatchType `protobuf:"varint,1,opt,name=match_type,json=matchType,proto3,enum=envoy.config.core.v3.ProxyProtocolPassThroughTLVs_PassTLVsMatchType" json:"match_type,omitempty"` + // The TLV types that are applied based on match_type. + // TLV type is defined as uint8_t in proxy protocol. See `the spec + // `_ for details. + TlvType []uint32 `protobuf:"varint,2,rep,packed,name=tlv_type,json=tlvType,proto3" json:"tlv_type,omitempty"` +} + +func (x *ProxyProtocolPassThroughTLVs) Reset() { + *x = ProxyProtocolPassThroughTLVs{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProxyProtocolPassThroughTLVs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProxyProtocolPassThroughTLVs) ProtoMessage() {} + +func (x *ProxyProtocolPassThroughTLVs) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProxyProtocolPassThroughTLVs.ProtoReflect.Descriptor instead. +func (*ProxyProtocolPassThroughTLVs) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_proxy_protocol_proto_rawDescGZIP(), []int{0} +} + +func (x *ProxyProtocolPassThroughTLVs) GetMatchType() ProxyProtocolPassThroughTLVs_PassTLVsMatchType { + if x != nil { + return x.MatchType + } + return ProxyProtocolPassThroughTLVs_INCLUDE_ALL +} + +func (x *ProxyProtocolPassThroughTLVs) GetTlvType() []uint32 { + if x != nil { + return x.TlvType + } + return nil +} + +type ProxyProtocolConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The PROXY protocol version to use. See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details + Version ProxyProtocolConfig_Version `protobuf:"varint,1,opt,name=version,proto3,enum=envoy.config.core.v3.ProxyProtocolConfig_Version" json:"version,omitempty"` + // This config controls which TLVs can be passed to upstream if it is Proxy Protocol + // V2 header. If there is no setting for this field, no TLVs will be passed through. + PassThroughTlvs *ProxyProtocolPassThroughTLVs `protobuf:"bytes,2,opt,name=pass_through_tlvs,json=passThroughTlvs,proto3" json:"pass_through_tlvs,omitempty"` +} + +func (x *ProxyProtocolConfig) Reset() { + *x = ProxyProtocolConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProxyProtocolConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProxyProtocolConfig) ProtoMessage() {} + +func (x *ProxyProtocolConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProxyProtocolConfig.ProtoReflect.Descriptor instead. 
+func (*ProxyProtocolConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_proxy_protocol_proto_rawDescGZIP(), []int{1} +} + +func (x *ProxyProtocolConfig) GetVersion() ProxyProtocolConfig_Version { + if x != nil { + return x.Version + } + return ProxyProtocolConfig_V1 +} + +func (x *ProxyProtocolConfig) GetPassThroughTlvs() *ProxyProtocolPassThroughTLVs { + if x != nil { + return x.PassThroughTlvs + } + return nil +} + +var File_envoy_config_core_v3_proxy_protocol_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_proxy_protocol_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe0, 0x01, 0x0a, 0x1c, 0x50, 0x72, + 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x50, 0x61, 0x73, 0x73, 0x54, + 0x68, 0x72, 0x6f, 0x75, 0x67, 0x68, 0x54, 0x4c, 0x56, 0x73, 0x12, 0x63, 0x0a, 0x0a, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x44, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x50, 0x61, 0x73, 0x73, 0x54, 0x68, 0x72, 0x6f, 0x75, 0x67, 0x68, 0x54, 0x4c, + 0x56, 0x73, 0x2e, 0x50, 0x61, 0x73, 0x73, 0x54, 0x4c, 0x56, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x28, 0x0a, 0x08, 0x74, 0x6c, 0x76, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0d, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x92, 0x01, 0x07, 0x22, 0x05, 0x2a, 0x03, 0x10, 0x80, 0x02, + 0x52, 0x07, 0x74, 0x6c, 0x76, 0x54, 0x79, 0x70, 0x65, 0x22, 0x31, 0x0a, 0x11, 0x50, 0x61, 0x73, + 0x73, 0x54, 0x4c, 0x56, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, + 0x0a, 0x0b, 0x49, 0x4e, 0x43, 0x4c, 0x55, 0x44, 0x45, 0x5f, 0x41, 0x4c, 0x4c, 0x10, 0x00, 0x12, + 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x43, 0x4c, 0x55, 0x44, 0x45, 0x10, 0x01, 0x22, 0xdd, 0x01, 0x0a, + 0x13, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4b, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x6f, + 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x5e, 0x0a, 0x11, 0x70, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x75, 0x67, + 0x68, 0x5f, 0x74, 0x6c, 0x76, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, + 0x6e, 0x76, 
0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x50, 0x61, 0x73, 0x73, 0x54, 0x68, 0x72, 0x6f, 0x75, 0x67, 0x68, 0x54, 0x4c, 0x56, 0x73, + 0x52, 0x0f, 0x70, 0x61, 0x73, 0x73, 0x54, 0x68, 0x72, 0x6f, 0x75, 0x67, 0x68, 0x54, 0x6c, 0x76, + 0x73, 0x22, 0x19, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x06, 0x0a, 0x02, + 0x56, 0x31, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x56, 0x32, 0x10, 0x01, 0x42, 0x86, 0x01, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x12, 0x50, 0x72, 0x6f, + 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, + 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_proxy_protocol_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_proxy_protocol_proto_rawDescData = file_envoy_config_core_v3_proxy_protocol_proto_rawDesc +) + +func file_envoy_config_core_v3_proxy_protocol_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_proxy_protocol_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_proxy_protocol_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_proxy_protocol_proto_rawDescData) + }) + return file_envoy_config_core_v3_proxy_protocol_proto_rawDescData +} + +var file_envoy_config_core_v3_proxy_protocol_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_envoy_config_core_v3_proxy_protocol_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_config_core_v3_proxy_protocol_proto_goTypes = []interface{}{ + (ProxyProtocolPassThroughTLVs_PassTLVsMatchType)(0), // 0: envoy.config.core.v3.ProxyProtocolPassThroughTLVs.PassTLVsMatchType + (ProxyProtocolConfig_Version)(0), // 1: envoy.config.core.v3.ProxyProtocolConfig.Version + (*ProxyProtocolPassThroughTLVs)(nil), // 2: envoy.config.core.v3.ProxyProtocolPassThroughTLVs + (*ProxyProtocolConfig)(nil), // 3: envoy.config.core.v3.ProxyProtocolConfig +} +var file_envoy_config_core_v3_proxy_protocol_proto_depIdxs = []int32{ + 0, // 0: envoy.config.core.v3.ProxyProtocolPassThroughTLVs.match_type:type_name -> envoy.config.core.v3.ProxyProtocolPassThroughTLVs.PassTLVsMatchType + 1, // 1: envoy.config.core.v3.ProxyProtocolConfig.version:type_name -> envoy.config.core.v3.ProxyProtocolConfig.Version + 2, // 2: envoy.config.core.v3.ProxyProtocolConfig.pass_through_tlvs:type_name -> envoy.config.core.v3.ProxyProtocolPassThroughTLVs + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_proxy_protocol_proto_init() } +func 
file_envoy_config_core_v3_proxy_protocol_proto_init() { + if File_envoy_config_core_v3_proxy_protocol_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProxyProtocolPassThroughTLVs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProxyProtocolConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_proxy_protocol_proto_rawDesc, + NumEnums: 2, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_proxy_protocol_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_proxy_protocol_proto_depIdxs, + EnumInfos: file_envoy_config_core_v3_proxy_protocol_proto_enumTypes, + MessageInfos: file_envoy_config_core_v3_proxy_protocol_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_proxy_protocol_proto = out.File + file_envoy_config_core_v3_proxy_protocol_proto_rawDesc = nil + file_envoy_config_core_v3_proxy_protocol_proto_goTypes = nil + file_envoy_config_core_v3_proxy_protocol_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.validate.go new file mode 100644 index 0000000000..66555a1664 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol.pb.validate.go @@ -0,0 +1,291 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/core/v3/proxy_protocol.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ProxyProtocolPassThroughTLVs with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ProxyProtocolPassThroughTLVs) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ProxyProtocolPassThroughTLVs with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ProxyProtocolPassThroughTLVsMultiError, or nil if none found. 
+func (m *ProxyProtocolPassThroughTLVs) ValidateAll() error { + return m.validate(true) +} + +func (m *ProxyProtocolPassThroughTLVs) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for MatchType + + for idx, item := range m.GetTlvType() { + _, _ = idx, item + + if item >= 256 { + err := ProxyProtocolPassThroughTLVsValidationError{ + field: fmt.Sprintf("TlvType[%v]", idx), + reason: "value must be less than 256", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return ProxyProtocolPassThroughTLVsMultiError(errors) + } + + return nil +} + +// ProxyProtocolPassThroughTLVsMultiError is an error wrapping multiple +// validation errors returned by ProxyProtocolPassThroughTLVs.ValidateAll() if +// the designated constraints aren't met. +type ProxyProtocolPassThroughTLVsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ProxyProtocolPassThroughTLVsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ProxyProtocolPassThroughTLVsMultiError) AllErrors() []error { return m } + +// ProxyProtocolPassThroughTLVsValidationError is the validation error returned +// by ProxyProtocolPassThroughTLVs.Validate if the designated constraints +// aren't met. +type ProxyProtocolPassThroughTLVsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ProxyProtocolPassThroughTLVsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ProxyProtocolPassThroughTLVsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ProxyProtocolPassThroughTLVsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ProxyProtocolPassThroughTLVsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ProxyProtocolPassThroughTLVsValidationError) ErrorName() string { + return "ProxyProtocolPassThroughTLVsValidationError" +} + +// Error satisfies the builtin error interface +func (e ProxyProtocolPassThroughTLVsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sProxyProtocolPassThroughTLVs.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ProxyProtocolPassThroughTLVsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ProxyProtocolPassThroughTLVsValidationError{} + +// Validate checks the field values on ProxyProtocolConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ProxyProtocolConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ProxyProtocolConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ProxyProtocolConfigMultiError, or nil if none found. 
+func (m *ProxyProtocolConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *ProxyProtocolConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Version + + if all { + switch v := interface{}(m.GetPassThroughTlvs()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ProxyProtocolConfigValidationError{ + field: "PassThroughTlvs", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ProxyProtocolConfigValidationError{ + field: "PassThroughTlvs", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPassThroughTlvs()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ProxyProtocolConfigValidationError{ + field: "PassThroughTlvs", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ProxyProtocolConfigMultiError(errors) + } + + return nil +} + +// ProxyProtocolConfigMultiError is an error wrapping multiple validation +// errors returned by ProxyProtocolConfig.ValidateAll() if the designated +// constraints aren't met. +type ProxyProtocolConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ProxyProtocolConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ProxyProtocolConfigMultiError) AllErrors() []error { return m } + +// ProxyProtocolConfigValidationError is the validation error returned by +// ProxyProtocolConfig.Validate if the designated constraints aren't met. +type ProxyProtocolConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ProxyProtocolConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ProxyProtocolConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ProxyProtocolConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ProxyProtocolConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ProxyProtocolConfigValidationError) ErrorName() string { + return "ProxyProtocolConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e ProxyProtocolConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sProxyProtocolConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ProxyProtocolConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ProxyProtocolConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol_vtproto.pb.go new file mode 100644 index 0000000000..d429640eee --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/proxy_protocol_vtproto.pb.go @@ -0,0 +1,162 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/proxy_protocol.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ProxyProtocolPassThroughTLVs) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProxyProtocolPassThroughTLVs) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ProxyProtocolPassThroughTLVs) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TlvType) > 0 { + var pksize2 int + for _, num := range m.TlvType { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num := range m.TlvType { + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x12 + } + if m.MatchType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MatchType)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ProxyProtocolConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProxyProtocolConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ProxyProtocolConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if 
m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PassThroughTlvs != nil { + size, err := m.PassThroughTlvs.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Version != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Version)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ProxyProtocolPassThroughTLVs) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MatchType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MatchType)) + } + if len(m.TlvType) > 0 { + l = 0 + for _, e := range m.TlvType { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + n += len(m.unknownFields) + return n +} + +func (m *ProxyProtocolConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Version != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Version)) + } + if m.PassThroughTlvs != nil { + l = m.PassThroughTlvs.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.go new file mode 100644 index 0000000000..fc4ec52de9 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.go @@ -0,0 +1,265 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/resolver.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration of DNS resolver option flags which control the behavior of the DNS resolver. +type DnsResolverOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Use TCP for all DNS queries instead of the default protocol UDP. + UseTcpForDnsLookups bool `protobuf:"varint,1,opt,name=use_tcp_for_dns_lookups,json=useTcpForDnsLookups,proto3" json:"use_tcp_for_dns_lookups,omitempty"` + // Do not use the default search domains; only query hostnames as-is or as aliases. 
+ NoDefaultSearchDomain bool `protobuf:"varint,2,opt,name=no_default_search_domain,json=noDefaultSearchDomain,proto3" json:"no_default_search_domain,omitempty"` +} + +func (x *DnsResolverOptions) Reset() { + *x = DnsResolverOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_resolver_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DnsResolverOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DnsResolverOptions) ProtoMessage() {} + +func (x *DnsResolverOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_resolver_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DnsResolverOptions.ProtoReflect.Descriptor instead. +func (*DnsResolverOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_resolver_proto_rawDescGZIP(), []int{0} +} + +func (x *DnsResolverOptions) GetUseTcpForDnsLookups() bool { + if x != nil { + return x.UseTcpForDnsLookups + } + return false +} + +func (x *DnsResolverOptions) GetNoDefaultSearchDomain() bool { + if x != nil { + return x.NoDefaultSearchDomain + } + return false +} + +// DNS resolution configuration which includes the underlying dns resolver addresses and options. +type DnsResolutionConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of dns resolver addresses. If specified, the DNS client library will perform resolution + // via the underlying DNS resolvers. Otherwise, the default system resolvers + // (e.g., /etc/resolv.conf) will be used. + Resolvers []*Address `protobuf:"bytes,1,rep,name=resolvers,proto3" json:"resolvers,omitempty"` + // Configuration of DNS resolver option flags which control the behavior of the DNS resolver. + DnsResolverOptions *DnsResolverOptions `protobuf:"bytes,2,opt,name=dns_resolver_options,json=dnsResolverOptions,proto3" json:"dns_resolver_options,omitempty"` +} + +func (x *DnsResolutionConfig) Reset() { + *x = DnsResolutionConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_resolver_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DnsResolutionConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DnsResolutionConfig) ProtoMessage() {} + +func (x *DnsResolutionConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_resolver_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DnsResolutionConfig.ProtoReflect.Descriptor instead. 
+func (*DnsResolutionConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_resolver_proto_rawDescGZIP(), []int{1} +} + +func (x *DnsResolutionConfig) GetResolvers() []*Address { + if x != nil { + return x.Resolvers + } + return nil +} + +func (x *DnsResolutionConfig) GetDnsResolverOptions() *DnsResolverOptions { + if x != nil { + return x.DnsResolverOptions + } + return nil +} + +var File_envoy_config_core_v3_resolver_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_resolver_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x83, 0x01, 0x0a, 0x12, 0x44, 0x6e, 0x73, 0x52, + 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x34, + 0x0a, 0x17, 0x75, 0x73, 0x65, 0x5f, 0x74, 0x63, 0x70, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x64, 0x6e, + 0x73, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x13, 0x75, 0x73, 0x65, 0x54, 0x63, 0x70, 0x46, 0x6f, 0x72, 0x44, 0x6e, 0x73, 0x4c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x6e, 0x6f, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x5f, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x6e, 0x6f, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0xb8, 0x01, + 0x0a, 0x13, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x45, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, + 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, + 0x01, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x73, 0x12, 0x5a, 0x0a, 0x14, + 0x64, 0x6e, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x12, 0x64, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, + 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x81, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, + 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 
0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, + 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_resolver_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_resolver_proto_rawDescData = file_envoy_config_core_v3_resolver_proto_rawDesc +) + +func file_envoy_config_core_v3_resolver_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_resolver_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_resolver_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_resolver_proto_rawDescData) + }) + return file_envoy_config_core_v3_resolver_proto_rawDescData +} + +var file_envoy_config_core_v3_resolver_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_config_core_v3_resolver_proto_goTypes = []interface{}{ + (*DnsResolverOptions)(nil), // 0: envoy.config.core.v3.DnsResolverOptions + (*DnsResolutionConfig)(nil), // 1: envoy.config.core.v3.DnsResolutionConfig + (*Address)(nil), // 2: envoy.config.core.v3.Address +} +var file_envoy_config_core_v3_resolver_proto_depIdxs = []int32{ + 2, // 0: envoy.config.core.v3.DnsResolutionConfig.resolvers:type_name -> envoy.config.core.v3.Address + 0, // 1: envoy.config.core.v3.DnsResolutionConfig.dns_resolver_options:type_name -> envoy.config.core.v3.DnsResolverOptions + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_resolver_proto_init() } +func file_envoy_config_core_v3_resolver_proto_init() { + if File_envoy_config_core_v3_resolver_proto != nil { + return + } + file_envoy_config_core_v3_address_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_resolver_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DnsResolverOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_resolver_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DnsResolutionConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_resolver_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_resolver_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_resolver_proto_depIdxs, + MessageInfos: file_envoy_config_core_v3_resolver_proto_msgTypes, + }.Build() + 
File_envoy_config_core_v3_resolver_proto = out.File + file_envoy_config_core_v3_resolver_proto_rawDesc = nil + file_envoy_config_core_v3_resolver_proto_goTypes = nil + file_envoy_config_core_v3_resolver_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.validate.go new file mode 100644 index 0000000000..e9f661f679 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver.pb.validate.go @@ -0,0 +1,319 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/core/v3/resolver.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on DnsResolverOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DnsResolverOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DnsResolverOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DnsResolverOptionsMultiError, or nil if none found. +func (m *DnsResolverOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *DnsResolverOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for UseTcpForDnsLookups + + // no validation rules for NoDefaultSearchDomain + + if len(errors) > 0 { + return DnsResolverOptionsMultiError(errors) + } + + return nil +} + +// DnsResolverOptionsMultiError is an error wrapping multiple validation errors +// returned by DnsResolverOptions.ValidateAll() if the designated constraints +// aren't met. +type DnsResolverOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DnsResolverOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DnsResolverOptionsMultiError) AllErrors() []error { return m } + +// DnsResolverOptionsValidationError is the validation error returned by +// DnsResolverOptions.Validate if the designated constraints aren't met. +type DnsResolverOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DnsResolverOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DnsResolverOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DnsResolverOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e DnsResolverOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DnsResolverOptionsValidationError) ErrorName() string { + return "DnsResolverOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e DnsResolverOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDnsResolverOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DnsResolverOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DnsResolverOptionsValidationError{} + +// Validate checks the field values on DnsResolutionConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DnsResolutionConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DnsResolutionConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DnsResolutionConfigMultiError, or nil if none found. +func (m *DnsResolutionConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *DnsResolutionConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetResolvers()) < 1 { + err := DnsResolutionConfigValidationError{ + field: "Resolvers", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetResolvers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DnsResolutionConfigValidationError{ + field: fmt.Sprintf("Resolvers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DnsResolutionConfigValidationError{ + field: fmt.Sprintf("Resolvers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DnsResolutionConfigValidationError{ + field: fmt.Sprintf("Resolvers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetDnsResolverOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DnsResolutionConfigValidationError{ + field: "DnsResolverOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DnsResolutionConfigValidationError{ + field: "DnsResolverOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDnsResolverOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DnsResolutionConfigValidationError{ + field: "DnsResolverOptions", + reason: "embedded message failed validation", + cause: 
err, + } + } + } + + if len(errors) > 0 { + return DnsResolutionConfigMultiError(errors) + } + + return nil +} + +// DnsResolutionConfigMultiError is an error wrapping multiple validation +// errors returned by DnsResolutionConfig.ValidateAll() if the designated +// constraints aren't met. +type DnsResolutionConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DnsResolutionConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DnsResolutionConfigMultiError) AllErrors() []error { return m } + +// DnsResolutionConfigValidationError is the validation error returned by +// DnsResolutionConfig.Validate if the designated constraints aren't met. +type DnsResolutionConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DnsResolutionConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DnsResolutionConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DnsResolutionConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DnsResolutionConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DnsResolutionConfigValidationError) ErrorName() string { + return "DnsResolutionConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e DnsResolutionConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDnsResolutionConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DnsResolutionConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DnsResolutionConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver_vtproto.pb.go new file mode 100644 index 0000000000..5ae614bb27 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/resolver_vtproto.pb.go @@ -0,0 +1,163 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/resolver.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *DnsResolverOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DnsResolverOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DnsResolverOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.NoDefaultSearchDomain { + i-- + if m.NoDefaultSearchDomain { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.UseTcpForDnsLookups { + i-- + if m.UseTcpForDnsLookups { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *DnsResolutionConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DnsResolutionConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DnsResolutionConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DnsResolverOptions != nil { + size, err := m.DnsResolverOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Resolvers) > 0 { + for iNdEx := len(m.Resolvers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Resolvers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DnsResolverOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UseTcpForDnsLookups { + n += 2 + } + if m.NoDefaultSearchDomain { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *DnsResolutionConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Resolvers) > 0 { + for _, e := range m.Resolvers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.DnsResolverOptions != nil { + l = m.DnsResolverOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.go new file mode 100644 index 0000000000..2b684f57b6 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.go @@ -0,0 +1,615 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/socket_option.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SocketOption_SocketState int32 + +const ( + // Socket options are applied after socket creation but before binding the socket to a port + SocketOption_STATE_PREBIND SocketOption_SocketState = 0 + // Socket options are applied after binding the socket to a port but before calling listen() + SocketOption_STATE_BOUND SocketOption_SocketState = 1 + // Socket options are applied after calling listen() + SocketOption_STATE_LISTENING SocketOption_SocketState = 2 +) + +// Enum value maps for SocketOption_SocketState. +var ( + SocketOption_SocketState_name = map[int32]string{ + 0: "STATE_PREBIND", + 1: "STATE_BOUND", + 2: "STATE_LISTENING", + } + SocketOption_SocketState_value = map[string]int32{ + "STATE_PREBIND": 0, + "STATE_BOUND": 1, + "STATE_LISTENING": 2, + } +) + +func (x SocketOption_SocketState) Enum() *SocketOption_SocketState { + p := new(SocketOption_SocketState) + *p = x + return p +} + +func (x SocketOption_SocketState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SocketOption_SocketState) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_core_v3_socket_option_proto_enumTypes[0].Descriptor() +} + +func (SocketOption_SocketState) Type() protoreflect.EnumType { + return &file_envoy_config_core_v3_socket_option_proto_enumTypes[0] +} + +func (x SocketOption_SocketState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SocketOption_SocketState.Descriptor instead. +func (SocketOption_SocketState) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_core_v3_socket_option_proto_rawDescGZIP(), []int{0, 0} +} + +// Generic socket option message. This would be used to set socket options that +// might not exist in upstream kernels or precompiled Envoy binaries. +// +// For example: +// +// .. code-block:: json +// +// { +// "description": "support tcp keep alive", +// "state": 0, +// "level": 1, +// "name": 9, +// "int_value": 1, +// } +// +// 1 means SOL_SOCKET and 9 means SO_KEEPALIVE on Linux. +// With the above configuration, `TCP Keep-Alives `_ +// can be enabled in socket with Linux, which can be used in +// :ref:`listener's` or +// :ref:`admin's ` socket_options etc. +// +// It should be noted that the name or level may have different values on different platforms. +// [#next-free-field: 8] +type SocketOption struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An optional name to give this socket option for debugging, etc. + // Uniqueness is not required and no special meaning is assumed. 
+ Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP + Level int64 `protobuf:"varint,2,opt,name=level,proto3" json:"level,omitempty"` + // The numeric name as passed to setsockopt + Name int64 `protobuf:"varint,3,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to Value: + // + // *SocketOption_IntValue + // *SocketOption_BufValue + Value isSocketOption_Value `protobuf_oneof:"value"` + // The state in which the option will be applied. When used in BindConfig + // STATE_PREBIND is currently the only valid value. + State SocketOption_SocketState `protobuf:"varint,6,opt,name=state,proto3,enum=envoy.config.core.v3.SocketOption_SocketState" json:"state,omitempty"` + // Apply the socket option to the specified `socket type `_. + // If not specified, the socket option will be applied to all socket types. + Type *SocketOption_SocketType `protobuf:"bytes,7,opt,name=type,proto3" json:"type,omitempty"` +} + +func (x *SocketOption) Reset() { + *x = SocketOption{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SocketOption) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SocketOption) ProtoMessage() {} + +func (x *SocketOption) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SocketOption.ProtoReflect.Descriptor instead. +func (*SocketOption) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_socket_option_proto_rawDescGZIP(), []int{0} +} + +func (x *SocketOption) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *SocketOption) GetLevel() int64 { + if x != nil { + return x.Level + } + return 0 +} + +func (x *SocketOption) GetName() int64 { + if x != nil { + return x.Name + } + return 0 +} + +func (m *SocketOption) GetValue() isSocketOption_Value { + if m != nil { + return m.Value + } + return nil +} + +func (x *SocketOption) GetIntValue() int64 { + if x, ok := x.GetValue().(*SocketOption_IntValue); ok { + return x.IntValue + } + return 0 +} + +func (x *SocketOption) GetBufValue() []byte { + if x, ok := x.GetValue().(*SocketOption_BufValue); ok { + return x.BufValue + } + return nil +} + +func (x *SocketOption) GetState() SocketOption_SocketState { + if x != nil { + return x.State + } + return SocketOption_STATE_PREBIND +} + +func (x *SocketOption) GetType() *SocketOption_SocketType { + if x != nil { + return x.Type + } + return nil +} + +type isSocketOption_Value interface { + isSocketOption_Value() +} + +type SocketOption_IntValue struct { + // Because many sockopts take an int value. + IntValue int64 `protobuf:"varint,4,opt,name=int_value,json=intValue,proto3,oneof"` +} + +type SocketOption_BufValue struct { + // Otherwise it's a byte buffer. 
+ BufValue []byte `protobuf:"bytes,5,opt,name=buf_value,json=bufValue,proto3,oneof"` +} + +func (*SocketOption_IntValue) isSocketOption_Value() {} + +func (*SocketOption_BufValue) isSocketOption_Value() {} + +type SocketOptionsOverride struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SocketOptions []*SocketOption `protobuf:"bytes,1,rep,name=socket_options,json=socketOptions,proto3" json:"socket_options,omitempty"` +} + +func (x *SocketOptionsOverride) Reset() { + *x = SocketOptionsOverride{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SocketOptionsOverride) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SocketOptionsOverride) ProtoMessage() {} + +func (x *SocketOptionsOverride) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SocketOptionsOverride.ProtoReflect.Descriptor instead. +func (*SocketOptionsOverride) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_socket_option_proto_rawDescGZIP(), []int{1} +} + +func (x *SocketOptionsOverride) GetSocketOptions() []*SocketOption { + if x != nil { + return x.SocketOptions + } + return nil +} + +// The `socket type `_ to apply the socket option to. +// Only one field should be set. If multiple fields are set, the precedence order will determine +// the selected one. If none of the fields is set, the socket option will be applied to all socket types. +// +// For example: +// If :ref:`stream ` is set, +// it takes precedence over :ref:`datagram `. +type SocketOption_SocketType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Apply the socket option to the stream socket type. + Stream *SocketOption_SocketType_Stream `protobuf:"bytes,1,opt,name=stream,proto3" json:"stream,omitempty"` + // Apply the socket option to the datagram socket type. + Datagram *SocketOption_SocketType_Datagram `protobuf:"bytes,2,opt,name=datagram,proto3" json:"datagram,omitempty"` +} + +func (x *SocketOption_SocketType) Reset() { + *x = SocketOption_SocketType{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SocketOption_SocketType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SocketOption_SocketType) ProtoMessage() {} + +func (x *SocketOption_SocketType) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SocketOption_SocketType.ProtoReflect.Descriptor instead. 
+func (*SocketOption_SocketType) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_socket_option_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *SocketOption_SocketType) GetStream() *SocketOption_SocketType_Stream { + if x != nil { + return x.Stream + } + return nil +} + +func (x *SocketOption_SocketType) GetDatagram() *SocketOption_SocketType_Datagram { + if x != nil { + return x.Datagram + } + return nil +} + +// The stream socket type. +type SocketOption_SocketType_Stream struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *SocketOption_SocketType_Stream) Reset() { + *x = SocketOption_SocketType_Stream{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SocketOption_SocketType_Stream) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SocketOption_SocketType_Stream) ProtoMessage() {} + +func (x *SocketOption_SocketType_Stream) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SocketOption_SocketType_Stream.ProtoReflect.Descriptor instead. +func (*SocketOption_SocketType_Stream) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_socket_option_proto_rawDescGZIP(), []int{0, 0, 0} +} + +// The datagram socket type. +type SocketOption_SocketType_Datagram struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *SocketOption_SocketType_Datagram) Reset() { + *x = SocketOption_SocketType_Datagram{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SocketOption_SocketType_Datagram) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SocketOption_SocketType_Datagram) ProtoMessage() {} + +func (x *SocketOption_SocketType_Datagram) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SocketOption_SocketType_Datagram.ProtoReflect.Descriptor instead. 
+func (*SocketOption_SocketType_Datagram) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_socket_option_proto_rawDescGZIP(), []int{0, 0, 1} +} + +var File_envoy_config_core_v3_socket_option_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_socket_option_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xef, 0x04, 0x0a, 0x0c, + 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, + 0x0a, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c, + 0x65, 0x76, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x08, 0x69, + 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x62, 0x75, 0x66, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x62, 0x75, + 0x66, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x4e, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x41, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0xc4, 0x01, 0x0a, 0x0a, 0x53, 0x6f, + 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4c, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x6f, 0x63, + 0x6b, 
0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x06, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x52, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x67, 0x72, + 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x67, 0x72, 0x61, 0x6d, + 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x67, 0x72, 0x61, 0x6d, 0x1a, 0x08, 0x0a, 0x06, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x1a, 0x0a, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x67, 0x72, 0x61, 0x6d, + 0x22, 0x46, 0x0a, 0x0b, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x11, 0x0a, 0x0d, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x52, 0x45, 0x42, 0x49, 0x4e, 0x44, + 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x55, 0x4e, + 0x44, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4c, 0x49, 0x53, + 0x54, 0x45, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, + 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x0c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x62, 0x0a, + 0x15, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4f, 0x76, + 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x49, 0x0a, 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x42, 0x85, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x42, 0x11, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, + 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_envoy_config_core_v3_socket_option_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_socket_option_proto_rawDescData = file_envoy_config_core_v3_socket_option_proto_rawDesc +) + +func file_envoy_config_core_v3_socket_option_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_socket_option_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_socket_option_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_socket_option_proto_rawDescData) + }) + return 
file_envoy_config_core_v3_socket_option_proto_rawDescData +} + +var file_envoy_config_core_v3_socket_option_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_config_core_v3_socket_option_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_envoy_config_core_v3_socket_option_proto_goTypes = []interface{}{ + (SocketOption_SocketState)(0), // 0: envoy.config.core.v3.SocketOption.SocketState + (*SocketOption)(nil), // 1: envoy.config.core.v3.SocketOption + (*SocketOptionsOverride)(nil), // 2: envoy.config.core.v3.SocketOptionsOverride + (*SocketOption_SocketType)(nil), // 3: envoy.config.core.v3.SocketOption.SocketType + (*SocketOption_SocketType_Stream)(nil), // 4: envoy.config.core.v3.SocketOption.SocketType.Stream + (*SocketOption_SocketType_Datagram)(nil), // 5: envoy.config.core.v3.SocketOption.SocketType.Datagram +} +var file_envoy_config_core_v3_socket_option_proto_depIdxs = []int32{ + 0, // 0: envoy.config.core.v3.SocketOption.state:type_name -> envoy.config.core.v3.SocketOption.SocketState + 3, // 1: envoy.config.core.v3.SocketOption.type:type_name -> envoy.config.core.v3.SocketOption.SocketType + 1, // 2: envoy.config.core.v3.SocketOptionsOverride.socket_options:type_name -> envoy.config.core.v3.SocketOption + 4, // 3: envoy.config.core.v3.SocketOption.SocketType.stream:type_name -> envoy.config.core.v3.SocketOption.SocketType.Stream + 5, // 4: envoy.config.core.v3.SocketOption.SocketType.datagram:type_name -> envoy.config.core.v3.SocketOption.SocketType.Datagram + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_socket_option_proto_init() } +func file_envoy_config_core_v3_socket_option_proto_init() { + if File_envoy_config_core_v3_socket_option_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_socket_option_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SocketOption); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_socket_option_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SocketOptionsOverride); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_socket_option_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SocketOption_SocketType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_socket_option_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SocketOption_SocketType_Stream); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_socket_option_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SocketOption_SocketType_Datagram); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_core_v3_socket_option_proto_msgTypes[0].OneofWrappers = 
[]interface{}{ + (*SocketOption_IntValue)(nil), + (*SocketOption_BufValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_socket_option_proto_rawDesc, + NumEnums: 1, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_socket_option_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_socket_option_proto_depIdxs, + EnumInfos: file_envoy_config_core_v3_socket_option_proto_enumTypes, + MessageInfos: file_envoy_config_core_v3_socket_option_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_socket_option_proto = out.File + file_envoy_config_core_v3_socket_option_proto_rawDesc = nil + file_envoy_config_core_v3_socket_option_proto_goTypes = nil + file_envoy_config_core_v3_socket_option_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.validate.go new file mode 100644 index 0000000000..944e08984c --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option.pb.validate.go @@ -0,0 +1,728 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/core/v3/socket_option.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on SocketOption with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *SocketOption) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SocketOption with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in SocketOptionMultiError, or +// nil if none found. 
+func (m *SocketOption) ValidateAll() error { + return m.validate(true) +} + +func (m *SocketOption) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Description + + // no validation rules for Level + + // no validation rules for Name + + if _, ok := SocketOption_SocketState_name[int32(m.GetState())]; !ok { + err := SocketOptionValidationError{ + field: "State", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetType()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SocketOptionValidationError{ + field: "Type", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SocketOptionValidationError{ + field: "Type", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetType()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SocketOptionValidationError{ + field: "Type", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofValuePresent := false + switch v := m.Value.(type) { + case *SocketOption_IntValue: + if v == nil { + err := SocketOptionValidationError{ + field: "Value", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofValuePresent = true + // no validation rules for IntValue + case *SocketOption_BufValue: + if v == nil { + err := SocketOptionValidationError{ + field: "Value", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofValuePresent = true + // no validation rules for BufValue + default: + _ = v // ensures v is used + } + if !oneofValuePresent { + err := SocketOptionValidationError{ + field: "Value", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return SocketOptionMultiError(errors) + } + + return nil +} + +// SocketOptionMultiError is an error wrapping multiple validation errors +// returned by SocketOption.ValidateAll() if the designated constraints aren't met. +type SocketOptionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SocketOptionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SocketOptionMultiError) AllErrors() []error { return m } + +// SocketOptionValidationError is the validation error returned by +// SocketOption.Validate if the designated constraints aren't met. +type SocketOptionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SocketOptionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SocketOptionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SocketOptionValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
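
Reviewer note (illustrative sketch, not part of the vendored patch): the generated validators follow the usual protoc-gen-validate pattern — Validate returns the first violation it hits, while ValidateAll collects every violation into a SocketOptionMultiError. A minimal caller-side sketch, using the vendored corev3 import path from the diff header; the Level/Name constants are just example Linux values:

package main

import (
	"fmt"

	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
)

func main() {
	// Neither IntValue nor BufValue is set, so the required `value` oneof is missing.
	opt := &corev3.SocketOption{
		Description: "keepalive",
		Level:       1, // e.g. SOL_SOCKET on Linux
		Name:        9, // e.g. SO_KEEPALIVE on Linux
	}

	// Validate stops at the first violation ("Value: value is required").
	fmt.Println(opt.Validate())

	// ValidateAll gathers every violation into a SocketOptionMultiError ([]error).
	if err := opt.ValidateAll(); err != nil {
		for _, v := range err.(corev3.SocketOptionMultiError) {
			fmt.Println(v)
		}
	}
}
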
+func (e SocketOptionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SocketOptionValidationError) ErrorName() string { return "SocketOptionValidationError" } + +// Error satisfies the builtin error interface +func (e SocketOptionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSocketOption.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SocketOptionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SocketOptionValidationError{} + +// Validate checks the field values on SocketOptionsOverride with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *SocketOptionsOverride) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SocketOptionsOverride with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SocketOptionsOverrideMultiError, or nil if none found. +func (m *SocketOptionsOverride) ValidateAll() error { + return m.validate(true) +} + +func (m *SocketOptionsOverride) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetSocketOptions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SocketOptionsOverrideValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SocketOptionsOverrideValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SocketOptionsOverrideValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return SocketOptionsOverrideMultiError(errors) + } + + return nil +} + +// SocketOptionsOverrideMultiError is an error wrapping multiple validation +// errors returned by SocketOptionsOverride.ValidateAll() if the designated +// constraints aren't met. +type SocketOptionsOverrideMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SocketOptionsOverrideMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SocketOptionsOverrideMultiError) AllErrors() []error { return m } + +// SocketOptionsOverrideValidationError is the validation error returned by +// SocketOptionsOverride.Validate if the designated constraints aren't met. +type SocketOptionsOverrideValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e SocketOptionsOverrideValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SocketOptionsOverrideValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SocketOptionsOverrideValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SocketOptionsOverrideValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SocketOptionsOverrideValidationError) ErrorName() string { + return "SocketOptionsOverrideValidationError" +} + +// Error satisfies the builtin error interface +func (e SocketOptionsOverrideValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSocketOptionsOverride.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SocketOptionsOverrideValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SocketOptionsOverrideValidationError{} + +// Validate checks the field values on SocketOption_SocketType with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *SocketOption_SocketType) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SocketOption_SocketType with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SocketOption_SocketTypeMultiError, or nil if none found. 
+func (m *SocketOption_SocketType) ValidateAll() error { + return m.validate(true) +} + +func (m *SocketOption_SocketType) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetStream()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SocketOption_SocketTypeValidationError{ + field: "Stream", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SocketOption_SocketTypeValidationError{ + field: "Stream", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStream()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SocketOption_SocketTypeValidationError{ + field: "Stream", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetDatagram()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SocketOption_SocketTypeValidationError{ + field: "Datagram", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SocketOption_SocketTypeValidationError{ + field: "Datagram", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDatagram()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SocketOption_SocketTypeValidationError{ + field: "Datagram", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return SocketOption_SocketTypeMultiError(errors) + } + + return nil +} + +// SocketOption_SocketTypeMultiError is an error wrapping multiple validation +// errors returned by SocketOption_SocketType.ValidateAll() if the designated +// constraints aren't met. +type SocketOption_SocketTypeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SocketOption_SocketTypeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SocketOption_SocketTypeMultiError) AllErrors() []error { return m } + +// SocketOption_SocketTypeValidationError is the validation error returned by +// SocketOption_SocketType.Validate if the designated constraints aren't met. +type SocketOption_SocketTypeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SocketOption_SocketTypeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SocketOption_SocketTypeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SocketOption_SocketTypeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SocketOption_SocketTypeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e SocketOption_SocketTypeValidationError) ErrorName() string { + return "SocketOption_SocketTypeValidationError" +} + +// Error satisfies the builtin error interface +func (e SocketOption_SocketTypeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSocketOption_SocketType.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SocketOption_SocketTypeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SocketOption_SocketTypeValidationError{} + +// Validate checks the field values on SocketOption_SocketType_Stream with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *SocketOption_SocketType_Stream) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SocketOption_SocketType_Stream with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// SocketOption_SocketType_StreamMultiError, or nil if none found. +func (m *SocketOption_SocketType_Stream) ValidateAll() error { + return m.validate(true) +} + +func (m *SocketOption_SocketType_Stream) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return SocketOption_SocketType_StreamMultiError(errors) + } + + return nil +} + +// SocketOption_SocketType_StreamMultiError is an error wrapping multiple +// validation errors returned by SocketOption_SocketType_Stream.ValidateAll() +// if the designated constraints aren't met. +type SocketOption_SocketType_StreamMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SocketOption_SocketType_StreamMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SocketOption_SocketType_StreamMultiError) AllErrors() []error { return m } + +// SocketOption_SocketType_StreamValidationError is the validation error +// returned by SocketOption_SocketType_Stream.Validate if the designated +// constraints aren't met. +type SocketOption_SocketType_StreamValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SocketOption_SocketType_StreamValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SocketOption_SocketType_StreamValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SocketOption_SocketType_StreamValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SocketOption_SocketType_StreamValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e SocketOption_SocketType_StreamValidationError) ErrorName() string { + return "SocketOption_SocketType_StreamValidationError" +} + +// Error satisfies the builtin error interface +func (e SocketOption_SocketType_StreamValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSocketOption_SocketType_Stream.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SocketOption_SocketType_StreamValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SocketOption_SocketType_StreamValidationError{} + +// Validate checks the field values on SocketOption_SocketType_Datagram with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *SocketOption_SocketType_Datagram) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SocketOption_SocketType_Datagram with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// SocketOption_SocketType_DatagramMultiError, or nil if none found. +func (m *SocketOption_SocketType_Datagram) ValidateAll() error { + return m.validate(true) +} + +func (m *SocketOption_SocketType_Datagram) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return SocketOption_SocketType_DatagramMultiError(errors) + } + + return nil +} + +// SocketOption_SocketType_DatagramMultiError is an error wrapping multiple +// validation errors returned by +// SocketOption_SocketType_Datagram.ValidateAll() if the designated +// constraints aren't met. +type SocketOption_SocketType_DatagramMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SocketOption_SocketType_DatagramMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SocketOption_SocketType_DatagramMultiError) AllErrors() []error { return m } + +// SocketOption_SocketType_DatagramValidationError is the validation error +// returned by SocketOption_SocketType_Datagram.Validate if the designated +// constraints aren't met. +type SocketOption_SocketType_DatagramValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SocketOption_SocketType_DatagramValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SocketOption_SocketType_DatagramValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SocketOption_SocketType_DatagramValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SocketOption_SocketType_DatagramValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e SocketOption_SocketType_DatagramValidationError) ErrorName() string { + return "SocketOption_SocketType_DatagramValidationError" +} + +// Error satisfies the builtin error interface +func (e SocketOption_SocketType_DatagramValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSocketOption_SocketType_Datagram.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SocketOption_SocketType_DatagramValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SocketOption_SocketType_DatagramValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option_vtproto.pb.go new file mode 100644 index 0000000000..75f5db5122 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/socket_option_vtproto.pb.go @@ -0,0 +1,391 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/socket_option.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *SocketOption_SocketType_Stream) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SocketOption_SocketType_Stream) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketOption_SocketType_Stream) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *SocketOption_SocketType_Datagram) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SocketOption_SocketType_Datagram) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketOption_SocketType_Datagram) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *SocketOption_SocketType) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *SocketOption_SocketType) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketOption_SocketType) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Datagram != nil { + size, err := m.Datagram.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Stream != nil { + size, err := m.Stream.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SocketOption) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SocketOption) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketOption) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Type != nil { + size, err := m.Type.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.State != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x30 + } + if msg, ok := m.Value.(*SocketOption_BufValue); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Value.(*SocketOption_IntValue); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Name != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Name)) + i-- + dAtA[i] = 0x18 + } + if m.Level != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Level)) + i-- + dAtA[i] = 0x10 + } + if len(m.Description) > 0 { + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SocketOption_IntValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketOption_IntValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.IntValue)) + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *SocketOption_BufValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketOption_BufValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.BufValue) + copy(dAtA[i:], m.BufValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.BufValue))) + i-- + dAtA[i] = 0x2a + return len(dAtA) - i, nil +} +func (m *SocketOptionsOverride) MarshalVTStrict() 
(dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SocketOptionsOverride) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SocketOptionsOverride) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.SocketOptions) > 0 { + for iNdEx := len(m.SocketOptions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.SocketOptions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SocketOption_SocketType_Stream) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *SocketOption_SocketType_Datagram) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *SocketOption_SocketType) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Stream != nil { + l = m.Stream.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Datagram != nil { + l = m.Datagram.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SocketOption) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Description) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Level != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Level)) + } + if m.Name != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Name)) + } + if vtmsg, ok := m.Value.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.State != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.State)) + } + if m.Type != nil { + l = m.Type.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SocketOption_IntValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.IntValue)) + return n +} +func (m *SocketOption_BufValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.BufValue) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *SocketOptionsOverride) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.SocketOptions) > 0 { + for _, e := range m.SocketOptions { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.go new file mode 100644 index 0000000000..8c7fda3a1a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.go @@ -0,0 +1,439 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
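
Reviewer note (illustrative sketch, not part of the vendored patch): the socket_option_vtproto.pb.go file added above only compiles when the vtprotobuf build tag is set, adding the MarshalVTStrict fast path alongside the regular proto.Marshal API. A rough caller-side sketch, assuming a build with -tags vtprotobuf and the vendored corev3 import path; the Level/Name constants are example Linux values:

//go:build vtprotobuf

package main

import (
	"fmt"

	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	"google.golang.org/protobuf/proto"
)

func main() {
	opt := &corev3.SocketOption{
		Level: 6, // e.g. IPPROTO_TCP on Linux
		Name:  1, // e.g. TCP_NODELAY on Linux
		Value: &corev3.SocketOption_IntValue{IntValue: 1},
	}

	// MarshalVTStrict is the generated vtprotobuf marshaller shown in the diff;
	// it sizes the buffer once via SizeVT and fills it back-to-front.
	strict, err := opt.MarshalVTStrict()
	if err != nil {
		panic(err)
	}

	// The standard protobuf marshaller produces a wire-compatible encoding.
	std, _ := proto.Marshal(opt)
	fmt.Println(len(strict), len(std))
}
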
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/substitution_format_string.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Optional configuration options to be used with json_format. +type JsonFormatOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The output JSON string properties will be sorted. + SortProperties bool `protobuf:"varint,1,opt,name=sort_properties,json=sortProperties,proto3" json:"sort_properties,omitempty"` +} + +func (x *JsonFormatOptions) Reset() { + *x = JsonFormatOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_substitution_format_string_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JsonFormatOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JsonFormatOptions) ProtoMessage() {} + +func (x *JsonFormatOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_substitution_format_string_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JsonFormatOptions.ProtoReflect.Descriptor instead. +func (*JsonFormatOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_substitution_format_string_proto_rawDescGZIP(), []int{0} +} + +func (x *JsonFormatOptions) GetSortProperties() bool { + if x != nil { + return x.SortProperties + } + return false +} + +// Configuration to use multiple :ref:`command operators ` +// to generate a new string in either plain text or JSON format. +// [#next-free-field: 8] +type SubstitutionFormatString struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Format: + // + // *SubstitutionFormatString_TextFormat + // *SubstitutionFormatString_JsonFormat + // *SubstitutionFormatString_TextFormatSource + Format isSubstitutionFormatString_Format `protobuf_oneof:"format"` + // If set to true, when command operators are evaluated to null, + // + // - for “text_format“, the output of the empty operator is changed from “-“ to an + // empty string, so that empty values are omitted entirely. + // - for “json_format“ the keys with null values are omitted in the output structure. + OmitEmptyValues bool `protobuf:"varint,3,opt,name=omit_empty_values,json=omitEmptyValues,proto3" json:"omit_empty_values,omitempty"` + // Specify a “content_type“ field. + // If this field is not set then “text/plain“ is used for “text_format“ and + // “application/json“ is used for “json_format“. + // + // .. 
validated-code-block:: yaml + // + // :type-name: envoy.config.core.v3.SubstitutionFormatString + // + // content_type: "text/html; charset=UTF-8" + ContentType string `protobuf:"bytes,4,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` + // Specifies a collection of Formatter plugins that can be called from the access log configuration. + // See the formatters extensions documentation for details. + // [#extension-category: envoy.formatter] + Formatters []*TypedExtensionConfig `protobuf:"bytes,6,rep,name=formatters,proto3" json:"formatters,omitempty"` + // If json_format is used, the options will be applied to the output JSON string. + JsonFormatOptions *JsonFormatOptions `protobuf:"bytes,7,opt,name=json_format_options,json=jsonFormatOptions,proto3" json:"json_format_options,omitempty"` +} + +func (x *SubstitutionFormatString) Reset() { + *x = SubstitutionFormatString{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_substitution_format_string_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubstitutionFormatString) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubstitutionFormatString) ProtoMessage() {} + +func (x *SubstitutionFormatString) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_substitution_format_string_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubstitutionFormatString.ProtoReflect.Descriptor instead. +func (*SubstitutionFormatString) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_substitution_format_string_proto_rawDescGZIP(), []int{1} +} + +func (m *SubstitutionFormatString) GetFormat() isSubstitutionFormatString_Format { + if m != nil { + return m.Format + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/core/v3/substitution_format_string.proto. +func (x *SubstitutionFormatString) GetTextFormat() string { + if x, ok := x.GetFormat().(*SubstitutionFormatString_TextFormat); ok { + return x.TextFormat + } + return "" +} + +func (x *SubstitutionFormatString) GetJsonFormat() *structpb.Struct { + if x, ok := x.GetFormat().(*SubstitutionFormatString_JsonFormat); ok { + return x.JsonFormat + } + return nil +} + +func (x *SubstitutionFormatString) GetTextFormatSource() *DataSource { + if x, ok := x.GetFormat().(*SubstitutionFormatString_TextFormatSource); ok { + return x.TextFormatSource + } + return nil +} + +func (x *SubstitutionFormatString) GetOmitEmptyValues() bool { + if x != nil { + return x.OmitEmptyValues + } + return false +} + +func (x *SubstitutionFormatString) GetContentType() string { + if x != nil { + return x.ContentType + } + return "" +} + +func (x *SubstitutionFormatString) GetFormatters() []*TypedExtensionConfig { + if x != nil { + return x.Formatters + } + return nil +} + +func (x *SubstitutionFormatString) GetJsonFormatOptions() *JsonFormatOptions { + if x != nil { + return x.JsonFormatOptions + } + return nil +} + +type isSubstitutionFormatString_Format interface { + isSubstitutionFormatString_Format() +} + +type SubstitutionFormatString_TextFormat struct { + // Specify a format with command operators to form a text string. + // Its details is described in :ref:`format string`. + // + // For example, setting “text_format“ like below, + // + // .. 
validated-code-block:: yaml + // + // :type-name: envoy.config.core.v3.SubstitutionFormatString + // + // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" + // + // generates plain text similar to: + // + // .. code-block:: text + // + // upstream connect error:503:path=/foo + // + // Deprecated in favor of :ref:`text_format_source `. To migrate text format strings, use the :ref:`inline_string ` field. + // + // Deprecated: Marked as deprecated in envoy/config/core/v3/substitution_format_string.proto. + TextFormat string `protobuf:"bytes,1,opt,name=text_format,json=textFormat,proto3,oneof"` +} + +type SubstitutionFormatString_JsonFormat struct { + // Specify a format with command operators to form a JSON string. + // Its details is described in :ref:`format dictionary`. + // Values are rendered as strings, numbers, or boolean values as appropriate. + // Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA). + // See the documentation for a specific command operator for details. + // + // .. validated-code-block:: yaml + // + // :type-name: envoy.config.core.v3.SubstitutionFormatString + // + // json_format: + // status: "%RESPONSE_CODE%" + // message: "%LOCAL_REPLY_BODY%" + // + // The following JSON object would be created: + // + // .. code-block:: json + // + // { + // "status": 500, + // "message": "My error message" + // } + JsonFormat *structpb.Struct `protobuf:"bytes,2,opt,name=json_format,json=jsonFormat,proto3,oneof"` +} + +type SubstitutionFormatString_TextFormatSource struct { + // Specify a format with command operators to form a text string. + // Its details is described in :ref:`format string`. + // + // For example, setting “text_format“ like below, + // + // .. validated-code-block:: yaml + // + // :type-name: envoy.config.core.v3.SubstitutionFormatString + // + // text_format_source: + // inline_string: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" + // + // generates plain text similar to: + // + // .. 
code-block:: text + // + // upstream connect error:503:path=/foo + TextFormatSource *DataSource `protobuf:"bytes,5,opt,name=text_format_source,json=textFormatSource,proto3,oneof"` +} + +func (*SubstitutionFormatString_TextFormat) isSubstitutionFormatString_Format() {} + +func (*SubstitutionFormatString_JsonFormat) isSubstitutionFormatString_Format() {} + +func (*SubstitutionFormatString_TextFormatSource) isSubstitutionFormatString_Format() {} + +var File_envoy_config_core_v3_substitution_format_string_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_substitution_format_string_proto_rawDesc = []byte{ + 0x0a, 0x35, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, + 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0x3c, 0x0a, 0x11, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x72, 0x74, 0x5f, 0x70, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x73, + 0x6f, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xf2, 0x03, + 0x0a, 0x18, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x2e, 0x0a, 0x0b, 0x74, 0x65, + 0x78, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0a, + 0x74, 0x65, 0x78, 0x74, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x44, 0x0a, 0x0b, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, + 0x10, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 
0x61, 0x74, + 0x12, 0x50, 0x0a, 0x12, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x5f, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, + 0x52, 0x10, 0x74, 0x65, 0x78, 0x74, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x6d, 0x69, 0x74, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6f, + 0x6d, 0x69, 0x74, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x2e, + 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, + 0x02, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4a, + 0x0a, 0x0a, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, + 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74, 0x65, 0x72, 0x73, 0x12, 0x57, 0x0a, 0x13, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4a, + 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x11, 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x42, 0x0d, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x03, 0xf8, + 0x42, 0x01, 0x42, 0x91, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, + 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x42, 0x1d, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, + 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_substitution_format_string_proto_rawDescOnce sync.Once + file_envoy_config_core_v3_substitution_format_string_proto_rawDescData = file_envoy_config_core_v3_substitution_format_string_proto_rawDesc +) + +func file_envoy_config_core_v3_substitution_format_string_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_substitution_format_string_proto_rawDescOnce.Do(func() { + 
file_envoy_config_core_v3_substitution_format_string_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_substitution_format_string_proto_rawDescData) + }) + return file_envoy_config_core_v3_substitution_format_string_proto_rawDescData +} + +var file_envoy_config_core_v3_substitution_format_string_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_config_core_v3_substitution_format_string_proto_goTypes = []interface{}{ + (*JsonFormatOptions)(nil), // 0: envoy.config.core.v3.JsonFormatOptions + (*SubstitutionFormatString)(nil), // 1: envoy.config.core.v3.SubstitutionFormatString + (*structpb.Struct)(nil), // 2: google.protobuf.Struct + (*DataSource)(nil), // 3: envoy.config.core.v3.DataSource + (*TypedExtensionConfig)(nil), // 4: envoy.config.core.v3.TypedExtensionConfig +} +var file_envoy_config_core_v3_substitution_format_string_proto_depIdxs = []int32{ + 2, // 0: envoy.config.core.v3.SubstitutionFormatString.json_format:type_name -> google.protobuf.Struct + 3, // 1: envoy.config.core.v3.SubstitutionFormatString.text_format_source:type_name -> envoy.config.core.v3.DataSource + 4, // 2: envoy.config.core.v3.SubstitutionFormatString.formatters:type_name -> envoy.config.core.v3.TypedExtensionConfig + 0, // 3: envoy.config.core.v3.SubstitutionFormatString.json_format_options:type_name -> envoy.config.core.v3.JsonFormatOptions + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_substitution_format_string_proto_init() } +func file_envoy_config_core_v3_substitution_format_string_proto_init() { + if File_envoy_config_core_v3_substitution_format_string_proto != nil { + return + } + file_envoy_config_core_v3_base_proto_init() + file_envoy_config_core_v3_extension_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_substitution_format_string_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JsonFormatOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_core_v3_substitution_format_string_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubstitutionFormatString); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_core_v3_substitution_format_string_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*SubstitutionFormatString_TextFormat)(nil), + (*SubstitutionFormatString_JsonFormat)(nil), + (*SubstitutionFormatString_TextFormatSource)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_substitution_format_string_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_substitution_format_string_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_substitution_format_string_proto_depIdxs, + MessageInfos: file_envoy_config_core_v3_substitution_format_string_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_substitution_format_string_proto = out.File + 
file_envoy_config_core_v3_substitution_format_string_proto_rawDesc = nil + file_envoy_config_core_v3_substitution_format_string_proto_goTypes = nil + file_envoy_config_core_v3_substitution_format_string_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.validate.go new file mode 100644 index 0000000000..f21251e320 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string.pb.validate.go @@ -0,0 +1,445 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/core/v3/substitution_format_string.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on JsonFormatOptions with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *JsonFormatOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on JsonFormatOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// JsonFormatOptionsMultiError, or nil if none found. +func (m *JsonFormatOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *JsonFormatOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for SortProperties + + if len(errors) > 0 { + return JsonFormatOptionsMultiError(errors) + } + + return nil +} + +// JsonFormatOptionsMultiError is an error wrapping multiple validation errors +// returned by JsonFormatOptions.ValidateAll() if the designated constraints +// aren't met. +type JsonFormatOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m JsonFormatOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m JsonFormatOptionsMultiError) AllErrors() []error { return m } + +// JsonFormatOptionsValidationError is the validation error returned by +// JsonFormatOptions.Validate if the designated constraints aren't met. +type JsonFormatOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e JsonFormatOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e JsonFormatOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e JsonFormatOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e JsonFormatOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e JsonFormatOptionsValidationError) ErrorName() string { + return "JsonFormatOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e JsonFormatOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sJsonFormatOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = JsonFormatOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = JsonFormatOptionsValidationError{} + +// Validate checks the field values on SubstitutionFormatString with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *SubstitutionFormatString) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SubstitutionFormatString with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SubstitutionFormatStringMultiError, or nil if none found. +func (m *SubstitutionFormatString) ValidateAll() error { + return m.validate(true) +} + +func (m *SubstitutionFormatString) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for OmitEmptyValues + + if !_SubstitutionFormatString_ContentType_Pattern.MatchString(m.GetContentType()) { + err := SubstitutionFormatStringValidationError{ + field: "ContentType", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetFormatters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SubstitutionFormatStringValidationError{ + field: fmt.Sprintf("Formatters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SubstitutionFormatStringValidationError{ + field: fmt.Sprintf("Formatters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SubstitutionFormatStringValidationError{ + field: fmt.Sprintf("Formatters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetJsonFormatOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SubstitutionFormatStringValidationError{ + field: "JsonFormatOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SubstitutionFormatStringValidationError{ + field: "JsonFormatOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetJsonFormatOptions()).(interface{ Validate() error }); ok { 
+ if err := v.Validate(); err != nil { + return SubstitutionFormatStringValidationError{ + field: "JsonFormatOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofFormatPresent := false + switch v := m.Format.(type) { + case *SubstitutionFormatString_TextFormat: + if v == nil { + err := SubstitutionFormatStringValidationError{ + field: "Format", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFormatPresent = true + // no validation rules for TextFormat + case *SubstitutionFormatString_JsonFormat: + if v == nil { + err := SubstitutionFormatStringValidationError{ + field: "Format", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFormatPresent = true + + if m.GetJsonFormat() == nil { + err := SubstitutionFormatStringValidationError{ + field: "JsonFormat", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetJsonFormat()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SubstitutionFormatStringValidationError{ + field: "JsonFormat", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SubstitutionFormatStringValidationError{ + field: "JsonFormat", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetJsonFormat()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SubstitutionFormatStringValidationError{ + field: "JsonFormat", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *SubstitutionFormatString_TextFormatSource: + if v == nil { + err := SubstitutionFormatStringValidationError{ + field: "Format", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFormatPresent = true + + if all { + switch v := interface{}(m.GetTextFormatSource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SubstitutionFormatStringValidationError{ + field: "TextFormatSource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SubstitutionFormatStringValidationError{ + field: "TextFormatSource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTextFormatSource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SubstitutionFormatStringValidationError{ + field: "TextFormatSource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofFormatPresent { + err := SubstitutionFormatStringValidationError{ + field: "Format", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return SubstitutionFormatStringMultiError(errors) + } + + return nil +} + +// SubstitutionFormatStringMultiError is an error wrapping multiple validation +// errors returned by SubstitutionFormatString.ValidateAll() if the designated +// constraints aren't met. 
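
Reviewer note (illustrative sketch, not part of the vendored patch): the validator above requires exactly one member of the format oneof, requires json_format to be non-nil when chosen, and checks content_type against ^[^\x00\n\r]*$. A minimal sketch of building and validating a JSON-format configuration, using the vendored corev3 import path from the diff header:

package main

import (
	"fmt"

	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	fields, err := structpb.NewStruct(map[string]interface{}{
		"status":  "%RESPONSE_CODE%",
		"message": "%LOCAL_REPLY_BODY%",
	})
	if err != nil {
		panic(err)
	}

	cfg := &corev3.SubstitutionFormatString{
		Format:      &corev3.SubstitutionFormatString_JsonFormat{JsonFormat: fields},
		ContentType: "application/json",
	}

	// The format oneof is set and content_type contains no NUL/CR/LF bytes,
	// so this prints <nil>; clearing Format would fail with "value is required".
	fmt.Println(cfg.Validate())
}
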
+type SubstitutionFormatStringMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SubstitutionFormatStringMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SubstitutionFormatStringMultiError) AllErrors() []error { return m } + +// SubstitutionFormatStringValidationError is the validation error returned by +// SubstitutionFormatString.Validate if the designated constraints aren't met. +type SubstitutionFormatStringValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SubstitutionFormatStringValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SubstitutionFormatStringValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SubstitutionFormatStringValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SubstitutionFormatStringValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SubstitutionFormatStringValidationError) ErrorName() string { + return "SubstitutionFormatStringValidationError" +} + +// Error satisfies the builtin error interface +func (e SubstitutionFormatStringValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSubstitutionFormatString.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SubstitutionFormatStringValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SubstitutionFormatStringValidationError{} + +var _SubstitutionFormatString_ContentType_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string_vtproto.pb.go new file mode 100644 index 0000000000..2887f4d1a0 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/substitution_format_string_vtproto.pb.go @@ -0,0 +1,298 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/substitution_format_string.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *JsonFormatOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JsonFormatOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *JsonFormatOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SortProperties { + i-- + if m.SortProperties { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SubstitutionFormatString) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubstitutionFormatString) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubstitutionFormatString) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.JsonFormatOptions != nil { + size, err := m.JsonFormatOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if len(m.Formatters) > 0 { + for iNdEx := len(m.Formatters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Formatters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + } + if msg, ok := m.Format.(*SubstitutionFormatString_TextFormatSource); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.ContentType) > 0 { + i -= len(m.ContentType) + copy(dAtA[i:], m.ContentType) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ContentType))) + i-- + dAtA[i] = 0x22 + } + if m.OmitEmptyValues { + i-- + if m.OmitEmptyValues { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if msg, ok := m.Format.(*SubstitutionFormatString_JsonFormat); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Format.(*SubstitutionFormatString_TextFormat); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *SubstitutionFormatString_TextFormat) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubstitutionFormatString_TextFormat) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.TextFormat) + copy(dAtA[i:], m.TextFormat) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TextFormat))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func 
(m *SubstitutionFormatString_JsonFormat) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubstitutionFormatString_JsonFormat) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.JsonFormat != nil { + size, err := (*structpb.Struct)(m.JsonFormat).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *SubstitutionFormatString_TextFormatSource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubstitutionFormatString_TextFormatSource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TextFormatSource != nil { + size, err := m.TextFormatSource.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *JsonFormatOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SortProperties { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *SubstitutionFormatString) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Format.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.OmitEmptyValues { + n += 2 + } + l = len(m.ContentType) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Formatters) > 0 { + for _, e := range m.Formatters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.JsonFormatOptions != nil { + l = m.JsonFormatOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SubstitutionFormatString_TextFormat) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TextFormat) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *SubstitutionFormatString_JsonFormat) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.JsonFormat != nil { + l = (*structpb.Struct)(m.JsonFormat).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *SubstitutionFormatString_TextFormatSource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TextFormatSource != nil { + l = m.TextFormatSource.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.go new file mode 100644 index 0000000000..ebef4e6425 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.go @@ -0,0 +1,190 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
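The generated substitution_format_string.pb.validate.go above follows the usual protoc-gen-validate pattern: Validate returns the first violation, ValidateAll collects every violation into SubstitutionFormatStringMultiError, and validate(all bool) enforces the Format oneof requirement and the ContentType regex. A minimal usage sketch, illustrative only and not part of the vendored patch (the JSON payload and content type are made-up example values):

package main

import (
	"fmt"

	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	jsonFmt, err := structpb.NewStruct(map[string]interface{}{"message": "%LOCAL_REPLY_BODY%"})
	if err != nil {
		panic(err)
	}

	// A well-formed message: the Format oneof is set and ContentType matches ^[^\x00\n\r]*$.
	ok := &corev3.SubstitutionFormatString{
		ContentType: "application/json",
		Format:      &corev3.SubstitutionFormatString_JsonFormat{JsonFormat: jsonFmt},
	}
	fmt.Println(ok.Validate()) // <nil>

	// Leaving the Format oneof unset trips the "value is required" rule seen above.
	bad := &corev3.SubstitutionFormatString{}
	if err := bad.ValidateAll(); err != nil {
		if multi, isMulti := err.(corev3.SubstitutionFormatStringMultiError); isMulti {
			for _, violation := range multi.AllErrors() {
				fmt.Println(violation)
			}
		}
	}
}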
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/core/v3/udp_socket_config.proto + +package corev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Generic UDP socket configuration. +type UdpSocketConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The maximum size of received UDP datagrams. Using a larger size will cause Envoy to allocate + // more memory per socket. Received datagrams above this size will be dropped. If not set + // defaults to 1500 bytes. + MaxRxDatagramSize *wrapperspb.UInt64Value `protobuf:"bytes,1,opt,name=max_rx_datagram_size,json=maxRxDatagramSize,proto3" json:"max_rx_datagram_size,omitempty"` + // Configures whether Generic Receive Offload (GRO) + // _ is preferred when reading from the + // UDP socket. The default is context dependent and is documented where UdpSocketConfig is used. + // This option affects performance but not functionality. If GRO is not supported by the operating + // system, non-GRO receive will be used. + PreferGro *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=prefer_gro,json=preferGro,proto3" json:"prefer_gro,omitempty"` +} + +func (x *UdpSocketConfig) Reset() { + *x = UdpSocketConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_core_v3_udp_socket_config_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UdpSocketConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UdpSocketConfig) ProtoMessage() {} + +func (x *UdpSocketConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_core_v3_udp_socket_config_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UdpSocketConfig.ProtoReflect.Descriptor instead. 
+func (*UdpSocketConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_core_v3_udp_socket_config_proto_rawDescGZIP(), []int{0} +} + +func (x *UdpSocketConfig) GetMaxRxDatagramSize() *wrapperspb.UInt64Value { + if x != nil { + return x.MaxRxDatagramSize + } + return nil +} + +func (x *UdpSocketConfig) GetPreferGro() *wrapperspb.BoolValue { + if x != nil { + return x.PreferGro + } + return nil +} + +var File_envoy_config_core_v3_udp_socket_config_proto protoreflect.FileDescriptor + +var file_envoy_config_core_v3_udp_socket_config_proto_rawDesc = []byte{ + 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x64, 0x70, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa8, 0x01, 0x0a, + 0x0f, 0x55, 0x64, 0x70, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x5a, 0x0a, 0x14, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x78, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x67, + 0x72, 0x61, 0x6d, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0xfa, 0x42, + 0x08, 0x32, 0x06, 0x10, 0x80, 0x80, 0x04, 0x20, 0x00, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x52, 0x78, + 0x44, 0x61, 0x74, 0x61, 0x67, 0x72, 0x61, 0x6d, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x39, 0x0a, 0x0a, + 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x67, 0x72, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x72, + 0x65, 0x66, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x42, 0x88, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, + 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x14, 0x55, 0x64, 0x70, 0x53, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, + 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_core_v3_udp_socket_config_proto_rawDescOnce 
sync.Once + file_envoy_config_core_v3_udp_socket_config_proto_rawDescData = file_envoy_config_core_v3_udp_socket_config_proto_rawDesc +) + +func file_envoy_config_core_v3_udp_socket_config_proto_rawDescGZIP() []byte { + file_envoy_config_core_v3_udp_socket_config_proto_rawDescOnce.Do(func() { + file_envoy_config_core_v3_udp_socket_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_udp_socket_config_proto_rawDescData) + }) + return file_envoy_config_core_v3_udp_socket_config_proto_rawDescData +} + +var file_envoy_config_core_v3_udp_socket_config_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_core_v3_udp_socket_config_proto_goTypes = []interface{}{ + (*UdpSocketConfig)(nil), // 0: envoy.config.core.v3.UdpSocketConfig + (*wrapperspb.UInt64Value)(nil), // 1: google.protobuf.UInt64Value + (*wrapperspb.BoolValue)(nil), // 2: google.protobuf.BoolValue +} +var file_envoy_config_core_v3_udp_socket_config_proto_depIdxs = []int32{ + 1, // 0: envoy.config.core.v3.UdpSocketConfig.max_rx_datagram_size:type_name -> google.protobuf.UInt64Value + 2, // 1: envoy.config.core.v3.UdpSocketConfig.prefer_gro:type_name -> google.protobuf.BoolValue + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_config_core_v3_udp_socket_config_proto_init() } +func file_envoy_config_core_v3_udp_socket_config_proto_init() { + if File_envoy_config_core_v3_udp_socket_config_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_core_v3_udp_socket_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UdpSocketConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_core_v3_udp_socket_config_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_core_v3_udp_socket_config_proto_goTypes, + DependencyIndexes: file_envoy_config_core_v3_udp_socket_config_proto_depIdxs, + MessageInfos: file_envoy_config_core_v3_udp_socket_config_proto_msgTypes, + }.Build() + File_envoy_config_core_v3_udp_socket_config_proto = out.File + file_envoy_config_core_v3_udp_socket_config_proto_rawDesc = nil + file_envoy_config_core_v3_udp_socket_config_proto_goTypes = nil + file_envoy_config_core_v3_udp_socket_config_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.validate.go new file mode 100644 index 0000000000..f977eda2f5 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config.pb.validate.go @@ -0,0 +1,181 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
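UdpSocketConfig above is a small configuration message: max_rx_datagram_size defaults to 1500 bytes when unset and, per the validate rules in the file that follows, must lie strictly inside (0, 65536); prefer_gro is a plain BoolValue. A minimal sketch of populating it, illustrative only and not part of the vendored patch (the 9000-byte cap is an arbitrary example value):

package main

import (
	"fmt"

	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	cfg := &corev3.UdpSocketConfig{
		MaxRxDatagramSize: wrapperspb.UInt64(9000), // must stay inside (0, 65536)
		PreferGro:         wrapperspb.Bool(true),   // ask for GRO where the OS supports it
	}
	fmt.Println(cfg.Validate()) // <nil>

	cfg.MaxRxDatagramSize = wrapperspb.UInt64(65536) // upper bound is exclusive
	fmt.Println(cfg.Validate())                      // reports the (0, 65536) range violation
}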
+// source: envoy/config/core/v3/udp_socket_config.proto + +package corev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on UdpSocketConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *UdpSocketConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UdpSocketConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// UdpSocketConfigMultiError, or nil if none found. +func (m *UdpSocketConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *UdpSocketConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if wrapper := m.GetMaxRxDatagramSize(); wrapper != nil { + + if val := wrapper.GetValue(); val <= 0 || val >= 65536 { + err := UdpSocketConfigValidationError{ + field: "MaxRxDatagramSize", + reason: "value must be inside range (0, 65536)", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetPreferGro()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UdpSocketConfigValidationError{ + field: "PreferGro", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UdpSocketConfigValidationError{ + field: "PreferGro", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPreferGro()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UdpSocketConfigValidationError{ + field: "PreferGro", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return UdpSocketConfigMultiError(errors) + } + + return nil +} + +// UdpSocketConfigMultiError is an error wrapping multiple validation errors +// returned by UdpSocketConfig.ValidateAll() if the designated constraints +// aren't met. +type UdpSocketConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UdpSocketConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UdpSocketConfigMultiError) AllErrors() []error { return m } + +// UdpSocketConfigValidationError is the validation error returned by +// UdpSocketConfig.Validate if the designated constraints aren't met. +type UdpSocketConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UdpSocketConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e UdpSocketConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UdpSocketConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UdpSocketConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UdpSocketConfigValidationError) ErrorName() string { return "UdpSocketConfigValidationError" } + +// Error satisfies the builtin error interface +func (e UdpSocketConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUdpSocketConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UdpSocketConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UdpSocketConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config_vtproto.pb.go new file mode 100644 index 0000000000..2809993aec --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/udp_socket_config_vtproto.pb.go @@ -0,0 +1,91 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/core/v3/udp_socket_config.proto + +package corev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *UdpSocketConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UdpSocketConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UdpSocketConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PreferGro != nil { + size, err := (*wrapperspb.BoolValue)(m.PreferGro).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MaxRxDatagramSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.MaxRxDatagramSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UdpSocketConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxRxDatagramSize != nil { + l = (*wrapperspb.UInt64Value)(m.MaxRxDatagramSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PreferGro != nil { + l = (*wrapperspb.BoolValue)(m.PreferGro).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.go new file mode 100644 index 0000000000..96122a01d0 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.go @@ -0,0 +1,508 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/endpoint/v3/endpoint.proto + +package endpointv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Each route from RDS will map to a single cluster or traffic split across +// clusters using weights expressed in the RDS WeightedCluster. +// +// With EDS, each cluster is treated independently from a LB perspective, with +// LB taking place between the Localities within a cluster and at a finer +// granularity between the hosts within a locality. The percentage of traffic +// for each endpoint is determined by both its load_balancing_weight, and the +// load_balancing_weight of its locality. 
First, a locality will be selected, +// then an endpoint within that locality will be chose based on its weight. +// [#next-free-field: 6] +type ClusterLoadAssignment struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the cluster. This will be the :ref:`service_name + // ` value if specified + // in the cluster :ref:`EdsClusterConfig + // `. + ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + // List of endpoints to load balance to. + Endpoints []*LocalityLbEndpoints `protobuf:"bytes,2,rep,name=endpoints,proto3" json:"endpoints,omitempty"` + // Map of named endpoints that can be referenced in LocalityLbEndpoints. + // [#not-implemented-hide:] + NamedEndpoints map[string]*Endpoint `protobuf:"bytes,5,rep,name=named_endpoints,json=namedEndpoints,proto3" json:"named_endpoints,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Load balancing policy settings. + Policy *ClusterLoadAssignment_Policy `protobuf:"bytes,4,opt,name=policy,proto3" json:"policy,omitempty"` +} + +func (x *ClusterLoadAssignment) Reset() { + *x = ClusterLoadAssignment{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_endpoint_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClusterLoadAssignment) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClusterLoadAssignment) ProtoMessage() {} + +func (x *ClusterLoadAssignment) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_endpoint_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClusterLoadAssignment.ProtoReflect.Descriptor instead. +func (*ClusterLoadAssignment) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_endpoint_proto_rawDescGZIP(), []int{0} +} + +func (x *ClusterLoadAssignment) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *ClusterLoadAssignment) GetEndpoints() []*LocalityLbEndpoints { + if x != nil { + return x.Endpoints + } + return nil +} + +func (x *ClusterLoadAssignment) GetNamedEndpoints() map[string]*Endpoint { + if x != nil { + return x.NamedEndpoints + } + return nil +} + +func (x *ClusterLoadAssignment) GetPolicy() *ClusterLoadAssignment_Policy { + if x != nil { + return x.Policy + } + return nil +} + +// Load balancing policy settings. +// [#next-free-field: 7] +type ClusterLoadAssignment_Policy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Action to trim the overall incoming traffic to protect the upstream + // hosts. This action allows protection in case the hosts are unable to + // recover from an outage, or unable to autoscale or unable to handle + // incoming traffic volume for any reason. + // + // At the client each category is applied one after the other to generate + // the 'actual' drop percentage on all outgoing traffic. For example: + // + // .. 
code-block:: json + // + // { "drop_overloads": [ + // { "category": "throttle", "drop_percentage": 60 } + // { "category": "lb", "drop_percentage": 50 } + // ]} + // + // The actual drop percentages applied to the traffic at the clients will be + // + // "throttle"_drop = 60% + // "lb"_drop = 20% // 50% of the remaining 'actual' load, which is 40%. + // actual_outgoing_load = 20% // remaining after applying all categories. + // + // Envoy supports only one element and will NACK if more than one element is present. + // Other xDS-capable data planes will not necessarily have this limitation. + // + // In Envoy, this “drop_overloads“ config can be overridden by a runtime key + // "load_balancing_policy.drop_overload_limit" setting. This runtime key can be set to + // any integer number between 0 and 100. 0 means drop 0%. 100 means drop 100%. + // When both “drop_overloads“ config and "load_balancing_policy.drop_overload_limit" + // setting are in place, the min of these two wins. + DropOverloads []*ClusterLoadAssignment_Policy_DropOverload `protobuf:"bytes,2,rep,name=drop_overloads,json=dropOverloads,proto3" json:"drop_overloads,omitempty"` + // Priority levels and localities are considered overprovisioned with this + // factor (in percentage). This means that we don't consider a priority + // level or locality unhealthy until the fraction of healthy hosts + // multiplied by the overprovisioning factor drops below 100. + // With the default value 140(1.4), Envoy doesn't consider a priority level + // or a locality unhealthy until their percentage of healthy hosts drops + // below 72%. For example: + // + // .. code-block:: json + // + // { "overprovisioning_factor": 100 } + // + // Read more at :ref:`priority levels ` and + // :ref:`localities `. + OverprovisioningFactor *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=overprovisioning_factor,json=overprovisioningFactor,proto3" json:"overprovisioning_factor,omitempty"` + // The max time until which the endpoints from this assignment can be used. + // If no new assignments are received before this time expires the endpoints + // are considered stale and should be marked unhealthy. + // Defaults to 0 which means endpoints never go stale. + EndpointStaleAfter *durationpb.Duration `protobuf:"bytes,4,opt,name=endpoint_stale_after,json=endpointStaleAfter,proto3" json:"endpoint_stale_after,omitempty"` + // If true, use the :ref:`load balancing weight + // ` of healthy and unhealthy + // hosts to determine the health of the priority level. If false, use the number of healthy and unhealthy hosts + // to determine the health of the priority level, or in other words assume each host has a weight of 1 for + // this calculation. + // + // Note: this is not currently implemented for + // :ref:`locality weighted load balancing `. 
+ WeightedPriorityHealth bool `protobuf:"varint,6,opt,name=weighted_priority_health,json=weightedPriorityHealth,proto3" json:"weighted_priority_health,omitempty"` +} + +func (x *ClusterLoadAssignment_Policy) Reset() { + *x = ClusterLoadAssignment_Policy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_endpoint_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClusterLoadAssignment_Policy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClusterLoadAssignment_Policy) ProtoMessage() {} + +func (x *ClusterLoadAssignment_Policy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_endpoint_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClusterLoadAssignment_Policy.ProtoReflect.Descriptor instead. +func (*ClusterLoadAssignment_Policy) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_endpoint_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *ClusterLoadAssignment_Policy) GetDropOverloads() []*ClusterLoadAssignment_Policy_DropOverload { + if x != nil { + return x.DropOverloads + } + return nil +} + +func (x *ClusterLoadAssignment_Policy) GetOverprovisioningFactor() *wrapperspb.UInt32Value { + if x != nil { + return x.OverprovisioningFactor + } + return nil +} + +func (x *ClusterLoadAssignment_Policy) GetEndpointStaleAfter() *durationpb.Duration { + if x != nil { + return x.EndpointStaleAfter + } + return nil +} + +func (x *ClusterLoadAssignment_Policy) GetWeightedPriorityHealth() bool { + if x != nil { + return x.WeightedPriorityHealth + } + return false +} + +type ClusterLoadAssignment_Policy_DropOverload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier for the policy specifying the drop. + Category string `protobuf:"bytes,1,opt,name=category,proto3" json:"category,omitempty"` + // Percentage of traffic that should be dropped for the category. + DropPercentage *v3.FractionalPercent `protobuf:"bytes,2,opt,name=drop_percentage,json=dropPercentage,proto3" json:"drop_percentage,omitempty"` +} + +func (x *ClusterLoadAssignment_Policy_DropOverload) Reset() { + *x = ClusterLoadAssignment_Policy_DropOverload{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_endpoint_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClusterLoadAssignment_Policy_DropOverload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClusterLoadAssignment_Policy_DropOverload) ProtoMessage() {} + +func (x *ClusterLoadAssignment_Policy_DropOverload) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_endpoint_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClusterLoadAssignment_Policy_DropOverload.ProtoReflect.Descriptor instead. 
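The drop_overloads comment above walks through a two-category example: a 60% "throttle" drop, then a 50% "lb" drop applied to what remains, leaving 20% of traffic outgoing. A standalone sketch of that composition, illustrative only and not part of the vendored patch; it just reproduces the arithmetic from the comment (note that Envoy itself NACKs configs with more than one drop_overloads entry):

package main

import "fmt"

func main() {
	remaining := 100.0 // percent of traffic still flowing
	drops := []struct {
		category string
		percent  float64 // drop percentage applied to what is still flowing
	}{
		{"throttle", 60},
		{"lb", 50},
	}
	for _, d := range drops {
		effective := remaining * d.percent / 100
		remaining -= effective
		fmt.Printf("%q drops %.0f%% of total traffic\n", d.category, effective)
	}
	// Prints: "throttle" drops 60%, "lb" drops 20%, and 20% of the load goes out,
	// matching the worked example in the comment above.
	fmt.Printf("actual outgoing load: %.0f%%\n", remaining)
}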
+func (*ClusterLoadAssignment_Policy_DropOverload) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_endpoint_proto_rawDescGZIP(), []int{0, 0, 0} +} + +func (x *ClusterLoadAssignment_Policy_DropOverload) GetCategory() string { + if x != nil { + return x.Category + } + return "" +} + +func (x *ClusterLoadAssignment_Policy_DropOverload) GetDropPercentage() *v3.FractionalPercent { + if x != nil { + return x.DropPercentage + } + return nil +} + +var File_envoy_config_endpoint_v3_endpoint_proto protoreflect.FileDescriptor + +var file_envoy_config_endpoint_v3_endpoint_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x65, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x2e, 0x76, 0x33, 0x1a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xe0, 0x08, 0x0a, 0x15, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x41, + 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x0c, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x09, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x45, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x52, 0x09, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x73, 0x12, 0x6c, 0x0a, 0x0f, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, + 
0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, + 0x61, 0x64, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x61, 0x6d, + 0x65, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, + 0x12, 0x4e, 0x0a, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x1a, 0xfd, 0x04, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x6a, 0x0a, 0x0e, 0x64, + 0x72, 0x6f, 0x70, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x44, 0x72, 0x6f, 0x70, + 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x0d, 0x64, 0x72, 0x6f, 0x70, 0x4f, 0x76, + 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x12, 0x5e, 0x0a, 0x17, 0x6f, 0x76, 0x65, 0x72, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x66, 0x61, 0x63, 0x74, + 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x20, 0x00, 0x52, + 0x16, 0x6f, 0x76, 0x65, 0x72, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, + 0x67, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x55, 0x0a, 0x14, 0x65, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x12, 0x65, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x6c, 0x65, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x38, + 0x0a, 0x18, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, + 0x69, 0x74, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x16, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, + 0x74, 0x79, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x1a, 0xbd, 0x01, 0x0a, 0x0c, 0x44, 0x72, 0x6f, + 0x70, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x23, 0x0a, 0x08, 0x63, 0x61, 0x74, + 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x52, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x49, + 0x0a, 0x0f, 0x64, 0x72, 
0x6f, 0x70, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x64, 0x72, 0x6f, 0x70, 0x50, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x3a, 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, + 0x0a, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x44, 0x72, 0x6f, 0x70, + 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, + 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, + 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x52, 0x18, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, + 0x6f, 0x76, 0x65, 0x72, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x1a, 0x65, 0x0a, 0x13, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x38, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, + 0x6e, 0x74, 0x42, 0x8d, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x26, 0x69, + 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, + 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x65, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x2f, 0x76, 0x33, 0x3b, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_endpoint_v3_endpoint_proto_rawDescOnce sync.Once + file_envoy_config_endpoint_v3_endpoint_proto_rawDescData = file_envoy_config_endpoint_v3_endpoint_proto_rawDesc +) + +func file_envoy_config_endpoint_v3_endpoint_proto_rawDescGZIP() []byte { + file_envoy_config_endpoint_v3_endpoint_proto_rawDescOnce.Do(func() { + 
file_envoy_config_endpoint_v3_endpoint_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_endpoint_v3_endpoint_proto_rawDescData) + }) + return file_envoy_config_endpoint_v3_endpoint_proto_rawDescData +} + +var file_envoy_config_endpoint_v3_endpoint_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_envoy_config_endpoint_v3_endpoint_proto_goTypes = []interface{}{ + (*ClusterLoadAssignment)(nil), // 0: envoy.config.endpoint.v3.ClusterLoadAssignment + (*ClusterLoadAssignment_Policy)(nil), // 1: envoy.config.endpoint.v3.ClusterLoadAssignment.Policy + nil, // 2: envoy.config.endpoint.v3.ClusterLoadAssignment.NamedEndpointsEntry + (*ClusterLoadAssignment_Policy_DropOverload)(nil), // 3: envoy.config.endpoint.v3.ClusterLoadAssignment.Policy.DropOverload + (*LocalityLbEndpoints)(nil), // 4: envoy.config.endpoint.v3.LocalityLbEndpoints + (*wrapperspb.UInt32Value)(nil), // 5: google.protobuf.UInt32Value + (*durationpb.Duration)(nil), // 6: google.protobuf.Duration + (*Endpoint)(nil), // 7: envoy.config.endpoint.v3.Endpoint + (*v3.FractionalPercent)(nil), // 8: envoy.type.v3.FractionalPercent +} +var file_envoy_config_endpoint_v3_endpoint_proto_depIdxs = []int32{ + 4, // 0: envoy.config.endpoint.v3.ClusterLoadAssignment.endpoints:type_name -> envoy.config.endpoint.v3.LocalityLbEndpoints + 2, // 1: envoy.config.endpoint.v3.ClusterLoadAssignment.named_endpoints:type_name -> envoy.config.endpoint.v3.ClusterLoadAssignment.NamedEndpointsEntry + 1, // 2: envoy.config.endpoint.v3.ClusterLoadAssignment.policy:type_name -> envoy.config.endpoint.v3.ClusterLoadAssignment.Policy + 3, // 3: envoy.config.endpoint.v3.ClusterLoadAssignment.Policy.drop_overloads:type_name -> envoy.config.endpoint.v3.ClusterLoadAssignment.Policy.DropOverload + 5, // 4: envoy.config.endpoint.v3.ClusterLoadAssignment.Policy.overprovisioning_factor:type_name -> google.protobuf.UInt32Value + 6, // 5: envoy.config.endpoint.v3.ClusterLoadAssignment.Policy.endpoint_stale_after:type_name -> google.protobuf.Duration + 7, // 6: envoy.config.endpoint.v3.ClusterLoadAssignment.NamedEndpointsEntry.value:type_name -> envoy.config.endpoint.v3.Endpoint + 8, // 7: envoy.config.endpoint.v3.ClusterLoadAssignment.Policy.DropOverload.drop_percentage:type_name -> envoy.type.v3.FractionalPercent + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_envoy_config_endpoint_v3_endpoint_proto_init() } +func file_envoy_config_endpoint_v3_endpoint_proto_init() { + if File_envoy_config_endpoint_v3_endpoint_proto != nil { + return + } + file_envoy_config_endpoint_v3_endpoint_components_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_endpoint_v3_endpoint_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClusterLoadAssignment); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_endpoint_v3_endpoint_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClusterLoadAssignment_Policy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_endpoint_v3_endpoint_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v 
:= v.(*ClusterLoadAssignment_Policy_DropOverload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_endpoint_v3_endpoint_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_endpoint_v3_endpoint_proto_goTypes, + DependencyIndexes: file_envoy_config_endpoint_v3_endpoint_proto_depIdxs, + MessageInfos: file_envoy_config_endpoint_v3_endpoint_proto_msgTypes, + }.Build() + File_envoy_config_endpoint_v3_endpoint_proto = out.File + file_envoy_config_endpoint_v3_endpoint_proto_rawDesc = nil + file_envoy_config_endpoint_v3_endpoint_proto_goTypes = nil + file_envoy_config_endpoint_v3_endpoint_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.validate.go new file mode 100644 index 0000000000..1b9f2c1fe3 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint.pb.validate.go @@ -0,0 +1,589 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/endpoint/v3/endpoint.proto + +package endpointv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ClusterLoadAssignment with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ClusterLoadAssignment) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClusterLoadAssignment with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ClusterLoadAssignmentMultiError, or nil if none found. 
+func (m *ClusterLoadAssignment) ValidateAll() error { + return m.validate(true) +} + +func (m *ClusterLoadAssignment) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetClusterName()) < 1 { + err := ClusterLoadAssignmentValidationError{ + field: "ClusterName", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetEndpoints() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterLoadAssignmentValidationError{ + field: fmt.Sprintf("Endpoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterLoadAssignmentValidationError{ + field: fmt.Sprintf("Endpoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterLoadAssignmentValidationError{ + field: fmt.Sprintf("Endpoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + { + sorted_keys := make([]string, len(m.GetNamedEndpoints())) + i := 0 + for key := range m.GetNamedEndpoints() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetNamedEndpoints()[key] + _ = val + + // no validation rules for NamedEndpoints[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterLoadAssignmentValidationError{ + field: fmt.Sprintf("NamedEndpoints[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterLoadAssignmentValidationError{ + field: fmt.Sprintf("NamedEndpoints[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterLoadAssignmentValidationError{ + field: fmt.Sprintf("NamedEndpoints[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + if all { + switch v := interface{}(m.GetPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterLoadAssignmentValidationError{ + field: "Policy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterLoadAssignmentValidationError{ + field: "Policy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterLoadAssignmentValidationError{ + field: "Policy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ClusterLoadAssignmentMultiError(errors) + } + + return nil +} + +// 
ClusterLoadAssignmentMultiError is an error wrapping multiple validation +// errors returned by ClusterLoadAssignment.ValidateAll() if the designated +// constraints aren't met. +type ClusterLoadAssignmentMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClusterLoadAssignmentMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClusterLoadAssignmentMultiError) AllErrors() []error { return m } + +// ClusterLoadAssignmentValidationError is the validation error returned by +// ClusterLoadAssignment.Validate if the designated constraints aren't met. +type ClusterLoadAssignmentValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClusterLoadAssignmentValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClusterLoadAssignmentValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClusterLoadAssignmentValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClusterLoadAssignmentValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClusterLoadAssignmentValidationError) ErrorName() string { + return "ClusterLoadAssignmentValidationError" +} + +// Error satisfies the builtin error interface +func (e ClusterLoadAssignmentValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClusterLoadAssignment.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClusterLoadAssignmentValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClusterLoadAssignmentValidationError{} + +// Validate checks the field values on ClusterLoadAssignment_Policy with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ClusterLoadAssignment_Policy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClusterLoadAssignment_Policy with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ClusterLoadAssignment_PolicyMultiError, or nil if none found. 
+func (m *ClusterLoadAssignment_Policy) ValidateAll() error { + return m.validate(true) +} + +func (m *ClusterLoadAssignment_Policy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetDropOverloads() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterLoadAssignment_PolicyValidationError{ + field: fmt.Sprintf("DropOverloads[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterLoadAssignment_PolicyValidationError{ + field: fmt.Sprintf("DropOverloads[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterLoadAssignment_PolicyValidationError{ + field: fmt.Sprintf("DropOverloads[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if wrapper := m.GetOverprovisioningFactor(); wrapper != nil { + + if wrapper.GetValue() <= 0 { + err := ClusterLoadAssignment_PolicyValidationError{ + field: "OverprovisioningFactor", + reason: "value must be greater than 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if d := m.GetEndpointStaleAfter(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = ClusterLoadAssignment_PolicyValidationError{ + field: "EndpointStaleAfter", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := ClusterLoadAssignment_PolicyValidationError{ + field: "EndpointStaleAfter", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + // no validation rules for WeightedPriorityHealth + + if len(errors) > 0 { + return ClusterLoadAssignment_PolicyMultiError(errors) + } + + return nil +} + +// ClusterLoadAssignment_PolicyMultiError is an error wrapping multiple +// validation errors returned by ClusterLoadAssignment_Policy.ValidateAll() if +// the designated constraints aren't met. +type ClusterLoadAssignment_PolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClusterLoadAssignment_PolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClusterLoadAssignment_PolicyMultiError) AllErrors() []error { return m } + +// ClusterLoadAssignment_PolicyValidationError is the validation error returned +// by ClusterLoadAssignment_Policy.Validate if the designated constraints +// aren't met. +type ClusterLoadAssignment_PolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClusterLoadAssignment_PolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClusterLoadAssignment_PolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e ClusterLoadAssignment_PolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClusterLoadAssignment_PolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClusterLoadAssignment_PolicyValidationError) ErrorName() string { + return "ClusterLoadAssignment_PolicyValidationError" +} + +// Error satisfies the builtin error interface +func (e ClusterLoadAssignment_PolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClusterLoadAssignment_Policy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClusterLoadAssignment_PolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClusterLoadAssignment_PolicyValidationError{} + +// Validate checks the field values on +// ClusterLoadAssignment_Policy_DropOverload with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ClusterLoadAssignment_Policy_DropOverload) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// ClusterLoadAssignment_Policy_DropOverload with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// ClusterLoadAssignment_Policy_DropOverloadMultiError, or nil if none found. +func (m *ClusterLoadAssignment_Policy_DropOverload) ValidateAll() error { + return m.validate(true) +} + +func (m *ClusterLoadAssignment_Policy_DropOverload) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetCategory()) < 1 { + err := ClusterLoadAssignment_Policy_DropOverloadValidationError{ + field: "Category", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetDropPercentage()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterLoadAssignment_Policy_DropOverloadValidationError{ + field: "DropPercentage", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterLoadAssignment_Policy_DropOverloadValidationError{ + field: "DropPercentage", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDropPercentage()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterLoadAssignment_Policy_DropOverloadValidationError{ + field: "DropPercentage", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ClusterLoadAssignment_Policy_DropOverloadMultiError(errors) + } + + return nil +} + +// ClusterLoadAssignment_Policy_DropOverloadMultiError is an error wrapping +// multiple validation errors returned by +// ClusterLoadAssignment_Policy_DropOverload.ValidateAll() if the designated +// constraints aren't met. +type ClusterLoadAssignment_Policy_DropOverloadMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ClusterLoadAssignment_Policy_DropOverloadMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClusterLoadAssignment_Policy_DropOverloadMultiError) AllErrors() []error { return m } + +// ClusterLoadAssignment_Policy_DropOverloadValidationError is the validation +// error returned by ClusterLoadAssignment_Policy_DropOverload.Validate if the +// designated constraints aren't met. +type ClusterLoadAssignment_Policy_DropOverloadValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClusterLoadAssignment_Policy_DropOverloadValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClusterLoadAssignment_Policy_DropOverloadValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClusterLoadAssignment_Policy_DropOverloadValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClusterLoadAssignment_Policy_DropOverloadValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClusterLoadAssignment_Policy_DropOverloadValidationError) ErrorName() string { + return "ClusterLoadAssignment_Policy_DropOverloadValidationError" +} + +// Error satisfies the builtin error interface +func (e ClusterLoadAssignment_Policy_DropOverloadValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClusterLoadAssignment_Policy_DropOverload.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClusterLoadAssignment_Policy_DropOverloadValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClusterLoadAssignment_Policy_DropOverloadValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.go new file mode 100644 index 0000000000..4cff3e6df0 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.go @@ -0,0 +1,1004 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/endpoint/v3/endpoint_components.proto + +package endpointv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Upstream host identifier. +type Endpoint struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The upstream host address. 
+ // + // .. attention:: + // + // The form of host address depends on the given cluster type. For STATIC or EDS, + // it is expected to be a direct IP address (or something resolvable by the + // specified :ref:`resolver ` + // in the Address). For LOGICAL or STRICT DNS, it is expected to be hostname, + // and will be resolved via DNS. + Address *v3.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // The optional health check configuration is used as configuration for the + // health checker to contact the health checked host. + // + // .. attention:: + // + // This takes into effect only for upstream clusters with + // :ref:`active health checking ` enabled. + HealthCheckConfig *Endpoint_HealthCheckConfig `protobuf:"bytes,2,opt,name=health_check_config,json=healthCheckConfig,proto3" json:"health_check_config,omitempty"` + // The hostname associated with this endpoint. This hostname is not used for routing or address + // resolution. If provided, it will be associated with the endpoint, and can be used for features + // that require a hostname, like + // :ref:`auto_host_rewrite `. + Hostname string `protobuf:"bytes,3,opt,name=hostname,proto3" json:"hostname,omitempty"` + // An ordered list of addresses that together with “address“ comprise the + // list of addresses for an endpoint. The address given in the “address“ is + // prepended to this list. It is assumed that the list must already be + // sorted by preference order of the addresses. This will only be supported + // for STATIC and EDS clusters. + AdditionalAddresses []*Endpoint_AdditionalAddress `protobuf:"bytes,4,rep,name=additional_addresses,json=additionalAddresses,proto3" json:"additional_addresses,omitempty"` +} + +func (x *Endpoint) Reset() { + *x = Endpoint{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Endpoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Endpoint) ProtoMessage() {} + +func (x *Endpoint) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Endpoint.ProtoReflect.Descriptor instead. +func (*Endpoint) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescGZIP(), []int{0} +} + +func (x *Endpoint) GetAddress() *v3.Address { + if x != nil { + return x.Address + } + return nil +} + +func (x *Endpoint) GetHealthCheckConfig() *Endpoint_HealthCheckConfig { + if x != nil { + return x.HealthCheckConfig + } + return nil +} + +func (x *Endpoint) GetHostname() string { + if x != nil { + return x.Hostname + } + return "" +} + +func (x *Endpoint) GetAdditionalAddresses() []*Endpoint_AdditionalAddress { + if x != nil { + return x.AdditionalAddresses + } + return nil +} + +// An Endpoint that Envoy can route traffic to. +// [#next-free-field: 6] +type LbEndpoint struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Upstream host identifier or a named reference. 
+ // + // Types that are assignable to HostIdentifier: + // + // *LbEndpoint_Endpoint + // *LbEndpoint_EndpointName + HostIdentifier isLbEndpoint_HostIdentifier `protobuf_oneof:"host_identifier"` + // Optional health status when known and supplied by EDS server. + HealthStatus v3.HealthStatus `protobuf:"varint,2,opt,name=health_status,json=healthStatus,proto3,enum=envoy.config.core.v3.HealthStatus" json:"health_status,omitempty"` + // The endpoint metadata specifies values that may be used by the load + // balancer to select endpoints in a cluster for a given request. The filter + // name should be specified as “envoy.lb“. An example boolean key-value pair + // is “canary“, providing the optional canary status of the upstream host. + // This may be matched against in a route's + // :ref:`RouteAction ` metadata_match field + // to subset the endpoints considered in cluster load balancing. + Metadata *v3.Metadata `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The optional load balancing weight of the upstream host; at least 1. + // Envoy uses the load balancing weight in some of the built in load + // balancers. The load balancing weight for an endpoint is divided by the sum + // of the weights of all endpoints in the endpoint's locality to produce a + // percentage of traffic for the endpoint. This percentage is then further + // weighted by the endpoint's locality's load balancing weight from + // LocalityLbEndpoints. If unspecified, will be treated as 1. The sum + // of the weights of all endpoints in the endpoint's locality must not + // exceed uint32_t maximal value (4294967295). + LoadBalancingWeight *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=load_balancing_weight,json=loadBalancingWeight,proto3" json:"load_balancing_weight,omitempty"` +} + +func (x *LbEndpoint) Reset() { + *x = LbEndpoint{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LbEndpoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LbEndpoint) ProtoMessage() {} + +func (x *LbEndpoint) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LbEndpoint.ProtoReflect.Descriptor instead. 
+func (*LbEndpoint) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescGZIP(), []int{1} +} + +func (m *LbEndpoint) GetHostIdentifier() isLbEndpoint_HostIdentifier { + if m != nil { + return m.HostIdentifier + } + return nil +} + +func (x *LbEndpoint) GetEndpoint() *Endpoint { + if x, ok := x.GetHostIdentifier().(*LbEndpoint_Endpoint); ok { + return x.Endpoint + } + return nil +} + +func (x *LbEndpoint) GetEndpointName() string { + if x, ok := x.GetHostIdentifier().(*LbEndpoint_EndpointName); ok { + return x.EndpointName + } + return "" +} + +func (x *LbEndpoint) GetHealthStatus() v3.HealthStatus { + if x != nil { + return x.HealthStatus + } + return v3.HealthStatus(0) +} + +func (x *LbEndpoint) GetMetadata() *v3.Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *LbEndpoint) GetLoadBalancingWeight() *wrapperspb.UInt32Value { + if x != nil { + return x.LoadBalancingWeight + } + return nil +} + +type isLbEndpoint_HostIdentifier interface { + isLbEndpoint_HostIdentifier() +} + +type LbEndpoint_Endpoint struct { + Endpoint *Endpoint `protobuf:"bytes,1,opt,name=endpoint,proto3,oneof"` +} + +type LbEndpoint_EndpointName struct { + // [#not-implemented-hide:] + EndpointName string `protobuf:"bytes,5,opt,name=endpoint_name,json=endpointName,proto3,oneof"` +} + +func (*LbEndpoint_Endpoint) isLbEndpoint_HostIdentifier() {} + +func (*LbEndpoint_EndpointName) isLbEndpoint_HostIdentifier() {} + +// [#not-implemented-hide:] +// A configuration for a LEDS collection. +type LedsClusterLocalityConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configuration for the source of LEDS updates for a Locality. + LedsConfig *v3.ConfigSource `protobuf:"bytes,1,opt,name=leds_config,json=ledsConfig,proto3" json:"leds_config,omitempty"` + // The xDS transport protocol glob collection resource name. + // The service is only supported in delta xDS (incremental) mode. + LedsCollectionName string `protobuf:"bytes,2,opt,name=leds_collection_name,json=ledsCollectionName,proto3" json:"leds_collection_name,omitempty"` +} + +func (x *LedsClusterLocalityConfig) Reset() { + *x = LedsClusterLocalityConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LedsClusterLocalityConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LedsClusterLocalityConfig) ProtoMessage() {} + +func (x *LedsClusterLocalityConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LedsClusterLocalityConfig.ProtoReflect.Descriptor instead. +func (*LedsClusterLocalityConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescGZIP(), []int{2} +} + +func (x *LedsClusterLocalityConfig) GetLedsConfig() *v3.ConfigSource { + if x != nil { + return x.LedsConfig + } + return nil +} + +func (x *LedsClusterLocalityConfig) GetLedsCollectionName() string { + if x != nil { + return x.LedsCollectionName + } + return "" +} + +// A group of endpoints belonging to a Locality. 
+// One can have multiple LocalityLbEndpoints for a locality, but only if +// they have different priorities. +// [#next-free-field: 10] +type LocalityLbEndpoints struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifies location of where the upstream hosts run. + Locality *v3.Locality `protobuf:"bytes,1,opt,name=locality,proto3" json:"locality,omitempty"` + // Metadata to provide additional information about the locality endpoints in aggregate. + Metadata *v3.Metadata `protobuf:"bytes,9,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The group of endpoints belonging to the locality specified. + // [#comment:TODO(adisuissa): Once LEDS is implemented this field needs to be + // deprecated and replaced by “load_balancer_endpoints“.] + LbEndpoints []*LbEndpoint `protobuf:"bytes,2,rep,name=lb_endpoints,json=lbEndpoints,proto3" json:"lb_endpoints,omitempty"` + // [#not-implemented-hide:] + // + // Types that are assignable to LbConfig: + // + // *LocalityLbEndpoints_LoadBalancerEndpoints + // *LocalityLbEndpoints_LedsClusterLocalityConfig + LbConfig isLocalityLbEndpoints_LbConfig `protobuf_oneof:"lb_config"` + // Optional: Per priority/region/zone/sub_zone weight; at least 1. The load + // balancing weight for a locality is divided by the sum of the weights of all + // localities at the same priority level to produce the effective percentage + // of traffic for the locality. The sum of the weights of all localities at + // the same priority level must not exceed uint32_t maximal value (4294967295). + // + // Locality weights are only considered when :ref:`locality weighted load + // balancing ` is + // configured. These weights are ignored otherwise. If no weights are + // specified when locality weighted load balancing is enabled, the locality is + // assigned no load. + LoadBalancingWeight *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=load_balancing_weight,json=loadBalancingWeight,proto3" json:"load_balancing_weight,omitempty"` + // Optional: the priority for this LocalityLbEndpoints. If unspecified this will + // default to the highest priority (0). + // + // Under usual circumstances, Envoy will only select endpoints for the highest + // priority (0). In the event that enough endpoints for a particular priority are + // unavailable/unhealthy, Envoy will fail over to selecting endpoints for the + // next highest priority group. Read more at :ref:`priority levels `. + // + // Priorities should range from 0 (highest) to N (lowest) without skipping. + Priority uint32 `protobuf:"varint,5,opt,name=priority,proto3" json:"priority,omitempty"` + // Optional: Per locality proximity value which indicates how close this + // locality is from the source locality. This value only provides ordering + // information (lower the value, closer it is to the source locality). + // This will be consumed by load balancing schemes that need proximity order + // to determine where to route the requests. 
+ // [#not-implemented-hide:] + Proximity *wrapperspb.UInt32Value `protobuf:"bytes,6,opt,name=proximity,proto3" json:"proximity,omitempty"` +} + +func (x *LocalityLbEndpoints) Reset() { + *x = LocalityLbEndpoints{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalityLbEndpoints) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalityLbEndpoints) ProtoMessage() {} + +func (x *LocalityLbEndpoints) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalityLbEndpoints.ProtoReflect.Descriptor instead. +func (*LocalityLbEndpoints) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescGZIP(), []int{3} +} + +func (x *LocalityLbEndpoints) GetLocality() *v3.Locality { + if x != nil { + return x.Locality + } + return nil +} + +func (x *LocalityLbEndpoints) GetMetadata() *v3.Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *LocalityLbEndpoints) GetLbEndpoints() []*LbEndpoint { + if x != nil { + return x.LbEndpoints + } + return nil +} + +func (m *LocalityLbEndpoints) GetLbConfig() isLocalityLbEndpoints_LbConfig { + if m != nil { + return m.LbConfig + } + return nil +} + +func (x *LocalityLbEndpoints) GetLoadBalancerEndpoints() *LocalityLbEndpoints_LbEndpointList { + if x, ok := x.GetLbConfig().(*LocalityLbEndpoints_LoadBalancerEndpoints); ok { + return x.LoadBalancerEndpoints + } + return nil +} + +func (x *LocalityLbEndpoints) GetLedsClusterLocalityConfig() *LedsClusterLocalityConfig { + if x, ok := x.GetLbConfig().(*LocalityLbEndpoints_LedsClusterLocalityConfig); ok { + return x.LedsClusterLocalityConfig + } + return nil +} + +func (x *LocalityLbEndpoints) GetLoadBalancingWeight() *wrapperspb.UInt32Value { + if x != nil { + return x.LoadBalancingWeight + } + return nil +} + +func (x *LocalityLbEndpoints) GetPriority() uint32 { + if x != nil { + return x.Priority + } + return 0 +} + +func (x *LocalityLbEndpoints) GetProximity() *wrapperspb.UInt32Value { + if x != nil { + return x.Proximity + } + return nil +} + +type isLocalityLbEndpoints_LbConfig interface { + isLocalityLbEndpoints_LbConfig() +} + +type LocalityLbEndpoints_LoadBalancerEndpoints struct { + // The group of endpoints belonging to the locality. + // [#comment:TODO(adisuissa): Once LEDS is implemented the “lb_endpoints“ field + // needs to be deprecated.] + LoadBalancerEndpoints *LocalityLbEndpoints_LbEndpointList `protobuf:"bytes,7,opt,name=load_balancer_endpoints,json=loadBalancerEndpoints,proto3,oneof"` +} + +type LocalityLbEndpoints_LedsClusterLocalityConfig struct { + // LEDS Configuration for the current locality. + LedsClusterLocalityConfig *LedsClusterLocalityConfig `protobuf:"bytes,8,opt,name=leds_cluster_locality_config,json=ledsClusterLocalityConfig,proto3,oneof"` +} + +func (*LocalityLbEndpoints_LoadBalancerEndpoints) isLocalityLbEndpoints_LbConfig() {} + +func (*LocalityLbEndpoints_LedsClusterLocalityConfig) isLocalityLbEndpoints_LbConfig() {} + +// The optional health check configuration. 
+type Endpoint_HealthCheckConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional alternative health check port value. + // + // By default the health check address port of an upstream host is the same + // as the host's serving address port. This provides an alternative health + // check port. Setting this with a non-zero value allows an upstream host + // to have different health check address port. + PortValue uint32 `protobuf:"varint,1,opt,name=port_value,json=portValue,proto3" json:"port_value,omitempty"` + // By default, the host header for L7 health checks is controlled by cluster level configuration + // (see: :ref:`host ` and + // :ref:`authority `). Setting this + // to a non-empty value allows overriding the cluster level configuration for a specific + // endpoint. + Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3" json:"hostname,omitempty"` + // Optional alternative health check host address. + // + // .. attention:: + // + // The form of the health check host address is expected to be a direct IP address. + Address *v3.Address `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` + // Optional flag to control if perform active health check for this endpoint. + // Active health check is enabled by default if there is a health checker. + DisableActiveHealthCheck bool `protobuf:"varint,4,opt,name=disable_active_health_check,json=disableActiveHealthCheck,proto3" json:"disable_active_health_check,omitempty"` +} + +func (x *Endpoint_HealthCheckConfig) Reset() { + *x = Endpoint_HealthCheckConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Endpoint_HealthCheckConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Endpoint_HealthCheckConfig) ProtoMessage() {} + +func (x *Endpoint_HealthCheckConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Endpoint_HealthCheckConfig.ProtoReflect.Descriptor instead. +func (*Endpoint_HealthCheckConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Endpoint_HealthCheckConfig) GetPortValue() uint32 { + if x != nil { + return x.PortValue + } + return 0 +} + +func (x *Endpoint_HealthCheckConfig) GetHostname() string { + if x != nil { + return x.Hostname + } + return "" +} + +func (x *Endpoint_HealthCheckConfig) GetAddress() *v3.Address { + if x != nil { + return x.Address + } + return nil +} + +func (x *Endpoint_HealthCheckConfig) GetDisableActiveHealthCheck() bool { + if x != nil { + return x.DisableActiveHealthCheck + } + return false +} + +type Endpoint_AdditionalAddress struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Additional address that is associated with the endpoint. 
+ Address *v3.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` +} + +func (x *Endpoint_AdditionalAddress) Reset() { + *x = Endpoint_AdditionalAddress{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Endpoint_AdditionalAddress) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Endpoint_AdditionalAddress) ProtoMessage() {} + +func (x *Endpoint_AdditionalAddress) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Endpoint_AdditionalAddress.ProtoReflect.Descriptor instead. +func (*Endpoint_AdditionalAddress) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *Endpoint_AdditionalAddress) GetAddress() *v3.Address { + if x != nil { + return x.Address + } + return nil +} + +// [#not-implemented-hide:] +// A list of endpoints of a specific locality. +type LocalityLbEndpoints_LbEndpointList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LbEndpoints []*LbEndpoint `protobuf:"bytes,1,rep,name=lb_endpoints,json=lbEndpoints,proto3" json:"lb_endpoints,omitempty"` +} + +func (x *LocalityLbEndpoints_LbEndpointList) Reset() { + *x = LocalityLbEndpoints_LbEndpointList{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalityLbEndpoints_LbEndpointList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalityLbEndpoints_LbEndpointList) ProtoMessage() {} + +func (x *LocalityLbEndpoints_LbEndpointList) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalityLbEndpoints_LbEndpointList.ProtoReflect.Descriptor instead. 
+func (*LocalityLbEndpoints_LbEndpointList) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *LocalityLbEndpoints_LbEndpointList) GetLbEndpoints() []*LbEndpoint { + if x != nil { + return x.LbEndpoints + } + return nil +} + +var File_envoy_config_endpoint_v3_endpoint_components_proto protoreflect.FileDescriptor + +var file_envoy_config_endpoint_v3_endpoint_components_proto_rawDesc = []byte{ + 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x65, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x1a, 0x22, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, + 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, + 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0xb0, 0x05, 0x0a, 0x08, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x37, + 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x64, 0x0a, 0x13, 0x68, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, + 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x43, 
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x68, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, + 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x67, 0x0a, 0x14, 0x61, 0x64, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, + 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x41, 0x64, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x13, 0x61, + 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x65, 0x73, 0x1a, 0x8a, 0x02, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x28, 0x0a, 0x0a, 0x70, 0x6f, 0x72, 0x74, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x09, 0xfa, 0x42, + 0x06, 0x2a, 0x04, 0x18, 0xff, 0xff, 0x03, 0x52, 0x09, 0x70, 0x6f, 0x72, 0x74, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, + 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x64, 0x69, 0x73, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x64, 0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, + 0x4c, 0x0a, 0x11, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x12, 0x37, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x3a, 0x25, 0x9a, + 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x22, 0x91, 0x03, 0x0a, 0x0a, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 
0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, + 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x08, 0x65, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, + 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0d, + 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x12, 0x59, 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x69, 0x6e, 0x67, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x3a, 0x27, 0x9a, 0xc5, + 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x4c, 0x62, 0x45, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x42, 0x11, 0x0a, 0x0f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x22, 0x92, 0x01, 0x0a, 0x19, 0x4c, 0x65, 0x64, + 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x43, 0x0a, 0x0b, 0x6c, 0x65, 0x64, 0x73, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, + 0x0a, 0x6c, 0x65, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x30, 0x0a, 0x14, 0x6c, + 0x65, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6c, 0x65, 0x64, 0x73, 0x43, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x9d, 0x06, + 0x0a, 0x13, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 
0x33, 0x2e, 0x4c, + 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x47, 0x0a, + 0x0c, 0x6c, 0x62, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x4c, + 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0b, 0x6c, 0x62, 0x45, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x76, 0x0a, 0x17, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, + 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x45, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x2e, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x65, 0x72, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x76, + 0x0a, 0x1c, 0x6c, 0x65, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, + 0x4c, 0x65, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x6c, + 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x19, 0x6c, 0x65, 0x64, + 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x59, 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x13, 0x6c, 0x6f, + 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x57, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x12, 0x24, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0d, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, 0x80, 0x01, 0x52, 0x08, 0x70, + 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x78, 0x69, + 0x6d, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, + 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x78, 0x69, 0x6d, + 0x69, 
0x74, 0x79, 0x1a, 0x59, 0x0a, 0x0e, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x47, 0x0a, 0x0c, 0x6c, 0x62, 0x5f, 0x65, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x52, 0x0b, 0x6c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x3a, 0x30, + 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, + 0x42, 0x0b, 0x0a, 0x09, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x97, 0x01, + 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, + 0x42, 0x17, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, + 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2f, 0x76, 0x33, 0x3b, 0x65, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescOnce sync.Once + file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescData = file_envoy_config_endpoint_v3_endpoint_components_proto_rawDesc +) + +func file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescGZIP() []byte { + file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescOnce.Do(func() { + file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescData) + }) + return file_envoy_config_endpoint_v3_endpoint_components_proto_rawDescData +} + +var file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_envoy_config_endpoint_v3_endpoint_components_proto_goTypes = []interface{}{ + (*Endpoint)(nil), // 0: envoy.config.endpoint.v3.Endpoint + (*LbEndpoint)(nil), // 1: envoy.config.endpoint.v3.LbEndpoint + (*LedsClusterLocalityConfig)(nil), // 2: envoy.config.endpoint.v3.LedsClusterLocalityConfig + (*LocalityLbEndpoints)(nil), // 3: envoy.config.endpoint.v3.LocalityLbEndpoints + (*Endpoint_HealthCheckConfig)(nil), // 4: envoy.config.endpoint.v3.Endpoint.HealthCheckConfig + (*Endpoint_AdditionalAddress)(nil), // 5: envoy.config.endpoint.v3.Endpoint.AdditionalAddress + (*LocalityLbEndpoints_LbEndpointList)(nil), // 6: envoy.config.endpoint.v3.LocalityLbEndpoints.LbEndpointList + (*v3.Address)(nil), // 7: envoy.config.core.v3.Address + (v3.HealthStatus)(0), // 8: envoy.config.core.v3.HealthStatus + (*v3.Metadata)(nil), // 9: 
envoy.config.core.v3.Metadata + (*wrapperspb.UInt32Value)(nil), // 10: google.protobuf.UInt32Value + (*v3.ConfigSource)(nil), // 11: envoy.config.core.v3.ConfigSource + (*v3.Locality)(nil), // 12: envoy.config.core.v3.Locality +} +var file_envoy_config_endpoint_v3_endpoint_components_proto_depIdxs = []int32{ + 7, // 0: envoy.config.endpoint.v3.Endpoint.address:type_name -> envoy.config.core.v3.Address + 4, // 1: envoy.config.endpoint.v3.Endpoint.health_check_config:type_name -> envoy.config.endpoint.v3.Endpoint.HealthCheckConfig + 5, // 2: envoy.config.endpoint.v3.Endpoint.additional_addresses:type_name -> envoy.config.endpoint.v3.Endpoint.AdditionalAddress + 0, // 3: envoy.config.endpoint.v3.LbEndpoint.endpoint:type_name -> envoy.config.endpoint.v3.Endpoint + 8, // 4: envoy.config.endpoint.v3.LbEndpoint.health_status:type_name -> envoy.config.core.v3.HealthStatus + 9, // 5: envoy.config.endpoint.v3.LbEndpoint.metadata:type_name -> envoy.config.core.v3.Metadata + 10, // 6: envoy.config.endpoint.v3.LbEndpoint.load_balancing_weight:type_name -> google.protobuf.UInt32Value + 11, // 7: envoy.config.endpoint.v3.LedsClusterLocalityConfig.leds_config:type_name -> envoy.config.core.v3.ConfigSource + 12, // 8: envoy.config.endpoint.v3.LocalityLbEndpoints.locality:type_name -> envoy.config.core.v3.Locality + 9, // 9: envoy.config.endpoint.v3.LocalityLbEndpoints.metadata:type_name -> envoy.config.core.v3.Metadata + 1, // 10: envoy.config.endpoint.v3.LocalityLbEndpoints.lb_endpoints:type_name -> envoy.config.endpoint.v3.LbEndpoint + 6, // 11: envoy.config.endpoint.v3.LocalityLbEndpoints.load_balancer_endpoints:type_name -> envoy.config.endpoint.v3.LocalityLbEndpoints.LbEndpointList + 2, // 12: envoy.config.endpoint.v3.LocalityLbEndpoints.leds_cluster_locality_config:type_name -> envoy.config.endpoint.v3.LedsClusterLocalityConfig + 10, // 13: envoy.config.endpoint.v3.LocalityLbEndpoints.load_balancing_weight:type_name -> google.protobuf.UInt32Value + 10, // 14: envoy.config.endpoint.v3.LocalityLbEndpoints.proximity:type_name -> google.protobuf.UInt32Value + 7, // 15: envoy.config.endpoint.v3.Endpoint.HealthCheckConfig.address:type_name -> envoy.config.core.v3.Address + 7, // 16: envoy.config.endpoint.v3.Endpoint.AdditionalAddress.address:type_name -> envoy.config.core.v3.Address + 1, // 17: envoy.config.endpoint.v3.LocalityLbEndpoints.LbEndpointList.lb_endpoints:type_name -> envoy.config.endpoint.v3.LbEndpoint + 18, // [18:18] is the sub-list for method output_type + 18, // [18:18] is the sub-list for method input_type + 18, // [18:18] is the sub-list for extension type_name + 18, // [18:18] is the sub-list for extension extendee + 0, // [0:18] is the sub-list for field type_name +} + +func init() { file_envoy_config_endpoint_v3_endpoint_components_proto_init() } +func file_envoy_config_endpoint_v3_endpoint_components_proto_init() { + if File_envoy_config_endpoint_v3_endpoint_components_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Endpoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LbEndpoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } 
+ file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LedsClusterLocalityConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalityLbEndpoints); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Endpoint_HealthCheckConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Endpoint_AdditionalAddress); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalityLbEndpoints_LbEndpointList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*LbEndpoint_Endpoint)(nil), + (*LbEndpoint_EndpointName)(nil), + } + file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*LocalityLbEndpoints_LoadBalancerEndpoints)(nil), + (*LocalityLbEndpoints_LedsClusterLocalityConfig)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_endpoint_v3_endpoint_components_proto_rawDesc, + NumEnums: 0, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_endpoint_v3_endpoint_components_proto_goTypes, + DependencyIndexes: file_envoy_config_endpoint_v3_endpoint_components_proto_depIdxs, + MessageInfos: file_envoy_config_endpoint_v3_endpoint_components_proto_msgTypes, + }.Build() + File_envoy_config_endpoint_v3_endpoint_components_proto = out.File + file_envoy_config_endpoint_v3_endpoint_components_proto_rawDesc = nil + file_envoy_config_endpoint_v3_endpoint_components_proto_goTypes = nil + file_envoy_config_endpoint_v3_endpoint_components_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.validate.go new file mode 100644 index 0000000000..f11d8c5088 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components.pb.validate.go @@ -0,0 +1,1322 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/config/endpoint/v3/endpoint_components.proto + +package endpointv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = v3.HealthStatus(0) +) + +// Validate checks the field values on Endpoint with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Endpoint) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Endpoint with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in EndpointMultiError, or nil +// if none found. +func (m *Endpoint) ValidateAll() error { + return m.validate(true) +} + +func (m *Endpoint) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetHealthCheckConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointValidationError{ + field: "HealthCheckConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointValidationError{ + field: "HealthCheckConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHealthCheckConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointValidationError{ + field: "HealthCheckConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Hostname + + for idx, item := range m.GetAdditionalAddresses() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EndpointValidationError{ + field: fmt.Sprintf("AdditionalAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EndpointValidationError{ + field: 
fmt.Sprintf("AdditionalAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EndpointValidationError{ + field: fmt.Sprintf("AdditionalAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return EndpointMultiError(errors) + } + + return nil +} + +// EndpointMultiError is an error wrapping multiple validation errors returned +// by Endpoint.ValidateAll() if the designated constraints aren't met. +type EndpointMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EndpointMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EndpointMultiError) AllErrors() []error { return m } + +// EndpointValidationError is the validation error returned by +// Endpoint.Validate if the designated constraints aren't met. +type EndpointValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EndpointValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EndpointValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EndpointValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EndpointValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e EndpointValidationError) ErrorName() string { return "EndpointValidationError" } + +// Error satisfies the builtin error interface +func (e EndpointValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEndpoint.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EndpointValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EndpointValidationError{} + +// Validate checks the field values on LbEndpoint with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *LbEndpoint) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LbEndpoint with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in LbEndpointMultiError, or +// nil if none found. 
+func (m *LbEndpoint) ValidateAll() error { + return m.validate(true) +} + +func (m *LbEndpoint) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for HealthStatus + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LbEndpointValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LbEndpointValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LbEndpointValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if wrapper := m.GetLoadBalancingWeight(); wrapper != nil { + + if wrapper.GetValue() < 1 { + err := LbEndpointValidationError{ + field: "LoadBalancingWeight", + reason: "value must be greater than or equal to 1", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + switch v := m.HostIdentifier.(type) { + case *LbEndpoint_Endpoint: + if v == nil { + err := LbEndpointValidationError{ + field: "HostIdentifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetEndpoint()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LbEndpointValidationError{ + field: "Endpoint", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LbEndpointValidationError{ + field: "Endpoint", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEndpoint()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LbEndpointValidationError{ + field: "Endpoint", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *LbEndpoint_EndpointName: + if v == nil { + err := LbEndpointValidationError{ + field: "HostIdentifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for EndpointName + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return LbEndpointMultiError(errors) + } + + return nil +} + +// LbEndpointMultiError is an error wrapping multiple validation errors +// returned by LbEndpoint.ValidateAll() if the designated constraints aren't met. +type LbEndpointMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LbEndpointMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LbEndpointMultiError) AllErrors() []error { return m } + +// LbEndpointValidationError is the validation error returned by +// LbEndpoint.Validate if the designated constraints aren't met. 
+type LbEndpointValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LbEndpointValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LbEndpointValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LbEndpointValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LbEndpointValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LbEndpointValidationError) ErrorName() string { return "LbEndpointValidationError" } + +// Error satisfies the builtin error interface +func (e LbEndpointValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLbEndpoint.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LbEndpointValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LbEndpointValidationError{} + +// Validate checks the field values on LedsClusterLocalityConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *LedsClusterLocalityConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LedsClusterLocalityConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// LedsClusterLocalityConfigMultiError, or nil if none found. +func (m *LedsClusterLocalityConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *LedsClusterLocalityConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetLedsConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LedsClusterLocalityConfigValidationError{ + field: "LedsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LedsClusterLocalityConfigValidationError{ + field: "LedsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLedsConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LedsClusterLocalityConfigValidationError{ + field: "LedsConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for LedsCollectionName + + if len(errors) > 0 { + return LedsClusterLocalityConfigMultiError(errors) + } + + return nil +} + +// LedsClusterLocalityConfigMultiError is an error wrapping multiple validation +// errors returned by LedsClusterLocalityConfig.ValidateAll() if the +// designated constraints aren't met. +type LedsClusterLocalityConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m LedsClusterLocalityConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LedsClusterLocalityConfigMultiError) AllErrors() []error { return m } + +// LedsClusterLocalityConfigValidationError is the validation error returned by +// LedsClusterLocalityConfig.Validate if the designated constraints aren't met. +type LedsClusterLocalityConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LedsClusterLocalityConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LedsClusterLocalityConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LedsClusterLocalityConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LedsClusterLocalityConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LedsClusterLocalityConfigValidationError) ErrorName() string { + return "LedsClusterLocalityConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e LedsClusterLocalityConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLedsClusterLocalityConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LedsClusterLocalityConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LedsClusterLocalityConfigValidationError{} + +// Validate checks the field values on LocalityLbEndpoints with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *LocalityLbEndpoints) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LocalityLbEndpoints with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// LocalityLbEndpointsMultiError, or nil if none found. 
+func (m *LocalityLbEndpoints) ValidateAll() error { + return m.validate(true) +} + +func (m *LocalityLbEndpoints) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetLocality()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: "Locality", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: "Locality", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocality()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbEndpointsValidationError{ + field: "Locality", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbEndpointsValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetLbEndpoints() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: fmt.Sprintf("LbEndpoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: fmt.Sprintf("LbEndpoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbEndpointsValidationError{ + field: fmt.Sprintf("LbEndpoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if wrapper := m.GetLoadBalancingWeight(); wrapper != nil { + + if wrapper.GetValue() < 1 { + err := LocalityLbEndpointsValidationError{ + field: "LoadBalancingWeight", + reason: "value must be greater than or equal to 1", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if m.GetPriority() > 128 { + err := LocalityLbEndpointsValidationError{ + field: "Priority", + reason: "value must be less than or equal to 128", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetProximity()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: "Proximity", + reason: "embedded message failed validation", + cause: err, + }) 
+ } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: "Proximity", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetProximity()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbEndpointsValidationError{ + field: "Proximity", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.LbConfig.(type) { + case *LocalityLbEndpoints_LoadBalancerEndpoints: + if v == nil { + err := LocalityLbEndpointsValidationError{ + field: "LbConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetLoadBalancerEndpoints()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: "LoadBalancerEndpoints", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: "LoadBalancerEndpoints", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLoadBalancerEndpoints()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbEndpointsValidationError{ + field: "LoadBalancerEndpoints", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *LocalityLbEndpoints_LedsClusterLocalityConfig: + if v == nil { + err := LocalityLbEndpointsValidationError{ + field: "LbConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetLedsClusterLocalityConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: "LedsClusterLocalityConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbEndpointsValidationError{ + field: "LedsClusterLocalityConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLedsClusterLocalityConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbEndpointsValidationError{ + field: "LedsClusterLocalityConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return LocalityLbEndpointsMultiError(errors) + } + + return nil +} + +// LocalityLbEndpointsMultiError is an error wrapping multiple validation +// errors returned by LocalityLbEndpoints.ValidateAll() if the designated +// constraints aren't met. +type LocalityLbEndpointsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LocalityLbEndpointsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m LocalityLbEndpointsMultiError) AllErrors() []error { return m } + +// LocalityLbEndpointsValidationError is the validation error returned by +// LocalityLbEndpoints.Validate if the designated constraints aren't met. +type LocalityLbEndpointsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LocalityLbEndpointsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LocalityLbEndpointsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LocalityLbEndpointsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LocalityLbEndpointsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LocalityLbEndpointsValidationError) ErrorName() string { + return "LocalityLbEndpointsValidationError" +} + +// Error satisfies the builtin error interface +func (e LocalityLbEndpointsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLocalityLbEndpoints.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LocalityLbEndpointsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LocalityLbEndpointsValidationError{} + +// Validate checks the field values on Endpoint_HealthCheckConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Endpoint_HealthCheckConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Endpoint_HealthCheckConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Endpoint_HealthCheckConfigMultiError, or nil if none found. 
+func (m *Endpoint_HealthCheckConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Endpoint_HealthCheckConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetPortValue() > 65535 { + err := Endpoint_HealthCheckConfigValidationError{ + field: "PortValue", + reason: "value must be less than or equal to 65535", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for Hostname + + if all { + switch v := interface{}(m.GetAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Endpoint_HealthCheckConfigValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Endpoint_HealthCheckConfigValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Endpoint_HealthCheckConfigValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for DisableActiveHealthCheck + + if len(errors) > 0 { + return Endpoint_HealthCheckConfigMultiError(errors) + } + + return nil +} + +// Endpoint_HealthCheckConfigMultiError is an error wrapping multiple +// validation errors returned by Endpoint_HealthCheckConfig.ValidateAll() if +// the designated constraints aren't met. +type Endpoint_HealthCheckConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Endpoint_HealthCheckConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Endpoint_HealthCheckConfigMultiError) AllErrors() []error { return m } + +// Endpoint_HealthCheckConfigValidationError is the validation error returned +// by Endpoint_HealthCheckConfig.Validate if the designated constraints aren't met. +type Endpoint_HealthCheckConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Endpoint_HealthCheckConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Endpoint_HealthCheckConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Endpoint_HealthCheckConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Endpoint_HealthCheckConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Endpoint_HealthCheckConfigValidationError) ErrorName() string { + return "Endpoint_HealthCheckConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Endpoint_HealthCheckConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEndpoint_HealthCheckConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Endpoint_HealthCheckConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Endpoint_HealthCheckConfigValidationError{} + +// Validate checks the field values on Endpoint_AdditionalAddress with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Endpoint_AdditionalAddress) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Endpoint_AdditionalAddress with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Endpoint_AdditionalAddressMultiError, or nil if none found. +func (m *Endpoint_AdditionalAddress) ValidateAll() error { + return m.validate(true) +} + +func (m *Endpoint_AdditionalAddress) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Endpoint_AdditionalAddressValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Endpoint_AdditionalAddressValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Endpoint_AdditionalAddressValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Endpoint_AdditionalAddressMultiError(errors) + } + + return nil +} + +// Endpoint_AdditionalAddressMultiError is an error wrapping multiple +// validation errors returned by Endpoint_AdditionalAddress.ValidateAll() if +// the designated constraints aren't met. +type Endpoint_AdditionalAddressMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Endpoint_AdditionalAddressMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Endpoint_AdditionalAddressMultiError) AllErrors() []error { return m } + +// Endpoint_AdditionalAddressValidationError is the validation error returned +// by Endpoint_AdditionalAddress.Validate if the designated constraints aren't met. +type Endpoint_AdditionalAddressValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e Endpoint_AdditionalAddressValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Endpoint_AdditionalAddressValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Endpoint_AdditionalAddressValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Endpoint_AdditionalAddressValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Endpoint_AdditionalAddressValidationError) ErrorName() string { + return "Endpoint_AdditionalAddressValidationError" +} + +// Error satisfies the builtin error interface +func (e Endpoint_AdditionalAddressValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEndpoint_AdditionalAddress.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Endpoint_AdditionalAddressValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Endpoint_AdditionalAddressValidationError{} + +// Validate checks the field values on LocalityLbEndpoints_LbEndpointList with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *LocalityLbEndpoints_LbEndpointList) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LocalityLbEndpoints_LbEndpointList +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// LocalityLbEndpoints_LbEndpointListMultiError, or nil if none found. +func (m *LocalityLbEndpoints_LbEndpointList) ValidateAll() error { + return m.validate(true) +} + +func (m *LocalityLbEndpoints_LbEndpointList) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetLbEndpoints() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbEndpoints_LbEndpointListValidationError{ + field: fmt.Sprintf("LbEndpoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbEndpoints_LbEndpointListValidationError{ + field: fmt.Sprintf("LbEndpoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbEndpoints_LbEndpointListValidationError{ + field: fmt.Sprintf("LbEndpoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return LocalityLbEndpoints_LbEndpointListMultiError(errors) + } + + return nil +} + +// LocalityLbEndpoints_LbEndpointListMultiError is an error wrapping multiple +// validation errors returned by +// LocalityLbEndpoints_LbEndpointList.ValidateAll() if the designated +// constraints aren't met. +type LocalityLbEndpoints_LbEndpointListMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m LocalityLbEndpoints_LbEndpointListMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LocalityLbEndpoints_LbEndpointListMultiError) AllErrors() []error { return m } + +// LocalityLbEndpoints_LbEndpointListValidationError is the validation error +// returned by LocalityLbEndpoints_LbEndpointList.Validate if the designated +// constraints aren't met. +type LocalityLbEndpoints_LbEndpointListValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LocalityLbEndpoints_LbEndpointListValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LocalityLbEndpoints_LbEndpointListValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LocalityLbEndpoints_LbEndpointListValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LocalityLbEndpoints_LbEndpointListValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LocalityLbEndpoints_LbEndpointListValidationError) ErrorName() string { + return "LocalityLbEndpoints_LbEndpointListValidationError" +} + +// Error satisfies the builtin error interface +func (e LocalityLbEndpoints_LbEndpointListValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLocalityLbEndpoints_LbEndpointList.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LocalityLbEndpoints_LbEndpointListValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LocalityLbEndpoints_LbEndpointListValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components_vtproto.pb.go new file mode 100644 index 0000000000..b86ffd0d1d --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_components_vtproto.pb.go @@ -0,0 +1,896 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/endpoint/v3/endpoint_components.proto + +package endpointv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Endpoint_HealthCheckConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint_HealthCheckConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Endpoint_HealthCheckConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DisableActiveHealthCheck { + i-- + if m.DisableActiveHealthCheck { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x12 + } + if m.PortValue != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.PortValue)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Endpoint_AdditionalAddress) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint_AdditionalAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Endpoint_AdditionalAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Endpoint) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Endpoint) 
MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.AdditionalAddresses) > 0 { + for iNdEx := len(m.AdditionalAddresses) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.AdditionalAddresses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x1a + } + if m.HealthCheckConfig != nil { + size, err := m.HealthCheckConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LbEndpoint) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LbEndpoint) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LbEndpoint) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.HostIdentifier.(*LbEndpoint_EndpointName); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.LoadBalancingWeight != nil { + size, err := (*wrapperspb.UInt32Value)(m.LoadBalancingWeight).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.HealthStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HealthStatus)) + i-- + dAtA[i] = 0x10 + } + if msg, ok := m.HostIdentifier.(*LbEndpoint_Endpoint); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m 
*LbEndpoint_Endpoint) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LbEndpoint_Endpoint) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Endpoint != nil { + size, err := m.Endpoint.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *LbEndpoint_EndpointName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LbEndpoint_EndpointName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.EndpointName) + copy(dAtA[i:], m.EndpointName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.EndpointName))) + i-- + dAtA[i] = 0x2a + return len(dAtA) - i, nil +} +func (m *LedsClusterLocalityConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LedsClusterLocalityConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LedsClusterLocalityConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.LedsCollectionName) > 0 { + i -= len(m.LedsCollectionName) + copy(dAtA[i:], m.LedsCollectionName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LedsCollectionName))) + i-- + dAtA[i] = 0x12 + } + if m.LedsConfig != nil { + if vtmsg, ok := interface{}(m.LedsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LedsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LocalityLbEndpoints_LbEndpointList) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalityLbEndpoints_LbEndpointList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbEndpoints_LbEndpointList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.LbEndpoints) > 0 { + for iNdEx := len(m.LbEndpoints) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.LbEndpoints[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LocalityLbEndpoints) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalityLbEndpoints) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbEndpoints) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + if msg, ok := m.LbConfig.(*LocalityLbEndpoints_LedsClusterLocalityConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.LbConfig.(*LocalityLbEndpoints_LoadBalancerEndpoints); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Proximity != nil { + size, err := (*wrapperspb.UInt32Value)(m.Proximity).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.Priority != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x28 + } + if m.LoadBalancingWeight != nil { + size, err := (*wrapperspb.UInt32Value)(m.LoadBalancingWeight).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.LbEndpoints) > 0 { + for iNdEx := len(m.LbEndpoints) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.LbEndpoints[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.Locality != nil { + if vtmsg, ok := interface{}(m.Locality).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Locality) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LocalityLbEndpoints_LoadBalancerEndpoints) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbEndpoints_LoadBalancerEndpoints) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := 
len(dAtA) + if m.LoadBalancerEndpoints != nil { + size, err := m.LoadBalancerEndpoints.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *LocalityLbEndpoints_LedsClusterLocalityConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbEndpoints_LedsClusterLocalityConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LedsClusterLocalityConfig != nil { + size, err := m.LedsClusterLocalityConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *Endpoint_HealthCheckConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PortValue != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.PortValue)) + } + l = len(m.Hostname) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DisableActiveHealthCheck { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *Endpoint_AdditionalAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Endpoint) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HealthCheckConfig != nil { + l = m.HealthCheckConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Hostname) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.AdditionalAddresses) > 0 { + for _, e := range m.AdditionalAddresses { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *LbEndpoint) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.HostIdentifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.HealthStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HealthStatus)) + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LoadBalancingWeight != nil { + l = (*wrapperspb.UInt32Value)(m.LoadBalancingWeight).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LbEndpoint_Endpoint) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + 
if m.Endpoint != nil { + l = m.Endpoint.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *LbEndpoint_EndpointName) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.EndpointName) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *LedsClusterLocalityConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LedsConfig != nil { + if size, ok := interface{}(m.LedsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LedsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.LedsCollectionName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LocalityLbEndpoints_LbEndpointList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.LbEndpoints) > 0 { + for _, e := range m.LbEndpoints { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *LocalityLbEndpoints) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Locality != nil { + if size, ok := interface{}(m.Locality).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Locality) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.LbEndpoints) > 0 { + for _, e := range m.LbEndpoints { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.LoadBalancingWeight != nil { + l = (*wrapperspb.UInt32Value)(m.LoadBalancingWeight).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Priority != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Priority)) + } + if m.Proximity != nil { + l = (*wrapperspb.UInt32Value)(m.Proximity).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.LbConfig.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LocalityLbEndpoints_LoadBalancerEndpoints) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LoadBalancerEndpoints != nil { + l = m.LoadBalancerEndpoints.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *LocalityLbEndpoints_LedsClusterLocalityConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LedsClusterLocalityConfig != nil { + l = m.LedsClusterLocalityConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_vtproto.pb.go new file mode 100644 index 0000000000..368eefe03e --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/endpoint_vtproto.pb.go @@ -0,0 +1,331 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
+// source: envoy/config/endpoint/v3/endpoint.proto + +package endpointv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ClusterLoadAssignment_Policy_DropOverload) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterLoadAssignment_Policy_DropOverload) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterLoadAssignment_Policy_DropOverload) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DropPercentage != nil { + if vtmsg, ok := interface{}(m.DropPercentage).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DropPercentage) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Category) > 0 { + i -= len(m.Category) + copy(dAtA[i:], m.Category) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Category))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterLoadAssignment_Policy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterLoadAssignment_Policy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterLoadAssignment_Policy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.WeightedPriorityHealth { + i-- + if m.WeightedPriorityHealth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.EndpointStaleAfter != nil { + size, err := (*durationpb.Duration)(m.EndpointStaleAfter).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.OverprovisioningFactor != nil { + size, err := (*wrapperspb.UInt32Value)(m.OverprovisioningFactor).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.DropOverloads) > 0 { + for iNdEx := len(m.DropOverloads) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DropOverloads[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func (m *ClusterLoadAssignment) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterLoadAssignment) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterLoadAssignment) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.NamedEndpoints) > 0 { + for k := range m.NamedEndpoints { + v := m.NamedEndpoints[k] + baseI := i + size, err := v.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if m.Policy != nil { + size, err := m.Policy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.Endpoints) > 0 { + for iNdEx := len(m.Endpoints) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Endpoints[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.ClusterName) > 0 { + i -= len(m.ClusterName) + copy(dAtA[i:], m.ClusterName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterLoadAssignment_Policy_DropOverload) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Category) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DropPercentage != nil { + if size, ok := interface{}(m.DropPercentage).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DropPercentage) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClusterLoadAssignment_Policy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DropOverloads) > 0 { + for _, e := range m.DropOverloads { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.OverprovisioningFactor != nil { + l = (*wrapperspb.UInt32Value)(m.OverprovisioningFactor).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EndpointStaleAfter != nil { + l = (*durationpb.Duration)(m.EndpointStaleAfter).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.WeightedPriorityHealth { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ClusterLoadAssignment) 
SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClusterName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Endpoints) > 0 { + for _, e := range m.Endpoints { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Policy != nil { + l = m.Policy.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.NamedEndpoints) > 0 { + for k, v := range m.NamedEndpoints { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.go new file mode 100644 index 0000000000..07d96da496 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.go @@ -0,0 +1,978 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/endpoint/v3/load_report.proto + +package endpointv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/cncf/xds/go/xds/annotations/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// These are stats Envoy reports to the management server at a frequency defined by +// :ref:`LoadStatsResponse.load_reporting_interval`. +// Stats per upstream region/zone and optionally per subzone. +// [#next-free-field: 15] +type UpstreamLocalityStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of zone, region and optionally endpoint group these metrics were + // collected from. Zone and region names could be empty if unknown. + Locality *v3.Locality `protobuf:"bytes,1,opt,name=locality,proto3" json:"locality,omitempty"` + // The total number of requests successfully completed by the endpoints in the + // locality. + TotalSuccessfulRequests uint64 `protobuf:"varint,2,opt,name=total_successful_requests,json=totalSuccessfulRequests,proto3" json:"total_successful_requests,omitempty"` + // The total number of unfinished requests + TotalRequestsInProgress uint64 `protobuf:"varint,3,opt,name=total_requests_in_progress,json=totalRequestsInProgress,proto3" json:"total_requests_in_progress,omitempty"` + // The total number of requests that failed due to errors at the endpoint, + // aggregated over all endpoints in the locality. 
+ TotalErrorRequests uint64 `protobuf:"varint,4,opt,name=total_error_requests,json=totalErrorRequests,proto3" json:"total_error_requests,omitempty"` + // The total number of requests that were issued by this Envoy since + // the last report. This information is aggregated over all the + // upstream endpoints in the locality. + TotalIssuedRequests uint64 `protobuf:"varint,8,opt,name=total_issued_requests,json=totalIssuedRequests,proto3" json:"total_issued_requests,omitempty"` + // The total number of connections in an established state at the time of the + // report. This field is aggregated over all the upstream endpoints in the + // locality. + // In Envoy, this information may be based on “upstream_cx_active metric“. + // [#not-implemented-hide:] + TotalActiveConnections uint64 `protobuf:"varint,9,opt,name=total_active_connections,json=totalActiveConnections,proto3" json:"total_active_connections,omitempty"` + // The total number of connections opened since the last report. + // This field is aggregated over all the upstream endpoints in the locality. + // In Envoy, this information may be based on “upstream_cx_total“ metric + // compared to itself between start and end of an interval, i.e. + // “upstream_cx_total“(now) - “upstream_cx_total“(now - + // load_report_interval). + // [#not-implemented-hide:] + TotalNewConnections uint64 `protobuf:"varint,10,opt,name=total_new_connections,json=totalNewConnections,proto3" json:"total_new_connections,omitempty"` + // The total number of connection failures since the last report. + // This field is aggregated over all the upstream endpoints in the locality. + // In Envoy, this information may be based on “upstream_cx_connect_fail“ + // metric compared to itself between start and end of an interval, i.e. + // “upstream_cx_connect_fail“(now) - “upstream_cx_connect_fail“(now - + // load_report_interval). + // [#not-implemented-hide:] + TotalFailConnections uint64 `protobuf:"varint,11,opt,name=total_fail_connections,json=totalFailConnections,proto3" json:"total_fail_connections,omitempty"` + // CPU utilization stats for multi-dimensional load balancing. + // This typically comes from endpoint metrics reported via ORCA. + CpuUtilization *UnnamedEndpointLoadMetricStats `protobuf:"bytes,12,opt,name=cpu_utilization,json=cpuUtilization,proto3" json:"cpu_utilization,omitempty"` + // Memory utilization for multi-dimensional load balancing. + // This typically comes from endpoint metrics reported via ORCA. + MemUtilization *UnnamedEndpointLoadMetricStats `protobuf:"bytes,13,opt,name=mem_utilization,json=memUtilization,proto3" json:"mem_utilization,omitempty"` + // Blended application-defined utilization for multi-dimensional load balancing. + // This typically comes from endpoint metrics reported via ORCA. + ApplicationUtilization *UnnamedEndpointLoadMetricStats `protobuf:"bytes,14,opt,name=application_utilization,json=applicationUtilization,proto3" json:"application_utilization,omitempty"` + // Named stats for multi-dimensional load balancing. + // These typically come from endpoint metrics reported via ORCA. + LoadMetricStats []*EndpointLoadMetricStats `protobuf:"bytes,5,rep,name=load_metric_stats,json=loadMetricStats,proto3" json:"load_metric_stats,omitempty"` + // Endpoint granularity stats information for this locality. This information + // is populated if the Server requests it by setting + // :ref:`LoadStatsResponse.report_endpoint_granularity`. 
+ UpstreamEndpointStats []*UpstreamEndpointStats `protobuf:"bytes,7,rep,name=upstream_endpoint_stats,json=upstreamEndpointStats,proto3" json:"upstream_endpoint_stats,omitempty"` + // [#not-implemented-hide:] The priority of the endpoint group these metrics + // were collected from. + Priority uint32 `protobuf:"varint,6,opt,name=priority,proto3" json:"priority,omitempty"` +} + +func (x *UpstreamLocalityStats) Reset() { + *x = UpstreamLocalityStats{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpstreamLocalityStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpstreamLocalityStats) ProtoMessage() {} + +func (x *UpstreamLocalityStats) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpstreamLocalityStats.ProtoReflect.Descriptor instead. +func (*UpstreamLocalityStats) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_load_report_proto_rawDescGZIP(), []int{0} +} + +func (x *UpstreamLocalityStats) GetLocality() *v3.Locality { + if x != nil { + return x.Locality + } + return nil +} + +func (x *UpstreamLocalityStats) GetTotalSuccessfulRequests() uint64 { + if x != nil { + return x.TotalSuccessfulRequests + } + return 0 +} + +func (x *UpstreamLocalityStats) GetTotalRequestsInProgress() uint64 { + if x != nil { + return x.TotalRequestsInProgress + } + return 0 +} + +func (x *UpstreamLocalityStats) GetTotalErrorRequests() uint64 { + if x != nil { + return x.TotalErrorRequests + } + return 0 +} + +func (x *UpstreamLocalityStats) GetTotalIssuedRequests() uint64 { + if x != nil { + return x.TotalIssuedRequests + } + return 0 +} + +func (x *UpstreamLocalityStats) GetTotalActiveConnections() uint64 { + if x != nil { + return x.TotalActiveConnections + } + return 0 +} + +func (x *UpstreamLocalityStats) GetTotalNewConnections() uint64 { + if x != nil { + return x.TotalNewConnections + } + return 0 +} + +func (x *UpstreamLocalityStats) GetTotalFailConnections() uint64 { + if x != nil { + return x.TotalFailConnections + } + return 0 +} + +func (x *UpstreamLocalityStats) GetCpuUtilization() *UnnamedEndpointLoadMetricStats { + if x != nil { + return x.CpuUtilization + } + return nil +} + +func (x *UpstreamLocalityStats) GetMemUtilization() *UnnamedEndpointLoadMetricStats { + if x != nil { + return x.MemUtilization + } + return nil +} + +func (x *UpstreamLocalityStats) GetApplicationUtilization() *UnnamedEndpointLoadMetricStats { + if x != nil { + return x.ApplicationUtilization + } + return nil +} + +func (x *UpstreamLocalityStats) GetLoadMetricStats() []*EndpointLoadMetricStats { + if x != nil { + return x.LoadMetricStats + } + return nil +} + +func (x *UpstreamLocalityStats) GetUpstreamEndpointStats() []*UpstreamEndpointStats { + if x != nil { + return x.UpstreamEndpointStats + } + return nil +} + +func (x *UpstreamLocalityStats) GetPriority() uint32 { + if x != nil { + return x.Priority + } + return 0 +} + +// [#next-free-field: 8] +type UpstreamEndpointStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Upstream host address. 
+ Address *v3.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // Opaque and implementation dependent metadata of the + // endpoint. Envoy will pass this directly to the management server. + Metadata *structpb.Struct `protobuf:"bytes,6,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The total number of requests successfully completed by the endpoints in the + // locality. These include non-5xx responses for HTTP, where errors + // originate at the client and the endpoint responded successfully. For gRPC, + // the grpc-status values are those not covered by total_error_requests below. + TotalSuccessfulRequests uint64 `protobuf:"varint,2,opt,name=total_successful_requests,json=totalSuccessfulRequests,proto3" json:"total_successful_requests,omitempty"` + // The total number of unfinished requests for this endpoint. + TotalRequestsInProgress uint64 `protobuf:"varint,3,opt,name=total_requests_in_progress,json=totalRequestsInProgress,proto3" json:"total_requests_in_progress,omitempty"` + // The total number of requests that failed due to errors at the endpoint. + // For HTTP these are responses with 5xx status codes and for gRPC the + // grpc-status values: + // + // - DeadlineExceeded + // - Unimplemented + // - Internal + // - Unavailable + // - Unknown + // - DataLoss + TotalErrorRequests uint64 `protobuf:"varint,4,opt,name=total_error_requests,json=totalErrorRequests,proto3" json:"total_error_requests,omitempty"` + // The total number of requests that were issued to this endpoint + // since the last report. A single TCP connection, HTTP or gRPC + // request or stream is counted as one request. + TotalIssuedRequests uint64 `protobuf:"varint,7,opt,name=total_issued_requests,json=totalIssuedRequests,proto3" json:"total_issued_requests,omitempty"` + // Stats for multi-dimensional load balancing. + LoadMetricStats []*EndpointLoadMetricStats `protobuf:"bytes,5,rep,name=load_metric_stats,json=loadMetricStats,proto3" json:"load_metric_stats,omitempty"` +} + +func (x *UpstreamEndpointStats) Reset() { + *x = UpstreamEndpointStats{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpstreamEndpointStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpstreamEndpointStats) ProtoMessage() {} + +func (x *UpstreamEndpointStats) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpstreamEndpointStats.ProtoReflect.Descriptor instead. 
+func (*UpstreamEndpointStats) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_load_report_proto_rawDescGZIP(), []int{1} +} + +func (x *UpstreamEndpointStats) GetAddress() *v3.Address { + if x != nil { + return x.Address + } + return nil +} + +func (x *UpstreamEndpointStats) GetMetadata() *structpb.Struct { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *UpstreamEndpointStats) GetTotalSuccessfulRequests() uint64 { + if x != nil { + return x.TotalSuccessfulRequests + } + return 0 +} + +func (x *UpstreamEndpointStats) GetTotalRequestsInProgress() uint64 { + if x != nil { + return x.TotalRequestsInProgress + } + return 0 +} + +func (x *UpstreamEndpointStats) GetTotalErrorRequests() uint64 { + if x != nil { + return x.TotalErrorRequests + } + return 0 +} + +func (x *UpstreamEndpointStats) GetTotalIssuedRequests() uint64 { + if x != nil { + return x.TotalIssuedRequests + } + return 0 +} + +func (x *UpstreamEndpointStats) GetLoadMetricStats() []*EndpointLoadMetricStats { + if x != nil { + return x.LoadMetricStats + } + return nil +} + +type EndpointLoadMetricStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the metric; may be empty. + MetricName string `protobuf:"bytes,1,opt,name=metric_name,json=metricName,proto3" json:"metric_name,omitempty"` + // Number of calls that finished and included this metric. + NumRequestsFinishedWithMetric uint64 `protobuf:"varint,2,opt,name=num_requests_finished_with_metric,json=numRequestsFinishedWithMetric,proto3" json:"num_requests_finished_with_metric,omitempty"` + // Sum of metric values across all calls that finished with this metric for + // load_reporting_interval. + TotalMetricValue float64 `protobuf:"fixed64,3,opt,name=total_metric_value,json=totalMetricValue,proto3" json:"total_metric_value,omitempty"` +} + +func (x *EndpointLoadMetricStats) Reset() { + *x = EndpointLoadMetricStats{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EndpointLoadMetricStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EndpointLoadMetricStats) ProtoMessage() {} + +func (x *EndpointLoadMetricStats) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EndpointLoadMetricStats.ProtoReflect.Descriptor instead. +func (*EndpointLoadMetricStats) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_load_report_proto_rawDescGZIP(), []int{2} +} + +func (x *EndpointLoadMetricStats) GetMetricName() string { + if x != nil { + return x.MetricName + } + return "" +} + +func (x *EndpointLoadMetricStats) GetNumRequestsFinishedWithMetric() uint64 { + if x != nil { + return x.NumRequestsFinishedWithMetric + } + return 0 +} + +func (x *EndpointLoadMetricStats) GetTotalMetricValue() float64 { + if x != nil { + return x.TotalMetricValue + } + return 0 +} + +// Same as EndpointLoadMetricStats, except without the metric_name field. 
+type UnnamedEndpointLoadMetricStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Number of calls that finished and included this metric. + NumRequestsFinishedWithMetric uint64 `protobuf:"varint,1,opt,name=num_requests_finished_with_metric,json=numRequestsFinishedWithMetric,proto3" json:"num_requests_finished_with_metric,omitempty"` + // Sum of metric values across all calls that finished with this metric for + // load_reporting_interval. + TotalMetricValue float64 `protobuf:"fixed64,2,opt,name=total_metric_value,json=totalMetricValue,proto3" json:"total_metric_value,omitempty"` +} + +func (x *UnnamedEndpointLoadMetricStats) Reset() { + *x = UnnamedEndpointLoadMetricStats{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UnnamedEndpointLoadMetricStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnnamedEndpointLoadMetricStats) ProtoMessage() {} + +func (x *UnnamedEndpointLoadMetricStats) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnnamedEndpointLoadMetricStats.ProtoReflect.Descriptor instead. +func (*UnnamedEndpointLoadMetricStats) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_load_report_proto_rawDescGZIP(), []int{3} +} + +func (x *UnnamedEndpointLoadMetricStats) GetNumRequestsFinishedWithMetric() uint64 { + if x != nil { + return x.NumRequestsFinishedWithMetric + } + return 0 +} + +func (x *UnnamedEndpointLoadMetricStats) GetTotalMetricValue() float64 { + if x != nil { + return x.TotalMetricValue + } + return 0 +} + +// Per cluster load stats. Envoy reports these stats a management server in a +// :ref:`LoadStatsRequest` +// Next ID: 7 +// [#next-free-field: 7] +type ClusterStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the cluster. + ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + // The eds_cluster_config service_name of the cluster. + // It's possible that two clusters send the same service_name to EDS, + // in that case, the management server is supposed to do aggregation on the load reports. + ClusterServiceName string `protobuf:"bytes,6,opt,name=cluster_service_name,json=clusterServiceName,proto3" json:"cluster_service_name,omitempty"` + // Need at least one. + UpstreamLocalityStats []*UpstreamLocalityStats `protobuf:"bytes,2,rep,name=upstream_locality_stats,json=upstreamLocalityStats,proto3" json:"upstream_locality_stats,omitempty"` + // Cluster-level stats such as total_successful_requests may be computed by + // summing upstream_locality_stats. In addition, below there are additional + // cluster-wide stats. + // + // The total number of dropped requests. This covers requests + // deliberately dropped by the drop_overload policy and circuit breaking. 
+ TotalDroppedRequests uint64 `protobuf:"varint,3,opt,name=total_dropped_requests,json=totalDroppedRequests,proto3" json:"total_dropped_requests,omitempty"` + // Information about deliberately dropped requests for each category specified + // in the DropOverload policy. + DroppedRequests []*ClusterStats_DroppedRequests `protobuf:"bytes,5,rep,name=dropped_requests,json=droppedRequests,proto3" json:"dropped_requests,omitempty"` + // Period over which the actual load report occurred. This will be guaranteed to include every + // request reported. Due to system load and delays between the “LoadStatsRequest“ sent from Envoy + // and the “LoadStatsResponse“ message sent from the management server, this may be longer than + // the requested load reporting interval in the “LoadStatsResponse“. + LoadReportInterval *durationpb.Duration `protobuf:"bytes,4,opt,name=load_report_interval,json=loadReportInterval,proto3" json:"load_report_interval,omitempty"` +} + +func (x *ClusterStats) Reset() { + *x = ClusterStats{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClusterStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClusterStats) ProtoMessage() {} + +func (x *ClusterStats) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClusterStats.ProtoReflect.Descriptor instead. +func (*ClusterStats) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_load_report_proto_rawDescGZIP(), []int{4} +} + +func (x *ClusterStats) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *ClusterStats) GetClusterServiceName() string { + if x != nil { + return x.ClusterServiceName + } + return "" +} + +func (x *ClusterStats) GetUpstreamLocalityStats() []*UpstreamLocalityStats { + if x != nil { + return x.UpstreamLocalityStats + } + return nil +} + +func (x *ClusterStats) GetTotalDroppedRequests() uint64 { + if x != nil { + return x.TotalDroppedRequests + } + return 0 +} + +func (x *ClusterStats) GetDroppedRequests() []*ClusterStats_DroppedRequests { + if x != nil { + return x.DroppedRequests + } + return nil +} + +func (x *ClusterStats) GetLoadReportInterval() *durationpb.Duration { + if x != nil { + return x.LoadReportInterval + } + return nil +} + +type ClusterStats_DroppedRequests struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier for the policy specifying the drop. + Category string `protobuf:"bytes,1,opt,name=category,proto3" json:"category,omitempty"` + // Total number of deliberately dropped requests for the category. 
+ DroppedCount uint64 `protobuf:"varint,2,opt,name=dropped_count,json=droppedCount,proto3" json:"dropped_count,omitempty"` +} + +func (x *ClusterStats_DroppedRequests) Reset() { + *x = ClusterStats_DroppedRequests{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClusterStats_DroppedRequests) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClusterStats_DroppedRequests) ProtoMessage() {} + +func (x *ClusterStats_DroppedRequests) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_endpoint_v3_load_report_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClusterStats_DroppedRequests.ProtoReflect.Descriptor instead. +func (*ClusterStats_DroppedRequests) Descriptor() ([]byte, []int) { + return file_envoy_config_endpoint_v3_load_report_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *ClusterStats_DroppedRequests) GetCategory() string { + if x != nil { + return x.Category + } + return "" +} + +func (x *ClusterStats_DroppedRequests) GetDroppedCount() uint64 { + if x != nil { + return x.DroppedCount + } + return 0 +} + +var File_envoy_config_endpoint_v3_load_report_proto protoreflect.FileDescriptor + +var file_envoy_config_endpoint_v3_load_report_proto_rawDesc = []byte{ + 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x65, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2f, 0x76, 0x33, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, + 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x22, 0xc5, 0x08, 0x0a, 0x15, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, + 0x3a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x19, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x74, 0x6f, 0x74, 0x61, 0x6c, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, + 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x74, 0x6f, 0x74, + 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, + 0x72, 0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, + 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x49, 0x73, 0x73, 0x75, + 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x42, 0x0a, 0x18, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x42, 0x08, 0xd2, 0xc6, + 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x16, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x41, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, + 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x42, 0x08, 0xd2, + 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x4e, 0x65, + 0x77, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3e, 0x0a, 0x16, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x42, 0x08, 0xd2, 0xc6, + 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x14, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x46, 0x61, 0x69, + 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x61, 0x0a, 0x0f, + 0x63, 0x70, 0x75, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, + 0x2e, 0x55, 0x6e, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x4c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 
0x69, 0x63, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, + 0x0e, 0x63, 0x70, 0x75, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x61, 0x0a, 0x0f, 0x6d, 0x65, 0x6d, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x6e, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x74, 0x61, + 0x74, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x6d, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x71, 0x0a, 0x17, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x55, + 0x6e, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x4c, 0x6f, + 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x16, 0x61, + 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5d, 0x0a, 0x11, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x52, 0x0f, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, + 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, + 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x15, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x1a, 0x0a, + 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, + 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x65, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, 0x22, 0xf7, 0x03, + 0x0a, 0x15, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 
0x2e, 0x76, 0x33, 0x2e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3a, 0x0a, 0x19, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x73, 0x5f, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x73, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x30, + 0x0a, 0x14, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x12, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, + 0x12, 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x49, 0x73, 0x73, 0x75, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x74, 0x61, + 0x74, 0x73, 0x52, 0x0f, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x22, 0xe8, 0x01, 0x0a, 0x17, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x48, 0x0a, 0x21, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x77, 0x69, + 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x1d, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x46, 0x69, 0x6e, 0x69, + 0x73, 0x68, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x2c, + 
0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, + 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x34, 0x9a, 0xc5, + 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x74, 0x61, + 0x74, 0x73, 0x22, 0x98, 0x01, 0x0a, 0x1e, 0x55, 0x6e, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x48, 0x0a, 0x21, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x77, + 0x69, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x1d, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x46, 0x69, 0x6e, + 0x69, 0x73, 0x68, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, + 0x2c, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x74, 0x6f, 0x74, + 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x89, 0x05, + 0x0a, 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x2a, + 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x71, 0x0a, 0x17, + 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x15, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, + 0x34, 0x0a, 0x16, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x14, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x61, 0x0a, 0x10, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, + 0x6e, 0x64, 0x70, 0x6f, 
0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x52, 0x0f, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x4b, 0x0a, 0x14, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x12, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x76, 0x61, 0x6c, 0x1a, 0x96, 0x01, 0x0a, 0x0f, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x08, 0x63, 0x61, 0x74, + 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x52, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x23, + 0x0a, 0x0d, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x3a, 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x44, + 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x3a, 0x29, + 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x42, 0x8f, 0x01, 0xba, 0x80, 0xc8, 0xd1, + 0x06, 0x02, 0x10, 0x02, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x4c, 0x6f, + 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2f, 0x76, 0x33, + 0x3b, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_endpoint_v3_load_report_proto_rawDescOnce sync.Once + file_envoy_config_endpoint_v3_load_report_proto_rawDescData = file_envoy_config_endpoint_v3_load_report_proto_rawDesc +) + +func file_envoy_config_endpoint_v3_load_report_proto_rawDescGZIP() []byte { + file_envoy_config_endpoint_v3_load_report_proto_rawDescOnce.Do(func() { + file_envoy_config_endpoint_v3_load_report_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_endpoint_v3_load_report_proto_rawDescData) + }) + return file_envoy_config_endpoint_v3_load_report_proto_rawDescData +} + +var 
file_envoy_config_endpoint_v3_load_report_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_envoy_config_endpoint_v3_load_report_proto_goTypes = []interface{}{ + (*UpstreamLocalityStats)(nil), // 0: envoy.config.endpoint.v3.UpstreamLocalityStats + (*UpstreamEndpointStats)(nil), // 1: envoy.config.endpoint.v3.UpstreamEndpointStats + (*EndpointLoadMetricStats)(nil), // 2: envoy.config.endpoint.v3.EndpointLoadMetricStats + (*UnnamedEndpointLoadMetricStats)(nil), // 3: envoy.config.endpoint.v3.UnnamedEndpointLoadMetricStats + (*ClusterStats)(nil), // 4: envoy.config.endpoint.v3.ClusterStats + (*ClusterStats_DroppedRequests)(nil), // 5: envoy.config.endpoint.v3.ClusterStats.DroppedRequests + (*v3.Locality)(nil), // 6: envoy.config.core.v3.Locality + (*v3.Address)(nil), // 7: envoy.config.core.v3.Address + (*structpb.Struct)(nil), // 8: google.protobuf.Struct + (*durationpb.Duration)(nil), // 9: google.protobuf.Duration +} +var file_envoy_config_endpoint_v3_load_report_proto_depIdxs = []int32{ + 6, // 0: envoy.config.endpoint.v3.UpstreamLocalityStats.locality:type_name -> envoy.config.core.v3.Locality + 3, // 1: envoy.config.endpoint.v3.UpstreamLocalityStats.cpu_utilization:type_name -> envoy.config.endpoint.v3.UnnamedEndpointLoadMetricStats + 3, // 2: envoy.config.endpoint.v3.UpstreamLocalityStats.mem_utilization:type_name -> envoy.config.endpoint.v3.UnnamedEndpointLoadMetricStats + 3, // 3: envoy.config.endpoint.v3.UpstreamLocalityStats.application_utilization:type_name -> envoy.config.endpoint.v3.UnnamedEndpointLoadMetricStats + 2, // 4: envoy.config.endpoint.v3.UpstreamLocalityStats.load_metric_stats:type_name -> envoy.config.endpoint.v3.EndpointLoadMetricStats + 1, // 5: envoy.config.endpoint.v3.UpstreamLocalityStats.upstream_endpoint_stats:type_name -> envoy.config.endpoint.v3.UpstreamEndpointStats + 7, // 6: envoy.config.endpoint.v3.UpstreamEndpointStats.address:type_name -> envoy.config.core.v3.Address + 8, // 7: envoy.config.endpoint.v3.UpstreamEndpointStats.metadata:type_name -> google.protobuf.Struct + 2, // 8: envoy.config.endpoint.v3.UpstreamEndpointStats.load_metric_stats:type_name -> envoy.config.endpoint.v3.EndpointLoadMetricStats + 0, // 9: envoy.config.endpoint.v3.ClusterStats.upstream_locality_stats:type_name -> envoy.config.endpoint.v3.UpstreamLocalityStats + 5, // 10: envoy.config.endpoint.v3.ClusterStats.dropped_requests:type_name -> envoy.config.endpoint.v3.ClusterStats.DroppedRequests + 9, // 11: envoy.config.endpoint.v3.ClusterStats.load_report_interval:type_name -> google.protobuf.Duration + 12, // [12:12] is the sub-list for method output_type + 12, // [12:12] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_envoy_config_endpoint_v3_load_report_proto_init() } +func file_envoy_config_endpoint_v3_load_report_proto_init() { + if File_envoy_config_endpoint_v3_load_report_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_endpoint_v3_load_report_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpstreamLocalityStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_endpoint_v3_load_report_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpstreamEndpointStats); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_endpoint_v3_load_report_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EndpointLoadMetricStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_endpoint_v3_load_report_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UnnamedEndpointLoadMetricStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_endpoint_v3_load_report_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClusterStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_endpoint_v3_load_report_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClusterStats_DroppedRequests); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_endpoint_v3_load_report_proto_rawDesc, + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_endpoint_v3_load_report_proto_goTypes, + DependencyIndexes: file_envoy_config_endpoint_v3_load_report_proto_depIdxs, + MessageInfos: file_envoy_config_endpoint_v3_load_report_proto_msgTypes, + }.Build() + File_envoy_config_endpoint_v3_load_report_proto = out.File + file_envoy_config_endpoint_v3_load_report_proto_rawDesc = nil + file_envoy_config_endpoint_v3_load_report_proto_goTypes = nil + file_envoy_config_endpoint_v3_load_report_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.validate.go new file mode 100644 index 0000000000..fd7a0c6c1c --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report.pb.validate.go @@ -0,0 +1,1094 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/endpoint/v3/load_report.proto + +package endpointv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on UpstreamLocalityStats with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *UpstreamLocalityStats) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UpstreamLocalityStats with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// UpstreamLocalityStatsMultiError, or nil if none found. +func (m *UpstreamLocalityStats) ValidateAll() error { + return m.validate(true) +} + +func (m *UpstreamLocalityStats) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetLocality()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: "Locality", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: "Locality", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocality()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamLocalityStatsValidationError{ + field: "Locality", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for TotalSuccessfulRequests + + // no validation rules for TotalRequestsInProgress + + // no validation rules for TotalErrorRequests + + // no validation rules for TotalIssuedRequests + + // no validation rules for TotalActiveConnections + + // no validation rules for TotalNewConnections + + // no validation rules for TotalFailConnections + + if all { + switch v := interface{}(m.GetCpuUtilization()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: "CpuUtilization", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: "CpuUtilization", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCpuUtilization()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamLocalityStatsValidationError{ + field: "CpuUtilization", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMemUtilization()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: "MemUtilization", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: "MemUtilization", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMemUtilization()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamLocalityStatsValidationError{ + field: "MemUtilization", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetApplicationUtilization()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: "ApplicationUtilization", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ 
Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: "ApplicationUtilization", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetApplicationUtilization()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamLocalityStatsValidationError{ + field: "ApplicationUtilization", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetLoadMetricStats() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: fmt.Sprintf("LoadMetricStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: fmt.Sprintf("LoadMetricStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamLocalityStatsValidationError{ + field: fmt.Sprintf("LoadMetricStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetUpstreamEndpointStats() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: fmt.Sprintf("UpstreamEndpointStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamLocalityStatsValidationError{ + field: fmt.Sprintf("UpstreamEndpointStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamLocalityStatsValidationError{ + field: fmt.Sprintf("UpstreamEndpointStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for Priority + + if len(errors) > 0 { + return UpstreamLocalityStatsMultiError(errors) + } + + return nil +} + +// UpstreamLocalityStatsMultiError is an error wrapping multiple validation +// errors returned by UpstreamLocalityStats.ValidateAll() if the designated +// constraints aren't met. +type UpstreamLocalityStatsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UpstreamLocalityStatsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UpstreamLocalityStatsMultiError) AllErrors() []error { return m } + +// UpstreamLocalityStatsValidationError is the validation error returned by +// UpstreamLocalityStats.Validate if the designated constraints aren't met. +type UpstreamLocalityStatsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e UpstreamLocalityStatsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UpstreamLocalityStatsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UpstreamLocalityStatsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UpstreamLocalityStatsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UpstreamLocalityStatsValidationError) ErrorName() string { + return "UpstreamLocalityStatsValidationError" +} + +// Error satisfies the builtin error interface +func (e UpstreamLocalityStatsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUpstreamLocalityStats.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UpstreamLocalityStatsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UpstreamLocalityStatsValidationError{} + +// Validate checks the field values on UpstreamEndpointStats with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *UpstreamEndpointStats) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UpstreamEndpointStats with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// UpstreamEndpointStatsMultiError, or nil if none found. +func (m *UpstreamEndpointStats) ValidateAll() error { + return m.validate(true) +} + +func (m *UpstreamEndpointStats) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamEndpointStatsValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamEndpointStatsValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamEndpointStatsValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamEndpointStatsValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamEndpointStatsValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamEndpointStatsValidationError{ + field: "Metadata", + reason: "embedded message failed 
validation", + cause: err, + } + } + } + + // no validation rules for TotalSuccessfulRequests + + // no validation rules for TotalRequestsInProgress + + // no validation rules for TotalErrorRequests + + // no validation rules for TotalIssuedRequests + + for idx, item := range m.GetLoadMetricStats() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamEndpointStatsValidationError{ + field: fmt.Sprintf("LoadMetricStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamEndpointStatsValidationError{ + field: fmt.Sprintf("LoadMetricStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamEndpointStatsValidationError{ + field: fmt.Sprintf("LoadMetricStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return UpstreamEndpointStatsMultiError(errors) + } + + return nil +} + +// UpstreamEndpointStatsMultiError is an error wrapping multiple validation +// errors returned by UpstreamEndpointStats.ValidateAll() if the designated +// constraints aren't met. +type UpstreamEndpointStatsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UpstreamEndpointStatsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UpstreamEndpointStatsMultiError) AllErrors() []error { return m } + +// UpstreamEndpointStatsValidationError is the validation error returned by +// UpstreamEndpointStats.Validate if the designated constraints aren't met. +type UpstreamEndpointStatsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UpstreamEndpointStatsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UpstreamEndpointStatsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UpstreamEndpointStatsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UpstreamEndpointStatsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UpstreamEndpointStatsValidationError) ErrorName() string { + return "UpstreamEndpointStatsValidationError" +} + +// Error satisfies the builtin error interface +func (e UpstreamEndpointStatsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUpstreamEndpointStats.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UpstreamEndpointStatsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UpstreamEndpointStatsValidationError{} + +// Validate checks the field values on EndpointLoadMetricStats with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *EndpointLoadMetricStats) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on EndpointLoadMetricStats with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// EndpointLoadMetricStatsMultiError, or nil if none found. +func (m *EndpointLoadMetricStats) ValidateAll() error { + return m.validate(true) +} + +func (m *EndpointLoadMetricStats) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for MetricName + + // no validation rules for NumRequestsFinishedWithMetric + + // no validation rules for TotalMetricValue + + if len(errors) > 0 { + return EndpointLoadMetricStatsMultiError(errors) + } + + return nil +} + +// EndpointLoadMetricStatsMultiError is an error wrapping multiple validation +// errors returned by EndpointLoadMetricStats.ValidateAll() if the designated +// constraints aren't met. +type EndpointLoadMetricStatsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EndpointLoadMetricStatsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EndpointLoadMetricStatsMultiError) AllErrors() []error { return m } + +// EndpointLoadMetricStatsValidationError is the validation error returned by +// EndpointLoadMetricStats.Validate if the designated constraints aren't met. +type EndpointLoadMetricStatsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EndpointLoadMetricStatsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EndpointLoadMetricStatsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EndpointLoadMetricStatsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EndpointLoadMetricStatsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e EndpointLoadMetricStatsValidationError) ErrorName() string { + return "EndpointLoadMetricStatsValidationError" +} + +// Error satisfies the builtin error interface +func (e EndpointLoadMetricStatsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEndpointLoadMetricStats.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EndpointLoadMetricStatsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EndpointLoadMetricStatsValidationError{} + +// Validate checks the field values on UnnamedEndpointLoadMetricStats with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *UnnamedEndpointLoadMetricStats) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UnnamedEndpointLoadMetricStats with +// the rules defined in the proto definition for this message. 
If any rules +// are violated, the result is a list of violation errors wrapped in +// UnnamedEndpointLoadMetricStatsMultiError, or nil if none found. +func (m *UnnamedEndpointLoadMetricStats) ValidateAll() error { + return m.validate(true) +} + +func (m *UnnamedEndpointLoadMetricStats) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for NumRequestsFinishedWithMetric + + // no validation rules for TotalMetricValue + + if len(errors) > 0 { + return UnnamedEndpointLoadMetricStatsMultiError(errors) + } + + return nil +} + +// UnnamedEndpointLoadMetricStatsMultiError is an error wrapping multiple +// validation errors returned by UnnamedEndpointLoadMetricStats.ValidateAll() +// if the designated constraints aren't met. +type UnnamedEndpointLoadMetricStatsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UnnamedEndpointLoadMetricStatsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UnnamedEndpointLoadMetricStatsMultiError) AllErrors() []error { return m } + +// UnnamedEndpointLoadMetricStatsValidationError is the validation error +// returned by UnnamedEndpointLoadMetricStats.Validate if the designated +// constraints aren't met. +type UnnamedEndpointLoadMetricStatsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UnnamedEndpointLoadMetricStatsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UnnamedEndpointLoadMetricStatsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UnnamedEndpointLoadMetricStatsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UnnamedEndpointLoadMetricStatsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UnnamedEndpointLoadMetricStatsValidationError) ErrorName() string { + return "UnnamedEndpointLoadMetricStatsValidationError" +} + +// Error satisfies the builtin error interface +func (e UnnamedEndpointLoadMetricStatsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUnnamedEndpointLoadMetricStats.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UnnamedEndpointLoadMetricStatsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UnnamedEndpointLoadMetricStatsValidationError{} + +// Validate checks the field values on ClusterStats with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ClusterStats) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClusterStats with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ClusterStatsMultiError, or +// nil if none found. 
+func (m *ClusterStats) ValidateAll() error { + return m.validate(true) +} + +func (m *ClusterStats) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetClusterName()) < 1 { + err := ClusterStatsValidationError{ + field: "ClusterName", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for ClusterServiceName + + if len(m.GetUpstreamLocalityStats()) < 1 { + err := ClusterStatsValidationError{ + field: "UpstreamLocalityStats", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetUpstreamLocalityStats() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterStatsValidationError{ + field: fmt.Sprintf("UpstreamLocalityStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterStatsValidationError{ + field: fmt.Sprintf("UpstreamLocalityStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterStatsValidationError{ + field: fmt.Sprintf("UpstreamLocalityStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for TotalDroppedRequests + + for idx, item := range m.GetDroppedRequests() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterStatsValidationError{ + field: fmt.Sprintf("DroppedRequests[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterStatsValidationError{ + field: fmt.Sprintf("DroppedRequests[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterStatsValidationError{ + field: fmt.Sprintf("DroppedRequests[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetLoadReportInterval()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterStatsValidationError{ + field: "LoadReportInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterStatsValidationError{ + field: "LoadReportInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLoadReportInterval()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterStatsValidationError{ + field: "LoadReportInterval", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ClusterStatsMultiError(errors) + } + + return nil +} 
+ +// ClusterStatsMultiError is an error wrapping multiple validation errors +// returned by ClusterStats.ValidateAll() if the designated constraints aren't met. +type ClusterStatsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClusterStatsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClusterStatsMultiError) AllErrors() []error { return m } + +// ClusterStatsValidationError is the validation error returned by +// ClusterStats.Validate if the designated constraints aren't met. +type ClusterStatsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClusterStatsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClusterStatsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClusterStatsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClusterStatsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClusterStatsValidationError) ErrorName() string { return "ClusterStatsValidationError" } + +// Error satisfies the builtin error interface +func (e ClusterStatsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClusterStats.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClusterStatsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClusterStatsValidationError{} + +// Validate checks the field values on ClusterStats_DroppedRequests with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ClusterStats_DroppedRequests) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClusterStats_DroppedRequests with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ClusterStats_DroppedRequestsMultiError, or nil if none found. +func (m *ClusterStats_DroppedRequests) ValidateAll() error { + return m.validate(true) +} + +func (m *ClusterStats_DroppedRequests) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetCategory()) < 1 { + err := ClusterStats_DroppedRequestsValidationError{ + field: "Category", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for DroppedCount + + if len(errors) > 0 { + return ClusterStats_DroppedRequestsMultiError(errors) + } + + return nil +} + +// ClusterStats_DroppedRequestsMultiError is an error wrapping multiple +// validation errors returned by ClusterStats_DroppedRequests.ValidateAll() if +// the designated constraints aren't met. +type ClusterStats_DroppedRequestsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ClusterStats_DroppedRequestsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClusterStats_DroppedRequestsMultiError) AllErrors() []error { return m } + +// ClusterStats_DroppedRequestsValidationError is the validation error returned +// by ClusterStats_DroppedRequests.Validate if the designated constraints +// aren't met. +type ClusterStats_DroppedRequestsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClusterStats_DroppedRequestsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClusterStats_DroppedRequestsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClusterStats_DroppedRequestsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClusterStats_DroppedRequestsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClusterStats_DroppedRequestsValidationError) ErrorName() string { + return "ClusterStats_DroppedRequestsValidationError" +} + +// Error satisfies the builtin error interface +func (e ClusterStats_DroppedRequestsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClusterStats_DroppedRequests.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClusterStats_DroppedRequestsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClusterStats_DroppedRequestsValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report_vtproto.pb.go new file mode 100644 index 0000000000..31b280a340 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3/load_report_vtproto.pb.go @@ -0,0 +1,696 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/endpoint/v3/load_report.proto + +package endpointv3 + +import ( + binary "encoding/binary" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *UpstreamLocalityStats) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpstreamLocalityStats) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UpstreamLocalityStats) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ApplicationUtilization != nil { + size, err := m.ApplicationUtilization.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if m.MemUtilization != nil { + size, err := m.MemUtilization.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + if m.CpuUtilization != nil { + size, err := m.CpuUtilization.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if m.TotalFailConnections != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalFailConnections)) + i-- + dAtA[i] = 0x58 + } + if m.TotalNewConnections != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalNewConnections)) + i-- + dAtA[i] = 0x50 + } + if m.TotalActiveConnections != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalActiveConnections)) + i-- + dAtA[i] = 0x48 + } + if m.TotalIssuedRequests != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalIssuedRequests)) + i-- + dAtA[i] = 0x40 + } + if len(m.UpstreamEndpointStats) > 0 { + for iNdEx := len(m.UpstreamEndpointStats) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.UpstreamEndpointStats[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + } + if m.Priority != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x30 + } + if len(m.LoadMetricStats) > 0 { + for iNdEx := len(m.LoadMetricStats) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.LoadMetricStats[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.TotalErrorRequests != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalErrorRequests)) + i-- + dAtA[i] = 0x20 + } + if m.TotalRequestsInProgress != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalRequestsInProgress)) + i-- + dAtA[i] = 0x18 + } + if m.TotalSuccessfulRequests != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalSuccessfulRequests)) + i-- + dAtA[i] = 0x10 + } + if m.Locality != nil { + if vtmsg, ok := interface{}(m.Locality).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := 
proto.Marshal(m.Locality) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpstreamEndpointStats) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpstreamEndpointStats) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UpstreamEndpointStats) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TotalIssuedRequests != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalIssuedRequests)) + i-- + dAtA[i] = 0x38 + } + if m.Metadata != nil { + size, err := (*structpb.Struct)(m.Metadata).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.LoadMetricStats) > 0 { + for iNdEx := len(m.LoadMetricStats) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.LoadMetricStats[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.TotalErrorRequests != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalErrorRequests)) + i-- + dAtA[i] = 0x20 + } + if m.TotalRequestsInProgress != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalRequestsInProgress)) + i-- + dAtA[i] = 0x18 + } + if m.TotalSuccessfulRequests != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalSuccessfulRequests)) + i-- + dAtA[i] = 0x10 + } + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EndpointLoadMetricStats) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointLoadMetricStats) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EndpointLoadMetricStats) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TotalMetricValue != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.TotalMetricValue)))) + i-- + dAtA[i] = 0x19 + } + if m.NumRequestsFinishedWithMetric != 0 { + i 
= protohelpers.EncodeVarint(dAtA, i, uint64(m.NumRequestsFinishedWithMetric)) + i-- + dAtA[i] = 0x10 + } + if len(m.MetricName) > 0 { + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UnnamedEndpointLoadMetricStats) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UnnamedEndpointLoadMetricStats) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UnnamedEndpointLoadMetricStats) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TotalMetricValue != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.TotalMetricValue)))) + i-- + dAtA[i] = 0x11 + } + if m.NumRequestsFinishedWithMetric != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.NumRequestsFinishedWithMetric)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ClusterStats_DroppedRequests) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterStats_DroppedRequests) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterStats_DroppedRequests) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DroppedCount != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DroppedCount)) + i-- + dAtA[i] = 0x10 + } + if len(m.Category) > 0 { + i -= len(m.Category) + copy(dAtA[i:], m.Category) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Category))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterStats) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterStats) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterStats) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ClusterServiceName) > 0 { + i -= len(m.ClusterServiceName) + copy(dAtA[i:], m.ClusterServiceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterServiceName))) + i-- + dAtA[i] = 0x32 + } + if len(m.DroppedRequests) > 0 { + for iNdEx := len(m.DroppedRequests) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DroppedRequests[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.LoadReportInterval != nil { + size, err := (*durationpb.Duration)(m.LoadReportInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.TotalDroppedRequests != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalDroppedRequests)) + i-- + dAtA[i] = 0x18 + } + if len(m.UpstreamLocalityStats) > 0 { + for iNdEx := len(m.UpstreamLocalityStats) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.UpstreamLocalityStats[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.ClusterName) > 0 { + i -= len(m.ClusterName) + copy(dAtA[i:], m.ClusterName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpstreamLocalityStats) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Locality != nil { + if size, ok := interface{}(m.Locality).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Locality) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TotalSuccessfulRequests != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalSuccessfulRequests)) + } + if m.TotalRequestsInProgress != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalRequestsInProgress)) + } + if m.TotalErrorRequests != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalErrorRequests)) + } + if len(m.LoadMetricStats) > 0 { + for _, e := range m.LoadMetricStats { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Priority != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Priority)) + } + if len(m.UpstreamEndpointStats) > 0 { + for _, e := range m.UpstreamEndpointStats { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.TotalIssuedRequests != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalIssuedRequests)) + } + if m.TotalActiveConnections != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalActiveConnections)) + } + if m.TotalNewConnections != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalNewConnections)) + } + if m.TotalFailConnections != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalFailConnections)) + } + if m.CpuUtilization != nil { + l = m.CpuUtilization.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MemUtilization != nil { + l = m.MemUtilization.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ApplicationUtilization != nil { + l = m.ApplicationUtilization.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UpstreamEndpointStats) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TotalSuccessfulRequests != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalSuccessfulRequests)) + } + if m.TotalRequestsInProgress != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalRequestsInProgress)) + } + if 
m.TotalErrorRequests != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalErrorRequests)) + } + if len(m.LoadMetricStats) > 0 { + for _, e := range m.LoadMetricStats { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Metadata != nil { + l = (*structpb.Struct)(m.Metadata).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TotalIssuedRequests != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalIssuedRequests)) + } + n += len(m.unknownFields) + return n +} + +func (m *EndpointLoadMetricStats) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.MetricName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.NumRequestsFinishedWithMetric != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.NumRequestsFinishedWithMetric)) + } + if m.TotalMetricValue != 0 { + n += 9 + } + n += len(m.unknownFields) + return n +} + +func (m *UnnamedEndpointLoadMetricStats) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NumRequestsFinishedWithMetric != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.NumRequestsFinishedWithMetric)) + } + if m.TotalMetricValue != 0 { + n += 9 + } + n += len(m.unknownFields) + return n +} + +func (m *ClusterStats_DroppedRequests) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Category) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DroppedCount != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.DroppedCount)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClusterStats) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClusterName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.UpstreamLocalityStats) > 0 { + for _, e := range m.UpstreamLocalityStats { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.TotalDroppedRequests != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalDroppedRequests)) + } + if m.LoadReportInterval != nil { + l = (*durationpb.Duration)(m.LoadReportInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.DroppedRequests) > 0 { + for _, e := range m.DroppedRequests { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.ClusterServiceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.go new file mode 100644 index 0000000000..1adc7d96f4 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.go @@ -0,0 +1,179 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/listener/v3/api_listener.proto + +package listenerv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Describes a type of API listener, which is used in non-proxy clients. The type of API +// exposed to the non-proxy application depends on the type of API listener. +type ApiListener struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type in this field determines the type of API listener. At present, the following + // types are supported: + // envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager (HTTP) + // envoy.extensions.filters.network.http_connection_manager.v3.EnvoyMobileHttpConnectionManager (HTTP) + // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the + // specific config message for each type of API listener. We could not do this in v2 because + // it would have caused circular dependencies for go protos: lds.proto depends on this file, + // and http_connection_manager.proto depends on rds.proto, which is in the same directory as + // lds.proto, so lds.proto cannot depend on this file.] + ApiListener *anypb.Any `protobuf:"bytes,1,opt,name=api_listener,json=apiListener,proto3" json:"api_listener,omitempty"` +} + +func (x *ApiListener) Reset() { + *x = ApiListener{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_api_listener_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApiListener) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApiListener) ProtoMessage() {} + +func (x *ApiListener) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_api_listener_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApiListener.ProtoReflect.Descriptor instead. 
+func (*ApiListener) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_api_listener_proto_rawDescGZIP(), []int{0} +} + +func (x *ApiListener) GetApiListener() *anypb.Any { + if x != nil { + return x.ApiListener + } + return nil +} + +var File_envoy_config_listener_v3_api_listener_proto protoreflect.FileDescriptor + +var file_envoy_config_listener_v3_api_listener_proto_rawDesc = []byte{ + 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x70, 0x69, 0x5f, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x73, 0x0a, 0x0b, 0x41, 0x70, 0x69, 0x4c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x0c, 0x61, 0x70, 0x69, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x0b, 0x61, 0x70, 0x69, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x3a, 0x2b, 0x9a, 0xc5, + 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, + 0x69, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x42, 0x90, 0x01, 0xba, 0x80, 0xc8, 0xd1, + 0x06, 0x02, 0x10, 0x02, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x41, 0x70, + 0x69, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, + 0x33, 0x3b, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_listener_v3_api_listener_proto_rawDescOnce sync.Once + file_envoy_config_listener_v3_api_listener_proto_rawDescData = file_envoy_config_listener_v3_api_listener_proto_rawDesc +) + +func file_envoy_config_listener_v3_api_listener_proto_rawDescGZIP() []byte { + file_envoy_config_listener_v3_api_listener_proto_rawDescOnce.Do(func() { + file_envoy_config_listener_v3_api_listener_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_envoy_config_listener_v3_api_listener_proto_rawDescData) + }) + return file_envoy_config_listener_v3_api_listener_proto_rawDescData +} + +var file_envoy_config_listener_v3_api_listener_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_listener_v3_api_listener_proto_goTypes = []interface{}{ + (*ApiListener)(nil), // 0: envoy.config.listener.v3.ApiListener + (*anypb.Any)(nil), // 1: google.protobuf.Any +} +var file_envoy_config_listener_v3_api_listener_proto_depIdxs = []int32{ + 1, // 0: envoy.config.listener.v3.ApiListener.api_listener:type_name -> google.protobuf.Any + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_config_listener_v3_api_listener_proto_init() } +func file_envoy_config_listener_v3_api_listener_proto_init() { + if File_envoy_config_listener_v3_api_listener_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_listener_v3_api_listener_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApiListener); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_listener_v3_api_listener_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_listener_v3_api_listener_proto_goTypes, + DependencyIndexes: file_envoy_config_listener_v3_api_listener_proto_depIdxs, + MessageInfos: file_envoy_config_listener_v3_api_listener_proto_msgTypes, + }.Build() + File_envoy_config_listener_v3_api_listener_proto = out.File + file_envoy_config_listener_v3_api_listener_proto_rawDesc = nil + file_envoy_config_listener_v3_api_listener_proto_goTypes = nil + file_envoy_config_listener_v3_api_listener_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.validate.go new file mode 100644 index 0000000000..56954a35df --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener.pb.validate.go @@ -0,0 +1,165 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/listener/v3/api_listener.proto + +package listenerv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ApiListener with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *ApiListener) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ApiListener with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ApiListenerMultiError, or +// nil if none found. +func (m *ApiListener) ValidateAll() error { + return m.validate(true) +} + +func (m *ApiListener) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetApiListener()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ApiListenerValidationError{ + field: "ApiListener", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ApiListenerValidationError{ + field: "ApiListener", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetApiListener()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ApiListenerValidationError{ + field: "ApiListener", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ApiListenerMultiError(errors) + } + + return nil +} + +// ApiListenerMultiError is an error wrapping multiple validation errors +// returned by ApiListener.ValidateAll() if the designated constraints aren't met. +type ApiListenerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ApiListenerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ApiListenerMultiError) AllErrors() []error { return m } + +// ApiListenerValidationError is the validation error returned by +// ApiListener.Validate if the designated constraints aren't met. +type ApiListenerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ApiListenerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ApiListenerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ApiListenerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ApiListenerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ApiListenerValidationError) ErrorName() string { return "ApiListenerValidationError" } + +// Error satisfies the builtin error interface +func (e ApiListenerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sApiListener.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ApiListenerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ApiListenerValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener_vtproto.pb.go new file mode 100644 index 0000000000..0d24f32c6d --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/api_listener_vtproto.pb.go @@ -0,0 +1,77 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/listener/v3/api_listener.proto + +package listenerv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ApiListener) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ApiListener) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ApiListener) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ApiListener != nil { + size, err := (*anypb.Any)(m.ApiListener).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ApiListener) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ApiListener != nil { + l = (*anypb.Any)(m.ApiListener).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.go new file mode 100644 index 0000000000..c639e62b1c --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.go @@ -0,0 +1,1593 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/listener/v3/listener.proto + +package listenerv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/cncf/xds/go/xds/annotations/v3" + v31 "github.com/cncf/xds/go/xds/core/v3" + v32 "github.com/cncf/xds/go/xds/type/matcher/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v33 "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Listener_DrainType int32 + +const ( + // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check + // filter), listener removal/modification, and hot restart. + Listener_DEFAULT Listener_DrainType = 0 + // Drain in response to listener removal/modification and hot restart. This setting does not + // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress + // and egress listeners. + Listener_MODIFY_ONLY Listener_DrainType = 1 +) + +// Enum value maps for Listener_DrainType. +var ( + Listener_DrainType_name = map[int32]string{ + 0: "DEFAULT", + 1: "MODIFY_ONLY", + } + Listener_DrainType_value = map[string]int32{ + "DEFAULT": 0, + "MODIFY_ONLY": 1, + } +) + +func (x Listener_DrainType) Enum() *Listener_DrainType { + p := new(Listener_DrainType) + *p = x + return p +} + +func (x Listener_DrainType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Listener_DrainType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_listener_v3_listener_proto_enumTypes[0].Descriptor() +} + +func (Listener_DrainType) Type() protoreflect.EnumType { + return &file_envoy_config_listener_v3_listener_proto_enumTypes[0] +} + +func (x Listener_DrainType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Listener_DrainType.Descriptor instead. +func (Listener_DrainType) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{2, 0} +} + +// The additional address the listener is listening on. +type AdditionalAddress struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Address *v3.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. If specified, this will override the + // :ref:`socket_options ` + // in the listener. If specified with no + // :ref:`socket_options ` + // or an empty list of :ref:`socket_options `, + // it means no socket option will apply. 
+ SocketOptions *v3.SocketOptionsOverride `protobuf:"bytes,2,opt,name=socket_options,json=socketOptions,proto3" json:"socket_options,omitempty"` +} + +func (x *AdditionalAddress) Reset() { + *x = AdditionalAddress{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AdditionalAddress) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AdditionalAddress) ProtoMessage() {} + +func (x *AdditionalAddress) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AdditionalAddress.ProtoReflect.Descriptor instead. +func (*AdditionalAddress) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{0} +} + +func (x *AdditionalAddress) GetAddress() *v3.Address { + if x != nil { + return x.Address + } + return nil +} + +func (x *AdditionalAddress) GetSocketOptions() *v3.SocketOptionsOverride { + if x != nil { + return x.SocketOptions + } + return nil +} + +// Listener list collections. Entries are “Listener“ resources or references. +// [#not-implemented-hide:] +type ListenerCollection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entries []*v31.CollectionEntry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` +} + +func (x *ListenerCollection) Reset() { + *x = ListenerCollection{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenerCollection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenerCollection) ProtoMessage() {} + +func (x *ListenerCollection) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenerCollection.ProtoReflect.Descriptor instead. +func (*ListenerCollection) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{1} +} + +func (x *ListenerCollection) GetEntries() []*v31.CollectionEntry { + if x != nil { + return x.Entries + } + return nil +} + +// [#next-free-field: 36] +type Listener struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The unique name by which this listener is known. If no name is provided, + // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically + // updated or removed via :ref:`LDS ` a unique name must be provided. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The address that the listener should listen on. In general, the address must be unique, though + // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on + // Linux as the actual port will be allocated by the OS. 
+ // Required unless “api_listener“ or “listener_specifier“ is populated. + Address *v3.Address `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + // The additional addresses the listener should listen on. The addresses must be unique across all + // listeners. Multiple addresses with port 0 can be supplied. When using multiple addresses in a single listener, + // all addresses use the same protocol, and multiple internal addresses are not supported. + AdditionalAddresses []*AdditionalAddress `protobuf:"bytes,33,rep,name=additional_addresses,json=additionalAddresses,proto3" json:"additional_addresses,omitempty"` + // Optional prefix to use on listener stats. If empty, the stats will be rooted at + // “listener.
.“. If non-empty, stats will be rooted at + // “listener..“. + StatPrefix string `protobuf:"bytes,28,opt,name=stat_prefix,json=statPrefix,proto3" json:"stat_prefix,omitempty"` + // A list of filter chains to consider for this listener. The + // :ref:`FilterChain ` with the most specific + // :ref:`FilterChainMatch ` criteria is used on a + // connection. + // + // Example using SNI for filter chain selection can be found in the + // :ref:`FAQ entry `. + FilterChains []*FilterChain `protobuf:"bytes,3,rep,name=filter_chains,json=filterChains,proto3" json:"filter_chains,omitempty"` + // :ref:`Matcher API ` resolving the filter chain name from the + // network properties. This matcher is used as a replacement for the filter chain match condition + // :ref:`filter_chain_match + // `. If specified, all + // :ref:`filter_chains ` must have a + // non-empty and unique :ref:`name ` field + // and not specify :ref:`filter_chain_match + // ` field. + // + // .. note:: + // + // Once matched, each connection is permanently bound to its filter chain. + // If the matcher changes but the filter chain remains the same, the + // connections bound to the filter chain are not drained. If, however, the + // filter chain is removed or structurally modified, then the drain for its + // connections is initiated. + FilterChainMatcher *v32.Matcher `protobuf:"bytes,32,opt,name=filter_chain_matcher,json=filterChainMatcher,proto3" json:"filter_chain_matcher,omitempty"` + // If a connection is redirected using “iptables“, the port on which the proxy + // receives it might be different from the original destination address. When this flag is set to + // true, the listener hands off redirected connections to the listener associated with the + // original destination address. If there is no listener associated with the original destination + // address, the connection is handled by the listener that receives it. Defaults to false. + UseOriginalDst *wrapperspb.BoolValue `protobuf:"bytes,4,opt,name=use_original_dst,json=useOriginalDst,proto3" json:"use_original_dst,omitempty"` + // The default filter chain if none of the filter chain matches. If no default filter chain is supplied, + // the connection will be closed. The filter chain match is ignored in this field. + DefaultFilterChain *FilterChain `protobuf:"bytes,25,opt,name=default_filter_chain,json=defaultFilterChain,proto3" json:"default_filter_chain,omitempty"` + // Soft limit on size of the listener’s new connection read and write buffers. + // If unspecified, an implementation defined default is applied (1MiB). + PerConnectionBufferLimitBytes *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=per_connection_buffer_limit_bytes,json=perConnectionBufferLimitBytes,proto3" json:"per_connection_buffer_limit_bytes,omitempty"` + // Listener metadata. + Metadata *v3.Metadata `protobuf:"bytes,6,opt,name=metadata,proto3" json:"metadata,omitempty"` + // [#not-implemented-hide:] + // + // Deprecated: Marked as deprecated in envoy/config/listener/v3/listener.proto. + DeprecatedV1 *Listener_DeprecatedV1 `protobuf:"bytes,7,opt,name=deprecated_v1,json=deprecatedV1,proto3" json:"deprecated_v1,omitempty"` + // The type of draining to perform at a listener-wide level. 
+ DrainType Listener_DrainType `protobuf:"varint,8,opt,name=drain_type,json=drainType,proto3,enum=envoy.config.listener.v3.Listener_DrainType" json:"drain_type,omitempty"` + // Listener filters have the opportunity to manipulate and augment the connection metadata that + // is used in connection filter chain matching, for example. These filters are run before any in + // :ref:`filter_chains `. Order matters as the + // filters are processed sequentially right after a socket has been accepted by the listener, and + // before a connection is created. + // UDP Listener filters can be specified when the protocol in the listener socket address in + // :ref:`protocol ` is :ref:`UDP + // ` and no + // :ref:`quic_options ` is specified in :ref:`udp_listener_config `. + // QUIC listener filters can be specified when :ref:`quic_options + // ` is + // specified in :ref:`udp_listener_config `. + // They are processed sequentially right before connection creation. And like TCP Listener filters, they can be used to manipulate the connection metadata and socket. But the difference is that they can't be used to pause connection creation. + ListenerFilters []*ListenerFilter `protobuf:"bytes,9,rep,name=listener_filters,json=listenerFilters,proto3" json:"listener_filters,omitempty"` + // The timeout to wait for all listener filters to complete operation. If the timeout is reached, + // the accepted socket is closed without a connection being created unless + // “continue_on_listener_filters_timeout“ is set to true. Specify 0 to disable the + // timeout. If not specified, a default timeout of 15s is used. + ListenerFiltersTimeout *durationpb.Duration `protobuf:"bytes,15,opt,name=listener_filters_timeout,json=listenerFiltersTimeout,proto3" json:"listener_filters_timeout,omitempty"` + // Whether a connection should be created when listener filters timeout. Default is false. + // + // .. attention:: + // + // Some listener filters, such as :ref:`Proxy Protocol filter + // `, should not be used with this option. It will cause + // unexpected behavior when a connection is created. + ContinueOnListenerFiltersTimeout bool `protobuf:"varint,17,opt,name=continue_on_listener_filters_timeout,json=continueOnListenerFiltersTimeout,proto3" json:"continue_on_listener_filters_timeout,omitempty"` + // Whether the listener should be set as a transparent socket. + // When this flag is set to true, connections can be redirected to the listener using an + // “iptables“ “TPROXY“ target, in which case the original source and destination addresses and + // ports are preserved on accepted connections. This flag should be used in combination with + // :ref:`an original_dst ` :ref:`listener filter + // ` to mark the connections' local addresses as + // "restored." This can be used to hand off each redirected connection to another listener + // associated with the connection's destination address. Direct connections to the socket without + // using “TPROXY“ cannot be distinguished from connections redirected using “TPROXY“ and are + // therefore treated as if they were redirected. + // When this flag is set to false, the listener's socket is explicitly reset as non-transparent. + // Setting this flag requires Envoy to run with the “CAP_NET_ADMIN“ capability. + // When this flag is not set (default), the socket is not modified, i.e. the transparent option + // is neither set nor reset. 
+ Transparent *wrapperspb.BoolValue `protobuf:"bytes,10,opt,name=transparent,proto3" json:"transparent,omitempty"` + // Whether the listener should set the “IP_FREEBIND“ socket option. When this + // flag is set to true, listeners can be bound to an IP address that is not + // configured on the system running Envoy. When this flag is set to false, the + // option “IP_FREEBIND“ is disabled on the socket. When this flag is not set + // (default), the socket is not modified, i.e. the option is neither enabled + // nor disabled. + Freebind *wrapperspb.BoolValue `protobuf:"bytes,11,opt,name=freebind,proto3" json:"freebind,omitempty"` + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. The socket options can be updated for a listener when + // :ref:`enable_reuse_port ` + // is “true“. Otherwise, if socket options change during a listener update the update will be rejected + // to make it clear that the options were not updated. + SocketOptions []*v3.SocketOption `protobuf:"bytes,13,rep,name=socket_options,json=socketOptions,proto3" json:"socket_options,omitempty"` + // Whether the listener should accept TCP Fast Open (TFO) connections. + // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on + // the socket, with a queue length of the specified size + // (see `details in RFC7413 `_). + // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket. + // When this flag is not set (default), the socket is not modified, + // i.e. the option is neither enabled nor disabled. + // + // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable + // TCP_FASTOPEN. + // See `ip-sysctl.txt `_. + // + // On macOS, only values of 0, 1, and unset are valid; other values may result in an error. + // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. + TcpFastOpenQueueLength *wrapperspb.UInt32Value `protobuf:"bytes,12,opt,name=tcp_fast_open_queue_length,json=tcpFastOpenQueueLength,proto3" json:"tcp_fast_open_queue_length,omitempty"` + // Specifies the intended direction of the traffic relative to the local Envoy. + // This property is required on Windows for listeners using the original destination filter, + // see :ref:`Original Destination `. + TrafficDirection v3.TrafficDirection `protobuf:"varint,16,opt,name=traffic_direction,json=trafficDirection,proto3,enum=envoy.config.core.v3.TrafficDirection" json:"traffic_direction,omitempty"` + // If the protocol in the listener socket address in :ref:`protocol + // ` is :ref:`UDP + // `, this field specifies UDP + // listener specific configuration. + UdpListenerConfig *UdpListenerConfig `protobuf:"bytes,18,opt,name=udp_listener_config,json=udpListenerConfig,proto3" json:"udp_listener_config,omitempty"` + // Used to represent an API listener, which is used in non-proxy clients. The type of API + // exposed to the non-proxy application depends on the type of API listener. + // When this field is set, no other field except for :ref:`name` + // should be set. + // + // .. note:: + // + // Currently only one ApiListener can be installed; and it can only be done via bootstrap config, + // not LDS. 
+ // + // [#next-major-version: In the v3 API, instead of this messy approach where the socket + // listener fields are directly in the top-level Listener message and the API listener types + // are in the ApiListener message, the socket listener messages should be in their own message, + // and the top-level Listener should essentially be a oneof that selects between the + // socket listener and the various types of API listener. That way, a given Listener message + // can structurally only contain the fields of the relevant type.] + ApiListener *ApiListener `protobuf:"bytes,19,opt,name=api_listener,json=apiListener,proto3" json:"api_listener,omitempty"` + // The listener's connection balancer configuration, currently only applicable to TCP listeners. + // If no configuration is specified, Envoy will not attempt to balance active connections between + // worker threads. + // + // In the scenario that the listener X redirects all the connections to the listeners Y1 and Y2 + // by setting :ref:`use_original_dst ` in X + // and :ref:`bind_to_port ` to false in Y1 and Y2, + // it is recommended to disable the balance config in listener X to avoid the cost of balancing, and + // enable the balance config in Y1 and Y2 to balance the connections among the workers. + ConnectionBalanceConfig *Listener_ConnectionBalanceConfig `protobuf:"bytes,20,opt,name=connection_balance_config,json=connectionBalanceConfig,proto3" json:"connection_balance_config,omitempty"` + // Deprecated. Use “enable_reuse_port“ instead. + // + // Deprecated: Marked as deprecated in envoy/config/listener/v3/listener.proto. + ReusePort bool `protobuf:"varint,21,opt,name=reuse_port,json=reusePort,proto3" json:"reuse_port,omitempty"` + // When this flag is set to true, listeners set the “SO_REUSEPORT“ socket option and + // create one socket for each worker thread. This makes inbound connections + // distribute among worker threads roughly evenly in cases where there are a high number + // of connections. When this flag is set to false, all worker threads share one socket. This field + // defaults to true. The change of field will be rejected during an listener update when the + // runtime flag “envoy.reloadable_features.enable_update_listener_socket_options“ is enabled. + // Otherwise, the update of this field will be ignored quietly. + // + // .. attention:: + // + // Although this field defaults to true, it has different behavior on different platforms. See + // the following text for more information. + // + // - On Linux, reuse_port is respected for both TCP and UDP listeners. It also works correctly + // with hot restart. + // - On macOS, reuse_port for TCP does not do what it does on Linux. Instead of load balancing, + // the last socket wins and receives all connections/packets. For TCP, reuse_port is force + // disabled and the user is warned. For UDP, it is enabled, but only one worker will receive + // packets. For QUIC/H3, SW routing will send packets to other workers. For "raw" UDP, only + // a single worker will currently receive packets. + // - On Windows, reuse_port for TCP has undefined behavior. It is force disabled and the user + // is warned similar to macOS. It is left enabled for UDP with undefined behavior currently. + EnableReusePort *wrapperspb.BoolValue `protobuf:"bytes,29,opt,name=enable_reuse_port,json=enableReusePort,proto3" json:"enable_reuse_port,omitempty"` + // Configuration for :ref:`access logs ` + // emitted by this listener. 
+ AccessLog []*v33.AccessLog `protobuf:"bytes,22,rep,name=access_log,json=accessLog,proto3" json:"access_log,omitempty"` + // The maximum length a tcp listener's pending connections queue can grow to. If no value is + // provided net.core.somaxconn will be used on Linux and 128 otherwise. + TcpBacklogSize *wrapperspb.UInt32Value `protobuf:"bytes,24,opt,name=tcp_backlog_size,json=tcpBacklogSize,proto3" json:"tcp_backlog_size,omitempty"` + // The maximum number of connections to accept from the kernel per socket + // event. Envoy may decide to close these connections after accepting them + // from the kernel e.g. due to load shedding, or other policies. + // If there are more than max_connections_to_accept_per_socket_event + // connections pending accept, connections over this threshold will be + // accepted in later event loop iterations. + // If no value is provided Envoy will accept all connections pending accept + // from the kernel. + MaxConnectionsToAcceptPerSocketEvent *wrapperspb.UInt32Value `protobuf:"bytes,34,opt,name=max_connections_to_accept_per_socket_event,json=maxConnectionsToAcceptPerSocketEvent,proto3" json:"max_connections_to_accept_per_socket_event,omitempty"` + // Whether the listener should bind to the port. A listener that doesn't + // bind can only receive connections redirected from other listeners that set + // :ref:`use_original_dst ` + // to true. Default is true. + BindToPort *wrapperspb.BoolValue `protobuf:"bytes,26,opt,name=bind_to_port,json=bindToPort,proto3" json:"bind_to_port,omitempty"` + // The exclusive listener type and the corresponding config. + // + // Types that are assignable to ListenerSpecifier: + // + // *Listener_InternalListener + ListenerSpecifier isListener_ListenerSpecifier `protobuf_oneof:"listener_specifier"` + // Enable MPTCP (multi-path TCP) on this listener. Clients will be allowed to establish + // MPTCP connections. Non-MPTCP clients will fall back to regular TCP. + EnableMptcp bool `protobuf:"varint,30,opt,name=enable_mptcp,json=enableMptcp,proto3" json:"enable_mptcp,omitempty"` + // Whether the listener should limit connections based upon the value of + // :ref:`global_downstream_max_connections `. + IgnoreGlobalConnLimit bool `protobuf:"varint,31,opt,name=ignore_global_conn_limit,json=ignoreGlobalConnLimit,proto3" json:"ignore_global_conn_limit,omitempty"` + // Whether the listener bypasses configured overload manager actions. + BypassOverloadManager bool `protobuf:"varint,35,opt,name=bypass_overload_manager,json=bypassOverloadManager,proto3" json:"bypass_overload_manager,omitempty"` +} + +func (x *Listener) Reset() { + *x = Listener{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Listener) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Listener) ProtoMessage() {} + +func (x *Listener) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Listener.ProtoReflect.Descriptor instead. 
+func (*Listener) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{2} +} + +func (x *Listener) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Listener) GetAddress() *v3.Address { + if x != nil { + return x.Address + } + return nil +} + +func (x *Listener) GetAdditionalAddresses() []*AdditionalAddress { + if x != nil { + return x.AdditionalAddresses + } + return nil +} + +func (x *Listener) GetStatPrefix() string { + if x != nil { + return x.StatPrefix + } + return "" +} + +func (x *Listener) GetFilterChains() []*FilterChain { + if x != nil { + return x.FilterChains + } + return nil +} + +func (x *Listener) GetFilterChainMatcher() *v32.Matcher { + if x != nil { + return x.FilterChainMatcher + } + return nil +} + +func (x *Listener) GetUseOriginalDst() *wrapperspb.BoolValue { + if x != nil { + return x.UseOriginalDst + } + return nil +} + +func (x *Listener) GetDefaultFilterChain() *FilterChain { + if x != nil { + return x.DefaultFilterChain + } + return nil +} + +func (x *Listener) GetPerConnectionBufferLimitBytes() *wrapperspb.UInt32Value { + if x != nil { + return x.PerConnectionBufferLimitBytes + } + return nil +} + +func (x *Listener) GetMetadata() *v3.Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/listener/v3/listener.proto. +func (x *Listener) GetDeprecatedV1() *Listener_DeprecatedV1 { + if x != nil { + return x.DeprecatedV1 + } + return nil +} + +func (x *Listener) GetDrainType() Listener_DrainType { + if x != nil { + return x.DrainType + } + return Listener_DEFAULT +} + +func (x *Listener) GetListenerFilters() []*ListenerFilter { + if x != nil { + return x.ListenerFilters + } + return nil +} + +func (x *Listener) GetListenerFiltersTimeout() *durationpb.Duration { + if x != nil { + return x.ListenerFiltersTimeout + } + return nil +} + +func (x *Listener) GetContinueOnListenerFiltersTimeout() bool { + if x != nil { + return x.ContinueOnListenerFiltersTimeout + } + return false +} + +func (x *Listener) GetTransparent() *wrapperspb.BoolValue { + if x != nil { + return x.Transparent + } + return nil +} + +func (x *Listener) GetFreebind() *wrapperspb.BoolValue { + if x != nil { + return x.Freebind + } + return nil +} + +func (x *Listener) GetSocketOptions() []*v3.SocketOption { + if x != nil { + return x.SocketOptions + } + return nil +} + +func (x *Listener) GetTcpFastOpenQueueLength() *wrapperspb.UInt32Value { + if x != nil { + return x.TcpFastOpenQueueLength + } + return nil +} + +func (x *Listener) GetTrafficDirection() v3.TrafficDirection { + if x != nil { + return x.TrafficDirection + } + return v3.TrafficDirection(0) +} + +func (x *Listener) GetUdpListenerConfig() *UdpListenerConfig { + if x != nil { + return x.UdpListenerConfig + } + return nil +} + +func (x *Listener) GetApiListener() *ApiListener { + if x != nil { + return x.ApiListener + } + return nil +} + +func (x *Listener) GetConnectionBalanceConfig() *Listener_ConnectionBalanceConfig { + if x != nil { + return x.ConnectionBalanceConfig + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/listener/v3/listener.proto. 
+func (x *Listener) GetReusePort() bool { + if x != nil { + return x.ReusePort + } + return false +} + +func (x *Listener) GetEnableReusePort() *wrapperspb.BoolValue { + if x != nil { + return x.EnableReusePort + } + return nil +} + +func (x *Listener) GetAccessLog() []*v33.AccessLog { + if x != nil { + return x.AccessLog + } + return nil +} + +func (x *Listener) GetTcpBacklogSize() *wrapperspb.UInt32Value { + if x != nil { + return x.TcpBacklogSize + } + return nil +} + +func (x *Listener) GetMaxConnectionsToAcceptPerSocketEvent() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxConnectionsToAcceptPerSocketEvent + } + return nil +} + +func (x *Listener) GetBindToPort() *wrapperspb.BoolValue { + if x != nil { + return x.BindToPort + } + return nil +} + +func (m *Listener) GetListenerSpecifier() isListener_ListenerSpecifier { + if m != nil { + return m.ListenerSpecifier + } + return nil +} + +func (x *Listener) GetInternalListener() *Listener_InternalListenerConfig { + if x, ok := x.GetListenerSpecifier().(*Listener_InternalListener); ok { + return x.InternalListener + } + return nil +} + +func (x *Listener) GetEnableMptcp() bool { + if x != nil { + return x.EnableMptcp + } + return false +} + +func (x *Listener) GetIgnoreGlobalConnLimit() bool { + if x != nil { + return x.IgnoreGlobalConnLimit + } + return false +} + +func (x *Listener) GetBypassOverloadManager() bool { + if x != nil { + return x.BypassOverloadManager + } + return false +} + +type isListener_ListenerSpecifier interface { + isListener_ListenerSpecifier() +} + +type Listener_InternalListener struct { + // Used to represent an internal listener which does not listen on OSI L4 address but can be used by the + // :ref:`envoy cluster ` to create a user space connection to. + // The internal listener acts as a TCP listener. It supports listener filters and network filter chains. + // Upstream clusters refer to the internal listeners by their :ref:`name + // `. :ref:`Address + // ` must not be set on the internal listeners. + // + // There are some limitations that are derived from the implementation. The known limitations include: + // + // - :ref:`ConnectionBalanceConfig ` is not + // allowed because both the cluster connection and the listener connection must be owned by the same dispatcher. + // - :ref:`tcp_backlog_size ` + // - :ref:`freebind ` + // - :ref:`transparent ` + InternalListener *Listener_InternalListenerConfig `protobuf:"bytes,27,opt,name=internal_listener,json=internalListener,proto3,oneof"` +} + +func (*Listener_InternalListener) isListener_ListenerSpecifier() {} + +// A placeholder proto so that users can explicitly configure the standard +// Listener Manager via the bootstrap's :ref:`listener_manager `. 
+// [#not-implemented-hide:] +type ListenerManager struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ListenerManager) Reset() { + *x = ListenerManager{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenerManager) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenerManager) ProtoMessage() {} + +func (x *ListenerManager) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenerManager.ProtoReflect.Descriptor instead. +func (*ListenerManager) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{3} +} + +// A placeholder proto so that users can explicitly configure the standard +// Validation Listener Manager via the bootstrap's :ref:`listener_manager `. +// [#not-implemented-hide:] +type ValidationListenerManager struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ValidationListenerManager) Reset() { + *x = ValidationListenerManager{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidationListenerManager) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidationListenerManager) ProtoMessage() {} + +func (x *ValidationListenerManager) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidationListenerManager.ProtoReflect.Descriptor instead. +func (*ValidationListenerManager) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{4} +} + +// A placeholder proto so that users can explicitly configure the API +// Listener Manager via the bootstrap's :ref:`listener_manager `. 
+// [#not-implemented-hide:] +type ApiListenerManager struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ApiListenerManager) Reset() { + *x = ApiListenerManager{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApiListenerManager) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApiListenerManager) ProtoMessage() {} + +func (x *ApiListenerManager) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApiListenerManager.ProtoReflect.Descriptor instead. +func (*ApiListenerManager) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{5} +} + +// [#not-implemented-hide:] +type Listener_DeprecatedV1 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether the listener should bind to the port. A listener that doesn't + // bind can only receive connections redirected from other listeners that + // set use_original_dst parameter to true. Default is true. + // + // This is deprecated. Use :ref:`Listener.bind_to_port + // ` + BindToPort *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=bind_to_port,json=bindToPort,proto3" json:"bind_to_port,omitempty"` +} + +func (x *Listener_DeprecatedV1) Reset() { + *x = Listener_DeprecatedV1{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Listener_DeprecatedV1) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Listener_DeprecatedV1) ProtoMessage() {} + +func (x *Listener_DeprecatedV1) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Listener_DeprecatedV1.ProtoReflect.Descriptor instead. +func (*Listener_DeprecatedV1) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *Listener_DeprecatedV1) GetBindToPort() *wrapperspb.BoolValue { + if x != nil { + return x.BindToPort + } + return nil +} + +// Configuration for listener connection balancing. 
+type Listener_ConnectionBalanceConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to BalanceType: + // + // *Listener_ConnectionBalanceConfig_ExactBalance_ + // *Listener_ConnectionBalanceConfig_ExtendBalance + BalanceType isListener_ConnectionBalanceConfig_BalanceType `protobuf_oneof:"balance_type"` +} + +func (x *Listener_ConnectionBalanceConfig) Reset() { + *x = Listener_ConnectionBalanceConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Listener_ConnectionBalanceConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Listener_ConnectionBalanceConfig) ProtoMessage() {} + +func (x *Listener_ConnectionBalanceConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Listener_ConnectionBalanceConfig.ProtoReflect.Descriptor instead. +func (*Listener_ConnectionBalanceConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{2, 1} +} + +func (m *Listener_ConnectionBalanceConfig) GetBalanceType() isListener_ConnectionBalanceConfig_BalanceType { + if m != nil { + return m.BalanceType + } + return nil +} + +func (x *Listener_ConnectionBalanceConfig) GetExactBalance() *Listener_ConnectionBalanceConfig_ExactBalance { + if x, ok := x.GetBalanceType().(*Listener_ConnectionBalanceConfig_ExactBalance_); ok { + return x.ExactBalance + } + return nil +} + +func (x *Listener_ConnectionBalanceConfig) GetExtendBalance() *v3.TypedExtensionConfig { + if x, ok := x.GetBalanceType().(*Listener_ConnectionBalanceConfig_ExtendBalance); ok { + return x.ExtendBalance + } + return nil +} + +type isListener_ConnectionBalanceConfig_BalanceType interface { + isListener_ConnectionBalanceConfig_BalanceType() +} + +type Listener_ConnectionBalanceConfig_ExactBalance_ struct { + // If specified, the listener will use the exact connection balancer. + ExactBalance *Listener_ConnectionBalanceConfig_ExactBalance `protobuf:"bytes,1,opt,name=exact_balance,json=exactBalance,proto3,oneof"` +} + +type Listener_ConnectionBalanceConfig_ExtendBalance struct { + // The listener will use the connection balancer according to “type_url“. If “type_url“ is invalid, + // Envoy will not attempt to balance active connections between worker threads. + // [#extension-category: envoy.network.connection_balance] + ExtendBalance *v3.TypedExtensionConfig `protobuf:"bytes,2,opt,name=extend_balance,json=extendBalance,proto3,oneof"` +} + +func (*Listener_ConnectionBalanceConfig_ExactBalance_) isListener_ConnectionBalanceConfig_BalanceType() { +} + +func (*Listener_ConnectionBalanceConfig_ExtendBalance) isListener_ConnectionBalanceConfig_BalanceType() { +} + +// Configuration for envoy internal listener. All the future internal listener features should be added here. 
+type Listener_InternalListenerConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Listener_InternalListenerConfig) Reset() { + *x = Listener_InternalListenerConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Listener_InternalListenerConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Listener_InternalListenerConfig) ProtoMessage() {} + +func (x *Listener_InternalListenerConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Listener_InternalListenerConfig.ProtoReflect.Descriptor instead. +func (*Listener_InternalListenerConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{2, 2} +} + +// A connection balancer implementation that does exact balancing. This means that a lock is +// held during balancing so that connection counts are nearly exactly balanced between worker +// threads. This is "nearly" exact in the sense that a connection might close in parallel thus +// making the counts incorrect, but this should be rectified on the next accept. This balancer +// sacrifices accept throughput for accuracy and should be used when there are a small number of +// connections that rarely cycle (e.g., service mesh gRPC egress). +type Listener_ConnectionBalanceConfig_ExactBalance struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Listener_ConnectionBalanceConfig_ExactBalance) Reset() { + *x = Listener_ConnectionBalanceConfig_ExactBalance{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Listener_ConnectionBalanceConfig_ExactBalance) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Listener_ConnectionBalanceConfig_ExactBalance) ProtoMessage() {} + +func (x *Listener_ConnectionBalanceConfig_ExactBalance) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Listener_ConnectionBalanceConfig_ExactBalance.ProtoReflect.Descriptor instead. 
+func (*Listener_ConnectionBalanceConfig_ExactBalance) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_proto_rawDescGZIP(), []int{2, 1, 0} +} + +var File_envoy_config_listener_v3_listener_proto protoreflect.FileDescriptor + +var file_envoy_config_listener_v3_listener_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x1a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x70, + 0x69, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x75, + 0x64, 0x70, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, + 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x78, 0x64, 0x73, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 
0x6f, + 0x6e, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x78, + 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, + 0x76, 0x33, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, + 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0xa0, 0x01, 0x0a, 0x11, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x37, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x12, 0x52, 0x0a, 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4f, 0x76, 0x65, + 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x4c, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x07, 0x65, 0x6e, + 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x78, 0x64, + 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x22, 0xbe, 0x18, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x5e, 0x0a, 0x14, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x65, 0x73, 0x18, 0x21, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x13, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, + 0x73, 0x74, 0x61, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x1c, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x4a, 0x0a, + 0x0d, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x0c, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, + 0x12, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x10, 0x75, 0x73, 0x65, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, + 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x75, 0x73, 0x65, 0x4f, 0x72, + 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x44, 0x73, 0x74, 0x12, 0x57, 0x0a, 0x14, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x68, 0x61, 0x69, + 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x12, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x12, 0x6f, 0x0a, 0x21, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x8a, 0x93, 0xb7, + 0x2a, 0x02, 0x08, 0x01, 0x52, 0x1d, 0x70, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 
0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, + 0x61, 0x0a, 0x0d, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x76, 0x31, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x44, 0x65, 0x70, 0x72, 0x65, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x56, 0x31, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, + 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0c, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x56, 0x31, 0x12, 0x4b, 0x0a, 0x0a, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x44, 0x72, 0x61, 0x69, 0x6e, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x53, 0x0a, 0x10, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x52, 0x0f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x73, 0x12, 0x53, 0x0a, 0x18, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x4e, 0x0a, 0x24, 0x63, 0x6f, 0x6e, + 0x74, 0x69, 0x6e, 0x75, 0x65, 0x5f, 0x6f, 0x6e, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, + 0x65, 0x4f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3c, 0x0a, 0x0b, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x08, 0x66, 0x72, 0x65, 0x65, 0x62, + 0x69, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x66, 0x72, 0x65, 0x65, 0x62, 0x69, 0x6e, 0x64, 0x12, + 0x49, 0x0a, 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 
0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x1a, 0x74, 0x63, + 0x70, 0x5f, 0x66, 0x61, 0x73, 0x74, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x5f, 0x71, 0x75, 0x65, 0x75, + 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x16, 0x74, 0x63, + 0x70, 0x46, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x65, 0x6e, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x12, 0x53, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, + 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, + 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x13, 0x75, 0x64, 0x70, + 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x55, 0x64, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x11, 0x75, 0x64, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x48, 0x0a, 0x0c, 0x61, 0x70, 0x69, 0x5f, 0x6c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x4c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x52, 0x0b, 0x61, 0x70, 0x69, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x12, 0x76, 0x0a, 0x19, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x14, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x17, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2a, 0x0a, 0x0a, 0x72, 0x65, 0x75, 0x73, + 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, + 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x09, 0x72, 0x65, 0x75, 0x73, 0x65, + 0x50, 0x6f, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x11, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x72, + 0x65, 0x75, 0x73, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 
0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x52, 0x65, 0x75, 0x73, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x43, 0x0a, 0x0a, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, + 0x67, 0x12, 0x46, 0x0a, 0x10, 0x74, 0x63, 0x70, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6c, 0x6f, 0x67, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x74, 0x63, 0x70, 0x42, 0x61, + 0x63, 0x6b, 0x6c, 0x6f, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x7f, 0x0a, 0x2a, 0x6d, 0x61, 0x78, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x74, 0x6f, 0x5f, + 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x2a, 0x02, 0x20, 0x00, 0x52, 0x24, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x54, 0x6f, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x50, 0x65, 0x72, 0x53, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x0c, 0x62, 0x69, + 0x6e, 0x64, 0x5f, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x62, 0x69, + 0x6e, 0x64, 0x54, 0x6f, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x68, 0x0a, 0x11, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x18, 0x1b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, + 0x52, 0x10, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6d, 0x70, 0x74, + 0x63, 0x70, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x4d, 0x70, 0x74, 0x63, 0x70, 0x12, 0x37, 0x0a, 0x18, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, + 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x47, + 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x36, + 0x0a, 0x17, 0x62, 
0x79, 0x70, 0x61, 0x73, 0x73, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x18, 0x23, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x15, 0x62, 0x79, 0x70, 0x61, 0x73, 0x73, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x4d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x1a, 0x77, 0x0a, 0x0c, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x56, 0x31, 0x12, 0x3c, 0x0a, 0x0c, 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x74, + 0x6f, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, + 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x62, 0x69, 0x6e, 0x64, 0x54, 0x6f, + 0x50, 0x6f, 0x72, 0x74, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x2e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x56, 0x31, 0x1a, + 0xfc, 0x02, 0x0a, 0x17, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6e, 0x0a, 0x0d, 0x65, + 0x78, 0x61, 0x63, 0x74, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, + 0x78, 0x61, 0x63, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x65, + 0x78, 0x61, 0x63, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x53, 0x0a, 0x0e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, + 0x00, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x1a, 0x51, 0x0a, 0x0c, 0x45, 0x78, 0x61, 0x63, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x3a, 0x41, 0x9a, 0xc5, 0x88, 0x1e, 0x3c, 0x0a, 0x3a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x78, 0x61, 0x63, 0x74, 0x42, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x65, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x13, 0x0a, 0x0c, 0x62, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x18, + 0x0a, 0x16, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 
0x67, 0x22, 0x29, 0x0a, 0x09, 0x44, 0x72, 0x61, 0x69, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, + 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x4f, 0x44, 0x49, 0x46, 0x59, 0x5f, 0x4f, 0x4e, 0x4c, + 0x59, 0x10, 0x01, 0x3a, 0x1c, 0x9a, 0xc5, 0x88, 0x1e, 0x17, 0x0a, 0x15, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x42, 0x14, 0x0a, 0x12, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x73, 0x70, + 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x0e, 0x10, 0x0f, 0x4a, 0x04, 0x08, + 0x17, 0x10, 0x18, 0x22, 0x11, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x4d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x22, 0x1b, 0x0a, 0x19, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x22, 0x14, 0x0a, 0x12, 0x41, 0x70, 0x69, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x42, 0x8d, 0x01, 0xba, 0x80, 0xc8, 0xd1, + 0x06, 0x02, 0x10, 0x02, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x4c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_envoy_config_listener_v3_listener_proto_rawDescOnce sync.Once + file_envoy_config_listener_v3_listener_proto_rawDescData = file_envoy_config_listener_v3_listener_proto_rawDesc +) + +func file_envoy_config_listener_v3_listener_proto_rawDescGZIP() []byte { + file_envoy_config_listener_v3_listener_proto_rawDescOnce.Do(func() { + file_envoy_config_listener_v3_listener_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_listener_v3_listener_proto_rawDescData) + }) + return file_envoy_config_listener_v3_listener_proto_rawDescData +} + +var file_envoy_config_listener_v3_listener_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_config_listener_v3_listener_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_envoy_config_listener_v3_listener_proto_goTypes = []interface{}{ + (Listener_DrainType)(0), // 0: envoy.config.listener.v3.Listener.DrainType + (*AdditionalAddress)(nil), // 1: envoy.config.listener.v3.AdditionalAddress + (*ListenerCollection)(nil), // 2: envoy.config.listener.v3.ListenerCollection + (*Listener)(nil), // 3: envoy.config.listener.v3.Listener + (*ListenerManager)(nil), // 4: envoy.config.listener.v3.ListenerManager + (*ValidationListenerManager)(nil), // 5: envoy.config.listener.v3.ValidationListenerManager + (*ApiListenerManager)(nil), // 6: envoy.config.listener.v3.ApiListenerManager + (*Listener_DeprecatedV1)(nil), // 7: envoy.config.listener.v3.Listener.DeprecatedV1 + (*Listener_ConnectionBalanceConfig)(nil), // 8: 
envoy.config.listener.v3.Listener.ConnectionBalanceConfig + (*Listener_InternalListenerConfig)(nil), // 9: envoy.config.listener.v3.Listener.InternalListenerConfig + (*Listener_ConnectionBalanceConfig_ExactBalance)(nil), // 10: envoy.config.listener.v3.Listener.ConnectionBalanceConfig.ExactBalance + (*v3.Address)(nil), // 11: envoy.config.core.v3.Address + (*v3.SocketOptionsOverride)(nil), // 12: envoy.config.core.v3.SocketOptionsOverride + (*v31.CollectionEntry)(nil), // 13: xds.core.v3.CollectionEntry + (*FilterChain)(nil), // 14: envoy.config.listener.v3.FilterChain + (*v32.Matcher)(nil), // 15: xds.type.matcher.v3.Matcher + (*wrapperspb.BoolValue)(nil), // 16: google.protobuf.BoolValue + (*wrapperspb.UInt32Value)(nil), // 17: google.protobuf.UInt32Value + (*v3.Metadata)(nil), // 18: envoy.config.core.v3.Metadata + (*ListenerFilter)(nil), // 19: envoy.config.listener.v3.ListenerFilter + (*durationpb.Duration)(nil), // 20: google.protobuf.Duration + (*v3.SocketOption)(nil), // 21: envoy.config.core.v3.SocketOption + (v3.TrafficDirection)(0), // 22: envoy.config.core.v3.TrafficDirection + (*UdpListenerConfig)(nil), // 23: envoy.config.listener.v3.UdpListenerConfig + (*ApiListener)(nil), // 24: envoy.config.listener.v3.ApiListener + (*v33.AccessLog)(nil), // 25: envoy.config.accesslog.v3.AccessLog + (*v3.TypedExtensionConfig)(nil), // 26: envoy.config.core.v3.TypedExtensionConfig +} +var file_envoy_config_listener_v3_listener_proto_depIdxs = []int32{ + 11, // 0: envoy.config.listener.v3.AdditionalAddress.address:type_name -> envoy.config.core.v3.Address + 12, // 1: envoy.config.listener.v3.AdditionalAddress.socket_options:type_name -> envoy.config.core.v3.SocketOptionsOverride + 13, // 2: envoy.config.listener.v3.ListenerCollection.entries:type_name -> xds.core.v3.CollectionEntry + 11, // 3: envoy.config.listener.v3.Listener.address:type_name -> envoy.config.core.v3.Address + 1, // 4: envoy.config.listener.v3.Listener.additional_addresses:type_name -> envoy.config.listener.v3.AdditionalAddress + 14, // 5: envoy.config.listener.v3.Listener.filter_chains:type_name -> envoy.config.listener.v3.FilterChain + 15, // 6: envoy.config.listener.v3.Listener.filter_chain_matcher:type_name -> xds.type.matcher.v3.Matcher + 16, // 7: envoy.config.listener.v3.Listener.use_original_dst:type_name -> google.protobuf.BoolValue + 14, // 8: envoy.config.listener.v3.Listener.default_filter_chain:type_name -> envoy.config.listener.v3.FilterChain + 17, // 9: envoy.config.listener.v3.Listener.per_connection_buffer_limit_bytes:type_name -> google.protobuf.UInt32Value + 18, // 10: envoy.config.listener.v3.Listener.metadata:type_name -> envoy.config.core.v3.Metadata + 7, // 11: envoy.config.listener.v3.Listener.deprecated_v1:type_name -> envoy.config.listener.v3.Listener.DeprecatedV1 + 0, // 12: envoy.config.listener.v3.Listener.drain_type:type_name -> envoy.config.listener.v3.Listener.DrainType + 19, // 13: envoy.config.listener.v3.Listener.listener_filters:type_name -> envoy.config.listener.v3.ListenerFilter + 20, // 14: envoy.config.listener.v3.Listener.listener_filters_timeout:type_name -> google.protobuf.Duration + 16, // 15: envoy.config.listener.v3.Listener.transparent:type_name -> google.protobuf.BoolValue + 16, // 16: envoy.config.listener.v3.Listener.freebind:type_name -> google.protobuf.BoolValue + 21, // 17: envoy.config.listener.v3.Listener.socket_options:type_name -> envoy.config.core.v3.SocketOption + 17, // 18: envoy.config.listener.v3.Listener.tcp_fast_open_queue_length:type_name -> 
google.protobuf.UInt32Value + 22, // 19: envoy.config.listener.v3.Listener.traffic_direction:type_name -> envoy.config.core.v3.TrafficDirection + 23, // 20: envoy.config.listener.v3.Listener.udp_listener_config:type_name -> envoy.config.listener.v3.UdpListenerConfig + 24, // 21: envoy.config.listener.v3.Listener.api_listener:type_name -> envoy.config.listener.v3.ApiListener + 8, // 22: envoy.config.listener.v3.Listener.connection_balance_config:type_name -> envoy.config.listener.v3.Listener.ConnectionBalanceConfig + 16, // 23: envoy.config.listener.v3.Listener.enable_reuse_port:type_name -> google.protobuf.BoolValue + 25, // 24: envoy.config.listener.v3.Listener.access_log:type_name -> envoy.config.accesslog.v3.AccessLog + 17, // 25: envoy.config.listener.v3.Listener.tcp_backlog_size:type_name -> google.protobuf.UInt32Value + 17, // 26: envoy.config.listener.v3.Listener.max_connections_to_accept_per_socket_event:type_name -> google.protobuf.UInt32Value + 16, // 27: envoy.config.listener.v3.Listener.bind_to_port:type_name -> google.protobuf.BoolValue + 9, // 28: envoy.config.listener.v3.Listener.internal_listener:type_name -> envoy.config.listener.v3.Listener.InternalListenerConfig + 16, // 29: envoy.config.listener.v3.Listener.DeprecatedV1.bind_to_port:type_name -> google.protobuf.BoolValue + 10, // 30: envoy.config.listener.v3.Listener.ConnectionBalanceConfig.exact_balance:type_name -> envoy.config.listener.v3.Listener.ConnectionBalanceConfig.ExactBalance + 26, // 31: envoy.config.listener.v3.Listener.ConnectionBalanceConfig.extend_balance:type_name -> envoy.config.core.v3.TypedExtensionConfig + 32, // [32:32] is the sub-list for method output_type + 32, // [32:32] is the sub-list for method input_type + 32, // [32:32] is the sub-list for extension type_name + 32, // [32:32] is the sub-list for extension extendee + 0, // [0:32] is the sub-list for field type_name +} + +func init() { file_envoy_config_listener_v3_listener_proto_init() } +func file_envoy_config_listener_v3_listener_proto_init() { + if File_envoy_config_listener_v3_listener_proto != nil { + return + } + file_envoy_config_listener_v3_api_listener_proto_init() + file_envoy_config_listener_v3_listener_components_proto_init() + file_envoy_config_listener_v3_udp_listener_config_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_listener_v3_listener_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AdditionalAddress); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenerCollection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Listener); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenerManager); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + 
switch v := v.(*ValidationListenerManager); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApiListenerManager); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Listener_DeprecatedV1); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Listener_ConnectionBalanceConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Listener_InternalListenerConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Listener_ConnectionBalanceConfig_ExactBalance); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_listener_v3_listener_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Listener_InternalListener)(nil), + } + file_envoy_config_listener_v3_listener_proto_msgTypes[7].OneofWrappers = []interface{}{ + (*Listener_ConnectionBalanceConfig_ExactBalance_)(nil), + (*Listener_ConnectionBalanceConfig_ExtendBalance)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_listener_v3_listener_proto_rawDesc, + NumEnums: 1, + NumMessages: 10, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_listener_v3_listener_proto_goTypes, + DependencyIndexes: file_envoy_config_listener_v3_listener_proto_depIdxs, + EnumInfos: file_envoy_config_listener_v3_listener_proto_enumTypes, + MessageInfos: file_envoy_config_listener_v3_listener_proto_msgTypes, + }.Build() + File_envoy_config_listener_v3_listener_proto = out.File + file_envoy_config_listener_v3_listener_proto_rawDesc = nil + file_envoy_config_listener_v3_listener_proto_goTypes = nil + file_envoy_config_listener_v3_listener_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.validate.go new file mode 100644 index 0000000000..9fd429baed --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener.pb.validate.go @@ -0,0 +1,2030 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/config/listener/v3/listener.proto + +package listenerv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = v3.TrafficDirection(0) +) + +// Validate checks the field values on AdditionalAddress with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *AdditionalAddress) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on AdditionalAddress with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// AdditionalAddressMultiError, or nil if none found. +func (m *AdditionalAddress) ValidateAll() error { + return m.validate(true) +} + +func (m *AdditionalAddress) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AdditionalAddressValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AdditionalAddressValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AdditionalAddressValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSocketOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AdditionalAddressValidationError{ + field: "SocketOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AdditionalAddressValidationError{ + field: "SocketOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSocketOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AdditionalAddressValidationError{ + field: "SocketOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return AdditionalAddressMultiError(errors) + } + + return nil +} + +// AdditionalAddressMultiError is an error wrapping multiple validation errors +// returned by AdditionalAddress.ValidateAll() if the designated constraints +// aren't met. +type AdditionalAddressMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m AdditionalAddressMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AdditionalAddressMultiError) AllErrors() []error { return m } + +// AdditionalAddressValidationError is the validation error returned by +// AdditionalAddress.Validate if the designated constraints aren't met. +type AdditionalAddressValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AdditionalAddressValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AdditionalAddressValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e AdditionalAddressValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AdditionalAddressValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e AdditionalAddressValidationError) ErrorName() string { + return "AdditionalAddressValidationError" +} + +// Error satisfies the builtin error interface +func (e AdditionalAddressValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAdditionalAddress.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AdditionalAddressValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AdditionalAddressValidationError{} + +// Validate checks the field values on ListenerCollection with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ListenerCollection) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListenerCollection with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ListenerCollectionMultiError, or nil if none found. 
+func (m *ListenerCollection) ValidateAll() error { + return m.validate(true) +} + +func (m *ListenerCollection) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetEntries() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerCollectionValidationError{ + field: fmt.Sprintf("Entries[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerCollectionValidationError{ + field: fmt.Sprintf("Entries[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerCollectionValidationError{ + field: fmt.Sprintf("Entries[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ListenerCollectionMultiError(errors) + } + + return nil +} + +// ListenerCollectionMultiError is an error wrapping multiple validation errors +// returned by ListenerCollection.ValidateAll() if the designated constraints +// aren't met. +type ListenerCollectionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenerCollectionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenerCollectionMultiError) AllErrors() []error { return m } + +// ListenerCollectionValidationError is the validation error returned by +// ListenerCollection.Validate if the designated constraints aren't met. +type ListenerCollectionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenerCollectionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenerCollectionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenerCollectionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListenerCollectionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListenerCollectionValidationError) ErrorName() string { + return "ListenerCollectionValidationError" +} + +// Error satisfies the builtin error interface +func (e ListenerCollectionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListenerCollection.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenerCollectionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenerCollectionValidationError{} + +// Validate checks the field values on Listener with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *Listener) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Listener with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ListenerMultiError, or nil +// if none found. +func (m *Listener) ValidateAll() error { + return m.validate(true) +} + +func (m *Listener) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + if all { + switch v := interface{}(m.GetAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetAdditionalAddresses() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: fmt.Sprintf("AdditionalAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: fmt.Sprintf("AdditionalAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: fmt.Sprintf("AdditionalAddresses[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for StatPrefix + + for idx, item := range m.GetFilterChains() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: fmt.Sprintf("FilterChains[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: fmt.Sprintf("FilterChains[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: fmt.Sprintf("FilterChains[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetFilterChainMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "FilterChainMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != 
nil { + errors = append(errors, ListenerValidationError{ + field: "FilterChainMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilterChainMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "FilterChainMatcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUseOriginalDst()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "UseOriginalDst", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "UseOriginalDst", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUseOriginalDst()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "UseOriginalDst", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetDefaultFilterChain()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "DefaultFilterChain", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "DefaultFilterChain", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDefaultFilterChain()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "DefaultFilterChain", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPerConnectionBufferLimitBytes()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "PerConnectionBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "PerConnectionBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPerConnectionBufferLimitBytes()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "PerConnectionBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := 
v.Validate(); err != nil { + return ListenerValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetDeprecatedV1()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "DeprecatedV1", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "DeprecatedV1", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDeprecatedV1()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "DeprecatedV1", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for DrainType + + for idx, item := range m.GetListenerFilters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: fmt.Sprintf("ListenerFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: fmt.Sprintf("ListenerFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: fmt.Sprintf("ListenerFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetListenerFiltersTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "ListenerFiltersTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "ListenerFiltersTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetListenerFiltersTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "ListenerFiltersTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ContinueOnListenerFiltersTimeout + + if all { + switch v := interface{}(m.GetTransparent()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "Transparent", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "Transparent", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTransparent()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "Transparent", + reason: "embedded message failed 
validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetFreebind()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "Freebind", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "Freebind", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFreebind()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "Freebind", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetSocketOptions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: fmt.Sprintf("SocketOptions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetTcpFastOpenQueueLength()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "TcpFastOpenQueueLength", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "TcpFastOpenQueueLength", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTcpFastOpenQueueLength()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "TcpFastOpenQueueLength", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for TrafficDirection + + if all { + switch v := interface{}(m.GetUdpListenerConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "UdpListenerConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "UdpListenerConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUdpListenerConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "UdpListenerConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetApiListener()).(type) { + case interface{ ValidateAll() error }: + if err := 
v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "ApiListener", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "ApiListener", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetApiListener()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "ApiListener", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetConnectionBalanceConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "ConnectionBalanceConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "ConnectionBalanceConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConnectionBalanceConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "ConnectionBalanceConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ReusePort + + if all { + switch v := interface{}(m.GetEnableReusePort()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "EnableReusePort", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "EnableReusePort", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEnableReusePort()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "EnableReusePort", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetAccessLog() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: fmt.Sprintf("AccessLog[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: fmt.Sprintf("AccessLog[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: fmt.Sprintf("AccessLog[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetTcpBacklogSize()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "TcpBacklogSize", + reason: "embedded message failed validation", + cause: err, + }) + } + 
case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "TcpBacklogSize", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTcpBacklogSize()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "TcpBacklogSize", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if wrapper := m.GetMaxConnectionsToAcceptPerSocketEvent(); wrapper != nil { + + if wrapper.GetValue() <= 0 { + err := ListenerValidationError{ + field: "MaxConnectionsToAcceptPerSocketEvent", + reason: "value must be greater than 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetBindToPort()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "BindToPort", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "BindToPort", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBindToPort()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "BindToPort", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for EnableMptcp + + // no validation rules for IgnoreGlobalConnLimit + + // no validation rules for BypassOverloadManager + + switch v := m.ListenerSpecifier.(type) { + case *Listener_InternalListener: + if v == nil { + err := ListenerValidationError{ + field: "ListenerSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetInternalListener()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "InternalListener", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerValidationError{ + field: "InternalListener", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInternalListener()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerValidationError{ + field: "InternalListener", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return ListenerMultiError(errors) + } + + return nil +} + +// ListenerMultiError is an error wrapping multiple validation errors returned +// by Listener.ValidateAll() if the designated constraints aren't met. +type ListenerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m ListenerMultiError) AllErrors() []error { return m } + +// ListenerValidationError is the validation error returned by +// Listener.Validate if the designated constraints aren't met. +type ListenerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListenerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListenerValidationError) ErrorName() string { return "ListenerValidationError" } + +// Error satisfies the builtin error interface +func (e ListenerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListener.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenerValidationError{} + +// Validate checks the field values on ListenerManager with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ListenerManager) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListenerManager with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ListenerManagerMultiError, or nil if none found. +func (m *ListenerManager) ValidateAll() error { + return m.validate(true) +} + +func (m *ListenerManager) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return ListenerManagerMultiError(errors) + } + + return nil +} + +// ListenerManagerMultiError is an error wrapping multiple validation errors +// returned by ListenerManager.ValidateAll() if the designated constraints +// aren't met. +type ListenerManagerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenerManagerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenerManagerMultiError) AllErrors() []error { return m } + +// ListenerManagerValidationError is the validation error returned by +// ListenerManager.Validate if the designated constraints aren't met. +type ListenerManagerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenerManagerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenerManagerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenerManagerValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e ListenerManagerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListenerManagerValidationError) ErrorName() string { return "ListenerManagerValidationError" } + +// Error satisfies the builtin error interface +func (e ListenerManagerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListenerManager.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenerManagerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenerManagerValidationError{} + +// Validate checks the field values on ValidationListenerManager with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ValidationListenerManager) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ValidationListenerManager with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ValidationListenerManagerMultiError, or nil if none found. +func (m *ValidationListenerManager) ValidateAll() error { + return m.validate(true) +} + +func (m *ValidationListenerManager) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return ValidationListenerManagerMultiError(errors) + } + + return nil +} + +// ValidationListenerManagerMultiError is an error wrapping multiple validation +// errors returned by ValidationListenerManager.ValidateAll() if the +// designated constraints aren't met. +type ValidationListenerManagerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ValidationListenerManagerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ValidationListenerManagerMultiError) AllErrors() []error { return m } + +// ValidationListenerManagerValidationError is the validation error returned by +// ValidationListenerManager.Validate if the designated constraints aren't met. +type ValidationListenerManagerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ValidationListenerManagerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ValidationListenerManagerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ValidationListenerManagerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ValidationListenerManagerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ValidationListenerManagerValidationError) ErrorName() string { + return "ValidationListenerManagerValidationError" +} + +// Error satisfies the builtin error interface +func (e ValidationListenerManagerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sValidationListenerManager.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ValidationListenerManagerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ValidationListenerManagerValidationError{} + +// Validate checks the field values on ApiListenerManager with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ApiListenerManager) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ApiListenerManager with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ApiListenerManagerMultiError, or nil if none found. +func (m *ApiListenerManager) ValidateAll() error { + return m.validate(true) +} + +func (m *ApiListenerManager) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return ApiListenerManagerMultiError(errors) + } + + return nil +} + +// ApiListenerManagerMultiError is an error wrapping multiple validation errors +// returned by ApiListenerManager.ValidateAll() if the designated constraints +// aren't met. +type ApiListenerManagerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ApiListenerManagerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ApiListenerManagerMultiError) AllErrors() []error { return m } + +// ApiListenerManagerValidationError is the validation error returned by +// ApiListenerManager.Validate if the designated constraints aren't met. +type ApiListenerManagerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ApiListenerManagerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ApiListenerManagerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ApiListenerManagerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ApiListenerManagerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ApiListenerManagerValidationError) ErrorName() string { + return "ApiListenerManagerValidationError" +} + +// Error satisfies the builtin error interface +func (e ApiListenerManagerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sApiListenerManager.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ApiListenerManagerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ApiListenerManagerValidationError{} + +// Validate checks the field values on Listener_DeprecatedV1 with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Listener_DeprecatedV1) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Listener_DeprecatedV1 with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Listener_DeprecatedV1MultiError, or nil if none found. +func (m *Listener_DeprecatedV1) ValidateAll() error { + return m.validate(true) +} + +func (m *Listener_DeprecatedV1) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetBindToPort()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Listener_DeprecatedV1ValidationError{ + field: "BindToPort", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Listener_DeprecatedV1ValidationError{ + field: "BindToPort", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBindToPort()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Listener_DeprecatedV1ValidationError{ + field: "BindToPort", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Listener_DeprecatedV1MultiError(errors) + } + + return nil +} + +// Listener_DeprecatedV1MultiError is an error wrapping multiple validation +// errors returned by Listener_DeprecatedV1.ValidateAll() if the designated +// constraints aren't met. +type Listener_DeprecatedV1MultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Listener_DeprecatedV1MultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Listener_DeprecatedV1MultiError) AllErrors() []error { return m } + +// Listener_DeprecatedV1ValidationError is the validation error returned by +// Listener_DeprecatedV1.Validate if the designated constraints aren't met. +type Listener_DeprecatedV1ValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Listener_DeprecatedV1ValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e Listener_DeprecatedV1ValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Listener_DeprecatedV1ValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Listener_DeprecatedV1ValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Listener_DeprecatedV1ValidationError) ErrorName() string { + return "Listener_DeprecatedV1ValidationError" +} + +// Error satisfies the builtin error interface +func (e Listener_DeprecatedV1ValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListener_DeprecatedV1.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Listener_DeprecatedV1ValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Listener_DeprecatedV1ValidationError{} + +// Validate checks the field values on Listener_ConnectionBalanceConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *Listener_ConnectionBalanceConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Listener_ConnectionBalanceConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// Listener_ConnectionBalanceConfigMultiError, or nil if none found. +func (m *Listener_ConnectionBalanceConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Listener_ConnectionBalanceConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofBalanceTypePresent := false + switch v := m.BalanceType.(type) { + case *Listener_ConnectionBalanceConfig_ExactBalance_: + if v == nil { + err := Listener_ConnectionBalanceConfigValidationError{ + field: "BalanceType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofBalanceTypePresent = true + + if all { + switch v := interface{}(m.GetExactBalance()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Listener_ConnectionBalanceConfigValidationError{ + field: "ExactBalance", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Listener_ConnectionBalanceConfigValidationError{ + field: "ExactBalance", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExactBalance()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Listener_ConnectionBalanceConfigValidationError{ + field: "ExactBalance", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Listener_ConnectionBalanceConfig_ExtendBalance: + if v == nil { + err := Listener_ConnectionBalanceConfigValidationError{ + field: "BalanceType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofBalanceTypePresent = true + + if all { + switch v := interface{}(m.GetExtendBalance()).(type) { + case interface{ 
ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Listener_ConnectionBalanceConfigValidationError{ + field: "ExtendBalance", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Listener_ConnectionBalanceConfigValidationError{ + field: "ExtendBalance", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExtendBalance()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Listener_ConnectionBalanceConfigValidationError{ + field: "ExtendBalance", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofBalanceTypePresent { + err := Listener_ConnectionBalanceConfigValidationError{ + field: "BalanceType", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return Listener_ConnectionBalanceConfigMultiError(errors) + } + + return nil +} + +// Listener_ConnectionBalanceConfigMultiError is an error wrapping multiple +// validation errors returned by +// Listener_ConnectionBalanceConfig.ValidateAll() if the designated +// constraints aren't met. +type Listener_ConnectionBalanceConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Listener_ConnectionBalanceConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Listener_ConnectionBalanceConfigMultiError) AllErrors() []error { return m } + +// Listener_ConnectionBalanceConfigValidationError is the validation error +// returned by Listener_ConnectionBalanceConfig.Validate if the designated +// constraints aren't met. +type Listener_ConnectionBalanceConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Listener_ConnectionBalanceConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Listener_ConnectionBalanceConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Listener_ConnectionBalanceConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Listener_ConnectionBalanceConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Listener_ConnectionBalanceConfigValidationError) ErrorName() string { + return "Listener_ConnectionBalanceConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Listener_ConnectionBalanceConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListener_ConnectionBalanceConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Listener_ConnectionBalanceConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Listener_ConnectionBalanceConfigValidationError{} + +// Validate checks the field values on Listener_InternalListenerConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Listener_InternalListenerConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Listener_InternalListenerConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// Listener_InternalListenerConfigMultiError, or nil if none found. +func (m *Listener_InternalListenerConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *Listener_InternalListenerConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return Listener_InternalListenerConfigMultiError(errors) + } + + return nil +} + +// Listener_InternalListenerConfigMultiError is an error wrapping multiple +// validation errors returned by Listener_InternalListenerConfig.ValidateAll() +// if the designated constraints aren't met. +type Listener_InternalListenerConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Listener_InternalListenerConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Listener_InternalListenerConfigMultiError) AllErrors() []error { return m } + +// Listener_InternalListenerConfigValidationError is the validation error +// returned by Listener_InternalListenerConfig.Validate if the designated +// constraints aren't met. +type Listener_InternalListenerConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Listener_InternalListenerConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Listener_InternalListenerConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Listener_InternalListenerConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Listener_InternalListenerConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Listener_InternalListenerConfigValidationError) ErrorName() string { + return "Listener_InternalListenerConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e Listener_InternalListenerConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListener_InternalListenerConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Listener_InternalListenerConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Listener_InternalListenerConfigValidationError{} + +// Validate checks the field values on +// Listener_ConnectionBalanceConfig_ExactBalance with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Listener_ConnectionBalanceConfig_ExactBalance) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// Listener_ConnectionBalanceConfig_ExactBalance with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// Listener_ConnectionBalanceConfig_ExactBalanceMultiError, or nil if none found. +func (m *Listener_ConnectionBalanceConfig_ExactBalance) ValidateAll() error { + return m.validate(true) +} + +func (m *Listener_ConnectionBalanceConfig_ExactBalance) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return Listener_ConnectionBalanceConfig_ExactBalanceMultiError(errors) + } + + return nil +} + +// Listener_ConnectionBalanceConfig_ExactBalanceMultiError is an error wrapping +// multiple validation errors returned by +// Listener_ConnectionBalanceConfig_ExactBalance.ValidateAll() if the +// designated constraints aren't met. +type Listener_ConnectionBalanceConfig_ExactBalanceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Listener_ConnectionBalanceConfig_ExactBalanceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Listener_ConnectionBalanceConfig_ExactBalanceMultiError) AllErrors() []error { return m } + +// Listener_ConnectionBalanceConfig_ExactBalanceValidationError is the +// validation error returned by +// Listener_ConnectionBalanceConfig_ExactBalance.Validate if the designated +// constraints aren't met. +type Listener_ConnectionBalanceConfig_ExactBalanceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Listener_ConnectionBalanceConfig_ExactBalanceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Listener_ConnectionBalanceConfig_ExactBalanceValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e Listener_ConnectionBalanceConfig_ExactBalanceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Listener_ConnectionBalanceConfig_ExactBalanceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Listener_ConnectionBalanceConfig_ExactBalanceValidationError) ErrorName() string { + return "Listener_ConnectionBalanceConfig_ExactBalanceValidationError" +} + +// Error satisfies the builtin error interface +func (e Listener_ConnectionBalanceConfig_ExactBalanceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListener_ConnectionBalanceConfig_ExactBalance.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Listener_ConnectionBalanceConfig_ExactBalanceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Listener_ConnectionBalanceConfig_ExactBalanceValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.go new file mode 100644 index 0000000000..b6dad7e30b --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.go @@ -0,0 +1,1353 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/listener/v3/listener_components.proto + +package listenerv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type FilterChainMatch_ConnectionSourceType int32 + +const ( + // Any connection source matches. + FilterChainMatch_ANY FilterChainMatch_ConnectionSourceType = 0 + // Match a connection originating from the same host. + FilterChainMatch_SAME_IP_OR_LOOPBACK FilterChainMatch_ConnectionSourceType = 1 + // Match a connection originating from a different host. + FilterChainMatch_EXTERNAL FilterChainMatch_ConnectionSourceType = 2 +) + +// Enum value maps for FilterChainMatch_ConnectionSourceType. 
+var ( + FilterChainMatch_ConnectionSourceType_name = map[int32]string{ + 0: "ANY", + 1: "SAME_IP_OR_LOOPBACK", + 2: "EXTERNAL", + } + FilterChainMatch_ConnectionSourceType_value = map[string]int32{ + "ANY": 0, + "SAME_IP_OR_LOOPBACK": 1, + "EXTERNAL": 2, + } +) + +func (x FilterChainMatch_ConnectionSourceType) Enum() *FilterChainMatch_ConnectionSourceType { + p := new(FilterChainMatch_ConnectionSourceType) + *p = x + return p +} + +func (x FilterChainMatch_ConnectionSourceType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FilterChainMatch_ConnectionSourceType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_listener_v3_listener_components_proto_enumTypes[0].Descriptor() +} + +func (FilterChainMatch_ConnectionSourceType) Type() protoreflect.EnumType { + return &file_envoy_config_listener_v3_listener_components_proto_enumTypes[0] +} + +func (x FilterChainMatch_ConnectionSourceType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FilterChainMatch_ConnectionSourceType.Descriptor instead. +func (FilterChainMatch_ConnectionSourceType) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_components_proto_rawDescGZIP(), []int{1, 0} +} + +// [#next-free-field: 6] +type Filter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the filter configuration. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to ConfigType: + // + // *Filter_TypedConfig + // *Filter_ConfigDiscovery + ConfigType isFilter_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *Filter) Reset() { + *x = Filter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Filter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Filter) ProtoMessage() {} + +func (x *Filter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Filter.ProtoReflect.Descriptor instead. +func (*Filter) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_components_proto_rawDescGZIP(), []int{0} +} + +func (x *Filter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *Filter) GetConfigType() isFilter_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *Filter) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*Filter_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +func (x *Filter) GetConfigDiscovery() *v3.ExtensionConfigSource { + if x, ok := x.GetConfigType().(*Filter_ConfigDiscovery); ok { + return x.ConfigDiscovery + } + return nil +} + +type isFilter_ConfigType interface { + isFilter_ConfigType() +} + +type Filter_TypedConfig struct { + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. 
+ // [#extension-category: envoy.filters.network] + TypedConfig *anypb.Any `protobuf:"bytes,4,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +type Filter_ConfigDiscovery struct { + // Configuration source specifier for an extension configuration discovery + // service. In case of a failure and without the default configuration, the + // listener closes the connections. + ConfigDiscovery *v3.ExtensionConfigSource `protobuf:"bytes,5,opt,name=config_discovery,json=configDiscovery,proto3,oneof"` +} + +func (*Filter_TypedConfig) isFilter_ConfigType() {} + +func (*Filter_ConfigDiscovery) isFilter_ConfigType() {} + +// Specifies the match criteria for selecting a specific filter chain for a +// listener. +// +// In order for a filter chain to be selected, *ALL* of its criteria must be +// fulfilled by the incoming connection, properties of which are set by the +// networking stack and/or listener filters. +// +// The following order applies: +// +// 1. Destination port. +// 2. Destination IP address. +// 3. Server name (e.g. SNI for TLS protocol), +// 4. Transport protocol. +// 5. Application protocols (e.g. ALPN for TLS protocol). +// 6. Directly connected source IP address (this will only be different from the source IP address +// when using a listener filter that overrides the source address, such as the :ref:`Proxy Protocol +// listener filter `). +// 7. Source type (e.g. any, local or external network). +// 8. Source IP address. +// 9. Source port. +// +// For criteria that allow ranges or wildcards, the most specific value in any +// of the configured filter chains that matches the incoming connection is going +// to be used (e.g. for SNI “www.example.com“ the most specific match would be +// “www.example.com“, then “*.example.com“, then “*.com“, then any filter +// chain without “server_names“ requirements). +// +// A different way to reason about the filter chain matches: +// Suppose there exists N filter chains. Prune the filter chain set using the above 8 steps. +// In each step, filter chains which most specifically matches the attributes continue to the next step. +// The listener guarantees at most 1 filter chain is left after all of the steps. +// +// Example: +// +// For destination port, filter chains specifying the destination port of incoming traffic are the +// most specific match. If none of the filter chains specifies the exact destination port, the filter +// chains which do not specify ports are the most specific match. Filter chains specifying the +// wrong port can never be the most specific match. +// +// [#comment: Implemented rules are kept in the preference order, with deprecated fields +// listed at the end, because that's how we want to list them in the docs. +// +// [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] +// [#next-free-field: 14] +type FilterChainMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional destination port to consider when use_original_dst is set on the + // listener in determining a filter chain match. + DestinationPort *wrapperspb.UInt32Value `protobuf:"bytes,8,opt,name=destination_port,json=destinationPort,proto3" json:"destination_port,omitempty"` + // If non-empty, an IP address and prefix length to match addresses when the + // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. 
+ PrefixRanges []*v3.CidrRange `protobuf:"bytes,3,rep,name=prefix_ranges,json=prefixRanges,proto3" json:"prefix_ranges,omitempty"` + // If non-empty, an IP address and suffix length to match addresses when the + // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. + // [#not-implemented-hide:] + AddressSuffix string `protobuf:"bytes,4,opt,name=address_suffix,json=addressSuffix,proto3" json:"address_suffix,omitempty"` + // [#not-implemented-hide:] + SuffixLen *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=suffix_len,json=suffixLen,proto3" json:"suffix_len,omitempty"` + // The criteria is satisfied if the directly connected source IP address of the downstream + // connection is contained in at least one of the specified subnets. If the parameter is not + // specified or the list is empty, the directly connected source IP address is ignored. + DirectSourcePrefixRanges []*v3.CidrRange `protobuf:"bytes,13,rep,name=direct_source_prefix_ranges,json=directSourcePrefixRanges,proto3" json:"direct_source_prefix_ranges,omitempty"` + // Specifies the connection source IP match type. Can be any, local or external network. + SourceType FilterChainMatch_ConnectionSourceType `protobuf:"varint,12,opt,name=source_type,json=sourceType,proto3,enum=envoy.config.listener.v3.FilterChainMatch_ConnectionSourceType" json:"source_type,omitempty"` + // The criteria is satisfied if the source IP address of the downstream + // connection is contained in at least one of the specified subnets. If the + // parameter is not specified or the list is empty, the source IP address is + // ignored. + SourcePrefixRanges []*v3.CidrRange `protobuf:"bytes,6,rep,name=source_prefix_ranges,json=sourcePrefixRanges,proto3" json:"source_prefix_ranges,omitempty"` + // The criteria is satisfied if the source port of the downstream connection + // is contained in at least one of the specified ports. If the parameter is + // not specified, the source port is ignored. + SourcePorts []uint32 `protobuf:"varint,7,rep,packed,name=source_ports,json=sourcePorts,proto3" json:"source_ports,omitempty"` + // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining + // a filter chain match. Those values will be compared against the server names of a new + // connection, when detected by one of the listener filters. + // + // The server name will be matched against all wildcard domains, i.e. “www.example.com“ + // will be first matched against “www.example.com“, then “*.example.com“, then “*.com“. + // + // Note that partial wildcards are not supported, and values like “*w.example.com“ are invalid. + // The value “*“ is also not supported, and “server_names“ should be omitted instead. + // + // .. attention:: + // + // See the :ref:`FAQ entry ` on how to configure SNI for more + // information. + ServerNames []string `protobuf:"bytes,11,rep,name=server_names,json=serverNames,proto3" json:"server_names,omitempty"` + // If non-empty, a transport protocol to consider when determining a filter chain match. + // This value will be compared against the transport protocol of a new connection, when + // it's detected by one of the listener filters. + // + // Suggested values include: + // + // - “raw_buffer“ - default, used when no transport protocol is detected, + // - “tls“ - set by :ref:`envoy.filters.listener.tls_inspector ` + // when TLS protocol is detected. 
+ TransportProtocol string `protobuf:"bytes,9,opt,name=transport_protocol,json=transportProtocol,proto3" json:"transport_protocol,omitempty"` + // If non-empty, a list of application protocols (e.g. ALPN for TLS protocol) to consider when + // determining a filter chain match. Those values will be compared against the application + // protocols of a new connection, when detected by one of the listener filters. + // + // Suggested values include: + // + // - “http/1.1“ - set by :ref:`envoy.filters.listener.tls_inspector + // `, + // - “h2“ - set by :ref:`envoy.filters.listener.tls_inspector ` + // + // .. attention:: + // + // Currently, only :ref:`TLS Inspector ` provides + // application protocol detection based on the requested + // `ALPN `_ values. + // + // However, the use of ALPN is pretty much limited to the HTTP/2 traffic on the Internet, + // and matching on values other than ``h2`` is going to lead to a lot of false negatives, + // unless all connecting clients are known to use ALPN. + ApplicationProtocols []string `protobuf:"bytes,10,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` +} + +func (x *FilterChainMatch) Reset() { + *x = FilterChainMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FilterChainMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FilterChainMatch) ProtoMessage() {} + +func (x *FilterChainMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FilterChainMatch.ProtoReflect.Descriptor instead. 
+func (*FilterChainMatch) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_components_proto_rawDescGZIP(), []int{1} +} + +func (x *FilterChainMatch) GetDestinationPort() *wrapperspb.UInt32Value { + if x != nil { + return x.DestinationPort + } + return nil +} + +func (x *FilterChainMatch) GetPrefixRanges() []*v3.CidrRange { + if x != nil { + return x.PrefixRanges + } + return nil +} + +func (x *FilterChainMatch) GetAddressSuffix() string { + if x != nil { + return x.AddressSuffix + } + return "" +} + +func (x *FilterChainMatch) GetSuffixLen() *wrapperspb.UInt32Value { + if x != nil { + return x.SuffixLen + } + return nil +} + +func (x *FilterChainMatch) GetDirectSourcePrefixRanges() []*v3.CidrRange { + if x != nil { + return x.DirectSourcePrefixRanges + } + return nil +} + +func (x *FilterChainMatch) GetSourceType() FilterChainMatch_ConnectionSourceType { + if x != nil { + return x.SourceType + } + return FilterChainMatch_ANY +} + +func (x *FilterChainMatch) GetSourcePrefixRanges() []*v3.CidrRange { + if x != nil { + return x.SourcePrefixRanges + } + return nil +} + +func (x *FilterChainMatch) GetSourcePorts() []uint32 { + if x != nil { + return x.SourcePorts + } + return nil +} + +func (x *FilterChainMatch) GetServerNames() []string { + if x != nil { + return x.ServerNames + } + return nil +} + +func (x *FilterChainMatch) GetTransportProtocol() string { + if x != nil { + return x.TransportProtocol + } + return "" +} + +func (x *FilterChainMatch) GetApplicationProtocols() []string { + if x != nil { + return x.ApplicationProtocols + } + return nil +} + +// A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and +// various other parameters. +// [#next-free-field: 10] +type FilterChain struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The criteria to use when matching a connection to this filter chain. + FilterChainMatch *FilterChainMatch `protobuf:"bytes,1,opt,name=filter_chain_match,json=filterChainMatch,proto3" json:"filter_chain_match,omitempty"` + // A list of individual network filters that make up the filter chain for + // connections established with the listener. Order matters as the filters are + // processed sequentially as connection events happen. Note: If the filter + // list is empty, the connection will close by default. + // + // For QUIC listeners, network filters other than HTTP Connection Manager (HCM) + // can be created, but due to differences in the connection implementation compared + // to TCP, the onData() method will never be called. Therefore, network filters + // for QUIC listeners should only expect to do work at the start of a new connection + // (i.e. in onNewConnection()). HCM must be the last (or only) filter in the chain. + Filters []*Filter `protobuf:"bytes,3,rep,name=filters,proto3" json:"filters,omitempty"` + // Whether the listener should expect a PROXY protocol V1 header on new + // connections. If this option is enabled, the listener will assume that that + // remote address of the connection is the one specified in the header. Some + // load balancers including the AWS ELB support this option. If the option is + // absent or set to false, Envoy will use the physical peer address of the + // connection as the remote address. + // + // This field is deprecated. Add a + // :ref:`PROXY protocol listener filter ` + // explicitly instead. 
+ // + // Deprecated: Marked as deprecated in envoy/config/listener/v3/listener_components.proto. + UseProxyProto *wrapperspb.BoolValue `protobuf:"bytes,4,opt,name=use_proxy_proto,json=useProxyProto,proto3" json:"use_proxy_proto,omitempty"` + // [#not-implemented-hide:] filter chain metadata. + Metadata *v3.Metadata `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata,omitempty"` + // Optional custom transport socket implementation to use for downstream connections. + // To setup TLS, set a transport socket with name “envoy.transport_sockets.tls“ and + // :ref:`DownstreamTlsContext ` in the “typed_config“. + // If no transport socket configuration is specified, new connections + // will be set up with plaintext. + // [#extension-category: envoy.transport_sockets.downstream] + TransportSocket *v3.TransportSocket `protobuf:"bytes,6,opt,name=transport_socket,json=transportSocket,proto3" json:"transport_socket,omitempty"` + // If present and nonzero, the amount of time to allow incoming connections to complete any + // transport socket negotiations. If this expires before the transport reports connection + // establishment, the connection is summarily closed. + TransportSocketConnectTimeout *durationpb.Duration `protobuf:"bytes,9,opt,name=transport_socket_connect_timeout,json=transportSocketConnectTimeout,proto3" json:"transport_socket_connect_timeout,omitempty"` + // The unique name (or empty) by which this filter chain is known. + // Note: :ref:`filter_chain_matcher + // ` + // requires that filter chains are uniquely named within a listener. + Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` + // [#not-implemented-hide:] The configuration to specify whether the filter chain will be built on-demand. + // If this field is not empty, the filter chain will be built on-demand. + // Otherwise, the filter chain will be built normally and block listener warming. + OnDemandConfiguration *FilterChain_OnDemandConfiguration `protobuf:"bytes,8,opt,name=on_demand_configuration,json=onDemandConfiguration,proto3" json:"on_demand_configuration,omitempty"` +} + +func (x *FilterChain) Reset() { + *x = FilterChain{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FilterChain) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FilterChain) ProtoMessage() {} + +func (x *FilterChain) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FilterChain.ProtoReflect.Descriptor instead. +func (*FilterChain) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_components_proto_rawDescGZIP(), []int{2} +} + +func (x *FilterChain) GetFilterChainMatch() *FilterChainMatch { + if x != nil { + return x.FilterChainMatch + } + return nil +} + +func (x *FilterChain) GetFilters() []*Filter { + if x != nil { + return x.Filters + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/listener/v3/listener_components.proto. 
+func (x *FilterChain) GetUseProxyProto() *wrapperspb.BoolValue { + if x != nil { + return x.UseProxyProto + } + return nil +} + +func (x *FilterChain) GetMetadata() *v3.Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *FilterChain) GetTransportSocket() *v3.TransportSocket { + if x != nil { + return x.TransportSocket + } + return nil +} + +func (x *FilterChain) GetTransportSocketConnectTimeout() *durationpb.Duration { + if x != nil { + return x.TransportSocketConnectTimeout + } + return nil +} + +func (x *FilterChain) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *FilterChain) GetOnDemandConfiguration() *FilterChain_OnDemandConfiguration { + if x != nil { + return x.OnDemandConfiguration + } + return nil +} + +// Listener filter chain match configuration. This is a recursive structure which allows complex +// nested match configurations to be built using various logical operators. +// +// Examples: +// +// * Matches if the destination port is 3306. +// +// .. code-block:: yaml +// +// destination_port_range: +// start: 3306 +// end: 3307 +// +// * Matches if the destination port is 3306 or 15000. +// +// .. code-block:: yaml +// +// or_match: +// rules: +// - destination_port_range: +// start: 3306 +// end: 3307 +// - destination_port_range: +// start: 15000 +// end: 15001 +// +// [#next-free-field: 6] +type ListenerFilterChainMatchPredicate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Rule: + // + // *ListenerFilterChainMatchPredicate_OrMatch + // *ListenerFilterChainMatchPredicate_AndMatch + // *ListenerFilterChainMatchPredicate_NotMatch + // *ListenerFilterChainMatchPredicate_AnyMatch + // *ListenerFilterChainMatchPredicate_DestinationPortRange + Rule isListenerFilterChainMatchPredicate_Rule `protobuf_oneof:"rule"` +} + +func (x *ListenerFilterChainMatchPredicate) Reset() { + *x = ListenerFilterChainMatchPredicate{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenerFilterChainMatchPredicate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenerFilterChainMatchPredicate) ProtoMessage() {} + +func (x *ListenerFilterChainMatchPredicate) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenerFilterChainMatchPredicate.ProtoReflect.Descriptor instead. 
+func (*ListenerFilterChainMatchPredicate) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_components_proto_rawDescGZIP(), []int{3} +} + +func (m *ListenerFilterChainMatchPredicate) GetRule() isListenerFilterChainMatchPredicate_Rule { + if m != nil { + return m.Rule + } + return nil +} + +func (x *ListenerFilterChainMatchPredicate) GetOrMatch() *ListenerFilterChainMatchPredicate_MatchSet { + if x, ok := x.GetRule().(*ListenerFilterChainMatchPredicate_OrMatch); ok { + return x.OrMatch + } + return nil +} + +func (x *ListenerFilterChainMatchPredicate) GetAndMatch() *ListenerFilterChainMatchPredicate_MatchSet { + if x, ok := x.GetRule().(*ListenerFilterChainMatchPredicate_AndMatch); ok { + return x.AndMatch + } + return nil +} + +func (x *ListenerFilterChainMatchPredicate) GetNotMatch() *ListenerFilterChainMatchPredicate { + if x, ok := x.GetRule().(*ListenerFilterChainMatchPredicate_NotMatch); ok { + return x.NotMatch + } + return nil +} + +func (x *ListenerFilterChainMatchPredicate) GetAnyMatch() bool { + if x, ok := x.GetRule().(*ListenerFilterChainMatchPredicate_AnyMatch); ok { + return x.AnyMatch + } + return false +} + +func (x *ListenerFilterChainMatchPredicate) GetDestinationPortRange() *v31.Int32Range { + if x, ok := x.GetRule().(*ListenerFilterChainMatchPredicate_DestinationPortRange); ok { + return x.DestinationPortRange + } + return nil +} + +type isListenerFilterChainMatchPredicate_Rule interface { + isListenerFilterChainMatchPredicate_Rule() +} + +type ListenerFilterChainMatchPredicate_OrMatch struct { + // A set that describes a logical OR. If any member of the set matches, the match configuration + // matches. + OrMatch *ListenerFilterChainMatchPredicate_MatchSet `protobuf:"bytes,1,opt,name=or_match,json=orMatch,proto3,oneof"` +} + +type ListenerFilterChainMatchPredicate_AndMatch struct { + // A set that describes a logical AND. If all members of the set match, the match configuration + // matches. + AndMatch *ListenerFilterChainMatchPredicate_MatchSet `protobuf:"bytes,2,opt,name=and_match,json=andMatch,proto3,oneof"` +} + +type ListenerFilterChainMatchPredicate_NotMatch struct { + // A negation match. The match configuration will match if the negated match condition matches. + NotMatch *ListenerFilterChainMatchPredicate `protobuf:"bytes,3,opt,name=not_match,json=notMatch,proto3,oneof"` +} + +type ListenerFilterChainMatchPredicate_AnyMatch struct { + // The match configuration will always match. + AnyMatch bool `protobuf:"varint,4,opt,name=any_match,json=anyMatch,proto3,oneof"` +} + +type ListenerFilterChainMatchPredicate_DestinationPortRange struct { + // Match destination port. Particularly, the match evaluation must use the recovered local port if + // the owning listener filter is after :ref:`an original_dst listener filter `. 
+ DestinationPortRange *v31.Int32Range `protobuf:"bytes,5,opt,name=destination_port_range,json=destinationPortRange,proto3,oneof"` +} + +func (*ListenerFilterChainMatchPredicate_OrMatch) isListenerFilterChainMatchPredicate_Rule() {} + +func (*ListenerFilterChainMatchPredicate_AndMatch) isListenerFilterChainMatchPredicate_Rule() {} + +func (*ListenerFilterChainMatchPredicate_NotMatch) isListenerFilterChainMatchPredicate_Rule() {} + +func (*ListenerFilterChainMatchPredicate_AnyMatch) isListenerFilterChainMatchPredicate_Rule() {} + +func (*ListenerFilterChainMatchPredicate_DestinationPortRange) isListenerFilterChainMatchPredicate_Rule() { +} + +// [#next-free-field: 6] +type ListenerFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the filter configuration. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to ConfigType: + // + // *ListenerFilter_TypedConfig + // *ListenerFilter_ConfigDiscovery + ConfigType isListenerFilter_ConfigType `protobuf_oneof:"config_type"` + // Optional match predicate used to disable the filter. The filter is enabled when this field is empty. + // See :ref:`ListenerFilterChainMatchPredicate ` + // for further examples. + FilterDisabled *ListenerFilterChainMatchPredicate `protobuf:"bytes,4,opt,name=filter_disabled,json=filterDisabled,proto3" json:"filter_disabled,omitempty"` +} + +func (x *ListenerFilter) Reset() { + *x = ListenerFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenerFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenerFilter) ProtoMessage() {} + +func (x *ListenerFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenerFilter.ProtoReflect.Descriptor instead. +func (*ListenerFilter) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_components_proto_rawDescGZIP(), []int{4} +} + +func (x *ListenerFilter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *ListenerFilter) GetConfigType() isListenerFilter_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *ListenerFilter) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*ListenerFilter_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +func (x *ListenerFilter) GetConfigDiscovery() *v3.ExtensionConfigSource { + if x, ok := x.GetConfigType().(*ListenerFilter_ConfigDiscovery); ok { + return x.ConfigDiscovery + } + return nil +} + +func (x *ListenerFilter) GetFilterDisabled() *ListenerFilterChainMatchPredicate { + if x != nil { + return x.FilterDisabled + } + return nil +} + +type isListenerFilter_ConfigType interface { + isListenerFilter_ConfigType() +} + +type ListenerFilter_TypedConfig struct { + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. 
+ // [#extension-category: envoy.filters.listener,envoy.filters.udp_listener] + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +type ListenerFilter_ConfigDiscovery struct { + // Configuration source specifier for an extension configuration discovery + // service. In case of a failure and without the default configuration, the + // listener closes the connections. + ConfigDiscovery *v3.ExtensionConfigSource `protobuf:"bytes,5,opt,name=config_discovery,json=configDiscovery,proto3,oneof"` +} + +func (*ListenerFilter_TypedConfig) isListenerFilter_ConfigType() {} + +func (*ListenerFilter_ConfigDiscovery) isListenerFilter_ConfigType() {} + +// The configuration for on-demand filter chain. If this field is not empty in FilterChain message, +// a filter chain will be built on-demand. +// On-demand filter chains help speedup the warming up of listeners since the building and initialization of +// an on-demand filter chain will be postponed to the arrival of new connection requests that require this filter chain. +// Filter chains that are not often used can be set as on-demand. +type FilterChain_OnDemandConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The timeout to wait for filter chain placeholders to complete rebuilding. + // 1. If this field is set to 0, timeout is disabled. + // 2. If not specified, a default timeout of 15s is used. + // Rebuilding will wait until dependencies are ready, have failed, or this timeout is reached. + // Upon failure or timeout, all connections related to this filter chain will be closed. + // Rebuilding will start again on the next new connection. + RebuildTimeout *durationpb.Duration `protobuf:"bytes,1,opt,name=rebuild_timeout,json=rebuildTimeout,proto3" json:"rebuild_timeout,omitempty"` +} + +func (x *FilterChain_OnDemandConfiguration) Reset() { + *x = FilterChain_OnDemandConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FilterChain_OnDemandConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FilterChain_OnDemandConfiguration) ProtoMessage() {} + +func (x *FilterChain_OnDemandConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FilterChain_OnDemandConfiguration.ProtoReflect.Descriptor instead. +func (*FilterChain_OnDemandConfiguration) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_components_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *FilterChain_OnDemandConfiguration) GetRebuildTimeout() *durationpb.Duration { + if x != nil { + return x.RebuildTimeout + } + return nil +} + +// A set of match configurations used for logical operations. +type ListenerFilterChainMatchPredicate_MatchSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of rules that make up the set. 
+ Rules []*ListenerFilterChainMatchPredicate `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"` +} + +func (x *ListenerFilterChainMatchPredicate_MatchSet) Reset() { + *x = ListenerFilterChainMatchPredicate_MatchSet{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListenerFilterChainMatchPredicate_MatchSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenerFilterChainMatchPredicate_MatchSet) ProtoMessage() {} + +func (x *ListenerFilterChainMatchPredicate_MatchSet) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_listener_components_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenerFilterChainMatchPredicate_MatchSet.ProtoReflect.Descriptor instead. +func (*ListenerFilterChainMatchPredicate_MatchSet) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_listener_components_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *ListenerFilterChainMatchPredicate_MatchSet) GetRules() []*ListenerFilterChainMatchPredicate { + if x != nil { + return x.Rules + } + return nil +} + +var File_envoy_config_listener_v3_listener_components_proto protoreflect.FileDescriptor + +var file_envoy_config_listener_v3_listener_components_proto_rawDesc = []byte{ + 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x22, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61, 0x6e, + 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 
0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x82, 0x02, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x58, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x5f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, + 0x0f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, + 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, + 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xea, 0x06, 0x0a, 0x10, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x54, 0x0a, + 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, + 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x2a, 0x06, 0x18, 0xff, 0xff, 0x03, + 0x28, 0x01, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x6f, 0x72, 0x74, 0x12, 0x44, 0x0a, 0x0d, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 
0x0d, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, + 0x12, 0x3b, 0x0a, 0x0a, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x09, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x4c, 0x65, 0x6e, 0x12, 0x5e, 0x0a, + 0x1b, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x52, 0x18, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x6a, 0x0a, + 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x51, 0x0a, 0x14, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x12, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x33, 0x0a, 0x0c, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x07, 0x20, 0x03, + 0x28, 0x0d, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x22, 0x08, 0x2a, 0x06, 0x18, 0xff, + 0xff, 0x03, 0x28, 0x01, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x72, 0x74, + 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x12, 0x33, 0x0a, 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x0a, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0x46, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 
0x59, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x41, 0x4d, + 0x45, 0x5f, 0x49, 0x50, 0x5f, 0x4f, 0x52, 0x5f, 0x4c, 0x4f, 0x4f, 0x50, 0x42, 0x41, 0x43, 0x4b, + 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x02, + 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4a, + 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x89, 0x06, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x58, 0x0a, 0x12, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x10, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, + 0x3a, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x4f, 0x0a, 0x0f, 0x75, + 0x73, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0d, 0x75, + 0x73, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x3a, 0x0a, 0x08, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x50, 0x0a, 0x10, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x62, 0x0a, 0x20, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x1d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x54, 0x69, 0x6d, 
0x65, 0x6f, 0x75, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x73, 0x0a, 0x17, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x6d, 0x61, 0x6e, 0x64, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x2e, 0x4f, 0x6e, 0x44, 0x65, 0x6d, + 0x61, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x15, 0x6f, 0x6e, 0x44, 0x65, 0x6d, 0x61, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5b, 0x0a, 0x15, 0x4f, 0x6e, 0x44, 0x65, 0x6d, + 0x61, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x42, 0x0a, 0x0f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x54, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4a, 0x04, + 0x08, 0x02, 0x10, 0x03, 0x52, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x22, 0xc2, 0x05, 0x0a, 0x21, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, + 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x61, 0x0a, 0x08, 0x6f, 0x72, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x74, 0x48, + 0x00, 0x52, 0x07, 0x6f, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x63, 0x0a, 0x09, 0x61, 0x6e, + 0x64, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x44, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x08, 0x61, 0x6e, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, + 0x5a, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 
0x4c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, + 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, + 0x00, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x26, 0x0a, 0x09, 0x61, + 0x6e, 0x79, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x6a, 0x02, 0x08, 0x01, 0x48, 0x00, 0x52, 0x08, 0x61, 0x6e, 0x79, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x12, 0x51, 0x0a, 0x16, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, + 0x52, 0x14, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, + 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xb0, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x53, 0x65, 0x74, 0x12, 0x5b, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, + 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, + 0x3a, 0x47, 0x9a, 0xc5, 0x88, 0x1e, 0x42, 0x0a, 0x40, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x74, 0x3a, 0x3e, 0x9a, 0xc5, 0x88, 0x1e, 0x39, + 0x0a, 0x37, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x0b, 0x0a, 0x04, 0x72, 0x75, 0x6c, + 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xf2, 0x02, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, + 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x58, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x69, 0x73, 0x63, + 0x6f, 0x76, 0x65, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 
0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x12, 0x64, 0x0a, 0x0f, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x68, + 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x0d, + 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, + 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x97, 0x01, 0xba, 0x80, + 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x17, + 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, + 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_listener_v3_listener_components_proto_rawDescOnce sync.Once + file_envoy_config_listener_v3_listener_components_proto_rawDescData = file_envoy_config_listener_v3_listener_components_proto_rawDesc +) + +func file_envoy_config_listener_v3_listener_components_proto_rawDescGZIP() []byte { + file_envoy_config_listener_v3_listener_components_proto_rawDescOnce.Do(func() { + file_envoy_config_listener_v3_listener_components_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_listener_v3_listener_components_proto_rawDescData) + }) + return file_envoy_config_listener_v3_listener_components_proto_rawDescData +} + +var file_envoy_config_listener_v3_listener_components_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_config_listener_v3_listener_components_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_envoy_config_listener_v3_listener_components_proto_goTypes = []interface{}{ + (FilterChainMatch_ConnectionSourceType)(0), // 0: envoy.config.listener.v3.FilterChainMatch.ConnectionSourceType + (*Filter)(nil), // 1: envoy.config.listener.v3.Filter + (*FilterChainMatch)(nil), // 2: envoy.config.listener.v3.FilterChainMatch + (*FilterChain)(nil), // 3: 
envoy.config.listener.v3.FilterChain + (*ListenerFilterChainMatchPredicate)(nil), // 4: envoy.config.listener.v3.ListenerFilterChainMatchPredicate + (*ListenerFilter)(nil), // 5: envoy.config.listener.v3.ListenerFilter + (*FilterChain_OnDemandConfiguration)(nil), // 6: envoy.config.listener.v3.FilterChain.OnDemandConfiguration + (*ListenerFilterChainMatchPredicate_MatchSet)(nil), // 7: envoy.config.listener.v3.ListenerFilterChainMatchPredicate.MatchSet + (*anypb.Any)(nil), // 8: google.protobuf.Any + (*v3.ExtensionConfigSource)(nil), // 9: envoy.config.core.v3.ExtensionConfigSource + (*wrapperspb.UInt32Value)(nil), // 10: google.protobuf.UInt32Value + (*v3.CidrRange)(nil), // 11: envoy.config.core.v3.CidrRange + (*wrapperspb.BoolValue)(nil), // 12: google.protobuf.BoolValue + (*v3.Metadata)(nil), // 13: envoy.config.core.v3.Metadata + (*v3.TransportSocket)(nil), // 14: envoy.config.core.v3.TransportSocket + (*durationpb.Duration)(nil), // 15: google.protobuf.Duration + (*v31.Int32Range)(nil), // 16: envoy.type.v3.Int32Range +} +var file_envoy_config_listener_v3_listener_components_proto_depIdxs = []int32{ + 8, // 0: envoy.config.listener.v3.Filter.typed_config:type_name -> google.protobuf.Any + 9, // 1: envoy.config.listener.v3.Filter.config_discovery:type_name -> envoy.config.core.v3.ExtensionConfigSource + 10, // 2: envoy.config.listener.v3.FilterChainMatch.destination_port:type_name -> google.protobuf.UInt32Value + 11, // 3: envoy.config.listener.v3.FilterChainMatch.prefix_ranges:type_name -> envoy.config.core.v3.CidrRange + 10, // 4: envoy.config.listener.v3.FilterChainMatch.suffix_len:type_name -> google.protobuf.UInt32Value + 11, // 5: envoy.config.listener.v3.FilterChainMatch.direct_source_prefix_ranges:type_name -> envoy.config.core.v3.CidrRange + 0, // 6: envoy.config.listener.v3.FilterChainMatch.source_type:type_name -> envoy.config.listener.v3.FilterChainMatch.ConnectionSourceType + 11, // 7: envoy.config.listener.v3.FilterChainMatch.source_prefix_ranges:type_name -> envoy.config.core.v3.CidrRange + 2, // 8: envoy.config.listener.v3.FilterChain.filter_chain_match:type_name -> envoy.config.listener.v3.FilterChainMatch + 1, // 9: envoy.config.listener.v3.FilterChain.filters:type_name -> envoy.config.listener.v3.Filter + 12, // 10: envoy.config.listener.v3.FilterChain.use_proxy_proto:type_name -> google.protobuf.BoolValue + 13, // 11: envoy.config.listener.v3.FilterChain.metadata:type_name -> envoy.config.core.v3.Metadata + 14, // 12: envoy.config.listener.v3.FilterChain.transport_socket:type_name -> envoy.config.core.v3.TransportSocket + 15, // 13: envoy.config.listener.v3.FilterChain.transport_socket_connect_timeout:type_name -> google.protobuf.Duration + 6, // 14: envoy.config.listener.v3.FilterChain.on_demand_configuration:type_name -> envoy.config.listener.v3.FilterChain.OnDemandConfiguration + 7, // 15: envoy.config.listener.v3.ListenerFilterChainMatchPredicate.or_match:type_name -> envoy.config.listener.v3.ListenerFilterChainMatchPredicate.MatchSet + 7, // 16: envoy.config.listener.v3.ListenerFilterChainMatchPredicate.and_match:type_name -> envoy.config.listener.v3.ListenerFilterChainMatchPredicate.MatchSet + 4, // 17: envoy.config.listener.v3.ListenerFilterChainMatchPredicate.not_match:type_name -> envoy.config.listener.v3.ListenerFilterChainMatchPredicate + 16, // 18: envoy.config.listener.v3.ListenerFilterChainMatchPredicate.destination_port_range:type_name -> envoy.type.v3.Int32Range + 8, // 19: envoy.config.listener.v3.ListenerFilter.typed_config:type_name -> 
google.protobuf.Any + 9, // 20: envoy.config.listener.v3.ListenerFilter.config_discovery:type_name -> envoy.config.core.v3.ExtensionConfigSource + 4, // 21: envoy.config.listener.v3.ListenerFilter.filter_disabled:type_name -> envoy.config.listener.v3.ListenerFilterChainMatchPredicate + 15, // 22: envoy.config.listener.v3.FilterChain.OnDemandConfiguration.rebuild_timeout:type_name -> google.protobuf.Duration + 4, // 23: envoy.config.listener.v3.ListenerFilterChainMatchPredicate.MatchSet.rules:type_name -> envoy.config.listener.v3.ListenerFilterChainMatchPredicate + 24, // [24:24] is the sub-list for method output_type + 24, // [24:24] is the sub-list for method input_type + 24, // [24:24] is the sub-list for extension type_name + 24, // [24:24] is the sub-list for extension extendee + 0, // [0:24] is the sub-list for field type_name +} + +func init() { file_envoy_config_listener_v3_listener_components_proto_init() } +func file_envoy_config_listener_v3_listener_components_proto_init() { + if File_envoy_config_listener_v3_listener_components_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_listener_v3_listener_components_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Filter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_components_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FilterChainMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_components_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FilterChain); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_components_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenerFilterChainMatchPredicate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_components_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenerFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_components_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FilterChain_OnDemandConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_listener_components_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListenerFilterChainMatchPredicate_MatchSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_listener_v3_listener_components_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Filter_TypedConfig)(nil), + (*Filter_ConfigDiscovery)(nil), + } + file_envoy_config_listener_v3_listener_components_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*ListenerFilterChainMatchPredicate_OrMatch)(nil), + 
(*ListenerFilterChainMatchPredicate_AndMatch)(nil), + (*ListenerFilterChainMatchPredicate_NotMatch)(nil), + (*ListenerFilterChainMatchPredicate_AnyMatch)(nil), + (*ListenerFilterChainMatchPredicate_DestinationPortRange)(nil), + } + file_envoy_config_listener_v3_listener_components_proto_msgTypes[4].OneofWrappers = []interface{}{ + (*ListenerFilter_TypedConfig)(nil), + (*ListenerFilter_ConfigDiscovery)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_listener_v3_listener_components_proto_rawDesc, + NumEnums: 1, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_listener_v3_listener_components_proto_goTypes, + DependencyIndexes: file_envoy_config_listener_v3_listener_components_proto_depIdxs, + EnumInfos: file_envoy_config_listener_v3_listener_components_proto_enumTypes, + MessageInfos: file_envoy_config_listener_v3_listener_components_proto_msgTypes, + }.Build() + File_envoy_config_listener_v3_listener_components_proto = out.File + file_envoy_config_listener_v3_listener_components_proto_rawDesc = nil + file_envoy_config_listener_v3_listener_components_proto_goTypes = nil + file_envoy_config_listener_v3_listener_components_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.validate.go new file mode 100644 index 0000000000..4a18f8b084 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components.pb.validate.go @@ -0,0 +1,1644 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/listener/v3/listener_components.proto + +package listenerv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Filter with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Filter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Filter with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in FilterMultiError, or nil if none found. 
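// Editorial note, not part of the generated file: a minimal usage sketch of the
// Validate/ValidateAll pair defined below. Validate stops at the first violation,
// while ValidateAll collects every violation into a FilterMultiError. The caller
// code below is hypothetical and assumes a *Filter value named "f" built elsewhere.
//
//	// Fail fast: returns the first FilterValidationError encountered (for
//	// example an empty Name, which violates the min-length-1 rule), or nil.
//	if err := f.Validate(); err != nil {
//		log.Println("invalid filter:", err)
//	}
//
//	// Collect everything: the returned error is the FilterMultiError slice
//	// type, so the individual violations can be iterated via AllErrors().
//	if err := f.ValidateAll(); err != nil {
//		if merr, ok := err.(FilterMultiError); ok {
//			for _, violation := range merr.AllErrors() {
//				log.Println(violation)
//			}
//		}
//	}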
+func (m *Filter) ValidateAll() error { + return m.validate(true) +} + +func (m *Filter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := FilterValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + switch v := m.ConfigType.(type) { + case *Filter_TypedConfig: + if v == nil { + err := FilterValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Filter_ConfigDiscovery: + if v == nil { + err := FilterValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetConfigDiscovery()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterValidationError{ + field: "ConfigDiscovery", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterValidationError{ + field: "ConfigDiscovery", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfigDiscovery()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterValidationError{ + field: "ConfigDiscovery", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return FilterMultiError(errors) + } + + return nil +} + +// FilterMultiError is an error wrapping multiple validation errors returned by +// Filter.ValidateAll() if the designated constraints aren't met. +type FilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FilterMultiError) AllErrors() []error { return m } + +// FilterValidationError is the validation error returned by Filter.Validate if +// the designated constraints aren't met. +type FilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e FilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FilterValidationError) ErrorName() string { return "FilterValidationError" } + +// Error satisfies the builtin error interface +func (e FilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FilterValidationError{} + +// Validate checks the field values on FilterChainMatch with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *FilterChainMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FilterChainMatch with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FilterChainMatchMultiError, or nil if none found. +func (m *FilterChainMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *FilterChainMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if wrapper := m.GetDestinationPort(); wrapper != nil { + + if val := wrapper.GetValue(); val < 1 || val > 65535 { + err := FilterChainMatchValidationError{ + field: "DestinationPort", + reason: "value must be inside range [1, 65535]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + for idx, item := range m.GetPrefixRanges() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterChainMatchValidationError{ + field: fmt.Sprintf("PrefixRanges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterChainMatchValidationError{ + field: fmt.Sprintf("PrefixRanges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterChainMatchValidationError{ + field: fmt.Sprintf("PrefixRanges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for AddressSuffix + + if all { + switch v := interface{}(m.GetSuffixLen()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterChainMatchValidationError{ + field: "SuffixLen", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterChainMatchValidationError{ + field: "SuffixLen", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := 
interface{}(m.GetSuffixLen()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterChainMatchValidationError{ + field: "SuffixLen", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetDirectSourcePrefixRanges() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterChainMatchValidationError{ + field: fmt.Sprintf("DirectSourcePrefixRanges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterChainMatchValidationError{ + field: fmt.Sprintf("DirectSourcePrefixRanges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterChainMatchValidationError{ + field: fmt.Sprintf("DirectSourcePrefixRanges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if _, ok := FilterChainMatch_ConnectionSourceType_name[int32(m.GetSourceType())]; !ok { + err := FilterChainMatchValidationError{ + field: "SourceType", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetSourcePrefixRanges() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterChainMatchValidationError{ + field: fmt.Sprintf("SourcePrefixRanges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterChainMatchValidationError{ + field: fmt.Sprintf("SourcePrefixRanges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterChainMatchValidationError{ + field: fmt.Sprintf("SourcePrefixRanges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetSourcePorts() { + _, _ = idx, item + + if val := item; val < 1 || val > 65535 { + err := FilterChainMatchValidationError{ + field: fmt.Sprintf("SourcePorts[%v]", idx), + reason: "value must be inside range [1, 65535]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + // no validation rules for TransportProtocol + + if len(errors) > 0 { + return FilterChainMatchMultiError(errors) + } + + return nil +} + +// FilterChainMatchMultiError is an error wrapping multiple validation errors +// returned by FilterChainMatch.ValidateAll() if the designated constraints +// aren't met. +type FilterChainMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FilterChainMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m FilterChainMatchMultiError) AllErrors() []error { return m } + +// FilterChainMatchValidationError is the validation error returned by +// FilterChainMatch.Validate if the designated constraints aren't met. +type FilterChainMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FilterChainMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FilterChainMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FilterChainMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FilterChainMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FilterChainMatchValidationError) ErrorName() string { return "FilterChainMatchValidationError" } + +// Error satisfies the builtin error interface +func (e FilterChainMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFilterChainMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FilterChainMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FilterChainMatchValidationError{} + +// Validate checks the field values on FilterChain with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *FilterChain) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FilterChain with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in FilterChainMultiError, or +// nil if none found. 
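// Editorial note, not part of the generated file: nested messages are validated
// recursively, so a violation deep inside FilterChainMatch surfaces through
// FilterChain.ValidateAll as a FilterChainMultiError wrapping a
// FilterChainValidationError whose Cause() is the inner error. A hypothetical
// sketch, assuming the generated listenerv3 identifiers and the wrapperspb
// package are in scope:
//
//	fc := &FilterChain{
//		FilterChainMatch: &FilterChainMatch{
//			// 0 violates the [1, 65535] range rule on destination_port.
//			DestinationPort: wrapperspb.UInt32(0),
//		},
//	}
//	err := fc.ValidateAll() // non-nil: the FilterChainMatch field failed validation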
+func (m *FilterChain) ValidateAll() error { + return m.validate(true) +} + +func (m *FilterChain) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetFilterChainMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: "FilterChainMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: "FilterChainMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilterChainMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterChainValidationError{ + field: "FilterChainMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetFilters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterChainValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetUseProxyProto()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: "UseProxyProto", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: "UseProxyProto", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUseProxyProto()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterChainValidationError{ + field: "UseProxyProto", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterChainValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTransportSocket()).(type) { + 
case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: "TransportSocket", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: "TransportSocket", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTransportSocket()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterChainValidationError{ + field: "TransportSocket", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTransportSocketConnectTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: "TransportSocketConnectTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: "TransportSocketConnectTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTransportSocketConnectTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterChainValidationError{ + field: "TransportSocketConnectTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Name + + if all { + switch v := interface{}(m.GetOnDemandConfiguration()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: "OnDemandConfiguration", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterChainValidationError{ + field: "OnDemandConfiguration", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOnDemandConfiguration()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterChainValidationError{ + field: "OnDemandConfiguration", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return FilterChainMultiError(errors) + } + + return nil +} + +// FilterChainMultiError is an error wrapping multiple validation errors +// returned by FilterChain.ValidateAll() if the designated constraints aren't met. +type FilterChainMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FilterChainMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FilterChainMultiError) AllErrors() []error { return m } + +// FilterChainValidationError is the validation error returned by +// FilterChain.Validate if the designated constraints aren't met. +type FilterChainValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FilterChainValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e FilterChainValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FilterChainValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FilterChainValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FilterChainValidationError) ErrorName() string { return "FilterChainValidationError" } + +// Error satisfies the builtin error interface +func (e FilterChainValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFilterChain.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FilterChainValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FilterChainValidationError{} + +// Validate checks the field values on ListenerFilterChainMatchPredicate with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *ListenerFilterChainMatchPredicate) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListenerFilterChainMatchPredicate +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// ListenerFilterChainMatchPredicateMultiError, or nil if none found. +func (m *ListenerFilterChainMatchPredicate) ValidateAll() error { + return m.validate(true) +} + +func (m *ListenerFilterChainMatchPredicate) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofRulePresent := false + switch v := m.Rule.(type) { + case *ListenerFilterChainMatchPredicate_OrMatch: + if v == nil { + err := ListenerFilterChainMatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetOrMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerFilterChainMatchPredicateValidationError{ + field: "OrMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerFilterChainMatchPredicateValidationError{ + field: "OrMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOrMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerFilterChainMatchPredicateValidationError{ + field: "OrMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ListenerFilterChainMatchPredicate_AndMatch: + if v == nil { + err := ListenerFilterChainMatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetAndMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerFilterChainMatchPredicateValidationError{ + field: "AndMatch", + 
reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerFilterChainMatchPredicateValidationError{ + field: "AndMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAndMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerFilterChainMatchPredicateValidationError{ + field: "AndMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ListenerFilterChainMatchPredicate_NotMatch: + if v == nil { + err := ListenerFilterChainMatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetNotMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerFilterChainMatchPredicateValidationError{ + field: "NotMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerFilterChainMatchPredicateValidationError{ + field: "NotMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNotMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerFilterChainMatchPredicateValidationError{ + field: "NotMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ListenerFilterChainMatchPredicate_AnyMatch: + if v == nil { + err := ListenerFilterChainMatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if m.GetAnyMatch() != true { + err := ListenerFilterChainMatchPredicateValidationError{ + field: "AnyMatch", + reason: "value must equal true", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *ListenerFilterChainMatchPredicate_DestinationPortRange: + if v == nil { + err := ListenerFilterChainMatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetDestinationPortRange()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerFilterChainMatchPredicateValidationError{ + field: "DestinationPortRange", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerFilterChainMatchPredicateValidationError{ + field: "DestinationPortRange", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDestinationPortRange()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerFilterChainMatchPredicateValidationError{ + field: "DestinationPortRange", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofRulePresent { + err := 
ListenerFilterChainMatchPredicateValidationError{ + field: "Rule", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ListenerFilterChainMatchPredicateMultiError(errors) + } + + return nil +} + +// ListenerFilterChainMatchPredicateMultiError is an error wrapping multiple +// validation errors returned by +// ListenerFilterChainMatchPredicate.ValidateAll() if the designated +// constraints aren't met. +type ListenerFilterChainMatchPredicateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenerFilterChainMatchPredicateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenerFilterChainMatchPredicateMultiError) AllErrors() []error { return m } + +// ListenerFilterChainMatchPredicateValidationError is the validation error +// returned by ListenerFilterChainMatchPredicate.Validate if the designated +// constraints aren't met. +type ListenerFilterChainMatchPredicateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenerFilterChainMatchPredicateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenerFilterChainMatchPredicateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenerFilterChainMatchPredicateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListenerFilterChainMatchPredicateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListenerFilterChainMatchPredicateValidationError) ErrorName() string { + return "ListenerFilterChainMatchPredicateValidationError" +} + +// Error satisfies the builtin error interface +func (e ListenerFilterChainMatchPredicateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListenerFilterChainMatchPredicate.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenerFilterChainMatchPredicateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenerFilterChainMatchPredicateValidationError{} + +// Validate checks the field values on ListenerFilter with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ListenerFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListenerFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ListenerFilterMultiError, +// or nil if none found. 
+func (m *ListenerFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *ListenerFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := ListenerFilterValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetFilterDisabled()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerFilterValidationError{ + field: "FilterDisabled", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerFilterValidationError{ + field: "FilterDisabled", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilterDisabled()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerFilterValidationError{ + field: "FilterDisabled", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.ConfigType.(type) { + case *ListenerFilter_TypedConfig: + if v == nil { + err := ListenerFilterValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerFilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerFilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerFilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ListenerFilter_ConfigDiscovery: + if v == nil { + err := ListenerFilterValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetConfigDiscovery()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerFilterValidationError{ + field: "ConfigDiscovery", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerFilterValidationError{ + field: "ConfigDiscovery", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfigDiscovery()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerFilterValidationError{ + field: "ConfigDiscovery", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return ListenerFilterMultiError(errors) + } + + return nil +} + +// ListenerFilterMultiError is an error wrapping 
multiple validation errors +// returned by ListenerFilter.ValidateAll() if the designated constraints +// aren't met. +type ListenerFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListenerFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenerFilterMultiError) AllErrors() []error { return m } + +// ListenerFilterValidationError is the validation error returned by +// ListenerFilter.Validate if the designated constraints aren't met. +type ListenerFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenerFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenerFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenerFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListenerFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListenerFilterValidationError) ErrorName() string { return "ListenerFilterValidationError" } + +// Error satisfies the builtin error interface +func (e ListenerFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListenerFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenerFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenerFilterValidationError{} + +// Validate checks the field values on FilterChain_OnDemandConfiguration with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *FilterChain_OnDemandConfiguration) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FilterChain_OnDemandConfiguration +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// FilterChain_OnDemandConfigurationMultiError, or nil if none found. 
+func (m *FilterChain_OnDemandConfiguration) ValidateAll() error { + return m.validate(true) +} + +func (m *FilterChain_OnDemandConfiguration) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetRebuildTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterChain_OnDemandConfigurationValidationError{ + field: "RebuildTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterChain_OnDemandConfigurationValidationError{ + field: "RebuildTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRebuildTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterChain_OnDemandConfigurationValidationError{ + field: "RebuildTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return FilterChain_OnDemandConfigurationMultiError(errors) + } + + return nil +} + +// FilterChain_OnDemandConfigurationMultiError is an error wrapping multiple +// validation errors returned by +// FilterChain_OnDemandConfiguration.ValidateAll() if the designated +// constraints aren't met. +type FilterChain_OnDemandConfigurationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FilterChain_OnDemandConfigurationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FilterChain_OnDemandConfigurationMultiError) AllErrors() []error { return m } + +// FilterChain_OnDemandConfigurationValidationError is the validation error +// returned by FilterChain_OnDemandConfiguration.Validate if the designated +// constraints aren't met. +type FilterChain_OnDemandConfigurationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FilterChain_OnDemandConfigurationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FilterChain_OnDemandConfigurationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FilterChain_OnDemandConfigurationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FilterChain_OnDemandConfigurationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e FilterChain_OnDemandConfigurationValidationError) ErrorName() string { + return "FilterChain_OnDemandConfigurationValidationError" +} + +// Error satisfies the builtin error interface +func (e FilterChain_OnDemandConfigurationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFilterChain_OnDemandConfiguration.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FilterChain_OnDemandConfigurationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FilterChain_OnDemandConfigurationValidationError{} + +// Validate checks the field values on +// ListenerFilterChainMatchPredicate_MatchSet with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ListenerFilterChainMatchPredicate_MatchSet) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// ListenerFilterChainMatchPredicate_MatchSet with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// ListenerFilterChainMatchPredicate_MatchSetMultiError, or nil if none found. +func (m *ListenerFilterChainMatchPredicate_MatchSet) ValidateAll() error { + return m.validate(true) +} + +func (m *ListenerFilterChainMatchPredicate_MatchSet) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetRules()) < 2 { + err := ListenerFilterChainMatchPredicate_MatchSetValidationError{ + field: "Rules", + reason: "value must contain at least 2 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRules() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListenerFilterChainMatchPredicate_MatchSetValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListenerFilterChainMatchPredicate_MatchSetValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListenerFilterChainMatchPredicate_MatchSetValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ListenerFilterChainMatchPredicate_MatchSetMultiError(errors) + } + + return nil +} + +// ListenerFilterChainMatchPredicate_MatchSetMultiError is an error wrapping +// multiple validation errors returned by +// ListenerFilterChainMatchPredicate_MatchSet.ValidateAll() if the designated +// constraints aren't met. +type ListenerFilterChainMatchPredicate_MatchSetMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ListenerFilterChainMatchPredicate_MatchSetMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListenerFilterChainMatchPredicate_MatchSetMultiError) AllErrors() []error { return m } + +// ListenerFilterChainMatchPredicate_MatchSetValidationError is the validation +// error returned by ListenerFilterChainMatchPredicate_MatchSet.Validate if +// the designated constraints aren't met. +type ListenerFilterChainMatchPredicate_MatchSetValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListenerFilterChainMatchPredicate_MatchSetValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListenerFilterChainMatchPredicate_MatchSetValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListenerFilterChainMatchPredicate_MatchSetValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListenerFilterChainMatchPredicate_MatchSetValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListenerFilterChainMatchPredicate_MatchSetValidationError) ErrorName() string { + return "ListenerFilterChainMatchPredicate_MatchSetValidationError" +} + +// Error satisfies the builtin error interface +func (e ListenerFilterChainMatchPredicate_MatchSetValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListenerFilterChainMatchPredicate_MatchSet.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListenerFilterChainMatchPredicate_MatchSetValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListenerFilterChainMatchPredicate_MatchSetValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components_vtproto.pb.go new file mode 100644 index 0000000000..7896458b8a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_components_vtproto.pb.go @@ -0,0 +1,1213 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/listener/v3/listener_components.proto + +package listenerv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Filter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Filter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Filter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*Filter_ConfigDiscovery); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ConfigType.(*Filter_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Filter_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Filter_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Filter_ConfigDiscovery) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Filter_ConfigDiscovery) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ConfigDiscovery != nil { + if vtmsg, ok := interface{}(m.ConfigDiscovery).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConfigDiscovery) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *FilterChainMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FilterChainMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FilterChainMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DirectSourcePrefixRanges) > 0 { + 
for iNdEx := len(m.DirectSourcePrefixRanges) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.DirectSourcePrefixRanges[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DirectSourcePrefixRanges[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + } + if m.SourceType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.SourceType)) + i-- + dAtA[i] = 0x60 + } + if len(m.ServerNames) > 0 { + for iNdEx := len(m.ServerNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ServerNames[iNdEx]) + copy(dAtA[i:], m.ServerNames[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServerNames[iNdEx]))) + i-- + dAtA[i] = 0x5a + } + } + if len(m.ApplicationProtocols) > 0 { + for iNdEx := len(m.ApplicationProtocols) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ApplicationProtocols[iNdEx]) + copy(dAtA[i:], m.ApplicationProtocols[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ApplicationProtocols[iNdEx]))) + i-- + dAtA[i] = 0x52 + } + } + if len(m.TransportProtocol) > 0 { + i -= len(m.TransportProtocol) + copy(dAtA[i:], m.TransportProtocol) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TransportProtocol))) + i-- + dAtA[i] = 0x4a + } + if m.DestinationPort != nil { + size, err := (*wrapperspb.UInt32Value)(m.DestinationPort).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if len(m.SourcePorts) > 0 { + var pksize2 int + for _, num := range m.SourcePorts { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num := range m.SourcePorts { + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x3a + } + if len(m.SourcePrefixRanges) > 0 { + for iNdEx := len(m.SourcePrefixRanges) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.SourcePrefixRanges[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SourcePrefixRanges[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + } + if m.SuffixLen != nil { + size, err := (*wrapperspb.UInt32Value)(m.SuffixLen).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if len(m.AddressSuffix) > 0 { + i -= len(m.AddressSuffix) + copy(dAtA[i:], m.AddressSuffix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AddressSuffix))) + i-- + dAtA[i] = 0x22 + } + if len(m.PrefixRanges) > 0 { + for iNdEx := len(m.PrefixRanges) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.PrefixRanges[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, 
err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.PrefixRanges[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + } + return len(dAtA) - i, nil +} + +func (m *FilterChain_OnDemandConfiguration) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FilterChain_OnDemandConfiguration) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FilterChain_OnDemandConfiguration) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RebuildTimeout != nil { + size, err := (*durationpb.Duration)(m.RebuildTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FilterChain) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FilterChain) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FilterChain) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TransportSocketConnectTimeout != nil { + size, err := (*durationpb.Duration)(m.TransportSocketConnectTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if m.OnDemandConfiguration != nil { + size, err := m.OnDemandConfiguration.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x3a + } + if m.TransportSocket != nil { + if vtmsg, ok := interface{}(m.TransportSocket).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TransportSocket) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); 
ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + if m.UseProxyProto != nil { + size, err := (*wrapperspb.BoolValue)(m.UseProxyProto).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.Filters) > 0 { + for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Filters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if m.FilterChainMatch != nil { + size, err := m.FilterChainMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListenerFilterChainMatchPredicate_MatchSet) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenerFilterChainMatchPredicate_MatchSet) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilterChainMatchPredicate_MatchSet) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Rules[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ListenerFilterChainMatchPredicate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenerFilterChainMatchPredicate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilterChainMatchPredicate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Rule.(*ListenerFilterChainMatchPredicate_DestinationPortRange); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*ListenerFilterChainMatchPredicate_AnyMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*ListenerFilterChainMatchPredicate_NotMatch); ok { + size, err := 
msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*ListenerFilterChainMatchPredicate_AndMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*ListenerFilterChainMatchPredicate_OrMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *ListenerFilterChainMatchPredicate_OrMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilterChainMatchPredicate_OrMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrMatch != nil { + size, err := m.OrMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *ListenerFilterChainMatchPredicate_AndMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilterChainMatchPredicate_AndMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AndMatch != nil { + size, err := m.AndMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *ListenerFilterChainMatchPredicate_NotMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilterChainMatchPredicate_NotMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NotMatch != nil { + size, err := m.NotMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *ListenerFilterChainMatchPredicate_AnyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilterChainMatchPredicate_AnyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.AnyMatch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *ListenerFilterChainMatchPredicate_DestinationPortRange) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilterChainMatchPredicate_DestinationPortRange) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DestinationPortRange != nil { + if vtmsg, ok := interface{}(m.DestinationPortRange).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := 
proto.Marshal(m.DestinationPortRange) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *ListenerFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenerFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*ListenerFilter_ConfigDiscovery); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.FilterDisabled != nil { + size, err := m.FilterDisabled.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if msg, ok := m.ConfigType.(*ListenerFilter_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListenerFilter_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilter_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *ListenerFilter_ConfigDiscovery) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerFilter_ConfigDiscovery) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ConfigDiscovery != nil { + if vtmsg, ok := interface{}(m.ConfigDiscovery).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConfigDiscovery) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Filter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + 
if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Filter_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Filter_ConfigDiscovery) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConfigDiscovery != nil { + if size, ok := interface{}(m.ConfigDiscovery).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ConfigDiscovery) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *FilterChainMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PrefixRanges) > 0 { + for _, e := range m.PrefixRanges { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.AddressSuffix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SuffixLen != nil { + l = (*wrapperspb.UInt32Value)(m.SuffixLen).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.SourcePrefixRanges) > 0 { + for _, e := range m.SourcePrefixRanges { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.SourcePorts) > 0 { + l = 0 + for _, e := range m.SourcePorts { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if m.DestinationPort != nil { + l = (*wrapperspb.UInt32Value)(m.DestinationPort).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.TransportProtocol) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ApplicationProtocols) > 0 { + for _, s := range m.ApplicationProtocols { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ServerNames) > 0 { + for _, s := range m.ServerNames { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.SourceType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.SourceType)) + } + if len(m.DirectSourcePrefixRanges) > 0 { + for _, e := range m.DirectSourcePrefixRanges { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *FilterChain_OnDemandConfiguration) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RebuildTimeout != nil { + l = (*durationpb.Duration)(m.RebuildTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *FilterChain) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FilterChainMatch != nil { + l = m.FilterChainMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Filters) > 0 { + for _, e := range m.Filters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.UseProxyProto != nil { + l = (*wrapperspb.BoolValue)(m.UseProxyProto).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } 
+ if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TransportSocket != nil { + if size, ok := interface{}(m.TransportSocket).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TransportSocket) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OnDemandConfiguration != nil { + l = m.OnDemandConfiguration.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TransportSocketConnectTimeout != nil { + l = (*durationpb.Duration)(m.TransportSocketConnectTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ListenerFilterChainMatchPredicate_MatchSet) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ListenerFilterChainMatchPredicate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Rule.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ListenerFilterChainMatchPredicate_OrMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrMatch != nil { + l = m.OrMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ListenerFilterChainMatchPredicate_AndMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AndMatch != nil { + l = m.AndMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ListenerFilterChainMatchPredicate_NotMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NotMatch != nil { + l = m.NotMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ListenerFilterChainMatchPredicate_AnyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *ListenerFilterChainMatchPredicate_DestinationPortRange) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DestinationPortRange != nil { + if size, ok := interface{}(m.DestinationPortRange).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DestinationPortRange) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ListenerFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.FilterDisabled != nil { + l = m.FilterDisabled.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ListenerFilter_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m 
*ListenerFilter_ConfigDiscovery) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConfigDiscovery != nil { + if size, ok := interface{}(m.ConfigDiscovery).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ConfigDiscovery) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_vtproto.pb.go new file mode 100644 index 0000000000..cdccfea8e1 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/listener_vtproto.pb.go @@ -0,0 +1,1297 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/listener/v3/listener.proto + +package listenerv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *AdditionalAddress) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdditionalAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AdditionalAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SocketOptions != nil { + if vtmsg, ok := interface{}(m.SocketOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SocketOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListenerCollection) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenerCollection) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerCollection) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Entries) > 0 { + for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Entries[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Entries[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Listener_DeprecatedV1) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Listener_DeprecatedV1) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listener_DeprecatedV1) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.BindToPort != nil { + size, err := (*wrapperspb.BoolValue)(m.BindToPort).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Listener_ConnectionBalanceConfig_ExactBalance) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Listener_ConnectionBalanceConfig_ExactBalance) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listener_ConnectionBalanceConfig_ExactBalance) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *Listener_ConnectionBalanceConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Listener_ConnectionBalanceConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listener_ConnectionBalanceConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, 
error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.BalanceType.(*Listener_ConnectionBalanceConfig_ExtendBalance); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.BalanceType.(*Listener_ConnectionBalanceConfig_ExactBalance_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Listener_ConnectionBalanceConfig_ExactBalance_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listener_ConnectionBalanceConfig_ExactBalance_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExactBalance != nil { + size, err := m.ExactBalance.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Listener_ConnectionBalanceConfig_ExtendBalance) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listener_ConnectionBalanceConfig_ExtendBalance) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExtendBalance != nil { + if vtmsg, ok := interface{}(m.ExtendBalance).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ExtendBalance) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Listener_InternalListenerConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Listener_InternalListenerConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listener_InternalListenerConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *Listener) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Listener) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listener) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil 
+ } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.BypassOverloadManager { + i-- + if m.BypassOverloadManager { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x98 + } + if m.MaxConnectionsToAcceptPerSocketEvent != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxConnectionsToAcceptPerSocketEvent).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + if len(m.AdditionalAddresses) > 0 { + for iNdEx := len(m.AdditionalAddresses) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.AdditionalAddresses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a + } + } + if m.FilterChainMatcher != nil { + if vtmsg, ok := interface{}(m.FilterChainMatcher).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.FilterChainMatcher) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 + } + if m.IgnoreGlobalConnLimit { + i-- + if m.IgnoreGlobalConnLimit { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf8 + } + if m.EnableMptcp { + i-- + if m.EnableMptcp { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf0 + } + if m.EnableReusePort != nil { + size, err := (*wrapperspb.BoolValue)(m.EnableReusePort).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + } + if len(m.StatPrefix) > 0 { + i -= len(m.StatPrefix) + copy(dAtA[i:], m.StatPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StatPrefix))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + if msg, ok := m.ListenerSpecifier.(*Listener_InternalListener); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.BindToPort != nil { + size, err := (*wrapperspb.BoolValue)(m.BindToPort).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + if m.DefaultFilterChain != nil { + size, err := m.DefaultFilterChain.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + if m.TcpBacklogSize != nil { + size, err := (*wrapperspb.UInt32Value)(m.TcpBacklogSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if len(m.AccessLog) > 0 { + for iNdEx := len(m.AccessLog) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.AccessLog[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) 
(int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AccessLog[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + } + if m.ReusePort { + i-- + if m.ReusePort { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if m.ConnectionBalanceConfig != nil { + size, err := m.ConnectionBalanceConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if m.ApiListener != nil { + size, err := m.ApiListener.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if m.UdpListenerConfig != nil { + size, err := m.UdpListenerConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if m.ContinueOnListenerFiltersTimeout { + i-- + if m.ContinueOnListenerFiltersTimeout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + if m.TrafficDirection != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TrafficDirection)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.ListenerFiltersTimeout != nil { + size, err := (*durationpb.Duration)(m.ListenerFiltersTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + if len(m.SocketOptions) > 0 { + for iNdEx := len(m.SocketOptions) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.SocketOptions[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SocketOptions[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + } + if m.TcpFastOpenQueueLength != nil { + size, err := (*wrapperspb.UInt32Value)(m.TcpFastOpenQueueLength).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if m.Freebind != nil { + size, err := (*wrapperspb.BoolValue)(m.Freebind).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if m.Transparent != nil { + size, err := (*wrapperspb.BoolValue)(m.Transparent).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if len(m.ListenerFilters) > 0 { + for iNdEx := len(m.ListenerFilters) - 1; iNdEx >= 0; iNdEx-- { + size, err := 
m.ListenerFilters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + } + if m.DrainType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DrainType)) + i-- + dAtA[i] = 0x40 + } + if m.DeprecatedV1 != nil { + size, err := m.DeprecatedV1.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + if m.PerConnectionBufferLimitBytes != nil { + size, err := (*wrapperspb.UInt32Value)(m.PerConnectionBufferLimitBytes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.UseOriginalDst != nil { + size, err := (*wrapperspb.BoolValue)(m.UseOriginalDst).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.FilterChains) > 0 { + for iNdEx := len(m.FilterChains) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.FilterChains[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Listener_InternalListener) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Listener_InternalListener) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.InternalListener != nil { + size, err := m.InternalListener.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } + return len(dAtA) - i, nil +} +func (m *ListenerManager) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err 
!= nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenerManager) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListenerManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *ValidationListenerManager) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidationListenerManager) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValidationListenerManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *ApiListenerManager) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ApiListenerManager) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ApiListenerManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *AdditionalAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SocketOptions != nil { + if size, ok := interface{}(m.SocketOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SocketOptions) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ListenerCollection) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Entries) > 0 { + for _, e := range m.Entries { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Listener_DeprecatedV1) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BindToPort != nil { + l = (*wrapperspb.BoolValue)(m.BindToPort).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Listener_ConnectionBalanceConfig_ExactBalance) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *Listener_ConnectionBalanceConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int 
+ _ = l + if vtmsg, ok := m.BalanceType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Listener_ConnectionBalanceConfig_ExactBalance_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExactBalance != nil { + l = m.ExactBalance.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Listener_ConnectionBalanceConfig_ExtendBalance) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExtendBalance != nil { + if size, ok := interface{}(m.ExtendBalance).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ExtendBalance) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Listener_InternalListenerConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *Listener) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.FilterChains) > 0 { + for _, e := range m.FilterChains { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.UseOriginalDst != nil { + l = (*wrapperspb.BoolValue)(m.UseOriginalDst).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PerConnectionBufferLimitBytes != nil { + l = (*wrapperspb.UInt32Value)(m.PerConnectionBufferLimitBytes).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DeprecatedV1 != nil { + l = m.DeprecatedV1.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DrainType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.DrainType)) + } + if len(m.ListenerFilters) > 0 { + for _, e := range m.ListenerFilters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Transparent != nil { + l = (*wrapperspb.BoolValue)(m.Transparent).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Freebind != nil { + l = (*wrapperspb.BoolValue)(m.Freebind).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TcpFastOpenQueueLength != nil { + l = (*wrapperspb.UInt32Value)(m.TcpFastOpenQueueLength).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.SocketOptions) > 0 { + for _, e := range m.SocketOptions { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.ListenerFiltersTimeout != nil { + l = (*durationpb.Duration)(m.ListenerFiltersTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TrafficDirection != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.TrafficDirection)) + } + if m.ContinueOnListenerFiltersTimeout { + n += 3 + } + if m.UdpListenerConfig != nil { + l = m.UdpListenerConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if 
m.ApiListener != nil { + l = m.ApiListener.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectionBalanceConfig != nil { + l = m.ConnectionBalanceConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ReusePort { + n += 3 + } + if len(m.AccessLog) > 0 { + for _, e := range m.AccessLog { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.TcpBacklogSize != nil { + l = (*wrapperspb.UInt32Value)(m.TcpBacklogSize).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DefaultFilterChain != nil { + l = m.DefaultFilterChain.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.BindToPort != nil { + l = (*wrapperspb.BoolValue)(m.BindToPort).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ListenerSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + l = len(m.StatPrefix) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnableReusePort != nil { + l = (*wrapperspb.BoolValue)(m.EnableReusePort).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnableMptcp { + n += 3 + } + if m.IgnoreGlobalConnLimit { + n += 3 + } + if m.FilterChainMatcher != nil { + if size, ok := interface{}(m.FilterChainMatcher).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.FilterChainMatcher) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.AdditionalAddresses) > 0 { + for _, e := range m.AdditionalAddresses { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.MaxConnectionsToAcceptPerSocketEvent != nil { + l = (*wrapperspb.UInt32Value)(m.MaxConnectionsToAcceptPerSocketEvent).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.BypassOverloadManager { + n += 3 + } + n += len(m.unknownFields) + return n +} + +func (m *Listener_InternalListener) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.InternalListener != nil { + l = m.InternalListener.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *ListenerManager) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ValidationListenerManager) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ApiListenerManager) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.go new file mode 100644 index 0000000000..a2f3a48e23 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.go @@ -0,0 +1,380 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/listener/v3/quic_config.proto + +package listenerv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/cncf/xds/go/xds/annotations/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration specific to the UDP QUIC listener. +// [#next-free-field: 12] +type QuicProtocolOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + QuicProtocolOptions *v3.QuicProtocolOptions `protobuf:"bytes,1,opt,name=quic_protocol_options,json=quicProtocolOptions,proto3" json:"quic_protocol_options,omitempty"` + // Maximum number of milliseconds that connection will be alive when there is + // no network activity. + // + // If it is less than 1ms, Envoy will use 1ms. 300000ms if not specified. + IdleTimeout *durationpb.Duration `protobuf:"bytes,2,opt,name=idle_timeout,json=idleTimeout,proto3" json:"idle_timeout,omitempty"` + // Connection timeout in milliseconds before the crypto handshake is finished. + // + // If it is less than 5000ms, Envoy will use 5000ms. 20000ms if not specified. + CryptoHandshakeTimeout *durationpb.Duration `protobuf:"bytes,3,opt,name=crypto_handshake_timeout,json=cryptoHandshakeTimeout,proto3" json:"crypto_handshake_timeout,omitempty"` + // Runtime flag that controls whether the listener is enabled or not. If not specified, defaults + // to enabled. + Enabled *v3.RuntimeFeatureFlag `protobuf:"bytes,4,opt,name=enabled,proto3" json:"enabled,omitempty"` + // A multiplier to number of connections which is used to determine how many packets to read per + // event loop. A reasonable number should allow the listener to process enough payload but not + // starve TCP and other UDP sockets and also prevent long event loop duration. + // The default value is 32. This means if there are N QUIC connections, the total number of + // packets to read in each read event will be 32 * N. + // The actual number of packets to read in total by the UDP listener is also + // bound by 6000, regardless of this field or how many connections there are. + PacketsToReadToConnectionCountRatio *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=packets_to_read_to_connection_count_ratio,json=packetsToReadToConnectionCountRatio,proto3" json:"packets_to_read_to_connection_count_ratio,omitempty"` + // Configure which implementation of “quic::QuicCryptoClientStreamBase“ to be used for this listener. + // If not specified the :ref:`QUICHE default one configured by ` will be used. + // [#extension-category: envoy.quic.server.crypto_stream] + CryptoStreamConfig *v3.TypedExtensionConfig `protobuf:"bytes,6,opt,name=crypto_stream_config,json=cryptoStreamConfig,proto3" json:"crypto_stream_config,omitempty"` + // Configure which implementation of “quic::ProofSource“ to be used for this listener. 
+ // If not specified the :ref:`default one configured by ` will be used. + // [#extension-category: envoy.quic.proof_source] + ProofSourceConfig *v3.TypedExtensionConfig `protobuf:"bytes,7,opt,name=proof_source_config,json=proofSourceConfig,proto3" json:"proof_source_config,omitempty"` + // Config which implementation of “quic::ConnectionIdGeneratorInterface“ to be used for this listener. + // If not specified the :ref:`default one configured by ` will be used. + // [#extension-category: envoy.quic.connection_id_generator] + ConnectionIdGeneratorConfig *v3.TypedExtensionConfig `protobuf:"bytes,8,opt,name=connection_id_generator_config,json=connectionIdGeneratorConfig,proto3" json:"connection_id_generator_config,omitempty"` + // Configure the server's preferred address to advertise so that client can migrate to it. See :ref:`example ` which configures a pair of v4 and v6 preferred addresses. + // The current QUICHE implementation will advertise only one of the preferred IPv4 and IPv6 addresses based on the address family the client initially connects with. + // If not specified, Envoy will not advertise any server's preferred address. + // [#extension-category: envoy.quic.server_preferred_address] + ServerPreferredAddressConfig *v3.TypedExtensionConfig `protobuf:"bytes,9,opt,name=server_preferred_address_config,json=serverPreferredAddressConfig,proto3" json:"server_preferred_address_config,omitempty"` + // Configure the server to send transport parameter `disable_active_migration `_. + // Defaults to false (do not send this transport parameter). + SendDisableActiveMigration *wrapperspb.BoolValue `protobuf:"bytes,10,opt,name=send_disable_active_migration,json=sendDisableActiveMigration,proto3" json:"send_disable_active_migration,omitempty"` + // Configure which implementation of “quic::QuicConnectionDebugVisitor“ to be used for this listener. + // If not specified, no debug visitor will be attached to connections. + // [#extension-category: envoy.quic.connection_debug_visitor] + ConnectionDebugVisitorConfig *v3.TypedExtensionConfig `protobuf:"bytes,11,opt,name=connection_debug_visitor_config,json=connectionDebugVisitorConfig,proto3" json:"connection_debug_visitor_config,omitempty"` +} + +func (x *QuicProtocolOptions) Reset() { + *x = QuicProtocolOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_quic_config_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QuicProtocolOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QuicProtocolOptions) ProtoMessage() {} + +func (x *QuicProtocolOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_quic_config_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QuicProtocolOptions.ProtoReflect.Descriptor instead. 
+func (*QuicProtocolOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_quic_config_proto_rawDescGZIP(), []int{0} +} + +func (x *QuicProtocolOptions) GetQuicProtocolOptions() *v3.QuicProtocolOptions { + if x != nil { + return x.QuicProtocolOptions + } + return nil +} + +func (x *QuicProtocolOptions) GetIdleTimeout() *durationpb.Duration { + if x != nil { + return x.IdleTimeout + } + return nil +} + +func (x *QuicProtocolOptions) GetCryptoHandshakeTimeout() *durationpb.Duration { + if x != nil { + return x.CryptoHandshakeTimeout + } + return nil +} + +func (x *QuicProtocolOptions) GetEnabled() *v3.RuntimeFeatureFlag { + if x != nil { + return x.Enabled + } + return nil +} + +func (x *QuicProtocolOptions) GetPacketsToReadToConnectionCountRatio() *wrapperspb.UInt32Value { + if x != nil { + return x.PacketsToReadToConnectionCountRatio + } + return nil +} + +func (x *QuicProtocolOptions) GetCryptoStreamConfig() *v3.TypedExtensionConfig { + if x != nil { + return x.CryptoStreamConfig + } + return nil +} + +func (x *QuicProtocolOptions) GetProofSourceConfig() *v3.TypedExtensionConfig { + if x != nil { + return x.ProofSourceConfig + } + return nil +} + +func (x *QuicProtocolOptions) GetConnectionIdGeneratorConfig() *v3.TypedExtensionConfig { + if x != nil { + return x.ConnectionIdGeneratorConfig + } + return nil +} + +func (x *QuicProtocolOptions) GetServerPreferredAddressConfig() *v3.TypedExtensionConfig { + if x != nil { + return x.ServerPreferredAddressConfig + } + return nil +} + +func (x *QuicProtocolOptions) GetSendDisableActiveMigration() *wrapperspb.BoolValue { + if x != nil { + return x.SendDisableActiveMigration + } + return nil +} + +func (x *QuicProtocolOptions) GetConnectionDebugVisitorConfig() *v3.TypedExtensionConfig { + if x != nil { + return x.ConnectionDebugVisitorConfig + } + return nil +} + +var File_envoy_config_listener_v3_quic_config_proto protoreflect.FileDescriptor + +var file_envoy_config_listener_v3_quic_config_proto_rawDesc = []byte{ + 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x71, 0x75, 0x69, 0x63, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, + 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 
0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf6, + 0x08, 0x0a, 0x13, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x15, 0x71, 0x75, 0x69, 0x63, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x69, + 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x13, 0x71, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x0c, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x69, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x12, 0x53, 0x0a, 0x18, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x68, 0x61, + 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x16, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x42, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x46, + 0x6c, 0x61, 0x67, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x7d, 0x0a, 0x29, + 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, + 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x23, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x54, + 0x6f, 0x52, 0x65, 0x61, 0x64, 0x54, 0x6f, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x5c, 0x0a, 0x14, 
0x63, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x12, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5a, 0x0a, 0x13, 0x70, 0x72, 0x6f, + 0x6f, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x11, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6f, 0x0a, 0x1e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1b, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xd2, 0xc6, 0xa4, + 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x1c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x72, 0x65, + 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x5d, 0x0a, 0x1d, 0x73, 0x65, 0x6e, 0x64, 0x5f, 0x64, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, + 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x73, 0x65, 0x6e, 0x64, 0x44, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x71, 0x0a, 0x1f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x76, 0x69, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 
0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x56, 0x69, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x2e, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x8f, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, + 0x10, 0x02, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x51, 0x75, 0x69, 0x63, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_envoy_config_listener_v3_quic_config_proto_rawDescOnce sync.Once + file_envoy_config_listener_v3_quic_config_proto_rawDescData = file_envoy_config_listener_v3_quic_config_proto_rawDesc +) + +func file_envoy_config_listener_v3_quic_config_proto_rawDescGZIP() []byte { + file_envoy_config_listener_v3_quic_config_proto_rawDescOnce.Do(func() { + file_envoy_config_listener_v3_quic_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_listener_v3_quic_config_proto_rawDescData) + }) + return file_envoy_config_listener_v3_quic_config_proto_rawDescData +} + +var file_envoy_config_listener_v3_quic_config_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_listener_v3_quic_config_proto_goTypes = []interface{}{ + (*QuicProtocolOptions)(nil), // 0: envoy.config.listener.v3.QuicProtocolOptions + (*v3.QuicProtocolOptions)(nil), // 1: envoy.config.core.v3.QuicProtocolOptions + (*durationpb.Duration)(nil), // 2: google.protobuf.Duration + (*v3.RuntimeFeatureFlag)(nil), // 3: envoy.config.core.v3.RuntimeFeatureFlag + (*wrapperspb.UInt32Value)(nil), // 4: google.protobuf.UInt32Value + (*v3.TypedExtensionConfig)(nil), // 5: envoy.config.core.v3.TypedExtensionConfig + (*wrapperspb.BoolValue)(nil), // 6: google.protobuf.BoolValue +} +var file_envoy_config_listener_v3_quic_config_proto_depIdxs = []int32{ + 1, // 0: envoy.config.listener.v3.QuicProtocolOptions.quic_protocol_options:type_name -> envoy.config.core.v3.QuicProtocolOptions + 2, // 1: envoy.config.listener.v3.QuicProtocolOptions.idle_timeout:type_name -> google.protobuf.Duration + 2, // 2: envoy.config.listener.v3.QuicProtocolOptions.crypto_handshake_timeout:type_name -> google.protobuf.Duration + 3, // 3: envoy.config.listener.v3.QuicProtocolOptions.enabled:type_name -> envoy.config.core.v3.RuntimeFeatureFlag + 4, // 4: envoy.config.listener.v3.QuicProtocolOptions.packets_to_read_to_connection_count_ratio:type_name -> google.protobuf.UInt32Value + 5, // 5: 
envoy.config.listener.v3.QuicProtocolOptions.crypto_stream_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 5, // 6: envoy.config.listener.v3.QuicProtocolOptions.proof_source_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 5, // 7: envoy.config.listener.v3.QuicProtocolOptions.connection_id_generator_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 5, // 8: envoy.config.listener.v3.QuicProtocolOptions.server_preferred_address_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 6, // 9: envoy.config.listener.v3.QuicProtocolOptions.send_disable_active_migration:type_name -> google.protobuf.BoolValue + 5, // 10: envoy.config.listener.v3.QuicProtocolOptions.connection_debug_visitor_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 11, // [11:11] is the sub-list for method output_type + 11, // [11:11] is the sub-list for method input_type + 11, // [11:11] is the sub-list for extension type_name + 11, // [11:11] is the sub-list for extension extendee + 0, // [0:11] is the sub-list for field type_name +} + +func init() { file_envoy_config_listener_v3_quic_config_proto_init() } +func file_envoy_config_listener_v3_quic_config_proto_init() { + if File_envoy_config_listener_v3_quic_config_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_listener_v3_quic_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QuicProtocolOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_listener_v3_quic_config_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_listener_v3_quic_config_proto_goTypes, + DependencyIndexes: file_envoy_config_listener_v3_quic_config_proto_depIdxs, + MessageInfos: file_envoy_config_listener_v3_quic_config_proto_msgTypes, + }.Build() + File_envoy_config_listener_v3_quic_config_proto = out.File + file_envoy_config_listener_v3_quic_config_proto_rawDesc = nil + file_envoy_config_listener_v3_quic_config_proto_goTypes = nil + file_envoy_config_listener_v3_quic_config_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.validate.go new file mode 100644 index 0000000000..efabacb665 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.validate.go @@ -0,0 +1,444 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/config/listener/v3/quic_config.proto + +package listenerv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on QuicProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *QuicProtocolOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on QuicProtocolOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// QuicProtocolOptionsMultiError, or nil if none found. +func (m *QuicProtocolOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *QuicProtocolOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetQuicProtocolOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "QuicProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "QuicProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetQuicProtocolOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "QuicProtocolOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetIdleTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "IdleTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "IdleTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetIdleTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "IdleTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCryptoHandshakeTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "CryptoHandshakeTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "CryptoHandshakeTimeout", + reason: "embedded 
message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCryptoHandshakeTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "CryptoHandshakeTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetEnabled()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "Enabled", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "Enabled", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEnabled()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "Enabled", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if wrapper := m.GetPacketsToReadToConnectionCountRatio(); wrapper != nil { + + if wrapper.GetValue() < 1 { + err := QuicProtocolOptionsValidationError{ + field: "PacketsToReadToConnectionCountRatio", + reason: "value must be greater than or equal to 1", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetCryptoStreamConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "CryptoStreamConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "CryptoStreamConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCryptoStreamConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "CryptoStreamConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetProofSourceConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "ProofSourceConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "ProofSourceConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetProofSourceConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "ProofSourceConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetConnectionIdGeneratorConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "ConnectionIdGeneratorConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() 
error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "ConnectionIdGeneratorConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConnectionIdGeneratorConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "ConnectionIdGeneratorConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetServerPreferredAddressConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "ServerPreferredAddressConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "ServerPreferredAddressConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetServerPreferredAddressConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "ServerPreferredAddressConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSendDisableActiveMigration()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "SendDisableActiveMigration", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "SendDisableActiveMigration", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSendDisableActiveMigration()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "SendDisableActiveMigration", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetConnectionDebugVisitorConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "ConnectionDebugVisitorConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuicProtocolOptionsValidationError{ + field: "ConnectionDebugVisitorConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConnectionDebugVisitorConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuicProtocolOptionsValidationError{ + field: "ConnectionDebugVisitorConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return QuicProtocolOptionsMultiError(errors) + } + + return nil +} + +// QuicProtocolOptionsMultiError is an error wrapping multiple validation +// errors returned by QuicProtocolOptions.ValidateAll() if the designated +// constraints aren't met. 
+type QuicProtocolOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m QuicProtocolOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m QuicProtocolOptionsMultiError) AllErrors() []error { return m } + +// QuicProtocolOptionsValidationError is the validation error returned by +// QuicProtocolOptions.Validate if the designated constraints aren't met. +type QuicProtocolOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e QuicProtocolOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e QuicProtocolOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e QuicProtocolOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e QuicProtocolOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e QuicProtocolOptionsValidationError) ErrorName() string { + return "QuicProtocolOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e QuicProtocolOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sQuicProtocolOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = QuicProtocolOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = QuicProtocolOptionsValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config_vtproto.pb.go new file mode 100644 index 0000000000..4b0826804a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config_vtproto.pb.go @@ -0,0 +1,345 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/listener/v3/quic_config.proto + +package listenerv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *QuicProtocolOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QuicProtocolOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *QuicProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ConnectionDebugVisitorConfig != nil { + if vtmsg, ok := interface{}(m.ConnectionDebugVisitorConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConnectionDebugVisitorConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x5a + } + if m.SendDisableActiveMigration != nil { + size, err := (*wrapperspb.BoolValue)(m.SendDisableActiveMigration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if m.ServerPreferredAddressConfig != nil { + if vtmsg, ok := interface{}(m.ServerPreferredAddressConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ServerPreferredAddressConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + if m.ConnectionIdGeneratorConfig != nil { + if vtmsg, ok := interface{}(m.ConnectionIdGeneratorConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConnectionIdGeneratorConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + if m.ProofSourceConfig != nil { + if vtmsg, ok := interface{}(m.ProofSourceConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ProofSourceConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + if m.CryptoStreamConfig != nil { + if vtmsg, ok := interface{}(m.CryptoStreamConfig).(interface { + 
MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CryptoStreamConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + if m.PacketsToReadToConnectionCountRatio != nil { + size, err := (*wrapperspb.UInt32Value)(m.PacketsToReadToConnectionCountRatio).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.Enabled != nil { + if vtmsg, ok := interface{}(m.Enabled).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Enabled) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.CryptoHandshakeTimeout != nil { + size, err := (*durationpb.Duration)(m.CryptoHandshakeTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.IdleTimeout != nil { + size, err := (*durationpb.Duration)(m.IdleTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.QuicProtocolOptions != nil { + if vtmsg, ok := interface{}(m.QuicProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.QuicProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QuicProtocolOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.QuicProtocolOptions != nil { + if size, ok := interface{}(m.QuicProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.QuicProtocolOptions) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IdleTimeout != nil { + l = (*durationpb.Duration)(m.IdleTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CryptoHandshakeTimeout != nil { + l = (*durationpb.Duration)(m.CryptoHandshakeTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Enabled != nil { + if size, ok := interface{}(m.Enabled).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Enabled) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PacketsToReadToConnectionCountRatio != nil { + l = (*wrapperspb.UInt32Value)(m.PacketsToReadToConnectionCountRatio).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CryptoStreamConfig != nil { + if size, 
ok := interface{}(m.CryptoStreamConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CryptoStreamConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ProofSourceConfig != nil { + if size, ok := interface{}(m.ProofSourceConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ProofSourceConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectionIdGeneratorConfig != nil { + if size, ok := interface{}(m.ConnectionIdGeneratorConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ConnectionIdGeneratorConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ServerPreferredAddressConfig != nil { + if size, ok := interface{}(m.ServerPreferredAddressConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ServerPreferredAddressConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SendDisableActiveMigration != nil { + l = (*wrapperspb.BoolValue)(m.SendDisableActiveMigration).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectionDebugVisitorConfig != nil { + if size, ok := interface{}(m.ConnectionDebugVisitorConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ConnectionDebugVisitorConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.go new file mode 100644 index 0000000000..a5fb162223 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.go @@ -0,0 +1,283 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/listener/v3/udp_listener_config.proto + +package listenerv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// [#next-free-field: 9] +type UdpListenerConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // UDP socket configuration for the listener. The default for + // :ref:`prefer_gro ` is false for + // listener sockets. If receiving a large amount of datagrams from a small number of sources, it + // may be worthwhile to enable this option after performance testing. + DownstreamSocketConfig *v3.UdpSocketConfig `protobuf:"bytes,5,opt,name=downstream_socket_config,json=downstreamSocketConfig,proto3" json:"downstream_socket_config,omitempty"` + // Configuration for QUIC protocol. If empty, QUIC will not be enabled on this listener. Set + // to the default object to enable QUIC without modifying any additional options. 
+ QuicOptions *QuicProtocolOptions `protobuf:"bytes,7,opt,name=quic_options,json=quicOptions,proto3" json:"quic_options,omitempty"` + // Configuration for the UDP packet writer. If empty, HTTP/3 will use GSO if available + // (:ref:`UdpDefaultWriterFactory `) + // or the default kernel sendmsg if not, + // (:ref:`UdpDefaultWriterFactory `) + // and raw UDP will use kernel sendmsg. + // [#extension-category: envoy.udp_packet_writer] + UdpPacketPacketWriterConfig *v3.TypedExtensionConfig `protobuf:"bytes,8,opt,name=udp_packet_packet_writer_config,json=udpPacketPacketWriterConfig,proto3" json:"udp_packet_packet_writer_config,omitempty"` +} + +func (x *UdpListenerConfig) Reset() { + *x = UdpListenerConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_udp_listener_config_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UdpListenerConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UdpListenerConfig) ProtoMessage() {} + +func (x *UdpListenerConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_udp_listener_config_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UdpListenerConfig.ProtoReflect.Descriptor instead. +func (*UdpListenerConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_udp_listener_config_proto_rawDescGZIP(), []int{0} +} + +func (x *UdpListenerConfig) GetDownstreamSocketConfig() *v3.UdpSocketConfig { + if x != nil { + return x.DownstreamSocketConfig + } + return nil +} + +func (x *UdpListenerConfig) GetQuicOptions() *QuicProtocolOptions { + if x != nil { + return x.QuicOptions + } + return nil +} + +func (x *UdpListenerConfig) GetUdpPacketPacketWriterConfig() *v3.TypedExtensionConfig { + if x != nil { + return x.UdpPacketPacketWriterConfig + } + return nil +} + +type ActiveRawUdpListenerConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ActiveRawUdpListenerConfig) Reset() { + *x = ActiveRawUdpListenerConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_listener_v3_udp_listener_config_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ActiveRawUdpListenerConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActiveRawUdpListenerConfig) ProtoMessage() {} + +func (x *ActiveRawUdpListenerConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_listener_v3_udp_listener_config_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActiveRawUdpListenerConfig.ProtoReflect.Descriptor instead. 
+func (*ActiveRawUdpListenerConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_listener_v3_udp_listener_config_proto_rawDescGZIP(), []int{1} +} + +var File_envoy_config_listener_v3_udp_listener_config_proto protoreflect.FileDescriptor + +var file_envoy_config_listener_v3_udp_listener_config_proto_rawDesc = []byte{ + 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x64, 0x70, 0x5f, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x24, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x64, 0x70, 0x5f, 0x73, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x71, 0x75, 0x69, + 0x63, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x8e, 0x03, 0x0a, 0x11, 0x55, 0x64, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5f, 0x0a, 0x18, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x55, 0x64, 0x70, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x16, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x50, 0x0a, 0x0c, 0x71, 0x75, 0x69, 0x63, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x71, 0x75, + 0x69, 0x63, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x70, 0x0a, 0x1f, 0x75, 0x64, 0x70, + 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 
0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1b, + 0x75, 0x64, 0x70, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, + 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x55, 0x64, 0x70, 0x4c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x04, 0x08, 0x01, 0x10, + 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, + 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x22, 0x55, 0x0a, 0x1a, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x61, 0x77, 0x55, 0x64, + 0x70, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, + 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x41, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x52, 0x61, 0x77, 0x55, 0x64, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x96, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, + 0x02, 0x10, 0x02, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x16, 0x55, 0x64, 0x70, + 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x76, + 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_listener_v3_udp_listener_config_proto_rawDescOnce sync.Once + file_envoy_config_listener_v3_udp_listener_config_proto_rawDescData = file_envoy_config_listener_v3_udp_listener_config_proto_rawDesc +) + +func file_envoy_config_listener_v3_udp_listener_config_proto_rawDescGZIP() []byte { + file_envoy_config_listener_v3_udp_listener_config_proto_rawDescOnce.Do(func() { + file_envoy_config_listener_v3_udp_listener_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_listener_v3_udp_listener_config_proto_rawDescData) + }) + return file_envoy_config_listener_v3_udp_listener_config_proto_rawDescData +} + +var file_envoy_config_listener_v3_udp_listener_config_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_config_listener_v3_udp_listener_config_proto_goTypes = []interface{}{ + (*UdpListenerConfig)(nil), // 0: envoy.config.listener.v3.UdpListenerConfig + (*ActiveRawUdpListenerConfig)(nil), // 1: envoy.config.listener.v3.ActiveRawUdpListenerConfig + (*v3.UdpSocketConfig)(nil), // 2: envoy.config.core.v3.UdpSocketConfig + (*QuicProtocolOptions)(nil), // 3: 
envoy.config.listener.v3.QuicProtocolOptions + (*v3.TypedExtensionConfig)(nil), // 4: envoy.config.core.v3.TypedExtensionConfig +} +var file_envoy_config_listener_v3_udp_listener_config_proto_depIdxs = []int32{ + 2, // 0: envoy.config.listener.v3.UdpListenerConfig.downstream_socket_config:type_name -> envoy.config.core.v3.UdpSocketConfig + 3, // 1: envoy.config.listener.v3.UdpListenerConfig.quic_options:type_name -> envoy.config.listener.v3.QuicProtocolOptions + 4, // 2: envoy.config.listener.v3.UdpListenerConfig.udp_packet_packet_writer_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_envoy_config_listener_v3_udp_listener_config_proto_init() } +func file_envoy_config_listener_v3_udp_listener_config_proto_init() { + if File_envoy_config_listener_v3_udp_listener_config_proto != nil { + return + } + file_envoy_config_listener_v3_quic_config_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_listener_v3_udp_listener_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UdpListenerConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_listener_v3_udp_listener_config_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ActiveRawUdpListenerConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_listener_v3_udp_listener_config_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_listener_v3_udp_listener_config_proto_goTypes, + DependencyIndexes: file_envoy_config_listener_v3_udp_listener_config_proto_depIdxs, + MessageInfos: file_envoy_config_listener_v3_udp_listener_config_proto_msgTypes, + }.Build() + File_envoy_config_listener_v3_udp_listener_config_proto = out.File + file_envoy_config_listener_v3_udp_listener_config_proto_rawDesc = nil + file_envoy_config_listener_v3_udp_listener_config_proto_goTypes = nil + file_envoy_config_listener_v3_udp_listener_config_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.validate.go new file mode 100644 index 0000000000..e52578e981 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config.pb.validate.go @@ -0,0 +1,328 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/config/listener/v3/udp_listener_config.proto + +package listenerv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on UdpListenerConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *UdpListenerConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UdpListenerConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// UdpListenerConfigMultiError, or nil if none found. +func (m *UdpListenerConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *UdpListenerConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetDownstreamSocketConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UdpListenerConfigValidationError{ + field: "DownstreamSocketConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UdpListenerConfigValidationError{ + field: "DownstreamSocketConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDownstreamSocketConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UdpListenerConfigValidationError{ + field: "DownstreamSocketConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetQuicOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UdpListenerConfigValidationError{ + field: "QuicOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UdpListenerConfigValidationError{ + field: "QuicOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetQuicOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UdpListenerConfigValidationError{ + field: "QuicOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUdpPacketPacketWriterConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UdpListenerConfigValidationError{ + field: "UdpPacketPacketWriterConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UdpListenerConfigValidationError{ + field: "UdpPacketPacketWriterConfig", + reason: 
"embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUdpPacketPacketWriterConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UdpListenerConfigValidationError{ + field: "UdpPacketPacketWriterConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return UdpListenerConfigMultiError(errors) + } + + return nil +} + +// UdpListenerConfigMultiError is an error wrapping multiple validation errors +// returned by UdpListenerConfig.ValidateAll() if the designated constraints +// aren't met. +type UdpListenerConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UdpListenerConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UdpListenerConfigMultiError) AllErrors() []error { return m } + +// UdpListenerConfigValidationError is the validation error returned by +// UdpListenerConfig.Validate if the designated constraints aren't met. +type UdpListenerConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UdpListenerConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UdpListenerConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UdpListenerConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UdpListenerConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UdpListenerConfigValidationError) ErrorName() string { + return "UdpListenerConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e UdpListenerConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUdpListenerConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UdpListenerConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UdpListenerConfigValidationError{} + +// Validate checks the field values on ActiveRawUdpListenerConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ActiveRawUdpListenerConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ActiveRawUdpListenerConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ActiveRawUdpListenerConfigMultiError, or nil if none found. 
+func (m *ActiveRawUdpListenerConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *ActiveRawUdpListenerConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return ActiveRawUdpListenerConfigMultiError(errors) + } + + return nil +} + +// ActiveRawUdpListenerConfigMultiError is an error wrapping multiple +// validation errors returned by ActiveRawUdpListenerConfig.ValidateAll() if +// the designated constraints aren't met. +type ActiveRawUdpListenerConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ActiveRawUdpListenerConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ActiveRawUdpListenerConfigMultiError) AllErrors() []error { return m } + +// ActiveRawUdpListenerConfigValidationError is the validation error returned +// by ActiveRawUdpListenerConfig.Validate if the designated constraints aren't met. +type ActiveRawUdpListenerConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ActiveRawUdpListenerConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ActiveRawUdpListenerConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ActiveRawUdpListenerConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ActiveRawUdpListenerConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ActiveRawUdpListenerConfigValidationError) ErrorName() string { + return "ActiveRawUdpListenerConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e ActiveRawUdpListenerConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sActiveRawUdpListenerConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ActiveRawUdpListenerConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ActiveRawUdpListenerConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config_vtproto.pb.go new file mode 100644 index 0000000000..3262593354 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/udp_listener_config_vtproto.pb.go @@ -0,0 +1,184 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/listener/v3/udp_listener_config.proto + +package listenerv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *UdpListenerConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UdpListenerConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UdpListenerConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.UdpPacketPacketWriterConfig != nil { + if vtmsg, ok := interface{}(m.UdpPacketPacketWriterConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UdpPacketPacketWriterConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + if m.QuicOptions != nil { + size, err := m.QuicOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.DownstreamSocketConfig != nil { + if vtmsg, ok := interface{}(m.DownstreamSocketConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DownstreamSocketConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} + +func (m *ActiveRawUdpListenerConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ActiveRawUdpListenerConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ActiveRawUdpListenerConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *UdpListenerConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DownstreamSocketConfig != nil { + if size, ok := interface{}(m.DownstreamSocketConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DownstreamSocketConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.QuicOptions != nil { + l = m.QuicOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UdpPacketPacketWriterConfig != nil { + if size, ok := 
interface{}(m.UdpPacketPacketWriterConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UdpPacketPacketWriterConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ActiveRawUdpListenerConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.go new file mode 100644 index 0000000000..7ff4dff165 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.go @@ -0,0 +1,324 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/metrics/v3/metrics_service.proto + +package metricsv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// HistogramEmitMode is used to configure which metric types should be emitted for histograms. +type HistogramEmitMode int32 + +const ( + // Emit Histogram and Summary metric types. + HistogramEmitMode_SUMMARY_AND_HISTOGRAM HistogramEmitMode = 0 + // Emit only Summary metric types. + HistogramEmitMode_SUMMARY HistogramEmitMode = 1 + // Emit only Histogram metric types. + HistogramEmitMode_HISTOGRAM HistogramEmitMode = 2 +) + +// Enum value maps for HistogramEmitMode. +var ( + HistogramEmitMode_name = map[int32]string{ + 0: "SUMMARY_AND_HISTOGRAM", + 1: "SUMMARY", + 2: "HISTOGRAM", + } + HistogramEmitMode_value = map[string]int32{ + "SUMMARY_AND_HISTOGRAM": 0, + "SUMMARY": 1, + "HISTOGRAM": 2, + } +) + +func (x HistogramEmitMode) Enum() *HistogramEmitMode { + p := new(HistogramEmitMode) + *p = x + return p +} + +func (x HistogramEmitMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HistogramEmitMode) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_metrics_v3_metrics_service_proto_enumTypes[0].Descriptor() +} + +func (HistogramEmitMode) Type() protoreflect.EnumType { + return &file_envoy_config_metrics_v3_metrics_service_proto_enumTypes[0] +} + +func (x HistogramEmitMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HistogramEmitMode.Descriptor instead. +func (HistogramEmitMode) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_metrics_v3_metrics_service_proto_rawDescGZIP(), []int{0} +} + +// Metrics Service is configured as a built-in “envoy.stat_sinks.metrics_service“ :ref:`StatsSink +// `. This opaque configuration will be used to create +// Metrics Service. +// +// Example: +// +// .. 
code-block:: yaml +// +// stats_sinks: +// - name: envoy.stat_sinks.metrics_service +// typed_config: +// "@type": type.googleapis.com/envoy.config.metrics.v3.MetricsServiceConfig +// +// [#extension: envoy.stat_sinks.metrics_service] +// [#next-free-field: 6] +type MetricsServiceConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The upstream gRPC cluster that hosts the metrics service. + GrpcService *v3.GrpcService `protobuf:"bytes,1,opt,name=grpc_service,json=grpcService,proto3" json:"grpc_service,omitempty"` + // API version for metric service transport protocol. This describes the metric service gRPC + // endpoint and version of messages used on the wire. + TransportApiVersion v3.ApiVersion `protobuf:"varint,3,opt,name=transport_api_version,json=transportApiVersion,proto3,enum=envoy.config.core.v3.ApiVersion" json:"transport_api_version,omitempty"` + // If true, counters are reported as the delta between flushing intervals. Otherwise, the current + // counter value is reported. Defaults to false. + // Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value is not set, the + // sink will take updates from the :ref:`MetricsResponse `. + ReportCountersAsDeltas *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=report_counters_as_deltas,json=reportCountersAsDeltas,proto3" json:"report_counters_as_deltas,omitempty"` + // If true, metrics will have their tags emitted as labels on the metrics objects sent to the MetricsService, + // and the tag extracted name will be used instead of the full name, which may contain values used by the tag + // extractor or additional tags added during stats creation. + EmitTagsAsLabels bool `protobuf:"varint,4,opt,name=emit_tags_as_labels,json=emitTagsAsLabels,proto3" json:"emit_tags_as_labels,omitempty"` + // Specify which metrics types to emit for histograms. Defaults to SUMMARY_AND_HISTOGRAM. + HistogramEmitMode HistogramEmitMode `protobuf:"varint,5,opt,name=histogram_emit_mode,json=histogramEmitMode,proto3,enum=envoy.config.metrics.v3.HistogramEmitMode" json:"histogram_emit_mode,omitempty"` +} + +func (x *MetricsServiceConfig) Reset() { + *x = MetricsServiceConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_metrics_v3_metrics_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetricsServiceConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetricsServiceConfig) ProtoMessage() {} + +func (x *MetricsServiceConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_metrics_v3_metrics_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetricsServiceConfig.ProtoReflect.Descriptor instead. 
+func (*MetricsServiceConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_metrics_v3_metrics_service_proto_rawDescGZIP(), []int{0} +} + +func (x *MetricsServiceConfig) GetGrpcService() *v3.GrpcService { + if x != nil { + return x.GrpcService + } + return nil +} + +func (x *MetricsServiceConfig) GetTransportApiVersion() v3.ApiVersion { + if x != nil { + return x.TransportApiVersion + } + return v3.ApiVersion(0) +} + +func (x *MetricsServiceConfig) GetReportCountersAsDeltas() *wrapperspb.BoolValue { + if x != nil { + return x.ReportCountersAsDeltas + } + return nil +} + +func (x *MetricsServiceConfig) GetEmitTagsAsLabels() bool { + if x != nil { + return x.EmitTagsAsLabels + } + return false +} + +func (x *MetricsServiceConfig) GetHistogramEmitMode() HistogramEmitMode { + if x != nil { + return x.HistogramEmitMode + } + return HistogramEmitMode_SUMMARY_AND_HISTOGRAM +} + +var File_envoy_config_metrics_v3_metrics_service_proto protoreflect.FileDescriptor + +var file_envoy_config_metrics_v3_metrics_service_proto_rawDesc = []byte{ + 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x17, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, + 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe7, 0x03, 0x0a, 0x14, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x4e, 0x0a, 0x0c, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, + 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, + 0x10, 0x01, 0x52, 0x0b, 0x67, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, + 0x5e, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x61, 0x70, 0x69, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 
0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x13, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x55, 0x0a, 0x19, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, + 0x72, 0x73, 0x5f, 0x61, 0x73, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x16, + 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x41, 0x73, + 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x12, 0x2d, 0x0a, 0x13, 0x65, 0x6d, 0x69, 0x74, 0x5f, 0x74, + 0x61, 0x67, 0x73, 0x5f, 0x61, 0x73, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x6d, 0x69, 0x74, 0x54, 0x61, 0x67, 0x73, 0x41, 0x73, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x64, 0x0a, 0x13, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, + 0x61, 0x6d, 0x5f, 0x65, 0x6d, 0x69, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x69, 0x73, + 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x45, 0x6d, 0x69, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x11, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, + 0x72, 0x61, 0x6d, 0x45, 0x6d, 0x69, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x33, 0x9a, 0xc5, 0x88, + 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x45, 0x6d, 0x69, + 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, + 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x00, + 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0d, 0x0a, + 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x02, 0x42, 0x90, 0x01, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x13, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x65, 0x74, 0x72, 0x69, 
0x63, 0x73, 0x76, 0x33, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_metrics_v3_metrics_service_proto_rawDescOnce sync.Once + file_envoy_config_metrics_v3_metrics_service_proto_rawDescData = file_envoy_config_metrics_v3_metrics_service_proto_rawDesc +) + +func file_envoy_config_metrics_v3_metrics_service_proto_rawDescGZIP() []byte { + file_envoy_config_metrics_v3_metrics_service_proto_rawDescOnce.Do(func() { + file_envoy_config_metrics_v3_metrics_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_metrics_v3_metrics_service_proto_rawDescData) + }) + return file_envoy_config_metrics_v3_metrics_service_proto_rawDescData +} + +var file_envoy_config_metrics_v3_metrics_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_config_metrics_v3_metrics_service_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_metrics_v3_metrics_service_proto_goTypes = []interface{}{ + (HistogramEmitMode)(0), // 0: envoy.config.metrics.v3.HistogramEmitMode + (*MetricsServiceConfig)(nil), // 1: envoy.config.metrics.v3.MetricsServiceConfig + (*v3.GrpcService)(nil), // 2: envoy.config.core.v3.GrpcService + (v3.ApiVersion)(0), // 3: envoy.config.core.v3.ApiVersion + (*wrapperspb.BoolValue)(nil), // 4: google.protobuf.BoolValue +} +var file_envoy_config_metrics_v3_metrics_service_proto_depIdxs = []int32{ + 2, // 0: envoy.config.metrics.v3.MetricsServiceConfig.grpc_service:type_name -> envoy.config.core.v3.GrpcService + 3, // 1: envoy.config.metrics.v3.MetricsServiceConfig.transport_api_version:type_name -> envoy.config.core.v3.ApiVersion + 4, // 2: envoy.config.metrics.v3.MetricsServiceConfig.report_counters_as_deltas:type_name -> google.protobuf.BoolValue + 0, // 3: envoy.config.metrics.v3.MetricsServiceConfig.histogram_emit_mode:type_name -> envoy.config.metrics.v3.HistogramEmitMode + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_envoy_config_metrics_v3_metrics_service_proto_init() } +func file_envoy_config_metrics_v3_metrics_service_proto_init() { + if File_envoy_config_metrics_v3_metrics_service_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_metrics_v3_metrics_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricsServiceConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_metrics_v3_metrics_service_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_metrics_v3_metrics_service_proto_goTypes, + DependencyIndexes: file_envoy_config_metrics_v3_metrics_service_proto_depIdxs, + EnumInfos: file_envoy_config_metrics_v3_metrics_service_proto_enumTypes, + MessageInfos: file_envoy_config_metrics_v3_metrics_service_proto_msgTypes, + }.Build() + File_envoy_config_metrics_v3_metrics_service_proto = out.File + file_envoy_config_metrics_v3_metrics_service_proto_rawDesc = nil + file_envoy_config_metrics_v3_metrics_service_proto_goTypes = nil + file_envoy_config_metrics_v3_metrics_service_proto_depIdxs = nil 
+} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.validate.go new file mode 100644 index 0000000000..803f930ba7 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service.pb.validate.go @@ -0,0 +1,236 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/metrics/v3/metrics_service.proto + +package metricsv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = v3.ApiVersion(0) +) + +// Validate checks the field values on MetricsServiceConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *MetricsServiceConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MetricsServiceConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MetricsServiceConfigMultiError, or nil if none found. +func (m *MetricsServiceConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *MetricsServiceConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetGrpcService() == nil { + err := MetricsServiceConfigValidationError{ + field: "GrpcService", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetGrpcService()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetricsServiceConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetricsServiceConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpcService()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetricsServiceConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if _, ok := v3.ApiVersion_name[int32(m.GetTransportApiVersion())]; !ok { + err := MetricsServiceConfigValidationError{ + field: "TransportApiVersion", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetReportCountersAsDeltas()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetricsServiceConfigValidationError{ + field: "ReportCountersAsDeltas", + reason: "embedded message failed 
validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetricsServiceConfigValidationError{ + field: "ReportCountersAsDeltas", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetReportCountersAsDeltas()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetricsServiceConfigValidationError{ + field: "ReportCountersAsDeltas", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for EmitTagsAsLabels + + if _, ok := HistogramEmitMode_name[int32(m.GetHistogramEmitMode())]; !ok { + err := MetricsServiceConfigValidationError{ + field: "HistogramEmitMode", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return MetricsServiceConfigMultiError(errors) + } + + return nil +} + +// MetricsServiceConfigMultiError is an error wrapping multiple validation +// errors returned by MetricsServiceConfig.ValidateAll() if the designated +// constraints aren't met. +type MetricsServiceConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MetricsServiceConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MetricsServiceConfigMultiError) AllErrors() []error { return m } + +// MetricsServiceConfigValidationError is the validation error returned by +// MetricsServiceConfig.Validate if the designated constraints aren't met. +type MetricsServiceConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MetricsServiceConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MetricsServiceConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetricsServiceConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MetricsServiceConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e MetricsServiceConfigValidationError) ErrorName() string { + return "MetricsServiceConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e MetricsServiceConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetricsServiceConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetricsServiceConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetricsServiceConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service_vtproto.pb.go new file mode 100644 index 0000000000..b3f692c45c --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/metrics_service_vtproto.pb.go @@ -0,0 +1,139 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/metrics/v3/metrics_service.proto + +package metricsv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *MetricsServiceConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricsServiceConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetricsServiceConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.HistogramEmitMode != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HistogramEmitMode)) + i-- + dAtA[i] = 0x28 + } + if m.EmitTagsAsLabels { + i-- + if m.EmitTagsAsLabels { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.TransportApiVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TransportApiVersion)) + i-- + dAtA[i] = 0x18 + } + if m.ReportCountersAsDeltas != nil { + size, err := (*wrapperspb.BoolValue)(m.ReportCountersAsDeltas).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.GrpcService != nil { + if vtmsg, ok := interface{}(m.GrpcService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.GrpcService) + if err != nil { + 
return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MetricsServiceConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GrpcService != nil { + if size, ok := interface{}(m.GrpcService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.GrpcService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ReportCountersAsDeltas != nil { + l = (*wrapperspb.BoolValue)(m.ReportCountersAsDeltas).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TransportApiVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TransportApiVersion)) + } + if m.EmitTagsAsLabels { + n += 2 + } + if m.HistogramEmitMode != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HistogramEmitMode)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.go new file mode 100644 index 0000000000..0268990028 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.go @@ -0,0 +1,1185 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/metrics/v3/stats.proto + +package metricsv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v31 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration for pluggable stats sinks. +type StatsSink struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the stats sink to instantiate. The name must match a supported + // stats sink. + // See the :ref:`extensions listed in typed_config below ` for the default list of available stats sink. + // Sinks optionally support tagged/multiple dimensional metrics. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Stats sink specific configuration which depends on the sink being instantiated. See + // :ref:`StatsdSink ` for an example. 
+ // [#extension-category: envoy.stats_sinks] + // + // Types that are assignable to ConfigType: + // + // *StatsSink_TypedConfig + ConfigType isStatsSink_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *StatsSink) Reset() { + *x = StatsSink{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatsSink) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatsSink) ProtoMessage() {} + +func (x *StatsSink) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatsSink.ProtoReflect.Descriptor instead. +func (*StatsSink) Descriptor() ([]byte, []int) { + return file_envoy_config_metrics_v3_stats_proto_rawDescGZIP(), []int{0} +} + +func (x *StatsSink) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *StatsSink) GetConfigType() isStatsSink_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *StatsSink) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*StatsSink_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isStatsSink_ConfigType interface { + isStatsSink_ConfigType() +} + +type StatsSink_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*StatsSink_TypedConfig) isStatsSink_ConfigType() {} + +// Statistics configuration such as tagging. +type StatsConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Each stat name is independently processed through these tag specifiers. When a tag is + // matched, the first capture group is not immediately removed from the name, so later + // :ref:`TagSpecifiers ` can also match that + // same portion of the match. After all tag matching is complete, a tag-extracted version of + // the name is produced and is used in stats sinks that represent tags, such as Prometheus. + StatsTags []*TagSpecifier `protobuf:"bytes,1,rep,name=stats_tags,json=statsTags,proto3" json:"stats_tags,omitempty"` + // Use all default tag regexes specified in Envoy. These can be combined with + // custom tags specified in :ref:`stats_tags + // `. They will be processed before + // the custom tags. + // + // .. note:: + // + // If any default tags are specified twice, the config will be considered + // invalid. + // + // See :repo:`well_known_names.h ` for a list of the + // default tags in Envoy. + // + // If not provided, the value is assumed to be true. + UseAllDefaultTags *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=use_all_default_tags,json=useAllDefaultTags,proto3" json:"use_all_default_tags,omitempty"` + // Inclusion/exclusion matcher for stat name creation. If not provided, all stats are instantiated + // as normal. Preventing the instantiation of certain families of stats can improve memory + // performance for Envoys running especially large configs. + // + // .. warning:: + // + // Excluding stats may affect Envoy's behavior in undocumented ways. See + // `issue #8771 `_ for more information. + // If any unexpected behavior changes are observed, please open a new issue immediately. 
+ StatsMatcher *StatsMatcher `protobuf:"bytes,3,opt,name=stats_matcher,json=statsMatcher,proto3" json:"stats_matcher,omitempty"` + // Defines rules for setting the histogram buckets. Rules are evaluated in order, and the first + // match is applied. If no match is found (or if no rules are set), the following default buckets + // are used: + // + // .. code-block:: json + // + // [ + // 0.5, + // 1, + // 5, + // 10, + // 25, + // 50, + // 100, + // 250, + // 500, + // 1000, + // 2500, + // 5000, + // 10000, + // 30000, + // 60000, + // 300000, + // 600000, + // 1800000, + // 3600000 + // ] + HistogramBucketSettings []*HistogramBucketSettings `protobuf:"bytes,4,rep,name=histogram_bucket_settings,json=histogramBucketSettings,proto3" json:"histogram_bucket_settings,omitempty"` +} + +func (x *StatsConfig) Reset() { + *x = StatsConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatsConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatsConfig) ProtoMessage() {} + +func (x *StatsConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatsConfig.ProtoReflect.Descriptor instead. +func (*StatsConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_metrics_v3_stats_proto_rawDescGZIP(), []int{1} +} + +func (x *StatsConfig) GetStatsTags() []*TagSpecifier { + if x != nil { + return x.StatsTags + } + return nil +} + +func (x *StatsConfig) GetUseAllDefaultTags() *wrapperspb.BoolValue { + if x != nil { + return x.UseAllDefaultTags + } + return nil +} + +func (x *StatsConfig) GetStatsMatcher() *StatsMatcher { + if x != nil { + return x.StatsMatcher + } + return nil +} + +func (x *StatsConfig) GetHistogramBucketSettings() []*HistogramBucketSettings { + if x != nil { + return x.HistogramBucketSettings + } + return nil +} + +// Configuration for disabling stat instantiation. +type StatsMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to StatsMatcher: + // + // *StatsMatcher_RejectAll + // *StatsMatcher_ExclusionList + // *StatsMatcher_InclusionList + StatsMatcher isStatsMatcher_StatsMatcher `protobuf_oneof:"stats_matcher"` +} + +func (x *StatsMatcher) Reset() { + *x = StatsMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatsMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatsMatcher) ProtoMessage() {} + +func (x *StatsMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatsMatcher.ProtoReflect.Descriptor instead. 
+func (*StatsMatcher) Descriptor() ([]byte, []int) { + return file_envoy_config_metrics_v3_stats_proto_rawDescGZIP(), []int{2} +} + +func (m *StatsMatcher) GetStatsMatcher() isStatsMatcher_StatsMatcher { + if m != nil { + return m.StatsMatcher + } + return nil +} + +func (x *StatsMatcher) GetRejectAll() bool { + if x, ok := x.GetStatsMatcher().(*StatsMatcher_RejectAll); ok { + return x.RejectAll + } + return false +} + +func (x *StatsMatcher) GetExclusionList() *v3.ListStringMatcher { + if x, ok := x.GetStatsMatcher().(*StatsMatcher_ExclusionList); ok { + return x.ExclusionList + } + return nil +} + +func (x *StatsMatcher) GetInclusionList() *v3.ListStringMatcher { + if x, ok := x.GetStatsMatcher().(*StatsMatcher_InclusionList); ok { + return x.InclusionList + } + return nil +} + +type isStatsMatcher_StatsMatcher interface { + isStatsMatcher_StatsMatcher() +} + +type StatsMatcher_RejectAll struct { + // If “reject_all“ is true, then all stats are disabled. If “reject_all“ is false, then all + // stats are enabled. + RejectAll bool `protobuf:"varint,1,opt,name=reject_all,json=rejectAll,proto3,oneof"` +} + +type StatsMatcher_ExclusionList struct { + // Exclusive match. All stats are enabled except for those matching one of the supplied + // StringMatcher protos. + ExclusionList *v3.ListStringMatcher `protobuf:"bytes,2,opt,name=exclusion_list,json=exclusionList,proto3,oneof"` +} + +type StatsMatcher_InclusionList struct { + // Inclusive match. No stats are enabled except for those matching one of the supplied + // StringMatcher protos. + InclusionList *v3.ListStringMatcher `protobuf:"bytes,3,opt,name=inclusion_list,json=inclusionList,proto3,oneof"` +} + +func (*StatsMatcher_RejectAll) isStatsMatcher_StatsMatcher() {} + +func (*StatsMatcher_ExclusionList) isStatsMatcher_StatsMatcher() {} + +func (*StatsMatcher_InclusionList) isStatsMatcher_StatsMatcher() {} + +// Designates a tag name and value pair. The value may be either a fixed value +// or a regex providing the value via capture groups. The specified tag will be +// unconditionally set if a fixed value, otherwise it will only be set if one +// or more capture groups in the regex match. +type TagSpecifier struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Attaches an identifier to the tag values to identify the tag being in the + // sink. Envoy has a set of default names and regexes to extract dynamic + // portions of existing stats, which can be found in :repo:`well_known_names.h + // ` in the Envoy repository. If a :ref:`tag_name + // ` is provided in the config and + // neither :ref:`regex ` or + // :ref:`fixed_value ` were specified, + // Envoy will attempt to find that name in its set of defaults and use the accompanying regex. + // + // .. note:: + // + // A stat name may be spelled in such a way that it matches two different + // tag extractors for the same tag name. In that case, all but one of the + // tag values will be dropped. It is not specified which tag value will be + // retained. The extraction will only occur for one of the extractors, and + // only the matched extraction will be removed from the tag name. 
+ TagName string `protobuf:"bytes,1,opt,name=tag_name,json=tagName,proto3" json:"tag_name,omitempty"` + // Types that are assignable to TagValue: + // + // *TagSpecifier_Regex + // *TagSpecifier_FixedValue + TagValue isTagSpecifier_TagValue `protobuf_oneof:"tag_value"` +} + +func (x *TagSpecifier) Reset() { + *x = TagSpecifier{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TagSpecifier) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TagSpecifier) ProtoMessage() {} + +func (x *TagSpecifier) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TagSpecifier.ProtoReflect.Descriptor instead. +func (*TagSpecifier) Descriptor() ([]byte, []int) { + return file_envoy_config_metrics_v3_stats_proto_rawDescGZIP(), []int{3} +} + +func (x *TagSpecifier) GetTagName() string { + if x != nil { + return x.TagName + } + return "" +} + +func (m *TagSpecifier) GetTagValue() isTagSpecifier_TagValue { + if m != nil { + return m.TagValue + } + return nil +} + +func (x *TagSpecifier) GetRegex() string { + if x, ok := x.GetTagValue().(*TagSpecifier_Regex); ok { + return x.Regex + } + return "" +} + +func (x *TagSpecifier) GetFixedValue() string { + if x, ok := x.GetTagValue().(*TagSpecifier_FixedValue); ok { + return x.FixedValue + } + return "" +} + +type isTagSpecifier_TagValue interface { + isTagSpecifier_TagValue() +} + +type TagSpecifier_Regex struct { + // Designates a tag to strip from the tag extracted name and provide as a named + // tag value for all statistics. This will only occur if any part of the name + // matches the regex provided with one or more capture groups. + // + // The first capture group identifies the portion of the name to remove. The + // second capture group (which will normally be nested inside the first) will + // designate the value of the tag for the statistic. If no second capture + // group is provided, the first will also be used to set the value of the tag. + // All other capture groups will be ignored. + // + // Example 1. a stat name “cluster.foo_cluster.upstream_rq_timeout“ and + // one tag specifier: + // + // .. code-block:: json + // + // { + // "tag_name": "envoy.cluster_name", + // "regex": "^cluster\\.((.+?)\\.)" + // } + // + // Note that the regex will remove “foo_cluster.“ making the tag extracted + // name “cluster.upstream_rq_timeout“ and the tag value for + // “envoy.cluster_name“ will be “foo_cluster“ (note: there will be no + // “.“ character because of the second capture group). + // + // Example 2. a stat name + // “http.connection_manager_1.user_agent.ios.downstream_cx_total“ and two + // tag specifiers: + // + // .. code-block:: json + // + // [ + // { + // "tag_name": "envoy.http_user_agent", + // "regex": "^http(?=\\.).*?\\.user_agent\\.((.+?)\\.)\\w+?$" + // }, + // { + // "tag_name": "envoy.http_conn_manager_prefix", + // "regex": "^http\\.((.*?)\\.)" + // } + // ] + // + // The two regexes of the specifiers will be processed from the elaborated + // stat name. + // + // The first regex will save “ios.“ as the tag value for “envoy.http_user_agent“. 
It will + // leave it in the name for potential matching with additional tag specifiers. After all tag + // specifiers are processed the tags will be removed from the name. + // + // The second regex will populate tag “envoy.http_conn_manager_prefix“ with value + // “connection_manager_1.“, based on the original stat name. + // + // As a final step, the matched tags are removed, leaving + // “http.user_agent.downstream_cx_total“ as the tag extracted name. + Regex string `protobuf:"bytes,2,opt,name=regex,proto3,oneof"` +} + +type TagSpecifier_FixedValue struct { + // Specifies a fixed tag value for the “tag_name“. + FixedValue string `protobuf:"bytes,3,opt,name=fixed_value,json=fixedValue,proto3,oneof"` +} + +func (*TagSpecifier_Regex) isTagSpecifier_TagValue() {} + +func (*TagSpecifier_FixedValue) isTagSpecifier_TagValue() {} + +// Specifies a matcher for stats and the buckets that matching stats should use. +type HistogramBucketSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The stats that this rule applies to. The match is applied to the original stat name + // before tag-extraction, for example “cluster.exampleclustername.upstream_cx_length_ms“. + Match *v3.StringMatcher `protobuf:"bytes,1,opt,name=match,proto3" json:"match,omitempty"` + // Each value is the upper bound of a bucket. Each bucket must be greater than 0 and unique. + // The order of the buckets does not matter. + Buckets []float64 `protobuf:"fixed64,2,rep,packed,name=buckets,proto3" json:"buckets,omitempty"` +} + +func (x *HistogramBucketSettings) Reset() { + *x = HistogramBucketSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HistogramBucketSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HistogramBucketSettings) ProtoMessage() {} + +func (x *HistogramBucketSettings) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HistogramBucketSettings.ProtoReflect.Descriptor instead. +func (*HistogramBucketSettings) Descriptor() ([]byte, []int) { + return file_envoy_config_metrics_v3_stats_proto_rawDescGZIP(), []int{4} +} + +func (x *HistogramBucketSettings) GetMatch() *v3.StringMatcher { + if x != nil { + return x.Match + } + return nil +} + +func (x *HistogramBucketSettings) GetBuckets() []float64 { + if x != nil { + return x.Buckets + } + return nil +} + +// Stats configuration proto schema for built-in “envoy.stat_sinks.statsd“ sink. This sink does not support +// tagged metrics. +// [#extension: envoy.stat_sinks.statsd] +type StatsdSink struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to StatsdSpecifier: + // + // *StatsdSink_Address + // *StatsdSink_TcpClusterName + StatsdSpecifier isStatsdSink_StatsdSpecifier `protobuf_oneof:"statsd_specifier"` + // Optional custom prefix for StatsdSink. If + // specified, this will override the default prefix. + // For example: + // + // .. code-block:: json + // + // { + // "prefix" : "envoy-prod" + // } + // + // will change emitted stats to + // + // .. 
code-block:: cpp + // + // envoy-prod.test_counter:1|c + // envoy-prod.test_timer:5|ms + // + // Note that the default prefix, "envoy", will be used if a prefix is not + // specified. + // + // Stats with default prefix: + // + // .. code-block:: cpp + // + // envoy.test_counter:1|c + // envoy.test_timer:5|ms + Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` +} + +func (x *StatsdSink) Reset() { + *x = StatsdSink{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatsdSink) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatsdSink) ProtoMessage() {} + +func (x *StatsdSink) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatsdSink.ProtoReflect.Descriptor instead. +func (*StatsdSink) Descriptor() ([]byte, []int) { + return file_envoy_config_metrics_v3_stats_proto_rawDescGZIP(), []int{5} +} + +func (m *StatsdSink) GetStatsdSpecifier() isStatsdSink_StatsdSpecifier { + if m != nil { + return m.StatsdSpecifier + } + return nil +} + +func (x *StatsdSink) GetAddress() *v31.Address { + if x, ok := x.GetStatsdSpecifier().(*StatsdSink_Address); ok { + return x.Address + } + return nil +} + +func (x *StatsdSink) GetTcpClusterName() string { + if x, ok := x.GetStatsdSpecifier().(*StatsdSink_TcpClusterName); ok { + return x.TcpClusterName + } + return "" +} + +func (x *StatsdSink) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +type isStatsdSink_StatsdSpecifier interface { + isStatsdSink_StatsdSpecifier() +} + +type StatsdSink_Address struct { + // The UDP address of a running `statsd `_ + // compliant listener. If specified, statistics will be flushed to this + // address. + Address *v31.Address `protobuf:"bytes,1,opt,name=address,proto3,oneof"` +} + +type StatsdSink_TcpClusterName struct { + // The name of a cluster that is running a TCP `statsd + // `_ compliant listener. If specified, + // Envoy will connect to this cluster to flush statistics. + TcpClusterName string `protobuf:"bytes,2,opt,name=tcp_cluster_name,json=tcpClusterName,proto3,oneof"` +} + +func (*StatsdSink_Address) isStatsdSink_StatsdSpecifier() {} + +func (*StatsdSink_TcpClusterName) isStatsdSink_StatsdSpecifier() {} + +// Stats configuration proto schema for built-in “envoy.stat_sinks.dog_statsd“ sink. +// The sink emits stats with `DogStatsD `_ +// compatible tags. Tags are configurable via :ref:`StatsConfig +// `. +// [#extension: envoy.stat_sinks.dog_statsd] +type DogStatsdSink struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to DogStatsdSpecifier: + // + // *DogStatsdSink_Address + DogStatsdSpecifier isDogStatsdSink_DogStatsdSpecifier `protobuf_oneof:"dog_statsd_specifier"` + // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field + // ` for more details. + Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` + // Optional max datagram size to use when sending UDP messages. By default Envoy + // will emit one metric per datagram. 
By specifying a max-size larger than a single + // metric, Envoy will emit multiple, new-line separated metrics. The max datagram + // size should not exceed your network's MTU. + // + // Note that this value may not be respected if smaller than a single metric. + MaxBytesPerDatagram *wrapperspb.UInt64Value `protobuf:"bytes,4,opt,name=max_bytes_per_datagram,json=maxBytesPerDatagram,proto3" json:"max_bytes_per_datagram,omitempty"` +} + +func (x *DogStatsdSink) Reset() { + *x = DogStatsdSink{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DogStatsdSink) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DogStatsdSink) ProtoMessage() {} + +func (x *DogStatsdSink) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DogStatsdSink.ProtoReflect.Descriptor instead. +func (*DogStatsdSink) Descriptor() ([]byte, []int) { + return file_envoy_config_metrics_v3_stats_proto_rawDescGZIP(), []int{6} +} + +func (m *DogStatsdSink) GetDogStatsdSpecifier() isDogStatsdSink_DogStatsdSpecifier { + if m != nil { + return m.DogStatsdSpecifier + } + return nil +} + +func (x *DogStatsdSink) GetAddress() *v31.Address { + if x, ok := x.GetDogStatsdSpecifier().(*DogStatsdSink_Address); ok { + return x.Address + } + return nil +} + +func (x *DogStatsdSink) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *DogStatsdSink) GetMaxBytesPerDatagram() *wrapperspb.UInt64Value { + if x != nil { + return x.MaxBytesPerDatagram + } + return nil +} + +type isDogStatsdSink_DogStatsdSpecifier interface { + isDogStatsdSink_DogStatsdSpecifier() +} + +type DogStatsdSink_Address struct { + // The UDP address of a running DogStatsD compliant listener. If specified, + // statistics will be flushed to this address. + Address *v31.Address `protobuf:"bytes,1,opt,name=address,proto3,oneof"` +} + +func (*DogStatsdSink_Address) isDogStatsdSink_DogStatsdSpecifier() {} + +// Stats configuration proto schema for built-in “envoy.stat_sinks.hystrix“ sink. +// The sink emits stats in `text/event-stream +// `_ +// formatted stream for use by `Hystrix dashboard +// `_. +// +// Note that only a single HystrixSink should be configured. +// +// Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`. +// [#extension: envoy.stat_sinks.hystrix] +type HystrixSink struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of buckets the rolling statistical window is divided into. + // + // Each time the sink is flushed, all relevant Envoy statistics are sampled and + // added to the rolling window (removing the oldest samples in the window + // in the process). The sink then outputs the aggregate statistics across the + // current rolling window to the event stream(s). + // + // “rolling_window(ms)“ = “stats_flush_interval(ms)“ * “num_of_buckets“ + // + // More detailed explanation can be found in `Hystrix wiki + // `_. 
+ NumBuckets int64 `protobuf:"varint,1,opt,name=num_buckets,json=numBuckets,proto3" json:"num_buckets,omitempty"` +} + +func (x *HystrixSink) Reset() { + *x = HystrixSink{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HystrixSink) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HystrixSink) ProtoMessage() {} + +func (x *HystrixSink) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_metrics_v3_stats_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HystrixSink.ProtoReflect.Descriptor instead. +func (*HystrixSink) Descriptor() ([]byte, []int) { + return file_envoy_config_metrics_v3_stats_proto_rawDescGZIP(), []int{7} +} + +func (x *HystrixSink) GetNumBuckets() int64 { + if x != nil { + return x.NumBuckets + } + return 0 +} + +var File_envoy_config_metrics_v3_stats_proto protoreflect.FileDescriptor + +var file_envoy_config_metrics_v3_stats_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x22, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa1, 0x01, 0x0a, + 0x09, 0x53, 0x74, 0x61, 0x74, 0x73, 0x53, 0x69, 0x6e, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, + 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, + 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, + 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x53, + 0x69, 0x6e, 0x6b, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x22, 0x86, 0x03, 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x44, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x54, + 0x61, 0x67, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x09, 0x73, 0x74, 0x61, + 0x74, 0x73, 0x54, 0x61, 0x67, 0x73, 0x12, 0x4b, 0x0a, 0x14, 0x75, 0x73, 0x65, 0x5f, 0x61, 0x6c, + 0x6c, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x11, 0x75, 0x73, 0x65, 0x41, 0x6c, 0x6c, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, + 0x61, 0x67, 0x73, 0x12, 0x4a, 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, + 0x6c, 0x0a, 0x19, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x5f, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x69, 0x73, + 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x52, 0x17, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x3a, 0x2a, 0x9a, + 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x98, 0x02, 0x0a, 0x0c, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x0a, 0x72, 0x65, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, + 0x52, 0x09, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x6c, 0x12, 0x51, 0x0a, 0x0e, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 
0x68, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x51, + 0x0a, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x69, 0x73, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x48, 0x00, 0x52, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, + 0x74, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x14, + 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, + 0x03, 0xf8, 0x42, 0x01, 0x22, 0xa8, 0x01, 0x0a, 0x0c, 0x54, 0x61, 0x67, 0x53, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x61, 0x67, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x67, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x20, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x72, 0x03, 0x28, 0x80, 0x08, 0x48, 0x00, 0x52, 0x05, 0x72, 0x65, 0x67, + 0x65, 0x78, 0x12, 0x21, 0x0a, 0x0b, 0x66, 0x69, 0x78, 0x65, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0a, 0x66, 0x69, 0x78, 0x65, 0x64, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x67, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x42, 0x0b, 0x0a, 0x09, 0x74, 0x61, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x92, 0x01, 0x0a, 0x17, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x44, 0x0a, 0x05, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x12, 0x31, 0x0a, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x01, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x92, 0x01, 0x11, 0x08, 0x01, 0x18, 0x01, 0x22, 0x0b, + 0x12, 0x09, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x07, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x22, 0xcf, 0x01, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x73, 0x64, 0x53, + 0x69, 0x6e, 0x6b, 0x12, 0x39, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2a, + 0x0a, 0x10, 0x74, 0x63, 0x70, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 
0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x63, 0x70, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x64, 0x53, 0x69, 0x6e, 0x6b, 0x42, 0x17, 0x0a, + 0x10, 0x73, 0x74, 0x61, 0x74, 0x73, 0x64, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x8f, 0x02, 0x0a, 0x0d, 0x44, 0x6f, 0x67, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x64, 0x53, 0x69, 0x6e, 0x6b, 0x12, 0x39, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x5a, 0x0a, 0x16, 0x6d, + 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x67, 0x72, 0x61, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x32, 0x02, + 0x20, 0x00, 0x52, 0x13, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x44, + 0x61, 0x74, 0x61, 0x67, 0x72, 0x61, 0x6d, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x6f, 0x67, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x64, 0x53, 0x69, 0x6e, 0x6b, 0x42, 0x1b, 0x0a, 0x14, 0x64, 0x6f, 0x67, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x73, 0x64, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, + 0x42, 0x01, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0x5a, 0x0a, 0x0b, 0x48, 0x79, 0x73, 0x74, + 0x72, 0x69, 0x78, 0x53, 0x69, 0x6e, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x75, 0x6d, 0x5f, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6e, 0x75, + 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, + 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x79, 0x73, 0x74, 0x72, 0x69, 0x78, + 0x53, 0x69, 0x6e, 0x6b, 0x42, 0x87, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, + 0x25, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, + 0x6f, 0x6e, 
0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x76, 0x33, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_metrics_v3_stats_proto_rawDescOnce sync.Once + file_envoy_config_metrics_v3_stats_proto_rawDescData = file_envoy_config_metrics_v3_stats_proto_rawDesc +) + +func file_envoy_config_metrics_v3_stats_proto_rawDescGZIP() []byte { + file_envoy_config_metrics_v3_stats_proto_rawDescOnce.Do(func() { + file_envoy_config_metrics_v3_stats_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_metrics_v3_stats_proto_rawDescData) + }) + return file_envoy_config_metrics_v3_stats_proto_rawDescData +} + +var file_envoy_config_metrics_v3_stats_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_envoy_config_metrics_v3_stats_proto_goTypes = []interface{}{ + (*StatsSink)(nil), // 0: envoy.config.metrics.v3.StatsSink + (*StatsConfig)(nil), // 1: envoy.config.metrics.v3.StatsConfig + (*StatsMatcher)(nil), // 2: envoy.config.metrics.v3.StatsMatcher + (*TagSpecifier)(nil), // 3: envoy.config.metrics.v3.TagSpecifier + (*HistogramBucketSettings)(nil), // 4: envoy.config.metrics.v3.HistogramBucketSettings + (*StatsdSink)(nil), // 5: envoy.config.metrics.v3.StatsdSink + (*DogStatsdSink)(nil), // 6: envoy.config.metrics.v3.DogStatsdSink + (*HystrixSink)(nil), // 7: envoy.config.metrics.v3.HystrixSink + (*anypb.Any)(nil), // 8: google.protobuf.Any + (*wrapperspb.BoolValue)(nil), // 9: google.protobuf.BoolValue + (*v3.ListStringMatcher)(nil), // 10: envoy.type.matcher.v3.ListStringMatcher + (*v3.StringMatcher)(nil), // 11: envoy.type.matcher.v3.StringMatcher + (*v31.Address)(nil), // 12: envoy.config.core.v3.Address + (*wrapperspb.UInt64Value)(nil), // 13: google.protobuf.UInt64Value +} +var file_envoy_config_metrics_v3_stats_proto_depIdxs = []int32{ + 8, // 0: envoy.config.metrics.v3.StatsSink.typed_config:type_name -> google.protobuf.Any + 3, // 1: envoy.config.metrics.v3.StatsConfig.stats_tags:type_name -> envoy.config.metrics.v3.TagSpecifier + 9, // 2: envoy.config.metrics.v3.StatsConfig.use_all_default_tags:type_name -> google.protobuf.BoolValue + 2, // 3: envoy.config.metrics.v3.StatsConfig.stats_matcher:type_name -> envoy.config.metrics.v3.StatsMatcher + 4, // 4: envoy.config.metrics.v3.StatsConfig.histogram_bucket_settings:type_name -> envoy.config.metrics.v3.HistogramBucketSettings + 10, // 5: envoy.config.metrics.v3.StatsMatcher.exclusion_list:type_name -> envoy.type.matcher.v3.ListStringMatcher + 10, // 6: envoy.config.metrics.v3.StatsMatcher.inclusion_list:type_name -> envoy.type.matcher.v3.ListStringMatcher + 11, // 7: envoy.config.metrics.v3.HistogramBucketSettings.match:type_name -> envoy.type.matcher.v3.StringMatcher + 12, // 8: envoy.config.metrics.v3.StatsdSink.address:type_name -> envoy.config.core.v3.Address + 12, // 9: envoy.config.metrics.v3.DogStatsdSink.address:type_name -> envoy.config.core.v3.Address + 13, // 10: envoy.config.metrics.v3.DogStatsdSink.max_bytes_per_datagram:type_name -> google.protobuf.UInt64Value + 11, // [11:11] is the sub-list for method output_type + 11, // [11:11] is the sub-list for method input_type + 11, // [11:11] is the sub-list for extension type_name + 11, // [11:11] is the sub-list for extension extendee + 0, // [0:11] is the sub-list for field type_name +} + +func init() { 
file_envoy_config_metrics_v3_stats_proto_init() } +func file_envoy_config_metrics_v3_stats_proto_init() { + if File_envoy_config_metrics_v3_stats_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_metrics_v3_stats_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatsSink); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_metrics_v3_stats_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatsConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_metrics_v3_stats_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatsMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_metrics_v3_stats_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TagSpecifier); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_metrics_v3_stats_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HistogramBucketSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_metrics_v3_stats_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatsdSink); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_metrics_v3_stats_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DogStatsdSink); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_metrics_v3_stats_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HystrixSink); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_metrics_v3_stats_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*StatsSink_TypedConfig)(nil), + } + file_envoy_config_metrics_v3_stats_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*StatsMatcher_RejectAll)(nil), + (*StatsMatcher_ExclusionList)(nil), + (*StatsMatcher_InclusionList)(nil), + } + file_envoy_config_metrics_v3_stats_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*TagSpecifier_Regex)(nil), + (*TagSpecifier_FixedValue)(nil), + } + file_envoy_config_metrics_v3_stats_proto_msgTypes[5].OneofWrappers = []interface{}{ + (*StatsdSink_Address)(nil), + (*StatsdSink_TcpClusterName)(nil), + } + file_envoy_config_metrics_v3_stats_proto_msgTypes[6].OneofWrappers = []interface{}{ + (*DogStatsdSink_Address)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_metrics_v3_stats_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_metrics_v3_stats_proto_goTypes, + DependencyIndexes: file_envoy_config_metrics_v3_stats_proto_depIdxs, + 
MessageInfos: file_envoy_config_metrics_v3_stats_proto_msgTypes, + }.Build() + File_envoy_config_metrics_v3_stats_proto = out.File + file_envoy_config_metrics_v3_stats_proto_rawDesc = nil + file_envoy_config_metrics_v3_stats_proto_goTypes = nil + file_envoy_config_metrics_v3_stats_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.validate.go new file mode 100644 index 0000000000..60e9fdaaf4 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats.pb.validate.go @@ -0,0 +1,1394 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/metrics/v3/stats.proto + +package metricsv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on StatsSink with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *StatsSink) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StatsSink with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in StatsSinkMultiError, or nil +// if none found. +func (m *StatsSink) ValidateAll() error { + return m.validate(true) +} + +func (m *StatsSink) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + switch v := m.ConfigType.(type) { + case *StatsSink_TypedConfig: + if v == nil { + err := StatsSinkValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StatsSinkValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StatsSinkValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StatsSinkValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return StatsSinkMultiError(errors) + } + + return nil +} + +// StatsSinkMultiError is an error wrapping multiple validation errors returned +// by StatsSink.ValidateAll() if the designated constraints aren't met. +type StatsSinkMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m StatsSinkMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StatsSinkMultiError) AllErrors() []error { return m } + +// StatsSinkValidationError is the validation error returned by +// StatsSink.Validate if the designated constraints aren't met. +type StatsSinkValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StatsSinkValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StatsSinkValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StatsSinkValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StatsSinkValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e StatsSinkValidationError) ErrorName() string { return "StatsSinkValidationError" } + +// Error satisfies the builtin error interface +func (e StatsSinkValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStatsSink.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StatsSinkValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StatsSinkValidationError{} + +// Validate checks the field values on StatsConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *StatsConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StatsConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in StatsConfigMultiError, or +// nil if none found. 
+func (m *StatsConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *StatsConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetStatsTags() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StatsConfigValidationError{ + field: fmt.Sprintf("StatsTags[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StatsConfigValidationError{ + field: fmt.Sprintf("StatsTags[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StatsConfigValidationError{ + field: fmt.Sprintf("StatsTags[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetUseAllDefaultTags()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StatsConfigValidationError{ + field: "UseAllDefaultTags", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StatsConfigValidationError{ + field: "UseAllDefaultTags", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUseAllDefaultTags()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StatsConfigValidationError{ + field: "UseAllDefaultTags", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetStatsMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StatsConfigValidationError{ + field: "StatsMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StatsConfigValidationError{ + field: "StatsMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStatsMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StatsConfigValidationError{ + field: "StatsMatcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetHistogramBucketSettings() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StatsConfigValidationError{ + field: fmt.Sprintf("HistogramBucketSettings[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StatsConfigValidationError{ + field: fmt.Sprintf("HistogramBucketSettings[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StatsConfigValidationError{ + field: 
fmt.Sprintf("HistogramBucketSettings[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return StatsConfigMultiError(errors) + } + + return nil +} + +// StatsConfigMultiError is an error wrapping multiple validation errors +// returned by StatsConfig.ValidateAll() if the designated constraints aren't met. +type StatsConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StatsConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StatsConfigMultiError) AllErrors() []error { return m } + +// StatsConfigValidationError is the validation error returned by +// StatsConfig.Validate if the designated constraints aren't met. +type StatsConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StatsConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StatsConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StatsConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StatsConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e StatsConfigValidationError) ErrorName() string { return "StatsConfigValidationError" } + +// Error satisfies the builtin error interface +func (e StatsConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStatsConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StatsConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StatsConfigValidationError{} + +// Validate checks the field values on StatsMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *StatsMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StatsMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in StatsMatcherMultiError, or +// nil if none found. 
+func (m *StatsMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *StatsMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofStatsMatcherPresent := false + switch v := m.StatsMatcher.(type) { + case *StatsMatcher_RejectAll: + if v == nil { + err := StatsMatcherValidationError{ + field: "StatsMatcher", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofStatsMatcherPresent = true + // no validation rules for RejectAll + case *StatsMatcher_ExclusionList: + if v == nil { + err := StatsMatcherValidationError{ + field: "StatsMatcher", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofStatsMatcherPresent = true + + if all { + switch v := interface{}(m.GetExclusionList()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StatsMatcherValidationError{ + field: "ExclusionList", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StatsMatcherValidationError{ + field: "ExclusionList", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExclusionList()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StatsMatcherValidationError{ + field: "ExclusionList", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *StatsMatcher_InclusionList: + if v == nil { + err := StatsMatcherValidationError{ + field: "StatsMatcher", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofStatsMatcherPresent = true + + if all { + switch v := interface{}(m.GetInclusionList()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StatsMatcherValidationError{ + field: "InclusionList", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StatsMatcherValidationError{ + field: "InclusionList", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInclusionList()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StatsMatcherValidationError{ + field: "InclusionList", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofStatsMatcherPresent { + err := StatsMatcherValidationError{ + field: "StatsMatcher", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return StatsMatcherMultiError(errors) + } + + return nil +} + +// StatsMatcherMultiError is an error wrapping multiple validation errors +// returned by StatsMatcher.ValidateAll() if the designated constraints aren't met. +type StatsMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StatsMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m StatsMatcherMultiError) AllErrors() []error { return m } + +// StatsMatcherValidationError is the validation error returned by +// StatsMatcher.Validate if the designated constraints aren't met. +type StatsMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StatsMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StatsMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StatsMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StatsMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e StatsMatcherValidationError) ErrorName() string { return "StatsMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e StatsMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStatsMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StatsMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StatsMatcherValidationError{} + +// Validate checks the field values on TagSpecifier with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *TagSpecifier) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TagSpecifier with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in TagSpecifierMultiError, or +// nil if none found. +func (m *TagSpecifier) ValidateAll() error { + return m.validate(true) +} + +func (m *TagSpecifier) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for TagName + + switch v := m.TagValue.(type) { + case *TagSpecifier_Regex: + if v == nil { + err := TagSpecifierValidationError{ + field: "TagValue", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetRegex()) > 1024 { + err := TagSpecifierValidationError{ + field: "Regex", + reason: "value length must be at most 1024 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *TagSpecifier_FixedValue: + if v == nil { + err := TagSpecifierValidationError{ + field: "TagValue", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for FixedValue + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return TagSpecifierMultiError(errors) + } + + return nil +} + +// TagSpecifierMultiError is an error wrapping multiple validation errors +// returned by TagSpecifier.ValidateAll() if the designated constraints aren't met. +type TagSpecifierMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m TagSpecifierMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TagSpecifierMultiError) AllErrors() []error { return m } + +// TagSpecifierValidationError is the validation error returned by +// TagSpecifier.Validate if the designated constraints aren't met. +type TagSpecifierValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TagSpecifierValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TagSpecifierValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TagSpecifierValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TagSpecifierValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TagSpecifierValidationError) ErrorName() string { return "TagSpecifierValidationError" } + +// Error satisfies the builtin error interface +func (e TagSpecifierValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTagSpecifier.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TagSpecifierValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TagSpecifierValidationError{} + +// Validate checks the field values on HistogramBucketSettings with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HistogramBucketSettings) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HistogramBucketSettings with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HistogramBucketSettingsMultiError, or nil if none found. 
+func (m *HistogramBucketSettings) ValidateAll() error { + return m.validate(true) +} + +func (m *HistogramBucketSettings) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetMatch() == nil { + err := HistogramBucketSettingsValidationError{ + field: "Match", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HistogramBucketSettingsValidationError{ + field: "Match", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HistogramBucketSettingsValidationError{ + field: "Match", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HistogramBucketSettingsValidationError{ + field: "Match", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(m.GetBuckets()) < 1 { + err := HistogramBucketSettingsValidationError{ + field: "Buckets", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + _HistogramBucketSettings_Buckets_Unique := make(map[float64]struct{}, len(m.GetBuckets())) + + for idx, item := range m.GetBuckets() { + _, _ = idx, item + + if _, exists := _HistogramBucketSettings_Buckets_Unique[item]; exists { + err := HistogramBucketSettingsValidationError{ + field: fmt.Sprintf("Buckets[%v]", idx), + reason: "repeated value must contain unique items", + } + if !all { + return err + } + errors = append(errors, err) + } else { + _HistogramBucketSettings_Buckets_Unique[item] = struct{}{} + } + + if item <= 0 { + err := HistogramBucketSettingsValidationError{ + field: fmt.Sprintf("Buckets[%v]", idx), + reason: "value must be greater than 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return HistogramBucketSettingsMultiError(errors) + } + + return nil +} + +// HistogramBucketSettingsMultiError is an error wrapping multiple validation +// errors returned by HistogramBucketSettings.ValidateAll() if the designated +// constraints aren't met. +type HistogramBucketSettingsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HistogramBucketSettingsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HistogramBucketSettingsMultiError) AllErrors() []error { return m } + +// HistogramBucketSettingsValidationError is the validation error returned by +// HistogramBucketSettings.Validate if the designated constraints aren't met. +type HistogramBucketSettingsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HistogramBucketSettingsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HistogramBucketSettingsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e HistogramBucketSettingsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HistogramBucketSettingsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HistogramBucketSettingsValidationError) ErrorName() string { + return "HistogramBucketSettingsValidationError" +} + +// Error satisfies the builtin error interface +func (e HistogramBucketSettingsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHistogramBucketSettings.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HistogramBucketSettingsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HistogramBucketSettingsValidationError{} + +// Validate checks the field values on StatsdSink with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *StatsdSink) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StatsdSink with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in StatsdSinkMultiError, or +// nil if none found. +func (m *StatsdSink) ValidateAll() error { + return m.validate(true) +} + +func (m *StatsdSink) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Prefix + + oneofStatsdSpecifierPresent := false + switch v := m.StatsdSpecifier.(type) { + case *StatsdSink_Address: + if v == nil { + err := StatsdSinkValidationError{ + field: "StatsdSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofStatsdSpecifierPresent = true + + if all { + switch v := interface{}(m.GetAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StatsdSinkValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StatsdSinkValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StatsdSinkValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *StatsdSink_TcpClusterName: + if v == nil { + err := StatsdSinkValidationError{ + field: "StatsdSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofStatsdSpecifierPresent = true + // no validation rules for TcpClusterName + default: + _ = v // ensures v is used + } + if !oneofStatsdSpecifierPresent { + err := StatsdSinkValidationError{ + field: "StatsdSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return StatsdSinkMultiError(errors) + } + + return nil +} + +// StatsdSinkMultiError is an error wrapping multiple validation errors +// returned 
by StatsdSink.ValidateAll() if the designated constraints aren't met. +type StatsdSinkMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StatsdSinkMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StatsdSinkMultiError) AllErrors() []error { return m } + +// StatsdSinkValidationError is the validation error returned by +// StatsdSink.Validate if the designated constraints aren't met. +type StatsdSinkValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StatsdSinkValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StatsdSinkValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StatsdSinkValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StatsdSinkValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e StatsdSinkValidationError) ErrorName() string { return "StatsdSinkValidationError" } + +// Error satisfies the builtin error interface +func (e StatsdSinkValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStatsdSink.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StatsdSinkValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StatsdSinkValidationError{} + +// Validate checks the field values on DogStatsdSink with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *DogStatsdSink) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DogStatsdSink with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in DogStatsdSinkMultiError, or +// nil if none found. 
+func (m *DogStatsdSink) ValidateAll() error { + return m.validate(true) +} + +func (m *DogStatsdSink) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Prefix + + if wrapper := m.GetMaxBytesPerDatagram(); wrapper != nil { + + if wrapper.GetValue() <= 0 { + err := DogStatsdSinkValidationError{ + field: "MaxBytesPerDatagram", + reason: "value must be greater than 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + oneofDogStatsdSpecifierPresent := false + switch v := m.DogStatsdSpecifier.(type) { + case *DogStatsdSink_Address: + if v == nil { + err := DogStatsdSinkValidationError{ + field: "DogStatsdSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofDogStatsdSpecifierPresent = true + + if all { + switch v := interface{}(m.GetAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DogStatsdSinkValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DogStatsdSinkValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DogStatsdSinkValidationError{ + field: "Address", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofDogStatsdSpecifierPresent { + err := DogStatsdSinkValidationError{ + field: "DogStatsdSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return DogStatsdSinkMultiError(errors) + } + + return nil +} + +// DogStatsdSinkMultiError is an error wrapping multiple validation errors +// returned by DogStatsdSink.ValidateAll() if the designated constraints +// aren't met. +type DogStatsdSinkMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DogStatsdSinkMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DogStatsdSinkMultiError) AllErrors() []error { return m } + +// DogStatsdSinkValidationError is the validation error returned by +// DogStatsdSink.Validate if the designated constraints aren't met. +type DogStatsdSinkValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DogStatsdSinkValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DogStatsdSinkValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DogStatsdSinkValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DogStatsdSinkValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DogStatsdSinkValidationError) ErrorName() string { return "DogStatsdSinkValidationError" } + +// Error satisfies the builtin error interface +func (e DogStatsdSinkValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDogStatsdSink.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DogStatsdSinkValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DogStatsdSinkValidationError{} + +// Validate checks the field values on HystrixSink with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HystrixSink) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HystrixSink with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HystrixSinkMultiError, or +// nil if none found. +func (m *HystrixSink) ValidateAll() error { + return m.validate(true) +} + +func (m *HystrixSink) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for NumBuckets + + if len(errors) > 0 { + return HystrixSinkMultiError(errors) + } + + return nil +} + +// HystrixSinkMultiError is an error wrapping multiple validation errors +// returned by HystrixSink.ValidateAll() if the designated constraints aren't met. +type HystrixSinkMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HystrixSinkMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HystrixSinkMultiError) AllErrors() []error { return m } + +// HystrixSinkValidationError is the validation error returned by +// HystrixSink.Validate if the designated constraints aren't met. +type HystrixSinkValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HystrixSinkValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HystrixSinkValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HystrixSinkValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HystrixSinkValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HystrixSinkValidationError) ErrorName() string { return "HystrixSinkValidationError" } + +// Error satisfies the builtin error interface +func (e HystrixSinkValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHystrixSink.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HystrixSinkValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HystrixSinkValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats_vtproto.pb.go new file mode 100644 index 0000000000..3f53827ea0 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3/stats_vtproto.pb.go @@ -0,0 +1,976 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/metrics/v3/stats.proto + +package metricsv3 + +import ( + binary "encoding/binary" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *StatsSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatsSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*StatsSink_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StatsSink_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsSink_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *StatsConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + 
size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatsConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.HistogramBucketSettings) > 0 { + for iNdEx := len(m.HistogramBucketSettings) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.HistogramBucketSettings[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if m.StatsMatcher != nil { + size, err := m.StatsMatcher.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.UseAllDefaultTags != nil { + size, err := (*wrapperspb.BoolValue)(m.UseAllDefaultTags).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.StatsTags) > 0 { + for iNdEx := len(m.StatsTags) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.StatsTags[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *StatsMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatsMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.StatsMatcher.(*StatsMatcher_InclusionList); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.StatsMatcher.(*StatsMatcher_ExclusionList); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.StatsMatcher.(*StatsMatcher_RejectAll); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *StatsMatcher_RejectAll) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsMatcher_RejectAll) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.RejectAll { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *StatsMatcher_ExclusionList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m 
*StatsMatcher_ExclusionList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExclusionList != nil { + if vtmsg, ok := interface{}(m.ExclusionList).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ExclusionList) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *StatsMatcher_InclusionList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsMatcher_InclusionList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.InclusionList != nil { + if vtmsg, ok := interface{}(m.InclusionList).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.InclusionList) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *TagSpecifier) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TagSpecifier) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TagSpecifier) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.TagValue.(*TagSpecifier_FixedValue); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.TagValue.(*TagSpecifier_Regex); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.TagName) > 0 { + i -= len(m.TagName) + copy(dAtA[i:], m.TagName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TagName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TagSpecifier_Regex) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TagSpecifier_Regex) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Regex) + copy(dAtA[i:], m.Regex) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Regex))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *TagSpecifier_FixedValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TagSpecifier_FixedValue) 
MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.FixedValue) + copy(dAtA[i:], m.FixedValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.FixedValue))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *HistogramBucketSettings) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HistogramBucketSettings) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HistogramBucketSettings) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Buckets) > 0 { + for iNdEx := len(m.Buckets) - 1; iNdEx >= 0; iNdEx-- { + f1 := math.Float64bits(float64(m.Buckets[iNdEx])) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1)) + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Buckets)*8)) + i-- + dAtA[i] = 0x12 + } + if m.Match != nil { + if vtmsg, ok := interface{}(m.Match).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Match) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StatsdSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatsdSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsdSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Prefix) > 0 { + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0x1a + } + if msg, ok := m.StatsdSpecifier.(*StatsdSink_TcpClusterName); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.StatsdSpecifier.(*StatsdSink_Address); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *StatsdSink_Address) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsdSink_Address) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, 
err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *StatsdSink_TcpClusterName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StatsdSink_TcpClusterName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.TcpClusterName) + copy(dAtA[i:], m.TcpClusterName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TcpClusterName))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *DogStatsdSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DogStatsdSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DogStatsdSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxBytesPerDatagram != nil { + size, err := (*wrapperspb.UInt64Value)(m.MaxBytesPerDatagram).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.Prefix) > 0 { + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0x1a + } + if msg, ok := m.DogStatsdSpecifier.(*DogStatsdSink_Address); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *DogStatsdSink_Address) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DogStatsdSink_Address) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Address != nil { + if vtmsg, ok := interface{}(m.Address).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Address) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *HystrixSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HystrixSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + 
+func (m *HystrixSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.NumBuckets != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.NumBuckets)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StatsSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *StatsSink_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *StatsConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.StatsTags) > 0 { + for _, e := range m.StatsTags { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.UseAllDefaultTags != nil { + l = (*wrapperspb.BoolValue)(m.UseAllDefaultTags).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StatsMatcher != nil { + l = m.StatsMatcher.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.HistogramBucketSettings) > 0 { + for _, e := range m.HistogramBucketSettings { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *StatsMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.StatsMatcher.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *StatsMatcher_RejectAll) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *StatsMatcher_ExclusionList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExclusionList != nil { + if size, ok := interface{}(m.ExclusionList).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ExclusionList) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *StatsMatcher_InclusionList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.InclusionList != nil { + if size, ok := interface{}(m.InclusionList).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.InclusionList) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *TagSpecifier) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TagName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.TagValue.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *TagSpecifier_Regex) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Regex) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *TagSpecifier_FixedValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FixedValue) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HistogramBucketSettings) SizeVT() (n 
int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Match != nil { + if size, ok := interface{}(m.Match).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Match) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Buckets) > 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(len(m.Buckets)*8)) + len(m.Buckets)*8 + } + n += len(m.unknownFields) + return n +} + +func (m *StatsdSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.StatsdSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + l = len(m.Prefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *StatsdSink_Address) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *StatsdSink_TcpClusterName) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TcpClusterName) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *DogStatsdSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.DogStatsdSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + l = len(m.Prefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxBytesPerDatagram != nil { + l = (*wrapperspb.UInt64Value)(m.MaxBytesPerDatagram).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DogStatsdSink_Address) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Address != nil { + if size, ok := interface{}(m.Address).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Address) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HystrixSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NumBuckets != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.NumBuckets)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.go new file mode 100644 index 0000000000..6feac91299 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.go @@ -0,0 +1,1186 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/overload/v3/overload.proto + +package overloadv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ScaleTimersOverloadActionConfig_TimerType int32 + +const ( + // Unsupported value; users must explicitly specify the timer they want scaled. + ScaleTimersOverloadActionConfig_UNSPECIFIED ScaleTimersOverloadActionConfig_TimerType = 0 + // Adjusts the idle timer for downstream HTTP connections that takes effect when there are no active streams. + // This affects the value of :ref:`HttpConnectionManager.common_http_protocol_options.idle_timeout + // ` + ScaleTimersOverloadActionConfig_HTTP_DOWNSTREAM_CONNECTION_IDLE ScaleTimersOverloadActionConfig_TimerType = 1 + // Adjusts the idle timer for HTTP streams initiated by downstream clients. + // This affects the value of :ref:`RouteAction.idle_timeout ` and + // :ref:`HttpConnectionManager.stream_idle_timeout + // ` + ScaleTimersOverloadActionConfig_HTTP_DOWNSTREAM_STREAM_IDLE ScaleTimersOverloadActionConfig_TimerType = 2 + // Adjusts the timer for how long downstream clients have to finish transport-level negotiations + // before the connection is closed. + // This affects the value of + // :ref:`FilterChain.transport_socket_connect_timeout `. + ScaleTimersOverloadActionConfig_TRANSPORT_SOCKET_CONNECT ScaleTimersOverloadActionConfig_TimerType = 3 +) + +// Enum value maps for ScaleTimersOverloadActionConfig_TimerType. +var ( + ScaleTimersOverloadActionConfig_TimerType_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "HTTP_DOWNSTREAM_CONNECTION_IDLE", + 2: "HTTP_DOWNSTREAM_STREAM_IDLE", + 3: "TRANSPORT_SOCKET_CONNECT", + } + ScaleTimersOverloadActionConfig_TimerType_value = map[string]int32{ + "UNSPECIFIED": 0, + "HTTP_DOWNSTREAM_CONNECTION_IDLE": 1, + "HTTP_DOWNSTREAM_STREAM_IDLE": 2, + "TRANSPORT_SOCKET_CONNECT": 3, + } +) + +func (x ScaleTimersOverloadActionConfig_TimerType) Enum() *ScaleTimersOverloadActionConfig_TimerType { + p := new(ScaleTimersOverloadActionConfig_TimerType) + *p = x + return p +} + +func (x ScaleTimersOverloadActionConfig_TimerType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ScaleTimersOverloadActionConfig_TimerType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_overload_v3_overload_proto_enumTypes[0].Descriptor() +} + +func (ScaleTimersOverloadActionConfig_TimerType) Type() protoreflect.EnumType { + return &file_envoy_config_overload_v3_overload_proto_enumTypes[0] +} + +func (x ScaleTimersOverloadActionConfig_TimerType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ScaleTimersOverloadActionConfig_TimerType.Descriptor instead. +func (ScaleTimersOverloadActionConfig_TimerType) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{4, 0} +} + +type ResourceMonitor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the resource monitor to instantiate. Must match a registered + // resource monitor type. + // See the :ref:`extensions listed in typed_config below ` for the default list of available resource monitor. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Configuration for the resource monitor being instantiated. 
+ // [#extension-category: envoy.resource_monitors] + // + // Types that are assignable to ConfigType: + // + // *ResourceMonitor_TypedConfig + ConfigType isResourceMonitor_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *ResourceMonitor) Reset() { + *x = ResourceMonitor{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceMonitor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceMonitor) ProtoMessage() {} + +func (x *ResourceMonitor) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceMonitor.ProtoReflect.Descriptor instead. +func (*ResourceMonitor) Descriptor() ([]byte, []int) { + return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{0} +} + +func (x *ResourceMonitor) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *ResourceMonitor) GetConfigType() isResourceMonitor_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *ResourceMonitor) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*ResourceMonitor_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isResourceMonitor_ConfigType interface { + isResourceMonitor_ConfigType() +} + +type ResourceMonitor_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*ResourceMonitor_TypedConfig) isResourceMonitor_ConfigType() {} + +type ThresholdTrigger struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If the resource pressure is greater than or equal to this value, the trigger + // will enter saturation. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *ThresholdTrigger) Reset() { + *x = ThresholdTrigger{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ThresholdTrigger) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ThresholdTrigger) ProtoMessage() {} + +func (x *ThresholdTrigger) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ThresholdTrigger.ProtoReflect.Descriptor instead. 
+func (*ThresholdTrigger) Descriptor() ([]byte, []int) { + return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{1} +} + +func (x *ThresholdTrigger) GetValue() float64 { + if x != nil { + return x.Value + } + return 0 +} + +type ScaledTrigger struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If the resource pressure is greater than this value, the trigger will be in the + // :ref:`scaling ` state with value + // “(pressure - scaling_threshold) / (saturation_threshold - scaling_threshold)“. + ScalingThreshold float64 `protobuf:"fixed64,1,opt,name=scaling_threshold,json=scalingThreshold,proto3" json:"scaling_threshold,omitempty"` + // If the resource pressure is greater than this value, the trigger will enter saturation. + SaturationThreshold float64 `protobuf:"fixed64,2,opt,name=saturation_threshold,json=saturationThreshold,proto3" json:"saturation_threshold,omitempty"` +} + +func (x *ScaledTrigger) Reset() { + *x = ScaledTrigger{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScaledTrigger) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScaledTrigger) ProtoMessage() {} + +func (x *ScaledTrigger) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScaledTrigger.ProtoReflect.Descriptor instead. +func (*ScaledTrigger) Descriptor() ([]byte, []int) { + return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{2} +} + +func (x *ScaledTrigger) GetScalingThreshold() float64 { + if x != nil { + return x.ScalingThreshold + } + return 0 +} + +func (x *ScaledTrigger) GetSaturationThreshold() float64 { + if x != nil { + return x.SaturationThreshold + } + return 0 +} + +type Trigger struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the resource this is a trigger for. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to TriggerOneof: + // + // *Trigger_Threshold + // *Trigger_Scaled + TriggerOneof isTrigger_TriggerOneof `protobuf_oneof:"trigger_oneof"` +} + +func (x *Trigger) Reset() { + *x = Trigger{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Trigger) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Trigger) ProtoMessage() {} + +func (x *Trigger) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Trigger.ProtoReflect.Descriptor instead. 
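As an aside (not part of the vendored patch): the ScaledTrigger doc comment above defines the trigger state between the two thresholds as (pressure - scaling_threshold) / (saturation_threshold - scaling_threshold), with the trigger saturating once the saturation threshold is reached. A minimal sketch of that rule, with a hypothetical function name:

package main

import "fmt"

// scaledTriggerState maps a resource pressure reading in [0, 1] to a trigger
// state in [0, 1] given the two thresholds of a ScaledTrigger, following the
// formula quoted in the generated comment.
func scaledTriggerState(pressure, scalingThreshold, saturationThreshold float64) float64 {
	switch {
	case pressure <= scalingThreshold:
		return 0 // inactive below the scaling threshold
	case pressure >= saturationThreshold:
		return 1 // saturated
	default:
		return (pressure - scalingThreshold) / (saturationThreshold - scalingThreshold)
	}
}

func main() {
	// With scaling_threshold 0.8 and saturation_threshold 0.95, a pressure of
	// 0.875 sits halfway between the two, so the state is 0.5 (up to rounding).
	fmt.Println(scaledTriggerState(0.875, 0.8, 0.95))
}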
+func (*Trigger) Descriptor() ([]byte, []int) { + return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{3} +} + +func (x *Trigger) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *Trigger) GetTriggerOneof() isTrigger_TriggerOneof { + if m != nil { + return m.TriggerOneof + } + return nil +} + +func (x *Trigger) GetThreshold() *ThresholdTrigger { + if x, ok := x.GetTriggerOneof().(*Trigger_Threshold); ok { + return x.Threshold + } + return nil +} + +func (x *Trigger) GetScaled() *ScaledTrigger { + if x, ok := x.GetTriggerOneof().(*Trigger_Scaled); ok { + return x.Scaled + } + return nil +} + +type isTrigger_TriggerOneof interface { + isTrigger_TriggerOneof() +} + +type Trigger_Threshold struct { + Threshold *ThresholdTrigger `protobuf:"bytes,2,opt,name=threshold,proto3,oneof"` +} + +type Trigger_Scaled struct { + Scaled *ScaledTrigger `protobuf:"bytes,3,opt,name=scaled,proto3,oneof"` +} + +func (*Trigger_Threshold) isTrigger_TriggerOneof() {} + +func (*Trigger_Scaled) isTrigger_TriggerOneof() {} + +// Typed configuration for the "envoy.overload_actions.reduce_timeouts" action. See +// :ref:`the docs ` for an example of how to configure +// the action with different timeouts and minimum values. +type ScaleTimersOverloadActionConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A set of timer scaling rules to be applied. + TimerScaleFactors []*ScaleTimersOverloadActionConfig_ScaleTimer `protobuf:"bytes,1,rep,name=timer_scale_factors,json=timerScaleFactors,proto3" json:"timer_scale_factors,omitempty"` +} + +func (x *ScaleTimersOverloadActionConfig) Reset() { + *x = ScaleTimersOverloadActionConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScaleTimersOverloadActionConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScaleTimersOverloadActionConfig) ProtoMessage() {} + +func (x *ScaleTimersOverloadActionConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScaleTimersOverloadActionConfig.ProtoReflect.Descriptor instead. +func (*ScaleTimersOverloadActionConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{4} +} + +func (x *ScaleTimersOverloadActionConfig) GetTimerScaleFactors() []*ScaleTimersOverloadActionConfig_ScaleTimer { + if x != nil { + return x.TimerScaleFactors + } + return nil +} + +type OverloadAction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the overload action. This is just a well-known string that listeners can + // use for registering callbacks. Custom overload actions should be named using reverse + // DNS to ensure uniqueness. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A set of triggers for this action. The state of the action is the maximum + // state of all triggers, which can be scalar values between 0 and 1 or + // saturated. Listeners are notified when the overload action changes state. 
+ // An overload manager action can only have one trigger for a given resource + // e.g. :ref:`Trigger.name + // ` must be unique + // in this list. + Triggers []*Trigger `protobuf:"bytes,2,rep,name=triggers,proto3" json:"triggers,omitempty"` + // Configuration for the action being instantiated. + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` +} + +func (x *OverloadAction) Reset() { + *x = OverloadAction{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OverloadAction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OverloadAction) ProtoMessage() {} + +func (x *OverloadAction) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OverloadAction.ProtoReflect.Descriptor instead. +func (*OverloadAction) Descriptor() ([]byte, []int) { + return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{5} +} + +func (x *OverloadAction) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *OverloadAction) GetTriggers() []*Trigger { + if x != nil { + return x.Triggers + } + return nil +} + +func (x *OverloadAction) GetTypedConfig() *anypb.Any { + if x != nil { + return x.TypedConfig + } + return nil +} + +// A point within the connection or request lifecycle that provides context on +// whether to shed load at that given stage for the current entity at the +// point. +type LoadShedPoint struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is just a well-known string for the LoadShedPoint. + // Deployment specific LoadShedPoints e.g. within a custom extension should + // be prefixed by the company / deployment name to avoid colliding with any + // open source LoadShedPoints. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A set of triggers for this LoadShedPoint. The LoadShedPoint will use the + // the maximum state of all triggers, which can be scalar values between 0 and + // 1 or saturated. A LoadShedPoint can only have one trigger for a given + // resource e.g. :ref:`Trigger.name + // ` must be unique in + // this list. + Triggers []*Trigger `protobuf:"bytes,2,rep,name=triggers,proto3" json:"triggers,omitempty"` +} + +func (x *LoadShedPoint) Reset() { + *x = LoadShedPoint{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LoadShedPoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LoadShedPoint) ProtoMessage() {} + +func (x *LoadShedPoint) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LoadShedPoint.ProtoReflect.Descriptor instead. 
+func (*LoadShedPoint) Descriptor() ([]byte, []int) { + return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{6} +} + +func (x *LoadShedPoint) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *LoadShedPoint) GetTriggers() []*Trigger { + if x != nil { + return x.Triggers + } + return nil +} + +// Configuration for which accounts the WatermarkBuffer Factories should +// track. +type BufferFactoryConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The minimum power of two at which Envoy starts tracking an account. + // + // Envoy has 8 power of two buckets starting with the provided exponent below. + // Concretely the 1st bucket contains accounts for streams that use + // [2^minimum_account_to_track_power_of_two, + // 2^(minimum_account_to_track_power_of_two + 1)) bytes. + // With the 8th bucket tracking accounts + // >= 128 * 2^minimum_account_to_track_power_of_two. + // + // The maximum value is 56, since we're using uint64_t for bytes counting, + // and that's the last value that would use the 8 buckets. In practice, + // we don't expect the proxy to be holding 2^56 bytes. + // + // If omitted, Envoy should not do any tracking. + MinimumAccountToTrackPowerOfTwo uint32 `protobuf:"varint,1,opt,name=minimum_account_to_track_power_of_two,json=minimumAccountToTrackPowerOfTwo,proto3" json:"minimum_account_to_track_power_of_two,omitempty"` +} + +func (x *BufferFactoryConfig) Reset() { + *x = BufferFactoryConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BufferFactoryConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BufferFactoryConfig) ProtoMessage() {} + +func (x *BufferFactoryConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BufferFactoryConfig.ProtoReflect.Descriptor instead. +func (*BufferFactoryConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{7} +} + +func (x *BufferFactoryConfig) GetMinimumAccountToTrackPowerOfTwo() uint32 { + if x != nil { + return x.MinimumAccountToTrackPowerOfTwo + } + return 0 +} + +// [#next-free-field: 6] +type OverloadManager struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The interval for refreshing resource usage. + RefreshInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=refresh_interval,json=refreshInterval,proto3" json:"refresh_interval,omitempty"` + // The set of resources to monitor. + ResourceMonitors []*ResourceMonitor `protobuf:"bytes,2,rep,name=resource_monitors,json=resourceMonitors,proto3" json:"resource_monitors,omitempty"` + // The set of overload actions. + Actions []*OverloadAction `protobuf:"bytes,3,rep,name=actions,proto3" json:"actions,omitempty"` + // The set of load shed points. + LoadshedPoints []*LoadShedPoint `protobuf:"bytes,5,rep,name=loadshed_points,json=loadshedPoints,proto3" json:"loadshed_points,omitempty"` + // Configuration for buffer factory. 
+ BufferFactoryConfig *BufferFactoryConfig `protobuf:"bytes,4,opt,name=buffer_factory_config,json=bufferFactoryConfig,proto3" json:"buffer_factory_config,omitempty"` +} + +func (x *OverloadManager) Reset() { + *x = OverloadManager{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OverloadManager) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OverloadManager) ProtoMessage() {} + +func (x *OverloadManager) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OverloadManager.ProtoReflect.Descriptor instead. +func (*OverloadManager) Descriptor() ([]byte, []int) { + return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{8} +} + +func (x *OverloadManager) GetRefreshInterval() *durationpb.Duration { + if x != nil { + return x.RefreshInterval + } + return nil +} + +func (x *OverloadManager) GetResourceMonitors() []*ResourceMonitor { + if x != nil { + return x.ResourceMonitors + } + return nil +} + +func (x *OverloadManager) GetActions() []*OverloadAction { + if x != nil { + return x.Actions + } + return nil +} + +func (x *OverloadManager) GetLoadshedPoints() []*LoadShedPoint { + if x != nil { + return x.LoadshedPoints + } + return nil +} + +func (x *OverloadManager) GetBufferFactoryConfig() *BufferFactoryConfig { + if x != nil { + return x.BufferFactoryConfig + } + return nil +} + +type ScaleTimersOverloadActionConfig_ScaleTimer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of timer this minimum applies to. + Timer ScaleTimersOverloadActionConfig_TimerType `protobuf:"varint,1,opt,name=timer,proto3,enum=envoy.config.overload.v3.ScaleTimersOverloadActionConfig_TimerType" json:"timer,omitempty"` + // Types that are assignable to OverloadAdjust: + // + // *ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout + // *ScaleTimersOverloadActionConfig_ScaleTimer_MinScale + OverloadAdjust isScaleTimersOverloadActionConfig_ScaleTimer_OverloadAdjust `protobuf_oneof:"overload_adjust"` +} + +func (x *ScaleTimersOverloadActionConfig_ScaleTimer) Reset() { + *x = ScaleTimersOverloadActionConfig_ScaleTimer{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScaleTimersOverloadActionConfig_ScaleTimer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScaleTimersOverloadActionConfig_ScaleTimer) ProtoMessage() {} + +func (x *ScaleTimersOverloadActionConfig_ScaleTimer) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_overload_v3_overload_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScaleTimersOverloadActionConfig_ScaleTimer.ProtoReflect.Descriptor instead. 
+func (*ScaleTimersOverloadActionConfig_ScaleTimer) Descriptor() ([]byte, []int) { + return file_envoy_config_overload_v3_overload_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *ScaleTimersOverloadActionConfig_ScaleTimer) GetTimer() ScaleTimersOverloadActionConfig_TimerType { + if x != nil { + return x.Timer + } + return ScaleTimersOverloadActionConfig_UNSPECIFIED +} + +func (m *ScaleTimersOverloadActionConfig_ScaleTimer) GetOverloadAdjust() isScaleTimersOverloadActionConfig_ScaleTimer_OverloadAdjust { + if m != nil { + return m.OverloadAdjust + } + return nil +} + +func (x *ScaleTimersOverloadActionConfig_ScaleTimer) GetMinTimeout() *durationpb.Duration { + if x, ok := x.GetOverloadAdjust().(*ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout); ok { + return x.MinTimeout + } + return nil +} + +func (x *ScaleTimersOverloadActionConfig_ScaleTimer) GetMinScale() *v3.Percent { + if x, ok := x.GetOverloadAdjust().(*ScaleTimersOverloadActionConfig_ScaleTimer_MinScale); ok { + return x.MinScale + } + return nil +} + +type isScaleTimersOverloadActionConfig_ScaleTimer_OverloadAdjust interface { + isScaleTimersOverloadActionConfig_ScaleTimer_OverloadAdjust() +} + +type ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout struct { + // Sets the minimum duration as an absolute value. + MinTimeout *durationpb.Duration `protobuf:"bytes,2,opt,name=min_timeout,json=minTimeout,proto3,oneof"` +} + +type ScaleTimersOverloadActionConfig_ScaleTimer_MinScale struct { + // Sets the minimum duration as a percentage of the maximum value. + MinScale *v3.Percent `protobuf:"bytes,3,opt,name=min_scale,json=minScale,proto3,oneof"` +} + +func (*ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout) isScaleTimersOverloadActionConfig_ScaleTimer_OverloadAdjust() { +} + +func (*ScaleTimersOverloadActionConfig_ScaleTimer_MinScale) isScaleTimersOverloadActionConfig_ScaleTimer_OverloadAdjust() { +} + +var File_envoy_config_overload_v3_overload_proto protoreflect.FileDescriptor + +var file_envoy_config_overload_v3_overload_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6f, + 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2f, 0x76, 0x33, 0x2f, 0x6f, 0x76, 0x65, 0x72, 0x6c, + 0x6f, 0x61, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, + 0x2e, 0x76, 0x33, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, + 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x22, 0xbc, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, + 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x78, 0x0a, 0x10, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, + 0x6c, 0x64, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, 0x19, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, 0x30, 0x0a, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, + 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x54, + 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x22, + 0xa1, 0x01, 0x0a, 0x0d, 0x53, 0x63, 0x61, 0x6c, 0x65, 0x64, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, + 0x72, 0x12, 0x44, 0x0a, 0x11, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x68, 0x72, + 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x42, 0x17, 0xfa, 0x42, + 0x14, 0x12, 0x12, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x29, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x10, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x54, 0x68, + 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x4a, 0x0a, 0x14, 0x73, 0x61, 0x74, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x01, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, 0x19, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x13, + 0x73, 0x61, 0x74, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, + 0x6f, 0x6c, 0x64, 0x22, 0xf9, 0x01, 0x0a, 0x07, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, + 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4a, 0x0a, 0x09, + 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, + 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x68, 0x72, 0x65, 0x73, + 0x68, 0x6f, 0x6c, 0x64, 0x54, 0x72, 0x69, 0x67, 0x67, 
0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x74, + 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x41, 0x0a, 0x06, 0x73, 0x63, 0x61, 0x6c, + 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x61, 0x6c, 0x65, 0x64, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, + 0x72, 0x48, 0x00, 0x52, 0x06, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x64, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, + 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x42, 0x14, 0x0a, 0x0d, 0x74, 0x72, 0x69, + 0x67, 0x67, 0x65, 0x72, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, + 0xa7, 0x04, 0x0a, 0x1f, 0x53, 0x63, 0x61, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x73, 0x4f, + 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x7e, 0x0a, 0x13, 0x74, 0x69, 0x6d, 0x65, 0x72, 0x5f, 0x73, 0x63, 0x61, + 0x6c, 0x65, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x44, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x61, 0x6c, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x73, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x63, 0x61, 0x6c, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, + 0x52, 0x11, 0x74, 0x69, 0x6d, 0x65, 0x72, 0x53, 0x63, 0x61, 0x6c, 0x65, 0x46, 0x61, 0x63, 0x74, + 0x6f, 0x72, 0x73, 0x1a, 0x80, 0x02, 0x0a, 0x0a, 0x53, 0x63, 0x61, 0x6c, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x72, 0x12, 0x65, 0x0a, 0x05, 0x74, 0x69, 0x6d, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x61, + 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x73, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x82, 0x01, 0x04, 0x10, 0x01, + 0x20, 0x00, 0x52, 0x05, 0x74, 0x69, 0x6d, 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x0b, 0x6d, 0x69, 0x6e, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0a, 0x6d, 0x69, 0x6e, + 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x35, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x73, + 0x63, 0x61, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, + 0x6e, 0x74, 0x48, 0x00, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x53, 0x63, 0x61, 0x6c, 0x65, 0x42, 0x16, + 0x0a, 0x0f, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x61, 0x64, 0x6a, 0x75, 0x73, + 0x74, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x80, 0x01, 0x0a, 0x09, 0x54, 0x69, 
0x6d, 0x65, 0x72, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, 0x48, 0x54, 0x54, 0x50, 0x5f, 0x44, 0x4f, + 0x57, 0x4e, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x49, 0x44, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x48, 0x54, + 0x54, 0x50, 0x5f, 0x44, 0x4f, 0x57, 0x4e, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x5f, 0x53, 0x54, + 0x52, 0x45, 0x41, 0x4d, 0x5f, 0x49, 0x44, 0x4c, 0x45, 0x10, 0x02, 0x12, 0x1c, 0x0a, 0x18, 0x54, + 0x52, 0x41, 0x4e, 0x53, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x53, 0x4f, 0x43, 0x4b, 0x45, 0x54, 0x5f, + 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x10, 0x03, 0x22, 0xe4, 0x01, 0x0a, 0x0e, 0x4f, 0x76, + 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x08, 0x74, 0x72, 0x69, + 0x67, 0x67, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, + 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, + 0x72, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, + 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x33, 0x9a, 0xc5, 0x88, + 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x75, 0x0a, 0x0d, 0x4c, 0x6f, 0x61, 0x64, 0x53, 0x68, 0x65, 0x64, 0x50, 0x6f, 0x69, 0x6e, + 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, + 0x0a, 0x08, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x69, 0x67, + 0x67, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x74, + 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x73, 0x22, 0x70, 0x0a, 0x13, 0x42, 0x75, 0x66, 0x66, 0x65, + 0x72, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x59, + 0x0a, 0x25, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x5f, 0x74, 0x6f, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x5f, 0x70, 0x6f, 0x77, 0x65, 0x72, + 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x77, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x09, 0xfa, + 0x42, 0x06, 0x2a, 0x04, 0x18, 0x38, 0x28, 0x0a, 0x52, 0x1f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, + 0x6d, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x6f, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x50, + 0x6f, 
0x77, 0x65, 0x72, 0x4f, 0x66, 0x54, 0x77, 0x6f, 0x22, 0xe8, 0x03, 0x0a, 0x0f, 0x4f, 0x76, + 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x44, 0x0a, + 0x10, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0f, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x12, 0x60, 0x0a, 0x11, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, + 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, + 0x02, 0x08, 0x01, 0x52, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x42, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, + 0x33, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x50, 0x0a, 0x0f, 0x6c, 0x6f, 0x61, + 0x64, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, + 0x61, 0x64, 0x53, 0x68, 0x65, 0x64, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x6c, 0x6f, 0x61, + 0x64, 0x73, 0x68, 0x65, 0x64, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x61, 0x0a, 0x15, 0x62, + 0x75, 0x66, 0x66, 0x65, 0x72, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, + 0x61, 0x64, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x46, 0x61, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x62, 0x75, 0x66, 0x66, 0x65, + 0x72, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x34, + 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x32, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x42, 0x8d, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, + 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, + 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, + 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 
0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, + 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6f, 0x76, + 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2f, 0x76, 0x33, 0x3b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, + 0x61, 0x64, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_overload_v3_overload_proto_rawDescOnce sync.Once + file_envoy_config_overload_v3_overload_proto_rawDescData = file_envoy_config_overload_v3_overload_proto_rawDesc +) + +func file_envoy_config_overload_v3_overload_proto_rawDescGZIP() []byte { + file_envoy_config_overload_v3_overload_proto_rawDescOnce.Do(func() { + file_envoy_config_overload_v3_overload_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_overload_v3_overload_proto_rawDescData) + }) + return file_envoy_config_overload_v3_overload_proto_rawDescData +} + +var file_envoy_config_overload_v3_overload_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_config_overload_v3_overload_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_envoy_config_overload_v3_overload_proto_goTypes = []interface{}{ + (ScaleTimersOverloadActionConfig_TimerType)(0), // 0: envoy.config.overload.v3.ScaleTimersOverloadActionConfig.TimerType + (*ResourceMonitor)(nil), // 1: envoy.config.overload.v3.ResourceMonitor + (*ThresholdTrigger)(nil), // 2: envoy.config.overload.v3.ThresholdTrigger + (*ScaledTrigger)(nil), // 3: envoy.config.overload.v3.ScaledTrigger + (*Trigger)(nil), // 4: envoy.config.overload.v3.Trigger + (*ScaleTimersOverloadActionConfig)(nil), // 5: envoy.config.overload.v3.ScaleTimersOverloadActionConfig + (*OverloadAction)(nil), // 6: envoy.config.overload.v3.OverloadAction + (*LoadShedPoint)(nil), // 7: envoy.config.overload.v3.LoadShedPoint + (*BufferFactoryConfig)(nil), // 8: envoy.config.overload.v3.BufferFactoryConfig + (*OverloadManager)(nil), // 9: envoy.config.overload.v3.OverloadManager + (*ScaleTimersOverloadActionConfig_ScaleTimer)(nil), // 10: envoy.config.overload.v3.ScaleTimersOverloadActionConfig.ScaleTimer + (*anypb.Any)(nil), // 11: google.protobuf.Any + (*durationpb.Duration)(nil), // 12: google.protobuf.Duration + (*v3.Percent)(nil), // 13: envoy.type.v3.Percent +} +var file_envoy_config_overload_v3_overload_proto_depIdxs = []int32{ + 11, // 0: envoy.config.overload.v3.ResourceMonitor.typed_config:type_name -> google.protobuf.Any + 2, // 1: envoy.config.overload.v3.Trigger.threshold:type_name -> envoy.config.overload.v3.ThresholdTrigger + 3, // 2: envoy.config.overload.v3.Trigger.scaled:type_name -> envoy.config.overload.v3.ScaledTrigger + 10, // 3: envoy.config.overload.v3.ScaleTimersOverloadActionConfig.timer_scale_factors:type_name -> envoy.config.overload.v3.ScaleTimersOverloadActionConfig.ScaleTimer + 4, // 4: envoy.config.overload.v3.OverloadAction.triggers:type_name -> envoy.config.overload.v3.Trigger + 11, // 5: envoy.config.overload.v3.OverloadAction.typed_config:type_name -> google.protobuf.Any + 4, // 6: envoy.config.overload.v3.LoadShedPoint.triggers:type_name -> envoy.config.overload.v3.Trigger + 12, // 7: envoy.config.overload.v3.OverloadManager.refresh_interval:type_name -> google.protobuf.Duration + 1, // 8: envoy.config.overload.v3.OverloadManager.resource_monitors:type_name -> envoy.config.overload.v3.ResourceMonitor + 6, // 9: envoy.config.overload.v3.OverloadManager.actions:type_name -> 
envoy.config.overload.v3.OverloadAction + 7, // 10: envoy.config.overload.v3.OverloadManager.loadshed_points:type_name -> envoy.config.overload.v3.LoadShedPoint + 8, // 11: envoy.config.overload.v3.OverloadManager.buffer_factory_config:type_name -> envoy.config.overload.v3.BufferFactoryConfig + 0, // 12: envoy.config.overload.v3.ScaleTimersOverloadActionConfig.ScaleTimer.timer:type_name -> envoy.config.overload.v3.ScaleTimersOverloadActionConfig.TimerType + 12, // 13: envoy.config.overload.v3.ScaleTimersOverloadActionConfig.ScaleTimer.min_timeout:type_name -> google.protobuf.Duration + 13, // 14: envoy.config.overload.v3.ScaleTimersOverloadActionConfig.ScaleTimer.min_scale:type_name -> envoy.type.v3.Percent + 15, // [15:15] is the sub-list for method output_type + 15, // [15:15] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name +} + +func init() { file_envoy_config_overload_v3_overload_proto_init() } +func file_envoy_config_overload_v3_overload_proto_init() { + if File_envoy_config_overload_v3_overload_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_overload_v3_overload_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceMonitor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_overload_v3_overload_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ThresholdTrigger); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_overload_v3_overload_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScaledTrigger); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_overload_v3_overload_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Trigger); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_overload_v3_overload_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScaleTimersOverloadActionConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_overload_v3_overload_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OverloadAction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_overload_v3_overload_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoadShedPoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_overload_v3_overload_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BufferFactoryConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_overload_v3_overload_proto_msgTypes[8].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*OverloadManager); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_overload_v3_overload_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScaleTimersOverloadActionConfig_ScaleTimer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_overload_v3_overload_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*ResourceMonitor_TypedConfig)(nil), + } + file_envoy_config_overload_v3_overload_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*Trigger_Threshold)(nil), + (*Trigger_Scaled)(nil), + } + file_envoy_config_overload_v3_overload_proto_msgTypes[9].OneofWrappers = []interface{}{ + (*ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout)(nil), + (*ScaleTimersOverloadActionConfig_ScaleTimer_MinScale)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_overload_v3_overload_proto_rawDesc, + NumEnums: 1, + NumMessages: 10, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_overload_v3_overload_proto_goTypes, + DependencyIndexes: file_envoy_config_overload_v3_overload_proto_depIdxs, + EnumInfos: file_envoy_config_overload_v3_overload_proto_enumTypes, + MessageInfos: file_envoy_config_overload_v3_overload_proto_msgTypes, + }.Build() + File_envoy_config_overload_v3_overload_proto = out.File + file_envoy_config_overload_v3_overload_proto_rawDesc = nil + file_envoy_config_overload_v3_overload_proto_goTypes = nil + file_envoy_config_overload_v3_overload_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.validate.go new file mode 100644 index 0000000000..267297ed2c --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload.pb.validate.go @@ -0,0 +1,1741 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/overload/v3/overload.proto + +package overloadv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ResourceMonitor with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ResourceMonitor) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceMonitor with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ResourceMonitorMultiError, or nil if none found. 
+func (m *ResourceMonitor) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceMonitor) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := ResourceMonitorValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + switch v := m.ConfigType.(type) { + case *ResourceMonitor_TypedConfig: + if v == nil { + err := ResourceMonitorValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceMonitorValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceMonitorValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceMonitorValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return ResourceMonitorMultiError(errors) + } + + return nil +} + +// ResourceMonitorMultiError is an error wrapping multiple validation errors +// returned by ResourceMonitor.ValidateAll() if the designated constraints +// aren't met. +type ResourceMonitorMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceMonitorMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceMonitorMultiError) AllErrors() []error { return m } + +// ResourceMonitorValidationError is the validation error returned by +// ResourceMonitor.Validate if the designated constraints aren't met. +type ResourceMonitorValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceMonitorValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceMonitorValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceMonitorValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceMonitorValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ResourceMonitorValidationError) ErrorName() string { return "ResourceMonitorValidationError" } + +// Error satisfies the builtin error interface +func (e ResourceMonitorValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourceMonitor.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceMonitorValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceMonitorValidationError{} + +// Validate checks the field values on ThresholdTrigger with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ThresholdTrigger) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ThresholdTrigger with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ThresholdTriggerMultiError, or nil if none found. +func (m *ThresholdTrigger) ValidateAll() error { + return m.validate(true) +} + +func (m *ThresholdTrigger) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if val := m.GetValue(); val < 0 || val > 1 { + err := ThresholdTriggerValidationError{ + field: "Value", + reason: "value must be inside range [0, 1]", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ThresholdTriggerMultiError(errors) + } + + return nil +} + +// ThresholdTriggerMultiError is an error wrapping multiple validation errors +// returned by ThresholdTrigger.ValidateAll() if the designated constraints +// aren't met. +type ThresholdTriggerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ThresholdTriggerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ThresholdTriggerMultiError) AllErrors() []error { return m } + +// ThresholdTriggerValidationError is the validation error returned by +// ThresholdTrigger.Validate if the designated constraints aren't met. +type ThresholdTriggerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ThresholdTriggerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ThresholdTriggerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ThresholdTriggerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ThresholdTriggerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
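
Illustrative usage (not part of the vendored file): a minimal sketch of how a consumer of this package might call the generated validators above. The import path is the package's module path from the diff header; the empty Name is deliberately invalid so that both entry points trip the minimum-length rule, and Validate stops at the first violation while ValidateAll aggregates them into a ResourceMonitorMultiError.

    package main

    import (
        "fmt"

        overloadv3 "github.com/envoyproxy/go-control-plane/envoy/config/overload/v3"
    )

    func main() {
        // An empty Name violates the "length must be at least 1 runes" rule above.
        rm := &overloadv3.ResourceMonitor{Name: ""}

        // Validate returns only the first violation encountered.
        if err := rm.Validate(); err != nil {
            fmt.Println("first violation:", err)
        }

        // ValidateAll collects every violation into a ResourceMonitorMultiError.
        if err := rm.ValidateAll(); err != nil {
            if merr, ok := err.(overloadv3.ResourceMonitorMultiError); ok {
                for _, e := range merr.AllErrors() {
                    fmt.Println("violation:", e)
                }
            }
        }
    }
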
+func (e ThresholdTriggerValidationError) ErrorName() string { return "ThresholdTriggerValidationError" } + +// Error satisfies the builtin error interface +func (e ThresholdTriggerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sThresholdTrigger.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ThresholdTriggerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ThresholdTriggerValidationError{} + +// Validate checks the field values on ScaledTrigger with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ScaledTrigger) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ScaledTrigger with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ScaledTriggerMultiError, or +// nil if none found. +func (m *ScaledTrigger) ValidateAll() error { + return m.validate(true) +} + +func (m *ScaledTrigger) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if val := m.GetScalingThreshold(); val < 0 || val > 1 { + err := ScaledTriggerValidationError{ + field: "ScalingThreshold", + reason: "value must be inside range [0, 1]", + } + if !all { + return err + } + errors = append(errors, err) + } + + if val := m.GetSaturationThreshold(); val < 0 || val > 1 { + err := ScaledTriggerValidationError{ + field: "SaturationThreshold", + reason: "value must be inside range [0, 1]", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ScaledTriggerMultiError(errors) + } + + return nil +} + +// ScaledTriggerMultiError is an error wrapping multiple validation errors +// returned by ScaledTrigger.ValidateAll() if the designated constraints +// aren't met. +type ScaledTriggerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScaledTriggerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScaledTriggerMultiError) AllErrors() []error { return m } + +// ScaledTriggerValidationError is the validation error returned by +// ScaledTrigger.Validate if the designated constraints aren't met. +type ScaledTriggerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScaledTriggerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScaledTriggerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScaledTriggerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScaledTriggerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ScaledTriggerValidationError) ErrorName() string { return "ScaledTriggerValidationError" } + +// Error satisfies the builtin error interface +func (e ScaledTriggerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScaledTrigger.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScaledTriggerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScaledTriggerValidationError{} + +// Validate checks the field values on Trigger with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Trigger) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Trigger with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in TriggerMultiError, or nil if none found. +func (m *Trigger) ValidateAll() error { + return m.validate(true) +} + +func (m *Trigger) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := TriggerValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + oneofTriggerOneofPresent := false + switch v := m.TriggerOneof.(type) { + case *Trigger_Threshold: + if v == nil { + err := TriggerValidationError{ + field: "TriggerOneof", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTriggerOneofPresent = true + + if all { + switch v := interface{}(m.GetThreshold()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TriggerValidationError{ + field: "Threshold", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TriggerValidationError{ + field: "Threshold", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetThreshold()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TriggerValidationError{ + field: "Threshold", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Trigger_Scaled: + if v == nil { + err := TriggerValidationError{ + field: "TriggerOneof", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTriggerOneofPresent = true + + if all { + switch v := interface{}(m.GetScaled()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TriggerValidationError{ + field: "Scaled", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TriggerValidationError{ + field: "Scaled", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetScaled()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + 
return TriggerValidationError{ + field: "Scaled", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofTriggerOneofPresent { + err := TriggerValidationError{ + field: "TriggerOneof", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return TriggerMultiError(errors) + } + + return nil +} + +// TriggerMultiError is an error wrapping multiple validation errors returned +// by Trigger.ValidateAll() if the designated constraints aren't met. +type TriggerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TriggerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TriggerMultiError) AllErrors() []error { return m } + +// TriggerValidationError is the validation error returned by Trigger.Validate +// if the designated constraints aren't met. +type TriggerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TriggerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TriggerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TriggerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TriggerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TriggerValidationError) ErrorName() string { return "TriggerValidationError" } + +// Error satisfies the builtin error interface +func (e TriggerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTrigger.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TriggerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TriggerValidationError{} + +// Validate checks the field values on ScaleTimersOverloadActionConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ScaleTimersOverloadActionConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ScaleTimersOverloadActionConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// ScaleTimersOverloadActionConfigMultiError, or nil if none found. 
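
Illustrative usage (not part of the vendored file): a sketch of the Trigger oneof rules above, assuming the same import path as in the previous sketch. The resource name is made up; the point is that an out-of-range ThresholdTrigger surfaces as an "embedded message failed validation" error with the nested error as its Cause, and an unset TriggerOneof fails the "value is required" check.

    package main

    import (
        "fmt"

        overloadv3 "github.com/envoyproxy/go-control-plane/envoy/config/overload/v3"
    )

    func main() {
        // The oneof is populated, but 1.5 is outside [0, 1], so the embedded
        // ThresholdTrigger fails and is reported as the cause of the error.
        trigger := &overloadv3.Trigger{
            Name: "envoy.resource_monitors.fixed_heap", // illustrative name only
            TriggerOneof: &overloadv3.Trigger_Threshold{
                Threshold: &overloadv3.ThresholdTrigger{Value: 1.5},
            },
        }
        if err := trigger.Validate(); err != nil {
            fmt.Println(err) // invalid Trigger.Threshold: embedded message failed validation | caused by: ...
        }

        // Leaving TriggerOneof nil fails the "value is required" rule above.
        if err := (&overloadv3.Trigger{Name: "x"}).Validate(); err != nil {
            fmt.Println(err)
        }
    }
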
+func (m *ScaleTimersOverloadActionConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *ScaleTimersOverloadActionConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetTimerScaleFactors()) < 1 { + err := ScaleTimersOverloadActionConfigValidationError{ + field: "TimerScaleFactors", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetTimerScaleFactors() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScaleTimersOverloadActionConfigValidationError{ + field: fmt.Sprintf("TimerScaleFactors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScaleTimersOverloadActionConfigValidationError{ + field: fmt.Sprintf("TimerScaleFactors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScaleTimersOverloadActionConfigValidationError{ + field: fmt.Sprintf("TimerScaleFactors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ScaleTimersOverloadActionConfigMultiError(errors) + } + + return nil +} + +// ScaleTimersOverloadActionConfigMultiError is an error wrapping multiple +// validation errors returned by ScaleTimersOverloadActionConfig.ValidateAll() +// if the designated constraints aren't met. +type ScaleTimersOverloadActionConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScaleTimersOverloadActionConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScaleTimersOverloadActionConfigMultiError) AllErrors() []error { return m } + +// ScaleTimersOverloadActionConfigValidationError is the validation error +// returned by ScaleTimersOverloadActionConfig.Validate if the designated +// constraints aren't met. +type ScaleTimersOverloadActionConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScaleTimersOverloadActionConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScaleTimersOverloadActionConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScaleTimersOverloadActionConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScaleTimersOverloadActionConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ScaleTimersOverloadActionConfigValidationError) ErrorName() string { + return "ScaleTimersOverloadActionConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e ScaleTimersOverloadActionConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScaleTimersOverloadActionConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScaleTimersOverloadActionConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScaleTimersOverloadActionConfigValidationError{} + +// Validate checks the field values on OverloadAction with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *OverloadAction) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on OverloadAction with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in OverloadActionMultiError, +// or nil if none found. +func (m *OverloadAction) ValidateAll() error { + return m.validate(true) +} + +func (m *OverloadAction) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := OverloadActionValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetTriggers()) < 1 { + err := OverloadActionValidationError{ + field: "Triggers", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetTriggers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OverloadActionValidationError{ + field: fmt.Sprintf("Triggers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OverloadActionValidationError{ + field: fmt.Sprintf("Triggers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OverloadActionValidationError{ + field: fmt.Sprintf("Triggers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OverloadActionValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OverloadActionValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
OverloadActionValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return OverloadActionMultiError(errors) + } + + return nil +} + +// OverloadActionMultiError is an error wrapping multiple validation errors +// returned by OverloadAction.ValidateAll() if the designated constraints +// aren't met. +type OverloadActionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m OverloadActionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m OverloadActionMultiError) AllErrors() []error { return m } + +// OverloadActionValidationError is the validation error returned by +// OverloadAction.Validate if the designated constraints aren't met. +type OverloadActionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e OverloadActionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e OverloadActionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e OverloadActionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e OverloadActionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e OverloadActionValidationError) ErrorName() string { return "OverloadActionValidationError" } + +// Error satisfies the builtin error interface +func (e OverloadActionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sOverloadAction.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = OverloadActionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = OverloadActionValidationError{} + +// Validate checks the field values on LoadShedPoint with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *LoadShedPoint) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LoadShedPoint with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in LoadShedPointMultiError, or +// nil if none found. 
+func (m *LoadShedPoint) ValidateAll() error { + return m.validate(true) +} + +func (m *LoadShedPoint) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := LoadShedPointValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetTriggers()) < 1 { + err := LoadShedPointValidationError{ + field: "Triggers", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetTriggers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LoadShedPointValidationError{ + field: fmt.Sprintf("Triggers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LoadShedPointValidationError{ + field: fmt.Sprintf("Triggers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LoadShedPointValidationError{ + field: fmt.Sprintf("Triggers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return LoadShedPointMultiError(errors) + } + + return nil +} + +// LoadShedPointMultiError is an error wrapping multiple validation errors +// returned by LoadShedPoint.ValidateAll() if the designated constraints +// aren't met. +type LoadShedPointMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LoadShedPointMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LoadShedPointMultiError) AllErrors() []error { return m } + +// LoadShedPointValidationError is the validation error returned by +// LoadShedPoint.Validate if the designated constraints aren't met. +type LoadShedPointValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LoadShedPointValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LoadShedPointValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LoadShedPointValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LoadShedPointValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e LoadShedPointValidationError) ErrorName() string { return "LoadShedPointValidationError" } + +// Error satisfies the builtin error interface +func (e LoadShedPointValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLoadShedPoint.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LoadShedPointValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LoadShedPointValidationError{} + +// Validate checks the field values on BufferFactoryConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *BufferFactoryConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on BufferFactoryConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// BufferFactoryConfigMultiError, or nil if none found. +func (m *BufferFactoryConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *BufferFactoryConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if val := m.GetMinimumAccountToTrackPowerOfTwo(); val < 10 || val > 56 { + err := BufferFactoryConfigValidationError{ + field: "MinimumAccountToTrackPowerOfTwo", + reason: "value must be inside range [10, 56]", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return BufferFactoryConfigMultiError(errors) + } + + return nil +} + +// BufferFactoryConfigMultiError is an error wrapping multiple validation +// errors returned by BufferFactoryConfig.ValidateAll() if the designated +// constraints aren't met. +type BufferFactoryConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m BufferFactoryConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m BufferFactoryConfigMultiError) AllErrors() []error { return m } + +// BufferFactoryConfigValidationError is the validation error returned by +// BufferFactoryConfig.Validate if the designated constraints aren't met. +type BufferFactoryConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e BufferFactoryConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e BufferFactoryConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e BufferFactoryConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e BufferFactoryConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e BufferFactoryConfigValidationError) ErrorName() string { + return "BufferFactoryConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e BufferFactoryConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBufferFactoryConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = BufferFactoryConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = BufferFactoryConfigValidationError{} + +// Validate checks the field values on OverloadManager with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *OverloadManager) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on OverloadManager with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// OverloadManagerMultiError, or nil if none found. +func (m *OverloadManager) ValidateAll() error { + return m.validate(true) +} + +func (m *OverloadManager) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetRefreshInterval()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OverloadManagerValidationError{ + field: "RefreshInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OverloadManagerValidationError{ + field: "RefreshInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRefreshInterval()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OverloadManagerValidationError{ + field: "RefreshInterval", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(m.GetResourceMonitors()) < 1 { + err := OverloadManagerValidationError{ + field: "ResourceMonitors", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetResourceMonitors() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OverloadManagerValidationError{ + field: fmt.Sprintf("ResourceMonitors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OverloadManagerValidationError{ + field: fmt.Sprintf("ResourceMonitors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OverloadManagerValidationError{ + field: fmt.Sprintf("ResourceMonitors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetActions() { + _, _ = idx, item + + if all { + switch v := 
interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OverloadManagerValidationError{ + field: fmt.Sprintf("Actions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OverloadManagerValidationError{ + field: fmt.Sprintf("Actions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OverloadManagerValidationError{ + field: fmt.Sprintf("Actions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetLoadshedPoints() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OverloadManagerValidationError{ + field: fmt.Sprintf("LoadshedPoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OverloadManagerValidationError{ + field: fmt.Sprintf("LoadshedPoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OverloadManagerValidationError{ + field: fmt.Sprintf("LoadshedPoints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetBufferFactoryConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OverloadManagerValidationError{ + field: "BufferFactoryConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OverloadManagerValidationError{ + field: "BufferFactoryConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBufferFactoryConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OverloadManagerValidationError{ + field: "BufferFactoryConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return OverloadManagerMultiError(errors) + } + + return nil +} + +// OverloadManagerMultiError is an error wrapping multiple validation errors +// returned by OverloadManager.ValidateAll() if the designated constraints +// aren't met. +type OverloadManagerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m OverloadManagerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m OverloadManagerMultiError) AllErrors() []error { return m } + +// OverloadManagerValidationError is the validation error returned by +// OverloadManager.Validate if the designated constraints aren't met. 
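
Illustrative usage (not part of the vendored file): a sketch of top-level validation of an OverloadManager, again assuming the same import path. A single resource monitor satisfies the min-items rule, but its empty Name is invalid, so ValidateAll reports the nested violation under the indexed field ResourceMonitors[0].

    package main

    import (
        "fmt"

        overloadv3 "github.com/envoyproxy/go-control-plane/envoy/config/overload/v3"
    )

    func main() {
        om := &overloadv3.OverloadManager{
            ResourceMonitors: []*overloadv3.ResourceMonitor{
                {Name: ""}, // nested violation: empty Name
            },
        }
        if err := om.ValidateAll(); err != nil {
            if merr, ok := err.(overloadv3.OverloadManagerMultiError); ok {
                for _, e := range merr.AllErrors() {
                    // e.g. invalid OverloadManager.ResourceMonitors[0]: embedded message failed validation | caused by: ...
                    fmt.Println(e)
                }
            }
        }
    }
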
+type OverloadManagerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e OverloadManagerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e OverloadManagerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e OverloadManagerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e OverloadManagerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e OverloadManagerValidationError) ErrorName() string { return "OverloadManagerValidationError" } + +// Error satisfies the builtin error interface +func (e OverloadManagerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sOverloadManager.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = OverloadManagerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = OverloadManagerValidationError{} + +// Validate checks the field values on +// ScaleTimersOverloadActionConfig_ScaleTimer with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ScaleTimersOverloadActionConfig_ScaleTimer) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// ScaleTimersOverloadActionConfig_ScaleTimer with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// ScaleTimersOverloadActionConfig_ScaleTimerMultiError, or nil if none found. 
+func (m *ScaleTimersOverloadActionConfig_ScaleTimer) ValidateAll() error { + return m.validate(true) +} + +func (m *ScaleTimersOverloadActionConfig_ScaleTimer) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := _ScaleTimersOverloadActionConfig_ScaleTimer_Timer_NotInLookup[m.GetTimer()]; ok { + err := ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ + field: "Timer", + reason: "value must not be in list [UNSPECIFIED]", + } + if !all { + return err + } + errors = append(errors, err) + } + + if _, ok := ScaleTimersOverloadActionConfig_TimerType_name[int32(m.GetTimer())]; !ok { + err := ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ + field: "Timer", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + oneofOverloadAdjustPresent := false + switch v := m.OverloadAdjust.(type) { + case *ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout: + if v == nil { + err := ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ + field: "OverloadAdjust", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOverloadAdjustPresent = true + + if all { + switch v := interface{}(m.GetMinTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ + field: "MinTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ + field: "MinTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMinTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ + field: "MinTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ScaleTimersOverloadActionConfig_ScaleTimer_MinScale: + if v == nil { + err := ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ + field: "OverloadAdjust", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOverloadAdjustPresent = true + + if all { + switch v := interface{}(m.GetMinScale()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ + field: "MinScale", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ + field: "MinScale", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMinScale()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ + field: "MinScale", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofOverloadAdjustPresent { + err := ScaleTimersOverloadActionConfig_ScaleTimerValidationError{ + field: "OverloadAdjust", + reason: "value is required", + } + if 
!all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ScaleTimersOverloadActionConfig_ScaleTimerMultiError(errors) + } + + return nil +} + +// ScaleTimersOverloadActionConfig_ScaleTimerMultiError is an error wrapping +// multiple validation errors returned by +// ScaleTimersOverloadActionConfig_ScaleTimer.ValidateAll() if the designated +// constraints aren't met. +type ScaleTimersOverloadActionConfig_ScaleTimerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScaleTimersOverloadActionConfig_ScaleTimerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScaleTimersOverloadActionConfig_ScaleTimerMultiError) AllErrors() []error { return m } + +// ScaleTimersOverloadActionConfig_ScaleTimerValidationError is the validation +// error returned by ScaleTimersOverloadActionConfig_ScaleTimer.Validate if +// the designated constraints aren't met. +type ScaleTimersOverloadActionConfig_ScaleTimerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScaleTimersOverloadActionConfig_ScaleTimerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScaleTimersOverloadActionConfig_ScaleTimerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScaleTimersOverloadActionConfig_ScaleTimerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScaleTimersOverloadActionConfig_ScaleTimerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ScaleTimersOverloadActionConfig_ScaleTimerValidationError) ErrorName() string { + return "ScaleTimersOverloadActionConfig_ScaleTimerValidationError" +} + +// Error satisfies the builtin error interface +func (e ScaleTimersOverloadActionConfig_ScaleTimerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScaleTimersOverloadActionConfig_ScaleTimer.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScaleTimersOverloadActionConfig_ScaleTimerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScaleTimersOverloadActionConfig_ScaleTimerValidationError{} + +var _ScaleTimersOverloadActionConfig_ScaleTimer_Timer_NotInLookup = map[ScaleTimersOverloadActionConfig_TimerType]struct{}{ + 0: {}, +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload_vtproto.pb.go new file mode 100644 index 0000000000..3a8ba0054f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/overload/v3/overload_vtproto.pb.go @@ -0,0 +1,938 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
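
Illustrative usage (not part of the vendored file): a sketch of exercising the strict marshalers generated below. That file is gated behind the vtprotobuf build tag, so this example carries the same tag and must be built with `go build -tags vtprotobuf`; the proto.Marshal call is included only as a reflection-based point of comparison.

    //go:build vtprotobuf

    package main

    import (
        "fmt"

        overloadv3 "github.com/envoyproxy/go-control-plane/envoy/config/overload/v3"
        "google.golang.org/protobuf/proto"
    )

    func main() {
        t := &overloadv3.ThresholdTrigger{Value: 0.9}

        // Generated fast path; only compiled when building with -tags vtprotobuf.
        fast, err := t.MarshalVTStrict()
        if err != nil {
            panic(err)
        }

        // Reflection-based path from the standard protobuf runtime, for comparison.
        slow, err := proto.Marshal(t)
        if err != nil {
            panic(err)
        }

        fmt.Printf("vtproto: %d bytes, proto.Marshal: %d bytes\n", len(fast), len(slow))
    }
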
+// source: envoy/config/overload/v3/overload.proto + +package overloadv3 + +import ( + binary "encoding/binary" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ResourceMonitor) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceMonitor) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ResourceMonitor) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*ResourceMonitor_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResourceMonitor_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ResourceMonitor_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *ThresholdTrigger) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ThresholdTrigger) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ThresholdTrigger) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Value != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *ScaledTrigger) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*ScaledTrigger) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScaledTrigger) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SaturationThreshold != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.SaturationThreshold)))) + i-- + dAtA[i] = 0x11 + } + if m.ScalingThreshold != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ScalingThreshold)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Trigger) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Trigger) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Trigger) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.TriggerOneof.(*Trigger_Scaled); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.TriggerOneof.(*Trigger_Threshold); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Trigger_Threshold) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Trigger_Threshold) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Threshold != nil { + size, err := m.Threshold.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Trigger_Scaled) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Trigger_Scaled) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Scaled != nil { + size, err := m.Scaled.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *ScaleTimersOverloadActionConfig_ScaleTimer) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleTimersOverloadActionConfig_ScaleTimer) MarshalToVTStrict(dAtA []byte) (int, error) { + size := 
m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScaleTimersOverloadActionConfig_ScaleTimer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.OverloadAdjust.(*ScaleTimersOverloadActionConfig_ScaleTimer_MinScale); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.OverloadAdjust.(*ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Timer != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Timer)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MinTimeout != nil { + size, err := (*durationpb.Duration)(m.MinTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *ScaleTimersOverloadActionConfig_ScaleTimer_MinScale) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScaleTimersOverloadActionConfig_ScaleTimer_MinScale) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MinScale != nil { + if vtmsg, ok := interface{}(m.MinScale).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MinScale) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *ScaleTimersOverloadActionConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleTimersOverloadActionConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScaleTimersOverloadActionConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TimerScaleFactors) > 0 { + for iNdEx := len(m.TimerScaleFactors) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TimerScaleFactors[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *OverloadAction) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OverloadAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OverloadAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Triggers) > 0 { + for iNdEx := len(m.Triggers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Triggers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LoadShedPoint) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LoadShedPoint) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LoadShedPoint) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Triggers) > 0 { + for iNdEx := len(m.Triggers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Triggers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BufferFactoryConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BufferFactoryConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *BufferFactoryConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MinimumAccountToTrackPowerOfTwo != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MinimumAccountToTrackPowerOfTwo)) + i-- + dAtA[i] = 
0x8 + } + return len(dAtA) - i, nil +} + +func (m *OverloadManager) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OverloadManager) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OverloadManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.LoadshedPoints) > 0 { + for iNdEx := len(m.LoadshedPoints) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.LoadshedPoints[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.BufferFactoryConfig != nil { + size, err := m.BufferFactoryConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.Actions) > 0 { + for iNdEx := len(m.Actions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Actions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.ResourceMonitors) > 0 { + for iNdEx := len(m.ResourceMonitors) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ResourceMonitors[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.RefreshInterval != nil { + size, err := (*durationpb.Duration)(m.RefreshInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResourceMonitor) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ResourceMonitor_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ThresholdTrigger) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + n += len(m.unknownFields) + return n +} + +func (m *ScaledTrigger) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ScalingThreshold != 0 { + n += 9 + } + if m.SaturationThreshold != 0 { + n += 9 + } + n += len(m.unknownFields) + return n +} + +func (m *Trigger) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.TriggerOneof.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Trigger_Threshold) SizeVT() (n int) { + if m 
== nil { + return 0 + } + var l int + _ = l + if m.Threshold != nil { + l = m.Threshold.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Trigger_Scaled) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Scaled != nil { + l = m.Scaled.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ScaleTimersOverloadActionConfig_ScaleTimer) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Timer != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Timer)) + } + if vtmsg, ok := m.OverloadAdjust.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ScaleTimersOverloadActionConfig_ScaleTimer_MinTimeout) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MinTimeout != nil { + l = (*durationpb.Duration)(m.MinTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ScaleTimersOverloadActionConfig_ScaleTimer_MinScale) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MinScale != nil { + if size, ok := interface{}(m.MinScale).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MinScale) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ScaleTimersOverloadActionConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.TimerScaleFactors) > 0 { + for _, e := range m.TimerScaleFactors { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *OverloadAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Triggers) > 0 { + for _, e := range m.Triggers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LoadShedPoint) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Triggers) > 0 { + for _, e := range m.Triggers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *BufferFactoryConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MinimumAccountToTrackPowerOfTwo != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MinimumAccountToTrackPowerOfTwo)) + } + n += len(m.unknownFields) + return n +} + +func (m *OverloadManager) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RefreshInterval != nil { + l = (*durationpb.Duration)(m.RefreshInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ResourceMonitors) > 0 { + for _, e := range m.ResourceMonitors { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Actions) > 0 { + for _, e := range m.Actions { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.BufferFactoryConfig != nil { + l = m.BufferFactoryConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + 
} + if len(m.LoadshedPoints) > 0 { + for _, e := range m.LoadshedPoints { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.go new file mode 100644 index 0000000000..c069fae842 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.go @@ -0,0 +1,1788 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/rbac/v3/rbac.proto + +package rbacv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v32 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + v33 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + v1alpha1 "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Should we do safe-list or block-list style access control? +type RBAC_Action int32 + +const ( + // The policies grant access to principals. The rest are denied. This is safe-list style + // access control. This is the default type. + RBAC_ALLOW RBAC_Action = 0 + // The policies deny access to principals. The rest are allowed. This is block-list style + // access control. + RBAC_DENY RBAC_Action = 1 + // The policies set the “access_log_hint“ dynamic metadata key based on if requests match. + // All requests are allowed. + RBAC_LOG RBAC_Action = 2 +) + +// Enum value maps for RBAC_Action. +var ( + RBAC_Action_name = map[int32]string{ + 0: "ALLOW", + 1: "DENY", + 2: "LOG", + } + RBAC_Action_value = map[string]int32{ + "ALLOW": 0, + "DENY": 1, + "LOG": 2, + } +) + +func (x RBAC_Action) Enum() *RBAC_Action { + p := new(RBAC_Action) + *p = x + return p +} + +func (x RBAC_Action) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RBAC_Action) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_rbac_v3_rbac_proto_enumTypes[0].Descriptor() +} + +func (RBAC_Action) Type() protoreflect.EnumType { + return &file_envoy_config_rbac_v3_rbac_proto_enumTypes[0] +} + +func (x RBAC_Action) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RBAC_Action.Descriptor instead. +func (RBAC_Action) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{0, 0} +} + +// Deny and allow here refer to RBAC decisions, not actions. +type RBAC_AuditLoggingOptions_AuditCondition int32 + +const ( + // Never audit. + RBAC_AuditLoggingOptions_NONE RBAC_AuditLoggingOptions_AuditCondition = 0 + // Audit when RBAC denies the request. + RBAC_AuditLoggingOptions_ON_DENY RBAC_AuditLoggingOptions_AuditCondition = 1 + // Audit when RBAC allows the request. 
+ RBAC_AuditLoggingOptions_ON_ALLOW RBAC_AuditLoggingOptions_AuditCondition = 2 + // Audit whether RBAC allows or denies the request. + RBAC_AuditLoggingOptions_ON_DENY_AND_ALLOW RBAC_AuditLoggingOptions_AuditCondition = 3 +) + +// Enum value maps for RBAC_AuditLoggingOptions_AuditCondition. +var ( + RBAC_AuditLoggingOptions_AuditCondition_name = map[int32]string{ + 0: "NONE", + 1: "ON_DENY", + 2: "ON_ALLOW", + 3: "ON_DENY_AND_ALLOW", + } + RBAC_AuditLoggingOptions_AuditCondition_value = map[string]int32{ + "NONE": 0, + "ON_DENY": 1, + "ON_ALLOW": 2, + "ON_DENY_AND_ALLOW": 3, + } +) + +func (x RBAC_AuditLoggingOptions_AuditCondition) Enum() *RBAC_AuditLoggingOptions_AuditCondition { + p := new(RBAC_AuditLoggingOptions_AuditCondition) + *p = x + return p +} + +func (x RBAC_AuditLoggingOptions_AuditCondition) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RBAC_AuditLoggingOptions_AuditCondition) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_rbac_v3_rbac_proto_enumTypes[1].Descriptor() +} + +func (RBAC_AuditLoggingOptions_AuditCondition) Type() protoreflect.EnumType { + return &file_envoy_config_rbac_v3_rbac_proto_enumTypes[1] +} + +func (x RBAC_AuditLoggingOptions_AuditCondition) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RBAC_AuditLoggingOptions_AuditCondition.Descriptor instead. +func (RBAC_AuditLoggingOptions_AuditCondition) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{0, 0, 0} +} + +// Role Based Access Control (RBAC) provides service-level and method-level access control for a +// service. Requests are allowed or denied based on the “action“ and whether a matching policy is +// found. For instance, if the action is ALLOW and a matching policy is found the request should be +// allowed. +// +// RBAC can also be used to make access logging decisions by communicating with access loggers +// through dynamic metadata. When the action is LOG and at least one policy matches, the +// “access_log_hint“ value in the shared key namespace 'envoy.common' is set to “true“ indicating +// the request should be logged. +// +// Here is an example of RBAC configuration. It has two policies: +// +// - Service account “cluster.local/ns/default/sa/admin“ has full access to the service, and so +// does "cluster.local/ns/default/sa/superuser". +// +// - Any user can read (“GET“) the service at paths with prefix “/products“, so long as the +// destination port is either 80 or 443. +// +// .. code-block:: yaml +// +// action: ALLOW +// policies: +// "service-admin": +// permissions: +// +// - any: true +// principals: +// +// - authenticated: +// principal_name: +// exact: "cluster.local/ns/default/sa/admin" +// +// - authenticated: +// principal_name: +// exact: "cluster.local/ns/default/sa/superuser" +// "product-viewer": +// permissions: +// +// - and_rules: +// rules: +// +// - header: +// name: ":method" +// string_match: +// exact: "GET" +// +// - url_path: +// path: { prefix: "/products" } +// +// - or_rules: +// rules: +// +// - destination_port: 80 +// +// - destination_port: 443 +// principals: +// +// - any: true +type RBAC struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The action to take if a policy matches. Every action either allows or denies a request, + // and can also carry out action-specific operations. 
+ // + // Actions: + // + // - “ALLOW“: Allows the request if and only if there is a policy that matches + // the request. + // - “DENY“: Allows the request if and only if there are no policies that + // match the request. + // - “LOG“: Allows all requests. If at least one policy matches, the dynamic + // metadata key “access_log_hint“ is set to the value “true“ under the shared + // key namespace “envoy.common“. If no policies match, it is set to “false“. + // Other actions do not modify this key. + Action RBAC_Action `protobuf:"varint,1,opt,name=action,proto3,enum=envoy.config.rbac.v3.RBAC_Action" json:"action,omitempty"` + // Maps from policy name to policy. A match occurs when at least one policy matches the request. + // The policies are evaluated in lexicographic order of the policy name. + Policies map[string]*Policy `protobuf:"bytes,2,rep,name=policies,proto3" json:"policies,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Audit logging options that include the condition for audit logging to happen + // and audit logger configurations. + // + // [#not-implemented-hide:] + AuditLoggingOptions *RBAC_AuditLoggingOptions `protobuf:"bytes,3,opt,name=audit_logging_options,json=auditLoggingOptions,proto3" json:"audit_logging_options,omitempty"` +} + +func (x *RBAC) Reset() { + *x = RBAC{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RBAC) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RBAC) ProtoMessage() {} + +func (x *RBAC) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RBAC.ProtoReflect.Descriptor instead. +func (*RBAC) Descriptor() ([]byte, []int) { + return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{0} +} + +func (x *RBAC) GetAction() RBAC_Action { + if x != nil { + return x.Action + } + return RBAC_ALLOW +} + +func (x *RBAC) GetPolicies() map[string]*Policy { + if x != nil { + return x.Policies + } + return nil +} + +func (x *RBAC) GetAuditLoggingOptions() *RBAC_AuditLoggingOptions { + if x != nil { + return x.AuditLoggingOptions + } + return nil +} + +// Policy specifies a role and the principals that are assigned/denied the role. +// A policy matches if and only if at least one of its permissions match the +// action taking place AND at least one of its principals match the downstream +// AND the condition is true if specified. +type Policy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The set of permissions that define a role. Each permission is + // matched with OR semantics. To match all actions for this policy, a single + // Permission with the “any“ field set to true should be used. + Permissions []*Permission `protobuf:"bytes,1,rep,name=permissions,proto3" json:"permissions,omitempty"` + // Required. The set of principals that are assigned/denied the role based on + // “action”. Each principal is matched with OR semantics. To match all + // downstreams for this policy, a single Principal with the “any“ field set to + // true should be used. 
+ Principals []*Principal `protobuf:"bytes,2,rep,name=principals,proto3" json:"principals,omitempty"` + // An optional symbolic expression specifying an access control + // :ref:`condition `. The condition is combined + // with the permissions and the principals as a clause with AND semantics. + // Only be used when checked_condition is not used. + Condition *v1alpha1.Expr `protobuf:"bytes,3,opt,name=condition,proto3" json:"condition,omitempty"` + // [#not-implemented-hide:] + // An optional symbolic expression that has been successfully type checked. + // Only be used when condition is not used. + CheckedCondition *v1alpha1.CheckedExpr `protobuf:"bytes,4,opt,name=checked_condition,json=checkedCondition,proto3" json:"checked_condition,omitempty"` +} + +func (x *Policy) Reset() { + *x = Policy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Policy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Policy) ProtoMessage() {} + +func (x *Policy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Policy.ProtoReflect.Descriptor instead. +func (*Policy) Descriptor() ([]byte, []int) { + return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{1} +} + +func (x *Policy) GetPermissions() []*Permission { + if x != nil { + return x.Permissions + } + return nil +} + +func (x *Policy) GetPrincipals() []*Principal { + if x != nil { + return x.Principals + } + return nil +} + +func (x *Policy) GetCondition() *v1alpha1.Expr { + if x != nil { + return x.Condition + } + return nil +} + +func (x *Policy) GetCheckedCondition() *v1alpha1.CheckedExpr { + if x != nil { + return x.CheckedCondition + } + return nil +} + +// Permission defines an action (or actions) that a principal can take. +// [#next-free-field: 14] +type Permission struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Rule: + // + // *Permission_AndRules + // *Permission_OrRules + // *Permission_Any + // *Permission_Header + // *Permission_UrlPath + // *Permission_DestinationIp + // *Permission_DestinationPort + // *Permission_DestinationPortRange + // *Permission_Metadata + // *Permission_NotRule + // *Permission_RequestedServerName + // *Permission_Matcher + // *Permission_UriTemplate + Rule isPermission_Rule `protobuf_oneof:"rule"` +} + +func (x *Permission) Reset() { + *x = Permission{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Permission) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Permission) ProtoMessage() {} + +func (x *Permission) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Permission.ProtoReflect.Descriptor instead. 
+func (*Permission) Descriptor() ([]byte, []int) { + return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{2} +} + +func (m *Permission) GetRule() isPermission_Rule { + if m != nil { + return m.Rule + } + return nil +} + +func (x *Permission) GetAndRules() *Permission_Set { + if x, ok := x.GetRule().(*Permission_AndRules); ok { + return x.AndRules + } + return nil +} + +func (x *Permission) GetOrRules() *Permission_Set { + if x, ok := x.GetRule().(*Permission_OrRules); ok { + return x.OrRules + } + return nil +} + +func (x *Permission) GetAny() bool { + if x, ok := x.GetRule().(*Permission_Any); ok { + return x.Any + } + return false +} + +func (x *Permission) GetHeader() *v3.HeaderMatcher { + if x, ok := x.GetRule().(*Permission_Header); ok { + return x.Header + } + return nil +} + +func (x *Permission) GetUrlPath() *v31.PathMatcher { + if x, ok := x.GetRule().(*Permission_UrlPath); ok { + return x.UrlPath + } + return nil +} + +func (x *Permission) GetDestinationIp() *v32.CidrRange { + if x, ok := x.GetRule().(*Permission_DestinationIp); ok { + return x.DestinationIp + } + return nil +} + +func (x *Permission) GetDestinationPort() uint32 { + if x, ok := x.GetRule().(*Permission_DestinationPort); ok { + return x.DestinationPort + } + return 0 +} + +func (x *Permission) GetDestinationPortRange() *v33.Int32Range { + if x, ok := x.GetRule().(*Permission_DestinationPortRange); ok { + return x.DestinationPortRange + } + return nil +} + +func (x *Permission) GetMetadata() *v31.MetadataMatcher { + if x, ok := x.GetRule().(*Permission_Metadata); ok { + return x.Metadata + } + return nil +} + +func (x *Permission) GetNotRule() *Permission { + if x, ok := x.GetRule().(*Permission_NotRule); ok { + return x.NotRule + } + return nil +} + +func (x *Permission) GetRequestedServerName() *v31.StringMatcher { + if x, ok := x.GetRule().(*Permission_RequestedServerName); ok { + return x.RequestedServerName + } + return nil +} + +func (x *Permission) GetMatcher() *v32.TypedExtensionConfig { + if x, ok := x.GetRule().(*Permission_Matcher); ok { + return x.Matcher + } + return nil +} + +func (x *Permission) GetUriTemplate() *v32.TypedExtensionConfig { + if x, ok := x.GetRule().(*Permission_UriTemplate); ok { + return x.UriTemplate + } + return nil +} + +type isPermission_Rule interface { + isPermission_Rule() +} + +type Permission_AndRules struct { + // A set of rules that all must match in order to define the action. + AndRules *Permission_Set `protobuf:"bytes,1,opt,name=and_rules,json=andRules,proto3,oneof"` +} + +type Permission_OrRules struct { + // A set of rules where at least one must match in order to define the action. + OrRules *Permission_Set `protobuf:"bytes,2,opt,name=or_rules,json=orRules,proto3,oneof"` +} + +type Permission_Any struct { + // When any is set, it matches any action. + Any bool `protobuf:"varint,3,opt,name=any,proto3,oneof"` +} + +type Permission_Header struct { + // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only + // available for HTTP request. + // Note: the pseudo-header :path includes the query and fragment string. Use the “url_path“ + // field if you want to match the URL path without the query and fragment string. + Header *v3.HeaderMatcher `protobuf:"bytes,4,opt,name=header,proto3,oneof"` +} + +type Permission_UrlPath struct { + // A URL path on the incoming HTTP request. Only available for HTTP. 
+ UrlPath *v31.PathMatcher `protobuf:"bytes,10,opt,name=url_path,json=urlPath,proto3,oneof"` +} + +type Permission_DestinationIp struct { + // A CIDR block that describes the destination IP. + DestinationIp *v32.CidrRange `protobuf:"bytes,5,opt,name=destination_ip,json=destinationIp,proto3,oneof"` +} + +type Permission_DestinationPort struct { + // A port number that describes the destination port connecting to. + DestinationPort uint32 `protobuf:"varint,6,opt,name=destination_port,json=destinationPort,proto3,oneof"` +} + +type Permission_DestinationPortRange struct { + // A port number range that describes a range of destination ports connecting to. + DestinationPortRange *v33.Int32Range `protobuf:"bytes,11,opt,name=destination_port_range,json=destinationPortRange,proto3,oneof"` +} + +type Permission_Metadata struct { + // Metadata that describes additional information about the action. + Metadata *v31.MetadataMatcher `protobuf:"bytes,7,opt,name=metadata,proto3,oneof"` +} + +type Permission_NotRule struct { + // Negates matching the provided permission. For instance, if the value of + // “not_rule“ would match, this permission would not match. Conversely, if + // the value of “not_rule“ would not match, this permission would match. + NotRule *Permission `protobuf:"bytes,8,opt,name=not_rule,json=notRule,proto3,oneof"` +} + +type Permission_RequestedServerName struct { + // The request server from the client's connection request. This is + // typically TLS SNI. + // + // .. attention:: + // + // The behavior of this field may be affected by how Envoy is configured + // as explained below. + // + // * If the :ref:`TLS Inspector ` + // filter is not added, and if a ``FilterChainMatch`` is not defined for + // the :ref:`server name + // `, + // a TLS connection's requested SNI server name will be treated as if it + // wasn't present. + // + // * A :ref:`listener filter ` may + // overwrite a connection's requested server name within Envoy. + // + // Please refer to :ref:`this FAQ entry ` to learn to + // setup SNI. + RequestedServerName *v31.StringMatcher `protobuf:"bytes,9,opt,name=requested_server_name,json=requestedServerName,proto3,oneof"` +} + +type Permission_Matcher struct { + // Extension for configuring custom matchers for RBAC. + // [#extension-category: envoy.rbac.matchers] + Matcher *v32.TypedExtensionConfig `protobuf:"bytes,12,opt,name=matcher,proto3,oneof"` +} + +type Permission_UriTemplate struct { + // URI template path matching. + // [#extension-category: envoy.path.match] + UriTemplate *v32.TypedExtensionConfig `protobuf:"bytes,13,opt,name=uri_template,json=uriTemplate,proto3,oneof"` +} + +func (*Permission_AndRules) isPermission_Rule() {} + +func (*Permission_OrRules) isPermission_Rule() {} + +func (*Permission_Any) isPermission_Rule() {} + +func (*Permission_Header) isPermission_Rule() {} + +func (*Permission_UrlPath) isPermission_Rule() {} + +func (*Permission_DestinationIp) isPermission_Rule() {} + +func (*Permission_DestinationPort) isPermission_Rule() {} + +func (*Permission_DestinationPortRange) isPermission_Rule() {} + +func (*Permission_Metadata) isPermission_Rule() {} + +func (*Permission_NotRule) isPermission_Rule() {} + +func (*Permission_RequestedServerName) isPermission_Rule() {} + +func (*Permission_Matcher) isPermission_Rule() {} + +func (*Permission_UriTemplate) isPermission_Rule() {} + +// Principal defines an identity or a group of identities for a downstream +// subject. 
+// [#next-free-field: 13] +type Principal struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Identifier: + // + // *Principal_AndIds + // *Principal_OrIds + // *Principal_Any + // *Principal_Authenticated_ + // *Principal_SourceIp + // *Principal_DirectRemoteIp + // *Principal_RemoteIp + // *Principal_Header + // *Principal_UrlPath + // *Principal_Metadata + // *Principal_FilterState + // *Principal_NotId + Identifier isPrincipal_Identifier `protobuf_oneof:"identifier"` +} + +func (x *Principal) Reset() { + *x = Principal{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Principal) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Principal) ProtoMessage() {} + +func (x *Principal) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Principal.ProtoReflect.Descriptor instead. +func (*Principal) Descriptor() ([]byte, []int) { + return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{3} +} + +func (m *Principal) GetIdentifier() isPrincipal_Identifier { + if m != nil { + return m.Identifier + } + return nil +} + +func (x *Principal) GetAndIds() *Principal_Set { + if x, ok := x.GetIdentifier().(*Principal_AndIds); ok { + return x.AndIds + } + return nil +} + +func (x *Principal) GetOrIds() *Principal_Set { + if x, ok := x.GetIdentifier().(*Principal_OrIds); ok { + return x.OrIds + } + return nil +} + +func (x *Principal) GetAny() bool { + if x, ok := x.GetIdentifier().(*Principal_Any); ok { + return x.Any + } + return false +} + +func (x *Principal) GetAuthenticated() *Principal_Authenticated { + if x, ok := x.GetIdentifier().(*Principal_Authenticated_); ok { + return x.Authenticated + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/rbac/v3/rbac.proto. 
+func (x *Principal) GetSourceIp() *v32.CidrRange { + if x, ok := x.GetIdentifier().(*Principal_SourceIp); ok { + return x.SourceIp + } + return nil +} + +func (x *Principal) GetDirectRemoteIp() *v32.CidrRange { + if x, ok := x.GetIdentifier().(*Principal_DirectRemoteIp); ok { + return x.DirectRemoteIp + } + return nil +} + +func (x *Principal) GetRemoteIp() *v32.CidrRange { + if x, ok := x.GetIdentifier().(*Principal_RemoteIp); ok { + return x.RemoteIp + } + return nil +} + +func (x *Principal) GetHeader() *v3.HeaderMatcher { + if x, ok := x.GetIdentifier().(*Principal_Header); ok { + return x.Header + } + return nil +} + +func (x *Principal) GetUrlPath() *v31.PathMatcher { + if x, ok := x.GetIdentifier().(*Principal_UrlPath); ok { + return x.UrlPath + } + return nil +} + +func (x *Principal) GetMetadata() *v31.MetadataMatcher { + if x, ok := x.GetIdentifier().(*Principal_Metadata); ok { + return x.Metadata + } + return nil +} + +func (x *Principal) GetFilterState() *v31.FilterStateMatcher { + if x, ok := x.GetIdentifier().(*Principal_FilterState); ok { + return x.FilterState + } + return nil +} + +func (x *Principal) GetNotId() *Principal { + if x, ok := x.GetIdentifier().(*Principal_NotId); ok { + return x.NotId + } + return nil +} + +type isPrincipal_Identifier interface { + isPrincipal_Identifier() +} + +type Principal_AndIds struct { + // A set of identifiers that all must match in order to define the + // downstream. + AndIds *Principal_Set `protobuf:"bytes,1,opt,name=and_ids,json=andIds,proto3,oneof"` +} + +type Principal_OrIds struct { + // A set of identifiers at least one must match in order to define the + // downstream. + OrIds *Principal_Set `protobuf:"bytes,2,opt,name=or_ids,json=orIds,proto3,oneof"` +} + +type Principal_Any struct { + // When any is set, it matches any downstream. + Any bool `protobuf:"varint,3,opt,name=any,proto3,oneof"` +} + +type Principal_Authenticated_ struct { + // Authenticated attributes that identify the downstream. + Authenticated *Principal_Authenticated `protobuf:"bytes,4,opt,name=authenticated,proto3,oneof"` +} + +type Principal_SourceIp struct { + // A CIDR block that describes the downstream IP. + // This address will honor proxy protocol, but will not honor XFF. + // + // This field is deprecated; either use :ref:`remote_ip + // ` for the same + // behavior, or use + // :ref:`direct_remote_ip `. + // + // Deprecated: Marked as deprecated in envoy/config/rbac/v3/rbac.proto. + SourceIp *v32.CidrRange `protobuf:"bytes,5,opt,name=source_ip,json=sourceIp,proto3,oneof"` +} + +type Principal_DirectRemoteIp struct { + // A CIDR block that describes the downstream remote/origin address. + // Note: This is always the physical peer even if the + // :ref:`remote_ip ` is + // inferred from for example the x-forwarder-for header, proxy protocol, + // etc. + DirectRemoteIp *v32.CidrRange `protobuf:"bytes,10,opt,name=direct_remote_ip,json=directRemoteIp,proto3,oneof"` +} + +type Principal_RemoteIp struct { + // A CIDR block that describes the downstream remote/origin address. + // Note: This may not be the physical peer and could be different from the + // :ref:`direct_remote_ip + // `. E.g, if the + // remote ip is inferred from for example the x-forwarder-for header, proxy + // protocol, etc. + RemoteIp *v32.CidrRange `protobuf:"bytes,11,opt,name=remote_ip,json=remoteIp,proto3,oneof"` +} + +type Principal_Header struct { + // A header (or pseudo-header such as :path or :method) on the incoming HTTP + // request. Only available for HTTP request. 
Note: the pseudo-header :path + // includes the query and fragment string. Use the “url_path“ field if you + // want to match the URL path without the query and fragment string. + Header *v3.HeaderMatcher `protobuf:"bytes,6,opt,name=header,proto3,oneof"` +} + +type Principal_UrlPath struct { + // A URL path on the incoming HTTP request. Only available for HTTP. + UrlPath *v31.PathMatcher `protobuf:"bytes,9,opt,name=url_path,json=urlPath,proto3,oneof"` +} + +type Principal_Metadata struct { + // Metadata that describes additional information about the principal. + Metadata *v31.MetadataMatcher `protobuf:"bytes,7,opt,name=metadata,proto3,oneof"` +} + +type Principal_FilterState struct { + // Identifies the principal using a filter state object. + FilterState *v31.FilterStateMatcher `protobuf:"bytes,12,opt,name=filter_state,json=filterState,proto3,oneof"` +} + +type Principal_NotId struct { + // Negates matching the provided principal. For instance, if the value of + // “not_id“ would match, this principal would not match. Conversely, if the + // value of “not_id“ would not match, this principal would match. + NotId *Principal `protobuf:"bytes,8,opt,name=not_id,json=notId,proto3,oneof"` +} + +func (*Principal_AndIds) isPrincipal_Identifier() {} + +func (*Principal_OrIds) isPrincipal_Identifier() {} + +func (*Principal_Any) isPrincipal_Identifier() {} + +func (*Principal_Authenticated_) isPrincipal_Identifier() {} + +func (*Principal_SourceIp) isPrincipal_Identifier() {} + +func (*Principal_DirectRemoteIp) isPrincipal_Identifier() {} + +func (*Principal_RemoteIp) isPrincipal_Identifier() {} + +func (*Principal_Header) isPrincipal_Identifier() {} + +func (*Principal_UrlPath) isPrincipal_Identifier() {} + +func (*Principal_Metadata) isPrincipal_Identifier() {} + +func (*Principal_FilterState) isPrincipal_Identifier() {} + +func (*Principal_NotId) isPrincipal_Identifier() {} + +// Action defines the result of allowance or denial when a request matches the matcher. +type Action struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name indicates the policy name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The action to take if the matcher matches. Every action either allows or denies a request, + // and can also carry out action-specific operations. + // + // Actions: + // + // - “ALLOW“: If the request gets matched on ALLOW, it is permitted. + // - “DENY“: If the request gets matched on DENY, it is not permitted. + // - “LOG“: If the request gets matched on LOG, it is permitted. Besides, the + // dynamic metadata key “access_log_hint“ under the shared key namespace + // “envoy.common“ will be set to the value “true“. + // - If the request cannot get matched, it will fallback to “DENY“. + // + // Log behavior: + // + // If the RBAC matcher contains at least one LOG action, the dynamic + // metadata key ``access_log_hint`` will be set based on if the request + // get matched on the LOG action. 
+ Action RBAC_Action `protobuf:"varint,2,opt,name=action,proto3,enum=envoy.config.rbac.v3.RBAC_Action" json:"action,omitempty"` +} + +func (x *Action) Reset() { + *x = Action{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Action) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Action) ProtoMessage() {} + +func (x *Action) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Action.ProtoReflect.Descriptor instead. +func (*Action) Descriptor() ([]byte, []int) { + return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{4} +} + +func (x *Action) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Action) GetAction() RBAC_Action { + if x != nil { + return x.Action + } + return RBAC_ALLOW +} + +type RBAC_AuditLoggingOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Condition for the audit logging to happen. + // If this condition is met, all the audit loggers configured here will be invoked. + // + // [#not-implemented-hide:] + AuditCondition RBAC_AuditLoggingOptions_AuditCondition `protobuf:"varint,1,opt,name=audit_condition,json=auditCondition,proto3,enum=envoy.config.rbac.v3.RBAC_AuditLoggingOptions_AuditCondition" json:"audit_condition,omitempty"` + // Configurations for RBAC-based authorization audit loggers. + // + // [#not-implemented-hide:] + LoggerConfigs []*RBAC_AuditLoggingOptions_AuditLoggerConfig `protobuf:"bytes,2,rep,name=logger_configs,json=loggerConfigs,proto3" json:"logger_configs,omitempty"` +} + +func (x *RBAC_AuditLoggingOptions) Reset() { + *x = RBAC_AuditLoggingOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RBAC_AuditLoggingOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RBAC_AuditLoggingOptions) ProtoMessage() {} + +func (x *RBAC_AuditLoggingOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RBAC_AuditLoggingOptions.ProtoReflect.Descriptor instead. +func (*RBAC_AuditLoggingOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *RBAC_AuditLoggingOptions) GetAuditCondition() RBAC_AuditLoggingOptions_AuditCondition { + if x != nil { + return x.AuditCondition + } + return RBAC_AuditLoggingOptions_NONE +} + +func (x *RBAC_AuditLoggingOptions) GetLoggerConfigs() []*RBAC_AuditLoggingOptions_AuditLoggerConfig { + if x != nil { + return x.LoggerConfigs + } + return nil +} + +// [#not-implemented-hide:] +type RBAC_AuditLoggingOptions_AuditLoggerConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Typed logger configuration. 
+ // + // [#extension-category: envoy.rbac.audit_loggers] + AuditLogger *v32.TypedExtensionConfig `protobuf:"bytes,1,opt,name=audit_logger,json=auditLogger,proto3" json:"audit_logger,omitempty"` + // If true, when the logger is not supported, the data plane will not NACK but simply ignore it. + IsOptional bool `protobuf:"varint,2,opt,name=is_optional,json=isOptional,proto3" json:"is_optional,omitempty"` +} + +func (x *RBAC_AuditLoggingOptions_AuditLoggerConfig) Reset() { + *x = RBAC_AuditLoggingOptions_AuditLoggerConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RBAC_AuditLoggingOptions_AuditLoggerConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RBAC_AuditLoggingOptions_AuditLoggerConfig) ProtoMessage() {} + +func (x *RBAC_AuditLoggingOptions_AuditLoggerConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RBAC_AuditLoggingOptions_AuditLoggerConfig.ProtoReflect.Descriptor instead. +func (*RBAC_AuditLoggingOptions_AuditLoggerConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{0, 0, 0} +} + +func (x *RBAC_AuditLoggingOptions_AuditLoggerConfig) GetAuditLogger() *v32.TypedExtensionConfig { + if x != nil { + return x.AuditLogger + } + return nil +} + +func (x *RBAC_AuditLoggingOptions_AuditLoggerConfig) GetIsOptional() bool { + if x != nil { + return x.IsOptional + } + return false +} + +// Used in the “and_rules“ and “or_rules“ fields in the “rule“ oneof. Depending on the context, +// each are applied with the associated behavior. +type Permission_Set struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Rules []*Permission `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"` +} + +func (x *Permission_Set) Reset() { + *x = Permission_Set{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Permission_Set) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Permission_Set) ProtoMessage() {} + +func (x *Permission_Set) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Permission_Set.ProtoReflect.Descriptor instead. +func (*Permission_Set) Descriptor() ([]byte, []int) { + return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *Permission_Set) GetRules() []*Permission { + if x != nil { + return x.Rules + } + return nil +} + +// Used in the “and_ids“ and “or_ids“ fields in the “identifier“ oneof. +// Depending on the context, each are applied with the associated behavior. 
+type Principal_Set struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ids []*Principal `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` +} + +func (x *Principal_Set) Reset() { + *x = Principal_Set{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Principal_Set) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Principal_Set) ProtoMessage() {} + +func (x *Principal_Set) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Principal_Set.ProtoReflect.Descriptor instead. +func (*Principal_Set) Descriptor() ([]byte, []int) { + return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *Principal_Set) GetIds() []*Principal { + if x != nil { + return x.Ids + } + return nil +} + +// Authentication attributes for a downstream. +type Principal_Authenticated struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the principal. If set, The URI SAN or DNS SAN in that order + // is used from the certificate, otherwise the subject field is used. If + // unset, it applies to any user that is authenticated. + PrincipalName *v31.StringMatcher `protobuf:"bytes,2,opt,name=principal_name,json=principalName,proto3" json:"principal_name,omitempty"` +} + +func (x *Principal_Authenticated) Reset() { + *x = Principal_Authenticated{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Principal_Authenticated) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Principal_Authenticated) ProtoMessage() {} + +func (x *Principal_Authenticated) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_rbac_v3_rbac_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Principal_Authenticated.ProtoReflect.Descriptor instead. 
+func (*Principal_Authenticated) Descriptor() ([]byte, []int) { + return file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP(), []int{3, 1} +} + +func (x *Principal_Authenticated) GetPrincipalName() *v31.StringMatcher { + if x != nil { + return x.PrincipalName + } + return nil +} + +var File_envoy_config_rbac_v3_rbac_proto protoreflect.FileDescriptor + +var file_envoy_config_rbac_v3_rbac_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, + 0x62, 0x61, 0x63, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, + 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, + 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x26, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, + 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xe1, 0x06, 0x0a, 0x04, 0x52, 0x42, 0x41, 0x43, 0x12, 0x43, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x42, 0x41, 0x43, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, + 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, + 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x2e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x12, 0x62, 0x0a, 0x15, 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x6c, 0x6f, 0x67, + 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x2e, 0x41, + 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x13, 0x61, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xc4, 0x03, 0x0a, 0x13, 0x41, 0x75, 0x64, 0x69, + 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x70, 0x0a, 0x0f, 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x42, 0x41, 0x43, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, + 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, + 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, + 0x01, 0x52, 0x0e, 0x61, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x67, 0x0a, 0x0e, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x42, 0x41, 0x43, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x69, + 0x6e, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, + 0x6f, 0x67, 0x67, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0d, 0x6c, 0x6f, 0x67, + 0x67, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x1a, 0x83, 0x01, 0x0a, 0x11, 0x41, + 0x75, 0x64, 0x69, 0x74, 
0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x4d, 0x0a, 0x0c, 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x0b, 0x61, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, + 0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x22, 0x4c, 0x0a, 0x0e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, + 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x59, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x4e, 0x5f, + 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x4f, 0x4e, 0x5f, 0x44, 0x45, + 0x4e, 0x59, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x03, 0x1a, 0x59, + 0x0a, 0x0d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x32, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x26, 0x0a, 0x06, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x00, 0x12, 0x08, + 0x0a, 0x04, 0x44, 0x45, 0x4e, 0x59, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x4c, 0x4f, 0x47, 0x10, + 0x02, 0x3a, 0x20, 0x9a, 0xc5, 0x88, 0x1e, 0x1b, 0x0a, 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x52, + 0x42, 0x41, 0x43, 0x22, 0x93, 0x03, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4c, + 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, + 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x0a, + 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, + 0x6c, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0a, 0x70, 0x72, 0x69, + 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x73, 0x12, 0x5a, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 
0x70, 0x72, 0x42, 0x1c, 0xf2, 0x98, 0xfe, 0x8f, + 0x05, 0x16, 0x12, 0x14, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x70, 0x0a, 0x11, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x63, + 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, + 0x64, 0x45, 0x78, 0x70, 0x72, 0x42, 0x1c, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x16, 0x12, 0x14, 0x65, + 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x52, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x22, 0x9a, 0xc5, 0x88, 0x1e, 0x1d, 0x0a, 0x1b, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, + 0x76, 0x32, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xab, 0x08, 0x0a, 0x0a, 0x50, 0x65, + 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x43, 0x0a, 0x09, 0x61, 0x6e, 0x64, 0x5f, + 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, + 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, + 0x74, 0x48, 0x00, 0x52, 0x08, 0x61, 0x6e, 0x64, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x41, 0x0a, + 0x08, 0x6f, 0x72, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, + 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x07, 0x6f, 0x72, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x12, 0x1b, 0x0a, 0x03, 0x61, 0x6e, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x6a, 0x02, 0x08, 0x01, 0x48, 0x00, 0x52, 0x03, 0x61, 0x6e, 0x79, 0x12, 0x3e, 0x0a, + 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3f, 0x0a, + 0x08, 0x75, 0x72, 0x6c, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x75, 0x72, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x12, 0x48, + 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x70, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, + 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x70, 0x12, 0x36, 0x0a, 0x10, 
0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0d, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, 0x04, 0x18, 0xff, 0xff, 0x03, 0x48, 0x00, 0x52, + 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, + 0x12, 0x51, 0x0a, 0x16, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x14, 0x64, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3d, 0x0a, 0x08, 0x6e, 0x6f, 0x74, + 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, + 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, + 0x07, 0x6e, 0x6f, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x5a, 0x0a, 0x15, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, + 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4f, 0x0a, 0x0c, + 0x75, 0x72, 0x69, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x18, 0x0d, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, + 0x52, 0x0b, 0x75, 0x72, 0x69, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x1a, 0x73, 0x0a, + 0x03, 0x53, 0x65, 0x74, 0x12, 0x40, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, + 
0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, + 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x53, + 0x65, 0x74, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, + 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x0a, 0x04, 0x72, 0x75, + 0x6c, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xeb, 0x08, 0x0a, 0x09, 0x50, 0x72, 0x69, 0x6e, + 0x63, 0x69, 0x70, 0x61, 0x6c, 0x12, 0x3e, 0x0a, 0x07, 0x61, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, + 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x06, 0x61, + 0x6e, 0x64, 0x49, 0x64, 0x73, 0x12, 0x3c, 0x0a, 0x06, 0x6f, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x69, + 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x05, 0x6f, 0x72, + 0x49, 0x64, 0x73, 0x12, 0x1b, 0x0a, 0x03, 0x61, 0x6e, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x6a, 0x02, 0x08, 0x01, 0x48, 0x00, 0x52, 0x03, 0x61, 0x6e, 0x79, + 0x12, 0x55, 0x0a, 0x0d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, + 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x48, 0x00, 0x52, 0x0d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, + 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x4b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x69, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, + 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x49, 0x70, 0x12, 0x4b, 0x0a, 0x10, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, + 0x00, 0x52, 0x0e, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x49, + 0x70, 0x12, 0x3e, 0x0a, 0x09, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x49, + 0x70, 0x12, 0x3e, 0x0a, 
0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x12, 0x3f, 0x0a, 0x08, 0x75, 0x72, 0x6c, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, 0x68, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x75, 0x72, 0x6c, 0x50, 0x61, + 0x74, 0x68, 0x12, 0x44, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x08, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x4e, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0b, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x38, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, + 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x48, 0x00, 0x52, 0x05, 0x6e, 0x6f, 0x74, + 0x49, 0x64, 0x1a, 0x6d, 0x0a, 0x03, 0x53, 0x65, 0x74, 0x12, 0x3b, 0x0a, 0x03, 0x69, 0x64, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, + 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, + 0x01, 0x52, 0x03, 0x69, 0x64, 0x73, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, + 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x2e, 0x53, 0x65, + 0x74, 0x1a, 0x97, 0x01, 0x0a, 0x0d, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x4b, 0x0a, 0x0e, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x52, 0x0d, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, + 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, + 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x2e, 
0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x3a, 0x25, 0x9a, 0xc5, 0x88, + 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, + 0x61, 0x6c, 0x42, 0x11, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x60, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x06, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x7d, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, + 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, + 0x61, 0x63, 0x2e, 0x76, 0x33, 0x42, 0x09, 0x52, 0x62, 0x61, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x62, 0x61, 0x63, 0x2f, 0x76, 0x33, 0x3b, + 0x72, 0x62, 0x61, 0x63, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_rbac_v3_rbac_proto_rawDescOnce sync.Once + file_envoy_config_rbac_v3_rbac_proto_rawDescData = file_envoy_config_rbac_v3_rbac_proto_rawDesc +) + +func file_envoy_config_rbac_v3_rbac_proto_rawDescGZIP() []byte { + file_envoy_config_rbac_v3_rbac_proto_rawDescOnce.Do(func() { + file_envoy_config_rbac_v3_rbac_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_rbac_v3_rbac_proto_rawDescData) + }) + return file_envoy_config_rbac_v3_rbac_proto_rawDescData +} + +var file_envoy_config_rbac_v3_rbac_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_envoy_config_rbac_v3_rbac_proto_msgTypes = make([]protoimpl.MessageInfo, 11) +var file_envoy_config_rbac_v3_rbac_proto_goTypes = []interface{}{ + (RBAC_Action)(0), // 0: envoy.config.rbac.v3.RBAC.Action + (RBAC_AuditLoggingOptions_AuditCondition)(0), // 1: envoy.config.rbac.v3.RBAC.AuditLoggingOptions.AuditCondition + (*RBAC)(nil), // 2: envoy.config.rbac.v3.RBAC + (*Policy)(nil), // 3: envoy.config.rbac.v3.Policy + (*Permission)(nil), // 4: envoy.config.rbac.v3.Permission + (*Principal)(nil), // 5: envoy.config.rbac.v3.Principal + (*Action)(nil), // 6: envoy.config.rbac.v3.Action + (*RBAC_AuditLoggingOptions)(nil), // 7: envoy.config.rbac.v3.RBAC.AuditLoggingOptions + nil, // 8: envoy.config.rbac.v3.RBAC.PoliciesEntry + (*RBAC_AuditLoggingOptions_AuditLoggerConfig)(nil), // 9: envoy.config.rbac.v3.RBAC.AuditLoggingOptions.AuditLoggerConfig + (*Permission_Set)(nil), // 10: envoy.config.rbac.v3.Permission.Set + (*Principal_Set)(nil), // 11: envoy.config.rbac.v3.Principal.Set + (*Principal_Authenticated)(nil), // 12: 
envoy.config.rbac.v3.Principal.Authenticated + (*v1alpha1.Expr)(nil), // 13: google.api.expr.v1alpha1.Expr + (*v1alpha1.CheckedExpr)(nil), // 14: google.api.expr.v1alpha1.CheckedExpr + (*v3.HeaderMatcher)(nil), // 15: envoy.config.route.v3.HeaderMatcher + (*v31.PathMatcher)(nil), // 16: envoy.type.matcher.v3.PathMatcher + (*v32.CidrRange)(nil), // 17: envoy.config.core.v3.CidrRange + (*v33.Int32Range)(nil), // 18: envoy.type.v3.Int32Range + (*v31.MetadataMatcher)(nil), // 19: envoy.type.matcher.v3.MetadataMatcher + (*v31.StringMatcher)(nil), // 20: envoy.type.matcher.v3.StringMatcher + (*v32.TypedExtensionConfig)(nil), // 21: envoy.config.core.v3.TypedExtensionConfig + (*v31.FilterStateMatcher)(nil), // 22: envoy.type.matcher.v3.FilterStateMatcher +} +var file_envoy_config_rbac_v3_rbac_proto_depIdxs = []int32{ + 0, // 0: envoy.config.rbac.v3.RBAC.action:type_name -> envoy.config.rbac.v3.RBAC.Action + 8, // 1: envoy.config.rbac.v3.RBAC.policies:type_name -> envoy.config.rbac.v3.RBAC.PoliciesEntry + 7, // 2: envoy.config.rbac.v3.RBAC.audit_logging_options:type_name -> envoy.config.rbac.v3.RBAC.AuditLoggingOptions + 4, // 3: envoy.config.rbac.v3.Policy.permissions:type_name -> envoy.config.rbac.v3.Permission + 5, // 4: envoy.config.rbac.v3.Policy.principals:type_name -> envoy.config.rbac.v3.Principal + 13, // 5: envoy.config.rbac.v3.Policy.condition:type_name -> google.api.expr.v1alpha1.Expr + 14, // 6: envoy.config.rbac.v3.Policy.checked_condition:type_name -> google.api.expr.v1alpha1.CheckedExpr + 10, // 7: envoy.config.rbac.v3.Permission.and_rules:type_name -> envoy.config.rbac.v3.Permission.Set + 10, // 8: envoy.config.rbac.v3.Permission.or_rules:type_name -> envoy.config.rbac.v3.Permission.Set + 15, // 9: envoy.config.rbac.v3.Permission.header:type_name -> envoy.config.route.v3.HeaderMatcher + 16, // 10: envoy.config.rbac.v3.Permission.url_path:type_name -> envoy.type.matcher.v3.PathMatcher + 17, // 11: envoy.config.rbac.v3.Permission.destination_ip:type_name -> envoy.config.core.v3.CidrRange + 18, // 12: envoy.config.rbac.v3.Permission.destination_port_range:type_name -> envoy.type.v3.Int32Range + 19, // 13: envoy.config.rbac.v3.Permission.metadata:type_name -> envoy.type.matcher.v3.MetadataMatcher + 4, // 14: envoy.config.rbac.v3.Permission.not_rule:type_name -> envoy.config.rbac.v3.Permission + 20, // 15: envoy.config.rbac.v3.Permission.requested_server_name:type_name -> envoy.type.matcher.v3.StringMatcher + 21, // 16: envoy.config.rbac.v3.Permission.matcher:type_name -> envoy.config.core.v3.TypedExtensionConfig + 21, // 17: envoy.config.rbac.v3.Permission.uri_template:type_name -> envoy.config.core.v3.TypedExtensionConfig + 11, // 18: envoy.config.rbac.v3.Principal.and_ids:type_name -> envoy.config.rbac.v3.Principal.Set + 11, // 19: envoy.config.rbac.v3.Principal.or_ids:type_name -> envoy.config.rbac.v3.Principal.Set + 12, // 20: envoy.config.rbac.v3.Principal.authenticated:type_name -> envoy.config.rbac.v3.Principal.Authenticated + 17, // 21: envoy.config.rbac.v3.Principal.source_ip:type_name -> envoy.config.core.v3.CidrRange + 17, // 22: envoy.config.rbac.v3.Principal.direct_remote_ip:type_name -> envoy.config.core.v3.CidrRange + 17, // 23: envoy.config.rbac.v3.Principal.remote_ip:type_name -> envoy.config.core.v3.CidrRange + 15, // 24: envoy.config.rbac.v3.Principal.header:type_name -> envoy.config.route.v3.HeaderMatcher + 16, // 25: envoy.config.rbac.v3.Principal.url_path:type_name -> envoy.type.matcher.v3.PathMatcher + 19, // 26: envoy.config.rbac.v3.Principal.metadata:type_name 
-> envoy.type.matcher.v3.MetadataMatcher + 22, // 27: envoy.config.rbac.v3.Principal.filter_state:type_name -> envoy.type.matcher.v3.FilterStateMatcher + 5, // 28: envoy.config.rbac.v3.Principal.not_id:type_name -> envoy.config.rbac.v3.Principal + 0, // 29: envoy.config.rbac.v3.Action.action:type_name -> envoy.config.rbac.v3.RBAC.Action + 1, // 30: envoy.config.rbac.v3.RBAC.AuditLoggingOptions.audit_condition:type_name -> envoy.config.rbac.v3.RBAC.AuditLoggingOptions.AuditCondition + 9, // 31: envoy.config.rbac.v3.RBAC.AuditLoggingOptions.logger_configs:type_name -> envoy.config.rbac.v3.RBAC.AuditLoggingOptions.AuditLoggerConfig + 3, // 32: envoy.config.rbac.v3.RBAC.PoliciesEntry.value:type_name -> envoy.config.rbac.v3.Policy + 21, // 33: envoy.config.rbac.v3.RBAC.AuditLoggingOptions.AuditLoggerConfig.audit_logger:type_name -> envoy.config.core.v3.TypedExtensionConfig + 4, // 34: envoy.config.rbac.v3.Permission.Set.rules:type_name -> envoy.config.rbac.v3.Permission + 5, // 35: envoy.config.rbac.v3.Principal.Set.ids:type_name -> envoy.config.rbac.v3.Principal + 20, // 36: envoy.config.rbac.v3.Principal.Authenticated.principal_name:type_name -> envoy.type.matcher.v3.StringMatcher + 37, // [37:37] is the sub-list for method output_type + 37, // [37:37] is the sub-list for method input_type + 37, // [37:37] is the sub-list for extension type_name + 37, // [37:37] is the sub-list for extension extendee + 0, // [0:37] is the sub-list for field type_name +} + +func init() { file_envoy_config_rbac_v3_rbac_proto_init() } +func file_envoy_config_rbac_v3_rbac_proto_init() { + if File_envoy_config_rbac_v3_rbac_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_rbac_v3_rbac_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RBAC); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_rbac_v3_rbac_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Policy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_rbac_v3_rbac_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Permission); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_rbac_v3_rbac_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Principal); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_rbac_v3_rbac_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Action); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_rbac_v3_rbac_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RBAC_AuditLoggingOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_rbac_v3_rbac_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RBAC_AuditLoggingOptions_AuditLoggerConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_envoy_config_rbac_v3_rbac_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Permission_Set); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_rbac_v3_rbac_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Principal_Set); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_rbac_v3_rbac_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Principal_Authenticated); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_rbac_v3_rbac_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Permission_AndRules)(nil), + (*Permission_OrRules)(nil), + (*Permission_Any)(nil), + (*Permission_Header)(nil), + (*Permission_UrlPath)(nil), + (*Permission_DestinationIp)(nil), + (*Permission_DestinationPort)(nil), + (*Permission_DestinationPortRange)(nil), + (*Permission_Metadata)(nil), + (*Permission_NotRule)(nil), + (*Permission_RequestedServerName)(nil), + (*Permission_Matcher)(nil), + (*Permission_UriTemplate)(nil), + } + file_envoy_config_rbac_v3_rbac_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*Principal_AndIds)(nil), + (*Principal_OrIds)(nil), + (*Principal_Any)(nil), + (*Principal_Authenticated_)(nil), + (*Principal_SourceIp)(nil), + (*Principal_DirectRemoteIp)(nil), + (*Principal_RemoteIp)(nil), + (*Principal_Header)(nil), + (*Principal_UrlPath)(nil), + (*Principal_Metadata)(nil), + (*Principal_FilterState)(nil), + (*Principal_NotId)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_rbac_v3_rbac_proto_rawDesc, + NumEnums: 2, + NumMessages: 11, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_rbac_v3_rbac_proto_goTypes, + DependencyIndexes: file_envoy_config_rbac_v3_rbac_proto_depIdxs, + EnumInfos: file_envoy_config_rbac_v3_rbac_proto_enumTypes, + MessageInfos: file_envoy_config_rbac_v3_rbac_proto_msgTypes, + }.Build() + File_envoy_config_rbac_v3_rbac_proto = out.File + file_envoy_config_rbac_v3_rbac_proto_rawDesc = nil + file_envoy_config_rbac_v3_rbac_proto_goTypes = nil + file_envoy_config_rbac_v3_rbac_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.validate.go new file mode 100644 index 0000000000..f80fd60974 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac.pb.validate.go @@ -0,0 +1,2509 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/config/rbac/v3/rbac.proto + +package rbacv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on RBAC with the rules defined in the proto +// definition for this message. If any rules are violated, the first error +// encountered is returned, or nil if there are no violations. +func (m *RBAC) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RBAC with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in RBACMultiError, or nil if none found. +func (m *RBAC) ValidateAll() error { + return m.validate(true) +} + +func (m *RBAC) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := RBAC_Action_name[int32(m.GetAction())]; !ok { + err := RBACValidationError{ + field: "Action", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + { + sorted_keys := make([]string, len(m.GetPolicies())) + i := 0 + for key := range m.GetPolicies() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetPolicies()[key] + _ = val + + // no validation rules for Policies[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RBACValidationError{ + field: fmt.Sprintf("Policies[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RBACValidationError{ + field: fmt.Sprintf("Policies[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RBACValidationError{ + field: fmt.Sprintf("Policies[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + if all { + switch v := interface{}(m.GetAuditLoggingOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RBACValidationError{ + field: "AuditLoggingOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RBACValidationError{ + field: "AuditLoggingOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAuditLoggingOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RBACValidationError{ + field: "AuditLoggingOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RBACMultiError(errors) + } + + return nil +} + +// 
RBACMultiError is an error wrapping multiple validation errors returned by +// RBAC.ValidateAll() if the designated constraints aren't met. +type RBACMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RBACMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RBACMultiError) AllErrors() []error { return m } + +// RBACValidationError is the validation error returned by RBAC.Validate if the +// designated constraints aren't met. +type RBACValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RBACValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RBACValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RBACValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RBACValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RBACValidationError) ErrorName() string { return "RBACValidationError" } + +// Error satisfies the builtin error interface +func (e RBACValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRBAC.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RBACValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RBACValidationError{} + +// Validate checks the field values on Policy with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Policy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Policy with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in PolicyMultiError, or nil if none found. 
+func (m *Policy) ValidateAll() error { + return m.validate(true) +} + +func (m *Policy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetPermissions()) < 1 { + err := PolicyValidationError{ + field: "Permissions", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetPermissions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PolicyValidationError{ + field: fmt.Sprintf("Permissions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PolicyValidationError{ + field: fmt.Sprintf("Permissions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PolicyValidationError{ + field: fmt.Sprintf("Permissions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(m.GetPrincipals()) < 1 { + err := PolicyValidationError{ + field: "Principals", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetPrincipals() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PolicyValidationError{ + field: fmt.Sprintf("Principals[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PolicyValidationError{ + field: fmt.Sprintf("Principals[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PolicyValidationError{ + field: fmt.Sprintf("Principals[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetCondition()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PolicyValidationError{ + field: "Condition", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PolicyValidationError{ + field: "Condition", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCondition()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PolicyValidationError{ + field: "Condition", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCheckedCondition()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PolicyValidationError{ + field: "CheckedCondition", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, 
PolicyValidationError{ + field: "CheckedCondition", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCheckedCondition()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PolicyValidationError{ + field: "CheckedCondition", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return PolicyMultiError(errors) + } + + return nil +} + +// PolicyMultiError is an error wrapping multiple validation errors returned by +// Policy.ValidateAll() if the designated constraints aren't met. +type PolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PolicyMultiError) AllErrors() []error { return m } + +// PolicyValidationError is the validation error returned by Policy.Validate if +// the designated constraints aren't met. +type PolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e PolicyValidationError) ErrorName() string { return "PolicyValidationError" } + +// Error satisfies the builtin error interface +func (e PolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPolicy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PolicyValidationError{} + +// Validate checks the field values on Permission with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Permission) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Permission with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in PermissionMultiError, or +// nil if none found. 
+func (m *Permission) ValidateAll() error { + return m.validate(true) +} + +func (m *Permission) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofRulePresent := false + switch v := m.Rule.(type) { + case *Permission_AndRules: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetAndRules()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "AndRules", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "AndRules", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAndRules()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PermissionValidationError{ + field: "AndRules", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Permission_OrRules: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetOrRules()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "OrRules", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "OrRules", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOrRules()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PermissionValidationError{ + field: "OrRules", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Permission_Any: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if m.GetAny() != true { + err := PermissionValidationError{ + field: "Any", + reason: "value must equal true", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *Permission_Header: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHeader()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeader()).(interface{ Validate() error }); ok { + if err := v.Validate(); err 
!= nil { + return PermissionValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Permission_UrlPath: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetUrlPath()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "UrlPath", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "UrlPath", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUrlPath()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PermissionValidationError{ + field: "UrlPath", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Permission_DestinationIp: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetDestinationIp()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "DestinationIp", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "DestinationIp", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDestinationIp()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PermissionValidationError{ + field: "DestinationIp", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Permission_DestinationPort: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if m.GetDestinationPort() > 65535 { + err := PermissionValidationError{ + field: "DestinationPort", + reason: "value must be less than or equal to 65535", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *Permission_DestinationPortRange: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetDestinationPortRange()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "DestinationPortRange", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "DestinationPortRange", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDestinationPortRange()).(interface{ 
Validate() error }); ok { + if err := v.Validate(); err != nil { + return PermissionValidationError{ + field: "DestinationPortRange", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Permission_Metadata: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PermissionValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Permission_NotRule: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetNotRule()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "NotRule", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "NotRule", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNotRule()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PermissionValidationError{ + field: "NotRule", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Permission_RequestedServerName: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetRequestedServerName()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "RequestedServerName", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "RequestedServerName", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequestedServerName()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PermissionValidationError{ + field: "RequestedServerName", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Permission_Matcher: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent 
= true + + if all { + switch v := interface{}(m.GetMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PermissionValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Permission_UriTemplate: + if v == nil { + err := PermissionValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetUriTemplate()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "UriTemplate", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PermissionValidationError{ + field: "UriTemplate", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUriTemplate()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PermissionValidationError{ + field: "UriTemplate", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofRulePresent { + err := PermissionValidationError{ + field: "Rule", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return PermissionMultiError(errors) + } + + return nil +} + +// PermissionMultiError is an error wrapping multiple validation errors +// returned by Permission.ValidateAll() if the designated constraints aren't met. +type PermissionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PermissionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PermissionMultiError) AllErrors() []error { return m } + +// PermissionValidationError is the validation error returned by +// Permission.Validate if the designated constraints aren't met. +type PermissionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PermissionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PermissionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PermissionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PermissionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e PermissionValidationError) ErrorName() string { return "PermissionValidationError" } + +// Error satisfies the builtin error interface +func (e PermissionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPermission.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PermissionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PermissionValidationError{} + +// Validate checks the field values on Principal with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Principal) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Principal with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in PrincipalMultiError, or nil +// if none found. +func (m *Principal) ValidateAll() error { + return m.validate(true) +} + +func (m *Principal) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofIdentifierPresent := false + switch v := m.Identifier.(type) { + case *Principal_AndIds: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true + + if all { + switch v := interface{}(m.GetAndIds()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "AndIds", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "AndIds", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAndIds()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PrincipalValidationError{ + field: "AndIds", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Principal_OrIds: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true + + if all { + switch v := interface{}(m.GetOrIds()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "OrIds", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "OrIds", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOrIds()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PrincipalValidationError{ + field: "OrIds", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Principal_Any: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: 
"oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true + + if m.GetAny() != true { + err := PrincipalValidationError{ + field: "Any", + reason: "value must equal true", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *Principal_Authenticated_: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true + + if all { + switch v := interface{}(m.GetAuthenticated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "Authenticated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "Authenticated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAuthenticated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PrincipalValidationError{ + field: "Authenticated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Principal_SourceIp: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true + + if all { + switch v := interface{}(m.GetSourceIp()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "SourceIp", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "SourceIp", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSourceIp()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PrincipalValidationError{ + field: "SourceIp", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Principal_DirectRemoteIp: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true + + if all { + switch v := interface{}(m.GetDirectRemoteIp()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "DirectRemoteIp", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "DirectRemoteIp", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDirectRemoteIp()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PrincipalValidationError{ + field: "DirectRemoteIp", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Principal_RemoteIp: + if v == nil { + err := PrincipalValidationError{ + field: 
"Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true + + if all { + switch v := interface{}(m.GetRemoteIp()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "RemoteIp", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "RemoteIp", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRemoteIp()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PrincipalValidationError{ + field: "RemoteIp", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Principal_Header: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true + + if all { + switch v := interface{}(m.GetHeader()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeader()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PrincipalValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Principal_UrlPath: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true + + if all { + switch v := interface{}(m.GetUrlPath()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "UrlPath", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "UrlPath", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUrlPath()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PrincipalValidationError{ + field: "UrlPath", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Principal_Metadata: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if 
err := v.Validate(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PrincipalValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Principal_FilterState: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true + + if all { + switch v := interface{}(m.GetFilterState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "FilterState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "FilterState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilterState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PrincipalValidationError{ + field: "FilterState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Principal_NotId: + if v == nil { + err := PrincipalValidationError{ + field: "Identifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofIdentifierPresent = true + + if all { + switch v := interface{}(m.GetNotId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "NotId", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PrincipalValidationError{ + field: "NotId", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNotId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PrincipalValidationError{ + field: "NotId", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofIdentifierPresent { + err := PrincipalValidationError{ + field: "Identifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return PrincipalMultiError(errors) + } + + return nil +} + +// PrincipalMultiError is an error wrapping multiple validation errors returned +// by Principal.ValidateAll() if the designated constraints aren't met. +type PrincipalMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PrincipalMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PrincipalMultiError) AllErrors() []error { return m } + +// PrincipalValidationError is the validation error returned by +// Principal.Validate if the designated constraints aren't met. 
+type PrincipalValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PrincipalValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PrincipalValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PrincipalValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PrincipalValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e PrincipalValidationError) ErrorName() string { return "PrincipalValidationError" } + +// Error satisfies the builtin error interface +func (e PrincipalValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPrincipal.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PrincipalValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PrincipalValidationError{} + +// Validate checks the field values on Action with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Action) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Action with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in ActionMultiError, or nil if none found. +func (m *Action) ValidateAll() error { + return m.validate(true) +} + +func (m *Action) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := ActionValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for Action + + if len(errors) > 0 { + return ActionMultiError(errors) + } + + return nil +} + +// ActionMultiError is an error wrapping multiple validation errors returned by +// Action.ValidateAll() if the designated constraints aren't met. +type ActionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ActionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ActionMultiError) AllErrors() []error { return m } + +// ActionValidationError is the validation error returned by Action.Validate if +// the designated constraints aren't met. +type ActionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ActionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ActionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ActionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ActionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ActionValidationError) ErrorName() string { return "ActionValidationError" } + +// Error satisfies the builtin error interface +func (e ActionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAction.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ActionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ActionValidationError{} + +// Validate checks the field values on RBAC_AuditLoggingOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RBAC_AuditLoggingOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RBAC_AuditLoggingOptions with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RBAC_AuditLoggingOptionsMultiError, or nil if none found. +func (m *RBAC_AuditLoggingOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *RBAC_AuditLoggingOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := RBAC_AuditLoggingOptions_AuditCondition_name[int32(m.GetAuditCondition())]; !ok { + err := RBAC_AuditLoggingOptionsValidationError{ + field: "AuditCondition", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetLoggerConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RBAC_AuditLoggingOptionsValidationError{ + field: fmt.Sprintf("LoggerConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RBAC_AuditLoggingOptionsValidationError{ + field: fmt.Sprintf("LoggerConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RBAC_AuditLoggingOptionsValidationError{ + field: fmt.Sprintf("LoggerConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return RBAC_AuditLoggingOptionsMultiError(errors) + } + + return nil +} + +// RBAC_AuditLoggingOptionsMultiError is an error wrapping multiple validation +// errors returned by RBAC_AuditLoggingOptions.ValidateAll() if the designated +// constraints aren't met. +type RBAC_AuditLoggingOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RBAC_AuditLoggingOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m RBAC_AuditLoggingOptionsMultiError) AllErrors() []error { return m } + +// RBAC_AuditLoggingOptionsValidationError is the validation error returned by +// RBAC_AuditLoggingOptions.Validate if the designated constraints aren't met. +type RBAC_AuditLoggingOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RBAC_AuditLoggingOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RBAC_AuditLoggingOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RBAC_AuditLoggingOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RBAC_AuditLoggingOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RBAC_AuditLoggingOptionsValidationError) ErrorName() string { + return "RBAC_AuditLoggingOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e RBAC_AuditLoggingOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRBAC_AuditLoggingOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RBAC_AuditLoggingOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RBAC_AuditLoggingOptionsValidationError{} + +// Validate checks the field values on +// RBAC_AuditLoggingOptions_AuditLoggerConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RBAC_AuditLoggingOptions_AuditLoggerConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// RBAC_AuditLoggingOptions_AuditLoggerConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// RBAC_AuditLoggingOptions_AuditLoggerConfigMultiError, or nil if none found. 
+func (m *RBAC_AuditLoggingOptions_AuditLoggerConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *RBAC_AuditLoggingOptions_AuditLoggerConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetAuditLogger()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError{ + field: "AuditLogger", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError{ + field: "AuditLogger", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAuditLogger()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError{ + field: "AuditLogger", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for IsOptional + + if len(errors) > 0 { + return RBAC_AuditLoggingOptions_AuditLoggerConfigMultiError(errors) + } + + return nil +} + +// RBAC_AuditLoggingOptions_AuditLoggerConfigMultiError is an error wrapping +// multiple validation errors returned by +// RBAC_AuditLoggingOptions_AuditLoggerConfig.ValidateAll() if the designated +// constraints aren't met. +type RBAC_AuditLoggingOptions_AuditLoggerConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RBAC_AuditLoggingOptions_AuditLoggerConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RBAC_AuditLoggingOptions_AuditLoggerConfigMultiError) AllErrors() []error { return m } + +// RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError is the validation +// error returned by RBAC_AuditLoggingOptions_AuditLoggerConfig.Validate if +// the designated constraints aren't met. +type RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError) ErrorName() string { + return "RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRBAC_AuditLoggingOptions_AuditLoggerConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RBAC_AuditLoggingOptions_AuditLoggerConfigValidationError{} + +// Validate checks the field values on Permission_Set with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Permission_Set) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Permission_Set with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in Permission_SetMultiError, +// or nil if none found. +func (m *Permission_Set) ValidateAll() error { + return m.validate(true) +} + +func (m *Permission_Set) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetRules()) < 1 { + err := Permission_SetValidationError{ + field: "Rules", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRules() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Permission_SetValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Permission_SetValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Permission_SetValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return Permission_SetMultiError(errors) + } + + return nil +} + +// Permission_SetMultiError is an error wrapping multiple validation errors +// returned by Permission_Set.ValidateAll() if the designated constraints +// aren't met. +type Permission_SetMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Permission_SetMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Permission_SetMultiError) AllErrors() []error { return m } + +// Permission_SetValidationError is the validation error returned by +// Permission_Set.Validate if the designated constraints aren't met. 
+type Permission_SetValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Permission_SetValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Permission_SetValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Permission_SetValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Permission_SetValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Permission_SetValidationError) ErrorName() string { return "Permission_SetValidationError" } + +// Error satisfies the builtin error interface +func (e Permission_SetValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPermission_Set.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Permission_SetValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Permission_SetValidationError{} + +// Validate checks the field values on Principal_Set with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Principal_Set) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Principal_Set with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in Principal_SetMultiError, or +// nil if none found. +func (m *Principal_Set) ValidateAll() error { + return m.validate(true) +} + +func (m *Principal_Set) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetIds()) < 1 { + err := Principal_SetValidationError{ + field: "Ids", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetIds() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Principal_SetValidationError{ + field: fmt.Sprintf("Ids[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Principal_SetValidationError{ + field: fmt.Sprintf("Ids[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Principal_SetValidationError{ + field: fmt.Sprintf("Ids[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return Principal_SetMultiError(errors) + } + + return nil +} + +// Principal_SetMultiError is an error wrapping multiple validation errors +// returned by Principal_Set.ValidateAll() if the designated constraints +// aren't met. +type Principal_SetMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m Principal_SetMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Principal_SetMultiError) AllErrors() []error { return m } + +// Principal_SetValidationError is the validation error returned by +// Principal_Set.Validate if the designated constraints aren't met. +type Principal_SetValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Principal_SetValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Principal_SetValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Principal_SetValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Principal_SetValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Principal_SetValidationError) ErrorName() string { return "Principal_SetValidationError" } + +// Error satisfies the builtin error interface +func (e Principal_SetValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPrincipal_Set.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Principal_SetValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Principal_SetValidationError{} + +// Validate checks the field values on Principal_Authenticated with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Principal_Authenticated) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Principal_Authenticated with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Principal_AuthenticatedMultiError, or nil if none found. 
+func (m *Principal_Authenticated) ValidateAll() error { + return m.validate(true) +} + +func (m *Principal_Authenticated) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetPrincipalName()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Principal_AuthenticatedValidationError{ + field: "PrincipalName", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Principal_AuthenticatedValidationError{ + field: "PrincipalName", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPrincipalName()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Principal_AuthenticatedValidationError{ + field: "PrincipalName", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return Principal_AuthenticatedMultiError(errors) + } + + return nil +} + +// Principal_AuthenticatedMultiError is an error wrapping multiple validation +// errors returned by Principal_Authenticated.ValidateAll() if the designated +// constraints aren't met. +type Principal_AuthenticatedMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Principal_AuthenticatedMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Principal_AuthenticatedMultiError) AllErrors() []error { return m } + +// Principal_AuthenticatedValidationError is the validation error returned by +// Principal_Authenticated.Validate if the designated constraints aren't met. +type Principal_AuthenticatedValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Principal_AuthenticatedValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Principal_AuthenticatedValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Principal_AuthenticatedValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Principal_AuthenticatedValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Principal_AuthenticatedValidationError) ErrorName() string { + return "Principal_AuthenticatedValidationError" +} + +// Error satisfies the builtin error interface +func (e Principal_AuthenticatedValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPrincipal_Authenticated.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Principal_AuthenticatedValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Principal_AuthenticatedValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac_vtproto.pb.go new file mode 100644 index 0000000000..940a9b37eb --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3/rbac_vtproto.pb.go @@ -0,0 +1,2103 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/rbac/v3/rbac.proto + +package rbacv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *RBAC_AuditLoggingOptions_AuditLoggerConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RBAC_AuditLoggingOptions_AuditLoggerConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RBAC_AuditLoggingOptions_AuditLoggerConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.IsOptional { + i-- + if m.IsOptional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.AuditLogger != nil { + if vtmsg, ok := interface{}(m.AuditLogger).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AuditLogger) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RBAC_AuditLoggingOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RBAC_AuditLoggingOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RBAC_AuditLoggingOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.LoggerConfigs) > 0 { + for iNdEx := len(m.LoggerConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.LoggerConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.AuditCondition != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.AuditCondition)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RBAC) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RBAC) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RBAC) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AuditLoggingOptions != nil { + size, err := m.AuditLoggingOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Policies) > 0 { + for k := range m.Policies { + v := m.Policies[k] + baseI := i + size, err := v.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if m.Action != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Action)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Policy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Policy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Policy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.CheckedCondition != nil { + if vtmsg, ok := interface{}(m.CheckedCondition).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CheckedCondition) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if 
m.Condition != nil { + if vtmsg, ok := interface{}(m.Condition).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Condition) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Principals) > 0 { + for iNdEx := len(m.Principals) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Principals[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Permissions) > 0 { + for iNdEx := len(m.Permissions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Permissions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Permission_Set) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Permission_Set) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_Set) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Rules[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Permission) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Permission) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Rule.(*Permission_UriTemplate); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_Matcher); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_DestinationPortRange); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_UrlPath); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := 
m.Rule.(*Permission_RequestedServerName); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_NotRule); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_Metadata); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_DestinationPort); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_DestinationIp); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_Header); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_Any); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_OrRules); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*Permission_AndRules); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Permission_AndRules) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_AndRules) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AndRules != nil { + size, err := m.AndRules.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Permission_OrRules) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_OrRules) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrRules != nil { + size, err := m.OrRules.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Permission_Any) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_Any) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.Any { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + return len(dAtA) - i, nil +} +func (m *Permission_Header) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_Header) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Header != nil { + if vtmsg, ok := interface{}(m.Header).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Header) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Permission_DestinationIp) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_DestinationIp) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DestinationIp != nil { + if vtmsg, ok := interface{}(m.DestinationIp).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DestinationIp) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Permission_DestinationPort) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_DestinationPort) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DestinationPort)) + i-- + dAtA[i] = 0x30 + return len(dAtA) - i, nil +} +func (m *Permission_Metadata) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_Metadata) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *Permission_NotRule) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_NotRule) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NotRule != nil { + size, err := m.NotRule.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *Permission_RequestedServerName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_RequestedServerName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RequestedServerName != nil { + 
if vtmsg, ok := interface{}(m.RequestedServerName).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RequestedServerName) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *Permission_UrlPath) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_UrlPath) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.UrlPath != nil { + if vtmsg, ok := interface{}(m.UrlPath).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UrlPath) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x52 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *Permission_DestinationPortRange) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_DestinationPortRange) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DestinationPortRange != nil { + if vtmsg, ok := interface{}(m.DestinationPortRange).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DestinationPortRange) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x5a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *Permission_Matcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_Matcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Matcher != nil { + if vtmsg, ok := interface{}(m.Matcher).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Matcher) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x62 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x62 + } + return len(dAtA) - i, nil +} +func (m *Permission_UriTemplate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Permission_UriTemplate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.UriTemplate != nil { + if vtmsg, ok := interface{}(m.UriTemplate).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UriTemplate) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x6a + } + return len(dAtA) - i, nil +} +func (m *Principal_Set) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Principal_Set) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_Set) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Ids) > 0 { + for iNdEx := len(m.Ids) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Ids[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Principal_Authenticated) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Principal_Authenticated) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_Authenticated) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PrincipalName != nil { + if vtmsg, ok := interface{}(m.PrincipalName).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.PrincipalName) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func (m *Principal) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Principal) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func 
(m *Principal) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Identifier.(*Principal_FilterState); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_RemoteIp); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_DirectRemoteIp); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_UrlPath); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_NotId); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_Metadata); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_Header); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_SourceIp); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_Authenticated_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_Any); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_OrIds); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Identifier.(*Principal_AndIds); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Principal_AndIds) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_AndIds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AndIds != nil { + size, err := m.AndIds.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Principal_OrIds) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_OrIds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrIds != nil { + size, err := m.OrIds.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Principal_Any) MarshalToVTStrict(dAtA []byte) (int, error) { + size := 
m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_Any) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.Any { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + return len(dAtA) - i, nil +} +func (m *Principal_Authenticated_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_Authenticated_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Authenticated != nil { + size, err := m.Authenticated.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Principal_SourceIp) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_SourceIp) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SourceIp != nil { + if vtmsg, ok := interface{}(m.SourceIp).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SourceIp) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Principal_Header) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_Header) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Header != nil { + if vtmsg, ok := interface{}(m.Header).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Header) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *Principal_Metadata) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_Metadata) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) 
+ i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *Principal_NotId) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_NotId) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NotId != nil { + size, err := m.NotId.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *Principal_UrlPath) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_UrlPath) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.UrlPath != nil { + if vtmsg, ok := interface{}(m.UrlPath).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UrlPath) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *Principal_DirectRemoteIp) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_DirectRemoteIp) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DirectRemoteIp != nil { + if vtmsg, ok := interface{}(m.DirectRemoteIp).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DirectRemoteIp) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x52 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *Principal_RemoteIp) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Principal_RemoteIp) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RemoteIp != nil { + if vtmsg, ok := interface{}(m.RemoteIp).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RemoteIp) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x5a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *Principal_FilterState) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m 
*Principal_FilterState) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FilterState != nil { + if vtmsg, ok := interface{}(m.FilterState).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.FilterState) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x62 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x62 + } + return len(dAtA) - i, nil +} +func (m *Action) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Action) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Action) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Action != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Action)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RBAC_AuditLoggingOptions_AuditLoggerConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AuditLogger != nil { + if size, ok := interface{}(m.AuditLogger).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.AuditLogger) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IsOptional { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RBAC_AuditLoggingOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AuditCondition != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.AuditCondition)) + } + if len(m.LoggerConfigs) > 0 { + for _, e := range m.LoggerConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RBAC) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Action != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Action)) + } + if len(m.Policies) > 0 { + for k, v := range m.Policies { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if m.AuditLoggingOptions != nil { + l = m.AuditLoggingOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Policy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Permissions) > 0 { + for _, e := range m.Permissions { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Principals) > 0 { + for _, e := range m.Principals { + l = e.SizeVT() + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Condition != nil { + if size, ok := interface{}(m.Condition).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Condition) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CheckedCondition != nil { + if size, ok := interface{}(m.CheckedCondition).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CheckedCondition) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Permission_Set) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Permission) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Rule.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Permission_AndRules) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AndRules != nil { + l = m.AndRules.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_OrRules) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrRules != nil { + l = m.OrRules.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_Any) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *Permission_Header) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + if size, ok := interface{}(m.Header).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Header) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_DestinationIp) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DestinationIp != nil { + if size, ok := interface{}(m.DestinationIp).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DestinationIp) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_DestinationPort) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.DestinationPort)) + return n +} +func (m *Permission_Metadata) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_NotRule) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NotRule != nil { + l = m.NotRule.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_RequestedServerName) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequestedServerName != nil { + if size, ok := interface{}(m.RequestedServerName).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RequestedServerName) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + 
n += 2 + } + return n +} +func (m *Permission_UrlPath) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UrlPath != nil { + if size, ok := interface{}(m.UrlPath).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UrlPath) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_DestinationPortRange) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DestinationPortRange != nil { + if size, ok := interface{}(m.DestinationPortRange).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DestinationPortRange) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_Matcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Matcher != nil { + if size, ok := interface{}(m.Matcher).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Matcher) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Permission_UriTemplate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UriTemplate != nil { + if size, ok := interface{}(m.UriTemplate).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UriTemplate) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_Set) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ids) > 0 { + for _, e := range m.Ids { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Principal_Authenticated) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PrincipalName != nil { + if size, ok := interface{}(m.PrincipalName).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.PrincipalName) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Principal) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Identifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Principal_AndIds) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AndIds != nil { + l = m.AndIds.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_OrIds) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrIds != nil { + l = m.OrIds.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_Any) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *Principal_Authenticated_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Authenticated != nil { + l = m.Authenticated.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_SourceIp) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SourceIp != nil { + if size, ok := interface{}(m.SourceIp).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SourceIp) + } + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_Header) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + if size, ok := interface{}(m.Header).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Header) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_Metadata) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_NotId) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NotId != nil { + l = m.NotId.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_UrlPath) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UrlPath != nil { + if size, ok := interface{}(m.UrlPath).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UrlPath) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_DirectRemoteIp) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DirectRemoteIp != nil { + if size, ok := interface{}(m.DirectRemoteIp).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DirectRemoteIp) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_RemoteIp) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RemoteIp != nil { + if size, ok := interface{}(m.RemoteIp).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RemoteIp) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Principal_FilterState) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FilterState != nil { + if size, ok := interface{}(m.FilterState).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.FilterState) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Action) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Action != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Action)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.go new file mode 100644 index 0000000000..a341065957 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.go @@ -0,0 +1,573 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/route/v3/route.proto + +package routev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// [#next-free-field: 18] +type RouteConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the route configuration. For example, it might match + // :ref:`route_config_name + // ` in + // :ref:`envoy_v3_api_msg_extensions.filters.network.http_connection_manager.v3.Rds`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // An array of virtual hosts that make up the route table. + VirtualHosts []*VirtualHost `protobuf:"bytes,2,rep,name=virtual_hosts,json=virtualHosts,proto3" json:"virtual_hosts,omitempty"` + // An array of virtual hosts will be dynamically loaded via the VHDS API. + // Both “virtual_hosts“ and “vhds“ fields will be used when present. “virtual_hosts“ can be used + // for a base routing table or for infrequently changing virtual hosts. “vhds“ is used for + // on-demand discovery of virtual hosts. The contents of these two fields will be merged to + // generate a routing table for a given RouteConfiguration, with “vhds“ derived configuration + // taking precedence. + Vhds *Vhds `protobuf:"bytes,9,opt,name=vhds,proto3" json:"vhds,omitempty"` + // Optionally specifies a list of HTTP headers that the connection manager + // will consider to be internal only. If they are found on external requests they will be cleaned + // prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more + // information. + InternalOnlyHeaders []string `protobuf:"bytes,3,rep,name=internal_only_headers,json=internalOnlyHeaders,proto3" json:"internal_only_headers,omitempty"` + // Specifies a list of HTTP headers that should be added to each response that + // the connection manager encodes. Headers specified at this level are applied + // after headers from any enclosed :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` or + // :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + ResponseHeadersToAdd []*v3.HeaderValueOption `protobuf:"bytes,4,rep,name=response_headers_to_add,json=responseHeadersToAdd,proto3" json:"response_headers_to_add,omitempty"` + // Specifies a list of HTTP headers that should be removed from each response + // that the connection manager encodes. + ResponseHeadersToRemove []string `protobuf:"bytes,5,rep,name=response_headers_to_remove,json=responseHeadersToRemove,proto3" json:"response_headers_to_remove,omitempty"` + // Specifies a list of HTTP headers that should be added to each request + // routed by the HTTP connection manager. 
Headers specified at this level are + // applied after headers from any enclosed :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` or + // :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + RequestHeadersToAdd []*v3.HeaderValueOption `protobuf:"bytes,6,rep,name=request_headers_to_add,json=requestHeadersToAdd,proto3" json:"request_headers_to_add,omitempty"` + // Specifies a list of HTTP headers that should be removed from each request + // routed by the HTTP connection manager. + RequestHeadersToRemove []string `protobuf:"bytes,8,rep,name=request_headers_to_remove,json=requestHeadersToRemove,proto3" json:"request_headers_to_remove,omitempty"` + // Headers mutations at all levels are evaluated, if specified. By default, the order is from most + // specific (i.e. route entry level) to least specific (i.e. route configuration level). Later header + // mutations may override earlier mutations. + // This order can be reversed by setting this field to true. In other words, most specific level mutation + // is evaluated last. + MostSpecificHeaderMutationsWins bool `protobuf:"varint,10,opt,name=most_specific_header_mutations_wins,json=mostSpecificHeaderMutationsWins,proto3" json:"most_specific_header_mutations_wins,omitempty"` + // An optional boolean that specifies whether the clusters that the route + // table refers to will be validated by the cluster manager. If set to true + // and a route refers to a non-existent cluster, the route table will not + // load. If set to false and a route refers to a non-existent cluster, the + // route table will load and the router filter will return a 404 if the route + // is selected at runtime. This setting defaults to true if the route table + // is statically defined via the :ref:`route_config + // ` + // option. This setting default to false if the route table is loaded dynamically via the + // :ref:`rds + // ` + // option. Users may wish to override the default behavior in certain cases (for example when + // using CDS with a static route table). + ValidateClusters *wrapperspb.BoolValue `protobuf:"bytes,7,opt,name=validate_clusters,json=validateClusters,proto3" json:"validate_clusters,omitempty"` + // The maximum bytes of the response :ref:`direct response body + // ` size. If not specified the default + // is 4096. + // + // .. warning:: + // + // Envoy currently holds the content of :ref:`direct response body + // ` in memory. Be careful setting + // this to be larger than the default 4KB, since the allocated memory for direct response body + // is not subject to data plane buffering controls. + MaxDirectResponseBodySizeBytes *wrapperspb.UInt32Value `protobuf:"bytes,11,opt,name=max_direct_response_body_size_bytes,json=maxDirectResponseBodySizeBytes,proto3" json:"max_direct_response_body_size_bytes,omitempty"` + // A list of plugins and their configurations which may be used by a + // :ref:`cluster specifier plugin name ` + // within the route. All “extension.name“ fields in this list must be unique. + ClusterSpecifierPlugins []*ClusterSpecifierPlugin `protobuf:"bytes,12,rep,name=cluster_specifier_plugins,json=clusterSpecifierPlugins,proto3" json:"cluster_specifier_plugins,omitempty"` + // Specify a set of default request mirroring policies which apply to all routes under its virtual hosts. + // Note that policies are not merged, the most specific non-empty one becomes the mirror policies. 
+ RequestMirrorPolicies []*RouteAction_RequestMirrorPolicy `protobuf:"bytes,13,rep,name=request_mirror_policies,json=requestMirrorPolicies,proto3" json:"request_mirror_policies,omitempty"` + // By default, port in :authority header (if any) is used in host matching. + // With this option enabled, Envoy will ignore the port number in the :authority header (if any) when picking VirtualHost. + // NOTE: this option will not strip the port number (if any) contained in route config + // :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`.domains field. + IgnorePortInHostMatching bool `protobuf:"varint,14,opt,name=ignore_port_in_host_matching,json=ignorePortInHostMatching,proto3" json:"ignore_port_in_host_matching,omitempty"` + // Ignore path-parameters in path-matching. + // Before RFC3986, URI were like(RFC1808): :///;?# + // Envoy by default takes ":path" as ";". + // For users who want to only match path on the "" portion, this option should be true. + IgnorePathParametersInPathMatching bool `protobuf:"varint,15,opt,name=ignore_path_parameters_in_path_matching,json=ignorePathParametersInPathMatching,proto3" json:"ignore_path_parameters_in_path_matching,omitempty"` + // This field can be used to provide RouteConfiguration level per filter config. The key should match the + // :ref:`filter config name + // `. + // See :ref:`Http filter route specific config ` + // for details. + // [#comment: An entry's value may be wrapped in a + // :ref:`FilterConfig` + // message to specify additional options.] + TypedPerFilterConfig map[string]*anypb.Any `protobuf:"bytes,16,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The metadata field can be used to provide additional information + // about the route configuration. It can be used for configuration, stats, and logging. + // The metadata should go under the filter namespace that will need it. + // For instance, if the metadata is intended for the Router filter, + // the filter name should be specified as “envoy.filters.http.router“. + Metadata *v3.Metadata `protobuf:"bytes,17,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *RouteConfiguration) Reset() { + *x = RouteConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteConfiguration) ProtoMessage() {} + +func (x *RouteConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteConfiguration.ProtoReflect.Descriptor instead. 
+func (*RouteConfiguration) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_proto_rawDescGZIP(), []int{0} +} + +func (x *RouteConfiguration) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *RouteConfiguration) GetVirtualHosts() []*VirtualHost { + if x != nil { + return x.VirtualHosts + } + return nil +} + +func (x *RouteConfiguration) GetVhds() *Vhds { + if x != nil { + return x.Vhds + } + return nil +} + +func (x *RouteConfiguration) GetInternalOnlyHeaders() []string { + if x != nil { + return x.InternalOnlyHeaders + } + return nil +} + +func (x *RouteConfiguration) GetResponseHeadersToAdd() []*v3.HeaderValueOption { + if x != nil { + return x.ResponseHeadersToAdd + } + return nil +} + +func (x *RouteConfiguration) GetResponseHeadersToRemove() []string { + if x != nil { + return x.ResponseHeadersToRemove + } + return nil +} + +func (x *RouteConfiguration) GetRequestHeadersToAdd() []*v3.HeaderValueOption { + if x != nil { + return x.RequestHeadersToAdd + } + return nil +} + +func (x *RouteConfiguration) GetRequestHeadersToRemove() []string { + if x != nil { + return x.RequestHeadersToRemove + } + return nil +} + +func (x *RouteConfiguration) GetMostSpecificHeaderMutationsWins() bool { + if x != nil { + return x.MostSpecificHeaderMutationsWins + } + return false +} + +func (x *RouteConfiguration) GetValidateClusters() *wrapperspb.BoolValue { + if x != nil { + return x.ValidateClusters + } + return nil +} + +func (x *RouteConfiguration) GetMaxDirectResponseBodySizeBytes() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxDirectResponseBodySizeBytes + } + return nil +} + +func (x *RouteConfiguration) GetClusterSpecifierPlugins() []*ClusterSpecifierPlugin { + if x != nil { + return x.ClusterSpecifierPlugins + } + return nil +} + +func (x *RouteConfiguration) GetRequestMirrorPolicies() []*RouteAction_RequestMirrorPolicy { + if x != nil { + return x.RequestMirrorPolicies + } + return nil +} + +func (x *RouteConfiguration) GetIgnorePortInHostMatching() bool { + if x != nil { + return x.IgnorePortInHostMatching + } + return false +} + +func (x *RouteConfiguration) GetIgnorePathParametersInPathMatching() bool { + if x != nil { + return x.IgnorePathParametersInPathMatching + } + return false +} + +func (x *RouteConfiguration) GetTypedPerFilterConfig() map[string]*anypb.Any { + if x != nil { + return x.TypedPerFilterConfig + } + return nil +} + +func (x *RouteConfiguration) GetMetadata() *v3.Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +type Vhds struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configuration source specifier for VHDS. 
+ ConfigSource *v3.ConfigSource `protobuf:"bytes,1,opt,name=config_source,json=configSource,proto3" json:"config_source,omitempty"` +} + +func (x *Vhds) Reset() { + *x = Vhds{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Vhds) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Vhds) ProtoMessage() {} + +func (x *Vhds) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Vhds.ProtoReflect.Descriptor instead. +func (*Vhds) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_proto_rawDescGZIP(), []int{1} +} + +func (x *Vhds) GetConfigSource() *v3.ConfigSource { + if x != nil { + return x.ConfigSource + } + return nil +} + +var File_envoy_config_route_v3_route_proto protoreflect.FileDescriptor + +var file_envoy_config_route_v3_route_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, + 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x89, 0x0c, 0x0a, 0x12, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0d, 0x76, 0x69, 0x72, 
0x74, 0x75, 0x61, 0x6c, 0x5f, + 0x68, 0x6f, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x48, 0x6f, 0x73, 0x74, 0x52, + 0x0c, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x12, 0x2f, 0x0a, + 0x04, 0x76, 0x68, 0x64, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x68, 0x64, 0x73, 0x52, 0x04, 0x76, 0x68, 0x64, 0x73, 0x12, 0x44, + 0x0a, 0x15, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x5f, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, + 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, + 0x13, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4f, 0x6e, 0x6c, 0x79, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x12, 0x69, 0x0a, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, + 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, + 0x4d, 0x0a, 0x1a, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc8, + 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x67, + 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, + 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4b, 0x0a, 0x19, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, + 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x16, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x4c, 0x0a, 0x23, 0x6d, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x75, 
0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x77, 0x69, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x1f, 0x6d, 0x6f, 0x73, 0x74, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x57, 0x69, + 0x6e, 0x73, 0x12, 0x47, 0x0a, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x69, 0x0a, 0x23, 0x6d, + 0x61, 0x78, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1e, 0x6d, 0x61, 0x78, 0x44, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x53, 0x69, 0x7a, + 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x69, 0x0a, 0x19, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x17, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x73, 0x12, 0x6e, 0x0a, 0x17, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x69, 0x72, + 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, + 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x15, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x12, 0x3e, 0x0a, 0x1c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, + 0x5f, 0x69, 0x6e, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, + 0x67, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x50, + 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x48, 0x6f, 0x73, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, + 0x67, 0x12, 0x53, 0x0a, 0x27, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, + 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x5f, 0x69, 0x6e, 0x5f, 0x70, + 0x61, 0x74, 0x68, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x18, 0x0f, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x22, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x50, 0x61, 0x74, 0x68, 0x50, 0x61, + 0x72, 0x61, 0x6d, 
0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x7a, 0x0a, 0x17, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, + 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x74, 0x79, + 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x11, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x5d, + 0x0a, 0x19, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, + 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x26, 0x9a, + 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x73, 0x0a, 0x04, 0x56, 0x68, 0x64, 0x73, 0x12, 0x51, 0x0a, + 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, + 0x10, 0x01, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x3a, 0x18, 0x9a, 0xc5, 0x88, 0x1e, 0x13, 0x0a, 0x11, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x68, 0x64, 0x73, 0x42, 0x81, 0x01, 0xba, 0x80, 0xc8, + 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, + 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x3b, 
0x72, 0x6f, 0x75, 0x74, 0x65, 0x76, 0x33, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_route_v3_route_proto_rawDescOnce sync.Once + file_envoy_config_route_v3_route_proto_rawDescData = file_envoy_config_route_v3_route_proto_rawDesc +) + +func file_envoy_config_route_v3_route_proto_rawDescGZIP() []byte { + file_envoy_config_route_v3_route_proto_rawDescOnce.Do(func() { + file_envoy_config_route_v3_route_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_route_v3_route_proto_rawDescData) + }) + return file_envoy_config_route_v3_route_proto_rawDescData +} + +var file_envoy_config_route_v3_route_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_envoy_config_route_v3_route_proto_goTypes = []interface{}{ + (*RouteConfiguration)(nil), // 0: envoy.config.route.v3.RouteConfiguration + (*Vhds)(nil), // 1: envoy.config.route.v3.Vhds + nil, // 2: envoy.config.route.v3.RouteConfiguration.TypedPerFilterConfigEntry + (*VirtualHost)(nil), // 3: envoy.config.route.v3.VirtualHost + (*v3.HeaderValueOption)(nil), // 4: envoy.config.core.v3.HeaderValueOption + (*wrapperspb.BoolValue)(nil), // 5: google.protobuf.BoolValue + (*wrapperspb.UInt32Value)(nil), // 6: google.protobuf.UInt32Value + (*ClusterSpecifierPlugin)(nil), // 7: envoy.config.route.v3.ClusterSpecifierPlugin + (*RouteAction_RequestMirrorPolicy)(nil), // 8: envoy.config.route.v3.RouteAction.RequestMirrorPolicy + (*v3.Metadata)(nil), // 9: envoy.config.core.v3.Metadata + (*v3.ConfigSource)(nil), // 10: envoy.config.core.v3.ConfigSource + (*anypb.Any)(nil), // 11: google.protobuf.Any +} +var file_envoy_config_route_v3_route_proto_depIdxs = []int32{ + 3, // 0: envoy.config.route.v3.RouteConfiguration.virtual_hosts:type_name -> envoy.config.route.v3.VirtualHost + 1, // 1: envoy.config.route.v3.RouteConfiguration.vhds:type_name -> envoy.config.route.v3.Vhds + 4, // 2: envoy.config.route.v3.RouteConfiguration.response_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 4, // 3: envoy.config.route.v3.RouteConfiguration.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 5, // 4: envoy.config.route.v3.RouteConfiguration.validate_clusters:type_name -> google.protobuf.BoolValue + 6, // 5: envoy.config.route.v3.RouteConfiguration.max_direct_response_body_size_bytes:type_name -> google.protobuf.UInt32Value + 7, // 6: envoy.config.route.v3.RouteConfiguration.cluster_specifier_plugins:type_name -> envoy.config.route.v3.ClusterSpecifierPlugin + 8, // 7: envoy.config.route.v3.RouteConfiguration.request_mirror_policies:type_name -> envoy.config.route.v3.RouteAction.RequestMirrorPolicy + 2, // 8: envoy.config.route.v3.RouteConfiguration.typed_per_filter_config:type_name -> envoy.config.route.v3.RouteConfiguration.TypedPerFilterConfigEntry + 9, // 9: envoy.config.route.v3.RouteConfiguration.metadata:type_name -> envoy.config.core.v3.Metadata + 10, // 10: envoy.config.route.v3.Vhds.config_source:type_name -> envoy.config.core.v3.ConfigSource + 11, // 11: envoy.config.route.v3.RouteConfiguration.TypedPerFilterConfigEntry.value:type_name -> google.protobuf.Any + 12, // [12:12] is the sub-list for method output_type + 12, // [12:12] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_envoy_config_route_v3_route_proto_init() } +func file_envoy_config_route_v3_route_proto_init() { + if 
File_envoy_config_route_v3_route_proto != nil { + return + } + file_envoy_config_route_v3_route_components_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_route_v3_route_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Vhds); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_route_v3_route_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_route_v3_route_proto_goTypes, + DependencyIndexes: file_envoy_config_route_v3_route_proto_depIdxs, + MessageInfos: file_envoy_config_route_v3_route_proto_msgTypes, + }.Build() + File_envoy_config_route_v3_route_proto = out.File + file_envoy_config_route_v3_route_proto_rawDesc = nil + file_envoy_config_route_v3_route_proto_goTypes = nil + file_envoy_config_route_v3_route_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.validate.go new file mode 100644 index 0000000000..be062e5bd8 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route.pb.validate.go @@ -0,0 +1,693 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/route/v3/route.proto + +package routev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on RouteConfiguration with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RouteConfiguration) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteConfiguration with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RouteConfigurationMultiError, or nil if none found. 
+func (m *RouteConfiguration) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteConfiguration) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + for idx, item := range m.GetVirtualHosts() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("VirtualHosts[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("VirtualHosts[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteConfigurationValidationError{ + field: fmt.Sprintf("VirtualHosts[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetVhds()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: "Vhds", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: "Vhds", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetVhds()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteConfigurationValidationError{ + field: "Vhds", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetInternalOnlyHeaders() { + _, _ = idx, item + + if !_RouteConfiguration_InternalOnlyHeaders_Pattern.MatchString(item) { + err := RouteConfigurationValidationError{ + field: fmt.Sprintf("InternalOnlyHeaders[%v]", idx), + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(m.GetResponseHeadersToAdd()) > 1000 { + err := RouteConfigurationValidationError{ + field: "ResponseHeadersToAdd", + reason: "value must contain no more than 1000 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetResponseHeadersToAdd() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteConfigurationValidationError{ + field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + 
+ for idx, item := range m.GetResponseHeadersToRemove() { + _, _ = idx, item + + if !_RouteConfiguration_ResponseHeadersToRemove_Pattern.MatchString(item) { + err := RouteConfigurationValidationError{ + field: fmt.Sprintf("ResponseHeadersToRemove[%v]", idx), + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(m.GetRequestHeadersToAdd()) > 1000 { + err := RouteConfigurationValidationError{ + field: "RequestHeadersToAdd", + reason: "value must contain no more than 1000 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRequestHeadersToAdd() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteConfigurationValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetRequestHeadersToRemove() { + _, _ = idx, item + + if !_RouteConfiguration_RequestHeadersToRemove_Pattern.MatchString(item) { + err := RouteConfigurationValidationError{ + field: fmt.Sprintf("RequestHeadersToRemove[%v]", idx), + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + // no validation rules for MostSpecificHeaderMutationsWins + + if all { + switch v := interface{}(m.GetValidateClusters()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: "ValidateClusters", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: "ValidateClusters", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValidateClusters()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteConfigurationValidationError{ + field: "ValidateClusters", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMaxDirectResponseBodySizeBytes()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: "MaxDirectResponseBodySizeBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: "MaxDirectResponseBodySizeBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := 
interface{}(m.GetMaxDirectResponseBodySizeBytes()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteConfigurationValidationError{ + field: "MaxDirectResponseBodySizeBytes", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetClusterSpecifierPlugins() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("ClusterSpecifierPlugins[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("ClusterSpecifierPlugins[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteConfigurationValidationError{ + field: fmt.Sprintf("ClusterSpecifierPlugins[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetRequestMirrorPolicies() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("RequestMirrorPolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("RequestMirrorPolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteConfigurationValidationError{ + field: fmt.Sprintf("RequestMirrorPolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for IgnorePortInHostMatching + + // no validation rules for IgnorePathParametersInPathMatching + + { + sorted_keys := make([]string, len(m.GetTypedPerFilterConfig())) + i := 0 + for key := range m.GetTypedPerFilterConfig() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetTypedPerFilterConfig()[key] + _ = val + + // no validation rules for TypedPerFilterConfig[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteConfigurationValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", 
key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteConfigurationValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteConfigurationValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RouteConfigurationMultiError(errors) + } + + return nil +} + +// RouteConfigurationMultiError is an error wrapping multiple validation errors +// returned by RouteConfiguration.ValidateAll() if the designated constraints +// aren't met. +type RouteConfigurationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteConfigurationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteConfigurationMultiError) AllErrors() []error { return m } + +// RouteConfigurationValidationError is the validation error returned by +// RouteConfiguration.Validate if the designated constraints aren't met. +type RouteConfigurationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteConfigurationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteConfigurationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteConfigurationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteConfigurationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteConfigurationValidationError) ErrorName() string { + return "RouteConfigurationValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteConfigurationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteConfiguration.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteConfigurationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteConfigurationValidationError{} + +var _RouteConfiguration_InternalOnlyHeaders_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _RouteConfiguration_ResponseHeadersToRemove_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _RouteConfiguration_RequestHeadersToRemove_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on Vhds with the rules defined in the proto +// definition for this message. If any rules are violated, the first error +// encountered is returned, or nil if there are no violations. 
+func (m *Vhds) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Vhds with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in VhdsMultiError, or nil if none found. +func (m *Vhds) ValidateAll() error { + return m.validate(true) +} + +func (m *Vhds) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetConfigSource() == nil { + err := VhdsValidationError{ + field: "ConfigSource", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetConfigSource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VhdsValidationError{ + field: "ConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VhdsValidationError{ + field: "ConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfigSource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VhdsValidationError{ + field: "ConfigSource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return VhdsMultiError(errors) + } + + return nil +} + +// VhdsMultiError is an error wrapping multiple validation errors returned by +// Vhds.ValidateAll() if the designated constraints aren't met. +type VhdsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m VhdsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m VhdsMultiError) AllErrors() []error { return m } + +// VhdsValidationError is the validation error returned by Vhds.Validate if the +// designated constraints aren't met. +type VhdsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e VhdsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e VhdsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e VhdsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e VhdsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e VhdsValidationError) ErrorName() string { return "VhdsValidationError" } + +// Error satisfies the builtin error interface +func (e VhdsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sVhds.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = VhdsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = VhdsValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.go new file mode 100644 index 0000000000..bd5ae667fc --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.go @@ -0,0 +1,9078 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/route/v3/route_components.proto + +package routev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/cncf/xds/go/xds/annotations/v3" + v3 "github.com/cncf/xds/go/xds/type/matcher/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v31 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v32 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + v35 "github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3" + v34 "github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3" + v33 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type VirtualHost_TlsRequirementType int32 + +const ( + // No TLS requirement for the virtual host. + VirtualHost_NONE VirtualHost_TlsRequirementType = 0 + // External requests must use TLS. If a request is external and it is not + // using TLS, a 301 redirect will be sent telling the client to use HTTPS. + VirtualHost_EXTERNAL_ONLY VirtualHost_TlsRequirementType = 1 + // All requests must use TLS. If a request is not using TLS, a 301 redirect + // will be sent telling the client to use HTTPS. + VirtualHost_ALL VirtualHost_TlsRequirementType = 2 +) + +// Enum value maps for VirtualHost_TlsRequirementType. 
+var ( + VirtualHost_TlsRequirementType_name = map[int32]string{ + 0: "NONE", + 1: "EXTERNAL_ONLY", + 2: "ALL", + } + VirtualHost_TlsRequirementType_value = map[string]int32{ + "NONE": 0, + "EXTERNAL_ONLY": 1, + "ALL": 2, + } +) + +func (x VirtualHost_TlsRequirementType) Enum() *VirtualHost_TlsRequirementType { + p := new(VirtualHost_TlsRequirementType) + *p = x + return p +} + +func (x VirtualHost_TlsRequirementType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (VirtualHost_TlsRequirementType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_route_v3_route_components_proto_enumTypes[0].Descriptor() +} + +func (VirtualHost_TlsRequirementType) Type() protoreflect.EnumType { + return &file_envoy_config_route_v3_route_components_proto_enumTypes[0] +} + +func (x VirtualHost_TlsRequirementType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use VirtualHost_TlsRequirementType.Descriptor instead. +func (VirtualHost_TlsRequirementType) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{0, 0} +} + +type RouteAction_ClusterNotFoundResponseCode int32 + +const ( + // HTTP status code - 503 Service Unavailable. + RouteAction_SERVICE_UNAVAILABLE RouteAction_ClusterNotFoundResponseCode = 0 + // HTTP status code - 404 Not Found. + RouteAction_NOT_FOUND RouteAction_ClusterNotFoundResponseCode = 1 + // HTTP status code - 500 Internal Server Error. + RouteAction_INTERNAL_SERVER_ERROR RouteAction_ClusterNotFoundResponseCode = 2 +) + +// Enum value maps for RouteAction_ClusterNotFoundResponseCode. +var ( + RouteAction_ClusterNotFoundResponseCode_name = map[int32]string{ + 0: "SERVICE_UNAVAILABLE", + 1: "NOT_FOUND", + 2: "INTERNAL_SERVER_ERROR", + } + RouteAction_ClusterNotFoundResponseCode_value = map[string]int32{ + "SERVICE_UNAVAILABLE": 0, + "NOT_FOUND": 1, + "INTERNAL_SERVER_ERROR": 2, + } +) + +func (x RouteAction_ClusterNotFoundResponseCode) Enum() *RouteAction_ClusterNotFoundResponseCode { + p := new(RouteAction_ClusterNotFoundResponseCode) + *p = x + return p +} + +func (x RouteAction_ClusterNotFoundResponseCode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RouteAction_ClusterNotFoundResponseCode) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_route_v3_route_components_proto_enumTypes[1].Descriptor() +} + +func (RouteAction_ClusterNotFoundResponseCode) Type() protoreflect.EnumType { + return &file_envoy_config_route_v3_route_components_proto_enumTypes[1] +} + +func (x RouteAction_ClusterNotFoundResponseCode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RouteAction_ClusterNotFoundResponseCode.Descriptor instead. +func (RouteAction_ClusterNotFoundResponseCode) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 0} +} + +// Configures :ref:`internal redirect ` behavior. +// [#next-major-version: remove this definition - it's defined in the InternalRedirectPolicy message.] +// +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. 
+type RouteAction_InternalRedirectAction int32 + +const ( + RouteAction_PASS_THROUGH_INTERNAL_REDIRECT RouteAction_InternalRedirectAction = 0 + RouteAction_HANDLE_INTERNAL_REDIRECT RouteAction_InternalRedirectAction = 1 +) + +// Enum value maps for RouteAction_InternalRedirectAction. +var ( + RouteAction_InternalRedirectAction_name = map[int32]string{ + 0: "PASS_THROUGH_INTERNAL_REDIRECT", + 1: "HANDLE_INTERNAL_REDIRECT", + } + RouteAction_InternalRedirectAction_value = map[string]int32{ + "PASS_THROUGH_INTERNAL_REDIRECT": 0, + "HANDLE_INTERNAL_REDIRECT": 1, + } +) + +func (x RouteAction_InternalRedirectAction) Enum() *RouteAction_InternalRedirectAction { + p := new(RouteAction_InternalRedirectAction) + *p = x + return p +} + +func (x RouteAction_InternalRedirectAction) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RouteAction_InternalRedirectAction) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_route_v3_route_components_proto_enumTypes[2].Descriptor() +} + +func (RouteAction_InternalRedirectAction) Type() protoreflect.EnumType { + return &file_envoy_config_route_v3_route_components_proto_enumTypes[2] +} + +func (x RouteAction_InternalRedirectAction) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RouteAction_InternalRedirectAction.Descriptor instead. +func (RouteAction_InternalRedirectAction) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 1} +} + +type RetryPolicy_ResetHeaderFormat int32 + +const ( + RetryPolicy_SECONDS RetryPolicy_ResetHeaderFormat = 0 + RetryPolicy_UNIX_TIMESTAMP RetryPolicy_ResetHeaderFormat = 1 +) + +// Enum value maps for RetryPolicy_ResetHeaderFormat. +var ( + RetryPolicy_ResetHeaderFormat_name = map[int32]string{ + 0: "SECONDS", + 1: "UNIX_TIMESTAMP", + } + RetryPolicy_ResetHeaderFormat_value = map[string]int32{ + "SECONDS": 0, + "UNIX_TIMESTAMP": 1, + } +) + +func (x RetryPolicy_ResetHeaderFormat) Enum() *RetryPolicy_ResetHeaderFormat { + p := new(RetryPolicy_ResetHeaderFormat) + *p = x + return p +} + +func (x RetryPolicy_ResetHeaderFormat) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RetryPolicy_ResetHeaderFormat) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_route_v3_route_components_proto_enumTypes[3].Descriptor() +} + +func (RetryPolicy_ResetHeaderFormat) Type() protoreflect.EnumType { + return &file_envoy_config_route_v3_route_components_proto_enumTypes[3] +} + +func (x RetryPolicy_ResetHeaderFormat) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RetryPolicy_ResetHeaderFormat.Descriptor instead. +func (RetryPolicy_ResetHeaderFormat) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{9, 0} +} + +type RedirectAction_RedirectResponseCode int32 + +const ( + // Moved Permanently HTTP Status Code - 301. + RedirectAction_MOVED_PERMANENTLY RedirectAction_RedirectResponseCode = 0 + // Found HTTP Status Code - 302. + RedirectAction_FOUND RedirectAction_RedirectResponseCode = 1 + // See Other HTTP Status Code - 303. + RedirectAction_SEE_OTHER RedirectAction_RedirectResponseCode = 2 + // Temporary Redirect HTTP Status Code - 307. + RedirectAction_TEMPORARY_REDIRECT RedirectAction_RedirectResponseCode = 3 + // Permanent Redirect HTTP Status Code - 308. 
+ RedirectAction_PERMANENT_REDIRECT RedirectAction_RedirectResponseCode = 4 +) + +// Enum value maps for RedirectAction_RedirectResponseCode. +var ( + RedirectAction_RedirectResponseCode_name = map[int32]string{ + 0: "MOVED_PERMANENTLY", + 1: "FOUND", + 2: "SEE_OTHER", + 3: "TEMPORARY_REDIRECT", + 4: "PERMANENT_REDIRECT", + } + RedirectAction_RedirectResponseCode_value = map[string]int32{ + "MOVED_PERMANENTLY": 0, + "FOUND": 1, + "SEE_OTHER": 2, + "TEMPORARY_REDIRECT": 3, + "PERMANENT_REDIRECT": 4, + } +) + +func (x RedirectAction_RedirectResponseCode) Enum() *RedirectAction_RedirectResponseCode { + p := new(RedirectAction_RedirectResponseCode) + *p = x + return p +} + +func (x RedirectAction_RedirectResponseCode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RedirectAction_RedirectResponseCode) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_route_v3_route_components_proto_enumTypes[4].Descriptor() +} + +func (RedirectAction_RedirectResponseCode) Type() protoreflect.EnumType { + return &file_envoy_config_route_v3_route_components_proto_enumTypes[4] +} + +func (x RedirectAction_RedirectResponseCode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RedirectAction_RedirectResponseCode.Descriptor instead. +func (RedirectAction_RedirectResponseCode) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{11, 0} +} + +type RateLimit_Action_MetaData_Source int32 + +const ( + // Query :ref:`dynamic metadata ` + RateLimit_Action_MetaData_DYNAMIC RateLimit_Action_MetaData_Source = 0 + // Query :ref:`route entry metadata ` + RateLimit_Action_MetaData_ROUTE_ENTRY RateLimit_Action_MetaData_Source = 1 +) + +// Enum value maps for RateLimit_Action_MetaData_Source. +var ( + RateLimit_Action_MetaData_Source_name = map[int32]string{ + 0: "DYNAMIC", + 1: "ROUTE_ENTRY", + } + RateLimit_Action_MetaData_Source_value = map[string]int32{ + "DYNAMIC": 0, + "ROUTE_ENTRY": 1, + } +) + +func (x RateLimit_Action_MetaData_Source) Enum() *RateLimit_Action_MetaData_Source { + p := new(RateLimit_Action_MetaData_Source) + *p = x + return p +} + +func (x RateLimit_Action_MetaData_Source) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RateLimit_Action_MetaData_Source) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_route_v3_route_components_proto_enumTypes[5].Descriptor() +} + +func (RateLimit_Action_MetaData_Source) Type() protoreflect.EnumType { + return &file_envoy_config_route_v3_route_components_proto_enumTypes[5] +} + +func (x RateLimit_Action_MetaData_Source) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RateLimit_Action_MetaData_Source.Descriptor instead. +func (RateLimit_Action_MetaData_Source) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 8, 0} +} + +// The top level element in the routing configuration is a virtual host. Each virtual host has +// a logical name as well as a set of domains that get routed to it based on the incoming request's +// host header. This allows a single listener to service multiple top level domain path trees. Once +// a virtual host is selected based on the domain, the routes are processed in order to see which +// upstream cluster to route to or whether to perform a redirect. 
+// [#next-free-field: 25] +type VirtualHost struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The logical name of the virtual host. This is used when emitting certain + // statistics but is not relevant for routing. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A list of domains (host/authority header) that will be matched to this + // virtual host. Wildcard hosts are supported in the suffix or prefix form. + // + // Domain search order: + // 1. Exact domain names: “www.foo.com“. + // 2. Suffix domain wildcards: “*.foo.com“ or “*-bar.foo.com“. + // 3. Prefix domain wildcards: “foo.*“ or “foo-*“. + // 4. Special wildcard “*“ matching any domain. + // + // .. note:: + // + // The wildcard will not match the empty string. + // e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``. + // The longest wildcards match first. + // Only a single virtual host in the entire route configuration can match on ``*``. A domain + // must be unique across all virtual hosts or the config will fail to load. + // + // Domains cannot contain control characters. This is validated by the well_known_regex HTTP_HEADER_VALUE. + Domains []string `protobuf:"bytes,2,rep,name=domains,proto3" json:"domains,omitempty"` + // The list of routes that will be matched, in order, for incoming requests. + // The first route that matches will be used. + // Only one of this and “matcher“ can be specified. + Routes []*Route `protobuf:"bytes,3,rep,name=routes,proto3" json:"routes,omitempty"` + // [#next-major-version: This should be included in a oneof with routes wrapped in a message.] + // The match tree to use when resolving route actions for incoming requests. Only one of this and “routes“ + // can be specified. + Matcher *v3.Matcher `protobuf:"bytes,21,opt,name=matcher,proto3" json:"matcher,omitempty"` + // Specifies the type of TLS enforcement the virtual host expects. If this option is not + // specified, there is no TLS requirement for the virtual host. + RequireTls VirtualHost_TlsRequirementType `protobuf:"varint,4,opt,name=require_tls,json=requireTls,proto3,enum=envoy.config.route.v3.VirtualHost_TlsRequirementType" json:"require_tls,omitempty"` + // A list of virtual clusters defined for this virtual host. Virtual clusters + // are used for additional statistics gathering. + VirtualClusters []*VirtualCluster `protobuf:"bytes,5,rep,name=virtual_clusters,json=virtualClusters,proto3" json:"virtual_clusters,omitempty"` + // Specifies a set of rate limit configurations that will be applied to the + // virtual host. + RateLimits []*RateLimit `protobuf:"bytes,6,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits,omitempty"` + // Specifies a list of HTTP headers that should be added to each request + // handled by this virtual host. Headers specified at this level are applied + // after headers from enclosed :ref:`envoy_v3_api_msg_config.route.v3.Route` and before headers from the + // enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including + // details on header value syntax, see the documentation on :ref:`custom request headers + // `. + RequestHeadersToAdd []*v31.HeaderValueOption `protobuf:"bytes,7,rep,name=request_headers_to_add,json=requestHeadersToAdd,proto3" json:"request_headers_to_add,omitempty"` + // Specifies a list of HTTP headers that should be removed from each request + // handled by this virtual host. 
+ RequestHeadersToRemove []string `protobuf:"bytes,13,rep,name=request_headers_to_remove,json=requestHeadersToRemove,proto3" json:"request_headers_to_remove,omitempty"` + // Specifies a list of HTTP headers that should be added to each response + // handled by this virtual host. Headers specified at this level are applied + // after headers from enclosed :ref:`envoy_v3_api_msg_config.route.v3.Route` and before headers from the + // enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including + // details on header value syntax, see the documentation on :ref:`custom request headers + // `. + ResponseHeadersToAdd []*v31.HeaderValueOption `protobuf:"bytes,10,rep,name=response_headers_to_add,json=responseHeadersToAdd,proto3" json:"response_headers_to_add,omitempty"` + // Specifies a list of HTTP headers that should be removed from each response + // handled by this virtual host. + ResponseHeadersToRemove []string `protobuf:"bytes,11,rep,name=response_headers_to_remove,json=responseHeadersToRemove,proto3" json:"response_headers_to_remove,omitempty"` + // Indicates that the virtual host has a CORS policy. This field is ignored if related cors policy is + // found in the + // :ref:`VirtualHost.typed_per_filter_config`. + // + // .. attention:: + // + // This option has been deprecated. Please use + // :ref:`VirtualHost.typed_per_filter_config` + // to configure the CORS HTTP filter. + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + Cors *CorsPolicy `protobuf:"bytes,8,opt,name=cors,proto3" json:"cors,omitempty"` + // This field can be used to provide virtual host level per filter config. The key should match the + // :ref:`filter config name + // `. + // See :ref:`Http filter route specific config ` + // for details. + // [#comment: An entry's value may be wrapped in a + // :ref:`FilterConfig` + // message to specify additional options.] + TypedPerFilterConfig map[string]*anypb.Any `protobuf:"bytes,15,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Decides whether the :ref:`x-envoy-attempt-count + // ` header should be included + // in the upstream request. Setting this option will cause it to override any existing header + // value, so in the case of two Envoys on the request path with this option enabled, the upstream + // will see the attempt count as perceived by the second Envoy. Defaults to false. + // This header is unaffected by the + // :ref:`suppress_envoy_headers + // ` flag. + // + // [#next-major-version: rename to include_attempt_count_in_request.] + IncludeRequestAttemptCount bool `protobuf:"varint,14,opt,name=include_request_attempt_count,json=includeRequestAttemptCount,proto3" json:"include_request_attempt_count,omitempty"` + // Decides whether the :ref:`x-envoy-attempt-count + // ` header should be included + // in the downstream response. Setting this option will cause the router to override any existing header + // value, so in the case of two Envoys on the request path with this option enabled, the downstream + // will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false. + // This header is unaffected by the + // :ref:`suppress_envoy_headers + // ` flag. 
+ IncludeAttemptCountInResponse bool `protobuf:"varint,19,opt,name=include_attempt_count_in_response,json=includeAttemptCountInResponse,proto3" json:"include_attempt_count_in_response,omitempty"` + // Indicates the retry policy for all routes in this virtual host. Note that setting a + // route level entry will take precedence over this config and it'll be treated + // independently (e.g.: values are not inherited). + RetryPolicy *RetryPolicy `protobuf:"bytes,16,opt,name=retry_policy,json=retryPolicy,proto3" json:"retry_policy,omitempty"` + // [#not-implemented-hide:] + // Specifies the configuration for retry policy extension. Note that setting a route level entry + // will take precedence over this config and it'll be treated independently (e.g.: values are not + // inherited). :ref:`Retry policy ` should not be + // set if this field is used. + RetryPolicyTypedConfig *anypb.Any `protobuf:"bytes,20,opt,name=retry_policy_typed_config,json=retryPolicyTypedConfig,proto3" json:"retry_policy_typed_config,omitempty"` + // Indicates the hedge policy for all routes in this virtual host. Note that setting a + // route level entry will take precedence over this config and it'll be treated + // independently (e.g.: values are not inherited). + HedgePolicy *HedgePolicy `protobuf:"bytes,17,opt,name=hedge_policy,json=hedgePolicy,proto3" json:"hedge_policy,omitempty"` + // Decides whether to include the :ref:`x-envoy-is-timeout-retry ` + // request header in retries initiated by per try timeouts. + IncludeIsTimeoutRetryHeader bool `protobuf:"varint,23,opt,name=include_is_timeout_retry_header,json=includeIsTimeoutRetryHeader,proto3" json:"include_is_timeout_retry_header,omitempty"` + // The maximum bytes which will be buffered for retries and shadowing. + // If set and a route-specific limit is not set, the bytes actually buffered will be the minimum + // value of this and the listener per_connection_buffer_limit_bytes. + PerRequestBufferLimitBytes *wrapperspb.UInt32Value `protobuf:"bytes,18,opt,name=per_request_buffer_limit_bytes,json=perRequestBufferLimitBytes,proto3" json:"per_request_buffer_limit_bytes,omitempty"` + // Specify a set of default request mirroring policies for every route under this virtual host. + // It takes precedence over the route config mirror policy entirely. + // That is, policies are not merged, the most specific non-empty one becomes the mirror policies. + RequestMirrorPolicies []*RouteAction_RequestMirrorPolicy `protobuf:"bytes,22,rep,name=request_mirror_policies,json=requestMirrorPolicies,proto3" json:"request_mirror_policies,omitempty"` + // The metadata field can be used to provide additional information + // about the virtual host. It can be used for configuration, stats, and logging. + // The metadata should go under the filter namespace that will need it. + // For instance, if the metadata is intended for the Router filter, + // the filter name should be specified as “envoy.filters.http.router“. 
+ Metadata *v31.Metadata `protobuf:"bytes,24,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *VirtualHost) Reset() { + *x = VirtualHost{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VirtualHost) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VirtualHost) ProtoMessage() {} + +func (x *VirtualHost) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VirtualHost.ProtoReflect.Descriptor instead. +func (*VirtualHost) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{0} +} + +func (x *VirtualHost) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *VirtualHost) GetDomains() []string { + if x != nil { + return x.Domains + } + return nil +} + +func (x *VirtualHost) GetRoutes() []*Route { + if x != nil { + return x.Routes + } + return nil +} + +func (x *VirtualHost) GetMatcher() *v3.Matcher { + if x != nil { + return x.Matcher + } + return nil +} + +func (x *VirtualHost) GetRequireTls() VirtualHost_TlsRequirementType { + if x != nil { + return x.RequireTls + } + return VirtualHost_NONE +} + +func (x *VirtualHost) GetVirtualClusters() []*VirtualCluster { + if x != nil { + return x.VirtualClusters + } + return nil +} + +func (x *VirtualHost) GetRateLimits() []*RateLimit { + if x != nil { + return x.RateLimits + } + return nil +} + +func (x *VirtualHost) GetRequestHeadersToAdd() []*v31.HeaderValueOption { + if x != nil { + return x.RequestHeadersToAdd + } + return nil +} + +func (x *VirtualHost) GetRequestHeadersToRemove() []string { + if x != nil { + return x.RequestHeadersToRemove + } + return nil +} + +func (x *VirtualHost) GetResponseHeadersToAdd() []*v31.HeaderValueOption { + if x != nil { + return x.ResponseHeadersToAdd + } + return nil +} + +func (x *VirtualHost) GetResponseHeadersToRemove() []string { + if x != nil { + return x.ResponseHeadersToRemove + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. 
+func (x *VirtualHost) GetCors() *CorsPolicy { + if x != nil { + return x.Cors + } + return nil +} + +func (x *VirtualHost) GetTypedPerFilterConfig() map[string]*anypb.Any { + if x != nil { + return x.TypedPerFilterConfig + } + return nil +} + +func (x *VirtualHost) GetIncludeRequestAttemptCount() bool { + if x != nil { + return x.IncludeRequestAttemptCount + } + return false +} + +func (x *VirtualHost) GetIncludeAttemptCountInResponse() bool { + if x != nil { + return x.IncludeAttemptCountInResponse + } + return false +} + +func (x *VirtualHost) GetRetryPolicy() *RetryPolicy { + if x != nil { + return x.RetryPolicy + } + return nil +} + +func (x *VirtualHost) GetRetryPolicyTypedConfig() *anypb.Any { + if x != nil { + return x.RetryPolicyTypedConfig + } + return nil +} + +func (x *VirtualHost) GetHedgePolicy() *HedgePolicy { + if x != nil { + return x.HedgePolicy + } + return nil +} + +func (x *VirtualHost) GetIncludeIsTimeoutRetryHeader() bool { + if x != nil { + return x.IncludeIsTimeoutRetryHeader + } + return false +} + +func (x *VirtualHost) GetPerRequestBufferLimitBytes() *wrapperspb.UInt32Value { + if x != nil { + return x.PerRequestBufferLimitBytes + } + return nil +} + +func (x *VirtualHost) GetRequestMirrorPolicies() []*RouteAction_RequestMirrorPolicy { + if x != nil { + return x.RequestMirrorPolicies + } + return nil +} + +func (x *VirtualHost) GetMetadata() *v31.Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +// A filter-defined action type. +type FilterAction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Action *anypb.Any `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"` +} + +func (x *FilterAction) Reset() { + *x = FilterAction{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FilterAction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FilterAction) ProtoMessage() {} + +func (x *FilterAction) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FilterAction.ProtoReflect.Descriptor instead. +func (*FilterAction) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{1} +} + +func (x *FilterAction) GetAction() *anypb.Any { + if x != nil { + return x.Action + } + return nil +} + +// This can be used in route matcher :ref:`VirtualHost.matcher `. +// When the matcher matches, routes will be matched and run. +type RouteList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of routes that will be matched and run, in order. The first route that matches will be used. 
+ Routes []*Route `protobuf:"bytes,1,rep,name=routes,proto3" json:"routes,omitempty"` +} + +func (x *RouteList) Reset() { + *x = RouteList{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteList) ProtoMessage() {} + +func (x *RouteList) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteList.ProtoReflect.Descriptor instead. +func (*RouteList) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{2} +} + +func (x *RouteList) GetRoutes() []*Route { + if x != nil { + return x.Routes + } + return nil +} + +// A route is both a specification of how to match a request as well as an indication of what to do +// next (e.g., redirect, forward, rewrite, etc.). +// +// .. attention:: +// +// Envoy supports routing on HTTP method via :ref:`header matching +// `. +// +// [#next-free-field: 20] +type Route struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name for the route. + Name string `protobuf:"bytes,14,opt,name=name,proto3" json:"name,omitempty"` + // Route matching parameters. + Match *RouteMatch `protobuf:"bytes,1,opt,name=match,proto3" json:"match,omitempty"` + // Types that are assignable to Action: + // + // *Route_Route + // *Route_Redirect + // *Route_DirectResponse + // *Route_FilterAction + // *Route_NonForwardingAction + Action isRoute_Action `protobuf_oneof:"action"` + // The Metadata field can be used to provide additional information + // about the route. It can be used for configuration, stats, and logging. + // The metadata should go under the filter namespace that will need it. + // For instance, if the metadata is intended for the Router filter, + // the filter name should be specified as “envoy.filters.http.router“. + Metadata *v31.Metadata `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"` + // Decorator for the matched route. + Decorator *Decorator `protobuf:"bytes,5,opt,name=decorator,proto3" json:"decorator,omitempty"` + // This field can be used to provide route specific per filter config. The key should match the + // :ref:`filter config name + // `. + // See :ref:`Http filter route specific config ` + // for details. + // [#comment: An entry's value may be wrapped in a + // :ref:`FilterConfig` + // message to specify additional options.] + TypedPerFilterConfig map[string]*anypb.Any `protobuf:"bytes,13,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Specifies a set of headers that will be added to requests matching this + // route. Headers specified at this level are applied before headers from the + // enclosing :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` and + // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. 
For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + RequestHeadersToAdd []*v31.HeaderValueOption `protobuf:"bytes,9,rep,name=request_headers_to_add,json=requestHeadersToAdd,proto3" json:"request_headers_to_add,omitempty"` + // Specifies a list of HTTP headers that should be removed from each request + // matching this route. + RequestHeadersToRemove []string `protobuf:"bytes,12,rep,name=request_headers_to_remove,json=requestHeadersToRemove,proto3" json:"request_headers_to_remove,omitempty"` + // Specifies a set of headers that will be added to responses to requests + // matching this route. Headers specified at this level are applied before + // headers from the enclosing :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` and + // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including + // details on header value syntax, see the documentation on + // :ref:`custom request headers `. + ResponseHeadersToAdd []*v31.HeaderValueOption `protobuf:"bytes,10,rep,name=response_headers_to_add,json=responseHeadersToAdd,proto3" json:"response_headers_to_add,omitempty"` + // Specifies a list of HTTP headers that should be removed from each response + // to requests matching this route. + ResponseHeadersToRemove []string `protobuf:"bytes,11,rep,name=response_headers_to_remove,json=responseHeadersToRemove,proto3" json:"response_headers_to_remove,omitempty"` + // Presence of the object defines whether the connection manager's tracing configuration + // is overridden by this route specific instance. + Tracing *Tracing `protobuf:"bytes,15,opt,name=tracing,proto3" json:"tracing,omitempty"` + // The maximum bytes which will be buffered for retries and shadowing. + // If set, the bytes actually buffered will be the minimum value of this and the + // listener per_connection_buffer_limit_bytes. + PerRequestBufferLimitBytes *wrapperspb.UInt32Value `protobuf:"bytes,16,opt,name=per_request_buffer_limit_bytes,json=perRequestBufferLimitBytes,proto3" json:"per_request_buffer_limit_bytes,omitempty"` + // The human readable prefix to use when emitting statistics for this endpoint. + // The statistics are rooted at vhost..route.. + // This should be set for highly critical + // endpoints that one wishes to get “per-route” statistics on. + // If not set, endpoint statistics are not generated. + // + // The emitted statistics are the same as those documented for :ref:`virtual clusters `. + // + // .. warning:: + // + // We do not recommend setting up a stat prefix for + // every application endpoint. This is both not easily maintainable and + // statistics use a non-trivial amount of memory(approximately 1KiB per route). 
+ StatPrefix string `protobuf:"bytes,19,opt,name=stat_prefix,json=statPrefix,proto3" json:"stat_prefix,omitempty"` +} + +func (x *Route) Reset() { + *x = Route{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Route) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Route) ProtoMessage() {} + +func (x *Route) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Route.ProtoReflect.Descriptor instead. +func (*Route) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{3} +} + +func (x *Route) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Route) GetMatch() *RouteMatch { + if x != nil { + return x.Match + } + return nil +} + +func (m *Route) GetAction() isRoute_Action { + if m != nil { + return m.Action + } + return nil +} + +func (x *Route) GetRoute() *RouteAction { + if x, ok := x.GetAction().(*Route_Route); ok { + return x.Route + } + return nil +} + +func (x *Route) GetRedirect() *RedirectAction { + if x, ok := x.GetAction().(*Route_Redirect); ok { + return x.Redirect + } + return nil +} + +func (x *Route) GetDirectResponse() *DirectResponseAction { + if x, ok := x.GetAction().(*Route_DirectResponse); ok { + return x.DirectResponse + } + return nil +} + +func (x *Route) GetFilterAction() *FilterAction { + if x, ok := x.GetAction().(*Route_FilterAction); ok { + return x.FilterAction + } + return nil +} + +func (x *Route) GetNonForwardingAction() *NonForwardingAction { + if x, ok := x.GetAction().(*Route_NonForwardingAction); ok { + return x.NonForwardingAction + } + return nil +} + +func (x *Route) GetMetadata() *v31.Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Route) GetDecorator() *Decorator { + if x != nil { + return x.Decorator + } + return nil +} + +func (x *Route) GetTypedPerFilterConfig() map[string]*anypb.Any { + if x != nil { + return x.TypedPerFilterConfig + } + return nil +} + +func (x *Route) GetRequestHeadersToAdd() []*v31.HeaderValueOption { + if x != nil { + return x.RequestHeadersToAdd + } + return nil +} + +func (x *Route) GetRequestHeadersToRemove() []string { + if x != nil { + return x.RequestHeadersToRemove + } + return nil +} + +func (x *Route) GetResponseHeadersToAdd() []*v31.HeaderValueOption { + if x != nil { + return x.ResponseHeadersToAdd + } + return nil +} + +func (x *Route) GetResponseHeadersToRemove() []string { + if x != nil { + return x.ResponseHeadersToRemove + } + return nil +} + +func (x *Route) GetTracing() *Tracing { + if x != nil { + return x.Tracing + } + return nil +} + +func (x *Route) GetPerRequestBufferLimitBytes() *wrapperspb.UInt32Value { + if x != nil { + return x.PerRequestBufferLimitBytes + } + return nil +} + +func (x *Route) GetStatPrefix() string { + if x != nil { + return x.StatPrefix + } + return "" +} + +type isRoute_Action interface { + isRoute_Action() +} + +type Route_Route struct { + // Route request to some upstream cluster. + Route *RouteAction `protobuf:"bytes,2,opt,name=route,proto3,oneof"` +} + +type Route_Redirect struct { + // Return a redirect. 
+ Redirect *RedirectAction `protobuf:"bytes,3,opt,name=redirect,proto3,oneof"` +} + +type Route_DirectResponse struct { + // Return an arbitrary HTTP response directly, without proxying. + DirectResponse *DirectResponseAction `protobuf:"bytes,7,opt,name=direct_response,json=directResponse,proto3,oneof"` +} + +type Route_FilterAction struct { + // [#not-implemented-hide:] + // A filter-defined action (e.g., it could dynamically generate the RouteAction). + // [#comment: TODO(samflattery): Remove cleanup in route_fuzz_test.cc when + // implemented] + FilterAction *FilterAction `protobuf:"bytes,17,opt,name=filter_action,json=filterAction,proto3,oneof"` +} + +type Route_NonForwardingAction struct { + // [#not-implemented-hide:] + // An action used when the route will generate a response directly, + // without forwarding to an upstream host. This will be used in non-proxy + // xDS clients like the gRPC server. It could also be used in the future + // in Envoy for a filter that directly generates responses for requests. + NonForwardingAction *NonForwardingAction `protobuf:"bytes,18,opt,name=non_forwarding_action,json=nonForwardingAction,proto3,oneof"` +} + +func (*Route_Route) isRoute_Action() {} + +func (*Route_Redirect) isRoute_Action() {} + +func (*Route_DirectResponse) isRoute_Action() {} + +func (*Route_FilterAction) isRoute_Action() {} + +func (*Route_NonForwardingAction) isRoute_Action() {} + +// Compared to the :ref:`cluster ` field that specifies a +// single upstream cluster as the target of a request, the :ref:`weighted_clusters +// ` option allows for specification of +// multiple upstream clusters along with weights that indicate the percentage of +// traffic to be forwarded to each cluster. The router selects an upstream cluster based on the +// weights. +type WeightedCluster struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies one or more upstream clusters associated with the route. + Clusters []*WeightedCluster_ClusterWeight `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"` + // Specifies the total weight across all clusters. The sum of all cluster weights must equal this + // value, if this is greater than 0. + // This field is now deprecated, and the client will use the sum of all + // cluster weights. It is up to the management server to supply the correct weights. + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + TotalWeight *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=total_weight,json=totalWeight,proto3" json:"total_weight,omitempty"` + // Specifies the runtime key prefix that should be used to construct the + // runtime keys associated with each cluster. When the “runtime_key_prefix“ is + // specified, the router will look for weights associated with each upstream + // cluster under the key “runtime_key_prefix“ + “.“ + “cluster[i].name“ where + // “cluster[i]“ denotes an entry in the clusters array field. If the runtime + // key for the cluster does not exist, the value specified in the + // configuration file will be used as the default weight. See the :ref:`runtime documentation + // ` for how key names map to the underlying implementation. 
+ RuntimeKeyPrefix string `protobuf:"bytes,2,opt,name=runtime_key_prefix,json=runtimeKeyPrefix,proto3" json:"runtime_key_prefix,omitempty"` + // Types that are assignable to RandomValueSpecifier: + // + // *WeightedCluster_HeaderName + RandomValueSpecifier isWeightedCluster_RandomValueSpecifier `protobuf_oneof:"random_value_specifier"` +} + +func (x *WeightedCluster) Reset() { + *x = WeightedCluster{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WeightedCluster) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WeightedCluster) ProtoMessage() {} + +func (x *WeightedCluster) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WeightedCluster.ProtoReflect.Descriptor instead. +func (*WeightedCluster) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{4} +} + +func (x *WeightedCluster) GetClusters() []*WeightedCluster_ClusterWeight { + if x != nil { + return x.Clusters + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. +func (x *WeightedCluster) GetTotalWeight() *wrapperspb.UInt32Value { + if x != nil { + return x.TotalWeight + } + return nil +} + +func (x *WeightedCluster) GetRuntimeKeyPrefix() string { + if x != nil { + return x.RuntimeKeyPrefix + } + return "" +} + +func (m *WeightedCluster) GetRandomValueSpecifier() isWeightedCluster_RandomValueSpecifier { + if m != nil { + return m.RandomValueSpecifier + } + return nil +} + +func (x *WeightedCluster) GetHeaderName() string { + if x, ok := x.GetRandomValueSpecifier().(*WeightedCluster_HeaderName); ok { + return x.HeaderName + } + return "" +} + +type isWeightedCluster_RandomValueSpecifier interface { + isWeightedCluster_RandomValueSpecifier() +} + +type WeightedCluster_HeaderName struct { + // Specifies the header name that is used to look up the random value passed in the request header. + // This is used to ensure consistent cluster picking across multiple proxy levels for weighted traffic. + // If header is not present or invalid, Envoy will fall back to use the internally generated random value. + // This header is expected to be single-valued header as we only want to have one selected value throughout + // the process for the consistency. And the value is a unsigned number between 0 and UINT64_MAX. + HeaderName string `protobuf:"bytes,4,opt,name=header_name,json=headerName,proto3,oneof"` +} + +func (*WeightedCluster_HeaderName) isWeightedCluster_RandomValueSpecifier() {} + +// Configuration for a cluster specifier plugin. +type ClusterSpecifierPlugin struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the plugin and its opaque configuration. + Extension *v31.TypedExtensionConfig `protobuf:"bytes,1,opt,name=extension,proto3" json:"extension,omitempty"` + // If is_optional is not set or is set to false and the plugin defined by this message is not a + // supported type, the containing resource is NACKed. 
If is_optional is set to true, the resource + // would not be NACKed for this reason. In this case, routes referencing this plugin's name would + // not be treated as an illegal configuration, but would result in a failure if the route is + // selected. + IsOptional bool `protobuf:"varint,2,opt,name=is_optional,json=isOptional,proto3" json:"is_optional,omitempty"` +} + +func (x *ClusterSpecifierPlugin) Reset() { + *x = ClusterSpecifierPlugin{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClusterSpecifierPlugin) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClusterSpecifierPlugin) ProtoMessage() {} + +func (x *ClusterSpecifierPlugin) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClusterSpecifierPlugin.ProtoReflect.Descriptor instead. +func (*ClusterSpecifierPlugin) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{5} +} + +func (x *ClusterSpecifierPlugin) GetExtension() *v31.TypedExtensionConfig { + if x != nil { + return x.Extension + } + return nil +} + +func (x *ClusterSpecifierPlugin) GetIsOptional() bool { + if x != nil { + return x.IsOptional + } + return false +} + +// [#next-free-field: 16] +type RouteMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to PathSpecifier: + // + // *RouteMatch_Prefix + // *RouteMatch_Path + // *RouteMatch_SafeRegex + // *RouteMatch_ConnectMatcher_ + // *RouteMatch_PathSeparatedPrefix + // *RouteMatch_PathMatchPolicy + PathSpecifier isRouteMatch_PathSpecifier `protobuf_oneof:"path_specifier"` + // Indicates that prefix/path matching should be case sensitive. The default + // is true. Ignored for safe_regex matching. + CaseSensitive *wrapperspb.BoolValue `protobuf:"bytes,4,opt,name=case_sensitive,json=caseSensitive,proto3" json:"case_sensitive,omitempty"` + // Indicates that the route should additionally match on a runtime key. Every time the route + // is considered for a match, it must also fall under the percentage of matches indicated by + // this field. For some fraction N/D, a random number in the range [0,D) is selected. If the + // number is <= the value of the numerator N, or if the key is not present, the default + // value, the router continues to evaluate the remaining match criteria. A runtime_fraction + // route configuration can be used to roll out route changes in a gradual manner without full + // code/config deploys. Refer to the :ref:`traffic shifting + // ` docs for additional documentation. + // + // .. note:: + // + // Parsing this field is implemented such that the runtime key's data may be represented + // as a FractionalPercent proto represented as JSON/YAML and may also be represented as an + // integer with the assumption that the value is an integral percentage out of 100. For + // instance, a runtime key lookup returning the value "42" would parse as a FractionalPercent + // whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics. 
+ RuntimeFraction *v31.RuntimeFractionalPercent `protobuf:"bytes,9,opt,name=runtime_fraction,json=runtimeFraction,proto3" json:"runtime_fraction,omitempty"` + // Specifies a set of headers that the route should match on. The router will + // check the request’s headers against all the specified headers in the route + // config. A match will happen if all the headers in the route are present in + // the request with the same values (or based on presence if the value field + // is not in the config). + Headers []*HeaderMatcher `protobuf:"bytes,6,rep,name=headers,proto3" json:"headers,omitempty"` + // Specifies a set of URL query parameters on which the route should + // match. The router will check the query string from the “path“ header + // against all the specified query parameters. If the number of specified + // query parameters is nonzero, they all must match the “path“ header's + // query string for a match to occur. In the event query parameters are + // repeated, only the first value for each key will be considered. + // + // .. note:: + // + // If query parameters are used to pass request message fields when + // `grpc_json_transcoder `_ + // is used, the transcoded message fields maybe different. The query parameters are + // url encoded, but the message fields are not. For example, if a query + // parameter is "foo%20bar", the message field will be "foo bar". + QueryParameters []*QueryParameterMatcher `protobuf:"bytes,7,rep,name=query_parameters,json=queryParameters,proto3" json:"query_parameters,omitempty"` + // If specified, only gRPC requests will be matched. The router will check + // that the content-type header has a application/grpc or one of the various + // application/grpc+ values. + Grpc *RouteMatch_GrpcRouteMatchOptions `protobuf:"bytes,8,opt,name=grpc,proto3" json:"grpc,omitempty"` + // If specified, the client tls context will be matched against the defined + // match options. + // + // [#next-major-version: unify with RBAC] + TlsContext *RouteMatch_TlsContextMatchOptions `protobuf:"bytes,11,opt,name=tls_context,json=tlsContext,proto3" json:"tls_context,omitempty"` + // Specifies a set of dynamic metadata matchers on which the route should match. + // The router will check the dynamic metadata against all the specified dynamic metadata matchers. + // If the number of specified dynamic metadata matchers is nonzero, they all must match the + // dynamic metadata for a match to occur. + DynamicMetadata []*v32.MetadataMatcher `protobuf:"bytes,13,rep,name=dynamic_metadata,json=dynamicMetadata,proto3" json:"dynamic_metadata,omitempty"` +} + +func (x *RouteMatch) Reset() { + *x = RouteMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteMatch) ProtoMessage() {} + +func (x *RouteMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteMatch.ProtoReflect.Descriptor instead. 
+func (*RouteMatch) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{6} +} + +func (m *RouteMatch) GetPathSpecifier() isRouteMatch_PathSpecifier { + if m != nil { + return m.PathSpecifier + } + return nil +} + +func (x *RouteMatch) GetPrefix() string { + if x, ok := x.GetPathSpecifier().(*RouteMatch_Prefix); ok { + return x.Prefix + } + return "" +} + +func (x *RouteMatch) GetPath() string { + if x, ok := x.GetPathSpecifier().(*RouteMatch_Path); ok { + return x.Path + } + return "" +} + +func (x *RouteMatch) GetSafeRegex() *v32.RegexMatcher { + if x, ok := x.GetPathSpecifier().(*RouteMatch_SafeRegex); ok { + return x.SafeRegex + } + return nil +} + +func (x *RouteMatch) GetConnectMatcher() *RouteMatch_ConnectMatcher { + if x, ok := x.GetPathSpecifier().(*RouteMatch_ConnectMatcher_); ok { + return x.ConnectMatcher + } + return nil +} + +func (x *RouteMatch) GetPathSeparatedPrefix() string { + if x, ok := x.GetPathSpecifier().(*RouteMatch_PathSeparatedPrefix); ok { + return x.PathSeparatedPrefix + } + return "" +} + +func (x *RouteMatch) GetPathMatchPolicy() *v31.TypedExtensionConfig { + if x, ok := x.GetPathSpecifier().(*RouteMatch_PathMatchPolicy); ok { + return x.PathMatchPolicy + } + return nil +} + +func (x *RouteMatch) GetCaseSensitive() *wrapperspb.BoolValue { + if x != nil { + return x.CaseSensitive + } + return nil +} + +func (x *RouteMatch) GetRuntimeFraction() *v31.RuntimeFractionalPercent { + if x != nil { + return x.RuntimeFraction + } + return nil +} + +func (x *RouteMatch) GetHeaders() []*HeaderMatcher { + if x != nil { + return x.Headers + } + return nil +} + +func (x *RouteMatch) GetQueryParameters() []*QueryParameterMatcher { + if x != nil { + return x.QueryParameters + } + return nil +} + +func (x *RouteMatch) GetGrpc() *RouteMatch_GrpcRouteMatchOptions { + if x != nil { + return x.Grpc + } + return nil +} + +func (x *RouteMatch) GetTlsContext() *RouteMatch_TlsContextMatchOptions { + if x != nil { + return x.TlsContext + } + return nil +} + +func (x *RouteMatch) GetDynamicMetadata() []*v32.MetadataMatcher { + if x != nil { + return x.DynamicMetadata + } + return nil +} + +type isRouteMatch_PathSpecifier interface { + isRouteMatch_PathSpecifier() +} + +type RouteMatch_Prefix struct { + // If specified, the route is a prefix rule meaning that the prefix must + // match the beginning of the “:path“ header. + Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3,oneof"` +} + +type RouteMatch_Path struct { + // If specified, the route is an exact path rule meaning that the path must + // exactly match the “:path“ header once the query string is removed. + Path string `protobuf:"bytes,2,opt,name=path,proto3,oneof"` +} + +type RouteMatch_SafeRegex struct { + // If specified, the route is a regular expression rule meaning that the + // regex must match the “:path“ header once the query string is removed. The entire path + // (without the query string) must match the regex. The rule will not match if only a + // subsequence of the “:path“ header matches the regex. + // + // [#next-major-version: In the v3 API we should redo how path specification works such + // that we utilize StringMatcher, and additionally have consistent options around whether we + // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive + // to deprecate the existing options. 
We should even consider whether we want to do away with + // path_specifier entirely and just rely on a set of header matchers which can already match + // on :path, etc. The issue with that is it is unclear how to generically deal with query string + // stripping. This needs more thought.] + SafeRegex *v32.RegexMatcher `protobuf:"bytes,10,opt,name=safe_regex,json=safeRegex,proto3,oneof"` +} + +type RouteMatch_ConnectMatcher_ struct { + // If this is used as the matcher, the matcher will only match CONNECT or CONNECT-UDP requests. + // Note that this will not match other Extended CONNECT requests (WebSocket and the like) as + // they are normalized in Envoy as HTTP/1.1 style upgrades. + // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2 and HTTP/3, + // where Extended CONNECT requests may have a path, the path matchers will work if + // there is a path present. + // Note that CONNECT support is currently considered alpha in Envoy. + // [#comment: TODO(htuch): Replace the above comment with an alpha tag.] + ConnectMatcher *RouteMatch_ConnectMatcher `protobuf:"bytes,12,opt,name=connect_matcher,json=connectMatcher,proto3,oneof"` +} + +type RouteMatch_PathSeparatedPrefix struct { + // If specified, the route is a path-separated prefix rule meaning that the + // “:path“ header (without the query string) must either exactly match the + // “path_separated_prefix“ or have it as a prefix, followed by “/“ + // + // For example, “/api/dev“ would match + // “/api/dev“, “/api/dev/“, “/api/dev/v1“, and “/api/dev?param=true“ + // but would not match “/api/developer“ + // + // Expect the value to not contain “?“ or “#“ and not to end in “/“ + PathSeparatedPrefix string `protobuf:"bytes,14,opt,name=path_separated_prefix,json=pathSeparatedPrefix,proto3,oneof"` +} + +type RouteMatch_PathMatchPolicy struct { + // [#extension-category: envoy.path.match] + PathMatchPolicy *v31.TypedExtensionConfig `protobuf:"bytes,15,opt,name=path_match_policy,json=pathMatchPolicy,proto3,oneof"` +} + +func (*RouteMatch_Prefix) isRouteMatch_PathSpecifier() {} + +func (*RouteMatch_Path) isRouteMatch_PathSpecifier() {} + +func (*RouteMatch_SafeRegex) isRouteMatch_PathSpecifier() {} + +func (*RouteMatch_ConnectMatcher_) isRouteMatch_PathSpecifier() {} + +func (*RouteMatch_PathSeparatedPrefix) isRouteMatch_PathSpecifier() {} + +func (*RouteMatch_PathMatchPolicy) isRouteMatch_PathSpecifier() {} + +// Cors policy configuration. +// +// .. attention:: +// +// This message has been deprecated. Please use +// :ref:`CorsPolicy in filter extension ` +// as as alternative. +// +// [#next-free-field: 14] +type CorsPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies string patterns that match allowed origins. An origin is allowed if any of the + // string matchers match. + AllowOriginStringMatch []*v32.StringMatcher `protobuf:"bytes,11,rep,name=allow_origin_string_match,json=allowOriginStringMatch,proto3" json:"allow_origin_string_match,omitempty"` + // Specifies the content for the “access-control-allow-methods“ header. + AllowMethods string `protobuf:"bytes,2,opt,name=allow_methods,json=allowMethods,proto3" json:"allow_methods,omitempty"` + // Specifies the content for the “access-control-allow-headers“ header. + AllowHeaders string `protobuf:"bytes,3,opt,name=allow_headers,json=allowHeaders,proto3" json:"allow_headers,omitempty"` + // Specifies the content for the “access-control-expose-headers“ header. 
+ ExposeHeaders string `protobuf:"bytes,4,opt,name=expose_headers,json=exposeHeaders,proto3" json:"expose_headers,omitempty"` + // Specifies the content for the “access-control-max-age“ header. + MaxAge string `protobuf:"bytes,5,opt,name=max_age,json=maxAge,proto3" json:"max_age,omitempty"` + // Specifies whether the resource allows credentials. + AllowCredentials *wrapperspb.BoolValue `protobuf:"bytes,6,opt,name=allow_credentials,json=allowCredentials,proto3" json:"allow_credentials,omitempty"` + // Types that are assignable to EnabledSpecifier: + // + // *CorsPolicy_FilterEnabled + EnabledSpecifier isCorsPolicy_EnabledSpecifier `protobuf_oneof:"enabled_specifier"` + // Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not + // enforced. + // + // This field is intended to be used when “filter_enabled“ and “enabled“ are off. One of those + // fields have to explicitly disable the filter in order for this setting to take effect. + // + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate + // and track the request's “Origin“ to determine if it's valid but will not enforce any policies. + ShadowEnabled *v31.RuntimeFractionalPercent `protobuf:"bytes,10,opt,name=shadow_enabled,json=shadowEnabled,proto3" json:"shadow_enabled,omitempty"` + // Specify whether allow requests whose target server's IP address is more private than that from + // which the request initiator was fetched. + // + // More details refer to https://developer.chrome.com/blog/private-network-access-preflight. + AllowPrivateNetworkAccess *wrapperspb.BoolValue `protobuf:"bytes,12,opt,name=allow_private_network_access,json=allowPrivateNetworkAccess,proto3" json:"allow_private_network_access,omitempty"` + // Specifies if preflight requests not matching the configured allowed origin should be forwarded + // to the upstream. Default is true. + ForwardNotMatchingPreflights *wrapperspb.BoolValue `protobuf:"bytes,13,opt,name=forward_not_matching_preflights,json=forwardNotMatchingPreflights,proto3" json:"forward_not_matching_preflights,omitempty"` +} + +func (x *CorsPolicy) Reset() { + *x = CorsPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CorsPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CorsPolicy) ProtoMessage() {} + +func (x *CorsPolicy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CorsPolicy.ProtoReflect.Descriptor instead. 
+func (*CorsPolicy) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{7} +} + +func (x *CorsPolicy) GetAllowOriginStringMatch() []*v32.StringMatcher { + if x != nil { + return x.AllowOriginStringMatch + } + return nil +} + +func (x *CorsPolicy) GetAllowMethods() string { + if x != nil { + return x.AllowMethods + } + return "" +} + +func (x *CorsPolicy) GetAllowHeaders() string { + if x != nil { + return x.AllowHeaders + } + return "" +} + +func (x *CorsPolicy) GetExposeHeaders() string { + if x != nil { + return x.ExposeHeaders + } + return "" +} + +func (x *CorsPolicy) GetMaxAge() string { + if x != nil { + return x.MaxAge + } + return "" +} + +func (x *CorsPolicy) GetAllowCredentials() *wrapperspb.BoolValue { + if x != nil { + return x.AllowCredentials + } + return nil +} + +func (m *CorsPolicy) GetEnabledSpecifier() isCorsPolicy_EnabledSpecifier { + if m != nil { + return m.EnabledSpecifier + } + return nil +} + +func (x *CorsPolicy) GetFilterEnabled() *v31.RuntimeFractionalPercent { + if x, ok := x.GetEnabledSpecifier().(*CorsPolicy_FilterEnabled); ok { + return x.FilterEnabled + } + return nil +} + +func (x *CorsPolicy) GetShadowEnabled() *v31.RuntimeFractionalPercent { + if x != nil { + return x.ShadowEnabled + } + return nil +} + +func (x *CorsPolicy) GetAllowPrivateNetworkAccess() *wrapperspb.BoolValue { + if x != nil { + return x.AllowPrivateNetworkAccess + } + return nil +} + +func (x *CorsPolicy) GetForwardNotMatchingPreflights() *wrapperspb.BoolValue { + if x != nil { + return x.ForwardNotMatchingPreflights + } + return nil +} + +type isCorsPolicy_EnabledSpecifier interface { + isCorsPolicy_EnabledSpecifier() +} + +type CorsPolicy_FilterEnabled struct { + // Specifies the % of requests for which the CORS filter is enabled. + // + // If neither “enabled“, “filter_enabled“, nor “shadow_enabled“ are specified, the CORS + // filter will be enabled for 100% of the requests. + // + // If :ref:`runtime_key ` is + // specified, Envoy will lookup the runtime key to get the percentage of requests to filter. + FilterEnabled *v31.RuntimeFractionalPercent `protobuf:"bytes,9,opt,name=filter_enabled,json=filterEnabled,proto3,oneof"` +} + +func (*CorsPolicy_FilterEnabled) isCorsPolicy_EnabledSpecifier() {} + +// [#next-free-field: 42] +type RouteAction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to ClusterSpecifier: + // + // *RouteAction_Cluster + // *RouteAction_ClusterHeader + // *RouteAction_WeightedClusters + // *RouteAction_ClusterSpecifierPlugin + // *RouteAction_InlineClusterSpecifierPlugin + ClusterSpecifier isRouteAction_ClusterSpecifier `protobuf_oneof:"cluster_specifier"` + // The HTTP status code to use when configured cluster is not found. + // The default response code is 503 Service Unavailable. + ClusterNotFoundResponseCode RouteAction_ClusterNotFoundResponseCode `protobuf:"varint,20,opt,name=cluster_not_found_response_code,json=clusterNotFoundResponseCode,proto3,enum=envoy.config.route.v3.RouteAction_ClusterNotFoundResponseCode" json:"cluster_not_found_response_code,omitempty"` + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints + // in the upstream cluster with metadata matching what's set in this field will be considered + // for load balancing. If using :ref:`weighted_clusters + // `, metadata will be merged, with values + // provided there taking precedence. 
The filter name should be specified as “envoy.lb“. + MetadataMatch *v31.Metadata `protobuf:"bytes,4,opt,name=metadata_match,json=metadataMatch,proto3" json:"metadata_match,omitempty"` + // Indicates that during forwarding, the matched prefix (or path) should be + // swapped with this value. This option allows application URLs to be rooted + // at a different path from those exposed at the reverse proxy layer. The router filter will + // place the original path before rewrite into the :ref:`x-envoy-original-path + // ` header. + // + // Only one of :ref:`regex_rewrite ` + // :ref:`path_rewrite_policy `, + // or :ref:`prefix_rewrite ` may be specified. + // + // .. attention:: + // + // Pay careful attention to the use of trailing slashes in the + // :ref:`route's match ` prefix value. + // Stripping a prefix from a path requires multiple Routes to handle all cases. For example, + // rewriting ``/prefix`` to ``/`` and ``/prefix/etc`` to ``/etc`` cannot be done in a single + // :ref:`Route `, as shown by the below config entries: + // + // .. code-block:: yaml + // + // - match: + // prefix: "/prefix/" + // route: + // prefix_rewrite: "/" + // - match: + // prefix: "/prefix" + // route: + // prefix_rewrite: "/" + // + // Having above entries in the config, requests to ``/prefix`` will be stripped to ``/``, while + // requests to ``/prefix/etc`` will be stripped to ``/etc``. + PrefixRewrite string `protobuf:"bytes,5,opt,name=prefix_rewrite,json=prefixRewrite,proto3" json:"prefix_rewrite,omitempty"` + // Indicates that during forwarding, portions of the path that match the + // pattern should be rewritten, even allowing the substitution of capture + // groups from the pattern into the new path as specified by the rewrite + // substitution string. This is useful to allow application paths to be + // rewritten in a way that is aware of segments with variable content like + // identifiers. The router filter will place the original path as it was + // before the rewrite into the :ref:`x-envoy-original-path + // ` header. + // + // Only one of :ref:`regex_rewrite `, + // :ref:`prefix_rewrite `, or + // :ref:`path_rewrite_policy `] + // may be specified. + // + // Examples using Google's `RE2 `_ engine: + // + // - The path pattern “^/service/([^/]+)(/.*)$“ paired with a substitution + // string of “\2/instance/\1“ would transform “/service/foo/v1/api“ + // into “/v1/api/instance/foo“. + // + // - The pattern “one“ paired with a substitution string of “two“ would + // transform “/xxx/one/yyy/one/zzz“ into “/xxx/two/yyy/two/zzz“. + // + // - The pattern “^(.*?)one(.*)$“ paired with a substitution string of + // “\1two\2“ would replace only the first occurrence of “one“, + // transforming path “/xxx/one/yyy/one/zzz“ into “/xxx/two/yyy/one/zzz“. + // + // - The pattern “(?i)/xxx/“ paired with a substitution string of “/yyy/“ + // would do a case-insensitive match and transform path “/aaa/XxX/bbb“ to + // “/aaa/yyy/bbb“. 
+ RegexRewrite *v32.RegexMatchAndSubstitute `protobuf:"bytes,32,opt,name=regex_rewrite,json=regexRewrite,proto3" json:"regex_rewrite,omitempty"` + // [#extension-category: envoy.path.rewrite] + PathRewritePolicy *v31.TypedExtensionConfig `protobuf:"bytes,41,opt,name=path_rewrite_policy,json=pathRewritePolicy,proto3" json:"path_rewrite_policy,omitempty"` + // Types that are assignable to HostRewriteSpecifier: + // + // *RouteAction_HostRewriteLiteral + // *RouteAction_AutoHostRewrite + // *RouteAction_HostRewriteHeader + // *RouteAction_HostRewritePathRegex + HostRewriteSpecifier isRouteAction_HostRewriteSpecifier `protobuf_oneof:"host_rewrite_specifier"` + // If set, then a host rewrite action (one of + // :ref:`host_rewrite_literal `, + // :ref:`auto_host_rewrite `, + // :ref:`host_rewrite_header `, or + // :ref:`host_rewrite_path_regex `) + // causes the original value of the host header, if any, to be appended to the + // :ref:`config_http_conn_man_headers_x-forwarded-host` HTTP header if it is different to the last value appended. + AppendXForwardedHost bool `protobuf:"varint,38,opt,name=append_x_forwarded_host,json=appendXForwardedHost,proto3" json:"append_x_forwarded_host,omitempty"` + // Specifies the upstream timeout for the route. If not specified, the default is 15s. This + // spans between the point at which the entire downstream request (i.e. end-of-stream) has been + // processed and when the upstream response has been completely processed. A value of 0 will + // disable the route's timeout. + // + // .. note:: + // + // This timeout includes all retries. See also + // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, + // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the + // :ref:`retry overview `. + Timeout *durationpb.Duration `protobuf:"bytes,8,opt,name=timeout,proto3" json:"timeout,omitempty"` + // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout, + // although the connection manager wide :ref:`stream_idle_timeout + // ` + // will still apply. A value of 0 will completely disable the route's idle timeout, even if a + // connection manager stream idle timeout is configured. + // + // The idle timeout is distinct to :ref:`timeout + // `, which provides an upper bound + // on the upstream response time; :ref:`idle_timeout + // ` instead bounds the amount + // of time the request's stream may be idle. + // + // After header decoding, the idle timeout will apply on downstream and + // upstream request events. Each time an encode/decode event for headers or + // data is processed for the stream, the timer will be reset. If the timeout + // fires, the stream is terminated with a 408 Request Timeout error code if no + // upstream response header has been received, otherwise a stream reset + // occurs. + // + // If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" + // is configured, this timeout is scaled according to the value for + // :ref:`HTTP_DOWNSTREAM_STREAM_IDLE `. + IdleTimeout *durationpb.Duration `protobuf:"bytes,24,opt,name=idle_timeout,json=idleTimeout,proto3" json:"idle_timeout,omitempty"` + // Specifies how to send request over TLS early data. + // If absent, allows `safe HTTP requests `_ to be sent on early data. 
+ // [#extension-category: envoy.route.early_data_policy] + EarlyDataPolicy *v31.TypedExtensionConfig `protobuf:"bytes,40,opt,name=early_data_policy,json=earlyDataPolicy,proto3" json:"early_data_policy,omitempty"` + // Indicates that the route has a retry policy. Note that if this is set, + // it'll take precedence over the virtual host level retry policy entirely + // (e.g.: policies are not merged, most internal one becomes the enforced policy). + RetryPolicy *RetryPolicy `protobuf:"bytes,9,opt,name=retry_policy,json=retryPolicy,proto3" json:"retry_policy,omitempty"` + // [#not-implemented-hide:] + // Specifies the configuration for retry policy extension. Note that if this is set, it'll take + // precedence over the virtual host level retry policy entirely (e.g.: policies are not merged, + // most internal one becomes the enforced policy). :ref:`Retry policy ` + // should not be set if this field is used. + RetryPolicyTypedConfig *anypb.Any `protobuf:"bytes,33,opt,name=retry_policy_typed_config,json=retryPolicyTypedConfig,proto3" json:"retry_policy_typed_config,omitempty"` + // Specify a set of route request mirroring policies. + // It takes precedence over the virtual host and route config mirror policy entirely. + // That is, policies are not merged, the most specific non-empty one becomes the mirror policies. + RequestMirrorPolicies []*RouteAction_RequestMirrorPolicy `protobuf:"bytes,30,rep,name=request_mirror_policies,json=requestMirrorPolicies,proto3" json:"request_mirror_policies,omitempty"` + // Optionally specifies the :ref:`routing priority `. + Priority v31.RoutingPriority `protobuf:"varint,11,opt,name=priority,proto3,enum=envoy.config.core.v3.RoutingPriority" json:"priority,omitempty"` + // Specifies a set of rate limit configurations that could be applied to the + // route. + RateLimits []*RateLimit `protobuf:"bytes,13,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits,omitempty"` + // Specifies if the rate limit filter should include the virtual host rate + // limits. By default, if the route configured rate limits, the virtual host + // :ref:`rate_limits ` are not applied to the + // request. + // + // This field is deprecated. Please use :ref:`vh_rate_limits ` + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + IncludeVhRateLimits *wrapperspb.BoolValue `protobuf:"bytes,14,opt,name=include_vh_rate_limits,json=includeVhRateLimits,proto3" json:"include_vh_rate_limits,omitempty"` + // Specifies a list of hash policies to use for ring hash load balancing. Each + // hash policy is evaluated individually and the combined result is used to + // route the request. The method of combination is deterministic such that + // identical lists of hash policies will produce the same hash. Since a hash + // policy examines specific parts of a request, it can fail to produce a hash + // (i.e. if the hashed header is not present). If (and only if) all configured + // hash policies fail to generate a hash, no hash will be produced for + // the route. In this case, the behavior is the same as if no hash policies + // were specified (i.e. the ring hash load balancer will choose a random + // backend). If a hash policy has the "terminal" attribute set to true, and + // there is already a hash generated, the hash is returned immediately, + // ignoring the rest of the hash policy list. 
+ HashPolicy []*RouteAction_HashPolicy `protobuf:"bytes,15,rep,name=hash_policy,json=hashPolicy,proto3" json:"hash_policy,omitempty"` + // Indicates that the route has a CORS policy. This field is ignored if related cors policy is + // found in the :ref:`Route.typed_per_filter_config` or + // :ref:`WeightedCluster.ClusterWeight.typed_per_filter_config`. + // + // .. attention:: + // + // This option has been deprecated. Please use + // :ref:`Route.typed_per_filter_config` or + // :ref:`WeightedCluster.ClusterWeight.typed_per_filter_config` + // to configure the CORS HTTP filter. + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + Cors *CorsPolicy `protobuf:"bytes,17,opt,name=cors,proto3" json:"cors,omitempty"` + // Deprecated by :ref:`grpc_timeout_header_max ` + // If present, and the request is a gRPC request, use the + // `grpc-timeout header `_, + // or its default value (infinity) instead of + // :ref:`timeout `, but limit the applied timeout + // to the maximum value specified here. If configured as 0, the maximum allowed timeout for + // gRPC requests is infinity. If not configured at all, the “grpc-timeout“ header is not used + // and gRPC requests time out like any other requests using + // :ref:`timeout ` or its default. + // This can be used to prevent unexpected upstream request timeouts due to potentially long + // time gaps between gRPC request and response in gRPC streaming mode. + // + // .. note:: + // + // If a timeout is specified using :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, it takes + // precedence over `grpc-timeout header `_, when + // both are present. See also + // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, + // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the + // :ref:`retry overview `. + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + MaxGrpcTimeout *durationpb.Duration `protobuf:"bytes,23,opt,name=max_grpc_timeout,json=maxGrpcTimeout,proto3" json:"max_grpc_timeout,omitempty"` + // Deprecated by :ref:`grpc_timeout_header_offset `. + // If present, Envoy will adjust the timeout provided by the “grpc-timeout“ header by subtracting + // the provided duration from the header. This is useful in allowing Envoy to set its global + // timeout to be less than that of the deadline imposed by the calling client, which makes it more + // likely that Envoy will handle the timeout instead of having the call canceled by the client. + // The offset will only be applied if the provided grpc_timeout is greater than the offset. This + // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning + // infinity). + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + GrpcTimeoutOffset *durationpb.Duration `protobuf:"bytes,28,opt,name=grpc_timeout_offset,json=grpcTimeoutOffset,proto3" json:"grpc_timeout_offset,omitempty"` + UpgradeConfigs []*RouteAction_UpgradeConfig `protobuf:"bytes,25,rep,name=upgrade_configs,json=upgradeConfigs,proto3" json:"upgrade_configs,omitempty"` + // If present, Envoy will try to follow an upstream redirect response instead of proxying the + // response back to the downstream. An upstream redirect response is defined + // by :ref:`redirect_response_codes + // `. 
+ InternalRedirectPolicy *InternalRedirectPolicy `protobuf:"bytes,34,opt,name=internal_redirect_policy,json=internalRedirectPolicy,proto3" json:"internal_redirect_policy,omitempty"` + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + InternalRedirectAction RouteAction_InternalRedirectAction `protobuf:"varint,26,opt,name=internal_redirect_action,json=internalRedirectAction,proto3,enum=envoy.config.route.v3.RouteAction_InternalRedirectAction" json:"internal_redirect_action,omitempty"` + // An internal redirect is handled, iff the number of previous internal redirects that a + // downstream request has encountered is lower than this value, and + // :ref:`internal_redirect_action ` + // is set to :ref:`HANDLE_INTERNAL_REDIRECT + // ` + // In the case where a downstream request is bounced among multiple routes by internal redirect, + // the first route that hits this threshold, or has + // :ref:`internal_redirect_action ` + // set to + // :ref:`PASS_THROUGH_INTERNAL_REDIRECT + // ` + // will pass the redirect back to downstream. + // + // If not specified, at most one redirect will be followed. + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + MaxInternalRedirects *wrapperspb.UInt32Value `protobuf:"bytes,31,opt,name=max_internal_redirects,json=maxInternalRedirects,proto3" json:"max_internal_redirects,omitempty"` + // Indicates that the route has a hedge policy. Note that if this is set, + // it'll take precedence over the virtual host level hedge policy entirely + // (e.g.: policies are not merged, most internal one becomes the enforced policy). + HedgePolicy *HedgePolicy `protobuf:"bytes,27,opt,name=hedge_policy,json=hedgePolicy,proto3" json:"hedge_policy,omitempty"` + // Specifies the maximum stream duration for this route. + MaxStreamDuration *RouteAction_MaxStreamDuration `protobuf:"bytes,36,opt,name=max_stream_duration,json=maxStreamDuration,proto3" json:"max_stream_duration,omitempty"` +} + +func (x *RouteAction) Reset() { + *x = RouteAction{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteAction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteAction) ProtoMessage() {} + +func (x *RouteAction) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteAction.ProtoReflect.Descriptor instead. 
+func (*RouteAction) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8} +} + +func (m *RouteAction) GetClusterSpecifier() isRouteAction_ClusterSpecifier { + if m != nil { + return m.ClusterSpecifier + } + return nil +} + +func (x *RouteAction) GetCluster() string { + if x, ok := x.GetClusterSpecifier().(*RouteAction_Cluster); ok { + return x.Cluster + } + return "" +} + +func (x *RouteAction) GetClusterHeader() string { + if x, ok := x.GetClusterSpecifier().(*RouteAction_ClusterHeader); ok { + return x.ClusterHeader + } + return "" +} + +func (x *RouteAction) GetWeightedClusters() *WeightedCluster { + if x, ok := x.GetClusterSpecifier().(*RouteAction_WeightedClusters); ok { + return x.WeightedClusters + } + return nil +} + +func (x *RouteAction) GetClusterSpecifierPlugin() string { + if x, ok := x.GetClusterSpecifier().(*RouteAction_ClusterSpecifierPlugin); ok { + return x.ClusterSpecifierPlugin + } + return "" +} + +func (x *RouteAction) GetInlineClusterSpecifierPlugin() *ClusterSpecifierPlugin { + if x, ok := x.GetClusterSpecifier().(*RouteAction_InlineClusterSpecifierPlugin); ok { + return x.InlineClusterSpecifierPlugin + } + return nil +} + +func (x *RouteAction) GetClusterNotFoundResponseCode() RouteAction_ClusterNotFoundResponseCode { + if x != nil { + return x.ClusterNotFoundResponseCode + } + return RouteAction_SERVICE_UNAVAILABLE +} + +func (x *RouteAction) GetMetadataMatch() *v31.Metadata { + if x != nil { + return x.MetadataMatch + } + return nil +} + +func (x *RouteAction) GetPrefixRewrite() string { + if x != nil { + return x.PrefixRewrite + } + return "" +} + +func (x *RouteAction) GetRegexRewrite() *v32.RegexMatchAndSubstitute { + if x != nil { + return x.RegexRewrite + } + return nil +} + +func (x *RouteAction) GetPathRewritePolicy() *v31.TypedExtensionConfig { + if x != nil { + return x.PathRewritePolicy + } + return nil +} + +func (m *RouteAction) GetHostRewriteSpecifier() isRouteAction_HostRewriteSpecifier { + if m != nil { + return m.HostRewriteSpecifier + } + return nil +} + +func (x *RouteAction) GetHostRewriteLiteral() string { + if x, ok := x.GetHostRewriteSpecifier().(*RouteAction_HostRewriteLiteral); ok { + return x.HostRewriteLiteral + } + return "" +} + +func (x *RouteAction) GetAutoHostRewrite() *wrapperspb.BoolValue { + if x, ok := x.GetHostRewriteSpecifier().(*RouteAction_AutoHostRewrite); ok { + return x.AutoHostRewrite + } + return nil +} + +func (x *RouteAction) GetHostRewriteHeader() string { + if x, ok := x.GetHostRewriteSpecifier().(*RouteAction_HostRewriteHeader); ok { + return x.HostRewriteHeader + } + return "" +} + +func (x *RouteAction) GetHostRewritePathRegex() *v32.RegexMatchAndSubstitute { + if x, ok := x.GetHostRewriteSpecifier().(*RouteAction_HostRewritePathRegex); ok { + return x.HostRewritePathRegex + } + return nil +} + +func (x *RouteAction) GetAppendXForwardedHost() bool { + if x != nil { + return x.AppendXForwardedHost + } + return false +} + +func (x *RouteAction) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout + } + return nil +} + +func (x *RouteAction) GetIdleTimeout() *durationpb.Duration { + if x != nil { + return x.IdleTimeout + } + return nil +} + +func (x *RouteAction) GetEarlyDataPolicy() *v31.TypedExtensionConfig { + if x != nil { + return x.EarlyDataPolicy + } + return nil +} + +func (x *RouteAction) GetRetryPolicy() *RetryPolicy { + if x != nil { + return x.RetryPolicy + } + return nil +} + +func (x *RouteAction) GetRetryPolicyTypedConfig() 
*anypb.Any { + if x != nil { + return x.RetryPolicyTypedConfig + } + return nil +} + +func (x *RouteAction) GetRequestMirrorPolicies() []*RouteAction_RequestMirrorPolicy { + if x != nil { + return x.RequestMirrorPolicies + } + return nil +} + +func (x *RouteAction) GetPriority() v31.RoutingPriority { + if x != nil { + return x.Priority + } + return v31.RoutingPriority(0) +} + +func (x *RouteAction) GetRateLimits() []*RateLimit { + if x != nil { + return x.RateLimits + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. +func (x *RouteAction) GetIncludeVhRateLimits() *wrapperspb.BoolValue { + if x != nil { + return x.IncludeVhRateLimits + } + return nil +} + +func (x *RouteAction) GetHashPolicy() []*RouteAction_HashPolicy { + if x != nil { + return x.HashPolicy + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. +func (x *RouteAction) GetCors() *CorsPolicy { + if x != nil { + return x.Cors + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. +func (x *RouteAction) GetMaxGrpcTimeout() *durationpb.Duration { + if x != nil { + return x.MaxGrpcTimeout + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. +func (x *RouteAction) GetGrpcTimeoutOffset() *durationpb.Duration { + if x != nil { + return x.GrpcTimeoutOffset + } + return nil +} + +func (x *RouteAction) GetUpgradeConfigs() []*RouteAction_UpgradeConfig { + if x != nil { + return x.UpgradeConfigs + } + return nil +} + +func (x *RouteAction) GetInternalRedirectPolicy() *InternalRedirectPolicy { + if x != nil { + return x.InternalRedirectPolicy + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. +func (x *RouteAction) GetInternalRedirectAction() RouteAction_InternalRedirectAction { + if x != nil { + return x.InternalRedirectAction + } + return RouteAction_PASS_THROUGH_INTERNAL_REDIRECT +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. +func (x *RouteAction) GetMaxInternalRedirects() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxInternalRedirects + } + return nil +} + +func (x *RouteAction) GetHedgePolicy() *HedgePolicy { + if x != nil { + return x.HedgePolicy + } + return nil +} + +func (x *RouteAction) GetMaxStreamDuration() *RouteAction_MaxStreamDuration { + if x != nil { + return x.MaxStreamDuration + } + return nil +} + +type isRouteAction_ClusterSpecifier interface { + isRouteAction_ClusterSpecifier() +} + +type RouteAction_Cluster struct { + // Indicates the upstream cluster to which the request should be routed + // to. + Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3,oneof"` +} + +type RouteAction_ClusterHeader struct { + // Envoy will determine the cluster to route to by reading the value of the + // HTTP header named by cluster_header from the request headers. If the + // header is not found or the referenced cluster does not exist, Envoy will + // return a 404 response. + // + // .. attention:: + // + // Internally, Envoy always uses the HTTP/2 ``:authority`` header to represent the HTTP/1 + // ``Host`` header. Thus, if attempting to match on ``Host``, match on ``:authority`` instead. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. 
+ ClusterHeader string `protobuf:"bytes,2,opt,name=cluster_header,json=clusterHeader,proto3,oneof"` +} + +type RouteAction_WeightedClusters struct { + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. See + // :ref:`traffic splitting ` + // for additional documentation. + WeightedClusters *WeightedCluster `protobuf:"bytes,3,opt,name=weighted_clusters,json=weightedClusters,proto3,oneof"` +} + +type RouteAction_ClusterSpecifierPlugin struct { + // Name of the cluster specifier plugin to use to determine the cluster for requests on this route. + // The cluster specifier plugin name must be defined in the associated + // :ref:`cluster specifier plugins ` + // in the :ref:`name ` field. + ClusterSpecifierPlugin string `protobuf:"bytes,37,opt,name=cluster_specifier_plugin,json=clusterSpecifierPlugin,proto3,oneof"` +} + +type RouteAction_InlineClusterSpecifierPlugin struct { + // Custom cluster specifier plugin configuration to use to determine the cluster for requests + // on this route. + InlineClusterSpecifierPlugin *ClusterSpecifierPlugin `protobuf:"bytes,39,opt,name=inline_cluster_specifier_plugin,json=inlineClusterSpecifierPlugin,proto3,oneof"` +} + +func (*RouteAction_Cluster) isRouteAction_ClusterSpecifier() {} + +func (*RouteAction_ClusterHeader) isRouteAction_ClusterSpecifier() {} + +func (*RouteAction_WeightedClusters) isRouteAction_ClusterSpecifier() {} + +func (*RouteAction_ClusterSpecifierPlugin) isRouteAction_ClusterSpecifier() {} + +func (*RouteAction_InlineClusterSpecifierPlugin) isRouteAction_ClusterSpecifier() {} + +type isRouteAction_HostRewriteSpecifier interface { + isRouteAction_HostRewriteSpecifier() +} + +type RouteAction_HostRewriteLiteral struct { + // Indicates that during forwarding, the host header will be swapped with + // this value. Using this option will append the + // :ref:`config_http_conn_man_headers_x-forwarded-host` header if + // :ref:`append_x_forwarded_host ` + // is set. + HostRewriteLiteral string `protobuf:"bytes,6,opt,name=host_rewrite_literal,json=hostRewriteLiteral,proto3,oneof"` +} + +type RouteAction_AutoHostRewrite struct { + // Indicates that during forwarding, the host header will be swapped with + // the hostname of the upstream host chosen by the cluster manager. This + // option is applicable only when the destination cluster for a route is of + // type “strict_dns“ or “logical_dns“, + // or when :ref:`hostname ` + // field is not empty. Setting this to true with other cluster types + // has no effect. Using this option will append the + // :ref:`config_http_conn_man_headers_x-forwarded-host` header if + // :ref:`append_x_forwarded_host ` + // is set. + AutoHostRewrite *wrapperspb.BoolValue `protobuf:"bytes,7,opt,name=auto_host_rewrite,json=autoHostRewrite,proto3,oneof"` +} + +type RouteAction_HostRewriteHeader struct { + // Indicates that during forwarding, the host header will be swapped with the content of given + // downstream or :ref:`custom ` header. + // If header value is empty, host header is left intact. Using this option will append the + // :ref:`config_http_conn_man_headers_x-forwarded-host` header if + // :ref:`append_x_forwarded_host ` + // is set. + // + // .. attention:: + // + // Pay attention to the potential security implications of using this option. Provided header + // must come from trusted source. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. 
+ HostRewriteHeader string `protobuf:"bytes,29,opt,name=host_rewrite_header,json=hostRewriteHeader,proto3,oneof"` +} + +type RouteAction_HostRewritePathRegex struct { + // Indicates that during forwarding, the host header will be swapped with + // the result of the regex substitution executed on path value with query and fragment removed. + // This is useful for transitioning variable content between path segment and subdomain. + // Using this option will append the + // :ref:`config_http_conn_man_headers_x-forwarded-host` header if + // :ref:`append_x_forwarded_host ` + // is set. + // + // For example with the following config: + // + // .. code-block:: yaml + // + // host_rewrite_path_regex: + // pattern: + // google_re2: {} + // regex: "^/(.+)/.+$" + // substitution: \1 + // + // Would rewrite the host header to “envoyproxy.io“ given the path “/envoyproxy.io/some/path“. + HostRewritePathRegex *v32.RegexMatchAndSubstitute `protobuf:"bytes,35,opt,name=host_rewrite_path_regex,json=hostRewritePathRegex,proto3,oneof"` +} + +func (*RouteAction_HostRewriteLiteral) isRouteAction_HostRewriteSpecifier() {} + +func (*RouteAction_AutoHostRewrite) isRouteAction_HostRewriteSpecifier() {} + +func (*RouteAction_HostRewriteHeader) isRouteAction_HostRewriteSpecifier() {} + +func (*RouteAction_HostRewritePathRegex) isRouteAction_HostRewriteSpecifier() {} + +// HTTP retry :ref:`architecture overview `. +// [#next-free-field: 14] +type RetryPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the conditions under which retry takes place. These are the same + // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and + // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. + RetryOn string `protobuf:"bytes,1,opt,name=retry_on,json=retryOn,proto3" json:"retry_on,omitempty"` + // Specifies the allowed number of retries. This parameter is optional and + // defaults to 1. These are the same conditions documented for + // :ref:`config_http_filters_router_x-envoy-max-retries`. + NumRetries *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=num_retries,json=numRetries,proto3" json:"num_retries,omitempty"` + // Specifies a non-zero upstream timeout per retry attempt (including the initial attempt). This + // parameter is optional. The same conditions documented for + // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply. + // + // .. note:: + // + // If left unspecified, Envoy will use the global + // :ref:`route timeout ` for the request. + // Consequently, when using a :ref:`5xx ` based + // retry policy, a request that times out will not be retried as the total timeout budget + // would have been exhausted. + PerTryTimeout *durationpb.Duration `protobuf:"bytes,3,opt,name=per_try_timeout,json=perTryTimeout,proto3" json:"per_try_timeout,omitempty"` + // Specifies an upstream idle timeout per retry attempt (including the initial attempt). This + // parameter is optional and if absent there is no per try idle timeout. The semantics of the per + // try idle timeout are similar to the + // :ref:`route idle timeout ` and + // :ref:`stream idle timeout + // ` + // both enforced by the HTTP connection manager. The difference is that this idle timeout + // is enforced by the router for each individual attempt and thus after all previous filters have + // run, as opposed to *before* all previous filters run for the other idle timeouts. 
This timeout + // is useful in cases in which total request timeout is bounded by a number of retries and a + // :ref:`per_try_timeout `, but + // there is a desire to ensure each try is making incremental progress. Note also that similar + // to :ref:`per_try_timeout `, + // this idle timeout does not start until after both the entire request has been received by the + // router *and* a connection pool connection has been obtained. Unlike + // :ref:`per_try_timeout `, + // the idle timer continues once the response starts streaming back to the downstream client. + // This ensures that response data continues to make progress without using one of the HTTP + // connection manager idle timeouts. + PerTryIdleTimeout *durationpb.Duration `protobuf:"bytes,13,opt,name=per_try_idle_timeout,json=perTryIdleTimeout,proto3" json:"per_try_idle_timeout,omitempty"` + // Specifies an implementation of a RetryPriority which is used to determine the + // distribution of load across priorities used for retries. Refer to + // :ref:`retry plugin configuration ` for more details. + RetryPriority *RetryPolicy_RetryPriority `protobuf:"bytes,4,opt,name=retry_priority,json=retryPriority,proto3" json:"retry_priority,omitempty"` + // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host + // for retries. If any of the predicates reject the host, host selection will be reattempted. + // Refer to :ref:`retry plugin configuration ` for more + // details. + RetryHostPredicate []*RetryPolicy_RetryHostPredicate `protobuf:"bytes,5,rep,name=retry_host_predicate,json=retryHostPredicate,proto3" json:"retry_host_predicate,omitempty"` + // Retry options predicates that will be applied prior to retrying a request. These predicates + // allow customizing request behavior between retries. + // [#comment: add [#extension-category: envoy.retry_options_predicates] when there are built-in extensions] + RetryOptionsPredicates []*v31.TypedExtensionConfig `protobuf:"bytes,12,rep,name=retry_options_predicates,json=retryOptionsPredicates,proto3" json:"retry_options_predicates,omitempty"` + // The maximum number of times host selection will be reattempted before giving up, at which + // point the host that was last selected will be routed to. If unspecified, this will default to + // retrying once. + HostSelectionRetryMaxAttempts int64 `protobuf:"varint,6,opt,name=host_selection_retry_max_attempts,json=hostSelectionRetryMaxAttempts,proto3" json:"host_selection_retry_max_attempts,omitempty"` + // HTTP status codes that should trigger a retry in addition to those specified by retry_on. + RetriableStatusCodes []uint32 `protobuf:"varint,7,rep,packed,name=retriable_status_codes,json=retriableStatusCodes,proto3" json:"retriable_status_codes,omitempty"` + // Specifies parameters that control exponential retry back off. This parameter is optional, in which case the + // default base interval is 25 milliseconds or, if set, the current value of the + // “upstream.base_retry_backoff_ms“ runtime parameter. The default maximum interval is 10 times + // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries` + // describes Envoy's back-off algorithm. + RetryBackOff *RetryPolicy_RetryBackOff `protobuf:"bytes,8,opt,name=retry_back_off,json=retryBackOff,proto3" json:"retry_back_off,omitempty"` + // Specifies parameters that control a retry back-off strategy that is used + // when the request is rate limited by the upstream server. 
The server may + // return a response header like “Retry-After“ or “X-RateLimit-Reset“ to + // provide feedback to the client on how long to wait before retrying. If + // configured, this back-off strategy will be used instead of the + // default exponential back off strategy (configured using “retry_back_off“) + // whenever a response includes the matching headers. + RateLimitedRetryBackOff *RetryPolicy_RateLimitedRetryBackOff `protobuf:"bytes,11,opt,name=rate_limited_retry_back_off,json=rateLimitedRetryBackOff,proto3" json:"rate_limited_retry_back_off,omitempty"` + // HTTP response headers that trigger a retry if present in the response. A retry will be + // triggered if any of the header matches match the upstream response headers. + // The field is only consulted if 'retriable-headers' retry policy is active. + RetriableHeaders []*HeaderMatcher `protobuf:"bytes,9,rep,name=retriable_headers,json=retriableHeaders,proto3" json:"retriable_headers,omitempty"` + // HTTP headers which must be present in the request for retries to be attempted. + RetriableRequestHeaders []*HeaderMatcher `protobuf:"bytes,10,rep,name=retriable_request_headers,json=retriableRequestHeaders,proto3" json:"retriable_request_headers,omitempty"` +} + +func (x *RetryPolicy) Reset() { + *x = RetryPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryPolicy) ProtoMessage() {} + +func (x *RetryPolicy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryPolicy.ProtoReflect.Descriptor instead. 
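For readers wiring route configuration up from Go rather than YAML, the RetryPolicy fields documented above map directly onto this generated struct. A minimal sketch, assuming the usual go-control-plane import path for this package and the protobuf well-known-type helpers; the package name, helper name, and values are illustrative, not part of the vendored file:

package routecfgexample

import (
	"time"

	routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// FiveXXRetryPolicy retries 5xx responses and connection resets up to three
// times, bounds each attempt to 250ms, and lists 503 as a retriable status
// code (which only takes effect because retry_on includes
// "retriable-status-codes").
func FiveXXRetryPolicy() *routev3.RetryPolicy {
	return &routev3.RetryPolicy{
		RetryOn:              "5xx,reset,retriable-status-codes",
		NumRetries:           wrapperspb.UInt32(3),
		PerTryTimeout:        durationpb.New(250 * time.Millisecond),
		RetriableStatusCodes: []uint32{503},
	}
}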
+func (*RetryPolicy) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{9} +} + +func (x *RetryPolicy) GetRetryOn() string { + if x != nil { + return x.RetryOn + } + return "" +} + +func (x *RetryPolicy) GetNumRetries() *wrapperspb.UInt32Value { + if x != nil { + return x.NumRetries + } + return nil +} + +func (x *RetryPolicy) GetPerTryTimeout() *durationpb.Duration { + if x != nil { + return x.PerTryTimeout + } + return nil +} + +func (x *RetryPolicy) GetPerTryIdleTimeout() *durationpb.Duration { + if x != nil { + return x.PerTryIdleTimeout + } + return nil +} + +func (x *RetryPolicy) GetRetryPriority() *RetryPolicy_RetryPriority { + if x != nil { + return x.RetryPriority + } + return nil +} + +func (x *RetryPolicy) GetRetryHostPredicate() []*RetryPolicy_RetryHostPredicate { + if x != nil { + return x.RetryHostPredicate + } + return nil +} + +func (x *RetryPolicy) GetRetryOptionsPredicates() []*v31.TypedExtensionConfig { + if x != nil { + return x.RetryOptionsPredicates + } + return nil +} + +func (x *RetryPolicy) GetHostSelectionRetryMaxAttempts() int64 { + if x != nil { + return x.HostSelectionRetryMaxAttempts + } + return 0 +} + +func (x *RetryPolicy) GetRetriableStatusCodes() []uint32 { + if x != nil { + return x.RetriableStatusCodes + } + return nil +} + +func (x *RetryPolicy) GetRetryBackOff() *RetryPolicy_RetryBackOff { + if x != nil { + return x.RetryBackOff + } + return nil +} + +func (x *RetryPolicy) GetRateLimitedRetryBackOff() *RetryPolicy_RateLimitedRetryBackOff { + if x != nil { + return x.RateLimitedRetryBackOff + } + return nil +} + +func (x *RetryPolicy) GetRetriableHeaders() []*HeaderMatcher { + if x != nil { + return x.RetriableHeaders + } + return nil +} + +func (x *RetryPolicy) GetRetriableRequestHeaders() []*HeaderMatcher { + if x != nil { + return x.RetriableRequestHeaders + } + return nil +} + +// HTTP request hedging :ref:`architecture overview `. +type HedgePolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the number of initial requests that should be sent upstream. + // Must be at least 1. + // Defaults to 1. + // [#not-implemented-hide:] + InitialRequests *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=initial_requests,json=initialRequests,proto3" json:"initial_requests,omitempty"` + // Specifies a probability that an additional upstream request should be sent + // on top of what is specified by initial_requests. + // Defaults to 0. + // [#not-implemented-hide:] + AdditionalRequestChance *v33.FractionalPercent `protobuf:"bytes,2,opt,name=additional_request_chance,json=additionalRequestChance,proto3" json:"additional_request_chance,omitempty"` + // Indicates that a hedged request should be sent when the per-try timeout is hit. + // This means that a retry will be issued without resetting the original request, leaving multiple upstream requests in flight. + // The first request to complete successfully will be the one returned to the caller. + // + // - At any time, a successful response (i.e. not triggering any of the retry-on conditions) would be returned to the client. + // - Before per-try timeout, an error response (per retry-on conditions) would be retried immediately or returned ot the client + // if there are no more retries left. + // - After per-try timeout, an error response would be discarded, as a retry in the form of a hedged request is already in progress. 
+ // + // Note: For this to have effect, you must have a :ref:`RetryPolicy ` that retries at least + // one error code and specifies a maximum number of retries. + // + // Defaults to false. + HedgeOnPerTryTimeout bool `protobuf:"varint,3,opt,name=hedge_on_per_try_timeout,json=hedgeOnPerTryTimeout,proto3" json:"hedge_on_per_try_timeout,omitempty"` +} + +func (x *HedgePolicy) Reset() { + *x = HedgePolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HedgePolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HedgePolicy) ProtoMessage() {} + +func (x *HedgePolicy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HedgePolicy.ProtoReflect.Descriptor instead. +func (*HedgePolicy) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{10} +} + +func (x *HedgePolicy) GetInitialRequests() *wrapperspb.UInt32Value { + if x != nil { + return x.InitialRequests + } + return nil +} + +func (x *HedgePolicy) GetAdditionalRequestChance() *v33.FractionalPercent { + if x != nil { + return x.AdditionalRequestChance + } + return nil +} + +func (x *HedgePolicy) GetHedgeOnPerTryTimeout() bool { + if x != nil { + return x.HedgeOnPerTryTimeout + } + return false +} + +// [#next-free-field: 10] +type RedirectAction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // When the scheme redirection take place, the following rules apply: + // 1. If the source URI scheme is “http“ and the port is explicitly + // set to “:80“, the port will be removed after the redirection + // 2. If the source URI scheme is “https“ and the port is explicitly + // set to “:443“, the port will be removed after the redirection + // + // Types that are assignable to SchemeRewriteSpecifier: + // + // *RedirectAction_HttpsRedirect + // *RedirectAction_SchemeRedirect + SchemeRewriteSpecifier isRedirectAction_SchemeRewriteSpecifier `protobuf_oneof:"scheme_rewrite_specifier"` + // The host portion of the URL will be swapped with this value. + HostRedirect string `protobuf:"bytes,1,opt,name=host_redirect,json=hostRedirect,proto3" json:"host_redirect,omitempty"` + // The port value of the URL will be swapped with this value. + PortRedirect uint32 `protobuf:"varint,8,opt,name=port_redirect,json=portRedirect,proto3" json:"port_redirect,omitempty"` + // Types that are assignable to PathRewriteSpecifier: + // + // *RedirectAction_PathRedirect + // *RedirectAction_PrefixRewrite + // *RedirectAction_RegexRewrite + PathRewriteSpecifier isRedirectAction_PathRewriteSpecifier `protobuf_oneof:"path_rewrite_specifier"` + // The HTTP status code to use in the redirect response. The default response + // code is MOVED_PERMANENTLY (301). + ResponseCode RedirectAction_RedirectResponseCode `protobuf:"varint,3,opt,name=response_code,json=responseCode,proto3,enum=envoy.config.route.v3.RedirectAction_RedirectResponseCode" json:"response_code,omitempty"` + // Indicates that during redirection, the query portion of the URL will + // be removed. Default value is false. 
+ StripQuery bool `protobuf:"varint,6,opt,name=strip_query,json=stripQuery,proto3" json:"strip_query,omitempty"` +} + +func (x *RedirectAction) Reset() { + *x = RedirectAction{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RedirectAction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RedirectAction) ProtoMessage() {} + +func (x *RedirectAction) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RedirectAction.ProtoReflect.Descriptor instead. +func (*RedirectAction) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{11} +} + +func (m *RedirectAction) GetSchemeRewriteSpecifier() isRedirectAction_SchemeRewriteSpecifier { + if m != nil { + return m.SchemeRewriteSpecifier + } + return nil +} + +func (x *RedirectAction) GetHttpsRedirect() bool { + if x, ok := x.GetSchemeRewriteSpecifier().(*RedirectAction_HttpsRedirect); ok { + return x.HttpsRedirect + } + return false +} + +func (x *RedirectAction) GetSchemeRedirect() string { + if x, ok := x.GetSchemeRewriteSpecifier().(*RedirectAction_SchemeRedirect); ok { + return x.SchemeRedirect + } + return "" +} + +func (x *RedirectAction) GetHostRedirect() string { + if x != nil { + return x.HostRedirect + } + return "" +} + +func (x *RedirectAction) GetPortRedirect() uint32 { + if x != nil { + return x.PortRedirect + } + return 0 +} + +func (m *RedirectAction) GetPathRewriteSpecifier() isRedirectAction_PathRewriteSpecifier { + if m != nil { + return m.PathRewriteSpecifier + } + return nil +} + +func (x *RedirectAction) GetPathRedirect() string { + if x, ok := x.GetPathRewriteSpecifier().(*RedirectAction_PathRedirect); ok { + return x.PathRedirect + } + return "" +} + +func (x *RedirectAction) GetPrefixRewrite() string { + if x, ok := x.GetPathRewriteSpecifier().(*RedirectAction_PrefixRewrite); ok { + return x.PrefixRewrite + } + return "" +} + +func (x *RedirectAction) GetRegexRewrite() *v32.RegexMatchAndSubstitute { + if x, ok := x.GetPathRewriteSpecifier().(*RedirectAction_RegexRewrite); ok { + return x.RegexRewrite + } + return nil +} + +func (x *RedirectAction) GetResponseCode() RedirectAction_RedirectResponseCode { + if x != nil { + return x.ResponseCode + } + return RedirectAction_MOVED_PERMANENTLY +} + +func (x *RedirectAction) GetStripQuery() bool { + if x != nil { + return x.StripQuery + } + return false +} + +type isRedirectAction_SchemeRewriteSpecifier interface { + isRedirectAction_SchemeRewriteSpecifier() +} + +type RedirectAction_HttpsRedirect struct { + // The scheme portion of the URL will be swapped with "https". + HttpsRedirect bool `protobuf:"varint,4,opt,name=https_redirect,json=httpsRedirect,proto3,oneof"` +} + +type RedirectAction_SchemeRedirect struct { + // The scheme portion of the URL will be swapped with this value. 
+ SchemeRedirect string `protobuf:"bytes,7,opt,name=scheme_redirect,json=schemeRedirect,proto3,oneof"` +} + +func (*RedirectAction_HttpsRedirect) isRedirectAction_SchemeRewriteSpecifier() {} + +func (*RedirectAction_SchemeRedirect) isRedirectAction_SchemeRewriteSpecifier() {} + +type isRedirectAction_PathRewriteSpecifier interface { + isRedirectAction_PathRewriteSpecifier() +} + +type RedirectAction_PathRedirect struct { + // The path portion of the URL will be swapped with this value. + // Please note that query string in path_redirect will override the + // request's query string and will not be stripped. + // + // For example, let's say we have the following routes: + // + // - match: { path: "/old-path-1" } + // redirect: { path_redirect: "/new-path-1" } + // - match: { path: "/old-path-2" } + // redirect: { path_redirect: "/new-path-2", strip-query: "true" } + // - match: { path: "/old-path-3" } + // redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" } + // + // 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1" + // 2. if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2" + // 3. if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1" + PathRedirect string `protobuf:"bytes,2,opt,name=path_redirect,json=pathRedirect,proto3,oneof"` +} + +type RedirectAction_PrefixRewrite struct { + // Indicates that during redirection, the matched prefix (or path) + // should be swapped with this value. This option allows redirect URLs be dynamically created + // based on the request. + // + // .. attention:: + // + // Pay attention to the use of trailing slashes as mentioned in + // :ref:`RouteAction's prefix_rewrite `. + PrefixRewrite string `protobuf:"bytes,5,opt,name=prefix_rewrite,json=prefixRewrite,proto3,oneof"` +} + +type RedirectAction_RegexRewrite struct { + // Indicates that during redirect, portions of the path that match the + // pattern should be rewritten, even allowing the substitution of capture + // groups from the pattern into the new path as specified by the rewrite + // substitution string. This is useful to allow application paths to be + // rewritten in a way that is aware of segments with variable content like + // identifiers. + // + // Examples using Google's `RE2 `_ engine: + // + // - The path pattern “^/service/([^/]+)(/.*)$“ paired with a substitution + // string of “\2/instance/\1“ would transform “/service/foo/v1/api“ + // into “/v1/api/instance/foo“. + // + // - The pattern “one“ paired with a substitution string of “two“ would + // transform “/xxx/one/yyy/one/zzz“ into “/xxx/two/yyy/two/zzz“. + // + // - The pattern “^(.*?)one(.*)$“ paired with a substitution string of + // “\1two\2“ would replace only the first occurrence of “one“, + // transforming path “/xxx/one/yyy/one/zzz“ into “/xxx/two/yyy/one/zzz“. + // + // - The pattern “(?i)/xxx/“ paired with a substitution string of “/yyy/“ + // would do a case-insensitive match and transform path “/aaa/XxX/bbb“ to + // “/aaa/yyy/bbb“. 
+ RegexRewrite *v32.RegexMatchAndSubstitute `protobuf:"bytes,9,opt,name=regex_rewrite,json=regexRewrite,proto3,oneof"` +} + +func (*RedirectAction_PathRedirect) isRedirectAction_PathRewriteSpecifier() {} + +func (*RedirectAction_PrefixRewrite) isRedirectAction_PathRewriteSpecifier() {} + +func (*RedirectAction_RegexRewrite) isRedirectAction_PathRewriteSpecifier() {} + +type DirectResponseAction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the HTTP response status to be returned. + Status uint32 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` + // Specifies the content of the response body. If this setting is omitted, + // no body is included in the generated response. + // + // .. note:: + // + // Headers can be specified using ``response_headers_to_add`` in the enclosing + // :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` or + // :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`. + Body *v31.DataSource `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` +} + +func (x *DirectResponseAction) Reset() { + *x = DirectResponseAction{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DirectResponseAction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DirectResponseAction) ProtoMessage() {} + +func (x *DirectResponseAction) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DirectResponseAction.ProtoReflect.Descriptor instead. +func (*DirectResponseAction) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{12} +} + +func (x *DirectResponseAction) GetStatus() uint32 { + if x != nil { + return x.Status + } + return 0 +} + +func (x *DirectResponseAction) GetBody() *v31.DataSource { + if x != nil { + return x.Body + } + return nil +} + +// [#not-implemented-hide:] +type NonForwardingAction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *NonForwardingAction) Reset() { + *x = NonForwardingAction{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NonForwardingAction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NonForwardingAction) ProtoMessage() {} + +func (x *NonForwardingAction) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NonForwardingAction.ProtoReflect.Descriptor instead. 
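The RedirectAction oneofs shown above (scheme rewrite, path rewrite, response code) compose in the obvious way when the message is built from Go. A small compilable sketch under the same assumed import path; the function name and paths are illustrative only:

package routecfgexample

import (
	routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
)

// PermanentHTTPSRedirect swaps the scheme to https, rewrites the path to
// /new-path, drops the request's query string (StripQuery, and the rewritten
// path carries no query of its own), and answers with 301.
func PermanentHTTPSRedirect() *routev3.RedirectAction {
	return &routev3.RedirectAction{
		SchemeRewriteSpecifier: &routev3.RedirectAction_HttpsRedirect{HttpsRedirect: true},
		PathRewriteSpecifier:   &routev3.RedirectAction_PathRedirect{PathRedirect: "/new-path"},
		ResponseCode:           routev3.RedirectAction_MOVED_PERMANENTLY,
		StripQuery:             true,
	}
}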
+func (*NonForwardingAction) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{13} +} + +type Decorator struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The operation name associated with the request matched to this route. If tracing is + // enabled, this information will be used as the span name reported for this request. + // + // .. note:: + // + // For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden + // by the :ref:`x-envoy-decorator-operation + // ` header. + Operation string `protobuf:"bytes,1,opt,name=operation,proto3" json:"operation,omitempty"` + // Whether the decorated details should be propagated to the other party. The default is true. + Propagate *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=propagate,proto3" json:"propagate,omitempty"` +} + +func (x *Decorator) Reset() { + *x = Decorator{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decorator) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decorator) ProtoMessage() {} + +func (x *Decorator) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decorator.ProtoReflect.Descriptor instead. +func (*Decorator) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{14} +} + +func (x *Decorator) GetOperation() string { + if x != nil { + return x.Operation + } + return "" +} + +func (x *Decorator) GetPropagate() *wrapperspb.BoolValue { + if x != nil { + return x.Propagate + } + return nil +} + +type Tracing struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Target percentage of requests managed by this HTTP connection manager that will be force + // traced if the :ref:`x-client-trace-id ` + // header is set. This field is a direct analog for the runtime variable + // 'tracing.client_enabled' in the :ref:`HTTP Connection Manager + // `. + // Default: 100% + ClientSampling *v33.FractionalPercent `protobuf:"bytes,1,opt,name=client_sampling,json=clientSampling,proto3" json:"client_sampling,omitempty"` + // Target percentage of requests managed by this HTTP connection manager that will be randomly + // selected for trace generation, if not requested by the client or not forced. This field is + // a direct analog for the runtime variable 'tracing.random_sampling' in the + // :ref:`HTTP Connection Manager `. + // Default: 100% + RandomSampling *v33.FractionalPercent `protobuf:"bytes,2,opt,name=random_sampling,json=randomSampling,proto3" json:"random_sampling,omitempty"` + // Target percentage of requests managed by this HTTP connection manager that will be traced + // after all other sampling checks have been applied (client-directed, force tracing, random + // sampling). This field functions as an upper limit on the total configured sampling rate. 
For + // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% + // of client requests with the appropriate headers to be force traced. This field is a direct + // analog for the runtime variable 'tracing.global_enabled' in the + // :ref:`HTTP Connection Manager `. + // Default: 100% + OverallSampling *v33.FractionalPercent `protobuf:"bytes,3,opt,name=overall_sampling,json=overallSampling,proto3" json:"overall_sampling,omitempty"` + // A list of custom tags with unique tag name to create tags for the active span. + // It will take effect after merging with the :ref:`corresponding configuration + // ` + // configured in the HTTP connection manager. If two tags with the same name are configured + // each in the HTTP connection manager and the route level, the one configured here takes + // priority. + CustomTags []*v34.CustomTag `protobuf:"bytes,4,rep,name=custom_tags,json=customTags,proto3" json:"custom_tags,omitempty"` +} + +func (x *Tracing) Reset() { + *x = Tracing{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Tracing) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Tracing) ProtoMessage() {} + +func (x *Tracing) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Tracing.ProtoReflect.Descriptor instead. +func (*Tracing) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{15} +} + +func (x *Tracing) GetClientSampling() *v33.FractionalPercent { + if x != nil { + return x.ClientSampling + } + return nil +} + +func (x *Tracing) GetRandomSampling() *v33.FractionalPercent { + if x != nil { + return x.RandomSampling + } + return nil +} + +func (x *Tracing) GetOverallSampling() *v33.FractionalPercent { + if x != nil { + return x.OverallSampling + } + return nil +} + +func (x *Tracing) GetCustomTags() []*v34.CustomTag { + if x != nil { + return x.CustomTags + } + return nil +} + +// A virtual cluster is a way of specifying a regex matching rule against +// certain important endpoints such that statistics are generated explicitly for +// the matched requests. The reason this is useful is that when doing +// prefix/path matching Envoy does not always know what the application +// considers to be an endpoint. Thus, it’s impossible for Envoy to generically +// emit per endpoint statistics. However, often systems have highly critical +// endpoints that they wish to get “perfect” statistics on. Virtual cluster +// statistics are perfect in the sense that they are emitted on the downstream +// side such that they include network level failures. +// +// Documentation for :ref:`virtual cluster statistics `. +// +// .. note:: +// +// Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for +// every application endpoint. This is both not easily maintainable and as well the matching and +// statistics output are not free. 
+type VirtualCluster struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies a list of header matchers to use for matching requests. Each specified header must + // match. The pseudo-headers “:path“ and “:method“ can be used to match the request path and + // method, respectively. + Headers []*HeaderMatcher `protobuf:"bytes,4,rep,name=headers,proto3" json:"headers,omitempty"` + // Specifies the name of the virtual cluster. The virtual cluster name as well + // as the virtual host name are used when emitting statistics. The statistics are emitted by the + // router filter and are documented :ref:`here `. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *VirtualCluster) Reset() { + *x = VirtualCluster{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VirtualCluster) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VirtualCluster) ProtoMessage() {} + +func (x *VirtualCluster) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VirtualCluster.ProtoReflect.Descriptor instead. +func (*VirtualCluster) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{16} +} + +func (x *VirtualCluster) GetHeaders() []*HeaderMatcher { + if x != nil { + return x.Headers + } + return nil +} + +func (x *VirtualCluster) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Global rate limiting :ref:`architecture overview `. +// Also applies to Local rate limiting :ref:`using descriptors `. +type RateLimit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Refers to the stage set in the filter. The rate limit configuration only + // applies to filters with the same stage number. The default stage number is + // 0. + // + // .. note:: + // + // The filter supports a range of 0 - 10 inclusively for stage numbers. + Stage *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=stage,proto3" json:"stage,omitempty"` + // The key to be set in runtime to disable this rate limit configuration. + DisableKey string `protobuf:"bytes,2,opt,name=disable_key,json=disableKey,proto3" json:"disable_key,omitempty"` + // A list of actions that are to be applied for this rate limit configuration. + // Order matters as the actions are processed sequentially and the descriptor + // is composed by appending descriptor entries in that sequence. If an action + // cannot append a descriptor entry, no descriptor is generated for the + // configuration. See :ref:`composing actions + // ` for additional documentation. + Actions []*RateLimit_Action `protobuf:"bytes,3,rep,name=actions,proto3" json:"actions,omitempty"` + // An optional limit override to be appended to the descriptor produced by this + // rate limit configuration. If the override value is invalid or cannot be resolved + // from metadata, no override is provided. See :ref:`rate limit override + // ` for more information. 
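A VirtualCluster, as defined above, is just a stats name plus a list of HeaderMatcher messages (defined later in this file). A sketch of one that buckets POSTs under a payments prefix; the import aliases, cluster name, and paths are assumptions for illustration:

package routecfgexample

import (
	routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
	matcherv3 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
)

// PaymentsVirtualCluster emits dedicated stats for POST requests whose path
// starts with /api/payments, using the :path and :method pseudo-headers.
func PaymentsVirtualCluster() *routev3.VirtualCluster {
	return &routev3.VirtualCluster{
		Name: "payments_post",
		Headers: []*routev3.HeaderMatcher{
			{
				Name: ":path",
				HeaderMatchSpecifier: &routev3.HeaderMatcher_StringMatch{
					StringMatch: &matcherv3.StringMatcher{
						MatchPattern: &matcherv3.StringMatcher_Prefix{Prefix: "/api/payments"},
					},
				},
			},
			{
				Name: ":method",
				HeaderMatchSpecifier: &routev3.HeaderMatcher_StringMatch{
					StringMatch: &matcherv3.StringMatcher{
						MatchPattern: &matcherv3.StringMatcher_Exact{Exact: "POST"},
					},
				},
			},
		},
	}
}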
+ Limit *RateLimit_Override `protobuf:"bytes,4,opt,name=limit,proto3" json:"limit,omitempty"` +} + +func (x *RateLimit) Reset() { + *x = RateLimit{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit) ProtoMessage() {} + +func (x *RateLimit) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit.ProtoReflect.Descriptor instead. +func (*RateLimit) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17} +} + +func (x *RateLimit) GetStage() *wrapperspb.UInt32Value { + if x != nil { + return x.Stage + } + return nil +} + +func (x *RateLimit) GetDisableKey() string { + if x != nil { + return x.DisableKey + } + return "" +} + +func (x *RateLimit) GetActions() []*RateLimit_Action { + if x != nil { + return x.Actions + } + return nil +} + +func (x *RateLimit) GetLimit() *RateLimit_Override { + if x != nil { + return x.Limit + } + return nil +} + +// .. attention:: +// +// Internally, Envoy always uses the HTTP/2 ``:authority`` header to represent the HTTP/1 ``Host`` +// header. Thus, if attempting to match on ``Host``, match on ``:authority`` instead. +// +// .. attention:: +// +// To route on HTTP method, use the special HTTP/2 ``:method`` header. This works for both +// HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g., +// +// .. code-block:: json +// +// { +// "name": ":method", +// "string_match": { +// "exact": "POST" +// } +// } +// +// .. attention:: +// +// In the absence of any header match specifier, match will default to :ref:`present_match +// `. i.e, a request that has the :ref:`name +// ` header will match, regardless of the header's +// value. +// +// [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.] +// +// [#next-free-field: 15] +type HeaderMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the name of the header in the request. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Specifies how the header match will be performed to route the request. + // + // Types that are assignable to HeaderMatchSpecifier: + // + // *HeaderMatcher_ExactMatch + // *HeaderMatcher_SafeRegexMatch + // *HeaderMatcher_RangeMatch + // *HeaderMatcher_PresentMatch + // *HeaderMatcher_PrefixMatch + // *HeaderMatcher_SuffixMatch + // *HeaderMatcher_ContainsMatch + // *HeaderMatcher_StringMatch + HeaderMatchSpecifier isHeaderMatcher_HeaderMatchSpecifier `protobuf_oneof:"header_match_specifier"` + // If specified, the match result will be inverted before checking. Defaults to false. + // + // Examples: + // + // * The regex “\d{3}“ does not match the value “1234“, so it will match when inverted. + // * The range [-10,0) will match the value -1, so it will not match when inverted. 
+ InvertMatch bool `protobuf:"varint,8,opt,name=invert_match,json=invertMatch,proto3" json:"invert_match,omitempty"` + // If specified, for any header match rule, if the header match rule specified header + // does not exist, this header value will be treated as empty. Defaults to false. + // + // Examples: + // + // - The header match rule specified header "header1" to range match of [0, 10], + // :ref:`invert_match ` + // is set to true and :ref:`treat_missing_header_as_empty ` + // is set to true; The "header1" header is not present. The match rule will + // treat the "header1" as an empty header. The empty header does not match the range, + // so it will match when inverted. + // - The header match rule specified header "header2" to range match of [0, 10], + // :ref:`invert_match ` + // is set to true and :ref:`treat_missing_header_as_empty ` + // is set to false; The "header2" header is not present and the header + // matcher rule for "header2" will be ignored so it will not match. + // - The header match rule specified header "header3" to a string regex match + // “^$“ which means an empty string, and + // :ref:`treat_missing_header_as_empty ` + // is set to true; The "header3" header is not present. + // The match rule will treat the "header3" header as an empty header so it will match. + // - The header match rule specified header "header4" to a string regex match + // “^$“ which means an empty string, and + // :ref:`treat_missing_header_as_empty ` + // is set to false; The "header4" header is not present. + // The match rule for "header4" will be ignored so it will not match. + TreatMissingHeaderAsEmpty bool `protobuf:"varint,14,opt,name=treat_missing_header_as_empty,json=treatMissingHeaderAsEmpty,proto3" json:"treat_missing_header_as_empty,omitempty"` +} + +func (x *HeaderMatcher) Reset() { + *x = HeaderMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeaderMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderMatcher) ProtoMessage() {} + +func (x *HeaderMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderMatcher.ProtoReflect.Descriptor instead. +func (*HeaderMatcher) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{18} +} + +func (x *HeaderMatcher) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *HeaderMatcher) GetHeaderMatchSpecifier() isHeaderMatcher_HeaderMatchSpecifier { + if m != nil { + return m.HeaderMatchSpecifier + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. +func (x *HeaderMatcher) GetExactMatch() string { + if x, ok := x.GetHeaderMatchSpecifier().(*HeaderMatcher_ExactMatch); ok { + return x.ExactMatch + } + return "" +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. 
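Tying the invert_match example above to code: a sketch of a matcher built from the range oneof, under the same assumed import paths; the header name is illustrative:

package routecfgexample

import (
	routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
	typev3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
)

// OutsideNegativeRange matches on the x-score header using the documented
// [-10, 0) range, inverted: an in-range value such as -1 does not match,
// while an out-of-range value such as 0 or 10 does.
func OutsideNegativeRange() *routev3.HeaderMatcher {
	return &routev3.HeaderMatcher{
		Name: "x-score",
		HeaderMatchSpecifier: &routev3.HeaderMatcher_RangeMatch{
			RangeMatch: &typev3.Int64Range{Start: -10, End: 0},
		},
		InvertMatch: true,
	}
}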
+func (x *HeaderMatcher) GetSafeRegexMatch() *v32.RegexMatcher { + if x, ok := x.GetHeaderMatchSpecifier().(*HeaderMatcher_SafeRegexMatch); ok { + return x.SafeRegexMatch + } + return nil +} + +func (x *HeaderMatcher) GetRangeMatch() *v33.Int64Range { + if x, ok := x.GetHeaderMatchSpecifier().(*HeaderMatcher_RangeMatch); ok { + return x.RangeMatch + } + return nil +} + +func (x *HeaderMatcher) GetPresentMatch() bool { + if x, ok := x.GetHeaderMatchSpecifier().(*HeaderMatcher_PresentMatch); ok { + return x.PresentMatch + } + return false +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. +func (x *HeaderMatcher) GetPrefixMatch() string { + if x, ok := x.GetHeaderMatchSpecifier().(*HeaderMatcher_PrefixMatch); ok { + return x.PrefixMatch + } + return "" +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. +func (x *HeaderMatcher) GetSuffixMatch() string { + if x, ok := x.GetHeaderMatchSpecifier().(*HeaderMatcher_SuffixMatch); ok { + return x.SuffixMatch + } + return "" +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. +func (x *HeaderMatcher) GetContainsMatch() string { + if x, ok := x.GetHeaderMatchSpecifier().(*HeaderMatcher_ContainsMatch); ok { + return x.ContainsMatch + } + return "" +} + +func (x *HeaderMatcher) GetStringMatch() *v32.StringMatcher { + if x, ok := x.GetHeaderMatchSpecifier().(*HeaderMatcher_StringMatch); ok { + return x.StringMatch + } + return nil +} + +func (x *HeaderMatcher) GetInvertMatch() bool { + if x != nil { + return x.InvertMatch + } + return false +} + +func (x *HeaderMatcher) GetTreatMissingHeaderAsEmpty() bool { + if x != nil { + return x.TreatMissingHeaderAsEmpty + } + return false +} + +type isHeaderMatcher_HeaderMatchSpecifier interface { + isHeaderMatcher_HeaderMatchSpecifier() +} + +type HeaderMatcher_ExactMatch struct { + // If specified, header match will be performed based on the value of the header. + // This field is deprecated. Please use :ref:`string_match `. + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + ExactMatch string `protobuf:"bytes,4,opt,name=exact_match,json=exactMatch,proto3,oneof"` +} + +type HeaderMatcher_SafeRegexMatch struct { + // If specified, this regex string is a regular expression rule which implies the entire request + // header value must match the regex. The rule will not match if only a subsequence of the + // request header value matches the regex. + // This field is deprecated. Please use :ref:`string_match `. + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + SafeRegexMatch *v32.RegexMatcher `protobuf:"bytes,11,opt,name=safe_regex_match,json=safeRegexMatch,proto3,oneof"` +} + +type HeaderMatcher_RangeMatch struct { + // If specified, header match will be performed based on range. + // The rule will match if the request header value is within this range. + // The entire request header value must represent an integer in base 10 notation: consisting of + // an optional plus or minus sign followed by a sequence of digits. The rule will not match if + // the header value does not represent an integer. Match will fail for empty values, floating + // point numbers or if only a subsequence of the header value is an integer. 
+ // + // Examples: + // + // - For range [-10,0), route will match for header value -1, but not for 0, “somestring“, 10.9, + // “-1somestring“ + RangeMatch *v33.Int64Range `protobuf:"bytes,6,opt,name=range_match,json=rangeMatch,proto3,oneof"` +} + +type HeaderMatcher_PresentMatch struct { + // If specified as true, header match will be performed based on whether the header is in the + // request. If specified as false, header match will be performed based on whether the header is absent. + PresentMatch bool `protobuf:"varint,7,opt,name=present_match,json=presentMatch,proto3,oneof"` +} + +type HeaderMatcher_PrefixMatch struct { + // If specified, header match will be performed based on the prefix of the header value. + // Note: empty prefix is not allowed, please use present_match instead. + // This field is deprecated. Please use :ref:`string_match `. + // + // Examples: + // + // * The prefix “abcd“ matches the value “abcdxyz“, but not for “abcxyz“. + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + PrefixMatch string `protobuf:"bytes,9,opt,name=prefix_match,json=prefixMatch,proto3,oneof"` +} + +type HeaderMatcher_SuffixMatch struct { + // If specified, header match will be performed based on the suffix of the header value. + // Note: empty suffix is not allowed, please use present_match instead. + // This field is deprecated. Please use :ref:`string_match `. + // + // Examples: + // + // * The suffix “abcd“ matches the value “xyzabcd“, but not for “xyzbcd“. + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + SuffixMatch string `protobuf:"bytes,10,opt,name=suffix_match,json=suffixMatch,proto3,oneof"` +} + +type HeaderMatcher_ContainsMatch struct { + // If specified, header match will be performed based on whether the header value contains + // the given value or not. + // Note: empty contains match is not allowed, please use present_match instead. + // This field is deprecated. Please use :ref:`string_match `. + // + // Examples: + // + // * The value “abcd“ matches the value “xyzabcdpqr“, but not for “xyzbcdpqr“. + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + ContainsMatch string `protobuf:"bytes,12,opt,name=contains_match,json=containsMatch,proto3,oneof"` +} + +type HeaderMatcher_StringMatch struct { + // If specified, header match will be performed based on the string match of the header value. + StringMatch *v32.StringMatcher `protobuf:"bytes,13,opt,name=string_match,json=stringMatch,proto3,oneof"` +} + +func (*HeaderMatcher_ExactMatch) isHeaderMatcher_HeaderMatchSpecifier() {} + +func (*HeaderMatcher_SafeRegexMatch) isHeaderMatcher_HeaderMatchSpecifier() {} + +func (*HeaderMatcher_RangeMatch) isHeaderMatcher_HeaderMatchSpecifier() {} + +func (*HeaderMatcher_PresentMatch) isHeaderMatcher_HeaderMatchSpecifier() {} + +func (*HeaderMatcher_PrefixMatch) isHeaderMatcher_HeaderMatchSpecifier() {} + +func (*HeaderMatcher_SuffixMatch) isHeaderMatcher_HeaderMatchSpecifier() {} + +func (*HeaderMatcher_ContainsMatch) isHeaderMatcher_HeaderMatchSpecifier() {} + +func (*HeaderMatcher_StringMatch) isHeaderMatcher_HeaderMatchSpecifier() {} + +// Query parameter matching treats the query string of a request's :path header +// as an ampersand-separated list of keys and/or key=value elements. 
+// [#next-free-field: 7] +type QueryParameterMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the name of a key that must be present in the requested + // “path“'s query string. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to QueryParameterMatchSpecifier: + // + // *QueryParameterMatcher_StringMatch + // *QueryParameterMatcher_PresentMatch + QueryParameterMatchSpecifier isQueryParameterMatcher_QueryParameterMatchSpecifier `protobuf_oneof:"query_parameter_match_specifier"` +} + +func (x *QueryParameterMatcher) Reset() { + *x = QueryParameterMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryParameterMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryParameterMatcher) ProtoMessage() {} + +func (x *QueryParameterMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryParameterMatcher.ProtoReflect.Descriptor instead. +func (*QueryParameterMatcher) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{19} +} + +func (x *QueryParameterMatcher) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *QueryParameterMatcher) GetQueryParameterMatchSpecifier() isQueryParameterMatcher_QueryParameterMatchSpecifier { + if m != nil { + return m.QueryParameterMatchSpecifier + } + return nil +} + +func (x *QueryParameterMatcher) GetStringMatch() *v32.StringMatcher { + if x, ok := x.GetQueryParameterMatchSpecifier().(*QueryParameterMatcher_StringMatch); ok { + return x.StringMatch + } + return nil +} + +func (x *QueryParameterMatcher) GetPresentMatch() bool { + if x, ok := x.GetQueryParameterMatchSpecifier().(*QueryParameterMatcher_PresentMatch); ok { + return x.PresentMatch + } + return false +} + +type isQueryParameterMatcher_QueryParameterMatchSpecifier interface { + isQueryParameterMatcher_QueryParameterMatchSpecifier() +} + +type QueryParameterMatcher_StringMatch struct { + // Specifies whether a query parameter value should match against a string. + StringMatch *v32.StringMatcher `protobuf:"bytes,5,opt,name=string_match,json=stringMatch,proto3,oneof"` +} + +type QueryParameterMatcher_PresentMatch struct { + // Specifies whether a query parameter should be present. + PresentMatch bool `protobuf:"varint,6,opt,name=present_match,json=presentMatch,proto3,oneof"` +} + +func (*QueryParameterMatcher_StringMatch) isQueryParameterMatcher_QueryParameterMatchSpecifier() {} + +func (*QueryParameterMatcher_PresentMatch) isQueryParameterMatcher_QueryParameterMatchSpecifier() {} + +// HTTP Internal Redirect :ref:`architecture overview `. +// [#next-free-field: 6] +type InternalRedirectPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An internal redirect is not handled, unless the number of previous internal redirects that a + // downstream request has encountered is lower than this value. 
+ // In the case where a downstream request is bounced among multiple routes by internal redirect, + // the first route that hits this threshold, or does not set :ref:`internal_redirect_policy + // ` + // will pass the redirect back to downstream. + // + // If not specified, at most one redirect will be followed. + MaxInternalRedirects *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=max_internal_redirects,json=maxInternalRedirects,proto3" json:"max_internal_redirects,omitempty"` + // Defines what upstream response codes are allowed to trigger internal redirect. If unspecified, + // only 302 will be treated as internal redirect. + // Only 301, 302, 303, 307 and 308 are valid values. Any other codes will be ignored. + RedirectResponseCodes []uint32 `protobuf:"varint,2,rep,packed,name=redirect_response_codes,json=redirectResponseCodes,proto3" json:"redirect_response_codes,omitempty"` + // Specifies a list of predicates that are queried when an upstream response is deemed + // to trigger an internal redirect by all other criteria. Any predicate in the list can reject + // the redirect, causing the response to be proxied to downstream. + // [#extension-category: envoy.internal_redirect_predicates] + Predicates []*v31.TypedExtensionConfig `protobuf:"bytes,3,rep,name=predicates,proto3" json:"predicates,omitempty"` + // Allow internal redirect to follow a target URI with a different scheme than the value of + // x-forwarded-proto. The default is false. + AllowCrossSchemeRedirect bool `protobuf:"varint,4,opt,name=allow_cross_scheme_redirect,json=allowCrossSchemeRedirect,proto3" json:"allow_cross_scheme_redirect,omitempty"` + // Specifies a list of headers, by name, to copy from the internal redirect into the subsequent + // request. If a header is specified here but not present in the redirect, it will be cleared in + // the subsequent request. + ResponseHeadersToCopy []string `protobuf:"bytes,5,rep,name=response_headers_to_copy,json=responseHeadersToCopy,proto3" json:"response_headers_to_copy,omitempty"` +} + +func (x *InternalRedirectPolicy) Reset() { + *x = InternalRedirectPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InternalRedirectPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InternalRedirectPolicy) ProtoMessage() {} + +func (x *InternalRedirectPolicy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InternalRedirectPolicy.ProtoReflect.Descriptor instead. 
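A sketch of an internal-redirect policy using only the fields defined above, again under illustrative import assumptions:

package routecfgexample

import (
	routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// FollowInternalRedirects lets the router chase up to three internal
// redirects triggered by 301 or 302 responses; cross-scheme redirects stay
// disabled (the zero value of AllowCrossSchemeRedirect).
func FollowInternalRedirects() *routev3.InternalRedirectPolicy {
	return &routev3.InternalRedirectPolicy{
		MaxInternalRedirects:  wrapperspb.UInt32(3),
		RedirectResponseCodes: []uint32{301, 302},
	}
}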
+func (*InternalRedirectPolicy) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{20} +} + +func (x *InternalRedirectPolicy) GetMaxInternalRedirects() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxInternalRedirects + } + return nil +} + +func (x *InternalRedirectPolicy) GetRedirectResponseCodes() []uint32 { + if x != nil { + return x.RedirectResponseCodes + } + return nil +} + +func (x *InternalRedirectPolicy) GetPredicates() []*v31.TypedExtensionConfig { + if x != nil { + return x.Predicates + } + return nil +} + +func (x *InternalRedirectPolicy) GetAllowCrossSchemeRedirect() bool { + if x != nil { + return x.AllowCrossSchemeRedirect + } + return false +} + +func (x *InternalRedirectPolicy) GetResponseHeadersToCopy() []string { + if x != nil { + return x.ResponseHeadersToCopy + } + return nil +} + +// A simple wrapper for an HTTP filter config. This is intended to be used as a wrapper for the +// map value in +// :ref:`VirtualHost.typed_per_filter_config`, +// :ref:`Route.typed_per_filter_config`, +// or :ref:`WeightedCluster.ClusterWeight.typed_per_filter_config` +// to add additional flags to the filter. +type FilterConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The filter config. + Config *anypb.Any `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + // If true, the filter is optional, meaning that if the client does + // not support the specified filter, it may ignore the map entry rather + // than rejecting the config. + IsOptional bool `protobuf:"varint,2,opt,name=is_optional,json=isOptional,proto3" json:"is_optional,omitempty"` + // If true, the filter is disabled in the route or virtual host and the “config“ field is ignored. + // See :ref:`route based filter chain ` + // for more details. + // + // .. note:: + // + // This field will take effect when the request arrive and filter chain is created for the request. + // If initial route is selected for the request and a filter is disabled in the initial route, then + // the filter will not be added to the filter chain. + // And if the request is mutated later and re-match to another route, the disabled filter by the + // initial route will not be added back to the filter chain because the filter chain is already + // created and it is too late to change the chain. + // + // This field only make sense for the downstream HTTP filters for now. + Disabled bool `protobuf:"varint,3,opt,name=disabled,proto3" json:"disabled,omitempty"` +} + +func (x *FilterConfig) Reset() { + *x = FilterConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FilterConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FilterConfig) ProtoMessage() {} + +func (x *FilterConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FilterConfig.ProtoReflect.Descriptor instead. 
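The FilterConfig wrapper above is consumed as a google.protobuf.Any inside the typed_per_filter_config maps it mentions. A sketch of disabling a filter on a single route this way; the map-key filter name passed in by the caller is an assumption, and the Config field is left unset because it is ignored when Disabled is true:

package routecfgexample

import (
	routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
	"google.golang.org/protobuf/types/known/anypb"
)

// DisableFilterOnRoute returns a typed_per_filter_config entry that disables
// the named filter for the route or virtual host it is attached to.
func DisableFilterOnRoute(filterName string) (map[string]*anypb.Any, error) {
	fc, err := anypb.New(&routev3.FilterConfig{Disabled: true})
	if err != nil {
		return nil, err
	}
	return map[string]*anypb.Any{filterName: fc}, nil
}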
+func (*FilterConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{21} +} + +func (x *FilterConfig) GetConfig() *anypb.Any { + if x != nil { + return x.Config + } + return nil +} + +func (x *FilterConfig) GetIsOptional() bool { + if x != nil { + return x.IsOptional + } + return false +} + +func (x *FilterConfig) GetDisabled() bool { + if x != nil { + return x.Disabled + } + return false +} + +// [#next-free-field: 13] +type WeightedCluster_ClusterWeight struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Only one of “name“ and “cluster_header“ may be specified. + // [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1}] + // Name of the upstream cluster. The cluster must exist in the + // :ref:`cluster manager configuration `. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Only one of “name“ and “cluster_header“ may be specified. + // [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1 }] + // Envoy will determine the cluster to route to by reading the value of the + // HTTP header named by cluster_header from the request headers. If the + // header is not found or the referenced cluster does not exist, Envoy will + // return a 404 response. + // + // .. attention:: + // + // Internally, Envoy always uses the HTTP/2 ``:authority`` header to represent the HTTP/1 + // ``Host`` header. Thus, if attempting to match on ``Host``, match on ``:authority`` instead. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. + ClusterHeader string `protobuf:"bytes,12,opt,name=cluster_header,json=clusterHeader,proto3" json:"cluster_header,omitempty"` + // The weight of the cluster. This value is relative to the other clusters' + // weights. When a request matches the route, the choice of an upstream cluster + // is determined by its weight. The sum of weights across all + // entries in the clusters array must be greater than 0, and must not exceed + // uint32_t maximal value (4294967295). + Weight *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=weight,proto3" json:"weight,omitempty"` + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in + // the upstream cluster with metadata matching what is set in this field will be considered for + // load balancing. Note that this will be merged with what's provided in + // :ref:`RouteAction.metadata_match `, with + // values here taking precedence. The filter name should be specified as “envoy.lb“. + MetadataMatch *v31.Metadata `protobuf:"bytes,3,opt,name=metadata_match,json=metadataMatch,proto3" json:"metadata_match,omitempty"` + // Specifies a list of headers to be added to requests when this cluster is selected + // through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. + // Headers specified at this level are applied before headers from the enclosing + // :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`, and + // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. 
+ RequestHeadersToAdd []*v31.HeaderValueOption `protobuf:"bytes,4,rep,name=request_headers_to_add,json=requestHeadersToAdd,proto3" json:"request_headers_to_add,omitempty"` + // Specifies a list of HTTP headers that should be removed from each request when + // this cluster is selected through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. + RequestHeadersToRemove []string `protobuf:"bytes,9,rep,name=request_headers_to_remove,json=requestHeadersToRemove,proto3" json:"request_headers_to_remove,omitempty"` + // Specifies a list of headers to be added to responses when this cluster is selected + // through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. + // Headers specified at this level are applied before headers from the enclosing + // :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`, and + // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + ResponseHeadersToAdd []*v31.HeaderValueOption `protobuf:"bytes,5,rep,name=response_headers_to_add,json=responseHeadersToAdd,proto3" json:"response_headers_to_add,omitempty"` + // Specifies a list of headers to be removed from responses when this cluster is selected + // through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. + ResponseHeadersToRemove []string `protobuf:"bytes,6,rep,name=response_headers_to_remove,json=responseHeadersToRemove,proto3" json:"response_headers_to_remove,omitempty"` + // This field can be used to provide weighted cluster specific per filter config. The key should match the + // :ref:`filter config name + // `. + // See :ref:`Http filter route specific config ` + // for details. + // [#comment: An entry's value may be wrapped in a + // :ref:`FilterConfig` + // message to specify additional options.] + TypedPerFilterConfig map[string]*anypb.Any `protobuf:"bytes,10,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Types that are assignable to HostRewriteSpecifier: + // + // *WeightedCluster_ClusterWeight_HostRewriteLiteral + HostRewriteSpecifier isWeightedCluster_ClusterWeight_HostRewriteSpecifier `protobuf_oneof:"host_rewrite_specifier"` +} + +func (x *WeightedCluster_ClusterWeight) Reset() { + *x = WeightedCluster_ClusterWeight{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WeightedCluster_ClusterWeight) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WeightedCluster_ClusterWeight) ProtoMessage() {} + +func (x *WeightedCluster_ClusterWeight) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WeightedCluster_ClusterWeight.ProtoReflect.Descriptor instead. 
+func (*WeightedCluster_ClusterWeight) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *WeightedCluster_ClusterWeight) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *WeightedCluster_ClusterWeight) GetClusterHeader() string { + if x != nil { + return x.ClusterHeader + } + return "" +} + +func (x *WeightedCluster_ClusterWeight) GetWeight() *wrapperspb.UInt32Value { + if x != nil { + return x.Weight + } + return nil +} + +func (x *WeightedCluster_ClusterWeight) GetMetadataMatch() *v31.Metadata { + if x != nil { + return x.MetadataMatch + } + return nil +} + +func (x *WeightedCluster_ClusterWeight) GetRequestHeadersToAdd() []*v31.HeaderValueOption { + if x != nil { + return x.RequestHeadersToAdd + } + return nil +} + +func (x *WeightedCluster_ClusterWeight) GetRequestHeadersToRemove() []string { + if x != nil { + return x.RequestHeadersToRemove + } + return nil +} + +func (x *WeightedCluster_ClusterWeight) GetResponseHeadersToAdd() []*v31.HeaderValueOption { + if x != nil { + return x.ResponseHeadersToAdd + } + return nil +} + +func (x *WeightedCluster_ClusterWeight) GetResponseHeadersToRemove() []string { + if x != nil { + return x.ResponseHeadersToRemove + } + return nil +} + +func (x *WeightedCluster_ClusterWeight) GetTypedPerFilterConfig() map[string]*anypb.Any { + if x != nil { + return x.TypedPerFilterConfig + } + return nil +} + +func (m *WeightedCluster_ClusterWeight) GetHostRewriteSpecifier() isWeightedCluster_ClusterWeight_HostRewriteSpecifier { + if m != nil { + return m.HostRewriteSpecifier + } + return nil +} + +func (x *WeightedCluster_ClusterWeight) GetHostRewriteLiteral() string { + if x, ok := x.GetHostRewriteSpecifier().(*WeightedCluster_ClusterWeight_HostRewriteLiteral); ok { + return x.HostRewriteLiteral + } + return "" +} + +type isWeightedCluster_ClusterWeight_HostRewriteSpecifier interface { + isWeightedCluster_ClusterWeight_HostRewriteSpecifier() +} + +type WeightedCluster_ClusterWeight_HostRewriteLiteral struct { + // Indicates that during forwarding, the host header will be swapped with + // this value. + HostRewriteLiteral string `protobuf:"bytes,11,opt,name=host_rewrite_literal,json=hostRewriteLiteral,proto3,oneof"` +} + +func (*WeightedCluster_ClusterWeight_HostRewriteLiteral) isWeightedCluster_ClusterWeight_HostRewriteSpecifier() { +} + +type RouteMatch_GrpcRouteMatchOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RouteMatch_GrpcRouteMatchOptions) Reset() { + *x = RouteMatch_GrpcRouteMatchOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteMatch_GrpcRouteMatchOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteMatch_GrpcRouteMatchOptions) ProtoMessage() {} + +func (x *RouteMatch_GrpcRouteMatchOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteMatch_GrpcRouteMatchOptions.ProtoReflect.Descriptor instead. 
+func (*RouteMatch_GrpcRouteMatchOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{6, 0} +} + +type RouteMatch_TlsContextMatchOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If specified, the route will match against whether or not a certificate is presented. + // If not specified, certificate presentation status (true or false) will not be considered when route matching. + Presented *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=presented,proto3" json:"presented,omitempty"` + // If specified, the route will match against whether or not a certificate is validated. + // If not specified, certificate validation status (true or false) will not be considered when route matching. + // + // .. warning:: + // + // Client certificate validation is not currently performed upon TLS session resumption. For + // a resumed TLS session the route will match only when ``validated`` is false, regardless of + // whether the client TLS certificate is valid. + // + // The only known workaround for this issue is to disable TLS session resumption entirely, by + // setting both :ref:`disable_stateless_session_resumption ` + // and :ref:`disable_stateful_session_resumption ` on the DownstreamTlsContext. + Validated *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=validated,proto3" json:"validated,omitempty"` +} + +func (x *RouteMatch_TlsContextMatchOptions) Reset() { + *x = RouteMatch_TlsContextMatchOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteMatch_TlsContextMatchOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteMatch_TlsContextMatchOptions) ProtoMessage() {} + +func (x *RouteMatch_TlsContextMatchOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteMatch_TlsContextMatchOptions.ProtoReflect.Descriptor instead. +func (*RouteMatch_TlsContextMatchOptions) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{6, 1} +} + +func (x *RouteMatch_TlsContextMatchOptions) GetPresented() *wrapperspb.BoolValue { + if x != nil { + return x.Presented + } + return nil +} + +func (x *RouteMatch_TlsContextMatchOptions) GetValidated() *wrapperspb.BoolValue { + if x != nil { + return x.Validated + } + return nil +} + +// An extensible message for matching CONNECT or CONNECT-UDP requests. 
+type RouteMatch_ConnectMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RouteMatch_ConnectMatcher) Reset() { + *x = RouteMatch_ConnectMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteMatch_ConnectMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteMatch_ConnectMatcher) ProtoMessage() {} + +func (x *RouteMatch_ConnectMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteMatch_ConnectMatcher.ProtoReflect.Descriptor instead. +func (*RouteMatch_ConnectMatcher) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{6, 2} +} + +// The router is capable of shadowing traffic from one cluster to another. The current +// implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to +// respond before returning the response from the primary cluster. All normal statistics are +// collected for the shadow cluster making this feature useful for testing. +// +// During shadowing, the host/authority header is altered such that “-shadow“ is appended. This is +// useful for logging. For example, “cluster1“ becomes “cluster1-shadow“. This behavior can be +// disabled by setting “disable_shadow_host_suffix_append“ to “true“. +// +// .. note:: +// +// Shadowing will not be triggered if the primary cluster does not exist. +// +// .. note:: +// +// Shadowing doesn't support Http CONNECT and upgrades. +// +// [#next-free-field: 7] +type RouteAction_RequestMirrorPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Only one of “cluster“ and “cluster_header“ can be specified. + // [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1}] + // Specifies the cluster that requests will be mirrored to. The cluster must + // exist in the cluster manager configuration. + Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` + // Only one of “cluster“ and “cluster_header“ can be specified. + // Envoy will determine the cluster to route to by reading the value of the + // HTTP header named by cluster_header from the request headers. Only the first value in header is used, + // and no shadow request will happen if the value is not found in headers. Envoy will not wait for + // the shadow cluster to respond before returning the response from the primary cluster. + // + // .. attention:: + // + // Internally, Envoy always uses the HTTP/2 ``:authority`` header to represent the HTTP/1 + // ``Host`` header. Thus, if attempting to match on ``Host``, match on ``:authority`` instead. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. + ClusterHeader string `protobuf:"bytes,5,opt,name=cluster_header,json=clusterHeader,proto3" json:"cluster_header,omitempty"` + // If not specified, all requests to the target cluster will be mirrored. 
+ // + // If specified, this field takes precedence over the “runtime_key“ field and requests must also + // fall under the percentage of matches indicated by this field. + // + // For some fraction N/D, a random number in the range [0,D) is selected. If the + // number is <= the value of the numerator N, or if the key is not present, the default + // value, the request will be mirrored. + RuntimeFraction *v31.RuntimeFractionalPercent `protobuf:"bytes,3,opt,name=runtime_fraction,json=runtimeFraction,proto3" json:"runtime_fraction,omitempty"` + // Determines if the trace span should be sampled. Defaults to true. + TraceSampled *wrapperspb.BoolValue `protobuf:"bytes,4,opt,name=trace_sampled,json=traceSampled,proto3" json:"trace_sampled,omitempty"` + // Disables appending the “-shadow“ suffix to the shadowed “Host“ header. Defaults to “false“. + DisableShadowHostSuffixAppend bool `protobuf:"varint,6,opt,name=disable_shadow_host_suffix_append,json=disableShadowHostSuffixAppend,proto3" json:"disable_shadow_host_suffix_append,omitempty"` +} + +func (x *RouteAction_RequestMirrorPolicy) Reset() { + *x = RouteAction_RequestMirrorPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteAction_RequestMirrorPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteAction_RequestMirrorPolicy) ProtoMessage() {} + +func (x *RouteAction_RequestMirrorPolicy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteAction_RequestMirrorPolicy.ProtoReflect.Descriptor instead. +func (*RouteAction_RequestMirrorPolicy) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 0} +} + +func (x *RouteAction_RequestMirrorPolicy) GetCluster() string { + if x != nil { + return x.Cluster + } + return "" +} + +func (x *RouteAction_RequestMirrorPolicy) GetClusterHeader() string { + if x != nil { + return x.ClusterHeader + } + return "" +} + +func (x *RouteAction_RequestMirrorPolicy) GetRuntimeFraction() *v31.RuntimeFractionalPercent { + if x != nil { + return x.RuntimeFraction + } + return nil +} + +func (x *RouteAction_RequestMirrorPolicy) GetTraceSampled() *wrapperspb.BoolValue { + if x != nil { + return x.TraceSampled + } + return nil +} + +func (x *RouteAction_RequestMirrorPolicy) GetDisableShadowHostSuffixAppend() bool { + if x != nil { + return x.DisableShadowHostSuffixAppend + } + return false +} + +// Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer +// `. +// [#next-free-field: 7] +type RouteAction_HashPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to PolicySpecifier: + // + // *RouteAction_HashPolicy_Header_ + // *RouteAction_HashPolicy_Cookie_ + // *RouteAction_HashPolicy_ConnectionProperties_ + // *RouteAction_HashPolicy_QueryParameter_ + // *RouteAction_HashPolicy_FilterState_ + PolicySpecifier isRouteAction_HashPolicy_PolicySpecifier `protobuf_oneof:"policy_specifier"` + // The flag that short-circuits the hash computing. 
This field provides a + // 'fallback' style of configuration: "if a terminal policy doesn't work, + // fallback to rest of the policy list", it saves time when the terminal + // policy works. + // + // If true, and there is already a hash computed, ignore rest of the + // list of hash polices. + // For example, if the following hash methods are configured: + // + // ========= ======== + // specifier terminal + // ========= ======== + // Header A true + // Header B false + // Header C false + // ========= ======== + // + // The generateHash process ends if policy "header A" generates a hash, as + // it's a terminal policy. + Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"` +} + +func (x *RouteAction_HashPolicy) Reset() { + *x = RouteAction_HashPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteAction_HashPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteAction_HashPolicy) ProtoMessage() {} + +func (x *RouteAction_HashPolicy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteAction_HashPolicy.ProtoReflect.Descriptor instead. +func (*RouteAction_HashPolicy) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 1} +} + +func (m *RouteAction_HashPolicy) GetPolicySpecifier() isRouteAction_HashPolicy_PolicySpecifier { + if m != nil { + return m.PolicySpecifier + } + return nil +} + +func (x *RouteAction_HashPolicy) GetHeader() *RouteAction_HashPolicy_Header { + if x, ok := x.GetPolicySpecifier().(*RouteAction_HashPolicy_Header_); ok { + return x.Header + } + return nil +} + +func (x *RouteAction_HashPolicy) GetCookie() *RouteAction_HashPolicy_Cookie { + if x, ok := x.GetPolicySpecifier().(*RouteAction_HashPolicy_Cookie_); ok { + return x.Cookie + } + return nil +} + +func (x *RouteAction_HashPolicy) GetConnectionProperties() *RouteAction_HashPolicy_ConnectionProperties { + if x, ok := x.GetPolicySpecifier().(*RouteAction_HashPolicy_ConnectionProperties_); ok { + return x.ConnectionProperties + } + return nil +} + +func (x *RouteAction_HashPolicy) GetQueryParameter() *RouteAction_HashPolicy_QueryParameter { + if x, ok := x.GetPolicySpecifier().(*RouteAction_HashPolicy_QueryParameter_); ok { + return x.QueryParameter + } + return nil +} + +func (x *RouteAction_HashPolicy) GetFilterState() *RouteAction_HashPolicy_FilterState { + if x, ok := x.GetPolicySpecifier().(*RouteAction_HashPolicy_FilterState_); ok { + return x.FilterState + } + return nil +} + +func (x *RouteAction_HashPolicy) GetTerminal() bool { + if x != nil { + return x.Terminal + } + return false +} + +type isRouteAction_HashPolicy_PolicySpecifier interface { + isRouteAction_HashPolicy_PolicySpecifier() +} + +type RouteAction_HashPolicy_Header_ struct { + // Header hash policy. + Header *RouteAction_HashPolicy_Header `protobuf:"bytes,1,opt,name=header,proto3,oneof"` +} + +type RouteAction_HashPolicy_Cookie_ struct { + // Cookie hash policy. 
+ Cookie *RouteAction_HashPolicy_Cookie `protobuf:"bytes,2,opt,name=cookie,proto3,oneof"` +} + +type RouteAction_HashPolicy_ConnectionProperties_ struct { + // Connection properties hash policy. + ConnectionProperties *RouteAction_HashPolicy_ConnectionProperties `protobuf:"bytes,3,opt,name=connection_properties,json=connectionProperties,proto3,oneof"` +} + +type RouteAction_HashPolicy_QueryParameter_ struct { + // Query parameter hash policy. + QueryParameter *RouteAction_HashPolicy_QueryParameter `protobuf:"bytes,5,opt,name=query_parameter,json=queryParameter,proto3,oneof"` +} + +type RouteAction_HashPolicy_FilterState_ struct { + // Filter state hash policy. + FilterState *RouteAction_HashPolicy_FilterState `protobuf:"bytes,6,opt,name=filter_state,json=filterState,proto3,oneof"` +} + +func (*RouteAction_HashPolicy_Header_) isRouteAction_HashPolicy_PolicySpecifier() {} + +func (*RouteAction_HashPolicy_Cookie_) isRouteAction_HashPolicy_PolicySpecifier() {} + +func (*RouteAction_HashPolicy_ConnectionProperties_) isRouteAction_HashPolicy_PolicySpecifier() {} + +func (*RouteAction_HashPolicy_QueryParameter_) isRouteAction_HashPolicy_PolicySpecifier() {} + +func (*RouteAction_HashPolicy_FilterState_) isRouteAction_HashPolicy_PolicySpecifier() {} + +// Allows enabling and disabling upgrades on a per-route basis. +// This overrides any enabled/disabled upgrade filter chain specified in the +// HttpConnectionManager +// :ref:`upgrade_configs +// ` +// but does not affect any custom filter chain specified there. +type RouteAction_UpgradeConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The case-insensitive name of this upgrade, e.g. "websocket". + // For each upgrade type present in upgrade_configs, requests with + // Upgrade: [upgrade_type] will be proxied upstream. + UpgradeType string `protobuf:"bytes,1,opt,name=upgrade_type,json=upgradeType,proto3" json:"upgrade_type,omitempty"` + // Determines if upgrades are available on this route. Defaults to true. + Enabled *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=enabled,proto3" json:"enabled,omitempty"` + // Configuration for sending data upstream as a raw data payload. This is used for + // CONNECT requests, when forwarding CONNECT payload as raw TCP. + // Note that CONNECT support is currently considered alpha in Envoy. + // [#comment: TODO(htuch): Replace the above comment with an alpha tag.] + ConnectConfig *RouteAction_UpgradeConfig_ConnectConfig `protobuf:"bytes,3,opt,name=connect_config,json=connectConfig,proto3" json:"connect_config,omitempty"` +} + +func (x *RouteAction_UpgradeConfig) Reset() { + *x = RouteAction_UpgradeConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteAction_UpgradeConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteAction_UpgradeConfig) ProtoMessage() {} + +func (x *RouteAction_UpgradeConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteAction_UpgradeConfig.ProtoReflect.Descriptor instead. 
+func (*RouteAction_UpgradeConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 2} +} + +func (x *RouteAction_UpgradeConfig) GetUpgradeType() string { + if x != nil { + return x.UpgradeType + } + return "" +} + +func (x *RouteAction_UpgradeConfig) GetEnabled() *wrapperspb.BoolValue { + if x != nil { + return x.Enabled + } + return nil +} + +func (x *RouteAction_UpgradeConfig) GetConnectConfig() *RouteAction_UpgradeConfig_ConnectConfig { + if x != nil { + return x.ConnectConfig + } + return nil +} + +type RouteAction_MaxStreamDuration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the maximum duration allowed for streams on the route. If not specified, the value + // from the :ref:`max_stream_duration + // ` field in + // :ref:`HttpConnectionManager.common_http_protocol_options + // ` + // is used. If this field is set explicitly to zero, any + // HttpConnectionManager max_stream_duration timeout will be disabled for + // this route. + MaxStreamDuration *durationpb.Duration `protobuf:"bytes,1,opt,name=max_stream_duration,json=maxStreamDuration,proto3" json:"max_stream_duration,omitempty"` + // If present, and the request contains a `grpc-timeout header + // `_, use that value as the + // “max_stream_duration“, but limit the applied timeout to the maximum value specified here. + // If set to 0, the “grpc-timeout“ header is used without modification. + GrpcTimeoutHeaderMax *durationpb.Duration `protobuf:"bytes,2,opt,name=grpc_timeout_header_max,json=grpcTimeoutHeaderMax,proto3" json:"grpc_timeout_header_max,omitempty"` + // If present, Envoy will adjust the timeout provided by the “grpc-timeout“ header by + // subtracting the provided duration from the header. This is useful for allowing Envoy to set + // its global timeout to be less than that of the deadline imposed by the calling client, which + // makes it more likely that Envoy will handle the timeout instead of having the call canceled + // by the client. If, after applying the offset, the resulting timeout is zero or negative, + // the stream will timeout immediately. + GrpcTimeoutHeaderOffset *durationpb.Duration `protobuf:"bytes,3,opt,name=grpc_timeout_header_offset,json=grpcTimeoutHeaderOffset,proto3" json:"grpc_timeout_header_offset,omitempty"` +} + +func (x *RouteAction_MaxStreamDuration) Reset() { + *x = RouteAction_MaxStreamDuration{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteAction_MaxStreamDuration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteAction_MaxStreamDuration) ProtoMessage() {} + +func (x *RouteAction_MaxStreamDuration) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteAction_MaxStreamDuration.ProtoReflect.Descriptor instead. 
+func (*RouteAction_MaxStreamDuration) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 3} +} + +func (x *RouteAction_MaxStreamDuration) GetMaxStreamDuration() *durationpb.Duration { + if x != nil { + return x.MaxStreamDuration + } + return nil +} + +func (x *RouteAction_MaxStreamDuration) GetGrpcTimeoutHeaderMax() *durationpb.Duration { + if x != nil { + return x.GrpcTimeoutHeaderMax + } + return nil +} + +func (x *RouteAction_MaxStreamDuration) GetGrpcTimeoutHeaderOffset() *durationpb.Duration { + if x != nil { + return x.GrpcTimeoutHeaderOffset + } + return nil +} + +type RouteAction_HashPolicy_Header struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the request header that will be used to obtain the hash + // key. If the request header is not present, no hash will be produced. + HeaderName string `protobuf:"bytes,1,opt,name=header_name,json=headerName,proto3" json:"header_name,omitempty"` + // If specified, the request header value will be rewritten and used + // to produce the hash key. + RegexRewrite *v32.RegexMatchAndSubstitute `protobuf:"bytes,2,opt,name=regex_rewrite,json=regexRewrite,proto3" json:"regex_rewrite,omitempty"` +} + +func (x *RouteAction_HashPolicy_Header) Reset() { + *x = RouteAction_HashPolicy_Header{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteAction_HashPolicy_Header) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteAction_HashPolicy_Header) ProtoMessage() {} + +func (x *RouteAction_HashPolicy_Header) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteAction_HashPolicy_Header.ProtoReflect.Descriptor instead. +func (*RouteAction_HashPolicy_Header) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 1, 0} +} + +func (x *RouteAction_HashPolicy_Header) GetHeaderName() string { + if x != nil { + return x.HeaderName + } + return "" +} + +func (x *RouteAction_HashPolicy_Header) GetRegexRewrite() *v32.RegexMatchAndSubstitute { + if x != nil { + return x.RegexRewrite + } + return nil +} + +// CookieAttribute defines an API for adding additional attributes for a HTTP cookie. +type RouteAction_HashPolicy_CookieAttribute struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the cookie attribute. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The optional value of the cookie attribute. 
+ Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *RouteAction_HashPolicy_CookieAttribute) Reset() { + *x = RouteAction_HashPolicy_CookieAttribute{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteAction_HashPolicy_CookieAttribute) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteAction_HashPolicy_CookieAttribute) ProtoMessage() {} + +func (x *RouteAction_HashPolicy_CookieAttribute) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteAction_HashPolicy_CookieAttribute.ProtoReflect.Descriptor instead. +func (*RouteAction_HashPolicy_CookieAttribute) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 1, 1} +} + +func (x *RouteAction_HashPolicy_CookieAttribute) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *RouteAction_HashPolicy_CookieAttribute) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// Envoy supports two types of cookie affinity: +// +// 1. Passive. Envoy takes a cookie that's present in the cookies header and +// hashes on its value. +// +// 2. Generated. Envoy generates and sets a cookie with an expiration (TTL) +// on the first request from the client in its response to the client, +// based on the endpoint the request gets sent to. The client then +// presents this on the next and all subsequent requests. The hash of +// this is sufficient to ensure these requests get sent to the same +// endpoint. The cookie is generated by hashing the source and +// destination ports and addresses so that multiple independent HTTP2 +// streams on the same connection will independently receive the same +// cookie, even if they arrive at the Envoy simultaneously. +type RouteAction_HashPolicy_Cookie struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the cookie that will be used to obtain the hash key. If the + // cookie is not present and ttl below is not set, no hash will be + // produced. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // If specified, a cookie with the TTL will be generated if the cookie is + // not present. If the TTL is present and zero, the generated cookie will + // be a session cookie. + Ttl *durationpb.Duration `protobuf:"bytes,2,opt,name=ttl,proto3" json:"ttl,omitempty"` + // The name of the path for the cookie. If no path is specified here, no path + // will be set for the cookie. + Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` + // Additional attributes for the cookie. They will be used when generating a new cookie. 
+ Attributes []*RouteAction_HashPolicy_CookieAttribute `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty"` +} + +func (x *RouteAction_HashPolicy_Cookie) Reset() { + *x = RouteAction_HashPolicy_Cookie{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteAction_HashPolicy_Cookie) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteAction_HashPolicy_Cookie) ProtoMessage() {} + +func (x *RouteAction_HashPolicy_Cookie) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteAction_HashPolicy_Cookie.ProtoReflect.Descriptor instead. +func (*RouteAction_HashPolicy_Cookie) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 1, 2} +} + +func (x *RouteAction_HashPolicy_Cookie) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *RouteAction_HashPolicy_Cookie) GetTtl() *durationpb.Duration { + if x != nil { + return x.Ttl + } + return nil +} + +func (x *RouteAction_HashPolicy_Cookie) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *RouteAction_HashPolicy_Cookie) GetAttributes() []*RouteAction_HashPolicy_CookieAttribute { + if x != nil { + return x.Attributes + } + return nil +} + +type RouteAction_HashPolicy_ConnectionProperties struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Hash on source IP address. + SourceIp bool `protobuf:"varint,1,opt,name=source_ip,json=sourceIp,proto3" json:"source_ip,omitempty"` +} + +func (x *RouteAction_HashPolicy_ConnectionProperties) Reset() { + *x = RouteAction_HashPolicy_ConnectionProperties{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteAction_HashPolicy_ConnectionProperties) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteAction_HashPolicy_ConnectionProperties) ProtoMessage() {} + +func (x *RouteAction_HashPolicy_ConnectionProperties) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteAction_HashPolicy_ConnectionProperties.ProtoReflect.Descriptor instead. +func (*RouteAction_HashPolicy_ConnectionProperties) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 1, 3} +} + +func (x *RouteAction_HashPolicy_ConnectionProperties) GetSourceIp() bool { + if x != nil { + return x.SourceIp + } + return false +} + +type RouteAction_HashPolicy_QueryParameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the URL query parameter that will be used to obtain the hash + // key. 
If the parameter is not present, no hash will be produced. Query + // parameter names are case-sensitive. If query parameters are repeated, only + // the first value will be considered. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *RouteAction_HashPolicy_QueryParameter) Reset() { + *x = RouteAction_HashPolicy_QueryParameter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteAction_HashPolicy_QueryParameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteAction_HashPolicy_QueryParameter) ProtoMessage() {} + +func (x *RouteAction_HashPolicy_QueryParameter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteAction_HashPolicy_QueryParameter.ProtoReflect.Descriptor instead. +func (*RouteAction_HashPolicy_QueryParameter) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 1, 4} +} + +func (x *RouteAction_HashPolicy_QueryParameter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type RouteAction_HashPolicy_FilterState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the Object in the per-request filterState, which is an + // Envoy::Hashable object. If there is no data associated with the key, + // or the stored object is not Envoy::Hashable, no hash will be produced. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` +} + +func (x *RouteAction_HashPolicy_FilterState) Reset() { + *x = RouteAction_HashPolicy_FilterState{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteAction_HashPolicy_FilterState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteAction_HashPolicy_FilterState) ProtoMessage() {} + +func (x *RouteAction_HashPolicy_FilterState) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteAction_HashPolicy_FilterState.ProtoReflect.Descriptor instead. +func (*RouteAction_HashPolicy_FilterState) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 1, 5} +} + +func (x *RouteAction_HashPolicy_FilterState) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +// Configuration for sending data upstream as a raw data payload. This is used for +// CONNECT or POST requests, when forwarding request payload as raw TCP. 
+type RouteAction_UpgradeConfig_ConnectConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If present, the proxy protocol header will be prepended to the CONNECT payload sent upstream. + ProxyProtocolConfig *v31.ProxyProtocolConfig `protobuf:"bytes,1,opt,name=proxy_protocol_config,json=proxyProtocolConfig,proto3" json:"proxy_protocol_config,omitempty"` + // If set, the route will also allow forwarding POST payload as raw TCP. + AllowPost bool `protobuf:"varint,2,opt,name=allow_post,json=allowPost,proto3" json:"allow_post,omitempty"` +} + +func (x *RouteAction_UpgradeConfig_ConnectConfig) Reset() { + *x = RouteAction_UpgradeConfig_ConnectConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteAction_UpgradeConfig_ConnectConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteAction_UpgradeConfig_ConnectConfig) ProtoMessage() {} + +func (x *RouteAction_UpgradeConfig_ConnectConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteAction_UpgradeConfig_ConnectConfig.ProtoReflect.Descriptor instead. +func (*RouteAction_UpgradeConfig_ConnectConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 2, 0} +} + +func (x *RouteAction_UpgradeConfig_ConnectConfig) GetProxyProtocolConfig() *v31.ProxyProtocolConfig { + if x != nil { + return x.ProxyProtocolConfig + } + return nil +} + +func (x *RouteAction_UpgradeConfig_ConnectConfig) GetAllowPost() bool { + if x != nil { + return x.AllowPost + } + return false +} + +type RetryPolicy_RetryPriority struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // [#extension-category: envoy.retry_priorities] + // + // Types that are assignable to ConfigType: + // + // *RetryPolicy_RetryPriority_TypedConfig + ConfigType isRetryPolicy_RetryPriority_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *RetryPolicy_RetryPriority) Reset() { + *x = RetryPolicy_RetryPriority{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryPolicy_RetryPriority) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryPolicy_RetryPriority) ProtoMessage() {} + +func (x *RetryPolicy_RetryPriority) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryPolicy_RetryPriority.ProtoReflect.Descriptor instead. 
+func (*RetryPolicy_RetryPriority) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{9, 0} +} + +func (x *RetryPolicy_RetryPriority) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *RetryPolicy_RetryPriority) GetConfigType() isRetryPolicy_RetryPriority_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *RetryPolicy_RetryPriority) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*RetryPolicy_RetryPriority_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isRetryPolicy_RetryPriority_ConfigType interface { + isRetryPolicy_RetryPriority_ConfigType() +} + +type RetryPolicy_RetryPriority_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*RetryPolicy_RetryPriority_TypedConfig) isRetryPolicy_RetryPriority_ConfigType() {} + +type RetryPolicy_RetryHostPredicate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // [#extension-category: envoy.retry_host_predicates] + // + // Types that are assignable to ConfigType: + // + // *RetryPolicy_RetryHostPredicate_TypedConfig + ConfigType isRetryPolicy_RetryHostPredicate_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *RetryPolicy_RetryHostPredicate) Reset() { + *x = RetryPolicy_RetryHostPredicate{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryPolicy_RetryHostPredicate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryPolicy_RetryHostPredicate) ProtoMessage() {} + +func (x *RetryPolicy_RetryHostPredicate) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryPolicy_RetryHostPredicate.ProtoReflect.Descriptor instead. 
+func (*RetryPolicy_RetryHostPredicate) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{9, 1} +} + +func (x *RetryPolicy_RetryHostPredicate) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *RetryPolicy_RetryHostPredicate) GetConfigType() isRetryPolicy_RetryHostPredicate_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *RetryPolicy_RetryHostPredicate) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*RetryPolicy_RetryHostPredicate_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isRetryPolicy_RetryHostPredicate_ConfigType interface { + isRetryPolicy_RetryHostPredicate_ConfigType() +} + +type RetryPolicy_RetryHostPredicate_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*RetryPolicy_RetryHostPredicate_TypedConfig) isRetryPolicy_RetryHostPredicate_ConfigType() {} + +type RetryPolicy_RetryBackOff struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the base interval between retries. This parameter is required and must be greater + // than zero. Values less than 1 ms are rounded up to 1 ms. + // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's + // back-off algorithm. + BaseInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=base_interval,json=baseInterval,proto3" json:"base_interval,omitempty"` + // Specifies the maximum interval between retries. This parameter is optional, but must be + // greater than or equal to the “base_interval“ if set. The default is 10 times the + // “base_interval“. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion + // of Envoy's back-off algorithm. + MaxInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=max_interval,json=maxInterval,proto3" json:"max_interval,omitempty"` +} + +func (x *RetryPolicy_RetryBackOff) Reset() { + *x = RetryPolicy_RetryBackOff{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryPolicy_RetryBackOff) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryPolicy_RetryBackOff) ProtoMessage() {} + +func (x *RetryPolicy_RetryBackOff) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryPolicy_RetryBackOff.ProtoReflect.Descriptor instead. +func (*RetryPolicy_RetryBackOff) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{9, 2} +} + +func (x *RetryPolicy_RetryBackOff) GetBaseInterval() *durationpb.Duration { + if x != nil { + return x.BaseInterval + } + return nil +} + +func (x *RetryPolicy_RetryBackOff) GetMaxInterval() *durationpb.Duration { + if x != nil { + return x.MaxInterval + } + return nil +} + +type RetryPolicy_ResetHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the reset header. + // + // .. 
note:: + // + // If the header appears multiple times only the first value is used. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The format of the reset header. + Format RetryPolicy_ResetHeaderFormat `protobuf:"varint,2,opt,name=format,proto3,enum=envoy.config.route.v3.RetryPolicy_ResetHeaderFormat" json:"format,omitempty"` +} + +func (x *RetryPolicy_ResetHeader) Reset() { + *x = RetryPolicy_ResetHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryPolicy_ResetHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryPolicy_ResetHeader) ProtoMessage() {} + +func (x *RetryPolicy_ResetHeader) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryPolicy_ResetHeader.ProtoReflect.Descriptor instead. +func (*RetryPolicy_ResetHeader) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{9, 3} +} + +func (x *RetryPolicy_ResetHeader) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *RetryPolicy_ResetHeader) GetFormat() RetryPolicy_ResetHeaderFormat { + if x != nil { + return x.Format + } + return RetryPolicy_SECONDS +} + +// A retry back-off strategy that applies when the upstream server rate limits +// the request. +// +// Given this configuration: +// +// .. code-block:: yaml +// +// rate_limited_retry_back_off: +// reset_headers: +// - name: Retry-After +// format: SECONDS +// - name: X-RateLimit-Reset +// format: UNIX_TIMESTAMP +// max_interval: "300s" +// +// The following algorithm will apply: +// +// 1. If the response contains the header “Retry-After“ its value must be on +// the form “120“ (an integer that represents the number of seconds to +// wait before retrying). If so, this value is used as the back-off interval. +// 2. Otherwise, if the response contains the header “X-RateLimit-Reset“ its +// value must be on the form “1595320702“ (an integer that represents the +// point in time at which to retry, as a Unix timestamp in seconds). If so, +// the current time is subtracted from this value and the result is used as +// the back-off interval. +// 3. Otherwise, Envoy will use the default +// :ref:`exponential back-off ` +// strategy. +// +// No matter which format is used, if the resulting back-off interval exceeds +// “max_interval“ it is discarded and the next header in “reset_headers“ +// is tried. If a request timeout is configured for the route it will further +// limit how long the request will be allowed to run. +// +// To prevent many clients retrying at the same point in time jitter is added +// to the back-off interval, so the resulting interval is decided by taking: +// “random(interval, interval * 1.5)“. +// +// .. attention:: +// +// Configuring ``rate_limited_retry_back_off`` will not by itself cause a request +// to be retried. You will still need to configure the right retry policy to match +// the responses from the upstream server. 
+type RetryPolicy_RateLimitedRetryBackOff struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the reset headers (like “Retry-After“ or “X-RateLimit-Reset“) + // to match against the response. Headers are tried in order, and matched case + // insensitive. The first header to be parsed successfully is used. If no headers + // match the default exponential back-off is used instead. + ResetHeaders []*RetryPolicy_ResetHeader `protobuf:"bytes,1,rep,name=reset_headers,json=resetHeaders,proto3" json:"reset_headers,omitempty"` + // Specifies the maximum back off interval that Envoy will allow. If a reset + // header contains an interval longer than this then it will be discarded and + // the next header will be tried. Defaults to 300 seconds. + MaxInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=max_interval,json=maxInterval,proto3" json:"max_interval,omitempty"` +} + +func (x *RetryPolicy_RateLimitedRetryBackOff) Reset() { + *x = RetryPolicy_RateLimitedRetryBackOff{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryPolicy_RateLimitedRetryBackOff) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryPolicy_RateLimitedRetryBackOff) ProtoMessage() {} + +func (x *RetryPolicy_RateLimitedRetryBackOff) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryPolicy_RateLimitedRetryBackOff.ProtoReflect.Descriptor instead. 
+func (*RetryPolicy_RateLimitedRetryBackOff) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{9, 4} +} + +func (x *RetryPolicy_RateLimitedRetryBackOff) GetResetHeaders() []*RetryPolicy_ResetHeader { + if x != nil { + return x.ResetHeaders + } + return nil +} + +func (x *RetryPolicy_RateLimitedRetryBackOff) GetMaxInterval() *durationpb.Duration { + if x != nil { + return x.MaxInterval + } + return nil +} + +// [#next-free-field: 12] +type RateLimit_Action struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to ActionSpecifier: + // + // *RateLimit_Action_SourceCluster_ + // *RateLimit_Action_DestinationCluster_ + // *RateLimit_Action_RequestHeaders_ + // *RateLimit_Action_RemoteAddress_ + // *RateLimit_Action_GenericKey_ + // *RateLimit_Action_HeaderValueMatch_ + // *RateLimit_Action_DynamicMetadata + // *RateLimit_Action_Metadata + // *RateLimit_Action_Extension + // *RateLimit_Action_MaskedRemoteAddress_ + // *RateLimit_Action_QueryParameterValueMatch_ + ActionSpecifier isRateLimit_Action_ActionSpecifier `protobuf_oneof:"action_specifier"` +} + +func (x *RateLimit_Action) Reset() { + *x = RateLimit_Action{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Action) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Action) ProtoMessage() {} + +func (x *RateLimit_Action) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Action.ProtoReflect.Descriptor instead. 
+func (*RateLimit_Action) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0} +} + +func (m *RateLimit_Action) GetActionSpecifier() isRateLimit_Action_ActionSpecifier { + if m != nil { + return m.ActionSpecifier + } + return nil +} + +func (x *RateLimit_Action) GetSourceCluster() *RateLimit_Action_SourceCluster { + if x, ok := x.GetActionSpecifier().(*RateLimit_Action_SourceCluster_); ok { + return x.SourceCluster + } + return nil +} + +func (x *RateLimit_Action) GetDestinationCluster() *RateLimit_Action_DestinationCluster { + if x, ok := x.GetActionSpecifier().(*RateLimit_Action_DestinationCluster_); ok { + return x.DestinationCluster + } + return nil +} + +func (x *RateLimit_Action) GetRequestHeaders() *RateLimit_Action_RequestHeaders { + if x, ok := x.GetActionSpecifier().(*RateLimit_Action_RequestHeaders_); ok { + return x.RequestHeaders + } + return nil +} + +func (x *RateLimit_Action) GetRemoteAddress() *RateLimit_Action_RemoteAddress { + if x, ok := x.GetActionSpecifier().(*RateLimit_Action_RemoteAddress_); ok { + return x.RemoteAddress + } + return nil +} + +func (x *RateLimit_Action) GetGenericKey() *RateLimit_Action_GenericKey { + if x, ok := x.GetActionSpecifier().(*RateLimit_Action_GenericKey_); ok { + return x.GenericKey + } + return nil +} + +func (x *RateLimit_Action) GetHeaderValueMatch() *RateLimit_Action_HeaderValueMatch { + if x, ok := x.GetActionSpecifier().(*RateLimit_Action_HeaderValueMatch_); ok { + return x.HeaderValueMatch + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. +func (x *RateLimit_Action) GetDynamicMetadata() *RateLimit_Action_DynamicMetaData { + if x, ok := x.GetActionSpecifier().(*RateLimit_Action_DynamicMetadata); ok { + return x.DynamicMetadata + } + return nil +} + +func (x *RateLimit_Action) GetMetadata() *RateLimit_Action_MetaData { + if x, ok := x.GetActionSpecifier().(*RateLimit_Action_Metadata); ok { + return x.Metadata + } + return nil +} + +func (x *RateLimit_Action) GetExtension() *v31.TypedExtensionConfig { + if x, ok := x.GetActionSpecifier().(*RateLimit_Action_Extension); ok { + return x.Extension + } + return nil +} + +func (x *RateLimit_Action) GetMaskedRemoteAddress() *RateLimit_Action_MaskedRemoteAddress { + if x, ok := x.GetActionSpecifier().(*RateLimit_Action_MaskedRemoteAddress_); ok { + return x.MaskedRemoteAddress + } + return nil +} + +func (x *RateLimit_Action) GetQueryParameterValueMatch() *RateLimit_Action_QueryParameterValueMatch { + if x, ok := x.GetActionSpecifier().(*RateLimit_Action_QueryParameterValueMatch_); ok { + return x.QueryParameterValueMatch + } + return nil +} + +type isRateLimit_Action_ActionSpecifier interface { + isRateLimit_Action_ActionSpecifier() +} + +type RateLimit_Action_SourceCluster_ struct { + // Rate limit on source cluster. + SourceCluster *RateLimit_Action_SourceCluster `protobuf:"bytes,1,opt,name=source_cluster,json=sourceCluster,proto3,oneof"` +} + +type RateLimit_Action_DestinationCluster_ struct { + // Rate limit on destination cluster. + DestinationCluster *RateLimit_Action_DestinationCluster `protobuf:"bytes,2,opt,name=destination_cluster,json=destinationCluster,proto3,oneof"` +} + +type RateLimit_Action_RequestHeaders_ struct { + // Rate limit on request headers. 
+ RequestHeaders *RateLimit_Action_RequestHeaders `protobuf:"bytes,3,opt,name=request_headers,json=requestHeaders,proto3,oneof"` +} + +type RateLimit_Action_RemoteAddress_ struct { + // Rate limit on remote address. + RemoteAddress *RateLimit_Action_RemoteAddress `protobuf:"bytes,4,opt,name=remote_address,json=remoteAddress,proto3,oneof"` +} + +type RateLimit_Action_GenericKey_ struct { + // Rate limit on a generic key. + GenericKey *RateLimit_Action_GenericKey `protobuf:"bytes,5,opt,name=generic_key,json=genericKey,proto3,oneof"` +} + +type RateLimit_Action_HeaderValueMatch_ struct { + // Rate limit on the existence of request headers. + HeaderValueMatch *RateLimit_Action_HeaderValueMatch `protobuf:"bytes,6,opt,name=header_value_match,json=headerValueMatch,proto3,oneof"` +} + +type RateLimit_Action_DynamicMetadata struct { + // Rate limit on dynamic metadata. + // + // .. attention:: + // + // This field has been deprecated in favor of the :ref:`metadata ` field + // + // Deprecated: Marked as deprecated in envoy/config/route/v3/route_components.proto. + DynamicMetadata *RateLimit_Action_DynamicMetaData `protobuf:"bytes,7,opt,name=dynamic_metadata,json=dynamicMetadata,proto3,oneof"` +} + +type RateLimit_Action_Metadata struct { + // Rate limit on metadata. + Metadata *RateLimit_Action_MetaData `protobuf:"bytes,8,opt,name=metadata,proto3,oneof"` +} + +type RateLimit_Action_Extension struct { + // Rate limit descriptor extension. See the rate limit descriptor extensions documentation. + // + // :ref:`HTTP matching input functions ` are + // permitted as descriptor extensions. The input functions are only + // looked up if there is no rate limit descriptor extension matching + // the type URL. + // + // [#extension-category: envoy.rate_limit_descriptors] + Extension *v31.TypedExtensionConfig `protobuf:"bytes,9,opt,name=extension,proto3,oneof"` +} + +type RateLimit_Action_MaskedRemoteAddress_ struct { + // Rate limit on masked remote address. + MaskedRemoteAddress *RateLimit_Action_MaskedRemoteAddress `protobuf:"bytes,10,opt,name=masked_remote_address,json=maskedRemoteAddress,proto3,oneof"` +} + +type RateLimit_Action_QueryParameterValueMatch_ struct { + // Rate limit on the existence of query parameters. 
+ QueryParameterValueMatch *RateLimit_Action_QueryParameterValueMatch `protobuf:"bytes,11,opt,name=query_parameter_value_match,json=queryParameterValueMatch,proto3,oneof"` +} + +func (*RateLimit_Action_SourceCluster_) isRateLimit_Action_ActionSpecifier() {} + +func (*RateLimit_Action_DestinationCluster_) isRateLimit_Action_ActionSpecifier() {} + +func (*RateLimit_Action_RequestHeaders_) isRateLimit_Action_ActionSpecifier() {} + +func (*RateLimit_Action_RemoteAddress_) isRateLimit_Action_ActionSpecifier() {} + +func (*RateLimit_Action_GenericKey_) isRateLimit_Action_ActionSpecifier() {} + +func (*RateLimit_Action_HeaderValueMatch_) isRateLimit_Action_ActionSpecifier() {} + +func (*RateLimit_Action_DynamicMetadata) isRateLimit_Action_ActionSpecifier() {} + +func (*RateLimit_Action_Metadata) isRateLimit_Action_ActionSpecifier() {} + +func (*RateLimit_Action_Extension) isRateLimit_Action_ActionSpecifier() {} + +func (*RateLimit_Action_MaskedRemoteAddress_) isRateLimit_Action_ActionSpecifier() {} + +func (*RateLimit_Action_QueryParameterValueMatch_) isRateLimit_Action_ActionSpecifier() {} + +type RateLimit_Override struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to OverrideSpecifier: + // + // *RateLimit_Override_DynamicMetadata_ + OverrideSpecifier isRateLimit_Override_OverrideSpecifier `protobuf_oneof:"override_specifier"` +} + +func (x *RateLimit_Override) Reset() { + *x = RateLimit_Override{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Override) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Override) ProtoMessage() {} + +func (x *RateLimit_Override) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Override.ProtoReflect.Descriptor instead. +func (*RateLimit_Override) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 1} +} + +func (m *RateLimit_Override) GetOverrideSpecifier() isRateLimit_Override_OverrideSpecifier { + if m != nil { + return m.OverrideSpecifier + } + return nil +} + +func (x *RateLimit_Override) GetDynamicMetadata() *RateLimit_Override_DynamicMetadata { + if x, ok := x.GetOverrideSpecifier().(*RateLimit_Override_DynamicMetadata_); ok { + return x.DynamicMetadata + } + return nil +} + +type isRateLimit_Override_OverrideSpecifier interface { + isRateLimit_Override_OverrideSpecifier() +} + +type RateLimit_Override_DynamicMetadata_ struct { + // Limit override from dynamic metadata. + DynamicMetadata *RateLimit_Override_DynamicMetadata `protobuf:"bytes,1,opt,name=dynamic_metadata,json=dynamicMetadata,proto3,oneof"` +} + +func (*RateLimit_Override_DynamicMetadata_) isRateLimit_Override_OverrideSpecifier() {} + +// The following descriptor entry is appended to the descriptor: +// +// .. code-block:: cpp +// +// ("source_cluster", "") +// +// is derived from the :option:`--service-cluster` option. 
+type RateLimit_Action_SourceCluster struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RateLimit_Action_SourceCluster) Reset() { + *x = RateLimit_Action_SourceCluster{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Action_SourceCluster) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Action_SourceCluster) ProtoMessage() {} + +func (x *RateLimit_Action_SourceCluster) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Action_SourceCluster.ProtoReflect.Descriptor instead. +func (*RateLimit_Action_SourceCluster) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 0} +} + +// The following descriptor entry is appended to the descriptor: +// +// .. code-block:: cpp +// +// ("destination_cluster", "") +// +// Once a request matches against a route table rule, a routed cluster is determined by one of +// the following :ref:`route table configuration ` +// settings: +// +// - :ref:`cluster ` indicates the upstream cluster +// to route to. +// - :ref:`weighted_clusters ` +// chooses a cluster randomly from a set of clusters with attributed weight. +// - :ref:`cluster_header ` indicates which +// header in the request contains the target cluster. +type RateLimit_Action_DestinationCluster struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RateLimit_Action_DestinationCluster) Reset() { + *x = RateLimit_Action_DestinationCluster{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Action_DestinationCluster) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Action_DestinationCluster) ProtoMessage() {} + +func (x *RateLimit_Action_DestinationCluster) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Action_DestinationCluster.ProtoReflect.Descriptor instead. +func (*RateLimit_Action_DestinationCluster) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 1} +} + +// The following descriptor entry is appended when a header contains a key that matches the +// “header_name“: +// +// .. code-block:: cpp +// +// ("", "") +type RateLimit_Action_RequestHeaders struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The header name to be queried from the request headers. The header’s + // value is used to populate the value of the descriptor entry for the + // descriptor_key. 
+ HeaderName string `protobuf:"bytes,1,opt,name=header_name,json=headerName,proto3" json:"header_name,omitempty"` + // The key to use in the descriptor entry. + DescriptorKey string `protobuf:"bytes,2,opt,name=descriptor_key,json=descriptorKey,proto3" json:"descriptor_key,omitempty"` + // If set to true, Envoy skips the descriptor while calling rate limiting service + // when header is not present in the request. By default it skips calling the + // rate limiting service if this header is not present in the request. + SkipIfAbsent bool `protobuf:"varint,3,opt,name=skip_if_absent,json=skipIfAbsent,proto3" json:"skip_if_absent,omitempty"` +} + +func (x *RateLimit_Action_RequestHeaders) Reset() { + *x = RateLimit_Action_RequestHeaders{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Action_RequestHeaders) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Action_RequestHeaders) ProtoMessage() {} + +func (x *RateLimit_Action_RequestHeaders) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Action_RequestHeaders.ProtoReflect.Descriptor instead. +func (*RateLimit_Action_RequestHeaders) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 2} +} + +func (x *RateLimit_Action_RequestHeaders) GetHeaderName() string { + if x != nil { + return x.HeaderName + } + return "" +} + +func (x *RateLimit_Action_RequestHeaders) GetDescriptorKey() string { + if x != nil { + return x.DescriptorKey + } + return "" +} + +func (x *RateLimit_Action_RequestHeaders) GetSkipIfAbsent() bool { + if x != nil { + return x.SkipIfAbsent + } + return false +} + +// The following descriptor entry is appended to the descriptor and is populated using the +// trusted address from :ref:`x-forwarded-for `: +// +// .. code-block:: cpp +// +// ("remote_address", "") +type RateLimit_Action_RemoteAddress struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RateLimit_Action_RemoteAddress) Reset() { + *x = RateLimit_Action_RemoteAddress{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Action_RemoteAddress) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Action_RemoteAddress) ProtoMessage() {} + +func (x *RateLimit_Action_RemoteAddress) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Action_RemoteAddress.ProtoReflect.Descriptor instead. 
+func (*RateLimit_Action_RemoteAddress) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 3} +} + +// The following descriptor entry is appended to the descriptor and is populated using the +// masked address from :ref:`x-forwarded-for `: +// +// .. code-block:: cpp +// +// ("masked_remote_address", "") +type RateLimit_Action_MaskedRemoteAddress struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Length of prefix mask len for IPv4 (e.g. 0, 32). + // Defaults to 32 when unset. + // For example, trusted address from x-forwarded-for is “192.168.1.1“, + // the descriptor entry is ("masked_remote_address", "192.168.1.1/32"); + // if mask len is 24, the descriptor entry is ("masked_remote_address", "192.168.1.0/24"). + V4PrefixMaskLen *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=v4_prefix_mask_len,json=v4PrefixMaskLen,proto3" json:"v4_prefix_mask_len,omitempty"` + // Length of prefix mask len for IPv6 (e.g. 0, 128). + // Defaults to 128 when unset. + // For example, trusted address from x-forwarded-for is “2001:abcd:ef01:2345:6789:abcd:ef01:234“, + // the descriptor entry is ("masked_remote_address", "2001:abcd:ef01:2345:6789:abcd:ef01:234/128"); + // if mask len is 64, the descriptor entry is ("masked_remote_address", "2001:abcd:ef01:2345::/64"). + V6PrefixMaskLen *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=v6_prefix_mask_len,json=v6PrefixMaskLen,proto3" json:"v6_prefix_mask_len,omitempty"` +} + +func (x *RateLimit_Action_MaskedRemoteAddress) Reset() { + *x = RateLimit_Action_MaskedRemoteAddress{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Action_MaskedRemoteAddress) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Action_MaskedRemoteAddress) ProtoMessage() {} + +func (x *RateLimit_Action_MaskedRemoteAddress) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Action_MaskedRemoteAddress.ProtoReflect.Descriptor instead. +func (*RateLimit_Action_MaskedRemoteAddress) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 4} +} + +func (x *RateLimit_Action_MaskedRemoteAddress) GetV4PrefixMaskLen() *wrapperspb.UInt32Value { + if x != nil { + return x.V4PrefixMaskLen + } + return nil +} + +func (x *RateLimit_Action_MaskedRemoteAddress) GetV6PrefixMaskLen() *wrapperspb.UInt32Value { + if x != nil { + return x.V6PrefixMaskLen + } + return nil +} + +// The following descriptor entry is appended to the descriptor: +// +// .. code-block:: cpp +// +// ("generic_key", "") +type RateLimit_Action_GenericKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The value to use in the descriptor entry. + DescriptorValue string `protobuf:"bytes,1,opt,name=descriptor_value,json=descriptorValue,proto3" json:"descriptor_value,omitempty"` + // An optional key to use in the descriptor entry. 
If not set it defaults + // to 'generic_key' as the descriptor key. + DescriptorKey string `protobuf:"bytes,2,opt,name=descriptor_key,json=descriptorKey,proto3" json:"descriptor_key,omitempty"` +} + +func (x *RateLimit_Action_GenericKey) Reset() { + *x = RateLimit_Action_GenericKey{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Action_GenericKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Action_GenericKey) ProtoMessage() {} + +func (x *RateLimit_Action_GenericKey) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Action_GenericKey.ProtoReflect.Descriptor instead. +func (*RateLimit_Action_GenericKey) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 5} +} + +func (x *RateLimit_Action_GenericKey) GetDescriptorValue() string { + if x != nil { + return x.DescriptorValue + } + return "" +} + +func (x *RateLimit_Action_GenericKey) GetDescriptorKey() string { + if x != nil { + return x.DescriptorKey + } + return "" +} + +// The following descriptor entry is appended to the descriptor: +// +// .. code-block:: cpp +// +// ("header_match", "") +type RateLimit_Action_HeaderValueMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The key to use in the descriptor entry. Defaults to “header_match“. + DescriptorKey string `protobuf:"bytes,4,opt,name=descriptor_key,json=descriptorKey,proto3" json:"descriptor_key,omitempty"` + // The value to use in the descriptor entry. + DescriptorValue string `protobuf:"bytes,1,opt,name=descriptor_value,json=descriptorValue,proto3" json:"descriptor_value,omitempty"` + // If set to true, the action will append a descriptor entry when the + // request matches the headers. If set to false, the action will append a + // descriptor entry when the request does not match the headers. The + // default value is true. + ExpectMatch *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=expect_match,json=expectMatch,proto3" json:"expect_match,omitempty"` + // Specifies a set of headers that the rate limit action should match + // on. The action will check the request’s headers against all the + // specified headers in the config. A match will happen if all the + // headers in the config are present in the request with the same values + // (or based on presence if the value field is not in the config). 
+ Headers []*HeaderMatcher `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty"` +} + +func (x *RateLimit_Action_HeaderValueMatch) Reset() { + *x = RateLimit_Action_HeaderValueMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Action_HeaderValueMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Action_HeaderValueMatch) ProtoMessage() {} + +func (x *RateLimit_Action_HeaderValueMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Action_HeaderValueMatch.ProtoReflect.Descriptor instead. +func (*RateLimit_Action_HeaderValueMatch) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 6} +} + +func (x *RateLimit_Action_HeaderValueMatch) GetDescriptorKey() string { + if x != nil { + return x.DescriptorKey + } + return "" +} + +func (x *RateLimit_Action_HeaderValueMatch) GetDescriptorValue() string { + if x != nil { + return x.DescriptorValue + } + return "" +} + +func (x *RateLimit_Action_HeaderValueMatch) GetExpectMatch() *wrapperspb.BoolValue { + if x != nil { + return x.ExpectMatch + } + return nil +} + +func (x *RateLimit_Action_HeaderValueMatch) GetHeaders() []*HeaderMatcher { + if x != nil { + return x.Headers + } + return nil +} + +// The following descriptor entry is appended when the +// :ref:`dynamic metadata ` contains a key value: +// +// .. code-block:: cpp +// +// ("", "") +// +// .. attention:: +// +// This action has been deprecated in favor of the :ref:`metadata ` action +type RateLimit_Action_DynamicMetaData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The key to use in the descriptor entry. + DescriptorKey string `protobuf:"bytes,1,opt,name=descriptor_key,json=descriptorKey,proto3" json:"descriptor_key,omitempty"` + // Metadata struct that defines the key and path to retrieve the string value. A match will + // only happen if the value in the dynamic metadata is of type string. + MetadataKey *v35.MetadataKey `protobuf:"bytes,2,opt,name=metadata_key,json=metadataKey,proto3" json:"metadata_key,omitempty"` + // An optional value to use if “metadata_key“ is empty. If not set and + // no value is present under the metadata_key then no descriptor is generated. 
+ DefaultValue string `protobuf:"bytes,3,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` +} + +func (x *RateLimit_Action_DynamicMetaData) Reset() { + *x = RateLimit_Action_DynamicMetaData{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Action_DynamicMetaData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Action_DynamicMetaData) ProtoMessage() {} + +func (x *RateLimit_Action_DynamicMetaData) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Action_DynamicMetaData.ProtoReflect.Descriptor instead. +func (*RateLimit_Action_DynamicMetaData) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 7} +} + +func (x *RateLimit_Action_DynamicMetaData) GetDescriptorKey() string { + if x != nil { + return x.DescriptorKey + } + return "" +} + +func (x *RateLimit_Action_DynamicMetaData) GetMetadataKey() *v35.MetadataKey { + if x != nil { + return x.MetadataKey + } + return nil +} + +func (x *RateLimit_Action_DynamicMetaData) GetDefaultValue() string { + if x != nil { + return x.DefaultValue + } + return "" +} + +// The following descriptor entry is appended when the metadata contains a key value: +// +// .. code-block:: cpp +// +// ("", "") +// +// [#next-free-field: 6] +type RateLimit_Action_MetaData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The key to use in the descriptor entry. + DescriptorKey string `protobuf:"bytes,1,opt,name=descriptor_key,json=descriptorKey,proto3" json:"descriptor_key,omitempty"` + // Metadata struct that defines the key and path to retrieve the string value. A match will + // only happen if the value in the metadata is of type string. + MetadataKey *v35.MetadataKey `protobuf:"bytes,2,opt,name=metadata_key,json=metadataKey,proto3" json:"metadata_key,omitempty"` + // An optional value to use if “metadata_key“ is empty. If not set and + // no value is present under the metadata_key then “skip_if_absent“ is followed to + // skip calling the rate limiting service or skip the descriptor. + DefaultValue string `protobuf:"bytes,3,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + // Source of metadata + Source RateLimit_Action_MetaData_Source `protobuf:"varint,4,opt,name=source,proto3,enum=envoy.config.route.v3.RateLimit_Action_MetaData_Source" json:"source,omitempty"` + // If set to true, Envoy skips the descriptor while calling rate limiting service + // when “metadata_key“ is empty and “default_value“ is not set. By default it skips calling the + // rate limiting service in that case. 
+ SkipIfAbsent bool `protobuf:"varint,5,opt,name=skip_if_absent,json=skipIfAbsent,proto3" json:"skip_if_absent,omitempty"` +} + +func (x *RateLimit_Action_MetaData) Reset() { + *x = RateLimit_Action_MetaData{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Action_MetaData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Action_MetaData) ProtoMessage() {} + +func (x *RateLimit_Action_MetaData) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Action_MetaData.ProtoReflect.Descriptor instead. +func (*RateLimit_Action_MetaData) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 8} +} + +func (x *RateLimit_Action_MetaData) GetDescriptorKey() string { + if x != nil { + return x.DescriptorKey + } + return "" +} + +func (x *RateLimit_Action_MetaData) GetMetadataKey() *v35.MetadataKey { + if x != nil { + return x.MetadataKey + } + return nil +} + +func (x *RateLimit_Action_MetaData) GetDefaultValue() string { + if x != nil { + return x.DefaultValue + } + return "" +} + +func (x *RateLimit_Action_MetaData) GetSource() RateLimit_Action_MetaData_Source { + if x != nil { + return x.Source + } + return RateLimit_Action_MetaData_DYNAMIC +} + +func (x *RateLimit_Action_MetaData) GetSkipIfAbsent() bool { + if x != nil { + return x.SkipIfAbsent + } + return false +} + +// The following descriptor entry is appended to the descriptor: +// +// .. code-block:: cpp +// +// ("query_match", "") +type RateLimit_Action_QueryParameterValueMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The key to use in the descriptor entry. Defaults to “query_match“. + DescriptorKey string `protobuf:"bytes,4,opt,name=descriptor_key,json=descriptorKey,proto3" json:"descriptor_key,omitempty"` + // The value to use in the descriptor entry. + DescriptorValue string `protobuf:"bytes,1,opt,name=descriptor_value,json=descriptorValue,proto3" json:"descriptor_value,omitempty"` + // If set to true, the action will append a descriptor entry when the + // request matches the headers. If set to false, the action will append a + // descriptor entry when the request does not match the headers. The + // default value is true. + ExpectMatch *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=expect_match,json=expectMatch,proto3" json:"expect_match,omitempty"` + // Specifies a set of query parameters that the rate limit action should match + // on. The action will check the request’s query parameters against all the + // specified query parameters in the config. A match will happen if all the + // query parameters in the config are present in the request with the same values + // (or based on presence if the value field is not in the config). 
+ QueryParameters []*QueryParameterMatcher `protobuf:"bytes,3,rep,name=query_parameters,json=queryParameters,proto3" json:"query_parameters,omitempty"` +} + +func (x *RateLimit_Action_QueryParameterValueMatch) Reset() { + *x = RateLimit_Action_QueryParameterValueMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Action_QueryParameterValueMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Action_QueryParameterValueMatch) ProtoMessage() {} + +func (x *RateLimit_Action_QueryParameterValueMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Action_QueryParameterValueMatch.ProtoReflect.Descriptor instead. +func (*RateLimit_Action_QueryParameterValueMatch) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 9} +} + +func (x *RateLimit_Action_QueryParameterValueMatch) GetDescriptorKey() string { + if x != nil { + return x.DescriptorKey + } + return "" +} + +func (x *RateLimit_Action_QueryParameterValueMatch) GetDescriptorValue() string { + if x != nil { + return x.DescriptorValue + } + return "" +} + +func (x *RateLimit_Action_QueryParameterValueMatch) GetExpectMatch() *wrapperspb.BoolValue { + if x != nil { + return x.ExpectMatch + } + return nil +} + +func (x *RateLimit_Action_QueryParameterValueMatch) GetQueryParameters() []*QueryParameterMatcher { + if x != nil { + return x.QueryParameters + } + return nil +} + +// Fetches the override from the dynamic metadata. +type RateLimit_Override_DynamicMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Metadata struct that defines the key and path to retrieve the struct value. + // The value must be a struct containing an integer "requests_per_unit" property + // and a "unit" property with a value parseable to :ref:`RateLimitUnit + // enum ` + MetadataKey *v35.MetadataKey `protobuf:"bytes,1,opt,name=metadata_key,json=metadataKey,proto3" json:"metadata_key,omitempty"` +} + +func (x *RateLimit_Override_DynamicMetadata) Reset() { + *x = RateLimit_Override_DynamicMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimit_Override_DynamicMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimit_Override_DynamicMetadata) ProtoMessage() {} + +func (x *RateLimit_Override_DynamicMetadata) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimit_Override_DynamicMetadata.ProtoReflect.Descriptor instead. 
+func (*RateLimit_Override_DynamicMetadata) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 1, 0} +} + +func (x *RateLimit_Override_DynamicMetadata) GetMetadataKey() *v35.MetadataKey { + if x != nil { + return x.MetadataKey + } + return nil +} + +var File_envoy_config_route_v3_route_components_proto protoreflect.FileDescriptor + +var file_envoy_config_route_v3_route_components_proto_rawDesc = []byte{ + 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, + 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, + 0x76, 0x33, 0x2f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x26, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2f, + 0x76, 0x33, 0x2f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x61, 0x67, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, + 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 
0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x95, 0x0f, 0x0a, 0x0b, 0x56, + 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, + 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, 0x01, 0x0c, + 0x08, 0x01, 0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x07, 0x64, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x34, 0x0a, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x07, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x78, + 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, + 0x06, 0x02, 0x08, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x60, 0x0a, + 0x0b, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, + 0x61, 0x6c, 0x48, 0x6f, 0x73, 0x74, 0x2e, 0x54, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, + 0x02, 0x10, 0x01, 0x52, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x54, 0x6c, 0x73, 0x12, + 0x50, 0x0a, 0x10, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 
0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x52, 0x0f, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x73, 0x12, 0x41, 0x0a, 0x0b, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x0a, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, + 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4d, 0x0a, + 0x19, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, + 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, 0x01, 0x0c, 0x22, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, + 0x00, 0xc0, 0x01, 0x01, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x69, 0x0a, 0x17, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, + 0x07, 0x52, 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4f, 0x0a, 0x1a, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, + 0x92, 0x01, 0x0c, 0x22, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, + 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x42, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6f, 0x72, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x73, 0x0a, 0x17, + 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 
0x72, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x48, 0x6f, 0x73, + 0x74, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x74, 0x79, 0x70, + 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x41, 0x0a, 0x1d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x48, 0x0a, 0x21, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, + 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x6e, + 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x1d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, + 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, + 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4f, 0x0a, 0x19, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x16, + 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x45, 0x0a, 0x0c, 0x68, 0x65, 0x64, 0x67, 0x65, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x52, 0x0b, 0x68, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x44, 0x0a, + 0x1f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x69, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x49, + 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x74, 0x72, 0x79, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x60, 0x0a, 0x1e, 0x70, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x70, 0x65, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, + 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x6e, 0x0a, 0x17, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x5f, 0x6d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x15, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x1a, 0x5d, 0x0a, 0x19, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x3a, 0x0a, 0x12, 0x54, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, + 0x12, 0x11, 0x0a, 0x0d, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x4f, 0x4e, 0x4c, + 0x59, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x02, 0x3a, 0x25, 0x9a, 0xc5, + 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x48, + 0x6f, 0x73, 0x74, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x52, + 0x11, 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x22, 0x64, 0x0a, 0x0c, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x41, 0x0a, 0x09, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 
0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0xaf, 0x0b, 0x0a, 0x05, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x05, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3a, 0x0a, 0x05, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, + 0x00, 0x52, 0x05, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x72, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x56, 0x0a, + 0x0f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0e, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0d, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x48, 0x00, 0x52, 0x0c, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x60, 0x0a, 0x15, 0x6e, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, + 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x77, + 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x13, + 0x6e, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 
0x64, 0x61, 0x74, 0x61, 0x12, + 0x3e, 0x0a, 0x09, 0x64, 0x65, 0x63, 0x6f, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x63, 0x6f, 0x72, + 0x61, 0x74, 0x6f, 0x72, 0x52, 0x09, 0x64, 0x65, 0x63, 0x6f, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, + 0x6d, 0x0a, 0x17, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x54, + 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x74, 0x79, 0x70, 0x65, 0x64, 0x50, + 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x67, + 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, + 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4d, 0x0a, 0x19, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, + 0x01, 0x0c, 0x22, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x16, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, + 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x69, 0x0a, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, + 0x64, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x14, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, + 0x64, 0x12, 0x4f, 0x0a, 0x1a, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, + 0x0b, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, 0x01, 0x0c, 0x22, 0x0a, 0x72, + 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 
0x66, + 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x63, + 0x69, 0x6e, 0x67, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x12, 0x60, 0x0a, 0x1e, + 0x70, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x75, 0x66, 0x66, + 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x1a, 0x70, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x75, + 0x66, 0x66, 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1f, + 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x13, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, + 0x5d, 0x0a, 0x19, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x1f, + 0x9a, 0xc5, 0x88, 0x1e, 0x1a, 0x0a, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x42, + 0x0d, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, + 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x52, 0x11, 0x70, 0x65, 0x72, 0x5f, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xf3, 0x0a, + 0x0a, 0x0f, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x12, 0x5a, 0x0a, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, + 0x02, 0x08, 0x01, 0x52, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x4c, 0x0a, + 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0b, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, + 0x4b, 0x65, 0x79, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2e, 0x0a, 0x0b, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 
0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, + 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x92, 0x08, 0x0a, 0x0d, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x2d, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x19, 0xf2, 0x98, 0xfe, 0x8f, 0x05, + 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x0e, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x24, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0xf2, + 0x98, 0xfe, 0x8f, 0x05, 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x34, 0x0a, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x45, 0x0a, + 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0d, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x12, 0x67, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, + 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4b, 0x0a, + 0x19, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, + 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, + 0x01, 0x01, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x69, 0x0a, 0x17, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, + 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, + 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 
0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4d, 0x0a, 0x1a, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, + 0x6f, 0x76, 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, + 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x17, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x85, 0x01, 0x0a, 0x17, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x70, + 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x2e, 0x54, 0x79, 0x70, + 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x74, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x14, + 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x74, + 0x65, 0x72, 0x61, 0x6c, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, + 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x48, 0x00, 0x52, 0x12, 0x68, 0x6f, 0x73, 0x74, 0x52, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x1a, 0x5d, 0x0a, + 0x19, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, + 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x37, 0x9a, 0xc5, + 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x57, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x42, 0x18, 0x0a, 0x16, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, + 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x52, 0x11, 0x70, 0x65, 0x72, + 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x29, + 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, + 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, 0x18, 0x0a, 0x16, 0x72, 0x61, 0x6e, + 0x64, 0x6f, 0x6d, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x22, 0x8d, 0x01, 0x0a, 0x16, 0x43, 0x6c, 0x75, 
0x73, 0x74, 0x65, 0x72, 0x53, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x52, + 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x22, 0xc5, 0x0a, 0x0a, 0x0a, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x12, 0x18, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x14, 0x0a, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x4e, 0x0a, 0x0a, 0x73, 0x61, 0x66, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x09, 0x73, 0x61, 0x66, 0x65, 0x52, 0x65, 0x67, + 0x65, 0x78, 0x12, 0x5b, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, + 0x4b, 0x0a, 0x15, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x73, 0x65, 0x70, 0x61, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, 0x15, + 0xfa, 0x42, 0x12, 0x72, 0x10, 0x32, 0x0e, 0x5e, 0x5b, 0x5e, 0x3f, 0x23, 0x5d, 0x2b, 0x5b, 0x5e, + 0x3f, 0x23, 0x2f, 0x5d, 0x24, 0x48, 0x00, 0x52, 0x13, 0x70, 0x61, 0x74, 0x68, 0x53, 0x65, 0x70, + 0x61, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x58, 0x0a, 0x11, + 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, + 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0f, 0x70, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x63, 0x61, 0x73, 0x65, 0x5f, 0x73, + 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 
0x66, + 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, 0x63, 0x61, 0x73, 0x65, + 0x53, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x59, 0x0a, 0x10, 0x72, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x66, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, + 0x65, 0x6e, 0x74, 0x52, 0x0f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3e, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0f, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x4b, 0x0a, + 0x04, 0x67, 0x72, 0x70, 0x63, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x47, + 0x72, 0x70, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x04, 0x67, 0x72, 0x70, 0x63, 0x12, 0x59, 0x0a, 0x0b, 0x74, 0x6c, + 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x2e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0a, 0x74, 0x6c, 0x73, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x51, 0x0a, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x53, 0x0a, 0x15, 0x47, 0x72, 0x70, 0x63, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x4d, 
0x61, 0x74, 0x63, 0x68, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xc9, 0x01, + 0x0a, 0x16, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x73, + 0x65, 0x6e, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, + 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, + 0x65, 0x64, 0x12, 0x38, 0x0a, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x64, 0x3a, 0x3b, 0x9a, 0xc5, + 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x2e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x10, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x3a, 0x24, 0x9a, 0xc5, 0x88, + 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x42, 0x15, 0x0a, 0x0e, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, + 0x08, 0x03, 0x10, 0x04, 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x22, 0xa8, 0x06, 0x0a, 0x0a, + 0x43, 0x6f, 0x72, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x5f, 0x0a, 0x19, 0x61, 0x6c, + 0x6c, 0x6f, 0x77, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x52, 0x16, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x23, 0x0a, 0x0d, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, + 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x5f, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, + 0x78, 0x70, 0x6f, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x17, 0x0a, 0x07, + 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, + 0x61, 0x78, 0x41, 0x67, 0x65, 0x12, 0x47, 
0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x61, 0x6c, + 0x6c, 0x6f, 0x77, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x57, + 0x0a, 0x0e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0d, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x55, 0x0a, 0x0e, 0x73, 0x68, 0x61, 0x64, 0x6f, + 0x77, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, + 0x0d, 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x5b, + 0x0a, 0x1c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x61, 0x0a, 0x1f, 0x66, + 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x73, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x1c, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x65, 0x66, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x73, 0x3a, 0x24, + 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x42, 0x13, 0x0a, 0x11, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x5f, + 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, + 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, 0x07, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0xa6, 0x2d, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x07, 0x63, 
0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x48, 0x00, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x36, 0x0a, 0x0e, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, + 0x01, 0x01, 0x48, 0x00, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x11, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, + 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x3a, 0x0a, 0x18, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x16, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x76, 0x0a, 0x1f, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, + 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x5f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x27, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x48, 0x00, + 0x52, 0x1c, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x8e, + 0x01, 0x0a, 0x1f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x66, + 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, + 0x64, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, + 0x10, 0x01, 0x52, 0x1b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x46, 0x6f, + 0x75, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x45, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0d, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x32, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x66, 0x69, 
0x78, + 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, + 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x0d, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x53, 0x0a, 0x0d, 0x72, 0x65, + 0x67, 0x65, 0x78, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, + 0x65, 0x52, 0x0c, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, + 0x5a, 0x0a, 0x13, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x29, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x70, 0x61, 0x74, 0x68, 0x52, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x3f, 0x0a, 0x14, 0x68, + 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x74, 0x65, + 0x72, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, + 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x48, 0x01, 0x52, 0x12, 0x68, 0x6f, 0x73, 0x74, 0x52, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x12, 0x48, 0x0a, 0x11, + 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x48, 0x01, 0x52, 0x0f, 0x61, 0x75, 0x74, 0x6f, 0x48, 0x6f, 0x73, 0x74, 0x52, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x3d, 0x0a, 0x13, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x1d, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, + 0x48, 0x01, 0x52, 0x11, 0x68, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x67, 0x0a, 0x17, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, + 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, + 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x48, 0x01, 0x52, 0x14, 0x68, 0x6f, 0x73, 0x74, 0x52, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, 0x35, + 0x0a, 0x17, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x78, 0x5f, 0x66, 0x6f, 0x72, 0x77, 0x61, + 0x72, 0x64, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x26, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x14, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x58, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, + 0x64, 0x48, 0x6f, 
0x73, 0x74, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3c, 0x0a, 0x0c, 0x69, 0x64, + 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x69, 0x64, 0x6c, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x56, 0x0a, 0x11, 0x65, 0x61, 0x72, 0x6c, + 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x28, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0f, 0x65, 0x61, 0x72, 0x6c, 0x79, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x12, 0x45, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, + 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4f, 0x0a, 0x19, 0x72, 0x65, 0x74, 0x72, 0x79, + 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x21, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, + 0x52, 0x16, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x54, 0x79, 0x70, + 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6e, 0x0a, 0x17, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x18, 0x1e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x52, 0x15, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, + 0x72, 0x69, 0x74, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, + 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x08, 0x70, 0x72, 0x69, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x41, 0x0a, 0x0b, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x0a, 0x72, 0x61, + 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x5c, 0x0a, 0x16, 0x69, 0x6e, 0x63, 0x6c, + 0x75, 0x64, 0x65, 0x5f, 0x76, 0x68, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, + 0x01, 0x52, 0x13, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x68, 0x52, 0x61, 0x74, 0x65, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x4e, 0x0a, 0x0b, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x68, 0x61, 0x73, 0x68, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, 0x11, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x72, + 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, + 0x2e, 0x30, 0x18, 0x01, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x50, 0x0a, 0x10, 0x6d, 0x61, + 0x78, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x17, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0e, 0x6d, 0x61, + 0x78, 0x47, 0x72, 0x70, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x56, 0x0a, 0x13, + 0x67, 0x72, 0x70, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x6f, 0x66, 0x66, + 0x73, 0x65, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, + 0x01, 0x52, 0x11, 0x67, 0x72, 0x70, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x4f, 0x66, + 0x66, 0x73, 0x65, 0x74, 0x12, 0x59, 0x0a, 0x0f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x19, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0e, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, + 0x67, 0x0a, 0x18, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x22, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 
0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x52, 0x16, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x80, 0x01, 0x0a, 0x18, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, + 0x30, 0x18, 0x01, 0x52, 0x16, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x16, 0x6d, + 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x45, 0x0a, 0x0c, + 0x68, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x1b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x64, 0x67, 0x65, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x68, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x64, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcb, 0x03, 0x0a, 0x13, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x12, 0x33, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x19, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x07, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x4b, 0x0a, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, + 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 
0x05, + 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x59, 0x0a, 0x10, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x66, + 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0f, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, + 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x64, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x64, 0x12, + 0x48, 0x0a, 0x21, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x64, 0x6f, + 0x77, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x5f, 0x61, 0x70, + 0x70, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x64, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x53, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x75, 0x66, + 0x66, 0x69, 0x78, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x3a, 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, + 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x0b, 0x72, 0x75, 0x6e, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x1a, 0xd6, 0x0b, 0x0a, 0x0a, 0x48, 0x61, 0x73, 0x68, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4e, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x48, 0x00, 0x52, 0x06, + 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x79, 0x0a, 0x15, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, + 0x75, 0x74, 0x65, 
0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x48, 0x00, 0x52, 0x14, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x12, 0x67, 0x0a, 0x0f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, + 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0e, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x5e, 0x0a, 0x0c, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x74, 0x65, + 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x74, 0x65, + 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x1a, 0xc6, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x12, 0x2e, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, + 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x53, 0x0a, 0x0d, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, + 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, + 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, + 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, + 0x5f, 0x0a, 0x0f, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x12, 0x25, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x11, 0xfa, 0x42, 0x0e, 0x72, 0x0c, 0x10, 0x01, 0x28, 0x80, 0x80, 0x01, 0xc8, 0x01, 0x00, + 0xc0, 0x01, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x72, 0x0a, 0x28, + 0x80, 0x80, 0x01, 0xc8, 0x01, 0x00, 0xc0, 
0x01, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x1a, 0xfe, 0x01, 0x0a, 0x06, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x5d, 0x0a, 0x0a, 0x61, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6f, + 0x6b, 0x69, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0a, 0x61, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, + 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6f, 0x6b, 0x69, + 0x65, 0x1a, 0x7a, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x3a, 0x45, 0x9a, 0xc5, 0x88, 0x1e, 0x40, 0x0a, 0x3e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, + 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x6e, 0x0a, + 0x0e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, + 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x3f, 0x9a, 0xc5, + 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x1a, 0x66, 0x0a, + 0x0b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x19, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, 0x37, 0x0a, 0x35, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 
0x69, 0x6f, 0x6e, 0x2e, 0x48, + 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, + 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x17, 0x0a, 0x10, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, + 0x1a, 0xa3, 0x03, 0x0a, 0x0d, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x30, 0x0a, 0x0c, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, + 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x0b, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x65, 0x0a, 0x0e, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x1a, 0x8d, 0x01, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x5d, 0x0a, 0x15, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x6f, 0x73, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x6f, 0x73, + 0x74, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x88, 0x02, 0x0a, 0x11, 0x4d, 0x61, 0x78, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x13, + 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x17, 0x67, 0x72, 0x70, 0x63, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6d, + 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x67, 0x72, 0x70, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x78, 0x12, 0x56, 0x0a, 0x1a, 0x67, 0x72, 0x70, + 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x17, 0x67, 0x72, 0x70, 0x63, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4f, 0x66, 0x66, 0x73, 0x65, + 0x74, 0x22, 0x60, 0x0a, 0x1b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x46, + 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, + 0x12, 0x17, 0x0a, 0x13, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x41, 0x56, + 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, + 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x49, 0x4e, 0x54, 0x45, + 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, + 0x52, 0x10, 0x02, 0x22, 0x5e, 0x0a, 0x16, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, + 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, + 0x1e, 0x50, 0x41, 0x53, 0x53, 0x5f, 0x54, 0x48, 0x52, 0x4f, 0x55, 0x47, 0x48, 0x5f, 0x49, 0x4e, + 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, + 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x48, 0x41, 0x4e, 0x44, 0x4c, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x45, + 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x01, 0x1a, + 0x02, 0x18, 0x01, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x18, 0x0a, 0x11, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, + 0x03, 0xf8, 0x42, 0x01, 0x42, 0x18, 0x0a, 0x16, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, + 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x4a, 0x04, 0x08, 0x13, 0x10, 0x14, + 0x4a, 0x04, 0x08, 0x10, 0x10, 0x11, 0x4a, 0x04, 0x08, 0x16, 0x10, 0x17, 0x4a, 0x04, 0x08, 0x15, + 0x10, 0x16, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x52, 0x15, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x5f, 0x6d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, + 0xbf, 0x10, 0x0a, 0x0b, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x19, 0x0a, 0x08, 
0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x72, 0x65, 0x74, 0x72, 0x79, 0x4f, 0x6e, 0x12, 0x52, 0x0a, 0x0b, 0x6e, 0x75, + 0x6d, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x13, 0xf2, + 0x98, 0xfe, 0x8f, 0x05, 0x0d, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x52, 0x0a, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x41, + 0x0a, 0x0f, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x54, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x12, 0x4a, 0x0a, 0x14, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x79, 0x5f, 0x69, 0x64, 0x6c, + 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x70, 0x65, 0x72, 0x54, + 0x72, 0x79, 0x49, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x57, 0x0a, + 0x0e, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, + 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, + 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, + 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x67, 0x0a, 0x14, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, + 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, + 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x48, 0x6f, + 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x12, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, + 0x64, 0x0a, 0x18, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, 0x72, + 0x65, 0x74, 0x72, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x65, 0x64, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x21, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6d, 0x61, + 0x78, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 
0x70, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x1d, 0x68, 0x6f, 0x73, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x74, 0x72, 0x79, 0x4d, 0x61, 0x78, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x12, + 0x34, 0x0a, 0x16, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0d, 0x52, + 0x14, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x43, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x62, + 0x61, 0x63, 0x6b, 0x5f, 0x6f, 0x66, 0x66, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x52, 0x0c, + 0x72, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x78, 0x0a, 0x1b, + 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x6f, 0x66, 0x66, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, + 0x64, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x52, 0x17, 0x72, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, + 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x51, 0x0a, 0x11, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x10, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, + 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x60, 0x0a, 0x19, 0x72, 0x65, 0x74, + 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x52, 0x17, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x1a, 0xb9, 0x01, 0x0a, 0x0d, + 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, + 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 
0x79, 0x70, 0x65, 0x64, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, + 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, + 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xc3, 0x01, 0x0a, 0x12, 0x52, 0x65, 0x74, 0x72, + 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1b, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, + 0x74, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, + 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xd6, 0x01, + 0x0a, 0x0c, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x4a, + 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08, 0x01, 0x2a, 0x00, 0x52, 0x0c, 0x62, 0x61, + 0x73, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x46, 0x0a, 0x0c, 0x6d, 0x61, + 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x65, + 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, + 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x1a, 0x88, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x65, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, + 0xc0, 0x01, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x56, 0x0a, 0x06, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 
0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, + 0x73, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x1a, 0xc0, 0x01, 0x0a, 0x17, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, + 0x64, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x5d, 0x0a, + 0x0d, 0x72, 0x65, 0x73, 0x65, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, + 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0c, + 0x72, 0x65, 0x73, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x46, 0x0a, 0x0c, + 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x22, 0x34, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x45, 0x43, + 0x4f, 0x4e, 0x44, 0x53, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x55, 0x4e, 0x49, 0x58, 0x5f, 0x54, + 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x01, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, + 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x22, 0x9c, 0x02, 0x0a, 0x0b, 0x48, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x12, 0x50, 0x0a, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, + 0x28, 0x01, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x73, 0x12, 0x5c, 0x0a, 0x19, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x63, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x17, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x63, + 0x65, 0x12, 0x36, 0x0a, 0x18, 0x68, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x6f, 0x6e, 0x5f, 0x70, 0x65, + 0x72, 0x5f, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x08, 
0x52, 0x14, 0x68, 0x65, 0x64, 0x67, 0x65, 0x4f, 0x6e, 0x50, 0x65, 0x72, 0x54, + 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, + 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x48, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x22, 0xe1, 0x05, 0x0a, 0x0e, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0e, 0x68, 0x74, 0x74, 0x70, 0x73, 0x5f, 0x72, 0x65, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0d, 0x68, + 0x74, 0x74, 0x70, 0x73, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x29, 0x0a, 0x0f, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, + 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x30, 0x0a, 0x0d, 0x68, 0x6f, 0x73, 0x74, 0x5f, + 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, + 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x0c, 0x68, 0x6f, 0x73, + 0x74, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x6f, 0x72, + 0x74, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x0c, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x32, + 0x0a, 0x0d, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, + 0x01, 0x02, 0x48, 0x01, 0x52, 0x0c, 0x70, 0x61, 0x74, 0x68, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x12, 0x34, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x72, 0x65, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, + 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x48, 0x01, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x55, 0x0a, 0x0d, 0x72, 0x65, 0x67, 0x65, + 0x78, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x48, + 0x01, 0x52, 0x0c, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, + 0x69, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, + 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, + 0x64, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, + 0x72, 0x69, 0x70, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0a, 0x73, 0x74, 0x72, 0x69, 0x70, 0x51, 
0x75, 0x65, 0x72, 0x79, 0x22, 0x77, 0x0a, 0x14, 0x52, + 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, + 0x6f, 0x64, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x4d, 0x4f, 0x56, 0x45, 0x44, 0x5f, 0x50, 0x45, 0x52, + 0x4d, 0x41, 0x4e, 0x45, 0x4e, 0x54, 0x4c, 0x59, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x46, 0x4f, + 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x45, 0x45, 0x5f, 0x4f, 0x54, 0x48, + 0x45, 0x52, 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x45, 0x4d, 0x50, 0x4f, 0x52, 0x41, 0x52, + 0x59, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, + 0x50, 0x45, 0x52, 0x4d, 0x41, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, + 0x43, 0x54, 0x10, 0x04, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x1a, + 0x0a, 0x18, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0x18, 0x0a, 0x16, 0x70, 0x61, + 0x74, 0x68, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x22, 0xa1, 0x01, 0x0a, 0x14, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0b, 0xfa, + 0x42, 0x08, 0x2a, 0x06, 0x10, 0xd8, 0x04, 0x28, 0xc8, 0x01, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, + 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x15, 0x0a, 0x13, 0x4e, 0x6f, 0x6e, 0x46, + 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x91, 0x01, 0x0a, 0x09, 0x44, 0x65, 0x63, 0x6f, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x25, 0x0a, + 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x70, 0x61, 0x67, 0x61, 0x74, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x70, 0x61, 0x67, 0x61, 0x74, 0x65, 0x3a, 0x23, + 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x44, 0x65, 0x63, 0x6f, 0x72, 0x61, + 0x74, 0x6f, 0x72, 0x22, 0xd2, 0x02, 0x0a, 0x07, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x12, + 0x49, 0x0a, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 
0x61, 0x6d, 0x70, 0x6c, 0x69, + 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x49, 0x0a, 0x0f, 0x72, 0x61, + 0x6e, 0x64, 0x6f, 0x6d, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, + 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x53, 0x61, 0x6d, + 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x4b, 0x0a, 0x10, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x6c, 0x6c, + 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, + 0x74, 0x52, 0x0f, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x6c, 0x6c, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, + 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x61, 0x67, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x54, 0x61, 0x67, 0x73, 0x3a, 0x21, 0x9a, 0xc5, 0x88, 0x1e, 0x1c, 0x0a, 0x1a, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x22, 0xb4, 0x01, 0x0a, 0x0e, 0x56, 0x69, 0x72, + 0x74, 0x75, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x07, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x1b, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, + 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x52, 0x07, + 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x22, + 0xc9, 0x1c, 0x0a, 0x09, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x3b, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, + 0x02, 0x18, 0x0a, 0x52, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 
0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x4b, 0x0a, 0x07, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, + 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3f, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, + 0x64, 0x65, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x1a, 0xb5, 0x18, 0x0a, 0x06, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5e, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x12, 0x6d, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x12, 0x61, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x5e, 0x0a, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, + 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, + 0x64, 0x64, 0x72, 
0x65, 0x73, 0x73, 0x12, 0x55, 0x0a, 0x0b, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x48, + 0x00, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x68, 0x0a, + 0x12, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x10, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x77, 0x0a, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x37, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, + 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, 0x01, 0x48, 0x00, 0x52, + 0x0f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x4e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x44, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x4a, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, + 0x00, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x71, 0x0a, 0x15, + 0x6d, 0x61, 0x73, 0x6b, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x61, 0x73, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x13, 0x6d, 0x61, 0x73, 0x6b, + 0x65, 0x64, 0x52, 0x65, 0x6d, 0x6f, 0x74, 
0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, + 0x81, 0x01, 0x0a, 0x1b, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, + 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x18, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x1a, 0x49, 0x0a, 0x0d, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x1a, 0x53, + 0x0a, 0x12, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x3a, 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x1a, 0xd1, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, + 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x69, + 0x66, 0x5f, 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x73, 0x6b, 0x69, 0x70, 0x49, 0x66, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x3a, 0x39, 0x9a, 0xc5, + 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x1a, 0x49, 0x0a, 0x0d, 0x52, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, + 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 
0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x1a, 0xbe, 0x01, 0x0a, 0x13, 0x4d, 0x61, 0x73, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x52, 0x0a, 0x12, 0x76, 0x34, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x5f, 0x6c, 0x65, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x20, 0x52, 0x0f, 0x76, + 0x34, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x73, 0x6b, 0x4c, 0x65, 0x6e, 0x12, 0x53, + 0x0a, 0x12, 0x76, 0x36, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x73, 0x6b, + 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, + 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, + 0x80, 0x01, 0x52, 0x0f, 0x76, 0x36, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x73, 0x6b, + 0x4c, 0x65, 0x6e, 0x1a, 0x9e, 0x01, 0x0a, 0x0a, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x4b, + 0x65, 0x79, 0x12, 0x32, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, 0x3a, 0x35, 0x9a, + 0xc5, 0x88, 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, + 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x4b, 0x65, 0x79, 0x1a, 0xb3, 0x02, 0x0a, 0x10, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, + 0x12, 0x32, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, + 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x12, 0x48, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 
0x61, + 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, + 0x01, 0x02, 0x08, 0x01, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x3a, 0x3b, 0x9a, + 0xc5, 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, + 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0xb8, 0x01, 0x0a, 0x0f, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2e, + 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, + 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x50, + 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, + 0x02, 0x10, 0x01, 0x52, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, + 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0xda, 0x02, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x2e, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4b, + 0x65, 0x79, 0x12, 0x50, 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, + 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x4b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x59, 0x0a, 0x06, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x37, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x69, 0x66, 0x5f, + 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x6b, + 0x69, 0x70, 0x49, 
0x66, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x22, 0x26, 0x0a, 0x06, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x59, 0x4e, 0x41, 0x4d, 0x49, 0x43, 0x10, + 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, + 0x10, 0x01, 0x1a, 0x97, 0x02, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, + 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x32, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x65, 0x78, + 0x70, 0x65, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x65, 0x78, + 0x70, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x61, 0x0a, 0x10, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0f, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x3a, 0x2a, 0x9a, 0xc5, + 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x17, 0x0a, 0x10, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, + 0x01, 0x1a, 0xf2, 0x01, 0x0a, 0x08, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x66, + 0x0a, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x72, + 0x69, 0x64, 0x65, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x0f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x63, 0x0a, 0x0f, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x50, 0x0a, 0x0c, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 
0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x4b, 0x65, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x42, 0x19, 0x0a, 0x12, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0xe6, 0x05, 0x0a, 0x0d, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x21, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, + 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x2e, 0x0a, 0x0b, 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, + 0x18, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x65, 0x78, 0x61, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x12, 0x5c, 0x0a, 0x10, 0x73, 0x61, 0x66, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, + 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, + 0x73, 0x61, 0x66, 0x65, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3c, + 0x0a, 0x0b, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, + 0x52, 0x0a, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0d, + 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x0c, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, + 0x0b, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x0c, + 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3b, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x73, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x42, 0x12, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, + 0x01, 0x48, 0x00, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x12, 0x49, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 
0x67, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, + 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x21, 0x0a, + 0x0c, 0x69, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x12, 0x40, 0x0a, 0x1d, 0x74, 0x72, 0x65, 0x61, 0x74, 0x5f, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, + 0x67, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x73, 0x5f, 0x65, 0x6d, 0x70, 0x74, + 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x74, 0x72, 0x65, 0x61, 0x74, 0x4d, 0x69, + 0x73, 0x73, 0x69, 0x6e, 0x67, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x73, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x18, 0x0a, 0x16, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, + 0x04, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x52, 0x0b, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x22, 0xa1, 0x02, 0x0a, 0x15, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x1e, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, + 0x07, 0x72, 0x05, 0x10, 0x01, 0x28, 0x80, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x53, + 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, + 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0d, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x72, + 0x65, 0x73, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, + 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x21, 0x0a, 0x1f, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, + 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x22, 0x86, 0x03, 0x0a, 0x16, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x50, 0x6f, 
0x6c, + 0x69, 0x63, 0x79, 0x12, 0x52, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, + 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x40, 0x0a, 0x17, 0x72, 0x65, 0x64, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0d, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, + 0x10, 0x05, 0x52, 0x15, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x4a, 0x0a, 0x0a, 0x70, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x64, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, + 0x72, 0x6f, 0x73, 0x73, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x61, 0x6c, 0x6c, 0x6f, + 0x77, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x18, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x70, 0x79, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, 0x01, 0x0c, 0x18, 0x01, + 0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x15, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x43, 0x6f, 0x70, + 0x79, 0x22, 0x79, 0x0a, 0x0c, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x2c, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x42, 0x8b, 0x01, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x14, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x65, 0x6e, 
0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, + 0x76, 0x33, 0x3b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_envoy_config_route_v3_route_components_proto_rawDescOnce sync.Once + file_envoy_config_route_v3_route_components_proto_rawDescData = file_envoy_config_route_v3_route_components_proto_rawDesc +) + +func file_envoy_config_route_v3_route_components_proto_rawDescGZIP() []byte { + file_envoy_config_route_v3_route_components_proto_rawDescOnce.Do(func() { + file_envoy_config_route_v3_route_components_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_route_v3_route_components_proto_rawDescData) + }) + return file_envoy_config_route_v3_route_components_proto_rawDescData +} + +var file_envoy_config_route_v3_route_components_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_envoy_config_route_v3_route_components_proto_msgTypes = make([]protoimpl.MessageInfo, 58) +var file_envoy_config_route_v3_route_components_proto_goTypes = []interface{}{ + (VirtualHost_TlsRequirementType)(0), // 0: envoy.config.route.v3.VirtualHost.TlsRequirementType + (RouteAction_ClusterNotFoundResponseCode)(0), // 1: envoy.config.route.v3.RouteAction.ClusterNotFoundResponseCode + (RouteAction_InternalRedirectAction)(0), // 2: envoy.config.route.v3.RouteAction.InternalRedirectAction + (RetryPolicy_ResetHeaderFormat)(0), // 3: envoy.config.route.v3.RetryPolicy.ResetHeaderFormat + (RedirectAction_RedirectResponseCode)(0), // 4: envoy.config.route.v3.RedirectAction.RedirectResponseCode + (RateLimit_Action_MetaData_Source)(0), // 5: envoy.config.route.v3.RateLimit.Action.MetaData.Source + (*VirtualHost)(nil), // 6: envoy.config.route.v3.VirtualHost + (*FilterAction)(nil), // 7: envoy.config.route.v3.FilterAction + (*RouteList)(nil), // 8: envoy.config.route.v3.RouteList + (*Route)(nil), // 9: envoy.config.route.v3.Route + (*WeightedCluster)(nil), // 10: envoy.config.route.v3.WeightedCluster + (*ClusterSpecifierPlugin)(nil), // 11: envoy.config.route.v3.ClusterSpecifierPlugin + (*RouteMatch)(nil), // 12: envoy.config.route.v3.RouteMatch + (*CorsPolicy)(nil), // 13: envoy.config.route.v3.CorsPolicy + (*RouteAction)(nil), // 14: envoy.config.route.v3.RouteAction + (*RetryPolicy)(nil), // 15: envoy.config.route.v3.RetryPolicy + (*HedgePolicy)(nil), // 16: envoy.config.route.v3.HedgePolicy + (*RedirectAction)(nil), // 17: envoy.config.route.v3.RedirectAction + (*DirectResponseAction)(nil), // 18: envoy.config.route.v3.DirectResponseAction + (*NonForwardingAction)(nil), // 19: envoy.config.route.v3.NonForwardingAction + (*Decorator)(nil), // 20: envoy.config.route.v3.Decorator + (*Tracing)(nil), // 21: envoy.config.route.v3.Tracing + (*VirtualCluster)(nil), // 22: envoy.config.route.v3.VirtualCluster + (*RateLimit)(nil), // 23: envoy.config.route.v3.RateLimit + (*HeaderMatcher)(nil), // 24: envoy.config.route.v3.HeaderMatcher + (*QueryParameterMatcher)(nil), // 25: envoy.config.route.v3.QueryParameterMatcher + (*InternalRedirectPolicy)(nil), // 26: envoy.config.route.v3.InternalRedirectPolicy + (*FilterConfig)(nil), // 27: envoy.config.route.v3.FilterConfig + nil, // 28: envoy.config.route.v3.VirtualHost.TypedPerFilterConfigEntry + nil, // 29: envoy.config.route.v3.Route.TypedPerFilterConfigEntry + 
(*WeightedCluster_ClusterWeight)(nil), // 30: envoy.config.route.v3.WeightedCluster.ClusterWeight + nil, // 31: envoy.config.route.v3.WeightedCluster.ClusterWeight.TypedPerFilterConfigEntry + (*RouteMatch_GrpcRouteMatchOptions)(nil), // 32: envoy.config.route.v3.RouteMatch.GrpcRouteMatchOptions + (*RouteMatch_TlsContextMatchOptions)(nil), // 33: envoy.config.route.v3.RouteMatch.TlsContextMatchOptions + (*RouteMatch_ConnectMatcher)(nil), // 34: envoy.config.route.v3.RouteMatch.ConnectMatcher + (*RouteAction_RequestMirrorPolicy)(nil), // 35: envoy.config.route.v3.RouteAction.RequestMirrorPolicy + (*RouteAction_HashPolicy)(nil), // 36: envoy.config.route.v3.RouteAction.HashPolicy + (*RouteAction_UpgradeConfig)(nil), // 37: envoy.config.route.v3.RouteAction.UpgradeConfig + (*RouteAction_MaxStreamDuration)(nil), // 38: envoy.config.route.v3.RouteAction.MaxStreamDuration + (*RouteAction_HashPolicy_Header)(nil), // 39: envoy.config.route.v3.RouteAction.HashPolicy.Header + (*RouteAction_HashPolicy_CookieAttribute)(nil), // 40: envoy.config.route.v3.RouteAction.HashPolicy.CookieAttribute + (*RouteAction_HashPolicy_Cookie)(nil), // 41: envoy.config.route.v3.RouteAction.HashPolicy.Cookie + (*RouteAction_HashPolicy_ConnectionProperties)(nil), // 42: envoy.config.route.v3.RouteAction.HashPolicy.ConnectionProperties + (*RouteAction_HashPolicy_QueryParameter)(nil), // 43: envoy.config.route.v3.RouteAction.HashPolicy.QueryParameter + (*RouteAction_HashPolicy_FilterState)(nil), // 44: envoy.config.route.v3.RouteAction.HashPolicy.FilterState + (*RouteAction_UpgradeConfig_ConnectConfig)(nil), // 45: envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig + (*RetryPolicy_RetryPriority)(nil), // 46: envoy.config.route.v3.RetryPolicy.RetryPriority + (*RetryPolicy_RetryHostPredicate)(nil), // 47: envoy.config.route.v3.RetryPolicy.RetryHostPredicate + (*RetryPolicy_RetryBackOff)(nil), // 48: envoy.config.route.v3.RetryPolicy.RetryBackOff + (*RetryPolicy_ResetHeader)(nil), // 49: envoy.config.route.v3.RetryPolicy.ResetHeader + (*RetryPolicy_RateLimitedRetryBackOff)(nil), // 50: envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff + (*RateLimit_Action)(nil), // 51: envoy.config.route.v3.RateLimit.Action + (*RateLimit_Override)(nil), // 52: envoy.config.route.v3.RateLimit.Override + (*RateLimit_Action_SourceCluster)(nil), // 53: envoy.config.route.v3.RateLimit.Action.SourceCluster + (*RateLimit_Action_DestinationCluster)(nil), // 54: envoy.config.route.v3.RateLimit.Action.DestinationCluster + (*RateLimit_Action_RequestHeaders)(nil), // 55: envoy.config.route.v3.RateLimit.Action.RequestHeaders + (*RateLimit_Action_RemoteAddress)(nil), // 56: envoy.config.route.v3.RateLimit.Action.RemoteAddress + (*RateLimit_Action_MaskedRemoteAddress)(nil), // 57: envoy.config.route.v3.RateLimit.Action.MaskedRemoteAddress + (*RateLimit_Action_GenericKey)(nil), // 58: envoy.config.route.v3.RateLimit.Action.GenericKey + (*RateLimit_Action_HeaderValueMatch)(nil), // 59: envoy.config.route.v3.RateLimit.Action.HeaderValueMatch + (*RateLimit_Action_DynamicMetaData)(nil), // 60: envoy.config.route.v3.RateLimit.Action.DynamicMetaData + (*RateLimit_Action_MetaData)(nil), // 61: envoy.config.route.v3.RateLimit.Action.MetaData + (*RateLimit_Action_QueryParameterValueMatch)(nil), // 62: envoy.config.route.v3.RateLimit.Action.QueryParameterValueMatch + (*RateLimit_Override_DynamicMetadata)(nil), // 63: envoy.config.route.v3.RateLimit.Override.DynamicMetadata + (*v3.Matcher)(nil), // 64: xds.type.matcher.v3.Matcher + 
(*v31.HeaderValueOption)(nil), // 65: envoy.config.core.v3.HeaderValueOption + (*anypb.Any)(nil), // 66: google.protobuf.Any + (*wrapperspb.UInt32Value)(nil), // 67: google.protobuf.UInt32Value + (*v31.Metadata)(nil), // 68: envoy.config.core.v3.Metadata + (*v31.TypedExtensionConfig)(nil), // 69: envoy.config.core.v3.TypedExtensionConfig + (*v32.RegexMatcher)(nil), // 70: envoy.type.matcher.v3.RegexMatcher + (*wrapperspb.BoolValue)(nil), // 71: google.protobuf.BoolValue + (*v31.RuntimeFractionalPercent)(nil), // 72: envoy.config.core.v3.RuntimeFractionalPercent + (*v32.MetadataMatcher)(nil), // 73: envoy.type.matcher.v3.MetadataMatcher + (*v32.StringMatcher)(nil), // 74: envoy.type.matcher.v3.StringMatcher + (*v32.RegexMatchAndSubstitute)(nil), // 75: envoy.type.matcher.v3.RegexMatchAndSubstitute + (*durationpb.Duration)(nil), // 76: google.protobuf.Duration + (v31.RoutingPriority)(0), // 77: envoy.config.core.v3.RoutingPriority + (*v33.FractionalPercent)(nil), // 78: envoy.type.v3.FractionalPercent + (*v31.DataSource)(nil), // 79: envoy.config.core.v3.DataSource + (*v34.CustomTag)(nil), // 80: envoy.type.tracing.v3.CustomTag + (*v33.Int64Range)(nil), // 81: envoy.type.v3.Int64Range + (*v31.ProxyProtocolConfig)(nil), // 82: envoy.config.core.v3.ProxyProtocolConfig + (*v35.MetadataKey)(nil), // 83: envoy.type.metadata.v3.MetadataKey +} +var file_envoy_config_route_v3_route_components_proto_depIdxs = []int32{ + 9, // 0: envoy.config.route.v3.VirtualHost.routes:type_name -> envoy.config.route.v3.Route + 64, // 1: envoy.config.route.v3.VirtualHost.matcher:type_name -> xds.type.matcher.v3.Matcher + 0, // 2: envoy.config.route.v3.VirtualHost.require_tls:type_name -> envoy.config.route.v3.VirtualHost.TlsRequirementType + 22, // 3: envoy.config.route.v3.VirtualHost.virtual_clusters:type_name -> envoy.config.route.v3.VirtualCluster + 23, // 4: envoy.config.route.v3.VirtualHost.rate_limits:type_name -> envoy.config.route.v3.RateLimit + 65, // 5: envoy.config.route.v3.VirtualHost.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 65, // 6: envoy.config.route.v3.VirtualHost.response_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 13, // 7: envoy.config.route.v3.VirtualHost.cors:type_name -> envoy.config.route.v3.CorsPolicy + 28, // 8: envoy.config.route.v3.VirtualHost.typed_per_filter_config:type_name -> envoy.config.route.v3.VirtualHost.TypedPerFilterConfigEntry + 15, // 9: envoy.config.route.v3.VirtualHost.retry_policy:type_name -> envoy.config.route.v3.RetryPolicy + 66, // 10: envoy.config.route.v3.VirtualHost.retry_policy_typed_config:type_name -> google.protobuf.Any + 16, // 11: envoy.config.route.v3.VirtualHost.hedge_policy:type_name -> envoy.config.route.v3.HedgePolicy + 67, // 12: envoy.config.route.v3.VirtualHost.per_request_buffer_limit_bytes:type_name -> google.protobuf.UInt32Value + 35, // 13: envoy.config.route.v3.VirtualHost.request_mirror_policies:type_name -> envoy.config.route.v3.RouteAction.RequestMirrorPolicy + 68, // 14: envoy.config.route.v3.VirtualHost.metadata:type_name -> envoy.config.core.v3.Metadata + 66, // 15: envoy.config.route.v3.FilterAction.action:type_name -> google.protobuf.Any + 9, // 16: envoy.config.route.v3.RouteList.routes:type_name -> envoy.config.route.v3.Route + 12, // 17: envoy.config.route.v3.Route.match:type_name -> envoy.config.route.v3.RouteMatch + 14, // 18: envoy.config.route.v3.Route.route:type_name -> envoy.config.route.v3.RouteAction + 17, // 19: envoy.config.route.v3.Route.redirect:type_name -> 
envoy.config.route.v3.RedirectAction + 18, // 20: envoy.config.route.v3.Route.direct_response:type_name -> envoy.config.route.v3.DirectResponseAction + 7, // 21: envoy.config.route.v3.Route.filter_action:type_name -> envoy.config.route.v3.FilterAction + 19, // 22: envoy.config.route.v3.Route.non_forwarding_action:type_name -> envoy.config.route.v3.NonForwardingAction + 68, // 23: envoy.config.route.v3.Route.metadata:type_name -> envoy.config.core.v3.Metadata + 20, // 24: envoy.config.route.v3.Route.decorator:type_name -> envoy.config.route.v3.Decorator + 29, // 25: envoy.config.route.v3.Route.typed_per_filter_config:type_name -> envoy.config.route.v3.Route.TypedPerFilterConfigEntry + 65, // 26: envoy.config.route.v3.Route.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 65, // 27: envoy.config.route.v3.Route.response_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 21, // 28: envoy.config.route.v3.Route.tracing:type_name -> envoy.config.route.v3.Tracing + 67, // 29: envoy.config.route.v3.Route.per_request_buffer_limit_bytes:type_name -> google.protobuf.UInt32Value + 30, // 30: envoy.config.route.v3.WeightedCluster.clusters:type_name -> envoy.config.route.v3.WeightedCluster.ClusterWeight + 67, // 31: envoy.config.route.v3.WeightedCluster.total_weight:type_name -> google.protobuf.UInt32Value + 69, // 32: envoy.config.route.v3.ClusterSpecifierPlugin.extension:type_name -> envoy.config.core.v3.TypedExtensionConfig + 70, // 33: envoy.config.route.v3.RouteMatch.safe_regex:type_name -> envoy.type.matcher.v3.RegexMatcher + 34, // 34: envoy.config.route.v3.RouteMatch.connect_matcher:type_name -> envoy.config.route.v3.RouteMatch.ConnectMatcher + 69, // 35: envoy.config.route.v3.RouteMatch.path_match_policy:type_name -> envoy.config.core.v3.TypedExtensionConfig + 71, // 36: envoy.config.route.v3.RouteMatch.case_sensitive:type_name -> google.protobuf.BoolValue + 72, // 37: envoy.config.route.v3.RouteMatch.runtime_fraction:type_name -> envoy.config.core.v3.RuntimeFractionalPercent + 24, // 38: envoy.config.route.v3.RouteMatch.headers:type_name -> envoy.config.route.v3.HeaderMatcher + 25, // 39: envoy.config.route.v3.RouteMatch.query_parameters:type_name -> envoy.config.route.v3.QueryParameterMatcher + 32, // 40: envoy.config.route.v3.RouteMatch.grpc:type_name -> envoy.config.route.v3.RouteMatch.GrpcRouteMatchOptions + 33, // 41: envoy.config.route.v3.RouteMatch.tls_context:type_name -> envoy.config.route.v3.RouteMatch.TlsContextMatchOptions + 73, // 42: envoy.config.route.v3.RouteMatch.dynamic_metadata:type_name -> envoy.type.matcher.v3.MetadataMatcher + 74, // 43: envoy.config.route.v3.CorsPolicy.allow_origin_string_match:type_name -> envoy.type.matcher.v3.StringMatcher + 71, // 44: envoy.config.route.v3.CorsPolicy.allow_credentials:type_name -> google.protobuf.BoolValue + 72, // 45: envoy.config.route.v3.CorsPolicy.filter_enabled:type_name -> envoy.config.core.v3.RuntimeFractionalPercent + 72, // 46: envoy.config.route.v3.CorsPolicy.shadow_enabled:type_name -> envoy.config.core.v3.RuntimeFractionalPercent + 71, // 47: envoy.config.route.v3.CorsPolicy.allow_private_network_access:type_name -> google.protobuf.BoolValue + 71, // 48: envoy.config.route.v3.CorsPolicy.forward_not_matching_preflights:type_name -> google.protobuf.BoolValue + 10, // 49: envoy.config.route.v3.RouteAction.weighted_clusters:type_name -> envoy.config.route.v3.WeightedCluster + 11, // 50: envoy.config.route.v3.RouteAction.inline_cluster_specifier_plugin:type_name -> 
envoy.config.route.v3.ClusterSpecifierPlugin + 1, // 51: envoy.config.route.v3.RouteAction.cluster_not_found_response_code:type_name -> envoy.config.route.v3.RouteAction.ClusterNotFoundResponseCode + 68, // 52: envoy.config.route.v3.RouteAction.metadata_match:type_name -> envoy.config.core.v3.Metadata + 75, // 53: envoy.config.route.v3.RouteAction.regex_rewrite:type_name -> envoy.type.matcher.v3.RegexMatchAndSubstitute + 69, // 54: envoy.config.route.v3.RouteAction.path_rewrite_policy:type_name -> envoy.config.core.v3.TypedExtensionConfig + 71, // 55: envoy.config.route.v3.RouteAction.auto_host_rewrite:type_name -> google.protobuf.BoolValue + 75, // 56: envoy.config.route.v3.RouteAction.host_rewrite_path_regex:type_name -> envoy.type.matcher.v3.RegexMatchAndSubstitute + 76, // 57: envoy.config.route.v3.RouteAction.timeout:type_name -> google.protobuf.Duration + 76, // 58: envoy.config.route.v3.RouteAction.idle_timeout:type_name -> google.protobuf.Duration + 69, // 59: envoy.config.route.v3.RouteAction.early_data_policy:type_name -> envoy.config.core.v3.TypedExtensionConfig + 15, // 60: envoy.config.route.v3.RouteAction.retry_policy:type_name -> envoy.config.route.v3.RetryPolicy + 66, // 61: envoy.config.route.v3.RouteAction.retry_policy_typed_config:type_name -> google.protobuf.Any + 35, // 62: envoy.config.route.v3.RouteAction.request_mirror_policies:type_name -> envoy.config.route.v3.RouteAction.RequestMirrorPolicy + 77, // 63: envoy.config.route.v3.RouteAction.priority:type_name -> envoy.config.core.v3.RoutingPriority + 23, // 64: envoy.config.route.v3.RouteAction.rate_limits:type_name -> envoy.config.route.v3.RateLimit + 71, // 65: envoy.config.route.v3.RouteAction.include_vh_rate_limits:type_name -> google.protobuf.BoolValue + 36, // 66: envoy.config.route.v3.RouteAction.hash_policy:type_name -> envoy.config.route.v3.RouteAction.HashPolicy + 13, // 67: envoy.config.route.v3.RouteAction.cors:type_name -> envoy.config.route.v3.CorsPolicy + 76, // 68: envoy.config.route.v3.RouteAction.max_grpc_timeout:type_name -> google.protobuf.Duration + 76, // 69: envoy.config.route.v3.RouteAction.grpc_timeout_offset:type_name -> google.protobuf.Duration + 37, // 70: envoy.config.route.v3.RouteAction.upgrade_configs:type_name -> envoy.config.route.v3.RouteAction.UpgradeConfig + 26, // 71: envoy.config.route.v3.RouteAction.internal_redirect_policy:type_name -> envoy.config.route.v3.InternalRedirectPolicy + 2, // 72: envoy.config.route.v3.RouteAction.internal_redirect_action:type_name -> envoy.config.route.v3.RouteAction.InternalRedirectAction + 67, // 73: envoy.config.route.v3.RouteAction.max_internal_redirects:type_name -> google.protobuf.UInt32Value + 16, // 74: envoy.config.route.v3.RouteAction.hedge_policy:type_name -> envoy.config.route.v3.HedgePolicy + 38, // 75: envoy.config.route.v3.RouteAction.max_stream_duration:type_name -> envoy.config.route.v3.RouteAction.MaxStreamDuration + 67, // 76: envoy.config.route.v3.RetryPolicy.num_retries:type_name -> google.protobuf.UInt32Value + 76, // 77: envoy.config.route.v3.RetryPolicy.per_try_timeout:type_name -> google.protobuf.Duration + 76, // 78: envoy.config.route.v3.RetryPolicy.per_try_idle_timeout:type_name -> google.protobuf.Duration + 46, // 79: envoy.config.route.v3.RetryPolicy.retry_priority:type_name -> envoy.config.route.v3.RetryPolicy.RetryPriority + 47, // 80: envoy.config.route.v3.RetryPolicy.retry_host_predicate:type_name -> envoy.config.route.v3.RetryPolicy.RetryHostPredicate + 69, // 81: 
envoy.config.route.v3.RetryPolicy.retry_options_predicates:type_name -> envoy.config.core.v3.TypedExtensionConfig + 48, // 82: envoy.config.route.v3.RetryPolicy.retry_back_off:type_name -> envoy.config.route.v3.RetryPolicy.RetryBackOff + 50, // 83: envoy.config.route.v3.RetryPolicy.rate_limited_retry_back_off:type_name -> envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff + 24, // 84: envoy.config.route.v3.RetryPolicy.retriable_headers:type_name -> envoy.config.route.v3.HeaderMatcher + 24, // 85: envoy.config.route.v3.RetryPolicy.retriable_request_headers:type_name -> envoy.config.route.v3.HeaderMatcher + 67, // 86: envoy.config.route.v3.HedgePolicy.initial_requests:type_name -> google.protobuf.UInt32Value + 78, // 87: envoy.config.route.v3.HedgePolicy.additional_request_chance:type_name -> envoy.type.v3.FractionalPercent + 75, // 88: envoy.config.route.v3.RedirectAction.regex_rewrite:type_name -> envoy.type.matcher.v3.RegexMatchAndSubstitute + 4, // 89: envoy.config.route.v3.RedirectAction.response_code:type_name -> envoy.config.route.v3.RedirectAction.RedirectResponseCode + 79, // 90: envoy.config.route.v3.DirectResponseAction.body:type_name -> envoy.config.core.v3.DataSource + 71, // 91: envoy.config.route.v3.Decorator.propagate:type_name -> google.protobuf.BoolValue + 78, // 92: envoy.config.route.v3.Tracing.client_sampling:type_name -> envoy.type.v3.FractionalPercent + 78, // 93: envoy.config.route.v3.Tracing.random_sampling:type_name -> envoy.type.v3.FractionalPercent + 78, // 94: envoy.config.route.v3.Tracing.overall_sampling:type_name -> envoy.type.v3.FractionalPercent + 80, // 95: envoy.config.route.v3.Tracing.custom_tags:type_name -> envoy.type.tracing.v3.CustomTag + 24, // 96: envoy.config.route.v3.VirtualCluster.headers:type_name -> envoy.config.route.v3.HeaderMatcher + 67, // 97: envoy.config.route.v3.RateLimit.stage:type_name -> google.protobuf.UInt32Value + 51, // 98: envoy.config.route.v3.RateLimit.actions:type_name -> envoy.config.route.v3.RateLimit.Action + 52, // 99: envoy.config.route.v3.RateLimit.limit:type_name -> envoy.config.route.v3.RateLimit.Override + 70, // 100: envoy.config.route.v3.HeaderMatcher.safe_regex_match:type_name -> envoy.type.matcher.v3.RegexMatcher + 81, // 101: envoy.config.route.v3.HeaderMatcher.range_match:type_name -> envoy.type.v3.Int64Range + 74, // 102: envoy.config.route.v3.HeaderMatcher.string_match:type_name -> envoy.type.matcher.v3.StringMatcher + 74, // 103: envoy.config.route.v3.QueryParameterMatcher.string_match:type_name -> envoy.type.matcher.v3.StringMatcher + 67, // 104: envoy.config.route.v3.InternalRedirectPolicy.max_internal_redirects:type_name -> google.protobuf.UInt32Value + 69, // 105: envoy.config.route.v3.InternalRedirectPolicy.predicates:type_name -> envoy.config.core.v3.TypedExtensionConfig + 66, // 106: envoy.config.route.v3.FilterConfig.config:type_name -> google.protobuf.Any + 66, // 107: envoy.config.route.v3.VirtualHost.TypedPerFilterConfigEntry.value:type_name -> google.protobuf.Any + 66, // 108: envoy.config.route.v3.Route.TypedPerFilterConfigEntry.value:type_name -> google.protobuf.Any + 67, // 109: envoy.config.route.v3.WeightedCluster.ClusterWeight.weight:type_name -> google.protobuf.UInt32Value + 68, // 110: envoy.config.route.v3.WeightedCluster.ClusterWeight.metadata_match:type_name -> envoy.config.core.v3.Metadata + 65, // 111: envoy.config.route.v3.WeightedCluster.ClusterWeight.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 65, // 112: 
envoy.config.route.v3.WeightedCluster.ClusterWeight.response_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 31, // 113: envoy.config.route.v3.WeightedCluster.ClusterWeight.typed_per_filter_config:type_name -> envoy.config.route.v3.WeightedCluster.ClusterWeight.TypedPerFilterConfigEntry + 66, // 114: envoy.config.route.v3.WeightedCluster.ClusterWeight.TypedPerFilterConfigEntry.value:type_name -> google.protobuf.Any + 71, // 115: envoy.config.route.v3.RouteMatch.TlsContextMatchOptions.presented:type_name -> google.protobuf.BoolValue + 71, // 116: envoy.config.route.v3.RouteMatch.TlsContextMatchOptions.validated:type_name -> google.protobuf.BoolValue + 72, // 117: envoy.config.route.v3.RouteAction.RequestMirrorPolicy.runtime_fraction:type_name -> envoy.config.core.v3.RuntimeFractionalPercent + 71, // 118: envoy.config.route.v3.RouteAction.RequestMirrorPolicy.trace_sampled:type_name -> google.protobuf.BoolValue + 39, // 119: envoy.config.route.v3.RouteAction.HashPolicy.header:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.Header + 41, // 120: envoy.config.route.v3.RouteAction.HashPolicy.cookie:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.Cookie + 42, // 121: envoy.config.route.v3.RouteAction.HashPolicy.connection_properties:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.ConnectionProperties + 43, // 122: envoy.config.route.v3.RouteAction.HashPolicy.query_parameter:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.QueryParameter + 44, // 123: envoy.config.route.v3.RouteAction.HashPolicy.filter_state:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.FilterState + 71, // 124: envoy.config.route.v3.RouteAction.UpgradeConfig.enabled:type_name -> google.protobuf.BoolValue + 45, // 125: envoy.config.route.v3.RouteAction.UpgradeConfig.connect_config:type_name -> envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig + 76, // 126: envoy.config.route.v3.RouteAction.MaxStreamDuration.max_stream_duration:type_name -> google.protobuf.Duration + 76, // 127: envoy.config.route.v3.RouteAction.MaxStreamDuration.grpc_timeout_header_max:type_name -> google.protobuf.Duration + 76, // 128: envoy.config.route.v3.RouteAction.MaxStreamDuration.grpc_timeout_header_offset:type_name -> google.protobuf.Duration + 75, // 129: envoy.config.route.v3.RouteAction.HashPolicy.Header.regex_rewrite:type_name -> envoy.type.matcher.v3.RegexMatchAndSubstitute + 76, // 130: envoy.config.route.v3.RouteAction.HashPolicy.Cookie.ttl:type_name -> google.protobuf.Duration + 40, // 131: envoy.config.route.v3.RouteAction.HashPolicy.Cookie.attributes:type_name -> envoy.config.route.v3.RouteAction.HashPolicy.CookieAttribute + 82, // 132: envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig.proxy_protocol_config:type_name -> envoy.config.core.v3.ProxyProtocolConfig + 66, // 133: envoy.config.route.v3.RetryPolicy.RetryPriority.typed_config:type_name -> google.protobuf.Any + 66, // 134: envoy.config.route.v3.RetryPolicy.RetryHostPredicate.typed_config:type_name -> google.protobuf.Any + 76, // 135: envoy.config.route.v3.RetryPolicy.RetryBackOff.base_interval:type_name -> google.protobuf.Duration + 76, // 136: envoy.config.route.v3.RetryPolicy.RetryBackOff.max_interval:type_name -> google.protobuf.Duration + 3, // 137: envoy.config.route.v3.RetryPolicy.ResetHeader.format:type_name -> envoy.config.route.v3.RetryPolicy.ResetHeaderFormat + 49, // 138: envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff.reset_headers:type_name -> 
envoy.config.route.v3.RetryPolicy.ResetHeader + 76, // 139: envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff.max_interval:type_name -> google.protobuf.Duration + 53, // 140: envoy.config.route.v3.RateLimit.Action.source_cluster:type_name -> envoy.config.route.v3.RateLimit.Action.SourceCluster + 54, // 141: envoy.config.route.v3.RateLimit.Action.destination_cluster:type_name -> envoy.config.route.v3.RateLimit.Action.DestinationCluster + 55, // 142: envoy.config.route.v3.RateLimit.Action.request_headers:type_name -> envoy.config.route.v3.RateLimit.Action.RequestHeaders + 56, // 143: envoy.config.route.v3.RateLimit.Action.remote_address:type_name -> envoy.config.route.v3.RateLimit.Action.RemoteAddress + 58, // 144: envoy.config.route.v3.RateLimit.Action.generic_key:type_name -> envoy.config.route.v3.RateLimit.Action.GenericKey + 59, // 145: envoy.config.route.v3.RateLimit.Action.header_value_match:type_name -> envoy.config.route.v3.RateLimit.Action.HeaderValueMatch + 60, // 146: envoy.config.route.v3.RateLimit.Action.dynamic_metadata:type_name -> envoy.config.route.v3.RateLimit.Action.DynamicMetaData + 61, // 147: envoy.config.route.v3.RateLimit.Action.metadata:type_name -> envoy.config.route.v3.RateLimit.Action.MetaData + 69, // 148: envoy.config.route.v3.RateLimit.Action.extension:type_name -> envoy.config.core.v3.TypedExtensionConfig + 57, // 149: envoy.config.route.v3.RateLimit.Action.masked_remote_address:type_name -> envoy.config.route.v3.RateLimit.Action.MaskedRemoteAddress + 62, // 150: envoy.config.route.v3.RateLimit.Action.query_parameter_value_match:type_name -> envoy.config.route.v3.RateLimit.Action.QueryParameterValueMatch + 63, // 151: envoy.config.route.v3.RateLimit.Override.dynamic_metadata:type_name -> envoy.config.route.v3.RateLimit.Override.DynamicMetadata + 67, // 152: envoy.config.route.v3.RateLimit.Action.MaskedRemoteAddress.v4_prefix_mask_len:type_name -> google.protobuf.UInt32Value + 67, // 153: envoy.config.route.v3.RateLimit.Action.MaskedRemoteAddress.v6_prefix_mask_len:type_name -> google.protobuf.UInt32Value + 71, // 154: envoy.config.route.v3.RateLimit.Action.HeaderValueMatch.expect_match:type_name -> google.protobuf.BoolValue + 24, // 155: envoy.config.route.v3.RateLimit.Action.HeaderValueMatch.headers:type_name -> envoy.config.route.v3.HeaderMatcher + 83, // 156: envoy.config.route.v3.RateLimit.Action.DynamicMetaData.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey + 83, // 157: envoy.config.route.v3.RateLimit.Action.MetaData.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey + 5, // 158: envoy.config.route.v3.RateLimit.Action.MetaData.source:type_name -> envoy.config.route.v3.RateLimit.Action.MetaData.Source + 71, // 159: envoy.config.route.v3.RateLimit.Action.QueryParameterValueMatch.expect_match:type_name -> google.protobuf.BoolValue + 25, // 160: envoy.config.route.v3.RateLimit.Action.QueryParameterValueMatch.query_parameters:type_name -> envoy.config.route.v3.QueryParameterMatcher + 83, // 161: envoy.config.route.v3.RateLimit.Override.DynamicMetadata.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey + 162, // [162:162] is the sub-list for method output_type + 162, // [162:162] is the sub-list for method input_type + 162, // [162:162] is the sub-list for extension type_name + 162, // [162:162] is the sub-list for extension extendee + 0, // [0:162] is the sub-list for field type_name +} + +func init() { file_envoy_config_route_v3_route_components_proto_init() } +func 
file_envoy_config_route_v3_route_components_proto_init() { + if File_envoy_config_route_v3_route_components_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_route_v3_route_components_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VirtualHost); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FilterAction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Route); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WeightedCluster); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClusterSpecifierPlugin); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CorsPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteAction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HedgePolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RedirectAction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_envoy_config_route_v3_route_components_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DirectResponseAction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NonForwardingAction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decorator); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Tracing); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VirtualCluster); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryParameterMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InternalRedirectPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FilterConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WeightedCluster_ClusterWeight); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteMatch_GrpcRouteMatchOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[27].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*RouteMatch_TlsContextMatchOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteMatch_ConnectMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteAction_RequestMirrorPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteAction_HashPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteAction_UpgradeConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteAction_MaxStreamDuration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteAction_HashPolicy_Header); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteAction_HashPolicy_CookieAttribute); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteAction_HashPolicy_Cookie); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteAction_HashPolicy_ConnectionProperties); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteAction_HashPolicy_QueryParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteAction_HashPolicy_FilterState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_envoy_config_route_v3_route_components_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteAction_UpgradeConfig_ConnectConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryPolicy_RetryPriority); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryPolicy_RetryHostPredicate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryPolicy_RetryBackOff); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryPolicy_ResetHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryPolicy_RateLimitedRetryBackOff); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Action); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Override); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Action_SourceCluster); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Action_DestinationCluster); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Action_RequestHeaders); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Action_RemoteAddress); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Action_MaskedRemoteAddress); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Action_GenericKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Action_HeaderValueMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Action_DynamicMetaData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Action_MetaData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Action_QueryParameterValueMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimit_Override_DynamicMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_route_v3_route_components_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*Route_Route)(nil), + (*Route_Redirect)(nil), + (*Route_DirectResponse)(nil), + (*Route_FilterAction)(nil), + (*Route_NonForwardingAction)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[4].OneofWrappers = []interface{}{ + (*WeightedCluster_HeaderName)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[6].OneofWrappers = []interface{}{ + (*RouteMatch_Prefix)(nil), + (*RouteMatch_Path)(nil), + (*RouteMatch_SafeRegex)(nil), + (*RouteMatch_ConnectMatcher_)(nil), + (*RouteMatch_PathSeparatedPrefix)(nil), + (*RouteMatch_PathMatchPolicy)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[7].OneofWrappers = []interface{}{ + (*CorsPolicy_FilterEnabled)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[8].OneofWrappers = []interface{}{ + (*RouteAction_Cluster)(nil), + (*RouteAction_ClusterHeader)(nil), + (*RouteAction_WeightedClusters)(nil), + (*RouteAction_ClusterSpecifierPlugin)(nil), + (*RouteAction_InlineClusterSpecifierPlugin)(nil), + (*RouteAction_HostRewriteLiteral)(nil), + (*RouteAction_AutoHostRewrite)(nil), + (*RouteAction_HostRewriteHeader)(nil), + (*RouteAction_HostRewritePathRegex)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[11].OneofWrappers = []interface{}{ + 
(*RedirectAction_HttpsRedirect)(nil), + (*RedirectAction_SchemeRedirect)(nil), + (*RedirectAction_PathRedirect)(nil), + (*RedirectAction_PrefixRewrite)(nil), + (*RedirectAction_RegexRewrite)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[18].OneofWrappers = []interface{}{ + (*HeaderMatcher_ExactMatch)(nil), + (*HeaderMatcher_SafeRegexMatch)(nil), + (*HeaderMatcher_RangeMatch)(nil), + (*HeaderMatcher_PresentMatch)(nil), + (*HeaderMatcher_PrefixMatch)(nil), + (*HeaderMatcher_SuffixMatch)(nil), + (*HeaderMatcher_ContainsMatch)(nil), + (*HeaderMatcher_StringMatch)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[19].OneofWrappers = []interface{}{ + (*QueryParameterMatcher_StringMatch)(nil), + (*QueryParameterMatcher_PresentMatch)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[24].OneofWrappers = []interface{}{ + (*WeightedCluster_ClusterWeight_HostRewriteLiteral)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[30].OneofWrappers = []interface{}{ + (*RouteAction_HashPolicy_Header_)(nil), + (*RouteAction_HashPolicy_Cookie_)(nil), + (*RouteAction_HashPolicy_ConnectionProperties_)(nil), + (*RouteAction_HashPolicy_QueryParameter_)(nil), + (*RouteAction_HashPolicy_FilterState_)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[40].OneofWrappers = []interface{}{ + (*RetryPolicy_RetryPriority_TypedConfig)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[41].OneofWrappers = []interface{}{ + (*RetryPolicy_RetryHostPredicate_TypedConfig)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[45].OneofWrappers = []interface{}{ + (*RateLimit_Action_SourceCluster_)(nil), + (*RateLimit_Action_DestinationCluster_)(nil), + (*RateLimit_Action_RequestHeaders_)(nil), + (*RateLimit_Action_RemoteAddress_)(nil), + (*RateLimit_Action_GenericKey_)(nil), + (*RateLimit_Action_HeaderValueMatch_)(nil), + (*RateLimit_Action_DynamicMetadata)(nil), + (*RateLimit_Action_Metadata)(nil), + (*RateLimit_Action_Extension)(nil), + (*RateLimit_Action_MaskedRemoteAddress_)(nil), + (*RateLimit_Action_QueryParameterValueMatch_)(nil), + } + file_envoy_config_route_v3_route_components_proto_msgTypes[46].OneofWrappers = []interface{}{ + (*RateLimit_Override_DynamicMetadata_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_route_v3_route_components_proto_rawDesc, + NumEnums: 6, + NumMessages: 58, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_route_v3_route_components_proto_goTypes, + DependencyIndexes: file_envoy_config_route_v3_route_components_proto_depIdxs, + EnumInfos: file_envoy_config_route_v3_route_components_proto_enumTypes, + MessageInfos: file_envoy_config_route_v3_route_components_proto_msgTypes, + }.Build() + File_envoy_config_route_v3_route_components_proto = out.File + file_envoy_config_route_v3_route_components_proto_rawDesc = nil + file_envoy_config_route_v3_route_components_proto_goTypes = nil + file_envoy_config_route_v3_route_components_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.validate.go new file mode 100644 index 0000000000..ffa6daf7b8 --- /dev/null +++ 
b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.validate.go @@ -0,0 +1,12236 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/route/v3/route_components.proto + +package routev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = v3.RoutingPriority(0) +) + +// Validate checks the field values on VirtualHost with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *VirtualHost) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on VirtualHost with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in VirtualHostMultiError, or +// nil if none found. +func (m *VirtualHost) ValidateAll() error { + return m.validate(true) +} + +func (m *VirtualHost) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := VirtualHostValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetDomains()) < 1 { + err := VirtualHostValidationError{ + field: "Domains", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetDomains() { + _, _ = idx, item + + if !_VirtualHost_Domains_Pattern.MatchString(item) { + err := VirtualHostValidationError{ + field: fmt.Sprintf("Domains[%v]", idx), + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + for idx, item := range m.GetRoutes() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("Routes[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("Routes[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: fmt.Sprintf("Routes[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) 
+ } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if _, ok := VirtualHost_TlsRequirementType_name[int32(m.GetRequireTls())]; !ok { + err := VirtualHostValidationError{ + field: "RequireTls", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetVirtualClusters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("VirtualClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("VirtualClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: fmt.Sprintf("VirtualClusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetRateLimits() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("RateLimits[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("RateLimits[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: fmt.Sprintf("RateLimits[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(m.GetRequestHeadersToAdd()) > 1000 { + err := VirtualHostValidationError{ + field: "RequestHeadersToAdd", + reason: "value must contain no more than 1000 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRequestHeadersToAdd() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + 
if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetRequestHeadersToRemove() { + _, _ = idx, item + + if utf8.RuneCountInString(item) < 1 { + err := VirtualHostValidationError{ + field: fmt.Sprintf("RequestHeadersToRemove[%v]", idx), + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_VirtualHost_RequestHeadersToRemove_Pattern.MatchString(item) { + err := VirtualHostValidationError{ + field: fmt.Sprintf("RequestHeadersToRemove[%v]", idx), + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(m.GetResponseHeadersToAdd()) > 1000 { + err := VirtualHostValidationError{ + field: "ResponseHeadersToAdd", + reason: "value must contain no more than 1000 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetResponseHeadersToAdd() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetResponseHeadersToRemove() { + _, _ = idx, item + + if utf8.RuneCountInString(item) < 1 { + err := VirtualHostValidationError{ + field: fmt.Sprintf("ResponseHeadersToRemove[%v]", idx), + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_VirtualHost_ResponseHeadersToRemove_Pattern.MatchString(item) { + err := VirtualHostValidationError{ + field: fmt.Sprintf("ResponseHeadersToRemove[%v]", idx), + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetCors()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "Cors", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "Cors", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCors()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: "Cors", + reason: "embedded message failed validation", + cause: err, + } + } + } + + { + sorted_keys := make([]string, len(m.GetTypedPerFilterConfig())) + i := 0 + for key := range 
m.GetTypedPerFilterConfig() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetTypedPerFilterConfig()[key] + _ = val + + // no validation rules for TypedPerFilterConfig[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + // no validation rules for IncludeRequestAttemptCount + + // no validation rules for IncludeAttemptCountInResponse + + if all { + switch v := interface{}(m.GetRetryPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRetryPolicyTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "RetryPolicyTypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "RetryPolicyTypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryPolicyTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: "RetryPolicyTypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetHedgePolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "HedgePolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "HedgePolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHedgePolicy()).(interface{ Validate() error }); ok { + 
if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: "HedgePolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for IncludeIsTimeoutRetryHeader + + if all { + switch v := interface{}(m.GetPerRequestBufferLimitBytes()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "PerRequestBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "PerRequestBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPerRequestBufferLimitBytes()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: "PerRequestBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetRequestMirrorPolicies() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("RequestMirrorPolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: fmt.Sprintf("RequestMirrorPolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: fmt.Sprintf("RequestMirrorPolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualHostValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualHostValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return VirtualHostMultiError(errors) + } + + return nil +} + +// VirtualHostMultiError is an error wrapping multiple validation errors +// returned by VirtualHost.ValidateAll() if the designated constraints aren't met. +type VirtualHostMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m VirtualHostMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m VirtualHostMultiError) AllErrors() []error { return m } + +// VirtualHostValidationError is the validation error returned by +// VirtualHost.Validate if the designated constraints aren't met. +type VirtualHostValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e VirtualHostValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e VirtualHostValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e VirtualHostValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e VirtualHostValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e VirtualHostValidationError) ErrorName() string { return "VirtualHostValidationError" } + +// Error satisfies the builtin error interface +func (e VirtualHostValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sVirtualHost.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = VirtualHostValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = VirtualHostValidationError{} + +var _VirtualHost_Domains_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _VirtualHost_RequestHeadersToRemove_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _VirtualHost_ResponseHeadersToRemove_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on FilterAction with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *FilterAction) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FilterAction with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in FilterActionMultiError, or +// nil if none found. +func (m *FilterAction) ValidateAll() error { + return m.validate(true) +} + +func (m *FilterAction) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetAction()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterActionValidationError{ + field: "Action", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterActionValidationError{ + field: "Action", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAction()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterActionValidationError{ + field: "Action", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return FilterActionMultiError(errors) + } + + return nil +} + +// FilterActionMultiError is an error wrapping multiple validation errors +// returned by FilterAction.ValidateAll() if the designated constraints aren't met. +type FilterActionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m FilterActionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FilterActionMultiError) AllErrors() []error { return m } + +// FilterActionValidationError is the validation error returned by +// FilterAction.Validate if the designated constraints aren't met. +type FilterActionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FilterActionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FilterActionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FilterActionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FilterActionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FilterActionValidationError) ErrorName() string { return "FilterActionValidationError" } + +// Error satisfies the builtin error interface +func (e FilterActionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFilterAction.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FilterActionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FilterActionValidationError{} + +// Validate checks the field values on RouteList with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RouteList) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteList with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RouteListMultiError, or nil +// if none found. +func (m *RouteList) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteList) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetRoutes() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteListValidationError{ + field: fmt.Sprintf("Routes[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteListValidationError{ + field: fmt.Sprintf("Routes[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteListValidationError{ + field: fmt.Sprintf("Routes[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return RouteListMultiError(errors) + } + + return nil +} + +// RouteListMultiError is an error wrapping multiple validation errors returned +// by RouteList.ValidateAll() if the designated constraints aren't met. 
+type RouteListMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteListMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteListMultiError) AllErrors() []error { return m } + +// RouteListValidationError is the validation error returned by +// RouteList.Validate if the designated constraints aren't met. +type RouteListValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteListValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteListValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteListValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteListValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteListValidationError) ErrorName() string { return "RouteListValidationError" } + +// Error satisfies the builtin error interface +func (e RouteListValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteList.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteListValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteListValidationError{} + +// Validate checks the field values on Route with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Route) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Route with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in RouteMultiError, or nil if none found. 
+func (m *Route) ValidateAll() error { + return m.validate(true) +} + +func (m *Route) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + if m.GetMatch() == nil { + err := RouteValidationError{ + field: "Match", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: "Match", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: "Match", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: "Match", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetDecorator()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: "Decorator", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: "Decorator", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDecorator()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: "Decorator", + reason: "embedded message failed validation", + cause: err, + } + } + } + + { + sorted_keys := make([]string, len(m.GetTypedPerFilterConfig())) + i := 0 + for key := range m.GetTypedPerFilterConfig() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetTypedPerFilterConfig()[key] + _ = val + + // no validation rules for TypedPerFilterConfig[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", key), + 
reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + if len(m.GetRequestHeadersToAdd()) > 1000 { + err := RouteValidationError{ + field: "RequestHeadersToAdd", + reason: "value must contain no more than 1000 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRequestHeadersToAdd() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetRequestHeadersToRemove() { + _, _ = idx, item + + if utf8.RuneCountInString(item) < 1 { + err := RouteValidationError{ + field: fmt.Sprintf("RequestHeadersToRemove[%v]", idx), + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_Route_RequestHeadersToRemove_Pattern.MatchString(item) { + err := RouteValidationError{ + field: fmt.Sprintf("RequestHeadersToRemove[%v]", idx), + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(m.GetResponseHeadersToAdd()) > 1000 { + err := RouteValidationError{ + field: "ResponseHeadersToAdd", + reason: "value must contain no more than 1000 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetResponseHeadersToAdd() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetResponseHeadersToRemove() { + _, _ = idx, item + + if utf8.RuneCountInString(item) < 1 { + err := RouteValidationError{ + field: fmt.Sprintf("ResponseHeadersToRemove[%v]", idx), + reason: "value length must be at least 
1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_Route_ResponseHeadersToRemove_Pattern.MatchString(item) { + err := RouteValidationError{ + field: fmt.Sprintf("ResponseHeadersToRemove[%v]", idx), + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetTracing()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: "Tracing", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: "Tracing", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTracing()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: "Tracing", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPerRequestBufferLimitBytes()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: "PerRequestBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: "PerRequestBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPerRequestBufferLimitBytes()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: "PerRequestBufferLimitBytes", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for StatPrefix + + oneofActionPresent := false + switch v := m.Action.(type) { + case *Route_Route: + if v == nil { + err := RouteValidationError{ + field: "Action", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionPresent = true + + if all { + switch v := interface{}(m.GetRoute()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: "Route", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: "Route", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRoute()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: "Route", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Route_Redirect: + if v == nil { + err := RouteValidationError{ + field: "Action", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionPresent = true + + if all { + switch v := interface{}(m.GetRedirect()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: "Redirect", + reason: "embedded message 
failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: "Redirect", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRedirect()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: "Redirect", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Route_DirectResponse: + if v == nil { + err := RouteValidationError{ + field: "Action", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionPresent = true + + if all { + switch v := interface{}(m.GetDirectResponse()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: "DirectResponse", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: "DirectResponse", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDirectResponse()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: "DirectResponse", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Route_FilterAction: + if v == nil { + err := RouteValidationError{ + field: "Action", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionPresent = true + + if all { + switch v := interface{}(m.GetFilterAction()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: "FilterAction", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: "FilterAction", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilterAction()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: "FilterAction", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Route_NonForwardingAction: + if v == nil { + err := RouteValidationError{ + field: "Action", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionPresent = true + + if all { + switch v := interface{}(m.GetNonForwardingAction()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteValidationError{ + field: "NonForwardingAction", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteValidationError{ + field: "NonForwardingAction", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNonForwardingAction()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteValidationError{ + field: 
"NonForwardingAction", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofActionPresent { + err := RouteValidationError{ + field: "Action", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RouteMultiError(errors) + } + + return nil +} + +// RouteMultiError is an error wrapping multiple validation errors returned by +// Route.ValidateAll() if the designated constraints aren't met. +type RouteMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteMultiError) AllErrors() []error { return m } + +// RouteValidationError is the validation error returned by Route.Validate if +// the designated constraints aren't met. +type RouteValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteValidationError) ErrorName() string { return "RouteValidationError" } + +// Error satisfies the builtin error interface +func (e RouteValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRoute.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteValidationError{} + +var _Route_RequestHeadersToRemove_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _Route_ResponseHeadersToRemove_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on WeightedCluster with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *WeightedCluster) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on WeightedCluster with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// WeightedClusterMultiError, or nil if none found. 
+func (m *WeightedCluster) ValidateAll() error { + return m.validate(true) +} + +func (m *WeightedCluster) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetClusters()) < 1 { + err := WeightedClusterValidationError{ + field: "Clusters", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetClusters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WeightedClusterValidationError{ + field: fmt.Sprintf("Clusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WeightedClusterValidationError{ + field: fmt.Sprintf("Clusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WeightedClusterValidationError{ + field: fmt.Sprintf("Clusters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetTotalWeight()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WeightedClusterValidationError{ + field: "TotalWeight", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WeightedClusterValidationError{ + field: "TotalWeight", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTotalWeight()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WeightedClusterValidationError{ + field: "TotalWeight", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for RuntimeKeyPrefix + + switch v := m.RandomValueSpecifier.(type) { + case *WeightedCluster_HeaderName: + if v == nil { + err := WeightedClusterValidationError{ + field: "RandomValueSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_WeightedCluster_HeaderName_Pattern.MatchString(m.GetHeaderName()) { + err := WeightedClusterValidationError{ + field: "HeaderName", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return WeightedClusterMultiError(errors) + } + + return nil +} + +// WeightedClusterMultiError is an error wrapping multiple validation errors +// returned by WeightedCluster.ValidateAll() if the designated constraints +// aren't met. +type WeightedClusterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m WeightedClusterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m WeightedClusterMultiError) AllErrors() []error { return m } + +// WeightedClusterValidationError is the validation error returned by +// WeightedCluster.Validate if the designated constraints aren't met. +type WeightedClusterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e WeightedClusterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e WeightedClusterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e WeightedClusterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e WeightedClusterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e WeightedClusterValidationError) ErrorName() string { return "WeightedClusterValidationError" } + +// Error satisfies the builtin error interface +func (e WeightedClusterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sWeightedCluster.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = WeightedClusterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = WeightedClusterValidationError{} + +var _WeightedCluster_HeaderName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on ClusterSpecifierPlugin with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ClusterSpecifierPlugin) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClusterSpecifierPlugin with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ClusterSpecifierPluginMultiError, or nil if none found. 
+func (m *ClusterSpecifierPlugin) ValidateAll() error { + return m.validate(true) +} + +func (m *ClusterSpecifierPlugin) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetExtension() == nil { + err := ClusterSpecifierPluginValidationError{ + field: "Extension", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetExtension()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClusterSpecifierPluginValidationError{ + field: "Extension", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClusterSpecifierPluginValidationError{ + field: "Extension", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExtension()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClusterSpecifierPluginValidationError{ + field: "Extension", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for IsOptional + + if len(errors) > 0 { + return ClusterSpecifierPluginMultiError(errors) + } + + return nil +} + +// ClusterSpecifierPluginMultiError is an error wrapping multiple validation +// errors returned by ClusterSpecifierPlugin.ValidateAll() if the designated +// constraints aren't met. +type ClusterSpecifierPluginMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClusterSpecifierPluginMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClusterSpecifierPluginMultiError) AllErrors() []error { return m } + +// ClusterSpecifierPluginValidationError is the validation error returned by +// ClusterSpecifierPlugin.Validate if the designated constraints aren't met. +type ClusterSpecifierPluginValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClusterSpecifierPluginValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClusterSpecifierPluginValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClusterSpecifierPluginValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClusterSpecifierPluginValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ClusterSpecifierPluginValidationError) ErrorName() string { + return "ClusterSpecifierPluginValidationError" +} + +// Error satisfies the builtin error interface +func (e ClusterSpecifierPluginValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClusterSpecifierPlugin.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClusterSpecifierPluginValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClusterSpecifierPluginValidationError{} + +// Validate checks the field values on RouteMatch with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RouteMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteMatch with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RouteMatchMultiError, or +// nil if none found. +func (m *RouteMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetCaseSensitive()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "CaseSensitive", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "CaseSensitive", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCaseSensitive()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteMatchValidationError{ + field: "CaseSensitive", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRuntimeFraction()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "RuntimeFraction", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "RuntimeFraction", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRuntimeFraction()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteMatchValidationError{ + field: "RuntimeFraction", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetHeaders() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: 
fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteMatchValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetQueryParameters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: fmt.Sprintf("QueryParameters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: fmt.Sprintf("QueryParameters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteMatchValidationError{ + field: fmt.Sprintf("QueryParameters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetGrpc()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "Grpc", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "Grpc", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpc()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteMatchValidationError{ + field: "Grpc", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTlsContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "TlsContext", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "TlsContext", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTlsContext()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteMatchValidationError{ + field: "TlsContext", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetDynamicMetadata() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: fmt.Sprintf("DynamicMetadata[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: fmt.Sprintf("DynamicMetadata[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); 
ok { + if err := v.Validate(); err != nil { + return RouteMatchValidationError{ + field: fmt.Sprintf("DynamicMetadata[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + oneofPathSpecifierPresent := false + switch v := m.PathSpecifier.(type) { + case *RouteMatch_Prefix: + if v == nil { + err := RouteMatchValidationError{ + field: "PathSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPathSpecifierPresent = true + // no validation rules for Prefix + case *RouteMatch_Path: + if v == nil { + err := RouteMatchValidationError{ + field: "PathSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPathSpecifierPresent = true + // no validation rules for Path + case *RouteMatch_SafeRegex: + if v == nil { + err := RouteMatchValidationError{ + field: "PathSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPathSpecifierPresent = true + + if m.GetSafeRegex() == nil { + err := RouteMatchValidationError{ + field: "SafeRegex", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetSafeRegex()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "SafeRegex", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "SafeRegex", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSafeRegex()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteMatchValidationError{ + field: "SafeRegex", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RouteMatch_ConnectMatcher_: + if v == nil { + err := RouteMatchValidationError{ + field: "PathSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPathSpecifierPresent = true + + if all { + switch v := interface{}(m.GetConnectMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "ConnectMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "ConnectMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConnectMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteMatchValidationError{ + field: "ConnectMatcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RouteMatch_PathSeparatedPrefix: + if v == nil { + err := RouteMatchValidationError{ + field: "PathSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPathSpecifierPresent = true + + if !_RouteMatch_PathSeparatedPrefix_Pattern.MatchString(m.GetPathSeparatedPrefix()) { + err := 
RouteMatchValidationError{ + field: "PathSeparatedPrefix", + reason: "value does not match regex pattern \"^[^?#]+[^?#/]$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *RouteMatch_PathMatchPolicy: + if v == nil { + err := RouteMatchValidationError{ + field: "PathSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPathSpecifierPresent = true + + if all { + switch v := interface{}(m.GetPathMatchPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "PathMatchPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteMatchValidationError{ + field: "PathMatchPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPathMatchPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteMatchValidationError{ + field: "PathMatchPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofPathSpecifierPresent { + err := RouteMatchValidationError{ + field: "PathSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RouteMatchMultiError(errors) + } + + return nil +} + +// RouteMatchMultiError is an error wrapping multiple validation errors +// returned by RouteMatch.ValidateAll() if the designated constraints aren't met. +type RouteMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteMatchMultiError) AllErrors() []error { return m } + +// RouteMatchValidationError is the validation error returned by +// RouteMatch.Validate if the designated constraints aren't met. +type RouteMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RouteMatchValidationError) ErrorName() string { return "RouteMatchValidationError" } + +// Error satisfies the builtin error interface +func (e RouteMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteMatchValidationError{} + +var _RouteMatch_PathSeparatedPrefix_Pattern = regexp.MustCompile("^[^?#]+[^?#/]$") + +// Validate checks the field values on CorsPolicy with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *CorsPolicy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CorsPolicy with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in CorsPolicyMultiError, or +// nil if none found. +func (m *CorsPolicy) ValidateAll() error { + return m.validate(true) +} + +func (m *CorsPolicy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetAllowOriginStringMatch() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: fmt.Sprintf("AllowOriginStringMatch[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: fmt.Sprintf("AllowOriginStringMatch[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CorsPolicyValidationError{ + field: fmt.Sprintf("AllowOriginStringMatch[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for AllowMethods + + // no validation rules for AllowHeaders + + // no validation rules for ExposeHeaders + + // no validation rules for MaxAge + + if all { + switch v := interface{}(m.GetAllowCredentials()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: "AllowCredentials", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: "AllowCredentials", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAllowCredentials()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CorsPolicyValidationError{ + field: "AllowCredentials", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetShadowEnabled()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = 
append(errors, CorsPolicyValidationError{ + field: "ShadowEnabled", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: "ShadowEnabled", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetShadowEnabled()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CorsPolicyValidationError{ + field: "ShadowEnabled", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetAllowPrivateNetworkAccess()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: "AllowPrivateNetworkAccess", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: "AllowPrivateNetworkAccess", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAllowPrivateNetworkAccess()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CorsPolicyValidationError{ + field: "AllowPrivateNetworkAccess", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetForwardNotMatchingPreflights()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: "ForwardNotMatchingPreflights", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: "ForwardNotMatchingPreflights", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetForwardNotMatchingPreflights()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CorsPolicyValidationError{ + field: "ForwardNotMatchingPreflights", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.EnabledSpecifier.(type) { + case *CorsPolicy_FilterEnabled: + if v == nil { + err := CorsPolicyValidationError{ + field: "EnabledSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetFilterEnabled()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: "FilterEnabled", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CorsPolicyValidationError{ + field: "FilterEnabled", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilterEnabled()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CorsPolicyValidationError{ + field: "FilterEnabled", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return 
CorsPolicyMultiError(errors) + } + + return nil +} + +// CorsPolicyMultiError is an error wrapping multiple validation errors +// returned by CorsPolicy.ValidateAll() if the designated constraints aren't met. +type CorsPolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CorsPolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CorsPolicyMultiError) AllErrors() []error { return m } + +// CorsPolicyValidationError is the validation error returned by +// CorsPolicy.Validate if the designated constraints aren't met. +type CorsPolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CorsPolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CorsPolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CorsPolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CorsPolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CorsPolicyValidationError) ErrorName() string { return "CorsPolicyValidationError" } + +// Error satisfies the builtin error interface +func (e CorsPolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCorsPolicy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CorsPolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CorsPolicyValidationError{} + +// Validate checks the field values on RouteAction with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RouteAction) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteAction with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RouteActionMultiError, or +// nil if none found. 
+func (m *RouteAction) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteAction) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := RouteAction_ClusterNotFoundResponseCode_name[int32(m.GetClusterNotFoundResponseCode())]; !ok { + err := RouteActionValidationError{ + field: "ClusterNotFoundResponseCode", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetMetadataMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "MetadataMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "MetadataMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadataMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "MetadataMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if !_RouteAction_PrefixRewrite_Pattern.MatchString(m.GetPrefixRewrite()) { + err := RouteActionValidationError{ + field: "PrefixRewrite", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetRegexRewrite()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "RegexRewrite", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "RegexRewrite", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRegexRewrite()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "RegexRewrite", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPathRewritePolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "PathRewritePolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "PathRewritePolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPathRewritePolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "PathRewritePolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for AppendXForwardedHost + + if all { + switch v := interface{}(m.GetTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "Timeout", + reason: "embedded message failed validation", + cause: err, + }) + } + 
case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "Timeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "Timeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetIdleTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "IdleTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "IdleTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetIdleTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "IdleTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetEarlyDataPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "EarlyDataPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "EarlyDataPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEarlyDataPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "EarlyDataPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRetryPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "RetryPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRetryPolicyTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "RetryPolicyTypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "RetryPolicyTypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryPolicyTypedConfig()).(interface{ Validate() error }); ok { + 
if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "RetryPolicyTypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetRequestMirrorPolicies() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: fmt.Sprintf("RequestMirrorPolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: fmt.Sprintf("RequestMirrorPolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: fmt.Sprintf("RequestMirrorPolicies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if _, ok := v3.RoutingPriority_name[int32(m.GetPriority())]; !ok { + err := RouteActionValidationError{ + field: "Priority", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRateLimits() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: fmt.Sprintf("RateLimits[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: fmt.Sprintf("RateLimits[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: fmt.Sprintf("RateLimits[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetIncludeVhRateLimits()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "IncludeVhRateLimits", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "IncludeVhRateLimits", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetIncludeVhRateLimits()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "IncludeVhRateLimits", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetHashPolicy() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: fmt.Sprintf("HashPolicy[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := 
v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: fmt.Sprintf("HashPolicy[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: fmt.Sprintf("HashPolicy[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetCors()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "Cors", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "Cors", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCors()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "Cors", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMaxGrpcTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "MaxGrpcTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "MaxGrpcTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxGrpcTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "MaxGrpcTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetGrpcTimeoutOffset()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "GrpcTimeoutOffset", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "GrpcTimeoutOffset", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpcTimeoutOffset()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "GrpcTimeoutOffset", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetUpgradeConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: fmt.Sprintf("UpgradeConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: fmt.Sprintf("UpgradeConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := 
interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: fmt.Sprintf("UpgradeConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetInternalRedirectPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "InternalRedirectPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "InternalRedirectPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInternalRedirectPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "InternalRedirectPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for InternalRedirectAction + + if all { + switch v := interface{}(m.GetMaxInternalRedirects()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "MaxInternalRedirects", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "MaxInternalRedirects", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxInternalRedirects()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "MaxInternalRedirects", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetHedgePolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "HedgePolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "HedgePolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHedgePolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "HedgePolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMaxStreamDuration()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "MaxStreamDuration", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "MaxStreamDuration", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxStreamDuration()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "MaxStreamDuration", + 
reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofClusterSpecifierPresent := false + switch v := m.ClusterSpecifier.(type) { + case *RouteAction_Cluster: + if v == nil { + err := RouteActionValidationError{ + field: "ClusterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofClusterSpecifierPresent = true + + if utf8.RuneCountInString(m.GetCluster()) < 1 { + err := RouteActionValidationError{ + field: "Cluster", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *RouteAction_ClusterHeader: + if v == nil { + err := RouteActionValidationError{ + field: "ClusterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofClusterSpecifierPresent = true + + if utf8.RuneCountInString(m.GetClusterHeader()) < 1 { + err := RouteActionValidationError{ + field: "ClusterHeader", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_RouteAction_ClusterHeader_Pattern.MatchString(m.GetClusterHeader()) { + err := RouteActionValidationError{ + field: "ClusterHeader", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *RouteAction_WeightedClusters: + if v == nil { + err := RouteActionValidationError{ + field: "ClusterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofClusterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetWeightedClusters()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "WeightedClusters", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "WeightedClusters", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWeightedClusters()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "WeightedClusters", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RouteAction_ClusterSpecifierPlugin: + if v == nil { + err := RouteActionValidationError{ + field: "ClusterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofClusterSpecifierPresent = true + // no validation rules for ClusterSpecifierPlugin + case *RouteAction_InlineClusterSpecifierPlugin: + if v == nil { + err := RouteActionValidationError{ + field: "ClusterSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofClusterSpecifierPresent = true + + if all { + switch v := interface{}(m.GetInlineClusterSpecifierPlugin()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "InlineClusterSpecifierPlugin", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err 
!= nil { + errors = append(errors, RouteActionValidationError{ + field: "InlineClusterSpecifierPlugin", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInlineClusterSpecifierPlugin()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "InlineClusterSpecifierPlugin", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofClusterSpecifierPresent { + err := RouteActionValidationError{ + field: "ClusterSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + switch v := m.HostRewriteSpecifier.(type) { + case *RouteAction_HostRewriteLiteral: + if v == nil { + err := RouteActionValidationError{ + field: "HostRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_RouteAction_HostRewriteLiteral_Pattern.MatchString(m.GetHostRewriteLiteral()) { + err := RouteActionValidationError{ + field: "HostRewriteLiteral", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *RouteAction_AutoHostRewrite: + if v == nil { + err := RouteActionValidationError{ + field: "HostRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetAutoHostRewrite()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "AutoHostRewrite", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "AutoHostRewrite", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAutoHostRewrite()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "AutoHostRewrite", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RouteAction_HostRewriteHeader: + if v == nil { + err := RouteActionValidationError{ + field: "HostRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_RouteAction_HostRewriteHeader_Pattern.MatchString(m.GetHostRewriteHeader()) { + err := RouteActionValidationError{ + field: "HostRewriteHeader", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *RouteAction_HostRewritePathRegex: + if v == nil { + err := RouteActionValidationError{ + field: "HostRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetHostRewritePathRegex()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteActionValidationError{ + field: "HostRewritePathRegex", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = 
append(errors, RouteActionValidationError{ + field: "HostRewritePathRegex", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHostRewritePathRegex()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteActionValidationError{ + field: "HostRewritePathRegex", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return RouteActionMultiError(errors) + } + + return nil +} + +// RouteActionMultiError is an error wrapping multiple validation errors +// returned by RouteAction.ValidateAll() if the designated constraints aren't met. +type RouteActionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteActionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteActionMultiError) AllErrors() []error { return m } + +// RouteActionValidationError is the validation error returned by +// RouteAction.Validate if the designated constraints aren't met. +type RouteActionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteActionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteActionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteActionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteActionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteActionValidationError) ErrorName() string { return "RouteActionValidationError" } + +// Error satisfies the builtin error interface +func (e RouteActionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteAction.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteActionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteActionValidationError{} + +var _RouteAction_ClusterHeader_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _RouteAction_PrefixRewrite_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _RouteAction_HostRewriteLiteral_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _RouteAction_HostRewriteHeader_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on RetryPolicy with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RetryPolicy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RetryPolicy with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RetryPolicyMultiError, or +// nil if none found. 
+func (m *RetryPolicy) ValidateAll() error { + return m.validate(true) +} + +func (m *RetryPolicy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for RetryOn + + if all { + switch v := interface{}(m.GetNumRetries()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "NumRetries", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "NumRetries", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNumRetries()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: "NumRetries", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPerTryTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "PerTryTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "PerTryTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPerTryTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: "PerTryTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPerTryIdleTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "PerTryIdleTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "PerTryIdleTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPerTryIdleTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: "PerTryIdleTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRetryPriority()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "RetryPriority", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "RetryPriority", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryPriority()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: "RetryPriority", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetRetryHostPredicate() { + _, _ = idx, item + + if all { + switch v := 
interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: fmt.Sprintf("RetryHostPredicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: fmt.Sprintf("RetryHostPredicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: fmt.Sprintf("RetryHostPredicate[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetRetryOptionsPredicates() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: fmt.Sprintf("RetryOptionsPredicates[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: fmt.Sprintf("RetryOptionsPredicates[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: fmt.Sprintf("RetryOptionsPredicates[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for HostSelectionRetryMaxAttempts + + if all { + switch v := interface{}(m.GetRetryBackOff()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "RetryBackOff", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "RetryBackOff", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRetryBackOff()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: "RetryBackOff", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRateLimitedRetryBackOff()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "RateLimitedRetryBackOff", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: "RateLimitedRetryBackOff", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRateLimitedRetryBackOff()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: "RateLimitedRetryBackOff", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetRetriableHeaders() { + 
_, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: fmt.Sprintf("RetriableHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: fmt.Sprintf("RetriableHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: fmt.Sprintf("RetriableHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetRetriableRequestHeaders() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: fmt.Sprintf("RetriableRequestHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicyValidationError{ + field: fmt.Sprintf("RetriableRequestHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicyValidationError{ + field: fmt.Sprintf("RetriableRequestHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return RetryPolicyMultiError(errors) + } + + return nil +} + +// RetryPolicyMultiError is an error wrapping multiple validation errors +// returned by RetryPolicy.ValidateAll() if the designated constraints aren't met. +type RetryPolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RetryPolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RetryPolicyMultiError) AllErrors() []error { return m } + +// RetryPolicyValidationError is the validation error returned by +// RetryPolicy.Validate if the designated constraints aren't met. +type RetryPolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RetryPolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RetryPolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RetryPolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RetryPolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RetryPolicyValidationError) ErrorName() string { return "RetryPolicyValidationError" } + +// Error satisfies the builtin error interface +func (e RetryPolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRetryPolicy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RetryPolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RetryPolicyValidationError{} + +// Validate checks the field values on HedgePolicy with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HedgePolicy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HedgePolicy with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HedgePolicyMultiError, or +// nil if none found. +func (m *HedgePolicy) ValidateAll() error { + return m.validate(true) +} + +func (m *HedgePolicy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if wrapper := m.GetInitialRequests(); wrapper != nil { + + if wrapper.GetValue() < 1 { + err := HedgePolicyValidationError{ + field: "InitialRequests", + reason: "value must be greater than or equal to 1", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetAdditionalRequestChance()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HedgePolicyValidationError{ + field: "AdditionalRequestChance", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HedgePolicyValidationError{ + field: "AdditionalRequestChance", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAdditionalRequestChance()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HedgePolicyValidationError{ + field: "AdditionalRequestChance", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for HedgeOnPerTryTimeout + + if len(errors) > 0 { + return HedgePolicyMultiError(errors) + } + + return nil +} + +// HedgePolicyMultiError is an error wrapping multiple validation errors +// returned by HedgePolicy.ValidateAll() if the designated constraints aren't met. +type HedgePolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HedgePolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HedgePolicyMultiError) AllErrors() []error { return m } + +// HedgePolicyValidationError is the validation error returned by +// HedgePolicy.Validate if the designated constraints aren't met. +type HedgePolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e HedgePolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HedgePolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HedgePolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HedgePolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HedgePolicyValidationError) ErrorName() string { return "HedgePolicyValidationError" } + +// Error satisfies the builtin error interface +func (e HedgePolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHedgePolicy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HedgePolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HedgePolicyValidationError{} + +// Validate checks the field values on RedirectAction with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RedirectAction) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RedirectAction with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RedirectActionMultiError, +// or nil if none found. +func (m *RedirectAction) ValidateAll() error { + return m.validate(true) +} + +func (m *RedirectAction) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if !_RedirectAction_HostRedirect_Pattern.MatchString(m.GetHostRedirect()) { + err := RedirectActionValidationError{ + field: "HostRedirect", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for PortRedirect + + if _, ok := RedirectAction_RedirectResponseCode_name[int32(m.GetResponseCode())]; !ok { + err := RedirectActionValidationError{ + field: "ResponseCode", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for StripQuery + + switch v := m.SchemeRewriteSpecifier.(type) { + case *RedirectAction_HttpsRedirect: + if v == nil { + err := RedirectActionValidationError{ + field: "SchemeRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for HttpsRedirect + case *RedirectAction_SchemeRedirect: + if v == nil { + err := RedirectActionValidationError{ + field: "SchemeRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for SchemeRedirect + default: + _ = v // ensures v is used + } + switch v := m.PathRewriteSpecifier.(type) { + case *RedirectAction_PathRedirect: + if v == nil { + err := RedirectActionValidationError{ + field: "PathRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_RedirectAction_PathRedirect_Pattern.MatchString(m.GetPathRedirect()) { + err := 
RedirectActionValidationError{ + field: "PathRedirect", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *RedirectAction_PrefixRewrite: + if v == nil { + err := RedirectActionValidationError{ + field: "PathRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_RedirectAction_PrefixRewrite_Pattern.MatchString(m.GetPrefixRewrite()) { + err := RedirectActionValidationError{ + field: "PrefixRewrite", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *RedirectAction_RegexRewrite: + if v == nil { + err := RedirectActionValidationError{ + field: "PathRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetRegexRewrite()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RedirectActionValidationError{ + field: "RegexRewrite", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RedirectActionValidationError{ + field: "RegexRewrite", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRegexRewrite()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RedirectActionValidationError{ + field: "RegexRewrite", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return RedirectActionMultiError(errors) + } + + return nil +} + +// RedirectActionMultiError is an error wrapping multiple validation errors +// returned by RedirectAction.ValidateAll() if the designated constraints +// aren't met. +type RedirectActionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RedirectActionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RedirectActionMultiError) AllErrors() []error { return m } + +// RedirectActionValidationError is the validation error returned by +// RedirectAction.Validate if the designated constraints aren't met. +type RedirectActionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RedirectActionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RedirectActionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RedirectActionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RedirectActionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RedirectActionValidationError) ErrorName() string { return "RedirectActionValidationError" } + +// Error satisfies the builtin error interface +func (e RedirectActionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRedirectAction.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RedirectActionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RedirectActionValidationError{} + +var _RedirectAction_HostRedirect_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _RedirectAction_PathRedirect_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _RedirectAction_PrefixRewrite_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on DirectResponseAction with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DirectResponseAction) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DirectResponseAction with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DirectResponseActionMultiError, or nil if none found. +func (m *DirectResponseAction) ValidateAll() error { + return m.validate(true) +} + +func (m *DirectResponseAction) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if val := m.GetStatus(); val < 200 || val >= 600 { + err := DirectResponseActionValidationError{ + field: "Status", + reason: "value must be inside range [200, 600)", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetBody()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DirectResponseActionValidationError{ + field: "Body", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DirectResponseActionValidationError{ + field: "Body", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBody()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DirectResponseActionValidationError{ + field: "Body", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DirectResponseActionMultiError(errors) + } + + return nil +} + +// DirectResponseActionMultiError is an error wrapping multiple validation +// errors returned by DirectResponseAction.ValidateAll() if the designated +// constraints aren't met. +type DirectResponseActionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DirectResponseActionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m DirectResponseActionMultiError) AllErrors() []error { return m } + +// DirectResponseActionValidationError is the validation error returned by +// DirectResponseAction.Validate if the designated constraints aren't met. +type DirectResponseActionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DirectResponseActionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DirectResponseActionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DirectResponseActionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DirectResponseActionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DirectResponseActionValidationError) ErrorName() string { + return "DirectResponseActionValidationError" +} + +// Error satisfies the builtin error interface +func (e DirectResponseActionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDirectResponseAction.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DirectResponseActionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DirectResponseActionValidationError{} + +// Validate checks the field values on NonForwardingAction with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *NonForwardingAction) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on NonForwardingAction with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// NonForwardingActionMultiError, or nil if none found. +func (m *NonForwardingAction) ValidateAll() error { + return m.validate(true) +} + +func (m *NonForwardingAction) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return NonForwardingActionMultiError(errors) + } + + return nil +} + +// NonForwardingActionMultiError is an error wrapping multiple validation +// errors returned by NonForwardingAction.ValidateAll() if the designated +// constraints aren't met. +type NonForwardingActionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m NonForwardingActionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m NonForwardingActionMultiError) AllErrors() []error { return m } + +// NonForwardingActionValidationError is the validation error returned by +// NonForwardingAction.Validate if the designated constraints aren't met. +type NonForwardingActionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e NonForwardingActionValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e NonForwardingActionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e NonForwardingActionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e NonForwardingActionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e NonForwardingActionValidationError) ErrorName() string { + return "NonForwardingActionValidationError" +} + +// Error satisfies the builtin error interface +func (e NonForwardingActionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sNonForwardingAction.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = NonForwardingActionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = NonForwardingActionValidationError{} + +// Validate checks the field values on Decorator with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Decorator) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Decorator with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in DecoratorMultiError, or nil +// if none found. +func (m *Decorator) ValidateAll() error { + return m.validate(true) +} + +func (m *Decorator) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetOperation()) < 1 { + err := DecoratorValidationError{ + field: "Operation", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetPropagate()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DecoratorValidationError{ + field: "Propagate", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DecoratorValidationError{ + field: "Propagate", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPropagate()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DecoratorValidationError{ + field: "Propagate", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DecoratorMultiError(errors) + } + + return nil +} + +// DecoratorMultiError is an error wrapping multiple validation errors returned +// by Decorator.ValidateAll() if the designated constraints aren't met. +type DecoratorMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DecoratorMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DecoratorMultiError) AllErrors() []error { return m } + +// DecoratorValidationError is the validation error returned by +// Decorator.Validate if the designated constraints aren't met. 
+type DecoratorValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DecoratorValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DecoratorValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DecoratorValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DecoratorValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DecoratorValidationError) ErrorName() string { return "DecoratorValidationError" } + +// Error satisfies the builtin error interface +func (e DecoratorValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDecorator.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DecoratorValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DecoratorValidationError{} + +// Validate checks the field values on Tracing with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Tracing) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Tracing with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in TracingMultiError, or nil if none found. +func (m *Tracing) ValidateAll() error { + return m.validate(true) +} + +func (m *Tracing) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetClientSampling()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TracingValidationError{ + field: "ClientSampling", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TracingValidationError{ + field: "ClientSampling", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetClientSampling()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TracingValidationError{ + field: "ClientSampling", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRandomSampling()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TracingValidationError{ + field: "RandomSampling", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TracingValidationError{ + field: "RandomSampling", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRandomSampling()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TracingValidationError{ + field: "RandomSampling", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := 
interface{}(m.GetOverallSampling()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TracingValidationError{ + field: "OverallSampling", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TracingValidationError{ + field: "OverallSampling", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOverallSampling()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TracingValidationError{ + field: "OverallSampling", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetCustomTags() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TracingValidationError{ + field: fmt.Sprintf("CustomTags[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TracingValidationError{ + field: fmt.Sprintf("CustomTags[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TracingValidationError{ + field: fmt.Sprintf("CustomTags[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return TracingMultiError(errors) + } + + return nil +} + +// TracingMultiError is an error wrapping multiple validation errors returned +// by Tracing.ValidateAll() if the designated constraints aren't met. +type TracingMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TracingMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TracingMultiError) AllErrors() []error { return m } + +// TracingValidationError is the validation error returned by Tracing.Validate +// if the designated constraints aren't met. +type TracingValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TracingValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TracingValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TracingValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TracingValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e TracingValidationError) ErrorName() string { return "TracingValidationError" } + +// Error satisfies the builtin error interface +func (e TracingValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTracing.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TracingValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TracingValidationError{} + +// Validate checks the field values on VirtualCluster with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *VirtualCluster) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on VirtualCluster with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in VirtualClusterMultiError, +// or nil if none found. +func (m *VirtualCluster) ValidateAll() error { + return m.validate(true) +} + +func (m *VirtualCluster) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetHeaders() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VirtualClusterValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VirtualClusterValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VirtualClusterValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := VirtualClusterValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return VirtualClusterMultiError(errors) + } + + return nil +} + +// VirtualClusterMultiError is an error wrapping multiple validation errors +// returned by VirtualCluster.ValidateAll() if the designated constraints +// aren't met. +type VirtualClusterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m VirtualClusterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m VirtualClusterMultiError) AllErrors() []error { return m } + +// VirtualClusterValidationError is the validation error returned by +// VirtualCluster.Validate if the designated constraints aren't met. +type VirtualClusterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e VirtualClusterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e VirtualClusterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e VirtualClusterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e VirtualClusterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e VirtualClusterValidationError) ErrorName() string { return "VirtualClusterValidationError" } + +// Error satisfies the builtin error interface +func (e VirtualClusterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sVirtualCluster.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = VirtualClusterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = VirtualClusterValidationError{} + +// Validate checks the field values on RateLimit with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RateLimit) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RateLimitMultiError, or nil +// if none found. +func (m *RateLimit) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if wrapper := m.GetStage(); wrapper != nil { + + if wrapper.GetValue() > 10 { + err := RateLimitValidationError{ + field: "Stage", + reason: "value must be less than or equal to 10", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + // no validation rules for DisableKey + + if len(m.GetActions()) < 1 { + err := RateLimitValidationError{ + field: "Actions", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetActions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimitValidationError{ + field: fmt.Sprintf("Actions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimitValidationError{ + field: fmt.Sprintf("Actions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimitValidationError{ + field: fmt.Sprintf("Actions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetLimit()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimitValidationError{ + field: "Limit", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err 
:= v.Validate(); err != nil { + errors = append(errors, RateLimitValidationError{ + field: "Limit", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLimit()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimitValidationError{ + field: "Limit", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RateLimitMultiError(errors) + } + + return nil +} + +// RateLimitMultiError is an error wrapping multiple validation errors returned +// by RateLimit.ValidateAll() if the designated constraints aren't met. +type RateLimitMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimitMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimitMultiError) AllErrors() []error { return m } + +// RateLimitValidationError is the validation error returned by +// RateLimit.Validate if the designated constraints aren't met. +type RateLimitValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimitValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimitValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimitValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimitValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimitValidationError) ErrorName() string { return "RateLimitValidationError" } + +// Error satisfies the builtin error interface +func (e RateLimitValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimitValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimitValidationError{} + +// Validate checks the field values on HeaderMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HeaderMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HeaderMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HeaderMatcherMultiError, or +// nil if none found. 
+func (m *HeaderMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *HeaderMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := HeaderMatcherValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_HeaderMatcher_Name_Pattern.MatchString(m.GetName()) { + err := HeaderMatcherValidationError{ + field: "Name", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for InvertMatch + + // no validation rules for TreatMissingHeaderAsEmpty + + switch v := m.HeaderMatchSpecifier.(type) { + case *HeaderMatcher_ExactMatch: + if v == nil { + err := HeaderMatcherValidationError{ + field: "HeaderMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for ExactMatch + case *HeaderMatcher_SafeRegexMatch: + if v == nil { + err := HeaderMatcherValidationError{ + field: "HeaderMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetSafeRegexMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HeaderMatcherValidationError{ + field: "SafeRegexMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HeaderMatcherValidationError{ + field: "SafeRegexMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSafeRegexMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HeaderMatcherValidationError{ + field: "SafeRegexMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *HeaderMatcher_RangeMatch: + if v == nil { + err := HeaderMatcherValidationError{ + field: "HeaderMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetRangeMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HeaderMatcherValidationError{ + field: "RangeMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HeaderMatcherValidationError{ + field: "RangeMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRangeMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HeaderMatcherValidationError{ + field: "RangeMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *HeaderMatcher_PresentMatch: + if v == nil { + err := HeaderMatcherValidationError{ + field: "HeaderMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for PresentMatch + case *HeaderMatcher_PrefixMatch: + if v == nil { + err := 
HeaderMatcherValidationError{ + field: "HeaderMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetPrefixMatch()) < 1 { + err := HeaderMatcherValidationError{ + field: "PrefixMatch", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *HeaderMatcher_SuffixMatch: + if v == nil { + err := HeaderMatcherValidationError{ + field: "HeaderMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetSuffixMatch()) < 1 { + err := HeaderMatcherValidationError{ + field: "SuffixMatch", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *HeaderMatcher_ContainsMatch: + if v == nil { + err := HeaderMatcherValidationError{ + field: "HeaderMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetContainsMatch()) < 1 { + err := HeaderMatcherValidationError{ + field: "ContainsMatch", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *HeaderMatcher_StringMatch: + if v == nil { + err := HeaderMatcherValidationError{ + field: "HeaderMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetStringMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HeaderMatcherValidationError{ + field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HeaderMatcherValidationError{ + field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStringMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HeaderMatcherValidationError{ + field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return HeaderMatcherMultiError(errors) + } + + return nil +} + +// HeaderMatcherMultiError is an error wrapping multiple validation errors +// returned by HeaderMatcher.ValidateAll() if the designated constraints +// aren't met. +type HeaderMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HeaderMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HeaderMatcherMultiError) AllErrors() []error { return m } + +// HeaderMatcherValidationError is the validation error returned by +// HeaderMatcher.Validate if the designated constraints aren't met. +type HeaderMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HeaderMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e HeaderMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HeaderMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HeaderMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HeaderMatcherValidationError) ErrorName() string { return "HeaderMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e HeaderMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHeaderMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HeaderMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HeaderMatcherValidationError{} + +var _HeaderMatcher_Name_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on QueryParameterMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *QueryParameterMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on QueryParameterMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// QueryParameterMatcherMultiError, or nil if none found. +func (m *QueryParameterMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *QueryParameterMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := QueryParameterMatcherValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetName()) > 1024 { + err := QueryParameterMatcherValidationError{ + field: "Name", + reason: "value length must be at most 1024 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + switch v := m.QueryParameterMatchSpecifier.(type) { + case *QueryParameterMatcher_StringMatch: + if v == nil { + err := QueryParameterMatcherValidationError{ + field: "QueryParameterMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetStringMatch() == nil { + err := QueryParameterMatcherValidationError{ + field: "StringMatch", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetStringMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QueryParameterMatcherValidationError{ + field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QueryParameterMatcherValidationError{ + field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStringMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QueryParameterMatcherValidationError{ + 
field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *QueryParameterMatcher_PresentMatch: + if v == nil { + err := QueryParameterMatcherValidationError{ + field: "QueryParameterMatchSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for PresentMatch + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return QueryParameterMatcherMultiError(errors) + } + + return nil +} + +// QueryParameterMatcherMultiError is an error wrapping multiple validation +// errors returned by QueryParameterMatcher.ValidateAll() if the designated +// constraints aren't met. +type QueryParameterMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m QueryParameterMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m QueryParameterMatcherMultiError) AllErrors() []error { return m } + +// QueryParameterMatcherValidationError is the validation error returned by +// QueryParameterMatcher.Validate if the designated constraints aren't met. +type QueryParameterMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e QueryParameterMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e QueryParameterMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e QueryParameterMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e QueryParameterMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e QueryParameterMatcherValidationError) ErrorName() string { + return "QueryParameterMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e QueryParameterMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sQueryParameterMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = QueryParameterMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = QueryParameterMatcherValidationError{} + +// Validate checks the field values on InternalRedirectPolicy with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *InternalRedirectPolicy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on InternalRedirectPolicy with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// InternalRedirectPolicyMultiError, or nil if none found. 
+func (m *InternalRedirectPolicy) ValidateAll() error { + return m.validate(true) +} + +func (m *InternalRedirectPolicy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetMaxInternalRedirects()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, InternalRedirectPolicyValidationError{ + field: "MaxInternalRedirects", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, InternalRedirectPolicyValidationError{ + field: "MaxInternalRedirects", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxInternalRedirects()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return InternalRedirectPolicyValidationError{ + field: "MaxInternalRedirects", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(m.GetRedirectResponseCodes()) > 5 { + err := InternalRedirectPolicyValidationError{ + field: "RedirectResponseCodes", + reason: "value must contain no more than 5 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetPredicates() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, InternalRedirectPolicyValidationError{ + field: fmt.Sprintf("Predicates[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, InternalRedirectPolicyValidationError{ + field: fmt.Sprintf("Predicates[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return InternalRedirectPolicyValidationError{ + field: fmt.Sprintf("Predicates[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for AllowCrossSchemeRedirect + + _InternalRedirectPolicy_ResponseHeadersToCopy_Unique := make(map[string]struct{}, len(m.GetResponseHeadersToCopy())) + + for idx, item := range m.GetResponseHeadersToCopy() { + _, _ = idx, item + + if _, exists := _InternalRedirectPolicy_ResponseHeadersToCopy_Unique[item]; exists { + err := InternalRedirectPolicyValidationError{ + field: fmt.Sprintf("ResponseHeadersToCopy[%v]", idx), + reason: "repeated value must contain unique items", + } + if !all { + return err + } + errors = append(errors, err) + } else { + _InternalRedirectPolicy_ResponseHeadersToCopy_Unique[item] = struct{}{} + } + + if !_InternalRedirectPolicy_ResponseHeadersToCopy_Pattern.MatchString(item) { + err := InternalRedirectPolicyValidationError{ + field: fmt.Sprintf("ResponseHeadersToCopy[%v]", idx), + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return InternalRedirectPolicyMultiError(errors) + } + + return nil +} + +// InternalRedirectPolicyMultiError is an error wrapping multiple validation +// errors returned by InternalRedirectPolicy.ValidateAll() if the designated +// constraints aren't met. 
+type InternalRedirectPolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m InternalRedirectPolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m InternalRedirectPolicyMultiError) AllErrors() []error { return m } + +// InternalRedirectPolicyValidationError is the validation error returned by +// InternalRedirectPolicy.Validate if the designated constraints aren't met. +type InternalRedirectPolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e InternalRedirectPolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e InternalRedirectPolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e InternalRedirectPolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e InternalRedirectPolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e InternalRedirectPolicyValidationError) ErrorName() string { + return "InternalRedirectPolicyValidationError" +} + +// Error satisfies the builtin error interface +func (e InternalRedirectPolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sInternalRedirectPolicy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = InternalRedirectPolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = InternalRedirectPolicyValidationError{} + +var _InternalRedirectPolicy_ResponseHeadersToCopy_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on FilterConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *FilterConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FilterConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in FilterConfigMultiError, or +// nil if none found. 
+func (m *FilterConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *FilterConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterConfigValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterConfigValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterConfigValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for IsOptional + + // no validation rules for Disabled + + if len(errors) > 0 { + return FilterConfigMultiError(errors) + } + + return nil +} + +// FilterConfigMultiError is an error wrapping multiple validation errors +// returned by FilterConfig.ValidateAll() if the designated constraints aren't met. +type FilterConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FilterConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FilterConfigMultiError) AllErrors() []error { return m } + +// FilterConfigValidationError is the validation error returned by +// FilterConfig.Validate if the designated constraints aren't met. +type FilterConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FilterConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FilterConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FilterConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FilterConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FilterConfigValidationError) ErrorName() string { return "FilterConfigValidationError" } + +// Error satisfies the builtin error interface +func (e FilterConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFilterConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FilterConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FilterConfigValidationError{} + +// Validate checks the field values on WeightedCluster_ClusterWeight with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *WeightedCluster_ClusterWeight) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on WeightedCluster_ClusterWeight with +// the rules defined in the proto definition for this message. 
If any rules +// are violated, the result is a list of violation errors wrapped in +// WeightedCluster_ClusterWeightMultiError, or nil if none found. +func (m *WeightedCluster_ClusterWeight) ValidateAll() error { + return m.validate(true) +} + +func (m *WeightedCluster_ClusterWeight) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + if !_WeightedCluster_ClusterWeight_ClusterHeader_Pattern.MatchString(m.GetClusterHeader()) { + err := WeightedCluster_ClusterWeightValidationError{ + field: "ClusterHeader", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetWeight()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WeightedCluster_ClusterWeightValidationError{ + field: "Weight", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WeightedCluster_ClusterWeightValidationError{ + field: "Weight", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWeight()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WeightedCluster_ClusterWeightValidationError{ + field: "Weight", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMetadataMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WeightedCluster_ClusterWeightValidationError{ + field: "MetadataMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WeightedCluster_ClusterWeightValidationError{ + field: "MetadataMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadataMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WeightedCluster_ClusterWeightValidationError{ + field: "MetadataMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(m.GetRequestHeadersToAdd()) > 1000 { + err := WeightedCluster_ClusterWeightValidationError{ + field: "RequestHeadersToAdd", + reason: "value must contain no more than 1000 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRequestHeadersToAdd() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WeightedCluster_ClusterWeightValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WeightedCluster_ClusterWeightValidationError{ + field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WeightedCluster_ClusterWeightValidationError{ + field: 
fmt.Sprintf("RequestHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetRequestHeadersToRemove() { + _, _ = idx, item + + if !_WeightedCluster_ClusterWeight_RequestHeadersToRemove_Pattern.MatchString(item) { + err := WeightedCluster_ClusterWeightValidationError{ + field: fmt.Sprintf("RequestHeadersToRemove[%v]", idx), + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(m.GetResponseHeadersToAdd()) > 1000 { + err := WeightedCluster_ClusterWeightValidationError{ + field: "ResponseHeadersToAdd", + reason: "value must contain no more than 1000 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetResponseHeadersToAdd() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WeightedCluster_ClusterWeightValidationError{ + field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WeightedCluster_ClusterWeightValidationError{ + field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WeightedCluster_ClusterWeightValidationError{ + field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetResponseHeadersToRemove() { + _, _ = idx, item + + if !_WeightedCluster_ClusterWeight_ResponseHeadersToRemove_Pattern.MatchString(item) { + err := WeightedCluster_ClusterWeightValidationError{ + field: fmt.Sprintf("ResponseHeadersToRemove[%v]", idx), + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + { + sorted_keys := make([]string, len(m.GetTypedPerFilterConfig())) + i := 0 + for key := range m.GetTypedPerFilterConfig() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetTypedPerFilterConfig()[key] + _ = val + + // no validation rules for TypedPerFilterConfig[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WeightedCluster_ClusterWeightValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WeightedCluster_ClusterWeightValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WeightedCluster_ClusterWeightValidationError{ + field: fmt.Sprintf("TypedPerFilterConfig[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } 
+ + switch v := m.HostRewriteSpecifier.(type) { + case *WeightedCluster_ClusterWeight_HostRewriteLiteral: + if v == nil { + err := WeightedCluster_ClusterWeightValidationError{ + field: "HostRewriteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_WeightedCluster_ClusterWeight_HostRewriteLiteral_Pattern.MatchString(m.GetHostRewriteLiteral()) { + err := WeightedCluster_ClusterWeightValidationError{ + field: "HostRewriteLiteral", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return WeightedCluster_ClusterWeightMultiError(errors) + } + + return nil +} + +// WeightedCluster_ClusterWeightMultiError is an error wrapping multiple +// validation errors returned by WeightedCluster_ClusterWeight.ValidateAll() +// if the designated constraints aren't met. +type WeightedCluster_ClusterWeightMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m WeightedCluster_ClusterWeightMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m WeightedCluster_ClusterWeightMultiError) AllErrors() []error { return m } + +// WeightedCluster_ClusterWeightValidationError is the validation error +// returned by WeightedCluster_ClusterWeight.Validate if the designated +// constraints aren't met. +type WeightedCluster_ClusterWeightValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e WeightedCluster_ClusterWeightValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e WeightedCluster_ClusterWeightValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e WeightedCluster_ClusterWeightValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e WeightedCluster_ClusterWeightValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e WeightedCluster_ClusterWeightValidationError) ErrorName() string { + return "WeightedCluster_ClusterWeightValidationError" +} + +// Error satisfies the builtin error interface +func (e WeightedCluster_ClusterWeightValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sWeightedCluster_ClusterWeight.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = WeightedCluster_ClusterWeightValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = WeightedCluster_ClusterWeightValidationError{} + +var _WeightedCluster_ClusterWeight_ClusterHeader_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _WeightedCluster_ClusterWeight_RequestHeadersToRemove_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _WeightedCluster_ClusterWeight_ResponseHeadersToRemove_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _WeightedCluster_ClusterWeight_HostRewriteLiteral_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on RouteMatch_GrpcRouteMatchOptions with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *RouteMatch_GrpcRouteMatchOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteMatch_GrpcRouteMatchOptions with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// RouteMatch_GrpcRouteMatchOptionsMultiError, or nil if none found. +func (m *RouteMatch_GrpcRouteMatchOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteMatch_GrpcRouteMatchOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return RouteMatch_GrpcRouteMatchOptionsMultiError(errors) + } + + return nil +} + +// RouteMatch_GrpcRouteMatchOptionsMultiError is an error wrapping multiple +// validation errors returned by +// RouteMatch_GrpcRouteMatchOptions.ValidateAll() if the designated +// constraints aren't met. +type RouteMatch_GrpcRouteMatchOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteMatch_GrpcRouteMatchOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteMatch_GrpcRouteMatchOptionsMultiError) AllErrors() []error { return m } + +// RouteMatch_GrpcRouteMatchOptionsValidationError is the validation error +// returned by RouteMatch_GrpcRouteMatchOptions.Validate if the designated +// constraints aren't met. +type RouteMatch_GrpcRouteMatchOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteMatch_GrpcRouteMatchOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteMatch_GrpcRouteMatchOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e RouteMatch_GrpcRouteMatchOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteMatch_GrpcRouteMatchOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteMatch_GrpcRouteMatchOptionsValidationError) ErrorName() string { + return "RouteMatch_GrpcRouteMatchOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteMatch_GrpcRouteMatchOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteMatch_GrpcRouteMatchOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteMatch_GrpcRouteMatchOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteMatch_GrpcRouteMatchOptionsValidationError{} + +// Validate checks the field values on RouteMatch_TlsContextMatchOptions with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *RouteMatch_TlsContextMatchOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteMatch_TlsContextMatchOptions +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// RouteMatch_TlsContextMatchOptionsMultiError, or nil if none found. +func (m *RouteMatch_TlsContextMatchOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteMatch_TlsContextMatchOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetPresented()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteMatch_TlsContextMatchOptionsValidationError{ + field: "Presented", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteMatch_TlsContextMatchOptionsValidationError{ + field: "Presented", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPresented()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteMatch_TlsContextMatchOptionsValidationError{ + field: "Presented", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetValidated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteMatch_TlsContextMatchOptionsValidationError{ + field: "Validated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteMatch_TlsContextMatchOptionsValidationError{ + field: "Validated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValidated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteMatch_TlsContextMatchOptionsValidationError{ + field: "Validated", + reason: "embedded message failed 
validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RouteMatch_TlsContextMatchOptionsMultiError(errors) + } + + return nil +} + +// RouteMatch_TlsContextMatchOptionsMultiError is an error wrapping multiple +// validation errors returned by +// RouteMatch_TlsContextMatchOptions.ValidateAll() if the designated +// constraints aren't met. +type RouteMatch_TlsContextMatchOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteMatch_TlsContextMatchOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteMatch_TlsContextMatchOptionsMultiError) AllErrors() []error { return m } + +// RouteMatch_TlsContextMatchOptionsValidationError is the validation error +// returned by RouteMatch_TlsContextMatchOptions.Validate if the designated +// constraints aren't met. +type RouteMatch_TlsContextMatchOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteMatch_TlsContextMatchOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteMatch_TlsContextMatchOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteMatch_TlsContextMatchOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteMatch_TlsContextMatchOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteMatch_TlsContextMatchOptionsValidationError) ErrorName() string { + return "RouteMatch_TlsContextMatchOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteMatch_TlsContextMatchOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteMatch_TlsContextMatchOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteMatch_TlsContextMatchOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteMatch_TlsContextMatchOptionsValidationError{} + +// Validate checks the field values on RouteMatch_ConnectMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RouteMatch_ConnectMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteMatch_ConnectMatcher with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RouteMatch_ConnectMatcherMultiError, or nil if none found. 
+func (m *RouteMatch_ConnectMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteMatch_ConnectMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return RouteMatch_ConnectMatcherMultiError(errors) + } + + return nil +} + +// RouteMatch_ConnectMatcherMultiError is an error wrapping multiple validation +// errors returned by RouteMatch_ConnectMatcher.ValidateAll() if the +// designated constraints aren't met. +type RouteMatch_ConnectMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteMatch_ConnectMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteMatch_ConnectMatcherMultiError) AllErrors() []error { return m } + +// RouteMatch_ConnectMatcherValidationError is the validation error returned by +// RouteMatch_ConnectMatcher.Validate if the designated constraints aren't met. +type RouteMatch_ConnectMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteMatch_ConnectMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteMatch_ConnectMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteMatch_ConnectMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteMatch_ConnectMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteMatch_ConnectMatcherValidationError) ErrorName() string { + return "RouteMatch_ConnectMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteMatch_ConnectMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteMatch_ConnectMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteMatch_ConnectMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteMatch_ConnectMatcherValidationError{} + +// Validate checks the field values on RouteAction_RequestMirrorPolicy with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RouteAction_RequestMirrorPolicy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteAction_RequestMirrorPolicy with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// RouteAction_RequestMirrorPolicyMultiError, or nil if none found. 
+func (m *RouteAction_RequestMirrorPolicy) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteAction_RequestMirrorPolicy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Cluster + + if !_RouteAction_RequestMirrorPolicy_ClusterHeader_Pattern.MatchString(m.GetClusterHeader()) { + err := RouteAction_RequestMirrorPolicyValidationError{ + field: "ClusterHeader", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetRuntimeFraction()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_RequestMirrorPolicyValidationError{ + field: "RuntimeFraction", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_RequestMirrorPolicyValidationError{ + field: "RuntimeFraction", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRuntimeFraction()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_RequestMirrorPolicyValidationError{ + field: "RuntimeFraction", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTraceSampled()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_RequestMirrorPolicyValidationError{ + field: "TraceSampled", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_RequestMirrorPolicyValidationError{ + field: "TraceSampled", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTraceSampled()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_RequestMirrorPolicyValidationError{ + field: "TraceSampled", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for DisableShadowHostSuffixAppend + + if len(errors) > 0 { + return RouteAction_RequestMirrorPolicyMultiError(errors) + } + + return nil +} + +// RouteAction_RequestMirrorPolicyMultiError is an error wrapping multiple +// validation errors returned by RouteAction_RequestMirrorPolicy.ValidateAll() +// if the designated constraints aren't met. +type RouteAction_RequestMirrorPolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteAction_RequestMirrorPolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteAction_RequestMirrorPolicyMultiError) AllErrors() []error { return m } + +// RouteAction_RequestMirrorPolicyValidationError is the validation error +// returned by RouteAction_RequestMirrorPolicy.Validate if the designated +// constraints aren't met. +type RouteAction_RequestMirrorPolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
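// --- Illustrative aside, not part of the generated file: a minimal usage
// sketch, assuming the surrounding generated API, of the difference between
// Validate (returns only the first violation) and ValidateAll (collects every
// violation into the MultiError slice type). The helper name is hypothetical.
func collectMirrorPolicyViolations(p *RouteAction_RequestMirrorPolicy) []string {
	err := p.ValidateAll()
	if err == nil {
		return nil
	}
	// In the generated code above, ValidateAll wraps its findings in the
	// MultiError slice type, so a single type assertion recovers them.
	if multi, ok := err.(RouteAction_RequestMirrorPolicyMultiError); ok {
		msgs := make([]string, 0, len(multi))
		for _, e := range multi.AllErrors() {
			msgs = append(msgs, e.Error())
		}
		return msgs
	}
	return []string{err.Error()}
}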
+func (e RouteAction_RequestMirrorPolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteAction_RequestMirrorPolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteAction_RequestMirrorPolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteAction_RequestMirrorPolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteAction_RequestMirrorPolicyValidationError) ErrorName() string { + return "RouteAction_RequestMirrorPolicyValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteAction_RequestMirrorPolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteAction_RequestMirrorPolicy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteAction_RequestMirrorPolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteAction_RequestMirrorPolicyValidationError{} + +var _RouteAction_RequestMirrorPolicy_ClusterHeader_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on RouteAction_HashPolicy with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RouteAction_HashPolicy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteAction_HashPolicy with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RouteAction_HashPolicyMultiError, or nil if none found. 
+func (m *RouteAction_HashPolicy) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteAction_HashPolicy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Terminal + + oneofPolicySpecifierPresent := false + switch v := m.PolicySpecifier.(type) { + case *RouteAction_HashPolicy_Header_: + if v == nil { + err := RouteAction_HashPolicyValidationError{ + field: "PolicySpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPolicySpecifierPresent = true + + if all { + switch v := interface{}(m.GetHeader()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_HashPolicyValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_HashPolicyValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeader()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_HashPolicyValidationError{ + field: "Header", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RouteAction_HashPolicy_Cookie_: + if v == nil { + err := RouteAction_HashPolicyValidationError{ + field: "PolicySpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPolicySpecifierPresent = true + + if all { + switch v := interface{}(m.GetCookie()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_HashPolicyValidationError{ + field: "Cookie", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_HashPolicyValidationError{ + field: "Cookie", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCookie()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_HashPolicyValidationError{ + field: "Cookie", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RouteAction_HashPolicy_ConnectionProperties_: + if v == nil { + err := RouteAction_HashPolicyValidationError{ + field: "PolicySpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPolicySpecifierPresent = true + + if all { + switch v := interface{}(m.GetConnectionProperties()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_HashPolicyValidationError{ + field: "ConnectionProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_HashPolicyValidationError{ + field: "ConnectionProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConnectionProperties()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
RouteAction_HashPolicyValidationError{ + field: "ConnectionProperties", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RouteAction_HashPolicy_QueryParameter_: + if v == nil { + err := RouteAction_HashPolicyValidationError{ + field: "PolicySpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPolicySpecifierPresent = true + + if all { + switch v := interface{}(m.GetQueryParameter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_HashPolicyValidationError{ + field: "QueryParameter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_HashPolicyValidationError{ + field: "QueryParameter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetQueryParameter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_HashPolicyValidationError{ + field: "QueryParameter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RouteAction_HashPolicy_FilterState_: + if v == nil { + err := RouteAction_HashPolicyValidationError{ + field: "PolicySpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPolicySpecifierPresent = true + + if all { + switch v := interface{}(m.GetFilterState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_HashPolicyValidationError{ + field: "FilterState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_HashPolicyValidationError{ + field: "FilterState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilterState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_HashPolicyValidationError{ + field: "FilterState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofPolicySpecifierPresent { + err := RouteAction_HashPolicyValidationError{ + field: "PolicySpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RouteAction_HashPolicyMultiError(errors) + } + + return nil +} + +// RouteAction_HashPolicyMultiError is an error wrapping multiple validation +// errors returned by RouteAction_HashPolicy.ValidateAll() if the designated +// constraints aren't met. +type RouteAction_HashPolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteAction_HashPolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteAction_HashPolicyMultiError) AllErrors() []error { return m } + +// RouteAction_HashPolicyValidationError is the validation error returned by +// RouteAction_HashPolicy.Validate if the designated constraints aren't met. 
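// --- Illustrative aside, not part of the generated file: a minimal sketch,
// assuming the oneof wrapper and field names match the companion .pb.go, of a
// RouteAction_HashPolicy whose required PolicySpecifier oneof is populated so
// the "value is required" rule above does not fire.
func exampleHeaderHashPolicy() *RouteAction_HashPolicy {
	return &RouteAction_HashPolicy{
		PolicySpecifier: &RouteAction_HashPolicy_Header_{
			Header: &RouteAction_HashPolicy_Header{
				// HeaderName must be at least one rune and contain no NUL, LF, or CR.
				HeaderName: ":path",
			},
		},
	}
}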
+type RouteAction_HashPolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteAction_HashPolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteAction_HashPolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteAction_HashPolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteAction_HashPolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteAction_HashPolicyValidationError) ErrorName() string { + return "RouteAction_HashPolicyValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteAction_HashPolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteAction_HashPolicy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteAction_HashPolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteAction_HashPolicyValidationError{} + +// Validate checks the field values on RouteAction_UpgradeConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RouteAction_UpgradeConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteAction_UpgradeConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RouteAction_UpgradeConfigMultiError, or nil if none found. 
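// --- Illustrative aside, not part of the generated file: a hedged sketch of
// walking the cause chain carried by validation errors such as
// RouteAction_HashPolicyValidationError above. For errors produced by
// Validate, an embedded-message failure stores the nested error in the cause
// field, so repeatedly unwrapping it yields a dotted field path. The interface
// and helper names are hypothetical.
type nestedFieldError interface {
	Field() string
	Cause() error
}

func validationFieldPath(err error) string {
	path := ""
	for err != nil {
		fe, ok := err.(nestedFieldError)
		if !ok {
			break
		}
		if path != "" {
			path += "."
		}
		path += fe.Field()
		err = fe.Cause()
	}
	return path
}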
+func (m *RouteAction_UpgradeConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteAction_UpgradeConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetUpgradeType()) < 1 { + err := RouteAction_UpgradeConfigValidationError{ + field: "UpgradeType", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_RouteAction_UpgradeConfig_UpgradeType_Pattern.MatchString(m.GetUpgradeType()) { + err := RouteAction_UpgradeConfigValidationError{ + field: "UpgradeType", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetEnabled()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_UpgradeConfigValidationError{ + field: "Enabled", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_UpgradeConfigValidationError{ + field: "Enabled", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEnabled()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_UpgradeConfigValidationError{ + field: "Enabled", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetConnectConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_UpgradeConfigValidationError{ + field: "ConnectConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_UpgradeConfigValidationError{ + field: "ConnectConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConnectConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_UpgradeConfigValidationError{ + field: "ConnectConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RouteAction_UpgradeConfigMultiError(errors) + } + + return nil +} + +// RouteAction_UpgradeConfigMultiError is an error wrapping multiple validation +// errors returned by RouteAction_UpgradeConfig.ValidateAll() if the +// designated constraints aren't met. +type RouteAction_UpgradeConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteAction_UpgradeConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteAction_UpgradeConfigMultiError) AllErrors() []error { return m } + +// RouteAction_UpgradeConfigValidationError is the validation error returned by +// RouteAction_UpgradeConfig.Validate if the designated constraints aren't met. +type RouteAction_UpgradeConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
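// --- Illustrative aside, not part of the generated file: a hedged example of
// a config that satisfies both UpgradeType rules enforced above (at least one
// rune, and no NUL, LF, or CR bytes). The field name is assumed to match the
// companion .pb.go.
func exampleWebsocketUpgradeConfig() *RouteAction_UpgradeConfig {
	return &RouteAction_UpgradeConfig{UpgradeType: "websocket"}
}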
+func (e RouteAction_UpgradeConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteAction_UpgradeConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteAction_UpgradeConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteAction_UpgradeConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteAction_UpgradeConfigValidationError) ErrorName() string { + return "RouteAction_UpgradeConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteAction_UpgradeConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteAction_UpgradeConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteAction_UpgradeConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteAction_UpgradeConfigValidationError{} + +var _RouteAction_UpgradeConfig_UpgradeType_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on RouteAction_MaxStreamDuration with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RouteAction_MaxStreamDuration) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteAction_MaxStreamDuration with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// RouteAction_MaxStreamDurationMultiError, or nil if none found. 
+func (m *RouteAction_MaxStreamDuration) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteAction_MaxStreamDuration) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetMaxStreamDuration()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_MaxStreamDurationValidationError{ + field: "MaxStreamDuration", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_MaxStreamDurationValidationError{ + field: "MaxStreamDuration", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxStreamDuration()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_MaxStreamDurationValidationError{ + field: "MaxStreamDuration", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetGrpcTimeoutHeaderMax()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_MaxStreamDurationValidationError{ + field: "GrpcTimeoutHeaderMax", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_MaxStreamDurationValidationError{ + field: "GrpcTimeoutHeaderMax", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpcTimeoutHeaderMax()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_MaxStreamDurationValidationError{ + field: "GrpcTimeoutHeaderMax", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetGrpcTimeoutHeaderOffset()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_MaxStreamDurationValidationError{ + field: "GrpcTimeoutHeaderOffset", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_MaxStreamDurationValidationError{ + field: "GrpcTimeoutHeaderOffset", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpcTimeoutHeaderOffset()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_MaxStreamDurationValidationError{ + field: "GrpcTimeoutHeaderOffset", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RouteAction_MaxStreamDurationMultiError(errors) + } + + return nil +} + +// RouteAction_MaxStreamDurationMultiError is an error wrapping multiple +// validation errors returned by RouteAction_MaxStreamDuration.ValidateAll() +// if the designated constraints aren't met. +type RouteAction_MaxStreamDurationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m RouteAction_MaxStreamDurationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteAction_MaxStreamDurationMultiError) AllErrors() []error { return m } + +// RouteAction_MaxStreamDurationValidationError is the validation error +// returned by RouteAction_MaxStreamDuration.Validate if the designated +// constraints aren't met. +type RouteAction_MaxStreamDurationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteAction_MaxStreamDurationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteAction_MaxStreamDurationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteAction_MaxStreamDurationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteAction_MaxStreamDurationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteAction_MaxStreamDurationValidationError) ErrorName() string { + return "RouteAction_MaxStreamDurationValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteAction_MaxStreamDurationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteAction_MaxStreamDuration.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteAction_MaxStreamDurationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteAction_MaxStreamDurationValidationError{} + +// Validate checks the field values on RouteAction_HashPolicy_Header with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RouteAction_HashPolicy_Header) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteAction_HashPolicy_Header with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// RouteAction_HashPolicy_HeaderMultiError, or nil if none found. 
+func (m *RouteAction_HashPolicy_Header) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteAction_HashPolicy_Header) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetHeaderName()) < 1 { + err := RouteAction_HashPolicy_HeaderValidationError{ + field: "HeaderName", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_RouteAction_HashPolicy_Header_HeaderName_Pattern.MatchString(m.GetHeaderName()) { + err := RouteAction_HashPolicy_HeaderValidationError{ + field: "HeaderName", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetRegexRewrite()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_HashPolicy_HeaderValidationError{ + field: "RegexRewrite", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_HashPolicy_HeaderValidationError{ + field: "RegexRewrite", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRegexRewrite()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_HashPolicy_HeaderValidationError{ + field: "RegexRewrite", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RouteAction_HashPolicy_HeaderMultiError(errors) + } + + return nil +} + +// RouteAction_HashPolicy_HeaderMultiError is an error wrapping multiple +// validation errors returned by RouteAction_HashPolicy_Header.ValidateAll() +// if the designated constraints aren't met. +type RouteAction_HashPolicy_HeaderMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteAction_HashPolicy_HeaderMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteAction_HashPolicy_HeaderMultiError) AllErrors() []error { return m } + +// RouteAction_HashPolicy_HeaderValidationError is the validation error +// returned by RouteAction_HashPolicy_Header.Validate if the designated +// constraints aren't met. +type RouteAction_HashPolicy_HeaderValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteAction_HashPolicy_HeaderValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteAction_HashPolicy_HeaderValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteAction_HashPolicy_HeaderValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteAction_HashPolicy_HeaderValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RouteAction_HashPolicy_HeaderValidationError) ErrorName() string { + return "RouteAction_HashPolicy_HeaderValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteAction_HashPolicy_HeaderValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteAction_HashPolicy_Header.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteAction_HashPolicy_HeaderValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteAction_HashPolicy_HeaderValidationError{} + +var _RouteAction_HashPolicy_Header_HeaderName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on RouteAction_HashPolicy_CookieAttribute +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *RouteAction_HashPolicy_CookieAttribute) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// RouteAction_HashPolicy_CookieAttribute with the rules defined in the proto +// definition for this message. If any rules are violated, the result is a +// list of violation errors wrapped in +// RouteAction_HashPolicy_CookieAttributeMultiError, or nil if none found. +func (m *RouteAction_HashPolicy_CookieAttribute) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteAction_HashPolicy_CookieAttribute) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := RouteAction_HashPolicy_CookieAttributeValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetName()) > 16384 { + err := RouteAction_HashPolicy_CookieAttributeValidationError{ + field: "Name", + reason: "value length must be at most 16384 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_RouteAction_HashPolicy_CookieAttribute_Name_Pattern.MatchString(m.GetName()) { + err := RouteAction_HashPolicy_CookieAttributeValidationError{ + field: "Name", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetValue()) > 16384 { + err := RouteAction_HashPolicy_CookieAttributeValidationError{ + field: "Value", + reason: "value length must be at most 16384 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_RouteAction_HashPolicy_CookieAttribute_Value_Pattern.MatchString(m.GetValue()) { + err := RouteAction_HashPolicy_CookieAttributeValidationError{ + field: "Value", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RouteAction_HashPolicy_CookieAttributeMultiError(errors) + } + + return nil +} + +// RouteAction_HashPolicy_CookieAttributeMultiError is an error wrapping +// multiple validation errors returned by +// RouteAction_HashPolicy_CookieAttribute.ValidateAll() if the designated +// constraints aren't met. 
+type RouteAction_HashPolicy_CookieAttributeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteAction_HashPolicy_CookieAttributeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteAction_HashPolicy_CookieAttributeMultiError) AllErrors() []error { return m } + +// RouteAction_HashPolicy_CookieAttributeValidationError is the validation +// error returned by RouteAction_HashPolicy_CookieAttribute.Validate if the +// designated constraints aren't met. +type RouteAction_HashPolicy_CookieAttributeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteAction_HashPolicy_CookieAttributeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteAction_HashPolicy_CookieAttributeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteAction_HashPolicy_CookieAttributeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteAction_HashPolicy_CookieAttributeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteAction_HashPolicy_CookieAttributeValidationError) ErrorName() string { + return "RouteAction_HashPolicy_CookieAttributeValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteAction_HashPolicy_CookieAttributeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteAction_HashPolicy_CookieAttribute.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteAction_HashPolicy_CookieAttributeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteAction_HashPolicy_CookieAttributeValidationError{} + +var _RouteAction_HashPolicy_CookieAttribute_Name_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _RouteAction_HashPolicy_CookieAttribute_Value_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on RouteAction_HashPolicy_Cookie with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RouteAction_HashPolicy_Cookie) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteAction_HashPolicy_Cookie with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// RouteAction_HashPolicy_CookieMultiError, or nil if none found. 
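// --- Illustrative aside, not part of the generated file: a minimal sketch of
// the checks applied to a cookie attribute name above. Length minimums are
// measured in runes, maximums in bytes, and the regex only excludes NUL, LF,
// and CR; the helper name is hypothetical.
func cookieAttributeNameAcceptable(name string) bool {
	if utf8.RuneCountInString(name) < 1 || len(name) > 16384 {
		return false
	}
	return _RouteAction_HashPolicy_CookieAttribute_Name_Pattern.MatchString(name)
}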
+func (m *RouteAction_HashPolicy_Cookie) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteAction_HashPolicy_Cookie) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := RouteAction_HashPolicy_CookieValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTtl()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_HashPolicy_CookieValidationError{ + field: "Ttl", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_HashPolicy_CookieValidationError{ + field: "Ttl", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTtl()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_HashPolicy_CookieValidationError{ + field: "Ttl", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Path + + for idx, item := range m.GetAttributes() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_HashPolicy_CookieValidationError{ + field: fmt.Sprintf("Attributes[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_HashPolicy_CookieValidationError{ + field: fmt.Sprintf("Attributes[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_HashPolicy_CookieValidationError{ + field: fmt.Sprintf("Attributes[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return RouteAction_HashPolicy_CookieMultiError(errors) + } + + return nil +} + +// RouteAction_HashPolicy_CookieMultiError is an error wrapping multiple +// validation errors returned by RouteAction_HashPolicy_Cookie.ValidateAll() +// if the designated constraints aren't met. +type RouteAction_HashPolicy_CookieMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteAction_HashPolicy_CookieMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteAction_HashPolicy_CookieMultiError) AllErrors() []error { return m } + +// RouteAction_HashPolicy_CookieValidationError is the validation error +// returned by RouteAction_HashPolicy_Cookie.Validate if the designated +// constraints aren't met. +type RouteAction_HashPolicy_CookieValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteAction_HashPolicy_CookieValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
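// --- Illustrative aside, not part of the generated file: a minimal sketch of
// surfacing the indexed field names produced for repeated fields (for example
// "Attributes[2]" in the cookie validation above). The helper name is
// hypothetical.
func firstCookieViolation(c *RouteAction_HashPolicy_Cookie) string {
	if err := c.Validate(); err != nil {
		if ve, ok := err.(RouteAction_HashPolicy_CookieValidationError); ok {
			return fmt.Sprintf("%s: %s", ve.Field(), ve.Reason())
		}
		return err.Error()
	}
	return ""
}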
+func (e RouteAction_HashPolicy_CookieValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteAction_HashPolicy_CookieValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteAction_HashPolicy_CookieValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteAction_HashPolicy_CookieValidationError) ErrorName() string { + return "RouteAction_HashPolicy_CookieValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteAction_HashPolicy_CookieValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteAction_HashPolicy_Cookie.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteAction_HashPolicy_CookieValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteAction_HashPolicy_CookieValidationError{} + +// Validate checks the field values on +// RouteAction_HashPolicy_ConnectionProperties with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RouteAction_HashPolicy_ConnectionProperties) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// RouteAction_HashPolicy_ConnectionProperties with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// RouteAction_HashPolicy_ConnectionPropertiesMultiError, or nil if none found. +func (m *RouteAction_HashPolicy_ConnectionProperties) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteAction_HashPolicy_ConnectionProperties) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for SourceIp + + if len(errors) > 0 { + return RouteAction_HashPolicy_ConnectionPropertiesMultiError(errors) + } + + return nil +} + +// RouteAction_HashPolicy_ConnectionPropertiesMultiError is an error wrapping +// multiple validation errors returned by +// RouteAction_HashPolicy_ConnectionProperties.ValidateAll() if the designated +// constraints aren't met. +type RouteAction_HashPolicy_ConnectionPropertiesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteAction_HashPolicy_ConnectionPropertiesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteAction_HashPolicy_ConnectionPropertiesMultiError) AllErrors() []error { return m } + +// RouteAction_HashPolicy_ConnectionPropertiesValidationError is the validation +// error returned by RouteAction_HashPolicy_ConnectionProperties.Validate if +// the designated constraints aren't met. +type RouteAction_HashPolicy_ConnectionPropertiesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteAction_HashPolicy_ConnectionPropertiesValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e RouteAction_HashPolicy_ConnectionPropertiesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteAction_HashPolicy_ConnectionPropertiesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteAction_HashPolicy_ConnectionPropertiesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteAction_HashPolicy_ConnectionPropertiesValidationError) ErrorName() string { + return "RouteAction_HashPolicy_ConnectionPropertiesValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteAction_HashPolicy_ConnectionPropertiesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteAction_HashPolicy_ConnectionProperties.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteAction_HashPolicy_ConnectionPropertiesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteAction_HashPolicy_ConnectionPropertiesValidationError{} + +// Validate checks the field values on RouteAction_HashPolicy_QueryParameter +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *RouteAction_HashPolicy_QueryParameter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteAction_HashPolicy_QueryParameter +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// RouteAction_HashPolicy_QueryParameterMultiError, or nil if none found. +func (m *RouteAction_HashPolicy_QueryParameter) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteAction_HashPolicy_QueryParameter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := RouteAction_HashPolicy_QueryParameterValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RouteAction_HashPolicy_QueryParameterMultiError(errors) + } + + return nil +} + +// RouteAction_HashPolicy_QueryParameterMultiError is an error wrapping +// multiple validation errors returned by +// RouteAction_HashPolicy_QueryParameter.ValidateAll() if the designated +// constraints aren't met. +type RouteAction_HashPolicy_QueryParameterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteAction_HashPolicy_QueryParameterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteAction_HashPolicy_QueryParameterMultiError) AllErrors() []error { return m } + +// RouteAction_HashPolicy_QueryParameterValidationError is the validation error +// returned by RouteAction_HashPolicy_QueryParameter.Validate if the +// designated constraints aren't met. 
+type RouteAction_HashPolicy_QueryParameterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteAction_HashPolicy_QueryParameterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteAction_HashPolicy_QueryParameterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteAction_HashPolicy_QueryParameterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteAction_HashPolicy_QueryParameterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteAction_HashPolicy_QueryParameterValidationError) ErrorName() string { + return "RouteAction_HashPolicy_QueryParameterValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteAction_HashPolicy_QueryParameterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteAction_HashPolicy_QueryParameter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteAction_HashPolicy_QueryParameterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteAction_HashPolicy_QueryParameterValidationError{} + +// Validate checks the field values on RouteAction_HashPolicy_FilterState with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *RouteAction_HashPolicy_FilterState) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RouteAction_HashPolicy_FilterState +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// RouteAction_HashPolicy_FilterStateMultiError, or nil if none found. +func (m *RouteAction_HashPolicy_FilterState) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteAction_HashPolicy_FilterState) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetKey()) < 1 { + err := RouteAction_HashPolicy_FilterStateValidationError{ + field: "Key", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RouteAction_HashPolicy_FilterStateMultiError(errors) + } + + return nil +} + +// RouteAction_HashPolicy_FilterStateMultiError is an error wrapping multiple +// validation errors returned by +// RouteAction_HashPolicy_FilterState.ValidateAll() if the designated +// constraints aren't met. +type RouteAction_HashPolicy_FilterStateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteAction_HashPolicy_FilterStateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m RouteAction_HashPolicy_FilterStateMultiError) AllErrors() []error { return m } + +// RouteAction_HashPolicy_FilterStateValidationError is the validation error +// returned by RouteAction_HashPolicy_FilterState.Validate if the designated +// constraints aren't met. +type RouteAction_HashPolicy_FilterStateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteAction_HashPolicy_FilterStateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteAction_HashPolicy_FilterStateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteAction_HashPolicy_FilterStateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteAction_HashPolicy_FilterStateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouteAction_HashPolicy_FilterStateValidationError) ErrorName() string { + return "RouteAction_HashPolicy_FilterStateValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteAction_HashPolicy_FilterStateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteAction_HashPolicy_FilterState.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteAction_HashPolicy_FilterStateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteAction_HashPolicy_FilterStateValidationError{} + +// Validate checks the field values on RouteAction_UpgradeConfig_ConnectConfig +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *RouteAction_UpgradeConfig_ConnectConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// RouteAction_UpgradeConfig_ConnectConfig with the rules defined in the proto +// definition for this message. If any rules are violated, the result is a +// list of violation errors wrapped in +// RouteAction_UpgradeConfig_ConnectConfigMultiError, or nil if none found. 
+func (m *RouteAction_UpgradeConfig_ConnectConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *RouteAction_UpgradeConfig_ConnectConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetProxyProtocolConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouteAction_UpgradeConfig_ConnectConfigValidationError{ + field: "ProxyProtocolConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouteAction_UpgradeConfig_ConnectConfigValidationError{ + field: "ProxyProtocolConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetProxyProtocolConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouteAction_UpgradeConfig_ConnectConfigValidationError{ + field: "ProxyProtocolConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for AllowPost + + if len(errors) > 0 { + return RouteAction_UpgradeConfig_ConnectConfigMultiError(errors) + } + + return nil +} + +// RouteAction_UpgradeConfig_ConnectConfigMultiError is an error wrapping +// multiple validation errors returned by +// RouteAction_UpgradeConfig_ConnectConfig.ValidateAll() if the designated +// constraints aren't met. +type RouteAction_UpgradeConfig_ConnectConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouteAction_UpgradeConfig_ConnectConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouteAction_UpgradeConfig_ConnectConfigMultiError) AllErrors() []error { return m } + +// RouteAction_UpgradeConfig_ConnectConfigValidationError is the validation +// error returned by RouteAction_UpgradeConfig_ConnectConfig.Validate if the +// designated constraints aren't met. +type RouteAction_UpgradeConfig_ConnectConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouteAction_UpgradeConfig_ConnectConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouteAction_UpgradeConfig_ConnectConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouteAction_UpgradeConfig_ConnectConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouteAction_UpgradeConfig_ConnectConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RouteAction_UpgradeConfig_ConnectConfigValidationError) ErrorName() string { + return "RouteAction_UpgradeConfig_ConnectConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e RouteAction_UpgradeConfig_ConnectConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouteAction_UpgradeConfig_ConnectConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouteAction_UpgradeConfig_ConnectConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouteAction_UpgradeConfig_ConnectConfigValidationError{} + +// Validate checks the field values on RetryPolicy_RetryPriority with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RetryPolicy_RetryPriority) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RetryPolicy_RetryPriority with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RetryPolicy_RetryPriorityMultiError, or nil if none found. +func (m *RetryPolicy_RetryPriority) ValidateAll() error { + return m.validate(true) +} + +func (m *RetryPolicy_RetryPriority) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := RetryPolicy_RetryPriorityValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + switch v := m.ConfigType.(type) { + case *RetryPolicy_RetryPriority_TypedConfig: + if v == nil { + err := RetryPolicy_RetryPriorityValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicy_RetryPriorityValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicy_RetryPriorityValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicy_RetryPriorityValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return RetryPolicy_RetryPriorityMultiError(errors) + } + + return nil +} + +// RetryPolicy_RetryPriorityMultiError is an error wrapping multiple validation +// errors returned by RetryPolicy_RetryPriority.ValidateAll() if the +// designated constraints aren't met. +type RetryPolicy_RetryPriorityMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m RetryPolicy_RetryPriorityMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RetryPolicy_RetryPriorityMultiError) AllErrors() []error { return m } + +// RetryPolicy_RetryPriorityValidationError is the validation error returned by +// RetryPolicy_RetryPriority.Validate if the designated constraints aren't met. +type RetryPolicy_RetryPriorityValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RetryPolicy_RetryPriorityValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RetryPolicy_RetryPriorityValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RetryPolicy_RetryPriorityValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RetryPolicy_RetryPriorityValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RetryPolicy_RetryPriorityValidationError) ErrorName() string { + return "RetryPolicy_RetryPriorityValidationError" +} + +// Error satisfies the builtin error interface +func (e RetryPolicy_RetryPriorityValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRetryPolicy_RetryPriority.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RetryPolicy_RetryPriorityValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RetryPolicy_RetryPriorityValidationError{} + +// Validate checks the field values on RetryPolicy_RetryHostPredicate with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RetryPolicy_RetryHostPredicate) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RetryPolicy_RetryHostPredicate with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// RetryPolicy_RetryHostPredicateMultiError, or nil if none found. 
+func (m *RetryPolicy_RetryHostPredicate) ValidateAll() error { + return m.validate(true) +} + +func (m *RetryPolicy_RetryHostPredicate) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := RetryPolicy_RetryHostPredicateValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + switch v := m.ConfigType.(type) { + case *RetryPolicy_RetryHostPredicate_TypedConfig: + if v == nil { + err := RetryPolicy_RetryHostPredicateValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicy_RetryHostPredicateValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicy_RetryHostPredicateValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicy_RetryHostPredicateValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return RetryPolicy_RetryHostPredicateMultiError(errors) + } + + return nil +} + +// RetryPolicy_RetryHostPredicateMultiError is an error wrapping multiple +// validation errors returned by RetryPolicy_RetryHostPredicate.ValidateAll() +// if the designated constraints aren't met. +type RetryPolicy_RetryHostPredicateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RetryPolicy_RetryHostPredicateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RetryPolicy_RetryHostPredicateMultiError) AllErrors() []error { return m } + +// RetryPolicy_RetryHostPredicateValidationError is the validation error +// returned by RetryPolicy_RetryHostPredicate.Validate if the designated +// constraints aren't met. +type RetryPolicy_RetryHostPredicateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RetryPolicy_RetryHostPredicateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RetryPolicy_RetryHostPredicateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RetryPolicy_RetryHostPredicateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RetryPolicy_RetryHostPredicateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RetryPolicy_RetryHostPredicateValidationError) ErrorName() string { + return "RetryPolicy_RetryHostPredicateValidationError" +} + +// Error satisfies the builtin error interface +func (e RetryPolicy_RetryHostPredicateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRetryPolicy_RetryHostPredicate.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RetryPolicy_RetryHostPredicateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RetryPolicy_RetryHostPredicateValidationError{} + +// Validate checks the field values on RetryPolicy_RetryBackOff with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RetryPolicy_RetryBackOff) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RetryPolicy_RetryBackOff with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RetryPolicy_RetryBackOffMultiError, or nil if none found. +func (m *RetryPolicy_RetryBackOff) ValidateAll() error { + return m.validate(true) +} + +func (m *RetryPolicy_RetryBackOff) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetBaseInterval() == nil { + err := RetryPolicy_RetryBackOffValidationError{ + field: "BaseInterval", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if d := m.GetBaseInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = RetryPolicy_RetryBackOffValidationError{ + field: "BaseInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := RetryPolicy_RetryBackOffValidationError{ + field: "BaseInterval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if d := m.GetMaxInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = RetryPolicy_RetryBackOffValidationError{ + field: "MaxInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := RetryPolicy_RetryBackOffValidationError{ + field: "MaxInterval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if len(errors) > 0 { + return RetryPolicy_RetryBackOffMultiError(errors) + } + + return nil +} + +// RetryPolicy_RetryBackOffMultiError is an error wrapping multiple validation +// errors returned by RetryPolicy_RetryBackOff.ValidateAll() if the designated +// constraints aren't met. +type RetryPolicy_RetryBackOffMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m RetryPolicy_RetryBackOffMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RetryPolicy_RetryBackOffMultiError) AllErrors() []error { return m } + +// RetryPolicy_RetryBackOffValidationError is the validation error returned by +// RetryPolicy_RetryBackOff.Validate if the designated constraints aren't met. +type RetryPolicy_RetryBackOffValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RetryPolicy_RetryBackOffValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RetryPolicy_RetryBackOffValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RetryPolicy_RetryBackOffValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RetryPolicy_RetryBackOffValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RetryPolicy_RetryBackOffValidationError) ErrorName() string { + return "RetryPolicy_RetryBackOffValidationError" +} + +// Error satisfies the builtin error interface +func (e RetryPolicy_RetryBackOffValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRetryPolicy_RetryBackOff.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RetryPolicy_RetryBackOffValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RetryPolicy_RetryBackOffValidationError{} + +// Validate checks the field values on RetryPolicy_ResetHeader with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RetryPolicy_ResetHeader) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RetryPolicy_ResetHeader with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RetryPolicy_ResetHeaderMultiError, or nil if none found. 
+func (m *RetryPolicy_ResetHeader) ValidateAll() error { + return m.validate(true) +} + +func (m *RetryPolicy_ResetHeader) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := RetryPolicy_ResetHeaderValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_RetryPolicy_ResetHeader_Name_Pattern.MatchString(m.GetName()) { + err := RetryPolicy_ResetHeaderValidationError{ + field: "Name", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if _, ok := RetryPolicy_ResetHeaderFormat_name[int32(m.GetFormat())]; !ok { + err := RetryPolicy_ResetHeaderValidationError{ + field: "Format", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RetryPolicy_ResetHeaderMultiError(errors) + } + + return nil +} + +// RetryPolicy_ResetHeaderMultiError is an error wrapping multiple validation +// errors returned by RetryPolicy_ResetHeader.ValidateAll() if the designated +// constraints aren't met. +type RetryPolicy_ResetHeaderMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RetryPolicy_ResetHeaderMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RetryPolicy_ResetHeaderMultiError) AllErrors() []error { return m } + +// RetryPolicy_ResetHeaderValidationError is the validation error returned by +// RetryPolicy_ResetHeader.Validate if the designated constraints aren't met. +type RetryPolicy_ResetHeaderValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RetryPolicy_ResetHeaderValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RetryPolicy_ResetHeaderValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RetryPolicy_ResetHeaderValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RetryPolicy_ResetHeaderValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RetryPolicy_ResetHeaderValidationError) ErrorName() string { + return "RetryPolicy_ResetHeaderValidationError" +} + +// Error satisfies the builtin error interface +func (e RetryPolicy_ResetHeaderValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRetryPolicy_ResetHeader.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RetryPolicy_ResetHeaderValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RetryPolicy_ResetHeaderValidationError{} + +var _RetryPolicy_ResetHeader_Name_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on RetryPolicy_RateLimitedRetryBackOff with +// the rules defined in the proto definition for this message. 
If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *RetryPolicy_RateLimitedRetryBackOff) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RetryPolicy_RateLimitedRetryBackOff +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// RetryPolicy_RateLimitedRetryBackOffMultiError, or nil if none found. +func (m *RetryPolicy_RateLimitedRetryBackOff) ValidateAll() error { + return m.validate(true) +} + +func (m *RetryPolicy_RateLimitedRetryBackOff) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetResetHeaders()) < 1 { + err := RetryPolicy_RateLimitedRetryBackOffValidationError{ + field: "ResetHeaders", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetResetHeaders() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RetryPolicy_RateLimitedRetryBackOffValidationError{ + field: fmt.Sprintf("ResetHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RetryPolicy_RateLimitedRetryBackOffValidationError{ + field: fmt.Sprintf("ResetHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RetryPolicy_RateLimitedRetryBackOffValidationError{ + field: fmt.Sprintf("ResetHeaders[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if d := m.GetMaxInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = RetryPolicy_RateLimitedRetryBackOffValidationError{ + field: "MaxInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := RetryPolicy_RateLimitedRetryBackOffValidationError{ + field: "MaxInterval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if len(errors) > 0 { + return RetryPolicy_RateLimitedRetryBackOffMultiError(errors) + } + + return nil +} + +// RetryPolicy_RateLimitedRetryBackOffMultiError is an error wrapping multiple +// validation errors returned by +// RetryPolicy_RateLimitedRetryBackOff.ValidateAll() if the designated +// constraints aren't met. +type RetryPolicy_RateLimitedRetryBackOffMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RetryPolicy_RateLimitedRetryBackOffMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m RetryPolicy_RateLimitedRetryBackOffMultiError) AllErrors() []error { return m } + +// RetryPolicy_RateLimitedRetryBackOffValidationError is the validation error +// returned by RetryPolicy_RateLimitedRetryBackOff.Validate if the designated +// constraints aren't met. +type RetryPolicy_RateLimitedRetryBackOffValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RetryPolicy_RateLimitedRetryBackOffValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RetryPolicy_RateLimitedRetryBackOffValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RetryPolicy_RateLimitedRetryBackOffValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RetryPolicy_RateLimitedRetryBackOffValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RetryPolicy_RateLimitedRetryBackOffValidationError) ErrorName() string { + return "RetryPolicy_RateLimitedRetryBackOffValidationError" +} + +// Error satisfies the builtin error interface +func (e RetryPolicy_RateLimitedRetryBackOffValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRetryPolicy_RateLimitedRetryBackOff.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RetryPolicy_RateLimitedRetryBackOffValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RetryPolicy_RateLimitedRetryBackOffValidationError{} + +// Validate checks the field values on RateLimit_Action with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *RateLimit_Action) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit_Action with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RateLimit_ActionMultiError, or nil if none found. 
+func (m *RateLimit_Action) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Action) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofActionSpecifierPresent := false + switch v := m.ActionSpecifier.(type) { + case *RateLimit_Action_SourceCluster_: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true + + if all { + switch v := interface{}(m.GetSourceCluster()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "SourceCluster", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "SourceCluster", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSourceCluster()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_ActionValidationError{ + field: "SourceCluster", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RateLimit_Action_DestinationCluster_: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true + + if all { + switch v := interface{}(m.GetDestinationCluster()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "DestinationCluster", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "DestinationCluster", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDestinationCluster()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_ActionValidationError{ + field: "DestinationCluster", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RateLimit_Action_RequestHeaders_: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true + + if all { + switch v := interface{}(m.GetRequestHeaders()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "RequestHeaders", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "RequestHeaders", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequestHeaders()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_ActionValidationError{ + field: "RequestHeaders", + reason: 
"embedded message failed validation", + cause: err, + } + } + } + + case *RateLimit_Action_RemoteAddress_: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true + + if all { + switch v := interface{}(m.GetRemoteAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "RemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "RemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRemoteAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_ActionValidationError{ + field: "RemoteAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RateLimit_Action_GenericKey_: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true + + if all { + switch v := interface{}(m.GetGenericKey()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "GenericKey", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "GenericKey", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGenericKey()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_ActionValidationError{ + field: "GenericKey", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RateLimit_Action_HeaderValueMatch_: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true + + if all { + switch v := interface{}(m.GetHeaderValueMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "HeaderValueMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "HeaderValueMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeaderValueMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_ActionValidationError{ + field: "HeaderValueMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RateLimit_Action_DynamicMetadata: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if 
!all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true + + if all { + switch v := interface{}(m.GetDynamicMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "DynamicMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "DynamicMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDynamicMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_ActionValidationError{ + field: "DynamicMetadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RateLimit_Action_Metadata: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_ActionValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RateLimit_Action_Extension: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true + + if all { + switch v := interface{}(m.GetExtension()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "Extension", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "Extension", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExtension()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_ActionValidationError{ + field: "Extension", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RateLimit_Action_MaskedRemoteAddress_: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true + + if all { + switch v := interface{}(m.GetMaskedRemoteAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, 
RateLimit_ActionValidationError{ + field: "MaskedRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "MaskedRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaskedRemoteAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_ActionValidationError{ + field: "MaskedRemoteAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RateLimit_Action_QueryParameterValueMatch_: + if v == nil { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofActionSpecifierPresent = true + + if all { + switch v := interface{}(m.GetQueryParameterValueMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "QueryParameterValueMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_ActionValidationError{ + field: "QueryParameterValueMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetQueryParameterValueMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_ActionValidationError{ + field: "QueryParameterValueMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofActionSpecifierPresent { + err := RateLimit_ActionValidationError{ + field: "ActionSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RateLimit_ActionMultiError(errors) + } + + return nil +} + +// RateLimit_ActionMultiError is an error wrapping multiple validation errors +// returned by RateLimit_Action.ValidateAll() if the designated constraints +// aren't met. +type RateLimit_ActionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimit_ActionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimit_ActionMultiError) AllErrors() []error { return m } + +// RateLimit_ActionValidationError is the validation error returned by +// RateLimit_Action.Validate if the designated constraints aren't met. +type RateLimit_ActionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimit_ActionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimit_ActionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_ActionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_ActionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RateLimit_ActionValidationError) ErrorName() string { return "RateLimit_ActionValidationError" } + +// Error satisfies the builtin error interface +func (e RateLimit_ActionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Action.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_ActionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_ActionValidationError{} + +// Validate checks the field values on RateLimit_Override with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RateLimit_Override) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit_Override with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RateLimit_OverrideMultiError, or nil if none found. +func (m *RateLimit_Override) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Override) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofOverrideSpecifierPresent := false + switch v := m.OverrideSpecifier.(type) { + case *RateLimit_Override_DynamicMetadata_: + if v == nil { + err := RateLimit_OverrideValidationError{ + field: "OverrideSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOverrideSpecifierPresent = true + + if all { + switch v := interface{}(m.GetDynamicMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_OverrideValidationError{ + field: "DynamicMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_OverrideValidationError{ + field: "DynamicMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDynamicMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_OverrideValidationError{ + field: "DynamicMetadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofOverrideSpecifierPresent { + err := RateLimit_OverrideValidationError{ + field: "OverrideSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RateLimit_OverrideMultiError(errors) + } + + return nil +} + +// RateLimit_OverrideMultiError is an error wrapping multiple validation errors +// returned by RateLimit_Override.ValidateAll() if the designated constraints +// aren't met. +type RateLimit_OverrideMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimit_OverrideMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m RateLimit_OverrideMultiError) AllErrors() []error { return m } + +// RateLimit_OverrideValidationError is the validation error returned by +// RateLimit_Override.Validate if the designated constraints aren't met. +type RateLimit_OverrideValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimit_OverrideValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimit_OverrideValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_OverrideValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_OverrideValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimit_OverrideValidationError) ErrorName() string { + return "RateLimit_OverrideValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimit_OverrideValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Override.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_OverrideValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_OverrideValidationError{} + +// Validate checks the field values on RateLimit_Action_SourceCluster with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RateLimit_Action_SourceCluster) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit_Action_SourceCluster with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// RateLimit_Action_SourceClusterMultiError, or nil if none found. +func (m *RateLimit_Action_SourceCluster) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Action_SourceCluster) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return RateLimit_Action_SourceClusterMultiError(errors) + } + + return nil +} + +// RateLimit_Action_SourceClusterMultiError is an error wrapping multiple +// validation errors returned by RateLimit_Action_SourceCluster.ValidateAll() +// if the designated constraints aren't met. +type RateLimit_Action_SourceClusterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimit_Action_SourceClusterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimit_Action_SourceClusterMultiError) AllErrors() []error { return m } + +// RateLimit_Action_SourceClusterValidationError is the validation error +// returned by RateLimit_Action_SourceCluster.Validate if the designated +// constraints aren't met. +type RateLimit_Action_SourceClusterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e RateLimit_Action_SourceClusterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimit_Action_SourceClusterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_Action_SourceClusterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_Action_SourceClusterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimit_Action_SourceClusterValidationError) ErrorName() string { + return "RateLimit_Action_SourceClusterValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimit_Action_SourceClusterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Action_SourceCluster.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_Action_SourceClusterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_Action_SourceClusterValidationError{} + +// Validate checks the field values on RateLimit_Action_DestinationCluster with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *RateLimit_Action_DestinationCluster) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit_Action_DestinationCluster +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// RateLimit_Action_DestinationClusterMultiError, or nil if none found. +func (m *RateLimit_Action_DestinationCluster) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Action_DestinationCluster) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return RateLimit_Action_DestinationClusterMultiError(errors) + } + + return nil +} + +// RateLimit_Action_DestinationClusterMultiError is an error wrapping multiple +// validation errors returned by +// RateLimit_Action_DestinationCluster.ValidateAll() if the designated +// constraints aren't met. +type RateLimit_Action_DestinationClusterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimit_Action_DestinationClusterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimit_Action_DestinationClusterMultiError) AllErrors() []error { return m } + +// RateLimit_Action_DestinationClusterValidationError is the validation error +// returned by RateLimit_Action_DestinationCluster.Validate if the designated +// constraints aren't met. +type RateLimit_Action_DestinationClusterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimit_Action_DestinationClusterValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e RateLimit_Action_DestinationClusterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_Action_DestinationClusterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_Action_DestinationClusterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimit_Action_DestinationClusterValidationError) ErrorName() string { + return "RateLimit_Action_DestinationClusterValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimit_Action_DestinationClusterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Action_DestinationCluster.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_Action_DestinationClusterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_Action_DestinationClusterValidationError{} + +// Validate checks the field values on RateLimit_Action_RequestHeaders with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RateLimit_Action_RequestHeaders) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit_Action_RequestHeaders with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// RateLimit_Action_RequestHeadersMultiError, or nil if none found. +func (m *RateLimit_Action_RequestHeaders) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Action_RequestHeaders) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetHeaderName()) < 1 { + err := RateLimit_Action_RequestHeadersValidationError{ + field: "HeaderName", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_RateLimit_Action_RequestHeaders_HeaderName_Pattern.MatchString(m.GetHeaderName()) { + err := RateLimit_Action_RequestHeadersValidationError{ + field: "HeaderName", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetDescriptorKey()) < 1 { + err := RateLimit_Action_RequestHeadersValidationError{ + field: "DescriptorKey", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for SkipIfAbsent + + if len(errors) > 0 { + return RateLimit_Action_RequestHeadersMultiError(errors) + } + + return nil +} + +// RateLimit_Action_RequestHeadersMultiError is an error wrapping multiple +// validation errors returned by RateLimit_Action_RequestHeaders.ValidateAll() +// if the designated constraints aren't met. +type RateLimit_Action_RequestHeadersMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m RateLimit_Action_RequestHeadersMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimit_Action_RequestHeadersMultiError) AllErrors() []error { return m } + +// RateLimit_Action_RequestHeadersValidationError is the validation error +// returned by RateLimit_Action_RequestHeaders.Validate if the designated +// constraints aren't met. +type RateLimit_Action_RequestHeadersValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimit_Action_RequestHeadersValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimit_Action_RequestHeadersValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_Action_RequestHeadersValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_Action_RequestHeadersValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimit_Action_RequestHeadersValidationError) ErrorName() string { + return "RateLimit_Action_RequestHeadersValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimit_Action_RequestHeadersValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Action_RequestHeaders.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_Action_RequestHeadersValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_Action_RequestHeadersValidationError{} + +var _RateLimit_Action_RequestHeaders_HeaderName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on RateLimit_Action_RemoteAddress with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RateLimit_Action_RemoteAddress) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit_Action_RemoteAddress with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// RateLimit_Action_RemoteAddressMultiError, or nil if none found. +func (m *RateLimit_Action_RemoteAddress) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Action_RemoteAddress) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return RateLimit_Action_RemoteAddressMultiError(errors) + } + + return nil +} + +// RateLimit_Action_RemoteAddressMultiError is an error wrapping multiple +// validation errors returned by RateLimit_Action_RemoteAddress.ValidateAll() +// if the designated constraints aren't met. +type RateLimit_Action_RemoteAddressMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m RateLimit_Action_RemoteAddressMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimit_Action_RemoteAddressMultiError) AllErrors() []error { return m } + +// RateLimit_Action_RemoteAddressValidationError is the validation error +// returned by RateLimit_Action_RemoteAddress.Validate if the designated +// constraints aren't met. +type RateLimit_Action_RemoteAddressValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimit_Action_RemoteAddressValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimit_Action_RemoteAddressValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_Action_RemoteAddressValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_Action_RemoteAddressValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimit_Action_RemoteAddressValidationError) ErrorName() string { + return "RateLimit_Action_RemoteAddressValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimit_Action_RemoteAddressValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Action_RemoteAddress.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_Action_RemoteAddressValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_Action_RemoteAddressValidationError{} + +// Validate checks the field values on RateLimit_Action_MaskedRemoteAddress +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *RateLimit_Action_MaskedRemoteAddress) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit_Action_MaskedRemoteAddress +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// RateLimit_Action_MaskedRemoteAddressMultiError, or nil if none found. 
+func (m *RateLimit_Action_MaskedRemoteAddress) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Action_MaskedRemoteAddress) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if wrapper := m.GetV4PrefixMaskLen(); wrapper != nil { + + if wrapper.GetValue() > 32 { + err := RateLimit_Action_MaskedRemoteAddressValidationError{ + field: "V4PrefixMaskLen", + reason: "value must be less than or equal to 32", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetV6PrefixMaskLen(); wrapper != nil { + + if wrapper.GetValue() > 128 { + err := RateLimit_Action_MaskedRemoteAddressValidationError{ + field: "V6PrefixMaskLen", + reason: "value must be less than or equal to 128", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return RateLimit_Action_MaskedRemoteAddressMultiError(errors) + } + + return nil +} + +// RateLimit_Action_MaskedRemoteAddressMultiError is an error wrapping multiple +// validation errors returned by +// RateLimit_Action_MaskedRemoteAddress.ValidateAll() if the designated +// constraints aren't met. +type RateLimit_Action_MaskedRemoteAddressMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimit_Action_MaskedRemoteAddressMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimit_Action_MaskedRemoteAddressMultiError) AllErrors() []error { return m } + +// RateLimit_Action_MaskedRemoteAddressValidationError is the validation error +// returned by RateLimit_Action_MaskedRemoteAddress.Validate if the designated +// constraints aren't met. +type RateLimit_Action_MaskedRemoteAddressValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimit_Action_MaskedRemoteAddressValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimit_Action_MaskedRemoteAddressValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_Action_MaskedRemoteAddressValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_Action_MaskedRemoteAddressValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimit_Action_MaskedRemoteAddressValidationError) ErrorName() string { + return "RateLimit_Action_MaskedRemoteAddressValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimit_Action_MaskedRemoteAddressValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Action_MaskedRemoteAddress.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_Action_MaskedRemoteAddressValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_Action_MaskedRemoteAddressValidationError{} + +// Validate checks the field values on RateLimit_Action_GenericKey with the +// rules defined in the proto definition for this message. 
If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RateLimit_Action_GenericKey) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit_Action_GenericKey with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RateLimit_Action_GenericKeyMultiError, or nil if none found. +func (m *RateLimit_Action_GenericKey) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Action_GenericKey) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetDescriptorValue()) < 1 { + err := RateLimit_Action_GenericKeyValidationError{ + field: "DescriptorValue", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for DescriptorKey + + if len(errors) > 0 { + return RateLimit_Action_GenericKeyMultiError(errors) + } + + return nil +} + +// RateLimit_Action_GenericKeyMultiError is an error wrapping multiple +// validation errors returned by RateLimit_Action_GenericKey.ValidateAll() if +// the designated constraints aren't met. +type RateLimit_Action_GenericKeyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimit_Action_GenericKeyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimit_Action_GenericKeyMultiError) AllErrors() []error { return m } + +// RateLimit_Action_GenericKeyValidationError is the validation error returned +// by RateLimit_Action_GenericKey.Validate if the designated constraints +// aren't met. +type RateLimit_Action_GenericKeyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimit_Action_GenericKeyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimit_Action_GenericKeyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_Action_GenericKeyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_Action_GenericKeyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimit_Action_GenericKeyValidationError) ErrorName() string { + return "RateLimit_Action_GenericKeyValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimit_Action_GenericKeyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Action_GenericKey.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_Action_GenericKeyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_Action_GenericKeyValidationError{} + +// Validate checks the field values on RateLimit_Action_HeaderValueMatch with +// the rules defined in the proto definition for this message. 
If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *RateLimit_Action_HeaderValueMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit_Action_HeaderValueMatch +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// RateLimit_Action_HeaderValueMatchMultiError, or nil if none found. +func (m *RateLimit_Action_HeaderValueMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Action_HeaderValueMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for DescriptorKey + + if utf8.RuneCountInString(m.GetDescriptorValue()) < 1 { + err := RateLimit_Action_HeaderValueMatchValidationError{ + field: "DescriptorValue", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetExpectMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_Action_HeaderValueMatchValidationError{ + field: "ExpectMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_Action_HeaderValueMatchValidationError{ + field: "ExpectMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExpectMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_Action_HeaderValueMatchValidationError{ + field: "ExpectMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(m.GetHeaders()) < 1 { + err := RateLimit_Action_HeaderValueMatchValidationError{ + field: "Headers", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetHeaders() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_Action_HeaderValueMatchValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_Action_HeaderValueMatchValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_Action_HeaderValueMatchValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return RateLimit_Action_HeaderValueMatchMultiError(errors) + } + + return nil +} + +// RateLimit_Action_HeaderValueMatchMultiError is an error wrapping multiple +// validation errors returned by +// RateLimit_Action_HeaderValueMatch.ValidateAll() if the designated +// constraints aren't met. +type RateLimit_Action_HeaderValueMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m RateLimit_Action_HeaderValueMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimit_Action_HeaderValueMatchMultiError) AllErrors() []error { return m } + +// RateLimit_Action_HeaderValueMatchValidationError is the validation error +// returned by RateLimit_Action_HeaderValueMatch.Validate if the designated +// constraints aren't met. +type RateLimit_Action_HeaderValueMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimit_Action_HeaderValueMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimit_Action_HeaderValueMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_Action_HeaderValueMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_Action_HeaderValueMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimit_Action_HeaderValueMatchValidationError) ErrorName() string { + return "RateLimit_Action_HeaderValueMatchValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimit_Action_HeaderValueMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Action_HeaderValueMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_Action_HeaderValueMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_Action_HeaderValueMatchValidationError{} + +// Validate checks the field values on RateLimit_Action_DynamicMetaData with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *RateLimit_Action_DynamicMetaData) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit_Action_DynamicMetaData with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// RateLimit_Action_DynamicMetaDataMultiError, or nil if none found. 
+func (m *RateLimit_Action_DynamicMetaData) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Action_DynamicMetaData) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetDescriptorKey()) < 1 { + err := RateLimit_Action_DynamicMetaDataValidationError{ + field: "DescriptorKey", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetMetadataKey() == nil { + err := RateLimit_Action_DynamicMetaDataValidationError{ + field: "MetadataKey", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetMetadataKey()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_Action_DynamicMetaDataValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_Action_DynamicMetaDataValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadataKey()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_Action_DynamicMetaDataValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for DefaultValue + + if len(errors) > 0 { + return RateLimit_Action_DynamicMetaDataMultiError(errors) + } + + return nil +} + +// RateLimit_Action_DynamicMetaDataMultiError is an error wrapping multiple +// validation errors returned by +// RateLimit_Action_DynamicMetaData.ValidateAll() if the designated +// constraints aren't met. +type RateLimit_Action_DynamicMetaDataMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimit_Action_DynamicMetaDataMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimit_Action_DynamicMetaDataMultiError) AllErrors() []error { return m } + +// RateLimit_Action_DynamicMetaDataValidationError is the validation error +// returned by RateLimit_Action_DynamicMetaData.Validate if the designated +// constraints aren't met. +type RateLimit_Action_DynamicMetaDataValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimit_Action_DynamicMetaDataValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimit_Action_DynamicMetaDataValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_Action_DynamicMetaDataValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_Action_DynamicMetaDataValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RateLimit_Action_DynamicMetaDataValidationError) ErrorName() string { + return "RateLimit_Action_DynamicMetaDataValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimit_Action_DynamicMetaDataValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Action_DynamicMetaData.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_Action_DynamicMetaDataValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_Action_DynamicMetaDataValidationError{} + +// Validate checks the field values on RateLimit_Action_MetaData with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RateLimit_Action_MetaData) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit_Action_MetaData with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RateLimit_Action_MetaDataMultiError, or nil if none found. +func (m *RateLimit_Action_MetaData) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Action_MetaData) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetDescriptorKey()) < 1 { + err := RateLimit_Action_MetaDataValidationError{ + field: "DescriptorKey", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetMetadataKey() == nil { + err := RateLimit_Action_MetaDataValidationError{ + field: "MetadataKey", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetMetadataKey()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_Action_MetaDataValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_Action_MetaDataValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadataKey()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_Action_MetaDataValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for DefaultValue + + if _, ok := RateLimit_Action_MetaData_Source_name[int32(m.GetSource())]; !ok { + err := RateLimit_Action_MetaDataValidationError{ + field: "Source", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for SkipIfAbsent + + if len(errors) > 0 { + return RateLimit_Action_MetaDataMultiError(errors) + } + + return nil +} + +// RateLimit_Action_MetaDataMultiError is an error wrapping multiple validation +// errors returned by RateLimit_Action_MetaData.ValidateAll() if the +// designated constraints aren't met. 
+type RateLimit_Action_MetaDataMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimit_Action_MetaDataMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimit_Action_MetaDataMultiError) AllErrors() []error { return m } + +// RateLimit_Action_MetaDataValidationError is the validation error returned by +// RateLimit_Action_MetaData.Validate if the designated constraints aren't met. +type RateLimit_Action_MetaDataValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimit_Action_MetaDataValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimit_Action_MetaDataValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_Action_MetaDataValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_Action_MetaDataValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimit_Action_MetaDataValidationError) ErrorName() string { + return "RateLimit_Action_MetaDataValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimit_Action_MetaDataValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Action_MetaData.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_Action_MetaDataValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_Action_MetaDataValidationError{} + +// Validate checks the field values on +// RateLimit_Action_QueryParameterValueMatch with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RateLimit_Action_QueryParameterValueMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// RateLimit_Action_QueryParameterValueMatch with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// RateLimit_Action_QueryParameterValueMatchMultiError, or nil if none found. 
+func (m *RateLimit_Action_QueryParameterValueMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Action_QueryParameterValueMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for DescriptorKey + + if utf8.RuneCountInString(m.GetDescriptorValue()) < 1 { + err := RateLimit_Action_QueryParameterValueMatchValidationError{ + field: "DescriptorValue", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetExpectMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_Action_QueryParameterValueMatchValidationError{ + field: "ExpectMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_Action_QueryParameterValueMatchValidationError{ + field: "ExpectMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExpectMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_Action_QueryParameterValueMatchValidationError{ + field: "ExpectMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(m.GetQueryParameters()) < 1 { + err := RateLimit_Action_QueryParameterValueMatchValidationError{ + field: "QueryParameters", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetQueryParameters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_Action_QueryParameterValueMatchValidationError{ + field: fmt.Sprintf("QueryParameters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_Action_QueryParameterValueMatchValidationError{ + field: fmt.Sprintf("QueryParameters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_Action_QueryParameterValueMatchValidationError{ + field: fmt.Sprintf("QueryParameters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return RateLimit_Action_QueryParameterValueMatchMultiError(errors) + } + + return nil +} + +// RateLimit_Action_QueryParameterValueMatchMultiError is an error wrapping +// multiple validation errors returned by +// RateLimit_Action_QueryParameterValueMatch.ValidateAll() if the designated +// constraints aren't met. +type RateLimit_Action_QueryParameterValueMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimit_Action_QueryParameterValueMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m RateLimit_Action_QueryParameterValueMatchMultiError) AllErrors() []error { return m } + +// RateLimit_Action_QueryParameterValueMatchValidationError is the validation +// error returned by RateLimit_Action_QueryParameterValueMatch.Validate if the +// designated constraints aren't met. +type RateLimit_Action_QueryParameterValueMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimit_Action_QueryParameterValueMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimit_Action_QueryParameterValueMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_Action_QueryParameterValueMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_Action_QueryParameterValueMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimit_Action_QueryParameterValueMatchValidationError) ErrorName() string { + return "RateLimit_Action_QueryParameterValueMatchValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimit_Action_QueryParameterValueMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Action_QueryParameterValueMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_Action_QueryParameterValueMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_Action_QueryParameterValueMatchValidationError{} + +// Validate checks the field values on RateLimit_Override_DynamicMetadata with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *RateLimit_Override_DynamicMetadata) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimit_Override_DynamicMetadata +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// RateLimit_Override_DynamicMetadataMultiError, or nil if none found. 
+func (m *RateLimit_Override_DynamicMetadata) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimit_Override_DynamicMetadata) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetMetadataKey() == nil { + err := RateLimit_Override_DynamicMetadataValidationError{ + field: "MetadataKey", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetMetadataKey()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimit_Override_DynamicMetadataValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimit_Override_DynamicMetadataValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadataKey()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimit_Override_DynamicMetadataValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RateLimit_Override_DynamicMetadataMultiError(errors) + } + + return nil +} + +// RateLimit_Override_DynamicMetadataMultiError is an error wrapping multiple +// validation errors returned by +// RateLimit_Override_DynamicMetadata.ValidateAll() if the designated +// constraints aren't met. +type RateLimit_Override_DynamicMetadataMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimit_Override_DynamicMetadataMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimit_Override_DynamicMetadataMultiError) AllErrors() []error { return m } + +// RateLimit_Override_DynamicMetadataValidationError is the validation error +// returned by RateLimit_Override_DynamicMetadata.Validate if the designated +// constraints aren't met. +type RateLimit_Override_DynamicMetadataValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimit_Override_DynamicMetadataValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimit_Override_DynamicMetadataValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimit_Override_DynamicMetadataValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimit_Override_DynamicMetadataValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RateLimit_Override_DynamicMetadataValidationError) ErrorName() string { + return "RateLimit_Override_DynamicMetadataValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimit_Override_DynamicMetadataValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimit_Override_DynamicMetadata.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimit_Override_DynamicMetadataValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimit_Override_DynamicMetadataValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components_vtproto.pb.go new file mode 100644 index 0000000000..79709bb972 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components_vtproto.pb.go @@ -0,0 +1,8302 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/route/v3/route_components.proto + +package routev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *VirtualHost) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VirtualHost) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *VirtualHost) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if m.IncludeIsTimeoutRetryHeader { + i-- + if m.IncludeIsTimeoutRetryHeader { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb8 + } + if len(m.RequestMirrorPolicies) > 0 { + for iNdEx := len(m.RequestMirrorPolicies) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RequestMirrorPolicies[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + } + if m.Matcher != nil { + if vtmsg, ok := interface{}(m.Matcher).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Matcher) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if m.RetryPolicyTypedConfig != nil { + size, err := (*anypb.Any)(m.RetryPolicyTypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if m.IncludeAttemptCountInResponse { + i-- + if m.IncludeAttemptCountInResponse { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if m.PerRequestBufferLimitBytes != nil { + size, err := (*wrapperspb.UInt32Value)(m.PerRequestBufferLimitBytes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if m.HedgePolicy != nil { + size, err := m.HedgePolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.RetryPolicy != nil { + size, err := m.RetryPolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i 
-= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if len(m.TypedPerFilterConfig) > 0 { + for k := range m.TypedPerFilterConfig { + v := m.TypedPerFilterConfig[k] + baseI := i + size, err := (*anypb.Any)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x7a + } + } + if m.IncludeRequestAttemptCount { + i-- + if m.IncludeRequestAttemptCount { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + } + if len(m.RequestHeadersToRemove) > 0 { + for iNdEx := len(m.RequestHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequestHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.RequestHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RequestHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x6a + } + } + if len(m.ResponseHeadersToRemove) > 0 { + for iNdEx := len(m.ResponseHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResponseHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.ResponseHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x5a + } + } + if len(m.ResponseHeadersToAdd) > 0 { + for iNdEx := len(m.ResponseHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ResponseHeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ResponseHeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x52 + } + } + if m.Cors != nil { + size, err := m.Cors.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if len(m.RequestHeadersToAdd) > 0 { + for iNdEx := len(m.RequestHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.RequestHeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RequestHeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + } + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RateLimits[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.VirtualClusters) > 0 { + for iNdEx := len(m.VirtualClusters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.VirtualClusters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.RequireTls != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RequireTls)) + i-- + dAtA[i] = 0x20 + } + if len(m.Routes) > 0 { + for iNdEx := len(m.Routes) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Routes[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Domains) > 0 { + for iNdEx := len(m.Domains) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Domains[iNdEx]) + copy(dAtA[i:], m.Domains[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Domains[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FilterAction) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FilterAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FilterAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Action != nil { + size, err := (*anypb.Any)(m.Action).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteList) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Routes) > 0 { + for iNdEx := len(m.Routes) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Routes[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Route) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Route) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Route) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.StatPrefix) > 0 { + i -= len(m.StatPrefix) + 
copy(dAtA[i:], m.StatPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StatPrefix))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if msg, ok := m.Action.(*Route_NonForwardingAction); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Action.(*Route_FilterAction); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.PerRequestBufferLimitBytes != nil { + size, err := (*wrapperspb.UInt32Value)(m.PerRequestBufferLimitBytes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.Tracing != nil { + size, err := m.Tracing.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x72 + } + if len(m.TypedPerFilterConfig) > 0 { + for k := range m.TypedPerFilterConfig { + v := m.TypedPerFilterConfig[k] + baseI := i + size, err := (*anypb.Any)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x6a + } + } + if len(m.RequestHeadersToRemove) > 0 { + for iNdEx := len(m.RequestHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequestHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.RequestHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RequestHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x62 + } + } + if len(m.ResponseHeadersToRemove) > 0 { + for iNdEx := len(m.ResponseHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResponseHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.ResponseHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x5a + } + } + if len(m.ResponseHeadersToAdd) > 0 { + for iNdEx := len(m.ResponseHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ResponseHeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ResponseHeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x52 + } + } + if len(m.RequestHeadersToAdd) > 0 { + for iNdEx := len(m.RequestHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.RequestHeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RequestHeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) 
+ copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + } + if msg, ok := m.Action.(*Route_DirectResponse); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Decorator != nil { + size, err := m.Decorator.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if msg, ok := m.Action.(*Route_Redirect); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Action.(*Route_Route); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Match != nil { + size, err := m.Match.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Route_Route) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Route_Route) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Route != nil { + size, err := m.Route.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Route_Redirect) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Route_Redirect) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Redirect != nil { + size, err := m.Redirect.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Route_DirectResponse) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Route_DirectResponse) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DirectResponse != nil { + size, err := m.DirectResponse.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *Route_FilterAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m 
*Route_FilterAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FilterAction != nil { + size, err := m.FilterAction.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + return len(dAtA) - i, nil +} +func (m *Route_NonForwardingAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Route_NonForwardingAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NonForwardingAction != nil { + size, err := m.NonForwardingAction.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + return len(dAtA) - i, nil +} +func (m *WeightedCluster_ClusterWeight) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WeightedCluster_ClusterWeight) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *WeightedCluster_ClusterWeight) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ClusterHeader) > 0 { + i -= len(m.ClusterHeader) + copy(dAtA[i:], m.ClusterHeader) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterHeader))) + i-- + dAtA[i] = 0x62 + } + if msg, ok := m.HostRewriteSpecifier.(*WeightedCluster_ClusterWeight_HostRewriteLiteral); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.TypedPerFilterConfig) > 0 { + for k := range m.TypedPerFilterConfig { + v := m.TypedPerFilterConfig[k] + baseI := i + size, err := (*anypb.Any)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x52 + } + } + if len(m.RequestHeadersToRemove) > 0 { + for iNdEx := len(m.RequestHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequestHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.RequestHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RequestHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x4a + } + } + if len(m.ResponseHeadersToRemove) > 0 { + for iNdEx := len(m.ResponseHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResponseHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.ResponseHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.ResponseHeadersToAdd) > 0 { + for iNdEx := len(m.ResponseHeadersToAdd) - 
1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ResponseHeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ResponseHeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.RequestHeadersToAdd) > 0 { + for iNdEx := len(m.RequestHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.RequestHeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RequestHeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + } + if m.MetadataMatch != nil { + if vtmsg, ok := interface{}(m.MetadataMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MetadataMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.Weight != nil { + size, err := (*wrapperspb.UInt32Value)(m.Weight).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WeightedCluster_ClusterWeight_HostRewriteLiteral) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *WeightedCluster_ClusterWeight_HostRewriteLiteral) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.HostRewriteLiteral) + copy(dAtA[i:], m.HostRewriteLiteral) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HostRewriteLiteral))) + i-- + dAtA[i] = 0x5a + return len(dAtA) - i, nil +} +func (m *WeightedCluster) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WeightedCluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *WeightedCluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.RandomValueSpecifier.(*WeightedCluster_HeaderName); ok { + size, err := 
msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.TotalWeight != nil { + size, err := (*wrapperspb.UInt32Value)(m.TotalWeight).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.RuntimeKeyPrefix) > 0 { + i -= len(m.RuntimeKeyPrefix) + copy(dAtA[i:], m.RuntimeKeyPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RuntimeKeyPrefix))) + i-- + dAtA[i] = 0x12 + } + if len(m.Clusters) > 0 { + for iNdEx := len(m.Clusters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Clusters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *WeightedCluster_HeaderName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *WeightedCluster_HeaderName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.HeaderName) + copy(dAtA[i:], m.HeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderName))) + i-- + dAtA[i] = 0x22 + return len(dAtA) - i, nil +} +func (m *ClusterSpecifierPlugin) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterSpecifierPlugin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterSpecifierPlugin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.IsOptional { + i-- + if m.IsOptional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Extension != nil { + if vtmsg, ok := interface{}(m.Extension).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Extension) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteMatch_GrpcRouteMatchOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteMatch_GrpcRouteMatchOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_GrpcRouteMatchOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m 
*RouteMatch_TlsContextMatchOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteMatch_TlsContextMatchOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_TlsContextMatchOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Validated != nil { + size, err := (*wrapperspb.BoolValue)(m.Validated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Presented != nil { + size, err := (*wrapperspb.BoolValue)(m.Presented).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteMatch_ConnectMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteMatch_ConnectMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_ConnectMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *RouteMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.PathSpecifier.(*RouteMatch_PathMatchPolicy); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PathSpecifier.(*RouteMatch_PathSeparatedPrefix); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.DynamicMetadata) > 0 { + for iNdEx := len(m.DynamicMetadata) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.DynamicMetadata[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DynamicMetadata[iNdEx]) + if err != 
nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + } + if msg, ok := m.PathSpecifier.(*RouteMatch_ConnectMatcher_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.TlsContext != nil { + size, err := m.TlsContext.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if msg, ok := m.PathSpecifier.(*RouteMatch_SafeRegex); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.RuntimeFraction != nil { + if vtmsg, ok := interface{}(m.RuntimeFraction).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RuntimeFraction) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + if m.Grpc != nil { + size, err := m.Grpc.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if len(m.QueryParameters) > 0 { + for iNdEx := len(m.QueryParameters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.QueryParameters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Headers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + } + if m.CaseSensitive != nil { + size, err := (*wrapperspb.BoolValue)(m.CaseSensitive).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if msg, ok := m.PathSpecifier.(*RouteMatch_Path); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PathSpecifier.(*RouteMatch_Prefix); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *RouteMatch_Prefix) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_Prefix) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *RouteMatch_Path) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_Path) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + 
dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *RouteMatch_SafeRegex) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_SafeRegex) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SafeRegex != nil { + if vtmsg, ok := interface{}(m.SafeRegex).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SafeRegex) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x52 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *RouteMatch_ConnectMatcher_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_ConnectMatcher_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ConnectMatcher != nil { + size, err := m.ConnectMatcher.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x62 + } + return len(dAtA) - i, nil +} +func (m *RouteMatch_PathSeparatedPrefix) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_PathSeparatedPrefix) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.PathSeparatedPrefix) + copy(dAtA[i:], m.PathSeparatedPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PathSeparatedPrefix))) + i-- + dAtA[i] = 0x72 + return len(dAtA) - i, nil +} +func (m *RouteMatch_PathMatchPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteMatch_PathMatchPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PathMatchPolicy != nil { + if vtmsg, ok := interface{}(m.PathMatchPolicy).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.PathMatchPolicy) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x7a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x7a + } + return len(dAtA) - i, nil +} +func (m *CorsPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CorsPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CorsPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } 
+ i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ForwardNotMatchingPreflights != nil { + size, err := (*wrapperspb.BoolValue)(m.ForwardNotMatchingPreflights).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + if m.AllowPrivateNetworkAccess != nil { + size, err := (*wrapperspb.BoolValue)(m.AllowPrivateNetworkAccess).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if len(m.AllowOriginStringMatch) > 0 { + for iNdEx := len(m.AllowOriginStringMatch) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.AllowOriginStringMatch[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AllowOriginStringMatch[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x5a + } + } + if m.ShadowEnabled != nil { + if vtmsg, ok := interface{}(m.ShadowEnabled).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ShadowEnabled) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x52 + } + if msg, ok := m.EnabledSpecifier.(*CorsPolicy_FilterEnabled); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.AllowCredentials != nil { + size, err := (*wrapperspb.BoolValue)(m.AllowCredentials).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.MaxAge) > 0 { + i -= len(m.MaxAge) + copy(dAtA[i:], m.MaxAge) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.MaxAge))) + i-- + dAtA[i] = 0x2a + } + if len(m.ExposeHeaders) > 0 { + i -= len(m.ExposeHeaders) + copy(dAtA[i:], m.ExposeHeaders) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ExposeHeaders))) + i-- + dAtA[i] = 0x22 + } + if len(m.AllowHeaders) > 0 { + i -= len(m.AllowHeaders) + copy(dAtA[i:], m.AllowHeaders) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AllowHeaders))) + i-- + dAtA[i] = 0x1a + } + if len(m.AllowMethods) > 0 { + i -= len(m.AllowMethods) + copy(dAtA[i:], m.AllowMethods) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AllowMethods))) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func (m *CorsPolicy_FilterEnabled) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CorsPolicy_FilterEnabled) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FilterEnabled != nil { + if vtmsg, ok := interface{}(m.FilterEnabled).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, 
error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.FilterEnabled) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *RouteAction_RequestMirrorPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_RequestMirrorPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_RequestMirrorPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DisableShadowHostSuffixAppend { + i-- + if m.DisableShadowHostSuffixAppend { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.ClusterHeader) > 0 { + i -= len(m.ClusterHeader) + copy(dAtA[i:], m.ClusterHeader) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterHeader))) + i-- + dAtA[i] = 0x2a + } + if m.TraceSampled != nil { + size, err := (*wrapperspb.BoolValue)(m.TraceSampled).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.RuntimeFraction != nil { + if vtmsg, ok := interface{}(m.RuntimeFraction).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RuntimeFraction) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Cluster) > 0 { + i -= len(m.Cluster) + copy(dAtA[i:], m.Cluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Cluster))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_HashPolicy_Header) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_HashPolicy_Header) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_Header) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RegexRewrite != nil { + if vtmsg, ok := interface{}(m.RegexRewrite).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := 
vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RegexRewrite) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.HeaderName) > 0 { + i -= len(m.HeaderName) + copy(dAtA[i:], m.HeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_HashPolicy_CookieAttribute) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_HashPolicy_CookieAttribute) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_CookieAttribute) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_HashPolicy_Cookie) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_HashPolicy_Cookie) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_Cookie) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Attributes[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x1a + } + if m.Ttl != nil { + size, err := (*durationpb.Duration)(m.Ttl).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_HashPolicy_ConnectionProperties) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_HashPolicy_ConnectionProperties) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_ConnectionProperties) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SourceIp { + i-- + if m.SourceIp { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_HashPolicy_QueryParameter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_HashPolicy_QueryParameter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_QueryParameter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_HashPolicy_FilterState) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_HashPolicy_FilterState) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_FilterState) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_HashPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_HashPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.PolicySpecifier.(*RouteAction_HashPolicy_FilterState_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := 
m.PolicySpecifier.(*RouteAction_HashPolicy_QueryParameter_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Terminal { + i-- + if m.Terminal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if msg, ok := m.PolicySpecifier.(*RouteAction_HashPolicy_ConnectionProperties_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PolicySpecifier.(*RouteAction_HashPolicy_Cookie_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PolicySpecifier.(*RouteAction_HashPolicy_Header_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_HashPolicy_Header_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_Header_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Header != nil { + size, err := m.Header.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *RouteAction_HashPolicy_Cookie_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_Cookie_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Cookie != nil { + size, err := m.Cookie.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *RouteAction_HashPolicy_ConnectionProperties_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_ConnectionProperties_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ConnectionProperties != nil { + size, err := m.ConnectionProperties.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RouteAction_HashPolicy_QueryParameter_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_QueryParameter_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.QueryParameter != nil { + size, err := m.QueryParameter.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *RouteAction_HashPolicy_FilterState_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HashPolicy_FilterState_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FilterState != nil { + size, err := m.FilterState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *RouteAction_UpgradeConfig_ConnectConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_UpgradeConfig_ConnectConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_UpgradeConfig_ConnectConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AllowPost { + i-- + if m.AllowPost { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.ProxyProtocolConfig != nil { + if vtmsg, ok := interface{}(m.ProxyProtocolConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ProxyProtocolConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_UpgradeConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_UpgradeConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_UpgradeConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ConnectConfig != nil { + size, err := m.ConnectConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.Enabled != nil { + size, err := (*wrapperspb.BoolValue)(m.Enabled).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.UpgradeType) > 0 { + i -= len(m.UpgradeType) + copy(dAtA[i:], m.UpgradeType) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.UpgradeType))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_MaxStreamDuration) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := 
m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction_MaxStreamDuration) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_MaxStreamDuration) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.GrpcTimeoutHeaderOffset != nil { + size, err := (*durationpb.Duration)(m.GrpcTimeoutHeaderOffset).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.GrpcTimeoutHeaderMax != nil { + size, err := (*durationpb.Duration)(m.GrpcTimeoutHeaderMax).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MaxStreamDuration != nil { + size, err := (*durationpb.Duration)(m.MaxStreamDuration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteAction) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PathRewritePolicy != nil { + if vtmsg, ok := interface{}(m.PathRewritePolicy).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.PathRewritePolicy) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xca + } + if m.EarlyDataPolicy != nil { + if vtmsg, ok := interface{}(m.EarlyDataPolicy).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.EarlyDataPolicy) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc2 + } + if msg, ok := m.ClusterSpecifier.(*RouteAction_InlineClusterSpecifierPlugin); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.AppendXForwardedHost { + i-- + if 
m.AppendXForwardedHost { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb0 + } + if msg, ok := m.ClusterSpecifier.(*RouteAction_ClusterSpecifierPlugin); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.MaxStreamDuration != nil { + size, err := m.MaxStreamDuration.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + if msg, ok := m.HostRewriteSpecifier.(*RouteAction_HostRewritePathRegex); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.InternalRedirectPolicy != nil { + size, err := m.InternalRedirectPolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + if m.RetryPolicyTypedConfig != nil { + size, err := (*anypb.Any)(m.RetryPolicyTypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a + } + if m.RegexRewrite != nil { + if vtmsg, ok := interface{}(m.RegexRewrite).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RegexRewrite) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 + } + if m.MaxInternalRedirects != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxInternalRedirects).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa + } + if len(m.RequestMirrorPolicies) > 0 { + for iNdEx := len(m.RequestMirrorPolicies) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RequestMirrorPolicies[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf2 + } + } + if msg, ok := m.HostRewriteSpecifier.(*RouteAction_HostRewriteHeader); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.GrpcTimeoutOffset != nil { + size, err := (*durationpb.Duration)(m.GrpcTimeoutOffset).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + if m.HedgePolicy != nil { + size, err := m.HedgePolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } + if m.InternalRedirectAction != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.InternalRedirectAction)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd0 + } + if len(m.UpgradeConfigs) > 0 { + for iNdEx := len(m.UpgradeConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := 
m.UpgradeConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } + if m.IdleTimeout != nil { + size, err := (*durationpb.Duration)(m.IdleTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if m.MaxGrpcTimeout != nil { + size, err := (*durationpb.Duration)(m.MaxGrpcTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if m.ClusterNotFoundResponseCode != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClusterNotFoundResponseCode)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if m.Cors != nil { + size, err := m.Cors.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if len(m.HashPolicy) > 0 { + for iNdEx := len(m.HashPolicy) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.HashPolicy[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + } + if m.IncludeVhRateLimits != nil { + size, err := (*wrapperspb.BoolValue)(m.IncludeVhRateLimits).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RateLimits[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + } + if m.Priority != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x58 + } + if m.RetryPolicy != nil { + size, err := m.RetryPolicy.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if m.Timeout != nil { + size, err := (*durationpb.Duration)(m.Timeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if msg, ok := m.HostRewriteSpecifier.(*RouteAction_AutoHostRewrite); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HostRewriteSpecifier.(*RouteAction_HostRewriteLiteral); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.PrefixRewrite) > 0 { + i -= len(m.PrefixRewrite) + copy(dAtA[i:], m.PrefixRewrite) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PrefixRewrite))) + i-- + dAtA[i] = 0x2a + } + if m.MetadataMatch != nil { + if vtmsg, ok := interface{}(m.MetadataMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := 
proto.Marshal(m.MetadataMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if msg, ok := m.ClusterSpecifier.(*RouteAction_WeightedClusters); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ClusterSpecifier.(*RouteAction_ClusterHeader); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ClusterSpecifier.(*RouteAction_Cluster); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *RouteAction_Cluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_Cluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Cluster) + copy(dAtA[i:], m.Cluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Cluster))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *RouteAction_ClusterHeader) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_ClusterHeader) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.ClusterHeader) + copy(dAtA[i:], m.ClusterHeader) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterHeader))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *RouteAction_WeightedClusters) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_WeightedClusters) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.WeightedClusters != nil { + size, err := m.WeightedClusters.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RouteAction_HostRewriteLiteral) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HostRewriteLiteral) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.HostRewriteLiteral) + copy(dAtA[i:], m.HostRewriteLiteral) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HostRewriteLiteral))) + i-- + dAtA[i] = 0x32 + return len(dAtA) - i, nil +} +func (m *RouteAction_AutoHostRewrite) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_AutoHostRewrite) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AutoHostRewrite != nil { + size, err := (*wrapperspb.BoolValue)(m.AutoHostRewrite).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *RouteAction_HostRewriteHeader) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HostRewriteHeader) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.HostRewriteHeader) + copy(dAtA[i:], m.HostRewriteHeader) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HostRewriteHeader))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + return len(dAtA) - i, nil +} +func (m *RouteAction_HostRewritePathRegex) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_HostRewritePathRegex) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HostRewritePathRegex != nil { + if vtmsg, ok := interface{}(m.HostRewritePathRegex).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HostRewritePathRegex) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + return len(dAtA) - i, nil +} +func (m *RouteAction_ClusterSpecifierPlugin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_ClusterSpecifierPlugin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.ClusterSpecifierPlugin) + copy(dAtA[i:], m.ClusterSpecifierPlugin) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClusterSpecifierPlugin))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xaa + return len(dAtA) - i, nil +} +func (m *RouteAction_InlineClusterSpecifierPlugin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteAction_InlineClusterSpecifierPlugin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.InlineClusterSpecifierPlugin != nil { + size, err := m.InlineClusterSpecifierPlugin.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xba + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xba + } + return len(dAtA) - i, nil +} +func (m *RetryPolicy_RetryPriority) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryPolicy_RetryPriority) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryPriority) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*RetryPolicy_RetryPriority_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RetryPolicy_RetryPriority_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryPriority_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RetryPolicy_RetryHostPredicate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryPolicy_RetryHostPredicate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryHostPredicate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*RetryPolicy_RetryHostPredicate_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RetryPolicy_RetryHostPredicate_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryHostPredicate_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RetryPolicy_RetryBackOff) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryPolicy_RetryBackOff) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RetryBackOff) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxInterval != nil { + size, err := (*durationpb.Duration)(m.MaxInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.BaseInterval != nil { + size, err := (*durationpb.Duration)(m.BaseInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RetryPolicy_ResetHeader) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryPolicy_ResetHeader) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_ResetHeader) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Format != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Format)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RetryPolicy_RateLimitedRetryBackOff) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryPolicy_RateLimitedRetryBackOff) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy_RateLimitedRetryBackOff) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxInterval != nil { + size, err := (*durationpb.Duration)(m.MaxInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.ResetHeaders) > 0 { + for iNdEx := len(m.ResetHeaders) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ResetHeaders[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RetryPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RetryPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PerTryIdleTimeout != nil { + size, err := (*durationpb.Duration)(m.PerTryIdleTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil 
{ + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + if len(m.RetryOptionsPredicates) > 0 { + for iNdEx := len(m.RetryOptionsPredicates) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.RetryOptionsPredicates[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RetryOptionsPredicates[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x62 + } + } + if m.RateLimitedRetryBackOff != nil { + size, err := m.RateLimitedRetryBackOff.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if len(m.RetriableRequestHeaders) > 0 { + for iNdEx := len(m.RetriableRequestHeaders) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RetriableRequestHeaders[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + } + if len(m.RetriableHeaders) > 0 { + for iNdEx := len(m.RetriableHeaders) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RetriableHeaders[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + } + if m.RetryBackOff != nil { + size, err := m.RetryBackOff.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if len(m.RetriableStatusCodes) > 0 { + var pksize2 int + for _, num := range m.RetriableStatusCodes { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num := range m.RetriableStatusCodes { + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x3a + } + if m.HostSelectionRetryMaxAttempts != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HostSelectionRetryMaxAttempts)) + i-- + dAtA[i] = 0x30 + } + if len(m.RetryHostPredicate) > 0 { + for iNdEx := len(m.RetryHostPredicate) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RetryHostPredicate[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.RetryPriority != nil { + size, err := m.RetryPriority.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.PerTryTimeout != nil { + size, err := (*durationpb.Duration)(m.PerTryTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.NumRetries != nil { + size, err := (*wrapperspb.UInt32Value)(m.NumRetries).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.RetryOn) > 0 { + i -= len(m.RetryOn) + copy(dAtA[i:], m.RetryOn) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RetryOn))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HedgePolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HedgePolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HedgePolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.HedgeOnPerTryTimeout { + i-- + if m.HedgeOnPerTryTimeout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.AdditionalRequestChance != nil { + if vtmsg, ok := interface{}(m.AdditionalRequestChance).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AdditionalRequestChance) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.InitialRequests != nil { + size, err := (*wrapperspb.UInt32Value)(m.InitialRequests).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RedirectAction) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RedirectAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RedirectAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.PathRewriteSpecifier.(*RedirectAction_RegexRewrite); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.PortRedirect != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.PortRedirect)) + i-- + dAtA[i] = 0x40 + } + if msg, ok := m.SchemeRewriteSpecifier.(*RedirectAction_SchemeRedirect); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.StripQuery { + i-- + if m.StripQuery { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if msg, ok := m.PathRewriteSpecifier.(*RedirectAction_PrefixRewrite); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.SchemeRewriteSpecifier.(*RedirectAction_HttpsRedirect); ok { + size, err := 
msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.ResponseCode != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ResponseCode)) + i-- + dAtA[i] = 0x18 + } + if msg, ok := m.PathRewriteSpecifier.(*RedirectAction_PathRedirect); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.HostRedirect) > 0 { + i -= len(m.HostRedirect) + copy(dAtA[i:], m.HostRedirect) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HostRedirect))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RedirectAction_PathRedirect) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RedirectAction_PathRedirect) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.PathRedirect) + copy(dAtA[i:], m.PathRedirect) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PathRedirect))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *RedirectAction_HttpsRedirect) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RedirectAction_HttpsRedirect) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.HttpsRedirect { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *RedirectAction_PrefixRewrite) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RedirectAction_PrefixRewrite) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.PrefixRewrite) + copy(dAtA[i:], m.PrefixRewrite) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PrefixRewrite))) + i-- + dAtA[i] = 0x2a + return len(dAtA) - i, nil +} +func (m *RedirectAction_SchemeRedirect) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RedirectAction_SchemeRedirect) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.SchemeRedirect) + copy(dAtA[i:], m.SchemeRedirect) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SchemeRedirect))) + i-- + dAtA[i] = 0x3a + return len(dAtA) - i, nil +} +func (m *RedirectAction_RegexRewrite) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RedirectAction_RegexRewrite) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RegexRewrite != nil { + if vtmsg, ok := interface{}(m.RegexRewrite).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RegexRewrite) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *DirectResponseAction) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DirectResponseAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DirectResponseAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Body != nil { + if vtmsg, ok := interface{}(m.Body).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Body) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.Status != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *NonForwardingAction) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NonForwardingAction) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *NonForwardingAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *Decorator) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Decorator) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Decorator) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Propagate != nil { + size, err := (*wrapperspb.BoolValue)(m.Propagate).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Operation) > 0 { + i -= len(m.Operation) + copy(dAtA[i:], m.Operation) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Operation))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Tracing) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Tracing) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Tracing) MarshalToSizedBufferVTStrict(dAtA 
[]byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CustomTags) > 0 { + for iNdEx := len(m.CustomTags) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.CustomTags[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CustomTags[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + } + if m.OverallSampling != nil { + if vtmsg, ok := interface{}(m.OverallSampling).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.OverallSampling) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.RandomSampling != nil { + if vtmsg, ok := interface{}(m.RandomSampling).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RandomSampling) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.ClientSampling != nil { + if vtmsg, ok := interface{}(m.ClientSampling).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ClientSampling) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VirtualCluster) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VirtualCluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *VirtualCluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Headers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Name) > 0 { + i -= 
len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_SourceCluster) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_SourceCluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_SourceCluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_DestinationCluster) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_DestinationCluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_DestinationCluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_RequestHeaders) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_RequestHeaders) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_RequestHeaders) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SkipIfAbsent { + i-- + if m.SkipIfAbsent { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.DescriptorKey) > 0 { + i -= len(m.DescriptorKey) + copy(dAtA[i:], m.DescriptorKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorKey))) + i-- + dAtA[i] = 0x12 + } + if len(m.HeaderName) > 0 { + i -= len(m.HeaderName) + copy(dAtA[i:], m.HeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_RemoteAddress) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_RemoteAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_RemoteAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { 
+ if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_MaskedRemoteAddress) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_MaskedRemoteAddress) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_MaskedRemoteAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.V6PrefixMaskLen != nil { + size, err := (*wrapperspb.UInt32Value)(m.V6PrefixMaskLen).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.V4PrefixMaskLen != nil { + size, err := (*wrapperspb.UInt32Value)(m.V4PrefixMaskLen).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_GenericKey) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_GenericKey) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_GenericKey) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DescriptorKey) > 0 { + i -= len(m.DescriptorKey) + copy(dAtA[i:], m.DescriptorKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorKey))) + i-- + dAtA[i] = 0x12 + } + if len(m.DescriptorValue) > 0 { + i -= len(m.DescriptorValue) + copy(dAtA[i:], m.DescriptorValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorValue))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_HeaderValueMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_HeaderValueMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_HeaderValueMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DescriptorKey) > 0 { + i -= len(m.DescriptorKey) + copy(dAtA[i:], m.DescriptorKey) + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorKey))) + i-- + dAtA[i] = 0x22 + } + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Headers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if m.ExpectMatch != nil { + size, err := (*wrapperspb.BoolValue)(m.ExpectMatch).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.DescriptorValue) > 0 { + i -= len(m.DescriptorValue) + copy(dAtA[i:], m.DescriptorValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorValue))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_DynamicMetaData) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_DynamicMetaData) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_DynamicMetaData) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DefaultValue) > 0 { + i -= len(m.DefaultValue) + copy(dAtA[i:], m.DefaultValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DefaultValue))) + i-- + dAtA[i] = 0x1a + } + if m.MetadataKey != nil { + if vtmsg, ok := interface{}(m.MetadataKey).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MetadataKey) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.DescriptorKey) > 0 { + i -= len(m.DescriptorKey) + copy(dAtA[i:], m.DescriptorKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorKey))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_MetaData) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_MetaData) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_MetaData) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SkipIfAbsent { + i-- + if m.SkipIfAbsent { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.Source != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Source)) + i-- + dAtA[i] = 0x20 + } + if len(m.DefaultValue) 
> 0 { + i -= len(m.DefaultValue) + copy(dAtA[i:], m.DefaultValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DefaultValue))) + i-- + dAtA[i] = 0x1a + } + if m.MetadataKey != nil { + if vtmsg, ok := interface{}(m.MetadataKey).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MetadataKey) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.DescriptorKey) > 0 { + i -= len(m.DescriptorKey) + copy(dAtA[i:], m.DescriptorKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorKey))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_QueryParameterValueMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action_QueryParameterValueMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_QueryParameterValueMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DescriptorKey) > 0 { + i -= len(m.DescriptorKey) + copy(dAtA[i:], m.DescriptorKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorKey))) + i-- + dAtA[i] = 0x22 + } + if len(m.QueryParameters) > 0 { + for iNdEx := len(m.QueryParameters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.QueryParameters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if m.ExpectMatch != nil { + size, err := (*wrapperspb.BoolValue)(m.ExpectMatch).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.DescriptorValue) > 0 { + i -= len(m.DescriptorValue) + copy(dAtA[i:], m.DescriptorValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DescriptorValue))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Action) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_QueryParameterValueMatch_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err 
!= nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_MaskedRemoteAddress_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_Extension); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_Metadata); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_DynamicMetadata); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_HeaderValueMatch_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_GenericKey_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_RemoteAddress_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_RequestHeaders_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_DestinationCluster_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ActionSpecifier.(*RateLimit_Action_SourceCluster_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Action_SourceCluster_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_SourceCluster_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SourceCluster != nil { + size, err := m.SourceCluster.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_DestinationCluster_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_DestinationCluster_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DestinationCluster != nil { + size, err := m.DestinationCluster.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_RequestHeaders_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_RequestHeaders_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RequestHeaders != nil { + size, err := 
m.RequestHeaders.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_RemoteAddress_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_RemoteAddress_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RemoteAddress != nil { + size, err := m.RemoteAddress.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_GenericKey_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_GenericKey_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GenericKey != nil { + size, err := m.GenericKey.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_HeaderValueMatch_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_HeaderValueMatch_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HeaderValueMatch != nil { + size, err := m.HeaderValueMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_DynamicMetadata) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_DynamicMetadata) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DynamicMetadata != nil { + size, err := m.DynamicMetadata.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_Metadata) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_Metadata) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Metadata != nil { + size, err := m.Metadata.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_Extension) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_Extension) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Extension != nil { + if vtmsg, ok := interface{}(m.Extension).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Extension) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_MaskedRemoteAddress_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_MaskedRemoteAddress_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MaskedRemoteAddress != nil { + size, err := m.MaskedRemoteAddress.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Action_QueryParameterValueMatch_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Action_QueryParameterValueMatch_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.QueryParameterValueMatch != nil { + size, err := m.QueryParameterValueMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *RateLimit_Override_DynamicMetadata) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Override_DynamicMetadata) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Override_DynamicMetadata) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MetadataKey != nil { + if vtmsg, ok := interface{}(m.MetadataKey).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MetadataKey) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Override) MarshalVTStrict() (dAtA []byte, err error) { + if m == 
nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit_Override) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Override) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.OverrideSpecifier.(*RateLimit_Override_DynamicMetadata_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *RateLimit_Override_DynamicMetadata_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit_Override_DynamicMetadata_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DynamicMetadata != nil { + size, err := m.DynamicMetadata.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *RateLimit) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimit) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Limit != nil { + size, err := m.Limit.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.Actions) > 0 { + for iNdEx := len(m.Actions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Actions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.DisableKey) > 0 { + i -= len(m.DisableKey) + copy(dAtA[i:], m.DisableKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DisableKey))) + i-- + dAtA[i] = 0x12 + } + if m.Stage != nil { + size, err := (*wrapperspb.UInt32Value)(m.Stage).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HeaderMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeaderMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + 
+func (m *HeaderMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TreatMissingHeaderAsEmpty { + i-- + if m.TreatMissingHeaderAsEmpty { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + } + if msg, ok := m.HeaderMatchSpecifier.(*HeaderMatcher_StringMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HeaderMatchSpecifier.(*HeaderMatcher_ContainsMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HeaderMatchSpecifier.(*HeaderMatcher_SafeRegexMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HeaderMatchSpecifier.(*HeaderMatcher_SuffixMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HeaderMatchSpecifier.(*HeaderMatcher_PrefixMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.InvertMatch { + i-- + if m.InvertMatch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if msg, ok := m.HeaderMatchSpecifier.(*HeaderMatcher_PresentMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HeaderMatchSpecifier.(*HeaderMatcher_RangeMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.HeaderMatchSpecifier.(*HeaderMatcher_ExactMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HeaderMatcher_ExactMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMatcher_ExactMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.ExactMatch) + copy(dAtA[i:], m.ExactMatch) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ExactMatch))) + i-- + dAtA[i] = 0x22 + return len(dAtA) - i, nil +} +func (m *HeaderMatcher_RangeMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMatcher_RangeMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RangeMatch != nil { + if vtmsg, ok := interface{}(m.RangeMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RangeMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + 
return len(dAtA) - i, nil +} +func (m *HeaderMatcher_PresentMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMatcher_PresentMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.PresentMatch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + return len(dAtA) - i, nil +} +func (m *HeaderMatcher_PrefixMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMatcher_PrefixMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.PrefixMatch) + copy(dAtA[i:], m.PrefixMatch) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PrefixMatch))) + i-- + dAtA[i] = 0x4a + return len(dAtA) - i, nil +} +func (m *HeaderMatcher_SuffixMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMatcher_SuffixMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.SuffixMatch) + copy(dAtA[i:], m.SuffixMatch) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SuffixMatch))) + i-- + dAtA[i] = 0x52 + return len(dAtA) - i, nil +} +func (m *HeaderMatcher_SafeRegexMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMatcher_SafeRegexMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SafeRegexMatch != nil { + if vtmsg, ok := interface{}(m.SafeRegexMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SafeRegexMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x5a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *HeaderMatcher_ContainsMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMatcher_ContainsMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.ContainsMatch) + copy(dAtA[i:], m.ContainsMatch) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ContainsMatch))) + i-- + dAtA[i] = 0x62 + return len(dAtA) - i, nil +} +func (m *HeaderMatcher_StringMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HeaderMatcher_StringMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StringMatch != nil { + if vtmsg, ok := interface{}(m.StringMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.StringMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + 
dAtA[i] = 0x6a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x6a + } + return len(dAtA) - i, nil +} +func (m *QueryParameterMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParameterMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *QueryParameterMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.QueryParameterMatchSpecifier.(*QueryParameterMatcher_PresentMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.QueryParameterMatchSpecifier.(*QueryParameterMatcher_StringMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryParameterMatcher_StringMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *QueryParameterMatcher_StringMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StringMatch != nil { + if vtmsg, ok := interface{}(m.StringMatch).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.StringMatch) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *QueryParameterMatcher_PresentMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *QueryParameterMatcher_PresentMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.PresentMatch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + return len(dAtA) - i, nil +} +func (m *InternalRedirectPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InternalRedirectPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *InternalRedirectPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ResponseHeadersToCopy) > 0 
{ + for iNdEx := len(m.ResponseHeadersToCopy) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResponseHeadersToCopy[iNdEx]) + copy(dAtA[i:], m.ResponseHeadersToCopy[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseHeadersToCopy[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if m.AllowCrossSchemeRedirect { + i-- + if m.AllowCrossSchemeRedirect { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.Predicates) > 0 { + for iNdEx := len(m.Predicates) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Predicates[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Predicates[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.RedirectResponseCodes) > 0 { + var pksize2 int + for _, num := range m.RedirectResponseCodes { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num := range m.RedirectResponseCodes { + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x12 + } + if m.MaxInternalRedirects != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxInternalRedirects).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FilterConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FilterConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FilterConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Disabled { + i-- + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.IsOptional { + i-- + if m.IsOptional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Config != nil { + size, err := (*anypb.Any)(m.Config).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VirtualHost) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Domains) > 0 { + for _, s := range m.Domains { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Routes) > 0 { + for _, e := range m.Routes { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.RequireTls != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.RequireTls)) + } + if len(m.VirtualClusters) > 0 { + for _, 
e := range m.VirtualClusters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RateLimits) > 0 { + for _, e := range m.RateLimits { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RequestHeadersToAdd) > 0 { + for _, e := range m.RequestHeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Cors != nil { + l = m.Cors.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ResponseHeadersToAdd) > 0 { + for _, e := range m.ResponseHeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResponseHeadersToRemove) > 0 { + for _, s := range m.ResponseHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RequestHeadersToRemove) > 0 { + for _, s := range m.RequestHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.IncludeRequestAttemptCount { + n += 2 + } + if len(m.TypedPerFilterConfig) > 0 { + for k, v := range m.TypedPerFilterConfig { + _ = k + _ = v + l = 0 + if v != nil { + l = (*anypb.Any)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if m.RetryPolicy != nil { + l = m.RetryPolicy.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HedgePolicy != nil { + l = m.HedgePolicy.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PerRequestBufferLimitBytes != nil { + l = (*wrapperspb.UInt32Value)(m.PerRequestBufferLimitBytes).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IncludeAttemptCountInResponse { + n += 3 + } + if m.RetryPolicyTypedConfig != nil { + l = (*anypb.Any)(m.RetryPolicyTypedConfig).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Matcher != nil { + if size, ok := interface{}(m.Matcher).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Matcher) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RequestMirrorPolicies) > 0 { + for _, e := range m.RequestMirrorPolicies { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.IncludeIsTimeoutRetryHeader { + n += 3 + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *FilterAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Action != nil { + l = (*anypb.Any)(m.Action).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RouteList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Routes) > 0 { + for _, e := range m.Routes { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Route) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Match != 
nil { + l = m.Match.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.Action.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Decorator != nil { + l = m.Decorator.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RequestHeadersToAdd) > 0 { + for _, e := range m.RequestHeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResponseHeadersToAdd) > 0 { + for _, e := range m.ResponseHeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResponseHeadersToRemove) > 0 { + for _, s := range m.ResponseHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RequestHeadersToRemove) > 0 { + for _, s := range m.RequestHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.TypedPerFilterConfig) > 0 { + for k, v := range m.TypedPerFilterConfig { + _ = k + _ = v + l = 0 + if v != nil { + l = (*anypb.Any)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Tracing != nil { + l = m.Tracing.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PerRequestBufferLimitBytes != nil { + l = (*wrapperspb.UInt32Value)(m.PerRequestBufferLimitBytes).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.StatPrefix) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Route_Route) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Route != nil { + l = m.Route.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Route_Redirect) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Redirect != nil { + l = m.Redirect.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Route_DirectResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DirectResponse != nil { + l = m.DirectResponse.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Route_FilterAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FilterAction != nil { + l = m.FilterAction.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *Route_NonForwardingAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NonForwardingAction != nil { + l = m.NonForwardingAction.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *WeightedCluster_ClusterWeight) SizeVT() (n int) { + if m == nil { + 
return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Weight != nil { + l = (*wrapperspb.UInt32Value)(m.Weight).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MetadataMatch != nil { + if size, ok := interface{}(m.MetadataMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MetadataMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RequestHeadersToAdd) > 0 { + for _, e := range m.RequestHeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResponseHeadersToAdd) > 0 { + for _, e := range m.ResponseHeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResponseHeadersToRemove) > 0 { + for _, s := range m.ResponseHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RequestHeadersToRemove) > 0 { + for _, s := range m.RequestHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.TypedPerFilterConfig) > 0 { + for k, v := range m.TypedPerFilterConfig { + _ = k + _ = v + l = 0 + if v != nil { + l = (*anypb.Any)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if vtmsg, ok := m.HostRewriteSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + l = len(m.ClusterHeader) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *WeightedCluster_ClusterWeight_HostRewriteLiteral) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HostRewriteLiteral) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *WeightedCluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Clusters) > 0 { + for _, e := range m.Clusters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.RuntimeKeyPrefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TotalWeight != nil { + l = (*wrapperspb.UInt32Value)(m.TotalWeight).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.RandomValueSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *WeightedCluster_HeaderName) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HeaderName) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *ClusterSpecifierPlugin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Extension != nil { + if size, ok := interface{}(m.Extension).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Extension) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IsOptional { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RouteMatch_GrpcRouteMatchOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) 
+ return n +} + +func (m *RouteMatch_TlsContextMatchOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Presented != nil { + l = (*wrapperspb.BoolValue)(m.Presented).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Validated != nil { + l = (*wrapperspb.BoolValue)(m.Validated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RouteMatch_ConnectMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RouteMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.PathSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.CaseSensitive != nil { + l = (*wrapperspb.BoolValue)(m.CaseSensitive).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Headers) > 0 { + for _, e := range m.Headers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.QueryParameters) > 0 { + for _, e := range m.QueryParameters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Grpc != nil { + l = m.Grpc.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RuntimeFraction != nil { + if size, ok := interface{}(m.RuntimeFraction).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RuntimeFraction) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TlsContext != nil { + l = m.TlsContext.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.DynamicMetadata) > 0 { + for _, e := range m.DynamicMetadata { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RouteMatch_Prefix) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Prefix) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RouteMatch_Path) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RouteMatch_SafeRegex) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SafeRegex != nil { + if size, ok := interface{}(m.SafeRegex).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SafeRegex) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteMatch_ConnectMatcher_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConnectMatcher != nil { + l = m.ConnectMatcher.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteMatch_PathSeparatedPrefix) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PathSeparatedPrefix) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RouteMatch_PathMatchPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PathMatchPolicy != nil { + if size, ok := interface{}(m.PathMatchPolicy).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.PathMatchPolicy) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n 
+} +func (m *CorsPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AllowMethods) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.AllowHeaders) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ExposeHeaders) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.MaxAge) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AllowCredentials != nil { + l = (*wrapperspb.BoolValue)(m.AllowCredentials).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.EnabledSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.ShadowEnabled != nil { + if size, ok := interface{}(m.ShadowEnabled).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ShadowEnabled) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.AllowOriginStringMatch) > 0 { + for _, e := range m.AllowOriginStringMatch { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.AllowPrivateNetworkAccess != nil { + l = (*wrapperspb.BoolValue)(m.AllowPrivateNetworkAccess).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ForwardNotMatchingPreflights != nil { + l = (*wrapperspb.BoolValue)(m.ForwardNotMatchingPreflights).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CorsPolicy_FilterEnabled) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FilterEnabled != nil { + if size, ok := interface{}(m.FilterEnabled).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.FilterEnabled) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteAction_RequestMirrorPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Cluster) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RuntimeFraction != nil { + if size, ok := interface{}(m.RuntimeFraction).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RuntimeFraction) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TraceSampled != nil { + l = (*wrapperspb.BoolValue)(m.TraceSampled).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ClusterHeader) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DisableShadowHostSuffixAppend { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_HashPolicy_Header) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HeaderName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RegexRewrite != nil { + if size, ok := interface{}(m.RegexRewrite).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RegexRewrite) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_HashPolicy_CookieAttribute) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + 
n += len(m.unknownFields) + return n +} + +func (m *RouteAction_HashPolicy_Cookie) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Ttl != nil { + l = (*durationpb.Duration)(m.Ttl).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_HashPolicy_ConnectionProperties) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SourceIp { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_HashPolicy_QueryParameter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_HashPolicy_FilterState) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_HashPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.PolicySpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Terminal { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_HashPolicy_Header_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteAction_HashPolicy_Cookie_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Cookie != nil { + l = m.Cookie.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteAction_HashPolicy_ConnectionProperties_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConnectionProperties != nil { + l = m.ConnectionProperties.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteAction_HashPolicy_QueryParameter_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.QueryParameter != nil { + l = m.QueryParameter.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteAction_HashPolicy_FilterState_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FilterState != nil { + l = m.FilterState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteAction_UpgradeConfig_ConnectConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ProxyProtocolConfig != nil { + if size, ok := interface{}(m.ProxyProtocolConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ProxyProtocolConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AllowPost { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_UpgradeConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UpgradeType) + if l > 0 { + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + if m.Enabled != nil { + l = (*wrapperspb.BoolValue)(m.Enabled).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectConfig != nil { + l = m.ConnectConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_MaxStreamDuration) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxStreamDuration != nil { + l = (*durationpb.Duration)(m.MaxStreamDuration).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.GrpcTimeoutHeaderMax != nil { + l = (*durationpb.Duration)(m.GrpcTimeoutHeaderMax).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.GrpcTimeoutHeaderOffset != nil { + l = (*durationpb.Duration)(m.GrpcTimeoutHeaderOffset).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.ClusterSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.MetadataMatch != nil { + if size, ok := interface{}(m.MetadataMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MetadataMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.PrefixRewrite) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.HostRewriteSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Timeout != nil { + l = (*durationpb.Duration)(m.Timeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RetryPolicy != nil { + l = m.RetryPolicy.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Priority != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Priority)) + } + if len(m.RateLimits) > 0 { + for _, e := range m.RateLimits { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.IncludeVhRateLimits != nil { + l = (*wrapperspb.BoolValue)(m.IncludeVhRateLimits).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.HashPolicy) > 0 { + for _, e := range m.HashPolicy { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Cors != nil { + l = m.Cors.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClusterNotFoundResponseCode != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.ClusterNotFoundResponseCode)) + } + if m.MaxGrpcTimeout != nil { + l = (*durationpb.Duration)(m.MaxGrpcTimeout).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IdleTimeout != nil { + l = (*durationpb.Duration)(m.IdleTimeout).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.UpgradeConfigs) > 0 { + for _, e := range m.UpgradeConfigs { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.InternalRedirectAction != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.InternalRedirectAction)) + } + if m.HedgePolicy != nil { + l = m.HedgePolicy.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.GrpcTimeoutOffset != nil { + l = (*durationpb.Duration)(m.GrpcTimeoutOffset).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RequestMirrorPolicies) > 0 { + for _, e := range m.RequestMirrorPolicies { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if 
m.MaxInternalRedirects != nil { + l = (*wrapperspb.UInt32Value)(m.MaxInternalRedirects).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RegexRewrite != nil { + if size, ok := interface{}(m.RegexRewrite).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RegexRewrite) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RetryPolicyTypedConfig != nil { + l = (*anypb.Any)(m.RetryPolicyTypedConfig).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.InternalRedirectPolicy != nil { + l = m.InternalRedirectPolicy.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxStreamDuration != nil { + l = m.MaxStreamDuration.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AppendXForwardedHost { + n += 3 + } + if m.EarlyDataPolicy != nil { + if size, ok := interface{}(m.EarlyDataPolicy).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.EarlyDataPolicy) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PathRewritePolicy != nil { + if size, ok := interface{}(m.PathRewritePolicy).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.PathRewritePolicy) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RouteAction_Cluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Cluster) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RouteAction_ClusterHeader) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClusterHeader) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RouteAction_WeightedClusters) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.WeightedClusters != nil { + l = m.WeightedClusters.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteAction_HostRewriteLiteral) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HostRewriteLiteral) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RouteAction_AutoHostRewrite) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AutoHostRewrite != nil { + l = (*wrapperspb.BoolValue)(m.AutoHostRewrite).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RouteAction_HostRewriteHeader) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HostRewriteHeader) + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RouteAction_HostRewritePathRegex) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HostRewritePathRegex != nil { + if size, ok := interface{}(m.HostRewritePathRegex).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.HostRewritePathRegex) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *RouteAction_ClusterSpecifierPlugin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClusterSpecifierPlugin) + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RouteAction_InlineClusterSpecifierPlugin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.InlineClusterSpecifierPlugin != nil { + l = 
m.InlineClusterSpecifierPlugin.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *RetryPolicy_RetryPriority) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *RetryPolicy_RetryPriority_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RetryPolicy_RetryHostPredicate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *RetryPolicy_RetryHostPredicate_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RetryPolicy_RetryBackOff) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BaseInterval != nil { + l = (*durationpb.Duration)(m.BaseInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxInterval != nil { + l = (*durationpb.Duration)(m.MaxInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RetryPolicy_ResetHeader) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Format != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Format)) + } + n += len(m.unknownFields) + return n +} + +func (m *RetryPolicy_RateLimitedRetryBackOff) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResetHeaders) > 0 { + for _, e := range m.ResetHeaders { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.MaxInterval != nil { + l = (*durationpb.Duration)(m.MaxInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RetryPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RetryOn) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.NumRetries != nil { + l = (*wrapperspb.UInt32Value)(m.NumRetries).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PerTryTimeout != nil { + l = (*durationpb.Duration)(m.PerTryTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RetryPriority != nil { + l = m.RetryPriority.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RetryHostPredicate) > 0 { + for _, e := range m.RetryHostPredicate { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.HostSelectionRetryMaxAttempts != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HostSelectionRetryMaxAttempts)) + } + if len(m.RetriableStatusCodes) > 0 { + l = 0 + for _, e := range m.RetriableStatusCodes { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 
+ protohelpers.SizeOfVarint(uint64(l)) + l + } + if m.RetryBackOff != nil { + l = m.RetryBackOff.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RetriableHeaders) > 0 { + for _, e := range m.RetriableHeaders { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RetriableRequestHeaders) > 0 { + for _, e := range m.RetriableRequestHeaders { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.RateLimitedRetryBackOff != nil { + l = m.RateLimitedRetryBackOff.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RetryOptionsPredicates) > 0 { + for _, e := range m.RetryOptionsPredicates { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.PerTryIdleTimeout != nil { + l = (*durationpb.Duration)(m.PerTryIdleTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HedgePolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.InitialRequests != nil { + l = (*wrapperspb.UInt32Value)(m.InitialRequests).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AdditionalRequestChance != nil { + if size, ok := interface{}(m.AdditionalRequestChance).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.AdditionalRequestChance) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HedgeOnPerTryTimeout { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RedirectAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HostRedirect) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.PathRewriteSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.ResponseCode != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ResponseCode)) + } + if vtmsg, ok := m.SchemeRewriteSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.StripQuery { + n += 2 + } + if m.PortRedirect != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.PortRedirect)) + } + n += len(m.unknownFields) + return n +} + +func (m *RedirectAction_PathRedirect) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PathRedirect) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RedirectAction_HttpsRedirect) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *RedirectAction_PrefixRewrite) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PrefixRewrite) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RedirectAction_SchemeRedirect) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SchemeRedirect) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *RedirectAction_RegexRewrite) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RegexRewrite != nil { + if size, ok := interface{}(m.RegexRewrite).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RegexRewrite) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *DirectResponseAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l 
int + _ = l + if m.Status != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Status)) + } + if m.Body != nil { + if size, ok := interface{}(m.Body).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Body) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *NonForwardingAction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *Decorator) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Operation) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Propagate != nil { + l = (*wrapperspb.BoolValue)(m.Propagate).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Tracing) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClientSampling != nil { + if size, ok := interface{}(m.ClientSampling).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ClientSampling) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RandomSampling != nil { + if size, ok := interface{}(m.RandomSampling).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RandomSampling) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OverallSampling != nil { + if size, ok := interface{}(m.OverallSampling).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.OverallSampling) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.CustomTags) > 0 { + for _, e := range m.CustomTags { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *VirtualCluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Headers) > 0 { + for _, e := range m.Headers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_SourceCluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_DestinationCluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_RequestHeaders) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HeaderName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DescriptorKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SkipIfAbsent { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_RemoteAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_MaskedRemoteAddress) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.V4PrefixMaskLen != nil { + l = (*wrapperspb.UInt32Value)(m.V4PrefixMaskLen).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.V6PrefixMaskLen != nil { + l = (*wrapperspb.UInt32Value)(m.V6PrefixMaskLen).SizeVT() + n += 1 + 
l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_GenericKey) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DescriptorValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DescriptorKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_HeaderValueMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DescriptorValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ExpectMatch != nil { + l = (*wrapperspb.BoolValue)(m.ExpectMatch).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Headers) > 0 { + for _, e := range m.Headers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.DescriptorKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_DynamicMetaData) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DescriptorKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MetadataKey != nil { + if size, ok := interface{}(m.MetadataKey).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MetadataKey) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DefaultValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_MetaData) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DescriptorKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MetadataKey != nil { + if size, ok := interface{}(m.MetadataKey).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MetadataKey) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DefaultValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Source != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Source)) + } + if m.SkipIfAbsent { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_QueryParameterValueMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DescriptorValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ExpectMatch != nil { + l = (*wrapperspb.BoolValue)(m.ExpectMatch).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.QueryParameters) > 0 { + for _, e := range m.QueryParameters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.DescriptorKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.ActionSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Action_SourceCluster_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SourceCluster != nil { + l = m.SourceCluster.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_DestinationCluster_) SizeVT() (n int) { 
+ if m == nil { + return 0 + } + var l int + _ = l + if m.DestinationCluster != nil { + l = m.DestinationCluster.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_RequestHeaders_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequestHeaders != nil { + l = m.RequestHeaders.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_RemoteAddress_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RemoteAddress != nil { + l = m.RemoteAddress.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_GenericKey_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GenericKey != nil { + l = m.GenericKey.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_HeaderValueMatch_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HeaderValueMatch != nil { + l = m.HeaderValueMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_DynamicMetadata) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DynamicMetadata != nil { + l = m.DynamicMetadata.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_Metadata) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Metadata != nil { + l = m.Metadata.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_Extension) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Extension != nil { + if size, ok := interface{}(m.Extension).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Extension) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_MaskedRemoteAddress_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaskedRemoteAddress != nil { + l = m.MaskedRemoteAddress.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Action_QueryParameterValueMatch_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.QueryParameterValueMatch != nil { + l = m.QueryParameterValueMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit_Override_DynamicMetadata) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MetadataKey != nil { + if size, ok := interface{}(m.MetadataKey).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MetadataKey) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Override) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.OverrideSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimit_Override_DynamicMetadata_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DynamicMetadata != nil { + l = m.DynamicMetadata.SizeVT() 
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimit) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Stage != nil { + l = (*wrapperspb.UInt32Value)(m.Stage).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DisableKey) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Actions) > 0 { + for _, e := range m.Actions { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Limit != nil { + l = m.Limit.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HeaderMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.HeaderMatchSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.InvertMatch { + n += 2 + } + if m.TreatMissingHeaderAsEmpty { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *HeaderMatcher_ExactMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ExactMatch) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HeaderMatcher_RangeMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RangeMatch != nil { + if size, ok := interface{}(m.RangeMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RangeMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HeaderMatcher_PresentMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *HeaderMatcher_PrefixMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PrefixMatch) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HeaderMatcher_SuffixMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SuffixMatch) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HeaderMatcher_SafeRegexMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SafeRegexMatch != nil { + if size, ok := interface{}(m.SafeRegexMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SafeRegexMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HeaderMatcher_ContainsMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainsMatch) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HeaderMatcher_StringMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StringMatch != nil { + if size, ok := interface{}(m.StringMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.StringMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *QueryParameterMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.QueryParameterMatchSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *QueryParameterMatcher_StringMatch) SizeVT() (n int) 
{ + if m == nil { + return 0 + } + var l int + _ = l + if m.StringMatch != nil { + if size, ok := interface{}(m.StringMatch).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.StringMatch) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *QueryParameterMatcher_PresentMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *InternalRedirectPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxInternalRedirects != nil { + l = (*wrapperspb.UInt32Value)(m.MaxInternalRedirects).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RedirectResponseCodes) > 0 { + l = 0 + for _, e := range m.RedirectResponseCodes { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if len(m.Predicates) > 0 { + for _, e := range m.Predicates { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.AllowCrossSchemeRedirect { + n += 2 + } + if len(m.ResponseHeadersToCopy) > 0 { + for _, s := range m.ResponseHeadersToCopy { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *FilterConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Config != nil { + l = (*anypb.Any)(m.Config).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IsOptional { + n += 2 + } + if m.Disabled { + n += 2 + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_vtproto.pb.go new file mode 100644 index 0000000000..4f536bb8de --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_vtproto.pb.go @@ -0,0 +1,474 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/route/v3/route.proto + +package routev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *RouteConfiguration) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteConfiguration) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RouteConfiguration) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if len(m.TypedPerFilterConfig) > 0 { + for k := range m.TypedPerFilterConfig { + v := m.TypedPerFilterConfig[k] + baseI := i + size, err := (*anypb.Any)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + } + if m.IgnorePathParametersInPathMatching { + i-- + if m.IgnorePathParametersInPathMatching { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x78 + } + if m.IgnorePortInHostMatching { + i-- + if m.IgnorePortInHostMatching { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + } + if len(m.RequestMirrorPolicies) > 0 { + for iNdEx := len(m.RequestMirrorPolicies) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RequestMirrorPolicies[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + } + if len(m.ClusterSpecifierPlugins) > 0 { + for iNdEx := len(m.ClusterSpecifierPlugins) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ClusterSpecifierPlugins[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + } + if m.MaxDirectResponseBodySizeBytes != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxDirectResponseBodySizeBytes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if m.MostSpecificHeaderMutationsWins { + i-- + if m.MostSpecificHeaderMutationsWins { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.Vhds != nil { + size, err := m.Vhds.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if 
len(m.RequestHeadersToRemove) > 0 { + for iNdEx := len(m.RequestHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequestHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.RequestHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RequestHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.ValidateClusters != nil { + size, err := (*wrapperspb.BoolValue)(m.ValidateClusters).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if len(m.RequestHeadersToAdd) > 0 { + for iNdEx := len(m.RequestHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.RequestHeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RequestHeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.ResponseHeadersToRemove) > 0 { + for iNdEx := len(m.ResponseHeadersToRemove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResponseHeadersToRemove[iNdEx]) + copy(dAtA[i:], m.ResponseHeadersToRemove[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseHeadersToRemove[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.ResponseHeadersToAdd) > 0 { + for iNdEx := len(m.ResponseHeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ResponseHeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ResponseHeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.InternalOnlyHeaders) > 0 { + for iNdEx := len(m.InternalOnlyHeaders) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.InternalOnlyHeaders[iNdEx]) + copy(dAtA[i:], m.InternalOnlyHeaders[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.InternalOnlyHeaders[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.VirtualHosts) > 0 { + for iNdEx := len(m.VirtualHosts) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.VirtualHosts[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Vhds) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Vhds) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Vhds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + 
return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ConfigSource != nil { + if vtmsg, ok := interface{}(m.ConfigSource).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConfigSource) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteConfiguration) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.VirtualHosts) > 0 { + for _, e := range m.VirtualHosts { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.InternalOnlyHeaders) > 0 { + for _, s := range m.InternalOnlyHeaders { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResponseHeadersToAdd) > 0 { + for _, e := range m.ResponseHeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResponseHeadersToRemove) > 0 { + for _, s := range m.ResponseHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RequestHeadersToAdd) > 0 { + for _, e := range m.RequestHeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.ValidateClusters != nil { + l = (*wrapperspb.BoolValue)(m.ValidateClusters).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RequestHeadersToRemove) > 0 { + for _, s := range m.RequestHeadersToRemove { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Vhds != nil { + l = m.Vhds.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MostSpecificHeaderMutationsWins { + n += 2 + } + if m.MaxDirectResponseBodySizeBytes != nil { + l = (*wrapperspb.UInt32Value)(m.MaxDirectResponseBodySizeBytes).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ClusterSpecifierPlugins) > 0 { + for _, e := range m.ClusterSpecifierPlugins { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RequestMirrorPolicies) > 0 { + for _, e := range m.RequestMirrorPolicies { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.IgnorePortInHostMatching { + n += 2 + } + if m.IgnorePathParametersInPathMatching { + n += 2 + } + if len(m.TypedPerFilterConfig) > 0 { + for k, v := range m.TypedPerFilterConfig { + _ = k + _ = v + l = 0 + if v != nil { + l = (*anypb.Any)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 2 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 2 + l 
+ protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Vhds) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConfigSource != nil { + if size, ok := interface{}(m.ConfigSource).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ConfigSource) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.go new file mode 100644 index 0000000000..1c02988b69 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.go @@ -0,0 +1,464 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/route/v3/scoped_route.proto + +package routev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Specifies a routing scope, which associates a +// :ref:`Key` to a +// :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. +// The :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` can be obtained dynamically +// via RDS (:ref:`route_configuration_name`) +// or specified inline (:ref:`route_configuration`). +// +// The HTTP connection manager builds up a table consisting of these Key to +// RouteConfiguration mappings, and looks up the RouteConfiguration to use per +// request according to the algorithm specified in the +// :ref:`scope_key_builder` +// assigned to the HttpConnectionManager. +// +// For example, with the following configurations (in YAML): +// +// HttpConnectionManager config: +// +// .. code:: +// +// ... +// scoped_routes: +// name: foo-scoped-routes +// scope_key_builder: +// fragments: +// - header_value_extractor: +// name: X-Route-Selector +// element_separator: "," +// element: +// separator: = +// key: vip +// +// ScopedRouteConfiguration resources (specified statically via +// :ref:`scoped_route_configurations_list` +// or obtained dynamically via SRDS): +// +// .. code:: +// +// (1) +// name: route-scope1 +// route_configuration_name: route-config1 +// key: +// fragments: +// - string_key: 172.10.10.20 +// +// (2) +// name: route-scope2 +// route_configuration_name: route-config2 +// key: +// fragments: +// - string_key: 172.20.20.30 +// +// A request from a client such as: +// +// .. code:: +// +// GET / HTTP/1.1 +// Host: foo.com +// X-Route-Selector: vip=172.10.10.20 +// +// would result in the routing table defined by the “route-config1“ +// RouteConfiguration being assigned to the HTTP request/stream. +// +// [#next-free-field: 6] +type ScopedRouteConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether the RouteConfiguration should be loaded on demand. 
+ OnDemand bool `protobuf:"varint,4,opt,name=on_demand,json=onDemand,proto3" json:"on_demand,omitempty"` + // The name assigned to the routing scope. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The resource name to use for a :ref:`envoy_v3_api_msg_service.discovery.v3.DiscoveryRequest` to an + // RDS server to fetch the :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` associated + // with this scope. + RouteConfigurationName string `protobuf:"bytes,2,opt,name=route_configuration_name,json=routeConfigurationName,proto3" json:"route_configuration_name,omitempty"` + // The :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` associated with the scope. + RouteConfiguration *RouteConfiguration `protobuf:"bytes,5,opt,name=route_configuration,json=routeConfiguration,proto3" json:"route_configuration,omitempty"` + // The key to match against. + Key *ScopedRouteConfiguration_Key `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` +} + +func (x *ScopedRouteConfiguration) Reset() { + *x = ScopedRouteConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_scoped_route_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRouteConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRouteConfiguration) ProtoMessage() {} + +func (x *ScopedRouteConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_scoped_route_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRouteConfiguration.ProtoReflect.Descriptor instead. +func (*ScopedRouteConfiguration) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_scoped_route_proto_rawDescGZIP(), []int{0} +} + +func (x *ScopedRouteConfiguration) GetOnDemand() bool { + if x != nil { + return x.OnDemand + } + return false +} + +func (x *ScopedRouteConfiguration) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ScopedRouteConfiguration) GetRouteConfigurationName() string { + if x != nil { + return x.RouteConfigurationName + } + return "" +} + +func (x *ScopedRouteConfiguration) GetRouteConfiguration() *RouteConfiguration { + if x != nil { + return x.RouteConfiguration + } + return nil +} + +func (x *ScopedRouteConfiguration) GetKey() *ScopedRouteConfiguration_Key { + if x != nil { + return x.Key + } + return nil +} + +// Specifies a key which is matched against the output of the +// :ref:`scope_key_builder` +// specified in the HttpConnectionManager. The matching is done per HTTP +// request and is dependent on the order of the fragments contained in the +// Key. +type ScopedRouteConfiguration_Key struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The ordered set of fragments to match against. The order must match the + // fragments in the corresponding + // :ref:`scope_key_builder`. 
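+	// For illustration only (an editorial sketch, not text generated from the proto),
+	// a control plane could populate a key equivalent to the ``string_key: 172.10.10.20``
+	// example above using the generated Go types, assuming this package is imported as
+	// routev3:
+	//
+	// .. code::
+	//
+	//	key := &routev3.ScopedRouteConfiguration_Key{
+	//		Fragments: []*routev3.ScopedRouteConfiguration_Key_Fragment{
+	//			{Type: &routev3.ScopedRouteConfiguration_Key_Fragment_StringKey{StringKey: "172.10.10.20"}},
+	//		},
+	//	}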
+ Fragments []*ScopedRouteConfiguration_Key_Fragment `protobuf:"bytes,1,rep,name=fragments,proto3" json:"fragments,omitempty"` +} + +func (x *ScopedRouteConfiguration_Key) Reset() { + *x = ScopedRouteConfiguration_Key{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_scoped_route_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRouteConfiguration_Key) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRouteConfiguration_Key) ProtoMessage() {} + +func (x *ScopedRouteConfiguration_Key) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_scoped_route_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRouteConfiguration_Key.ProtoReflect.Descriptor instead. +func (*ScopedRouteConfiguration_Key) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_scoped_route_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *ScopedRouteConfiguration_Key) GetFragments() []*ScopedRouteConfiguration_Key_Fragment { + if x != nil { + return x.Fragments + } + return nil +} + +type ScopedRouteConfiguration_Key_Fragment struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Type: + // + // *ScopedRouteConfiguration_Key_Fragment_StringKey + Type isScopedRouteConfiguration_Key_Fragment_Type `protobuf_oneof:"type"` +} + +func (x *ScopedRouteConfiguration_Key_Fragment) Reset() { + *x = ScopedRouteConfiguration_Key_Fragment{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_route_v3_scoped_route_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRouteConfiguration_Key_Fragment) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRouteConfiguration_Key_Fragment) ProtoMessage() {} + +func (x *ScopedRouteConfiguration_Key_Fragment) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_route_v3_scoped_route_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRouteConfiguration_Key_Fragment.ProtoReflect.Descriptor instead. +func (*ScopedRouteConfiguration_Key_Fragment) Descriptor() ([]byte, []int) { + return file_envoy_config_route_v3_scoped_route_proto_rawDescGZIP(), []int{0, 0, 0} +} + +func (m *ScopedRouteConfiguration_Key_Fragment) GetType() isScopedRouteConfiguration_Key_Fragment_Type { + if m != nil { + return m.Type + } + return nil +} + +func (x *ScopedRouteConfiguration_Key_Fragment) GetStringKey() string { + if x, ok := x.GetType().(*ScopedRouteConfiguration_Key_Fragment_StringKey); ok { + return x.StringKey + } + return "" +} + +type isScopedRouteConfiguration_Key_Fragment_Type interface { + isScopedRouteConfiguration_Key_Fragment_Type() +} + +type ScopedRouteConfiguration_Key_Fragment_StringKey struct { + // A string to match against. 
+ StringKey string `protobuf:"bytes,1,opt,name=string_key,json=stringKey,proto3,oneof"` +} + +func (*ScopedRouteConfiguration_Key_Fragment_StringKey) isScopedRouteConfiguration_Key_Fragment_Type() { +} + +var File_envoy_config_route_v3_scoped_route_proto protoreflect.FileDescriptor + +var file_envoy_config_route_v3_scoped_route_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x1a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xaa, 0x05, 0x0a, 0x18, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, + 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x6f, 0x6e, 0x44, 0x65, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x18, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x14, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x0e, + 0x12, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x70, 0x0a, 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x14, + 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x0e, 0x12, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x12, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4f, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 
0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, + 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, + 0x01, 0x02, 0x10, 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x1a, 0x92, 0x02, 0x0a, 0x03, 0x4b, 0x65, + 0x79, 0x12, 0x64, 0x0a, 0x09, 0x66, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, + 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x2e, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, + 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x09, 0x66, 0x72, + 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x73, 0x0a, 0x08, 0x46, 0x72, 0x61, 0x67, 0x6d, + 0x65, 0x6e, 0x74, 0x12, 0x1f, 0x0a, 0x0a, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x4b, 0x65, 0x79, 0x3a, 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x2e, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, + 0x0b, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x30, 0x9a, 0xc5, + 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x3a, 0x2c, + 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x87, 0x01, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x53, 0x63, + 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_route_v3_scoped_route_proto_rawDescOnce sync.Once + file_envoy_config_route_v3_scoped_route_proto_rawDescData = file_envoy_config_route_v3_scoped_route_proto_rawDesc +) + +func file_envoy_config_route_v3_scoped_route_proto_rawDescGZIP() []byte { + 
file_envoy_config_route_v3_scoped_route_proto_rawDescOnce.Do(func() { + file_envoy_config_route_v3_scoped_route_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_route_v3_scoped_route_proto_rawDescData) + }) + return file_envoy_config_route_v3_scoped_route_proto_rawDescData +} + +var file_envoy_config_route_v3_scoped_route_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_envoy_config_route_v3_scoped_route_proto_goTypes = []interface{}{ + (*ScopedRouteConfiguration)(nil), // 0: envoy.config.route.v3.ScopedRouteConfiguration + (*ScopedRouteConfiguration_Key)(nil), // 1: envoy.config.route.v3.ScopedRouteConfiguration.Key + (*ScopedRouteConfiguration_Key_Fragment)(nil), // 2: envoy.config.route.v3.ScopedRouteConfiguration.Key.Fragment + (*RouteConfiguration)(nil), // 3: envoy.config.route.v3.RouteConfiguration +} +var file_envoy_config_route_v3_scoped_route_proto_depIdxs = []int32{ + 3, // 0: envoy.config.route.v3.ScopedRouteConfiguration.route_configuration:type_name -> envoy.config.route.v3.RouteConfiguration + 1, // 1: envoy.config.route.v3.ScopedRouteConfiguration.key:type_name -> envoy.config.route.v3.ScopedRouteConfiguration.Key + 2, // 2: envoy.config.route.v3.ScopedRouteConfiguration.Key.fragments:type_name -> envoy.config.route.v3.ScopedRouteConfiguration.Key.Fragment + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_envoy_config_route_v3_scoped_route_proto_init() } +func file_envoy_config_route_v3_scoped_route_proto_init() { + if File_envoy_config_route_v3_scoped_route_proto != nil { + return + } + file_envoy_config_route_v3_route_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_config_route_v3_scoped_route_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRouteConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_scoped_route_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRouteConfiguration_Key); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_route_v3_scoped_route_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRouteConfiguration_Key_Fragment); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_route_v3_scoped_route_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*ScopedRouteConfiguration_Key_Fragment_StringKey)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_route_v3_scoped_route_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_route_v3_scoped_route_proto_goTypes, + DependencyIndexes: file_envoy_config_route_v3_scoped_route_proto_depIdxs, + MessageInfos: file_envoy_config_route_v3_scoped_route_proto_msgTypes, + }.Build() + File_envoy_config_route_v3_scoped_route_proto = out.File + file_envoy_config_route_v3_scoped_route_proto_rawDesc = nil + 
file_envoy_config_route_v3_scoped_route_proto_goTypes = nil + file_envoy_config_route_v3_scoped_route_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.validate.go new file mode 100644 index 0000000000..54c187ce19 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route.pb.validate.go @@ -0,0 +1,505 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/route/v3/scoped_route.proto + +package routev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ScopedRouteConfiguration with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ScopedRouteConfiguration) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ScopedRouteConfiguration with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ScopedRouteConfigurationMultiError, or nil if none found. +func (m *ScopedRouteConfiguration) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRouteConfiguration) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for OnDemand + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := ScopedRouteConfigurationValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for RouteConfigurationName + + if all { + switch v := interface{}(m.GetRouteConfiguration()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRouteConfigurationValidationError{ + field: "RouteConfiguration", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRouteConfigurationValidationError{ + field: "RouteConfiguration", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRouteConfiguration()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRouteConfigurationValidationError{ + field: "RouteConfiguration", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if m.GetKey() == nil { + err := ScopedRouteConfigurationValidationError{ + field: "Key", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetKey()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, 
ScopedRouteConfigurationValidationError{ + field: "Key", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRouteConfigurationValidationError{ + field: "Key", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetKey()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRouteConfigurationValidationError{ + field: "Key", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ScopedRouteConfigurationMultiError(errors) + } + + return nil +} + +// ScopedRouteConfigurationMultiError is an error wrapping multiple validation +// errors returned by ScopedRouteConfiguration.ValidateAll() if the designated +// constraints aren't met. +type ScopedRouteConfigurationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRouteConfigurationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopedRouteConfigurationMultiError) AllErrors() []error { return m } + +// ScopedRouteConfigurationValidationError is the validation error returned by +// ScopedRouteConfiguration.Validate if the designated constraints aren't met. +type ScopedRouteConfigurationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRouteConfigurationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScopedRouteConfigurationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScopedRouteConfigurationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScopedRouteConfigurationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ScopedRouteConfigurationValidationError) ErrorName() string { + return "ScopedRouteConfigurationValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopedRouteConfigurationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRouteConfiguration.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRouteConfigurationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRouteConfigurationValidationError{} + +// Validate checks the field values on ScopedRouteConfiguration_Key with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ScopedRouteConfiguration_Key) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ScopedRouteConfiguration_Key with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ScopedRouteConfiguration_KeyMultiError, or nil if none found. 
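+// For illustration only (an editorial sketch, not generated output), a caller that
+// wants every violation rather than only the first one could use ValidateAll and
+// unwrap the multi-error, assuming key is a *ScopedRouteConfiguration_Key and the
+// errors and log packages are imported:
+//
+//	if err := key.ValidateAll(); err != nil {
+//		var multi ScopedRouteConfiguration_KeyMultiError
+//		if errors.As(err, &multi) {
+//			for _, violation := range multi.AllErrors() {
+//				log.Println(violation)
+//			}
+//		}
+//	}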
+func (m *ScopedRouteConfiguration_Key) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRouteConfiguration_Key) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetFragments()) < 1 { + err := ScopedRouteConfiguration_KeyValidationError{ + field: "Fragments", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetFragments() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRouteConfiguration_KeyValidationError{ + field: fmt.Sprintf("Fragments[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRouteConfiguration_KeyValidationError{ + field: fmt.Sprintf("Fragments[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRouteConfiguration_KeyValidationError{ + field: fmt.Sprintf("Fragments[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ScopedRouteConfiguration_KeyMultiError(errors) + } + + return nil +} + +// ScopedRouteConfiguration_KeyMultiError is an error wrapping multiple +// validation errors returned by ScopedRouteConfiguration_Key.ValidateAll() if +// the designated constraints aren't met. +type ScopedRouteConfiguration_KeyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRouteConfiguration_KeyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopedRouteConfiguration_KeyMultiError) AllErrors() []error { return m } + +// ScopedRouteConfiguration_KeyValidationError is the validation error returned +// by ScopedRouteConfiguration_Key.Validate if the designated constraints +// aren't met. +type ScopedRouteConfiguration_KeyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRouteConfiguration_KeyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScopedRouteConfiguration_KeyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScopedRouteConfiguration_KeyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScopedRouteConfiguration_KeyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ScopedRouteConfiguration_KeyValidationError) ErrorName() string { + return "ScopedRouteConfiguration_KeyValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopedRouteConfiguration_KeyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRouteConfiguration_Key.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRouteConfiguration_KeyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRouteConfiguration_KeyValidationError{} + +// Validate checks the field values on ScopedRouteConfiguration_Key_Fragment +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *ScopedRouteConfiguration_Key_Fragment) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ScopedRouteConfiguration_Key_Fragment +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// ScopedRouteConfiguration_Key_FragmentMultiError, or nil if none found. +func (m *ScopedRouteConfiguration_Key_Fragment) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRouteConfiguration_Key_Fragment) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofTypePresent := false + switch v := m.Type.(type) { + case *ScopedRouteConfiguration_Key_Fragment_StringKey: + if v == nil { + err := ScopedRouteConfiguration_Key_FragmentValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTypePresent = true + // no validation rules for StringKey + default: + _ = v // ensures v is used + } + if !oneofTypePresent { + err := ScopedRouteConfiguration_Key_FragmentValidationError{ + field: "Type", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ScopedRouteConfiguration_Key_FragmentMultiError(errors) + } + + return nil +} + +// ScopedRouteConfiguration_Key_FragmentMultiError is an error wrapping +// multiple validation errors returned by +// ScopedRouteConfiguration_Key_Fragment.ValidateAll() if the designated +// constraints aren't met. +type ScopedRouteConfiguration_Key_FragmentMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRouteConfiguration_Key_FragmentMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopedRouteConfiguration_Key_FragmentMultiError) AllErrors() []error { return m } + +// ScopedRouteConfiguration_Key_FragmentValidationError is the validation error +// returned by ScopedRouteConfiguration_Key_Fragment.Validate if the +// designated constraints aren't met. +type ScopedRouteConfiguration_Key_FragmentValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e ScopedRouteConfiguration_Key_FragmentValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScopedRouteConfiguration_Key_FragmentValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScopedRouteConfiguration_Key_FragmentValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScopedRouteConfiguration_Key_FragmentValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ScopedRouteConfiguration_Key_FragmentValidationError) ErrorName() string { + return "ScopedRouteConfiguration_Key_FragmentValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopedRouteConfiguration_Key_FragmentValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRouteConfiguration_Key_Fragment.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRouteConfiguration_Key_FragmentValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRouteConfiguration_Key_FragmentValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route_vtproto.pb.go new file mode 100644 index 0000000000..1e6a7e81cf --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/scoped_route_vtproto.pb.go @@ -0,0 +1,263 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/route/v3/scoped_route.proto + +package routev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ScopedRouteConfiguration_Key_Fragment) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRouteConfiguration_Key_Fragment) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRouteConfiguration_Key_Fragment) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Type.(*ScopedRouteConfiguration_Key_Fragment_StringKey); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *ScopedRouteConfiguration_Key_Fragment_StringKey) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRouteConfiguration_Key_Fragment_StringKey) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringKey) + copy(dAtA[i:], m.StringKey) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StringKey))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *ScopedRouteConfiguration_Key) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRouteConfiguration_Key) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRouteConfiguration_Key) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Fragments) > 0 { + for iNdEx := len(m.Fragments) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Fragments[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ScopedRouteConfiguration) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRouteConfiguration) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRouteConfiguration) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RouteConfiguration != nil { + size, err := m.RouteConfiguration.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + 
dAtA[i] = 0x2a + } + if m.OnDemand { + i-- + if m.OnDemand { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.Key != nil { + size, err := m.Key.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.RouteConfigurationName) > 0 { + i -= len(m.RouteConfigurationName) + copy(dAtA[i:], m.RouteConfigurationName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RouteConfigurationName))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScopedRouteConfiguration_Key_Fragment) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Type.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRouteConfiguration_Key_Fragment_StringKey) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.StringKey) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *ScopedRouteConfiguration_Key) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Fragments) > 0 { + for _, e := range m.Fragments { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRouteConfiguration) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RouteConfigurationName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Key != nil { + l = m.Key.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OnDemand { + n += 2 + } + if m.RouteConfiguration != nil { + l = m.RouteConfiguration.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.go new file mode 100644 index 0000000000..284516c99e --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.go @@ -0,0 +1,1642 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/tap/v3/common.proto + +package tapv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v32 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Output format. All output is in the form of one or more :ref:`TraceWrapper +// ` messages. This enumeration indicates +// how those messages are written. Note that not all sinks support all output formats. See +// individual sink documentation for more information. +type OutputSink_Format int32 + +const ( + // Each message will be written as JSON. Any :ref:`body ` + // data will be present in the :ref:`as_bytes + // ` field. This means that body data will be + // base64 encoded as per the `proto3 JSON mappings + // `_. + OutputSink_JSON_BODY_AS_BYTES OutputSink_Format = 0 + // Each message will be written as JSON. Any :ref:`body ` + // data will be present in the :ref:`as_string + // ` field. This means that body data will be + // string encoded as per the `proto3 JSON mappings + // `_. This format type is + // useful when it is known that that body is human readable (e.g., JSON over HTTP) and the + // user wishes to view it directly without being forced to base64 decode the body. + OutputSink_JSON_BODY_AS_STRING OutputSink_Format = 1 + // Binary proto format. Note that binary proto is not self-delimiting. If a sink writes + // multiple binary messages without any length information the data stream will not be + // useful. However, for certain sinks that are self-delimiting (e.g., one message per file) + // this output format makes consumption simpler. + OutputSink_PROTO_BINARY OutputSink_Format = 2 + // Messages are written as a sequence tuples, where each tuple is the message length encoded + // as a `protobuf 32-bit varint + // `_ + // followed by the binary message. The messages can be read back using the language specific + // protobuf coded stream implementation to obtain the message length and the message. + OutputSink_PROTO_BINARY_LENGTH_DELIMITED OutputSink_Format = 3 + // Text proto format. + OutputSink_PROTO_TEXT OutputSink_Format = 4 +) + +// Enum value maps for OutputSink_Format. +var ( + OutputSink_Format_name = map[int32]string{ + 0: "JSON_BODY_AS_BYTES", + 1: "JSON_BODY_AS_STRING", + 2: "PROTO_BINARY", + 3: "PROTO_BINARY_LENGTH_DELIMITED", + 4: "PROTO_TEXT", + } + OutputSink_Format_value = map[string]int32{ + "JSON_BODY_AS_BYTES": 0, + "JSON_BODY_AS_STRING": 1, + "PROTO_BINARY": 2, + "PROTO_BINARY_LENGTH_DELIMITED": 3, + "PROTO_TEXT": 4, + } +) + +func (x OutputSink_Format) Enum() *OutputSink_Format { + p := new(OutputSink_Format) + *p = x + return p +} + +func (x OutputSink_Format) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (OutputSink_Format) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_tap_v3_common_proto_enumTypes[0].Descriptor() +} + +func (OutputSink_Format) Type() protoreflect.EnumType { + return &file_envoy_config_tap_v3_common_proto_enumTypes[0] +} + +func (x OutputSink_Format) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use OutputSink_Format.Descriptor instead. +func (OutputSink_Format) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{5, 0} +} + +// Tap configuration. +type TapConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The match configuration. If the configuration matches the data source being tapped, a tap will + // occur, with the result written to the configured output. 
+ // Exactly one of :ref:`match ` and + // :ref:`match_config ` must be set. If both + // are set, the :ref:`match ` will be used. + // + // Deprecated: Marked as deprecated in envoy/config/tap/v3/common.proto. + MatchConfig *MatchPredicate `protobuf:"bytes,1,opt,name=match_config,json=matchConfig,proto3" json:"match_config,omitempty"` + // The match configuration. If the configuration matches the data source being tapped, a tap will + // occur, with the result written to the configured output. + // Exactly one of :ref:`match ` and + // :ref:`match_config ` must be set. If both + // are set, the :ref:`match ` will be used. + Match *v3.MatchPredicate `protobuf:"bytes,4,opt,name=match,proto3" json:"match,omitempty"` + // The tap output configuration. If a match configuration matches a data source being tapped, + // a tap will occur and the data will be written to the configured output. + OutputConfig *OutputConfig `protobuf:"bytes,2,opt,name=output_config,json=outputConfig,proto3" json:"output_config,omitempty"` + // [#not-implemented-hide:] Specify if Tap matching is enabled. The % of requests\connections for + // which the tap matching is enabled. When not enabled, the request\connection will not be + // recorded. + // + // .. note:: + // + // This field defaults to 100/:ref:`HUNDRED + // `. + TapEnabled *v31.RuntimeFractionalPercent `protobuf:"bytes,3,opt,name=tap_enabled,json=tapEnabled,proto3" json:"tap_enabled,omitempty"` +} + +func (x *TapConfig) Reset() { + *x = TapConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TapConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TapConfig) ProtoMessage() {} + +func (x *TapConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TapConfig.ProtoReflect.Descriptor instead. +func (*TapConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{0} +} + +// Deprecated: Marked as deprecated in envoy/config/tap/v3/common.proto. +func (x *TapConfig) GetMatchConfig() *MatchPredicate { + if x != nil { + return x.MatchConfig + } + return nil +} + +func (x *TapConfig) GetMatch() *v3.MatchPredicate { + if x != nil { + return x.Match + } + return nil +} + +func (x *TapConfig) GetOutputConfig() *OutputConfig { + if x != nil { + return x.OutputConfig + } + return nil +} + +func (x *TapConfig) GetTapEnabled() *v31.RuntimeFractionalPercent { + if x != nil { + return x.TapEnabled + } + return nil +} + +// Tap match configuration. This is a recursive structure which allows complex nested match +// configurations to be built using various logical operators. 
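+// For illustration only (an editorial sketch, not text generated from the proto), the
+// recursion lets predicates nest; for example, a predicate that never matches can be
+// written as the negation of an always-match rule, assuming this package is imported
+// as tapv3:
+//
+//	never := &tapv3.MatchPredicate{
+//		Rule: &tapv3.MatchPredicate_NotMatch{
+//			NotMatch: &tapv3.MatchPredicate{
+//				Rule: &tapv3.MatchPredicate_AnyMatch{AnyMatch: true},
+//			},
+//		},
+//	}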
+// [#next-free-field: 11] +type MatchPredicate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Rule: + // + // *MatchPredicate_OrMatch + // *MatchPredicate_AndMatch + // *MatchPredicate_NotMatch + // *MatchPredicate_AnyMatch + // *MatchPredicate_HttpRequestHeadersMatch + // *MatchPredicate_HttpRequestTrailersMatch + // *MatchPredicate_HttpResponseHeadersMatch + // *MatchPredicate_HttpResponseTrailersMatch + // *MatchPredicate_HttpRequestGenericBodyMatch + // *MatchPredicate_HttpResponseGenericBodyMatch + Rule isMatchPredicate_Rule `protobuf_oneof:"rule"` +} + +func (x *MatchPredicate) Reset() { + *x = MatchPredicate{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MatchPredicate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MatchPredicate) ProtoMessage() {} + +func (x *MatchPredicate) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MatchPredicate.ProtoReflect.Descriptor instead. +func (*MatchPredicate) Descriptor() ([]byte, []int) { + return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{1} +} + +func (m *MatchPredicate) GetRule() isMatchPredicate_Rule { + if m != nil { + return m.Rule + } + return nil +} + +func (x *MatchPredicate) GetOrMatch() *MatchPredicate_MatchSet { + if x, ok := x.GetRule().(*MatchPredicate_OrMatch); ok { + return x.OrMatch + } + return nil +} + +func (x *MatchPredicate) GetAndMatch() *MatchPredicate_MatchSet { + if x, ok := x.GetRule().(*MatchPredicate_AndMatch); ok { + return x.AndMatch + } + return nil +} + +func (x *MatchPredicate) GetNotMatch() *MatchPredicate { + if x, ok := x.GetRule().(*MatchPredicate_NotMatch); ok { + return x.NotMatch + } + return nil +} + +func (x *MatchPredicate) GetAnyMatch() bool { + if x, ok := x.GetRule().(*MatchPredicate_AnyMatch); ok { + return x.AnyMatch + } + return false +} + +func (x *MatchPredicate) GetHttpRequestHeadersMatch() *HttpHeadersMatch { + if x, ok := x.GetRule().(*MatchPredicate_HttpRequestHeadersMatch); ok { + return x.HttpRequestHeadersMatch + } + return nil +} + +func (x *MatchPredicate) GetHttpRequestTrailersMatch() *HttpHeadersMatch { + if x, ok := x.GetRule().(*MatchPredicate_HttpRequestTrailersMatch); ok { + return x.HttpRequestTrailersMatch + } + return nil +} + +func (x *MatchPredicate) GetHttpResponseHeadersMatch() *HttpHeadersMatch { + if x, ok := x.GetRule().(*MatchPredicate_HttpResponseHeadersMatch); ok { + return x.HttpResponseHeadersMatch + } + return nil +} + +func (x *MatchPredicate) GetHttpResponseTrailersMatch() *HttpHeadersMatch { + if x, ok := x.GetRule().(*MatchPredicate_HttpResponseTrailersMatch); ok { + return x.HttpResponseTrailersMatch + } + return nil +} + +func (x *MatchPredicate) GetHttpRequestGenericBodyMatch() *HttpGenericBodyMatch { + if x, ok := x.GetRule().(*MatchPredicate_HttpRequestGenericBodyMatch); ok { + return x.HttpRequestGenericBodyMatch + } + return nil +} + +func (x *MatchPredicate) GetHttpResponseGenericBodyMatch() *HttpGenericBodyMatch { + if x, ok := x.GetRule().(*MatchPredicate_HttpResponseGenericBodyMatch); ok { + 
return x.HttpResponseGenericBodyMatch + } + return nil +} + +type isMatchPredicate_Rule interface { + isMatchPredicate_Rule() +} + +type MatchPredicate_OrMatch struct { + // A set that describes a logical OR. If any member of the set matches, the match configuration + // matches. + OrMatch *MatchPredicate_MatchSet `protobuf:"bytes,1,opt,name=or_match,json=orMatch,proto3,oneof"` +} + +type MatchPredicate_AndMatch struct { + // A set that describes a logical AND. If all members of the set match, the match configuration + // matches. + AndMatch *MatchPredicate_MatchSet `protobuf:"bytes,2,opt,name=and_match,json=andMatch,proto3,oneof"` +} + +type MatchPredicate_NotMatch struct { + // A negation match. The match configuration will match if the negated match condition matches. + NotMatch *MatchPredicate `protobuf:"bytes,3,opt,name=not_match,json=notMatch,proto3,oneof"` +} + +type MatchPredicate_AnyMatch struct { + // The match configuration will always match. + AnyMatch bool `protobuf:"varint,4,opt,name=any_match,json=anyMatch,proto3,oneof"` +} + +type MatchPredicate_HttpRequestHeadersMatch struct { + // HTTP request headers match configuration. + HttpRequestHeadersMatch *HttpHeadersMatch `protobuf:"bytes,5,opt,name=http_request_headers_match,json=httpRequestHeadersMatch,proto3,oneof"` +} + +type MatchPredicate_HttpRequestTrailersMatch struct { + // HTTP request trailers match configuration. + HttpRequestTrailersMatch *HttpHeadersMatch `protobuf:"bytes,6,opt,name=http_request_trailers_match,json=httpRequestTrailersMatch,proto3,oneof"` +} + +type MatchPredicate_HttpResponseHeadersMatch struct { + // HTTP response headers match configuration. + HttpResponseHeadersMatch *HttpHeadersMatch `protobuf:"bytes,7,opt,name=http_response_headers_match,json=httpResponseHeadersMatch,proto3,oneof"` +} + +type MatchPredicate_HttpResponseTrailersMatch struct { + // HTTP response trailers match configuration. + HttpResponseTrailersMatch *HttpHeadersMatch `protobuf:"bytes,8,opt,name=http_response_trailers_match,json=httpResponseTrailersMatch,proto3,oneof"` +} + +type MatchPredicate_HttpRequestGenericBodyMatch struct { + // HTTP request generic body match configuration. + HttpRequestGenericBodyMatch *HttpGenericBodyMatch `protobuf:"bytes,9,opt,name=http_request_generic_body_match,json=httpRequestGenericBodyMatch,proto3,oneof"` +} + +type MatchPredicate_HttpResponseGenericBodyMatch struct { + // HTTP response generic body match configuration. + HttpResponseGenericBodyMatch *HttpGenericBodyMatch `protobuf:"bytes,10,opt,name=http_response_generic_body_match,json=httpResponseGenericBodyMatch,proto3,oneof"` +} + +func (*MatchPredicate_OrMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_AndMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_NotMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_AnyMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_HttpRequestHeadersMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_HttpRequestTrailersMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_HttpResponseHeadersMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_HttpResponseTrailersMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_HttpRequestGenericBodyMatch) isMatchPredicate_Rule() {} + +func (*MatchPredicate_HttpResponseGenericBodyMatch) isMatchPredicate_Rule() {} + +// HTTP headers match configuration. +type HttpHeadersMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // HTTP headers to match. 
+ Headers []*v32.HeaderMatcher `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` +} + +func (x *HttpHeadersMatch) Reset() { + *x = HttpHeadersMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpHeadersMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpHeadersMatch) ProtoMessage() {} + +func (x *HttpHeadersMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpHeadersMatch.ProtoReflect.Descriptor instead. +func (*HttpHeadersMatch) Descriptor() ([]byte, []int) { + return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{2} +} + +func (x *HttpHeadersMatch) GetHeaders() []*v32.HeaderMatcher { + if x != nil { + return x.Headers + } + return nil +} + +// HTTP generic body match configuration. +// List of text strings and hex strings to be located in HTTP body. +// All specified strings must be found in the HTTP body for positive match. +// The search may be limited to specified number of bytes from the body start. +// +// .. attention:: +// +// Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. +// If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified +// to scan only part of the http body. +type HttpGenericBodyMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). + BytesLimit uint32 `protobuf:"varint,1,opt,name=bytes_limit,json=bytesLimit,proto3" json:"bytes_limit,omitempty"` + // List of patterns to match. + Patterns []*HttpGenericBodyMatch_GenericTextMatch `protobuf:"bytes,2,rep,name=patterns,proto3" json:"patterns,omitempty"` +} + +func (x *HttpGenericBodyMatch) Reset() { + *x = HttpGenericBodyMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpGenericBodyMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpGenericBodyMatch) ProtoMessage() {} + +func (x *HttpGenericBodyMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpGenericBodyMatch.ProtoReflect.Descriptor instead. +func (*HttpGenericBodyMatch) Descriptor() ([]byte, []int) { + return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{3} +} + +func (x *HttpGenericBodyMatch) GetBytesLimit() uint32 { + if x != nil { + return x.BytesLimit + } + return 0 +} + +func (x *HttpGenericBodyMatch) GetPatterns() []*HttpGenericBodyMatch_GenericTextMatch { + if x != nil { + return x.Patterns + } + return nil +} + +// Tap output configuration. 
+type OutputConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output sinks for tap data. Currently a single sink is allowed in the list. Once multiple + // sink types are supported this constraint will be relaxed. + Sinks []*OutputSink `protobuf:"bytes,1,rep,name=sinks,proto3" json:"sinks,omitempty"` + // For buffered tapping, the maximum amount of received body that will be buffered prior to + // truncation. If truncation occurs, the :ref:`truncated + // ` field will be set. If not specified, the + // default is 1KiB. + MaxBufferedRxBytes *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=max_buffered_rx_bytes,json=maxBufferedRxBytes,proto3" json:"max_buffered_rx_bytes,omitempty"` + // For buffered tapping, the maximum amount of transmitted body that will be buffered prior to + // truncation. If truncation occurs, the :ref:`truncated + // ` field will be set. If not specified, the + // default is 1KiB. + MaxBufferedTxBytes *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=max_buffered_tx_bytes,json=maxBufferedTxBytes,proto3" json:"max_buffered_tx_bytes,omitempty"` + // Indicates whether taps produce a single buffered message per tap, or multiple streamed + // messages per tap in the emitted :ref:`TraceWrapper + // ` messages. Note that streamed tapping does not + // mean that no buffering takes place. Buffering may be required if data is processed before a + // match can be determined. See the HTTP tap filter :ref:`streaming + // ` documentation for more information. + Streaming bool `protobuf:"varint,4,opt,name=streaming,proto3" json:"streaming,omitempty"` +} + +func (x *OutputConfig) Reset() { + *x = OutputConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OutputConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OutputConfig) ProtoMessage() {} + +func (x *OutputConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OutputConfig.ProtoReflect.Descriptor instead. +func (*OutputConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{4} +} + +func (x *OutputConfig) GetSinks() []*OutputSink { + if x != nil { + return x.Sinks + } + return nil +} + +func (x *OutputConfig) GetMaxBufferedRxBytes() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxBufferedRxBytes + } + return nil +} + +func (x *OutputConfig) GetMaxBufferedTxBytes() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxBufferedTxBytes + } + return nil +} + +func (x *OutputConfig) GetStreaming() bool { + if x != nil { + return x.Streaming + } + return false +} + +// Tap output sink configuration. +// [#next-free-field: 7] +type OutputSink struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Sink output format. 
+ Format OutputSink_Format `protobuf:"varint,1,opt,name=format,proto3,enum=envoy.config.tap.v3.OutputSink_Format" json:"format,omitempty"` + // Types that are assignable to OutputSinkType: + // + // *OutputSink_StreamingAdmin + // *OutputSink_FilePerTap + // *OutputSink_StreamingGrpc + // *OutputSink_BufferedAdmin + // *OutputSink_CustomSink + OutputSinkType isOutputSink_OutputSinkType `protobuf_oneof:"output_sink_type"` +} + +func (x *OutputSink) Reset() { + *x = OutputSink{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OutputSink) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OutputSink) ProtoMessage() {} + +func (x *OutputSink) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OutputSink.ProtoReflect.Descriptor instead. +func (*OutputSink) Descriptor() ([]byte, []int) { + return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{5} +} + +func (x *OutputSink) GetFormat() OutputSink_Format { + if x != nil { + return x.Format + } + return OutputSink_JSON_BODY_AS_BYTES +} + +func (m *OutputSink) GetOutputSinkType() isOutputSink_OutputSinkType { + if m != nil { + return m.OutputSinkType + } + return nil +} + +func (x *OutputSink) GetStreamingAdmin() *StreamingAdminSink { + if x, ok := x.GetOutputSinkType().(*OutputSink_StreamingAdmin); ok { + return x.StreamingAdmin + } + return nil +} + +func (x *OutputSink) GetFilePerTap() *FilePerTapSink { + if x, ok := x.GetOutputSinkType().(*OutputSink_FilePerTap); ok { + return x.FilePerTap + } + return nil +} + +func (x *OutputSink) GetStreamingGrpc() *StreamingGrpcSink { + if x, ok := x.GetOutputSinkType().(*OutputSink_StreamingGrpc); ok { + return x.StreamingGrpc + } + return nil +} + +func (x *OutputSink) GetBufferedAdmin() *BufferedAdminSink { + if x, ok := x.GetOutputSinkType().(*OutputSink_BufferedAdmin); ok { + return x.BufferedAdmin + } + return nil +} + +func (x *OutputSink) GetCustomSink() *v31.TypedExtensionConfig { + if x, ok := x.GetOutputSinkType().(*OutputSink_CustomSink); ok { + return x.CustomSink + } + return nil +} + +type isOutputSink_OutputSinkType interface { + isOutputSink_OutputSinkType() +} + +type OutputSink_StreamingAdmin struct { + // Tap output will be streamed out the :http:post:`/tap` admin endpoint. + // + // .. attention:: + // + // It is only allowed to specify the streaming admin output sink if the tap is being + // configured from the :http:post:`/tap` admin endpoint. Thus, if an extension has + // been configured to receive tap configuration from some other source (e.g., static + // file, XDS, etc.) configuring the streaming admin output type will fail. + StreamingAdmin *StreamingAdminSink `protobuf:"bytes,2,opt,name=streaming_admin,json=streamingAdmin,proto3,oneof"` +} + +type OutputSink_FilePerTap struct { + // Tap output will be written to a file per tap sink. + FilePerTap *FilePerTapSink `protobuf:"bytes,3,opt,name=file_per_tap,json=filePerTap,proto3,oneof"` +} + +type OutputSink_StreamingGrpc struct { + // [#not-implemented-hide:] + // GrpcService to stream data to. The format argument must be PROTO_BINARY. 
+ // [#comment: TODO(samflattery): remove cleanup in uber_per_filter.cc once implemented] + StreamingGrpc *StreamingGrpcSink `protobuf:"bytes,4,opt,name=streaming_grpc,json=streamingGrpc,proto3,oneof"` +} + +type OutputSink_BufferedAdmin struct { + // Tap output will be buffered in a single block before flushing to the :http:post:`/tap` admin endpoint + // + // .. attention:: + // + // It is only allowed to specify the buffered admin output sink if the tap is being + // configured from the :http:post:`/tap` admin endpoint. Thus, if an extension has + // been configured to receive tap configuration from some other source (e.g., static + // file, XDS, etc.) configuring the buffered admin output type will fail. + BufferedAdmin *BufferedAdminSink `protobuf:"bytes,5,opt,name=buffered_admin,json=bufferedAdmin,proto3,oneof"` +} + +type OutputSink_CustomSink struct { + // Tap output filter will be defined by an extension type + CustomSink *v31.TypedExtensionConfig `protobuf:"bytes,6,opt,name=custom_sink,json=customSink,proto3,oneof"` +} + +func (*OutputSink_StreamingAdmin) isOutputSink_OutputSinkType() {} + +func (*OutputSink_FilePerTap) isOutputSink_OutputSinkType() {} + +func (*OutputSink_StreamingGrpc) isOutputSink_OutputSinkType() {} + +func (*OutputSink_BufferedAdmin) isOutputSink_OutputSinkType() {} + +func (*OutputSink_CustomSink) isOutputSink_OutputSinkType() {} + +// Streaming admin sink configuration. +type StreamingAdminSink struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *StreamingAdminSink) Reset() { + *x = StreamingAdminSink{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StreamingAdminSink) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamingAdminSink) ProtoMessage() {} + +func (x *StreamingAdminSink) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamingAdminSink.ProtoReflect.Descriptor instead. +func (*StreamingAdminSink) Descriptor() ([]byte, []int) { + return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{6} +} + +// BufferedAdminSink configures a tap output to collect traces without returning them until +// one of multiple criteria are satisfied. +// Similar to StreamingAdminSink, it is only allowed to specify the buffered admin output +// sink if the tap is being configured from the “/tap“ admin endpoint. +type BufferedAdminSink struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Stop collecting traces when the specified number are collected. + // If other criteria for ending collection are reached first, this value will not be used. + MaxTraces uint64 `protobuf:"varint,1,opt,name=max_traces,json=maxTraces,proto3" json:"max_traces,omitempty"` + // Acts as a fallback to prevent the client from waiting for long periods of time. + // After timeout has occurred, a buffer flush will be triggered, returning the traces buffered so far. 
+ // This may result in returning fewer traces than were requested, and in the case that no traces are
+ // buffered during this time, no traces will be returned.
+ // Specifying 0 for the timeout value (or not specifying a value at all) indicates an infinite timeout.
+ Timeout *durationpb.Duration `protobuf:"bytes,2,opt,name=timeout,proto3" json:"timeout,omitempty"`
+}
+
+func (x *BufferedAdminSink) Reset() {
+ *x = BufferedAdminSink{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_tap_v3_common_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BufferedAdminSink) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BufferedAdminSink) ProtoMessage() {}
+
+func (x *BufferedAdminSink) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_tap_v3_common_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BufferedAdminSink.ProtoReflect.Descriptor instead.
+func (*BufferedAdminSink) Descriptor() ([]byte, []int) {
+ return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *BufferedAdminSink) GetMaxTraces() uint64 {
+ if x != nil {
+ return x.MaxTraces
+ }
+ return 0
+}
+
+func (x *BufferedAdminSink) GetTimeout() *durationpb.Duration {
+ if x != nil {
+ return x.Timeout
+ }
+ return nil
+}
+
+// The file per tap sink outputs a discrete file for every tapped stream.
+type FilePerTapSink struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Path prefix. The output file will be of the form <path_prefix>_<id>.pb, where <id> is an
+ // identifier distinguishing the recorded trace for stream instances (the Envoy
+ // connection ID, HTTP stream ID, etc.).
+ PathPrefix string `protobuf:"bytes,1,opt,name=path_prefix,json=pathPrefix,proto3" json:"path_prefix,omitempty"`
+}
+
+func (x *FilePerTapSink) Reset() {
+ *x = FilePerTapSink{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_tap_v3_common_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FilePerTapSink) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FilePerTapSink) ProtoMessage() {}
+
+func (x *FilePerTapSink) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_tap_v3_common_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FilePerTapSink.ProtoReflect.Descriptor instead.
+func (*FilePerTapSink) Descriptor() ([]byte, []int) {
+ return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *FilePerTapSink) GetPathPrefix() string {
+ if x != nil {
+ return x.PathPrefix
+ }
+ return ""
+}
+
+// [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC
+// server.
+type StreamingGrpcSink struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Opaque identifier that will be sent back to the streaming gRPC server.
+ TapId string `protobuf:"bytes,1,opt,name=tap_id,json=tapId,proto3" json:"tap_id,omitempty"`
+ // The gRPC server that hosts the Tap Sink Service.
+ GrpcService *v31.GrpcService `protobuf:"bytes,2,opt,name=grpc_service,json=grpcService,proto3" json:"grpc_service,omitempty"` +} + +func (x *StreamingGrpcSink) Reset() { + *x = StreamingGrpcSink{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StreamingGrpcSink) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamingGrpcSink) ProtoMessage() {} + +func (x *StreamingGrpcSink) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamingGrpcSink.ProtoReflect.Descriptor instead. +func (*StreamingGrpcSink) Descriptor() ([]byte, []int) { + return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{9} +} + +func (x *StreamingGrpcSink) GetTapId() string { + if x != nil { + return x.TapId + } + return "" +} + +func (x *StreamingGrpcSink) GetGrpcService() *v31.GrpcService { + if x != nil { + return x.GrpcService + } + return nil +} + +// A set of match configurations used for logical operations. +type MatchPredicate_MatchSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of rules that make up the set. + Rules []*MatchPredicate `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"` +} + +func (x *MatchPredicate_MatchSet) Reset() { + *x = MatchPredicate_MatchSet{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MatchPredicate_MatchSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MatchPredicate_MatchSet) ProtoMessage() {} + +func (x *MatchPredicate_MatchSet) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MatchPredicate_MatchSet.ProtoReflect.Descriptor instead. 
+func (*MatchPredicate_MatchSet) Descriptor() ([]byte, []int) { + return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *MatchPredicate_MatchSet) GetRules() []*MatchPredicate { + if x != nil { + return x.Rules + } + return nil +} + +type HttpGenericBodyMatch_GenericTextMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Rule: + // + // *HttpGenericBodyMatch_GenericTextMatch_StringMatch + // *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch + Rule isHttpGenericBodyMatch_GenericTextMatch_Rule `protobuf_oneof:"rule"` +} + +func (x *HttpGenericBodyMatch_GenericTextMatch) Reset() { + *x = HttpGenericBodyMatch_GenericTextMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpGenericBodyMatch_GenericTextMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpGenericBodyMatch_GenericTextMatch) ProtoMessage() {} + +func (x *HttpGenericBodyMatch_GenericTextMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_tap_v3_common_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpGenericBodyMatch_GenericTextMatch.ProtoReflect.Descriptor instead. +func (*HttpGenericBodyMatch_GenericTextMatch) Descriptor() ([]byte, []int) { + return file_envoy_config_tap_v3_common_proto_rawDescGZIP(), []int{3, 0} +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) GetRule() isHttpGenericBodyMatch_GenericTextMatch_Rule { + if m != nil { + return m.Rule + } + return nil +} + +func (x *HttpGenericBodyMatch_GenericTextMatch) GetStringMatch() string { + if x, ok := x.GetRule().(*HttpGenericBodyMatch_GenericTextMatch_StringMatch); ok { + return x.StringMatch + } + return "" +} + +func (x *HttpGenericBodyMatch_GenericTextMatch) GetBinaryMatch() []byte { + if x, ok := x.GetRule().(*HttpGenericBodyMatch_GenericTextMatch_BinaryMatch); ok { + return x.BinaryMatch + } + return nil +} + +type isHttpGenericBodyMatch_GenericTextMatch_Rule interface { + isHttpGenericBodyMatch_GenericTextMatch_Rule() +} + +type HttpGenericBodyMatch_GenericTextMatch_StringMatch struct { + // Text string to be located in HTTP body. + StringMatch string `protobuf:"bytes,1,opt,name=string_match,json=stringMatch,proto3,oneof"` +} + +type HttpGenericBodyMatch_GenericTextMatch_BinaryMatch struct { + // Sequence of bytes to be located in HTTP body. 
+ BinaryMatch []byte `protobuf:"bytes,2,opt,name=binary_match,json=binaryMatch,proto3,oneof"` +} + +func (*HttpGenericBodyMatch_GenericTextMatch_StringMatch) isHttpGenericBodyMatch_GenericTextMatch_Rule() { +} + +func (*HttpGenericBodyMatch_GenericTextMatch_BinaryMatch) isHttpGenericBodyMatch_GenericTextMatch_Rule() { +} + +var File_envoy_config_tap_v3_common_proto protoreflect.FileDescriptor + +var file_envoy_config_tap_v3_common_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x61, 0x70, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x13, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x1a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, + 0x76, 0x33, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xf5, 0x02, 0x0a, 0x09, 0x54, 0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x53, 0x0a, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 
0x70, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x0b, 0x92, 0xc7, + 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0b, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x44, 0x0a, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x50, 0x0a, 0x0d, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, + 0x52, 0x0c, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4f, + 0x0a, 0x0b, 0x74, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, + 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x74, 0x61, 0x70, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x3a, + 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x54, 0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xe6, 0x08, 0x0a, 0x0e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x49, + 0x0a, 0x08, 0x6f, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x74, 0x48, 0x00, + 0x52, 0x07, 0x6f, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x4b, 0x0a, 0x09, 0x61, 0x6e, 0x64, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, + 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x08, 0x61, 0x6e, + 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x42, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, 0x00, + 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x26, 0x0a, 0x09, 0x61, 0x6e, + 0x79, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 
0x42, 0x07, 0xfa, + 0x42, 0x04, 0x6a, 0x02, 0x08, 0x01, 0x48, 0x00, 0x52, 0x08, 0x61, 0x6e, 0x79, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x12, 0x64, 0x0a, 0x1a, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, + 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, + 0x17, 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x66, 0x0a, 0x1b, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, + 0x73, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, + 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x18, 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x12, 0x66, 0x0a, 0x1b, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x18, + 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x68, 0x0a, 0x1c, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, + 0x72, 0x73, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, + 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x19, 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x12, 0x71, 0x0a, 0x1f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, + 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, + 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x1b, 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x73, 0x0a, 0x20, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x62, + 0x6f, 
0x64, 0x79, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, + 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x1c, 0x68, 0x74, + 0x74, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0x89, 0x01, 0x0a, 0x08, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x74, 0x12, 0x43, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x3a, 0x38, 0x9a, 0xc5, + 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x53, 0x65, 0x74, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x74, 0x61, 0x70, + 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, + 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x0b, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x12, + 0x03, 0xf8, 0x42, 0x01, 0x22, 0x85, 0x01, 0x0a, 0x10, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3e, 0x0a, 0x07, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, + 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x74, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x48, 0x74, 0x74, 0x70, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x96, 0x02, 0x0a, + 0x14, 0x48, 0x74, 0x74, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x60, 0x0a, 0x08, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, + 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x48, + 0x74, 0x74, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x54, 0x65, 0x78, 0x74, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, + 0x70, 0x61, 0x74, 0x74, 0x65, 
0x72, 0x6e, 0x73, 0x1a, 0x7b, 0x0a, 0x10, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x54, 0x65, 0x78, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x2c, 0x0a, 0x0c, + 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x73, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x2c, 0x0a, 0x0c, 0x62, 0x69, + 0x6e, 0x61, 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x7a, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0b, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, + 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xc0, 0x02, 0x0a, 0x0c, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x05, 0x73, 0x69, 0x6e, 0x6b, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x53, 0x69, 0x6e, 0x6b, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x92, 0x01, 0x04, 0x08, 0x01, + 0x10, 0x01, 0x52, 0x05, 0x73, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x4f, 0x0a, 0x15, 0x6d, 0x61, 0x78, + 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x78, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, 0x6d, 0x61, 0x78, 0x42, 0x75, 0x66, 0x66, 0x65, + 0x72, 0x65, 0x64, 0x52, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x4f, 0x0a, 0x15, 0x6d, 0x61, + 0x78, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x74, 0x78, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, 0x6d, 0x61, 0x78, 0x42, 0x75, 0x66, 0x66, + 0x65, 0x72, 0x65, 0x64, 0x54, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, + 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x74, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xaa, 0x05, 0x0a, 0x0a, 0x4f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x53, 0x69, 0x6e, 0x6b, 0x12, 0x48, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x53, 0x69, 0x6e, 0x6b, 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x12, 0x52, 0x0a, 0x0f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 
0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x53, + 0x69, 0x6e, 0x6b, 0x48, 0x00, 0x52, 0x0e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, + 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x47, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x65, + 0x72, 0x5f, 0x74, 0x61, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, + 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x50, 0x65, 0x72, 0x54, 0x61, 0x70, 0x53, 0x69, 0x6e, 0x6b, + 0x48, 0x00, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x50, 0x65, 0x72, 0x54, 0x61, 0x70, 0x12, 0x4f, + 0x0a, 0x0e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x67, 0x72, 0x70, 0x63, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x47, 0x72, 0x70, 0x63, 0x53, 0x69, 0x6e, 0x6b, 0x48, 0x00, + 0x52, 0x0d, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x47, 0x72, 0x70, 0x63, 0x12, + 0x4f, 0x0a, 0x0e, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x75, + 0x66, 0x66, 0x65, 0x72, 0x65, 0x64, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x53, 0x69, 0x6e, 0x6b, 0x48, + 0x00, 0x52, 0x0d, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x65, 0x64, 0x41, 0x64, 0x6d, 0x69, 0x6e, + 0x12, 0x4d, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x73, 0x69, 0x6e, 0x6b, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, + 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x48, 0x00, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x53, 0x69, 0x6e, 0x6b, 0x22, + 0x7e, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x16, 0x0a, 0x12, 0x4a, 0x53, 0x4f, + 0x4e, 0x5f, 0x42, 0x4f, 0x44, 0x59, 0x5f, 0x41, 0x53, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, + 0x00, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x42, 0x4f, 0x44, 0x59, 0x5f, 0x41, + 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x50, 0x52, + 0x4f, 0x54, 0x4f, 0x5f, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, + 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x5f, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x5f, 0x4c, 0x45, 0x4e, + 0x47, 0x54, 0x48, 0x5f, 0x44, 0x45, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, + 0x0e, 0x0a, 0x0a, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x5f, 0x54, 0x45, 0x58, 0x54, 0x10, 0x04, 0x3a, + 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x69, 0x6e, 0x6b, 0x42, 0x17, 0x0a, 0x10, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x69, 0x6e, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x49, 0x0a, 0x12, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, + 0x6e, 0x67, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x53, 0x69, 0x6e, 0x6b, 0x3a, 0x33, 
0x9a, 0xc5, 0x88, + 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x53, 0x69, 0x6e, 0x6b, + 0x22, 0x70, 0x0a, 0x11, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x65, 0x64, 0x41, 0x64, 0x6d, 0x69, + 0x6e, 0x53, 0x69, 0x6e, 0x6b, 0x12, 0x26, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x32, 0x02, + 0x20, 0x00, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x54, 0x72, 0x61, 0x63, 0x65, 0x73, 0x12, 0x33, 0x0a, + 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x22, 0x6b, 0x0a, 0x0e, 0x46, 0x69, 0x6c, 0x65, 0x50, 0x65, 0x72, 0x54, 0x61, 0x70, + 0x53, 0x69, 0x6e, 0x6b, 0x12, 0x28, 0x0a, 0x0b, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x52, 0x0a, 0x70, 0x61, 0x74, 0x68, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x3a, 0x2f, + 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x50, 0x65, 0x72, 0x54, 0x61, 0x70, 0x53, 0x69, 0x6e, 0x6b, 0x22, + 0xae, 0x01, 0x0a, 0x11, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x47, 0x72, 0x70, + 0x63, 0x53, 0x69, 0x6e, 0x6b, 0x12, 0x15, 0x0a, 0x06, 0x74, 0x61, 0x70, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x70, 0x49, 0x64, 0x12, 0x4e, 0x0a, 0x0c, + 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x0b, 0x67, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x32, 0x9a, 0xc5, + 0x88, 0x1e, 0x2d, 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x47, 0x72, 0x70, 0x63, 0x53, 0x69, 0x6e, 0x6b, + 0x42, 0x7c, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x21, 0x69, 0x6f, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x40, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x74, 0x61, 0x70, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x61, 0x70, 0x76, 0x33, 0x62, 0x06, + 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_tap_v3_common_proto_rawDescOnce sync.Once + file_envoy_config_tap_v3_common_proto_rawDescData = file_envoy_config_tap_v3_common_proto_rawDesc +) + +func file_envoy_config_tap_v3_common_proto_rawDescGZIP() []byte { + file_envoy_config_tap_v3_common_proto_rawDescOnce.Do(func() { + file_envoy_config_tap_v3_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_tap_v3_common_proto_rawDescData) + }) + return file_envoy_config_tap_v3_common_proto_rawDescData +} + +var file_envoy_config_tap_v3_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_config_tap_v3_common_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_envoy_config_tap_v3_common_proto_goTypes = []interface{}{ + (OutputSink_Format)(0), // 0: envoy.config.tap.v3.OutputSink.Format + (*TapConfig)(nil), // 1: envoy.config.tap.v3.TapConfig + (*MatchPredicate)(nil), // 2: envoy.config.tap.v3.MatchPredicate + (*HttpHeadersMatch)(nil), // 3: envoy.config.tap.v3.HttpHeadersMatch + (*HttpGenericBodyMatch)(nil), // 4: envoy.config.tap.v3.HttpGenericBodyMatch + (*OutputConfig)(nil), // 5: envoy.config.tap.v3.OutputConfig + (*OutputSink)(nil), // 6: envoy.config.tap.v3.OutputSink + (*StreamingAdminSink)(nil), // 7: envoy.config.tap.v3.StreamingAdminSink + (*BufferedAdminSink)(nil), // 8: envoy.config.tap.v3.BufferedAdminSink + (*FilePerTapSink)(nil), // 9: envoy.config.tap.v3.FilePerTapSink + (*StreamingGrpcSink)(nil), // 10: envoy.config.tap.v3.StreamingGrpcSink + (*MatchPredicate_MatchSet)(nil), // 11: envoy.config.tap.v3.MatchPredicate.MatchSet + (*HttpGenericBodyMatch_GenericTextMatch)(nil), // 12: envoy.config.tap.v3.HttpGenericBodyMatch.GenericTextMatch + (*v3.MatchPredicate)(nil), // 13: envoy.config.common.matcher.v3.MatchPredicate + (*v31.RuntimeFractionalPercent)(nil), // 14: envoy.config.core.v3.RuntimeFractionalPercent + (*v32.HeaderMatcher)(nil), // 15: envoy.config.route.v3.HeaderMatcher + (*wrapperspb.UInt32Value)(nil), // 16: google.protobuf.UInt32Value + (*v31.TypedExtensionConfig)(nil), // 17: envoy.config.core.v3.TypedExtensionConfig + (*durationpb.Duration)(nil), // 18: google.protobuf.Duration + (*v31.GrpcService)(nil), // 19: envoy.config.core.v3.GrpcService +} +var file_envoy_config_tap_v3_common_proto_depIdxs = []int32{ + 2, // 0: envoy.config.tap.v3.TapConfig.match_config:type_name -> envoy.config.tap.v3.MatchPredicate + 13, // 1: envoy.config.tap.v3.TapConfig.match:type_name -> envoy.config.common.matcher.v3.MatchPredicate + 5, // 2: envoy.config.tap.v3.TapConfig.output_config:type_name -> envoy.config.tap.v3.OutputConfig + 14, // 3: envoy.config.tap.v3.TapConfig.tap_enabled:type_name -> envoy.config.core.v3.RuntimeFractionalPercent + 11, // 4: envoy.config.tap.v3.MatchPredicate.or_match:type_name -> envoy.config.tap.v3.MatchPredicate.MatchSet + 11, // 5: envoy.config.tap.v3.MatchPredicate.and_match:type_name -> envoy.config.tap.v3.MatchPredicate.MatchSet + 2, // 6: envoy.config.tap.v3.MatchPredicate.not_match:type_name -> envoy.config.tap.v3.MatchPredicate + 3, // 7: envoy.config.tap.v3.MatchPredicate.http_request_headers_match:type_name -> envoy.config.tap.v3.HttpHeadersMatch + 3, // 8: envoy.config.tap.v3.MatchPredicate.http_request_trailers_match:type_name -> envoy.config.tap.v3.HttpHeadersMatch + 3, // 9: envoy.config.tap.v3.MatchPredicate.http_response_headers_match:type_name -> envoy.config.tap.v3.HttpHeadersMatch + 3, // 10: envoy.config.tap.v3.MatchPredicate.http_response_trailers_match:type_name -> 
envoy.config.tap.v3.HttpHeadersMatch + 4, // 11: envoy.config.tap.v3.MatchPredicate.http_request_generic_body_match:type_name -> envoy.config.tap.v3.HttpGenericBodyMatch + 4, // 12: envoy.config.tap.v3.MatchPredicate.http_response_generic_body_match:type_name -> envoy.config.tap.v3.HttpGenericBodyMatch + 15, // 13: envoy.config.tap.v3.HttpHeadersMatch.headers:type_name -> envoy.config.route.v3.HeaderMatcher + 12, // 14: envoy.config.tap.v3.HttpGenericBodyMatch.patterns:type_name -> envoy.config.tap.v3.HttpGenericBodyMatch.GenericTextMatch + 6, // 15: envoy.config.tap.v3.OutputConfig.sinks:type_name -> envoy.config.tap.v3.OutputSink + 16, // 16: envoy.config.tap.v3.OutputConfig.max_buffered_rx_bytes:type_name -> google.protobuf.UInt32Value + 16, // 17: envoy.config.tap.v3.OutputConfig.max_buffered_tx_bytes:type_name -> google.protobuf.UInt32Value + 0, // 18: envoy.config.tap.v3.OutputSink.format:type_name -> envoy.config.tap.v3.OutputSink.Format + 7, // 19: envoy.config.tap.v3.OutputSink.streaming_admin:type_name -> envoy.config.tap.v3.StreamingAdminSink + 9, // 20: envoy.config.tap.v3.OutputSink.file_per_tap:type_name -> envoy.config.tap.v3.FilePerTapSink + 10, // 21: envoy.config.tap.v3.OutputSink.streaming_grpc:type_name -> envoy.config.tap.v3.StreamingGrpcSink + 8, // 22: envoy.config.tap.v3.OutputSink.buffered_admin:type_name -> envoy.config.tap.v3.BufferedAdminSink + 17, // 23: envoy.config.tap.v3.OutputSink.custom_sink:type_name -> envoy.config.core.v3.TypedExtensionConfig + 18, // 24: envoy.config.tap.v3.BufferedAdminSink.timeout:type_name -> google.protobuf.Duration + 19, // 25: envoy.config.tap.v3.StreamingGrpcSink.grpc_service:type_name -> envoy.config.core.v3.GrpcService + 2, // 26: envoy.config.tap.v3.MatchPredicate.MatchSet.rules:type_name -> envoy.config.tap.v3.MatchPredicate + 27, // [27:27] is the sub-list for method output_type + 27, // [27:27] is the sub-list for method input_type + 27, // [27:27] is the sub-list for extension type_name + 27, // [27:27] is the sub-list for extension extendee + 0, // [0:27] is the sub-list for field type_name +} + +func init() { file_envoy_config_tap_v3_common_proto_init() } +func file_envoy_config_tap_v3_common_proto_init() { + if File_envoy_config_tap_v3_common_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_tap_v3_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TapConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_tap_v3_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MatchPredicate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_tap_v3_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpHeadersMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_tap_v3_common_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpGenericBodyMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_tap_v3_common_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OutputConfig); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_tap_v3_common_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OutputSink); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_tap_v3_common_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StreamingAdminSink); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_tap_v3_common_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BufferedAdminSink); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_tap_v3_common_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FilePerTapSink); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_tap_v3_common_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StreamingGrpcSink); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_tap_v3_common_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MatchPredicate_MatchSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_tap_v3_common_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpGenericBodyMatch_GenericTextMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_tap_v3_common_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*MatchPredicate_OrMatch)(nil), + (*MatchPredicate_AndMatch)(nil), + (*MatchPredicate_NotMatch)(nil), + (*MatchPredicate_AnyMatch)(nil), + (*MatchPredicate_HttpRequestHeadersMatch)(nil), + (*MatchPredicate_HttpRequestTrailersMatch)(nil), + (*MatchPredicate_HttpResponseHeadersMatch)(nil), + (*MatchPredicate_HttpResponseTrailersMatch)(nil), + (*MatchPredicate_HttpRequestGenericBodyMatch)(nil), + (*MatchPredicate_HttpResponseGenericBodyMatch)(nil), + } + file_envoy_config_tap_v3_common_proto_msgTypes[5].OneofWrappers = []interface{}{ + (*OutputSink_StreamingAdmin)(nil), + (*OutputSink_FilePerTap)(nil), + (*OutputSink_StreamingGrpc)(nil), + (*OutputSink_BufferedAdmin)(nil), + (*OutputSink_CustomSink)(nil), + } + file_envoy_config_tap_v3_common_proto_msgTypes[11].OneofWrappers = []interface{}{ + (*HttpGenericBodyMatch_GenericTextMatch_StringMatch)(nil), + (*HttpGenericBodyMatch_GenericTextMatch_BinaryMatch)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_tap_v3_common_proto_rawDesc, + NumEnums: 1, + NumMessages: 12, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_tap_v3_common_proto_goTypes, + DependencyIndexes: file_envoy_config_tap_v3_common_proto_depIdxs, + EnumInfos: file_envoy_config_tap_v3_common_proto_enumTypes, + MessageInfos: 
file_envoy_config_tap_v3_common_proto_msgTypes, + }.Build() + File_envoy_config_tap_v3_common_proto = out.File + file_envoy_config_tap_v3_common_proto_rawDesc = nil + file_envoy_config_tap_v3_common_proto_goTypes = nil + file_envoy_config_tap_v3_common_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.validate.go new file mode 100644 index 0000000000..04df840e12 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common.pb.validate.go @@ -0,0 +1,2419 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/tap/v3/common.proto + +package tapv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on TapConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *TapConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TapConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in TapConfigMultiError, or nil +// if none found. 
+func (m *TapConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *TapConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetMatchConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TapConfigValidationError{ + field: "MatchConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TapConfigValidationError{ + field: "MatchConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatchConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TapConfigValidationError{ + field: "MatchConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TapConfigValidationError{ + field: "Match", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TapConfigValidationError{ + field: "Match", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TapConfigValidationError{ + field: "Match", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if m.GetOutputConfig() == nil { + err := TapConfigValidationError{ + field: "OutputConfig", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetOutputConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TapConfigValidationError{ + field: "OutputConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TapConfigValidationError{ + field: "OutputConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOutputConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TapConfigValidationError{ + field: "OutputConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTapEnabled()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TapConfigValidationError{ + field: "TapEnabled", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TapConfigValidationError{ + field: "TapEnabled", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTapEnabled()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TapConfigValidationError{ + field: "TapEnabled", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return 
TapConfigMultiError(errors) + } + + return nil +} + +// TapConfigMultiError is an error wrapping multiple validation errors returned +// by TapConfig.ValidateAll() if the designated constraints aren't met. +type TapConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TapConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TapConfigMultiError) AllErrors() []error { return m } + +// TapConfigValidationError is the validation error returned by +// TapConfig.Validate if the designated constraints aren't met. +type TapConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TapConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TapConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TapConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TapConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TapConfigValidationError) ErrorName() string { return "TapConfigValidationError" } + +// Error satisfies the builtin error interface +func (e TapConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTapConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TapConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TapConfigValidationError{} + +// Validate checks the field values on MatchPredicate with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *MatchPredicate) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MatchPredicate with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in MatchPredicateMultiError, +// or nil if none found. 
+func (m *MatchPredicate) ValidateAll() error { + return m.validate(true) +} + +func (m *MatchPredicate) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofRulePresent := false + switch v := m.Rule.(type) { + case *MatchPredicate_OrMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetOrMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "OrMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "OrMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOrMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "OrMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_AndMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetAndMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "AndMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "AndMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAndMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "AndMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_NotMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetNotMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "NotMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "NotMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNotMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "NotMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_AnyMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = 
append(errors, err) + } + oneofRulePresent = true + + if m.GetAnyMatch() != true { + err := MatchPredicateValidationError{ + field: "AnyMatch", + reason: "value must equal true", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *MatchPredicate_HttpRequestHeadersMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHttpRequestHeadersMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpRequestHeadersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpRequestHeadersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpRequestHeadersMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "HttpRequestHeadersMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_HttpRequestTrailersMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHttpRequestTrailersMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpRequestTrailersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpRequestTrailersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpRequestTrailersMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "HttpRequestTrailersMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_HttpResponseHeadersMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHttpResponseHeadersMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpResponseHeadersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpResponseHeadersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpResponseHeadersMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: 
"HttpResponseHeadersMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_HttpResponseTrailersMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHttpResponseTrailersMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpResponseTrailersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpResponseTrailersMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpResponseTrailersMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "HttpResponseTrailersMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_HttpRequestGenericBodyMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHttpRequestGenericBodyMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpRequestGenericBodyMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpRequestGenericBodyMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpRequestGenericBodyMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "HttpRequestGenericBodyMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MatchPredicate_HttpResponseGenericBodyMatch: + if v == nil { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if all { + switch v := interface{}(m.GetHttpResponseGenericBodyMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpResponseGenericBodyMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicateValidationError{ + field: "HttpResponseGenericBodyMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpResponseGenericBodyMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicateValidationError{ + field: "HttpResponseGenericBodyMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + 
+ default: + _ = v // ensures v is used + } + if !oneofRulePresent { + err := MatchPredicateValidationError{ + field: "Rule", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return MatchPredicateMultiError(errors) + } + + return nil +} + +// MatchPredicateMultiError is an error wrapping multiple validation errors +// returned by MatchPredicate.ValidateAll() if the designated constraints +// aren't met. +type MatchPredicateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MatchPredicateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MatchPredicateMultiError) AllErrors() []error { return m } + +// MatchPredicateValidationError is the validation error returned by +// MatchPredicate.Validate if the designated constraints aren't met. +type MatchPredicateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MatchPredicateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MatchPredicateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MatchPredicateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MatchPredicateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MatchPredicateValidationError) ErrorName() string { return "MatchPredicateValidationError" } + +// Error satisfies the builtin error interface +func (e MatchPredicateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatchPredicate.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MatchPredicateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MatchPredicateValidationError{} + +// Validate checks the field values on HttpHeadersMatch with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *HttpHeadersMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpHeadersMatch with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HttpHeadersMatchMultiError, or nil if none found. 
+func (m *HttpHeadersMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpHeadersMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetHeaders() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpHeadersMatchValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpHeadersMatchValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpHeadersMatchValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return HttpHeadersMatchMultiError(errors) + } + + return nil +} + +// HttpHeadersMatchMultiError is an error wrapping multiple validation errors +// returned by HttpHeadersMatch.ValidateAll() if the designated constraints +// aren't met. +type HttpHeadersMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpHeadersMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpHeadersMatchMultiError) AllErrors() []error { return m } + +// HttpHeadersMatchValidationError is the validation error returned by +// HttpHeadersMatch.Validate if the designated constraints aren't met. +type HttpHeadersMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpHeadersMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpHeadersMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpHeadersMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpHeadersMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpHeadersMatchValidationError) ErrorName() string { return "HttpHeadersMatchValidationError" } + +// Error satisfies the builtin error interface +func (e HttpHeadersMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpHeadersMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpHeadersMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpHeadersMatchValidationError{} + +// Validate checks the field values on HttpGenericBodyMatch with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *HttpGenericBodyMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpGenericBodyMatch with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HttpGenericBodyMatchMultiError, or nil if none found. +func (m *HttpGenericBodyMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpGenericBodyMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for BytesLimit + + if len(m.GetPatterns()) < 1 { + err := HttpGenericBodyMatchValidationError{ + field: "Patterns", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetPatterns() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpGenericBodyMatchValidationError{ + field: fmt.Sprintf("Patterns[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpGenericBodyMatchValidationError{ + field: fmt.Sprintf("Patterns[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpGenericBodyMatchValidationError{ + field: fmt.Sprintf("Patterns[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return HttpGenericBodyMatchMultiError(errors) + } + + return nil +} + +// HttpGenericBodyMatchMultiError is an error wrapping multiple validation +// errors returned by HttpGenericBodyMatch.ValidateAll() if the designated +// constraints aren't met. +type HttpGenericBodyMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpGenericBodyMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpGenericBodyMatchMultiError) AllErrors() []error { return m } + +// HttpGenericBodyMatchValidationError is the validation error returned by +// HttpGenericBodyMatch.Validate if the designated constraints aren't met. +type HttpGenericBodyMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpGenericBodyMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpGenericBodyMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpGenericBodyMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpGenericBodyMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HttpGenericBodyMatchValidationError) ErrorName() string { + return "HttpGenericBodyMatchValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpGenericBodyMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpGenericBodyMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpGenericBodyMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpGenericBodyMatchValidationError{} + +// Validate checks the field values on OutputConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *OutputConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on OutputConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in OutputConfigMultiError, or +// nil if none found. +func (m *OutputConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *OutputConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetSinks()) != 1 { + err := OutputConfigValidationError{ + field: "Sinks", + reason: "value must contain exactly 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetSinks() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutputConfigValidationError{ + field: fmt.Sprintf("Sinks[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutputConfigValidationError{ + field: fmt.Sprintf("Sinks[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutputConfigValidationError{ + field: fmt.Sprintf("Sinks[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetMaxBufferedRxBytes()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutputConfigValidationError{ + field: "MaxBufferedRxBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutputConfigValidationError{ + field: "MaxBufferedRxBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxBufferedRxBytes()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutputConfigValidationError{ + field: "MaxBufferedRxBytes", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMaxBufferedTxBytes()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, 
OutputConfigValidationError{ + field: "MaxBufferedTxBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutputConfigValidationError{ + field: "MaxBufferedTxBytes", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxBufferedTxBytes()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutputConfigValidationError{ + field: "MaxBufferedTxBytes", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Streaming + + if len(errors) > 0 { + return OutputConfigMultiError(errors) + } + + return nil +} + +// OutputConfigMultiError is an error wrapping multiple validation errors +// returned by OutputConfig.ValidateAll() if the designated constraints aren't met. +type OutputConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m OutputConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m OutputConfigMultiError) AllErrors() []error { return m } + +// OutputConfigValidationError is the validation error returned by +// OutputConfig.Validate if the designated constraints aren't met. +type OutputConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e OutputConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e OutputConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e OutputConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e OutputConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e OutputConfigValidationError) ErrorName() string { return "OutputConfigValidationError" } + +// Error satisfies the builtin error interface +func (e OutputConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sOutputConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = OutputConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = OutputConfigValidationError{} + +// Validate checks the field values on OutputSink with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *OutputSink) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on OutputSink with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in OutputSinkMultiError, or +// nil if none found. 
+func (m *OutputSink) ValidateAll() error { + return m.validate(true) +} + +func (m *OutputSink) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := OutputSink_Format_name[int32(m.GetFormat())]; !ok { + err := OutputSinkValidationError{ + field: "Format", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + oneofOutputSinkTypePresent := false + switch v := m.OutputSinkType.(type) { + case *OutputSink_StreamingAdmin: + if v == nil { + err := OutputSinkValidationError{ + field: "OutputSinkType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOutputSinkTypePresent = true + + if all { + switch v := interface{}(m.GetStreamingAdmin()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutputSinkValidationError{ + field: "StreamingAdmin", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutputSinkValidationError{ + field: "StreamingAdmin", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStreamingAdmin()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutputSinkValidationError{ + field: "StreamingAdmin", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *OutputSink_FilePerTap: + if v == nil { + err := OutputSinkValidationError{ + field: "OutputSinkType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOutputSinkTypePresent = true + + if all { + switch v := interface{}(m.GetFilePerTap()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutputSinkValidationError{ + field: "FilePerTap", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutputSinkValidationError{ + field: "FilePerTap", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilePerTap()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutputSinkValidationError{ + field: "FilePerTap", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *OutputSink_StreamingGrpc: + if v == nil { + err := OutputSinkValidationError{ + field: "OutputSinkType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOutputSinkTypePresent = true + + if all { + switch v := interface{}(m.GetStreamingGrpc()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutputSinkValidationError{ + field: "StreamingGrpc", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutputSinkValidationError{ + field: "StreamingGrpc", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStreamingGrpc()).(interface{ Validate() error }); ok { + if err := v.Validate(); err 
!= nil { + return OutputSinkValidationError{ + field: "StreamingGrpc", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *OutputSink_BufferedAdmin: + if v == nil { + err := OutputSinkValidationError{ + field: "OutputSinkType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOutputSinkTypePresent = true + + if all { + switch v := interface{}(m.GetBufferedAdmin()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutputSinkValidationError{ + field: "BufferedAdmin", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutputSinkValidationError{ + field: "BufferedAdmin", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBufferedAdmin()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutputSinkValidationError{ + field: "BufferedAdmin", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *OutputSink_CustomSink: + if v == nil { + err := OutputSinkValidationError{ + field: "OutputSinkType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOutputSinkTypePresent = true + + if all { + switch v := interface{}(m.GetCustomSink()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OutputSinkValidationError{ + field: "CustomSink", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OutputSinkValidationError{ + field: "CustomSink", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCustomSink()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OutputSinkValidationError{ + field: "CustomSink", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofOutputSinkTypePresent { + err := OutputSinkValidationError{ + field: "OutputSinkType", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return OutputSinkMultiError(errors) + } + + return nil +} + +// OutputSinkMultiError is an error wrapping multiple validation errors +// returned by OutputSink.ValidateAll() if the designated constraints aren't met. +type OutputSinkMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m OutputSinkMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m OutputSinkMultiError) AllErrors() []error { return m } + +// OutputSinkValidationError is the validation error returned by +// OutputSink.Validate if the designated constraints aren't met. +type OutputSinkValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e OutputSinkValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e OutputSinkValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e OutputSinkValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e OutputSinkValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e OutputSinkValidationError) ErrorName() string { return "OutputSinkValidationError" } + +// Error satisfies the builtin error interface +func (e OutputSinkValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sOutputSink.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = OutputSinkValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = OutputSinkValidationError{} + +// Validate checks the field values on StreamingAdminSink with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *StreamingAdminSink) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StreamingAdminSink with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// StreamingAdminSinkMultiError, or nil if none found. +func (m *StreamingAdminSink) ValidateAll() error { + return m.validate(true) +} + +func (m *StreamingAdminSink) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return StreamingAdminSinkMultiError(errors) + } + + return nil +} + +// StreamingAdminSinkMultiError is an error wrapping multiple validation errors +// returned by StreamingAdminSink.ValidateAll() if the designated constraints +// aren't met. +type StreamingAdminSinkMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StreamingAdminSinkMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StreamingAdminSinkMultiError) AllErrors() []error { return m } + +// StreamingAdminSinkValidationError is the validation error returned by +// StreamingAdminSink.Validate if the designated constraints aren't met. +type StreamingAdminSinkValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StreamingAdminSinkValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StreamingAdminSinkValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StreamingAdminSinkValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StreamingAdminSinkValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e StreamingAdminSinkValidationError) ErrorName() string { + return "StreamingAdminSinkValidationError" +} + +// Error satisfies the builtin error interface +func (e StreamingAdminSinkValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStreamingAdminSink.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StreamingAdminSinkValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StreamingAdminSinkValidationError{} + +// Validate checks the field values on BufferedAdminSink with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *BufferedAdminSink) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on BufferedAdminSink with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// BufferedAdminSinkMultiError, or nil if none found. +func (m *BufferedAdminSink) ValidateAll() error { + return m.validate(true) +} + +func (m *BufferedAdminSink) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetMaxTraces() <= 0 { + err := BufferedAdminSinkValidationError{ + field: "MaxTraces", + reason: "value must be greater than 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, BufferedAdminSinkValidationError{ + field: "Timeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, BufferedAdminSinkValidationError{ + field: "Timeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return BufferedAdminSinkValidationError{ + field: "Timeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return BufferedAdminSinkMultiError(errors) + } + + return nil +} + +// BufferedAdminSinkMultiError is an error wrapping multiple validation errors +// returned by BufferedAdminSink.ValidateAll() if the designated constraints +// aren't met. +type BufferedAdminSinkMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m BufferedAdminSinkMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m BufferedAdminSinkMultiError) AllErrors() []error { return m } + +// BufferedAdminSinkValidationError is the validation error returned by +// BufferedAdminSink.Validate if the designated constraints aren't met. +type BufferedAdminSinkValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e BufferedAdminSinkValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e BufferedAdminSinkValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e BufferedAdminSinkValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e BufferedAdminSinkValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e BufferedAdminSinkValidationError) ErrorName() string { + return "BufferedAdminSinkValidationError" +} + +// Error satisfies the builtin error interface +func (e BufferedAdminSinkValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sBufferedAdminSink.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = BufferedAdminSinkValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = BufferedAdminSinkValidationError{} + +// Validate checks the field values on FilePerTapSink with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *FilePerTapSink) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FilePerTapSink with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in FilePerTapSinkMultiError, +// or nil if none found. +func (m *FilePerTapSink) ValidateAll() error { + return m.validate(true) +} + +func (m *FilePerTapSink) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetPathPrefix()) < 1 { + err := FilePerTapSinkValidationError{ + field: "PathPrefix", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return FilePerTapSinkMultiError(errors) + } + + return nil +} + +// FilePerTapSinkMultiError is an error wrapping multiple validation errors +// returned by FilePerTapSink.ValidateAll() if the designated constraints +// aren't met. +type FilePerTapSinkMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FilePerTapSinkMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FilePerTapSinkMultiError) AllErrors() []error { return m } + +// FilePerTapSinkValidationError is the validation error returned by +// FilePerTapSink.Validate if the designated constraints aren't met. +type FilePerTapSinkValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FilePerTapSinkValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FilePerTapSinkValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FilePerTapSinkValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FilePerTapSinkValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e FilePerTapSinkValidationError) ErrorName() string { return "FilePerTapSinkValidationError" } + +// Error satisfies the builtin error interface +func (e FilePerTapSinkValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFilePerTapSink.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FilePerTapSinkValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FilePerTapSinkValidationError{} + +// Validate checks the field values on StreamingGrpcSink with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *StreamingGrpcSink) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StreamingGrpcSink with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// StreamingGrpcSinkMultiError, or nil if none found. +func (m *StreamingGrpcSink) ValidateAll() error { + return m.validate(true) +} + +func (m *StreamingGrpcSink) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for TapId + + if m.GetGrpcService() == nil { + err := StreamingGrpcSinkValidationError{ + field: "GrpcService", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetGrpcService()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StreamingGrpcSinkValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StreamingGrpcSinkValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpcService()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StreamingGrpcSinkValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return StreamingGrpcSinkMultiError(errors) + } + + return nil +} + +// StreamingGrpcSinkMultiError is an error wrapping multiple validation errors +// returned by StreamingGrpcSink.ValidateAll() if the designated constraints +// aren't met. +type StreamingGrpcSinkMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StreamingGrpcSinkMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StreamingGrpcSinkMultiError) AllErrors() []error { return m } + +// StreamingGrpcSinkValidationError is the validation error returned by +// StreamingGrpcSink.Validate if the designated constraints aren't met. +type StreamingGrpcSinkValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e StreamingGrpcSinkValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StreamingGrpcSinkValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StreamingGrpcSinkValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StreamingGrpcSinkValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e StreamingGrpcSinkValidationError) ErrorName() string { + return "StreamingGrpcSinkValidationError" +} + +// Error satisfies the builtin error interface +func (e StreamingGrpcSinkValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStreamingGrpcSink.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StreamingGrpcSinkValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StreamingGrpcSinkValidationError{} + +// Validate checks the field values on MatchPredicate_MatchSet with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *MatchPredicate_MatchSet) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MatchPredicate_MatchSet with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MatchPredicate_MatchSetMultiError, or nil if none found. +func (m *MatchPredicate_MatchSet) ValidateAll() error { + return m.validate(true) +} + +func (m *MatchPredicate_MatchSet) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetRules()) < 2 { + err := MatchPredicate_MatchSetValidationError{ + field: "Rules", + reason: "value must contain at least 2 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetRules() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MatchPredicate_MatchSetValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MatchPredicate_MatchSetValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MatchPredicate_MatchSetValidationError{ + field: fmt.Sprintf("Rules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return MatchPredicate_MatchSetMultiError(errors) + } + + return nil +} + +// MatchPredicate_MatchSetMultiError is an error wrapping multiple validation +// errors returned by MatchPredicate_MatchSet.ValidateAll() if the designated +// constraints aren't met. +type MatchPredicate_MatchSetMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m MatchPredicate_MatchSetMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MatchPredicate_MatchSetMultiError) AllErrors() []error { return m } + +// MatchPredicate_MatchSetValidationError is the validation error returned by +// MatchPredicate_MatchSet.Validate if the designated constraints aren't met. +type MatchPredicate_MatchSetValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MatchPredicate_MatchSetValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MatchPredicate_MatchSetValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MatchPredicate_MatchSetValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MatchPredicate_MatchSetValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MatchPredicate_MatchSetValidationError) ErrorName() string { + return "MatchPredicate_MatchSetValidationError" +} + +// Error satisfies the builtin error interface +func (e MatchPredicate_MatchSetValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMatchPredicate_MatchSet.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MatchPredicate_MatchSetValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MatchPredicate_MatchSetValidationError{} + +// Validate checks the field values on HttpGenericBodyMatch_GenericTextMatch +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *HttpGenericBodyMatch_GenericTextMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpGenericBodyMatch_GenericTextMatch +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// HttpGenericBodyMatch_GenericTextMatchMultiError, or nil if none found. 
+func (m *HttpGenericBodyMatch_GenericTextMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofRulePresent := false + switch v := m.Rule.(type) { + case *HttpGenericBodyMatch_GenericTextMatch_StringMatch: + if v == nil { + err := HttpGenericBodyMatch_GenericTextMatchValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if utf8.RuneCountInString(m.GetStringMatch()) < 1 { + err := HttpGenericBodyMatch_GenericTextMatchValidationError{ + field: "StringMatch", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch: + if v == nil { + err := HttpGenericBodyMatch_GenericTextMatchValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if len(m.GetBinaryMatch()) < 1 { + err := HttpGenericBodyMatch_GenericTextMatchValidationError{ + field: "BinaryMatch", + reason: "value length must be at least 1 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + default: + _ = v // ensures v is used + } + if !oneofRulePresent { + err := HttpGenericBodyMatch_GenericTextMatchValidationError{ + field: "Rule", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HttpGenericBodyMatch_GenericTextMatchMultiError(errors) + } + + return nil +} + +// HttpGenericBodyMatch_GenericTextMatchMultiError is an error wrapping +// multiple validation errors returned by +// HttpGenericBodyMatch_GenericTextMatch.ValidateAll() if the designated +// constraints aren't met. +type HttpGenericBodyMatch_GenericTextMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpGenericBodyMatch_GenericTextMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpGenericBodyMatch_GenericTextMatchMultiError) AllErrors() []error { return m } + +// HttpGenericBodyMatch_GenericTextMatchValidationError is the validation error +// returned by HttpGenericBodyMatch_GenericTextMatch.Validate if the +// designated constraints aren't met. +type HttpGenericBodyMatch_GenericTextMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpGenericBodyMatch_GenericTextMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpGenericBodyMatch_GenericTextMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpGenericBodyMatch_GenericTextMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpGenericBodyMatch_GenericTextMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HttpGenericBodyMatch_GenericTextMatchValidationError) ErrorName() string { + return "HttpGenericBodyMatch_GenericTextMatchValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpGenericBodyMatch_GenericTextMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpGenericBodyMatch_GenericTextMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpGenericBodyMatch_GenericTextMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpGenericBodyMatch_GenericTextMatchValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common_vtproto.pb.go new file mode 100644 index 0000000000..9cedeecd0a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/tap/v3/common_vtproto.pb.go @@ -0,0 +1,1591 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/tap/v3/common.proto + +package tapv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TapConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TapConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TapConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Match != nil { + if vtmsg, ok := interface{}(m.Match).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Match) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.TapEnabled != nil { + if vtmsg, ok := interface{}(m.TapEnabled).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TapEnabled) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.OutputConfig != nil { + size, err := m.OutputConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MatchConfig != nil { + size, err := m.MatchConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MatchPredicate_MatchSet) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchPredicate_MatchSet) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_MatchSet) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Rules[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *MatchPredicate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchPredicate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Rule.(*MatchPredicate_HttpResponseGenericBodyMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpRequestGenericBodyMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpResponseTrailersMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpResponseHeadersMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpRequestTrailersMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_HttpRequestHeadersMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_AnyMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_NotMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_AndMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*MatchPredicate_OrMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *MatchPredicate_OrMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_OrMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrMatch != nil { + size, err := m.OrMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_AndMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_AndMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AndMatch != nil { + size, err := m.AndMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m 
*MatchPredicate_NotMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_NotMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NotMatch != nil { + size, err := m.NotMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_AnyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_AnyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.AnyMatch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpRequestHeadersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpRequestHeadersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpRequestHeadersMatch != nil { + size, err := m.HttpRequestHeadersMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpRequestTrailersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpRequestTrailersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpRequestTrailersMatch != nil { + size, err := m.HttpRequestTrailersMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpResponseHeadersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpResponseHeadersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpResponseHeadersMatch != nil { + size, err := m.HttpResponseHeadersMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpResponseTrailersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpResponseTrailersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpResponseTrailersMatch != nil { + size, err := m.HttpResponseTrailersMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = 
protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpRequestGenericBodyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpRequestGenericBodyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpRequestGenericBodyMatch != nil { + size, err := m.HttpRequestGenericBodyMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *MatchPredicate_HttpResponseGenericBodyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MatchPredicate_HttpResponseGenericBodyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpResponseGenericBodyMatch != nil { + size, err := m.HttpResponseGenericBodyMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *HttpHeadersMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpHeadersMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpHeadersMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Headers[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Headers[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], 
m.unknownFields) + } + if msg, ok := m.Rule.(*HttpGenericBodyMatch_GenericTextMatch_BinaryMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Rule.(*HttpGenericBodyMatch_GenericTextMatch_StringMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *HttpGenericBodyMatch_GenericTextMatch_StringMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpGenericBodyMatch_GenericTextMatch_StringMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringMatch) + copy(dAtA[i:], m.StringMatch) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StringMatch))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.BinaryMatch) + copy(dAtA[i:], m.BinaryMatch) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.BinaryMatch))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *HttpGenericBodyMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpGenericBodyMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpGenericBodyMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Patterns) > 0 { + for iNdEx := len(m.Patterns) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Patterns[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.BytesLimit != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.BytesLimit)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *OutputConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OutputConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OutputConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Streaming { + i-- + if m.Streaming { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.MaxBufferedTxBytes != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxBufferedTxBytes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.MaxBufferedRxBytes != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxBufferedRxBytes).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Sinks) > 0 { + for iNdEx := len(m.Sinks) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Sinks[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *OutputSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OutputSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OutputSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.OutputSinkType.(*OutputSink_CustomSink); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.OutputSinkType.(*OutputSink_BufferedAdmin); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.OutputSinkType.(*OutputSink_StreamingGrpc); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.OutputSinkType.(*OutputSink_FilePerTap); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.OutputSinkType.(*OutputSink_StreamingAdmin); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Format != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Format)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *OutputSink_StreamingAdmin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OutputSink_StreamingAdmin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StreamingAdmin != nil { + size, err := m.StreamingAdmin.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *OutputSink_FilePerTap) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OutputSink_FilePerTap) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FilePerTap != nil { + size, err := m.FilePerTap.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = 
protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *OutputSink_StreamingGrpc) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OutputSink_StreamingGrpc) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StreamingGrpc != nil { + size, err := m.StreamingGrpc.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *OutputSink_BufferedAdmin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OutputSink_BufferedAdmin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.BufferedAdmin != nil { + size, err := m.BufferedAdmin.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *OutputSink_CustomSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OutputSink_CustomSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.CustomSink != nil { + if vtmsg, ok := interface{}(m.CustomSink).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CustomSink) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *StreamingAdminSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamingAdminSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StreamingAdminSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *BufferedAdminSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BufferedAdminSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *BufferedAdminSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Timeout != nil { + size, err := (*durationpb.Duration)(m.Timeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MaxTraces != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MaxTraces)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *FilePerTapSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FilePerTapSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FilePerTapSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.PathPrefix) > 0 { + i -= len(m.PathPrefix) + copy(dAtA[i:], m.PathPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PathPrefix))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StreamingGrpcSink) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamingGrpcSink) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StreamingGrpcSink) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.GrpcService != nil { + if vtmsg, ok := interface{}(m.GrpcService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.GrpcService) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.TapId) > 0 { + i -= len(m.TapId) + copy(dAtA[i:], m.TapId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TapId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TapConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MatchConfig != nil { + l = m.MatchConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OutputConfig != nil { + l = m.OutputConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TapEnabled != nil { + if size, ok := interface{}(m.TapEnabled).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TapEnabled) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Match != nil { + if size, ok := interface{}(m.Match).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + 
l = proto.Size(m.Match) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *MatchPredicate_MatchSet) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *MatchPredicate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Rule.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *MatchPredicate_OrMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrMatch != nil { + l = m.OrMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_AndMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AndMatch != nil { + l = m.AndMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_NotMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NotMatch != nil { + l = m.NotMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_AnyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *MatchPredicate_HttpRequestHeadersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpRequestHeadersMatch != nil { + l = m.HttpRequestHeadersMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpRequestTrailersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpRequestTrailersMatch != nil { + l = m.HttpRequestTrailersMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpResponseHeadersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpResponseHeadersMatch != nil { + l = m.HttpResponseHeadersMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpResponseTrailersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpResponseTrailersMatch != nil { + l = m.HttpResponseTrailersMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpRequestGenericBodyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpRequestGenericBodyMatch != nil { + l = m.HttpRequestGenericBodyMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MatchPredicate_HttpResponseGenericBodyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpResponseGenericBodyMatch != nil { + l = m.HttpResponseGenericBodyMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HttpHeadersMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Headers) > 0 { + for _, e := range m.Headers { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = 
proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *HttpGenericBodyMatch_GenericTextMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Rule.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *HttpGenericBodyMatch_GenericTextMatch_StringMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.StringMatch) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.BinaryMatch) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HttpGenericBodyMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BytesLimit != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.BytesLimit)) + } + if len(m.Patterns) > 0 { + for _, e := range m.Patterns { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *OutputConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Sinks) > 0 { + for _, e := range m.Sinks { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.MaxBufferedRxBytes != nil { + l = (*wrapperspb.UInt32Value)(m.MaxBufferedRxBytes).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaxBufferedTxBytes != nil { + l = (*wrapperspb.UInt32Value)(m.MaxBufferedTxBytes).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Streaming { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *OutputSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Format != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Format)) + } + if vtmsg, ok := m.OutputSinkType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *OutputSink_StreamingAdmin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StreamingAdmin != nil { + l = m.StreamingAdmin.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *OutputSink_FilePerTap) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FilePerTap != nil { + l = m.FilePerTap.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *OutputSink_StreamingGrpc) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StreamingGrpc != nil { + l = m.StreamingGrpc.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *OutputSink_BufferedAdmin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BufferedAdmin != nil { + l = m.BufferedAdmin.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *OutputSink_CustomSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CustomSink != nil { + if size, ok := interface{}(m.CustomSink).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CustomSink) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *StreamingAdminSink) SizeVT() (n int) { + if m == 
nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *BufferedAdminSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxTraces != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MaxTraces)) + } + if m.Timeout != nil { + l = (*durationpb.Duration)(m.Timeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *FilePerTapSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PathPrefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *StreamingGrpcSink) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TapId) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.GrpcService != nil { + if size, ok := interface{}(m.GrpcService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.GrpcService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.go new file mode 100644 index 0000000000..1975c6f020 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.go @@ -0,0 +1,290 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/trace/v3/datadog.proto + +package tracev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration for the Remote Configuration feature. +type DatadogRemoteConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Frequency at which new configuration updates are queried. + // If no value is provided, the default value is delegated to the Datadog tracing library. 
+ PollingInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=polling_interval,json=pollingInterval,proto3" json:"polling_interval,omitempty"` +} + +func (x *DatadogRemoteConfig) Reset() { + *x = DatadogRemoteConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_datadog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DatadogRemoteConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DatadogRemoteConfig) ProtoMessage() {} + +func (x *DatadogRemoteConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_datadog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DatadogRemoteConfig.ProtoReflect.Descriptor instead. +func (*DatadogRemoteConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_datadog_proto_rawDescGZIP(), []int{0} +} + +func (x *DatadogRemoteConfig) GetPollingInterval() *durationpb.Duration { + if x != nil { + return x.PollingInterval + } + return nil +} + +// Configuration for the Datadog tracer. +// [#extension: envoy.tracers.datadog] +type DatadogConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The cluster to use for submitting traces to the Datadog agent. + CollectorCluster string `protobuf:"bytes,1,opt,name=collector_cluster,json=collectorCluster,proto3" json:"collector_cluster,omitempty"` + // The name used for the service when traces are generated by envoy. + ServiceName string `protobuf:"bytes,2,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + // Optional hostname to use when sending spans to the collector_cluster. Useful for collectors + // that require a specific hostname. Defaults to :ref:`collector_cluster ` above. + CollectorHostname string `protobuf:"bytes,3,opt,name=collector_hostname,json=collectorHostname,proto3" json:"collector_hostname,omitempty"` + // Enables and configures remote configuration. + // Remote Configuration allows to configure the tracer from Datadog's user interface. + // This feature can drastically increase the number of connections to the Datadog Agent. + // Each tracer regularly polls for configuration updates, and the number of tracers is the product + // of the number of listeners and worker threads. + RemoteConfig *DatadogRemoteConfig `protobuf:"bytes,4,opt,name=remote_config,json=remoteConfig,proto3" json:"remote_config,omitempty"` +} + +func (x *DatadogConfig) Reset() { + *x = DatadogConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_datadog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DatadogConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DatadogConfig) ProtoMessage() {} + +func (x *DatadogConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_datadog_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DatadogConfig.ProtoReflect.Descriptor instead. 
+func (*DatadogConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_datadog_proto_rawDescGZIP(), []int{1} +} + +func (x *DatadogConfig) GetCollectorCluster() string { + if x != nil { + return x.CollectorCluster + } + return "" +} + +func (x *DatadogConfig) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +func (x *DatadogConfig) GetCollectorHostname() string { + if x != nil { + return x.CollectorHostname + } + return "" +} + +func (x *DatadogConfig) GetRemoteConfig() *DatadogRemoteConfig { + if x != nil { + return x.RemoteConfig + } + return nil +} + +var File_envoy_config_trace_v3_datadog_proto protoreflect.FileDescriptor + +var file_envoy_config_trace_v3_datadog_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x5b, 0x0a, 0x13, 0x44, 0x61, 0x74, 0x61, 0x64, + 0x6f, 0x67, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x44, + 0x0a, 0x10, 0x70, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x70, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x22, 0x9d, 0x02, 0x0a, 0x0d, 0x44, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x34, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x2a, 0x0a, 0x0c, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x6f, 
0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x48, + 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4f, 0x0a, 0x0d, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x52, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, + 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x42, 0xb3, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x2a, 0x12, 0x28, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, + 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, + 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x44, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, + 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_envoy_config_trace_v3_datadog_proto_rawDescOnce sync.Once + file_envoy_config_trace_v3_datadog_proto_rawDescData = file_envoy_config_trace_v3_datadog_proto_rawDesc +) + +func file_envoy_config_trace_v3_datadog_proto_rawDescGZIP() []byte { + file_envoy_config_trace_v3_datadog_proto_rawDescOnce.Do(func() { + file_envoy_config_trace_v3_datadog_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_trace_v3_datadog_proto_rawDescData) + }) + return file_envoy_config_trace_v3_datadog_proto_rawDescData +} + +var file_envoy_config_trace_v3_datadog_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_config_trace_v3_datadog_proto_goTypes = []interface{}{ + (*DatadogRemoteConfig)(nil), // 0: envoy.config.trace.v3.DatadogRemoteConfig + (*DatadogConfig)(nil), // 1: envoy.config.trace.v3.DatadogConfig + (*durationpb.Duration)(nil), // 2: google.protobuf.Duration +} +var file_envoy_config_trace_v3_datadog_proto_depIdxs = []int32{ + 2, // 0: envoy.config.trace.v3.DatadogRemoteConfig.polling_interval:type_name -> google.protobuf.Duration + 0, // 1: envoy.config.trace.v3.DatadogConfig.remote_config:type_name -> envoy.config.trace.v3.DatadogRemoteConfig + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_config_trace_v3_datadog_proto_init() } +func 
file_envoy_config_trace_v3_datadog_proto_init() { + if File_envoy_config_trace_v3_datadog_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_trace_v3_datadog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DatadogRemoteConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_trace_v3_datadog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DatadogConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_trace_v3_datadog_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_trace_v3_datadog_proto_goTypes, + DependencyIndexes: file_envoy_config_trace_v3_datadog_proto_depIdxs, + MessageInfos: file_envoy_config_trace_v3_datadog_proto_msgTypes, + }.Build() + File_envoy_config_trace_v3_datadog_proto = out.File + file_envoy_config_trace_v3_datadog_proto_rawDesc = nil + file_envoy_config_trace_v3_datadog_proto_goTypes = nil + file_envoy_config_trace_v3_datadog_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.validate.go new file mode 100644 index 0000000000..43ac4c898b --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog.pb.validate.go @@ -0,0 +1,321 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/trace/v3/datadog.proto + +package tracev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on DatadogRemoteConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DatadogRemoteConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DatadogRemoteConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DatadogRemoteConfigMultiError, or nil if none found. 
+func (m *DatadogRemoteConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *DatadogRemoteConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetPollingInterval()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DatadogRemoteConfigValidationError{ + field: "PollingInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DatadogRemoteConfigValidationError{ + field: "PollingInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPollingInterval()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DatadogRemoteConfigValidationError{ + field: "PollingInterval", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DatadogRemoteConfigMultiError(errors) + } + + return nil +} + +// DatadogRemoteConfigMultiError is an error wrapping multiple validation +// errors returned by DatadogRemoteConfig.ValidateAll() if the designated +// constraints aren't met. +type DatadogRemoteConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DatadogRemoteConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DatadogRemoteConfigMultiError) AllErrors() []error { return m } + +// DatadogRemoteConfigValidationError is the validation error returned by +// DatadogRemoteConfig.Validate if the designated constraints aren't met. +type DatadogRemoteConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DatadogRemoteConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DatadogRemoteConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DatadogRemoteConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DatadogRemoteConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DatadogRemoteConfigValidationError) ErrorName() string { + return "DatadogRemoteConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e DatadogRemoteConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDatadogRemoteConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DatadogRemoteConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DatadogRemoteConfigValidationError{} + +// Validate checks the field values on DatadogConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *DatadogConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DatadogConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in DatadogConfigMultiError, or +// nil if none found. +func (m *DatadogConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *DatadogConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetCollectorCluster()) < 1 { + err := DatadogConfigValidationError{ + field: "CollectorCluster", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetServiceName()) < 1 { + err := DatadogConfigValidationError{ + field: "ServiceName", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for CollectorHostname + + if all { + switch v := interface{}(m.GetRemoteConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DatadogConfigValidationError{ + field: "RemoteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DatadogConfigValidationError{ + field: "RemoteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRemoteConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DatadogConfigValidationError{ + field: "RemoteConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DatadogConfigMultiError(errors) + } + + return nil +} + +// DatadogConfigMultiError is an error wrapping multiple validation errors +// returned by DatadogConfig.ValidateAll() if the designated constraints +// aren't met. +type DatadogConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DatadogConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DatadogConfigMultiError) AllErrors() []error { return m } + +// DatadogConfigValidationError is the validation error returned by +// DatadogConfig.Validate if the designated constraints aren't met. +type DatadogConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DatadogConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DatadogConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DatadogConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DatadogConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DatadogConfigValidationError) ErrorName() string { return "DatadogConfigValidationError" } + +// Error satisfies the builtin error interface +func (e DatadogConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDatadogConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DatadogConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DatadogConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog_vtproto.pb.go new file mode 100644 index 0000000000..b4cede7f00 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/datadog_vtproto.pb.go @@ -0,0 +1,167 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/datadog.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *DatadogRemoteConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DatadogRemoteConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DatadogRemoteConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PollingInterval != nil { + size, err := (*durationpb.Duration)(m.PollingInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DatadogConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DatadogConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DatadogConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RemoteConfig != nil { + size, err := m.RemoteConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- 
+ dAtA[i] = 0x22 + } + if len(m.CollectorHostname) > 0 { + i -= len(m.CollectorHostname) + copy(dAtA[i:], m.CollectorHostname) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CollectorHostname))) + i-- + dAtA[i] = 0x1a + } + if len(m.ServiceName) > 0 { + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x12 + } + if len(m.CollectorCluster) > 0 { + i -= len(m.CollectorCluster) + copy(dAtA[i:], m.CollectorCluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CollectorCluster))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DatadogRemoteConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PollingInterval != nil { + l = (*durationpb.Duration)(m.PollingInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DatadogConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CollectorCluster) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ServiceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.CollectorHostname) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RemoteConfig != nil { + l = m.RemoteConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.go new file mode 100644 index 0000000000..a3dd5bb959 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.go @@ -0,0 +1,204 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/trace/v3/dynamic_ot.proto + +package tracev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// DynamicOtConfig was used to dynamically load a tracer from a shared library +// that implements the `OpenTracing dynamic loading API +// `_. +// [#not-implemented-hide:] +type DynamicOtConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Dynamic library implementing the `OpenTracing API + // `_. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/dynamic_ot.proto. + Library string `protobuf:"bytes,1,opt,name=library,proto3" json:"library,omitempty"` + // The configuration to use when creating a tracer from the given dynamic + // library. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/dynamic_ot.proto. 
+ Config *structpb.Struct `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *DynamicOtConfig) Reset() { + *x = DynamicOtConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_dynamic_ot_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DynamicOtConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DynamicOtConfig) ProtoMessage() {} + +func (x *DynamicOtConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_dynamic_ot_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DynamicOtConfig.ProtoReflect.Descriptor instead. +func (*DynamicOtConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_dynamic_ot_proto_rawDescGZIP(), []int{0} +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/dynamic_ot.proto. +func (x *DynamicOtConfig) GetLibrary() string { + if x != nil { + return x.Library + } + return "" +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/dynamic_ot.proto. +func (x *DynamicOtConfig) GetConfig() *structpb.Struct { + if x != nil { + return x.Config + } + return nil +} + +var File_envoy_config_trace_v3_dynamic_ot_proto protoreflect.FileDescriptor + +var file_envoy_config_trace_v3_dynamic_ot_proto_rawDesc = []byte{ + 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, + 0x6f, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x1a, + 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb7, 0x01, + 0x0a, 0x0f, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4f, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x32, 0x0a, 0x07, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x18, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, 0x01, 0x52, 0x07, 0x6c, 0x69, + 0x62, 0x72, 0x61, 0x72, 0x79, 0x12, 
0x42, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x11, + 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, + 0x01, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, + 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4f, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0xb8, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x2d, + 0x12, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x73, 0x2e, 0x64, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x5f, 0x6f, 0x74, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xba, 0x80, 0xc8, + 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x4f, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_trace_v3_dynamic_ot_proto_rawDescOnce sync.Once + file_envoy_config_trace_v3_dynamic_ot_proto_rawDescData = file_envoy_config_trace_v3_dynamic_ot_proto_rawDesc +) + +func file_envoy_config_trace_v3_dynamic_ot_proto_rawDescGZIP() []byte { + file_envoy_config_trace_v3_dynamic_ot_proto_rawDescOnce.Do(func() { + file_envoy_config_trace_v3_dynamic_ot_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_trace_v3_dynamic_ot_proto_rawDescData) + }) + return file_envoy_config_trace_v3_dynamic_ot_proto_rawDescData +} + +var file_envoy_config_trace_v3_dynamic_ot_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_trace_v3_dynamic_ot_proto_goTypes = []interface{}{ + (*DynamicOtConfig)(nil), // 0: envoy.config.trace.v3.DynamicOtConfig + (*structpb.Struct)(nil), // 1: google.protobuf.Struct +} +var file_envoy_config_trace_v3_dynamic_ot_proto_depIdxs = []int32{ + 1, // 0: envoy.config.trace.v3.DynamicOtConfig.config:type_name -> google.protobuf.Struct + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_config_trace_v3_dynamic_ot_proto_init() } +func file_envoy_config_trace_v3_dynamic_ot_proto_init() { + if File_envoy_config_trace_v3_dynamic_ot_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_trace_v3_dynamic_ot_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DynamicOtConfig); i { + case 
0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_trace_v3_dynamic_ot_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_trace_v3_dynamic_ot_proto_goTypes, + DependencyIndexes: file_envoy_config_trace_v3_dynamic_ot_proto_depIdxs, + MessageInfos: file_envoy_config_trace_v3_dynamic_ot_proto_msgTypes, + }.Build() + File_envoy_config_trace_v3_dynamic_ot_proto = out.File + file_envoy_config_trace_v3_dynamic_ot_proto_rawDesc = nil + file_envoy_config_trace_v3_dynamic_ot_proto_goTypes = nil + file_envoy_config_trace_v3_dynamic_ot_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.validate.go new file mode 100644 index 0000000000..2e93600275 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot.pb.validate.go @@ -0,0 +1,177 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/trace/v3/dynamic_ot.proto + +package tracev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on DynamicOtConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *DynamicOtConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DynamicOtConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DynamicOtConfigMultiError, or nil if none found. 
+func (m *DynamicOtConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *DynamicOtConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetLibrary()) < 1 { + err := DynamicOtConfigValidationError{ + field: "Library", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DynamicOtConfigValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DynamicOtConfigValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DynamicOtConfigValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DynamicOtConfigMultiError(errors) + } + + return nil +} + +// DynamicOtConfigMultiError is an error wrapping multiple validation errors +// returned by DynamicOtConfig.ValidateAll() if the designated constraints +// aren't met. +type DynamicOtConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DynamicOtConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DynamicOtConfigMultiError) AllErrors() []error { return m } + +// DynamicOtConfigValidationError is the validation error returned by +// DynamicOtConfig.Validate if the designated constraints aren't met. +type DynamicOtConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DynamicOtConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DynamicOtConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DynamicOtConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DynamicOtConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DynamicOtConfigValidationError) ErrorName() string { return "DynamicOtConfigValidationError" } + +// Error satisfies the builtin error interface +func (e DynamicOtConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDynamicOtConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DynamicOtConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DynamicOtConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot_vtproto.pb.go new file mode 100644 index 0000000000..594ee84613 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/dynamic_ot_vtproto.pb.go @@ -0,0 +1,88 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/dynamic_ot.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *DynamicOtConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DynamicOtConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicOtConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Config != nil { + size, err := (*structpb.Struct)(m.Config).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Library) > 0 { + i -= len(m.Library) + copy(dAtA[i:], m.Library) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Library))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DynamicOtConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Library) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Config != nil { + l = (*structpb.Struct)(m.Config).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.go new file mode 100644 index 0000000000..98a8a86f15 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.go @@ -0,0 +1,293 @@ +// Code generated by 
protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/trace/v3/http_tracer.proto + +package tracev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The tracing configuration specifies settings for an HTTP tracer provider used by Envoy. +// +// Envoy may support other tracers in the future, but right now the HTTP tracer is the only one +// supported. +// +// .. attention:: +// +// Use of this message type has been deprecated in favor of direct use of +// :ref:`Tracing.Http `. +type Tracing struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Provides configuration for the HTTP tracer. + Http *Tracing_Http `protobuf:"bytes,1,opt,name=http,proto3" json:"http,omitempty"` +} + +func (x *Tracing) Reset() { + *x = Tracing{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_http_tracer_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Tracing) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Tracing) ProtoMessage() {} + +func (x *Tracing) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_http_tracer_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Tracing.ProtoReflect.Descriptor instead. +func (*Tracing) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_http_tracer_proto_rawDescGZIP(), []int{0} +} + +func (x *Tracing) GetHttp() *Tracing_Http { + if x != nil { + return x.Http + } + return nil +} + +// Configuration for an HTTP tracer provider used by Envoy. +// +// The configuration is defined by the +// :ref:`HttpConnectionManager.Tracing ` +// :ref:`provider ` +// field. +type Tracing_Http struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the HTTP trace driver to instantiate. The name must match a + // supported HTTP trace driver. + // See the :ref:`extensions listed in typed_config below ` for the default list of the HTTP trace driver. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Trace driver specific configuration which must be set according to the driver being instantiated. 
+ // [#extension-category: envoy.tracers] + // + // Types that are assignable to ConfigType: + // + // *Tracing_Http_TypedConfig + ConfigType isTracing_Http_ConfigType `protobuf_oneof:"config_type"` +} + +func (x *Tracing_Http) Reset() { + *x = Tracing_Http{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_http_tracer_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Tracing_Http) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Tracing_Http) ProtoMessage() {} + +func (x *Tracing_Http) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_http_tracer_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Tracing_Http.ProtoReflect.Descriptor instead. +func (*Tracing_Http) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_http_tracer_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Tracing_Http) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *Tracing_Http) GetConfigType() isTracing_Http_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *Tracing_Http) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*Tracing_Http_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isTracing_Http_ConfigType interface { + isTracing_Http_ConfigType() +} + +type Tracing_Http_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*Tracing_Http_TypedConfig) isTracing_Http_ConfigType() {} + +var File_envoy_config_trace_v3_http_tracer_proto protoreflect.FileDescriptor + +var file_envoy_config_trace_v3_http_tracer_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, + 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x91, 0x02, 0x0a, 0x07, 0x54, 0x72, 0x61, 0x63, 0x69, + 0x6e, 0x67, 0x12, 0x37, 0x0a, 0x04, 0x68, 0x74, 0x74, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, + 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, 0x1a, 0xa6, 0x01, 0x0a, 0x04, + 0x48, 0x74, 0x74, 0x70, 0x12, 0x1b, 
0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, + 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x29, 0x9a, 0xc5, + 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, + 0x6e, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x42, 0x86, 0x01, 0xba, 0x80, 0xc8, + 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x48, 0x74, 0x74, 0x70, + 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_trace_v3_http_tracer_proto_rawDescOnce sync.Once + file_envoy_config_trace_v3_http_tracer_proto_rawDescData = file_envoy_config_trace_v3_http_tracer_proto_rawDesc +) + +func file_envoy_config_trace_v3_http_tracer_proto_rawDescGZIP() []byte { + file_envoy_config_trace_v3_http_tracer_proto_rawDescOnce.Do(func() { + file_envoy_config_trace_v3_http_tracer_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_trace_v3_http_tracer_proto_rawDescData) + }) + return file_envoy_config_trace_v3_http_tracer_proto_rawDescData +} + +var file_envoy_config_trace_v3_http_tracer_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_config_trace_v3_http_tracer_proto_goTypes = []interface{}{ + (*Tracing)(nil), // 0: envoy.config.trace.v3.Tracing + (*Tracing_Http)(nil), // 1: envoy.config.trace.v3.Tracing.Http + (*anypb.Any)(nil), // 2: google.protobuf.Any +} +var file_envoy_config_trace_v3_http_tracer_proto_depIdxs = []int32{ + 1, // 0: envoy.config.trace.v3.Tracing.http:type_name -> envoy.config.trace.v3.Tracing.Http + 2, // 1: envoy.config.trace.v3.Tracing.Http.typed_config:type_name -> google.protobuf.Any + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { 
file_envoy_config_trace_v3_http_tracer_proto_init() } +func file_envoy_config_trace_v3_http_tracer_proto_init() { + if File_envoy_config_trace_v3_http_tracer_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_trace_v3_http_tracer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Tracing); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_trace_v3_http_tracer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Tracing_Http); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_trace_v3_http_tracer_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*Tracing_Http_TypedConfig)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_trace_v3_http_tracer_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_trace_v3_http_tracer_proto_goTypes, + DependencyIndexes: file_envoy_config_trace_v3_http_tracer_proto_depIdxs, + MessageInfos: file_envoy_config_trace_v3_http_tracer_proto_msgTypes, + }.Build() + File_envoy_config_trace_v3_http_tracer_proto = out.File + file_envoy_config_trace_v3_http_tracer_proto_rawDesc = nil + file_envoy_config_trace_v3_http_tracer_proto_goTypes = nil + file_envoy_config_trace_v3_http_tracer_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.validate.go new file mode 100644 index 0000000000..60cec43ae2 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer.pb.validate.go @@ -0,0 +1,320 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/trace/v3/http_tracer.proto + +package tracev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Tracing with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Tracing) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Tracing with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in TracingMultiError, or nil if none found. 
+func (m *Tracing) ValidateAll() error { + return m.validate(true) +} + +func (m *Tracing) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetHttp()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TracingValidationError{ + field: "Http", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TracingValidationError{ + field: "Http", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttp()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TracingValidationError{ + field: "Http", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return TracingMultiError(errors) + } + + return nil +} + +// TracingMultiError is an error wrapping multiple validation errors returned +// by Tracing.ValidateAll() if the designated constraints aren't met. +type TracingMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TracingMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TracingMultiError) AllErrors() []error { return m } + +// TracingValidationError is the validation error returned by Tracing.Validate +// if the designated constraints aren't met. +type TracingValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TracingValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TracingValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TracingValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TracingValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TracingValidationError) ErrorName() string { return "TracingValidationError" } + +// Error satisfies the builtin error interface +func (e TracingValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTracing.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TracingValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TracingValidationError{} + +// Validate checks the field values on Tracing_Http with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Tracing_Http) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Tracing_Http with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in Tracing_HttpMultiError, or +// nil if none found. 
+func (m *Tracing_Http) ValidateAll() error { + return m.validate(true) +} + +func (m *Tracing_Http) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := Tracing_HttpValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + switch v := m.ConfigType.(type) { + case *Tracing_Http_TypedConfig: + if v == nil { + err := Tracing_HttpValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Tracing_HttpValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Tracing_HttpValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Tracing_HttpValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return Tracing_HttpMultiError(errors) + } + + return nil +} + +// Tracing_HttpMultiError is an error wrapping multiple validation errors +// returned by Tracing_Http.ValidateAll() if the designated constraints aren't met. +type Tracing_HttpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Tracing_HttpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Tracing_HttpMultiError) AllErrors() []error { return m } + +// Tracing_HttpValidationError is the validation error returned by +// Tracing_Http.Validate if the designated constraints aren't met. +type Tracing_HttpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Tracing_HttpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Tracing_HttpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Tracing_HttpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Tracing_HttpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Tracing_HttpValidationError) ErrorName() string { return "Tracing_HttpValidationError" } + +// Error satisfies the builtin error interface +func (e Tracing_HttpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTracing_Http.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Tracing_HttpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Tracing_HttpValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer_vtproto.pb.go new file mode 100644 index 0000000000..756a6de2ca --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/http_tracer_vtproto.pb.go @@ -0,0 +1,178 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/http_tracer.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Tracing_Http) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Tracing_Http) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Tracing_Http) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigType.(*Tracing_Http_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Tracing_Http_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Tracing_Http_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Tracing) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *Tracing) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Tracing) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Http != nil { + size, err := m.Http.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Tracing_Http) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Tracing_Http_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Tracing) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Http != nil { + l = m.Http.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.go new file mode 100644 index 0000000000..e7eeb33ef6 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.go @@ -0,0 +1,294 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/trace/v3/lightstep.proto + +package tracev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Available propagation modes +type LightstepConfig_PropagationMode int32 + +const ( + // Propagate trace context in the single header x-ot-span-context. + LightstepConfig_ENVOY LightstepConfig_PropagationMode = 0 + // Propagate trace context using LightStep's native format. + LightstepConfig_LIGHTSTEP LightstepConfig_PropagationMode = 1 + // Propagate trace context using the b3 format. + LightstepConfig_B3 LightstepConfig_PropagationMode = 2 + // Propagation trace context using the w3 trace-context standard. + LightstepConfig_TRACE_CONTEXT LightstepConfig_PropagationMode = 3 +) + +// Enum value maps for LightstepConfig_PropagationMode. 
+var ( + LightstepConfig_PropagationMode_name = map[int32]string{ + 0: "ENVOY", + 1: "LIGHTSTEP", + 2: "B3", + 3: "TRACE_CONTEXT", + } + LightstepConfig_PropagationMode_value = map[string]int32{ + "ENVOY": 0, + "LIGHTSTEP": 1, + "B3": 2, + "TRACE_CONTEXT": 3, + } +) + +func (x LightstepConfig_PropagationMode) Enum() *LightstepConfig_PropagationMode { + p := new(LightstepConfig_PropagationMode) + *p = x + return p +} + +func (x LightstepConfig_PropagationMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (LightstepConfig_PropagationMode) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_trace_v3_lightstep_proto_enumTypes[0].Descriptor() +} + +func (LightstepConfig_PropagationMode) Type() protoreflect.EnumType { + return &file_envoy_config_trace_v3_lightstep_proto_enumTypes[0] +} + +func (x LightstepConfig_PropagationMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use LightstepConfig_PropagationMode.Descriptor instead. +func (LightstepConfig_PropagationMode) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_lightstep_proto_rawDescGZIP(), []int{0, 0} +} + +// Configuration for the LightStep tracer. +// [#extension: envoy.tracers.lightstep] +// [#not-implemented-hide:] +type LightstepConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The cluster manager cluster that hosts the LightStep collectors. + CollectorCluster string `protobuf:"bytes,1,opt,name=collector_cluster,json=collectorCluster,proto3" json:"collector_cluster,omitempty"` + // File containing the access token to the `LightStep + // `_ API. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/lightstep.proto. + AccessTokenFile string `protobuf:"bytes,2,opt,name=access_token_file,json=accessTokenFile,proto3" json:"access_token_file,omitempty"` + // Access token to the `LightStep `_ API. + AccessToken *v3.DataSource `protobuf:"bytes,4,opt,name=access_token,json=accessToken,proto3" json:"access_token,omitempty"` + // Propagation modes to use by LightStep's tracer. + PropagationModes []LightstepConfig_PropagationMode `protobuf:"varint,3,rep,packed,name=propagation_modes,json=propagationModes,proto3,enum=envoy.config.trace.v3.LightstepConfig_PropagationMode" json:"propagation_modes,omitempty"` +} + +func (x *LightstepConfig) Reset() { + *x = LightstepConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_lightstep_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LightstepConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LightstepConfig) ProtoMessage() {} + +func (x *LightstepConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_lightstep_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LightstepConfig.ProtoReflect.Descriptor instead. 
+func (*LightstepConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_lightstep_proto_rawDescGZIP(), []int{0} +} + +func (x *LightstepConfig) GetCollectorCluster() string { + if x != nil { + return x.CollectorCluster + } + return "" +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/lightstep.proto. +func (x *LightstepConfig) GetAccessTokenFile() string { + if x != nil { + return x.AccessTokenFile + } + return "" +} + +func (x *LightstepConfig) GetAccessToken() *v3.DataSource { + if x != nil { + return x.AccessToken + } + return nil +} + +func (x *LightstepConfig) GetPropagationModes() []LightstepConfig_PropagationMode { + if x != nil { + return x.PropagationModes + } + return nil +} + +var File_envoy_config_trace_v3_lightstep_proto protoreflect.FileDescriptor + +var file_envoy_config_trace_v3_lightstep_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x73, 0x74, 0x65, + 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, + 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xaf, 0x03, 0x0a, 0x0f, 0x4c, 0x69, 0x67, 0x68, 0x74, 0x73, 0x74, 0x65, 0x70, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x34, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x11, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, + 0x01, 0x52, 0x0f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x46, 0x69, + 0x6c, 0x65, 0x12, 0x43, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x44, 0x61, 0x74, 0x61, 0x53, 
0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0b, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x72, 0x0a, 0x11, 0x70, 0x72, 0x6f, 0x70, 0x61, + 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x67, 0x68, 0x74, + 0x73, 0x74, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x61, + 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x92, + 0x01, 0x07, 0x22, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x70, 0x61, + 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x73, 0x22, 0x46, 0x0a, 0x0f, 0x50, + 0x72, 0x6f, 0x70, 0x61, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, + 0x0a, 0x05, 0x45, 0x4e, 0x56, 0x4f, 0x59, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4c, 0x49, 0x47, + 0x48, 0x54, 0x53, 0x54, 0x45, 0x50, 0x10, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x42, 0x33, 0x10, 0x02, + 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x52, 0x41, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x58, + 0x54, 0x10, 0x03, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4c, 0x69, 0x67, 0x68, 0x74, 0x73, 0x74, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x42, 0xb7, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x2c, 0x12, 0x2a, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x72, 0x73, 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x73, 0x74, 0x65, 0x70, 0x2e, 0x76, + 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, + 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x4c, 0x69, 0x67, 0x68, 0x74, 0x73, 0x74, 0x65, 0x70, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_trace_v3_lightstep_proto_rawDescOnce sync.Once + file_envoy_config_trace_v3_lightstep_proto_rawDescData = file_envoy_config_trace_v3_lightstep_proto_rawDesc +) + +func file_envoy_config_trace_v3_lightstep_proto_rawDescGZIP() []byte { + file_envoy_config_trace_v3_lightstep_proto_rawDescOnce.Do(func() { + file_envoy_config_trace_v3_lightstep_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_trace_v3_lightstep_proto_rawDescData) + }) + return file_envoy_config_trace_v3_lightstep_proto_rawDescData +} + +var file_envoy_config_trace_v3_lightstep_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_config_trace_v3_lightstep_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_trace_v3_lightstep_proto_goTypes = []interface{}{ + 
(LightstepConfig_PropagationMode)(0), // 0: envoy.config.trace.v3.LightstepConfig.PropagationMode + (*LightstepConfig)(nil), // 1: envoy.config.trace.v3.LightstepConfig + (*v3.DataSource)(nil), // 2: envoy.config.core.v3.DataSource +} +var file_envoy_config_trace_v3_lightstep_proto_depIdxs = []int32{ + 2, // 0: envoy.config.trace.v3.LightstepConfig.access_token:type_name -> envoy.config.core.v3.DataSource + 0, // 1: envoy.config.trace.v3.LightstepConfig.propagation_modes:type_name -> envoy.config.trace.v3.LightstepConfig.PropagationMode + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_config_trace_v3_lightstep_proto_init() } +func file_envoy_config_trace_v3_lightstep_proto_init() { + if File_envoy_config_trace_v3_lightstep_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_trace_v3_lightstep_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LightstepConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_trace_v3_lightstep_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_trace_v3_lightstep_proto_goTypes, + DependencyIndexes: file_envoy_config_trace_v3_lightstep_proto_depIdxs, + EnumInfos: file_envoy_config_trace_v3_lightstep_proto_enumTypes, + MessageInfos: file_envoy_config_trace_v3_lightstep_proto_msgTypes, + }.Build() + File_envoy_config_trace_v3_lightstep_proto = out.File + file_envoy_config_trace_v3_lightstep_proto_rawDesc = nil + file_envoy_config_trace_v3_lightstep_proto_goTypes = nil + file_envoy_config_trace_v3_lightstep_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.validate.go new file mode 100644 index 0000000000..d9ebdddca9 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep.pb.validate.go @@ -0,0 +1,195 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/trace/v3/lightstep.proto + +package tracev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on LightstepConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. 
+func (m *LightstepConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LightstepConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// LightstepConfigMultiError, or nil if none found. +func (m *LightstepConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *LightstepConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetCollectorCluster()) < 1 { + err := LightstepConfigValidationError{ + field: "CollectorCluster", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for AccessTokenFile + + if all { + switch v := interface{}(m.GetAccessToken()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LightstepConfigValidationError{ + field: "AccessToken", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LightstepConfigValidationError{ + field: "AccessToken", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAccessToken()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LightstepConfigValidationError{ + field: "AccessToken", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetPropagationModes() { + _, _ = idx, item + + if _, ok := LightstepConfig_PropagationMode_name[int32(item)]; !ok { + err := LightstepConfigValidationError{ + field: fmt.Sprintf("PropagationModes[%v]", idx), + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return LightstepConfigMultiError(errors) + } + + return nil +} + +// LightstepConfigMultiError is an error wrapping multiple validation errors +// returned by LightstepConfig.ValidateAll() if the designated constraints +// aren't met. +type LightstepConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LightstepConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LightstepConfigMultiError) AllErrors() []error { return m } + +// LightstepConfigValidationError is the validation error returned by +// LightstepConfig.Validate if the designated constraints aren't met. +type LightstepConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LightstepConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LightstepConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LightstepConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LightstepConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e LightstepConfigValidationError) ErrorName() string { return "LightstepConfigValidationError" } + +// Error satisfies the builtin error interface +func (e LightstepConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLightstepConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LightstepConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LightstepConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep_vtproto.pb.go new file mode 100644 index 0000000000..c1f9240a66 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/lightstep_vtproto.pb.go @@ -0,0 +1,145 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/lightstep.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *LightstepConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LightstepConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LightstepConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AccessToken != nil { + if vtmsg, ok := interface{}(m.AccessToken).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AccessToken) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if len(m.PropagationModes) > 0 { + var pksize2 int + for _, num := range m.PropagationModes { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.PropagationModes { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x1a + } + if len(m.AccessTokenFile) > 0 { + i -= len(m.AccessTokenFile) + copy(dAtA[i:], m.AccessTokenFile) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AccessTokenFile))) + i-- + dAtA[i] = 0x12 + } + if 
len(m.CollectorCluster) > 0 { + i -= len(m.CollectorCluster) + copy(dAtA[i:], m.CollectorCluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CollectorCluster))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LightstepConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CollectorCluster) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.AccessTokenFile) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.PropagationModes) > 0 { + l = 0 + for _, e := range m.PropagationModes { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if m.AccessToken != nil { + if size, ok := interface{}(m.AccessToken).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.AccessToken) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.go new file mode 100644 index 0000000000..b7e7c69f61 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.go @@ -0,0 +1,489 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/trace/v3/opencensus.proto + +package tracev3 + +import ( + v1 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type OpenCensusConfig_TraceContext int32 + +const ( + // No-op default, no trace context is utilized. + OpenCensusConfig_NONE OpenCensusConfig_TraceContext = 0 + // W3C Trace-Context format "traceparent:" header. + OpenCensusConfig_TRACE_CONTEXT OpenCensusConfig_TraceContext = 1 + // Binary "grpc-trace-bin:" header. + OpenCensusConfig_GRPC_TRACE_BIN OpenCensusConfig_TraceContext = 2 + // "X-Cloud-Trace-Context:" header. + OpenCensusConfig_CLOUD_TRACE_CONTEXT OpenCensusConfig_TraceContext = 3 + // X-B3-* headers. + OpenCensusConfig_B3 OpenCensusConfig_TraceContext = 4 +) + +// Enum value maps for OpenCensusConfig_TraceContext. 
+var ( + OpenCensusConfig_TraceContext_name = map[int32]string{ + 0: "NONE", + 1: "TRACE_CONTEXT", + 2: "GRPC_TRACE_BIN", + 3: "CLOUD_TRACE_CONTEXT", + 4: "B3", + } + OpenCensusConfig_TraceContext_value = map[string]int32{ + "NONE": 0, + "TRACE_CONTEXT": 1, + "GRPC_TRACE_BIN": 2, + "CLOUD_TRACE_CONTEXT": 3, + "B3": 4, + } +) + +func (x OpenCensusConfig_TraceContext) Enum() *OpenCensusConfig_TraceContext { + p := new(OpenCensusConfig_TraceContext) + *p = x + return p +} + +func (x OpenCensusConfig_TraceContext) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (OpenCensusConfig_TraceContext) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_trace_v3_opencensus_proto_enumTypes[0].Descriptor() +} + +func (OpenCensusConfig_TraceContext) Type() protoreflect.EnumType { + return &file_envoy_config_trace_v3_opencensus_proto_enumTypes[0] +} + +func (x OpenCensusConfig_TraceContext) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use OpenCensusConfig_TraceContext.Descriptor instead. +func (OpenCensusConfig_TraceContext) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_opencensus_proto_rawDescGZIP(), []int{0, 0} +} + +// Configuration for the OpenCensus tracer. +// [#next-free-field: 15] +// [#extension: envoy.tracers.opencensus] +type OpenCensusConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configures tracing, e.g. the sampler, max number of annotations, etc. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. + TraceConfig *v1.TraceConfig `protobuf:"bytes,1,opt,name=trace_config,json=traceConfig,proto3" json:"trace_config,omitempty"` + // Enables the stdout exporter if set to true. This is intended for debugging + // purposes. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. + StdoutExporterEnabled bool `protobuf:"varint,2,opt,name=stdout_exporter_enabled,json=stdoutExporterEnabled,proto3" json:"stdout_exporter_enabled,omitempty"` + // Enables the Stackdriver exporter if set to true. The project_id must also + // be set. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. + StackdriverExporterEnabled bool `protobuf:"varint,3,opt,name=stackdriver_exporter_enabled,json=stackdriverExporterEnabled,proto3" json:"stackdriver_exporter_enabled,omitempty"` + // The Cloud project_id to use for Stackdriver tracing. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. + StackdriverProjectId string `protobuf:"bytes,4,opt,name=stackdriver_project_id,json=stackdriverProjectId,proto3" json:"stackdriver_project_id,omitempty"` + // (optional) By default, the Stackdriver exporter will connect to production + // Stackdriver. If stackdriver_address is non-empty, it will instead connect + // to this address, which is in the gRPC format: + // https://github.com/grpc/grpc/blob/master/doc/naming.md + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. + StackdriverAddress string `protobuf:"bytes,10,opt,name=stackdriver_address,json=stackdriverAddress,proto3" json:"stackdriver_address,omitempty"` + // (optional) The gRPC server that hosts Stackdriver tracing service. Only + // Google gRPC is supported. If :ref:`target_uri ` + // is not provided, the default production Stackdriver address will be used. 
+ // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. + StackdriverGrpcService *v3.GrpcService `protobuf:"bytes,13,opt,name=stackdriver_grpc_service,json=stackdriverGrpcService,proto3" json:"stackdriver_grpc_service,omitempty"` + // Enables the Zipkin exporter if set to true. The url and service name must + // also be set. This is deprecated, prefer to use Envoy's :ref:`native Zipkin + // tracer `. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. + ZipkinExporterEnabled bool `protobuf:"varint,5,opt,name=zipkin_exporter_enabled,json=zipkinExporterEnabled,proto3" json:"zipkin_exporter_enabled,omitempty"` + // The URL to Zipkin, e.g. "http://127.0.0.1:9411/api/v2/spans". This is + // deprecated, prefer to use Envoy's :ref:`native Zipkin tracer + // `. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. + ZipkinUrl string `protobuf:"bytes,6,opt,name=zipkin_url,json=zipkinUrl,proto3" json:"zipkin_url,omitempty"` + // Enables the OpenCensus Agent exporter if set to true. The ocagent_address or + // ocagent_grpc_service must also be set. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. + OcagentExporterEnabled bool `protobuf:"varint,11,opt,name=ocagent_exporter_enabled,json=ocagentExporterEnabled,proto3" json:"ocagent_exporter_enabled,omitempty"` + // The address of the OpenCensus Agent, if its exporter is enabled, in gRPC + // format: https://github.com/grpc/grpc/blob/master/doc/naming.md + // [#comment:TODO: deprecate this field] + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. + OcagentAddress string `protobuf:"bytes,12,opt,name=ocagent_address,json=ocagentAddress,proto3" json:"ocagent_address,omitempty"` + // (optional) The gRPC server hosted by the OpenCensus Agent. Only Google gRPC is supported. + // This is only used if the ocagent_address is left empty. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. + OcagentGrpcService *v3.GrpcService `protobuf:"bytes,14,opt,name=ocagent_grpc_service,json=ocagentGrpcService,proto3" json:"ocagent_grpc_service,omitempty"` + // List of incoming trace context headers we will accept. First one found + // wins. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. + IncomingTraceContext []OpenCensusConfig_TraceContext `protobuf:"varint,8,rep,packed,name=incoming_trace_context,json=incomingTraceContext,proto3,enum=envoy.config.trace.v3.OpenCensusConfig_TraceContext" json:"incoming_trace_context,omitempty"` + // List of outgoing trace context headers we will produce. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. 
+ OutgoingTraceContext []OpenCensusConfig_TraceContext `protobuf:"varint,9,rep,packed,name=outgoing_trace_context,json=outgoingTraceContext,proto3,enum=envoy.config.trace.v3.OpenCensusConfig_TraceContext" json:"outgoing_trace_context,omitempty"` +} + +func (x *OpenCensusConfig) Reset() { + *x = OpenCensusConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_opencensus_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OpenCensusConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OpenCensusConfig) ProtoMessage() {} + +func (x *OpenCensusConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_opencensus_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OpenCensusConfig.ProtoReflect.Descriptor instead. +func (*OpenCensusConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_opencensus_proto_rawDescGZIP(), []int{0} +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. +func (x *OpenCensusConfig) GetTraceConfig() *v1.TraceConfig { + if x != nil { + return x.TraceConfig + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. +func (x *OpenCensusConfig) GetStdoutExporterEnabled() bool { + if x != nil { + return x.StdoutExporterEnabled + } + return false +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. +func (x *OpenCensusConfig) GetStackdriverExporterEnabled() bool { + if x != nil { + return x.StackdriverExporterEnabled + } + return false +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. +func (x *OpenCensusConfig) GetStackdriverProjectId() string { + if x != nil { + return x.StackdriverProjectId + } + return "" +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. +func (x *OpenCensusConfig) GetStackdriverAddress() string { + if x != nil { + return x.StackdriverAddress + } + return "" +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. +func (x *OpenCensusConfig) GetStackdriverGrpcService() *v3.GrpcService { + if x != nil { + return x.StackdriverGrpcService + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. +func (x *OpenCensusConfig) GetZipkinExporterEnabled() bool { + if x != nil { + return x.ZipkinExporterEnabled + } + return false +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. +func (x *OpenCensusConfig) GetZipkinUrl() string { + if x != nil { + return x.ZipkinUrl + } + return "" +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. +func (x *OpenCensusConfig) GetOcagentExporterEnabled() bool { + if x != nil { + return x.OcagentExporterEnabled + } + return false +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. +func (x *OpenCensusConfig) GetOcagentAddress() string { + if x != nil { + return x.OcagentAddress + } + return "" +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. 
+func (x *OpenCensusConfig) GetOcagentGrpcService() *v3.GrpcService { + if x != nil { + return x.OcagentGrpcService + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. +func (x *OpenCensusConfig) GetIncomingTraceContext() []OpenCensusConfig_TraceContext { + if x != nil { + return x.IncomingTraceContext + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/opencensus.proto. +func (x *OpenCensusConfig) GetOutgoingTraceContext() []OpenCensusConfig_TraceContext { + if x != nil { + return x.OutgoingTraceContext + } + return nil +} + +var File_envoy_config_trace_v3_opencensus_proto protoreflect.FileDescriptor + +var file_envoy_config_trace_v3_opencensus_proto_rawDesc = []byte{ + 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, + 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x1a, + 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, + 0x6e, 0x73, 0x75, 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x90, 0x0a, + 0x0a, 0x10, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x5c, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, + 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, + 0x01, 0x18, 0x01, 0x52, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x49, 0x0a, 0x17, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x6f, 0x72, + 0x74, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, + 0x05, 0x01, 0x18, 0x01, 0x52, 0x15, 0x73, 0x74, 
0x64, 0x6f, 0x75, 0x74, 0x45, 0x78, 0x70, 0x6f, + 0x72, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x53, 0x0a, 0x1c, 0x73, + 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, 0x6f, 0x72, + 0x74, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, + 0x05, 0x01, 0x18, 0x01, 0x52, 0x1a, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, + 0x72, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x12, 0x47, 0x0a, 0x16, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, + 0x01, 0x18, 0x01, 0x52, 0x14, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, + 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x42, 0x0a, 0x13, 0x73, 0x74, 0x61, + 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, + 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, 0x01, 0x52, 0x12, 0x73, 0x74, 0x61, 0x63, 0x6b, + 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x6e, 0x0a, + 0x18, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x5f, 0x67, 0x72, 0x70, + 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, + 0xd2, 0x05, 0x01, 0x18, 0x01, 0x52, 0x16, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, + 0x65, 0x72, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, + 0x17, 0x7a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, + 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x42, 0x11, + 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, + 0x01, 0x52, 0x15, 0x7a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, + 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x0a, 0x7a, 0x69, 0x70, 0x6b, + 0x69, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x11, 0x92, 0xc7, + 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, 0x01, 0x52, + 0x09, 0x7a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x4b, 0x0a, 0x18, 0x6f, 0x63, + 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x5f, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x11, 0x92, 0xc7, + 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, 0x01, 0x52, + 0x16, 0x6f, 0x63, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, + 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x3a, 0x0a, 0x0f, 0x6f, 0x63, 0x61, 0x67, 0x65, + 0x6e, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x0c, 
0x20, 0x01, 0x28, 0x09, + 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, + 0x01, 0x18, 0x01, 0x52, 0x0e, 0x6f, 0x63, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x12, 0x66, 0x0a, 0x14, 0x6f, 0x63, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x67, + 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, + 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, 0x01, 0x52, 0x12, 0x6f, 0x63, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x7d, 0x0a, 0x16, 0x69, + 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, + 0x05, 0x01, 0x18, 0x01, 0x52, 0x14, 0x69, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x54, 0x72, + 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x7d, 0x0a, 0x16, 0x6f, 0x75, + 0x74, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, + 0x01, 0x18, 0x01, 0x52, 0x14, 0x6f, 0x75, 0x74, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x61, + 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x60, 0x0a, 0x0c, 0x54, 0x72, 0x61, + 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, + 0x45, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x52, 0x41, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x4e, + 0x54, 0x45, 0x58, 0x54, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x47, 0x52, 0x50, 0x43, 0x5f, 0x54, + 0x52, 0x41, 0x43, 0x45, 0x5f, 0x42, 0x49, 0x4e, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x43, 0x4c, + 0x4f, 0x55, 0x44, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x58, + 0x54, 0x10, 0x03, 0x12, 0x06, 0x0a, 0x02, 0x42, 0x33, 0x10, 0x04, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, + 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, + 0x6e, 0x73, 0x75, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, + 0x42, 0xb9, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x2d, 0x12, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x63, + 
0x65, 0x72, 0x73, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x76, + 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, + 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x4f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, + 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_trace_v3_opencensus_proto_rawDescOnce sync.Once + file_envoy_config_trace_v3_opencensus_proto_rawDescData = file_envoy_config_trace_v3_opencensus_proto_rawDesc +) + +func file_envoy_config_trace_v3_opencensus_proto_rawDescGZIP() []byte { + file_envoy_config_trace_v3_opencensus_proto_rawDescOnce.Do(func() { + file_envoy_config_trace_v3_opencensus_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_trace_v3_opencensus_proto_rawDescData) + }) + return file_envoy_config_trace_v3_opencensus_proto_rawDescData +} + +var file_envoy_config_trace_v3_opencensus_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_config_trace_v3_opencensus_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_trace_v3_opencensus_proto_goTypes = []interface{}{ + (OpenCensusConfig_TraceContext)(0), // 0: envoy.config.trace.v3.OpenCensusConfig.TraceContext + (*OpenCensusConfig)(nil), // 1: envoy.config.trace.v3.OpenCensusConfig + (*v1.TraceConfig)(nil), // 2: opencensus.proto.trace.v1.TraceConfig + (*v3.GrpcService)(nil), // 3: envoy.config.core.v3.GrpcService +} +var file_envoy_config_trace_v3_opencensus_proto_depIdxs = []int32{ + 2, // 0: envoy.config.trace.v3.OpenCensusConfig.trace_config:type_name -> opencensus.proto.trace.v1.TraceConfig + 3, // 1: envoy.config.trace.v3.OpenCensusConfig.stackdriver_grpc_service:type_name -> envoy.config.core.v3.GrpcService + 3, // 2: envoy.config.trace.v3.OpenCensusConfig.ocagent_grpc_service:type_name -> envoy.config.core.v3.GrpcService + 0, // 3: envoy.config.trace.v3.OpenCensusConfig.incoming_trace_context:type_name -> envoy.config.trace.v3.OpenCensusConfig.TraceContext + 0, // 4: envoy.config.trace.v3.OpenCensusConfig.outgoing_trace_context:type_name -> envoy.config.trace.v3.OpenCensusConfig.TraceContext + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_envoy_config_trace_v3_opencensus_proto_init() } +func file_envoy_config_trace_v3_opencensus_proto_init() { + if File_envoy_config_trace_v3_opencensus_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_trace_v3_opencensus_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OpenCensusConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_trace_v3_opencensus_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_trace_v3_opencensus_proto_goTypes, + DependencyIndexes: file_envoy_config_trace_v3_opencensus_proto_depIdxs, + EnumInfos: file_envoy_config_trace_v3_opencensus_proto_enumTypes, + MessageInfos: file_envoy_config_trace_v3_opencensus_proto_msgTypes, + }.Build() + File_envoy_config_trace_v3_opencensus_proto = out.File + file_envoy_config_trace_v3_opencensus_proto_rawDesc = nil + file_envoy_config_trace_v3_opencensus_proto_goTypes = nil + file_envoy_config_trace_v3_opencensus_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.validate.go new file mode 100644 index 0000000000..4e8286181c --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus.pb.validate.go @@ -0,0 +1,240 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/trace/v3/opencensus.proto + +package tracev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on OpenCensusConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *OpenCensusConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on OpenCensusConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// OpenCensusConfigMultiError, or nil if none found. 
+func (m *OpenCensusConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *OpenCensusConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetTraceConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OpenCensusConfigValidationError{ + field: "TraceConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OpenCensusConfigValidationError{ + field: "TraceConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTraceConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OpenCensusConfigValidationError{ + field: "TraceConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for StdoutExporterEnabled + + // no validation rules for StackdriverExporterEnabled + + // no validation rules for StackdriverProjectId + + // no validation rules for StackdriverAddress + + if all { + switch v := interface{}(m.GetStackdriverGrpcService()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OpenCensusConfigValidationError{ + field: "StackdriverGrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OpenCensusConfigValidationError{ + field: "StackdriverGrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStackdriverGrpcService()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OpenCensusConfigValidationError{ + field: "StackdriverGrpcService", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ZipkinExporterEnabled + + // no validation rules for ZipkinUrl + + // no validation rules for OcagentExporterEnabled + + // no validation rules for OcagentAddress + + if all { + switch v := interface{}(m.GetOcagentGrpcService()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OpenCensusConfigValidationError{ + field: "OcagentGrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OpenCensusConfigValidationError{ + field: "OcagentGrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOcagentGrpcService()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OpenCensusConfigValidationError{ + field: "OcagentGrpcService", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return OpenCensusConfigMultiError(errors) + } + + return nil +} + +// OpenCensusConfigMultiError is an error wrapping multiple validation errors +// returned by OpenCensusConfig.ValidateAll() if the designated constraints +// aren't met. +type OpenCensusConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m OpenCensusConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m OpenCensusConfigMultiError) AllErrors() []error { return m } + +// OpenCensusConfigValidationError is the validation error returned by +// OpenCensusConfig.Validate if the designated constraints aren't met. +type OpenCensusConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e OpenCensusConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e OpenCensusConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e OpenCensusConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e OpenCensusConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e OpenCensusConfigValidationError) ErrorName() string { return "OpenCensusConfigValidationError" } + +// Error satisfies the builtin error interface +func (e OpenCensusConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sOpenCensusConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = OpenCensusConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = OpenCensusConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus_vtproto.pb.go new file mode 100644 index 0000000000..66b08bf86e --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opencensus_vtproto.pb.go @@ -0,0 +1,311 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/opencensus.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *OpenCensusConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OpenCensusConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OpenCensusConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.OcagentGrpcService != nil { + if vtmsg, ok := interface{}(m.OcagentGrpcService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.OcagentGrpcService) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x72 + } + if m.StackdriverGrpcService != nil { + if vtmsg, ok := interface{}(m.StackdriverGrpcService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.StackdriverGrpcService) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + if len(m.OcagentAddress) > 0 { + i -= len(m.OcagentAddress) + copy(dAtA[i:], m.OcagentAddress) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.OcagentAddress))) + i-- + dAtA[i] = 0x62 + } + if m.OcagentExporterEnabled { + i-- + if m.OcagentExporterEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } + if len(m.StackdriverAddress) > 0 { + i -= len(m.StackdriverAddress) + copy(dAtA[i:], m.StackdriverAddress) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StackdriverAddress))) + i-- + dAtA[i] = 0x52 + } + if len(m.OutgoingTraceContext) > 0 { + var pksize2 int + for _, num := range m.OutgoingTraceContext { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.OutgoingTraceContext { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x4a + } + if len(m.IncomingTraceContext) > 0 { + var pksize4 int + for _, num := range m.IncomingTraceContext { + pksize4 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize4 + j3 := i + for _, num1 := range m.IncomingTraceContext { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j3] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j3++ + } + dAtA[j3] = uint8(num) + j3++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize4)) + i-- + dAtA[i] = 0x42 + } + if len(m.ZipkinUrl) > 0 { + i -= len(m.ZipkinUrl) + copy(dAtA[i:], m.ZipkinUrl) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ZipkinUrl))) + i-- + dAtA[i] = 
0x32 + } + if m.ZipkinExporterEnabled { + i-- + if m.ZipkinExporterEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.StackdriverProjectId) > 0 { + i -= len(m.StackdriverProjectId) + copy(dAtA[i:], m.StackdriverProjectId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StackdriverProjectId))) + i-- + dAtA[i] = 0x22 + } + if m.StackdriverExporterEnabled { + i-- + if m.StackdriverExporterEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.StdoutExporterEnabled { + i-- + if m.StdoutExporterEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.TraceConfig != nil { + if vtmsg, ok := interface{}(m.TraceConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TraceConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OpenCensusConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TraceConfig != nil { + if size, ok := interface{}(m.TraceConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TraceConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StdoutExporterEnabled { + n += 2 + } + if m.StackdriverExporterEnabled { + n += 2 + } + l = len(m.StackdriverProjectId) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ZipkinExporterEnabled { + n += 2 + } + l = len(m.ZipkinUrl) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.IncomingTraceContext) > 0 { + l = 0 + for _, e := range m.IncomingTraceContext { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if len(m.OutgoingTraceContext) > 0 { + l = 0 + for _, e := range m.OutgoingTraceContext { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + l = len(m.StackdriverAddress) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OcagentExporterEnabled { + n += 2 + } + l = len(m.OcagentAddress) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StackdriverGrpcService != nil { + if size, ok := interface{}(m.StackdriverGrpcService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.StackdriverGrpcService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OcagentGrpcService != nil { + if size, ok := interface{}(m.OcagentGrpcService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.OcagentGrpcService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.go new file mode 100644 index 0000000000..a0087e2580 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.go @@ -0,0 +1,258 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/trace/v3/opentelemetry.proto + +package tracev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration for the OpenTelemetry tracer. +// +// [#extension: envoy.tracers.opentelemetry] +// +// [#next-free-field: 6] +type OpenTelemetryConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The upstream gRPC cluster that will receive OTLP traces. + // Note that the tracer drops traces if the server does not read data fast enough. + // This field can be left empty to disable reporting traces to the gRPC service. + // Only one of “grpc_service“, “http_service“ may be used. + GrpcService *v3.GrpcService `protobuf:"bytes,1,opt,name=grpc_service,json=grpcService,proto3" json:"grpc_service,omitempty"` + // The upstream HTTP cluster that will receive OTLP traces. + // This field can be left empty to disable reporting traces to the HTTP service. + // Only one of “grpc_service“, “http_service“ may be used. + // + // .. note:: + // + // Note: The ``request_headers_to_add`` property in the OTLP HTTP exporter service + // does not support the :ref:`format specifier ` as used for + // :ref:`HTTP access logging `. + // The values configured are added as HTTP headers on the OTLP export request + // without any formatting applied. + HttpService *v3.HttpService `protobuf:"bytes,3,opt,name=http_service,json=httpService,proto3" json:"http_service,omitempty"` + // The name for the service. This will be populated in the ResourceSpan Resource attributes. + // If it is not provided, it will default to "unknown_service:envoy". + ServiceName string `protobuf:"bytes,2,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + // An ordered list of resource detectors + // [#extension-category: envoy.tracers.opentelemetry.resource_detectors] + ResourceDetectors []*v3.TypedExtensionConfig `protobuf:"bytes,4,rep,name=resource_detectors,json=resourceDetectors,proto3" json:"resource_detectors,omitempty"` + // Specifies the sampler to be used by the OpenTelemetry tracer. + // The configured sampler implements the Sampler interface defined by the OpenTelemetry specification. + // This field can be left empty. In this case, the default Envoy sampling decision is used. 
+ // + // See: `OpenTelemetry sampler specification `_ + // [#extension-category: envoy.tracers.opentelemetry.samplers] + Sampler *v3.TypedExtensionConfig `protobuf:"bytes,5,opt,name=sampler,proto3" json:"sampler,omitempty"` +} + +func (x *OpenTelemetryConfig) Reset() { + *x = OpenTelemetryConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_opentelemetry_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OpenTelemetryConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OpenTelemetryConfig) ProtoMessage() {} + +func (x *OpenTelemetryConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_opentelemetry_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OpenTelemetryConfig.ProtoReflect.Descriptor instead. +func (*OpenTelemetryConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_opentelemetry_proto_rawDescGZIP(), []int{0} +} + +func (x *OpenTelemetryConfig) GetGrpcService() *v3.GrpcService { + if x != nil { + return x.GrpcService + } + return nil +} + +func (x *OpenTelemetryConfig) GetHttpService() *v3.HttpService { + if x != nil { + return x.HttpService + } + return nil +} + +func (x *OpenTelemetryConfig) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +func (x *OpenTelemetryConfig) GetResourceDetectors() []*v3.TypedExtensionConfig { + if x != nil { + return x.ResourceDetectors + } + return nil +} + +func (x *OpenTelemetryConfig) GetSampler() *v3.TypedExtensionConfig { + if x != nil { + return x.Sampler + } + return nil +} + +var File_envoy_config_trace_v3_opentelemetry_proto protoreflect.FileDescriptor + +var file_envoy_config_trace_v3_opentelemetry_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, + 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, + 0x76, 0x33, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x67, + 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, + 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x93, 0x03, 0x0a, 
0x13, 0x4f, 0x70, + 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x5b, 0x0a, 0x0c, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, + 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x15, 0xf2, 0x98, 0xfe, 0x8f, + 0x05, 0x0f, 0x12, 0x0d, 0x6f, 0x74, 0x6c, 0x70, 0x5f, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, + 0x72, 0x52, 0x0b, 0x67, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5b, + 0x0a, 0x0c, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x15, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x0f, 0x12, + 0x0d, 0x6f, 0x74, 0x6c, 0x70, 0x5f, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x52, 0x0b, + 0x68, 0x74, 0x74, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x59, + 0x0a, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x44, 0x0a, 0x07, 0x73, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x07, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x42, + 0x89, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x42, + 0x12, 0x4f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_trace_v3_opentelemetry_proto_rawDescOnce sync.Once + file_envoy_config_trace_v3_opentelemetry_proto_rawDescData = 
file_envoy_config_trace_v3_opentelemetry_proto_rawDesc +) + +func file_envoy_config_trace_v3_opentelemetry_proto_rawDescGZIP() []byte { + file_envoy_config_trace_v3_opentelemetry_proto_rawDescOnce.Do(func() { + file_envoy_config_trace_v3_opentelemetry_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_trace_v3_opentelemetry_proto_rawDescData) + }) + return file_envoy_config_trace_v3_opentelemetry_proto_rawDescData +} + +var file_envoy_config_trace_v3_opentelemetry_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_trace_v3_opentelemetry_proto_goTypes = []interface{}{ + (*OpenTelemetryConfig)(nil), // 0: envoy.config.trace.v3.OpenTelemetryConfig + (*v3.GrpcService)(nil), // 1: envoy.config.core.v3.GrpcService + (*v3.HttpService)(nil), // 2: envoy.config.core.v3.HttpService + (*v3.TypedExtensionConfig)(nil), // 3: envoy.config.core.v3.TypedExtensionConfig +} +var file_envoy_config_trace_v3_opentelemetry_proto_depIdxs = []int32{ + 1, // 0: envoy.config.trace.v3.OpenTelemetryConfig.grpc_service:type_name -> envoy.config.core.v3.GrpcService + 2, // 1: envoy.config.trace.v3.OpenTelemetryConfig.http_service:type_name -> envoy.config.core.v3.HttpService + 3, // 2: envoy.config.trace.v3.OpenTelemetryConfig.resource_detectors:type_name -> envoy.config.core.v3.TypedExtensionConfig + 3, // 3: envoy.config.trace.v3.OpenTelemetryConfig.sampler:type_name -> envoy.config.core.v3.TypedExtensionConfig + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_envoy_config_trace_v3_opentelemetry_proto_init() } +func file_envoy_config_trace_v3_opentelemetry_proto_init() { + if File_envoy_config_trace_v3_opentelemetry_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_trace_v3_opentelemetry_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OpenTelemetryConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_trace_v3_opentelemetry_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_trace_v3_opentelemetry_proto_goTypes, + DependencyIndexes: file_envoy_config_trace_v3_opentelemetry_proto_depIdxs, + MessageInfos: file_envoy_config_trace_v3_opentelemetry_proto_msgTypes, + }.Build() + File_envoy_config_trace_v3_opentelemetry_proto = out.File + file_envoy_config_trace_v3_opentelemetry_proto_rawDesc = nil + file_envoy_config_trace_v3_opentelemetry_proto_goTypes = nil + file_envoy_config_trace_v3_opentelemetry_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.validate.go new file mode 100644 index 0000000000..101f73bbe8 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry.pb.validate.go @@ -0,0 +1,262 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/config/trace/v3/opentelemetry.proto + +package tracev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on OpenTelemetryConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *OpenTelemetryConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on OpenTelemetryConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// OpenTelemetryConfigMultiError, or nil if none found. +func (m *OpenTelemetryConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *OpenTelemetryConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetGrpcService()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OpenTelemetryConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OpenTelemetryConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpcService()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OpenTelemetryConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetHttpService()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OpenTelemetryConfigValidationError{ + field: "HttpService", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OpenTelemetryConfigValidationError{ + field: "HttpService", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpService()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OpenTelemetryConfigValidationError{ + field: "HttpService", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ServiceName + + for idx, item := range m.GetResourceDetectors() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OpenTelemetryConfigValidationError{ + field: fmt.Sprintf("ResourceDetectors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, 
OpenTelemetryConfigValidationError{ + field: fmt.Sprintf("ResourceDetectors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OpenTelemetryConfigValidationError{ + field: fmt.Sprintf("ResourceDetectors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetSampler()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OpenTelemetryConfigValidationError{ + field: "Sampler", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OpenTelemetryConfigValidationError{ + field: "Sampler", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSampler()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OpenTelemetryConfigValidationError{ + field: "Sampler", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return OpenTelemetryConfigMultiError(errors) + } + + return nil +} + +// OpenTelemetryConfigMultiError is an error wrapping multiple validation +// errors returned by OpenTelemetryConfig.ValidateAll() if the designated +// constraints aren't met. +type OpenTelemetryConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m OpenTelemetryConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m OpenTelemetryConfigMultiError) AllErrors() []error { return m } + +// OpenTelemetryConfigValidationError is the validation error returned by +// OpenTelemetryConfig.Validate if the designated constraints aren't met. +type OpenTelemetryConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e OpenTelemetryConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e OpenTelemetryConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e OpenTelemetryConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e OpenTelemetryConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e OpenTelemetryConfigValidationError) ErrorName() string { + return "OpenTelemetryConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e OpenTelemetryConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sOpenTelemetryConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = OpenTelemetryConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = OpenTelemetryConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry_vtproto.pb.go new file mode 100644 index 0000000000..d6c6280515 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/opentelemetry_vtproto.pb.go @@ -0,0 +1,206 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/opentelemetry.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *OpenTelemetryConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OpenTelemetryConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OpenTelemetryConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Sampler != nil { + if vtmsg, ok := interface{}(m.Sampler).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Sampler) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + if len(m.ResourceDetectors) > 0 { + for iNdEx := len(m.ResourceDetectors) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ResourceDetectors[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ResourceDetectors[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + } + if m.HttpService != nil { + if vtmsg, ok := interface{}(m.HttpService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HttpService) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if len(m.ServiceName) > 0 { + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x12 + } + if m.GrpcService != nil { + if vtmsg, ok := interface{}(m.GrpcService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.GrpcService) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OpenTelemetryConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GrpcService != nil { + if size, ok := interface{}(m.GrpcService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.GrpcService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ServiceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HttpService != nil { + if size, ok := interface{}(m.HttpService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.HttpService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ResourceDetectors) > 0 { + for _, e := range m.ResourceDetectors { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Sampler != nil { + if size, ok := interface{}(m.Sampler).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Sampler) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.go new file mode 100644 index 0000000000..662b1bea5d --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.go @@ -0,0 +1,174 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/trace/v3/service.proto + +package tracev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration structure. +type TraceServiceConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The upstream gRPC cluster that hosts the metrics service. + GrpcService *v3.GrpcService `protobuf:"bytes,1,opt,name=grpc_service,json=grpcService,proto3" json:"grpc_service,omitempty"` +} + +func (x *TraceServiceConfig) Reset() { + *x = TraceServiceConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceServiceConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceServiceConfig) ProtoMessage() {} + +func (x *TraceServiceConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceServiceConfig.ProtoReflect.Descriptor instead. +func (*TraceServiceConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_service_proto_rawDescGZIP(), []int{0} +} + +func (x *TraceServiceConfig) GetGrpcService() *v3.GrpcService { + if x != nil { + return x.GrpcService + } + return nil +} + +var File_envoy_config_trace_v3_service_proto protoreflect.FileDescriptor + +var file_envoy_config_trace_v3_service_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x27, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, + 0x76, 0x33, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, + 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x95, 0x01, 0x0a, 0x12, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x0c, 0x67, 0x72, 0x70, 0x63, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x67, 0x72, 0x70, 0x63, + 0x53, 0x65, 0x72, 0x76, 0x69, 
0x63, 0x65, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x83, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, + 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, + 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x76, 0x33, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_trace_v3_service_proto_rawDescOnce sync.Once + file_envoy_config_trace_v3_service_proto_rawDescData = file_envoy_config_trace_v3_service_proto_rawDesc +) + +func file_envoy_config_trace_v3_service_proto_rawDescGZIP() []byte { + file_envoy_config_trace_v3_service_proto_rawDescOnce.Do(func() { + file_envoy_config_trace_v3_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_trace_v3_service_proto_rawDescData) + }) + return file_envoy_config_trace_v3_service_proto_rawDescData +} + +var file_envoy_config_trace_v3_service_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_trace_v3_service_proto_goTypes = []interface{}{ + (*TraceServiceConfig)(nil), // 0: envoy.config.trace.v3.TraceServiceConfig + (*v3.GrpcService)(nil), // 1: envoy.config.core.v3.GrpcService +} +var file_envoy_config_trace_v3_service_proto_depIdxs = []int32{ + 1, // 0: envoy.config.trace.v3.TraceServiceConfig.grpc_service:type_name -> envoy.config.core.v3.GrpcService + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_config_trace_v3_service_proto_init() } +func file_envoy_config_trace_v3_service_proto_init() { + if File_envoy_config_trace_v3_service_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_trace_v3_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceServiceConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_trace_v3_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_trace_v3_service_proto_goTypes, + DependencyIndexes: file_envoy_config_trace_v3_service_proto_depIdxs, + MessageInfos: file_envoy_config_trace_v3_service_proto_msgTypes, + }.Build() + File_envoy_config_trace_v3_service_proto = out.File + file_envoy_config_trace_v3_service_proto_rawDesc 
= nil + file_envoy_config_trace_v3_service_proto_goTypes = nil + file_envoy_config_trace_v3_service_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.validate.go new file mode 100644 index 0000000000..87b74b5543 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service.pb.validate.go @@ -0,0 +1,179 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/trace/v3/service.proto + +package tracev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on TraceServiceConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *TraceServiceConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TraceServiceConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// TraceServiceConfigMultiError, or nil if none found. +func (m *TraceServiceConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *TraceServiceConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetGrpcService() == nil { + err := TraceServiceConfigValidationError{ + field: "GrpcService", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetGrpcService()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TraceServiceConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TraceServiceConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpcService()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TraceServiceConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return TraceServiceConfigMultiError(errors) + } + + return nil +} + +// TraceServiceConfigMultiError is an error wrapping multiple validation errors +// returned by TraceServiceConfig.ValidateAll() if the designated constraints +// aren't met. +type TraceServiceConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m TraceServiceConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TraceServiceConfigMultiError) AllErrors() []error { return m } + +// TraceServiceConfigValidationError is the validation error returned by +// TraceServiceConfig.Validate if the designated constraints aren't met. +type TraceServiceConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TraceServiceConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TraceServiceConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TraceServiceConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TraceServiceConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TraceServiceConfigValidationError) ErrorName() string { + return "TraceServiceConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e TraceServiceConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTraceServiceConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TraceServiceConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TraceServiceConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service_vtproto.pb.go new file mode 100644 index 0000000000..71fddd3891 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/service_vtproto.pb.go @@ -0,0 +1,95 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/service.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TraceServiceConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TraceServiceConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TraceServiceConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.GrpcService != nil { + if vtmsg, ok := interface{}(m.GrpcService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.GrpcService) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TraceServiceConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GrpcService != nil { + if size, ok := interface{}(m.GrpcService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.GrpcService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.go new file mode 100644 index 0000000000..948ea5f138 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.go @@ -0,0 +1,341 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/trace/v3/skywalking.proto + +package tracev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration for the SkyWalking tracer. Please note that if SkyWalking tracer is used as the +// provider of tracing, then +// :ref:`spawn_upstream_span ` +// in the tracing config must be set to true to get the correct topology and tracing data. Moreover, SkyWalking +// Tracer does not support SkyWalking extension header (“sw8-x“) temporarily. +// [#extension: envoy.tracers.skywalking] +type SkyWalkingConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // SkyWalking collector service. 
+ GrpcService *v3.GrpcService `protobuf:"bytes,1,opt,name=grpc_service,json=grpcService,proto3" json:"grpc_service,omitempty"` + ClientConfig *ClientConfig `protobuf:"bytes,2,opt,name=client_config,json=clientConfig,proto3" json:"client_config,omitempty"` +} + +func (x *SkyWalkingConfig) Reset() { + *x = SkyWalkingConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_skywalking_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SkyWalkingConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SkyWalkingConfig) ProtoMessage() {} + +func (x *SkyWalkingConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_skywalking_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SkyWalkingConfig.ProtoReflect.Descriptor instead. +func (*SkyWalkingConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_skywalking_proto_rawDescGZIP(), []int{0} +} + +func (x *SkyWalkingConfig) GetGrpcService() *v3.GrpcService { + if x != nil { + return x.GrpcService + } + return nil +} + +func (x *SkyWalkingConfig) GetClientConfig() *ClientConfig { + if x != nil { + return x.ClientConfig + } + return nil +} + +// Client config for SkyWalking tracer. +type ClientConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Service name for SkyWalking tracer. If this field is empty, then local service cluster name + // that configured by :ref:`Bootstrap node ` + // message's :ref:`cluster ` field or command line + // option :option:`--service-cluster` will be used. If both this field and local service cluster + // name are empty, “EnvoyProxy“ is used as the service name by default. + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + // Service instance name for SkyWalking tracer. If this field is empty, then local service node + // that configured by :ref:`Bootstrap node ` + // message's :ref:`id ` field or command line option + // :option:`--service-node` will be used. If both this field and local service node are empty, + // “EnvoyProxy“ is used as the instance name by default. + InstanceName string `protobuf:"bytes,2,opt,name=instance_name,json=instanceName,proto3" json:"instance_name,omitempty"` + // Authentication token config for SkyWalking. SkyWalking can use token authentication to secure + // that monitoring application data can be trusted. In current version, Token is considered as a + // simple string. + // [#comment:TODO(wbpcode): Get backend token through the SDS API.] + // + // Types that are assignable to BackendTokenSpecifier: + // + // *ClientConfig_BackendToken + BackendTokenSpecifier isClientConfig_BackendTokenSpecifier `protobuf_oneof:"backend_token_specifier"` + // Envoy caches the segment in memory when the SkyWalking backend service is temporarily unavailable. + // This field specifies the maximum number of segments that can be cached. If not specified, the + // default is 1024. 
+ MaxCacheSize *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=max_cache_size,json=maxCacheSize,proto3" json:"max_cache_size,omitempty"` +} + +func (x *ClientConfig) Reset() { + *x = ClientConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_skywalking_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientConfig) ProtoMessage() {} + +func (x *ClientConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_skywalking_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientConfig.ProtoReflect.Descriptor instead. +func (*ClientConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_skywalking_proto_rawDescGZIP(), []int{1} +} + +func (x *ClientConfig) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +func (x *ClientConfig) GetInstanceName() string { + if x != nil { + return x.InstanceName + } + return "" +} + +func (m *ClientConfig) GetBackendTokenSpecifier() isClientConfig_BackendTokenSpecifier { + if m != nil { + return m.BackendTokenSpecifier + } + return nil +} + +func (x *ClientConfig) GetBackendToken() string { + if x, ok := x.GetBackendTokenSpecifier().(*ClientConfig_BackendToken); ok { + return x.BackendToken + } + return "" +} + +func (x *ClientConfig) GetMaxCacheSize() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxCacheSize + } + return nil +} + +type isClientConfig_BackendTokenSpecifier interface { + isClientConfig_BackendTokenSpecifier() +} + +type ClientConfig_BackendToken struct { + // Inline authentication token string. 
+ BackendToken string `protobuf:"bytes,3,opt,name=backend_token,json=backendToken,proto3,oneof"` +} + +func (*ClientConfig_BackendToken) isClientConfig_BackendTokenSpecifier() {} + +var File_envoy_config_trace_v3_skywalking_proto protoreflect.FileDescriptor + +var file_envoy_config_trace_v3_skywalking_proto_rawDesc = []byte{ + 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6b, 0x79, 0x77, 0x61, 0x6c, 0x6b, 0x69, + 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x1a, + 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, + 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x65, 0x6e, 0x73, 0x69, + 0x74, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0xac, 0x01, 0x0a, 0x10, 0x53, 0x6b, 0x79, 0x57, 0x61, 0x6c, 0x6b, 0x69, 0x6e, + 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x0c, 0x67, 0x72, 0x70, 0x63, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x67, 0x72, 0x70, 0x63, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x48, 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x22, 0xe4, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x0d, 0x62, 
0x61, + 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x06, 0xb8, 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x48, 0x00, 0x52, 0x0c, 0x62, 0x61, 0x63, + 0x6b, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x42, 0x0a, 0x0e, 0x6d, 0x61, 0x78, + 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0c, 0x6d, 0x61, 0x78, 0x43, 0x61, 0x63, 0x68, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x19, 0x0a, + 0x17, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0xb9, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, + 0x2d, 0x12, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x73, 0x2e, 0x73, 0x6b, 0x79, 0x77, + 0x61, 0x6c, 0x6b, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xba, 0x80, + 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x53, 0x6b, 0x79, + 0x77, 0x61, 0x6c, 0x6b, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_trace_v3_skywalking_proto_rawDescOnce sync.Once + file_envoy_config_trace_v3_skywalking_proto_rawDescData = file_envoy_config_trace_v3_skywalking_proto_rawDesc +) + +func file_envoy_config_trace_v3_skywalking_proto_rawDescGZIP() []byte { + file_envoy_config_trace_v3_skywalking_proto_rawDescOnce.Do(func() { + file_envoy_config_trace_v3_skywalking_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_trace_v3_skywalking_proto_rawDescData) + }) + return file_envoy_config_trace_v3_skywalking_proto_rawDescData +} + +var file_envoy_config_trace_v3_skywalking_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_config_trace_v3_skywalking_proto_goTypes = []interface{}{ + (*SkyWalkingConfig)(nil), // 0: envoy.config.trace.v3.SkyWalkingConfig + (*ClientConfig)(nil), // 1: envoy.config.trace.v3.ClientConfig + (*v3.GrpcService)(nil), // 2: envoy.config.core.v3.GrpcService + (*wrapperspb.UInt32Value)(nil), // 3: google.protobuf.UInt32Value +} +var file_envoy_config_trace_v3_skywalking_proto_depIdxs = []int32{ + 2, // 0: envoy.config.trace.v3.SkyWalkingConfig.grpc_service:type_name -> envoy.config.core.v3.GrpcService + 1, // 1: envoy.config.trace.v3.SkyWalkingConfig.client_config:type_name -> envoy.config.trace.v3.ClientConfig + 3, // 2: envoy.config.trace.v3.ClientConfig.max_cache_size:type_name -> google.protobuf.UInt32Value + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the 
sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_envoy_config_trace_v3_skywalking_proto_init() } +func file_envoy_config_trace_v3_skywalking_proto_init() { + if File_envoy_config_trace_v3_skywalking_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_trace_v3_skywalking_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SkyWalkingConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_trace_v3_skywalking_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_config_trace_v3_skywalking_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*ClientConfig_BackendToken)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_trace_v3_skywalking_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_trace_v3_skywalking_proto_goTypes, + DependencyIndexes: file_envoy_config_trace_v3_skywalking_proto_depIdxs, + MessageInfos: file_envoy_config_trace_v3_skywalking_proto_msgTypes, + }.Build() + File_envoy_config_trace_v3_skywalking_proto = out.File + file_envoy_config_trace_v3_skywalking_proto_rawDesc = nil + file_envoy_config_trace_v3_skywalking_proto_goTypes = nil + file_envoy_config_trace_v3_skywalking_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.validate.go new file mode 100644 index 0000000000..559bdb4939 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking.pb.validate.go @@ -0,0 +1,355 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/trace/v3/skywalking.proto + +package tracev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on SkyWalkingConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *SkyWalkingConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SkyWalkingConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SkyWalkingConfigMultiError, or nil if none found. 
+func (m *SkyWalkingConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *SkyWalkingConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetGrpcService() == nil { + err := SkyWalkingConfigValidationError{ + field: "GrpcService", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetGrpcService()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SkyWalkingConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SkyWalkingConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrpcService()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SkyWalkingConfigValidationError{ + field: "GrpcService", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetClientConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SkyWalkingConfigValidationError{ + field: "ClientConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SkyWalkingConfigValidationError{ + field: "ClientConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetClientConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SkyWalkingConfigValidationError{ + field: "ClientConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return SkyWalkingConfigMultiError(errors) + } + + return nil +} + +// SkyWalkingConfigMultiError is an error wrapping multiple validation errors +// returned by SkyWalkingConfig.ValidateAll() if the designated constraints +// aren't met. +type SkyWalkingConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SkyWalkingConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SkyWalkingConfigMultiError) AllErrors() []error { return m } + +// SkyWalkingConfigValidationError is the validation error returned by +// SkyWalkingConfig.Validate if the designated constraints aren't met. +type SkyWalkingConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SkyWalkingConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SkyWalkingConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SkyWalkingConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SkyWalkingConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e SkyWalkingConfigValidationError) ErrorName() string { return "SkyWalkingConfigValidationError" } + +// Error satisfies the builtin error interface +func (e SkyWalkingConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSkyWalkingConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SkyWalkingConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SkyWalkingConfigValidationError{} + +// Validate checks the field values on ClientConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ClientConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClientConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ClientConfigMultiError, or +// nil if none found. +func (m *ClientConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *ClientConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for ServiceName + + // no validation rules for InstanceName + + if all { + switch v := interface{}(m.GetMaxCacheSize()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientConfigValidationError{ + field: "MaxCacheSize", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientConfigValidationError{ + field: "MaxCacheSize", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxCacheSize()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientConfigValidationError{ + field: "MaxCacheSize", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.BackendTokenSpecifier.(type) { + case *ClientConfig_BackendToken: + if v == nil { + err := ClientConfigValidationError{ + field: "BackendTokenSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for BackendToken + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return ClientConfigMultiError(errors) + } + + return nil +} + +// ClientConfigMultiError is an error wrapping multiple validation errors +// returned by ClientConfig.ValidateAll() if the designated constraints aren't met. +type ClientConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClientConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClientConfigMultiError) AllErrors() []error { return m } + +// ClientConfigValidationError is the validation error returned by +// ClientConfig.Validate if the designated constraints aren't met. 
+type ClientConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClientConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClientConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClientConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClientConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClientConfigValidationError) ErrorName() string { return "ClientConfigValidationError" } + +// Error satisfies the builtin error interface +func (e ClientConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClientConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClientConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClientConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking_vtproto.pb.go new file mode 100644 index 0000000000..8af59a37d5 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/skywalking_vtproto.pb.go @@ -0,0 +1,224 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/skywalking.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *SkyWalkingConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SkyWalkingConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SkyWalkingConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientConfig != nil { + size, err := m.ClientConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.GrpcService != nil { + if vtmsg, ok := interface{}(m.GrpcService).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.GrpcService) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClientConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxCacheSize != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxCacheSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if msg, ok := m.BackendTokenSpecifier.(*ClientConfig_BackendToken); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.InstanceName) > 0 { + i -= len(m.InstanceName) + copy(dAtA[i:], m.InstanceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.InstanceName))) + i-- + dAtA[i] = 0x12 + } + if len(m.ServiceName) > 0 { + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientConfig_BackendToken) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClientConfig_BackendToken) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.BackendToken) + copy(dAtA[i:], m.BackendToken) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.BackendToken))) + i-- + dAtA[i] = 0x1a + return len(dAtA) 
- i, nil +} +func (m *SkyWalkingConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GrpcService != nil { + if size, ok := interface{}(m.GrpcService).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.GrpcService) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ClientConfig != nil { + l = m.ClientConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClientConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ServiceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.InstanceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.BackendTokenSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.MaxCacheSize != nil { + l = (*wrapperspb.UInt32Value)(m.MaxCacheSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClientConfig_BackendToken) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.BackendToken) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.go new file mode 100644 index 0000000000..94ded5e4a2 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.go @@ -0,0 +1,99 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/trace/v3/trace.proto + +package tracev3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var File_envoy_config_trace_v3_trace_proto protoreflect.FileDescriptor + +var file_envoy_config_trace_v3_trace_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x6f, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, + 0x74, 0x74, 0x70, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x73, 0x74, 0x65, + 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x6f, + 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x7a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x42, 0x79, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x54, 0x72, 0x61, 0x63, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, + 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x76, 0x33, 0x50, 0x00, + 0x50, 0x01, 0x50, 0x02, 0x50, 0x03, 0x50, 0x04, 0x50, 0x05, 0x50, 0x06, 0x50, 0x07, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_envoy_config_trace_v3_trace_proto_goTypes = []interface{}{} +var file_envoy_config_trace_v3_trace_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method 
output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_config_trace_v3_trace_proto_init() } +func file_envoy_config_trace_v3_trace_proto_init() { + if File_envoy_config_trace_v3_trace_proto != nil { + return + } + file_envoy_config_trace_v3_datadog_proto_init() + file_envoy_config_trace_v3_dynamic_ot_proto_init() + file_envoy_config_trace_v3_http_tracer_proto_init() + file_envoy_config_trace_v3_lightstep_proto_init() + file_envoy_config_trace_v3_opencensus_proto_init() + file_envoy_config_trace_v3_opentelemetry_proto_init() + file_envoy_config_trace_v3_service_proto_init() + file_envoy_config_trace_v3_zipkin_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_trace_v3_trace_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_trace_v3_trace_proto_goTypes, + DependencyIndexes: file_envoy_config_trace_v3_trace_proto_depIdxs, + }.Build() + File_envoy_config_trace_v3_trace_proto = out.File + file_envoy_config_trace_v3_trace_proto_rawDesc = nil + file_envoy_config_trace_v3_trace_proto_goTypes = nil + file_envoy_config_trace_v3_trace_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.validate.go new file mode 100644 index 0000000000..1797e4924f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/trace.pb.validate.go @@ -0,0 +1,37 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/trace/v3/trace.proto + +package tracev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.go new file mode 100644 index 0000000000..c040e1e0ee --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.go @@ -0,0 +1,310 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/trace/v3/xray.proto + +package tracev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// [#extension: envoy.tracers.xray] +type XRayConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The UDP endpoint of the X-Ray Daemon where the spans will be sent. + // If this value is not set, the default value of 127.0.0.1:2000 will be used. + DaemonEndpoint *v3.SocketAddress `protobuf:"bytes,1,opt,name=daemon_endpoint,json=daemonEndpoint,proto3" json:"daemon_endpoint,omitempty"` + // The name of the X-Ray segment. + SegmentName string `protobuf:"bytes,2,opt,name=segment_name,json=segmentName,proto3" json:"segment_name,omitempty"` + // The location of a local custom sampling rules JSON file. + // For an example of the sampling rules see: + // `X-Ray SDK documentation + // `_ + SamplingRuleManifest *v3.DataSource `protobuf:"bytes,3,opt,name=sampling_rule_manifest,json=samplingRuleManifest,proto3" json:"sampling_rule_manifest,omitempty"` + // Optional custom fields to be added to each trace segment. + // see: `X-Ray Segment Document documentation + // `__ + SegmentFields *XRayConfig_SegmentFields `protobuf:"bytes,4,opt,name=segment_fields,json=segmentFields,proto3" json:"segment_fields,omitempty"` +} + +func (x *XRayConfig) Reset() { + *x = XRayConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_xray_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *XRayConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*XRayConfig) ProtoMessage() {} + +func (x *XRayConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_xray_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use XRayConfig.ProtoReflect.Descriptor instead. +func (*XRayConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_xray_proto_rawDescGZIP(), []int{0} +} + +func (x *XRayConfig) GetDaemonEndpoint() *v3.SocketAddress { + if x != nil { + return x.DaemonEndpoint + } + return nil +} + +func (x *XRayConfig) GetSegmentName() string { + if x != nil { + return x.SegmentName + } + return "" +} + +func (x *XRayConfig) GetSamplingRuleManifest() *v3.DataSource { + if x != nil { + return x.SamplingRuleManifest + } + return nil +} + +func (x *XRayConfig) GetSegmentFields() *XRayConfig_SegmentFields { + if x != nil { + return x.SegmentFields + } + return nil +} + +type XRayConfig_SegmentFields struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of AWS resource, e.g. "AWS::AppMesh::Proxy". + Origin string `protobuf:"bytes,1,opt,name=origin,proto3" json:"origin,omitempty"` + // AWS resource metadata dictionary. 
+ // See: `X-Ray Segment Document documentation `__ + Aws *structpb.Struct `protobuf:"bytes,2,opt,name=aws,proto3" json:"aws,omitempty"` +} + +func (x *XRayConfig_SegmentFields) Reset() { + *x = XRayConfig_SegmentFields{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_xray_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *XRayConfig_SegmentFields) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*XRayConfig_SegmentFields) ProtoMessage() {} + +func (x *XRayConfig_SegmentFields) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_xray_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use XRayConfig_SegmentFields.ProtoReflect.Descriptor instead. +func (*XRayConfig_SegmentFields) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_xray_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *XRayConfig_SegmentFields) GetOrigin() string { + if x != nil { + return x.Origin + } + return "" +} + +func (x *XRayConfig_SegmentFields) GetAws() *structpb.Struct { + if x != nil { + return x.Aws + } + return nil +} + +var File_envoy_config_trace_v3_xray_proto protoreflect.FileDescriptor + +var file_envoy_config_trace_v3_xray_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x78, 0x72, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, + 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb8, 0x03, 0x0a, 0x0a, 0x58, 0x52, 0x61, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4c, 0x0a, 0x0f, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, + 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 
0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x52, 0x0e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x0c, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x0b, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x56, 0x0a, 0x16, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, + 0x65, 0x5f, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x52, 0x14, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, + 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x12, 0x56, 0x0a, 0x0e, 0x73, 0x65, 0x67, 0x6d, + 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x58, 0x52, 0x61, 0x79, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x73, 0x52, 0x0d, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, + 0x1a, 0x52, 0x0a, 0x0d, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x29, 0x0a, 0x03, 0x61, 0x77, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, + 0x03, 0x61, 0x77, 0x73, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, + 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x58, 0x52, 0x61, 0x79, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x42, 0xad, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x27, 0x12, 0x25, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x72, 0x73, 0x2e, 0x78, 0x72, 0x61, 0x79, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x42, + 0x09, 0x58, 0x72, 0x61, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_trace_v3_xray_proto_rawDescOnce 
sync.Once + file_envoy_config_trace_v3_xray_proto_rawDescData = file_envoy_config_trace_v3_xray_proto_rawDesc +) + +func file_envoy_config_trace_v3_xray_proto_rawDescGZIP() []byte { + file_envoy_config_trace_v3_xray_proto_rawDescOnce.Do(func() { + file_envoy_config_trace_v3_xray_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_trace_v3_xray_proto_rawDescData) + }) + return file_envoy_config_trace_v3_xray_proto_rawDescData +} + +var file_envoy_config_trace_v3_xray_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_config_trace_v3_xray_proto_goTypes = []interface{}{ + (*XRayConfig)(nil), // 0: envoy.config.trace.v3.XRayConfig + (*XRayConfig_SegmentFields)(nil), // 1: envoy.config.trace.v3.XRayConfig.SegmentFields + (*v3.SocketAddress)(nil), // 2: envoy.config.core.v3.SocketAddress + (*v3.DataSource)(nil), // 3: envoy.config.core.v3.DataSource + (*structpb.Struct)(nil), // 4: google.protobuf.Struct +} +var file_envoy_config_trace_v3_xray_proto_depIdxs = []int32{ + 2, // 0: envoy.config.trace.v3.XRayConfig.daemon_endpoint:type_name -> envoy.config.core.v3.SocketAddress + 3, // 1: envoy.config.trace.v3.XRayConfig.sampling_rule_manifest:type_name -> envoy.config.core.v3.DataSource + 1, // 2: envoy.config.trace.v3.XRayConfig.segment_fields:type_name -> envoy.config.trace.v3.XRayConfig.SegmentFields + 4, // 3: envoy.config.trace.v3.XRayConfig.SegmentFields.aws:type_name -> google.protobuf.Struct + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_envoy_config_trace_v3_xray_proto_init() } +func file_envoy_config_trace_v3_xray_proto_init() { + if File_envoy_config_trace_v3_xray_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_trace_v3_xray_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*XRayConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_config_trace_v3_xray_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*XRayConfig_SegmentFields); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_trace_v3_xray_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_trace_v3_xray_proto_goTypes, + DependencyIndexes: file_envoy_config_trace_v3_xray_proto_depIdxs, + MessageInfos: file_envoy_config_trace_v3_xray_proto_msgTypes, + }.Build() + File_envoy_config_trace_v3_xray_proto = out.File + file_envoy_config_trace_v3_xray_proto_rawDesc = nil + file_envoy_config_trace_v3_xray_proto_goTypes = nil + file_envoy_config_trace_v3_xray_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.validate.go new file mode 100644 index 0000000000..a48a838edd --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray.pb.validate.go @@ -0,0 +1,367 @@ +//go:build !disable_pgv +// 
Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/config/trace/v3/xray.proto + +package tracev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on XRayConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *XRayConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on XRayConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in XRayConfigMultiError, or +// nil if none found. +func (m *XRayConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *XRayConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetDaemonEndpoint()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, XRayConfigValidationError{ + field: "DaemonEndpoint", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, XRayConfigValidationError{ + field: "DaemonEndpoint", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDaemonEndpoint()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return XRayConfigValidationError{ + field: "DaemonEndpoint", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if utf8.RuneCountInString(m.GetSegmentName()) < 1 { + err := XRayConfigValidationError{ + field: "SegmentName", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetSamplingRuleManifest()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, XRayConfigValidationError{ + field: "SamplingRuleManifest", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, XRayConfigValidationError{ + field: "SamplingRuleManifest", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSamplingRuleManifest()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return XRayConfigValidationError{ + field: "SamplingRuleManifest", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSegmentFields()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, XRayConfigValidationError{ + field: "SegmentFields", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error 
}: + if err := v.Validate(); err != nil { + errors = append(errors, XRayConfigValidationError{ + field: "SegmentFields", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSegmentFields()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return XRayConfigValidationError{ + field: "SegmentFields", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return XRayConfigMultiError(errors) + } + + return nil +} + +// XRayConfigMultiError is an error wrapping multiple validation errors +// returned by XRayConfig.ValidateAll() if the designated constraints aren't met. +type XRayConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m XRayConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m XRayConfigMultiError) AllErrors() []error { return m } + +// XRayConfigValidationError is the validation error returned by +// XRayConfig.Validate if the designated constraints aren't met. +type XRayConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e XRayConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e XRayConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e XRayConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e XRayConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e XRayConfigValidationError) ErrorName() string { return "XRayConfigValidationError" } + +// Error satisfies the builtin error interface +func (e XRayConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sXRayConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = XRayConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = XRayConfigValidationError{} + +// Validate checks the field values on XRayConfig_SegmentFields with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *XRayConfig_SegmentFields) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on XRayConfig_SegmentFields with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// XRayConfig_SegmentFieldsMultiError, or nil if none found. 
+func (m *XRayConfig_SegmentFields) ValidateAll() error { + return m.validate(true) +} + +func (m *XRayConfig_SegmentFields) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Origin + + if all { + switch v := interface{}(m.GetAws()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, XRayConfig_SegmentFieldsValidationError{ + field: "Aws", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, XRayConfig_SegmentFieldsValidationError{ + field: "Aws", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAws()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return XRayConfig_SegmentFieldsValidationError{ + field: "Aws", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return XRayConfig_SegmentFieldsMultiError(errors) + } + + return nil +} + +// XRayConfig_SegmentFieldsMultiError is an error wrapping multiple validation +// errors returned by XRayConfig_SegmentFields.ValidateAll() if the designated +// constraints aren't met. +type XRayConfig_SegmentFieldsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m XRayConfig_SegmentFieldsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m XRayConfig_SegmentFieldsMultiError) AllErrors() []error { return m } + +// XRayConfig_SegmentFieldsValidationError is the validation error returned by +// XRayConfig_SegmentFields.Validate if the designated constraints aren't met. +type XRayConfig_SegmentFieldsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e XRayConfig_SegmentFieldsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e XRayConfig_SegmentFieldsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e XRayConfig_SegmentFieldsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e XRayConfig_SegmentFieldsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e XRayConfig_SegmentFieldsValidationError) ErrorName() string { + return "XRayConfig_SegmentFieldsValidationError" +} + +// Error satisfies the builtin error interface +func (e XRayConfig_SegmentFieldsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sXRayConfig_SegmentFields.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = XRayConfig_SegmentFieldsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = XRayConfig_SegmentFieldsValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray_vtproto.pb.go new file mode 100644 index 0000000000..b5bfdff5b9 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/xray_vtproto.pb.go @@ -0,0 +1,221 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/xray.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *XRayConfig_SegmentFields) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *XRayConfig_SegmentFields) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *XRayConfig_SegmentFields) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Aws != nil { + size, err := (*structpb.Struct)(m.Aws).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Origin) > 0 { + i -= len(m.Origin) + copy(dAtA[i:], m.Origin) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Origin))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *XRayConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *XRayConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *XRayConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + 
copy(dAtA[i:], m.unknownFields) + } + if m.SegmentFields != nil { + size, err := m.SegmentFields.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.SamplingRuleManifest != nil { + if vtmsg, ok := interface{}(m.SamplingRuleManifest).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SamplingRuleManifest) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if len(m.SegmentName) > 0 { + i -= len(m.SegmentName) + copy(dAtA[i:], m.SegmentName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SegmentName))) + i-- + dAtA[i] = 0x12 + } + if m.DaemonEndpoint != nil { + if vtmsg, ok := interface{}(m.DaemonEndpoint).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DaemonEndpoint) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *XRayConfig_SegmentFields) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Origin) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Aws != nil { + l = (*structpb.Struct)(m.Aws).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *XRayConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DaemonEndpoint != nil { + if size, ok := interface{}(m.DaemonEndpoint).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DaemonEndpoint) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.SegmentName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SamplingRuleManifest != nil { + if size, ok := interface{}(m.SamplingRuleManifest).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SamplingRuleManifest) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SegmentFields != nil { + l = m.SegmentFields.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.go new file mode 100644 index 0000000000..bf96a43d7a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.go @@ -0,0 +1,360 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/config/trace/v3/zipkin.proto + +package tracev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Available Zipkin collector endpoint versions. +type ZipkinConfig_CollectorEndpointVersion int32 + +const ( + // Zipkin API v1, JSON over HTTP. + // [#comment: The default implementation of Zipkin client before this field is added was only v1 + // and the way user configure this was by not explicitly specifying the version. Consequently, + // before this is added, the corresponding Zipkin collector expected to receive v1 payload. + // Hence the motivation of adding HTTP_JSON_V1 as the default is to avoid a breaking change when + // user upgrading Envoy with this change. Furthermore, we also immediately deprecate this field, + // since in Zipkin realm this v1 version is considered to be not preferable anymore.] + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/zipkin.proto. + ZipkinConfig_DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE ZipkinConfig_CollectorEndpointVersion = 0 + // Zipkin API v2, JSON over HTTP. + ZipkinConfig_HTTP_JSON ZipkinConfig_CollectorEndpointVersion = 1 + // Zipkin API v2, protobuf over HTTP. + ZipkinConfig_HTTP_PROTO ZipkinConfig_CollectorEndpointVersion = 2 + // [#not-implemented-hide:] + ZipkinConfig_GRPC ZipkinConfig_CollectorEndpointVersion = 3 +) + +// Enum value maps for ZipkinConfig_CollectorEndpointVersion. +var ( + ZipkinConfig_CollectorEndpointVersion_name = map[int32]string{ + 0: "DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE", + 1: "HTTP_JSON", + 2: "HTTP_PROTO", + 3: "GRPC", + } + ZipkinConfig_CollectorEndpointVersion_value = map[string]int32{ + "DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE": 0, + "HTTP_JSON": 1, + "HTTP_PROTO": 2, + "GRPC": 3, + } +) + +func (x ZipkinConfig_CollectorEndpointVersion) Enum() *ZipkinConfig_CollectorEndpointVersion { + p := new(ZipkinConfig_CollectorEndpointVersion) + *p = x + return p +} + +func (x ZipkinConfig_CollectorEndpointVersion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ZipkinConfig_CollectorEndpointVersion) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_config_trace_v3_zipkin_proto_enumTypes[0].Descriptor() +} + +func (ZipkinConfig_CollectorEndpointVersion) Type() protoreflect.EnumType { + return &file_envoy_config_trace_v3_zipkin_proto_enumTypes[0] +} + +func (x ZipkinConfig_CollectorEndpointVersion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ZipkinConfig_CollectorEndpointVersion.Descriptor instead. +func (ZipkinConfig_CollectorEndpointVersion) EnumDescriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_zipkin_proto_rawDescGZIP(), []int{0, 0} +} + +// Configuration for the Zipkin tracer. 
+// [#extension: envoy.tracers.zipkin] +// [#next-free-field: 8] +type ZipkinConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The cluster manager cluster that hosts the Zipkin collectors. + CollectorCluster string `protobuf:"bytes,1,opt,name=collector_cluster,json=collectorCluster,proto3" json:"collector_cluster,omitempty"` + // The API endpoint of the Zipkin service where the spans will be sent. When + // using a standard Zipkin installation. + CollectorEndpoint string `protobuf:"bytes,2,opt,name=collector_endpoint,json=collectorEndpoint,proto3" json:"collector_endpoint,omitempty"` + // Determines whether a 128bit trace id will be used when creating a new + // trace instance. The default value is false, which will result in a 64 bit trace id being used. + TraceId_128Bit bool `protobuf:"varint,3,opt,name=trace_id_128bit,json=traceId128bit,proto3" json:"trace_id_128bit,omitempty"` + // Determines whether client and server spans will share the same span context. + // The default value is true. + SharedSpanContext *wrapperspb.BoolValue `protobuf:"bytes,4,opt,name=shared_span_context,json=sharedSpanContext,proto3" json:"shared_span_context,omitempty"` + // Determines the selected collector endpoint version. + CollectorEndpointVersion ZipkinConfig_CollectorEndpointVersion `protobuf:"varint,5,opt,name=collector_endpoint_version,json=collectorEndpointVersion,proto3,enum=envoy.config.trace.v3.ZipkinConfig_CollectorEndpointVersion" json:"collector_endpoint_version,omitempty"` + // Optional hostname to use when sending spans to the collector_cluster. Useful for collectors + // that require a specific hostname. Defaults to :ref:`collector_cluster ` above. + CollectorHostname string `protobuf:"bytes,6,opt,name=collector_hostname,json=collectorHostname,proto3" json:"collector_hostname,omitempty"` + // If this is set to true, then Envoy will be treated as an independent hop in trace chain. A complete span pair will be created for a single + // request. Server span will be created for the downstream request and client span will be created for the related upstream request. + // This should be set to true in the following cases: + // + // * The Envoy Proxy is used as gateway or ingress. + // * The Envoy Proxy is used as sidecar but inbound traffic capturing or outbound traffic capturing is disabled. + // * Any case that the :ref:`start_child_span of router ` is set to true. + // + // .. attention:: + // + // If this is set to true, then the + // :ref:`start_child_span of router ` + // SHOULD be set to true also to ensure the correctness of trace chain. + // + // Both this field and ``start_child_span`` are deprecated by the + // :ref:`spawn_upstream_span `. + // Please use that ``spawn_upstream_span`` field to control the span creation. + // + // Deprecated: Marked as deprecated in envoy/config/trace/v3/zipkin.proto. 
+ SplitSpansForRequest bool `protobuf:"varint,7,opt,name=split_spans_for_request,json=splitSpansForRequest,proto3" json:"split_spans_for_request,omitempty"` +} + +func (x *ZipkinConfig) Reset() { + *x = ZipkinConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_config_trace_v3_zipkin_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ZipkinConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ZipkinConfig) ProtoMessage() {} + +func (x *ZipkinConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_config_trace_v3_zipkin_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ZipkinConfig.ProtoReflect.Descriptor instead. +func (*ZipkinConfig) Descriptor() ([]byte, []int) { + return file_envoy_config_trace_v3_zipkin_proto_rawDescGZIP(), []int{0} +} + +func (x *ZipkinConfig) GetCollectorCluster() string { + if x != nil { + return x.CollectorCluster + } + return "" +} + +func (x *ZipkinConfig) GetCollectorEndpoint() string { + if x != nil { + return x.CollectorEndpoint + } + return "" +} + +func (x *ZipkinConfig) GetTraceId_128Bit() bool { + if x != nil { + return x.TraceId_128Bit + } + return false +} + +func (x *ZipkinConfig) GetSharedSpanContext() *wrapperspb.BoolValue { + if x != nil { + return x.SharedSpanContext + } + return nil +} + +func (x *ZipkinConfig) GetCollectorEndpointVersion() ZipkinConfig_CollectorEndpointVersion { + if x != nil { + return x.CollectorEndpointVersion + } + return ZipkinConfig_DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE +} + +func (x *ZipkinConfig) GetCollectorHostname() string { + if x != nil { + return x.CollectorHostname + } + return "" +} + +// Deprecated: Marked as deprecated in envoy/config/trace/v3/zipkin.proto. 
+func (x *ZipkinConfig) GetSplitSpansForRequest() bool { + if x != nil { + return x.SplitSpansForRequest + } + return false +} + +var File_envoy_config_trace_v3_zipkin_proto protoreflect.FileDescriptor + +var file_envoy_config_trace_v3_zipkin_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x7a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, + 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x84, 0x05, 0x0a, 0x0c, + 0x5a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x34, 0x0a, 0x11, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x12, 0x36, 0x0a, 0x12, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, + 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x31, 0x32, 0x38, 0x62, 0x69, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x31, 0x32, 0x38, 0x62, + 0x69, 0x74, 0x12, 0x4a, 0x0a, 0x13, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x73, 0x70, 0x61, + 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x11, 0x73, 0x68, 0x61, + 0x72, 0x65, 0x64, 0x53, 0x70, 0x61, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x7a, + 0x0a, 0x1a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x65, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 
0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x5a, 0x69, 0x70, 0x6b, 0x69, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x18, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x17, 0x73, 0x70, 0x6c, + 0x69, 0x74, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, + 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x14, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x53, 0x70, + 0x61, 0x6e, 0x73, 0x46, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x78, 0x0a, + 0x18, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x25, 0x44, 0x45, 0x50, + 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x41, 0x56, + 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x44, 0x4f, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x55, + 0x53, 0x45, 0x10, 0x00, 0x1a, 0x08, 0xa8, 0xf7, 0xb4, 0x8b, 0x02, 0x01, 0x08, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x48, 0x54, 0x54, 0x50, 0x5f, 0x4a, 0x53, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x0e, 0x0a, + 0x0a, 0x48, 0x54, 0x54, 0x50, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x10, 0x02, 0x12, 0x08, 0x0a, + 0x04, 0x47, 0x52, 0x50, 0x43, 0x10, 0x03, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x5a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x42, 0xb1, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x29, 0x12, 0x27, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x72, 0x73, 0x2e, 0x7a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x2e, 0x76, 0x34, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, + 0x33, 0x42, 0x0b, 0x5a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_config_trace_v3_zipkin_proto_rawDescOnce sync.Once + file_envoy_config_trace_v3_zipkin_proto_rawDescData = file_envoy_config_trace_v3_zipkin_proto_rawDesc +) + 
+func file_envoy_config_trace_v3_zipkin_proto_rawDescGZIP() []byte { + file_envoy_config_trace_v3_zipkin_proto_rawDescOnce.Do(func() { + file_envoy_config_trace_v3_zipkin_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_trace_v3_zipkin_proto_rawDescData) + }) + return file_envoy_config_trace_v3_zipkin_proto_rawDescData +} + +var file_envoy_config_trace_v3_zipkin_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_config_trace_v3_zipkin_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_config_trace_v3_zipkin_proto_goTypes = []interface{}{ + (ZipkinConfig_CollectorEndpointVersion)(0), // 0: envoy.config.trace.v3.ZipkinConfig.CollectorEndpointVersion + (*ZipkinConfig)(nil), // 1: envoy.config.trace.v3.ZipkinConfig + (*wrapperspb.BoolValue)(nil), // 2: google.protobuf.BoolValue +} +var file_envoy_config_trace_v3_zipkin_proto_depIdxs = []int32{ + 2, // 0: envoy.config.trace.v3.ZipkinConfig.shared_span_context:type_name -> google.protobuf.BoolValue + 0, // 1: envoy.config.trace.v3.ZipkinConfig.collector_endpoint_version:type_name -> envoy.config.trace.v3.ZipkinConfig.CollectorEndpointVersion + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_config_trace_v3_zipkin_proto_init() } +func file_envoy_config_trace_v3_zipkin_proto_init() { + if File_envoy_config_trace_v3_zipkin_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_config_trace_v3_zipkin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ZipkinConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_config_trace_v3_zipkin_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_config_trace_v3_zipkin_proto_goTypes, + DependencyIndexes: file_envoy_config_trace_v3_zipkin_proto_depIdxs, + EnumInfos: file_envoy_config_trace_v3_zipkin_proto_enumTypes, + MessageInfos: file_envoy_config_trace_v3_zipkin_proto_msgTypes, + }.Build() + File_envoy_config_trace_v3_zipkin_proto = out.File + file_envoy_config_trace_v3_zipkin_proto_rawDesc = nil + file_envoy_config_trace_v3_zipkin_proto_goTypes = nil + file_envoy_config_trace_v3_zipkin_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.validate.go new file mode 100644 index 0000000000..d2db7385ac --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin.pb.validate.go @@ -0,0 +1,195 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/config/trace/v3/zipkin.proto + +package tracev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ZipkinConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ZipkinConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ZipkinConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ZipkinConfigMultiError, or +// nil if none found. +func (m *ZipkinConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *ZipkinConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetCollectorCluster()) < 1 { + err := ZipkinConfigValidationError{ + field: "CollectorCluster", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetCollectorEndpoint()) < 1 { + err := ZipkinConfigValidationError{ + field: "CollectorEndpoint", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for TraceId_128Bit + + if all { + switch v := interface{}(m.GetSharedSpanContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ZipkinConfigValidationError{ + field: "SharedSpanContext", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ZipkinConfigValidationError{ + field: "SharedSpanContext", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSharedSpanContext()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ZipkinConfigValidationError{ + field: "SharedSpanContext", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for CollectorEndpointVersion + + // no validation rules for CollectorHostname + + // no validation rules for SplitSpansForRequest + + if len(errors) > 0 { + return ZipkinConfigMultiError(errors) + } + + return nil +} + +// ZipkinConfigMultiError is an error wrapping multiple validation errors +// returned by ZipkinConfig.ValidateAll() if the designated constraints aren't met. +type ZipkinConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ZipkinConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m ZipkinConfigMultiError) AllErrors() []error { return m } + +// ZipkinConfigValidationError is the validation error returned by +// ZipkinConfig.Validate if the designated constraints aren't met. +type ZipkinConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ZipkinConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ZipkinConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ZipkinConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ZipkinConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ZipkinConfigValidationError) ErrorName() string { return "ZipkinConfigValidationError" } + +// Error satisfies the builtin error interface +func (e ZipkinConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sZipkinConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ZipkinConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ZipkinConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin_vtproto.pb.go new file mode 100644 index 0000000000..2dc450502f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/trace/v3/zipkin_vtproto.pb.go @@ -0,0 +1,144 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/config/trace/v3/zipkin.proto + +package tracev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ZipkinConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ZipkinConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ZipkinConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SplitSpansForRequest { + i-- + if m.SplitSpansForRequest { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if len(m.CollectorHostname) > 0 { + i -= len(m.CollectorHostname) + copy(dAtA[i:], m.CollectorHostname) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CollectorHostname))) + i-- + dAtA[i] = 0x32 + } + if m.CollectorEndpointVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CollectorEndpointVersion)) + i-- + dAtA[i] = 0x28 + } + if m.SharedSpanContext != nil { + size, err := (*wrapperspb.BoolValue)(m.SharedSpanContext).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.TraceId_128Bit { + i-- + if m.TraceId_128Bit { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.CollectorEndpoint) > 0 { + i -= len(m.CollectorEndpoint) + copy(dAtA[i:], m.CollectorEndpoint) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CollectorEndpoint))) + i-- + dAtA[i] = 0x12 + } + if len(m.CollectorCluster) > 0 { + i -= len(m.CollectorCluster) + copy(dAtA[i:], m.CollectorCluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CollectorCluster))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ZipkinConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CollectorCluster) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.CollectorEndpoint) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TraceId_128Bit { + n += 2 + } + if m.SharedSpanContext != nil { + l = (*wrapperspb.BoolValue)(m.SharedSpanContext).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CollectorEndpointVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.CollectorEndpointVersion)) + } + l = len(m.CollectorHostname) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SplitSpansForRequest { + n += 2 + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.go new file mode 100644 index 0000000000..eb9a42f3d7 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.go @@ -0,0 +1,2599 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/data/accesslog/v3/accesslog.proto + +package accesslogv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type AccessLogType int32 + +const ( + AccessLogType_NotSet AccessLogType = 0 + AccessLogType_TcpUpstreamConnected AccessLogType = 1 + AccessLogType_TcpPeriodic AccessLogType = 2 + AccessLogType_TcpConnectionEnd AccessLogType = 3 + AccessLogType_DownstreamStart AccessLogType = 4 + AccessLogType_DownstreamPeriodic AccessLogType = 5 + AccessLogType_DownstreamEnd AccessLogType = 6 + AccessLogType_UpstreamPoolReady AccessLogType = 7 + AccessLogType_UpstreamPeriodic AccessLogType = 8 + AccessLogType_UpstreamEnd AccessLogType = 9 + AccessLogType_DownstreamTunnelSuccessfullyEstablished AccessLogType = 10 + AccessLogType_UdpTunnelUpstreamConnected AccessLogType = 11 + AccessLogType_UdpPeriodic AccessLogType = 12 + AccessLogType_UdpSessionEnd AccessLogType = 13 +) + +// Enum value maps for AccessLogType. +var ( + AccessLogType_name = map[int32]string{ + 0: "NotSet", + 1: "TcpUpstreamConnected", + 2: "TcpPeriodic", + 3: "TcpConnectionEnd", + 4: "DownstreamStart", + 5: "DownstreamPeriodic", + 6: "DownstreamEnd", + 7: "UpstreamPoolReady", + 8: "UpstreamPeriodic", + 9: "UpstreamEnd", + 10: "DownstreamTunnelSuccessfullyEstablished", + 11: "UdpTunnelUpstreamConnected", + 12: "UdpPeriodic", + 13: "UdpSessionEnd", + } + AccessLogType_value = map[string]int32{ + "NotSet": 0, + "TcpUpstreamConnected": 1, + "TcpPeriodic": 2, + "TcpConnectionEnd": 3, + "DownstreamStart": 4, + "DownstreamPeriodic": 5, + "DownstreamEnd": 6, + "UpstreamPoolReady": 7, + "UpstreamPeriodic": 8, + "UpstreamEnd": 9, + "DownstreamTunnelSuccessfullyEstablished": 10, + "UdpTunnelUpstreamConnected": 11, + "UdpPeriodic": 12, + "UdpSessionEnd": 13, + } +) + +func (x AccessLogType) Enum() *AccessLogType { + p := new(AccessLogType) + *p = x + return p +} + +func (x AccessLogType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AccessLogType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_data_accesslog_v3_accesslog_proto_enumTypes[0].Descriptor() +} + +func (AccessLogType) Type() protoreflect.EnumType { + return &file_envoy_data_accesslog_v3_accesslog_proto_enumTypes[0] +} + +func (x AccessLogType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AccessLogType.Descriptor instead. 
+func (AccessLogType) EnumDescriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{0} +} + +// HTTP version +type HTTPAccessLogEntry_HTTPVersion int32 + +const ( + HTTPAccessLogEntry_PROTOCOL_UNSPECIFIED HTTPAccessLogEntry_HTTPVersion = 0 + HTTPAccessLogEntry_HTTP10 HTTPAccessLogEntry_HTTPVersion = 1 + HTTPAccessLogEntry_HTTP11 HTTPAccessLogEntry_HTTPVersion = 2 + HTTPAccessLogEntry_HTTP2 HTTPAccessLogEntry_HTTPVersion = 3 + HTTPAccessLogEntry_HTTP3 HTTPAccessLogEntry_HTTPVersion = 4 +) + +// Enum value maps for HTTPAccessLogEntry_HTTPVersion. +var ( + HTTPAccessLogEntry_HTTPVersion_name = map[int32]string{ + 0: "PROTOCOL_UNSPECIFIED", + 1: "HTTP10", + 2: "HTTP11", + 3: "HTTP2", + 4: "HTTP3", + } + HTTPAccessLogEntry_HTTPVersion_value = map[string]int32{ + "PROTOCOL_UNSPECIFIED": 0, + "HTTP10": 1, + "HTTP11": 2, + "HTTP2": 3, + "HTTP3": 4, + } +) + +func (x HTTPAccessLogEntry_HTTPVersion) Enum() *HTTPAccessLogEntry_HTTPVersion { + p := new(HTTPAccessLogEntry_HTTPVersion) + *p = x + return p +} + +func (x HTTPAccessLogEntry_HTTPVersion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HTTPAccessLogEntry_HTTPVersion) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_data_accesslog_v3_accesslog_proto_enumTypes[1].Descriptor() +} + +func (HTTPAccessLogEntry_HTTPVersion) Type() protoreflect.EnumType { + return &file_envoy_data_accesslog_v3_accesslog_proto_enumTypes[1] +} + +func (x HTTPAccessLogEntry_HTTPVersion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HTTPAccessLogEntry_HTTPVersion.Descriptor instead. +func (HTTPAccessLogEntry_HTTPVersion) EnumDescriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{1, 0} +} + +// Reasons why the request was unauthorized +type ResponseFlags_Unauthorized_Reason int32 + +const ( + ResponseFlags_Unauthorized_REASON_UNSPECIFIED ResponseFlags_Unauthorized_Reason = 0 + // The request was denied by the external authorization service. + ResponseFlags_Unauthorized_EXTERNAL_SERVICE ResponseFlags_Unauthorized_Reason = 1 +) + +// Enum value maps for ResponseFlags_Unauthorized_Reason. +var ( + ResponseFlags_Unauthorized_Reason_name = map[int32]string{ + 0: "REASON_UNSPECIFIED", + 1: "EXTERNAL_SERVICE", + } + ResponseFlags_Unauthorized_Reason_value = map[string]int32{ + "REASON_UNSPECIFIED": 0, + "EXTERNAL_SERVICE": 1, + } +) + +func (x ResponseFlags_Unauthorized_Reason) Enum() *ResponseFlags_Unauthorized_Reason { + p := new(ResponseFlags_Unauthorized_Reason) + *p = x + return p +} + +func (x ResponseFlags_Unauthorized_Reason) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ResponseFlags_Unauthorized_Reason) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_data_accesslog_v3_accesslog_proto_enumTypes[2].Descriptor() +} + +func (ResponseFlags_Unauthorized_Reason) Type() protoreflect.EnumType { + return &file_envoy_data_accesslog_v3_accesslog_proto_enumTypes[2] +} + +func (x ResponseFlags_Unauthorized_Reason) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ResponseFlags_Unauthorized_Reason.Descriptor instead. 
+func (ResponseFlags_Unauthorized_Reason) EnumDescriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{4, 0, 0} +} + +type TLSProperties_TLSVersion int32 + +const ( + TLSProperties_VERSION_UNSPECIFIED TLSProperties_TLSVersion = 0 + TLSProperties_TLSv1 TLSProperties_TLSVersion = 1 + TLSProperties_TLSv1_1 TLSProperties_TLSVersion = 2 + TLSProperties_TLSv1_2 TLSProperties_TLSVersion = 3 + TLSProperties_TLSv1_3 TLSProperties_TLSVersion = 4 +) + +// Enum value maps for TLSProperties_TLSVersion. +var ( + TLSProperties_TLSVersion_name = map[int32]string{ + 0: "VERSION_UNSPECIFIED", + 1: "TLSv1", + 2: "TLSv1_1", + 3: "TLSv1_2", + 4: "TLSv1_3", + } + TLSProperties_TLSVersion_value = map[string]int32{ + "VERSION_UNSPECIFIED": 0, + "TLSv1": 1, + "TLSv1_1": 2, + "TLSv1_2": 3, + "TLSv1_3": 4, + } +) + +func (x TLSProperties_TLSVersion) Enum() *TLSProperties_TLSVersion { + p := new(TLSProperties_TLSVersion) + *p = x + return p +} + +func (x TLSProperties_TLSVersion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TLSProperties_TLSVersion) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_data_accesslog_v3_accesslog_proto_enumTypes[3].Descriptor() +} + +func (TLSProperties_TLSVersion) Type() protoreflect.EnumType { + return &file_envoy_data_accesslog_v3_accesslog_proto_enumTypes[3] +} + +func (x TLSProperties_TLSVersion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TLSProperties_TLSVersion.Descriptor instead. +func (TLSProperties_TLSVersion) EnumDescriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{5, 0} +} + +type TCPAccessLogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Common properties shared by all Envoy access logs. + CommonProperties *AccessLogCommon `protobuf:"bytes,1,opt,name=common_properties,json=commonProperties,proto3" json:"common_properties,omitempty"` + // Properties of the TCP connection. + ConnectionProperties *ConnectionProperties `protobuf:"bytes,2,opt,name=connection_properties,json=connectionProperties,proto3" json:"connection_properties,omitempty"` +} + +func (x *TCPAccessLogEntry) Reset() { + *x = TCPAccessLogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TCPAccessLogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TCPAccessLogEntry) ProtoMessage() {} + +func (x *TCPAccessLogEntry) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TCPAccessLogEntry.ProtoReflect.Descriptor instead. 
+func (*TCPAccessLogEntry) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{0} +} + +func (x *TCPAccessLogEntry) GetCommonProperties() *AccessLogCommon { + if x != nil { + return x.CommonProperties + } + return nil +} + +func (x *TCPAccessLogEntry) GetConnectionProperties() *ConnectionProperties { + if x != nil { + return x.ConnectionProperties + } + return nil +} + +type HTTPAccessLogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Common properties shared by all Envoy access logs. + CommonProperties *AccessLogCommon `protobuf:"bytes,1,opt,name=common_properties,json=commonProperties,proto3" json:"common_properties,omitempty"` + ProtocolVersion HTTPAccessLogEntry_HTTPVersion `protobuf:"varint,2,opt,name=protocol_version,json=protocolVersion,proto3,enum=envoy.data.accesslog.v3.HTTPAccessLogEntry_HTTPVersion" json:"protocol_version,omitempty"` + // Description of the incoming HTTP request. + Request *HTTPRequestProperties `protobuf:"bytes,3,opt,name=request,proto3" json:"request,omitempty"` + // Description of the outgoing HTTP response. + Response *HTTPResponseProperties `protobuf:"bytes,4,opt,name=response,proto3" json:"response,omitempty"` +} + +func (x *HTTPAccessLogEntry) Reset() { + *x = HTTPAccessLogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HTTPAccessLogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HTTPAccessLogEntry) ProtoMessage() {} + +func (x *HTTPAccessLogEntry) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HTTPAccessLogEntry.ProtoReflect.Descriptor instead. +func (*HTTPAccessLogEntry) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{1} +} + +func (x *HTTPAccessLogEntry) GetCommonProperties() *AccessLogCommon { + if x != nil { + return x.CommonProperties + } + return nil +} + +func (x *HTTPAccessLogEntry) GetProtocolVersion() HTTPAccessLogEntry_HTTPVersion { + if x != nil { + return x.ProtocolVersion + } + return HTTPAccessLogEntry_PROTOCOL_UNSPECIFIED +} + +func (x *HTTPAccessLogEntry) GetRequest() *HTTPRequestProperties { + if x != nil { + return x.Request + } + return nil +} + +func (x *HTTPAccessLogEntry) GetResponse() *HTTPResponseProperties { + if x != nil { + return x.Response + } + return nil +} + +// Defines fields for a connection +type ConnectionProperties struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Number of bytes received from downstream. + ReceivedBytes uint64 `protobuf:"varint,1,opt,name=received_bytes,json=receivedBytes,proto3" json:"received_bytes,omitempty"` + // Number of bytes sent to downstream. 
+ SentBytes uint64 `protobuf:"varint,2,opt,name=sent_bytes,json=sentBytes,proto3" json:"sent_bytes,omitempty"` +} + +func (x *ConnectionProperties) Reset() { + *x = ConnectionProperties{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConnectionProperties) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectionProperties) ProtoMessage() {} + +func (x *ConnectionProperties) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConnectionProperties.ProtoReflect.Descriptor instead. +func (*ConnectionProperties) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{2} +} + +func (x *ConnectionProperties) GetReceivedBytes() uint64 { + if x != nil { + return x.ReceivedBytes + } + return 0 +} + +func (x *ConnectionProperties) GetSentBytes() uint64 { + if x != nil { + return x.SentBytes + } + return 0 +} + +// Defines fields that are shared by all Envoy access logs. +// [#next-free-field: 34] +type AccessLogCommon struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // [#not-implemented-hide:] + // This field indicates the rate at which this log entry was sampled. + // Valid range is (0.0, 1.0]. + SampleRate float64 `protobuf:"fixed64,1,opt,name=sample_rate,json=sampleRate,proto3" json:"sample_rate,omitempty"` + // This field is the remote/origin address on which the request from the user was received. + // Note: This may not be the physical peer. E.g, if the remote address is inferred from for + // example the x-forwarder-for header, proxy protocol, etc. + DownstreamRemoteAddress *v3.Address `protobuf:"bytes,2,opt,name=downstream_remote_address,json=downstreamRemoteAddress,proto3" json:"downstream_remote_address,omitempty"` + // This field is the local/destination address on which the request from the user was received. + DownstreamLocalAddress *v3.Address `protobuf:"bytes,3,opt,name=downstream_local_address,json=downstreamLocalAddress,proto3" json:"downstream_local_address,omitempty"` + // If the connection is secure,S this field will contain TLS properties. + TlsProperties *TLSProperties `protobuf:"bytes,4,opt,name=tls_properties,json=tlsProperties,proto3" json:"tls_properties,omitempty"` + // The time that Envoy started servicing this request. This is effectively the time that the first + // downstream byte is received. + StartTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + // Interval between the first downstream byte received and the last + // downstream byte received (i.e. time it takes to receive a request). + TimeToLastRxByte *durationpb.Duration `protobuf:"bytes,6,opt,name=time_to_last_rx_byte,json=timeToLastRxByte,proto3" json:"time_to_last_rx_byte,omitempty"` + // Interval between the first downstream byte received and the first upstream byte sent. There may + // by considerable delta between “time_to_last_rx_byte“ and this value due to filters. 
+ // Additionally, the same caveats apply as documented in “time_to_last_downstream_tx_byte“ about + // not accounting for kernel socket buffer time, etc. + TimeToFirstUpstreamTxByte *durationpb.Duration `protobuf:"bytes,7,opt,name=time_to_first_upstream_tx_byte,json=timeToFirstUpstreamTxByte,proto3" json:"time_to_first_upstream_tx_byte,omitempty"` + // Interval between the first downstream byte received and the last upstream byte sent. There may + // by considerable delta between “time_to_last_rx_byte“ and this value due to filters. + // Additionally, the same caveats apply as documented in “time_to_last_downstream_tx_byte“ about + // not accounting for kernel socket buffer time, etc. + TimeToLastUpstreamTxByte *durationpb.Duration `protobuf:"bytes,8,opt,name=time_to_last_upstream_tx_byte,json=timeToLastUpstreamTxByte,proto3" json:"time_to_last_upstream_tx_byte,omitempty"` + // Interval between the first downstream byte received and the first upstream + // byte received (i.e. time it takes to start receiving a response). + TimeToFirstUpstreamRxByte *durationpb.Duration `protobuf:"bytes,9,opt,name=time_to_first_upstream_rx_byte,json=timeToFirstUpstreamRxByte,proto3" json:"time_to_first_upstream_rx_byte,omitempty"` + // Interval between the first downstream byte received and the last upstream + // byte received (i.e. time it takes to receive a complete response). + TimeToLastUpstreamRxByte *durationpb.Duration `protobuf:"bytes,10,opt,name=time_to_last_upstream_rx_byte,json=timeToLastUpstreamRxByte,proto3" json:"time_to_last_upstream_rx_byte,omitempty"` + // Interval between the first downstream byte received and the first downstream byte sent. + // There may be a considerable delta between the “time_to_first_upstream_rx_byte“ and this field + // due to filters. Additionally, the same caveats apply as documented in + // “time_to_last_downstream_tx_byte“ about not accounting for kernel socket buffer time, etc. + TimeToFirstDownstreamTxByte *durationpb.Duration `protobuf:"bytes,11,opt,name=time_to_first_downstream_tx_byte,json=timeToFirstDownstreamTxByte,proto3" json:"time_to_first_downstream_tx_byte,omitempty"` + // Interval between the first downstream byte received and the last downstream byte sent. + // Depending on protocol, buffering, windowing, filters, etc. there may be a considerable delta + // between “time_to_last_upstream_rx_byte“ and this field. Note also that this is an approximate + // time. In the current implementation it does not include kernel socket buffer time. In the + // current implementation it also does not include send window buffering inside the HTTP/2 codec. + // In the future it is likely that work will be done to make this duration more accurate. + TimeToLastDownstreamTxByte *durationpb.Duration `protobuf:"bytes,12,opt,name=time_to_last_downstream_tx_byte,json=timeToLastDownstreamTxByte,proto3" json:"time_to_last_downstream_tx_byte,omitempty"` + // The upstream remote/destination address that handles this exchange. This does not include + // retries. + UpstreamRemoteAddress *v3.Address `protobuf:"bytes,13,opt,name=upstream_remote_address,json=upstreamRemoteAddress,proto3" json:"upstream_remote_address,omitempty"` + // The upstream local/origin address that handles this exchange. This does not include retries. + UpstreamLocalAddress *v3.Address `protobuf:"bytes,14,opt,name=upstream_local_address,json=upstreamLocalAddress,proto3" json:"upstream_local_address,omitempty"` + // The upstream cluster that “upstream_remote_address“ belongs to. 
+ UpstreamCluster string `protobuf:"bytes,15,opt,name=upstream_cluster,json=upstreamCluster,proto3" json:"upstream_cluster,omitempty"` + // Flags indicating occurrences during request/response processing. + ResponseFlags *ResponseFlags `protobuf:"bytes,16,opt,name=response_flags,json=responseFlags,proto3" json:"response_flags,omitempty"` + // All metadata encountered during request processing, including endpoint + // selection. + // + // This can be used to associate IDs attached to the various configurations + // used to process this request with the access log entry. For example, a + // route created from a higher level forwarding rule with some ID can place + // that ID in this field and cross reference later. It can also be used to + // determine if a canary endpoint was used or not. + Metadata *v3.Metadata `protobuf:"bytes,17,opt,name=metadata,proto3" json:"metadata,omitempty"` + // If upstream connection failed due to transport socket (e.g. TLS handshake), provides the + // failure reason from the transport socket. The format of this field depends on the configured + // upstream transport socket. Common TLS failures are in + // :ref:`TLS trouble shooting `. + UpstreamTransportFailureReason string `protobuf:"bytes,18,opt,name=upstream_transport_failure_reason,json=upstreamTransportFailureReason,proto3" json:"upstream_transport_failure_reason,omitempty"` + // The name of the route + RouteName string `protobuf:"bytes,19,opt,name=route_name,json=routeName,proto3" json:"route_name,omitempty"` + // This field is the downstream direct remote address on which the request from the user was + // received. Note: This is always the physical peer, even if the remote address is inferred from + // for example the x-forwarder-for header, proxy protocol, etc. + DownstreamDirectRemoteAddress *v3.Address `protobuf:"bytes,20,opt,name=downstream_direct_remote_address,json=downstreamDirectRemoteAddress,proto3" json:"downstream_direct_remote_address,omitempty"` + // Map of filter state in stream info that have been configured to be logged. If the filter + // state serialized to any message other than “google.protobuf.Any“ it will be packed into + // “google.protobuf.Any“. + FilterStateObjects map[string]*anypb.Any `protobuf:"bytes,21,rep,name=filter_state_objects,json=filterStateObjects,proto3" json:"filter_state_objects,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // A list of custom tags, which annotate logs with additional information. + // To configure this value, users should configure + // :ref:`custom_tags `. + CustomTags map[string]string `protobuf:"bytes,22,rep,name=custom_tags,json=customTags,proto3" json:"custom_tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // For HTTP: Total duration in milliseconds of the request from the start time to the last byte out. + // For TCP: Total duration in milliseconds of the downstream connection. + // This is the total duration of the request (i.e., when the request's ActiveStream is destroyed) + // and may be longer than “time_to_last_downstream_tx_byte“. + Duration *durationpb.Duration `protobuf:"bytes,23,opt,name=duration,proto3" json:"duration,omitempty"` + // For HTTP: Number of times the request is attempted upstream. Note that the field is omitted when the request was never attempted upstream. + // For TCP: Number of times the connection request is attempted upstream. 
Note that the field is omitted when the connect request was never attempted upstream. + UpstreamRequestAttemptCount uint32 `protobuf:"varint,24,opt,name=upstream_request_attempt_count,json=upstreamRequestAttemptCount,proto3" json:"upstream_request_attempt_count,omitempty"` + // Connection termination details may provide additional information about why the connection was terminated by Envoy for L4 reasons. + ConnectionTerminationDetails string `protobuf:"bytes,25,opt,name=connection_termination_details,json=connectionTerminationDetails,proto3" json:"connection_termination_details,omitempty"` + // Optional unique id of stream (TCP connection, long-live HTTP2 stream, HTTP request) for logging and tracing. + // This could be any format string that could be used to identify one stream. + StreamId string `protobuf:"bytes,26,opt,name=stream_id,json=streamId,proto3" json:"stream_id,omitempty"` + // If this log entry is final log entry that flushed after the stream completed or + // intermediate log entry that flushed periodically during the stream. + // There may be multiple intermediate log entries and only one final log entry for each + // long-live stream (TCP connection, long-live HTTP2 stream). + // And if it is necessary, unique ID or identifier can be added to the log entry + // :ref:`stream_id ` to + // correlate all these intermediate log entries and final log entry. + // + // .. attention:: + // + // This field is deprecated in favor of ``access_log_type`` for better indication of the + // type of the access log record. + // + // Deprecated: Marked as deprecated in envoy/data/accesslog/v3/accesslog.proto. + IntermediateLogEntry bool `protobuf:"varint,27,opt,name=intermediate_log_entry,json=intermediateLogEntry,proto3" json:"intermediate_log_entry,omitempty"` + // If downstream connection in listener failed due to transport socket (e.g. TLS handshake), provides the + // failure reason from the transport socket. The format of this field depends on the configured downstream + // transport socket. Common TLS failures are in :ref:`TLS trouble shooting `. + DownstreamTransportFailureReason string `protobuf:"bytes,28,opt,name=downstream_transport_failure_reason,json=downstreamTransportFailureReason,proto3" json:"downstream_transport_failure_reason,omitempty"` + // For HTTP: Total number of bytes sent to the downstream by the http stream. + // For TCP: Total number of bytes sent to the downstream by the tcp proxy. + DownstreamWireBytesSent uint64 `protobuf:"varint,29,opt,name=downstream_wire_bytes_sent,json=downstreamWireBytesSent,proto3" json:"downstream_wire_bytes_sent,omitempty"` + // For HTTP: Total number of bytes received from the downstream by the http stream. Envoy over counts sizes of received HTTP/1.1 pipelined requests by adding up bytes of requests in the pipeline to the one currently being processed. + // For TCP: Total number of bytes received from the downstream by the tcp proxy. + DownstreamWireBytesReceived uint64 `protobuf:"varint,30,opt,name=downstream_wire_bytes_received,json=downstreamWireBytesReceived,proto3" json:"downstream_wire_bytes_received,omitempty"` + // For HTTP: Total number of bytes sent to the upstream by the http stream. This value accumulates during upstream retries. + // For TCP: Total number of bytes sent to the upstream by the tcp proxy. 
+ UpstreamWireBytesSent uint64 `protobuf:"varint,31,opt,name=upstream_wire_bytes_sent,json=upstreamWireBytesSent,proto3" json:"upstream_wire_bytes_sent,omitempty"` + // For HTTP: Total number of bytes received from the upstream by the http stream. + // For TCP: Total number of bytes sent to the upstream by the tcp proxy. + UpstreamWireBytesReceived uint64 `protobuf:"varint,32,opt,name=upstream_wire_bytes_received,json=upstreamWireBytesReceived,proto3" json:"upstream_wire_bytes_received,omitempty"` + // The type of the access log, which indicates when the log was recorded. + // See :ref:`ACCESS_LOG_TYPE ` for the available values. + // In case the access log was recorded by a flow which does not correspond to one of the supported + // values, then the default value will be “NotSet“. + // For more information about how access log behaves and when it is being recorded, + // please refer to :ref:`access logging `. + AccessLogType AccessLogType `protobuf:"varint,33,opt,name=access_log_type,json=accessLogType,proto3,enum=envoy.data.accesslog.v3.AccessLogType" json:"access_log_type,omitempty"` +} + +func (x *AccessLogCommon) Reset() { + *x = AccessLogCommon{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AccessLogCommon) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AccessLogCommon) ProtoMessage() {} + +func (x *AccessLogCommon) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AccessLogCommon.ProtoReflect.Descriptor instead. 
+func (*AccessLogCommon) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{3} +} + +func (x *AccessLogCommon) GetSampleRate() float64 { + if x != nil { + return x.SampleRate + } + return 0 +} + +func (x *AccessLogCommon) GetDownstreamRemoteAddress() *v3.Address { + if x != nil { + return x.DownstreamRemoteAddress + } + return nil +} + +func (x *AccessLogCommon) GetDownstreamLocalAddress() *v3.Address { + if x != nil { + return x.DownstreamLocalAddress + } + return nil +} + +func (x *AccessLogCommon) GetTlsProperties() *TLSProperties { + if x != nil { + return x.TlsProperties + } + return nil +} + +func (x *AccessLogCommon) GetStartTime() *timestamppb.Timestamp { + if x != nil { + return x.StartTime + } + return nil +} + +func (x *AccessLogCommon) GetTimeToLastRxByte() *durationpb.Duration { + if x != nil { + return x.TimeToLastRxByte + } + return nil +} + +func (x *AccessLogCommon) GetTimeToFirstUpstreamTxByte() *durationpb.Duration { + if x != nil { + return x.TimeToFirstUpstreamTxByte + } + return nil +} + +func (x *AccessLogCommon) GetTimeToLastUpstreamTxByte() *durationpb.Duration { + if x != nil { + return x.TimeToLastUpstreamTxByte + } + return nil +} + +func (x *AccessLogCommon) GetTimeToFirstUpstreamRxByte() *durationpb.Duration { + if x != nil { + return x.TimeToFirstUpstreamRxByte + } + return nil +} + +func (x *AccessLogCommon) GetTimeToLastUpstreamRxByte() *durationpb.Duration { + if x != nil { + return x.TimeToLastUpstreamRxByte + } + return nil +} + +func (x *AccessLogCommon) GetTimeToFirstDownstreamTxByte() *durationpb.Duration { + if x != nil { + return x.TimeToFirstDownstreamTxByte + } + return nil +} + +func (x *AccessLogCommon) GetTimeToLastDownstreamTxByte() *durationpb.Duration { + if x != nil { + return x.TimeToLastDownstreamTxByte + } + return nil +} + +func (x *AccessLogCommon) GetUpstreamRemoteAddress() *v3.Address { + if x != nil { + return x.UpstreamRemoteAddress + } + return nil +} + +func (x *AccessLogCommon) GetUpstreamLocalAddress() *v3.Address { + if x != nil { + return x.UpstreamLocalAddress + } + return nil +} + +func (x *AccessLogCommon) GetUpstreamCluster() string { + if x != nil { + return x.UpstreamCluster + } + return "" +} + +func (x *AccessLogCommon) GetResponseFlags() *ResponseFlags { + if x != nil { + return x.ResponseFlags + } + return nil +} + +func (x *AccessLogCommon) GetMetadata() *v3.Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *AccessLogCommon) GetUpstreamTransportFailureReason() string { + if x != nil { + return x.UpstreamTransportFailureReason + } + return "" +} + +func (x *AccessLogCommon) GetRouteName() string { + if x != nil { + return x.RouteName + } + return "" +} + +func (x *AccessLogCommon) GetDownstreamDirectRemoteAddress() *v3.Address { + if x != nil { + return x.DownstreamDirectRemoteAddress + } + return nil +} + +func (x *AccessLogCommon) GetFilterStateObjects() map[string]*anypb.Any { + if x != nil { + return x.FilterStateObjects + } + return nil +} + +func (x *AccessLogCommon) GetCustomTags() map[string]string { + if x != nil { + return x.CustomTags + } + return nil +} + +func (x *AccessLogCommon) GetDuration() *durationpb.Duration { + if x != nil { + return x.Duration + } + return nil +} + +func (x *AccessLogCommon) GetUpstreamRequestAttemptCount() uint32 { + if x != nil { + return x.UpstreamRequestAttemptCount + } + return 0 +} + +func (x *AccessLogCommon) GetConnectionTerminationDetails() string { + if x != nil { + return 
x.ConnectionTerminationDetails + } + return "" +} + +func (x *AccessLogCommon) GetStreamId() string { + if x != nil { + return x.StreamId + } + return "" +} + +// Deprecated: Marked as deprecated in envoy/data/accesslog/v3/accesslog.proto. +func (x *AccessLogCommon) GetIntermediateLogEntry() bool { + if x != nil { + return x.IntermediateLogEntry + } + return false +} + +func (x *AccessLogCommon) GetDownstreamTransportFailureReason() string { + if x != nil { + return x.DownstreamTransportFailureReason + } + return "" +} + +func (x *AccessLogCommon) GetDownstreamWireBytesSent() uint64 { + if x != nil { + return x.DownstreamWireBytesSent + } + return 0 +} + +func (x *AccessLogCommon) GetDownstreamWireBytesReceived() uint64 { + if x != nil { + return x.DownstreamWireBytesReceived + } + return 0 +} + +func (x *AccessLogCommon) GetUpstreamWireBytesSent() uint64 { + if x != nil { + return x.UpstreamWireBytesSent + } + return 0 +} + +func (x *AccessLogCommon) GetUpstreamWireBytesReceived() uint64 { + if x != nil { + return x.UpstreamWireBytesReceived + } + return 0 +} + +func (x *AccessLogCommon) GetAccessLogType() AccessLogType { + if x != nil { + return x.AccessLogType + } + return AccessLogType_NotSet +} + +// Flags indicating occurrences during request/response processing. +// [#next-free-field: 29] +type ResponseFlags struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Indicates local server healthcheck failed. + FailedLocalHealthcheck bool `protobuf:"varint,1,opt,name=failed_local_healthcheck,json=failedLocalHealthcheck,proto3" json:"failed_local_healthcheck,omitempty"` + // Indicates there was no healthy upstream. + NoHealthyUpstream bool `protobuf:"varint,2,opt,name=no_healthy_upstream,json=noHealthyUpstream,proto3" json:"no_healthy_upstream,omitempty"` + // Indicates an there was an upstream request timeout. + UpstreamRequestTimeout bool `protobuf:"varint,3,opt,name=upstream_request_timeout,json=upstreamRequestTimeout,proto3" json:"upstream_request_timeout,omitempty"` + // Indicates local codec level reset was sent on the stream. + LocalReset bool `protobuf:"varint,4,opt,name=local_reset,json=localReset,proto3" json:"local_reset,omitempty"` + // Indicates remote codec level reset was received on the stream. + UpstreamRemoteReset bool `protobuf:"varint,5,opt,name=upstream_remote_reset,json=upstreamRemoteReset,proto3" json:"upstream_remote_reset,omitempty"` + // Indicates there was a local reset by a connection pool due to an initial connection failure. + UpstreamConnectionFailure bool `protobuf:"varint,6,opt,name=upstream_connection_failure,json=upstreamConnectionFailure,proto3" json:"upstream_connection_failure,omitempty"` + // Indicates the stream was reset due to an upstream connection termination. + UpstreamConnectionTermination bool `protobuf:"varint,7,opt,name=upstream_connection_termination,json=upstreamConnectionTermination,proto3" json:"upstream_connection_termination,omitempty"` + // Indicates the stream was reset because of a resource overflow. + UpstreamOverflow bool `protobuf:"varint,8,opt,name=upstream_overflow,json=upstreamOverflow,proto3" json:"upstream_overflow,omitempty"` + // Indicates no route was found for the request. + NoRouteFound bool `protobuf:"varint,9,opt,name=no_route_found,json=noRouteFound,proto3" json:"no_route_found,omitempty"` + // Indicates that the request was delayed before proxying. 
+ DelayInjected bool `protobuf:"varint,10,opt,name=delay_injected,json=delayInjected,proto3" json:"delay_injected,omitempty"` + // Indicates that the request was aborted with an injected error code. + FaultInjected bool `protobuf:"varint,11,opt,name=fault_injected,json=faultInjected,proto3" json:"fault_injected,omitempty"` + // Indicates that the request was rate-limited locally. + RateLimited bool `protobuf:"varint,12,opt,name=rate_limited,json=rateLimited,proto3" json:"rate_limited,omitempty"` + // Indicates if the request was deemed unauthorized and the reason for it. + UnauthorizedDetails *ResponseFlags_Unauthorized `protobuf:"bytes,13,opt,name=unauthorized_details,json=unauthorizedDetails,proto3" json:"unauthorized_details,omitempty"` + // Indicates that the request was rejected because there was an error in rate limit service. + RateLimitServiceError bool `protobuf:"varint,14,opt,name=rate_limit_service_error,json=rateLimitServiceError,proto3" json:"rate_limit_service_error,omitempty"` + // Indicates the stream was reset due to a downstream connection termination. + DownstreamConnectionTermination bool `protobuf:"varint,15,opt,name=downstream_connection_termination,json=downstreamConnectionTermination,proto3" json:"downstream_connection_termination,omitempty"` + // Indicates that the upstream retry limit was exceeded, resulting in a downstream error. + UpstreamRetryLimitExceeded bool `protobuf:"varint,16,opt,name=upstream_retry_limit_exceeded,json=upstreamRetryLimitExceeded,proto3" json:"upstream_retry_limit_exceeded,omitempty"` + // Indicates that the stream idle timeout was hit, resulting in a downstream 408. + StreamIdleTimeout bool `protobuf:"varint,17,opt,name=stream_idle_timeout,json=streamIdleTimeout,proto3" json:"stream_idle_timeout,omitempty"` + // Indicates that the request was rejected because an envoy request header failed strict + // validation. + InvalidEnvoyRequestHeaders bool `protobuf:"varint,18,opt,name=invalid_envoy_request_headers,json=invalidEnvoyRequestHeaders,proto3" json:"invalid_envoy_request_headers,omitempty"` + // Indicates there was an HTTP protocol error on the downstream request. + DownstreamProtocolError bool `protobuf:"varint,19,opt,name=downstream_protocol_error,json=downstreamProtocolError,proto3" json:"downstream_protocol_error,omitempty"` + // Indicates there was a max stream duration reached on the upstream request. + UpstreamMaxStreamDurationReached bool `protobuf:"varint,20,opt,name=upstream_max_stream_duration_reached,json=upstreamMaxStreamDurationReached,proto3" json:"upstream_max_stream_duration_reached,omitempty"` + // Indicates the response was served from a cache filter. + ResponseFromCacheFilter bool `protobuf:"varint,21,opt,name=response_from_cache_filter,json=responseFromCacheFilter,proto3" json:"response_from_cache_filter,omitempty"` + // Indicates that a filter configuration is not available. + NoFilterConfigFound bool `protobuf:"varint,22,opt,name=no_filter_config_found,json=noFilterConfigFound,proto3" json:"no_filter_config_found,omitempty"` + // Indicates that request or connection exceeded the downstream connection duration. + DurationTimeout bool `protobuf:"varint,23,opt,name=duration_timeout,json=durationTimeout,proto3" json:"duration_timeout,omitempty"` + // Indicates there was an HTTP protocol error in the upstream response. 
+ UpstreamProtocolError bool `protobuf:"varint,24,opt,name=upstream_protocol_error,json=upstreamProtocolError,proto3" json:"upstream_protocol_error,omitempty"` + // Indicates no cluster was found for the request. + NoClusterFound bool `protobuf:"varint,25,opt,name=no_cluster_found,json=noClusterFound,proto3" json:"no_cluster_found,omitempty"` + // Indicates overload manager terminated the request. + OverloadManager bool `protobuf:"varint,26,opt,name=overload_manager,json=overloadManager,proto3" json:"overload_manager,omitempty"` + // Indicates a DNS resolution failed. + DnsResolutionFailure bool `protobuf:"varint,27,opt,name=dns_resolution_failure,json=dnsResolutionFailure,proto3" json:"dns_resolution_failure,omitempty"` + // Indicates a downstream remote codec level reset was received on the stream + DownstreamRemoteReset bool `protobuf:"varint,28,opt,name=downstream_remote_reset,json=downstreamRemoteReset,proto3" json:"downstream_remote_reset,omitempty"` +} + +func (x *ResponseFlags) Reset() { + *x = ResponseFlags{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseFlags) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponseFlags) ProtoMessage() {} + +func (x *ResponseFlags) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseFlags.ProtoReflect.Descriptor instead. +func (*ResponseFlags) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{4} +} + +func (x *ResponseFlags) GetFailedLocalHealthcheck() bool { + if x != nil { + return x.FailedLocalHealthcheck + } + return false +} + +func (x *ResponseFlags) GetNoHealthyUpstream() bool { + if x != nil { + return x.NoHealthyUpstream + } + return false +} + +func (x *ResponseFlags) GetUpstreamRequestTimeout() bool { + if x != nil { + return x.UpstreamRequestTimeout + } + return false +} + +func (x *ResponseFlags) GetLocalReset() bool { + if x != nil { + return x.LocalReset + } + return false +} + +func (x *ResponseFlags) GetUpstreamRemoteReset() bool { + if x != nil { + return x.UpstreamRemoteReset + } + return false +} + +func (x *ResponseFlags) GetUpstreamConnectionFailure() bool { + if x != nil { + return x.UpstreamConnectionFailure + } + return false +} + +func (x *ResponseFlags) GetUpstreamConnectionTermination() bool { + if x != nil { + return x.UpstreamConnectionTermination + } + return false +} + +func (x *ResponseFlags) GetUpstreamOverflow() bool { + if x != nil { + return x.UpstreamOverflow + } + return false +} + +func (x *ResponseFlags) GetNoRouteFound() bool { + if x != nil { + return x.NoRouteFound + } + return false +} + +func (x *ResponseFlags) GetDelayInjected() bool { + if x != nil { + return x.DelayInjected + } + return false +} + +func (x *ResponseFlags) GetFaultInjected() bool { + if x != nil { + return x.FaultInjected + } + return false +} + +func (x *ResponseFlags) GetRateLimited() bool { + if x != nil { + return x.RateLimited + } + return false +} + +func (x *ResponseFlags) GetUnauthorizedDetails() *ResponseFlags_Unauthorized { + if x != nil { + return x.UnauthorizedDetails + } + return nil +} + +func (x 
*ResponseFlags) GetRateLimitServiceError() bool { + if x != nil { + return x.RateLimitServiceError + } + return false +} + +func (x *ResponseFlags) GetDownstreamConnectionTermination() bool { + if x != nil { + return x.DownstreamConnectionTermination + } + return false +} + +func (x *ResponseFlags) GetUpstreamRetryLimitExceeded() bool { + if x != nil { + return x.UpstreamRetryLimitExceeded + } + return false +} + +func (x *ResponseFlags) GetStreamIdleTimeout() bool { + if x != nil { + return x.StreamIdleTimeout + } + return false +} + +func (x *ResponseFlags) GetInvalidEnvoyRequestHeaders() bool { + if x != nil { + return x.InvalidEnvoyRequestHeaders + } + return false +} + +func (x *ResponseFlags) GetDownstreamProtocolError() bool { + if x != nil { + return x.DownstreamProtocolError + } + return false +} + +func (x *ResponseFlags) GetUpstreamMaxStreamDurationReached() bool { + if x != nil { + return x.UpstreamMaxStreamDurationReached + } + return false +} + +func (x *ResponseFlags) GetResponseFromCacheFilter() bool { + if x != nil { + return x.ResponseFromCacheFilter + } + return false +} + +func (x *ResponseFlags) GetNoFilterConfigFound() bool { + if x != nil { + return x.NoFilterConfigFound + } + return false +} + +func (x *ResponseFlags) GetDurationTimeout() bool { + if x != nil { + return x.DurationTimeout + } + return false +} + +func (x *ResponseFlags) GetUpstreamProtocolError() bool { + if x != nil { + return x.UpstreamProtocolError + } + return false +} + +func (x *ResponseFlags) GetNoClusterFound() bool { + if x != nil { + return x.NoClusterFound + } + return false +} + +func (x *ResponseFlags) GetOverloadManager() bool { + if x != nil { + return x.OverloadManager + } + return false +} + +func (x *ResponseFlags) GetDnsResolutionFailure() bool { + if x != nil { + return x.DnsResolutionFailure + } + return false +} + +func (x *ResponseFlags) GetDownstreamRemoteReset() bool { + if x != nil { + return x.DownstreamRemoteReset + } + return false +} + +// Properties of a negotiated TLS connection. +// [#next-free-field: 8] +type TLSProperties struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Version of TLS that was negotiated. + TlsVersion TLSProperties_TLSVersion `protobuf:"varint,1,opt,name=tls_version,json=tlsVersion,proto3,enum=envoy.data.accesslog.v3.TLSProperties_TLSVersion" json:"tls_version,omitempty"` + // TLS cipher suite negotiated during handshake. The value is a + // four-digit hex code defined by the IANA TLS Cipher Suite Registry + // (e.g. “009C“ for “TLS_RSA_WITH_AES_128_GCM_SHA256“). + // + // Here it is expressed as an integer. + TlsCipherSuite *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=tls_cipher_suite,json=tlsCipherSuite,proto3" json:"tls_cipher_suite,omitempty"` + // SNI hostname from handshake. + TlsSniHostname string `protobuf:"bytes,3,opt,name=tls_sni_hostname,json=tlsSniHostname,proto3" json:"tls_sni_hostname,omitempty"` + // Properties of the local certificate used to negotiate TLS. + LocalCertificateProperties *TLSProperties_CertificateProperties `protobuf:"bytes,4,opt,name=local_certificate_properties,json=localCertificateProperties,proto3" json:"local_certificate_properties,omitempty"` + // Properties of the peer certificate used to negotiate TLS. + PeerCertificateProperties *TLSProperties_CertificateProperties `protobuf:"bytes,5,opt,name=peer_certificate_properties,json=peerCertificateProperties,proto3" json:"peer_certificate_properties,omitempty"` + // The TLS session ID. 
+ TlsSessionId string `protobuf:"bytes,6,opt,name=tls_session_id,json=tlsSessionId,proto3" json:"tls_session_id,omitempty"` + // The “JA3“ fingerprint when “JA3“ fingerprinting is enabled. + Ja3Fingerprint string `protobuf:"bytes,7,opt,name=ja3_fingerprint,json=ja3Fingerprint,proto3" json:"ja3_fingerprint,omitempty"` +} + +func (x *TLSProperties) Reset() { + *x = TLSProperties{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TLSProperties) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TLSProperties) ProtoMessage() {} + +func (x *TLSProperties) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TLSProperties.ProtoReflect.Descriptor instead. +func (*TLSProperties) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{5} +} + +func (x *TLSProperties) GetTlsVersion() TLSProperties_TLSVersion { + if x != nil { + return x.TlsVersion + } + return TLSProperties_VERSION_UNSPECIFIED +} + +func (x *TLSProperties) GetTlsCipherSuite() *wrapperspb.UInt32Value { + if x != nil { + return x.TlsCipherSuite + } + return nil +} + +func (x *TLSProperties) GetTlsSniHostname() string { + if x != nil { + return x.TlsSniHostname + } + return "" +} + +func (x *TLSProperties) GetLocalCertificateProperties() *TLSProperties_CertificateProperties { + if x != nil { + return x.LocalCertificateProperties + } + return nil +} + +func (x *TLSProperties) GetPeerCertificateProperties() *TLSProperties_CertificateProperties { + if x != nil { + return x.PeerCertificateProperties + } + return nil +} + +func (x *TLSProperties) GetTlsSessionId() string { + if x != nil { + return x.TlsSessionId + } + return "" +} + +func (x *TLSProperties) GetJa3Fingerprint() string { + if x != nil { + return x.Ja3Fingerprint + } + return "" +} + +// [#next-free-field: 16] +type HTTPRequestProperties struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The request method (RFC 7231/2616). + RequestMethod v3.RequestMethod `protobuf:"varint,1,opt,name=request_method,json=requestMethod,proto3,enum=envoy.config.core.v3.RequestMethod" json:"request_method,omitempty"` + // The scheme portion of the incoming request URI. + Scheme string `protobuf:"bytes,2,opt,name=scheme,proto3" json:"scheme,omitempty"` + // HTTP/2 “:authority“ or HTTP/1.1 “Host“ header value. + Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` + // The port of the incoming request URI + // (unused currently, as port is composed onto authority). + Port *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=port,proto3" json:"port,omitempty"` + // The path portion from the incoming request URI. + Path string `protobuf:"bytes,5,opt,name=path,proto3" json:"path,omitempty"` + // Value of the “User-Agent“ request header. + UserAgent string `protobuf:"bytes,6,opt,name=user_agent,json=userAgent,proto3" json:"user_agent,omitempty"` + // Value of the “Referer“ request header. 
+ Referer string `protobuf:"bytes,7,opt,name=referer,proto3" json:"referer,omitempty"` + // Value of the “X-Forwarded-For“ request header. + ForwardedFor string `protobuf:"bytes,8,opt,name=forwarded_for,json=forwardedFor,proto3" json:"forwarded_for,omitempty"` + // Value of the “X-Request-Id“ request header + // + // This header is used by Envoy to uniquely identify a request. + // It will be generated for all external requests and internal requests that + // do not already have a request ID. + RequestId string `protobuf:"bytes,9,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Value of the “X-Envoy-Original-Path“ request header. + OriginalPath string `protobuf:"bytes,10,opt,name=original_path,json=originalPath,proto3" json:"original_path,omitempty"` + // Size of the HTTP request headers in bytes. + // + // This value is captured from the OSI layer 7 perspective, i.e. it does not + // include overhead from framing or encoding at other networking layers. + RequestHeadersBytes uint64 `protobuf:"varint,11,opt,name=request_headers_bytes,json=requestHeadersBytes,proto3" json:"request_headers_bytes,omitempty"` + // Size of the HTTP request body in bytes. + // + // This value is captured from the OSI layer 7 perspective, i.e. it does not + // include overhead from framing or encoding at other networking layers. + RequestBodyBytes uint64 `protobuf:"varint,12,opt,name=request_body_bytes,json=requestBodyBytes,proto3" json:"request_body_bytes,omitempty"` + // Map of additional headers that have been configured to be logged. + RequestHeaders map[string]string `protobuf:"bytes,13,rep,name=request_headers,json=requestHeaders,proto3" json:"request_headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Number of header bytes sent to the upstream by the http stream, including protocol overhead. + // + // This value accumulates during upstream retries. + UpstreamHeaderBytesSent uint64 `protobuf:"varint,14,opt,name=upstream_header_bytes_sent,json=upstreamHeaderBytesSent,proto3" json:"upstream_header_bytes_sent,omitempty"` + // Number of header bytes received from the downstream by the http stream, including protocol overhead. + DownstreamHeaderBytesReceived uint64 `protobuf:"varint,15,opt,name=downstream_header_bytes_received,json=downstreamHeaderBytesReceived,proto3" json:"downstream_header_bytes_received,omitempty"` +} + +func (x *HTTPRequestProperties) Reset() { + *x = HTTPRequestProperties{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HTTPRequestProperties) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HTTPRequestProperties) ProtoMessage() {} + +func (x *HTTPRequestProperties) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HTTPRequestProperties.ProtoReflect.Descriptor instead. 
+func (*HTTPRequestProperties) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{6} +} + +func (x *HTTPRequestProperties) GetRequestMethod() v3.RequestMethod { + if x != nil { + return x.RequestMethod + } + return v3.RequestMethod(0) +} + +func (x *HTTPRequestProperties) GetScheme() string { + if x != nil { + return x.Scheme + } + return "" +} + +func (x *HTTPRequestProperties) GetAuthority() string { + if x != nil { + return x.Authority + } + return "" +} + +func (x *HTTPRequestProperties) GetPort() *wrapperspb.UInt32Value { + if x != nil { + return x.Port + } + return nil +} + +func (x *HTTPRequestProperties) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *HTTPRequestProperties) GetUserAgent() string { + if x != nil { + return x.UserAgent + } + return "" +} + +func (x *HTTPRequestProperties) GetReferer() string { + if x != nil { + return x.Referer + } + return "" +} + +func (x *HTTPRequestProperties) GetForwardedFor() string { + if x != nil { + return x.ForwardedFor + } + return "" +} + +func (x *HTTPRequestProperties) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +func (x *HTTPRequestProperties) GetOriginalPath() string { + if x != nil { + return x.OriginalPath + } + return "" +} + +func (x *HTTPRequestProperties) GetRequestHeadersBytes() uint64 { + if x != nil { + return x.RequestHeadersBytes + } + return 0 +} + +func (x *HTTPRequestProperties) GetRequestBodyBytes() uint64 { + if x != nil { + return x.RequestBodyBytes + } + return 0 +} + +func (x *HTTPRequestProperties) GetRequestHeaders() map[string]string { + if x != nil { + return x.RequestHeaders + } + return nil +} + +func (x *HTTPRequestProperties) GetUpstreamHeaderBytesSent() uint64 { + if x != nil { + return x.UpstreamHeaderBytesSent + } + return 0 +} + +func (x *HTTPRequestProperties) GetDownstreamHeaderBytesReceived() uint64 { + if x != nil { + return x.DownstreamHeaderBytesReceived + } + return 0 +} + +// [#next-free-field: 9] +type HTTPResponseProperties struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The HTTP response code returned by Envoy. + ResponseCode *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=response_code,json=responseCode,proto3" json:"response_code,omitempty"` + // Size of the HTTP response headers in bytes. + // + // This value is captured from the OSI layer 7 perspective, i.e. it does not + // include protocol overhead or overhead from framing or encoding at other networking layers. + ResponseHeadersBytes uint64 `protobuf:"varint,2,opt,name=response_headers_bytes,json=responseHeadersBytes,proto3" json:"response_headers_bytes,omitempty"` + // Size of the HTTP response body in bytes. + // + // This value is captured from the OSI layer 7 perspective, i.e. it does not + // include overhead from framing or encoding at other networking layers. + ResponseBodyBytes uint64 `protobuf:"varint,3,opt,name=response_body_bytes,json=responseBodyBytes,proto3" json:"response_body_bytes,omitempty"` + // Map of additional headers configured to be logged. + ResponseHeaders map[string]string `protobuf:"bytes,4,rep,name=response_headers,json=responseHeaders,proto3" json:"response_headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Map of trailers configured to be logged. 
+ ResponseTrailers map[string]string `protobuf:"bytes,5,rep,name=response_trailers,json=responseTrailers,proto3" json:"response_trailers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The HTTP response code details. + ResponseCodeDetails string `protobuf:"bytes,6,opt,name=response_code_details,json=responseCodeDetails,proto3" json:"response_code_details,omitempty"` + // Number of header bytes received from the upstream by the http stream, including protocol overhead. + UpstreamHeaderBytesReceived uint64 `protobuf:"varint,7,opt,name=upstream_header_bytes_received,json=upstreamHeaderBytesReceived,proto3" json:"upstream_header_bytes_received,omitempty"` + // Number of header bytes sent to the downstream by the http stream, including protocol overhead. + DownstreamHeaderBytesSent uint64 `protobuf:"varint,8,opt,name=downstream_header_bytes_sent,json=downstreamHeaderBytesSent,proto3" json:"downstream_header_bytes_sent,omitempty"` +} + +func (x *HTTPResponseProperties) Reset() { + *x = HTTPResponseProperties{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HTTPResponseProperties) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HTTPResponseProperties) ProtoMessage() {} + +func (x *HTTPResponseProperties) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HTTPResponseProperties.ProtoReflect.Descriptor instead. 
+func (*HTTPResponseProperties) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{7} +} + +func (x *HTTPResponseProperties) GetResponseCode() *wrapperspb.UInt32Value { + if x != nil { + return x.ResponseCode + } + return nil +} + +func (x *HTTPResponseProperties) GetResponseHeadersBytes() uint64 { + if x != nil { + return x.ResponseHeadersBytes + } + return 0 +} + +func (x *HTTPResponseProperties) GetResponseBodyBytes() uint64 { + if x != nil { + return x.ResponseBodyBytes + } + return 0 +} + +func (x *HTTPResponseProperties) GetResponseHeaders() map[string]string { + if x != nil { + return x.ResponseHeaders + } + return nil +} + +func (x *HTTPResponseProperties) GetResponseTrailers() map[string]string { + if x != nil { + return x.ResponseTrailers + } + return nil +} + +func (x *HTTPResponseProperties) GetResponseCodeDetails() string { + if x != nil { + return x.ResponseCodeDetails + } + return "" +} + +func (x *HTTPResponseProperties) GetUpstreamHeaderBytesReceived() uint64 { + if x != nil { + return x.UpstreamHeaderBytesReceived + } + return 0 +} + +func (x *HTTPResponseProperties) GetDownstreamHeaderBytesSent() uint64 { + if x != nil { + return x.DownstreamHeaderBytesSent + } + return 0 +} + +type ResponseFlags_Unauthorized struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Reason ResponseFlags_Unauthorized_Reason `protobuf:"varint,1,opt,name=reason,proto3,enum=envoy.data.accesslog.v3.ResponseFlags_Unauthorized_Reason" json:"reason,omitempty"` +} + +func (x *ResponseFlags_Unauthorized) Reset() { + *x = ResponseFlags_Unauthorized{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseFlags_Unauthorized) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponseFlags_Unauthorized) ProtoMessage() {} + +func (x *ResponseFlags_Unauthorized) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseFlags_Unauthorized.ProtoReflect.Descriptor instead. +func (*ResponseFlags_Unauthorized) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *ResponseFlags_Unauthorized) GetReason() ResponseFlags_Unauthorized_Reason { + if x != nil { + return x.Reason + } + return ResponseFlags_Unauthorized_REASON_UNSPECIFIED +} + +type TLSProperties_CertificateProperties struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // SANs present in the certificate. + SubjectAltName []*TLSProperties_CertificateProperties_SubjectAltName `protobuf:"bytes,1,rep,name=subject_alt_name,json=subjectAltName,proto3" json:"subject_alt_name,omitempty"` + // The subject field of the certificate. + Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"` + // The issuer field of the certificate. 
+ Issuer string `protobuf:"bytes,3,opt,name=issuer,proto3" json:"issuer,omitempty"` +} + +func (x *TLSProperties_CertificateProperties) Reset() { + *x = TLSProperties_CertificateProperties{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TLSProperties_CertificateProperties) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TLSProperties_CertificateProperties) ProtoMessage() {} + +func (x *TLSProperties_CertificateProperties) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TLSProperties_CertificateProperties.ProtoReflect.Descriptor instead. +func (*TLSProperties_CertificateProperties) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{5, 0} +} + +func (x *TLSProperties_CertificateProperties) GetSubjectAltName() []*TLSProperties_CertificateProperties_SubjectAltName { + if x != nil { + return x.SubjectAltName + } + return nil +} + +func (x *TLSProperties_CertificateProperties) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + +func (x *TLSProperties_CertificateProperties) GetIssuer() string { + if x != nil { + return x.Issuer + } + return "" +} + +type TLSProperties_CertificateProperties_SubjectAltName struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to San: + // + // *TLSProperties_CertificateProperties_SubjectAltName_Uri + // *TLSProperties_CertificateProperties_SubjectAltName_Dns + San isTLSProperties_CertificateProperties_SubjectAltName_San `protobuf_oneof:"san"` +} + +func (x *TLSProperties_CertificateProperties_SubjectAltName) Reset() { + *x = TLSProperties_CertificateProperties_SubjectAltName{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TLSProperties_CertificateProperties_SubjectAltName) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TLSProperties_CertificateProperties_SubjectAltName) ProtoMessage() {} + +func (x *TLSProperties_CertificateProperties_SubjectAltName) ProtoReflect() protoreflect.Message { + mi := &file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TLSProperties_CertificateProperties_SubjectAltName.ProtoReflect.Descriptor instead. 
+func (*TLSProperties_CertificateProperties_SubjectAltName) Descriptor() ([]byte, []int) { + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{5, 0, 0} +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName) GetSan() isTLSProperties_CertificateProperties_SubjectAltName_San { + if m != nil { + return m.San + } + return nil +} + +func (x *TLSProperties_CertificateProperties_SubjectAltName) GetUri() string { + if x, ok := x.GetSan().(*TLSProperties_CertificateProperties_SubjectAltName_Uri); ok { + return x.Uri + } + return "" +} + +func (x *TLSProperties_CertificateProperties_SubjectAltName) GetDns() string { + if x, ok := x.GetSan().(*TLSProperties_CertificateProperties_SubjectAltName_Dns); ok { + return x.Dns + } + return "" +} + +type isTLSProperties_CertificateProperties_SubjectAltName_San interface { + isTLSProperties_CertificateProperties_SubjectAltName_San() +} + +type TLSProperties_CertificateProperties_SubjectAltName_Uri struct { + Uri string `protobuf:"bytes,1,opt,name=uri,proto3,oneof"` +} + +type TLSProperties_CertificateProperties_SubjectAltName_Dns struct { + // [#not-implemented-hide:] + Dns string `protobuf:"bytes,2,opt,name=dns,proto3,oneof"` +} + +func (*TLSProperties_CertificateProperties_SubjectAltName_Uri) isTLSProperties_CertificateProperties_SubjectAltName_San() { +} + +func (*TLSProperties_CertificateProperties_SubjectAltName_Dns) isTLSProperties_CertificateProperties_SubjectAltName_San() { +} + +var File_envoy_data_accesslog_v3_accesslog_proto protoreflect.FileDescriptor + +var file_envoy_data_accesslog_v3_accesslog_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x2f, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 
0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x80, 0x02, 0x0a, 0x11, 0x54, 0x43, 0x50, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x55, 0x0a, 0x11, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x52, 0x10, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x12, 0x62, 0x0a, 0x15, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x14, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x43, 0x50, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, + 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x22, 0xf0, 0x03, 0x0a, 0x12, 0x48, 0x54, 0x54, 0x50, 0x41, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x55, 0x0a, + 0x11, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x52, 0x10, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x12, 0x62, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x37, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x48, 0x54, 0x54, 0x50, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 
0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x48, + 0x54, 0x54, 0x50, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x55, 0x0a, 0x0b, 0x48, 0x54, 0x54, 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, + 0x0a, 0x14, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, + 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x48, 0x54, 0x54, 0x50, + 0x31, 0x30, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x48, 0x54, 0x54, 0x50, 0x31, 0x31, 0x10, 0x02, + 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x48, + 0x54, 0x54, 0x50, 0x33, 0x10, 0x04, 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, 0x2a, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x91, 0x01, 0x0a, 0x14, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, + 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x72, 0x65, 0x63, 0x65, + 0x69, 0x76, 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x6e, + 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x73, + 0x65, 0x6e, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, + 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x96, 0x15, + 0x0a, 0x0f, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x12, 0x38, 0x0a, 0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x72, 0x61, 0x74, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, 0x19, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, + 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x61, 0x74, 0x65, 0x12, 0x59, 0x0a, 0x19, 0x64, + 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x17, 0x64, + 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x57, 0x0a, 0x18, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, + 0x72, 0x65, 0x61, 
0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x16, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, + 0x4d, 0x0a, 0x0e, 0x74, 0x6c, 0x73, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x4c, 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, + 0x0d, 0x74, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x39, + 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x49, 0x0a, 0x14, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x78, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x69, 0x6d, 0x65, 0x54, 0x6f, 0x4c, 0x61, 0x73, 0x74, 0x52, 0x78, + 0x42, 0x79, 0x74, 0x65, 0x12, 0x5c, 0x0a, 0x1e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x74, 0x6f, 0x5f, + 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74, + 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x19, 0x74, 0x69, 0x6d, 0x65, 0x54, 0x6f, 0x46, + 0x69, 0x72, 0x73, 0x74, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x78, 0x42, 0x79, + 0x74, 0x65, 0x12, 0x5a, 0x0a, 0x1d, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x6c, 0x61, + 0x73, 0x74, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74, 0x78, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x74, 0x69, 0x6d, 0x65, 0x54, 0x6f, 0x4c, 0x61, 0x73, 0x74, + 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x78, 0x42, 0x79, 0x74, 0x65, 0x12, 0x5c, + 0x0a, 0x1e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, + 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x72, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x19, 0x74, 0x69, 0x6d, 0x65, 0x54, 0x6f, 0x46, 0x69, 0x72, 0x73, 0x74, 0x55, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x78, 0x42, 0x79, 0x74, 0x65, 0x12, 0x5a, 0x0a, 0x1d, + 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x74, 0x6f, 
0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x72, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, + 0x74, 0x69, 0x6d, 0x65, 0x54, 0x6f, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x52, 0x78, 0x42, 0x79, 0x74, 0x65, 0x12, 0x60, 0x0a, 0x20, 0x74, 0x69, 0x6d, 0x65, + 0x5f, 0x74, 0x6f, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1b, 0x74, + 0x69, 0x6d, 0x65, 0x54, 0x6f, 0x46, 0x69, 0x72, 0x73, 0x74, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x54, 0x78, 0x42, 0x79, 0x74, 0x65, 0x12, 0x5e, 0x0a, 0x1f, 0x74, 0x69, + 0x6d, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1a, + 0x74, 0x69, 0x6d, 0x65, 0x54, 0x6f, 0x4c, 0x61, 0x73, 0x74, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x54, 0x78, 0x42, 0x79, 0x74, 0x65, 0x12, 0x55, 0x0a, 0x17, 0x75, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x15, 0x75, 0x70, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x53, 0x0a, 0x16, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6c, 0x6f, + 0x63, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x52, 0x14, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x12, 0x4d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x66, 0x6c, + 0x61, 0x67, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, + 0x73, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, + 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 
0x61, 0x18, 0x11, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x49, 0x0a, 0x21, + 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1e, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, + 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x66, 0x0a, 0x20, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, + 0x1d, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x72, + 0x0a, 0x14, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x12, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x12, 0x59, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x61, 0x67, + 0x73, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x73, 0x12, 0x35, 0x0a, + 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x43, 0x0a, 0x1e, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x1b, 0x75, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x41, 0x74, 
0x74, + 0x65, 0x6d, 0x70, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x44, 0x0a, 0x1e, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x1c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x72, + 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, + 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x18, 0x1a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x64, 0x12, 0x41, 0x0a, 0x16, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x6f, 0x67, + 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, + 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x14, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x4d, 0x0a, 0x23, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, + 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x20, 0x64, 0x6f, + 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x3b, + 0x0a, 0x1a, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x77, 0x69, 0x72, + 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x1d, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x17, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x57, 0x69, + 0x72, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x65, 0x6e, 0x74, 0x12, 0x43, 0x0a, 0x1e, 0x64, + 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x18, 0x1e, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x1b, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x57, + 0x69, 0x72, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, + 0x12, 0x37, 0x0a, 0x18, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x77, 0x69, 0x72, + 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x1f, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x15, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x57, 0x69, 0x72, 0x65, + 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x65, 0x6e, 0x74, 0x12, 0x3f, 0x0a, 0x1c, 0x75, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x18, 0x20, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x19, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x57, 0x69, 0x72, 0x65, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x12, 0x4e, 0x0a, 0x0f, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x21, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0d, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 
0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x5b, 0x0a, 0x17, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 0x43, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xa1, 0x0e, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x66, 0x61, 0x69, 0x6c, + 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x16, 0x66, 0x61, 0x69, 0x6c, + 0x65, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65, + 0x63, 0x6b, 0x12, 0x2e, 0x0a, 0x13, 0x6e, 0x6f, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, + 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x11, 0x6e, 0x6f, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x12, 0x38, 0x0a, 0x18, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x16, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x1f, 0x0a, 0x0b, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0a, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x65, 0x74, 0x12, 0x32, 0x0a, + 0x15, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x72, 0x65, 0x73, 0x65, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x75, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x73, 0x65, + 0x74, 0x12, 0x3e, 0x0a, 0x1b, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, + 0x65, 0x12, 0x46, 0x0a, 0x1f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 
0x01, 0x28, 0x08, 0x52, 0x1d, 0x75, 0x70, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, + 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x75, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4f, 0x76, + 0x65, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x5f, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x6e, 0x6f, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x25, 0x0a, 0x0e, + 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x69, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x49, 0x6e, 0x6a, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x69, 0x6e, 0x6a, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x49, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x61, + 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x12, 0x66, 0x0a, + 0x14, 0x75, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x64, 0x65, + 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, + 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, + 0x61, 0x67, 0x73, 0x2e, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, + 0x52, 0x13, 0x75, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x44, 0x65, + 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, + 0x69, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x4a, + 0x0a, 0x21, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x64, 0x6f, 0x77, 0x6e, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x1d, 0x75, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x5f, 0x65, 0x78, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x1a, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x74, 0x72, 0x79, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x45, 0x78, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x12, 0x2e, 0x0a, + 0x13, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x49, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 
0x75, 0x74, 0x12, 0x41, 0x0a, + 0x1d, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x12, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x45, 0x6e, 0x76, + 0x6f, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x12, 0x3a, 0x0a, 0x19, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x13, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x17, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x4e, 0x0a, 0x24, + 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x61, + 0x63, 0x68, 0x65, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x20, 0x75, 0x70, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x4d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x63, 0x68, 0x65, 0x64, 0x12, 0x3b, 0x0a, 0x1a, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x63, 0x61, + 0x63, 0x68, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x43, 0x61, + 0x63, 0x68, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x33, 0x0a, 0x16, 0x6e, 0x6f, 0x5f, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x66, 0x6f, + 0x75, 0x6e, 0x64, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x6e, 0x6f, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x29, + 0x0a, 0x10, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x36, 0x0a, 0x17, 0x75, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x18, 0x18, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x75, 0x70, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x12, 0x28, 0x0a, 0x10, 0x6e, 0x6f, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x6e, 0x6f, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x6f, + 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x18, + 0x1a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x4d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x34, 0x0a, 0x16, 0x64, 0x6e, 0x73, 0x5f, 0x72, 0x65, + 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, + 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x64, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x36, 0x0a, 0x17, + 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 
0x74, + 0x65, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, + 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x65, 0x74, 0x1a, 0xd5, 0x01, 0x0a, 0x0c, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x7a, 0x65, 0x64, 0x12, 0x52, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x2e, 0x55, 0x6e, + 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x2e, 0x52, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x36, 0x0a, 0x06, 0x52, 0x65, 0x61, + 0x73, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x45, + 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, + 0x01, 0x3a, 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x32, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x2e, + 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x3a, 0x2c, 0x9a, 0xc5, + 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x22, 0xc5, 0x08, 0x0a, 0x0d, 0x54, + 0x4c, 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x0b, + 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x4c, 0x53, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x74, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x46, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x5f, 0x73, + 0x75, 0x69, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, + 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, + 0x68, 0x65, 0x72, 0x53, 0x75, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, + 0x73, 0x6e, 0x69, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0e, 0x74, 0x6c, 0x73, 0x53, 0x6e, 0x69, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x7e, 0x0a, 0x1c, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, + 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 
0x54, 0x4c, 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x1a, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, + 0x65, 0x73, 0x12, 0x7c, 0x0a, 0x1b, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x4c, 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x19, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x6c, 0x73, 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, + 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x6c, 0x73, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x6a, 0x61, 0x33, 0x5f, 0x66, 0x69, + 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0e, 0x6a, 0x61, 0x33, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x1a, + 0x99, 0x03, 0x0a, 0x15, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x75, 0x0a, 0x10, 0x73, 0x75, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x4c, + 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x2e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, + 0x52, 0x0e, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, + 0x73, 0x75, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, + 0x65, 0x72, 0x1a, 0x92, 0x01, 0x0a, 0x0e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, + 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x12, 0x0a, 0x03, 0x64, 0x6e, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x64, 0x6e, 0x73, 0x3a, 0x51, 0x9a, + 0xc5, 0x88, 0x1e, 0x4c, 0x0a, 0x4a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, + 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 
0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x2e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, + 0x42, 0x05, 0x0a, 0x03, 0x73, 0x61, 0x6e, 0x3a, 0x42, 0x9a, 0xc5, 0x88, 0x1e, 0x3d, 0x0a, 0x3b, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x57, 0x0a, 0x0a, 0x54, + 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x13, 0x56, 0x45, 0x52, + 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x4c, 0x53, 0x76, 0x31, 0x10, 0x01, 0x12, 0x0b, 0x0a, + 0x07, 0x54, 0x4c, 0x53, 0x76, 0x31, 0x5f, 0x31, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x4c, + 0x53, 0x76, 0x31, 0x5f, 0x32, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x4c, 0x53, 0x76, 0x31, + 0x5f, 0x33, 0x10, 0x04, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, + 0x65, 0x73, 0x22, 0xd9, 0x06, 0x0a, 0x15, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x54, 0x0a, 0x0e, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, + 0x02, 0x10, 0x01, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x75, + 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x30, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1d, + 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x75, 0x73, 0x65, 0x72, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, + 0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x6f, 0x72, 0x77, 0x61, + 0x72, 0x64, 0x65, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 
0x09, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x6f, + 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x50, 0x61, 0x74, 0x68, + 0x12, 0x32, 0x0a, 0x15, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x42, + 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x62, 0x6f, 0x64, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, 0x64, 0x79, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x12, 0x6b, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, + 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, + 0x3b, 0x0a, 0x1a, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x17, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x65, 0x6e, 0x74, 0x12, 0x47, 0x0a, 0x20, + 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, + 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x1d, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x63, + 0x65, 0x69, 0x76, 0x65, 0x64, 0x1a, 0x41, 0x0a, 0x13, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, + 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xa0, + 0x06, 0x0a, 0x16, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x41, 0x0a, 0x0d, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x34, 0x0a, 0x16, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x62, + 0x6f, 0x64, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x11, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x12, 0x6f, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x12, 0x72, 0x0a, 0x11, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, + 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, + 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x43, 0x6f, 0x64, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x43, 0x0a, 0x1e, 0x75, + 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x1b, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, + 0x12, 0x3f, 0x0a, 0x1c, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x73, 0x65, 0x6e, 0x74, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x19, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x65, 0x6e, + 0x74, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, + 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x2a, 0xcb, 0x02, 0x0a, 0x0d, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x6f, 0x74, 0x53, 0x65, 0x74, 0x10, 0x00, 0x12, + 0x18, 0x0a, 0x14, 0x54, 0x63, 0x70, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x63, 0x70, + 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x69, 0x63, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x63, + 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x10, 0x03, + 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x69, 0x63, 0x10, 0x05, 0x12, 0x11, 0x0a, + 0x0d, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x6e, 0x64, 0x10, 0x06, + 0x12, 0x15, 0x0a, 0x11, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x6f, 0x6f, 0x6c, + 0x52, 0x65, 0x61, 0x64, 0x79, 0x10, 0x07, 0x12, 0x14, 0x0a, 0x10, 0x55, 0x70, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x69, 0x63, 0x10, 0x08, 0x12, 0x0f, 0x0a, + 0x0b, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x6e, 0x64, 0x10, 0x09, 0x12, 0x2b, + 0x0a, 0x27, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x75, 0x6e, 0x6e, + 0x65, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x45, 0x73, + 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x64, 0x10, 0x0a, 0x12, 0x1e, 0x0a, 0x1a, 0x55, + 0x64, 0x70, 0x54, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x10, 0x0b, 0x12, 0x0f, 0x0a, 0x0b, 0x55, + 0x64, 0x70, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x69, 0x63, 0x10, 0x0c, 0x12, 0x11, 0x0a, 0x0d, + 0x55, 0x64, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x10, 0x0d, 0x42, + 0x8d, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x33, 0x42, 0x0e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 
0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, + 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x76, 0x33, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_data_accesslog_v3_accesslog_proto_rawDescOnce sync.Once + file_envoy_data_accesslog_v3_accesslog_proto_rawDescData = file_envoy_data_accesslog_v3_accesslog_proto_rawDesc +) + +func file_envoy_data_accesslog_v3_accesslog_proto_rawDescGZIP() []byte { + file_envoy_data_accesslog_v3_accesslog_proto_rawDescOnce.Do(func() { + file_envoy_data_accesslog_v3_accesslog_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_data_accesslog_v3_accesslog_proto_rawDescData) + }) + return file_envoy_data_accesslog_v3_accesslog_proto_rawDescData +} + +var file_envoy_data_accesslog_v3_accesslog_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_envoy_data_accesslog_v3_accesslog_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_envoy_data_accesslog_v3_accesslog_proto_goTypes = []interface{}{ + (AccessLogType)(0), // 0: envoy.data.accesslog.v3.AccessLogType + (HTTPAccessLogEntry_HTTPVersion)(0), // 1: envoy.data.accesslog.v3.HTTPAccessLogEntry.HTTPVersion + (ResponseFlags_Unauthorized_Reason)(0), // 2: envoy.data.accesslog.v3.ResponseFlags.Unauthorized.Reason + (TLSProperties_TLSVersion)(0), // 3: envoy.data.accesslog.v3.TLSProperties.TLSVersion + (*TCPAccessLogEntry)(nil), // 4: envoy.data.accesslog.v3.TCPAccessLogEntry + (*HTTPAccessLogEntry)(nil), // 5: envoy.data.accesslog.v3.HTTPAccessLogEntry + (*ConnectionProperties)(nil), // 6: envoy.data.accesslog.v3.ConnectionProperties + (*AccessLogCommon)(nil), // 7: envoy.data.accesslog.v3.AccessLogCommon + (*ResponseFlags)(nil), // 8: envoy.data.accesslog.v3.ResponseFlags + (*TLSProperties)(nil), // 9: envoy.data.accesslog.v3.TLSProperties + (*HTTPRequestProperties)(nil), // 10: envoy.data.accesslog.v3.HTTPRequestProperties + (*HTTPResponseProperties)(nil), // 11: envoy.data.accesslog.v3.HTTPResponseProperties + nil, // 12: envoy.data.accesslog.v3.AccessLogCommon.FilterStateObjectsEntry + nil, // 13: envoy.data.accesslog.v3.AccessLogCommon.CustomTagsEntry + (*ResponseFlags_Unauthorized)(nil), // 14: envoy.data.accesslog.v3.ResponseFlags.Unauthorized + (*TLSProperties_CertificateProperties)(nil), // 15: envoy.data.accesslog.v3.TLSProperties.CertificateProperties + (*TLSProperties_CertificateProperties_SubjectAltName)(nil), // 16: envoy.data.accesslog.v3.TLSProperties.CertificateProperties.SubjectAltName + nil, // 17: envoy.data.accesslog.v3.HTTPRequestProperties.RequestHeadersEntry + nil, // 18: envoy.data.accesslog.v3.HTTPResponseProperties.ResponseHeadersEntry + nil, // 19: envoy.data.accesslog.v3.HTTPResponseProperties.ResponseTrailersEntry + (*v3.Address)(nil), // 20: envoy.config.core.v3.Address + (*timestamppb.Timestamp)(nil), // 21: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 22: google.protobuf.Duration + (*v3.Metadata)(nil), // 23: envoy.config.core.v3.Metadata + (*wrapperspb.UInt32Value)(nil), // 24: google.protobuf.UInt32Value + (v3.RequestMethod)(0), // 25: envoy.config.core.v3.RequestMethod + (*anypb.Any)(nil), // 26: google.protobuf.Any +} +var file_envoy_data_accesslog_v3_accesslog_proto_depIdxs = []int32{ + 7, // 0: envoy.data.accesslog.v3.TCPAccessLogEntry.common_properties:type_name -> 
envoy.data.accesslog.v3.AccessLogCommon + 6, // 1: envoy.data.accesslog.v3.TCPAccessLogEntry.connection_properties:type_name -> envoy.data.accesslog.v3.ConnectionProperties + 7, // 2: envoy.data.accesslog.v3.HTTPAccessLogEntry.common_properties:type_name -> envoy.data.accesslog.v3.AccessLogCommon + 1, // 3: envoy.data.accesslog.v3.HTTPAccessLogEntry.protocol_version:type_name -> envoy.data.accesslog.v3.HTTPAccessLogEntry.HTTPVersion + 10, // 4: envoy.data.accesslog.v3.HTTPAccessLogEntry.request:type_name -> envoy.data.accesslog.v3.HTTPRequestProperties + 11, // 5: envoy.data.accesslog.v3.HTTPAccessLogEntry.response:type_name -> envoy.data.accesslog.v3.HTTPResponseProperties + 20, // 6: envoy.data.accesslog.v3.AccessLogCommon.downstream_remote_address:type_name -> envoy.config.core.v3.Address + 20, // 7: envoy.data.accesslog.v3.AccessLogCommon.downstream_local_address:type_name -> envoy.config.core.v3.Address + 9, // 8: envoy.data.accesslog.v3.AccessLogCommon.tls_properties:type_name -> envoy.data.accesslog.v3.TLSProperties + 21, // 9: envoy.data.accesslog.v3.AccessLogCommon.start_time:type_name -> google.protobuf.Timestamp + 22, // 10: envoy.data.accesslog.v3.AccessLogCommon.time_to_last_rx_byte:type_name -> google.protobuf.Duration + 22, // 11: envoy.data.accesslog.v3.AccessLogCommon.time_to_first_upstream_tx_byte:type_name -> google.protobuf.Duration + 22, // 12: envoy.data.accesslog.v3.AccessLogCommon.time_to_last_upstream_tx_byte:type_name -> google.protobuf.Duration + 22, // 13: envoy.data.accesslog.v3.AccessLogCommon.time_to_first_upstream_rx_byte:type_name -> google.protobuf.Duration + 22, // 14: envoy.data.accesslog.v3.AccessLogCommon.time_to_last_upstream_rx_byte:type_name -> google.protobuf.Duration + 22, // 15: envoy.data.accesslog.v3.AccessLogCommon.time_to_first_downstream_tx_byte:type_name -> google.protobuf.Duration + 22, // 16: envoy.data.accesslog.v3.AccessLogCommon.time_to_last_downstream_tx_byte:type_name -> google.protobuf.Duration + 20, // 17: envoy.data.accesslog.v3.AccessLogCommon.upstream_remote_address:type_name -> envoy.config.core.v3.Address + 20, // 18: envoy.data.accesslog.v3.AccessLogCommon.upstream_local_address:type_name -> envoy.config.core.v3.Address + 8, // 19: envoy.data.accesslog.v3.AccessLogCommon.response_flags:type_name -> envoy.data.accesslog.v3.ResponseFlags + 23, // 20: envoy.data.accesslog.v3.AccessLogCommon.metadata:type_name -> envoy.config.core.v3.Metadata + 20, // 21: envoy.data.accesslog.v3.AccessLogCommon.downstream_direct_remote_address:type_name -> envoy.config.core.v3.Address + 12, // 22: envoy.data.accesslog.v3.AccessLogCommon.filter_state_objects:type_name -> envoy.data.accesslog.v3.AccessLogCommon.FilterStateObjectsEntry + 13, // 23: envoy.data.accesslog.v3.AccessLogCommon.custom_tags:type_name -> envoy.data.accesslog.v3.AccessLogCommon.CustomTagsEntry + 22, // 24: envoy.data.accesslog.v3.AccessLogCommon.duration:type_name -> google.protobuf.Duration + 0, // 25: envoy.data.accesslog.v3.AccessLogCommon.access_log_type:type_name -> envoy.data.accesslog.v3.AccessLogType + 14, // 26: envoy.data.accesslog.v3.ResponseFlags.unauthorized_details:type_name -> envoy.data.accesslog.v3.ResponseFlags.Unauthorized + 3, // 27: envoy.data.accesslog.v3.TLSProperties.tls_version:type_name -> envoy.data.accesslog.v3.TLSProperties.TLSVersion + 24, // 28: envoy.data.accesslog.v3.TLSProperties.tls_cipher_suite:type_name -> google.protobuf.UInt32Value + 15, // 29: envoy.data.accesslog.v3.TLSProperties.local_certificate_properties:type_name -> 
envoy.data.accesslog.v3.TLSProperties.CertificateProperties + 15, // 30: envoy.data.accesslog.v3.TLSProperties.peer_certificate_properties:type_name -> envoy.data.accesslog.v3.TLSProperties.CertificateProperties + 25, // 31: envoy.data.accesslog.v3.HTTPRequestProperties.request_method:type_name -> envoy.config.core.v3.RequestMethod + 24, // 32: envoy.data.accesslog.v3.HTTPRequestProperties.port:type_name -> google.protobuf.UInt32Value + 17, // 33: envoy.data.accesslog.v3.HTTPRequestProperties.request_headers:type_name -> envoy.data.accesslog.v3.HTTPRequestProperties.RequestHeadersEntry + 24, // 34: envoy.data.accesslog.v3.HTTPResponseProperties.response_code:type_name -> google.protobuf.UInt32Value + 18, // 35: envoy.data.accesslog.v3.HTTPResponseProperties.response_headers:type_name -> envoy.data.accesslog.v3.HTTPResponseProperties.ResponseHeadersEntry + 19, // 36: envoy.data.accesslog.v3.HTTPResponseProperties.response_trailers:type_name -> envoy.data.accesslog.v3.HTTPResponseProperties.ResponseTrailersEntry + 26, // 37: envoy.data.accesslog.v3.AccessLogCommon.FilterStateObjectsEntry.value:type_name -> google.protobuf.Any + 2, // 38: envoy.data.accesslog.v3.ResponseFlags.Unauthorized.reason:type_name -> envoy.data.accesslog.v3.ResponseFlags.Unauthorized.Reason + 16, // 39: envoy.data.accesslog.v3.TLSProperties.CertificateProperties.subject_alt_name:type_name -> envoy.data.accesslog.v3.TLSProperties.CertificateProperties.SubjectAltName + 40, // [40:40] is the sub-list for method output_type + 40, // [40:40] is the sub-list for method input_type + 40, // [40:40] is the sub-list for extension type_name + 40, // [40:40] is the sub-list for extension extendee + 0, // [0:40] is the sub-list for field type_name +} + +func init() { file_envoy_data_accesslog_v3_accesslog_proto_init() } +func file_envoy_data_accesslog_v3_accesslog_proto_init() { + if File_envoy_data_accesslog_v3_accesslog_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TCPAccessLogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HTTPAccessLogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConnectionProperties); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AccessLogCommon); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseFlags); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TLSProperties); i { + case 0: + return &v.state 
+ case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HTTPRequestProperties); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HTTPResponseProperties); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseFlags_Unauthorized); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TLSProperties_CertificateProperties); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TLSProperties_CertificateProperties_SubjectAltName); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_data_accesslog_v3_accesslog_proto_msgTypes[12].OneofWrappers = []interface{}{ + (*TLSProperties_CertificateProperties_SubjectAltName_Uri)(nil), + (*TLSProperties_CertificateProperties_SubjectAltName_Dns)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_data_accesslog_v3_accesslog_proto_rawDesc, + NumEnums: 4, + NumMessages: 16, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_data_accesslog_v3_accesslog_proto_goTypes, + DependencyIndexes: file_envoy_data_accesslog_v3_accesslog_proto_depIdxs, + EnumInfos: file_envoy_data_accesslog_v3_accesslog_proto_enumTypes, + MessageInfos: file_envoy_data_accesslog_v3_accesslog_proto_msgTypes, + }.Build() + File_envoy_data_accesslog_v3_accesslog_proto = out.File + file_envoy_data_accesslog_v3_accesslog_proto_rawDesc = nil + file_envoy_data_accesslog_v3_accesslog_proto_goTypes = nil + file_envoy_data_accesslog_v3_accesslog_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.validate.go new file mode 100644 index 0000000000..7a0fec615d --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog.pb.validate.go @@ -0,0 +1,2257 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/data/accesslog/v3/accesslog.proto + +package accesslogv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = v3.RequestMethod(0) +) + +// Validate checks the field values on TCPAccessLogEntry with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *TCPAccessLogEntry) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TCPAccessLogEntry with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// TCPAccessLogEntryMultiError, or nil if none found. +func (m *TCPAccessLogEntry) ValidateAll() error { + return m.validate(true) +} + +func (m *TCPAccessLogEntry) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetCommonProperties()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TCPAccessLogEntryValidationError{ + field: "CommonProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TCPAccessLogEntryValidationError{ + field: "CommonProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCommonProperties()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TCPAccessLogEntryValidationError{ + field: "CommonProperties", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetConnectionProperties()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TCPAccessLogEntryValidationError{ + field: "ConnectionProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TCPAccessLogEntryValidationError{ + field: "ConnectionProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConnectionProperties()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TCPAccessLogEntryValidationError{ + field: "ConnectionProperties", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return TCPAccessLogEntryMultiError(errors) + } + + return nil +} + +// TCPAccessLogEntryMultiError is an error wrapping multiple validation errors +// returned by TCPAccessLogEntry.ValidateAll() if the designated constraints +// aren't met. +type TCPAccessLogEntryMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m TCPAccessLogEntryMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TCPAccessLogEntryMultiError) AllErrors() []error { return m } + +// TCPAccessLogEntryValidationError is the validation error returned by +// TCPAccessLogEntry.Validate if the designated constraints aren't met. +type TCPAccessLogEntryValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TCPAccessLogEntryValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TCPAccessLogEntryValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TCPAccessLogEntryValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TCPAccessLogEntryValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TCPAccessLogEntryValidationError) ErrorName() string { + return "TCPAccessLogEntryValidationError" +} + +// Error satisfies the builtin error interface +func (e TCPAccessLogEntryValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTCPAccessLogEntry.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TCPAccessLogEntryValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TCPAccessLogEntryValidationError{} + +// Validate checks the field values on HTTPAccessLogEntry with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HTTPAccessLogEntry) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HTTPAccessLogEntry with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HTTPAccessLogEntryMultiError, or nil if none found. 
+func (m *HTTPAccessLogEntry) ValidateAll() error { + return m.validate(true) +} + +func (m *HTTPAccessLogEntry) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetCommonProperties()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPAccessLogEntryValidationError{ + field: "CommonProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPAccessLogEntryValidationError{ + field: "CommonProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCommonProperties()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPAccessLogEntryValidationError{ + field: "CommonProperties", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ProtocolVersion + + if all { + switch v := interface{}(m.GetRequest()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPAccessLogEntryValidationError{ + field: "Request", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPAccessLogEntryValidationError{ + field: "Request", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequest()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPAccessLogEntryValidationError{ + field: "Request", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetResponse()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPAccessLogEntryValidationError{ + field: "Response", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPAccessLogEntryValidationError{ + field: "Response", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResponse()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPAccessLogEntryValidationError{ + field: "Response", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return HTTPAccessLogEntryMultiError(errors) + } + + return nil +} + +// HTTPAccessLogEntryMultiError is an error wrapping multiple validation errors +// returned by HTTPAccessLogEntry.ValidateAll() if the designated constraints +// aren't met. +type HTTPAccessLogEntryMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HTTPAccessLogEntryMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HTTPAccessLogEntryMultiError) AllErrors() []error { return m } + +// HTTPAccessLogEntryValidationError is the validation error returned by +// HTTPAccessLogEntry.Validate if the designated constraints aren't met. 
+type HTTPAccessLogEntryValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HTTPAccessLogEntryValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HTTPAccessLogEntryValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HTTPAccessLogEntryValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HTTPAccessLogEntryValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HTTPAccessLogEntryValidationError) ErrorName() string { + return "HTTPAccessLogEntryValidationError" +} + +// Error satisfies the builtin error interface +func (e HTTPAccessLogEntryValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHTTPAccessLogEntry.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HTTPAccessLogEntryValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HTTPAccessLogEntryValidationError{} + +// Validate checks the field values on ConnectionProperties with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ConnectionProperties) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ConnectionProperties with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ConnectionPropertiesMultiError, or nil if none found. +func (m *ConnectionProperties) ValidateAll() error { + return m.validate(true) +} + +func (m *ConnectionProperties) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for ReceivedBytes + + // no validation rules for SentBytes + + if len(errors) > 0 { + return ConnectionPropertiesMultiError(errors) + } + + return nil +} + +// ConnectionPropertiesMultiError is an error wrapping multiple validation +// errors returned by ConnectionProperties.ValidateAll() if the designated +// constraints aren't met. +type ConnectionPropertiesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ConnectionPropertiesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ConnectionPropertiesMultiError) AllErrors() []error { return m } + +// ConnectionPropertiesValidationError is the validation error returned by +// ConnectionProperties.Validate if the designated constraints aren't met. +type ConnectionPropertiesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ConnectionPropertiesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ConnectionPropertiesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ConnectionPropertiesValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e ConnectionPropertiesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ConnectionPropertiesValidationError) ErrorName() string { + return "ConnectionPropertiesValidationError" +} + +// Error satisfies the builtin error interface +func (e ConnectionPropertiesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sConnectionProperties.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ConnectionPropertiesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ConnectionPropertiesValidationError{} + +// Validate checks the field values on AccessLogCommon with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *AccessLogCommon) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on AccessLogCommon with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// AccessLogCommonMultiError, or nil if none found. +func (m *AccessLogCommon) ValidateAll() error { + return m.validate(true) +} + +func (m *AccessLogCommon) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if val := m.GetSampleRate(); val <= 0 || val > 1 { + err := AccessLogCommonValidationError{ + field: "SampleRate", + reason: "value must be inside range (0, 1]", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetDownstreamRemoteAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "DownstreamRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "DownstreamRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDownstreamRemoteAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "DownstreamRemoteAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetDownstreamLocalAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "DownstreamLocalAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "DownstreamLocalAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDownstreamLocalAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "DownstreamLocalAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := 
interface{}(m.GetTlsProperties()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TlsProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TlsProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTlsProperties()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "TlsProperties", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetStartTime()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "StartTime", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "StartTime", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStartTime()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "StartTime", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTimeToLastRxByte()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToLastRxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToLastRxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeToLastRxByte()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "TimeToLastRxByte", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTimeToFirstUpstreamTxByte()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToFirstUpstreamTxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToFirstUpstreamTxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeToFirstUpstreamTxByte()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "TimeToFirstUpstreamTxByte", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTimeToLastUpstreamTxByte()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToLastUpstreamTxByte", 
+ reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToLastUpstreamTxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeToLastUpstreamTxByte()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "TimeToLastUpstreamTxByte", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTimeToFirstUpstreamRxByte()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToFirstUpstreamRxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToFirstUpstreamRxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeToFirstUpstreamRxByte()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "TimeToFirstUpstreamRxByte", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTimeToLastUpstreamRxByte()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToLastUpstreamRxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToLastUpstreamRxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeToLastUpstreamRxByte()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "TimeToLastUpstreamRxByte", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTimeToFirstDownstreamTxByte()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToFirstDownstreamTxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToFirstDownstreamTxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeToFirstDownstreamTxByte()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "TimeToFirstDownstreamTxByte", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTimeToLastDownstreamTxByte()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToLastDownstreamTxByte", + reason: "embedded message failed 
validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "TimeToLastDownstreamTxByte", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTimeToLastDownstreamTxByte()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "TimeToLastDownstreamTxByte", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUpstreamRemoteAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "UpstreamRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "UpstreamRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUpstreamRemoteAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "UpstreamRemoteAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUpstreamLocalAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "UpstreamLocalAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "UpstreamLocalAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUpstreamLocalAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "UpstreamLocalAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for UpstreamCluster + + if all { + switch v := interface{}(m.GetResponseFlags()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "ResponseFlags", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "ResponseFlags", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResponseFlags()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "ResponseFlags", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = 
append(errors, AccessLogCommonValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for UpstreamTransportFailureReason + + // no validation rules for RouteName + + if all { + switch v := interface{}(m.GetDownstreamDirectRemoteAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "DownstreamDirectRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "DownstreamDirectRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDownstreamDirectRemoteAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "DownstreamDirectRemoteAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + { + sorted_keys := make([]string, len(m.GetFilterStateObjects())) + i := 0 + for key := range m.GetFilterStateObjects() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetFilterStateObjects()[key] + _ = val + + // no validation rules for FilterStateObjects[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: fmt.Sprintf("FilterStateObjects[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: fmt.Sprintf("FilterStateObjects[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: fmt.Sprintf("FilterStateObjects[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + // no validation rules for CustomTags + + if all { + switch v := interface{}(m.GetDuration()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "Duration", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AccessLogCommonValidationError{ + field: "Duration", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDuration()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AccessLogCommonValidationError{ + field: "Duration", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for 
UpstreamRequestAttemptCount + + // no validation rules for ConnectionTerminationDetails + + // no validation rules for StreamId + + // no validation rules for IntermediateLogEntry + + // no validation rules for DownstreamTransportFailureReason + + // no validation rules for DownstreamWireBytesSent + + // no validation rules for DownstreamWireBytesReceived + + // no validation rules for UpstreamWireBytesSent + + // no validation rules for UpstreamWireBytesReceived + + // no validation rules for AccessLogType + + if len(errors) > 0 { + return AccessLogCommonMultiError(errors) + } + + return nil +} + +// AccessLogCommonMultiError is an error wrapping multiple validation errors +// returned by AccessLogCommon.ValidateAll() if the designated constraints +// aren't met. +type AccessLogCommonMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AccessLogCommonMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AccessLogCommonMultiError) AllErrors() []error { return m } + +// AccessLogCommonValidationError is the validation error returned by +// AccessLogCommon.Validate if the designated constraints aren't met. +type AccessLogCommonValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AccessLogCommonValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AccessLogCommonValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e AccessLogCommonValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AccessLogCommonValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e AccessLogCommonValidationError) ErrorName() string { return "AccessLogCommonValidationError" } + +// Error satisfies the builtin error interface +func (e AccessLogCommonValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAccessLogCommon.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AccessLogCommonValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AccessLogCommonValidationError{} + +// Validate checks the field values on ResponseFlags with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ResponseFlags) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResponseFlags with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ResponseFlagsMultiError, or +// nil if none found. 
+func (m *ResponseFlags) ValidateAll() error { + return m.validate(true) +} + +func (m *ResponseFlags) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for FailedLocalHealthcheck + + // no validation rules for NoHealthyUpstream + + // no validation rules for UpstreamRequestTimeout + + // no validation rules for LocalReset + + // no validation rules for UpstreamRemoteReset + + // no validation rules for UpstreamConnectionFailure + + // no validation rules for UpstreamConnectionTermination + + // no validation rules for UpstreamOverflow + + // no validation rules for NoRouteFound + + // no validation rules for DelayInjected + + // no validation rules for FaultInjected + + // no validation rules for RateLimited + + if all { + switch v := interface{}(m.GetUnauthorizedDetails()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResponseFlagsValidationError{ + field: "UnauthorizedDetails", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResponseFlagsValidationError{ + field: "UnauthorizedDetails", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUnauthorizedDetails()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResponseFlagsValidationError{ + field: "UnauthorizedDetails", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for RateLimitServiceError + + // no validation rules for DownstreamConnectionTermination + + // no validation rules for UpstreamRetryLimitExceeded + + // no validation rules for StreamIdleTimeout + + // no validation rules for InvalidEnvoyRequestHeaders + + // no validation rules for DownstreamProtocolError + + // no validation rules for UpstreamMaxStreamDurationReached + + // no validation rules for ResponseFromCacheFilter + + // no validation rules for NoFilterConfigFound + + // no validation rules for DurationTimeout + + // no validation rules for UpstreamProtocolError + + // no validation rules for NoClusterFound + + // no validation rules for OverloadManager + + // no validation rules for DnsResolutionFailure + + // no validation rules for DownstreamRemoteReset + + if len(errors) > 0 { + return ResponseFlagsMultiError(errors) + } + + return nil +} + +// ResponseFlagsMultiError is an error wrapping multiple validation errors +// returned by ResponseFlags.ValidateAll() if the designated constraints +// aren't met. +type ResponseFlagsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResponseFlagsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResponseFlagsMultiError) AllErrors() []error { return m } + +// ResponseFlagsValidationError is the validation error returned by +// ResponseFlags.Validate if the designated constraints aren't met. +type ResponseFlagsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResponseFlagsValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e ResponseFlagsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResponseFlagsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResponseFlagsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResponseFlagsValidationError) ErrorName() string { return "ResponseFlagsValidationError" } + +// Error satisfies the builtin error interface +func (e ResponseFlagsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResponseFlags.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResponseFlagsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResponseFlagsValidationError{} + +// Validate checks the field values on TLSProperties with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *TLSProperties) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TLSProperties with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in TLSPropertiesMultiError, or +// nil if none found. +func (m *TLSProperties) ValidateAll() error { + return m.validate(true) +} + +func (m *TLSProperties) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for TlsVersion + + if all { + switch v := interface{}(m.GetTlsCipherSuite()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TLSPropertiesValidationError{ + field: "TlsCipherSuite", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TLSPropertiesValidationError{ + field: "TlsCipherSuite", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTlsCipherSuite()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TLSPropertiesValidationError{ + field: "TlsCipherSuite", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for TlsSniHostname + + if all { + switch v := interface{}(m.GetLocalCertificateProperties()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TLSPropertiesValidationError{ + field: "LocalCertificateProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TLSPropertiesValidationError{ + field: "LocalCertificateProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalCertificateProperties()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TLSPropertiesValidationError{ + field: "LocalCertificateProperties", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := 
interface{}(m.GetPeerCertificateProperties()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TLSPropertiesValidationError{ + field: "PeerCertificateProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TLSPropertiesValidationError{ + field: "PeerCertificateProperties", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPeerCertificateProperties()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TLSPropertiesValidationError{ + field: "PeerCertificateProperties", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for TlsSessionId + + // no validation rules for Ja3Fingerprint + + if len(errors) > 0 { + return TLSPropertiesMultiError(errors) + } + + return nil +} + +// TLSPropertiesMultiError is an error wrapping multiple validation errors +// returned by TLSProperties.ValidateAll() if the designated constraints +// aren't met. +type TLSPropertiesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TLSPropertiesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TLSPropertiesMultiError) AllErrors() []error { return m } + +// TLSPropertiesValidationError is the validation error returned by +// TLSProperties.Validate if the designated constraints aren't met. +type TLSPropertiesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TLSPropertiesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TLSPropertiesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TLSPropertiesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TLSPropertiesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TLSPropertiesValidationError) ErrorName() string { return "TLSPropertiesValidationError" } + +// Error satisfies the builtin error interface +func (e TLSPropertiesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTLSProperties.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TLSPropertiesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TLSPropertiesValidationError{} + +// Validate checks the field values on HTTPRequestProperties with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HTTPRequestProperties) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HTTPRequestProperties with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// HTTPRequestPropertiesMultiError, or nil if none found. +func (m *HTTPRequestProperties) ValidateAll() error { + return m.validate(true) +} + +func (m *HTTPRequestProperties) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := v3.RequestMethod_name[int32(m.GetRequestMethod())]; !ok { + err := HTTPRequestPropertiesValidationError{ + field: "RequestMethod", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for Scheme + + // no validation rules for Authority + + if all { + switch v := interface{}(m.GetPort()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPRequestPropertiesValidationError{ + field: "Port", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPRequestPropertiesValidationError{ + field: "Port", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPort()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPRequestPropertiesValidationError{ + field: "Port", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Path + + // no validation rules for UserAgent + + // no validation rules for Referer + + // no validation rules for ForwardedFor + + // no validation rules for RequestId + + // no validation rules for OriginalPath + + // no validation rules for RequestHeadersBytes + + // no validation rules for RequestBodyBytes + + // no validation rules for RequestHeaders + + // no validation rules for UpstreamHeaderBytesSent + + // no validation rules for DownstreamHeaderBytesReceived + + if len(errors) > 0 { + return HTTPRequestPropertiesMultiError(errors) + } + + return nil +} + +// HTTPRequestPropertiesMultiError is an error wrapping multiple validation +// errors returned by HTTPRequestProperties.ValidateAll() if the designated +// constraints aren't met. +type HTTPRequestPropertiesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HTTPRequestPropertiesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HTTPRequestPropertiesMultiError) AllErrors() []error { return m } + +// HTTPRequestPropertiesValidationError is the validation error returned by +// HTTPRequestProperties.Validate if the designated constraints aren't met. +type HTTPRequestPropertiesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HTTPRequestPropertiesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HTTPRequestPropertiesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HTTPRequestPropertiesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HTTPRequestPropertiesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HTTPRequestPropertiesValidationError) ErrorName() string { + return "HTTPRequestPropertiesValidationError" +} + +// Error satisfies the builtin error interface +func (e HTTPRequestPropertiesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHTTPRequestProperties.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HTTPRequestPropertiesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HTTPRequestPropertiesValidationError{} + +// Validate checks the field values on HTTPResponseProperties with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HTTPResponseProperties) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HTTPResponseProperties with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HTTPResponsePropertiesMultiError, or nil if none found. +func (m *HTTPResponseProperties) ValidateAll() error { + return m.validate(true) +} + +func (m *HTTPResponseProperties) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetResponseCode()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPResponsePropertiesValidationError{ + field: "ResponseCode", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPResponsePropertiesValidationError{ + field: "ResponseCode", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResponseCode()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPResponsePropertiesValidationError{ + field: "ResponseCode", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ResponseHeadersBytes + + // no validation rules for ResponseBodyBytes + + // no validation rules for ResponseHeaders + + // no validation rules for ResponseTrailers + + // no validation rules for ResponseCodeDetails + + // no validation rules for UpstreamHeaderBytesReceived + + // no validation rules for DownstreamHeaderBytesSent + + if len(errors) > 0 { + return HTTPResponsePropertiesMultiError(errors) + } + + return nil +} + +// HTTPResponsePropertiesMultiError is an error wrapping multiple validation +// errors returned by HTTPResponseProperties.ValidateAll() if the designated +// constraints aren't met. +type HTTPResponsePropertiesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HTTPResponsePropertiesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m HTTPResponsePropertiesMultiError) AllErrors() []error { return m } + +// HTTPResponsePropertiesValidationError is the validation error returned by +// HTTPResponseProperties.Validate if the designated constraints aren't met. +type HTTPResponsePropertiesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HTTPResponsePropertiesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HTTPResponsePropertiesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HTTPResponsePropertiesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HTTPResponsePropertiesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HTTPResponsePropertiesValidationError) ErrorName() string { + return "HTTPResponsePropertiesValidationError" +} + +// Error satisfies the builtin error interface +func (e HTTPResponsePropertiesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHTTPResponseProperties.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HTTPResponsePropertiesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HTTPResponsePropertiesValidationError{} + +// Validate checks the field values on ResponseFlags_Unauthorized with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ResponseFlags_Unauthorized) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResponseFlags_Unauthorized with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ResponseFlags_UnauthorizedMultiError, or nil if none found. +func (m *ResponseFlags_Unauthorized) ValidateAll() error { + return m.validate(true) +} + +func (m *ResponseFlags_Unauthorized) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Reason + + if len(errors) > 0 { + return ResponseFlags_UnauthorizedMultiError(errors) + } + + return nil +} + +// ResponseFlags_UnauthorizedMultiError is an error wrapping multiple +// validation errors returned by ResponseFlags_Unauthorized.ValidateAll() if +// the designated constraints aren't met. +type ResponseFlags_UnauthorizedMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResponseFlags_UnauthorizedMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResponseFlags_UnauthorizedMultiError) AllErrors() []error { return m } + +// ResponseFlags_UnauthorizedValidationError is the validation error returned +// by ResponseFlags_Unauthorized.Validate if the designated constraints aren't met. +type ResponseFlags_UnauthorizedValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e ResponseFlags_UnauthorizedValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResponseFlags_UnauthorizedValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResponseFlags_UnauthorizedValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResponseFlags_UnauthorizedValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResponseFlags_UnauthorizedValidationError) ErrorName() string { + return "ResponseFlags_UnauthorizedValidationError" +} + +// Error satisfies the builtin error interface +func (e ResponseFlags_UnauthorizedValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResponseFlags_Unauthorized.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResponseFlags_UnauthorizedValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResponseFlags_UnauthorizedValidationError{} + +// Validate checks the field values on TLSProperties_CertificateProperties with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *TLSProperties_CertificateProperties) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TLSProperties_CertificateProperties +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// TLSProperties_CertificatePropertiesMultiError, or nil if none found. +func (m *TLSProperties_CertificateProperties) ValidateAll() error { + return m.validate(true) +} + +func (m *TLSProperties_CertificateProperties) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetSubjectAltName() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TLSProperties_CertificatePropertiesValidationError{ + field: fmt.Sprintf("SubjectAltName[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TLSProperties_CertificatePropertiesValidationError{ + field: fmt.Sprintf("SubjectAltName[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TLSProperties_CertificatePropertiesValidationError{ + field: fmt.Sprintf("SubjectAltName[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for Subject + + // no validation rules for Issuer + + if len(errors) > 0 { + return TLSProperties_CertificatePropertiesMultiError(errors) + } + + return nil +} + +// TLSProperties_CertificatePropertiesMultiError is an error wrapping multiple +// validation errors returned by +// TLSProperties_CertificateProperties.ValidateAll() if the designated +// constraints aren't met. 
+type TLSProperties_CertificatePropertiesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TLSProperties_CertificatePropertiesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TLSProperties_CertificatePropertiesMultiError) AllErrors() []error { return m } + +// TLSProperties_CertificatePropertiesValidationError is the validation error +// returned by TLSProperties_CertificateProperties.Validate if the designated +// constraints aren't met. +type TLSProperties_CertificatePropertiesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TLSProperties_CertificatePropertiesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TLSProperties_CertificatePropertiesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TLSProperties_CertificatePropertiesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TLSProperties_CertificatePropertiesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TLSProperties_CertificatePropertiesValidationError) ErrorName() string { + return "TLSProperties_CertificatePropertiesValidationError" +} + +// Error satisfies the builtin error interface +func (e TLSProperties_CertificatePropertiesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTLSProperties_CertificateProperties.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TLSProperties_CertificatePropertiesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TLSProperties_CertificatePropertiesValidationError{} + +// Validate checks the field values on +// TLSProperties_CertificateProperties_SubjectAltName with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *TLSProperties_CertificateProperties_SubjectAltName) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// TLSProperties_CertificateProperties_SubjectAltName with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// TLSProperties_CertificateProperties_SubjectAltNameMultiError, or nil if +// none found. 
+func (m *TLSProperties_CertificateProperties_SubjectAltName) ValidateAll() error { + return m.validate(true) +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + switch v := m.San.(type) { + case *TLSProperties_CertificateProperties_SubjectAltName_Uri: + if v == nil { + err := TLSProperties_CertificateProperties_SubjectAltNameValidationError{ + field: "San", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for Uri + case *TLSProperties_CertificateProperties_SubjectAltName_Dns: + if v == nil { + err := TLSProperties_CertificateProperties_SubjectAltNameValidationError{ + field: "San", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for Dns + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return TLSProperties_CertificateProperties_SubjectAltNameMultiError(errors) + } + + return nil +} + +// TLSProperties_CertificateProperties_SubjectAltNameMultiError is an error +// wrapping multiple validation errors returned by +// TLSProperties_CertificateProperties_SubjectAltName.ValidateAll() if the +// designated constraints aren't met. +type TLSProperties_CertificateProperties_SubjectAltNameMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TLSProperties_CertificateProperties_SubjectAltNameMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TLSProperties_CertificateProperties_SubjectAltNameMultiError) AllErrors() []error { return m } + +// TLSProperties_CertificateProperties_SubjectAltNameValidationError is the +// validation error returned by +// TLSProperties_CertificateProperties_SubjectAltName.Validate if the +// designated constraints aren't met. +type TLSProperties_CertificateProperties_SubjectAltNameValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TLSProperties_CertificateProperties_SubjectAltNameValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e TLSProperties_CertificateProperties_SubjectAltNameValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e TLSProperties_CertificateProperties_SubjectAltNameValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e TLSProperties_CertificateProperties_SubjectAltNameValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e TLSProperties_CertificateProperties_SubjectAltNameValidationError) ErrorName() string { + return "TLSProperties_CertificateProperties_SubjectAltNameValidationError" +} + +// Error satisfies the builtin error interface +func (e TLSProperties_CertificateProperties_SubjectAltNameValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTLSProperties_CertificateProperties_SubjectAltName.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TLSProperties_CertificateProperties_SubjectAltNameValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TLSProperties_CertificateProperties_SubjectAltNameValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog_vtproto.pb.go new file mode 100644 index 0000000000..c37ec09159 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3/accesslog_vtproto.pb.go @@ -0,0 +1,2040 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/data/accesslog/v3/accesslog.proto + +package accesslogv3 + +import ( + binary "encoding/binary" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + timestamppb "github.com/planetscale/vtprotobuf/types/known/timestamppb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TCPAccessLogEntry) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TCPAccessLogEntry) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TCPAccessLogEntry) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ConnectionProperties != nil { + size, err := m.ConnectionProperties.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.CommonProperties != nil { + size, err := m.CommonProperties.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HTTPAccessLogEntry) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPAccessLogEntry) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HTTPAccessLogEntry) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Response != nil { + size, err := m.Response.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.Request != nil { + size, err := m.Request.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.ProtocolVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ProtocolVersion)) + i-- + dAtA[i] = 0x10 + } + if m.CommonProperties != nil { + size, err := m.CommonProperties.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConnectionProperties) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConnectionProperties) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ConnectionProperties) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SentBytes != 
0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.SentBytes)) + i-- + dAtA[i] = 0x10 + } + if m.ReceivedBytes != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ReceivedBytes)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *AccessLogCommon) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AccessLogCommon) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AccessLogCommon) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AccessLogType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.AccessLogType)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x88 + } + if m.UpstreamWireBytesReceived != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.UpstreamWireBytesReceived)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x80 + } + if m.UpstreamWireBytesSent != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.UpstreamWireBytesSent)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf8 + } + if m.DownstreamWireBytesReceived != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DownstreamWireBytesReceived)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf0 + } + if m.DownstreamWireBytesSent != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DownstreamWireBytesSent)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe8 + } + if len(m.DownstreamTransportFailureReason) > 0 { + i -= len(m.DownstreamTransportFailureReason) + copy(dAtA[i:], m.DownstreamTransportFailureReason) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DownstreamTransportFailureReason))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + if m.IntermediateLogEntry { + i-- + if m.IntermediateLogEntry { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd8 + } + if len(m.StreamId) > 0 { + i -= len(m.StreamId) + copy(dAtA[i:], m.StreamId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StreamId))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + if len(m.ConnectionTerminationDetails) > 0 { + i -= len(m.ConnectionTerminationDetails) + copy(dAtA[i:], m.ConnectionTerminationDetails) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ConnectionTerminationDetails))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + if m.UpstreamRequestAttemptCount != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.UpstreamRequestAttemptCount)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc0 + } + if m.Duration != nil { + size, err := (*durationpb.Duration)(m.Duration).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if len(m.CustomTags) > 0 { + for k := range m.CustomTags { + v := m.CustomTags[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + } + if 
len(m.FilterStateObjects) > 0 { + for k := range m.FilterStateObjects { + v := m.FilterStateObjects[k] + baseI := i + size, err := (*anypb.Any)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + } + if m.DownstreamDirectRemoteAddress != nil { + if vtmsg, ok := interface{}(m.DownstreamDirectRemoteAddress).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DownstreamDirectRemoteAddress) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if len(m.RouteName) > 0 { + i -= len(m.RouteName) + copy(dAtA[i:], m.RouteName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RouteName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if len(m.UpstreamTransportFailureReason) > 0 { + i -= len(m.UpstreamTransportFailureReason) + copy(dAtA[i:], m.UpstreamTransportFailureReason) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.UpstreamTransportFailureReason))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.ResponseFlags != nil { + size, err := m.ResponseFlags.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if len(m.UpstreamCluster) > 0 { + i -= len(m.UpstreamCluster) + copy(dAtA[i:], m.UpstreamCluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.UpstreamCluster))) + i-- + dAtA[i] = 0x7a + } + if m.UpstreamLocalAddress != nil { + if vtmsg, ok := interface{}(m.UpstreamLocalAddress).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UpstreamLocalAddress) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x72 + } + if m.UpstreamRemoteAddress != nil { + if vtmsg, ok := interface{}(m.UpstreamRemoteAddress).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, 
i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UpstreamRemoteAddress) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + if m.TimeToLastDownstreamTxByte != nil { + size, err := (*durationpb.Duration)(m.TimeToLastDownstreamTxByte).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if m.TimeToFirstDownstreamTxByte != nil { + size, err := (*durationpb.Duration)(m.TimeToFirstDownstreamTxByte).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if m.TimeToLastUpstreamRxByte != nil { + size, err := (*durationpb.Duration)(m.TimeToLastUpstreamRxByte).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if m.TimeToFirstUpstreamRxByte != nil { + size, err := (*durationpb.Duration)(m.TimeToFirstUpstreamRxByte).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if m.TimeToLastUpstreamTxByte != nil { + size, err := (*durationpb.Duration)(m.TimeToLastUpstreamTxByte).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.TimeToFirstUpstreamTxByte != nil { + size, err := (*durationpb.Duration)(m.TimeToFirstUpstreamTxByte).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.TimeToLastRxByte != nil { + size, err := (*durationpb.Duration)(m.TimeToLastRxByte).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.StartTime != nil { + size, err := (*timestamppb.Timestamp)(m.StartTime).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.TlsProperties != nil { + size, err := m.TlsProperties.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.DownstreamLocalAddress != nil { + if vtmsg, ok := interface{}(m.DownstreamLocalAddress).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DownstreamLocalAddress) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.DownstreamRemoteAddress != nil { + if vtmsg, ok := interface{}(m.DownstreamRemoteAddress).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size 
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DownstreamRemoteAddress) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.SampleRate != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.SampleRate)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *ResponseFlags_Unauthorized) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseFlags_Unauthorized) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ResponseFlags_Unauthorized) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Reason != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Reason)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ResponseFlags) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseFlags) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ResponseFlags) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DownstreamRemoteReset { + i-- + if m.DownstreamRemoteReset { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe0 + } + if m.DnsResolutionFailure { + i-- + if m.DnsResolutionFailure { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd8 + } + if m.OverloadManager { + i-- + if m.OverloadManager { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd0 + } + if m.NoClusterFound { + i-- + if m.NoClusterFound { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc8 + } + if m.UpstreamProtocolError { + i-- + if m.UpstreamProtocolError { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc0 + } + if m.DurationTimeout { + i-- + if m.DurationTimeout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb8 + } + if m.NoFilterConfigFound { + i-- + if m.NoFilterConfigFound { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb0 + } + if m.ResponseFromCacheFilter { + i-- + if m.ResponseFromCacheFilter { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if m.UpstreamMaxStreamDurationReached { + i-- + if m.UpstreamMaxStreamDurationReached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if m.DownstreamProtocolError { + i-- + if m.DownstreamProtocolError { + dAtA[i] = 1 + 
} else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if m.InvalidEnvoyRequestHeaders { + i-- + if m.InvalidEnvoyRequestHeaders { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + } + if m.StreamIdleTimeout { + i-- + if m.StreamIdleTimeout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + if m.UpstreamRetryLimitExceeded { + i-- + if m.UpstreamRetryLimitExceeded { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.DownstreamConnectionTermination { + i-- + if m.DownstreamConnectionTermination { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x78 + } + if m.RateLimitServiceError { + i-- + if m.RateLimitServiceError { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + } + if m.UnauthorizedDetails != nil { + size, err := m.UnauthorizedDetails.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + if m.RateLimited { + i-- + if m.RateLimited { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + } + if m.FaultInjected { + i-- + if m.FaultInjected { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } + if m.DelayInjected { + i-- + if m.DelayInjected { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.NoRouteFound { + i-- + if m.NoRouteFound { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } + if m.UpstreamOverflow { + i-- + if m.UpstreamOverflow { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.UpstreamConnectionTermination { + i-- + if m.UpstreamConnectionTermination { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.UpstreamConnectionFailure { + i-- + if m.UpstreamConnectionFailure { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.UpstreamRemoteReset { + i-- + if m.UpstreamRemoteReset { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.LocalReset { + i-- + if m.LocalReset { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.UpstreamRequestTimeout { + i-- + if m.UpstreamRequestTimeout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.NoHealthyUpstream { + i-- + if m.NoHealthyUpstream { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.FailedLocalHealthcheck { + i-- + if m.FailedLocalHealthcheck { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := 
m.San.(*TLSProperties_CertificateProperties_SubjectAltName_Dns); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.San.(*TLSProperties_CertificateProperties_SubjectAltName_Uri); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName_Uri) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName_Uri) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Uri) + copy(dAtA[i:], m.Uri) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Uri))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *TLSProperties_CertificateProperties_SubjectAltName_Dns) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName_Dns) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Dns) + copy(dAtA[i:], m.Dns) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Dns))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *TLSProperties_CertificateProperties) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TLSProperties_CertificateProperties) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TLSProperties_CertificateProperties) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Issuer) > 0 { + i -= len(m.Issuer) + copy(dAtA[i:], m.Issuer) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Issuer))) + i-- + dAtA[i] = 0x1a + } + if len(m.Subject) > 0 { + i -= len(m.Subject) + copy(dAtA[i:], m.Subject) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Subject))) + i-- + dAtA[i] = 0x12 + } + if len(m.SubjectAltName) > 0 { + for iNdEx := len(m.SubjectAltName) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.SubjectAltName[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *TLSProperties) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TLSProperties) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TLSProperties) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Ja3Fingerprint) > 0 { + i -= 
len(m.Ja3Fingerprint) + copy(dAtA[i:], m.Ja3Fingerprint) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Ja3Fingerprint))) + i-- + dAtA[i] = 0x3a + } + if len(m.TlsSessionId) > 0 { + i -= len(m.TlsSessionId) + copy(dAtA[i:], m.TlsSessionId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TlsSessionId))) + i-- + dAtA[i] = 0x32 + } + if m.PeerCertificateProperties != nil { + size, err := m.PeerCertificateProperties.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.LocalCertificateProperties != nil { + size, err := m.LocalCertificateProperties.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.TlsSniHostname) > 0 { + i -= len(m.TlsSniHostname) + copy(dAtA[i:], m.TlsSniHostname) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TlsSniHostname))) + i-- + dAtA[i] = 0x1a + } + if m.TlsCipherSuite != nil { + size, err := (*wrapperspb.UInt32Value)(m.TlsCipherSuite).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.TlsVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TlsVersion)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HTTPRequestProperties) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPRequestProperties) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HTTPRequestProperties) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DownstreamHeaderBytesReceived != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DownstreamHeaderBytesReceived)) + i-- + dAtA[i] = 0x78 + } + if m.UpstreamHeaderBytesSent != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.UpstreamHeaderBytesSent)) + i-- + dAtA[i] = 0x70 + } + if len(m.RequestHeaders) > 0 { + for k := range m.RequestHeaders { + v := m.RequestHeaders[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x6a + } + } + if m.RequestBodyBytes != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RequestBodyBytes)) + i-- + dAtA[i] = 0x60 + } + if m.RequestHeadersBytes != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RequestHeadersBytes)) + i-- + dAtA[i] = 0x58 + } + if len(m.OriginalPath) > 0 { + i -= len(m.OriginalPath) + copy(dAtA[i:], m.OriginalPath) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.OriginalPath))) + i-- + dAtA[i] = 0x52 + } + if len(m.RequestId) > 0 { + i -= len(m.RequestId) + copy(dAtA[i:], m.RequestId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RequestId))) + i-- + dAtA[i] = 0x4a + } + if len(m.ForwardedFor) > 0 { + i 
-= len(m.ForwardedFor) + copy(dAtA[i:], m.ForwardedFor) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ForwardedFor))) + i-- + dAtA[i] = 0x42 + } + if len(m.Referer) > 0 { + i -= len(m.Referer) + copy(dAtA[i:], m.Referer) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Referer))) + i-- + dAtA[i] = 0x3a + } + if len(m.UserAgent) > 0 { + i -= len(m.UserAgent) + copy(dAtA[i:], m.UserAgent) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.UserAgent))) + i-- + dAtA[i] = 0x32 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x2a + } + if m.Port != nil { + size, err := (*wrapperspb.UInt32Value)(m.Port).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0x1a + } + if len(m.Scheme) > 0 { + i -= len(m.Scheme) + copy(dAtA[i:], m.Scheme) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Scheme))) + i-- + dAtA[i] = 0x12 + } + if m.RequestMethod != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RequestMethod)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HTTPResponseProperties) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPResponseProperties) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HTTPResponseProperties) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DownstreamHeaderBytesSent != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DownstreamHeaderBytesSent)) + i-- + dAtA[i] = 0x40 + } + if m.UpstreamHeaderBytesReceived != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.UpstreamHeaderBytesReceived)) + i-- + dAtA[i] = 0x38 + } + if len(m.ResponseCodeDetails) > 0 { + i -= len(m.ResponseCodeDetails) + copy(dAtA[i:], m.ResponseCodeDetails) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseCodeDetails))) + i-- + dAtA[i] = 0x32 + } + if len(m.ResponseTrailers) > 0 { + for k := range m.ResponseTrailers { + v := m.ResponseTrailers[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.ResponseHeaders) > 0 { + for k := range m.ResponseHeaders { + v := m.ResponseHeaders[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if m.ResponseBodyBytes != 0 { + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(m.ResponseBodyBytes)) + i-- + dAtA[i] = 0x18 + } + if m.ResponseHeadersBytes != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ResponseHeadersBytes)) + i-- + dAtA[i] = 0x10 + } + if m.ResponseCode != nil { + size, err := (*wrapperspb.UInt32Value)(m.ResponseCode).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TCPAccessLogEntry) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CommonProperties != nil { + l = m.CommonProperties.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConnectionProperties != nil { + l = m.ConnectionProperties.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HTTPAccessLogEntry) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CommonProperties != nil { + l = m.CommonProperties.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ProtocolVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ProtocolVersion)) + } + if m.Request != nil { + l = m.Request.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Response != nil { + l = m.Response.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ConnectionProperties) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ReceivedBytes != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ReceivedBytes)) + } + if m.SentBytes != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.SentBytes)) + } + n += len(m.unknownFields) + return n +} + +func (m *AccessLogCommon) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SampleRate != 0 { + n += 9 + } + if m.DownstreamRemoteAddress != nil { + if size, ok := interface{}(m.DownstreamRemoteAddress).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DownstreamRemoteAddress) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DownstreamLocalAddress != nil { + if size, ok := interface{}(m.DownstreamLocalAddress).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DownstreamLocalAddress) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TlsProperties != nil { + l = m.TlsProperties.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StartTime != nil { + l = (*timestamppb.Timestamp)(m.StartTime).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TimeToLastRxByte != nil { + l = (*durationpb.Duration)(m.TimeToLastRxByte).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TimeToFirstUpstreamTxByte != nil { + l = (*durationpb.Duration)(m.TimeToFirstUpstreamTxByte).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TimeToLastUpstreamTxByte != nil { + l = (*durationpb.Duration)(m.TimeToLastUpstreamTxByte).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TimeToFirstUpstreamRxByte != nil { + l = (*durationpb.Duration)(m.TimeToFirstUpstreamRxByte).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TimeToLastUpstreamRxByte != nil { + l = (*durationpb.Duration)(m.TimeToLastUpstreamRxByte).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TimeToFirstDownstreamTxByte != nil { + l 
= (*durationpb.Duration)(m.TimeToFirstDownstreamTxByte).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TimeToLastDownstreamTxByte != nil { + l = (*durationpb.Duration)(m.TimeToLastDownstreamTxByte).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UpstreamRemoteAddress != nil { + if size, ok := interface{}(m.UpstreamRemoteAddress).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UpstreamRemoteAddress) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UpstreamLocalAddress != nil { + if size, ok := interface{}(m.UpstreamLocalAddress).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.UpstreamLocalAddress) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.UpstreamCluster) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ResponseFlags != nil { + l = m.ResponseFlags.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.UpstreamTransportFailureReason) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RouteName) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DownstreamDirectRemoteAddress != nil { + if size, ok := interface{}(m.DownstreamDirectRemoteAddress).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DownstreamDirectRemoteAddress) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.FilterStateObjects) > 0 { + for k, v := range m.FilterStateObjects { + _ = k + _ = v + l = 0 + if v != nil { + l = (*anypb.Any)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 2 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if len(m.CustomTags) > 0 { + for k, v := range m.CustomTags { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + n += mapEntrySize + 2 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if m.Duration != nil { + l = (*durationpb.Duration)(m.Duration).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UpstreamRequestAttemptCount != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.UpstreamRequestAttemptCount)) + } + l = len(m.ConnectionTerminationDetails) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.StreamId) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IntermediateLogEntry { + n += 3 + } + l = len(m.DownstreamTransportFailureReason) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DownstreamWireBytesSent != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.DownstreamWireBytesSent)) + } + if m.DownstreamWireBytesReceived != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.DownstreamWireBytesReceived)) + } + if m.UpstreamWireBytesSent != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.UpstreamWireBytesSent)) + } + if m.UpstreamWireBytesReceived != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.UpstreamWireBytesReceived)) + } + if m.AccessLogType != 0 { + n += 2 + 
protohelpers.SizeOfVarint(uint64(m.AccessLogType)) + } + n += len(m.unknownFields) + return n +} + +func (m *ResponseFlags_Unauthorized) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Reason != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Reason)) + } + n += len(m.unknownFields) + return n +} + +func (m *ResponseFlags) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FailedLocalHealthcheck { + n += 2 + } + if m.NoHealthyUpstream { + n += 2 + } + if m.UpstreamRequestTimeout { + n += 2 + } + if m.LocalReset { + n += 2 + } + if m.UpstreamRemoteReset { + n += 2 + } + if m.UpstreamConnectionFailure { + n += 2 + } + if m.UpstreamConnectionTermination { + n += 2 + } + if m.UpstreamOverflow { + n += 2 + } + if m.NoRouteFound { + n += 2 + } + if m.DelayInjected { + n += 2 + } + if m.FaultInjected { + n += 2 + } + if m.RateLimited { + n += 2 + } + if m.UnauthorizedDetails != nil { + l = m.UnauthorizedDetails.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RateLimitServiceError { + n += 2 + } + if m.DownstreamConnectionTermination { + n += 2 + } + if m.UpstreamRetryLimitExceeded { + n += 3 + } + if m.StreamIdleTimeout { + n += 3 + } + if m.InvalidEnvoyRequestHeaders { + n += 3 + } + if m.DownstreamProtocolError { + n += 3 + } + if m.UpstreamMaxStreamDurationReached { + n += 3 + } + if m.ResponseFromCacheFilter { + n += 3 + } + if m.NoFilterConfigFound { + n += 3 + } + if m.DurationTimeout { + n += 3 + } + if m.UpstreamProtocolError { + n += 3 + } + if m.NoClusterFound { + n += 3 + } + if m.OverloadManager { + n += 3 + } + if m.DnsResolutionFailure { + n += 3 + } + if m.DownstreamRemoteReset { + n += 3 + } + n += len(m.unknownFields) + return n +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.San.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *TLSProperties_CertificateProperties_SubjectAltName_Uri) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Uri) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *TLSProperties_CertificateProperties_SubjectAltName_Dns) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Dns) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *TLSProperties_CertificateProperties) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.SubjectAltName) > 0 { + for _, e := range m.SubjectAltName { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.Subject) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Issuer) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *TLSProperties) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TlsVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TlsVersion)) + } + if m.TlsCipherSuite != nil { + l = (*wrapperspb.UInt32Value)(m.TlsCipherSuite).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.TlsSniHostname) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LocalCertificateProperties != nil { + l = m.LocalCertificateProperties.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PeerCertificateProperties != nil { + l 
= m.PeerCertificateProperties.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.TlsSessionId) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Ja3Fingerprint) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HTTPRequestProperties) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequestMethod != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.RequestMethod)) + } + l = len(m.Scheme) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Authority) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Port != nil { + l = (*wrapperspb.UInt32Value)(m.Port).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.UserAgent) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Referer) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ForwardedFor) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RequestId) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.OriginalPath) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RequestHeadersBytes != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.RequestHeadersBytes)) + } + if m.RequestBodyBytes != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.RequestBodyBytes)) + } + if len(m.RequestHeaders) > 0 { + for k, v := range m.RequestHeaders { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if m.UpstreamHeaderBytesSent != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.UpstreamHeaderBytesSent)) + } + if m.DownstreamHeaderBytesReceived != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.DownstreamHeaderBytesReceived)) + } + n += len(m.unknownFields) + return n +} + +func (m *HTTPResponseProperties) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ResponseCode != nil { + l = (*wrapperspb.UInt32Value)(m.ResponseCode).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ResponseHeadersBytes != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ResponseHeadersBytes)) + } + if m.ResponseBodyBytes != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ResponseBodyBytes)) + } + if len(m.ResponseHeaders) > 0 { + for k, v := range m.ResponseHeaders { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if len(m.ResponseTrailers) > 0 { + for k, v := range m.ResponseTrailers { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + l = len(m.ResponseCodeDetails) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UpstreamHeaderBytesReceived != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.UpstreamHeaderBytesReceived)) + } + if m.DownstreamHeaderBytesSent != 0 { + n += 1 + 
protohelpers.SizeOfVarint(uint64(m.DownstreamHeaderBytesSent)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.go new file mode 100644 index 0000000000..ed75102d4e --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.go @@ -0,0 +1,174 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/clusters/aggregate/v3/cluster.proto + +package aggregatev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration for the aggregate cluster. See the :ref:`architecture overview +// ` for more information. +// [#extension: envoy.clusters.aggregate] +type ClusterConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Load balancing clusters in aggregate cluster. Clusters are prioritized based on the order they + // appear in this list. + Clusters []string `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"` +} + +func (x *ClusterConfig) Reset() { + *x = ClusterConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_clusters_aggregate_v3_cluster_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClusterConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClusterConfig) ProtoMessage() {} + +func (x *ClusterConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_clusters_aggregate_v3_cluster_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClusterConfig.ProtoReflect.Descriptor instead. 
+func (*ClusterConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_clusters_aggregate_v3_cluster_proto_rawDescGZIP(), []int{0} +} + +func (x *ClusterConfig) GetClusters() []string { + if x != nil { + return x.Clusters + } + return nil +} + +var File_envoy_extensions_clusters_aggregate_v3_cluster_proto protoreflect.FileDescriptor + +var file_envoy_extensions_clusters_aggregate_v3_cluster_proto_rawDesc = []byte{ + 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x61, 0x67, 0x67, 0x72, + 0x65, 0x67, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x73, 0x2e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x72, 0x0a, 0x0d, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x24, 0x0a, 0x08, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, + 0x3a, 0x3b, 0x9a, 0xc5, 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x67, + 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0xa9, 0x01, + 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x34, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x73, 0x2e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0c, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x59, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, + 0x2f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x67, + 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_envoy_extensions_clusters_aggregate_v3_cluster_proto_rawDescOnce sync.Once + file_envoy_extensions_clusters_aggregate_v3_cluster_proto_rawDescData = 
file_envoy_extensions_clusters_aggregate_v3_cluster_proto_rawDesc +) + +func file_envoy_extensions_clusters_aggregate_v3_cluster_proto_rawDescGZIP() []byte { + file_envoy_extensions_clusters_aggregate_v3_cluster_proto_rawDescOnce.Do(func() { + file_envoy_extensions_clusters_aggregate_v3_cluster_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_clusters_aggregate_v3_cluster_proto_rawDescData) + }) + return file_envoy_extensions_clusters_aggregate_v3_cluster_proto_rawDescData +} + +var file_envoy_extensions_clusters_aggregate_v3_cluster_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_extensions_clusters_aggregate_v3_cluster_proto_goTypes = []interface{}{ + (*ClusterConfig)(nil), // 0: envoy.extensions.clusters.aggregate.v3.ClusterConfig +} +var file_envoy_extensions_clusters_aggregate_v3_cluster_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_clusters_aggregate_v3_cluster_proto_init() } +func file_envoy_extensions_clusters_aggregate_v3_cluster_proto_init() { + if File_envoy_extensions_clusters_aggregate_v3_cluster_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_clusters_aggregate_v3_cluster_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClusterConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_clusters_aggregate_v3_cluster_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_clusters_aggregate_v3_cluster_proto_goTypes, + DependencyIndexes: file_envoy_extensions_clusters_aggregate_v3_cluster_proto_depIdxs, + MessageInfos: file_envoy_extensions_clusters_aggregate_v3_cluster_proto_msgTypes, + }.Build() + File_envoy_extensions_clusters_aggregate_v3_cluster_proto = out.File + file_envoy_extensions_clusters_aggregate_v3_cluster_proto_rawDesc = nil + file_envoy_extensions_clusters_aggregate_v3_cluster_proto_goTypes = nil + file_envoy_extensions_clusters_aggregate_v3_cluster_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.validate.go new file mode 100644 index 0000000000..44fb2c71f1 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster.pb.validate.go @@ -0,0 +1,148 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/extensions/clusters/aggregate/v3/cluster.proto + +package aggregatev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ClusterConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ClusterConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClusterConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ClusterConfigMultiError, or +// nil if none found. +func (m *ClusterConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *ClusterConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetClusters()) < 1 { + err := ClusterConfigValidationError{ + field: "Clusters", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ClusterConfigMultiError(errors) + } + + return nil +} + +// ClusterConfigMultiError is an error wrapping multiple validation errors +// returned by ClusterConfig.ValidateAll() if the designated constraints +// aren't met. +type ClusterConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClusterConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClusterConfigMultiError) AllErrors() []error { return m } + +// ClusterConfigValidationError is the validation error returned by +// ClusterConfig.Validate if the designated constraints aren't met. +type ClusterConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClusterConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClusterConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClusterConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClusterConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ClusterConfigValidationError) ErrorName() string { return "ClusterConfigValidationError" } + +// Error satisfies the builtin error interface +func (e ClusterConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClusterConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClusterConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClusterConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster_vtproto.pb.go new file mode 100644 index 0000000000..a3f22bf130 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3/cluster_vtproto.pb.go @@ -0,0 +1,77 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/clusters/aggregate/v3/cluster.proto + +package aggregatev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ClusterConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClusterConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Clusters) > 0 { + for iNdEx := len(m.Clusters) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Clusters[iNdEx]) + copy(dAtA[i:], m.Clusters[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Clusters[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ClusterConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Clusters) > 0 { + for _, s := range m.Clusters { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.go new file mode 100644 index 0000000000..13e47ea832 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.go @@ -0,0 +1,622 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/filters/common/fault/v3/fault.proto + +package faultv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type FaultDelay_FaultDelayType int32 + +const ( + // Unused and deprecated. + FaultDelay_FIXED FaultDelay_FaultDelayType = 0 +) + +// Enum value maps for FaultDelay_FaultDelayType. +var ( + FaultDelay_FaultDelayType_name = map[int32]string{ + 0: "FIXED", + } + FaultDelay_FaultDelayType_value = map[string]int32{ + "FIXED": 0, + } +) + +func (x FaultDelay_FaultDelayType) Enum() *FaultDelay_FaultDelayType { + p := new(FaultDelay_FaultDelayType) + *p = x + return p +} + +func (x FaultDelay_FaultDelayType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FaultDelay_FaultDelayType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_filters_common_fault_v3_fault_proto_enumTypes[0].Descriptor() +} + +func (FaultDelay_FaultDelayType) Type() protoreflect.EnumType { + return &file_envoy_extensions_filters_common_fault_v3_fault_proto_enumTypes[0] +} + +func (x FaultDelay_FaultDelayType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FaultDelay_FaultDelayType.Descriptor instead. +func (FaultDelay_FaultDelayType) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescGZIP(), []int{0, 0} +} + +// Delay specification is used to inject latency into the +// HTTP/Mongo operation. +// [#next-free-field: 6] +type FaultDelay struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to FaultDelaySecifier: + // + // *FaultDelay_FixedDelay + // *FaultDelay_HeaderDelay_ + FaultDelaySecifier isFaultDelay_FaultDelaySecifier `protobuf_oneof:"fault_delay_secifier"` + // The percentage of operations/connections/requests on which the delay will be injected. + Percentage *v3.FractionalPercent `protobuf:"bytes,4,opt,name=percentage,proto3" json:"percentage,omitempty"` +} + +func (x *FaultDelay) Reset() { + *x = FaultDelay{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FaultDelay) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FaultDelay) ProtoMessage() {} + +func (x *FaultDelay) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FaultDelay.ProtoReflect.Descriptor instead. 
+func (*FaultDelay) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescGZIP(), []int{0} +} + +func (m *FaultDelay) GetFaultDelaySecifier() isFaultDelay_FaultDelaySecifier { + if m != nil { + return m.FaultDelaySecifier + } + return nil +} + +func (x *FaultDelay) GetFixedDelay() *durationpb.Duration { + if x, ok := x.GetFaultDelaySecifier().(*FaultDelay_FixedDelay); ok { + return x.FixedDelay + } + return nil +} + +func (x *FaultDelay) GetHeaderDelay() *FaultDelay_HeaderDelay { + if x, ok := x.GetFaultDelaySecifier().(*FaultDelay_HeaderDelay_); ok { + return x.HeaderDelay + } + return nil +} + +func (x *FaultDelay) GetPercentage() *v3.FractionalPercent { + if x != nil { + return x.Percentage + } + return nil +} + +type isFaultDelay_FaultDelaySecifier interface { + isFaultDelay_FaultDelaySecifier() +} + +type FaultDelay_FixedDelay struct { + // Add a fixed delay before forwarding the operation upstream. See + // https://developers.google.com/protocol-buffers/docs/proto3#json for + // the JSON/YAML Duration mapping. For HTTP/Mongo, the specified + // delay will be injected before a new request/operation. + // This is required if type is FIXED. + FixedDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=fixed_delay,json=fixedDelay,proto3,oneof"` +} + +type FaultDelay_HeaderDelay_ struct { + // Fault delays are controlled via an HTTP header (if applicable). + HeaderDelay *FaultDelay_HeaderDelay `protobuf:"bytes,5,opt,name=header_delay,json=headerDelay,proto3,oneof"` +} + +func (*FaultDelay_FixedDelay) isFaultDelay_FaultDelaySecifier() {} + +func (*FaultDelay_HeaderDelay_) isFaultDelay_FaultDelaySecifier() {} + +// Describes a rate limit to be applied. +type FaultRateLimit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to LimitType: + // + // *FaultRateLimit_FixedLimit_ + // *FaultRateLimit_HeaderLimit_ + LimitType isFaultRateLimit_LimitType `protobuf_oneof:"limit_type"` + // The percentage of operations/connections/requests on which the rate limit will be injected. + Percentage *v3.FractionalPercent `protobuf:"bytes,2,opt,name=percentage,proto3" json:"percentage,omitempty"` +} + +func (x *FaultRateLimit) Reset() { + *x = FaultRateLimit{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FaultRateLimit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FaultRateLimit) ProtoMessage() {} + +func (x *FaultRateLimit) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FaultRateLimit.ProtoReflect.Descriptor instead. 
+func (*FaultRateLimit) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescGZIP(), []int{1} +} + +func (m *FaultRateLimit) GetLimitType() isFaultRateLimit_LimitType { + if m != nil { + return m.LimitType + } + return nil +} + +func (x *FaultRateLimit) GetFixedLimit() *FaultRateLimit_FixedLimit { + if x, ok := x.GetLimitType().(*FaultRateLimit_FixedLimit_); ok { + return x.FixedLimit + } + return nil +} + +func (x *FaultRateLimit) GetHeaderLimit() *FaultRateLimit_HeaderLimit { + if x, ok := x.GetLimitType().(*FaultRateLimit_HeaderLimit_); ok { + return x.HeaderLimit + } + return nil +} + +func (x *FaultRateLimit) GetPercentage() *v3.FractionalPercent { + if x != nil { + return x.Percentage + } + return nil +} + +type isFaultRateLimit_LimitType interface { + isFaultRateLimit_LimitType() +} + +type FaultRateLimit_FixedLimit_ struct { + // A fixed rate limit. + FixedLimit *FaultRateLimit_FixedLimit `protobuf:"bytes,1,opt,name=fixed_limit,json=fixedLimit,proto3,oneof"` +} + +type FaultRateLimit_HeaderLimit_ struct { + // Rate limits are controlled via an HTTP header (if applicable). + HeaderLimit *FaultRateLimit_HeaderLimit `protobuf:"bytes,3,opt,name=header_limit,json=headerLimit,proto3,oneof"` +} + +func (*FaultRateLimit_FixedLimit_) isFaultRateLimit_LimitType() {} + +func (*FaultRateLimit_HeaderLimit_) isFaultRateLimit_LimitType() {} + +// Fault delays are controlled via an HTTP header (if applicable). See the +// :ref:`HTTP fault filter ` documentation for +// more information. +type FaultDelay_HeaderDelay struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *FaultDelay_HeaderDelay) Reset() { + *x = FaultDelay_HeaderDelay{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FaultDelay_HeaderDelay) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FaultDelay_HeaderDelay) ProtoMessage() {} + +func (x *FaultDelay_HeaderDelay) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FaultDelay_HeaderDelay.ProtoReflect.Descriptor instead. +func (*FaultDelay_HeaderDelay) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescGZIP(), []int{0, 0} +} + +// Describes a fixed/constant rate limit. +type FaultRateLimit_FixedLimit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The limit supplied in KiB/s. 
+ LimitKbps uint64 `protobuf:"varint,1,opt,name=limit_kbps,json=limitKbps,proto3" json:"limit_kbps,omitempty"` +} + +func (x *FaultRateLimit_FixedLimit) Reset() { + *x = FaultRateLimit_FixedLimit{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FaultRateLimit_FixedLimit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FaultRateLimit_FixedLimit) ProtoMessage() {} + +func (x *FaultRateLimit_FixedLimit) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FaultRateLimit_FixedLimit.ProtoReflect.Descriptor instead. +func (*FaultRateLimit_FixedLimit) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *FaultRateLimit_FixedLimit) GetLimitKbps() uint64 { + if x != nil { + return x.LimitKbps + } + return 0 +} + +// Rate limits are controlled via an HTTP header (if applicable). See the +// :ref:`HTTP fault filter ` documentation for +// more information. +type FaultRateLimit_HeaderLimit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *FaultRateLimit_HeaderLimit) Reset() { + *x = FaultRateLimit_HeaderLimit{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FaultRateLimit_HeaderLimit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FaultRateLimit_HeaderLimit) ProtoMessage() {} + +func (x *FaultRateLimit_HeaderLimit) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FaultRateLimit_HeaderLimit.ProtoReflect.Descriptor instead. 
+func (*FaultRateLimit_HeaderLimit) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescGZIP(), []int{1, 1} +} + +var File_envoy_extensions_filters_common_fault_v3_fault_proto protoreflect.FileDescriptor + +var file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDesc = []byte{ + 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2f, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x76, 0x33, 0x2f, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x33, + 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc4, 0x03, 0x0a, 0x0a, 0x46, 0x61, 0x75, + 0x6c, 0x74, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x46, 0x0a, 0x0b, 0x66, 0x69, 0x78, 0x65, 0x64, + 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, + 0x00, 0x48, 0x00, 0x52, 0x0a, 0x66, 0x69, 0x78, 0x65, 0x64, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, + 0x65, 0x0a, 0x0c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x33, + 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x40, 0x0a, 0x0a, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, + 0x74, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x70, 0x65, + 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x1a, 0x49, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 
0x1e, 0x35, 0x0a, 0x33, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x61, 0x75, + 0x6c, 0x74, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x44, 0x65, + 0x6c, 0x61, 0x79, 0x22, 0x1b, 0x0a, 0x0e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x44, 0x65, 0x6c, 0x61, + 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x00, + 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x44, 0x65, 0x6c, 0x61, 0x79, + 0x42, 0x1b, 0x0a, 0x14, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, + 0x73, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, 0x08, + 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, + 0xb0, 0x04, 0x0a, 0x0e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, + 0x69, 0x74, 0x12, 0x66, 0x0a, 0x0b, 0x66, 0x69, 0x78, 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, + 0x76, 0x33, 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x2e, 0x46, 0x69, 0x78, 0x65, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x48, 0x00, 0x52, 0x0a, + 0x66, 0x69, 0x78, 0x65, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x69, 0x0a, 0x0c, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x44, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x61, 0x75, 0x6c, + 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x40, 0x0a, 0x0a, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, + 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x70, 0x65, 0x72, + 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x1a, 0x73, 0x0a, 0x0a, 0x46, 0x69, 0x78, 0x65, 0x64, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x26, 0x0a, 0x0a, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x6b, + 0x62, 0x70, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x32, 0x02, + 0x28, 0x01, 0x52, 0x09, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x4b, 0x62, 0x70, 0x73, 0x3a, 0x3d, 0x9a, + 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, + 0x76, 0x32, 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 
0x74, 0x2e, 0x46, 0x69, 0x78, 0x65, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x1a, 0x4d, 0x0a, 0x0b, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x3a, 0x3e, 0x9a, 0xc5, 0x88, + 0x1e, 0x39, 0x0a, 0x37, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x32, + 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x3a, 0x32, 0x9a, 0xc5, 0x88, + 0x1e, 0x2d, 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x32, + 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x42, + 0x11, 0x0a, 0x0a, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, + 0x42, 0x01, 0x42, 0xa7, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x36, 0x69, + 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x57, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x2f, 0x76, 0x33, 0x3b, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescOnce sync.Once + file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescData = file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDesc +) + +func file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescGZIP() []byte { + file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescOnce.Do(func() { + file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescData) + }) + return file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDescData +} + +var file_envoy_extensions_filters_common_fault_v3_fault_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_envoy_extensions_filters_common_fault_v3_fault_proto_goTypes = []interface{}{ + (FaultDelay_FaultDelayType)(0), // 0: envoy.extensions.filters.common.fault.v3.FaultDelay.FaultDelayType + (*FaultDelay)(nil), // 1: envoy.extensions.filters.common.fault.v3.FaultDelay + (*FaultRateLimit)(nil), // 2: envoy.extensions.filters.common.fault.v3.FaultRateLimit + (*FaultDelay_HeaderDelay)(nil), // 3: envoy.extensions.filters.common.fault.v3.FaultDelay.HeaderDelay + (*FaultRateLimit_FixedLimit)(nil), // 4: envoy.extensions.filters.common.fault.v3.FaultRateLimit.FixedLimit + 
(*FaultRateLimit_HeaderLimit)(nil), // 5: envoy.extensions.filters.common.fault.v3.FaultRateLimit.HeaderLimit + (*durationpb.Duration)(nil), // 6: google.protobuf.Duration + (*v3.FractionalPercent)(nil), // 7: envoy.type.v3.FractionalPercent +} +var file_envoy_extensions_filters_common_fault_v3_fault_proto_depIdxs = []int32{ + 6, // 0: envoy.extensions.filters.common.fault.v3.FaultDelay.fixed_delay:type_name -> google.protobuf.Duration + 3, // 1: envoy.extensions.filters.common.fault.v3.FaultDelay.header_delay:type_name -> envoy.extensions.filters.common.fault.v3.FaultDelay.HeaderDelay + 7, // 2: envoy.extensions.filters.common.fault.v3.FaultDelay.percentage:type_name -> envoy.type.v3.FractionalPercent + 4, // 3: envoy.extensions.filters.common.fault.v3.FaultRateLimit.fixed_limit:type_name -> envoy.extensions.filters.common.fault.v3.FaultRateLimit.FixedLimit + 5, // 4: envoy.extensions.filters.common.fault.v3.FaultRateLimit.header_limit:type_name -> envoy.extensions.filters.common.fault.v3.FaultRateLimit.HeaderLimit + 7, // 5: envoy.extensions.filters.common.fault.v3.FaultRateLimit.percentage:type_name -> envoy.type.v3.FractionalPercent + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_filters_common_fault_v3_fault_proto_init() } +func file_envoy_extensions_filters_common_fault_v3_fault_proto_init() { + if File_envoy_extensions_filters_common_fault_v3_fault_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FaultDelay); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FaultRateLimit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FaultDelay_HeaderDelay); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FaultRateLimit_FixedLimit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FaultRateLimit_HeaderLimit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*FaultDelay_FixedDelay)(nil), + (*FaultDelay_HeaderDelay_)(nil), + } + file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*FaultRateLimit_FixedLimit_)(nil), + (*FaultRateLimit_HeaderLimit_)(nil), + } + type x 
struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDesc, + NumEnums: 1, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_filters_common_fault_v3_fault_proto_goTypes, + DependencyIndexes: file_envoy_extensions_filters_common_fault_v3_fault_proto_depIdxs, + EnumInfos: file_envoy_extensions_filters_common_fault_v3_fault_proto_enumTypes, + MessageInfos: file_envoy_extensions_filters_common_fault_v3_fault_proto_msgTypes, + }.Build() + File_envoy_extensions_filters_common_fault_v3_fault_proto = out.File + file_envoy_extensions_filters_common_fault_v3_fault_proto_rawDesc = nil + file_envoy_extensions_filters_common_fault_v3_fault_proto_goTypes = nil + file_envoy_extensions_filters_common_fault_v3_fault_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.validate.go new file mode 100644 index 0000000000..7387e6d026 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault.pb.validate.go @@ -0,0 +1,812 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/filters/common/fault/v3/fault.proto + +package faultv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on FaultDelay with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *FaultDelay) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FaultDelay with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in FaultDelayMultiError, or +// nil if none found. 
+func (m *FaultDelay) ValidateAll() error { + return m.validate(true) +} + +func (m *FaultDelay) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetPercentage()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FaultDelayValidationError{ + field: "Percentage", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FaultDelayValidationError{ + field: "Percentage", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPercentage()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FaultDelayValidationError{ + field: "Percentage", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofFaultDelaySecifierPresent := false + switch v := m.FaultDelaySecifier.(type) { + case *FaultDelay_FixedDelay: + if v == nil { + err := FaultDelayValidationError{ + field: "FaultDelaySecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFaultDelaySecifierPresent = true + + if d := m.GetFixedDelay(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = FaultDelayValidationError{ + field: "FixedDelay", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := FaultDelayValidationError{ + field: "FixedDelay", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + case *FaultDelay_HeaderDelay_: + if v == nil { + err := FaultDelayValidationError{ + field: "FaultDelaySecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofFaultDelaySecifierPresent = true + + if all { + switch v := interface{}(m.GetHeaderDelay()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FaultDelayValidationError{ + field: "HeaderDelay", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FaultDelayValidationError{ + field: "HeaderDelay", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeaderDelay()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FaultDelayValidationError{ + field: "HeaderDelay", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofFaultDelaySecifierPresent { + err := FaultDelayValidationError{ + field: "FaultDelaySecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return FaultDelayMultiError(errors) + } + + return nil +} + +// FaultDelayMultiError is an error wrapping multiple validation errors +// returned by FaultDelay.ValidateAll() if the designated constraints aren't met. +type FaultDelayMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m FaultDelayMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FaultDelayMultiError) AllErrors() []error { return m } + +// FaultDelayValidationError is the validation error returned by +// FaultDelay.Validate if the designated constraints aren't met. +type FaultDelayValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FaultDelayValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FaultDelayValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FaultDelayValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FaultDelayValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FaultDelayValidationError) ErrorName() string { return "FaultDelayValidationError" } + +// Error satisfies the builtin error interface +func (e FaultDelayValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFaultDelay.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FaultDelayValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FaultDelayValidationError{} + +// Validate checks the field values on FaultRateLimit with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *FaultRateLimit) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FaultRateLimit with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in FaultRateLimitMultiError, +// or nil if none found. 
+func (m *FaultRateLimit) ValidateAll() error { + return m.validate(true) +} + +func (m *FaultRateLimit) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetPercentage()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FaultRateLimitValidationError{ + field: "Percentage", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FaultRateLimitValidationError{ + field: "Percentage", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPercentage()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FaultRateLimitValidationError{ + field: "Percentage", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofLimitTypePresent := false + switch v := m.LimitType.(type) { + case *FaultRateLimit_FixedLimit_: + if v == nil { + err := FaultRateLimitValidationError{ + field: "LimitType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLimitTypePresent = true + + if all { + switch v := interface{}(m.GetFixedLimit()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FaultRateLimitValidationError{ + field: "FixedLimit", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FaultRateLimitValidationError{ + field: "FixedLimit", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFixedLimit()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FaultRateLimitValidationError{ + field: "FixedLimit", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *FaultRateLimit_HeaderLimit_: + if v == nil { + err := FaultRateLimitValidationError{ + field: "LimitType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLimitTypePresent = true + + if all { + switch v := interface{}(m.GetHeaderLimit()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FaultRateLimitValidationError{ + field: "HeaderLimit", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FaultRateLimitValidationError{ + field: "HeaderLimit", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeaderLimit()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FaultRateLimitValidationError{ + field: "HeaderLimit", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofLimitTypePresent { + err := FaultRateLimitValidationError{ + field: "LimitType", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return FaultRateLimitMultiError(errors) + } + + return nil +} + +// FaultRateLimitMultiError is an error 
wrapping multiple validation errors +// returned by FaultRateLimit.ValidateAll() if the designated constraints +// aren't met. +type FaultRateLimitMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FaultRateLimitMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FaultRateLimitMultiError) AllErrors() []error { return m } + +// FaultRateLimitValidationError is the validation error returned by +// FaultRateLimit.Validate if the designated constraints aren't met. +type FaultRateLimitValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FaultRateLimitValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FaultRateLimitValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FaultRateLimitValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FaultRateLimitValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FaultRateLimitValidationError) ErrorName() string { return "FaultRateLimitValidationError" } + +// Error satisfies the builtin error interface +func (e FaultRateLimitValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFaultRateLimit.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FaultRateLimitValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FaultRateLimitValidationError{} + +// Validate checks the field values on FaultDelay_HeaderDelay with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FaultDelay_HeaderDelay) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FaultDelay_HeaderDelay with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FaultDelay_HeaderDelayMultiError, or nil if none found. +func (m *FaultDelay_HeaderDelay) ValidateAll() error { + return m.validate(true) +} + +func (m *FaultDelay_HeaderDelay) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return FaultDelay_HeaderDelayMultiError(errors) + } + + return nil +} + +// FaultDelay_HeaderDelayMultiError is an error wrapping multiple validation +// errors returned by FaultDelay_HeaderDelay.ValidateAll() if the designated +// constraints aren't met. +type FaultDelay_HeaderDelayMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FaultDelay_HeaderDelayMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m FaultDelay_HeaderDelayMultiError) AllErrors() []error { return m } + +// FaultDelay_HeaderDelayValidationError is the validation error returned by +// FaultDelay_HeaderDelay.Validate if the designated constraints aren't met. +type FaultDelay_HeaderDelayValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FaultDelay_HeaderDelayValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FaultDelay_HeaderDelayValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FaultDelay_HeaderDelayValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FaultDelay_HeaderDelayValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FaultDelay_HeaderDelayValidationError) ErrorName() string { + return "FaultDelay_HeaderDelayValidationError" +} + +// Error satisfies the builtin error interface +func (e FaultDelay_HeaderDelayValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFaultDelay_HeaderDelay.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FaultDelay_HeaderDelayValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FaultDelay_HeaderDelayValidationError{} + +// Validate checks the field values on FaultRateLimit_FixedLimit with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FaultRateLimit_FixedLimit) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FaultRateLimit_FixedLimit with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FaultRateLimit_FixedLimitMultiError, or nil if none found. +func (m *FaultRateLimit_FixedLimit) ValidateAll() error { + return m.validate(true) +} + +func (m *FaultRateLimit_FixedLimit) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetLimitKbps() < 1 { + err := FaultRateLimit_FixedLimitValidationError{ + field: "LimitKbps", + reason: "value must be greater than or equal to 1", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return FaultRateLimit_FixedLimitMultiError(errors) + } + + return nil +} + +// FaultRateLimit_FixedLimitMultiError is an error wrapping multiple validation +// errors returned by FaultRateLimit_FixedLimit.ValidateAll() if the +// designated constraints aren't met. +type FaultRateLimit_FixedLimitMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FaultRateLimit_FixedLimitMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FaultRateLimit_FixedLimitMultiError) AllErrors() []error { return m } + +// FaultRateLimit_FixedLimitValidationError is the validation error returned by +// FaultRateLimit_FixedLimit.Validate if the designated constraints aren't met. 
+type FaultRateLimit_FixedLimitValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FaultRateLimit_FixedLimitValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FaultRateLimit_FixedLimitValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FaultRateLimit_FixedLimitValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FaultRateLimit_FixedLimitValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FaultRateLimit_FixedLimitValidationError) ErrorName() string { + return "FaultRateLimit_FixedLimitValidationError" +} + +// Error satisfies the builtin error interface +func (e FaultRateLimit_FixedLimitValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFaultRateLimit_FixedLimit.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FaultRateLimit_FixedLimitValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FaultRateLimit_FixedLimitValidationError{} + +// Validate checks the field values on FaultRateLimit_HeaderLimit with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FaultRateLimit_HeaderLimit) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FaultRateLimit_HeaderLimit with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FaultRateLimit_HeaderLimitMultiError, or nil if none found. +func (m *FaultRateLimit_HeaderLimit) ValidateAll() error { + return m.validate(true) +} + +func (m *FaultRateLimit_HeaderLimit) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return FaultRateLimit_HeaderLimitMultiError(errors) + } + + return nil +} + +// FaultRateLimit_HeaderLimitMultiError is an error wrapping multiple +// validation errors returned by FaultRateLimit_HeaderLimit.ValidateAll() if +// the designated constraints aren't met. +type FaultRateLimit_HeaderLimitMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FaultRateLimit_HeaderLimitMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FaultRateLimit_HeaderLimitMultiError) AllErrors() []error { return m } + +// FaultRateLimit_HeaderLimitValidationError is the validation error returned +// by FaultRateLimit_HeaderLimit.Validate if the designated constraints aren't met. +type FaultRateLimit_HeaderLimitValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FaultRateLimit_HeaderLimitValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FaultRateLimit_HeaderLimitValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e FaultRateLimit_HeaderLimitValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FaultRateLimit_HeaderLimitValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FaultRateLimit_HeaderLimitValidationError) ErrorName() string { + return "FaultRateLimit_HeaderLimitValidationError" +} + +// Error satisfies the builtin error interface +func (e FaultRateLimit_HeaderLimitValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFaultRateLimit_HeaderLimit.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FaultRateLimit_HeaderLimitValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FaultRateLimit_HeaderLimitValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault_vtproto.pb.go new file mode 100644 index 0000000000..4a462b40ad --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3/fault_vtproto.pb.go @@ -0,0 +1,491 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/filters/common/fault/v3/fault.proto + +package faultv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *FaultDelay_HeaderDelay) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FaultDelay_HeaderDelay) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultDelay_HeaderDelay) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *FaultDelay) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FaultDelay) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultDelay) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.FaultDelaySecifier.(*FaultDelay_HeaderDelay_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Percentage != nil { + if vtmsg, ok := interface{}(m.Percentage).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Percentage) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if msg, ok := m.FaultDelaySecifier.(*FaultDelay_FixedDelay); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *FaultDelay_FixedDelay) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultDelay_FixedDelay) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FixedDelay != nil { + size, err := (*durationpb.Duration)(m.FixedDelay).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *FaultDelay_HeaderDelay_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultDelay_HeaderDelay_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HeaderDelay != nil { + size, err := m.HeaderDelay.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + 
} else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *FaultRateLimit_FixedLimit) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FaultRateLimit_FixedLimit) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultRateLimit_FixedLimit) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LimitKbps != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.LimitKbps)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *FaultRateLimit_HeaderLimit) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FaultRateLimit_HeaderLimit) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultRateLimit_HeaderLimit) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *FaultRateLimit) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FaultRateLimit) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultRateLimit) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.LimitType.(*FaultRateLimit_HeaderLimit_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Percentage != nil { + if vtmsg, ok := interface{}(m.Percentage).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Percentage) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if msg, ok := m.LimitType.(*FaultRateLimit_FixedLimit_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *FaultRateLimit_FixedLimit_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m 
*FaultRateLimit_FixedLimit_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FixedLimit != nil { + size, err := m.FixedLimit.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *FaultRateLimit_HeaderLimit_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultRateLimit_HeaderLimit_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HeaderLimit != nil { + size, err := m.HeaderLimit.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *FaultDelay_HeaderDelay) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *FaultDelay) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.FaultDelaySecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Percentage != nil { + if size, ok := interface{}(m.Percentage).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Percentage) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *FaultDelay_FixedDelay) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FixedDelay != nil { + l = (*durationpb.Duration)(m.FixedDelay).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *FaultDelay_HeaderDelay_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HeaderDelay != nil { + l = m.HeaderDelay.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *FaultRateLimit_FixedLimit) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LimitKbps != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.LimitKbps)) + } + n += len(m.unknownFields) + return n +} + +func (m *FaultRateLimit_HeaderLimit) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *FaultRateLimit) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.LimitType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Percentage != nil { + if size, ok := interface{}(m.Percentage).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Percentage) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *FaultRateLimit_FixedLimit_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FixedLimit != nil { + l = m.FixedLimit.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *FaultRateLimit_HeaderLimit_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HeaderLimit != nil { + l = m.HeaderLimit.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff 
--git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.go new file mode 100644 index 0000000000..cea58cea9d --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.go @@ -0,0 +1,655 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/filters/http/fault/v3/fault.proto + +package faultv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v32 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// [#next-free-field: 6] +type FaultAbort struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to ErrorType: + // + // *FaultAbort_HttpStatus + // *FaultAbort_GrpcStatus + // *FaultAbort_HeaderAbort_ + ErrorType isFaultAbort_ErrorType `protobuf_oneof:"error_type"` + // The percentage of requests/operations/connections that will be aborted with the error code + // provided. + Percentage *v3.FractionalPercent `protobuf:"bytes,3,opt,name=percentage,proto3" json:"percentage,omitempty"` +} + +func (x *FaultAbort) Reset() { + *x = FaultAbort{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_http_fault_v3_fault_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FaultAbort) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FaultAbort) ProtoMessage() {} + +func (x *FaultAbort) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_http_fault_v3_fault_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FaultAbort.ProtoReflect.Descriptor instead. 
+func (*FaultAbort) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDescGZIP(), []int{0} +} + +func (m *FaultAbort) GetErrorType() isFaultAbort_ErrorType { + if m != nil { + return m.ErrorType + } + return nil +} + +func (x *FaultAbort) GetHttpStatus() uint32 { + if x, ok := x.GetErrorType().(*FaultAbort_HttpStatus); ok { + return x.HttpStatus + } + return 0 +} + +func (x *FaultAbort) GetGrpcStatus() uint32 { + if x, ok := x.GetErrorType().(*FaultAbort_GrpcStatus); ok { + return x.GrpcStatus + } + return 0 +} + +func (x *FaultAbort) GetHeaderAbort() *FaultAbort_HeaderAbort { + if x, ok := x.GetErrorType().(*FaultAbort_HeaderAbort_); ok { + return x.HeaderAbort + } + return nil +} + +func (x *FaultAbort) GetPercentage() *v3.FractionalPercent { + if x != nil { + return x.Percentage + } + return nil +} + +type isFaultAbort_ErrorType interface { + isFaultAbort_ErrorType() +} + +type FaultAbort_HttpStatus struct { + // HTTP status code to use to abort the HTTP request. + HttpStatus uint32 `protobuf:"varint,2,opt,name=http_status,json=httpStatus,proto3,oneof"` +} + +type FaultAbort_GrpcStatus struct { + // gRPC status code to use to abort the gRPC request. + GrpcStatus uint32 `protobuf:"varint,5,opt,name=grpc_status,json=grpcStatus,proto3,oneof"` +} + +type FaultAbort_HeaderAbort_ struct { + // Fault aborts are controlled via an HTTP header (if applicable). + HeaderAbort *FaultAbort_HeaderAbort `protobuf:"bytes,4,opt,name=header_abort,json=headerAbort,proto3,oneof"` +} + +func (*FaultAbort_HttpStatus) isFaultAbort_ErrorType() {} + +func (*FaultAbort_GrpcStatus) isFaultAbort_ErrorType() {} + +func (*FaultAbort_HeaderAbort_) isFaultAbort_ErrorType() {} + +// [#next-free-field: 17] +type HTTPFault struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If specified, the filter will inject delays based on the values in the + // object. + Delay *v31.FaultDelay `protobuf:"bytes,1,opt,name=delay,proto3" json:"delay,omitempty"` + // If specified, the filter will abort requests based on the values in + // the object. At least “abort“ or “delay“ must be specified. + Abort *FaultAbort `protobuf:"bytes,2,opt,name=abort,proto3" json:"abort,omitempty"` + // Specifies the name of the (destination) upstream cluster that the + // filter should match on. Fault injection will be restricted to requests + // bound to the specific upstream cluster. + UpstreamCluster string `protobuf:"bytes,3,opt,name=upstream_cluster,json=upstreamCluster,proto3" json:"upstream_cluster,omitempty"` + // Specifies a set of headers that the filter should match on. The fault + // injection filter can be applied selectively to requests that match a set of + // headers specified in the fault filter config. The chances of actual fault + // injection further depend on the value of the :ref:`percentage + // ` field. + // The filter will check the request's headers against all the specified + // headers in the filter config. A match will happen if all the headers in the + // config are present in the request with the same values (or based on + // presence if the “value“ field is not in the config). + Headers []*v32.HeaderMatcher `protobuf:"bytes,4,rep,name=headers,proto3" json:"headers,omitempty"` + // Faults are injected for the specified list of downstream hosts. If this + // setting is not set, faults are injected for all downstream nodes. 
+ // Downstream node name is taken from :ref:`the HTTP + // x-envoy-downstream-service-node + // ` header and compared + // against downstream_nodes list. + DownstreamNodes []string `protobuf:"bytes,5,rep,name=downstream_nodes,json=downstreamNodes,proto3" json:"downstream_nodes,omitempty"` + // The maximum number of faults that can be active at a single time via the configured fault + // filter. Note that because this setting can be overridden at the route level, it's possible + // for the number of active faults to be greater than this value (if injected via a different + // route). If not specified, defaults to unlimited. This setting can be overridden via + // “runtime “ and any faults that are not injected + // due to overflow will be indicated via the “faults_overflow + // “ stat. + // + // .. attention:: + // + // Like other :ref:`circuit breakers ` in Envoy, this is a fuzzy + // limit. It's possible for the number of active faults to rise slightly above the configured + // amount due to the implementation details. + MaxActiveFaults *wrapperspb.UInt32Value `protobuf:"bytes,6,opt,name=max_active_faults,json=maxActiveFaults,proto3" json:"max_active_faults,omitempty"` + // The response rate limit to be applied to the response body of the stream. When configured, + // the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent + // ` runtime key. + // + // .. attention:: + // + // This is a per-stream limit versus a connection level limit. This means that concurrent streams + // will each get an independent limit. + ResponseRateLimit *v31.FaultRateLimit `protobuf:"bytes,7,opt,name=response_rate_limit,json=responseRateLimit,proto3" json:"response_rate_limit,omitempty"` + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.delay.fixed_delay_percent + DelayPercentRuntime string `protobuf:"bytes,8,opt,name=delay_percent_runtime,json=delayPercentRuntime,proto3" json:"delay_percent_runtime,omitempty"` + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.abort.abort_percent + AbortPercentRuntime string `protobuf:"bytes,9,opt,name=abort_percent_runtime,json=abortPercentRuntime,proto3" json:"abort_percent_runtime,omitempty"` + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.delay.fixed_duration_ms + DelayDurationRuntime string `protobuf:"bytes,10,opt,name=delay_duration_runtime,json=delayDurationRuntime,proto3" json:"delay_duration_runtime,omitempty"` + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.abort.http_status + AbortHttpStatusRuntime string `protobuf:"bytes,11,opt,name=abort_http_status_runtime,json=abortHttpStatusRuntime,proto3" json:"abort_http_status_runtime,omitempty"` + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.max_active_faults + MaxActiveFaultsRuntime string `protobuf:"bytes,12,opt,name=max_active_faults_runtime,json=maxActiveFaultsRuntime,proto3" json:"max_active_faults_runtime,omitempty"` + // The runtime key to override the :ref:`default ` + // runtime. The default is: fault.http.rate_limit.response_percent + ResponseRateLimitPercentRuntime string `protobuf:"bytes,13,opt,name=response_rate_limit_percent_runtime,json=responseRateLimitPercentRuntime,proto3" json:"response_rate_limit_percent_runtime,omitempty"` + // The runtime key to override the :ref:`default ` + // runtime. 
The default is: fault.http.abort.grpc_status + AbortGrpcStatusRuntime string `protobuf:"bytes,14,opt,name=abort_grpc_status_runtime,json=abortGrpcStatusRuntime,proto3" json:"abort_grpc_status_runtime,omitempty"` + // To control whether stats storage is allocated dynamically for each downstream server. + // If set to true, "x-envoy-downstream-service-cluster" field of header will be ignored by this filter. + // If set to false, dynamic stats storage will be allocated for the downstream cluster name. + // Default value is false. + DisableDownstreamClusterStats bool `protobuf:"varint,15,opt,name=disable_downstream_cluster_stats,json=disableDownstreamClusterStats,proto3" json:"disable_downstream_cluster_stats,omitempty"` + // When an abort or delay fault is executed, the metadata struct provided here will be added to the + // request's dynamic metadata under the namespace corresponding to the name of the fault filter. + // This data can be logged as part of Access Logs using the :ref:`command operator + // ` %DYNAMIC_METADATA(NAMESPACE)%, where NAMESPACE is the name of + // the fault filter. + FilterMetadata *structpb.Struct `protobuf:"bytes,16,opt,name=filter_metadata,json=filterMetadata,proto3" json:"filter_metadata,omitempty"` +} + +func (x *HTTPFault) Reset() { + *x = HTTPFault{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_http_fault_v3_fault_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HTTPFault) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HTTPFault) ProtoMessage() {} + +func (x *HTTPFault) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_http_fault_v3_fault_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HTTPFault.ProtoReflect.Descriptor instead. 
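The HTTPFault message above is only the generated binding for the filter config; a minimal sketch of populating it from Go, assuming the vendored import paths in this patch (the package aliases, cluster name, and percentages are illustrative assumptions, not values taken from this change), could look like:

```go
package faultexample

import (
	"time"

	commonfaultv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3"
	faultv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3"
	typev3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
	"google.golang.org/protobuf/types/known/durationpb"
)

// exampleHTTPFault delays 10% of requests by 2s and aborts 5% with a 503,
// restricted to one (hypothetical) upstream cluster.
func exampleHTTPFault() *faultv3.HTTPFault {
	return &faultv3.HTTPFault{
		Delay: &commonfaultv3.FaultDelay{
			FaultDelaySecifier: &commonfaultv3.FaultDelay_FixedDelay{
				FixedDelay: durationpb.New(2 * time.Second),
			},
			Percentage: &typev3.FractionalPercent{
				Numerator:   10,
				Denominator: typev3.FractionalPercent_HUNDRED,
			},
		},
		Abort: &faultv3.FaultAbort{
			ErrorType: &faultv3.FaultAbort_HttpStatus{HttpStatus: 503},
			Percentage: &typev3.FractionalPercent{
				Numerator:   5,
				Denominator: typev3.FractionalPercent_HUNDRED,
			},
		},
		UpstreamCluster: "backend", // hypothetical cluster name
	}
}
```

Both fault packages share the Go package name `faultv3`, so the common-fault import has to be aliased when the two are used together.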
+func (*HTTPFault) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDescGZIP(), []int{1} +} + +func (x *HTTPFault) GetDelay() *v31.FaultDelay { + if x != nil { + return x.Delay + } + return nil +} + +func (x *HTTPFault) GetAbort() *FaultAbort { + if x != nil { + return x.Abort + } + return nil +} + +func (x *HTTPFault) GetUpstreamCluster() string { + if x != nil { + return x.UpstreamCluster + } + return "" +} + +func (x *HTTPFault) GetHeaders() []*v32.HeaderMatcher { + if x != nil { + return x.Headers + } + return nil +} + +func (x *HTTPFault) GetDownstreamNodes() []string { + if x != nil { + return x.DownstreamNodes + } + return nil +} + +func (x *HTTPFault) GetMaxActiveFaults() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxActiveFaults + } + return nil +} + +func (x *HTTPFault) GetResponseRateLimit() *v31.FaultRateLimit { + if x != nil { + return x.ResponseRateLimit + } + return nil +} + +func (x *HTTPFault) GetDelayPercentRuntime() string { + if x != nil { + return x.DelayPercentRuntime + } + return "" +} + +func (x *HTTPFault) GetAbortPercentRuntime() string { + if x != nil { + return x.AbortPercentRuntime + } + return "" +} + +func (x *HTTPFault) GetDelayDurationRuntime() string { + if x != nil { + return x.DelayDurationRuntime + } + return "" +} + +func (x *HTTPFault) GetAbortHttpStatusRuntime() string { + if x != nil { + return x.AbortHttpStatusRuntime + } + return "" +} + +func (x *HTTPFault) GetMaxActiveFaultsRuntime() string { + if x != nil { + return x.MaxActiveFaultsRuntime + } + return "" +} + +func (x *HTTPFault) GetResponseRateLimitPercentRuntime() string { + if x != nil { + return x.ResponseRateLimitPercentRuntime + } + return "" +} + +func (x *HTTPFault) GetAbortGrpcStatusRuntime() string { + if x != nil { + return x.AbortGrpcStatusRuntime + } + return "" +} + +func (x *HTTPFault) GetDisableDownstreamClusterStats() bool { + if x != nil { + return x.DisableDownstreamClusterStats + } + return false +} + +func (x *HTTPFault) GetFilterMetadata() *structpb.Struct { + if x != nil { + return x.FilterMetadata + } + return nil +} + +// Fault aborts are controlled via an HTTP header (if applicable). See the +// :ref:`HTTP fault filter ` documentation for +// more information. +type FaultAbort_HeaderAbort struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *FaultAbort_HeaderAbort) Reset() { + *x = FaultAbort_HeaderAbort{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_http_fault_v3_fault_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FaultAbort_HeaderAbort) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FaultAbort_HeaderAbort) ProtoMessage() {} + +func (x *FaultAbort_HeaderAbort) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_http_fault_v3_fault_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FaultAbort_HeaderAbort.ProtoReflect.Descriptor instead. 
+func (*FaultAbort_HeaderAbort) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDescGZIP(), []int{0, 0} +} + +var File_envoy_extensions_filters_http_fault_v3_fault_proto protoreflect.FileDescriptor + +var file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDesc = []byte{ + 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2f, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x76, 0x33, 0x2f, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x68, + 0x74, 0x74, 0x70, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x33, 0x1a, 0x2c, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x34, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x2f, 0x76, 0x33, 0x2f, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, + 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, + 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa4, 0x03, 0x0a, 0x0a, 0x46, 0x61, 0x75, 0x6c, 0x74, + 0x41, 0x62, 0x6f, 0x72, 0x74, 0x12, 0x2e, 0x0a, 0x0b, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x2a, + 0x06, 0x10, 0xd8, 0x04, 0x28, 0xc8, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x21, 0x0a, 0x0b, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x0a, 0x67, 0x72, + 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x63, 0x0a, 0x0c, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x5f, 0x61, 0x62, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x68, 0x74, 0x74, 0x70, 
0x2e, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x41, 0x62, 0x6f, + 0x72, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x48, 0x00, + 0x52, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x12, 0x40, 0x0a, + 0x0a, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, + 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x1a, + 0x4e, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x3a, 0x3f, + 0x9a, 0xc5, 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x41, 0x62, + 0x6f, 0x72, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x3a, + 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x68, 0x74, 0x74, 0x70, + 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x41, + 0x62, 0x6f, 0x72, 0x74, 0x42, 0x11, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xc7, 0x08, + 0x0a, 0x09, 0x48, 0x54, 0x54, 0x50, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x4a, 0x0a, 0x05, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x44, 0x65, 0x6c, 0x61, 0x79, + 0x52, 0x05, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x48, 0x0a, 0x05, 0x61, 0x62, 0x6f, 0x72, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x73, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x33, 0x2e, + 0x46, 0x61, 0x75, 0x6c, 0x74, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x52, 0x05, 0x61, 0x62, 0x6f, 0x72, + 0x74, 0x12, 0x29, 0x0a, 0x10, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x75, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x07, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10, + 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 
0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x5f, 0x61, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x46, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x12, 0x68, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x72, 0x61, + 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x52, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x11, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x32, 0x0a, 0x15, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x64, 0x65, 0x6c, 0x61, + 0x79, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, + 0x32, 0x0a, 0x15, 0x61, 0x62, 0x6f, 0x72, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, + 0x5f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, + 0x61, 0x62, 0x6f, 0x72, 0x74, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x75, 0x6e, 0x74, + 0x69, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x14, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x19, 0x61, 0x62, 0x6f, + 0x72, 0x74, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x61, 0x62, + 0x6f, 0x72, 0x74, 0x48, 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x19, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x6d, 0x61, 0x78, 0x41, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, + 0x4c, 0x0a, 0x23, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x72, 0x61, 0x74, 0x65, + 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1f, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x50, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, + 0x19, 0x61, 0x62, 0x6f, 0x72, 0x74, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x5f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x16, 0x61, 0x62, 0x6f, 0x72, 
0x74, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x20, 0x64, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x0f, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x1d, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x6f, 0x77, 0x6e, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, + 0x73, 0x12, 0x40, 0x0a, 0x0f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, + 0x68, 0x74, 0x74, 0x70, 0x2e, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x54, + 0x54, 0x50, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x42, 0xa3, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, + 0x10, 0x02, 0x0a, 0x34, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x55, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, + 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2f, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x2f, 0x76, 0x33, 0x3b, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x76, 0x33, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDescOnce sync.Once + file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDescData = file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDesc +) + +func file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDescGZIP() []byte { + file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDescOnce.Do(func() { + file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDescData) + }) + return file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDescData +} + +var file_envoy_extensions_filters_http_fault_v3_fault_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_envoy_extensions_filters_http_fault_v3_fault_proto_goTypes = []interface{}{ + (*FaultAbort)(nil), // 0: envoy.extensions.filters.http.fault.v3.FaultAbort + (*HTTPFault)(nil), // 1: envoy.extensions.filters.http.fault.v3.HTTPFault + (*FaultAbort_HeaderAbort)(nil), // 2: envoy.extensions.filters.http.fault.v3.FaultAbort.HeaderAbort + (*v3.FractionalPercent)(nil), // 3: envoy.type.v3.FractionalPercent + (*v31.FaultDelay)(nil), // 4: 
envoy.extensions.filters.common.fault.v3.FaultDelay + (*v32.HeaderMatcher)(nil), // 5: envoy.config.route.v3.HeaderMatcher + (*wrapperspb.UInt32Value)(nil), // 6: google.protobuf.UInt32Value + (*v31.FaultRateLimit)(nil), // 7: envoy.extensions.filters.common.fault.v3.FaultRateLimit + (*structpb.Struct)(nil), // 8: google.protobuf.Struct +} +var file_envoy_extensions_filters_http_fault_v3_fault_proto_depIdxs = []int32{ + 2, // 0: envoy.extensions.filters.http.fault.v3.FaultAbort.header_abort:type_name -> envoy.extensions.filters.http.fault.v3.FaultAbort.HeaderAbort + 3, // 1: envoy.extensions.filters.http.fault.v3.FaultAbort.percentage:type_name -> envoy.type.v3.FractionalPercent + 4, // 2: envoy.extensions.filters.http.fault.v3.HTTPFault.delay:type_name -> envoy.extensions.filters.common.fault.v3.FaultDelay + 0, // 3: envoy.extensions.filters.http.fault.v3.HTTPFault.abort:type_name -> envoy.extensions.filters.http.fault.v3.FaultAbort + 5, // 4: envoy.extensions.filters.http.fault.v3.HTTPFault.headers:type_name -> envoy.config.route.v3.HeaderMatcher + 6, // 5: envoy.extensions.filters.http.fault.v3.HTTPFault.max_active_faults:type_name -> google.protobuf.UInt32Value + 7, // 6: envoy.extensions.filters.http.fault.v3.HTTPFault.response_rate_limit:type_name -> envoy.extensions.filters.common.fault.v3.FaultRateLimit + 8, // 7: envoy.extensions.filters.http.fault.v3.HTTPFault.filter_metadata:type_name -> google.protobuf.Struct + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_filters_http_fault_v3_fault_proto_init() } +func file_envoy_extensions_filters_http_fault_v3_fault_proto_init() { + if File_envoy_extensions_filters_http_fault_v3_fault_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_filters_http_fault_v3_fault_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FaultAbort); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_http_fault_v3_fault_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HTTPFault); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_http_fault_v3_fault_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FaultAbort_HeaderAbort); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_extensions_filters_http_fault_v3_fault_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*FaultAbort_HttpStatus)(nil), + (*FaultAbort_GrpcStatus)(nil), + (*FaultAbort_HeaderAbort_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_filters_http_fault_v3_fault_proto_goTypes, + DependencyIndexes: file_envoy_extensions_filters_http_fault_v3_fault_proto_depIdxs, + MessageInfos: 
file_envoy_extensions_filters_http_fault_v3_fault_proto_msgTypes, + }.Build() + File_envoy_extensions_filters_http_fault_v3_fault_proto = out.File + file_envoy_extensions_filters_http_fault_v3_fault_proto_rawDesc = nil + file_envoy_extensions_filters_http_fault_v3_fault_proto_goTypes = nil + file_envoy_extensions_filters_http_fault_v3_fault_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.validate.go new file mode 100644 index 0000000000..f3cc33072a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault.pb.validate.go @@ -0,0 +1,658 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/filters/http/fault/v3/fault.proto + +package faultv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on FaultAbort with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *FaultAbort) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FaultAbort with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in FaultAbortMultiError, or +// nil if none found. 
+func (m *FaultAbort) ValidateAll() error { + return m.validate(true) +} + +func (m *FaultAbort) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetPercentage()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FaultAbortValidationError{ + field: "Percentage", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FaultAbortValidationError{ + field: "Percentage", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPercentage()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FaultAbortValidationError{ + field: "Percentage", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofErrorTypePresent := false + switch v := m.ErrorType.(type) { + case *FaultAbort_HttpStatus: + if v == nil { + err := FaultAbortValidationError{ + field: "ErrorType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofErrorTypePresent = true + + if val := m.GetHttpStatus(); val < 200 || val >= 600 { + err := FaultAbortValidationError{ + field: "HttpStatus", + reason: "value must be inside range [200, 600)", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *FaultAbort_GrpcStatus: + if v == nil { + err := FaultAbortValidationError{ + field: "ErrorType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofErrorTypePresent = true + // no validation rules for GrpcStatus + case *FaultAbort_HeaderAbort_: + if v == nil { + err := FaultAbortValidationError{ + field: "ErrorType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofErrorTypePresent = true + + if all { + switch v := interface{}(m.GetHeaderAbort()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FaultAbortValidationError{ + field: "HeaderAbort", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FaultAbortValidationError{ + field: "HeaderAbort", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeaderAbort()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FaultAbortValidationError{ + field: "HeaderAbort", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofErrorTypePresent { + err := FaultAbortValidationError{ + field: "ErrorType", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return FaultAbortMultiError(errors) + } + + return nil +} + +// FaultAbortMultiError is an error wrapping multiple validation errors +// returned by FaultAbort.ValidateAll() if the designated constraints aren't met. +type FaultAbortMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
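Per the checks above, the `error_type` oneof is required and `http_status` must fall in [200, 600); `ValidateAll` collects every violation into a `FaultAbortMultiError` rather than stopping at the first. A hedged sketch of consuming that result (the function and variable names are assumptions for illustration):

```go
package faultexample

import (
	"errors"
	"fmt"

	faultv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3"
)

// reportViolations prints every validation violation found in cfg.
func reportViolations(cfg *faultv3.FaultAbort) {
	err := cfg.ValidateAll()
	if err == nil {
		return
	}
	var multi faultv3.FaultAbortMultiError
	if errors.As(err, &multi) {
		for _, violation := range multi.AllErrors() {
			fmt.Println(violation)
		}
		return
	}
	fmt.Println(err) // defensive fallback for a non-multi error
}
```

Calling `reportViolations(&faultv3.FaultAbort{})` should print `invalid FaultAbort.ErrorType: value is required`, matching the oneof-presence branch above.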
+func (m FaultAbortMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FaultAbortMultiError) AllErrors() []error { return m } + +// FaultAbortValidationError is the validation error returned by +// FaultAbort.Validate if the designated constraints aren't met. +type FaultAbortValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FaultAbortValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FaultAbortValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FaultAbortValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FaultAbortValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FaultAbortValidationError) ErrorName() string { return "FaultAbortValidationError" } + +// Error satisfies the builtin error interface +func (e FaultAbortValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFaultAbort.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FaultAbortValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FaultAbortValidationError{} + +// Validate checks the field values on HTTPFault with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HTTPFault) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HTTPFault with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HTTPFaultMultiError, or nil +// if none found. 
+func (m *HTTPFault) ValidateAll() error { + return m.validate(true) +} + +func (m *HTTPFault) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetDelay()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: "Delay", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: "Delay", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDelay()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPFaultValidationError{ + field: "Delay", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetAbort()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: "Abort", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: "Abort", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAbort()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPFaultValidationError{ + field: "Abort", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for UpstreamCluster + + for idx, item := range m.GetHeaders() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPFaultValidationError{ + field: fmt.Sprintf("Headers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetMaxActiveFaults()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: "MaxActiveFaults", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: "MaxActiveFaults", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxActiveFaults()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPFaultValidationError{ + field: "MaxActiveFaults", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetResponseRateLimit()).(type) { + case interface{ ValidateAll() error }: + 
if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: "ResponseRateLimit", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: "ResponseRateLimit", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResponseRateLimit()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPFaultValidationError{ + field: "ResponseRateLimit", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for DelayPercentRuntime + + // no validation rules for AbortPercentRuntime + + // no validation rules for DelayDurationRuntime + + // no validation rules for AbortHttpStatusRuntime + + // no validation rules for MaxActiveFaultsRuntime + + // no validation rules for ResponseRateLimitPercentRuntime + + // no validation rules for AbortGrpcStatusRuntime + + // no validation rules for DisableDownstreamClusterStats + + if all { + switch v := interface{}(m.GetFilterMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: "FilterMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HTTPFaultValidationError{ + field: "FilterMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilterMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HTTPFaultValidationError{ + field: "FilterMetadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return HTTPFaultMultiError(errors) + } + + return nil +} + +// HTTPFaultMultiError is an error wrapping multiple validation errors returned +// by HTTPFault.ValidateAll() if the designated constraints aren't met. +type HTTPFaultMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HTTPFaultMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HTTPFaultMultiError) AllErrors() []error { return m } + +// HTTPFaultValidationError is the validation error returned by +// HTTPFault.Validate if the designated constraints aren't met. +type HTTPFaultValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HTTPFaultValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HTTPFaultValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HTTPFaultValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HTTPFaultValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HTTPFaultValidationError) ErrorName() string { return "HTTPFaultValidationError" } + +// Error satisfies the builtin error interface +func (e HTTPFaultValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHTTPFault.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HTTPFaultValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HTTPFaultValidationError{} + +// Validate checks the field values on FaultAbort_HeaderAbort with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FaultAbort_HeaderAbort) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FaultAbort_HeaderAbort with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FaultAbort_HeaderAbortMultiError, or nil if none found. +func (m *FaultAbort_HeaderAbort) ValidateAll() error { + return m.validate(true) +} + +func (m *FaultAbort_HeaderAbort) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return FaultAbort_HeaderAbortMultiError(errors) + } + + return nil +} + +// FaultAbort_HeaderAbortMultiError is an error wrapping multiple validation +// errors returned by FaultAbort_HeaderAbort.ValidateAll() if the designated +// constraints aren't met. +type FaultAbort_HeaderAbortMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FaultAbort_HeaderAbortMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FaultAbort_HeaderAbortMultiError) AllErrors() []error { return m } + +// FaultAbort_HeaderAbortValidationError is the validation error returned by +// FaultAbort_HeaderAbort.Validate if the designated constraints aren't met. +type FaultAbort_HeaderAbortValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FaultAbort_HeaderAbortValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FaultAbort_HeaderAbortValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FaultAbort_HeaderAbortValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FaultAbort_HeaderAbortValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e FaultAbort_HeaderAbortValidationError) ErrorName() string { + return "FaultAbort_HeaderAbortValidationError" +} + +// Error satisfies the builtin error interface +func (e FaultAbort_HeaderAbortValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFaultAbort_HeaderAbort.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FaultAbort_HeaderAbortValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FaultAbort_HeaderAbortValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault_vtproto.pb.go new file mode 100644 index 0000000000..8baeafe14e --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3/fault_vtproto.pb.go @@ -0,0 +1,546 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/filters/http/fault/v3/fault.proto + +package faultv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + structpb "github.com/planetscale/vtprotobuf/types/known/structpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *FaultAbort_HeaderAbort) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FaultAbort_HeaderAbort) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultAbort_HeaderAbort) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *FaultAbort) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FaultAbort) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultAbort) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ErrorType.(*FaultAbort_GrpcStatus); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ErrorType.(*FaultAbort_HeaderAbort_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Percentage != nil { + if vtmsg, ok := interface{}(m.Percentage).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Percentage) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if msg, ok := m.ErrorType.(*FaultAbort_HttpStatus); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *FaultAbort_HttpStatus) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultAbort_HttpStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HttpStatus)) + i-- + dAtA[i] = 0x10 + return len(dAtA) - i, nil +} +func (m *FaultAbort_HeaderAbort_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultAbort_HeaderAbort_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HeaderAbort != nil { + size, err := m.HeaderAbort.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return 
len(dAtA) - i, nil +} +func (m *FaultAbort_GrpcStatus) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FaultAbort_GrpcStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.GrpcStatus)) + i-- + dAtA[i] = 0x28 + return len(dAtA) - i, nil +} +func (m *HTTPFault) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPFault) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HTTPFault) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.FilterMetadata != nil { + size, err := (*structpb.Struct)(m.FilterMetadata).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.DisableDownstreamClusterStats { + i-- + if m.DisableDownstreamClusterStats { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x78 + } + if len(m.AbortGrpcStatusRuntime) > 0 { + i -= len(m.AbortGrpcStatusRuntime) + copy(dAtA[i:], m.AbortGrpcStatusRuntime) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AbortGrpcStatusRuntime))) + i-- + dAtA[i] = 0x72 + } + if len(m.ResponseRateLimitPercentRuntime) > 0 { + i -= len(m.ResponseRateLimitPercentRuntime) + copy(dAtA[i:], m.ResponseRateLimitPercentRuntime) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseRateLimitPercentRuntime))) + i-- + dAtA[i] = 0x6a + } + if len(m.MaxActiveFaultsRuntime) > 0 { + i -= len(m.MaxActiveFaultsRuntime) + copy(dAtA[i:], m.MaxActiveFaultsRuntime) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.MaxActiveFaultsRuntime))) + i-- + dAtA[i] = 0x62 + } + if len(m.AbortHttpStatusRuntime) > 0 { + i -= len(m.AbortHttpStatusRuntime) + copy(dAtA[i:], m.AbortHttpStatusRuntime) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AbortHttpStatusRuntime))) + i-- + dAtA[i] = 0x5a + } + if len(m.DelayDurationRuntime) > 0 { + i -= len(m.DelayDurationRuntime) + copy(dAtA[i:], m.DelayDurationRuntime) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DelayDurationRuntime))) + i-- + dAtA[i] = 0x52 + } + if len(m.AbortPercentRuntime) > 0 { + i -= len(m.AbortPercentRuntime) + copy(dAtA[i:], m.AbortPercentRuntime) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AbortPercentRuntime))) + i-- + dAtA[i] = 0x4a + } + if len(m.DelayPercentRuntime) > 0 { + i -= len(m.DelayPercentRuntime) + copy(dAtA[i:], m.DelayPercentRuntime) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DelayPercentRuntime))) + i-- + dAtA[i] = 0x42 + } + if m.ResponseRateLimit != nil { + if vtmsg, ok := interface{}(m.ResponseRateLimit).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ResponseRateLimit) + if err != nil { + return 0, err + } + i -= 
len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + if m.MaxActiveFaults != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxActiveFaults).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.DownstreamNodes) > 0 { + for iNdEx := len(m.DownstreamNodes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DownstreamNodes[iNdEx]) + copy(dAtA[i:], m.DownstreamNodes[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DownstreamNodes[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Headers) > 0 { + for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Headers[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Headers[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.UpstreamCluster) > 0 { + i -= len(m.UpstreamCluster) + copy(dAtA[i:], m.UpstreamCluster) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.UpstreamCluster))) + i-- + dAtA[i] = 0x1a + } + if m.Abort != nil { + size, err := m.Abort.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Delay != nil { + if vtmsg, ok := interface{}(m.Delay).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Delay) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FaultAbort_HeaderAbort) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *FaultAbort) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.ErrorType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Percentage != nil { + if size, ok := interface{}(m.Percentage).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Percentage) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *FaultAbort_HttpStatus) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.HttpStatus)) + return n +} +func (m *FaultAbort_HeaderAbort_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HeaderAbort != nil { + l = m.HeaderAbort.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *FaultAbort_GrpcStatus) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.GrpcStatus)) + return n +} +func (m *HTTPFault) SizeVT() (n int) { + if m == nil { + return 
0 + } + var l int + _ = l + if m.Delay != nil { + if size, ok := interface{}(m.Delay).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Delay) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Abort != nil { + l = m.Abort.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.UpstreamCluster) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Headers) > 0 { + for _, e := range m.Headers { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.DownstreamNodes) > 0 { + for _, s := range m.DownstreamNodes { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.MaxActiveFaults != nil { + l = (*wrapperspb.UInt32Value)(m.MaxActiveFaults).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ResponseRateLimit != nil { + if size, ok := interface{}(m.ResponseRateLimit).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ResponseRateLimit) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DelayPercentRuntime) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.AbortPercentRuntime) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DelayDurationRuntime) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.AbortHttpStatusRuntime) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.MaxActiveFaultsRuntime) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ResponseRateLimitPercentRuntime) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.AbortGrpcStatusRuntime) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DisableDownstreamClusterStats { + n += 2 + } + if m.FilterMetadata != nil { + l = (*structpb.Struct)(m.FilterMetadata).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.go new file mode 100644 index 0000000000..bcf5c7d407 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.go @@ -0,0 +1,355 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/filters/http/rbac/v3/rbac.proto + +package rbacv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/cncf/xds/go/xds/annotations/v3" + v31 "github.com/cncf/xds/go/xds/type/matcher/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// RBAC filter config. 
+// [#next-free-field: 8] +type RBAC struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specify the RBAC rules to be applied globally. + // If absent, no enforcing RBAC policy will be applied. + // If present and empty, DENY. + // If both rules and matcher are configured, rules will be ignored. + Rules *v3.RBAC `protobuf:"bytes,1,opt,name=rules,proto3" json:"rules,omitempty"` + // If specified, rules will emit stats with the given prefix. + // This is useful to distinguish the stat when there is more than one RBAC filter configured with + // rules. + RulesStatPrefix string `protobuf:"bytes,6,opt,name=rules_stat_prefix,json=rulesStatPrefix,proto3" json:"rules_stat_prefix,omitempty"` + // The match tree to use when resolving RBAC action for incoming requests. Requests that do not + // match any matcher will be denied. + // If absent, no enforcing RBAC matcher will be applied. + // If present and empty, deny all requests. + Matcher *v31.Matcher `protobuf:"bytes,4,opt,name=matcher,proto3" json:"matcher,omitempty"` + // Shadow rules are not enforced by the filter (i.e., returning a 403) + // but will emit stats and logs and can be used for rule testing. + // If absent, no shadow RBAC policy will be applied. + // If both shadow rules and shadow matcher are configured, shadow rules will be ignored. + ShadowRules *v3.RBAC `protobuf:"bytes,2,opt,name=shadow_rules,json=shadowRules,proto3" json:"shadow_rules,omitempty"` + // The match tree to use for emitting stats and logs which can be used for rule testing for + // incoming requests. + // If absent, no shadow matcher will be applied. + ShadowMatcher *v31.Matcher `protobuf:"bytes,5,opt,name=shadow_matcher,json=shadowMatcher,proto3" json:"shadow_matcher,omitempty"` + // If specified, shadow rules will emit stats with the given prefix. + // This is useful to distinguish the stat when there is more than one RBAC filter configured with + // shadow rules. + ShadowRulesStatPrefix string `protobuf:"bytes,3,opt,name=shadow_rules_stat_prefix,json=shadowRulesStatPrefix,proto3" json:"shadow_rules_stat_prefix,omitempty"` + // If track_per_rule_stats is true, counters will be published for each rule and shadow rule. + TrackPerRuleStats bool `protobuf:"varint,7,opt,name=track_per_rule_stats,json=trackPerRuleStats,proto3" json:"track_per_rule_stats,omitempty"` +} + +func (x *RBAC) Reset() { + *x = RBAC{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_http_rbac_v3_rbac_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RBAC) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RBAC) ProtoMessage() {} + +func (x *RBAC) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_http_rbac_v3_rbac_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RBAC.ProtoReflect.Descriptor instead. 
+func (*RBAC) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDescGZIP(), []int{0} +} + +func (x *RBAC) GetRules() *v3.RBAC { + if x != nil { + return x.Rules + } + return nil +} + +func (x *RBAC) GetRulesStatPrefix() string { + if x != nil { + return x.RulesStatPrefix + } + return "" +} + +func (x *RBAC) GetMatcher() *v31.Matcher { + if x != nil { + return x.Matcher + } + return nil +} + +func (x *RBAC) GetShadowRules() *v3.RBAC { + if x != nil { + return x.ShadowRules + } + return nil +} + +func (x *RBAC) GetShadowMatcher() *v31.Matcher { + if x != nil { + return x.ShadowMatcher + } + return nil +} + +func (x *RBAC) GetShadowRulesStatPrefix() string { + if x != nil { + return x.ShadowRulesStatPrefix + } + return "" +} + +func (x *RBAC) GetTrackPerRuleStats() bool { + if x != nil { + return x.TrackPerRuleStats + } + return false +} + +type RBACPerRoute struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Override the global configuration of the filter with this new config. + // If absent, the global RBAC policy will be disabled for this route. + Rbac *RBAC `protobuf:"bytes,2,opt,name=rbac,proto3" json:"rbac,omitempty"` +} + +func (x *RBACPerRoute) Reset() { + *x = RBACPerRoute{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_http_rbac_v3_rbac_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RBACPerRoute) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RBACPerRoute) ProtoMessage() {} + +func (x *RBACPerRoute) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_http_rbac_v3_rbac_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RBACPerRoute.ProtoReflect.Descriptor instead. 
+func (*RBACPerRoute) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDescGZIP(), []int{1} +} + +func (x *RBACPerRoute) GetRbac() *RBAC { + if x != nil { + return x.Rbac + } + return nil +} + +var File_envoy_extensions_filters_http_rbac_v3_rbac_proto protoreflect.FileDescriptor + +var file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDesc = []byte{ + 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2f, + 0x72, 0x62, 0x61, 0x63, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x68, 0x74, 0x74, + 0x70, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x62, 0x61, 0x63, 0x2f, 0x76, 0x33, 0x2f, + 0x72, 0x62, 0x61, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x78, 0x64, 0x73, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, + 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xba, 0x04, 0x0a, 0x04, 0x52, 0x42, 0x41, 0x43, 0x12, 0x49, 0x0a, 0x05, 0x72, 0x75, 0x6c, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x42, 0x41, 0x43, 0x42, 0x17, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x11, 0x12, 0x0f, 0x72, 0x75, + 0x6c, 0x65, 0x73, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x05, 0x72, + 0x75, 0x6c, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x53, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x12, 0x57, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, + 0x1f, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x11, 0x12, 0x0f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x5f, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, + 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x5d, 0x0a, 0x0c, 0x73, 0x68, 0x61, + 0x64, 
0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, + 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x42, 0x1e, 0xf2, 0x98, 0xfe, + 0x8f, 0x05, 0x18, 0x12, 0x16, 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6c, 0x65, + 0x73, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0b, 0x73, 0x68, 0x61, + 0x64, 0x6f, 0x77, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x6b, 0x0a, 0x0e, 0x73, 0x68, 0x61, 0x64, + 0x6f, 0x77, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x26, + 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x18, 0x12, 0x16, 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x5f, 0x72, + 0x75, 0x6c, 0x65, 0x73, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0xd2, 0xc6, + 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x0d, 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x5f, + 0x72, 0x75, 0x6c, 0x65, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x53, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2f, + 0x0a, 0x14, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x72, 0x75, 0x6c, 0x65, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x74, 0x72, + 0x61, 0x63, 0x6b, 0x50, 0x65, 0x72, 0x52, 0x75, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x3a, + 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x68, 0x74, 0x74, 0x70, + 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x22, 0x8b, 0x01, + 0x0a, 0x0c, 0x52, 0x42, 0x41, 0x43, 0x50, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x3f, + 0x0a, 0x04, 0x72, 0x62, 0x61, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x72, 0x62, 0x61, + 0x63, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x52, 0x04, 0x72, 0x62, 0x61, 0x63, 0x3a, + 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x68, 0x74, 0x74, 0x70, + 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x42, 0x41, 0x43, 0x50, 0x65, 0x72, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x42, 0x9f, 0x01, 0xba, 0x80, + 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x33, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x68, + 0x74, 0x74, 0x70, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x76, 0x33, 0x42, 0x09, 0x52, 0x62, 0x61, + 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x53, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 
0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, + 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2f, 0x72, + 0x62, 0x61, 0x63, 0x2f, 0x76, 0x33, 0x3b, 0x72, 0x62, 0x61, 0x63, 0x76, 0x33, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDescOnce sync.Once + file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDescData = file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDesc +) + +func file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDescGZIP() []byte { + file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDescOnce.Do(func() { + file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDescData) + }) + return file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDescData +} + +var file_envoy_extensions_filters_http_rbac_v3_rbac_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_extensions_filters_http_rbac_v3_rbac_proto_goTypes = []interface{}{ + (*RBAC)(nil), // 0: envoy.extensions.filters.http.rbac.v3.RBAC + (*RBACPerRoute)(nil), // 1: envoy.extensions.filters.http.rbac.v3.RBACPerRoute + (*v3.RBAC)(nil), // 2: envoy.config.rbac.v3.RBAC + (*v31.Matcher)(nil), // 3: xds.type.matcher.v3.Matcher +} +var file_envoy_extensions_filters_http_rbac_v3_rbac_proto_depIdxs = []int32{ + 2, // 0: envoy.extensions.filters.http.rbac.v3.RBAC.rules:type_name -> envoy.config.rbac.v3.RBAC + 3, // 1: envoy.extensions.filters.http.rbac.v3.RBAC.matcher:type_name -> xds.type.matcher.v3.Matcher + 2, // 2: envoy.extensions.filters.http.rbac.v3.RBAC.shadow_rules:type_name -> envoy.config.rbac.v3.RBAC + 3, // 3: envoy.extensions.filters.http.rbac.v3.RBAC.shadow_matcher:type_name -> xds.type.matcher.v3.Matcher + 0, // 4: envoy.extensions.filters.http.rbac.v3.RBACPerRoute.rbac:type_name -> envoy.extensions.filters.http.rbac.v3.RBAC + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_filters_http_rbac_v3_rbac_proto_init() } +func file_envoy_extensions_filters_http_rbac_v3_rbac_proto_init() { + if File_envoy_extensions_filters_http_rbac_v3_rbac_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_filters_http_rbac_v3_rbac_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RBAC); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_http_rbac_v3_rbac_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RBACPerRoute); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + 
NumServices: 0, + }, + GoTypes: file_envoy_extensions_filters_http_rbac_v3_rbac_proto_goTypes, + DependencyIndexes: file_envoy_extensions_filters_http_rbac_v3_rbac_proto_depIdxs, + MessageInfos: file_envoy_extensions_filters_http_rbac_v3_rbac_proto_msgTypes, + }.Build() + File_envoy_extensions_filters_http_rbac_v3_rbac_proto = out.File + file_envoy_extensions_filters_http_rbac_v3_rbac_proto_rawDesc = nil + file_envoy_extensions_filters_http_rbac_v3_rbac_proto_goTypes = nil + file_envoy_extensions_filters_http_rbac_v3_rbac_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.validate.go new file mode 100644 index 0000000000..1d820564ba --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac.pb.validate.go @@ -0,0 +1,385 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/filters/http/rbac/v3/rbac.proto + +package rbacv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on RBAC with the rules defined in the proto +// definition for this message. If any rules are violated, the first error +// encountered is returned, or nil if there are no violations. +func (m *RBAC) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RBAC with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in RBACMultiError, or nil if none found. 
+func (m *RBAC) ValidateAll() error { + return m.validate(true) +} + +func (m *RBAC) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetRules()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RBACValidationError{ + field: "Rules", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RBACValidationError{ + field: "Rules", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRules()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RBACValidationError{ + field: "Rules", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for RulesStatPrefix + + if all { + switch v := interface{}(m.GetMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RBACValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RBACValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RBACValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetShadowRules()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RBACValidationError{ + field: "ShadowRules", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RBACValidationError{ + field: "ShadowRules", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetShadowRules()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RBACValidationError{ + field: "ShadowRules", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetShadowMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RBACValidationError{ + field: "ShadowMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RBACValidationError{ + field: "ShadowMatcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetShadowMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RBACValidationError{ + field: "ShadowMatcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ShadowRulesStatPrefix + + // no validation rules for TrackPerRuleStats + + if len(errors) > 0 { + return RBACMultiError(errors) + } + + return nil +} + +// RBACMultiError is an error wrapping multiple validation errors returned by +// 
RBAC.ValidateAll() if the designated constraints aren't met. +type RBACMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RBACMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RBACMultiError) AllErrors() []error { return m } + +// RBACValidationError is the validation error returned by RBAC.Validate if the +// designated constraints aren't met. +type RBACValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RBACValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RBACValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RBACValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RBACValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RBACValidationError) ErrorName() string { return "RBACValidationError" } + +// Error satisfies the builtin error interface +func (e RBACValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRBAC.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RBACValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RBACValidationError{} + +// Validate checks the field values on RBACPerRoute with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RBACPerRoute) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RBACPerRoute with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RBACPerRouteMultiError, or +// nil if none found. +func (m *RBACPerRoute) ValidateAll() error { + return m.validate(true) +} + +func (m *RBACPerRoute) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetRbac()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RBACPerRouteValidationError{ + field: "Rbac", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RBACPerRouteValidationError{ + field: "Rbac", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRbac()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RBACPerRouteValidationError{ + field: "Rbac", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RBACPerRouteMultiError(errors) + } + + return nil +} + +// RBACPerRouteMultiError is an error wrapping multiple validation errors +// returned by RBACPerRoute.ValidateAll() if the designated constraints aren't met. 
+type RBACPerRouteMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RBACPerRouteMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RBACPerRouteMultiError) AllErrors() []error { return m } + +// RBACPerRouteValidationError is the validation error returned by +// RBACPerRoute.Validate if the designated constraints aren't met. +type RBACPerRouteValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RBACPerRouteValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RBACPerRouteValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RBACPerRouteValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RBACPerRouteValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RBACPerRouteValidationError) ErrorName() string { return "RBACPerRouteValidationError" } + +// Error satisfies the builtin error interface +func (e RBACPerRouteValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRBACPerRoute.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RBACPerRouteValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RBACPerRouteValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac_vtproto.pb.go new file mode 100644 index 0000000000..1e9cd9a064 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3/rbac_vtproto.pb.go @@ -0,0 +1,283 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/filters/http/rbac/v3/rbac.proto + +package rbacv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *RBAC) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RBAC) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RBAC) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TrackPerRuleStats { + i-- + if m.TrackPerRuleStats { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if len(m.RulesStatPrefix) > 0 { + i -= len(m.RulesStatPrefix) + copy(dAtA[i:], m.RulesStatPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RulesStatPrefix))) + i-- + dAtA[i] = 0x32 + } + if m.ShadowMatcher != nil { + if vtmsg, ok := interface{}(m.ShadowMatcher).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ShadowMatcher) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + if m.Matcher != nil { + if vtmsg, ok := interface{}(m.Matcher).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Matcher) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if len(m.ShadowRulesStatPrefix) > 0 { + i -= len(m.ShadowRulesStatPrefix) + copy(dAtA[i:], m.ShadowRulesStatPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ShadowRulesStatPrefix))) + i-- + dAtA[i] = 0x1a + } + if m.ShadowRules != nil { + if vtmsg, ok := interface{}(m.ShadowRules).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ShadowRules) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.Rules != nil { + if vtmsg, ok := interface{}(m.Rules).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Rules) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RBACPerRoute) MarshalVTStrict() (dAtA []byte, err error) 
{ + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RBACPerRoute) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RBACPerRoute) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Rbac != nil { + size, err := m.Rbac.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func (m *RBAC) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Rules != nil { + if size, ok := interface{}(m.Rules).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Rules) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ShadowRules != nil { + if size, ok := interface{}(m.ShadowRules).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ShadowRules) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ShadowRulesStatPrefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Matcher != nil { + if size, ok := interface{}(m.Matcher).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Matcher) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ShadowMatcher != nil { + if size, ok := interface{}(m.ShadowMatcher).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ShadowMatcher) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RulesStatPrefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TrackPerRuleStats { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RBACPerRoute) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Rbac != nil { + l = m.Rbac.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.go new file mode 100644 index 0000000000..01a368894b --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.go @@ -0,0 +1,469 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/filters/http/router/v3/router.proto + +package routerv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// [#next-free-field: 10] +type Router struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether the router generates dynamic cluster statistics. Defaults to + // true. Can be disabled in high performance scenarios. + DynamicStats *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=dynamic_stats,json=dynamicStats,proto3" json:"dynamic_stats,omitempty"` + // Whether to start a child span for egress routed calls. This can be + // useful in scenarios where other filters (auth, ratelimit, etc.) make + // outbound calls and have child spans rooted at the same ingress + // parent. Defaults to false. + // + // .. attention:: + // + // This field is deprecated by the + // :ref:`spawn_upstream_span `. + // Please use that ``spawn_upstream_span`` field to control the span creation. + // + // Deprecated: Marked as deprecated in envoy/extensions/filters/http/router/v3/router.proto. + StartChildSpan bool `protobuf:"varint,2,opt,name=start_child_span,json=startChildSpan,proto3" json:"start_child_span,omitempty"` + // Configuration for HTTP upstream logs emitted by the router. Upstream logs + // are configured in the same way as access logs, but each log entry represents + // an upstream request. Presuming retries are configured, multiple upstream + // requests may be made for each downstream (inbound) request. + UpstreamLog []*v3.AccessLog `protobuf:"bytes,3,rep,name=upstream_log,json=upstreamLog,proto3" json:"upstream_log,omitempty"` + // Additional upstream access log options. + UpstreamLogOptions *Router_UpstreamAccessLogOptions `protobuf:"bytes,9,opt,name=upstream_log_options,json=upstreamLogOptions,proto3" json:"upstream_log_options,omitempty"` + // Do not add any additional “x-envoy-“ headers to requests or responses. This + // only affects the :ref:`router filter generated x-envoy- headers + // `, other Envoy filters and the HTTP + // connection manager may continue to set “x-envoy-“ headers. + SuppressEnvoyHeaders bool `protobuf:"varint,4,opt,name=suppress_envoy_headers,json=suppressEnvoyHeaders,proto3" json:"suppress_envoy_headers,omitempty"` + // Specifies a list of HTTP headers to strictly validate. Envoy will reject a + // request and respond with HTTP status 400 if the request contains an invalid + // value for any of the headers listed in this field. Strict header checking + // is only supported for the following headers: + // + // Value must be a ','-delimited list (i.e. 
no spaces) of supported retry + // policy values: + // + // * :ref:`config_http_filters_router_x-envoy-retry-grpc-on` + // * :ref:`config_http_filters_router_x-envoy-retry-on` + // + // Value must be an integer: + // + // * :ref:`config_http_filters_router_x-envoy-max-retries` + // * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` + // * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` + StrictCheckHeaders []string `protobuf:"bytes,5,rep,name=strict_check_headers,json=strictCheckHeaders,proto3" json:"strict_check_headers,omitempty"` + // If not set, ingress Envoy will ignore + // :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress + // Envoy, when deriving timeout for upstream cluster. + RespectExpectedRqTimeout bool `protobuf:"varint,6,opt,name=respect_expected_rq_timeout,json=respectExpectedRqTimeout,proto3" json:"respect_expected_rq_timeout,omitempty"` + // If set, Envoy will avoid incrementing HTTP failure code stats + // on gRPC requests. This includes the individual status code value + // (e.g. upstream_rq_504) and group stats (e.g. upstream_rq_5xx). + // This field is useful if interested in relying only on the gRPC + // stats filter to define success and failure metrics for gRPC requests + // as not all failed gRPC requests charge HTTP status code metrics. See + // :ref:`gRPC stats filter` documentation + // for more details. + SuppressGrpcRequestFailureCodeStats bool `protobuf:"varint,7,opt,name=suppress_grpc_request_failure_code_stats,json=suppressGrpcRequestFailureCodeStats,proto3" json:"suppress_grpc_request_failure_code_stats,omitempty"` + // .. note:: + // + // Upstream HTTP filters are currently in alpha. + // + // Optional HTTP filters for the upstream HTTP filter chain. + // + // These filters will be applied for all requests that pass through the router. + // They will also be applied to shadowed requests. + // Upstream HTTP filters cannot change route or cluster. + // Upstream HTTP filters specified on the cluster will override these filters. + // + // If using upstream HTTP filters, please be aware that local errors sent by + // upstream HTTP filters will not trigger retries, and local errors sent by + // upstream HTTP filters will count as a final response if hedging is configured. + // [#extension-category: envoy.filters.http.upstream] + UpstreamHttpFilters []*v31.HttpFilter `protobuf:"bytes,8,rep,name=upstream_http_filters,json=upstreamHttpFilters,proto3" json:"upstream_http_filters,omitempty"` +} + +func (x *Router) Reset() { + *x = Router{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_http_router_v3_router_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Router) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Router) ProtoMessage() {} + +func (x *Router) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_http_router_v3_router_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Router.ProtoReflect.Descriptor instead. 
+func (*Router) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_http_router_v3_router_proto_rawDescGZIP(), []int{0} +} + +func (x *Router) GetDynamicStats() *wrapperspb.BoolValue { + if x != nil { + return x.DynamicStats + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/extensions/filters/http/router/v3/router.proto. +func (x *Router) GetStartChildSpan() bool { + if x != nil { + return x.StartChildSpan + } + return false +} + +func (x *Router) GetUpstreamLog() []*v3.AccessLog { + if x != nil { + return x.UpstreamLog + } + return nil +} + +func (x *Router) GetUpstreamLogOptions() *Router_UpstreamAccessLogOptions { + if x != nil { + return x.UpstreamLogOptions + } + return nil +} + +func (x *Router) GetSuppressEnvoyHeaders() bool { + if x != nil { + return x.SuppressEnvoyHeaders + } + return false +} + +func (x *Router) GetStrictCheckHeaders() []string { + if x != nil { + return x.StrictCheckHeaders + } + return nil +} + +func (x *Router) GetRespectExpectedRqTimeout() bool { + if x != nil { + return x.RespectExpectedRqTimeout + } + return false +} + +func (x *Router) GetSuppressGrpcRequestFailureCodeStats() bool { + if x != nil { + return x.SuppressGrpcRequestFailureCodeStats + } + return false +} + +func (x *Router) GetUpstreamHttpFilters() []*v31.HttpFilter { + if x != nil { + return x.UpstreamHttpFilters + } + return nil +} + +type Router_UpstreamAccessLogOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If set to true, an upstream access log will be recorded when an upstream stream is + // associated to an http request. Note: Each HTTP request received for an already established + // connection will result in an upstream access log record. This includes, for example, + // consecutive HTTP requests over the same connection or a request that is retried. + // In case a retry is applied, an upstream access log will be recorded for each retry. + FlushUpstreamLogOnUpstreamStream bool `protobuf:"varint,1,opt,name=flush_upstream_log_on_upstream_stream,json=flushUpstreamLogOnUpstreamStream,proto3" json:"flush_upstream_log_on_upstream_stream,omitempty"` + // The interval to flush the upstream access logs. By default, the router will flush an upstream + // access log on stream close, when the HTTP request is complete. If this field is set, the router + // will flush access logs periodically at the specified interval. This is especially useful in the + // case of long-lived requests, such as CONNECT and Websockets. + // The interval must be at least 1 millisecond. 
+ UpstreamLogFlushInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=upstream_log_flush_interval,json=upstreamLogFlushInterval,proto3" json:"upstream_log_flush_interval,omitempty"` +} + +func (x *Router_UpstreamAccessLogOptions) Reset() { + *x = Router_UpstreamAccessLogOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_http_router_v3_router_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Router_UpstreamAccessLogOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Router_UpstreamAccessLogOptions) ProtoMessage() {} + +func (x *Router_UpstreamAccessLogOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_http_router_v3_router_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Router_UpstreamAccessLogOptions.ProtoReflect.Descriptor instead. +func (*Router_UpstreamAccessLogOptions) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_http_router_v3_router_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Router_UpstreamAccessLogOptions) GetFlushUpstreamLogOnUpstreamStream() bool { + if x != nil { + return x.FlushUpstreamLogOnUpstreamStream + } + return false +} + +func (x *Router_UpstreamAccessLogOptions) GetUpstreamLogFlushInterval() *durationpb.Duration { + if x != nil { + return x.UpstreamLogFlushInterval + } + return nil +} + +var File_envoy_extensions_filters_http_router_v3_router_proto protoreflect.FileDescriptor + +var file_envoy_extensions_filters_http_router_v3_router_proto_rawDesc = []byte{ + 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2f, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, + 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x59, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x73, 0x2f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2f, 0x68, 0x74, 0x74, + 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 
0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe0, 0x08, 0x0a, 0x06, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x72, + 0x12, 0x3f, 0x0a, 0x0d, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x74, 0x61, 0x74, + 0x73, 0x12, 0x35, 0x0a, 0x10, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x63, 0x68, 0x69, 0x6c, 0x64, + 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, 0x86, + 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x43, + 0x68, 0x69, 0x6c, 0x64, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x47, 0x0a, 0x0c, 0x75, 0x70, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x0b, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x6f, + 0x67, 0x12, 0x7a, 0x0a, 0x14, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6c, 0x6f, + 0x67, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x48, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x72, + 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, + 0x6f, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x12, 0x75, 0x70, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x4c, 0x6f, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x34, 0x0a, + 0x16, 0x73, 0x75, 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x73, + 0x75, 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x12, 0xc7, 0x01, 0x0a, 0x14, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x5f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x09, 0x42, 0x94, 0x01, 0xfa, 0x42, 0x90, 0x01, 0x92, 0x01, 0x8c, 0x01, 0x22, 0x89, 0x01, + 0x72, 0x86, 0x01, 0x52, 0x1e, 0x78, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2d, 0x75, 0x70, 0x73, + 0x74, 0x72, 
0x65, 0x61, 0x6d, 0x2d, 0x72, 0x71, 0x2d, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x2d, 0x6d, 0x73, 0x52, 0x26, 0x78, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2d, 0x75, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2d, 0x72, 0x71, 0x2d, 0x70, 0x65, 0x72, 0x2d, 0x74, 0x72, 0x79, + 0x2d, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2d, 0x6d, 0x73, 0x52, 0x13, 0x78, 0x2d, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2d, 0x6d, 0x61, 0x78, 0x2d, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, + 0x52, 0x15, 0x78, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2d, 0x72, 0x65, 0x74, 0x72, 0x79, 0x2d, + 0x67, 0x72, 0x70, 0x63, 0x2d, 0x6f, 0x6e, 0x52, 0x10, 0x78, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2d, 0x72, 0x65, 0x74, 0x72, 0x79, 0x2d, 0x6f, 0x6e, 0x52, 0x12, 0x73, 0x74, 0x72, 0x69, 0x63, + 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x3d, 0x0a, + 0x1b, 0x72, 0x65, 0x73, 0x70, 0x65, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x5f, 0x72, 0x71, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x18, 0x72, 0x65, 0x73, 0x70, 0x65, 0x63, 0x74, 0x45, 0x78, 0x70, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x52, 0x71, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x55, 0x0a, 0x28, + 0x73, 0x75, 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x63, 0x6f, + 0x64, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x23, + 0x73, 0x75, 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, 0x47, 0x72, 0x70, 0x63, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x12, 0x7b, 0x0a, 0x15, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x08, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x48, 0x74, 0x74, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x13, 0x75, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x1a, 0xd3, 0x01, 0x0a, 0x18, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4f, 0x0a, + 0x25, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, + 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x6e, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x20, 0x66, 0x6c, + 0x75, 0x73, 0x68, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x6f, 0x67, 0x4f, 0x6e, + 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x66, + 0x0a, 0x1b, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x66, + 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 
0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, + 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x18, 0x75, 0x70, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x6f, 0x67, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x49, 0x6e, + 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x2e, 0x76, + 0x32, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x42, 0xa7, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, + 0x02, 0x10, 0x02, 0x0a, 0x35, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x68, 0x74, 0x74, 0x70, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x57, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2f, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x72, + 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_filters_http_router_v3_router_proto_rawDescOnce sync.Once + file_envoy_extensions_filters_http_router_v3_router_proto_rawDescData = file_envoy_extensions_filters_http_router_v3_router_proto_rawDesc +) + +func file_envoy_extensions_filters_http_router_v3_router_proto_rawDescGZIP() []byte { + file_envoy_extensions_filters_http_router_v3_router_proto_rawDescOnce.Do(func() { + file_envoy_extensions_filters_http_router_v3_router_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_filters_http_router_v3_router_proto_rawDescData) + }) + return file_envoy_extensions_filters_http_router_v3_router_proto_rawDescData +} + +var file_envoy_extensions_filters_http_router_v3_router_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_extensions_filters_http_router_v3_router_proto_goTypes = []interface{}{ + (*Router)(nil), // 0: envoy.extensions.filters.http.router.v3.Router + (*Router_UpstreamAccessLogOptions)(nil), // 1: envoy.extensions.filters.http.router.v3.Router.UpstreamAccessLogOptions + (*wrapperspb.BoolValue)(nil), // 2: google.protobuf.BoolValue + (*v3.AccessLog)(nil), // 3: envoy.config.accesslog.v3.AccessLog + (*v31.HttpFilter)(nil), // 4: envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter + (*durationpb.Duration)(nil), // 5: google.protobuf.Duration +} +var file_envoy_extensions_filters_http_router_v3_router_proto_depIdxs = []int32{ + 2, // 0: envoy.extensions.filters.http.router.v3.Router.dynamic_stats:type_name -> google.protobuf.BoolValue + 3, // 1: envoy.extensions.filters.http.router.v3.Router.upstream_log:type_name -> envoy.config.accesslog.v3.AccessLog + 1, // 2: envoy.extensions.filters.http.router.v3.Router.upstream_log_options:type_name -> envoy.extensions.filters.http.router.v3.Router.UpstreamAccessLogOptions + 4, // 3: 
envoy.extensions.filters.http.router.v3.Router.upstream_http_filters:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter + 5, // 4: envoy.extensions.filters.http.router.v3.Router.UpstreamAccessLogOptions.upstream_log_flush_interval:type_name -> google.protobuf.Duration + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_filters_http_router_v3_router_proto_init() } +func file_envoy_extensions_filters_http_router_v3_router_proto_init() { + if File_envoy_extensions_filters_http_router_v3_router_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_filters_http_router_v3_router_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Router); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_http_router_v3_router_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Router_UpstreamAccessLogOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_filters_http_router_v3_router_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_filters_http_router_v3_router_proto_goTypes, + DependencyIndexes: file_envoy_extensions_filters_http_router_v3_router_proto_depIdxs, + MessageInfos: file_envoy_extensions_filters_http_router_v3_router_proto_msgTypes, + }.Build() + File_envoy_extensions_filters_http_router_v3_router_proto = out.File + file_envoy_extensions_filters_http_router_v3_router_proto_rawDesc = nil + file_envoy_extensions_filters_http_router_v3_router_proto_goTypes = nil + file_envoy_extensions_filters_http_router_v3_router_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.validate.go new file mode 100644 index 0000000000..151afcaa33 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router.pb.validate.go @@ -0,0 +1,428 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/filters/http/router/v3/router.proto + +package routerv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Router with the rules defined in the +// proto definition for this message. 
If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Router) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Router with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in RouterMultiError, or nil if none found. +func (m *Router) ValidateAll() error { + return m.validate(true) +} + +func (m *Router) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetDynamicStats()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouterValidationError{ + field: "DynamicStats", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouterValidationError{ + field: "DynamicStats", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDynamicStats()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouterValidationError{ + field: "DynamicStats", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for StartChildSpan + + for idx, item := range m.GetUpstreamLog() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouterValidationError{ + field: fmt.Sprintf("UpstreamLog[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouterValidationError{ + field: fmt.Sprintf("UpstreamLog[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouterValidationError{ + field: fmt.Sprintf("UpstreamLog[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetUpstreamLogOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouterValidationError{ + field: "UpstreamLogOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouterValidationError{ + field: "UpstreamLogOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUpstreamLogOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouterValidationError{ + field: "UpstreamLogOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for SuppressEnvoyHeaders + + for idx, item := range m.GetStrictCheckHeaders() { + _, _ = idx, item + + if _, ok := _Router_StrictCheckHeaders_InLookup[item]; !ok { + err := RouterValidationError{ + field: fmt.Sprintf("StrictCheckHeaders[%v]", idx), + reason: "value must be in list [x-envoy-upstream-rq-timeout-ms x-envoy-upstream-rq-per-try-timeout-ms x-envoy-max-retries x-envoy-retry-grpc-on 
x-envoy-retry-on]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + // no validation rules for RespectExpectedRqTimeout + + // no validation rules for SuppressGrpcRequestFailureCodeStats + + for idx, item := range m.GetUpstreamHttpFilters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RouterValidationError{ + field: fmt.Sprintf("UpstreamHttpFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RouterValidationError{ + field: fmt.Sprintf("UpstreamHttpFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RouterValidationError{ + field: fmt.Sprintf("UpstreamHttpFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return RouterMultiError(errors) + } + + return nil +} + +// RouterMultiError is an error wrapping multiple validation errors returned by +// Router.ValidateAll() if the designated constraints aren't met. +type RouterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RouterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RouterMultiError) AllErrors() []error { return m } + +// RouterValidationError is the validation error returned by Router.Validate if +// the designated constraints aren't met. +type RouterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RouterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RouterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RouterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RouterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RouterValidationError) ErrorName() string { return "RouterValidationError" } + +// Error satisfies the builtin error interface +func (e RouterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RouterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RouterValidationError{} + +var _Router_StrictCheckHeaders_InLookup = map[string]struct{}{ + "x-envoy-upstream-rq-timeout-ms": {}, + "x-envoy-upstream-rq-per-try-timeout-ms": {}, + "x-envoy-max-retries": {}, + "x-envoy-retry-grpc-on": {}, + "x-envoy-retry-on": {}, +} + +// Validate checks the field values on Router_UpstreamAccessLogOptions with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *Router_UpstreamAccessLogOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Router_UpstreamAccessLogOptions with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// Router_UpstreamAccessLogOptionsMultiError, or nil if none found. +func (m *Router_UpstreamAccessLogOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *Router_UpstreamAccessLogOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for FlushUpstreamLogOnUpstreamStream + + if d := m.GetUpstreamLogFlushInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = Router_UpstreamAccessLogOptionsValidationError{ + field: "UpstreamLogFlushInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gte := time.Duration(0*time.Second + 1000000*time.Nanosecond) + + if dur < gte { + err := Router_UpstreamAccessLogOptionsValidationError{ + field: "UpstreamLogFlushInterval", + reason: "value must be greater than or equal to 1ms", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if len(errors) > 0 { + return Router_UpstreamAccessLogOptionsMultiError(errors) + } + + return nil +} + +// Router_UpstreamAccessLogOptionsMultiError is an error wrapping multiple +// validation errors returned by Router_UpstreamAccessLogOptions.ValidateAll() +// if the designated constraints aren't met. +type Router_UpstreamAccessLogOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Router_UpstreamAccessLogOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Router_UpstreamAccessLogOptionsMultiError) AllErrors() []error { return m } + +// Router_UpstreamAccessLogOptionsValidationError is the validation error +// returned by Router_UpstreamAccessLogOptions.Validate if the designated +// constraints aren't met. +type Router_UpstreamAccessLogOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Router_UpstreamAccessLogOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Router_UpstreamAccessLogOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Router_UpstreamAccessLogOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Router_UpstreamAccessLogOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Router_UpstreamAccessLogOptionsValidationError) ErrorName() string { + return "Router_UpstreamAccessLogOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e Router_UpstreamAccessLogOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRouter_UpstreamAccessLogOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Router_UpstreamAccessLogOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Router_UpstreamAccessLogOptionsValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router_vtproto.pb.go new file mode 100644 index 0000000000..88105c3e18 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3/router_vtproto.pb.go @@ -0,0 +1,302 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/filters/http/router/v3/router.proto + +package routerv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Router_UpstreamAccessLogOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Router_UpstreamAccessLogOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Router_UpstreamAccessLogOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.UpstreamLogFlushInterval != nil { + size, err := (*durationpb.Duration)(m.UpstreamLogFlushInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.FlushUpstreamLogOnUpstreamStream { + i-- + if m.FlushUpstreamLogOnUpstreamStream { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Router) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Router) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Router) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.UpstreamLogOptions != nil { + size, err := m.UpstreamLogOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if len(m.UpstreamHttpFilters) > 0 { + for iNdEx := len(m.UpstreamHttpFilters) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.UpstreamHttpFilters[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UpstreamHttpFilters[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + } + if m.SuppressGrpcRequestFailureCodeStats { + i-- + if m.SuppressGrpcRequestFailureCodeStats { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.RespectExpectedRqTimeout { + i-- + if m.RespectExpectedRqTimeout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.StrictCheckHeaders) > 0 { + for iNdEx := len(m.StrictCheckHeaders) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.StrictCheckHeaders[iNdEx]) + copy(dAtA[i:], m.StrictCheckHeaders[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StrictCheckHeaders[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if m.SuppressEnvoyHeaders { + i-- + if m.SuppressEnvoyHeaders { + dAtA[i] = 1 + } else 
{ + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.UpstreamLog) > 0 { + for iNdEx := len(m.UpstreamLog) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.UpstreamLog[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.UpstreamLog[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + } + if m.StartChildSpan { + i-- + if m.StartChildSpan { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.DynamicStats != nil { + size, err := (*wrapperspb.BoolValue)(m.DynamicStats).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Router_UpstreamAccessLogOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FlushUpstreamLogOnUpstreamStream { + n += 2 + } + if m.UpstreamLogFlushInterval != nil { + l = (*durationpb.Duration)(m.UpstreamLogFlushInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Router) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DynamicStats != nil { + l = (*wrapperspb.BoolValue)(m.DynamicStats).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StartChildSpan { + n += 2 + } + if len(m.UpstreamLog) > 0 { + for _, e := range m.UpstreamLog { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.SuppressEnvoyHeaders { + n += 2 + } + if len(m.StrictCheckHeaders) > 0 { + for _, s := range m.StrictCheckHeaders { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.RespectExpectedRqTimeout { + n += 2 + } + if m.SuppressGrpcRequestFailureCodeStats { + n += 2 + } + if len(m.UpstreamHttpFilters) > 0 { + for _, e := range m.UpstreamHttpFilters { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.UpstreamLogOptions != nil { + l = m.UpstreamLogOptions.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.go new file mode 100644 index 0000000000..03cc091234 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.go @@ -0,0 +1,4304 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto + +package http_connection_managerv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v31 "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v32 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v35 "github.com/envoyproxy/go-control-plane/envoy/config/trace/v3" + v36 "github.com/envoyproxy/go-control-plane/envoy/type/http/v3" + v34 "github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3" + v33 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type HttpConnectionManager_CodecType int32 + +const ( + // For every new connection, the connection manager will determine which + // codec to use. This mode supports both ALPN for TLS listeners as well as + // protocol inference for plaintext listeners. If ALPN data is available, it + // is preferred, otherwise protocol inference is used. In almost all cases, + // this is the right option to choose for this setting. + HttpConnectionManager_AUTO HttpConnectionManager_CodecType = 0 + // The connection manager will assume that the client is speaking HTTP/1.1. + HttpConnectionManager_HTTP1 HttpConnectionManager_CodecType = 1 + // The connection manager will assume that the client is speaking HTTP/2 + // (Envoy does not require HTTP/2 to take place over TLS or to use ALPN. + // Prior knowledge is allowed). + HttpConnectionManager_HTTP2 HttpConnectionManager_CodecType = 2 + // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with + // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient + // to distinguish HTTP1 and HTTP2 traffic. + HttpConnectionManager_HTTP3 HttpConnectionManager_CodecType = 3 +) + +// Enum value maps for HttpConnectionManager_CodecType. 
+var ( + HttpConnectionManager_CodecType_name = map[int32]string{ + 0: "AUTO", + 1: "HTTP1", + 2: "HTTP2", + 3: "HTTP3", + } + HttpConnectionManager_CodecType_value = map[string]int32{ + "AUTO": 0, + "HTTP1": 1, + "HTTP2": 2, + "HTTP3": 3, + } +) + +func (x HttpConnectionManager_CodecType) Enum() *HttpConnectionManager_CodecType { + p := new(HttpConnectionManager_CodecType) + *p = x + return p +} + +func (x HttpConnectionManager_CodecType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HttpConnectionManager_CodecType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_enumTypes[0].Descriptor() +} + +func (HttpConnectionManager_CodecType) Type() protoreflect.EnumType { + return &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_enumTypes[0] +} + +func (x HttpConnectionManager_CodecType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HttpConnectionManager_CodecType.Descriptor instead. +func (HttpConnectionManager_CodecType) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 0} +} + +type HttpConnectionManager_ServerHeaderTransformation int32 + +const ( + // Overwrite any Server header with the contents of server_name. + HttpConnectionManager_OVERWRITE HttpConnectionManager_ServerHeaderTransformation = 0 + // If no Server header is present, append Server server_name + // If a Server header is present, pass it through. + HttpConnectionManager_APPEND_IF_ABSENT HttpConnectionManager_ServerHeaderTransformation = 1 + // Pass through the value of the server header, and do not append a header + // if none is present. + HttpConnectionManager_PASS_THROUGH HttpConnectionManager_ServerHeaderTransformation = 2 +) + +// Enum value maps for HttpConnectionManager_ServerHeaderTransformation. +var ( + HttpConnectionManager_ServerHeaderTransformation_name = map[int32]string{ + 0: "OVERWRITE", + 1: "APPEND_IF_ABSENT", + 2: "PASS_THROUGH", + } + HttpConnectionManager_ServerHeaderTransformation_value = map[string]int32{ + "OVERWRITE": 0, + "APPEND_IF_ABSENT": 1, + "PASS_THROUGH": 2, + } +) + +func (x HttpConnectionManager_ServerHeaderTransformation) Enum() *HttpConnectionManager_ServerHeaderTransformation { + p := new(HttpConnectionManager_ServerHeaderTransformation) + *p = x + return p +} + +func (x HttpConnectionManager_ServerHeaderTransformation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HttpConnectionManager_ServerHeaderTransformation) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_enumTypes[1].Descriptor() +} + +func (HttpConnectionManager_ServerHeaderTransformation) Type() protoreflect.EnumType { + return &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_enumTypes[1] +} + +func (x HttpConnectionManager_ServerHeaderTransformation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HttpConnectionManager_ServerHeaderTransformation.Descriptor instead. 
+func (HttpConnectionManager_ServerHeaderTransformation) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 1} +} + +// How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP +// header. +type HttpConnectionManager_ForwardClientCertDetails int32 + +const ( + // Do not send the XFCC header to the next hop. This is the default value. + HttpConnectionManager_SANITIZE HttpConnectionManager_ForwardClientCertDetails = 0 + // When the client connection is mTLS (Mutual TLS), forward the XFCC header + // in the request. + HttpConnectionManager_FORWARD_ONLY HttpConnectionManager_ForwardClientCertDetails = 1 + // When the client connection is mTLS, append the client certificate + // information to the request’s XFCC header and forward it. + HttpConnectionManager_APPEND_FORWARD HttpConnectionManager_ForwardClientCertDetails = 2 + // When the client connection is mTLS, reset the XFCC header with the client + // certificate information and send it to the next hop. + HttpConnectionManager_SANITIZE_SET HttpConnectionManager_ForwardClientCertDetails = 3 + // Always forward the XFCC header in the request, regardless of whether the + // client connection is mTLS. + HttpConnectionManager_ALWAYS_FORWARD_ONLY HttpConnectionManager_ForwardClientCertDetails = 4 +) + +// Enum value maps for HttpConnectionManager_ForwardClientCertDetails. +var ( + HttpConnectionManager_ForwardClientCertDetails_name = map[int32]string{ + 0: "SANITIZE", + 1: "FORWARD_ONLY", + 2: "APPEND_FORWARD", + 3: "SANITIZE_SET", + 4: "ALWAYS_FORWARD_ONLY", + } + HttpConnectionManager_ForwardClientCertDetails_value = map[string]int32{ + "SANITIZE": 0, + "FORWARD_ONLY": 1, + "APPEND_FORWARD": 2, + "SANITIZE_SET": 3, + "ALWAYS_FORWARD_ONLY": 4, + } +) + +func (x HttpConnectionManager_ForwardClientCertDetails) Enum() *HttpConnectionManager_ForwardClientCertDetails { + p := new(HttpConnectionManager_ForwardClientCertDetails) + *p = x + return p +} + +func (x HttpConnectionManager_ForwardClientCertDetails) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HttpConnectionManager_ForwardClientCertDetails) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_enumTypes[2].Descriptor() +} + +func (HttpConnectionManager_ForwardClientCertDetails) Type() protoreflect.EnumType { + return &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_enumTypes[2] +} + +func (x HttpConnectionManager_ForwardClientCertDetails) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HttpConnectionManager_ForwardClientCertDetails.Descriptor instead. +func (HttpConnectionManager_ForwardClientCertDetails) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 2} +} + +// Determines the action for request that contain %2F, %2f, %5C or %5c sequences in the URI path. +// This operation occurs before URL normalization and the merge slashes transformations if they were enabled. +type HttpConnectionManager_PathWithEscapedSlashesAction int32 + +const ( + // Default behavior specific to implementation (i.e. Envoy) of this configuration option. + // Envoy, by default, takes the KEEP_UNCHANGED action. 
+ // NOTE: the implementation may change the default behavior at-will. + HttpConnectionManager_IMPLEMENTATION_SPECIFIC_DEFAULT HttpConnectionManager_PathWithEscapedSlashesAction = 0 + // Keep escaped slashes. + HttpConnectionManager_KEEP_UNCHANGED HttpConnectionManager_PathWithEscapedSlashesAction = 1 + // Reject client request with the 400 status. gRPC requests will be rejected with the INTERNAL (13) error code. + // The "httpN.downstream_rq_failed_path_normalization" counter is incremented for each rejected request. + HttpConnectionManager_REJECT_REQUEST HttpConnectionManager_PathWithEscapedSlashesAction = 2 + // Unescape %2F and %5C sequences and redirect request to the new path if these sequences were present. + // Redirect occurs after path normalization and merge slashes transformations if they were configured. + // NOTE: gRPC requests will be rejected with the INTERNAL (13) error code. + // This option minimizes possibility of path confusion exploits by forcing request with unescaped slashes to + // traverse all parties: downstream client, intermediate proxies, Envoy and upstream server. + // The "httpN.downstream_rq_redirected_with_normalized_path" counter is incremented for each + // redirected request. + HttpConnectionManager_UNESCAPE_AND_REDIRECT HttpConnectionManager_PathWithEscapedSlashesAction = 3 + // Unescape %2F and %5C sequences. + // Note: this option should not be enabled if intermediaries perform path based access control as + // it may lead to path confusion vulnerabilities. + HttpConnectionManager_UNESCAPE_AND_FORWARD HttpConnectionManager_PathWithEscapedSlashesAction = 4 +) + +// Enum value maps for HttpConnectionManager_PathWithEscapedSlashesAction. +var ( + HttpConnectionManager_PathWithEscapedSlashesAction_name = map[int32]string{ + 0: "IMPLEMENTATION_SPECIFIC_DEFAULT", + 1: "KEEP_UNCHANGED", + 2: "REJECT_REQUEST", + 3: "UNESCAPE_AND_REDIRECT", + 4: "UNESCAPE_AND_FORWARD", + } + HttpConnectionManager_PathWithEscapedSlashesAction_value = map[string]int32{ + "IMPLEMENTATION_SPECIFIC_DEFAULT": 0, + "KEEP_UNCHANGED": 1, + "REJECT_REQUEST": 2, + "UNESCAPE_AND_REDIRECT": 3, + "UNESCAPE_AND_FORWARD": 4, + } +) + +func (x HttpConnectionManager_PathWithEscapedSlashesAction) Enum() *HttpConnectionManager_PathWithEscapedSlashesAction { + p := new(HttpConnectionManager_PathWithEscapedSlashesAction) + *p = x + return p +} + +func (x HttpConnectionManager_PathWithEscapedSlashesAction) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HttpConnectionManager_PathWithEscapedSlashesAction) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_enumTypes[3].Descriptor() +} + +func (HttpConnectionManager_PathWithEscapedSlashesAction) Type() protoreflect.EnumType { + return &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_enumTypes[3] +} + +func (x HttpConnectionManager_PathWithEscapedSlashesAction) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HttpConnectionManager_PathWithEscapedSlashesAction.Descriptor instead. 
+func (HttpConnectionManager_PathWithEscapedSlashesAction) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 3} +} + +type HttpConnectionManager_Tracing_OperationName int32 + +const ( + // The HTTP listener is used for ingress/incoming requests. + HttpConnectionManager_Tracing_INGRESS HttpConnectionManager_Tracing_OperationName = 0 + // The HTTP listener is used for egress/outgoing requests. + HttpConnectionManager_Tracing_EGRESS HttpConnectionManager_Tracing_OperationName = 1 +) + +// Enum value maps for HttpConnectionManager_Tracing_OperationName. +var ( + HttpConnectionManager_Tracing_OperationName_name = map[int32]string{ + 0: "INGRESS", + 1: "EGRESS", + } + HttpConnectionManager_Tracing_OperationName_value = map[string]int32{ + "INGRESS": 0, + "EGRESS": 1, + } +) + +func (x HttpConnectionManager_Tracing_OperationName) Enum() *HttpConnectionManager_Tracing_OperationName { + p := new(HttpConnectionManager_Tracing_OperationName) + *p = x + return p +} + +func (x HttpConnectionManager_Tracing_OperationName) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HttpConnectionManager_Tracing_OperationName) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_enumTypes[4].Descriptor() +} + +func (HttpConnectionManager_Tracing_OperationName) Type() protoreflect.EnumType { + return &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_enumTypes[4] +} + +func (x HttpConnectionManager_Tracing_OperationName) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HttpConnectionManager_Tracing_OperationName.Descriptor instead. +func (HttpConnectionManager_Tracing_OperationName) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 0, 0} +} + +// [#next-free-field: 59] +type HttpConnectionManager struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Supplies the type of codec that the connection manager should use. + CodecType HttpConnectionManager_CodecType `protobuf:"varint,1,opt,name=codec_type,json=codecType,proto3,enum=envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager_CodecType" json:"codec_type,omitempty"` + // The human readable prefix to use when emitting statistics for the + // connection manager. See the :ref:`statistics documentation ` for + // more information. + StatPrefix string `protobuf:"bytes,2,opt,name=stat_prefix,json=statPrefix,proto3" json:"stat_prefix,omitempty"` + // Types that are assignable to RouteSpecifier: + // + // *HttpConnectionManager_Rds + // *HttpConnectionManager_RouteConfig + // *HttpConnectionManager_ScopedRoutes + RouteSpecifier isHttpConnectionManager_RouteSpecifier `protobuf_oneof:"route_specifier"` + // A list of individual HTTP filters that make up the filter chain for + // requests made to the connection manager. :ref:`Order matters ` + // as the filters are processed sequentially as request events happen. 
+ HttpFilters []*HttpFilter `protobuf:"bytes,5,rep,name=http_filters,json=httpFilters,proto3" json:"http_filters,omitempty"` + // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` + // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked + // documentation for more information. Defaults to false. + AddUserAgent *wrapperspb.BoolValue `protobuf:"bytes,6,opt,name=add_user_agent,json=addUserAgent,proto3" json:"add_user_agent,omitempty"` + // Presence of the object defines whether the connection manager + // emits :ref:`tracing ` data to the :ref:`configured tracing provider + // `. + Tracing *HttpConnectionManager_Tracing `protobuf:"bytes,7,opt,name=tracing,proto3" json:"tracing,omitempty"` + // Additional settings for HTTP requests handled by the connection manager. These will be + // applicable to both HTTP1 and HTTP2 requests. + CommonHttpProtocolOptions *v3.HttpProtocolOptions `protobuf:"bytes,35,opt,name=common_http_protocol_options,json=commonHttpProtocolOptions,proto3" json:"common_http_protocol_options,omitempty"` + // If set to true, Envoy will not start a drain timer for downstream HTTP1 connections after + // :ref:`common_http_protocol_options.max_connection_duration + // ` passes. + // Instead, Envoy will wait for the next downstream request, add connection:close to the response + // headers, then close the connection after the stream ends. + // + // This behavior is compliant with `RFC 9112 section 9.6 `_ + // + // If set to false, “max_connection_duration“ will cause Envoy to enter the normal drain + // sequence for HTTP1 with Envoy eventually closing the connection (once there are no active + // streams). + // + // Has no effect if “max_connection_duration“ is unset. Defaults to false. + Http1SafeMaxConnectionDuration bool `protobuf:"varint,58,opt,name=http1_safe_max_connection_duration,json=http1SafeMaxConnectionDuration,proto3" json:"http1_safe_max_connection_duration,omitempty"` + // Additional HTTP/1 settings that are passed to the HTTP/1 codec. + // [#comment:TODO: The following fields are ignored when the + // :ref:`header validation configuration ` + // is present: + // 1. :ref:`allow_chunked_length `] + HttpProtocolOptions *v3.Http1ProtocolOptions `protobuf:"bytes,8,opt,name=http_protocol_options,json=httpProtocolOptions,proto3" json:"http_protocol_options,omitempty"` + // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. + Http2ProtocolOptions *v3.Http2ProtocolOptions `protobuf:"bytes,9,opt,name=http2_protocol_options,json=http2ProtocolOptions,proto3" json:"http2_protocol_options,omitempty"` + // Additional HTTP/3 settings that are passed directly to the HTTP/3 codec. + // [#not-implemented-hide:] + Http3ProtocolOptions *v3.Http3ProtocolOptions `protobuf:"bytes,44,opt,name=http3_protocol_options,json=http3ProtocolOptions,proto3" json:"http3_protocol_options,omitempty"` + // An optional override that the connection manager will write to the server + // header in responses. If not set, the default is “envoy“. + ServerName string `protobuf:"bytes,10,opt,name=server_name,json=serverName,proto3" json:"server_name,omitempty"` + // Defines the action to be applied to the Server header on the response path. + // By default, Envoy will overwrite the header with the value specified in + // server_name. 
+ ServerHeaderTransformation HttpConnectionManager_ServerHeaderTransformation `protobuf:"varint,34,opt,name=server_header_transformation,json=serverHeaderTransformation,proto3,enum=envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager_ServerHeaderTransformation" json:"server_header_transformation,omitempty"` + // Allows for explicit transformation of the :scheme header on the request path. + // If not set, Envoy's default :ref:`scheme ` + // handling applies. + SchemeHeaderTransformation *v3.SchemeHeaderTransformation `protobuf:"bytes,48,opt,name=scheme_header_transformation,json=schemeHeaderTransformation,proto3" json:"scheme_header_transformation,omitempty"` + // The maximum request headers size for incoming connections. + // If unconfigured, the default max request headers allowed is 60 KiB. + // Requests that exceed this limit will receive a 431 response. + MaxRequestHeadersKb *wrapperspb.UInt32Value `protobuf:"bytes,29,opt,name=max_request_headers_kb,json=maxRequestHeadersKb,proto3" json:"max_request_headers_kb,omitempty"` + // The stream idle timeout for connections managed by the connection manager. + // If not specified, this defaults to 5 minutes. The default value was selected + // so as not to interfere with any smaller configured timeouts that may have + // existed in configurations prior to the introduction of this feature, while + // introducing robustness to TCP connections that terminate without a FIN. + // + // This idle timeout applies to new streams and is overridable by the + // :ref:`route-level idle_timeout + // `. Even on a stream in + // which the override applies, prior to receipt of the initial request + // headers, the :ref:`stream_idle_timeout + // ` + // applies. Each time an encode/decode event for headers or data is processed + // for the stream, the timer will be reset. If the timeout fires, the stream + // is terminated with a 408 Request Timeout error code if no upstream response + // header has been received, otherwise a stream reset occurs. + // + // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough + // window to write any remaining stream data once the entirety of stream data (local end stream is + // true) has been buffered pending available window. In other words, this timeout defends against + // a peer that does not release enough window to completely write the stream, even though all + // data has been proxied within available flow control windows. If the timeout is hit in this + // case, the :ref:`tx_flush_timeout ` counter will be + // incremented. Note that :ref:`max_stream_duration + // ` does not apply to + // this corner case. + // + // If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" + // is configured, this timeout is scaled according to the value for + // :ref:`HTTP_DOWNSTREAM_STREAM_IDLE `. + // + // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due + // to the granularity of events presented to the connection manager. For example, while receiving + // very large request headers, it may be the case that there is traffic regularly arriving on the + // wire while the connection manage is only able to observe the end-of-headers event, hence the + // stream may still idle timeout. + // + // A value of 0 will completely disable the connection manager stream idle + // timeout, although per-route idle timeout overrides will continue to apply. 
+ StreamIdleTimeout *durationpb.Duration `protobuf:"bytes,24,opt,name=stream_idle_timeout,json=streamIdleTimeout,proto3" json:"stream_idle_timeout,omitempty"` + // The amount of time that Envoy will wait for the entire request to be received. + // The timer is activated when the request is initiated, and is disarmed when the last byte of the + // request is sent upstream (i.e. all decoding filters have processed the request), OR when the + // response is initiated. If not specified or set to 0, this timeout is disabled. + RequestTimeout *durationpb.Duration `protobuf:"bytes,28,opt,name=request_timeout,json=requestTimeout,proto3" json:"request_timeout,omitempty"` + // The amount of time that Envoy will wait for the request headers to be received. The timer is + // activated when the first byte of the headers is received, and is disarmed when the last byte of + // the headers has been received. If not specified or set to 0, this timeout is disabled. + RequestHeadersTimeout *durationpb.Duration `protobuf:"bytes,41,opt,name=request_headers_timeout,json=requestHeadersTimeout,proto3" json:"request_headers_timeout,omitempty"` + // The time that Envoy will wait between sending an HTTP/2 “shutdown + // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. + // This is used so that Envoy provides a grace period for new streams that + // race with the final GOAWAY frame. During this grace period, Envoy will + // continue to accept new streams. After the grace period, a final GOAWAY + // frame is sent and Envoy will start refusing new streams. Draining occurs + // either when a connection hits the idle timeout, when :ref:`max_connection_duration + // ` + // is reached, or during general server draining. The default grace period is + // 5000 milliseconds (5 seconds) if this option is not specified. + DrainTimeout *durationpb.Duration `protobuf:"bytes,12,opt,name=drain_timeout,json=drainTimeout,proto3" json:"drain_timeout,omitempty"` + // The delayed close timeout is for downstream connections managed by the HTTP connection manager. + // It is defined as a grace period after connection close processing has been locally initiated + // during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy + // from the downstream connection) prior to Envoy closing the socket associated with that + // connection. + // NOTE: This timeout is enforced even when the socket associated with the downstream connection + // is pending a flush of the write buffer. However, any progress made writing data to the socket + // will restart the timer associated with this timeout. This means that the total grace period for + // a socket in this state will be + // +. + // + // Delaying Envoy's connection close and giving the peer the opportunity to initiate the close + // sequence mitigates a race condition that exists when downstream clients do not drain/process + // data in a connection's receive buffer after a remote close has been detected via a socket + // write(). This race leads to such clients failing to process the response code sent by Envoy, + // which could result in erroneous downstream processing. + // + // If the timeout triggers, Envoy will close the connection's socket. + // + // The default timeout is 1000 ms if this option is not specified. + // + // .. 
NOTE:: + // + // To be useful in avoiding the race condition described above, this timeout must be set + // to *at least* +<100ms to account for + // a reasonable "worst" case processing time for a full iteration of Envoy's event loop>. + // + // .. WARNING:: + // + // A value of 0 will completely disable delayed close processing. When disabled, the downstream + // connection's socket will be closed immediately after the write flush is completed or will + // never close if the write flush does not complete. + DelayedCloseTimeout *durationpb.Duration `protobuf:"bytes,26,opt,name=delayed_close_timeout,json=delayedCloseTimeout,proto3" json:"delayed_close_timeout,omitempty"` + // Configuration for :ref:`HTTP access logs ` + // emitted by the connection manager. + AccessLog []*v31.AccessLog `protobuf:"bytes,13,rep,name=access_log,json=accessLog,proto3" json:"access_log,omitempty"` + // The interval to flush the above access logs. + // + // .. attention:: + // + // This field is deprecated in favor of + // :ref:`access_log_flush_interval + // `. + // Note that if both this field and :ref:`access_log_flush_interval + // ` + // are specified, the former (deprecated field) is ignored. + // + // Deprecated: Marked as deprecated in envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto. + AccessLogFlushInterval *durationpb.Duration `protobuf:"bytes,54,opt,name=access_log_flush_interval,json=accessLogFlushInterval,proto3" json:"access_log_flush_interval,omitempty"` + // If set to true, HCM will flush an access log once when a new HTTP request is received, after the request + // headers have been evaluated, and before iterating through the HTTP filter chain. + // + // .. attention:: + // + // This field is deprecated in favor of + // :ref:`flush_access_log_on_new_request + // `. + // Note that if both this field and :ref:`flush_access_log_on_new_request + // ` + // are specified, the former (deprecated field) is ignored. + // + // Deprecated: Marked as deprecated in envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto. + FlushAccessLogOnNewRequest bool `protobuf:"varint,55,opt,name=flush_access_log_on_new_request,json=flushAccessLogOnNewRequest,proto3" json:"flush_access_log_on_new_request,omitempty"` + // Additional access log options for HTTP connection manager. + AccessLogOptions *HttpConnectionManager_HcmAccessLogOptions `protobuf:"bytes,56,opt,name=access_log_options,json=accessLogOptions,proto3" json:"access_log_options,omitempty"` + // If set to true, the connection manager will use the real remote address + // of the client connection when determining internal versus external origin and manipulating + // various headers. If set to false or absent, the connection manager will use the + // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. See the documentation for + // :ref:`config_http_conn_man_headers_x-forwarded-for`, + // :ref:`config_http_conn_man_headers_x-envoy-internal`, and + // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. + UseRemoteAddress *wrapperspb.BoolValue `protobuf:"bytes,14,opt,name=use_remote_address,json=useRemoteAddress,proto3" json:"use_remote_address,omitempty"` + // The number of additional ingress proxy hops from the right side of the + // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when + // determining the origin client's IP address. The default is zero if this option + // is not specified. 
See the documentation for + // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. + XffNumTrustedHops uint32 `protobuf:"varint,19,opt,name=xff_num_trusted_hops,json=xffNumTrustedHops,proto3" json:"xff_num_trusted_hops,omitempty"` + // The configuration for the original IP detection extensions. + // + // When configured the extensions will be called along with the request headers + // and information about the downstream connection, such as the directly connected address. + // Each extension will then use these parameters to decide the request's effective remote address. + // If an extension fails to detect the original IP address and isn't configured to reject + // the request, the HCM will try the remaining extensions until one succeeds or rejects + // the request. If the request isn't rejected nor any extension succeeds, the HCM will + // fallback to using the remote address. + // + // .. WARNING:: + // + // Extensions cannot be used in conjunction with :ref:`use_remote_address + // ` + // nor :ref:`xff_num_trusted_hops + // `. + // + // [#extension-category: envoy.http.original_ip_detection] + OriginalIpDetectionExtensions []*v3.TypedExtensionConfig `protobuf:"bytes,46,rep,name=original_ip_detection_extensions,json=originalIpDetectionExtensions,proto3" json:"original_ip_detection_extensions,omitempty"` + // The configuration for the early header mutation extensions. + // + // When configured the extensions will be called before any routing, tracing, or any filter processing. + // Each extension will be applied in the order they are configured. + // If the same header is mutated by multiple extensions, then the last extension will win. + // + // [#extension-category: envoy.http.early_header_mutation] + EarlyHeaderMutationExtensions []*v3.TypedExtensionConfig `protobuf:"bytes,52,rep,name=early_header_mutation_extensions,json=earlyHeaderMutationExtensions,proto3" json:"early_header_mutation_extensions,omitempty"` + // Configures what network addresses are considered internal for stats and header sanitation + // purposes. If unspecified, only RFC1918 IP addresses will be considered internal. + // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more + // information about internal/external addresses. + InternalAddressConfig *HttpConnectionManager_InternalAddressConfig `protobuf:"bytes,25,opt,name=internal_address_config,json=internalAddressConfig,proto3" json:"internal_address_config,omitempty"` + // If set, Envoy will not append the remote address to the + // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in + // conjunction with HTTP filters that explicitly manipulate XFF after the HTTP connection manager + // has mutated the request headers. While :ref:`use_remote_address + // ` + // will also suppress XFF addition, it has consequences for logging and other + // Envoy uses of the remote address, so “skip_xff_append“ should be used + // when only an elision of XFF addition is intended. + SkipXffAppend bool `protobuf:"varint,21,opt,name=skip_xff_append,json=skipXffAppend,proto3" json:"skip_xff_append,omitempty"` + // Via header value to append to request and response headers. If this is + // empty, no via header will be appended. + Via string `protobuf:"bytes,22,opt,name=via,proto3" json:"via,omitempty"` + // Whether the connection manager will generate the :ref:`x-request-id + // ` header if it does not exist. This defaults to + // true. 
Generating a random UUID4 is expensive so in high throughput scenarios where this feature + // is not desired it can be disabled. + GenerateRequestId *wrapperspb.BoolValue `protobuf:"bytes,15,opt,name=generate_request_id,json=generateRequestId,proto3" json:"generate_request_id,omitempty"` + // Whether the connection manager will keep the :ref:`x-request-id + // ` header if passed for a request that is edge + // (Edge request is the request from external clients to front Envoy) and not reset it, which + // is the current Envoy behaviour. This defaults to false. + PreserveExternalRequestId bool `protobuf:"varint,32,opt,name=preserve_external_request_id,json=preserveExternalRequestId,proto3" json:"preserve_external_request_id,omitempty"` + // If set, Envoy will always set :ref:`x-request-id ` header in response. + // If this is false or not set, the request ID is returned in responses only if tracing is forced using + // :ref:`x-envoy-force-trace ` header. + AlwaysSetRequestIdInResponse bool `protobuf:"varint,37,opt,name=always_set_request_id_in_response,json=alwaysSetRequestIdInResponse,proto3" json:"always_set_request_id_in_response,omitempty"` + // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP + // header. + ForwardClientCertDetails HttpConnectionManager_ForwardClientCertDetails `protobuf:"varint,16,opt,name=forward_client_cert_details,json=forwardClientCertDetails,proto3,enum=envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager_ForwardClientCertDetails" json:"forward_client_cert_details,omitempty"` + // This field is valid only when :ref:`forward_client_cert_details + // ` + // is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in + // the client certificate to be forwarded. Note that in the + // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, “Hash“ is always set, and + // “By“ is always set when the client certificate presents the URI type Subject Alternative Name + // value. + SetCurrentClientCertDetails *HttpConnectionManager_SetCurrentClientCertDetails `protobuf:"bytes,17,opt,name=set_current_client_cert_details,json=setCurrentClientCertDetails,proto3" json:"set_current_client_cert_details,omitempty"` + // If proxy_100_continue is true, Envoy will proxy incoming "Expect: + // 100-continue" headers upstream, and forward "100 Continue" responses + // downstream. If this is false or not set, Envoy will instead strip the + // "Expect: 100-continue" header, and send a "100 Continue" response itself. + Proxy_100Continue bool `protobuf:"varint,18,opt,name=proxy_100_continue,json=proxy100Continue,proto3" json:"proxy_100_continue,omitempty"` + // If + // :ref:`use_remote_address + // ` + // is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is + // an IPv4 address, the address will be mapped to IPv6 before it is appended to “x-forwarded-for“. + // This is useful for testing compatibility of upstream services that parse the header value. For + // example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. See `IPv4-Mapped IPv6 Addresses + // `_ for details. This will also affect the + // :ref:`config_http_conn_man_headers_x-envoy-external-address` header. See + // :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6 + // ` for runtime + // control. 
+ // [#not-implemented-hide:] + RepresentIpv4RemoteAddressAsIpv4MappedIpv6 bool `protobuf:"varint,20,opt,name=represent_ipv4_remote_address_as_ipv4_mapped_ipv6,json=representIpv4RemoteAddressAsIpv4MappedIpv6,proto3" json:"represent_ipv4_remote_address_as_ipv4_mapped_ipv6,omitempty"` + UpgradeConfigs []*HttpConnectionManager_UpgradeConfig `protobuf:"bytes,23,rep,name=upgrade_configs,json=upgradeConfigs,proto3" json:"upgrade_configs,omitempty"` + // Should paths be normalized according to RFC 3986 before any processing of + // requests by HTTP filters or routing? This affects the upstream “:path“ header + // as well. For paths that fail this check, Envoy will respond with 400 to + // paths that are malformed. This defaults to false currently but will default + // true in the future. When not specified, this value may be overridden by the + // runtime variable + // :ref:`http_connection_manager.normalize_path`. + // See `Normalization and Comparison `_ + // for details of normalization. + // Note that Envoy does not perform + // `case normalization `_ + // [#comment:TODO: This field is ignored when the + // :ref:`header validation configuration ` + // is present.] + NormalizePath *wrapperspb.BoolValue `protobuf:"bytes,30,opt,name=normalize_path,json=normalizePath,proto3" json:"normalize_path,omitempty"` + // Determines if adjacent slashes in the path are merged into one before any processing of + // requests by HTTP filters or routing. This affects the upstream “:path“ header as well. Without + // setting this option, incoming requests with path “//dir///file“ will not match against route + // with “prefix“ match set to “/dir“. Defaults to “false“. Note that slash merging is not part of + // `HTTP spec `_ and is provided for convenience. + // [#comment:TODO: This field is ignored when the + // :ref:`header validation configuration ` + // is present.] + MergeSlashes bool `protobuf:"varint,33,opt,name=merge_slashes,json=mergeSlashes,proto3" json:"merge_slashes,omitempty"` + // Action to take when request URL path contains escaped slash sequences (%2F, %2f, %5C and %5c). + // The default value can be overridden by the :ref:`http_connection_manager.path_with_escaped_slashes_action` + // runtime variable. + // The :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling` runtime + // variable can be used to apply the action to a portion of all requests. + // [#comment:TODO: This field is ignored when the + // :ref:`header validation configuration ` + // is present.] + PathWithEscapedSlashesAction HttpConnectionManager_PathWithEscapedSlashesAction `protobuf:"varint,45,opt,name=path_with_escaped_slashes_action,json=pathWithEscapedSlashesAction,proto3,enum=envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager_PathWithEscapedSlashesAction" json:"path_with_escaped_slashes_action,omitempty"` + // The configuration of the request ID extension. This includes operations such as + // generation, validation, and associated tracing operations. If empty, the + // :ref:`UuidRequestIdConfig ` + // default extension is used with default parameters. See the documentation for that extension + // for details on what it does. Customizing the configuration for the default extension can be + // achieved by configuring it explicitly here. For example, to disable trace reason packing, + // the following configuration can be used: + // + // .. 
validated-code-block:: yaml + // + // :type-name: envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension + // + // typed_config: + // "@type": type.googleapis.com/envoy.extensions.request_id.uuid.v3.UuidRequestIdConfig + // pack_trace_reason: false + // + // [#extension-category: envoy.request_id] + RequestIdExtension *RequestIDExtension `protobuf:"bytes,36,opt,name=request_id_extension,json=requestIdExtension,proto3" json:"request_id_extension,omitempty"` + // The configuration to customize local reply returned by Envoy. It can customize status code, + // body text and response content type. If not specified, status code and text body are hard + // coded in Envoy, the response content type is plain text. + LocalReplyConfig *LocalReplyConfig `protobuf:"bytes,38,opt,name=local_reply_config,json=localReplyConfig,proto3" json:"local_reply_config,omitempty"` + // Determines if the port part should be removed from host/authority header before any processing + // of request by HTTP filters or routing. The port would be removed only if it is equal to the :ref:`listener's` + // local port. This affects the upstream host header unless the method is + // CONNECT in which case if no filter adds a port the original port will be restored before headers are + // sent upstream. + // Without setting this option, incoming requests with host “example:443“ will not match against + // route with :ref:`domains` match set to “example“. Defaults to “false“. Note that port removal is not part + // of `HTTP spec `_ and is provided for convenience. + // Only one of “strip_matching_host_port“ or “strip_any_host_port“ can be set. + StripMatchingHostPort bool `protobuf:"varint,39,opt,name=strip_matching_host_port,json=stripMatchingHostPort,proto3" json:"strip_matching_host_port,omitempty"` + // Types that are assignable to StripPortMode: + // + // *HttpConnectionManager_StripAnyHostPort + StripPortMode isHttpConnectionManager_StripPortMode `protobuf_oneof:"strip_port_mode"` + // Governs Envoy's behavior when receiving invalid HTTP from downstream. + // If this option is false (default), Envoy will err on the conservative side handling HTTP + // errors, terminating both HTTP/1.1 and HTTP/2 connections when receiving an invalid request. + // If this option is set to true, Envoy will be more permissive, only resetting the invalid + // stream in the case of HTTP/2 and leaving the connection open where possible (if the entire + // request is read for HTTP/1.1) + // In general this should be true for deployments receiving trusted traffic (L2 Envoys, + // company-internal mesh) and false when receiving untrusted traffic (edge deployments). + // + // If different behaviors for invalid_http_message for HTTP/1 and HTTP/2 are + // desired, one should use the new HTTP/1 option :ref:`override_stream_error_on_invalid_http_message + // ` or the new HTTP/2 option + // :ref:`override_stream_error_on_invalid_http_message + // ` + // “not“ the deprecated but similarly named :ref:`stream_error_on_invalid_http_messaging + // ` + StreamErrorOnInvalidHttpMessage *wrapperspb.BoolValue `protobuf:"bytes,40,opt,name=stream_error_on_invalid_http_message,json=streamErrorOnInvalidHttpMessage,proto3" json:"stream_error_on_invalid_http_message,omitempty"` + // [#not-implemented-hide:] Path normalization configuration. This includes + // configurations for transformations (e.g. RFC 3986 normalization or merge + // adjacent slashes) and the policy to apply them. 
The policy determines + // whether transformations affect the forwarded “:path“ header. RFC 3986 path + // normalization is enabled by default and the default policy is that the + // normalized header will be forwarded. See :ref:`PathNormalizationOptions + // ` + // for details. + PathNormalizationOptions *HttpConnectionManager_PathNormalizationOptions `protobuf:"bytes,43,opt,name=path_normalization_options,json=pathNormalizationOptions,proto3" json:"path_normalization_options,omitempty"` + // Determines if trailing dot of the host should be removed from host/authority header before any + // processing of request by HTTP filters or routing. + // This affects the upstream host header. + // Without setting this option, incoming requests with host “example.com.“ will not match against + // route with :ref:`domains` match set to “example.com“. Defaults to “false“. + // When the incoming request contains a host/authority header that includes a port number, + // setting this option will strip a trailing dot, if present, from the host section, + // leaving the port as is (e.g. host value “example.com.:443“ will be updated to “example.com:443“). + StripTrailingHostDot bool `protobuf:"varint,47,opt,name=strip_trailing_host_dot,json=stripTrailingHostDot,proto3" json:"strip_trailing_host_dot,omitempty"` + // Proxy-Status HTTP response header configuration. + // If this config is set, the Proxy-Status HTTP response header field is + // populated. By default, it is not. + ProxyStatusConfig *HttpConnectionManager_ProxyStatusConfig `protobuf:"bytes,49,opt,name=proxy_status_config,json=proxyStatusConfig,proto3" json:"proxy_status_config,omitempty"` + // Configuration options for Header Validation (UHV). + // UHV is an extensible mechanism for checking validity of HTTP requests as well as providing + // normalization for request attributes, such as URI path. + // If the typed_header_validation_config is present it overrides the following options: + // “normalize_path“, “merge_slashes“, “path_with_escaped_slashes_action“ + // “http_protocol_options.allow_chunked_length“, “common_http_protocol_options.headers_with_underscores_action“. + // + // The default UHV checks the following: + // + // #. HTTP/1 header map validity according to `RFC 7230 section 3.2`_ + // #. Syntax of HTTP/1 request target URI and response status + // #. HTTP/2 header map validity according to `RFC 7540 section 8.1.2`_ + // #. Syntax of HTTP/3 pseudo headers + // #. Syntax of “Content-Length“ and “Transfer-Encoding“ + // #. Validation of HTTP/1 requests with both “Content-Length“ and “Transfer-Encoding“ headers + // #. Normalization of the URI path according to `Normalization and Comparison `_ + // + // without `case normalization `_ + // + // [#not-implemented-hide:] + // [#extension-category: envoy.http.header_validators] + TypedHeaderValidationConfig *v3.TypedExtensionConfig `protobuf:"bytes,50,opt,name=typed_header_validation_config,json=typedHeaderValidationConfig,proto3" json:"typed_header_validation_config,omitempty"` + // Append the “x-forwarded-port“ header with the port value client used to connect to Envoy. It + // will be ignored if the “x-forwarded-port“ header has been set by any trusted proxy in front of Envoy. + AppendXForwardedPort bool `protobuf:"varint,51,opt,name=append_x_forwarded_port,json=appendXForwardedPort,proto3" json:"append_x_forwarded_port,omitempty"` + // Append the :ref:`config_http_conn_man_headers_x-envoy-local-overloaded` HTTP header in the scenario where + // the Overload Manager has been triggered. 
+ AppendLocalOverload bool `protobuf:"varint,57,opt,name=append_local_overload,json=appendLocalOverload,proto3" json:"append_local_overload,omitempty"` + // Whether the HCM will add ProxyProtocolFilterState to the Connection lifetime filter state. Defaults to “true“. + // This should be set to “false“ in cases where Envoy's view of the downstream address may not correspond to the + // actual client address, for example, if there's another proxy in front of the Envoy. + AddProxyProtocolConnectionState *wrapperspb.BoolValue `protobuf:"bytes,53,opt,name=add_proxy_protocol_connection_state,json=addProxyProtocolConnectionState,proto3" json:"add_proxy_protocol_connection_state,omitempty"` +} + +func (x *HttpConnectionManager) Reset() { + *x = HttpConnectionManager{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpConnectionManager) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpConnectionManager) ProtoMessage() {} + +func (x *HttpConnectionManager) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpConnectionManager.ProtoReflect.Descriptor instead. +func (*HttpConnectionManager) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0} +} + +func (x *HttpConnectionManager) GetCodecType() HttpConnectionManager_CodecType { + if x != nil { + return x.CodecType + } + return HttpConnectionManager_AUTO +} + +func (x *HttpConnectionManager) GetStatPrefix() string { + if x != nil { + return x.StatPrefix + } + return "" +} + +func (m *HttpConnectionManager) GetRouteSpecifier() isHttpConnectionManager_RouteSpecifier { + if m != nil { + return m.RouteSpecifier + } + return nil +} + +func (x *HttpConnectionManager) GetRds() *Rds { + if x, ok := x.GetRouteSpecifier().(*HttpConnectionManager_Rds); ok { + return x.Rds + } + return nil +} + +func (x *HttpConnectionManager) GetRouteConfig() *v32.RouteConfiguration { + if x, ok := x.GetRouteSpecifier().(*HttpConnectionManager_RouteConfig); ok { + return x.RouteConfig + } + return nil +} + +func (x *HttpConnectionManager) GetScopedRoutes() *ScopedRoutes { + if x, ok := x.GetRouteSpecifier().(*HttpConnectionManager_ScopedRoutes); ok { + return x.ScopedRoutes + } + return nil +} + +func (x *HttpConnectionManager) GetHttpFilters() []*HttpFilter { + if x != nil { + return x.HttpFilters + } + return nil +} + +func (x *HttpConnectionManager) GetAddUserAgent() *wrapperspb.BoolValue { + if x != nil { + return x.AddUserAgent + } + return nil +} + +func (x *HttpConnectionManager) GetTracing() *HttpConnectionManager_Tracing { + if x != nil { + return x.Tracing + } + return nil +} + +func (x *HttpConnectionManager) GetCommonHttpProtocolOptions() *v3.HttpProtocolOptions { + if x != nil { + return x.CommonHttpProtocolOptions + } + return nil +} + +func (x *HttpConnectionManager) GetHttp1SafeMaxConnectionDuration() bool { + if x != nil { + return x.Http1SafeMaxConnectionDuration + } + return false +} + +func (x 
*HttpConnectionManager) GetHttpProtocolOptions() *v3.Http1ProtocolOptions { + if x != nil { + return x.HttpProtocolOptions + } + return nil +} + +func (x *HttpConnectionManager) GetHttp2ProtocolOptions() *v3.Http2ProtocolOptions { + if x != nil { + return x.Http2ProtocolOptions + } + return nil +} + +func (x *HttpConnectionManager) GetHttp3ProtocolOptions() *v3.Http3ProtocolOptions { + if x != nil { + return x.Http3ProtocolOptions + } + return nil +} + +func (x *HttpConnectionManager) GetServerName() string { + if x != nil { + return x.ServerName + } + return "" +} + +func (x *HttpConnectionManager) GetServerHeaderTransformation() HttpConnectionManager_ServerHeaderTransformation { + if x != nil { + return x.ServerHeaderTransformation + } + return HttpConnectionManager_OVERWRITE +} + +func (x *HttpConnectionManager) GetSchemeHeaderTransformation() *v3.SchemeHeaderTransformation { + if x != nil { + return x.SchemeHeaderTransformation + } + return nil +} + +func (x *HttpConnectionManager) GetMaxRequestHeadersKb() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxRequestHeadersKb + } + return nil +} + +func (x *HttpConnectionManager) GetStreamIdleTimeout() *durationpb.Duration { + if x != nil { + return x.StreamIdleTimeout + } + return nil +} + +func (x *HttpConnectionManager) GetRequestTimeout() *durationpb.Duration { + if x != nil { + return x.RequestTimeout + } + return nil +} + +func (x *HttpConnectionManager) GetRequestHeadersTimeout() *durationpb.Duration { + if x != nil { + return x.RequestHeadersTimeout + } + return nil +} + +func (x *HttpConnectionManager) GetDrainTimeout() *durationpb.Duration { + if x != nil { + return x.DrainTimeout + } + return nil +} + +func (x *HttpConnectionManager) GetDelayedCloseTimeout() *durationpb.Duration { + if x != nil { + return x.DelayedCloseTimeout + } + return nil +} + +func (x *HttpConnectionManager) GetAccessLog() []*v31.AccessLog { + if x != nil { + return x.AccessLog + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto. +func (x *HttpConnectionManager) GetAccessLogFlushInterval() *durationpb.Duration { + if x != nil { + return x.AccessLogFlushInterval + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto. 
+func (x *HttpConnectionManager) GetFlushAccessLogOnNewRequest() bool { + if x != nil { + return x.FlushAccessLogOnNewRequest + } + return false +} + +func (x *HttpConnectionManager) GetAccessLogOptions() *HttpConnectionManager_HcmAccessLogOptions { + if x != nil { + return x.AccessLogOptions + } + return nil +} + +func (x *HttpConnectionManager) GetUseRemoteAddress() *wrapperspb.BoolValue { + if x != nil { + return x.UseRemoteAddress + } + return nil +} + +func (x *HttpConnectionManager) GetXffNumTrustedHops() uint32 { + if x != nil { + return x.XffNumTrustedHops + } + return 0 +} + +func (x *HttpConnectionManager) GetOriginalIpDetectionExtensions() []*v3.TypedExtensionConfig { + if x != nil { + return x.OriginalIpDetectionExtensions + } + return nil +} + +func (x *HttpConnectionManager) GetEarlyHeaderMutationExtensions() []*v3.TypedExtensionConfig { + if x != nil { + return x.EarlyHeaderMutationExtensions + } + return nil +} + +func (x *HttpConnectionManager) GetInternalAddressConfig() *HttpConnectionManager_InternalAddressConfig { + if x != nil { + return x.InternalAddressConfig + } + return nil +} + +func (x *HttpConnectionManager) GetSkipXffAppend() bool { + if x != nil { + return x.SkipXffAppend + } + return false +} + +func (x *HttpConnectionManager) GetVia() string { + if x != nil { + return x.Via + } + return "" +} + +func (x *HttpConnectionManager) GetGenerateRequestId() *wrapperspb.BoolValue { + if x != nil { + return x.GenerateRequestId + } + return nil +} + +func (x *HttpConnectionManager) GetPreserveExternalRequestId() bool { + if x != nil { + return x.PreserveExternalRequestId + } + return false +} + +func (x *HttpConnectionManager) GetAlwaysSetRequestIdInResponse() bool { + if x != nil { + return x.AlwaysSetRequestIdInResponse + } + return false +} + +func (x *HttpConnectionManager) GetForwardClientCertDetails() HttpConnectionManager_ForwardClientCertDetails { + if x != nil { + return x.ForwardClientCertDetails + } + return HttpConnectionManager_SANITIZE +} + +func (x *HttpConnectionManager) GetSetCurrentClientCertDetails() *HttpConnectionManager_SetCurrentClientCertDetails { + if x != nil { + return x.SetCurrentClientCertDetails + } + return nil +} + +func (x *HttpConnectionManager) GetProxy_100Continue() bool { + if x != nil { + return x.Proxy_100Continue + } + return false +} + +func (x *HttpConnectionManager) GetRepresentIpv4RemoteAddressAsIpv4MappedIpv6() bool { + if x != nil { + return x.RepresentIpv4RemoteAddressAsIpv4MappedIpv6 + } + return false +} + +func (x *HttpConnectionManager) GetUpgradeConfigs() []*HttpConnectionManager_UpgradeConfig { + if x != nil { + return x.UpgradeConfigs + } + return nil +} + +func (x *HttpConnectionManager) GetNormalizePath() *wrapperspb.BoolValue { + if x != nil { + return x.NormalizePath + } + return nil +} + +func (x *HttpConnectionManager) GetMergeSlashes() bool { + if x != nil { + return x.MergeSlashes + } + return false +} + +func (x *HttpConnectionManager) GetPathWithEscapedSlashesAction() HttpConnectionManager_PathWithEscapedSlashesAction { + if x != nil { + return x.PathWithEscapedSlashesAction + } + return HttpConnectionManager_IMPLEMENTATION_SPECIFIC_DEFAULT +} + +func (x *HttpConnectionManager) GetRequestIdExtension() *RequestIDExtension { + if x != nil { + return x.RequestIdExtension + } + return nil +} + +func (x *HttpConnectionManager) GetLocalReplyConfig() *LocalReplyConfig { + if x != nil { + return x.LocalReplyConfig + } + return nil +} + +func (x *HttpConnectionManager) GetStripMatchingHostPort() bool { + if x != 
nil { + return x.StripMatchingHostPort + } + return false +} + +func (m *HttpConnectionManager) GetStripPortMode() isHttpConnectionManager_StripPortMode { + if m != nil { + return m.StripPortMode + } + return nil +} + +func (x *HttpConnectionManager) GetStripAnyHostPort() bool { + if x, ok := x.GetStripPortMode().(*HttpConnectionManager_StripAnyHostPort); ok { + return x.StripAnyHostPort + } + return false +} + +func (x *HttpConnectionManager) GetStreamErrorOnInvalidHttpMessage() *wrapperspb.BoolValue { + if x != nil { + return x.StreamErrorOnInvalidHttpMessage + } + return nil +} + +func (x *HttpConnectionManager) GetPathNormalizationOptions() *HttpConnectionManager_PathNormalizationOptions { + if x != nil { + return x.PathNormalizationOptions + } + return nil +} + +func (x *HttpConnectionManager) GetStripTrailingHostDot() bool { + if x != nil { + return x.StripTrailingHostDot + } + return false +} + +func (x *HttpConnectionManager) GetProxyStatusConfig() *HttpConnectionManager_ProxyStatusConfig { + if x != nil { + return x.ProxyStatusConfig + } + return nil +} + +func (x *HttpConnectionManager) GetTypedHeaderValidationConfig() *v3.TypedExtensionConfig { + if x != nil { + return x.TypedHeaderValidationConfig + } + return nil +} + +func (x *HttpConnectionManager) GetAppendXForwardedPort() bool { + if x != nil { + return x.AppendXForwardedPort + } + return false +} + +func (x *HttpConnectionManager) GetAppendLocalOverload() bool { + if x != nil { + return x.AppendLocalOverload + } + return false +} + +func (x *HttpConnectionManager) GetAddProxyProtocolConnectionState() *wrapperspb.BoolValue { + if x != nil { + return x.AddProxyProtocolConnectionState + } + return nil +} + +type isHttpConnectionManager_RouteSpecifier interface { + isHttpConnectionManager_RouteSpecifier() +} + +type HttpConnectionManager_Rds struct { + // The connection manager’s route table will be dynamically loaded via the RDS API. + Rds *Rds `protobuf:"bytes,3,opt,name=rds,proto3,oneof"` +} + +type HttpConnectionManager_RouteConfig struct { + // The route table for the connection manager is static and is specified in this property. + RouteConfig *v32.RouteConfiguration `protobuf:"bytes,4,opt,name=route_config,json=routeConfig,proto3,oneof"` +} + +type HttpConnectionManager_ScopedRoutes struct { + // A route table will be dynamically assigned to each request based on request attributes + // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are + // specified in this message. + ScopedRoutes *ScopedRoutes `protobuf:"bytes,31,opt,name=scoped_routes,json=scopedRoutes,proto3,oneof"` +} + +func (*HttpConnectionManager_Rds) isHttpConnectionManager_RouteSpecifier() {} + +func (*HttpConnectionManager_RouteConfig) isHttpConnectionManager_RouteSpecifier() {} + +func (*HttpConnectionManager_ScopedRoutes) isHttpConnectionManager_RouteSpecifier() {} + +type isHttpConnectionManager_StripPortMode interface { + isHttpConnectionManager_StripPortMode() +} + +type HttpConnectionManager_StripAnyHostPort struct { + // Determines if the port part should be removed from host/authority header before any processing + // of request by HTTP filters or routing. + // This affects the upstream host header unless the method is CONNECT in + // which case if no filter adds a port the original port will be restored before headers are sent upstream. + // Without setting this option, incoming requests with host “example:443“ will not match against + // route with :ref:`domains` match set to “example“. 
Defaults to “false“. Note that port removal is not part + // of `HTTP spec `_ and is provided for convenience. + // Only one of “strip_matching_host_port“ or “strip_any_host_port“ can be set. + StripAnyHostPort bool `protobuf:"varint,42,opt,name=strip_any_host_port,json=stripAnyHostPort,proto3,oneof"` +} + +func (*HttpConnectionManager_StripAnyHostPort) isHttpConnectionManager_StripPortMode() {} + +// The configuration to customize local reply returned by Envoy. +type LocalReplyConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configuration of list of mappers which allows to filter and change local response. + // The mappers will be checked by the specified order until one is matched. + Mappers []*ResponseMapper `protobuf:"bytes,1,rep,name=mappers,proto3" json:"mappers,omitempty"` + // The configuration to form response body from the :ref:`command operators ` + // and to specify response content type as one of: plain/text or application/json. + // + // Example one: "plain/text" “body_format“. + // + // .. validated-code-block:: yaml + // + // :type-name: envoy.config.core.v3.SubstitutionFormatString + // + // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" + // + // The following response body in "plain/text" format will be generated for a request with + // local reply body of "upstream connection error", response_code=503 and path=/foo. + // + // .. code-block:: text + // + // upstream connect error:503:path=/foo + // + // Example two: "application/json" “body_format“. + // + // .. validated-code-block:: yaml + // + // :type-name: envoy.config.core.v3.SubstitutionFormatString + // + // json_format: + // status: "%RESPONSE_CODE%" + // message: "%LOCAL_REPLY_BODY%" + // path: "%REQ(:path)%" + // + // The following response body in "application/json" format would be generated for a request with + // local reply body of "upstream connection error", response_code=503 and path=/foo. + // + // .. code-block:: json + // + // { + // "status": 503, + // "message": "upstream connection error", + // "path": "/foo" + // } + BodyFormat *v3.SubstitutionFormatString `protobuf:"bytes,2,opt,name=body_format,json=bodyFormat,proto3" json:"body_format,omitempty"` +} + +func (x *LocalReplyConfig) Reset() { + *x = LocalReplyConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalReplyConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalReplyConfig) ProtoMessage() {} + +func (x *LocalReplyConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalReplyConfig.ProtoReflect.Descriptor instead. 
+func (*LocalReplyConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{1} +} + +func (x *LocalReplyConfig) GetMappers() []*ResponseMapper { + if x != nil { + return x.Mappers + } + return nil +} + +func (x *LocalReplyConfig) GetBodyFormat() *v3.SubstitutionFormatString { + if x != nil { + return x.BodyFormat + } + return nil +} + +// The configuration to filter and change local response. +// [#next-free-field: 6] +type ResponseMapper struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Filter to determine if this mapper should apply. + Filter *v31.AccessLogFilter `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + // The new response status code if specified. + StatusCode *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` + // The new local reply body text if specified. It will be used in the “%LOCAL_REPLY_BODY%“ + // command operator in the “body_format“. + Body *v3.DataSource `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"` + // A per mapper “body_format“ to override the :ref:`body_format `. + // It will be used when this mapper is matched. + BodyFormatOverride *v3.SubstitutionFormatString `protobuf:"bytes,4,opt,name=body_format_override,json=bodyFormatOverride,proto3" json:"body_format_override,omitempty"` + // HTTP headers to add to a local reply. This allows the response mapper to append, to add + // or to override headers of any local reply before it is sent to a downstream client. + HeadersToAdd []*v3.HeaderValueOption `protobuf:"bytes,5,rep,name=headers_to_add,json=headersToAdd,proto3" json:"headers_to_add,omitempty"` +} + +func (x *ResponseMapper) Reset() { + *x = ResponseMapper{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseMapper) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponseMapper) ProtoMessage() {} + +func (x *ResponseMapper) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseMapper.ProtoReflect.Descriptor instead. 
+func (*ResponseMapper) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{2} +} + +func (x *ResponseMapper) GetFilter() *v31.AccessLogFilter { + if x != nil { + return x.Filter + } + return nil +} + +func (x *ResponseMapper) GetStatusCode() *wrapperspb.UInt32Value { + if x != nil { + return x.StatusCode + } + return nil +} + +func (x *ResponseMapper) GetBody() *v3.DataSource { + if x != nil { + return x.Body + } + return nil +} + +func (x *ResponseMapper) GetBodyFormatOverride() *v3.SubstitutionFormatString { + if x != nil { + return x.BodyFormatOverride + } + return nil +} + +func (x *ResponseMapper) GetHeadersToAdd() []*v3.HeaderValueOption { + if x != nil { + return x.HeadersToAdd + } + return nil +} + +type Rds struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configuration source specifier for RDS. + ConfigSource *v3.ConfigSource `protobuf:"bytes,1,opt,name=config_source,json=configSource,proto3" json:"config_source,omitempty"` + // The name of the route configuration. This name will be passed to the RDS + // API. This allows an Envoy configuration with multiple HTTP listeners (and + // associated HTTP connection manager filters) to use different route + // configurations. + RouteConfigName string `protobuf:"bytes,2,opt,name=route_config_name,json=routeConfigName,proto3" json:"route_config_name,omitempty"` +} + +func (x *Rds) Reset() { + *x = Rds{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Rds) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Rds) ProtoMessage() {} + +func (x *Rds) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Rds.ProtoReflect.Descriptor instead. +func (*Rds) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{3} +} + +func (x *Rds) GetConfigSource() *v3.ConfigSource { + if x != nil { + return x.ConfigSource + } + return nil +} + +func (x *Rds) GetRouteConfigName() string { + if x != nil { + return x.RouteConfigName + } + return "" +} + +// This message is used to work around the limitations with 'oneof' and repeated fields. 
+type ScopedRouteConfigurationsList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ScopedRouteConfigurations []*v32.ScopedRouteConfiguration `protobuf:"bytes,1,rep,name=scoped_route_configurations,json=scopedRouteConfigurations,proto3" json:"scoped_route_configurations,omitempty"` +} + +func (x *ScopedRouteConfigurationsList) Reset() { + *x = ScopedRouteConfigurationsList{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRouteConfigurationsList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRouteConfigurationsList) ProtoMessage() {} + +func (x *ScopedRouteConfigurationsList) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRouteConfigurationsList.ProtoReflect.Descriptor instead. +func (*ScopedRouteConfigurationsList) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{4} +} + +func (x *ScopedRouteConfigurationsList) GetScopedRouteConfigurations() []*v32.ScopedRouteConfiguration { + if x != nil { + return x.ScopedRouteConfigurations + } + return nil +} + +// [#next-free-field: 6] +type ScopedRoutes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name assigned to the scoped routing configuration. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The algorithm to use for constructing a scope key for each request. + ScopeKeyBuilder *ScopedRoutes_ScopeKeyBuilder `protobuf:"bytes,2,opt,name=scope_key_builder,json=scopeKeyBuilder,proto3" json:"scope_key_builder,omitempty"` + // Configuration source specifier for RDS. + // This config source is used to subscribe to RouteConfiguration resources specified in + // ScopedRouteConfiguration messages. 
+ RdsConfigSource *v3.ConfigSource `protobuf:"bytes,3,opt,name=rds_config_source,json=rdsConfigSource,proto3" json:"rds_config_source,omitempty"` + // Types that are assignable to ConfigSpecifier: + // + // *ScopedRoutes_ScopedRouteConfigurationsList + // *ScopedRoutes_ScopedRds + ConfigSpecifier isScopedRoutes_ConfigSpecifier `protobuf_oneof:"config_specifier"` +} + +func (x *ScopedRoutes) Reset() { + *x = ScopedRoutes{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRoutes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRoutes) ProtoMessage() {} + +func (x *ScopedRoutes) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRoutes.ProtoReflect.Descriptor instead. +func (*ScopedRoutes) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{5} +} + +func (x *ScopedRoutes) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ScopedRoutes) GetScopeKeyBuilder() *ScopedRoutes_ScopeKeyBuilder { + if x != nil { + return x.ScopeKeyBuilder + } + return nil +} + +func (x *ScopedRoutes) GetRdsConfigSource() *v3.ConfigSource { + if x != nil { + return x.RdsConfigSource + } + return nil +} + +func (m *ScopedRoutes) GetConfigSpecifier() isScopedRoutes_ConfigSpecifier { + if m != nil { + return m.ConfigSpecifier + } + return nil +} + +func (x *ScopedRoutes) GetScopedRouteConfigurationsList() *ScopedRouteConfigurationsList { + if x, ok := x.GetConfigSpecifier().(*ScopedRoutes_ScopedRouteConfigurationsList); ok { + return x.ScopedRouteConfigurationsList + } + return nil +} + +func (x *ScopedRoutes) GetScopedRds() *ScopedRds { + if x, ok := x.GetConfigSpecifier().(*ScopedRoutes_ScopedRds); ok { + return x.ScopedRds + } + return nil +} + +type isScopedRoutes_ConfigSpecifier interface { + isScopedRoutes_ConfigSpecifier() +} + +type ScopedRoutes_ScopedRouteConfigurationsList struct { + // The set of routing scopes corresponding to the HCM. A scope is assigned to a request by + // matching a key constructed from the request's attributes according to the algorithm specified + // by the + // :ref:`ScopeKeyBuilder` + // in this message. + ScopedRouteConfigurationsList *ScopedRouteConfigurationsList `protobuf:"bytes,4,opt,name=scoped_route_configurations_list,json=scopedRouteConfigurationsList,proto3,oneof"` +} + +type ScopedRoutes_ScopedRds struct { + // The set of routing scopes associated with the HCM will be dynamically loaded via the SRDS + // API. A scope is assigned to a request by matching a key constructed from the request's + // attributes according to the algorithm specified by the + // :ref:`ScopeKeyBuilder` + // in this message. 
+ ScopedRds *ScopedRds `protobuf:"bytes,5,opt,name=scoped_rds,json=scopedRds,proto3,oneof"` +} + +func (*ScopedRoutes_ScopedRouteConfigurationsList) isScopedRoutes_ConfigSpecifier() {} + +func (*ScopedRoutes_ScopedRds) isScopedRoutes_ConfigSpecifier() {} + +type ScopedRds struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configuration source specifier for scoped RDS. + ScopedRdsConfigSource *v3.ConfigSource `protobuf:"bytes,1,opt,name=scoped_rds_config_source,json=scopedRdsConfigSource,proto3" json:"scoped_rds_config_source,omitempty"` + // xdstp:// resource locator for scoped RDS collection. + // [#not-implemented-hide:] + SrdsResourcesLocator string `protobuf:"bytes,2,opt,name=srds_resources_locator,json=srdsResourcesLocator,proto3" json:"srds_resources_locator,omitempty"` +} + +func (x *ScopedRds) Reset() { + *x = ScopedRds{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRds) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRds) ProtoMessage() {} + +func (x *ScopedRds) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRds.ProtoReflect.Descriptor instead. +func (*ScopedRds) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{6} +} + +func (x *ScopedRds) GetScopedRdsConfigSource() *v3.ConfigSource { + if x != nil { + return x.ScopedRdsConfigSource + } + return nil +} + +func (x *ScopedRds) GetSrdsResourcesLocator() string { + if x != nil { + return x.SrdsResourcesLocator + } + return "" +} + +// [#next-free-field: 8] +type HttpFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the filter configuration. It also serves as a resource name in ExtensionConfigDS. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to ConfigType: + // + // *HttpFilter_TypedConfig + // *HttpFilter_ConfigDiscovery + ConfigType isHttpFilter_ConfigType `protobuf_oneof:"config_type"` + // If true, clients that do not support this filter may ignore the + // filter but otherwise accept the config. + // Otherwise, clients that do not support this filter must reject the config. + IsOptional bool `protobuf:"varint,6,opt,name=is_optional,json=isOptional,proto3" json:"is_optional,omitempty"` + // If true, the filter is disabled by default and must be explicitly enabled by setting + // per filter configuration in the route configuration. + // See :ref:`route based filter chain ` + // for more details. + // + // Terminal filters (e.g. “envoy.filters.http.router“) cannot be marked as disabled. 
+ Disabled bool `protobuf:"varint,7,opt,name=disabled,proto3" json:"disabled,omitempty"` +} + +func (x *HttpFilter) Reset() { + *x = HttpFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpFilter) ProtoMessage() {} + +func (x *HttpFilter) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpFilter.ProtoReflect.Descriptor instead. +func (*HttpFilter) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{7} +} + +func (x *HttpFilter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *HttpFilter) GetConfigType() isHttpFilter_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *HttpFilter) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*HttpFilter_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +func (x *HttpFilter) GetConfigDiscovery() *v3.ExtensionConfigSource { + if x, ok := x.GetConfigType().(*HttpFilter_ConfigDiscovery); ok { + return x.ConfigDiscovery + } + return nil +} + +func (x *HttpFilter) GetIsOptional() bool { + if x != nil { + return x.IsOptional + } + return false +} + +func (x *HttpFilter) GetDisabled() bool { + if x != nil { + return x.Disabled + } + return false +} + +type isHttpFilter_ConfigType interface { + isHttpFilter_ConfigType() +} + +type HttpFilter_TypedConfig struct { + // Filter specific configuration which depends on the filter being instantiated. See the supported + // filters for further documentation. + // + // To support configuring a :ref:`match tree `, use an + // :ref:`ExtensionWithMatcher ` + // with the desired HTTP filter. + // [#extension-category: envoy.filters.http] + TypedConfig *anypb.Any `protobuf:"bytes,4,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +type HttpFilter_ConfigDiscovery struct { + // Configuration source specifier for an extension configuration discovery service. + // In case of a failure and without the default configuration, the HTTP listener responds with code 500. + // Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061). + // + // To support configuring a :ref:`match tree `, use an + // :ref:`ExtensionWithMatcher ` + // with the desired HTTP filter. This works for both the default filter configuration as well + // as for filters provided via the API. + ConfigDiscovery *v3.ExtensionConfigSource `protobuf:"bytes,5,opt,name=config_discovery,json=configDiscovery,proto3,oneof"` +} + +func (*HttpFilter_TypedConfig) isHttpFilter_ConfigType() {} + +func (*HttpFilter_ConfigDiscovery) isHttpFilter_ConfigType() {} + +type RequestIDExtension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Request ID extension specific configuration. 
+ TypedConfig *anypb.Any `protobuf:"bytes,1,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"` +} + +func (x *RequestIDExtension) Reset() { + *x = RequestIDExtension{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestIDExtension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestIDExtension) ProtoMessage() {} + +func (x *RequestIDExtension) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestIDExtension.ProtoReflect.Descriptor instead. +func (*RequestIDExtension) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{8} +} + +func (x *RequestIDExtension) GetTypedConfig() *anypb.Any { + if x != nil { + return x.TypedConfig + } + return nil +} + +// [#protodoc-title: Envoy Mobile HTTP connection manager] +// HTTP connection manager for use in Envoy mobile. +// [#extension: envoy.filters.network.envoy_mobile_http_connection_manager] +type EnvoyMobileHttpConnectionManager struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The configuration for the underlying HttpConnectionManager which will be + // instantiated for Envoy mobile. + Config *HttpConnectionManager `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *EnvoyMobileHttpConnectionManager) Reset() { + *x = EnvoyMobileHttpConnectionManager{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnvoyMobileHttpConnectionManager) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnvoyMobileHttpConnectionManager) ProtoMessage() {} + +func (x *EnvoyMobileHttpConnectionManager) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnvoyMobileHttpConnectionManager.ProtoReflect.Descriptor instead. 
+func (*EnvoyMobileHttpConnectionManager) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{9} +} + +func (x *EnvoyMobileHttpConnectionManager) GetConfig() *HttpConnectionManager { + if x != nil { + return x.Config + } + return nil +} + +// [#next-free-field: 11] +type HttpConnectionManager_Tracing struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Target percentage of requests managed by this HTTP connection manager that will be force + // traced if the :ref:`x-client-trace-id ` + // header is set. This field is a direct analog for the runtime variable + // 'tracing.client_enabled' in the :ref:`HTTP Connection Manager + // `. + // Default: 100% + ClientSampling *v33.Percent `protobuf:"bytes,3,opt,name=client_sampling,json=clientSampling,proto3" json:"client_sampling,omitempty"` + // Target percentage of requests managed by this HTTP connection manager that will be randomly + // selected for trace generation, if not requested by the client or not forced. This field is + // a direct analog for the runtime variable 'tracing.random_sampling' in the + // :ref:`HTTP Connection Manager `. + // Default: 100% + RandomSampling *v33.Percent `protobuf:"bytes,4,opt,name=random_sampling,json=randomSampling,proto3" json:"random_sampling,omitempty"` + // Target percentage of requests managed by this HTTP connection manager that will be traced + // after all other sampling checks have been applied (client-directed, force tracing, random + // sampling). This field functions as an upper limit on the total configured sampling rate. For + // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% + // of client requests with the appropriate headers to be force traced. This field is a direct + // analog for the runtime variable 'tracing.global_enabled' in the + // :ref:`HTTP Connection Manager `. + // Default: 100% + OverallSampling *v33.Percent `protobuf:"bytes,5,opt,name=overall_sampling,json=overallSampling,proto3" json:"overall_sampling,omitempty"` + // Whether to annotate spans with additional data. If true, spans will include logs for stream + // events. + Verbose bool `protobuf:"varint,6,opt,name=verbose,proto3" json:"verbose,omitempty"` + // Maximum length of the request path to extract and include in the HttpUrl tag. Used to + // truncate lengthy request paths to meet the needs of a tracing backend. + // Default: 256 + MaxPathTagLength *wrapperspb.UInt32Value `protobuf:"bytes,7,opt,name=max_path_tag_length,json=maxPathTagLength,proto3" json:"max_path_tag_length,omitempty"` + // A list of custom tags with unique tag name to create tags for the active span. + CustomTags []*v34.CustomTag `protobuf:"bytes,8,rep,name=custom_tags,json=customTags,proto3" json:"custom_tags,omitempty"` + // Configuration for an external tracing provider. + // If not specified, no tracing will be performed. + // + // .. attention:: + // + // Please be aware that ``envoy.tracers.opencensus`` provider can only be configured once + // in Envoy lifetime. + // Any attempts to reconfigure it or to use different configurations for different HCM filters + // will be rejected. + // Such a constraint is inherent to OpenCensus itself. It cannot be overcome without changes + // on OpenCensus side. 
+ Provider *v35.Tracing_Http `protobuf:"bytes,9,opt,name=provider,proto3" json:"provider,omitempty"` + // Create separate tracing span for each upstream request if true. And if this flag is set to true, + // the tracing provider will assume that Envoy will be independent hop in the trace chain and may + // set span type to client or server based on this flag. + // This will deprecate the + // :ref:`start_child_span ` + // in the router. + // + // Users should set appropriate value based on their tracing provider and actual scenario: + // + // - If Envoy is used as sidecar and users want to make the sidecar and its application as only one + // hop in the trace chain, this flag should be set to false. And please also make sure the + // :ref:`start_child_span ` + // in the router is not set to true. + // - If Envoy is used as gateway or independent proxy, or users want to make the sidecar and its + // application as different hops in the trace chain, this flag should be set to true. + // - If tracing provider that has explicit requirements on span creation (like SkyWalking), + // this flag should be set to true. + // + // The default value is false for now for backward compatibility. + SpawnUpstreamSpan *wrapperspb.BoolValue `protobuf:"bytes,10,opt,name=spawn_upstream_span,json=spawnUpstreamSpan,proto3" json:"spawn_upstream_span,omitempty"` +} + +func (x *HttpConnectionManager_Tracing) Reset() { + *x = HttpConnectionManager_Tracing{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpConnectionManager_Tracing) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpConnectionManager_Tracing) ProtoMessage() {} + +func (x *HttpConnectionManager_Tracing) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpConnectionManager_Tracing.ProtoReflect.Descriptor instead. 
+func (*HttpConnectionManager_Tracing) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *HttpConnectionManager_Tracing) GetClientSampling() *v33.Percent { + if x != nil { + return x.ClientSampling + } + return nil +} + +func (x *HttpConnectionManager_Tracing) GetRandomSampling() *v33.Percent { + if x != nil { + return x.RandomSampling + } + return nil +} + +func (x *HttpConnectionManager_Tracing) GetOverallSampling() *v33.Percent { + if x != nil { + return x.OverallSampling + } + return nil +} + +func (x *HttpConnectionManager_Tracing) GetVerbose() bool { + if x != nil { + return x.Verbose + } + return false +} + +func (x *HttpConnectionManager_Tracing) GetMaxPathTagLength() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxPathTagLength + } + return nil +} + +func (x *HttpConnectionManager_Tracing) GetCustomTags() []*v34.CustomTag { + if x != nil { + return x.CustomTags + } + return nil +} + +func (x *HttpConnectionManager_Tracing) GetProvider() *v35.Tracing_Http { + if x != nil { + return x.Provider + } + return nil +} + +func (x *HttpConnectionManager_Tracing) GetSpawnUpstreamSpan() *wrapperspb.BoolValue { + if x != nil { + return x.SpawnUpstreamSpan + } + return nil +} + +type HttpConnectionManager_InternalAddressConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether unix socket addresses should be considered internal. + UnixSockets bool `protobuf:"varint,1,opt,name=unix_sockets,json=unixSockets,proto3" json:"unix_sockets,omitempty"` + // List of CIDR ranges that are treated as internal. If unset, then RFC1918 / RFC4193 + // IP addresses will be considered internal. + CidrRanges []*v3.CidrRange `protobuf:"bytes,2,rep,name=cidr_ranges,json=cidrRanges,proto3" json:"cidr_ranges,omitempty"` +} + +func (x *HttpConnectionManager_InternalAddressConfig) Reset() { + *x = HttpConnectionManager_InternalAddressConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpConnectionManager_InternalAddressConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpConnectionManager_InternalAddressConfig) ProtoMessage() {} + +func (x *HttpConnectionManager_InternalAddressConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpConnectionManager_InternalAddressConfig.ProtoReflect.Descriptor instead. 
+func (*HttpConnectionManager_InternalAddressConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *HttpConnectionManager_InternalAddressConfig) GetUnixSockets() bool { + if x != nil { + return x.UnixSockets + } + return false +} + +func (x *HttpConnectionManager_InternalAddressConfig) GetCidrRanges() []*v3.CidrRange { + if x != nil { + return x.CidrRanges + } + return nil +} + +// [#next-free-field: 7] +type HttpConnectionManager_SetCurrentClientCertDetails struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether to forward the subject of the client cert. Defaults to false. + Subject *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"` + // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the + // XFCC header comma separated from other values with the value Cert="PEM". + // Defaults to false. + Cert bool `protobuf:"varint,3,opt,name=cert,proto3" json:"cert,omitempty"` + // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM + // format. This will appear in the XFCC header comma separated from other values with the value + // Chain="PEM". + // Defaults to false. + Chain bool `protobuf:"varint,6,opt,name=chain,proto3" json:"chain,omitempty"` + // Whether to forward the DNS type Subject Alternative Names of the client cert. + // Defaults to false. + Dns bool `protobuf:"varint,4,opt,name=dns,proto3" json:"dns,omitempty"` + // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to + // false. + Uri bool `protobuf:"varint,5,opt,name=uri,proto3" json:"uri,omitempty"` +} + +func (x *HttpConnectionManager_SetCurrentClientCertDetails) Reset() { + *x = HttpConnectionManager_SetCurrentClientCertDetails{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpConnectionManager_SetCurrentClientCertDetails) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpConnectionManager_SetCurrentClientCertDetails) ProtoMessage() {} + +func (x *HttpConnectionManager_SetCurrentClientCertDetails) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpConnectionManager_SetCurrentClientCertDetails.ProtoReflect.Descriptor instead. 
+func (*HttpConnectionManager_SetCurrentClientCertDetails) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 2} +} + +func (x *HttpConnectionManager_SetCurrentClientCertDetails) GetSubject() *wrapperspb.BoolValue { + if x != nil { + return x.Subject + } + return nil +} + +func (x *HttpConnectionManager_SetCurrentClientCertDetails) GetCert() bool { + if x != nil { + return x.Cert + } + return false +} + +func (x *HttpConnectionManager_SetCurrentClientCertDetails) GetChain() bool { + if x != nil { + return x.Chain + } + return false +} + +func (x *HttpConnectionManager_SetCurrentClientCertDetails) GetDns() bool { + if x != nil { + return x.Dns + } + return false +} + +func (x *HttpConnectionManager_SetCurrentClientCertDetails) GetUri() bool { + if x != nil { + return x.Uri + } + return false +} + +// The configuration for HTTP upgrades. +// For each upgrade type desired, an UpgradeConfig must be added. +// +// .. warning:: +// +// The current implementation of upgrade headers does not handle +// multi-valued upgrade headers. Support for multi-valued headers may be +// added in the future if needed. +// +// .. warning:: +// +// The current implementation of upgrade headers does not work with HTTP/2 +// upstreams. +type HttpConnectionManager_UpgradeConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The case-insensitive name of this upgrade, e.g. "websocket". + // For each upgrade type present in upgrade_configs, requests with + // Upgrade: [upgrade_type] + // will be proxied upstream. + UpgradeType string `protobuf:"bytes,1,opt,name=upgrade_type,json=upgradeType,proto3" json:"upgrade_type,omitempty"` + // If present, this represents the filter chain which will be created for + // this type of upgrade. If no filters are present, the filter chain for + // HTTP connections will be used for this upgrade type. + Filters []*HttpFilter `protobuf:"bytes,2,rep,name=filters,proto3" json:"filters,omitempty"` + // Determines if upgrades are enabled or disabled by default. Defaults to true. + // This can be overridden on a per-route basis with :ref:`cluster + // ` as documented in the + // :ref:`upgrade documentation `. + Enabled *wrapperspb.BoolValue `protobuf:"bytes,3,opt,name=enabled,proto3" json:"enabled,omitempty"` +} + +func (x *HttpConnectionManager_UpgradeConfig) Reset() { + *x = HttpConnectionManager_UpgradeConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpConnectionManager_UpgradeConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpConnectionManager_UpgradeConfig) ProtoMessage() {} + +func (x *HttpConnectionManager_UpgradeConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpConnectionManager_UpgradeConfig.ProtoReflect.Descriptor instead. 
+func (*HttpConnectionManager_UpgradeConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 3} +} + +func (x *HttpConnectionManager_UpgradeConfig) GetUpgradeType() string { + if x != nil { + return x.UpgradeType + } + return "" +} + +func (x *HttpConnectionManager_UpgradeConfig) GetFilters() []*HttpFilter { + if x != nil { + return x.Filters + } + return nil +} + +func (x *HttpConnectionManager_UpgradeConfig) GetEnabled() *wrapperspb.BoolValue { + if x != nil { + return x.Enabled + } + return nil +} + +// [#not-implemented-hide:] Transformations that apply to path headers. Transformations are applied +// before any processing of requests by HTTP filters, routing, and matching. Only the normalized +// path will be visible internally if a transformation is enabled. Any path rewrites that the +// router performs (e.g. :ref:`regex_rewrite +// ` or :ref:`prefix_rewrite +// `) will apply to the “:path“ header +// destined for the upstream. +// +// Note: access logging and tracing will show the original “:path“ header. +type HttpConnectionManager_PathNormalizationOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // [#not-implemented-hide:] Normalization applies internally before any processing of requests by + // HTTP filters, routing, and matching *and* will affect the forwarded “:path“ header. Defaults + // to :ref:`NormalizePathRFC3986 + // `. When not + // specified, this value may be overridden by the runtime variable + // :ref:`http_connection_manager.normalize_path`. + // Envoy will respond with 400 to paths that are malformed (e.g. for paths that fail RFC 3986 + // normalization due to disallowed characters.) + ForwardingTransformation *v36.PathTransformation `protobuf:"bytes,1,opt,name=forwarding_transformation,json=forwardingTransformation,proto3" json:"forwarding_transformation,omitempty"` + // [#not-implemented-hide:] Normalization only applies internally before any processing of + // requests by HTTP filters, routing, and matching. These will be applied after full + // transformation is applied. The “:path“ header before this transformation will be restored in + // the router filter and sent upstream unless it was mutated by a filter. Defaults to no + // transformations. + // Multiple actions can be applied in the same Transformation, forming a sequential + // pipeline. The transformations will be performed in the order that they appear. Envoy will + // respond with 400 to paths that are malformed (e.g. for paths that fail RFC 3986 + // normalization due to disallowed characters.) 
+ HttpFilterTransformation *v36.PathTransformation `protobuf:"bytes,2,opt,name=http_filter_transformation,json=httpFilterTransformation,proto3" json:"http_filter_transformation,omitempty"` +} + +func (x *HttpConnectionManager_PathNormalizationOptions) Reset() { + *x = HttpConnectionManager_PathNormalizationOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpConnectionManager_PathNormalizationOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpConnectionManager_PathNormalizationOptions) ProtoMessage() {} + +func (x *HttpConnectionManager_PathNormalizationOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpConnectionManager_PathNormalizationOptions.ProtoReflect.Descriptor instead. +func (*HttpConnectionManager_PathNormalizationOptions) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 4} +} + +func (x *HttpConnectionManager_PathNormalizationOptions) GetForwardingTransformation() *v36.PathTransformation { + if x != nil { + return x.ForwardingTransformation + } + return nil +} + +func (x *HttpConnectionManager_PathNormalizationOptions) GetHttpFilterTransformation() *v36.PathTransformation { + if x != nil { + return x.HttpFilterTransformation + } + return nil +} + +// Configures the manner in which the Proxy-Status HTTP response header is +// populated. +// +// See the [Proxy-Status +// RFC](https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-proxy-status-08). +// [#comment:TODO: Update this with the non-draft URL when finalized.] +// +// The Proxy-Status header is a string of the form: +// +// "; error=; details=
" +// +// [#next-free-field: 7] +type HttpConnectionManager_ProxyStatusConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If true, the details field of the Proxy-Status header is not populated with stream_info.response_code_details. + // This value defaults to “false“, i.e. the “details“ field is populated by default. + RemoveDetails bool `protobuf:"varint,1,opt,name=remove_details,json=removeDetails,proto3" json:"remove_details,omitempty"` + // If true, the details field of the Proxy-Status header will not contain + // connection termination details. This value defaults to “false“, i.e. the + // “details“ field will contain connection termination details by default. + RemoveConnectionTerminationDetails bool `protobuf:"varint,2,opt,name=remove_connection_termination_details,json=removeConnectionTerminationDetails,proto3" json:"remove_connection_termination_details,omitempty"` + // If true, the details field of the Proxy-Status header will not contain an + // enumeration of the Envoy ResponseFlags. This value defaults to “false“, + // i.e. the “details“ field will contain a list of ResponseFlags by default. + RemoveResponseFlags bool `protobuf:"varint,3,opt,name=remove_response_flags,json=removeResponseFlags,proto3" json:"remove_response_flags,omitempty"` + // If true, overwrites the existing Status header with the response code + // recommended by the Proxy-Status spec. + // This value defaults to “false“, i.e. the HTTP response code is not + // overwritten. + SetRecommendedResponseCode bool `protobuf:"varint,4,opt,name=set_recommended_response_code,json=setRecommendedResponseCode,proto3" json:"set_recommended_response_code,omitempty"` + // The name of the proxy as it appears at the start of the Proxy-Status + // header. + // + // If neither of these values are set, this value defaults to “server_name“, + // which itself defaults to "envoy". + // + // Types that are assignable to ProxyName: + // + // *HttpConnectionManager_ProxyStatusConfig_UseNodeId + // *HttpConnectionManager_ProxyStatusConfig_LiteralProxyName + ProxyName isHttpConnectionManager_ProxyStatusConfig_ProxyName `protobuf_oneof:"proxy_name"` +} + +func (x *HttpConnectionManager_ProxyStatusConfig) Reset() { + *x = HttpConnectionManager_ProxyStatusConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpConnectionManager_ProxyStatusConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpConnectionManager_ProxyStatusConfig) ProtoMessage() {} + +func (x *HttpConnectionManager_ProxyStatusConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpConnectionManager_ProxyStatusConfig.ProtoReflect.Descriptor instead. 
+func (*HttpConnectionManager_ProxyStatusConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 5} +} + +func (x *HttpConnectionManager_ProxyStatusConfig) GetRemoveDetails() bool { + if x != nil { + return x.RemoveDetails + } + return false +} + +func (x *HttpConnectionManager_ProxyStatusConfig) GetRemoveConnectionTerminationDetails() bool { + if x != nil { + return x.RemoveConnectionTerminationDetails + } + return false +} + +func (x *HttpConnectionManager_ProxyStatusConfig) GetRemoveResponseFlags() bool { + if x != nil { + return x.RemoveResponseFlags + } + return false +} + +func (x *HttpConnectionManager_ProxyStatusConfig) GetSetRecommendedResponseCode() bool { + if x != nil { + return x.SetRecommendedResponseCode + } + return false +} + +func (m *HttpConnectionManager_ProxyStatusConfig) GetProxyName() isHttpConnectionManager_ProxyStatusConfig_ProxyName { + if m != nil { + return m.ProxyName + } + return nil +} + +func (x *HttpConnectionManager_ProxyStatusConfig) GetUseNodeId() bool { + if x, ok := x.GetProxyName().(*HttpConnectionManager_ProxyStatusConfig_UseNodeId); ok { + return x.UseNodeId + } + return false +} + +func (x *HttpConnectionManager_ProxyStatusConfig) GetLiteralProxyName() string { + if x, ok := x.GetProxyName().(*HttpConnectionManager_ProxyStatusConfig_LiteralProxyName); ok { + return x.LiteralProxyName + } + return "" +} + +type isHttpConnectionManager_ProxyStatusConfig_ProxyName interface { + isHttpConnectionManager_ProxyStatusConfig_ProxyName() +} + +type HttpConnectionManager_ProxyStatusConfig_UseNodeId struct { + // If “use_node_id“ is set, Proxy-Status headers will use the Envoy's node + // ID as the name of the proxy. + UseNodeId bool `protobuf:"varint,5,opt,name=use_node_id,json=useNodeId,proto3,oneof"` +} + +type HttpConnectionManager_ProxyStatusConfig_LiteralProxyName struct { + // If “literal_proxy_name“ is set, Proxy-Status headers will use this + // value as the name of the proxy. + LiteralProxyName string `protobuf:"bytes,6,opt,name=literal_proxy_name,json=literalProxyName,proto3,oneof"` +} + +func (*HttpConnectionManager_ProxyStatusConfig_UseNodeId) isHttpConnectionManager_ProxyStatusConfig_ProxyName() { +} + +func (*HttpConnectionManager_ProxyStatusConfig_LiteralProxyName) isHttpConnectionManager_ProxyStatusConfig_ProxyName() { +} + +type HttpConnectionManager_HcmAccessLogOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The interval to flush the above access logs. By default, the HCM will flush exactly one access log + // on stream close, when the HTTP request is complete. If this field is set, the HCM will flush access + // logs periodically at the specified interval. This is especially useful in the case of long-lived + // requests, such as CONNECT and Websockets. Final access logs can be detected via the + // “requestComplete()“ method of “StreamInfo“ in access log filters, or through the “%DURATION%“ substitution + // string. + // The interval must be at least 1 millisecond. + AccessLogFlushInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=access_log_flush_interval,json=accessLogFlushInterval,proto3" json:"access_log_flush_interval,omitempty"` + // If set to true, HCM will flush an access log when a new HTTP request is received, after request + // headers have been evaluated, before iterating through the HTTP filter chain. 
+ // This log record, if enabled, does not depend on periodic log records or request completion log. + // Details related to upstream cluster, such as upstream host, will not be available for this log. + FlushAccessLogOnNewRequest bool `protobuf:"varint,2,opt,name=flush_access_log_on_new_request,json=flushAccessLogOnNewRequest,proto3" json:"flush_access_log_on_new_request,omitempty"` + // If true, the HCM will flush an access log when a tunnel is successfully established. For example, + // this could be when an upstream has successfully returned 101 Switching Protocols, or when the proxy + // has returned 200 to a CONNECT request. + FlushLogOnTunnelSuccessfullyEstablished bool `protobuf:"varint,3,opt,name=flush_log_on_tunnel_successfully_established,json=flushLogOnTunnelSuccessfullyEstablished,proto3" json:"flush_log_on_tunnel_successfully_established,omitempty"` +} + +func (x *HttpConnectionManager_HcmAccessLogOptions) Reset() { + *x = HttpConnectionManager_HcmAccessLogOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpConnectionManager_HcmAccessLogOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpConnectionManager_HcmAccessLogOptions) ProtoMessage() {} + +func (x *HttpConnectionManager_HcmAccessLogOptions) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpConnectionManager_HcmAccessLogOptions.ProtoReflect.Descriptor instead. +func (*HttpConnectionManager_HcmAccessLogOptions) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{0, 6} +} + +func (x *HttpConnectionManager_HcmAccessLogOptions) GetAccessLogFlushInterval() *durationpb.Duration { + if x != nil { + return x.AccessLogFlushInterval + } + return nil +} + +func (x *HttpConnectionManager_HcmAccessLogOptions) GetFlushAccessLogOnNewRequest() bool { + if x != nil { + return x.FlushAccessLogOnNewRequest + } + return false +} + +func (x *HttpConnectionManager_HcmAccessLogOptions) GetFlushLogOnTunnelSuccessfullyEstablished() bool { + if x != nil { + return x.FlushLogOnTunnelSuccessfullyEstablished + } + return false +} + +// Specifies the mechanism for constructing "scope keys" based on HTTP request attributes. These +// keys are matched against a set of :ref:`Key` +// objects assembled from :ref:`ScopedRouteConfiguration` +// messages distributed via SRDS (the Scoped Route Discovery Service) or assigned statically via +// :ref:`scoped_route_configurations_list`. +// +// Upon receiving a request's headers, the Router will build a key using the algorithm specified +// by this message. This key will be used to look up the routing table (i.e., the +// :ref:`RouteConfiguration`) to use for the request. 
+type ScopedRoutes_ScopeKeyBuilder struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The final(built) scope key consists of the ordered union of these fragments, which are compared in order with the + // fragments of a :ref:`ScopedRouteConfiguration`. + // A missing fragment during comparison will make the key invalid, i.e., the computed key doesn't match any key. + Fragments []*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder `protobuf:"bytes,1,rep,name=fragments,proto3" json:"fragments,omitempty"` +} + +func (x *ScopedRoutes_ScopeKeyBuilder) Reset() { + *x = ScopedRoutes_ScopeKeyBuilder{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRoutes_ScopeKeyBuilder) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRoutes_ScopeKeyBuilder) ProtoMessage() {} + +func (x *ScopedRoutes_ScopeKeyBuilder) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRoutes_ScopeKeyBuilder.ProtoReflect.Descriptor instead. +func (*ScopedRoutes_ScopeKeyBuilder) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{5, 0} +} + +func (x *ScopedRoutes_ScopeKeyBuilder) GetFragments() []*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder { + if x != nil { + return x.Fragments + } + return nil +} + +// Specifies the mechanism for constructing key fragments which are composed into scope keys. +type ScopedRoutes_ScopeKeyBuilder_FragmentBuilder struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Type: + // + // *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_ + Type isScopedRoutes_ScopeKeyBuilder_FragmentBuilder_Type `protobuf_oneof:"type"` +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) Reset() { + *x = ScopedRoutes_ScopeKeyBuilder_FragmentBuilder{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) ProtoMessage() {} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRoutes_ScopeKeyBuilder_FragmentBuilder.ProtoReflect.Descriptor instead. 
+func (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{5, 0, 0} +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) GetType() isScopedRoutes_ScopeKeyBuilder_FragmentBuilder_Type { + if m != nil { + return m.Type + } + return nil +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) GetHeaderValueExtractor() *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor { + if x, ok := x.GetType().(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_); ok { + return x.HeaderValueExtractor + } + return nil +} + +type isScopedRoutes_ScopeKeyBuilder_FragmentBuilder_Type interface { + isScopedRoutes_ScopeKeyBuilder_FragmentBuilder_Type() +} + +type ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_ struct { + // Specifies how a header field's value should be extracted. + HeaderValueExtractor *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor `protobuf:"bytes,1,opt,name=header_value_extractor,json=headerValueExtractor,proto3,oneof"` +} + +func (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_) isScopedRoutes_ScopeKeyBuilder_FragmentBuilder_Type() { +} + +// Specifies how the value of a header should be extracted. +// The following example maps the structure of a header to the fields in this message. +// +// .. code:: +// +// <0> <1> <-- index +// X-Header: a=b;c=d +// | || | +// | || \----> +// | || +// | |\----> +// | | +// | \----> +// | +// \----> +// +// Each 'a=b' key-value pair constitutes an 'element' of the header field. +type ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the header field to extract the value from. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The element separator (e.g., ';' separates 'a;b;c;d'). + // Default: empty string. This causes the entirety of the header field to be extracted. + // If this field is set to an empty string and 'index' is used in the oneof below, 'index' + // must be set to 0. 
+ ElementSeparator string `protobuf:"bytes,2,opt,name=element_separator,json=elementSeparator,proto3" json:"element_separator,omitempty"` + // Types that are assignable to ExtractType: + // + // *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index + // *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element + ExtractType isScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_ExtractType `protobuf_oneof:"extract_type"` +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) Reset() { + *x = ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) ProtoMessage() {} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor.ProtoReflect.Descriptor instead. +func (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{5, 0, 0, 0} +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) GetElementSeparator() string { + if x != nil { + return x.ElementSeparator + } + return "" +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) GetExtractType() isScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_ExtractType { + if m != nil { + return m.ExtractType + } + return nil +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) GetIndex() uint32 { + if x, ok := x.GetExtractType().(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index); ok { + return x.Index + } + return 0 +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) GetElement() *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement { + if x, ok := x.GetExtractType().(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element); ok { + return x.Element + } + return nil +} + +type isScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_ExtractType interface { + isScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_ExtractType() +} + +type ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index struct { + // Specifies the zero based index of the element to extract. + // Note Envoy concatenates multiple values of the same header key into a comma separated + // string, the splitting always happens after the concatenation. 
+ Index uint32 `protobuf:"varint,3,opt,name=index,proto3,oneof"` +} + +type ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element struct { + // Specifies the key value pair to extract the value from. + Element *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement `protobuf:"bytes,4,opt,name=element,proto3,oneof"` +} + +func (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index) isScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_ExtractType() { +} + +func (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element) isScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_ExtractType() { +} + +// Specifies a header field's key value pair to match on. +type ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The separator between key and value (e.g., '=' separates 'k=v;...'). + // If an element is an empty string, the element is ignored. + // If an element contains no separator, the whole element is parsed as key and the + // fragment value is an empty string. + // If there are multiple values for a matched key, the first value is returned. + Separator string `protobuf:"bytes,1,opt,name=separator,proto3" json:"separator,omitempty"` + // The key to match on. + Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) Reset() { + *x = ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) ProtoMessage() {} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement.ProtoReflect.Descriptor instead. 
+func (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) Descriptor() ([]byte, []int) { + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP(), []int{5, 0, 0, 0, 0} +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) GetSeparator() string { + if x != nil { + return x.Separator + } + return "" +} + +func (x *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +var File_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto protoreflect.FileDescriptor + +var file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDesc = []byte{ + 0x0a, 0x59, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x6e, 0x65, 0x74, 0x77, 0x6f, + 0x72, 0x6b, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, + 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x3b, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, + 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, + 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, + 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x35, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, + 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 
0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2f, 0x76, 0x33, 0x2f, 0x70, + 0x61, 0x74, 0x68, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x74, 0x79, 0x70, 0x65, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, + 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x61, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, + 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, + 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9e, 0x42, 0x0a, 0x15, 0x48, 0x74, + 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x12, 0x85, 0x01, 0x0a, 0x0a, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x5c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, + 
0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x64, + 0x65, 0x63, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, + 0x52, 0x09, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x0b, 0x73, + 0x74, 0x61, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x50, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x54, 0x0a, 0x03, 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x64, 0x73, 0x48, 0x00, 0x52, 0x03, 0x72, 0x64, 0x73, 0x12, 0x4e, 0x0a, 0x0c, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0b, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x70, 0x0a, 0x0d, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x1f, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x48, 0x00, 0x52, + 0x0c, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x6a, 0x0a, + 0x0c, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0b, 0x68, 0x74, + 0x74, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0e, 0x61, 0x64, 0x64, + 0x5f, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x61, + 0x64, 0x64, 0x55, 0x73, 
0x65, 0x72, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x74, 0x0a, 0x07, 0x74, + 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x5a, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, + 0x67, 0x12, 0x73, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, + 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x19, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4a, 0x0a, 0x22, 0x68, 0x74, 0x74, 0x70, 0x31, 0x5f, + 0x73, 0x61, 0x66, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x3a, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x1e, 0x68, 0x74, 0x74, 0x70, 0x31, 0x53, 0x61, 0x66, 0x65, 0x4d, 0x61, 0x78, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x5e, 0x0a, 0x15, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x68, + 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x69, 0x0a, 0x16, 0x68, 0x74, 0x74, 0x70, 0x32, 0x5f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x07, + 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x14, 0x68, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x60, 0x0a, + 0x16, 0x68, 0x74, 0x74, 0x70, 0x33, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 
0x70, 0x33, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x14, 0x68, 0x74, 0x74, 0x70, 0x33, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x2c, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, + 0x02, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0xb9, 0x01, + 0x0a, 0x1c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x22, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x6d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x1a, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, + 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x72, 0x0a, 0x1c, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x30, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x1a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, + 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5d, 0x0a, + 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x5f, 0x6b, 0x62, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, + 0x2a, 0x05, 0x18, 0x80, 0x40, 0x20, 0x00, 0x52, 0x13, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4b, 0x62, 0x12, 0x52, 0x0a, 0x13, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x11, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x12, 0x4b, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 
0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x0e, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x62, 0x0a, + 0x17, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x29, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0xfa, 0x42, 0x05, 0xaa, 0x01, + 0x02, 0x32, 0x00, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x15, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x12, 0x3e, 0x0a, 0x0d, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x12, 0x4d, 0x0a, 0x15, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x6f, + 0x73, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x64, 0x65, 0x6c, + 0x61, 0x79, 0x65, 0x64, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x0d, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x09, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x6d, 0x0a, 0x19, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, + 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x18, 0x36, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 0x17, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x32, 0x04, 0x10, 0xc0, 0x84, + 0x3d, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x16, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x12, 0x50, 0x0a, 0x1f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x6e, 0x5f, 0x6e, 0x65, 0x77, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x37, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, + 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x1a, 0x66, 0x6c, 0x75, 0x73, + 0x68, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x4f, 0x6e, 0x4e, 0x65, 0x77, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x94, 0x01, 0x0a, 0x12, 0x61, 0x63, 0x63, 0x65, 0x73, + 
0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x38, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x66, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x48, 0x63, 0x6d, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x4c, 0x6f, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x10, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x51, 0x0a, + 0x12, 0x75, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x8a, 0x93, 0xb7, 0x2a, 0x02, 0x08, 0x01, 0x52, 0x10, + 0x75, 0x73, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x12, 0x2f, 0x0a, 0x14, 0x78, 0x66, 0x66, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x72, 0x75, 0x73, + 0x74, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x70, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, + 0x78, 0x66, 0x66, 0x4e, 0x75, 0x6d, 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x48, 0x6f, 0x70, + 0x73, 0x12, 0x73, 0x0a, 0x20, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x70, + 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x2e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1d, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, + 0x6c, 0x49, 0x70, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x73, 0x0a, 0x20, 0x65, 0x61, 0x72, 0x6c, 0x79, 0x5f, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x34, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1d, 0x65, 0x61, + 0x72, 0x6c, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xa0, 0x01, 0x0a, 0x17, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x68, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x2e, 0x68, 0x74, 0x74, 
0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x26, + 0x0a, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x78, 0x66, 0x66, 0x5f, 0x61, 0x70, 0x70, 0x65, 0x6e, + 0x64, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x58, 0x66, 0x66, + 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x12, 0x1d, 0x0a, 0x03, 0x76, 0x69, 0x61, 0x18, 0x16, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, + 0x52, 0x03, 0x76, 0x69, 0x61, 0x12, 0x4a, 0x0a, 0x13, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x0f, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x11, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, + 0x64, 0x12, 0x3f, 0x0a, 0x1c, 0x70, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x20, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x70, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x49, 0x64, 0x12, 0x47, 0x0a, 0x21, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x65, 0x74, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x5f, 0x69, 0x6e, 0x5f, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x61, + 0x6c, 0x77, 0x61, 0x79, 0x73, 0x53, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, + 0x64, 0x49, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0xb4, 0x01, 0x0a, 0x1b, + 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, + 0x65, 0x72, 0x74, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x6b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x18, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, + 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, + 0x6c, 0x73, 0x12, 0xb4, 0x01, 0x0a, 0x1f, 0x73, 0x65, 0x74, 0x5f, 0x63, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 
0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x6e, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x2e, 0x53, 0x65, 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x1b, 0x73, 0x65, + 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, + 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x5f, 0x31, 0x30, 0x30, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x18, + 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x31, 0x30, 0x30, 0x43, + 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x12, 0x65, 0x0a, 0x31, 0x72, 0x65, 0x70, 0x72, 0x65, + 0x73, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x70, 0x76, 0x34, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x61, 0x73, 0x5f, 0x69, 0x70, 0x76, 0x34, + 0x5f, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x69, 0x70, 0x76, 0x36, 0x18, 0x14, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x2a, 0x72, 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x49, 0x70, 0x76, + 0x34, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x41, 0x73, + 0x49, 0x70, 0x76, 0x34, 0x4d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x49, 0x70, 0x76, 0x36, 0x12, 0x89, + 0x01, 0x0a, 0x0f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x73, 0x18, 0x17, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x60, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x55, 0x70, 0x67, + 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x75, 0x70, 0x67, 0x72, + 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x41, 0x0a, 0x0e, 0x6e, 0x6f, + 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x1e, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, + 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x23, 0x0a, + 0x0d, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x5f, 0x73, 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x21, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x53, 0x6c, 0x61, 0x73, 0x68, + 0x65, 0x73, 0x12, 0xb7, 0x01, 0x0a, 0x20, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x77, 0x69, 0x74, 0x68, + 0x5f, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, 0x64, 0x5f, 0x73, 0x6c, 0x61, 
0x73, 0x68, 0x65, 0x73, + 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x6f, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x57, 0x69, 0x74, 0x68, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, + 0x64, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1c, + 0x70, 0x61, 0x74, 0x68, 0x57, 0x69, 0x74, 0x68, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, 0x64, 0x53, + 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x81, 0x01, 0x0a, + 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, + 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x49, 0x44, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x7b, 0x0a, 0x12, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x79, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x26, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4d, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x50, 0x0a, + 0x18, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x5f, + 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x27, 0x20, 0x01, 0x28, 0x08, 0x42, + 0x17, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x11, 0x12, 0x0f, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x52, 0x15, 0x73, 0x74, 0x72, 0x69, 0x70, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x6f, 0x72, 0x74, 0x12, + 0x2f, 0x0a, 0x13, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x61, 0x6e, 0x79, 0x5f, 0x68, 0x6f, 0x73, + 0x74, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x10, + 0x73, 0x74, 0x72, 0x69, 0x70, 0x41, 0x6e, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x6f, 0x72, 0x74, + 0x12, 0x69, 0x0a, 0x24, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, + 
0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x28, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1f, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0xa9, 0x01, 0x0a, 0x1a, + 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x2b, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x6b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, + 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x4e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x18, 0x70, + 0x61, 0x74, 0x68, 0x4e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x74, 0x72, 0x69, 0x70, + 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x64, + 0x6f, 0x74, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x73, 0x74, 0x72, 0x69, 0x70, 0x54, + 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x48, 0x6f, 0x73, 0x74, 0x44, 0x6f, 0x74, 0x12, 0x94, + 0x01, 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x31, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x64, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x11, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6f, 0x0a, 0x1e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1b, 0x74, 0x79, 0x70, 0x65, 0x64, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x35, 0x0a, 0x17, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, + 0x5f, 0x78, 0x5f, 0x66, 
0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x5f, 0x70, 0x6f, 0x72, + 0x74, 0x18, 0x33, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x58, + 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x32, 0x0a, + 0x15, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x76, + 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x39, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x61, 0x70, + 0x70, 0x65, 0x6e, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, + 0x64, 0x12, 0x68, 0x0a, 0x23, 0x61, 0x64, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1f, 0x61, 0x64, 0x64, 0x50, + 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0xc2, 0x05, 0x0a, 0x07, + 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x12, 0x3f, 0x0a, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x3f, 0x0a, 0x0f, 0x72, 0x61, 0x6e, 0x64, + 0x6f, 0x6d, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x61, 0x6e, 0x64, 0x6f, + 0x6d, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x10, 0x6f, 0x76, 0x65, + 0x72, 0x61, 0x6c, 0x6c, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0f, 0x6f, 0x76, 0x65, + 0x72, 0x61, 0x6c, 0x6c, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, + 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x76, + 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x61, + 0x74, 0x68, 0x5f, 0x74, 0x61, 0x67, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x10, 0x6d, 0x61, 0x78, 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x67, 0x4c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x12, 0x41, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x61, + 0x67, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x73, 0x12, 0x3f, 
0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x08, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x4a, 0x0a, 0x13, 0x73, 0x70, 0x61, 0x77, 0x6e, + 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x11, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, + 0x70, 0x61, 0x6e, 0x22, 0x28, 0x0a, 0x0d, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, + 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x45, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x01, 0x3a, 0x5b, 0x9a, + 0xc5, 0x88, 0x1e, 0x56, 0x0a, 0x54, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x74, 0x74, + 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, + 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x0e, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x74, 0x61, 0x67, 0x73, + 0x1a, 0xe7, 0x01, 0x0a, 0x15, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, + 0x69, 0x78, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x78, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x40, 0x0a, + 0x0b, 0x63, 0x69, 0x64, 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x52, 0x0a, 0x63, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x3a, + 0x69, 0x9a, 0xc5, 0x88, 0x1e, 0x64, 0x0a, 0x62, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x48, + 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x98, 0x02, 0x0a, 0x1b, 0x53, + 0x65, 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x69, 
0x65, 0x6e, 0x74, 0x43, + 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x73, 0x75, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, + 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, + 0x63, 0x65, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x05, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6e, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x64, 0x6e, 0x73, 0x12, 0x10, 0x0a, 0x03, + 0x75, 0x72, 0x69, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x75, 0x72, 0x69, 0x3a, 0x6f, + 0x9a, 0xc5, 0x88, 0x1e, 0x6a, 0x0a, 0x68, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, + 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x74, + 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x4a, + 0x04, 0x08, 0x02, 0x10, 0x03, 0x1a, 0xae, 0x02, 0x0a, 0x0d, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, + 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x70, 0x67, 0x72, 0x61, + 0x64, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x75, + 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x61, 0x0a, 0x07, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, + 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x34, 0x0a, + 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x3a, 0x61, 0x9a, 0xc5, 0x88, 0x1e, 0x5c, 0x0a, 0x5a, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, + 0x76, 0x32, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xe5, 0x01, 0x0a, 0x18, 0x50, 0x61, 0x74, 0x68, 0x4e, + 
0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x63, 0x0a, 0x19, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, + 0x67, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, 0x68, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, + 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x64, 0x0a, 0x1a, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x76, + 0x33, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x68, 0x74, 0x74, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xe4, + 0x02, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x64, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x72, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x51, 0x0a, 0x25, 0x72, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x22, 0x72, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x72, 0x6d, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, + 0x0a, 0x15, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x72, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, + 0x67, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x73, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x6d, 0x6d, + 0x65, 0x6e, 0x64, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x73, 0x65, 0x74, 0x52, 0x65, + 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x5f, 0x6e, 0x6f, 0x64, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x75, 0x73, + 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x2e, 0x0a, 0x12, 0x6c, 0x69, 0x74, 0x65, 0x72, + 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x10, 0x6c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x50, 0x72, + 0x6f, 0x78, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x5f, 0x6e, 0x61, 0x6d, 
0x65, 0x1a, 0x9d, 0x02, 0x0a, 0x13, 0x48, 0x63, 0x6d, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x62, 0x0a, + 0x19, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6c, 0x75, 0x73, + 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, + 0xaa, 0x01, 0x06, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x16, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x12, 0x43, 0x0a, 0x1f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x6e, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x66, 0x6c, 0x75, 0x73, + 0x68, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x4f, 0x6e, 0x4e, 0x65, 0x77, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x5d, 0x0a, 0x2c, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, + 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x6e, 0x5f, 0x74, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x73, 0x75, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x5f, 0x65, 0x73, 0x74, 0x61, 0x62, + 0x6c, 0x69, 0x73, 0x68, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x27, 0x66, 0x6c, + 0x75, 0x73, 0x68, 0x4c, 0x6f, 0x67, 0x4f, 0x6e, 0x54, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x53, 0x75, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, + 0x69, 0x73, 0x68, 0x65, 0x64, 0x22, 0x36, 0x0a, 0x09, 0x43, 0x6f, 0x64, 0x65, 0x63, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x55, 0x54, 0x4f, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, + 0x48, 0x54, 0x54, 0x50, 0x31, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x32, + 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x33, 0x10, 0x03, 0x22, 0x53, 0x0a, + 0x1a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0d, 0x0a, 0x09, 0x4f, + 0x56, 0x45, 0x52, 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x41, 0x50, + 0x50, 0x45, 0x4e, 0x44, 0x5f, 0x49, 0x46, 0x5f, 0x41, 0x42, 0x53, 0x45, 0x4e, 0x54, 0x10, 0x01, + 0x12, 0x10, 0x0a, 0x0c, 0x50, 0x41, 0x53, 0x53, 0x5f, 0x54, 0x48, 0x52, 0x4f, 0x55, 0x47, 0x48, + 0x10, 0x02, 0x22, 0x79, 0x0a, 0x18, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x0c, + 0x0a, 0x08, 0x53, 0x41, 0x4e, 0x49, 0x54, 0x49, 0x5a, 0x45, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, + 0x46, 0x4f, 0x52, 0x57, 0x41, 0x52, 0x44, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x12, + 0x0a, 0x0e, 0x41, 0x50, 0x50, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x4f, 0x52, 0x57, 0x41, 0x52, 0x44, + 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x41, 0x4e, 0x49, 0x54, 0x49, 0x5a, 0x45, 0x5f, 0x53, + 0x45, 0x54, 0x10, 0x03, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x57, 0x41, 0x59, 0x53, 0x5f, 0x46, + 0x4f, 0x52, 0x57, 0x41, 0x52, 0x44, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x04, 0x22, 0xa0, 0x01, + 0x0a, 0x1c, 0x50, 0x61, 0x74, 0x68, 0x57, 0x69, 0x74, 0x68, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, + 0x64, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 
0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, + 0x0a, 0x1f, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x41, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x43, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, + 0x54, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x4b, 0x45, 0x45, 0x50, 0x5f, 0x55, 0x4e, 0x43, 0x48, + 0x41, 0x4e, 0x47, 0x45, 0x44, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x4a, 0x45, 0x43, + 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x02, 0x12, 0x19, 0x0a, 0x15, 0x55, + 0x4e, 0x45, 0x53, 0x43, 0x41, 0x50, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x44, 0x49, + 0x52, 0x45, 0x43, 0x54, 0x10, 0x03, 0x12, 0x18, 0x0a, 0x14, 0x55, 0x4e, 0x45, 0x53, 0x43, 0x41, + 0x50, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x46, 0x4f, 0x52, 0x57, 0x41, 0x52, 0x44, 0x10, 0x04, + 0x3a, 0x53, 0x9a, 0xc5, 0x88, 0x1e, 0x4e, 0x0a, 0x4c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, + 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x42, 0x16, 0x0a, 0x0f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x11, 0x0a, + 0x0f, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, + 0x4a, 0x04, 0x08, 0x1b, 0x10, 0x1c, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x52, 0x0c, 0x69, 0x64, + 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0xca, 0x01, 0x0a, 0x10, 0x4c, + 0x6f, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x65, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x4b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x61, 0x70, 0x70, 0x65, 0x72, 0x52, 0x07, 0x6d, + 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x12, 0x4f, 0x0a, 0x0b, 0x62, 0x6f, 0x64, 0x79, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x62, 0x6f, 0x64, + 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x9c, 0x03, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x4c, 0x0a, 0x06, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 
0x01, 0x02, 0x10, 0x01, + 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x4a, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0xfa, 0x42, 0x08, + 0x2a, 0x06, 0x10, 0xd8, 0x04, 0x28, 0xc8, 0x01, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x43, 0x6f, 0x64, 0x65, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x60, 0x0a, 0x14, 0x62, 0x6f, + 0x64, 0x79, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, + 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x12, 0x62, 0x6f, 0x64, 0x79, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x58, 0x0a, 0x0e, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, + 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x0c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x22, 0xc7, 0x01, 0x0a, 0x03, 0x52, 0x64, 0x73, 0x12, 0x51, + 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, + 0x02, 0x10, 0x01, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x41, 0x9a, + 0xc5, 0x88, 0x1e, 0x3c, 0x0a, 0x3a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x64, 0x73, + 0x22, 0xf7, 0x01, 0x0a, 0x1d, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4c, 0x69, + 0x73, 0x74, 0x12, 0x79, 0x0a, 0x1b, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, + 
0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, + 0x08, 0x01, 0x52, 0x19, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x5b, 0x9a, + 0xc5, 0x88, 0x1e, 0x56, 0x0a, 0x54, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, + 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x22, 0xe5, 0x0e, 0x0a, 0x0c, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x8f, 0x01, 0x0a, 0x11, 0x73, 0x63, 0x6f, + 0x70, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x59, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, + 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x73, 0x63, 0x6f, 0x70, 0x65, + 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x11, 0x72, 0x64, + 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0f, 0x72, 0x64, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0xa5, 0x01, 0x0a, 0x20, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x5a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 
0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4c, 0x69, 0x73, + 0x74, 0x48, 0x00, 0x52, 0x1d, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4c, 0x69, + 0x73, 0x74, 0x12, 0x67, 0x0a, 0x0a, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x64, 0x73, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x64, 0x73, 0x48, 0x00, + 0x52, 0x09, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x64, 0x73, 0x1a, 0xdf, 0x09, 0x0a, 0x0f, + 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x12, + 0x91, 0x01, 0x0a, 0x09, 0x66, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x69, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x46, + 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x09, 0x66, 0x72, 0x61, 0x67, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x1a, 0xdb, 0x07, 0x0a, 0x0f, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, + 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x12, 0xb6, 0x01, 0x0a, 0x16, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, + 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x7e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, + 0x64, 0x65, 0x72, 0x2e, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x75, 0x69, 0x6c, + 0x64, 0x65, 0x72, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x45, + 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x48, 0x00, 0x52, 0x14, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, + 0x1a, 0x95, 0x05, 0x0a, 0x14, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x21, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 
0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, + 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x11, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x53, 0x65, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x16, 0x0a, 0x05, 0x69, 0x6e, 0x64, + 0x65, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, + 0x78, 0x12, 0xa5, 0x01, 0x0a, 0x07, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x88, 0x01, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, + 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, + 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x45, 0x78, 0x74, 0x72, 0x61, + 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4b, 0x76, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x48, 0x00, + 0x52, 0x07, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x1a, 0xdb, 0x01, 0x0a, 0x09, 0x4b, 0x76, + 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x09, 0x73, 0x65, 0x70, 0x61, 0x72, + 0x61, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x09, 0x73, 0x65, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x19, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x8b, 0x01, 0x9a, 0xc5, 0x88, 0x1e, + 0x85, 0x01, 0x0a, 0x82, 0x01, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, 0x70, + 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, + 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, + 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4b, 0x76, + 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x7f, 0x9a, 0xc5, 0x88, 0x1e, 0x7a, 0x0a, 0x78, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, + 0x64, 0x65, 0x72, 0x2e, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 
0x42, 0x75, 0x69, 0x6c, + 0x64, 0x65, 0x72, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x45, + 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x42, 0x0e, 0x0a, 0x0c, 0x65, 0x78, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x3a, 0x6a, 0x9a, 0xc5, 0x88, 0x1e, 0x65, 0x0a, + 0x63, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, + 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, + 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x75, 0x69, + 0x6c, 0x64, 0x65, 0x72, 0x42, 0x0b, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, + 0x01, 0x3a, 0x5a, 0x9a, 0xc5, 0x88, 0x1e, 0x55, 0x0a, 0x53, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, + 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x53, 0x63, + 0x6f, 0x70, 0x65, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x3a, 0x4a, 0x9a, + 0xc5, 0x88, 0x1e, 0x45, 0x0a, 0x43, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, + 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x42, 0x17, 0x0a, 0x10, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, + 0x42, 0x01, 0x22, 0xf1, 0x01, 0x0a, 0x09, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x64, 0x73, + 0x12, 0x65, 0x0a, 0x18, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x64, 0x73, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, + 0x52, 0x15, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x73, 0x72, 0x64, 0x73, 0x5f, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x73, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x3a, 0x47, 0x9a, + 0xc5, 0x88, 0x1e, 0x42, 0x0a, 0x40, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 
0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x6f, + 0x70, 0x65, 0x64, 0x52, 0x64, 0x73, 0x22, 0xe8, 0x02, 0x0a, 0x0a, 0x48, 0x74, 0x74, 0x70, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, + 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x58, 0x0a, + 0x10, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x69, + 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x3a, 0x48, 0x9a, 0xc5, 0x88, 0x1e, 0x43, 0x0a, 0x41, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x0d, + 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, + 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x22, 0x9f, 0x01, 0x0a, 0x12, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x44, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x3a, 0x50, 0x9a, 0xc5, 0x88, 0x1e, 0x4b, 0x0a, 0x49, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x32, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x44, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x22, 0x8e, 0x01, 0x0a, 0x20, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x4d, 0x6f, 0x62, + 0x69, 0x6c, 0x65, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x61, 0x6e, 
0x61, 0x67, 0x65, 0x72, 0x12, 0x6a, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x52, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x52, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x42, 0xef, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, + 0x49, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x1a, 0x48, 0x74, 0x74, 0x70, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x7c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, + 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x68, 0x74, 0x74, + 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescOnce sync.Once + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescData = file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDesc +) + +func file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescGZIP() []byte { + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescOnce.Do(func() { + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescData) + }) + return file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDescData +} + +var file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_enumTypes = make([]protoimpl.EnumInfo, 5) +var file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes = make([]protoimpl.MessageInfo, 21) +var 
file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_goTypes = []interface{}{ + (HttpConnectionManager_CodecType)(0), // 0: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.CodecType + (HttpConnectionManager_ServerHeaderTransformation)(0), // 1: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.ServerHeaderTransformation + (HttpConnectionManager_ForwardClientCertDetails)(0), // 2: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.ForwardClientCertDetails + (HttpConnectionManager_PathWithEscapedSlashesAction)(0), // 3: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.PathWithEscapedSlashesAction + (HttpConnectionManager_Tracing_OperationName)(0), // 4: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.OperationName + (*HttpConnectionManager)(nil), // 5: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + (*LocalReplyConfig)(nil), // 6: envoy.extensions.filters.network.http_connection_manager.v3.LocalReplyConfig + (*ResponseMapper)(nil), // 7: envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper + (*Rds)(nil), // 8: envoy.extensions.filters.network.http_connection_manager.v3.Rds + (*ScopedRouteConfigurationsList)(nil), // 9: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRouteConfigurationsList + (*ScopedRoutes)(nil), // 10: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes + (*ScopedRds)(nil), // 11: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRds + (*HttpFilter)(nil), // 12: envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter + (*RequestIDExtension)(nil), // 13: envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension + (*EnvoyMobileHttpConnectionManager)(nil), // 14: envoy.extensions.filters.network.http_connection_manager.v3.EnvoyMobileHttpConnectionManager + (*HttpConnectionManager_Tracing)(nil), // 15: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing + (*HttpConnectionManager_InternalAddressConfig)(nil), // 16: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.InternalAddressConfig + (*HttpConnectionManager_SetCurrentClientCertDetails)(nil), // 17: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.SetCurrentClientCertDetails + (*HttpConnectionManager_UpgradeConfig)(nil), // 18: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.UpgradeConfig + (*HttpConnectionManager_PathNormalizationOptions)(nil), // 19: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.PathNormalizationOptions + (*HttpConnectionManager_ProxyStatusConfig)(nil), // 20: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.ProxyStatusConfig + (*HttpConnectionManager_HcmAccessLogOptions)(nil), // 21: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.HcmAccessLogOptions + (*ScopedRoutes_ScopeKeyBuilder)(nil), // 22: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder + (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder)(nil), // 23: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder + 
(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor)(nil), // 24: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor + (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement)(nil), // 25: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor.KvElement + (*v32.RouteConfiguration)(nil), // 26: envoy.config.route.v3.RouteConfiguration + (*wrapperspb.BoolValue)(nil), // 27: google.protobuf.BoolValue + (*v3.HttpProtocolOptions)(nil), // 28: envoy.config.core.v3.HttpProtocolOptions + (*v3.Http1ProtocolOptions)(nil), // 29: envoy.config.core.v3.Http1ProtocolOptions + (*v3.Http2ProtocolOptions)(nil), // 30: envoy.config.core.v3.Http2ProtocolOptions + (*v3.Http3ProtocolOptions)(nil), // 31: envoy.config.core.v3.Http3ProtocolOptions + (*v3.SchemeHeaderTransformation)(nil), // 32: envoy.config.core.v3.SchemeHeaderTransformation + (*wrapperspb.UInt32Value)(nil), // 33: google.protobuf.UInt32Value + (*durationpb.Duration)(nil), // 34: google.protobuf.Duration + (*v31.AccessLog)(nil), // 35: envoy.config.accesslog.v3.AccessLog + (*v3.TypedExtensionConfig)(nil), // 36: envoy.config.core.v3.TypedExtensionConfig + (*v3.SubstitutionFormatString)(nil), // 37: envoy.config.core.v3.SubstitutionFormatString + (*v31.AccessLogFilter)(nil), // 38: envoy.config.accesslog.v3.AccessLogFilter + (*v3.DataSource)(nil), // 39: envoy.config.core.v3.DataSource + (*v3.HeaderValueOption)(nil), // 40: envoy.config.core.v3.HeaderValueOption + (*v3.ConfigSource)(nil), // 41: envoy.config.core.v3.ConfigSource + (*v32.ScopedRouteConfiguration)(nil), // 42: envoy.config.route.v3.ScopedRouteConfiguration + (*anypb.Any)(nil), // 43: google.protobuf.Any + (*v3.ExtensionConfigSource)(nil), // 44: envoy.config.core.v3.ExtensionConfigSource + (*v33.Percent)(nil), // 45: envoy.type.v3.Percent + (*v34.CustomTag)(nil), // 46: envoy.type.tracing.v3.CustomTag + (*v35.Tracing_Http)(nil), // 47: envoy.config.trace.v3.Tracing.Http + (*v3.CidrRange)(nil), // 48: envoy.config.core.v3.CidrRange + (*v36.PathTransformation)(nil), // 49: envoy.type.http.v3.PathTransformation +} +var file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_depIdxs = []int32{ + 0, // 0: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.codec_type:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.CodecType + 8, // 1: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.rds:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.Rds + 26, // 2: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.route_config:type_name -> envoy.config.route.v3.RouteConfiguration + 10, // 3: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.scoped_routes:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes + 12, // 4: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.http_filters:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter + 27, // 5: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.add_user_agent:type_name -> google.protobuf.BoolValue + 15, // 6: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.tracing:type_name -> 
envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing + 28, // 7: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.common_http_protocol_options:type_name -> envoy.config.core.v3.HttpProtocolOptions + 29, // 8: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.http_protocol_options:type_name -> envoy.config.core.v3.Http1ProtocolOptions + 30, // 9: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.http2_protocol_options:type_name -> envoy.config.core.v3.Http2ProtocolOptions + 31, // 10: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.http3_protocol_options:type_name -> envoy.config.core.v3.Http3ProtocolOptions + 1, // 11: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.server_header_transformation:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.ServerHeaderTransformation + 32, // 12: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.scheme_header_transformation:type_name -> envoy.config.core.v3.SchemeHeaderTransformation + 33, // 13: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.max_request_headers_kb:type_name -> google.protobuf.UInt32Value + 34, // 14: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_idle_timeout:type_name -> google.protobuf.Duration + 34, // 15: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.request_timeout:type_name -> google.protobuf.Duration + 34, // 16: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.request_headers_timeout:type_name -> google.protobuf.Duration + 34, // 17: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.drain_timeout:type_name -> google.protobuf.Duration + 34, // 18: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.delayed_close_timeout:type_name -> google.protobuf.Duration + 35, // 19: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.access_log:type_name -> envoy.config.accesslog.v3.AccessLog + 34, // 20: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.access_log_flush_interval:type_name -> google.protobuf.Duration + 21, // 21: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.access_log_options:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.HcmAccessLogOptions + 27, // 22: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.use_remote_address:type_name -> google.protobuf.BoolValue + 36, // 23: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.original_ip_detection_extensions:type_name -> envoy.config.core.v3.TypedExtensionConfig + 36, // 24: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.early_header_mutation_extensions:type_name -> envoy.config.core.v3.TypedExtensionConfig + 16, // 25: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.internal_address_config:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.InternalAddressConfig + 27, // 26: 
envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.generate_request_id:type_name -> google.protobuf.BoolValue + 2, // 27: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.forward_client_cert_details:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.ForwardClientCertDetails + 17, // 28: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.set_current_client_cert_details:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.SetCurrentClientCertDetails + 18, // 29: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.upgrade_configs:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.UpgradeConfig + 27, // 30: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.normalize_path:type_name -> google.protobuf.BoolValue + 3, // 31: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.path_with_escaped_slashes_action:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.PathWithEscapedSlashesAction + 13, // 32: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.request_id_extension:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension + 6, // 33: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.local_reply_config:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.LocalReplyConfig + 27, // 34: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue + 19, // 35: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.path_normalization_options:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.PathNormalizationOptions + 20, // 36: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.proxy_status_config:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.ProxyStatusConfig + 36, // 37: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.typed_header_validation_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 27, // 38: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.add_proxy_protocol_connection_state:type_name -> google.protobuf.BoolValue + 7, // 39: envoy.extensions.filters.network.http_connection_manager.v3.LocalReplyConfig.mappers:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper + 37, // 40: envoy.extensions.filters.network.http_connection_manager.v3.LocalReplyConfig.body_format:type_name -> envoy.config.core.v3.SubstitutionFormatString + 38, // 41: envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper.filter:type_name -> envoy.config.accesslog.v3.AccessLogFilter + 33, // 42: envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper.status_code:type_name -> google.protobuf.UInt32Value + 39, // 43: envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper.body:type_name -> envoy.config.core.v3.DataSource + 37, // 44: envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper.body_format_override:type_name -> 
envoy.config.core.v3.SubstitutionFormatString + 40, // 45: envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper.headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption + 41, // 46: envoy.extensions.filters.network.http_connection_manager.v3.Rds.config_source:type_name -> envoy.config.core.v3.ConfigSource + 42, // 47: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRouteConfigurationsList.scoped_route_configurations:type_name -> envoy.config.route.v3.ScopedRouteConfiguration + 22, // 48: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.scope_key_builder:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder + 41, // 49: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.rds_config_source:type_name -> envoy.config.core.v3.ConfigSource + 9, // 50: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.scoped_route_configurations_list:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRouteConfigurationsList + 11, // 51: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.scoped_rds:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRds + 41, // 52: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRds.scoped_rds_config_source:type_name -> envoy.config.core.v3.ConfigSource + 43, // 53: envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter.typed_config:type_name -> google.protobuf.Any + 44, // 54: envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter.config_discovery:type_name -> envoy.config.core.v3.ExtensionConfigSource + 43, // 55: envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension.typed_config:type_name -> google.protobuf.Any + 5, // 56: envoy.extensions.filters.network.http_connection_manager.v3.EnvoyMobileHttpConnectionManager.config:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + 45, // 57: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.client_sampling:type_name -> envoy.type.v3.Percent + 45, // 58: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.random_sampling:type_name -> envoy.type.v3.Percent + 45, // 59: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.overall_sampling:type_name -> envoy.type.v3.Percent + 33, // 60: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.max_path_tag_length:type_name -> google.protobuf.UInt32Value + 46, // 61: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.custom_tags:type_name -> envoy.type.tracing.v3.CustomTag + 47, // 62: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.provider:type_name -> envoy.config.trace.v3.Tracing.Http + 27, // 63: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.spawn_upstream_span:type_name -> google.protobuf.BoolValue + 48, // 64: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.InternalAddressConfig.cidr_ranges:type_name -> envoy.config.core.v3.CidrRange + 27, // 65: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.SetCurrentClientCertDetails.subject:type_name -> google.protobuf.BoolValue + 12, // 66: 
envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.UpgradeConfig.filters:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter + 27, // 67: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.UpgradeConfig.enabled:type_name -> google.protobuf.BoolValue + 49, // 68: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.PathNormalizationOptions.forwarding_transformation:type_name -> envoy.type.http.v3.PathTransformation + 49, // 69: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.PathNormalizationOptions.http_filter_transformation:type_name -> envoy.type.http.v3.PathTransformation + 34, // 70: envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.HcmAccessLogOptions.access_log_flush_interval:type_name -> google.protobuf.Duration + 23, // 71: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.fragments:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder + 24, // 72: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.header_value_extractor:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor + 25, // 73: envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor.element:type_name -> envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor.KvElement + 74, // [74:74] is the sub-list for method output_type + 74, // [74:74] is the sub-list for method input_type + 74, // [74:74] is the sub-list for extension type_name + 74, // [74:74] is the sub-list for extension extendee + 0, // [0:74] is the sub-list for field type_name +} + +func init() { + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_init() +} +func file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_init() { + if File_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpConnectionManager); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalReplyConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseMapper); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v 
:= v.(*Rds); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRouteConfigurationsList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRoutes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRds); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestIDExtension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnvoyMobileHttpConnectionManager); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpConnectionManager_Tracing); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpConnectionManager_InternalAddressConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpConnectionManager_SetCurrentClientCertDetails); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpConnectionManager_UpgradeConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } 
+ file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpConnectionManager_PathNormalizationOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpConnectionManager_ProxyStatusConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpConnectionManager_HcmAccessLogOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRoutes_ScopeKeyBuilder); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*HttpConnectionManager_Rds)(nil), + (*HttpConnectionManager_RouteConfig)(nil), + (*HttpConnectionManager_ScopedRoutes)(nil), + (*HttpConnectionManager_StripAnyHostPort)(nil), + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[5].OneofWrappers = []interface{}{ + (*ScopedRoutes_ScopedRouteConfigurationsList)(nil), + (*ScopedRoutes_ScopedRds)(nil), + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[7].OneofWrappers = []interface{}{ + (*HttpFilter_TypedConfig)(nil), + (*HttpFilter_ConfigDiscovery)(nil), + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[15].OneofWrappers = []interface{}{ + (*HttpConnectionManager_ProxyStatusConfig_UseNodeId)(nil), + 
(*HttpConnectionManager_ProxyStatusConfig_LiteralProxyName)(nil), + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[18].OneofWrappers = []interface{}{ + (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_)(nil), + } + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes[19].OneofWrappers = []interface{}{ + (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index)(nil), + (*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDesc, + NumEnums: 5, + NumMessages: 21, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_goTypes, + DependencyIndexes: file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_depIdxs, + EnumInfos: file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_enumTypes, + MessageInfos: file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_msgTypes, + }.Build() + File_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto = out.File + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_rawDesc = nil + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_goTypes = nil + file_envoy_extensions_filters_network_http_connection_manager_v3_http_connection_manager_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.go new file mode 100644 index 0000000000..094e7faaf0 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.go @@ -0,0 +1,4812 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto + +package http_connection_managerv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on HttpConnectionManager with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *HttpConnectionManager) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpConnectionManager with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HttpConnectionManagerMultiError, or nil if none found. +func (m *HttpConnectionManager) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpConnectionManager) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := HttpConnectionManager_CodecType_name[int32(m.GetCodecType())]; !ok { + err := HttpConnectionManagerValidationError{ + field: "CodecType", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetStatPrefix()) < 1 { + err := HttpConnectionManagerValidationError{ + field: "StatPrefix", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetHttpFilters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: fmt.Sprintf("HttpFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: fmt.Sprintf("HttpFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: fmt.Sprintf("HttpFilters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetAddUserAgent()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "AddUserAgent", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "AddUserAgent", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddUserAgent()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "AddUserAgent", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTracing()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "Tracing", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "Tracing", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTracing()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
HttpConnectionManagerValidationError{ + field: "Tracing", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCommonHttpProtocolOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "CommonHttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "CommonHttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCommonHttpProtocolOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "CommonHttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Http1SafeMaxConnectionDuration + + if all { + switch v := interface{}(m.GetHttpProtocolOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "HttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "HttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpProtocolOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "HttpProtocolOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetHttp2ProtocolOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "Http2ProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "Http2ProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttp2ProtocolOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "Http2ProtocolOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetHttp3ProtocolOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "Http3ProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "Http3ProtocolOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttp3ProtocolOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
HttpConnectionManagerValidationError{ + field: "Http3ProtocolOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if !_HttpConnectionManager_ServerName_Pattern.MatchString(m.GetServerName()) { + err := HttpConnectionManagerValidationError{ + field: "ServerName", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if _, ok := HttpConnectionManager_ServerHeaderTransformation_name[int32(m.GetServerHeaderTransformation())]; !ok { + err := HttpConnectionManagerValidationError{ + field: "ServerHeaderTransformation", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetSchemeHeaderTransformation()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "SchemeHeaderTransformation", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "SchemeHeaderTransformation", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSchemeHeaderTransformation()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "SchemeHeaderTransformation", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if wrapper := m.GetMaxRequestHeadersKb(); wrapper != nil { + + if val := wrapper.GetValue(); val <= 0 || val > 8192 { + err := HttpConnectionManagerValidationError{ + field: "MaxRequestHeadersKb", + reason: "value must be inside range (0, 8192]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetStreamIdleTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "StreamIdleTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "StreamIdleTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStreamIdleTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "StreamIdleTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRequestTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "RequestTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "RequestTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequestTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
HttpConnectionManagerValidationError{ + field: "RequestTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if d := m.GetRequestHeadersTimeout(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = HttpConnectionManagerValidationError{ + field: "RequestHeadersTimeout", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gte := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur < gte { + err := HttpConnectionManagerValidationError{ + field: "RequestHeadersTimeout", + reason: "value must be greater than or equal to 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if all { + switch v := interface{}(m.GetDrainTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "DrainTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "DrainTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDrainTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "DrainTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetDelayedCloseTimeout()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "DelayedCloseTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "DelayedCloseTimeout", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDelayedCloseTimeout()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "DelayedCloseTimeout", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetAccessLog() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: fmt.Sprintf("AccessLog[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: fmt.Sprintf("AccessLog[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: fmt.Sprintf("AccessLog[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if d := m.GetAccessLogFlushInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = HttpConnectionManagerValidationError{ + field: 
"AccessLogFlushInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gte := time.Duration(0*time.Second + 1000000*time.Nanosecond) + + if dur < gte { + err := HttpConnectionManagerValidationError{ + field: "AccessLogFlushInterval", + reason: "value must be greater than or equal to 1ms", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + // no validation rules for FlushAccessLogOnNewRequest + + if all { + switch v := interface{}(m.GetAccessLogOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "AccessLogOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "AccessLogOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAccessLogOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "AccessLogOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUseRemoteAddress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "UseRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "UseRemoteAddress", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUseRemoteAddress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "UseRemoteAddress", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for XffNumTrustedHops + + for idx, item := range m.GetOriginalIpDetectionExtensions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: fmt.Sprintf("OriginalIpDetectionExtensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: fmt.Sprintf("OriginalIpDetectionExtensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: fmt.Sprintf("OriginalIpDetectionExtensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetEarlyHeaderMutationExtensions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, 
HttpConnectionManagerValidationError{ + field: fmt.Sprintf("EarlyHeaderMutationExtensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: fmt.Sprintf("EarlyHeaderMutationExtensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: fmt.Sprintf("EarlyHeaderMutationExtensions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetInternalAddressConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "InternalAddressConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "InternalAddressConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInternalAddressConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "InternalAddressConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for SkipXffAppend + + if !_HttpConnectionManager_Via_Pattern.MatchString(m.GetVia()) { + err := HttpConnectionManagerValidationError{ + field: "Via", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetGenerateRequestId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "GenerateRequestId", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "GenerateRequestId", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGenerateRequestId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "GenerateRequestId", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for PreserveExternalRequestId + + // no validation rules for AlwaysSetRequestIdInResponse + + if _, ok := HttpConnectionManager_ForwardClientCertDetails_name[int32(m.GetForwardClientCertDetails())]; !ok { + err := HttpConnectionManagerValidationError{ + field: "ForwardClientCertDetails", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetSetCurrentClientCertDetails()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "SetCurrentClientCertDetails", + reason: "embedded message failed 
validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "SetCurrentClientCertDetails", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSetCurrentClientCertDetails()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "SetCurrentClientCertDetails", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Proxy_100Continue + + // no validation rules for RepresentIpv4RemoteAddressAsIpv4MappedIpv6 + + for idx, item := range m.GetUpgradeConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: fmt.Sprintf("UpgradeConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: fmt.Sprintf("UpgradeConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: fmt.Sprintf("UpgradeConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetNormalizePath()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "NormalizePath", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "NormalizePath", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNormalizePath()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "NormalizePath", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for MergeSlashes + + // no validation rules for PathWithEscapedSlashesAction + + if all { + switch v := interface{}(m.GetRequestIdExtension()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "RequestIdExtension", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "RequestIdExtension", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequestIdExtension()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "RequestIdExtension", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLocalReplyConfig()).(type) { + case 
interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "LocalReplyConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "LocalReplyConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalReplyConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "LocalReplyConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for StripMatchingHostPort + + if all { + switch v := interface{}(m.GetStreamErrorOnInvalidHttpMessage()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "StreamErrorOnInvalidHttpMessage", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "StreamErrorOnInvalidHttpMessage", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStreamErrorOnInvalidHttpMessage()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "StreamErrorOnInvalidHttpMessage", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPathNormalizationOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "PathNormalizationOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "PathNormalizationOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPathNormalizationOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "PathNormalizationOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for StripTrailingHostDot + + if all { + switch v := interface{}(m.GetProxyStatusConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "ProxyStatusConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "ProxyStatusConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetProxyStatusConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "ProxyStatusConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if 
all { + switch v := interface{}(m.GetTypedHeaderValidationConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "TypedHeaderValidationConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "TypedHeaderValidationConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedHeaderValidationConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "TypedHeaderValidationConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for AppendXForwardedPort + + // no validation rules for AppendLocalOverload + + if all { + switch v := interface{}(m.GetAddProxyProtocolConnectionState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "AddProxyProtocolConnectionState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "AddProxyProtocolConnectionState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAddProxyProtocolConnectionState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "AddProxyProtocolConnectionState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofRouteSpecifierPresent := false + switch v := m.RouteSpecifier.(type) { + case *HttpConnectionManager_Rds: + if v == nil { + err := HttpConnectionManagerValidationError{ + field: "RouteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRouteSpecifierPresent = true + + if all { + switch v := interface{}(m.GetRds()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "Rds", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "Rds", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRds()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "Rds", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *HttpConnectionManager_RouteConfig: + if v == nil { + err := HttpConnectionManagerValidationError{ + field: "RouteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRouteSpecifierPresent = true + + if all { + switch v := interface{}(m.GetRouteConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, 
HttpConnectionManagerValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRouteConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *HttpConnectionManager_ScopedRoutes: + if v == nil { + err := HttpConnectionManagerValidationError{ + field: "RouteSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRouteSpecifierPresent = true + + if all { + switch v := interface{}(m.GetScopedRoutes()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "ScopedRoutes", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManagerValidationError{ + field: "ScopedRoutes", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetScopedRoutes()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManagerValidationError{ + field: "ScopedRoutes", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofRouteSpecifierPresent { + err := HttpConnectionManagerValidationError{ + field: "RouteSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + switch v := m.StripPortMode.(type) { + case *HttpConnectionManager_StripAnyHostPort: + if v == nil { + err := HttpConnectionManagerValidationError{ + field: "StripPortMode", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for StripAnyHostPort + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return HttpConnectionManagerMultiError(errors) + } + + return nil +} + +// HttpConnectionManagerMultiError is an error wrapping multiple validation +// errors returned by HttpConnectionManager.ValidateAll() if the designated +// constraints aren't met. +type HttpConnectionManagerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpConnectionManagerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpConnectionManagerMultiError) AllErrors() []error { return m } + +// HttpConnectionManagerValidationError is the validation error returned by +// HttpConnectionManager.Validate if the designated constraints aren't met. +type HttpConnectionManagerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpConnectionManagerValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e HttpConnectionManagerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpConnectionManagerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpConnectionManagerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpConnectionManagerValidationError) ErrorName() string { + return "HttpConnectionManagerValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpConnectionManagerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpConnectionManager.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpConnectionManagerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpConnectionManagerValidationError{} + +var _HttpConnectionManager_ServerName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +var _HttpConnectionManager_Via_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on LocalReplyConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *LocalReplyConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LocalReplyConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// LocalReplyConfigMultiError, or nil if none found. 
+func (m *LocalReplyConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *LocalReplyConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetMappers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalReplyConfigValidationError{ + field: fmt.Sprintf("Mappers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalReplyConfigValidationError{ + field: fmt.Sprintf("Mappers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalReplyConfigValidationError{ + field: fmt.Sprintf("Mappers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetBodyFormat()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalReplyConfigValidationError{ + field: "BodyFormat", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalReplyConfigValidationError{ + field: "BodyFormat", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBodyFormat()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalReplyConfigValidationError{ + field: "BodyFormat", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return LocalReplyConfigMultiError(errors) + } + + return nil +} + +// LocalReplyConfigMultiError is an error wrapping multiple validation errors +// returned by LocalReplyConfig.ValidateAll() if the designated constraints +// aren't met. +type LocalReplyConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LocalReplyConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LocalReplyConfigMultiError) AllErrors() []error { return m } + +// LocalReplyConfigValidationError is the validation error returned by +// LocalReplyConfig.Validate if the designated constraints aren't met. +type LocalReplyConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LocalReplyConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LocalReplyConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LocalReplyConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LocalReplyConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e LocalReplyConfigValidationError) ErrorName() string { return "LocalReplyConfigValidationError" } + +// Error satisfies the builtin error interface +func (e LocalReplyConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLocalReplyConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LocalReplyConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LocalReplyConfigValidationError{} + +// Validate checks the field values on ResponseMapper with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ResponseMapper) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResponseMapper with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ResponseMapperMultiError, +// or nil if none found. +func (m *ResponseMapper) ValidateAll() error { + return m.validate(true) +} + +func (m *ResponseMapper) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetFilter() == nil { + err := ResponseMapperValidationError{ + field: "Filter", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResponseMapperValidationError{ + field: "Filter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResponseMapperValidationError{ + field: "Filter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResponseMapperValidationError{ + field: "Filter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if wrapper := m.GetStatusCode(); wrapper != nil { + + if val := wrapper.GetValue(); val < 200 || val >= 600 { + err := ResponseMapperValidationError{ + field: "StatusCode", + reason: "value must be inside range [200, 600)", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetBody()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResponseMapperValidationError{ + field: "Body", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResponseMapperValidationError{ + field: "Body", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBody()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResponseMapperValidationError{ + field: "Body", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetBodyFormatOverride()).(type) { + case interface{ 
ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResponseMapperValidationError{ + field: "BodyFormatOverride", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResponseMapperValidationError{ + field: "BodyFormatOverride", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBodyFormatOverride()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResponseMapperValidationError{ + field: "BodyFormatOverride", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(m.GetHeadersToAdd()) > 1000 { + err := ResponseMapperValidationError{ + field: "HeadersToAdd", + reason: "value must contain no more than 1000 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetHeadersToAdd() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResponseMapperValidationError{ + field: fmt.Sprintf("HeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResponseMapperValidationError{ + field: fmt.Sprintf("HeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResponseMapperValidationError{ + field: fmt.Sprintf("HeadersToAdd[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ResponseMapperMultiError(errors) + } + + return nil +} + +// ResponseMapperMultiError is an error wrapping multiple validation errors +// returned by ResponseMapper.ValidateAll() if the designated constraints +// aren't met. +type ResponseMapperMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResponseMapperMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResponseMapperMultiError) AllErrors() []error { return m } + +// ResponseMapperValidationError is the validation error returned by +// ResponseMapper.Validate if the designated constraints aren't met. +type ResponseMapperValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResponseMapperValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResponseMapperValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResponseMapperValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResponseMapperValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ResponseMapperValidationError) ErrorName() string { return "ResponseMapperValidationError" } + +// Error satisfies the builtin error interface +func (e ResponseMapperValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResponseMapper.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResponseMapperValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResponseMapperValidationError{} + +// Validate checks the field values on Rds with the rules defined in the proto +// definition for this message. If any rules are violated, the first error +// encountered is returned, or nil if there are no violations. +func (m *Rds) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Rds with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in RdsMultiError, or nil if none found. +func (m *Rds) ValidateAll() error { + return m.validate(true) +} + +func (m *Rds) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetConfigSource() == nil { + err := RdsValidationError{ + field: "ConfigSource", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetConfigSource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RdsValidationError{ + field: "ConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RdsValidationError{ + field: "ConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfigSource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RdsValidationError{ + field: "ConfigSource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for RouteConfigName + + if len(errors) > 0 { + return RdsMultiError(errors) + } + + return nil +} + +// RdsMultiError is an error wrapping multiple validation errors returned by +// Rds.ValidateAll() if the designated constraints aren't met. +type RdsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RdsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RdsMultiError) AllErrors() []error { return m } + +// RdsValidationError is the validation error returned by Rds.Validate if the +// designated constraints aren't met. +type RdsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RdsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RdsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RdsValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e RdsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RdsValidationError) ErrorName() string { return "RdsValidationError" } + +// Error satisfies the builtin error interface +func (e RdsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRds.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RdsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RdsValidationError{} + +// Validate checks the field values on ScopedRouteConfigurationsList with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ScopedRouteConfigurationsList) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ScopedRouteConfigurationsList with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// ScopedRouteConfigurationsListMultiError, or nil if none found. +func (m *ScopedRouteConfigurationsList) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRouteConfigurationsList) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetScopedRouteConfigurations()) < 1 { + err := ScopedRouteConfigurationsListValidationError{ + field: "ScopedRouteConfigurations", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetScopedRouteConfigurations() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRouteConfigurationsListValidationError{ + field: fmt.Sprintf("ScopedRouteConfigurations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRouteConfigurationsListValidationError{ + field: fmt.Sprintf("ScopedRouteConfigurations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRouteConfigurationsListValidationError{ + field: fmt.Sprintf("ScopedRouteConfigurations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ScopedRouteConfigurationsListMultiError(errors) + } + + return nil +} + +// ScopedRouteConfigurationsListMultiError is an error wrapping multiple +// validation errors returned by ScopedRouteConfigurationsList.ValidateAll() +// if the designated constraints aren't met. +type ScopedRouteConfigurationsListMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRouteConfigurationsListMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m ScopedRouteConfigurationsListMultiError) AllErrors() []error { return m } + +// ScopedRouteConfigurationsListValidationError is the validation error +// returned by ScopedRouteConfigurationsList.Validate if the designated +// constraints aren't met. +type ScopedRouteConfigurationsListValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRouteConfigurationsListValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScopedRouteConfigurationsListValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScopedRouteConfigurationsListValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScopedRouteConfigurationsListValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ScopedRouteConfigurationsListValidationError) ErrorName() string { + return "ScopedRouteConfigurationsListValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopedRouteConfigurationsListValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRouteConfigurationsList.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRouteConfigurationsListValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRouteConfigurationsListValidationError{} + +// Validate checks the field values on ScopedRoutes with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ScopedRoutes) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ScopedRoutes with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ScopedRoutesMultiError, or +// nil if none found. 
+func (m *ScopedRoutes) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRoutes) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := ScopedRoutesValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetScopeKeyBuilder() == nil { + err := ScopedRoutesValidationError{ + field: "ScopeKeyBuilder", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetScopeKeyBuilder()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesValidationError{ + field: "ScopeKeyBuilder", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesValidationError{ + field: "ScopeKeyBuilder", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetScopeKeyBuilder()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesValidationError{ + field: "ScopeKeyBuilder", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRdsConfigSource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesValidationError{ + field: "RdsConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesValidationError{ + field: "RdsConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRdsConfigSource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesValidationError{ + field: "RdsConfigSource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofConfigSpecifierPresent := false + switch v := m.ConfigSpecifier.(type) { + case *ScopedRoutes_ScopedRouteConfigurationsList: + if v == nil { + err := ScopedRoutesValidationError{ + field: "ConfigSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigSpecifierPresent = true + + if all { + switch v := interface{}(m.GetScopedRouteConfigurationsList()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesValidationError{ + field: "ScopedRouteConfigurationsList", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesValidationError{ + field: "ScopedRouteConfigurationsList", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetScopedRouteConfigurationsList()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesValidationError{ + field: "ScopedRouteConfigurationsList", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ScopedRoutes_ScopedRds: + if v == nil { + 
err := ScopedRoutesValidationError{ + field: "ConfigSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigSpecifierPresent = true + + if all { + switch v := interface{}(m.GetScopedRds()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutesValidationError{ + field: "ScopedRds", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutesValidationError{ + field: "ScopedRds", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetScopedRds()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutesValidationError{ + field: "ScopedRds", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofConfigSpecifierPresent { + err := ScopedRoutesValidationError{ + field: "ConfigSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ScopedRoutesMultiError(errors) + } + + return nil +} + +// ScopedRoutesMultiError is an error wrapping multiple validation errors +// returned by ScopedRoutes.ValidateAll() if the designated constraints aren't met. +type ScopedRoutesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRoutesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopedRoutesMultiError) AllErrors() []error { return m } + +// ScopedRoutesValidationError is the validation error returned by +// ScopedRoutes.Validate if the designated constraints aren't met. +type ScopedRoutesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRoutesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScopedRoutesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScopedRoutesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScopedRoutesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ScopedRoutesValidationError) ErrorName() string { return "ScopedRoutesValidationError" } + +// Error satisfies the builtin error interface +func (e ScopedRoutesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRoutes.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRoutesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRoutesValidationError{} + +// Validate checks the field values on ScopedRds with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *ScopedRds) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ScopedRds with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ScopedRdsMultiError, or nil +// if none found. +func (m *ScopedRds) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRds) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetScopedRdsConfigSource() == nil { + err := ScopedRdsValidationError{ + field: "ScopedRdsConfigSource", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetScopedRdsConfigSource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRdsValidationError{ + field: "ScopedRdsConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRdsValidationError{ + field: "ScopedRdsConfigSource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetScopedRdsConfigSource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRdsValidationError{ + field: "ScopedRdsConfigSource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for SrdsResourcesLocator + + if len(errors) > 0 { + return ScopedRdsMultiError(errors) + } + + return nil +} + +// ScopedRdsMultiError is an error wrapping multiple validation errors returned +// by ScopedRds.ValidateAll() if the designated constraints aren't met. +type ScopedRdsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRdsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopedRdsMultiError) AllErrors() []error { return m } + +// ScopedRdsValidationError is the validation error returned by +// ScopedRds.Validate if the designated constraints aren't met. +type ScopedRdsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRdsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScopedRdsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScopedRdsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScopedRdsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ScopedRdsValidationError) ErrorName() string { return "ScopedRdsValidationError" } + +// Error satisfies the builtin error interface +func (e ScopedRdsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRds.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRdsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRdsValidationError{} + +// Validate checks the field values on HttpFilter with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HttpFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpFilter with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HttpFilterMultiError, or +// nil if none found. +func (m *HttpFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := HttpFilterValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for IsOptional + + // no validation rules for Disabled + + switch v := m.ConfigType.(type) { + case *HttpFilter_TypedConfig: + if v == nil { + err := HttpFilterValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpFilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpFilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpFilterValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *HttpFilter_ConfigDiscovery: + if v == nil { + err := HttpFilterValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetConfigDiscovery()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpFilterValidationError{ + field: "ConfigDiscovery", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpFilterValidationError{ + field: "ConfigDiscovery", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfigDiscovery()).(interface{ 
Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpFilterValidationError{ + field: "ConfigDiscovery", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return HttpFilterMultiError(errors) + } + + return nil +} + +// HttpFilterMultiError is an error wrapping multiple validation errors +// returned by HttpFilter.ValidateAll() if the designated constraints aren't met. +type HttpFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpFilterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpFilterMultiError) AllErrors() []error { return m } + +// HttpFilterValidationError is the validation error returned by +// HttpFilter.Validate if the designated constraints aren't met. +type HttpFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpFilterValidationError) ErrorName() string { return "HttpFilterValidationError" } + +// Error satisfies the builtin error interface +func (e HttpFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpFilterValidationError{} + +// Validate checks the field values on RequestIDExtension with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RequestIDExtension) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RequestIDExtension with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RequestIDExtensionMultiError, or nil if none found. 
+func (m *RequestIDExtension) ValidateAll() error { + return m.validate(true) +} + +func (m *RequestIDExtension) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RequestIDExtensionValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RequestIDExtensionValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RequestIDExtensionValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RequestIDExtensionMultiError(errors) + } + + return nil +} + +// RequestIDExtensionMultiError is an error wrapping multiple validation errors +// returned by RequestIDExtension.ValidateAll() if the designated constraints +// aren't met. +type RequestIDExtensionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RequestIDExtensionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RequestIDExtensionMultiError) AllErrors() []error { return m } + +// RequestIDExtensionValidationError is the validation error returned by +// RequestIDExtension.Validate if the designated constraints aren't met. +type RequestIDExtensionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RequestIDExtensionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RequestIDExtensionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RequestIDExtensionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RequestIDExtensionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RequestIDExtensionValidationError) ErrorName() string { + return "RequestIDExtensionValidationError" +} + +// Error satisfies the builtin error interface +func (e RequestIDExtensionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRequestIDExtension.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RequestIDExtensionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RequestIDExtensionValidationError{} + +// Validate checks the field values on EnvoyMobileHttpConnectionManager with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. 
+func (m *EnvoyMobileHttpConnectionManager) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on EnvoyMobileHttpConnectionManager with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// EnvoyMobileHttpConnectionManagerMultiError, or nil if none found. +func (m *EnvoyMobileHttpConnectionManager) ValidateAll() error { + return m.validate(true) +} + +func (m *EnvoyMobileHttpConnectionManager) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EnvoyMobileHttpConnectionManagerValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EnvoyMobileHttpConnectionManagerValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EnvoyMobileHttpConnectionManagerValidationError{ + field: "Config", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return EnvoyMobileHttpConnectionManagerMultiError(errors) + } + + return nil +} + +// EnvoyMobileHttpConnectionManagerMultiError is an error wrapping multiple +// validation errors returned by +// EnvoyMobileHttpConnectionManager.ValidateAll() if the designated +// constraints aren't met. +type EnvoyMobileHttpConnectionManagerMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EnvoyMobileHttpConnectionManagerMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EnvoyMobileHttpConnectionManagerMultiError) AllErrors() []error { return m } + +// EnvoyMobileHttpConnectionManagerValidationError is the validation error +// returned by EnvoyMobileHttpConnectionManager.Validate if the designated +// constraints aren't met. +type EnvoyMobileHttpConnectionManagerValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EnvoyMobileHttpConnectionManagerValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EnvoyMobileHttpConnectionManagerValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EnvoyMobileHttpConnectionManagerValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EnvoyMobileHttpConnectionManagerValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e EnvoyMobileHttpConnectionManagerValidationError) ErrorName() string { + return "EnvoyMobileHttpConnectionManagerValidationError" +} + +// Error satisfies the builtin error interface +func (e EnvoyMobileHttpConnectionManagerValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEnvoyMobileHttpConnectionManager.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EnvoyMobileHttpConnectionManagerValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EnvoyMobileHttpConnectionManagerValidationError{} + +// Validate checks the field values on HttpConnectionManager_Tracing with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HttpConnectionManager_Tracing) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpConnectionManager_Tracing with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// HttpConnectionManager_TracingMultiError, or nil if none found. +func (m *HttpConnectionManager_Tracing) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpConnectionManager_Tracing) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetClientSampling()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: "ClientSampling", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: "ClientSampling", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetClientSampling()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_TracingValidationError{ + field: "ClientSampling", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRandomSampling()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: "RandomSampling", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: "RandomSampling", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRandomSampling()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_TracingValidationError{ + field: "RandomSampling", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetOverallSampling()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, 
HttpConnectionManager_TracingValidationError{ + field: "OverallSampling", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: "OverallSampling", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOverallSampling()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_TracingValidationError{ + field: "OverallSampling", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Verbose + + if all { + switch v := interface{}(m.GetMaxPathTagLength()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: "MaxPathTagLength", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: "MaxPathTagLength", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxPathTagLength()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_TracingValidationError{ + field: "MaxPathTagLength", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetCustomTags() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: fmt.Sprintf("CustomTags[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: fmt.Sprintf("CustomTags[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_TracingValidationError{ + field: fmt.Sprintf("CustomTags[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetProvider()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: "Provider", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: "Provider", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetProvider()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_TracingValidationError{ + field: "Provider", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSpawnUpstreamSpan()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + 
errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: "SpawnUpstreamSpan", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_TracingValidationError{ + field: "SpawnUpstreamSpan", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSpawnUpstreamSpan()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_TracingValidationError{ + field: "SpawnUpstreamSpan", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return HttpConnectionManager_TracingMultiError(errors) + } + + return nil +} + +// HttpConnectionManager_TracingMultiError is an error wrapping multiple +// validation errors returned by HttpConnectionManager_Tracing.ValidateAll() +// if the designated constraints aren't met. +type HttpConnectionManager_TracingMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpConnectionManager_TracingMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpConnectionManager_TracingMultiError) AllErrors() []error { return m } + +// HttpConnectionManager_TracingValidationError is the validation error +// returned by HttpConnectionManager_Tracing.Validate if the designated +// constraints aren't met. +type HttpConnectionManager_TracingValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpConnectionManager_TracingValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpConnectionManager_TracingValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpConnectionManager_TracingValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpConnectionManager_TracingValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpConnectionManager_TracingValidationError) ErrorName() string { + return "HttpConnectionManager_TracingValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpConnectionManager_TracingValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpConnectionManager_Tracing.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpConnectionManager_TracingValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpConnectionManager_TracingValidationError{} + +// Validate checks the field values on +// HttpConnectionManager_InternalAddressConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *HttpConnectionManager_InternalAddressConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// HttpConnectionManager_InternalAddressConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// HttpConnectionManager_InternalAddressConfigMultiError, or nil if none found. +func (m *HttpConnectionManager_InternalAddressConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpConnectionManager_InternalAddressConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for UnixSockets + + for idx, item := range m.GetCidrRanges() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManager_InternalAddressConfigValidationError{ + field: fmt.Sprintf("CidrRanges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_InternalAddressConfigValidationError{ + field: fmt.Sprintf("CidrRanges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_InternalAddressConfigValidationError{ + field: fmt.Sprintf("CidrRanges[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return HttpConnectionManager_InternalAddressConfigMultiError(errors) + } + + return nil +} + +// HttpConnectionManager_InternalAddressConfigMultiError is an error wrapping +// multiple validation errors returned by +// HttpConnectionManager_InternalAddressConfig.ValidateAll() if the designated +// constraints aren't met. +type HttpConnectionManager_InternalAddressConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpConnectionManager_InternalAddressConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpConnectionManager_InternalAddressConfigMultiError) AllErrors() []error { return m } + +// HttpConnectionManager_InternalAddressConfigValidationError is the validation +// error returned by HttpConnectionManager_InternalAddressConfig.Validate if +// the designated constraints aren't met. +type HttpConnectionManager_InternalAddressConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpConnectionManager_InternalAddressConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpConnectionManager_InternalAddressConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpConnectionManager_InternalAddressConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpConnectionManager_InternalAddressConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HttpConnectionManager_InternalAddressConfigValidationError) ErrorName() string { + return "HttpConnectionManager_InternalAddressConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpConnectionManager_InternalAddressConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpConnectionManager_InternalAddressConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpConnectionManager_InternalAddressConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpConnectionManager_InternalAddressConfigValidationError{} + +// Validate checks the field values on +// HttpConnectionManager_SetCurrentClientCertDetails with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HttpConnectionManager_SetCurrentClientCertDetails) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// HttpConnectionManager_SetCurrentClientCertDetails with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// HttpConnectionManager_SetCurrentClientCertDetailsMultiError, or nil if none found. +func (m *HttpConnectionManager_SetCurrentClientCertDetails) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpConnectionManager_SetCurrentClientCertDetails) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetSubject()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManager_SetCurrentClientCertDetailsValidationError{ + field: "Subject", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_SetCurrentClientCertDetailsValidationError{ + field: "Subject", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSubject()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_SetCurrentClientCertDetailsValidationError{ + field: "Subject", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Cert + + // no validation rules for Chain + + // no validation rules for Dns + + // no validation rules for Uri + + if len(errors) > 0 { + return HttpConnectionManager_SetCurrentClientCertDetailsMultiError(errors) + } + + return nil +} + +// HttpConnectionManager_SetCurrentClientCertDetailsMultiError is an error +// wrapping multiple validation errors returned by +// HttpConnectionManager_SetCurrentClientCertDetails.ValidateAll() if the +// designated constraints aren't met. +type HttpConnectionManager_SetCurrentClientCertDetailsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m HttpConnectionManager_SetCurrentClientCertDetailsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpConnectionManager_SetCurrentClientCertDetailsMultiError) AllErrors() []error { return m } + +// HttpConnectionManager_SetCurrentClientCertDetailsValidationError is the +// validation error returned by +// HttpConnectionManager_SetCurrentClientCertDetails.Validate if the +// designated constraints aren't met. +type HttpConnectionManager_SetCurrentClientCertDetailsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpConnectionManager_SetCurrentClientCertDetailsValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e HttpConnectionManager_SetCurrentClientCertDetailsValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e HttpConnectionManager_SetCurrentClientCertDetailsValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e HttpConnectionManager_SetCurrentClientCertDetailsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpConnectionManager_SetCurrentClientCertDetailsValidationError) ErrorName() string { + return "HttpConnectionManager_SetCurrentClientCertDetailsValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpConnectionManager_SetCurrentClientCertDetailsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpConnectionManager_SetCurrentClientCertDetails.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpConnectionManager_SetCurrentClientCertDetailsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpConnectionManager_SetCurrentClientCertDetailsValidationError{} + +// Validate checks the field values on HttpConnectionManager_UpgradeConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *HttpConnectionManager_UpgradeConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpConnectionManager_UpgradeConfig +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// HttpConnectionManager_UpgradeConfigMultiError, or nil if none found. 
+func (m *HttpConnectionManager_UpgradeConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpConnectionManager_UpgradeConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for UpgradeType + + for idx, item := range m.GetFilters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManager_UpgradeConfigValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_UpgradeConfigValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_UpgradeConfigValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetEnabled()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManager_UpgradeConfigValidationError{ + field: "Enabled", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_UpgradeConfigValidationError{ + field: "Enabled", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEnabled()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_UpgradeConfigValidationError{ + field: "Enabled", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return HttpConnectionManager_UpgradeConfigMultiError(errors) + } + + return nil +} + +// HttpConnectionManager_UpgradeConfigMultiError is an error wrapping multiple +// validation errors returned by +// HttpConnectionManager_UpgradeConfig.ValidateAll() if the designated +// constraints aren't met. +type HttpConnectionManager_UpgradeConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpConnectionManager_UpgradeConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpConnectionManager_UpgradeConfigMultiError) AllErrors() []error { return m } + +// HttpConnectionManager_UpgradeConfigValidationError is the validation error +// returned by HttpConnectionManager_UpgradeConfig.Validate if the designated +// constraints aren't met. +type HttpConnectionManager_UpgradeConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpConnectionManager_UpgradeConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpConnectionManager_UpgradeConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e HttpConnectionManager_UpgradeConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpConnectionManager_UpgradeConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpConnectionManager_UpgradeConfigValidationError) ErrorName() string { + return "HttpConnectionManager_UpgradeConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpConnectionManager_UpgradeConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpConnectionManager_UpgradeConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpConnectionManager_UpgradeConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpConnectionManager_UpgradeConfigValidationError{} + +// Validate checks the field values on +// HttpConnectionManager_PathNormalizationOptions with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HttpConnectionManager_PathNormalizationOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// HttpConnectionManager_PathNormalizationOptions with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// HttpConnectionManager_PathNormalizationOptionsMultiError, or nil if none found. +func (m *HttpConnectionManager_PathNormalizationOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpConnectionManager_PathNormalizationOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetForwardingTransformation()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManager_PathNormalizationOptionsValidationError{ + field: "ForwardingTransformation", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_PathNormalizationOptionsValidationError{ + field: "ForwardingTransformation", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetForwardingTransformation()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_PathNormalizationOptionsValidationError{ + field: "ForwardingTransformation", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetHttpFilterTransformation()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HttpConnectionManager_PathNormalizationOptionsValidationError{ + field: "HttpFilterTransformation", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HttpConnectionManager_PathNormalizationOptionsValidationError{ + field: "HttpFilterTransformation", + reason: "embedded message 
failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHttpFilterTransformation()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HttpConnectionManager_PathNormalizationOptionsValidationError{ + field: "HttpFilterTransformation", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return HttpConnectionManager_PathNormalizationOptionsMultiError(errors) + } + + return nil +} + +// HttpConnectionManager_PathNormalizationOptionsMultiError is an error +// wrapping multiple validation errors returned by +// HttpConnectionManager_PathNormalizationOptions.ValidateAll() if the +// designated constraints aren't met. +type HttpConnectionManager_PathNormalizationOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpConnectionManager_PathNormalizationOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpConnectionManager_PathNormalizationOptionsMultiError) AllErrors() []error { return m } + +// HttpConnectionManager_PathNormalizationOptionsValidationError is the +// validation error returned by +// HttpConnectionManager_PathNormalizationOptions.Validate if the designated +// constraints aren't met. +type HttpConnectionManager_PathNormalizationOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpConnectionManager_PathNormalizationOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpConnectionManager_PathNormalizationOptionsValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e HttpConnectionManager_PathNormalizationOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpConnectionManager_PathNormalizationOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpConnectionManager_PathNormalizationOptionsValidationError) ErrorName() string { + return "HttpConnectionManager_PathNormalizationOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpConnectionManager_PathNormalizationOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpConnectionManager_PathNormalizationOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpConnectionManager_PathNormalizationOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpConnectionManager_PathNormalizationOptionsValidationError{} + +// Validate checks the field values on HttpConnectionManager_ProxyStatusConfig +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. 
+func (m *HttpConnectionManager_ProxyStatusConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// HttpConnectionManager_ProxyStatusConfig with the rules defined in the proto +// definition for this message. If any rules are violated, the result is a +// list of violation errors wrapped in +// HttpConnectionManager_ProxyStatusConfigMultiError, or nil if none found. +func (m *HttpConnectionManager_ProxyStatusConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpConnectionManager_ProxyStatusConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for RemoveDetails + + // no validation rules for RemoveConnectionTerminationDetails + + // no validation rules for RemoveResponseFlags + + // no validation rules for SetRecommendedResponseCode + + switch v := m.ProxyName.(type) { + case *HttpConnectionManager_ProxyStatusConfig_UseNodeId: + if v == nil { + err := HttpConnectionManager_ProxyStatusConfigValidationError{ + field: "ProxyName", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for UseNodeId + case *HttpConnectionManager_ProxyStatusConfig_LiteralProxyName: + if v == nil { + err := HttpConnectionManager_ProxyStatusConfigValidationError{ + field: "ProxyName", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for LiteralProxyName + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return HttpConnectionManager_ProxyStatusConfigMultiError(errors) + } + + return nil +} + +// HttpConnectionManager_ProxyStatusConfigMultiError is an error wrapping +// multiple validation errors returned by +// HttpConnectionManager_ProxyStatusConfig.ValidateAll() if the designated +// constraints aren't met. +type HttpConnectionManager_ProxyStatusConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpConnectionManager_ProxyStatusConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpConnectionManager_ProxyStatusConfigMultiError) AllErrors() []error { return m } + +// HttpConnectionManager_ProxyStatusConfigValidationError is the validation +// error returned by HttpConnectionManager_ProxyStatusConfig.Validate if the +// designated constraints aren't met. +type HttpConnectionManager_ProxyStatusConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpConnectionManager_ProxyStatusConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpConnectionManager_ProxyStatusConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpConnectionManager_ProxyStatusConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpConnectionManager_ProxyStatusConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HttpConnectionManager_ProxyStatusConfigValidationError) ErrorName() string { + return "HttpConnectionManager_ProxyStatusConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpConnectionManager_ProxyStatusConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpConnectionManager_ProxyStatusConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpConnectionManager_ProxyStatusConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpConnectionManager_ProxyStatusConfigValidationError{} + +// Validate checks the field values on +// HttpConnectionManager_HcmAccessLogOptions with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HttpConnectionManager_HcmAccessLogOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// HttpConnectionManager_HcmAccessLogOptions with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// HttpConnectionManager_HcmAccessLogOptionsMultiError, or nil if none found. +func (m *HttpConnectionManager_HcmAccessLogOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpConnectionManager_HcmAccessLogOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if d := m.GetAccessLogFlushInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = HttpConnectionManager_HcmAccessLogOptionsValidationError{ + field: "AccessLogFlushInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gte := time.Duration(0*time.Second + 1000000*time.Nanosecond) + + if dur < gte { + err := HttpConnectionManager_HcmAccessLogOptionsValidationError{ + field: "AccessLogFlushInterval", + reason: "value must be greater than or equal to 1ms", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + // no validation rules for FlushAccessLogOnNewRequest + + // no validation rules for FlushLogOnTunnelSuccessfullyEstablished + + if len(errors) > 0 { + return HttpConnectionManager_HcmAccessLogOptionsMultiError(errors) + } + + return nil +} + +// HttpConnectionManager_HcmAccessLogOptionsMultiError is an error wrapping +// multiple validation errors returned by +// HttpConnectionManager_HcmAccessLogOptions.ValidateAll() if the designated +// constraints aren't met. +type HttpConnectionManager_HcmAccessLogOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpConnectionManager_HcmAccessLogOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m HttpConnectionManager_HcmAccessLogOptionsMultiError) AllErrors() []error { return m } + +// HttpConnectionManager_HcmAccessLogOptionsValidationError is the validation +// error returned by HttpConnectionManager_HcmAccessLogOptions.Validate if the +// designated constraints aren't met. +type HttpConnectionManager_HcmAccessLogOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpConnectionManager_HcmAccessLogOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpConnectionManager_HcmAccessLogOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpConnectionManager_HcmAccessLogOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpConnectionManager_HcmAccessLogOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpConnectionManager_HcmAccessLogOptionsValidationError) ErrorName() string { + return "HttpConnectionManager_HcmAccessLogOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpConnectionManager_HcmAccessLogOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpConnectionManager_HcmAccessLogOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpConnectionManager_HcmAccessLogOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpConnectionManager_HcmAccessLogOptionsValidationError{} + +// Validate checks the field values on ScopedRoutes_ScopeKeyBuilder with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ScopedRoutes_ScopeKeyBuilder) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ScopedRoutes_ScopeKeyBuilder with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ScopedRoutes_ScopeKeyBuilderMultiError, or nil if none found. 
+func (m *ScopedRoutes_ScopeKeyBuilder) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRoutes_ScopeKeyBuilder) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetFragments()) < 1 { + err := ScopedRoutes_ScopeKeyBuilderValidationError{ + field: "Fragments", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetFragments() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutes_ScopeKeyBuilderValidationError{ + field: fmt.Sprintf("Fragments[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutes_ScopeKeyBuilderValidationError{ + field: fmt.Sprintf("Fragments[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutes_ScopeKeyBuilderValidationError{ + field: fmt.Sprintf("Fragments[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ScopedRoutes_ScopeKeyBuilderMultiError(errors) + } + + return nil +} + +// ScopedRoutes_ScopeKeyBuilderMultiError is an error wrapping multiple +// validation errors returned by ScopedRoutes_ScopeKeyBuilder.ValidateAll() if +// the designated constraints aren't met. +type ScopedRoutes_ScopeKeyBuilderMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRoutes_ScopeKeyBuilderMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopedRoutes_ScopeKeyBuilderMultiError) AllErrors() []error { return m } + +// ScopedRoutes_ScopeKeyBuilderValidationError is the validation error returned +// by ScopedRoutes_ScopeKeyBuilder.Validate if the designated constraints +// aren't met. +type ScopedRoutes_ScopeKeyBuilderValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRoutes_ScopeKeyBuilderValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScopedRoutes_ScopeKeyBuilderValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScopedRoutes_ScopeKeyBuilderValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScopedRoutes_ScopeKeyBuilderValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ScopedRoutes_ScopeKeyBuilderValidationError) ErrorName() string { + return "ScopedRoutes_ScopeKeyBuilderValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopedRoutes_ScopeKeyBuilderValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRoutes_ScopeKeyBuilder.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRoutes_ScopeKeyBuilderValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRoutes_ScopeKeyBuilderValidationError{} + +// Validate checks the field values on +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilderMultiError, or nil if none found. +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofTypePresent := false + switch v := m.Type.(type) { + case *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_: + if v == nil { + err := ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTypePresent = true + + if all { + switch v := interface{}(m.GetHeaderValueExtractor()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError{ + field: "HeaderValueExtractor", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError{ + field: "HeaderValueExtractor", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeaderValueExtractor()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError{ + field: "HeaderValueExtractor", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofTypePresent { + err := ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError{ + field: "Type", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ScopedRoutes_ScopeKeyBuilder_FragmentBuilderMultiError(errors) + } + + return nil +} + +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilderMultiError is an error wrapping +// multiple validation errors returned by +// 
ScopedRoutes_ScopeKeyBuilder_FragmentBuilder.ValidateAll() if the +// designated constraints aren't met. +type ScopedRoutes_ScopeKeyBuilder_FragmentBuilderMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRoutes_ScopeKeyBuilder_FragmentBuilderMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopedRoutes_ScopeKeyBuilder_FragmentBuilderMultiError) AllErrors() []error { return m } + +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError is the +// validation error returned by +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder.Validate if the designated +// constraints aren't met. +type ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError) ErrorName() string { + return "ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRoutes_ScopeKeyBuilder_FragmentBuilder.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRoutes_ScopeKeyBuilder_FragmentBuilderValidationError{} + +// Validate checks the field values on +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorMultiError, +// or nil if none found. 
+func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Name_Pattern.MatchString(m.GetName()) { + err := ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError{ + field: "Name", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for ElementSeparator + + switch v := m.ExtractType.(type) { + case *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index: + if v == nil { + err := ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError{ + field: "ExtractType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for Index + case *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element: + if v == nil { + err := ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError{ + field: "ExtractType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetElement()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError{ + field: "Element", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError{ + field: "Element", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetElement()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError{ + field: "Element", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorMultiError(errors) + } + + return nil +} + +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorMultiError +// is an error wrapping multiple validation errors returned by +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor.ValidateAll() +// if the designated constraints aren't met. +type ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorMultiError) AllErrors() []error { + return m +} + +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError +// is the validation error returned by +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor.Validate +// if the designated constraints aren't met. +type ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError) Key() bool { + return e.key +} + +// ErrorName returns error name. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError) ErrorName() string { + return "ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractorValidationError{} + +var _ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Name_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementMultiError, +// or nil if none found. 
+func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetSeparator()) < 1 { + err := ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError{ + field: "Separator", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetKey()) < 1 { + err := ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError{ + field: "Key", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementMultiError(errors) + } + + return nil +} + +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementMultiError +// is an error wrapping multiple validation errors returned by +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement.ValidateAll() +// if the designated constraints aren't met. +type ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementMultiError) AllErrors() []error { + return m +} + +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError +// is the validation error returned by +// ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement.Validate +// if the designated constraints aren't met. +type ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError) Key() bool { + return e.key +} + +// ErrorName returns error name. 
+func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError) ErrorName() string { + return "ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElementValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager_vtproto.pb.go new file mode 100644 index 0000000000..3ecbe1295a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager_vtproto.pb.go @@ -0,0 +1,3470 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto + +package http_connection_managerv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *HttpConnectionManager_Tracing) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpConnectionManager_Tracing) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_Tracing) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SpawnUpstreamSpan != nil { + size, err := (*wrapperspb.BoolValue)(m.SpawnUpstreamSpan).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if m.Provider != nil { + if vtmsg, ok := interface{}(m.Provider).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Provider) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + if len(m.CustomTags) > 0 { + for iNdEx := len(m.CustomTags) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.CustomTags[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CustomTags[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + } + if m.MaxPathTagLength != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxPathTagLength).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.Verbose { + i-- + if m.Verbose { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.OverallSampling != nil { + if vtmsg, ok := interface{}(m.OverallSampling).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.OverallSampling) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + if m.RandomSampling != nil { + if vtmsg, ok := interface{}(m.RandomSampling).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RandomSampling) + 
if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.ClientSampling != nil { + if vtmsg, ok := interface{}(m.ClientSampling).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ClientSampling) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager_InternalAddressConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpConnectionManager_InternalAddressConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_InternalAddressConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CidrRanges) > 0 { + for iNdEx := len(m.CidrRanges) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.CidrRanges[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CidrRanges[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + } + if m.UnixSockets { + i-- + if m.UnixSockets { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager_SetCurrentClientCertDetails) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpConnectionManager_SetCurrentClientCertDetails) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_SetCurrentClientCertDetails) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Chain { + i-- + if m.Chain { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.Uri { + i-- + if m.Uri { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.Dns { + i-- + if m.Dns { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.Cert { + i-- + if m.Cert { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Subject != 
nil { + size, err := (*wrapperspb.BoolValue)(m.Subject).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager_UpgradeConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpConnectionManager_UpgradeConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_UpgradeConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Enabled != nil { + size, err := (*wrapperspb.BoolValue)(m.Enabled).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Filters) > 0 { + for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Filters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.UpgradeType) > 0 { + i -= len(m.UpgradeType) + copy(dAtA[i:], m.UpgradeType) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.UpgradeType))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager_PathNormalizationOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpConnectionManager_PathNormalizationOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_PathNormalizationOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.HttpFilterTransformation != nil { + if vtmsg, ok := interface{}(m.HttpFilterTransformation).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HttpFilterTransformation) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.ForwardingTransformation != nil { + if vtmsg, ok := interface{}(m.ForwardingTransformation).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ForwardingTransformation) 
+ if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager_ProxyStatusConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpConnectionManager_ProxyStatusConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_ProxyStatusConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ProxyName.(*HttpConnectionManager_ProxyStatusConfig_LiteralProxyName); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ProxyName.(*HttpConnectionManager_ProxyStatusConfig_UseNodeId); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.SetRecommendedResponseCode { + i-- + if m.SetRecommendedResponseCode { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.RemoveResponseFlags { + i-- + if m.RemoveResponseFlags { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.RemoveConnectionTerminationDetails { + i-- + if m.RemoveConnectionTerminationDetails { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.RemoveDetails { + i-- + if m.RemoveDetails { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager_ProxyStatusConfig_UseNodeId) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_ProxyStatusConfig_UseNodeId) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.UseNodeId { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + return len(dAtA) - i, nil +} +func (m *HttpConnectionManager_ProxyStatusConfig_LiteralProxyName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_ProxyStatusConfig_LiteralProxyName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.LiteralProxyName) + copy(dAtA[i:], m.LiteralProxyName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LiteralProxyName))) + i-- + dAtA[i] = 0x32 + return len(dAtA) - i, nil +} +func (m *HttpConnectionManager_HcmAccessLogOptions) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpConnectionManager_HcmAccessLogOptions) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_HcmAccessLogOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { 
+ if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.FlushLogOnTunnelSuccessfullyEstablished { + i-- + if m.FlushLogOnTunnelSuccessfullyEstablished { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.FlushAccessLogOnNewRequest { + i-- + if m.FlushAccessLogOnNewRequest { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.AccessLogFlushInterval != nil { + size, err := (*durationpb.Duration)(m.AccessLogFlushInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpConnectionManager) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Http1SafeMaxConnectionDuration { + i-- + if m.Http1SafeMaxConnectionDuration { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xd0 + } + if m.AppendLocalOverload { + i-- + if m.AppendLocalOverload { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xc8 + } + if m.AccessLogOptions != nil { + size, err := m.AccessLogOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xc2 + } + if m.FlushAccessLogOnNewRequest { + i-- + if m.FlushAccessLogOnNewRequest { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xb8 + } + if m.AccessLogFlushInterval != nil { + size, err := (*durationpb.Duration)(m.AccessLogFlushInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xb2 + } + if m.AddProxyProtocolConnectionState != nil { + size, err := (*wrapperspb.BoolValue)(m.AddProxyProtocolConnectionState).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xaa + } + if len(m.EarlyHeaderMutationExtensions) > 0 { + for iNdEx := len(m.EarlyHeaderMutationExtensions) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.EarlyHeaderMutationExtensions[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.EarlyHeaderMutationExtensions[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xa2 + } + } + if m.AppendXForwardedPort { + i-- + if m.AppendXForwardedPort { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x98 + } + if m.TypedHeaderValidationConfig != nil { + if vtmsg, ok := interface{}(m.TypedHeaderValidationConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TypedHeaderValidationConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x92 + } + if m.ProxyStatusConfig != nil { + size, err := m.ProxyStatusConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x8a + } + if m.SchemeHeaderTransformation != nil { + if vtmsg, ok := interface{}(m.SchemeHeaderTransformation).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SchemeHeaderTransformation) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x82 + } + if m.StripTrailingHostDot { + i-- + if m.StripTrailingHostDot { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xf8 + } + if len(m.OriginalIpDetectionExtensions) > 0 { + for iNdEx := len(m.OriginalIpDetectionExtensions) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.OriginalIpDetectionExtensions[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.OriginalIpDetectionExtensions[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xf2 + } + } + if m.PathWithEscapedSlashesAction != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.PathWithEscapedSlashesAction)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xe8 + } + if m.Http3ProtocolOptions != nil { + if vtmsg, ok := interface{}(m.Http3ProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Http3ProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xe2 + } + if m.PathNormalizationOptions != nil { + size, err := m.PathNormalizationOptions.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i 
-= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xda + } + if msg, ok := m.StripPortMode.(*HttpConnectionManager_StripAnyHostPort); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.RequestHeadersTimeout != nil { + size, err := (*durationpb.Duration)(m.RequestHeadersTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xca + } + if m.StreamErrorOnInvalidHttpMessage != nil { + size, err := (*wrapperspb.BoolValue)(m.StreamErrorOnInvalidHttpMessage).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc2 + } + if m.StripMatchingHostPort { + i-- + if m.StripMatchingHostPort { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb8 + } + if m.LocalReplyConfig != nil { + size, err := m.LocalReplyConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb2 + } + if m.AlwaysSetRequestIdInResponse { + i-- + if m.AlwaysSetRequestIdInResponse { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa8 + } + if m.RequestIdExtension != nil { + size, err := m.RequestIdExtension.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + if m.CommonHttpProtocolOptions != nil { + if vtmsg, ok := interface{}(m.CommonHttpProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CommonHttpProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + if m.ServerHeaderTransformation != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ServerHeaderTransformation)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x90 + } + if m.MergeSlashes { + i-- + if m.MergeSlashes { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x88 + } + if m.PreserveExternalRequestId { + i-- + if m.PreserveExternalRequestId { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x80 + } + if msg, ok := m.RouteSpecifier.(*HttpConnectionManager_ScopedRoutes); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.NormalizePath != nil { + size, err := (*wrapperspb.BoolValue)(m.NormalizePath).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf2 + } + if m.MaxRequestHeadersKb != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxRequestHeadersKb).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + } + if m.RequestTimeout != nil { + size, err := (*durationpb.Duration)(m.RequestTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + if m.DelayedCloseTimeout != nil { + size, err := (*durationpb.Duration)(m.DelayedCloseTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + if m.InternalAddressConfig != nil { + size, err := m.InternalAddressConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + if m.StreamIdleTimeout != nil { + size, err := (*durationpb.Duration)(m.StreamIdleTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if len(m.UpgradeConfigs) > 0 { + for iNdEx := len(m.UpgradeConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.UpgradeConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + } + if len(m.Via) > 0 { + i -= len(m.Via) + copy(dAtA[i:], m.Via) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Via))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if m.SkipXffAppend { + i-- + if m.SkipXffAppend { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if m.RepresentIpv4RemoteAddressAsIpv4MappedIpv6 { + i-- + if m.RepresentIpv4RemoteAddressAsIpv4MappedIpv6 { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if m.XffNumTrustedHops != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.XffNumTrustedHops)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if m.Proxy_100Continue { + i-- + if m.Proxy_100Continue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + } + if m.SetCurrentClientCertDetails != nil { + size, err := m.SetCurrentClientCertDetails.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.ForwardClientCertDetails != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ForwardClientCertDetails)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.GenerateRequestId != nil { + size, err := (*wrapperspb.BoolValue)(m.GenerateRequestId).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + if m.UseRemoteAddress != nil { + size, err := (*wrapperspb.BoolValue)(m.UseRemoteAddress).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if len(m.AccessLog) > 0 { + for iNdEx := len(m.AccessLog) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.AccessLog[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, 
error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.AccessLog[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + } + if m.DrainTimeout != nil { + size, err := (*durationpb.Duration)(m.DrainTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if len(m.ServerName) > 0 { + i -= len(m.ServerName) + copy(dAtA[i:], m.ServerName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServerName))) + i-- + dAtA[i] = 0x52 + } + if m.Http2ProtocolOptions != nil { + if vtmsg, ok := interface{}(m.Http2ProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Http2ProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + if m.HttpProtocolOptions != nil { + if vtmsg, ok := interface{}(m.HttpProtocolOptions).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HttpProtocolOptions) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + if m.Tracing != nil { + size, err := m.Tracing.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.AddUserAgent != nil { + size, err := (*wrapperspb.BoolValue)(m.AddUserAgent).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.HttpFilters) > 0 { + for iNdEx := len(m.HttpFilters) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.HttpFilters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if msg, ok := m.RouteSpecifier.(*HttpConnectionManager_RouteConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.RouteSpecifier.(*HttpConnectionManager_Rds); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.StatPrefix) > 0 { + i -= len(m.StatPrefix) + copy(dAtA[i:], m.StatPrefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StatPrefix))) + i-- + dAtA[i] = 0x12 + } + if m.CodecType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CodecType)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager_Rds) MarshalToVTStrict(dAtA []byte) 
(int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_Rds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Rds != nil { + size, err := m.Rds.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *HttpConnectionManager_RouteConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_RouteConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RouteConfig != nil { + if vtmsg, ok := interface{}(m.RouteConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RouteConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *HttpConnectionManager_ScopedRoutes) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_ScopedRoutes) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ScopedRoutes != nil { + size, err := m.ScopedRoutes.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa + } + return len(dAtA) - i, nil +} +func (m *HttpConnectionManager_StripAnyHostPort) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpConnectionManager_StripAnyHostPort) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.StripAnyHostPort { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xd0 + return len(dAtA) - i, nil +} +func (m *LocalReplyConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalReplyConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalReplyConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.BodyFormat != nil { + if vtmsg, ok := interface{}(m.BodyFormat).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size 
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.BodyFormat) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Mappers) > 0 { + for iNdEx := len(m.Mappers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Mappers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResponseMapper) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseMapper) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ResponseMapper) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.HeadersToAdd) > 0 { + for iNdEx := len(m.HeadersToAdd) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.HeadersToAdd[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.HeadersToAdd[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + } + if m.BodyFormatOverride != nil { + if vtmsg, ok := interface{}(m.BodyFormatOverride).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.BodyFormatOverride) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.Body != nil { + if vtmsg, ok := interface{}(m.Body).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Body) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.StatusCode != nil { + size, err := (*wrapperspb.UInt32Value)(m.StatusCode).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Filter != nil { + if vtmsg, ok := interface{}(m.Filter).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Filter) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Rds) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Rds) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Rds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RouteConfigName) > 0 { + i -= len(m.RouteConfigName) + copy(dAtA[i:], m.RouteConfigName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RouteConfigName))) + i-- + dAtA[i] = 0x12 + } + if m.ConfigSource != nil { + if vtmsg, ok := interface{}(m.ConfigSource).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConfigSource) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScopedRouteConfigurationsList) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRouteConfigurationsList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRouteConfigurationsList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ScopedRouteConfigurations) > 0 { + for iNdEx := len(m.ScopedRouteConfigurations) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ScopedRouteConfigurations[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ScopedRouteConfigurations[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} 
+ +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x12 + } + if len(m.Separator) > 0 { + i -= len(m.Separator) + copy(dAtA[i:], m.Separator) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Separator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ExtractType.(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ExtractType.(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.ElementSeparator) > 0 { + i -= len(m.ElementSeparator) + copy(dAtA[i:], m.ElementSeparator) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ElementSeparator))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x18 + return len(dAtA) - i, nil +} +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Element != nil { + size, err := m.Element.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + 
i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Type.(*ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HeaderValueExtractor != nil { + size, err := m.HeaderValueExtractor.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *ScopedRoutes_ScopeKeyBuilder) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRoutes_ScopeKeyBuilder) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopeKeyBuilder) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Fragments) > 0 { + for iNdEx := len(m.Fragments) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Fragments[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutes) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRoutes) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConfigSpecifier.(*ScopedRoutes_ScopedRds); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ConfigSpecifier.(*ScopedRoutes_ScopedRouteConfigurationsList); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.RdsConfigSource != nil { + if vtmsg, ok := interface{}(m.RdsConfigSource).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RdsConfigSource) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.ScopeKeyBuilder != nil { + size, err := m.ScopeKeyBuilder.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScopedRoutes_ScopedRouteConfigurationsList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopedRouteConfigurationsList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ScopedRouteConfigurationsList != nil { + size, err := m.ScopedRouteConfigurationsList.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *ScopedRoutes_ScopedRds) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRoutes_ScopedRds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ScopedRds != nil { + size, err := m.ScopedRds.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *ScopedRds) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedRds) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ScopedRds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.SrdsResourcesLocator) > 0 { + i -= len(m.SrdsResourcesLocator) + copy(dAtA[i:], 
m.SrdsResourcesLocator) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SrdsResourcesLocator))) + i-- + dAtA[i] = 0x12 + } + if m.ScopedRdsConfigSource != nil { + if vtmsg, ok := interface{}(m.ScopedRdsConfigSource).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ScopedRdsConfigSource) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpFilter) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpFilter) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Disabled { + i-- + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.IsOptional { + i-- + if m.IsOptional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if msg, ok := m.ConfigType.(*HttpFilter_ConfigDiscovery); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ConfigType.(*HttpFilter_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpFilter_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpFilter_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *HttpFilter_ConfigDiscovery) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpFilter_ConfigDiscovery) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ConfigDiscovery != nil { + if vtmsg, ok := interface{}(m.ConfigDiscovery).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConfigDiscovery) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *RequestIDExtension) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestIDExtension) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RequestIDExtension) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EnvoyMobileHttpConnectionManager) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvoyMobileHttpConnectionManager) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *EnvoyMobileHttpConnectionManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Config != nil { + size, err := m.Config.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpConnectionManager_Tracing) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClientSampling != nil { + if size, ok := interface{}(m.ClientSampling).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ClientSampling) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RandomSampling != nil { + if size, ok := interface{}(m.RandomSampling).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RandomSampling) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OverallSampling != nil { + if size, ok := interface{}(m.OverallSampling).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.OverallSampling) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Verbose { + n += 2 + } + if m.MaxPathTagLength != nil { + l = (*wrapperspb.UInt32Value)(m.MaxPathTagLength).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.CustomTags) > 0 { + for _, e := range m.CustomTags { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Provider != nil { + if size, ok := interface{}(m.Provider).(interface { + SizeVT() int + }); ok { + l 
= size.SizeVT() + } else { + l = proto.Size(m.Provider) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SpawnUpstreamSpan != nil { + l = (*wrapperspb.BoolValue)(m.SpawnUpstreamSpan).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpConnectionManager_InternalAddressConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UnixSockets { + n += 2 + } + if len(m.CidrRanges) > 0 { + for _, e := range m.CidrRanges { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *HttpConnectionManager_SetCurrentClientCertDetails) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Subject != nil { + l = (*wrapperspb.BoolValue)(m.Subject).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Cert { + n += 2 + } + if m.Dns { + n += 2 + } + if m.Uri { + n += 2 + } + if m.Chain { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *HttpConnectionManager_UpgradeConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UpgradeType) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Filters) > 0 { + for _, e := range m.Filters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Enabled != nil { + l = (*wrapperspb.BoolValue)(m.Enabled).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpConnectionManager_PathNormalizationOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ForwardingTransformation != nil { + if size, ok := interface{}(m.ForwardingTransformation).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ForwardingTransformation) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HttpFilterTransformation != nil { + if size, ok := interface{}(m.HttpFilterTransformation).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.HttpFilterTransformation) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpConnectionManager_ProxyStatusConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RemoveDetails { + n += 2 + } + if m.RemoveConnectionTerminationDetails { + n += 2 + } + if m.RemoveResponseFlags { + n += 2 + } + if m.SetRecommendedResponseCode { + n += 2 + } + if vtmsg, ok := m.ProxyName.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *HttpConnectionManager_ProxyStatusConfig_UseNodeId) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *HttpConnectionManager_ProxyStatusConfig_LiteralProxyName) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LiteralProxyName) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *HttpConnectionManager_HcmAccessLogOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AccessLogFlushInterval != nil { + l = (*durationpb.Duration)(m.AccessLogFlushInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if 
m.FlushAccessLogOnNewRequest { + n += 2 + } + if m.FlushLogOnTunnelSuccessfullyEstablished { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *HttpConnectionManager) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CodecType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.CodecType)) + } + l = len(m.StatPrefix) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.RouteSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if len(m.HttpFilters) > 0 { + for _, e := range m.HttpFilters { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.AddUserAgent != nil { + l = (*wrapperspb.BoolValue)(m.AddUserAgent).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Tracing != nil { + l = m.Tracing.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.HttpProtocolOptions != nil { + if size, ok := interface{}(m.HttpProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.HttpProtocolOptions) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Http2ProtocolOptions != nil { + if size, ok := interface{}(m.Http2ProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Http2ProtocolOptions) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ServerName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DrainTimeout != nil { + l = (*durationpb.Duration)(m.DrainTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.AccessLog) > 0 { + for _, e := range m.AccessLog { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.UseRemoteAddress != nil { + l = (*wrapperspb.BoolValue)(m.UseRemoteAddress).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.GenerateRequestId != nil { + l = (*wrapperspb.BoolValue)(m.GenerateRequestId).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ForwardClientCertDetails != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.ForwardClientCertDetails)) + } + if m.SetCurrentClientCertDetails != nil { + l = m.SetCurrentClientCertDetails.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Proxy_100Continue { + n += 3 + } + if m.XffNumTrustedHops != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.XffNumTrustedHops)) + } + if m.RepresentIpv4RemoteAddressAsIpv4MappedIpv6 { + n += 3 + } + if m.SkipXffAppend { + n += 3 + } + l = len(m.Via) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.UpgradeConfigs) > 0 { + for _, e := range m.UpgradeConfigs { + l = e.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.StreamIdleTimeout != nil { + l = (*durationpb.Duration)(m.StreamIdleTimeout).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.InternalAddressConfig != nil { + l = m.InternalAddressConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DelayedCloseTimeout != nil { + l = (*durationpb.Duration)(m.DelayedCloseTimeout).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RequestTimeout != nil { + l = (*durationpb.Duration)(m.RequestTimeout).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if 
m.MaxRequestHeadersKb != nil { + l = (*wrapperspb.UInt32Value)(m.MaxRequestHeadersKb).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.NormalizePath != nil { + l = (*wrapperspb.BoolValue)(m.NormalizePath).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PreserveExternalRequestId { + n += 3 + } + if m.MergeSlashes { + n += 3 + } + if m.ServerHeaderTransformation != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.ServerHeaderTransformation)) + } + if m.CommonHttpProtocolOptions != nil { + if size, ok := interface{}(m.CommonHttpProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CommonHttpProtocolOptions) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RequestIdExtension != nil { + l = m.RequestIdExtension.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AlwaysSetRequestIdInResponse { + n += 3 + } + if m.LocalReplyConfig != nil { + l = m.LocalReplyConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StripMatchingHostPort { + n += 3 + } + if m.StreamErrorOnInvalidHttpMessage != nil { + l = (*wrapperspb.BoolValue)(m.StreamErrorOnInvalidHttpMessage).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RequestHeadersTimeout != nil { + l = (*durationpb.Duration)(m.RequestHeadersTimeout).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.StripPortMode.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.PathNormalizationOptions != nil { + l = m.PathNormalizationOptions.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Http3ProtocolOptions != nil { + if size, ok := interface{}(m.Http3ProtocolOptions).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Http3ProtocolOptions) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PathWithEscapedSlashesAction != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.PathWithEscapedSlashesAction)) + } + if len(m.OriginalIpDetectionExtensions) > 0 { + for _, e := range m.OriginalIpDetectionExtensions { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.StripTrailingHostDot { + n += 3 + } + if m.SchemeHeaderTransformation != nil { + if size, ok := interface{}(m.SchemeHeaderTransformation).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SchemeHeaderTransformation) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ProxyStatusConfig != nil { + l = m.ProxyStatusConfig.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TypedHeaderValidationConfig != nil { + if size, ok := interface{}(m.TypedHeaderValidationConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TypedHeaderValidationConfig) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AppendXForwardedPort { + n += 3 + } + if len(m.EarlyHeaderMutationExtensions) > 0 { + for _, e := range m.EarlyHeaderMutationExtensions { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.AddProxyProtocolConnectionState != nil { + l = (*wrapperspb.BoolValue)(m.AddProxyProtocolConnectionState).SizeVT() + n += 2 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + if m.AccessLogFlushInterval != nil { + l = (*durationpb.Duration)(m.AccessLogFlushInterval).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FlushAccessLogOnNewRequest { + n += 3 + } + if m.AccessLogOptions != nil { + l = m.AccessLogOptions.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AppendLocalOverload { + n += 3 + } + if m.Http1SafeMaxConnectionDuration { + n += 3 + } + n += len(m.unknownFields) + return n +} + +func (m *HttpConnectionManager_Rds) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Rds != nil { + l = m.Rds.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HttpConnectionManager_RouteConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RouteConfig != nil { + if size, ok := interface{}(m.RouteConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RouteConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HttpConnectionManager_ScopedRoutes) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ScopedRoutes != nil { + l = m.ScopedRoutes.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *HttpConnectionManager_StripAnyHostPort) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 3 + return n +} +func (m *LocalReplyConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Mappers) > 0 { + for _, e := range m.Mappers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.BodyFormat != nil { + if size, ok := interface{}(m.BodyFormat).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.BodyFormat) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ResponseMapper) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Filter != nil { + if size, ok := interface{}(m.Filter).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Filter) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.StatusCode != nil { + l = (*wrapperspb.UInt32Value)(m.StatusCode).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Body != nil { + if size, ok := interface{}(m.Body).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Body) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.BodyFormatOverride != nil { + if size, ok := interface{}(m.BodyFormatOverride).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.BodyFormatOverride) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.HeadersToAdd) > 0 { + for _, e := range m.HeadersToAdd { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Rds) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConfigSource != nil { + if size, ok := interface{}(m.ConfigSource).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ConfigSource) + } + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.RouteConfigName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRouteConfigurationsList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ScopedRouteConfigurations) > 0 { + for _, e := range m.ScopedRouteConfigurations { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Separator) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ElementSeparator) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ExtractType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Index) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.Index)) + return n +} +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_Element) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Element != nil { + l = m.Element.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Type.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HeaderValueExtractor != nil { + l = m.HeaderValueExtractor.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ScopedRoutes_ScopeKeyBuilder) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Fragments) > 0 { + for _, e := range m.Fragments { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ScopedRoutes) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ScopeKeyBuilder != nil { + l = m.ScopeKeyBuilder.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RdsConfigSource != nil { + if size, ok := interface{}(m.RdsConfigSource).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RdsConfigSource) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m 
*ScopedRoutes_ScopedRouteConfigurationsList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ScopedRouteConfigurationsList != nil { + l = m.ScopedRouteConfigurationsList.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ScopedRoutes_ScopedRds) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ScopedRds != nil { + l = m.ScopedRds.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ScopedRds) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ScopedRdsConfigSource != nil { + if size, ok := interface{}(m.ScopedRdsConfigSource).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ScopedRdsConfigSource) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.SrdsResourcesLocator) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpFilter) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.IsOptional { + n += 2 + } + if m.Disabled { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *HttpFilter_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HttpFilter_ConfigDiscovery) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConfigDiscovery != nil { + if size, ok := interface{}(m.ConfigDiscovery).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ConfigDiscovery) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RequestIDExtension) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *EnvoyMobileHttpConnectionManager) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Config != nil { + l = m.Config.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.go new file mode 100644 index 0000000000..7d69f6349d --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.go @@ -0,0 +1,303 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
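[Editor's note, not part of the vendored patch: the *_vtproto.pb.go hunks above repeat one pattern per populated length-delimited field. SizeVT adds tag bytes plus a length varint plus the payload, which is why the generated code uses "n += 1 + l + protohelpers.SizeOfVarint(uint64(l))" for field numbers 1-15 and "n += 2 + ..." for 16 and above, and MarshalToSizedBufferVTStrict then writes the same bytes back-to-front into a buffer of exactly that size. The sketch below shows the same arithmetic with the stock protowire package; it assumes only the standard protobuf-go module and is an illustration, not the vtprotobuf implementation.]

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// sizeLenDelimited mirrors the generated SizeVT arithmetic for one
// length-delimited field: tag bytes + length-prefix varint + payload bytes.
// Field numbers 1-15 fit in a 1-byte tag, 16-2047 need 2 bytes, which is why
// the generated code switches from "n += 1 + l + ..." to "n += 2 + l + ...".
func sizeLenDelimited(num protowire.Number, payloadLen int) int {
	return protowire.SizeTag(num) + protowire.SizeBytes(payloadLen)
}

func main() {
	payload := []byte("example") // stands in for an already-encoded sub-message

	// Size the field the way SizeVT would.
	fmt.Println(sizeLenDelimited(1, len(payload)))  // 9: 1-byte tag + 1-byte length + 7-byte payload
	fmt.Println(sizeLenDelimited(50, len(payload))) // 10: 2-byte tag + 1-byte length + 7-byte payload

	// Encode the field front-to-back with protowire. The generated
	// MarshalToSizedBufferVTStrict produces the same bytes, but fills a
	// presized buffer from the end toward the front.
	var buf []byte
	buf = protowire.AppendTag(buf, 1, protowire.BytesType)
	buf = protowire.AppendBytes(buf, payload)
	fmt.Printf("% x\n", buf) // 0a 07 65 78 61 6d 70 6c 65; the leading 0x0a matches the dAtA[i] = 0xa assignments above
}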
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.proto + +package client_side_weighted_round_robinv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration for the client_side_weighted_round_robin LB policy. +// +// This policy differs from the built-in ROUND_ROBIN policy in terms of +// how the endpoint weights are determined. In the ROUND_ROBIN policy, +// the endpoint weights are sent by the control plane via EDS. However, +// in this policy, the endpoint weights are instead determined via qps (queries +// per second), eps (errors per second), and utilization metrics sent by the +// endpoint using the Open Request Cost Aggregation (ORCA) protocol. Utilization +// is determined by using the ORCA application_utilization field, if set, or +// else falling back to the cpu_utilization field. All queries count toward qps, +// regardless of result. Only failed queries count toward eps. A config +// parameter error_utilization_penalty controls the penalty to adjust endpoint +// weights using eps and qps. The weight of a given endpoint is computed as: +// +// qps / (utilization + eps/qps * error_utilization_penalty) +// +// See the :ref:`load balancing architecture overview` for more information. +// +// [#next-free-field: 7] +type ClientSideWeightedRoundRobin struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether to enable out-of-band utilization reporting collection from + // the endpoints. By default, per-request utilization reporting is used. + EnableOobLoadReport *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=enable_oob_load_report,json=enableOobLoadReport,proto3" json:"enable_oob_load_report,omitempty"` + // Load reporting interval to request from the server. Note that the + // server may not provide reports as frequently as the client requests. + // Used only when enable_oob_load_report is true. Default is 10 seconds. + OobReportingPeriod *durationpb.Duration `protobuf:"bytes,2,opt,name=oob_reporting_period,json=oobReportingPeriod,proto3" json:"oob_reporting_period,omitempty"` + // A given endpoint must report load metrics continuously for at least + // this long before the endpoint weight will be used. This avoids + // churn when the set of endpoint addresses changes. Takes effect + // both immediately after we establish a connection to an endpoint and + // after weight_expiration_period has caused us to stop using the most + // recent load metrics. Default is 10 seconds. + BlackoutPeriod *durationpb.Duration `protobuf:"bytes,3,opt,name=blackout_period,json=blackoutPeriod,proto3" json:"blackout_period,omitempty"` + // If a given endpoint has not reported load metrics in this long, + // then we stop using the reported weight. 
This ensures that we do + // not continue to use very stale weights. Once we stop using a stale + // value, if we later start seeing fresh reports again, the + // blackout_period applies. Defaults to 3 minutes. + WeightExpirationPeriod *durationpb.Duration `protobuf:"bytes,4,opt,name=weight_expiration_period,json=weightExpirationPeriod,proto3" json:"weight_expiration_period,omitempty"` + // How often endpoint weights are recalculated. Values less than 100ms are + // capped at 100ms. Default is 1 second. + WeightUpdatePeriod *durationpb.Duration `protobuf:"bytes,5,opt,name=weight_update_period,json=weightUpdatePeriod,proto3" json:"weight_update_period,omitempty"` + // The multiplier used to adjust endpoint weights with the error rate + // calculated as eps/qps. Configuration is rejected if this value is negative. + // Default is 1.0. + ErrorUtilizationPenalty *wrapperspb.FloatValue `protobuf:"bytes,6,opt,name=error_utilization_penalty,json=errorUtilizationPenalty,proto3" json:"error_utilization_penalty,omitempty"` +} + +func (x *ClientSideWeightedRoundRobin) Reset() { + *x = ClientSideWeightedRoundRobin{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientSideWeightedRoundRobin) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientSideWeightedRoundRobin) ProtoMessage() {} + +func (x *ClientSideWeightedRoundRobin) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientSideWeightedRoundRobin.ProtoReflect.Descriptor instead. 
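[Editor's note: a worked example of the weight formula quoted in the comment above, qps / (utilization + eps/qps * error_utilization_penalty). The helper name and the handling of zero or negative inputs are assumptions made for this sketch; it is not the Envoy or gRPC implementation of the policy.]

package main

import "fmt"

// endpointWeight evaluates the documented client_side_weighted_round_robin
// formula for one endpoint, given ORCA-style metrics:
//
//	weight = qps / (utilization + eps/qps * error_utilization_penalty)
//
// where utilization is application_utilization if reported, else cpu_utilization.
func endpointWeight(qps, eps, utilization, errorUtilizationPenalty float64) float64 {
	if qps <= 0 {
		return 0 // no queries observed; this sketch simply reports no usable weight
	}
	denom := utilization + eps/qps*errorUtilizationPenalty
	if denom <= 0 {
		return 0
	}
	return qps / denom
}

func main() {
	// Two endpoints at the same utilization; the one returning errors is
	// penalised once error_utilization_penalty is non-zero.
	fmt.Println(endpointWeight(100, 0, 0.5, 1.0))  // 200
	fmt.Println(endpointWeight(100, 10, 0.5, 1.0)) // ~166.7
}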
+func (*ClientSideWeightedRoundRobin) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDescGZIP(), []int{0} +} + +func (x *ClientSideWeightedRoundRobin) GetEnableOobLoadReport() *wrapperspb.BoolValue { + if x != nil { + return x.EnableOobLoadReport + } + return nil +} + +func (x *ClientSideWeightedRoundRobin) GetOobReportingPeriod() *durationpb.Duration { + if x != nil { + return x.OobReportingPeriod + } + return nil +} + +func (x *ClientSideWeightedRoundRobin) GetBlackoutPeriod() *durationpb.Duration { + if x != nil { + return x.BlackoutPeriod + } + return nil +} + +func (x *ClientSideWeightedRoundRobin) GetWeightExpirationPeriod() *durationpb.Duration { + if x != nil { + return x.WeightExpirationPeriod + } + return nil +} + +func (x *ClientSideWeightedRoundRobin) GetWeightUpdatePeriod() *durationpb.Duration { + if x != nil { + return x.WeightUpdatePeriod + } + return nil +} + +func (x *ClientSideWeightedRoundRobin) GetErrorUtilizationPenalty() *wrapperspb.FloatValue { + if x != nil { + return x.ErrorUtilizationPenalty + } + return nil +} + +var File_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto protoreflect.FileDescriptor + +var file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDesc = []byte{ + 0x0a, 0x73, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, + 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x2f, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x4c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x5f, 0x77, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, + 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x04, 0x0a, 0x1c, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x69, 0x64, 0x65, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, + 0x65, 0x64, 0x52, 
0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x12, 0x4f, 0x0a, 0x16, + 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6f, 0x6f, 0x62, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, + 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x13, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x4f, 0x6f, 0x62, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x4b, 0x0a, + 0x14, 0x6f, 0x6f, 0x62, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x6f, 0x6f, 0x62, 0x52, 0x65, 0x70, 0x6f, 0x72, + 0x74, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x42, 0x0a, 0x0f, 0x62, 0x6c, + 0x61, 0x63, 0x6b, 0x6f, 0x75, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, + 0x62, 0x6c, 0x61, 0x63, 0x6b, 0x6f, 0x75, 0x74, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x53, + 0x0a, 0x18, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x16, 0x77, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x12, 0x4b, 0x0a, 0x14, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x77, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, + 0x12, 0x63, 0x0a, 0x19, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x0a, 0x05, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x52, 0x17, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, + 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x42, 0xa2, 0x02, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, + 0x0a, 0x5a, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, + 0x73, 0x69, 0x64, 0x65, 0x5f, 0x77, 0x65, 
0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, + 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x21, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x53, 0x69, 0x64, 0x65, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, + 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x96, 0x01, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x5f, + 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, + 0x6f, 0x62, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, + 0x69, 0x64, 0x65, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, + 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDescOnce sync.Once + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDescData = file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDesc +) + +func file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDescGZIP() []byte { + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDescOnce.Do(func() { + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDescData) + }) + return file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDescData +} + +var file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_goTypes = []interface{}{ + (*ClientSideWeightedRoundRobin)(nil), // 0: envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin + (*wrapperspb.BoolValue)(nil), // 1: google.protobuf.BoolValue + (*durationpb.Duration)(nil), // 2: google.protobuf.Duration + (*wrapperspb.FloatValue)(nil), // 3: google.protobuf.FloatValue +} +var file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_depIdxs = []int32{ + 1, // 0: envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin.enable_oob_load_report:type_name -> google.protobuf.BoolValue + 2, // 1: 
envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin.oob_reporting_period:type_name -> google.protobuf.Duration + 2, // 2: envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin.blackout_period:type_name -> google.protobuf.Duration + 2, // 3: envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin.weight_expiration_period:type_name -> google.protobuf.Duration + 2, // 4: envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin.weight_update_period:type_name -> google.protobuf.Duration + 3, // 5: envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin.error_utilization_penalty:type_name -> google.protobuf.FloatValue + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_init() +} +func file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_init() { + if File_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientSideWeightedRoundRobin); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_goTypes, + DependencyIndexes: file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_depIdxs, + MessageInfos: file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_msgTypes, + }.Build() + File_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto = out.File + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDesc = nil + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_goTypes = nil + file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.validate.go 
b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.validate.go new file mode 100644 index 0000000000..0d6a6ca7eb --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.validate.go @@ -0,0 +1,300 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.proto + +package client_side_weighted_round_robinv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ClientSideWeightedRoundRobin with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ClientSideWeightedRoundRobin) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClientSideWeightedRoundRobin with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ClientSideWeightedRoundRobinMultiError, or nil if none found. 
+func (m *ClientSideWeightedRoundRobin) ValidateAll() error { + return m.validate(true) +} + +func (m *ClientSideWeightedRoundRobin) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetEnableOobLoadReport()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "EnableOobLoadReport", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "EnableOobLoadReport", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEnableOobLoadReport()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientSideWeightedRoundRobinValidationError{ + field: "EnableOobLoadReport", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetOobReportingPeriod()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "OobReportingPeriod", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "OobReportingPeriod", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOobReportingPeriod()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientSideWeightedRoundRobinValidationError{ + field: "OobReportingPeriod", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetBlackoutPeriod()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "BlackoutPeriod", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "BlackoutPeriod", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBlackoutPeriod()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientSideWeightedRoundRobinValidationError{ + field: "BlackoutPeriod", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetWeightExpirationPeriod()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "WeightExpirationPeriod", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "WeightExpirationPeriod", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWeightExpirationPeriod()).(interface{ Validate() error }); ok { + if err := 
v.Validate(); err != nil { + return ClientSideWeightedRoundRobinValidationError{ + field: "WeightExpirationPeriod", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetWeightUpdatePeriod()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "WeightUpdatePeriod", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientSideWeightedRoundRobinValidationError{ + field: "WeightUpdatePeriod", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWeightUpdatePeriod()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientSideWeightedRoundRobinValidationError{ + field: "WeightUpdatePeriod", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if wrapper := m.GetErrorUtilizationPenalty(); wrapper != nil { + + if wrapper.GetValue() < 0 { + err := ClientSideWeightedRoundRobinValidationError{ + field: "ErrorUtilizationPenalty", + reason: "value must be greater than or equal to 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return ClientSideWeightedRoundRobinMultiError(errors) + } + + return nil +} + +// ClientSideWeightedRoundRobinMultiError is an error wrapping multiple +// validation errors returned by ClientSideWeightedRoundRobin.ValidateAll() if +// the designated constraints aren't met. +type ClientSideWeightedRoundRobinMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClientSideWeightedRoundRobinMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClientSideWeightedRoundRobinMultiError) AllErrors() []error { return m } + +// ClientSideWeightedRoundRobinValidationError is the validation error returned +// by ClientSideWeightedRoundRobin.Validate if the designated constraints +// aren't met. +type ClientSideWeightedRoundRobinValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClientSideWeightedRoundRobinValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClientSideWeightedRoundRobinValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClientSideWeightedRoundRobinValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClientSideWeightedRoundRobinValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
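[Editor's note: the generated Validate/ValidateAll pair above follows the usual protoc-gen-validate contract: Validate returns the first violation, ValidateAll collects every violation into ClientSideWeightedRoundRobinMultiError. A small usage sketch follows; the import alias is arbitrary and the negative penalty is chosen only to trip the >= 0 constraint carried by error_utilization_penalty in the proto.]

package main

import (
	"fmt"

	cswrrv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// error_utilization_penalty must be >= 0, so this message is invalid.
	cfg := &cswrrv3.ClientSideWeightedRoundRobin{
		ErrorUtilizationPenalty: wrapperspb.Float(-1.0),
	}

	// Validate stops at the first violation.
	if err := cfg.Validate(); err != nil {
		fmt.Println("first violation:", err)
	}

	// ValidateAll gathers every violation into a MultiError.
	if err := cfg.ValidateAll(); err != nil {
		fmt.Println("all violations:", err)
	}
}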
+func (e ClientSideWeightedRoundRobinValidationError) ErrorName() string { + return "ClientSideWeightedRoundRobinValidationError" +} + +// Error satisfies the builtin error interface +func (e ClientSideWeightedRoundRobinValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClientSideWeightedRoundRobin.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClientSideWeightedRoundRobinValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClientSideWeightedRoundRobinValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin_vtproto.pb.go new file mode 100644 index 0000000000..250bbcfd33 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin_vtproto.pb.go @@ -0,0 +1,148 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.proto + +package client_side_weighted_round_robinv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ClientSideWeightedRoundRobin) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientSideWeightedRoundRobin) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClientSideWeightedRoundRobin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ErrorUtilizationPenalty != nil { + size, err := (*wrapperspb.FloatValue)(m.ErrorUtilizationPenalty).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.WeightUpdatePeriod != nil { + size, err := (*durationpb.Duration)(m.WeightUpdatePeriod).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.WeightExpirationPeriod != nil { + size, err := (*durationpb.Duration)(m.WeightExpirationPeriod).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.BlackoutPeriod != nil { + size, err := (*durationpb.Duration)(m.BlackoutPeriod).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.OobReportingPeriod != nil { + size, err := (*durationpb.Duration)(m.OobReportingPeriod).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.EnableOobLoadReport != nil { + size, err := (*wrapperspb.BoolValue)(m.EnableOobLoadReport).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientSideWeightedRoundRobin) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EnableOobLoadReport != nil { + l = (*wrapperspb.BoolValue)(m.EnableOobLoadReport).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OobReportingPeriod != nil { + l = (*durationpb.Duration)(m.OobReportingPeriod).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.BlackoutPeriod != nil { + l = (*durationpb.Duration)(m.BlackoutPeriod).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.WeightExpirationPeriod != nil { + l = (*durationpb.Duration)(m.WeightExpirationPeriod).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.WeightUpdatePeriod != nil { + l = (*durationpb.Duration)(m.WeightUpdatePeriod).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorUtilizationPenalty != nil { + l = (*wrapperspb.FloatValue)(m.ErrorUtilizationPenalty).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git 
a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.go new file mode 100644 index 0000000000..726732605b --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.go @@ -0,0 +1,617 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/load_balancing_policies/common/v3/common.proto + +package commonv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type LocalityLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to LocalityConfigSpecifier: + // + // *LocalityLbConfig_ZoneAwareLbConfig_ + // *LocalityLbConfig_LocalityWeightedLbConfig_ + LocalityConfigSpecifier isLocalityLbConfig_LocalityConfigSpecifier `protobuf_oneof:"locality_config_specifier"` +} + +func (x *LocalityLbConfig) Reset() { + *x = LocalityLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalityLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalityLbConfig) ProtoMessage() {} + +func (x *LocalityLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalityLbConfig.ProtoReflect.Descriptor instead. 
+func (*LocalityLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescGZIP(), []int{0} +} + +func (m *LocalityLbConfig) GetLocalityConfigSpecifier() isLocalityLbConfig_LocalityConfigSpecifier { + if m != nil { + return m.LocalityConfigSpecifier + } + return nil +} + +func (x *LocalityLbConfig) GetZoneAwareLbConfig() *LocalityLbConfig_ZoneAwareLbConfig { + if x, ok := x.GetLocalityConfigSpecifier().(*LocalityLbConfig_ZoneAwareLbConfig_); ok { + return x.ZoneAwareLbConfig + } + return nil +} + +func (x *LocalityLbConfig) GetLocalityWeightedLbConfig() *LocalityLbConfig_LocalityWeightedLbConfig { + if x, ok := x.GetLocalityConfigSpecifier().(*LocalityLbConfig_LocalityWeightedLbConfig_); ok { + return x.LocalityWeightedLbConfig + } + return nil +} + +type isLocalityLbConfig_LocalityConfigSpecifier interface { + isLocalityLbConfig_LocalityConfigSpecifier() +} + +type LocalityLbConfig_ZoneAwareLbConfig_ struct { + // Configuration for local zone aware load balancing. + ZoneAwareLbConfig *LocalityLbConfig_ZoneAwareLbConfig `protobuf:"bytes,1,opt,name=zone_aware_lb_config,json=zoneAwareLbConfig,proto3,oneof"` +} + +type LocalityLbConfig_LocalityWeightedLbConfig_ struct { + // Enable locality weighted load balancing. + LocalityWeightedLbConfig *LocalityLbConfig_LocalityWeightedLbConfig `protobuf:"bytes,2,opt,name=locality_weighted_lb_config,json=localityWeightedLbConfig,proto3,oneof"` +} + +func (*LocalityLbConfig_ZoneAwareLbConfig_) isLocalityLbConfig_LocalityConfigSpecifier() {} + +func (*LocalityLbConfig_LocalityWeightedLbConfig_) isLocalityLbConfig_LocalityConfigSpecifier() {} + +// Configuration for :ref:`slow start mode `. +type SlowStartConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Represents the size of slow start window. + // If set, the newly created host remains in slow start mode starting from its creation time + // for the duration of slow start window. + SlowStartWindow *durationpb.Duration `protobuf:"bytes,1,opt,name=slow_start_window,json=slowStartWindow,proto3" json:"slow_start_window,omitempty"` + // This parameter controls the speed of traffic increase over the slow start window. Defaults to 1.0, + // so that endpoint would get linearly increasing amount of traffic. + // When increasing the value for this parameter, the speed of traffic ramp-up increases non-linearly. + // The value of aggression parameter should be greater than 0.0. + // By tuning the parameter, is possible to achieve polynomial or exponential shape of ramp-up curve. + // + // During slow start window, effective weight of an endpoint would be scaled with time factor and aggression: + // “new_weight = weight * max(min_weight_percent, time_factor ^ (1 / aggression))“, + // where “time_factor=(time_since_start_seconds / slow_start_time_seconds)“. + // + // As time progresses, more and more traffic would be sent to endpoint, which is in slow start window. + // Once host exits slow start, time_factor and aggression no longer affect its weight. + Aggression *v3.RuntimeDouble `protobuf:"bytes,2,opt,name=aggression,proto3" json:"aggression,omitempty"` + // Configures the minimum percentage of origin weight that avoids too small new weight, + // which may cause endpoints in slow start mode receive no traffic in slow start window. + // If not specified, the default is 10%. 
+ MinWeightPercent *v31.Percent `protobuf:"bytes,3,opt,name=min_weight_percent,json=minWeightPercent,proto3" json:"min_weight_percent,omitempty"` +} + +func (x *SlowStartConfig) Reset() { + *x = SlowStartConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SlowStartConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SlowStartConfig) ProtoMessage() {} + +func (x *SlowStartConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SlowStartConfig.ProtoReflect.Descriptor instead. +func (*SlowStartConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescGZIP(), []int{1} +} + +func (x *SlowStartConfig) GetSlowStartWindow() *durationpb.Duration { + if x != nil { + return x.SlowStartWindow + } + return nil +} + +func (x *SlowStartConfig) GetAggression() *v3.RuntimeDouble { + if x != nil { + return x.Aggression + } + return nil +} + +func (x *SlowStartConfig) GetMinWeightPercent() *v31.Percent { + if x != nil { + return x.MinWeightPercent + } + return nil +} + +// Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) +type ConsistentHashingLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If set to “true“, the cluster will use hostname instead of the resolved + // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. + UseHostnameForHashing bool `protobuf:"varint,1,opt,name=use_hostname_for_hashing,json=useHostnameForHashing,proto3" json:"use_hostname_for_hashing,omitempty"` + // Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150 + // no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster. + // If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200. + // Minimum is 100. + // + // Applies to both Ring Hash and Maglev load balancers. + // + // This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified + // “hash_balance_factor“, requests to any upstream host are capped at “hash_balance_factor/100“ times the average number of requests + // across the cluster. When a request arrives for an upstream host that is currently serving at its max capacity, linear probing + // is used to identify an eligible host. Further, the linear probe is implemented using a random jump in hosts ring/table to identify + // the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the + // cascading overflow effect when choosing the next host in the ring/table). + // + // If weights are specified on the hosts, they are respected. + // + // This is an O(N) algorithm, unlike other load balancers. 
Using a lower “hash_balance_factor“ results in more hosts + // being probed, so use a higher value if you require better performance. + HashBalanceFactor *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=hash_balance_factor,json=hashBalanceFactor,proto3" json:"hash_balance_factor,omitempty"` +} + +func (x *ConsistentHashingLbConfig) Reset() { + *x = ConsistentHashingLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConsistentHashingLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConsistentHashingLbConfig) ProtoMessage() {} + +func (x *ConsistentHashingLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConsistentHashingLbConfig.ProtoReflect.Descriptor instead. +func (*ConsistentHashingLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescGZIP(), []int{2} +} + +func (x *ConsistentHashingLbConfig) GetUseHostnameForHashing() bool { + if x != nil { + return x.UseHostnameForHashing + } + return false +} + +func (x *ConsistentHashingLbConfig) GetHashBalanceFactor() *wrapperspb.UInt32Value { + if x != nil { + return x.HashBalanceFactor + } + return nil +} + +// Configuration for :ref:`zone aware routing +// `. +type LocalityLbConfig_ZoneAwareLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Configures percentage of requests that will be considered for zone aware routing + // if zone aware routing is configured. If not specified, the default is 100%. + // * :ref:`runtime values `. + // * :ref:`Zone aware routing support `. + RoutingEnabled *v31.Percent `protobuf:"bytes,1,opt,name=routing_enabled,json=routingEnabled,proto3" json:"routing_enabled,omitempty"` + // Configures minimum upstream cluster size required for zone aware routing + // If upstream cluster size is less than specified, zone aware routing is not performed + // even if zone aware routing is configured. If not specified, the default is 6. + // * :ref:`runtime values `. + // * :ref:`Zone aware routing support `. + MinClusterSize *wrapperspb.UInt64Value `protobuf:"bytes,2,opt,name=min_cluster_size,json=minClusterSize,proto3" json:"min_cluster_size,omitempty"` + // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic + // mode`. Instead, the cluster will fail all + // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a + // failing service. 
+ FailTrafficOnPanic bool `protobuf:"varint,3,opt,name=fail_traffic_on_panic,json=failTrafficOnPanic,proto3" json:"fail_traffic_on_panic,omitempty"` +} + +func (x *LocalityLbConfig_ZoneAwareLbConfig) Reset() { + *x = LocalityLbConfig_ZoneAwareLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalityLbConfig_ZoneAwareLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalityLbConfig_ZoneAwareLbConfig) ProtoMessage() {} + +func (x *LocalityLbConfig_ZoneAwareLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalityLbConfig_ZoneAwareLbConfig.ProtoReflect.Descriptor instead. +func (*LocalityLbConfig_ZoneAwareLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *LocalityLbConfig_ZoneAwareLbConfig) GetRoutingEnabled() *v31.Percent { + if x != nil { + return x.RoutingEnabled + } + return nil +} + +func (x *LocalityLbConfig_ZoneAwareLbConfig) GetMinClusterSize() *wrapperspb.UInt64Value { + if x != nil { + return x.MinClusterSize + } + return nil +} + +func (x *LocalityLbConfig_ZoneAwareLbConfig) GetFailTrafficOnPanic() bool { + if x != nil { + return x.FailTrafficOnPanic + } + return false +} + +// Configuration for :ref:`locality weighted load balancing +// ` +type LocalityLbConfig_LocalityWeightedLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *LocalityLbConfig_LocalityWeightedLbConfig) Reset() { + *x = LocalityLbConfig_LocalityWeightedLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalityLbConfig_LocalityWeightedLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalityLbConfig_LocalityWeightedLbConfig) ProtoMessage() {} + +func (x *LocalityLbConfig_LocalityWeightedLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalityLbConfig_LocalityWeightedLbConfig.ProtoReflect.Descriptor instead. 
+func (*LocalityLbConfig_LocalityWeightedLbConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescGZIP(), []int{0, 1} +} + +var File_envoy_extensions_load_balancing_policies_common_v3_common_proto protoreflect.FileDescriptor + +var file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDesc = []byte{ + 0x0a, 0x3f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcf, 0x04, 0x0a, 0x10, + 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x89, 0x01, 0x0a, 0x14, 0x7a, 0x6f, 0x6e, 0x65, 0x5f, 0x61, 0x77, 0x61, 0x72, 0x65, 0x5f, + 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x56, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, + 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x11, 0x7a, 0x6f, 0x6e, 0x65, 0x41, + 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x9e, 0x01, 0x0a, + 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, + 0x65, 0x64, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x5d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 
0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x48, 0x00, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xcf, 0x01, + 0x0a, 0x11, 0x5a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x0f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, + 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x45, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x12, 0x46, 0x0a, 0x10, 0x6d, 0x69, 0x6e, 0x5f, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x6d, 0x69, + 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x31, 0x0a, 0x15, + 0x66, 0x61, 0x69, 0x6c, 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x6f, 0x6e, 0x5f, + 0x70, 0x61, 0x6e, 0x69, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x66, 0x61, 0x69, + 0x6c, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x4f, 0x6e, 0x50, 0x61, 0x6e, 0x69, 0x63, 0x1a, + 0x1a, 0x0a, 0x18, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x20, 0x0a, 0x19, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xe3, 0x01, + 0x0a, 0x0f, 0x53, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x45, 0x0a, 0x11, 0x73, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, + 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, + 0x72, 0x74, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x67, 0x67, 0x72, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x44, 0x6f, 0x75, 0x62, 0x6c, + 0x65, 0x52, 0x0a, 0x61, 0x67, 0x67, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, + 0x12, 0x6d, 0x69, 0x6e, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x63, + 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 
0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, + 0x74, 0x52, 0x10, 0x6d, 0x69, 0x6e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x50, 0x65, 0x72, 0x63, + 0x65, 0x6e, 0x74, 0x22, 0xab, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x37, 0x0a, 0x18, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, + 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x15, 0x75, 0x73, 0x65, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, + 0x46, 0x6f, 0x72, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x55, 0x0a, 0x13, 0x68, 0x61, + 0x73, 0x68, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x64, 0x52, 0x11, + 0x68, 0x61, 0x73, 0x68, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x61, 0x63, 0x74, 0x6f, + 0x72, 0x42, 0xbd, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x40, 0x69, 0x6f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x0b, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x62, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x76, + 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescOnce sync.Once + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescData = file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDesc +) + +func file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescGZIP() []byte { + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescOnce.Do(func() { + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescData) + }) + return file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDescData +} + +var file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_envoy_extensions_load_balancing_policies_common_v3_common_proto_goTypes = []interface{}{ + (*LocalityLbConfig)(nil), // 0: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig + 
(*SlowStartConfig)(nil), // 1: envoy.extensions.load_balancing_policies.common.v3.SlowStartConfig + (*ConsistentHashingLbConfig)(nil), // 2: envoy.extensions.load_balancing_policies.common.v3.ConsistentHashingLbConfig + (*LocalityLbConfig_ZoneAwareLbConfig)(nil), // 3: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.ZoneAwareLbConfig + (*LocalityLbConfig_LocalityWeightedLbConfig)(nil), // 4: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.LocalityWeightedLbConfig + (*durationpb.Duration)(nil), // 5: google.protobuf.Duration + (*v3.RuntimeDouble)(nil), // 6: envoy.config.core.v3.RuntimeDouble + (*v31.Percent)(nil), // 7: envoy.type.v3.Percent + (*wrapperspb.UInt32Value)(nil), // 8: google.protobuf.UInt32Value + (*wrapperspb.UInt64Value)(nil), // 9: google.protobuf.UInt64Value +} +var file_envoy_extensions_load_balancing_policies_common_v3_common_proto_depIdxs = []int32{ + 3, // 0: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.zone_aware_lb_config:type_name -> envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.ZoneAwareLbConfig + 4, // 1: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.locality_weighted_lb_config:type_name -> envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.LocalityWeightedLbConfig + 5, // 2: envoy.extensions.load_balancing_policies.common.v3.SlowStartConfig.slow_start_window:type_name -> google.protobuf.Duration + 6, // 3: envoy.extensions.load_balancing_policies.common.v3.SlowStartConfig.aggression:type_name -> envoy.config.core.v3.RuntimeDouble + 7, // 4: envoy.extensions.load_balancing_policies.common.v3.SlowStartConfig.min_weight_percent:type_name -> envoy.type.v3.Percent + 8, // 5: envoy.extensions.load_balancing_policies.common.v3.ConsistentHashingLbConfig.hash_balance_factor:type_name -> google.protobuf.UInt32Value + 7, // 6: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.ZoneAwareLbConfig.routing_enabled:type_name -> envoy.type.v3.Percent + 9, // 7: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.ZoneAwareLbConfig.min_cluster_size:type_name -> google.protobuf.UInt64Value + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_load_balancing_policies_common_v3_common_proto_init() } +func file_envoy_extensions_load_balancing_policies_common_v3_common_proto_init() { + if File_envoy_extensions_load_balancing_policies_common_v3_common_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalityLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SlowStartConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConsistentHashingLbConfig); i { + 
case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalityLbConfig_ZoneAwareLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalityLbConfig_LocalityWeightedLbConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*LocalityLbConfig_ZoneAwareLbConfig_)(nil), + (*LocalityLbConfig_LocalityWeightedLbConfig_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_load_balancing_policies_common_v3_common_proto_goTypes, + DependencyIndexes: file_envoy_extensions_load_balancing_policies_common_v3_common_proto_depIdxs, + MessageInfos: file_envoy_extensions_load_balancing_policies_common_v3_common_proto_msgTypes, + }.Build() + File_envoy_extensions_load_balancing_policies_common_v3_common_proto = out.File + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_rawDesc = nil + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_goTypes = nil + file_envoy_extensions_load_balancing_policies_common_v3_common_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.validate.go new file mode 100644 index 0000000000..2aa2f26dae --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common.pb.validate.go @@ -0,0 +1,814 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/common/v3/common.proto + +package commonv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on LocalityLbConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *LocalityLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LocalityLbConfig with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// LocalityLbConfigMultiError, or nil if none found. +func (m *LocalityLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *LocalityLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofLocalityConfigSpecifierPresent := false + switch v := m.LocalityConfigSpecifier.(type) { + case *LocalityLbConfig_ZoneAwareLbConfig_: + if v == nil { + err := LocalityLbConfigValidationError{ + field: "LocalityConfigSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLocalityConfigSpecifierPresent = true + + if all { + switch v := interface{}(m.GetZoneAwareLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbConfigValidationError{ + field: "ZoneAwareLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbConfigValidationError{ + field: "ZoneAwareLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetZoneAwareLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbConfigValidationError{ + field: "ZoneAwareLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *LocalityLbConfig_LocalityWeightedLbConfig_: + if v == nil { + err := LocalityLbConfigValidationError{ + field: "LocalityConfigSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofLocalityConfigSpecifierPresent = true + + if all { + switch v := interface{}(m.GetLocalityWeightedLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbConfigValidationError{ + field: "LocalityWeightedLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbConfigValidationError{ + field: "LocalityWeightedLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalityWeightedLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbConfigValidationError{ + field: "LocalityWeightedLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofLocalityConfigSpecifierPresent { + err := LocalityLbConfigValidationError{ + field: "LocalityConfigSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return LocalityLbConfigMultiError(errors) + } + + return nil +} + +// LocalityLbConfigMultiError is an error wrapping multiple validation errors +// returned by LocalityLbConfig.ValidateAll() if the designated constraints +// aren't met. +type LocalityLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m LocalityLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LocalityLbConfigMultiError) AllErrors() []error { return m } + +// LocalityLbConfigValidationError is the validation error returned by +// LocalityLbConfig.Validate if the designated constraints aren't met. +type LocalityLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LocalityLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LocalityLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LocalityLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LocalityLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LocalityLbConfigValidationError) ErrorName() string { return "LocalityLbConfigValidationError" } + +// Error satisfies the builtin error interface +func (e LocalityLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLocalityLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LocalityLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LocalityLbConfigValidationError{} + +// Validate checks the field values on SlowStartConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *SlowStartConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SlowStartConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SlowStartConfigMultiError, or nil if none found. 
+func (m *SlowStartConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *SlowStartConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetSlowStartWindow()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SlowStartConfigValidationError{ + field: "SlowStartWindow", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SlowStartConfigValidationError{ + field: "SlowStartWindow", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSlowStartWindow()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SlowStartConfigValidationError{ + field: "SlowStartWindow", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetAggression()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SlowStartConfigValidationError{ + field: "Aggression", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SlowStartConfigValidationError{ + field: "Aggression", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAggression()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SlowStartConfigValidationError{ + field: "Aggression", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMinWeightPercent()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SlowStartConfigValidationError{ + field: "MinWeightPercent", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SlowStartConfigValidationError{ + field: "MinWeightPercent", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMinWeightPercent()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SlowStartConfigValidationError{ + field: "MinWeightPercent", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return SlowStartConfigMultiError(errors) + } + + return nil +} + +// SlowStartConfigMultiError is an error wrapping multiple validation errors +// returned by SlowStartConfig.ValidateAll() if the designated constraints +// aren't met. +type SlowStartConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SlowStartConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SlowStartConfigMultiError) AllErrors() []error { return m } + +// SlowStartConfigValidationError is the validation error returned by +// SlowStartConfig.Validate if the designated constraints aren't met. 
+type SlowStartConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SlowStartConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SlowStartConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SlowStartConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SlowStartConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SlowStartConfigValidationError) ErrorName() string { return "SlowStartConfigValidationError" } + +// Error satisfies the builtin error interface +func (e SlowStartConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSlowStartConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SlowStartConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SlowStartConfigValidationError{} + +// Validate checks the field values on ConsistentHashingLbConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ConsistentHashingLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ConsistentHashingLbConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ConsistentHashingLbConfigMultiError, or nil if none found. +func (m *ConsistentHashingLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *ConsistentHashingLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for UseHostnameForHashing + + if wrapper := m.GetHashBalanceFactor(); wrapper != nil { + + if wrapper.GetValue() < 100 { + err := ConsistentHashingLbConfigValidationError{ + field: "HashBalanceFactor", + reason: "value must be greater than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return ConsistentHashingLbConfigMultiError(errors) + } + + return nil +} + +// ConsistentHashingLbConfigMultiError is an error wrapping multiple validation +// errors returned by ConsistentHashingLbConfig.ValidateAll() if the +// designated constraints aren't met. +type ConsistentHashingLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ConsistentHashingLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ConsistentHashingLbConfigMultiError) AllErrors() []error { return m } + +// ConsistentHashingLbConfigValidationError is the validation error returned by +// ConsistentHashingLbConfig.Validate if the designated constraints aren't met. +type ConsistentHashingLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e ConsistentHashingLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ConsistentHashingLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ConsistentHashingLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ConsistentHashingLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ConsistentHashingLbConfigValidationError) ErrorName() string { + return "ConsistentHashingLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e ConsistentHashingLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sConsistentHashingLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ConsistentHashingLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ConsistentHashingLbConfigValidationError{} + +// Validate checks the field values on LocalityLbConfig_ZoneAwareLbConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *LocalityLbConfig_ZoneAwareLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LocalityLbConfig_ZoneAwareLbConfig +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// LocalityLbConfig_ZoneAwareLbConfigMultiError, or nil if none found. 
+func (m *LocalityLbConfig_ZoneAwareLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *LocalityLbConfig_ZoneAwareLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetRoutingEnabled()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbConfig_ZoneAwareLbConfigValidationError{ + field: "RoutingEnabled", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbConfig_ZoneAwareLbConfigValidationError{ + field: "RoutingEnabled", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRoutingEnabled()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbConfig_ZoneAwareLbConfigValidationError{ + field: "RoutingEnabled", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMinClusterSize()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LocalityLbConfig_ZoneAwareLbConfigValidationError{ + field: "MinClusterSize", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LocalityLbConfig_ZoneAwareLbConfigValidationError{ + field: "MinClusterSize", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMinClusterSize()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LocalityLbConfig_ZoneAwareLbConfigValidationError{ + field: "MinClusterSize", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for FailTrafficOnPanic + + if len(errors) > 0 { + return LocalityLbConfig_ZoneAwareLbConfigMultiError(errors) + } + + return nil +} + +// LocalityLbConfig_ZoneAwareLbConfigMultiError is an error wrapping multiple +// validation errors returned by +// LocalityLbConfig_ZoneAwareLbConfig.ValidateAll() if the designated +// constraints aren't met. +type LocalityLbConfig_ZoneAwareLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LocalityLbConfig_ZoneAwareLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LocalityLbConfig_ZoneAwareLbConfigMultiError) AllErrors() []error { return m } + +// LocalityLbConfig_ZoneAwareLbConfigValidationError is the validation error +// returned by LocalityLbConfig_ZoneAwareLbConfig.Validate if the designated +// constraints aren't met. +type LocalityLbConfig_ZoneAwareLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LocalityLbConfig_ZoneAwareLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LocalityLbConfig_ZoneAwareLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e LocalityLbConfig_ZoneAwareLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LocalityLbConfig_ZoneAwareLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LocalityLbConfig_ZoneAwareLbConfigValidationError) ErrorName() string { + return "LocalityLbConfig_ZoneAwareLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e LocalityLbConfig_ZoneAwareLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLocalityLbConfig_ZoneAwareLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LocalityLbConfig_ZoneAwareLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LocalityLbConfig_ZoneAwareLbConfigValidationError{} + +// Validate checks the field values on +// LocalityLbConfig_LocalityWeightedLbConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *LocalityLbConfig_LocalityWeightedLbConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// LocalityLbConfig_LocalityWeightedLbConfig with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// LocalityLbConfig_LocalityWeightedLbConfigMultiError, or nil if none found. +func (m *LocalityLbConfig_LocalityWeightedLbConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *LocalityLbConfig_LocalityWeightedLbConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return LocalityLbConfig_LocalityWeightedLbConfigMultiError(errors) + } + + return nil +} + +// LocalityLbConfig_LocalityWeightedLbConfigMultiError is an error wrapping +// multiple validation errors returned by +// LocalityLbConfig_LocalityWeightedLbConfig.ValidateAll() if the designated +// constraints aren't met. +type LocalityLbConfig_LocalityWeightedLbConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LocalityLbConfig_LocalityWeightedLbConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LocalityLbConfig_LocalityWeightedLbConfigMultiError) AllErrors() []error { return m } + +// LocalityLbConfig_LocalityWeightedLbConfigValidationError is the validation +// error returned by LocalityLbConfig_LocalityWeightedLbConfig.Validate if the +// designated constraints aren't met. +type LocalityLbConfig_LocalityWeightedLbConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LocalityLbConfig_LocalityWeightedLbConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LocalityLbConfig_LocalityWeightedLbConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e LocalityLbConfig_LocalityWeightedLbConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LocalityLbConfig_LocalityWeightedLbConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LocalityLbConfig_LocalityWeightedLbConfigValidationError) ErrorName() string { + return "LocalityLbConfig_LocalityWeightedLbConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e LocalityLbConfig_LocalityWeightedLbConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLocalityLbConfig_LocalityWeightedLbConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LocalityLbConfig_LocalityWeightedLbConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LocalityLbConfig_LocalityWeightedLbConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common_vtproto.pb.go new file mode 100644 index 0000000000..ad3021a243 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3/common_vtproto.pb.go @@ -0,0 +1,492 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/common/v3/common.proto + +package commonv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *LocalityLbConfig_ZoneAwareLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalityLbConfig_ZoneAwareLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbConfig_ZoneAwareLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.FailTrafficOnPanic { + i-- + if m.FailTrafficOnPanic { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.MinClusterSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.MinClusterSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.RoutingEnabled != nil { + if vtmsg, ok := interface{}(m.RoutingEnabled).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RoutingEnabled) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LocalityLbConfig_LocalityWeightedLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalityLbConfig_LocalityWeightedLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbConfig_LocalityWeightedLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *LocalityLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalityLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.LocalityConfigSpecifier.(*LocalityLbConfig_LocalityWeightedLbConfig_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := 
m.LocalityConfigSpecifier.(*LocalityLbConfig_ZoneAwareLbConfig_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *LocalityLbConfig_ZoneAwareLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbConfig_ZoneAwareLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ZoneAwareLbConfig != nil { + size, err := m.ZoneAwareLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *LocalityLbConfig_LocalityWeightedLbConfig_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LocalityLbConfig_LocalityWeightedLbConfig_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LocalityWeightedLbConfig != nil { + size, err := m.LocalityWeightedLbConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *SlowStartConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SlowStartConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SlowStartConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MinWeightPercent != nil { + if vtmsg, ok := interface{}(m.MinWeightPercent).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MinWeightPercent) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.Aggression != nil { + if vtmsg, ok := interface{}(m.Aggression).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Aggression) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.SlowStartWindow != nil { + size, err := (*durationpb.Duration)(m.SlowStartWindow).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConsistentHashingLbConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConsistentHashingLbConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ConsistentHashingLbConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.HashBalanceFactor != nil { + size, err := (*wrapperspb.UInt32Value)(m.HashBalanceFactor).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.UseHostnameForHashing { + i-- + if m.UseHostnameForHashing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *LocalityLbConfig_ZoneAwareLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RoutingEnabled != nil { + if size, ok := interface{}(m.RoutingEnabled).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RoutingEnabled) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MinClusterSize != nil { + l = (*wrapperspb.UInt64Value)(m.MinClusterSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FailTrafficOnPanic { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *LocalityLbConfig_LocalityWeightedLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *LocalityLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.LocalityConfigSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *LocalityLbConfig_ZoneAwareLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ZoneAwareLbConfig != nil { + l = m.ZoneAwareLbConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *LocalityLbConfig_LocalityWeightedLbConfig_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LocalityWeightedLbConfig != nil { + l = m.LocalityWeightedLbConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *SlowStartConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SlowStartWindow != nil { + l = (*durationpb.Duration)(m.SlowStartWindow).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Aggression != nil { + if size, ok := interface{}(m.Aggression).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Aggression) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MinWeightPercent != nil { + if size, ok := interface{}(m.MinWeightPercent).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MinWeightPercent) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += 
len(m.unknownFields) + return n +} + +func (m *ConsistentHashingLbConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UseHostnameForHashing { + n += 2 + } + if m.HashBalanceFactor != nil { + l = (*wrapperspb.UInt32Value)(m.HashBalanceFactor).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.go new file mode 100644 index 0000000000..819df3d7b7 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.go @@ -0,0 +1,383 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto + +package least_requestv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Available methods for selecting the host set from which to return the host with the +// fewest active requests. +type LeastRequest_SelectionMethod int32 + +const ( + // Return host with fewest requests from a set of “choice_count“ randomly selected hosts. + // Best selection method for most scenarios. + LeastRequest_N_CHOICES LeastRequest_SelectionMethod = 0 + // Return host with fewest requests from all hosts. + // Useful in some niche use cases involving low request rates and one of: + // (example 1) low request limits on workloads, or (example 2) few hosts. + // + // Example 1: Consider a workload type that can only accept one connection at a time. + // If such workloads are deployed across many hosts, only a small percentage of those + // workloads have zero connections at any given time, and the rate of new connections is low, + // the “FULL_SCAN“ method is more likely to select a suitable host than “N_CHOICES“. + // + // Example 2: Consider a workload type that is only deployed on 2 hosts. With default settings, + // the “N_CHOICES“ method will return the host with more active requests 25% of the time. + // If the request rate is sufficiently low, the behavior of always selecting the host with least + // requests as of the last metrics refresh may be preferable. + LeastRequest_FULL_SCAN LeastRequest_SelectionMethod = 1 +) + +// Enum value maps for LeastRequest_SelectionMethod. 
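
As a rough illustration of the selection methods documented above: "N_CHOICES" samples a small number of random hosts and keeps the least loaded one, while "FULL_SCAN" walks every host. A minimal standalone Go sketch of the two-choice idea, using an illustrative activeRequests slice rather than the real host-set types:

package main

import (
	"fmt"
	"math/rand"
)

// pickNChoices mirrors the N_CHOICES idea: sample choiceCount random hosts and
// keep the one with the fewest active requests. FULL_SCAN would instead walk
// the whole slice and always return the global minimum.
func pickNChoices(activeRequests []int, choiceCount int, rng *rand.Rand) int {
	best := rng.Intn(len(activeRequests))
	for i := 1; i < choiceCount; i++ {
		candidate := rng.Intn(len(activeRequests))
		if activeRequests[candidate] < activeRequests[best] {
			best = candidate
		}
	}
	return best
}

func main() {
	rng := rand.New(rand.NewSource(1))
	hosts := []int{4, 0, 7, 2} // active requests per illustrative host
	fmt.Println("picked host index:", pickNChoices(hosts, 2, rng))
}
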
+var ( + LeastRequest_SelectionMethod_name = map[int32]string{ + 0: "N_CHOICES", + 1: "FULL_SCAN", + } + LeastRequest_SelectionMethod_value = map[string]int32{ + "N_CHOICES": 0, + "FULL_SCAN": 1, + } +) + +func (x LeastRequest_SelectionMethod) Enum() *LeastRequest_SelectionMethod { + p := new(LeastRequest_SelectionMethod) + *p = x + return p +} + +func (x LeastRequest_SelectionMethod) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (LeastRequest_SelectionMethod) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_enumTypes[0].Descriptor() +} + +func (LeastRequest_SelectionMethod) Type() protoreflect.EnumType { + return &file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_enumTypes[0] +} + +func (x LeastRequest_SelectionMethod) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use LeastRequest_SelectionMethod.Descriptor instead. +func (LeastRequest_SelectionMethod) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescGZIP(), []int{0, 0} +} + +// This configuration allows the built-in LEAST_REQUEST LB policy to be configured via the LB policy +// extension point. See the :ref:`load balancing architecture overview +// ` for more information. +// [#next-free-field: 7] +type LeastRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of random healthy hosts from which the host with the fewest active requests will + // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. + // Only applies to the “N_CHOICES“ selection method. + ChoiceCount *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=choice_count,json=choiceCount,proto3" json:"choice_count,omitempty"` + // The following formula is used to calculate the dynamic weights when hosts have different load + // balancing weights: + // + // “weight = load_balancing_weight / (active_requests + 1)^active_request_bias“ + // + // The larger the active request bias is, the more aggressively active requests will lower the + // effective weight when all host weights are not equal. + // + // “active_request_bias“ must be greater than or equal to 0.0. + // + // When “active_request_bias == 0.0“ the Least Request Load Balancer doesn't consider the number + // of active requests at the time it picks a host and behaves like the Round Robin Load + // Balancer. + // + // When “active_request_bias > 0.0“ the Least Request Load Balancer scales the load balancing + // weight by the number of active requests at the time it does a pick. + // + // The value is cached for performance reasons and refreshed whenever one of the Load Balancer's + // host sets changes, e.g., whenever there is a host membership update or a host load balancing + // weight change. + // + // .. note:: + // + // This setting only takes effect if all host weights are not equal. + ActiveRequestBias *v3.RuntimeDouble `protobuf:"bytes,2,opt,name=active_request_bias,json=activeRequestBias,proto3" json:"active_request_bias,omitempty"` + // Configuration for slow start mode. + // If this configuration is not set, slow start will not be not enabled. 
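
To make the active_request_bias formula quoted in the field comment above concrete, a small standalone sketch; the function and variable names are illustrative and not part of the generated package:

package main

import (
	"fmt"
	"math"
)

// effectiveWeight applies the documented formula:
//   weight = load_balancing_weight / (active_requests + 1)^active_request_bias
// With bias 0 the load is ignored (round-robin-like behaviour); larger bias
// values penalise busy hosts more aggressively.
func effectiveWeight(lbWeight float64, activeRequests int, bias float64) float64 {
	return lbWeight / math.Pow(float64(activeRequests)+1, bias)
}

func main() {
	fmt.Println(effectiveWeight(100, 0, 1.0)) // idle host keeps weight 100
	fmt.Println(effectiveWeight(100, 9, 1.0)) // busy host drops to 10
	fmt.Println(effectiveWeight(100, 9, 0.0)) // bias 0: load ignored, stays 100
}
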
+ SlowStartConfig *v31.SlowStartConfig `protobuf:"bytes,3,opt,name=slow_start_config,json=slowStartConfig,proto3" json:"slow_start_config,omitempty"` + // Configuration for local zone aware load balancing or locality weighted load balancing. + LocalityLbConfig *v31.LocalityLbConfig `protobuf:"bytes,4,opt,name=locality_lb_config,json=localityLbConfig,proto3" json:"locality_lb_config,omitempty"` + // [#not-implemented-hide:] + // Unused. Replaced by the `selection_method` enum for better extensibility. + // + // Deprecated: Marked as deprecated in envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto. + EnableFullScan *wrapperspb.BoolValue `protobuf:"bytes,5,opt,name=enable_full_scan,json=enableFullScan,proto3" json:"enable_full_scan,omitempty"` + // Method for selecting the host set from which to return the host with the fewest active requests. + // + // Defaults to “N_CHOICES“. + SelectionMethod LeastRequest_SelectionMethod `protobuf:"varint,6,opt,name=selection_method,json=selectionMethod,proto3,enum=envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest_SelectionMethod" json:"selection_method,omitempty"` +} + +func (x *LeastRequest) Reset() { + *x = LeastRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LeastRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LeastRequest) ProtoMessage() {} + +func (x *LeastRequest) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LeastRequest.ProtoReflect.Descriptor instead. +func (*LeastRequest) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescGZIP(), []int{0} +} + +func (x *LeastRequest) GetChoiceCount() *wrapperspb.UInt32Value { + if x != nil { + return x.ChoiceCount + } + return nil +} + +func (x *LeastRequest) GetActiveRequestBias() *v3.RuntimeDouble { + if x != nil { + return x.ActiveRequestBias + } + return nil +} + +func (x *LeastRequest) GetSlowStartConfig() *v31.SlowStartConfig { + if x != nil { + return x.SlowStartConfig + } + return nil +} + +func (x *LeastRequest) GetLocalityLbConfig() *v31.LocalityLbConfig { + if x != nil { + return x.LocalityLbConfig + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto. 
+func (x *LeastRequest) GetEnableFullScan() *wrapperspb.BoolValue { + if x != nil { + return x.EnableFullScan + } + return nil +} + +func (x *LeastRequest) GetSelectionMethod() LeastRequest_SelectionMethod { + if x != nil { + return x.SelectionMethod + } + return LeastRequest_N_CHOICES +} + +var File_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto protoreflect.FileDescriptor + +var file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDesc = []byte{ + 0x0a, 0x4d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x6c, 0x65, 0x61, 0x73, 0x74, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2f, 0x76, 0x33, 0x2f, 0x6c, 0x65, 0x61, 0x73, + 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x39, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, + 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x74, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, + 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x3f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x33, 0x2f, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, + 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa5, 0x05, 0x0a, 0x0c, 0x4c, 0x65, + 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x48, 0x0a, 0x0c, 0x63, 0x68, + 0x6f, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x02, 0x52, 0x0b, 0x63, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x53, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 
+ 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, + 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x69, 0x61, 0x73, 0x12, 0x6f, 0x0a, 0x11, 0x73, 0x6c, 0x6f, + 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6c, 0x6f, 0x77, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x72, 0x0a, 0x12, 0x6c, 0x6f, + 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, + 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, 0x6c, 0x6f, + 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x51, + 0x0a, 0x10, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x73, 0x63, + 0x61, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, + 0x01, 0x52, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x63, 0x61, + 0x6e, 0x12, 0x8c, 0x01, 0x0a, 0x10, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x57, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x0f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x22, 0x2f, 0x0a, 0x0f, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x5f, 0x43, 0x48, 0x4f, 0x49, 0x43, 0x45, 0x53, + 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x46, 0x55, 0x4c, 0x4c, 0x5f, 0x53, 0x43, 0x41, 0x4e, 0x10, + 0x01, 0x42, 0xd8, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x47, 0x69, 0x6f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x65, 0x78, 
0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x69, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x2e, 0x76, 0x33, 0x42, 0x11, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x70, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x6c, 0x65, 0x61, 0x73, + 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2f, 0x76, 0x33, 0x3b, 0x6c, 0x65, 0x61, + 0x73, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescOnce sync.Once + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescData = file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDesc +) + +func file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescGZIP() []byte { + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescOnce.Do(func() { + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescData) + }) + return file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDescData +} + +var file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_goTypes = []interface{}{ + (LeastRequest_SelectionMethod)(0), // 0: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.SelectionMethod + (*LeastRequest)(nil), // 1: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest + (*wrapperspb.UInt32Value)(nil), // 2: google.protobuf.UInt32Value + (*v3.RuntimeDouble)(nil), // 3: envoy.config.core.v3.RuntimeDouble + (*v31.SlowStartConfig)(nil), // 4: envoy.extensions.load_balancing_policies.common.v3.SlowStartConfig + (*v31.LocalityLbConfig)(nil), // 5: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig + (*wrapperspb.BoolValue)(nil), // 6: google.protobuf.BoolValue +} +var file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_depIdxs = []int32{ + 2, // 0: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.choice_count:type_name -> google.protobuf.UInt32Value + 3, // 1: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.active_request_bias:type_name -> envoy.config.core.v3.RuntimeDouble + 4, // 2: 
envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.slow_start_config:type_name -> envoy.extensions.load_balancing_policies.common.v3.SlowStartConfig + 5, // 3: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.locality_lb_config:type_name -> envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig + 6, // 4: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.enable_full_scan:type_name -> google.protobuf.BoolValue + 0, // 5: envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.selection_method:type_name -> envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest.SelectionMethod + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_init() +} +func file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_init() { + if File_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LeastRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_goTypes, + DependencyIndexes: file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_depIdxs, + EnumInfos: file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_enumTypes, + MessageInfos: file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_msgTypes, + }.Build() + File_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto = out.File + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_rawDesc = nil + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_goTypes = nil + file_envoy_extensions_load_balancing_policies_least_request_v3_least_request_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.validate.go new file mode 100644 index 0000000000..75a3a2c2e8 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request.pb.validate.go @@ -0,0 +1,278 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto + +package least_requestv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on LeastRequest with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *LeastRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LeastRequest with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in LeastRequestMultiError, or +// nil if none found. +func (m *LeastRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *LeastRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if wrapper := m.GetChoiceCount(); wrapper != nil { + + if wrapper.GetValue() < 2 { + err := LeastRequestValidationError{ + field: "ChoiceCount", + reason: "value must be greater than or equal to 2", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetActiveRequestBias()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LeastRequestValidationError{ + field: "ActiveRequestBias", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LeastRequestValidationError{ + field: "ActiveRequestBias", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetActiveRequestBias()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LeastRequestValidationError{ + field: "ActiveRequestBias", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSlowStartConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LeastRequestValidationError{ + field: "SlowStartConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LeastRequestValidationError{ + field: "SlowStartConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSlowStartConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LeastRequestValidationError{ + field: "SlowStartConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLocalityLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LeastRequestValidationError{ + field: "LocalityLbConfig", + reason: "embedded message failed 
validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LeastRequestValidationError{ + field: "LocalityLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalityLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LeastRequestValidationError{ + field: "LocalityLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetEnableFullScan()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LeastRequestValidationError{ + field: "EnableFullScan", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LeastRequestValidationError{ + field: "EnableFullScan", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEnableFullScan()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LeastRequestValidationError{ + field: "EnableFullScan", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if _, ok := LeastRequest_SelectionMethod_name[int32(m.GetSelectionMethod())]; !ok { + err := LeastRequestValidationError{ + field: "SelectionMethod", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return LeastRequestMultiError(errors) + } + + return nil +} + +// LeastRequestMultiError is an error wrapping multiple validation errors +// returned by LeastRequest.ValidateAll() if the designated constraints aren't met. +type LeastRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LeastRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LeastRequestMultiError) AllErrors() []error { return m } + +// LeastRequestValidationError is the validation error returned by +// LeastRequest.Validate if the designated constraints aren't met. +type LeastRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LeastRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LeastRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LeastRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LeastRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
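
For context on how these generated validators are typically called, a short sketch that builds a LeastRequest with an out-of-range choice_count and lets Validate() reject it; the import alias and the usage pattern are illustrative:

package main

import (
	"fmt"

	least_requestv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// choice_count carries a >= 2 constraint, so Validate() returns an error for 1.
	cfg := &least_requestv3.LeastRequest{
		ChoiceCount:     wrapperspb.UInt32(1),
		SelectionMethod: least_requestv3.LeastRequest_N_CHOICES,
	}
	if err := cfg.Validate(); err != nil {
		fmt.Println("config rejected:", err)
	}
}
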
+func (e LeastRequestValidationError) ErrorName() string { return "LeastRequestValidationError" } + +// Error satisfies the builtin error interface +func (e LeastRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLeastRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LeastRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LeastRequestValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request_vtproto.pb.go new file mode 100644 index 0000000000..a7abe001b4 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3/least_request_vtproto.pb.go @@ -0,0 +1,196 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/least_request/v3/least_request.proto + +package least_requestv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *LeastRequest) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeastRequest) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LeastRequest) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SelectionMethod != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.SelectionMethod)) + i-- + dAtA[i] = 0x30 + } + if m.EnableFullScan != nil { + size, err := (*wrapperspb.BoolValue)(m.EnableFullScan).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.LocalityLbConfig != nil { + if vtmsg, ok := interface{}(m.LocalityLbConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LocalityLbConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.SlowStartConfig != nil { + if vtmsg, ok := interface{}(m.SlowStartConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SlowStartConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.ActiveRequestBias != nil { + if vtmsg, ok := interface{}(m.ActiveRequestBias).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ActiveRequestBias) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.ChoiceCount != nil { + size, err := (*wrapperspb.UInt32Value)(m.ChoiceCount).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LeastRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ChoiceCount != nil { + l = (*wrapperspb.UInt32Value)(m.ChoiceCount).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ActiveRequestBias != nil { + if size, ok := interface{}(m.ActiveRequestBias).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ActiveRequestBias) + } + n += 1 
+ l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SlowStartConfig != nil { + if size, ok := interface{}(m.SlowStartConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SlowStartConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LocalityLbConfig != nil { + if size, ok := interface{}(m.LocalityLbConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LocalityLbConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnableFullScan != nil { + l = (*wrapperspb.BoolValue)(m.EnableFullScan).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SelectionMethod != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.SelectionMethod)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.go new file mode 100644 index 0000000000..7468da4ac8 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.go @@ -0,0 +1,169 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.proto + +package pick_firstv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This configuration allows the built-in PICK_FIRST LB policy to be configured +// via the LB policy extension point. +type PickFirst struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If set to true, instructs the LB policy to shuffle the list of addresses + // received from the name resolver before attempting to connect to them. + ShuffleAddressList bool `protobuf:"varint,1,opt,name=shuffle_address_list,json=shuffleAddressList,proto3" json:"shuffle_address_list,omitempty"` +} + +func (x *PickFirst) Reset() { + *x = PickFirst{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PickFirst) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PickFirst) ProtoMessage() {} + +func (x *PickFirst) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PickFirst.ProtoReflect.Descriptor instead. 
+func (*PickFirst) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDescGZIP(), []int{0} +} + +func (x *PickFirst) GetShuffleAddressList() bool { + if x != nil { + return x.ShuffleAddressList + } + return false +} + +var File_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto protoreflect.FileDescriptor + +var file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDesc = []byte{ + 0x0a, 0x47, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x70, 0x69, 0x63, 0x6b, 0x5f, + 0x66, 0x69, 0x72, 0x73, 0x74, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, + 0x72, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x2e, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x2e, 0x76, + 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x3d, 0x0a, 0x09, 0x50, 0x69, 0x63, 0x6b, 0x46, 0x69, 0x72, 0x73, 0x74, 0x12, 0x30, 0x0a, + 0x14, 0x73, 0x68, 0x75, 0x66, 0x66, 0x6c, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x73, 0x68, 0x75, + 0x66, 0x66, 0x6c, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x42, + 0xcc, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x44, 0x69, 0x6f, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, + 0x65, 0x73, 0x2e, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x2e, 0x76, 0x33, + 0x42, 0x0e, 0x50, 0x69, 0x63, 0x6b, 0x46, 0x69, 0x72, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x6a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x2f, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x2f, 0x76, + 0x33, 0x3b, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x76, 0x33, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDescOnce sync.Once + file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDescData = file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDesc +) + +func 
file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDescGZIP() []byte { + file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDescOnce.Do(func() { + file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDescData) + }) + return file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDescData +} + +var file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_goTypes = []interface{}{ + (*PickFirst)(nil), // 0: envoy.extensions.load_balancing_policies.pick_first.v3.PickFirst +} +var file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_init() } +func file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_init() { + if File_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PickFirst); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_goTypes, + DependencyIndexes: file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_depIdxs, + MessageInfos: file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_msgTypes, + }.Build() + File_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto = out.File + file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_rawDesc = nil + file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_goTypes = nil + file_envoy_extensions_load_balancing_policies_pick_first_v3_pick_first_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.validate.go new file mode 100644 index 0000000000..d142fed995 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.pb.validate.go @@ -0,0 +1,138 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.proto + +package pick_firstv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on PickFirst with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *PickFirst) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on PickFirst with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in PickFirstMultiError, or nil +// if none found. +func (m *PickFirst) ValidateAll() error { + return m.validate(true) +} + +func (m *PickFirst) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for ShuffleAddressList + + if len(errors) > 0 { + return PickFirstMultiError(errors) + } + + return nil +} + +// PickFirstMultiError is an error wrapping multiple validation errors returned +// by PickFirst.ValidateAll() if the designated constraints aren't met. +type PickFirstMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PickFirstMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PickFirstMultiError) AllErrors() []error { return m } + +// PickFirstValidationError is the validation error returned by +// PickFirst.Validate if the designated constraints aren't met. +type PickFirstValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PickFirstValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PickFirstValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PickFirstValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PickFirstValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e PickFirstValidationError) ErrorName() string { return "PickFirstValidationError" } + +// Error satisfies the builtin error interface +func (e PickFirstValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPickFirst.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PickFirstValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PickFirstValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first_vtproto.pb.go new file mode 100644 index 0000000000..828e706214 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first_vtproto.pb.go @@ -0,0 +1,74 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.proto + +package pick_firstv3 + +import ( + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *PickFirst) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PickFirst) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PickFirst) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ShuffleAddressList { + i-- + if m.ShuffleAddressList { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *PickFirst) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ShuffleAddressList { + n += 2 + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.go new file mode 100644 index 0000000000..6c544cc726 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.go @@ -0,0 +1,393 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto + +package ring_hashv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The hash function used to hash hosts onto the ketama ring. +type RingHash_HashFunction int32 + +const ( + // Currently defaults to XX_HASH. + RingHash_DEFAULT_HASH RingHash_HashFunction = 0 + // Use `xxHash `_. + RingHash_XX_HASH RingHash_HashFunction = 1 + // Use `MurmurHash2 `_, this is compatible with + // std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled + // on Linux and not macOS. + RingHash_MURMUR_HASH_2 RingHash_HashFunction = 2 +) + +// Enum value maps for RingHash_HashFunction. +var ( + RingHash_HashFunction_name = map[int32]string{ + 0: "DEFAULT_HASH", + 1: "XX_HASH", + 2: "MURMUR_HASH_2", + } + RingHash_HashFunction_value = map[string]int32{ + "DEFAULT_HASH": 0, + "XX_HASH": 1, + "MURMUR_HASH_2": 2, + } +) + +func (x RingHash_HashFunction) Enum() *RingHash_HashFunction { + p := new(RingHash_HashFunction) + *p = x + return p +} + +func (x RingHash_HashFunction) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RingHash_HashFunction) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_enumTypes[0].Descriptor() +} + +func (RingHash_HashFunction) Type() protoreflect.EnumType { + return &file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_enumTypes[0] +} + +func (x RingHash_HashFunction) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RingHash_HashFunction.Descriptor instead. +func (RingHash_HashFunction) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescGZIP(), []int{0, 0} +} + +// This configuration allows the built-in RING_HASH LB policy to be configured via the LB policy +// extension point. See the :ref:`load balancing architecture overview +// ` for more information. +// [#next-free-field: 8] +type RingHash struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The hash function used to hash hosts onto the ketama ring. The value defaults to + // :ref:`XX_HASH`. + HashFunction RingHash_HashFunction `protobuf:"varint,1,opt,name=hash_function,json=hashFunction,proto3,enum=envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash_HashFunction" json:"hash_function,omitempty"` + // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each + // provided host) the better the request distribution will reflect the desired weights. 
Defaults + // to 1024 entries, and limited to 8M entries. See also + // :ref:`maximum_ring_size`. + MinimumRingSize *wrapperspb.UInt64Value `protobuf:"bytes,2,opt,name=minimum_ring_size,json=minimumRingSize,proto3" json:"minimum_ring_size,omitempty"` + // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered + // to further constrain resource use. See also + // :ref:`minimum_ring_size`. + MaximumRingSize *wrapperspb.UInt64Value `protobuf:"bytes,3,opt,name=maximum_ring_size,json=maximumRingSize,proto3" json:"maximum_ring_size,omitempty"` + // If set to “true“, the cluster will use hostname instead of the resolved + // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. + // + // .. note:: + // + // This is deprecated and please use :ref:`consistent_hashing_lb_config + // ` instead. + // + // Deprecated: Marked as deprecated in envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto. + UseHostnameForHashing bool `protobuf:"varint,4,opt,name=use_hostname_for_hashing,json=useHostnameForHashing,proto3" json:"use_hostname_for_hashing,omitempty"` + // Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150 + // no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster. + // If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200. + // Minimum is 100. + // + // This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified + // “hash_balance_factor“, requests to any upstream host are capped at “hash_balance_factor/100“ times the average number of requests + // across the cluster. When a request arrives for an upstream host that is currently serving at its max capacity, linear probing + // is used to identify an eligible host. Further, the linear probe is implemented using a random jump in hosts ring/table to identify + // the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the + // cascading overflow effect when choosing the next host in the ring/table). + // + // If weights are specified on the hosts, they are respected. + // + // This is an O(N) algorithm, unlike other load balancers. Using a lower “hash_balance_factor“ results in more hosts + // being probed, so use a higher value if you require better performance. + // + // .. note:: + // + // This is deprecated and please use :ref:`consistent_hashing_lb_config + // ` instead. + // + // Deprecated: Marked as deprecated in envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto. + HashBalanceFactor *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=hash_balance_factor,json=hashBalanceFactor,proto3" json:"hash_balance_factor,omitempty"` + // Common configuration for hashing-based load balancing policies. + ConsistentHashingLbConfig *v3.ConsistentHashingLbConfig `protobuf:"bytes,6,opt,name=consistent_hashing_lb_config,json=consistentHashingLbConfig,proto3" json:"consistent_hashing_lb_config,omitempty"` + // Enable locality weighted load balancing for ring hash lb explicitly. 
+ LocalityWeightedLbConfig *v3.LocalityLbConfig_LocalityWeightedLbConfig `protobuf:"bytes,7,opt,name=locality_weighted_lb_config,json=localityWeightedLbConfig,proto3" json:"locality_weighted_lb_config,omitempty"` +} + +func (x *RingHash) Reset() { + *x = RingHash{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RingHash) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RingHash) ProtoMessage() {} + +func (x *RingHash) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RingHash.ProtoReflect.Descriptor instead. +func (*RingHash) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescGZIP(), []int{0} +} + +func (x *RingHash) GetHashFunction() RingHash_HashFunction { + if x != nil { + return x.HashFunction + } + return RingHash_DEFAULT_HASH +} + +func (x *RingHash) GetMinimumRingSize() *wrapperspb.UInt64Value { + if x != nil { + return x.MinimumRingSize + } + return nil +} + +func (x *RingHash) GetMaximumRingSize() *wrapperspb.UInt64Value { + if x != nil { + return x.MaximumRingSize + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto. +func (x *RingHash) GetUseHostnameForHashing() bool { + if x != nil { + return x.UseHostnameForHashing + } + return false +} + +// Deprecated: Marked as deprecated in envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto. 
+func (x *RingHash) GetHashBalanceFactor() *wrapperspb.UInt32Value { + if x != nil { + return x.HashBalanceFactor + } + return nil +} + +func (x *RingHash) GetConsistentHashingLbConfig() *v3.ConsistentHashingLbConfig { + if x != nil { + return x.ConsistentHashingLbConfig + } + return nil +} + +func (x *RingHash) GetLocalityWeightedLbConfig() *v3.LocalityLbConfig_LocalityWeightedLbConfig { + if x != nil { + return x.LocalityWeightedLbConfig + } + return nil +} + +var File_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto protoreflect.FileDescriptor + +var file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDesc = []byte{ + 0x0a, 0x45, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x72, 0x69, 0x6e, 0x67, 0x5f, + 0x68, 0x61, 0x73, 0x68, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, + 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x35, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x2e, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x2e, 0x76, 0x33, 0x1a, 0x3f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, + 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcf, 0x06, 0x0a, + 0x08, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x12, 0x7b, 0x0a, 0x0d, 0x68, 0x61, 0x73, + 0x68, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x4c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x72, 0x69, 0x6e, 0x67, + 0x5f, 0x68, 0x61, 0x73, 0x68, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, + 0x68, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x68, 0x61, 0x73, 0x68, 0x46, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x56, 0x0a, 0x11, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, + 0x6d, 0x5f, 0x72, 0x69, 0x6e, 0x67, 
0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, + 0x0c, 0xfa, 0x42, 0x09, 0x32, 0x07, 0x18, 0x80, 0x80, 0x80, 0x04, 0x28, 0x01, 0x52, 0x0f, 0x6d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x54, + 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x32, 0x05, 0x18, 0x80, + 0x80, 0x80, 0x04, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x44, 0x0a, 0x18, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, + 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, + 0x30, 0x18, 0x01, 0x52, 0x15, 0x75, 0x73, 0x65, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, + 0x46, 0x6f, 0x72, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x60, 0x0a, 0x13, 0x68, 0x61, + 0x73, 0x68, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, + 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x12, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x64, 0x92, 0xc7, + 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x11, 0x68, 0x61, 0x73, 0x68, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x8e, 0x01, 0x0a, + 0x1c, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x69, 0x6e, 0x67, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x4d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x19, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, + 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x9c, 0x01, + 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x5d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 
0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, + 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x40, 0x0a, 0x0c, + 0x48, 0x61, 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x0c, + 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x00, 0x12, 0x0b, + 0x0a, 0x07, 0x58, 0x58, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4d, + 0x55, 0x52, 0x4d, 0x55, 0x52, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x5f, 0x32, 0x10, 0x02, 0x42, 0xc8, + 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x43, 0x69, 0x6f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x2e, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x2e, 0x76, 0x33, 0x42, 0x0d, + 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x68, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x2f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x2f, 0x76, 0x33, 0x3b, 0x72, 0x69, + 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescOnce sync.Once + file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescData = file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDesc +) + +func file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescGZIP() []byte { + file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescOnce.Do(func() { + file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescData) + }) + return file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDescData +} + +var file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_goTypes = []interface{}{ + (RingHash_HashFunction)(0), // 0: envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash.HashFunction + (*RingHash)(nil), // 1: envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash + (*wrapperspb.UInt64Value)(nil), // 2: google.protobuf.UInt64Value + (*wrapperspb.UInt32Value)(nil), // 3: google.protobuf.UInt32Value + (*v3.ConsistentHashingLbConfig)(nil), // 4: 
envoy.extensions.load_balancing_policies.common.v3.ConsistentHashingLbConfig + (*v3.LocalityLbConfig_LocalityWeightedLbConfig)(nil), // 5: envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.LocalityWeightedLbConfig +} +var file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_depIdxs = []int32{ + 0, // 0: envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash.hash_function:type_name -> envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash.HashFunction + 2, // 1: envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash.minimum_ring_size:type_name -> google.protobuf.UInt64Value + 2, // 2: envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash.maximum_ring_size:type_name -> google.protobuf.UInt64Value + 3, // 3: envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash.hash_balance_factor:type_name -> google.protobuf.UInt32Value + 4, // 4: envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash.consistent_hashing_lb_config:type_name -> envoy.extensions.load_balancing_policies.common.v3.ConsistentHashingLbConfig + 5, // 5: envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash.locality_weighted_lb_config:type_name -> envoy.extensions.load_balancing_policies.common.v3.LocalityLbConfig.LocalityWeightedLbConfig + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_init() } +func file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_init() { + if File_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RingHash); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_goTypes, + DependencyIndexes: file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_depIdxs, + EnumInfos: file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_enumTypes, + MessageInfos: file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_msgTypes, + }.Build() + File_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto = out.File + file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_rawDesc = nil + file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_goTypes = nil + file_envoy_extensions_load_balancing_policies_ring_hash_v3_ring_hash_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.validate.go 
b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.validate.go new file mode 100644 index 0000000000..c5ec6e39ce --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.pb.validate.go @@ -0,0 +1,252 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto + +package ring_hashv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on RingHash with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RingHash) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RingHash with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RingHashMultiError, or nil +// if none found. +func (m *RingHash) ValidateAll() error { + return m.validate(true) +} + +func (m *RingHash) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := RingHash_HashFunction_name[int32(m.GetHashFunction())]; !ok { + err := RingHashValidationError{ + field: "HashFunction", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if wrapper := m.GetMinimumRingSize(); wrapper != nil { + + if val := wrapper.GetValue(); val < 1 || val > 8388608 { + err := RingHashValidationError{ + field: "MinimumRingSize", + reason: "value must be inside range [1, 8388608]", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if wrapper := m.GetMaximumRingSize(); wrapper != nil { + + if wrapper.GetValue() > 8388608 { + err := RingHashValidationError{ + field: "MaximumRingSize", + reason: "value must be less than or equal to 8388608", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + // no validation rules for UseHostnameForHashing + + if wrapper := m.GetHashBalanceFactor(); wrapper != nil { + + if wrapper.GetValue() < 100 { + err := RingHashValidationError{ + field: "HashBalanceFactor", + reason: "value must be greater than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetConsistentHashingLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RingHashValidationError{ + field: "ConsistentHashingLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RingHashValidationError{ + field: "ConsistentHashingLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := 
interface{}(m.GetConsistentHashingLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RingHashValidationError{ + field: "ConsistentHashingLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLocalityWeightedLbConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RingHashValidationError{ + field: "LocalityWeightedLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RingHashValidationError{ + field: "LocalityWeightedLbConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLocalityWeightedLbConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RingHashValidationError{ + field: "LocalityWeightedLbConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RingHashMultiError(errors) + } + + return nil +} + +// RingHashMultiError is an error wrapping multiple validation errors returned +// by RingHash.ValidateAll() if the designated constraints aren't met. +type RingHashMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RingHashMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RingHashMultiError) AllErrors() []error { return m } + +// RingHashValidationError is the validation error returned by +// RingHash.Validate if the designated constraints aren't met. +type RingHashValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RingHashValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RingHashValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RingHashValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RingHashValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RingHashValidationError) ErrorName() string { return "RingHashValidationError" } + +// Error satisfies the builtin error interface +func (e RingHashValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRingHash.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RingHashValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RingHashValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash_vtproto.pb.go new file mode 100644 index 0000000000..f762ec4c6d --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash_vtproto.pb.go @@ -0,0 +1,191 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto + +package ring_hashv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *RingHash) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RingHash) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RingHash) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LocalityWeightedLbConfig != nil { + if vtmsg, ok := interface{}(m.LocalityWeightedLbConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LocalityWeightedLbConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + if m.ConsistentHashingLbConfig != nil { + if vtmsg, ok := interface{}(m.ConsistentHashingLbConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ConsistentHashingLbConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + 
copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + if m.HashBalanceFactor != nil { + size, err := (*wrapperspb.UInt32Value)(m.HashBalanceFactor).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.UseHostnameForHashing { + i-- + if m.UseHostnameForHashing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.MaximumRingSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.MaximumRingSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.MinimumRingSize != nil { + size, err := (*wrapperspb.UInt64Value)(m.MinimumRingSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.HashFunction != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HashFunction)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RingHash) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HashFunction != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HashFunction)) + } + if m.MinimumRingSize != nil { + l = (*wrapperspb.UInt64Value)(m.MinimumRingSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MaximumRingSize != nil { + l = (*wrapperspb.UInt64Value)(m.MaximumRingSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.UseHostnameForHashing { + n += 2 + } + if m.HashBalanceFactor != nil { + l = (*wrapperspb.UInt32Value)(m.HashBalanceFactor).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConsistentHashingLbConfig != nil { + if size, ok := interface{}(m.ConsistentHashingLbConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ConsistentHashingLbConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LocalityWeightedLbConfig != nil { + if size, ok := interface{}(m.LocalityWeightedLbConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.LocalityWeightedLbConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.go new file mode 100644 index 0000000000..7c3741a23d --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.go @@ -0,0 +1,181 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.proto + +package wrr_localityv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration for the wrr_locality LB policy. See the :ref:`load balancing architecture overview +// ` for more information. +type WrrLocality struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The child LB policy to create for endpoint-picking within the chosen locality. + EndpointPickingPolicy *v3.LoadBalancingPolicy `protobuf:"bytes,1,opt,name=endpoint_picking_policy,json=endpointPickingPolicy,proto3" json:"endpoint_picking_policy,omitempty"` +} + +func (x *WrrLocality) Reset() { + *x = WrrLocality{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WrrLocality) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WrrLocality) ProtoMessage() {} + +func (x *WrrLocality) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WrrLocality.ProtoReflect.Descriptor instead. 
+func (*WrrLocality) Descriptor() ([]byte, []int) { + return file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDescGZIP(), []int{0} +} + +func (x *WrrLocality) GetEndpointPickingPolicy() *v3.LoadBalancingPolicy { + if x != nil { + return x.EndpointPickingPolicy + } + return nil +} + +var File_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto protoreflect.FileDescriptor + +var file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDesc = []byte{ + 0x0a, 0x4b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x77, 0x72, 0x72, 0x5f, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x2f, 0x76, 0x33, 0x2f, 0x77, 0x72, 0x72, 0x5f, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x38, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x77, 0x72, 0x72, 0x5f, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x76, 0x33, 0x1a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, + 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7d, 0x0a, 0x0b, 0x57, 0x72, 0x72, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x6e, 0x0a, 0x17, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x5f, 0x70, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x15, + 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x50, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0xd4, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, + 0x0a, 0x46, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x77, 0x72, 0x72, 0x5f, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x57, 0x72, 0x72, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x6e, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 
0x6f, 0x6c, 0x2d, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x77, 0x72, + 0x72, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x2f, 0x76, 0x33, 0x3b, 0x77, 0x72, + 0x72, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDescOnce sync.Once + file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDescData = file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDesc +) + +func file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDescGZIP() []byte { + file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDescOnce.Do(func() { + file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDescData) + }) + return file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDescData +} + +var file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_goTypes = []interface{}{ + (*WrrLocality)(nil), // 0: envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality + (*v3.LoadBalancingPolicy)(nil), // 1: envoy.config.cluster.v3.LoadBalancingPolicy +} +var file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_depIdxs = []int32{ + 1, // 0: envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality.endpoint_picking_policy:type_name -> envoy.config.cluster.v3.LoadBalancingPolicy + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_init() } +func file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_init() { + if File_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WrrLocality); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_goTypes, + DependencyIndexes: file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_depIdxs, + MessageInfos: 
file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_msgTypes, + }.Build() + File_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto = out.File + file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_rawDesc = nil + file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_goTypes = nil + file_envoy_extensions_load_balancing_policies_wrr_locality_v3_wrr_locality_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.validate.go new file mode 100644 index 0000000000..c4c33b4f00 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.pb.validate.go @@ -0,0 +1,176 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.proto + +package wrr_localityv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on WrrLocality with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *WrrLocality) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on WrrLocality with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in WrrLocalityMultiError, or +// nil if none found. 
+func (m *WrrLocality) ValidateAll() error { + return m.validate(true) +} + +func (m *WrrLocality) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetEndpointPickingPolicy() == nil { + err := WrrLocalityValidationError{ + field: "EndpointPickingPolicy", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetEndpointPickingPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WrrLocalityValidationError{ + field: "EndpointPickingPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WrrLocalityValidationError{ + field: "EndpointPickingPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEndpointPickingPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WrrLocalityValidationError{ + field: "EndpointPickingPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return WrrLocalityMultiError(errors) + } + + return nil +} + +// WrrLocalityMultiError is an error wrapping multiple validation errors +// returned by WrrLocality.ValidateAll() if the designated constraints aren't met. +type WrrLocalityMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m WrrLocalityMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m WrrLocalityMultiError) AllErrors() []error { return m } + +// WrrLocalityValidationError is the validation error returned by +// WrrLocality.Validate if the designated constraints aren't met. +type WrrLocalityValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e WrrLocalityValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e WrrLocalityValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e WrrLocalityValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e WrrLocalityValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e WrrLocalityValidationError) ErrorName() string { return "WrrLocalityValidationError" } + +// Error satisfies the builtin error interface +func (e WrrLocalityValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sWrrLocality.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = WrrLocalityValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = WrrLocalityValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality_vtproto.pb.go new file mode 100644 index 0000000000..1a01486614 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality_vtproto.pb.go @@ -0,0 +1,95 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.proto + +package wrr_localityv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *WrrLocality) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WrrLocality) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *WrrLocality) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.EndpointPickingPolicy != nil { + if vtmsg, ok := interface{}(m.EndpointPickingPolicy).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.EndpointPickingPolicy) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WrrLocality) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EndpointPickingPolicy != nil { + if size, ok := interface{}(m.EndpointPickingPolicy).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.EndpointPickingPolicy) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git 
a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.go new file mode 100644 index 0000000000..4199deae58 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.go @@ -0,0 +1,153 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/rbac/audit_loggers/stream/v3/stream.proto + +package streamv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Custom configuration for the RBAC audit logger that writes log entries +// directly to the operating system's standard output. +// The logger outputs in JSON format and is currently not configurable. +type StdoutAuditLog struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *StdoutAuditLog) Reset() { + *x = StdoutAuditLog{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StdoutAuditLog) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StdoutAuditLog) ProtoMessage() {} + +func (x *StdoutAuditLog) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StdoutAuditLog.ProtoReflect.Descriptor instead. 
+func (*StdoutAuditLog) Descriptor() ([]byte, []int) { + return file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDescGZIP(), []int{0} +} + +var File_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto protoreflect.FileDescriptor + +var file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDesc = []byte{ + 0x0a, 0x3a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x72, 0x62, 0x61, 0x63, 0x2f, 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x6c, 0x6f, + 0x67, 0x67, 0x65, 0x72, 0x73, 0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2f, 0x76, 0x33, 0x2f, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x2d, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x72, + 0x62, 0x61, 0x63, 0x2e, 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, + 0x73, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x10, 0x0a, 0x0e, 0x53, 0x74, + 0x64, 0x6f, 0x75, 0x74, 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x42, 0xb3, 0x01, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x3b, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x72, 0x62, 0x61, 0x63, 0x2e, 0x61, 0x75, 0x64, + 0x69, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x73, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x5d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x72, 0x62, 0x61, + 0x63, 0x2f, 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x73, 0x2f, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2f, 0x76, 0x33, 0x3b, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDescOnce sync.Once + file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDescData = file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDesc +) + +func file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDescGZIP() []byte { + file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDescOnce.Do(func() { + file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDescData) + }) + return file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDescData +} + +var file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_goTypes = []interface{}{ + (*StdoutAuditLog)(nil), // 0: envoy.extensions.rbac.audit_loggers.stream.v3.StdoutAuditLog +} +var 
file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_init() } +func file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_init() { + if File_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StdoutAuditLog); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_goTypes, + DependencyIndexes: file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_depIdxs, + MessageInfos: file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_msgTypes, + }.Build() + File_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto = out.File + file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_rawDesc = nil + file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_goTypes = nil + file_envoy_extensions_rbac_audit_loggers_stream_v3_stream_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.validate.go new file mode 100644 index 0000000000..5fe37d901f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream.pb.validate.go @@ -0,0 +1,137 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/rbac/audit_loggers/stream/v3/stream.proto + +package streamv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on StdoutAuditLog with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *StdoutAuditLog) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StdoutAuditLog with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in StdoutAuditLogMultiError, +// or nil if none found. 
+func (m *StdoutAuditLog) ValidateAll() error { + return m.validate(true) +} + +func (m *StdoutAuditLog) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return StdoutAuditLogMultiError(errors) + } + + return nil +} + +// StdoutAuditLogMultiError is an error wrapping multiple validation errors +// returned by StdoutAuditLog.ValidateAll() if the designated constraints +// aren't met. +type StdoutAuditLogMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StdoutAuditLogMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StdoutAuditLogMultiError) AllErrors() []error { return m } + +// StdoutAuditLogValidationError is the validation error returned by +// StdoutAuditLog.Validate if the designated constraints aren't met. +type StdoutAuditLogValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StdoutAuditLogValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StdoutAuditLogValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StdoutAuditLogValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StdoutAuditLogValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e StdoutAuditLogValidationError) ErrorName() string { return "StdoutAuditLogValidationError" } + +// Error satisfies the builtin error interface +func (e StdoutAuditLogValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStdoutAuditLog.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StdoutAuditLogValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StdoutAuditLogValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream_vtproto.pb.go new file mode 100644 index 0000000000..8e85a56806 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3/stream_vtproto.pb.go @@ -0,0 +1,61 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/rbac/audit_loggers/stream/v3/stream.proto + +package streamv3 + +import ( + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *StdoutAuditLog) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StdoutAuditLog) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StdoutAuditLog) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *StdoutAuditLog) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.go new file mode 100644 index 0000000000..37df4d7a63 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.go @@ -0,0 +1,89 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/transport_sockets/tls/v3/cert.proto + +package tlsv3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var File_envoy_extensions_transport_sockets_tls_v3_cert_proto protoreflect.FileDescriptor + +var file_envoy_extensions_transport_sockets_tls_v3_cert_proto_rawDesc = []byte{ + 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x65, 0x72, 0x74, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, + 0x33, 0x1a, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, + 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, + 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x33, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, + 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x74, 0x6c, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x9e, 0x01, 0x0a, 0x37, 0x69, 0x6f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, + 0x76, 0x33, 0x42, 0x09, 0x43, 0x65, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x56, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, + 0x33, 0x3b, 0x74, 0x6c, 0x73, 0x76, 0x33, 0x50, 0x00, 0x50, 0x01, 0x50, 0x02, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_envoy_extensions_transport_sockets_tls_v3_cert_proto_goTypes = []interface{}{} +var file_envoy_extensions_transport_sockets_tls_v3_cert_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_transport_sockets_tls_v3_cert_proto_init() } +func file_envoy_extensions_transport_sockets_tls_v3_cert_proto_init() { + if 
File_envoy_extensions_transport_sockets_tls_v3_cert_proto != nil { + return + } + file_envoy_extensions_transport_sockets_tls_v3_common_proto_init() + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_init() + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_transport_sockets_tls_v3_cert_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_transport_sockets_tls_v3_cert_proto_goTypes, + DependencyIndexes: file_envoy_extensions_transport_sockets_tls_v3_cert_proto_depIdxs, + }.Build() + File_envoy_extensions_transport_sockets_tls_v3_cert_proto = out.File + file_envoy_extensions_transport_sockets_tls_v3_cert_proto_rawDesc = nil + file_envoy_extensions_transport_sockets_tls_v3_cert_proto_goTypes = nil + file_envoy_extensions_transport_sockets_tls_v3_cert_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.validate.go new file mode 100644 index 0000000000..aa4f445f55 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/cert.pb.validate.go @@ -0,0 +1,37 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/transport_sockets/tls/v3/cert.proto + +package tlsv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.go new file mode 100644 index 0000000000..0f7321c224 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.go @@ -0,0 +1,1689 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/transport_sockets/tls/v3/common.proto + +package tlsv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type TlsParameters_TlsProtocol int32 + +const ( + // Envoy will choose the optimal TLS version. + TlsParameters_TLS_AUTO TlsParameters_TlsProtocol = 0 + // TLS 1.0 + TlsParameters_TLSv1_0 TlsParameters_TlsProtocol = 1 + // TLS 1.1 + TlsParameters_TLSv1_1 TlsParameters_TlsProtocol = 2 + // TLS 1.2 + TlsParameters_TLSv1_2 TlsParameters_TlsProtocol = 3 + // TLS 1.3 + TlsParameters_TLSv1_3 TlsParameters_TlsProtocol = 4 +) + +// Enum value maps for TlsParameters_TlsProtocol. +var ( + TlsParameters_TlsProtocol_name = map[int32]string{ + 0: "TLS_AUTO", + 1: "TLSv1_0", + 2: "TLSv1_1", + 3: "TLSv1_2", + 4: "TLSv1_3", + } + TlsParameters_TlsProtocol_value = map[string]int32{ + "TLS_AUTO": 0, + "TLSv1_0": 1, + "TLSv1_1": 2, + "TLSv1_2": 3, + "TLSv1_3": 4, + } +) + +func (x TlsParameters_TlsProtocol) Enum() *TlsParameters_TlsProtocol { + p := new(TlsParameters_TlsProtocol) + *p = x + return p +} + +func (x TlsParameters_TlsProtocol) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TlsParameters_TlsProtocol) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_enumTypes[0].Descriptor() +} + +func (TlsParameters_TlsProtocol) Type() protoreflect.EnumType { + return &file_envoy_extensions_transport_sockets_tls_v3_common_proto_enumTypes[0] +} + +func (x TlsParameters_TlsProtocol) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TlsParameters_TlsProtocol.Descriptor instead. +func (TlsParameters_TlsProtocol) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP(), []int{0, 0} +} + +// Indicates the choice of GeneralName as defined in section 4.2.1.5 of RFC 5280 to match +// against. +type SubjectAltNameMatcher_SanType int32 + +const ( + SubjectAltNameMatcher_SAN_TYPE_UNSPECIFIED SubjectAltNameMatcher_SanType = 0 + SubjectAltNameMatcher_EMAIL SubjectAltNameMatcher_SanType = 1 + SubjectAltNameMatcher_DNS SubjectAltNameMatcher_SanType = 2 + SubjectAltNameMatcher_URI SubjectAltNameMatcher_SanType = 3 + SubjectAltNameMatcher_IP_ADDRESS SubjectAltNameMatcher_SanType = 4 + SubjectAltNameMatcher_OTHER_NAME SubjectAltNameMatcher_SanType = 5 +) + +// Enum value maps for SubjectAltNameMatcher_SanType. 
+var ( + SubjectAltNameMatcher_SanType_name = map[int32]string{ + 0: "SAN_TYPE_UNSPECIFIED", + 1: "EMAIL", + 2: "DNS", + 3: "URI", + 4: "IP_ADDRESS", + 5: "OTHER_NAME", + } + SubjectAltNameMatcher_SanType_value = map[string]int32{ + "SAN_TYPE_UNSPECIFIED": 0, + "EMAIL": 1, + "DNS": 2, + "URI": 3, + "IP_ADDRESS": 4, + "OTHER_NAME": 5, + } +) + +func (x SubjectAltNameMatcher_SanType) Enum() *SubjectAltNameMatcher_SanType { + p := new(SubjectAltNameMatcher_SanType) + *p = x + return p +} + +func (x SubjectAltNameMatcher_SanType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SubjectAltNameMatcher_SanType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_enumTypes[1].Descriptor() +} + +func (SubjectAltNameMatcher_SanType) Type() protoreflect.EnumType { + return &file_envoy_extensions_transport_sockets_tls_v3_common_proto_enumTypes[1] +} + +func (x SubjectAltNameMatcher_SanType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SubjectAltNameMatcher_SanType.Descriptor instead. +func (SubjectAltNameMatcher_SanType) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP(), []int{5, 0} +} + +// Peer certificate verification mode. +type CertificateValidationContext_TrustChainVerification int32 + +const ( + // Perform default certificate verification (e.g., against CA / verification lists) + CertificateValidationContext_VERIFY_TRUST_CHAIN CertificateValidationContext_TrustChainVerification = 0 + // Connections where the certificate fails verification will be permitted. + // For HTTP connections, the result of certificate verification can be used in route matching. ( + // see :ref:`validated ` ). + CertificateValidationContext_ACCEPT_UNTRUSTED CertificateValidationContext_TrustChainVerification = 1 +) + +// Enum value maps for CertificateValidationContext_TrustChainVerification. +var ( + CertificateValidationContext_TrustChainVerification_name = map[int32]string{ + 0: "VERIFY_TRUST_CHAIN", + 1: "ACCEPT_UNTRUSTED", + } + CertificateValidationContext_TrustChainVerification_value = map[string]int32{ + "VERIFY_TRUST_CHAIN": 0, + "ACCEPT_UNTRUSTED": 1, + } +) + +func (x CertificateValidationContext_TrustChainVerification) Enum() *CertificateValidationContext_TrustChainVerification { + p := new(CertificateValidationContext_TrustChainVerification) + *p = x + return p +} + +func (x CertificateValidationContext_TrustChainVerification) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CertificateValidationContext_TrustChainVerification) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_enumTypes[2].Descriptor() +} + +func (CertificateValidationContext_TrustChainVerification) Type() protoreflect.EnumType { + return &file_envoy_extensions_transport_sockets_tls_v3_common_proto_enumTypes[2] +} + +func (x CertificateValidationContext_TrustChainVerification) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CertificateValidationContext_TrustChainVerification.Descriptor instead. 
+func (CertificateValidationContext_TrustChainVerification) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP(), []int{6, 0} +} + +// [#next-free-field: 6] +type TlsParameters struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Minimum TLS protocol version. By default, it's “TLSv1_2“ for both clients and servers. + // + // TLS protocol versions below TLSv1_2 require setting compatible ciphers with the + // “cipher_suites“ setting as the default ciphers no longer include compatible ciphers. + // + // .. attention:: + // + // Using TLS protocol versions below TLSv1_2 has serious security considerations and risks. + TlsMinimumProtocolVersion TlsParameters_TlsProtocol `protobuf:"varint,1,opt,name=tls_minimum_protocol_version,json=tlsMinimumProtocolVersion,proto3,enum=envoy.extensions.transport_sockets.tls.v3.TlsParameters_TlsProtocol" json:"tls_minimum_protocol_version,omitempty"` + // Maximum TLS protocol version. By default, it's “TLSv1_2“ for clients and “TLSv1_3“ for + // servers. + TlsMaximumProtocolVersion TlsParameters_TlsProtocol `protobuf:"varint,2,opt,name=tls_maximum_protocol_version,json=tlsMaximumProtocolVersion,proto3,enum=envoy.extensions.transport_sockets.tls.v3.TlsParameters_TlsProtocol" json:"tls_maximum_protocol_version,omitempty"` + // If specified, the TLS listener will only support the specified `cipher list + // `_ + // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). + // + // If not specified, a default list will be used. Defaults are different for server (downstream) and + // client (upstream) TLS configurations. + // Defaults will change over time in response to security considerations; If you care, configure + // it instead of using the default. + // + // In non-FIPS builds, the default server cipher list is: + // + // .. code-block:: none + // + // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] + // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // + // In builds using :ref:`BoringSSL FIPS `, the default server cipher list is: + // + // .. code-block:: none + // + // ECDHE-ECDSA-AES128-GCM-SHA256 + // ECDHE-RSA-AES128-GCM-SHA256 + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // + // In non-FIPS builds, the default client cipher list is: + // + // .. code-block:: none + // + // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] + // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // + // In builds using :ref:`BoringSSL FIPS `, the default client cipher list is: + // + // .. code-block:: none + // + // ECDHE-ECDSA-AES128-GCM-SHA256 + // ECDHE-RSA-AES128-GCM-SHA256 + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + CipherSuites []string `protobuf:"bytes,3,rep,name=cipher_suites,json=cipherSuites,proto3" json:"cipher_suites,omitempty"` + // If specified, the TLS connection will only support the specified ECDH + // curves. If not specified, the default curves will be used. + // + // In non-FIPS builds, the default curves are: + // + // .. code-block:: none + // + // X25519 + // P-256 + // + // In builds using :ref:`BoringSSL FIPS `, the default curve is: + // + // .. 
code-block:: none + // + // P-256 + EcdhCurves []string `protobuf:"bytes,4,rep,name=ecdh_curves,json=ecdhCurves,proto3" json:"ecdh_curves,omitempty"` + // If specified, the TLS connection will only support the specified signature algorithms. + // The list is ordered by preference. + // If not specified, the default signature algorithms defined by BoringSSL will be used. + // + // Default signature algorithms selected by BoringSSL (may be out of date): + // + // .. code-block:: none + // + // ecdsa_secp256r1_sha256 + // rsa_pss_rsae_sha256 + // rsa_pkcs1_sha256 + // ecdsa_secp384r1_sha384 + // rsa_pss_rsae_sha384 + // rsa_pkcs1_sha384 + // rsa_pss_rsae_sha512 + // rsa_pkcs1_sha512 + // rsa_pkcs1_sha1 + // + // Signature algorithms supported by BoringSSL (may be out of date): + // + // .. code-block:: none + // + // rsa_pkcs1_sha256 + // rsa_pkcs1_sha384 + // rsa_pkcs1_sha512 + // ecdsa_secp256r1_sha256 + // ecdsa_secp384r1_sha384 + // ecdsa_secp521r1_sha512 + // rsa_pss_rsae_sha256 + // rsa_pss_rsae_sha384 + // rsa_pss_rsae_sha512 + // ed25519 + // rsa_pkcs1_sha1 + // ecdsa_sha1 + SignatureAlgorithms []string `protobuf:"bytes,5,rep,name=signature_algorithms,json=signatureAlgorithms,proto3" json:"signature_algorithms,omitempty"` +} + +func (x *TlsParameters) Reset() { + *x = TlsParameters{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TlsParameters) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TlsParameters) ProtoMessage() {} + +func (x *TlsParameters) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TlsParameters.ProtoReflect.Descriptor instead. +func (*TlsParameters) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP(), []int{0} +} + +func (x *TlsParameters) GetTlsMinimumProtocolVersion() TlsParameters_TlsProtocol { + if x != nil { + return x.TlsMinimumProtocolVersion + } + return TlsParameters_TLS_AUTO +} + +func (x *TlsParameters) GetTlsMaximumProtocolVersion() TlsParameters_TlsProtocol { + if x != nil { + return x.TlsMaximumProtocolVersion + } + return TlsParameters_TLS_AUTO +} + +func (x *TlsParameters) GetCipherSuites() []string { + if x != nil { + return x.CipherSuites + } + return nil +} + +func (x *TlsParameters) GetEcdhCurves() []string { + if x != nil { + return x.EcdhCurves + } + return nil +} + +func (x *TlsParameters) GetSignatureAlgorithms() []string { + if x != nil { + return x.SignatureAlgorithms + } + return nil +} + +// BoringSSL private key method configuration. The private key methods are used for external +// (potentially asynchronous) signing and decryption operations. Some use cases for private key +// methods would be TPM support and TLS acceleration. +type PrivateKeyProvider struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Private key method provider name. The name must match a + // supported private key method provider type. 
+ ProviderName string `protobuf:"bytes,1,opt,name=provider_name,json=providerName,proto3" json:"provider_name,omitempty"` + // Private key method provider specific configuration. + // + // Types that are assignable to ConfigType: + // + // *PrivateKeyProvider_TypedConfig + ConfigType isPrivateKeyProvider_ConfigType `protobuf_oneof:"config_type"` + // If the private key provider isn't available (eg. the required hardware capability doesn't existed), + // Envoy will fallback to the BoringSSL default implementation when the “fallback“ is true. + // The default value is “false“. + Fallback bool `protobuf:"varint,4,opt,name=fallback,proto3" json:"fallback,omitempty"` +} + +func (x *PrivateKeyProvider) Reset() { + *x = PrivateKeyProvider{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PrivateKeyProvider) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PrivateKeyProvider) ProtoMessage() {} + +func (x *PrivateKeyProvider) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PrivateKeyProvider.ProtoReflect.Descriptor instead. +func (*PrivateKeyProvider) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP(), []int{1} +} + +func (x *PrivateKeyProvider) GetProviderName() string { + if x != nil { + return x.ProviderName + } + return "" +} + +func (m *PrivateKeyProvider) GetConfigType() isPrivateKeyProvider_ConfigType { + if m != nil { + return m.ConfigType + } + return nil +} + +func (x *PrivateKeyProvider) GetTypedConfig() *anypb.Any { + if x, ok := x.GetConfigType().(*PrivateKeyProvider_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +func (x *PrivateKeyProvider) GetFallback() bool { + if x != nil { + return x.Fallback + } + return false +} + +type isPrivateKeyProvider_ConfigType interface { + isPrivateKeyProvider_ConfigType() +} + +type PrivateKeyProvider_TypedConfig struct { + TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*PrivateKeyProvider_TypedConfig) isPrivateKeyProvider_ConfigType() {} + +// [#next-free-field: 9] +type TlsCertificate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The TLS certificate chain. + // + // If “certificate_chain“ is a filesystem path, a watch will be added to the + // parent directory for any file moves to support rotation. This currently + // only applies to dynamic secrets, when the “TlsCertificate“ is delivered via + // SDS. + CertificateChain *v3.DataSource `protobuf:"bytes,1,opt,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` + // The TLS private key. + // + // If “private_key“ is a filesystem path, a watch will be added to the parent + // directory for any file moves to support rotation. This currently only + // applies to dynamic secrets, when the “TlsCertificate“ is delivered via SDS. 
+ PrivateKey *v3.DataSource `protobuf:"bytes,2,opt,name=private_key,json=privateKey,proto3" json:"private_key,omitempty"` + // “Pkcs12“ data containing TLS certificate, chain, and private key. + // + // If “pkcs12“ is a filesystem path, the file will be read, but no watch will + // be added to the parent directory, since “pkcs12“ isn't used by SDS. + // This field is mutually exclusive with “certificate_chain“, “private_key“ and “private_key_provider“. + // This can't be marked as “oneof“ due to API compatibility reasons. Setting + // both :ref:`private_key `, + // :ref:`certificate_chain `, + // or :ref:`private_key_provider ` + // and :ref:`pkcs12 ` + // fields will result in an error. Use :ref:`password + // ` + // to specify the password to unprotect the “PKCS12“ data, if necessary. + Pkcs12 *v3.DataSource `protobuf:"bytes,8,opt,name=pkcs12,proto3" json:"pkcs12,omitempty"` + // If specified, updates of file-based “certificate_chain“ and “private_key“ + // sources will be triggered by this watch. The certificate/key pair will be + // read together and validated for atomic read consistency (i.e. no + // intervening modification occurred between cert/key read, verified by file + // hash comparisons). This allows explicit control over the path watched, by + // default the parent directories of the filesystem paths in + // “certificate_chain“ and “private_key“ are watched if this field is not + // specified. This only applies when a “TlsCertificate“ is delivered by SDS + // with references to filesystem paths. See the :ref:`SDS key rotation + // ` documentation for further details. + WatchedDirectory *v3.WatchedDirectory `protobuf:"bytes,7,opt,name=watched_directory,json=watchedDirectory,proto3" json:"watched_directory,omitempty"` + // BoringSSL private key method provider. This is an alternative to :ref:`private_key + // ` field. This can't be + // marked as “oneof“ due to API compatibility reasons. Setting both :ref:`private_key + // ` and + // :ref:`private_key_provider + // ` fields will result in an + // error. + PrivateKeyProvider *PrivateKeyProvider `protobuf:"bytes,6,opt,name=private_key_provider,json=privateKeyProvider,proto3" json:"private_key_provider,omitempty"` + // The password to decrypt the TLS private key. If this field is not set, it is assumed that the + // TLS private key is not password encrypted. + Password *v3.DataSource `protobuf:"bytes,3,opt,name=password,proto3" json:"password,omitempty"` + // The OCSP response to be stapled with this certificate during the handshake. + // The response must be DER-encoded and may only be provided via “filename“ or + // “inline_bytes“. The response may pertain to only one certificate. 
+ OcspStaple *v3.DataSource `protobuf:"bytes,4,opt,name=ocsp_staple,json=ocspStaple,proto3" json:"ocsp_staple,omitempty"` + // [#not-implemented-hide:] + SignedCertificateTimestamp []*v3.DataSource `protobuf:"bytes,5,rep,name=signed_certificate_timestamp,json=signedCertificateTimestamp,proto3" json:"signed_certificate_timestamp,omitempty"` +} + +func (x *TlsCertificate) Reset() { + *x = TlsCertificate{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TlsCertificate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TlsCertificate) ProtoMessage() {} + +func (x *TlsCertificate) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TlsCertificate.ProtoReflect.Descriptor instead. +func (*TlsCertificate) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP(), []int{2} +} + +func (x *TlsCertificate) GetCertificateChain() *v3.DataSource { + if x != nil { + return x.CertificateChain + } + return nil +} + +func (x *TlsCertificate) GetPrivateKey() *v3.DataSource { + if x != nil { + return x.PrivateKey + } + return nil +} + +func (x *TlsCertificate) GetPkcs12() *v3.DataSource { + if x != nil { + return x.Pkcs12 + } + return nil +} + +func (x *TlsCertificate) GetWatchedDirectory() *v3.WatchedDirectory { + if x != nil { + return x.WatchedDirectory + } + return nil +} + +func (x *TlsCertificate) GetPrivateKeyProvider() *PrivateKeyProvider { + if x != nil { + return x.PrivateKeyProvider + } + return nil +} + +func (x *TlsCertificate) GetPassword() *v3.DataSource { + if x != nil { + return x.Password + } + return nil +} + +func (x *TlsCertificate) GetOcspStaple() *v3.DataSource { + if x != nil { + return x.OcspStaple + } + return nil +} + +func (x *TlsCertificate) GetSignedCertificateTimestamp() []*v3.DataSource { + if x != nil { + return x.SignedCertificateTimestamp + } + return nil +} + +type TlsSessionTicketKeys struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Keys for encrypting and decrypting TLS session tickets. The + // first key in the array contains the key to encrypt all new sessions created by this context. + // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys + // by, for example, putting the new key first, and the previous key second. + // + // If :ref:`session_ticket_keys ` + // is not specified, the TLS library will still support resuming sessions via tickets, but it will + // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts + // or on different hosts. + // + // Each key must contain exactly 80 bytes of cryptographically-secure random data. For + // example, the output of “openssl rand 80“. + // + // .. attention:: + // + // Using this feature has serious security considerations and risks. Improper handling of keys + // may result in loss of secrecy in connections, even if ciphers supporting perfect forward + // secrecy are used. 
See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some + // discussion. To minimize the risk, you must: + // + // * Keep the session ticket keys at least as secure as your TLS certificate private keys + // * Rotate session ticket keys at least daily, and preferably hourly + // * Always generate keys using a cryptographically-secure random data source + Keys []*v3.DataSource `protobuf:"bytes,1,rep,name=keys,proto3" json:"keys,omitempty"` +} + +func (x *TlsSessionTicketKeys) Reset() { + *x = TlsSessionTicketKeys{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TlsSessionTicketKeys) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TlsSessionTicketKeys) ProtoMessage() {} + +func (x *TlsSessionTicketKeys) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TlsSessionTicketKeys.ProtoReflect.Descriptor instead. +func (*TlsSessionTicketKeys) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP(), []int{3} +} + +func (x *TlsSessionTicketKeys) GetKeys() []*v3.DataSource { + if x != nil { + return x.Keys + } + return nil +} + +// Indicates a certificate to be obtained from a named CertificateProvider plugin instance. +// The plugin instances are defined in the client's bootstrap file. +// The plugin allows certificates to be fetched/refreshed over the network asynchronously with +// respect to the TLS handshake. +// [#not-implemented-hide:] +type CertificateProviderPluginInstance struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Provider instance name. If not present, defaults to "default". + // + // Instance names should generally be defined not in terms of the underlying provider + // implementation (e.g., "file_watcher") but rather in terms of the function of the + // certificates (e.g., "foo_deployment_identity"). + InstanceName string `protobuf:"bytes,1,opt,name=instance_name,json=instanceName,proto3" json:"instance_name,omitempty"` + // Opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "example.com" to specify a certificate for a + // particular domain. Not all provider instances will actually use this field, so the value + // defaults to the empty string. 
+ CertificateName string `protobuf:"bytes,2,opt,name=certificate_name,json=certificateName,proto3" json:"certificate_name,omitempty"` +} + +func (x *CertificateProviderPluginInstance) Reset() { + *x = CertificateProviderPluginInstance{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CertificateProviderPluginInstance) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CertificateProviderPluginInstance) ProtoMessage() {} + +func (x *CertificateProviderPluginInstance) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CertificateProviderPluginInstance.ProtoReflect.Descriptor instead. +func (*CertificateProviderPluginInstance) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP(), []int{4} +} + +func (x *CertificateProviderPluginInstance) GetInstanceName() string { + if x != nil { + return x.InstanceName + } + return "" +} + +func (x *CertificateProviderPluginInstance) GetCertificateName() string { + if x != nil { + return x.CertificateName + } + return "" +} + +// Matcher for subject alternative names, to match both type and value of the SAN. +type SubjectAltNameMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specification of type of SAN. Note that the default enum value is an invalid choice. + SanType SubjectAltNameMatcher_SanType `protobuf:"varint,1,opt,name=san_type,json=sanType,proto3,enum=envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher_SanType" json:"san_type,omitempty"` + // Matcher for SAN value. + // + // The string matching for OTHER_NAME SAN values depends on their ASN.1 type: + // + // - OBJECT: Validated against its dotted numeric notation (e.g., "1.2.3.4") + // - BOOLEAN: Validated against strings "true" or "false" + // - INTEGER/ENUMERATED: Validated against a string containing the integer value + // - NULL: Validated against an empty string + // - Other types: Validated directly against the string value + Matcher *v31.StringMatcher `protobuf:"bytes,2,opt,name=matcher,proto3" json:"matcher,omitempty"` + // OID Value which is required if OTHER_NAME SAN type is used. + // For example, UPN OID is 1.3.6.1.4.1.311.20.2.3 + // (Reference: http://oid-info.com/get/1.3.6.1.4.1.311.20.2.3). + // + // If set for SAN types other than OTHER_NAME, it will be ignored. 
+ Oid string `protobuf:"bytes,3,opt,name=oid,proto3" json:"oid,omitempty"` +} + +func (x *SubjectAltNameMatcher) Reset() { + *x = SubjectAltNameMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubjectAltNameMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubjectAltNameMatcher) ProtoMessage() {} + +func (x *SubjectAltNameMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubjectAltNameMatcher.ProtoReflect.Descriptor instead. +func (*SubjectAltNameMatcher) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP(), []int{5} +} + +func (x *SubjectAltNameMatcher) GetSanType() SubjectAltNameMatcher_SanType { + if x != nil { + return x.SanType + } + return SubjectAltNameMatcher_SAN_TYPE_UNSPECIFIED +} + +func (x *SubjectAltNameMatcher) GetMatcher() *v31.StringMatcher { + if x != nil { + return x.Matcher + } + return nil +} + +func (x *SubjectAltNameMatcher) GetOid() string { + if x != nil { + return x.Oid + } + return "" +} + +// [#next-free-field: 18] +type CertificateValidationContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // TLS certificate data containing certificate authority certificates to use in verifying + // a presented peer certificate (e.g. server certificate for clusters or client certificate + // for listeners). If not specified and a peer certificate is presented it will not be + // verified. By default, a client certificate is optional, unless one of the additional + // options (:ref:`require_client_certificate + // `, + // :ref:`verify_certificate_spki + // `, + // :ref:`verify_certificate_hash + // `, or + // :ref:`match_typed_subject_alt_names + // `) is also + // specified. + // + // It can optionally contain certificate revocation lists, in which case Envoy will verify + // that the presented peer certificate has not been revoked by one of the included CRLs. Note + // that if a CRL is provided for any certificate authority in a trust chain, a CRL must be + // provided for all certificate authorities in that chain. Failure to do so will result in + // verification failure for both revoked and unrevoked certificates from that chain. + // The behavior of requiring all certificates to contain CRLs can be altered by + // setting :ref:`only_verify_leaf_cert_crl ` + // true. If set to true, only the final certificate in the chain undergoes CRL verification. + // + // See :ref:`the TLS overview ` for a list of common + // system CA locations. + // + // If “trusted_ca“ is a filesystem path, a watch will be added to the parent + // directory for any file moves to support rotation. This currently only + // applies to dynamic secrets, when the “CertificateValidationContext“ is + // delivered via SDS. + // + // X509_V_FLAG_PARTIAL_CHAIN is set by default, so non-root/intermediate ca certificate in “trusted_ca“ + // can be treated as trust anchor as well. It allows verification with building valid partial chain instead + // of a full chain. 
+ // + // If “ca_certificate_provider_instance“ is set, it takes precedence over “trusted_ca“. + TrustedCa *v3.DataSource `protobuf:"bytes,1,opt,name=trusted_ca,json=trustedCa,proto3" json:"trusted_ca,omitempty"` + // Certificate provider instance for fetching TLS certificates. + // + // If set, takes precedence over “trusted_ca“. + // [#not-implemented-hide:] + CaCertificateProviderInstance *CertificateProviderPluginInstance `protobuf:"bytes,13,opt,name=ca_certificate_provider_instance,json=caCertificateProviderInstance,proto3" json:"ca_certificate_provider_instance,omitempty"` + // Use system root certs for validation. + // If present, system root certs are used only if neither of the “trusted_ca“ + // or “ca_certificate_provider_instance“ fields are set. + // [#not-implemented-hide:] + SystemRootCerts *CertificateValidationContext_SystemRootCerts `protobuf:"bytes,17,opt,name=system_root_certs,json=systemRootCerts,proto3" json:"system_root_certs,omitempty"` + // If specified, updates of a file-based “trusted_ca“ source will be triggered + // by this watch. This allows explicit control over the path watched, by + // default the parent directory of the filesystem path in “trusted_ca“ is + // watched if this field is not specified. This only applies when a + // “CertificateValidationContext“ is delivered by SDS with references to + // filesystem paths. See the :ref:`SDS key rotation ` + // documentation for further details. + WatchedDirectory *v3.WatchedDirectory `protobuf:"bytes,11,opt,name=watched_directory,json=watchedDirectory,proto3" json:"watched_directory,omitempty"` + // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the + // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate + // matches one of the specified values. + // + // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate + // can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -pubkey + // | openssl pkey -pubin -outform DER + // | openssl dgst -sha256 -binary + // | openssl enc -base64 + // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= + // + // This is the format used in HTTP Public Key Pinning. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + // + // .. attention:: + // + // This option is preferred over :ref:`verify_certificate_hash + // `, + // because SPKI is tied to a private key, so it doesn't change when the certificate + // is renewed using the same private key. + VerifyCertificateSpki []string `protobuf:"bytes,3,rep,name=verify_certificate_spki,json=verifyCertificateSpki,proto3" json:"verify_certificate_spki,omitempty"` + // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that + // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. + // + // A hex-encoded SHA-256 of the certificate can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 + // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a + // + // A long hex-encoded and colon-separated SHA-256 (a.k.a. 
"fingerprint") of the certificate + // can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 + // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A + // + // Both of those formats are acceptable. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + VerifyCertificateHash []string `protobuf:"bytes,2,rep,name=verify_certificate_hash,json=verifyCertificateHash,proto3" json:"verify_certificate_hash,omitempty"` + // An optional list of Subject Alternative name matchers. If specified, Envoy will verify that the + // Subject Alternative Name of the presented certificate matches one of the specified matchers. + // The matching uses "any" semantics, that is to say, the SAN is verified if at least one matcher is + // matched. + // + // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be + // configured with exact match type in the :ref:`string matcher `. + // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", + // it should be configured as shown below. + // + // .. code-block:: yaml + // + // match_typed_subject_alt_names: + // - san_type: DNS + // matcher: + // exact: "api.example.com" + // + // .. attention:: + // + // Subject Alternative Names are easily spoofable and verifying only them is insecure, + // therefore this option must be used together with :ref:`trusted_ca + // `. + MatchTypedSubjectAltNames []*SubjectAltNameMatcher `protobuf:"bytes,15,rep,name=match_typed_subject_alt_names,json=matchTypedSubjectAltNames,proto3" json:"match_typed_subject_alt_names,omitempty"` + // This field is deprecated in favor of + // :ref:`match_typed_subject_alt_names + // `. + // Note that if both this field and :ref:`match_typed_subject_alt_names + // ` + // are specified, the former (deprecated field) is ignored. + // + // Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/common.proto. + MatchSubjectAltNames []*v31.StringMatcher `protobuf:"bytes,9,rep,name=match_subject_alt_names,json=matchSubjectAltNames,proto3" json:"match_subject_alt_names,omitempty"` + // [#not-implemented-hide:] Must present signed certificate time-stamp. + RequireSignedCertificateTimestamp *wrapperspb.BoolValue `protobuf:"bytes,6,opt,name=require_signed_certificate_timestamp,json=requireSignedCertificateTimestamp,proto3" json:"require_signed_certificate_timestamp,omitempty"` + // An optional `certificate revocation list + // `_ + // (in PEM format). If specified, Envoy will verify that the presented peer + // certificate has not been revoked by this CRL. If this DataSource contains + // multiple CRLs, all of them will be used. Note that if a CRL is provided + // for any certificate authority in a trust chain, a CRL must be provided + // for all certificate authorities in that chain. Failure to do so will + // result in verification failure for both revoked and unrevoked certificates + // from that chain. This default behavior can be altered by setting + // :ref:`only_verify_leaf_cert_crl ` to + // true. + // + // If “crl“ is a filesystem path, a watch will be added to the parent + // directory for any file moves to support rotation. 
This currently only + // applies to dynamic secrets, when the “CertificateValidationContext“ is + // delivered via SDS. + Crl *v3.DataSource `protobuf:"bytes,7,opt,name=crl,proto3" json:"crl,omitempty"` + // If specified, Envoy will not reject expired certificates. + AllowExpiredCertificate bool `protobuf:"varint,8,opt,name=allow_expired_certificate,json=allowExpiredCertificate,proto3" json:"allow_expired_certificate,omitempty"` + // Certificate trust chain verification mode. + TrustChainVerification CertificateValidationContext_TrustChainVerification `protobuf:"varint,10,opt,name=trust_chain_verification,json=trustChainVerification,proto3,enum=envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext_TrustChainVerification" json:"trust_chain_verification,omitempty"` + // The configuration of an extension specific certificate validator. + // If specified, all validation is done by the specified validator, + // and the behavior of all other validation settings is defined by the specified validator (and may be entirely ignored, unused, and unvalidated). + // Refer to the documentation for the specified validator. If you do not want a custom validation algorithm, do not set this field. + // [#extension-category: envoy.tls.cert_validator] + CustomValidatorConfig *v3.TypedExtensionConfig `protobuf:"bytes,12,opt,name=custom_validator_config,json=customValidatorConfig,proto3" json:"custom_validator_config,omitempty"` + // If this option is set to true, only the certificate at the end of the + // certificate chain will be subject to validation by :ref:`CRL `. + OnlyVerifyLeafCertCrl bool `protobuf:"varint,14,opt,name=only_verify_leaf_cert_crl,json=onlyVerifyLeafCertCrl,proto3" json:"only_verify_leaf_cert_crl,omitempty"` + // Defines maximum depth of a certificate chain accepted in verification, the default limit is 100, though this can be system-dependent. + // This number does not include the leaf but includes the trust anchor, so a depth of 1 allows the leaf and one CA certificate. If a trusted issuer + // appears in the chain, but in a depth larger than configured, the certificate validation will fail. + // This matches the semantics of “SSL_CTX_set_verify_depth“ in OpenSSL 1.0.x and older versions of BoringSSL. It differs from “SSL_CTX_set_verify_depth“ + // in OpenSSL 1.1.x and newer versions of BoringSSL in that the trust anchor is included. + // Trusted issues are specified by setting :ref:`trusted_ca ` + MaxVerifyDepth *wrapperspb.UInt32Value `protobuf:"bytes,16,opt,name=max_verify_depth,json=maxVerifyDepth,proto3" json:"max_verify_depth,omitempty"` +} + +func (x *CertificateValidationContext) Reset() { + *x = CertificateValidationContext{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CertificateValidationContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CertificateValidationContext) ProtoMessage() {} + +func (x *CertificateValidationContext) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CertificateValidationContext.ProtoReflect.Descriptor instead. 
+func (*CertificateValidationContext) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP(), []int{6} +} + +func (x *CertificateValidationContext) GetTrustedCa() *v3.DataSource { + if x != nil { + return x.TrustedCa + } + return nil +} + +func (x *CertificateValidationContext) GetCaCertificateProviderInstance() *CertificateProviderPluginInstance { + if x != nil { + return x.CaCertificateProviderInstance + } + return nil +} + +func (x *CertificateValidationContext) GetSystemRootCerts() *CertificateValidationContext_SystemRootCerts { + if x != nil { + return x.SystemRootCerts + } + return nil +} + +func (x *CertificateValidationContext) GetWatchedDirectory() *v3.WatchedDirectory { + if x != nil { + return x.WatchedDirectory + } + return nil +} + +func (x *CertificateValidationContext) GetVerifyCertificateSpki() []string { + if x != nil { + return x.VerifyCertificateSpki + } + return nil +} + +func (x *CertificateValidationContext) GetVerifyCertificateHash() []string { + if x != nil { + return x.VerifyCertificateHash + } + return nil +} + +func (x *CertificateValidationContext) GetMatchTypedSubjectAltNames() []*SubjectAltNameMatcher { + if x != nil { + return x.MatchTypedSubjectAltNames + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/common.proto. +func (x *CertificateValidationContext) GetMatchSubjectAltNames() []*v31.StringMatcher { + if x != nil { + return x.MatchSubjectAltNames + } + return nil +} + +func (x *CertificateValidationContext) GetRequireSignedCertificateTimestamp() *wrapperspb.BoolValue { + if x != nil { + return x.RequireSignedCertificateTimestamp + } + return nil +} + +func (x *CertificateValidationContext) GetCrl() *v3.DataSource { + if x != nil { + return x.Crl + } + return nil +} + +func (x *CertificateValidationContext) GetAllowExpiredCertificate() bool { + if x != nil { + return x.AllowExpiredCertificate + } + return false +} + +func (x *CertificateValidationContext) GetTrustChainVerification() CertificateValidationContext_TrustChainVerification { + if x != nil { + return x.TrustChainVerification + } + return CertificateValidationContext_VERIFY_TRUST_CHAIN +} + +func (x *CertificateValidationContext) GetCustomValidatorConfig() *v3.TypedExtensionConfig { + if x != nil { + return x.CustomValidatorConfig + } + return nil +} + +func (x *CertificateValidationContext) GetOnlyVerifyLeafCertCrl() bool { + if x != nil { + return x.OnlyVerifyLeafCertCrl + } + return false +} + +func (x *CertificateValidationContext) GetMaxVerifyDepth() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxVerifyDepth + } + return nil +} + +type CertificateValidationContext_SystemRootCerts struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CertificateValidationContext_SystemRootCerts) Reset() { + *x = CertificateValidationContext_SystemRootCerts{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CertificateValidationContext_SystemRootCerts) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CertificateValidationContext_SystemRootCerts) ProtoMessage() {} + +func (x *CertificateValidationContext_SystemRootCerts) ProtoReflect() protoreflect.Message { + mi := 
&file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CertificateValidationContext_SystemRootCerts.ProtoReflect.Descriptor instead. +func (*CertificateValidationContext_SystemRootCerts) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP(), []int{6, 0} +} + +var File_envoy_extensions_transport_sockets_tls_v3_common_proto protoreflect.FileDescriptor + +var file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDesc = []byte{ + 0x0a, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, + 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, + 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, + 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x22, 0xa5, 0x04, 0x0a, 0x0d, + 0x54, 0x6c, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x8f, 0x01, + 0x0a, 0x1c, 0x74, 0x6c, 0x73, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x44, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x6c, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x54, + 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, + 0x01, 0x02, 0x10, 0x01, 0x52, 0x19, 0x74, 0x6c, 0x73, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x8f, 0x01, 0x0a, 0x1c, 0x74, 0x6c, 0x73, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x44, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, + 0x76, 0x33, 0x2e, 0x54, 0x6c, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x2e, 0x54, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x19, 0x74, 0x6c, 0x73, 0x4d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x69, 0x74, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, + 0x53, 0x75, 0x69, 0x74, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x63, 0x64, 0x68, 0x5f, 0x63, + 0x75, 0x72, 0x76, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x63, 0x64, + 0x68, 0x43, 0x75, 0x72, 0x76, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x14, 0x73, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x73, 0x22, 0x4f, 0x0a, 0x0b, 0x54, 0x6c, + 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0c, 0x0a, 0x08, 0x54, 0x4c, 0x53, + 0x5f, 0x41, 0x55, 0x54, 0x4f, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x4c, 0x53, 0x76, 0x31, + 0x5f, 0x30, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x4c, 0x53, 0x76, 0x31, 0x5f, 0x31, 0x10, + 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x4c, 0x53, 0x76, 0x31, 0x5f, 0x32, 0x10, 0x03, 0x12, 0x0b, + 0x0a, 0x07, 0x54, 0x4c, 0x53, 0x76, 0x31, 0x5f, 0x33, 0x10, 0x04, 0x3a, 0x26, 0x9a, 0xc5, 0x88, + 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x54, 0x6c, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, + 0x65, 0x72, 0x73, 0x22, 0xeb, 0x01, 0x0a, 0x12, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, + 0x65, 0x79, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x2c, 0x0a, 0x0d, 
0x70, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x41, 0x6e, 0x79, 0x42, 0x06, 0xb8, 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x48, 0x00, 0x52, 0x0b, + 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x66, + 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x66, + 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, + 0x68, 0x2e, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x22, 0xc8, 0x05, 0x0a, 0x0e, 0x54, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x12, 0x4d, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, + 0x61, 0x69, 0x6e, 0x12, 0x49, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x06, 0xb8, 0xb7, 0x8b, 0xa4, + 0x02, 0x01, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x40, + 0x0a, 0x06, 0x70, 0x6b, 0x63, 0x73, 0x31, 0x32, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x42, 0x06, 0xb8, 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x52, 0x06, 0x70, 0x6b, 0x63, 0x73, 0x31, 0x32, + 0x12, 0x53, 0x0a, 0x11, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x52, 0x10, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x6f, 0x0a, 0x14, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 
0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, + 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, + 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x52, 0x12, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, + 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x06, 0xb8, 0xb7, 0x8b, 0xa4, + 0x02, 0x01, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x41, 0x0a, 0x0b, + 0x6f, 0x63, 0x73, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x70, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x0a, 0x6f, 0x63, 0x73, 0x70, 0x53, 0x74, 0x61, 0x70, 0x6c, 0x65, 0x12, + 0x62, 0x0a, 0x1c, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, + 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x1a, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x54, 0x6c, + 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0x8b, 0x01, 0x0a, + 0x14, 0x54, 0x6c, 0x73, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, + 0x74, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x44, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0xb8, + 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, + 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x54, 0x6c, 0x73, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x73, 0x0a, 0x21, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x5f, 0x6e, 0x61, 
0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, + 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, + 0xc6, 0x02, 0x0a, 0x15, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, + 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x6f, 0x0a, 0x08, 0x73, 0x61, 0x6e, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x48, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, + 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, + 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, + 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x53, 0x61, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x82, 0x01, 0x04, 0x10, 0x01, 0x20, + 0x00, 0x52, 0x07, 0x73, 0x61, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x48, 0x0a, 0x07, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6f, 0x69, 0x64, 0x22, 0x60, 0x0a, 0x07, 0x53, 0x61, 0x6e, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x41, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, + 0x4d, 0x41, 0x49, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x4e, 0x53, 0x10, 0x02, 0x12, + 0x07, 0x0a, 0x03, 0x55, 0x52, 0x49, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x50, 0x5f, 0x41, + 0x44, 0x44, 0x52, 0x45, 0x53, 0x53, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x4f, 0x54, 0x48, 0x45, + 0x52, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x10, 0x05, 0x22, 0xa9, 0x0d, 0x0a, 0x1c, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x57, 0x0a, 0x0a, 0x74, 0x72, 0x75, + 0x73, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, + 0x16, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x10, 0x12, 0x0e, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, + 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, + 0x43, 0x61, 0x12, 0xad, 0x01, 0x0a, 0x20, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 
0x64, 0x65, 0x72, 0x50, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x16, 0xf2, 0x98, 0xfe, + 0x8f, 0x05, 0x10, 0x12, 0x0e, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x1d, 0x63, 0x61, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x12, 0x83, 0x01, 0x0a, 0x11, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x72, 0x6f, + 0x6f, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x52, 0x6f, + 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x73, 0x52, 0x0f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x52, + 0x6f, 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x10, 0x77, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x46, 0x0a, + 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x6b, 0x69, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x0e, + 0xfa, 0x42, 0x0b, 0x92, 0x01, 0x08, 0x22, 0x06, 0x72, 0x04, 0x10, 0x2c, 0x28, 0x2c, 0x52, 0x15, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x53, 0x70, 0x6b, 0x69, 0x12, 0x46, 0x0a, 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, + 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x92, 0x01, 0x08, 0x22, 0x06, + 0x72, 0x04, 0x10, 0x40, 0x28, 0x5f, 0x52, 0x15, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, 0x61, 0x73, 0x68, 0x12, 0x82, 0x01, + 0x0a, 0x1d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x73, 0x75, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, + 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x19, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, + 0x70, 0x65, 0x64, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, + 0x65, 0x73, 0x12, 0x68, 0x0a, 0x17, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x75, 
0x62, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x09, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x14, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x75, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x6b, 0x0a, 0x24, + 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x63, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, + 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x21, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x53, + 0x69, 0x67, 0x6e, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x32, 0x0a, 0x03, 0x63, 0x72, 0x6c, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, + 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x03, 0x63, 0x72, 0x6c, 0x12, 0x3a, 0x0a, + 0x19, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x63, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x17, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x64, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0xa2, 0x01, 0x0a, 0x18, 0x74, 0x72, + 0x75, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x5e, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x16, 0x74, 0x72, 0x75, 0x73, 0x74, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x62, + 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 
0x12, 0x38, 0x0a, 0x19, 0x6f, 0x6e, 0x6c, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x63, 0x72, 0x6c, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x6f, 0x6e, 0x6c, 0x79, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x4c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x43, 0x72, 0x6c, 0x12, 0x4f, 0x0a, 0x10, + 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x64, 0x65, 0x70, 0x74, 0x68, + 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x0e, 0x6d, + 0x61, 0x78, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x44, 0x65, 0x70, 0x74, 0x68, 0x1a, 0x11, 0x0a, + 0x0f, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x52, 0x6f, 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x73, + 0x22, 0x46, 0x0a, 0x16, 0x54, 0x72, 0x75, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x45, + 0x52, 0x49, 0x46, 0x59, 0x5f, 0x54, 0x52, 0x55, 0x53, 0x54, 0x5f, 0x43, 0x48, 0x41, 0x49, 0x4e, + 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x41, 0x43, 0x43, 0x45, 0x50, 0x54, 0x5f, 0x55, 0x4e, 0x54, + 0x52, 0x55, 0x53, 0x54, 0x45, 0x44, 0x10, 0x01, 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, 0x30, 0x0a, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, + 0x74, 0x68, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4a, + 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x52, 0x17, 0x76, 0x65, 0x72, + 0x69, 0x66, 0x79, 0x5f, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x42, 0xa8, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, + 0x37, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x56, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, + 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x6c, 0x73, 0x76, 0x33, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescOnce sync.Once + file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescData = file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDesc +) + +func file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescGZIP() []byte { + file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescOnce.Do(func() { + 
file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescData) + }) + return file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDescData +} + +var file_envoy_extensions_transport_sockets_tls_v3_common_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_envoy_extensions_transport_sockets_tls_v3_common_proto_goTypes = []interface{}{ + (TlsParameters_TlsProtocol)(0), // 0: envoy.extensions.transport_sockets.tls.v3.TlsParameters.TlsProtocol + (SubjectAltNameMatcher_SanType)(0), // 1: envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher.SanType + (CertificateValidationContext_TrustChainVerification)(0), // 2: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.TrustChainVerification + (*TlsParameters)(nil), // 3: envoy.extensions.transport_sockets.tls.v3.TlsParameters + (*PrivateKeyProvider)(nil), // 4: envoy.extensions.transport_sockets.tls.v3.PrivateKeyProvider + (*TlsCertificate)(nil), // 5: envoy.extensions.transport_sockets.tls.v3.TlsCertificate + (*TlsSessionTicketKeys)(nil), // 6: envoy.extensions.transport_sockets.tls.v3.TlsSessionTicketKeys + (*CertificateProviderPluginInstance)(nil), // 7: envoy.extensions.transport_sockets.tls.v3.CertificateProviderPluginInstance + (*SubjectAltNameMatcher)(nil), // 8: envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher + (*CertificateValidationContext)(nil), // 9: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext + (*CertificateValidationContext_SystemRootCerts)(nil), // 10: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.SystemRootCerts + (*anypb.Any)(nil), // 11: google.protobuf.Any + (*v3.DataSource)(nil), // 12: envoy.config.core.v3.DataSource + (*v3.WatchedDirectory)(nil), // 13: envoy.config.core.v3.WatchedDirectory + (*v31.StringMatcher)(nil), // 14: envoy.type.matcher.v3.StringMatcher + (*wrapperspb.BoolValue)(nil), // 15: google.protobuf.BoolValue + (*v3.TypedExtensionConfig)(nil), // 16: envoy.config.core.v3.TypedExtensionConfig + (*wrapperspb.UInt32Value)(nil), // 17: google.protobuf.UInt32Value +} +var file_envoy_extensions_transport_sockets_tls_v3_common_proto_depIdxs = []int32{ + 0, // 0: envoy.extensions.transport_sockets.tls.v3.TlsParameters.tls_minimum_protocol_version:type_name -> envoy.extensions.transport_sockets.tls.v3.TlsParameters.TlsProtocol + 0, // 1: envoy.extensions.transport_sockets.tls.v3.TlsParameters.tls_maximum_protocol_version:type_name -> envoy.extensions.transport_sockets.tls.v3.TlsParameters.TlsProtocol + 11, // 2: envoy.extensions.transport_sockets.tls.v3.PrivateKeyProvider.typed_config:type_name -> google.protobuf.Any + 12, // 3: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.certificate_chain:type_name -> envoy.config.core.v3.DataSource + 12, // 4: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.private_key:type_name -> envoy.config.core.v3.DataSource + 12, // 5: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.pkcs12:type_name -> envoy.config.core.v3.DataSource + 13, // 6: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.watched_directory:type_name -> envoy.config.core.v3.WatchedDirectory + 4, // 7: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.private_key_provider:type_name -> envoy.extensions.transport_sockets.tls.v3.PrivateKeyProvider + 12, // 8: 
envoy.extensions.transport_sockets.tls.v3.TlsCertificate.password:type_name -> envoy.config.core.v3.DataSource + 12, // 9: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.ocsp_staple:type_name -> envoy.config.core.v3.DataSource + 12, // 10: envoy.extensions.transport_sockets.tls.v3.TlsCertificate.signed_certificate_timestamp:type_name -> envoy.config.core.v3.DataSource + 12, // 11: envoy.extensions.transport_sockets.tls.v3.TlsSessionTicketKeys.keys:type_name -> envoy.config.core.v3.DataSource + 1, // 12: envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher.san_type:type_name -> envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher.SanType + 14, // 13: envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher.matcher:type_name -> envoy.type.matcher.v3.StringMatcher + 12, // 14: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.trusted_ca:type_name -> envoy.config.core.v3.DataSource + 7, // 15: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.ca_certificate_provider_instance:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateProviderPluginInstance + 10, // 16: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.system_root_certs:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.SystemRootCerts + 13, // 17: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.watched_directory:type_name -> envoy.config.core.v3.WatchedDirectory + 8, // 18: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.match_typed_subject_alt_names:type_name -> envoy.extensions.transport_sockets.tls.v3.SubjectAltNameMatcher + 14, // 19: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.match_subject_alt_names:type_name -> envoy.type.matcher.v3.StringMatcher + 15, // 20: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.require_signed_certificate_timestamp:type_name -> google.protobuf.BoolValue + 12, // 21: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.crl:type_name -> envoy.config.core.v3.DataSource + 2, // 22: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.trust_chain_verification:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.TrustChainVerification + 16, // 23: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.custom_validator_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 17, // 24: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext.max_verify_depth:type_name -> google.protobuf.UInt32Value + 25, // [25:25] is the sub-list for method output_type + 25, // [25:25] is the sub-list for method input_type + 25, // [25:25] is the sub-list for extension type_name + 25, // [25:25] is the sub-list for extension extendee + 0, // [0:25] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_transport_sockets_tls_v3_common_proto_init() } +func file_envoy_extensions_transport_sockets_tls_v3_common_proto_init() { + if File_envoy_extensions_transport_sockets_tls_v3_common_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TlsParameters); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrivateKeyProvider); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TlsCertificate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TlsSessionTicketKeys); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CertificateProviderPluginInstance); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubjectAltNameMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CertificateValidationContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CertificateValidationContext_SystemRootCerts); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*PrivateKeyProvider_TypedConfig)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDesc, + NumEnums: 3, + NumMessages: 8, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_transport_sockets_tls_v3_common_proto_goTypes, + DependencyIndexes: file_envoy_extensions_transport_sockets_tls_v3_common_proto_depIdxs, + EnumInfos: file_envoy_extensions_transport_sockets_tls_v3_common_proto_enumTypes, + MessageInfos: file_envoy_extensions_transport_sockets_tls_v3_common_proto_msgTypes, + }.Build() + File_envoy_extensions_transport_sockets_tls_v3_common_proto = out.File + file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDesc = nil + file_envoy_extensions_transport_sockets_tls_v3_common_proto_goTypes = nil + file_envoy_extensions_transport_sockets_tls_v3_common_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.validate.go new file mode 100644 index 0000000000..c020641fbc --- /dev/null +++ 
b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.validate.go @@ -0,0 +1,1648 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/transport_sockets/tls/v3/common.proto + +package tlsv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on TlsParameters with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *TlsParameters) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TlsParameters with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in TlsParametersMultiError, or +// nil if none found. +func (m *TlsParameters) ValidateAll() error { + return m.validate(true) +} + +func (m *TlsParameters) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := TlsParameters_TlsProtocol_name[int32(m.GetTlsMinimumProtocolVersion())]; !ok { + err := TlsParametersValidationError{ + field: "TlsMinimumProtocolVersion", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if _, ok := TlsParameters_TlsProtocol_name[int32(m.GetTlsMaximumProtocolVersion())]; !ok { + err := TlsParametersValidationError{ + field: "TlsMaximumProtocolVersion", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return TlsParametersMultiError(errors) + } + + return nil +} + +// TlsParametersMultiError is an error wrapping multiple validation errors +// returned by TlsParameters.ValidateAll() if the designated constraints +// aren't met. +type TlsParametersMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TlsParametersMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TlsParametersMultiError) AllErrors() []error { return m } + +// TlsParametersValidationError is the validation error returned by +// TlsParameters.Validate if the designated constraints aren't met. +type TlsParametersValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TlsParametersValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TlsParametersValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TlsParametersValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TlsParametersValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e TlsParametersValidationError) ErrorName() string { return "TlsParametersValidationError" } + +// Error satisfies the builtin error interface +func (e TlsParametersValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTlsParameters.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TlsParametersValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TlsParametersValidationError{} + +// Validate checks the field values on PrivateKeyProvider with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *PrivateKeyProvider) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on PrivateKeyProvider with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// PrivateKeyProviderMultiError, or nil if none found. +func (m *PrivateKeyProvider) ValidateAll() error { + return m.validate(true) +} + +func (m *PrivateKeyProvider) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetProviderName()) < 1 { + err := PrivateKeyProviderValidationError{ + field: "ProviderName", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for Fallback + + switch v := m.ConfigType.(type) { + case *PrivateKeyProvider_TypedConfig: + if v == nil { + err := PrivateKeyProviderValidationError{ + field: "ConfigType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PrivateKeyProviderValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PrivateKeyProviderValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PrivateKeyProviderValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return PrivateKeyProviderMultiError(errors) + } + + return nil +} + +// PrivateKeyProviderMultiError is an error wrapping multiple validation errors +// returned by PrivateKeyProvider.ValidateAll() if the designated constraints +// aren't met. +type PrivateKeyProviderMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PrivateKeyProviderMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m PrivateKeyProviderMultiError) AllErrors() []error { return m } + +// PrivateKeyProviderValidationError is the validation error returned by +// PrivateKeyProvider.Validate if the designated constraints aren't met. +type PrivateKeyProviderValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PrivateKeyProviderValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PrivateKeyProviderValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PrivateKeyProviderValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PrivateKeyProviderValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e PrivateKeyProviderValidationError) ErrorName() string { + return "PrivateKeyProviderValidationError" +} + +// Error satisfies the builtin error interface +func (e PrivateKeyProviderValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPrivateKeyProvider.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PrivateKeyProviderValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PrivateKeyProviderValidationError{} + +// Validate checks the field values on TlsCertificate with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *TlsCertificate) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TlsCertificate with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in TlsCertificateMultiError, +// or nil if none found. 
+func (m *TlsCertificate) ValidateAll() error { + return m.validate(true) +} + +func (m *TlsCertificate) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetCertificateChain()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "CertificateChain", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "CertificateChain", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCertificateChain()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TlsCertificateValidationError{ + field: "CertificateChain", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPrivateKey()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "PrivateKey", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "PrivateKey", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPrivateKey()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TlsCertificateValidationError{ + field: "PrivateKey", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPkcs12()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "Pkcs12", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "Pkcs12", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPkcs12()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TlsCertificateValidationError{ + field: "Pkcs12", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetWatchedDirectory()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWatchedDirectory()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TlsCertificateValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPrivateKeyProvider()).(type) { + case interface{ ValidateAll() error }: + if err := 
v.ValidateAll(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "PrivateKeyProvider", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "PrivateKeyProvider", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPrivateKeyProvider()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TlsCertificateValidationError{ + field: "PrivateKeyProvider", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPassword()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "Password", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "Password", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPassword()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TlsCertificateValidationError{ + field: "Password", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetOcspStaple()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "OcspStaple", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: "OcspStaple", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOcspStaple()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TlsCertificateValidationError{ + field: "OcspStaple", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetSignedCertificateTimestamp() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: fmt.Sprintf("SignedCertificateTimestamp[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TlsCertificateValidationError{ + field: fmt.Sprintf("SignedCertificateTimestamp[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TlsCertificateValidationError{ + field: fmt.Sprintf("SignedCertificateTimestamp[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return TlsCertificateMultiError(errors) + } + + return nil +} + +// TlsCertificateMultiError is an error wrapping multiple validation errors +// returned by TlsCertificate.ValidateAll() if the designated constraints +// aren't met. 
+type TlsCertificateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TlsCertificateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TlsCertificateMultiError) AllErrors() []error { return m } + +// TlsCertificateValidationError is the validation error returned by +// TlsCertificate.Validate if the designated constraints aren't met. +type TlsCertificateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TlsCertificateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TlsCertificateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TlsCertificateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TlsCertificateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TlsCertificateValidationError) ErrorName() string { return "TlsCertificateValidationError" } + +// Error satisfies the builtin error interface +func (e TlsCertificateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTlsCertificate.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TlsCertificateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TlsCertificateValidationError{} + +// Validate checks the field values on TlsSessionTicketKeys with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *TlsSessionTicketKeys) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TlsSessionTicketKeys with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// TlsSessionTicketKeysMultiError, or nil if none found. 
+func (m *TlsSessionTicketKeys) ValidateAll() error { + return m.validate(true) +} + +func (m *TlsSessionTicketKeys) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetKeys()) < 1 { + err := TlsSessionTicketKeysValidationError{ + field: "Keys", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetKeys() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TlsSessionTicketKeysValidationError{ + field: fmt.Sprintf("Keys[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TlsSessionTicketKeysValidationError{ + field: fmt.Sprintf("Keys[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TlsSessionTicketKeysValidationError{ + field: fmt.Sprintf("Keys[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return TlsSessionTicketKeysMultiError(errors) + } + + return nil +} + +// TlsSessionTicketKeysMultiError is an error wrapping multiple validation +// errors returned by TlsSessionTicketKeys.ValidateAll() if the designated +// constraints aren't met. +type TlsSessionTicketKeysMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TlsSessionTicketKeysMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TlsSessionTicketKeysMultiError) AllErrors() []error { return m } + +// TlsSessionTicketKeysValidationError is the validation error returned by +// TlsSessionTicketKeys.Validate if the designated constraints aren't met. +type TlsSessionTicketKeysValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TlsSessionTicketKeysValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TlsSessionTicketKeysValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TlsSessionTicketKeysValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TlsSessionTicketKeysValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e TlsSessionTicketKeysValidationError) ErrorName() string { + return "TlsSessionTicketKeysValidationError" +} + +// Error satisfies the builtin error interface +func (e TlsSessionTicketKeysValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTlsSessionTicketKeys.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TlsSessionTicketKeysValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TlsSessionTicketKeysValidationError{} + +// Validate checks the field values on CertificateProviderPluginInstance with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *CertificateProviderPluginInstance) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CertificateProviderPluginInstance +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// CertificateProviderPluginInstanceMultiError, or nil if none found. +func (m *CertificateProviderPluginInstance) ValidateAll() error { + return m.validate(true) +} + +func (m *CertificateProviderPluginInstance) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for InstanceName + + // no validation rules for CertificateName + + if len(errors) > 0 { + return CertificateProviderPluginInstanceMultiError(errors) + } + + return nil +} + +// CertificateProviderPluginInstanceMultiError is an error wrapping multiple +// validation errors returned by +// CertificateProviderPluginInstance.ValidateAll() if the designated +// constraints aren't met. +type CertificateProviderPluginInstanceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CertificateProviderPluginInstanceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CertificateProviderPluginInstanceMultiError) AllErrors() []error { return m } + +// CertificateProviderPluginInstanceValidationError is the validation error +// returned by CertificateProviderPluginInstance.Validate if the designated +// constraints aren't met. +type CertificateProviderPluginInstanceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CertificateProviderPluginInstanceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CertificateProviderPluginInstanceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CertificateProviderPluginInstanceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CertificateProviderPluginInstanceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e CertificateProviderPluginInstanceValidationError) ErrorName() string { + return "CertificateProviderPluginInstanceValidationError" +} + +// Error satisfies the builtin error interface +func (e CertificateProviderPluginInstanceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCertificateProviderPluginInstance.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CertificateProviderPluginInstanceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CertificateProviderPluginInstanceValidationError{} + +// Validate checks the field values on SubjectAltNameMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *SubjectAltNameMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SubjectAltNameMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SubjectAltNameMatcherMultiError, or nil if none found. +func (m *SubjectAltNameMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *SubjectAltNameMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := _SubjectAltNameMatcher_SanType_NotInLookup[m.GetSanType()]; ok { + err := SubjectAltNameMatcherValidationError{ + field: "SanType", + reason: "value must not be in list [SAN_TYPE_UNSPECIFIED]", + } + if !all { + return err + } + errors = append(errors, err) + } + + if _, ok := SubjectAltNameMatcher_SanType_name[int32(m.GetSanType())]; !ok { + err := SubjectAltNameMatcherValidationError{ + field: "SanType", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetMatcher() == nil { + err := SubjectAltNameMatcherValidationError{ + field: "Matcher", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetMatcher()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SubjectAltNameMatcherValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SubjectAltNameMatcherValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMatcher()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SubjectAltNameMatcherValidationError{ + field: "Matcher", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Oid + + if len(errors) > 0 { + return SubjectAltNameMatcherMultiError(errors) + } + + return nil +} + +// SubjectAltNameMatcherMultiError is an error wrapping multiple validation +// errors returned by SubjectAltNameMatcher.ValidateAll() if the designated +// constraints aren't met. +type SubjectAltNameMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m SubjectAltNameMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SubjectAltNameMatcherMultiError) AllErrors() []error { return m } + +// SubjectAltNameMatcherValidationError is the validation error returned by +// SubjectAltNameMatcher.Validate if the designated constraints aren't met. +type SubjectAltNameMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SubjectAltNameMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SubjectAltNameMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SubjectAltNameMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SubjectAltNameMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SubjectAltNameMatcherValidationError) ErrorName() string { + return "SubjectAltNameMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e SubjectAltNameMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSubjectAltNameMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SubjectAltNameMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SubjectAltNameMatcherValidationError{} + +var _SubjectAltNameMatcher_SanType_NotInLookup = map[SubjectAltNameMatcher_SanType]struct{}{ + 0: {}, +} + +// Validate checks the field values on CertificateValidationContext with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CertificateValidationContext) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CertificateValidationContext with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CertificateValidationContextMultiError, or nil if none found. 
+func (m *CertificateValidationContext) ValidateAll() error { + return m.validate(true) +} + +func (m *CertificateValidationContext) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetTrustedCa()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "TrustedCa", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "TrustedCa", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTrustedCa()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateValidationContextValidationError{ + field: "TrustedCa", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCaCertificateProviderInstance()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "CaCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "CaCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCaCertificateProviderInstance()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateValidationContextValidationError{ + field: "CaCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSystemRootCerts()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "SystemRootCerts", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "SystemRootCerts", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSystemRootCerts()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateValidationContextValidationError{ + field: "SystemRootCerts", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetWatchedDirectory()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWatchedDirectory()).(interface{ Validate() error }); ok { + if err := v.Validate(); 
err != nil { + return CertificateValidationContextValidationError{ + field: "WatchedDirectory", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetVerifyCertificateSpki() { + _, _ = idx, item + + if utf8.RuneCountInString(item) < 44 { + err := CertificateValidationContextValidationError{ + field: fmt.Sprintf("VerifyCertificateSpki[%v]", idx), + reason: "value length must be at least 44 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(item) > 44 { + err := CertificateValidationContextValidationError{ + field: fmt.Sprintf("VerifyCertificateSpki[%v]", idx), + reason: "value length must be at most 44 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + for idx, item := range m.GetVerifyCertificateHash() { + _, _ = idx, item + + if utf8.RuneCountInString(item) < 64 { + err := CertificateValidationContextValidationError{ + field: fmt.Sprintf("VerifyCertificateHash[%v]", idx), + reason: "value length must be at least 64 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(item) > 95 { + err := CertificateValidationContextValidationError{ + field: fmt.Sprintf("VerifyCertificateHash[%v]", idx), + reason: "value length must be at most 95 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + for idx, item := range m.GetMatchTypedSubjectAltNames() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: fmt.Sprintf("MatchTypedSubjectAltNames[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: fmt.Sprintf("MatchTypedSubjectAltNames[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateValidationContextValidationError{ + field: fmt.Sprintf("MatchTypedSubjectAltNames[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetMatchSubjectAltNames() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: fmt.Sprintf("MatchSubjectAltNames[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: fmt.Sprintf("MatchSubjectAltNames[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateValidationContextValidationError{ + field: fmt.Sprintf("MatchSubjectAltNames[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetRequireSignedCertificateTimestamp()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); 
err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "RequireSignedCertificateTimestamp", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "RequireSignedCertificateTimestamp", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequireSignedCertificateTimestamp()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateValidationContextValidationError{ + field: "RequireSignedCertificateTimestamp", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCrl()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "Crl", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "Crl", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCrl()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateValidationContextValidationError{ + field: "Crl", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for AllowExpiredCertificate + + if _, ok := CertificateValidationContext_TrustChainVerification_name[int32(m.GetTrustChainVerification())]; !ok { + err := CertificateValidationContextValidationError{ + field: "TrustChainVerification", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetCustomValidatorConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "CustomValidatorConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateValidationContextValidationError{ + field: "CustomValidatorConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCustomValidatorConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateValidationContextValidationError{ + field: "CustomValidatorConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for OnlyVerifyLeafCertCrl + + if wrapper := m.GetMaxVerifyDepth(); wrapper != nil { + + if wrapper.GetValue() > 100 { + err := CertificateValidationContextValidationError{ + field: "MaxVerifyDepth", + reason: "value must be less than or equal to 100", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return CertificateValidationContextMultiError(errors) + } + + return nil +} + +// CertificateValidationContextMultiError is an error wrapping multiple +// validation errors returned by CertificateValidationContext.ValidateAll() if +// the designated constraints aren't met. 
+type CertificateValidationContextMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CertificateValidationContextMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CertificateValidationContextMultiError) AllErrors() []error { return m } + +// CertificateValidationContextValidationError is the validation error returned +// by CertificateValidationContext.Validate if the designated constraints +// aren't met. +type CertificateValidationContextValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CertificateValidationContextValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CertificateValidationContextValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CertificateValidationContextValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CertificateValidationContextValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CertificateValidationContextValidationError) ErrorName() string { + return "CertificateValidationContextValidationError" +} + +// Error satisfies the builtin error interface +func (e CertificateValidationContextValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCertificateValidationContext.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CertificateValidationContextValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CertificateValidationContextValidationError{} + +// Validate checks the field values on +// CertificateValidationContext_SystemRootCerts with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *CertificateValidationContext_SystemRootCerts) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// CertificateValidationContext_SystemRootCerts with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// CertificateValidationContext_SystemRootCertsMultiError, or nil if none found. +func (m *CertificateValidationContext_SystemRootCerts) ValidateAll() error { + return m.validate(true) +} + +func (m *CertificateValidationContext_SystemRootCerts) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return CertificateValidationContext_SystemRootCertsMultiError(errors) + } + + return nil +} + +// CertificateValidationContext_SystemRootCertsMultiError is an error wrapping +// multiple validation errors returned by +// CertificateValidationContext_SystemRootCerts.ValidateAll() if the +// designated constraints aren't met. +type CertificateValidationContext_SystemRootCertsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m CertificateValidationContext_SystemRootCertsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CertificateValidationContext_SystemRootCertsMultiError) AllErrors() []error { return m } + +// CertificateValidationContext_SystemRootCertsValidationError is the +// validation error returned by +// CertificateValidationContext_SystemRootCerts.Validate if the designated +// constraints aren't met. +type CertificateValidationContext_SystemRootCertsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CertificateValidationContext_SystemRootCertsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CertificateValidationContext_SystemRootCertsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CertificateValidationContext_SystemRootCertsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CertificateValidationContext_SystemRootCertsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CertificateValidationContext_SystemRootCertsValidationError) ErrorName() string { + return "CertificateValidationContext_SystemRootCertsValidationError" +} + +// Error satisfies the builtin error interface +func (e CertificateValidationContext_SystemRootCertsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCertificateValidationContext_SystemRootCerts.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CertificateValidationContext_SystemRootCertsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CertificateValidationContext_SystemRootCertsValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common_vtproto.pb.go new file mode 100644 index 0000000000..00d5573d93 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common_vtproto.pb.go @@ -0,0 +1,1155 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/transport_sockets/tls/v3/common.proto + +package tlsv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TlsParameters) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TlsParameters) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TlsParameters) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.SignatureAlgorithms) > 0 { + for iNdEx := len(m.SignatureAlgorithms) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SignatureAlgorithms[iNdEx]) + copy(dAtA[i:], m.SignatureAlgorithms[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SignatureAlgorithms[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.EcdhCurves) > 0 { + for iNdEx := len(m.EcdhCurves) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.EcdhCurves[iNdEx]) + copy(dAtA[i:], m.EcdhCurves[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.EcdhCurves[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.CipherSuites) > 0 { + for iNdEx := len(m.CipherSuites) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.CipherSuites[iNdEx]) + copy(dAtA[i:], m.CipherSuites[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CipherSuites[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.TlsMaximumProtocolVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TlsMaximumProtocolVersion)) + i-- + dAtA[i] = 0x10 + } + if m.TlsMinimumProtocolVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TlsMinimumProtocolVersion)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *PrivateKeyProvider) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PrivateKeyProvider) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PrivateKeyProvider) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Fallback { + i-- + if m.Fallback { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if msg, ok := m.ConfigType.(*PrivateKeyProvider_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.ProviderName) > 0 { + i -= len(m.ProviderName) + copy(dAtA[i:], m.ProviderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ProviderName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PrivateKeyProvider_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PrivateKeyProvider_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { 
+ return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *TlsCertificate) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TlsCertificate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TlsCertificate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Pkcs12 != nil { + if vtmsg, ok := interface{}(m.Pkcs12).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Pkcs12) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + if m.WatchedDirectory != nil { + if vtmsg, ok := interface{}(m.WatchedDirectory).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.WatchedDirectory) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + if m.PrivateKeyProvider != nil { + size, err := m.PrivateKeyProvider.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.SignedCertificateTimestamp) > 0 { + for iNdEx := len(m.SignedCertificateTimestamp) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.SignedCertificateTimestamp[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SignedCertificateTimestamp[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + } + if m.OcspStaple != nil { + if vtmsg, ok := interface{}(m.OcspStaple).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.OcspStaple) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.Password != nil { + if vtmsg, ok := 
interface{}(m.Password).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Password) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + if m.PrivateKey != nil { + if vtmsg, ok := interface{}(m.PrivateKey).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.PrivateKey) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.CertificateChain != nil { + if vtmsg, ok := interface{}(m.CertificateChain).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CertificateChain) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TlsSessionTicketKeys) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TlsSessionTicketKeys) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TlsSessionTicketKeys) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Keys) > 0 { + for iNdEx := len(m.Keys) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Keys[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Keys[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CertificateProviderPluginInstance) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CertificateProviderPluginInstance) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CertificateProviderPluginInstance) MarshalToSizedBufferVTStrict(dAtA []byte) (int, 
error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CertificateName) > 0 { + i -= len(m.CertificateName) + copy(dAtA[i:], m.CertificateName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CertificateName))) + i-- + dAtA[i] = 0x12 + } + if len(m.InstanceName) > 0 { + i -= len(m.InstanceName) + copy(dAtA[i:], m.InstanceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.InstanceName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SubjectAltNameMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectAltNameMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SubjectAltNameMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Oid) > 0 { + i -= len(m.Oid) + copy(dAtA[i:], m.Oid) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Oid))) + i-- + dAtA[i] = 0x1a + } + if m.Matcher != nil { + if vtmsg, ok := interface{}(m.Matcher).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Matcher) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.SanType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.SanType)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CertificateValidationContext_SystemRootCerts) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CertificateValidationContext_SystemRootCerts) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CertificateValidationContext_SystemRootCerts) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *CertificateValidationContext) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CertificateValidationContext) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CertificateValidationContext) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, 
nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SystemRootCerts != nil { + size, err := m.SystemRootCerts.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.MaxVerifyDepth != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxVerifyDepth).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if len(m.MatchTypedSubjectAltNames) > 0 { + for iNdEx := len(m.MatchTypedSubjectAltNames) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.MatchTypedSubjectAltNames[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + } + if m.OnlyVerifyLeafCertCrl { + i-- + if m.OnlyVerifyLeafCertCrl { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + } + if m.CaCertificateProviderInstance != nil { + size, err := m.CaCertificateProviderInstance.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + if m.CustomValidatorConfig != nil { + if vtmsg, ok := interface{}(m.CustomValidatorConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CustomValidatorConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x62 + } + if m.WatchedDirectory != nil { + if vtmsg, ok := interface{}(m.WatchedDirectory).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.WatchedDirectory) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x5a + } + if m.TrustChainVerification != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TrustChainVerification)) + i-- + dAtA[i] = 0x50 + } + if len(m.MatchSubjectAltNames) > 0 { + for iNdEx := len(m.MatchSubjectAltNames) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.MatchSubjectAltNames[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MatchSubjectAltNames[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + } + if m.AllowExpiredCertificate { + i-- + if m.AllowExpiredCertificate { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.Crl != nil { + if 
vtmsg, ok := interface{}(m.Crl).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Crl) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + if m.RequireSignedCertificateTimestamp != nil { + size, err := (*wrapperspb.BoolValue)(m.RequireSignedCertificateTimestamp).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.VerifyCertificateSpki) > 0 { + for iNdEx := len(m.VerifyCertificateSpki) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.VerifyCertificateSpki[iNdEx]) + copy(dAtA[i:], m.VerifyCertificateSpki[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VerifyCertificateSpki[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.VerifyCertificateHash) > 0 { + for iNdEx := len(m.VerifyCertificateHash) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.VerifyCertificateHash[iNdEx]) + copy(dAtA[i:], m.VerifyCertificateHash[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VerifyCertificateHash[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.TrustedCa != nil { + if vtmsg, ok := interface{}(m.TrustedCa).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TrustedCa) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TlsParameters) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TlsMinimumProtocolVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TlsMinimumProtocolVersion)) + } + if m.TlsMaximumProtocolVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TlsMaximumProtocolVersion)) + } + if len(m.CipherSuites) > 0 { + for _, s := range m.CipherSuites { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.EcdhCurves) > 0 { + for _, s := range m.EcdhCurves { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.SignatureAlgorithms) > 0 { + for _, s := range m.SignatureAlgorithms { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *PrivateKeyProvider) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProviderName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Fallback { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *PrivateKeyProvider_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + l = (*anypb.Any)(m.TypedConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *TlsCertificate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + 
if m.CertificateChain != nil { + if size, ok := interface{}(m.CertificateChain).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CertificateChain) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.PrivateKey != nil { + if size, ok := interface{}(m.PrivateKey).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.PrivateKey) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Password != nil { + if size, ok := interface{}(m.Password).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Password) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OcspStaple != nil { + if size, ok := interface{}(m.OcspStaple).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.OcspStaple) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.SignedCertificateTimestamp) > 0 { + for _, e := range m.SignedCertificateTimestamp { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.PrivateKeyProvider != nil { + l = m.PrivateKeyProvider.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.WatchedDirectory != nil { + if size, ok := interface{}(m.WatchedDirectory).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.WatchedDirectory) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Pkcs12 != nil { + if size, ok := interface{}(m.Pkcs12).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Pkcs12) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *TlsSessionTicketKeys) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Keys) > 0 { + for _, e := range m.Keys { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *CertificateProviderPluginInstance) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.InstanceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.CertificateName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SubjectAltNameMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SanType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.SanType)) + } + if m.Matcher != nil { + if size, ok := interface{}(m.Matcher).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Matcher) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Oid) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CertificateValidationContext_SystemRootCerts) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *CertificateValidationContext) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TrustedCa != nil { + if size, ok := interface{}(m.TrustedCa).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = 
proto.Size(m.TrustedCa) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.VerifyCertificateHash) > 0 { + for _, s := range m.VerifyCertificateHash { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.VerifyCertificateSpki) > 0 { + for _, s := range m.VerifyCertificateSpki { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.RequireSignedCertificateTimestamp != nil { + l = (*wrapperspb.BoolValue)(m.RequireSignedCertificateTimestamp).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Crl != nil { + if size, ok := interface{}(m.Crl).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Crl) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AllowExpiredCertificate { + n += 2 + } + if len(m.MatchSubjectAltNames) > 0 { + for _, e := range m.MatchSubjectAltNames { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.TrustChainVerification != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TrustChainVerification)) + } + if m.WatchedDirectory != nil { + if size, ok := interface{}(m.WatchedDirectory).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.WatchedDirectory) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CustomValidatorConfig != nil { + if size, ok := interface{}(m.CustomValidatorConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CustomValidatorConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CaCertificateProviderInstance != nil { + l = m.CaCertificateProviderInstance.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OnlyVerifyLeafCertCrl { + n += 2 + } + if len(m.MatchTypedSubjectAltNames) > 0 { + for _, e := range m.MatchTypedSubjectAltNames { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.MaxVerifyDepth != nil { + l = (*wrapperspb.UInt32Value)(m.MaxVerifyDepth).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SystemRootCerts != nil { + l = m.SystemRootCerts.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.go new file mode 100644 index 0000000000..cf919ed971 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.go @@ -0,0 +1,444 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/transport_sockets/tls/v3/secret.proto + +package tlsv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type GenericSecret struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Secret of generic type and is available to filters. + Secret *v3.DataSource `protobuf:"bytes,1,opt,name=secret,proto3" json:"secret,omitempty"` +} + +func (x *GenericSecret) Reset() { + *x = GenericSecret{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_secret_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GenericSecret) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GenericSecret) ProtoMessage() {} + +func (x *GenericSecret) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_secret_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GenericSecret.ProtoReflect.Descriptor instead. +func (*GenericSecret) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDescGZIP(), []int{0} +} + +func (x *GenericSecret) GetSecret() *v3.DataSource { + if x != nil { + return x.Secret + } + return nil +} + +type SdsSecretConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name by which the secret can be uniquely referred to. When both name and config are specified, + // then secret can be fetched and/or reloaded via SDS. When only name is specified, then secret + // will be loaded from static resources. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + SdsConfig *v3.ConfigSource `protobuf:"bytes,2,opt,name=sds_config,json=sdsConfig,proto3" json:"sds_config,omitempty"` +} + +func (x *SdsSecretConfig) Reset() { + *x = SdsSecretConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_secret_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SdsSecretConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SdsSecretConfig) ProtoMessage() {} + +func (x *SdsSecretConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_secret_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SdsSecretConfig.ProtoReflect.Descriptor instead. +func (*SdsSecretConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDescGZIP(), []int{1} +} + +func (x *SdsSecretConfig) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SdsSecretConfig) GetSdsConfig() *v3.ConfigSource { + if x != nil { + return x.SdsConfig + } + return nil +} + +// [#next-free-field: 6] +type Secret struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to Type: + // + // *Secret_TlsCertificate + // *Secret_SessionTicketKeys + // *Secret_ValidationContext + // *Secret_GenericSecret + Type isSecret_Type `protobuf_oneof:"type"` +} + +func (x *Secret) Reset() { + *x = Secret{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_secret_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Secret) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Secret) ProtoMessage() {} + +func (x *Secret) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_secret_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Secret.ProtoReflect.Descriptor instead. +func (*Secret) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDescGZIP(), []int{2} +} + +func (x *Secret) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *Secret) GetType() isSecret_Type { + if m != nil { + return m.Type + } + return nil +} + +func (x *Secret) GetTlsCertificate() *TlsCertificate { + if x, ok := x.GetType().(*Secret_TlsCertificate); ok { + return x.TlsCertificate + } + return nil +} + +func (x *Secret) GetSessionTicketKeys() *TlsSessionTicketKeys { + if x, ok := x.GetType().(*Secret_SessionTicketKeys); ok { + return x.SessionTicketKeys + } + return nil +} + +func (x *Secret) GetValidationContext() *CertificateValidationContext { + if x, ok := x.GetType().(*Secret_ValidationContext); ok { + return x.ValidationContext + } + return nil +} + +func (x *Secret) GetGenericSecret() *GenericSecret { + if x, ok := x.GetType().(*Secret_GenericSecret); ok { + return x.GenericSecret + } + return nil +} + +type isSecret_Type interface { + isSecret_Type() +} + +type Secret_TlsCertificate struct { + TlsCertificate *TlsCertificate `protobuf:"bytes,2,opt,name=tls_certificate,json=tlsCertificate,proto3,oneof"` +} + +type Secret_SessionTicketKeys struct { + SessionTicketKeys *TlsSessionTicketKeys `protobuf:"bytes,3,opt,name=session_ticket_keys,json=sessionTicketKeys,proto3,oneof"` +} + +type Secret_ValidationContext struct { + ValidationContext *CertificateValidationContext `protobuf:"bytes,4,opt,name=validation_context,json=validationContext,proto3,oneof"` +} + +type Secret_GenericSecret struct { + GenericSecret *GenericSecret `protobuf:"bytes,5,opt,name=generic_secret,json=genericSecret,proto3,oneof"` +} + +func (*Secret_TlsCertificate) isSecret_Type() {} + +func (*Secret_SessionTicketKeys) isSecret_Type() {} + +func (*Secret_ValidationContext) isSecret_Type() {} + +func (*Secret_GenericSecret) isSecret_Type() {} + +var File_envoy_extensions_transport_sockets_tls_v3_secret_proto protoreflect.FileDescriptor + +var file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDesc = []byte{ + 0x0a, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x29, 0x65, 0x6e, 0x76, 
0x6f, 0x79, 0x2e, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, + 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, + 0x76, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x79, 0x0a, 0x0d, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x12, 0x40, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x06, 0xb8, 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x52, 0x06, + 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, + 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x22, 0x9b, + 0x01, 0x0a, 0x0f, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x41, 0x0a, 0x0a, 0x73, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x73, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x53, 0x64, 0x73, + 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xfb, 0x03, 0x0a, + 0x06, 
0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x64, 0x0a, 0x0f, 0x74, + 0x6c, 0x73, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, + 0x00, 0x52, 0x0e, 0x74, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x12, 0x71, 0x0a, 0x13, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x63, + 0x6b, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x6c, 0x73, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x48, + 0x00, 0x52, 0x11, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, + 0x4b, 0x65, 0x79, 0x73, 0x12, 0x78, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, + 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x48, 0x00, 0x52, 0x11, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x61, + 0x0a, 0x0e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, + 0x76, 0x33, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x48, 0x00, 0x52, 0x0d, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x3a, 0x1f, 0x9a, 0xc5, 0x88, 0x1e, 0x1a, 0x0a, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x53, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x42, 0xa8, 0x01, 0xba, 0x80, 0xc8, + 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x37, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, + 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x0b, + 0x53, 0x65, 0x63, 0x72, 0x65, 
0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x56, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, + 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x3b, + 0x74, 0x6c, 0x73, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDescOnce sync.Once + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDescData = file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDesc +) + +func file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDescGZIP() []byte { + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDescOnce.Do(func() { + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDescData) + }) + return file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDescData +} + +var file_envoy_extensions_transport_sockets_tls_v3_secret_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_envoy_extensions_transport_sockets_tls_v3_secret_proto_goTypes = []interface{}{ + (*GenericSecret)(nil), // 0: envoy.extensions.transport_sockets.tls.v3.GenericSecret + (*SdsSecretConfig)(nil), // 1: envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig + (*Secret)(nil), // 2: envoy.extensions.transport_sockets.tls.v3.Secret + (*v3.DataSource)(nil), // 3: envoy.config.core.v3.DataSource + (*v3.ConfigSource)(nil), // 4: envoy.config.core.v3.ConfigSource + (*TlsCertificate)(nil), // 5: envoy.extensions.transport_sockets.tls.v3.TlsCertificate + (*TlsSessionTicketKeys)(nil), // 6: envoy.extensions.transport_sockets.tls.v3.TlsSessionTicketKeys + (*CertificateValidationContext)(nil), // 7: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext +} +var file_envoy_extensions_transport_sockets_tls_v3_secret_proto_depIdxs = []int32{ + 3, // 0: envoy.extensions.transport_sockets.tls.v3.GenericSecret.secret:type_name -> envoy.config.core.v3.DataSource + 4, // 1: envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig.sds_config:type_name -> envoy.config.core.v3.ConfigSource + 5, // 2: envoy.extensions.transport_sockets.tls.v3.Secret.tls_certificate:type_name -> envoy.extensions.transport_sockets.tls.v3.TlsCertificate + 6, // 3: envoy.extensions.transport_sockets.tls.v3.Secret.session_ticket_keys:type_name -> envoy.extensions.transport_sockets.tls.v3.TlsSessionTicketKeys + 7, // 4: envoy.extensions.transport_sockets.tls.v3.Secret.validation_context:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext + 0, // 5: envoy.extensions.transport_sockets.tls.v3.Secret.generic_secret:type_name -> envoy.extensions.transport_sockets.tls.v3.GenericSecret + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_transport_sockets_tls_v3_secret_proto_init() } +func 
file_envoy_extensions_transport_sockets_tls_v3_secret_proto_init() { + if File_envoy_extensions_transport_sockets_tls_v3_secret_proto != nil { + return + } + file_envoy_extensions_transport_sockets_tls_v3_common_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GenericSecret); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SdsSecretConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Secret); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Secret_TlsCertificate)(nil), + (*Secret_SessionTicketKeys)(nil), + (*Secret_ValidationContext)(nil), + (*Secret_GenericSecret)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_transport_sockets_tls_v3_secret_proto_goTypes, + DependencyIndexes: file_envoy_extensions_transport_sockets_tls_v3_secret_proto_depIdxs, + MessageInfos: file_envoy_extensions_transport_sockets_tls_v3_secret_proto_msgTypes, + }.Build() + File_envoy_extensions_transport_sockets_tls_v3_secret_proto = out.File + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_rawDesc = nil + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_goTypes = nil + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.validate.go new file mode 100644 index 0000000000..913c549220 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret.pb.validate.go @@ -0,0 +1,575 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/transport_sockets/tls/v3/secret.proto + +package tlsv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on GenericSecret with the rules defined in +// the proto definition for this message. 
If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *GenericSecret) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GenericSecret with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in GenericSecretMultiError, or +// nil if none found. +func (m *GenericSecret) ValidateAll() error { + return m.validate(true) +} + +func (m *GenericSecret) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetSecret()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GenericSecretValidationError{ + field: "Secret", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GenericSecretValidationError{ + field: "Secret", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSecret()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GenericSecretValidationError{ + field: "Secret", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return GenericSecretMultiError(errors) + } + + return nil +} + +// GenericSecretMultiError is an error wrapping multiple validation errors +// returned by GenericSecret.ValidateAll() if the designated constraints +// aren't met. +type GenericSecretMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GenericSecretMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GenericSecretMultiError) AllErrors() []error { return m } + +// GenericSecretValidationError is the validation error returned by +// GenericSecret.Validate if the designated constraints aren't met. +type GenericSecretValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GenericSecretValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GenericSecretValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GenericSecretValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GenericSecretValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e GenericSecretValidationError) ErrorName() string { return "GenericSecretValidationError" } + +// Error satisfies the builtin error interface +func (e GenericSecretValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGenericSecret.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GenericSecretValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GenericSecretValidationError{} + +// Validate checks the field values on SdsSecretConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *SdsSecretConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SdsSecretConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SdsSecretConfigMultiError, or nil if none found. +func (m *SdsSecretConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *SdsSecretConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := SdsSecretConfigValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetSdsConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SdsSecretConfigValidationError{ + field: "SdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SdsSecretConfigValidationError{ + field: "SdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSdsConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SdsSecretConfigValidationError{ + field: "SdsConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return SdsSecretConfigMultiError(errors) + } + + return nil +} + +// SdsSecretConfigMultiError is an error wrapping multiple validation errors +// returned by SdsSecretConfig.ValidateAll() if the designated constraints +// aren't met. +type SdsSecretConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SdsSecretConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SdsSecretConfigMultiError) AllErrors() []error { return m } + +// SdsSecretConfigValidationError is the validation error returned by +// SdsSecretConfig.Validate if the designated constraints aren't met. +type SdsSecretConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SdsSecretConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e SdsSecretConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SdsSecretConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SdsSecretConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SdsSecretConfigValidationError) ErrorName() string { return "SdsSecretConfigValidationError" } + +// Error satisfies the builtin error interface +func (e SdsSecretConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSdsSecretConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SdsSecretConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SdsSecretConfigValidationError{} + +// Validate checks the field values on Secret with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Secret) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Secret with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in SecretMultiError, or nil if none found. +func (m *Secret) ValidateAll() error { + return m.validate(true) +} + +func (m *Secret) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + switch v := m.Type.(type) { + case *Secret_TlsCertificate: + if v == nil { + err := SecretValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTlsCertificate()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecretValidationError{ + field: "TlsCertificate", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecretValidationError{ + field: "TlsCertificate", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTlsCertificate()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretValidationError{ + field: "TlsCertificate", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Secret_SessionTicketKeys: + if v == nil { + err := SecretValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetSessionTicketKeys()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecretValidationError{ + field: "SessionTicketKeys", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecretValidationError{ + field: "SessionTicketKeys", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := 
interface{}(m.GetSessionTicketKeys()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretValidationError{ + field: "SessionTicketKeys", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Secret_ValidationContext: + if v == nil { + err := SecretValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetValidationContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecretValidationError{ + field: "ValidationContext", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecretValidationError{ + field: "ValidationContext", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValidationContext()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretValidationError{ + field: "ValidationContext", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Secret_GenericSecret: + if v == nil { + err := SecretValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetGenericSecret()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecretValidationError{ + field: "GenericSecret", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecretValidationError{ + field: "GenericSecret", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGenericSecret()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecretValidationError{ + field: "GenericSecret", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return SecretMultiError(errors) + } + + return nil +} + +// SecretMultiError is an error wrapping multiple validation errors returned by +// Secret.ValidateAll() if the designated constraints aren't met. +type SecretMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SecretMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SecretMultiError) AllErrors() []error { return m } + +// SecretValidationError is the validation error returned by Secret.Validate if +// the designated constraints aren't met. +type SecretValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SecretValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SecretValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SecretValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e SecretValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SecretValidationError) ErrorName() string { return "SecretValidationError" } + +// Error satisfies the builtin error interface +func (e SecretValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSecret.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SecretValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SecretValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret_vtproto.pb.go new file mode 100644 index 0000000000..35e8a3ce28 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/secret_vtproto.pb.go @@ -0,0 +1,415 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/transport_sockets/tls/v3/secret.proto + +package tlsv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *GenericSecret) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenericSecret) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *GenericSecret) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Secret != nil { + if vtmsg, ok := interface{}(m.Secret).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Secret) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SdsSecretConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SdsSecretConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SdsSecretConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + 
return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SdsConfig != nil { + if vtmsg, ok := interface{}(m.SdsConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SdsConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Secret) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Secret) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Secret) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Type.(*Secret_GenericSecret); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*Secret_ValidationContext); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*Secret_SessionTicketKeys); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*Secret_TlsCertificate); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Secret_TlsCertificate) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Secret_TlsCertificate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TlsCertificate != nil { + size, err := m.TlsCertificate.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Secret_SessionTicketKeys) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Secret_SessionTicketKeys) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SessionTicketKeys != nil { + size, err := m.SessionTicketKeys.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = 
protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Secret_ValidationContext) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Secret_ValidationContext) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ValidationContext != nil { + size, err := m.ValidationContext.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Secret_GenericSecret) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Secret_GenericSecret) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GenericSecret != nil { + size, err := m.GenericSecret.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *GenericSecret) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Secret != nil { + if size, ok := interface{}(m.Secret).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Secret) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SdsSecretConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SdsConfig != nil { + if size, ok := interface{}(m.SdsConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.SdsConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Secret) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.Type.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Secret_TlsCertificate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TlsCertificate != nil { + l = m.TlsCertificate.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Secret_SessionTicketKeys) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SessionTicketKeys != nil { + l = m.SessionTicketKeys.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Secret_ValidationContext) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValidationContext != nil { + l = m.ValidationContext.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Secret_GenericSecret) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GenericSecret != nil { + l = m.GenericSecret.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git 
a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.go new file mode 100644 index 0000000000..db43da6745 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.go @@ -0,0 +1,1545 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/transport_sockets/tls/v3/tls.proto + +package tlsv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type DownstreamTlsContext_OcspStaplePolicy int32 + +const ( + // OCSP responses are optional. If an OCSP response is absent + // or expired, the associated certificate will be used for + // connections without an OCSP staple. + DownstreamTlsContext_LENIENT_STAPLING DownstreamTlsContext_OcspStaplePolicy = 0 + // OCSP responses are optional. If an OCSP response is absent, + // the associated certificate will be used without an + // OCSP staple. If a response is provided but is expired, + // the associated certificate will not be used for + // subsequent connections. If no suitable certificate is found, + // the connection is rejected. + DownstreamTlsContext_STRICT_STAPLING DownstreamTlsContext_OcspStaplePolicy = 1 + // OCSP responses are required. Configuration will fail if + // a certificate is provided without an OCSP response. If a + // response expires, the associated certificate will not be + // used connections. If no suitable certificate is found, the + // connection is rejected. + DownstreamTlsContext_MUST_STAPLE DownstreamTlsContext_OcspStaplePolicy = 2 +) + +// Enum value maps for DownstreamTlsContext_OcspStaplePolicy. 
+var ( + DownstreamTlsContext_OcspStaplePolicy_name = map[int32]string{ + 0: "LENIENT_STAPLING", + 1: "STRICT_STAPLING", + 2: "MUST_STAPLE", + } + DownstreamTlsContext_OcspStaplePolicy_value = map[string]int32{ + "LENIENT_STAPLING": 0, + "STRICT_STAPLING": 1, + "MUST_STAPLE": 2, + } +) + +func (x DownstreamTlsContext_OcspStaplePolicy) Enum() *DownstreamTlsContext_OcspStaplePolicy { + p := new(DownstreamTlsContext_OcspStaplePolicy) + *p = x + return p +} + +func (x DownstreamTlsContext_OcspStaplePolicy) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (DownstreamTlsContext_OcspStaplePolicy) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_extensions_transport_sockets_tls_v3_tls_proto_enumTypes[0].Descriptor() +} + +func (DownstreamTlsContext_OcspStaplePolicy) Type() protoreflect.EnumType { + return &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_enumTypes[0] +} + +func (x DownstreamTlsContext_OcspStaplePolicy) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use DownstreamTlsContext_OcspStaplePolicy.Descriptor instead. +func (DownstreamTlsContext_OcspStaplePolicy) EnumDescriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescGZIP(), []int{1, 0} +} + +// [#next-free-field: 6] +type UpstreamTlsContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Common TLS context settings. + // + // .. attention:: + // + // Server certificate verification is not enabled by default. Configure + // :ref:`trusted_ca` to enable + // verification. + CommonTlsContext *CommonTlsContext `protobuf:"bytes,1,opt,name=common_tls_context,json=commonTlsContext,proto3" json:"common_tls_context,omitempty"` + // SNI string to use when creating TLS backend connections. + Sni string `protobuf:"bytes,2,opt,name=sni,proto3" json:"sni,omitempty"` + // If true, server-initiated TLS renegotiation will be allowed. + // + // .. attention:: + // + // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. + AllowRenegotiation bool `protobuf:"varint,3,opt,name=allow_renegotiation,json=allowRenegotiation,proto3" json:"allow_renegotiation,omitempty"` + // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets + // for TLSv1.2 and older) to store for the purpose of session resumption. + // + // Defaults to 1, setting this to 0 disables session resumption. + MaxSessionKeys *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=max_session_keys,json=maxSessionKeys,proto3" json:"max_session_keys,omitempty"` + // This field is used to control the enforcement, whereby the handshake will fail if the keyUsage extension + // is present and incompatible with the TLS usage. Currently, the default value is false (i.e., enforcement off) + // but it is expected to be changed to true by default in a future release. + // “ssl.was_key_usage_invalid“ in :ref:`listener metrics ` will be set for certificate + // configurations that would fail if this option were set to true. 
+ EnforceRsaKeyUsage *wrapperspb.BoolValue `protobuf:"bytes,5,opt,name=enforce_rsa_key_usage,json=enforceRsaKeyUsage,proto3" json:"enforce_rsa_key_usage,omitempty"` +} + +func (x *UpstreamTlsContext) Reset() { + *x = UpstreamTlsContext{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpstreamTlsContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpstreamTlsContext) ProtoMessage() {} + +func (x *UpstreamTlsContext) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpstreamTlsContext.ProtoReflect.Descriptor instead. +func (*UpstreamTlsContext) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescGZIP(), []int{0} +} + +func (x *UpstreamTlsContext) GetCommonTlsContext() *CommonTlsContext { + if x != nil { + return x.CommonTlsContext + } + return nil +} + +func (x *UpstreamTlsContext) GetSni() string { + if x != nil { + return x.Sni + } + return "" +} + +func (x *UpstreamTlsContext) GetAllowRenegotiation() bool { + if x != nil { + return x.AllowRenegotiation + } + return false +} + +func (x *UpstreamTlsContext) GetMaxSessionKeys() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxSessionKeys + } + return nil +} + +func (x *UpstreamTlsContext) GetEnforceRsaKeyUsage() *wrapperspb.BoolValue { + if x != nil { + return x.EnforceRsaKeyUsage + } + return nil +} + +// [#next-free-field: 12] +type DownstreamTlsContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Common TLS context settings. + CommonTlsContext *CommonTlsContext `protobuf:"bytes,1,opt,name=common_tls_context,json=commonTlsContext,proto3" json:"common_tls_context,omitempty"` + // If specified, Envoy will reject connections without a valid client + // certificate. + RequireClientCertificate *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=require_client_certificate,json=requireClientCertificate,proto3" json:"require_client_certificate,omitempty"` + // If specified, Envoy will reject connections without a valid and matching SNI. + // [#not-implemented-hide:] + RequireSni *wrapperspb.BoolValue `protobuf:"bytes,3,opt,name=require_sni,json=requireSni,proto3" json:"require_sni,omitempty"` + // Types that are assignable to SessionTicketKeysType: + // + // *DownstreamTlsContext_SessionTicketKeys + // *DownstreamTlsContext_SessionTicketKeysSdsSecretConfig + // *DownstreamTlsContext_DisableStatelessSessionResumption + SessionTicketKeysType isDownstreamTlsContext_SessionTicketKeysType `protobuf_oneof:"session_ticket_keys_type"` + // If set to true, the TLS server will not maintain a session cache of TLS sessions. (This is + // relevant only for TLSv1.2 and earlier.) + DisableStatefulSessionResumption bool `protobuf:"varint,10,opt,name=disable_stateful_session_resumption,json=disableStatefulSessionResumption,proto3" json:"disable_stateful_session_resumption,omitempty"` + // If specified, “session_timeout“ will change the maximum lifetime (in seconds) of the TLS session. 
+ // Currently this value is used as a hint for the `TLS session ticket lifetime (for TLSv1.2) `_. + // Only seconds can be specified (fractional seconds are ignored). + SessionTimeout *durationpb.Duration `protobuf:"bytes,6,opt,name=session_timeout,json=sessionTimeout,proto3" json:"session_timeout,omitempty"` + // Config for whether to use certificates if they do not have + // an accompanying OCSP response or if the response expires at runtime. + // Defaults to LENIENT_STAPLING + OcspStaplePolicy DownstreamTlsContext_OcspStaplePolicy `protobuf:"varint,8,opt,name=ocsp_staple_policy,json=ocspStaplePolicy,proto3,enum=envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext_OcspStaplePolicy" json:"ocsp_staple_policy,omitempty"` + // Multiple certificates are allowed in Downstream transport socket to serve different SNI. + // If the client provides SNI but no such cert matched, it will decide to full scan certificates or not based on this config. + // Defaults to false. See more details in :ref:`Multiple TLS certificates `. + FullScanCertsOnSniMismatch *wrapperspb.BoolValue `protobuf:"bytes,9,opt,name=full_scan_certs_on_sni_mismatch,json=fullScanCertsOnSniMismatch,proto3" json:"full_scan_certs_on_sni_mismatch,omitempty"` + // By default, Envoy as a server uses its preferred cipher during the handshake. + // Setting this to true would allow the downstream client's preferred cipher to be used instead. + // Has no effect when using TLSv1_3. + PreferClientCiphers bool `protobuf:"varint,11,opt,name=prefer_client_ciphers,json=preferClientCiphers,proto3" json:"prefer_client_ciphers,omitempty"` +} + +func (x *DownstreamTlsContext) Reset() { + *x = DownstreamTlsContext{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DownstreamTlsContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DownstreamTlsContext) ProtoMessage() {} + +func (x *DownstreamTlsContext) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DownstreamTlsContext.ProtoReflect.Descriptor instead. 
+func (*DownstreamTlsContext) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescGZIP(), []int{1} +} + +func (x *DownstreamTlsContext) GetCommonTlsContext() *CommonTlsContext { + if x != nil { + return x.CommonTlsContext + } + return nil +} + +func (x *DownstreamTlsContext) GetRequireClientCertificate() *wrapperspb.BoolValue { + if x != nil { + return x.RequireClientCertificate + } + return nil +} + +func (x *DownstreamTlsContext) GetRequireSni() *wrapperspb.BoolValue { + if x != nil { + return x.RequireSni + } + return nil +} + +func (m *DownstreamTlsContext) GetSessionTicketKeysType() isDownstreamTlsContext_SessionTicketKeysType { + if m != nil { + return m.SessionTicketKeysType + } + return nil +} + +func (x *DownstreamTlsContext) GetSessionTicketKeys() *TlsSessionTicketKeys { + if x, ok := x.GetSessionTicketKeysType().(*DownstreamTlsContext_SessionTicketKeys); ok { + return x.SessionTicketKeys + } + return nil +} + +func (x *DownstreamTlsContext) GetSessionTicketKeysSdsSecretConfig() *SdsSecretConfig { + if x, ok := x.GetSessionTicketKeysType().(*DownstreamTlsContext_SessionTicketKeysSdsSecretConfig); ok { + return x.SessionTicketKeysSdsSecretConfig + } + return nil +} + +func (x *DownstreamTlsContext) GetDisableStatelessSessionResumption() bool { + if x, ok := x.GetSessionTicketKeysType().(*DownstreamTlsContext_DisableStatelessSessionResumption); ok { + return x.DisableStatelessSessionResumption + } + return false +} + +func (x *DownstreamTlsContext) GetDisableStatefulSessionResumption() bool { + if x != nil { + return x.DisableStatefulSessionResumption + } + return false +} + +func (x *DownstreamTlsContext) GetSessionTimeout() *durationpb.Duration { + if x != nil { + return x.SessionTimeout + } + return nil +} + +func (x *DownstreamTlsContext) GetOcspStaplePolicy() DownstreamTlsContext_OcspStaplePolicy { + if x != nil { + return x.OcspStaplePolicy + } + return DownstreamTlsContext_LENIENT_STAPLING +} + +func (x *DownstreamTlsContext) GetFullScanCertsOnSniMismatch() *wrapperspb.BoolValue { + if x != nil { + return x.FullScanCertsOnSniMismatch + } + return nil +} + +func (x *DownstreamTlsContext) GetPreferClientCiphers() bool { + if x != nil { + return x.PreferClientCiphers + } + return false +} + +type isDownstreamTlsContext_SessionTicketKeysType interface { + isDownstreamTlsContext_SessionTicketKeysType() +} + +type DownstreamTlsContext_SessionTicketKeys struct { + // TLS session ticket key settings. + SessionTicketKeys *TlsSessionTicketKeys `protobuf:"bytes,4,opt,name=session_ticket_keys,json=sessionTicketKeys,proto3,oneof"` +} + +type DownstreamTlsContext_SessionTicketKeysSdsSecretConfig struct { + // Config for fetching TLS session ticket keys via SDS API. + SessionTicketKeysSdsSecretConfig *SdsSecretConfig `protobuf:"bytes,5,opt,name=session_ticket_keys_sds_secret_config,json=sessionTicketKeysSdsSecretConfig,proto3,oneof"` +} + +type DownstreamTlsContext_DisableStatelessSessionResumption struct { + // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS + // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. + // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using + // the keys specified through either :ref:`session_ticket_keys ` + // or :ref:`session_ticket_keys_sds_secret_config `. 
+ // If this config is set to false and no keys are explicitly configured, the TLS server will issue + // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the + // implication that sessions cannot be resumed across hot restarts or on different hosts. + DisableStatelessSessionResumption bool `protobuf:"varint,7,opt,name=disable_stateless_session_resumption,json=disableStatelessSessionResumption,proto3,oneof"` +} + +func (*DownstreamTlsContext_SessionTicketKeys) isDownstreamTlsContext_SessionTicketKeysType() {} + +func (*DownstreamTlsContext_SessionTicketKeysSdsSecretConfig) isDownstreamTlsContext_SessionTicketKeysType() { +} + +func (*DownstreamTlsContext_DisableStatelessSessionResumption) isDownstreamTlsContext_SessionTicketKeysType() { +} + +// TLS key log configuration. +// The key log file format is "format used by NSS for its SSLKEYLOGFILE debugging output" (text taken from openssl man page) +type TlsKeyLog struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The path to save the TLS key log. + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + // The local IP address that will be used to filter the connection which should save the TLS key log + // If it is not set, any local IP address will be matched. + LocalAddressRange []*v3.CidrRange `protobuf:"bytes,2,rep,name=local_address_range,json=localAddressRange,proto3" json:"local_address_range,omitempty"` + // The remote IP address that will be used to filter the connection which should save the TLS key log + // If it is not set, any remote IP address will be matched. + RemoteAddressRange []*v3.CidrRange `protobuf:"bytes,3,rep,name=remote_address_range,json=remoteAddressRange,proto3" json:"remote_address_range,omitempty"` +} + +func (x *TlsKeyLog) Reset() { + *x = TlsKeyLog{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TlsKeyLog) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TlsKeyLog) ProtoMessage() {} + +func (x *TlsKeyLog) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TlsKeyLog.ProtoReflect.Descriptor instead. +func (*TlsKeyLog) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescGZIP(), []int{2} +} + +func (x *TlsKeyLog) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *TlsKeyLog) GetLocalAddressRange() []*v3.CidrRange { + if x != nil { + return x.LocalAddressRange + } + return nil +} + +func (x *TlsKeyLog) GetRemoteAddressRange() []*v3.CidrRange { + if x != nil { + return x.RemoteAddressRange + } + return nil +} + +// TLS context shared by both client and server TLS contexts. +// [#next-free-field: 17] +type CommonTlsContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // TLS protocol versions, cipher suites etc. 
+ TlsParams *TlsParameters `protobuf:"bytes,1,opt,name=tls_params,json=tlsParams,proto3" json:"tls_params,omitempty"` + // Only a single TLS certificate is supported in client contexts. In server contexts, + // :ref:`Multiple TLS certificates ` can be associated with the + // same context to allow both RSA and ECDSA certificates and support SNI-based selection. + // + // If “tls_certificate_provider_instance“ is set, this field is ignored. + // If this field is set, “tls_certificate_sds_secret_configs“ is ignored. + TlsCertificates []*TlsCertificate `protobuf:"bytes,2,rep,name=tls_certificates,json=tlsCertificates,proto3" json:"tls_certificates,omitempty"` + // Configs for fetching TLS certificates via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + // + // The same number and types of certificates as :ref:`tls_certificates ` + // are valid in the the certificates fetched through this setting. + // + // If “tls_certificates“ or “tls_certificate_provider_instance“ are set, this field + // is ignored. + TlsCertificateSdsSecretConfigs []*SdsSecretConfig `protobuf:"bytes,6,rep,name=tls_certificate_sds_secret_configs,json=tlsCertificateSdsSecretConfigs,proto3" json:"tls_certificate_sds_secret_configs,omitempty"` + // Certificate provider instance for fetching TLS certs. + // + // If this field is set, “tls_certificates“ and “tls_certificate_provider_instance“ + // are ignored. + // [#not-implemented-hide:] + TlsCertificateProviderInstance *CertificateProviderPluginInstance `protobuf:"bytes,14,opt,name=tls_certificate_provider_instance,json=tlsCertificateProviderInstance,proto3" json:"tls_certificate_provider_instance,omitempty"` + // Custom TLS certificate selector. + // + // Select TLS certificate based on TLS client hello. + // If empty, defaults to native TLS certificate selection behavior: + // DNS SANs or Subject Common Name in TLS certificates is extracted as server name pattern to match SNI. + CustomTlsCertificateSelector *v3.TypedExtensionConfig `protobuf:"bytes,16,opt,name=custom_tls_certificate_selector,json=customTlsCertificateSelector,proto3" json:"custom_tls_certificate_selector,omitempty"` + // Certificate provider for fetching TLS certificates. + // [#not-implemented-hide:] + // + // Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/tls.proto. + TlsCertificateCertificateProvider *CommonTlsContext_CertificateProvider `protobuf:"bytes,9,opt,name=tls_certificate_certificate_provider,json=tlsCertificateCertificateProvider,proto3" json:"tls_certificate_certificate_provider,omitempty"` + // Certificate provider instance for fetching TLS certificates. + // [#not-implemented-hide:] + // + // Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/tls.proto. 
+ TlsCertificateCertificateProviderInstance *CommonTlsContext_CertificateProviderInstance `protobuf:"bytes,11,opt,name=tls_certificate_certificate_provider_instance,json=tlsCertificateCertificateProviderInstance,proto3" json:"tls_certificate_certificate_provider_instance,omitempty"` + // Types that are assignable to ValidationContextType: + // + // *CommonTlsContext_ValidationContext + // *CommonTlsContext_ValidationContextSdsSecretConfig + // *CommonTlsContext_CombinedValidationContext + // *CommonTlsContext_ValidationContextCertificateProvider + // *CommonTlsContext_ValidationContextCertificateProviderInstance + ValidationContextType isCommonTlsContext_ValidationContextType `protobuf_oneof:"validation_context_type"` + // Supplies the list of ALPN protocols that the listener should expose. In + // practice this is likely to be set to one of two values (see the + // :ref:`codec_type + // ` + // parameter in the HTTP connection manager for more information): + // + // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. + // * "http/1.1" If the listener is only going to support HTTP/1.1. + // + // There is no default for this parameter. If empty, Envoy will not expose ALPN. + AlpnProtocols []string `protobuf:"bytes,4,rep,name=alpn_protocols,json=alpnProtocols,proto3" json:"alpn_protocols,omitempty"` + // Custom TLS handshaker. If empty, defaults to native TLS handshaking + // behavior. + CustomHandshaker *v3.TypedExtensionConfig `protobuf:"bytes,13,opt,name=custom_handshaker,json=customHandshaker,proto3" json:"custom_handshaker,omitempty"` + // TLS key log configuration + KeyLog *TlsKeyLog `protobuf:"bytes,15,opt,name=key_log,json=keyLog,proto3" json:"key_log,omitempty"` +} + +func (x *CommonTlsContext) Reset() { + *x = CommonTlsContext{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommonTlsContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommonTlsContext) ProtoMessage() {} + +func (x *CommonTlsContext) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommonTlsContext.ProtoReflect.Descriptor instead. 
+func (*CommonTlsContext) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescGZIP(), []int{3} +} + +func (x *CommonTlsContext) GetTlsParams() *TlsParameters { + if x != nil { + return x.TlsParams + } + return nil +} + +func (x *CommonTlsContext) GetTlsCertificates() []*TlsCertificate { + if x != nil { + return x.TlsCertificates + } + return nil +} + +func (x *CommonTlsContext) GetTlsCertificateSdsSecretConfigs() []*SdsSecretConfig { + if x != nil { + return x.TlsCertificateSdsSecretConfigs + } + return nil +} + +func (x *CommonTlsContext) GetTlsCertificateProviderInstance() *CertificateProviderPluginInstance { + if x != nil { + return x.TlsCertificateProviderInstance + } + return nil +} + +func (x *CommonTlsContext) GetCustomTlsCertificateSelector() *v3.TypedExtensionConfig { + if x != nil { + return x.CustomTlsCertificateSelector + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/tls.proto. +func (x *CommonTlsContext) GetTlsCertificateCertificateProvider() *CommonTlsContext_CertificateProvider { + if x != nil { + return x.TlsCertificateCertificateProvider + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/tls.proto. +func (x *CommonTlsContext) GetTlsCertificateCertificateProviderInstance() *CommonTlsContext_CertificateProviderInstance { + if x != nil { + return x.TlsCertificateCertificateProviderInstance + } + return nil +} + +func (m *CommonTlsContext) GetValidationContextType() isCommonTlsContext_ValidationContextType { + if m != nil { + return m.ValidationContextType + } + return nil +} + +func (x *CommonTlsContext) GetValidationContext() *CertificateValidationContext { + if x, ok := x.GetValidationContextType().(*CommonTlsContext_ValidationContext); ok { + return x.ValidationContext + } + return nil +} + +func (x *CommonTlsContext) GetValidationContextSdsSecretConfig() *SdsSecretConfig { + if x, ok := x.GetValidationContextType().(*CommonTlsContext_ValidationContextSdsSecretConfig); ok { + return x.ValidationContextSdsSecretConfig + } + return nil +} + +func (x *CommonTlsContext) GetCombinedValidationContext() *CommonTlsContext_CombinedCertificateValidationContext { + if x, ok := x.GetValidationContextType().(*CommonTlsContext_CombinedValidationContext); ok { + return x.CombinedValidationContext + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/tls.proto. +func (x *CommonTlsContext) GetValidationContextCertificateProvider() *CommonTlsContext_CertificateProvider { + if x, ok := x.GetValidationContextType().(*CommonTlsContext_ValidationContextCertificateProvider); ok { + return x.ValidationContextCertificateProvider + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/tls.proto. 
+func (x *CommonTlsContext) GetValidationContextCertificateProviderInstance() *CommonTlsContext_CertificateProviderInstance { + if x, ok := x.GetValidationContextType().(*CommonTlsContext_ValidationContextCertificateProviderInstance); ok { + return x.ValidationContextCertificateProviderInstance + } + return nil +} + +func (x *CommonTlsContext) GetAlpnProtocols() []string { + if x != nil { + return x.AlpnProtocols + } + return nil +} + +func (x *CommonTlsContext) GetCustomHandshaker() *v3.TypedExtensionConfig { + if x != nil { + return x.CustomHandshaker + } + return nil +} + +func (x *CommonTlsContext) GetKeyLog() *TlsKeyLog { + if x != nil { + return x.KeyLog + } + return nil +} + +type isCommonTlsContext_ValidationContextType interface { + isCommonTlsContext_ValidationContextType() +} + +type CommonTlsContext_ValidationContext struct { + // How to validate peer certificates. + ValidationContext *CertificateValidationContext `protobuf:"bytes,3,opt,name=validation_context,json=validationContext,proto3,oneof"` +} + +type CommonTlsContext_ValidationContextSdsSecretConfig struct { + // Config for fetching validation context via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + ValidationContextSdsSecretConfig *SdsSecretConfig `protobuf:"bytes,7,opt,name=validation_context_sds_secret_config,json=validationContextSdsSecretConfig,proto3,oneof"` +} + +type CommonTlsContext_CombinedValidationContext struct { + // Combined certificate validation context holds a default CertificateValidationContext + // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic + // and default CertificateValidationContext are merged into a new CertificateValidationContext + // for validation. This merge is done by Message::MergeFrom(), so dynamic + // CertificateValidationContext overwrites singular fields in default + // CertificateValidationContext, and concatenates repeated fields to default + // CertificateValidationContext, and logical OR is applied to boolean fields. + CombinedValidationContext *CommonTlsContext_CombinedCertificateValidationContext `protobuf:"bytes,8,opt,name=combined_validation_context,json=combinedValidationContext,proto3,oneof"` +} + +type CommonTlsContext_ValidationContextCertificateProvider struct { + // Certificate provider for fetching validation context. + // [#not-implemented-hide:] + // + // Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/tls.proto. + ValidationContextCertificateProvider *CommonTlsContext_CertificateProvider `protobuf:"bytes,10,opt,name=validation_context_certificate_provider,json=validationContextCertificateProvider,proto3,oneof"` +} + +type CommonTlsContext_ValidationContextCertificateProviderInstance struct { + // Certificate provider instance for fetching validation context. + // [#not-implemented-hide:] + // + // Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/tls.proto. 
+ ValidationContextCertificateProviderInstance *CommonTlsContext_CertificateProviderInstance `protobuf:"bytes,12,opt,name=validation_context_certificate_provider_instance,json=validationContextCertificateProviderInstance,proto3,oneof"` +} + +func (*CommonTlsContext_ValidationContext) isCommonTlsContext_ValidationContextType() {} + +func (*CommonTlsContext_ValidationContextSdsSecretConfig) isCommonTlsContext_ValidationContextType() { +} + +func (*CommonTlsContext_CombinedValidationContext) isCommonTlsContext_ValidationContextType() {} + +func (*CommonTlsContext_ValidationContextCertificateProvider) isCommonTlsContext_ValidationContextType() { +} + +func (*CommonTlsContext_ValidationContextCertificateProviderInstance) isCommonTlsContext_ValidationContextType() { +} + +// Config for Certificate provider to get certificates. This provider should allow certificates to be +// fetched/refreshed over the network asynchronously with respect to the TLS handshake. +// +// DEPRECATED: This message is not currently used, but if we ever do need it, we will want to +// move it out of CommonTlsContext and into common.proto, similar to the existing +// CertificateProviderPluginInstance message. +// +// [#not-implemented-hide:] +type CommonTlsContext_CertificateProvider struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "TLS" to specify a new tls-certificate. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Provider specific config. + // Note: an implementation is expected to dedup multiple instances of the same config + // to maintain a single certificate-provider instance. The sharing can happen, for + // example, among multiple clusters or between the tls_certificate and validation_context + // certificate providers of a cluster. + // This config could be supplied inline or (in future) a named xDS resource. + // + // Types that are assignable to Config: + // + // *CommonTlsContext_CertificateProvider_TypedConfig + Config isCommonTlsContext_CertificateProvider_Config `protobuf_oneof:"config"` +} + +func (x *CommonTlsContext_CertificateProvider) Reset() { + *x = CommonTlsContext_CertificateProvider{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommonTlsContext_CertificateProvider) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommonTlsContext_CertificateProvider) ProtoMessage() {} + +func (x *CommonTlsContext_CertificateProvider) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommonTlsContext_CertificateProvider.ProtoReflect.Descriptor instead. 
+func (*CommonTlsContext_CertificateProvider) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *CommonTlsContext_CertificateProvider) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *CommonTlsContext_CertificateProvider) GetConfig() isCommonTlsContext_CertificateProvider_Config { + if m != nil { + return m.Config + } + return nil +} + +func (x *CommonTlsContext_CertificateProvider) GetTypedConfig() *v3.TypedExtensionConfig { + if x, ok := x.GetConfig().(*CommonTlsContext_CertificateProvider_TypedConfig); ok { + return x.TypedConfig + } + return nil +} + +type isCommonTlsContext_CertificateProvider_Config interface { + isCommonTlsContext_CertificateProvider_Config() +} + +type CommonTlsContext_CertificateProvider_TypedConfig struct { + TypedConfig *v3.TypedExtensionConfig `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3,oneof"` +} + +func (*CommonTlsContext_CertificateProvider_TypedConfig) isCommonTlsContext_CertificateProvider_Config() { +} + +// Similar to CertificateProvider above, but allows the provider instances to be configured on +// the client side instead of being sent from the control plane. +// +// DEPRECATED: This message was moved outside of CommonTlsContext +// and now lives in common.proto. +// +// [#not-implemented-hide:] +type CommonTlsContext_CertificateProviderInstance struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Provider instance name. This name must be defined in the client's configuration (e.g., a + // bootstrap file) to correspond to a provider instance (i.e., the same data in the typed_config + // field that would be sent in the CertificateProvider message if the config was sent by the + // control plane). If not present, defaults to "default". + // + // Instance names should generally be defined not in terms of the underlying provider + // implementation (e.g., "file_watcher") but rather in terms of the function of the + // certificates (e.g., "foo_deployment_identity"). + InstanceName string `protobuf:"bytes,1,opt,name=instance_name,json=instanceName,proto3" json:"instance_name,omitempty"` + // Opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "example.com" to specify a certificate for a + // particular domain. Not all provider instances will actually use this field, so the value + // defaults to the empty string. 
+ CertificateName string `protobuf:"bytes,2,opt,name=certificate_name,json=certificateName,proto3" json:"certificate_name,omitempty"` +} + +func (x *CommonTlsContext_CertificateProviderInstance) Reset() { + *x = CommonTlsContext_CertificateProviderInstance{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommonTlsContext_CertificateProviderInstance) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommonTlsContext_CertificateProviderInstance) ProtoMessage() {} + +func (x *CommonTlsContext_CertificateProviderInstance) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommonTlsContext_CertificateProviderInstance.ProtoReflect.Descriptor instead. +func (*CommonTlsContext_CertificateProviderInstance) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescGZIP(), []int{3, 1} +} + +func (x *CommonTlsContext_CertificateProviderInstance) GetInstanceName() string { + if x != nil { + return x.InstanceName + } + return "" +} + +func (x *CommonTlsContext_CertificateProviderInstance) GetCertificateName() string { + if x != nil { + return x.CertificateName + } + return "" +} + +type CommonTlsContext_CombinedCertificateValidationContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // How to validate peer certificates. + DefaultValidationContext *CertificateValidationContext `protobuf:"bytes,1,opt,name=default_validation_context,json=defaultValidationContext,proto3" json:"default_validation_context,omitempty"` + // Config for fetching validation context via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + ValidationContextSdsSecretConfig *SdsSecretConfig `protobuf:"bytes,2,opt,name=validation_context_sds_secret_config,json=validationContextSdsSecretConfig,proto3" json:"validation_context_sds_secret_config,omitempty"` + // Certificate provider for fetching CA certs. This will populate the + // “default_validation_context.trusted_ca“ field. + // [#not-implemented-hide:] + // + // Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/tls.proto. + ValidationContextCertificateProvider *CommonTlsContext_CertificateProvider `protobuf:"bytes,3,opt,name=validation_context_certificate_provider,json=validationContextCertificateProvider,proto3" json:"validation_context_certificate_provider,omitempty"` + // Certificate provider instance for fetching CA certs. This will populate the + // “default_validation_context.trusted_ca“ field. + // [#not-implemented-hide:] + // + // Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/tls.proto. 
+ ValidationContextCertificateProviderInstance *CommonTlsContext_CertificateProviderInstance `protobuf:"bytes,4,opt,name=validation_context_certificate_provider_instance,json=validationContextCertificateProviderInstance,proto3" json:"validation_context_certificate_provider_instance,omitempty"` +} + +func (x *CommonTlsContext_CombinedCertificateValidationContext) Reset() { + *x = CommonTlsContext_CombinedCertificateValidationContext{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommonTlsContext_CombinedCertificateValidationContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommonTlsContext_CombinedCertificateValidationContext) ProtoMessage() {} + +func (x *CommonTlsContext_CombinedCertificateValidationContext) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommonTlsContext_CombinedCertificateValidationContext.ProtoReflect.Descriptor instead. +func (*CommonTlsContext_CombinedCertificateValidationContext) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescGZIP(), []int{3, 2} +} + +func (x *CommonTlsContext_CombinedCertificateValidationContext) GetDefaultValidationContext() *CertificateValidationContext { + if x != nil { + return x.DefaultValidationContext + } + return nil +} + +func (x *CommonTlsContext_CombinedCertificateValidationContext) GetValidationContextSdsSecretConfig() *SdsSecretConfig { + if x != nil { + return x.ValidationContextSdsSecretConfig + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/tls.proto. +func (x *CommonTlsContext_CombinedCertificateValidationContext) GetValidationContextCertificateProvider() *CommonTlsContext_CertificateProvider { + if x != nil { + return x.ValidationContextCertificateProvider + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/extensions/transport_sockets/tls/v3/tls.proto. 
+func (x *CommonTlsContext_CombinedCertificateValidationContext) GetValidationContextCertificateProviderInstance() *CommonTlsContext_CertificateProviderInstance { + if x != nil { + return x.ValidationContextCertificateProviderInstance + } + return nil +} + +var File_envoy_extensions_transport_sockets_tls_v3_tls_proto protoreflect.FileDescriptor + +var file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDesc = []byte{ + 0x0a, 0x33, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x74, 0x6c, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, + 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, + 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, + 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x90, 0x03, 0x0a, 0x12, 0x55, + 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x12, 0x69, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x74, 0x6c, 
0x73, 0x5f, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x10, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x1a, 0x0a, 0x03, + 0x73, 0x6e, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x72, 0x03, + 0x28, 0xff, 0x01, 0x52, 0x03, 0x73, 0x6e, 0x69, 0x12, 0x2f, 0x0a, 0x13, 0x61, 0x6c, 0x6c, 0x6f, + 0x77, 0x5f, 0x72, 0x65, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x6e, 0x65, + 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, 0x10, 0x6d, 0x61, 0x78, + 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, + 0x73, 0x12, 0x4d, 0x0a, 0x15, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, 0x72, 0x73, 0x61, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, 0x65, 0x6e, + 0x66, 0x6f, 0x72, 0x63, 0x65, 0x52, 0x73, 0x61, 0x4b, 0x65, 0x79, 0x55, 0x73, 0x61, 0x67, 0x65, + 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0xce, 0x09, + 0x0a, 0x14, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6c, 0x73, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x69, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, + 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, + 0x10, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x12, 0x58, 0x0a, 0x1a, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x43, 0x65, 
0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x72, + 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, 0x73, 0x6e, 0x69, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x53, 0x6e, 0x69, 0x12, 0x71, 0x0a, 0x13, 0x73, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x6c, 0x73, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, + 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x48, 0x00, 0x52, 0x11, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x8d, 0x01, 0x0a, 0x25, + 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6b, + 0x65, 0x79, 0x73, 0x5f, 0x73, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, + 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, + 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x53, 0x64, 0x73, 0x53, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x51, 0x0a, 0x24, 0x64, + 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6c, 0x65, 0x73, 0x73, + 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x21, 0x64, 0x69, 0x73, + 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6c, 0x65, 0x73, 0x73, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, + 0x0a, 0x23, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x66, + 0x75, 0x6c, 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x20, 0x64, 0x69, 0x73, + 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x66, 0x75, 0x6c, 0x53, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x54, 0x0a, + 0x0f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0xaa, 0x01, 0x0a, 0x1a, 0x06, 0x08, 0x80, 0x80, 0x80, 0x80, + 0x10, 0x32, 0x00, 0x52, 0x0e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x12, 0x88, 0x01, 
0x0a, 0x12, 0x6f, 0x63, 0x73, 0x70, 0x5f, 0x73, 0x74, 0x61, + 0x70, 0x6c, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x50, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, + 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x77, + 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x2e, 0x4f, 0x63, 0x73, 0x70, 0x53, 0x74, 0x61, 0x70, 0x6c, 0x65, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x10, 0x6f, 0x63, + 0x73, 0x70, 0x53, 0x74, 0x61, 0x70, 0x6c, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x5f, + 0x0a, 0x1f, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x73, 0x63, 0x61, 0x6e, 0x5f, 0x63, 0x65, 0x72, 0x74, + 0x73, 0x5f, 0x6f, 0x6e, 0x5f, 0x73, 0x6e, 0x69, 0x5f, 0x6d, 0x69, 0x73, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x66, 0x75, 0x6c, 0x6c, 0x53, 0x63, 0x61, 0x6e, 0x43, 0x65, 0x72, + 0x74, 0x73, 0x4f, 0x6e, 0x53, 0x6e, 0x69, 0x4d, 0x69, 0x73, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x12, + 0x32, 0x0a, 0x15, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, + 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x69, 0x70, 0x68, + 0x65, 0x72, 0x73, 0x22, 0x4e, 0x0a, 0x10, 0x4f, 0x63, 0x73, 0x70, 0x53, 0x74, 0x61, 0x70, 0x6c, + 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x14, 0x0a, 0x10, 0x4c, 0x45, 0x4e, 0x49, 0x45, + 0x4e, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x50, 0x4c, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x13, 0x0a, + 0x0f, 0x53, 0x54, 0x52, 0x49, 0x43, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x50, 0x4c, 0x49, 0x4e, 0x47, + 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x55, 0x53, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x50, 0x4c, + 0x45, 0x10, 0x02, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x44, 0x6f, + 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x42, 0x1a, 0x0a, 0x18, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, + 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0xcc, + 0x01, 0x0a, 0x09, 0x54, 0x6c, 0x73, 0x4b, 0x65, 0x79, 0x4c, 0x6f, 0x67, 0x12, 0x1b, 0x0a, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x4f, 0x0a, 0x13, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, + 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x51, 0x0a, 0x14, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 
0x73, 0x73, 0x5f, 0x72, 0x61, 0x6e, + 0x67, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x22, 0xdd, 0x18, + 0x0a, 0x10, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x12, 0x57, 0x0a, 0x0a, 0x74, 0x6c, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, + 0x76, 0x33, 0x2e, 0x54, 0x6c, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x52, 0x09, 0x74, 0x6c, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x64, 0x0a, 0x10, 0x74, + 0x6c, 0x73, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x52, 0x0f, 0x74, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x73, 0x12, 0x86, 0x01, 0x0a, 0x22, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x64, 0x73, 0x53, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1e, 0x74, 0x6c, 0x73, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x97, 0x01, 0x0a, 0x21, 0x74, + 0x6c, 0x73, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x52, 0x1e, 0x74, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x12, 0x71, 0x0a, 0x1f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 
0x5f, 0x74, + 0x6c, 0x73, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1c, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x54, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0xad, 0x01, 0x0a, 0x24, 0x74, 0x6c, 0x73, 0x5f, + 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, + 0x2e, 0x30, 0x18, 0x01, 0x52, 0x21, 0x74, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0xc6, 0x01, 0x0a, 0x2d, 0x74, 0x6c, 0x73, 0x5f, + 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x57, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, + 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x29, 0x74, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x12, 0x78, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x74, 
0x65, 0x78, 0x74, 0x48, 0x00, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x8c, 0x01, 0x0a, 0x24, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x5f, 0x73, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, + 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0xa2, 0x01, 0x0a, 0x1b, 0x63, 0x6f, + 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x60, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x6f, 0x6d, + 0x62, 0x69, 0x6e, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x48, 0x00, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x64, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0xb5, + 0x01, 0x0a, 0x27, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x4f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, + 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, + 0x52, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0xce, 0x01, 0x0a, 0x30, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x57, 0x2e, 0x65, 0x6e, 
0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, + 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, 0x2c, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x57, + 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x61, 0x6e, + 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x12, 0x4d, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x5f, 0x6c, + 0x6f, 0x67, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, + 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x6c, 0x73, 0x4b, 0x65, 0x79, 0x4c, 0x6f, 0x67, 0x52, 0x06, + 0x6b, 0x65, 0x79, 0x4c, 0x6f, 0x67, 0x1a, 0x92, 0x01, 0x0a, 0x13, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x1b, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4f, 0x0a, 0x0c, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, + 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x0d, 0x0a, 0x06, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x6d, 0x0a, 0x1b, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x29, 0x0a, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 
0x63, 0x61, 0x74, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0xa4, 0x06, 0x0a, 0x24, 0x43, + 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x12, 0x8f, 0x01, 0x0a, 0x1a, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, + 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x18, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x94, 0x01, 0x0a, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x73, 0x64, 0x73, + 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x53, 0x64, 0x73, + 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0xb3, 0x01, 0x0a, + 0x27, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, + 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x42, + 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x24, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x12, 0xcc, 0x01, 0x0a, 0x30, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 
0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, + 0x30, 0x18, 0x01, 0x52, 0x2c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x3a, 0x4e, 0x9a, 0xc5, 0x88, 0x1e, 0x49, 0x0a, 0x47, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x43, 0x6f, 0x6d, + 0x62, 0x69, 0x6e, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x42, 0x19, 0x0a, 0x17, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0xa5, 0x01, + 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x37, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, + 0x33, 0x42, 0x08, 0x54, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x56, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, + 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x3b, + 0x74, 0x6c, 0x73, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescOnce sync.Once + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescData = file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDesc +) + +func file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescGZIP() []byte { + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescOnce.Do(func() { + 
file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescData) + }) + return file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDescData +} + +var file_envoy_extensions_transport_sockets_tls_v3_tls_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_envoy_extensions_transport_sockets_tls_v3_tls_proto_goTypes = []interface{}{ + (DownstreamTlsContext_OcspStaplePolicy)(0), // 0: envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext.OcspStaplePolicy + (*UpstreamTlsContext)(nil), // 1: envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + (*DownstreamTlsContext)(nil), // 2: envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext + (*TlsKeyLog)(nil), // 3: envoy.extensions.transport_sockets.tls.v3.TlsKeyLog + (*CommonTlsContext)(nil), // 4: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext + (*CommonTlsContext_CertificateProvider)(nil), // 5: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider + (*CommonTlsContext_CertificateProviderInstance)(nil), // 6: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance + (*CommonTlsContext_CombinedCertificateValidationContext)(nil), // 7: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext + (*wrapperspb.UInt32Value)(nil), // 8: google.protobuf.UInt32Value + (*wrapperspb.BoolValue)(nil), // 9: google.protobuf.BoolValue + (*TlsSessionTicketKeys)(nil), // 10: envoy.extensions.transport_sockets.tls.v3.TlsSessionTicketKeys + (*SdsSecretConfig)(nil), // 11: envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig + (*durationpb.Duration)(nil), // 12: google.protobuf.Duration + (*v3.CidrRange)(nil), // 13: envoy.config.core.v3.CidrRange + (*TlsParameters)(nil), // 14: envoy.extensions.transport_sockets.tls.v3.TlsParameters + (*TlsCertificate)(nil), // 15: envoy.extensions.transport_sockets.tls.v3.TlsCertificate + (*CertificateProviderPluginInstance)(nil), // 16: envoy.extensions.transport_sockets.tls.v3.CertificateProviderPluginInstance + (*v3.TypedExtensionConfig)(nil), // 17: envoy.config.core.v3.TypedExtensionConfig + (*CertificateValidationContext)(nil), // 18: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext +} +var file_envoy_extensions_transport_sockets_tls_v3_tls_proto_depIdxs = []int32{ + 4, // 0: envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext.common_tls_context:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext + 8, // 1: envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext.max_session_keys:type_name -> google.protobuf.UInt32Value + 9, // 2: envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext.enforce_rsa_key_usage:type_name -> google.protobuf.BoolValue + 4, // 3: envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext.common_tls_context:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext + 9, // 4: envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext.require_client_certificate:type_name -> google.protobuf.BoolValue + 9, // 5: envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext.require_sni:type_name -> google.protobuf.BoolValue + 10, // 6: envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext.session_ticket_keys:type_name -> 
envoy.extensions.transport_sockets.tls.v3.TlsSessionTicketKeys + 11, // 7: envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext.session_ticket_keys_sds_secret_config:type_name -> envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig + 12, // 8: envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext.session_timeout:type_name -> google.protobuf.Duration + 0, // 9: envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext.ocsp_staple_policy:type_name -> envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext.OcspStaplePolicy + 9, // 10: envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext.full_scan_certs_on_sni_mismatch:type_name -> google.protobuf.BoolValue + 13, // 11: envoy.extensions.transport_sockets.tls.v3.TlsKeyLog.local_address_range:type_name -> envoy.config.core.v3.CidrRange + 13, // 12: envoy.extensions.transport_sockets.tls.v3.TlsKeyLog.remote_address_range:type_name -> envoy.config.core.v3.CidrRange + 14, // 13: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_params:type_name -> envoy.extensions.transport_sockets.tls.v3.TlsParameters + 15, // 14: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificates:type_name -> envoy.extensions.transport_sockets.tls.v3.TlsCertificate + 11, // 15: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificate_sds_secret_configs:type_name -> envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig + 16, // 16: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificate_provider_instance:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateProviderPluginInstance + 17, // 17: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.custom_tls_certificate_selector:type_name -> envoy.config.core.v3.TypedExtensionConfig + 5, // 18: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificate_certificate_provider:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider + 6, // 19: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.tls_certificate_certificate_provider_instance:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance + 18, // 20: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.validation_context:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext + 11, // 21: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.validation_context_sds_secret_config:type_name -> envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig + 7, // 22: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.combined_validation_context:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext + 5, // 23: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.validation_context_certificate_provider:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider + 6, // 24: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.validation_context_certificate_provider_instance:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance + 17, // 25: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.custom_handshaker:type_name -> envoy.config.core.v3.TypedExtensionConfig + 3, // 26: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.key_log:type_name -> envoy.extensions.transport_sockets.tls.v3.TlsKeyLog + 17, // 27: 
envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider.typed_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 18, // 28: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext.default_validation_context:type_name -> envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext + 11, // 29: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext.validation_context_sds_secret_config:type_name -> envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig + 5, // 30: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext.validation_context_certificate_provider:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider + 6, // 31: envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CombinedCertificateValidationContext.validation_context_certificate_provider_instance:type_name -> envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance + 32, // [32:32] is the sub-list for method output_type + 32, // [32:32] is the sub-list for method input_type + 32, // [32:32] is the sub-list for extension type_name + 32, // [32:32] is the sub-list for extension extendee + 0, // [0:32] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_transport_sockets_tls_v3_tls_proto_init() } +func file_envoy_extensions_transport_sockets_tls_v3_tls_proto_init() { + if File_envoy_extensions_transport_sockets_tls_v3_tls_proto != nil { + return + } + file_envoy_extensions_transport_sockets_tls_v3_common_proto_init() + file_envoy_extensions_transport_sockets_tls_v3_secret_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpstreamTlsContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DownstreamTlsContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TlsKeyLog); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommonTlsContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommonTlsContext_CertificateProvider); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommonTlsContext_CertificateProviderInstance); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return 
nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommonTlsContext_CombinedCertificateValidationContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*DownstreamTlsContext_SessionTicketKeys)(nil), + (*DownstreamTlsContext_SessionTicketKeysSdsSecretConfig)(nil), + (*DownstreamTlsContext_DisableStatelessSessionResumption)(nil), + } + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*CommonTlsContext_ValidationContext)(nil), + (*CommonTlsContext_ValidationContextSdsSecretConfig)(nil), + (*CommonTlsContext_CombinedValidationContext)(nil), + (*CommonTlsContext_ValidationContextCertificateProvider)(nil), + (*CommonTlsContext_ValidationContextCertificateProviderInstance)(nil), + } + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes[4].OneofWrappers = []interface{}{ + (*CommonTlsContext_CertificateProvider_TypedConfig)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDesc, + NumEnums: 1, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_transport_sockets_tls_v3_tls_proto_goTypes, + DependencyIndexes: file_envoy_extensions_transport_sockets_tls_v3_tls_proto_depIdxs, + EnumInfos: file_envoy_extensions_transport_sockets_tls_v3_tls_proto_enumTypes, + MessageInfos: file_envoy_extensions_transport_sockets_tls_v3_tls_proto_msgTypes, + }.Build() + File_envoy_extensions_transport_sockets_tls_v3_tls_proto = out.File + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_rawDesc = nil + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_goTypes = nil + file_envoy_extensions_transport_sockets_tls_v3_tls_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.go new file mode 100644 index 0000000000..6468ff227c --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls.pb.validate.go @@ -0,0 +1,1902 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/transport_sockets/tls/v3/tls.proto + +package tlsv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on UpstreamTlsContext with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *UpstreamTlsContext) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UpstreamTlsContext with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// UpstreamTlsContextMultiError, or nil if none found. +func (m *UpstreamTlsContext) ValidateAll() error { + return m.validate(true) +} + +func (m *UpstreamTlsContext) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetCommonTlsContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamTlsContextValidationError{ + field: "CommonTlsContext", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamTlsContextValidationError{ + field: "CommonTlsContext", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCommonTlsContext()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamTlsContextValidationError{ + field: "CommonTlsContext", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(m.GetSni()) > 255 { + err := UpstreamTlsContextValidationError{ + field: "Sni", + reason: "value length must be at most 255 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for AllowRenegotiation + + if all { + switch v := interface{}(m.GetMaxSessionKeys()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamTlsContextValidationError{ + field: "MaxSessionKeys", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamTlsContextValidationError{ + field: "MaxSessionKeys", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxSessionKeys()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamTlsContextValidationError{ + field: "MaxSessionKeys", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetEnforceRsaKeyUsage()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpstreamTlsContextValidationError{ + field: "EnforceRsaKeyUsage", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpstreamTlsContextValidationError{ + field: "EnforceRsaKeyUsage", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEnforceRsaKeyUsage()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpstreamTlsContextValidationError{ + field: "EnforceRsaKeyUsage", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return UpstreamTlsContextMultiError(errors) + } + + return nil +} + +// UpstreamTlsContextMultiError is an error wrapping multiple validation errors +// returned by 
UpstreamTlsContext.ValidateAll() if the designated constraints +// aren't met. +type UpstreamTlsContextMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UpstreamTlsContextMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UpstreamTlsContextMultiError) AllErrors() []error { return m } + +// UpstreamTlsContextValidationError is the validation error returned by +// UpstreamTlsContext.Validate if the designated constraints aren't met. +type UpstreamTlsContextValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UpstreamTlsContextValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UpstreamTlsContextValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UpstreamTlsContextValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UpstreamTlsContextValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UpstreamTlsContextValidationError) ErrorName() string { + return "UpstreamTlsContextValidationError" +} + +// Error satisfies the builtin error interface +func (e UpstreamTlsContextValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUpstreamTlsContext.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UpstreamTlsContextValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UpstreamTlsContextValidationError{} + +// Validate checks the field values on DownstreamTlsContext with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DownstreamTlsContext) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DownstreamTlsContext with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DownstreamTlsContextMultiError, or nil if none found. 
+func (m *DownstreamTlsContext) ValidateAll() error { + return m.validate(true) +} + +func (m *DownstreamTlsContext) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetCommonTlsContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "CommonTlsContext", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "CommonTlsContext", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCommonTlsContext()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DownstreamTlsContextValidationError{ + field: "CommonTlsContext", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRequireClientCertificate()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "RequireClientCertificate", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "RequireClientCertificate", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequireClientCertificate()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DownstreamTlsContextValidationError{ + field: "RequireClientCertificate", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRequireSni()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "RequireSni", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "RequireSni", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequireSni()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DownstreamTlsContextValidationError{ + field: "RequireSni", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for DisableStatefulSessionResumption + + if d := m.GetSessionTimeout(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = DownstreamTlsContextValidationError{ + field: "SessionTimeout", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + lt := time.Duration(4294967296*time.Second + 0*time.Nanosecond) + gte := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur < gte || dur >= lt { + err := DownstreamTlsContextValidationError{ + field: "SessionTimeout", + reason: "value must be inside range [0s, 1193046h28m16s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if _, ok := 
DownstreamTlsContext_OcspStaplePolicy_name[int32(m.GetOcspStaplePolicy())]; !ok { + err := DownstreamTlsContextValidationError{ + field: "OcspStaplePolicy", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetFullScanCertsOnSniMismatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "FullScanCertsOnSniMismatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "FullScanCertsOnSniMismatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFullScanCertsOnSniMismatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DownstreamTlsContextValidationError{ + field: "FullScanCertsOnSniMismatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for PreferClientCiphers + + switch v := m.SessionTicketKeysType.(type) { + case *DownstreamTlsContext_SessionTicketKeys: + if v == nil { + err := DownstreamTlsContextValidationError{ + field: "SessionTicketKeysType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetSessionTicketKeys()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "SessionTicketKeys", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "SessionTicketKeys", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSessionTicketKeys()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DownstreamTlsContextValidationError{ + field: "SessionTicketKeys", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *DownstreamTlsContext_SessionTicketKeysSdsSecretConfig: + if v == nil { + err := DownstreamTlsContextValidationError{ + field: "SessionTicketKeysType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetSessionTicketKeysSdsSecretConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "SessionTicketKeysSdsSecretConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DownstreamTlsContextValidationError{ + field: "SessionTicketKeysSdsSecretConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSessionTicketKeysSdsSecretConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DownstreamTlsContextValidationError{ + field: "SessionTicketKeysSdsSecretConfig", + reason: "embedded message failed 
validation", + cause: err, + } + } + } + + case *DownstreamTlsContext_DisableStatelessSessionResumption: + if v == nil { + err := DownstreamTlsContextValidationError{ + field: "SessionTicketKeysType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for DisableStatelessSessionResumption + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return DownstreamTlsContextMultiError(errors) + } + + return nil +} + +// DownstreamTlsContextMultiError is an error wrapping multiple validation +// errors returned by DownstreamTlsContext.ValidateAll() if the designated +// constraints aren't met. +type DownstreamTlsContextMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DownstreamTlsContextMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DownstreamTlsContextMultiError) AllErrors() []error { return m } + +// DownstreamTlsContextValidationError is the validation error returned by +// DownstreamTlsContext.Validate if the designated constraints aren't met. +type DownstreamTlsContextValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DownstreamTlsContextValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DownstreamTlsContextValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DownstreamTlsContextValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DownstreamTlsContextValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DownstreamTlsContextValidationError) ErrorName() string { + return "DownstreamTlsContextValidationError" +} + +// Error satisfies the builtin error interface +func (e DownstreamTlsContextValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDownstreamTlsContext.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DownstreamTlsContextValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DownstreamTlsContextValidationError{} + +// Validate checks the field values on TlsKeyLog with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *TlsKeyLog) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TlsKeyLog with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in TlsKeyLogMultiError, or nil +// if none found. 
+func (m *TlsKeyLog) ValidateAll() error { + return m.validate(true) +} + +func (m *TlsKeyLog) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetPath()) < 1 { + err := TlsKeyLogValidationError{ + field: "Path", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetLocalAddressRange() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TlsKeyLogValidationError{ + field: fmt.Sprintf("LocalAddressRange[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TlsKeyLogValidationError{ + field: fmt.Sprintf("LocalAddressRange[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TlsKeyLogValidationError{ + field: fmt.Sprintf("LocalAddressRange[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetRemoteAddressRange() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TlsKeyLogValidationError{ + field: fmt.Sprintf("RemoteAddressRange[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TlsKeyLogValidationError{ + field: fmt.Sprintf("RemoteAddressRange[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TlsKeyLogValidationError{ + field: fmt.Sprintf("RemoteAddressRange[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return TlsKeyLogMultiError(errors) + } + + return nil +} + +// TlsKeyLogMultiError is an error wrapping multiple validation errors returned +// by TlsKeyLog.ValidateAll() if the designated constraints aren't met. +type TlsKeyLogMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TlsKeyLogMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TlsKeyLogMultiError) AllErrors() []error { return m } + +// TlsKeyLogValidationError is the validation error returned by +// TlsKeyLog.Validate if the designated constraints aren't met. +type TlsKeyLogValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TlsKeyLogValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TlsKeyLogValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TlsKeyLogValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e TlsKeyLogValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TlsKeyLogValidationError) ErrorName() string { return "TlsKeyLogValidationError" } + +// Error satisfies the builtin error interface +func (e TlsKeyLogValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTlsKeyLog.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TlsKeyLogValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TlsKeyLogValidationError{} + +// Validate checks the field values on CommonTlsContext with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *CommonTlsContext) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CommonTlsContext with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CommonTlsContextMultiError, or nil if none found. +func (m *CommonTlsContext) ValidateAll() error { + return m.validate(true) +} + +func (m *CommonTlsContext) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetTlsParams()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "TlsParams", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "TlsParams", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTlsParams()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: "TlsParams", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetTlsCertificates() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: fmt.Sprintf("TlsCertificates[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: fmt.Sprintf("TlsCertificates[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: fmt.Sprintf("TlsCertificates[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetTlsCertificateSdsSecretConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: 
fmt.Sprintf("TlsCertificateSdsSecretConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: fmt.Sprintf("TlsCertificateSdsSecretConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: fmt.Sprintf("TlsCertificateSdsSecretConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetTlsCertificateProviderInstance()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "TlsCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "TlsCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTlsCertificateProviderInstance()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: "TlsCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCustomTlsCertificateSelector()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "CustomTlsCertificateSelector", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "CustomTlsCertificateSelector", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCustomTlsCertificateSelector()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: "CustomTlsCertificateSelector", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTlsCertificateCertificateProvider()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "TlsCertificateCertificateProvider", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "TlsCertificateCertificateProvider", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTlsCertificateCertificateProvider()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: "TlsCertificateCertificateProvider", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTlsCertificateCertificateProviderInstance()).(type) { + case interface{ 
ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "TlsCertificateCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "TlsCertificateCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTlsCertificateCertificateProviderInstance()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: "TlsCertificateCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCustomHandshaker()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "CustomHandshaker", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "CustomHandshaker", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCustomHandshaker()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: "CustomHandshaker", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetKeyLog()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "KeyLog", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "KeyLog", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetKeyLog()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: "KeyLog", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.ValidationContextType.(type) { + case *CommonTlsContext_ValidationContext: + if v == nil { + err := CommonTlsContextValidationError{ + field: "ValidationContextType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetValidationContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "ValidationContext", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "ValidationContext", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValidationContext()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: "ValidationContext", + reason: "embedded 
message failed validation", + cause: err, + } + } + } + + case *CommonTlsContext_ValidationContextSdsSecretConfig: + if v == nil { + err := CommonTlsContextValidationError{ + field: "ValidationContextType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetValidationContextSdsSecretConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "ValidationContextSdsSecretConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "ValidationContextSdsSecretConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValidationContextSdsSecretConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: "ValidationContextSdsSecretConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *CommonTlsContext_CombinedValidationContext: + if v == nil { + err := CommonTlsContextValidationError{ + field: "ValidationContextType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetCombinedValidationContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "CombinedValidationContext", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "CombinedValidationContext", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCombinedValidationContext()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: "CombinedValidationContext", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *CommonTlsContext_ValidationContextCertificateProvider: + if v == nil { + err := CommonTlsContextValidationError{ + field: "ValidationContextType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetValidationContextCertificateProvider()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "ValidationContextCertificateProvider", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "ValidationContextCertificateProvider", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValidationContextCertificateProvider()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: "ValidationContextCertificateProvider", + reason: "embedded message failed 
validation", + cause: err, + } + } + } + + case *CommonTlsContext_ValidationContextCertificateProviderInstance: + if v == nil { + err := CommonTlsContextValidationError{ + field: "ValidationContextType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetValidationContextCertificateProviderInstance()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "ValidationContextCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContextValidationError{ + field: "ValidationContextCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValidationContextCertificateProviderInstance()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContextValidationError{ + field: "ValidationContextCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return CommonTlsContextMultiError(errors) + } + + return nil +} + +// CommonTlsContextMultiError is an error wrapping multiple validation errors +// returned by CommonTlsContext.ValidateAll() if the designated constraints +// aren't met. +type CommonTlsContextMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CommonTlsContextMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CommonTlsContextMultiError) AllErrors() []error { return m } + +// CommonTlsContextValidationError is the validation error returned by +// CommonTlsContext.Validate if the designated constraints aren't met. +type CommonTlsContextValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CommonTlsContextValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CommonTlsContextValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CommonTlsContextValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CommonTlsContextValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e CommonTlsContextValidationError) ErrorName() string { return "CommonTlsContextValidationError" } + +// Error satisfies the builtin error interface +func (e CommonTlsContextValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCommonTlsContext.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CommonTlsContextValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CommonTlsContextValidationError{} + +// Validate checks the field values on CommonTlsContext_CertificateProvider +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *CommonTlsContext_CertificateProvider) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CommonTlsContext_CertificateProvider +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// CommonTlsContext_CertificateProviderMultiError, or nil if none found. +func (m *CommonTlsContext_CertificateProvider) ValidateAll() error { + return m.validate(true) +} + +func (m *CommonTlsContext_CertificateProvider) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := CommonTlsContext_CertificateProviderValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + oneofConfigPresent := false + switch v := m.Config.(type) { + case *CommonTlsContext_CertificateProvider_TypedConfig: + if v == nil { + err := CommonTlsContext_CertificateProviderValidationError{ + field: "Config", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConfigPresent = true + + if all { + switch v := interface{}(m.GetTypedConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContext_CertificateProviderValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContext_CertificateProviderValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContext_CertificateProviderValidationError{ + field: "TypedConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofConfigPresent { + err := CommonTlsContext_CertificateProviderValidationError{ + field: "Config", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return CommonTlsContext_CertificateProviderMultiError(errors) + } + + return nil +} + +// CommonTlsContext_CertificateProviderMultiError is an error wrapping multiple +// validation errors returned by +// 
CommonTlsContext_CertificateProvider.ValidateAll() if the designated +// constraints aren't met. +type CommonTlsContext_CertificateProviderMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CommonTlsContext_CertificateProviderMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CommonTlsContext_CertificateProviderMultiError) AllErrors() []error { return m } + +// CommonTlsContext_CertificateProviderValidationError is the validation error +// returned by CommonTlsContext_CertificateProvider.Validate if the designated +// constraints aren't met. +type CommonTlsContext_CertificateProviderValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CommonTlsContext_CertificateProviderValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CommonTlsContext_CertificateProviderValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CommonTlsContext_CertificateProviderValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CommonTlsContext_CertificateProviderValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CommonTlsContext_CertificateProviderValidationError) ErrorName() string { + return "CommonTlsContext_CertificateProviderValidationError" +} + +// Error satisfies the builtin error interface +func (e CommonTlsContext_CertificateProviderValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCommonTlsContext_CertificateProvider.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CommonTlsContext_CertificateProviderValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CommonTlsContext_CertificateProviderValidationError{} + +// Validate checks the field values on +// CommonTlsContext_CertificateProviderInstance with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *CommonTlsContext_CertificateProviderInstance) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// CommonTlsContext_CertificateProviderInstance with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// CommonTlsContext_CertificateProviderInstanceMultiError, or nil if none found. 
+func (m *CommonTlsContext_CertificateProviderInstance) ValidateAll() error { + return m.validate(true) +} + +func (m *CommonTlsContext_CertificateProviderInstance) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for InstanceName + + // no validation rules for CertificateName + + if len(errors) > 0 { + return CommonTlsContext_CertificateProviderInstanceMultiError(errors) + } + + return nil +} + +// CommonTlsContext_CertificateProviderInstanceMultiError is an error wrapping +// multiple validation errors returned by +// CommonTlsContext_CertificateProviderInstance.ValidateAll() if the +// designated constraints aren't met. +type CommonTlsContext_CertificateProviderInstanceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CommonTlsContext_CertificateProviderInstanceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CommonTlsContext_CertificateProviderInstanceMultiError) AllErrors() []error { return m } + +// CommonTlsContext_CertificateProviderInstanceValidationError is the +// validation error returned by +// CommonTlsContext_CertificateProviderInstance.Validate if the designated +// constraints aren't met. +type CommonTlsContext_CertificateProviderInstanceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CommonTlsContext_CertificateProviderInstanceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CommonTlsContext_CertificateProviderInstanceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CommonTlsContext_CertificateProviderInstanceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CommonTlsContext_CertificateProviderInstanceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CommonTlsContext_CertificateProviderInstanceValidationError) ErrorName() string { + return "CommonTlsContext_CertificateProviderInstanceValidationError" +} + +// Error satisfies the builtin error interface +func (e CommonTlsContext_CertificateProviderInstanceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCommonTlsContext_CertificateProviderInstance.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CommonTlsContext_CertificateProviderInstanceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CommonTlsContext_CertificateProviderInstanceValidationError{} + +// Validate checks the field values on +// CommonTlsContext_CombinedCertificateValidationContext with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CommonTlsContext_CombinedCertificateValidationContext) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// CommonTlsContext_CombinedCertificateValidationContext with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// CommonTlsContext_CombinedCertificateValidationContextMultiError, or nil if +// none found. +func (m *CommonTlsContext_CombinedCertificateValidationContext) ValidateAll() error { + return m.validate(true) +} + +func (m *CommonTlsContext_CombinedCertificateValidationContext) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetDefaultValidationContext() == nil { + err := CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "DefaultValidationContext", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetDefaultValidationContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "DefaultValidationContext", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "DefaultValidationContext", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDefaultValidationContext()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "DefaultValidationContext", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if m.GetValidationContextSdsSecretConfig() == nil { + err := CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "ValidationContextSdsSecretConfig", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetValidationContextSdsSecretConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "ValidationContextSdsSecretConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "ValidationContextSdsSecretConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValidationContextSdsSecretConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "ValidationContextSdsSecretConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetValidationContextCertificateProvider()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "ValidationContextCertificateProvider", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, 
CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "ValidationContextCertificateProvider", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValidationContextCertificateProvider()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "ValidationContextCertificateProvider", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetValidationContextCertificateProviderInstance()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "ValidationContextCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "ValidationContextCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValidationContextCertificateProviderInstance()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonTlsContext_CombinedCertificateValidationContextValidationError{ + field: "ValidationContextCertificateProviderInstance", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return CommonTlsContext_CombinedCertificateValidationContextMultiError(errors) + } + + return nil +} + +// CommonTlsContext_CombinedCertificateValidationContextMultiError is an error +// wrapping multiple validation errors returned by +// CommonTlsContext_CombinedCertificateValidationContext.ValidateAll() if the +// designated constraints aren't met. +type CommonTlsContext_CombinedCertificateValidationContextMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CommonTlsContext_CombinedCertificateValidationContextMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CommonTlsContext_CombinedCertificateValidationContextMultiError) AllErrors() []error { + return m +} + +// CommonTlsContext_CombinedCertificateValidationContextValidationError is the +// validation error returned by +// CommonTlsContext_CombinedCertificateValidationContext.Validate if the +// designated constraints aren't met. +type CommonTlsContext_CombinedCertificateValidationContextValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CommonTlsContext_CombinedCertificateValidationContextValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e CommonTlsContext_CombinedCertificateValidationContextValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e CommonTlsContext_CombinedCertificateValidationContextValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. 
+func (e CommonTlsContext_CombinedCertificateValidationContextValidationError) Key() bool { + return e.key +} + +// ErrorName returns error name. +func (e CommonTlsContext_CombinedCertificateValidationContextValidationError) ErrorName() string { + return "CommonTlsContext_CombinedCertificateValidationContextValidationError" +} + +// Error satisfies the builtin error interface +func (e CommonTlsContext_CombinedCertificateValidationContextValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCommonTlsContext_CombinedCertificateValidationContext.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CommonTlsContext_CombinedCertificateValidationContextValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CommonTlsContext_CombinedCertificateValidationContextValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.go new file mode 100644 index 0000000000..7e9ee89672 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.go @@ -0,0 +1,286 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto + +package tlsv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configuration specific to the `SPIFFE `_ certificate validator. +// +// Example: +// +// .. validated-code-block:: yaml +// +// :type-name: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext +// +// custom_validator_config: +// name: envoy.tls.cert_validator.spiffe +// typed_config: +// "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.SPIFFECertValidatorConfig +// trust_domains: +// - name: foo.com +// trust_bundle: +// filename: "foo.pem" +// - name: envoy.com +// trust_bundle: +// filename: "envoy.pem" +// +// In this example, a presented peer certificate whose SAN matches “spiffe://foo.com/**“ is validated against +// the "foo.pem" x.509 certificate. All the trust bundles are isolated from each other, so no trust domain can mint +// a SVID belonging to another trust domain. That means, in this example, a SVID signed by “envoy.com“'s CA with “spiffe://foo.com/**“ +// SAN would be rejected since Envoy selects the trust bundle according to the presented SAN before validate the certificate. +// +// Note that SPIFFE validator inherits and uses the following options from :ref:`CertificateValidationContext `. 
+// +// - :ref:`allow_expired_certificate ` to allow expired certificates. +// - :ref:`match_typed_subject_alt_names ` to match **URI** SAN of certificates. Unlike the default validator, SPIFFE validator only matches **URI** SAN (which equals to SVID in SPIFFE terminology) and ignore other SAN types. +type SPIFFECertValidatorConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This field specifies trust domains used for validating incoming X.509-SVID(s). + TrustDomains []*SPIFFECertValidatorConfig_TrustDomain `protobuf:"bytes,1,rep,name=trust_domains,json=trustDomains,proto3" json:"trust_domains,omitempty"` +} + +func (x *SPIFFECertValidatorConfig) Reset() { + *x = SPIFFECertValidatorConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SPIFFECertValidatorConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SPIFFECertValidatorConfig) ProtoMessage() {} + +func (x *SPIFFECertValidatorConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SPIFFECertValidatorConfig.ProtoReflect.Descriptor instead. +func (*SPIFFECertValidatorConfig) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDescGZIP(), []int{0} +} + +func (x *SPIFFECertValidatorConfig) GetTrustDomains() []*SPIFFECertValidatorConfig_TrustDomain { + if x != nil { + return x.TrustDomains + } + return nil +} + +type SPIFFECertValidatorConfig_TrustDomain struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the trust domain, “example.com“, “foo.bar.gov“ for example. + // Note that this must *not* have "spiffe://" prefix. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Specify a data source holding x.509 trust bundle used for validating incoming SVID(s) in this trust domain. 
+ TrustBundle *v3.DataSource `protobuf:"bytes,2,opt,name=trust_bundle,json=trustBundle,proto3" json:"trust_bundle,omitempty"` +} + +func (x *SPIFFECertValidatorConfig_TrustDomain) Reset() { + *x = SPIFFECertValidatorConfig_TrustDomain{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SPIFFECertValidatorConfig_TrustDomain) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SPIFFECertValidatorConfig_TrustDomain) ProtoMessage() {} + +func (x *SPIFFECertValidatorConfig_TrustDomain) ProtoReflect() protoreflect.Message { + mi := &file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SPIFFECertValidatorConfig_TrustDomain.ProtoReflect.Descriptor instead. +func (*SPIFFECertValidatorConfig_TrustDomain) Descriptor() ([]byte, []int) { + return file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *SPIFFECertValidatorConfig_TrustDomain) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SPIFFECertValidatorConfig_TrustDomain) GetTrustBundle() *v3.DataSource { + if x != nil { + return x.TrustBundle + } + return nil +} + +var File_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto protoreflect.FileDescriptor + +var file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDesc = []byte{ + 0x0a, 0x4b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x74, 0x6c, 0x73, 0x5f, + 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x29, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, + 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x8d, 0x02, 0x0a, 0x19, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x43, 0x65, 0x72, 0x74, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x7f, 0x0a, 0x0d, 0x74, 0x72, 0x75, 0x73, 0x74, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x50, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 
0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x43, 0x65, 0x72, 0x74, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x72, 0x75, + 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, + 0x08, 0x01, 0x52, 0x0c, 0x74, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, + 0x1a, 0x6f, 0x0a, 0x0b, 0x54, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, + 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x0c, + 0x74, 0x72, 0x75, 0x73, 0x74, 0x5f, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x0b, 0x74, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, + 0x65, 0x42, 0xba, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x37, 0x69, 0x6f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, + 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x1d, 0x54, 0x6c, 0x73, 0x53, 0x70, 0x69, 0x66, 0x66, 0x65, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x56, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, + 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x6c, 0x73, 0x76, 0x33, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDescOnce sync.Once + file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDescData = file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDesc +) + +func file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDescGZIP() []byte { + file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDescOnce.Do(func() { + file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDescData) + }) + return file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDescData +} + +var file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_goTypes = []interface{}{ + (*SPIFFECertValidatorConfig)(nil), // 
0: envoy.extensions.transport_sockets.tls.v3.SPIFFECertValidatorConfig + (*SPIFFECertValidatorConfig_TrustDomain)(nil), // 1: envoy.extensions.transport_sockets.tls.v3.SPIFFECertValidatorConfig.TrustDomain + (*v3.DataSource)(nil), // 2: envoy.config.core.v3.DataSource +} +var file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_depIdxs = []int32{ + 1, // 0: envoy.extensions.transport_sockets.tls.v3.SPIFFECertValidatorConfig.trust_domains:type_name -> envoy.extensions.transport_sockets.tls.v3.SPIFFECertValidatorConfig.TrustDomain + 2, // 1: envoy.extensions.transport_sockets.tls.v3.SPIFFECertValidatorConfig.TrustDomain.trust_bundle:type_name -> envoy.config.core.v3.DataSource + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_init() } +func file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_init() { + if File_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SPIFFECertValidatorConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SPIFFECertValidatorConfig_TrustDomain); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_goTypes, + DependencyIndexes: file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_depIdxs, + MessageInfos: file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_msgTypes, + }.Build() + File_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto = out.File + file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_rawDesc = nil + file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_goTypes = nil + file_envoy_extensions_transport_sockets_tls_v3_tls_spiffe_validator_config_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.validate.go new file mode 100644 index 0000000000..4992dec753 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.pb.validate.go @@ -0,0 +1,329 @@ +//go:build !disable_pgv +// Code 
generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto + +package tlsv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on SPIFFECertValidatorConfig with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *SPIFFECertValidatorConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SPIFFECertValidatorConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SPIFFECertValidatorConfigMultiError, or nil if none found. +func (m *SPIFFECertValidatorConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *SPIFFECertValidatorConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetTrustDomains()) < 1 { + err := SPIFFECertValidatorConfigValidationError{ + field: "TrustDomains", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetTrustDomains() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SPIFFECertValidatorConfigValidationError{ + field: fmt.Sprintf("TrustDomains[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SPIFFECertValidatorConfigValidationError{ + field: fmt.Sprintf("TrustDomains[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SPIFFECertValidatorConfigValidationError{ + field: fmt.Sprintf("TrustDomains[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return SPIFFECertValidatorConfigMultiError(errors) + } + + return nil +} + +// SPIFFECertValidatorConfigMultiError is an error wrapping multiple validation +// errors returned by SPIFFECertValidatorConfig.ValidateAll() if the +// designated constraints aren't met. +type SPIFFECertValidatorConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SPIFFECertValidatorConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SPIFFECertValidatorConfigMultiError) AllErrors() []error { return m } + +// SPIFFECertValidatorConfigValidationError is the validation error returned by +// SPIFFECertValidatorConfig.Validate if the designated constraints aren't met. 
+type SPIFFECertValidatorConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SPIFFECertValidatorConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SPIFFECertValidatorConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SPIFFECertValidatorConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SPIFFECertValidatorConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SPIFFECertValidatorConfigValidationError) ErrorName() string { + return "SPIFFECertValidatorConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e SPIFFECertValidatorConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSPIFFECertValidatorConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SPIFFECertValidatorConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SPIFFECertValidatorConfigValidationError{} + +// Validate checks the field values on SPIFFECertValidatorConfig_TrustDomain +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *SPIFFECertValidatorConfig_TrustDomain) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SPIFFECertValidatorConfig_TrustDomain +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// SPIFFECertValidatorConfig_TrustDomainMultiError, or nil if none found. 
+func (m *SPIFFECertValidatorConfig_TrustDomain) ValidateAll() error { + return m.validate(true) +} + +func (m *SPIFFECertValidatorConfig_TrustDomain) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := SPIFFECertValidatorConfig_TrustDomainValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTrustBundle()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SPIFFECertValidatorConfig_TrustDomainValidationError{ + field: "TrustBundle", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SPIFFECertValidatorConfig_TrustDomainValidationError{ + field: "TrustBundle", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTrustBundle()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SPIFFECertValidatorConfig_TrustDomainValidationError{ + field: "TrustBundle", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return SPIFFECertValidatorConfig_TrustDomainMultiError(errors) + } + + return nil +} + +// SPIFFECertValidatorConfig_TrustDomainMultiError is an error wrapping +// multiple validation errors returned by +// SPIFFECertValidatorConfig_TrustDomain.ValidateAll() if the designated +// constraints aren't met. +type SPIFFECertValidatorConfig_TrustDomainMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SPIFFECertValidatorConfig_TrustDomainMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SPIFFECertValidatorConfig_TrustDomainMultiError) AllErrors() []error { return m } + +// SPIFFECertValidatorConfig_TrustDomainValidationError is the validation error +// returned by SPIFFECertValidatorConfig_TrustDomain.Validate if the +// designated constraints aren't met. +type SPIFFECertValidatorConfig_TrustDomainValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SPIFFECertValidatorConfig_TrustDomainValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SPIFFECertValidatorConfig_TrustDomainValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SPIFFECertValidatorConfig_TrustDomainValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SPIFFECertValidatorConfig_TrustDomainValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e SPIFFECertValidatorConfig_TrustDomainValidationError) ErrorName() string { + return "SPIFFECertValidatorConfig_TrustDomainValidationError" +} + +// Error satisfies the builtin error interface +func (e SPIFFECertValidatorConfig_TrustDomainValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSPIFFECertValidatorConfig_TrustDomain.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SPIFFECertValidatorConfig_TrustDomainValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SPIFFECertValidatorConfig_TrustDomainValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config_vtproto.pb.go new file mode 100644 index 0000000000..6cad4f635f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config_vtproto.pb.go @@ -0,0 +1,167 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto + +package tlsv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *SPIFFECertValidatorConfig_TrustDomain) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SPIFFECertValidatorConfig_TrustDomain) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SPIFFECertValidatorConfig_TrustDomain) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TrustBundle != nil { + if vtmsg, ok := interface{}(m.TrustBundle).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TrustBundle) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SPIFFECertValidatorConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SPIFFECertValidatorConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SPIFFECertValidatorConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TrustDomains) > 0 { + for iNdEx := len(m.TrustDomains) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TrustDomains[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SPIFFECertValidatorConfig_TrustDomain) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TrustBundle != nil { + if size, ok := interface{}(m.TrustBundle).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TrustBundle) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SPIFFECertValidatorConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.TrustDomains) > 0 { + for _, e := range m.TrustDomains { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_vtproto.pb.go 
b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_vtproto.pb.go new file mode 100644 index 0000000000..287129049b --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/tls_vtproto.pb.go @@ -0,0 +1,1265 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/extensions/transport_sockets/tls/v3/tls.proto + +package tlsv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *UpstreamTlsContext) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpstreamTlsContext) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UpstreamTlsContext) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.EnforceRsaKeyUsage != nil { + size, err := (*wrapperspb.BoolValue)(m.EnforceRsaKeyUsage).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.MaxSessionKeys != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxSessionKeys).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.AllowRenegotiation { + i-- + if m.AllowRenegotiation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Sni) > 0 { + i -= len(m.Sni) + copy(dAtA[i:], m.Sni) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Sni))) + i-- + dAtA[i] = 0x12 + } + if m.CommonTlsContext != nil { + size, err := m.CommonTlsContext.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DownstreamTlsContext) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DownstreamTlsContext) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DownstreamTlsContext) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], 
m.unknownFields) + } + if m.PreferClientCiphers { + i-- + if m.PreferClientCiphers { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } + if m.DisableStatefulSessionResumption { + i-- + if m.DisableStatefulSessionResumption { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.FullScanCertsOnSniMismatch != nil { + size, err := (*wrapperspb.BoolValue)(m.FullScanCertsOnSniMismatch).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if m.OcspStaplePolicy != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.OcspStaplePolicy)) + i-- + dAtA[i] = 0x40 + } + if msg, ok := m.SessionTicketKeysType.(*DownstreamTlsContext_DisableStatelessSessionResumption); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.SessionTimeout != nil { + size, err := (*durationpb.Duration)(m.SessionTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if msg, ok := m.SessionTicketKeysType.(*DownstreamTlsContext_SessionTicketKeysSdsSecretConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.SessionTicketKeysType.(*DownstreamTlsContext_SessionTicketKeys); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.RequireSni != nil { + size, err := (*wrapperspb.BoolValue)(m.RequireSni).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.RequireClientCertificate != nil { + size, err := (*wrapperspb.BoolValue)(m.RequireClientCertificate).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.CommonTlsContext != nil { + size, err := m.CommonTlsContext.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DownstreamTlsContext_SessionTicketKeys) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DownstreamTlsContext_SessionTicketKeys) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SessionTicketKeys != nil { + size, err := m.SessionTicketKeys.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *DownstreamTlsContext_SessionTicketKeysSdsSecretConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DownstreamTlsContext_SessionTicketKeysSdsSecretConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SessionTicketKeysSdsSecretConfig != nil { + size, err := m.SessionTicketKeysSdsSecretConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *DownstreamTlsContext_DisableStatelessSessionResumption) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DownstreamTlsContext_DisableStatelessSessionResumption) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.DisableStatelessSessionResumption { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + return len(dAtA) - i, nil +} +func (m *TlsKeyLog) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TlsKeyLog) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TlsKeyLog) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RemoteAddressRange) > 0 { + for iNdEx := len(m.RemoteAddressRange) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.RemoteAddressRange[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RemoteAddressRange[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.LocalAddressRange) > 0 { + for iNdEx := len(m.LocalAddressRange) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.LocalAddressRange[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.LocalAddressRange[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommonTlsContext_CertificateProvider) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommonTlsContext_CertificateProvider) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext_CertificateProvider) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if 
m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Config.(*CommonTlsContext_CertificateProvider_TypedConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommonTlsContext_CertificateProvider_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext_CertificateProvider_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TypedConfig != nil { + if vtmsg, ok := interface{}(m.TypedConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TypedConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *CommonTlsContext_CertificateProviderInstance) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommonTlsContext_CertificateProviderInstance) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext_CertificateProviderInstance) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CertificateName) > 0 { + i -= len(m.CertificateName) + copy(dAtA[i:], m.CertificateName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CertificateName))) + i-- + dAtA[i] = 0x12 + } + if len(m.InstanceName) > 0 { + i -= len(m.InstanceName) + copy(dAtA[i:], m.InstanceName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.InstanceName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommonTlsContext_CombinedCertificateValidationContext) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommonTlsContext_CombinedCertificateValidationContext) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext_CombinedCertificateValidationContext) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ValidationContextCertificateProviderInstance != nil { + size, err := 
m.ValidationContextCertificateProviderInstance.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.ValidationContextCertificateProvider != nil { + size, err := m.ValidationContextCertificateProvider.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.ValidationContextSdsSecretConfig != nil { + size, err := m.ValidationContextSdsSecretConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.DefaultValidationContext != nil { + size, err := m.DefaultValidationContext.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommonTlsContext) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommonTlsContext) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.CustomTlsCertificateSelector != nil { + if vtmsg, ok := interface{}(m.CustomTlsCertificateSelector).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CustomTlsCertificateSelector) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.KeyLog != nil { + size, err := m.KeyLog.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + if m.TlsCertificateProviderInstance != nil { + size, err := m.TlsCertificateProviderInstance.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if m.CustomHandshaker != nil { + if vtmsg, ok := interface{}(m.CustomHandshaker).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CustomHandshaker) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x6a + } + if msg, ok := m.ValidationContextType.(*CommonTlsContext_ValidationContextCertificateProviderInstance); ok { + size, err := 
msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.TlsCertificateCertificateProviderInstance != nil { + size, err := m.TlsCertificateCertificateProviderInstance.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if msg, ok := m.ValidationContextType.(*CommonTlsContext_ValidationContextCertificateProvider); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.TlsCertificateCertificateProvider != nil { + size, err := m.TlsCertificateCertificateProvider.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if msg, ok := m.ValidationContextType.(*CommonTlsContext_CombinedValidationContext); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ValidationContextType.(*CommonTlsContext_ValidationContextSdsSecretConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.TlsCertificateSdsSecretConfigs) > 0 { + for iNdEx := len(m.TlsCertificateSdsSecretConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TlsCertificateSdsSecretConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.AlpnProtocols) > 0 { + for iNdEx := len(m.AlpnProtocols) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AlpnProtocols[iNdEx]) + copy(dAtA[i:], m.AlpnProtocols[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AlpnProtocols[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if msg, ok := m.ValidationContextType.(*CommonTlsContext_ValidationContext); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.TlsCertificates) > 0 { + for iNdEx := len(m.TlsCertificates) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TlsCertificates[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.TlsParams != nil { + size, err := m.TlsParams.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommonTlsContext_ValidationContext) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext_ValidationContext) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ValidationContext != nil { + size, err := m.ValidationContext.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *CommonTlsContext_ValidationContextSdsSecretConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m 
*CommonTlsContext_ValidationContextSdsSecretConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ValidationContextSdsSecretConfig != nil { + size, err := m.ValidationContextSdsSecretConfig.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *CommonTlsContext_CombinedValidationContext) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext_CombinedValidationContext) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.CombinedValidationContext != nil { + size, err := m.CombinedValidationContext.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *CommonTlsContext_ValidationContextCertificateProvider) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext_ValidationContextCertificateProvider) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ValidationContextCertificateProvider != nil { + size, err := m.ValidationContextCertificateProvider.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *CommonTlsContext_ValidationContextCertificateProviderInstance) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CommonTlsContext_ValidationContextCertificateProviderInstance) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ValidationContextCertificateProviderInstance != nil { + size, err := m.ValidationContextCertificateProviderInstance.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x62 + } + return len(dAtA) - i, nil +} +func (m *UpstreamTlsContext) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CommonTlsContext != nil { + l = m.CommonTlsContext.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Sni) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AllowRenegotiation { + n += 2 + } + if m.MaxSessionKeys != nil { + l = (*wrapperspb.UInt32Value)(m.MaxSessionKeys).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.EnforceRsaKeyUsage != nil { + l = (*wrapperspb.BoolValue)(m.EnforceRsaKeyUsage).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DownstreamTlsContext) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CommonTlsContext != nil { + l = m.CommonTlsContext.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) 
+ } + if m.RequireClientCertificate != nil { + l = (*wrapperspb.BoolValue)(m.RequireClientCertificate).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RequireSni != nil { + l = (*wrapperspb.BoolValue)(m.RequireSni).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.SessionTicketKeysType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.SessionTimeout != nil { + l = (*durationpb.Duration)(m.SessionTimeout).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.OcspStaplePolicy != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.OcspStaplePolicy)) + } + if m.FullScanCertsOnSniMismatch != nil { + l = (*wrapperspb.BoolValue)(m.FullScanCertsOnSniMismatch).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DisableStatefulSessionResumption { + n += 2 + } + if m.PreferClientCiphers { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *DownstreamTlsContext_SessionTicketKeys) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SessionTicketKeys != nil { + l = m.SessionTicketKeys.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *DownstreamTlsContext_SessionTicketKeysSdsSecretConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SessionTicketKeysSdsSecretConfig != nil { + l = m.SessionTicketKeysSdsSecretConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *DownstreamTlsContext_DisableStatelessSessionResumption) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *TlsKeyLog) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.LocalAddressRange) > 0 { + for _, e := range m.LocalAddressRange { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RemoteAddressRange) > 0 { + for _, e := range m.RemoteAddressRange { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *CommonTlsContext_CertificateProvider) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.Config.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *CommonTlsContext_CertificateProvider_TypedConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypedConfig != nil { + if size, ok := interface{}(m.TypedConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.TypedConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CommonTlsContext_CertificateProviderInstance) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.InstanceName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.CertificateName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + 
return n +} + +func (m *CommonTlsContext_CombinedCertificateValidationContext) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DefaultValidationContext != nil { + l = m.DefaultValidationContext.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ValidationContextSdsSecretConfig != nil { + l = m.ValidationContextSdsSecretConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ValidationContextCertificateProvider != nil { + l = m.ValidationContextCertificateProvider.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ValidationContextCertificateProviderInstance != nil { + l = m.ValidationContextCertificateProviderInstance.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CommonTlsContext) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TlsParams != nil { + l = m.TlsParams.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.TlsCertificates) > 0 { + for _, e := range m.TlsCertificates { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if vtmsg, ok := m.ValidationContextType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if len(m.AlpnProtocols) > 0 { + for _, s := range m.AlpnProtocols { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.TlsCertificateSdsSecretConfigs) > 0 { + for _, e := range m.TlsCertificateSdsSecretConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.TlsCertificateCertificateProvider != nil { + l = m.TlsCertificateCertificateProvider.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TlsCertificateCertificateProviderInstance != nil { + l = m.TlsCertificateCertificateProviderInstance.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CustomHandshaker != nil { + if size, ok := interface{}(m.CustomHandshaker).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CustomHandshaker) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TlsCertificateProviderInstance != nil { + l = m.TlsCertificateProviderInstance.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.KeyLog != nil { + l = m.KeyLog.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CustomTlsCertificateSelector != nil { + if size, ok := interface{}(m.CustomTlsCertificateSelector).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CustomTlsCertificateSelector) + } + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CommonTlsContext_ValidationContext) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValidationContext != nil { + l = m.ValidationContext.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CommonTlsContext_ValidationContextSdsSecretConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValidationContextSdsSecretConfig != nil { + l = m.ValidationContextSdsSecretConfig.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CommonTlsContext_CombinedValidationContext) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CombinedValidationContext != nil { + l = 
m.CombinedValidationContext.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CommonTlsContext_ValidationContextCertificateProvider) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValidationContextCertificateProvider != nil { + l = m.ValidationContextCertificateProvider.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CommonTlsContext_ValidationContextCertificateProviderInstance) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValidationContextCertificateProviderInstance != nil { + l = m.ValidationContextCertificateProviderInstance.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.go new file mode 100644 index 0000000000..6f09930e96 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.go @@ -0,0 +1,182 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/service/discovery/v3/ads.proto + +package discoveryv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing +// services: https://github.com/google/protobuf/issues/4221 +type AdsDummy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *AdsDummy) Reset() { + *x = AdsDummy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_ads_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AdsDummy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AdsDummy) ProtoMessage() {} + +func (x *AdsDummy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_ads_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AdsDummy.ProtoReflect.Descriptor instead. 
+func (*AdsDummy) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_ads_proto_rawDescGZIP(), []int{0} +} + +var File_envoy_service_discovery_v3_ads_proto protoreflect.FileDescriptor + +var file_envoy_service_discovery_v3_ads_proto_rawDesc = []byte{ + 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, + 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, + 0x76, 0x33, 0x1a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2f, 0x76, 0x33, 0x2f, 0x64, + 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x36, 0x0a, 0x08, 0x41, 0x64, 0x73, 0x44, 0x75, 0x6d, 0x6d, 0x79, 0x3a, 0x2a, 0x9a, 0xc5, + 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x32, 0x2e, + 0x41, 0x64, 0x73, 0x44, 0x75, 0x6d, 0x6d, 0x79, 0x32, 0xa6, 0x02, 0x0a, 0x1a, 0x41, 0x67, 0x67, + 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x7e, 0x0a, 0x19, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x12, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, + 0x33, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, + 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x87, 0x01, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x74, + 0x61, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x12, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, + 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, + 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, + 0x01, 0x42, 0x8d, 0x01, 0xba, 0x80, 0xc8, 0xd1, 
0x06, 0x02, 0x10, 0x02, 0x0a, 0x28, 0x69, 0x6f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, + 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x41, 0x64, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x4d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, + 0x72, 0x79, 0x2f, 0x76, 0x33, 0x3b, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x76, + 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_service_discovery_v3_ads_proto_rawDescOnce sync.Once + file_envoy_service_discovery_v3_ads_proto_rawDescData = file_envoy_service_discovery_v3_ads_proto_rawDesc +) + +func file_envoy_service_discovery_v3_ads_proto_rawDescGZIP() []byte { + file_envoy_service_discovery_v3_ads_proto_rawDescOnce.Do(func() { + file_envoy_service_discovery_v3_ads_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_service_discovery_v3_ads_proto_rawDescData) + }) + return file_envoy_service_discovery_v3_ads_proto_rawDescData +} + +var file_envoy_service_discovery_v3_ads_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_service_discovery_v3_ads_proto_goTypes = []interface{}{ + (*AdsDummy)(nil), // 0: envoy.service.discovery.v3.AdsDummy + (*DiscoveryRequest)(nil), // 1: envoy.service.discovery.v3.DiscoveryRequest + (*DeltaDiscoveryRequest)(nil), // 2: envoy.service.discovery.v3.DeltaDiscoveryRequest + (*DiscoveryResponse)(nil), // 3: envoy.service.discovery.v3.DiscoveryResponse + (*DeltaDiscoveryResponse)(nil), // 4: envoy.service.discovery.v3.DeltaDiscoveryResponse +} +var file_envoy_service_discovery_v3_ads_proto_depIdxs = []int32{ + 1, // 0: envoy.service.discovery.v3.AggregatedDiscoveryService.StreamAggregatedResources:input_type -> envoy.service.discovery.v3.DiscoveryRequest + 2, // 1: envoy.service.discovery.v3.AggregatedDiscoveryService.DeltaAggregatedResources:input_type -> envoy.service.discovery.v3.DeltaDiscoveryRequest + 3, // 2: envoy.service.discovery.v3.AggregatedDiscoveryService.StreamAggregatedResources:output_type -> envoy.service.discovery.v3.DiscoveryResponse + 4, // 3: envoy.service.discovery.v3.AggregatedDiscoveryService.DeltaAggregatedResources:output_type -> envoy.service.discovery.v3.DeltaDiscoveryResponse + 2, // [2:4] is the sub-list for method output_type + 0, // [0:2] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_service_discovery_v3_ads_proto_init() } +func file_envoy_service_discovery_v3_ads_proto_init() { + if File_envoy_service_discovery_v3_ads_proto != nil { + return + } + file_envoy_service_discovery_v3_discovery_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_service_discovery_v3_ads_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AdsDummy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_service_discovery_v3_ads_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_envoy_service_discovery_v3_ads_proto_goTypes, + DependencyIndexes: file_envoy_service_discovery_v3_ads_proto_depIdxs, + MessageInfos: file_envoy_service_discovery_v3_ads_proto_msgTypes, + }.Build() + File_envoy_service_discovery_v3_ads_proto = out.File + file_envoy_service_discovery_v3_ads_proto_rawDesc = nil + file_envoy_service_discovery_v3_ads_proto_goTypes = nil + file_envoy_service_discovery_v3_ads_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.validate.go new file mode 100644 index 0000000000..9080fedd85 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads.pb.validate.go @@ -0,0 +1,136 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/service/discovery/v3/ads.proto + +package discoveryv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on AdsDummy with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *AdsDummy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on AdsDummy with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in AdsDummyMultiError, or nil +// if none found. +func (m *AdsDummy) ValidateAll() error { + return m.validate(true) +} + +func (m *AdsDummy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return AdsDummyMultiError(errors) + } + + return nil +} + +// AdsDummyMultiError is an error wrapping multiple validation errors returned +// by AdsDummy.ValidateAll() if the designated constraints aren't met. +type AdsDummyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AdsDummyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AdsDummyMultiError) AllErrors() []error { return m } + +// AdsDummyValidationError is the validation error returned by +// AdsDummy.Validate if the designated constraints aren't met. +type AdsDummyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AdsDummyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AdsDummyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e AdsDummyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AdsDummyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e AdsDummyValidationError) ErrorName() string { return "AdsDummyValidationError" } + +// Error satisfies the builtin error interface +func (e AdsDummyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAdsDummy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AdsDummyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AdsDummyValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads_grpc.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads_grpc.pb.go new file mode 100644 index 0000000000..7a7f1af970 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads_grpc.pb.go @@ -0,0 +1,210 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v5.26.1 +// source: envoy/service/discovery/v3/ads.proto + +package discoveryv3 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + AggregatedDiscoveryService_StreamAggregatedResources_FullMethodName = "/envoy.service.discovery.v3.AggregatedDiscoveryService/StreamAggregatedResources" + AggregatedDiscoveryService_DeltaAggregatedResources_FullMethodName = "/envoy.service.discovery.v3.AggregatedDiscoveryService/DeltaAggregatedResources" +) + +// AggregatedDiscoveryServiceClient is the client API for AggregatedDiscoveryService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type AggregatedDiscoveryServiceClient interface { + // This is a gRPC-only API. + StreamAggregatedResources(ctx context.Context, opts ...grpc.CallOption) (AggregatedDiscoveryService_StreamAggregatedResourcesClient, error) + DeltaAggregatedResources(ctx context.Context, opts ...grpc.CallOption) (AggregatedDiscoveryService_DeltaAggregatedResourcesClient, error) +} + +type aggregatedDiscoveryServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewAggregatedDiscoveryServiceClient(cc grpc.ClientConnInterface) AggregatedDiscoveryServiceClient { + return &aggregatedDiscoveryServiceClient{cc} +} + +func (c *aggregatedDiscoveryServiceClient) StreamAggregatedResources(ctx context.Context, opts ...grpc.CallOption) (AggregatedDiscoveryService_StreamAggregatedResourcesClient, error) { + stream, err := c.cc.NewStream(ctx, &AggregatedDiscoveryService_ServiceDesc.Streams[0], AggregatedDiscoveryService_StreamAggregatedResources_FullMethodName, opts...) 
+ if err != nil { + return nil, err + } + x := &aggregatedDiscoveryServiceStreamAggregatedResourcesClient{stream} + return x, nil +} + +type AggregatedDiscoveryService_StreamAggregatedResourcesClient interface { + Send(*DiscoveryRequest) error + Recv() (*DiscoveryResponse, error) + grpc.ClientStream +} + +type aggregatedDiscoveryServiceStreamAggregatedResourcesClient struct { + grpc.ClientStream +} + +func (x *aggregatedDiscoveryServiceStreamAggregatedResourcesClient) Send(m *DiscoveryRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *aggregatedDiscoveryServiceStreamAggregatedResourcesClient) Recv() (*DiscoveryResponse, error) { + m := new(DiscoveryResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *aggregatedDiscoveryServiceClient) DeltaAggregatedResources(ctx context.Context, opts ...grpc.CallOption) (AggregatedDiscoveryService_DeltaAggregatedResourcesClient, error) { + stream, err := c.cc.NewStream(ctx, &AggregatedDiscoveryService_ServiceDesc.Streams[1], AggregatedDiscoveryService_DeltaAggregatedResources_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &aggregatedDiscoveryServiceDeltaAggregatedResourcesClient{stream} + return x, nil +} + +type AggregatedDiscoveryService_DeltaAggregatedResourcesClient interface { + Send(*DeltaDiscoveryRequest) error + Recv() (*DeltaDiscoveryResponse, error) + grpc.ClientStream +} + +type aggregatedDiscoveryServiceDeltaAggregatedResourcesClient struct { + grpc.ClientStream +} + +func (x *aggregatedDiscoveryServiceDeltaAggregatedResourcesClient) Send(m *DeltaDiscoveryRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *aggregatedDiscoveryServiceDeltaAggregatedResourcesClient) Recv() (*DeltaDiscoveryResponse, error) { + m := new(DeltaDiscoveryResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// AggregatedDiscoveryServiceServer is the server API for AggregatedDiscoveryService service. +// All implementations should embed UnimplementedAggregatedDiscoveryServiceServer +// for forward compatibility +type AggregatedDiscoveryServiceServer interface { + // This is a gRPC-only API. + StreamAggregatedResources(AggregatedDiscoveryService_StreamAggregatedResourcesServer) error + DeltaAggregatedResources(AggregatedDiscoveryService_DeltaAggregatedResourcesServer) error +} + +// UnimplementedAggregatedDiscoveryServiceServer should be embedded to have forward compatible implementations. +type UnimplementedAggregatedDiscoveryServiceServer struct { +} + +func (UnimplementedAggregatedDiscoveryServiceServer) StreamAggregatedResources(AggregatedDiscoveryService_StreamAggregatedResourcesServer) error { + return status.Errorf(codes.Unimplemented, "method StreamAggregatedResources not implemented") +} +func (UnimplementedAggregatedDiscoveryServiceServer) DeltaAggregatedResources(AggregatedDiscoveryService_DeltaAggregatedResourcesServer) error { + return status.Errorf(codes.Unimplemented, "method DeltaAggregatedResources not implemented") +} + +// UnsafeAggregatedDiscoveryServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to AggregatedDiscoveryServiceServer will +// result in compilation errors. 
+type UnsafeAggregatedDiscoveryServiceServer interface { + mustEmbedUnimplementedAggregatedDiscoveryServiceServer() +} + +func RegisterAggregatedDiscoveryServiceServer(s grpc.ServiceRegistrar, srv AggregatedDiscoveryServiceServer) { + s.RegisterService(&AggregatedDiscoveryService_ServiceDesc, srv) +} + +func _AggregatedDiscoveryService_StreamAggregatedResources_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(AggregatedDiscoveryServiceServer).StreamAggregatedResources(&aggregatedDiscoveryServiceStreamAggregatedResourcesServer{stream}) +} + +type AggregatedDiscoveryService_StreamAggregatedResourcesServer interface { + Send(*DiscoveryResponse) error + Recv() (*DiscoveryRequest, error) + grpc.ServerStream +} + +type aggregatedDiscoveryServiceStreamAggregatedResourcesServer struct { + grpc.ServerStream +} + +func (x *aggregatedDiscoveryServiceStreamAggregatedResourcesServer) Send(m *DiscoveryResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *aggregatedDiscoveryServiceStreamAggregatedResourcesServer) Recv() (*DiscoveryRequest, error) { + m := new(DiscoveryRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _AggregatedDiscoveryService_DeltaAggregatedResources_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(AggregatedDiscoveryServiceServer).DeltaAggregatedResources(&aggregatedDiscoveryServiceDeltaAggregatedResourcesServer{stream}) +} + +type AggregatedDiscoveryService_DeltaAggregatedResourcesServer interface { + Send(*DeltaDiscoveryResponse) error + Recv() (*DeltaDiscoveryRequest, error) + grpc.ServerStream +} + +type aggregatedDiscoveryServiceDeltaAggregatedResourcesServer struct { + grpc.ServerStream +} + +func (x *aggregatedDiscoveryServiceDeltaAggregatedResourcesServer) Send(m *DeltaDiscoveryResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *aggregatedDiscoveryServiceDeltaAggregatedResourcesServer) Recv() (*DeltaDiscoveryRequest, error) { + m := new(DeltaDiscoveryRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// AggregatedDiscoveryService_ServiceDesc is the grpc.ServiceDesc for AggregatedDiscoveryService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var AggregatedDiscoveryService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "envoy.service.discovery.v3.AggregatedDiscoveryService", + HandlerType: (*AggregatedDiscoveryServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamAggregatedResources", + Handler: _AggregatedDiscoveryService_StreamAggregatedResources_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "DeltaAggregatedResources", + Handler: _AggregatedDiscoveryService_DeltaAggregatedResources_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "envoy/service/discovery/v3/ads.proto", +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads_vtproto.pb.go new file mode 100644 index 0000000000..0e604235d1 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/ads_vtproto.pb.go @@ -0,0 +1,61 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
+// source: envoy/service/discovery/v3/ads.proto + +package discoveryv3 + +import ( + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *AdsDummy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdsDummy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *AdsDummy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *AdsDummy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.go new file mode 100644 index 0000000000..a9b5f69358 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.go @@ -0,0 +1,1693 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/service/discovery/v3/discovery.proto + +package discoveryv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + status "google.golang.org/genproto/googleapis/rpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Specifies a resource to be subscribed to. +type ResourceLocator struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource name to subscribe to. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A set of dynamic parameters used to match against the dynamic parameter + // constraints on the resource. This allows clients to select between + // multiple variants of the same resource. 
+ DynamicParameters map[string]string `protobuf:"bytes,2,rep,name=dynamic_parameters,json=dynamicParameters,proto3" json:"dynamic_parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ResourceLocator) Reset() { + *x = ResourceLocator{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceLocator) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceLocator) ProtoMessage() {} + +func (x *ResourceLocator) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceLocator.ProtoReflect.Descriptor instead. +func (*ResourceLocator) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{0} +} + +func (x *ResourceLocator) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ResourceLocator) GetDynamicParameters() map[string]string { + if x != nil { + return x.DynamicParameters + } + return nil +} + +// Specifies a concrete resource name. +type ResourceName struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the resource. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Dynamic parameter constraints associated with this resource. To be used by client-side caches + // (including xDS proxies) when matching subscribed resource locators. + DynamicParameterConstraints *DynamicParameterConstraints `protobuf:"bytes,2,opt,name=dynamic_parameter_constraints,json=dynamicParameterConstraints,proto3" json:"dynamic_parameter_constraints,omitempty"` +} + +func (x *ResourceName) Reset() { + *x = ResourceName{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceName) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceName) ProtoMessage() {} + +func (x *ResourceName) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceName.ProtoReflect.Descriptor instead. +func (*ResourceName) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{1} +} + +func (x *ResourceName) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ResourceName) GetDynamicParameterConstraints() *DynamicParameterConstraints { + if x != nil { + return x.DynamicParameterConstraints + } + return nil +} + +// A DiscoveryRequest requests a set of versioned resources of the same type for +// a given Envoy node on some API. 
+// [#next-free-field: 8] +type DiscoveryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The version_info provided in the request messages will be the version_info + // received with the most recent successfully processed response or empty on + // the first request. It is expected that no new request is sent after a + // response is received until the Envoy instance is ready to ACK/NACK the new + // configuration. ACK/NACK takes place by returning the new API config version + // as applied or the previous API config version respectively. Each type_url + // (see below) has an independent version associated with it. + VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The node making the request. + Node *v3.Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` + // List of resources to subscribe to, e.g. list of cluster names or a route + // configuration name. If this is empty, all resources for the API are + // returned. LDS/CDS may have empty resource_names, which will cause all + // resources for the Envoy instance to be returned. The LDS and CDS responses + // will then imply a number of resources that need to be fetched via EDS/RDS, + // which will be explicitly enumerated in resource_names. + ResourceNames []string `protobuf:"bytes,3,rep,name=resource_names,json=resourceNames,proto3" json:"resource_names,omitempty"` + // [#not-implemented-hide:] + // Alternative to “resource_names“ field that allows specifying dynamic + // parameters along with each resource name. Clients that populate this + // field must be able to handle responses from the server where resources + // are wrapped in a Resource message. + // Note that it is legal for a request to have some resources listed + // in “resource_names“ and others in “resource_locators“. + ResourceLocators []*ResourceLocator `protobuf:"bytes,7,rep,name=resource_locators,json=resourceLocators,proto3" json:"resource_locators,omitempty"` + // Type of the resource that is being requested, e.g. + // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This is implicit + // in requests made via singleton xDS APIs such as CDS, LDS, etc. but is + // required for ADS. + TypeUrl string `protobuf:"bytes,4,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // nonce corresponding to DiscoveryResponse being ACK/NACKed. See above + // discussion on version_info and the DiscoveryResponse nonce comment. This + // may be empty only if 1) this is a non-persistent-stream xDS such as HTTP, + // or 2) the client has not yet accepted an update in this xDS stream (unlike + // delta, where it is populated only for new explicit ACKs). + ResponseNonce string `protobuf:"bytes,5,opt,name=response_nonce,json=responseNonce,proto3" json:"response_nonce,omitempty"` + // This is populated when the previous :ref:`DiscoveryResponse ` + // failed to update configuration. The “message“ field in “error_details“ provides the Envoy + // internal exception related to the failure. It is only intended for consumption during manual + // debugging, the string provided is not guaranteed to be stable across Envoy versions. 
+ ErrorDetail *status.Status `protobuf:"bytes,6,opt,name=error_detail,json=errorDetail,proto3" json:"error_detail,omitempty"` +} + +func (x *DiscoveryRequest) Reset() { + *x = DiscoveryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DiscoveryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DiscoveryRequest) ProtoMessage() {} + +func (x *DiscoveryRequest) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DiscoveryRequest.ProtoReflect.Descriptor instead. +func (*DiscoveryRequest) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{2} +} + +func (x *DiscoveryRequest) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *DiscoveryRequest) GetNode() *v3.Node { + if x != nil { + return x.Node + } + return nil +} + +func (x *DiscoveryRequest) GetResourceNames() []string { + if x != nil { + return x.ResourceNames + } + return nil +} + +func (x *DiscoveryRequest) GetResourceLocators() []*ResourceLocator { + if x != nil { + return x.ResourceLocators + } + return nil +} + +func (x *DiscoveryRequest) GetTypeUrl() string { + if x != nil { + return x.TypeUrl + } + return "" +} + +func (x *DiscoveryRequest) GetResponseNonce() string { + if x != nil { + return x.ResponseNonce + } + return "" +} + +func (x *DiscoveryRequest) GetErrorDetail() *status.Status { + if x != nil { + return x.ErrorDetail + } + return nil +} + +// [#next-free-field: 7] +type DiscoveryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The version of the response data. + VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The response resources. These resources are typed and depend on the API being called. + Resources []*anypb.Any `protobuf:"bytes,2,rep,name=resources,proto3" json:"resources,omitempty"` + // [#not-implemented-hide:] + // Canary is used to support two Envoy command line flags: + // + // - --terminate-on-canary-transition-failure. When set, Envoy is able to + // terminate if it detects that configuration is stuck at canary. Consider + // this example sequence of updates: + // - Management server applies a canary config successfully. + // - Management server rolls back to a production config. + // - Envoy rejects the new production config. + // Since there is no sensible way to continue receiving configuration + // updates, Envoy will then terminate and apply production config from a + // clean slate. + // - --dry-run-canary. When set, a canary response will never be applied, only + // validated via a dry run. + Canary bool `protobuf:"varint,3,opt,name=canary,proto3" json:"canary,omitempty"` + // Type URL for resources. Identifies the xDS API when muxing over ADS. + // Must be consistent with the type_url in the 'resources' repeated Any (if non-empty). 
+ TypeUrl string `protobuf:"bytes,4,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // For gRPC based subscriptions, the nonce provides a way to explicitly ack a + // specific DiscoveryResponse in a following DiscoveryRequest. Additional + // messages may have been sent by Envoy to the management server for the + // previous version on the stream prior to this DiscoveryResponse, that were + // unprocessed at response send time. The nonce allows the management server + // to ignore any further DiscoveryRequests for the previous version until a + // DiscoveryRequest bearing the nonce. The nonce is optional and is not + // required for non-stream based xDS implementations. + Nonce string `protobuf:"bytes,5,opt,name=nonce,proto3" json:"nonce,omitempty"` + // The control plane instance that sent the response. + ControlPlane *v3.ControlPlane `protobuf:"bytes,6,opt,name=control_plane,json=controlPlane,proto3" json:"control_plane,omitempty"` +} + +func (x *DiscoveryResponse) Reset() { + *x = DiscoveryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DiscoveryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DiscoveryResponse) ProtoMessage() {} + +func (x *DiscoveryResponse) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DiscoveryResponse.ProtoReflect.Descriptor instead. +func (*DiscoveryResponse) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{3} +} + +func (x *DiscoveryResponse) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *DiscoveryResponse) GetResources() []*anypb.Any { + if x != nil { + return x.Resources + } + return nil +} + +func (x *DiscoveryResponse) GetCanary() bool { + if x != nil { + return x.Canary + } + return false +} + +func (x *DiscoveryResponse) GetTypeUrl() string { + if x != nil { + return x.TypeUrl + } + return "" +} + +func (x *DiscoveryResponse) GetNonce() string { + if x != nil { + return x.Nonce + } + return "" +} + +func (x *DiscoveryResponse) GetControlPlane() *v3.ControlPlane { + if x != nil { + return x.ControlPlane + } + return nil +} + +// DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC +// endpoint for Delta xDS. +// +// With Delta xDS, the DeltaDiscoveryResponses do not need to include a full +// snapshot of the tracked resources. Instead, DeltaDiscoveryResponses are a +// diff to the state of a xDS client. +// In Delta XDS there are per-resource versions, which allow tracking state at +// the resource granularity. +// An xDS Delta session is always in the context of a gRPC bidirectional +// stream. This allows the xDS server to keep track of the state of xDS clients +// connected to it. +// +// In Delta xDS the nonce field is required and used to pair +// DeltaDiscoveryResponse to a DeltaDiscoveryRequest ACK or NACK. +// Optionally, a response message level system_version_info is present for +// debugging purposes only. +// +// DeltaDiscoveryRequest plays two independent roles. 
Any DeltaDiscoveryRequest +// can be either or both of: [1] informing the server of what resources the +// client has gained/lost interest in (using resource_names_subscribe and +// resource_names_unsubscribe), or [2] (N)ACKing an earlier resource update from +// the server (using response_nonce, with presence of error_detail making it a NACK). +// Additionally, the first message (for a given type_url) of a reconnected gRPC stream +// has a third role: informing the server of the resources (and their versions) +// that the client already possesses, using the initial_resource_versions field. +// +// As with state-of-the-world, when multiple resource types are multiplexed (ADS), +// all requests/acknowledgments/updates are logically walled off by type_url: +// a Cluster ACK exists in a completely separate world from a prior Route NACK. +// In particular, initial_resource_versions being sent at the "start" of every +// gRPC stream actually entails a message for each type_url, each with its own +// initial_resource_versions. +// [#next-free-field: 10] +type DeltaDiscoveryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The node making the request. + Node *v3.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + // Type of the resource that is being requested, e.g. + // “type.googleapis.com/envoy.api.v2.ClusterLoadAssignment“. This does not need to be set if + // resources are only referenced via “xds_resource_subscribe“ and + // “xds_resources_unsubscribe“. + TypeUrl string `protobuf:"bytes,2,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // DeltaDiscoveryRequests allow the client to add or remove individual + // resources to the set of tracked resources in the context of a stream. + // All resource names in the resource_names_subscribe list are added to the + // set of tracked resources and all resource names in the resource_names_unsubscribe + // list are removed from the set of tracked resources. + // + // *Unlike* state-of-the-world xDS, an empty resource_names_subscribe or + // resource_names_unsubscribe list simply means that no resources are to be + // added or removed to the resource list. + // *Like* state-of-the-world xDS, the server must send updates for all tracked + // resources, but can also send updates for resources the client has not subscribed to. + // + // NOTE: the server must respond with all resources listed in resource_names_subscribe, + // even if it believes the client has the most recent version of them. The reason: + // the client may have dropped them, but then regained interest before it had a chance + // to send the unsubscribe message. See DeltaSubscriptionStateTest.RemoveThenAdd. + // + // These two fields can be set in any DeltaDiscoveryRequest, including ACKs + // and initial_resource_versions. + // + // A list of Resource names to add to the list of tracked resources. + ResourceNamesSubscribe []string `protobuf:"bytes,3,rep,name=resource_names_subscribe,json=resourceNamesSubscribe,proto3" json:"resource_names_subscribe,omitempty"` + // A list of Resource names to remove from the list of tracked resources. + ResourceNamesUnsubscribe []string `protobuf:"bytes,4,rep,name=resource_names_unsubscribe,json=resourceNamesUnsubscribe,proto3" json:"resource_names_unsubscribe,omitempty"` + // [#not-implemented-hide:] + // Alternative to “resource_names_subscribe“ field that allows specifying dynamic parameters + // along with each resource name. 
+ // Note that it is legal for a request to have some resources listed + // in “resource_names_subscribe“ and others in “resource_locators_subscribe“. + ResourceLocatorsSubscribe []*ResourceLocator `protobuf:"bytes,8,rep,name=resource_locators_subscribe,json=resourceLocatorsSubscribe,proto3" json:"resource_locators_subscribe,omitempty"` + // [#not-implemented-hide:] + // Alternative to “resource_names_unsubscribe“ field that allows specifying dynamic parameters + // along with each resource name. + // Note that it is legal for a request to have some resources listed + // in “resource_names_unsubscribe“ and others in “resource_locators_unsubscribe“. + ResourceLocatorsUnsubscribe []*ResourceLocator `protobuf:"bytes,9,rep,name=resource_locators_unsubscribe,json=resourceLocatorsUnsubscribe,proto3" json:"resource_locators_unsubscribe,omitempty"` + // Informs the server of the versions of the resources the xDS client knows of, to enable the + // client to continue the same logical xDS session even in the face of gRPC stream reconnection. + // It will not be populated: [1] in the very first stream of a session, since the client will + // not yet have any resources, [2] in any message after the first in a stream (for a given + // type_url), since the server will already be correctly tracking the client's state. + // (In ADS, the first message *of each type_url* of a reconnected stream populates this map.) + // The map's keys are names of xDS resources known to the xDS client. + // The map's values are opaque resource versions. + InitialResourceVersions map[string]string `protobuf:"bytes,5,rep,name=initial_resource_versions,json=initialResourceVersions,proto3" json:"initial_resource_versions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // When the DeltaDiscoveryRequest is a ACK or NACK message in response + // to a previous DeltaDiscoveryResponse, the response_nonce must be the + // nonce in the DeltaDiscoveryResponse. + // Otherwise (unlike in DiscoveryRequest) response_nonce must be omitted. + ResponseNonce string `protobuf:"bytes,6,opt,name=response_nonce,json=responseNonce,proto3" json:"response_nonce,omitempty"` + // This is populated when the previous :ref:`DiscoveryResponse ` + // failed to update configuration. The “message“ field in “error_details“ + // provides the Envoy internal exception related to the failure. + ErrorDetail *status.Status `protobuf:"bytes,7,opt,name=error_detail,json=errorDetail,proto3" json:"error_detail,omitempty"` +} + +func (x *DeltaDiscoveryRequest) Reset() { + *x = DeltaDiscoveryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeltaDiscoveryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeltaDiscoveryRequest) ProtoMessage() {} + +func (x *DeltaDiscoveryRequest) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeltaDiscoveryRequest.ProtoReflect.Descriptor instead. 
+func (*DeltaDiscoveryRequest) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{4} +} + +func (x *DeltaDiscoveryRequest) GetNode() *v3.Node { + if x != nil { + return x.Node + } + return nil +} + +func (x *DeltaDiscoveryRequest) GetTypeUrl() string { + if x != nil { + return x.TypeUrl + } + return "" +} + +func (x *DeltaDiscoveryRequest) GetResourceNamesSubscribe() []string { + if x != nil { + return x.ResourceNamesSubscribe + } + return nil +} + +func (x *DeltaDiscoveryRequest) GetResourceNamesUnsubscribe() []string { + if x != nil { + return x.ResourceNamesUnsubscribe + } + return nil +} + +func (x *DeltaDiscoveryRequest) GetResourceLocatorsSubscribe() []*ResourceLocator { + if x != nil { + return x.ResourceLocatorsSubscribe + } + return nil +} + +func (x *DeltaDiscoveryRequest) GetResourceLocatorsUnsubscribe() []*ResourceLocator { + if x != nil { + return x.ResourceLocatorsUnsubscribe + } + return nil +} + +func (x *DeltaDiscoveryRequest) GetInitialResourceVersions() map[string]string { + if x != nil { + return x.InitialResourceVersions + } + return nil +} + +func (x *DeltaDiscoveryRequest) GetResponseNonce() string { + if x != nil { + return x.ResponseNonce + } + return "" +} + +func (x *DeltaDiscoveryRequest) GetErrorDetail() *status.Status { + if x != nil { + return x.ErrorDetail + } + return nil +} + +// [#next-free-field: 9] +type DeltaDiscoveryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The version of the response data (used for debugging). + SystemVersionInfo string `protobuf:"bytes,1,opt,name=system_version_info,json=systemVersionInfo,proto3" json:"system_version_info,omitempty"` + // The response resources. These are typed resources, whose types must match + // the type_url field. + Resources []*Resource `protobuf:"bytes,2,rep,name=resources,proto3" json:"resources,omitempty"` + // Type URL for resources. Identifies the xDS API when muxing over ADS. + // Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty. + TypeUrl string `protobuf:"bytes,4,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // Resources names of resources that have be deleted and to be removed from the xDS Client. + // Removed resources for missing resources can be ignored. + RemovedResources []string `protobuf:"bytes,6,rep,name=removed_resources,json=removedResources,proto3" json:"removed_resources,omitempty"` + // Alternative to removed_resources that allows specifying which variant of + // a resource is being removed. This variant must be used for any resource + // for which dynamic parameter constraints were sent to the client. + RemovedResourceNames []*ResourceName `protobuf:"bytes,8,rep,name=removed_resource_names,json=removedResourceNames,proto3" json:"removed_resource_names,omitempty"` + // The nonce provides a way for DeltaDiscoveryRequests to uniquely + // reference a DeltaDiscoveryResponse when (N)ACKing. The nonce is required. + Nonce string `protobuf:"bytes,5,opt,name=nonce,proto3" json:"nonce,omitempty"` + // [#not-implemented-hide:] + // The control plane instance that sent the response. 
+ ControlPlane *v3.ControlPlane `protobuf:"bytes,7,opt,name=control_plane,json=controlPlane,proto3" json:"control_plane,omitempty"` +} + +func (x *DeltaDiscoveryResponse) Reset() { + *x = DeltaDiscoveryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeltaDiscoveryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeltaDiscoveryResponse) ProtoMessage() {} + +func (x *DeltaDiscoveryResponse) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeltaDiscoveryResponse.ProtoReflect.Descriptor instead. +func (*DeltaDiscoveryResponse) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{5} +} + +func (x *DeltaDiscoveryResponse) GetSystemVersionInfo() string { + if x != nil { + return x.SystemVersionInfo + } + return "" +} + +func (x *DeltaDiscoveryResponse) GetResources() []*Resource { + if x != nil { + return x.Resources + } + return nil +} + +func (x *DeltaDiscoveryResponse) GetTypeUrl() string { + if x != nil { + return x.TypeUrl + } + return "" +} + +func (x *DeltaDiscoveryResponse) GetRemovedResources() []string { + if x != nil { + return x.RemovedResources + } + return nil +} + +func (x *DeltaDiscoveryResponse) GetRemovedResourceNames() []*ResourceName { + if x != nil { + return x.RemovedResourceNames + } + return nil +} + +func (x *DeltaDiscoveryResponse) GetNonce() string { + if x != nil { + return x.Nonce + } + return "" +} + +func (x *DeltaDiscoveryResponse) GetControlPlane() *v3.ControlPlane { + if x != nil { + return x.ControlPlane + } + return nil +} + +// A set of dynamic parameter constraints associated with a variant of an individual xDS resource. +// These constraints determine whether the resource matches a subscription based on the set of +// dynamic parameters in the subscription, as specified in the +// :ref:`ResourceLocator.dynamic_parameters` +// field. This allows xDS implementations (clients, servers, and caching proxies) to determine +// which variant of a resource is appropriate for a given client. 
+type DynamicParameterConstraints struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Type: + // + // *DynamicParameterConstraints_Constraint + // *DynamicParameterConstraints_OrConstraints + // *DynamicParameterConstraints_AndConstraints + // *DynamicParameterConstraints_NotConstraints + Type isDynamicParameterConstraints_Type `protobuf_oneof:"type"` +} + +func (x *DynamicParameterConstraints) Reset() { + *x = DynamicParameterConstraints{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DynamicParameterConstraints) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DynamicParameterConstraints) ProtoMessage() {} + +func (x *DynamicParameterConstraints) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DynamicParameterConstraints.ProtoReflect.Descriptor instead. +func (*DynamicParameterConstraints) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{6} +} + +func (m *DynamicParameterConstraints) GetType() isDynamicParameterConstraints_Type { + if m != nil { + return m.Type + } + return nil +} + +func (x *DynamicParameterConstraints) GetConstraint() *DynamicParameterConstraints_SingleConstraint { + if x, ok := x.GetType().(*DynamicParameterConstraints_Constraint); ok { + return x.Constraint + } + return nil +} + +func (x *DynamicParameterConstraints) GetOrConstraints() *DynamicParameterConstraints_ConstraintList { + if x, ok := x.GetType().(*DynamicParameterConstraints_OrConstraints); ok { + return x.OrConstraints + } + return nil +} + +func (x *DynamicParameterConstraints) GetAndConstraints() *DynamicParameterConstraints_ConstraintList { + if x, ok := x.GetType().(*DynamicParameterConstraints_AndConstraints); ok { + return x.AndConstraints + } + return nil +} + +func (x *DynamicParameterConstraints) GetNotConstraints() *DynamicParameterConstraints { + if x, ok := x.GetType().(*DynamicParameterConstraints_NotConstraints); ok { + return x.NotConstraints + } + return nil +} + +type isDynamicParameterConstraints_Type interface { + isDynamicParameterConstraints_Type() +} + +type DynamicParameterConstraints_Constraint struct { + // A single constraint to evaluate. + Constraint *DynamicParameterConstraints_SingleConstraint `protobuf:"bytes,1,opt,name=constraint,proto3,oneof"` +} + +type DynamicParameterConstraints_OrConstraints struct { + // A list of constraints that match if any one constraint in the list + // matches. + OrConstraints *DynamicParameterConstraints_ConstraintList `protobuf:"bytes,2,opt,name=or_constraints,json=orConstraints,proto3,oneof"` +} + +type DynamicParameterConstraints_AndConstraints struct { + // A list of constraints that must all match. + AndConstraints *DynamicParameterConstraints_ConstraintList `protobuf:"bytes,3,opt,name=and_constraints,json=andConstraints,proto3,oneof"` +} + +type DynamicParameterConstraints_NotConstraints struct { + // The inverse (NOT) of a set of constraints. 
+ NotConstraints *DynamicParameterConstraints `protobuf:"bytes,4,opt,name=not_constraints,json=notConstraints,proto3,oneof"` +} + +func (*DynamicParameterConstraints_Constraint) isDynamicParameterConstraints_Type() {} + +func (*DynamicParameterConstraints_OrConstraints) isDynamicParameterConstraints_Type() {} + +func (*DynamicParameterConstraints_AndConstraints) isDynamicParameterConstraints_Type() {} + +func (*DynamicParameterConstraints_NotConstraints) isDynamicParameterConstraints_Type() {} + +// [#next-free-field: 10] +type Resource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource's name, to distinguish it from others of the same type of resource. + // Only one of “name“ or “resource_name“ may be set. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Alternative to the “name“ field, to be used when the server supports + // multiple variants of the named resource that are differentiated by + // dynamic parameter constraints. + // Only one of “name“ or “resource_name“ may be set. + ResourceName *ResourceName `protobuf:"bytes,8,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` + // The aliases are a list of other names that this resource can go by. + Aliases []string `protobuf:"bytes,4,rep,name=aliases,proto3" json:"aliases,omitempty"` + // The resource level version. It allows xDS to track the state of individual + // resources. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // The resource being tracked. + Resource *anypb.Any `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"` + // Time-to-live value for the resource. For each resource, a timer is started. The timer is + // reset each time the resource is received with a new TTL. If the resource is received with + // no TTL set, the timer is removed for the resource. Upon expiration of the timer, the + // configuration for the resource will be removed. + // + // The TTL can be refreshed or changed by sending a response that doesn't change the resource + // version. In this case the resource field does not need to be populated, which allows for + // light-weight "heartbeat" updates to keep a resource with a TTL alive. + // + // The TTL feature is meant to support configurations that should be removed in the event of + // a management server failure. For example, the feature may be used for fault injection + // testing where the fault injection should be terminated in the event that Envoy loses contact + // with the management server. + Ttl *durationpb.Duration `protobuf:"bytes,6,opt,name=ttl,proto3" json:"ttl,omitempty"` + // Cache control properties for the resource. + // [#not-implemented-hide:] + CacheControl *Resource_CacheControl `protobuf:"bytes,7,opt,name=cache_control,json=cacheControl,proto3" json:"cache_control,omitempty"` + // The Metadata field can be used to provide additional information for the resource. + // E.g. the trace data for debugging. 
+ Metadata *v3.Metadata `protobuf:"bytes,9,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *Resource) Reset() { + *x = Resource{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Resource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Resource) ProtoMessage() {} + +func (x *Resource) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Resource.ProtoReflect.Descriptor instead. +func (*Resource) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{7} +} + +func (x *Resource) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Resource) GetResourceName() *ResourceName { + if x != nil { + return x.ResourceName + } + return nil +} + +func (x *Resource) GetAliases() []string { + if x != nil { + return x.Aliases + } + return nil +} + +func (x *Resource) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *Resource) GetResource() *anypb.Any { + if x != nil { + return x.Resource + } + return nil +} + +func (x *Resource) GetTtl() *durationpb.Duration { + if x != nil { + return x.Ttl + } + return nil +} + +func (x *Resource) GetCacheControl() *Resource_CacheControl { + if x != nil { + return x.CacheControl + } + return nil +} + +func (x *Resource) GetMetadata() *v3.Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +// A single constraint for a given key. +type DynamicParameterConstraints_SingleConstraint struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The key to match against. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // Types that are assignable to ConstraintType: + // + // *DynamicParameterConstraints_SingleConstraint_Value + // *DynamicParameterConstraints_SingleConstraint_Exists_ + ConstraintType isDynamicParameterConstraints_SingleConstraint_ConstraintType `protobuf_oneof:"constraint_type"` +} + +func (x *DynamicParameterConstraints_SingleConstraint) Reset() { + *x = DynamicParameterConstraints_SingleConstraint{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DynamicParameterConstraints_SingleConstraint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DynamicParameterConstraints_SingleConstraint) ProtoMessage() {} + +func (x *DynamicParameterConstraints_SingleConstraint) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DynamicParameterConstraints_SingleConstraint.ProtoReflect.Descriptor instead. 
+func (*DynamicParameterConstraints_SingleConstraint) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *DynamicParameterConstraints_SingleConstraint) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (m *DynamicParameterConstraints_SingleConstraint) GetConstraintType() isDynamicParameterConstraints_SingleConstraint_ConstraintType { + if m != nil { + return m.ConstraintType + } + return nil +} + +func (x *DynamicParameterConstraints_SingleConstraint) GetValue() string { + if x, ok := x.GetConstraintType().(*DynamicParameterConstraints_SingleConstraint_Value); ok { + return x.Value + } + return "" +} + +func (x *DynamicParameterConstraints_SingleConstraint) GetExists() *DynamicParameterConstraints_SingleConstraint_Exists { + if x, ok := x.GetConstraintType().(*DynamicParameterConstraints_SingleConstraint_Exists_); ok { + return x.Exists + } + return nil +} + +type isDynamicParameterConstraints_SingleConstraint_ConstraintType interface { + isDynamicParameterConstraints_SingleConstraint_ConstraintType() +} + +type DynamicParameterConstraints_SingleConstraint_Value struct { + // Matches this exact value. + Value string `protobuf:"bytes,2,opt,name=value,proto3,oneof"` +} + +type DynamicParameterConstraints_SingleConstraint_Exists_ struct { + // Key is present (matches any value except for the key being absent). + // This allows setting a default constraint for clients that do + // not send a key at all, while there may be other clients that need + // special configuration based on that key. + Exists *DynamicParameterConstraints_SingleConstraint_Exists `protobuf:"bytes,3,opt,name=exists,proto3,oneof"` +} + +func (*DynamicParameterConstraints_SingleConstraint_Value) isDynamicParameterConstraints_SingleConstraint_ConstraintType() { +} + +func (*DynamicParameterConstraints_SingleConstraint_Exists_) isDynamicParameterConstraints_SingleConstraint_ConstraintType() { +} + +type DynamicParameterConstraints_ConstraintList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Constraints []*DynamicParameterConstraints `protobuf:"bytes,1,rep,name=constraints,proto3" json:"constraints,omitempty"` +} + +func (x *DynamicParameterConstraints_ConstraintList) Reset() { + *x = DynamicParameterConstraints_ConstraintList{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DynamicParameterConstraints_ConstraintList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DynamicParameterConstraints_ConstraintList) ProtoMessage() {} + +func (x *DynamicParameterConstraints_ConstraintList) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DynamicParameterConstraints_ConstraintList.ProtoReflect.Descriptor instead. 
+func (*DynamicParameterConstraints_ConstraintList) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{6, 1} +} + +func (x *DynamicParameterConstraints_ConstraintList) GetConstraints() []*DynamicParameterConstraints { + if x != nil { + return x.Constraints + } + return nil +} + +type DynamicParameterConstraints_SingleConstraint_Exists struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DynamicParameterConstraints_SingleConstraint_Exists) Reset() { + *x = DynamicParameterConstraints_SingleConstraint_Exists{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DynamicParameterConstraints_SingleConstraint_Exists) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DynamicParameterConstraints_SingleConstraint_Exists) ProtoMessage() {} + +func (x *DynamicParameterConstraints_SingleConstraint_Exists) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DynamicParameterConstraints_SingleConstraint_Exists.ProtoReflect.Descriptor instead. +func (*DynamicParameterConstraints_SingleConstraint_Exists) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{6, 0, 0} +} + +// Cache control properties for the resource. +// [#not-implemented-hide:] +type Resource_CacheControl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If true, xDS proxies may not cache this resource. + // Note that this does not apply to clients other than xDS proxies, which must cache resources + // for their own use, regardless of the value of this field. + DoNotCache bool `protobuf:"varint,1,opt,name=do_not_cache,json=doNotCache,proto3" json:"do_not_cache,omitempty"` +} + +func (x *Resource_CacheControl) Reset() { + *x = Resource_CacheControl{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Resource_CacheControl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Resource_CacheControl) ProtoMessage() {} + +func (x *Resource_CacheControl) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_discovery_v3_discovery_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Resource_CacheControl.ProtoReflect.Descriptor instead. 
+func (*Resource_CacheControl) Descriptor() ([]byte, []int) { + return file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP(), []int{7, 0} +} + +func (x *Resource_CacheControl) GetDoNotCache() bool { + if x != nil { + return x.DoNotCache + } + return false +} + +var File_envoy_service_discovery_v3_discovery_proto protoreflect.FileDescriptor + +var file_envoy_service_discovery_v3_discovery_proto_rawDesc = []byte{ + 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, + 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x69, 0x73, + 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1a, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, + 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, + 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xde, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x71, 0x0a, 0x12, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, + 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x11, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, + 0x65, 0x72, 0x73, 0x1a, 0x44, 0x0a, 0x16, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x9f, 0x01, 0x0a, 0x0c, 0x52, 
0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x7b, + 0x0a, 0x1d, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, + 0x76, 0x33, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x52, 0x1b, + 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x22, 0x85, 0x03, 0x0a, 0x10, + 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x2e, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, + 0x6f, 0x64, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x11, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x18, + 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, + 0x6f, 0x72, 0x52, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x6f, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, + 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x35, 0x0a, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, + 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x3a, 0x24, 0x9a, + 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x22, 0xa3, 0x02, 0x0a, 0x11, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 
0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x32, 0x0a, 0x09, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x12, 0x16, 0x0a, 0x06, 0x63, 0x61, 0x6e, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x06, 0x63, 0x61, 0x6e, 0x61, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, + 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, + 0x55, 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, + 0x6c, 0x61, 0x6e, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x6c, 0x61, + 0x6e, 0x65, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9a, 0x06, 0x0a, 0x15, 0x44, 0x65, + 0x6c, 0x74, 0x61, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, + 0x6f, 0x64, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x38, + 0x0a, 0x18, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x5f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x16, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x53, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x3c, 0x0a, 0x1a, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x75, 0x6e, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x18, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x55, 0x6e, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x6b, 0x0a, 0x1b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x5f, 0x73, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x62, 0x65, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, + 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x19, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 
0x6f, 0x72, 0x73, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x62, 0x65, 0x12, 0x6f, 0x0a, 0x1d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x5f, 0x75, 0x6e, 0x73, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x62, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, + 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x1b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x55, 0x6e, 0x73, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x62, 0x65, 0x12, 0x8a, 0x01, 0x0a, 0x19, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, + 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, + 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x44, 0x69, 0x73, 0x63, 0x6f, + 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x69, 0x74, + 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x17, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, + 0x6c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x6e, 0x6f, + 0x6e, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x35, 0x0a, 0x0c, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x1a, + 0x4a, 0x0a, 0x1c, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x29, 0x9a, 0xc5, 0x88, + 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xbf, 0x03, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x74, 0x61, + 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, + 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x42, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 
0x79, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, + 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, + 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x6d, + 0x6f, 0x76, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x5e, 0x0a, + 0x16, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, + 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x14, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x14, 0x0a, + 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x6f, + 0x6e, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x5f, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x65, 0x52, 0x0c, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x65, 0x3a, 0x2a, 0x9a, 0xc5, + 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x92, 0x06, 0x0a, 0x1b, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x6a, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, + 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, + 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, + 0x61, 0x69, 0x6e, 0x74, 0x73, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x73, + 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, + 0x61, 0x69, 0x6e, 0x74, 0x12, 0x6f, 0x0a, 0x0e, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, + 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, + 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, + 0x61, 0x69, 0x6e, 0x74, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 
0x74, + 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0d, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, + 0x61, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x71, 0x0a, 0x0f, 0x61, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, + 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x46, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, + 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x73, + 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, + 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0e, 0x61, 0x6e, 0x64, 0x43, 0x6f, 0x6e, + 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x62, 0x0a, 0x0f, 0x6e, 0x6f, 0x74, 0x5f, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x37, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x6e, 0x6f, + 0x74, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x1a, 0xc9, 0x01, 0x0a, + 0x10, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, + 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x69, 0x0a, 0x06, 0x65, + 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, + 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, + 0x69, 0x6e, 0x74, 0x73, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, + 0x72, 0x61, 0x69, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x48, 0x00, 0x52, 0x06, + 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x1a, 0x08, 0x0a, 0x06, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, + 0x42, 0x16, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x6b, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x73, + 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x59, 0x0a, 0x0b, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x37, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, + 0x61, 0x69, 0x6e, 0x74, 0x73, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xe4, 0x03, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4d, + 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x52, + 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, + 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x30, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x2b, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x74, 0x74, 0x6c, + 0x12, 0x56, 0x0a, 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x43, 0x61, + 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, + 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x1a, 0x30, 0x0a, 0x0c, 0x43, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x0c, 0x64, 0x6f, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x63, + 0x61, 0x63, 0x68, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x6f, 0x4e, 0x6f, + 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x3a, 0x1c, 0x9a, 0xc5, 0x88, 0x1e, 0x17, 0x0a, 0x15, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x42, 0x93, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, + 0x28, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, + 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x44, 0x69, 0x73, 0x63, 0x6f, + 0x76, 0x65, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4d, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, + 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 
0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2f, 0x76, 0x33, 0x3b, 0x64, + 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_envoy_service_discovery_v3_discovery_proto_rawDescOnce sync.Once + file_envoy_service_discovery_v3_discovery_proto_rawDescData = file_envoy_service_discovery_v3_discovery_proto_rawDesc +) + +func file_envoy_service_discovery_v3_discovery_proto_rawDescGZIP() []byte { + file_envoy_service_discovery_v3_discovery_proto_rawDescOnce.Do(func() { + file_envoy_service_discovery_v3_discovery_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_service_discovery_v3_discovery_proto_rawDescData) + }) + return file_envoy_service_discovery_v3_discovery_proto_rawDescData +} + +var file_envoy_service_discovery_v3_discovery_proto_msgTypes = make([]protoimpl.MessageInfo, 14) +var file_envoy_service_discovery_v3_discovery_proto_goTypes = []interface{}{ + (*ResourceLocator)(nil), // 0: envoy.service.discovery.v3.ResourceLocator + (*ResourceName)(nil), // 1: envoy.service.discovery.v3.ResourceName + (*DiscoveryRequest)(nil), // 2: envoy.service.discovery.v3.DiscoveryRequest + (*DiscoveryResponse)(nil), // 3: envoy.service.discovery.v3.DiscoveryResponse + (*DeltaDiscoveryRequest)(nil), // 4: envoy.service.discovery.v3.DeltaDiscoveryRequest + (*DeltaDiscoveryResponse)(nil), // 5: envoy.service.discovery.v3.DeltaDiscoveryResponse + (*DynamicParameterConstraints)(nil), // 6: envoy.service.discovery.v3.DynamicParameterConstraints + (*Resource)(nil), // 7: envoy.service.discovery.v3.Resource + nil, // 8: envoy.service.discovery.v3.ResourceLocator.DynamicParametersEntry + nil, // 9: envoy.service.discovery.v3.DeltaDiscoveryRequest.InitialResourceVersionsEntry + (*DynamicParameterConstraints_SingleConstraint)(nil), // 10: envoy.service.discovery.v3.DynamicParameterConstraints.SingleConstraint + (*DynamicParameterConstraints_ConstraintList)(nil), // 11: envoy.service.discovery.v3.DynamicParameterConstraints.ConstraintList + (*DynamicParameterConstraints_SingleConstraint_Exists)(nil), // 12: envoy.service.discovery.v3.DynamicParameterConstraints.SingleConstraint.Exists + (*Resource_CacheControl)(nil), // 13: envoy.service.discovery.v3.Resource.CacheControl + (*v3.Node)(nil), // 14: envoy.config.core.v3.Node + (*status.Status)(nil), // 15: google.rpc.Status + (*anypb.Any)(nil), // 16: google.protobuf.Any + (*v3.ControlPlane)(nil), // 17: envoy.config.core.v3.ControlPlane + (*durationpb.Duration)(nil), // 18: google.protobuf.Duration + (*v3.Metadata)(nil), // 19: envoy.config.core.v3.Metadata +} +var file_envoy_service_discovery_v3_discovery_proto_depIdxs = []int32{ + 8, // 0: envoy.service.discovery.v3.ResourceLocator.dynamic_parameters:type_name -> envoy.service.discovery.v3.ResourceLocator.DynamicParametersEntry + 6, // 1: envoy.service.discovery.v3.ResourceName.dynamic_parameter_constraints:type_name -> envoy.service.discovery.v3.DynamicParameterConstraints + 14, // 2: envoy.service.discovery.v3.DiscoveryRequest.node:type_name -> envoy.config.core.v3.Node + 0, // 3: envoy.service.discovery.v3.DiscoveryRequest.resource_locators:type_name -> envoy.service.discovery.v3.ResourceLocator + 15, // 4: envoy.service.discovery.v3.DiscoveryRequest.error_detail:type_name -> google.rpc.Status + 16, // 5: envoy.service.discovery.v3.DiscoveryResponse.resources:type_name -> google.protobuf.Any + 17, // 6: 
envoy.service.discovery.v3.DiscoveryResponse.control_plane:type_name -> envoy.config.core.v3.ControlPlane + 14, // 7: envoy.service.discovery.v3.DeltaDiscoveryRequest.node:type_name -> envoy.config.core.v3.Node + 0, // 8: envoy.service.discovery.v3.DeltaDiscoveryRequest.resource_locators_subscribe:type_name -> envoy.service.discovery.v3.ResourceLocator + 0, // 9: envoy.service.discovery.v3.DeltaDiscoveryRequest.resource_locators_unsubscribe:type_name -> envoy.service.discovery.v3.ResourceLocator + 9, // 10: envoy.service.discovery.v3.DeltaDiscoveryRequest.initial_resource_versions:type_name -> envoy.service.discovery.v3.DeltaDiscoveryRequest.InitialResourceVersionsEntry + 15, // 11: envoy.service.discovery.v3.DeltaDiscoveryRequest.error_detail:type_name -> google.rpc.Status + 7, // 12: envoy.service.discovery.v3.DeltaDiscoveryResponse.resources:type_name -> envoy.service.discovery.v3.Resource + 1, // 13: envoy.service.discovery.v3.DeltaDiscoveryResponse.removed_resource_names:type_name -> envoy.service.discovery.v3.ResourceName + 17, // 14: envoy.service.discovery.v3.DeltaDiscoveryResponse.control_plane:type_name -> envoy.config.core.v3.ControlPlane + 10, // 15: envoy.service.discovery.v3.DynamicParameterConstraints.constraint:type_name -> envoy.service.discovery.v3.DynamicParameterConstraints.SingleConstraint + 11, // 16: envoy.service.discovery.v3.DynamicParameterConstraints.or_constraints:type_name -> envoy.service.discovery.v3.DynamicParameterConstraints.ConstraintList + 11, // 17: envoy.service.discovery.v3.DynamicParameterConstraints.and_constraints:type_name -> envoy.service.discovery.v3.DynamicParameterConstraints.ConstraintList + 6, // 18: envoy.service.discovery.v3.DynamicParameterConstraints.not_constraints:type_name -> envoy.service.discovery.v3.DynamicParameterConstraints + 1, // 19: envoy.service.discovery.v3.Resource.resource_name:type_name -> envoy.service.discovery.v3.ResourceName + 16, // 20: envoy.service.discovery.v3.Resource.resource:type_name -> google.protobuf.Any + 18, // 21: envoy.service.discovery.v3.Resource.ttl:type_name -> google.protobuf.Duration + 13, // 22: envoy.service.discovery.v3.Resource.cache_control:type_name -> envoy.service.discovery.v3.Resource.CacheControl + 19, // 23: envoy.service.discovery.v3.Resource.metadata:type_name -> envoy.config.core.v3.Metadata + 12, // 24: envoy.service.discovery.v3.DynamicParameterConstraints.SingleConstraint.exists:type_name -> envoy.service.discovery.v3.DynamicParameterConstraints.SingleConstraint.Exists + 6, // 25: envoy.service.discovery.v3.DynamicParameterConstraints.ConstraintList.constraints:type_name -> envoy.service.discovery.v3.DynamicParameterConstraints + 26, // [26:26] is the sub-list for method output_type + 26, // [26:26] is the sub-list for method input_type + 26, // [26:26] is the sub-list for extension type_name + 26, // [26:26] is the sub-list for extension extendee + 0, // [0:26] is the sub-list for field type_name +} + +func init() { file_envoy_service_discovery_v3_discovery_proto_init() } +func file_envoy_service_discovery_v3_discovery_proto_init() { + if File_envoy_service_discovery_v3_discovery_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_service_discovery_v3_discovery_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceLocator); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_envoy_service_discovery_v3_discovery_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceName); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_discovery_v3_discovery_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DiscoveryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_discovery_v3_discovery_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DiscoveryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_discovery_v3_discovery_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeltaDiscoveryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_discovery_v3_discovery_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeltaDiscoveryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_discovery_v3_discovery_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DynamicParameterConstraints); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_discovery_v3_discovery_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Resource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_discovery_v3_discovery_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DynamicParameterConstraints_SingleConstraint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_discovery_v3_discovery_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DynamicParameterConstraints_ConstraintList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_discovery_v3_discovery_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DynamicParameterConstraints_SingleConstraint_Exists); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_discovery_v3_discovery_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Resource_CacheControl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_service_discovery_v3_discovery_proto_msgTypes[6].OneofWrappers = []interface{}{ + (*DynamicParameterConstraints_Constraint)(nil), + (*DynamicParameterConstraints_OrConstraints)(nil), + (*DynamicParameterConstraints_AndConstraints)(nil), + (*DynamicParameterConstraints_NotConstraints)(nil), + } + 
file_envoy_service_discovery_v3_discovery_proto_msgTypes[10].OneofWrappers = []interface{}{ + (*DynamicParameterConstraints_SingleConstraint_Value)(nil), + (*DynamicParameterConstraints_SingleConstraint_Exists_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_service_discovery_v3_discovery_proto_rawDesc, + NumEnums: 0, + NumMessages: 14, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_service_discovery_v3_discovery_proto_goTypes, + DependencyIndexes: file_envoy_service_discovery_v3_discovery_proto_depIdxs, + MessageInfos: file_envoy_service_discovery_v3_discovery_proto_msgTypes, + }.Build() + File_envoy_service_discovery_v3_discovery_proto = out.File + file_envoy_service_discovery_v3_discovery_proto_rawDesc = nil + file_envoy_service_discovery_v3_discovery_proto_goTypes = nil + file_envoy_service_discovery_v3_discovery_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.validate.go new file mode 100644 index 0000000000..e30bb1e439 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery.pb.validate.go @@ -0,0 +1,2139 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/service/discovery/v3/discovery.proto + +package discoveryv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ResourceLocator with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ResourceLocator) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceLocator with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ResourceLocatorMultiError, or nil if none found. +func (m *ResourceLocator) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceLocator) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + // no validation rules for DynamicParameters + + if len(errors) > 0 { + return ResourceLocatorMultiError(errors) + } + + return nil +} + +// ResourceLocatorMultiError is an error wrapping multiple validation errors +// returned by ResourceLocator.ValidateAll() if the designated constraints +// aren't met. +type ResourceLocatorMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceLocatorMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m ResourceLocatorMultiError) AllErrors() []error { return m } + +// ResourceLocatorValidationError is the validation error returned by +// ResourceLocator.Validate if the designated constraints aren't met. +type ResourceLocatorValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceLocatorValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceLocatorValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceLocatorValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceLocatorValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResourceLocatorValidationError) ErrorName() string { return "ResourceLocatorValidationError" } + +// Error satisfies the builtin error interface +func (e ResourceLocatorValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourceLocator.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceLocatorValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceLocatorValidationError{} + +// Validate checks the field values on ResourceName with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ResourceName) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceName with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ResourceNameMultiError, or +// nil if none found. +func (m *ResourceName) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceName) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + if all { + switch v := interface{}(m.GetDynamicParameterConstraints()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceNameValidationError{ + field: "DynamicParameterConstraints", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceNameValidationError{ + field: "DynamicParameterConstraints", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDynamicParameterConstraints()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceNameValidationError{ + field: "DynamicParameterConstraints", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ResourceNameMultiError(errors) + } + + return nil +} + +// ResourceNameMultiError is an error wrapping multiple validation errors +// returned by ResourceName.ValidateAll() if the designated constraints aren't met. +type ResourceNameMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ResourceNameMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceNameMultiError) AllErrors() []error { return m } + +// ResourceNameValidationError is the validation error returned by +// ResourceName.Validate if the designated constraints aren't met. +type ResourceNameValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceNameValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceNameValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceNameValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceNameValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResourceNameValidationError) ErrorName() string { return "ResourceNameValidationError" } + +// Error satisfies the builtin error interface +func (e ResourceNameValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourceName.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceNameValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceNameValidationError{} + +// Validate checks the field values on DiscoveryRequest with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *DiscoveryRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DiscoveryRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DiscoveryRequestMultiError, or nil if none found. 
+func (m *DiscoveryRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *DiscoveryRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for VersionInfo + + if all { + switch v := interface{}(m.GetNode()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DiscoveryRequestValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DiscoveryRequestValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNode()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DiscoveryRequestValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetResourceLocators() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DiscoveryRequestValidationError{ + field: fmt.Sprintf("ResourceLocators[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DiscoveryRequestValidationError{ + field: fmt.Sprintf("ResourceLocators[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DiscoveryRequestValidationError{ + field: fmt.Sprintf("ResourceLocators[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for TypeUrl + + // no validation rules for ResponseNonce + + if all { + switch v := interface{}(m.GetErrorDetail()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DiscoveryRequestValidationError{ + field: "ErrorDetail", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DiscoveryRequestValidationError{ + field: "ErrorDetail", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorDetail()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DiscoveryRequestValidationError{ + field: "ErrorDetail", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DiscoveryRequestMultiError(errors) + } + + return nil +} + +// DiscoveryRequestMultiError is an error wrapping multiple validation errors +// returned by DiscoveryRequest.ValidateAll() if the designated constraints +// aren't met. +type DiscoveryRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DiscoveryRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m DiscoveryRequestMultiError) AllErrors() []error { return m } + +// DiscoveryRequestValidationError is the validation error returned by +// DiscoveryRequest.Validate if the designated constraints aren't met. +type DiscoveryRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DiscoveryRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DiscoveryRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DiscoveryRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DiscoveryRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DiscoveryRequestValidationError) ErrorName() string { return "DiscoveryRequestValidationError" } + +// Error satisfies the builtin error interface +func (e DiscoveryRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDiscoveryRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DiscoveryRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DiscoveryRequestValidationError{} + +// Validate checks the field values on DiscoveryResponse with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *DiscoveryResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DiscoveryResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DiscoveryResponseMultiError, or nil if none found. 
+func (m *DiscoveryResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *DiscoveryResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for VersionInfo + + for idx, item := range m.GetResources() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DiscoveryResponseValidationError{ + field: fmt.Sprintf("Resources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DiscoveryResponseValidationError{ + field: fmt.Sprintf("Resources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DiscoveryResponseValidationError{ + field: fmt.Sprintf("Resources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for Canary + + // no validation rules for TypeUrl + + // no validation rules for Nonce + + if all { + switch v := interface{}(m.GetControlPlane()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DiscoveryResponseValidationError{ + field: "ControlPlane", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DiscoveryResponseValidationError{ + field: "ControlPlane", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetControlPlane()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DiscoveryResponseValidationError{ + field: "ControlPlane", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DiscoveryResponseMultiError(errors) + } + + return nil +} + +// DiscoveryResponseMultiError is an error wrapping multiple validation errors +// returned by DiscoveryResponse.ValidateAll() if the designated constraints +// aren't met. +type DiscoveryResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DiscoveryResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DiscoveryResponseMultiError) AllErrors() []error { return m } + +// DiscoveryResponseValidationError is the validation error returned by +// DiscoveryResponse.Validate if the designated constraints aren't met. +type DiscoveryResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DiscoveryResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DiscoveryResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DiscoveryResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DiscoveryResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DiscoveryResponseValidationError) ErrorName() string { + return "DiscoveryResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e DiscoveryResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDiscoveryResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DiscoveryResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DiscoveryResponseValidationError{} + +// Validate checks the field values on DeltaDiscoveryRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DeltaDiscoveryRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DeltaDiscoveryRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DeltaDiscoveryRequestMultiError, or nil if none found. +func (m *DeltaDiscoveryRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *DeltaDiscoveryRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetNode()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DeltaDiscoveryRequestValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DeltaDiscoveryRequestValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNode()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DeltaDiscoveryRequestValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for TypeUrl + + for idx, item := range m.GetResourceLocatorsSubscribe() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DeltaDiscoveryRequestValidationError{ + field: fmt.Sprintf("ResourceLocatorsSubscribe[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DeltaDiscoveryRequestValidationError{ + field: fmt.Sprintf("ResourceLocatorsSubscribe[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DeltaDiscoveryRequestValidationError{ + field: fmt.Sprintf("ResourceLocatorsSubscribe[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetResourceLocatorsUnsubscribe() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, 
DeltaDiscoveryRequestValidationError{ + field: fmt.Sprintf("ResourceLocatorsUnsubscribe[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DeltaDiscoveryRequestValidationError{ + field: fmt.Sprintf("ResourceLocatorsUnsubscribe[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DeltaDiscoveryRequestValidationError{ + field: fmt.Sprintf("ResourceLocatorsUnsubscribe[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for InitialResourceVersions + + // no validation rules for ResponseNonce + + if all { + switch v := interface{}(m.GetErrorDetail()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DeltaDiscoveryRequestValidationError{ + field: "ErrorDetail", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DeltaDiscoveryRequestValidationError{ + field: "ErrorDetail", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorDetail()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DeltaDiscoveryRequestValidationError{ + field: "ErrorDetail", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DeltaDiscoveryRequestMultiError(errors) + } + + return nil +} + +// DeltaDiscoveryRequestMultiError is an error wrapping multiple validation +// errors returned by DeltaDiscoveryRequest.ValidateAll() if the designated +// constraints aren't met. +type DeltaDiscoveryRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DeltaDiscoveryRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DeltaDiscoveryRequestMultiError) AllErrors() []error { return m } + +// DeltaDiscoveryRequestValidationError is the validation error returned by +// DeltaDiscoveryRequest.Validate if the designated constraints aren't met. +type DeltaDiscoveryRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DeltaDiscoveryRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DeltaDiscoveryRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DeltaDiscoveryRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DeltaDiscoveryRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DeltaDiscoveryRequestValidationError) ErrorName() string { + return "DeltaDiscoveryRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e DeltaDiscoveryRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDeltaDiscoveryRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DeltaDiscoveryRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DeltaDiscoveryRequestValidationError{} + +// Validate checks the field values on DeltaDiscoveryResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DeltaDiscoveryResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DeltaDiscoveryResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DeltaDiscoveryResponseMultiError, or nil if none found. +func (m *DeltaDiscoveryResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *DeltaDiscoveryResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for SystemVersionInfo + + for idx, item := range m.GetResources() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DeltaDiscoveryResponseValidationError{ + field: fmt.Sprintf("Resources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DeltaDiscoveryResponseValidationError{ + field: fmt.Sprintf("Resources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DeltaDiscoveryResponseValidationError{ + field: fmt.Sprintf("Resources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for TypeUrl + + for idx, item := range m.GetRemovedResourceNames() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DeltaDiscoveryResponseValidationError{ + field: fmt.Sprintf("RemovedResourceNames[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DeltaDiscoveryResponseValidationError{ + field: fmt.Sprintf("RemovedResourceNames[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DeltaDiscoveryResponseValidationError{ + field: fmt.Sprintf("RemovedResourceNames[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for Nonce + + if all { + switch v 
:= interface{}(m.GetControlPlane()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DeltaDiscoveryResponseValidationError{ + field: "ControlPlane", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DeltaDiscoveryResponseValidationError{ + field: "ControlPlane", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetControlPlane()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DeltaDiscoveryResponseValidationError{ + field: "ControlPlane", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DeltaDiscoveryResponseMultiError(errors) + } + + return nil +} + +// DeltaDiscoveryResponseMultiError is an error wrapping multiple validation +// errors returned by DeltaDiscoveryResponse.ValidateAll() if the designated +// constraints aren't met. +type DeltaDiscoveryResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DeltaDiscoveryResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DeltaDiscoveryResponseMultiError) AllErrors() []error { return m } + +// DeltaDiscoveryResponseValidationError is the validation error returned by +// DeltaDiscoveryResponse.Validate if the designated constraints aren't met. +type DeltaDiscoveryResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DeltaDiscoveryResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DeltaDiscoveryResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DeltaDiscoveryResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DeltaDiscoveryResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DeltaDiscoveryResponseValidationError) ErrorName() string { + return "DeltaDiscoveryResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e DeltaDiscoveryResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDeltaDiscoveryResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DeltaDiscoveryResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DeltaDiscoveryResponseValidationError{} + +// Validate checks the field values on DynamicParameterConstraints with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DynamicParameterConstraints) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DynamicParameterConstraints with the +// rules defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// DynamicParameterConstraintsMultiError, or nil if none found. +func (m *DynamicParameterConstraints) ValidateAll() error { + return m.validate(true) +} + +func (m *DynamicParameterConstraints) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + switch v := m.Type.(type) { + case *DynamicParameterConstraints_Constraint: + if v == nil { + err := DynamicParameterConstraintsValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetConstraint()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DynamicParameterConstraintsValidationError{ + field: "Constraint", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DynamicParameterConstraintsValidationError{ + field: "Constraint", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConstraint()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DynamicParameterConstraintsValidationError{ + field: "Constraint", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *DynamicParameterConstraints_OrConstraints: + if v == nil { + err := DynamicParameterConstraintsValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetOrConstraints()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DynamicParameterConstraintsValidationError{ + field: "OrConstraints", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DynamicParameterConstraintsValidationError{ + field: "OrConstraints", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOrConstraints()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DynamicParameterConstraintsValidationError{ + field: "OrConstraints", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *DynamicParameterConstraints_AndConstraints: + if v == nil { + err := DynamicParameterConstraintsValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetAndConstraints()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DynamicParameterConstraintsValidationError{ + field: "AndConstraints", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DynamicParameterConstraintsValidationError{ + field: "AndConstraints", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAndConstraints()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + 
return DynamicParameterConstraintsValidationError{ + field: "AndConstraints", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *DynamicParameterConstraints_NotConstraints: + if v == nil { + err := DynamicParameterConstraintsValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetNotConstraints()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DynamicParameterConstraintsValidationError{ + field: "NotConstraints", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DynamicParameterConstraintsValidationError{ + field: "NotConstraints", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNotConstraints()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DynamicParameterConstraintsValidationError{ + field: "NotConstraints", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return DynamicParameterConstraintsMultiError(errors) + } + + return nil +} + +// DynamicParameterConstraintsMultiError is an error wrapping multiple +// validation errors returned by DynamicParameterConstraints.ValidateAll() if +// the designated constraints aren't met. +type DynamicParameterConstraintsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DynamicParameterConstraintsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DynamicParameterConstraintsMultiError) AllErrors() []error { return m } + +// DynamicParameterConstraintsValidationError is the validation error returned +// by DynamicParameterConstraints.Validate if the designated constraints +// aren't met. +type DynamicParameterConstraintsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DynamicParameterConstraintsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DynamicParameterConstraintsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DynamicParameterConstraintsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DynamicParameterConstraintsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DynamicParameterConstraintsValidationError) ErrorName() string { + return "DynamicParameterConstraintsValidationError" +} + +// Error satisfies the builtin error interface +func (e DynamicParameterConstraintsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDynamicParameterConstraints.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DynamicParameterConstraintsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DynamicParameterConstraintsValidationError{} + +// Validate checks the field values on Resource with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Resource) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Resource with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ResourceMultiError, or nil +// if none found. +func (m *Resource) ValidateAll() error { + return m.validate(true) +} + +func (m *Resource) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + if all { + switch v := interface{}(m.GetResourceName()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "ResourceName", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "ResourceName", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResourceName()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceValidationError{ + field: "ResourceName", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Version + + if all { + switch v := interface{}(m.GetResource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTtl()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Ttl", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Ttl", + reason: "embedded message failed 
validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTtl()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceValidationError{ + field: "Ttl", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCacheControl()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "CacheControl", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "CacheControl", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCacheControl()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceValidationError{ + field: "CacheControl", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ResourceMultiError(errors) + } + + return nil +} + +// ResourceMultiError is an error wrapping multiple validation errors returned +// by Resource.ValidateAll() if the designated constraints aren't met. +type ResourceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceMultiError) AllErrors() []error { return m } + +// ResourceValidationError is the validation error returned by +// Resource.Validate if the designated constraints aren't met. +type ResourceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ResourceValidationError) ErrorName() string { return "ResourceValidationError" } + +// Error satisfies the builtin error interface +func (e ResourceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResource.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceValidationError{} + +// Validate checks the field values on +// DynamicParameterConstraints_SingleConstraint with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *DynamicParameterConstraints_SingleConstraint) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// DynamicParameterConstraints_SingleConstraint with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// DynamicParameterConstraints_SingleConstraintMultiError, or nil if none found. +func (m *DynamicParameterConstraints_SingleConstraint) ValidateAll() error { + return m.validate(true) +} + +func (m *DynamicParameterConstraints_SingleConstraint) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Key + + oneofConstraintTypePresent := false + switch v := m.ConstraintType.(type) { + case *DynamicParameterConstraints_SingleConstraint_Value: + if v == nil { + err := DynamicParameterConstraints_SingleConstraintValidationError{ + field: "ConstraintType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConstraintTypePresent = true + // no validation rules for Value + case *DynamicParameterConstraints_SingleConstraint_Exists_: + if v == nil { + err := DynamicParameterConstraints_SingleConstraintValidationError{ + field: "ConstraintType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofConstraintTypePresent = true + + if all { + switch v := interface{}(m.GetExists()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DynamicParameterConstraints_SingleConstraintValidationError{ + field: "Exists", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DynamicParameterConstraints_SingleConstraintValidationError{ + field: "Exists", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExists()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DynamicParameterConstraints_SingleConstraintValidationError{ + field: "Exists", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofConstraintTypePresent { + err := DynamicParameterConstraints_SingleConstraintValidationError{ + field: "ConstraintType", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return 
DynamicParameterConstraints_SingleConstraintMultiError(errors) + } + + return nil +} + +// DynamicParameterConstraints_SingleConstraintMultiError is an error wrapping +// multiple validation errors returned by +// DynamicParameterConstraints_SingleConstraint.ValidateAll() if the +// designated constraints aren't met. +type DynamicParameterConstraints_SingleConstraintMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DynamicParameterConstraints_SingleConstraintMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DynamicParameterConstraints_SingleConstraintMultiError) AllErrors() []error { return m } + +// DynamicParameterConstraints_SingleConstraintValidationError is the +// validation error returned by +// DynamicParameterConstraints_SingleConstraint.Validate if the designated +// constraints aren't met. +type DynamicParameterConstraints_SingleConstraintValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DynamicParameterConstraints_SingleConstraintValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DynamicParameterConstraints_SingleConstraintValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DynamicParameterConstraints_SingleConstraintValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DynamicParameterConstraints_SingleConstraintValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DynamicParameterConstraints_SingleConstraintValidationError) ErrorName() string { + return "DynamicParameterConstraints_SingleConstraintValidationError" +} + +// Error satisfies the builtin error interface +func (e DynamicParameterConstraints_SingleConstraintValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDynamicParameterConstraints_SingleConstraint.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DynamicParameterConstraints_SingleConstraintValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DynamicParameterConstraints_SingleConstraintValidationError{} + +// Validate checks the field values on +// DynamicParameterConstraints_ConstraintList with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *DynamicParameterConstraints_ConstraintList) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// DynamicParameterConstraints_ConstraintList with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// DynamicParameterConstraints_ConstraintListMultiError, or nil if none found. 
+func (m *DynamicParameterConstraints_ConstraintList) ValidateAll() error { + return m.validate(true) +} + +func (m *DynamicParameterConstraints_ConstraintList) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetConstraints() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DynamicParameterConstraints_ConstraintListValidationError{ + field: fmt.Sprintf("Constraints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DynamicParameterConstraints_ConstraintListValidationError{ + field: fmt.Sprintf("Constraints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DynamicParameterConstraints_ConstraintListValidationError{ + field: fmt.Sprintf("Constraints[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return DynamicParameterConstraints_ConstraintListMultiError(errors) + } + + return nil +} + +// DynamicParameterConstraints_ConstraintListMultiError is an error wrapping +// multiple validation errors returned by +// DynamicParameterConstraints_ConstraintList.ValidateAll() if the designated +// constraints aren't met. +type DynamicParameterConstraints_ConstraintListMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DynamicParameterConstraints_ConstraintListMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DynamicParameterConstraints_ConstraintListMultiError) AllErrors() []error { return m } + +// DynamicParameterConstraints_ConstraintListValidationError is the validation +// error returned by DynamicParameterConstraints_ConstraintList.Validate if +// the designated constraints aren't met. +type DynamicParameterConstraints_ConstraintListValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DynamicParameterConstraints_ConstraintListValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DynamicParameterConstraints_ConstraintListValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DynamicParameterConstraints_ConstraintListValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DynamicParameterConstraints_ConstraintListValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DynamicParameterConstraints_ConstraintListValidationError) ErrorName() string { + return "DynamicParameterConstraints_ConstraintListValidationError" +} + +// Error satisfies the builtin error interface +func (e DynamicParameterConstraints_ConstraintListValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDynamicParameterConstraints_ConstraintList.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DynamicParameterConstraints_ConstraintListValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DynamicParameterConstraints_ConstraintListValidationError{} + +// Validate checks the field values on +// DynamicParameterConstraints_SingleConstraint_Exists with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *DynamicParameterConstraints_SingleConstraint_Exists) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// DynamicParameterConstraints_SingleConstraint_Exists with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// DynamicParameterConstraints_SingleConstraint_ExistsMultiError, or nil if +// none found. +func (m *DynamicParameterConstraints_SingleConstraint_Exists) ValidateAll() error { + return m.validate(true) +} + +func (m *DynamicParameterConstraints_SingleConstraint_Exists) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return DynamicParameterConstraints_SingleConstraint_ExistsMultiError(errors) + } + + return nil +} + +// DynamicParameterConstraints_SingleConstraint_ExistsMultiError is an error +// wrapping multiple validation errors returned by +// DynamicParameterConstraints_SingleConstraint_Exists.ValidateAll() if the +// designated constraints aren't met. +type DynamicParameterConstraints_SingleConstraint_ExistsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DynamicParameterConstraints_SingleConstraint_ExistsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DynamicParameterConstraints_SingleConstraint_ExistsMultiError) AllErrors() []error { return m } + +// DynamicParameterConstraints_SingleConstraint_ExistsValidationError is the +// validation error returned by +// DynamicParameterConstraints_SingleConstraint_Exists.Validate if the +// designated constraints aren't met. +type DynamicParameterConstraints_SingleConstraint_ExistsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DynamicParameterConstraints_SingleConstraint_ExistsValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e DynamicParameterConstraints_SingleConstraint_ExistsValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. 
+func (e DynamicParameterConstraints_SingleConstraint_ExistsValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e DynamicParameterConstraints_SingleConstraint_ExistsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DynamicParameterConstraints_SingleConstraint_ExistsValidationError) ErrorName() string { + return "DynamicParameterConstraints_SingleConstraint_ExistsValidationError" +} + +// Error satisfies the builtin error interface +func (e DynamicParameterConstraints_SingleConstraint_ExistsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDynamicParameterConstraints_SingleConstraint_Exists.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DynamicParameterConstraints_SingleConstraint_ExistsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DynamicParameterConstraints_SingleConstraint_ExistsValidationError{} + +// Validate checks the field values on Resource_CacheControl with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Resource_CacheControl) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Resource_CacheControl with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Resource_CacheControlMultiError, or nil if none found. +func (m *Resource_CacheControl) ValidateAll() error { + return m.validate(true) +} + +func (m *Resource_CacheControl) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for DoNotCache + + if len(errors) > 0 { + return Resource_CacheControlMultiError(errors) + } + + return nil +} + +// Resource_CacheControlMultiError is an error wrapping multiple validation +// errors returned by Resource_CacheControl.ValidateAll() if the designated +// constraints aren't met. +type Resource_CacheControlMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Resource_CacheControlMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Resource_CacheControlMultiError) AllErrors() []error { return m } + +// Resource_CacheControlValidationError is the validation error returned by +// Resource_CacheControl.Validate if the designated constraints aren't met. +type Resource_CacheControlValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Resource_CacheControlValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Resource_CacheControlValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Resource_CacheControlValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Resource_CacheControlValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Resource_CacheControlValidationError) ErrorName() string { + return "Resource_CacheControlValidationError" +} + +// Error satisfies the builtin error interface +func (e Resource_CacheControlValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResource_CacheControl.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Resource_CacheControlValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Resource_CacheControlValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery_vtproto.pb.go new file mode 100644 index 0000000000..56a3ef579f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3/discovery_vtproto.pb.go @@ -0,0 +1,1546 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/service/discovery/v3/discovery.proto + +package discoveryv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ResourceLocator) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceLocator) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ResourceLocator) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DynamicParameters) > 0 { + for k := range m.DynamicParameters { + v := m.DynamicParameters[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResourceName) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceName) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ResourceName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DynamicParameterConstraints != nil { + size, err := m.DynamicParameterConstraints.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DiscoveryRequest) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DiscoveryRequest) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DiscoveryRequest) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ResourceLocators) > 0 { + for iNdEx := len(m.ResourceLocators) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ResourceLocators[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + } + if m.ErrorDetail != 
nil { + if vtmsg, ok := interface{}(m.ErrorDetail).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ErrorDetail) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + if len(m.ResponseNonce) > 0 { + i -= len(m.ResponseNonce) + copy(dAtA[i:], m.ResponseNonce) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseNonce))) + i-- + dAtA[i] = 0x2a + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0x22 + } + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.Node != nil { + if vtmsg, ok := interface{}(m.Node).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Node) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DiscoveryResponse) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DiscoveryResponse) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DiscoveryResponse) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ControlPlane != nil { + if vtmsg, ok := interface{}(m.ControlPlane).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ControlPlane) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + if len(m.Nonce) > 0 { + i -= len(m.Nonce) + copy(dAtA[i:], m.Nonce) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Nonce))) + i-- + dAtA[i] = 0x2a + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0x22 + } + if m.Canary { + i-- + if m.Canary { + dAtA[i] = 1 + } 
else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + size, err := (*anypb.Any)(m.Resources[iNdEx]).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeltaDiscoveryRequest) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeltaDiscoveryRequest) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DeltaDiscoveryRequest) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ResourceLocatorsUnsubscribe) > 0 { + for iNdEx := len(m.ResourceLocatorsUnsubscribe) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ResourceLocatorsUnsubscribe[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + } + if len(m.ResourceLocatorsSubscribe) > 0 { + for iNdEx := len(m.ResourceLocatorsSubscribe) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ResourceLocatorsSubscribe[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + } + if m.ErrorDetail != nil { + if vtmsg, ok := interface{}(m.ErrorDetail).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ErrorDetail) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + if len(m.ResponseNonce) > 0 { + i -= len(m.ResponseNonce) + copy(dAtA[i:], m.ResponseNonce) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResponseNonce))) + i-- + dAtA[i] = 0x32 + } + if len(m.InitialResourceVersions) > 0 { + for k := range m.InitialResourceVersions { + v := m.InitialResourceVersions[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.ResourceNamesUnsubscribe) > 0 { + for iNdEx := len(m.ResourceNamesUnsubscribe) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNamesUnsubscribe[iNdEx]) + copy(dAtA[i:], m.ResourceNamesUnsubscribe[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResourceNamesUnsubscribe[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.ResourceNamesSubscribe) > 0 
{ + for iNdEx := len(m.ResourceNamesSubscribe) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNamesSubscribe[iNdEx]) + copy(dAtA[i:], m.ResourceNamesSubscribe[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ResourceNamesSubscribe[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0x12 + } + if m.Node != nil { + if vtmsg, ok := interface{}(m.Node).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Node) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeltaDiscoveryResponse) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeltaDiscoveryResponse) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DeltaDiscoveryResponse) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RemovedResourceNames) > 0 { + for iNdEx := len(m.RemovedResourceNames) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RemovedResourceNames[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + } + if m.ControlPlane != nil { + if vtmsg, ok := interface{}(m.ControlPlane).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ControlPlane) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x3a + } + if len(m.RemovedResources) > 0 { + for iNdEx := len(m.RemovedResources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RemovedResources[iNdEx]) + copy(dAtA[i:], m.RemovedResources[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RemovedResources[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Nonce) > 0 { + i -= len(m.Nonce) + copy(dAtA[i:], m.Nonce) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Nonce))) + i-- + dAtA[i] = 0x2a + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0x22 + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Resources[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if 
len(m.SystemVersionInfo) > 0 { + i -= len(m.SystemVersionInfo) + copy(dAtA[i:], m.SystemVersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SystemVersionInfo))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DynamicParameterConstraints_SingleConstraint_Exists) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DynamicParameterConstraints_SingleConstraint_Exists) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_SingleConstraint_Exists) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *DynamicParameterConstraints_SingleConstraint) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DynamicParameterConstraints_SingleConstraint) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_SingleConstraint) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.ConstraintType.(*DynamicParameterConstraints_SingleConstraint_Exists_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.ConstraintType.(*DynamicParameterConstraints_SingleConstraint_Value); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DynamicParameterConstraints_SingleConstraint_Value) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_SingleConstraint_Value) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *DynamicParameterConstraints_SingleConstraint_Exists_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_SingleConstraint_Exists_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Exists != nil { + size, err := m.Exists.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 
0x1a + } + return len(dAtA) - i, nil +} +func (m *DynamicParameterConstraints_ConstraintList) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DynamicParameterConstraints_ConstraintList) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_ConstraintList) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Constraints) > 0 { + for iNdEx := len(m.Constraints) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Constraints[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DynamicParameterConstraints) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DynamicParameterConstraints) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Type.(*DynamicParameterConstraints_NotConstraints); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*DynamicParameterConstraints_AndConstraints); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*DynamicParameterConstraints_OrConstraints); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*DynamicParameterConstraints_Constraint); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *DynamicParameterConstraints_Constraint) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_Constraint) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Constraint != nil { + size, err := m.Constraint.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *DynamicParameterConstraints_OrConstraints) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_OrConstraints) 
MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrConstraints != nil { + size, err := m.OrConstraints.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *DynamicParameterConstraints_AndConstraints) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_AndConstraints) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.AndConstraints != nil { + size, err := m.AndConstraints.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *DynamicParameterConstraints_NotConstraints) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DynamicParameterConstraints_NotConstraints) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NotConstraints != nil { + size, err := m.NotConstraints.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Resource_CacheControl) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resource_CacheControl) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Resource_CacheControl) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DoNotCache { + i-- + if m.DoNotCache { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Resource) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resource) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Resource) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x4a + } + if m.ResourceName != nil { + size, err := m.ResourceName.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.CacheControl != nil { + size, err := m.CacheControl.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.Ttl != nil { + size, err := (*durationpb.Duration)(m.Ttl).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.Aliases) > 0 { + for iNdEx := len(m.Aliases) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Aliases[iNdEx]) + copy(dAtA[i:], m.Aliases[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Aliases[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + } + if m.Resource != nil { + size, err := (*anypb.Any)(m.Resource).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResourceLocator) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.DynamicParameters) > 0 { + for k, v := range m.DynamicParameters { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ResourceName) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.DynamicParameterConstraints != nil { + l = m.DynamicParameterConstraints.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DiscoveryRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Node != nil { + if size, ok := interface{}(m.Node).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Node) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.ResponseNonce) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorDetail != nil { + if size, ok := interface{}(m.ErrorDetail).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() 
+ } else { + l = proto.Size(m.ErrorDetail) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ResourceLocators) > 0 { + for _, e := range m.ResourceLocators { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *DiscoveryResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = (*anypb.Any)(e).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Canary { + n += 2 + } + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Nonce) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ControlPlane != nil { + if size, ok := interface{}(m.ControlPlane).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ControlPlane) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DeltaDiscoveryRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Node != nil { + if size, ok := interface{}(m.Node).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Node) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ResourceNamesSubscribe) > 0 { + for _, s := range m.ResourceNamesSubscribe { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResourceNamesUnsubscribe) > 0 { + for _, s := range m.ResourceNamesUnsubscribe { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.InitialResourceVersions) > 0 { + for k, v := range m.InitialResourceVersions { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + l = len(m.ResponseNonce) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ErrorDetail != nil { + if size, ok := interface{}(m.ErrorDetail).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ErrorDetail) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ResourceLocatorsSubscribe) > 0 { + for _, e := range m.ResourceLocatorsSubscribe { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.ResourceLocatorsUnsubscribe) > 0 { + for _, e := range m.ResourceLocatorsUnsubscribe { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *DeltaDiscoveryResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SystemVersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Nonce) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RemovedResources) > 0 { + for _, s := range m.RemovedResources { + l = len(s) + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.ControlPlane != nil { + if size, ok := interface{}(m.ControlPlane).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ControlPlane) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.RemovedResourceNames) > 0 { + for _, e := range m.RemovedResourceNames { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *DynamicParameterConstraints_SingleConstraint_Exists) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *DynamicParameterConstraints_SingleConstraint) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.ConstraintType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *DynamicParameterConstraints_SingleConstraint_Value) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *DynamicParameterConstraints_SingleConstraint_Exists_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Exists != nil { + l = m.Exists.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *DynamicParameterConstraints_ConstraintList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Constraints) > 0 { + for _, e := range m.Constraints { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *DynamicParameterConstraints) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Type.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *DynamicParameterConstraints_Constraint) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Constraint != nil { + l = m.Constraint.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *DynamicParameterConstraints_OrConstraints) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrConstraints != nil { + l = m.OrConstraints.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *DynamicParameterConstraints_AndConstraints) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AndConstraints != nil { + l = m.AndConstraints.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *DynamicParameterConstraints_NotConstraints) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NotConstraints != nil { + l = m.NotConstraints.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *Resource_CacheControl) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DoNotCache { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *Resource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Version) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Resource != nil { + l = 
(*anypb.Any)(m.Resource).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Aliases) > 0 { + for _, s := range m.Aliases { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Ttl != nil { + l = (*durationpb.Duration)(m.Ttl).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CacheControl != nil { + l = m.CacheControl.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ResourceName != nil { + l = m.ResourceName.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.go new file mode 100644 index 0000000000..6b47a93c6b --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.go @@ -0,0 +1,325 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/service/load_stats/v3/lrs.proto + +package load_statsv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v31 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A load report Envoy sends to the management server. +type LoadStatsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Node identifier for Envoy instance. + Node *v3.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + // A list of load stats to report. + ClusterStats []*v31.ClusterStats `protobuf:"bytes,2,rep,name=cluster_stats,json=clusterStats,proto3" json:"cluster_stats,omitempty"` +} + +func (x *LoadStatsRequest) Reset() { + *x = LoadStatsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_load_stats_v3_lrs_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LoadStatsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LoadStatsRequest) ProtoMessage() {} + +func (x *LoadStatsRequest) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_load_stats_v3_lrs_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LoadStatsRequest.ProtoReflect.Descriptor instead. 
+func (*LoadStatsRequest) Descriptor() ([]byte, []int) { + return file_envoy_service_load_stats_v3_lrs_proto_rawDescGZIP(), []int{0} +} + +func (x *LoadStatsRequest) GetNode() *v3.Node { + if x != nil { + return x.Node + } + return nil +} + +func (x *LoadStatsRequest) GetClusterStats() []*v31.ClusterStats { + if x != nil { + return x.ClusterStats + } + return nil +} + +// The management server sends envoy a LoadStatsResponse with all clusters it +// is interested in learning load stats about. +type LoadStatsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Clusters to report stats for. + // Not populated if “send_all_clusters“ is true. + Clusters []string `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"` + // If true, the client should send all clusters it knows about. + // Only clients that advertise the "envoy.lrs.supports_send_all_clusters" capability in their + // :ref:`client_features` field will honor this field. + SendAllClusters bool `protobuf:"varint,4,opt,name=send_all_clusters,json=sendAllClusters,proto3" json:"send_all_clusters,omitempty"` + // The minimum interval of time to collect stats over. This is only a minimum for two reasons: + // + // 1. There may be some delay from when the timer fires until stats sampling occurs. + // 2. For clusters that were already feature in the previous “LoadStatsResponse“, any traffic + // that is observed in between the corresponding previous “LoadStatsRequest“ and this + // “LoadStatsResponse“ will also be accumulated and billed to the cluster. This avoids a period + // of inobservability that might otherwise exists between the messages. New clusters are not + // subject to this consideration. + LoadReportingInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=load_reporting_interval,json=loadReportingInterval,proto3" json:"load_reporting_interval,omitempty"` + // Set to “true“ if the management server supports endpoint granularity + // report. + ReportEndpointGranularity bool `protobuf:"varint,3,opt,name=report_endpoint_granularity,json=reportEndpointGranularity,proto3" json:"report_endpoint_granularity,omitempty"` +} + +func (x *LoadStatsResponse) Reset() { + *x = LoadStatsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_load_stats_v3_lrs_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LoadStatsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LoadStatsResponse) ProtoMessage() {} + +func (x *LoadStatsResponse) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_load_stats_v3_lrs_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LoadStatsResponse.ProtoReflect.Descriptor instead. 
+func (*LoadStatsResponse) Descriptor() ([]byte, []int) { + return file_envoy_service_load_stats_v3_lrs_proto_rawDescGZIP(), []int{1} +} + +func (x *LoadStatsResponse) GetClusters() []string { + if x != nil { + return x.Clusters + } + return nil +} + +func (x *LoadStatsResponse) GetSendAllClusters() bool { + if x != nil { + return x.SendAllClusters + } + return false +} + +func (x *LoadStatsResponse) GetLoadReportingInterval() *durationpb.Duration { + if x != nil { + return x.LoadReportingInterval + } + return nil +} + +func (x *LoadStatsResponse) GetReportEndpointGranularity() bool { + if x != nil { + return x.ReportEndpointGranularity + } + return false +} + +var File_envoy_service_load_stats_v3_lrs_proto protoreflect.FileDescriptor + +var file_envoy_service_load_stats_v3_lrs_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x6c, 0x72, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2f, 0x76, 0x33, 0x2f, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xc4, 0x01, 0x0a, 0x10, 0x4c, 0x6f, 0x61, 0x64, 0x53, 0x74, 0x61, 0x74, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, + 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x4b, 0x0a, 0x0d, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x65, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x53, 0x74, 0x61, 0x74, 0x73, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xa4, 0x02, 0x0a, 0x11, 
0x4c, + 0x6f, 0x61, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x2a, 0x0a, 0x11, + 0x73, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x6c, 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x73, 0x65, 0x6e, 0x64, 0x41, 0x6c, 0x6c, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x51, 0x0a, 0x17, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x3e, 0x0a, 0x1b, 0x72, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x67, + 0x72, 0x61, 0x6e, 0x75, 0x6c, 0x61, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x19, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x47, 0x72, 0x61, 0x6e, 0x75, 0x6c, 0x61, 0x72, 0x69, 0x74, 0x79, 0x3a, 0x34, 0x9a, 0xc5, 0x88, + 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x4c, 0x6f, 0x61, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x32, 0x8e, 0x01, 0x0a, 0x14, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x76, 0x0a, 0x0f, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x6f, 0x61, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x2d, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x61, 0x64, + 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, + 0x30, 0x01, 0x42, 0x90, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x29, 0x69, + 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x4c, 0x72, 0x73, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x73, 
0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_service_load_stats_v3_lrs_proto_rawDescOnce sync.Once + file_envoy_service_load_stats_v3_lrs_proto_rawDescData = file_envoy_service_load_stats_v3_lrs_proto_rawDesc +) + +func file_envoy_service_load_stats_v3_lrs_proto_rawDescGZIP() []byte { + file_envoy_service_load_stats_v3_lrs_proto_rawDescOnce.Do(func() { + file_envoy_service_load_stats_v3_lrs_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_service_load_stats_v3_lrs_proto_rawDescData) + }) + return file_envoy_service_load_stats_v3_lrs_proto_rawDescData +} + +var file_envoy_service_load_stats_v3_lrs_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_service_load_stats_v3_lrs_proto_goTypes = []interface{}{ + (*LoadStatsRequest)(nil), // 0: envoy.service.load_stats.v3.LoadStatsRequest + (*LoadStatsResponse)(nil), // 1: envoy.service.load_stats.v3.LoadStatsResponse + (*v3.Node)(nil), // 2: envoy.config.core.v3.Node + (*v31.ClusterStats)(nil), // 3: envoy.config.endpoint.v3.ClusterStats + (*durationpb.Duration)(nil), // 4: google.protobuf.Duration +} +var file_envoy_service_load_stats_v3_lrs_proto_depIdxs = []int32{ + 2, // 0: envoy.service.load_stats.v3.LoadStatsRequest.node:type_name -> envoy.config.core.v3.Node + 3, // 1: envoy.service.load_stats.v3.LoadStatsRequest.cluster_stats:type_name -> envoy.config.endpoint.v3.ClusterStats + 4, // 2: envoy.service.load_stats.v3.LoadStatsResponse.load_reporting_interval:type_name -> google.protobuf.Duration + 0, // 3: envoy.service.load_stats.v3.LoadReportingService.StreamLoadStats:input_type -> envoy.service.load_stats.v3.LoadStatsRequest + 1, // 4: envoy.service.load_stats.v3.LoadReportingService.StreamLoadStats:output_type -> envoy.service.load_stats.v3.LoadStatsResponse + 4, // [4:5] is the sub-list for method output_type + 3, // [3:4] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_envoy_service_load_stats_v3_lrs_proto_init() } +func file_envoy_service_load_stats_v3_lrs_proto_init() { + if File_envoy_service_load_stats_v3_lrs_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_service_load_stats_v3_lrs_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoadStatsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_load_stats_v3_lrs_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoadStatsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_service_load_stats_v3_lrs_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_envoy_service_load_stats_v3_lrs_proto_goTypes, + DependencyIndexes: file_envoy_service_load_stats_v3_lrs_proto_depIdxs, + MessageInfos: file_envoy_service_load_stats_v3_lrs_proto_msgTypes, + }.Build() + File_envoy_service_load_stats_v3_lrs_proto = out.File + file_envoy_service_load_stats_v3_lrs_proto_rawDesc = nil + file_envoy_service_load_stats_v3_lrs_proto_goTypes = nil + 
file_envoy_service_load_stats_v3_lrs_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.validate.go new file mode 100644 index 0000000000..cf4e395c2a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs.pb.validate.go @@ -0,0 +1,335 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/service/load_stats/v3/lrs.proto + +package load_statsv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on LoadStatsRequest with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *LoadStatsRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LoadStatsRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// LoadStatsRequestMultiError, or nil if none found. +func (m *LoadStatsRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *LoadStatsRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetNode()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LoadStatsRequestValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LoadStatsRequestValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNode()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LoadStatsRequestValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetClusterStats() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LoadStatsRequestValidationError{ + field: fmt.Sprintf("ClusterStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LoadStatsRequestValidationError{ + field: fmt.Sprintf("ClusterStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LoadStatsRequestValidationError{ + field: fmt.Sprintf("ClusterStats[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } 
+ + if len(errors) > 0 { + return LoadStatsRequestMultiError(errors) + } + + return nil +} + +// LoadStatsRequestMultiError is an error wrapping multiple validation errors +// returned by LoadStatsRequest.ValidateAll() if the designated constraints +// aren't met. +type LoadStatsRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LoadStatsRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LoadStatsRequestMultiError) AllErrors() []error { return m } + +// LoadStatsRequestValidationError is the validation error returned by +// LoadStatsRequest.Validate if the designated constraints aren't met. +type LoadStatsRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LoadStatsRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LoadStatsRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LoadStatsRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LoadStatsRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LoadStatsRequestValidationError) ErrorName() string { return "LoadStatsRequestValidationError" } + +// Error satisfies the builtin error interface +func (e LoadStatsRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLoadStatsRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LoadStatsRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LoadStatsRequestValidationError{} + +// Validate checks the field values on LoadStatsResponse with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *LoadStatsResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LoadStatsResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// LoadStatsResponseMultiError, or nil if none found. 
+func (m *LoadStatsResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *LoadStatsResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for SendAllClusters + + if all { + switch v := interface{}(m.GetLoadReportingInterval()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LoadStatsResponseValidationError{ + field: "LoadReportingInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LoadStatsResponseValidationError{ + field: "LoadReportingInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLoadReportingInterval()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LoadStatsResponseValidationError{ + field: "LoadReportingInterval", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ReportEndpointGranularity + + if len(errors) > 0 { + return LoadStatsResponseMultiError(errors) + } + + return nil +} + +// LoadStatsResponseMultiError is an error wrapping multiple validation errors +// returned by LoadStatsResponse.ValidateAll() if the designated constraints +// aren't met. +type LoadStatsResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LoadStatsResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LoadStatsResponseMultiError) AllErrors() []error { return m } + +// LoadStatsResponseValidationError is the validation error returned by +// LoadStatsResponse.Validate if the designated constraints aren't met. +type LoadStatsResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LoadStatsResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LoadStatsResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LoadStatsResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LoadStatsResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e LoadStatsResponseValidationError) ErrorName() string { + return "LoadStatsResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e LoadStatsResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLoadStatsResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LoadStatsResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LoadStatsResponseValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs_grpc.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs_grpc.pb.go new file mode 100644 index 0000000000..4eb34c1733 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs_grpc.pb.go @@ -0,0 +1,197 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v5.26.1 +// source: envoy/service/load_stats/v3/lrs.proto + +package load_statsv3 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + LoadReportingService_StreamLoadStats_FullMethodName = "/envoy.service.load_stats.v3.LoadReportingService/StreamLoadStats" +) + +// LoadReportingServiceClient is the client API for LoadReportingService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type LoadReportingServiceClient interface { + // Advanced API to allow for multi-dimensional load balancing by remote + // server. For receiving LB assignments, the steps are: + // 1, The management server is configured with per cluster/zone/load metric + // + // capacity configuration. The capacity configuration definition is + // outside of the scope of this document. + // 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters + // to balance. + // + // Independently, Envoy will initiate a StreamLoadStats bidi stream with a + // management server: + // 1. Once a connection establishes, the management server publishes a + // LoadStatsResponse for all clusters it is interested in learning load + // stats about. + // 2. For each cluster, Envoy load balances incoming traffic to upstream hosts + // based on per-zone weights and/or per-instance weights (if specified) + // based on intra-zone LbPolicy. This information comes from the above + // {Stream,Fetch}Endpoints. + // 3. When upstream hosts reply, they optionally add header with ASCII representation of EndpointLoadMetricStats. + // 4. Envoy aggregates load reports over the period of time given to it in + // LoadStatsResponse.load_reporting_interval. This includes aggregation + // stats Envoy maintains by itself (total_requests, rpc_errors etc.) as + // well as load metrics from upstream hosts. + // 5. When the timer of load_reporting_interval expires, Envoy sends new + // LoadStatsRequest filled with load reports for each cluster. + // 6. 
The management server uses the load reports from all reported Envoys + // from around the world, computes global assignment and prepares traffic + // assignment destined for each zone Envoys are located in. Goto 2. + StreamLoadStats(ctx context.Context, opts ...grpc.CallOption) (LoadReportingService_StreamLoadStatsClient, error) +} + +type loadReportingServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewLoadReportingServiceClient(cc grpc.ClientConnInterface) LoadReportingServiceClient { + return &loadReportingServiceClient{cc} +} + +func (c *loadReportingServiceClient) StreamLoadStats(ctx context.Context, opts ...grpc.CallOption) (LoadReportingService_StreamLoadStatsClient, error) { + stream, err := c.cc.NewStream(ctx, &LoadReportingService_ServiceDesc.Streams[0], LoadReportingService_StreamLoadStats_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &loadReportingServiceStreamLoadStatsClient{stream} + return x, nil +} + +type LoadReportingService_StreamLoadStatsClient interface { + Send(*LoadStatsRequest) error + Recv() (*LoadStatsResponse, error) + grpc.ClientStream +} + +type loadReportingServiceStreamLoadStatsClient struct { + grpc.ClientStream +} + +func (x *loadReportingServiceStreamLoadStatsClient) Send(m *LoadStatsRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *loadReportingServiceStreamLoadStatsClient) Recv() (*LoadStatsResponse, error) { + m := new(LoadStatsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// LoadReportingServiceServer is the server API for LoadReportingService service. +// All implementations should embed UnimplementedLoadReportingServiceServer +// for forward compatibility +type LoadReportingServiceServer interface { + // Advanced API to allow for multi-dimensional load balancing by remote + // server. For receiving LB assignments, the steps are: + // 1, The management server is configured with per cluster/zone/load metric + // + // capacity configuration. The capacity configuration definition is + // outside of the scope of this document. + // 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters + // to balance. + // + // Independently, Envoy will initiate a StreamLoadStats bidi stream with a + // management server: + // 1. Once a connection establishes, the management server publishes a + // LoadStatsResponse for all clusters it is interested in learning load + // stats about. + // 2. For each cluster, Envoy load balances incoming traffic to upstream hosts + // based on per-zone weights and/or per-instance weights (if specified) + // based on intra-zone LbPolicy. This information comes from the above + // {Stream,Fetch}Endpoints. + // 3. When upstream hosts reply, they optionally add header with ASCII representation of EndpointLoadMetricStats. + // 4. Envoy aggregates load reports over the period of time given to it in + // LoadStatsResponse.load_reporting_interval. This includes aggregation + // stats Envoy maintains by itself (total_requests, rpc_errors etc.) as + // well as load metrics from upstream hosts. + // 5. When the timer of load_reporting_interval expires, Envoy sends new + // LoadStatsRequest filled with load reports for each cluster. + // 6. The management server uses the load reports from all reported Envoys + // from around the world, computes global assignment and prepares traffic + // assignment destined for each zone Envoys are located in. Goto 2. 
+ StreamLoadStats(LoadReportingService_StreamLoadStatsServer) error +} + +// UnimplementedLoadReportingServiceServer should be embedded to have forward compatible implementations. +type UnimplementedLoadReportingServiceServer struct { +} + +func (UnimplementedLoadReportingServiceServer) StreamLoadStats(LoadReportingService_StreamLoadStatsServer) error { + return status.Errorf(codes.Unimplemented, "method StreamLoadStats not implemented") +} + +// UnsafeLoadReportingServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to LoadReportingServiceServer will +// result in compilation errors. +type UnsafeLoadReportingServiceServer interface { + mustEmbedUnimplementedLoadReportingServiceServer() +} + +func RegisterLoadReportingServiceServer(s grpc.ServiceRegistrar, srv LoadReportingServiceServer) { + s.RegisterService(&LoadReportingService_ServiceDesc, srv) +} + +func _LoadReportingService_StreamLoadStats_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(LoadReportingServiceServer).StreamLoadStats(&loadReportingServiceStreamLoadStatsServer{stream}) +} + +type LoadReportingService_StreamLoadStatsServer interface { + Send(*LoadStatsResponse) error + Recv() (*LoadStatsRequest, error) + grpc.ServerStream +} + +type loadReportingServiceStreamLoadStatsServer struct { + grpc.ServerStream +} + +func (x *loadReportingServiceStreamLoadStatsServer) Send(m *LoadStatsResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *loadReportingServiceStreamLoadStatsServer) Recv() (*LoadStatsRequest, error) { + m := new(LoadStatsRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// LoadReportingService_ServiceDesc is the grpc.ServiceDesc for LoadReportingService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var LoadReportingService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "envoy.service.load_stats.v3.LoadReportingService", + HandlerType: (*LoadReportingServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamLoadStats", + Handler: _LoadReportingService_StreamLoadStats_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "envoy/service/load_stats/v3/lrs.proto", +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs_vtproto.pb.go new file mode 100644 index 0000000000..ca86ae7740 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3/lrs_vtproto.pb.go @@ -0,0 +1,230 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/service/load_stats/v3/lrs.proto + +package load_statsv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *LoadStatsRequest) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LoadStatsRequest) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LoadStatsRequest) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ClusterStats) > 0 { + for iNdEx := len(m.ClusterStats) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.ClusterStats[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ClusterStats[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Node != nil { + if vtmsg, ok := interface{}(m.Node).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Node) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LoadStatsResponse) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LoadStatsResponse) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *LoadStatsResponse) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SendAllClusters { + i-- + if m.SendAllClusters { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.ReportEndpointGranularity { + i-- + if m.ReportEndpointGranularity { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.LoadReportingInterval != nil { + size, err := (*durationpb.Duration)(m.LoadReportingInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Clusters) > 0 { + for iNdEx := len(m.Clusters) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Clusters[iNdEx]) + copy(dAtA[i:], m.Clusters[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Clusters[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LoadStatsRequest) SizeVT() (n int) { + if m == nil { 
+ return 0 + } + var l int + _ = l + if m.Node != nil { + if size, ok := interface{}(m.Node).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Node) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.ClusterStats) > 0 { + for _, e := range m.ClusterStats { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *LoadStatsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Clusters) > 0 { + for _, s := range m.Clusters { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.LoadReportingInterval != nil { + l = (*durationpb.Duration)(m.LoadReportingInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ReportEndpointGranularity { + n += 2 + } + if m.SendAllClusters { + n += 2 + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.go new file mode 100644 index 0000000000..4635ca0284 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.go @@ -0,0 +1,985 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/service/status/v3/csds.proto + +package statusv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v32 "github.com/envoyproxy/go-control-plane/envoy/admin/v3" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + v31 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Status of a config from a management server view. +type ConfigStatus int32 + +const ( + // Status info is not available/unknown. + ConfigStatus_UNKNOWN ConfigStatus = 0 + // Management server has sent the config to client and received ACK. + ConfigStatus_SYNCED ConfigStatus = 1 + // Config is not sent. + ConfigStatus_NOT_SENT ConfigStatus = 2 + // Management server has sent the config to client but hasn’t received + // ACK/NACK. + ConfigStatus_STALE ConfigStatus = 3 + // Management server has sent the config to client but received NACK. The + // attached config dump will be the latest config (the rejected one), since + // it is the persisted version in the management server. + ConfigStatus_ERROR ConfigStatus = 4 +) + +// Enum value maps for ConfigStatus. 
+var ( + ConfigStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SYNCED", + 2: "NOT_SENT", + 3: "STALE", + 4: "ERROR", + } + ConfigStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SYNCED": 1, + "NOT_SENT": 2, + "STALE": 3, + "ERROR": 4, + } +) + +func (x ConfigStatus) Enum() *ConfigStatus { + p := new(ConfigStatus) + *p = x + return p +} + +func (x ConfigStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ConfigStatus) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_service_status_v3_csds_proto_enumTypes[0].Descriptor() +} + +func (ConfigStatus) Type() protoreflect.EnumType { + return &file_envoy_service_status_v3_csds_proto_enumTypes[0] +} + +func (x ConfigStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ConfigStatus.Descriptor instead. +func (ConfigStatus) EnumDescriptor() ([]byte, []int) { + return file_envoy_service_status_v3_csds_proto_rawDescGZIP(), []int{0} +} + +// Config status from a client-side view. +type ClientConfigStatus int32 + +const ( + // Config status is not available/unknown. + ClientConfigStatus_CLIENT_UNKNOWN ClientConfigStatus = 0 + // Client requested the config but hasn't received any config from management + // server yet. + ClientConfigStatus_CLIENT_REQUESTED ClientConfigStatus = 1 + // Client received the config and replied with ACK. + ClientConfigStatus_CLIENT_ACKED ClientConfigStatus = 2 + // Client received the config and replied with NACK. Notably, the attached + // config dump is not the NACKed version, but the most recent accepted one. If + // no config is accepted yet, the attached config dump will be empty. + ClientConfigStatus_CLIENT_NACKED ClientConfigStatus = 3 +) + +// Enum value maps for ClientConfigStatus. +var ( + ClientConfigStatus_name = map[int32]string{ + 0: "CLIENT_UNKNOWN", + 1: "CLIENT_REQUESTED", + 2: "CLIENT_ACKED", + 3: "CLIENT_NACKED", + } + ClientConfigStatus_value = map[string]int32{ + "CLIENT_UNKNOWN": 0, + "CLIENT_REQUESTED": 1, + "CLIENT_ACKED": 2, + "CLIENT_NACKED": 3, + } +) + +func (x ClientConfigStatus) Enum() *ClientConfigStatus { + p := new(ClientConfigStatus) + *p = x + return p +} + +func (x ClientConfigStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClientConfigStatus) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_service_status_v3_csds_proto_enumTypes[1].Descriptor() +} + +func (ClientConfigStatus) Type() protoreflect.EnumType { + return &file_envoy_service_status_v3_csds_proto_enumTypes[1] +} + +func (x ClientConfigStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClientConfigStatus.Descriptor instead. +func (ClientConfigStatus) EnumDescriptor() ([]byte, []int) { + return file_envoy_service_status_v3_csds_proto_rawDescGZIP(), []int{1} +} + +// Request for client status of clients identified by a list of NodeMatchers. +type ClientStatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Management server can use these match criteria to identify clients. + // The match follows OR semantics. + NodeMatchers []*v3.NodeMatcher `protobuf:"bytes,1,rep,name=node_matchers,json=nodeMatchers,proto3" json:"node_matchers,omitempty"` + // The node making the csds request. 
+ Node *v31.Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` + // If true, the server will not include the resource contents in the response + // (i.e., the generic_xds_configs.xds_config field will not be populated). + // [#not-implemented-hide:] + ExcludeResourceContents bool `protobuf:"varint,3,opt,name=exclude_resource_contents,json=excludeResourceContents,proto3" json:"exclude_resource_contents,omitempty"` +} + +func (x *ClientStatusRequest) Reset() { + *x = ClientStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_status_v3_csds_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientStatusRequest) ProtoMessage() {} + +func (x *ClientStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_status_v3_csds_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientStatusRequest.ProtoReflect.Descriptor instead. +func (*ClientStatusRequest) Descriptor() ([]byte, []int) { + return file_envoy_service_status_v3_csds_proto_rawDescGZIP(), []int{0} +} + +func (x *ClientStatusRequest) GetNodeMatchers() []*v3.NodeMatcher { + if x != nil { + return x.NodeMatchers + } + return nil +} + +func (x *ClientStatusRequest) GetNode() *v31.Node { + if x != nil { + return x.Node + } + return nil +} + +func (x *ClientStatusRequest) GetExcludeResourceContents() bool { + if x != nil { + return x.ExcludeResourceContents + } + return false +} + +// Detailed config (per xDS) with status. +// [#next-free-field: 8] +type PerXdsConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Config status generated by management servers. Will not be present if the + // CSDS server is an xDS client. + Status ConfigStatus `protobuf:"varint,1,opt,name=status,proto3,enum=envoy.service.status.v3.ConfigStatus" json:"status,omitempty"` + // Client config status is populated by xDS clients. Will not be present if + // the CSDS server is an xDS server. No matter what the client config status + // is, xDS clients should always dump the most recent accepted xDS config. + // + // .. attention:: + // + // This field is deprecated. Use :ref:`ClientResourceStatus + // ` for per-resource + // config status instead. + // + // Deprecated: Marked as deprecated in envoy/service/status/v3/csds.proto. 
+ ClientStatus ClientConfigStatus `protobuf:"varint,7,opt,name=client_status,json=clientStatus,proto3,enum=envoy.service.status.v3.ClientConfigStatus" json:"client_status,omitempty"` + // Types that are assignable to PerXdsConfig: + // + // *PerXdsConfig_ListenerConfig + // *PerXdsConfig_ClusterConfig + // *PerXdsConfig_RouteConfig + // *PerXdsConfig_ScopedRouteConfig + // *PerXdsConfig_EndpointConfig + PerXdsConfig isPerXdsConfig_PerXdsConfig `protobuf_oneof:"per_xds_config"` +} + +func (x *PerXdsConfig) Reset() { + *x = PerXdsConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_status_v3_csds_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PerXdsConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PerXdsConfig) ProtoMessage() {} + +func (x *PerXdsConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_status_v3_csds_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PerXdsConfig.ProtoReflect.Descriptor instead. +func (*PerXdsConfig) Descriptor() ([]byte, []int) { + return file_envoy_service_status_v3_csds_proto_rawDescGZIP(), []int{1} +} + +func (x *PerXdsConfig) GetStatus() ConfigStatus { + if x != nil { + return x.Status + } + return ConfigStatus_UNKNOWN +} + +// Deprecated: Marked as deprecated in envoy/service/status/v3/csds.proto. +func (x *PerXdsConfig) GetClientStatus() ClientConfigStatus { + if x != nil { + return x.ClientStatus + } + return ClientConfigStatus_CLIENT_UNKNOWN +} + +func (m *PerXdsConfig) GetPerXdsConfig() isPerXdsConfig_PerXdsConfig { + if m != nil { + return m.PerXdsConfig + } + return nil +} + +func (x *PerXdsConfig) GetListenerConfig() *v32.ListenersConfigDump { + if x, ok := x.GetPerXdsConfig().(*PerXdsConfig_ListenerConfig); ok { + return x.ListenerConfig + } + return nil +} + +func (x *PerXdsConfig) GetClusterConfig() *v32.ClustersConfigDump { + if x, ok := x.GetPerXdsConfig().(*PerXdsConfig_ClusterConfig); ok { + return x.ClusterConfig + } + return nil +} + +func (x *PerXdsConfig) GetRouteConfig() *v32.RoutesConfigDump { + if x, ok := x.GetPerXdsConfig().(*PerXdsConfig_RouteConfig); ok { + return x.RouteConfig + } + return nil +} + +func (x *PerXdsConfig) GetScopedRouteConfig() *v32.ScopedRoutesConfigDump { + if x, ok := x.GetPerXdsConfig().(*PerXdsConfig_ScopedRouteConfig); ok { + return x.ScopedRouteConfig + } + return nil +} + +func (x *PerXdsConfig) GetEndpointConfig() *v32.EndpointsConfigDump { + if x, ok := x.GetPerXdsConfig().(*PerXdsConfig_EndpointConfig); ok { + return x.EndpointConfig + } + return nil +} + +type isPerXdsConfig_PerXdsConfig interface { + isPerXdsConfig_PerXdsConfig() +} + +type PerXdsConfig_ListenerConfig struct { + ListenerConfig *v32.ListenersConfigDump `protobuf:"bytes,2,opt,name=listener_config,json=listenerConfig,proto3,oneof"` +} + +type PerXdsConfig_ClusterConfig struct { + ClusterConfig *v32.ClustersConfigDump `protobuf:"bytes,3,opt,name=cluster_config,json=clusterConfig,proto3,oneof"` +} + +type PerXdsConfig_RouteConfig struct { + RouteConfig *v32.RoutesConfigDump `protobuf:"bytes,4,opt,name=route_config,json=routeConfig,proto3,oneof"` +} + +type PerXdsConfig_ScopedRouteConfig struct { + ScopedRouteConfig *v32.ScopedRoutesConfigDump 
`protobuf:"bytes,5,opt,name=scoped_route_config,json=scopedRouteConfig,proto3,oneof"` +} + +type PerXdsConfig_EndpointConfig struct { + EndpointConfig *v32.EndpointsConfigDump `protobuf:"bytes,6,opt,name=endpoint_config,json=endpointConfig,proto3,oneof"` +} + +func (*PerXdsConfig_ListenerConfig) isPerXdsConfig_PerXdsConfig() {} + +func (*PerXdsConfig_ClusterConfig) isPerXdsConfig_PerXdsConfig() {} + +func (*PerXdsConfig_RouteConfig) isPerXdsConfig_PerXdsConfig() {} + +func (*PerXdsConfig_ScopedRouteConfig) isPerXdsConfig_PerXdsConfig() {} + +func (*PerXdsConfig_EndpointConfig) isPerXdsConfig_PerXdsConfig() {} + +// All xds configs for a particular client. +type ClientConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Node for a particular client. + Node *v31.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + // This field is deprecated in favor of generic_xds_configs which is + // much simpler and uniform in structure. + // + // Deprecated: Marked as deprecated in envoy/service/status/v3/csds.proto. + XdsConfig []*PerXdsConfig `protobuf:"bytes,2,rep,name=xds_config,json=xdsConfig,proto3" json:"xds_config,omitempty"` + // Represents generic xDS config and the exact config structure depends on + // the type URL (like Cluster if it is CDS) + GenericXdsConfigs []*ClientConfig_GenericXdsConfig `protobuf:"bytes,3,rep,name=generic_xds_configs,json=genericXdsConfigs,proto3" json:"generic_xds_configs,omitempty"` + // For xDS clients, the scope in which the data is used. + // For example, gRPC indicates the data plane target or that the data is + // associated with gRPC server(s). + ClientScope string `protobuf:"bytes,4,opt,name=client_scope,json=clientScope,proto3" json:"client_scope,omitempty"` +} + +func (x *ClientConfig) Reset() { + *x = ClientConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_status_v3_csds_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientConfig) ProtoMessage() {} + +func (x *ClientConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_status_v3_csds_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientConfig.ProtoReflect.Descriptor instead. +func (*ClientConfig) Descriptor() ([]byte, []int) { + return file_envoy_service_status_v3_csds_proto_rawDescGZIP(), []int{2} +} + +func (x *ClientConfig) GetNode() *v31.Node { + if x != nil { + return x.Node + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/service/status/v3/csds.proto. +func (x *ClientConfig) GetXdsConfig() []*PerXdsConfig { + if x != nil { + return x.XdsConfig + } + return nil +} + +func (x *ClientConfig) GetGenericXdsConfigs() []*ClientConfig_GenericXdsConfig { + if x != nil { + return x.GenericXdsConfigs + } + return nil +} + +func (x *ClientConfig) GetClientScope() string { + if x != nil { + return x.ClientScope + } + return "" +} + +type ClientStatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Client configs for the clients specified in the ClientStatusRequest. 
+ Config []*ClientConfig `protobuf:"bytes,1,rep,name=config,proto3" json:"config,omitempty"` +} + +func (x *ClientStatusResponse) Reset() { + *x = ClientStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_status_v3_csds_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientStatusResponse) ProtoMessage() {} + +func (x *ClientStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_status_v3_csds_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientStatusResponse.ProtoReflect.Descriptor instead. +func (*ClientStatusResponse) Descriptor() ([]byte, []int) { + return file_envoy_service_status_v3_csds_proto_rawDescGZIP(), []int{3} +} + +func (x *ClientStatusResponse) GetConfig() []*ClientConfig { + if x != nil { + return x.Config + } + return nil +} + +// GenericXdsConfig is used to specify the config status and the dump +// of any xDS resource identified by their type URL. It is the generalized +// version of the now deprecated ListenersConfigDump, ClustersConfigDump etc +// [#next-free-field: 10] +type ClientConfig_GenericXdsConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Type_url represents the fully qualified name of xDS resource type + // like envoy.v3.Cluster, envoy.v3.ClusterLoadAssignment etc. + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // Name of the xDS resource + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // This is the :ref:`version_info ` + // in the last processed xDS discovery response. If there are only + // static bootstrap listeners, this field will be "" + VersionInfo string `protobuf:"bytes,3,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + // The xDS resource config. Actual content depends on the type + XdsConfig *anypb.Any `protobuf:"bytes,4,opt,name=xds_config,json=xdsConfig,proto3" json:"xds_config,omitempty"` + // Timestamp when the xDS resource was last updated + LastUpdated *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` + // Per xDS resource config status. It is generated by management servers. + // It will not be present if the CSDS server is an xDS client. + ConfigStatus ConfigStatus `protobuf:"varint,6,opt,name=config_status,json=configStatus,proto3,enum=envoy.service.status.v3.ConfigStatus" json:"config_status,omitempty"` + // Per xDS resource status from the view of a xDS client + ClientStatus v32.ClientResourceStatus `protobuf:"varint,7,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"` + // Set if the last update failed, cleared after the next successful + // update. The *error_state* field contains the rejected version of + // this particular resource along with the reason and timestamp. For + // successfully updated or acknowledged resource, this field should + // be empty. 
+ // [#not-implemented-hide:] + ErrorState *v32.UpdateFailureState `protobuf:"bytes,8,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"` + // Is static resource is true if it is specified in the config supplied + // through the file at the startup. + IsStaticResource bool `protobuf:"varint,9,opt,name=is_static_resource,json=isStaticResource,proto3" json:"is_static_resource,omitempty"` +} + +func (x *ClientConfig_GenericXdsConfig) Reset() { + *x = ClientConfig_GenericXdsConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_service_status_v3_csds_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientConfig_GenericXdsConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientConfig_GenericXdsConfig) ProtoMessage() {} + +func (x *ClientConfig_GenericXdsConfig) ProtoReflect() protoreflect.Message { + mi := &file_envoy_service_status_v3_csds_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientConfig_GenericXdsConfig.ProtoReflect.Descriptor instead. +func (*ClientConfig_GenericXdsConfig) Descriptor() ([]byte, []int) { + return file_envoy_service_status_v3_csds_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *ClientConfig_GenericXdsConfig) GetTypeUrl() string { + if x != nil { + return x.TypeUrl + } + return "" +} + +func (x *ClientConfig_GenericXdsConfig) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ClientConfig_GenericXdsConfig) GetVersionInfo() string { + if x != nil { + return x.VersionInfo + } + return "" +} + +func (x *ClientConfig_GenericXdsConfig) GetXdsConfig() *anypb.Any { + if x != nil { + return x.XdsConfig + } + return nil +} + +func (x *ClientConfig_GenericXdsConfig) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdated + } + return nil +} + +func (x *ClientConfig_GenericXdsConfig) GetConfigStatus() ConfigStatus { + if x != nil { + return x.ConfigStatus + } + return ConfigStatus_UNKNOWN +} + +func (x *ClientConfig_GenericXdsConfig) GetClientStatus() v32.ClientResourceStatus { + if x != nil { + return x.ClientStatus + } + return v32.ClientResourceStatus(0) +} + +func (x *ClientConfig_GenericXdsConfig) GetErrorState() *v32.UpdateFailureState { + if x != nil { + return x.ErrorState + } + return nil +} + +func (x *ClientConfig_GenericXdsConfig) GetIsStaticResource() bool { + if x != nil { + return x.IsStaticResource + } + return false +} + +var File_envoy_service_status_v3_csds_proto protoreflect.FileDescriptor + +var file_envoy_service_status_v3_csds_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x73, 0x64, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x27, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 
0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xfe, 0x01, 0x0a, 0x13, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x47, 0x0a, 0x0d, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0c, 0x6e, + 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x04, 0x6e, + 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x3a, 0x0a, 0x19, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, + 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, 0x0a, 0x2b, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xf9, 0x04, 0x0a, 0x0c, + 0x50, 0x65, 0x72, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 
0x5d, 0x0a, 0x0d, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, + 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0c, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x4e, 0x0a, 0x0f, 0x6c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x0e, 0x6c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4b, 0x0a, 0x0e, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x45, 0x0a, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x48, + 0x00, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x58, + 0x0a, 0x13, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, + 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, + 0x75, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x11, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x0f, 0x65, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, + 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x65, 0x72, 0x58, 0x64, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x10, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x5f, 0x78, 0x64, 0x73, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xae, 0x06, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2e, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, + 
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, + 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x51, 0x0a, 0x0a, 0x78, 0x64, 0x73, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, + 0x52, 0x09, 0x78, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x66, 0x0a, 0x13, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x11, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x63, + 0x6f, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0xe2, 0x03, 0x0a, 0x10, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x69, 0x63, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, + 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x33, 0x0a, + 0x0a, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x09, 0x78, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x4a, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x49, 0x0a, + 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0e, 
0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x52, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2c, 0x0a, + 0x12, 0x69, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x53, 0x74, 0x61, + 0x74, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, + 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x8a, 0x01, 0x0a, 0x14, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2a, 0x4b, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x59, 0x4e, 0x43, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, + 0x0a, 0x08, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, + 0x53, 0x54, 0x41, 0x4c, 0x45, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, + 0x10, 0x04, 0x2a, 0x63, 0x0a, 0x12, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, + 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x45, 0x44, + 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x41, 0x43, 0x4b, + 0x45, 0x44, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4e, + 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x03, 0x32, 0xb2, 0x02, 0x0a, 0x1c, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x77, 0x0a, 0x12, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 
0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2c, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, + 0x01, 0x12, 0x98, 0x01, 0x0a, 0x11, 0x46, 0x65, 0x74, 0x63, 0x68, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x3a, 0x01, 0x2a, 0x22, + 0x1b, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x3a, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x85, 0x01, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x09, + 0x43, 0x73, 0x64, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x47, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, + 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_service_status_v3_csds_proto_rawDescOnce sync.Once + file_envoy_service_status_v3_csds_proto_rawDescData = file_envoy_service_status_v3_csds_proto_rawDesc +) + +func file_envoy_service_status_v3_csds_proto_rawDescGZIP() []byte { + file_envoy_service_status_v3_csds_proto_rawDescOnce.Do(func() { + file_envoy_service_status_v3_csds_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_service_status_v3_csds_proto_rawDescData) + }) + return file_envoy_service_status_v3_csds_proto_rawDescData +} + +var file_envoy_service_status_v3_csds_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_envoy_service_status_v3_csds_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_envoy_service_status_v3_csds_proto_goTypes = []interface{}{ + (ConfigStatus)(0), // 0: envoy.service.status.v3.ConfigStatus + (ClientConfigStatus)(0), // 1: envoy.service.status.v3.ClientConfigStatus + (*ClientStatusRequest)(nil), // 2: envoy.service.status.v3.ClientStatusRequest + (*PerXdsConfig)(nil), // 3: envoy.service.status.v3.PerXdsConfig + 
(*ClientConfig)(nil), // 4: envoy.service.status.v3.ClientConfig + (*ClientStatusResponse)(nil), // 5: envoy.service.status.v3.ClientStatusResponse + (*ClientConfig_GenericXdsConfig)(nil), // 6: envoy.service.status.v3.ClientConfig.GenericXdsConfig + (*v3.NodeMatcher)(nil), // 7: envoy.type.matcher.v3.NodeMatcher + (*v31.Node)(nil), // 8: envoy.config.core.v3.Node + (*v32.ListenersConfigDump)(nil), // 9: envoy.admin.v3.ListenersConfigDump + (*v32.ClustersConfigDump)(nil), // 10: envoy.admin.v3.ClustersConfigDump + (*v32.RoutesConfigDump)(nil), // 11: envoy.admin.v3.RoutesConfigDump + (*v32.ScopedRoutesConfigDump)(nil), // 12: envoy.admin.v3.ScopedRoutesConfigDump + (*v32.EndpointsConfigDump)(nil), // 13: envoy.admin.v3.EndpointsConfigDump + (*anypb.Any)(nil), // 14: google.protobuf.Any + (*timestamppb.Timestamp)(nil), // 15: google.protobuf.Timestamp + (v32.ClientResourceStatus)(0), // 16: envoy.admin.v3.ClientResourceStatus + (*v32.UpdateFailureState)(nil), // 17: envoy.admin.v3.UpdateFailureState +} +var file_envoy_service_status_v3_csds_proto_depIdxs = []int32{ + 7, // 0: envoy.service.status.v3.ClientStatusRequest.node_matchers:type_name -> envoy.type.matcher.v3.NodeMatcher + 8, // 1: envoy.service.status.v3.ClientStatusRequest.node:type_name -> envoy.config.core.v3.Node + 0, // 2: envoy.service.status.v3.PerXdsConfig.status:type_name -> envoy.service.status.v3.ConfigStatus + 1, // 3: envoy.service.status.v3.PerXdsConfig.client_status:type_name -> envoy.service.status.v3.ClientConfigStatus + 9, // 4: envoy.service.status.v3.PerXdsConfig.listener_config:type_name -> envoy.admin.v3.ListenersConfigDump + 10, // 5: envoy.service.status.v3.PerXdsConfig.cluster_config:type_name -> envoy.admin.v3.ClustersConfigDump + 11, // 6: envoy.service.status.v3.PerXdsConfig.route_config:type_name -> envoy.admin.v3.RoutesConfigDump + 12, // 7: envoy.service.status.v3.PerXdsConfig.scoped_route_config:type_name -> envoy.admin.v3.ScopedRoutesConfigDump + 13, // 8: envoy.service.status.v3.PerXdsConfig.endpoint_config:type_name -> envoy.admin.v3.EndpointsConfigDump + 8, // 9: envoy.service.status.v3.ClientConfig.node:type_name -> envoy.config.core.v3.Node + 3, // 10: envoy.service.status.v3.ClientConfig.xds_config:type_name -> envoy.service.status.v3.PerXdsConfig + 6, // 11: envoy.service.status.v3.ClientConfig.generic_xds_configs:type_name -> envoy.service.status.v3.ClientConfig.GenericXdsConfig + 4, // 12: envoy.service.status.v3.ClientStatusResponse.config:type_name -> envoy.service.status.v3.ClientConfig + 14, // 13: envoy.service.status.v3.ClientConfig.GenericXdsConfig.xds_config:type_name -> google.protobuf.Any + 15, // 14: envoy.service.status.v3.ClientConfig.GenericXdsConfig.last_updated:type_name -> google.protobuf.Timestamp + 0, // 15: envoy.service.status.v3.ClientConfig.GenericXdsConfig.config_status:type_name -> envoy.service.status.v3.ConfigStatus + 16, // 16: envoy.service.status.v3.ClientConfig.GenericXdsConfig.client_status:type_name -> envoy.admin.v3.ClientResourceStatus + 17, // 17: envoy.service.status.v3.ClientConfig.GenericXdsConfig.error_state:type_name -> envoy.admin.v3.UpdateFailureState + 2, // 18: envoy.service.status.v3.ClientStatusDiscoveryService.StreamClientStatus:input_type -> envoy.service.status.v3.ClientStatusRequest + 2, // 19: envoy.service.status.v3.ClientStatusDiscoveryService.FetchClientStatus:input_type -> envoy.service.status.v3.ClientStatusRequest + 5, // 20: envoy.service.status.v3.ClientStatusDiscoveryService.StreamClientStatus:output_type -> 
envoy.service.status.v3.ClientStatusResponse + 5, // 21: envoy.service.status.v3.ClientStatusDiscoveryService.FetchClientStatus:output_type -> envoy.service.status.v3.ClientStatusResponse + 20, // [20:22] is the sub-list for method output_type + 18, // [18:20] is the sub-list for method input_type + 18, // [18:18] is the sub-list for extension type_name + 18, // [18:18] is the sub-list for extension extendee + 0, // [0:18] is the sub-list for field type_name +} + +func init() { file_envoy_service_status_v3_csds_proto_init() } +func file_envoy_service_status_v3_csds_proto_init() { + if File_envoy_service_status_v3_csds_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_service_status_v3_csds_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_status_v3_csds_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PerXdsConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_status_v3_csds_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_status_v3_csds_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_service_status_v3_csds_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientConfig_GenericXdsConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_service_status_v3_csds_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*PerXdsConfig_ListenerConfig)(nil), + (*PerXdsConfig_ClusterConfig)(nil), + (*PerXdsConfig_RouteConfig)(nil), + (*PerXdsConfig_ScopedRouteConfig)(nil), + (*PerXdsConfig_EndpointConfig)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_service_status_v3_csds_proto_rawDesc, + NumEnums: 2, + NumMessages: 5, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_envoy_service_status_v3_csds_proto_goTypes, + DependencyIndexes: file_envoy_service_status_v3_csds_proto_depIdxs, + EnumInfos: file_envoy_service_status_v3_csds_proto_enumTypes, + MessageInfos: file_envoy_service_status_v3_csds_proto_msgTypes, + }.Build() + File_envoy_service_status_v3_csds_proto = out.File + file_envoy_service_status_v3_csds_proto_rawDesc = nil + file_envoy_service_status_v3_csds_proto_goTypes = nil + file_envoy_service_status_v3_csds_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.validate.go new file mode 100644 index 0000000000..d27eee6463 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds.pb.validate.go @@ -0,0 +1,1057 @@ +//go:build !disable_pgv +// Code generated by 
protoc-gen-validate. DO NOT EDIT. +// source: envoy/service/status/v3/csds.proto + +package statusv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + v3 "github.com/envoyproxy/go-control-plane/envoy/admin/v3" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = v3.ClientResourceStatus(0) +) + +// Validate checks the field values on ClientStatusRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ClientStatusRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClientStatusRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ClientStatusRequestMultiError, or nil if none found. +func (m *ClientStatusRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *ClientStatusRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetNodeMatchers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientStatusRequestValidationError{ + field: fmt.Sprintf("NodeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientStatusRequestValidationError{ + field: fmt.Sprintf("NodeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientStatusRequestValidationError{ + field: fmt.Sprintf("NodeMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetNode()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientStatusRequestValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientStatusRequestValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNode()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientStatusRequestValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ExcludeResourceContents + + if len(errors) > 0 { + return ClientStatusRequestMultiError(errors) + } + + return nil +} + +// ClientStatusRequestMultiError is an error wrapping multiple validation +// errors returned by ClientStatusRequest.ValidateAll() if the designated +// constraints aren't met. 
+type ClientStatusRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClientStatusRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClientStatusRequestMultiError) AllErrors() []error { return m } + +// ClientStatusRequestValidationError is the validation error returned by +// ClientStatusRequest.Validate if the designated constraints aren't met. +type ClientStatusRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClientStatusRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClientStatusRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClientStatusRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClientStatusRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClientStatusRequestValidationError) ErrorName() string { + return "ClientStatusRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e ClientStatusRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClientStatusRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClientStatusRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClientStatusRequestValidationError{} + +// Validate checks the field values on PerXdsConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *PerXdsConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on PerXdsConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in PerXdsConfigMultiError, or +// nil if none found. 
+func (m *PerXdsConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *PerXdsConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Status + + // no validation rules for ClientStatus + + switch v := m.PerXdsConfig.(type) { + case *PerXdsConfig_ListenerConfig: + if v == nil { + err := PerXdsConfigValidationError{ + field: "PerXdsConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetListenerConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PerXdsConfigValidationError{ + field: "ListenerConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PerXdsConfigValidationError{ + field: "ListenerConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetListenerConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PerXdsConfigValidationError{ + field: "ListenerConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *PerXdsConfig_ClusterConfig: + if v == nil { + err := PerXdsConfigValidationError{ + field: "PerXdsConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetClusterConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PerXdsConfigValidationError{ + field: "ClusterConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PerXdsConfigValidationError{ + field: "ClusterConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetClusterConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PerXdsConfigValidationError{ + field: "ClusterConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *PerXdsConfig_RouteConfig: + if v == nil { + err := PerXdsConfigValidationError{ + field: "PerXdsConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetRouteConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PerXdsConfigValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PerXdsConfigValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRouteConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PerXdsConfigValidationError{ + field: "RouteConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *PerXdsConfig_ScopedRouteConfig: + if v == nil { + err := PerXdsConfigValidationError{ + field: "PerXdsConfig", + reason: 
"oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetScopedRouteConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PerXdsConfigValidationError{ + field: "ScopedRouteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PerXdsConfigValidationError{ + field: "ScopedRouteConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetScopedRouteConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PerXdsConfigValidationError{ + field: "ScopedRouteConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *PerXdsConfig_EndpointConfig: + if v == nil { + err := PerXdsConfigValidationError{ + field: "PerXdsConfig", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetEndpointConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PerXdsConfigValidationError{ + field: "EndpointConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PerXdsConfigValidationError{ + field: "EndpointConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEndpointConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PerXdsConfigValidationError{ + field: "EndpointConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return PerXdsConfigMultiError(errors) + } + + return nil +} + +// PerXdsConfigMultiError is an error wrapping multiple validation errors +// returned by PerXdsConfig.ValidateAll() if the designated constraints aren't met. +type PerXdsConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PerXdsConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PerXdsConfigMultiError) AllErrors() []error { return m } + +// PerXdsConfigValidationError is the validation error returned by +// PerXdsConfig.Validate if the designated constraints aren't met. +type PerXdsConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PerXdsConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PerXdsConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PerXdsConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PerXdsConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e PerXdsConfigValidationError) ErrorName() string { return "PerXdsConfigValidationError" } + +// Error satisfies the builtin error interface +func (e PerXdsConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPerXdsConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PerXdsConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PerXdsConfigValidationError{} + +// Validate checks the field values on ClientConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ClientConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClientConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ClientConfigMultiError, or +// nil if none found. +func (m *ClientConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *ClientConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetNode()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientConfigValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientConfigValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNode()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientConfigValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetXdsConfig() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientConfigValidationError{ + field: fmt.Sprintf("XdsConfig[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientConfigValidationError{ + field: fmt.Sprintf("XdsConfig[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientConfigValidationError{ + field: fmt.Sprintf("XdsConfig[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetGenericXdsConfigs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientConfigValidationError{ + field: fmt.Sprintf("GenericXdsConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, 
ClientConfigValidationError{ + field: fmt.Sprintf("GenericXdsConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientConfigValidationError{ + field: fmt.Sprintf("GenericXdsConfigs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for ClientScope + + if len(errors) > 0 { + return ClientConfigMultiError(errors) + } + + return nil +} + +// ClientConfigMultiError is an error wrapping multiple validation errors +// returned by ClientConfig.ValidateAll() if the designated constraints aren't met. +type ClientConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClientConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClientConfigMultiError) AllErrors() []error { return m } + +// ClientConfigValidationError is the validation error returned by +// ClientConfig.Validate if the designated constraints aren't met. +type ClientConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClientConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClientConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClientConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClientConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClientConfigValidationError) ErrorName() string { return "ClientConfigValidationError" } + +// Error satisfies the builtin error interface +func (e ClientConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClientConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClientConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClientConfigValidationError{} + +// Validate checks the field values on ClientStatusResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ClientStatusResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClientStatusResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ClientStatusResponseMultiError, or nil if none found. 
+func (m *ClientStatusResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *ClientStatusResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetConfig() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientStatusResponseValidationError{ + field: fmt.Sprintf("Config[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientStatusResponseValidationError{ + field: fmt.Sprintf("Config[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientStatusResponseValidationError{ + field: fmt.Sprintf("Config[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ClientStatusResponseMultiError(errors) + } + + return nil +} + +// ClientStatusResponseMultiError is an error wrapping multiple validation +// errors returned by ClientStatusResponse.ValidateAll() if the designated +// constraints aren't met. +type ClientStatusResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClientStatusResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClientStatusResponseMultiError) AllErrors() []error { return m } + +// ClientStatusResponseValidationError is the validation error returned by +// ClientStatusResponse.Validate if the designated constraints aren't met. +type ClientStatusResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClientStatusResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClientStatusResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClientStatusResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClientStatusResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClientStatusResponseValidationError) ErrorName() string { + return "ClientStatusResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e ClientStatusResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClientStatusResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClientStatusResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClientStatusResponseValidationError{} + +// Validate checks the field values on ClientConfig_GenericXdsConfig with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *ClientConfig_GenericXdsConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ClientConfig_GenericXdsConfig with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// ClientConfig_GenericXdsConfigMultiError, or nil if none found. +func (m *ClientConfig_GenericXdsConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *ClientConfig_GenericXdsConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for TypeUrl + + // no validation rules for Name + + // no validation rules for VersionInfo + + if all { + switch v := interface{}(m.GetXdsConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientConfig_GenericXdsConfigValidationError{ + field: "XdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientConfig_GenericXdsConfigValidationError{ + field: "XdsConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetXdsConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientConfig_GenericXdsConfigValidationError{ + field: "XdsConfig", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdated()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientConfig_GenericXdsConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientConfig_GenericXdsConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientConfig_GenericXdsConfigValidationError{ + field: "LastUpdated", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ConfigStatus + + // no validation rules for ClientStatus + + if all { + switch v := interface{}(m.GetErrorState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ClientConfig_GenericXdsConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ClientConfig_GenericXdsConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ClientConfig_GenericXdsConfigValidationError{ + field: "ErrorState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for IsStaticResource + + if len(errors) > 0 { + return ClientConfig_GenericXdsConfigMultiError(errors) + } + + return nil +} + +// ClientConfig_GenericXdsConfigMultiError is an error 
wrapping multiple +// validation errors returned by ClientConfig_GenericXdsConfig.ValidateAll() +// if the designated constraints aren't met. +type ClientConfig_GenericXdsConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ClientConfig_GenericXdsConfigMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ClientConfig_GenericXdsConfigMultiError) AllErrors() []error { return m } + +// ClientConfig_GenericXdsConfigValidationError is the validation error +// returned by ClientConfig_GenericXdsConfig.Validate if the designated +// constraints aren't met. +type ClientConfig_GenericXdsConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ClientConfig_GenericXdsConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ClientConfig_GenericXdsConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ClientConfig_GenericXdsConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ClientConfig_GenericXdsConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ClientConfig_GenericXdsConfigValidationError) ErrorName() string { + return "ClientConfig_GenericXdsConfigValidationError" +} + +// Error satisfies the builtin error interface +func (e ClientConfig_GenericXdsConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sClientConfig_GenericXdsConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ClientConfig_GenericXdsConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ClientConfig_GenericXdsConfigValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds_grpc.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds_grpc.pb.go new file mode 100644 index 0000000000..abe9abebdf --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds_grpc.pb.go @@ -0,0 +1,177 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v5.26.1 +// source: envoy/service/status/v3/csds.proto + +package statusv3 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + ClientStatusDiscoveryService_StreamClientStatus_FullMethodName = "/envoy.service.status.v3.ClientStatusDiscoveryService/StreamClientStatus" + ClientStatusDiscoveryService_FetchClientStatus_FullMethodName = "/envoy.service.status.v3.ClientStatusDiscoveryService/FetchClientStatus" +) + +// ClientStatusDiscoveryServiceClient is the client API for ClientStatusDiscoveryService service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ClientStatusDiscoveryServiceClient interface { + StreamClientStatus(ctx context.Context, opts ...grpc.CallOption) (ClientStatusDiscoveryService_StreamClientStatusClient, error) + FetchClientStatus(ctx context.Context, in *ClientStatusRequest, opts ...grpc.CallOption) (*ClientStatusResponse, error) +} + +type clientStatusDiscoveryServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewClientStatusDiscoveryServiceClient(cc grpc.ClientConnInterface) ClientStatusDiscoveryServiceClient { + return &clientStatusDiscoveryServiceClient{cc} +} + +func (c *clientStatusDiscoveryServiceClient) StreamClientStatus(ctx context.Context, opts ...grpc.CallOption) (ClientStatusDiscoveryService_StreamClientStatusClient, error) { + stream, err := c.cc.NewStream(ctx, &ClientStatusDiscoveryService_ServiceDesc.Streams[0], ClientStatusDiscoveryService_StreamClientStatus_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &clientStatusDiscoveryServiceStreamClientStatusClient{stream} + return x, nil +} + +type ClientStatusDiscoveryService_StreamClientStatusClient interface { + Send(*ClientStatusRequest) error + Recv() (*ClientStatusResponse, error) + grpc.ClientStream +} + +type clientStatusDiscoveryServiceStreamClientStatusClient struct { + grpc.ClientStream +} + +func (x *clientStatusDiscoveryServiceStreamClientStatusClient) Send(m *ClientStatusRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *clientStatusDiscoveryServiceStreamClientStatusClient) Recv() (*ClientStatusResponse, error) { + m := new(ClientStatusResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *clientStatusDiscoveryServiceClient) FetchClientStatus(ctx context.Context, in *ClientStatusRequest, opts ...grpc.CallOption) (*ClientStatusResponse, error) { + out := new(ClientStatusResponse) + err := c.cc.Invoke(ctx, ClientStatusDiscoveryService_FetchClientStatus_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ClientStatusDiscoveryServiceServer is the server API for ClientStatusDiscoveryService service. +// All implementations should embed UnimplementedClientStatusDiscoveryServiceServer +// for forward compatibility +type ClientStatusDiscoveryServiceServer interface { + StreamClientStatus(ClientStatusDiscoveryService_StreamClientStatusServer) error + FetchClientStatus(context.Context, *ClientStatusRequest) (*ClientStatusResponse, error) +} + +// UnimplementedClientStatusDiscoveryServiceServer should be embedded to have forward compatible implementations. +type UnimplementedClientStatusDiscoveryServiceServer struct { +} + +func (UnimplementedClientStatusDiscoveryServiceServer) StreamClientStatus(ClientStatusDiscoveryService_StreamClientStatusServer) error { + return status.Errorf(codes.Unimplemented, "method StreamClientStatus not implemented") +} +func (UnimplementedClientStatusDiscoveryServiceServer) FetchClientStatus(context.Context, *ClientStatusRequest) (*ClientStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FetchClientStatus not implemented") +} + +// UnsafeClientStatusDiscoveryServiceServer may be embedded to opt out of forward compatibility for this service. 
+// Use of this interface is not recommended, as added methods to ClientStatusDiscoveryServiceServer will +// result in compilation errors. +type UnsafeClientStatusDiscoveryServiceServer interface { + mustEmbedUnimplementedClientStatusDiscoveryServiceServer() +} + +func RegisterClientStatusDiscoveryServiceServer(s grpc.ServiceRegistrar, srv ClientStatusDiscoveryServiceServer) { + s.RegisterService(&ClientStatusDiscoveryService_ServiceDesc, srv) +} + +func _ClientStatusDiscoveryService_StreamClientStatus_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ClientStatusDiscoveryServiceServer).StreamClientStatus(&clientStatusDiscoveryServiceStreamClientStatusServer{stream}) +} + +type ClientStatusDiscoveryService_StreamClientStatusServer interface { + Send(*ClientStatusResponse) error + Recv() (*ClientStatusRequest, error) + grpc.ServerStream +} + +type clientStatusDiscoveryServiceStreamClientStatusServer struct { + grpc.ServerStream +} + +func (x *clientStatusDiscoveryServiceStreamClientStatusServer) Send(m *ClientStatusResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *clientStatusDiscoveryServiceStreamClientStatusServer) Recv() (*ClientStatusRequest, error) { + m := new(ClientStatusRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _ClientStatusDiscoveryService_FetchClientStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ClientStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClientStatusDiscoveryServiceServer).FetchClientStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ClientStatusDiscoveryService_FetchClientStatus_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClientStatusDiscoveryServiceServer).FetchClientStatus(ctx, req.(*ClientStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// ClientStatusDiscoveryService_ServiceDesc is the grpc.ServiceDesc for ClientStatusDiscoveryService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ClientStatusDiscoveryService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "envoy.service.status.v3.ClientStatusDiscoveryService", + HandlerType: (*ClientStatusDiscoveryServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "FetchClientStatus", + Handler: _ClientStatusDiscoveryService_FetchClientStatus_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamClientStatus", + Handler: _ClientStatusDiscoveryService_StreamClientStatus_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "envoy/service/status/v3/csds.proto", +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds_vtproto.pb.go new file mode 100644 index 0000000000..a55983e810 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/service/status/v3/csds_vtproto.pb.go @@ -0,0 +1,866 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
+// source: envoy/service/status/v3/csds.proto + +package statusv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + anypb "github.com/planetscale/vtprotobuf/types/known/anypb" + timestamppb "github.com/planetscale/vtprotobuf/types/known/timestamppb" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ClientStatusRequest) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientStatusRequest) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClientStatusRequest) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ExcludeResourceContents { + i-- + if m.ExcludeResourceContents { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Node != nil { + if vtmsg, ok := interface{}(m.Node).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Node) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if len(m.NodeMatchers) > 0 { + for iNdEx := len(m.NodeMatchers) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.NodeMatchers[iNdEx]).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.NodeMatchers[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PerXdsConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PerXdsConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PerXdsConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x38 + } + if msg, ok := 
m.PerXdsConfig.(*PerXdsConfig_EndpointConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PerXdsConfig.(*PerXdsConfig_ScopedRouteConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PerXdsConfig.(*PerXdsConfig_RouteConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PerXdsConfig.(*PerXdsConfig_ClusterConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PerXdsConfig.(*PerXdsConfig_ListenerConfig); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Status != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *PerXdsConfig_ListenerConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PerXdsConfig_ListenerConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListenerConfig != nil { + if vtmsg, ok := interface{}(m.ListenerConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ListenerConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *PerXdsConfig_ClusterConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PerXdsConfig_ClusterConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ClusterConfig != nil { + if vtmsg, ok := interface{}(m.ClusterConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ClusterConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *PerXdsConfig_RouteConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PerXdsConfig_RouteConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RouteConfig != nil { + if vtmsg, ok := interface{}(m.RouteConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.RouteConfig) + if 
err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *PerXdsConfig_ScopedRouteConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PerXdsConfig_ScopedRouteConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ScopedRouteConfig != nil { + if vtmsg, ok := interface{}(m.ScopedRouteConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ScopedRouteConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *PerXdsConfig_EndpointConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PerXdsConfig_EndpointConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.EndpointConfig != nil { + if vtmsg, ok := interface{}(m.EndpointConfig).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.EndpointConfig) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *ClientConfig_GenericXdsConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientConfig_GenericXdsConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClientConfig_GenericXdsConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.IsStaticResource { + i-- + if m.IsStaticResource { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } + if m.ErrorState != nil { + if vtmsg, ok := interface{}(m.ErrorState).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.ErrorState) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } + if m.ClientStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus)) + i-- + dAtA[i] = 0x38 + } + if m.ConfigStatus != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ConfigStatus)) + i-- + dAtA[i] = 0x30 + } + if m.LastUpdated != nil { + size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.XdsConfig != nil { + size, err := (*anypb.Any)(m.XdsConfig).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.VersionInfo) > 0 { + i -= len(m.VersionInfo) + copy(dAtA[i:], m.VersionInfo) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo))) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientConfig) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientConfig) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClientConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ClientScope) > 0 { + i -= len(m.ClientScope) + copy(dAtA[i:], m.ClientScope) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ClientScope))) + i-- + dAtA[i] = 0x22 + } + if len(m.GenericXdsConfigs) > 0 { + for iNdEx := len(m.GenericXdsConfigs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.GenericXdsConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.XdsConfig) > 0 { + for iNdEx := len(m.XdsConfig) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.XdsConfig[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.Node != nil { + if vtmsg, ok := interface{}(m.Node).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Node) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientStatusResponse) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientStatusResponse) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ClientStatusResponse) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Config[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ClientStatusRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.NodeMatchers) > 0 { + for _, e := range m.NodeMatchers { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Node != nil { + if size, ok := interface{}(m.Node).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Node) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ExcludeResourceContents { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *PerXdsConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Status)) + } + if vtmsg, ok := m.PerXdsConfig.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + n += len(m.unknownFields) + return n +} + +func (m *PerXdsConfig_ListenerConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListenerConfig != nil { + if size, ok := interface{}(m.ListenerConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ListenerConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *PerXdsConfig_ClusterConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClusterConfig != nil { + if size, ok := interface{}(m.ClusterConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ClusterConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *PerXdsConfig_RouteConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RouteConfig != nil { + if size, ok := interface{}(m.RouteConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.RouteConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *PerXdsConfig_ScopedRouteConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ScopedRouteConfig != nil { + if size, ok := interface{}(m.ScopedRouteConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ScopedRouteConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *PerXdsConfig_EndpointConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l 
int + _ = l + if m.EndpointConfig != nil { + if size, ok := interface{}(m.EndpointConfig).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.EndpointConfig) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ClientConfig_GenericXdsConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.VersionInfo) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.XdsConfig != nil { + l = (*anypb.Any)(m.XdsConfig).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LastUpdated != nil { + l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ConfigStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ConfigStatus)) + } + if m.ClientStatus != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus)) + } + if m.ErrorState != nil { + if size, ok := interface{}(m.ErrorState).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.ErrorState) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.IsStaticResource { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ClientConfig) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Node != nil { + if size, ok := interface{}(m.Node).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Node) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.XdsConfig) > 0 { + for _, e := range m.XdsConfig { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.GenericXdsConfigs) > 0 { + for _, e := range m.GenericXdsConfigs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + l = len(m.ClientScope) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClientStatusResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.go new file mode 100644 index 0000000000..8afb4e8d12 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.go @@ -0,0 +1,189 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/http/v3/cookie.proto + +package httpv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
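As a usage note on the MarshalVTStrict/SizeVT helpers generated above: they are compiled only when the program is built with the vtprotobuf build tag (see the //go:build line at the top of csds_vtproto.pb.go). A minimal sketch, with an illustrative field value; build it with "go build -tags vtprotobuf":

//go:build vtprotobuf

package main

import (
	"fmt"
	"log"

	statusv3 "github.com/envoyproxy/go-control-plane/envoy/service/status/v3"
)

func main() {
	req := &statusv3.ClientStatusRequest{ExcludeResourceContents: true}
	// SizeVT computes the exact wire size; MarshalVTStrict allocates that much
	// and encodes fields back-to-front, as in the generated code above.
	fmt.Println("size:", req.SizeVT())
	b, err := req.MarshalVTStrict()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("encoded", len(b), "bytes")
}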
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Cookie defines an API for obtaining or generating HTTP cookie. +type Cookie struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name that will be used to obtain cookie value from downstream HTTP request or generate + // new cookie for downstream. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Duration of cookie. This will be used to set the expiry time of a new cookie when it is + // generated. Set this to 0s to use a session cookie and disable cookie expiration. + Ttl *durationpb.Duration `protobuf:"bytes,2,opt,name=ttl,proto3" json:"ttl,omitempty"` + // Path of cookie. This will be used to set the path of a new cookie when it is generated. + // If no path is specified here, no path will be set for the cookie. + Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` +} + +func (x *Cookie) Reset() { + *x = Cookie{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_http_v3_cookie_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Cookie) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Cookie) ProtoMessage() {} + +func (x *Cookie) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_http_v3_cookie_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Cookie.ProtoReflect.Descriptor instead. +func (*Cookie) Descriptor() ([]byte, []int) { + return file_envoy_type_http_v3_cookie_proto_rawDescGZIP(), []int{0} +} + +func (x *Cookie) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Cookie) GetTtl() *durationpb.Duration { + if x != nil { + return x.Ttl + } + return nil +} + +func (x *Cookie) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +var File_envoy_type_http_v3_cookie_proto protoreflect.FileDescriptor + +var file_envoy_type_http_v3_cookie_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x68, 0x74, 0x74, + 0x70, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x68, 0x74, + 0x74, 0x70, 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x70, 0x0a, + 0x06, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0xaa, 0x01, 0x02, 0x32, 0x00, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, + 0x7b, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x43, 0x6f, 0x6f, + 0x6b, 0x69, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x68, 0x74, + 0x74, 0x70, 0x2f, 0x76, 0x33, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_http_v3_cookie_proto_rawDescOnce sync.Once + file_envoy_type_http_v3_cookie_proto_rawDescData = file_envoy_type_http_v3_cookie_proto_rawDesc +) + +func file_envoy_type_http_v3_cookie_proto_rawDescGZIP() []byte { + file_envoy_type_http_v3_cookie_proto_rawDescOnce.Do(func() { + file_envoy_type_http_v3_cookie_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_http_v3_cookie_proto_rawDescData) + }) + return file_envoy_type_http_v3_cookie_proto_rawDescData +} + +var file_envoy_type_http_v3_cookie_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_type_http_v3_cookie_proto_goTypes = []interface{}{ + (*Cookie)(nil), // 0: envoy.type.http.v3.Cookie + (*durationpb.Duration)(nil), // 1: google.protobuf.Duration +} +var file_envoy_type_http_v3_cookie_proto_depIdxs = []int32{ + 1, // 0: envoy.type.http.v3.Cookie.ttl:type_name -> google.protobuf.Duration + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_type_http_v3_cookie_proto_init() } +func file_envoy_type_http_v3_cookie_proto_init() { + if File_envoy_type_http_v3_cookie_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_http_v3_cookie_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Cookie); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_http_v3_cookie_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_http_v3_cookie_proto_goTypes, + DependencyIndexes: file_envoy_type_http_v3_cookie_proto_depIdxs, + MessageInfos: file_envoy_type_http_v3_cookie_proto_msgTypes, + }.Build() + File_envoy_type_http_v3_cookie_proto = out.File + file_envoy_type_http_v3_cookie_proto_rawDesc = nil + file_envoy_type_http_v3_cookie_proto_goTypes = nil + file_envoy_type_http_v3_cookie_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.validate.go 
b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.validate.go new file mode 100644 index 0000000000..3daecd3dea --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie.pb.validate.go @@ -0,0 +1,178 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/http/v3/cookie.proto + +package httpv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Cookie with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Cookie) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Cookie with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in CookieMultiError, or nil if none found. +func (m *Cookie) ValidateAll() error { + return m.validate(true) +} + +func (m *Cookie) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := CookieValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if d := m.GetTtl(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = CookieValidationError{ + field: "Ttl", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gte := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur < gte { + err := CookieValidationError{ + field: "Ttl", + reason: "value must be greater than or equal to 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + // no validation rules for Path + + if len(errors) > 0 { + return CookieMultiError(errors) + } + + return nil +} + +// CookieMultiError is an error wrapping multiple validation errors returned by +// Cookie.ValidateAll() if the designated constraints aren't met. +type CookieMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CookieMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CookieMultiError) AllErrors() []error { return m } + +// CookieValidationError is the validation error returned by Cookie.Validate if +// the designated constraints aren't met. +type CookieValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CookieValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CookieValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
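To make the Cookie rules above concrete (name must be at least 1 rune, ttl must be a valid duration of at least 0s, path is unconstrained), a minimal sketch with illustrative values:

package main

import (
	"fmt"

	httpv3 "github.com/envoyproxy/go-control-plane/envoy/type/http/v3"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// A non-empty name and a zero (session) TTL satisfy the constraints.
	ok := &httpv3.Cookie{Name: "session", Ttl: durationpb.New(0), Path: "/"}
	fmt.Println(ok.ValidateAll()) // <nil>

	// An empty name violates the "at least 1 runes" rule; ValidateAll
	// collects the violation into a CookieMultiError.
	bad := &httpv3.Cookie{}
	fmt.Println(bad.ValidateAll())
}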
+func (e CookieValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CookieValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CookieValidationError) ErrorName() string { return "CookieValidationError" } + +// Error satisfies the builtin error interface +func (e CookieValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCookie.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CookieValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CookieValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie_vtproto.pb.go new file mode 100644 index 0000000000..66ab8b784a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/cookie_vtproto.pb.go @@ -0,0 +1,99 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/http/v3/cookie.proto + +package httpv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Cookie) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cookie) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Cookie) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x1a + } + if m.Ttl != nil { + size, err := (*durationpb.Duration)(m.Ttl).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Cookie) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Ttl != nil { + l = (*durationpb.Duration)(m.Ttl).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git 
a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.go new file mode 100644 index 0000000000..dda21b56bd --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.go @@ -0,0 +1,403 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/http/v3/path_transformation.proto + +package httpv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type PathTransformation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of operations to apply. Transformations will be performed in the order that they appear. + Operations []*PathTransformation_Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"` +} + +func (x *PathTransformation) Reset() { + *x = PathTransformation{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_http_v3_path_transformation_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PathTransformation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PathTransformation) ProtoMessage() {} + +func (x *PathTransformation) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_http_v3_path_transformation_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PathTransformation.ProtoReflect.Descriptor instead. +func (*PathTransformation) Descriptor() ([]byte, []int) { + return file_envoy_type_http_v3_path_transformation_proto_rawDescGZIP(), []int{0} +} + +func (x *PathTransformation) GetOperations() []*PathTransformation_Operation { + if x != nil { + return x.Operations + } + return nil +} + +// A type of operation to alter text. 
+type PathTransformation_Operation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to OperationSpecifier: + // + // *PathTransformation_Operation_NormalizePathRfc_3986 + // *PathTransformation_Operation_MergeSlashes_ + OperationSpecifier isPathTransformation_Operation_OperationSpecifier `protobuf_oneof:"operation_specifier"` +} + +func (x *PathTransformation_Operation) Reset() { + *x = PathTransformation_Operation{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_http_v3_path_transformation_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PathTransformation_Operation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PathTransformation_Operation) ProtoMessage() {} + +func (x *PathTransformation_Operation) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_http_v3_path_transformation_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PathTransformation_Operation.ProtoReflect.Descriptor instead. +func (*PathTransformation_Operation) Descriptor() ([]byte, []int) { + return file_envoy_type_http_v3_path_transformation_proto_rawDescGZIP(), []int{0, 0} +} + +func (m *PathTransformation_Operation) GetOperationSpecifier() isPathTransformation_Operation_OperationSpecifier { + if m != nil { + return m.OperationSpecifier + } + return nil +} + +func (x *PathTransformation_Operation) GetNormalizePathRfc_3986() *PathTransformation_Operation_NormalizePathRFC3986 { + if x, ok := x.GetOperationSpecifier().(*PathTransformation_Operation_NormalizePathRfc_3986); ok { + return x.NormalizePathRfc_3986 + } + return nil +} + +func (x *PathTransformation_Operation) GetMergeSlashes() *PathTransformation_Operation_MergeSlashes { + if x, ok := x.GetOperationSpecifier().(*PathTransformation_Operation_MergeSlashes_); ok { + return x.MergeSlashes + } + return nil +} + +type isPathTransformation_Operation_OperationSpecifier interface { + isPathTransformation_Operation_OperationSpecifier() +} + +type PathTransformation_Operation_NormalizePathRfc_3986 struct { + // Enable path normalization per RFC 3986. + NormalizePathRfc_3986 *PathTransformation_Operation_NormalizePathRFC3986 `protobuf:"bytes,2,opt,name=normalize_path_rfc_3986,json=normalizePathRfc3986,proto3,oneof"` +} + +type PathTransformation_Operation_MergeSlashes_ struct { + // Enable merging adjacent slashes. + MergeSlashes *PathTransformation_Operation_MergeSlashes `protobuf:"bytes,3,opt,name=merge_slashes,json=mergeSlashes,proto3,oneof"` +} + +func (*PathTransformation_Operation_NormalizePathRfc_3986) isPathTransformation_Operation_OperationSpecifier() { +} + +func (*PathTransformation_Operation_MergeSlashes_) isPathTransformation_Operation_OperationSpecifier() { +} + +// Should text be normalized according to RFC 3986? This typically is used for path headers +// before any processing of requests by HTTP filters or routing. This applies percent-encoded +// normalization and path segment normalization. Fails on characters disallowed in URLs +// (e.g. NULLs). See `Normalization and Comparison +// `_ for details of normalization. 
Note that +// this options does not perform `case normalization +// `_ +type PathTransformation_Operation_NormalizePathRFC3986 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PathTransformation_Operation_NormalizePathRFC3986) Reset() { + *x = PathTransformation_Operation_NormalizePathRFC3986{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_http_v3_path_transformation_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PathTransformation_Operation_NormalizePathRFC3986) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PathTransformation_Operation_NormalizePathRFC3986) ProtoMessage() {} + +func (x *PathTransformation_Operation_NormalizePathRFC3986) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_http_v3_path_transformation_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PathTransformation_Operation_NormalizePathRFC3986.ProtoReflect.Descriptor instead. +func (*PathTransformation_Operation_NormalizePathRFC3986) Descriptor() ([]byte, []int) { + return file_envoy_type_http_v3_path_transformation_proto_rawDescGZIP(), []int{0, 0, 0} +} + +// Determines if adjacent slashes are merged into one. A common use case is for a request path +// header. Using this option in “:ref: PathNormalizationOptions +// “ +// will allow incoming requests with path “//dir///file“ to match against route with “prefix“ +// match set to “/dir“. When using for header transformations, note that slash merging is not +// part of `HTTP spec `_ and is provided for convenience. +type PathTransformation_Operation_MergeSlashes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PathTransformation_Operation_MergeSlashes) Reset() { + *x = PathTransformation_Operation_MergeSlashes{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_http_v3_path_transformation_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PathTransformation_Operation_MergeSlashes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PathTransformation_Operation_MergeSlashes) ProtoMessage() {} + +func (x *PathTransformation_Operation_MergeSlashes) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_http_v3_path_transformation_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PathTransformation_Operation_MergeSlashes.ProtoReflect.Descriptor instead. 
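The Operation message above is a oneof: each entry selects exactly one of the two generated wrapper types. A minimal construction sketch (field values are illustrative):

package main

import (
	"fmt"

	httpv3 "github.com/envoyproxy/go-control-plane/envoy/type/http/v3"
)

func main() {
	// Each Operation carries exactly one oneof member; the generated wrapper
	// structs (..._NormalizePathRfc_3986 and ..._MergeSlashes_) select it.
	pt := &httpv3.PathTransformation{
		Operations: []*httpv3.PathTransformation_Operation{
			{
				OperationSpecifier: &httpv3.PathTransformation_Operation_NormalizePathRfc_3986{
					NormalizePathRfc_3986: &httpv3.PathTransformation_Operation_NormalizePathRFC3986{},
				},
			},
			{
				OperationSpecifier: &httpv3.PathTransformation_Operation_MergeSlashes_{
					MergeSlashes: &httpv3.PathTransformation_Operation_MergeSlashes{},
				},
			},
		},
	}
	fmt.Println(len(pt.GetOperations())) // 2
}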
+func (*PathTransformation_Operation_MergeSlashes) Descriptor() ([]byte, []int) { + return file_envoy_type_http_v3_path_transformation_proto_rawDescGZIP(), []int{0, 0, 1} +} + +var File_envoy_type_http_v3_path_transformation_proto protoreflect.FileDescriptor + +var file_envoy_type_http_v3_path_transformation_proto_rawDesc = []byte{ + 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x68, 0x74, 0x74, + 0x70, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, + 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9e, 0x03, 0x0a, 0x12, 0x50, + 0x61, 0x74, 0x68, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x50, 0x0a, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x54, + 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x1a, 0xb5, 0x02, 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x7e, 0x0a, 0x17, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x5f, 0x70, + 0x61, 0x74, 0x68, 0x5f, 0x72, 0x66, 0x63, 0x5f, 0x33, 0x39, 0x38, 0x36, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x68, 0x74, 0x74, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x54, 0x72, 0x61, 0x6e, + 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x50, 0x61, + 0x74, 0x68, 0x52, 0x46, 0x43, 0x33, 0x39, 0x38, 0x36, 0x48, 0x00, 0x52, 0x14, 0x6e, 0x6f, 0x72, + 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x66, 0x63, 0x33, 0x39, 0x38, + 0x36, 0x12, 0x64, 0x0a, 0x0d, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x5f, 0x73, 0x6c, 0x61, 0x73, 0x68, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, + 0x74, 0x68, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x72, 0x67, 0x65, + 0x53, 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 0x48, 0x00, 0x52, 0x0c, 0x6d, 0x65, 0x72, 0x67, 0x65, + 0x53, 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 0x1a, 0x16, 0x0a, 0x14, 0x4e, 0x6f, 0x72, 0x6d, 0x61, + 0x6c, 0x69, 0x7a, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x46, 0x43, 0x33, 0x39, 0x38, 0x36, 0x1a, + 0x0e, 0x0a, 0x0c, 0x4d, 0x65, 0x72, 0x67, 0x65, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x65, 0x73, 0x42, + 0x1a, 0x0a, 0x13, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 
0x73, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x87, 0x01, 0xba, 0x80, + 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x76, 0x33, 0x42, 0x17, 0x50, 0x61, 0x74, 0x68, 0x54, 0x72, + 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2f, 0x76, 0x33, 0x3b, 0x68, + 0x74, 0x74, 0x70, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_http_v3_path_transformation_proto_rawDescOnce sync.Once + file_envoy_type_http_v3_path_transformation_proto_rawDescData = file_envoy_type_http_v3_path_transformation_proto_rawDesc +) + +func file_envoy_type_http_v3_path_transformation_proto_rawDescGZIP() []byte { + file_envoy_type_http_v3_path_transformation_proto_rawDescOnce.Do(func() { + file_envoy_type_http_v3_path_transformation_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_http_v3_path_transformation_proto_rawDescData) + }) + return file_envoy_type_http_v3_path_transformation_proto_rawDescData +} + +var file_envoy_type_http_v3_path_transformation_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_envoy_type_http_v3_path_transformation_proto_goTypes = []interface{}{ + (*PathTransformation)(nil), // 0: envoy.type.http.v3.PathTransformation + (*PathTransformation_Operation)(nil), // 1: envoy.type.http.v3.PathTransformation.Operation + (*PathTransformation_Operation_NormalizePathRFC3986)(nil), // 2: envoy.type.http.v3.PathTransformation.Operation.NormalizePathRFC3986 + (*PathTransformation_Operation_MergeSlashes)(nil), // 3: envoy.type.http.v3.PathTransformation.Operation.MergeSlashes +} +var file_envoy_type_http_v3_path_transformation_proto_depIdxs = []int32{ + 1, // 0: envoy.type.http.v3.PathTransformation.operations:type_name -> envoy.type.http.v3.PathTransformation.Operation + 2, // 1: envoy.type.http.v3.PathTransformation.Operation.normalize_path_rfc_3986:type_name -> envoy.type.http.v3.PathTransformation.Operation.NormalizePathRFC3986 + 3, // 2: envoy.type.http.v3.PathTransformation.Operation.merge_slashes:type_name -> envoy.type.http.v3.PathTransformation.Operation.MergeSlashes + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_envoy_type_http_v3_path_transformation_proto_init() } +func file_envoy_type_http_v3_path_transformation_proto_init() { + if File_envoy_type_http_v3_path_transformation_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_http_v3_path_transformation_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathTransformation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_envoy_type_http_v3_path_transformation_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathTransformation_Operation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_http_v3_path_transformation_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathTransformation_Operation_NormalizePathRFC3986); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_http_v3_path_transformation_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathTransformation_Operation_MergeSlashes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_http_v3_path_transformation_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*PathTransformation_Operation_NormalizePathRfc_3986)(nil), + (*PathTransformation_Operation_MergeSlashes_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_http_v3_path_transformation_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_http_v3_path_transformation_proto_goTypes, + DependencyIndexes: file_envoy_type_http_v3_path_transformation_proto_depIdxs, + MessageInfos: file_envoy_type_http_v3_path_transformation_proto_msgTypes, + }.Build() + File_envoy_type_http_v3_path_transformation_proto = out.File + file_envoy_type_http_v3_path_transformation_proto_rawDesc = nil + file_envoy_type_http_v3_path_transformation_proto_goTypes = nil + file_envoy_type_http_v3_path_transformation_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.validate.go new file mode 100644 index 0000000000..0bb35065bc --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation.pb.validate.go @@ -0,0 +1,595 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/http/v3/path_transformation.proto + +package httpv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on PathTransformation with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *PathTransformation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on PathTransformation with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// PathTransformationMultiError, or nil if none found. +func (m *PathTransformation) ValidateAll() error { + return m.validate(true) +} + +func (m *PathTransformation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetOperations() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PathTransformationValidationError{ + field: fmt.Sprintf("Operations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PathTransformationValidationError{ + field: fmt.Sprintf("Operations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PathTransformationValidationError{ + field: fmt.Sprintf("Operations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return PathTransformationMultiError(errors) + } + + return nil +} + +// PathTransformationMultiError is an error wrapping multiple validation errors +// returned by PathTransformation.ValidateAll() if the designated constraints +// aren't met. +type PathTransformationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PathTransformationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PathTransformationMultiError) AllErrors() []error { return m } + +// PathTransformationValidationError is the validation error returned by +// PathTransformation.Validate if the designated constraints aren't met. +type PathTransformationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PathTransformationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PathTransformationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PathTransformationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PathTransformationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e PathTransformationValidationError) ErrorName() string { + return "PathTransformationValidationError" +} + +// Error satisfies the builtin error interface +func (e PathTransformationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPathTransformation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PathTransformationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PathTransformationValidationError{} + +// Validate checks the field values on PathTransformation_Operation with the +// rules defined in the proto definition for this message. 
If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *PathTransformation_Operation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on PathTransformation_Operation with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// PathTransformation_OperationMultiError, or nil if none found. +func (m *PathTransformation_Operation) ValidateAll() error { + return m.validate(true) +} + +func (m *PathTransformation_Operation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofOperationSpecifierPresent := false + switch v := m.OperationSpecifier.(type) { + case *PathTransformation_Operation_NormalizePathRfc_3986: + if v == nil { + err := PathTransformation_OperationValidationError{ + field: "OperationSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOperationSpecifierPresent = true + + if all { + switch v := interface{}(m.GetNormalizePathRfc_3986()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PathTransformation_OperationValidationError{ + field: "NormalizePathRfc_3986", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PathTransformation_OperationValidationError{ + field: "NormalizePathRfc_3986", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNormalizePathRfc_3986()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PathTransformation_OperationValidationError{ + field: "NormalizePathRfc_3986", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *PathTransformation_Operation_MergeSlashes_: + if v == nil { + err := PathTransformation_OperationValidationError{ + field: "OperationSpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofOperationSpecifierPresent = true + + if all { + switch v := interface{}(m.GetMergeSlashes()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PathTransformation_OperationValidationError{ + field: "MergeSlashes", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PathTransformation_OperationValidationError{ + field: "MergeSlashes", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMergeSlashes()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PathTransformation_OperationValidationError{ + field: "MergeSlashes", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofOperationSpecifierPresent { + err := PathTransformation_OperationValidationError{ + field: "OperationSpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return PathTransformation_OperationMultiError(errors) + } + + return nil +} + +// 
PathTransformation_OperationMultiError is an error wrapping multiple +// validation errors returned by PathTransformation_Operation.ValidateAll() if +// the designated constraints aren't met. +type PathTransformation_OperationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PathTransformation_OperationMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PathTransformation_OperationMultiError) AllErrors() []error { return m } + +// PathTransformation_OperationValidationError is the validation error returned +// by PathTransformation_Operation.Validate if the designated constraints +// aren't met. +type PathTransformation_OperationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PathTransformation_OperationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PathTransformation_OperationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PathTransformation_OperationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PathTransformation_OperationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e PathTransformation_OperationValidationError) ErrorName() string { + return "PathTransformation_OperationValidationError" +} + +// Error satisfies the builtin error interface +func (e PathTransformation_OperationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPathTransformation_Operation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PathTransformation_OperationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PathTransformation_OperationValidationError{} + +// Validate checks the field values on +// PathTransformation_Operation_NormalizePathRFC3986 with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *PathTransformation_Operation_NormalizePathRFC3986) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// PathTransformation_Operation_NormalizePathRFC3986 with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// PathTransformation_Operation_NormalizePathRFC3986MultiError, or nil if none found. 
+func (m *PathTransformation_Operation_NormalizePathRFC3986) ValidateAll() error { + return m.validate(true) +} + +func (m *PathTransformation_Operation_NormalizePathRFC3986) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return PathTransformation_Operation_NormalizePathRFC3986MultiError(errors) + } + + return nil +} + +// PathTransformation_Operation_NormalizePathRFC3986MultiError is an error +// wrapping multiple validation errors returned by +// PathTransformation_Operation_NormalizePathRFC3986.ValidateAll() if the +// designated constraints aren't met. +type PathTransformation_Operation_NormalizePathRFC3986MultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PathTransformation_Operation_NormalizePathRFC3986MultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PathTransformation_Operation_NormalizePathRFC3986MultiError) AllErrors() []error { return m } + +// PathTransformation_Operation_NormalizePathRFC3986ValidationError is the +// validation error returned by +// PathTransformation_Operation_NormalizePathRFC3986.Validate if the +// designated constraints aren't met. +type PathTransformation_Operation_NormalizePathRFC3986ValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PathTransformation_Operation_NormalizePathRFC3986ValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e PathTransformation_Operation_NormalizePathRFC3986ValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e PathTransformation_Operation_NormalizePathRFC3986ValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e PathTransformation_Operation_NormalizePathRFC3986ValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e PathTransformation_Operation_NormalizePathRFC3986ValidationError) ErrorName() string { + return "PathTransformation_Operation_NormalizePathRFC3986ValidationError" +} + +// Error satisfies the builtin error interface +func (e PathTransformation_Operation_NormalizePathRFC3986ValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPathTransformation_Operation_NormalizePathRFC3986.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PathTransformation_Operation_NormalizePathRFC3986ValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PathTransformation_Operation_NormalizePathRFC3986ValidationError{} + +// Validate checks the field values on +// PathTransformation_Operation_MergeSlashes with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *PathTransformation_Operation_MergeSlashes) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// PathTransformation_Operation_MergeSlashes with the rules defined in the +// proto definition for this message. 
If any rules are violated, the result is +// a list of violation errors wrapped in +// PathTransformation_Operation_MergeSlashesMultiError, or nil if none found. +func (m *PathTransformation_Operation_MergeSlashes) ValidateAll() error { + return m.validate(true) +} + +func (m *PathTransformation_Operation_MergeSlashes) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return PathTransformation_Operation_MergeSlashesMultiError(errors) + } + + return nil +} + +// PathTransformation_Operation_MergeSlashesMultiError is an error wrapping +// multiple validation errors returned by +// PathTransformation_Operation_MergeSlashes.ValidateAll() if the designated +// constraints aren't met. +type PathTransformation_Operation_MergeSlashesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PathTransformation_Operation_MergeSlashesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PathTransformation_Operation_MergeSlashesMultiError) AllErrors() []error { return m } + +// PathTransformation_Operation_MergeSlashesValidationError is the validation +// error returned by PathTransformation_Operation_MergeSlashes.Validate if the +// designated constraints aren't met. +type PathTransformation_Operation_MergeSlashesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PathTransformation_Operation_MergeSlashesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PathTransformation_Operation_MergeSlashesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PathTransformation_Operation_MergeSlashesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PathTransformation_Operation_MergeSlashesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e PathTransformation_Operation_MergeSlashesValidationError) ErrorName() string { + return "PathTransformation_Operation_MergeSlashesValidationError" +} + +// Error satisfies the builtin error interface +func (e PathTransformation_Operation_MergeSlashesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPathTransformation_Operation_MergeSlashes.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PathTransformation_Operation_MergeSlashesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PathTransformation_Operation_MergeSlashesValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation_vtproto.pb.go new file mode 100644 index 0000000000..64d8960cf2 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/http/v3/path_transformation_vtproto.pb.go @@ -0,0 +1,300 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
+// source: envoy/type/http/v3/path_transformation.proto + +package httpv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *PathTransformation_Operation_NormalizePathRFC3986) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PathTransformation_Operation_NormalizePathRFC3986) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathTransformation_Operation_NormalizePathRFC3986) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *PathTransformation_Operation_MergeSlashes) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PathTransformation_Operation_MergeSlashes) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathTransformation_Operation_MergeSlashes) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *PathTransformation_Operation) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PathTransformation_Operation) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathTransformation_Operation) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.OperationSpecifier.(*PathTransformation_Operation_MergeSlashes_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.OperationSpecifier.(*PathTransformation_Operation_NormalizePathRfc_3986); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *PathTransformation_Operation_NormalizePathRfc_3986) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathTransformation_Operation_NormalizePathRfc_3986) 
MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NormalizePathRfc_3986 != nil { + size, err := m.NormalizePathRfc_3986.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *PathTransformation_Operation_MergeSlashes_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathTransformation_Operation_MergeSlashes_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MergeSlashes != nil { + size, err := m.MergeSlashes.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *PathTransformation) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PathTransformation) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathTransformation) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Operations) > 0 { + for iNdEx := len(m.Operations) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Operations[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PathTransformation_Operation_NormalizePathRFC3986) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *PathTransformation_Operation_MergeSlashes) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *PathTransformation_Operation) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.OperationSpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *PathTransformation_Operation_NormalizePathRfc_3986) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NormalizePathRfc_3986 != nil { + l = m.NormalizePathRfc_3986.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *PathTransformation_Operation_MergeSlashes_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MergeSlashes != nil { + l = m.MergeSlashes.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *PathTransformation) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Operations) > 0 { + for _, e := range m.Operations { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + 
return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.go new file mode 100644 index 0000000000..db3bd5994f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.go @@ -0,0 +1,203 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/matcher/v3/filter_state.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// FilterStateMatcher provides a general interface for matching the filter state objects. +type FilterStateMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The filter state key to retrieve the object. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // Types that are assignable to Matcher: + // + // *FilterStateMatcher_StringMatch + Matcher isFilterStateMatcher_Matcher `protobuf_oneof:"matcher"` +} + +func (x *FilterStateMatcher) Reset() { + *x = FilterStateMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_filter_state_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FilterStateMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FilterStateMatcher) ProtoMessage() {} + +func (x *FilterStateMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_filter_state_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FilterStateMatcher.ProtoReflect.Descriptor instead. +func (*FilterStateMatcher) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_filter_state_proto_rawDescGZIP(), []int{0} +} + +func (x *FilterStateMatcher) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (m *FilterStateMatcher) GetMatcher() isFilterStateMatcher_Matcher { + if m != nil { + return m.Matcher + } + return nil +} + +func (x *FilterStateMatcher) GetStringMatch() *StringMatcher { + if x, ok := x.GetMatcher().(*FilterStateMatcher_StringMatch); ok { + return x.StringMatch + } + return nil +} + +type isFilterStateMatcher_Matcher interface { + isFilterStateMatcher_Matcher() +} + +type FilterStateMatcher_StringMatch struct { + // Matches the filter state object as a string value. 
+ StringMatch *StringMatcher `protobuf:"bytes,2,opt,name=string_match,json=stringMatch,proto3,oneof"` +} + +func (*FilterStateMatcher_StringMatch) isFilterStateMatcher_Matcher() {} + +var File_envoy_type_matcher_v3_filter_state_proto protoreflect.FileDescriptor + +var file_envoy_type_matcher_v3_filter_state_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8a, 0x01, + 0x0a, 0x12, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x49, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0b, 0x73, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x07, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x89, 0x01, 0xba, 0x80, 0xc8, + 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_matcher_v3_filter_state_proto_rawDescOnce sync.Once + file_envoy_type_matcher_v3_filter_state_proto_rawDescData = file_envoy_type_matcher_v3_filter_state_proto_rawDesc +) + +func file_envoy_type_matcher_v3_filter_state_proto_rawDescGZIP() []byte { + file_envoy_type_matcher_v3_filter_state_proto_rawDescOnce.Do(func() { + file_envoy_type_matcher_v3_filter_state_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_filter_state_proto_rawDescData) + }) + return file_envoy_type_matcher_v3_filter_state_proto_rawDescData +} + +var file_envoy_type_matcher_v3_filter_state_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_type_matcher_v3_filter_state_proto_goTypes = []interface{}{ + (*FilterStateMatcher)(nil), // 0: envoy.type.matcher.v3.FilterStateMatcher + (*StringMatcher)(nil), // 1: envoy.type.matcher.v3.StringMatcher +} +var file_envoy_type_matcher_v3_filter_state_proto_depIdxs = []int32{ + 1, // 0: envoy.type.matcher.v3.FilterStateMatcher.string_match:type_name -> envoy.type.matcher.v3.StringMatcher + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_type_matcher_v3_filter_state_proto_init() } +func file_envoy_type_matcher_v3_filter_state_proto_init() { + if File_envoy_type_matcher_v3_filter_state_proto != nil { + return + } + file_envoy_type_matcher_v3_string_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_type_matcher_v3_filter_state_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FilterStateMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_matcher_v3_filter_state_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*FilterStateMatcher_StringMatch)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_matcher_v3_filter_state_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_matcher_v3_filter_state_proto_goTypes, + DependencyIndexes: file_envoy_type_matcher_v3_filter_state_proto_depIdxs, + MessageInfos: file_envoy_type_matcher_v3_filter_state_proto_msgTypes, + }.Build() + File_envoy_type_matcher_v3_filter_state_proto = out.File + file_envoy_type_matcher_v3_filter_state_proto_rawDesc = nil + file_envoy_type_matcher_v3_filter_state_proto_goTypes = nil + file_envoy_type_matcher_v3_filter_state_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.validate.go new file mode 100644 index 0000000000..41a5f68db0 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state.pb.validate.go @@ -0,0 +1,208 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/matcher/v3/filter_state.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on FilterStateMatcher with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *FilterStateMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FilterStateMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FilterStateMatcherMultiError, or nil if none found. +func (m *FilterStateMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *FilterStateMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetKey()) < 1 { + err := FilterStateMatcherValidationError{ + field: "Key", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + oneofMatcherPresent := false + switch v := m.Matcher.(type) { + case *FilterStateMatcher_StringMatch: + if v == nil { + err := FilterStateMatcherValidationError{ + field: "Matcher", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatcherPresent = true + + if all { + switch v := interface{}(m.GetStringMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterStateMatcherValidationError{ + field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterStateMatcherValidationError{ + field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStringMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterStateMatcherValidationError{ + field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofMatcherPresent { + err := FilterStateMatcherValidationError{ + field: "Matcher", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return FilterStateMatcherMultiError(errors) + } + + return nil +} + +// FilterStateMatcherMultiError is an error wrapping multiple validation errors +// returned by FilterStateMatcher.ValidateAll() if the designated constraints +// aren't met. +type FilterStateMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FilterStateMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FilterStateMatcherMultiError) AllErrors() []error { return m } + +// FilterStateMatcherValidationError is the validation error returned by +// FilterStateMatcher.Validate if the designated constraints aren't met. +type FilterStateMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FilterStateMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FilterStateMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e FilterStateMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FilterStateMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FilterStateMatcherValidationError) ErrorName() string { + return "FilterStateMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e FilterStateMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFilterStateMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FilterStateMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FilterStateMatcherValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state_vtproto.pb.go new file mode 100644 index 0000000000..873f63eef7 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/filter_state_vtproto.pb.go @@ -0,0 +1,121 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/filter_state.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *FilterStateMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FilterStateMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FilterStateMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Matcher.(*FilterStateMatcher_StringMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FilterStateMatcher_StringMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FilterStateMatcher_StringMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StringMatch != nil { + size, err := m.StringMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *FilterStateMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.Matcher.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *FilterStateMatcher_StringMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StringMatch != nil { + l = m.StringMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.go new file mode 100644 index 0000000000..a2f9c73adc --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.go @@ -0,0 +1,452 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/matcher/v3/http_inputs.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Match input indicates that matching should be done on a specific request header. 
+// The resulting input string will be all headers for the given key joined by a comma, +// e.g. if the request contains two 'foo' headers with value 'bar' and 'baz', the input +// string will be 'bar,baz'. +// [#comment:TODO(snowp): Link to unified matching docs.] +// [#extension: envoy.matching.inputs.request_headers] +type HttpRequestHeaderMatchInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The request header to match on. + HeaderName string `protobuf:"bytes,1,opt,name=header_name,json=headerName,proto3" json:"header_name,omitempty"` +} + +func (x *HttpRequestHeaderMatchInput) Reset() { + *x = HttpRequestHeaderMatchInput{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpRequestHeaderMatchInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpRequestHeaderMatchInput) ProtoMessage() {} + +func (x *HttpRequestHeaderMatchInput) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpRequestHeaderMatchInput.ProtoReflect.Descriptor instead. +func (*HttpRequestHeaderMatchInput) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_http_inputs_proto_rawDescGZIP(), []int{0} +} + +func (x *HttpRequestHeaderMatchInput) GetHeaderName() string { + if x != nil { + return x.HeaderName + } + return "" +} + +// Match input indicates that matching should be done on a specific request trailer. +// The resulting input string will be all headers for the given key joined by a comma, +// e.g. if the request contains two 'foo' headers with value 'bar' and 'baz', the input +// string will be 'bar,baz'. +// [#comment:TODO(snowp): Link to unified matching docs.] +// [#extension: envoy.matching.inputs.request_trailers] +type HttpRequestTrailerMatchInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The request trailer to match on. + HeaderName string `protobuf:"bytes,1,opt,name=header_name,json=headerName,proto3" json:"header_name,omitempty"` +} + +func (x *HttpRequestTrailerMatchInput) Reset() { + *x = HttpRequestTrailerMatchInput{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpRequestTrailerMatchInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpRequestTrailerMatchInput) ProtoMessage() {} + +func (x *HttpRequestTrailerMatchInput) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpRequestTrailerMatchInput.ProtoReflect.Descriptor instead. 
+func (*HttpRequestTrailerMatchInput) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_http_inputs_proto_rawDescGZIP(), []int{1} +} + +func (x *HttpRequestTrailerMatchInput) GetHeaderName() string { + if x != nil { + return x.HeaderName + } + return "" +} + +// Match input indicates that matching should be done on a specific response header. +// The resulting input string will be all headers for the given key joined by a comma, +// e.g. if the response contains two 'foo' headers with value 'bar' and 'baz', the input +// string will be 'bar,baz'. +// [#comment:TODO(snowp): Link to unified matching docs.] +// [#extension: envoy.matching.inputs.response_headers] +type HttpResponseHeaderMatchInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The response header to match on. + HeaderName string `protobuf:"bytes,1,opt,name=header_name,json=headerName,proto3" json:"header_name,omitempty"` +} + +func (x *HttpResponseHeaderMatchInput) Reset() { + *x = HttpResponseHeaderMatchInput{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpResponseHeaderMatchInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpResponseHeaderMatchInput) ProtoMessage() {} + +func (x *HttpResponseHeaderMatchInput) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpResponseHeaderMatchInput.ProtoReflect.Descriptor instead. +func (*HttpResponseHeaderMatchInput) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_http_inputs_proto_rawDescGZIP(), []int{2} +} + +func (x *HttpResponseHeaderMatchInput) GetHeaderName() string { + if x != nil { + return x.HeaderName + } + return "" +} + +// Match input indicates that matching should be done on a specific response trailer. +// The resulting input string will be all headers for the given key joined by a comma, +// e.g. if the request contains two 'foo' headers with value 'bar' and 'baz', the input +// string will be 'bar,baz'. +// [#comment:TODO(snowp): Link to unified matching docs.] +// [#extension: envoy.matching.inputs.response_trailers] +type HttpResponseTrailerMatchInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The response trailer to match on.
+ HeaderName string `protobuf:"bytes,1,opt,name=header_name,json=headerName,proto3" json:"header_name,omitempty"` +} + +func (x *HttpResponseTrailerMatchInput) Reset() { + *x = HttpResponseTrailerMatchInput{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpResponseTrailerMatchInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpResponseTrailerMatchInput) ProtoMessage() {} + +func (x *HttpResponseTrailerMatchInput) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpResponseTrailerMatchInput.ProtoReflect.Descriptor instead. +func (*HttpResponseTrailerMatchInput) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_http_inputs_proto_rawDescGZIP(), []int{3} +} + +func (x *HttpResponseTrailerMatchInput) GetHeaderName() string { + if x != nil { + return x.HeaderName + } + return "" +} + +// Match input indicates that matching should be done on a specific query parameter. +// The resulting input string will be the first query parameter for the value +// 'query_param'. +// [#extension: envoy.matching.inputs.query_params] +type HttpRequestQueryParamMatchInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The query parameter to match on. + QueryParam string `protobuf:"bytes,1,opt,name=query_param,json=queryParam,proto3" json:"query_param,omitempty"` +} + +func (x *HttpRequestQueryParamMatchInput) Reset() { + *x = HttpRequestQueryParamMatchInput{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpRequestQueryParamMatchInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpRequestQueryParamMatchInput) ProtoMessage() {} + +func (x *HttpRequestQueryParamMatchInput) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpRequestQueryParamMatchInput.ProtoReflect.Descriptor instead. 
+func (*HttpRequestQueryParamMatchInput) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_http_inputs_proto_rawDescGZIP(), []int{4} +} + +func (x *HttpRequestQueryParamMatchInput) GetQueryParam() string { + if x != nil { + return x.QueryParam + } + return "" +} + +var File_envoy_type_matcher_v3_http_inputs_proto protoreflect.FileDescriptor + +var file_envoy_type_matcher_v3_http_inputs_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x69, 0x6e, 0x70, + 0x75, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4b, 0x0a, 0x1b, 0x48, 0x74, 0x74, 0x70, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, + 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x4c, 0x0a, 0x1c, 0x48, 0x74, 0x74, 0x70, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, + 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, + 0x61, 0x6d, 0x65, 0x22, 0x4c, 0x0a, 0x1c, 0x48, 0x74, 0x74, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, + 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, + 0x65, 0x22, 0x4d, 0x0a, 0x1d, 0x48, 0x74, 0x74, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, 0x70, + 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, + 0x00, 0xc0, 0x01, 0x01, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, + 0x22, 0x4b, 0x0a, 0x1f, 0x48, 0x74, 0x74, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x12, 0x28, 0x0a, 0x0b, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x0a, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x42, 
0x88, 0x01, + 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x48, + 0x74, 0x74, 0x70, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_matcher_v3_http_inputs_proto_rawDescOnce sync.Once + file_envoy_type_matcher_v3_http_inputs_proto_rawDescData = file_envoy_type_matcher_v3_http_inputs_proto_rawDesc +) + +func file_envoy_type_matcher_v3_http_inputs_proto_rawDescGZIP() []byte { + file_envoy_type_matcher_v3_http_inputs_proto_rawDescOnce.Do(func() { + file_envoy_type_matcher_v3_http_inputs_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_http_inputs_proto_rawDescData) + }) + return file_envoy_type_matcher_v3_http_inputs_proto_rawDescData +} + +var file_envoy_type_matcher_v3_http_inputs_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_envoy_type_matcher_v3_http_inputs_proto_goTypes = []interface{}{ + (*HttpRequestHeaderMatchInput)(nil), // 0: envoy.type.matcher.v3.HttpRequestHeaderMatchInput + (*HttpRequestTrailerMatchInput)(nil), // 1: envoy.type.matcher.v3.HttpRequestTrailerMatchInput + (*HttpResponseHeaderMatchInput)(nil), // 2: envoy.type.matcher.v3.HttpResponseHeaderMatchInput + (*HttpResponseTrailerMatchInput)(nil), // 3: envoy.type.matcher.v3.HttpResponseTrailerMatchInput + (*HttpRequestQueryParamMatchInput)(nil), // 4: envoy.type.matcher.v3.HttpRequestQueryParamMatchInput +} +var file_envoy_type_matcher_v3_http_inputs_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_type_matcher_v3_http_inputs_proto_init() } +func file_envoy_type_matcher_v3_http_inputs_proto_init() { + if File_envoy_type_matcher_v3_http_inputs_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpRequestHeaderMatchInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpRequestTrailerMatchInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpResponseHeaderMatchInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return 
nil + } + } + file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpResponseTrailerMatchInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_matcher_v3_http_inputs_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpRequestQueryParamMatchInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_matcher_v3_http_inputs_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_matcher_v3_http_inputs_proto_goTypes, + DependencyIndexes: file_envoy_type_matcher_v3_http_inputs_proto_depIdxs, + MessageInfos: file_envoy_type_matcher_v3_http_inputs_proto_msgTypes, + }.Build() + File_envoy_type_matcher_v3_http_inputs_proto = out.File + file_envoy_type_matcher_v3_http_inputs_proto_rawDesc = nil + file_envoy_type_matcher_v3_http_inputs_proto_goTypes = nil + file_envoy_type_matcher_v3_http_inputs_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.validate.go new file mode 100644 index 0000000000..78de165bdc --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs.pb.validate.go @@ -0,0 +1,615 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/matcher/v3/http_inputs.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on HttpRequestHeaderMatchInput with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HttpRequestHeaderMatchInput) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpRequestHeaderMatchInput with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HttpRequestHeaderMatchInputMultiError, or nil if none found. 
+func (m *HttpRequestHeaderMatchInput) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpRequestHeaderMatchInput) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if !_HttpRequestHeaderMatchInput_HeaderName_Pattern.MatchString(m.GetHeaderName()) { + err := HttpRequestHeaderMatchInputValidationError{ + field: "HeaderName", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HttpRequestHeaderMatchInputMultiError(errors) + } + + return nil +} + +// HttpRequestHeaderMatchInputMultiError is an error wrapping multiple +// validation errors returned by HttpRequestHeaderMatchInput.ValidateAll() if +// the designated constraints aren't met. +type HttpRequestHeaderMatchInputMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpRequestHeaderMatchInputMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpRequestHeaderMatchInputMultiError) AllErrors() []error { return m } + +// HttpRequestHeaderMatchInputValidationError is the validation error returned +// by HttpRequestHeaderMatchInput.Validate if the designated constraints +// aren't met. +type HttpRequestHeaderMatchInputValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpRequestHeaderMatchInputValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpRequestHeaderMatchInputValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpRequestHeaderMatchInputValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpRequestHeaderMatchInputValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpRequestHeaderMatchInputValidationError) ErrorName() string { + return "HttpRequestHeaderMatchInputValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpRequestHeaderMatchInputValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpRequestHeaderMatchInput.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpRequestHeaderMatchInputValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpRequestHeaderMatchInputValidationError{} + +var _HttpRequestHeaderMatchInput_HeaderName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on HttpRequestTrailerMatchInput with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HttpRequestTrailerMatchInput) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpRequestTrailerMatchInput with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HttpRequestTrailerMatchInputMultiError, or nil if none found. 
+func (m *HttpRequestTrailerMatchInput) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpRequestTrailerMatchInput) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if !_HttpRequestTrailerMatchInput_HeaderName_Pattern.MatchString(m.GetHeaderName()) { + err := HttpRequestTrailerMatchInputValidationError{ + field: "HeaderName", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HttpRequestTrailerMatchInputMultiError(errors) + } + + return nil +} + +// HttpRequestTrailerMatchInputMultiError is an error wrapping multiple +// validation errors returned by HttpRequestTrailerMatchInput.ValidateAll() if +// the designated constraints aren't met. +type HttpRequestTrailerMatchInputMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpRequestTrailerMatchInputMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpRequestTrailerMatchInputMultiError) AllErrors() []error { return m } + +// HttpRequestTrailerMatchInputValidationError is the validation error returned +// by HttpRequestTrailerMatchInput.Validate if the designated constraints +// aren't met. +type HttpRequestTrailerMatchInputValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpRequestTrailerMatchInputValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpRequestTrailerMatchInputValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpRequestTrailerMatchInputValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpRequestTrailerMatchInputValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpRequestTrailerMatchInputValidationError) ErrorName() string { + return "HttpRequestTrailerMatchInputValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpRequestTrailerMatchInputValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpRequestTrailerMatchInput.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpRequestTrailerMatchInputValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpRequestTrailerMatchInputValidationError{} + +var _HttpRequestTrailerMatchInput_HeaderName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on HttpResponseHeaderMatchInput with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HttpResponseHeaderMatchInput) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpResponseHeaderMatchInput with the +// rules defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// HttpResponseHeaderMatchInputMultiError, or nil if none found. +func (m *HttpResponseHeaderMatchInput) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpResponseHeaderMatchInput) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if !_HttpResponseHeaderMatchInput_HeaderName_Pattern.MatchString(m.GetHeaderName()) { + err := HttpResponseHeaderMatchInputValidationError{ + field: "HeaderName", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HttpResponseHeaderMatchInputMultiError(errors) + } + + return nil +} + +// HttpResponseHeaderMatchInputMultiError is an error wrapping multiple +// validation errors returned by HttpResponseHeaderMatchInput.ValidateAll() if +// the designated constraints aren't met. +type HttpResponseHeaderMatchInputMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpResponseHeaderMatchInputMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpResponseHeaderMatchInputMultiError) AllErrors() []error { return m } + +// HttpResponseHeaderMatchInputValidationError is the validation error returned +// by HttpResponseHeaderMatchInput.Validate if the designated constraints +// aren't met. +type HttpResponseHeaderMatchInputValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpResponseHeaderMatchInputValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpResponseHeaderMatchInputValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpResponseHeaderMatchInputValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpResponseHeaderMatchInputValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpResponseHeaderMatchInputValidationError) ErrorName() string { + return "HttpResponseHeaderMatchInputValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpResponseHeaderMatchInputValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpResponseHeaderMatchInput.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpResponseHeaderMatchInputValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpResponseHeaderMatchInputValidationError{} + +var _HttpResponseHeaderMatchInput_HeaderName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on HttpResponseTrailerMatchInput with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *HttpResponseTrailerMatchInput) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpResponseTrailerMatchInput with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// HttpResponseTrailerMatchInputMultiError, or nil if none found. +func (m *HttpResponseTrailerMatchInput) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpResponseTrailerMatchInput) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if !_HttpResponseTrailerMatchInput_HeaderName_Pattern.MatchString(m.GetHeaderName()) { + err := HttpResponseTrailerMatchInputValidationError{ + field: "HeaderName", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HttpResponseTrailerMatchInputMultiError(errors) + } + + return nil +} + +// HttpResponseTrailerMatchInputMultiError is an error wrapping multiple +// validation errors returned by HttpResponseTrailerMatchInput.ValidateAll() +// if the designated constraints aren't met. +type HttpResponseTrailerMatchInputMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpResponseTrailerMatchInputMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpResponseTrailerMatchInputMultiError) AllErrors() []error { return m } + +// HttpResponseTrailerMatchInputValidationError is the validation error +// returned by HttpResponseTrailerMatchInput.Validate if the designated +// constraints aren't met. +type HttpResponseTrailerMatchInputValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpResponseTrailerMatchInputValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpResponseTrailerMatchInputValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpResponseTrailerMatchInputValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpResponseTrailerMatchInputValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpResponseTrailerMatchInputValidationError) ErrorName() string { + return "HttpResponseTrailerMatchInputValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpResponseTrailerMatchInputValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpResponseTrailerMatchInput.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpResponseTrailerMatchInputValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpResponseTrailerMatchInputValidationError{} + +var _HttpResponseTrailerMatchInput_HeaderName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on HttpRequestQueryParamMatchInput with the +// rules defined in the proto definition for this message. 
If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HttpRequestQueryParamMatchInput) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpRequestQueryParamMatchInput with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// HttpRequestQueryParamMatchInputMultiError, or nil if none found. +func (m *HttpRequestQueryParamMatchInput) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpRequestQueryParamMatchInput) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetQueryParam()) < 1 { + err := HttpRequestQueryParamMatchInputValidationError{ + field: "QueryParam", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HttpRequestQueryParamMatchInputMultiError(errors) + } + + return nil +} + +// HttpRequestQueryParamMatchInputMultiError is an error wrapping multiple +// validation errors returned by HttpRequestQueryParamMatchInput.ValidateAll() +// if the designated constraints aren't met. +type HttpRequestQueryParamMatchInputMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpRequestQueryParamMatchInputMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpRequestQueryParamMatchInputMultiError) AllErrors() []error { return m } + +// HttpRequestQueryParamMatchInputValidationError is the validation error +// returned by HttpRequestQueryParamMatchInput.Validate if the designated +// constraints aren't met. +type HttpRequestQueryParamMatchInputValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpRequestQueryParamMatchInputValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpRequestQueryParamMatchInputValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpRequestQueryParamMatchInputValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpRequestQueryParamMatchInputValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HttpRequestQueryParamMatchInputValidationError) ErrorName() string { + return "HttpRequestQueryParamMatchInputValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpRequestQueryParamMatchInputValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpRequestQueryParamMatchInput.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpRequestQueryParamMatchInputValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpRequestQueryParamMatchInputValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs_vtproto.pb.go new file mode 100644 index 0000000000..ecf552dc0e --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/http_inputs_vtproto.pb.go @@ -0,0 +1,289 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/http_inputs.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *HttpRequestHeaderMatchInput) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpRequestHeaderMatchInput) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpRequestHeaderMatchInput) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.HeaderName) > 0 { + i -= len(m.HeaderName) + copy(dAtA[i:], m.HeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpRequestTrailerMatchInput) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpRequestTrailerMatchInput) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpRequestTrailerMatchInput) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.HeaderName) > 0 { + i -= len(m.HeaderName) + copy(dAtA[i:], m.HeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderName))) + i-- + 
dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpResponseHeaderMatchInput) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpResponseHeaderMatchInput) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpResponseHeaderMatchInput) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.HeaderName) > 0 { + i -= len(m.HeaderName) + copy(dAtA[i:], m.HeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpResponseTrailerMatchInput) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpResponseTrailerMatchInput) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpResponseTrailerMatchInput) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.HeaderName) > 0 { + i -= len(m.HeaderName) + copy(dAtA[i:], m.HeaderName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpRequestQueryParamMatchInput) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpRequestQueryParamMatchInput) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpRequestQueryParamMatchInput) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.QueryParam) > 0 { + i -= len(m.QueryParam) + copy(dAtA[i:], m.QueryParam) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.QueryParam))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpRequestHeaderMatchInput) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HeaderName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpRequestTrailerMatchInput) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HeaderName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpResponseHeaderMatchInput) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HeaderName) + if l > 0 { + n += 1 + 
l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpResponseTrailerMatchInput) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HeaderName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpRequestQueryParamMatchInput) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.QueryParam) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.go new file mode 100644 index 0000000000..14a093334b --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.go @@ -0,0 +1,303 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/matcher/v3/metadata.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// [#next-major-version: MetadataMatcher should use StructMatcher] +type MetadataMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The filter name to retrieve the Struct from the Metadata. + Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + // The path to retrieve the Value from the Struct. + Path []*MetadataMatcher_PathSegment `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"` + // The MetadataMatcher is matched if the value retrieved by path is matched to this value. + Value *ValueMatcher `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + // If true, the match result will be inverted. + Invert bool `protobuf:"varint,4,opt,name=invert,proto3" json:"invert,omitempty"` +} + +func (x *MetadataMatcher) Reset() { + *x = MetadataMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_metadata_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataMatcher) ProtoMessage() {} + +func (x *MetadataMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_metadata_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataMatcher.ProtoReflect.Descriptor instead. 
+func (*MetadataMatcher) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_metadata_proto_rawDescGZIP(), []int{0} +} + +func (x *MetadataMatcher) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *MetadataMatcher) GetPath() []*MetadataMatcher_PathSegment { + if x != nil { + return x.Path + } + return nil +} + +func (x *MetadataMatcher) GetValue() *ValueMatcher { + if x != nil { + return x.Value + } + return nil +} + +func (x *MetadataMatcher) GetInvert() bool { + if x != nil { + return x.Invert + } + return false +} + +// Specifies the segment in a path to retrieve value from Metadata. +// Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that +// if the segment key refers to a list, it has to be the last segment in a path. +type MetadataMatcher_PathSegment struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Segment: + // + // *MetadataMatcher_PathSegment_Key + Segment isMetadataMatcher_PathSegment_Segment `protobuf_oneof:"segment"` +} + +func (x *MetadataMatcher_PathSegment) Reset() { + *x = MetadataMatcher_PathSegment{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_metadata_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataMatcher_PathSegment) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataMatcher_PathSegment) ProtoMessage() {} + +func (x *MetadataMatcher_PathSegment) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_metadata_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataMatcher_PathSegment.ProtoReflect.Descriptor instead. +func (*MetadataMatcher_PathSegment) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_metadata_proto_rawDescGZIP(), []int{0, 0} +} + +func (m *MetadataMatcher_PathSegment) GetSegment() isMetadataMatcher_PathSegment_Segment { + if m != nil { + return m.Segment + } + return nil +} + +func (x *MetadataMatcher_PathSegment) GetKey() string { + if x, ok := x.GetSegment().(*MetadataMatcher_PathSegment_Key); ok { + return x.Key + } + return "" +} + +type isMetadataMatcher_PathSegment_Segment interface { + isMetadataMatcher_PathSegment_Segment() +} + +type MetadataMatcher_PathSegment_Key struct { + // If specified, use the key to retrieve the value in a Struct. 
+ Key string `protobuf:"bytes,1,opt,name=key,proto3,oneof"` +} + +func (*MetadataMatcher_PathSegment_Key) isMetadataMatcher_PathSegment_Segment() {} + +var File_envoy_type_matcher_v3_metadata_proto protoreflect.FileDescriptor + +var file_envoy_type_matcher_v3_metadata_proto_rawDesc = []byte{ + 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x21, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xff, 0x02, 0x0a, 0x0f, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, + 0x1f, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x50, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x67, 0x6d, 0x65, + 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x43, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x6e, 0x76, 0x65, 0x72, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x69, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x1a, + 0x71, 0x0a, 0x0b, 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1b, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x35, 0x9a, 0xc5, 0x88, + 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x67, 0x6d, 0x65, + 0x6e, 0x74, 0x42, 0x0e, 0x0a, 0x07, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x03, 0xf8, + 0x42, 0x01, 0x3a, 0x29, 
0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x86, 0x01, + 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_matcher_v3_metadata_proto_rawDescOnce sync.Once + file_envoy_type_matcher_v3_metadata_proto_rawDescData = file_envoy_type_matcher_v3_metadata_proto_rawDesc +) + +func file_envoy_type_matcher_v3_metadata_proto_rawDescGZIP() []byte { + file_envoy_type_matcher_v3_metadata_proto_rawDescOnce.Do(func() { + file_envoy_type_matcher_v3_metadata_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_metadata_proto_rawDescData) + }) + return file_envoy_type_matcher_v3_metadata_proto_rawDescData +} + +var file_envoy_type_matcher_v3_metadata_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_type_matcher_v3_metadata_proto_goTypes = []interface{}{ + (*MetadataMatcher)(nil), // 0: envoy.type.matcher.v3.MetadataMatcher + (*MetadataMatcher_PathSegment)(nil), // 1: envoy.type.matcher.v3.MetadataMatcher.PathSegment + (*ValueMatcher)(nil), // 2: envoy.type.matcher.v3.ValueMatcher +} +var file_envoy_type_matcher_v3_metadata_proto_depIdxs = []int32{ + 1, // 0: envoy.type.matcher.v3.MetadataMatcher.path:type_name -> envoy.type.matcher.v3.MetadataMatcher.PathSegment + 2, // 1: envoy.type.matcher.v3.MetadataMatcher.value:type_name -> envoy.type.matcher.v3.ValueMatcher + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_type_matcher_v3_metadata_proto_init() } +func file_envoy_type_matcher_v3_metadata_proto_init() { + if File_envoy_type_matcher_v3_metadata_proto != nil { + return + } + file_envoy_type_matcher_v3_value_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_type_matcher_v3_metadata_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_matcher_v3_metadata_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataMatcher_PathSegment); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_matcher_v3_metadata_proto_msgTypes[1].OneofWrappers = 
[]interface{}{ + (*MetadataMatcher_PathSegment_Key)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_matcher_v3_metadata_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_matcher_v3_metadata_proto_goTypes, + DependencyIndexes: file_envoy_type_matcher_v3_metadata_proto_depIdxs, + MessageInfos: file_envoy_type_matcher_v3_metadata_proto_msgTypes, + }.Build() + File_envoy_type_matcher_v3_metadata_proto = out.File + file_envoy_type_matcher_v3_metadata_proto_rawDesc = nil + file_envoy_type_matcher_v3_metadata_proto_goTypes = nil + file_envoy_type_matcher_v3_metadata_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.validate.go new file mode 100644 index 0000000000..27c898ee04 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata.pb.validate.go @@ -0,0 +1,378 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/matcher/v3/metadata.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on MetadataMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *MetadataMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MetadataMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MetadataMatcherMultiError, or nil if none found. 
+func (m *MetadataMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *MetadataMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetFilter()) < 1 { + err := MetadataMatcherValidationError{ + field: "Filter", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetPath()) < 1 { + err := MetadataMatcherValidationError{ + field: "Path", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetPath() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataMatcherValidationError{ + field: fmt.Sprintf("Path[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataMatcherValidationError{ + field: fmt.Sprintf("Path[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataMatcherValidationError{ + field: fmt.Sprintf("Path[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if m.GetValue() == nil { + err := MetadataMatcherValidationError{ + field: "Value", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetValue()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataMatcherValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataMatcherValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValue()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataMatcherValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Invert + + if len(errors) > 0 { + return MetadataMatcherMultiError(errors) + } + + return nil +} + +// MetadataMatcherMultiError is an error wrapping multiple validation errors +// returned by MetadataMatcher.ValidateAll() if the designated constraints +// aren't met. +type MetadataMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MetadataMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MetadataMatcherMultiError) AllErrors() []error { return m } + +// MetadataMatcherValidationError is the validation error returned by +// MetadataMatcher.Validate if the designated constraints aren't met. +type MetadataMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e MetadataMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MetadataMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetadataMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MetadataMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MetadataMatcherValidationError) ErrorName() string { return "MetadataMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e MetadataMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetadataMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetadataMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetadataMatcherValidationError{} + +// Validate checks the field values on MetadataMatcher_PathSegment with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *MetadataMatcher_PathSegment) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MetadataMatcher_PathSegment with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MetadataMatcher_PathSegmentMultiError, or nil if none found. +func (m *MetadataMatcher_PathSegment) ValidateAll() error { + return m.validate(true) +} + +func (m *MetadataMatcher_PathSegment) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofSegmentPresent := false + switch v := m.Segment.(type) { + case *MetadataMatcher_PathSegment_Key: + if v == nil { + err := MetadataMatcher_PathSegmentValidationError{ + field: "Segment", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofSegmentPresent = true + + if utf8.RuneCountInString(m.GetKey()) < 1 { + err := MetadataMatcher_PathSegmentValidationError{ + field: "Key", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + default: + _ = v // ensures v is used + } + if !oneofSegmentPresent { + err := MetadataMatcher_PathSegmentValidationError{ + field: "Segment", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return MetadataMatcher_PathSegmentMultiError(errors) + } + + return nil +} + +// MetadataMatcher_PathSegmentMultiError is an error wrapping multiple +// validation errors returned by MetadataMatcher_PathSegment.ValidateAll() if +// the designated constraints aren't met. +type MetadataMatcher_PathSegmentMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MetadataMatcher_PathSegmentMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m MetadataMatcher_PathSegmentMultiError) AllErrors() []error { return m } + +// MetadataMatcher_PathSegmentValidationError is the validation error returned +// by MetadataMatcher_PathSegment.Validate if the designated constraints +// aren't met. +type MetadataMatcher_PathSegmentValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MetadataMatcher_PathSegmentValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MetadataMatcher_PathSegmentValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetadataMatcher_PathSegmentValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MetadataMatcher_PathSegmentValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MetadataMatcher_PathSegmentValidationError) ErrorName() string { + return "MetadataMatcher_PathSegmentValidationError" +} + +// Error satisfies the builtin error interface +func (e MetadataMatcher_PathSegmentValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetadataMatcher_PathSegment.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetadataMatcher_PathSegmentValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetadataMatcher_PathSegmentValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata_vtproto.pb.go new file mode 100644 index 0000000000..4050e14c26 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/metadata_vtproto.pb.go @@ -0,0 +1,195 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/metadata.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *MetadataMatcher_PathSegment) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataMatcher_PathSegment) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataMatcher_PathSegment) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Segment.(*MetadataMatcher_PathSegment_Key); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *MetadataMatcher_PathSegment_Key) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataMatcher_PathSegment_Key) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *MetadataMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Invert { + i-- + if m.Invert { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.Value != nil { + size, err := m.Value.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Path) > 0 { + for iNdEx := len(m.Path) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Path[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Filter) > 0 { + i -= len(m.Filter) + copy(dAtA[i:], m.Filter) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Filter))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MetadataMatcher_PathSegment) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Segment.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *MetadataMatcher_PathSegment_Key) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *MetadataMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Filter) + if l > 0 { + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Path) > 0 { + for _, e := range m.Path { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Value != nil { + l = m.Value.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Invert { + n += 2 + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.go new file mode 100644 index 0000000000..d6083cb277 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.go @@ -0,0 +1,189 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/matcher/v3/node.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Specifies the way to match a Node. +// The match follows AND semantics. +type NodeMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies match criteria on the node id. + NodeId *StringMatcher `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // Specifies match criteria on the node metadata. + NodeMetadatas []*StructMatcher `protobuf:"bytes,2,rep,name=node_metadatas,json=nodeMetadatas,proto3" json:"node_metadatas,omitempty"` +} + +func (x *NodeMatcher) Reset() { + *x = NodeMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_node_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NodeMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NodeMatcher) ProtoMessage() {} + +func (x *NodeMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_node_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NodeMatcher.ProtoReflect.Descriptor instead. 
+func (*NodeMatcher) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_node_proto_rawDescGZIP(), []int{0} +} + +func (x *NodeMatcher) GetNodeId() *StringMatcher { + if x != nil { + return x.NodeId + } + return nil +} + +func (x *NodeMatcher) GetNodeMetadatas() []*StructMatcher { + if x != nil { + return x.NodeMetadatas + } + return nil +} + +var File_envoy_type_matcher_v3_node_proto protoreflect.FileDescriptor + +var file_envoy_type_matcher_v3_node_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, + 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xc0, 0x01, 0x0a, 0x0b, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, + 0x49, 0x64, 0x12, 0x4b, 0x0a, 0x0e, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x52, 0x0d, 0x6e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x73, 0x3a, + 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x82, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, + 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x09, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x74, 
0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, + 0x33, 0x3b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_matcher_v3_node_proto_rawDescOnce sync.Once + file_envoy_type_matcher_v3_node_proto_rawDescData = file_envoy_type_matcher_v3_node_proto_rawDesc +) + +func file_envoy_type_matcher_v3_node_proto_rawDescGZIP() []byte { + file_envoy_type_matcher_v3_node_proto_rawDescOnce.Do(func() { + file_envoy_type_matcher_v3_node_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_node_proto_rawDescData) + }) + return file_envoy_type_matcher_v3_node_proto_rawDescData +} + +var file_envoy_type_matcher_v3_node_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_type_matcher_v3_node_proto_goTypes = []interface{}{ + (*NodeMatcher)(nil), // 0: envoy.type.matcher.v3.NodeMatcher + (*StringMatcher)(nil), // 1: envoy.type.matcher.v3.StringMatcher + (*StructMatcher)(nil), // 2: envoy.type.matcher.v3.StructMatcher +} +var file_envoy_type_matcher_v3_node_proto_depIdxs = []int32{ + 1, // 0: envoy.type.matcher.v3.NodeMatcher.node_id:type_name -> envoy.type.matcher.v3.StringMatcher + 2, // 1: envoy.type.matcher.v3.NodeMatcher.node_metadatas:type_name -> envoy.type.matcher.v3.StructMatcher + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_type_matcher_v3_node_proto_init() } +func file_envoy_type_matcher_v3_node_proto_init() { + if File_envoy_type_matcher_v3_node_proto != nil { + return + } + file_envoy_type_matcher_v3_string_proto_init() + file_envoy_type_matcher_v3_struct_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_type_matcher_v3_node_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_matcher_v3_node_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_matcher_v3_node_proto_goTypes, + DependencyIndexes: file_envoy_type_matcher_v3_node_proto_depIdxs, + MessageInfos: file_envoy_type_matcher_v3_node_proto_msgTypes, + }.Build() + File_envoy_type_matcher_v3_node_proto = out.File + file_envoy_type_matcher_v3_node_proto_rawDesc = nil + file_envoy_type_matcher_v3_node_proto_goTypes = nil + file_envoy_type_matcher_v3_node_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.validate.go new file mode 100644 index 0000000000..62aa27f7ad --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node.pb.validate.go @@ -0,0 +1,199 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/type/matcher/v3/node.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on NodeMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *NodeMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on NodeMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in NodeMatcherMultiError, or +// nil if none found. +func (m *NodeMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *NodeMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetNodeId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NodeMatcherValidationError{ + field: "NodeId", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NodeMatcherValidationError{ + field: "NodeId", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNodeId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeMatcherValidationError{ + field: "NodeId", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetNodeMetadatas() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NodeMatcherValidationError{ + field: fmt.Sprintf("NodeMetadatas[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NodeMatcherValidationError{ + field: fmt.Sprintf("NodeMetadatas[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeMatcherValidationError{ + field: fmt.Sprintf("NodeMetadatas[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return NodeMatcherMultiError(errors) + } + + return nil +} + +// NodeMatcherMultiError is an error wrapping multiple validation errors +// returned by NodeMatcher.ValidateAll() if the designated constraints aren't met. +type NodeMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m NodeMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m NodeMatcherMultiError) AllErrors() []error { return m } + +// NodeMatcherValidationError is the validation error returned by +// NodeMatcher.Validate if the designated constraints aren't met. +type NodeMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e NodeMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e NodeMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e NodeMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e NodeMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e NodeMatcherValidationError) ErrorName() string { return "NodeMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e NodeMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sNodeMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = NodeMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = NodeMatcherValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node_vtproto.pb.go new file mode 100644 index 0000000000..3bea65da7c --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/node_vtproto.pb.go @@ -0,0 +1,94 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/node.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *NodeMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *NodeMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.NodeMetadatas) > 0 { + for iNdEx := len(m.NodeMetadatas) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.NodeMetadatas[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.NodeId != nil { + size, err := m.NodeId.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NodeMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NodeId != nil { + l = m.NodeId.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.NodeMetadatas) > 0 { + for _, e := range m.NodeMetadatas { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.go new file mode 100644 index 0000000000..2ad4bccfad --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.go @@ -0,0 +1,213 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/matcher/v3/number.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Specifies the way to match a double value. 
+type DoubleMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to MatchPattern: + // + // *DoubleMatcher_Range + // *DoubleMatcher_Exact + MatchPattern isDoubleMatcher_MatchPattern `protobuf_oneof:"match_pattern"` +} + +func (x *DoubleMatcher) Reset() { + *x = DoubleMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_number_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DoubleMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DoubleMatcher) ProtoMessage() {} + +func (x *DoubleMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_number_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DoubleMatcher.ProtoReflect.Descriptor instead. +func (*DoubleMatcher) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_number_proto_rawDescGZIP(), []int{0} +} + +func (m *DoubleMatcher) GetMatchPattern() isDoubleMatcher_MatchPattern { + if m != nil { + return m.MatchPattern + } + return nil +} + +func (x *DoubleMatcher) GetRange() *v3.DoubleRange { + if x, ok := x.GetMatchPattern().(*DoubleMatcher_Range); ok { + return x.Range + } + return nil +} + +func (x *DoubleMatcher) GetExact() float64 { + if x, ok := x.GetMatchPattern().(*DoubleMatcher_Exact); ok { + return x.Exact + } + return 0 +} + +type isDoubleMatcher_MatchPattern interface { + isDoubleMatcher_MatchPattern() +} + +type DoubleMatcher_Range struct { + // If specified, the input double value must be in the range specified here. + // Note: The range is using half-open interval semantics [start, end). + Range *v3.DoubleRange `protobuf:"bytes,1,opt,name=range,proto3,oneof"` +} + +type DoubleMatcher_Exact struct { + // If specified, the input double value must be equal to the value specified here. 
+ Exact float64 `protobuf:"fixed64,2,opt,name=exact,proto3,oneof"` +} + +func (*DoubleMatcher_Range) isDoubleMatcher_MatchPattern() {} + +func (*DoubleMatcher_Exact) isDoubleMatcher_MatchPattern() {} + +var File_envoy_type_matcher_v3_number_proto protoreflect.FileDescriptor + +var file_envoy_type_matcher_v3_number_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, + 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x9a, 0x01, 0x0a, 0x0d, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x12, 0x32, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, + 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, 0x3a, + 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c, + 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x14, 0x0a, 0x0d, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x84, + 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0b, + 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_matcher_v3_number_proto_rawDescOnce sync.Once + file_envoy_type_matcher_v3_number_proto_rawDescData = file_envoy_type_matcher_v3_number_proto_rawDesc +) + +func file_envoy_type_matcher_v3_number_proto_rawDescGZIP() []byte { + 
file_envoy_type_matcher_v3_number_proto_rawDescOnce.Do(func() { + file_envoy_type_matcher_v3_number_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_number_proto_rawDescData) + }) + return file_envoy_type_matcher_v3_number_proto_rawDescData +} + +var file_envoy_type_matcher_v3_number_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_type_matcher_v3_number_proto_goTypes = []interface{}{ + (*DoubleMatcher)(nil), // 0: envoy.type.matcher.v3.DoubleMatcher + (*v3.DoubleRange)(nil), // 1: envoy.type.v3.DoubleRange +} +var file_envoy_type_matcher_v3_number_proto_depIdxs = []int32{ + 1, // 0: envoy.type.matcher.v3.DoubleMatcher.range:type_name -> envoy.type.v3.DoubleRange + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_type_matcher_v3_number_proto_init() } +func file_envoy_type_matcher_v3_number_proto_init() { + if File_envoy_type_matcher_v3_number_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_matcher_v3_number_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DoubleMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_matcher_v3_number_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*DoubleMatcher_Range)(nil), + (*DoubleMatcher_Exact)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_matcher_v3_number_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_matcher_v3_number_proto_goTypes, + DependencyIndexes: file_envoy_type_matcher_v3_number_proto_depIdxs, + MessageInfos: file_envoy_type_matcher_v3_number_proto_msgTypes, + }.Build() + File_envoy_type_matcher_v3_number_proto = out.File + file_envoy_type_matcher_v3_number_proto_rawDesc = nil + file_envoy_type_matcher_v3_number_proto_goTypes = nil + file_envoy_type_matcher_v3_number_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.validate.go new file mode 100644 index 0000000000..b019d7d010 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number.pb.validate.go @@ -0,0 +1,208 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/matcher/v3/number.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on DoubleMatcher with the rules defined in +// the proto definition for this message. 
If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *DoubleMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DoubleMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in DoubleMatcherMultiError, or +// nil if none found. +func (m *DoubleMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *DoubleMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofMatchPatternPresent := false + switch v := m.MatchPattern.(type) { + case *DoubleMatcher_Range: + if v == nil { + err := DoubleMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if all { + switch v := interface{}(m.GetRange()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DoubleMatcherValidationError{ + field: "Range", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DoubleMatcherValidationError{ + field: "Range", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRange()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DoubleMatcherValidationError{ + field: "Range", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *DoubleMatcher_Exact: + if v == nil { + err := DoubleMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + // no validation rules for Exact + default: + _ = v // ensures v is used + } + if !oneofMatchPatternPresent { + err := DoubleMatcherValidationError{ + field: "MatchPattern", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return DoubleMatcherMultiError(errors) + } + + return nil +} + +// DoubleMatcherMultiError is an error wrapping multiple validation errors +// returned by DoubleMatcher.ValidateAll() if the designated constraints +// aren't met. +type DoubleMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DoubleMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DoubleMatcherMultiError) AllErrors() []error { return m } + +// DoubleMatcherValidationError is the validation error returned by +// DoubleMatcher.Validate if the designated constraints aren't met. +type DoubleMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DoubleMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DoubleMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e DoubleMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DoubleMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DoubleMatcherValidationError) ErrorName() string { return "DoubleMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e DoubleMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDoubleMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DoubleMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DoubleMatcherValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number_vtproto.pb.go new file mode 100644 index 0000000000..7315258ab0 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/number_vtproto.pb.go @@ -0,0 +1,160 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/number.proto + +package matcherv3 + +import ( + binary "encoding/binary" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *DoubleMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DoubleMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.MatchPattern.(*DoubleMatcher_Exact); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*DoubleMatcher_Range); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *DoubleMatcher_Range) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DoubleMatcher_Range) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Range != nil { + if vtmsg, ok := interface{}(m.Range).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Range) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *DoubleMatcher_Exact) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DoubleMatcher_Exact) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Exact)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *DoubleMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.MatchPattern.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *DoubleMatcher_Range) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Range != nil { + if size, ok := interface{}(m.Range).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Range) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *DoubleMatcher_Exact) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.go new file mode 100644 index 0000000000..aac680dbe1 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.go @@ -0,0 +1,197 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/matcher/v3/path.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Specifies the way to match a path on HTTP request. +type PathMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Rule: + // + // *PathMatcher_Path + Rule isPathMatcher_Rule `protobuf_oneof:"rule"` +} + +func (x *PathMatcher) Reset() { + *x = PathMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_path_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PathMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PathMatcher) ProtoMessage() {} + +func (x *PathMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_path_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PathMatcher.ProtoReflect.Descriptor instead. +func (*PathMatcher) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_path_proto_rawDescGZIP(), []int{0} +} + +func (m *PathMatcher) GetRule() isPathMatcher_Rule { + if m != nil { + return m.Rule + } + return nil +} + +func (x *PathMatcher) GetPath() *StringMatcher { + if x, ok := x.GetRule().(*PathMatcher_Path); ok { + return x.Path + } + return nil +} + +type isPathMatcher_Rule interface { + isPathMatcher_Rule() +} + +type PathMatcher_Path struct { + // The “path“ must match the URL path portion of the :path header. The query and fragment + // string (if present) are removed in the URL path portion. + // For example, the path “/data“ will match the “:path“ header “/data#fragment?param=value“. 
+ Path *StringMatcher `protobuf:"bytes,1,opt,name=path,proto3,oneof"` +} + +func (*PathMatcher_Path) isPathMatcher_Rule() {} + +var File_envoy_type_matcher_v3_path_proto protoreflect.FileDescriptor + +var file_envoy_type_matcher_v3_path_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, + 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x0b, 0x50, 0x61, 0x74, + 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x3a, 0x25, + 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x0b, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x12, 0x03, 0xf8, + 0x42, 0x01, 0x42, 0x82, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, + 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x42, 0x09, 0x50, 0x61, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_matcher_v3_path_proto_rawDescOnce sync.Once + file_envoy_type_matcher_v3_path_proto_rawDescData = file_envoy_type_matcher_v3_path_proto_rawDesc +) + +func file_envoy_type_matcher_v3_path_proto_rawDescGZIP() []byte { + file_envoy_type_matcher_v3_path_proto_rawDescOnce.Do(func() { + file_envoy_type_matcher_v3_path_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_path_proto_rawDescData) + }) + return 
file_envoy_type_matcher_v3_path_proto_rawDescData +} + +var file_envoy_type_matcher_v3_path_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_type_matcher_v3_path_proto_goTypes = []interface{}{ + (*PathMatcher)(nil), // 0: envoy.type.matcher.v3.PathMatcher + (*StringMatcher)(nil), // 1: envoy.type.matcher.v3.StringMatcher +} +var file_envoy_type_matcher_v3_path_proto_depIdxs = []int32{ + 1, // 0: envoy.type.matcher.v3.PathMatcher.path:type_name -> envoy.type.matcher.v3.StringMatcher + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_type_matcher_v3_path_proto_init() } +func file_envoy_type_matcher_v3_path_proto_init() { + if File_envoy_type_matcher_v3_path_proto != nil { + return + } + file_envoy_type_matcher_v3_string_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_type_matcher_v3_path_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_matcher_v3_path_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*PathMatcher_Path)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_matcher_v3_path_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_matcher_v3_path_proto_goTypes, + DependencyIndexes: file_envoy_type_matcher_v3_path_proto_depIdxs, + MessageInfos: file_envoy_type_matcher_v3_path_proto_msgTypes, + }.Build() + File_envoy_type_matcher_v3_path_proto = out.File + file_envoy_type_matcher_v3_path_proto_rawDesc = nil + file_envoy_type_matcher_v3_path_proto_goTypes = nil + file_envoy_type_matcher_v3_path_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.validate.go new file mode 100644 index 0000000000..a978c99ab3 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path.pb.validate.go @@ -0,0 +1,205 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/matcher/v3/path.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on PathMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *PathMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on PathMatcher with the rules defined in +// the proto definition for this message. 
If any rules are violated, the +// result is a list of violation errors wrapped in PathMatcherMultiError, or +// nil if none found. +func (m *PathMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *PathMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofRulePresent := false + switch v := m.Rule.(type) { + case *PathMatcher_Path: + if v == nil { + err := PathMatcherValidationError{ + field: "Rule", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofRulePresent = true + + if m.GetPath() == nil { + err := PathMatcherValidationError{ + field: "Path", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetPath()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PathMatcherValidationError{ + field: "Path", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PathMatcherValidationError{ + field: "Path", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPath()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PathMatcherValidationError{ + field: "Path", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofRulePresent { + err := PathMatcherValidationError{ + field: "Rule", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return PathMatcherMultiError(errors) + } + + return nil +} + +// PathMatcherMultiError is an error wrapping multiple validation errors +// returned by PathMatcher.ValidateAll() if the designated constraints aren't met. +type PathMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PathMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PathMatcherMultiError) AllErrors() []error { return m } + +// PathMatcherValidationError is the validation error returned by +// PathMatcher.Validate if the designated constraints aren't met. +type PathMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PathMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PathMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PathMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PathMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e PathMatcherValidationError) ErrorName() string { return "PathMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e PathMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPathMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PathMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PathMatcherValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path_vtproto.pb.go new file mode 100644 index 0000000000..044fe9db21 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/path_vtproto.pb.go @@ -0,0 +1,110 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/path.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *PathMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PathMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Rule.(*PathMatcher_Path); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *PathMatcher_Path) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *PathMatcher_Path) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Path != nil { + size, err := m.Path.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *PathMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Rule.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *PathMatcher_Path) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Path != nil { + l = m.Path.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git 
a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.go new file mode 100644 index 0000000000..383bb267c3 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.go @@ -0,0 +1,415 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/matcher/v3/regex.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/go-control-plane/envoy/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A regex matcher designed for safety when used with untrusted input. +type RegexMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to EngineType: + // + // *RegexMatcher_GoogleRe2 + EngineType isRegexMatcher_EngineType `protobuf_oneof:"engine_type"` + // The regex match string. The string must be supported by the configured engine. The regex is matched + // against the full string, not as a partial match. + Regex string `protobuf:"bytes,2,opt,name=regex,proto3" json:"regex,omitempty"` +} + +func (x *RegexMatcher) Reset() { + *x = RegexMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_regex_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RegexMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RegexMatcher) ProtoMessage() {} + +func (x *RegexMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_regex_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RegexMatcher.ProtoReflect.Descriptor instead. +func (*RegexMatcher) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_regex_proto_rawDescGZIP(), []int{0} +} + +func (m *RegexMatcher) GetEngineType() isRegexMatcher_EngineType { + if m != nil { + return m.EngineType + } + return nil +} + +// Deprecated: Marked as deprecated in envoy/type/matcher/v3/regex.proto. +func (x *RegexMatcher) GetGoogleRe2() *RegexMatcher_GoogleRE2 { + if x, ok := x.GetEngineType().(*RegexMatcher_GoogleRe2); ok { + return x.GoogleRe2 + } + return nil +} + +func (x *RegexMatcher) GetRegex() string { + if x != nil { + return x.Regex + } + return "" +} + +type isRegexMatcher_EngineType interface { + isRegexMatcher_EngineType() +} + +type RegexMatcher_GoogleRe2 struct { + // Google's RE2 regex engine. + // + // Deprecated: Marked as deprecated in envoy/type/matcher/v3/regex.proto. 
+ GoogleRe2 *RegexMatcher_GoogleRE2 `protobuf:"bytes,1,opt,name=google_re2,json=googleRe2,proto3,oneof"` +} + +func (*RegexMatcher_GoogleRe2) isRegexMatcher_EngineType() {} + +// Describes how to match a string and then produce a new string using a regular +// expression and a substitution string. +type RegexMatchAndSubstitute struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The regular expression used to find portions of a string (hereafter called + // the "subject string") that should be replaced. When a new string is + // produced during the substitution operation, the new string is initially + // the same as the subject string, but then all matches in the subject string + // are replaced by the substitution string. If replacing all matches isn't + // desired, regular expression anchors can be used to ensure a single match, + // so as to replace just one occurrence of a pattern. Capture groups can be + // used in the pattern to extract portions of the subject string, and then + // referenced in the substitution string. + Pattern *RegexMatcher `protobuf:"bytes,1,opt,name=pattern,proto3" json:"pattern,omitempty"` + // The string that should be substituted into matching portions of the + // subject string during a substitution operation to produce a new string. + // Capture groups in the pattern can be referenced in the substitution + // string. Note, however, that the syntax for referring to capture groups is + // defined by the chosen regular expression engine. Google's `RE2 + // `_ regular expression engine uses a + // backslash followed by the capture group number to denote a numbered + // capture group. E.g., “\1“ refers to capture group 1, and “\2“ refers + // to capture group 2. + Substitution string `protobuf:"bytes,2,opt,name=substitution,proto3" json:"substitution,omitempty"` +} + +func (x *RegexMatchAndSubstitute) Reset() { + *x = RegexMatchAndSubstitute{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_regex_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RegexMatchAndSubstitute) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RegexMatchAndSubstitute) ProtoMessage() {} + +func (x *RegexMatchAndSubstitute) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_regex_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RegexMatchAndSubstitute.ProtoReflect.Descriptor instead. +func (*RegexMatchAndSubstitute) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_regex_proto_rawDescGZIP(), []int{1} +} + +func (x *RegexMatchAndSubstitute) GetPattern() *RegexMatcher { + if x != nil { + return x.Pattern + } + return nil +} + +func (x *RegexMatchAndSubstitute) GetSubstitution() string { + if x != nil { + return x.Substitution + } + return "" +} + +// Google's `RE2 `_ regex engine. The regex string must adhere to +// the documented `syntax `_. The engine is designed +// to complete execution in linear time as well as limit the amount of memory used. +// +// Envoy supports program size checking via runtime. 
The runtime keys “re2.max_program_size.error_level“ +// and “re2.max_program_size.warn_level“ can be set to integers as the maximum program size or +// complexity that a compiled regex can have before an exception is thrown or a warning is +// logged, respectively. “re2.max_program_size.error_level“ defaults to 100, and +// “re2.max_program_size.warn_level“ has no default if unset (will not check/log a warning). +// +// Envoy emits two stats for tracking the program size of regexes: the histogram “re2.program_size“, +// which records the program size, and the counter “re2.exceeded_warn_level“, which is incremented +// each time the program size exceeds the warn level threshold. +type RegexMatcher_GoogleRE2 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This field controls the RE2 "program size" which is a rough estimate of how complex a + // compiled regex is to evaluate. A regex that has a program size greater than the configured + // value will fail to compile. In this case, the configured max program size can be increased + // or the regex can be simplified. If not specified, the default is 100. + // + // This field is deprecated; regexp validation should be performed on the management server + // instead of being done by each individual client. + // + // .. note:: + // + // Although this field is deprecated, the program size will still be checked against the + // global ``re2.max_program_size.error_level`` runtime value. + // + // Deprecated: Marked as deprecated in envoy/type/matcher/v3/regex.proto. + MaxProgramSize *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=max_program_size,json=maxProgramSize,proto3" json:"max_program_size,omitempty"` +} + +func (x *RegexMatcher_GoogleRE2) Reset() { + *x = RegexMatcher_GoogleRE2{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_regex_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RegexMatcher_GoogleRE2) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RegexMatcher_GoogleRE2) ProtoMessage() {} + +func (x *RegexMatcher_GoogleRE2) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_regex_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RegexMatcher_GoogleRE2.ProtoReflect.Descriptor instead. +func (*RegexMatcher_GoogleRE2) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_regex_proto_rawDescGZIP(), []int{0, 0} +} + +// Deprecated: Marked as deprecated in envoy/type/matcher/v3/regex.proto. 
+func (x *RegexMatcher_GoogleRE2) GetMaxProgramSize() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxProgramSize + } + return nil +} + +var File_envoy_type_matcher_v3_regex_proto protoreflect.FileDescriptor + +var file_envoy_type_matcher_v3_regex_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, + 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd6, 0x02, 0x0a, 0x0c, 0x52, + 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x0a, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x32, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, 0x45, 0x32, 0x42, 0x0b, + 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, 0x09, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, 0x65, 0x32, 0x12, 0x1d, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x65, + 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x1a, 0x92, 0x01, 0x0a, 0x09, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x52, 0x45, 0x32, 0x12, 0x53, 0x0a, 0x10, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, + 0x67, 0x72, 0x61, 0x6d, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x92, + 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x50, + 0x72, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x53, 0x69, 0x7a, 0x65, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, + 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, 0x45, 0x32, 0x3a, 0x26, 0x9a, 0xc5, + 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 
0x72, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x22, 0xc6, 0x01, 0x0a, 0x17, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x12, + 0x47, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x2f, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, + 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, + 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x0c, 0x73, 0x75, 0x62, + 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, + 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, + 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x42, 0x83, 0x01, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x52, 0x65, + 0x67, 0x65, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_matcher_v3_regex_proto_rawDescOnce sync.Once + file_envoy_type_matcher_v3_regex_proto_rawDescData = file_envoy_type_matcher_v3_regex_proto_rawDesc +) + +func file_envoy_type_matcher_v3_regex_proto_rawDescGZIP() []byte { + file_envoy_type_matcher_v3_regex_proto_rawDescOnce.Do(func() { + file_envoy_type_matcher_v3_regex_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_regex_proto_rawDescData) + }) + return file_envoy_type_matcher_v3_regex_proto_rawDescData +} + +var file_envoy_type_matcher_v3_regex_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_envoy_type_matcher_v3_regex_proto_goTypes = []interface{}{ + (*RegexMatcher)(nil), // 0: envoy.type.matcher.v3.RegexMatcher + (*RegexMatchAndSubstitute)(nil), // 1: envoy.type.matcher.v3.RegexMatchAndSubstitute + (*RegexMatcher_GoogleRE2)(nil), // 2: envoy.type.matcher.v3.RegexMatcher.GoogleRE2 + (*wrapperspb.UInt32Value)(nil), // 3: google.protobuf.UInt32Value +} +var file_envoy_type_matcher_v3_regex_proto_depIdxs = []int32{ + 2, // 0: envoy.type.matcher.v3.RegexMatcher.google_re2:type_name -> envoy.type.matcher.v3.RegexMatcher.GoogleRE2 + 0, // 1: envoy.type.matcher.v3.RegexMatchAndSubstitute.pattern:type_name -> envoy.type.matcher.v3.RegexMatcher + 
3, // 2: envoy.type.matcher.v3.RegexMatcher.GoogleRE2.max_program_size:type_name -> google.protobuf.UInt32Value + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_envoy_type_matcher_v3_regex_proto_init() } +func file_envoy_type_matcher_v3_regex_proto_init() { + if File_envoy_type_matcher_v3_regex_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_matcher_v3_regex_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RegexMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_matcher_v3_regex_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RegexMatchAndSubstitute); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_matcher_v3_regex_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RegexMatcher_GoogleRE2); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_matcher_v3_regex_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*RegexMatcher_GoogleRe2)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_matcher_v3_regex_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_matcher_v3_regex_proto_goTypes, + DependencyIndexes: file_envoy_type_matcher_v3_regex_proto_depIdxs, + MessageInfos: file_envoy_type_matcher_v3_regex_proto_msgTypes, + }.Build() + File_envoy_type_matcher_v3_regex_proto = out.File + file_envoy_type_matcher_v3_regex_proto_rawDesc = nil + file_envoy_type_matcher_v3_regex_proto_goTypes = nil + file_envoy_type_matcher_v3_regex_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.validate.go new file mode 100644 index 0000000000..bb00d0cd7a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex.pb.validate.go @@ -0,0 +1,479 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/matcher/v3/regex.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on RegexMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *RegexMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RegexMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RegexMatcherMultiError, or +// nil if none found. +func (m *RegexMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *RegexMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetRegex()) < 1 { + err := RegexMatcherValidationError{ + field: "Regex", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + switch v := m.EngineType.(type) { + case *RegexMatcher_GoogleRe2: + if v == nil { + err := RegexMatcherValidationError{ + field: "EngineType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetGoogleRe2()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RegexMatcherValidationError{ + field: "GoogleRe2", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RegexMatcherValidationError{ + field: "GoogleRe2", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGoogleRe2()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RegexMatcherValidationError{ + field: "GoogleRe2", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return RegexMatcherMultiError(errors) + } + + return nil +} + +// RegexMatcherMultiError is an error wrapping multiple validation errors +// returned by RegexMatcher.ValidateAll() if the designated constraints aren't met. +type RegexMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RegexMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RegexMatcherMultiError) AllErrors() []error { return m } + +// RegexMatcherValidationError is the validation error returned by +// RegexMatcher.Validate if the designated constraints aren't met. +type RegexMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RegexMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RegexMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RegexMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RegexMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RegexMatcherValidationError) ErrorName() string { return "RegexMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e RegexMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRegexMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RegexMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RegexMatcherValidationError{} + +// Validate checks the field values on RegexMatchAndSubstitute with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RegexMatchAndSubstitute) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RegexMatchAndSubstitute with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RegexMatchAndSubstituteMultiError, or nil if none found. +func (m *RegexMatchAndSubstitute) ValidateAll() error { + return m.validate(true) +} + +func (m *RegexMatchAndSubstitute) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetPattern() == nil { + err := RegexMatchAndSubstituteValidationError{ + field: "Pattern", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetPattern()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RegexMatchAndSubstituteValidationError{ + field: "Pattern", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RegexMatchAndSubstituteValidationError{ + field: "Pattern", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPattern()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RegexMatchAndSubstituteValidationError{ + field: "Pattern", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if !_RegexMatchAndSubstitute_Substitution_Pattern.MatchString(m.GetSubstitution()) { + err := RegexMatchAndSubstituteValidationError{ + field: "Substitution", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RegexMatchAndSubstituteMultiError(errors) + } + + return nil +} + +// RegexMatchAndSubstituteMultiError is an error wrapping multiple validation +// errors returned by RegexMatchAndSubstitute.ValidateAll() if the designated +// constraints aren't met. +type RegexMatchAndSubstituteMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RegexMatchAndSubstituteMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m RegexMatchAndSubstituteMultiError) AllErrors() []error { return m } + +// RegexMatchAndSubstituteValidationError is the validation error returned by +// RegexMatchAndSubstitute.Validate if the designated constraints aren't met. +type RegexMatchAndSubstituteValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RegexMatchAndSubstituteValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RegexMatchAndSubstituteValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RegexMatchAndSubstituteValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RegexMatchAndSubstituteValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RegexMatchAndSubstituteValidationError) ErrorName() string { + return "RegexMatchAndSubstituteValidationError" +} + +// Error satisfies the builtin error interface +func (e RegexMatchAndSubstituteValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRegexMatchAndSubstitute.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RegexMatchAndSubstituteValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RegexMatchAndSubstituteValidationError{} + +var _RegexMatchAndSubstitute_Substitution_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on RegexMatcher_GoogleRE2 with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RegexMatcher_GoogleRE2) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RegexMatcher_GoogleRE2 with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RegexMatcher_GoogleRE2MultiError, or nil if none found. 
+func (m *RegexMatcher_GoogleRE2) ValidateAll() error { + return m.validate(true) +} + +func (m *RegexMatcher_GoogleRE2) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetMaxProgramSize()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RegexMatcher_GoogleRE2ValidationError{ + field: "MaxProgramSize", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RegexMatcher_GoogleRE2ValidationError{ + field: "MaxProgramSize", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxProgramSize()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RegexMatcher_GoogleRE2ValidationError{ + field: "MaxProgramSize", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return RegexMatcher_GoogleRE2MultiError(errors) + } + + return nil +} + +// RegexMatcher_GoogleRE2MultiError is an error wrapping multiple validation +// errors returned by RegexMatcher_GoogleRE2.ValidateAll() if the designated +// constraints aren't met. +type RegexMatcher_GoogleRE2MultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RegexMatcher_GoogleRE2MultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RegexMatcher_GoogleRE2MultiError) AllErrors() []error { return m } + +// RegexMatcher_GoogleRE2ValidationError is the validation error returned by +// RegexMatcher_GoogleRE2.Validate if the designated constraints aren't met. +type RegexMatcher_GoogleRE2ValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RegexMatcher_GoogleRE2ValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RegexMatcher_GoogleRE2ValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RegexMatcher_GoogleRE2ValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RegexMatcher_GoogleRE2ValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RegexMatcher_GoogleRE2ValidationError) ErrorName() string { + return "RegexMatcher_GoogleRE2ValidationError" +} + +// Error satisfies the builtin error interface +func (e RegexMatcher_GoogleRE2ValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRegexMatcher_GoogleRE2.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RegexMatcher_GoogleRE2ValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RegexMatcher_GoogleRE2ValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex_vtproto.pb.go new file mode 100644 index 0000000000..234f071937 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/regex_vtproto.pb.go @@ -0,0 +1,246 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/regex.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *RegexMatcher_GoogleRE2) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RegexMatcher_GoogleRE2) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RegexMatcher_GoogleRE2) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxProgramSize != nil { + size, err := (*wrapperspb.UInt32Value)(m.MaxProgramSize).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RegexMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RegexMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RegexMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Regex) > 0 { + i -= len(m.Regex) + copy(dAtA[i:], m.Regex) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Regex))) + i-- + dAtA[i] = 
0x12 + } + if msg, ok := m.EngineType.(*RegexMatcher_GoogleRe2); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *RegexMatcher_GoogleRe2) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RegexMatcher_GoogleRe2) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.GoogleRe2 != nil { + size, err := m.GoogleRe2.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *RegexMatchAndSubstitute) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RegexMatchAndSubstitute) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RegexMatchAndSubstitute) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Substitution) > 0 { + i -= len(m.Substitution) + copy(dAtA[i:], m.Substitution) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Substitution))) + i-- + dAtA[i] = 0x12 + } + if m.Pattern != nil { + size, err := m.Pattern.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RegexMatcher_GoogleRE2) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxProgramSize != nil { + l = (*wrapperspb.UInt32Value)(m.MaxProgramSize).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RegexMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.EngineType.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + l = len(m.Regex) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RegexMatcher_GoogleRe2) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GoogleRe2 != nil { + l = m.GoogleRe2.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RegexMatchAndSubstitute) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pattern != nil { + l = m.Pattern.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Substitution) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.go new file mode 100644 index 0000000000..3da1aae4eb --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.go @@ -0,0 +1,204 
@@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/matcher/v3/status_code_input.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Match input indicates that matching should be done on the response status +// code. +type HttpResponseStatusCodeMatchInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *HttpResponseStatusCodeMatchInput) Reset() { + *x = HttpResponseStatusCodeMatchInput{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_status_code_input_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpResponseStatusCodeMatchInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpResponseStatusCodeMatchInput) ProtoMessage() {} + +func (x *HttpResponseStatusCodeMatchInput) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_status_code_input_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpResponseStatusCodeMatchInput.ProtoReflect.Descriptor instead. +func (*HttpResponseStatusCodeMatchInput) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_status_code_input_proto_rawDescGZIP(), []int{0} +} + +// Match input indicates that the matching should be done on the class of the +// response status code. For eg: 1xx, 2xx, 3xx, 4xx or 5xx. +type HttpResponseStatusCodeClassMatchInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *HttpResponseStatusCodeClassMatchInput) Reset() { + *x = HttpResponseStatusCodeClassMatchInput{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_status_code_input_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpResponseStatusCodeClassMatchInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpResponseStatusCodeClassMatchInput) ProtoMessage() {} + +func (x *HttpResponseStatusCodeClassMatchInput) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_status_code_input_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpResponseStatusCodeClassMatchInput.ProtoReflect.Descriptor instead. 
+func (*HttpResponseStatusCodeClassMatchInput) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_status_code_input_proto_rawDescGZIP(), []int{1} +} + +var File_envoy_type_matcher_v3_status_code_input_proto protoreflect.FileDescriptor + +var file_envoy_type_matcher_v3_status_code_input_proto_rawDesc = []byte{ + 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, + 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x22, 0x0a, 0x20, 0x48, 0x74, 0x74, 0x70, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x27, 0x0a, 0x25, 0x48, 0x74, 0x74, + 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, + 0x6f, 0x64, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, 0x70, + 0x75, 0x74, 0x42, 0x8d, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, + 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x42, 0x14, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_matcher_v3_status_code_input_proto_rawDescOnce sync.Once + file_envoy_type_matcher_v3_status_code_input_proto_rawDescData = file_envoy_type_matcher_v3_status_code_input_proto_rawDesc +) + +func file_envoy_type_matcher_v3_status_code_input_proto_rawDescGZIP() []byte { + file_envoy_type_matcher_v3_status_code_input_proto_rawDescOnce.Do(func() { + file_envoy_type_matcher_v3_status_code_input_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_status_code_input_proto_rawDescData) + }) + return file_envoy_type_matcher_v3_status_code_input_proto_rawDescData +} + +var file_envoy_type_matcher_v3_status_code_input_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_type_matcher_v3_status_code_input_proto_goTypes = []interface{}{ + (*HttpResponseStatusCodeMatchInput)(nil), // 0: envoy.type.matcher.v3.HttpResponseStatusCodeMatchInput + (*HttpResponseStatusCodeClassMatchInput)(nil), // 1: envoy.type.matcher.v3.HttpResponseStatusCodeClassMatchInput +} +var file_envoy_type_matcher_v3_status_code_input_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // 
[0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_type_matcher_v3_status_code_input_proto_init() } +func file_envoy_type_matcher_v3_status_code_input_proto_init() { + if File_envoy_type_matcher_v3_status_code_input_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_matcher_v3_status_code_input_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpResponseStatusCodeMatchInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_matcher_v3_status_code_input_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpResponseStatusCodeClassMatchInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_matcher_v3_status_code_input_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_matcher_v3_status_code_input_proto_goTypes, + DependencyIndexes: file_envoy_type_matcher_v3_status_code_input_proto_depIdxs, + MessageInfos: file_envoy_type_matcher_v3_status_code_input_proto_msgTypes, + }.Build() + File_envoy_type_matcher_v3_status_code_input_proto = out.File + file_envoy_type_matcher_v3_status_code_input_proto_rawDesc = nil + file_envoy_type_matcher_v3_status_code_input_proto_goTypes = nil + file_envoy_type_matcher_v3_status_code_input_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.validate.go new file mode 100644 index 0000000000..b09b90c13d --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input.pb.validate.go @@ -0,0 +1,247 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/matcher/v3/status_code_input.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on HttpResponseStatusCodeMatchInput with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *HttpResponseStatusCodeMatchInput) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpResponseStatusCodeMatchInput with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// HttpResponseStatusCodeMatchInputMultiError, or nil if none found. 
+func (m *HttpResponseStatusCodeMatchInput) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpResponseStatusCodeMatchInput) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return HttpResponseStatusCodeMatchInputMultiError(errors) + } + + return nil +} + +// HttpResponseStatusCodeMatchInputMultiError is an error wrapping multiple +// validation errors returned by +// HttpResponseStatusCodeMatchInput.ValidateAll() if the designated +// constraints aren't met. +type HttpResponseStatusCodeMatchInputMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpResponseStatusCodeMatchInputMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpResponseStatusCodeMatchInputMultiError) AllErrors() []error { return m } + +// HttpResponseStatusCodeMatchInputValidationError is the validation error +// returned by HttpResponseStatusCodeMatchInput.Validate if the designated +// constraints aren't met. +type HttpResponseStatusCodeMatchInputValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpResponseStatusCodeMatchInputValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpResponseStatusCodeMatchInputValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpResponseStatusCodeMatchInputValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpResponseStatusCodeMatchInputValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpResponseStatusCodeMatchInputValidationError) ErrorName() string { + return "HttpResponseStatusCodeMatchInputValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpResponseStatusCodeMatchInputValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpResponseStatusCodeMatchInput.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpResponseStatusCodeMatchInputValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpResponseStatusCodeMatchInputValidationError{} + +// Validate checks the field values on HttpResponseStatusCodeClassMatchInput +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *HttpResponseStatusCodeClassMatchInput) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpResponseStatusCodeClassMatchInput +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// HttpResponseStatusCodeClassMatchInputMultiError, or nil if none found. 
+func (m *HttpResponseStatusCodeClassMatchInput) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpResponseStatusCodeClassMatchInput) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return HttpResponseStatusCodeClassMatchInputMultiError(errors) + } + + return nil +} + +// HttpResponseStatusCodeClassMatchInputMultiError is an error wrapping +// multiple validation errors returned by +// HttpResponseStatusCodeClassMatchInput.ValidateAll() if the designated +// constraints aren't met. +type HttpResponseStatusCodeClassMatchInputMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpResponseStatusCodeClassMatchInputMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpResponseStatusCodeClassMatchInputMultiError) AllErrors() []error { return m } + +// HttpResponseStatusCodeClassMatchInputValidationError is the validation error +// returned by HttpResponseStatusCodeClassMatchInput.Validate if the +// designated constraints aren't met. +type HttpResponseStatusCodeClassMatchInputValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpResponseStatusCodeClassMatchInputValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpResponseStatusCodeClassMatchInputValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpResponseStatusCodeClassMatchInputValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpResponseStatusCodeClassMatchInputValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HttpResponseStatusCodeClassMatchInputValidationError) ErrorName() string { + return "HttpResponseStatusCodeClassMatchInputValidationError" +} + +// Error satisfies the builtin error interface +func (e HttpResponseStatusCodeClassMatchInputValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpResponseStatusCodeClassMatchInput.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpResponseStatusCodeClassMatchInputValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpResponseStatusCodeClassMatchInputValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input_vtproto.pb.go new file mode 100644 index 0000000000..156377f501 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/status_code_input_vtproto.pb.go @@ -0,0 +1,104 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/status_code_input.proto + +package matcherv3 + +import ( + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *HttpResponseStatusCodeMatchInput) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpResponseStatusCodeMatchInput) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpResponseStatusCodeMatchInput) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *HttpResponseStatusCodeClassMatchInput) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpResponseStatusCodeClassMatchInput) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpResponseStatusCodeClassMatchInput) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *HttpResponseStatusCodeMatchInput) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *HttpResponseStatusCodeClassMatchInput) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.go new file mode 100644 index 0000000000..2ebed90845 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.go @@ -0,0 +1,400 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/matcher/v3/string.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/cncf/xds/go/xds/core/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Specifies the way to match a string. 
+// [#next-free-field: 9] +type StringMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to MatchPattern: + // + // *StringMatcher_Exact + // *StringMatcher_Prefix + // *StringMatcher_Suffix + // *StringMatcher_SafeRegex + // *StringMatcher_Contains + // *StringMatcher_Custom + MatchPattern isStringMatcher_MatchPattern `protobuf_oneof:"match_pattern"` + // If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. This + // has no effect for the safe_regex match. + // For example, the matcher “data“ will match both input string “Data“ and “data“ if set to true. + IgnoreCase bool `protobuf:"varint,6,opt,name=ignore_case,json=ignoreCase,proto3" json:"ignore_case,omitempty"` +} + +func (x *StringMatcher) Reset() { + *x = StringMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_string_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StringMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringMatcher) ProtoMessage() {} + +func (x *StringMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_string_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StringMatcher.ProtoReflect.Descriptor instead. +func (*StringMatcher) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_string_proto_rawDescGZIP(), []int{0} +} + +func (m *StringMatcher) GetMatchPattern() isStringMatcher_MatchPattern { + if m != nil { + return m.MatchPattern + } + return nil +} + +func (x *StringMatcher) GetExact() string { + if x, ok := x.GetMatchPattern().(*StringMatcher_Exact); ok { + return x.Exact + } + return "" +} + +func (x *StringMatcher) GetPrefix() string { + if x, ok := x.GetMatchPattern().(*StringMatcher_Prefix); ok { + return x.Prefix + } + return "" +} + +func (x *StringMatcher) GetSuffix() string { + if x, ok := x.GetMatchPattern().(*StringMatcher_Suffix); ok { + return x.Suffix + } + return "" +} + +func (x *StringMatcher) GetSafeRegex() *RegexMatcher { + if x, ok := x.GetMatchPattern().(*StringMatcher_SafeRegex); ok { + return x.SafeRegex + } + return nil +} + +func (x *StringMatcher) GetContains() string { + if x, ok := x.GetMatchPattern().(*StringMatcher_Contains); ok { + return x.Contains + } + return "" +} + +func (x *StringMatcher) GetCustom() *v3.TypedExtensionConfig { + if x, ok := x.GetMatchPattern().(*StringMatcher_Custom); ok { + return x.Custom + } + return nil +} + +func (x *StringMatcher) GetIgnoreCase() bool { + if x != nil { + return x.IgnoreCase + } + return false +} + +type isStringMatcher_MatchPattern interface { + isStringMatcher_MatchPattern() +} + +type StringMatcher_Exact struct { + // The input string must match exactly the string specified here. + // + // Examples: + // + // * “abc“ only matches the value “abc“. + Exact string `protobuf:"bytes,1,opt,name=exact,proto3,oneof"` +} + +type StringMatcher_Prefix struct { + // The input string must have the prefix specified here. + // Note: empty prefix is not allowed, please use regex instead. 
+ // + // Examples: + // + // * “abc“ matches the value “abc.xyz“ + Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3,oneof"` +} + +type StringMatcher_Suffix struct { + // The input string must have the suffix specified here. + // Note: empty prefix is not allowed, please use regex instead. + // + // Examples: + // + // * “abc“ matches the value “xyz.abc“ + Suffix string `protobuf:"bytes,3,opt,name=suffix,proto3,oneof"` +} + +type StringMatcher_SafeRegex struct { + // The input string must match the regular expression specified here. + SafeRegex *RegexMatcher `protobuf:"bytes,5,opt,name=safe_regex,json=safeRegex,proto3,oneof"` +} + +type StringMatcher_Contains struct { + // The input string must have the substring specified here. + // Note: empty contains match is not allowed, please use regex instead. + // + // Examples: + // + // * “abc“ matches the value “xyz.abc.def“ + Contains string `protobuf:"bytes,7,opt,name=contains,proto3,oneof"` +} + +type StringMatcher_Custom struct { + // Use an extension as the matcher type. + // [#extension-category: envoy.string_matcher] + Custom *v3.TypedExtensionConfig `protobuf:"bytes,8,opt,name=custom,proto3,oneof"` +} + +func (*StringMatcher_Exact) isStringMatcher_MatchPattern() {} + +func (*StringMatcher_Prefix) isStringMatcher_MatchPattern() {} + +func (*StringMatcher_Suffix) isStringMatcher_MatchPattern() {} + +func (*StringMatcher_SafeRegex) isStringMatcher_MatchPattern() {} + +func (*StringMatcher_Contains) isStringMatcher_MatchPattern() {} + +func (*StringMatcher_Custom) isStringMatcher_MatchPattern() {} + +// Specifies a list of ways to match a string. +type ListStringMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Patterns []*StringMatcher `protobuf:"bytes,1,rep,name=patterns,proto3" json:"patterns,omitempty"` +} + +func (x *ListStringMatcher) Reset() { + *x = ListStringMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_string_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListStringMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListStringMatcher) ProtoMessage() {} + +func (x *ListStringMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_string_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListStringMatcher.ProtoReflect.Descriptor instead. 
+func (*ListStringMatcher) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_string_proto_rawDescGZIP(), []int{1} +} + +func (x *ListStringMatcher) GetPatterns() []*StringMatcher { + if x != nil { + return x.Patterns + } + return nil +} + +var File_envoy_type_matcher_v3_string_proto protoreflect.FileDescriptor + +var file_envoy_type_matcher_v3_string_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x21, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, + 0x76, 0x33, 0x2f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, + 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8e, 0x03, 0x0a, 0x0d, 0x53, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x05, 0x65, 0x78, 0x61, 0x63, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, + 0x12, 0x21, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x06, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x06, + 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x4e, 0x0a, 0x0a, 0x73, 0x61, 0x66, 0x65, 0x5f, 0x72, + 0x65, 0x67, 0x65, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x09, 0x73, 0x61, 0x66, + 0x65, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, 0x25, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x48, 0x00, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x3b, 0x0a, + 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, + 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x67, + 0x6e, 0x6f, 0x72, 
0x65, 0x5f, 0x63, 0x61, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x61, 0x73, 0x65, 0x3a, 0x27, 0x9a, 0xc5, 0x88, + 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x42, 0x14, 0x0a, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x70, 0x61, + 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, + 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x22, 0x8c, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4a, 0x0a, + 0x08, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, + 0x08, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, + 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x84, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, + 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_matcher_v3_string_proto_rawDescOnce sync.Once + file_envoy_type_matcher_v3_string_proto_rawDescData = file_envoy_type_matcher_v3_string_proto_rawDesc +) + +func file_envoy_type_matcher_v3_string_proto_rawDescGZIP() []byte { + file_envoy_type_matcher_v3_string_proto_rawDescOnce.Do(func() { + file_envoy_type_matcher_v3_string_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_string_proto_rawDescData) + }) + return file_envoy_type_matcher_v3_string_proto_rawDescData +} + +var file_envoy_type_matcher_v3_string_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_type_matcher_v3_string_proto_goTypes = []interface{}{ + (*StringMatcher)(nil), // 0: envoy.type.matcher.v3.StringMatcher + (*ListStringMatcher)(nil), // 1: envoy.type.matcher.v3.ListStringMatcher + (*RegexMatcher)(nil), // 2: envoy.type.matcher.v3.RegexMatcher + (*v3.TypedExtensionConfig)(nil), // 3: xds.core.v3.TypedExtensionConfig +} +var file_envoy_type_matcher_v3_string_proto_depIdxs = []int32{ + 2, // 0: envoy.type.matcher.v3.StringMatcher.safe_regex:type_name -> envoy.type.matcher.v3.RegexMatcher + 3, // 1: 
envoy.type.matcher.v3.StringMatcher.custom:type_name -> xds.core.v3.TypedExtensionConfig + 0, // 2: envoy.type.matcher.v3.ListStringMatcher.patterns:type_name -> envoy.type.matcher.v3.StringMatcher + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_envoy_type_matcher_v3_string_proto_init() } +func file_envoy_type_matcher_v3_string_proto_init() { + if File_envoy_type_matcher_v3_string_proto != nil { + return + } + file_envoy_type_matcher_v3_regex_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_type_matcher_v3_string_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StringMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_matcher_v3_string_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListStringMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_matcher_v3_string_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*StringMatcher_Exact)(nil), + (*StringMatcher_Prefix)(nil), + (*StringMatcher_Suffix)(nil), + (*StringMatcher_SafeRegex)(nil), + (*StringMatcher_Contains)(nil), + (*StringMatcher_Custom)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_matcher_v3_string_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_matcher_v3_string_proto_goTypes, + DependencyIndexes: file_envoy_type_matcher_v3_string_proto_depIdxs, + MessageInfos: file_envoy_type_matcher_v3_string_proto_msgTypes, + }.Build() + File_envoy_type_matcher_v3_string_proto = out.File + file_envoy_type_matcher_v3_string_proto_rawDesc = nil + file_envoy_type_matcher_v3_string_proto_goTypes = nil + file_envoy_type_matcher_v3_string_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.validate.go new file mode 100644 index 0000000000..98e3925f64 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string.pb.validate.go @@ -0,0 +1,482 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/matcher/v3/string.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on StringMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *StringMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StringMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in StringMatcherMultiError, or +// nil if none found. +func (m *StringMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *StringMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for IgnoreCase + + oneofMatchPatternPresent := false + switch v := m.MatchPattern.(type) { + case *StringMatcher_Exact: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + // no validation rules for Exact + case *StringMatcher_Prefix: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if utf8.RuneCountInString(m.GetPrefix()) < 1 { + err := StringMatcherValidationError{ + field: "Prefix", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *StringMatcher_Suffix: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if utf8.RuneCountInString(m.GetSuffix()) < 1 { + err := StringMatcherValidationError{ + field: "Suffix", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *StringMatcher_SafeRegex: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if m.GetSafeRegex() == nil { + err := StringMatcherValidationError{ + field: "SafeRegex", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetSafeRegex()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StringMatcherValidationError{ + field: "SafeRegex", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StringMatcherValidationError{ + field: "SafeRegex", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSafeRegex()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StringMatcherValidationError{ + field: "SafeRegex", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *StringMatcher_Contains: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if utf8.RuneCountInString(m.GetContains()) < 1 { + err := StringMatcherValidationError{ + field: "Contains", + reason: "value 
length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *StringMatcher_Custom: + if v == nil { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if all { + switch v := interface{}(m.GetCustom()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StringMatcherValidationError{ + field: "Custom", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StringMatcherValidationError{ + field: "Custom", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCustom()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StringMatcherValidationError{ + field: "Custom", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofMatchPatternPresent { + err := StringMatcherValidationError{ + field: "MatchPattern", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return StringMatcherMultiError(errors) + } + + return nil +} + +// StringMatcherMultiError is an error wrapping multiple validation errors +// returned by StringMatcher.ValidateAll() if the designated constraints +// aren't met. +type StringMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StringMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StringMatcherMultiError) AllErrors() []error { return m } + +// StringMatcherValidationError is the validation error returned by +// StringMatcher.Validate if the designated constraints aren't met. +type StringMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StringMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StringMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StringMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StringMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e StringMatcherValidationError) ErrorName() string { return "StringMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e StringMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStringMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StringMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StringMatcherValidationError{} + +// Validate checks the field values on ListStringMatcher with the rules defined +// in the proto definition for this message. 
If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ListStringMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListStringMatcher with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ListStringMatcherMultiError, or nil if none found. +func (m *ListStringMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *ListStringMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetPatterns()) < 1 { + err := ListStringMatcherValidationError{ + field: "Patterns", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetPatterns() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListStringMatcherValidationError{ + field: fmt.Sprintf("Patterns[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListStringMatcherValidationError{ + field: fmt.Sprintf("Patterns[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListStringMatcherValidationError{ + field: fmt.Sprintf("Patterns[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ListStringMatcherMultiError(errors) + } + + return nil +} + +// ListStringMatcherMultiError is an error wrapping multiple validation errors +// returned by ListStringMatcher.ValidateAll() if the designated constraints +// aren't met. +type ListStringMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListStringMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListStringMatcherMultiError) AllErrors() []error { return m } + +// ListStringMatcherValidationError is the validation error returned by +// ListStringMatcher.Validate if the designated constraints aren't met. +type ListStringMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListStringMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListStringMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListStringMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListStringMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ListStringMatcherValidationError) ErrorName() string { + return "ListStringMatcherValidationError" +} + +// Error satisfies the builtin error interface +func (e ListStringMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListStringMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListStringMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListStringMatcherValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string_vtproto.pb.go new file mode 100644 index 0000000000..9c016e2e73 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/string_vtproto.pb.go @@ -0,0 +1,370 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/string.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *StringMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StringMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StringMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.MatchPattern.(*StringMatcher_Custom); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*StringMatcher_Contains); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.IgnoreCase { + i-- + if m.IgnoreCase { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if msg, ok := m.MatchPattern.(*StringMatcher_SafeRegex); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*StringMatcher_Suffix); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*StringMatcher_Prefix); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*StringMatcher_Exact); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m 
*StringMatcher_Exact) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StringMatcher_Exact) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Exact) + copy(dAtA[i:], m.Exact) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Exact))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *StringMatcher_Prefix) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StringMatcher_Prefix) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *StringMatcher_Suffix) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StringMatcher_Suffix) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Suffix) + copy(dAtA[i:], m.Suffix) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Suffix))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *StringMatcher_SafeRegex) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StringMatcher_SafeRegex) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SafeRegex != nil { + size, err := m.SafeRegex.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *StringMatcher_Contains) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StringMatcher_Contains) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Contains) + copy(dAtA[i:], m.Contains) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Contains))) + i-- + dAtA[i] = 0x3a + return len(dAtA) - i, nil +} +func (m *StringMatcher_Custom) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StringMatcher_Custom) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Custom != nil { + if vtmsg, ok := interface{}(m.Custom).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Custom) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x42 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *ListStringMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListStringMatcher) 
MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListStringMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Patterns) > 0 { + for iNdEx := len(m.Patterns) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Patterns[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *StringMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.MatchPattern.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.IgnoreCase { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *StringMatcher_Exact) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Exact) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *StringMatcher_Prefix) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Prefix) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *StringMatcher_Suffix) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Suffix) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *StringMatcher_SafeRegex) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SafeRegex != nil { + l = m.SafeRegex.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *StringMatcher_Contains) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Contains) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *StringMatcher_Custom) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Custom != nil { + if size, ok := interface{}(m.Custom).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Custom) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ListStringMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Patterns) > 0 { + for _, e := range m.Patterns { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.go new file mode 100644 index 0000000000..ef844bc7f8 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.go @@ -0,0 +1,329 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/matcher/v3/struct.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// StructMatcher provides a general interface to check if a given value is matched in +// google.protobuf.Struct. It uses “path“ to retrieve the value +// from the struct and then check if it's matched to the specified value. +// +// For example, for the following Struct: +// +// .. code-block:: yaml +// +// fields: +// a: +// struct_value: +// fields: +// b: +// struct_value: +// fields: +// c: +// string_value: pro +// t: +// list_value: +// values: +// - string_value: m +// - string_value: n +// +// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" +// from the Metadata which is matched to the specified prefix match. +// +// .. code-block:: yaml +// +// path: +// - key: a +// - key: b +// - key: c +// value: +// string_match: +// prefix: pr +// +// The following StructMatcher is matched as the code will match one of the string values in the +// list at the path [a, t]. +// +// .. code-block:: yaml +// +// path: +// - key: a +// - key: t +// value: +// list_match: +// one_of: +// string_match: +// exact: m +// +// An example use of StructMatcher is to match metadata in envoy.v*.core.Node. +type StructMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The path to retrieve the Value from the Struct. + Path []*StructMatcher_PathSegment `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"` + // The StructMatcher is matched if the value retrieved by path is matched to this value. + Value *ValueMatcher `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *StructMatcher) Reset() { + *x = StructMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_struct_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StructMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StructMatcher) ProtoMessage() {} + +func (x *StructMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_struct_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StructMatcher.ProtoReflect.Descriptor instead. +func (*StructMatcher) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_struct_proto_rawDescGZIP(), []int{0} +} + +func (x *StructMatcher) GetPath() []*StructMatcher_PathSegment { + if x != nil { + return x.Path + } + return nil +} + +func (x *StructMatcher) GetValue() *ValueMatcher { + if x != nil { + return x.Value + } + return nil +} + +// Specifies the segment in a path to retrieve value from Struct. 
+type StructMatcher_PathSegment struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Segment: + // + // *StructMatcher_PathSegment_Key + Segment isStructMatcher_PathSegment_Segment `protobuf_oneof:"segment"` +} + +func (x *StructMatcher_PathSegment) Reset() { + *x = StructMatcher_PathSegment{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_struct_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StructMatcher_PathSegment) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StructMatcher_PathSegment) ProtoMessage() {} + +func (x *StructMatcher_PathSegment) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_struct_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StructMatcher_PathSegment.ProtoReflect.Descriptor instead. +func (*StructMatcher_PathSegment) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_struct_proto_rawDescGZIP(), []int{0, 0} +} + +func (m *StructMatcher_PathSegment) GetSegment() isStructMatcher_PathSegment_Segment { + if m != nil { + return m.Segment + } + return nil +} + +func (x *StructMatcher_PathSegment) GetKey() string { + if x, ok := x.GetSegment().(*StructMatcher_PathSegment_Key); ok { + return x.Key + } + return "" +} + +type isStructMatcher_PathSegment_Segment interface { + isStructMatcher_PathSegment_Segment() +} + +type StructMatcher_PathSegment_Key struct { + // If specified, use the key to retrieve the value in a Struct. 
+ Key string `protobuf:"bytes,1,opt,name=key,proto3,oneof"` +} + +func (*StructMatcher_PathSegment_Key) isStructMatcher_PathSegment_Segment() {} + +var File_envoy_type_matcher_v3_struct_proto protoreflect.FileDescriptor + +var file_envoy_type_matcher_v3_struct_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x21, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, + 0x76, 0x33, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, + 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbe, 0x02, 0x0a, 0x0d, 0x53, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x43, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x1a, 0x6f, 0x0a, 0x0b, 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x12, + 0x1b, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x33, 0x9a, 0xc5, + 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, + 0x74, 0x42, 0x0e, 0x0a, 0x07, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x03, 0xf8, 0x42, + 0x01, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x84, 0x01, 0xba, 0x80, 0xc8, + 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2e, 
0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x53, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x76, + 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_matcher_v3_struct_proto_rawDescOnce sync.Once + file_envoy_type_matcher_v3_struct_proto_rawDescData = file_envoy_type_matcher_v3_struct_proto_rawDesc +) + +func file_envoy_type_matcher_v3_struct_proto_rawDescGZIP() []byte { + file_envoy_type_matcher_v3_struct_proto_rawDescOnce.Do(func() { + file_envoy_type_matcher_v3_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_struct_proto_rawDescData) + }) + return file_envoy_type_matcher_v3_struct_proto_rawDescData +} + +var file_envoy_type_matcher_v3_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_type_matcher_v3_struct_proto_goTypes = []interface{}{ + (*StructMatcher)(nil), // 0: envoy.type.matcher.v3.StructMatcher + (*StructMatcher_PathSegment)(nil), // 1: envoy.type.matcher.v3.StructMatcher.PathSegment + (*ValueMatcher)(nil), // 2: envoy.type.matcher.v3.ValueMatcher +} +var file_envoy_type_matcher_v3_struct_proto_depIdxs = []int32{ + 1, // 0: envoy.type.matcher.v3.StructMatcher.path:type_name -> envoy.type.matcher.v3.StructMatcher.PathSegment + 2, // 1: envoy.type.matcher.v3.StructMatcher.value:type_name -> envoy.type.matcher.v3.ValueMatcher + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_type_matcher_v3_struct_proto_init() } +func file_envoy_type_matcher_v3_struct_proto_init() { + if File_envoy_type_matcher_v3_struct_proto != nil { + return + } + file_envoy_type_matcher_v3_value_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_type_matcher_v3_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StructMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_matcher_v3_struct_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StructMatcher_PathSegment); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_matcher_v3_struct_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*StructMatcher_PathSegment_Key)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_matcher_v3_struct_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_matcher_v3_struct_proto_goTypes, + DependencyIndexes: file_envoy_type_matcher_v3_struct_proto_depIdxs, + MessageInfos: 
file_envoy_type_matcher_v3_struct_proto_msgTypes, + }.Build() + File_envoy_type_matcher_v3_struct_proto = out.File + file_envoy_type_matcher_v3_struct_proto_rawDesc = nil + file_envoy_type_matcher_v3_struct_proto_goTypes = nil + file_envoy_type_matcher_v3_struct_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.validate.go new file mode 100644 index 0000000000..d69c1547f6 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct.pb.validate.go @@ -0,0 +1,364 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/matcher/v3/struct.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on StructMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *StructMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StructMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in StructMatcherMultiError, or +// nil if none found. 
+func (m *StructMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *StructMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetPath()) < 1 { + err := StructMatcherValidationError{ + field: "Path", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetPath() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StructMatcherValidationError{ + field: fmt.Sprintf("Path[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StructMatcherValidationError{ + field: fmt.Sprintf("Path[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StructMatcherValidationError{ + field: fmt.Sprintf("Path[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if m.GetValue() == nil { + err := StructMatcherValidationError{ + field: "Value", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetValue()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StructMatcherValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StructMatcherValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValue()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StructMatcherValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return StructMatcherMultiError(errors) + } + + return nil +} + +// StructMatcherMultiError is an error wrapping multiple validation errors +// returned by StructMatcher.ValidateAll() if the designated constraints +// aren't met. +type StructMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StructMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StructMatcherMultiError) AllErrors() []error { return m } + +// StructMatcherValidationError is the validation error returned by +// StructMatcher.Validate if the designated constraints aren't met. +type StructMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StructMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StructMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StructMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e StructMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e StructMatcherValidationError) ErrorName() string { return "StructMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e StructMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStructMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StructMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StructMatcherValidationError{} + +// Validate checks the field values on StructMatcher_PathSegment with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *StructMatcher_PathSegment) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StructMatcher_PathSegment with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// StructMatcher_PathSegmentMultiError, or nil if none found. +func (m *StructMatcher_PathSegment) ValidateAll() error { + return m.validate(true) +} + +func (m *StructMatcher_PathSegment) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofSegmentPresent := false + switch v := m.Segment.(type) { + case *StructMatcher_PathSegment_Key: + if v == nil { + err := StructMatcher_PathSegmentValidationError{ + field: "Segment", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofSegmentPresent = true + + if utf8.RuneCountInString(m.GetKey()) < 1 { + err := StructMatcher_PathSegmentValidationError{ + field: "Key", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + default: + _ = v // ensures v is used + } + if !oneofSegmentPresent { + err := StructMatcher_PathSegmentValidationError{ + field: "Segment", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return StructMatcher_PathSegmentMultiError(errors) + } + + return nil +} + +// StructMatcher_PathSegmentMultiError is an error wrapping multiple validation +// errors returned by StructMatcher_PathSegment.ValidateAll() if the +// designated constraints aren't met. +type StructMatcher_PathSegmentMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StructMatcher_PathSegmentMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StructMatcher_PathSegmentMultiError) AllErrors() []error { return m } + +// StructMatcher_PathSegmentValidationError is the validation error returned by +// StructMatcher_PathSegment.Validate if the designated constraints aren't met. +type StructMatcher_PathSegmentValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e StructMatcher_PathSegmentValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StructMatcher_PathSegmentValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StructMatcher_PathSegmentValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StructMatcher_PathSegmentValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e StructMatcher_PathSegmentValidationError) ErrorName() string { + return "StructMatcher_PathSegmentValidationError" +} + +// Error satisfies the builtin error interface +func (e StructMatcher_PathSegmentValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStructMatcher_PathSegment.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StructMatcher_PathSegmentValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StructMatcher_PathSegmentValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct_vtproto.pb.go new file mode 100644 index 0000000000..d36052b836 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/struct_vtproto.pb.go @@ -0,0 +1,171 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/struct.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *StructMatcher_PathSegment) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StructMatcher_PathSegment) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StructMatcher_PathSegment) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Segment.(*StructMatcher_PathSegment_Key); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *StructMatcher_PathSegment_Key) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StructMatcher_PathSegment_Key) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *StructMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StructMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StructMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Value != nil { + size, err := m.Value.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Path) > 0 { + for iNdEx := len(m.Path) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Path[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func (m *StructMatcher_PathSegment) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Segment.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *StructMatcher_PathSegment_Key) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *StructMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Path) > 0 { + for _, e := range m.Path { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Value != nil { + l = m.Value.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.go 
b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.go new file mode 100644 index 0000000000..7ba125cf30 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.go @@ -0,0 +1,552 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/matcher/v3/value.proto + +package matcherv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Specifies the way to match a ProtobufWkt::Value. Primitive values and ListValue are supported. +// StructValue is not supported and is always not matched. +// [#next-free-field: 8] +type ValueMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies how to match a value. + // + // Types that are assignable to MatchPattern: + // + // *ValueMatcher_NullMatch_ + // *ValueMatcher_DoubleMatch + // *ValueMatcher_StringMatch + // *ValueMatcher_BoolMatch + // *ValueMatcher_PresentMatch + // *ValueMatcher_ListMatch + // *ValueMatcher_OrMatch + MatchPattern isValueMatcher_MatchPattern `protobuf_oneof:"match_pattern"` +} + +func (x *ValueMatcher) Reset() { + *x = ValueMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_value_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValueMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValueMatcher) ProtoMessage() {} + +func (x *ValueMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_value_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValueMatcher.ProtoReflect.Descriptor instead. 
+func (*ValueMatcher) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_value_proto_rawDescGZIP(), []int{0} +} + +func (m *ValueMatcher) GetMatchPattern() isValueMatcher_MatchPattern { + if m != nil { + return m.MatchPattern + } + return nil +} + +func (x *ValueMatcher) GetNullMatch() *ValueMatcher_NullMatch { + if x, ok := x.GetMatchPattern().(*ValueMatcher_NullMatch_); ok { + return x.NullMatch + } + return nil +} + +func (x *ValueMatcher) GetDoubleMatch() *DoubleMatcher { + if x, ok := x.GetMatchPattern().(*ValueMatcher_DoubleMatch); ok { + return x.DoubleMatch + } + return nil +} + +func (x *ValueMatcher) GetStringMatch() *StringMatcher { + if x, ok := x.GetMatchPattern().(*ValueMatcher_StringMatch); ok { + return x.StringMatch + } + return nil +} + +func (x *ValueMatcher) GetBoolMatch() bool { + if x, ok := x.GetMatchPattern().(*ValueMatcher_BoolMatch); ok { + return x.BoolMatch + } + return false +} + +func (x *ValueMatcher) GetPresentMatch() bool { + if x, ok := x.GetMatchPattern().(*ValueMatcher_PresentMatch); ok { + return x.PresentMatch + } + return false +} + +func (x *ValueMatcher) GetListMatch() *ListMatcher { + if x, ok := x.GetMatchPattern().(*ValueMatcher_ListMatch); ok { + return x.ListMatch + } + return nil +} + +func (x *ValueMatcher) GetOrMatch() *OrMatcher { + if x, ok := x.GetMatchPattern().(*ValueMatcher_OrMatch); ok { + return x.OrMatch + } + return nil +} + +type isValueMatcher_MatchPattern interface { + isValueMatcher_MatchPattern() +} + +type ValueMatcher_NullMatch_ struct { + // If specified, a match occurs if and only if the target value is a NullValue. + NullMatch *ValueMatcher_NullMatch `protobuf:"bytes,1,opt,name=null_match,json=nullMatch,proto3,oneof"` +} + +type ValueMatcher_DoubleMatch struct { + // If specified, a match occurs if and only if the target value is a double value and is + // matched to this field. + DoubleMatch *DoubleMatcher `protobuf:"bytes,2,opt,name=double_match,json=doubleMatch,proto3,oneof"` +} + +type ValueMatcher_StringMatch struct { + // If specified, a match occurs if and only if the target value is a string value and is + // matched to this field. + StringMatch *StringMatcher `protobuf:"bytes,3,opt,name=string_match,json=stringMatch,proto3,oneof"` +} + +type ValueMatcher_BoolMatch struct { + // If specified, a match occurs if and only if the target value is a bool value and is equal + // to this field. + BoolMatch bool `protobuf:"varint,4,opt,name=bool_match,json=boolMatch,proto3,oneof"` +} + +type ValueMatcher_PresentMatch struct { + // If specified, value match will be performed based on whether the path is referring to a + // valid primitive value in the metadata. If the path is referring to a non-primitive value, + // the result is always not matched. + PresentMatch bool `protobuf:"varint,5,opt,name=present_match,json=presentMatch,proto3,oneof"` +} + +type ValueMatcher_ListMatch struct { + // If specified, a match occurs if and only if the target value is a list value and + // is matched to this field. + ListMatch *ListMatcher `protobuf:"bytes,6,opt,name=list_match,json=listMatch,proto3,oneof"` +} + +type ValueMatcher_OrMatch struct { + // If specified, a match occurs if and only if any of the alternatives in the match accept the value. 
+ OrMatch *OrMatcher `protobuf:"bytes,7,opt,name=or_match,json=orMatch,proto3,oneof"` +} + +func (*ValueMatcher_NullMatch_) isValueMatcher_MatchPattern() {} + +func (*ValueMatcher_DoubleMatch) isValueMatcher_MatchPattern() {} + +func (*ValueMatcher_StringMatch) isValueMatcher_MatchPattern() {} + +func (*ValueMatcher_BoolMatch) isValueMatcher_MatchPattern() {} + +func (*ValueMatcher_PresentMatch) isValueMatcher_MatchPattern() {} + +func (*ValueMatcher_ListMatch) isValueMatcher_MatchPattern() {} + +func (*ValueMatcher_OrMatch) isValueMatcher_MatchPattern() {} + +// Specifies the way to match a list value. +type ListMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to MatchPattern: + // + // *ListMatcher_OneOf + MatchPattern isListMatcher_MatchPattern `protobuf_oneof:"match_pattern"` +} + +func (x *ListMatcher) Reset() { + *x = ListMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_value_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListMatcher) ProtoMessage() {} + +func (x *ListMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_value_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListMatcher.ProtoReflect.Descriptor instead. +func (*ListMatcher) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_value_proto_rawDescGZIP(), []int{1} +} + +func (m *ListMatcher) GetMatchPattern() isListMatcher_MatchPattern { + if m != nil { + return m.MatchPattern + } + return nil +} + +func (x *ListMatcher) GetOneOf() *ValueMatcher { + if x, ok := x.GetMatchPattern().(*ListMatcher_OneOf); ok { + return x.OneOf + } + return nil +} + +type isListMatcher_MatchPattern interface { + isListMatcher_MatchPattern() +} + +type ListMatcher_OneOf struct { + // If specified, at least one of the values in the list must match the value specified. + OneOf *ValueMatcher `protobuf:"bytes,1,opt,name=one_of,json=oneOf,proto3,oneof"` +} + +func (*ListMatcher_OneOf) isListMatcher_MatchPattern() {} + +// Specifies a list of alternatives for the match. +type OrMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ValueMatchers []*ValueMatcher `protobuf:"bytes,1,rep,name=value_matchers,json=valueMatchers,proto3" json:"value_matchers,omitempty"` +} + +func (x *OrMatcher) Reset() { + *x = OrMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_value_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OrMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OrMatcher) ProtoMessage() {} + +func (x *OrMatcher) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_value_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OrMatcher.ProtoReflect.Descriptor instead. 
+func (*OrMatcher) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_value_proto_rawDescGZIP(), []int{2} +} + +func (x *OrMatcher) GetValueMatchers() []*ValueMatcher { + if x != nil { + return x.ValueMatchers + } + return nil +} + +// NullMatch is an empty message to specify a null value. +type ValueMatcher_NullMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ValueMatcher_NullMatch) Reset() { + *x = ValueMatcher_NullMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_matcher_v3_value_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValueMatcher_NullMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValueMatcher_NullMatch) ProtoMessage() {} + +func (x *ValueMatcher_NullMatch) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_matcher_v3_value_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValueMatcher_NullMatch.ProtoReflect.Descriptor instead. +func (*ValueMatcher_NullMatch) Descriptor() ([]byte, []int) { + return file_envoy_type_matcher_v3_value_proto_rawDescGZIP(), []int{0, 0} +} + +var File_envoy_type_matcher_v3_value_proto protoreflect.FileDescriptor + +var file_envoy_type_matcher_v3_value_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, + 0x33, 0x2f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbd, 0x04, + 0x0a, 0x0c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4e, + 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x49, + 0x0a, 0x0c, 0x64, 0x6f, 0x75, 
0x62, 0x6c, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x75, + 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, + 0x75, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x49, 0x0a, 0x0c, 0x73, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0d, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0c, + 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x43, 0x0a, 0x0a, + 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x12, 0x3d, 0x0a, 0x08, 0x6f, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x6f, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x1a, 0x3d, 0x0a, 0x09, 0x4e, 0x75, 0x6c, 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x3a, 0x30, 0x9a, + 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x3a, + 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x14, 0x0a, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x88, 0x01, + 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x3c, 0x0a, + 0x06, 0x6f, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x48, 0x00, 0x52, 0x05, 0x6f, 0x6e, 0x65, 0x4f, 0x66, 0x3a, 0x25, 0x9a, 0xc5, 0x88, + 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4c, 0x69, 
0x73, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x42, 0x14, 0x0a, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x70, 0x61, 0x74, 0x74, + 0x65, 0x72, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x61, 0x0a, 0x09, 0x4f, 0x72, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x0e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x0d, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x42, 0x83, 0x01, 0xba, 0x80, + 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x76, + 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_matcher_v3_value_proto_rawDescOnce sync.Once + file_envoy_type_matcher_v3_value_proto_rawDescData = file_envoy_type_matcher_v3_value_proto_rawDesc +) + +func file_envoy_type_matcher_v3_value_proto_rawDescGZIP() []byte { + file_envoy_type_matcher_v3_value_proto_rawDescOnce.Do(func() { + file_envoy_type_matcher_v3_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_matcher_v3_value_proto_rawDescData) + }) + return file_envoy_type_matcher_v3_value_proto_rawDescData +} + +var file_envoy_type_matcher_v3_value_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_envoy_type_matcher_v3_value_proto_goTypes = []interface{}{ + (*ValueMatcher)(nil), // 0: envoy.type.matcher.v3.ValueMatcher + (*ListMatcher)(nil), // 1: envoy.type.matcher.v3.ListMatcher + (*OrMatcher)(nil), // 2: envoy.type.matcher.v3.OrMatcher + (*ValueMatcher_NullMatch)(nil), // 3: envoy.type.matcher.v3.ValueMatcher.NullMatch + (*DoubleMatcher)(nil), // 4: envoy.type.matcher.v3.DoubleMatcher + (*StringMatcher)(nil), // 5: envoy.type.matcher.v3.StringMatcher +} +var file_envoy_type_matcher_v3_value_proto_depIdxs = []int32{ + 3, // 0: envoy.type.matcher.v3.ValueMatcher.null_match:type_name -> envoy.type.matcher.v3.ValueMatcher.NullMatch + 4, // 1: envoy.type.matcher.v3.ValueMatcher.double_match:type_name -> envoy.type.matcher.v3.DoubleMatcher + 5, // 2: envoy.type.matcher.v3.ValueMatcher.string_match:type_name -> envoy.type.matcher.v3.StringMatcher + 1, // 3: envoy.type.matcher.v3.ValueMatcher.list_match:type_name -> envoy.type.matcher.v3.ListMatcher + 2, // 4: envoy.type.matcher.v3.ValueMatcher.or_match:type_name -> envoy.type.matcher.v3.OrMatcher + 0, // 5: envoy.type.matcher.v3.ListMatcher.one_of:type_name -> envoy.type.matcher.v3.ValueMatcher + 0, // 6: envoy.type.matcher.v3.OrMatcher.value_matchers:type_name -> envoy.type.matcher.v3.ValueMatcher + 
7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_envoy_type_matcher_v3_value_proto_init() } +func file_envoy_type_matcher_v3_value_proto_init() { + if File_envoy_type_matcher_v3_value_proto != nil { + return + } + file_envoy_type_matcher_v3_number_proto_init() + file_envoy_type_matcher_v3_string_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_type_matcher_v3_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValueMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_matcher_v3_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_matcher_v3_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OrMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_matcher_v3_value_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValueMatcher_NullMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_matcher_v3_value_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*ValueMatcher_NullMatch_)(nil), + (*ValueMatcher_DoubleMatch)(nil), + (*ValueMatcher_StringMatch)(nil), + (*ValueMatcher_BoolMatch)(nil), + (*ValueMatcher_PresentMatch)(nil), + (*ValueMatcher_ListMatch)(nil), + (*ValueMatcher_OrMatch)(nil), + } + file_envoy_type_matcher_v3_value_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*ListMatcher_OneOf)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_matcher_v3_value_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_matcher_v3_value_proto_goTypes, + DependencyIndexes: file_envoy_type_matcher_v3_value_proto_depIdxs, + MessageInfos: file_envoy_type_matcher_v3_value_proto_msgTypes, + }.Build() + File_envoy_type_matcher_v3_value_proto = out.File + file_envoy_type_matcher_v3_value_proto_rawDesc = nil + file_envoy_type_matcher_v3_value_proto_goTypes = nil + file_envoy_type_matcher_v3_value_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.validate.go new file mode 100644 index 0000000000..9814aa0dba --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value.pb.validate.go @@ -0,0 +1,791 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/type/matcher/v3/value.proto + +package matcherv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ValueMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ValueMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ValueMatcher with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ValueMatcherMultiError, or +// nil if none found. +func (m *ValueMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *ValueMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofMatchPatternPresent := false + switch v := m.MatchPattern.(type) { + case *ValueMatcher_NullMatch_: + if v == nil { + err := ValueMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if all { + switch v := interface{}(m.GetNullMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ValueMatcherValidationError{ + field: "NullMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ValueMatcherValidationError{ + field: "NullMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNullMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ValueMatcherValidationError{ + field: "NullMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ValueMatcher_DoubleMatch: + if v == nil { + err := ValueMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if all { + switch v := interface{}(m.GetDoubleMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ValueMatcherValidationError{ + field: "DoubleMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ValueMatcherValidationError{ + field: "DoubleMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDoubleMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ValueMatcherValidationError{ + field: "DoubleMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ValueMatcher_StringMatch: + if v == nil { + err := 
ValueMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if all { + switch v := interface{}(m.GetStringMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ValueMatcherValidationError{ + field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ValueMatcherValidationError{ + field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStringMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ValueMatcherValidationError{ + field: "StringMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ValueMatcher_BoolMatch: + if v == nil { + err := ValueMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + // no validation rules for BoolMatch + case *ValueMatcher_PresentMatch: + if v == nil { + err := ValueMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + // no validation rules for PresentMatch + case *ValueMatcher_ListMatch: + if v == nil { + err := ValueMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if all { + switch v := interface{}(m.GetListMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ValueMatcherValidationError{ + field: "ListMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ValueMatcherValidationError{ + field: "ListMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetListMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ValueMatcherValidationError{ + field: "ListMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *ValueMatcher_OrMatch: + if v == nil { + err := ValueMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if all { + switch v := interface{}(m.GetOrMatch()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ValueMatcherValidationError{ + field: "OrMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ValueMatcherValidationError{ + field: "OrMatch", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOrMatch()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != 
nil { + return ValueMatcherValidationError{ + field: "OrMatch", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofMatchPatternPresent { + err := ValueMatcherValidationError{ + field: "MatchPattern", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ValueMatcherMultiError(errors) + } + + return nil +} + +// ValueMatcherMultiError is an error wrapping multiple validation errors +// returned by ValueMatcher.ValidateAll() if the designated constraints aren't met. +type ValueMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ValueMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ValueMatcherMultiError) AllErrors() []error { return m } + +// ValueMatcherValidationError is the validation error returned by +// ValueMatcher.Validate if the designated constraints aren't met. +type ValueMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ValueMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ValueMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ValueMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ValueMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ValueMatcherValidationError) ErrorName() string { return "ValueMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e ValueMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sValueMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ValueMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ValueMatcherValidationError{} + +// Validate checks the field values on ListMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ListMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ListMatcherMultiError, or +// nil if none found. 
+func (m *ListMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *ListMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofMatchPatternPresent := false + switch v := m.MatchPattern.(type) { + case *ListMatcher_OneOf: + if v == nil { + err := ListMatcherValidationError{ + field: "MatchPattern", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofMatchPatternPresent = true + + if all { + switch v := interface{}(m.GetOneOf()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListMatcherValidationError{ + field: "OneOf", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListMatcherValidationError{ + field: "OneOf", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOneOf()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListMatcherValidationError{ + field: "OneOf", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofMatchPatternPresent { + err := ListMatcherValidationError{ + field: "MatchPattern", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return ListMatcherMultiError(errors) + } + + return nil +} + +// ListMatcherMultiError is an error wrapping multiple validation errors +// returned by ListMatcher.ValidateAll() if the designated constraints aren't met. +type ListMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListMatcherMultiError) AllErrors() []error { return m } + +// ListMatcherValidationError is the validation error returned by +// ListMatcher.Validate if the designated constraints aren't met. +type ListMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ListMatcherValidationError) ErrorName() string { return "ListMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e ListMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListMatcherValidationError{} + +// Validate checks the field values on OrMatcher with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *OrMatcher) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on OrMatcher with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in OrMatcherMultiError, or nil +// if none found. +func (m *OrMatcher) ValidateAll() error { + return m.validate(true) +} + +func (m *OrMatcher) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(m.GetValueMatchers()) < 2 { + err := OrMatcherValidationError{ + field: "ValueMatchers", + reason: "value must contain at least 2 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetValueMatchers() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OrMatcherValidationError{ + field: fmt.Sprintf("ValueMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OrMatcherValidationError{ + field: fmt.Sprintf("ValueMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OrMatcherValidationError{ + field: fmt.Sprintf("ValueMatchers[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return OrMatcherMultiError(errors) + } + + return nil +} + +// OrMatcherMultiError is an error wrapping multiple validation errors returned +// by OrMatcher.ValidateAll() if the designated constraints aren't met. +type OrMatcherMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m OrMatcherMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m OrMatcherMultiError) AllErrors() []error { return m } + +// OrMatcherValidationError is the validation error returned by +// OrMatcher.Validate if the designated constraints aren't met. +type OrMatcherValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e OrMatcherValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e OrMatcherValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e OrMatcherValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e OrMatcherValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e OrMatcherValidationError) ErrorName() string { return "OrMatcherValidationError" } + +// Error satisfies the builtin error interface +func (e OrMatcherValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sOrMatcher.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = OrMatcherValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = OrMatcherValidationError{} + +// Validate checks the field values on ValueMatcher_NullMatch with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ValueMatcher_NullMatch) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ValueMatcher_NullMatch with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ValueMatcher_NullMatchMultiError, or nil if none found. +func (m *ValueMatcher_NullMatch) ValidateAll() error { + return m.validate(true) +} + +func (m *ValueMatcher_NullMatch) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return ValueMatcher_NullMatchMultiError(errors) + } + + return nil +} + +// ValueMatcher_NullMatchMultiError is an error wrapping multiple validation +// errors returned by ValueMatcher_NullMatch.ValidateAll() if the designated +// constraints aren't met. +type ValueMatcher_NullMatchMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ValueMatcher_NullMatchMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ValueMatcher_NullMatchMultiError) AllErrors() []error { return m } + +// ValueMatcher_NullMatchValidationError is the validation error returned by +// ValueMatcher_NullMatch.Validate if the designated constraints aren't met. +type ValueMatcher_NullMatchValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ValueMatcher_NullMatchValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ValueMatcher_NullMatchValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ValueMatcher_NullMatchValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ValueMatcher_NullMatchValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ValueMatcher_NullMatchValidationError) ErrorName() string { + return "ValueMatcher_NullMatchValidationError" +} + +// Error satisfies the builtin error interface +func (e ValueMatcher_NullMatchValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sValueMatcher_NullMatch.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ValueMatcher_NullMatchValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ValueMatcher_NullMatchValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value_vtproto.pb.go new file mode 100644 index 0000000000..852f5cead1 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3/value_vtproto.pb.go @@ -0,0 +1,545 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/matcher/v3/value.proto + +package matcherv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ValueMatcher_NullMatch) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValueMatcher_NullMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValueMatcher_NullMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *ValueMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValueMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValueMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.MatchPattern.(*ValueMatcher_OrMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*ValueMatcher_ListMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*ValueMatcher_PresentMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err 
!= nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*ValueMatcher_BoolMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*ValueMatcher_StringMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*ValueMatcher_DoubleMatch); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.MatchPattern.(*ValueMatcher_NullMatch_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *ValueMatcher_NullMatch_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValueMatcher_NullMatch_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NullMatch != nil { + size, err := m.NullMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *ValueMatcher_DoubleMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValueMatcher_DoubleMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DoubleMatch != nil { + size, err := m.DoubleMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *ValueMatcher_StringMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValueMatcher_StringMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StringMatch != nil { + size, err := m.StringMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *ValueMatcher_BoolMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValueMatcher_BoolMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.BoolMatch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *ValueMatcher_PresentMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValueMatcher_PresentMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.PresentMatch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + return len(dAtA) - i, nil +} +func (m *ValueMatcher_ListMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m 
*ValueMatcher_ListMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListMatch != nil { + size, err := m.ListMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *ValueMatcher_OrMatch) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ValueMatcher_OrMatch) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OrMatch != nil { + size, err := m.OrMatch.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *ListMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.MatchPattern.(*ListMatcher_OneOf); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *ListMatcher_OneOf) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListMatcher_OneOf) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OneOf != nil { + size, err := m.OneOf.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *OrMatcher) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OrMatcher) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *OrMatcher) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ValueMatchers) > 0 { + for iNdEx := len(m.ValueMatchers) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ValueMatchers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ValueMatcher_NullMatch) SizeVT() (n 
int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ValueMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.MatchPattern.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ValueMatcher_NullMatch_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NullMatch != nil { + l = m.NullMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ValueMatcher_DoubleMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DoubleMatch != nil { + l = m.DoubleMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ValueMatcher_StringMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StringMatch != nil { + l = m.StringMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ValueMatcher_BoolMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *ValueMatcher_PresentMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *ValueMatcher_ListMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListMatch != nil { + l = m.ListMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ValueMatcher_OrMatch) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OrMatch != nil { + l = m.OrMatch.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *ListMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.MatchPattern.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *ListMatcher_OneOf) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OneOf != nil { + l = m.OneOf.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *OrMatcher) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ValueMatchers) > 0 { + for _, e := range m.ValueMatchers { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.go new file mode 100644 index 0000000000..7a6ac07a53 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.go @@ -0,0 +1,683 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/metadata/v3/metadata.proto + +package metadatav3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// MetadataKey provides a general interface using “key“ and “path“ to retrieve value from +// :ref:`Metadata `. +// +// For example, for the following Metadata: +// +// .. code-block:: yaml +// +// filter_metadata: +// envoy.xxx: +// prop: +// foo: bar +// xyz: +// hello: envoy +// +// The following MetadataKey will retrieve a string value "bar" from the Metadata. +// +// .. code-block:: yaml +// +// key: envoy.xxx +// path: +// - key: prop +// - key: foo +type MetadataKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The key name of Metadata to retrieve the Struct from the metadata. + // Typically, it represents a builtin subsystem or custom extension. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The path to retrieve the Value from the Struct. It can be a prefix or a full path, + // e.g. “[prop, xyz]“ for a struct or “[prop, foo]“ for a string in the example, + // which depends on the particular scenario. + // + // Note: Due to that only the key type segment is supported, the path can not specify a list + // unless the list is the last segment. + Path []*MetadataKey_PathSegment `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"` +} + +func (x *MetadataKey) Reset() { + *x = MetadataKey{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataKey) ProtoMessage() {} + +func (x *MetadataKey) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataKey.ProtoReflect.Descriptor instead. +func (*MetadataKey) Descriptor() ([]byte, []int) { + return file_envoy_type_metadata_v3_metadata_proto_rawDescGZIP(), []int{0} +} + +func (x *MetadataKey) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *MetadataKey) GetPath() []*MetadataKey_PathSegment { + if x != nil { + return x.Path + } + return nil +} + +// Describes what kind of metadata. 
+type MetadataKind struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Kind: + // + // *MetadataKind_Request_ + // *MetadataKind_Route_ + // *MetadataKind_Cluster_ + // *MetadataKind_Host_ + Kind isMetadataKind_Kind `protobuf_oneof:"kind"` +} + +func (x *MetadataKind) Reset() { + *x = MetadataKind{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataKind) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataKind) ProtoMessage() {} + +func (x *MetadataKind) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataKind.ProtoReflect.Descriptor instead. +func (*MetadataKind) Descriptor() ([]byte, []int) { + return file_envoy_type_metadata_v3_metadata_proto_rawDescGZIP(), []int{1} +} + +func (m *MetadataKind) GetKind() isMetadataKind_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (x *MetadataKind) GetRequest() *MetadataKind_Request { + if x, ok := x.GetKind().(*MetadataKind_Request_); ok { + return x.Request + } + return nil +} + +func (x *MetadataKind) GetRoute() *MetadataKind_Route { + if x, ok := x.GetKind().(*MetadataKind_Route_); ok { + return x.Route + } + return nil +} + +func (x *MetadataKind) GetCluster() *MetadataKind_Cluster { + if x, ok := x.GetKind().(*MetadataKind_Cluster_); ok { + return x.Cluster + } + return nil +} + +func (x *MetadataKind) GetHost() *MetadataKind_Host { + if x, ok := x.GetKind().(*MetadataKind_Host_); ok { + return x.Host + } + return nil +} + +type isMetadataKind_Kind interface { + isMetadataKind_Kind() +} + +type MetadataKind_Request_ struct { + // Request kind of metadata. + Request *MetadataKind_Request `protobuf:"bytes,1,opt,name=request,proto3,oneof"` +} + +type MetadataKind_Route_ struct { + // Route kind of metadata. + Route *MetadataKind_Route `protobuf:"bytes,2,opt,name=route,proto3,oneof"` +} + +type MetadataKind_Cluster_ struct { + // Cluster kind of metadata. + Cluster *MetadataKind_Cluster `protobuf:"bytes,3,opt,name=cluster,proto3,oneof"` +} + +type MetadataKind_Host_ struct { + // Host kind of metadata. + Host *MetadataKind_Host `protobuf:"bytes,4,opt,name=host,proto3,oneof"` +} + +func (*MetadataKind_Request_) isMetadataKind_Kind() {} + +func (*MetadataKind_Route_) isMetadataKind_Kind() {} + +func (*MetadataKind_Cluster_) isMetadataKind_Kind() {} + +func (*MetadataKind_Host_) isMetadataKind_Kind() {} + +// Specifies the segment in a path to retrieve value from Metadata. +// Currently it is only supported to specify the key, i.e. field name, as one segment of a path. 
+type MetadataKey_PathSegment struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Segment: + // + // *MetadataKey_PathSegment_Key + Segment isMetadataKey_PathSegment_Segment `protobuf_oneof:"segment"` +} + +func (x *MetadataKey_PathSegment) Reset() { + *x = MetadataKey_PathSegment{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataKey_PathSegment) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataKey_PathSegment) ProtoMessage() {} + +func (x *MetadataKey_PathSegment) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataKey_PathSegment.ProtoReflect.Descriptor instead. +func (*MetadataKey_PathSegment) Descriptor() ([]byte, []int) { + return file_envoy_type_metadata_v3_metadata_proto_rawDescGZIP(), []int{0, 0} +} + +func (m *MetadataKey_PathSegment) GetSegment() isMetadataKey_PathSegment_Segment { + if m != nil { + return m.Segment + } + return nil +} + +func (x *MetadataKey_PathSegment) GetKey() string { + if x, ok := x.GetSegment().(*MetadataKey_PathSegment_Key); ok { + return x.Key + } + return "" +} + +type isMetadataKey_PathSegment_Segment interface { + isMetadataKey_PathSegment_Segment() +} + +type MetadataKey_PathSegment_Key struct { + // If specified, use the key to retrieve the value in a Struct. + Key string `protobuf:"bytes,1,opt,name=key,proto3,oneof"` +} + +func (*MetadataKey_PathSegment_Key) isMetadataKey_PathSegment_Segment() {} + +// Represents dynamic metadata associated with the request. +type MetadataKind_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *MetadataKind_Request) Reset() { + *x = MetadataKind_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataKind_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataKind_Request) ProtoMessage() {} + +func (x *MetadataKind_Request) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataKind_Request.ProtoReflect.Descriptor instead. +func (*MetadataKind_Request) Descriptor() ([]byte, []int) { + return file_envoy_type_metadata_v3_metadata_proto_rawDescGZIP(), []int{1, 0} +} + +// Represents metadata from :ref:`the route`. 
+type MetadataKind_Route struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *MetadataKind_Route) Reset() { + *x = MetadataKind_Route{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataKind_Route) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataKind_Route) ProtoMessage() {} + +func (x *MetadataKind_Route) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataKind_Route.ProtoReflect.Descriptor instead. +func (*MetadataKind_Route) Descriptor() ([]byte, []int) { + return file_envoy_type_metadata_v3_metadata_proto_rawDescGZIP(), []int{1, 1} +} + +// Represents metadata from :ref:`the upstream cluster`. +type MetadataKind_Cluster struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *MetadataKind_Cluster) Reset() { + *x = MetadataKind_Cluster{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataKind_Cluster) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataKind_Cluster) ProtoMessage() {} + +func (x *MetadataKind_Cluster) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataKind_Cluster.ProtoReflect.Descriptor instead. +func (*MetadataKind_Cluster) Descriptor() ([]byte, []int) { + return file_envoy_type_metadata_v3_metadata_proto_rawDescGZIP(), []int{1, 2} +} + +// Represents metadata from :ref:`the upstream +// host`. +type MetadataKind_Host struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *MetadataKind_Host) Reset() { + *x = MetadataKind_Host{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataKind_Host) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataKind_Host) ProtoMessage() {} + +func (x *MetadataKind_Host) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_metadata_v3_metadata_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataKind_Host.ProtoReflect.Descriptor instead. 
+func (*MetadataKind_Host) Descriptor() ([]byte, []int) { + return file_envoy_type_metadata_v3_metadata_proto_rawDescGZIP(), []int{1, 3} +} + +var File_envoy_type_metadata_v3_metadata_proto protoreflect.FileDescriptor + +var file_envoy_type_metadata_v3_metadata_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x16, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x33, 0x1a, + 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x95, 0x02, 0x0a, 0x0b, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x12, 0x19, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4d, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x67, + 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x1a, 0x71, 0x0a, 0x0b, 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x67, 0x6d, + 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x32, 0x2e, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x2e, 0x50, 0x61, 0x74, 0x68, + 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x0e, 0x0a, 0x07, 0x73, 0x65, 0x67, 0x6d, 0x65, + 0x6e, 0x74, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, + 0x65, 0x79, 0x22, 0xd2, 0x04, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, + 0x69, 0x6e, 0x64, 0x12, 0x48, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x69, 0x6e, 0x64, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x48, 0x00, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, + 0x05, 0x72, 0x6f, 0x75, 0x74, 
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x69, + 0x6e, 0x64, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x48, 0x00, 0x52, 0x05, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x12, 0x48, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x4b, 0x69, 0x6e, 0x64, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x48, 0x00, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x69, 0x6e, 0x64, 0x2e, + 0x48, 0x6f, 0x73, 0x74, 0x48, 0x00, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x1a, 0x3d, 0x0a, 0x07, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, 0x0a, 0x2b, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, + 0x69, 0x6e, 0x64, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x0a, 0x05, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x69, 0x6e, 0x64, + 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x1a, 0x3d, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x32, + 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x69, 0x6e, 0x64, 0x2e, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x1a, 0x37, 0x0a, 0x04, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x2f, 0x9a, + 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x69, 0x6e, 0x64, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x2a, + 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x69, 0x6e, 0x64, 0x42, 0x0b, 0x0a, 0x04, 0x6b, 0x69, + 0x6e, 0x64, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x89, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, + 0x10, 0x02, 0x0a, 0x24, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 
0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x2f, 0x76, 0x33, 0x3b, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_metadata_v3_metadata_proto_rawDescOnce sync.Once + file_envoy_type_metadata_v3_metadata_proto_rawDescData = file_envoy_type_metadata_v3_metadata_proto_rawDesc +) + +func file_envoy_type_metadata_v3_metadata_proto_rawDescGZIP() []byte { + file_envoy_type_metadata_v3_metadata_proto_rawDescOnce.Do(func() { + file_envoy_type_metadata_v3_metadata_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_metadata_v3_metadata_proto_rawDescData) + }) + return file_envoy_type_metadata_v3_metadata_proto_rawDescData +} + +var file_envoy_type_metadata_v3_metadata_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_envoy_type_metadata_v3_metadata_proto_goTypes = []interface{}{ + (*MetadataKey)(nil), // 0: envoy.type.metadata.v3.MetadataKey + (*MetadataKind)(nil), // 1: envoy.type.metadata.v3.MetadataKind + (*MetadataKey_PathSegment)(nil), // 2: envoy.type.metadata.v3.MetadataKey.PathSegment + (*MetadataKind_Request)(nil), // 3: envoy.type.metadata.v3.MetadataKind.Request + (*MetadataKind_Route)(nil), // 4: envoy.type.metadata.v3.MetadataKind.Route + (*MetadataKind_Cluster)(nil), // 5: envoy.type.metadata.v3.MetadataKind.Cluster + (*MetadataKind_Host)(nil), // 6: envoy.type.metadata.v3.MetadataKind.Host +} +var file_envoy_type_metadata_v3_metadata_proto_depIdxs = []int32{ + 2, // 0: envoy.type.metadata.v3.MetadataKey.path:type_name -> envoy.type.metadata.v3.MetadataKey.PathSegment + 3, // 1: envoy.type.metadata.v3.MetadataKind.request:type_name -> envoy.type.metadata.v3.MetadataKind.Request + 4, // 2: envoy.type.metadata.v3.MetadataKind.route:type_name -> envoy.type.metadata.v3.MetadataKind.Route + 5, // 3: envoy.type.metadata.v3.MetadataKind.cluster:type_name -> envoy.type.metadata.v3.MetadataKind.Cluster + 6, // 4: envoy.type.metadata.v3.MetadataKind.host:type_name -> envoy.type.metadata.v3.MetadataKind.Host + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_envoy_type_metadata_v3_metadata_proto_init() } +func file_envoy_type_metadata_v3_metadata_proto_init() { + if File_envoy_type_metadata_v3_metadata_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_metadata_v3_metadata_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_metadata_v3_metadata_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataKind); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_metadata_v3_metadata_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataKey_PathSegment); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_envoy_type_metadata_v3_metadata_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataKind_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_metadata_v3_metadata_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataKind_Route); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_metadata_v3_metadata_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataKind_Cluster); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_metadata_v3_metadata_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataKind_Host); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_metadata_v3_metadata_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*MetadataKind_Request_)(nil), + (*MetadataKind_Route_)(nil), + (*MetadataKind_Cluster_)(nil), + (*MetadataKind_Host_)(nil), + } + file_envoy_type_metadata_v3_metadata_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*MetadataKey_PathSegment_Key)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_metadata_v3_metadata_proto_rawDesc, + NumEnums: 0, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_metadata_v3_metadata_proto_goTypes, + DependencyIndexes: file_envoy_type_metadata_v3_metadata_proto_depIdxs, + MessageInfos: file_envoy_type_metadata_v3_metadata_proto_msgTypes, + }.Build() + File_envoy_type_metadata_v3_metadata_proto = out.File + file_envoy_type_metadata_v3_metadata_proto_rawDesc = nil + file_envoy_type_metadata_v3_metadata_proto_goTypes = nil + file_envoy_type_metadata_v3_metadata_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.validate.go new file mode 100644 index 0000000000..adc8c8ed51 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata.pb.validate.go @@ -0,0 +1,1025 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/metadata/v3/metadata.proto + +package metadatav3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on MetadataKey with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *MetadataKey) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MetadataKey with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in MetadataKeyMultiError, or +// nil if none found. +func (m *MetadataKey) ValidateAll() error { + return m.validate(true) +} + +func (m *MetadataKey) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetKey()) < 1 { + err := MetadataKeyValidationError{ + field: "Key", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(m.GetPath()) < 1 { + err := MetadataKeyValidationError{ + field: "Path", + reason: "value must contain at least 1 item(s)", + } + if !all { + return err + } + errors = append(errors, err) + } + + for idx, item := range m.GetPath() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataKeyValidationError{ + field: fmt.Sprintf("Path[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataKeyValidationError{ + field: fmt.Sprintf("Path[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataKeyValidationError{ + field: fmt.Sprintf("Path[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return MetadataKeyMultiError(errors) + } + + return nil +} + +// MetadataKeyMultiError is an error wrapping multiple validation errors +// returned by MetadataKey.ValidateAll() if the designated constraints aren't met. +type MetadataKeyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MetadataKeyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MetadataKeyMultiError) AllErrors() []error { return m } + +// MetadataKeyValidationError is the validation error returned by +// MetadataKey.Validate if the designated constraints aren't met. +type MetadataKeyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MetadataKeyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MetadataKeyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetadataKeyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MetadataKeyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e MetadataKeyValidationError) ErrorName() string { return "MetadataKeyValidationError" } + +// Error satisfies the builtin error interface +func (e MetadataKeyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetadataKey.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetadataKeyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetadataKeyValidationError{} + +// Validate checks the field values on MetadataKind with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *MetadataKind) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MetadataKind with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in MetadataKindMultiError, or +// nil if none found. +func (m *MetadataKind) ValidateAll() error { + return m.validate(true) +} + +func (m *MetadataKind) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofKindPresent := false + switch v := m.Kind.(type) { + case *MetadataKind_Request_: + if v == nil { + err := MetadataKindValidationError{ + field: "Kind", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofKindPresent = true + + if all { + switch v := interface{}(m.GetRequest()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataKindValidationError{ + field: "Request", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataKindValidationError{ + field: "Request", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequest()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataKindValidationError{ + field: "Request", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MetadataKind_Route_: + if v == nil { + err := MetadataKindValidationError{ + field: "Kind", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofKindPresent = true + + if all { + switch v := interface{}(m.GetRoute()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataKindValidationError{ + field: "Route", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataKindValidationError{ + field: "Route", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRoute()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataKindValidationError{ + field: "Route", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MetadataKind_Cluster_: + if v == nil { + err := 
MetadataKindValidationError{ + field: "Kind", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofKindPresent = true + + if all { + switch v := interface{}(m.GetCluster()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataKindValidationError{ + field: "Cluster", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataKindValidationError{ + field: "Cluster", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCluster()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataKindValidationError{ + field: "Cluster", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *MetadataKind_Host_: + if v == nil { + err := MetadataKindValidationError{ + field: "Kind", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofKindPresent = true + + if all { + switch v := interface{}(m.GetHost()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataKindValidationError{ + field: "Host", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataKindValidationError{ + field: "Host", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHost()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataKindValidationError{ + field: "Host", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofKindPresent { + err := MetadataKindValidationError{ + field: "Kind", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return MetadataKindMultiError(errors) + } + + return nil +} + +// MetadataKindMultiError is an error wrapping multiple validation errors +// returned by MetadataKind.ValidateAll() if the designated constraints aren't met. +type MetadataKindMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MetadataKindMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MetadataKindMultiError) AllErrors() []error { return m } + +// MetadataKindValidationError is the validation error returned by +// MetadataKind.Validate if the designated constraints aren't met. +type MetadataKindValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MetadataKindValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MetadataKindValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetadataKindValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e MetadataKindValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MetadataKindValidationError) ErrorName() string { return "MetadataKindValidationError" } + +// Error satisfies the builtin error interface +func (e MetadataKindValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetadataKind.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetadataKindValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetadataKindValidationError{} + +// Validate checks the field values on MetadataKey_PathSegment with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *MetadataKey_PathSegment) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MetadataKey_PathSegment with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MetadataKey_PathSegmentMultiError, or nil if none found. +func (m *MetadataKey_PathSegment) ValidateAll() error { + return m.validate(true) +} + +func (m *MetadataKey_PathSegment) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofSegmentPresent := false + switch v := m.Segment.(type) { + case *MetadataKey_PathSegment_Key: + if v == nil { + err := MetadataKey_PathSegmentValidationError{ + field: "Segment", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofSegmentPresent = true + + if utf8.RuneCountInString(m.GetKey()) < 1 { + err := MetadataKey_PathSegmentValidationError{ + field: "Key", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + default: + _ = v // ensures v is used + } + if !oneofSegmentPresent { + err := MetadataKey_PathSegmentValidationError{ + field: "Segment", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return MetadataKey_PathSegmentMultiError(errors) + } + + return nil +} + +// MetadataKey_PathSegmentMultiError is an error wrapping multiple validation +// errors returned by MetadataKey_PathSegment.ValidateAll() if the designated +// constraints aren't met. +type MetadataKey_PathSegmentMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MetadataKey_PathSegmentMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MetadataKey_PathSegmentMultiError) AllErrors() []error { return m } + +// MetadataKey_PathSegmentValidationError is the validation error returned by +// MetadataKey_PathSegment.Validate if the designated constraints aren't met. +type MetadataKey_PathSegmentValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MetadataKey_PathSegmentValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e MetadataKey_PathSegmentValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetadataKey_PathSegmentValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MetadataKey_PathSegmentValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MetadataKey_PathSegmentValidationError) ErrorName() string { + return "MetadataKey_PathSegmentValidationError" +} + +// Error satisfies the builtin error interface +func (e MetadataKey_PathSegmentValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetadataKey_PathSegment.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetadataKey_PathSegmentValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetadataKey_PathSegmentValidationError{} + +// Validate checks the field values on MetadataKind_Request with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *MetadataKind_Request) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MetadataKind_Request with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MetadataKind_RequestMultiError, or nil if none found. +func (m *MetadataKind_Request) ValidateAll() error { + return m.validate(true) +} + +func (m *MetadataKind_Request) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return MetadataKind_RequestMultiError(errors) + } + + return nil +} + +// MetadataKind_RequestMultiError is an error wrapping multiple validation +// errors returned by MetadataKind_Request.ValidateAll() if the designated +// constraints aren't met. +type MetadataKind_RequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MetadataKind_RequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MetadataKind_RequestMultiError) AllErrors() []error { return m } + +// MetadataKind_RequestValidationError is the validation error returned by +// MetadataKind_Request.Validate if the designated constraints aren't met. +type MetadataKind_RequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MetadataKind_RequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MetadataKind_RequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetadataKind_RequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MetadataKind_RequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e MetadataKind_RequestValidationError) ErrorName() string { + return "MetadataKind_RequestValidationError" +} + +// Error satisfies the builtin error interface +func (e MetadataKind_RequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetadataKind_Request.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetadataKind_RequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetadataKind_RequestValidationError{} + +// Validate checks the field values on MetadataKind_Route with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *MetadataKind_Route) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MetadataKind_Route with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MetadataKind_RouteMultiError, or nil if none found. +func (m *MetadataKind_Route) ValidateAll() error { + return m.validate(true) +} + +func (m *MetadataKind_Route) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return MetadataKind_RouteMultiError(errors) + } + + return nil +} + +// MetadataKind_RouteMultiError is an error wrapping multiple validation errors +// returned by MetadataKind_Route.ValidateAll() if the designated constraints +// aren't met. +type MetadataKind_RouteMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MetadataKind_RouteMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MetadataKind_RouteMultiError) AllErrors() []error { return m } + +// MetadataKind_RouteValidationError is the validation error returned by +// MetadataKind_Route.Validate if the designated constraints aren't met. +type MetadataKind_RouteValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MetadataKind_RouteValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MetadataKind_RouteValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetadataKind_RouteValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MetadataKind_RouteValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e MetadataKind_RouteValidationError) ErrorName() string { + return "MetadataKind_RouteValidationError" +} + +// Error satisfies the builtin error interface +func (e MetadataKind_RouteValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetadataKind_Route.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetadataKind_RouteValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetadataKind_RouteValidationError{} + +// Validate checks the field values on MetadataKind_Cluster with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *MetadataKind_Cluster) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MetadataKind_Cluster with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MetadataKind_ClusterMultiError, or nil if none found. +func (m *MetadataKind_Cluster) ValidateAll() error { + return m.validate(true) +} + +func (m *MetadataKind_Cluster) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return MetadataKind_ClusterMultiError(errors) + } + + return nil +} + +// MetadataKind_ClusterMultiError is an error wrapping multiple validation +// errors returned by MetadataKind_Cluster.ValidateAll() if the designated +// constraints aren't met. +type MetadataKind_ClusterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MetadataKind_ClusterMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MetadataKind_ClusterMultiError) AllErrors() []error { return m } + +// MetadataKind_ClusterValidationError is the validation error returned by +// MetadataKind_Cluster.Validate if the designated constraints aren't met. +type MetadataKind_ClusterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MetadataKind_ClusterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MetadataKind_ClusterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetadataKind_ClusterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MetadataKind_ClusterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e MetadataKind_ClusterValidationError) ErrorName() string { + return "MetadataKind_ClusterValidationError" +} + +// Error satisfies the builtin error interface +func (e MetadataKind_ClusterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetadataKind_Cluster.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetadataKind_ClusterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetadataKind_ClusterValidationError{} + +// Validate checks the field values on MetadataKind_Host with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *MetadataKind_Host) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on MetadataKind_Host with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// MetadataKind_HostMultiError, or nil if none found. +func (m *MetadataKind_Host) ValidateAll() error { + return m.validate(true) +} + +func (m *MetadataKind_Host) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return MetadataKind_HostMultiError(errors) + } + + return nil +} + +// MetadataKind_HostMultiError is an error wrapping multiple validation errors +// returned by MetadataKind_Host.ValidateAll() if the designated constraints +// aren't met. +type MetadataKind_HostMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MetadataKind_HostMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MetadataKind_HostMultiError) AllErrors() []error { return m } + +// MetadataKind_HostValidationError is the validation error returned by +// MetadataKind_Host.Validate if the designated constraints aren't met. +type MetadataKind_HostValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MetadataKind_HostValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MetadataKind_HostValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetadataKind_HostValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MetadataKind_HostValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e MetadataKind_HostValidationError) ErrorName() string { + return "MetadataKind_HostValidationError" +} + +// Error satisfies the builtin error interface +func (e MetadataKind_HostValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetadataKind_Host.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetadataKind_HostValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetadataKind_HostValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata_vtproto.pb.go new file mode 100644 index 0000000000..efbf1efc3d --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3/metadata_vtproto.pb.go @@ -0,0 +1,563 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/metadata/v3/metadata.proto + +package metadatav3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *MetadataKey_PathSegment) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataKey_PathSegment) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKey_PathSegment) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Segment.(*MetadataKey_PathSegment_Key); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *MetadataKey_PathSegment_Key) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKey_PathSegment_Key) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *MetadataKey) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataKey) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKey) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + 
var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Path) > 0 { + for iNdEx := len(m.Path) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Path[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MetadataKind_Request) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataKind_Request) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind_Request) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *MetadataKind_Route) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataKind_Route) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind_Route) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *MetadataKind_Cluster) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataKind_Cluster) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind_Cluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *MetadataKind_Host) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataKind_Host) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind_Host) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m 
*MetadataKind) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataKind) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Kind.(*MetadataKind_Host_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Kind.(*MetadataKind_Cluster_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Kind.(*MetadataKind_Route_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Kind.(*MetadataKind_Request_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *MetadataKind_Request_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind_Request_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Request != nil { + size, err := m.Request.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *MetadataKind_Route_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind_Route_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Route != nil { + size, err := m.Route.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *MetadataKind_Cluster_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind_Cluster_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Cluster != nil { + size, err := m.Cluster.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *MetadataKind_Host_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *MetadataKind_Host_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Host != nil { + size, err := m.Host.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *MetadataKey_PathSegment) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Segment.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *MetadataKey_PathSegment_Key) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *MetadataKey) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Path) > 0 { + for _, e := range m.Path { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *MetadataKind_Request) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *MetadataKind_Route) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *MetadataKind_Cluster) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *MetadataKind_Host) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *MetadataKind) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Kind.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *MetadataKind_Request_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Request != nil { + l = m.Request.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MetadataKind_Route_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Route != nil { + l = m.Route.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MetadataKind_Cluster_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Cluster != nil { + l = m.Cluster.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *MetadataKind_Host_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Host != nil { + l = m.Host.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.go new file mode 100644 index 0000000000..388e4749e2 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.go @@ -0,0 +1,609 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/tracing/v3/custom_tag.proto + +package tracingv3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + v3 "github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Describes custom tags for the active span. +// [#next-free-field: 6] +type CustomTag struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Used to populate the tag name. + Tag string `protobuf:"bytes,1,opt,name=tag,proto3" json:"tag,omitempty"` + // Used to specify what kind of custom tag. + // + // Types that are assignable to Type: + // + // *CustomTag_Literal_ + // *CustomTag_Environment_ + // *CustomTag_RequestHeader + // *CustomTag_Metadata_ + Type isCustomTag_Type `protobuf_oneof:"type"` +} + +func (x *CustomTag) Reset() { + *x = CustomTag{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomTag) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomTag) ProtoMessage() {} + +func (x *CustomTag) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomTag.ProtoReflect.Descriptor instead. +func (*CustomTag) Descriptor() ([]byte, []int) { + return file_envoy_type_tracing_v3_custom_tag_proto_rawDescGZIP(), []int{0} +} + +func (x *CustomTag) GetTag() string { + if x != nil { + return x.Tag + } + return "" +} + +func (m *CustomTag) GetType() isCustomTag_Type { + if m != nil { + return m.Type + } + return nil +} + +func (x *CustomTag) GetLiteral() *CustomTag_Literal { + if x, ok := x.GetType().(*CustomTag_Literal_); ok { + return x.Literal + } + return nil +} + +func (x *CustomTag) GetEnvironment() *CustomTag_Environment { + if x, ok := x.GetType().(*CustomTag_Environment_); ok { + return x.Environment + } + return nil +} + +func (x *CustomTag) GetRequestHeader() *CustomTag_Header { + if x, ok := x.GetType().(*CustomTag_RequestHeader); ok { + return x.RequestHeader + } + return nil +} + +func (x *CustomTag) GetMetadata() *CustomTag_Metadata { + if x, ok := x.GetType().(*CustomTag_Metadata_); ok { + return x.Metadata + } + return nil +} + +type isCustomTag_Type interface { + isCustomTag_Type() +} + +type CustomTag_Literal_ struct { + // A literal custom tag. + Literal *CustomTag_Literal `protobuf:"bytes,2,opt,name=literal,proto3,oneof"` +} + +type CustomTag_Environment_ struct { + // An environment custom tag. + Environment *CustomTag_Environment `protobuf:"bytes,3,opt,name=environment,proto3,oneof"` +} + +type CustomTag_RequestHeader struct { + // A request header custom tag. 
+ RequestHeader *CustomTag_Header `protobuf:"bytes,4,opt,name=request_header,json=requestHeader,proto3,oneof"` +} + +type CustomTag_Metadata_ struct { + // A custom tag to obtain tag value from the metadata. + Metadata *CustomTag_Metadata `protobuf:"bytes,5,opt,name=metadata,proto3,oneof"` +} + +func (*CustomTag_Literal_) isCustomTag_Type() {} + +func (*CustomTag_Environment_) isCustomTag_Type() {} + +func (*CustomTag_RequestHeader) isCustomTag_Type() {} + +func (*CustomTag_Metadata_) isCustomTag_Type() {} + +// Literal type custom tag with static value for the tag value. +type CustomTag_Literal struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Static literal value to populate the tag value. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *CustomTag_Literal) Reset() { + *x = CustomTag_Literal{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomTag_Literal) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomTag_Literal) ProtoMessage() {} + +func (x *CustomTag_Literal) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomTag_Literal.ProtoReflect.Descriptor instead. +func (*CustomTag_Literal) Descriptor() ([]byte, []int) { + return file_envoy_type_tracing_v3_custom_tag_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *CustomTag_Literal) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// Environment type custom tag with environment name and default value. +type CustomTag_Environment struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Environment variable name to obtain the value to populate the tag value. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // When the environment variable is not found, + // the tag value will be populated with this default value if specified, + // otherwise no tag will be populated. + DefaultValue string `protobuf:"bytes,2,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` +} + +func (x *CustomTag_Environment) Reset() { + *x = CustomTag_Environment{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomTag_Environment) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomTag_Environment) ProtoMessage() {} + +func (x *CustomTag_Environment) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomTag_Environment.ProtoReflect.Descriptor instead. 
+func (*CustomTag_Environment) Descriptor() ([]byte, []int) { + return file_envoy_type_tracing_v3_custom_tag_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *CustomTag_Environment) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CustomTag_Environment) GetDefaultValue() string { + if x != nil { + return x.DefaultValue + } + return "" +} + +// Header type custom tag with header name and default value. +type CustomTag_Header struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Header name to obtain the value to populate the tag value. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // When the header does not exist, + // the tag value will be populated with this default value if specified, + // otherwise no tag will be populated. + DefaultValue string `protobuf:"bytes,2,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` +} + +func (x *CustomTag_Header) Reset() { + *x = CustomTag_Header{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomTag_Header) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomTag_Header) ProtoMessage() {} + +func (x *CustomTag_Header) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomTag_Header.ProtoReflect.Descriptor instead. +func (*CustomTag_Header) Descriptor() ([]byte, []int) { + return file_envoy_type_tracing_v3_custom_tag_proto_rawDescGZIP(), []int{0, 2} +} + +func (x *CustomTag_Header) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CustomTag_Header) GetDefaultValue() string { + if x != nil { + return x.DefaultValue + } + return "" +} + +// Metadata type custom tag using +// :ref:`MetadataKey ` to retrieve the protobuf value +// from :ref:`Metadata `, and populate the tag value with +// `the canonical JSON `_ +// representation of it. +type CustomTag_Metadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specify what kind of metadata to obtain tag value from. + Kind *v3.MetadataKind `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + // Metadata key to define the path to retrieve the tag value. + MetadataKey *v3.MetadataKey `protobuf:"bytes,2,opt,name=metadata_key,json=metadataKey,proto3" json:"metadata_key,omitempty"` + // When no valid metadata is found, + // the tag value would be populated with this default value if specified, + // otherwise no tag would be populated. 
+ DefaultValue string `protobuf:"bytes,3,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` +} + +func (x *CustomTag_Metadata) Reset() { + *x = CustomTag_Metadata{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomTag_Metadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomTag_Metadata) ProtoMessage() {} + +func (x *CustomTag_Metadata) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomTag_Metadata.ProtoReflect.Descriptor instead. +func (*CustomTag_Metadata) Descriptor() ([]byte, []int) { + return file_envoy_type_tracing_v3_custom_tag_proto_rawDescGZIP(), []int{0, 3} +} + +func (x *CustomTag_Metadata) GetKind() *v3.MetadataKind { + if x != nil { + return x.Kind + } + return nil +} + +func (x *CustomTag_Metadata) GetMetadataKey() *v3.MetadataKey { + if x != nil { + return x.MetadataKey + } + return nil +} + +func (x *CustomTag_Metadata) GetDefaultValue() string { + if x != nil { + return x.DefaultValue + } + return "" +} + +var File_envoy_type_tracing_v3_custom_tag_proto protoreflect.FileDescriptor + +var file_envoy_type_tracing_v3_custom_tag_proto_rawDesc = []byte{ + 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x74, 0x72, 0x61, + 0x63, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, + 0x61, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, + 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, + 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0xd4, 0x07, 0x0a, 0x09, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x12, + 0x19, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x74, 0x61, 0x67, 0x12, 0x44, 0x0a, 0x07, 0x6c, 0x69, + 0x74, 0x65, 0x72, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x2e, 0x4c, 0x69, + 0x74, 0x65, 0x72, 0x61, 0x6c, 0x48, 0x00, 0x52, 0x07, 0x6c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, + 0x12, 0x50, 0x0a, 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, + 0x03, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x2e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, + 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, + 0x6e, 0x74, 0x12, 0x50, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x2e, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x12, 0x47, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x48, 0x00, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x58, 0x0a, + 0x07, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x12, 0x1d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x2e, + 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x1a, 0x83, 0x01, 0x0a, 0x0b, 0x45, 0x6e, 0x76, 0x69, + 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, + 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, + 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, + 0x67, 0x2e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x1a, 0x7f, 0x0a, + 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, + 0x00, 0xc0, 0x01, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0xe2, + 0x01, 0x0a, 0x08, 0x4d, 0x65, 
0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x38, 0x0a, 0x04, 0x6b, + 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x69, 0x6e, 0x64, 0x52, + 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x46, 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, + 0x52, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x12, 0x23, 0x0a, + 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x42, 0x0b, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x87, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, + 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x54, 0x61, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x74, 0x72, + 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, + 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_tracing_v3_custom_tag_proto_rawDescOnce sync.Once + file_envoy_type_tracing_v3_custom_tag_proto_rawDescData = file_envoy_type_tracing_v3_custom_tag_proto_rawDesc +) + +func file_envoy_type_tracing_v3_custom_tag_proto_rawDescGZIP() []byte { + file_envoy_type_tracing_v3_custom_tag_proto_rawDescOnce.Do(func() { + file_envoy_type_tracing_v3_custom_tag_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_tracing_v3_custom_tag_proto_rawDescData) + }) + return file_envoy_type_tracing_v3_custom_tag_proto_rawDescData +} + +var file_envoy_type_tracing_v3_custom_tag_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_envoy_type_tracing_v3_custom_tag_proto_goTypes = []interface{}{ + (*CustomTag)(nil), // 0: envoy.type.tracing.v3.CustomTag + (*CustomTag_Literal)(nil), // 1: envoy.type.tracing.v3.CustomTag.Literal + (*CustomTag_Environment)(nil), // 2: envoy.type.tracing.v3.CustomTag.Environment + (*CustomTag_Header)(nil), // 3: 
envoy.type.tracing.v3.CustomTag.Header + (*CustomTag_Metadata)(nil), // 4: envoy.type.tracing.v3.CustomTag.Metadata + (*v3.MetadataKind)(nil), // 5: envoy.type.metadata.v3.MetadataKind + (*v3.MetadataKey)(nil), // 6: envoy.type.metadata.v3.MetadataKey +} +var file_envoy_type_tracing_v3_custom_tag_proto_depIdxs = []int32{ + 1, // 0: envoy.type.tracing.v3.CustomTag.literal:type_name -> envoy.type.tracing.v3.CustomTag.Literal + 2, // 1: envoy.type.tracing.v3.CustomTag.environment:type_name -> envoy.type.tracing.v3.CustomTag.Environment + 3, // 2: envoy.type.tracing.v3.CustomTag.request_header:type_name -> envoy.type.tracing.v3.CustomTag.Header + 4, // 3: envoy.type.tracing.v3.CustomTag.metadata:type_name -> envoy.type.tracing.v3.CustomTag.Metadata + 5, // 4: envoy.type.tracing.v3.CustomTag.Metadata.kind:type_name -> envoy.type.metadata.v3.MetadataKind + 6, // 5: envoy.type.tracing.v3.CustomTag.Metadata.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_envoy_type_tracing_v3_custom_tag_proto_init() } +func file_envoy_type_tracing_v3_custom_tag_proto_init() { + if File_envoy_type_tracing_v3_custom_tag_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomTag); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomTag_Literal); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomTag_Environment); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomTag_Header); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomTag_Metadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_tracing_v3_custom_tag_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*CustomTag_Literal_)(nil), + (*CustomTag_Environment_)(nil), + (*CustomTag_RequestHeader)(nil), + (*CustomTag_Metadata_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_tracing_v3_custom_tag_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_tracing_v3_custom_tag_proto_goTypes, + DependencyIndexes: file_envoy_type_tracing_v3_custom_tag_proto_depIdxs, + MessageInfos: 
file_envoy_type_tracing_v3_custom_tag_proto_msgTypes, + }.Build() + File_envoy_type_tracing_v3_custom_tag_proto = out.File + file_envoy_type_tracing_v3_custom_tag_proto_rawDesc = nil + file_envoy_type_tracing_v3_custom_tag_proto_goTypes = nil + file_envoy_type_tracing_v3_custom_tag_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.validate.go new file mode 100644 index 0000000000..d15de9b688 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag.pb.validate.go @@ -0,0 +1,847 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/tracing/v3/custom_tag.proto + +package tracingv3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on CustomTag with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *CustomTag) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CustomTag with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in CustomTagMultiError, or nil +// if none found. 
+func (m *CustomTag) ValidateAll() error { + return m.validate(true) +} + +func (m *CustomTag) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetTag()) < 1 { + err := CustomTagValidationError{ + field: "Tag", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + oneofTypePresent := false + switch v := m.Type.(type) { + case *CustomTag_Literal_: + if v == nil { + err := CustomTagValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTypePresent = true + + if all { + switch v := interface{}(m.GetLiteral()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CustomTagValidationError{ + field: "Literal", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CustomTagValidationError{ + field: "Literal", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLiteral()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CustomTagValidationError{ + field: "Literal", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *CustomTag_Environment_: + if v == nil { + err := CustomTagValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTypePresent = true + + if all { + switch v := interface{}(m.GetEnvironment()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CustomTagValidationError{ + field: "Environment", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CustomTagValidationError{ + field: "Environment", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEnvironment()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CustomTagValidationError{ + field: "Environment", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *CustomTag_RequestHeader: + if v == nil { + err := CustomTagValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTypePresent = true + + if all { + switch v := interface{}(m.GetRequestHeader()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CustomTagValidationError{ + field: "RequestHeader", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CustomTagValidationError{ + field: "RequestHeader", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequestHeader()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CustomTagValidationError{ + field: "RequestHeader", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case 
*CustomTag_Metadata_: + if v == nil { + err := CustomTagValidationError{ + field: "Type", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTypePresent = true + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CustomTagValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CustomTagValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CustomTagValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofTypePresent { + err := CustomTagValidationError{ + field: "Type", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return CustomTagMultiError(errors) + } + + return nil +} + +// CustomTagMultiError is an error wrapping multiple validation errors returned +// by CustomTag.ValidateAll() if the designated constraints aren't met. +type CustomTagMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CustomTagMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CustomTagMultiError) AllErrors() []error { return m } + +// CustomTagValidationError is the validation error returned by +// CustomTag.Validate if the designated constraints aren't met. +type CustomTagValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CustomTagValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CustomTagValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CustomTagValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CustomTagValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CustomTagValidationError) ErrorName() string { return "CustomTagValidationError" } + +// Error satisfies the builtin error interface +func (e CustomTagValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCustomTag.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CustomTagValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CustomTagValidationError{} + +// Validate checks the field values on CustomTag_Literal with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. 
+func (m *CustomTag_Literal) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CustomTag_Literal with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CustomTag_LiteralMultiError, or nil if none found. +func (m *CustomTag_Literal) ValidateAll() error { + return m.validate(true) +} + +func (m *CustomTag_Literal) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetValue()) < 1 { + err := CustomTag_LiteralValidationError{ + field: "Value", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return CustomTag_LiteralMultiError(errors) + } + + return nil +} + +// CustomTag_LiteralMultiError is an error wrapping multiple validation errors +// returned by CustomTag_Literal.ValidateAll() if the designated constraints +// aren't met. +type CustomTag_LiteralMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CustomTag_LiteralMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CustomTag_LiteralMultiError) AllErrors() []error { return m } + +// CustomTag_LiteralValidationError is the validation error returned by +// CustomTag_Literal.Validate if the designated constraints aren't met. +type CustomTag_LiteralValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CustomTag_LiteralValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CustomTag_LiteralValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CustomTag_LiteralValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CustomTag_LiteralValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CustomTag_LiteralValidationError) ErrorName() string { + return "CustomTag_LiteralValidationError" +} + +// Error satisfies the builtin error interface +func (e CustomTag_LiteralValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCustomTag_Literal.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CustomTag_LiteralValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CustomTag_LiteralValidationError{} + +// Validate checks the field values on CustomTag_Environment with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CustomTag_Environment) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CustomTag_Environment with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CustomTag_EnvironmentMultiError, or nil if none found. 
+func (m *CustomTag_Environment) ValidateAll() error { + return m.validate(true) +} + +func (m *CustomTag_Environment) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := CustomTag_EnvironmentValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for DefaultValue + + if len(errors) > 0 { + return CustomTag_EnvironmentMultiError(errors) + } + + return nil +} + +// CustomTag_EnvironmentMultiError is an error wrapping multiple validation +// errors returned by CustomTag_Environment.ValidateAll() if the designated +// constraints aren't met. +type CustomTag_EnvironmentMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CustomTag_EnvironmentMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CustomTag_EnvironmentMultiError) AllErrors() []error { return m } + +// CustomTag_EnvironmentValidationError is the validation error returned by +// CustomTag_Environment.Validate if the designated constraints aren't met. +type CustomTag_EnvironmentValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CustomTag_EnvironmentValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CustomTag_EnvironmentValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CustomTag_EnvironmentValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CustomTag_EnvironmentValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CustomTag_EnvironmentValidationError) ErrorName() string { + return "CustomTag_EnvironmentValidationError" +} + +// Error satisfies the builtin error interface +func (e CustomTag_EnvironmentValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCustomTag_Environment.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CustomTag_EnvironmentValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CustomTag_EnvironmentValidationError{} + +// Validate checks the field values on CustomTag_Header with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *CustomTag_Header) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CustomTag_Header with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CustomTag_HeaderMultiError, or nil if none found. 
+func (m *CustomTag_Header) ValidateAll() error { + return m.validate(true) +} + +func (m *CustomTag_Header) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := CustomTag_HeaderValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if !_CustomTag_Header_Name_Pattern.MatchString(m.GetName()) { + err := CustomTag_HeaderValidationError{ + field: "Name", + reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for DefaultValue + + if len(errors) > 0 { + return CustomTag_HeaderMultiError(errors) + } + + return nil +} + +// CustomTag_HeaderMultiError is an error wrapping multiple validation errors +// returned by CustomTag_Header.ValidateAll() if the designated constraints +// aren't met. +type CustomTag_HeaderMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CustomTag_HeaderMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CustomTag_HeaderMultiError) AllErrors() []error { return m } + +// CustomTag_HeaderValidationError is the validation error returned by +// CustomTag_Header.Validate if the designated constraints aren't met. +type CustomTag_HeaderValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CustomTag_HeaderValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CustomTag_HeaderValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CustomTag_HeaderValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CustomTag_HeaderValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CustomTag_HeaderValidationError) ErrorName() string { return "CustomTag_HeaderValidationError" } + +// Error satisfies the builtin error interface +func (e CustomTag_HeaderValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCustomTag_Header.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CustomTag_HeaderValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CustomTag_HeaderValidationError{} + +var _CustomTag_Header_Name_Pattern = regexp.MustCompile("^[^\x00\n\r]*$") + +// Validate checks the field values on CustomTag_Metadata with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CustomTag_Metadata) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CustomTag_Metadata with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CustomTag_MetadataMultiError, or nil if none found. 
+func (m *CustomTag_Metadata) ValidateAll() error { + return m.validate(true) +} + +func (m *CustomTag_Metadata) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetKind()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CustomTag_MetadataValidationError{ + field: "Kind", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CustomTag_MetadataValidationError{ + field: "Kind", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetKind()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CustomTag_MetadataValidationError{ + field: "Kind", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMetadataKey()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CustomTag_MetadataValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CustomTag_MetadataValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadataKey()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CustomTag_MetadataValidationError{ + field: "MetadataKey", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for DefaultValue + + if len(errors) > 0 { + return CustomTag_MetadataMultiError(errors) + } + + return nil +} + +// CustomTag_MetadataMultiError is an error wrapping multiple validation errors +// returned by CustomTag_Metadata.ValidateAll() if the designated constraints +// aren't met. +type CustomTag_MetadataMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CustomTag_MetadataMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CustomTag_MetadataMultiError) AllErrors() []error { return m } + +// CustomTag_MetadataValidationError is the validation error returned by +// CustomTag_Metadata.Validate if the designated constraints aren't met. +type CustomTag_MetadataValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CustomTag_MetadataValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CustomTag_MetadataValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CustomTag_MetadataValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CustomTag_MetadataValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e CustomTag_MetadataValidationError) ErrorName() string { + return "CustomTag_MetadataValidationError" +} + +// Error satisfies the builtin error interface +func (e CustomTag_MetadataValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCustomTag_Metadata.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CustomTag_MetadataValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CustomTag_MetadataValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag_vtproto.pb.go new file mode 100644 index 0000000000..e558c5d079 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3/custom_tag_vtproto.pb.go @@ -0,0 +1,556 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/tracing/v3/custom_tag.proto + +package tracingv3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *CustomTag_Literal) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomTag_Literal) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag_Literal) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomTag_Environment) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomTag_Environment) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag_Environment) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DefaultValue) > 0 { + i -= len(m.DefaultValue) + copy(dAtA[i:], m.DefaultValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DefaultValue))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomTag_Header) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomTag_Header) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag_Header) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DefaultValue) > 0 { + i -= len(m.DefaultValue) + copy(dAtA[i:], m.DefaultValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DefaultValue))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomTag_Metadata) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomTag_Metadata) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag_Metadata) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DefaultValue) > 0 { + i -= len(m.DefaultValue) + copy(dAtA[i:], m.DefaultValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DefaultValue))) + i-- + dAtA[i] = 0x1a + } + if m.MetadataKey != nil { + if vtmsg, ok := interface{}(m.MetadataKey).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.MetadataKey) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.Kind != nil { + if vtmsg, ok := interface{}(m.Kind).(interface { + MarshalToSizedBufferVTStrict([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Kind) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomTag) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomTag) MarshalToVTStrict(dAtA []byte) (int, error) { + size := 
m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Type.(*CustomTag_Metadata_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*CustomTag_RequestHeader); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*CustomTag_Environment_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Type.(*CustomTag_Literal_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Tag) > 0 { + i -= len(m.Tag) + copy(dAtA[i:], m.Tag) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Tag))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomTag_Literal_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag_Literal_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Literal != nil { + size, err := m.Literal.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *CustomTag_Environment_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag_Environment_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Environment != nil { + size, err := m.Environment.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *CustomTag_RequestHeader) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag_RequestHeader) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RequestHeader != nil { + size, err := m.RequestHeader.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *CustomTag_Metadata_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *CustomTag_Metadata_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Metadata != nil { + size, err := m.Metadata.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - 
i, nil +} +func (m *CustomTag_Literal) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CustomTag_Environment) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DefaultValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CustomTag_Header) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DefaultValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CustomTag_Metadata) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Kind != nil { + if size, ok := interface{}(m.Kind).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Kind) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MetadataKey != nil { + if size, ok := interface{}(m.MetadataKey).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.MetadataKey) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.DefaultValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CustomTag) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Tag) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if vtmsg, ok := m.Type.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *CustomTag_Literal_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Literal != nil { + l = m.Literal.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CustomTag_Environment_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Environment != nil { + l = m.Environment.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CustomTag_RequestHeader) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequestHeader != nil { + l = m.RequestHeader.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *CustomTag_Metadata_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Metadata != nil { + l = m.Metadata.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.go new file mode 100644 index 0000000000..af620911fd --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.go @@ -0,0 +1,333 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/v3/hash_policy.proto + +package typev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Specifies the hash policy +type HashPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to PolicySpecifier: + // + // *HashPolicy_SourceIp_ + // *HashPolicy_FilterState_ + PolicySpecifier isHashPolicy_PolicySpecifier `protobuf_oneof:"policy_specifier"` +} + +func (x *HashPolicy) Reset() { + *x = HashPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_hash_policy_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HashPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HashPolicy) ProtoMessage() {} + +func (x *HashPolicy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_hash_policy_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HashPolicy.ProtoReflect.Descriptor instead. +func (*HashPolicy) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_hash_policy_proto_rawDescGZIP(), []int{0} +} + +func (m *HashPolicy) GetPolicySpecifier() isHashPolicy_PolicySpecifier { + if m != nil { + return m.PolicySpecifier + } + return nil +} + +func (x *HashPolicy) GetSourceIp() *HashPolicy_SourceIp { + if x, ok := x.GetPolicySpecifier().(*HashPolicy_SourceIp_); ok { + return x.SourceIp + } + return nil +} + +func (x *HashPolicy) GetFilterState() *HashPolicy_FilterState { + if x, ok := x.GetPolicySpecifier().(*HashPolicy_FilterState_); ok { + return x.FilterState + } + return nil +} + +type isHashPolicy_PolicySpecifier interface { + isHashPolicy_PolicySpecifier() +} + +type HashPolicy_SourceIp_ struct { + SourceIp *HashPolicy_SourceIp `protobuf:"bytes,1,opt,name=source_ip,json=sourceIp,proto3,oneof"` +} + +type HashPolicy_FilterState_ struct { + FilterState *HashPolicy_FilterState `protobuf:"bytes,2,opt,name=filter_state,json=filterState,proto3,oneof"` +} + +func (*HashPolicy_SourceIp_) isHashPolicy_PolicySpecifier() {} + +func (*HashPolicy_FilterState_) isHashPolicy_PolicySpecifier() {} + +// The source IP will be used to compute the hash used by hash-based load balancing +// algorithms. 
+type HashPolicy_SourceIp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *HashPolicy_SourceIp) Reset() { + *x = HashPolicy_SourceIp{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_hash_policy_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HashPolicy_SourceIp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HashPolicy_SourceIp) ProtoMessage() {} + +func (x *HashPolicy_SourceIp) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_hash_policy_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HashPolicy_SourceIp.ProtoReflect.Descriptor instead. +func (*HashPolicy_SourceIp) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_hash_policy_proto_rawDescGZIP(), []int{0, 0} +} + +// An Object in the :ref:`filterState ` will be used +// to compute the hash used by hash-based load balancing algorithms. +type HashPolicy_FilterState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the Object in the filterState, which is an Envoy::Hashable object. If there is no + // data associated with the key, or the stored object is not Envoy::Hashable, no hash will be + // produced. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` +} + +func (x *HashPolicy_FilterState) Reset() { + *x = HashPolicy_FilterState{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_hash_policy_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HashPolicy_FilterState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HashPolicy_FilterState) ProtoMessage() {} + +func (x *HashPolicy_FilterState) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_hash_policy_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HashPolicy_FilterState.ProtoReflect.Descriptor instead. 
+func (*HashPolicy_FilterState) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_hash_policy_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *HashPolicy_FilterState) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +var File_envoy_type_v3_hash_policy_proto protoreflect.FileDescriptor + +var file_envoy_type_v3_hash_policy_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x68, 0x61, 0x73, 0x68, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, + 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xaf, 0x02, 0x0a, 0x0a, + 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x09, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x61, + 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, + 0x70, 0x48, 0x00, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x12, 0x4a, 0x0a, + 0x0c, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x31, 0x0a, 0x08, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x49, 0x70, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x1a, 0x28, 0x0a, 0x0b, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x19, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x1c, 0x9a, 0xc5, 0x88, 0x1e, 0x17, 0x0a, 0x15, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x42, 0x17, 0x0a, 0x10, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x75, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 
0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, + 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x79, + 0x70, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_v3_hash_policy_proto_rawDescOnce sync.Once + file_envoy_type_v3_hash_policy_proto_rawDescData = file_envoy_type_v3_hash_policy_proto_rawDesc +) + +func file_envoy_type_v3_hash_policy_proto_rawDescGZIP() []byte { + file_envoy_type_v3_hash_policy_proto_rawDescOnce.Do(func() { + file_envoy_type_v3_hash_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_hash_policy_proto_rawDescData) + }) + return file_envoy_type_v3_hash_policy_proto_rawDescData +} + +var file_envoy_type_v3_hash_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_envoy_type_v3_hash_policy_proto_goTypes = []interface{}{ + (*HashPolicy)(nil), // 0: envoy.type.v3.HashPolicy + (*HashPolicy_SourceIp)(nil), // 1: envoy.type.v3.HashPolicy.SourceIp + (*HashPolicy_FilterState)(nil), // 2: envoy.type.v3.HashPolicy.FilterState +} +var file_envoy_type_v3_hash_policy_proto_depIdxs = []int32{ + 1, // 0: envoy.type.v3.HashPolicy.source_ip:type_name -> envoy.type.v3.HashPolicy.SourceIp + 2, // 1: envoy.type.v3.HashPolicy.filter_state:type_name -> envoy.type.v3.HashPolicy.FilterState + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_type_v3_hash_policy_proto_init() } +func file_envoy_type_v3_hash_policy_proto_init() { + if File_envoy_type_v3_hash_policy_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_v3_hash_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HashPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_v3_hash_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HashPolicy_SourceIp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_v3_hash_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HashPolicy_FilterState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_v3_hash_policy_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*HashPolicy_SourceIp_)(nil), + (*HashPolicy_FilterState_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_v3_hash_policy_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_v3_hash_policy_proto_goTypes, + DependencyIndexes: file_envoy_type_v3_hash_policy_proto_depIdxs, + MessageInfos: file_envoy_type_v3_hash_policy_proto_msgTypes, + }.Build() + File_envoy_type_v3_hash_policy_proto = out.File + file_envoy_type_v3_hash_policy_proto_rawDesc = nil + file_envoy_type_v3_hash_policy_proto_goTypes = nil + 
file_envoy_type_v3_hash_policy_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.validate.go new file mode 100644 index 0000000000..5ec37f5402 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy.pb.validate.go @@ -0,0 +1,451 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/v3/hash_policy.proto + +package typev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on HashPolicy with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HashPolicy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HashPolicy with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HashPolicyMultiError, or +// nil if none found. +func (m *HashPolicy) ValidateAll() error { + return m.validate(true) +} + +func (m *HashPolicy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofPolicySpecifierPresent := false + switch v := m.PolicySpecifier.(type) { + case *HashPolicy_SourceIp_: + if v == nil { + err := HashPolicyValidationError{ + field: "PolicySpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPolicySpecifierPresent = true + + if all { + switch v := interface{}(m.GetSourceIp()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HashPolicyValidationError{ + field: "SourceIp", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HashPolicyValidationError{ + field: "SourceIp", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSourceIp()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HashPolicyValidationError{ + field: "SourceIp", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *HashPolicy_FilterState_: + if v == nil { + err := HashPolicyValidationError{ + field: "PolicySpecifier", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofPolicySpecifierPresent = true + + if all { + switch v := interface{}(m.GetFilterState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HashPolicyValidationError{ + field: "FilterState", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + 
errors = append(errors, HashPolicyValidationError{ + field: "FilterState", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilterState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HashPolicyValidationError{ + field: "FilterState", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofPolicySpecifierPresent { + err := HashPolicyValidationError{ + field: "PolicySpecifier", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HashPolicyMultiError(errors) + } + + return nil +} + +// HashPolicyMultiError is an error wrapping multiple validation errors +// returned by HashPolicy.ValidateAll() if the designated constraints aren't met. +type HashPolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HashPolicyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HashPolicyMultiError) AllErrors() []error { return m } + +// HashPolicyValidationError is the validation error returned by +// HashPolicy.Validate if the designated constraints aren't met. +type HashPolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HashPolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HashPolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HashPolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HashPolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HashPolicyValidationError) ErrorName() string { return "HashPolicyValidationError" } + +// Error satisfies the builtin error interface +func (e HashPolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHashPolicy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HashPolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HashPolicyValidationError{} + +// Validate checks the field values on HashPolicy_SourceIp with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HashPolicy_SourceIp) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HashPolicy_SourceIp with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HashPolicy_SourceIpMultiError, or nil if none found. 
+func (m *HashPolicy_SourceIp) ValidateAll() error { + return m.validate(true) +} + +func (m *HashPolicy_SourceIp) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return HashPolicy_SourceIpMultiError(errors) + } + + return nil +} + +// HashPolicy_SourceIpMultiError is an error wrapping multiple validation +// errors returned by HashPolicy_SourceIp.ValidateAll() if the designated +// constraints aren't met. +type HashPolicy_SourceIpMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HashPolicy_SourceIpMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HashPolicy_SourceIpMultiError) AllErrors() []error { return m } + +// HashPolicy_SourceIpValidationError is the validation error returned by +// HashPolicy_SourceIp.Validate if the designated constraints aren't met. +type HashPolicy_SourceIpValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HashPolicy_SourceIpValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HashPolicy_SourceIpValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HashPolicy_SourceIpValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HashPolicy_SourceIpValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HashPolicy_SourceIpValidationError) ErrorName() string { + return "HashPolicy_SourceIpValidationError" +} + +// Error satisfies the builtin error interface +func (e HashPolicy_SourceIpValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHashPolicy_SourceIp.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HashPolicy_SourceIpValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HashPolicy_SourceIpValidationError{} + +// Validate checks the field values on HashPolicy_FilterState with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *HashPolicy_FilterState) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HashPolicy_FilterState with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HashPolicy_FilterStateMultiError, or nil if none found. 
+func (m *HashPolicy_FilterState) ValidateAll() error { + return m.validate(true) +} + +func (m *HashPolicy_FilterState) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetKey()) < 1 { + err := HashPolicy_FilterStateValidationError{ + field: "Key", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HashPolicy_FilterStateMultiError(errors) + } + + return nil +} + +// HashPolicy_FilterStateMultiError is an error wrapping multiple validation +// errors returned by HashPolicy_FilterState.ValidateAll() if the designated +// constraints aren't met. +type HashPolicy_FilterStateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HashPolicy_FilterStateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HashPolicy_FilterStateMultiError) AllErrors() []error { return m } + +// HashPolicy_FilterStateValidationError is the validation error returned by +// HashPolicy_FilterState.Validate if the designated constraints aren't met. +type HashPolicy_FilterStateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HashPolicy_FilterStateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HashPolicy_FilterStateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HashPolicy_FilterStateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HashPolicy_FilterStateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HashPolicy_FilterStateValidationError) ErrorName() string { + return "HashPolicy_FilterStateValidationError" +} + +// Error satisfies the builtin error interface +func (e HashPolicy_FilterStateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHashPolicy_FilterState.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HashPolicy_FilterStateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HashPolicy_FilterStateValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy_vtproto.pb.go new file mode 100644 index 0000000000..bcc1995968 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/hash_policy_vtproto.pb.go @@ -0,0 +1,251 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/v3/hash_policy.proto + +package typev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *HashPolicy_SourceIp) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashPolicy_SourceIp) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HashPolicy_SourceIp) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *HashPolicy_FilterState) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashPolicy_FilterState) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HashPolicy_FilterState) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HashPolicy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashPolicy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HashPolicy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.PolicySpecifier.(*HashPolicy_FilterState_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.PolicySpecifier.(*HashPolicy_SourceIp_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *HashPolicy_SourceIp_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HashPolicy_SourceIp_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SourceIp != nil { + size, err := m.SourceIp.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *HashPolicy_FilterState_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m 
*HashPolicy_FilterState_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FilterState != nil { + size, err := m.FilterState.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *HashPolicy_SourceIp) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *HashPolicy_FilterState) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HashPolicy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.PolicySpecifier.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *HashPolicy_SourceIp_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SourceIp != nil { + l = m.SourceIp.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *HashPolicy_FilterState_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FilterState != nil { + l = m.FilterState.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.go new file mode 100644 index 0000000000..74f4e24dfe --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.go @@ -0,0 +1,144 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/v3/http.proto + +package typev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CodecClientType int32 + +const ( + CodecClientType_HTTP1 CodecClientType = 0 + CodecClientType_HTTP2 CodecClientType = 1 + // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with + // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient + // to distinguish HTTP1 and HTTP2 traffic. + CodecClientType_HTTP3 CodecClientType = 2 +) + +// Enum value maps for CodecClientType. 
+var ( + CodecClientType_name = map[int32]string{ + 0: "HTTP1", + 1: "HTTP2", + 2: "HTTP3", + } + CodecClientType_value = map[string]int32{ + "HTTP1": 0, + "HTTP2": 1, + "HTTP3": 2, + } +) + +func (x CodecClientType) Enum() *CodecClientType { + p := new(CodecClientType) + *p = x + return p +} + +func (x CodecClientType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CodecClientType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_type_v3_http_proto_enumTypes[0].Descriptor() +} + +func (CodecClientType) Type() protoreflect.EnumType { + return &file_envoy_type_v3_http_proto_enumTypes[0] +} + +func (x CodecClientType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CodecClientType.Descriptor instead. +func (CodecClientType) EnumDescriptor() ([]byte, []int) { + return file_envoy_type_v3_http_proto_rawDescGZIP(), []int{0} +} + +var File_envoy_type_v3_http_proto protoreflect.FileDescriptor + +var file_envoy_type_v3_http_proto_rawDesc = []byte{ + 0x0a, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x68, 0x74, 0x74, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a, 0x32, 0x0a, 0x0f, 0x43, 0x6f, 0x64, 0x65, + 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x48, + 0x54, 0x54, 0x50, 0x31, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, + 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x33, 0x10, 0x02, 0x42, 0x6f, 0xba, 0x80, + 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x42, 0x09, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_v3_http_proto_rawDescOnce sync.Once + file_envoy_type_v3_http_proto_rawDescData = file_envoy_type_v3_http_proto_rawDesc +) + +func file_envoy_type_v3_http_proto_rawDescGZIP() []byte { + file_envoy_type_v3_http_proto_rawDescOnce.Do(func() { + file_envoy_type_v3_http_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_http_proto_rawDescData) + }) + return file_envoy_type_v3_http_proto_rawDescData +} + +var file_envoy_type_v3_http_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_type_v3_http_proto_goTypes = []interface{}{ + (CodecClientType)(0), // 0: envoy.type.v3.CodecClientType +} +var file_envoy_type_v3_http_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func 
init() { file_envoy_type_v3_http_proto_init() } +func file_envoy_type_v3_http_proto_init() { + if File_envoy_type_v3_http_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_v3_http_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_v3_http_proto_goTypes, + DependencyIndexes: file_envoy_type_v3_http_proto_depIdxs, + EnumInfos: file_envoy_type_v3_http_proto_enumTypes, + }.Build() + File_envoy_type_v3_http_proto = out.File + file_envoy_type_v3_http_proto_rawDesc = nil + file_envoy_type_v3_http_proto_goTypes = nil + file_envoy_type_v3_http_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.validate.go new file mode 100644 index 0000000000..e2c41e26f2 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http.pb.validate.go @@ -0,0 +1,37 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/v3/http.proto + +package typev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.go new file mode 100644 index 0000000000..f7e952b3a1 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.go @@ -0,0 +1,458 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/v3/http_status.proto + +package typev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// HTTP response codes supported in Envoy. +// For more details: https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml +type StatusCode int32 + +const ( + // Empty - This code not part of the HTTP status code specification, but it is needed for proto + // `enum` type. 
+ StatusCode_Empty StatusCode = 0 + StatusCode_Continue StatusCode = 100 + StatusCode_OK StatusCode = 200 + StatusCode_Created StatusCode = 201 + StatusCode_Accepted StatusCode = 202 + StatusCode_NonAuthoritativeInformation StatusCode = 203 + StatusCode_NoContent StatusCode = 204 + StatusCode_ResetContent StatusCode = 205 + StatusCode_PartialContent StatusCode = 206 + StatusCode_MultiStatus StatusCode = 207 + StatusCode_AlreadyReported StatusCode = 208 + StatusCode_IMUsed StatusCode = 226 + StatusCode_MultipleChoices StatusCode = 300 + StatusCode_MovedPermanently StatusCode = 301 + StatusCode_Found StatusCode = 302 + StatusCode_SeeOther StatusCode = 303 + StatusCode_NotModified StatusCode = 304 + StatusCode_UseProxy StatusCode = 305 + StatusCode_TemporaryRedirect StatusCode = 307 + StatusCode_PermanentRedirect StatusCode = 308 + StatusCode_BadRequest StatusCode = 400 + StatusCode_Unauthorized StatusCode = 401 + StatusCode_PaymentRequired StatusCode = 402 + StatusCode_Forbidden StatusCode = 403 + StatusCode_NotFound StatusCode = 404 + StatusCode_MethodNotAllowed StatusCode = 405 + StatusCode_NotAcceptable StatusCode = 406 + StatusCode_ProxyAuthenticationRequired StatusCode = 407 + StatusCode_RequestTimeout StatusCode = 408 + StatusCode_Conflict StatusCode = 409 + StatusCode_Gone StatusCode = 410 + StatusCode_LengthRequired StatusCode = 411 + StatusCode_PreconditionFailed StatusCode = 412 + StatusCode_PayloadTooLarge StatusCode = 413 + StatusCode_URITooLong StatusCode = 414 + StatusCode_UnsupportedMediaType StatusCode = 415 + StatusCode_RangeNotSatisfiable StatusCode = 416 + StatusCode_ExpectationFailed StatusCode = 417 + StatusCode_MisdirectedRequest StatusCode = 421 + StatusCode_UnprocessableEntity StatusCode = 422 + StatusCode_Locked StatusCode = 423 + StatusCode_FailedDependency StatusCode = 424 + StatusCode_UpgradeRequired StatusCode = 426 + StatusCode_PreconditionRequired StatusCode = 428 + StatusCode_TooManyRequests StatusCode = 429 + StatusCode_RequestHeaderFieldsTooLarge StatusCode = 431 + StatusCode_InternalServerError StatusCode = 500 + StatusCode_NotImplemented StatusCode = 501 + StatusCode_BadGateway StatusCode = 502 + StatusCode_ServiceUnavailable StatusCode = 503 + StatusCode_GatewayTimeout StatusCode = 504 + StatusCode_HTTPVersionNotSupported StatusCode = 505 + StatusCode_VariantAlsoNegotiates StatusCode = 506 + StatusCode_InsufficientStorage StatusCode = 507 + StatusCode_LoopDetected StatusCode = 508 + StatusCode_NotExtended StatusCode = 510 + StatusCode_NetworkAuthenticationRequired StatusCode = 511 +) + +// Enum value maps for StatusCode. 
+var ( + StatusCode_name = map[int32]string{ + 0: "Empty", + 100: "Continue", + 200: "OK", + 201: "Created", + 202: "Accepted", + 203: "NonAuthoritativeInformation", + 204: "NoContent", + 205: "ResetContent", + 206: "PartialContent", + 207: "MultiStatus", + 208: "AlreadyReported", + 226: "IMUsed", + 300: "MultipleChoices", + 301: "MovedPermanently", + 302: "Found", + 303: "SeeOther", + 304: "NotModified", + 305: "UseProxy", + 307: "TemporaryRedirect", + 308: "PermanentRedirect", + 400: "BadRequest", + 401: "Unauthorized", + 402: "PaymentRequired", + 403: "Forbidden", + 404: "NotFound", + 405: "MethodNotAllowed", + 406: "NotAcceptable", + 407: "ProxyAuthenticationRequired", + 408: "RequestTimeout", + 409: "Conflict", + 410: "Gone", + 411: "LengthRequired", + 412: "PreconditionFailed", + 413: "PayloadTooLarge", + 414: "URITooLong", + 415: "UnsupportedMediaType", + 416: "RangeNotSatisfiable", + 417: "ExpectationFailed", + 421: "MisdirectedRequest", + 422: "UnprocessableEntity", + 423: "Locked", + 424: "FailedDependency", + 426: "UpgradeRequired", + 428: "PreconditionRequired", + 429: "TooManyRequests", + 431: "RequestHeaderFieldsTooLarge", + 500: "InternalServerError", + 501: "NotImplemented", + 502: "BadGateway", + 503: "ServiceUnavailable", + 504: "GatewayTimeout", + 505: "HTTPVersionNotSupported", + 506: "VariantAlsoNegotiates", + 507: "InsufficientStorage", + 508: "LoopDetected", + 510: "NotExtended", + 511: "NetworkAuthenticationRequired", + } + StatusCode_value = map[string]int32{ + "Empty": 0, + "Continue": 100, + "OK": 200, + "Created": 201, + "Accepted": 202, + "NonAuthoritativeInformation": 203, + "NoContent": 204, + "ResetContent": 205, + "PartialContent": 206, + "MultiStatus": 207, + "AlreadyReported": 208, + "IMUsed": 226, + "MultipleChoices": 300, + "MovedPermanently": 301, + "Found": 302, + "SeeOther": 303, + "NotModified": 304, + "UseProxy": 305, + "TemporaryRedirect": 307, + "PermanentRedirect": 308, + "BadRequest": 400, + "Unauthorized": 401, + "PaymentRequired": 402, + "Forbidden": 403, + "NotFound": 404, + "MethodNotAllowed": 405, + "NotAcceptable": 406, + "ProxyAuthenticationRequired": 407, + "RequestTimeout": 408, + "Conflict": 409, + "Gone": 410, + "LengthRequired": 411, + "PreconditionFailed": 412, + "PayloadTooLarge": 413, + "URITooLong": 414, + "UnsupportedMediaType": 415, + "RangeNotSatisfiable": 416, + "ExpectationFailed": 417, + "MisdirectedRequest": 421, + "UnprocessableEntity": 422, + "Locked": 423, + "FailedDependency": 424, + "UpgradeRequired": 426, + "PreconditionRequired": 428, + "TooManyRequests": 429, + "RequestHeaderFieldsTooLarge": 431, + "InternalServerError": 500, + "NotImplemented": 501, + "BadGateway": 502, + "ServiceUnavailable": 503, + "GatewayTimeout": 504, + "HTTPVersionNotSupported": 505, + "VariantAlsoNegotiates": 506, + "InsufficientStorage": 507, + "LoopDetected": 508, + "NotExtended": 510, + "NetworkAuthenticationRequired": 511, + } +) + +func (x StatusCode) Enum() *StatusCode { + p := new(StatusCode) + *p = x + return p +} + +func (x StatusCode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (StatusCode) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_type_v3_http_status_proto_enumTypes[0].Descriptor() +} + +func (StatusCode) Type() protoreflect.EnumType { + return &file_envoy_type_v3_http_status_proto_enumTypes[0] +} + +func (x StatusCode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use StatusCode.Descriptor instead. 
+func (StatusCode) EnumDescriptor() ([]byte, []int) { + return file_envoy_type_v3_http_status_proto_rawDescGZIP(), []int{0} +} + +// HTTP status. +type HttpStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Supplies HTTP response code. + Code StatusCode `protobuf:"varint,1,opt,name=code,proto3,enum=envoy.type.v3.StatusCode" json:"code,omitempty"` +} + +func (x *HttpStatus) Reset() { + *x = HttpStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_http_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpStatus) ProtoMessage() {} + +func (x *HttpStatus) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_http_status_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpStatus.ProtoReflect.Descriptor instead. +func (*HttpStatus) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_http_status_proto_rawDescGZIP(), []int{0} +} + +func (x *HttpStatus) GetCode() StatusCode { + if x != nil { + return x.Code + } + return StatusCode_Empty +} + +var File_envoy_type_v3_http_status_proto protoreflect.FileDescriptor + +var file_envoy_type_v3_http_status_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, + 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x65, 0x0a, 0x0a, 0x48, + 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x39, 0x0a, 0x04, 0x63, 0x6f, 0x64, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, + 0x64, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x82, 0x01, 0x04, 0x10, 0x01, 0x20, 0x00, 0x52, 0x04, + 0x63, 0x6f, 0x64, 0x65, 0x3a, 0x1c, 0x9a, 0xc5, 0x88, 0x1e, 0x17, 0x0a, 0x15, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x2a, 0xb5, 0x09, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, + 0x65, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, + 0x43, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x10, 0x64, 0x12, 0x07, 0x0a, 0x02, 0x4f, 0x4b, + 0x10, 0xc8, 0x01, 0x12, 0x0c, 0x0a, 0x07, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x10, 0xc9, + 0x01, 0x12, 0x0d, 0x0a, 0x08, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x10, 0xca, 0x01, + 0x12, 0x20, 0x0a, 0x1b, 0x4e, 0x6f, 0x6e, 
0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, + 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x10, + 0xcb, 0x01, 0x12, 0x0e, 0x0a, 0x09, 0x4e, 0x6f, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x10, + 0xcc, 0x01, 0x12, 0x11, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x10, 0xcd, 0x01, 0x12, 0x13, 0x0a, 0x0e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x10, 0xce, 0x01, 0x12, 0x10, 0x0a, 0x0b, 0x4d, 0x75, + 0x6c, 0x74, 0x69, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x10, 0xcf, 0x01, 0x12, 0x14, 0x0a, 0x0f, + 0x41, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x10, + 0xd0, 0x01, 0x12, 0x0b, 0x0a, 0x06, 0x49, 0x4d, 0x55, 0x73, 0x65, 0x64, 0x10, 0xe2, 0x01, 0x12, + 0x14, 0x0a, 0x0f, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x43, 0x68, 0x6f, 0x69, 0x63, + 0x65, 0x73, 0x10, 0xac, 0x02, 0x12, 0x15, 0x0a, 0x10, 0x4d, 0x6f, 0x76, 0x65, 0x64, 0x50, 0x65, + 0x72, 0x6d, 0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x10, 0xad, 0x02, 0x12, 0x0a, 0x0a, 0x05, + 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x10, 0xae, 0x02, 0x12, 0x0d, 0x0a, 0x08, 0x53, 0x65, 0x65, 0x4f, + 0x74, 0x68, 0x65, 0x72, 0x10, 0xaf, 0x02, 0x12, 0x10, 0x0a, 0x0b, 0x4e, 0x6f, 0x74, 0x4d, 0x6f, + 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x10, 0xb0, 0x02, 0x12, 0x0d, 0x0a, 0x08, 0x55, 0x73, 0x65, + 0x50, 0x72, 0x6f, 0x78, 0x79, 0x10, 0xb1, 0x02, 0x12, 0x16, 0x0a, 0x11, 0x54, 0x65, 0x6d, 0x70, + 0x6f, 0x72, 0x61, 0x72, 0x79, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x10, 0xb3, 0x02, + 0x12, 0x16, 0x0a, 0x11, 0x50, 0x65, 0x72, 0x6d, 0x61, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x10, 0xb4, 0x02, 0x12, 0x0f, 0x0a, 0x0a, 0x42, 0x61, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x10, 0x90, 0x03, 0x12, 0x11, 0x0a, 0x0c, 0x55, 0x6e, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x10, 0x91, 0x03, 0x12, 0x14, 0x0a, 0x0f, + 0x50, 0x61, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x10, + 0x92, 0x03, 0x12, 0x0e, 0x0a, 0x09, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64, 0x64, 0x65, 0x6e, 0x10, + 0x93, 0x03, 0x12, 0x0d, 0x0a, 0x08, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x10, 0x94, + 0x03, 0x12, 0x15, 0x0a, 0x10, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x6f, 0x74, 0x41, 0x6c, + 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x10, 0x95, 0x03, 0x12, 0x12, 0x0a, 0x0d, 0x4e, 0x6f, 0x74, 0x41, + 0x63, 0x63, 0x65, 0x70, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x10, 0x96, 0x03, 0x12, 0x20, 0x0a, 0x1b, + 0x50, 0x72, 0x6f, 0x78, 0x79, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x10, 0x97, 0x03, 0x12, 0x13, + 0x0a, 0x0e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x10, 0x98, 0x03, 0x12, 0x0d, 0x0a, 0x08, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x10, + 0x99, 0x03, 0x12, 0x09, 0x0a, 0x04, 0x47, 0x6f, 0x6e, 0x65, 0x10, 0x9a, 0x03, 0x12, 0x13, 0x0a, + 0x0e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x10, + 0x9b, 0x03, 0x12, 0x17, 0x0a, 0x12, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x10, 0x9c, 0x03, 0x12, 0x14, 0x0a, 0x0f, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x6f, 0x6f, 0x4c, 0x61, 0x72, 0x67, 0x65, 0x10, 0x9d, + 0x03, 0x12, 0x0f, 0x0a, 0x0a, 0x55, 0x52, 0x49, 0x54, 0x6f, 0x6f, 
0x4c, 0x6f, 0x6e, 0x67, 0x10, + 0x9e, 0x03, 0x12, 0x19, 0x0a, 0x14, 0x55, 0x6e, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, + 0x64, 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x10, 0x9f, 0x03, 0x12, 0x18, 0x0a, + 0x13, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4e, 0x6f, 0x74, 0x53, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, + 0x61, 0x62, 0x6c, 0x65, 0x10, 0xa0, 0x03, 0x12, 0x16, 0x0a, 0x11, 0x45, 0x78, 0x70, 0x65, 0x63, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x10, 0xa1, 0x03, 0x12, + 0x17, 0x0a, 0x12, 0x4d, 0x69, 0x73, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x10, 0xa5, 0x03, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x6e, 0x70, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x10, + 0xa6, 0x03, 0x12, 0x0b, 0x0a, 0x06, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x10, 0xa7, 0x03, 0x12, + 0x15, 0x0a, 0x10, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, + 0x6e, 0x63, 0x79, 0x10, 0xa8, 0x03, 0x12, 0x14, 0x0a, 0x0f, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x10, 0xaa, 0x03, 0x12, 0x19, 0x0a, 0x14, + 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x64, 0x10, 0xac, 0x03, 0x12, 0x14, 0x0a, 0x0f, 0x54, 0x6f, 0x6f, 0x4d, 0x61, + 0x6e, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x10, 0xad, 0x03, 0x12, 0x20, 0x0a, + 0x1b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x73, 0x54, 0x6f, 0x6f, 0x4c, 0x61, 0x72, 0x67, 0x65, 0x10, 0xaf, 0x03, 0x12, + 0x18, 0x0a, 0x13, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x10, 0xf4, 0x03, 0x12, 0x13, 0x0a, 0x0e, 0x4e, 0x6f, 0x74, + 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x10, 0xf5, 0x03, 0x12, 0x0f, + 0x0a, 0x0a, 0x42, 0x61, 0x64, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x10, 0xf6, 0x03, 0x12, + 0x17, 0x0a, 0x12, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, + 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x10, 0xf7, 0x03, 0x12, 0x13, 0x0a, 0x0e, 0x47, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x10, 0xf8, 0x03, 0x12, 0x1c, 0x0a, + 0x17, 0x48, 0x54, 0x54, 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x10, 0xf9, 0x03, 0x12, 0x1a, 0x0a, 0x15, 0x56, + 0x61, 0x72, 0x69, 0x61, 0x6e, 0x74, 0x41, 0x6c, 0x73, 0x6f, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, + 0x61, 0x74, 0x65, 0x73, 0x10, 0xfa, 0x03, 0x12, 0x18, 0x0a, 0x13, 0x49, 0x6e, 0x73, 0x75, 0x66, + 0x66, 0x69, 0x63, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x10, 0xfb, + 0x03, 0x12, 0x11, 0x0a, 0x0c, 0x4c, 0x6f, 0x6f, 0x70, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x10, 0xfc, 0x03, 0x12, 0x10, 0x0a, 0x0b, 0x4e, 0x6f, 0x74, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x64, 0x65, 0x64, 0x10, 0xfe, 0x03, 0x12, 0x22, 0x0a, 0x1d, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x10, 0xff, 0x03, 0x42, 0x75, 0xba, 0x80, 0xc8, 0xd1, + 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 
0x76, + 0x33, 0x42, 0x0f, 0x48, 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x76, + 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_v3_http_status_proto_rawDescOnce sync.Once + file_envoy_type_v3_http_status_proto_rawDescData = file_envoy_type_v3_http_status_proto_rawDesc +) + +func file_envoy_type_v3_http_status_proto_rawDescGZIP() []byte { + file_envoy_type_v3_http_status_proto_rawDescOnce.Do(func() { + file_envoy_type_v3_http_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_http_status_proto_rawDescData) + }) + return file_envoy_type_v3_http_status_proto_rawDescData +} + +var file_envoy_type_v3_http_status_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_type_v3_http_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_type_v3_http_status_proto_goTypes = []interface{}{ + (StatusCode)(0), // 0: envoy.type.v3.StatusCode + (*HttpStatus)(nil), // 1: envoy.type.v3.HttpStatus +} +var file_envoy_type_v3_http_status_proto_depIdxs = []int32{ + 0, // 0: envoy.type.v3.HttpStatus.code:type_name -> envoy.type.v3.StatusCode + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_type_v3_http_status_proto_init() } +func file_envoy_type_v3_http_status_proto_init() { + if File_envoy_type_v3_http_status_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_v3_http_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_v3_http_status_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_v3_http_status_proto_goTypes, + DependencyIndexes: file_envoy_type_v3_http_status_proto_depIdxs, + EnumInfos: file_envoy_type_v3_http_status_proto_enumTypes, + MessageInfos: file_envoy_type_v3_http_status_proto_msgTypes, + }.Build() + File_envoy_type_v3_http_status_proto = out.File + file_envoy_type_v3_http_status_proto_rawDesc = nil + file_envoy_type_v3_http_status_proto_goTypes = nil + file_envoy_type_v3_http_status_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.validate.go new file mode 100644 index 0000000000..d3f76e9372 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status.pb.validate.go @@ -0,0 +1,162 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/type/v3/http_status.proto + +package typev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on HttpStatus with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HttpStatus) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HttpStatus with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HttpStatusMultiError, or +// nil if none found. +func (m *HttpStatus) ValidateAll() error { + return m.validate(true) +} + +func (m *HttpStatus) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if _, ok := _HttpStatus_Code_NotInLookup[m.GetCode()]; ok { + err := HttpStatusValidationError{ + field: "Code", + reason: "value must not be in list [Empty]", + } + if !all { + return err + } + errors = append(errors, err) + } + + if _, ok := StatusCode_name[int32(m.GetCode())]; !ok { + err := HttpStatusValidationError{ + field: "Code", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return HttpStatusMultiError(errors) + } + + return nil +} + +// HttpStatusMultiError is an error wrapping multiple validation errors +// returned by HttpStatus.ValidateAll() if the designated constraints aren't met. +type HttpStatusMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m HttpStatusMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HttpStatusMultiError) AllErrors() []error { return m } + +// HttpStatusValidationError is the validation error returned by +// HttpStatus.Validate if the designated constraints aren't met. +type HttpStatusValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HttpStatusValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HttpStatusValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HttpStatusValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HttpStatusValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e HttpStatusValidationError) ErrorName() string { return "HttpStatusValidationError" } + +// Error satisfies the builtin error interface +func (e HttpStatusValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHttpStatus.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HttpStatusValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HttpStatusValidationError{} + +var _HttpStatus_Code_NotInLookup = map[StatusCode]struct{}{ + 0: {}, +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status_vtproto.pb.go new file mode 100644 index 0000000000..f25340d846 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/http_status_vtproto.pb.go @@ -0,0 +1,70 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/v3/http_status.proto + +package typev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *HttpStatus) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpStatus) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *HttpStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Code != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HttpStatus) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Code != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Code)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.go new file mode 100644 index 0000000000..45eb66186d --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.go @@ -0,0 +1,316 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/v3/percent.proto + +package typev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Fraction percentages support several fixed denominator values. +type FractionalPercent_DenominatorType int32 + +const ( + // 100. + // + // **Example**: 1/100 = 1%. + FractionalPercent_HUNDRED FractionalPercent_DenominatorType = 0 + // 10,000. + // + // **Example**: 1/10000 = 0.01%. + FractionalPercent_TEN_THOUSAND FractionalPercent_DenominatorType = 1 + // 1,000,000. + // + // **Example**: 1/1000000 = 0.0001%. + FractionalPercent_MILLION FractionalPercent_DenominatorType = 2 +) + +// Enum value maps for FractionalPercent_DenominatorType. +var ( + FractionalPercent_DenominatorType_name = map[int32]string{ + 0: "HUNDRED", + 1: "TEN_THOUSAND", + 2: "MILLION", + } + FractionalPercent_DenominatorType_value = map[string]int32{ + "HUNDRED": 0, + "TEN_THOUSAND": 1, + "MILLION": 2, + } +) + +func (x FractionalPercent_DenominatorType) Enum() *FractionalPercent_DenominatorType { + p := new(FractionalPercent_DenominatorType) + *p = x + return p +} + +func (x FractionalPercent_DenominatorType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FractionalPercent_DenominatorType) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_type_v3_percent_proto_enumTypes[0].Descriptor() +} + +func (FractionalPercent_DenominatorType) Type() protoreflect.EnumType { + return &file_envoy_type_v3_percent_proto_enumTypes[0] +} + +func (x FractionalPercent_DenominatorType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FractionalPercent_DenominatorType.Descriptor instead. +func (FractionalPercent_DenominatorType) EnumDescriptor() ([]byte, []int) { + return file_envoy_type_v3_percent_proto_rawDescGZIP(), []int{1, 0} +} + +// Identifies a percentage, in the range [0.0, 100.0]. +type Percent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Percent) Reset() { + *x = Percent{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_percent_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Percent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Percent) ProtoMessage() {} + +func (x *Percent) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_percent_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Percent.ProtoReflect.Descriptor instead. +func (*Percent) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_percent_proto_rawDescGZIP(), []int{0} +} + +func (x *Percent) GetValue() float64 { + if x != nil { + return x.Value + } + return 0 +} + +// A fractional percentage is used in cases in which for performance reasons performing floating +// point to integer conversions during randomness calculations is undesirable. The message includes +// both a numerator and denominator that together determine the final fractional value. +// +// * **Example**: 1/100 = 1%. +// * **Example**: 3/10000 = 0.03%. 
+type FractionalPercent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the numerator. Defaults to 0. + Numerator uint32 `protobuf:"varint,1,opt,name=numerator,proto3" json:"numerator,omitempty"` + // Specifies the denominator. If the denominator specified is less than the numerator, the final + // fractional percentage is capped at 1 (100%). + Denominator FractionalPercent_DenominatorType `protobuf:"varint,2,opt,name=denominator,proto3,enum=envoy.type.v3.FractionalPercent_DenominatorType" json:"denominator,omitempty"` +} + +func (x *FractionalPercent) Reset() { + *x = FractionalPercent{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_percent_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FractionalPercent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FractionalPercent) ProtoMessage() {} + +func (x *FractionalPercent) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_percent_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FractionalPercent.ProtoReflect.Descriptor instead. +func (*FractionalPercent) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_percent_proto_rawDescGZIP(), []int{1} +} + +func (x *FractionalPercent) GetNumerator() uint32 { + if x != nil { + return x.Numerator + } + return 0 +} + +func (x *FractionalPercent) GetDenominator() FractionalPercent_DenominatorType { + if x != nil { + return x.Denominator + } + return FractionalPercent_HUNDRED +} + +var File_envoy_type_v3_percent_proto protoreflect.FileDescriptor + +var file_envoy_type_v3_percent_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, + 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, + 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x53, 0x0a, 0x07, 0x50, 0x65, 0x72, 0x63, 0x65, + 0x6e, 0x74, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x01, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59, + 0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x19, 0x9a, 0xc5, 0x88, 0x1e, 0x14, 0x0a, 0x12, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x22, 0xf3, 0x01, 0x0a, + 0x11, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, + 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x6e, 0x75, 0x6d, 
0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, + 0x12, 0x5c, 0x0a, 0x0b, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, + 0x74, 0x6f, 0x72, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, + 0x01, 0x52, 0x0b, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x22, 0x3d, + 0x0a, 0x0f, 0x44, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x55, 0x4e, 0x44, 0x52, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, + 0x0a, 0x0c, 0x54, 0x45, 0x4e, 0x5f, 0x54, 0x48, 0x4f, 0x55, 0x53, 0x41, 0x4e, 0x44, 0x10, 0x01, + 0x12, 0x0b, 0x0a, 0x07, 0x4d, 0x49, 0x4c, 0x4c, 0x49, 0x4f, 0x4e, 0x10, 0x02, 0x3a, 0x23, 0x9a, + 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, + 0x6e, 0x74, 0x42, 0x72, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1b, 0x69, 0x6f, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x50, 0x65, 0x72, 0x63, 0x65, + 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x3b, + 0x74, 0x79, 0x70, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_v3_percent_proto_rawDescOnce sync.Once + file_envoy_type_v3_percent_proto_rawDescData = file_envoy_type_v3_percent_proto_rawDesc +) + +func file_envoy_type_v3_percent_proto_rawDescGZIP() []byte { + file_envoy_type_v3_percent_proto_rawDescOnce.Do(func() { + file_envoy_type_v3_percent_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_percent_proto_rawDescData) + }) + return file_envoy_type_v3_percent_proto_rawDescData +} + +var file_envoy_type_v3_percent_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_type_v3_percent_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_type_v3_percent_proto_goTypes = []interface{}{ + (FractionalPercent_DenominatorType)(0), // 0: envoy.type.v3.FractionalPercent.DenominatorType + (*Percent)(nil), // 1: envoy.type.v3.Percent + (*FractionalPercent)(nil), // 2: envoy.type.v3.FractionalPercent +} +var file_envoy_type_v3_percent_proto_depIdxs = []int32{ + 0, // 0: envoy.type.v3.FractionalPercent.denominator:type_name -> envoy.type.v3.FractionalPercent.DenominatorType + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_envoy_type_v3_percent_proto_init() } +func file_envoy_type_v3_percent_proto_init() { + if File_envoy_type_v3_percent_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + 
file_envoy_type_v3_percent_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Percent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_v3_percent_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FractionalPercent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_v3_percent_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_v3_percent_proto_goTypes, + DependencyIndexes: file_envoy_type_v3_percent_proto_depIdxs, + EnumInfos: file_envoy_type_v3_percent_proto_enumTypes, + MessageInfos: file_envoy_type_v3_percent_proto_msgTypes, + }.Build() + File_envoy_type_v3_percent_proto = out.File + file_envoy_type_v3_percent_proto_rawDesc = nil + file_envoy_type_v3_percent_proto_goTypes = nil + file_envoy_type_v3_percent_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.validate.go new file mode 100644 index 0000000000..2929f39f81 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent.pb.validate.go @@ -0,0 +1,261 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/v3/percent.proto + +package typev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Percent with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Percent) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Percent with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in PercentMultiError, or nil if none found. +func (m *Percent) ValidateAll() error { + return m.validate(true) +} + +func (m *Percent) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if val := m.GetValue(); val < 0 || val > 100 { + err := PercentValidationError{ + field: "Value", + reason: "value must be inside range [0, 100]", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return PercentMultiError(errors) + } + + return nil +} + +// PercentMultiError is an error wrapping multiple validation errors returned +// by Percent.ValidateAll() if the designated constraints aren't met. +type PercentMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m PercentMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PercentMultiError) AllErrors() []error { return m } + +// PercentValidationError is the validation error returned by Percent.Validate +// if the designated constraints aren't met. +type PercentValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PercentValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PercentValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PercentValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PercentValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e PercentValidationError) ErrorName() string { return "PercentValidationError" } + +// Error satisfies the builtin error interface +func (e PercentValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPercent.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PercentValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PercentValidationError{} + +// Validate checks the field values on FractionalPercent with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *FractionalPercent) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FractionalPercent with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FractionalPercentMultiError, or nil if none found. +func (m *FractionalPercent) ValidateAll() error { + return m.validate(true) +} + +func (m *FractionalPercent) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Numerator + + if _, ok := FractionalPercent_DenominatorType_name[int32(m.GetDenominator())]; !ok { + err := FractionalPercentValidationError{ + field: "Denominator", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return FractionalPercentMultiError(errors) + } + + return nil +} + +// FractionalPercentMultiError is an error wrapping multiple validation errors +// returned by FractionalPercent.ValidateAll() if the designated constraints +// aren't met. +type FractionalPercentMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FractionalPercentMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FractionalPercentMultiError) AllErrors() []error { return m } + +// FractionalPercentValidationError is the validation error returned by +// FractionalPercent.Validate if the designated constraints aren't met. 
+type FractionalPercentValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FractionalPercentValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FractionalPercentValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FractionalPercentValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FractionalPercentValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FractionalPercentValidationError) ErrorName() string { + return "FractionalPercentValidationError" +} + +// Error satisfies the builtin error interface +func (e FractionalPercentValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFractionalPercent.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FractionalPercentValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FractionalPercentValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent_vtproto.pb.go new file mode 100644 index 0000000000..82c60c5d95 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/percent_vtproto.pb.go @@ -0,0 +1,132 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/v3/percent.proto + +package typev3 + +import ( + binary "encoding/binary" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Percent) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Percent) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Percent) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Value != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *FractionalPercent) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FractionalPercent) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FractionalPercent) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Denominator != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Denominator)) + i-- + dAtA[i] = 0x10 + } + if m.Numerator != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Numerator)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Percent) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + n += len(m.unknownFields) + return n +} + +func (m *FractionalPercent) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Numerator != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Numerator)) + } + if m.Denominator != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Denominator)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.go new file mode 100644 index 0000000000..63be48f3c7 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.go @@ -0,0 +1,324 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/v3/range.proto + +package typev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Specifies the int64 start and end of the range using half-open interval semantics [start, +// end). 
+type Int64Range struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // start of the range (inclusive) + Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` + // end of the range (exclusive) + End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` +} + +func (x *Int64Range) Reset() { + *x = Int64Range{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_range_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int64Range) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int64Range) ProtoMessage() {} + +func (x *Int64Range) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_range_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int64Range.ProtoReflect.Descriptor instead. +func (*Int64Range) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_range_proto_rawDescGZIP(), []int{0} +} + +func (x *Int64Range) GetStart() int64 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *Int64Range) GetEnd() int64 { + if x != nil { + return x.End + } + return 0 +} + +// Specifies the int32 start and end of the range using half-open interval semantics [start, +// end). +type Int32Range struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // start of the range (inclusive) + Start int32 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` + // end of the range (exclusive) + End int32 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` +} + +func (x *Int32Range) Reset() { + *x = Int32Range{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_range_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int32Range) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int32Range) ProtoMessage() {} + +func (x *Int32Range) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_range_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int32Range.ProtoReflect.Descriptor instead. +func (*Int32Range) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_range_proto_rawDescGZIP(), []int{1} +} + +func (x *Int32Range) GetStart() int32 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *Int32Range) GetEnd() int32 { + if x != nil { + return x.End + } + return 0 +} + +// Specifies the double start and end of the range using half-open interval semantics [start, +// end). 
+type DoubleRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // start of the range (inclusive) + Start float64 `protobuf:"fixed64,1,opt,name=start,proto3" json:"start,omitempty"` + // end of the range (exclusive) + End float64 `protobuf:"fixed64,2,opt,name=end,proto3" json:"end,omitempty"` +} + +func (x *DoubleRange) Reset() { + *x = DoubleRange{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_range_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DoubleRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DoubleRange) ProtoMessage() {} + +func (x *DoubleRange) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_range_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DoubleRange.ProtoReflect.Descriptor instead. +func (*DoubleRange) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_range_proto_rawDescGZIP(), []int{2} +} + +func (x *DoubleRange) GetStart() float64 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *DoubleRange) GetEnd() float64 { + if x != nil { + return x.End + } + return 0 +} + +var File_envoy_type_v3_range_proto protoreflect.FileDescriptor + +var file_envoy_type_v3_range_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x52, 0x0a, 0x0a, + 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, + 0x6e, 0x64, 0x3a, 0x1c, 0x9a, 0xc5, 0x88, 0x1e, 0x17, 0x0a, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x22, 0x52, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x3a, 0x1c, 0x9a, 0xc5, 0x88, 0x1e, 0x17, 0x0a, 0x15, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x22, 0x54, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 
0x65, 0x6e, 0x64, 0x3a, 0x1d, 0x9a, 0xc5, 0x88, + 0x1e, 0x18, 0x0a, 0x16, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, + 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x70, 0xba, 0x80, 0xc8, 0xd1, + 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, + 0x33, 0x42, 0x0a, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_v3_range_proto_rawDescOnce sync.Once + file_envoy_type_v3_range_proto_rawDescData = file_envoy_type_v3_range_proto_rawDesc +) + +func file_envoy_type_v3_range_proto_rawDescGZIP() []byte { + file_envoy_type_v3_range_proto_rawDescOnce.Do(func() { + file_envoy_type_v3_range_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_range_proto_rawDescData) + }) + return file_envoy_type_v3_range_proto_rawDescData +} + +var file_envoy_type_v3_range_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_envoy_type_v3_range_proto_goTypes = []interface{}{ + (*Int64Range)(nil), // 0: envoy.type.v3.Int64Range + (*Int32Range)(nil), // 1: envoy.type.v3.Int32Range + (*DoubleRange)(nil), // 2: envoy.type.v3.DoubleRange +} +var file_envoy_type_v3_range_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_type_v3_range_proto_init() } +func file_envoy_type_v3_range_proto_init() { + if File_envoy_type_v3_range_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_v3_range_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Int64Range); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_v3_range_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Int32Range); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_v3_range_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DoubleRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_v3_range_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_v3_range_proto_goTypes, + DependencyIndexes: file_envoy_type_v3_range_proto_depIdxs, + MessageInfos: file_envoy_type_v3_range_proto_msgTypes, + }.Build() + File_envoy_type_v3_range_proto = out.File + file_envoy_type_v3_range_proto_rawDesc = nil + 
file_envoy_type_v3_range_proto_goTypes = nil + file_envoy_type_v3_range_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.validate.go new file mode 100644 index 0000000000..6bf697e9bf --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range.pb.validate.go @@ -0,0 +1,346 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/v3/range.proto + +package typev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Int64Range with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Int64Range) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Int64Range with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in Int64RangeMultiError, or +// nil if none found. +func (m *Int64Range) ValidateAll() error { + return m.validate(true) +} + +func (m *Int64Range) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Start + + // no validation rules for End + + if len(errors) > 0 { + return Int64RangeMultiError(errors) + } + + return nil +} + +// Int64RangeMultiError is an error wrapping multiple validation errors +// returned by Int64Range.ValidateAll() if the designated constraints aren't met. +type Int64RangeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Int64RangeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Int64RangeMultiError) AllErrors() []error { return m } + +// Int64RangeValidationError is the validation error returned by +// Int64Range.Validate if the designated constraints aren't met. +type Int64RangeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Int64RangeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Int64RangeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Int64RangeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Int64RangeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e Int64RangeValidationError) ErrorName() string { return "Int64RangeValidationError" } + +// Error satisfies the builtin error interface +func (e Int64RangeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sInt64Range.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Int64RangeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Int64RangeValidationError{} + +// Validate checks the field values on Int32Range with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Int32Range) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Int32Range with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in Int32RangeMultiError, or +// nil if none found. +func (m *Int32Range) ValidateAll() error { + return m.validate(true) +} + +func (m *Int32Range) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Start + + // no validation rules for End + + if len(errors) > 0 { + return Int32RangeMultiError(errors) + } + + return nil +} + +// Int32RangeMultiError is an error wrapping multiple validation errors +// returned by Int32Range.ValidateAll() if the designated constraints aren't met. +type Int32RangeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Int32RangeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Int32RangeMultiError) AllErrors() []error { return m } + +// Int32RangeValidationError is the validation error returned by +// Int32Range.Validate if the designated constraints aren't met. +type Int32RangeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Int32RangeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Int32RangeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Int32RangeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Int32RangeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Int32RangeValidationError) ErrorName() string { return "Int32RangeValidationError" } + +// Error satisfies the builtin error interface +func (e Int32RangeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sInt32Range.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Int32RangeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Int32RangeValidationError{} + +// Validate checks the field values on DoubleRange with the rules defined in +// the proto definition for this message. 
If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *DoubleRange) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DoubleRange with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in DoubleRangeMultiError, or +// nil if none found. +func (m *DoubleRange) ValidateAll() error { + return m.validate(true) +} + +func (m *DoubleRange) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Start + + // no validation rules for End + + if len(errors) > 0 { + return DoubleRangeMultiError(errors) + } + + return nil +} + +// DoubleRangeMultiError is an error wrapping multiple validation errors +// returned by DoubleRange.ValidateAll() if the designated constraints aren't met. +type DoubleRangeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DoubleRangeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DoubleRangeMultiError) AllErrors() []error { return m } + +// DoubleRangeValidationError is the validation error returned by +// DoubleRange.Validate if the designated constraints aren't met. +type DoubleRangeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DoubleRangeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DoubleRangeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DoubleRangeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DoubleRangeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DoubleRangeValidationError) ErrorName() string { return "DoubleRangeValidationError" } + +// Error satisfies the builtin error interface +func (e DoubleRangeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDoubleRange.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DoubleRangeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DoubleRangeValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range_vtproto.pb.go new file mode 100644 index 0000000000..7309b8c14b --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/range_vtproto.pb.go @@ -0,0 +1,200 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/v3/range.proto + +package typev3 + +import ( + binary "encoding/binary" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Int64Range) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int64Range) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Int64Range) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.End != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.End)) + i-- + dAtA[i] = 0x10 + } + if m.Start != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Int32Range) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int32Range) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Int32Range) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.End != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.End)) + i-- + dAtA[i] = 0x10 + } + if m.Start != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *DoubleRange) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleRange) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DoubleRange) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.End != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.End)))) + i-- + dAtA[i] = 0x11 + } + if m.Start != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Start)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Int64Range) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Start != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Start)) + } + if m.End != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.End)) + } + n += len(m.unknownFields) + return n +} + +func (m *Int32Range) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Start != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Start)) + } + if m.End != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.End)) + } + n 
+= len(m.unknownFields) + return n +} + +func (m *DoubleRange) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Start != 0 { + n += 9 + } + if m.End != 0 { + n += 9 + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.go new file mode 100644 index 0000000000..e7663f294f --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.go @@ -0,0 +1,407 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/v3/ratelimit_strategy.proto + +package typev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/cncf/xds/go/xds/annotations/v3" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Choose between allow all and deny all. +type RateLimitStrategy_BlanketRule int32 + +const ( + RateLimitStrategy_ALLOW_ALL RateLimitStrategy_BlanketRule = 0 + RateLimitStrategy_DENY_ALL RateLimitStrategy_BlanketRule = 1 +) + +// Enum value maps for RateLimitStrategy_BlanketRule. +var ( + RateLimitStrategy_BlanketRule_name = map[int32]string{ + 0: "ALLOW_ALL", + 1: "DENY_ALL", + } + RateLimitStrategy_BlanketRule_value = map[string]int32{ + "ALLOW_ALL": 0, + "DENY_ALL": 1, + } +) + +func (x RateLimitStrategy_BlanketRule) Enum() *RateLimitStrategy_BlanketRule { + p := new(RateLimitStrategy_BlanketRule) + *p = x + return p +} + +func (x RateLimitStrategy_BlanketRule) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RateLimitStrategy_BlanketRule) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_type_v3_ratelimit_strategy_proto_enumTypes[0].Descriptor() +} + +func (RateLimitStrategy_BlanketRule) Type() protoreflect.EnumType { + return &file_envoy_type_v3_ratelimit_strategy_proto_enumTypes[0] +} + +func (x RateLimitStrategy_BlanketRule) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RateLimitStrategy_BlanketRule.Descriptor instead. 
+func (RateLimitStrategy_BlanketRule) EnumDescriptor() ([]byte, []int) { + return file_envoy_type_v3_ratelimit_strategy_proto_rawDescGZIP(), []int{0, 0} +} + +type RateLimitStrategy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Strategy: + // + // *RateLimitStrategy_BlanketRule_ + // *RateLimitStrategy_RequestsPerTimeUnit_ + // *RateLimitStrategy_TokenBucket + Strategy isRateLimitStrategy_Strategy `protobuf_oneof:"strategy"` +} + +func (x *RateLimitStrategy) Reset() { + *x = RateLimitStrategy{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_ratelimit_strategy_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimitStrategy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimitStrategy) ProtoMessage() {} + +func (x *RateLimitStrategy) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_ratelimit_strategy_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimitStrategy.ProtoReflect.Descriptor instead. +func (*RateLimitStrategy) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_ratelimit_strategy_proto_rawDescGZIP(), []int{0} +} + +func (m *RateLimitStrategy) GetStrategy() isRateLimitStrategy_Strategy { + if m != nil { + return m.Strategy + } + return nil +} + +func (x *RateLimitStrategy) GetBlanketRule() RateLimitStrategy_BlanketRule { + if x, ok := x.GetStrategy().(*RateLimitStrategy_BlanketRule_); ok { + return x.BlanketRule + } + return RateLimitStrategy_ALLOW_ALL +} + +func (x *RateLimitStrategy) GetRequestsPerTimeUnit() *RateLimitStrategy_RequestsPerTimeUnit { + if x, ok := x.GetStrategy().(*RateLimitStrategy_RequestsPerTimeUnit_); ok { + return x.RequestsPerTimeUnit + } + return nil +} + +func (x *RateLimitStrategy) GetTokenBucket() *TokenBucket { + if x, ok := x.GetStrategy().(*RateLimitStrategy_TokenBucket); ok { + return x.TokenBucket + } + return nil +} + +type isRateLimitStrategy_Strategy interface { + isRateLimitStrategy_Strategy() +} + +type RateLimitStrategy_BlanketRule_ struct { + // Allow or Deny the requests. + // If unset, allow all. + BlanketRule RateLimitStrategy_BlanketRule `protobuf:"varint,1,opt,name=blanket_rule,json=blanketRule,proto3,enum=envoy.type.v3.RateLimitStrategy_BlanketRule,oneof"` +} + +type RateLimitStrategy_RequestsPerTimeUnit_ struct { + // Best-effort limit of the number of requests per time unit, f.e. requests per second. + // Does not prescribe any specific rate limiting algorithm, see :ref:`RequestsPerTimeUnit + // ` for details. + RequestsPerTimeUnit *RateLimitStrategy_RequestsPerTimeUnit `protobuf:"bytes,2,opt,name=requests_per_time_unit,json=requestsPerTimeUnit,proto3,oneof"` +} + +type RateLimitStrategy_TokenBucket struct { + // Limit the requests by consuming tokens from the Token Bucket. + // Allow the same number of requests as the number of tokens available in + // the token bucket. 
+ TokenBucket *TokenBucket `protobuf:"bytes,3,opt,name=token_bucket,json=tokenBucket,proto3,oneof"` +} + +func (*RateLimitStrategy_BlanketRule_) isRateLimitStrategy_Strategy() {} + +func (*RateLimitStrategy_RequestsPerTimeUnit_) isRateLimitStrategy_Strategy() {} + +func (*RateLimitStrategy_TokenBucket) isRateLimitStrategy_Strategy() {} + +// Best-effort limit of the number of requests per time unit. +// +// Allows to specify the desired requests per second (RPS, QPS), requests per minute (QPM, RPM), +// etc., without specifying a rate limiting algorithm implementation. +// +// “RequestsPerTimeUnit“ strategy does not demand any specific rate limiting algorithm to be +// used (in contrast to the :ref:`TokenBucket `, +// for example). It implies that the implementation details of rate limiting algorithm are +// irrelevant as long as the configured number of "requests per time unit" is achieved. +// +// Note that the “TokenBucket“ is still a valid implementation of the “RequestsPerTimeUnit“ +// strategy, and may be chosen to enforce the rate limit. However, there's no guarantee it will be +// the “TokenBucket“ in particular, and not the Leaky Bucket, the Sliding Window, or any other +// rate limiting algorithm that fulfills the requirements. +type RateLimitStrategy_RequestsPerTimeUnit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The desired number of requests per :ref:`time_unit + // ` to allow. + // If set to “0“, deny all (equivalent to “BlanketRule.DENY_ALL“). + // + // .. note:: + // + // Note that the algorithm implementation determines the course of action for the requests + // over the limit. As long as the ``requests_per_time_unit`` converges on the desired value, + // it's allowed to treat this field as a soft-limit: allow bursts, redistribute the allowance + // over time, etc. + RequestsPerTimeUnit uint64 `protobuf:"varint,1,opt,name=requests_per_time_unit,json=requestsPerTimeUnit,proto3" json:"requests_per_time_unit,omitempty"` + // The unit of time. Ignored when :ref:`requests_per_time_unit + // ` + // is “0“ (deny all). + TimeUnit RateLimitUnit `protobuf:"varint,2,opt,name=time_unit,json=timeUnit,proto3,enum=envoy.type.v3.RateLimitUnit" json:"time_unit,omitempty"` +} + +func (x *RateLimitStrategy_RequestsPerTimeUnit) Reset() { + *x = RateLimitStrategy_RequestsPerTimeUnit{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_ratelimit_strategy_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimitStrategy_RequestsPerTimeUnit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimitStrategy_RequestsPerTimeUnit) ProtoMessage() {} + +func (x *RateLimitStrategy_RequestsPerTimeUnit) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_ratelimit_strategy_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimitStrategy_RequestsPerTimeUnit.ProtoReflect.Descriptor instead. 
+func (*RateLimitStrategy_RequestsPerTimeUnit) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_ratelimit_strategy_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *RateLimitStrategy_RequestsPerTimeUnit) GetRequestsPerTimeUnit() uint64 { + if x != nil { + return x.RequestsPerTimeUnit + } + return 0 +} + +func (x *RateLimitStrategy_RequestsPerTimeUnit) GetTimeUnit() RateLimitUnit { + if x != nil { + return x.TimeUnit + } + return RateLimitUnit_UNKNOWN +} + +var File_envoy_type_v3_ratelimit_strategy_proto protoreflect.FileDescriptor + +var file_envoy_type_v3_ratelimit_strategy_proto_rawDesc = []byte{ + 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x72, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, + 0x67, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, + 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, + 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xed, 0x03, 0x0a, 0x11, 0x52, 0x61, 0x74, 0x65, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x5b, 0x0a, 0x0c, + 0x62, 0x6c, 0x61, 0x6e, 0x6b, 0x65, 0x74, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x74, 0x72, 0x61, + 0x74, 0x65, 0x67, 0x79, 0x2e, 0x42, 0x6c, 0x61, 0x6e, 0x6b, 0x65, 0x74, 0x52, 0x75, 0x6c, 0x65, + 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x62, 0x6c, + 0x61, 0x6e, 0x6b, 0x65, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x6b, 0x0a, 0x16, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, + 0x6e, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x48, + 0x00, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x54, 0x69, + 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x12, 0x3f, 0x0a, 0x0c, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x6f, 0x6b, 
+ 0x65, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x1a, 0x8f, 0x01, 0x0a, 0x13, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x12, + 0x33, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, + 0x55, 0x6e, 0x69, 0x74, 0x12, 0x43, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x55, 0x6e, 0x69, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x74, 0x22, 0x2a, 0x0a, 0x0b, 0x42, 0x6c, 0x61, + 0x6e, 0x6b, 0x65, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x41, 0x4c, 0x4c, 0x4f, + 0x57, 0x5f, 0x41, 0x4c, 0x4c, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x4e, 0x59, 0x5f, + 0x41, 0x4c, 0x4c, 0x10, 0x01, 0x42, 0x0f, 0x0a, 0x08, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, + 0x79, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x84, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, + 0x02, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x16, 0x52, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_v3_ratelimit_strategy_proto_rawDescOnce sync.Once + file_envoy_type_v3_ratelimit_strategy_proto_rawDescData = file_envoy_type_v3_ratelimit_strategy_proto_rawDesc +) + +func file_envoy_type_v3_ratelimit_strategy_proto_rawDescGZIP() []byte { + file_envoy_type_v3_ratelimit_strategy_proto_rawDescOnce.Do(func() { + file_envoy_type_v3_ratelimit_strategy_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_ratelimit_strategy_proto_rawDescData) + }) + return file_envoy_type_v3_ratelimit_strategy_proto_rawDescData +} + +var file_envoy_type_v3_ratelimit_strategy_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_type_v3_ratelimit_strategy_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_envoy_type_v3_ratelimit_strategy_proto_goTypes = []interface{}{ + (RateLimitStrategy_BlanketRule)(0), // 0: envoy.type.v3.RateLimitStrategy.BlanketRule + (*RateLimitStrategy)(nil), // 1: envoy.type.v3.RateLimitStrategy + (*RateLimitStrategy_RequestsPerTimeUnit)(nil), // 2: envoy.type.v3.RateLimitStrategy.RequestsPerTimeUnit + (*TokenBucket)(nil), // 3: envoy.type.v3.TokenBucket + (RateLimitUnit)(0), // 4: envoy.type.v3.RateLimitUnit +} +var file_envoy_type_v3_ratelimit_strategy_proto_depIdxs = []int32{ + 0, // 
0: envoy.type.v3.RateLimitStrategy.blanket_rule:type_name -> envoy.type.v3.RateLimitStrategy.BlanketRule + 2, // 1: envoy.type.v3.RateLimitStrategy.requests_per_time_unit:type_name -> envoy.type.v3.RateLimitStrategy.RequestsPerTimeUnit + 3, // 2: envoy.type.v3.RateLimitStrategy.token_bucket:type_name -> envoy.type.v3.TokenBucket + 4, // 3: envoy.type.v3.RateLimitStrategy.RequestsPerTimeUnit.time_unit:type_name -> envoy.type.v3.RateLimitUnit + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_envoy_type_v3_ratelimit_strategy_proto_init() } +func file_envoy_type_v3_ratelimit_strategy_proto_init() { + if File_envoy_type_v3_ratelimit_strategy_proto != nil { + return + } + file_envoy_type_v3_ratelimit_unit_proto_init() + file_envoy_type_v3_token_bucket_proto_init() + if !protoimpl.UnsafeEnabled { + file_envoy_type_v3_ratelimit_strategy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimitStrategy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_envoy_type_v3_ratelimit_strategy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimitStrategy_RequestsPerTimeUnit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_envoy_type_v3_ratelimit_strategy_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*RateLimitStrategy_BlanketRule_)(nil), + (*RateLimitStrategy_RequestsPerTimeUnit_)(nil), + (*RateLimitStrategy_TokenBucket)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_v3_ratelimit_strategy_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_v3_ratelimit_strategy_proto_goTypes, + DependencyIndexes: file_envoy_type_v3_ratelimit_strategy_proto_depIdxs, + EnumInfos: file_envoy_type_v3_ratelimit_strategy_proto_enumTypes, + MessageInfos: file_envoy_type_v3_ratelimit_strategy_proto_msgTypes, + }.Build() + File_envoy_type_v3_ratelimit_strategy_proto = out.File + file_envoy_type_v3_ratelimit_strategy_proto_rawDesc = nil + file_envoy_type_v3_ratelimit_strategy_proto_goTypes = nil + file_envoy_type_v3_ratelimit_strategy_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.validate.go new file mode 100644 index 0000000000..eebce17eac --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy.pb.validate.go @@ -0,0 +1,381 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/type/v3/ratelimit_strategy.proto + +package typev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on RateLimitStrategy with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *RateLimitStrategy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimitStrategy with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RateLimitStrategyMultiError, or nil if none found. +func (m *RateLimitStrategy) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimitStrategy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + oneofStrategyPresent := false + switch v := m.Strategy.(type) { + case *RateLimitStrategy_BlanketRule_: + if v == nil { + err := RateLimitStrategyValidationError{ + field: "Strategy", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofStrategyPresent = true + + if _, ok := RateLimitStrategy_BlanketRule_name[int32(m.GetBlanketRule())]; !ok { + err := RateLimitStrategyValidationError{ + field: "BlanketRule", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + case *RateLimitStrategy_RequestsPerTimeUnit_: + if v == nil { + err := RateLimitStrategyValidationError{ + field: "Strategy", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofStrategyPresent = true + + if all { + switch v := interface{}(m.GetRequestsPerTimeUnit()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimitStrategyValidationError{ + field: "RequestsPerTimeUnit", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimitStrategyValidationError{ + field: "RequestsPerTimeUnit", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRequestsPerTimeUnit()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimitStrategyValidationError{ + field: "RequestsPerTimeUnit", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *RateLimitStrategy_TokenBucket: + if v == nil { + err := RateLimitStrategyValidationError{ + field: "Strategy", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofStrategyPresent = true + + if all { + switch v := interface{}(m.GetTokenBucket()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RateLimitStrategyValidationError{ + 
field: "TokenBucket", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RateLimitStrategyValidationError{ + field: "TokenBucket", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTokenBucket()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RateLimitStrategyValidationError{ + field: "TokenBucket", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofStrategyPresent { + err := RateLimitStrategyValidationError{ + field: "Strategy", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RateLimitStrategyMultiError(errors) + } + + return nil +} + +// RateLimitStrategyMultiError is an error wrapping multiple validation errors +// returned by RateLimitStrategy.ValidateAll() if the designated constraints +// aren't met. +type RateLimitStrategyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimitStrategyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimitStrategyMultiError) AllErrors() []error { return m } + +// RateLimitStrategyValidationError is the validation error returned by +// RateLimitStrategy.Validate if the designated constraints aren't met. +type RateLimitStrategyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimitStrategyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimitStrategyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimitStrategyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimitStrategyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RateLimitStrategyValidationError) ErrorName() string { + return "RateLimitStrategyValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimitStrategyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimitStrategy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimitStrategyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimitStrategyValidationError{} + +// Validate checks the field values on RateLimitStrategy_RequestsPerTimeUnit +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *RateLimitStrategy_RequestsPerTimeUnit) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RateLimitStrategy_RequestsPerTimeUnit +// with the rules defined in the proto definition for this message. 
If any +// rules are violated, the result is a list of violation errors wrapped in +// RateLimitStrategy_RequestsPerTimeUnitMultiError, or nil if none found. +func (m *RateLimitStrategy_RequestsPerTimeUnit) ValidateAll() error { + return m.validate(true) +} + +func (m *RateLimitStrategy_RequestsPerTimeUnit) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for RequestsPerTimeUnit + + if _, ok := RateLimitUnit_name[int32(m.GetTimeUnit())]; !ok { + err := RateLimitStrategy_RequestsPerTimeUnitValidationError{ + field: "TimeUnit", + reason: "value must be one of the defined enum values", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return RateLimitStrategy_RequestsPerTimeUnitMultiError(errors) + } + + return nil +} + +// RateLimitStrategy_RequestsPerTimeUnitMultiError is an error wrapping +// multiple validation errors returned by +// RateLimitStrategy_RequestsPerTimeUnit.ValidateAll() if the designated +// constraints aren't met. +type RateLimitStrategy_RequestsPerTimeUnitMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RateLimitStrategy_RequestsPerTimeUnitMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RateLimitStrategy_RequestsPerTimeUnitMultiError) AllErrors() []error { return m } + +// RateLimitStrategy_RequestsPerTimeUnitValidationError is the validation error +// returned by RateLimitStrategy_RequestsPerTimeUnit.Validate if the +// designated constraints aren't met. +type RateLimitStrategy_RequestsPerTimeUnitValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RateLimitStrategy_RequestsPerTimeUnitValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RateLimitStrategy_RequestsPerTimeUnitValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RateLimitStrategy_RequestsPerTimeUnitValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RateLimitStrategy_RequestsPerTimeUnitValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RateLimitStrategy_RequestsPerTimeUnitValidationError) ErrorName() string { + return "RateLimitStrategy_RequestsPerTimeUnitValidationError" +} + +// Error satisfies the builtin error interface +func (e RateLimitStrategy_RequestsPerTimeUnitValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRateLimitStrategy_RequestsPerTimeUnit.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RateLimitStrategy_RequestsPerTimeUnitValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RateLimitStrategy_RequestsPerTimeUnitValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy_vtproto.pb.go new file mode 100644 index 0000000000..c35990b767 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_strategy_vtproto.pb.go @@ -0,0 +1,241 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/v3/ratelimit_strategy.proto + +package typev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *RateLimitStrategy_RequestsPerTimeUnit) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimitStrategy_RequestsPerTimeUnit) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimitStrategy_RequestsPerTimeUnit) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TimeUnit != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TimeUnit)) + i-- + dAtA[i] = 0x10 + } + if m.RequestsPerTimeUnit != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RequestsPerTimeUnit)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RateLimitStrategy) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimitStrategy) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimitStrategy) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if msg, ok := m.Strategy.(*RateLimitStrategy_TokenBucket); ok { + 
size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Strategy.(*RateLimitStrategy_RequestsPerTimeUnit_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if msg, ok := m.Strategy.(*RateLimitStrategy_BlanketRule_); ok { + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *RateLimitStrategy_BlanketRule_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimitStrategy_BlanketRule_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.BlanketRule)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *RateLimitStrategy_RequestsPerTimeUnit_) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimitStrategy_RequestsPerTimeUnit_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RequestsPerTimeUnit != nil { + size, err := m.RequestsPerTimeUnit.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *RateLimitStrategy_TokenBucket) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *RateLimitStrategy_TokenBucket) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TokenBucket != nil { + size, err := m.TokenBucket.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RateLimitStrategy_RequestsPerTimeUnit) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequestsPerTimeUnit != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.RequestsPerTimeUnit)) + } + if m.TimeUnit != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TimeUnit)) + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimitStrategy) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Strategy.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *RateLimitStrategy_BlanketRule_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.BlanketRule)) + return n +} +func (m *RateLimitStrategy_RequestsPerTimeUnit_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequestsPerTimeUnit != nil { + l = m.RequestsPerTimeUnit.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} +func (m *RateLimitStrategy_TokenBucket) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TokenBucket != nil { + l = m.TokenBucket.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 2 + } + return n +} diff --git 
a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.go new file mode 100644 index 0000000000..3686888888 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.go @@ -0,0 +1,165 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/v3/ratelimit_unit.proto + +package typev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Identifies the unit of of time for rate limit. +type RateLimitUnit int32 + +const ( + // The time unit is not known. + RateLimitUnit_UNKNOWN RateLimitUnit = 0 + // The time unit representing a second. + RateLimitUnit_SECOND RateLimitUnit = 1 + // The time unit representing a minute. + RateLimitUnit_MINUTE RateLimitUnit = 2 + // The time unit representing an hour. + RateLimitUnit_HOUR RateLimitUnit = 3 + // The time unit representing a day. + RateLimitUnit_DAY RateLimitUnit = 4 + // The time unit representing a month. + RateLimitUnit_MONTH RateLimitUnit = 5 + // The time unit representing a year. + RateLimitUnit_YEAR RateLimitUnit = 6 +) + +// Enum value maps for RateLimitUnit. +var ( + RateLimitUnit_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SECOND", + 2: "MINUTE", + 3: "HOUR", + 4: "DAY", + 5: "MONTH", + 6: "YEAR", + } + RateLimitUnit_value = map[string]int32{ + "UNKNOWN": 0, + "SECOND": 1, + "MINUTE": 2, + "HOUR": 3, + "DAY": 4, + "MONTH": 5, + "YEAR": 6, + } +) + +func (x RateLimitUnit) Enum() *RateLimitUnit { + p := new(RateLimitUnit) + *p = x + return p +} + +func (x RateLimitUnit) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RateLimitUnit) Descriptor() protoreflect.EnumDescriptor { + return file_envoy_type_v3_ratelimit_unit_proto_enumTypes[0].Descriptor() +} + +func (RateLimitUnit) Type() protoreflect.EnumType { + return &file_envoy_type_v3_ratelimit_unit_proto_enumTypes[0] +} + +func (x RateLimitUnit) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RateLimitUnit.Descriptor instead. 
+func (RateLimitUnit) EnumDescriptor() ([]byte, []int) { + return file_envoy_type_v3_ratelimit_unit_proto_rawDescGZIP(), []int{0} +} + +var File_envoy_type_v3_ratelimit_unit_proto protoreflect.FileDescriptor + +var file_envoy_type_v3_ratelimit_unit_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x72, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2a, 0x5c, 0x0a, 0x0d, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x55, + 0x6e, 0x69, 0x74, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, + 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, + 0x4d, 0x49, 0x4e, 0x55, 0x54, 0x45, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x4f, 0x55, 0x52, + 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x41, 0x59, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x4d, + 0x4f, 0x4e, 0x54, 0x48, 0x10, 0x05, 0x12, 0x08, 0x0a, 0x04, 0x59, 0x45, 0x41, 0x52, 0x10, 0x06, + 0x42, 0x78, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x12, 0x52, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x55, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_envoy_type_v3_ratelimit_unit_proto_rawDescOnce sync.Once + file_envoy_type_v3_ratelimit_unit_proto_rawDescData = file_envoy_type_v3_ratelimit_unit_proto_rawDesc +) + +func file_envoy_type_v3_ratelimit_unit_proto_rawDescGZIP() []byte { + file_envoy_type_v3_ratelimit_unit_proto_rawDescOnce.Do(func() { + file_envoy_type_v3_ratelimit_unit_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_ratelimit_unit_proto_rawDescData) + }) + return file_envoy_type_v3_ratelimit_unit_proto_rawDescData +} + +var file_envoy_type_v3_ratelimit_unit_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_envoy_type_v3_ratelimit_unit_proto_goTypes = []interface{}{ + (RateLimitUnit)(0), // 0: envoy.type.v3.RateLimitUnit +} +var file_envoy_type_v3_ratelimit_unit_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_type_v3_ratelimit_unit_proto_init() } +func file_envoy_type_v3_ratelimit_unit_proto_init() { + if File_envoy_type_v3_ratelimit_unit_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_envoy_type_v3_ratelimit_unit_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_v3_ratelimit_unit_proto_goTypes, + DependencyIndexes: file_envoy_type_v3_ratelimit_unit_proto_depIdxs, + EnumInfos: file_envoy_type_v3_ratelimit_unit_proto_enumTypes, + }.Build() + File_envoy_type_v3_ratelimit_unit_proto = out.File + file_envoy_type_v3_ratelimit_unit_proto_rawDesc = nil + file_envoy_type_v3_ratelimit_unit_proto_goTypes = nil + file_envoy_type_v3_ratelimit_unit_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.validate.go new file mode 100644 index 0000000000..17658400ee --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/ratelimit_unit.pb.validate.go @@ -0,0 +1,37 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/v3/ratelimit_unit.proto + +package typev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.go new file mode 100644 index 0000000000..630e6567c4 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.go @@ -0,0 +1,181 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/v3/semantic_version.proto + +package typev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Envoy uses SemVer (https://semver.org/). Major/minor versions indicate +// expected behaviors and APIs, the patch version field is used only +// for security fixes and can be generally ignored. 
+type SemanticVersion struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MajorNumber uint32 `protobuf:"varint,1,opt,name=major_number,json=majorNumber,proto3" json:"major_number,omitempty"` + MinorNumber uint32 `protobuf:"varint,2,opt,name=minor_number,json=minorNumber,proto3" json:"minor_number,omitempty"` + Patch uint32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"` +} + +func (x *SemanticVersion) Reset() { + *x = SemanticVersion{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_semantic_version_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SemanticVersion) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SemanticVersion) ProtoMessage() {} + +func (x *SemanticVersion) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_semantic_version_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SemanticVersion.ProtoReflect.Descriptor instead. +func (*SemanticVersion) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_semantic_version_proto_rawDescGZIP(), []int{0} +} + +func (x *SemanticVersion) GetMajorNumber() uint32 { + if x != nil { + return x.MajorNumber + } + return 0 +} + +func (x *SemanticVersion) GetMinorNumber() uint32 { + if x != nil { + return x.MinorNumber + } + return 0 +} + +func (x *SemanticVersion) GetPatch() uint32 { + if x != nil { + return x.Patch + } + return 0 +} + +var File_envoy_type_v3_semantic_version_proto protoreflect.FileDescriptor + +var file_envoy_type_v3_semantic_version_proto_rawDesc = []byte{ + 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, + 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x90, 0x01, 0x0a, 0x0f, 0x53, 0x65, 0x6d, 0x61, + 0x6e, 0x74, 0x69, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x6d, + 0x61, 0x6a, 0x6f, 0x72, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x0b, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x21, + 0x0a, 0x0c, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x4e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x3a, 0x21, 0x9a, 0xc5, 0x88, 0x1e, 0x1c, 0x0a, 0x1a, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, + 0x74, 0x69, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x7a, 0xba, 0x80, 0xc8, 0xd1, + 0x06, 0x02, 0x10, 0x02, 0x0a, 
0x1b, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, + 0x33, 0x42, 0x14, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x3b, + 0x74, 0x79, 0x70, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_v3_semantic_version_proto_rawDescOnce sync.Once + file_envoy_type_v3_semantic_version_proto_rawDescData = file_envoy_type_v3_semantic_version_proto_rawDesc +) + +func file_envoy_type_v3_semantic_version_proto_rawDescGZIP() []byte { + file_envoy_type_v3_semantic_version_proto_rawDescOnce.Do(func() { + file_envoy_type_v3_semantic_version_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_semantic_version_proto_rawDescData) + }) + return file_envoy_type_v3_semantic_version_proto_rawDescData +} + +var file_envoy_type_v3_semantic_version_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_type_v3_semantic_version_proto_goTypes = []interface{}{ + (*SemanticVersion)(nil), // 0: envoy.type.v3.SemanticVersion +} +var file_envoy_type_v3_semantic_version_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_envoy_type_v3_semantic_version_proto_init() } +func file_envoy_type_v3_semantic_version_proto_init() { + if File_envoy_type_v3_semantic_version_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_v3_semantic_version_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SemanticVersion); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_v3_semantic_version_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_v3_semantic_version_proto_goTypes, + DependencyIndexes: file_envoy_type_v3_semantic_version_proto_depIdxs, + MessageInfos: file_envoy_type_v3_semantic_version_proto_msgTypes, + }.Build() + File_envoy_type_v3_semantic_version_proto = out.File + file_envoy_type_v3_semantic_version_proto_rawDesc = nil + file_envoy_type_v3_semantic_version_proto_goTypes = nil + file_envoy_type_v3_semantic_version_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.validate.go new file mode 100644 index 0000000000..af3b6ee415 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version.pb.validate.go @@ -0,0 +1,143 @@ +//go:build !disable_pgv +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: envoy/type/v3/semantic_version.proto + +package typev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on SemanticVersion with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *SemanticVersion) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SemanticVersion with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SemanticVersionMultiError, or nil if none found. +func (m *SemanticVersion) ValidateAll() error { + return m.validate(true) +} + +func (m *SemanticVersion) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for MajorNumber + + // no validation rules for MinorNumber + + // no validation rules for Patch + + if len(errors) > 0 { + return SemanticVersionMultiError(errors) + } + + return nil +} + +// SemanticVersionMultiError is an error wrapping multiple validation errors +// returned by SemanticVersion.ValidateAll() if the designated constraints +// aren't met. +type SemanticVersionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SemanticVersionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SemanticVersionMultiError) AllErrors() []error { return m } + +// SemanticVersionValidationError is the validation error returned by +// SemanticVersion.Validate if the designated constraints aren't met. +type SemanticVersionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SemanticVersionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SemanticVersionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SemanticVersionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SemanticVersionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e SemanticVersionValidationError) ErrorName() string { return "SemanticVersionValidationError" } + +// Error satisfies the builtin error interface +func (e SemanticVersionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSemanticVersion.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SemanticVersionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SemanticVersionValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version_vtproto.pb.go new file mode 100644 index 0000000000..13810fe82a --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/semantic_version_vtproto.pb.go @@ -0,0 +1,86 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/v3/semantic_version.proto + +package typev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *SemanticVersion) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SemanticVersion) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *SemanticVersion) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Patch != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Patch)) + i-- + dAtA[i] = 0x18 + } + if m.MinorNumber != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MinorNumber)) + i-- + dAtA[i] = 0x10 + } + if m.MajorNumber != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MajorNumber)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SemanticVersion) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MajorNumber != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MajorNumber)) + } + if m.MinorNumber != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MinorNumber)) + } + if m.Patch != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Patch)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.go new file mode 100644 index 0000000000..9c21f24541 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.go @@ -0,0 +1,204 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v5.26.1 +// source: envoy/type/v3/token_bucket.proto + +package typev3 + +import ( + _ "github.com/cncf/xds/go/udpa/annotations" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Configures a token bucket, typically used for rate limiting. +type TokenBucket struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The maximum tokens that the bucket can hold. This is also the number of tokens that the bucket + // initially contains. + MaxTokens uint32 `protobuf:"varint,1,opt,name=max_tokens,json=maxTokens,proto3" json:"max_tokens,omitempty"` + // The number of tokens added to the bucket during each fill interval. If not specified, defaults + // to a single token. + TokensPerFill *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=tokens_per_fill,json=tokensPerFill,proto3" json:"tokens_per_fill,omitempty"` + // The fill interval that tokens are added to the bucket. During each fill interval + // “tokens_per_fill“ are added to the bucket. The bucket will never contain more than + // “max_tokens“ tokens. + FillInterval *durationpb.Duration `protobuf:"bytes,3,opt,name=fill_interval,json=fillInterval,proto3" json:"fill_interval,omitempty"` +} + +func (x *TokenBucket) Reset() { + *x = TokenBucket{} + if protoimpl.UnsafeEnabled { + mi := &file_envoy_type_v3_token_bucket_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TokenBucket) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TokenBucket) ProtoMessage() {} + +func (x *TokenBucket) ProtoReflect() protoreflect.Message { + mi := &file_envoy_type_v3_token_bucket_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TokenBucket.ProtoReflect.Descriptor instead. 
+func (*TokenBucket) Descriptor() ([]byte, []int) { + return file_envoy_type_v3_token_bucket_proto_rawDescGZIP(), []int{0} +} + +func (x *TokenBucket) GetMaxTokens() uint32 { + if x != nil { + return x.MaxTokens + } + return 0 +} + +func (x *TokenBucket) GetTokensPerFill() *wrapperspb.UInt32Value { + if x != nil { + return x.TokensPerFill + } + return nil +} + +func (x *TokenBucket) GetFillInterval() *durationpb.Duration { + if x != nil { + return x.FillInterval + } + return nil +} + +var File_envoy_type_v3_token_bucket_proto protoreflect.FileDescriptor + +var file_envoy_type_v3_token_bucket_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, + 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xef, 0x01, 0x0a, + 0x0b, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x0a, 0x0a, + 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x20, 0x00, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x4d, 0x0a, 0x0f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x5f, 0x70, + 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x2a, 0x02, 0x20, 0x00, 0x52, 0x0d, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x50, 0x65, 0x72, 0x46, + 0x69, 0x6c, 0x6c, 0x12, 0x4a, 0x0a, 0x0d, 0x66, 0x69, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08, 0x01, 0x2a, + 0x00, 0x52, 0x0c, 0x66, 0x69, 0x6c, 0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x3a, + 0x1d, 0x9a, 0xc5, 0x88, 0x1e, 0x18, 0x0a, 0x16, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x42, 0x76, + 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 
0x74, 0x79, + 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x3b, + 0x74, 0x79, 0x70, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_envoy_type_v3_token_bucket_proto_rawDescOnce sync.Once + file_envoy_type_v3_token_bucket_proto_rawDescData = file_envoy_type_v3_token_bucket_proto_rawDesc +) + +func file_envoy_type_v3_token_bucket_proto_rawDescGZIP() []byte { + file_envoy_type_v3_token_bucket_proto_rawDescOnce.Do(func() { + file_envoy_type_v3_token_bucket_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_type_v3_token_bucket_proto_rawDescData) + }) + return file_envoy_type_v3_token_bucket_proto_rawDescData +} + +var file_envoy_type_v3_token_bucket_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_envoy_type_v3_token_bucket_proto_goTypes = []interface{}{ + (*TokenBucket)(nil), // 0: envoy.type.v3.TokenBucket + (*wrapperspb.UInt32Value)(nil), // 1: google.protobuf.UInt32Value + (*durationpb.Duration)(nil), // 2: google.protobuf.Duration +} +var file_envoy_type_v3_token_bucket_proto_depIdxs = []int32{ + 1, // 0: envoy.type.v3.TokenBucket.tokens_per_fill:type_name -> google.protobuf.UInt32Value + 2, // 1: envoy.type.v3.TokenBucket.fill_interval:type_name -> google.protobuf.Duration + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_envoy_type_v3_token_bucket_proto_init() } +func file_envoy_type_v3_token_bucket_proto_init() { + if File_envoy_type_v3_token_bucket_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_envoy_type_v3_token_bucket_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TokenBucket); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_envoy_type_v3_token_bucket_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_envoy_type_v3_token_bucket_proto_goTypes, + DependencyIndexes: file_envoy_type_v3_token_bucket_proto_depIdxs, + MessageInfos: file_envoy_type_v3_token_bucket_proto_msgTypes, + }.Build() + File_envoy_type_v3_token_bucket_proto = out.File + file_envoy_type_v3_token_bucket_proto_rawDesc = nil + file_envoy_type_v3_token_bucket_proto_goTypes = nil + file_envoy_type_v3_token_bucket_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.validate.go new file mode 100644 index 0000000000..4f2607621d --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket.pb.validate.go @@ -0,0 +1,203 @@ +//go:build !disable_pgv +// Code generated by 
protoc-gen-validate. DO NOT EDIT. +// source: envoy/type/v3/token_bucket.proto + +package typev3 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on TokenBucket with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *TokenBucket) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TokenBucket with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in TokenBucketMultiError, or +// nil if none found. +func (m *TokenBucket) ValidateAll() error { + return m.validate(true) +} + +func (m *TokenBucket) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetMaxTokens() <= 0 { + err := TokenBucketValidationError{ + field: "MaxTokens", + reason: "value must be greater than 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + if wrapper := m.GetTokensPerFill(); wrapper != nil { + + if wrapper.GetValue() <= 0 { + err := TokenBucketValidationError{ + field: "TokensPerFill", + reason: "value must be greater than 0", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if m.GetFillInterval() == nil { + err := TokenBucketValidationError{ + field: "FillInterval", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if d := m.GetFillInterval(); d != nil { + dur, err := d.AsDuration(), d.CheckValid() + if err != nil { + err = TokenBucketValidationError{ + field: "FillInterval", + reason: "value is not a valid duration", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } else { + + gt := time.Duration(0*time.Second + 0*time.Nanosecond) + + if dur <= gt { + err := TokenBucketValidationError{ + field: "FillInterval", + reason: "value must be greater than 0s", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + } + + if len(errors) > 0 { + return TokenBucketMultiError(errors) + } + + return nil +} + +// TokenBucketMultiError is an error wrapping multiple validation errors +// returned by TokenBucket.ValidateAll() if the designated constraints aren't met. +type TokenBucketMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TokenBucketMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TokenBucketMultiError) AllErrors() []error { return m } + +// TokenBucketValidationError is the validation error returned by +// TokenBucket.Validate if the designated constraints aren't met. +type TokenBucketValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e TokenBucketValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TokenBucketValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TokenBucketValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TokenBucketValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TokenBucketValidationError) ErrorName() string { return "TokenBucketValidationError" } + +// Error satisfies the builtin error interface +func (e TokenBucketValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTokenBucket.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TokenBucketValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TokenBucketValidationError{} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket_vtproto.pb.go new file mode 100644 index 0000000000..8ab53eaf28 --- /dev/null +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/type/v3/token_bucket_vtproto.pb.go @@ -0,0 +1,100 @@ +//go:build vtprotobuf +// +build vtprotobuf + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// source: envoy/type/v3/token_bucket.proto + +package typev3 + +import ( + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb" + wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *TokenBucket) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenBucket) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *TokenBucket) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.FillInterval != nil { + size, err := (*durationpb.Duration)(m.FillInterval).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.TokensPerFill != nil { + size, err := (*wrapperspb.UInt32Value)(m.TokensPerFill).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.MaxTokens != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MaxTokens)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TokenBucket) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxTokens != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MaxTokens)) + } + if m.TokensPerFill != nil { + l = (*wrapperspb.UInt32Value)(m.TokensPerFill).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.FillInterval != nil { + l = (*durationpb.Duration)(m.FillInterval).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} diff --git a/vendor/github.com/envoyproxy/protoc-gen-validate/LICENSE b/vendor/github.com/envoyproxy/protoc-gen-validate/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/envoyproxy/protoc-gen-validate/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/envoyproxy/protoc-gen-validate/validate/BUILD b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/BUILD new file mode 100644 index 0000000000..a9d38c51b9 --- /dev/null +++ b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/BUILD @@ -0,0 +1,74 @@ +load("@com_google_protobuf//:protobuf.bzl", "py_proto_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") +load("@rules_cc//cc:defs.bzl", "cc_library", "cc_proto_library") +load("@rules_java//java:defs.bzl", "java_proto_library") +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +package( + default_visibility = + ["//visibility:public"], +) + +proto_library( + name = "validate_proto", + srcs = ["validate.proto"], + deps = [ + "@com_google_protobuf//:descriptor_proto", + "@com_google_protobuf//:duration_proto", + "@com_google_protobuf//:timestamp_proto", + ], +) + +cc_proto_library( + name = "validate_cc", + deps = [":validate_proto"], +) + +py_proto_library( + name = "validate_py", + srcs = ["validate.proto"], + deps = ["@com_google_protobuf//:protobuf_python"], +) + +go_proto_library( + name = "validate_go_proto", + importpath = "github.com/envoyproxy/protoc-gen-validate/validate", + proto = ":validate_proto", +) + +cc_library( + name = "cc_validate", + hdrs = ["validate.h"], +) + +go_library( + name = "validate_go", + embed = [":validate_go_proto"], + importpath = "github.com/envoyproxy/protoc-gen-validate/validate", +) + +java_proto_library( + name = "validate_java", + deps = [":validate_proto"], +) + +filegroup( + name = "validate_src", + srcs = ["validate.proto"], +) + +alias( + name = "go_default_library", + actual = ":validate", + deprecation = "Use :validate instead of :go_default_library. Details about the new naming convention: https://github.com/bazelbuild/bazel-gazelle/pull/863", + visibility = ["//visibility:public"], +) + +# this alias allows build files generated with Gazelle in other repositories +# to find validate as an external dependency +alias( + name = "validate", + actual = ":validate_go", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.h b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.h new file mode 100644 index 0000000000..d6cf6c9d90 --- /dev/null +++ b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.h @@ -0,0 +1,183 @@ +#ifndef _VALIDATE_H +#define _VALIDATE_H + +#include <functional> +#include <iostream> +#include <regex> +#include <stdexcept> +#include <string> +#include <typeindex> +#include <unordered_map> + +#if !defined(_WIN32) +#include <arpa/inet.h> +#else +#include <winsock2.h> +#include <ws2tcpip.h> + +// <winsock2.h> uses macros to #define a ton of symbols, +// many of which interfere with our code here and down +// the line in various extensions. +#undef DELETE +#undef ERROR +#undef GetMessage +#undef interface +#undef TRUE +#undef min + +#endif + +#include "google/protobuf/message.h" + +namespace pgv { +using std::string; + +class UnimplementedException : public std::runtime_error { +public: + UnimplementedException() : std::runtime_error("not yet implemented") {} + UnimplementedException(const std::string& message) : std::runtime_error(message) {} + // Thrown by C++ validation code that is not yet implemented. +}; + +using ValidationMsg = std::string; + +class BaseValidator { +public: + /** + * Validate/check a generic message object with a registered validator for the concrete message + * type. + * @param m supplies the message to check.
+ * @param err supplies the place to return error information. + * @return true if the validation passes OR there is no registered validator for the concrete + * message type. false is returned if validation explicitly fails. + */ + static bool AbstractCheckMessage(const google::protobuf::Message& m, ValidationMsg* err) { + // Polymorphic lookup is used to see if there is a matching concrete validator. If so, call it. + // Otherwise return success. + auto it = abstractValidators().find(std::type_index(typeid(m))); + if (it == abstractValidators().end()) { + return true; + } + return it->second(m, err); + } + +protected: + // Used to implement AbstractCheckMessage() above. Every message that is linked into the binary + // will register itself by type_index, allowing for polymorphic lookup later. + static std::unordered_map<std::type_index, std::function<bool(const google::protobuf::Message&, ValidationMsg*)>>& + abstractValidators() { + static auto* validator_map = new std::unordered_map< + std::type_index, std::function<bool(const google::protobuf::Message&, ValidationMsg*)>>(); + return *validator_map; + } +}; + +template <typename T> class Validator : public BaseValidator { +public: + Validator(std::function<bool(const T&, ValidationMsg*)> check) : check_(check) { + abstractValidators()[std::type_index(typeid(T))] = [this](const google::protobuf::Message& m, + ValidationMsg* err) -> bool { + return check_(dynamic_cast<const T&>(m), err); + }; + } + +private: + std::function<bool(const T&, ValidationMsg*)> check_; +}; + +static inline std::string String(const ValidationMsg& msg) { return std::string(msg); } + +static inline bool IsPrefix(const string& maybe_prefix, const string& search_in) { + return search_in.compare(0, maybe_prefix.size(), maybe_prefix) == 0; +} + +static inline bool IsSuffix(const string& maybe_suffix, const string& search_in) { + return maybe_suffix.size() <= search_in.size() && + search_in.compare(search_in.size() - maybe_suffix.size(), maybe_suffix.size(), + maybe_suffix) == 0; +} + +static inline bool Contains(const string& search_in, const string& to_find) { + return search_in.find(to_find) != string::npos; +} + +static inline bool NotContains(const string& search_in, const string& to_find) { + return !Contains(search_in, to_find); +} + +static inline bool IsIpv4(const string& to_validate) { + struct sockaddr_in sa; + return !(inet_pton(AF_INET, to_validate.c_str(), &sa.sin_addr) < 1); +} + +static inline bool IsIpv6(const string& to_validate) { + struct sockaddr_in6 sa_six; + return !(inet_pton(AF_INET6, to_validate.c_str(), &sa_six.sin6_addr) < 1); +} + +static inline bool IsIp(const string& to_validate) { + return IsIpv4(to_validate) || IsIpv6(to_validate); +} + +static inline bool IsHostname(const string& to_validate) { + if (to_validate.length() > 253) { + return false; + } + + const std::regex dot_regex{"\\."}; + const auto iter_end = std::sregex_token_iterator(); + auto iter = std::sregex_token_iterator(to_validate.begin(), to_validate.end(), dot_regex, -1); + for (; iter != iter_end; ++iter) { + const std::string& part = *iter; + if (part.empty() || part.length() > 63) { + return false; + } + if (part.at(0) == '-') { + return false; + } + if (part.at(part.length() - 1) == '-') { + return false; + } + for (const auto& character : part) { + if ((character < 'A' || character > 'Z') && (character < 'a' || character > 'z') && + (character < '0' || character > '9') && character != '-') { + return false; + } + } + } + + return true; +} + +namespace { + +inline int OneCharLen(const char* src) { + return "\1\1\1\1\1\1\1\1\1\1\1\1\2\2\3\4"[(*src & 0xFF) >> 4]; +} + +inline int UTF8FirstLetterNumBytes(const char *utf8_str, int str_len) { + if (str_len == 0) + return 0; + return
OneCharLen(utf8_str); +} + +inline size_t Utf8Len(const string& narrow_string) { + const char* str_char = narrow_string.c_str(); + ptrdiff_t byte_len = narrow_string.length(); + size_t unicode_len = 0; + int char_len = 1; + while (byte_len > 0 && char_len > 0) { + char_len = UTF8FirstLetterNumBytes(str_char, byte_len); + str_char += char_len; + byte_len -= char_len; + ++unicode_len; + } + return unicode_len; +} + +} // namespace + +} // namespace pgv + +#endif // _VALIDATE_H diff --git a/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.pb.go b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.pb.go new file mode 100644 index 0000000000..6df95e89ec --- /dev/null +++ b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.pb.go @@ -0,0 +1,4106 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: validate/validate.proto + +package validate + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// WellKnownRegex contain some well-known patterns. +type KnownRegex int32 + +const ( + KnownRegex_UNKNOWN KnownRegex = 0 + // HTTP header name as defined by RFC 7230. + KnownRegex_HTTP_HEADER_NAME KnownRegex = 1 + // HTTP header value as defined by RFC 7230. + KnownRegex_HTTP_HEADER_VALUE KnownRegex = 2 +) + +// Enum value maps for KnownRegex. +var ( + KnownRegex_name = map[int32]string{ + 0: "UNKNOWN", + 1: "HTTP_HEADER_NAME", + 2: "HTTP_HEADER_VALUE", + } + KnownRegex_value = map[string]int32{ + "UNKNOWN": 0, + "HTTP_HEADER_NAME": 1, + "HTTP_HEADER_VALUE": 2, + } +) + +func (x KnownRegex) Enum() *KnownRegex { + p := new(KnownRegex) + *p = x + return p +} + +func (x KnownRegex) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (KnownRegex) Descriptor() protoreflect.EnumDescriptor { + return file_validate_validate_proto_enumTypes[0].Descriptor() +} + +func (KnownRegex) Type() protoreflect.EnumType { + return &file_validate_validate_proto_enumTypes[0] +} + +func (x KnownRegex) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *KnownRegex) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = KnownRegex(num) + return nil +} + +// Deprecated: Use KnownRegex.Descriptor instead. +func (KnownRegex) EnumDescriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{0} +} + +// FieldRules encapsulates the rules for each type of field. Depending on the +// field, the correct set should be used to ensure proper validations. 
+type FieldRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Message *MessageRules `protobuf:"bytes,17,opt,name=message" json:"message,omitempty"` + // Types that are assignable to Type: + // + // *FieldRules_Float + // *FieldRules_Double + // *FieldRules_Int32 + // *FieldRules_Int64 + // *FieldRules_Uint32 + // *FieldRules_Uint64 + // *FieldRules_Sint32 + // *FieldRules_Sint64 + // *FieldRules_Fixed32 + // *FieldRules_Fixed64 + // *FieldRules_Sfixed32 + // *FieldRules_Sfixed64 + // *FieldRules_Bool + // *FieldRules_String_ + // *FieldRules_Bytes + // *FieldRules_Enum + // *FieldRules_Repeated + // *FieldRules_Map + // *FieldRules_Any + // *FieldRules_Duration + // *FieldRules_Timestamp + Type isFieldRules_Type `protobuf_oneof:"type"` +} + +func (x *FieldRules) Reset() { + *x = FieldRules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldRules) ProtoMessage() {} + +func (x *FieldRules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldRules.ProtoReflect.Descriptor instead. +func (*FieldRules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{0} +} + +func (x *FieldRules) GetMessage() *MessageRules { + if x != nil { + return x.Message + } + return nil +} + +func (m *FieldRules) GetType() isFieldRules_Type { + if m != nil { + return m.Type + } + return nil +} + +func (x *FieldRules) GetFloat() *FloatRules { + if x, ok := x.GetType().(*FieldRules_Float); ok { + return x.Float + } + return nil +} + +func (x *FieldRules) GetDouble() *DoubleRules { + if x, ok := x.GetType().(*FieldRules_Double); ok { + return x.Double + } + return nil +} + +func (x *FieldRules) GetInt32() *Int32Rules { + if x, ok := x.GetType().(*FieldRules_Int32); ok { + return x.Int32 + } + return nil +} + +func (x *FieldRules) GetInt64() *Int64Rules { + if x, ok := x.GetType().(*FieldRules_Int64); ok { + return x.Int64 + } + return nil +} + +func (x *FieldRules) GetUint32() *UInt32Rules { + if x, ok := x.GetType().(*FieldRules_Uint32); ok { + return x.Uint32 + } + return nil +} + +func (x *FieldRules) GetUint64() *UInt64Rules { + if x, ok := x.GetType().(*FieldRules_Uint64); ok { + return x.Uint64 + } + return nil +} + +func (x *FieldRules) GetSint32() *SInt32Rules { + if x, ok := x.GetType().(*FieldRules_Sint32); ok { + return x.Sint32 + } + return nil +} + +func (x *FieldRules) GetSint64() *SInt64Rules { + if x, ok := x.GetType().(*FieldRules_Sint64); ok { + return x.Sint64 + } + return nil +} + +func (x *FieldRules) GetFixed32() *Fixed32Rules { + if x, ok := x.GetType().(*FieldRules_Fixed32); ok { + return x.Fixed32 + } + return nil +} + +func (x *FieldRules) GetFixed64() *Fixed64Rules { + if x, ok := x.GetType().(*FieldRules_Fixed64); ok { + return x.Fixed64 + } + return nil +} + +func (x *FieldRules) GetSfixed32() *SFixed32Rules { + if x, ok := x.GetType().(*FieldRules_Sfixed32); ok { + return x.Sfixed32 + } + return nil +} + +func (x *FieldRules) GetSfixed64() *SFixed64Rules { + if x, ok := 
x.GetType().(*FieldRules_Sfixed64); ok { + return x.Sfixed64 + } + return nil +} + +func (x *FieldRules) GetBool() *BoolRules { + if x, ok := x.GetType().(*FieldRules_Bool); ok { + return x.Bool + } + return nil +} + +func (x *FieldRules) GetString_() *StringRules { + if x, ok := x.GetType().(*FieldRules_String_); ok { + return x.String_ + } + return nil +} + +func (x *FieldRules) GetBytes() *BytesRules { + if x, ok := x.GetType().(*FieldRules_Bytes); ok { + return x.Bytes + } + return nil +} + +func (x *FieldRules) GetEnum() *EnumRules { + if x, ok := x.GetType().(*FieldRules_Enum); ok { + return x.Enum + } + return nil +} + +func (x *FieldRules) GetRepeated() *RepeatedRules { + if x, ok := x.GetType().(*FieldRules_Repeated); ok { + return x.Repeated + } + return nil +} + +func (x *FieldRules) GetMap() *MapRules { + if x, ok := x.GetType().(*FieldRules_Map); ok { + return x.Map + } + return nil +} + +func (x *FieldRules) GetAny() *AnyRules { + if x, ok := x.GetType().(*FieldRules_Any); ok { + return x.Any + } + return nil +} + +func (x *FieldRules) GetDuration() *DurationRules { + if x, ok := x.GetType().(*FieldRules_Duration); ok { + return x.Duration + } + return nil +} + +func (x *FieldRules) GetTimestamp() *TimestampRules { + if x, ok := x.GetType().(*FieldRules_Timestamp); ok { + return x.Timestamp + } + return nil +} + +type isFieldRules_Type interface { + isFieldRules_Type() +} + +type FieldRules_Float struct { + // Scalar Field Types + Float *FloatRules `protobuf:"bytes,1,opt,name=float,oneof"` +} + +type FieldRules_Double struct { + Double *DoubleRules `protobuf:"bytes,2,opt,name=double,oneof"` +} + +type FieldRules_Int32 struct { + Int32 *Int32Rules `protobuf:"bytes,3,opt,name=int32,oneof"` +} + +type FieldRules_Int64 struct { + Int64 *Int64Rules `protobuf:"bytes,4,opt,name=int64,oneof"` +} + +type FieldRules_Uint32 struct { + Uint32 *UInt32Rules `protobuf:"bytes,5,opt,name=uint32,oneof"` +} + +type FieldRules_Uint64 struct { + Uint64 *UInt64Rules `protobuf:"bytes,6,opt,name=uint64,oneof"` +} + +type FieldRules_Sint32 struct { + Sint32 *SInt32Rules `protobuf:"bytes,7,opt,name=sint32,oneof"` +} + +type FieldRules_Sint64 struct { + Sint64 *SInt64Rules `protobuf:"bytes,8,opt,name=sint64,oneof"` +} + +type FieldRules_Fixed32 struct { + Fixed32 *Fixed32Rules `protobuf:"bytes,9,opt,name=fixed32,oneof"` +} + +type FieldRules_Fixed64 struct { + Fixed64 *Fixed64Rules `protobuf:"bytes,10,opt,name=fixed64,oneof"` +} + +type FieldRules_Sfixed32 struct { + Sfixed32 *SFixed32Rules `protobuf:"bytes,11,opt,name=sfixed32,oneof"` +} + +type FieldRules_Sfixed64 struct { + Sfixed64 *SFixed64Rules `protobuf:"bytes,12,opt,name=sfixed64,oneof"` +} + +type FieldRules_Bool struct { + Bool *BoolRules `protobuf:"bytes,13,opt,name=bool,oneof"` +} + +type FieldRules_String_ struct { + String_ *StringRules `protobuf:"bytes,14,opt,name=string,oneof"` +} + +type FieldRules_Bytes struct { + Bytes *BytesRules `protobuf:"bytes,15,opt,name=bytes,oneof"` +} + +type FieldRules_Enum struct { + // Complex Field Types + Enum *EnumRules `protobuf:"bytes,16,opt,name=enum,oneof"` +} + +type FieldRules_Repeated struct { + Repeated *RepeatedRules `protobuf:"bytes,18,opt,name=repeated,oneof"` +} + +type FieldRules_Map struct { + Map *MapRules `protobuf:"bytes,19,opt,name=map,oneof"` +} + +type FieldRules_Any struct { + // Well-Known Field Types + Any *AnyRules `protobuf:"bytes,20,opt,name=any,oneof"` +} + +type FieldRules_Duration struct { + Duration *DurationRules `protobuf:"bytes,21,opt,name=duration,oneof"` +} + +type 
FieldRules_Timestamp struct { + Timestamp *TimestampRules `protobuf:"bytes,22,opt,name=timestamp,oneof"` +} + +func (*FieldRules_Float) isFieldRules_Type() {} + +func (*FieldRules_Double) isFieldRules_Type() {} + +func (*FieldRules_Int32) isFieldRules_Type() {} + +func (*FieldRules_Int64) isFieldRules_Type() {} + +func (*FieldRules_Uint32) isFieldRules_Type() {} + +func (*FieldRules_Uint64) isFieldRules_Type() {} + +func (*FieldRules_Sint32) isFieldRules_Type() {} + +func (*FieldRules_Sint64) isFieldRules_Type() {} + +func (*FieldRules_Fixed32) isFieldRules_Type() {} + +func (*FieldRules_Fixed64) isFieldRules_Type() {} + +func (*FieldRules_Sfixed32) isFieldRules_Type() {} + +func (*FieldRules_Sfixed64) isFieldRules_Type() {} + +func (*FieldRules_Bool) isFieldRules_Type() {} + +func (*FieldRules_String_) isFieldRules_Type() {} + +func (*FieldRules_Bytes) isFieldRules_Type() {} + +func (*FieldRules_Enum) isFieldRules_Type() {} + +func (*FieldRules_Repeated) isFieldRules_Type() {} + +func (*FieldRules_Map) isFieldRules_Type() {} + +func (*FieldRules_Any) isFieldRules_Type() {} + +func (*FieldRules_Duration) isFieldRules_Type() {} + +func (*FieldRules_Timestamp) isFieldRules_Type() {} + +// FloatRules describes the constraints applied to `float` values +type FloatRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *float32 `protobuf:"fixed32,1,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *float32 `protobuf:"fixed32,2,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *float32 `protobuf:"fixed32,3,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *float32 `protobuf:"fixed32,4,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ Gte *float32 `protobuf:"fixed32,5,opt,name=gte" json:"gte,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []float32 `protobuf:"fixed32,6,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []float32 `protobuf:"fixed32,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *FloatRules) Reset() { + *x = FloatRules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FloatRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FloatRules) ProtoMessage() {} + +func (x *FloatRules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FloatRules.ProtoReflect.Descriptor instead. +func (*FloatRules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{1} +} + +func (x *FloatRules) GetConst() float32 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *FloatRules) GetLt() float32 { + if x != nil && x.Lt != nil { + return *x.Lt + } + return 0 +} + +func (x *FloatRules) GetLte() float32 { + if x != nil && x.Lte != nil { + return *x.Lte + } + return 0 +} + +func (x *FloatRules) GetGt() float32 { + if x != nil && x.Gt != nil { + return *x.Gt + } + return 0 +} + +func (x *FloatRules) GetGte() float32 { + if x != nil && x.Gte != nil { + return *x.Gte + } + return 0 +} + +func (x *FloatRules) GetIn() []float32 { + if x != nil { + return x.In + } + return nil +} + +func (x *FloatRules) GetNotIn() []float32 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *FloatRules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// DoubleRules describes the constraints applied to `double` values +type DoubleRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *float64 `protobuf:"fixed64,1,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *float64 `protobuf:"fixed64,2,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *float64 `protobuf:"fixed64,3,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *float64 `protobuf:"fixed64,4,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ Gte *float64 `protobuf:"fixed64,5,opt,name=gte" json:"gte,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []float64 `protobuf:"fixed64,6,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []float64 `protobuf:"fixed64,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *DoubleRules) Reset() { + *x = DoubleRules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DoubleRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DoubleRules) ProtoMessage() {} + +func (x *DoubleRules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DoubleRules.ProtoReflect.Descriptor instead. +func (*DoubleRules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{2} +} + +func (x *DoubleRules) GetConst() float64 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *DoubleRules) GetLt() float64 { + if x != nil && x.Lt != nil { + return *x.Lt + } + return 0 +} + +func (x *DoubleRules) GetLte() float64 { + if x != nil && x.Lte != nil { + return *x.Lte + } + return 0 +} + +func (x *DoubleRules) GetGt() float64 { + if x != nil && x.Gt != nil { + return *x.Gt + } + return 0 +} + +func (x *DoubleRules) GetGte() float64 { + if x != nil && x.Gte != nil { + return *x.Gte + } + return 0 +} + +func (x *DoubleRules) GetIn() []float64 { + if x != nil { + return x.In + } + return nil +} + +func (x *DoubleRules) GetNotIn() []float64 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *DoubleRules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// Int32Rules describes the constraints applied to `int32` values +type Int32Rules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *int32 `protobuf:"varint,1,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *int32 `protobuf:"varint,2,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *int32 `protobuf:"varint,3,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *int32 `protobuf:"varint,4,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ Gte *int32 `protobuf:"varint,5,opt,name=gte" json:"gte,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []int32 `protobuf:"varint,6,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []int32 `protobuf:"varint,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *Int32Rules) Reset() { + *x = Int32Rules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int32Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int32Rules) ProtoMessage() {} + +func (x *Int32Rules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int32Rules.ProtoReflect.Descriptor instead. +func (*Int32Rules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{3} +} + +func (x *Int32Rules) GetConst() int32 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *Int32Rules) GetLt() int32 { + if x != nil && x.Lt != nil { + return *x.Lt + } + return 0 +} + +func (x *Int32Rules) GetLte() int32 { + if x != nil && x.Lte != nil { + return *x.Lte + } + return 0 +} + +func (x *Int32Rules) GetGt() int32 { + if x != nil && x.Gt != nil { + return *x.Gt + } + return 0 +} + +func (x *Int32Rules) GetGte() int32 { + if x != nil && x.Gte != nil { + return *x.Gte + } + return 0 +} + +func (x *Int32Rules) GetIn() []int32 { + if x != nil { + return x.In + } + return nil +} + +func (x *Int32Rules) GetNotIn() []int32 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *Int32Rules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// Int64Rules describes the constraints applied to `int64` values +type Int64Rules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *int64 `protobuf:"varint,1,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *int64 `protobuf:"varint,2,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *int64 `protobuf:"varint,3,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *int64 `protobuf:"varint,4,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ Gte *int64 `protobuf:"varint,5,opt,name=gte" json:"gte,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []int64 `protobuf:"varint,6,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []int64 `protobuf:"varint,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *Int64Rules) Reset() { + *x = Int64Rules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int64Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int64Rules) ProtoMessage() {} + +func (x *Int64Rules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int64Rules.ProtoReflect.Descriptor instead. +func (*Int64Rules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{4} +} + +func (x *Int64Rules) GetConst() int64 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *Int64Rules) GetLt() int64 { + if x != nil && x.Lt != nil { + return *x.Lt + } + return 0 +} + +func (x *Int64Rules) GetLte() int64 { + if x != nil && x.Lte != nil { + return *x.Lte + } + return 0 +} + +func (x *Int64Rules) GetGt() int64 { + if x != nil && x.Gt != nil { + return *x.Gt + } + return 0 +} + +func (x *Int64Rules) GetGte() int64 { + if x != nil && x.Gte != nil { + return *x.Gte + } + return 0 +} + +func (x *Int64Rules) GetIn() []int64 { + if x != nil { + return x.In + } + return nil +} + +func (x *Int64Rules) GetNotIn() []int64 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *Int64Rules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// UInt32Rules describes the constraints applied to `uint32` values +type UInt32Rules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *uint32 `protobuf:"varint,1,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *uint32 `protobuf:"varint,2,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *uint32 `protobuf:"varint,3,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *uint32 `protobuf:"varint,4,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ Gte *uint32 `protobuf:"varint,5,opt,name=gte" json:"gte,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []uint32 `protobuf:"varint,6,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []uint32 `protobuf:"varint,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *UInt32Rules) Reset() { + *x = UInt32Rules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UInt32Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UInt32Rules) ProtoMessage() {} + +func (x *UInt32Rules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UInt32Rules.ProtoReflect.Descriptor instead. +func (*UInt32Rules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{5} +} + +func (x *UInt32Rules) GetConst() uint32 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *UInt32Rules) GetLt() uint32 { + if x != nil && x.Lt != nil { + return *x.Lt + } + return 0 +} + +func (x *UInt32Rules) GetLte() uint32 { + if x != nil && x.Lte != nil { + return *x.Lte + } + return 0 +} + +func (x *UInt32Rules) GetGt() uint32 { + if x != nil && x.Gt != nil { + return *x.Gt + } + return 0 +} + +func (x *UInt32Rules) GetGte() uint32 { + if x != nil && x.Gte != nil { + return *x.Gte + } + return 0 +} + +func (x *UInt32Rules) GetIn() []uint32 { + if x != nil { + return x.In + } + return nil +} + +func (x *UInt32Rules) GetNotIn() []uint32 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *UInt32Rules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// UInt64Rules describes the constraints applied to `uint64` values +type UInt64Rules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *uint64 `protobuf:"varint,1,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *uint64 `protobuf:"varint,2,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *uint64 `protobuf:"varint,3,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *uint64 `protobuf:"varint,4,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ Gte *uint64 `protobuf:"varint,5,opt,name=gte" json:"gte,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []uint64 `protobuf:"varint,6,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []uint64 `protobuf:"varint,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *UInt64Rules) Reset() { + *x = UInt64Rules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UInt64Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UInt64Rules) ProtoMessage() {} + +func (x *UInt64Rules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UInt64Rules.ProtoReflect.Descriptor instead. +func (*UInt64Rules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{6} +} + +func (x *UInt64Rules) GetConst() uint64 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *UInt64Rules) GetLt() uint64 { + if x != nil && x.Lt != nil { + return *x.Lt + } + return 0 +} + +func (x *UInt64Rules) GetLte() uint64 { + if x != nil && x.Lte != nil { + return *x.Lte + } + return 0 +} + +func (x *UInt64Rules) GetGt() uint64 { + if x != nil && x.Gt != nil { + return *x.Gt + } + return 0 +} + +func (x *UInt64Rules) GetGte() uint64 { + if x != nil && x.Gte != nil { + return *x.Gte + } + return 0 +} + +func (x *UInt64Rules) GetIn() []uint64 { + if x != nil { + return x.In + } + return nil +} + +func (x *UInt64Rules) GetNotIn() []uint64 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *UInt64Rules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// SInt32Rules describes the constraints applied to `sint32` values +type SInt32Rules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *int32 `protobuf:"zigzag32,1,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *int32 `protobuf:"zigzag32,2,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *int32 `protobuf:"zigzag32,3,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *int32 `protobuf:"zigzag32,4,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ Gte *int32 `protobuf:"zigzag32,5,opt,name=gte" json:"gte,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []int32 `protobuf:"zigzag32,6,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []int32 `protobuf:"zigzag32,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *SInt32Rules) Reset() { + *x = SInt32Rules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SInt32Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SInt32Rules) ProtoMessage() {} + +func (x *SInt32Rules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SInt32Rules.ProtoReflect.Descriptor instead. +func (*SInt32Rules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{7} +} + +func (x *SInt32Rules) GetConst() int32 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *SInt32Rules) GetLt() int32 { + if x != nil && x.Lt != nil { + return *x.Lt + } + return 0 +} + +func (x *SInt32Rules) GetLte() int32 { + if x != nil && x.Lte != nil { + return *x.Lte + } + return 0 +} + +func (x *SInt32Rules) GetGt() int32 { + if x != nil && x.Gt != nil { + return *x.Gt + } + return 0 +} + +func (x *SInt32Rules) GetGte() int32 { + if x != nil && x.Gte != nil { + return *x.Gte + } + return 0 +} + +func (x *SInt32Rules) GetIn() []int32 { + if x != nil { + return x.In + } + return nil +} + +func (x *SInt32Rules) GetNotIn() []int32 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *SInt32Rules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// SInt64Rules describes the constraints applied to `sint64` values +type SInt64Rules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *int64 `protobuf:"zigzag64,1,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *int64 `protobuf:"zigzag64,2,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *int64 `protobuf:"zigzag64,3,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *int64 `protobuf:"zigzag64,4,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ Gte *int64 `protobuf:"zigzag64,5,opt,name=gte" json:"gte,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []int64 `protobuf:"zigzag64,6,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []int64 `protobuf:"zigzag64,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *SInt64Rules) Reset() { + *x = SInt64Rules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SInt64Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SInt64Rules) ProtoMessage() {} + +func (x *SInt64Rules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SInt64Rules.ProtoReflect.Descriptor instead. +func (*SInt64Rules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{8} +} + +func (x *SInt64Rules) GetConst() int64 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *SInt64Rules) GetLt() int64 { + if x != nil && x.Lt != nil { + return *x.Lt + } + return 0 +} + +func (x *SInt64Rules) GetLte() int64 { + if x != nil && x.Lte != nil { + return *x.Lte + } + return 0 +} + +func (x *SInt64Rules) GetGt() int64 { + if x != nil && x.Gt != nil { + return *x.Gt + } + return 0 +} + +func (x *SInt64Rules) GetGte() int64 { + if x != nil && x.Gte != nil { + return *x.Gte + } + return 0 +} + +func (x *SInt64Rules) GetIn() []int64 { + if x != nil { + return x.In + } + return nil +} + +func (x *SInt64Rules) GetNotIn() []int64 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *SInt64Rules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// Fixed32Rules describes the constraints applied to `fixed32` values +type Fixed32Rules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *uint32 `protobuf:"fixed32,1,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *uint32 `protobuf:"fixed32,2,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *uint32 `protobuf:"fixed32,3,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *uint32 `protobuf:"fixed32,4,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ Gte *uint32 `protobuf:"fixed32,5,opt,name=gte" json:"gte,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []uint32 `protobuf:"fixed32,6,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []uint32 `protobuf:"fixed32,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *Fixed32Rules) Reset() { + *x = Fixed32Rules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Fixed32Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Fixed32Rules) ProtoMessage() {} + +func (x *Fixed32Rules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Fixed32Rules.ProtoReflect.Descriptor instead. +func (*Fixed32Rules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{9} +} + +func (x *Fixed32Rules) GetConst() uint32 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *Fixed32Rules) GetLt() uint32 { + if x != nil && x.Lt != nil { + return *x.Lt + } + return 0 +} + +func (x *Fixed32Rules) GetLte() uint32 { + if x != nil && x.Lte != nil { + return *x.Lte + } + return 0 +} + +func (x *Fixed32Rules) GetGt() uint32 { + if x != nil && x.Gt != nil { + return *x.Gt + } + return 0 +} + +func (x *Fixed32Rules) GetGte() uint32 { + if x != nil && x.Gte != nil { + return *x.Gte + } + return 0 +} + +func (x *Fixed32Rules) GetIn() []uint32 { + if x != nil { + return x.In + } + return nil +} + +func (x *Fixed32Rules) GetNotIn() []uint32 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *Fixed32Rules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// Fixed64Rules describes the constraints applied to `fixed64` values +type Fixed64Rules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *uint64 `protobuf:"fixed64,1,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *uint64 `protobuf:"fixed64,2,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *uint64 `protobuf:"fixed64,3,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *uint64 `protobuf:"fixed64,4,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ Gte *uint64 `protobuf:"fixed64,5,opt,name=gte" json:"gte,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []uint64 `protobuf:"fixed64,6,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []uint64 `protobuf:"fixed64,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *Fixed64Rules) Reset() { + *x = Fixed64Rules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Fixed64Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Fixed64Rules) ProtoMessage() {} + +func (x *Fixed64Rules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Fixed64Rules.ProtoReflect.Descriptor instead. +func (*Fixed64Rules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{10} +} + +func (x *Fixed64Rules) GetConst() uint64 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *Fixed64Rules) GetLt() uint64 { + if x != nil && x.Lt != nil { + return *x.Lt + } + return 0 +} + +func (x *Fixed64Rules) GetLte() uint64 { + if x != nil && x.Lte != nil { + return *x.Lte + } + return 0 +} + +func (x *Fixed64Rules) GetGt() uint64 { + if x != nil && x.Gt != nil { + return *x.Gt + } + return 0 +} + +func (x *Fixed64Rules) GetGte() uint64 { + if x != nil && x.Gte != nil { + return *x.Gte + } + return 0 +} + +func (x *Fixed64Rules) GetIn() []uint64 { + if x != nil { + return x.In + } + return nil +} + +func (x *Fixed64Rules) GetNotIn() []uint64 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *Fixed64Rules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// SFixed32Rules describes the constraints applied to `sfixed32` values +type SFixed32Rules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *int32 `protobuf:"fixed32,1,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *int32 `protobuf:"fixed32,2,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *int32 `protobuf:"fixed32,3,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *int32 `protobuf:"fixed32,4,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ Gte *int32 `protobuf:"fixed32,5,opt,name=gte" json:"gte,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []int32 `protobuf:"fixed32,6,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []int32 `protobuf:"fixed32,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *SFixed32Rules) Reset() { + *x = SFixed32Rules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SFixed32Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SFixed32Rules) ProtoMessage() {} + +func (x *SFixed32Rules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SFixed32Rules.ProtoReflect.Descriptor instead. +func (*SFixed32Rules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{11} +} + +func (x *SFixed32Rules) GetConst() int32 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *SFixed32Rules) GetLt() int32 { + if x != nil && x.Lt != nil { + return *x.Lt + } + return 0 +} + +func (x *SFixed32Rules) GetLte() int32 { + if x != nil && x.Lte != nil { + return *x.Lte + } + return 0 +} + +func (x *SFixed32Rules) GetGt() int32 { + if x != nil && x.Gt != nil { + return *x.Gt + } + return 0 +} + +func (x *SFixed32Rules) GetGte() int32 { + if x != nil && x.Gte != nil { + return *x.Gte + } + return 0 +} + +func (x *SFixed32Rules) GetIn() []int32 { + if x != nil { + return x.In + } + return nil +} + +func (x *SFixed32Rules) GetNotIn() []int32 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *SFixed32Rules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// SFixed64Rules describes the constraints applied to `sfixed64` values +type SFixed64Rules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *int64 `protobuf:"fixed64,1,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *int64 `protobuf:"fixed64,2,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *int64 `protobuf:"fixed64,3,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *int64 `protobuf:"fixed64,4,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ Gte *int64 `protobuf:"fixed64,5,opt,name=gte" json:"gte,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []int64 `protobuf:"fixed64,6,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []int64 `protobuf:"fixed64,7,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,8,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *SFixed64Rules) Reset() { + *x = SFixed64Rules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SFixed64Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SFixed64Rules) ProtoMessage() {} + +func (x *SFixed64Rules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SFixed64Rules.ProtoReflect.Descriptor instead. +func (*SFixed64Rules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{12} +} + +func (x *SFixed64Rules) GetConst() int64 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *SFixed64Rules) GetLt() int64 { + if x != nil && x.Lt != nil { + return *x.Lt + } + return 0 +} + +func (x *SFixed64Rules) GetLte() int64 { + if x != nil && x.Lte != nil { + return *x.Lte + } + return 0 +} + +func (x *SFixed64Rules) GetGt() int64 { + if x != nil && x.Gt != nil { + return *x.Gt + } + return 0 +} + +func (x *SFixed64Rules) GetGte() int64 { + if x != nil && x.Gte != nil { + return *x.Gte + } + return 0 +} + +func (x *SFixed64Rules) GetIn() []int64 { + if x != nil { + return x.In + } + return nil +} + +func (x *SFixed64Rules) GetNotIn() []int64 { + if x != nil { + return x.NotIn + } + return nil +} + +func (x *SFixed64Rules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// BoolRules describes the constraints applied to `bool` values +type BoolRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *bool `protobuf:"varint,1,opt,name=const" json:"const,omitempty"` +} + +func (x *BoolRules) Reset() { + *x = BoolRules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BoolRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BoolRules) ProtoMessage() {} + +func (x *BoolRules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BoolRules.ProtoReflect.Descriptor instead. 
+func (*BoolRules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{13} +} + +func (x *BoolRules) GetConst() bool { + if x != nil && x.Const != nil { + return *x.Const + } + return false +} + +// StringRules describe the constraints applied to `string` values +type StringRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *string `protobuf:"bytes,1,opt,name=const" json:"const,omitempty"` + // Len specifies that this field must be the specified number of + // characters (Unicode code points). Note that the number of + // characters may differ from the number of bytes in the string. + Len *uint64 `protobuf:"varint,19,opt,name=len" json:"len,omitempty"` + // MinLen specifies that this field must be the specified number of + // characters (Unicode code points) at a minimum. Note that the number of + // characters may differ from the number of bytes in the string. + MinLen *uint64 `protobuf:"varint,2,opt,name=min_len,json=minLen" json:"min_len,omitempty"` + // MaxLen specifies that this field must be the specified number of + // characters (Unicode code points) at a maximum. Note that the number of + // characters may differ from the number of bytes in the string. + MaxLen *uint64 `protobuf:"varint,3,opt,name=max_len,json=maxLen" json:"max_len,omitempty"` + // LenBytes specifies that this field must be the specified number of bytes + LenBytes *uint64 `protobuf:"varint,20,opt,name=len_bytes,json=lenBytes" json:"len_bytes,omitempty"` + // MinBytes specifies that this field must be the specified number of bytes + // at a minimum + MinBytes *uint64 `protobuf:"varint,4,opt,name=min_bytes,json=minBytes" json:"min_bytes,omitempty"` + // MaxBytes specifies that this field must be the specified number of bytes + // at a maximum + MaxBytes *uint64 `protobuf:"varint,5,opt,name=max_bytes,json=maxBytes" json:"max_bytes,omitempty"` + // Pattern specifies that this field must match against the specified + // regular expression (RE2 syntax). The included expression should elide + // any delimiters. + Pattern *string `protobuf:"bytes,6,opt,name=pattern" json:"pattern,omitempty"` + // Prefix specifies that this field must have the specified substring at + // the beginning of the string. + Prefix *string `protobuf:"bytes,7,opt,name=prefix" json:"prefix,omitempty"` + // Suffix specifies that this field must have the specified substring at + // the end of the string. + Suffix *string `protobuf:"bytes,8,opt,name=suffix" json:"suffix,omitempty"` + // Contains specifies that this field must have the specified substring + // anywhere in the string. + Contains *string `protobuf:"bytes,9,opt,name=contains" json:"contains,omitempty"` + // NotContains specifies that this field cannot have the specified substring + // anywhere in the string. 
+ NotContains *string `protobuf:"bytes,23,opt,name=not_contains,json=notContains" json:"not_contains,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []string `protobuf:"bytes,10,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []string `protobuf:"bytes,11,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // WellKnown rules provide advanced constraints against common string + // patterns + // + // Types that are assignable to WellKnown: + // + // *StringRules_Email + // *StringRules_Hostname + // *StringRules_Ip + // *StringRules_Ipv4 + // *StringRules_Ipv6 + // *StringRules_Uri + // *StringRules_UriRef + // *StringRules_Address + // *StringRules_Uuid + // *StringRules_WellKnownRegex + WellKnown isStringRules_WellKnown `protobuf_oneof:"well_known"` + // This applies to regexes HTTP_HEADER_NAME and HTTP_HEADER_VALUE to enable + // strict header validation. + // By default, this is true, and HTTP header validations are RFC-compliant. + // Setting to false will enable a looser validations that only disallows + // \r\n\0 characters, which can be used to bypass header matching rules. + Strict *bool `protobuf:"varint,25,opt,name=strict,def=1" json:"strict,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,26,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +// Default values for StringRules fields. +const ( + Default_StringRules_Strict = bool(true) +) + +func (x *StringRules) Reset() { + *x = StringRules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StringRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringRules) ProtoMessage() {} + +func (x *StringRules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StringRules.ProtoReflect.Descriptor instead. 
+func (*StringRules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{14} +} + +func (x *StringRules) GetConst() string { + if x != nil && x.Const != nil { + return *x.Const + } + return "" +} + +func (x *StringRules) GetLen() uint64 { + if x != nil && x.Len != nil { + return *x.Len + } + return 0 +} + +func (x *StringRules) GetMinLen() uint64 { + if x != nil && x.MinLen != nil { + return *x.MinLen + } + return 0 +} + +func (x *StringRules) GetMaxLen() uint64 { + if x != nil && x.MaxLen != nil { + return *x.MaxLen + } + return 0 +} + +func (x *StringRules) GetLenBytes() uint64 { + if x != nil && x.LenBytes != nil { + return *x.LenBytes + } + return 0 +} + +func (x *StringRules) GetMinBytes() uint64 { + if x != nil && x.MinBytes != nil { + return *x.MinBytes + } + return 0 +} + +func (x *StringRules) GetMaxBytes() uint64 { + if x != nil && x.MaxBytes != nil { + return *x.MaxBytes + } + return 0 +} + +func (x *StringRules) GetPattern() string { + if x != nil && x.Pattern != nil { + return *x.Pattern + } + return "" +} + +func (x *StringRules) GetPrefix() string { + if x != nil && x.Prefix != nil { + return *x.Prefix + } + return "" +} + +func (x *StringRules) GetSuffix() string { + if x != nil && x.Suffix != nil { + return *x.Suffix + } + return "" +} + +func (x *StringRules) GetContains() string { + if x != nil && x.Contains != nil { + return *x.Contains + } + return "" +} + +func (x *StringRules) GetNotContains() string { + if x != nil && x.NotContains != nil { + return *x.NotContains + } + return "" +} + +func (x *StringRules) GetIn() []string { + if x != nil { + return x.In + } + return nil +} + +func (x *StringRules) GetNotIn() []string { + if x != nil { + return x.NotIn + } + return nil +} + +func (m *StringRules) GetWellKnown() isStringRules_WellKnown { + if m != nil { + return m.WellKnown + } + return nil +} + +func (x *StringRules) GetEmail() bool { + if x, ok := x.GetWellKnown().(*StringRules_Email); ok { + return x.Email + } + return false +} + +func (x *StringRules) GetHostname() bool { + if x, ok := x.GetWellKnown().(*StringRules_Hostname); ok { + return x.Hostname + } + return false +} + +func (x *StringRules) GetIp() bool { + if x, ok := x.GetWellKnown().(*StringRules_Ip); ok { + return x.Ip + } + return false +} + +func (x *StringRules) GetIpv4() bool { + if x, ok := x.GetWellKnown().(*StringRules_Ipv4); ok { + return x.Ipv4 + } + return false +} + +func (x *StringRules) GetIpv6() bool { + if x, ok := x.GetWellKnown().(*StringRules_Ipv6); ok { + return x.Ipv6 + } + return false +} + +func (x *StringRules) GetUri() bool { + if x, ok := x.GetWellKnown().(*StringRules_Uri); ok { + return x.Uri + } + return false +} + +func (x *StringRules) GetUriRef() bool { + if x, ok := x.GetWellKnown().(*StringRules_UriRef); ok { + return x.UriRef + } + return false +} + +func (x *StringRules) GetAddress() bool { + if x, ok := x.GetWellKnown().(*StringRules_Address); ok { + return x.Address + } + return false +} + +func (x *StringRules) GetUuid() bool { + if x, ok := x.GetWellKnown().(*StringRules_Uuid); ok { + return x.Uuid + } + return false +} + +func (x *StringRules) GetWellKnownRegex() KnownRegex { + if x, ok := x.GetWellKnown().(*StringRules_WellKnownRegex); ok { + return x.WellKnownRegex + } + return KnownRegex_UNKNOWN +} + +func (x *StringRules) GetStrict() bool { + if x != nil && x.Strict != nil { + return *x.Strict + } + return Default_StringRules_Strict +} + +func (x *StringRules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil 
{ + return *x.IgnoreEmpty + } + return false +} + +type isStringRules_WellKnown interface { + isStringRules_WellKnown() +} + +type StringRules_Email struct { + // Email specifies that the field must be a valid email address as + // defined by RFC 5322 + Email bool `protobuf:"varint,12,opt,name=email,oneof"` +} + +type StringRules_Hostname struct { + // Hostname specifies that the field must be a valid hostname as + // defined by RFC 1034. This constraint does not support + // internationalized domain names (IDNs). + Hostname bool `protobuf:"varint,13,opt,name=hostname,oneof"` +} + +type StringRules_Ip struct { + // Ip specifies that the field must be a valid IP (v4 or v6) address. + // Valid IPv6 addresses should not include surrounding square brackets. + Ip bool `protobuf:"varint,14,opt,name=ip,oneof"` +} + +type StringRules_Ipv4 struct { + // Ipv4 specifies that the field must be a valid IPv4 address. + Ipv4 bool `protobuf:"varint,15,opt,name=ipv4,oneof"` +} + +type StringRules_Ipv6 struct { + // Ipv6 specifies that the field must be a valid IPv6 address. Valid + // IPv6 addresses should not include surrounding square brackets. + Ipv6 bool `protobuf:"varint,16,opt,name=ipv6,oneof"` +} + +type StringRules_Uri struct { + // Uri specifies that the field must be a valid, absolute URI as defined + // by RFC 3986 + Uri bool `protobuf:"varint,17,opt,name=uri,oneof"` +} + +type StringRules_UriRef struct { + // UriRef specifies that the field must be a valid URI as defined by RFC + // 3986 and may be relative or absolute. + UriRef bool `protobuf:"varint,18,opt,name=uri_ref,json=uriRef,oneof"` +} + +type StringRules_Address struct { + // Address specifies that the field must be either a valid hostname as + // defined by RFC 1034 (which does not support internationalized domain + // names or IDNs), or it can be a valid IP (v4 or v6). + Address bool `protobuf:"varint,21,opt,name=address,oneof"` +} + +type StringRules_Uuid struct { + // Uuid specifies that the field must be a valid UUID as defined by + // RFC 4122 + Uuid bool `protobuf:"varint,22,opt,name=uuid,oneof"` +} + +type StringRules_WellKnownRegex struct { + // WellKnownRegex specifies a common well known pattern defined as a regex. 
+ WellKnownRegex KnownRegex `protobuf:"varint,24,opt,name=well_known_regex,json=wellKnownRegex,enum=validate.KnownRegex,oneof"` +} + +func (*StringRules_Email) isStringRules_WellKnown() {} + +func (*StringRules_Hostname) isStringRules_WellKnown() {} + +func (*StringRules_Ip) isStringRules_WellKnown() {} + +func (*StringRules_Ipv4) isStringRules_WellKnown() {} + +func (*StringRules_Ipv6) isStringRules_WellKnown() {} + +func (*StringRules_Uri) isStringRules_WellKnown() {} + +func (*StringRules_UriRef) isStringRules_WellKnown() {} + +func (*StringRules_Address) isStringRules_WellKnown() {} + +func (*StringRules_Uuid) isStringRules_WellKnown() {} + +func (*StringRules_WellKnownRegex) isStringRules_WellKnown() {} + +// BytesRules describe the constraints applied to `bytes` values +type BytesRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const []byte `protobuf:"bytes,1,opt,name=const" json:"const,omitempty"` + // Len specifies that this field must be the specified number of bytes + Len *uint64 `protobuf:"varint,13,opt,name=len" json:"len,omitempty"` + // MinLen specifies that this field must be the specified number of bytes + // at a minimum + MinLen *uint64 `protobuf:"varint,2,opt,name=min_len,json=minLen" json:"min_len,omitempty"` + // MaxLen specifies that this field must be the specified number of bytes + // at a maximum + MaxLen *uint64 `protobuf:"varint,3,opt,name=max_len,json=maxLen" json:"max_len,omitempty"` + // Pattern specifies that this field must match against the specified + // regular expression (RE2 syntax). The included expression should elide + // any delimiters. + Pattern *string `protobuf:"bytes,4,opt,name=pattern" json:"pattern,omitempty"` + // Prefix specifies that this field must have the specified bytes at the + // beginning of the string. + Prefix []byte `protobuf:"bytes,5,opt,name=prefix" json:"prefix,omitempty"` + // Suffix specifies that this field must have the specified bytes at the + // end of the string. + Suffix []byte `protobuf:"bytes,6,opt,name=suffix" json:"suffix,omitempty"` + // Contains specifies that this field must have the specified bytes + // anywhere in the string. 
+ Contains []byte `protobuf:"bytes,7,opt,name=contains" json:"contains,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In [][]byte `protobuf:"bytes,8,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn [][]byte `protobuf:"bytes,9,rep,name=not_in,json=notIn" json:"not_in,omitempty"` + // WellKnown rules provide advanced constraints against common byte + // patterns + // + // Types that are assignable to WellKnown: + // + // *BytesRules_Ip + // *BytesRules_Ipv4 + // *BytesRules_Ipv6 + WellKnown isBytesRules_WellKnown `protobuf_oneof:"well_known"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,14,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *BytesRules) Reset() { + *x = BytesRules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BytesRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BytesRules) ProtoMessage() {} + +func (x *BytesRules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BytesRules.ProtoReflect.Descriptor instead. +func (*BytesRules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{15} +} + +func (x *BytesRules) GetConst() []byte { + if x != nil { + return x.Const + } + return nil +} + +func (x *BytesRules) GetLen() uint64 { + if x != nil && x.Len != nil { + return *x.Len + } + return 0 +} + +func (x *BytesRules) GetMinLen() uint64 { + if x != nil && x.MinLen != nil { + return *x.MinLen + } + return 0 +} + +func (x *BytesRules) GetMaxLen() uint64 { + if x != nil && x.MaxLen != nil { + return *x.MaxLen + } + return 0 +} + +func (x *BytesRules) GetPattern() string { + if x != nil && x.Pattern != nil { + return *x.Pattern + } + return "" +} + +func (x *BytesRules) GetPrefix() []byte { + if x != nil { + return x.Prefix + } + return nil +} + +func (x *BytesRules) GetSuffix() []byte { + if x != nil { + return x.Suffix + } + return nil +} + +func (x *BytesRules) GetContains() []byte { + if x != nil { + return x.Contains + } + return nil +} + +func (x *BytesRules) GetIn() [][]byte { + if x != nil { + return x.In + } + return nil +} + +func (x *BytesRules) GetNotIn() [][]byte { + if x != nil { + return x.NotIn + } + return nil +} + +func (m *BytesRules) GetWellKnown() isBytesRules_WellKnown { + if m != nil { + return m.WellKnown + } + return nil +} + +func (x *BytesRules) GetIp() bool { + if x, ok := x.GetWellKnown().(*BytesRules_Ip); ok { + return x.Ip + } + return false +} + +func (x *BytesRules) GetIpv4() bool { + if x, ok := x.GetWellKnown().(*BytesRules_Ipv4); ok { + return x.Ipv4 + } + return false +} + +func (x *BytesRules) GetIpv6() bool { + if x, ok := x.GetWellKnown().(*BytesRules_Ipv6); ok { + return x.Ipv6 + } + return false +} + +func (x *BytesRules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +type isBytesRules_WellKnown interface { + isBytesRules_WellKnown() +} 
+ +type BytesRules_Ip struct { + // Ip specifies that the field must be a valid IP (v4 or v6) address in + // byte format + Ip bool `protobuf:"varint,10,opt,name=ip,oneof"` +} + +type BytesRules_Ipv4 struct { + // Ipv4 specifies that the field must be a valid IPv4 address in byte + // format + Ipv4 bool `protobuf:"varint,11,opt,name=ipv4,oneof"` +} + +type BytesRules_Ipv6 struct { + // Ipv6 specifies that the field must be a valid IPv6 address in byte + // format + Ipv6 bool `protobuf:"varint,12,opt,name=ipv6,oneof"` +} + +func (*BytesRules_Ip) isBytesRules_WellKnown() {} + +func (*BytesRules_Ipv4) isBytesRules_WellKnown() {} + +func (*BytesRules_Ipv6) isBytesRules_WellKnown() {} + +// EnumRules describe the constraints applied to enum values +type EnumRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Const specifies that this field must be exactly the specified value + Const *int32 `protobuf:"varint,1,opt,name=const" json:"const,omitempty"` + // DefinedOnly specifies that this field must be only one of the defined + // values for this enum, failing on any undefined value. + DefinedOnly *bool `protobuf:"varint,2,opt,name=defined_only,json=definedOnly" json:"defined_only,omitempty"` + // In specifies that this field must be equal to one of the specified + // values + In []int32 `protobuf:"varint,3,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []int32 `protobuf:"varint,4,rep,name=not_in,json=notIn" json:"not_in,omitempty"` +} + +func (x *EnumRules) Reset() { + *x = EnumRules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumRules) ProtoMessage() {} + +func (x *EnumRules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumRules.ProtoReflect.Descriptor instead. +func (*EnumRules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{16} +} + +func (x *EnumRules) GetConst() int32 { + if x != nil && x.Const != nil { + return *x.Const + } + return 0 +} + +func (x *EnumRules) GetDefinedOnly() bool { + if x != nil && x.DefinedOnly != nil { + return *x.DefinedOnly + } + return false +} + +func (x *EnumRules) GetIn() []int32 { + if x != nil { + return x.In + } + return nil +} + +func (x *EnumRules) GetNotIn() []int32 { + if x != nil { + return x.NotIn + } + return nil +} + +// MessageRules describe the constraints applied to embedded message values. +// For message-type fields, validation is performed recursively. 
+type MessageRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Skip specifies that the validation rules of this field should not be + // evaluated + Skip *bool `protobuf:"varint,1,opt,name=skip" json:"skip,omitempty"` + // Required specifies that this field must be set + Required *bool `protobuf:"varint,2,opt,name=required" json:"required,omitempty"` +} + +func (x *MessageRules) Reset() { + *x = MessageRules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MessageRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageRules) ProtoMessage() {} + +func (x *MessageRules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageRules.ProtoReflect.Descriptor instead. +func (*MessageRules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{17} +} + +func (x *MessageRules) GetSkip() bool { + if x != nil && x.Skip != nil { + return *x.Skip + } + return false +} + +func (x *MessageRules) GetRequired() bool { + if x != nil && x.Required != nil { + return *x.Required + } + return false +} + +// RepeatedRules describe the constraints applied to `repeated` values +type RepeatedRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // MinItems specifies that this field must have the specified number of + // items at a minimum + MinItems *uint64 `protobuf:"varint,1,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + // MaxItems specifies that this field must have the specified number of + // items at a maximum + MaxItems *uint64 `protobuf:"varint,2,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + // Unique specifies that all elements in this field must be unique. This + // constraint is only applicable to scalar and enum types (messages are not + // supported). + Unique *bool `protobuf:"varint,3,opt,name=unique" json:"unique,omitempty"` + // Items specifies the constraints to be applied to each item in the field. + // Repeated message fields will still execute validation against each item + // unless skip is specified here. 
+ Items *FieldRules `protobuf:"bytes,4,opt,name=items" json:"items,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,5,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *RepeatedRules) Reset() { + *x = RepeatedRules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RepeatedRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RepeatedRules) ProtoMessage() {} + +func (x *RepeatedRules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RepeatedRules.ProtoReflect.Descriptor instead. +func (*RepeatedRules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{18} +} + +func (x *RepeatedRules) GetMinItems() uint64 { + if x != nil && x.MinItems != nil { + return *x.MinItems + } + return 0 +} + +func (x *RepeatedRules) GetMaxItems() uint64 { + if x != nil && x.MaxItems != nil { + return *x.MaxItems + } + return 0 +} + +func (x *RepeatedRules) GetUnique() bool { + if x != nil && x.Unique != nil { + return *x.Unique + } + return false +} + +func (x *RepeatedRules) GetItems() *FieldRules { + if x != nil { + return x.Items + } + return nil +} + +func (x *RepeatedRules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// MapRules describe the constraints applied to `map` values +type MapRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // MinPairs specifies that this field must have the specified number of + // KVs at a minimum + MinPairs *uint64 `protobuf:"varint,1,opt,name=min_pairs,json=minPairs" json:"min_pairs,omitempty"` + // MaxPairs specifies that this field must have the specified number of + // KVs at a maximum + MaxPairs *uint64 `protobuf:"varint,2,opt,name=max_pairs,json=maxPairs" json:"max_pairs,omitempty"` + // NoSparse specifies values in this field cannot be unset. This only + // applies to map's with message value types. + NoSparse *bool `protobuf:"varint,3,opt,name=no_sparse,json=noSparse" json:"no_sparse,omitempty"` + // Keys specifies the constraints to be applied to each key in the field. + Keys *FieldRules `protobuf:"bytes,4,opt,name=keys" json:"keys,omitempty"` + // Values specifies the constraints to be applied to the value of each key + // in the field. Message values will still have their validations evaluated + // unless skip is specified here. 
+ Values *FieldRules `protobuf:"bytes,5,opt,name=values" json:"values,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + IgnoreEmpty *bool `protobuf:"varint,6,opt,name=ignore_empty,json=ignoreEmpty" json:"ignore_empty,omitempty"` +} + +func (x *MapRules) Reset() { + *x = MapRules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MapRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MapRules) ProtoMessage() {} + +func (x *MapRules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MapRules.ProtoReflect.Descriptor instead. +func (*MapRules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{19} +} + +func (x *MapRules) GetMinPairs() uint64 { + if x != nil && x.MinPairs != nil { + return *x.MinPairs + } + return 0 +} + +func (x *MapRules) GetMaxPairs() uint64 { + if x != nil && x.MaxPairs != nil { + return *x.MaxPairs + } + return 0 +} + +func (x *MapRules) GetNoSparse() bool { + if x != nil && x.NoSparse != nil { + return *x.NoSparse + } + return false +} + +func (x *MapRules) GetKeys() *FieldRules { + if x != nil { + return x.Keys + } + return nil +} + +func (x *MapRules) GetValues() *FieldRules { + if x != nil { + return x.Values + } + return nil +} + +func (x *MapRules) GetIgnoreEmpty() bool { + if x != nil && x.IgnoreEmpty != nil { + return *x.IgnoreEmpty + } + return false +} + +// AnyRules describe constraints applied exclusively to the +// `google.protobuf.Any` well-known type +type AnyRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required specifies that this field must be set + Required *bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // In specifies that this field's `type_url` must be equal to one of the + // specified values. + In []string `protobuf:"bytes,2,rep,name=in" json:"in,omitempty"` + // NotIn specifies that this field's `type_url` must not be equal to any of + // the specified values. + NotIn []string `protobuf:"bytes,3,rep,name=not_in,json=notIn" json:"not_in,omitempty"` +} + +func (x *AnyRules) Reset() { + *x = AnyRules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AnyRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnyRules) ProtoMessage() {} + +func (x *AnyRules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AnyRules.ProtoReflect.Descriptor instead. 
+func (*AnyRules) Descriptor() ([]byte, []int) {
+ return file_validate_validate_proto_rawDescGZIP(), []int{20}
+}
+
+func (x *AnyRules) GetRequired() bool {
+ if x != nil && x.Required != nil {
+ return *x.Required
+ }
+ return false
+}
+
+func (x *AnyRules) GetIn() []string {
+ if x != nil {
+ return x.In
+ }
+ return nil
+}
+
+func (x *AnyRules) GetNotIn() []string {
+ if x != nil {
+ return x.NotIn
+ }
+ return nil
+}
+
+// DurationRules describe the constraints applied exclusively to the
+// `google.protobuf.Duration` well-known type
+type DurationRules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required specifies that this field must be set
+ Required *bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"`
+ // Const specifies that this field must be exactly the specified value
+ Const *durationpb.Duration `protobuf:"bytes,2,opt,name=const" json:"const,omitempty"`
+ // Lt specifies that this field must be less than the specified value,
+ // exclusive
+ Lt *durationpb.Duration `protobuf:"bytes,3,opt,name=lt" json:"lt,omitempty"`
+ // Lte specifies that this field must be less than or equal to the
+ // specified value, inclusive
+ Lte *durationpb.Duration `protobuf:"bytes,4,opt,name=lte" json:"lte,omitempty"`
+ // Gt specifies that this field must be greater than the specified value,
+ // exclusive
+ Gt *durationpb.Duration `protobuf:"bytes,5,opt,name=gt" json:"gt,omitempty"`
+ // Gte specifies that this field must be greater than the specified value,
+ // inclusive
+ Gte *durationpb.Duration `protobuf:"bytes,6,opt,name=gte" json:"gte,omitempty"`
+ // In specifies that this field must be equal to one of the specified
+ // values
+ In []*durationpb.Duration `protobuf:"bytes,7,rep,name=in" json:"in,omitempty"`
+ // NotIn specifies that this field cannot be equal to one of the specified
+ // values
+ NotIn []*durationpb.Duration `protobuf:"bytes,8,rep,name=not_in,json=notIn" json:"not_in,omitempty"`
+}
+
+func (x *DurationRules) Reset() {
+ *x = DurationRules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_validate_validate_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DurationRules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DurationRules) ProtoMessage() {}
+
+func (x *DurationRules) ProtoReflect() protoreflect.Message {
+ mi := &file_validate_validate_proto_msgTypes[21]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DurationRules.ProtoReflect.Descriptor instead.
+func (*DurationRules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{21} +} + +func (x *DurationRules) GetRequired() bool { + if x != nil && x.Required != nil { + return *x.Required + } + return false +} + +func (x *DurationRules) GetConst() *durationpb.Duration { + if x != nil { + return x.Const + } + return nil +} + +func (x *DurationRules) GetLt() *durationpb.Duration { + if x != nil { + return x.Lt + } + return nil +} + +func (x *DurationRules) GetLte() *durationpb.Duration { + if x != nil { + return x.Lte + } + return nil +} + +func (x *DurationRules) GetGt() *durationpb.Duration { + if x != nil { + return x.Gt + } + return nil +} + +func (x *DurationRules) GetGte() *durationpb.Duration { + if x != nil { + return x.Gte + } + return nil +} + +func (x *DurationRules) GetIn() []*durationpb.Duration { + if x != nil { + return x.In + } + return nil +} + +func (x *DurationRules) GetNotIn() []*durationpb.Duration { + if x != nil { + return x.NotIn + } + return nil +} + +// TimestampRules describe the constraints applied exclusively to the +// `google.protobuf.Timestamp` well-known type +type TimestampRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required specifies that this field must be set + Required *bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Const specifies that this field must be exactly the specified value + Const *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=const" json:"const,omitempty"` + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=lt" json:"lt,omitempty"` + // Lte specifies that this field must be less than the specified value, + // inclusive + Lte *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=lte" json:"lte,omitempty"` + // Gt specifies that this field must be greater than the specified value, + // exclusive + Gt *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=gt" json:"gt,omitempty"` + // Gte specifies that this field must be greater than the specified value, + // inclusive + Gte *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=gte" json:"gte,omitempty"` + // LtNow specifies that this must be less than the current time. LtNow + // can only be used with the Within rule. + LtNow *bool `protobuf:"varint,7,opt,name=lt_now,json=ltNow" json:"lt_now,omitempty"` + // GtNow specifies that this must be greater than the current time. GtNow + // can only be used with the Within rule. + GtNow *bool `protobuf:"varint,8,opt,name=gt_now,json=gtNow" json:"gt_now,omitempty"` + // Within specifies that this field must be within this duration of the + // current time. This constraint can be used alone or with the LtNow and + // GtNow rules. 
+ Within *durationpb.Duration `protobuf:"bytes,9,opt,name=within" json:"within,omitempty"` +} + +func (x *TimestampRules) Reset() { + *x = TimestampRules{} + if protoimpl.UnsafeEnabled { + mi := &file_validate_validate_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimestampRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimestampRules) ProtoMessage() {} + +func (x *TimestampRules) ProtoReflect() protoreflect.Message { + mi := &file_validate_validate_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimestampRules.ProtoReflect.Descriptor instead. +func (*TimestampRules) Descriptor() ([]byte, []int) { + return file_validate_validate_proto_rawDescGZIP(), []int{22} +} + +func (x *TimestampRules) GetRequired() bool { + if x != nil && x.Required != nil { + return *x.Required + } + return false +} + +func (x *TimestampRules) GetConst() *timestamppb.Timestamp { + if x != nil { + return x.Const + } + return nil +} + +func (x *TimestampRules) GetLt() *timestamppb.Timestamp { + if x != nil { + return x.Lt + } + return nil +} + +func (x *TimestampRules) GetLte() *timestamppb.Timestamp { + if x != nil { + return x.Lte + } + return nil +} + +func (x *TimestampRules) GetGt() *timestamppb.Timestamp { + if x != nil { + return x.Gt + } + return nil +} + +func (x *TimestampRules) GetGte() *timestamppb.Timestamp { + if x != nil { + return x.Gte + } + return nil +} + +func (x *TimestampRules) GetLtNow() bool { + if x != nil && x.LtNow != nil { + return *x.LtNow + } + return false +} + +func (x *TimestampRules) GetGtNow() bool { + if x != nil && x.GtNow != nil { + return *x.GtNow + } + return false +} + +func (x *TimestampRules) GetWithin() *durationpb.Duration { + if x != nil { + return x.Within + } + return nil +} + +var file_validate_validate_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 1071, + Name: "validate.disabled", + Tag: "varint,1071,opt,name=disabled", + Filename: "validate/validate.proto", + }, + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 1072, + Name: "validate.ignored", + Tag: "varint,1072,opt,name=ignored", + Filename: "validate/validate.proto", + }, + { + ExtendedType: (*descriptorpb.OneofOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 1071, + Name: "validate.required", + Tag: "varint,1071,opt,name=required", + Filename: "validate/validate.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*FieldRules)(nil), + Field: 1071, + Name: "validate.rules", + Tag: "bytes,1071,opt,name=rules", + Filename: "validate/validate.proto", + }, +} + +// Extension fields to descriptorpb.MessageOptions. +var ( + // Disabled nullifies any validation rules for this message, including any + // message fields associated with it that do support validation. + // + // optional bool disabled = 1071; + E_Disabled = &file_validate_validate_proto_extTypes[0] + // Ignore skips generation of validation methods for this message. + // + // optional bool ignored = 1072; + E_Ignored = &file_validate_validate_proto_extTypes[1] +) + +// Extension fields to descriptorpb.OneofOptions. 
+var ( + // Required ensures that exactly one the field options in a oneof is set; + // validation fails if no fields in the oneof are set. + // + // optional bool required = 1071; + E_Required = &file_validate_validate_proto_extTypes[2] +) + +// Extension fields to descriptorpb.FieldOptions. +var ( + // Rules specify the validations to be performed on this field. By default, + // no validation is performed against a field. + // + // optional validate.FieldRules rules = 1071; + E_Rules = &file_validate_validate_proto_extTypes[3] +) + +var File_validate_validate_proto protoreflect.FileDescriptor + +var file_validate_validate_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc8, 0x08, 0x0a, 0x0a, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x07, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, 0x05, 0x66, 0x6c, 0x6f, 0x61, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2e, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x05, + 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x06, + 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x12, 0x2c, 0x0a, 0x05, 0x69, 0x6e, 0x74, 0x33, 0x32, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x05, 0x69, + 0x6e, 0x74, 0x33, 0x32, 0x12, 0x2c, 0x0a, 0x05, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x49, + 0x6e, 0x74, 0x36, 0x34, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x05, 0x69, 0x6e, 0x74, + 0x36, 0x34, 0x12, 0x2f, 0x0a, 0x06, 0x75, 0x69, 0x6e, 0x74, 0x33, 0x32, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x06, 0x75, 0x69, 0x6e, + 0x74, 0x33, 0x32, 0x12, 0x2f, 0x0a, 0x06, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x55, + 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x75, 0x6c, 0x65, 
0x73, 0x48, 0x00, 0x52, 0x06, 0x75, 0x69, + 0x6e, 0x74, 0x36, 0x34, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x69, 0x6e, 0x74, 0x33, 0x32, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, + 0x53, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x06, 0x73, + 0x69, 0x6e, 0x74, 0x33, 0x32, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x53, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x06, + 0x73, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x12, 0x32, 0x0a, 0x07, 0x66, 0x69, 0x78, 0x65, 0x64, 0x33, + 0x32, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x46, 0x69, 0x78, 0x65, 0x64, 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, + 0x00, 0x52, 0x07, 0x66, 0x69, 0x78, 0x65, 0x64, 0x33, 0x32, 0x12, 0x32, 0x0a, 0x07, 0x66, 0x69, + 0x78, 0x65, 0x64, 0x36, 0x34, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x46, 0x69, 0x78, 0x65, 0x64, 0x36, 0x34, 0x52, 0x75, + 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x07, 0x66, 0x69, 0x78, 0x65, 0x64, 0x36, 0x34, 0x12, 0x35, + 0x0a, 0x08, 0x73, 0x66, 0x69, 0x78, 0x65, 0x64, 0x33, 0x32, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x46, 0x69, 0x78, + 0x65, 0x64, 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x08, 0x73, 0x66, 0x69, + 0x78, 0x65, 0x64, 0x33, 0x32, 0x12, 0x35, 0x0a, 0x08, 0x73, 0x66, 0x69, 0x78, 0x65, 0x64, 0x36, + 0x34, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x53, 0x46, 0x69, 0x78, 0x65, 0x64, 0x36, 0x34, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x48, 0x00, 0x52, 0x08, 0x73, 0x66, 0x69, 0x78, 0x65, 0x64, 0x36, 0x34, 0x12, 0x29, 0x0a, 0x04, + 0x62, 0x6f, 0x6f, 0x6c, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, + 0x00, 0x52, 0x04, 0x62, 0x6f, 0x6f, 0x6c, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, + 0x52, 0x06, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x2c, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, + 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, + 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x04, 0x65, 0x6e, 0x75, + 0x6d, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x12, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x52, + 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x08, + 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x26, 0x0a, 0x03, 0x6d, 0x61, 0x70, 0x18, + 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 
0x61, 0x74, 0x65, + 0x2e, 0x4d, 0x61, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x03, 0x6d, 0x61, 0x70, + 0x12, 0x26, 0x0a, 0x03, 0x61, 0x6e, 0x79, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x75, 0x6c, 0x65, + 0x73, 0x48, 0x00, 0x52, 0x03, 0x61, 0x6e, 0x79, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x75, + 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x16, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x09, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x22, 0xb0, 0x01, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, + 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x02, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x02, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x02, 0x52, 0x02, 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x02, 0x52, 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x02, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, + 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x02, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, + 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, + 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb1, 0x01, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, + 0x67, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x02, 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x67, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x01, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, + 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x01, 0x52, 0x05, + 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, + 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, + 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb0, 0x01, 0x0a, 0x0a, 0x49, 0x6e, 0x74, + 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, + 0x02, 
0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, + 0x03, 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, + 0x0e, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x67, 0x74, 0x12, + 0x10, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x67, 0x74, + 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x05, 0x52, 0x02, 0x69, + 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, + 0x05, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, + 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, + 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb0, 0x01, 0x0a, 0x0a, + 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, + 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x6c, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6c, + 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, + 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x03, + 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, + 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, + 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb1, + 0x01, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, + 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x63, + 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x02, 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, + 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, + 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x22, 0xb1, 0x01, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x75, 0x6c, + 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x67, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 
0x04, 0x52, 0x02, 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x74, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x04, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, + 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x04, 0x52, 0x05, 0x6e, 0x6f, + 0x74, 0x49, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, + 0x70, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, + 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb1, 0x01, 0x0a, 0x0b, 0x53, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, + 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x11, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x11, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x0e, + 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x11, 0x52, 0x02, 0x67, 0x74, 0x12, 0x10, + 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x03, 0x67, 0x74, 0x65, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x11, 0x52, 0x02, 0x69, 0x6e, + 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x11, + 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, + 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, + 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb1, 0x01, 0x0a, 0x0b, 0x53, + 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, + 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x12, 0x52, 0x02, 0x6c, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x12, 0x52, 0x03, 0x6c, + 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x12, 0x52, 0x02, + 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x12, 0x52, + 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x12, + 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, + 0x20, 0x03, 0x28, 0x12, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, + 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb2, + 0x01, 0x0a, 0x0c, 0x46, 0x69, 0x78, 0x65, 0x64, 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, + 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07, 0x52, 0x05, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x07, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x07, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x07, 0x52, 0x02, 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x07, 0x52, 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x07, 0x52, 0x02, 0x69, 0x6e, 
0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, + 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x07, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, + 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x22, 0xb2, 0x01, 0x0a, 0x0c, 0x46, 0x69, 0x78, 0x65, 0x64, 0x36, 0x34, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x06, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x06, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x06, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, + 0x67, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x06, 0x52, 0x02, 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x67, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x06, 0x52, 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x06, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, + 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x06, 0x52, 0x05, + 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, + 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, + 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb3, 0x01, 0x0a, 0x0d, 0x53, 0x46, 0x69, + 0x78, 0x65, 0x64, 0x33, 0x32, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0f, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, + 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0f, 0x52, 0x02, 0x6c, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0f, 0x52, 0x03, 0x6c, + 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0f, 0x52, 0x02, + 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0f, 0x52, + 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0f, + 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, + 0x20, 0x03, 0x28, 0x0f, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, + 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xb3, + 0x01, 0x0a, 0x0d, 0x53, 0x46, 0x69, 0x78, 0x65, 0x64, 0x36, 0x34, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x10, 0x52, + 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x10, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x10, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x10, 0x52, 0x02, 0x67, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x10, 0x52, 0x03, 0x67, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x10, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, + 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x10, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, + 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 
0x6d, 0x70, 0x74, + 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x22, 0x21, 0x0a, 0x09, 0x42, 0x6f, 0x6f, 0x6c, 0x52, 0x75, 0x6c, 0x65, + 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x22, 0xd4, 0x05, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x10, 0x0a, + 0x03, 0x6c, 0x65, 0x6e, 0x18, 0x13, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6c, 0x65, 0x6e, 0x12, + 0x17, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x06, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x5f, + 0x6c, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6d, 0x61, 0x78, 0x4c, 0x65, + 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x65, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x14, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6c, 0x65, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1b, + 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, + 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, + 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, + 0x65, 0x72, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, + 0x72, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, + 0x66, 0x66, 0x69, 0x78, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, + 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x21, + 0x0a, 0x0c, 0x6e, 0x6f, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x17, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x6f, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x02, 0x69, + 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x0b, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x16, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, + 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, + 0x12, 0x1c, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, + 0x28, 0x08, 0x48, 0x00, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, + 0x0a, 0x02, 0x69, 0x70, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x02, 0x69, 0x70, + 0x12, 0x14, 0x0a, 0x04, 0x69, 0x70, 0x76, 0x34, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, + 0x52, 0x04, 0x69, 0x70, 0x76, 0x34, 0x12, 0x14, 0x0a, 0x04, 0x69, 0x70, 0x76, 0x36, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x04, 0x69, 0x70, 0x76, 0x36, 0x12, 0x12, 0x0a, 0x03, + 0x75, 0x72, 0x69, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x03, 0x75, 0x72, 0x69, + 0x12, 0x19, 0x0a, 0x07, 0x75, 0x72, 0x69, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x12, 0x20, 0x01, 0x28, + 0x08, 
0x48, 0x00, 0x52, 0x06, 0x75, 0x72, 0x69, 0x52, 0x65, 0x66, 0x12, 0x1a, 0x0a, 0x07, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, + 0x16, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x40, 0x0a, + 0x10, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x72, 0x65, 0x67, 0x65, + 0x78, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x48, 0x00, 0x52, + 0x0e, 0x77, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, + 0x1c, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x12, 0x21, 0x0a, + 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x1a, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x42, 0x0c, 0x0a, 0x0a, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x22, 0xe2, + 0x02, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, + 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x65, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x03, 0x6c, 0x65, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x12, 0x17, + 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x06, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, + 0x72, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, + 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, 0x66, + 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, + 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x6e, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, + 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05, 0x6e, + 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x10, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, + 0x48, 0x00, 0x52, 0x02, 0x69, 0x70, 0x12, 0x14, 0x0a, 0x04, 0x69, 0x70, 0x76, 0x34, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x04, 0x69, 0x70, 0x76, 0x34, 0x12, 0x14, 0x0a, 0x04, + 0x69, 0x70, 0x76, 0x36, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x04, 0x69, 0x70, + 0x76, 0x36, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, + 0x74, 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x0c, 0x0a, 0x0a, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x22, 0x6b, 0x0a, 0x09, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x12, 0x14, 0x0a, 0x05, 0x63, 
0x6f, 0x6e, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, + 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x64, 0x65, + 0x66, 0x69, 0x6e, 0x65, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x05, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, + 0x5f, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, + 0x22, 0x3e, 0x0a, 0x0c, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x12, 0x12, 0x0a, 0x04, 0x73, 0x6b, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, + 0x73, 0x6b, 0x69, 0x70, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, + 0x22, 0xb0, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x52, 0x75, 0x6c, + 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, + 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x16, 0x0a, 0x06, + 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x75, 0x6e, + 0x69, 0x71, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, + 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x22, 0xdc, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x69, 0x72, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x50, 0x61, 0x69, 0x72, 0x73, 0x12, 0x1b, 0x0a, + 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x61, 0x69, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x08, 0x6d, 0x61, 0x78, 0x50, 0x61, 0x69, 0x72, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, + 0x5f, 0x73, 0x70, 0x61, 0x72, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, + 0x6f, 0x53, 0x70, 0x61, 0x72, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x04, 0x6b, 0x65, 0x79, + 0x73, 0x12, 0x2c, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, + 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x22, 0x4d, 0x0a, 0x08, 0x41, 0x6e, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x1a, + 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, + 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, + 0x6e, 0x22, 0xe9, 0x02, 0x0a, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x75, + 0x6c, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, + 0x2f, 0x0a, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, + 0x12, 0x29, 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x2b, 0x0a, 0x03, 0x6c, + 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x29, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x02, 0x67, 0x74, 0x12, 0x2b, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x67, 0x74, 0x65, + 0x12, 0x29, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x30, 0x0a, 0x06, 0x6e, + 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x22, 0xf3, 0x02, + 0x0a, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x05, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x12, 0x2a, + 0x0a, 0x02, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x02, 0x6c, 0x74, 0x12, 0x2c, 0x0a, 0x03, 0x6c, 0x74, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x03, 0x6c, 0x74, 0x65, 0x12, 0x2a, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x02, 0x67, 0x74, 0x12, 0x2c, 0x0a, 0x03, 0x67, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x03, 0x67, + 0x74, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x6c, 0x74, 0x5f, 0x6e, 0x6f, 0x77, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x05, 0x6c, 0x74, 0x4e, 0x6f, 0x77, 0x12, 0x15, 0x0a, 0x06, 0x67, 0x74, 0x5f, + 0x6e, 0x6f, 0x77, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x67, 0x74, 0x4e, 0x6f, 0x77, + 0x12, 0x31, 0x0a, 0x06, 0x77, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x77, 0x69, 0x74, + 0x68, 0x69, 0x6e, 0x2a, 0x46, 0x0a, 0x0a, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x67, 0x65, + 0x78, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, + 0x0a, 0x10, 0x48, 0x54, 0x54, 0x50, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x5f, 0x4e, 0x41, + 0x4d, 0x45, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x48, 0x54, 0x54, 0x50, 0x5f, 0x48, 0x45, 0x41, + 0x44, 0x45, 0x52, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x02, 0x3a, 0x3c, 0x0a, 0x08, 0x64, + 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xaf, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x3a, 0x3a, 0x0a, 0x07, 0x69, 0x67, 0x6e, + 0x6f, 0x72, 0x65, 0x64, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xb0, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x67, + 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x3a, 0x3a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x64, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0xaf, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x64, 0x3a, 0x4a, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xaf, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x42, 0x50, 0x0a, + 0x1a, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x70, + 0x67, 0x76, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5a, 0x32, 0x67, 0x69, 0x74, + 0x68, 
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, + 0x78, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, +} + +var ( + file_validate_validate_proto_rawDescOnce sync.Once + file_validate_validate_proto_rawDescData = file_validate_validate_proto_rawDesc +) + +func file_validate_validate_proto_rawDescGZIP() []byte { + file_validate_validate_proto_rawDescOnce.Do(func() { + file_validate_validate_proto_rawDescData = protoimpl.X.CompressGZIP(file_validate_validate_proto_rawDescData) + }) + return file_validate_validate_proto_rawDescData +} + +var file_validate_validate_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_validate_validate_proto_msgTypes = make([]protoimpl.MessageInfo, 23) +var file_validate_validate_proto_goTypes = []interface{}{ + (KnownRegex)(0), // 0: validate.KnownRegex + (*FieldRules)(nil), // 1: validate.FieldRules + (*FloatRules)(nil), // 2: validate.FloatRules + (*DoubleRules)(nil), // 3: validate.DoubleRules + (*Int32Rules)(nil), // 4: validate.Int32Rules + (*Int64Rules)(nil), // 5: validate.Int64Rules + (*UInt32Rules)(nil), // 6: validate.UInt32Rules + (*UInt64Rules)(nil), // 7: validate.UInt64Rules + (*SInt32Rules)(nil), // 8: validate.SInt32Rules + (*SInt64Rules)(nil), // 9: validate.SInt64Rules + (*Fixed32Rules)(nil), // 10: validate.Fixed32Rules + (*Fixed64Rules)(nil), // 11: validate.Fixed64Rules + (*SFixed32Rules)(nil), // 12: validate.SFixed32Rules + (*SFixed64Rules)(nil), // 13: validate.SFixed64Rules + (*BoolRules)(nil), // 14: validate.BoolRules + (*StringRules)(nil), // 15: validate.StringRules + (*BytesRules)(nil), // 16: validate.BytesRules + (*EnumRules)(nil), // 17: validate.EnumRules + (*MessageRules)(nil), // 18: validate.MessageRules + (*RepeatedRules)(nil), // 19: validate.RepeatedRules + (*MapRules)(nil), // 20: validate.MapRules + (*AnyRules)(nil), // 21: validate.AnyRules + (*DurationRules)(nil), // 22: validate.DurationRules + (*TimestampRules)(nil), // 23: validate.TimestampRules + (*durationpb.Duration)(nil), // 24: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 25: google.protobuf.Timestamp + (*descriptorpb.MessageOptions)(nil), // 26: google.protobuf.MessageOptions + (*descriptorpb.OneofOptions)(nil), // 27: google.protobuf.OneofOptions + (*descriptorpb.FieldOptions)(nil), // 28: google.protobuf.FieldOptions +} +var file_validate_validate_proto_depIdxs = []int32{ + 18, // 0: validate.FieldRules.message:type_name -> validate.MessageRules + 2, // 1: validate.FieldRules.float:type_name -> validate.FloatRules + 3, // 2: validate.FieldRules.double:type_name -> validate.DoubleRules + 4, // 3: validate.FieldRules.int32:type_name -> validate.Int32Rules + 5, // 4: validate.FieldRules.int64:type_name -> validate.Int64Rules + 6, // 5: validate.FieldRules.uint32:type_name -> validate.UInt32Rules + 7, // 6: validate.FieldRules.uint64:type_name -> validate.UInt64Rules + 8, // 7: validate.FieldRules.sint32:type_name -> validate.SInt32Rules + 9, // 8: validate.FieldRules.sint64:type_name -> validate.SInt64Rules + 10, // 9: validate.FieldRules.fixed32:type_name -> validate.Fixed32Rules + 11, // 10: validate.FieldRules.fixed64:type_name -> validate.Fixed64Rules + 12, // 11: validate.FieldRules.sfixed32:type_name -> validate.SFixed32Rules + 13, // 12: validate.FieldRules.sfixed64:type_name -> validate.SFixed64Rules + 14, // 13: validate.FieldRules.bool:type_name -> 
validate.BoolRules + 15, // 14: validate.FieldRules.string:type_name -> validate.StringRules + 16, // 15: validate.FieldRules.bytes:type_name -> validate.BytesRules + 17, // 16: validate.FieldRules.enum:type_name -> validate.EnumRules + 19, // 17: validate.FieldRules.repeated:type_name -> validate.RepeatedRules + 20, // 18: validate.FieldRules.map:type_name -> validate.MapRules + 21, // 19: validate.FieldRules.any:type_name -> validate.AnyRules + 22, // 20: validate.FieldRules.duration:type_name -> validate.DurationRules + 23, // 21: validate.FieldRules.timestamp:type_name -> validate.TimestampRules + 0, // 22: validate.StringRules.well_known_regex:type_name -> validate.KnownRegex + 1, // 23: validate.RepeatedRules.items:type_name -> validate.FieldRules + 1, // 24: validate.MapRules.keys:type_name -> validate.FieldRules + 1, // 25: validate.MapRules.values:type_name -> validate.FieldRules + 24, // 26: validate.DurationRules.const:type_name -> google.protobuf.Duration + 24, // 27: validate.DurationRules.lt:type_name -> google.protobuf.Duration + 24, // 28: validate.DurationRules.lte:type_name -> google.protobuf.Duration + 24, // 29: validate.DurationRules.gt:type_name -> google.protobuf.Duration + 24, // 30: validate.DurationRules.gte:type_name -> google.protobuf.Duration + 24, // 31: validate.DurationRules.in:type_name -> google.protobuf.Duration + 24, // 32: validate.DurationRules.not_in:type_name -> google.protobuf.Duration + 25, // 33: validate.TimestampRules.const:type_name -> google.protobuf.Timestamp + 25, // 34: validate.TimestampRules.lt:type_name -> google.protobuf.Timestamp + 25, // 35: validate.TimestampRules.lte:type_name -> google.protobuf.Timestamp + 25, // 36: validate.TimestampRules.gt:type_name -> google.protobuf.Timestamp + 25, // 37: validate.TimestampRules.gte:type_name -> google.protobuf.Timestamp + 24, // 38: validate.TimestampRules.within:type_name -> google.protobuf.Duration + 26, // 39: validate.disabled:extendee -> google.protobuf.MessageOptions + 26, // 40: validate.ignored:extendee -> google.protobuf.MessageOptions + 27, // 41: validate.required:extendee -> google.protobuf.OneofOptions + 28, // 42: validate.rules:extendee -> google.protobuf.FieldOptions + 1, // 43: validate.rules:type_name -> validate.FieldRules + 44, // [44:44] is the sub-list for method output_type + 44, // [44:44] is the sub-list for method input_type + 43, // [43:44] is the sub-list for extension type_name + 39, // [39:43] is the sub-list for extension extendee + 0, // [0:39] is the sub-list for field type_name +} + +func init() { file_validate_validate_proto_init() } +func file_validate_validate_proto_init() { + if File_validate_validate_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_validate_validate_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FloatRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DoubleRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_validate_validate_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Int32Rules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Int64Rules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UInt32Rules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UInt64Rules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SInt32Rules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SInt64Rules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Fixed32Rules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Fixed64Rules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SFixed32Rules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SFixed64Rules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BoolRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StringRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BytesRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumRules); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MessageRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RepeatedRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MapRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AnyRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DurationRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validate_validate_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TimestampRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_validate_validate_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*FieldRules_Float)(nil), + (*FieldRules_Double)(nil), + (*FieldRules_Int32)(nil), + (*FieldRules_Int64)(nil), + (*FieldRules_Uint32)(nil), + (*FieldRules_Uint64)(nil), + (*FieldRules_Sint32)(nil), + (*FieldRules_Sint64)(nil), + (*FieldRules_Fixed32)(nil), + (*FieldRules_Fixed64)(nil), + (*FieldRules_Sfixed32)(nil), + (*FieldRules_Sfixed64)(nil), + (*FieldRules_Bool)(nil), + (*FieldRules_String_)(nil), + (*FieldRules_Bytes)(nil), + (*FieldRules_Enum)(nil), + (*FieldRules_Repeated)(nil), + (*FieldRules_Map)(nil), + (*FieldRules_Any)(nil), + (*FieldRules_Duration)(nil), + (*FieldRules_Timestamp)(nil), + } + file_validate_validate_proto_msgTypes[14].OneofWrappers = []interface{}{ + (*StringRules_Email)(nil), + (*StringRules_Hostname)(nil), + (*StringRules_Ip)(nil), + (*StringRules_Ipv4)(nil), + (*StringRules_Ipv6)(nil), + (*StringRules_Uri)(nil), + (*StringRules_UriRef)(nil), + (*StringRules_Address)(nil), + (*StringRules_Uuid)(nil), + (*StringRules_WellKnownRegex)(nil), + } + file_validate_validate_proto_msgTypes[15].OneofWrappers = []interface{}{ + (*BytesRules_Ip)(nil), + (*BytesRules_Ipv4)(nil), + (*BytesRules_Ipv6)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_validate_validate_proto_rawDesc, + NumEnums: 1, + NumMessages: 23, + NumExtensions: 4, + NumServices: 0, + }, + GoTypes: file_validate_validate_proto_goTypes, + DependencyIndexes: file_validate_validate_proto_depIdxs, + EnumInfos: file_validate_validate_proto_enumTypes, + MessageInfos: file_validate_validate_proto_msgTypes, + ExtensionInfos: file_validate_validate_proto_extTypes, + }.Build() + File_validate_validate_proto = out.File + file_validate_validate_proto_rawDesc = nil + file_validate_validate_proto_goTypes = nil + 
file_validate_validate_proto_depIdxs = nil +} diff --git a/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.proto b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.proto new file mode 100644 index 0000000000..5aa9653938 --- /dev/null +++ b/vendor/github.com/envoyproxy/protoc-gen-validate/validate/validate.proto @@ -0,0 +1,862 @@ +syntax = "proto2"; +package validate; + +option go_package = "github.com/envoyproxy/protoc-gen-validate/validate"; +option java_package = "io.envoyproxy.pgv.validate"; + +import "google/protobuf/descriptor.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +// Validation rules applied at the message level +extend google.protobuf.MessageOptions { + // Disabled nullifies any validation rules for this message, including any + // message fields associated with it that do support validation. + optional bool disabled = 1071; + // Ignore skips generation of validation methods for this message. + optional bool ignored = 1072; +} + +// Validation rules applied at the oneof level +extend google.protobuf.OneofOptions { + // Required ensures that exactly one the field options in a oneof is set; + // validation fails if no fields in the oneof are set. + optional bool required = 1071; +} + +// Validation rules applied at the field level +extend google.protobuf.FieldOptions { + // Rules specify the validations to be performed on this field. By default, + // no validation is performed against a field. + optional FieldRules rules = 1071; +} + +// FieldRules encapsulates the rules for each type of field. Depending on the +// field, the correct set should be used to ensure proper validations. +message FieldRules { + optional MessageRules message = 17; + oneof type { + // Scalar Field Types + FloatRules float = 1; + DoubleRules double = 2; + Int32Rules int32 = 3; + Int64Rules int64 = 4; + UInt32Rules uint32 = 5; + UInt64Rules uint64 = 6; + SInt32Rules sint32 = 7; + SInt64Rules sint64 = 8; + Fixed32Rules fixed32 = 9; + Fixed64Rules fixed64 = 10; + SFixed32Rules sfixed32 = 11; + SFixed64Rules sfixed64 = 12; + BoolRules bool = 13; + StringRules string = 14; + BytesRules bytes = 15; + + // Complex Field Types + EnumRules enum = 16; + RepeatedRules repeated = 18; + MapRules map = 19; + + // Well-Known Field Types + AnyRules any = 20; + DurationRules duration = 21; + TimestampRules timestamp = 22; + } +} + +// FloatRules describes the constraints applied to `float` values +message FloatRules { + // Const specifies that this field must be exactly the specified value + optional float const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional float lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional float lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional float gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ optional float gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated float in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated float not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// DoubleRules describes the constraints applied to `double` values +message DoubleRules { + // Const specifies that this field must be exactly the specified value + optional double const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional double lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional double lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional double gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. + optional double gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated double in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated double not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// Int32Rules describes the constraints applied to `int32` values +message Int32Rules { + // Const specifies that this field must be exactly the specified value + optional int32 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional int32 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional int32 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional int32 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. + optional int32 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated int32 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated int32 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// Int64Rules describes the constraints applied to `int64` values +message Int64Rules { + // Const specifies that this field must be exactly the specified value + optional int64 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional int64 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional int64 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. 
+ optional int64 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. + optional int64 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated int64 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated int64 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// UInt32Rules describes the constraints applied to `uint32` values +message UInt32Rules { + // Const specifies that this field must be exactly the specified value + optional uint32 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional uint32 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional uint32 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional uint32 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. + optional uint32 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated uint32 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated uint32 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// UInt64Rules describes the constraints applied to `uint64` values +message UInt64Rules { + // Const specifies that this field must be exactly the specified value + optional uint64 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional uint64 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional uint64 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional uint64 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ optional uint64 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated uint64 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated uint64 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// SInt32Rules describes the constraints applied to `sint32` values +message SInt32Rules { + // Const specifies that this field must be exactly the specified value + optional sint32 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional sint32 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional sint32 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional sint32 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. + optional sint32 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated sint32 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated sint32 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// SInt64Rules describes the constraints applied to `sint64` values +message SInt64Rules { + // Const specifies that this field must be exactly the specified value + optional sint64 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional sint64 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional sint64 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional sint64 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. + optional sint64 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated sint64 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated sint64 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// Fixed32Rules describes the constraints applied to `fixed32` values +message Fixed32Rules { + // Const specifies that this field must be exactly the specified value + optional fixed32 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional fixed32 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional fixed32 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. 
If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional fixed32 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. + optional fixed32 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated fixed32 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated fixed32 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// Fixed64Rules describes the constraints applied to `fixed64` values +message Fixed64Rules { + // Const specifies that this field must be exactly the specified value + optional fixed64 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional fixed64 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional fixed64 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional fixed64 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. + optional fixed64 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated fixed64 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated fixed64 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// SFixed32Rules describes the constraints applied to `sfixed32` values +message SFixed32Rules { + // Const specifies that this field must be exactly the specified value + optional sfixed32 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional sfixed32 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional sfixed32 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional sfixed32 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ optional sfixed32 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated sfixed32 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated sfixed32 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// SFixed64Rules describes the constraints applied to `sfixed64` values +message SFixed64Rules { + // Const specifies that this field must be exactly the specified value + optional sfixed64 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional sfixed64 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional sfixed64 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional sfixed64 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. + optional sfixed64 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated sfixed64 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated sfixed64 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// BoolRules describes the constraints applied to `bool` values +message BoolRules { + // Const specifies that this field must be exactly the specified value + optional bool const = 1; +} + +// StringRules describe the constraints applied to `string` values +message StringRules { + // Const specifies that this field must be exactly the specified value + optional string const = 1; + + // Len specifies that this field must be the specified number of + // characters (Unicode code points). Note that the number of + // characters may differ from the number of bytes in the string. + optional uint64 len = 19; + + // MinLen specifies that this field must be the specified number of + // characters (Unicode code points) at a minimum. Note that the number of + // characters may differ from the number of bytes in the string. + optional uint64 min_len = 2; + + // MaxLen specifies that this field must be the specified number of + // characters (Unicode code points) at a maximum. Note that the number of + // characters may differ from the number of bytes in the string. + optional uint64 max_len = 3; + + // LenBytes specifies that this field must be the specified number of bytes + optional uint64 len_bytes = 20; + + // MinBytes specifies that this field must be the specified number of bytes + // at a minimum + optional uint64 min_bytes = 4; + + // MaxBytes specifies that this field must be the specified number of bytes + // at a maximum + optional uint64 max_bytes = 5; + + // Pattern specifies that this field must match against the specified + // regular expression (RE2 syntax). The included expression should elide + // any delimiters. + optional string pattern = 6; + + // Prefix specifies that this field must have the specified substring at + // the beginning of the string. 
+ optional string prefix = 7; + + // Suffix specifies that this field must have the specified substring at + // the end of the string. + optional string suffix = 8; + + // Contains specifies that this field must have the specified substring + // anywhere in the string. + optional string contains = 9; + + // NotContains specifies that this field cannot have the specified substring + // anywhere in the string. + optional string not_contains = 23; + + // In specifies that this field must be equal to one of the specified + // values + repeated string in = 10; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated string not_in = 11; + + // WellKnown rules provide advanced constraints against common string + // patterns + oneof well_known { + // Email specifies that the field must be a valid email address as + // defined by RFC 5322 + bool email = 12; + + // Hostname specifies that the field must be a valid hostname as + // defined by RFC 1034. This constraint does not support + // internationalized domain names (IDNs). + bool hostname = 13; + + // Ip specifies that the field must be a valid IP (v4 or v6) address. + // Valid IPv6 addresses should not include surrounding square brackets. + bool ip = 14; + + // Ipv4 specifies that the field must be a valid IPv4 address. + bool ipv4 = 15; + + // Ipv6 specifies that the field must be a valid IPv6 address. Valid + // IPv6 addresses should not include surrounding square brackets. + bool ipv6 = 16; + + // Uri specifies that the field must be a valid, absolute URI as defined + // by RFC 3986 + bool uri = 17; + + // UriRef specifies that the field must be a valid URI as defined by RFC + // 3986 and may be relative or absolute. + bool uri_ref = 18; + + // Address specifies that the field must be either a valid hostname as + // defined by RFC 1034 (which does not support internationalized domain + // names or IDNs), or it can be a valid IP (v4 or v6). + bool address = 21; + + // Uuid specifies that the field must be a valid UUID as defined by + // RFC 4122 + bool uuid = 22; + + // WellKnownRegex specifies a common well known pattern defined as a regex. + KnownRegex well_known_regex = 24; + } + + // This applies to regexes HTTP_HEADER_NAME and HTTP_HEADER_VALUE to enable + // strict header validation. + // By default, this is true, and HTTP header validations are RFC-compliant. + // Setting to false will enable a looser validations that only disallows + // \r\n\0 characters, which can be used to bypass header matching rules. + optional bool strict = 25 [default = true]; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 26; +} + +// WellKnownRegex contain some well-known patterns. +enum KnownRegex { + UNKNOWN = 0; + + // HTTP header name as defined by RFC 7230. + HTTP_HEADER_NAME = 1; + + // HTTP header value as defined by RFC 7230. 
+ HTTP_HEADER_VALUE = 2; +} + +// BytesRules describe the constraints applied to `bytes` values +message BytesRules { + // Const specifies that this field must be exactly the specified value + optional bytes const = 1; + + // Len specifies that this field must be the specified number of bytes + optional uint64 len = 13; + + // MinLen specifies that this field must be the specified number of bytes + // at a minimum + optional uint64 min_len = 2; + + // MaxLen specifies that this field must be the specified number of bytes + // at a maximum + optional uint64 max_len = 3; + + // Pattern specifies that this field must match against the specified + // regular expression (RE2 syntax). The included expression should elide + // any delimiters. + optional string pattern = 4; + + // Prefix specifies that this field must have the specified bytes at the + // beginning of the string. + optional bytes prefix = 5; + + // Suffix specifies that this field must have the specified bytes at the + // end of the string. + optional bytes suffix = 6; + + // Contains specifies that this field must have the specified bytes + // anywhere in the string. + optional bytes contains = 7; + + // In specifies that this field must be equal to one of the specified + // values + repeated bytes in = 8; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated bytes not_in = 9; + + // WellKnown rules provide advanced constraints against common byte + // patterns + oneof well_known { + // Ip specifies that the field must be a valid IP (v4 or v6) address in + // byte format + bool ip = 10; + + // Ipv4 specifies that the field must be a valid IPv4 address in byte + // format + bool ipv4 = 11; + + // Ipv6 specifies that the field must be a valid IPv6 address in byte + // format + bool ipv6 = 12; + } + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 14; +} + +// EnumRules describe the constraints applied to enum values +message EnumRules { + // Const specifies that this field must be exactly the specified value + optional int32 const = 1; + + // DefinedOnly specifies that this field must be only one of the defined + // values for this enum, failing on any undefined value. + optional bool defined_only = 2; + + // In specifies that this field must be equal to one of the specified + // values + repeated int32 in = 3; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated int32 not_in = 4; +} + +// MessageRules describe the constraints applied to embedded message values. +// For message-type fields, validation is performed recursively. +message MessageRules { + // Skip specifies that the validation rules of this field should not be + // evaluated + optional bool skip = 1; + + // Required specifies that this field must be set + optional bool required = 2; +} + +// RepeatedRules describe the constraints applied to `repeated` values +message RepeatedRules { + // MinItems specifies that this field must have the specified number of + // items at a minimum + optional uint64 min_items = 1; + + // MaxItems specifies that this field must have the specified number of + // items at a maximum + optional uint64 max_items = 2; + + // Unique specifies that all elements in this field must be unique. This + // constraint is only applicable to scalar and enum types (messages are not + // supported). 
+ optional bool unique = 3; + + // Items specifies the constraints to be applied to each item in the field. + // Repeated message fields will still execute validation against each item + // unless skip is specified here. + optional FieldRules items = 4; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 5; +} + +// MapRules describe the constraints applied to `map` values +message MapRules { + // MinPairs specifies that this field must have the specified number of + // KVs at a minimum + optional uint64 min_pairs = 1; + + // MaxPairs specifies that this field must have the specified number of + // KVs at a maximum + optional uint64 max_pairs = 2; + + // NoSparse specifies values in this field cannot be unset. This only + // applies to map's with message value types. + optional bool no_sparse = 3; + + // Keys specifies the constraints to be applied to each key in the field. + optional FieldRules keys = 4; + + // Values specifies the constraints to be applied to the value of each key + // in the field. Message values will still have their validations evaluated + // unless skip is specified here. + optional FieldRules values = 5; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 6; +} + +// AnyRules describe constraints applied exclusively to the +// `google.protobuf.Any` well-known type +message AnyRules { + // Required specifies that this field must be set + optional bool required = 1; + + // In specifies that this field's `type_url` must be equal to one of the + // specified values. + repeated string in = 2; + + // NotIn specifies that this field's `type_url` must not be equal to any of + // the specified values. 
+ repeated string not_in = 3; +} + +// DurationRules describe the constraints applied exclusively to the +// `google.protobuf.Duration` well-known type +message DurationRules { + // Required specifies that this field must be set + optional bool required = 1; + + // Const specifies that this field must be exactly the specified value + optional google.protobuf.Duration const = 2; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional google.protobuf.Duration lt = 3; + + // Lt specifies that this field must be less than the specified value, + // inclusive + optional google.protobuf.Duration lte = 4; + + // Gt specifies that this field must be greater than the specified value, + // exclusive + optional google.protobuf.Duration gt = 5; + + // Gte specifies that this field must be greater than the specified value, + // inclusive + optional google.protobuf.Duration gte = 6; + + // In specifies that this field must be equal to one of the specified + // values + repeated google.protobuf.Duration in = 7; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated google.protobuf.Duration not_in = 8; +} + +// TimestampRules describe the constraints applied exclusively to the +// `google.protobuf.Timestamp` well-known type +message TimestampRules { + // Required specifies that this field must be set + optional bool required = 1; + + // Const specifies that this field must be exactly the specified value + optional google.protobuf.Timestamp const = 2; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional google.protobuf.Timestamp lt = 3; + + // Lte specifies that this field must be less than the specified value, + // inclusive + optional google.protobuf.Timestamp lte = 4; + + // Gt specifies that this field must be greater than the specified value, + // exclusive + optional google.protobuf.Timestamp gt = 5; + + // Gte specifies that this field must be greater than the specified value, + // inclusive + optional google.protobuf.Timestamp gte = 6; + + // LtNow specifies that this must be less than the current time. LtNow + // can only be used with the Within rule. + optional bool lt_now = 7; + + // GtNow specifies that this must be greater than the current time. GtNow + // can only be used with the Within rule. + optional bool gt_now = 8; + + // Within specifies that this field must be within this duration of the + // current time. This constraint can be used alone or with the LtNow and + // GtNow rules. + optional google.protobuf.Duration within = 9; +} diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml index ffc7b992b3..f4e7dbf37b 100644 --- a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml +++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml @@ -1,7 +1,7 @@ freebsd_task: name: 'FreeBSD' freebsd_instance: - image_family: freebsd-13-2 + image_family: freebsd-14-1 install_script: - pkg update -f - pkg install -y go @@ -9,5 +9,6 @@ freebsd_task: # run tests as user "cirrus" instead of root - pw useradd cirrus -m - chown -R cirrus:cirrus . - - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... - - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... 
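The numeric rules in the validate.proto file above repeatedly note that when Gt (or Gte) is larger than Lt (or Lte) "the range is reversed". A minimal Go sketch of that documented behaviour, using a hypothetical checkFloatRange helper for illustration only, not the code protoc-gen-validate actually generates:

    package main

    import "fmt"

    // checkFloatRange is an illustrative re-implementation of the documented
    // gt/lt semantics (not generated protoc-gen-validate code): with gt < lt
    // the value must fall inside the exclusive interval (gt, lt); when gt > lt
    // the range is "reversed" and the value must fall outside [lt, gt].
    func checkFloatRange(val, gt, lt float64) bool {
        if gt < lt {
            return val > gt && val < lt
        }
        return val < lt || val > gt
    }

    func main() {
        fmt.Println(checkFloatRange(3, 10, 5))  // true:  outside the reversed range [5, 10]
        fmt.Println(checkFloatRange(7, 10, 5))  // false: inside [5, 10]
        fmt.Println(checkFloatRange(12, 10, 5)) // true
    }

With gt = 10 and lt = 5, values such as 3 or 12 satisfy the reversed range while 7 does not.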
+ - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./... diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig deleted file mode 100644 index fad895851e..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -root = true - -[*.go] -indent_style = tab -indent_size = 4 -insert_final_newline = true - -[*.{yml,yaml}] -indent_style = space -indent_size = 2 -insert_final_newline = true -trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes deleted file mode 100644 index 32f1001be0..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 391cc076b1..daea9dd6d6 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -5,3 +5,6 @@ # Output of go build ./cmd/fsnotify /fsnotify /fsnotify.exe + +/test/kqueue +/test/a.out diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index e0e5757549..fa854785d0 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,8 +1,36 @@ # Changelog -Unreleased ----------- -Nothing yet. +1.8.0 2023-10-31 +---------------- + +### Additions + +- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619]) + +### Changes and fixes + +- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610]) + +- kqueue: ignore events with Ident=0 ([#590]) + +- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617]) + +- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625]) + +- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620]) + +- inotify: fix panic when calling Remove() in a goroutine ([#650]) + +- fen: allow watching subdirectories of watched directories ([#621]) + +[#590]: https://github.com/fsnotify/fsnotify/pull/590 +[#610]: https://github.com/fsnotify/fsnotify/pull/610 +[#617]: https://github.com/fsnotify/fsnotify/pull/617 +[#619]: https://github.com/fsnotify/fsnotify/pull/619 +[#620]: https://github.com/fsnotify/fsnotify/pull/620 +[#621]: https://github.com/fsnotify/fsnotify/pull/621 +[#625]: https://github.com/fsnotify/fsnotify/pull/625 +[#650]: https://github.com/fsnotify/fsnotify/pull/650 1.7.0 - 2023-10-22 ------------------ diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index ea379759d5..e4ac2a2fff 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -1,7 +1,7 @@ Thank you for your interest in contributing to fsnotify! We try to review and merge PRs in a reasonable timeframe, but please be aware that: -- To avoid "wasted" work, please discus changes on the issue tracker first. You +- To avoid "wasted" work, please discuss changes on the issue tracker first. You can just send PRs, but they may end up being rejected for one reason or the other. @@ -20,6 +20,124 @@ platforms. Testing different platforms locally can be done with something like Use the `-short` flag to make the "stress test" run faster. 
+Writing new tests +----------------- +Scripts in the testdata directory allow creating test cases in a "shell-like" +syntax. The basic format is: + + script + + Output: + desired output + +For example: + + # Create a new empty file with some data. + watch / + echo data >/file + + Output: + create /file + write /file + +Just create a new file to add a new test; select which tests to run with +`-run TestScript/[path]`. + +script +------ +The script is a "shell-like" script: + + cmd arg arg + +Comments are supported with `#`: + + # Comment + cmd arg arg # Comment + +All operations are done in a temp directory; a path like "/foo" is rewritten to +"/tmp/TestFoo/foo". + +Arguments can be quoted with `"` or `'`; there are no escapes and they're +functionally identical right now, but this may change in the future, so best to +assume shell-like rules. + + touch "/file with spaces" + +End-of-line escapes with `\` are not supported. + +### Supported commands + + watch path [ops] # Watch the path, reporting events for it. Nothing is + # watched by default. Optionally a list of ops can be + # given, as with AddWith(path, WithOps(...)). + unwatch path # Stop watching the path. + watchlist n # Assert watchlist length. + + stop # Stop running the script; for debugging. + debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in + parallel by default, so -parallel=1 is probably a good + idea). + + touch path + mkdir [-p] dir + ln -s target link # Only ln -s supported. + mkfifo path + mknod dev path + mv src dst + rm [-r] path + chmod mode path # Octal only + sleep time-in-ms + + cat path # Read path (does nothing with the data; just reads it). + echo str >>path # Append "str" to "path". + echo str >path # Truncate "path" and write "str". + + require reason # Skip the test if "reason" is true; "skip" and + skip reason # "require" behave identical; it supports both for + # readability. Possible reasons are: + # + # always Always skip this test. + # symlink Symlinks are supported (requires admin + # permissions on Windows). + # mkfifo Platform doesn't support FIFO named sockets. + # mknod Platform doesn't support device nodes. + + +output +------ +After `Output:` the desired output is given; this is indented by convention, but +that's not required. + +The format of that is: + + # Comment + event path # Comment + + system: + event path + system2: + event path + +Every event is one line, and any whitespace between the event and path are +ignored. The path can optionally be surrounded in ". Anything after a "#" is +ignored. + +Platform-specific tests can be added after GOOS; for example: + + watch / + touch /file + + Output: + # Tested if nothing else matches + create /file + + # Windows-specific test. + windows: + write /file + +You can specify multiple platforms with a comma (e.g. "windows, linux:"). +"kqueue" is a shortcut for all kqueue systems (BSD, macOS). + [goon]: https://github.com/arp242/goon [Vagrant]: https://www.vagrantup.com/ diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go index 28497f1dd8..c349c326c7 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -1,8 +1,8 @@ //go:build solaris -// +build solaris -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh +// FEN backend for illumos (supported) and Solaris (untested, but should work). +// +// See port_create(3c) etc. for docs. 
https://www.illumos.org/man/3C/port_create package fsnotify @@ -12,150 +12,33 @@ import ( "os" "path/filepath" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. 
moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type fen struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error mu sync.Mutex port *unix.EventPort - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - dirs map[string]struct{} // Explicitly watched directories - watches map[string]struct{} // Explicitly watched non-directories + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + dirs map[string]Op // Explicitly watched directories + watches map[string]Op // Explicitly watched non-directories } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { - w := &Watcher{ - Events: make(chan Event, sz), - Errors: make(chan error), - dirs: make(map[string]struct{}), - watches: make(map[string]struct{}), +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + w := &fen{ + Events: ev, + Errors: errs, + dirs: make(map[string]Op), + watches: make(map[string]Op), done: make(chan struct{}), } @@ -171,27 +54,30 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { // sendEvent attempts to send an event to the user, returning true if the event // was put in the channel successfully and false if the watcher has been closed. 
-func (w *Watcher) sendEvent(name string, op Op) (sent bool) { +func (w *fen) sendEvent(name string, op Op) (sent bool) { select { - case w.Events <- Event{Name: name, Op: op}: - return true case <-w.done: return false + case w.Events <- Event{Name: name, Op: op}: + return true } } // sendError attempts to send an error to the user, returning true if the error // was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendError(err error) (sent bool) { - select { - case w.Errors <- err: +func (w *fen) sendError(err error) (sent bool) { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *fen) isClosed() bool { select { case <-w.done: return true @@ -200,8 +86,7 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *fen) Close() error { // Take the lock used by associateFile to prevent lingering events from // being processed after the close w.mu.Lock() @@ -213,60 +98,21 @@ func (w *Watcher) Close() error { return w.port.Close() } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *fen) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *fen) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } - if w.port.PathIsWatched(name) { - return nil + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) } - _ = getOptions(opts...) + with := getOptions(opts...) 
+ if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } // Currently we resolve symlinks that were explicitly requested to be // watched. Otherwise we would use LStat here. @@ -283,7 +129,7 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.dirs[name] = struct{}{} + w.dirs[name] = with.op w.mu.Unlock() return nil } @@ -294,26 +140,22 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.watches[name] = struct{}{} + w.watches[name] = with.op w.mu.Unlock() return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *fen) Remove(name string) error { if w.isClosed() { return nil } if !w.port.PathIsWatched(name) { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } // The user has expressed an intent. Immediately remove this name from // whichever watch list it might be in. If it's not in there the delete @@ -346,7 +188,7 @@ func (w *Watcher) Remove(name string) error { } // readEvents contains the main loop that runs in a goroutine watching for events. -func (w *Watcher) readEvents() { +func (w *fen) readEvents() { // If this function returns, the watcher has been closed and we can close // these channels defer func() { @@ -382,17 +224,19 @@ func (w *Watcher) readEvents() { continue } + if debug { + internal.Debug(pevent.Path, pevent.Events) + } + err = w.handleEvent(&pevent) - if err != nil { - if !w.sendError(err) { - return - } + if !w.sendError(err) { + return } } } } -func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { +func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { files, err := os.ReadDir(path) if err != nil { return err @@ -418,7 +262,7 @@ func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, ha // bitmap matches more than one event type (e.g. the file was both modified and // had the attributes changed between when the association was created and the // when event was returned) -func (w *Watcher) handleEvent(event *unix.PortEvent) error { +func (w *fen) handleEvent(event *unix.PortEvent) error { var ( events = event.Events path = event.Path @@ -510,15 +354,9 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { } if events&unix.FILE_MODIFIED != 0 { - if fmode.IsDir() { - if watchedDir { - if err := w.updateDirectory(path); err != nil { - return err - } - } else { - if !w.sendEvent(path, Write) { - return nil - } + if fmode.IsDir() && watchedDir { + if err := w.updateDirectory(path); err != nil { + return err } } else { if !w.sendEvent(path, Write) { @@ -543,7 +381,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { return nil } -func (w *Watcher) updateDirectory(path string) error { +func (w *fen) updateDirectory(path string) error { // The directory was modified, so we must find unwatched entities and watch // them. 
If something was removed from the directory, nothing will happen, // as everything else should still be watched. @@ -563,10 +401,8 @@ func (w *Watcher) updateDirectory(path string) error { return err } err = w.associateFile(path, finfo, false) - if err != nil { - if !w.sendError(err) { - return nil - } + if !w.sendError(err) { + return nil } if !w.sendEvent(path, Create) { return nil @@ -575,7 +411,7 @@ func (w *Watcher) updateDirectory(path string) error { return nil } -func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error { +func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error { if w.isClosed() { return ErrClosed } @@ -593,34 +429,34 @@ func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) erro // cleared up that discrepancy. The most likely cause is that the event // has fired but we haven't processed it yet. err := w.port.DissociatePath(path) - if err != nil && err != unix.ENOENT { + if err != nil && !errors.Is(err, unix.ENOENT) { return err } } - // FILE_NOFOLLOW means we watch symlinks themselves rather than their - // targets. - events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW - if follow { - // We *DO* follow symlinks for explicitly watched entries. - events = unix.FILE_MODIFIED | unix.FILE_ATTRIB + + var events int + if !follow { + // Watch symlinks themselves rather than their targets unless this entry + // is explicitly watched. + events |= unix.FILE_NOFOLLOW + } + if true { // TODO: implement withOps() + events |= unix.FILE_MODIFIED } - return w.port.AssociatePath(path, stat, - events, - stat.Mode()) + if true { + events |= unix.FILE_ATTRIB + } + return w.port.AssociatePath(path, stat, events, stat.Mode()) } -func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error { +func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error { if !w.port.PathIsWatched(path) { return nil } return w.port.DissociatePath(path) } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *fen) WatchList() []string { if w.isClosed() { return nil } @@ -638,3 +474,11 @@ func (w *Watcher) WatchList() []string { return entries } + +func (w *fen) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go index 921c1c1e40..36c311694c 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -1,8 +1,4 @@ //go:build linux && !appengine -// +build linux,!appengine - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -10,127 +6,20 @@ import ( "errors" "fmt" "io" + "io/fs" "os" "path/filepath" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. 
For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. 
For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type inotify struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error // Store fd here as os.File.Read() will no longer return on close after @@ -139,8 +28,26 @@ type Watcher struct { inotifyFile *os.File watches *watches done chan struct{} // Channel for sending a "quit message" to the reader goroutine - closeMu sync.Mutex + doneMu sync.Mutex doneResp chan struct{} // Channel to respond to Close + + // Store rename cookies in an array, with the index wrapping to 0. Almost + // all of the time what we get is a MOVED_FROM to set the cookie and the + // next event inotify sends will be MOVED_TO to read it. However, this is + // not guaranteed – as described in inotify(7) – and we may get other events + // between the two MOVED_* events (including other MOVED_* ones). + // + // A second issue is that moving a file outside the watched directory will + // trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to + // read and delete it. So just storing it in a map would slowly leak memory. + // + // Doing it like this gives us a simple fast LRU-cache that won't allocate. + // Ten items should be more than enough for our purpose, and a loop over + // such a short array is faster than a map access anyway (not that it hugely + // matters since we're talking about hundreds of ns at the most, but still). + cookies [10]koekje + cookieIndex uint8 + cookiesMu sync.Mutex } type ( @@ -150,9 +57,14 @@ type ( path map[string]uint32 // pathname → wd } watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) - path string // Watch path. + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) + path string // Watch path. + recurse bool // Recursion with ./...? + } + koekje struct { + cookie uint32 + path string } ) @@ -179,23 +91,45 @@ func (w *watches) add(ww *watch) { func (w *watches) remove(wd uint32) { w.mu.Lock() defer w.mu.Unlock() - delete(w.path, w.wd[wd].path) + watch := w.wd[wd] // Could have had Remove() called. See #616. 
+ if watch == nil { + return + } + delete(w.path, watch.path) delete(w.wd, wd) } -func (w *watches) removePath(path string) (uint32, bool) { +func (w *watches) removePath(path string) ([]uint32, error) { w.mu.Lock() defer w.mu.Unlock() + path, recurse := recursivePath(path) wd, ok := w.path[path] if !ok { - return 0, false + return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path) + } + + watch := w.wd[wd] + if recurse && !watch.recurse { + return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path) } delete(w.path, path) delete(w.wd, wd) + if !watch.recurse { + return []uint32{wd}, nil + } - return wd, true + wds := make([]uint32, 0, 8) + wds = append(wds, wd) + for p, rwd := range w.path { + if filepath.HasPrefix(p, path) { + delete(w.path, p) + delete(w.wd, rwd) + wds = append(wds, rwd) + } + } + return wds, nil } func (w *watches) byPath(path string) *watch { @@ -236,20 +170,11 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error return nil } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { // Need to set nonblocking mode for SetDeadline to work, otherwise blocking // I/O operations won't terminate on close. fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) @@ -257,12 +182,12 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { return nil, errno } - w := &Watcher{ + w := &inotify{ + Events: ev, + Errors: errs, fd: fd, inotifyFile: os.NewFile(uintptr(fd), ""), watches: newWatches(), - Events: make(chan Event, sz), - Errors: make(chan error), done: make(chan struct{}), doneResp: make(chan struct{}), } @@ -272,26 +197,29 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *inotify) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: +func (w *inotify) sendError(err error) bool { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *inotify) isClosed() bool { select { case <-w.done: return true @@ -300,15 +228,14 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. 
-func (w *Watcher) Close() error { - w.closeMu.Lock() +func (w *inotify) Close() error { + w.doneMu.Lock() if w.isClosed() { - w.closeMu.Unlock() + w.doneMu.Unlock() return nil } close(w.done) - w.closeMu.Unlock() + w.doneMu.Unlock() // Causes any blocking reads to return with an error, provided the file // still supports deadline operations. @@ -323,78 +250,104 @@ func (w *Watcher) Close() error { return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *inotify) Add(name string) error { return w.AddWith(name) } + +func (w *inotify) AddWith(path string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), path) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - name = filepath.Clean(name) - _ = getOptions(opts...) + path, recurse := recursivePath(path) + if recurse { + return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() { + if root == path { + return fmt.Errorf("fsnotify: not a directory: %q", path) + } + return nil + } - var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + // Send a Create event when adding new directory from a recursive + // watch; this is for "mkdir -p one/two/three". 
Usually all those + // directories will be created before we can set up watchers on the + // subdirectories, so only "one" would be sent as a Create event and + // not "one/two" and "one/two/three" (inotifywait -r has the same + // problem). + if with.sendCreate && root != path { + w.sendEvent(Event{Name: root, Op: Create}) + } + + return w.add(root, with, true) + }) + } - return w.watches.updatePath(name, func(existing *watch) (*watch, error) { + return w.add(path, with, false) +} + +func (w *inotify) add(path string, with withOpts, recurse bool) error { + var flags uint32 + if with.noFollow { + flags |= unix.IN_DONT_FOLLOW + } + if with.op.Has(Create) { + flags |= unix.IN_CREATE + } + if with.op.Has(Write) { + flags |= unix.IN_MODIFY + } + if with.op.Has(Remove) { + flags |= unix.IN_DELETE | unix.IN_DELETE_SELF + } + if with.op.Has(Rename) { + flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF + } + if with.op.Has(Chmod) { + flags |= unix.IN_ATTRIB + } + if with.op.Has(xUnportableOpen) { + flags |= unix.IN_OPEN + } + if with.op.Has(xUnportableRead) { + flags |= unix.IN_ACCESS + } + if with.op.Has(xUnportableCloseWrite) { + flags |= unix.IN_CLOSE_WRITE + } + if with.op.Has(xUnportableCloseRead) { + flags |= unix.IN_CLOSE_NOWRITE + } + return w.register(path, flags, recurse) +} + +func (w *inotify) register(path string, flags uint32, recurse bool) error { + return w.watches.updatePath(path, func(existing *watch) (*watch, error) { if existing != nil { flags |= existing.flags | unix.IN_MASK_ADD } - wd, err := unix.InotifyAddWatch(w.fd, name, flags) + wd, err := unix.InotifyAddWatch(w.fd, path, flags) if wd == -1 { return nil, err } if existing == nil { return &watch{ - wd: uint32(wd), - path: name, - flags: flags, + wd: uint32(wd), + path: path, + flags: flags, + recurse: recurse, }, nil } @@ -404,49 +357,44 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { }) } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *inotify) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(filepath.Clean(name)) } -func (w *Watcher) remove(name string) error { - wd, ok := w.watches.removePath(name) - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - success, errno := unix.InotifyRmWatch(w.fd, wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case; - // The only two possible errors are: - // - // - EBADF, which happens when w.fd is not a valid file descriptor - // of any kind. - // - EINVAL, which is when fd is not an inotify descriptor or wd - // is not a valid watch descriptor. Watch descriptors are - // invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they - // are watching is deleted. 
- return errno +func (w *inotify) remove(name string) error { + wds, err := w.watches.removePath(name) + if err != nil { + return err + } + + for _, wd := range wds { + _, err := unix.InotifyRmWatch(w.fd, wd) + if err != nil { + // TODO: Perhaps it's not helpful to return an error here in every + // case; the only two possible errors are: + // + // EBADF, which happens when w.fd is not a valid file descriptor of + // any kind. + // + // EINVAL, which is when fd is not an inotify descriptor or wd is + // not a valid watch descriptor. Watch descriptors are invalidated + // when they are removed explicitly or implicitly; explicitly by + // inotify_rm_watch, implicitly when the file they are watching is + // deleted. + return err + } } return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *inotify) WatchList() []string { if w.isClosed() { return nil } @@ -463,7 +411,7 @@ func (w *Watcher) WatchList() []string { // readEvents reads from the inotify file descriptor, converts the // received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { +func (w *inotify) readEvents() { defer func() { close(w.doneResp) close(w.Errors) @@ -506,15 +454,17 @@ func (w *Watcher) readEvents() { continue } - var offset uint32 // We don't know how many events we just read into the buffer // While the offset points to at least one whole event... + var offset uint32 for offset <= uint32(n-unix.SizeofInotifyEvent) { var ( // Point "raw" to the event in the buffer raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) mask = uint32(raw.Mask) nameLen = uint32(raw.Len) + // Move to the next event in the buffer + next = func() { offset += unix.SizeofInotifyEvent + nameLen } ) if mask&unix.IN_Q_OVERFLOW != 0 { @@ -523,21 +473,53 @@ func (w *Watcher) readEvents() { } } - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. + /// If the event happened to the watched directory or the watched + /// file, the kernel doesn't append the filename to the event, but + /// we would like to always fill the the "Name" field with a valid + /// filename. We retrieve the path of the watch from the "paths" + /// map. watch := w.watches.byWd(uint32(raw.Wd)) + /// Can be nil if Remove() was called in another goroutine for this + /// path inbetween reading the events from the kernel and reading + /// the internal state. Not much we can do about it, so just skip. + /// See #616. + if watch == nil { + next() + continue + } + + name := watch.path + if nameLen > 0 { + /// Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + /// The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + if debug { + internal.Debug(name, raw.Mask, raw.Cookie) + } + + if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0 + next() + continue + } // inotify will automatically remove the watch on deletes; just need // to clean our state here. 
- if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { w.watches.remove(watch.wd) } + // We can't really update the state when a watched path is moved; // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove // the watch. - if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if watch.recurse { + next() // Do nothing + continue + } + err := w.remove(watch.path) if err != nil && !errors.Is(err, ErrNonExistentWatch) { if !w.sendError(err) { @@ -546,34 +528,69 @@ func (w *Watcher) readEvents() { } } - var name string - if watch != nil { - name = watch.path - } - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + /// Skip if we're watching both this path and the parent; the parent + /// will already send a delete so no need to do it twice. + if mask&unix.IN_DELETE_SELF != 0 { + if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok { + next() + continue + } } - event := w.newEvent(name, mask) + ev := w.newEvent(name, mask, raw.Cookie) + // Need to update watch path for recurse. + if watch.recurse { + isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR + /// New directory created: set up watch on it. + if isDir && ev.Has(Create) { + err := w.register(ev.Name, watch.flags, true) + if !w.sendError(err) { + return + } - // Send the events that are not ignored on the events channel - if mask&unix.IN_IGNORED == 0 { - if !w.sendEvent(event) { - return + // This was a directory rename, so we need to update all + // the children. + // + // TODO: this is of course pretty slow; we should use a + // better data structure for storing all of this, e.g. store + // children in the watch. I have some code for this in my + // kqueue refactor we can use in the future. For now I'm + // okay with this as it's not publicly available. + // Correctness first, performance second. + if ev.renamedFrom != "" { + w.watches.mu.Lock() + for k, ww := range w.watches.wd { + if k == watch.wd || ww.path == ev.Name { + continue + } + if strings.HasPrefix(ww.path, ev.renamedFrom) { + ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) + w.watches.wd[k] = ww + } + } + w.watches.mu.Unlock() + } } } - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen + /// Send the events that are not ignored on the events channel + if !w.sendEvent(ev) { + return + } + next() } } } -// newEvent returns an platform-independent Event based on an inotify mask. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *inotify) isRecursive(path string) bool { + ww := w.watches.byPath(path) + if ww == nil { // path could be a file, so also check the Dir. 
+ ww = w.watches.byPath(filepath.Dir(path)) + } + return ww != nil && ww.recurse +} + +func (w *inotify) newEvent(name string, mask, cookie uint32) Event { e := Event{Name: name} if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { e.Op |= Create @@ -584,11 +601,58 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { if mask&unix.IN_MODIFY == unix.IN_MODIFY { e.Op |= Write } + if mask&unix.IN_OPEN == unix.IN_OPEN { + e.Op |= xUnportableOpen + } + if mask&unix.IN_ACCESS == unix.IN_ACCESS { + e.Op |= xUnportableRead + } + if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE { + e.Op |= xUnportableCloseWrite + } + if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE { + e.Op |= xUnportableCloseRead + } if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { e.Op |= Rename } if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { e.Op |= Chmod } + + if cookie != 0 { + if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + w.cookiesMu.Lock() + w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name} + w.cookieIndex++ + if w.cookieIndex > 9 { + w.cookieIndex = 0 + } + w.cookiesMu.Unlock() + } else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + w.cookiesMu.Lock() + var prev string + for _, c := range w.cookies { + if c.cookie == cookie { + prev = c.path + break + } + } + w.cookiesMu.Unlock() + e.renamedFrom = prev + } + } return e } + +func (w *inotify) xSupports(op Op) bool { + return true // Supports everything. +} + +func (w *inotify) state() { + w.watches.mu.Lock() + defer w.watches.mu.Unlock() + for wd, ww := range w.watches.wd { + fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go index 063a0915a0..d8de5ab76f 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -1,8 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -11,174 +7,195 @@ import ( "fmt" "os" "path/filepath" + "runtime" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. 
+type kqueue struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error - done chan struct{} - kq int // File descriptor (as returned by the kqueue() syscall). - closepipe [2]int // Pipe used for closing. - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Watched file descriptors (key: path). - watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). - userWatches map[string]struct{} // Watches added with Watcher.Add() - dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. - paths map[int]pathInfo // File descriptors to path names for processing kqueue events. - fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing kq. + watches *watches + done chan struct{} + doneMu sync.Mutex } -type pathInfo struct { - name string - isDir bool +type ( + watches struct { + mu sync.RWMutex + wd map[int]watch // wd → watch + path map[string]int // pathname → wd + byDir map[string]map[int]struct{} // dirname(path) → wd + seen map[string]struct{} // Keep track of if we know this file exists. + byUser map[string]struct{} // Watches added with Watcher.Add() + } + watch struct { + wd int + name string + linkName string // In case of links; name is the target, and this is the link. + isDir bool + dirFlags uint32 + } +) + +func newWatches() *watches { + return &watches{ + wd: make(map[int]watch), + path: make(map[string]int), + byDir: make(map[string]map[int]struct{}), + seen: make(map[string]struct{}), + byUser: make(map[string]struct{}), + } } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func (w *watches) listPaths(userOnly bool) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + if userOnly { + l := make([]string, 0, len(w.byUser)) + for p := range w.byUser { + l = append(l, p) + } + return l + } + + l := make([]string, 0, len(w.path)) + for p := range w.path { + l = append(l, p) + } + return l } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func (w *watches) watchesInDir(path string) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + l := make([]string, 0, 4) + for fd := range w.byDir[path] { + info := w.wd[fd] + if _, ok := w.byUser[info.name]; !ok { + l = append(l, info.name) + } + } + return l +} + +// Mark path as added by the user. 
+func (w *watches) addUserWatch(path string) { + w.mu.Lock() + defer w.mu.Unlock() + w.byUser[path] = struct{}{} +} + +func (w *watches) addLink(path string, fd int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.seen[path] = struct{}{} +} + +func (w *watches) add(path, linkPath string, fd int, isDir bool) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir} + + parent := filepath.Dir(path) + byDir, ok := w.byDir[parent] + if !ok { + byDir = make(map[int]struct{}, 1) + w.byDir[parent] = byDir + } + byDir[fd] = struct{}{} +} + +func (w *watches) byWd(fd int) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[fd] + return info, ok +} + +func (w *watches) byPath(path string) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[w.path[path]] + return info, ok +} + +func (w *watches) updateDirFlags(path string, flags uint32) { + w.mu.Lock() + defer w.mu.Unlock() + + fd := w.path[path] + info := w.wd[fd] + info.dirFlags = flags + w.wd[fd] = info +} + +func (w *watches) remove(fd int, path string) bool { + w.mu.Lock() + defer w.mu.Unlock() + + isDir := w.wd[fd].isDir + delete(w.path, path) + delete(w.byUser, path) + + parent := filepath.Dir(path) + delete(w.byDir[parent], fd) + + if len(w.byDir[parent]) == 0 { + delete(w.byDir, parent) + } + + delete(w.wd, fd) + delete(w.seen, path) + return isDir +} + +func (w *watches) markSeen(path string, exists bool) { + w.mu.Lock() + defer w.mu.Unlock() + if exists { + w.seen[path] = struct{}{} + } else { + delete(w.seen, path) + } +} + +func (w *watches) seenBefore(path string) bool { + w.mu.RLock() + defer w.mu.RUnlock() + _, ok := w.seen[path] + return ok +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) +} + +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { kq, closepipe, err := newKqueue() if err != nil { return nil, err } - w := &Watcher{ - kq: kq, - closepipe: closepipe, - watches: make(map[string]int), - watchesByDir: make(map[string]map[int]struct{}), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]struct{}), - userWatches: make(map[string]struct{}), - Events: make(chan Event, sz), - Errors: make(chan error), - done: make(chan struct{}), + w := &kqueue{ + Events: ev, + Errors: errs, + kq: kq, + closepipe: closepipe, + done: make(chan struct{}), + watches: newWatches(), } go w.readEvents() @@ -203,6 +220,8 @@ func newKqueue() (kq int, closepipe [2]int, err error) { unix.Close(kq) return kq, closepipe, err } + unix.CloseOnExec(closepipe[0]) + unix.CloseOnExec(closepipe[1]) // Register changes to listen on the closepipe. changes := make([]unix.Kevent_t, 1) @@ -221,166 +240,108 @@ func newKqueue() (kq int, closepipe [2]int, err error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *kqueue) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. 
-func (w *Watcher) sendError(err error) bool { +func (w *kqueue) sendError(err error) bool { + if err == nil { + return true + } select { + case <-w.done: + return false case w.Errors <- err: return true + } +} + +func (w *kqueue) isClosed() bool { + select { case <-w.done: + return true + default: return false } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) Close() error { + w.doneMu.Lock() + if w.isClosed() { + w.doneMu.Unlock() return nil } - w.isClosed = true + close(w.done) + w.doneMu.Unlock() - // copy paths to remove while locked - pathsToRemove := make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() // Unlock before calling Remove, which also locks + pathsToRemove := w.watches.listPaths(false) for _, name := range pathsToRemove { w.Remove(name) } // Send "quit" message to the reader goroutine. unix.Close(w.closepipe[1]) - close(w.done) - return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *kqueue) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - _ = getOptions(opts...) +func (w *kqueue) AddWith(name string, opts ...addOpt) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + + with := getOptions(opts...) 
+ if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - w.mu.Lock() - w.userWatches[name] = struct{}{} - w.mu.Unlock() _, err := w.addWatch(name, noteAllEvents) - return err + if err != nil { + return err + } + w.watches.addUserWatch(name) + return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *kqueue) Remove(name string) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(name, true) } -func (w *Watcher) remove(name string, unwatchFiles bool) error { - name = filepath.Clean(name) - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) remove(name string, unwatchFiles bool) error { + if w.isClosed() { return nil } - watchfd, ok := w.watches[name] - w.mu.Unlock() + + name = filepath.Clean(name) + info, ok := w.watches.byPath(name) if !ok { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } - err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + err := w.register([]int{info.wd}, unix.EV_DELETE, 0) if err != nil { return err } - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.userWatches, name) - - parentName := filepath.Dir(name) - delete(w.watchesByDir[parentName], watchfd) - - if len(w.watchesByDir[parentName]) == 0 { - delete(w.watchesByDir, parentName) - } + unix.Close(info.wd) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - delete(w.fileExists, name) - w.mu.Unlock() + isDir := w.watches.remove(info.wd, name) // Find all watched paths that are in this directory that are not external. if unwatchFiles && isDir { - var pathsToRemove []string - w.mu.Lock() - for fd := range w.watchesByDir[name] { - path := w.paths[fd] - if _, ok := w.userWatches[path.name]; !ok { - pathsToRemove = append(pathsToRemove, path.name) - } - } - w.mu.Unlock() + pathsToRemove := w.watches.watchesInDir(name) for _, name := range pathsToRemove { // Since these are internal, not much sense in propagating error to // the user, as that will just confuse them with an error about a @@ -391,23 +352,11 @@ func (w *Watcher) remove(name string, unwatchFiles bool) error { return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed { +func (w *kqueue) WatchList() []string { + if w.isClosed() { return nil } - - entries := make([]string, 0, len(w.userWatches)) - for pathname := range w.userWatches { - entries = append(entries, pathname) - } - - return entries + return w.watches.listPaths(true) } // Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) @@ -417,34 +366,26 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un // described in kevent(2). // // Returns the real path to the file which was added, with symlinks resolved. 
-func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) addWatch(name string, flags uint32) (string, error) { + if w.isClosed() { return "", ErrClosed } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() + name = filepath.Clean(name) + + info, alreadyWatching := w.watches.byPath(name) if !alreadyWatching { fi, err := os.Lstat(name) if err != nil { return "", err } - // Don't watch sockets or named pipes + // Don't watch sockets or named pipes. if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { return "", nil } - // Follow Symlinks. + // Follow symlinks. if fi.Mode()&os.ModeSymlink == os.ModeSymlink { link, err := os.Readlink(name) if err != nil { @@ -455,18 +396,15 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", nil } - w.mu.Lock() - _, alreadyWatching = w.watches[link] - w.mu.Unlock() - + _, alreadyWatching = w.watches.byPath(link) if alreadyWatching { // Add to watches so we don't get spurious Create events later // on when we diff the directories. - w.watches[name] = 0 - w.fileExists[name] = struct{}{} + w.watches.addLink(name, 0) return link, nil } + info.linkName = name name = link fi, err = os.Lstat(name) if err != nil { @@ -477,7 +415,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // Retry on EINTR; open() can return EINTR in practice on macOS. // See #354, and Go issues 11180 and 39237. for { - watchfd, err = unix.Open(name, openMode, 0) + info.wd, err = unix.Open(name, openMode, 0) if err == nil { break } @@ -488,40 +426,25 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", err } - isDir = fi.IsDir() + info.isDir = fi.IsDir() } - err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) if err != nil { - unix.Close(watchfd) + unix.Close(info.wd) return "", err } if !alreadyWatching { - w.mu.Lock() - parentName := filepath.Dir(name) - w.watches[name] = watchfd - - watchesByDir, ok := w.watchesByDir[parentName] - if !ok { - watchesByDir = make(map[int]struct{}, 1) - w.watchesByDir[parentName] = watchesByDir - } - watchesByDir[watchfd] = struct{}{} - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() + w.watches.add(name, info.linkName, info.wd, info.isDir) } - if isDir { - // Watch the directory if it has not been watched before, or if it was - // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - + // Watch the directory if it has not been watched before, or if it was + // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + if info.isDir { watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() + (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE) + w.watches.updateDirFlags(name, flags) if watchDir { if err := w.watchDirectoryFiles(name); err != nil { @@ -534,7 +457,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // readEvents reads from kqueue and converts the received 
kevents into // Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { +func (w *kqueue) readEvents() { defer func() { close(w.Events) close(w.Errors) @@ -543,50 +466,65 @@ func (w *Watcher) readEvents() { }() eventBuffer := make([]unix.Kevent_t, 10) - for closed := false; !closed; { + for { kevents, err := w.read(eventBuffer) // EINTR is okay, the syscall was interrupted before timeout expired. if err != nil && err != unix.EINTR { if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { - closed = true + return } - continue } - // Flush the events we received to the Events channel for _, kevent := range kevents { var ( - watchfd = int(kevent.Ident) - mask = uint32(kevent.Fflags) + wd = int(kevent.Ident) + mask = uint32(kevent.Fflags) ) // Shut down the loop when the pipe is closed, but only after all // other events have been processed. - if watchfd == w.closepipe[0] { - closed = true - continue + if wd == w.closepipe[0] { + return } - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() + path, ok := w.watches.byWd(wd) + if debug { + internal.Debug(path.name, &kevent) + } - event := w.newEvent(path.name, mask) + // On macOS it seems that sometimes an event with Ident=0 is + // delivered, and no other flags/information beyond that, even + // though we never saw such a file descriptor. For example in + // TestWatchSymlink/277 (usually at the end, but sometimes sooner): + // + // fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent) + // unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // + // The first is a normal event, the second with Ident 0. No error + // flag, no data, no ... nothing. + // + // I read a bit through bsd/kern_event.c from the xnu source, but I + // don't really see an obvious location where this is triggered – + // this doesn't seem intentional, but idk... + // + // Technically fd 0 is a valid descriptor, so only skip it if + // there's no path, and if we're on macOS. + if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" { + continue + } + + event := w.newEvent(path.name, path.linkName, mask) if event.Has(Rename) || event.Has(Remove) { w.remove(event.Name, false) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() + w.watches.markSeen(event.Name, false) } if path.isDir && event.Has(Write) && !event.Has(Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - if !w.sendEvent(event) { - closed = true - continue - } + w.dirChange(event.Name) + } else if !w.sendEvent(event) { + return } if event.Has(Remove) { @@ -594,25 +532,34 @@ func (w *Watcher) readEvents() { // mv f1 f2 will delete f2, then create f2. if path.isDir { fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() + _, found := w.watches.byPath(fileDir) if found { - err := w.sendDirectoryChangeEvents(fileDir) - if err != nil { - if !w.sendError(err) { - closed = true - } + // TODO: this branch is never triggered in any test. + // Added in d6220df (2012). + // isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111 + // + // I don't really get how this can be triggered either. + // And it wasn't triggered in the patch that added it, + // either. + // + // Original also had a comment: + // make sure the directory exists before we watch for + // changes. 
When we do a recursive watch and perform + // rm -rf, the parent directory might have gone + // missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the + // parent directory. + err := w.dirChange(fileDir) + if !w.sendError(err) { + return } } } else { - filePath := filepath.Clean(event.Name) - if fi, err := os.Lstat(filePath); err == nil { - err := w.sendFileCreatedEventIfNew(filePath, fi) - if err != nil { - if !w.sendError(err) { - closed = true - } + path := filepath.Clean(event.Name) + if fi, err := os.Lstat(path); err == nil { + err := w.sendCreateIfNew(path, fi) + if !w.sendError(err) { + return } } } @@ -622,8 +569,14 @@ func (w *Watcher) readEvents() { } // newEvent returns an platform-independent Event based on kqueue Fflags. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *kqueue) newEvent(name, linkName string, mask uint32) Event { e := Event{Name: name} + if linkName != "" { + // If the user watched "/path/link" then emit events as "/path/link" + // rather than "/path/target". + e.Name = linkName + } + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { e.Op |= Remove } @@ -645,8 +598,7 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { } // watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files +func (w *kqueue) watchDirectoryFiles(dirPath string) error { files, err := os.ReadDir(dirPath) if err != nil { return err @@ -674,9 +626,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { } } - w.mu.Lock() - w.fileExists[cleanPath] = struct{}{} - w.mu.Unlock() + w.watches.markSeen(cleanPath, true) } return nil @@ -686,7 +636,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { // // This functionality is to have the BSD watcher match the inotify, which sends // a create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dir string) error { +func (w *kqueue) dirChange(dir string) error { files, err := os.ReadDir(dir) if err != nil { // Directory no longer exists: we can ignore this safely. kqueue will @@ -694,61 +644,51 @@ func (w *Watcher) sendDirectoryChangeEvents(dir string) error { if errors.Is(err, os.ErrNotExist) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } for _, f := range files { fi, err := f.Info() if err != nil { - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } - err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi) if err != nil { // Don't need to send an error if this file isn't readable. if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } } return nil } -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. -func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - if !w.sendEvent(Event{Name: filePath, Op: Create}) { - return +// Send a create event if the file isn't already being tracked, and start +// watching this file. 
+func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error { + if !w.watches.seenBefore(path) { + if !w.sendEvent(Event{Name: path, Op: Create}) { + return nil } } - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fi) + // Like watchDirectoryFiles, but without doing another ReadDir. + path, err := w.internalWatch(path, fi) if err != nil { return err } - - w.mu.Lock() - w.fileExists[filePath] = struct{}{} - w.mu.Unlock() - + w.watches.markSeen(path, true) return nil } -func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { +func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) { if fi.IsDir() { // mimic Linux providing delete events for subdirectories, but preserve // the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) + info, _ := w.watches.byPath(name) + return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME) } // watch file to mimic Linux inotify @@ -756,7 +696,7 @@ func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { } // Register events with the queue. -func (w *Watcher) register(fds []int, flags int, fflags uint32) error { +func (w *kqueue) register(fds []int, flags int, fflags uint32) error { changes := make([]unix.Kevent_t, len(fds)) for i, fd := range fds { // SetKevent converts int to the platform-specific types. @@ -773,10 +713,21 @@ func (w *Watcher) register(fds []int, flags int, fflags uint32) error { } // read retrieves pending events, or waits until an event occurs. -func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { +func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { n, err := unix.Kevent(w.kq, nil, events, nil) if err != nil { return nil, err } return events[0:n], nil } + +func (w *kqueue) xSupports(op Op) bool { + if runtime.GOOS == "freebsd" { + //return true // Supports everything. + } + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go index d34a23c015..5eb5dbc66f 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -1,205 +1,23 @@ //go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) -// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify import "errors" -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. 
-// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). 
- // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type other struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { return nil, errors.New("fsnotify not supported on the current platform") } - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() } - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { return nil } - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { return nil } - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return nil } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. 
-// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil } - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { return nil } +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + return newBackend(ev, errs) +} +func (w *other) Close() error { return nil } +func (w *other) WatchList() []string { return nil } +func (w *other) Add(name string) error { return nil } +func (w *other) AddWith(name string, opts ...addOpt) error { return nil } +func (w *other) Remove(name string) error { return nil } +func (w *other) xSupports(op Op) bool { return false } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go index 9bc91e5d61..c54a630838 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -1,12 +1,8 @@ //go:build windows -// +build windows // Windows backend based on ReadDirectoryChangesW() // // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw -// -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -19,123 +15,15 @@ import ( "runtime" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/windows" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. 
-// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type readDirChangesW struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error port windows.Handle // Handle to completion port @@ -147,48 +35,40 @@ type Watcher struct { closed bool // Set to true when Close() is first called } -// NewWatcher creates a new Watcher. 
-func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(50) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(50, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) if err != nil { return nil, os.NewSyscallError("CreateIoCompletionPort", err) } - w := &Watcher{ + w := &readDirChangesW{ + Events: ev, + Errors: errs, port: port, watches: make(watchMap), input: make(chan *input, 1), - Events: make(chan Event, sz), - Errors: make(chan error), quit: make(chan chan<- error, 1), } go w.readEvents() return w, nil } -func (w *Watcher) isClosed() bool { +func (w *readDirChangesW) isClosed() bool { w.mu.Lock() defer w.mu.Unlock() return w.closed } -func (w *Watcher) sendEvent(name string, mask uint64) bool { +func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool { if mask == 0 { return false } event := w.newEvent(name, uint32(mask)) + event.renamedFrom = renamedFrom select { case ch := <-w.quit: w.quit <- ch @@ -198,17 +78,19 @@ func (w *Watcher) sendEvent(name string, mask uint64) bool { } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { +func (w *readDirChangesW) sendError(err error) bool { + if err == nil { + return true + } select { case w.Errors <- err: return true case <-w.quit: + return false } - return false } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *readDirChangesW) Close() error { if w.isClosed() { return nil } @@ -226,57 +108,21 @@ func (w *Watcher) Close() error { return <-ch } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. 
-// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } if with.bufsize < 4096 { return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") } @@ -295,18 +141,14 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { return <-in.reply } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *readDirChangesW) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } in := &input{ op: opRemoveWatch, @@ -320,11 +162,7 @@ func (w *Watcher) Remove(name string) error { return <-in.reply } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) WatchList() []string { +func (w *readDirChangesW) WatchList() []string { if w.isClosed() { return nil } @@ -335,7 +173,13 @@ func (w *Watcher) WatchList() []string { entries := make([]string, 0, len(w.watches)) for _, entry := range w.watches { for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) + for name := range watchEntry.names { + entries = append(entries, filepath.Join(watchEntry.path, name)) + } + // the directory itself is being watched + if watchEntry.mask != 0 { + entries = append(entries, watchEntry.path) + } } } @@ -361,7 +205,7 @@ const ( sysFSIGNORED = 0x8000 ) -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *readDirChangesW) newEvent(name string, mask uint32) Event { e := Event{Name: name} if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { e.Op |= Create @@ -417,7 +261,7 @@ type ( watchMap map[uint32]indexMap ) -func (w *Watcher) wakeupReader() error { +func (w *readDirChangesW) wakeupReader() error { err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) if err != nil { return os.NewSyscallError("PostQueuedCompletionStatus", err) @@ -425,7 +269,7 @@ func (w *Watcher) wakeupReader() error { return nil } -func (w *Watcher) getDir(pathname string) (dir string, err error) { +func (w *readDirChangesW) getDir(pathname string) (dir string, err error) { attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) if err != nil { return "", os.NewSyscallError("GetFileAttributes", err) @@ -439,7 +283,7 @@ func (w *Watcher) getDir(pathname string) (dir string, err error) { return } -func (w *Watcher) getIno(path string) (ino *inode, err error) { +func (w *readDirChangesW) getIno(path string) (ino *inode, err error) { h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), windows.FILE_LIST_DIRECTORY, windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, @@ -482,9 +326,8 @@ func (m watchMap) set(ino *inode, watch *watch) { } // Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { - //pathname, recurse := recursivePath(pathname) - recurse := false +func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error { + pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) if err != nil { @@ -538,7 +381,7 @@ func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { } // Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { +func (w *readDirChangesW) remWatch(pathname string) error { pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) @@ -566,11 +409,11 @@ func (w *Watcher) remWatch(pathname string) error { return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) } if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) watch.mask = 0 } else { name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } @@ -578,23 +421,23 @@ func (w *Watcher) remWatch(pathname string) error { } // Must run within the I/O thread. 
-func (w *Watcher) deleteWatch(watch *watch) { +func (w *readDirChangesW) deleteWatch(watch *watch) { for name, mask := range watch.names { if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED) } delete(watch.names, name) } if watch.mask != 0 { if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) } watch.mask = 0 } } // Must run within the I/O thread. -func (w *Watcher) startRead(watch *watch) error { +func (w *readDirChangesW) startRead(watch *watch) error { err := windows.CancelIo(watch.ino.handle) if err != nil { w.sendError(os.NewSyscallError("CancelIo", err)) @@ -624,7 +467,7 @@ func (w *Watcher) startRead(watch *watch) error { err := os.NewSyscallError("ReadDirectoryChanges", rdErr) if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) err = nil } w.deleteWatch(watch) @@ -637,7 +480,7 @@ func (w *Watcher) startRead(watch *watch) error { // readEvents reads from the I/O completion port, converts the // received events into Event objects and sends them via the Events channel. // Entry point to the I/O thread. -func (w *Watcher) readEvents() { +func (w *readDirChangesW) readEvents() { var ( n uint32 key uintptr @@ -700,7 +543,7 @@ func (w *Watcher) readEvents() { } case windows.ERROR_ACCESS_DENIED: // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) w.deleteWatch(watch) w.startRead(watch) continue @@ -733,6 +576,10 @@ func (w *Watcher) readEvents() { name := windows.UTF16ToString(buf) fullname := filepath.Join(watch.path, name) + if debug { + internal.Debug(fullname, raw.Action) + } + var mask uint64 switch raw.Action { case windows.FILE_ACTION_REMOVED: @@ -761,21 +608,22 @@ func (w *Watcher) readEvents() { } } - sendNameEvent := func() { - w.sendEvent(fullname, watch.names[name]&mask) - } if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() + w.sendEvent(fullname, "", watch.names[name]&mask) } if raw.Action == windows.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } - w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) + if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action)) + } else { + w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action)) + } + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() + w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask) } // Move to the next event in the buffer @@ -787,8 +635,7 @@ func (w *Watcher) readEvents() { // Error! 
if offset >= n { //lint:ignore ST1005 Windows should be capitalized - w.sendError(errors.New( - "Windows system assumed buffer larger than it is, events have likely been missed")) + w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed")) break } } @@ -799,7 +646,7 @@ func (w *Watcher) readEvents() { } } -func (w *Watcher) toWindowsFlags(mask uint64) uint32 { +func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 { var m uint32 if mask&sysFSMODIFY != 0 { m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE @@ -810,7 +657,7 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 { return m } -func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { +func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 { switch action { case windows.FILE_ACTION_ADDED: return sysFSCREATE @@ -825,3 +672,11 @@ func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { } return 0 } + +func (w *readDirChangesW) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 24c99cc499..0760efe916 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -3,19 +3,146 @@ // // Currently supported systems: // -// Linux 2.6.32+ via inotify -// BSD, macOS via kqueue -// Windows via ReadDirectoryChangesW -// illumos via FEN +// - Linux via inotify +// - BSD, macOS via kqueue +// - Windows via ReadDirectoryChangesW +// - illumos via FEN +// +// # FSNOTIFY_DEBUG +// +// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to +// stderr. This can be useful to track down some problems, especially in cases +// where fsnotify is used as an indirect dependency. +// +// Every event will be printed as soon as there's something useful to print, +// with as little processing from fsnotify. +// +// Example output: +// +// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1" package fsnotify import ( "errors" "fmt" + "os" "path/filepath" "strings" ) +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". 
+// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # Windows notes +// +// Paths can be added as "C:\\path\\to\\dir", but forward slashes +// ("C:/path/to/dir") will also work. +// +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all files, sometimes it will send no +// events, and often only for some files. +// +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. +type Watcher struct { + b backend + + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // when a file is truncated. On Windows it's never + // sent. 
+ Events chan Event + + // Errors sends any errors. + Errors chan error +} + // Event represents a file system notification. type Event struct { // Path to the file or directory. @@ -30,6 +157,16 @@ type Event struct { // This is a bitmask and some systems may send multiple operations at once. // Use the Event.Has() method instead of comparing with ==. Op Op + + // Create events will have this set to the old path if it's a rename. This + // only works when both the source and destination are watched. It's not + // reliable when watching individual files, only directories. + // + // For example "mv /tmp/file /tmp/rename" will emit: + // + // Event{Op: Rename, Name: "/tmp/file"} + // Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"} + renamedFrom string } // Op describes a set of file operations. @@ -50,7 +187,7 @@ const ( // example "remove to trash" is often a rename). Remove - // The path was renamed to something else; any watched on it will be + // The path was renamed to something else; any watches on it will be // removed. Rename @@ -60,15 +197,155 @@ const ( // get triggered very frequently by some software. For example, Spotlight // indexing on macOS, anti-virus software, backup software, etc. Chmod + + // File descriptor was opened. + // + // Only works on Linux and FreeBSD. + xUnportableOpen + + // File was read from. + // + // Only works on Linux and FreeBSD. + xUnportableRead + + // File opened for writing was closed. + // + // Only works on Linux and FreeBSD. + // + // The advantage of using this over Write is that it's more reliable than + // waiting for Write events to stop. It's also faster (if you're not + // listening to Write events): copying a file of a few GB can easily + // generate tens of thousands of Write events in a short span of time. + xUnportableCloseWrite + + // File opened for reading was closed. + // + // Only works on Linux and FreeBSD. + xUnportableCloseRead ) -// Common errors that can be reported. var ( + // ErrNonExistentWatch is used when Remove() is called on a path that's not + // added. ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") - ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") - ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrClosed is used when trying to operate on a closed Watcher. + ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrEventOverflow is reported from the Errors channel when there are too + // many events: + // + // - inotify: inotify returns IN_Q_OVERFLOW – because there are too + // many queued events (the fs.inotify.max_queued_events + // sysctl can be used to increase this). + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. + ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") + + // ErrUnsupported is returned by AddWith() when WithOps() specified an + // Unportable event that's not supported on this platform. + xErrUnsupported = errors.New("fsnotify: not supported with this backend") ) +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBackend(ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. 
+// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBufferedBackend(sz, ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. +// +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(path string) error { return w.b.Add(path) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) } + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) Remove(path string) error { return w.b.Remove(path) } + +// Close removes all watches and closes the Events channel. +func (w *Watcher) Close() error { return w.b.Close() } + +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) WatchList() []string { return w.b.WatchList() } + +// Supports reports if all the listed operations are supported by this platform. +// +// Create, Write, Remove, Rename, and Chmod are always supported. 
It can only +// return false for an Op starting with Unportable. +func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) } + func (o Op) String() string { var b strings.Builder if o.Has(Create) { @@ -80,6 +357,18 @@ func (o Op) String() string { if o.Has(Write) { b.WriteString("|WRITE") } + if o.Has(xUnportableOpen) { + b.WriteString("|OPEN") + } + if o.Has(xUnportableRead) { + b.WriteString("|READ") + } + if o.Has(xUnportableCloseWrite) { + b.WriteString("|CLOSE_WRITE") + } + if o.Has(xUnportableCloseRead) { + b.WriteString("|CLOSE_READ") + } if o.Has(Rename) { b.WriteString("|RENAME") } @@ -100,24 +389,48 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) } // String returns a string representation of the event with their path. func (e Event) String() string { + if e.renamedFrom != "" { + return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom) + } return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } type ( + backend interface { + Add(string) error + AddWith(string, ...addOpt) error + Remove(string) error + WatchList() []string + Close() error + xSupports(Op) bool + } addOpt func(opt *withOpts) withOpts struct { - bufsize int + bufsize int + op Op + noFollow bool + sendCreate bool } ) +var debug = func() bool { + // Check for exactly "1" (rather than mere existence) so we can add + // options/flags in the future. I don't know if we ever want that, but it's + // nice to leave the option open. + return os.Getenv("FSNOTIFY_DEBUG") == "1" +}() + var defaultOpts = withOpts{ bufsize: 65536, // 64K + op: Create | Write | Remove | Rename | Chmod, } func getOptions(opts ...addOpt) withOpts { with := defaultOpts for _, o := range opts { - o(&with) + if o != nil { + o(&with) + } } return with } @@ -136,9 +449,44 @@ func WithBufferSize(bytes int) addOpt { return func(opt *withOpts) { opt.bufsize = bytes } } +// WithOps sets which operations to listen for. The default is [Create], +// [Write], [Remove], [Rename], and [Chmod]. +// +// Excluding operations you're not interested in can save quite a bit of CPU +// time; in some use cases there may be hundreds of thousands of useless Write +// or Chmod operations per second. +// +// This can also be used to add unportable operations not supported by all +// platforms; unportable operations all start with "Unportable": +// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and +// [UnportableCloseRead]. +// +// AddWith returns an error when using an unportable operation that's not +// supported. Use [Watcher.Support] to check for support. +func withOps(op Op) addOpt { + return func(opt *withOpts) { opt.op = op } +} + +// WithNoFollow disables following symlinks, so the symlinks themselves are +// watched. +func withNoFollow() addOpt { + return func(opt *withOpts) { opt.noFollow = true } +} + +// "Internal" option for recursive watches on inotify. +func withCreate() addOpt { + return func(opt *withOpts) { opt.sendCreate = true } +} + +var enableRecurse = false + // Check if this path is recursive (ends with "/..." or "\..."), and return the // path with the /... stripped. func recursivePath(path string) (string, bool) { + path = filepath.Clean(path) + if !enableRecurse { // Only enabled in tests for now. + return path, false + } if filepath.Base(path) == "..." 
{ return filepath.Dir(path), true } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go new file mode 100644 index 0000000000..b0eab10090 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go @@ -0,0 +1,39 @@ +//go:build darwin + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ +func SetRlimit() { + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = l.Cur + + if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } + + if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go new file mode 100644 index 0000000000..928319fb09 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go @@ -0,0 +1,57 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_BACKGROUND", unix.NOTE_BACKGROUND}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CRITICAL", unix.NOTE_CRITICAL}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS}, + {"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR}, + {"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL}, + {"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL}, + {"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK}, + {"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY}, + {"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_FUNLOCK", unix.NOTE_FUNLOCK}, + {"NOTE_LEEWAY", unix.NOTE_LEEWAY}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MACHTIME", unix.NOTE_MACHTIME}, + {"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME}, + {"NOTE_NONE", unix.NOTE_NONE}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OOB", unix.NOTE_OOB}, + //{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!) 
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_REAP", unix.NOTE_REAP}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_SIGNAL", unix.NOTE_SIGNAL}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_VM_ERROR", unix.NOTE_VM_ERROR}, + {"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE}, + {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE}, + {"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go new file mode 100644 index 0000000000..3186b0c349 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go @@ -0,0 +1,33 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_OOB", unix.NOTE_OOB}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go new file mode 100644 index 0000000000..f69fdb930f --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go @@ -0,0 +1,42 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSTIME", unix.NOTE_ABSTIME}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CLOSE", unix.NOTE_CLOSE}, + {"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FILE_POLL", unix.NOTE_FILE_POLL}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MSECONDS", unix.NOTE_MSECONDS}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OPEN", unix.NOTE_OPEN}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_READ", unix.NOTE_READ}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + 
{"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go new file mode 100644 index 0000000000..607e683bd7 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go @@ -0,0 +1,32 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin + +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, kevent *unix.Kevent_t) { + mask := uint32(kevent.Fflags) + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go new file mode 100644 index 0000000000..35c734be43 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go @@ -0,0 +1,56 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask, cookie uint32) { + names := []struct { + n string + m uint32 + }{ + {"IN_ACCESS", unix.IN_ACCESS}, + {"IN_ATTRIB", unix.IN_ATTRIB}, + {"IN_CLOSE", unix.IN_CLOSE}, + {"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE}, + {"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE}, + {"IN_CREATE", unix.IN_CREATE}, + {"IN_DELETE", unix.IN_DELETE}, + {"IN_DELETE_SELF", unix.IN_DELETE_SELF}, + {"IN_IGNORED", unix.IN_IGNORED}, + {"IN_ISDIR", unix.IN_ISDIR}, + {"IN_MODIFY", unix.IN_MODIFY}, + {"IN_MOVE", unix.IN_MOVE}, + {"IN_MOVED_FROM", unix.IN_MOVED_FROM}, + {"IN_MOVED_TO", unix.IN_MOVED_TO}, + {"IN_MOVE_SELF", unix.IN_MOVE_SELF}, + {"IN_OPEN", unix.IN_OPEN}, + {"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW}, + {"IN_UNMOUNT", unix.IN_UNMOUNT}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + var c string + if cookie > 0 { + c = fmt.Sprintf("(cookie: %d) ", cookie) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go new file mode 100644 index 0000000000..e5b3b6f694 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go @@ -0,0 +1,25 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go new file mode 100644 index 0000000000..1dd455bc5a --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go @@ -0,0 +1,28 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + // {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386? + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EOF", unix.NOTE_EOF}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRUNCATE", unix.NOTE_TRUNCATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go new file mode 100644 index 0000000000..f1b2e73bd5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go @@ -0,0 +1,45 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask int32) { + names := []struct { + n string + m int32 + }{ + {"FILE_ACCESS", unix.FILE_ACCESS}, + {"FILE_MODIFIED", unix.FILE_MODIFIED}, + {"FILE_ATTRIB", unix.FILE_ATTRIB}, + {"FILE_TRUNC", unix.FILE_TRUNC}, + {"FILE_NOFOLLOW", unix.FILE_NOFOLLOW}, + {"FILE_DELETE", unix.FILE_DELETE}, + {"FILE_RENAME_TO", unix.FILE_RENAME_TO}, + {"FILE_RENAME_FROM", unix.FILE_RENAME_FROM}, + {"UNMOUNTED", unix.UNMOUNTED}, + {"MOUNTEDOVER", unix.MOUNTEDOVER}, + {"FILE_EXCEPTION", unix.FILE_EXCEPTION}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go new file mode 100644 index 0000000000..52bf4ce53b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go @@ -0,0 +1,40 @@ +package internal + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/sys/windows" +) + +func Debug(name string, mask uint32) { + names := []struct { + n string + m uint32 + }{ + {"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED}, + {"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED}, + {"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED}, + {"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME}, + {"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name)) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go new file mode 100644 index 0000000000..547df1df84 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go @@ -0,0 +1,31 @@ +//go:build freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/internal.go b/vendor/github.com/fsnotify/fsnotify/internal/internal.go new file mode 100644 index 0000000000..7daa45e19e --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/internal.go @@ -0,0 +1,2 @@ +// Package internal contains some helpers. +package internal diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/vendor/github.com/fsnotify/fsnotify/internal/unix.go new file mode 100644 index 0000000000..30976ce973 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix.go @@ -0,0 +1,31 @@ +//go:build !windows && !darwin && !freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix2.go b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go new file mode 100644 index 0000000000..37dfeddc28 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go @@ -0,0 +1,7 @@ +//go:build !windows + +package internal + +func HasPrivilegesForSymlink() bool { + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/vendor/github.com/fsnotify/fsnotify/internal/windows.go new file mode 100644 index 0000000000..a72c649549 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/windows.go @@ -0,0 +1,41 @@ +//go:build windows + +package internal + +import ( + "errors" + + "golang.org/x/sys/windows" +) + +// Just a dummy. 
+var ( + SyscallEACCES = errors.New("dummy") + UnixEACCES = errors.New("dummy") +) + +func SetRlimit() {} +func Maxfiles() uint64 { return 1<<64 - 1 } +func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") } +func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") } + +func HasPrivilegesForSymlink() bool { + var sid *windows.SID + err := windows.AllocateAndInitializeSid( + &windows.SECURITY_NT_AUTHORITY, + 2, + windows.SECURITY_BUILTIN_DOMAIN_RID, + windows.DOMAIN_ALIAS_RID_ADMINS, + 0, 0, 0, 0, 0, 0, + &sid) + if err != nil { + return false + } + defer windows.FreeSid(sid) + token := windows.Token(0) + member, err := token.IsMember(sid) + if err != nil { + return false + } + return member || token.IsElevated() +} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh deleted file mode 100644 index 99012ae653..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env zsh -[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 -setopt err_exit no_unset pipefail extended_glob - -# Simple script to update the godoc comments on all watchers so you don't need -# to update the same comment 5 times. - -watcher=$(</tmp/x - print -r -- $cmt >>/tmp/x - tail -n+$(( end + 1 )) $file >>/tmp/x - mv /tmp/x $file - done -} - -set-cmt '^type Watcher struct ' $watcher -set-cmt '^func NewWatcher(' $new -set-cmt '^func NewBufferedWatcher(' $newbuffered -set-cmt '^func (w \*Watcher) Add(' $add -set-cmt '^func (w \*Watcher) AddWith(' $addwith -set-cmt '^func (w \*Watcher) Remove(' $remove -set-cmt '^func (w \*Watcher) Close(' $close -set-cmt '^func (w \*Watcher) WatchList(' $watchlist -set-cmt '^[[:space:]]*Events *chan Event$' $events -set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go index 4322b0b885..f65e8fe3ed 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -1,5 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly -// +build freebsd openbsd netbsd dragonfly package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go index 5da5ffa78f..a29fc7aab6 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -1,5 +1,4 @@ //go:build darwin -// +build darwin package fsnotify diff --git a/vendor/github.com/fxamacker/cbor/v2/.gitignore b/vendor/github.com/fxamacker/cbor/v2/.gitignore new file mode 100644 index 0000000000..f1c181ec9c --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/.gitignore @@ -0,0 +1,12 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/github.com/fxamacker/cbor/v2/.golangci.yml b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml new file mode 100644 index 0000000000..38cb9ae101 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml @@ -0,0 +1,104 @@ +# Do not delete linter settings. Linters like gocritic can be enabled on the command line. 
+ +linters-settings: + depguard: + rules: + prevent_unmaintained_packages: + list-mode: strict + files: + - $all + - "!$test" + allow: + - $gostd + - github.com/x448/float16 + deny: + - pkg: io/ioutil + desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil" + dupl: + threshold: 100 + funlen: + lines: 100 + statements: 50 + goconst: + ignore-tests: true + min-len: 2 + min-occurrences: 3 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - commentedOutCode + - dupImport # https://github.com/go-critic/go-critic/issues/845 + - ifElseChain + - octalLiteral + - paramTypeCombine + - whyNoLint + gofmt: + simplify: false + goimports: + local-prefixes: github.com/fxamacker/cbor + golint: + min-confidence: 0 + govet: + check-shadowing: true + lll: + line-length: 140 + maligned: + suggest-new: true + misspell: + locale: US + staticcheck: + checks: ["all"] + +linters: + disable-all: true + enable: + - asciicheck + - bidichk + - depguard + - errcheck + - exportloopref + - goconst + - gocritic + - gocyclo + - gofmt + - goimports + - goprintffuncname + - gosec + - gosimple + - govet + - ineffassign + - misspell + - nilerr + - revive + - staticcheck + - stylecheck + - typecheck + - unconvert + - unused + +issues: + # max-issues-per-linter default is 50. Set to 0 to disable limit. + max-issues-per-linter: 0 + # max-same-issues default is 3. Set to 0 to disable limit. + max-same-issues: 0 + + exclude-rules: + - path: decode.go + text: "string ` overflows ` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string ` \\(range is \\[` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string `, ` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string ` overflows Go's int64` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string `\\]\\)` has (\\d+) occurrences, make it a constant" + - path: valid.go + text: "string ` for type ` has (\\d+) occurrences, make it a constant" + - path: valid.go + text: "string `cbor: ` has (\\d+) occurrences, make it a constant" diff --git a/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md b/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..c794b2b0c6 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md @@ -0,0 +1,133 @@ + +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. 
+ +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +faye.github@gmail.com. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. 
+ +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md b/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md new file mode 100644 index 0000000000..de0965e12d --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md @@ -0,0 +1,41 @@ +# How to contribute + +You can contribute by using the library, opening issues, or opening pull requests. + +## Bug reports and security vulnerabilities + +Most issues are tracked publicly on [GitHub](https://github.com/fxamacker/cbor/issues). + +To report security vulnerabilities, please email faye.github@gmail.com and allow time for the problem to be resolved before disclosing it to the public. For more info, see [Security Policy](https://github.com/fxamacker/cbor#security-policy). + +Please do not send data that might contain personally identifiable information, even if you think you have permission. That type of support requires payment and a signed contract where I'm indemnified, held harmless, and defended by you for any data you send to me. + +## Pull requests + +Please [create an issue](https://github.com/fxamacker/cbor/issues/new/choose) before you begin work on a PR. The improvement may have already been considered, etc. + +Pull requests have signing requirements and must not be anonymous. Exceptions are usually made for docs and CI scripts. + +See the [Pull Request Template](https://github.com/fxamacker/cbor/blob/master/.github/pull_request_template.md) for details. + +Pull requests have a greater chance of being approved if: +- it does not reduce speed, increase memory use, reduce security, etc. for people not using the new option or feature. +- it has > 97% code coverage. + +## Describe your issue + +Clearly describe the issue: +* If it's a bug, please provide: **version of this library** and **Go** (`go version`), **unmodified error message**, and describe **how to reproduce it**. Also state **what you expected to happen** instead of the error. 
+* If you propose a change or addition, try to give an example how the improved code could look like or how to use it. +* If you found a compilation error, please confirm you're using a supported version of Go. If you are, then provide the output of `go version` first, followed by the complete error message. + +## Please don't + +Please don't send data containing personally identifiable information, even if you think you have permission. That type of support requires payment and a contract where I'm indemnified, held harmless, and defended for any data you send to me. + +Please don't send CBOR data larger than 1024 bytes by email. If you want to send crash-producing CBOR data > 1024 bytes by email, please get my permission before sending it to me. + +## Credits + +- This guide used nlohmann/json contribution guidelines for inspiration as suggested in issue #22. +- Special thanks to @lukseven for pointing out the contribution guidelines didn't mention signing requirements. diff --git a/vendor/github.com/fxamacker/cbor/v2/LICENSE b/vendor/github.com/fxamacker/cbor/v2/LICENSE new file mode 100644 index 0000000000..eaa8504921 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019-present Faye Amacker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/fxamacker/cbor/v2/README.md b/vendor/github.com/fxamacker/cbor/v2/README.md new file mode 100644 index 0000000000..af0a79507e --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/README.md @@ -0,0 +1,691 @@ +# CBOR Codec in Go + + + +[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html). + +CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc.  CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades. + +`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor). + +See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 
🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accept a user-specified buffer. + +## fxamacker/cbor + +[![](https://github.com/fxamacker/cbor/workflows/ci/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci) +[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A596%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22) +[![CodeQL](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml) +[![](https://img.shields.io/badge/fuzzing-passing-44c010)](#fuzzing-and-code-coverage) +[![Go Report Card](https://goreportcard.com/badge/github.com/fxamacker/cbor)](https://goreportcard.com/report/github.com/fxamacker/cbor) + +`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)). + +Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc. + +Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc. + +
Highlights

+ +__🚀  Speed__ + +Encoding and decoding is fast without using Go's `unsafe` package. Slower settings are opt-in. Default limits allow very fast and memory efficient rejection of malformed CBOR data. + +__🔒  Security__ + +Decoder has configurable limits that defend against malicious inputs. Duplicate map key detection is supported. By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). + +Codec passed multiple confidential security assessments in 2022. No vulnerabilities found in subset of codec in a [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) prepared by NCC Group for Microsoft Corporation. + +__🗜️  Data Size__ + +Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. + +__:jigsaw:  Usability__ + +API is mostly same as `encoding/json` plus interfaces that simplify concurrency for CBOR options. Encoding and decoding modes can be created at startup and reused by any goroutines. + +Presets include Core Deterministic Encoding, Preferred Serialization, CTAP2 Canonical CBOR, etc. + +__📆  Extensibility__ + +Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949.html#section-7.1) (e.g. CBOR tags) and extensive settings. API has interfaces that allow users to create custom encoding and decoding without modifying this library. + +


+ +
+ +### Secure Decoding with Configurable Settings + +`fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data. + +By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). + +
Example decoding with encoding/gob 💥 fatal error (out of memory)

+ +```Go +// Example of encoding/gob having "fatal error: runtime: out of memory" +// while decoding 181 bytes. +package main +import ( + "bytes" + "encoding/gob" + "encoding/hex" + "fmt" +) + +// Example data is from https://github.com/golang/go/issues/24446 +// (shortened to 181 bytes). +const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + + "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + + "860001013001ff860001013001ffb80000001eff850401010e3030303030" + + "30303030303030303001ff3000010c0104000016ffb70201010830303030" + + "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + + "303030303030303030303030303030303030303030303030303030303030" + + "30" + +type X struct { + J *X + K map[string]int +} + +func main() { + raw, _ := hex.DecodeString(data) + decoder := gob.NewDecoder(bytes.NewReader(raw)) + + var x X + decoder.Decode(&x) // fatal error: runtime: out of memory + fmt.Println("Decoding finished.") +} +``` + +


+ +
+ +`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to +decode 10 bytes of malicious CBOR data to `[]byte` (with default settings): + +| Codec | Speed (ns/op) | Memory | Allocs | +| :---- | ------------: | -----: | -----: | +| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op | +| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op | + +
Benchmark details

+ +Latest comparison used: +- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933) +- go test -bench=. -benchmem -count=20 + +#### Prior comparisons + +| Codec | Speed (ns/op) | Memory | Allocs | +| :---- | ------------: | -----: | -----: | +| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op | +| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op | +| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op | +| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate | + +- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +- go1.19.6, linux/amd64, i5-13600K (DDR4) +- go test -bench=. -benchmem -count=20 + +


+ +
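The configurable limits mentioned above are ordinary fields on `DecOptions`. As a minimal sketch of a hardened decoding mode (the limit values and the `untrustedData` variable below are illustrative assumptions, not recommendations):

```go
// Hedged sketch: tighten decoder limits before decoding untrusted input.
// import "github.com/fxamacker/cbor/v2"
opts := cbor.DecOptions{
	MaxNestedLevels:  16,   // reject deeply nested data items
	MaxArrayElements: 1024, // reject overly long arrays
	MaxMapPairs:      1024, // reject overly large maps
}
dm, err := opts.DecMode() // immutable decoding mode, safe for concurrent use
if err != nil {
	// invalid option values are reported here
}

var v interface{}
err = dm.Unmarshal(untrustedData, &v) // untrustedData is a placeholder []byte
```

Inputs that exceed these limits are rejected quickly instead of being decoded.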
+ +### Smaller Encodings with Struct Tags + +Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. + +
Example encoding 3-level nested Go struct to 1 byte CBOR

+ +https://go.dev/play/p/YxwvfPdFQG2 + +```Go +// Example encoding nested struct (with omitempty tag) +// - encoding/json: 18 byte JSON +// - fxamacker/cbor: 1 byte CBOR +package main + +import ( + "encoding/hex" + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +type GrandChild struct { + Quux int `json:",omitempty"` +} + +type Child struct { + Baz int `json:",omitempty"` + Qux GrandChild `json:",omitempty"` +} + +type Parent struct { + Foo Child `json:",omitempty"` + Bar int `json:",omitempty"` +} + +func cb() { + results, _ := cbor.Marshal(Parent{}) + fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) + + text, _ := cbor.Diagnose(results) // Diagnostic Notation + fmt.Println("DN: " + text) +} + +func js() { + results, _ := json.Marshal(Parent{}) + fmt.Println("hex(JSON): " + hex.EncodeToString(results)) + + text := string(results) // JSON + fmt.Println("JSON: " + text) +} + +func main() { + cb() + fmt.Println("-------------") + js() +} +``` + +Output (DN is Diagnostic Notation): +``` +hex(CBOR): a0 +DN: {} +------------- +hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +JSON: {"Foo":{"Qux":{}}} +``` + +


+ +
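The image below shows the different struct tags combined in one API. As a rough textual sketch to go with it (the type and field names here are made up for illustration, not taken from this library), `toarray` and `keyasint` are declared like this:

```go
// Hedged sketch of the toarray and keyasint struct tags.
// Type and field names are illustrative only.
type ExampleHeader struct {
	Alg int    `cbor:"1,keyasint,omitempty"` // encoded with CBOR map key 1
	Kid []byte `cbor:"4,keyasint,omitempty"` // encoded with CBOR map key 4
}

type ExampleRecord struct {
	_         struct{} `cbor:",toarray"` // encode/decode this struct as a CBOR array
	Protected []byte
	Payload   []byte
	Signature []byte
}
```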
+ +Example using different struct tags together: + +![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags") + +API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options. + +## Quick Start + +__Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`. + +### Key Points + +This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742). + +- __CBOR data item__ is a single piece of CBOR data and its structure may contain 0 or more nested data items. +- __CBOR sequence__ is a concatenation of 0 or more encoded CBOR data items. + +Configurable limits and options can be used to balance trade-offs. + +- Encoding and decoding modes are created from options (settings). +- Modes can be created at startup and reused. +- Modes are safe for concurrent use. + +### Default Mode + +Package level functions only use this library's default settings. +They provide the "default mode" of encoding and decoding. + +```go +// API matches encoding/json for Marshal, Unmarshal, Encode, Decode, etc. +b, err = cbor.Marshal(v) // encode v to []byte b +err = cbor.Unmarshal(b, &v) // decode []byte b to v +decoder = cbor.NewDecoder(r) // create decoder with io.Reader r +err = decoder.Decode(&v) // decode a CBOR data item to v + +// v2.7.0 added MarshalToBuffer() and UserBufferEncMode interface. +err = cbor.MarshalToBuffer(v, b) // encode v to b instead of using built-in buf pool. + +// v2.5.0 added new functions that return remaining bytes. + +// UnmarshalFirst decodes first CBOR data item and returns remaining bytes. +rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v + +// DiagnoseFirst translates first CBOR data item to text and returns remaining bytes. +text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text + +// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes, +// but new funcs UnmarshalFirst and DiagnoseFirst do not. +``` + +__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc. + +- Different CBOR libraries may use different default settings. +- CBOR-based formats or protocols usually require specific settings. + +For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. + +### Presets + +Presets can be used as-is or as a starting point for custom settings. + +```go +// EncOptions is a struct of encoder settings. +func CoreDetEncOptions() EncOptions // RFC 8949 Core Deterministic Encoding +func PreferredUnsortedEncOptions() EncOptions // RFC 8949 Preferred Serialization +func CanonicalEncOptions() EncOptions // RFC 7049 Canonical CBOR +func CTAP2EncOptions() EncOptions // FIDO2 CTAP2 Canonical CBOR +``` + +Presets are used to create custom modes. + +### Custom Modes + +Modes are created from settings. Once created, modes have immutable settings. + +💡 Create the mode at startup and reuse it. It is safe for concurrent use. + +```Go +// Create encoding mode. +opts := cbor.CoreDetEncOptions() // use preset options as a starting point +opts.Time = cbor.TimeUnix // change any settings if needed +em, err := opts.EncMode() // create an immutable encoding mode + +// Reuse the encoding mode. It is safe for concurrent use. + +// API matches encoding/json. 
+b, err := em.Marshal(v) // encode v to []byte b +encoder := em.NewEncoder(w) // create encoder with io.Writer w +err := encoder.Encode(v) // encode v to io.Writer w +``` + +Default mode and custom modes automatically apply struct tags. + +### User Specified Buffer for Encoding (v2.7.0) + +`UserBufferEncMode` interface extends `EncMode` interface to add `MarshalToBuffer()`. It accepts a user-specified buffer instead of using built-in buffer pool. + +```Go +em, err := myEncOptions.UserBufferEncMode() // create UserBufferEncMode mode + +var buf bytes.Buffer +err = em.MarshalToBuffer(v, &buf) // encode v to provided buf +``` + +### Struct Tags + +Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. + +
Example encoding 3-level nested Go struct to 1 byte CBOR

+ +https://go.dev/play/p/YxwvfPdFQG2 + +```Go +// Example encoding nested struct (with omitempty tag) +// - encoding/json: 18 byte JSON +// - fxamacker/cbor: 1 byte CBOR +package main + +import ( + "encoding/hex" + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +type GrandChild struct { + Quux int `json:",omitempty"` +} + +type Child struct { + Baz int `json:",omitempty"` + Qux GrandChild `json:",omitempty"` +} + +type Parent struct { + Foo Child `json:",omitempty"` + Bar int `json:",omitempty"` +} + +func cb() { + results, _ := cbor.Marshal(Parent{}) + fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) + + text, _ := cbor.Diagnose(results) // Diagnostic Notation + fmt.Println("DN: " + text) +} + +func js() { + results, _ := json.Marshal(Parent{}) + fmt.Println("hex(JSON): " + hex.EncodeToString(results)) + + text := string(results) // JSON + fmt.Println("JSON: " + text) +} + +func main() { + cb() + fmt.Println("-------------") + js() +} +``` + +Output (DN is Diagnostic Notation): +``` +hex(CBOR): a0 +DN: {} +------------- +hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +JSON: {"Foo":{"Qux":{}}} +``` + +


+ +
+ +
Example using several struct tags

+ +![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags") + +

+ +Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys. + +### CBOR Tags + +CBOR tags are specified in a `TagSet`. + +Custom modes can be created with a `TagSet` to handle CBOR tags. + +```go +em, err := opts.EncMode() // no CBOR tags +em, err := opts.EncModeWithTags(ts) // immutable CBOR tags +em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags +``` + +`TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`. + +
Example using TagSet and TagOptions

+ +```go +// Use signedCWT struct defined in "Decoding CWT" example. + +// Create TagSet (safe for concurrency). +tags := cbor.NewTagSet() +// Register tag COSE_Sign1 18 with signedCWT type. +tags.Add( + cbor.TagOptions{EncTag: cbor.EncTagRequired, DecTag: cbor.DecTagRequired}, + reflect.TypeOf(signedCWT{}), + 18) + +// Create DecMode with immutable tags. +dm, _ := cbor.DecOptions{}.DecModeWithTags(tags) + +// Unmarshal to signedCWT with tag support. +var v signedCWT +if err := dm.Unmarshal(data, &v); err != nil { + return err +} + +// Create EncMode with immutable tags. +em, _ := cbor.EncOptions{}.EncModeWithTags(tags) + +// Marshal signedCWT with tag number. +if data, err := em.Marshal(v); err != nil { + return err +} +``` + +

+ +### Functions and Interfaces + +
Functions and interfaces at a glance

+ +Common functions with same API as `encoding/json`: +- `Marshal`, `Unmarshal` +- `NewEncoder`, `(*Encoder).Encode` +- `NewDecoder`, `(*Decoder).Decode` + +NOTE: `Unmarshal` will return `ExtraneousDataError` if there are remaining bytes +because RFC 8949 treats CBOR data item with remaining bytes as malformed. +- 💡 Use `UnmarshalFirst` to decode first CBOR data item and return any remaining bytes. + +Other useful functions: +- `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data. +- `UnmarshalFirst` decodes the first CBOR data item and returns any remaining bytes. +- `Wellformed` returns true if the CBOR data item is well-formed. + +Interfaces identical or comparable to Go `encoding` packages include: +`Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`. + +The `RawMessage` type can be used to delay CBOR decoding or precompute CBOR encoding. +

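For example, a CBOR Sequence can be drained by repeatedly calling `UnmarshalFirst` on the remaining bytes. This is only a sketch; `sequence` is assumed to hold zero or more concatenated encoded CBOR data items:

```go
// Hedged sketch: decode every data item in a CBOR Sequence (RFC 8742).
rest := sequence
for len(rest) > 0 {
	var v interface{}
	var err error
	rest, err = cbor.UnmarshalFirst(rest, &v)
	if err != nil {
		break // malformed or unacceptable data item; handle as needed
	}
	// use v
}
```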
+ +### Security Tips + +🔒 Use Go's `io.LimitReader` to limit size when decoding very large or indefinite size data. + +Default limits may need to be increased for systems handling very large data (e.g. blockchains). + +`DecOptions` can be used to modify default limits for `MaxArrayElements`, `MaxMapPairs`, and `MaxNestedLevels`. + +## Status + +v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. + +For more details, see [release notes](https://github.com/fxamacker/cbor/releases). + +### Prior Release + +[v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings. + +v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). + +__IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading. + +See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) for list of new features, improvements, and bug fixes. + +See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc. + + + +## Who uses fxamacker/cbor + +`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Cisco, Confidential Computing Consortium, ConsenSys, Dapper Labs, EdgeX Foundry, F5, FIDO Alliance, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Matrix.org, Microsoft, Mozilla, National Cybersecurity Agency of France (govt), Netherlands (govt), Oasis Protocol, Smallstep, Tailscale, Taurus SA, Teleport, TIBCO, and others. + +`fxamacker/cbor` passed multiple confidential security assessments. A [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) (prepared by NCC Group for Microsoft Corporation) includes a subset of fxamacker/cbor v2.4.0 in its scope. + +## Standards + +`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)). + +Notable CBOR features include: + +| CBOR Feature | Description | +| :--- | :--- | +| CBOR tags | API supports built-in and user-defined tags. | +| Preferred serialization | Integers encode to fewest bytes. Optional float64 → float32 → float16. | +| Map key sorting | Unsorted, length-first (Canonical CBOR), and bytewise-lexicographic (CTAP2). 
| +| Duplicate map keys | Always forbid for encoding and option to allow/forbid for decoding. | +| Indefinite length data | Option to allow/forbid for encoding and decoding. | +| Well-formedness | Always checked and enforced. | +| Basic validity checks | Optionally check UTF-8 validity and duplicate map keys. | +| Security considerations | Prevent integer overflow and resource exhaustion (RFC 8949 Section 10). | + +Known limitations are noted in the [Limitations section](#limitations). + +Go nil values for slices, maps, pointers, etc. are encoded as CBOR null. Empty slices, maps, etc. are encoded as empty CBOR arrays and maps. + +Decoder checks for all required well-formedness errors, including all "subkinds" of syntax errors and too little data. + +After well-formedness is verified, basic validity errors are handled as follows: + +* Invalid UTF-8 string: Decoder has option to check and return invalid UTF-8 string error. This check is enabled by default. +* Duplicate keys in a map: Decoder has options to ignore or enforce rejection of duplicate map keys. + +When decoding well-formed CBOR arrays and maps, decoder saves the first error it encounters and continues with the next item. Options to handle this differently may be added in the future. + +By default, decoder treats time values of floating-point NaN and Infinity as if they are CBOR Null or CBOR Undefined. + +__Click to expand topic:__ + +
+ Duplicate Map Keys

+ +This library provides options for fast detection and rejection of duplicate map keys based on applying a Go-specific data model to CBOR's extended generic data model in order to determine duplicate vs distinct map keys. Detection relies on whether the CBOR map key would be a duplicate "key" when decoded and applied to the user-provided Go map or struct. + +`DupMapKeyQuiet` turns off detection of duplicate map keys. It tries to use a "keep fastest" method by choosing either "keep first" or "keep last" depending on the Go data type. + +`DupMapKeyEnforcedAPF` enforces detection and rejection of duplicate map keys. Decoding stops immediately and returns `DupMapKeyError` when the first duplicate key is detected. The error includes the duplicate map key and the index number. + +APF suffix means "Allow Partial Fill" so the destination map or struct can contain some decoded values at the time of error. It is the caller's responsibility to respond to the `DupMapKeyError` by discarding the partially filled result if that's required by their protocol. + +

+ +
+ Tag Validity

+ +This library checks tag validity for built-in tags (currently tag numbers 0, 1, 2, 3, and 55799): + +* Inadmissible type for tag content +* Inadmissible value for tag content + +Unknown tag data items (not tag number 0, 1, 2, 3, or 55799) are handled in two ways: + +* When decoding into an empty interface, unknown tag data item will be decoded into `cbor.Tag` data type, which contains tag number and tag content. The tag content will be decoded into the default Go data type for the CBOR data type. +* When decoding into other Go types, unknown tag data item is decoded into the specified Go type. If Go type is registered with a tag number, the tag number can optionally be verified. + +Decoder also has an option to forbid tag data items (treat any tag data item as error) which is specified by protocols such as CTAP2 Canonical CBOR. + +For more information, see [decoding options](#decoding-options-1) and [tag options](#tag-options). + +

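As a small illustration of the duplicate map key handling described above (a hedged sketch; the CBOR bytes are hand-built for this example), enabling `DupMapKeyEnforcedAPF` surfaces the duplicate as an error:

```go
// Hedged sketch: reject duplicate map keys while decoding.
dm, _ := cbor.DecOptions{DupMapKey: cbor.DupMapKeyEnforcedAPF}.DecMode()

// {"a": 1, "a": 2} encoded as a definite-length CBOR map with a duplicate key.
data := []byte{0xa2, 0x61, 0x61, 0x01, 0x61, 0x61, 0x02}

var m map[string]int
err := dm.Unmarshal(data, &m)
// err is a *cbor.DupMapKeyError; m may be partially filled ("Allow Partial Fill").
```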
+ +## Limitations + +If any of these limitations prevent you from using this library, please open an issue along with a link to your project. + +* CBOR `Undefined` (0xf7) value decodes to Go's `nil` value. CBOR `Null` (0xf6) more closely matches Go's `nil`. +* CBOR map keys with data types not supported by Go for map keys are ignored and an error is returned after continuing to decode remaining items. +* When decoding registered CBOR tag data to interface type, decoder creates a pointer to registered Go type matching CBOR tag number. Requiring a pointer for this is a Go limitation. + +## Fuzzing and Code Coverage + +__Code coverage__ is always 95% or higher (with `go test -cover`) when tagging a release. + +__Coverage-guided fuzzing__ must pass billions of execs before tagging a release. Fuzzing is done using nonpublic code which may eventually get merged into this project. Until then, reports like OpenSSF Scorecard can't detect fuzz tests being used by this project. + +
+ +## Versions and API Changes +This project uses [Semantic Versioning](https://semver.org), so the API is always backwards compatible unless the major version number changes. + +These functions have signatures identical to encoding/json and their API will continue to match `encoding/json` even after major new releases: +`Marshal`, `Unmarshal`, `NewEncoder`, `NewDecoder`, `(*Encoder).Encode`, and `(*Decoder).Decode`. + +Exclusions from SemVer: +- Newly added API documented as "subject to change". +- Newly added API in the master branch that has never been tagged in non-beta release. +- If function parameters are unchanged, bug fixes that change behavior (e.g. return error for edge case was missed in prior version). We try to highlight these in the release notes and add extended beta period. E.g. [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). + +This project avoids breaking changes to behavior of encoding and decoding functions unless required to improve conformance with supported RFCs (e.g. RFC 8949, RFC 8742, etc.) Visible changes that don't improve conformance to standards are typically made available as new opt-in settings or new functions. + +## Code of Conduct + +This project has adopted the [Contributor Covenant Code of Conduct](CODE_OF_CONDUCT.md). Contact [faye.github@gmail.com](mailto:faye.github@gmail.com) with any questions or comments. + +## Contributing + +Please open an issue before beginning work on a PR. The improvement may have already been considered, etc. + +For more info, see [How to Contribute](CONTRIBUTING.md). + +## Security Policy + +Security fixes are provided for the latest released version of fxamacker/cbor. + +For the full text of the Security Policy, see [SECURITY.md](SECURITY.md). + +## Acknowledgements + +Many thanks to all the contributors on this project! + +I'm especially grateful to Bastian Müller and Dieter Shirley for suggesting and collaborating on CBOR stream mode, and much more. + +I'm very grateful to Stefan Tatschner, Yawning Angel, Jernej Kos, x448, ZenGround0, and Jakob Borg for their contributions or support in the very early days. + +Big thanks to Ben Luddy for his contributions in v2.6.0 and v2.7.0. + +This library clearly wouldn't be possible without Carsten Bormann authoring CBOR RFCs. + +Special thanks to Laurence Lundblade and Jeffrey Yasskin for their help on IETF mailing list or at [7049bis](https://github.com/cbor-wg/CBORbis). + +Huge thanks to The Go Authors for creating a fun and practical programming language with batteries included! + +This library uses `x448/float16` which used to be included. As a standalone package, `x448/float16` is useful to other projects as well. + +## License + +Copyright © 2019-2024 [Faye Amacker](https://github.com/fxamacker). + +fxamacker/cbor is licensed under the MIT License. See [LICENSE](LICENSE) for the full license text. + +
diff --git a/vendor/github.com/fxamacker/cbor/v2/SECURITY.md b/vendor/github.com/fxamacker/cbor/v2/SECURITY.md new file mode 100644 index 0000000000..9c05146d16 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/SECURITY.md @@ -0,0 +1,7 @@ +# Security Policy + +Security fixes are provided for the latest released version of fxamacker/cbor. + +If the security vulnerability is already known to the public, then you can open an issue as a bug report. + +To report security vulnerabilities not yet known to the public, please email faye.github@gmail.com and allow time for the problem to be resolved before reporting it to the public. diff --git a/vendor/github.com/fxamacker/cbor/v2/bytestring.go b/vendor/github.com/fxamacker/cbor/v2/bytestring.go new file mode 100644 index 0000000000..823bff12ce --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/bytestring.go @@ -0,0 +1,63 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "errors" +) + +// ByteString represents CBOR byte string (major type 2). ByteString can be used +// when using a Go []byte is not possible or convenient. For example, Go doesn't +// allow []byte as map key, so ByteString can be used to support data formats +// having CBOR map with byte string keys. ByteString can also be used to +// encode invalid UTF-8 string as CBOR byte string. +// See DecOption.MapKeyByteStringMode for more details. +type ByteString string + +// Bytes returns bytes representing ByteString. +func (bs ByteString) Bytes() []byte { + return []byte(bs) +} + +// MarshalCBOR encodes ByteString as CBOR byte string (major type 2). +func (bs ByteString) MarshalCBOR() ([]byte, error) { + e := getEncodeBuffer() + defer putEncodeBuffer(e) + + // Encode length + encodeHead(e, byte(cborTypeByteString), uint64(len(bs))) + + // Encode data + buf := make([]byte, e.Len()+len(bs)) + n := copy(buf, e.Bytes()) + copy(buf[n:], bs) + + return buf, nil +} + +// UnmarshalCBOR decodes CBOR byte string (major type 2) to ByteString. +// Decoding CBOR null and CBOR undefined sets ByteString to be empty. +func (bs *ByteString) UnmarshalCBOR(data []byte) error { + if bs == nil { + return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer") + } + + // Decoding CBOR null and CBOR undefined to ByteString resets data. + // This behavior is similar to decoding CBOR null and CBOR undefined to []byte. + if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) { + *bs = "" + return nil + } + + d := decoder{data: data, dm: defaultDecMode} + + // Check if CBOR data type is byte string + if typ := d.nextCBORType(); typ != cborTypeByteString { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeByteString.String()} + } + + b, _ := d.parseByteString() + *bs = ByteString(b) + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/cache.go b/vendor/github.com/fxamacker/cbor/v2/cache.go new file mode 100644 index 0000000000..ea0f39e24f --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/cache.go @@ -0,0 +1,363 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +package cbor + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +type encodeFuncs struct { + ef encodeFunc + ief isEmptyFunc +} + +var ( + decodingStructTypeCache sync.Map // map[reflect.Type]*decodingStructType + encodingStructTypeCache sync.Map // map[reflect.Type]*encodingStructType + encodeFuncCache sync.Map // map[reflect.Type]encodeFuncs + typeInfoCache sync.Map // map[reflect.Type]*typeInfo +) + +type specialType int + +const ( + specialTypeNone specialType = iota + specialTypeUnmarshalerIface + specialTypeEmptyIface + specialTypeIface + specialTypeTag + specialTypeTime +) + +type typeInfo struct { + elemTypeInfo *typeInfo + keyTypeInfo *typeInfo + typ reflect.Type + kind reflect.Kind + nonPtrType reflect.Type + nonPtrKind reflect.Kind + spclType specialType +} + +func newTypeInfo(t reflect.Type) *typeInfo { + tInfo := typeInfo{typ: t, kind: t.Kind()} + + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + + k := t.Kind() + + tInfo.nonPtrType = t + tInfo.nonPtrKind = k + + if k == reflect.Interface { + if t.NumMethod() == 0 { + tInfo.spclType = specialTypeEmptyIface + } else { + tInfo.spclType = specialTypeIface + } + } else if t == typeTag { + tInfo.spclType = specialTypeTag + } else if t == typeTime { + tInfo.spclType = specialTypeTime + } else if reflect.PtrTo(t).Implements(typeUnmarshaler) { + tInfo.spclType = specialTypeUnmarshalerIface + } + + switch k { + case reflect.Array, reflect.Slice: + tInfo.elemTypeInfo = getTypeInfo(t.Elem()) + case reflect.Map: + tInfo.keyTypeInfo = getTypeInfo(t.Key()) + tInfo.elemTypeInfo = getTypeInfo(t.Elem()) + } + + return &tInfo +} + +type decodingStructType struct { + fields fields + fieldIndicesByName map[string]int + err error + toArray bool +} + +// The stdlib errors.Join was introduced in Go 1.20, and we still support Go 1.17, so instead, +// here's a very basic implementation of an aggregated error. 
+type multierror []error + +func (m multierror) Error() string { + var sb strings.Builder + for i, err := range m { + sb.WriteString(err.Error()) + if i < len(m)-1 { + sb.WriteString(", ") + } + } + return sb.String() +} + +func getDecodingStructType(t reflect.Type) *decodingStructType { + if v, _ := decodingStructTypeCache.Load(t); v != nil { + return v.(*decodingStructType) + } + + flds, structOptions := getFields(t) + + toArray := hasToArrayOption(structOptions) + + var errs []error + for i := 0; i < len(flds); i++ { + if flds[i].keyAsInt { + nameAsInt, numErr := strconv.Atoi(flds[i].name) + if numErr != nil { + errs = append(errs, errors.New("cbor: failed to parse field name \""+flds[i].name+"\" to int ("+numErr.Error()+")")) + break + } + flds[i].nameAsInt = int64(nameAsInt) + } + + flds[i].typInfo = getTypeInfo(flds[i].typ) + } + + fieldIndicesByName := make(map[string]int, len(flds)) + for i, fld := range flds { + if _, ok := fieldIndicesByName[fld.name]; ok { + errs = append(errs, fmt.Errorf("cbor: two or more fields of %v have the same name %q", t, fld.name)) + continue + } + fieldIndicesByName[fld.name] = i + } + + var err error + { + var multi multierror + for _, each := range errs { + if each != nil { + multi = append(multi, each) + } + } + if len(multi) == 1 { + err = multi[0] + } else if len(multi) > 1 { + err = multi + } + } + + structType := &decodingStructType{ + fields: flds, + fieldIndicesByName: fieldIndicesByName, + err: err, + toArray: toArray, + } + decodingStructTypeCache.Store(t, structType) + return structType +} + +type encodingStructType struct { + fields fields + bytewiseFields fields + lengthFirstFields fields + omitEmptyFieldsIdx []int + err error + toArray bool +} + +func (st *encodingStructType) getFields(em *encMode) fields { + switch em.sort { + case SortNone, SortFastShuffle: + return st.fields + case SortLengthFirst: + return st.lengthFirstFields + default: + return st.bytewiseFields + } +} + +type bytewiseFieldSorter struct { + fields fields +} + +func (x *bytewiseFieldSorter) Len() int { + return len(x.fields) +} + +func (x *bytewiseFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *bytewiseFieldSorter) Less(i, j int) bool { + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 +} + +type lengthFirstFieldSorter struct { + fields fields +} + +func (x *lengthFirstFieldSorter) Len() int { + return len(x.fields) +} + +func (x *lengthFirstFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *lengthFirstFieldSorter) Less(i, j int) bool { + if len(x.fields[i].cborName) != len(x.fields[j].cborName) { + return len(x.fields[i].cborName) < len(x.fields[j].cborName) + } + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 +} + +func getEncodingStructType(t reflect.Type) (*encodingStructType, error) { + if v, _ := encodingStructTypeCache.Load(t); v != nil { + structType := v.(*encodingStructType) + return structType, structType.err + } + + flds, structOptions := getFields(t) + + if hasToArrayOption(structOptions) { + return getEncodingStructToArrayType(t, flds) + } + + var err error + var hasKeyAsInt bool + var hasKeyAsStr bool + var omitEmptyIdx []int + e := getEncodeBuffer() + for i := 0; i < len(flds); i++ { + // Get field's encodeFunc + flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ) + if flds[i].ef == nil { + err = &UnsupportedTypeError{t} + break + } + + // Encode field name + if flds[i].keyAsInt { + nameAsInt, numErr := 
strconv.Atoi(flds[i].name) + if numErr != nil { + err = errors.New("cbor: failed to parse field name \"" + flds[i].name + "\" to int (" + numErr.Error() + ")") + break + } + flds[i].nameAsInt = int64(nameAsInt) + if nameAsInt >= 0 { + encodeHead(e, byte(cborTypePositiveInt), uint64(nameAsInt)) + } else { + n := nameAsInt*(-1) - 1 + encodeHead(e, byte(cborTypeNegativeInt), uint64(n)) + } + flds[i].cborName = make([]byte, e.Len()) + copy(flds[i].cborName, e.Bytes()) + e.Reset() + + hasKeyAsInt = true + } else { + encodeHead(e, byte(cborTypeTextString), uint64(len(flds[i].name))) + flds[i].cborName = make([]byte, e.Len()+len(flds[i].name)) + n := copy(flds[i].cborName, e.Bytes()) + copy(flds[i].cborName[n:], flds[i].name) + e.Reset() + + // If cborName contains a text string, then cborNameByteString contains a + // string that has the byte string major type but is otherwise identical to + // cborName. + flds[i].cborNameByteString = make([]byte, len(flds[i].cborName)) + copy(flds[i].cborNameByteString, flds[i].cborName) + // Reset encoded CBOR type to byte string, preserving the "additional + // information" bits: + flds[i].cborNameByteString[0] = byte(cborTypeByteString) | + getAdditionalInformation(flds[i].cborNameByteString[0]) + + hasKeyAsStr = true + } + + // Check if field can be omitted when empty + if flds[i].omitEmpty { + omitEmptyIdx = append(omitEmptyIdx, i) + } + } + putEncodeBuffer(e) + + if err != nil { + structType := &encodingStructType{err: err} + encodingStructTypeCache.Store(t, structType) + return structType, structType.err + } + + // Sort fields by canonical order + bytewiseFields := make(fields, len(flds)) + copy(bytewiseFields, flds) + sort.Sort(&bytewiseFieldSorter{bytewiseFields}) + + lengthFirstFields := bytewiseFields + if hasKeyAsInt && hasKeyAsStr { + lengthFirstFields = make(fields, len(flds)) + copy(lengthFirstFields, flds) + sort.Sort(&lengthFirstFieldSorter{lengthFirstFields}) + } + + structType := &encodingStructType{ + fields: flds, + bytewiseFields: bytewiseFields, + lengthFirstFields: lengthFirstFields, + omitEmptyFieldsIdx: omitEmptyIdx, + } + + encodingStructTypeCache.Store(t, structType) + return structType, structType.err +} + +func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructType, error) { + for i := 0; i < len(flds); i++ { + // Get field's encodeFunc + flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ) + if flds[i].ef == nil { + structType := &encodingStructType{err: &UnsupportedTypeError{t}} + encodingStructTypeCache.Store(t, structType) + return structType, structType.err + } + } + + structType := &encodingStructType{ + fields: flds, + toArray: true, + } + encodingStructTypeCache.Store(t, structType) + return structType, structType.err +} + +func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc) { + if v, _ := encodeFuncCache.Load(t); v != nil { + fs := v.(encodeFuncs) + return fs.ef, fs.ief + } + ef, ief := getEncodeFuncInternal(t) + encodeFuncCache.Store(t, encodeFuncs{ef, ief}) + return ef, ief +} + +func getTypeInfo(t reflect.Type) *typeInfo { + if v, _ := typeInfoCache.Load(t); v != nil { + return v.(*typeInfo) + } + tInfo := newTypeInfo(t) + typeInfoCache.Store(t, tInfo) + return tInfo +} + +func hasToArrayOption(tag string) bool { + s := ",toarray" + idx := strings.Index(tag, s) + return idx >= 0 && (len(tag) == idx+len(s) || tag[idx+len(s)] == ',') +} diff --git a/vendor/github.com/fxamacker/cbor/v2/common.go b/vendor/github.com/fxamacker/cbor/v2/common.go new file mode 100644 index 
0000000000..ec038a49ec --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/common.go @@ -0,0 +1,182 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "fmt" + "strconv" +) + +type cborType uint8 + +const ( + cborTypePositiveInt cborType = 0x00 + cborTypeNegativeInt cborType = 0x20 + cborTypeByteString cborType = 0x40 + cborTypeTextString cborType = 0x60 + cborTypeArray cborType = 0x80 + cborTypeMap cborType = 0xa0 + cborTypeTag cborType = 0xc0 + cborTypePrimitives cborType = 0xe0 +) + +func (t cborType) String() string { + switch t { + case cborTypePositiveInt: + return "positive integer" + case cborTypeNegativeInt: + return "negative integer" + case cborTypeByteString: + return "byte string" + case cborTypeTextString: + return "UTF-8 text string" + case cborTypeArray: + return "array" + case cborTypeMap: + return "map" + case cborTypeTag: + return "tag" + case cborTypePrimitives: + return "primitives" + default: + return "Invalid type " + strconv.Itoa(int(t)) + } +} + +type additionalInformation uint8 + +const ( + maxAdditionalInformationWithoutArgument = 23 + additionalInformationWith1ByteArgument = 24 + additionalInformationWith2ByteArgument = 25 + additionalInformationWith4ByteArgument = 26 + additionalInformationWith8ByteArgument = 27 + + // For major type 7. + additionalInformationAsFalse = 20 + additionalInformationAsTrue = 21 + additionalInformationAsNull = 22 + additionalInformationAsUndefined = 23 + additionalInformationAsFloat16 = 25 + additionalInformationAsFloat32 = 26 + additionalInformationAsFloat64 = 27 + + // For major type 2, 3, 4, 5. + additionalInformationAsIndefiniteLengthFlag = 31 +) + +const ( + maxSimpleValueInAdditionalInformation = 23 + minSimpleValueIn1ByteArgument = 32 +) + +func (ai additionalInformation) isIndefiniteLength() bool { + return ai == additionalInformationAsIndefiniteLengthFlag +} + +const ( + // From RFC 8949 Section 3: + // "The initial byte of each encoded data item contains both information about the major type + // (the high-order 3 bits, described in Section 3.1) and additional information + // (the low-order 5 bits)." + + // typeMask is used to extract major type in initial byte of encoded data item. + typeMask = 0xe0 + + // additionalInformationMask is used to extract additional information in initial byte of encoded data item. 
+ additionalInformationMask = 0x1f +) + +func getType(raw byte) cborType { + return cborType(raw & typeMask) +} + +func getAdditionalInformation(raw byte) byte { + return raw & additionalInformationMask +} + +func isBreakFlag(raw byte) bool { + return raw == cborBreakFlag +} + +func parseInitialByte(b byte) (t cborType, ai byte) { + return getType(b), getAdditionalInformation(b) +} + +const ( + tagNumRFC3339Time = 0 + tagNumEpochTime = 1 + tagNumUnsignedBignum = 2 + tagNumNegativeBignum = 3 + tagNumExpectedLaterEncodingBase64URL = 21 + tagNumExpectedLaterEncodingBase64 = 22 + tagNumExpectedLaterEncodingBase16 = 23 + tagNumSelfDescribedCBOR = 55799 +) + +const ( + cborBreakFlag = byte(0xff) + cborByteStringWithIndefiniteLengthHead = byte(0x5f) + cborTextStringWithIndefiniteLengthHead = byte(0x7f) + cborArrayWithIndefiniteLengthHead = byte(0x9f) + cborMapWithIndefiniteLengthHead = byte(0xbf) +) + +var ( + cborFalse = []byte{0xf4} + cborTrue = []byte{0xf5} + cborNil = []byte{0xf6} + cborNaN = []byte{0xf9, 0x7e, 0x00} + cborPositiveInfinity = []byte{0xf9, 0x7c, 0x00} + cborNegativeInfinity = []byte{0xf9, 0xfc, 0x00} +) + +// validBuiltinTag checks that supported built-in tag numbers are followed by expected content types. +func validBuiltinTag(tagNum uint64, contentHead byte) error { + t := getType(contentHead) + switch tagNum { + case tagNumRFC3339Time: + // Tag content (date/time text string in RFC 3339 format) must be string type. + if t != cborTypeTextString { + return newInadmissibleTagContentTypeError( + tagNumRFC3339Time, + "text string", + t.String()) + } + return nil + + case tagNumEpochTime: + // Tag content (epoch date/time) must be uint, int, or float type. + if t != cborTypePositiveInt && t != cborTypeNegativeInt && (contentHead < 0xf9 || contentHead > 0xfb) { + return newInadmissibleTagContentTypeError( + tagNumEpochTime, + "integer or floating-point number", + t.String()) + } + return nil + + case tagNumUnsignedBignum, tagNumNegativeBignum: + // Tag content (bignum) must be byte type. + if t != cborTypeByteString { + return newInadmissibleTagContentTypeErrorf( + fmt.Sprintf( + "tag number %d or %d must be followed by byte string, got %s", + tagNumUnsignedBignum, + tagNumNegativeBignum, + t.String(), + )) + } + return nil + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // From RFC 8949 3.4.5.2: + // The data item tagged can be a byte string or any other data item. In the latter + // case, the tag applies to all of the byte string data items contained in the data + // item, except for those contained in a nested data item tagged with an expected + // conversion. + return nil + } + + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/decode.go b/vendor/github.com/fxamacker/cbor/v2/decode.go new file mode 100644 index 0000000000..85842ac736 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/decode.go @@ -0,0 +1,3187 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "encoding" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "io" + "math" + "math/big" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/x448/float16" +) + +// Unmarshal parses the CBOR-encoded data into the value pointed to by v +// using default decoding options. If v is nil, not a pointer, or +// a nil pointer, Unmarshal returns an error. 
+// +// To unmarshal CBOR into a value implementing the Unmarshaler interface, +// Unmarshal calls that value's UnmarshalCBOR method with a valid +// CBOR value. +// +// To unmarshal CBOR byte string into a value implementing the +// encoding.BinaryUnmarshaler interface, Unmarshal calls that value's +// UnmarshalBinary method with decoded CBOR byte string. +// +// To unmarshal CBOR into a pointer, Unmarshal sets the pointer to nil +// if CBOR data is null (0xf6) or undefined (0xf7). Otherwise, Unmarshal +// unmarshals CBOR into the value pointed to by the pointer. If the +// pointer is nil, Unmarshal creates a new value for it to point to. +// +// To unmarshal CBOR into an empty interface value, Unmarshal uses the +// following rules: +// +// CBOR booleans decode to bool. +// CBOR positive integers decode to uint64. +// CBOR negative integers decode to int64 (big.Int if value overflows). +// CBOR floating points decode to float64. +// CBOR byte strings decode to []byte. +// CBOR text strings decode to string. +// CBOR arrays decode to []interface{}. +// CBOR maps decode to map[interface{}]interface{}. +// CBOR null and undefined values decode to nil. +// CBOR times (tag 0 and 1) decode to time.Time. +// CBOR bignums (tag 2 and 3) decode to big.Int. +// CBOR tags with an unrecognized number decode to cbor.Tag +// +// To unmarshal a CBOR array into a slice, Unmarshal allocates a new slice +// if the CBOR array is empty or slice capacity is less than CBOR array length. +// Otherwise Unmarshal overwrites existing elements, and sets slice length +// to CBOR array length. +// +// To unmarshal a CBOR array into a Go array, Unmarshal decodes CBOR array +// elements into Go array elements. If the Go array is smaller than the +// CBOR array, the extra CBOR array elements are discarded. If the CBOR +// array is smaller than the Go array, the extra Go array elements are +// set to zero values. +// +// To unmarshal a CBOR array into a struct, struct must have a special field "_" +// with struct tag `cbor:",toarray"`. Go array elements are decoded into struct +// fields. Any "omitempty" struct field tag option is ignored in this case. +// +// To unmarshal a CBOR map into a map, Unmarshal allocates a new map only if the +// map is nil. Otherwise Unmarshal reuses the existing map and keeps existing +// entries. Unmarshal stores key-value pairs from the CBOR map into Go map. +// See DecOptions.DupMapKey to enable duplicate map key detection. +// +// To unmarshal a CBOR map into a struct, Unmarshal matches CBOR map keys to the +// keys in the following priority: +// +// 1. "cbor" key in struct field tag, +// 2. "json" key in struct field tag, +// 3. struct field name. +// +// Unmarshal tries an exact match for field name, then a case-insensitive match. +// Map key-value pairs without corresponding struct fields are ignored. See +// DecOptions.ExtraReturnErrors to return error at unknown field. +// +// To unmarshal a CBOR text string into a time.Time value, Unmarshal parses text +// string formatted in RFC3339. To unmarshal a CBOR integer/float into a +// time.Time value, Unmarshal creates an unix time with integer/float as seconds +// and fractional seconds since January 1, 1970 UTC. As a special case, Infinite +// and NaN float values decode to time.Time's zero value. +// +// To unmarshal CBOR null (0xf6) and undefined (0xf7) values into a +// slice/map/pointer, Unmarshal sets Go value to nil. 
Because null is often +// used to mean "not present", unmarshalling CBOR null and undefined value +// into any other Go type has no effect and returns no error. +// +// Unmarshal supports CBOR tag 55799 (self-describe CBOR), tag 0 and 1 (time), +// and tag 2 and 3 (bignum). +// +// Unmarshal returns ExtraneousDataError error (without decoding into v) +// if there are any remaining bytes following the first valid CBOR data item. +// See UnmarshalFirst, if you want to unmarshal only the first +// CBOR data item without ExtraneousDataError caused by remaining bytes. +func Unmarshal(data []byte, v interface{}) error { + return defaultDecMode.Unmarshal(data, v) +} + +// UnmarshalFirst parses the first CBOR data item into the value pointed to by v +// using default decoding options. Any remaining bytes are returned in rest. +// +// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. +// +// See the documentation for Unmarshal for details. +func UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) { + return defaultDecMode.UnmarshalFirst(data, v) +} + +// Valid checks whether data is a well-formed encoded CBOR data item and +// that it complies with default restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +// +// WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity) +// and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed". +// +// Deprecated: Valid is kept for compatibility and should not be used. +// Use Wellformed instead because it has a more appropriate name. +func Valid(data []byte) error { + return defaultDecMode.Valid(data) +} + +// Wellformed checks whether data is a well-formed encoded CBOR data item and +// that it complies with default restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +func Wellformed(data []byte) error { + return defaultDecMode.Wellformed(data) +} + +// Unmarshaler is the interface implemented by types that wish to unmarshal +// CBOR data themselves. The input is a valid CBOR value. UnmarshalCBOR +// must copy the CBOR data if it needs to use it after returning. +type Unmarshaler interface { + UnmarshalCBOR([]byte) error +} + +// InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +type InvalidUnmarshalError struct { + s string +} + +func (e *InvalidUnmarshalError) Error() string { + return e.s +} + +// UnmarshalTypeError describes a CBOR value that can't be decoded to a Go type. +type UnmarshalTypeError struct { + CBORType string // type of CBOR value + GoType string // type of Go value it could not be decoded into + StructFieldName string // name of the struct field holding the Go value (optional) + errorMsg string // additional error message (optional) +} + +func (e *UnmarshalTypeError) Error() string { + var s string + if e.StructFieldName != "" { + s = "cbor: cannot unmarshal " + e.CBORType + " into Go struct field " + e.StructFieldName + " of type " + e.GoType + } else { + s = "cbor: cannot unmarshal " + e.CBORType + " into Go value of type " + e.GoType + } + if e.errorMsg != "" { + s += " (" + e.errorMsg + ")" + } + return s +} + +// InvalidMapKeyTypeError describes invalid Go map key type when decoding CBOR map. +// For example, Go doesn't allow slice as map key. 
+type InvalidMapKeyTypeError struct { + GoType string +} + +func (e *InvalidMapKeyTypeError) Error() string { + return "cbor: invalid map key type: " + e.GoType +} + +// DupMapKeyError describes detected duplicate map key in CBOR map. +type DupMapKeyError struct { + Key interface{} + Index int +} + +func (e *DupMapKeyError) Error() string { + return fmt.Sprintf("cbor: found duplicate map key \"%v\" at map element index %d", e.Key, e.Index) +} + +// UnknownFieldError describes detected unknown field in CBOR map when decoding to Go struct. +type UnknownFieldError struct { + Index int +} + +func (e *UnknownFieldError) Error() string { + return fmt.Sprintf("cbor: found unknown field at map element index %d", e.Index) +} + +// UnacceptableDataItemError is returned when unmarshaling a CBOR input that contains a data item +// that is not acceptable to a specific CBOR-based application protocol ("invalid or unexpected" as +// described in RFC 8949 Section 5 Paragraph 3). +type UnacceptableDataItemError struct { + CBORType string + Message string +} + +func (e UnacceptableDataItemError) Error() string { + return fmt.Sprintf("cbor: data item of cbor type %s is not accepted by protocol: %s", e.CBORType, e.Message) +} + +// ByteStringExpectedFormatError is returned when unmarshaling CBOR byte string fails when +// using non-default ByteStringExpectedFormat decoding option that makes decoder expect +// a specified format such as base64, hex, etc. +type ByteStringExpectedFormatError struct { + expectedFormatOption ByteStringExpectedFormatMode + err error +} + +func newByteStringExpectedFormatError(expectedFormatOption ByteStringExpectedFormatMode, err error) *ByteStringExpectedFormatError { + return &ByteStringExpectedFormatError{expectedFormatOption, err} +} + +func (e *ByteStringExpectedFormatError) Error() string { + switch e.expectedFormatOption { + case ByteStringExpectedBase64URL: + return fmt.Sprintf("cbor: failed to decode base64url from byte string: %s", e.err) + + case ByteStringExpectedBase64: + return fmt.Sprintf("cbor: failed to decode base64 from byte string: %s", e.err) + + case ByteStringExpectedBase16: + return fmt.Sprintf("cbor: failed to decode hex from byte string: %s", e.err) + + default: + return fmt.Sprintf("cbor: failed to decode byte string in expected format %d: %s", e.expectedFormatOption, e.err) + } +} + +func (e *ByteStringExpectedFormatError) Unwrap() error { + return e.err +} + +// InadmissibleTagContentTypeError is returned when unmarshaling built-in CBOR tags +// fails because of inadmissible type for tag content. Currently, the built-in +// CBOR tags in this codec are tags 0-3 and 21-23. +// See "Tag validity" in RFC 8949 Section 5.3.2. 
+type InadmissibleTagContentTypeError struct { + s string + tagNum int + expectedTagContentType string + gotTagContentType string +} + +func newInadmissibleTagContentTypeError( + tagNum int, + expectedTagContentType string, + gotTagContentType string, +) *InadmissibleTagContentTypeError { + return &InadmissibleTagContentTypeError{ + tagNum: tagNum, + expectedTagContentType: expectedTagContentType, + gotTagContentType: gotTagContentType, + } +} + +func newInadmissibleTagContentTypeErrorf(s string) *InadmissibleTagContentTypeError { + return &InadmissibleTagContentTypeError{s: "cbor: " + s} //nolint:goconst // ignore "cbor" +} + +func (e *InadmissibleTagContentTypeError) Error() string { + if e.s == "" { + return fmt.Sprintf( + "cbor: tag number %d must be followed by %s, got %s", + e.tagNum, + e.expectedTagContentType, + e.gotTagContentType, + ) + } + return e.s +} + +// DupMapKeyMode specifies how to enforce duplicate map key. Two map keys are considered duplicates if: +// 1. When decoding into a struct, both keys match the same struct field. The keys are also +// considered duplicates if neither matches any field and decoding to interface{} would produce +// equal (==) values for both keys. +// 2. When decoding into a map, both keys are equal (==) when decoded into values of the +// destination map's key type. +type DupMapKeyMode int + +const ( + // DupMapKeyQuiet doesn't enforce duplicate map key. Decoder quietly (no error) + // uses faster of "keep first" or "keep last" depending on Go data type and other factors. + DupMapKeyQuiet DupMapKeyMode = iota + + // DupMapKeyEnforcedAPF enforces detection and rejection of duplicate map keys. + // APF means "Allow Partial Fill" and the destination map or struct can be partially filled. + // If a duplicate map key is detected, DupMapKeyError is returned without further decoding + // of the map. It's the caller's responsibility to respond to DupMapKeyError by + // discarding the partially filled result if their protocol requires it. + // WARNING: using DupMapKeyEnforcedAPF will decrease performance and increase memory use. + DupMapKeyEnforcedAPF + + maxDupMapKeyMode +) + +func (dmkm DupMapKeyMode) valid() bool { + return dmkm >= 0 && dmkm < maxDupMapKeyMode +} + +// IndefLengthMode specifies whether to allow indefinite length items. +type IndefLengthMode int + +const ( + // IndefLengthAllowed allows indefinite length items. + IndefLengthAllowed IndefLengthMode = iota + + // IndefLengthForbidden disallows indefinite length items. + IndefLengthForbidden + + maxIndefLengthMode +) + +func (m IndefLengthMode) valid() bool { + return m >= 0 && m < maxIndefLengthMode +} + +// TagsMode specifies whether to allow CBOR tags. +type TagsMode int + +const ( + // TagsAllowed allows CBOR tags. + TagsAllowed TagsMode = iota + + // TagsForbidden disallows CBOR tags. + TagsForbidden + + maxTagsMode +) + +func (tm TagsMode) valid() bool { + return tm >= 0 && tm < maxTagsMode +} + +// IntDecMode specifies which Go type (int64, uint64, or big.Int) should +// be used when decoding CBOR integers (major type 0 and 1) to Go interface{}. +type IntDecMode int + +const ( + // IntDecConvertNone affects how CBOR integers (major type 0 and 1) decode to Go interface{}. 
+ // It decodes CBOR unsigned integer (major type 0) to:
+ // - uint64
+ // It decodes CBOR negative integer (major type 1) to:
+ // - int64 if value fits
+ // - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64
+ IntDecConvertNone IntDecMode = iota
+
+ // IntDecConvertSigned affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
+ // It decodes CBOR integers (major type 0 and 1) to:
+ // - int64 if value fits
+ // - big.Int or *big.Int (see BigIntDecMode) if value < math.MinInt64
+ // - return UnmarshalTypeError if value > math.MaxInt64
+ // Deprecated: IntDecConvertSigned should not be used.
+ // Please use other options, such as IntDecConvertSignedOrFail, IntDecConvertSignedOrBigInt, IntDecConvertNone.
+ IntDecConvertSigned
+
+ // IntDecConvertSignedOrFail affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
+ // It decodes CBOR integers (major type 0 and 1) to:
+ // - int64 if value fits
+ // - return UnmarshalTypeError if value doesn't fit into int64
+ IntDecConvertSignedOrFail
+
+ // IntDecConvertSignedOrBigInt affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
+ // It makes CBOR integers (major type 0 and 1) decode to:
+ // - int64 if value fits
+ // - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64
+ IntDecConvertSignedOrBigInt
+
+ maxIntDec
+)
+
+func (idm IntDecMode) valid() bool {
+ return idm >= 0 && idm < maxIntDec
+}
+
+// MapKeyByteStringMode specifies how to decode CBOR byte string (major type 2)
+// as Go map key when decoding CBOR map key into an empty Go interface value.
+// Specifically, this option applies when decoding CBOR map into
+// - Go empty interface, or
+// - Go map with empty interface as key type.
+// The CBOR map key types handled by this option are
+// - byte string
+// - tagged byte string
+// - nested tagged byte string
+type MapKeyByteStringMode int
+
+const (
+ // MapKeyByteStringAllowed allows CBOR byte string to be decoded as Go map key.
+ // Since Go doesn't allow []byte as map key, CBOR byte string is decoded to
+ // ByteString which has underlying string type.
+ // This is the default setting.
+ MapKeyByteStringAllowed MapKeyByteStringMode = iota
+
+ // MapKeyByteStringForbidden forbids CBOR byte string being decoded as Go map key.
+ // Attempting to decode CBOR byte string as map key into empty interface value
+ // returns a decoding error.
+ MapKeyByteStringForbidden
+
+ maxMapKeyByteStringMode
+)
+
+func (mkbsm MapKeyByteStringMode) valid() bool {
+ return mkbsm >= 0 && mkbsm < maxMapKeyByteStringMode
+}
+
+// ExtraDecErrorCond specifies extra conditions that should be treated as errors.
+type ExtraDecErrorCond uint
+
+// ExtraDecErrorNone indicates no extra error condition.
+const ExtraDecErrorNone ExtraDecErrorCond = 0
+
+const (
+ // ExtraDecErrorUnknownField indicates error condition when destination
+ // Go struct doesn't have a field matching a CBOR map key.
+ ExtraDecErrorUnknownField ExtraDecErrorCond = 1 << iota
+
+ maxExtraDecError
+)
+
+func (ec ExtraDecErrorCond) valid() bool {
+ return ec < maxExtraDecError
+}
+
+// UTF8Mode option specifies if decoder should
+// decode CBOR Text containing invalid UTF-8 string.
+type UTF8Mode int
+
+const (
+ // UTF8RejectInvalid rejects CBOR Text containing
+ // invalid UTF-8 string.
+ UTF8RejectInvalid UTF8Mode = iota
+
+ // UTF8DecodeInvalid allows decoding CBOR Text containing
+ // invalid UTF-8 string.
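+ // No UTF-8 validation is performed in this mode; any invalid bytes are carried
+ // into the resulting Go string unchanged.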
+ UTF8DecodeInvalid + + maxUTF8Mode +) + +func (um UTF8Mode) valid() bool { + return um >= 0 && um < maxUTF8Mode +} + +// FieldNameMatchingMode specifies how string keys in CBOR maps are matched to Go struct field names. +type FieldNameMatchingMode int + +const ( + // FieldNameMatchingPreferCaseSensitive prefers to decode map items into struct fields whose names (or tag + // names) exactly match the item's key. If there is no such field, a map item will be decoded into a field whose + // name is a case-insensitive match for the item's key. + FieldNameMatchingPreferCaseSensitive FieldNameMatchingMode = iota + + // FieldNameMatchingCaseSensitive decodes map items only into a struct field whose name (or tag name) is an + // exact match for the item's key. + FieldNameMatchingCaseSensitive + + maxFieldNameMatchingMode +) + +func (fnmm FieldNameMatchingMode) valid() bool { + return fnmm >= 0 && fnmm < maxFieldNameMatchingMode +} + +// BigIntDecMode specifies how to decode CBOR bignum to Go interface{}. +type BigIntDecMode int + +const ( + // BigIntDecodeValue makes CBOR bignum decode to big.Int (instead of *big.Int) + // when unmarshalling into a Go interface{}. + BigIntDecodeValue BigIntDecMode = iota + + // BigIntDecodePointer makes CBOR bignum decode to *big.Int when + // unmarshalling into a Go interface{}. + BigIntDecodePointer + + maxBigIntDecMode +) + +func (bidm BigIntDecMode) valid() bool { + return bidm >= 0 && bidm < maxBigIntDecMode +} + +// ByteStringToStringMode specifies the behavior when decoding a CBOR byte string into a Go string. +type ByteStringToStringMode int + +const ( + // ByteStringToStringForbidden generates an error on an attempt to decode a CBOR byte string into a Go string. + ByteStringToStringForbidden ByteStringToStringMode = iota + + // ByteStringToStringAllowed permits decoding a CBOR byte string into a Go string. + ByteStringToStringAllowed + + // ByteStringToStringAllowedWithExpectedLaterEncoding permits decoding a CBOR byte string + // into a Go string. Also, if the byte string is enclosed (directly or indirectly) by one of + // the "expected later encoding" tags (numbers 21 through 23), the destination string will + // be populated by applying the designated text encoding to the contents of the input byte + // string. + ByteStringToStringAllowedWithExpectedLaterEncoding + + maxByteStringToStringMode +) + +func (bstsm ByteStringToStringMode) valid() bool { + return bstsm >= 0 && bstsm < maxByteStringToStringMode +} + +// FieldNameByteStringMode specifies the behavior when decoding a CBOR byte string map key as a Go struct field name. +type FieldNameByteStringMode int + +const ( + // FieldNameByteStringForbidden generates an error on an attempt to decode a CBOR byte string map key as a Go struct field name. + FieldNameByteStringForbidden FieldNameByteStringMode = iota + + // FieldNameByteStringAllowed permits CBOR byte string map keys to be recognized as Go struct field names. + FieldNameByteStringAllowed + + maxFieldNameByteStringMode +) + +func (fnbsm FieldNameByteStringMode) valid() bool { + return fnbsm >= 0 && fnbsm < maxFieldNameByteStringMode +} + +// UnrecognizedTagToAnyMode specifies how to decode unrecognized CBOR tag into an empty interface (any). +// Currently, recognized CBOR tag numbers are 0, 1, 2, 3, or registered by TagSet. +type UnrecognizedTagToAnyMode int + +const ( + // UnrecognizedTagNumAndContentToAny decodes CBOR tag number and tag content to cbor.Tag + // when decoding unrecognized CBOR tag into an empty interface. 
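+ // For example, an unregistered tag enclosing a text string decodes to a Tag
+ // value holding both the tag number and the decoded content.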
+ UnrecognizedTagNumAndContentToAny UnrecognizedTagToAnyMode = iota + + // UnrecognizedTagContentToAny decodes only CBOR tag content (into its default type) + // when decoding unrecognized CBOR tag into an empty interface. + UnrecognizedTagContentToAny + + maxUnrecognizedTagToAny +) + +func (uttam UnrecognizedTagToAnyMode) valid() bool { + return uttam >= 0 && uttam < maxUnrecognizedTagToAny +} + +// TimeTagToAnyMode specifies how to decode CBOR tag 0 and 1 into an empty interface (any). +// Based on the specified mode, Unmarshal can return a time.Time value or a time string in a specific format. +type TimeTagToAnyMode int + +const ( + // TimeTagToTime decodes CBOR tag 0 and 1 into a time.Time value + // when decoding tag 0 or 1 into an empty interface. + TimeTagToTime TimeTagToAnyMode = iota + + // TimeTagToRFC3339 decodes CBOR tag 0 and 1 into a time string in RFC3339 format + // when decoding tag 0 or 1 into an empty interface. + TimeTagToRFC3339 + + // TimeTagToRFC3339Nano decodes CBOR tag 0 and 1 into a time string in RFC3339Nano format + // when decoding tag 0 or 1 into an empty interface. + TimeTagToRFC3339Nano + + maxTimeTagToAnyMode +) + +func (tttam TimeTagToAnyMode) valid() bool { + return tttam >= 0 && tttam < maxTimeTagToAnyMode +} + +// SimpleValueRegistry is a registry of unmarshaling behaviors for each possible CBOR simple value +// number (0...23 and 32...255). +type SimpleValueRegistry struct { + rejected [256]bool +} + +// WithRejectedSimpleValue registers the given simple value as rejected. If the simple value is +// encountered in a CBOR input during unmarshaling, an UnacceptableDataItemError is returned. +func WithRejectedSimpleValue(sv SimpleValue) func(*SimpleValueRegistry) error { + return func(r *SimpleValueRegistry) error { + if sv >= 24 && sv <= 31 { + return fmt.Errorf("cbor: cannot set analog for reserved simple value %d", sv) + } + r.rejected[sv] = true + return nil + } +} + +// Creates a new SimpleValueRegistry. The registry state is initialized by executing the provided +// functions in order against a registry that is pre-populated with the defaults for all well-formed +// simple value numbers. +func NewSimpleValueRegistryFromDefaults(fns ...func(*SimpleValueRegistry) error) (*SimpleValueRegistry, error) { + var r SimpleValueRegistry + for _, fn := range fns { + if err := fn(&r); err != nil { + return nil, err + } + } + return &r, nil +} + +// NaNMode specifies how to decode floating-point values (major type 7, additional information 25 +// through 27) representing NaN (not-a-number). +type NaNMode int + +const ( + // NaNDecodeAllowed will decode NaN values to Go float32 or float64. + NaNDecodeAllowed NaNMode = iota + + // NaNDecodeForbidden will return an UnacceptableDataItemError on an attempt to decode a NaN value. + NaNDecodeForbidden + + maxNaNDecode +) + +func (ndm NaNMode) valid() bool { + return ndm >= 0 && ndm < maxNaNDecode +} + +// InfMode specifies how to decode floating-point values (major type 7, additional information 25 +// through 27) representing positive or negative infinity. +type InfMode int + +const ( + // InfDecodeAllowed will decode infinite values to Go float32 or float64. + InfDecodeAllowed InfMode = iota + + // InfDecodeForbidden will return an UnacceptableDataItemError on an attempt to decode an + // infinite value. 
+ InfDecodeForbidden + + maxInfDecode +) + +func (idm InfMode) valid() bool { + return idm >= 0 && idm < maxInfDecode +} + +// ByteStringToTimeMode specifies the behavior when decoding a CBOR byte string into a Go time.Time. +type ByteStringToTimeMode int + +const ( + // ByteStringToTimeForbidden generates an error on an attempt to decode a CBOR byte string into a Go time.Time. + ByteStringToTimeForbidden ByteStringToTimeMode = iota + + // ByteStringToTimeAllowed permits decoding a CBOR byte string into a Go time.Time. + ByteStringToTimeAllowed + + maxByteStringToTimeMode +) + +func (bttm ByteStringToTimeMode) valid() bool { + return bttm >= 0 && bttm < maxByteStringToTimeMode +} + +// ByteStringExpectedFormatMode specifies how to decode CBOR byte string into Go byte slice +// when the byte string is NOT enclosed in CBOR tag 21, 22, or 23. An error is returned if +// the CBOR byte string does not contain the expected format (e.g. base64) specified. +// For tags 21-23, see "Expected Later Encoding for CBOR-to-JSON Converters" +// in RFC 8949 Section 3.4.5.2. +type ByteStringExpectedFormatMode int + +const ( + // ByteStringExpectedFormatNone copies the unmodified CBOR byte string into Go byte slice + // if the byte string is not tagged by CBOR tag 21-23. + ByteStringExpectedFormatNone ByteStringExpectedFormatMode = iota + + // ByteStringExpectedBase64URL expects CBOR byte strings to contain base64url-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base64url-encoded bytes into Go slice. + ByteStringExpectedBase64URL + + // ByteStringExpectedBase64 expects CBOR byte strings to contain base64-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base64-encoded bytes into Go slice. + ByteStringExpectedBase64 + + // ByteStringExpectedBase16 expects CBOR byte strings to contain base16-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base16-encoded bytes into Go slice. + ByteStringExpectedBase16 + + maxByteStringExpectedFormatMode +) + +func (bsefm ByteStringExpectedFormatMode) valid() bool { + return bsefm >= 0 && bsefm < maxByteStringExpectedFormatMode +} + +// BignumTagMode specifies whether or not the "bignum" tags 2 and 3 (RFC 8949 Section 3.4.3) can be +// decoded. +type BignumTagMode int + +const ( + // BignumTagAllowed allows bignum tags to be decoded. + BignumTagAllowed BignumTagMode = iota + + // BignumTagForbidden produces an UnacceptableDataItemError during Unmarshal if a bignum tag + // is encountered in the input. + BignumTagForbidden + + maxBignumTag +) + +func (btm BignumTagMode) valid() bool { + return btm >= 0 && btm < maxBignumTag +} + +// BinaryUnmarshalerMode specifies how to decode into types that implement +// encoding.BinaryUnmarshaler. +type BinaryUnmarshalerMode int + +const ( + // BinaryUnmarshalerByteString will invoke UnmarshalBinary on the contents of a CBOR byte + // string when decoding into a value that implements BinaryUnmarshaler. + BinaryUnmarshalerByteString BinaryUnmarshalerMode = iota + + // BinaryUnmarshalerNone does not recognize BinaryUnmarshaler implementations during decode. + BinaryUnmarshalerNone + + maxBinaryUnmarshalerMode +) + +func (bum BinaryUnmarshalerMode) valid() bool { + return bum >= 0 && bum < maxBinaryUnmarshalerMode +} + +// DecOptions specifies decoding options. +type DecOptions struct { + // DupMapKey specifies whether to enforce duplicate map key. 
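+ // A minimal caller-side sketch (raw and out are placeholder names for the
+ // caller's encoded input and destination value):
+ //
+ //	dm, err := cbor.DecOptions{DupMapKey: cbor.DupMapKeyEnforcedAPF}.DecMode()
+ //	if err != nil {
+ //		// handle invalid options
+ //	}
+ //	if err := dm.Unmarshal(raw, &out); err != nil {
+ //		// with DupMapKeyEnforcedAPF, duplicate keys surface as a
+ //		// *cbor.DupMapKeyError identifying the key and its index
+ //	}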
+ DupMapKey DupMapKeyMode + + // TimeTag specifies whether or not untagged data items, or tags other + // than tag 0 and tag 1, can be decoded to time.Time. If tag 0 or tag 1 + // appears in an input, the type of its content is always validated as + // specified in RFC 8949. That behavior is not controlled by this + // option. The behavior of the supported modes are: + // + // DecTagIgnored (default): Untagged text strings and text strings + // enclosed in tags other than 0 and 1 are decoded as though enclosed + // in tag 0. Untagged unsigned integers, negative integers, and + // floating-point numbers (or those enclosed in tags other than 0 and + // 1) are decoded as though enclosed in tag 1. Decoding a tag other + // than 0 or 1 enclosing simple values null or undefined into a + // time.Time does not modify the destination value. + // + // DecTagOptional: Untagged text strings are decoded as though + // enclosed in tag 0. Untagged unsigned integers, negative integers, + // and floating-point numbers are decoded as though enclosed in tag + // 1. Tags other than 0 and 1 will produce an error on attempts to + // decode them into a time.Time. + // + // DecTagRequired: Only tags 0 and 1 can be decoded to time.Time. Any + // other input will produce an error. + TimeTag DecTagMode + + // MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR array, maps, and tags. + // Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can + // require larger amounts of stack to deserialize. Don't increase this higher than you require. + MaxNestedLevels int + + // MaxArrayElements specifies the max number of elements for CBOR arrays. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxArrayElements int + + // MaxMapPairs specifies the max number of key-value pairs for CBOR maps. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxMapPairs int + + // IndefLength specifies whether to allow indefinite length CBOR items. + IndefLength IndefLengthMode + + // TagsMd specifies whether to allow CBOR tags (major type 6). + TagsMd TagsMode + + // IntDec specifies which Go integer type (int64 or uint64) to use + // when decoding CBOR int (major type 0 and 1) to Go interface{}. + IntDec IntDecMode + + // MapKeyByteString specifies how to decode CBOR byte string as map key + // when decoding CBOR map with byte string key into an empty interface value. + // By default, an error is returned when attempting to decode CBOR byte string + // as map key because Go doesn't allow []byte as map key. + MapKeyByteString MapKeyByteStringMode + + // ExtraReturnErrors specifies extra conditions that should be treated as errors. + ExtraReturnErrors ExtraDecErrorCond + + // DefaultMapType specifies Go map type to create and decode to + // when unmarshalling CBOR into an empty interface value. + // By default, unmarshal uses map[interface{}]interface{}. + DefaultMapType reflect.Type + + // UTF8 specifies if decoder should decode CBOR Text containing invalid UTF-8. + // By default, unmarshal rejects CBOR text containing invalid UTF-8. + UTF8 UTF8Mode + + // FieldNameMatching specifies how string keys in CBOR maps are matched to Go struct field names. + FieldNameMatching FieldNameMatchingMode + + // BigIntDec specifies how to decode CBOR bignum to Go interface{}. + BigIntDec BigIntDecMode + + // DefaultByteStringType is the Go type that should be produced when decoding a CBOR byte + // string into an empty interface value. 
Types to which a []byte is convertible are valid + // for this option, except for array and pointer-to-array types. If nil, the default is + // []byte. + DefaultByteStringType reflect.Type + + // ByteStringToString specifies the behavior when decoding a CBOR byte string into a Go string. + ByteStringToString ByteStringToStringMode + + // FieldNameByteString specifies the behavior when decoding a CBOR byte string map key as a + // Go struct field name. + FieldNameByteString FieldNameByteStringMode + + // UnrecognizedTagToAny specifies how to decode unrecognized CBOR tag into an empty interface. + // Currently, recognized CBOR tag numbers are 0, 1, 2, 3, or registered by TagSet. + UnrecognizedTagToAny UnrecognizedTagToAnyMode + + // TimeTagToAny specifies how to decode CBOR tag 0 and 1 into an empty interface (any). + // Based on the specified mode, Unmarshal can return a time.Time value or a time string in a specific format. + TimeTagToAny TimeTagToAnyMode + + // SimpleValues is an immutable mapping from each CBOR simple value to a corresponding + // unmarshal behavior. If nil, the simple values false, true, null, and undefined are mapped + // to the Go analog values false, true, nil, and nil, respectively, and all other simple + // values N (except the reserved simple values 24 through 31) are mapped to + // cbor.SimpleValue(N). In other words, all well-formed simple values can be decoded. + // + // Users may provide a custom SimpleValueRegistry constructed via + // NewSimpleValueRegistryFromDefaults. + SimpleValues *SimpleValueRegistry + + // NaN specifies how to decode floating-point values (major type 7, additional information + // 25 through 27) representing NaN (not-a-number). + NaN NaNMode + + // Inf specifies how to decode floating-point values (major type 7, additional information + // 25 through 27) representing positive or negative infinity. + Inf InfMode + + // ByteStringToTime specifies how to decode CBOR byte string into Go time.Time. + ByteStringToTime ByteStringToTimeMode + + // ByteStringExpectedFormat specifies how to decode CBOR byte string into Go byte slice + // when the byte string is NOT enclosed in CBOR tag 21, 22, or 23. An error is returned if + // the CBOR byte string does not contain the expected format (e.g. base64) specified. + // For tags 21-23, see "Expected Later Encoding for CBOR-to-JSON Converters" + // in RFC 8949 Section 3.4.5.2. + ByteStringExpectedFormat ByteStringExpectedFormatMode + + // BignumTag specifies whether or not the "bignum" tags 2 and 3 (RFC 8949 Section 3.4.3) can + // be decoded. Unlike BigIntDec, this option applies to all bignum tags encountered in a + // CBOR input, independent of the type of the destination value of a particular Unmarshal + // operation. + BignumTag BignumTagMode + + // BinaryUnmarshaler specifies how to decode into types that implement + // encoding.BinaryUnmarshaler. + BinaryUnmarshaler BinaryUnmarshalerMode +} + +// DecMode returns DecMode with immutable options and no tags (safe for concurrency). +func (opts DecOptions) DecMode() (DecMode, error) { //nolint:gocritic // ignore hugeParam + return opts.decMode() +} + +// validForTags checks that the provided tag set is compatible with these options and returns a +// non-nil error if and only if the provided tag set is incompatible. 
+func (opts DecOptions) validForTags(tags TagSet) error { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return errors.New("cbor: cannot create DecMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return errors.New("cbor: cannot create DecMode with nil value as TagSet") + } + if opts.ByteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || + opts.ByteStringExpectedFormat != ByteStringExpectedFormatNone { + for _, tagNum := range []uint64{ + tagNumExpectedLaterEncodingBase64URL, + tagNumExpectedLaterEncodingBase64, + tagNumExpectedLaterEncodingBase16, + } { + if rt := tags.getTypeFromTagNum([]uint64{tagNum}); rt != nil { + return fmt.Errorf("cbor: DecMode with non-default StringExpectedEncoding or ByteSliceExpectedEncoding treats tag %d as built-in and conflicts with the provided TagSet's registration of %v", tagNum, rt) + } + } + + } + return nil +} + +// DecModeWithTags returns DecMode with options and tags that are both immutable (safe for concurrency). +func (opts DecOptions) DecModeWithTags(tags TagSet) (DecMode, error) { //nolint:gocritic // ignore hugeParam + if err := opts.validForTags(tags); err != nil { + return nil, err + } + dm, err := opts.decMode() + if err != nil { + return nil, err + } + + // Copy tags + ts := tagSet(make(map[reflect.Type]*tagItem)) + syncTags := tags.(*syncTagSet) + syncTags.RLock() + for contentType, tag := range syncTags.t { + if tag.opts.DecTag != DecTagIgnored { + ts[contentType] = tag + } + } + syncTags.RUnlock() + + if len(ts) > 0 { + dm.tags = ts + } + + return dm, nil +} + +// DecModeWithSharedTags returns DecMode with immutable options and mutable shared tags (safe for concurrency). +func (opts DecOptions) DecModeWithSharedTags(tags TagSet) (DecMode, error) { //nolint:gocritic // ignore hugeParam + if err := opts.validForTags(tags); err != nil { + return nil, err + } + dm, err := opts.decMode() + if err != nil { + return nil, err + } + dm.tags = tags + return dm, nil +} + +const ( + defaultMaxArrayElements = 131072 + minMaxArrayElements = 16 + maxMaxArrayElements = 2147483647 + + defaultMaxMapPairs = 131072 + minMaxMapPairs = 16 + maxMaxMapPairs = 2147483647 + + defaultMaxNestedLevels = 32 + minMaxNestedLevels = 4 + maxMaxNestedLevels = 65535 +) + +var defaultSimpleValues = func() *SimpleValueRegistry { + registry, err := NewSimpleValueRegistryFromDefaults() + if err != nil { + panic(err) + } + return registry +}() + +//nolint:gocyclo // Each option comes with some manageable boilerplate +func (opts DecOptions) decMode() (*decMode, error) { //nolint:gocritic // ignore hugeParam + if !opts.DupMapKey.valid() { + return nil, errors.New("cbor: invalid DupMapKey " + strconv.Itoa(int(opts.DupMapKey))) + } + + if !opts.TimeTag.valid() { + return nil, errors.New("cbor: invalid TimeTag " + strconv.Itoa(int(opts.TimeTag))) + } + + if !opts.IndefLength.valid() { + return nil, errors.New("cbor: invalid IndefLength " + strconv.Itoa(int(opts.IndefLength))) + } + + if !opts.TagsMd.valid() { + return nil, errors.New("cbor: invalid TagsMd " + strconv.Itoa(int(opts.TagsMd))) + } + + if !opts.IntDec.valid() { + return nil, errors.New("cbor: invalid IntDec " + strconv.Itoa(int(opts.IntDec))) + } + + if !opts.MapKeyByteString.valid() { + return nil, errors.New("cbor: invalid MapKeyByteString " + strconv.Itoa(int(opts.MapKeyByteString))) + } + + if opts.MaxNestedLevels == 0 { + opts.MaxNestedLevels = defaultMaxNestedLevels + } else if opts.MaxNestedLevels < minMaxNestedLevels || opts.MaxNestedLevels 
> maxMaxNestedLevels { + return nil, errors.New("cbor: invalid MaxNestedLevels " + strconv.Itoa(opts.MaxNestedLevels) + + " (range is [" + strconv.Itoa(minMaxNestedLevels) + ", " + strconv.Itoa(maxMaxNestedLevels) + "])") + } + + if opts.MaxArrayElements == 0 { + opts.MaxArrayElements = defaultMaxArrayElements + } else if opts.MaxArrayElements < minMaxArrayElements || opts.MaxArrayElements > maxMaxArrayElements { + return nil, errors.New("cbor: invalid MaxArrayElements " + strconv.Itoa(opts.MaxArrayElements) + + " (range is [" + strconv.Itoa(minMaxArrayElements) + ", " + strconv.Itoa(maxMaxArrayElements) + "])") + } + + if opts.MaxMapPairs == 0 { + opts.MaxMapPairs = defaultMaxMapPairs + } else if opts.MaxMapPairs < minMaxMapPairs || opts.MaxMapPairs > maxMaxMapPairs { + return nil, errors.New("cbor: invalid MaxMapPairs " + strconv.Itoa(opts.MaxMapPairs) + + " (range is [" + strconv.Itoa(minMaxMapPairs) + ", " + strconv.Itoa(maxMaxMapPairs) + "])") + } + + if !opts.ExtraReturnErrors.valid() { + return nil, errors.New("cbor: invalid ExtraReturnErrors " + strconv.Itoa(int(opts.ExtraReturnErrors))) + } + + if opts.DefaultMapType != nil && opts.DefaultMapType.Kind() != reflect.Map { + return nil, fmt.Errorf("cbor: invalid DefaultMapType %s", opts.DefaultMapType) + } + + if !opts.UTF8.valid() { + return nil, errors.New("cbor: invalid UTF8 " + strconv.Itoa(int(opts.UTF8))) + } + + if !opts.FieldNameMatching.valid() { + return nil, errors.New("cbor: invalid FieldNameMatching " + strconv.Itoa(int(opts.FieldNameMatching))) + } + + if !opts.BigIntDec.valid() { + return nil, errors.New("cbor: invalid BigIntDec " + strconv.Itoa(int(opts.BigIntDec))) + } + + if opts.DefaultByteStringType != nil && + opts.DefaultByteStringType.Kind() != reflect.String && + (opts.DefaultByteStringType.Kind() != reflect.Slice || opts.DefaultByteStringType.Elem().Kind() != reflect.Uint8) { + return nil, fmt.Errorf("cbor: invalid DefaultByteStringType: %s is not of kind string or []uint8", opts.DefaultByteStringType) + } + + if !opts.ByteStringToString.valid() { + return nil, errors.New("cbor: invalid ByteStringToString " + strconv.Itoa(int(opts.ByteStringToString))) + } + + if !opts.FieldNameByteString.valid() { + return nil, errors.New("cbor: invalid FieldNameByteString " + strconv.Itoa(int(opts.FieldNameByteString))) + } + + if !opts.UnrecognizedTagToAny.valid() { + return nil, errors.New("cbor: invalid UnrecognizedTagToAnyMode " + strconv.Itoa(int(opts.UnrecognizedTagToAny))) + } + simpleValues := opts.SimpleValues + if simpleValues == nil { + simpleValues = defaultSimpleValues + } + + if !opts.TimeTagToAny.valid() { + return nil, errors.New("cbor: invalid TimeTagToAny " + strconv.Itoa(int(opts.TimeTagToAny))) + } + + if !opts.NaN.valid() { + return nil, errors.New("cbor: invalid NaNDec " + strconv.Itoa(int(opts.NaN))) + } + + if !opts.Inf.valid() { + return nil, errors.New("cbor: invalid InfDec " + strconv.Itoa(int(opts.Inf))) + } + + if !opts.ByteStringToTime.valid() { + return nil, errors.New("cbor: invalid ByteStringToTime " + strconv.Itoa(int(opts.ByteStringToTime))) + } + + if !opts.ByteStringExpectedFormat.valid() { + return nil, errors.New("cbor: invalid ByteStringExpectedFormat " + strconv.Itoa(int(opts.ByteStringExpectedFormat))) + } + + if !opts.BignumTag.valid() { + return nil, errors.New("cbor: invalid BignumTag " + strconv.Itoa(int(opts.BignumTag))) + } + + if !opts.BinaryUnmarshaler.valid() { + return nil, errors.New("cbor: invalid BinaryUnmarshaler " + strconv.Itoa(int(opts.BinaryUnmarshaler))) + } + + 
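+ // All options have been validated and unset limits replaced with their defaults,
+ // so they can be captured in the immutable decMode below.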
dm := decMode{ + dupMapKey: opts.DupMapKey, + timeTag: opts.TimeTag, + maxNestedLevels: opts.MaxNestedLevels, + maxArrayElements: opts.MaxArrayElements, + maxMapPairs: opts.MaxMapPairs, + indefLength: opts.IndefLength, + tagsMd: opts.TagsMd, + intDec: opts.IntDec, + mapKeyByteString: opts.MapKeyByteString, + extraReturnErrors: opts.ExtraReturnErrors, + defaultMapType: opts.DefaultMapType, + utf8: opts.UTF8, + fieldNameMatching: opts.FieldNameMatching, + bigIntDec: opts.BigIntDec, + defaultByteStringType: opts.DefaultByteStringType, + byteStringToString: opts.ByteStringToString, + fieldNameByteString: opts.FieldNameByteString, + unrecognizedTagToAny: opts.UnrecognizedTagToAny, + timeTagToAny: opts.TimeTagToAny, + simpleValues: simpleValues, + nanDec: opts.NaN, + infDec: opts.Inf, + byteStringToTime: opts.ByteStringToTime, + byteStringExpectedFormat: opts.ByteStringExpectedFormat, + bignumTag: opts.BignumTag, + binaryUnmarshaler: opts.BinaryUnmarshaler, + } + + return &dm, nil +} + +// DecMode is the main interface for CBOR decoding. +type DecMode interface { + // Unmarshal parses the CBOR-encoded data into the value pointed to by v + // using the decoding mode. If v is nil, not a pointer, or a nil pointer, + // Unmarshal returns an error. + // + // See the documentation for Unmarshal for details. + Unmarshal(data []byte, v interface{}) error + + // UnmarshalFirst parses the first CBOR data item into the value pointed to by v + // using the decoding mode. Any remaining bytes are returned in rest. + // + // If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. + // + // See the documentation for Unmarshal for details. + UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) + + // Valid checks whether data is a well-formed encoded CBOR data item and + // that it complies with configurable restrictions such as MaxNestedLevels, + // MaxArrayElements, MaxMapPairs, etc. + // + // If there are any remaining bytes after the CBOR data item, + // an ExtraneousDataError is returned. + // + // WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity) + // and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed". + // + // Deprecated: Valid is kept for compatibility and should not be used. + // Use Wellformed instead because it has a more appropriate name. + Valid(data []byte) error + + // Wellformed checks whether data is a well-formed encoded CBOR data item and + // that it complies with configurable restrictions such as MaxNestedLevels, + // MaxArrayElements, MaxMapPairs, etc. + // + // If there are any remaining bytes after the CBOR data item, + // an ExtraneousDataError is returned. + Wellformed(data []byte) error + + // NewDecoder returns a new decoder that reads from r using dm DecMode. + NewDecoder(r io.Reader) *Decoder + + // DecOptions returns user specified options used to create this DecMode. 
+ DecOptions() DecOptions +} + +type decMode struct { + tags tagProvider + dupMapKey DupMapKeyMode + timeTag DecTagMode + maxNestedLevels int + maxArrayElements int + maxMapPairs int + indefLength IndefLengthMode + tagsMd TagsMode + intDec IntDecMode + mapKeyByteString MapKeyByteStringMode + extraReturnErrors ExtraDecErrorCond + defaultMapType reflect.Type + utf8 UTF8Mode + fieldNameMatching FieldNameMatchingMode + bigIntDec BigIntDecMode + defaultByteStringType reflect.Type + byteStringToString ByteStringToStringMode + fieldNameByteString FieldNameByteStringMode + unrecognizedTagToAny UnrecognizedTagToAnyMode + timeTagToAny TimeTagToAnyMode + simpleValues *SimpleValueRegistry + nanDec NaNMode + infDec InfMode + byteStringToTime ByteStringToTimeMode + byteStringExpectedFormat ByteStringExpectedFormatMode + bignumTag BignumTagMode + binaryUnmarshaler BinaryUnmarshalerMode +} + +var defaultDecMode, _ = DecOptions{}.decMode() + +// DecOptions returns user specified options used to create this DecMode. +func (dm *decMode) DecOptions() DecOptions { + simpleValues := dm.simpleValues + if simpleValues == defaultSimpleValues { + // Users can't explicitly set this to defaultSimpleValues. It must have been nil in + // the original DecOptions. + simpleValues = nil + } + + return DecOptions{ + DupMapKey: dm.dupMapKey, + TimeTag: dm.timeTag, + MaxNestedLevels: dm.maxNestedLevels, + MaxArrayElements: dm.maxArrayElements, + MaxMapPairs: dm.maxMapPairs, + IndefLength: dm.indefLength, + TagsMd: dm.tagsMd, + IntDec: dm.intDec, + MapKeyByteString: dm.mapKeyByteString, + ExtraReturnErrors: dm.extraReturnErrors, + DefaultMapType: dm.defaultMapType, + UTF8: dm.utf8, + FieldNameMatching: dm.fieldNameMatching, + BigIntDec: dm.bigIntDec, + DefaultByteStringType: dm.defaultByteStringType, + ByteStringToString: dm.byteStringToString, + FieldNameByteString: dm.fieldNameByteString, + UnrecognizedTagToAny: dm.unrecognizedTagToAny, + TimeTagToAny: dm.timeTagToAny, + SimpleValues: simpleValues, + NaN: dm.nanDec, + Inf: dm.infDec, + ByteStringToTime: dm.byteStringToTime, + ByteStringExpectedFormat: dm.byteStringExpectedFormat, + BignumTag: dm.bignumTag, + BinaryUnmarshaler: dm.binaryUnmarshaler, + } +} + +// Unmarshal parses the CBOR-encoded data into the value pointed to by v +// using dm decoding mode. If v is nil, not a pointer, or a nil pointer, +// Unmarshal returns an error. +// +// See the documentation for Unmarshal for details. +func (dm *decMode) Unmarshal(data []byte, v interface{}) error { + d := decoder{data: data, dm: dm} + + // Check well-formedness. + off := d.off // Save offset before data validation + err := d.wellformed(false, false) // don't allow any extra data after valid data item. + d.off = off // Restore offset + if err != nil { + return err + } + + return d.value(v) +} + +// UnmarshalFirst parses the first CBOR data item into the value pointed to by v +// using dm decoding mode. Any remaining bytes are returned in rest. +// +// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. +// +// See the documentation for Unmarshal for details. +func (dm *decMode) UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) { + d := decoder{data: data, dm: dm} + + // check well-formedness. + off := d.off // Save offset before data validation + err = d.wellformed(true, false) // allow extra data after well-formed data item + d.off = off // Restore offset + + // If it is well-formed, parse the value. 
This is structured like this to allow + // better test coverage + if err == nil { + err = d.value(v) + } + + // If either wellformed or value returned an error, do not return rest bytes + if err != nil { + return nil, err + } + + // Return the rest of the data slice (which might be len 0) + return d.data[d.off:], nil +} + +// Valid checks whether data is a well-formed encoded CBOR data item and +// that it complies with configurable restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +// +// WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity) +// and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed". +// +// Deprecated: Valid is kept for compatibility and should not be used. +// Use Wellformed instead because it has a more appropriate name. +func (dm *decMode) Valid(data []byte) error { + return dm.Wellformed(data) +} + +// Wellformed checks whether data is a well-formed encoded CBOR data item and +// that it complies with configurable restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +func (dm *decMode) Wellformed(data []byte) error { + d := decoder{data: data, dm: dm} + return d.wellformed(false, false) +} + +// NewDecoder returns a new decoder that reads from r using dm DecMode. +func (dm *decMode) NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r, d: decoder{dm: dm}} +} + +type decoder struct { + data []byte + off int // next read offset in data + dm *decMode + + // expectedLaterEncodingTags stores a stack of encountered "Expected Later Encoding" tags, + // if any. + // + // The "Expected Later Encoding" tags (21 to 23) are valid for any data item. When decoding + // byte strings, the effective encoding comes from the tag nearest to the byte string being + // decoded. For example, the effective encoding of the byte string 21(22(h'41')) would be + // controlled by tag 22,and in the data item 23(h'42', 22([21(h'43')])]) the effective + // encoding of the byte strings h'42' and h'43' would be controlled by tag 23 and 21, + // respectively. + expectedLaterEncodingTags []uint64 +} + +// value decodes CBOR data item into the value pointed to by v. +// If CBOR data item fails to be decoded into v, +// error is returned and offset is moved to the next CBOR data item. +// Precondition: d.data contains at least one well-formed CBOR data item. +func (d *decoder) value(v interface{}) error { + // v can't be nil, non-pointer, or nil pointer value. + if v == nil { + return &InvalidUnmarshalError{"cbor: Unmarshal(nil)"} + } + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return &InvalidUnmarshalError{"cbor: Unmarshal(non-pointer " + rv.Type().String() + ")"} + } else if rv.IsNil() { + return &InvalidUnmarshalError{"cbor: Unmarshal(nil " + rv.Type().String() + ")"} + } + rv = rv.Elem() + return d.parseToValue(rv, getTypeInfo(rv.Type())) +} + +// parseToValue decodes CBOR data to value. It assumes data is well-formed, +// and does not perform bounds checking. +func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo + + // Decode CBOR nil or CBOR undefined to pointer value by setting pointer value to nil. 
+ if d.nextCBORNil() && v.Kind() == reflect.Ptr { + d.skip() + v.Set(reflect.Zero(v.Type())) + return nil + } + + if tInfo.spclType == specialTypeIface { + if !v.IsNil() { + // Use value type + v = v.Elem() + tInfo = getTypeInfo(v.Type()) + } else { //nolint:gocritic + // Create and use registered type if CBOR data is registered tag + if d.dm.tags != nil && d.nextCBORType() == cborTypeTag { + + off := d.off + var tagNums []uint64 + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + tagNums = append(tagNums, tagNum) + } + d.off = off + + registeredType := d.dm.tags.getTypeFromTagNum(tagNums) + if registeredType != nil { + if registeredType.Implements(tInfo.nonPtrType) || + reflect.PtrTo(registeredType).Implements(tInfo.nonPtrType) { + v.Set(reflect.New(registeredType)) + v = v.Elem() + tInfo = getTypeInfo(registeredType) + } + } + } + } + } + + // Create new value for the pointer v to point to. + // At this point, CBOR value is not nil/undefined if v is a pointer. + for v.Kind() == reflect.Ptr { + if v.IsNil() { + if !v.CanSet() { + d.skip() + return errors.New("cbor: cannot set new value for " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + + // Strip self-described CBOR tag number. + for d.nextCBORType() == cborTypeTag { + off := d.off + _, _, tagNum := d.getHead() + if tagNum != tagNumSelfDescribedCBOR { + d.off = off + break + } + } + + // Check validity of supported built-in tags. + off := d.off + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + if err := validBuiltinTag(tagNum, d.data[d.off]); err != nil { + d.skip() + return err + } + } + d.off = off + + if tInfo.spclType != specialTypeNone { + switch tInfo.spclType { + case specialTypeEmptyIface: + iv, err := d.parse(false) // Skipped self-described CBOR tag number already. + if iv != nil { + v.Set(reflect.ValueOf(iv)) + } + return err + + case specialTypeTag: + return d.parseToTag(v) + + case specialTypeTime: + if d.nextCBORNil() { + // Decoding CBOR null and undefined to time.Time is no-op. + d.skip() + return nil + } + tm, ok, err := d.parseToTime() + if err != nil { + return err + } + if ok { + v.Set(reflect.ValueOf(tm)) + } + return nil + + case specialTypeUnmarshalerIface: + return d.parseToUnmarshaler(v) + } + } + + // Check registered tag number + if tagItem := d.getRegisteredTagItem(tInfo.nonPtrType); tagItem != nil { + t := d.nextCBORType() + if t != cborTypeTag { + if tagItem.opts.DecTag == DecTagRequired { + d.skip() // Required tag number is absent, skip entire tag + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.typ.String(), + errorMsg: "expect CBOR tag value"} + } + } else if err := d.validRegisteredTagNums(tagItem); err != nil { + d.skip() // Skip tag content + return err + } + } + + t := d.nextCBORType() + + switch t { + case cborTypePositiveInt: + _, _, val := d.getHead() + return fillPositiveInt(t, val, v) + + case cborTypeNegativeInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + // CBOR negative integer overflows int64, use big.Int to store value. 
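+ // A CBOR negative integer with encoded argument val represents -(val + 1),
+ // so reconstruct that value with big.Int arithmetic.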
+ bi := new(big.Int) + bi.SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows Go's int64", + } + } + nValue := int64(-1) ^ int64(val) + return fillNegativeInt(t, nValue, v) + + case cborTypeByteString: + b, copied := d.parseByteString() + b, converted, err := d.applyByteStringTextConversion(b, v.Type()) + if err != nil { + return err + } + copied = copied || converted + return fillByteString(t, b, !copied, v, d.dm.byteStringToString, d.dm.binaryUnmarshaler) + + case cborTypeTextString: + b, err := d.parseTextString() + if err != nil { + return err + } + return fillTextString(t, b, v) + + case cborTypePrimitives: + _, ai, val := d.getHead() + switch ai { + case additionalInformationAsFloat16: + f := float64(float16.Frombits(uint16(val)).Float32()) + return fillFloat(t, f, v) + + case additionalInformationAsFloat32: + f := float64(math.Float32frombits(uint32(val))) + return fillFloat(t, f, v) + + case additionalInformationAsFloat64: + f := math.Float64frombits(val) + return fillFloat(t, f, v) + + default: // ai <= 24 + if d.dm.simpleValues.rejected[SimpleValue(val)] { + return &UnacceptableDataItemError{ + CBORType: t.String(), + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + } + } + + switch ai { + case additionalInformationAsFalse, + additionalInformationAsTrue: + return fillBool(t, ai == additionalInformationAsTrue, v) + + case additionalInformationAsNull, + additionalInformationAsUndefined: + return fillNil(t, v) + + default: + return fillPositiveInt(t, val, v) + } + } + + case cborTypeTag: + _, _, tagNum := d.getHead() + switch tagNum { + case tagNumUnsignedBignum: + // Bignum (tag 2) can be decoded to uint, int, float, slice, array, or big.Int. + b, copied := d.parseByteString() + bi := new(big.Int).SetBytes(b) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array { + return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler) + } + if bi.IsUint64() { + return fillPositiveInt(t, bi.Uint64(), v) + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows " + v.Type().String(), + } + + case tagNumNegativeBignum: + // Bignum (tag 3) can be decoded to int, float, slice, array, or big.Int. + b, copied := d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array { + return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler) + } + if bi.IsInt64() { + return fillNegativeInt(t, bi.Int64(), v) + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows " + v.Type().String(), + } + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // If conversion for interoperability with text encodings is not configured, + // treat tags 21-23 as unregistered tags. 
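+ // Otherwise, push the tag number so the innermost expected-encoding tag governs
+ // how byte strings nested in this item are converted; it is popped again once
+ // this data item has been decoded.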
+ if d.dm.byteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || d.dm.byteStringExpectedFormat != ByteStringExpectedFormatNone { + d.expectedLaterEncodingTags = append(d.expectedLaterEncodingTags, tagNum) + defer func() { + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:len(d.expectedLaterEncodingTags)-1] + }() + } + } + + return d.parseToValue(v, tInfo) + + case cborTypeArray: + if tInfo.nonPtrKind == reflect.Slice { + return d.parseArrayToSlice(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Array { + return d.parseArrayToArray(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Struct { + return d.parseArrayToStruct(v, tInfo) + } + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()} + + case cborTypeMap: + if tInfo.nonPtrKind == reflect.Struct { + return d.parseMapToStruct(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Map { + return d.parseMapToMap(v, tInfo) + } + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()} + } + + return nil +} + +func (d *decoder) parseToTag(v reflect.Value) error { + if d.nextCBORNil() { + // Decoding CBOR null and undefined to cbor.Tag is no-op. + d.skip() + return nil + } + + t := d.nextCBORType() + if t != cborTypeTag { + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: typeTag.String()} + } + + // Unmarshal tag number + _, _, num := d.getHead() + + // Unmarshal tag content + content, err := d.parse(false) + if err != nil { + return err + } + + v.Set(reflect.ValueOf(Tag{num, content})) + return nil +} + +// parseToTime decodes the current data item as a time.Time. The bool return value is false if and +// only if the destination value should remain unmodified. +func (d *decoder) parseToTime() (time.Time, bool, error) { + // Verify that tag number or absence of tag number is acceptable to specified timeTag. 
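+ // With DecTagIgnored, any enclosing tags are skipped; with DecTagRequired,
+ // input that is not enclosed in tag 0 or tag 1 is rejected.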
+ if t := d.nextCBORType(); t == cborTypeTag { + if d.dm.timeTag == DecTagIgnored { + // Skip all enclosing tags + for t == cborTypeTag { + d.getHead() + t = d.nextCBORType() + } + if d.nextCBORNil() { + d.skip() + return time.Time{}, false, nil + } + } else { + // Read tag number + _, _, tagNum := d.getHead() + if tagNum != 0 && tagNum != 1 { + d.skip() // skip tag content + return time.Time{}, false, errors.New("cbor: wrong tag number for time.Time, got " + strconv.Itoa(int(tagNum)) + ", expect 0 or 1") + } + } + } else { + if d.dm.timeTag == DecTagRequired { + d.skip() + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String(), errorMsg: "expect CBOR tag value"} + } + } + + switch t := d.nextCBORType(); t { + case cborTypeByteString: + if d.dm.byteStringToTime == ByteStringToTimeAllowed { + b, _ := d.parseByteString() + t, err := time.Parse(time.RFC3339, string(b)) + if err != nil { + return time.Time{}, false, fmt.Errorf("cbor: cannot set %q for time.Time: %w", string(b), err) + } + return t, true, nil + } + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + + case cborTypeTextString: + s, err := d.parseTextString() + if err != nil { + return time.Time{}, false, err + } + t, err := time.Parse(time.RFC3339, string(s)) + if err != nil { + return time.Time{}, false, errors.New("cbor: cannot set " + string(s) + " for time.Time: " + err.Error()) + } + return t, true, nil + + case cborTypePositiveInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: fmt.Sprintf("%d overflows Go's int64", val), + } + } + return time.Unix(int64(val), 0), true, nil + + case cborTypeNegativeInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + if val == math.MaxUint64 { + // Maximum absolute value representable by negative integer is 2^64, + // not 2^64-1, so it overflows uint64. + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: "-18446744073709551616 overflows Go's int64", + } + } + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: fmt.Sprintf("-%d overflows Go's int64", val+1), + } + } + return time.Unix(int64(-1)^int64(val), 0), true, nil + + case cborTypePrimitives: + _, ai, val := d.getHead() + var f float64 + switch ai { + case additionalInformationAsFloat16: + f = float64(float16.Frombits(uint16(val)).Float32()) + + case additionalInformationAsFloat32: + f = float64(math.Float32frombits(uint32(val))) + + case additionalInformationAsFloat64: + f = math.Float64frombits(val) + + default: + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + } + + if math.IsNaN(f) || math.IsInf(f, 0) { + // https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.2-6 + return time.Time{}, true, nil + } + seconds, fractional := math.Modf(f) + return time.Unix(int64(seconds), int64(fractional*1e9)), true, nil + + default: + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + } +} + +// parseToUnmarshaler parses CBOR data to value implementing Unmarshaler interface. +// It assumes data is well-formed, and does not perform bounds checking. 
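+// The complete encoded data item at the current offset (including any enclosing
+// tags) is passed verbatim to the value's UnmarshalCBOR method.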
+func (d *decoder) parseToUnmarshaler(v reflect.Value) error { + if d.nextCBORNil() && v.Kind() == reflect.Ptr && v.IsNil() { + d.skip() + return nil + } + + if v.Kind() != reflect.Ptr && v.CanAddr() { + v = v.Addr() + } + if u, ok := v.Interface().(Unmarshaler); ok { + start := d.off + d.skip() + return u.UnmarshalCBOR(d.data[start:d.off]) + } + d.skip() + return errors.New("cbor: failed to assert " + v.Type().String() + " as cbor.Unmarshaler") +} + +// parse parses CBOR data and returns value in default Go type. +// It assumes data is well-formed, and does not perform bounds checking. +func (d *decoder) parse(skipSelfDescribedTag bool) (interface{}, error) { //nolint:gocyclo + // Strip self-described CBOR tag number. + if skipSelfDescribedTag { + for d.nextCBORType() == cborTypeTag { + off := d.off + _, _, tagNum := d.getHead() + if tagNum != tagNumSelfDescribedCBOR { + d.off = off + break + } + } + } + + // Check validity of supported built-in tags. + off := d.off + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + if err := validBuiltinTag(tagNum, d.data[d.off]); err != nil { + d.skip() + return nil, err + } + } + d.off = off + + t := d.nextCBORType() + switch t { + case cborTypePositiveInt: + _, _, val := d.getHead() + + switch d.dm.intDec { + case IntDecConvertNone: + return val, nil + + case IntDecConvertSigned, IntDecConvertSignedOrFail: + if val > math.MaxInt64 { + return nil, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows Go's int64", + } + } + + return int64(val), nil + + case IntDecConvertSignedOrBigInt: + if val > math.MaxInt64 { + bi := new(big.Int).SetUint64(val) + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + } + + return int64(val), nil + + default: + // not reachable + } + + case cborTypeNegativeInt: + _, _, val := d.getHead() + + if val > math.MaxInt64 { + // CBOR negative integer value overflows Go int64, use big.Int instead. + bi := new(big.Int).SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if d.dm.intDec == IntDecConvertSignedOrFail { + return nil, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: bi.String() + " overflows Go's int64", + } + } + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + } + + nValue := int64(-1) ^ int64(val) + return nValue, nil + + case cborTypeByteString: + b, copied := d.parseByteString() + var effectiveByteStringType = d.dm.defaultByteStringType + if effectiveByteStringType == nil { + effectiveByteStringType = typeByteSlice + } + b, converted, err := d.applyByteStringTextConversion(b, effectiveByteStringType) + if err != nil { + return nil, err + } + copied = copied || converted + + switch effectiveByteStringType { + case typeByteSlice: + if copied { + return b, nil + } + clone := make([]byte, len(b)) + copy(clone, b) + return clone, nil + + case typeString: + return string(b), nil + + default: + if copied || d.dm.defaultByteStringType.Kind() == reflect.String { + // Avoid an unnecessary copy since the conversion to string must + // copy the underlying bytes. 
+ return reflect.ValueOf(b).Convert(d.dm.defaultByteStringType).Interface(), nil + } + clone := make([]byte, len(b)) + copy(clone, b) + return reflect.ValueOf(clone).Convert(d.dm.defaultByteStringType).Interface(), nil + } + + case cborTypeTextString: + b, err := d.parseTextString() + if err != nil { + return nil, err + } + return string(b), nil + + case cborTypeTag: + tagOff := d.off + _, _, tagNum := d.getHead() + contentOff := d.off + + switch tagNum { + case tagNumRFC3339Time, tagNumEpochTime: + d.off = tagOff + tm, _, err := d.parseToTime() + if err != nil { + return nil, err + } + + switch d.dm.timeTagToAny { + case TimeTagToTime: + return tm, nil + + case TimeTagToRFC3339: + if tagNum == 1 { + tm = tm.UTC() + } + // Call time.MarshalText() to format decoded time to RFC3339 format, + // and return error on time value that cannot be represented in + // RFC3339 format. E.g. year cannot exceed 9999, etc. + text, err := tm.Truncate(time.Second).MarshalText() + if err != nil { + return nil, fmt.Errorf("cbor: decoded time cannot be represented in RFC3339 format: %v", err) + } + return string(text), nil + + case TimeTagToRFC3339Nano: + if tagNum == 1 { + tm = tm.UTC() + } + // Call time.MarshalText() to format decoded time to RFC3339 format, + // and return error on time value that cannot be represented in + // RFC3339 format with sub-second precision. + text, err := tm.MarshalText() + if err != nil { + return nil, fmt.Errorf("cbor: decoded time cannot be represented in RFC3339 format with sub-second precision: %v", err) + } + return string(text), nil + + default: + // not reachable + } + + case tagNumUnsignedBignum: + b, _ := d.parseByteString() + bi := new(big.Int).SetBytes(b) + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + + case tagNumNegativeBignum: + b, _ := d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // If conversion for interoperability with text encodings is not configured, + // treat tags 21-23 as unregistered tags. + if d.dm.byteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || + d.dm.byteStringExpectedFormat != ByteStringExpectedFormatNone { + d.expectedLaterEncodingTags = append(d.expectedLaterEncodingTags, tagNum) + defer func() { + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:len(d.expectedLaterEncodingTags)-1] + }() + return d.parse(false) + } + } + + if d.dm.tags != nil { + // Parse to specified type if tag number is registered. 
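+ // Collect the full chain of nested tag numbers before the lookup, since a
+ // TagSet registration may cover a sequence of nested tags.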
+ tagNums := []uint64{tagNum} + for d.nextCBORType() == cborTypeTag { + _, _, num := d.getHead() + tagNums = append(tagNums, num) + } + registeredType := d.dm.tags.getTypeFromTagNum(tagNums) + if registeredType != nil { + d.off = tagOff + rv := reflect.New(registeredType) + if err := d.parseToValue(rv.Elem(), getTypeInfo(registeredType)); err != nil { + return nil, err + } + return rv.Elem().Interface(), nil + } + } + + // Parse tag content + d.off = contentOff + content, err := d.parse(false) + if err != nil { + return nil, err + } + if d.dm.unrecognizedTagToAny == UnrecognizedTagContentToAny { + return content, nil + } + return Tag{tagNum, content}, nil + + case cborTypePrimitives: + _, ai, val := d.getHead() + if ai <= 24 && d.dm.simpleValues.rejected[SimpleValue(val)] { + return nil, &UnacceptableDataItemError{ + CBORType: t.String(), + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + } + } + if ai < 20 || ai == 24 { + return SimpleValue(val), nil + } + + switch ai { + case additionalInformationAsFalse, + additionalInformationAsTrue: + return (ai == additionalInformationAsTrue), nil + + case additionalInformationAsNull, + additionalInformationAsUndefined: + return nil, nil + + case additionalInformationAsFloat16: + f := float64(float16.Frombits(uint16(val)).Float32()) + return f, nil + + case additionalInformationAsFloat32: + f := float64(math.Float32frombits(uint32(val))) + return f, nil + + case additionalInformationAsFloat64: + f := math.Float64frombits(val) + return f, nil + } + + case cborTypeArray: + return d.parseArray() + + case cborTypeMap: + if d.dm.defaultMapType != nil { + m := reflect.New(d.dm.defaultMapType) + err := d.parseToValue(m, getTypeInfo(m.Elem().Type())) + if err != nil { + return nil, err + } + return m.Elem().Interface(), nil + } + return d.parseMap() + } + + return nil, nil +} + +// parseByteString parses a CBOR encoded byte string. The returned byte slice +// may be backed directly by the input. The second return value will be true if +// and only if the slice is backed by a copy of the input. Callers are +// responsible for making a copy if necessary. +func (d *decoder) parseByteString() ([]byte, bool) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + if !indefiniteLength { + b := d.data[d.off : d.off+int(val)] + d.off += int(val) + return b, false + } + // Process indefinite length string chunks. + b := []byte{} + for !d.foundBreak() { + _, _, val = d.getHead() + b = append(b, d.data[d.off:d.off+int(val)]...) + d.off += int(val) + } + return b, true +} + +// applyByteStringTextConversion converts bytes read from a byte string to or from a configured text +// encoding. If no transformation was performed (because it was not required), the original byte +// slice is returned and the bool return value is false. Otherwise, a new slice containing the +// converted bytes is returned along with the bool value true. 
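For illustration, a minimal sketch of how the tag branch of parse above behaves for an unregistered tag number when decoding into interface{} with default options; only the package-level Unmarshal and the Tag type defined in this file are used, and the byte sequence and variable names are illustrative:

    package main

    import (
        "fmt"

        "github.com/fxamacker/cbor/v2"
    )

    func main() {
        // 0xd8 0x64 is tag(100); 0x18 0x2a is the unsigned integer 42.
        data := []byte{0xd8, 0x64, 0x18, 0x2a}

        var v interface{}
        if err := cbor.Unmarshal(data, &v); err != nil {
            panic(err)
        }
        // With default options an unregistered tag is expected to be returned
        // as a Tag value that keeps its number and decoded content.
        fmt.Println(v) // {100 42}
    }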
+func (d *decoder) applyByteStringTextConversion( + src []byte, + dstType reflect.Type, +) ( + dst []byte, + transformed bool, + err error, +) { + switch dstType.Kind() { + case reflect.String: + if d.dm.byteStringToString != ByteStringToStringAllowedWithExpectedLaterEncoding || len(d.expectedLaterEncodingTags) == 0 { + return src, false, nil + } + + switch d.expectedLaterEncodingTags[len(d.expectedLaterEncodingTags)-1] { + case tagNumExpectedLaterEncodingBase64URL: + encoded := make([]byte, base64.RawURLEncoding.EncodedLen(len(src))) + base64.RawURLEncoding.Encode(encoded, src) + return encoded, true, nil + + case tagNumExpectedLaterEncodingBase64: + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(src))) + base64.StdEncoding.Encode(encoded, src) + return encoded, true, nil + + case tagNumExpectedLaterEncodingBase16: + encoded := make([]byte, hex.EncodedLen(len(src))) + hex.Encode(encoded, src) + return encoded, true, nil + + default: + // If this happens, there is a bug: the decoder has pushed an invalid + // "expected later encoding" tag to the stack. + panic(fmt.Sprintf("unrecognized expected later encoding tag: %d", d.expectedLaterEncodingTags)) + } + + case reflect.Slice: + if dstType.Elem().Kind() != reflect.Uint8 || len(d.expectedLaterEncodingTags) > 0 { + // Either the destination is not a slice of bytes, or the encoder that + // produced the input indicated an expected text encoding tag and therefore + // the content of the byte string has NOT been text encoded. + return src, false, nil + } + + switch d.dm.byteStringExpectedFormat { + case ByteStringExpectedBase64URL: + decoded := make([]byte, base64.RawURLEncoding.DecodedLen(len(src))) + n, err := base64.RawURLEncoding.Decode(decoded, src) + if err != nil { + return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase64URL, err) + } + return decoded[:n], true, nil + + case ByteStringExpectedBase64: + decoded := make([]byte, base64.StdEncoding.DecodedLen(len(src))) + n, err := base64.StdEncoding.Decode(decoded, src) + if err != nil { + return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase64, err) + } + return decoded[:n], true, nil + + case ByteStringExpectedBase16: + decoded := make([]byte, hex.DecodedLen(len(src))) + n, err := hex.Decode(decoded, src) + if err != nil { + return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase16, err) + } + return decoded[:n], true, nil + } + } + + return src, false, nil +} + +// parseTextString parses CBOR encoded text string. It returns a byte slice +// to prevent creating an extra copy of string. Caller should wrap returned +// byte slice as string when needed. +func (d *decoder) parseTextString() ([]byte, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + if !indefiniteLength { + b := d.data[d.off : d.off+int(val)] + d.off += int(val) + if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(b) { + return nil, &SemanticError{"cbor: invalid UTF-8 string"} + } + return b, nil + } + // Process indefinite length string chunks. + b := []byte{} + for !d.foundBreak() { + _, _, val = d.getHead() + x := d.data[d.off : d.off+int(val)] + d.off += int(val) + if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(x) { + for !d.foundBreak() { + d.skip() // Skip remaining chunk on error + } + return nil, &SemanticError{"cbor: invalid UTF-8 string"} + } + b = append(b, x...) 
+ } + return b, nil +} + +func (d *decoder) parseArray() ([]interface{}, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance + } + v := make([]interface{}, count) + var e interface{} + var err, lastErr error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + if e, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + v[i] = e + } + return v, err +} + +func (d *decoder) parseArrayToSlice(v reflect.Value, tInfo *typeInfo) error { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance + } + if v.IsNil() || v.Cap() < count || count == 0 { + v.Set(reflect.MakeSlice(tInfo.nonPtrType, count, count)) + } + v.SetLen(count) + var err error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + if lastErr := d.parseToValue(v.Index(i), tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + } + } + return err +} + +func (d *decoder) parseArrayToArray(v reflect.Value, tInfo *typeInfo) error { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + gi := 0 + vLen := v.Len() + var err error + for ci := 0; (hasSize && ci < count) || (!hasSize && !d.foundBreak()); ci++ { + if gi < vLen { + // Read CBOR array element and set array element + if lastErr := d.parseToValue(v.Index(gi), tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + } + gi++ + } else { + d.skip() // Skip remaining CBOR array element + } + } + // Set remaining Go array elements to zero values. + if gi < vLen { + zeroV := reflect.Zero(tInfo.elemTypeInfo.typ) + for ; gi < vLen; gi++ { + v.Index(gi).Set(zeroV) + } + } + return err +} + +func (d *decoder) parseMap() (interface{}, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + m := make(map[interface{}]interface{}) + var k, e interface{} + var err, lastErr error + keyCount := 0 + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + // Parse CBOR map key. + if k, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() + continue + } + + // Detect if CBOR map key can be used as Go map key. + rv := reflect.ValueOf(k) + if !isHashableValue(rv) { + var converted bool + if d.dm.mapKeyByteString == MapKeyByteStringAllowed { + k, converted = convertByteSliceToByteString(k) + } + if !converted { + if err == nil { + err = &InvalidMapKeyTypeError{rv.Type().String()} + } + d.skip() + continue + } + } + + // Parse CBOR map value. + if e, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + + // Add key-value pair to Go map. + m[k] = e + + // Detect duplicate map key. 
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + newKeyCount := len(m) + if newKeyCount == keyCount { + m[k] = nil + err = &DupMapKeyError{k, i} + i++ + // skip the rest of the map + for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + d.skip() // Skip map key + d.skip() // Skip map value + } + return m, err + } + keyCount = newKeyCount + } + } + return m, err +} + +func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if v.IsNil() { + mapsize := count + if !hasSize { + mapsize = 0 + } + v.Set(reflect.MakeMapWithSize(tInfo.nonPtrType, mapsize)) + } + keyType, eleType := tInfo.keyTypeInfo.typ, tInfo.elemTypeInfo.typ + reuseKey, reuseEle := isImmutableKind(tInfo.keyTypeInfo.kind), isImmutableKind(tInfo.elemTypeInfo.kind) + var keyValue, eleValue, zeroKeyValue, zeroEleValue reflect.Value + keyIsInterfaceType := keyType == typeIntf // If key type is interface{}, need to check if key value is hashable. + var err, lastErr error + keyCount := v.Len() + var existingKeys map[interface{}]bool // Store existing map keys, used for detecting duplicate map key. + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + existingKeys = make(map[interface{}]bool, keyCount) + if keyCount > 0 { + vKeys := v.MapKeys() + for i := 0; i < len(vKeys); i++ { + existingKeys[vKeys[i].Interface()] = true + } + } + } + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + // Parse CBOR map key. + if !keyValue.IsValid() { + keyValue = reflect.New(keyType).Elem() + } else if !reuseKey { + if !zeroKeyValue.IsValid() { + zeroKeyValue = reflect.Zero(keyType) + } + keyValue.Set(zeroKeyValue) + } + if lastErr = d.parseToValue(keyValue, tInfo.keyTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() + continue + } + + // Detect if CBOR map key can be used as Go map key. + if keyIsInterfaceType && keyValue.Elem().IsValid() { + if !isHashableValue(keyValue.Elem()) { + var converted bool + if d.dm.mapKeyByteString == MapKeyByteStringAllowed { + var k interface{} + k, converted = convertByteSliceToByteString(keyValue.Elem().Interface()) + if converted { + keyValue.Set(reflect.ValueOf(k)) + } + } + if !converted { + if err == nil { + err = &InvalidMapKeyTypeError{keyValue.Elem().Type().String()} + } + d.skip() + continue + } + } + } + + // Parse CBOR map value. + if !eleValue.IsValid() { + eleValue = reflect.New(eleType).Elem() + } else if !reuseEle { + if !zeroEleValue.IsValid() { + zeroEleValue = reflect.Zero(eleType) + } + eleValue.Set(zeroEleValue) + } + if lastErr := d.parseToValue(eleValue, tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + + // Add key-value pair to Go map. + v.SetMapIndex(keyValue, eleValue) + + // Detect duplicate map key. 
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + newKeyCount := v.Len() + if newKeyCount == keyCount { + kvi := keyValue.Interface() + if !existingKeys[kvi] { + v.SetMapIndex(keyValue, reflect.New(eleType).Elem()) + err = &DupMapKeyError{kvi, i} + i++ + // skip the rest of the map + for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + d.skip() // skip map key + d.skip() // skip map value + } + return err + } + delete(existingKeys, kvi) + } + keyCount = newKeyCount + } + } + return err +} + +func (d *decoder) parseArrayToStruct(v reflect.Value, tInfo *typeInfo) error { + structType := getDecodingStructType(tInfo.nonPtrType) + if structType.err != nil { + return structType.err + } + + if !structType.toArray { + t := d.nextCBORType() + d.skip() + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: "cannot decode CBOR array to struct without toarray option", + } + } + + start := d.off + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size + } + if count != len(structType.fields) { + d.off = start + d.skip() + return &UnmarshalTypeError{ + CBORType: cborTypeArray.String(), + GoType: tInfo.typ.String(), + errorMsg: "cannot decode CBOR array to struct with different number of elements", + } + } + var err, lastErr error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + f := structType.fields[i] + + // Get field value by index + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { + // Return a new value for embedded field null pointer to point to, or return error. + if !v.CanSet() { + return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + return v, nil + }) + if lastErr != nil && err == nil { + err = lastErr + } + if !fv.IsValid() { + d.skip() + continue + } + } + + if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil { + if err == nil { + if typeError, ok := lastErr.(*UnmarshalTypeError); ok { + typeError.StructFieldName = tInfo.typ.String() + "." + f.name + err = typeError + } else { + err = lastErr + } + } + } + } + return err +} + +// parseMapToStruct needs to be fast so gocyclo can be ignored for now. +func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo + structType := getDecodingStructType(tInfo.nonPtrType) + if structType.err != nil { + return structType.err + } + + if structType.toArray { + t := d.nextCBORType() + d.skip() + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: "cannot decode CBOR map to struct with toarray option", + } + } + + var err, lastErr error + + // Get CBOR map size + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + + // Keeps track of matched struct fields + var foundFldIdx []bool + { + const maxStackFields = 128 + if nfields := len(structType.fields); nfields <= maxStackFields { + // For structs with typical field counts, expect that this can be + // stack-allocated. 
+ var a [maxStackFields]bool + foundFldIdx = a[:nfields] + } else { + foundFldIdx = make([]bool, len(structType.fields)) + } + } + + // Keeps track of CBOR map keys to detect duplicate map key + keyCount := 0 + var mapKeys map[interface{}]struct{} + + errOnUnknownField := (d.dm.extraReturnErrors & ExtraDecErrorUnknownField) > 0 + +MapEntryLoop: + for j := 0; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + var f *field + + // If duplicate field detection is enabled and the key at index j did not match any + // field, k will hold the map key. + var k interface{} + + t := d.nextCBORType() + if t == cborTypeTextString || (t == cborTypeByteString && d.dm.fieldNameByteString == FieldNameByteStringAllowed) { + var keyBytes []byte + if t == cborTypeTextString { + keyBytes, lastErr = d.parseTextString() + if lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() // skip value + continue + } + } else { // cborTypeByteString + keyBytes, _ = d.parseByteString() + } + + // Check for exact match on field name. + if i, ok := structType.fieldIndicesByName[string(keyBytes)]; ok { + fld := structType.fields[i] + + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{fld.name, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + } + + // Find field with case-insensitive match + if f == nil && d.dm.fieldNameMatching == FieldNameMatchingPreferCaseSensitive { + keyLen := len(keyBytes) + keyString := string(keyBytes) + for i := 0; i < len(structType.fields); i++ { + fld := structType.fields[i] + if len(fld.name) == keyLen && strings.EqualFold(fld.name, keyString) { + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{keyString, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + break + } + } + } + + if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { + k = string(keyBytes) + } + } else if t <= cborTypeNegativeInt { // uint/int + var nameAsInt int64 + + if t == cborTypePositiveInt { + _, _, val := d.getHead() + nameAsInt = int64(val) + } else { + _, _, val := d.getHead() + if val > math.MaxInt64 { + if err == nil { + err = &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: "-1-" + strconv.FormatUint(val, 10) + " overflows Go's int64", + } + } + d.skip() // skip value + continue + } + nameAsInt = int64(-1) ^ int64(val) + } + + // Find field + for i := 0; i < len(structType.fields); i++ { + fld := structType.fields[i] + if fld.keyAsInt && fld.nameAsInt == nameAsInt { + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{nameAsInt, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + break + } + } + + if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { + k = nameAsInt + } + } else { + if err == nil { + 
err = &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf("").String(), + errorMsg: "map key is of type " + t.String() + " and cannot be used to match struct field name", + } + } + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + // parse key + k, lastErr = d.parse(true) + if lastErr != nil { + d.skip() // skip value + continue + } + // Detect if CBOR map key can be used as Go map key. + if !isHashableValue(reflect.ValueOf(k)) { + d.skip() // skip value + continue + } + } else { + d.skip() // skip key + } + } + + if f == nil { + if errOnUnknownField { + err = &UnknownFieldError{j} + d.skip() // Skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } + + // Two map keys that match the same struct field are immediately considered + // duplicates. This check detects duplicates between two map keys that do + // not match a struct field. If unknown field errors are enabled, then this + // check is never reached. + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + if mapKeys == nil { + mapKeys = make(map[interface{}]struct{}, 1) + } + mapKeys[k] = struct{}{} + newKeyCount := len(mapKeys) + if newKeyCount == keyCount { + err = &DupMapKeyError{k, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } + keyCount = newKeyCount + } + + d.skip() // Skip value + continue + } + + // Get field value by index + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { + // Return a new value for embedded field null pointer to point to, or return error. + if !v.CanSet() { + return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + return v, nil + }) + if lastErr != nil && err == nil { + err = lastErr + } + if !fv.IsValid() { + d.skip() + continue + } + } + + if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil { + if err == nil { + if typeError, ok := lastErr.(*UnmarshalTypeError); ok { + typeError.StructFieldName = tInfo.nonPtrType.String() + "." + f.name + err = typeError + } else { + err = lastErr + } + } + } + } + return err +} + +// validRegisteredTagNums verifies that tag numbers match registered tag numbers of type t. +// validRegisteredTagNums assumes next CBOR data type is tag. It scans all tag numbers, and stops at tag content. +func (d *decoder) validRegisteredTagNums(registeredTag *tagItem) error { + // Scan until next cbor data is tag content. + tagNums := make([]uint64, 0, 1) + for d.nextCBORType() == cborTypeTag { + _, _, val := d.getHead() + tagNums = append(tagNums, val) + } + + if !registeredTag.equalTagNum(tagNums) { + return &WrongTagError{registeredTag.contentType, registeredTag.num, tagNums} + } + return nil +} + +func (d *decoder) getRegisteredTagItem(vt reflect.Type) *tagItem { + if d.dm.tags != nil { + return d.dm.tags.getTagItemFromType(vt) + } + return nil +} + +// skip moves data offset to the next item. skip assumes data is well-formed, +// and does not perform bounds checking. 
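For illustration, a minimal sketch of the integer-key matching performed by parseMapToStruct above, using only the package-level Unmarshal and the documented "keyasint" struct tag option; the claims type, its field names, and the byte sequence are illustrative:

    package main

    import (
        "fmt"

        "github.com/fxamacker/cbor/v2"
    )

    // claims uses the "keyasint" tag option, so the integer CBOR map keys
    // 1 and 4 are matched against these fields instead of text names.
    type claims struct {
        Issuer  string `cbor:"1,keyasint"`
        Expires uint64 `cbor:"4,keyasint"`
    }

    func main() {
        // {1: "ex", 4: 1234} encoded as a definite-length CBOR map.
        data := []byte{0xa2, 0x01, 0x62, 0x65, 0x78, 0x04, 0x19, 0x04, 0xd2}

        var c claims
        if err := cbor.Unmarshal(data, &c); err != nil {
            panic(err)
        }
        fmt.Println(c.Issuer, c.Expires) // ex 1234
    }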
+func (d *decoder) skip() { + t, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + + if indefiniteLength { + switch t { + case cborTypeByteString, cborTypeTextString, cborTypeArray, cborTypeMap: + for { + if isBreakFlag(d.data[d.off]) { + d.off++ + return + } + d.skip() + } + } + } + + switch t { + case cborTypeByteString, cborTypeTextString: + d.off += int(val) + + case cborTypeArray: + for i := 0; i < int(val); i++ { + d.skip() + } + + case cborTypeMap: + for i := 0; i < int(val)*2; i++ { + d.skip() + } + + case cborTypeTag: + d.skip() + } +} + +func (d *decoder) getHeadWithIndefiniteLengthFlag() ( + t cborType, + ai byte, + val uint64, + indefiniteLength bool, +) { + t, ai, val = d.getHead() + indefiniteLength = additionalInformation(ai).isIndefiniteLength() + return +} + +// getHead assumes data is well-formed, and does not perform bounds checking. +func (d *decoder) getHead() (t cborType, ai byte, val uint64) { + t, ai = parseInitialByte(d.data[d.off]) + val = uint64(ai) + d.off++ + + if ai <= maxAdditionalInformationWithoutArgument { + return + } + + if ai == additionalInformationWith1ByteArgument { + val = uint64(d.data[d.off]) + d.off++ + return + } + + if ai == additionalInformationWith2ByteArgument { + const argumentSize = 2 + val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + return + } + + if ai == additionalInformationWith4ByteArgument { + const argumentSize = 4 + val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + return + } + + if ai == additionalInformationWith8ByteArgument { + const argumentSize = 8 + val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize]) + d.off += argumentSize + return + } + return +} + +func (d *decoder) numOfItemsUntilBreak() int { + savedOff := d.off + i := 0 + for !d.foundBreak() { + d.skip() + i++ + } + d.off = savedOff + return i +} + +// foundBreak returns true if next byte is CBOR break code and moves cursor by 1, +// otherwise it returns false. +// foundBreak assumes data is well-formed, and does not perform bounds checking. 
+func (d *decoder) foundBreak() bool { + if isBreakFlag(d.data[d.off]) { + d.off++ + return true + } + return false +} + +func (d *decoder) reset(data []byte) { + d.data = data + d.off = 0 + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:0] +} + +func (d *decoder) nextCBORType() cborType { + return getType(d.data[d.off]) +} + +func (d *decoder) nextCBORNil() bool { + return d.data[d.off] == 0xf6 || d.data[d.off] == 0xf7 +} + +var ( + typeIntf = reflect.TypeOf([]interface{}(nil)).Elem() + typeTime = reflect.TypeOf(time.Time{}) + typeBigInt = reflect.TypeOf(big.Int{}) + typeUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + typeBinaryUnmarshaler = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() + typeString = reflect.TypeOf("") + typeByteSlice = reflect.TypeOf([]byte(nil)) +) + +func fillNil(_ cborType, v reflect.Value) error { + switch v.Kind() { + case reflect.Slice, reflect.Map, reflect.Interface, reflect.Ptr: + v.Set(reflect.Zero(v.Type())) + return nil + } + return nil +} + +func fillPositiveInt(t cborType, val uint64, v reflect.Value) error { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if val > math.MaxInt64 { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + if v.OverflowInt(int64(val)) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetInt(int64(val)) + return nil + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if v.OverflowUint(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetUint(val) + return nil + + case reflect.Float32, reflect.Float64: + f := float64(val) + v.SetFloat(f) + return nil + } + + if v.Type() == typeBigInt { + i := new(big.Int).SetUint64(val) + v.Set(reflect.ValueOf(*i)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillNegativeInt(t cborType, val int64, v reflect.Value) error { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if v.OverflowInt(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatInt(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetInt(val) + return nil + + case reflect.Float32, reflect.Float64: + f := float64(val) + v.SetFloat(f) + return nil + } + if v.Type() == typeBigInt { + i := new(big.Int).SetInt64(val) + v.Set(reflect.ValueOf(*i)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillBool(t cborType, val bool, v reflect.Value) error { + if v.Kind() == reflect.Bool { + v.SetBool(val) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillFloat(t cborType, val float64, v reflect.Value) error { + switch v.Kind() { + case reflect.Float32, reflect.Float64: + if v.OverflowFloat(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatFloat(val, 'E', -1, 64) + " overflows " + v.Type().String(), + } + } + v.SetFloat(val) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: 
v.Type().String()} +} + +func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts ByteStringToStringMode, bum BinaryUnmarshalerMode) error { + if bum == BinaryUnmarshalerByteString && reflect.PtrTo(v.Type()).Implements(typeBinaryUnmarshaler) { + if v.CanAddr() { + v = v.Addr() + if u, ok := v.Interface().(encoding.BinaryUnmarshaler); ok { + // The contract of BinaryUnmarshaler forbids + // retaining the input bytes, so no copying is + // required even if val is shared. + return u.UnmarshalBinary(val) + } + } + return errors.New("cbor: cannot set new value for " + v.Type().String()) + } + if bsts != ByteStringToStringForbidden && v.Kind() == reflect.String { + v.SetString(string(val)) + return nil + } + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 { + src := val + if shared { + // SetBytes shares the underlying bytes of the source slice. + src = make([]byte, len(val)) + copy(src, val) + } + v.SetBytes(src) + return nil + } + if v.Kind() == reflect.Array && v.Type().Elem().Kind() == reflect.Uint8 { + vLen := v.Len() + i := 0 + for ; i < vLen && i < len(val); i++ { + v.Index(i).SetUint(uint64(val[i])) + } + // Set remaining Go array elements to zero values. + if i < vLen { + zeroV := reflect.Zero(reflect.TypeOf(byte(0))) + for ; i < vLen; i++ { + v.Index(i).Set(zeroV) + } + } + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillTextString(t cborType, val []byte, v reflect.Value) error { + if v.Kind() == reflect.String { + v.SetString(string(val)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func isImmutableKind(k reflect.Kind) bool { + switch k { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64, + reflect.String: + return true + + default: + return false + } +} + +func isHashableValue(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Slice, reflect.Map, reflect.Func: + return false + + case reflect.Struct: + switch rv.Type() { + case typeTag: + tag := rv.Interface().(Tag) + return isHashableValue(reflect.ValueOf(tag.Content)) + case typeBigInt: + return false + } + } + return true +} + +// convertByteSliceToByteString converts []byte to ByteString if +// - v is []byte type, or +// - v is Tag type and tag content type is []byte +// This function also handles nested tags. +// CBOR data is already verified to be well-formed before this function is used, +// so the recursion won't exceed max nested levels. +func convertByteSliceToByteString(v interface{}) (interface{}, bool) { + switch v := v.(type) { + case []byte: + return ByteString(v), true + + case Tag: + content, converted := convertByteSliceToByteString(v.Content) + if converted { + return Tag{Number: v.Number, Content: content}, true + } + } + return v, false +} diff --git a/vendor/github.com/fxamacker/cbor/v2/diagnose.go b/vendor/github.com/fxamacker/cbor/v2/diagnose.go new file mode 100644 index 0000000000..44afb86608 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/diagnose.go @@ -0,0 +1,724 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +package cbor + +import ( + "bytes" + "encoding/base32" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "io" + "math" + "math/big" + "strconv" + "unicode/utf16" + "unicode/utf8" + + "github.com/x448/float16" +) + +// DiagMode is the main interface for CBOR diagnostic notation. +type DiagMode interface { + // Diagnose returns extended diagnostic notation (EDN) of CBOR data items using this DiagMode. + Diagnose([]byte) (string, error) + + // DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest. + DiagnoseFirst([]byte) (string, []byte, error) + + // DiagOptions returns user specified options used to create this DiagMode. + DiagOptions() DiagOptions +} + +// ByteStringEncoding specifies the base encoding that byte strings are notated. +type ByteStringEncoding uint8 + +const ( + // ByteStringBase16Encoding encodes byte strings in base16, without padding. + ByteStringBase16Encoding ByteStringEncoding = iota + + // ByteStringBase32Encoding encodes byte strings in base32, without padding. + ByteStringBase32Encoding + + // ByteStringBase32HexEncoding encodes byte strings in base32hex, without padding. + ByteStringBase32HexEncoding + + // ByteStringBase64Encoding encodes byte strings in base64url, without padding. + ByteStringBase64Encoding + + maxByteStringEncoding +) + +func (bse ByteStringEncoding) valid() error { + if bse >= maxByteStringEncoding { + return errors.New("cbor: invalid ByteStringEncoding " + strconv.Itoa(int(bse))) + } + return nil +} + +// DiagOptions specifies Diag options. +type DiagOptions struct { + // ByteStringEncoding specifies the base encoding that byte strings are notated. + // Default is ByteStringBase16Encoding. + ByteStringEncoding ByteStringEncoding + + // ByteStringHexWhitespace specifies notating with whitespace in byte string + // when ByteStringEncoding is ByteStringBase16Encoding. + ByteStringHexWhitespace bool + + // ByteStringText specifies notating with text in byte string + // if it is a valid UTF-8 text. + ByteStringText bool + + // ByteStringEmbeddedCBOR specifies notating embedded CBOR in byte string + // if it is a valid CBOR bytes. + ByteStringEmbeddedCBOR bool + + // CBORSequence specifies notating CBOR sequences. + // otherwise, it returns an error if there are more bytes after the first CBOR. + CBORSequence bool + + // FloatPrecisionIndicator specifies appending a suffix to indicate float precision. + // Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-encoding-indicators. + FloatPrecisionIndicator bool + + // MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR array, maps, and tags. + // Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can + // require larger amounts of stack to deserialize. Don't increase this higher than you require. + MaxNestedLevels int + + // MaxArrayElements specifies the max number of elements for CBOR arrays. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxArrayElements int + + // MaxMapPairs specifies the max number of key-value pairs for CBOR maps. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxMapPairs int +} + +// DiagMode returns a DiagMode with immutable options. 
+func (opts DiagOptions) DiagMode() (DiagMode, error) { + return opts.diagMode() +} + +func (opts DiagOptions) diagMode() (*diagMode, error) { + if err := opts.ByteStringEncoding.valid(); err != nil { + return nil, err + } + + decMode, err := DecOptions{ + MaxNestedLevels: opts.MaxNestedLevels, + MaxArrayElements: opts.MaxArrayElements, + MaxMapPairs: opts.MaxMapPairs, + }.decMode() + if err != nil { + return nil, err + } + + return &diagMode{ + byteStringEncoding: opts.ByteStringEncoding, + byteStringHexWhitespace: opts.ByteStringHexWhitespace, + byteStringText: opts.ByteStringText, + byteStringEmbeddedCBOR: opts.ByteStringEmbeddedCBOR, + cborSequence: opts.CBORSequence, + floatPrecisionIndicator: opts.FloatPrecisionIndicator, + decMode: decMode, + }, nil +} + +type diagMode struct { + byteStringEncoding ByteStringEncoding + byteStringHexWhitespace bool + byteStringText bool + byteStringEmbeddedCBOR bool + cborSequence bool + floatPrecisionIndicator bool + decMode *decMode +} + +// DiagOptions returns user specified options used to create this DiagMode. +func (dm *diagMode) DiagOptions() DiagOptions { + return DiagOptions{ + ByteStringEncoding: dm.byteStringEncoding, + ByteStringHexWhitespace: dm.byteStringHexWhitespace, + ByteStringText: dm.byteStringText, + ByteStringEmbeddedCBOR: dm.byteStringEmbeddedCBOR, + CBORSequence: dm.cborSequence, + FloatPrecisionIndicator: dm.floatPrecisionIndicator, + MaxNestedLevels: dm.decMode.maxNestedLevels, + MaxArrayElements: dm.decMode.maxArrayElements, + MaxMapPairs: dm.decMode.maxMapPairs, + } +} + +// Diagnose returns extended diagnostic notation (EDN) of CBOR data items using the DiagMode. +func (dm *diagMode) Diagnose(data []byte) (string, error) { + return newDiagnose(data, dm.decMode, dm).diag(dm.cborSequence) +} + +// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest. +func (dm *diagMode) DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) { + return newDiagnose(data, dm.decMode, dm).diagFirst() +} + +var defaultDiagMode, _ = DiagOptions{}.diagMode() + +// Diagnose returns extended diagnostic notation (EDN) of CBOR data items +// using the default diagnostic mode. +// +// Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-diagnostic-notation. +func Diagnose(data []byte) (string, error) { + return defaultDiagMode.Diagnose(data) +} + +// Diagnose returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest. 
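For illustration, a minimal sketch of the package-level Diagnose helper defined above with default options; the byte sequence is illustrative:

    package main

    import (
        "fmt"

        "github.com/fxamacker/cbor/v2"
    )

    func main() {
        // 0x83 starts an array of three items: 1, "hi", true.
        data := []byte{0x83, 0x01, 0x62, 0x68, 0x69, 0xf5}

        edn, err := cbor.Diagnose(data)
        if err != nil {
            panic(err)
        }
        fmt.Println(edn) // [1, "hi", true]
    }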
+func DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) { + return defaultDiagMode.DiagnoseFirst(data) +} + +type diagnose struct { + dm *diagMode + d *decoder + w *bytes.Buffer +} + +func newDiagnose(data []byte, decm *decMode, diagm *diagMode) *diagnose { + return &diagnose{ + dm: diagm, + d: &decoder{data: data, dm: decm}, + w: &bytes.Buffer{}, + } +} + +func (di *diagnose) diag(cborSequence bool) (string, error) { + // CBOR Sequence + firstItem := true + for { + switch err := di.wellformed(cborSequence); err { + case nil: + if !firstItem { + di.w.WriteString(", ") + } + firstItem = false + if itemErr := di.item(); itemErr != nil { + return di.w.String(), itemErr + } + + case io.EOF: + if firstItem { + return di.w.String(), err + } + return di.w.String(), nil + + default: + return di.w.String(), err + } + } +} + +func (di *diagnose) diagFirst() (diagNotation string, rest []byte, err error) { + err = di.wellformed(true) + if err == nil { + err = di.item() + } + + if err == nil { + // Return EDN and the rest of the data slice (which might be len 0) + return di.w.String(), di.d.data[di.d.off:], nil + } + + return di.w.String(), nil, err +} + +func (di *diagnose) wellformed(allowExtraData bool) error { + off := di.d.off + err := di.d.wellformed(allowExtraData, false) + di.d.off = off + return err +} + +func (di *diagnose) item() error { //nolint:gocyclo + initialByte := di.d.data[di.d.off] + switch initialByte { + case cborByteStringWithIndefiniteLengthHead, + cborTextStringWithIndefiniteLengthHead: // indefinite-length byte/text string + di.d.off++ + if isBreakFlag(di.d.data[di.d.off]) { + di.d.off++ + switch initialByte { + case cborByteStringWithIndefiniteLengthHead: + // indefinite-length bytes with no chunks. + di.w.WriteString(`''_`) + return nil + case cborTextStringWithIndefiniteLengthHead: + // indefinite-length text with no chunks. + di.w.WriteString(`""_`) + return nil + } + } + + di.w.WriteString("(_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + // wellformedIndefiniteString() already checked that the next item is a byte/text string. + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte(')') + return nil + + case cborArrayWithIndefiniteLengthHead: // indefinite-length array + di.d.off++ + di.w.WriteString("[_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte(']') + return nil + + case cborMapWithIndefiniteLengthHead: // indefinite-length map + di.d.off++ + di.w.WriteString("{_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + // key + if err := di.item(); err != nil { + return err + } + + di.w.WriteString(": ") + + // value + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte('}') + return nil + } + + t := di.d.nextCBORType() + switch t { + case cborTypePositiveInt: + _, _, val := di.d.getHead() + di.w.WriteString(strconv.FormatUint(val, 10)) + return nil + + case cborTypeNegativeInt: + _, _, val := di.d.getHead() + if val > math.MaxInt64 { + // CBOR negative integer overflows int64, use big.Int to store value. 
+ bi := new(big.Int) + bi.SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + di.w.WriteString(bi.String()) + return nil + } + + nValue := int64(-1) ^ int64(val) + di.w.WriteString(strconv.FormatInt(nValue, 10)) + return nil + + case cborTypeByteString: + b, _ := di.d.parseByteString() + return di.encodeByteString(b) + + case cborTypeTextString: + b, err := di.d.parseTextString() + if err != nil { + return err + } + return di.encodeTextString(string(b), '"') + + case cborTypeArray: + _, _, val := di.d.getHead() + count := int(val) + di.w.WriteByte('[') + + for i := 0; i < count; i++ { + if i > 0 { + di.w.WriteString(", ") + } + if err := di.item(); err != nil { + return err + } + } + di.w.WriteByte(']') + return nil + + case cborTypeMap: + _, _, val := di.d.getHead() + count := int(val) + di.w.WriteByte('{') + + for i := 0; i < count; i++ { + if i > 0 { + di.w.WriteString(", ") + } + // key + if err := di.item(); err != nil { + return err + } + di.w.WriteString(": ") + // value + if err := di.item(); err != nil { + return err + } + } + di.w.WriteByte('}') + return nil + + case cborTypeTag: + _, _, tagNum := di.d.getHead() + switch tagNum { + case tagNumUnsignedBignum: + if nt := di.d.nextCBORType(); nt != cborTypeByteString { + return newInadmissibleTagContentTypeError( + tagNumUnsignedBignum, + "byte string", + nt.String()) + } + + b, _ := di.d.parseByteString() + bi := new(big.Int).SetBytes(b) + di.w.WriteString(bi.String()) + return nil + + case tagNumNegativeBignum: + if nt := di.d.nextCBORType(); nt != cborTypeByteString { + return newInadmissibleTagContentTypeError( + tagNumNegativeBignum, + "byte string", + nt.String(), + ) + } + + b, _ := di.d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + di.w.WriteString(bi.String()) + return nil + + default: + di.w.WriteString(strconv.FormatUint(tagNum, 10)) + di.w.WriteByte('(') + if err := di.item(); err != nil { + return err + } + di.w.WriteByte(')') + return nil + } + + case cborTypePrimitives: + _, ai, val := di.d.getHead() + switch ai { + case additionalInformationAsFalse: + di.w.WriteString("false") + return nil + + case additionalInformationAsTrue: + di.w.WriteString("true") + return nil + + case additionalInformationAsNull: + di.w.WriteString("null") + return nil + + case additionalInformationAsUndefined: + di.w.WriteString("undefined") + return nil + + case additionalInformationAsFloat16, + additionalInformationAsFloat32, + additionalInformationAsFloat64: + return di.encodeFloat(ai, val) + + default: + di.w.WriteString("simple(") + di.w.WriteString(strconv.FormatUint(val, 10)) + di.w.WriteByte(')') + return nil + } + } + + return nil +} + +// writeU16 format a rune as "\uxxxx" +func (di *diagnose) writeU16(val rune) { + di.w.WriteString("\\u") + var in [2]byte + in[0] = byte(val >> 8) + in[1] = byte(val) + sz := hex.EncodedLen(len(in)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + hex.Encode(dst, in[:]) + di.w.Write(dst) +} + +var rawBase32Encoding = base32.StdEncoding.WithPadding(base32.NoPadding) +var rawBase32HexEncoding = base32.HexEncoding.WithPadding(base32.NoPadding) + +func (di *diagnose) encodeByteString(val []byte) error { + if len(val) > 0 { + if di.dm.byteStringText && utf8.Valid(val) { + return di.encodeTextString(string(val), '\'') + } + + if di.dm.byteStringEmbeddedCBOR { + di2 := newDiagnose(val, di.dm.decMode, di.dm) + // should always notating embedded CBOR sequence. 
+ if str, err := di2.diag(true); err == nil { + di.w.WriteString("<<") + di.w.WriteString(str) + di.w.WriteString(">>") + return nil + } + } + } + + switch di.dm.byteStringEncoding { + case ByteStringBase16Encoding: + di.w.WriteString("h'") + if di.dm.byteStringHexWhitespace { + sz := hex.EncodedLen(len(val)) + if len(val) > 0 { + sz += len(val) - 1 + } + di.w.Grow(sz) + + dst := di.w.Bytes()[di.w.Len():] + for i := range val { + if i > 0 { + dst = append(dst, ' ') + } + hex.Encode(dst[len(dst):len(dst)+2], val[i:i+1]) + dst = dst[:len(dst)+2] + } + di.w.Write(dst) + } else { + sz := hex.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + hex.Encode(dst, val) + di.w.Write(dst) + } + di.w.WriteByte('\'') + return nil + + case ByteStringBase32Encoding: + di.w.WriteString("b32'") + sz := rawBase32Encoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + rawBase32Encoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + case ByteStringBase32HexEncoding: + di.w.WriteString("h32'") + sz := rawBase32HexEncoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + rawBase32HexEncoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + case ByteStringBase64Encoding: + di.w.WriteString("b64'") + sz := base64.RawURLEncoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + base64.RawURLEncoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + default: + // It should not be possible for users to construct a *diagMode with an invalid byte + // string encoding. + panic(fmt.Sprintf("diagmode has invalid ByteStringEncoding %v", di.dm.byteStringEncoding)) + } +} + +const utf16SurrSelf = rune(0x10000) + +// quote should be either `'` or `"` +func (di *diagnose) encodeTextString(val string, quote byte) error { + di.w.WriteByte(quote) + + for i := 0; i < len(val); { + if b := val[i]; b < utf8.RuneSelf { + switch { + case b == '\t', b == '\n', b == '\r', b == '\\', b == quote: + di.w.WriteByte('\\') + + switch b { + case '\t': + b = 't' + case '\n': + b = 'n' + case '\r': + b = 'r' + } + di.w.WriteByte(b) + + case b >= ' ' && b <= '~': + di.w.WriteByte(b) + + default: + di.writeU16(rune(b)) + } + + i++ + continue + } + + c, size := utf8.DecodeRuneInString(val[i:]) + switch { + case c == utf8.RuneError: + return &SemanticError{"cbor: invalid UTF-8 string"} + + case c < utf16SurrSelf: + di.writeU16(c) + + default: + c1, c2 := utf16.EncodeRune(c) + di.writeU16(c1) + di.writeU16(c2) + } + + i += size + } + + di.w.WriteByte(quote) + return nil +} + +func (di *diagnose) encodeFloat(ai byte, val uint64) error { + f64 := float64(0) + switch ai { + case additionalInformationAsFloat16: + f16 := float16.Frombits(uint16(val)) + switch { + case f16.IsNaN(): + di.w.WriteString("NaN") + return nil + case f16.IsInf(1): + di.w.WriteString("Infinity") + return nil + case f16.IsInf(-1): + di.w.WriteString("-Infinity") + return nil + default: + f64 = float64(f16.Float32()) + } + + case additionalInformationAsFloat32: + f32 := math.Float32frombits(uint32(val)) + switch { + case f32 != f32: + di.w.WriteString("NaN") + return nil + case f32 > math.MaxFloat32: + di.w.WriteString("Infinity") + return nil + case f32 < -math.MaxFloat32: + di.w.WriteString("-Infinity") + return nil + default: + f64 = float64(f32) + } + + case additionalInformationAsFloat64: + f64 = math.Float64frombits(val) + switch { + case f64 
!= f64: + di.w.WriteString("NaN") + return nil + case f64 > math.MaxFloat64: + di.w.WriteString("Infinity") + return nil + case f64 < -math.MaxFloat64: + di.w.WriteString("-Infinity") + return nil + } + } + // Use ES6 number to string conversion which should match most JSON generators. + // Inspired by https://github.com/golang/go/blob/4df10fba1687a6d4f51d7238a403f8f2298f6a16/src/encoding/json/encode.go#L585 + const bitSize = 64 + b := make([]byte, 0, 32) + if abs := math.Abs(f64); abs != 0 && (abs < 1e-6 || abs >= 1e21) { + b = strconv.AppendFloat(b, f64, 'e', -1, bitSize) + // clean up e-09 to e-9 + n := len(b) + if n >= 4 && string(b[n-4:n-1]) == "e-0" { + b = append(b[:n-2], b[n-1]) + } + } else { + b = strconv.AppendFloat(b, f64, 'f', -1, bitSize) + } + + // add decimal point and trailing zero if needed + if bytes.IndexByte(b, '.') < 0 { + if i := bytes.IndexByte(b, 'e'); i < 0 { + b = append(b, '.', '0') + } else { + b = append(b[:i+2], b[i:]...) + b[i] = '.' + b[i+1] = '0' + } + } + + di.w.WriteString(string(b)) + + if di.dm.floatPrecisionIndicator { + switch ai { + case additionalInformationAsFloat16: + di.w.WriteString("_1") + return nil + + case additionalInformationAsFloat32: + di.w.WriteString("_2") + return nil + + case additionalInformationAsFloat64: + di.w.WriteString("_3") + return nil + } + } + + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/doc.go b/vendor/github.com/fxamacker/cbor/v2/doc.go new file mode 100644 index 0000000000..23f68b984c --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/doc.go @@ -0,0 +1,129 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +/* +Package cbor is a modern CBOR codec (RFC 8949 & RFC 7049) with CBOR tags, +Go struct tags (toarray/keyasint/omitempty), Core Deterministic Encoding, +CTAP2, Canonical CBOR, float64->32->16, and duplicate map key detection. + +Encoding options allow "preferred serialization" by encoding integers and floats +to their smallest forms (e.g. float16) when values fit. + +Struct tags like "keyasint", "toarray" and "omitempty" make CBOR data smaller +and easier to use with structs. + +For example, "toarray" tag makes struct fields encode to CBOR array elements. And +"keyasint" makes a field encode to an element of CBOR map with specified int key. + +Latest docs can be viewed at https://github.com/fxamacker/cbor#cbor-library-in-go + +# Basics + +The Quick Start guide is at https://github.com/fxamacker/cbor#quick-start + +Function signatures identical to encoding/json include: + + Marshal, Unmarshal, NewEncoder, NewDecoder, (*Encoder).Encode, (*Decoder).Decode. + +Standard interfaces include: + + BinaryMarshaler, BinaryUnmarshaler, Marshaler, and Unmarshaler. + +Custom encoding and decoding is possible by implementing standard interfaces for +user-defined Go types. + +Codec functions are available at package-level (using defaults options) or by +creating modes from options at runtime. + +"Mode" in this API means definite way of encoding (EncMode) or decoding (DecMode). + +EncMode and DecMode interfaces are created from EncOptions or DecOptions structs. + + em, err := cbor.EncOptions{...}.EncMode() + em, err := cbor.CanonicalEncOptions().EncMode() + em, err := cbor.CTAP2EncOptions().EncMode() + +Modes use immutable options to avoid side-effects and simplify concurrency. Behavior of +modes won't accidentally change at runtime after they're created. 
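A decoding mode can be built the same way. The sketch below mirrors the encoding-mode examples that follow and assumes the DecOptions.DecMode constructor implied by the text above (named by analogy with EncOptions.EncMode), with an illustrative MaxNestedLevels value:

    // Create reusable DecMode interface with immutable options, safe for concurrent use.
    dm, err := cbor.DecOptions{MaxNestedLevels: 16}.DecMode()

    // Use DecMode like encoding/json, with same function signatures.
    err = dm.Unmarshal(b, &v)
    // or
    decoder := dm.NewDecoder(r)
    err = decoder.Decode(&v)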
+ +Modes are intended to be reused and are safe for concurrent use. + +EncMode and DecMode Interfaces + + // EncMode interface uses immutable options and is safe for concurrent use. + type EncMode interface { + Marshal(v interface{}) ([]byte, error) + NewEncoder(w io.Writer) *Encoder + EncOptions() EncOptions // returns copy of options + } + + // DecMode interface uses immutable options and is safe for concurrent use. + type DecMode interface { + Unmarshal(data []byte, v interface{}) error + NewDecoder(r io.Reader) *Decoder + DecOptions() DecOptions // returns copy of options + } + +Using Default Encoding Mode + + b, err := cbor.Marshal(v) + + encoder := cbor.NewEncoder(w) + err = encoder.Encode(v) + +Using Default Decoding Mode + + err := cbor.Unmarshal(b, &v) + + decoder := cbor.NewDecoder(r) + err = decoder.Decode(&v) + +Creating and Using Encoding Modes + + // Create EncOptions using either struct literal or a function. + opts := cbor.CanonicalEncOptions() + + // If needed, modify encoding options + opts.Time = cbor.TimeUnix + + // Create reusable EncMode interface with immutable options, safe for concurrent use. + em, err := opts.EncMode() + + // Use EncMode like encoding/json, with same function signatures. + b, err := em.Marshal(v) + // or + encoder := em.NewEncoder(w) + err := encoder.Encode(v) + + // NOTE: Both em.Marshal(v) and encoder.Encode(v) use encoding options + // specified during creation of em (encoding mode). + +# CBOR Options + +Predefined Encoding Options: https://github.com/fxamacker/cbor#predefined-encoding-options + +Encoding Options: https://github.com/fxamacker/cbor#encoding-options + +Decoding Options: https://github.com/fxamacker/cbor#decoding-options + +# Struct Tags + +Struct tags like `cbor:"name,omitempty"` and `json:"name,omitempty"` work as expected. +If both struct tags are specified then `cbor` is used. + +Struct tags like "keyasint", "toarray", and "omitempty" make it easy to use +very compact formats like COSE and CWT (CBOR Web Tokens) with structs. + +For example, "toarray" makes struct fields encode to array elements. And "keyasint" +makes struct fields encode to elements of CBOR map with int keys. + +https://raw.githubusercontent.com/fxamacker/images/master/cbor/v2.0.0/cbor_easy_api.png + +Struct tags are listed at https://github.com/fxamacker/cbor#struct-tags-1 + +# Tests and Fuzzing + +Over 375 tests are included in this package. Cover-guided fuzzing is handled by +a private fuzzer that replaced fxamacker/cbor-fuzz years ago. +*/ +package cbor diff --git a/vendor/github.com/fxamacker/cbor/v2/encode.go b/vendor/github.com/fxamacker/cbor/v2/encode.go new file mode 100644 index 0000000000..6508e291d6 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/encode.go @@ -0,0 +1,1989 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "bytes" + "encoding" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "math/big" + "math/rand" + "reflect" + "sort" + "strconv" + "sync" + "time" + + "github.com/x448/float16" +) + +// Marshal returns the CBOR encoding of v using default encoding options. +// See EncOptions for encoding options. +// +// Marshal uses the following encoding rules: +// +// If value implements the Marshaler interface, Marshal calls its +// MarshalCBOR method. +// +// If value implements encoding.BinaryMarshaler, Marhsal calls its +// MarshalBinary method and encode it as CBOR byte string. 
+// +// Boolean values encode as CBOR booleans (type 7). +// +// Positive integer values encode as CBOR positive integers (type 0). +// +// Negative integer values encode as CBOR negative integers (type 1). +// +// Floating point values encode as CBOR floating points (type 7). +// +// String values encode as CBOR text strings (type 3). +// +// []byte values encode as CBOR byte strings (type 2). +// +// Array and slice values encode as CBOR arrays (type 4). +// +// Map values encode as CBOR maps (type 5). +// +// Struct values encode as CBOR maps (type 5). Each exported struct field +// becomes a pair with field name encoded as CBOR text string (type 3) and +// field value encoded based on its type. See struct tag option "keyasint" +// to encode field name as CBOR integer (type 0 and 1). Also see struct +// tag option "toarray" for special field "_" to encode struct values as +// CBOR array (type 4). +// +// Marshal supports format string stored under the "cbor" key in the struct +// field's tag. CBOR format string can specify the name of the field, +// "omitempty" and "keyasint" options, and special case "-" for field omission. +// If "cbor" key is absent, Marshal uses "json" key. +// +// Struct field name is treated as integer if it has "keyasint" option in +// its format string. The format string must specify an integer as its +// field name. +// +// Special struct field "_" is used to specify struct level options, such as +// "toarray". "toarray" option enables Go struct to be encoded as CBOR array. +// "omitempty" is disabled by "toarray" to ensure that the same number +// of elements are encoded every time. +// +// Anonymous struct fields are marshaled as if their exported fields +// were fields in the outer struct. Marshal follows the same struct fields +// visibility rules used by JSON encoding package. +// +// time.Time values encode as text strings specified in RFC3339 or numerical +// representation of seconds since January 1, 1970 UTC depending on +// EncOptions.Time setting. Also See EncOptions.TimeTag to encode +// time.Time as CBOR tag with tag number 0 or 1. +// +// big.Int values encode as CBOR integers (type 0 and 1) if values fit. +// Otherwise, big.Int values encode as CBOR bignums (tag 2 and 3). See +// EncOptions.BigIntConvert to always encode big.Int values as CBOR +// bignums. +// +// Pointer values encode as the value pointed to. +// +// Interface values encode as the value stored in the interface. +// +// Nil slice/map/pointer/interface values encode as CBOR nulls (type 7). +// +// Values of other types cannot be encoded in CBOR. Attempting +// to encode such a value causes Marshal to return an UnsupportedTypeError. +func Marshal(v interface{}) ([]byte, error) { + return defaultEncMode.Marshal(v) +} + +// MarshalToBuffer encodes v into provided buffer (instead of using built-in buffer pool) +// and uses default encoding options. +// +// NOTE: Unlike Marshal, the buffer provided to MarshalToBuffer can contain +// partially encoded data if error is returned. +// +// See Marshal for more details. +func MarshalToBuffer(v interface{}, buf *bytes.Buffer) error { + return defaultEncMode.MarshalToBuffer(v, buf) +} + +// Marshaler is the interface implemented by types that can marshal themselves +// into valid CBOR. +type Marshaler interface { + MarshalCBOR() ([]byte, error) +} + +// MarshalerError represents error from checking encoded CBOR data item +// returned from MarshalCBOR for well-formedness and some very limited tag validation. 
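For illustration, a minimal sketch of a type taking over its own encoding via the Marshaler interface above; the unixSeconds type is illustrative and simply defers to Marshal for the primitive encoding:

    package main

    import (
        "fmt"

        "github.com/fxamacker/cbor/v2"
    )

    // unixSeconds encodes itself as a bare CBOR unsigned integer by
    // implementing cbor.Marshaler.
    type unixSeconds struct {
        sec uint64
    }

    func (u unixSeconds) MarshalCBOR() ([]byte, error) {
        return cbor.Marshal(u.sec)
    }

    func main() {
        b, err := cbor.Marshal(unixSeconds{sec: 42})
        if err != nil {
            panic(err)
        }
        fmt.Printf("% x\n", b) // 18 2a
    }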
+type MarshalerError struct {
+	typ reflect.Type
+	err error
+}
+
+func (e *MarshalerError) Error() string {
+	return "cbor: error calling MarshalCBOR for type " +
+		e.typ.String() +
+		": " + e.err.Error()
+}
+
+func (e *MarshalerError) Unwrap() error {
+	return e.err
+}
+
+// UnsupportedTypeError is returned by Marshal when attempting to encode value
+// of an unsupported type.
+type UnsupportedTypeError struct {
+	Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+	return "cbor: unsupported type: " + e.Type.String()
+}
+
+// UnsupportedValueError is returned by Marshal when attempting to encode an
+// unsupported value.
+type UnsupportedValueError struct {
+	msg string
+}
+
+func (e *UnsupportedValueError) Error() string {
+	return "cbor: unsupported value: " + e.msg
+}
+
+// SortMode identifies supported sorting order.
+type SortMode int
+
+const (
+	// SortNone encodes map pairs and struct fields in an arbitrary order.
+	SortNone SortMode = 0
+
+	// SortLengthFirst causes map keys or struct fields to be sorted such that:
+	//     - If two keys have different lengths, the shorter one sorts earlier;
+	//     - If two keys have the same length, the one with the lower value in
+	//       (byte-wise) lexical order sorts earlier.
+	// It is used in "Canonical CBOR" encoding in RFC 7049 3.9.
+	SortLengthFirst SortMode = 1
+
+	// SortBytewiseLexical causes map keys or struct fields to be sorted in the
+	// bytewise lexicographic order of their deterministic CBOR encodings.
+	// It is used in "CTAP2 Canonical CBOR" and "Core Deterministic Encoding"
+	// in RFC 7049bis.
+	SortBytewiseLexical SortMode = 2
+
+	// SortFastShuffle encodes map pairs and struct fields in a shuffled
+	// order. This mode does not guarantee an unbiased permutation, but it
+	// does guarantee that the runtime of the shuffle algorithm used will be
+	// constant.
+	SortFastShuffle SortMode = 3
+
+	// SortCanonical is used in "Canonical CBOR" encoding in RFC 7049 3.9.
+	SortCanonical SortMode = SortLengthFirst
+
+	// SortCTAP2 is used in "CTAP2 Canonical CBOR".
+	SortCTAP2 SortMode = SortBytewiseLexical
+
+	// SortCoreDeterministic is used in "Core Deterministic Encoding" in RFC 7049bis.
+	SortCoreDeterministic SortMode = SortBytewiseLexical
+
+	maxSortMode SortMode = 4
+)
+
+func (sm SortMode) valid() bool {
+	return sm >= 0 && sm < maxSortMode
+}
+
+// StringMode specifies how to encode Go string values.
+type StringMode int
+
+const (
+	// StringToTextString encodes Go string to CBOR text string (major type 3).
+	StringToTextString StringMode = iota
+
+	// StringToByteString encodes Go string to CBOR byte string (major type 2).
+	StringToByteString
+)
+
+func (st StringMode) cborType() (cborType, error) {
+	switch st {
+	case StringToTextString:
+		return cborTypeTextString, nil
+
+	case StringToByteString:
+		return cborTypeByteString, nil
+	}
+	return 0, errors.New("cbor: invalid StringType " + strconv.Itoa(int(st)))
+}
+
+// ShortestFloatMode specifies which floating-point format should
+// be used as the shortest possible format for CBOR encoding.
+// It is not used for encoding Infinity and NaN values.
+type ShortestFloatMode int
+
+const (
+	// ShortestFloatNone makes float values encode without any conversion.
+	// This is the default for ShortestFloatMode in v1.
+	// E.g. a float32 in Go will encode to CBOR float32. And
+	// a float64 in Go will encode to CBOR float64.
+	ShortestFloatNone ShortestFloatMode = iota
+
+	// ShortestFloat16 specifies float16 as the shortest form that preserves value.
+	// E.g. if float64 can convert to float32 while preserving value, then
+	// encoding will also try to convert float32 to float16. So a float64 might
+	// encode as CBOR float64, float32 or float16 depending on the value.
+	ShortestFloat16
+
+	maxShortestFloat
+)
+
+func (sfm ShortestFloatMode) valid() bool {
+	return sfm >= 0 && sfm < maxShortestFloat
+}
+
+// NaNConvertMode specifies how to encode NaN and overrides ShortestFloatMode.
+// ShortestFloatMode is not used for encoding Infinity and NaN values.
+type NaNConvertMode int
+
+const (
+	// NaNConvert7e00 always encodes NaN to 0xf97e00 (CBOR float16 = 0x7e00).
+	NaNConvert7e00 NaNConvertMode = iota
+
+	// NaNConvertNone never modifies or converts NaN to other representations
+	// (float64 NaN stays float64, etc. even if it can use float16 without losing
+	// any bits).
+	NaNConvertNone
+
+	// NaNConvertPreserveSignal converts NaN to the smallest form that preserves
+	// value (quiet bit + payload) as described in RFC 7049bis Draft 12.
+	NaNConvertPreserveSignal
+
+	// NaNConvertQuiet always forces quiet bit = 1 and shortest form that preserves
+	// NaN payload.
+	NaNConvertQuiet
+
+	// NaNConvertReject returns UnsupportedValueError on attempts to encode a NaN value.
+	NaNConvertReject
+
+	maxNaNConvert
+)
+
+func (ncm NaNConvertMode) valid() bool {
+	return ncm >= 0 && ncm < maxNaNConvert
+}
+
+// InfConvertMode specifies how to encode Infinity and overrides ShortestFloatMode.
+// ShortestFloatMode is not used for encoding Infinity and NaN values.
+type InfConvertMode int
+
+const (
+	// InfConvertFloat16 always converts Inf to lossless IEEE binary16 (float16).
+	InfConvertFloat16 InfConvertMode = iota
+
+	// InfConvertNone never converts (used by CTAP2 Canonical CBOR).
+	InfConvertNone
+
+	// InfConvertReject returns UnsupportedValueError on attempts to encode an infinite value.
+	InfConvertReject
+
+	maxInfConvert
+)
+
+func (icm InfConvertMode) valid() bool {
+	return icm >= 0 && icm < maxInfConvert
+}
+
+// TimeMode specifies how to encode time.Time values.
+type TimeMode int
+
+const (
+	// TimeUnix causes time.Time to be encoded as epoch time in integer with second precision.
+	TimeUnix TimeMode = iota
+
+	// TimeUnixMicro causes time.Time to be encoded as epoch time in floating-point rounded to microsecond precision.
+	TimeUnixMicro
+
+	// TimeUnixDynamic causes time.Time to be encoded as integer if time.Time doesn't have fractional seconds,
+	// otherwise floating-point rounded to microsecond precision.
+	TimeUnixDynamic
+
+	// TimeRFC3339 causes time.Time to be encoded as RFC3339 formatted string with second precision.
+	TimeRFC3339
+
+	// TimeRFC3339Nano causes time.Time to be encoded as RFC3339 formatted string with nanosecond precision.
+	TimeRFC3339Nano
+
+	maxTimeMode
+)
+
+func (tm TimeMode) valid() bool {
+	return tm >= 0 && tm < maxTimeMode
+}
+
+// BigIntConvertMode specifies how to encode big.Int values.
+type BigIntConvertMode int
+
+const (
+	// BigIntConvertShortest makes big.Int encode to CBOR integer if value fits.
+	// E.g. if big.Int value can be converted to CBOR integer while preserving
+	// value, encoder will encode it to CBOR integer (major type 0 or 1).
+	BigIntConvertShortest BigIntConvertMode = iota
+
+	// BigIntConvertNone makes big.Int encode to CBOR bignum (tag 2 or 3) without
+	// converting it to another CBOR type.
+	BigIntConvertNone
+
+	// BigIntConvertReject returns an UnsupportedTypeError instead of marshaling a big.Int.
+	BigIntConvertReject
+
+	maxBigIntConvert
+)
+
+func (bim BigIntConvertMode) valid() bool {
+	return bim >= 0 && bim < maxBigIntConvert
+}
+
+// NilContainersMode specifies how to encode nil slices and maps.
+type NilContainersMode int
+
+const (
+	// NilContainerAsNull encodes nil slices and maps as CBOR null.
+	// This is the default.
+	NilContainerAsNull NilContainersMode = iota
+
+	// NilContainerAsEmpty encodes nil slices and maps as
+	// empty container (CBOR bytestring, array, or map).
+	NilContainerAsEmpty
+
+	maxNilContainersMode
+)
+
+func (m NilContainersMode) valid() bool {
+	return m >= 0 && m < maxNilContainersMode
+}
+
+// OmitEmptyMode specifies how to encode struct fields with omitempty tag.
+// The default behavior omits if field value would encode as empty CBOR value.
+type OmitEmptyMode int
+
+const (
+	// OmitEmptyCBORValue specifies that struct fields tagged with "omitempty"
+	// should be omitted from encoding if the field would be encoded as an empty
+	// CBOR value, such as CBOR false, 0, 0.0, nil, empty byte, empty string,
+	// empty array, or empty map.
+	OmitEmptyCBORValue OmitEmptyMode = iota
+
+	// OmitEmptyGoValue specifies that struct fields tagged with "omitempty"
+	// should be omitted from encoding if the field has an empty Go value,
+	// defined as false, 0, 0.0, a nil pointer, a nil interface value, and
+	// any empty array, slice, map, or string.
+	// This behavior is the same as the current (aka v1) encoding/json package
+	// included in Go.
+	OmitEmptyGoValue
+
+	maxOmitEmptyMode
+)
+
+func (om OmitEmptyMode) valid() bool {
+	return om >= 0 && om < maxOmitEmptyMode
+}
+
+// FieldNameMode specifies the CBOR type to use when encoding struct field names.
+type FieldNameMode int
+
+const (
+	// FieldNameToTextString encodes struct field names to CBOR text string (major type 3).
+	FieldNameToTextString FieldNameMode = iota
+
+	// FieldNameToByteString encodes struct field names to CBOR byte string (major type 2).
+	FieldNameToByteString
+
+	maxFieldNameMode
+)
+
+func (fnm FieldNameMode) valid() bool {
+	return fnm >= 0 && fnm < maxFieldNameMode
+}
+
+// ByteSliceLaterFormatMode specifies which later format conversion hint (CBOR tag 21-23)
+// to include (if any) when encoding Go byte slice to CBOR byte string. The encoder will
+// always encode unmodified bytes from the byte slice and just wrap it within
+// CBOR tag 21, 22, or 23 if specified.
+// See "Expected Later Encoding for CBOR-to-JSON Converters" in RFC 8949 Section 3.4.5.2.
+type ByteSliceLaterFormatMode int
+
+const (
+	// ByteSliceLaterFormatNone encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+	// without adding CBOR tag 21, 22, or 23.
+	ByteSliceLaterFormatNone ByteSliceLaterFormatMode = iota
+
+	// ByteSliceLaterFormatBase64URL encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+	// inside CBOR tag 21 (expected later conversion to base64url encoding, see RFC 8949 Section 3.4.5.2).
+	ByteSliceLaterFormatBase64URL
+
+	// ByteSliceLaterFormatBase64 encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+	// inside CBOR tag 22 (expected later conversion to base64 encoding, see RFC 8949 Section 3.4.5.2).
+	ByteSliceLaterFormatBase64
+
+	// ByteSliceLaterFormatBase16 encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+	// inside CBOR tag 23 (expected later conversion to base16 encoding, see RFC 8949 Section 3.4.5.2).
+ ByteSliceLaterFormatBase16 +) + +func (bsefm ByteSliceLaterFormatMode) encodingTag() (uint64, error) { + switch bsefm { + case ByteSliceLaterFormatNone: + return 0, nil + + case ByteSliceLaterFormatBase64URL: + return tagNumExpectedLaterEncodingBase64URL, nil + + case ByteSliceLaterFormatBase64: + return tagNumExpectedLaterEncodingBase64, nil + + case ByteSliceLaterFormatBase16: + return tagNumExpectedLaterEncodingBase16, nil + } + return 0, errors.New("cbor: invalid ByteSliceLaterFormat " + strconv.Itoa(int(bsefm))) +} + +// ByteArrayMode specifies how to encode byte arrays. +type ByteArrayMode int + +const ( + // ByteArrayToByteSlice encodes byte arrays the same way that a byte slice with identical + // length and contents is encoded. + ByteArrayToByteSlice ByteArrayMode = iota + + // ByteArrayToArray encodes byte arrays to the CBOR array type with one unsigned integer + // item for each byte in the array. + ByteArrayToArray + + maxByteArrayMode +) + +func (bam ByteArrayMode) valid() bool { + return bam >= 0 && bam < maxByteArrayMode +} + +// BinaryMarshalerMode specifies how to encode types that implement encoding.BinaryMarshaler. +type BinaryMarshalerMode int + +const ( + // BinaryMarshalerByteString encodes the output of MarshalBinary to a CBOR byte string. + BinaryMarshalerByteString BinaryMarshalerMode = iota + + // BinaryMarshalerNone does not recognize BinaryMarshaler implementations during encode. + BinaryMarshalerNone + + maxBinaryMarshalerMode +) + +func (bmm BinaryMarshalerMode) valid() bool { + return bmm >= 0 && bmm < maxBinaryMarshalerMode +} + +// EncOptions specifies encoding options. +type EncOptions struct { + // Sort specifies sorting order. + Sort SortMode + + // ShortestFloat specifies the shortest floating-point encoding that preserves + // the value being encoded. + ShortestFloat ShortestFloatMode + + // NaNConvert specifies how to encode NaN and it overrides ShortestFloatMode. + NaNConvert NaNConvertMode + + // InfConvert specifies how to encode Inf and it overrides ShortestFloatMode. + InfConvert InfConvertMode + + // BigIntConvert specifies how to encode big.Int values. + BigIntConvert BigIntConvertMode + + // Time specifies how to encode time.Time. + Time TimeMode + + // TimeTag allows time.Time to be encoded with a tag number. + // RFC3339 format gets tag number 0, and numeric epoch time tag number 1. + TimeTag EncTagMode + + // IndefLength specifies whether to allow indefinite length CBOR items. + IndefLength IndefLengthMode + + // NilContainers specifies how to encode nil slices and maps. + NilContainers NilContainersMode + + // TagsMd specifies whether to allow CBOR tags (major type 6). + TagsMd TagsMode + + // OmitEmptyMode specifies how to encode struct fields with omitempty tag. + OmitEmpty OmitEmptyMode + + // String specifies which CBOR type to use when encoding Go strings. + // - CBOR text string (major type 3) is default + // - CBOR byte string (major type 2) + String StringMode + + // FieldName specifies the CBOR type to use when encoding struct field names. + FieldName FieldNameMode + + // ByteSliceLaterFormat specifies which later format conversion hint (CBOR tag 21-23) + // to include (if any) when encoding Go byte slice to CBOR byte string. The encoder will + // always encode unmodified bytes from the byte slice and just wrap it within + // CBOR tag 21, 22, or 23 if specified. + // See "Expected Later Encoding for CBOR-to-JSON Converters" in RFC 8949 Section 3.4.5.2. 
+ ByteSliceLaterFormat ByteSliceLaterFormatMode + + // ByteArray specifies how to encode byte arrays. + ByteArray ByteArrayMode + + // BinaryMarshaler specifies how to encode types that implement encoding.BinaryMarshaler. + BinaryMarshaler BinaryMarshalerMode +} + +// CanonicalEncOptions returns EncOptions for "Canonical CBOR" encoding, +// defined in RFC 7049 Section 3.9 with the following rules: +// +// 1. "Integers must be as small as possible." +// 2. "The expression of lengths in major types 2 through 5 must be as short as possible." +// 3. The keys in every map must be sorted in length-first sorting order. +// See SortLengthFirst for details. +// 4. "Indefinite-length items must be made into definite-length items." +// 5. "If a protocol allows for IEEE floats, then additional canonicalization rules might +// need to be added. One example rule might be to have all floats start as a 64-bit +// float, then do a test conversion to a 32-bit float; if the result is the same numeric +// value, use the shorter value and repeat the process with a test conversion to a +// 16-bit float. (This rule selects 16-bit float for positive and negative Infinity +// as well.) Also, there are many representations for NaN. If NaN is an allowed value, +// it must always be represented as 0xf97e00." +func CanonicalEncOptions() EncOptions { + return EncOptions{ + Sort: SortCanonical, + ShortestFloat: ShortestFloat16, + NaNConvert: NaNConvert7e00, + InfConvert: InfConvertFloat16, + IndefLength: IndefLengthForbidden, + } +} + +// CTAP2EncOptions returns EncOptions for "CTAP2 Canonical CBOR" encoding, +// defined in CTAP specification, with the following rules: +// +// 1. "Integers must be encoded as small as possible." +// 2. "The representations of any floating-point values are not changed." +// 3. "The expression of lengths in major types 2 through 5 must be as short as possible." +// 4. "Indefinite-length items must be made into definite-length items."" +// 5. The keys in every map must be sorted in bytewise lexicographic order. +// See SortBytewiseLexical for details. +// 6. "Tags as defined in Section 2.4 in [RFC7049] MUST NOT be present." +func CTAP2EncOptions() EncOptions { + return EncOptions{ + Sort: SortCTAP2, + ShortestFloat: ShortestFloatNone, + NaNConvert: NaNConvertNone, + InfConvert: InfConvertNone, + IndefLength: IndefLengthForbidden, + TagsMd: TagsForbidden, + } +} + +// CoreDetEncOptions returns EncOptions for "Core Deterministic" encoding, +// defined in RFC 7049bis with the following rules: +// +// 1. "Preferred serialization MUST be used. In particular, this means that arguments +// (see Section 3) for integers, lengths in major types 2 through 5, and tags MUST +// be as short as possible" +// "Floating point values also MUST use the shortest form that preserves the value" +// 2. "Indefinite-length items MUST NOT appear." +// 3. "The keys in every map MUST be sorted in the bytewise lexicographic order of +// their deterministic encodings." +func CoreDetEncOptions() EncOptions { + return EncOptions{ + Sort: SortCoreDeterministic, + ShortestFloat: ShortestFloat16, + NaNConvert: NaNConvert7e00, + InfConvert: InfConvertFloat16, + IndefLength: IndefLengthForbidden, + } +} + +// PreferredUnsortedEncOptions returns EncOptions for "Preferred Serialization" encoding, +// defined in RFC 7049bis with the following rules: +// +// 1. "The preferred serialization always uses the shortest form of representing the argument +// (Section 3);" +// 2. 
"it also uses the shortest floating-point encoding that preserves the value being +// encoded (see Section 5.5)." +// "The preferred encoding for a floating-point value is the shortest floating-point encoding +// that preserves its value, e.g., 0xf94580 for the number 5.5, and 0xfa45ad9c00 for the +// number 5555.5, unless the CBOR-based protocol specifically excludes the use of the shorter +// floating-point encodings. For NaN values, a shorter encoding is preferred if zero-padding +// the shorter significand towards the right reconstitutes the original NaN value (for many +// applications, the single NaN encoding 0xf97e00 will suffice)." +// 3. "Definite length encoding is preferred whenever the length is known at the time the +// serialization of the item starts." +func PreferredUnsortedEncOptions() EncOptions { + return EncOptions{ + Sort: SortNone, + ShortestFloat: ShortestFloat16, + NaNConvert: NaNConvert7e00, + InfConvert: InfConvertFloat16, + } +} + +// EncMode returns EncMode with immutable options and no tags (safe for concurrency). +func (opts EncOptions) EncMode() (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.encMode() +} + +// UserBufferEncMode returns UserBufferEncMode with immutable options and no tags (safe for concurrency). +func (opts EncOptions) UserBufferEncMode() (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.encMode() +} + +// EncModeWithTags returns EncMode with options and tags that are both immutable (safe for concurrency). +func (opts EncOptions) EncModeWithTags(tags TagSet) (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.UserBufferEncModeWithTags(tags) +} + +// UserBufferEncModeWithTags returns UserBufferEncMode with options and tags that are both immutable (safe for concurrency). +func (opts EncOptions) UserBufferEncModeWithTags(tags TagSet) (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return nil, errors.New("cbor: cannot create EncMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return nil, errors.New("cbor: cannot create EncMode with nil value as TagSet") + } + em, err := opts.encMode() + if err != nil { + return nil, err + } + // Copy tags + ts := tagSet(make(map[reflect.Type]*tagItem)) + syncTags := tags.(*syncTagSet) + syncTags.RLock() + for contentType, tag := range syncTags.t { + if tag.opts.EncTag != EncTagNone { + ts[contentType] = tag + } + } + syncTags.RUnlock() + if len(ts) > 0 { + em.tags = ts + } + return em, nil +} + +// EncModeWithSharedTags returns EncMode with immutable options and mutable shared tags (safe for concurrency). +func (opts EncOptions) EncModeWithSharedTags(tags TagSet) (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.UserBufferEncModeWithSharedTags(tags) +} + +// UserBufferEncModeWithSharedTags returns UserBufferEncMode with immutable options and mutable shared tags (safe for concurrency). 
+func (opts EncOptions) UserBufferEncModeWithSharedTags(tags TagSet) (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return nil, errors.New("cbor: cannot create EncMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return nil, errors.New("cbor: cannot create EncMode with nil value as TagSet") + } + em, err := opts.encMode() + if err != nil { + return nil, err + } + em.tags = tags + return em, nil +} + +func (opts EncOptions) encMode() (*encMode, error) { //nolint:gocritic // ignore hugeParam + if !opts.Sort.valid() { + return nil, errors.New("cbor: invalid SortMode " + strconv.Itoa(int(opts.Sort))) + } + if !opts.ShortestFloat.valid() { + return nil, errors.New("cbor: invalid ShortestFloatMode " + strconv.Itoa(int(opts.ShortestFloat))) + } + if !opts.NaNConvert.valid() { + return nil, errors.New("cbor: invalid NaNConvertMode " + strconv.Itoa(int(opts.NaNConvert))) + } + if !opts.InfConvert.valid() { + return nil, errors.New("cbor: invalid InfConvertMode " + strconv.Itoa(int(opts.InfConvert))) + } + if !opts.BigIntConvert.valid() { + return nil, errors.New("cbor: invalid BigIntConvertMode " + strconv.Itoa(int(opts.BigIntConvert))) + } + if !opts.Time.valid() { + return nil, errors.New("cbor: invalid TimeMode " + strconv.Itoa(int(opts.Time))) + } + if !opts.TimeTag.valid() { + return nil, errors.New("cbor: invalid TimeTag " + strconv.Itoa(int(opts.TimeTag))) + } + if !opts.IndefLength.valid() { + return nil, errors.New("cbor: invalid IndefLength " + strconv.Itoa(int(opts.IndefLength))) + } + if !opts.NilContainers.valid() { + return nil, errors.New("cbor: invalid NilContainers " + strconv.Itoa(int(opts.NilContainers))) + } + if !opts.TagsMd.valid() { + return nil, errors.New("cbor: invalid TagsMd " + strconv.Itoa(int(opts.TagsMd))) + } + if opts.TagsMd == TagsForbidden && opts.TimeTag == EncTagRequired { + return nil, errors.New("cbor: cannot set TagsMd to TagsForbidden when TimeTag is EncTagRequired") + } + if !opts.OmitEmpty.valid() { + return nil, errors.New("cbor: invalid OmitEmpty " + strconv.Itoa(int(opts.OmitEmpty))) + } + stringMajorType, err := opts.String.cborType() + if err != nil { + return nil, err + } + if !opts.FieldName.valid() { + return nil, errors.New("cbor: invalid FieldName " + strconv.Itoa(int(opts.FieldName))) + } + byteSliceLaterEncodingTag, err := opts.ByteSliceLaterFormat.encodingTag() + if err != nil { + return nil, err + } + if !opts.ByteArray.valid() { + return nil, errors.New("cbor: invalid ByteArray " + strconv.Itoa(int(opts.ByteArray))) + } + if !opts.BinaryMarshaler.valid() { + return nil, errors.New("cbor: invalid BinaryMarshaler " + strconv.Itoa(int(opts.BinaryMarshaler))) + } + em := encMode{ + sort: opts.Sort, + shortestFloat: opts.ShortestFloat, + nanConvert: opts.NaNConvert, + infConvert: opts.InfConvert, + bigIntConvert: opts.BigIntConvert, + time: opts.Time, + timeTag: opts.TimeTag, + indefLength: opts.IndefLength, + nilContainers: opts.NilContainers, + tagsMd: opts.TagsMd, + omitEmpty: opts.OmitEmpty, + stringType: opts.String, + stringMajorType: stringMajorType, + fieldName: opts.FieldName, + byteSliceLaterFormat: opts.ByteSliceLaterFormat, + byteSliceLaterEncodingTag: byteSliceLaterEncodingTag, + byteArray: opts.ByteArray, + binaryMarshaler: opts.BinaryMarshaler, + } + return &em, nil +} + +// EncMode is the main interface for CBOR encoding. 
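+// A minimal usage sketch (mirroring the package-level documentation):
+//
+//	em, err := cbor.CoreDetEncOptions().EncMode() // immutable, safe for concurrent use
+//	if err != nil {
+//		// handle error
+//	}
+//	b, err := em.Marshal(v) // encodes v using em's options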
+type EncMode interface { + Marshal(v interface{}) ([]byte, error) + NewEncoder(w io.Writer) *Encoder + EncOptions() EncOptions +} + +// UserBufferEncMode is an interface for CBOR encoding, which extends EncMode by +// adding MarshalToBuffer to support user specified buffer rather than encoding +// into the built-in buffer pool. +type UserBufferEncMode interface { + EncMode + MarshalToBuffer(v interface{}, buf *bytes.Buffer) error + + // This private method is to prevent users implementing + // this interface and so future additions to it will + // not be breaking changes. + // See https://go.dev/blog/module-compatibility + unexport() +} + +type encMode struct { + tags tagProvider + sort SortMode + shortestFloat ShortestFloatMode + nanConvert NaNConvertMode + infConvert InfConvertMode + bigIntConvert BigIntConvertMode + time TimeMode + timeTag EncTagMode + indefLength IndefLengthMode + nilContainers NilContainersMode + tagsMd TagsMode + omitEmpty OmitEmptyMode + stringType StringMode + stringMajorType cborType + fieldName FieldNameMode + byteSliceLaterFormat ByteSliceLaterFormatMode + byteSliceLaterEncodingTag uint64 + byteArray ByteArrayMode + binaryMarshaler BinaryMarshalerMode +} + +var defaultEncMode, _ = EncOptions{}.encMode() + +// These four decoding modes are used by getMarshalerDecMode. +// maxNestedLevels, maxArrayElements, and maxMapPairs are +// set to max allowed limits to avoid rejecting Marshaler +// output that would have been the allowable output of a +// non-Marshaler object that exceeds default limits. +var ( + marshalerForbidIndefLengthForbidTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthForbidden, + tagsMd: TagsForbidden, + } + + marshalerAllowIndefLengthForbidTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthAllowed, + tagsMd: TagsForbidden, + } + + marshalerForbidIndefLengthAllowTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthForbidden, + tagsMd: TagsAllowed, + } + + marshalerAllowIndefLengthAllowTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthAllowed, + tagsMd: TagsAllowed, + } +) + +// getMarshalerDecMode returns one of four existing decoding modes +// which can be reused (safe for parallel use) for the purpose of +// checking if data returned by Marshaler is well-formed. +func getMarshalerDecMode(indefLength IndefLengthMode, tagsMd TagsMode) *decMode { + switch { + case indefLength == IndefLengthAllowed && tagsMd == TagsAllowed: + return &marshalerAllowIndefLengthAllowTagsDecMode + + case indefLength == IndefLengthAllowed && tagsMd == TagsForbidden: + return &marshalerAllowIndefLengthForbidTagsDecMode + + case indefLength == IndefLengthForbidden && tagsMd == TagsAllowed: + return &marshalerForbidIndefLengthAllowTagsDecMode + + case indefLength == IndefLengthForbidden && tagsMd == TagsForbidden: + return &marshalerForbidIndefLengthForbidTagsDecMode + + default: + // This should never happen, unless we add new options to + // IndefLengthMode or TagsMode without updating this function. 
+ return &decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: indefLength, + tagsMd: tagsMd, + } + } +} + +// EncOptions returns user specified options used to create this EncMode. +func (em *encMode) EncOptions() EncOptions { + return EncOptions{ + Sort: em.sort, + ShortestFloat: em.shortestFloat, + NaNConvert: em.nanConvert, + InfConvert: em.infConvert, + BigIntConvert: em.bigIntConvert, + Time: em.time, + TimeTag: em.timeTag, + IndefLength: em.indefLength, + NilContainers: em.nilContainers, + TagsMd: em.tagsMd, + OmitEmpty: em.omitEmpty, + String: em.stringType, + FieldName: em.fieldName, + ByteSliceLaterFormat: em.byteSliceLaterFormat, + ByteArray: em.byteArray, + BinaryMarshaler: em.binaryMarshaler, + } +} + +func (em *encMode) unexport() {} + +func (em *encMode) encTagBytes(t reflect.Type) []byte { + if em.tags != nil { + if tagItem := em.tags.getTagItemFromType(t); tagItem != nil { + return tagItem.cborTagNum + } + } + return nil +} + +// Marshal returns the CBOR encoding of v using em encoding mode. +// +// See the documentation for Marshal for details. +func (em *encMode) Marshal(v interface{}) ([]byte, error) { + e := getEncodeBuffer() + + if err := encode(e, em, reflect.ValueOf(v)); err != nil { + putEncodeBuffer(e) + return nil, err + } + + buf := make([]byte, e.Len()) + copy(buf, e.Bytes()) + + putEncodeBuffer(e) + return buf, nil +} + +// MarshalToBuffer encodes v into provided buffer (instead of using built-in buffer pool) +// and uses em encoding mode. +// +// NOTE: Unlike Marshal, the buffer provided to MarshalToBuffer can contain +// partially encoded data if error is returned. +// +// See Marshal for more details. +func (em *encMode) MarshalToBuffer(v interface{}, buf *bytes.Buffer) error { + if buf == nil { + return fmt.Errorf("cbor: encoding buffer provided by user is nil") + } + return encode(buf, em, reflect.ValueOf(v)) +} + +// NewEncoder returns a new encoder that writes to w using em EncMode. +func (em *encMode) NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: w, em: em} +} + +// encodeBufferPool caches unused bytes.Buffer objects for later reuse. 
+var encodeBufferPool = sync.Pool{ + New: func() interface{} { + e := new(bytes.Buffer) + e.Grow(32) // TODO: make this configurable + return e + }, +} + +func getEncodeBuffer() *bytes.Buffer { + return encodeBufferPool.Get().(*bytes.Buffer) +} + +func putEncodeBuffer(e *bytes.Buffer) { + e.Reset() + encodeBufferPool.Put(e) +} + +type encodeFunc func(e *bytes.Buffer, em *encMode, v reflect.Value) error +type isEmptyFunc func(em *encMode, v reflect.Value) (empty bool, err error) + +func encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if !v.IsValid() { + // v is zero value + e.Write(cborNil) + return nil + } + vt := v.Type() + f, _ := getEncodeFunc(vt) + if f == nil { + return &UnsupportedTypeError{vt} + } + + return f(e, em, v) +} + +func encodeBool(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + b := cborFalse + if v.Bool() { + b = cborTrue + } + e.Write(b) + return nil +} + +func encodeInt(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + i := v.Int() + if i >= 0 { + encodeHead(e, byte(cborTypePositiveInt), uint64(i)) + return nil + } + i = i*(-1) - 1 + encodeHead(e, byte(cborTypeNegativeInt), uint64(i)) + return nil +} + +func encodeUint(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + encodeHead(e, byte(cborTypePositiveInt), v.Uint()) + return nil +} + +func encodeFloat(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + f64 := v.Float() + if math.IsNaN(f64) { + return encodeNaN(e, em, v) + } + if math.IsInf(f64, 0) { + return encodeInf(e, em, v) + } + fopt := em.shortestFloat + if v.Kind() == reflect.Float64 && (fopt == ShortestFloatNone || cannotFitFloat32(f64)) { + // Encode float64 + // Don't use encodeFloat64() because it cannot be inlined. + const argumentSize = 8 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | byte(additionalInformationAsFloat64) + binary.BigEndian.PutUint64(scratch[1:], math.Float64bits(f64)) + e.Write(scratch[:]) + return nil + } + + f32 := float32(f64) + if fopt == ShortestFloat16 { + var f16 float16.Float16 + p := float16.PrecisionFromfloat32(f32) + if p == float16.PrecisionExact { + // Roundtrip float32->float16->float32 test isn't needed. + f16 = float16.Fromfloat32(f32) + } else if p == float16.PrecisionUnknown { + // Try roundtrip float32->float16->float32 to determine if float32 can fit into float16. + f16 = float16.Fromfloat32(f32) + if f16.Float32() == f32 { + p = float16.PrecisionExact + } + } + if p == float16.PrecisionExact { + // Encode float16 + // Don't use encodeFloat16() because it cannot be inlined. + const argumentSize = 2 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat16 + binary.BigEndian.PutUint16(scratch[1:], uint16(f16)) + e.Write(scratch[:]) + return nil + } + } + + // Encode float32 + // Don't use encodeFloat32() because it cannot be inlined. 
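+	// For example, 5555.5 fits exactly in float32 but not in float16, so with
+	// ShortestFloat16 it reaches this point and encodes as 0xfa 0x45 0xad 0x9c 0x00
+	// (matching the 0xfa45ad9c00 example quoted in the PreferredUnsortedEncOptions doc).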
+ const argumentSize = 4 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat32 + binary.BigEndian.PutUint32(scratch[1:], math.Float32bits(f32)) + e.Write(scratch[:]) + return nil +} + +func encodeInf(e *bytes.Buffer, em *encMode, v reflect.Value) error { + f64 := v.Float() + switch em.infConvert { + case InfConvertReject: + return &UnsupportedValueError{msg: "floating-point infinity"} + + case InfConvertFloat16: + if f64 > 0 { + e.Write(cborPositiveInfinity) + } else { + e.Write(cborNegativeInfinity) + } + return nil + } + if v.Kind() == reflect.Float64 { + return encodeFloat64(e, f64) + } + return encodeFloat32(e, float32(f64)) +} + +func encodeNaN(e *bytes.Buffer, em *encMode, v reflect.Value) error { + switch em.nanConvert { + case NaNConvert7e00: + e.Write(cborNaN) + return nil + + case NaNConvertNone: + if v.Kind() == reflect.Float64 { + return encodeFloat64(e, v.Float()) + } + f32 := float32NaNFromReflectValue(v) + return encodeFloat32(e, f32) + + case NaNConvertReject: + return &UnsupportedValueError{msg: "floating-point NaN"} + + default: // NaNConvertPreserveSignal, NaNConvertQuiet + if v.Kind() == reflect.Float64 { + f64 := v.Float() + f64bits := math.Float64bits(f64) + if em.nanConvert == NaNConvertQuiet && f64bits&(1<<51) == 0 { + f64bits |= 1 << 51 // Set quiet bit = 1 + f64 = math.Float64frombits(f64bits) + } + // The lower 29 bits are dropped when converting from float64 to float32. + if f64bits&0x1fffffff != 0 { + // Encode NaN as float64 because dropped coef bits from float64 to float32 are not all 0s. + return encodeFloat64(e, f64) + } + // Create float32 from float64 manually because float32(f64) always turns on NaN's quiet bits. + sign := uint32(f64bits>>32) & (1 << 31) + exp := uint32(0x7f800000) + coef := uint32((f64bits & 0xfffffffffffff) >> 29) + f32bits := sign | exp | coef + f32 := math.Float32frombits(f32bits) + // The lower 13 bits are dropped when converting from float32 to float16. + if f32bits&0x1fff != 0 { + // Encode NaN as float32 because dropped coef bits from float32 to float16 are not all 0s. + return encodeFloat32(e, f32) + } + // Encode NaN as float16 + f16, _ := float16.FromNaN32ps(f32) // Ignore err because it only returns error when f32 is not a NaN. + return encodeFloat16(e, f16) + } + + f32 := float32NaNFromReflectValue(v) + f32bits := math.Float32bits(f32) + if em.nanConvert == NaNConvertQuiet && f32bits&(1<<22) == 0 { + f32bits |= 1 << 22 // Set quiet bit = 1 + f32 = math.Float32frombits(f32bits) + } + // The lower 13 bits are dropped coef bits when converting from float32 to float16. + if f32bits&0x1fff != 0 { + // Encode NaN as float32 because dropped coef bits from float32 to float16 are not all 0s. + return encodeFloat32(e, f32) + } + f16, _ := float16.FromNaN32ps(f32) // Ignore err because it only returns error when f32 is not a NaN. 
+ return encodeFloat16(e, f16) + } +} + +func encodeFloat16(e *bytes.Buffer, f16 float16.Float16) error { + const argumentSize = 2 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat16 + binary.BigEndian.PutUint16(scratch[1:], uint16(f16)) + e.Write(scratch[:]) + return nil +} + +func encodeFloat32(e *bytes.Buffer, f32 float32) error { + const argumentSize = 4 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat32 + binary.BigEndian.PutUint32(scratch[1:], math.Float32bits(f32)) + e.Write(scratch[:]) + return nil +} + +func encodeFloat64(e *bytes.Buffer, f64 float64) error { + const argumentSize = 8 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat64 + binary.BigEndian.PutUint64(scratch[1:], math.Float64bits(f64)) + e.Write(scratch[:]) + return nil +} + +func encodeByteString(e *bytes.Buffer, em *encMode, v reflect.Value) error { + vk := v.Kind() + if vk == reflect.Slice && v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if vk == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 && em.byteSliceLaterEncodingTag != 0 { + encodeHead(e, byte(cborTypeTag), em.byteSliceLaterEncodingTag) + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + slen := v.Len() + if slen == 0 { + return e.WriteByte(byte(cborTypeByteString)) + } + encodeHead(e, byte(cborTypeByteString), uint64(slen)) + if vk == reflect.Array { + for i := 0; i < slen; i++ { + e.WriteByte(byte(v.Index(i).Uint())) + } + return nil + } + e.Write(v.Bytes()) + return nil +} + +func encodeString(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + s := v.String() + encodeHead(e, byte(em.stringMajorType), uint64(len(s))) + e.WriteString(s) + return nil +} + +type arrayEncodeFunc struct { + f encodeFunc +} + +func (ae arrayEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.byteArray == ByteArrayToByteSlice && v.Type().Elem().Kind() == reflect.Uint8 { + return encodeByteString(e, em, v) + } + if v.Kind() == reflect.Slice && v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + alen := v.Len() + if alen == 0 { + return e.WriteByte(byte(cborTypeArray)) + } + encodeHead(e, byte(cborTypeArray), uint64(alen)) + for i := 0; i < alen; i++ { + if err := ae.f(e, em, v.Index(i)); err != nil { + return err + } + } + return nil +} + +// encodeKeyValueFunc encodes key/value pairs in map (v). +// If kvs is provided (having the same length as v), length of encoded key and value are stored in kvs. +// kvs is used for canonical encoding of map. 
+type encodeKeyValueFunc func(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error + +type mapEncodeFunc struct { + e encodeKeyValueFunc +} + +func (me mapEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + mlen := v.Len() + if mlen == 0 { + return e.WriteByte(byte(cborTypeMap)) + } + + encodeHead(e, byte(cborTypeMap), uint64(mlen)) + if em.sort == SortNone || em.sort == SortFastShuffle || mlen <= 1 { + return me.e(e, em, v, nil) + } + + kvsp := getKeyValues(v.Len()) // for sorting keys + defer putKeyValues(kvsp) + kvs := *kvsp + + kvBeginOffset := e.Len() + if err := me.e(e, em, v, kvs); err != nil { + return err + } + kvTotalLen := e.Len() - kvBeginOffset + + // Use the capacity at the tail of the encode buffer as a staging area to rearrange the + // encoded pairs into sorted order. + e.Grow(kvTotalLen) + tmp := e.Bytes()[e.Len() : e.Len()+kvTotalLen] // Can use e.AvailableBuffer() in Go 1.21+. + dst := e.Bytes()[kvBeginOffset:] + + if em.sort == SortBytewiseLexical { + sort.Sort(&bytewiseKeyValueSorter{kvs: kvs, data: dst}) + } else { + sort.Sort(&lengthFirstKeyValueSorter{kvs: kvs, data: dst}) + } + + // This is where the encoded bytes are actually rearranged in the output buffer to reflect + // the desired order. + sortedOffset := 0 + for _, kv := range kvs { + copy(tmp[sortedOffset:], dst[kv.offset:kv.nextOffset]) + sortedOffset += kv.nextOffset - kv.offset + } + copy(dst, tmp[:kvTotalLen]) + + return nil + +} + +// keyValue is the position of an encoded pair in a buffer. All offsets are zero-based and relative +// to the first byte of the first encoded pair. +type keyValue struct { + offset int + valueOffset int + nextOffset int +} + +type bytewiseKeyValueSorter struct { + kvs []keyValue + data []byte +} + +func (x *bytewiseKeyValueSorter) Len() int { + return len(x.kvs) +} + +func (x *bytewiseKeyValueSorter) Swap(i, j int) { + x.kvs[i], x.kvs[j] = x.kvs[j], x.kvs[i] +} + +func (x *bytewiseKeyValueSorter) Less(i, j int) bool { + kvi, kvj := x.kvs[i], x.kvs[j] + return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 +} + +type lengthFirstKeyValueSorter struct { + kvs []keyValue + data []byte +} + +func (x *lengthFirstKeyValueSorter) Len() int { + return len(x.kvs) +} + +func (x *lengthFirstKeyValueSorter) Swap(i, j int) { + x.kvs[i], x.kvs[j] = x.kvs[j], x.kvs[i] +} + +func (x *lengthFirstKeyValueSorter) Less(i, j int) bool { + kvi, kvj := x.kvs[i], x.kvs[j] + if keyLengthDifference := (kvi.valueOffset - kvi.offset) - (kvj.valueOffset - kvj.offset); keyLengthDifference != 0 { + return keyLengthDifference < 0 + } + return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 +} + +var keyValuePool = sync.Pool{} + +func getKeyValues(length int) *[]keyValue { + v := keyValuePool.Get() + if v == nil { + y := make([]keyValue, length) + return &y + } + x := v.(*[]keyValue) + if cap(*x) >= length { + *x = (*x)[:length] + return x + } + // []keyValue from the pool does not have enough capacity. + // Return it back to the pool and create a new one. 
+ keyValuePool.Put(x) + y := make([]keyValue, length) + return &y +} + +func putKeyValues(x *[]keyValue) { + *x = (*x)[:0] + keyValuePool.Put(x) +} + +func encodeStructToArray(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return err + } + + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + + flds := structType.fields + + encodeHead(e, byte(cborTypeArray), uint64(len(flds))) + for i := 0; i < len(flds); i++ { + f := flds[i] + + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Write CBOR nil for null pointer to embedded struct + e.Write(cborNil) + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + + if err := f.ef(e, em, fv); err != nil { + return err + } + } + return nil +} + +func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return err + } + + flds := structType.getFields(em) + + start := 0 + if em.sort == SortFastShuffle && len(flds) > 0 { + start = rand.Intn(len(flds)) //nolint:gosec // Don't need a CSPRNG for deck cutting. + } + + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + + // Encode head with struct field count. + // Head is rewritten later if actual encoded field count is different from struct field count. + encodedHeadLen := encodeHead(e, byte(cborTypeMap), uint64(len(flds))) + + kvbegin := e.Len() + kvcount := 0 + for offset := 0; offset < len(flds); offset++ { + f := flds[(start+offset)%len(flds)] + + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Skip null pointer to embedded struct + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + if f.omitEmpty { + empty, err := f.ief(em, fv) + if err != nil { + return err + } + if empty { + continue + } + } + + if !f.keyAsInt && em.fieldName == FieldNameToByteString { + e.Write(f.cborNameByteString) + } else { // int or text string + e.Write(f.cborName) + } + + if err := f.ef(e, em, fv); err != nil { + return err + } + + kvcount++ + } + + if len(flds) == kvcount { + // Encoded element count in head is the same as actual element count. + return nil + } + + // Overwrite the bytes that were reserved for the head before encoding the map entries. + var actualHeadLen int + { + headbuf := *bytes.NewBuffer(e.Bytes()[kvbegin-encodedHeadLen : kvbegin-encodedHeadLen : kvbegin]) + actualHeadLen = encodeHead(&headbuf, byte(cborTypeMap), uint64(kvcount)) + } + + if actualHeadLen == encodedHeadLen { + // The bytes reserved for the encoded head were exactly the right size, so the + // encoded entries are already in their final positions. + return nil + } + + // We reserved more bytes than needed for the encoded head, based on the number of fields + // encoded. The encoded entries are offset to the right by the number of excess reserved + // bytes. Shift the entries left to remove the gap. 
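+	// For example, a head reserved for 30 fields takes 2 bytes (0xb8 0x1e); if only
+	// 5 fields were actually encoded, the rewritten head takes 1 byte (0xa5) and the
+	// encoded entries are shifted left by 1 byte.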
+ excessReservedBytes := encodedHeadLen - actualHeadLen + dst := e.Bytes()[kvbegin-excessReservedBytes : e.Len()-excessReservedBytes] + src := e.Bytes()[kvbegin:e.Len()] + copy(dst, src) + + // After shifting, the excess bytes are at the end of the output buffer and they are + // garbage. + e.Truncate(e.Len() - excessReservedBytes) + return nil +} + +func encodeIntf(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if v.IsNil() { + e.Write(cborNil) + return nil + } + return encode(e, em, v.Elem()) +} + +func encodeTime(e *bytes.Buffer, em *encMode, v reflect.Value) error { + t := v.Interface().(time.Time) + if t.IsZero() { + e.Write(cborNil) // Even if tag is required, encode as CBOR null. + return nil + } + if em.timeTag == EncTagRequired { + tagNumber := 1 + if em.time == TimeRFC3339 || em.time == TimeRFC3339Nano { + tagNumber = 0 + } + encodeHead(e, byte(cborTypeTag), uint64(tagNumber)) + } + switch em.time { + case TimeUnix: + secs := t.Unix() + return encodeInt(e, em, reflect.ValueOf(secs)) + + case TimeUnixMicro: + t = t.UTC().Round(time.Microsecond) + f := float64(t.UnixNano()) / 1e9 + return encodeFloat(e, em, reflect.ValueOf(f)) + + case TimeUnixDynamic: + t = t.UTC().Round(time.Microsecond) + secs, nsecs := t.Unix(), uint64(t.Nanosecond()) + if nsecs == 0 { + return encodeInt(e, em, reflect.ValueOf(secs)) + } + f := float64(secs) + float64(nsecs)/1e9 + return encodeFloat(e, em, reflect.ValueOf(f)) + + case TimeRFC3339: + s := t.Format(time.RFC3339) + return encodeString(e, em, reflect.ValueOf(s)) + + default: // TimeRFC3339Nano + s := t.Format(time.RFC3339Nano) + return encodeString(e, em, reflect.ValueOf(s)) + } +} + +func encodeBigInt(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.bigIntConvert == BigIntConvertReject { + return &UnsupportedTypeError{Type: typeBigInt} + } + + vbi := v.Interface().(big.Int) + sign := vbi.Sign() + bi := new(big.Int).SetBytes(vbi.Bytes()) // bi is absolute value of v + if sign < 0 { + // For negative number, convert to CBOR encoded number (-v-1). 
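+		// For example, -1 is encoded with argument 0 and -500 with argument 499.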
+ bi.Sub(bi, big.NewInt(1)) + } + + if em.bigIntConvert == BigIntConvertShortest { + if bi.IsUint64() { + if sign >= 0 { + // Encode as CBOR pos int (major type 0) + encodeHead(e, byte(cborTypePositiveInt), bi.Uint64()) + return nil + } + // Encode as CBOR neg int (major type 1) + encodeHead(e, byte(cborTypeNegativeInt), bi.Uint64()) + return nil + } + } + + tagNum := 2 + if sign < 0 { + tagNum = 3 + } + // Write tag number + encodeHead(e, byte(cborTypeTag), uint64(tagNum)) + // Write bignum byte string + b := bi.Bytes() + encodeHead(e, byte(cborTypeByteString), uint64(len(b))) + e.Write(b) + return nil +} + +type binaryMarshalerEncoder struct { + alternateEncode encodeFunc + alternateIsEmpty isEmptyFunc +} + +func (bme binaryMarshalerEncoder) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.binaryMarshaler != BinaryMarshalerByteString { + return bme.alternateEncode(e, em, v) + } + + vt := v.Type() + m, ok := v.Interface().(encoding.BinaryMarshaler) + if !ok { + pv := reflect.New(vt) + pv.Elem().Set(v) + m = pv.Interface().(encoding.BinaryMarshaler) + } + data, err := m.MarshalBinary() + if err != nil { + return err + } + if b := em.encTagBytes(vt); b != nil { + e.Write(b) + } + encodeHead(e, byte(cborTypeByteString), uint64(len(data))) + e.Write(data) + return nil +} + +func (bme binaryMarshalerEncoder) isEmpty(em *encMode, v reflect.Value) (bool, error) { + if em.binaryMarshaler != BinaryMarshalerByteString { + return bme.alternateIsEmpty(em, v) + } + + m, ok := v.Interface().(encoding.BinaryMarshaler) + if !ok { + pv := reflect.New(v.Type()) + pv.Elem().Set(v) + m = pv.Interface().(encoding.BinaryMarshaler) + } + data, err := m.MarshalBinary() + if err != nil { + return false, err + } + return len(data) == 0, nil +} + +func encodeMarshalerType(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.tagsMd == TagsForbidden && v.Type() == typeRawTag { + return errors.New("cbor: cannot encode cbor.RawTag when TagsMd is TagsForbidden") + } + m, ok := v.Interface().(Marshaler) + if !ok { + pv := reflect.New(v.Type()) + pv.Elem().Set(v) + m = pv.Interface().(Marshaler) + } + data, err := m.MarshalCBOR() + if err != nil { + return err + } + + // Verify returned CBOR data item from MarshalCBOR() is well-formed and passes tag validity for builtin tags 0-3. + d := decoder{data: data, dm: getMarshalerDecMode(em.indefLength, em.tagsMd)} + err = d.wellformed(false, true) + if err != nil { + return &MarshalerError{typ: v.Type(), err: err} + } + + e.Write(data) + return nil +} + +func encodeTag(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.tagsMd == TagsForbidden { + return errors.New("cbor: cannot encode cbor.Tag when TagsMd is TagsForbidden") + } + + t := v.Interface().(Tag) + + if t.Number == 0 && t.Content == nil { + // Marshal uninitialized cbor.Tag + e.Write(cborNil) + return nil + } + + // Marshal tag number + encodeHead(e, byte(cborTypeTag), t.Number) + + vem := *em // shallow copy + + // For built-in tags, disable settings that may introduce tag validity errors when + // marshaling certain Content values. + switch t.Number { + case tagNumRFC3339Time: + vem.stringType = StringToTextString + vem.stringMajorType = cborTypeTextString + case tagNumUnsignedBignum, tagNumNegativeBignum: + vem.byteSliceLaterFormat = ByteSliceLaterFormatNone + vem.byteSliceLaterEncodingTag = 0 + } + + // Marshal tag content + return encode(e, &vem, reflect.ValueOf(t.Content)) +} + +// encodeHead writes CBOR head of specified type t and returns number of bytes written. 
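+// For example, a byte string of length 500 gets the 3-byte head 0x59 0x01 0xf4
+// (major type 2 with a 2-byte argument), so encodeHead returns 3.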
+func encodeHead(e *bytes.Buffer, t byte, n uint64) int { + if n <= maxAdditionalInformationWithoutArgument { + const headSize = 1 + e.WriteByte(t | byte(n)) + return headSize + } + + if n <= math.MaxUint8 { + const headSize = 2 + scratch := [headSize]byte{ + t | byte(additionalInformationWith1ByteArgument), + byte(n), + } + e.Write(scratch[:]) + return headSize + } + + if n <= math.MaxUint16 { + const headSize = 3 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith2ByteArgument) + binary.BigEndian.PutUint16(scratch[1:], uint16(n)) + e.Write(scratch[:]) + return headSize + } + + if n <= math.MaxUint32 { + const headSize = 5 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith4ByteArgument) + binary.BigEndian.PutUint32(scratch[1:], uint32(n)) + e.Write(scratch[:]) + return headSize + } + + const headSize = 9 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith8ByteArgument) + binary.BigEndian.PutUint64(scratch[1:], n) + e.Write(scratch[:]) + return headSize +} + +var ( + typeMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem() + typeBinaryMarshaler = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() + typeRawMessage = reflect.TypeOf(RawMessage(nil)) + typeByteString = reflect.TypeOf(ByteString("")) +) + +func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc) { + k := t.Kind() + if k == reflect.Ptr { + return getEncodeIndirectValueFunc(t), isEmptyPtr + } + switch t { + case typeSimpleValue: + return encodeMarshalerType, isEmptyUint + + case typeTag: + return encodeTag, alwaysNotEmpty + + case typeTime: + return encodeTime, alwaysNotEmpty + + case typeBigInt: + return encodeBigInt, alwaysNotEmpty + + case typeRawMessage: + return encodeMarshalerType, isEmptySlice + + case typeByteString: + return encodeMarshalerType, isEmptyString + } + if reflect.PtrTo(t).Implements(typeMarshaler) { + return encodeMarshalerType, alwaysNotEmpty + } + if reflect.PtrTo(t).Implements(typeBinaryMarshaler) { + defer func() { + // capture encoding method used for modes that disable BinaryMarshaler + bme := binaryMarshalerEncoder{ + alternateEncode: ef, + alternateIsEmpty: ief, + } + ef = bme.encode + ief = bme.isEmpty + }() + } + switch k { + case reflect.Bool: + return encodeBool, isEmptyBool + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return encodeInt, isEmptyInt + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return encodeUint, isEmptyUint + + case reflect.Float32, reflect.Float64: + return encodeFloat, isEmptyFloat + + case reflect.String: + return encodeString, isEmptyString + + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + return encodeByteString, isEmptySlice + } + fallthrough + + case reflect.Array: + f, _ := getEncodeFunc(t.Elem()) + if f == nil { + return nil, nil + } + return arrayEncodeFunc{f: f}.encode, isEmptySlice + + case reflect.Map: + f := getEncodeMapFunc(t) + if f == nil { + return nil, nil + } + return f, isEmptyMap + + case reflect.Struct: + // Get struct's special field "_" tag options + if f, ok := t.FieldByName("_"); ok { + tag := f.Tag.Get("cbor") + if tag != "-" { + if hasToArrayOption(tag) { + return encodeStructToArray, isEmptyStruct + } + } + } + return encodeStruct, isEmptyStruct + + case reflect.Interface: + return encodeIntf, isEmptyIntf + } + return nil, nil +} + +func getEncodeIndirectValueFunc(t reflect.Type) encodeFunc { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + f, _ := 
getEncodeFunc(t) + if f == nil { + return nil + } + return func(e *bytes.Buffer, em *encMode, v reflect.Value) error { + for v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + if v.Kind() == reflect.Ptr && v.IsNil() { + e.Write(cborNil) + return nil + } + return f(e, em, v) + } +} + +func alwaysNotEmpty(_ *encMode, _ reflect.Value) (empty bool, err error) { + return false, nil +} + +func isEmptyBool(_ *encMode, v reflect.Value) (bool, error) { + return !v.Bool(), nil +} + +func isEmptyInt(_ *encMode, v reflect.Value) (bool, error) { + return v.Int() == 0, nil +} + +func isEmptyUint(_ *encMode, v reflect.Value) (bool, error) { + return v.Uint() == 0, nil +} + +func isEmptyFloat(_ *encMode, v reflect.Value) (bool, error) { + return v.Float() == 0.0, nil +} + +func isEmptyString(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptySlice(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptyMap(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptyPtr(_ *encMode, v reflect.Value) (bool, error) { + return v.IsNil(), nil +} + +func isEmptyIntf(_ *encMode, v reflect.Value) (bool, error) { + return v.IsNil(), nil +} + +func isEmptyStruct(em *encMode, v reflect.Value) (bool, error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return false, err + } + + if em.omitEmpty == OmitEmptyGoValue { + return false, nil + } + + if structType.toArray { + return len(structType.fields) == 0, nil + } + + if len(structType.fields) > len(structType.omitEmptyFieldsIdx) { + return false, nil + } + + for _, i := range structType.omitEmptyFieldsIdx { + f := structType.fields[i] + + // Get field value + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Skip null pointer to embedded struct + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + + empty, err := f.ief(em, fv) + if err != nil { + return false, err + } + if !empty { + return false, nil + } + } + return true, nil +} + +func cannotFitFloat32(f64 float64) bool { + f32 := float32(f64) + return float64(f32) != f64 +} + +// float32NaNFromReflectValue extracts float32 NaN from reflect.Value while preserving NaN's quiet bit. +func float32NaNFromReflectValue(v reflect.Value) float32 { + // Keith Randall's workaround for issue https://github.com/golang/go/issues/36400 + p := reflect.New(v.Type()) + p.Elem().Set(v) + f32 := p.Convert(reflect.TypeOf((*float32)(nil))).Elem().Interface().(float32) + return f32 +} diff --git a/vendor/github.com/fxamacker/cbor/v2/encode_map.go b/vendor/github.com/fxamacker/cbor/v2/encode_map.go new file mode 100644 index 0000000000..8b4b4bbc59 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/encode_map.go @@ -0,0 +1,94 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +//go:build go1.20 + +package cbor + +import ( + "bytes" + "reflect" + "sync" +) + +type mapKeyValueEncodeFunc struct { + kf, ef encodeFunc + kpool, vpool sync.Pool +} + +func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error { + iterk := me.kpool.Get().(*reflect.Value) + defer func() { + iterk.SetZero() + me.kpool.Put(iterk) + }() + iterv := me.vpool.Get().(*reflect.Value) + defer func() { + iterv.SetZero() + me.vpool.Put(iterv) + }() + + if kvs == nil { + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + iterk.SetIterKey(iter) + iterv.SetIterValue(iter) + + if err := me.kf(e, em, *iterk); err != nil { + return err + } + if err := me.ef(e, em, *iterv); err != nil { + return err + } + } + return nil + } + + initial := e.Len() + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + iterk.SetIterKey(iter) + iterv.SetIterValue(iter) + + offset := e.Len() + if err := me.kf(e, em, *iterk); err != nil { + return err + } + valueOffset := e.Len() + if err := me.ef(e, em, *iterv); err != nil { + return err + } + kvs[i] = keyValue{ + offset: offset - initial, + valueOffset: valueOffset - initial, + nextOffset: e.Len() - initial, + } + } + + return nil +} + +func getEncodeMapFunc(t reflect.Type) encodeFunc { + kf, _ := getEncodeFunc(t.Key()) + ef, _ := getEncodeFunc(t.Elem()) + if kf == nil || ef == nil { + return nil + } + mkv := &mapKeyValueEncodeFunc{ + kf: kf, + ef: ef, + kpool: sync.Pool{ + New: func() interface{} { + rk := reflect.New(t.Key()).Elem() + return &rk + }, + }, + vpool: sync.Pool{ + New: func() interface{} { + rv := reflect.New(t.Elem()).Elem() + return &rv + }, + }, + } + return mapEncodeFunc{ + e: mkv.encodeKeyValues, + }.encode +} diff --git a/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go b/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go new file mode 100644 index 0000000000..31c39336dd --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go @@ -0,0 +1,60 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +//go:build !go1.20 + +package cbor + +import ( + "bytes" + "reflect" +) + +type mapKeyValueEncodeFunc struct { + kf, ef encodeFunc +} + +func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error { + if kvs == nil { + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + if err := me.kf(e, em, iter.Key()); err != nil { + return err + } + if err := me.ef(e, em, iter.Value()); err != nil { + return err + } + } + return nil + } + + initial := e.Len() + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + offset := e.Len() + if err := me.kf(e, em, iter.Key()); err != nil { + return err + } + valueOffset := e.Len() + if err := me.ef(e, em, iter.Value()); err != nil { + return err + } + kvs[i] = keyValue{ + offset: offset - initial, + valueOffset: valueOffset - initial, + nextOffset: e.Len() - initial, + } + } + + return nil +} + +func getEncodeMapFunc(t reflect.Type) encodeFunc { + kf, _ := getEncodeFunc(t.Key()) + ef, _ := getEncodeFunc(t.Elem()) + if kf == nil || ef == nil { + return nil + } + mkv := &mapKeyValueEncodeFunc{kf: kf, ef: ef} + return mapEncodeFunc{ + e: mkv.encodeKeyValues, + }.encode +} diff --git a/vendor/github.com/fxamacker/cbor/v2/simplevalue.go b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go new file mode 100644 index 0000000000..de175cee4a --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go @@ -0,0 +1,69 @@ +package cbor + +import ( + "errors" + "fmt" + "reflect" +) + +// SimpleValue represents CBOR simple value. +// CBOR simple value is: +// - an extension point like CBOR tag. +// - a subset of CBOR major type 7 that isn't floating-point. +// - "identified by a number between 0 and 255, but distinct from that number itself". +// For example, "a simple value 2 is not equivalent to an integer 2" as a CBOR map key. +// +// CBOR simple values identified by 20..23 are: "false", "true" , "null", and "undefined". +// Other CBOR simple values are currently unassigned/reserved by IANA. +type SimpleValue uint8 + +var ( + typeSimpleValue = reflect.TypeOf(SimpleValue(0)) +) + +// MarshalCBOR encodes SimpleValue as CBOR simple value (major type 7). +func (sv SimpleValue) MarshalCBOR() ([]byte, error) { + // RFC 8949 3.3. Floating-Point Numbers and Values with No Content says: + // "An encoder MUST NOT issue two-byte sequences that start with 0xf8 + // (major type 7, additional information 24) and continue with a byte + // less than 0x20 (32 decimal). Such sequences are not well-formed. + // (This implies that an encoder cannot encode false, true, null, or + // undefined in two-byte sequences and that only the one-byte variants + // of these are well-formed; more generally speaking, each simple value + // only has a single representation variant)." + + switch { + case sv <= maxSimpleValueInAdditionalInformation: + return []byte{byte(cborTypePrimitives) | byte(sv)}, nil + + case sv >= minSimpleValueIn1ByteArgument: + return []byte{byte(cborTypePrimitives) | additionalInformationWith1ByteArgument, byte(sv)}, nil + + default: + return nil, &UnsupportedValueError{msg: fmt.Sprintf("SimpleValue(%d)", sv)} + } +} + +// UnmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue. 
+func (sv *SimpleValue) UnmarshalCBOR(data []byte) error { + if sv == nil { + return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer") + } + + d := decoder{data: data, dm: defaultDecMode} + + typ, ai, val := d.getHead() + + if typ != cborTypePrimitives { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue"} + } + if ai > additionalInformationWith1ByteArgument { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue", errorMsg: "not simple values"} + } + + // It is safe to cast val to uint8 here because + // - data is already verified to be well-formed CBOR simple value and + // - val is <= math.MaxUint8. + *sv = SimpleValue(val) + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/stream.go b/vendor/github.com/fxamacker/cbor/v2/stream.go new file mode 100644 index 0000000000..507ab6c184 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/stream.go @@ -0,0 +1,277 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "bytes" + "errors" + "io" + "reflect" +) + +// Decoder reads and decodes CBOR values from io.Reader. +type Decoder struct { + r io.Reader + d decoder + buf []byte + off int // next read offset in buf + bytesRead int +} + +// NewDecoder returns a new decoder that reads and decodes from r using +// the default decoding options. +func NewDecoder(r io.Reader) *Decoder { + return defaultDecMode.NewDecoder(r) +} + +// Decode reads CBOR value and decodes it into the value pointed to by v. +func (dec *Decoder) Decode(v interface{}) error { + _, err := dec.readNext() + if err != nil { + // Return validation error or read error. + return err + } + + dec.d.reset(dec.buf[dec.off:]) + err = dec.d.value(v) + + // Increment dec.off even if decoding err is not nil because + // dec.d.off points to the next CBOR data item if current + // CBOR data item is valid but failed to be decoded into v. + // This allows next CBOR data item to be decoded in next + // call to this function. + dec.off += dec.d.off + dec.bytesRead += dec.d.off + + return err +} + +// Skip skips to the next CBOR data item (if there is any), +// otherwise it returns error such as io.EOF, io.UnexpectedEOF, etc. +func (dec *Decoder) Skip() error { + n, err := dec.readNext() + if err != nil { + // Return validation error or read error. + return err + } + + dec.off += n + dec.bytesRead += n + return nil +} + +// NumBytesRead returns the number of bytes read. +func (dec *Decoder) NumBytesRead() int { + return dec.bytesRead +} + +// Buffered returns a reader for data remaining in Decoder's buffer. +// Returned reader is valid until the next call to Decode or Skip. +func (dec *Decoder) Buffered() io.Reader { + return bytes.NewReader(dec.buf[dec.off:]) +} + +// readNext() reads next CBOR data item from Reader to buffer. +// It returns the size of next CBOR data item. +// It also returns validation error or read error if any. +func (dec *Decoder) readNext() (int, error) { + var readErr error + var validErr error + + for { + // Process any unread data in dec.buf. + if dec.off < len(dec.buf) { + dec.d.reset(dec.buf[dec.off:]) + off := dec.off // Save offset before data validation + validErr = dec.d.wellformed(true, false) + dec.off = off // Restore offset + + if validErr == nil { + return dec.d.off, nil + } + + if validErr != io.ErrUnexpectedEOF { + return 0, validErr + } + + // Process last read error on io.ErrUnexpectedEOF. 
+ if readErr != nil { + if readErr == io.EOF { + // current CBOR data item is incomplete. + return 0, io.ErrUnexpectedEOF + } + return 0, readErr + } + } + + // More data is needed and there was no read error. + var n int + for n == 0 { + n, readErr = dec.read() + if n == 0 && readErr != nil { + // No more data can be read and read error is encountered. + // At this point, validErr is either nil or io.ErrUnexpectedEOF. + if readErr == io.EOF { + if validErr == io.ErrUnexpectedEOF { + // current CBOR data item is incomplete. + return 0, io.ErrUnexpectedEOF + } + } + return 0, readErr + } + } + + // At this point, dec.buf contains new data from last read (n > 0). + } +} + +// read() reads data from Reader to buffer. +// It returns number of bytes read and any read error encountered. +// Postconditions: +// - dec.buf contains previously unread data and new data. +// - dec.off is 0. +func (dec *Decoder) read() (int, error) { + // Grow buf if needed. + const minRead = 512 + if cap(dec.buf)-len(dec.buf)+dec.off < minRead { + oldUnreadBuf := dec.buf[dec.off:] + dec.buf = make([]byte, len(dec.buf)-dec.off, 2*cap(dec.buf)+minRead) + dec.overwriteBuf(oldUnreadBuf) + } + + // Copy unread data over read data and reset off to 0. + if dec.off > 0 { + dec.overwriteBuf(dec.buf[dec.off:]) + } + + // Read from reader and reslice buf. + n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)]) + dec.buf = dec.buf[0 : len(dec.buf)+n] + return n, err +} + +func (dec *Decoder) overwriteBuf(newBuf []byte) { + n := copy(dec.buf, newBuf) + dec.buf = dec.buf[:n] + dec.off = 0 +} + +// Encoder writes CBOR values to io.Writer. +type Encoder struct { + w io.Writer + em *encMode + indefTypes []cborType +} + +// NewEncoder returns a new encoder that writes to w using the default encoding options. +func NewEncoder(w io.Writer) *Encoder { + return defaultEncMode.NewEncoder(w) +} + +// Encode writes the CBOR encoding of v. +func (enc *Encoder) Encode(v interface{}) error { + if len(enc.indefTypes) > 0 && v != nil { + indefType := enc.indefTypes[len(enc.indefTypes)-1] + if indefType == cborTypeTextString { + k := reflect.TypeOf(v).Kind() + if k != reflect.String { + return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length text string") + } + } else if indefType == cborTypeByteString { + t := reflect.TypeOf(v) + k := t.Kind() + if (k != reflect.Array && k != reflect.Slice) || t.Elem().Kind() != reflect.Uint8 { + return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length byte string") + } + } + } + + buf := getEncodeBuffer() + + err := encode(buf, enc.em, reflect.ValueOf(v)) + if err == nil { + _, err = enc.w.Write(buf.Bytes()) + } + + putEncodeBuffer(buf) + return err +} + +// StartIndefiniteByteString starts byte string encoding of indefinite length. +// Subsequent calls of (*Encoder).Encode() encodes definite length byte strings +// ("chunks") as one contiguous string until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteByteString() error { + return enc.startIndefinite(cborTypeByteString) +} + +// StartIndefiniteTextString starts text string encoding of indefinite length. +// Subsequent calls of (*Encoder).Encode() encodes definite length text strings +// ("chunks") as one contiguous string until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteTextString() error { + return enc.startIndefinite(cborTypeTextString) +} + +// StartIndefiniteArray starts array encoding of indefinite length. 
+// Subsequent calls of (*Encoder).Encode() encodes elements of the array +// until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteArray() error { + return enc.startIndefinite(cborTypeArray) +} + +// StartIndefiniteMap starts array encoding of indefinite length. +// Subsequent calls of (*Encoder).Encode() encodes elements of the map +// until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteMap() error { + return enc.startIndefinite(cborTypeMap) +} + +// EndIndefinite closes last opened indefinite length value. +func (enc *Encoder) EndIndefinite() error { + if len(enc.indefTypes) == 0 { + return errors.New("cbor: cannot encode \"break\" code outside indefinite length values") + } + _, err := enc.w.Write([]byte{cborBreakFlag}) + if err == nil { + enc.indefTypes = enc.indefTypes[:len(enc.indefTypes)-1] + } + return err +} + +var cborIndefHeader = map[cborType][]byte{ + cborTypeByteString: {cborByteStringWithIndefiniteLengthHead}, + cborTypeTextString: {cborTextStringWithIndefiniteLengthHead}, + cborTypeArray: {cborArrayWithIndefiniteLengthHead}, + cborTypeMap: {cborMapWithIndefiniteLengthHead}, +} + +func (enc *Encoder) startIndefinite(typ cborType) error { + if enc.em.indefLength == IndefLengthForbidden { + return &IndefiniteLengthError{typ} + } + _, err := enc.w.Write(cborIndefHeader[typ]) + if err == nil { + enc.indefTypes = append(enc.indefTypes, typ) + } + return err +} + +// RawMessage is a raw encoded CBOR value. +type RawMessage []byte + +// MarshalCBOR returns m or CBOR nil if m is nil. +func (m RawMessage) MarshalCBOR() ([]byte, error) { + if len(m) == 0 { + return cborNil, nil + } + return m, nil +} + +// UnmarshalCBOR creates a copy of data and saves to *m. +func (m *RawMessage) UnmarshalCBOR(data []byte) error { + if m == nil { + return errors.New("cbor.RawMessage: UnmarshalCBOR on nil pointer") + } + *m = append((*m)[0:0], data...) + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/structfields.go b/vendor/github.com/fxamacker/cbor/v2/structfields.go new file mode 100644 index 0000000000..81228acf0f --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/structfields.go @@ -0,0 +1,260 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "reflect" + "sort" + "strings" +) + +type field struct { + name string + nameAsInt int64 // used to decoder to match field name with CBOR int + cborName []byte + cborNameByteString []byte // major type 2 name encoding iff cborName has major type 3 + idx []int + typ reflect.Type + ef encodeFunc + ief isEmptyFunc + typInfo *typeInfo // used to decoder to reuse type info + tagged bool // used to choose dominant field (at the same level tagged fields dominate untagged fields) + omitEmpty bool // used to skip empty field + keyAsInt bool // used to encode/decode field name as int +} + +type fields []*field + +// indexFieldSorter sorts fields by field idx at each level, breaking ties with idx depth. 
+type indexFieldSorter struct { + fields fields +} + +func (x *indexFieldSorter) Len() int { + return len(x.fields) +} + +func (x *indexFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *indexFieldSorter) Less(i, j int) bool { + iIdx, jIdx := x.fields[i].idx, x.fields[j].idx + for k := 0; k < len(iIdx) && k < len(jIdx); k++ { + if iIdx[k] != jIdx[k] { + return iIdx[k] < jIdx[k] + } + } + return len(iIdx) <= len(jIdx) +} + +// nameLevelAndTagFieldSorter sorts fields by field name, idx depth, and presence of tag. +type nameLevelAndTagFieldSorter struct { + fields fields +} + +func (x *nameLevelAndTagFieldSorter) Len() int { + return len(x.fields) +} + +func (x *nameLevelAndTagFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *nameLevelAndTagFieldSorter) Less(i, j int) bool { + fi, fj := x.fields[i], x.fields[j] + if fi.name != fj.name { + return fi.name < fj.name + } + if len(fi.idx) != len(fj.idx) { + return len(fi.idx) < len(fj.idx) + } + if fi.tagged != fj.tagged { + return fi.tagged + } + return i < j // Field i and j have the same name, depth, and tagged status. Nothing else matters. +} + +// getFields returns visible fields of struct type t following visibility rules for JSON encoding. +func getFields(t reflect.Type) (flds fields, structOptions string) { + // Get special field "_" tag options + if f, ok := t.FieldByName("_"); ok { + tag := f.Tag.Get("cbor") + if tag != "-" { + structOptions = tag + } + } + + // nTypes contains next level anonymous fields' types and indexes + // (there can be multiple fields of the same type at the same level) + flds, nTypes := appendFields(t, nil, nil, nil) + + if len(nTypes) > 0 { + + var cTypes map[reflect.Type][][]int // current level anonymous fields' types and indexes + vTypes := map[reflect.Type]bool{t: true} // visited field types at less nested levels + + for len(nTypes) > 0 { + cTypes, nTypes = nTypes, nil + + for t, idx := range cTypes { + // If there are multiple anonymous fields of the same struct type at the same level, all are ignored. + if len(idx) > 1 { + continue + } + + // Anonymous field of the same type at deeper nested level is ignored. + if vTypes[t] { + continue + } + vTypes[t] = true + + flds, nTypes = appendFields(t, idx[0], flds, nTypes) + } + } + } + + sort.Sort(&nameLevelAndTagFieldSorter{flds}) + + // Keep visible fields. + j := 0 // index of next unique field + for i := 0; i < len(flds); { + name := flds[i].name + if i == len(flds)-1 || // last field + name != flds[i+1].name || // field i has unique field name + len(flds[i].idx) < len(flds[i+1].idx) || // field i is at a less nested level than field i+1 + (flds[i].tagged && !flds[i+1].tagged) { // field i is tagged while field i+1 is not + flds[j] = flds[i] + j++ + } + + // Skip fields with the same field name. + for i++; i < len(flds) && name == flds[i].name; i++ { //nolint:revive + } + } + if j != len(flds) { + flds = flds[:j] + } + + // Sort fields by field index + sort.Sort(&indexFieldSorter{flds}) + + return flds, structOptions +} + +// appendFields appends type t's exportable fields to flds and anonymous struct fields to nTypes . 
+func appendFields( + t reflect.Type, + idx []int, + flds fields, + nTypes map[reflect.Type][][]int, +) ( + _flds fields, + _nTypes map[reflect.Type][][]int, +) { + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + + ft := f.Type + for ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + + if !isFieldExportable(f, ft.Kind()) { + continue + } + + tag := f.Tag.Get("cbor") + if tag == "" { + tag = f.Tag.Get("json") + } + if tag == "-" { + continue + } + + tagged := tag != "" + + // Parse field tag options + var tagFieldName string + var omitempty, keyasint bool + for j := 0; tag != ""; j++ { + var token string + idx := strings.IndexByte(tag, ',') + if idx == -1 { + token, tag = tag, "" + } else { + token, tag = tag[:idx], tag[idx+1:] + } + if j == 0 { + tagFieldName = token + } else { + switch token { + case "omitempty": + omitempty = true + case "keyasint": + keyasint = true + } + } + } + + fieldName := tagFieldName + if tagFieldName == "" { + fieldName = f.Name + } + + fIdx := make([]int, len(idx)+1) + copy(fIdx, idx) + fIdx[len(fIdx)-1] = i + + if !f.Anonymous || ft.Kind() != reflect.Struct || tagFieldName != "" { + flds = append(flds, &field{ + name: fieldName, + idx: fIdx, + typ: f.Type, + omitEmpty: omitempty, + keyAsInt: keyasint, + tagged: tagged}) + } else { + if nTypes == nil { + nTypes = make(map[reflect.Type][][]int) + } + nTypes[ft] = append(nTypes[ft], fIdx) + } + } + + return flds, nTypes +} + +// isFieldExportable returns true if f is an exportable (regular or anonymous) field or +// a nonexportable anonymous field of struct type. +// Nonexportable anonymous field of struct type can contain exportable fields. +func isFieldExportable(f reflect.StructField, fk reflect.Kind) bool { //nolint:gocritic // ignore hugeParam + exportable := f.PkgPath == "" + return exportable || (f.Anonymous && fk == reflect.Struct) +} + +type embeddedFieldNullPtrFunc func(reflect.Value) (reflect.Value, error) + +// getFieldValue returns field value of struct v by index. When encountering null pointer +// to anonymous (embedded) struct field, f is called with the last traversed field value. +func getFieldValue(v reflect.Value, idx []int, f embeddedFieldNullPtrFunc) (fv reflect.Value, err error) { + fv = v + for i, n := range idx { + fv = fv.Field(n) + + if i < len(idx)-1 { + if fv.Kind() == reflect.Ptr && fv.Type().Elem().Kind() == reflect.Struct { + if fv.IsNil() { + // Null pointer to embedded struct field + fv, err = f(fv) + if err != nil || !fv.IsValid() { + return fv, err + } + } + fv = fv.Elem() + } + } + } + return fv, nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/tag.go b/vendor/github.com/fxamacker/cbor/v2/tag.go new file mode 100644 index 0000000000..5c4d2b7a42 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/tag.go @@ -0,0 +1,299 @@ +package cbor + +import ( + "errors" + "fmt" + "reflect" + "sync" +) + +// Tag represents CBOR tag data, including tag number and unmarshaled tag content. Marshaling and +// unmarshaling of tag content is subject to any encode and decode options that would apply to +// enclosed data item if it were to appear outside of a tag. +type Tag struct { + Number uint64 + Content interface{} +} + +// RawTag represents CBOR tag data, including tag number and raw tag content. +// RawTag implements Unmarshaler and Marshaler interfaces. +type RawTag struct { + Number uint64 + Content RawMessage +} + +// UnmarshalCBOR sets *t with tag number and raw tag content copied from data. 
+func (t *RawTag) UnmarshalCBOR(data []byte) error { + if t == nil { + return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer") + } + + // Decoding CBOR null and undefined to cbor.RawTag is no-op. + if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) { + return nil + } + + d := decoder{data: data, dm: defaultDecMode} + + // Unmarshal tag number. + typ, _, num := d.getHead() + if typ != cborTypeTag { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeRawTag.String()} + } + t.Number = num + + // Unmarshal tag content. + c := d.data[d.off:] + t.Content = make([]byte, len(c)) + copy(t.Content, c) + return nil +} + +// MarshalCBOR returns CBOR encoding of t. +func (t RawTag) MarshalCBOR() ([]byte, error) { + if t.Number == 0 && len(t.Content) == 0 { + // Marshal uninitialized cbor.RawTag + b := make([]byte, len(cborNil)) + copy(b, cborNil) + return b, nil + } + + e := getEncodeBuffer() + + encodeHead(e, byte(cborTypeTag), t.Number) + + content := t.Content + if len(content) == 0 { + content = cborNil + } + + buf := make([]byte, len(e.Bytes())+len(content)) + n := copy(buf, e.Bytes()) + copy(buf[n:], content) + + putEncodeBuffer(e) + return buf, nil +} + +// DecTagMode specifies how decoder handles tag number. +type DecTagMode int + +const ( + // DecTagIgnored makes decoder ignore tag number (skips if present). + DecTagIgnored DecTagMode = iota + + // DecTagOptional makes decoder verify tag number if it's present. + DecTagOptional + + // DecTagRequired makes decoder verify tag number and tag number must be present. + DecTagRequired + + maxDecTagMode +) + +func (dtm DecTagMode) valid() bool { + return dtm >= 0 && dtm < maxDecTagMode +} + +// EncTagMode specifies how encoder handles tag number. +type EncTagMode int + +const ( + // EncTagNone makes encoder not encode tag number. + EncTagNone EncTagMode = iota + + // EncTagRequired makes encoder encode tag number. + EncTagRequired + + maxEncTagMode +) + +func (etm EncTagMode) valid() bool { + return etm >= 0 && etm < maxEncTagMode +} + +// TagOptions specifies how encoder and decoder handle tag number. +type TagOptions struct { + DecTag DecTagMode + EncTag EncTagMode +} + +// TagSet is an interface to add and remove tag info. It is used by EncMode and DecMode +// to provide CBOR tag support. +type TagSet interface { + // Add adds given tag number(s), content type, and tag options to TagSet. + Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error + + // Remove removes given tag content type from TagSet. 
+ Remove(contentType reflect.Type) + + tagProvider +} + +type tagProvider interface { + getTagItemFromType(t reflect.Type) *tagItem + getTypeFromTagNum(num []uint64) reflect.Type +} + +type tagItem struct { + num []uint64 + cborTagNum []byte + contentType reflect.Type + opts TagOptions +} + +func (t *tagItem) equalTagNum(num []uint64) bool { + // Fast path to compare 1 tag number + if len(t.num) == 1 && len(num) == 1 && t.num[0] == num[0] { + return true + } + + if len(t.num) != len(num) { + return false + } + + for i := 0; i < len(t.num); i++ { + if t.num[i] != num[i] { + return false + } + } + + return true +} + +type ( + tagSet map[reflect.Type]*tagItem + + syncTagSet struct { + sync.RWMutex + t tagSet + } +) + +func (t tagSet) getTagItemFromType(typ reflect.Type) *tagItem { + return t[typ] +} + +func (t tagSet) getTypeFromTagNum(num []uint64) reflect.Type { + for typ, tag := range t { + if tag.equalTagNum(num) { + return typ + } + } + return nil +} + +// NewTagSet returns TagSet (safe for concurrency). +func NewTagSet() TagSet { + return &syncTagSet{t: make(map[reflect.Type]*tagItem)} +} + +// Add adds given tag number(s), content type, and tag options to TagSet. +func (t *syncTagSet) Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error { + if contentType == nil { + return errors.New("cbor: cannot add nil content type to TagSet") + } + for contentType.Kind() == reflect.Ptr { + contentType = contentType.Elem() + } + tag, err := newTagItem(opts, contentType, num, nestedNum...) + if err != nil { + return err + } + t.Lock() + defer t.Unlock() + for typ, ti := range t.t { + if typ == contentType { + return errors.New("cbor: content type " + contentType.String() + " already exists in TagSet") + } + if ti.equalTagNum(tag.num) { + return fmt.Errorf("cbor: tag number %v already exists in TagSet", tag.num) + } + } + t.t[contentType] = tag + return nil +} + +// Remove removes given tag content type from TagSet. 
+func (t *syncTagSet) Remove(contentType reflect.Type) { + for contentType.Kind() == reflect.Ptr { + contentType = contentType.Elem() + } + t.Lock() + delete(t.t, contentType) + t.Unlock() +} + +func (t *syncTagSet) getTagItemFromType(typ reflect.Type) *tagItem { + t.RLock() + ti := t.t[typ] + t.RUnlock() + return ti +} + +func (t *syncTagSet) getTypeFromTagNum(num []uint64) reflect.Type { + t.RLock() + rt := t.t.getTypeFromTagNum(num) + t.RUnlock() + return rt +} + +func newTagItem(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) (*tagItem, error) { + if opts.DecTag == DecTagIgnored && opts.EncTag == EncTagNone { + return nil, errors.New("cbor: cannot add tag with DecTagIgnored and EncTagNone options to TagSet") + } + if contentType.PkgPath() == "" || contentType.Kind() == reflect.Interface { + return nil, errors.New("cbor: can only add named types to TagSet, got " + contentType.String()) + } + if contentType == typeTime { + return nil, errors.New("cbor: cannot add time.Time to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead") + } + if contentType == typeBigInt { + return nil, errors.New("cbor: cannot add big.Int to TagSet, it's built-in and supported automatically") + } + if contentType == typeTag { + return nil, errors.New("cbor: cannot add cbor.Tag to TagSet") + } + if contentType == typeRawTag { + return nil, errors.New("cbor: cannot add cbor.RawTag to TagSet") + } + if num == 0 || num == 1 { + return nil, errors.New("cbor: cannot add tag number 0 or 1 to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead") + } + if num == 2 || num == 3 { + return nil, errors.New("cbor: cannot add tag number 2 or 3 to TagSet, it's built-in and supported automatically") + } + if num == tagNumSelfDescribedCBOR { + return nil, errors.New("cbor: cannot add tag number 55799 to TagSet, it's built-in and ignored automatically") + } + + te := tagItem{num: []uint64{num}, opts: opts, contentType: contentType} + te.num = append(te.num, nestedNum...) + + // Cache encoded tag numbers + e := getEncodeBuffer() + for _, n := range te.num { + encodeHead(e, byte(cborTypeTag), n) + } + te.cborTagNum = make([]byte, e.Len()) + copy(te.cborTagNum, e.Bytes()) + putEncodeBuffer(e) + + return &te, nil +} + +var ( + typeTag = reflect.TypeOf(Tag{}) + typeRawTag = reflect.TypeOf(RawTag{}) +) + +// WrongTagError describes mismatch between CBOR tag and registered tag. +type WrongTagError struct { + RegisteredType reflect.Type + RegisteredTagNum []uint64 + TagNum []uint64 +} + +func (e *WrongTagError) Error() string { + return fmt.Sprintf("cbor: wrong tag number for %s, got %v, expected %v", e.RegisteredType.String(), e.TagNum, e.RegisteredTagNum) +} diff --git a/vendor/github.com/fxamacker/cbor/v2/valid.go b/vendor/github.com/fxamacker/cbor/v2/valid.go new file mode 100644 index 0000000000..b40793b95e --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/valid.go @@ -0,0 +1,394 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "encoding/binary" + "errors" + "io" + "math" + "strconv" + + "github.com/x448/float16" +) + +// SyntaxError is a description of a CBOR syntax error. +type SyntaxError struct { + msg string +} + +func (e *SyntaxError) Error() string { return e.msg } + +// SemanticError is a description of a CBOR semantic error. 
+type SemanticError struct { + msg string +} + +func (e *SemanticError) Error() string { return e.msg } + +// MaxNestedLevelError indicates exceeded max nested level of any combination of CBOR arrays/maps/tags. +type MaxNestedLevelError struct { + maxNestedLevels int +} + +func (e *MaxNestedLevelError) Error() string { + return "cbor: exceeded max nested level " + strconv.Itoa(e.maxNestedLevels) +} + +// MaxArrayElementsError indicates exceeded max number of elements for CBOR arrays. +type MaxArrayElementsError struct { + maxArrayElements int +} + +func (e *MaxArrayElementsError) Error() string { + return "cbor: exceeded max number of elements " + strconv.Itoa(e.maxArrayElements) + " for CBOR array" +} + +// MaxMapPairsError indicates exceeded max number of key-value pairs for CBOR maps. +type MaxMapPairsError struct { + maxMapPairs int +} + +func (e *MaxMapPairsError) Error() string { + return "cbor: exceeded max number of key-value pairs " + strconv.Itoa(e.maxMapPairs) + " for CBOR map" +} + +// IndefiniteLengthError indicates found disallowed indefinite length items. +type IndefiniteLengthError struct { + t cborType +} + +func (e *IndefiniteLengthError) Error() string { + return "cbor: indefinite-length " + e.t.String() + " isn't allowed" +} + +// TagsMdError indicates found disallowed CBOR tags. +type TagsMdError struct { +} + +func (e *TagsMdError) Error() string { + return "cbor: CBOR tag isn't allowed" +} + +// ExtraneousDataError indicates found extraneous data following well-formed CBOR data item. +type ExtraneousDataError struct { + numOfBytes int // number of bytes of extraneous data + index int // location of extraneous data +} + +func (e *ExtraneousDataError) Error() string { + return "cbor: " + strconv.Itoa(e.numOfBytes) + " bytes of extraneous data starting at index " + strconv.Itoa(e.index) +} + +// wellformed checks whether the CBOR data item is well-formed. +// allowExtraData indicates if extraneous data is allowed after the CBOR data item. +// - use allowExtraData = true when using Decoder.Decode() +// - use allowExtraData = false when using Unmarshal() +func (d *decoder) wellformed(allowExtraData bool, checkBuiltinTags bool) error { + if len(d.data) == d.off { + return io.EOF + } + _, err := d.wellformedInternal(0, checkBuiltinTags) + if err == nil { + if !allowExtraData && d.off != len(d.data) { + err = &ExtraneousDataError{len(d.data) - d.off, d.off} + } + } + return err +} + +// wellformedInternal checks data's well-formedness and returns max depth and error. 
+func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, error) { //nolint:gocyclo + t, _, val, indefiniteLength, err := d.wellformedHeadWithIndefiniteLengthFlag() + if err != nil { + return 0, err + } + + switch t { + case cborTypeByteString, cborTypeTextString: + if indefiniteLength { + if d.dm.indefLength == IndefLengthForbidden { + return 0, &IndefiniteLengthError{t} + } + return d.wellformedIndefiniteString(t, depth, checkBuiltinTags) + } + valInt := int(val) + if valInt < 0 { + // Detect integer overflow + return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, causing integer overflow") + } + if len(d.data)-d.off < valInt { // valInt+off may overflow integer + return 0, io.ErrUnexpectedEOF + } + d.off += valInt + + case cborTypeArray, cborTypeMap: + depth++ + if depth > d.dm.maxNestedLevels { + return 0, &MaxNestedLevelError{d.dm.maxNestedLevels} + } + + if indefiniteLength { + if d.dm.indefLength == IndefLengthForbidden { + return 0, &IndefiniteLengthError{t} + } + return d.wellformedIndefiniteArrayOrMap(t, depth, checkBuiltinTags) + } + + valInt := int(val) + if valInt < 0 { + // Detect integer overflow + return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, it would cause integer overflow") + } + + if t == cborTypeArray { + if valInt > d.dm.maxArrayElements { + return 0, &MaxArrayElementsError{d.dm.maxArrayElements} + } + } else { + if valInt > d.dm.maxMapPairs { + return 0, &MaxMapPairsError{d.dm.maxMapPairs} + } + } + + count := 1 + if t == cborTypeMap { + count = 2 + } + maxDepth := depth + for j := 0; j < count; j++ { + for i := 0; i < valInt; i++ { + var dpt int + if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + if dpt > maxDepth { + maxDepth = dpt // Save max depth + } + } + } + depth = maxDepth + + case cborTypeTag: + if d.dm.tagsMd == TagsForbidden { + return 0, &TagsMdError{} + } + + tagNum := val + + // Scan nested tag numbers to avoid recursion. + for { + if len(d.data) == d.off { // Tag number must be followed by tag content. + return 0, io.ErrUnexpectedEOF + } + if checkBuiltinTags { + err = validBuiltinTag(tagNum, d.data[d.off]) + if err != nil { + return 0, err + } + } + if d.dm.bignumTag == BignumTagForbidden && (tagNum == 2 || tagNum == 3) { + return 0, &UnacceptableDataItemError{ + CBORType: cborTypeTag.String(), + Message: "bignum", + } + } + if getType(d.data[d.off]) != cborTypeTag { + break + } + if _, _, tagNum, err = d.wellformedHead(); err != nil { + return 0, err + } + depth++ + if depth > d.dm.maxNestedLevels { + return 0, &MaxNestedLevelError{d.dm.maxNestedLevels} + } + } + // Check tag content. + return d.wellformedInternal(depth, checkBuiltinTags) + } + + return depth, nil +} + +// wellformedIndefiniteString checks indefinite length byte/text string's well-formedness and returns max depth and error. +func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltinTags bool) (int, error) { + var err error + for { + if len(d.data) == d.off { + return 0, io.ErrUnexpectedEOF + } + if isBreakFlag(d.data[d.off]) { + d.off++ + break + } + // Peek ahead to get next type and indefinite length status. 
+ nt, ai := parseInitialByte(d.data[d.off]) + if t != nt { + return 0, &SyntaxError{"cbor: wrong element type " + nt.String() + " for indefinite-length " + t.String()} + } + if additionalInformation(ai).isIndefiniteLength() { + return 0, &SyntaxError{"cbor: indefinite-length " + t.String() + " chunk is not definite-length"} + } + if depth, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + } + return depth, nil +} + +// wellformedIndefiniteArrayOrMap checks indefinite length array/map's well-formedness and returns max depth and error. +func (d *decoder) wellformedIndefiniteArrayOrMap(t cborType, depth int, checkBuiltinTags bool) (int, error) { + var err error + maxDepth := depth + i := 0 + for { + if len(d.data) == d.off { + return 0, io.ErrUnexpectedEOF + } + if isBreakFlag(d.data[d.off]) { + d.off++ + break + } + var dpt int + if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + if dpt > maxDepth { + maxDepth = dpt + } + i++ + if t == cborTypeArray { + if i > d.dm.maxArrayElements { + return 0, &MaxArrayElementsError{d.dm.maxArrayElements} + } + } else { + if i%2 == 0 && i/2 > d.dm.maxMapPairs { + return 0, &MaxMapPairsError{d.dm.maxMapPairs} + } + } + } + if t == cborTypeMap && i%2 == 1 { + return 0, &SyntaxError{"cbor: unexpected \"break\" code"} + } + return maxDepth, nil +} + +func (d *decoder) wellformedHeadWithIndefiniteLengthFlag() ( + t cborType, + ai byte, + val uint64, + indefiniteLength bool, + err error, +) { + t, ai, val, err = d.wellformedHead() + if err != nil { + return + } + indefiniteLength = additionalInformation(ai).isIndefiniteLength() + return +} + +func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) { + dataLen := len(d.data) - d.off + if dataLen == 0 { + return 0, 0, 0, io.ErrUnexpectedEOF + } + + t, ai = parseInitialByte(d.data[d.off]) + val = uint64(ai) + d.off++ + dataLen-- + + if ai <= maxAdditionalInformationWithoutArgument { + return t, ai, val, nil + } + + if ai == additionalInformationWith1ByteArgument { + const argumentSize = 1 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(d.data[d.off]) + d.off++ + if t == cborTypePrimitives && val < 32 { + return 0, 0, 0, &SyntaxError{"cbor: invalid simple value " + strconv.Itoa(int(val)) + " for type " + t.String()} + } + return t, ai, val, nil + } + + if ai == additionalInformationWith2ByteArgument { + const argumentSize = 2 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + if t == cborTypePrimitives { + if err := d.acceptableFloat(float64(float16.Frombits(uint16(val)).Float32())); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if ai == additionalInformationWith4ByteArgument { + const argumentSize = 4 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + if t == cborTypePrimitives { + if err := d.acceptableFloat(float64(math.Float32frombits(uint32(val)))); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if ai == additionalInformationWith8ByteArgument { + const argumentSize = 8 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize]) + d.off += argumentSize + if t == cborTypePrimitives { + if err 
:= d.acceptableFloat(math.Float64frombits(val)); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if additionalInformation(ai).isIndefiniteLength() { + switch t { + case cborTypePositiveInt, cborTypeNegativeInt, cborTypeTag: + return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()} + case cborTypePrimitives: // 0xff (break code) should not be outside wellformedIndefinite(). + return 0, 0, 0, &SyntaxError{"cbor: unexpected \"break\" code"} + } + return t, ai, val, nil + } + + // ai == 28, 29, 30 + return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()} +} + +func (d *decoder) acceptableFloat(f float64) error { + switch { + case d.dm.nanDec == NaNDecodeForbidden && math.IsNaN(f): + return &UnacceptableDataItemError{ + CBORType: cborTypePrimitives.String(), + Message: "floating-point NaN", + } + case d.dm.infDec == InfDecodeForbidden && math.IsInf(f, 0): + return &UnacceptableDataItemError{ + CBORType: cborTypePrimitives.String(), + Message: "floating-point infinity", + } + } + return nil +} diff --git a/vendor/github.com/hashicorp/vault/api/client.go b/vendor/github.com/hashicorp/vault/api/client.go index 52c991b1e2..0090321caa 100644 --- a/vendor/github.com/hashicorp/vault/api/client.go +++ b/vendor/github.com/hashicorp/vault/api/client.go @@ -10,6 +10,7 @@ import ( "crypto/tls" "encoding/base64" "encoding/hex" + "encoding/json" "fmt" "net" "net/http" @@ -41,6 +42,7 @@ const ( EnvVaultClientCert = "VAULT_CLIENT_CERT" EnvVaultClientKey = "VAULT_CLIENT_KEY" EnvVaultClientTimeout = "VAULT_CLIENT_TIMEOUT" + EnvVaultHeaders = "VAULT_HEADERS" EnvVaultSRVLookup = "VAULT_SRV_LOOKUP" EnvVaultSkipVerify = "VAULT_SKIP_VERIFY" EnvVaultNamespace = "VAULT_NAMESPACE" @@ -665,6 +667,30 @@ func NewClient(c *Config) (*Client, error) { client.setNamespace(namespace) } + if envHeaders := os.Getenv(EnvVaultHeaders); envHeaders != "" { + var result map[string]any + err := json.Unmarshal([]byte(envHeaders), &result) + if err != nil { + return nil, fmt.Errorf("could not unmarshal environment-supplied headers") + } + var forbiddenHeaders []string + for key, value := range result { + if strings.HasPrefix(key, "X-Vault-") { + forbiddenHeaders = append(forbiddenHeaders, key) + continue + } + + value, ok := value.(string) + if !ok { + return nil, fmt.Errorf("environment-supplied headers include non-string values") + } + client.AddHeader(key, value) + } + if len(forbiddenHeaders) > 0 { + return nil, fmt.Errorf("failed to setup Headers[%s]: Header starting by 'X-Vault-' are for internal usage only", strings.Join(forbiddenHeaders, ", ")) + } + } + return client, nil } @@ -705,7 +731,7 @@ func (c *Client) SetAddress(addr string) error { parsedAddr, err := c.config.ParseAddress(addr) if err != nil { - return errwrap.Wrapf("failed to set address: {{err}}", err) + return fmt.Errorf("failed to set address: %w", err) } c.addr = parsedAddr diff --git a/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go b/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go index 4bc1390b93..bdb8fb64b3 100644 --- a/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go +++ b/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go @@ -10,7 +10,7 @@ import ( "sync" "time" - "github.com/cenkalti/backoff/v3" + "github.com/cenkalti/backoff/v4" ) var ( diff --git a/vendor/github.com/hashicorp/vault/api/request.go b/vendor/github.com/hashicorp/vault/api/request.go index 
a2d912c64d..c0c8dea734 100644 --- a/vendor/github.com/hashicorp/vault/api/request.go +++ b/vendor/github.com/hashicorp/vault/api/request.go @@ -7,7 +7,6 @@ import ( "bytes" "encoding/json" "io" - "io/ioutil" "net/http" "net/url" @@ -77,13 +76,13 @@ func (r *Request) ToHTTP() (*http.Request, error) { // No body case r.BodyBytes != nil: - req.Request.Body = ioutil.NopCloser(bytes.NewReader(r.BodyBytes)) + req.Request.Body = io.NopCloser(bytes.NewReader(r.BodyBytes)) default: if c, ok := r.Body.(io.ReadCloser); ok { req.Request.Body = c } else { - req.Request.Body = ioutil.NopCloser(r.Body) + req.Request.Body = io.NopCloser(r.Body) } } diff --git a/vendor/github.com/hashicorp/vault/api/response.go b/vendor/github.com/hashicorp/vault/api/response.go index 2842c12551..23246bf716 100644 --- a/vendor/github.com/hashicorp/vault/api/response.go +++ b/vendor/github.com/hashicorp/vault/api/response.go @@ -8,7 +8,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" ) @@ -44,7 +43,7 @@ func (r *Response) Error() error { } r.Body.Close() - r.Body = ioutil.NopCloser(bodyBuf) + r.Body = io.NopCloser(bodyBuf) ns := r.Header.Get(NamespaceHeaderName) // Build up the error object diff --git a/vendor/github.com/hashicorp/vault/api/secret.go b/vendor/github.com/hashicorp/vault/api/secret.go index d37bf3cf06..7df9f66a4d 100644 --- a/vendor/github.com/hashicorp/vault/api/secret.go +++ b/vendor/github.com/hashicorp/vault/api/secret.go @@ -159,6 +159,10 @@ TOKEN_DONE: goto DONE } + if s.Data["identity_policies"] == nil { + goto DONE + } + sList, ok := s.Data["identity_policies"].([]string) if ok { identityPolicies = sList diff --git a/vendor/github.com/hashicorp/vault/api/sudo_paths.go b/vendor/github.com/hashicorp/vault/api/sudo_paths.go index 24beb4bb1f..d458cbde0f 100644 --- a/vendor/github.com/hashicorp/vault/api/sudo_paths.go +++ b/vendor/github.com/hashicorp/vault/api/sudo_paths.go @@ -28,6 +28,7 @@ var sudoPaths = map[string]*regexp.Regexp{ "/sys/config/ui/headers": regexp.MustCompile(`^/sys/config/ui/headers/?$`), "/sys/config/ui/headers/{header}": regexp.MustCompile(`^/sys/config/ui/headers/.+$`), "/sys/internal/inspect/router/{tag}": regexp.MustCompile(`^/sys/internal/inspect/router/.+$`), + "/sys/internal/counters/activity/export": regexp.MustCompile(`^/sys/internal/counters/activity/export$`), "/sys/leases": regexp.MustCompile(`^/sys/leases$`), // This entry is a bit wrong... sys/leases/lookup does NOT require sudo. But sys/leases/lookup/ with a trailing // slash DOES require sudo. 
But the part of the Vault CLI that uses this logic doesn't pass operation-appropriate diff --git a/vendor/github.com/hashicorp/vault/api/sys_raft.go b/vendor/github.com/hashicorp/vault/api/sys_raft.go index 699f6e9fd0..f0e896271b 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_raft.go +++ b/vendor/github.com/hashicorp/vault/api/sys_raft.go @@ -264,7 +264,7 @@ func (c *Sys) RaftSnapshotWithContext(ctx context.Context, snapWriter io.Writer) continue } var b []byte - b, err = ioutil.ReadAll(t) + b, err = io.ReadAll(t) if err != nil || len(b) == 0 { return } diff --git a/vendor/github.com/jellydator/ttlcache/v3/README.md b/vendor/github.com/jellydator/ttlcache/v3/README.md index 3a557b030d..a17cb24371 100644 --- a/vendor/github.com/jellydator/ttlcache/v3/README.md +++ b/vendor/github.com/jellydator/ttlcache/v3/README.md @@ -10,7 +10,8 @@ - Type parameters - Item expiration and automatic deletion - Automatic expiration time extension on each `Get` call -- `Loader` interface that may be used to load/lazily initialize missing cache +- `Loader` interface that may be used to load/lazily initialize missing cache +- Thread Safe items - Event handlers (insertion and eviction) - Metrics diff --git a/vendor/github.com/jellydator/ttlcache/v3/cache.go b/vendor/github.com/jellydator/ttlcache/v3/cache.go index 1ad3afbece..1b9e72ef01 100644 --- a/vendor/github.com/jellydator/ttlcache/v3/cache.go +++ b/vendor/github.com/jellydator/ttlcache/v3/cache.go @@ -133,7 +133,7 @@ func (c *Cache[K, V]) set(key K, value V, ttl time.Duration) *Item[K, V] { ttl = c.options.ttl } - elem := c.get(key, false) + elem := c.get(key, false, true) if elem != nil { // update/overwrite an existing item item := elem.Value.(*Item[K, V]) @@ -176,14 +176,14 @@ func (c *Cache[K, V]) set(key K, value V, ttl time.Duration) *Item[K, V] { // It returns nil if the item is not found or is expired. // Not safe for concurrent use by multiple goroutines without additional // locking. -func (c *Cache[K, V]) get(key K, touch bool) *list.Element { +func (c *Cache[K, V]) get(key K, touch bool, includeExpired bool) *list.Element { elem := c.items.values[key] if elem == nil { return nil } item := elem.Value.(*Item[K, V]) - if item.isExpiredUnsafe() { + if !includeExpired && item.isExpiredUnsafe() { return nil } @@ -218,7 +218,7 @@ func (c *Cache[K, V]) getWithOpts(key K, lockAndLoad bool, opts ...Option[K, V]) c.items.mu.Lock() } - elem := c.get(key, !getOpts.disableTouchOnHit) + elem := c.get(key, !getOpts.disableTouchOnHit, false) if lockAndLoad { c.items.mu.Unlock() @@ -339,8 +339,8 @@ func (c *Cache[K, V]) Has(key K) bool { c.items.mu.RLock() defer c.items.mu.RUnlock() - _, ok := c.items.values[key] - return ok + elem, ok := c.items.values[key] + return ok && !elem.Value.(*Item[K, V]).isExpiredUnsafe() } // GetOrSet retrieves an item from the cache by the provided key. @@ -436,26 +436,66 @@ func (c *Cache[K, V]) DeleteExpired() { // If the item is not found, the method is no-op. func (c *Cache[K, V]) Touch(key K) { c.items.mu.Lock() - c.get(key, true) + c.get(key, true, false) c.items.mu.Unlock() } -// Len returns the total number of items in the cache. +// Len returns the number of unexpired items in the cache. 
func (c *Cache[K, V]) Len() int { c.items.mu.RLock() defer c.items.mu.RUnlock() - return len(c.items.values) + total := c.items.expQueue.Len() + if total == 0 { + return 0 + } + + // search the heap-based expQueue by BFS + countExpired := func() int { + var ( + q []int + res int + ) + + item := c.items.expQueue[0].Value.(*Item[K, V]) + if !item.isExpiredUnsafe() { + return res + } + + q = append(q, 0) + for len(q) > 0 { + pop := q[0] + q = q[1:] + res++ + + for i := 1; i <= 2; i++ { + idx := 2*pop + i + if idx >= total { + break + } + + item = c.items.expQueue[idx].Value.(*Item[K, V]) + if item.isExpiredUnsafe() { + q = append(q, idx) + } + } + } + return res + } + + return total - countExpired() } -// Keys returns all keys currently present in the cache. +// Keys returns all unexpired keys in the cache. func (c *Cache[K, V]) Keys() []K { c.items.mu.RLock() defer c.items.mu.RUnlock() - res := make([]K, 0, len(c.items.values)) - for k := range c.items.values { - res = append(res, k) + res := make([]K, 0) + for k, elem := range c.items.values { + if !elem.Value.(*Item[K, V]).isExpiredUnsafe() { + res = append(res, k) + } } return res @@ -467,18 +507,18 @@ func (c *Cache[K, V]) Items() map[K]*Item[K, V] { c.items.mu.RLock() defer c.items.mu.RUnlock() - items := make(map[K]*Item[K, V], len(c.items.values)) - for k := range c.items.values { - item := c.get(k, false) - if item != nil { - items[k] = item.Value.(*Item[K, V]) + items := make(map[K]*Item[K, V]) + for k, elem := range c.items.values { + item := elem.Value.(*Item[K, V]) + if item != nil && !item.isExpiredUnsafe() { + items[k] = item } } return items } -// Range calls fn for each item present in the cache. If fn returns false, +// Range calls fn for each unexpired item in the cache. If fn returns false, // Range stops the iteration. func (c *Cache[K, V]) Range(fn func(item *Item[K, V]) bool) { c.items.mu.RLock() @@ -491,9 +531,10 @@ func (c *Cache[K, V]) Range(fn func(item *Item[K, V]) bool) { for item := c.items.lru.Front(); item != c.items.lru.Back().Next(); item = item.Next() { i := item.Value.(*Item[K, V]) + expired := i.isExpiredUnsafe() c.items.mu.RUnlock() - if !fn(i) { + if !expired && !fn(i) { return } @@ -503,6 +544,32 @@ func (c *Cache[K, V]) Range(fn func(item *Item[K, V]) bool) { } } +// RangeBackwards calls fn for each unexpired item in the cache in reverse order. +// If fn returns false, RangeBackwards stops the iteration. +func (c *Cache[K, V]) RangeBackwards(fn func(item *Item[K, V]) bool) { + c.items.mu.RLock() + + // Check if cache is empty + if c.items.lru.Len() == 0 { + c.items.mu.RUnlock() + return + } + + for item := c.items.lru.Back(); item != c.items.lru.Front().Prev(); item = item.Prev() { + i := item.Value.(*Item[K, V]) + expired := i.isExpiredUnsafe() + c.items.mu.RUnlock() + + if !expired && !fn(i) { + return + } + + if item.Prev() != nil { + c.items.mu.RLock() + } + } +} + // Metrics returns the metrics of the cache. 
func (c *Cache[K, V]) Metrics() Metrics { c.metricsMu.RLock() diff --git a/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml index 1d8b69e65e..ec52857a3e 100644 --- a/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml +++ b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml @@ -1,3 +1,4 @@ +version: 2 before: hooks: - go mod tidy diff --git a/vendor/github.com/pelletier/go-toml/v2/README.md b/vendor/github.com/pelletier/go-toml/v2/README.md index d964b25fe1..0755e55642 100644 --- a/vendor/github.com/pelletier/go-toml/v2/README.md +++ b/vendor/github.com/pelletier/go-toml/v2/README.md @@ -565,7 +565,7 @@ complete solutions exist out there. ## Versioning -Expect for parts explicitely marked otherwise, go-toml follows [Semantic +Expect for parts explicitly marked otherwise, go-toml follows [Semantic Versioning](https://semver.org). The supported version of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of this document. The last two major versions of Go are supported (see [Go Release diff --git a/vendor/github.com/pelletier/go-toml/v2/marshaler.go b/vendor/github.com/pelletier/go-toml/v2/marshaler.go index 7f4e20c128..161acd9343 100644 --- a/vendor/github.com/pelletier/go-toml/v2/marshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/marshaler.go @@ -8,7 +8,7 @@ import ( "io" "math" "reflect" - "sort" + "slices" "strconv" "strings" "time" @@ -280,7 +280,7 @@ func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, e } hasTextMarshaler := v.Type().Implements(textMarshalerType) - if hasTextMarshaler || (v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) { + if hasTextMarshaler || (v.CanAddr() && reflect.PointerTo(v.Type()).Implements(textMarshalerType)) { if !hasTextMarshaler { v = v.Addr() } @@ -631,6 +631,18 @@ func (enc *Encoder) keyToString(k reflect.Value) (string, error) { return "", fmt.Errorf("toml: error marshalling key %v from text: %w", k, err) } return string(keyB), nil + + case keyType.Kind() == reflect.Int || keyType.Kind() == reflect.Int8 || keyType.Kind() == reflect.Int16 || keyType.Kind() == reflect.Int32 || keyType.Kind() == reflect.Int64: + return strconv.FormatInt(k.Int(), 10), nil + + case keyType.Kind() == reflect.Uint || keyType.Kind() == reflect.Uint8 || keyType.Kind() == reflect.Uint16 || keyType.Kind() == reflect.Uint32 || keyType.Kind() == reflect.Uint64: + return strconv.FormatUint(k.Uint(), 10), nil + + case keyType.Kind() == reflect.Float32: + return strconv.FormatFloat(k.Float(), 'f', -1, 32), nil + + case keyType.Kind() == reflect.Float64: + return strconv.FormatFloat(k.Float(), 'f', -1, 64), nil } return "", fmt.Errorf("toml: type %s is not supported as a map key", keyType.Kind()) } @@ -668,8 +680,8 @@ func (enc *Encoder) encodeMap(b []byte, ctx encoderCtx, v reflect.Value) ([]byte } func sortEntriesByKey(e []entry) { - sort.Slice(e, func(i, j int) bool { - return e[i].Key < e[j].Key + slices.SortFunc(e, func(a, b entry) int { + return strings.Compare(a.Key, b.Key) }) } @@ -732,7 +744,7 @@ func walkStruct(ctx encoderCtx, t *table, v reflect.Value) { if fieldType.Anonymous { if fieldType.Type.Kind() == reflect.Struct { walkStruct(ctx, t, f) - } else if fieldType.Type.Kind() == reflect.Pointer && !f.IsNil() && f.Elem().Kind() == reflect.Struct { + } else if fieldType.Type.Kind() == reflect.Ptr && !f.IsNil() && f.Elem().Kind() == reflect.Struct { walkStruct(ctx, t, f.Elem()) } continue @@ -951,7 +963,7 @@ func 
willConvertToTable(ctx encoderCtx, v reflect.Value) bool { if !v.IsValid() { return false } - if v.Type() == timeType || v.Type().Implements(textMarshalerType) || (v.Kind() != reflect.Ptr && v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) { + if v.Type() == timeType || v.Type().Implements(textMarshalerType) || (v.Kind() != reflect.Ptr && v.CanAddr() && reflect.PointerTo(v.Type()).Implements(textMarshalerType)) { return false } diff --git a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go index 98231bae65..c3df8bee1c 100644 --- a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go @@ -5,9 +5,9 @@ import ( "errors" "fmt" "io" - "io/ioutil" "math" "reflect" + "strconv" "strings" "sync/atomic" "time" @@ -21,10 +21,8 @@ import ( // // It is a shortcut for Decoder.Decode() with the default options. func Unmarshal(data []byte, v interface{}) error { - p := unstable.Parser{} - p.Reset(data) - d := decoder{p: &p} - + d := decoder{} + d.p.Reset(data) return d.FromParser(v) } @@ -117,27 +115,25 @@ func (d *Decoder) EnableUnmarshalerInterface() *Decoder { // Inline Table -> same as Table // Array of Tables -> same as Array and Table func (d *Decoder) Decode(v interface{}) error { - b, err := ioutil.ReadAll(d.r) + b, err := io.ReadAll(d.r) if err != nil { return fmt.Errorf("toml: %w", err) } - p := unstable.Parser{} - p.Reset(b) dec := decoder{ - p: &p, strict: strict{ Enabled: d.strict, }, unmarshalerInterface: d.unmarshalerInterface, } + dec.p.Reset(b) return dec.FromParser(v) } type decoder struct { // Which parser instance in use for this decoding session. - p *unstable.Parser + p unstable.Parser // Flag indicating that the current expression is stashed. 
// If set to true, calling nextExpr will not actually pull a new expression @@ -1078,12 +1074,39 @@ func (d *decoder) keyFromData(keyType reflect.Type, data []byte) (reflect.Value, } return mk, nil - case reflect.PtrTo(keyType).Implements(textUnmarshalerType): + case reflect.PointerTo(keyType).Implements(textUnmarshalerType): mk := reflect.New(keyType) if err := mk.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { return reflect.Value{}, fmt.Errorf("toml: error unmarshalling key type %s from text: %w", stringType, err) } return mk.Elem(), nil + + case keyType.Kind() == reflect.Int || keyType.Kind() == reflect.Int8 || keyType.Kind() == reflect.Int16 || keyType.Kind() == reflect.Int32 || keyType.Kind() == reflect.Int64: + key, err := strconv.ParseInt(string(data), 10, 64) + if err != nil { + return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from integer: %w", stringType, err) + } + return reflect.ValueOf(key).Convert(keyType), nil + case keyType.Kind() == reflect.Uint || keyType.Kind() == reflect.Uint8 || keyType.Kind() == reflect.Uint16 || keyType.Kind() == reflect.Uint32 || keyType.Kind() == reflect.Uint64: + key, err := strconv.ParseUint(string(data), 10, 64) + if err != nil { + return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from unsigned integer: %w", stringType, err) + } + return reflect.ValueOf(key).Convert(keyType), nil + + case keyType.Kind() == reflect.Float32: + key, err := strconv.ParseFloat(string(data), 32) + if err != nil { + return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from float: %w", stringType, err) + } + return reflect.ValueOf(float32(key)), nil + + case keyType.Kind() == reflect.Float64: + key, err := strconv.ParseFloat(string(data), 64) + if err != nil { + return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from float: %w", stringType, err) + } + return reflect.ValueOf(float64(key)), nil } return reflect.Value{}, fmt.Errorf("toml: cannot convert map key of type %s to expected type %s", stringType, keyType) } diff --git a/vendor/github.com/planetscale/vtprotobuf/LICENSE b/vendor/github.com/planetscale/vtprotobuf/LICENSE new file mode 100644 index 0000000000..dc61de8465 --- /dev/null +++ b/vendor/github.com/planetscale/vtprotobuf/LICENSE @@ -0,0 +1,29 @@ +Copyright (c) 2021, PlanetScale Inc. All rights reserved. +Copyright (c) 2013, The GoGo Authors. All rights reserved. +Copyright (c) 2018 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/planetscale/vtprotobuf/protohelpers/protohelpers.go b/vendor/github.com/planetscale/vtprotobuf/protohelpers/protohelpers.go new file mode 100644 index 0000000000..64bde83ed0 --- /dev/null +++ b/vendor/github.com/planetscale/vtprotobuf/protohelpers/protohelpers.go @@ -0,0 +1,122 @@ +// Package protohelpers provides helper functions for encoding and decoding protobuf messages. +// The spec can be found at https://protobuf.dev/programming-guides/encoding/. +package protohelpers + +import ( + "fmt" + "io" + "math/bits" +) + +var ( + // ErrInvalidLength is returned when decoding a negative length. + ErrInvalidLength = fmt.Errorf("proto: negative length found during unmarshaling") + // ErrIntOverflow is returned when decoding a varint representation of an integer that overflows 64 bits. + ErrIntOverflow = fmt.Errorf("proto: integer overflow") + // ErrUnexpectedEndOfGroup is returned when decoding a group end without a corresponding group start. + ErrUnexpectedEndOfGroup = fmt.Errorf("proto: unexpected end of group") +) + +// EncodeVarint encodes a uint64 into a varint-encoded byte slice and returns the offset of the encoded value. +// The provided offset is the offset after the last byte of the encoded value. +func EncodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= SizeOfVarint(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} + +// SizeOfVarint returns the size of the varint-encoded value. +func SizeOfVarint(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} + +// SizeOfZigzag returns the size of the zigzag-encoded value. +func SizeOfZigzag(x uint64) (n int) { + return SizeOfVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// Skip the first record of the byte slice and return the offset of the next record. 
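// Illustrative note (editorial sketch, not part of the upstream vtprotobuf
// source): how the helpers in this file behave on the protobuf wire format
// they implement, using only the functions defined above.
//
//	buf := make([]byte, SizeOfVarint(300)) // SizeOfVarint(300) == 2
//	EncodeVarint(buf, len(buf), 300)       // buf is now []byte{0xAC, 0x02}
//	// 300 = 0b1_0010_1100: the low 7 bits (0x2C) are emitted first with the
//	// continuation bit set (0xAC), then the remaining bits (0b10) as 0x02.
//
//	// Skip consumes one complete record: 0x08 is the tag for field 1 with
//	// wire type 0 (varint), followed by the varint payload 0xAC 0x02.
//	n, _ := Skip([]byte{0x08, 0xAC, 0x02}) // n == 3, the offset of the next record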
+func Skip(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLength + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLength + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} diff --git a/vendor/github.com/planetscale/vtprotobuf/types/known/anypb/any_vtproto.pb.go b/vendor/github.com/planetscale/vtprotobuf/types/known/anypb/any_vtproto.pb.go new file mode 100644 index 0000000000..c99b9e6106 --- /dev/null +++ b/vendor/github.com/planetscale/vtprotobuf/types/known/anypb/any_vtproto.pb.go @@ -0,0 +1,389 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: (devel) +// source: google/protobuf/any.proto + +package anypb + +import ( + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + io "io" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Any anypb.Any + +func (m *Any) CloneVT() *Any { + if m == nil { + return (*Any)(nil) + } + r := new(Any) + r.TypeUrl = m.TypeUrl + if rhs := m.Value; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Value = tmpBytes + } + return r +} + +func (this *Any) EqualVT(that *Any) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.TypeUrl != that.TypeUrl { + return false + } + if string(this.Value) != string(that.Value) { + return false + } + return true +} + +func (m *Any) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Any) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Any) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Any) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Any) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Any) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Any) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + return n +} + +func (m *Any) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Any: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Any: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Any) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Any: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Any: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var stringValue string + if intStringLen > 0 { + stringValue = unsafe.String(&dAtA[iNdEx], intStringLen) + } + m.TypeUrl = stringValue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return 
protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = dAtA[iNdEx:postIndex] + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/planetscale/vtprotobuf/types/known/durationpb/duration_vtproto.pb.go b/vendor/github.com/planetscale/vtprotobuf/types/known/durationpb/duration_vtproto.pb.go new file mode 100644 index 0000000000..681ae9f72e --- /dev/null +++ b/vendor/github.com/planetscale/vtprotobuf/types/known/durationpb/duration_vtproto.pb.go @@ -0,0 +1,317 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: (devel) +// source: google/protobuf/duration.proto + +package durationpb + +import ( + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + io "io" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Duration durationpb.Duration + +func (m *Duration) CloneVT() *Duration { + if m == nil { + return (*Duration)(nil) + } + r := new(Duration) + r.Seconds = m.Seconds + r.Nanos = m.Nanos + return r +} + +func (this *Duration) EqualVT(that *Duration) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Seconds != that.Seconds { + return false + } + if this.Nanos != that.Nanos { + return false + } + return true +} + +func (m *Duration) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Duration) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Duration) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Nanos != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + } + if m.Seconds != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Duration) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Duration) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Duration) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Nanos != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + } + if m.Seconds != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Seconds)) + 
i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Duration) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Seconds != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Seconds)) + } + if m.Nanos != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Nanos)) + } + return n +} + +func (m *Duration) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Duration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Duration) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Duration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = 
preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/planetscale/vtprotobuf/types/known/emptypb/empty_vtproto.pb.go b/vendor/github.com/planetscale/vtprotobuf/types/known/emptypb/empty_vtproto.pb.go new file mode 100644 index 0000000000..470c82ac52 --- /dev/null +++ b/vendor/github.com/planetscale/vtprotobuf/types/known/emptypb/empty_vtproto.pb.go @@ -0,0 +1,207 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: (devel) +// source: google/protobuf/empty.proto + +package emptypb + +import ( + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + io "io" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Empty emptypb.Empty + +func (m *Empty) CloneVT() *Empty { + if m == nil { + return (*Empty)(nil) + } + r := new(Empty) + return r +} + +func (this *Empty) EqualVT(that *Empty) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + return true +} + +func (m *Empty) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Empty) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Empty) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *Empty) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Empty) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Empty) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *Empty) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *Empty) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Empty: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Empty: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Empty) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Empty: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Empty: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/planetscale/vtprotobuf/types/known/structpb/struct_vtproto.pb.go b/vendor/github.com/planetscale/vtprotobuf/types/known/structpb/struct_vtproto.pb.go new file mode 100644 index 0000000000..be8b40e338 --- /dev/null +++ b/vendor/github.com/planetscale/vtprotobuf/types/known/structpb/struct_vtproto.pb.go @@ -0,0 +1,2004 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: (devel) +// source: google/protobuf/struct.proto + +package structpb + +import ( + binary "encoding/binary" + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + io "io" + math "math" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Struct structpb.Struct +type Value structpb.Value +type Value_NullValue structpb.Value_NullValue +type Value_NumberValue structpb.Value_NumberValue +type Value_StringValue structpb.Value_StringValue +type Value_BoolValue structpb.Value_BoolValue +type Value_StructValue structpb.Value_StructValue +type Value_ListValue structpb.Value_ListValue +type ListValue structpb.ListValue + +func (m *Struct) CloneVT() *Struct { + if m == nil { + return (*Struct)(nil) + } + r := new(Struct) + if rhs := m.Fields; rhs != nil { + tmpContainer := make(map[string]*structpb.Value, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = (*structpb.Value)((*Value)(v).CloneVT()) + } + r.Fields = tmpContainer + } + return r +} + +func (m *Value) CloneVT() *Value { + if m == nil { + return (*Value)(nil) + } + r := new(Value) + if m.Kind != nil { + switch c := m.Kind.(type) { + case *structpb.Value_NullValue: + r.Kind = (*structpb.Value_NullValue)((*Value_NullValue)(c).CloneVT()) + case *structpb.Value_NumberValue: + r.Kind = (*structpb.Value_NumberValue)((*Value_NumberValue)(c).CloneVT()) + case *structpb.Value_StringValue: + r.Kind = (*structpb.Value_StringValue)((*Value_StringValue)(c).CloneVT()) + case *structpb.Value_BoolValue: + r.Kind = (*structpb.Value_BoolValue)((*Value_BoolValue)(c).CloneVT()) + case *structpb.Value_StructValue: + r.Kind = (*structpb.Value_StructValue)((*Value_StructValue)(c).CloneVT()) + case *structpb.Value_ListValue: + r.Kind = (*structpb.Value_ListValue)((*Value_ListValue)(c).CloneVT()) + } + } + return r +} + +func (m *Value_NullValue) CloneVT() *Value_NullValue { + if m == nil { + return (*Value_NullValue)(nil) + } + r := new(Value_NullValue) + r.NullValue = m.NullValue + return r +} + +func (m *Value_NumberValue) CloneVT() *Value_NumberValue { + if m == nil { + return (*Value_NumberValue)(nil) + } + r := new(Value_NumberValue) + r.NumberValue = m.NumberValue + return r +} + +func (m *Value_StringValue) CloneVT() *Value_StringValue { + if m == nil { + return (*Value_StringValue)(nil) + } + r := new(Value_StringValue) + r.StringValue = m.StringValue + return r +} + +func (m *Value_BoolValue) CloneVT() *Value_BoolValue { + if m == nil { + return (*Value_BoolValue)(nil) + } + r := new(Value_BoolValue) + r.BoolValue = m.BoolValue + return r +} + +func (m *Value_StructValue) CloneVT() *Value_StructValue { + if m == nil { + return (*Value_StructValue)(nil) + } + r := new(Value_StructValue) + r.StructValue = (*structpb.Struct)((*Struct)(m.StructValue).CloneVT()) + return r +} + +func (m *Value_ListValue) CloneVT() *Value_ListValue { + if m == nil { + return (*Value_ListValue)(nil) + } + r := new(Value_ListValue) + r.ListValue = (*structpb.ListValue)((*ListValue)(m.ListValue).CloneVT()) + return r +} + +func (m *ListValue) CloneVT() *ListValue { + if m == nil { + return (*ListValue)(nil) + } + r := new(ListValue) + if rhs := m.Values; rhs != nil { + tmpContainer := make([]*structpb.Value, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = (*structpb.Value)((*Value)(v).CloneVT()) + } + r.Values = tmpContainer + } + return r +} + +func (this *Struct) EqualVT(that *Struct) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if len(this.Fields) != len(that.Fields) { + return false + } + for i, vx := range this.Fields { + vy, ok := that.Fields[i] + if !ok { + return false + } + if p, q := vx, vy; p != q { + if p == nil { + p = &structpb.Value{} + } + if q == nil { + q = 
&structpb.Value{} + } + if !(*Value)(p).EqualVT((*Value)(q)) { + return false + } + } + } + return true +} + +func (this *Value) EqualVT(that *Value) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Kind == nil && that.Kind != nil { + return false + } else if this.Kind != nil { + if that.Kind == nil { + return false + } + switch c := this.Kind.(type) { + case *structpb.Value_NullValue: + if !(*Value_NullValue)(c).EqualVT(that.Kind) { + return false + } + case *structpb.Value_NumberValue: + if !(*Value_NumberValue)(c).EqualVT(that.Kind) { + return false + } + case *structpb.Value_StringValue: + if !(*Value_StringValue)(c).EqualVT(that.Kind) { + return false + } + case *structpb.Value_BoolValue: + if !(*Value_BoolValue)(c).EqualVT(that.Kind) { + return false + } + case *structpb.Value_StructValue: + if !(*Value_StructValue)(c).EqualVT(that.Kind) { + return false + } + case *structpb.Value_ListValue: + if !(*Value_ListValue)(c).EqualVT(that.Kind) { + return false + } + } + } + return true +} + +func (this *Value_NullValue) EqualVT(thatIface any) bool { + that, ok := thatIface.(*Value_NullValue) + if !ok { + if ot, ok := thatIface.(*structpb.Value_NullValue); ok { + that = (*Value_NullValue)(ot) + } else { + return false + } + } + if this == that { + return true + } + if this == nil && that != nil || this != nil && that == nil { + return false + } + if this.NullValue != that.NullValue { + return false + } + return true +} + +func (this *Value_NumberValue) EqualVT(thatIface any) bool { + that, ok := thatIface.(*Value_NumberValue) + if !ok { + if ot, ok := thatIface.(*structpb.Value_NumberValue); ok { + that = (*Value_NumberValue)(ot) + } else { + return false + } + } + if this == that { + return true + } + if this == nil && that != nil || this != nil && that == nil { + return false + } + if this.NumberValue != that.NumberValue { + return false + } + return true +} + +func (this *Value_StringValue) EqualVT(thatIface any) bool { + that, ok := thatIface.(*Value_StringValue) + if !ok { + if ot, ok := thatIface.(*structpb.Value_StringValue); ok { + that = (*Value_StringValue)(ot) + } else { + return false + } + } + if this == that { + return true + } + if this == nil && that != nil || this != nil && that == nil { + return false + } + if this.StringValue != that.StringValue { + return false + } + return true +} + +func (this *Value_BoolValue) EqualVT(thatIface any) bool { + that, ok := thatIface.(*Value_BoolValue) + if !ok { + if ot, ok := thatIface.(*structpb.Value_BoolValue); ok { + that = (*Value_BoolValue)(ot) + } else { + return false + } + } + if this == that { + return true + } + if this == nil && that != nil || this != nil && that == nil { + return false + } + if this.BoolValue != that.BoolValue { + return false + } + return true +} + +func (this *Value_StructValue) EqualVT(thatIface any) bool { + that, ok := thatIface.(*Value_StructValue) + if !ok { + if ot, ok := thatIface.(*structpb.Value_StructValue); ok { + that = (*Value_StructValue)(ot) + } else { + return false + } + } + if this == that { + return true + } + if this == nil && that != nil || this != nil && that == nil { + return false + } + if p, q := this.StructValue, that.StructValue; p != q { + if p == nil { + p = &structpb.Struct{} + } + if q == nil { + q = &structpb.Struct{} + } + if !(*Struct)(p).EqualVT((*Struct)(q)) { + return false + } + } + return true +} + +func (this *Value_ListValue) EqualVT(thatIface any) bool { + that, ok := thatIface.(*Value_ListValue) + if 
!ok { + if ot, ok := thatIface.(*structpb.Value_ListValue); ok { + that = (*Value_ListValue)(ot) + } else { + return false + } + } + if this == that { + return true + } + if this == nil && that != nil || this != nil && that == nil { + return false + } + if p, q := this.ListValue, that.ListValue; p != q { + if p == nil { + p = &structpb.ListValue{} + } + if q == nil { + q = &structpb.ListValue{} + } + if !(*ListValue)(p).EqualVT((*ListValue)(q)) { + return false + } + } + return true +} + +func (this *ListValue) EqualVT(that *ListValue) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if len(this.Values) != len(that.Values) { + return false + } + for i, vx := range this.Values { + vy := that.Values[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &structpb.Value{} + } + if q == nil { + q = &structpb.Value{} + } + if !(*Value)(p).EqualVT((*Value)(q)) { + return false + } + } + } + return true +} + +func (m *Struct) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Struct) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Struct) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Fields) > 0 { + for k := range m.Fields { + v := m.Fields[k] + baseI := i + size, err := (*Value)(v).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Value) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Value) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Value) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + switch c := m.Kind.(type) { + case *structpb.Value_NullValue: + size, err := (*Value_NullValue)(c).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + case *structpb.Value_NumberValue: + size, err := (*Value_NumberValue)(c).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + case *structpb.Value_StringValue: + size, err := (*Value_StringValue)(c).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + case *structpb.Value_BoolValue: + size, err := (*Value_BoolValue)(c).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + case *structpb.Value_StructValue: + size, err := (*Value_StructValue)(c).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + case *structpb.Value_ListValue: + size, err := (*Value_ListValue)(c).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, 
nil +} + +func (m *Value_NullValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Value_NullValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.NullValue)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *Value_NumberValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Value_NumberValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.NumberValue)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *Value_StringValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Value_StringValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringValue) + copy(dAtA[i:], m.StringValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StringValue))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *Value_BoolValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Value_BoolValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *Value_StructValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Value_StructValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StructValue != nil { + size, err := (*Struct)(m.StructValue).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Value_ListValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Value_ListValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListValue != nil { + size, err := (*ListValue)(m.ListValue).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *ListValue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ListValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + size, err := (*Value)(m.Values[iNdEx]).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, 
nil +} + +func (m *Struct) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Struct) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Struct) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Fields) > 0 { + for k := range m.Fields { + v := m.Fields[k] + baseI := i + size, err := (*Value)(v).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Value) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Value) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Value) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m, ok := m.Kind.(*structpb.Value_ListValue); ok { + msg := ((*Value_ListValue)(m)) + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m, ok := m.Kind.(*structpb.Value_StructValue); ok { + msg := ((*Value_StructValue)(m)) + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m, ok := m.Kind.(*structpb.Value_BoolValue); ok { + msg := ((*Value_BoolValue)(m)) + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m, ok := m.Kind.(*structpb.Value_StringValue); ok { + msg := ((*Value_StringValue)(m)) + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m, ok := m.Kind.(*structpb.Value_NumberValue); ok { + msg := ((*Value_NumberValue)(m)) + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m, ok := m.Kind.(*structpb.Value_NullValue); ok { + msg := ((*Value_NullValue)(m)) + size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *Value_NullValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Value_NullValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.NullValue)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *Value_NumberValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Value_NumberValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + 
binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.NumberValue)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *Value_StringValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Value_StringValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringValue) + copy(dAtA[i:], m.StringValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StringValue))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *Value_BoolValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Value_BoolValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *Value_StructValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Value_StructValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StructValue != nil { + size, err := (*Struct)(m.StructValue).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Value_ListValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Value_ListValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListValue != nil { + size, err := (*ListValue)(m.ListValue).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *ListValue) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *ListValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + size, err := (*Value)(m.Values[iNdEx]).MarshalToSizedBufferVTStrict(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Struct) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Fields) > 0 { + for k, v := range m.Fields { + _ = k + _ = v + l = 0 + if v != nil { + l = (*Value)(v).SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + return n +} + +func (m *Value) SizeVT() (n int) { + if m 
== nil { + return 0 + } + var l int + _ = l + switch c := m.Kind.(type) { + case *structpb.Value_NullValue: + n += (*Value_NullValue)(c).SizeVT() + case *structpb.Value_NumberValue: + n += (*Value_NumberValue)(c).SizeVT() + case *structpb.Value_StringValue: + n += (*Value_StringValue)(c).SizeVT() + case *structpb.Value_BoolValue: + n += (*Value_BoolValue)(c).SizeVT() + case *structpb.Value_StructValue: + n += (*Value_StructValue)(c).SizeVT() + case *structpb.Value_ListValue: + n += (*Value_ListValue)(c).SizeVT() + } + return n +} + +func (m *Value_NullValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.NullValue)) + return n +} +func (m *Value_NumberValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *Value_StringValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.StringValue) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + return n +} +func (m *Value_BoolValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *Value_StructValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StructValue != nil { + l = (*Struct)(m.StructValue).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *Value_ListValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListValue != nil { + l = (*ListValue)(m.ListValue).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} +func (m *ListValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Values) > 0 { + for _, e := range m.Values { + l = (*Value)(e).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + return n +} + +func (m *Struct) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Struct: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Struct: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Fields == nil { + m.Fields = make(map[string]*structpb.Value) + } + var mapkey string + var mapvalue *structpb.Value + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return protohelpers.ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &structpb.Value{} + if err := (*Value)(mapvalue).UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Fields[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Value) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NullValue", wireType) + } + var v structpb.NullValue + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= structpb.NullValue(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Kind = &structpb.Value_NullValue{NullValue: v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberValue", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Kind = 
&structpb.Value_NumberValue{NumberValue: float64(math.Float64frombits(v))} + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = &structpb.Value_StringValue{StringValue: string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Kind = &structpb.Value_BoolValue{BoolValue: b} + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StructValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if oneof, ok := m.Kind.(*structpb.Value_StructValue); ok { + if err := (*Struct)(oneof.StructValue).UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + v := &structpb.Struct{} + if err := (*Struct)(v).UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &structpb.Value_StructValue{StructValue: v} + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if oneof, ok := m.Kind.(*structpb.Value_ListValue); ok { + if err := (*ListValue)(oneof.ListValue).UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + v := &structpb.ListValue{} + if err := (*ListValue)(v).UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &structpb.Value_ListValue{ListValue: v} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*ListValue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, &structpb.Value{}) + if err := (*Value)(m.Values[len(m.Values)-1]).UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Struct) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Struct: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Struct: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Fields == nil { + m.Fields = make(map[string]*structpb.Value) + } + var mapkey string + var mapvalue *structpb.Value + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + if intStringLenmapkey == 0 { + mapkey = "" + } else { + mapkey = unsafe.String(&dAtA[iNdEx], intStringLenmapkey) + } + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return protohelpers.ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &structpb.Value{} + if err := (*Value)(mapvalue).UnmarshalVTUnsafe(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Fields[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Value) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NullValue", wireType) + } + var v structpb.NullValue + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= structpb.NullValue(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Kind = &structpb.Value_NullValue{NullValue: v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberValue", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Kind = &structpb.Value_NumberValue{NumberValue: 
float64(math.Float64frombits(v))} + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var stringValue string + if intStringLen > 0 { + stringValue = unsafe.String(&dAtA[iNdEx], intStringLen) + } + m.Kind = &structpb.Value_StringValue{StringValue: stringValue} + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Kind = &structpb.Value_BoolValue{BoolValue: b} + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StructValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if oneof, ok := m.Kind.(*structpb.Value_StructValue); ok { + if err := (*Struct)(oneof.StructValue).UnmarshalVTUnsafe(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + v := &structpb.Struct{} + if err := (*Struct)(v).UnmarshalVTUnsafe(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &structpb.Value_StructValue{StructValue: v} + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if oneof, ok := m.Kind.(*structpb.Value_ListValue); ok { + if err := (*ListValue)(oneof.ListValue).UnmarshalVTUnsafe(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + v := &structpb.ListValue{} + if err := (*ListValue)(v).UnmarshalVTUnsafe(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &structpb.Value_ListValue{ListValue: v} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if 
iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListValue) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, &structpb.Value{}) + if err := (*Value)(m.Values[len(m.Values)-1]).UnmarshalVTUnsafe(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/planetscale/vtprotobuf/types/known/timestamppb/timestamp_vtproto.pb.go b/vendor/github.com/planetscale/vtprotobuf/types/known/timestamppb/timestamp_vtproto.pb.go new file mode 100644 index 0000000000..5c63f308f0 --- /dev/null +++ b/vendor/github.com/planetscale/vtprotobuf/types/known/timestamppb/timestamp_vtproto.pb.go @@ -0,0 +1,317 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: (devel) +// source: google/protobuf/timestamp.proto + +package timestamppb + +import ( + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + io "io" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Timestamp timestamppb.Timestamp + +func (m *Timestamp) CloneVT() *Timestamp { + if m == nil { + return (*Timestamp)(nil) + } + r := new(Timestamp) + r.Seconds = m.Seconds + r.Nanos = m.Nanos + return r +} + +func (this *Timestamp) EqualVT(that *Timestamp) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Seconds != that.Seconds { + return false + } + if this.Nanos != that.Nanos { + return false + } + return true +} + +func (m *Timestamp) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Timestamp) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Timestamp) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Nanos != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + } + if m.Seconds != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Timestamp) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Timestamp) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Timestamp) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Nanos != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + } + if m.Seconds != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Timestamp) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Seconds != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Seconds)) + } + if m.Nanos != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Nanos)) + } + return n +} + +func (m *Timestamp) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Timestamp) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/planetscale/vtprotobuf/types/known/wrapperspb/wrappers_vtproto.pb.go b/vendor/github.com/planetscale/vtprotobuf/types/known/wrapperspb/wrappers_vtproto.pb.go new file mode 100644 index 0000000000..0dac9b2822 --- /dev/null +++ b/vendor/github.com/planetscale/vtprotobuf/types/known/wrapperspb/wrappers_vtproto.pb.go @@ -0,0 +1,2240 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: (devel) +// source: google/protobuf/wrappers.proto + +package wrapperspb + +import ( + binary "encoding/binary" + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + io "io" + math "math" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type DoubleValue wrapperspb.DoubleValue +type FloatValue wrapperspb.FloatValue +type Int64Value wrapperspb.Int64Value +type UInt64Value wrapperspb.UInt64Value +type Int32Value wrapperspb.Int32Value +type UInt32Value wrapperspb.UInt32Value +type BoolValue wrapperspb.BoolValue +type StringValue wrapperspb.StringValue +type BytesValue wrapperspb.BytesValue + +func (m *DoubleValue) CloneVT() *DoubleValue { + if m == nil { + return (*DoubleValue)(nil) + } + r := new(DoubleValue) + r.Value = m.Value + return r +} + +func (m *FloatValue) CloneVT() *FloatValue { + if m == nil { + return (*FloatValue)(nil) + } + r := new(FloatValue) + r.Value = m.Value + return r +} + +func (m *Int64Value) CloneVT() *Int64Value { + if m == nil { + return (*Int64Value)(nil) + } + r := new(Int64Value) + r.Value = m.Value + return r +} + +func (m *UInt64Value) CloneVT() *UInt64Value { + if m == nil { + return (*UInt64Value)(nil) + } + r := new(UInt64Value) + r.Value = m.Value + return r +} + +func (m *Int32Value) CloneVT() *Int32Value { + if m == nil { + return (*Int32Value)(nil) + } + r := new(Int32Value) + r.Value = m.Value + return r +} + +func (m *UInt32Value) CloneVT() *UInt32Value { + if m == nil { + return (*UInt32Value)(nil) + } + r := new(UInt32Value) + r.Value = m.Value + return r +} + +func (m *BoolValue) CloneVT() *BoolValue { + if m == nil { + return (*BoolValue)(nil) + } + r := new(BoolValue) + r.Value = m.Value + return r +} + +func (m *StringValue) CloneVT() *StringValue { + if m == nil { + return (*StringValue)(nil) + } + r := new(StringValue) + r.Value = m.Value + return r +} + +func (m *BytesValue) CloneVT() *BytesValue { + if m == nil { + return (*BytesValue)(nil) + } + r := new(BytesValue) + if rhs := m.Value; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Value = tmpBytes + } + return r +} + +func (this *DoubleValue) EqualVT(that *DoubleValue) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Value != that.Value { + return false + } + return true +} + +func (this *FloatValue) EqualVT(that *FloatValue) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Value != that.Value { + return false + } + return true +} + +func (this *Int64Value) EqualVT(that *Int64Value) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Value != that.Value { + return false + } + return true +} + +func (this *UInt64Value) EqualVT(that *UInt64Value) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Value != that.Value { + return false + } + return true +} + +func (this *Int32Value) EqualVT(that *Int32Value) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Value != that.Value { + return false + } + return true +} + +func (this *UInt32Value) EqualVT(that *UInt32Value) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Value != that.Value { + return false + } + return true +} + +func (this *BoolValue) EqualVT(that *BoolValue) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Value != that.Value { + return false + } + return true +} + +func (this *StringValue) EqualVT(that *StringValue) bool { + if this == that { + 
return true + } else if this == nil || that == nil { + return false + } + if this.Value != that.Value { + return false + } + return true +} + +func (this *BytesValue) EqualVT(that *BytesValue) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if string(this.Value) != string(that.Value) { + return false + } + return true +} + +func (m *DoubleValue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *DoubleValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *FloatValue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FloatValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *FloatValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i -= 4 + binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Value)))) + i-- + dAtA[i] = 0xd + } + return len(dAtA) - i, nil +} + +func (m *Int64Value) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int64Value) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Int64Value) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UInt64Value) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt64Value) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UInt64Value) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Int32Value) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int32Value) MarshalToVT(dAtA []byte) (int, error) 
{ + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Int32Value) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UInt32Value) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt32Value) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UInt32Value) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BoolValue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BoolValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *BoolValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value { + i-- + if m.Value { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StringValue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StringValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StringValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BytesValue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BytesValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *BytesValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DoubleValue) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleValue) 
MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *DoubleValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *FloatValue) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FloatValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *FloatValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i -= 4 + binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Value)))) + i-- + dAtA[i] = 0xd + } + return len(dAtA) - i, nil +} + +func (m *Int64Value) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int64Value) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Int64Value) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UInt64Value) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt64Value) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UInt64Value) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Int32Value) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int32Value) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *Int32Value) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UInt32Value) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt32Value) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *UInt32Value) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BoolValue) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BoolValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *BoolValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value { + i-- + if m.Value { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StringValue) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StringValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *StringValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BytesValue) MarshalVTStrict() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BytesValue) MarshalToVTStrict(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVTStrict(dAtA[:size]) +} + +func (m *BytesValue) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DoubleValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + return n +} + +func (m *FloatValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 5 + } + return n +} + +func (m *Int64Value) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Value)) + } + return n +} + +func (m *UInt64Value) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Value)) + } + return n +} + +func (m *Int32Value) SizeVT() (n int) { + if m == nil { + 
return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Value)) + } + return n +} + +func (m *UInt32Value) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Value)) + } + return n +} + +func (m *BoolValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value { + n += 2 + } + return n +} + +func (m *StringValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + return n +} + +func (m *BytesValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + return n +} + +func (m *DoubleValue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DoubleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DoubleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FloatValue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FloatValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FloatValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.Value = float32(math.Float32frombits(v)) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int64Value) UnmarshalVT(dAtA []byte) 
error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt64Value) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int32Value) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", 
wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt32Value) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BoolValue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BoolValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BoolValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StringValue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx 
< l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StringValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StringValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BytesValue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BytesValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BytesValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DoubleValue) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DoubleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DoubleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FloatValue) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FloatValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FloatValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.Value = float32(math.Float32frombits(v)) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int64Value) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if 
wireType == 4 { + return fmt.Errorf("proto: Int64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt64Value) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int32Value) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy 
< 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt32Value) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BoolValue) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BoolValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BoolValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StringValue) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: StringValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StringValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var stringValue string + if intStringLen > 0 { + stringValue = unsafe.String(&dAtA[iNdEx], intStringLen) + } + m.Value = stringValue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BytesValue) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BytesValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BytesValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = dAtA[iNdEx:postIndex] + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/interactive.go b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/interactive.go index dfc1f0c0e8..6714b3488e 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/interactive.go +++ b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/interactive.go @@ -134,7 +134,7 @@ func (i *InteractiveIDTokenGetter) doOobFlow(cfg *oauth2.Config, stateToken stri fmt.Fprintln(i.GetOutput(), "Go to the following link in a browser:\n\n\t", authURL) 
fmt.Fprintf(i.GetOutput(), "Enter verification code: ") var code string - fmt.Fscanf(i.GetInput(), "%s", &code) + _, _ = fmt.Fscanf(i.GetInput(), "%s", &code) // New line in case read input doesn't move cursor to next line. fmt.Fprintln(i.GetOutput()) return code diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/client.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/client.go index 32bb79aac1..eee88c031f 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/client.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/client.go @@ -415,6 +415,9 @@ func (g *gcpClient) createKeyRing(ctx context.Context) error { KeyRingId: g.keyRing, } result, err := g.kmsClient.CreateKeyRing(ctx, createKeyRingRequest) + if err != nil { + return fmt.Errorf("creating keyring: %w", err) + } log.Printf("Created key ring %s in GCP KMS.\n", result.GetName()) - return err + return nil } diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/client.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/client.go index 20656bc805..77f418f91f 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/client.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/client.go @@ -19,6 +19,7 @@ package hashivault import ( "context" "crypto" + "crypto/ed25519" "encoding/base64" "encoding/json" "errors" @@ -220,17 +221,30 @@ func (h *hashivaultClient) fetchPublicKey(_ context.Context) (crypto.PublicKey, return nil, fmt.Errorf("could not parse transit key keys data as map[string]interface{}") } - publicKeyPem, ok := keyMap["public_key"] + publicKey, ok := keyMap["public_key"] if !ok { return nil, errors.New("failed to read transit key keys: corrupted response") } - strPublicKeyPem, ok := publicKeyPem.(string) + strPublicKey, ok := publicKey.(string) if !ok { return nil, fmt.Errorf("could not parse public key pem as string") } + // vault returns the key type in the "name" field + if keyType := keyMap["name"]; keyType == "ed25519" { + // vault returns ed25519 public keys as base64 encoding of + // the raw bytes of the key + decodedPublicKey, err := base64.StdEncoding.DecodeString(strPublicKey) + if err != nil { + return nil, fmt.Errorf("failed to base64 decode ed25519 public key: %s", err) + } + if keyLen := len(decodedPublicKey); keyLen != ed25519.PublicKeySize { + return nil, fmt.Errorf("decoded ed25519 public key length is %d, should be %d", keyLen, ed25519.PublicKeySize) + } + return ed25519.PublicKey(decodedPublicKey), nil + } - return cryptoutils.UnmarshalPEMToPublicKey([]byte(strPublicKeyPem)) + return cryptoutils.UnmarshalPEMToPublicKey([]byte(strPublicKey)) } func (h *hashivaultClient) public() (crypto.PublicKey, error) { diff --git a/vendor/github.com/sigstore/sigstore/pkg/tuf/client.go b/vendor/github.com/sigstore/sigstore/pkg/tuf/client.go index 35c7337558..d03973e72a 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/tuf/client.go +++ b/vendor/github.com/sigstore/sigstore/pkg/tuf/client.go @@ -71,6 +71,7 @@ var ( // getRemoteRoot is a var for testing. 
var getRemoteRoot = func() string { return DefaultRemoteRoot } +// Deprecated: Use https://pkg.go.dev/github.com/sigstore/sigstore-go/pkg/tuf type TUF struct { sync.Mutex client *client.Client diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/backoff.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/backoff.go index b6ef1ed53f..5ff9126128 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/backoff.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/backoff.go @@ -5,30 +5,51 @@ import ( "time" ) -// backoff defines an linear backoff policy. -type backoff struct { - InitialDelay time.Duration - MaxDelay time.Duration +// BackoffStrategy provides backoff facilities. +type BackoffStrategy interface { + // NewBackoff returns a new backoff for the strategy. The returned + // Backoff is in the same state that it would be in after a call to + // Reset(). + NewBackoff() Backoff +} + +// Backoff provides backoff for a workload API operation. +type Backoff interface { + // Next returns the next backoff period. + Next() time.Duration + + // Reset() resets the backoff. + Reset() +} + +type defaultBackoffStrategy struct{} + +func (defaultBackoffStrategy) NewBackoff() Backoff { + return newLinearBackoff() +} + +// linearBackoff defines an linear backoff policy. +type linearBackoff struct { + initialDelay time.Duration + maxDelay time.Duration n int } -func newBackoff() *backoff { - return &backoff{ - InitialDelay: time.Second, - MaxDelay: 30 * time.Second, +func newLinearBackoff() *linearBackoff { + return &linearBackoff{ + initialDelay: time.Second, + maxDelay: 30 * time.Second, n: 0, } } -// Duration returns the next wait period for the backoff. Not goroutine-safe. -func (b *backoff) Duration() time.Duration { +func (b *linearBackoff) Next() time.Duration { backoff := float64(b.n) + 1 - d := math.Min(b.InitialDelay.Seconds()*backoff, b.MaxDelay.Seconds()) + d := math.Min(b.initialDelay.Seconds()*backoff, b.maxDelay.Seconds()) b.n++ return time.Duration(d) * time.Second } -// Reset resets the backoff's state. -func (b *backoff) Reset() { +func (b *linearBackoff) Reset() { b.n = 0 } diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client.go index 4d5de5d59f..7739798b5d 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client.go @@ -119,7 +119,7 @@ func (c *Client) FetchX509Bundles(ctx context.Context) (*x509bundle.Set, error) // WatchX509Bundles watches for changes to the X.509 bundles. The watcher receives // the updated X.509 bundles. func (c *Client) WatchX509Bundles(ctx context.Context, watcher X509BundleWatcher) error { - backoff := newBackoff() + backoff := c.config.backoffStrategy.NewBackoff() for { err := c.watchX509Bundles(ctx, watcher, backoff) watcher.OnX509BundlesWatchError(err) @@ -152,7 +152,7 @@ func (c *Client) FetchX509Context(ctx context.Context) (*X509Context, error) { // WatchX509Context watches for updates to the X.509 context. The watcher // receives the updated X.509 context. func (c *Client) WatchX509Context(ctx context.Context, watcher X509ContextWatcher) error { - backoff := newBackoff() + backoff := c.config.backoffStrategy.NewBackoff() for { err := c.watchX509Context(ctx, watcher, backoff) watcher.OnX509ContextWatchError(err) @@ -224,7 +224,7 @@ func (c *Client) FetchJWTBundles(ctx context.Context) (*jwtbundle.Set, error) { // WatchJWTBundles watches for changes to the JWT bundles. 
The watcher receives // the updated JWT bundles. func (c *Client) WatchJWTBundles(ctx context.Context, watcher JWTBundleWatcher) error { - backoff := newBackoff() + backoff := c.config.backoffStrategy.NewBackoff() for { err := c.watchJWTBundles(ctx, watcher, backoff) watcher.OnJWTBundlesWatchError(err) @@ -258,7 +258,7 @@ func (c *Client) newConn(ctx context.Context) (*grpc.ClientConn, error) { return grpc.DialContext(ctx, c.config.address, c.config.dialOptions...) //nolint:staticcheck // preserve backcompat with WithDialOptions option } -func (c *Client) handleWatchError(ctx context.Context, err error, backoff *backoff) error { +func (c *Client) handleWatchError(ctx context.Context, err error, backoff Backoff) error { code := status.Code(err) if code == codes.Canceled { return err @@ -270,7 +270,7 @@ func (c *Client) handleWatchError(ctx context.Context, err error, backoff *backo } c.config.log.Errorf("Failed to watch the Workload API: %v", err) - retryAfter := backoff.Duration() + retryAfter := backoff.Next() c.config.log.Debugf("Retrying watch in %s", retryAfter) select { case <-time.After(retryAfter): @@ -281,7 +281,7 @@ func (c *Client) handleWatchError(ctx context.Context, err error, backoff *backo } } -func (c *Client) watchX509Context(ctx context.Context, watcher X509ContextWatcher, backoff *backoff) error { +func (c *Client) watchX509Context(ctx context.Context, watcher X509ContextWatcher, backoff Backoff) error { ctx, cancel := context.WithCancel(withHeader(ctx)) defer cancel() @@ -308,7 +308,7 @@ func (c *Client) watchX509Context(ctx context.Context, watcher X509ContextWatche } } -func (c *Client) watchJWTBundles(ctx context.Context, watcher JWTBundleWatcher, backoff *backoff) error { +func (c *Client) watchJWTBundles(ctx context.Context, watcher JWTBundleWatcher, backoff Backoff) error { ctx, cancel := context.WithCancel(withHeader(ctx)) defer cancel() @@ -335,7 +335,7 @@ func (c *Client) watchJWTBundles(ctx context.Context, watcher JWTBundleWatcher, } } -func (c *Client) watchX509Bundles(ctx context.Context, watcher X509BundleWatcher, backoff *backoff) error { +func (c *Client) watchX509Bundles(ctx context.Context, watcher X509BundleWatcher, backoff Backoff) error { ctx, cancel := context.WithCancel(withHeader(ctx)) defer cancel() @@ -402,7 +402,8 @@ func withHeader(ctx context.Context) context.Context { func defaultClientConfig() clientConfig { return clientConfig{ - log: logger.Null, + log: logger.Null, + backoffStrategy: defaultBackoffStrategy{}, } } diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/jwtsource.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/jwtsource.go index 47ea83ade5..1122353903 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/jwtsource.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/jwtsource.go @@ -16,6 +16,7 @@ var jwtsourceErr = errs.Class("jwtsource") // Workload API. 
type JWTSource struct { watcher *watcher + picker func([]*jwtsvid.SVID) *jwtsvid.SVID mtx sync.RWMutex bundles *jwtbundle.Set @@ -33,7 +34,9 @@ func NewJWTSource(ctx context.Context, options ...JWTSourceOption) (_ *JWTSource option.configureJWTSource(config) } - s := &JWTSource{} + s := &JWTSource{ + picker: config.picker, + } s.watcher, err = newWatcher(ctx, config.watcher, nil, s.setJWTBundles) if err != nil { @@ -61,7 +64,22 @@ func (s *JWTSource) FetchJWTSVID(ctx context.Context, params jwtsvid.Params) (*j if err := s.checkClosed(); err != nil { return nil, err } - return s.watcher.client.FetchJWTSVID(ctx, params) + + var ( + svid *jwtsvid.SVID + err error + ) + if s.picker == nil { + svid, err = s.watcher.client.FetchJWTSVID(ctx, params) + } else { + svids, err := s.watcher.client.FetchJWTSVIDs(ctx, params) + if err != nil { + return svid, err + } + svid = s.picker(svids) + } + + return svid, err } // FetchJWTSVIDs fetches all JWT-SVIDs from the source with the given parameters. diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/option.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/option.go index 00cab7d16c..f596f30c46 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/option.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/option.go @@ -2,6 +2,7 @@ package workloadapi import ( "github.com/spiffe/go-spiffe/v2/logger" + "github.com/spiffe/go-spiffe/v2/svid/jwtsvid" "github.com/spiffe/go-spiffe/v2/svid/x509svid" "google.golang.org/grpc" ) @@ -35,6 +36,14 @@ func WithLogger(logger logger.Logger) ClientOption { }) } +// WithBackoff provides a custom backoff strategy that replaces the +// default backoff strategy (linear backoff). +func WithBackoffStrategy(backoffStrategy BackoffStrategy) ClientOption { + return clientOption(func(c *clientConfig) { + c.backoffStrategy = backoffStrategy + }) +} + // SourceOption are options that are shared among all option types. type SourceOption interface { configureX509Source(*x509SourceConfig) @@ -60,12 +69,12 @@ type X509SourceOption interface { configureX509Source(*x509SourceConfig) } -// WithDefaultX509SVIDPicker provides a function that is used to determine the -// default X509-SVID when more than one is provided by the Workload API. By -// default, the first X509-SVID in the list returned by the Workload API is +// WithDefaultJWTSVIDPicker provides a function that is used to determine the +// default JWT-SVID when more than one is provided by the Workload API. By +// default, the first JWT-SVID in the list returned by the Workload API is // used. -func WithDefaultX509SVIDPicker(picker func([]*x509svid.SVID) *x509svid.SVID) X509SourceOption { - return withDefaultX509SVIDPicker{picker: picker} +func WithDefaultJWTSVIDPicker(picker func([]*jwtsvid.SVID) *jwtsvid.SVID) JWTSourceOption { + return withDefaultJWTSVIDPicker{picker: picker} } // JWTSourceOption is an option for the JWTSource. A SourceOption is also a @@ -74,6 +83,14 @@ type JWTSourceOption interface { configureJWTSource(*jwtSourceConfig) } +// WithDefaultX509SVIDPicker provides a function that is used to determine the +// default X509-SVID when more than one is provided by the Workload API. By +// default, the first X509-SVID in the list returned by the Workload API is +// used. +func WithDefaultX509SVIDPicker(picker func([]*x509svid.SVID) *x509svid.SVID) X509SourceOption { + return withDefaultX509SVIDPicker{picker: picker} +} + // BundleSourceOption is an option for the BundleSource. A SourceOption is also // a BundleSourceOption. 
type BundleSourceOption interface { @@ -81,10 +98,11 @@ type BundleSourceOption interface { } type clientConfig struct { - address string - namedPipeName string - dialOptions []grpc.DialOption - log logger.Logger + address string + namedPipeName string + dialOptions []grpc.DialOption + log logger.Logger + backoffStrategy BackoffStrategy } type clientOption func(*clientConfig) @@ -100,6 +118,7 @@ type x509SourceConfig struct { type jwtSourceConfig struct { watcher watcherConfig + picker func([]*jwtsvid.SVID) *jwtsvid.SVID } type bundleSourceConfig struct { @@ -145,3 +164,11 @@ type withDefaultX509SVIDPicker struct { func (o withDefaultX509SVIDPicker) configureX509Source(config *x509SourceConfig) { config.picker = o.picker } + +type withDefaultJWTSVIDPicker struct { + picker func([]*jwtsvid.SVID) *jwtsvid.SVID +} + +func (o withDefaultJWTSVIDPicker) configureJWTSource(config *jwtSourceConfig) { + config.picker = o.picker +} diff --git a/vendor/github.com/tektoncd/chains/pkg/chains/storage/docdb/docdb.go b/vendor/github.com/tektoncd/chains/pkg/chains/storage/docdb/docdb.go index b3fcd09483..d732641661 100644 --- a/vendor/github.com/tektoncd/chains/pkg/chains/storage/docdb/docdb.go +++ b/vendor/github.com/tektoncd/chains/pkg/chains/storage/docdb/docdb.go @@ -37,6 +37,7 @@ import ( const ( StorageTypeDocDB = "docdb" + mongoEnv = "MONGO_SERVER_URL" ) // ErrNothingToWatch is an error that's returned when the backend doesn't have anything to "watch" @@ -98,34 +99,16 @@ func WatchBackend(ctx context.Context, cfg config.Config, watcherStop chan bool) return nil, ErrNothingToWatch } - // Set up watcher only when `storage.docdb.mongo-server-url-dir` is set - if cfg.Storage.DocDB.MongoServerURLDir == "" { + pathsToWatch := getPathsToWatch(ctx, cfg) + if len(pathsToWatch) == 0 { return nil, ErrNothingToWatch } - logger.Infof("setting up fsnotify watcher for directory: %s", cfg.Storage.DocDB.MongoServerURLDir) - watcher, err := fsnotify.NewWatcher() if err != nil { return nil, err } - pathsToWatch := []string{ - // mongo-server-url-dir/MONGO_SERVER_URL is where the MONGO_SERVER_URL environment - // variable is expected to be mounted, either manually or via a Kubernetes secret, etc. - filepath.Join(cfg.Storage.DocDB.MongoServerURLDir, "MONGO_SERVER_URL"), - // When a Kubernetes secret is mounted on a path, the `data` in that secret is mounted - // under path/..data that is then `symlink`ed to the key of the data. In this instance, - // the mounted path is going to look like: - // file 1 - ..2024_05_03_11_23_23.1253599725 - // file 2 - ..data -> ..2024_05_03_11_23_23.1253599725 - // file 3 - MONGO_SERVER_URL -> ..data/MONGO_SERVER_URL - // So each time the secret is updated, the file `MONGO_SERVER_URL` is not updated, - // instead the underlying symlink at `..data` is updated and that's what we want to - // capture via the fsnotify event watcher - filepath.Join(cfg.Storage.DocDB.MongoServerURLDir, "..data"), - } - backendChan := make(chan *Backend) // Start listening for events. 
go func() { @@ -145,13 +128,23 @@ func WatchBackend(ctx context.Context, cfg config.Config, watcherStop chan bool) continue } - updatedEnv, err := getMongoServerURLFromDir(cfg.Storage.DocDB.MongoServerURLDir) - if err != nil { - logger.Error(err) - backendChan <- nil + var updatedEnv string + if cfg.Storage.DocDB.MongoServerURLPath != "" { + updatedEnv, err = getMongoServerURLFromPath(cfg.Storage.DocDB.MongoServerURLPath) + if err != nil { + logger.Error(err) + backendChan <- nil + } + } else if cfg.Storage.DocDB.MongoServerURLDir != "" { + updatedEnv, err = getMongoServerURLFromDir(cfg.Storage.DocDB.MongoServerURLDir) + if err != nil { + logger.Error(err) + backendChan <- nil + } } + if updatedEnv != os.Getenv("MONGO_SERVER_URL") { - logger.Infof("directory %s has been updated, reconfiguring backend...", cfg.Storage.DocDB.MongoServerURLDir) + logger.Info("Mongo server url has been updated, reconfiguring backend...") // Now that MONGO_SERVER_URL has been updated, we should update docdb backend again newDocDBBackend, err := NewStorageBackend(ctx, cfg) @@ -179,11 +172,23 @@ func WatchBackend(ctx context.Context, cfg config.Config, watcherStop chan bool) } }() - // Add a path. - err = watcher.Add(cfg.Storage.DocDB.MongoServerURLDir) - if err != nil { - return nil, err + if cfg.Storage.DocDB.MongoServerURLPath != "" { + dirPath := filepath.Dir(cfg.Storage.DocDB.MongoServerURLPath) + // Add a path. + err = watcher.Add(dirPath) + if err != nil { + return nil, err + } } + + if cfg.Storage.DocDB.MongoServerURLDir != "" { + // Add a path. + err = watcher.Add(cfg.Storage.DocDB.MongoServerURLDir) + if err != nil { + return nil, err + } + } + return backendChan, nil } @@ -256,27 +261,43 @@ func (b *Backend) retrieveDocuments(ctx context.Context, opts config.StorageOpts } func populateMongoServerURL(ctx context.Context, cfg config.Config) error { - // First preference is given to the key `storage.docdb.mongo-server-url-dir`. + // First preference is given to the key `storage.docdb.mongo-server-url-path`. + // If that doesn't exist, then we move on to `storage.docdb.mongo-server-url-dir`. // If that doesn't exist, then we move on to `storage.docdb.mongo-server-url`. // If that doesn't exist, then we check if `MONGO_SERVER_URL` env var is set. 
logger := logging.FromContext(ctx) - mongoEnv := "MONGO_SERVER_URL" + + if cfg.Storage.DocDB.MongoServerURLPath != "" { + logger.Infof("setting %s from storage.docdb.mongo-server-url-path: %s", mongoEnv, cfg.Storage.DocDB.MongoServerURLPath) + mongoServerURL, err := getMongoServerURLFromPath(cfg.Storage.DocDB.MongoServerURLPath) + if err != nil { + return err + } + if err := os.Setenv(mongoEnv, mongoServerURL); err != nil { + return err + } + return nil + } if cfg.Storage.DocDB.MongoServerURLDir != "" { logger.Infof("setting %s from storage.docdb.mongo-server-url-dir: %s", mongoEnv, cfg.Storage.DocDB.MongoServerURLDir) if err := setMongoServerURLFromDir(cfg.Storage.DocDB.MongoServerURLDir); err != nil { return err } - } else if cfg.Storage.DocDB.MongoServerURL != "" { + return nil + } + + if cfg.Storage.DocDB.MongoServerURL != "" { logger.Infof("setting %s from storage.docdb.mongo-server-url", mongoEnv) if err := os.Setenv(mongoEnv, cfg.Storage.DocDB.MongoServerURL); err != nil { return err } + return nil } if _, envExists := os.LookupEnv(mongoEnv); !envExists { return fmt.Errorf("mongo docstore configured but %s environment variable not set, "+ - "supply one of storage.docdb.mongo-server-url-dir, storage.docdb.mongo-server-url or set %s", mongoEnv, mongoEnv) + "supply one of storage.docdb.mongo-server-url-path, storage.docdb.mongo-server-url-dir, storage.docdb.mongo-server-url or set %s", mongoEnv, mongoEnv) } return nil @@ -288,7 +309,7 @@ func setMongoServerURLFromDir(dir string) error { return err } - if err = os.Setenv("MONGO_SERVER_URL", fileDataNormalized); err != nil { + if err = os.Setenv(mongoEnv, fileDataNormalized); err != nil { return err } @@ -296,8 +317,6 @@ func setMongoServerURLFromDir(dir string) error { } func getMongoServerURLFromDir(dir string) (string, error) { - mongoEnv := "MONGO_SERVER_URL" - stat, err := os.Stat(dir) if err != nil { if os.IsNotExist(err) { @@ -327,3 +346,62 @@ func getMongoServerURLFromDir(dir string) (string, error) { return fileDataNormalized, nil } + +// getMongoServerUrlFromPath retreives token from the given mount path +func getMongoServerURLFromPath(path string) (string, error) { + fileData, err := os.ReadFile(path) + if err != nil { + return "", fmt.Errorf("reading file in %q: %w", path, err) + } + + // A trailing newline is fairly common in mounted files, so remove it. + fileDataNormalized := strings.TrimSuffix(string(fileData), "\n") + return fileDataNormalized, nil +} + +func getPathsToWatch(ctx context.Context, cfg config.Config) []string { + var pathsToWatch = []string{} + logger := logging.FromContext(ctx) + + if cfg.Storage.DocDB.MongoServerURLPath != "" { + logger.Infof("setting up fsnotify watcher for mongo server url path: %s", cfg.Storage.DocDB.MongoServerURLPath) + dirPath := filepath.Dir(cfg.Storage.DocDB.MongoServerURLPath) + pathsToWatch = []string{ + // mongo-server-url-path/ is where the mongo server url token + // When a Kubernetes secret is mounted on a path, the `data` in that secret is mounted + // under path/..data that is then `symlink`ed to the key of the data. 
In this instance, + // the mounted path is going to look like: + // file 1 - ..2024_05_03_11_23_23.1253599725 + // file 2 - ..data -> ..2024_05_03_11_23_23.1253599725 + // file 3 - ..data/ + // So each time the secret is updated, the file is not updated, + // instead the underlying symlink at `..data` is updated and that's what we want to + // capture via the fsnotify event watcher + // filepath.Join(cfg.Storage.DocDB.MongoServerURLPath, "..data"), + filepath.Join(dirPath, "..data"), + } + return pathsToWatch + } + + if cfg.Storage.DocDB.MongoServerURLDir != "" { + logger.Infof("setting up fsnotify watcher for directory: %s", cfg.Storage.DocDB.MongoServerURLDir) + pathsToWatch = []string{ + // mongo-server-url-dir/MONGO_SERVER_URL is where the MONGO_SERVER_URL environment + // variable is expected to be mounted, either manually or via a Kubernetes secret, etc. + filepath.Join(cfg.Storage.DocDB.MongoServerURLDir, "MONGO_SERVER_URL"), + // When a Kubernetes secret is mounted on a path, the `data` in that secret is mounted + // under path/..data that is then `symlink`ed to the key of the data. In this instance, + // the mounted path is going to look like: + // file 1 - ..2024_05_03_11_23_23.1253599725 + // file 2 - ..data -> ..2024_05_03_11_23_23.1253599725 + // file 3 - MONGO_SERVER_URL -> ..data/MONGO_SERVER_URL + // So each time the secret is updated, the file `MONGO_SERVER_URL` is not updated, + // instead the underlying symlink at `..data` is updated and that's what we want to + // capture via the fsnotify event watcher + filepath.Join(cfg.Storage.DocDB.MongoServerURLDir, "..data"), + } + return pathsToWatch + } + + return pathsToWatch +} diff --git a/vendor/github.com/tektoncd/chains/pkg/config/config.go b/vendor/github.com/tektoncd/chains/pkg/config/config.go index 7175ac2bc1..4e9cef101a 100644 --- a/vendor/github.com/tektoncd/chains/pkg/config/config.go +++ b/vendor/github.com/tektoncd/chains/pkg/config/config.go @@ -123,9 +123,10 @@ type TektonStorageConfig struct { } type DocDBStorageConfig struct { - URL string - MongoServerURL string - MongoServerURLDir string + URL string + MongoServerURL string + MongoServerURLDir string + MongoServerURLPath string } type GrafeasConfig struct { @@ -168,12 +169,13 @@ const ( ociStorageKey = "artifacts.oci.storage" ociSignerKey = "artifacts.oci.signer" - gcsBucketKey = "storage.gcs.bucket" - ociRepositoryKey = "storage.oci.repository" - ociRepositoryInsecureKey = "storage.oci.repository.insecure" - docDBUrlKey = "storage.docdb.url" - docDBMongoServerURLKey = "storage.docdb.mongo-server-url" - docDBMongoServerURLDirKey = "storage.docdb.mongo-server-url-dir" + gcsBucketKey = "storage.gcs.bucket" + ociRepositoryKey = "storage.oci.repository" + ociRepositoryInsecureKey = "storage.oci.repository.insecure" + docDBUrlKey = "storage.docdb.url" + docDBMongoServerURLKey = "storage.docdb.mongo-server-url" + docDBMongoServerURLDirKey = "storage.docdb.mongo-server-url-dir" + docDBMongoServerURLPathKey = "storage.docdb.mongo-server-url-path" grafeasProjectIDKey = "storage.grafeas.projectid" grafeasNoteIDKey = "storage.grafeas.noteid" @@ -302,6 +304,7 @@ func NewConfigFromMap(data map[string]string) (*Config, error) { asString(docDBUrlKey, &cfg.Storage.DocDB.URL), asString(docDBMongoServerURLKey, &cfg.Storage.DocDB.MongoServerURL), asString(docDBMongoServerURLDirKey, &cfg.Storage.DocDB.MongoServerURLDir), + asString(docDBMongoServerURLPathKey, &cfg.Storage.DocDB.MongoServerURLPath), asString(grafeasProjectIDKey, &cfg.Storage.Grafeas.ProjectID), 
asString(grafeasNoteIDKey, &cfg.Storage.Grafeas.NoteID), asString(grafeasNoteHint, &cfg.Storage.Grafeas.NoteHint), diff --git a/vendor/github.com/x448/float16/.travis.yml b/vendor/github.com/x448/float16/.travis.yml new file mode 100644 index 0000000000..8902bdaaff --- /dev/null +++ b/vendor/github.com/x448/float16/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.11.x + +env: + - GO111MODULE=on + +script: + - go test -short -coverprofile=coverage.txt -covermode=count ./... + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/x448/float16/LICENSE b/vendor/github.com/x448/float16/LICENSE new file mode 100644 index 0000000000..bf6e357854 --- /dev/null +++ b/vendor/github.com/x448/float16/LICENSE @@ -0,0 +1,22 @@ +MIT License + +Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/x448/float16/README.md b/vendor/github.com/x448/float16/README.md new file mode 100644 index 0000000000..b524b8135d --- /dev/null +++ b/vendor/github.com/x448/float16/README.md @@ -0,0 +1,133 @@ +# Float16 (Binary16) in Go/Golang +[![Build Status](https://travis-ci.org/x448/float16.svg?branch=master)](https://travis-ci.org/x448/float16) +[![codecov](https://codecov.io/gh/x448/float16/branch/master/graph/badge.svg?v=4)](https://codecov.io/gh/x448/float16) +[![Go Report Card](https://goreportcard.com/badge/github.com/x448/float16)](https://goreportcard.com/report/github.com/x448/float16) +[![Release](https://img.shields.io/github/release/x448/float16.svg?style=flat-square)](https://github.com/x448/float16/releases) +[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/x448/float16/master/LICENSE) + +`float16` package provides [IEEE 754 half-precision floating-point format (binary16)](https://en.wikipedia.org/wiki/Half-precision_floating-point_format) with IEEE 754 default rounding for conversions. IEEE 754-2008 refers to this 16-bit floating-point format as binary16. + +IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven") is considered the most accurate and statistically unbiased estimate of the true result. + +All possible 4+ billion floating-point conversions with this library are verified to be correct. + +Lowercase "float16" refers to IEEE 754 binary16. And capitalized "Float16" refers to exported Go data type provided by this library. + +## Features +Current features include: + +* float16 to float32 conversions use lossless conversion. 
+* float32 to float16 conversions use IEEE 754-2008 "Round-to-Nearest RoundTiesToEven". +* conversions using pure Go take about 2.65 ns/op on a desktop amd64. +* unit tests provide 100% code coverage and check all possible 4+ billion conversions. +* other functions include: IsInf(), IsNaN(), IsNormal(), PrecisionFromfloat32(), String(), etc. +* all functions in this library use zero allocs except String(). + +## Status +This library is used by [fxamacker/cbor](https://github.com/fxamacker/cbor) and is ready for production use on supported platforms. The version number < 1.0 indicates more functions and options are planned but not yet published. + +Current status: + +* core API is done and breaking API changes are unlikely. +* 100% of unit tests pass: + * short mode (`go test -short`) tests around 65765 conversions in 0.005s. + * normal mode (`go test`) tests all possible 4+ billion conversions in about 95s. +* 100% code coverage with both short mode and normal mode. +* tested on amd64 but it should work on all little-endian platforms supported by Go. + +Roadmap: + +* add functions for fast batch conversions leveraging SIMD when supported by hardware. +* speed up unit test when verifying all possible 4+ billion conversions. +* test on additional platforms. + +## Float16 to Float32 Conversion +Conversions from float16 to float32 are lossless conversions. All 65536 possible float16 to float32 conversions (in pure Go) are confirmed to be correct. + +Unit tests take a fraction of a second to check all 65536 expected values for float16 to float32 conversions. + +## Float32 to Float16 Conversion +Conversions from float32 to float16 use IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven"). All 4294967296 possible float32 to float16 conversions (in pure Go) are confirmed to be correct. + +Unit tests in normal mode take about 1-2 minutes to check all 4+ billion float32 input values and results for Fromfloat32(), FromNaN32ps(), and PrecisionFromfloat32(). + +Unit tests in short mode use a small subset (around 229 float32 inputs) and finish in under 0.01 second while still reaching 100% code coverage. + +## Usage +Install with `go get github.com/x448/float16`. +``` +// Convert float32 to float16 +pi := float32(math.Pi) +pi16 := float16.Fromfloat32(pi) + +// Convert float16 to float32 +pi32 := pi16.Float32() + +// PrecisionFromfloat32() is faster than the overhead of calling a function. +// This example only converts if there's no data loss and input is not a subnormal. +if float16.PrecisionFromfloat32(pi) == float16.PrecisionExact { + pi16 := float16.Fromfloat32(pi) +} +``` + +## Float16 Type and API +Float16 (capitalized) is a Go type with uint16 as the underlying state. There are 6 exported functions and 9 exported methods. +``` +package float16 // import "github.com/x448/float16" + +// Exported types and consts +type Float16 uint16 +const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN") + +// Exported functions +Fromfloat32(f32 float32) Float16 // Float16 number converted from f32 using IEEE 754 default rounding + with identical results to AMD and Intel F16C hardware. NaN inputs + are converted with quiet bit always set on, to be like F16C. + +FromNaN32ps(nan float32) (Float16, error) // Float16 NaN without modifying quiet bit. + // The "ps" suffix means "preserve signaling". + // Returns sNaN and ErrInvalidNaNValue if nan isn't a NaN. + +Frombits(b16 uint16) Float16 // Float16 number corresponding to b16 (IEEE 754 binary16 rep.) 
+NaN() Float16 // Float16 of IEEE 754 binary16 not-a-number +Inf(sign int) Float16 // Float16 of IEEE 754 binary16 infinity according to sign + +PrecisionFromfloat32(f32 float32) Precision // quickly indicates exact, ..., overflow, underflow + // (inline and < 1 ns/op) +// Exported methods +(f Float16) Float32() float32 // float32 number converted from f16 using lossless conversion +(f Float16) Bits() uint16 // the IEEE 754 binary16 representation of f +(f Float16) IsNaN() bool // true if f is not-a-number (NaN) +(f Float16) IsQuietNaN() bool // true if f is a quiet not-a-number (NaN) +(f Float16) IsInf(sign int) bool // true if f is infinite based on sign (-1=NegInf, 0=any, 1=PosInf) +(f Float16) IsFinite() bool // true if f is not infinite or NaN +(f Float16) IsNormal() bool // true if f is not zero, infinite, subnormal, or NaN. +(f Float16) Signbit() bool // true if f is negative or negative zero +(f Float16) String() string // string representation of f to satisfy fmt.Stringer interface +``` +See [API](https://godoc.org/github.com/x448/float16) at godoc.org for more info. + +## Benchmarks +Conversions (in pure Go) are around 2.65 ns/op for float16 -> float32 and float32 -> float16 on amd64. Speeds can vary depending on input value. + +``` +All functions have zero allocations except float16.String(). + +FromFloat32pi-2 2.59ns ± 0% // speed using Fromfloat32() to convert a float32 of math.Pi to Float16 +ToFloat32pi-2 2.69ns ± 0% // speed using Float32() to convert a float16 of math.Pi to float32 +Frombits-2 0.29ns ± 5% // speed using Frombits() to cast a uint16 to Float16 + +PrecisionFromFloat32-2 0.29ns ± 1% // speed using PrecisionFromfloat32() to check for overflows, etc. +``` + +## System Requirements +* Tested on Go 1.11, 1.12, and 1.13 but it should also work with older versions. +* Tested on amd64 but it should also work on all little-endian platforms supported by Go. + +## Special Thanks +Special thanks to Kathryn Long (starkat99) for creating [half-rs](https://github.com/starkat99/half-rs), a very nice rust implementation of float16. + +## License +Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker + +Licensed under [MIT License](LICENSE) diff --git a/vendor/github.com/x448/float16/float16.go b/vendor/github.com/x448/float16/float16.go new file mode 100644 index 0000000000..1a0e6dad00 --- /dev/null +++ b/vendor/github.com/x448/float16/float16.go @@ -0,0 +1,302 @@ +// Copyright 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker +// +// Special thanks to Kathryn Long for her Rust implementation +// of float16 at github.com/starkat99/half-rs (MIT license) + +package float16 + +import ( + "math" + "strconv" +) + +// Float16 represents IEEE 754 half-precision floating-point numbers (binary16). +type Float16 uint16 + +// Precision indicates whether the conversion to Float16 is +// exact, subnormal without dropped bits, inexact, underflow, or overflow. +type Precision int + +const ( + + // PrecisionExact is for non-subnormals that don't drop bits during conversion. + // All of these can round-trip. Should always convert to float16. + PrecisionExact Precision = iota + + // PrecisionUnknown is for subnormals that don't drop bits during conversion but + // not all of these can round-trip so precision is unknown without more effort. + // Only 2046 of these can round-trip and the rest cannot round-trip. + PrecisionUnknown + + // PrecisionInexact is for dropped significand bits and cannot round-trip. + // Some of these are subnormals. Cannot round-trip float32->float16->float32. 
+ PrecisionInexact + + // PrecisionUnderflow is for Underflows. Cannot round-trip float32->float16->float32. + PrecisionUnderflow + + // PrecisionOverflow is for Overflows. Cannot round-trip float32->float16->float32. + PrecisionOverflow +) + +// PrecisionFromfloat32 returns Precision without performing +// the conversion. Conversions from both Infinity and NaN +// values will always report PrecisionExact even if NaN payload +// or NaN-Quiet-Bit is lost. This function is kept simple to +// allow inlining and run < 0.5 ns/op, to serve as a fast filter. +func PrecisionFromfloat32(f32 float32) Precision { + u32 := math.Float32bits(f32) + + if u32 == 0 || u32 == 0x80000000 { + // +- zero will always be exact conversion + return PrecisionExact + } + + const COEFMASK uint32 = 0x7fffff // 23 least significant bits + const EXPSHIFT uint32 = 23 + const EXPBIAS uint32 = 127 + const EXPMASK uint32 = uint32(0xff) << EXPSHIFT + const DROPMASK uint32 = COEFMASK >> 10 + + exp := int32(((u32 & EXPMASK) >> EXPSHIFT) - EXPBIAS) + coef := u32 & COEFMASK + + if exp == 128 { + // +- infinity or NaN + // apps may want to do extra checks for NaN separately + return PrecisionExact + } + + // https://en.wikipedia.org/wiki/Half-precision_floating-point_format says, + // "Decimals between 2^−24 (minimum positive subnormal) and 2^−14 (maximum subnormal): fixed interval 2^−24" + if exp < -24 { + return PrecisionUnderflow + } + if exp > 15 { + return PrecisionOverflow + } + if (coef & DROPMASK) != uint32(0) { + // these include subnormals and non-subnormals that dropped bits + return PrecisionInexact + } + + if exp < -14 { + // Subnormals. Caller may want to test these further. + // There are 2046 subnormals that can successfully round-trip f32->f16->f32 + // and 20 of those 2046 have 32-bit input coef == 0. + // RFC 7049 and 7049bis Draft 12 don't precisely define "preserves value" + // so some protocols and libraries will choose to handle subnormals differently + // when deciding to encode them to CBOR float32 vs float16. + return PrecisionUnknown + } + + return PrecisionExact +} + +// Frombits returns the float16 number corresponding to the IEEE 754 binary16 +// representation u16, with the sign bit of u16 and the result in the same bit +// position. Frombits(Bits(x)) == x. +func Frombits(u16 uint16) Float16 { + return Float16(u16) +} + +// Fromfloat32 returns a Float16 value converted from f32. Conversion uses +// IEEE default rounding (nearest int, with ties to even). +func Fromfloat32(f32 float32) Float16 { + return Float16(f32bitsToF16bits(math.Float32bits(f32))) +} + +// ErrInvalidNaNValue indicates a NaN was not received. +const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN") + +type float16Error string + +func (e float16Error) Error() string { return string(e) } + +// FromNaN32ps converts nan to IEEE binary16 NaN while preserving both +// signaling and payload. Unlike Fromfloat32(), which can only return +// qNaN because it sets quiet bit = 1, this can return both sNaN and qNaN. +// If the result is infinity (sNaN with empty payload), then the +// lowest bit of payload is set to make the result a NaN. +// Returns ErrInvalidNaNValue and 0x7c01 (sNaN) if nan isn't IEEE 754 NaN. +// This function was kept simple to be able to inline. 
+func FromNaN32ps(nan float32) (Float16, error) { + const SNAN = Float16(uint16(0x7c01)) // signalling NaN + + u32 := math.Float32bits(nan) + sign := u32 & 0x80000000 + exp := u32 & 0x7f800000 + coef := u32 & 0x007fffff + + if (exp != 0x7f800000) || (coef == 0) { + return SNAN, ErrInvalidNaNValue + } + + u16 := uint16((sign >> 16) | uint32(0x7c00) | (coef >> 13)) + + if (u16 & 0x03ff) == 0 { + // result became infinity, make it NaN by setting lowest bit in payload + u16 = u16 | 0x0001 + } + + return Float16(u16), nil +} + +// NaN returns a Float16 of IEEE 754 binary16 not-a-number (NaN). +// Returned NaN value 0x7e01 has all exponent bits = 1 with the +// first and last bits = 1 in the significand. This is consistent +// with Go's 64-bit math.NaN(). Canonical CBOR in RFC 7049 uses 0x7e00. +func NaN() Float16 { + return Float16(0x7e01) +} + +// Inf returns a Float16 with an infinity value with the specified sign. +// A sign >= returns positive infinity. +// A sign < 0 returns negative infinity. +func Inf(sign int) Float16 { + if sign >= 0 { + return Float16(0x7c00) + } + return Float16(0x8000 | 0x7c00) +} + +// Float32 returns a float32 converted from f (Float16). +// This is a lossless conversion. +func (f Float16) Float32() float32 { + u32 := f16bitsToF32bits(uint16(f)) + return math.Float32frombits(u32) +} + +// Bits returns the IEEE 754 binary16 representation of f, with the sign bit +// of f and the result in the same bit position. Bits(Frombits(x)) == x. +func (f Float16) Bits() uint16 { + return uint16(f) +} + +// IsNaN reports whether f is an IEEE 754 binary16 “not-a-number” value. +func (f Float16) IsNaN() bool { + return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0) +} + +// IsQuietNaN reports whether f is a quiet (non-signaling) IEEE 754 binary16 +// “not-a-number” value. +func (f Float16) IsQuietNaN() bool { + return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0) && (f&0x0200 != 0) +} + +// IsInf reports whether f is an infinity (inf). +// A sign > 0 reports whether f is positive inf. +// A sign < 0 reports whether f is negative inf. +// A sign == 0 reports whether f is either inf. +func (f Float16) IsInf(sign int) bool { + return ((f == 0x7c00) && sign >= 0) || + (f == 0xfc00 && sign <= 0) +} + +// IsFinite returns true if f is neither infinite nor NaN. +func (f Float16) IsFinite() bool { + return (uint16(f) & uint16(0x7c00)) != uint16(0x7c00) +} + +// IsNormal returns true if f is neither zero, infinite, subnormal, or NaN. +func (f Float16) IsNormal() bool { + exp := uint16(f) & uint16(0x7c00) + return (exp != uint16(0x7c00)) && (exp != 0) +} + +// Signbit reports whether f is negative or negative zero. +func (f Float16) Signbit() bool { + return (uint16(f) & uint16(0x8000)) != 0 +} + +// String satisfies the fmt.Stringer interface. +func (f Float16) String() string { + return strconv.FormatFloat(float64(f.Float32()), 'f', -1, 32) +} + +// f16bitsToF32bits returns uint32 (float32 bits) converted from specified uint16. +func f16bitsToF32bits(in uint16) uint32 { + // All 65536 conversions with this were confirmed to be correct + // by Montgomery Edwards⁴⁴⁸ (github.com/x448). 
+ + sign := uint32(in&0x8000) << 16 // sign for 32-bit + exp := uint32(in&0x7c00) >> 10 // exponenent for 16-bit + coef := uint32(in&0x03ff) << 13 // significand for 32-bit + + if exp == 0x1f { + if coef == 0 { + // infinity + return sign | 0x7f800000 | coef + } + // NaN + return sign | 0x7fc00000 | coef + } + + if exp == 0 { + if coef == 0 { + // zero + return sign + } + + // normalize subnormal numbers + exp++ + for coef&0x7f800000 == 0 { + coef <<= 1 + exp-- + } + coef &= 0x007fffff + } + + return sign | ((exp + (0x7f - 0xf)) << 23) | coef +} + +// f32bitsToF16bits returns uint16 (Float16 bits) converted from the specified float32. +// Conversion rounds to nearest integer with ties to even. +func f32bitsToF16bits(u32 uint32) uint16 { + // Translated from Rust to Go by Montgomery Edwards⁴⁴⁸ (github.com/x448). + // All 4294967296 conversions with this were confirmed to be correct by x448. + // Original Rust implementation is by Kathryn Long (github.com/starkat99) with MIT license. + + sign := u32 & 0x80000000 + exp := u32 & 0x7f800000 + coef := u32 & 0x007fffff + + if exp == 0x7f800000 { + // NaN or Infinity + nanBit := uint32(0) + if coef != 0 { + nanBit = uint32(0x0200) + } + return uint16((sign >> 16) | uint32(0x7c00) | nanBit | (coef >> 13)) + } + + halfSign := sign >> 16 + + unbiasedExp := int32(exp>>23) - 127 + halfExp := unbiasedExp + 15 + + if halfExp >= 0x1f { + return uint16(halfSign | uint32(0x7c00)) + } + + if halfExp <= 0 { + if 14-halfExp > 24 { + return uint16(halfSign) + } + coef := coef | uint32(0x00800000) + halfCoef := coef >> uint32(14-halfExp) + roundBit := uint32(1) << uint32(13-halfExp) + if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 { + halfCoef++ + } + return uint16(halfSign | halfCoef) + } + + uHalfExp := uint32(halfExp) << 10 + halfCoef := coef >> 13 + roundBit := uint32(0x00001000) + if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 { + return uint16((halfSign | uHalfExp | halfCoef) + 1) + } + return uint16(halfSign | uHalfExp | halfCoef) +} diff --git a/vendor/github.com/youmark/pkcs8/.travis.yml b/vendor/github.com/youmark/pkcs8/.travis.yml deleted file mode 100644 index 3608f7dc79..0000000000 --- a/vendor/github.com/youmark/pkcs8/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -arch: - - amd64 - - ppc64le -language: go - -go: - - "1.10.x" - - "1.11.x" - - "1.12.x" - - "1.13.x" - - master - -script: - - go test -v ./... 
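The newly vendored x448/float16 README and API listing above document the package's conversion and classification functions. The following is a minimal illustrative sketch of that documented API in use; it is written for this note, is not part of the vendored code, and assumes only the functions shown in the README and source above.

```go
package main

import (
	"fmt"
	"math"

	"github.com/x448/float16"
)

func main() {
	// Round-trip a float32 through binary16 only when the package's
	// precision check reports the conversion is exact.
	f32 := float32(1.5)
	if float16.PrecisionFromfloat32(f32) == float16.PrecisionExact {
		f16 := float16.Fromfloat32(f32)
		fmt.Println(f16.Float32() == f32) // true: 1.5 is exactly representable in binary16
	}

	// NaN and infinity helpers from the exported API.
	fmt.Println(float16.NaN().IsNaN())                              // true
	fmt.Println(float16.Inf(-1).IsInf(-1))                          // true
	fmt.Println(float16.Fromfloat32(float32(math.Inf(1))).IsInf(1)) // true
}
```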
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go index 7e08aab35e..fc4a7b1dbf 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go @@ -330,7 +330,7 @@ func (DefaultValueDecoders) intDecodeType(dc DecodeContext, vr bsonrw.ValueReade case reflect.Int64: return reflect.ValueOf(i64), nil case reflect.Int: - if int64(int(i64)) != i64 { // Can we fit this inside of an int + if i64 > math.MaxInt { // Can we fit this inside of an int return emptyValue, fmt.Errorf("%d overflows int", i64) } @@ -434,7 +434,7 @@ func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.Valu return fmt.Errorf("%d overflows uint64", i64) } case reflect.Uint: - if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint + if i64 < 0 || uint64(i64) > uint64(math.MaxUint) { // Can we fit this inside of an uint return fmt.Errorf("%d overflows uint", i64) } default: diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go index 8525472769..39b07135b1 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go @@ -164,11 +164,15 @@ func (uic *UIntCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t refl return reflect.ValueOf(uint64(i64)), nil case reflect.Uint: - if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint + if i64 < 0 { + return emptyValue, fmt.Errorf("%d overflows uint", i64) + } + v := uint64(i64) + if v > math.MaxUint { // Can we fit this inside of an uint return emptyValue, fmt.Errorf("%d overflows uint", i64) } - return reflect.ValueOf(uint(i64)), nil + return reflect.ValueOf(uint(v)), nil default: return emptyValue, ValueDecoderError{ Name: "UintDecodeValue", diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go index 9695704246..af6ae7b76b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go @@ -95,9 +95,9 @@ func (ejv *extJSONValue) parseBinary() (b []byte, subType byte, err error) { return nil, 0, fmt.Errorf("$binary subType value should be string, but instead is %s", val.t) } - i, err := strconv.ParseInt(val.v.(string), 16, 64) + i, err := strconv.ParseUint(val.v.(string), 16, 8) if err != nil { - return nil, 0, fmt.Errorf("invalid $binary subType string: %s", val.v.(string)) + return nil, 0, fmt.Errorf("invalid $binary subType string: %q: %w", val.v.(string), err) } subType = byte(i) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go index a242bb57cf..0e07d50558 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go @@ -842,7 +842,7 @@ func (vr *valueReader) peekLength() (int32, error) { } idx := vr.offset - return (int32(vr.d[idx]) | int32(vr.d[idx+1])<<8 | int32(vr.d[idx+2])<<16 | int32(vr.d[idx+3])<<24), nil + return int32(binary.LittleEndian.Uint32(vr.d[idx:])), nil } func (vr *valueReader) readLength() (int32, error) { return vr.readi32() } @@ -854,7 +854,7 @@ 
func (vr *valueReader) readi32() (int32, error) { idx := vr.offset vr.offset += 4 - return (int32(vr.d[idx]) | int32(vr.d[idx+1])<<8 | int32(vr.d[idx+2])<<16 | int32(vr.d[idx+3])<<24), nil + return int32(binary.LittleEndian.Uint32(vr.d[idx:])), nil } func (vr *valueReader) readu32() (uint32, error) { @@ -864,7 +864,7 @@ func (vr *valueReader) readu32() (uint32, error) { idx := vr.offset vr.offset += 4 - return (uint32(vr.d[idx]) | uint32(vr.d[idx+1])<<8 | uint32(vr.d[idx+2])<<16 | uint32(vr.d[idx+3])<<24), nil + return binary.LittleEndian.Uint32(vr.d[idx:]), nil } func (vr *valueReader) readi64() (int64, error) { @@ -874,8 +874,7 @@ func (vr *valueReader) readi64() (int64, error) { idx := vr.offset vr.offset += 8 - return int64(vr.d[idx]) | int64(vr.d[idx+1])<<8 | int64(vr.d[idx+2])<<16 | int64(vr.d[idx+3])<<24 | - int64(vr.d[idx+4])<<32 | int64(vr.d[idx+5])<<40 | int64(vr.d[idx+6])<<48 | int64(vr.d[idx+7])<<56, nil + return int64(binary.LittleEndian.Uint64(vr.d[idx:])), nil } func (vr *valueReader) readu64() (uint64, error) { @@ -885,6 +884,5 @@ func (vr *valueReader) readu64() (uint64, error) { idx := vr.offset vr.offset += 8 - return uint64(vr.d[idx]) | uint64(vr.d[idx+1])<<8 | uint64(vr.d[idx+2])<<16 | uint64(vr.d[idx+3])<<24 | - uint64(vr.d[idx+4])<<32 | uint64(vr.d[idx+5])<<40 | uint64(vr.d[idx+6])<<48 | uint64(vr.d[idx+7])<<56, nil + return binary.LittleEndian.Uint64(vr.d[idx:]), nil } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go b/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go index 4d1bfb3160..a8088e1e30 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go @@ -88,8 +88,12 @@ func (rv RawValue) UnmarshalWithRegistry(r *bsoncodec.Registry, val interface{}) return dec.DecodeValue(bsoncodec.DecodeContext{Registry: r}, vr, rval) } -// UnmarshalWithContext performs the same unmarshalling as Unmarshal but uses the provided DecodeContext -// instead of the one attached or the default registry. +// UnmarshalWithContext performs the same unmarshalling as Unmarshal but uses +// the provided DecodeContext instead of the one attached or the default +// registry. +// +// Deprecated: Use [RawValue.UnmarshalWithRegistry] with a custom registry to customize +// unmarshal behavior instead. func (rv RawValue) UnmarshalWithContext(dc *bsoncodec.DecodeContext, val interface{}) error { if dc == nil { return ErrNilContext diff --git a/vendor/go.mongodb.org/mongo-driver/bson/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/registry.go index b5b0f35687..d6afb2850e 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/registry.go @@ -10,15 +10,27 @@ import ( "go.mongodb.org/mongo-driver/bson/bsoncodec" ) -// DefaultRegistry is the default bsoncodec.Registry. It contains the default codecs and the -// primitive codecs. +// DefaultRegistry is the default bsoncodec.Registry. It contains the default +// codecs and the primitive codecs. +// +// Deprecated: Use [NewRegistry] to construct a new default registry. To use a +// custom registry when marshaling or unmarshaling, use the "SetRegistry" method +// on an [Encoder] or [Decoder] instead: +// +// dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data)) +// if err != nil { +// panic(err) +// } +// dec.SetRegistry(reg) +// +// See [Encoder] and [Decoder] for more examples. 
var DefaultRegistry = NewRegistry() // NewRegistryBuilder creates a new RegistryBuilder configured with the default encoders and // decoders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the // PrimitiveCodecs type in this package. // -// Deprecated: Use NewRegistry instead. +// Deprecated: Use [NewRegistry] instead. func NewRegistryBuilder() *bsoncodec.RegistryBuilder { rb := bsoncodec.NewRegistryBuilder() bsoncodec.DefaultValueEncoders{}.RegisterDefaultEncoders(rb) diff --git a/vendor/go.mongodb.org/mongo-driver/internal/logger/io_sink.go b/vendor/go.mongodb.org/mongo-driver/internal/logger/io_sink.go index c5ff1474b4..0a6c1bdcab 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/logger/io_sink.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/logger/io_sink.go @@ -9,6 +9,7 @@ package logger import ( "encoding/json" "io" + "math" "sync" "time" ) @@ -36,7 +37,11 @@ func NewIOSink(out io.Writer) *IOSink { // Info will write a JSON-encoded message to the io.Writer. func (sink *IOSink) Info(_ int, msg string, keysAndValues ...interface{}) { - kvMap := make(map[string]interface{}, len(keysAndValues)/2+2) + mapSize := len(keysAndValues) / 2 + if math.MaxInt-mapSize >= 2 { + mapSize += 2 + } + kvMap := make(map[string]interface{}, mapSize) kvMap[KeyTimestamp] = time.Now().UnixNano() kvMap[KeyMessage] = msg diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go index 1bedcc3f8a..8d0a2031de 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go @@ -531,6 +531,12 @@ func (cs *ChangeStream) ID() int64 { return cs.cursor.ID() } +// RemainingBatchLength returns the number of documents left in the current batch. If this returns zero, the subsequent +// call to Next or TryNext will do a network request to fetch the next batch. +func (cs *ChangeStream) RemainingBatchLength() int { + return len(cs.batch) +} + // SetBatchSize sets the number of documents to fetch from the database with // each iteration of the ChangeStream's "Next" or "TryNext" method. This setting // only affects subsequent document batches fetched from the database. diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/client.go b/vendor/go.mongodb.org/mongo-driver/mongo/client.go index 280749c7dd..4266412aab 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/client.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/client.go @@ -209,10 +209,6 @@ func NewClient(opts ...*options.ClientOptions) (*Client, error) { clientOpt.SetMaxPoolSize(defaultMaxPoolSize) } - if err != nil { - return nil, err - } - cfg, err := topology.NewConfig(clientOpt, client.clock) if err != nil { return nil, err diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/collection.go b/vendor/go.mongodb.org/mongo-driver/mongo/collection.go index 555035ff51..4cf6fd1a1a 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/collection.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/collection.go @@ -1200,6 +1200,11 @@ func (coll *Collection) Distinct(ctx context.Context, fieldName string, filter i // For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/find/. 
func (coll *Collection) Find(ctx context.Context, filter interface{}, opts ...*options.FindOptions) (cur *Cursor, err error) { + + if ctx == nil { + ctx = context.Background() + } + // Omit "maxTimeMS" from operations that return a user-managed cursor to // prevent confusing "cursor not found" errors. To maintain existing // behavior for users who set "timeoutMS" with no context deadline, only @@ -1217,10 +1222,6 @@ func (coll *Collection) find( opts ...*options.FindOptions, ) (cur *Cursor, err error) { - if ctx == nil { - ctx = context.Background() - } - f, err := marshal(filter, coll.bsonOpts, coll.registry) if err != nil { return nil, err @@ -1801,8 +1802,11 @@ func (coll *Collection) Indexes() IndexView { // SearchIndexes returns a SearchIndexView instance that can be used to perform operations on the search indexes for the collection. func (coll *Collection) SearchIndexes() SearchIndexView { + c, _ := coll.Clone() // Clone() always return a nil error. + c.readConcern = nil + c.writeConcern = nil return SearchIndexView{ - coll: coll, + coll: c, } } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/database.go b/vendor/go.mongodb.org/mongo-driver/mongo/database.go index c5cda9e5bd..57c0186eca 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/database.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/database.go @@ -580,7 +580,7 @@ func (db *Database) getEncryptedFieldsFromServer(ctx context.Context, collection return encryptedFields, nil } -// getEncryptedFieldsFromServer tries to get an "encryptedFields" document associated with collectionName by checking the client EncryptedFieldsMap. +// getEncryptedFieldsFromMap tries to get an "encryptedFields" document associated with collectionName by checking the client EncryptedFieldsMap. // Returns nil and no error if an EncryptedFieldsMap is not configured, or does not contain an entry for collectionName. func (db *Database) getEncryptedFieldsFromMap(collectionName string) interface{} { // Check the EncryptedFieldsMap diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go index db56745919..17b3731301 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go @@ -15,6 +15,7 @@ import ( "errors" "fmt" "io/ioutil" + "math" "net" "net/http" "strings" @@ -1177,7 +1178,19 @@ func addClientCertFromSeparateFiles(cfg *tls.Config, keyFile, certFile, keyPassw return "", err } - data := make([]byte, 0, len(keyData)+len(certData)+1) + keySize := len(keyData) + if keySize > 64*1024*1024 { + return "", errors.New("X.509 key must be less than 64 MiB") + } + certSize := len(certData) + if certSize > 64*1024*1024 { + return "", errors.New("X.509 certificate must be less than 64 MiB") + } + dataSize := keySize + certSize + 1 + if dataSize > math.MaxInt { + return "", errors.New("size overflow") + } + data := make([]byte, 0, dataSize) data = append(data, keyData...) data = append(data, '\n') data = append(data, certData...) 
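The clientoptions.go hunk above bounds the size of the key and certificate files before concatenating them. Below is a minimal standalone sketch of that same size-guard pattern, not part of the vendored code; the file names and the helper name are placeholders, and the 64 MiB limit mirrors the one introduced in the patch.

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// maxPEMSize matches the 64 MiB bound the driver now enforces on key and
// certificate inputs before combining them.
const maxPEMSize = 64 * 1024 * 1024

// joinKeyAndCert reads a key file and a certificate file, rejects oversized
// inputs, and returns the two PEM blocks joined by a newline.
func joinKeyAndCert(keyFile, certFile string) ([]byte, error) {
	keyData, err := os.ReadFile(keyFile)
	if err != nil {
		return nil, err
	}
	certData, err := os.ReadFile(certFile)
	if err != nil {
		return nil, err
	}
	if len(keyData) > maxPEMSize {
		return nil, errors.New("X.509 key must be less than 64 MiB")
	}
	if len(certData) > maxPEMSize {
		return nil, errors.New("X.509 certificate must be less than 64 MiB")
	}
	// Pre-size the destination so the appends below do not reallocate.
	data := make([]byte, 0, len(keyData)+len(certData)+1)
	data = append(data, keyData...)
	data = append(data, '\n')
	data = append(data, certData...)
	return data, nil
}

func main() {
	// "client.key" and "client.crt" are hypothetical paths for illustration.
	data, err := joinKeyAndCert("client.key", "client.crt")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Printf("combined PEM is %d bytes\n", len(data))
}
```

Checking the sizes before allocating keeps a corrupt or maliciously large input from forcing an oversized allocation, which is the rationale for the added bounds in the patch.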
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go index fd17ce44e1..36088c2fcb 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go @@ -104,7 +104,7 @@ const ( // UpdateLookup includes a delta describing the changes to the document and a copy of the entire document that // was changed. UpdateLookup FullDocument = "updateLookup" - // WhenAvailable includes a post-image of the the modified document for replace and update change events + // WhenAvailable includes a post-image of the modified document for replace and update change events // if the post-image for this event is available. WhenAvailable FullDocument = "whenAvailable" ) diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/searchindexoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/searchindexoptions.go index 9774d615ba..8cb8a08b78 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/searchindexoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/searchindexoptions.go @@ -9,6 +9,7 @@ package options // SearchIndexesOptions represents options that can be used to configure a SearchIndexView. type SearchIndexesOptions struct { Name *string + Type *string } // SearchIndexes creates a new SearchIndexesOptions instance. @@ -22,6 +23,12 @@ func (sio *SearchIndexesOptions) SetName(name string) *SearchIndexesOptions { return sio } +// SetType sets the value for the Type field. +func (sio *SearchIndexesOptions) SetType(typ string) *SearchIndexesOptions { + sio.Type = &typ + return sio +} + // CreateSearchIndexesOptions represents options that can be used to configure a SearchIndexView.CreateOne or // SearchIndexView.CreateMany operation. type CreateSearchIndexesOptions struct { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/search_index_view.go b/vendor/go.mongodb.org/mongo-driver/mongo/search_index_view.go index 6a7871531e..73fe8534ed 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/search_index_view.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/search_index_view.go @@ -13,7 +13,6 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo/options" - "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/operation" @@ -109,6 +108,9 @@ func (siv SearchIndexView) CreateMany( if model.Options != nil && model.Options.Name != nil { indexes = bsoncore.AppendStringElement(indexes, "name", *model.Options.Name) } + if model.Options != nil && model.Options.Type != nil { + indexes = bsoncore.AppendStringElement(indexes, "type", *model.Options.Type) + } indexes = bsoncore.AppendDocumentElement(indexes, "definition", definition) indexes, err = bsoncore.AppendDocumentEnd(indexes, iidx) @@ -134,20 +136,13 @@ func (siv SearchIndexView) CreateMany( return nil, err } - wc := siv.coll.writeConcern - if sess.TransactionRunning() { - wc = nil - } - if !writeconcern.AckWrite(wc) { - sess = nil - } - selector := makePinnedSelector(sess, siv.coll.writeSelector) op := operation.NewCreateSearchIndexes(indexes). - Session(sess).WriteConcern(wc).ClusterClock(siv.coll.client.clock). - Database(siv.coll.db.name).Collection(siv.coll.name).CommandMonitor(siv.coll.client.monitor). 
- Deployment(siv.coll.client.deployment).ServerSelector(selector).ServerAPI(siv.coll.client.serverAPI). + Session(sess).CommandMonitor(siv.coll.client.monitor). + ServerSelector(selector).ClusterClock(siv.coll.client.clock). + Collection(siv.coll.name).Database(siv.coll.db.name). + Deployment(siv.coll.client.deployment).ServerAPI(siv.coll.client.serverAPI). Timeout(siv.coll.client.timeout) err = op.Execute(ctx) @@ -196,20 +191,12 @@ func (siv SearchIndexView) DropOne( return err } - wc := siv.coll.writeConcern - if sess.TransactionRunning() { - wc = nil - } - if !writeconcern.AckWrite(wc) { - sess = nil - } - selector := makePinnedSelector(sess, siv.coll.writeSelector) op := operation.NewDropSearchIndex(name). - Session(sess).WriteConcern(wc).CommandMonitor(siv.coll.client.monitor). + Session(sess).CommandMonitor(siv.coll.client.monitor). ServerSelector(selector).ClusterClock(siv.coll.client.clock). - Database(siv.coll.db.name).Collection(siv.coll.name). + Collection(siv.coll.name).Database(siv.coll.db.name). Deployment(siv.coll.client.deployment).ServerAPI(siv.coll.client.serverAPI). Timeout(siv.coll.client.timeout) @@ -258,20 +245,12 @@ func (siv SearchIndexView) UpdateOne( return err } - wc := siv.coll.writeConcern - if sess.TransactionRunning() { - wc = nil - } - if !writeconcern.AckWrite(wc) { - sess = nil - } - selector := makePinnedSelector(sess, siv.coll.writeSelector) op := operation.NewUpdateSearchIndex(name, indexDefinition). - Session(sess).WriteConcern(wc).CommandMonitor(siv.coll.client.monitor). + Session(sess).CommandMonitor(siv.coll.client.monitor). ServerSelector(selector).ClusterClock(siv.coll.client.clock). - Database(siv.coll.db.name).Collection(siv.coll.name). + Collection(siv.coll.name).Database(siv.coll.db.name). Deployment(siv.coll.client.deployment).ServerAPI(siv.coll.client.serverAPI). Timeout(siv.coll.client.timeout) diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go b/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go index 1d9472ec0b..7a73d8d72f 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go @@ -51,7 +51,7 @@ var ErrNegativeWTimeout = errors.New("write concern `wtimeout` field cannot be n type WriteConcern struct { // W requests acknowledgment that the write operation has propagated to a // specified number of mongod instances or to mongod instances with - // specified tags. It sets the the "w" option in a MongoDB write concern. + // specified tags. It sets the "w" option in a MongoDB write concern. // // W values must be a string or an int. // diff --git a/vendor/go.mongodb.org/mongo-driver/version/version.go b/vendor/go.mongodb.org/mongo-driver/version/version.go index 879bbdb7a4..d929c46754 100644 --- a/vendor/go.mongodb.org/mongo-driver/version/version.go +++ b/vendor/go.mongodb.org/mongo-driver/version/version.go @@ -8,4 +8,4 @@ package version // import "go.mongodb.org/mongo-driver/version" // Driver is the current version of the driver. 
-var Driver = "v1.15.0" +var Driver = "1.16.1" diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go index 88133293ea..03925d7ada 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go @@ -8,6 +8,7 @@ package bsoncore // import "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" import ( "bytes" + "encoding/binary" "fmt" "math" "strconv" @@ -706,17 +707,16 @@ func ReserveLength(dst []byte) (int32, []byte) { // UpdateLength updates the length at index with length and returns the []byte. func UpdateLength(dst []byte, index, length int32) []byte { - dst[index] = byte(length) - dst[index+1] = byte(length >> 8) - dst[index+2] = byte(length >> 16) - dst[index+3] = byte(length >> 24) + binary.LittleEndian.PutUint32(dst[index:], uint32(length)) return dst } func appendLength(dst []byte, l int32) []byte { return appendi32(dst, l) } func appendi32(dst []byte, i32 int32) []byte { - return append(dst, byte(i32), byte(i32>>8), byte(i32>>16), byte(i32>>24)) + b := []byte{0, 0, 0, 0} + binary.LittleEndian.PutUint32(b, uint32(i32)) + return append(dst, b...) } // ReadLength reads an int32 length from src and returns the length and the remaining bytes. If @@ -734,27 +734,26 @@ func readi32(src []byte) (int32, []byte, bool) { if len(src) < 4 { return 0, src, false } - return (int32(src[0]) | int32(src[1])<<8 | int32(src[2])<<16 | int32(src[3])<<24), src[4:], true + return int32(binary.LittleEndian.Uint32(src)), src[4:], true } func appendi64(dst []byte, i64 int64) []byte { - return append(dst, - byte(i64), byte(i64>>8), byte(i64>>16), byte(i64>>24), - byte(i64>>32), byte(i64>>40), byte(i64>>48), byte(i64>>56), - ) + b := []byte{0, 0, 0, 0, 0, 0, 0, 0} + binary.LittleEndian.PutUint64(b, uint64(i64)) + return append(dst, b...) } func readi64(src []byte) (int64, []byte, bool) { if len(src) < 8 { return 0, src, false } - i64 := (int64(src[0]) | int64(src[1])<<8 | int64(src[2])<<16 | int64(src[3])<<24 | - int64(src[4])<<32 | int64(src[5])<<40 | int64(src[6])<<48 | int64(src[7])<<56) - return i64, src[8:], true + return int64(binary.LittleEndian.Uint64(src)), src[8:], true } func appendu32(dst []byte, u32 uint32) []byte { - return append(dst, byte(u32), byte(u32>>8), byte(u32>>16), byte(u32>>24)) + b := []byte{0, 0, 0, 0} + binary.LittleEndian.PutUint32(b, u32) + return append(dst, b...) } func readu32(src []byte) (uint32, []byte, bool) { @@ -762,23 +761,20 @@ func readu32(src []byte) (uint32, []byte, bool) { return 0, src, false } - return (uint32(src[0]) | uint32(src[1])<<8 | uint32(src[2])<<16 | uint32(src[3])<<24), src[4:], true + return binary.LittleEndian.Uint32(src), src[4:], true } func appendu64(dst []byte, u64 uint64) []byte { - return append(dst, - byte(u64), byte(u64>>8), byte(u64>>16), byte(u64>>24), - byte(u64>>32), byte(u64>>40), byte(u64>>48), byte(u64>>56), - ) + b := []byte{0, 0, 0, 0, 0, 0, 0, 0} + binary.LittleEndian.PutUint64(b, u64) + return append(dst, b...) 
} func readu64(src []byte) (uint64, []byte, bool) { if len(src) < 8 { return 0, src, false } - u64 := (uint64(src[0]) | uint64(src[1])<<8 | uint64(src[2])<<16 | uint64(src[3])<<24 | - uint64(src[4])<<32 | uint64(src[5])<<40 | uint64(src[6])<<48 | uint64(src[7])<<56) - return u64, src[8:], true + return binary.LittleEndian.Uint64(src), src[8:], true } // keep in sync with readcstringbytes diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go index 6837b53fc5..f68e1da1a0 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go @@ -4,10 +4,18 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Package bsoncore contains functions that can be used to encode and decode BSON -// elements and values to or from a slice of bytes. These functions are aimed at -// allowing low level manipulation of BSON and can be used to build a higher -// level BSON library. +// Package bsoncore is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! +// +// Package bsoncore contains functions that can be used to encode and decode +// BSON elements and values to or from a slice of bytes. These functions are +// aimed at allowing low level manipulation of BSON and can be used to build a +// higher level BSON library. // // The Read* functions within this package return the values of the element and // a boolean indicating if the values are valid. A boolean was used instead of @@ -15,15 +23,12 @@ // enough bytes. This library attempts to do no validation, it will only return // false if there are not enough bytes for an item to be read. For example, the // ReadDocument function checks the length, if that length is larger than the -// number of bytes available, it will return false, if there are enough bytes, it -// will return those bytes and true. It is the consumers responsibility to +// number of bytes available, it will return false, if there are enough bytes, +// it will return those bytes and true. It is the consumers responsibility to // validate those bytes. // // The Append* functions within this package will append the type value to the // given dst slice. If the slice has enough capacity, it will not grow the // slice. The Append*Element functions within this package operate in the same // way, but additionally append the BSON type and the key before the value. -// -// Warning: Package bsoncore is unstable and there is no backward compatibility -// guarantee. It is experimental and subject to change. package bsoncore diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/DESIGN.md b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/DESIGN.md deleted file mode 100644 index 3c3e6c56cd..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/DESIGN.md +++ /dev/null @@ -1,27 +0,0 @@ -# Driver Library Design - -This document outlines the design for this package. - -## Deployment, Server, and Connection - -Acquiring a `Connection` from a `Server` selected from a `Deployment` enables sending and receiving -wire messages. 
A `Deployment` represents an set of MongoDB servers and a `Server` represents a -member of that set. These three types form the operation execution stack. - -### Compression - -Compression is handled by Connection type while uncompression is handled automatically by the -Operation type. This is done because the compressor to use for compressing a wire message is -chosen by the connection during handshake, while uncompression can be performed without this -information. This does make the design of compression non-symmetric, but it makes the design simpler -to implement and more consistent. - -## Operation - -The `Operation` type handles executing a series of commands using a `Deployment`. For most uses -`Operation` will only execute a single command, but the main use case for a series of commands is -batch split write commands, such as insert. The type itself is heavily documented, so reading the -code and comments together should provide an understanding of how the type works. - -This type is not meant to be used directly by callers. Instead a wrapping type should be defined -using the IDL. diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/doc.go new file mode 100644 index 0000000000..99c4c3470f --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/doc.go @@ -0,0 +1,14 @@ +// Copyright (C) MongoDB, Inc. 2024-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package creds is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! +package creds diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/doc.go index 9db65cf193..5f9f1f5743 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/doc.go @@ -4,20 +4,11 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Package auth is not for public use. +// Package auth is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. // -// The API for packages in the 'private' directory have no stability -// guarantee. -// -// The packages within the 'private' directory would normally be put into an -// 'internal' directory to prohibit their use outside the 'mongo' directory. -// However, some MongoDB tools require very low-level access to the building -// blocks of a driver, so we have placed them under 'private' to allow these -// packages to be imported by projects that need them. -// -// These package APIs may be modified in backwards-incompatible ways at any -// time. -// -// You are strongly discouraged from directly using any packages -// under 'private'. 
+// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! package auth diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/compression.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/compression.go index d79b024b74..d9a6c68fee 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/compression.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/compression.go @@ -30,7 +30,11 @@ type CompressionOpts struct { // destination writer. It panics on any errors and should only be used at // package initialization time. func mustZstdNewWriter(lvl zstd.EncoderLevel) *zstd.Encoder { - enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(lvl)) + enc, err := zstd.NewWriter( + nil, + zstd.WithWindowSize(8<<20), // Set window size to 8MB. + zstd.WithEncoderLevel(lvl), + ) if err != nil { panic(err) } @@ -105,6 +109,13 @@ func (e *zlibEncoder) Encode(dst, src []byte) ([]byte, error) { return dst, nil } +var zstdBufPool = sync.Pool{ + New: func() interface{} { + s := make([]byte, 0) + return &s + }, +} + // CompressPayload takes a byte slice and compresses it according to the options passed func CompressPayload(in []byte, opts CompressionOpts) ([]byte, error) { switch opts.Compressor { @@ -123,7 +134,13 @@ func CompressPayload(in []byte, opts CompressionOpts) ([]byte, error) { if err != nil { return nil, err } - return encoder.EncodeAll(in, nil), nil + ptr := zstdBufPool.Get().(*[]byte) + b := encoder.EncodeAll(in, *ptr) + dst := make([]byte, len(b)) + copy(dst, b) + *ptr = b[:0] + zstdBufPool.Put(ptr) + return dst, nil default: return nil, fmt.Errorf("unknown compressor ID %v", opts.Compressor) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go index 52068b8ea8..686458e292 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go @@ -4,6 +4,13 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// Package connstring is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! package connstring // import "go.mongodb.org/mongo-driver/x/mongo/driver/connstring" import ( diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go index 6573a4c1ad..9334d493ed 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go @@ -4,6 +4,13 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// Package dns is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! 
USE WITH EXTREME CAUTION! package dns import ( diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go index 5fd3ddcb42..900729bf87 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go @@ -4,6 +4,13 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// Package driver is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! package driver // import "go.mongodb.org/mongo-driver/x/mongo/driver" import ( diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go index 24f9f9b0ec..80f500085c 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go @@ -7,6 +7,13 @@ //go:build !cse // +build !cse +// Package mongocrypt is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! package mongocrypt import ( diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/doc.go new file mode 100644 index 0000000000..e0cc77052a --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/doc.go @@ -0,0 +1,14 @@ +// Copyright (C) MongoDB, Inc. 2024-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package options is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! +package options diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go index 8700728729..2bff94a659 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go @@ -4,6 +4,13 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// Package ocsp is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. 
The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! package ocsp import ( diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go index eb1acec88f..db5367bed5 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go @@ -622,7 +622,7 @@ func (op Operation) Execute(ctx context.Context) error { } }() for { - // If we're starting a retry and the the error from the previous try was + // If we're starting a retry and the error from the previous try was // a context canceled or deadline exceeded error, stop retrying and // return that error. if errors.Is(prevErr, context.Canceled) || errors.Is(prevErr, context.DeadlineExceeded) { @@ -1574,11 +1574,17 @@ func (op Operation) addClusterTime(dst []byte, desc description.SelectedServer) // operation's MaxTimeMS if set. If no MaxTimeMS is set on the operation, and context is // not a Timeout context, calculateMaxTimeMS returns 0. func (op Operation) calculateMaxTimeMS(ctx context.Context, mon RTTMonitor) (uint64, error) { - if csot.IsTimeoutContext(ctx) { - if op.OmitCSOTMaxTimeMS { - return 0, nil - } - + // If CSOT is enabled and we're not omitting the CSOT-calculated maxTimeMS + // value, then calculate maxTimeMS. + // + // This allows commands that do not currently send CSOT-calculated maxTimeMS + // (e.g. Find and Aggregate) to still use a manually-provided maxTimeMS + // value. + // + // TODO(GODRIVER-2944): Remove or refactor this logic when we add the + // "timeoutMode" option, which will allow users to opt-in to the + // CSOT-calculated maxTimeMS values if that's the behavior they want. + if csot.IsTimeoutContext(ctx) && !op.OmitCSOTMaxTimeMS { if deadline, ok := ctx.Deadline(); ok { remainingTimeout := time.Until(deadline) rtt90 := mon.P90() @@ -1893,7 +1899,6 @@ func (op Operation) decodeResult(ctx context.Context, opcode wiremessage.OpCode, return nil, errors.New("malformed wire message: insufficient bytes to read single document") } case wiremessage.DocumentSequence: - // TODO(GODRIVER-617): Implement document sequence returns. _, _, wm, ok = wiremessage.ReadMsgSectionDocumentSequence(wm) if !ok { return nil, errors.New("malformed wire message: insufficient bytes to read document sequence") @@ -1989,7 +1994,7 @@ func (op Operation) publishStartedEvent(ctx context.Context, info startedInforma } } -// canPublishSucceededEvent returns true if a CommandSucceededEvent can be +// canPublishFinishedEvent returns true if a CommandSucceededEvent can be // published for the given command. This is true if the command is not an // unacknowledged write and the command monitor is monitoring succeeded events. 
func (op Operation) canPublishFinishedEvent(info finishedInformation) bool { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_search_indexes.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_search_indexes.go index a16f9d716b..cb0d807952 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_search_indexes.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_search_indexes.go @@ -15,7 +15,6 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/event" "go.mongodb.org/mongo-driver/mongo/description" - "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/session" @@ -23,19 +22,18 @@ import ( // CreateSearchIndexes performs a createSearchIndexes operation. type CreateSearchIndexes struct { - indexes bsoncore.Document - session *session.Client - clock *session.ClusterClock - collection string - monitor *event.CommandMonitor - crypt driver.Crypt - database string - deployment driver.Deployment - selector description.ServerSelector - writeConcern *writeconcern.WriteConcern - result CreateSearchIndexesResult - serverAPI *driver.ServerAPIOptions - timeout *time.Duration + indexes bsoncore.Document + session *session.Client + clock *session.ClusterClock + collection string + monitor *event.CommandMonitor + crypt driver.Crypt + database string + deployment driver.Deployment + selector description.ServerSelector + result CreateSearchIndexesResult + serverAPI *driver.ServerAPIOptions + timeout *time.Duration } // CreateSearchIndexResult represents a single search index result in CreateSearchIndexesResult. @@ -109,9 +107,15 @@ func (csi *CreateSearchIndexes) Execute(ctx context.Context) error { return driver.Operation{ CommandFn: csi.command, ProcessResponseFn: csi.processResponse, + Client: csi.session, + Clock: csi.clock, CommandMonitor: csi.monitor, + Crypt: csi.crypt, Database: csi.database, Deployment: csi.deployment, + Selector: csi.selector, + ServerAPI: csi.serverAPI, + Timeout: csi.timeout, }.Execute(ctx) } @@ -214,16 +218,6 @@ func (csi *CreateSearchIndexes) ServerSelector(selector description.ServerSelect return csi } -// WriteConcern sets the write concern for this operation. -func (csi *CreateSearchIndexes) WriteConcern(writeConcern *writeconcern.WriteConcern) *CreateSearchIndexes { - if csi == nil { - csi = new(CreateSearchIndexes) - } - - csi.writeConcern = writeConcern - return csi -} - // ServerAPI sets the server API version for this operation. func (csi *CreateSearchIndexes) ServerAPI(serverAPI *driver.ServerAPIOptions) *CreateSearchIndexes { if csi == nil { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/doc.go new file mode 100644 index 0000000000..e55b12a748 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/doc.go @@ -0,0 +1,14 @@ +// Copyright (C) MongoDB, Inc. 2024-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package operation is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. 
The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! +package operation diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_search_index.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_search_index.go index 25cde8154b..3992c83165 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_search_index.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_search_index.go @@ -14,7 +14,6 @@ import ( "go.mongodb.org/mongo-driver/event" "go.mongodb.org/mongo-driver/mongo/description" - "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/session" @@ -22,19 +21,18 @@ import ( // DropSearchIndex performs an dropSearchIndex operation. type DropSearchIndex struct { - index string - session *session.Client - clock *session.ClusterClock - collection string - monitor *event.CommandMonitor - crypt driver.Crypt - database string - deployment driver.Deployment - selector description.ServerSelector - writeConcern *writeconcern.WriteConcern - result DropSearchIndexResult - serverAPI *driver.ServerAPIOptions - timeout *time.Duration + index string + session *session.Client + clock *session.ClusterClock + collection string + monitor *event.CommandMonitor + crypt driver.Crypt + database string + deployment driver.Deployment + selector description.ServerSelector + result DropSearchIndexResult + serverAPI *driver.ServerAPIOptions + timeout *time.Duration } // DropSearchIndexResult represents a dropSearchIndex result returned by the server. @@ -93,7 +91,6 @@ func (dsi *DropSearchIndex) Execute(ctx context.Context) error { Database: dsi.database, Deployment: dsi.deployment, Selector: dsi.selector, - WriteConcern: dsi.writeConcern, ServerAPI: dsi.serverAPI, Timeout: dsi.timeout, }.Execute(ctx) @@ -196,16 +193,6 @@ func (dsi *DropSearchIndex) ServerSelector(selector description.ServerSelector) return dsi } -// WriteConcern sets the write concern for this operation. -func (dsi *DropSearchIndex) WriteConcern(writeConcern *writeconcern.WriteConcern) *DropSearchIndex { - if dsi == nil { - dsi = new(DropSearchIndex) - } - - dsi.writeConcern = writeConcern - return dsi -} - // ServerAPI sets the server API version for this operation. func (dsi *DropSearchIndex) ServerAPI(serverAPI *driver.ServerAPIOptions) *DropSearchIndex { if dsi == nil { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update_search_index.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update_search_index.go index ba807986c9..64f2da7f6f 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update_search_index.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update_search_index.go @@ -14,7 +14,6 @@ import ( "go.mongodb.org/mongo-driver/event" "go.mongodb.org/mongo-driver/mongo/description" - "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/session" @@ -22,20 +21,19 @@ import ( // UpdateSearchIndex performs a updateSearchIndex operation. 
type UpdateSearchIndex struct { - index string - definition bsoncore.Document - session *session.Client - clock *session.ClusterClock - collection string - monitor *event.CommandMonitor - crypt driver.Crypt - database string - deployment driver.Deployment - selector description.ServerSelector - writeConcern *writeconcern.WriteConcern - result UpdateSearchIndexResult - serverAPI *driver.ServerAPIOptions - timeout *time.Duration + index string + definition bsoncore.Document + session *session.Client + clock *session.ClusterClock + collection string + monitor *event.CommandMonitor + crypt driver.Crypt + database string + deployment driver.Deployment + selector description.ServerSelector + result UpdateSearchIndexResult + serverAPI *driver.ServerAPIOptions + timeout *time.Duration } // UpdateSearchIndexResult represents a single index in the updateSearchIndexResult result. @@ -95,7 +93,6 @@ func (usi *UpdateSearchIndex) Execute(ctx context.Context) error { Database: usi.database, Deployment: usi.deployment, Selector: usi.selector, - WriteConcern: usi.writeConcern, ServerAPI: usi.serverAPI, Timeout: usi.timeout, }.Execute(ctx) @@ -209,16 +206,6 @@ func (usi *UpdateSearchIndex) ServerSelector(selector description.ServerSelector return usi } -// WriteConcern sets the write concern for this operation. -func (usi *UpdateSearchIndex) WriteConcern(writeConcern *writeconcern.WriteConcern) *UpdateSearchIndex { - if usi == nil { - usi = new(UpdateSearchIndex) - } - - usi.writeConcern = writeConcern - return usi -} - // ServerAPI sets the server API version for this operation. func (usi *UpdateSearchIndex) ServerAPI(serverAPI *driver.ServerAPIOptions) *UpdateSearchIndex { if usi == nil { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/doc.go new file mode 100644 index 0000000000..80b2ac2dd5 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/doc.go @@ -0,0 +1,14 @@ +// Copyright (C) MongoDB, Inc. 2024-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package session is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! +package session diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go index 476459e8e6..649e87b3d1 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go @@ -320,7 +320,7 @@ func transformNetworkError(ctx context.Context, originalError error, contextDead // If there was an error and the context was cancelled, we assume it happened due to the cancellation. 
if errors.Is(ctx.Err(), context.Canceled) { - return context.Canceled + return ctx.Err() } // If there was a timeout error and the context deadline was used, we convert the error into @@ -329,7 +329,7 @@ func transformNetworkError(ctx context.Context, originalError error, contextDead return originalError } if netErr, ok := originalError.(net.Error); ok && netErr.Timeout() { - return context.DeadlineExceeded + return fmt.Errorf("%w: %s", context.DeadlineExceeded, originalError.Error()) } return originalError diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go index 07f508caae..c7b168dc2c 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go @@ -56,7 +56,6 @@ type rttMonitor struct { cfg *rttConfig ctx context.Context cancelFn context.CancelFunc - started bool } var _ driver.RTTMonitor = &rttMonitor{} @@ -83,7 +82,6 @@ func (r *rttMonitor) connect() { r.connMu.Lock() defer r.connMu.Unlock() - r.started = true r.closeWg.Add(1) go func() { @@ -97,10 +95,6 @@ func (r *rttMonitor) disconnect() { r.connMu.Lock() defer r.connMu.Unlock() - if !r.started { - return - } - r.cancelFn() // Wait for the existing connection to complete. diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go index f4c6d744aa..a29eea4a6d 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go @@ -125,6 +125,7 @@ type Server struct { processErrorLock sync.Mutex rttMonitor *rttMonitor + monitorOnce sync.Once } // updateTopologyCallback is a callback used to create a server that should be called when the parent Topology instance @@ -285,10 +286,10 @@ func (s *Server) Disconnect(ctx context.Context) error { close(s.done) s.cancelCheck() - s.rttMonitor.disconnect() s.pool.close(ctx) s.closewg.Wait() + s.rttMonitor.disconnect() atomic.StoreInt64(&s.state, serverDisconnected) return nil @@ -661,11 +662,11 @@ func (s *Server) update() { transitionedFromNetworkError := desc.LastError != nil && unwrapConnectionError(desc.LastError) != nil && previousDescription.Kind != description.Unknown - if isStreamingEnabled(s) && isStreamable(s) && !s.rttMonitor.started { - s.rttMonitor.connect() + if isStreamingEnabled(s) && isStreamable(s) { + s.monitorOnce.Do(s.rttMonitor.connect) } - if isStreamable(s) || connectionIsStreaming || transitionedFromNetworkError { + if isStreamingEnabled(s) && (isStreamable(s) || connectionIsStreaming) || transitionedFromNetworkError { continue } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go index 3dbbcfb860..0fb913d21b 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go @@ -4,10 +4,19 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Package topology contains types that handles the discovery, monitoring, and selection -// of servers. 
This package is designed to expose enough inner workings of service discovery -// and monitoring to allow low level applications to have fine grained control, while hiding -// most of the detailed implementation of the algorithms. +// Package topology is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! +// +// Package topology contains types that handles the discovery, monitoring, and +// selection of servers. This package is designed to expose enough inner +// workings of service discovery and monitoring to allow low level applications +// to have fine grained control, while hiding most of the detailed +// implementation of the algorithms. package topology // import "go.mongodb.org/mongo-driver/x/mongo/driver/topology" import ( diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go index abf09c15bd..2199f855ba 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go @@ -4,10 +4,18 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// Package wiremessage is intended for internal use only. It is made available +// to facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! package wiremessage import ( "bytes" + "encoding/binary" "strings" "sync/atomic" @@ -231,10 +239,11 @@ func ReadHeader(src []byte) (length, requestID, responseTo int32, opcode OpCode, if len(src) < 16 { return 0, 0, 0, 0, src, false } - length = (int32(src[0]) | int32(src[1])<<8 | int32(src[2])<<16 | int32(src[3])<<24) - requestID = (int32(src[4]) | int32(src[5])<<8 | int32(src[6])<<16 | int32(src[7])<<24) - responseTo = (int32(src[8]) | int32(src[9])<<8 | int32(src[10])<<16 | int32(src[11])<<24) - opcode = OpCode(int32(src[12]) | int32(src[13])<<8 | int32(src[14])<<16 | int32(src[15])<<24) + + length = readi32unsafe(src) + requestID = readi32unsafe(src[4:]) + responseTo = readi32unsafe(src[8:]) + opcode = OpCode(readi32unsafe(src[12:])) return length, requestID, responseTo, opcode, src[16:], true } @@ -486,7 +495,7 @@ func ReadReplyCursorID(src []byte) (cursorID int64, rem []byte, ok bool) { return readi64(src) } -// ReadReplyStartingFrom reads the starting from from src. +// ReadReplyStartingFrom reads the starting from src. func ReadReplyStartingFrom(src []byte) (startingFrom int32, rem []byte, ok bool) { return readi32(src) } @@ -570,12 +579,16 @@ func ReadKillCursorsCursorIDs(src []byte, numIDs int32) (cursorIDs []int64, rem return cursorIDs, src, true } -func appendi32(dst []byte, i32 int32) []byte { - return append(dst, byte(i32), byte(i32>>8), byte(i32>>16), byte(i32>>24)) +func appendi32(dst []byte, x int32) []byte { + b := []byte{0, 0, 0, 0} + binary.LittleEndian.PutUint32(b, uint32(x)) + return append(dst, b...) 
} -func appendi64(b []byte, i int64) []byte { - return append(b, byte(i), byte(i>>8), byte(i>>16), byte(i>>24), byte(i>>32), byte(i>>40), byte(i>>48), byte(i>>56)) +func appendi64(dst []byte, x int64) []byte { + b := []byte{0, 0, 0, 0, 0, 0, 0, 0} + binary.LittleEndian.PutUint64(b, uint64(x)) + return append(dst, b...) } func appendCString(b []byte, str string) []byte { @@ -587,21 +600,18 @@ func readi32(src []byte) (int32, []byte, bool) { if len(src) < 4 { return 0, src, false } - - return (int32(src[0]) | int32(src[1])<<8 | int32(src[2])<<16 | int32(src[3])<<24), src[4:], true + return readi32unsafe(src), src[4:], true } func readi32unsafe(src []byte) int32 { - return (int32(src[0]) | int32(src[1])<<8 | int32(src[2])<<16 | int32(src[3])<<24) + return int32(binary.LittleEndian.Uint32(src)) } func readi64(src []byte) (int64, []byte, bool) { if len(src) < 8 { return 0, src, false } - i64 := (int64(src[0]) | int64(src[1])<<8 | int64(src[2])<<16 | int64(src[3])<<24 | - int64(src[4])<<32 | int64(src[5])<<40 | int64(src[6])<<48 | int64(src[7])<<56) - return i64, src[8:], true + return int64(binary.LittleEndian.Uint64(src)), src[8:], true } func readcstring(src []byte) (string, []byte, bool) { diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/LICENSE b/vendor/go.opentelemetry.io/contrib/detectors/gcp/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/README.md b/vendor/go.opentelemetry.io/contrib/detectors/gcp/README.md new file mode 100644 index 0000000000..ec35080b4e --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/README.md @@ -0,0 +1,76 @@ +# GCP Resource detector + +The GCP resource detector supports detecting resources on: + + * Google Compute Engine (GCE) + * Google Kubernetes Engine (GKE) + * Google App Engine (GAE) + * Cloud Run + * Cloud Run jobs + * Cloud Functions + +## Usage + +```golang +ctx := context.Background() +// Detect your resources +res, err := resource.New(ctx, + // Use the GCP resource detector! 
+ resource.WithDetectors(gcp.NewDetector()), + // Keep the default detectors + resource.WithTelemetrySDK(), + // Add your own custom attributes to identify your application + resource.WithAttributes( + semconv.ServiceNameKey.String("my-application"), + semconv.ServiceNamespaceKey.String("my-company-frontend-team"), + ), +) +if err != nil { + // Handle err +} +// Use the resource in your tracerprovider (or meterprovider) +tp := trace.NewTracerProvider( + // ... other options + trace.WithResource(res), +) +``` + +## Setting Kubernetes attributes + +Previous iterations of GCP resource detection attempted to detect +`container.name`, `k8s.pod.name` and `k8s.namespace.name`. When using this detector, +you should use this in your Pod Spec to set these using +[`OTEL_RESOURCE_ATTRIBUTES`](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#specifying-resource-information-via-an-environment-variable): + +```yaml +env: +- name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name +- name: NAMESPACE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.namespace +- name: CONTAINER_NAME + value: my-container-name +- name: OTEL_RESOURCE_ATTRIBUTES + value: k8s.pod.name=$(POD_NAME),k8s.namespace.name=$(NAMESPACE_NAME),k8s.container.name=$(CONTAINER_NAME) +``` +To have a detector unpack the `OTEL_RESOURCE_ATTRIBUTES` envvar, use the `WithFromEnv` option: + +```golang +... +// Detect your resources +res, err := resource.New(ctx, + resource.WithDetectors(gcp.NewDetector()), + resource.WithTelemetrySDK(), + resource.WithFromEnv(), // unpacks OTEL_RESOURCE_ATTRIBUTES + // Add your own custom attributes to identify your application + resource.WithAttributes( + semconv.ServiceNameKey.String("my-application"), + semconv.ServiceNamespaceKey.String("my-company-frontend-team"), + ), +) +... +``` diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-function.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-function.go new file mode 100644 index 0000000000..1c1490b02c --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-function.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package gcp // import "go.opentelemetry.io/contrib/detectors/gcp" + +import ( + "context" + "os" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/resource" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +const ( + gcpFunctionNameKey = "K_SERVICE" +) + +// NewCloudFunction will return a GCP Cloud Function resource detector. +// +// Deprecated: Use gcp.NewDetector() instead, which sets the same resource attributes. +func NewCloudFunction() resource.Detector { + return &cloudFunction{ + cloudRun: NewCloudRun(), + } +} + +// cloudFunction collects resource information of GCP Cloud Function. +type cloudFunction struct { + cloudRun *CloudRun +} + +// Detect detects associated resources when running in GCP Cloud Function. 
+func (f *cloudFunction) Detect(ctx context.Context) (*resource.Resource, error) { + functionName, ok := f.googleCloudFunctionName() + if !ok { + return nil, nil + } + + projectID, err := f.cloudRun.mc.ProjectID() + if err != nil { + return nil, err + } + region, err := f.cloudRun.cloudRegion() + if err != nil { + return nil, err + } + + attributes := []attribute.KeyValue{ + semconv.CloudProviderGCP, + semconv.CloudPlatformGCPCloudFunctions, + semconv.FaaSName(functionName), + semconv.CloudAccountID(projectID), + semconv.CloudRegion(region), + } + return resource.NewWithAttributes(semconv.SchemaURL, attributes...), nil +} + +func (f *cloudFunction) googleCloudFunctionName() (string, bool) { + return os.LookupEnv(gcpFunctionNameKey) +} diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-run.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-run.go new file mode 100644 index 0000000000..7754b46683 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-run.go @@ -0,0 +1,114 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package gcp // import "go.opentelemetry.io/contrib/detectors/gcp" + +import ( + "context" + "fmt" + "os" + "strings" + + "cloud.google.com/go/compute/metadata" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/resource" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +const serviceNamespace = "cloud-run-managed" + +// The minimal list of metadata.Client methods we use. Use an interface so we +// can replace it with a fake implementation in the unit test. +type metadataClient interface { + ProjectID() (string, error) + Get(string) (string, error) + InstanceID() (string, error) +} + +// CloudRun collects resource information of Cloud Run instance. +// +// Deprecated: Use gcp.NewDetector() instead. Note that it sets faas.* resource attributes instead of service.* attributes. +type CloudRun struct { + mc metadataClient + onGCE func() bool + getenv func(string) string +} + +// compile time assertion that CloudRun implements the resource.Detector +// interface. +var _ resource.Detector = (*CloudRun)(nil) + +// NewCloudRun creates a CloudRun detector. +// +// Deprecated: Use gcp.NewDetector() instead. Note that it sets faas.* resource attributes instead of service.* attributes. +func NewCloudRun() *CloudRun { + return &CloudRun{ + mc: metadata.NewClient(nil), + onGCE: metadata.OnGCE, + getenv: os.Getenv, + } +} + +func (c *CloudRun) cloudRegion() (string, error) { + region, err := c.mc.Get("instance/region") + if err != nil { + return "", err + } + // Region from the metadata server is in the format /projects/123/regions/r. + // https://cloud.google.com/run/docs/reference/container-contract#metadata-server + return region[strings.LastIndex(region, "/")+1:], nil +} + +// Detect detects associated resources when running on Cloud Run hosts. +// NOTE: the service.namespace attribute is currently hardcoded to be +// "cloud-run-managed". This may change in the future, please do not rely on +// this behavior yet. +func (c *CloudRun) Detect(ctx context.Context) (*resource.Resource, error) { + // .OnGCE is actually testing whether the metadata server is available. + // Metadata server is supported on Cloud Run. 
+ if !c.onGCE() { + return nil, nil + } + + attributes := []attribute.KeyValue{ + semconv.CloudProviderGCP, + semconv.ServiceNamespace(serviceNamespace), + } + + var errInfo []string + + if projectID, err := c.mc.ProjectID(); hasProblem(err) { + errInfo = append(errInfo, err.Error()) + } else if projectID != "" { + attributes = append(attributes, semconv.CloudAccountID(projectID)) + } + + if region, err := c.cloudRegion(); hasProblem(err) { + errInfo = append(errInfo, err.Error()) + } else if region != "" { + attributes = append(attributes, semconv.CloudRegion(region)) + } + + if instanceID, err := c.mc.InstanceID(); hasProblem(err) { + errInfo = append(errInfo, err.Error()) + } else if instanceID != "" { + attributes = append(attributes, semconv.ServiceInstanceID(instanceID)) + } + + // Part of Cloud Run container runtime contract. + // See https://cloud.google.com/run/docs/reference/container-contract + if service := c.getenv("K_SERVICE"); service == "" { + errInfo = append(errInfo, "envvar K_SERVICE contains empty string.") + } else { + attributes = append(attributes, semconv.ServiceName(service)) + } + res := resource.NewWithAttributes(semconv.SchemaURL, attributes...) + + var aggregatedErr error + if len(errInfo) > 0 { + aggregatedErr = fmt.Errorf("detecting Cloud Run resources: %s", errInfo) + } + + return res, aggregatedErr +} diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/detector.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/detector.go new file mode 100644 index 0000000000..b9eb1e1e14 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/detector.go @@ -0,0 +1,153 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package gcp // import "go.opentelemetry.io/contrib/detectors/gcp" + +import ( + "context" + "fmt" + "strconv" + + "cloud.google.com/go/compute/metadata" + "github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/resource" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +// NewDetector returns a resource detector which detects resource attributes on: +// * Google Compute Engine (GCE). +// * Google Kubernetes Engine (GKE). +// * Google App Engine (GAE). +// * Cloud Run. +// * Cloud Functions. +func NewDetector() resource.Detector { + return &detector{detector: gcp.NewDetector()} +} + +type detector struct { + detector gcpDetector +} + +// Detect detects associated resources when running on GCE, GKE, GAE, +// Cloud Run, and Cloud functions. 
+func (d *detector) Detect(ctx context.Context) (*resource.Resource, error) { + if !metadata.OnGCE() { + return nil, nil + } + b := &resourceBuilder{} + b.attrs = append(b.attrs, semconv.CloudProviderGCP) + b.add(semconv.CloudAccountIDKey, d.detector.ProjectID) + + switch d.detector.CloudPlatform() { + case gcp.GKE: + b.attrs = append(b.attrs, semconv.CloudPlatformGCPKubernetesEngine) + b.addZoneOrRegion(d.detector.GKEAvailabilityZoneOrRegion) + b.add(semconv.K8SClusterNameKey, d.detector.GKEClusterName) + b.add(semconv.HostIDKey, d.detector.GKEHostID) + case gcp.CloudRun: + b.attrs = append(b.attrs, semconv.CloudPlatformGCPCloudRun) + b.add(semconv.FaaSNameKey, d.detector.FaaSName) + b.add(semconv.FaaSVersionKey, d.detector.FaaSVersion) + b.add(semconv.FaaSInstanceKey, d.detector.FaaSID) + b.add(semconv.CloudRegionKey, d.detector.FaaSCloudRegion) + case gcp.CloudRunJob: + b.attrs = append(b.attrs, semconv.CloudPlatformGCPCloudRun) + b.add(semconv.FaaSNameKey, d.detector.FaaSName) + b.add(semconv.FaaSInstanceKey, d.detector.FaaSID) + b.add(semconv.GCPCloudRunJobExecutionKey, d.detector.CloudRunJobExecution) + b.addInt(semconv.GCPCloudRunJobTaskIndexKey, d.detector.CloudRunJobTaskIndex) + b.add(semconv.CloudRegionKey, d.detector.FaaSCloudRegion) + case gcp.CloudFunctions: + b.attrs = append(b.attrs, semconv.CloudPlatformGCPCloudFunctions) + b.add(semconv.FaaSNameKey, d.detector.FaaSName) + b.add(semconv.FaaSVersionKey, d.detector.FaaSVersion) + b.add(semconv.FaaSInstanceKey, d.detector.FaaSID) + b.add(semconv.CloudRegionKey, d.detector.FaaSCloudRegion) + case gcp.AppEngineFlex: + b.attrs = append(b.attrs, semconv.CloudPlatformGCPAppEngine) + b.addZoneAndRegion(d.detector.AppEngineFlexAvailabilityZoneAndRegion) + b.add(semconv.FaaSNameKey, d.detector.AppEngineServiceName) + b.add(semconv.FaaSVersionKey, d.detector.AppEngineServiceVersion) + b.add(semconv.FaaSInstanceKey, d.detector.AppEngineServiceInstance) + case gcp.AppEngineStandard: + b.attrs = append(b.attrs, semconv.CloudPlatformGCPAppEngine) + b.add(semconv.CloudAvailabilityZoneKey, d.detector.AppEngineStandardAvailabilityZone) + b.add(semconv.CloudRegionKey, d.detector.AppEngineStandardCloudRegion) + b.add(semconv.FaaSNameKey, d.detector.AppEngineServiceName) + b.add(semconv.FaaSVersionKey, d.detector.AppEngineServiceVersion) + b.add(semconv.FaaSInstanceKey, d.detector.AppEngineServiceInstance) + case gcp.GCE: + b.attrs = append(b.attrs, semconv.CloudPlatformGCPComputeEngine) + b.addZoneAndRegion(d.detector.GCEAvailabilityZoneAndRegion) + b.add(semconv.HostTypeKey, d.detector.GCEHostType) + b.add(semconv.HostIDKey, d.detector.GCEHostID) + b.add(semconv.HostNameKey, d.detector.GCEHostName) + b.add(semconv.GCPGceInstanceNameKey, d.detector.GCEInstanceName) + b.add(semconv.GCPGceInstanceHostnameKey, d.detector.GCEInstanceHostname) + default: + // We don't support this platform yet, so just return with what we have + } + return b.build() +} + +// resourceBuilder simplifies constructing resources using GCP detection +// library functions. 
+type resourceBuilder struct { + errs []error + attrs []attribute.KeyValue +} + +func (r *resourceBuilder) add(key attribute.Key, detect func() (string, error)) { + if v, err := detect(); err == nil { + r.attrs = append(r.attrs, key.String(v)) + } else { + r.errs = append(r.errs, err) + } +} + +func (r *resourceBuilder) addInt(key attribute.Key, detect func() (string, error)) { + if v, err := detect(); err == nil { + if vi, err := strconv.Atoi(v); err == nil { + r.attrs = append(r.attrs, key.Int(vi)) + } else { + r.errs = append(r.errs, err) + } + } else { + r.errs = append(r.errs, err) + } +} + +// zoneAndRegion functions are expected to return zone, region, err. +func (r *resourceBuilder) addZoneAndRegion(detect func() (string, string, error)) { + if zone, region, err := detect(); err == nil { + r.attrs = append(r.attrs, semconv.CloudAvailabilityZone(zone)) + r.attrs = append(r.attrs, semconv.CloudRegion(region)) + } else { + r.errs = append(r.errs, err) + } +} + +func (r *resourceBuilder) addZoneOrRegion(detect func() (string, gcp.LocationType, error)) { + if v, locType, err := detect(); err == nil { + switch locType { + case gcp.Zone: + r.attrs = append(r.attrs, semconv.CloudAvailabilityZone(v)) + case gcp.Region: + r.attrs = append(r.attrs, semconv.CloudRegion(v)) + default: + r.errs = append(r.errs, fmt.Errorf("location must be zone or region. Got %v", locType)) + } + } else { + r.errs = append(r.errs, err) + } +} + +func (r *resourceBuilder) build() (*resource.Resource, error) { + var err error + if len(r.errs) > 0 { + err = fmt.Errorf("%w: %s", resource.ErrPartialResource, r.errs) + } + return resource.NewWithAttributes(semconv.SchemaURL, r.attrs...), err +} diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/gce.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/gce.go new file mode 100644 index 0000000000..2a29c420b4 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/gce.go @@ -0,0 +1,100 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package gcp // import "go.opentelemetry.io/contrib/detectors/gcp" + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + + "cloud.google.com/go/compute/metadata" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/resource" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +// GCE collects resource information of GCE computing instances. +// +// Deprecated: Use gcp.NewDetector() instead, which sets the same resource attributes on GCE. +type GCE struct{} + +// compile time assertion that GCE implements the resource.Detector interface. +var _ resource.Detector = (*GCE)(nil) + +// Detect detects associated resources when running on GCE hosts. 
+func (gce *GCE) Detect(ctx context.Context) (*resource.Resource, error) { + if !metadata.OnGCE() { + return nil, nil + } + + attributes := []attribute.KeyValue{ + semconv.CloudProviderGCP, + } + + var errInfo []string + + if projectID, err := metadata.ProjectIDWithContext(ctx); hasProblem(err) { + errInfo = append(errInfo, err.Error()) + } else if projectID != "" { + attributes = append(attributes, semconv.CloudAccountID(projectID)) + } + + if zone, err := metadata.ZoneWithContext(ctx); hasProblem(err) { + errInfo = append(errInfo, err.Error()) + } else if zone != "" { + attributes = append(attributes, semconv.CloudAvailabilityZone(zone)) + + splitArr := strings.SplitN(zone, "-", 3) + if len(splitArr) == 3 { + attributes = append(attributes, semconv.CloudRegion(strings.Join(splitArr[0:2], "-"))) + } + } + + if instanceID, err := metadata.InstanceIDWithContext(ctx); hasProblem(err) { + errInfo = append(errInfo, err.Error()) + } else if instanceID != "" { + attributes = append(attributes, semconv.HostID(instanceID)) + } + + if name, err := metadata.InstanceNameWithContext(ctx); hasProblem(err) { + errInfo = append(errInfo, err.Error()) + } else if name != "" { + attributes = append(attributes, semconv.HostName(name)) + } + + if hostname, err := os.Hostname(); hasProblem(err) { + errInfo = append(errInfo, err.Error()) + } else if hostname != "" { + attributes = append(attributes, semconv.HostName(hostname)) + } + + if hostType, err := metadata.GetWithContext(ctx, "instance/machine-type"); hasProblem(err) { + errInfo = append(errInfo, err.Error()) + } else if hostType != "" { + attributes = append(attributes, semconv.HostType(hostType)) + } + + var aggregatedErr error + if len(errInfo) > 0 { + aggregatedErr = fmt.Errorf("detecting GCE resources: %s", errInfo) + } + + return resource.NewWithAttributes(semconv.SchemaURL, attributes...), aggregatedErr +} + +// hasProblem checks if the err is not nil or for missing resources. +func hasProblem(err error) bool { + if err == nil { + return false + } + + var nde metadata.NotDefinedError + if undefined := errors.As(err, &nde); undefined { + return false + } + return true +} diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/gke.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/gke.go new file mode 100644 index 0000000000..0588ad6a48 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/gke.go @@ -0,0 +1,69 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package gcp // import "go.opentelemetry.io/contrib/detectors/gcp" + +import ( + "context" + "fmt" + "os" + + "cloud.google.com/go/compute/metadata" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/resource" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +// GKE collects resource information of GKE computing instances. +// +// Deprecated: Use gcp.NewDetector() instead, which does NOT detect container, pod, and namespace attributes. +// Set those using name using the OTEL_RESOURCE_ATTRIBUTES env var instead. +type GKE struct{} + +// compile time assertion that GKE implements the resource.Detector interface. +var _ resource.Detector = (*GKE)(nil) + +// Detect detects associated resources when running in GKE environment. 
+func (gke *GKE) Detect(ctx context.Context) (*resource.Resource, error) { + gcpDetecor := GCE{} + gceLablRes, err := gcpDetecor.Detect(ctx) + + if os.Getenv("KUBERNETES_SERVICE_HOST") == "" { + return gceLablRes, err + } + + var errInfo []string + if err != nil { + errInfo = append(errInfo, err.Error()) + } + + attributes := []attribute.KeyValue{ + semconv.K8SNamespaceName(os.Getenv("NAMESPACE")), + semconv.K8SPodName(os.Getenv("HOSTNAME")), + } + + if containerName := os.Getenv("CONTAINER_NAME"); containerName != "" { + attributes = append(attributes, semconv.ContainerName(containerName)) + } + + if clusterName, err := metadata.InstanceAttributeValueWithContext(ctx, "cluster-name"); hasProblem(err) { + errInfo = append(errInfo, err.Error()) + } else if clusterName != "" { + attributes = append(attributes, semconv.K8SClusterName(clusterName)) + } + + k8sattributeRes := resource.NewWithAttributes(semconv.SchemaURL, attributes...) + + res, err := resource.Merge(gceLablRes, k8sattributeRes) + if err != nil { + errInfo = append(errInfo, err.Error()) + } + + var aggregatedErr error + if len(errInfo) > 0 { + aggregatedErr = fmt.Errorf("detecting GKE resources: %s", errInfo) + } + + return res, aggregatedErr +} diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/types.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/types.go new file mode 100644 index 0000000000..666d82e616 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/types.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package gcp // import "go.opentelemetry.io/contrib/detectors/gcp" + +import "github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp" + +// gcpDetector can detect attributes of GCP environments. +type gcpDetector interface { + ProjectID() (string, error) + CloudPlatform() gcp.Platform + GKEAvailabilityZoneOrRegion() (string, gcp.LocationType, error) + GKEClusterName() (string, error) + GKEHostID() (string, error) + FaaSName() (string, error) + FaaSVersion() (string, error) + FaaSID() (string, error) + FaaSCloudRegion() (string, error) + AppEngineFlexAvailabilityZoneAndRegion() (string, string, error) + AppEngineStandardAvailabilityZone() (string, error) + AppEngineStandardCloudRegion() (string, error) + AppEngineServiceName() (string, error) + AppEngineServiceVersion() (string, error) + AppEngineServiceInstance() (string, error) + GCEAvailabilityZoneAndRegion() (string, string, error) + GCEHostType() (string, error) + GCEHostID() (string, error) + GCEHostName() (string, error) + GCEInstanceHostname() (string, error) + GCEInstanceName() (string, error) + CloudRunJobExecution() (string, error) + CloudRunJobTaskIndex() (string, error) +} diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/version.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/version.go new file mode 100644 index 0000000000..1acc898319 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/version.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package gcp // import "go.opentelemetry.io/contrib/detectors/gcp" + +// Version is the current release version of the GCP resource detector. +func Version() string { + return "1.29.0" + // This string is updated by the pre_release.sh script during release +} + +// SemVersion is the semantic version to be supplied to tracer/meter creation. +// +// Deprecated: Use [Version] instead. 
+func SemVersion() string { + return Version() +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/sdk/README.md b/vendor/go.opentelemetry.io/otel/sdk/README.md new file mode 100644 index 0000000000..f81b1576ad --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/README.md @@ -0,0 +1,3 @@ +# SDK + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk) diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md new file mode 100644 index 0000000000..06e6d86854 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md @@ -0,0 +1,3 @@ +# SDK Instrumentation + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/instrumentation)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/instrumentation) diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go new file mode 100644 index 0000000000..a4faa6a03d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go @@ -0,0 +1,13 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package instrumentation provides types to represent the code libraries that +// provide OpenTelemetry instrumentation. These types are used in the +// OpenTelemetry signal pipelines to identify the source of telemetry. +// +// See +// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0083-component.md +// and +// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0201-scope-attributes.md +// for more information. +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go new file mode 100644 index 0000000000..f2cdf3c651 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" + +// Library represents the instrumentation library. +// +// Deprecated: use [Scope] instead. +type Library = Scope diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go new file mode 100644 index 0000000000..728115045b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" + +// Scope represents the instrumentation scope. +type Scope struct { + // Name is the name of the instrumentation scope. This should be the + // Go package name of that scope. + Name string + // Version is the version of the instrumentation scope. + Version string + // SchemaURL of the telemetry emitted by the scope. 
+ SchemaURL string +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md new file mode 100644 index 0000000000..fab61647c2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md @@ -0,0 +1,46 @@ +# Experimental Features + +The SDK contains features that have not yet stabilized in the OpenTelemetry specification. +These features are added to the OpenTelemetry Go SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback. + +These feature may change in backwards incompatible ways as feedback is applied. +See the [Compatibility and Stability](#compatibility-and-stability) section for more information. + +## Features + +- [Resource](#resource) + +### Resource + +[OpenTelemetry resource semantic conventions] include many attribute definitions that are defined as experimental. +To have experimental semantic conventions be added by [resource detectors] set the `OTEL_GO_X_RESOURCE` environment variable. +The value set must be the case-insensitive string of `"true"` to enable the feature. +All other values are ignored. + + + +[OpenTelemetry resource semantic conventions]: https://opentelemetry.io/docs/specs/semconv/resource/ +[resource detectors]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource#Detector + +#### Examples + +Enable experimental resource semantic conventions. + +```console +export OTEL_GO_X_RESOURCE=true +``` + +Disable experimental resource semantic conventions. + +```console +unset OTEL_GO_X_RESOURCE +``` + +## Compatibility and Stability + +Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../VERSIONING.md). +These features may be removed or modified in successive version releases, including patch versions. + +When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. +There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version. +If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support. diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go new file mode 100644 index 0000000000..68d296cbed --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x contains support for OTel SDK experimental features. +// +// This package should only be used for features defined in the specification. +// It should not be used for experiments or new project ideas. +package x // import "go.opentelemetry.io/otel/sdk/internal/x" + +import ( + "os" + "strings" +) + +// Resource is an experimental feature flag that defines if resource detectors +// should be included experimental semantic conventions. +// +// To enable this feature set the OTEL_GO_X_RESOURCE environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var Resource = newFeature("RESOURCE", func(v string) (string, bool) { + if strings.ToLower(v) == "true" { + return v, true + } + return "", false +}) + +// Feature is an experimental feature control flag. 
It provides a uniform way +// to interact with these feature flags and parse their values. +type Feature[T any] struct { + key string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + return Feature[T]{ + key: envKeyRoot + suffix, + parse: parse, + } +} + +// Key returns the environment variable key that needs to be set to enable the +// feature. +func (f Feature[T]) Key() string { return f.key } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. +func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + vRaw := os.Getenv(f.key) + if vRaw == "" { + return v, ok + } + return f.parse(vRaw) +} + +// Enabled returns if the feature is enabled. +func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/README.md new file mode 100644 index 0000000000..017f072a51 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/README.md @@ -0,0 +1,3 @@ +# Metric SDK + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/metric)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric) diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go b/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go new file mode 100644 index 0000000000..e6f5cfb2ad --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "errors" + "fmt" + "slices" +) + +// errAgg is wrapped by misconfigured aggregations. 
+var errAgg = errors.New("aggregation") + +// Aggregation is the aggregation used to summarize recorded measurements. +type Aggregation interface { + // copy returns a deep copy of the Aggregation. + copy() Aggregation + + // err returns an error for any misconfigured Aggregation. + err() error +} + +// AggregationDrop is an Aggregation that drops all recorded data. +type AggregationDrop struct{} // AggregationDrop has no parameters. + +var _ Aggregation = AggregationDrop{} + +// copy returns a deep copy of d. +func (d AggregationDrop) copy() Aggregation { return d } + +// err returns an error for any misconfiguration. A drop aggregation has no +// parameters and cannot be misconfigured, therefore this always returns nil. +func (AggregationDrop) err() error { return nil } + +// AggregationDefault is an Aggregation that uses the default instrument kind selection +// mapping to select another Aggregation. A metric reader can be configured to +// make an aggregation selection based on instrument kind that differs from +// the default. This Aggregation ensures the default is used. +// +// See the [DefaultAggregationSelector] for information about the default +// instrument kind selection mapping. +type AggregationDefault struct{} // AggregationDefault has no parameters. + +var _ Aggregation = AggregationDefault{} + +// copy returns a deep copy of d. +func (d AggregationDefault) copy() Aggregation { return d } + +// err returns an error for any misconfiguration. A default aggregation has no +// parameters and cannot be misconfigured, therefore this always returns nil. +func (AggregationDefault) err() error { return nil } + +// AggregationSum is an Aggregation that summarizes a set of measurements as their +// arithmetic sum. +type AggregationSum struct{} // AggregationSum has no parameters. + +var _ Aggregation = AggregationSum{} + +// copy returns a deep copy of s. +func (s AggregationSum) copy() Aggregation { return s } + +// err returns an error for any misconfiguration. A sum aggregation has no +// parameters and cannot be misconfigured, therefore this always returns nil. +func (AggregationSum) err() error { return nil } + +// AggregationLastValue is an Aggregation that summarizes a set of measurements as the +// last one made. +type AggregationLastValue struct{} // AggregationLastValue has no parameters. + +var _ Aggregation = AggregationLastValue{} + +// copy returns a deep copy of l. +func (l AggregationLastValue) copy() Aggregation { return l } + +// err returns an error for any misconfiguration. A last-value aggregation has +// no parameters and cannot be misconfigured, therefore this always returns +// nil. +func (AggregationLastValue) err() error { return nil } + +// AggregationExplicitBucketHistogram is an Aggregation that summarizes a set of +// measurements as an histogram with explicitly defined buckets. +type AggregationExplicitBucketHistogram struct { + // Boundaries are the increasing bucket boundary values. Boundary values + // define bucket upper bounds. Buckets are exclusive of their lower + // boundary and inclusive of their upper bound (except at positive + // infinity). A measurement is defined to fall into the greatest-numbered + // bucket with a boundary that is greater than or equal to the + // measurement. 
As an example, boundaries defined as: + // + // []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 1000} + // + // Will define these buckets: + // + // (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, 25.0], (25.0, 50.0], + // (50.0, 75.0], (75.0, 100.0], (100.0, 250.0], (250.0, 500.0], + // (500.0, 1000.0], (1000.0, +∞) + Boundaries []float64 + // NoMinMax indicates whether to not record the min and max of the + // distribution. By default, these extrema are recorded. + // + // Recording these extrema for cumulative data is expected to have little + // value, they will represent the entire life of the instrument instead of + // just the current collection cycle. It is recommended to set this to true + // for that type of data to avoid computing the low-value extrema. + NoMinMax bool +} + +var _ Aggregation = AggregationExplicitBucketHistogram{} + +// errHist is returned by misconfigured ExplicitBucketHistograms. +var errHist = fmt.Errorf("%w: explicit bucket histogram", errAgg) + +// err returns an error for any misconfiguration. +func (h AggregationExplicitBucketHistogram) err() error { + if len(h.Boundaries) <= 1 { + return nil + } + + // Check boundaries are monotonic. + i := h.Boundaries[0] + for _, j := range h.Boundaries[1:] { + if i >= j { + return fmt.Errorf("%w: non-monotonic boundaries: %v", errHist, h.Boundaries) + } + i = j + } + + return nil +} + +// copy returns a deep copy of h. +func (h AggregationExplicitBucketHistogram) copy() Aggregation { + return AggregationExplicitBucketHistogram{ + Boundaries: slices.Clone(h.Boundaries), + NoMinMax: h.NoMinMax, + } +} + +// AggregationBase2ExponentialHistogram is an Aggregation that summarizes a set of +// measurements as an histogram with bucket widths that grow exponentially. +type AggregationBase2ExponentialHistogram struct { + // MaxSize is the maximum number of buckets to use for the histogram. + MaxSize int32 + // MaxScale is the maximum resolution scale to use for the histogram. + // + // MaxScale has a maximum value of 20. Using a value of 20 means the + // maximum number of buckets that can fit within the range of a + // signed 32-bit integer index could be used. + // + // MaxScale has a minimum value of -10. Using a value of -10 means only + // two buckets will be used. + MaxScale int32 + + // NoMinMax indicates whether to not record the min and max of the + // distribution. By default, these extrema are recorded. + // + // Recording these extrema for cumulative data is expected to have little + // value, they will represent the entire life of the instrument instead of + // just the current collection cycle. It is recommended to set this to true + // for that type of data to avoid computing the low-value extrema. + NoMinMax bool +} + +var _ Aggregation = AggregationBase2ExponentialHistogram{} + +// copy returns a deep copy of the Aggregation. +func (e AggregationBase2ExponentialHistogram) copy() Aggregation { + return e +} + +const ( + expoMaxScale = 20 + expoMinScale = -10 +) + +// errExpoHist is returned by misconfigured Base2ExponentialBucketHistograms. +var errExpoHist = fmt.Errorf("%w: exponential histogram", errAgg) + +// err returns an error for any misconfigured Aggregation. 
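+//
+// For example, an aggregation such as
+//
+//	AggregationBase2ExponentialHistogram{MaxSize: 160, MaxScale: 20}
+//
+// (illustrative values, not defaults defined here) passes this validation,
+// while a MaxScale greater than 20 or a MaxSize of zero or less is rejected
+// with an error wrapping errExpoHist.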
+func (e AggregationBase2ExponentialHistogram) err() error { + if e.MaxScale > expoMaxScale { + return fmt.Errorf("%w: max size %d is greater than maximum scale %d", errExpoHist, e.MaxSize, expoMaxScale) + } + if e.MaxSize <= 0 { + return fmt.Errorf("%w: max size %d is less than or equal to zero", errExpoHist, e.MaxSize) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go b/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go new file mode 100644 index 0000000000..63b88f0866 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go @@ -0,0 +1,83 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "sync" +) + +// cache is a locking storage used to quickly return already computed values. +// +// The zero value of a cache is empty and ready to use. +// +// A cache must not be copied after first use. +// +// All methods of a cache are safe to call concurrently. +type cache[K comparable, V any] struct { + sync.Mutex + data map[K]V +} + +// Lookup returns the value stored in the cache with the associated key if it +// exists. Otherwise, f is called and its returned value is set in the cache +// for key and returned. +// +// Lookup is safe to call concurrently. It will hold the cache lock, so f +// should not block excessively. +func (c *cache[K, V]) Lookup(key K, f func() V) V { + c.Lock() + defer c.Unlock() + + if c.data == nil { + val := f() + c.data = map[K]V{key: val} + return val + } + if v, ok := c.data[key]; ok { + return v + } + val := f() + c.data[key] = val + return val +} + +// HasKey returns true if Lookup has previously been called with that key +// +// HasKey is safe to call concurrently. +func (c *cache[K, V]) HasKey(key K) bool { + c.Lock() + defer c.Unlock() + _, ok := c.data[key] + return ok +} + +// cacheWithErr is a locking storage used to quickly return already computed values and an error. +// +// The zero value of a cacheWithErr is empty and ready to use. +// +// A cacheWithErr must not be copied after first use. +// +// All methods of a cacheWithErr are safe to call concurrently. +type cacheWithErr[K comparable, V any] struct { + cache[K, valAndErr[V]] +} + +type valAndErr[V any] struct { + val V + err error +} + +// Lookup returns the value stored in the cacheWithErr with the associated key +// if it exists. Otherwise, f is called and its returned value is set in the +// cacheWithErr for key and returned. +// +// Lookup is safe to call concurrently. It will hold the cacheWithErr lock, so f +// should not block excessively. +func (c *cacheWithErr[K, V]) Lookup(key K, f func() (V, error)) (V, error) { + combined := c.cache.Lookup(key, func() valAndErr[V] { + val, err := f() + return valAndErr[V]{val: val, err: err} + }) + return combined.val, combined.err +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go new file mode 100644 index 0000000000..bbe7bf671f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go @@ -0,0 +1,137 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "fmt" + "sync" + + "go.opentelemetry.io/otel/sdk/resource" +) + +// config contains configuration options for a MeterProvider. 
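+//
+// A minimal construction that populates this config might look like the
+// following sketch, where exp stands in for some push Exporter implementation
+// (an illustrative assumption, not something defined in this file):
+//
+//	provider := metric.NewMeterProvider(
+//		metric.WithResource(resource.Default()),
+//		metric.WithReader(metric.NewPeriodicReader(exp)),
+//	)
+//	defer provider.Shutdown(context.Background())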
+type config struct { + res *resource.Resource + readers []Reader + views []View +} + +// readerSignals returns a force-flush and shutdown function for a +// MeterProvider to call in their respective options. All Readers c contains +// will have their force-flush and shutdown methods unified into returned +// single functions. +func (c config) readerSignals() (forceFlush, shutdown func(context.Context) error) { + var fFuncs, sFuncs []func(context.Context) error + for _, r := range c.readers { + sFuncs = append(sFuncs, r.Shutdown) + if f, ok := r.(interface{ ForceFlush(context.Context) error }); ok { + fFuncs = append(fFuncs, f.ForceFlush) + } + } + + return unify(fFuncs), unifyShutdown(sFuncs) +} + +// unify unifies calling all of funcs into a single function call. All errors +// returned from calls to funcs will be unify into a single error return +// value. +func unify(funcs []func(context.Context) error) func(context.Context) error { + return func(ctx context.Context) error { + var errs []error + for _, f := range funcs { + if err := f(ctx); err != nil { + errs = append(errs, err) + } + } + return unifyErrors(errs) + } +} + +// unifyErrors combines multiple errors into a single error. +func unifyErrors(errs []error) error { + switch len(errs) { + case 0: + return nil + case 1: + return errs[0] + default: + return fmt.Errorf("%v", errs) + } +} + +// unifyShutdown unifies calling all of funcs once for a shutdown. If called +// more than once, an ErrReaderShutdown error is returned. +func unifyShutdown(funcs []func(context.Context) error) func(context.Context) error { + f := unify(funcs) + var once sync.Once + return func(ctx context.Context) error { + err := ErrReaderShutdown + once.Do(func() { err = f(ctx) }) + return err + } +} + +// newConfig returns a config configured with options. +func newConfig(options []Option) config { + conf := config{res: resource.Default()} + for _, o := range options { + conf = o.apply(conf) + } + return conf +} + +// Option applies a configuration option value to a MeterProvider. +type Option interface { + apply(config) config +} + +// optionFunc applies a set of options to a config. +type optionFunc func(config) config + +// apply returns a config with option(s) applied. +func (o optionFunc) apply(conf config) config { + return o(conf) +} + +// WithResource associates a Resource with a MeterProvider. This Resource +// represents the entity producing telemetry and is associated with all Meters +// the MeterProvider will create. +// +// By default, if this Option is not used, the default Resource from the +// go.opentelemetry.io/otel/sdk/resource package will be used. +func WithResource(res *resource.Resource) Option { + return optionFunc(func(conf config) config { + conf.res = res + return conf + }) +} + +// WithReader associates Reader r with a MeterProvider. +// +// By default, if this option is not used, the MeterProvider will perform no +// operations; no data will be exported without a Reader. +func WithReader(r Reader) Option { + return optionFunc(func(cfg config) config { + if r == nil { + return cfg + } + cfg.readers = append(cfg.readers, r) + return cfg + }) +} + +// WithView associates views with a MeterProvider. +// +// Views are appended to existing ones in a MeterProvider if this option is +// used multiple times. +// +// By default, if this option is not used, the MeterProvider will use the +// default view. +func WithView(views ...View) Option { + return optionFunc(func(cfg config) config { + cfg.views = append(cfg.views, views...) 
+ return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go new file mode 100644 index 0000000000..90a4ae16c1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package metric provides an implementation of the OpenTelemetry metrics SDK. +// +// See https://opentelemetry.io/docs/concepts/signals/metrics/ for information +// about the concept of OpenTelemetry metrics and +// https://opentelemetry.io/docs/concepts/components/ for more information +// about OpenTelemetry SDKs. +// +// The entry point for the metric package is the MeterProvider. It is the +// object that all API calls use to create Meters, instruments, and ultimately +// make metric measurements. Also, it is an object that should be used to +// control the life-cycle (start, flush, and shutdown) of the SDK. +// +// A MeterProvider needs to be configured to export the measured data, this is +// done by configuring it with a Reader implementation (using the WithReader +// MeterProviderOption). Readers take two forms: ones that push to an endpoint +// (NewPeriodicReader), and ones that an endpoint pulls from. See +// [go.opentelemetry.io/otel/exporters] for exporters that can be used as +// or with these Readers. +// +// Each Reader, when registered with the MeterProvider, can be augmented with a +// View. Views allow users that run OpenTelemetry instrumented code to modify +// the generated data of that instrumentation. +// +// The data generated by a MeterProvider needs to include information about its +// origin. A MeterProvider needs to be configured with a Resource, using the +// WithResource MeterProviderOption, to include this information. This Resource +// should be used to describe the unique runtime environment instrumented code +// is being run on. That way when multiple instances of the code are collected +// at a single endpoint their origin is decipherable. +// +// To avoid leaking memory, the SDK returns the same instrument for calls to +// create new instruments with the same Name, Unit, and Description. +// Importantly, callbacks provided using metric.WithFloat64Callback or +// metric.WithInt64Callback will only apply for the first instrument created +// with a given Name, Unit, and Description. Instead, use +// Meter.RegisterCallback and Registration.Unregister to add and remove +// callbacks without leaking memory. +// +// See [go.opentelemetry.io/otel/metric] for more information about +// the metric API. +// +// See [go.opentelemetry.io/otel/sdk/metric/internal/x] for information about +// the experimental features. +package metric // import "go.opentelemetry.io/otel/sdk/metric" diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/env.go b/vendor/go.opentelemetry.io/otel/sdk/metric/env.go new file mode 100644 index 0000000000..a6c403797f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/env.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "os" + "strconv" + "time" + + "go.opentelemetry.io/otel/internal/global" +) + +// Environment variable names. +const ( + // The time interval (in milliseconds) between the start of two export attempts. + envInterval = "OTEL_METRIC_EXPORT_INTERVAL" + // Maximum allowed time (in milliseconds) to export data. 
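+	// For example, a deployment could set OTEL_METRIC_EXPORT_INTERVAL=30000 and
+	// OTEL_METRIC_EXPORT_TIMEOUT=15000 (illustrative values) to collect every
+	// 30s with a 15s budget per export; envDuration below falls back to the
+	// reader defaults when a value is unset, non-numeric, or not positive.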
+ envTimeout = "OTEL_METRIC_EXPORT_TIMEOUT" +) + +// envDuration returns an environment variable's value as duration in milliseconds if it is exists, +// or the defaultValue if the environment variable is not defined or the value is not valid. +func envDuration(key string, defaultValue time.Duration) time.Duration { + v := os.Getenv(key) + if v == "" { + return defaultValue + } + d, err := strconv.Atoi(v) + if err != nil { + global.Error(err, "parse duration", "environment variable", key, "value", v) + return defaultValue + } + if d <= 0 { + global.Error(errNonPositiveDuration, "non-positive duration", "environment variable", key, "value", v) + return defaultValue + } + return time.Duration(d) * time.Millisecond +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go new file mode 100644 index 0000000000..82619da78e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "os" + "runtime" + "slices" + + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/internal/x" +) + +// reservoirFunc returns the appropriately configured exemplar reservoir +// creation func based on the passed InstrumentKind and user defined +// environment variables. +// +// Note: This will only return non-nil values when the experimental exemplar +// feature is enabled and the OTEL_METRICS_EXEMPLAR_FILTER environment variable +// is not set to always_off. +func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.FilteredReservoir[N] { + if !x.Exemplars.Enabled() { + return nil + } + // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar + const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER" + + var filter exemplar.Filter + + switch os.Getenv(filterEnvKey) { + case "always_on": + filter = exemplar.AlwaysOnFilter + case "always_off": + return exemplar.Drop + case "trace_based": + fallthrough + default: + filter = exemplar.SampledFilter + } + + // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/metrics/sdk.md#exemplar-defaults + // Explicit bucket histogram aggregation with more than 1 bucket will + // use AlignedHistogramBucketExemplarReservoir. + a, ok := agg.(AggregationExplicitBucketHistogram) + if ok && len(a.Boundaries) > 0 { + cp := slices.Clone(a.Boundaries) + return func() exemplar.FilteredReservoir[N] { + bounds := cp + return exemplar.NewFilteredReservoir[N](filter, exemplar.Histogram(bounds)) + } + } + + var n int + if a, ok := agg.(AggregationBase2ExponentialHistogram); ok { + // Base2 Exponential Histogram Aggregation SHOULD use a + // SimpleFixedSizeExemplarReservoir with a reservoir equal to the + // smaller of the maximum number of buckets configured on the + // aggregation or twenty (e.g. min(20, max_buckets)). + n = int(a.MaxSize) + if n > 20 { + n = 20 + } + } else { + // https://github.com/open-telemetry/opentelemetry-specification/blob/e94af89e3d0c01de30127a0f423e912f6cda7bed/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir + // This Exemplar reservoir MAY take a configuration parameter for + // the size of the reservoir. 
If no size configuration is + // provided, the default size MAY be the number of possible + // concurrent threads (e.g. number of CPUs) to help reduce + // contention. Otherwise, a default size of 1 SHOULD be used. + n = runtime.NumCPU() + if n < 1 { + // Should never be the case, but be defensive. + n = 1 + } + } + + return func() exemplar.FilteredReservoir[N] { + return exemplar.NewFilteredReservoir[N](filter, exemplar.FixedSize(n)) + } +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go new file mode 100644 index 0000000000..1a3cccb677 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go @@ -0,0 +1,77 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// ErrExporterShutdown is returned if Export or Shutdown are called after an +// Exporter has been Shutdown. +var ErrExporterShutdown = fmt.Errorf("exporter is shutdown") + +// Exporter handles the delivery of metric data to external receivers. This is +// the final component in the metric push pipeline. +type Exporter interface { + // Temporality returns the Temporality to use for an instrument kind. + // + // This method needs to be concurrent safe with itself and all the other + // Exporter methods. + Temporality(InstrumentKind) metricdata.Temporality + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Aggregation returns the Aggregation to use for an instrument kind. + // + // This method needs to be concurrent safe with itself and all the other + // Exporter methods. + Aggregation(InstrumentKind) Aggregation + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Export serializes and transmits metric data to a receiver. + // + // This is called synchronously, there is no concurrency safety + // requirement. Because of this, it is critical that all timeouts and + // cancellations of the passed context be honored. + // + // All retry logic must be contained in this function. The SDK does not + // implement any retry logic. All errors returned by this function are + // considered unrecoverable and will be reported to a configured error + // Handler. + // + // The passed ResourceMetrics may be reused when the call completes. If an + // exporter needs to hold this data after it returns, it needs to make a + // copy. + Export(context.Context, *metricdata.ResourceMetrics) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // ForceFlush flushes any metric data held by an exporter. + // + // The deadline or cancellation of the passed context must be honored. An + // appropriate error should be returned in these situations. + // + // This method needs to be concurrent safe. + ForceFlush(context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Shutdown flushes all metric data held by an exporter and releases any + // held computational resources. + // + // The deadline or cancellation of the passed context must be honored. An + // appropriate error should be returned in these situations. 
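+	// For example, an implementation whose cleanup may block could watch the
+	// context while flushing (done is an assumed completion channel, purely
+	// illustrative):
+	//
+	//	select {
+	//	case <-done:
+	//	case <-ctx.Done():
+	//		return ctx.Err()
+	//	}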
+ // + // After Shutdown is called, calls to Export will perform no operation and + // instead will return an error indicating the shutdown state. + // + // This method needs to be concurrent safe. + Shutdown(context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go new file mode 100644 index 0000000000..b52a330b3b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go @@ -0,0 +1,347 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate stringer -type=InstrumentKind -trimprefix=InstrumentKind + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "errors" + "fmt" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" +) + +var zeroScope instrumentation.Scope + +// InstrumentKind is the identifier of a group of instruments that all +// performing the same function. +type InstrumentKind uint8 + +const ( + // instrumentKindUndefined is an undefined instrument kind, it should not + // be used by any initialized type. + instrumentKindUndefined InstrumentKind = 0 // nolint:deadcode,varcheck,unused + // InstrumentKindCounter identifies a group of instruments that record + // increasing values synchronously with the code path they are measuring. + InstrumentKindCounter InstrumentKind = 1 + // InstrumentKindUpDownCounter identifies a group of instruments that + // record increasing and decreasing values synchronously with the code path + // they are measuring. + InstrumentKindUpDownCounter InstrumentKind = 2 + // InstrumentKindHistogram identifies a group of instruments that record a + // distribution of values synchronously with the code path they are + // measuring. + InstrumentKindHistogram InstrumentKind = 3 + // InstrumentKindObservableCounter identifies a group of instruments that + // record increasing values in an asynchronous callback. + InstrumentKindObservableCounter InstrumentKind = 4 + // InstrumentKindObservableUpDownCounter identifies a group of instruments + // that record increasing and decreasing values in an asynchronous + // callback. + InstrumentKindObservableUpDownCounter InstrumentKind = 5 + // InstrumentKindObservableGauge identifies a group of instruments that + // record current values in an asynchronous callback. + InstrumentKindObservableGauge InstrumentKind = 6 + // InstrumentKindGauge identifies a group of instruments that record + // instantaneous values synchronously with the code path they are + // measuring. + InstrumentKindGauge InstrumentKind = 7 +) + +type nonComparable [0]func() // nolint: unused // This is indeed used. + +// Instrument describes properties an instrument is created with. +type Instrument struct { + // Name is the human-readable identifier of the instrument. + Name string + // Description describes the purpose of the instrument. + Description string + // Kind defines the functional group of the instrument. + Kind InstrumentKind + // Unit is the unit of measurement recorded by the instrument. + Unit string + // Scope identifies the instrumentation that created the instrument. 
+ Scope instrumentation.Scope + + // Ensure forward compatibility if non-comparable fields need to be added. + nonComparable // nolint: unused +} + +// IsEmpty returns if all Instrument fields are their zero-value. +func (i Instrument) IsEmpty() bool { + return i.Name == "" && + i.Description == "" && + i.Kind == instrumentKindUndefined && + i.Unit == "" && + i.Scope == zeroScope +} + +// matches returns whether all the non-zero-value fields of i match the +// corresponding fields of other. If i is empty it will match all other, and +// true will always be returned. +func (i Instrument) matches(other Instrument) bool { + return i.matchesName(other) && + i.matchesDescription(other) && + i.matchesKind(other) && + i.matchesUnit(other) && + i.matchesScope(other) +} + +// matchesName returns true if the Name of i is "" or it equals the Name of +// other, otherwise false. +func (i Instrument) matchesName(other Instrument) bool { + return i.Name == "" || i.Name == other.Name +} + +// matchesDescription returns true if the Description of i is "" or it equals +// the Description of other, otherwise false. +func (i Instrument) matchesDescription(other Instrument) bool { + return i.Description == "" || i.Description == other.Description +} + +// matchesKind returns true if the Kind of i is its zero-value or it equals the +// Kind of other, otherwise false. +func (i Instrument) matchesKind(other Instrument) bool { + return i.Kind == instrumentKindUndefined || i.Kind == other.Kind +} + +// matchesUnit returns true if the Unit of i is its zero-value or it equals the +// Unit of other, otherwise false. +func (i Instrument) matchesUnit(other Instrument) bool { + return i.Unit == "" || i.Unit == other.Unit +} + +// matchesScope returns true if the Scope of i is its zero-value or it equals +// the Scope of other, otherwise false. +func (i Instrument) matchesScope(other Instrument) bool { + return (i.Scope.Name == "" || i.Scope.Name == other.Scope.Name) && + (i.Scope.Version == "" || i.Scope.Version == other.Scope.Version) && + (i.Scope.SchemaURL == "" || i.Scope.SchemaURL == other.Scope.SchemaURL) +} + +// Stream describes the stream of data an instrument produces. +type Stream struct { + // Name is the human-readable identifier of the stream. + Name string + // Description describes the purpose of the data. + Description string + // Unit is the unit of measurement recorded. + Unit string + // Aggregation the stream uses for an instrument. + Aggregation Aggregation + // AttributeFilter is an attribute Filter applied to the attributes + // recorded for an instrument's measurement. If the filter returns false + // the attribute will not be recorded, otherwise, if it returns true, it + // will record the attribute. + // + // Use NewAllowKeysFilter from "go.opentelemetry.io/otel/attribute" to + // provide an allow-list of attribute keys here. + AttributeFilter attribute.Filter +} + +// instID are the identifying properties of a instrument. +type instID struct { + // Name is the name of the stream. + Name string + // Description is the description of the stream. + Description string + // Kind defines the functional group of the instrument. + Kind InstrumentKind + // Unit is the unit of the stream. + Unit string + // Number is the number type of the stream. + Number string +} + +// Returns a normalized copy of the instID i. +// +// Instrument names are considered case-insensitive. 
Standardize the instrument +// name to always be lowercase for the returned instID so it can be compared +// without the name casing affecting the comparison. +func (i instID) normalize() instID { + i.Name = strings.ToLower(i.Name) + return i +} + +type int64Inst struct { + measures []aggregate.Measure[int64] + + embedded.Int64Counter + embedded.Int64UpDownCounter + embedded.Int64Histogram + embedded.Int64Gauge +} + +var ( + _ metric.Int64Counter = (*int64Inst)(nil) + _ metric.Int64UpDownCounter = (*int64Inst)(nil) + _ metric.Int64Histogram = (*int64Inst)(nil) + _ metric.Int64Gauge = (*int64Inst)(nil) +) + +func (i *int64Inst) Add(ctx context.Context, val int64, opts ...metric.AddOption) { + c := metric.NewAddConfig(opts) + i.aggregate(ctx, val, c.Attributes()) +} + +func (i *int64Inst) Record(ctx context.Context, val int64, opts ...metric.RecordOption) { + c := metric.NewRecordConfig(opts) + i.aggregate(ctx, val, c.Attributes()) +} + +func (i *int64Inst) aggregate(ctx context.Context, val int64, s attribute.Set) { // nolint:revive // okay to shadow pkg with method. + for _, in := range i.measures { + in(ctx, val, s) + } +} + +type float64Inst struct { + measures []aggregate.Measure[float64] + + embedded.Float64Counter + embedded.Float64UpDownCounter + embedded.Float64Histogram + embedded.Float64Gauge +} + +var ( + _ metric.Float64Counter = (*float64Inst)(nil) + _ metric.Float64UpDownCounter = (*float64Inst)(nil) + _ metric.Float64Histogram = (*float64Inst)(nil) + _ metric.Float64Gauge = (*float64Inst)(nil) +) + +func (i *float64Inst) Add(ctx context.Context, val float64, opts ...metric.AddOption) { + c := metric.NewAddConfig(opts) + i.aggregate(ctx, val, c.Attributes()) +} + +func (i *float64Inst) Record(ctx context.Context, val float64, opts ...metric.RecordOption) { + c := metric.NewRecordConfig(opts) + i.aggregate(ctx, val, c.Attributes()) +} + +func (i *float64Inst) aggregate(ctx context.Context, val float64, s attribute.Set) { + for _, in := range i.measures { + in(ctx, val, s) + } +} + +// observablID is a comparable unique identifier of an observable. 
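+//
+// Observable instruments are measured from registered callbacks rather than
+// at call sites. A usage sketch against the public API (the meter, the
+// instrument name, and readQueueLength are illustrative assumptions):
+//
+//	gauge, _ := meter.Int64ObservableGauge("queue.length")
+//	reg, _ := meter.RegisterCallback(func(ctx context.Context, o metric.Observer) error {
+//		o.ObserveInt64(gauge, readQueueLength())
+//		return nil
+//	}, gauge)
+//	defer reg.Unregister()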
+type observablID[N int64 | float64] struct { + name string + description string + kind InstrumentKind + unit string + scope instrumentation.Scope +} + +type float64Observable struct { + metric.Float64Observable + *observable[float64] + + embedded.Float64ObservableCounter + embedded.Float64ObservableUpDownCounter + embedded.Float64ObservableGauge +} + +var ( + _ metric.Float64ObservableCounter = float64Observable{} + _ metric.Float64ObservableUpDownCounter = float64Observable{} + _ metric.Float64ObservableGauge = float64Observable{} +) + +func newFloat64Observable(m *meter, kind InstrumentKind, name, desc, u string) float64Observable { + return float64Observable{ + observable: newObservable[float64](m, kind, name, desc, u), + } +} + +type int64Observable struct { + metric.Int64Observable + *observable[int64] + + embedded.Int64ObservableCounter + embedded.Int64ObservableUpDownCounter + embedded.Int64ObservableGauge +} + +var ( + _ metric.Int64ObservableCounter = int64Observable{} + _ metric.Int64ObservableUpDownCounter = int64Observable{} + _ metric.Int64ObservableGauge = int64Observable{} +) + +func newInt64Observable(m *meter, kind InstrumentKind, name, desc, u string) int64Observable { + return int64Observable{ + observable: newObservable[int64](m, kind, name, desc, u), + } +} + +type observable[N int64 | float64] struct { + metric.Observable + observablID[N] + + meter *meter + measures measures[N] + dropAggregation bool +} + +func newObservable[N int64 | float64](m *meter, kind InstrumentKind, name, desc, u string) *observable[N] { + return &observable[N]{ + observablID: observablID[N]{ + name: name, + description: desc, + kind: kind, + unit: u, + scope: m.scope, + }, + meter: m, + } +} + +// observe records the val for the set of attrs. +func (o *observable[N]) observe(val N, s attribute.Set) { + o.measures.observe(val, s) +} + +func (o *observable[N]) appendMeasures(meas []aggregate.Measure[N]) { + o.measures = append(o.measures, meas...) +} + +type measures[N int64 | float64] []aggregate.Measure[N] + +// observe records the val for the set of attrs. +func (m measures[N]) observe(val N, s attribute.Set) { + for _, in := range m { + in(context.Background(), val, s) + } +} + +var errEmptyAgg = errors.New("no aggregators for observable instrument") + +// registerable returns an error if the observable o should not be registered, +// and nil if it should. An errEmptyAgg error is returned if o is effectively a +// no-op because it does not have any aggregators. Also, an error is returned +// if scope defines a Meter other than the one o was created by. +func (o *observable[N]) registerable(m *meter) error { + if len(o.measures) == 0 { + return errEmptyAgg + } + if m != o.meter { + return fmt.Errorf( + "invalid registration: observable %q from Meter %q, registered with Meter %q", + o.name, + o.scope.Name, + m.scope.Name, + ) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go new file mode 100644 index 0000000000..25ea6244e5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go @@ -0,0 +1,30 @@ +// Code generated by "stringer -type=InstrumentKind -trimprefix=InstrumentKind"; DO NOT EDIT. + +package metric + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[instrumentKindUndefined-0] + _ = x[InstrumentKindCounter-1] + _ = x[InstrumentKindUpDownCounter-2] + _ = x[InstrumentKindHistogram-3] + _ = x[InstrumentKindObservableCounter-4] + _ = x[InstrumentKindObservableUpDownCounter-5] + _ = x[InstrumentKindObservableGauge-6] + _ = x[InstrumentKindGauge-7] +} + +const _InstrumentKind_name = "instrumentKindUndefinedCounterUpDownCounterHistogramObservableCounterObservableUpDownCounterObservableGaugeGauge" + +var _InstrumentKind_index = [...]uint8{0, 23, 30, 43, 52, 69, 92, 107, 112} + +func (i InstrumentKind) String() string { + if i >= InstrumentKind(len(_InstrumentKind_index)-1) { + return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go new file mode 100644 index 0000000000..b18ee719bd --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go @@ -0,0 +1,154 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// now is used to return the current local time while allowing tests to +// override the default time.Now function. +var now = time.Now + +// Measure receives measurements to be aggregated. +type Measure[N int64 | float64] func(context.Context, N, attribute.Set) + +// ComputeAggregation stores the aggregate of measurements into dest and +// returns the number of aggregate data-points output. +type ComputeAggregation func(dest *metricdata.Aggregation) int + +// Builder builds an aggregate function. +type Builder[N int64 | float64] struct { + // Temporality is the temporality used for the returned aggregate function. + // + // If this is not provided a default of cumulative will be used (except for + // the last-value aggregate function where delta is the only appropriate + // temporality). + Temporality metricdata.Temporality + // Filter is the attribute filter the aggregate function will use on the + // input of measurements. + Filter attribute.Filter + // ReservoirFunc is the factory function used by aggregate functions to + // create new exemplar reservoirs for a new seen attribute set. + // + // If this is not provided a default factory function that returns an + // exemplar.Drop reservoir will be used. + ReservoirFunc func() exemplar.FilteredReservoir[N] + // AggregationLimit is the cardinality limit of measurement attributes. Any + // measurement for new attributes once the limit has been reached will be + // aggregated into a single aggregate for the "otel.metric.overflow" + // attribute. + // + // If AggregationLimit is less than or equal to zero there will not be an + // aggregation limit imposed (i.e. unlimited attribute sets). 
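+	// For example, with an AggregationLimit of 1000 (an illustrative value) an
+	// instrument tracks at most 999 distinct attribute sets; any further new
+	// sets are folded into a single data point recorded under the
+	// otel.metric.overflow=true attribute.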
+ AggregationLimit int +} + +func (b Builder[N]) resFunc() func() exemplar.FilteredReservoir[N] { + if b.ReservoirFunc != nil { + return b.ReservoirFunc + } + + return exemplar.Drop +} + +type fltrMeasure[N int64 | float64] func(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) + +func (b Builder[N]) filter(f fltrMeasure[N]) Measure[N] { + if b.Filter != nil { + fltr := b.Filter // Copy to make it immutable after assignment. + return func(ctx context.Context, n N, a attribute.Set) { + fAttr, dropped := a.Filter(fltr) + f(ctx, n, fAttr, dropped) + } + } + return func(ctx context.Context, n N, a attribute.Set) { + f(ctx, n, a, nil) + } +} + +// LastValue returns a last-value aggregate function input and output. +func (b Builder[N]) LastValue() (Measure[N], ComputeAggregation) { + lv := newLastValue[N](b.AggregationLimit, b.resFunc()) + switch b.Temporality { + case metricdata.DeltaTemporality: + return b.filter(lv.measure), lv.delta + default: + return b.filter(lv.measure), lv.cumulative + } +} + +// PrecomputedLastValue returns a last-value aggregate function input and +// output. The aggregation returned from the returned ComputeAggregation +// function will always only return values from the previous collection cycle. +func (b Builder[N]) PrecomputedLastValue() (Measure[N], ComputeAggregation) { + lv := newPrecomputedLastValue[N](b.AggregationLimit, b.resFunc()) + switch b.Temporality { + case metricdata.DeltaTemporality: + return b.filter(lv.measure), lv.delta + default: + return b.filter(lv.measure), lv.cumulative + } +} + +// PrecomputedSum returns a sum aggregate function input and output. The +// arguments passed to the input are expected to be the precomputed sum values. +func (b Builder[N]) PrecomputedSum(monotonic bool) (Measure[N], ComputeAggregation) { + s := newPrecomputedSum[N](monotonic, b.AggregationLimit, b.resFunc()) + switch b.Temporality { + case metricdata.DeltaTemporality: + return b.filter(s.measure), s.delta + default: + return b.filter(s.measure), s.cumulative + } +} + +// Sum returns a sum aggregate function input and output. +func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) { + s := newSum[N](monotonic, b.AggregationLimit, b.resFunc()) + switch b.Temporality { + case metricdata.DeltaTemporality: + return b.filter(s.measure), s.delta + default: + return b.filter(s.measure), s.cumulative + } +} + +// ExplicitBucketHistogram returns a histogram aggregate function input and +// output. +func (b Builder[N]) ExplicitBucketHistogram(boundaries []float64, noMinMax, noSum bool) (Measure[N], ComputeAggregation) { + h := newHistogram[N](boundaries, noMinMax, noSum, b.AggregationLimit, b.resFunc()) + switch b.Temporality { + case metricdata.DeltaTemporality: + return b.filter(h.measure), h.delta + default: + return b.filter(h.measure), h.cumulative + } +} + +// ExponentialBucketHistogram returns a histogram aggregate function input and +// output. +func (b Builder[N]) ExponentialBucketHistogram(maxSize, maxScale int32, noMinMax, noSum bool) (Measure[N], ComputeAggregation) { + h := newExponentialHistogram[N](maxSize, maxScale, noMinMax, noSum, b.AggregationLimit, b.resFunc()) + switch b.Temporality { + case metricdata.DeltaTemporality: + return b.filter(h.measure), h.delta + default: + return b.filter(h.measure), h.cumulative + } +} + +// reset ensures s has capacity and sets it length. If the capacity of s too +// small, a new slice is returned with the specified capacity and length. 
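+//
+// For example, reset(s, 2, 8) returns a freshly allocated slice of length 2
+// and capacity 8 when cap(s) < 8, and otherwise simply reslices s to length 2,
+// reusing its backing array.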
+func reset[T any](s []T, length, capacity int) []T { + if cap(s) < capacity { + return make([]T, length, capacity) + } + return s[:length] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go new file mode 100644 index 0000000000..7b7225e6ef --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go @@ -0,0 +1,7 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package aggregate provides aggregate types used compute aggregations and +// cycle the state of metric measurements made by the SDK. These types and +// functionality are meant only for internal SDK use. +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go new file mode 100644 index 0000000000..170ae8e58e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "sync" + + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +var exemplarPool = sync.Pool{ + New: func() any { return new([]exemplar.Exemplar) }, +} + +func collectExemplars[N int64 | float64](out *[]metricdata.Exemplar[N], f func(*[]exemplar.Exemplar)) { + dest := exemplarPool.Get().(*[]exemplar.Exemplar) + defer func() { + *dest = (*dest)[:0] + exemplarPool.Put(dest) + }() + + *dest = reset(*dest, len(*out), cap(*out)) + + f(dest) + + *out = reset(*out, len(*dest), cap(*dest)) + for i, e := range *dest { + (*out)[i].FilteredAttributes = e.FilteredAttributes + (*out)[i].Time = e.Time + (*out)[i].SpanID = e.SpanID + (*out)[i].TraceID = e.TraceID + + switch e.Value.Type() { + case exemplar.Int64ValueType: + (*out)[i].Value = N(e.Value.Int64()) + case exemplar.Float64ValueType: + (*out)[i].Value = N(e.Value.Float64()) + } + } +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go new file mode 100644 index 0000000000..707342408a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go @@ -0,0 +1,444 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "context" + "errors" + "math" + "sync" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +const ( + expoMaxScale = 20 + expoMinScale = -10 + + smallestNonZeroNormalFloat64 = 0x1p-1022 + + // These redefine the Math constants with a type, so the compiler won't coerce + // them into an int on 32 bit platforms. + maxInt64 int64 = math.MaxInt64 + minInt64 int64 = math.MinInt64 +) + +// expoHistogramDataPoint is a single data point in an exponential histogram. 
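+//
+// At scale s the bucket boundaries are powers of 2^(2^-s): bucket index i
+// covers (2^(i/2^s), 2^((i+1)/2^s)]. For instance, at scale 0 a measurement of
+// 6 lands in bucket 2, which covers (4, 8], and a measurement of exactly 8
+// lands in that same bucket because upper bounds are inclusive.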
+type expoHistogramDataPoint[N int64 | float64] struct { + attrs attribute.Set + res exemplar.FilteredReservoir[N] + + count uint64 + min N + max N + sum N + + maxSize int + noMinMax bool + noSum bool + + scale int32 + + posBuckets expoBuckets + negBuckets expoBuckets + zeroCount uint64 +} + +func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize int, maxScale int32, noMinMax, noSum bool) *expoHistogramDataPoint[N] { + f := math.MaxFloat64 + max := N(f) // if N is int64, max will overflow to -9223372036854775808 + min := N(-f) + if N(maxInt64) > N(f) { + max = N(maxInt64) + min = N(minInt64) + } + return &expoHistogramDataPoint[N]{ + attrs: attrs, + min: max, + max: min, + maxSize: maxSize, + noMinMax: noMinMax, + noSum: noSum, + scale: maxScale, + } +} + +// record adds a new measurement to the histogram. It will rescale the buckets if needed. +func (p *expoHistogramDataPoint[N]) record(v N) { + p.count++ + + if !p.noMinMax { + if v < p.min { + p.min = v + } + if v > p.max { + p.max = v + } + } + if !p.noSum { + p.sum += v + } + + absV := math.Abs(float64(v)) + + if float64(absV) == 0.0 { + p.zeroCount++ + return + } + + bin := p.getBin(absV) + + bucket := &p.posBuckets + if v < 0 { + bucket = &p.negBuckets + } + + // If the new bin would make the counts larger than maxScale, we need to + // downscale current measurements. + if scaleDelta := p.scaleChange(bin, bucket.startBin, len(bucket.counts)); scaleDelta > 0 { + if p.scale-scaleDelta < expoMinScale { + // With a scale of -10 there is only two buckets for the whole range of float64 values. + // This can only happen if there is a max size of 1. + otel.Handle(errors.New("exponential histogram scale underflow")) + return + } + // Downscale + p.scale -= scaleDelta + p.posBuckets.downscale(scaleDelta) + p.negBuckets.downscale(scaleDelta) + + bin = p.getBin(absV) + } + + bucket.record(bin) +} + +// getBin returns the bin v should be recorded into. +func (p *expoHistogramDataPoint[N]) getBin(v float64) int32 { + frac, expInt := math.Frexp(v) + // 11-bit exponential. + exp := int32(expInt) // nolint: gosec + if p.scale <= 0 { + // Because of the choice of fraction is always 1 power of two higher than we want. + var correction int32 = 1 + if frac == .5 { + // If v is an exact power of two the frac will be .5 and the exp + // will be one higher than we want. + correction = 2 + } + return (exp - correction) >> (-p.scale) + } + return exp<= bin { + low = int(bin) + high = int(startBin) + length - 1 + } + + var count int32 + for high-low >= p.maxSize { + low = low >> 1 + high = high >> 1 + count++ + if count > expoMaxScale-expoMinScale { + return count + } + } + return count +} + +// expoBuckets is a set of buckets in an exponential histogram. +type expoBuckets struct { + startBin int32 + counts []uint64 +} + +// record increments the count for the given bin, and expands the buckets if needed. +// Size changes must be done before calling this function. 
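+//
+// For example, with startBin = -1 and counts = [1, 2] (bins -1 and 0),
+// recording bin 2 grows counts to [1, 2, 0, 1], so bins -1 through 2 are
+// covered and the newly added bin holds a count of 1.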
+func (b *expoBuckets) record(bin int32) { + if len(b.counts) == 0 { + b.counts = []uint64{1} + b.startBin = bin + return + } + + endBin := int(b.startBin) + len(b.counts) - 1 + + // if the new bin is inside the current range + if bin >= b.startBin && int(bin) <= endBin { + b.counts[bin-b.startBin]++ + return + } + // if the new bin is before the current start add spaces to the counts + if bin < b.startBin { + origLen := len(b.counts) + newLength := endBin - int(bin) + 1 + shift := b.startBin - bin + + if newLength > cap(b.counts) { + b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...) + } + + copy(b.counts[shift:origLen+int(shift)], b.counts[:]) + b.counts = b.counts[:newLength] + for i := 1; i < int(shift); i++ { + b.counts[i] = 0 + } + b.startBin = bin + b.counts[0] = 1 + return + } + // if the new is after the end add spaces to the end + if int(bin) > endBin { + if int(bin-b.startBin) < cap(b.counts) { + b.counts = b.counts[:bin-b.startBin+1] + for i := endBin + 1 - int(b.startBin); i < len(b.counts); i++ { + b.counts[i] = 0 + } + b.counts[bin-b.startBin] = 1 + return + } + + end := make([]uint64, int(bin-b.startBin)-len(b.counts)+1) + b.counts = append(b.counts, end...) + b.counts[bin-b.startBin] = 1 + } +} + +// downscale shrinks a bucket by a factor of 2*s. It will sum counts into the +// correct lower resolution bucket. +func (b *expoBuckets) downscale(delta int32) { + // Example + // delta = 2 + // Original offset: -6 + // Counts: [ 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + // bins: -6 -5, -4, -3, -2, -1, 0, 1, 2, 3, 4 + // new bins:-2, -2, -1, -1, -1, -1, 0, 0, 0, 0, 1 + // new Offset: -2 + // new Counts: [4, 14, 30, 10] + + if len(b.counts) <= 1 || delta < 1 { + b.startBin = b.startBin >> delta + return + } + + steps := int32(1) << delta + offset := b.startBin % steps + offset = (offset + steps) % steps // to make offset positive + for i := 1; i < len(b.counts); i++ { + idx := i + int(offset) + if idx%int(steps) == 0 { + b.counts[idx/int(steps)] = b.counts[i] + continue + } + b.counts[idx/int(steps)] += b.counts[i] + } + + lastIdx := (len(b.counts) - 1 + int(offset)) / int(steps) + b.counts = b.counts[:lastIdx+1] + b.startBin = b.startBin >> delta +} + +// newExponentialHistogram returns an Aggregator that summarizes a set of +// measurements as an exponential histogram. Each histogram is scoped by attributes +// and the aggregation cycle the measurements were made in. +func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *expoHistogram[N] { + return &expoHistogram[N]{ + noSum: noSum, + noMinMax: noMinMax, + maxSize: int(maxSize), + maxScale: maxScale, + + newRes: r, + limit: newLimiter[*expoHistogramDataPoint[N]](limit), + values: make(map[attribute.Distinct]*expoHistogramDataPoint[N]), + + start: now(), + } +} + +// expoHistogram summarizes a set of measurements as an histogram with exponentially +// defined buckets. +type expoHistogram[N int64 | float64] struct { + noSum bool + noMinMax bool + maxSize int + maxScale int32 + + newRes func() exemplar.FilteredReservoir[N] + limit limiter[*expoHistogramDataPoint[N]] + values map[attribute.Distinct]*expoHistogramDataPoint[N] + valuesMu sync.Mutex + + start time.Time +} + +func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { + // Ignore NaN and infinity. 
+ if math.IsInf(float64(value), 0) || math.IsNaN(float64(value)) { + return + } + + e.valuesMu.Lock() + defer e.valuesMu.Unlock() + + attr := e.limit.Attributes(fltrAttr, e.values) + v, ok := e.values[attr.Equivalent()] + if !ok { + v = newExpoHistogramDataPoint[N](attr, e.maxSize, e.maxScale, e.noMinMax, e.noSum) + v.res = e.newRes() + + e.values[attr.Equivalent()] = v + } + v.record(value) + v.res.Offer(ctx, value, droppedAttr) +} + +func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int { + t := now() + + // If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed. + // In that case, use the zero-value h and hope for better alignment next cycle. + h, _ := (*dest).(metricdata.ExponentialHistogram[N]) + h.Temporality = metricdata.DeltaTemporality + + e.valuesMu.Lock() + defer e.valuesMu.Unlock() + + n := len(e.values) + hDPts := reset(h.DataPoints, n, n) + + var i int + for _, val := range e.values { + hDPts[i].Attributes = val.attrs + hDPts[i].StartTime = e.start + hDPts[i].Time = t + hDPts[i].Count = val.count + hDPts[i].Scale = val.scale + hDPts[i].ZeroCount = val.zeroCount + hDPts[i].ZeroThreshold = 0.0 + + hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin + hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts)) + copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts) + + hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin + hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts)) + copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts) + + if !e.noSum { + hDPts[i].Sum = val.sum + } + if !e.noMinMax { + hDPts[i].Min = metricdata.NewExtrema(val.min) + hDPts[i].Max = metricdata.NewExtrema(val.max) + } + + collectExemplars(&hDPts[i].Exemplars, val.res.Collect) + + i++ + } + // Unused attribute sets do not report. + clear(e.values) + + e.start = t + h.DataPoints = hDPts + *dest = h + return n +} + +func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int { + t := now() + + // If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed. + // In that case, use the zero-value h and hope for better alignment next cycle. 
+ h, _ := (*dest).(metricdata.ExponentialHistogram[N]) + h.Temporality = metricdata.CumulativeTemporality + + e.valuesMu.Lock() + defer e.valuesMu.Unlock() + + n := len(e.values) + hDPts := reset(h.DataPoints, n, n) + + var i int + for _, val := range e.values { + hDPts[i].Attributes = val.attrs + hDPts[i].StartTime = e.start + hDPts[i].Time = t + hDPts[i].Count = val.count + hDPts[i].Scale = val.scale + hDPts[i].ZeroCount = val.zeroCount + hDPts[i].ZeroThreshold = 0.0 + + hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin + hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts)) + copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts) + + hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin + hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts)) + copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts) + + if !e.noSum { + hDPts[i].Sum = val.sum + } + if !e.noMinMax { + hDPts[i].Min = metricdata.NewExtrema(val.min) + hDPts[i].Max = metricdata.NewExtrema(val.max) + } + + collectExemplars(&hDPts[i].Exemplars, val.res.Collect) + + i++ + // TODO (#3006): This will use an unbounded amount of memory if there + // are unbounded number of attribute sets being aggregated. Attribute + // sets that become "stale" need to be forgotten so this will not + // overload the system. + } + + h.DataPoints = hDPts + *dest = h + return n +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go new file mode 100644 index 0000000000..ade0941f5f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go @@ -0,0 +1,233 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "context" + "slices" + "sort" + "sync" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +type buckets[N int64 | float64] struct { + attrs attribute.Set + res exemplar.FilteredReservoir[N] + + counts []uint64 + count uint64 + total N + min, max N +} + +// newBuckets returns buckets with n bins. +func newBuckets[N int64 | float64](attrs attribute.Set, n int) *buckets[N] { + return &buckets[N]{attrs: attrs, counts: make([]uint64, n)} +} + +func (b *buckets[N]) sum(value N) { b.total += value } + +func (b *buckets[N]) bin(idx int, value N) { + b.counts[idx]++ + b.count++ + if value < b.min { + b.min = value + } else if value > b.max { + b.max = value + } +} + +// histValues summarizes a set of measurements as an histValues with +// explicitly defined buckets. +type histValues[N int64 | float64] struct { + noSum bool + bounds []float64 + + newRes func() exemplar.FilteredReservoir[N] + limit limiter[*buckets[N]] + values map[attribute.Distinct]*buckets[N] + valuesMu sync.Mutex +} + +func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histValues[N] { + // The responsibility of keeping all buckets correctly associated with the + // passed boundaries is ultimately this type's responsibility. Make a copy + // here so we can always guarantee this. Or, in the case of failure, have + // complete control over the fix. 
+ b := slices.Clone(bounds) + slices.Sort(b) + return &histValues[N]{ + noSum: noSum, + bounds: b, + newRes: r, + limit: newLimiter[*buckets[N]](limit), + values: make(map[attribute.Distinct]*buckets[N]), + } +} + +// Aggregate records the measurement value, scoped by attr, and aggregates it +// into a histogram. +func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { + // This search will return an index in the range [0, len(s.bounds)], where + // it will return len(s.bounds) if value is greater than the last element + // of s.bounds. This aligns with the buckets in that the length of buckets + // is len(s.bounds)+1, with the last bucket representing: + // (s.bounds[len(s.bounds)-1], +∞). + idx := sort.SearchFloat64s(s.bounds, float64(value)) + + s.valuesMu.Lock() + defer s.valuesMu.Unlock() + + attr := s.limit.Attributes(fltrAttr, s.values) + b, ok := s.values[attr.Equivalent()] + if !ok { + // N+1 buckets. For example: + // + // bounds = [0, 5, 10] + // + // Then, + // + // buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞) + b = newBuckets[N](attr, len(s.bounds)+1) + b.res = s.newRes() + + // Ensure min and max are recorded values (not zero), for new buckets. + b.min, b.max = value, value + s.values[attr.Equivalent()] = b + } + b.bin(idx, value) + if !s.noSum { + b.sum(value) + } + b.res.Offer(ctx, value, droppedAttr) +} + +// newHistogram returns an Aggregator that summarizes a set of measurements as +// an histogram. +func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histogram[N] { + return &histogram[N]{ + histValues: newHistValues[N](boundaries, noSum, limit, r), + noMinMax: noMinMax, + start: now(), + } +} + +// histogram summarizes a set of measurements as an histogram with explicitly +// defined buckets. +type histogram[N int64 | float64] struct { + *histValues[N] + + noMinMax bool + start time.Time +} + +func (s *histogram[N]) delta(dest *metricdata.Aggregation) int { + t := now() + + // If *dest is not a metricdata.Histogram, memory reuse is missed. In that + // case, use the zero-value h and hope for better alignment next cycle. + h, _ := (*dest).(metricdata.Histogram[N]) + h.Temporality = metricdata.DeltaTemporality + + s.valuesMu.Lock() + defer s.valuesMu.Unlock() + + // Do not allow modification of our copy of bounds. + bounds := slices.Clone(s.bounds) + + n := len(s.values) + hDPts := reset(h.DataPoints, n, n) + + var i int + for _, val := range s.values { + hDPts[i].Attributes = val.attrs + hDPts[i].StartTime = s.start + hDPts[i].Time = t + hDPts[i].Count = val.count + hDPts[i].Bounds = bounds + hDPts[i].BucketCounts = val.counts + + if !s.noSum { + hDPts[i].Sum = val.total + } + + if !s.noMinMax { + hDPts[i].Min = metricdata.NewExtrema(val.min) + hDPts[i].Max = metricdata.NewExtrema(val.max) + } + + collectExemplars(&hDPts[i].Exemplars, val.res.Collect) + + i++ + } + // Unused attribute sets do not report. + clear(s.values) + // The delta collection cycle resets. + s.start = t + + h.DataPoints = hDPts + *dest = h + + return n +} + +func (s *histogram[N]) cumulative(dest *metricdata.Aggregation) int { + t := now() + + // If *dest is not a metricdata.Histogram, memory reuse is missed. In that + // case, use the zero-value h and hope for better alignment next cycle. 
+ h, _ := (*dest).(metricdata.Histogram[N]) + h.Temporality = metricdata.CumulativeTemporality + + s.valuesMu.Lock() + defer s.valuesMu.Unlock() + + // Do not allow modification of our copy of bounds. + bounds := slices.Clone(s.bounds) + + n := len(s.values) + hDPts := reset(h.DataPoints, n, n) + + var i int + for _, val := range s.values { + hDPts[i].Attributes = val.attrs + hDPts[i].StartTime = s.start + hDPts[i].Time = t + hDPts[i].Count = val.count + hDPts[i].Bounds = bounds + + // The HistogramDataPoint field values returned need to be copies of + // the buckets value as we will keep updating them. + // + // TODO (#3047): Making copies for bounds and counts incurs a large + // memory allocation footprint. Alternatives should be explored. + hDPts[i].BucketCounts = slices.Clone(val.counts) + + if !s.noSum { + hDPts[i].Sum = val.total + } + + if !s.noMinMax { + hDPts[i].Min = metricdata.NewExtrema(val.min) + hDPts[i].Max = metricdata.NewExtrema(val.max) + } + + collectExemplars(&hDPts[i].Exemplars, val.res.Collect) + + i++ + // TODO (#3006): This will use an unbounded amount of memory if there + // are unbounded number of attribute sets being aggregated. Attribute + // sets that become "stale" need to be forgotten so this will not + // overload the system. + } + + h.DataPoints = hDPts + *dest = h + + return n +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go new file mode 100644 index 0000000000..c359368403 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go @@ -0,0 +1,162 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "context" + "sync" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// datapoint is timestamped measurement data. +type datapoint[N int64 | float64] struct { + attrs attribute.Set + value N + res exemplar.FilteredReservoir[N] +} + +func newLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *lastValue[N] { + return &lastValue[N]{ + newRes: r, + limit: newLimiter[datapoint[N]](limit), + values: make(map[attribute.Distinct]datapoint[N]), + start: now(), + } +} + +// lastValue summarizes a set of measurements as the last one made. +type lastValue[N int64 | float64] struct { + sync.Mutex + + newRes func() exemplar.FilteredReservoir[N] + limit limiter[datapoint[N]] + values map[attribute.Distinct]datapoint[N] + start time.Time +} + +func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { + s.Lock() + defer s.Unlock() + + attr := s.limit.Attributes(fltrAttr, s.values) + d, ok := s.values[attr.Equivalent()] + if !ok { + d.res = s.newRes() + } + + d.attrs = attr + d.value = value + d.res.Offer(ctx, value, droppedAttr) + + s.values[attr.Equivalent()] = d +} + +func (s *lastValue[N]) delta(dest *metricdata.Aggregation) int { + t := now() + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + + s.Lock() + defer s.Unlock() + + n := s.copyDpts(&gData.DataPoints, t) + // Do not report stale values. + clear(s.values) + // Update start time for delta temporality. 
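+	//
+	// Illustrative delta behaviour for a single attribute set (hypothetical
+	// values; each arrow is one collection cycle):
+	//
+	//	measure(5); delta() -> 1 data point, Value 5
+	//	            delta() -> 0 data points (stale value was cleared above)
+	//	measure(7); delta() -> 1 data point, Value 7, StartTime = previous cycle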
+ s.start = t + + *dest = gData + + return n +} + +func (s *lastValue[N]) cumulative(dest *metricdata.Aggregation) int { + t := now() + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + + s.Lock() + defer s.Unlock() + + n := s.copyDpts(&gData.DataPoints, t) + // TODO (#3006): This will use an unbounded amount of memory if there + // are unbounded number of attribute sets being aggregated. Attribute + // sets that become "stale" need to be forgotten so this will not + // overload the system. + *dest = gData + + return n +} + +// copyDpts copies the datapoints held by s into dest. The number of datapoints +// copied is returned. +func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N], t time.Time) int { + n := len(s.values) + *dest = reset(*dest, n, n) + + var i int + for _, v := range s.values { + (*dest)[i].Attributes = v.attrs + (*dest)[i].StartTime = s.start + (*dest)[i].Time = t + (*dest)[i].Value = v.value + collectExemplars(&(*dest)[i].Exemplars, v.res.Collect) + i++ + } + return n +} + +// newPrecomputedLastValue returns an aggregator that summarizes a set of +// observations as the last one made. +func newPrecomputedLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *precomputedLastValue[N] { + return &precomputedLastValue[N]{lastValue: newLastValue[N](limit, r)} +} + +// precomputedLastValue summarizes a set of observations as the last one made. +type precomputedLastValue[N int64 | float64] struct { + *lastValue[N] +} + +func (s *precomputedLastValue[N]) delta(dest *metricdata.Aggregation) int { + t := now() + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + + s.Lock() + defer s.Unlock() + + n := s.copyDpts(&gData.DataPoints, t) + // Do not report stale values. + clear(s.values) + // Update start time for delta temporality. + s.start = t + + *dest = gData + + return n +} + +func (s *precomputedLastValue[N]) cumulative(dest *metricdata.Aggregation) int { + t := now() + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + + s.Lock() + defer s.Unlock() + + n := s.copyDpts(&gData.DataPoints, t) + // Do not report stale values. + clear(s.values) + *dest = gData + + return n +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go new file mode 100644 index 0000000000..9ea0251edd --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import "go.opentelemetry.io/otel/attribute" + +// overflowSet is the attribute set used to record a measurement when adding +// another distinct attribute set to the aggregate would exceed the aggregate +// limit. +var overflowSet = attribute.NewSet(attribute.Bool("otel.metric.overflow", true)) + +// limiter limits aggregate values. +type limiter[V any] struct { + // aggLimit is the maximum number of metric streams that can be aggregated. 
+ // + // Any metric stream with attributes distinct from any set already + // aggregated once the aggLimit will be meet will instead be aggregated + // into an "overflow" metric stream. That stream will only contain the + // "otel.metric.overflow"=true attribute. + aggLimit int +} + +// newLimiter returns a new Limiter with the provided aggregation limit. +func newLimiter[V any](aggregation int) limiter[V] { + return limiter[V]{aggLimit: aggregation} +} + +// Attributes checks if adding a measurement for attrs will exceed the +// aggregation cardinality limit for the existing measurements. If it will, +// overflowSet is returned. Otherwise, if it will not exceed the limit, or the +// limit is not set (limit <= 0), attr is returned. +func (l limiter[V]) Attributes(attrs attribute.Set, measurements map[attribute.Distinct]V) attribute.Set { + if l.aggLimit > 0 { + _, exists := measurements[attrs.Equivalent()] + if !exists && len(measurements) >= l.aggLimit-1 { + return overflowSet + } + } + + return attrs +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go new file mode 100644 index 0000000000..8913669226 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go @@ -0,0 +1,238 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "context" + "sync" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +type sumValue[N int64 | float64] struct { + n N + res exemplar.FilteredReservoir[N] + attrs attribute.Set +} + +// valueMap is the storage for sums. +type valueMap[N int64 | float64] struct { + sync.Mutex + newRes func() exemplar.FilteredReservoir[N] + limit limiter[sumValue[N]] + values map[attribute.Distinct]sumValue[N] +} + +func newValueMap[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *valueMap[N] { + return &valueMap[N]{ + newRes: r, + limit: newLimiter[sumValue[N]](limit), + values: make(map[attribute.Distinct]sumValue[N]), + } +} + +func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { + s.Lock() + defer s.Unlock() + + attr := s.limit.Attributes(fltrAttr, s.values) + v, ok := s.values[attr.Equivalent()] + if !ok { + v.res = s.newRes() + } + + v.attrs = attr + v.n += value + v.res.Offer(ctx, value, droppedAttr) + + s.values[attr.Equivalent()] = v +} + +// newSum returns an aggregator that summarizes a set of measurements as their +// arithmetic sum. Each sum is scoped by attributes and the aggregation cycle +// the measurements were made in. +func newSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *sum[N] { + return &sum[N]{ + valueMap: newValueMap[N](limit, r), + monotonic: monotonic, + start: now(), + } +} + +// sum summarizes a set of measurements made as their arithmetic sum. +type sum[N int64 | float64] struct { + *valueMap[N] + + monotonic bool + start time.Time +} + +func (s *sum[N]) delta(dest *metricdata.Aggregation) int { + t := now() + + // If *dest is not a metricdata.Sum, memory reuse is missed. In that case, + // use the zero-value sData and hope for better alignment next cycle. 
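+	//
+	// The limiter in limit.go bounds how many entries s.values can hold. An
+	// illustrative run with a limit of 3 (two distinct attribute sets plus
+	// the overflow set; attribute sets A-D are hypothetical):
+	//
+	//	measure(1, A) -> stream A
+	//	measure(1, B) -> stream B
+	//	measure(1, C) -> stream {"otel.metric.overflow": true}
+	//	measure(1, D) -> stream {"otel.metric.overflow": true}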
+ sData, _ := (*dest).(metricdata.Sum[N]) + sData.Temporality = metricdata.DeltaTemporality + sData.IsMonotonic = s.monotonic + + s.Lock() + defer s.Unlock() + + n := len(s.values) + dPts := reset(sData.DataPoints, n, n) + + var i int + for _, val := range s.values { + dPts[i].Attributes = val.attrs + dPts[i].StartTime = s.start + dPts[i].Time = t + dPts[i].Value = val.n + collectExemplars(&dPts[i].Exemplars, val.res.Collect) + i++ + } + // Do not report stale values. + clear(s.values) + // The delta collection cycle resets. + s.start = t + + sData.DataPoints = dPts + *dest = sData + + return n +} + +func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int { + t := now() + + // If *dest is not a metricdata.Sum, memory reuse is missed. In that case, + // use the zero-value sData and hope for better alignment next cycle. + sData, _ := (*dest).(metricdata.Sum[N]) + sData.Temporality = metricdata.CumulativeTemporality + sData.IsMonotonic = s.monotonic + + s.Lock() + defer s.Unlock() + + n := len(s.values) + dPts := reset(sData.DataPoints, n, n) + + var i int + for _, value := range s.values { + dPts[i].Attributes = value.attrs + dPts[i].StartTime = s.start + dPts[i].Time = t + dPts[i].Value = value.n + collectExemplars(&dPts[i].Exemplars, value.res.Collect) + // TODO (#3006): This will use an unbounded amount of memory if there + // are unbounded number of attribute sets being aggregated. Attribute + // sets that become "stale" need to be forgotten so this will not + // overload the system. + i++ + } + + sData.DataPoints = dPts + *dest = sData + + return n +} + +// newPrecomputedSum returns an aggregator that summarizes a set of +// observatrions as their arithmetic sum. Each sum is scoped by attributes and +// the aggregation cycle the measurements were made in. +func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *precomputedSum[N] { + return &precomputedSum[N]{ + valueMap: newValueMap[N](limit, r), + monotonic: monotonic, + start: now(), + } +} + +// precomputedSum summarizes a set of observatrions as their arithmetic sum. +type precomputedSum[N int64 | float64] struct { + *valueMap[N] + + monotonic bool + start time.Time + + reported map[attribute.Distinct]N +} + +func (s *precomputedSum[N]) delta(dest *metricdata.Aggregation) int { + t := now() + newReported := make(map[attribute.Distinct]N) + + // If *dest is not a metricdata.Sum, memory reuse is missed. In that case, + // use the zero-value sData and hope for better alignment next cycle. + sData, _ := (*dest).(metricdata.Sum[N]) + sData.Temporality = metricdata.DeltaTemporality + sData.IsMonotonic = s.monotonic + + s.Lock() + defer s.Unlock() + + n := len(s.values) + dPts := reset(sData.DataPoints, n, n) + + var i int + for key, value := range s.values { + delta := value.n - s.reported[key] + + dPts[i].Attributes = value.attrs + dPts[i].StartTime = s.start + dPts[i].Time = t + dPts[i].Value = delta + collectExemplars(&dPts[i].Exemplars, value.res.Collect) + + newReported[key] = value.n + i++ + } + // Unused attribute sets do not report. + clear(s.values) + s.reported = newReported + // The delta collection cycle resets. + s.start = t + + sData.DataPoints = dPts + *dest = sData + + return n +} + +func (s *precomputedSum[N]) cumulative(dest *metricdata.Aggregation) int { + t := now() + + // If *dest is not a metricdata.Sum, memory reuse is missed. In that case, + // use the zero-value sData and hope for better alignment next cycle. 
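+	//
+	// A precomputed sum treats each measured value as an already-computed
+	// total: cumulative reports the total as-is, while delta (above) reports
+	// the difference from the previously reported total. Hypothetical totals
+	// for one attribute set across three cycles:
+	//
+	//	observe 10 -> delta 10, cumulative 10
+	//	observe 25 -> delta 15, cumulative 25
+	//	observe 40 -> delta 15, cumulative 40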
+ sData, _ := (*dest).(metricdata.Sum[N]) + sData.Temporality = metricdata.CumulativeTemporality + sData.IsMonotonic = s.monotonic + + s.Lock() + defer s.Unlock() + + n := len(s.values) + dPts := reset(sData.DataPoints, n, n) + + var i int + for _, val := range s.values { + dPts[i].Attributes = val.attrs + dPts[i].StartTime = s.start + dPts[i].Time = t + dPts[i].Value = val.n + collectExemplars(&dPts[i].Exemplars, val.res.Collect) + + i++ + } + // Unused attribute sets do not report. + clear(s.values) + + sData.DataPoints = dPts + *dest = sData + + return n +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go new file mode 100644 index 0000000000..5394f48e0d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package exemplar provides an implementation of the OpenTelemetry exemplar +// reservoir to be used in metric collection pipelines. +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go new file mode 100644 index 0000000000..5a0f39ae14 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go @@ -0,0 +1,23 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" +) + +// Drop returns a [FilteredReservoir] that drops all measurements it is offered. +func Drop[N int64 | float64]() FilteredReservoir[N] { return &dropRes[N]{} } + +type dropRes[N int64 | float64] struct{} + +// Offer does nothing, all measurements offered will be dropped. +func (r *dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {} + +// Collect resets dest. No exemplars will ever be returned. +func (r *dropRes[N]) Collect(dest *[]Exemplar) { + *dest = (*dest)[:0] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go new file mode 100644 index 0000000000..fcaa6a4697 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// Exemplar is a measurement sampled from a timeseries providing a typical +// example. +type Exemplar struct { + // FilteredAttributes are the attributes recorded with the measurement but + // filtered out of the timeseries' aggregated data. + FilteredAttributes []attribute.KeyValue + // Time is the time when the measurement was recorded. + Time time.Time + // Value is the measured value. + Value Value + // SpanID is the ID of the span that was active during the measurement. If + // no span was active or the span was not sampled this will be empty. + SpanID []byte `json:",omitempty"` + // TraceID is the ID of the trace the active span belonged to during the + // measurement. If no span was active or the span was not sampled this will + // be empty. 
+ TraceID []byte `json:",omitempty"` +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go new file mode 100644 index 0000000000..152a069a09 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + + "go.opentelemetry.io/otel/trace" +) + +// Filter determines if a measurement should be offered. +// +// The passed ctx needs to contain any baggage or span that were active +// when the measurement was made. This information may be used by the +// Reservoir in making a sampling decision. +type Filter func(context.Context) bool + +// SampledFilter is a [Filter] that will only offer measurements +// if the passed context associated with the measurement contains a sampled +// [go.opentelemetry.io/otel/trace.SpanContext]. +func SampledFilter(ctx context.Context) bool { + return trace.SpanContextFromContext(ctx).IsSampled() +} + +// AlwaysOnFilter is a [Filter] that always offers measurements. +func AlwaysOnFilter(ctx context.Context) bool { + return true +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go new file mode 100644 index 0000000000..9fedfa4be6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// FilteredReservoir wraps a [Reservoir] with a filter. +type FilteredReservoir[N int64 | float64] interface { + // Offer accepts the parameters associated with a measurement. The + // parameters will be stored as an exemplar if the filter decides to + // sample the measurement. + // + // The passed ctx needs to contain any baggage or span that were active + // when the measurement was made. This information may be used by the + // Reservoir in making a sampling decision. + Offer(ctx context.Context, val N, attr []attribute.KeyValue) + // Collect returns all the held exemplars in the reservoir. + Collect(dest *[]Exemplar) +} + +// filteredReservoir handles the pre-sampled exemplar of measurements made. +type filteredReservoir[N int64 | float64] struct { + filter Filter + reservoir Reservoir +} + +// NewFilteredReservoir creates a [FilteredReservoir] which only offers values +// that are allowed by the filter. +func NewFilteredReservoir[N int64 | float64](f Filter, r Reservoir) FilteredReservoir[N] { + return &filteredReservoir[N]{ + filter: f, + reservoir: r, + } +} + +func (f *filteredReservoir[N]) Offer(ctx context.Context, val N, attr []attribute.KeyValue) { + if f.filter(ctx) { + // only record the current time if we are sampling this measurment. 
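+		//
+		// A minimal composition sketch for this internal package (all names
+		// are defined in this package; ctx is assumed):
+		//
+		//	res := NewFilteredReservoir[int64](SampledFilter, FixedSize(4))
+		//	res.Offer(ctx, 42, nil) // kept only if ctx carries a sampled span
+		//	var out []Exemplar
+		//	res.Collect(&out)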
+ f.reservoir.Offer(ctx, time.Now(), NewValue(val), attr) + } +} + +func (f *filteredReservoir[N]) Collect(dest *[]Exemplar) { f.reservoir.Collect(dest) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go new file mode 100644 index 0000000000..a6ff86d027 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "slices" + "sort" + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// Histogram returns a [Reservoir] that samples the last measurement that falls +// within a histogram bucket. The histogram bucket upper-boundaries are define +// by bounds. +// +// The passed bounds will be sorted by this function. +func Histogram(bounds []float64) Reservoir { + slices.Sort(bounds) + return &histRes{ + bounds: bounds, + storage: newStorage(len(bounds) + 1), + } +} + +type histRes struct { + *storage + + // bounds are bucket bounds in ascending order. + bounds []float64 +} + +func (r *histRes) Offer(ctx context.Context, t time.Time, v Value, a []attribute.KeyValue) { + var x float64 + switch v.Type() { + case Int64ValueType: + x = float64(v.Int64()) + case Float64ValueType: + x = v.Float64() + default: + panic("unknown value type") + } + r.store[sort.SearchFloat64s(r.bounds, x)] = newMeasurement(ctx, t, v, a) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go new file mode 100644 index 0000000000..199a2608f7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go @@ -0,0 +1,191 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "math" + "math/rand" + "sync" + "time" + + "go.opentelemetry.io/otel/attribute" +) + +var ( + // rng is used to make sampling decisions. + // + // Do not use crypto/rand. There is no reason for the decrease in performance + // given this is not a security sensitive decision. + rng = rand.New(rand.NewSource(time.Now().UnixNano())) + // Ensure concurrent safe accecess to rng and its underlying source. + rngMu sync.Mutex +) + +// random returns, as a float64, a uniform pseudo-random number in the open +// interval (0.0,1.0). +func random() float64 { + // TODO: This does not return a uniform number. rng.Float64 returns a + // uniformly random int in [0,2^53) that is divided by 2^53. Meaning it + // returns multiples of 2^-53, and not all floating point numbers between 0 + // and 1 (i.e. for values less than 2^-4 the 4 last bits of the significand + // are always going to be 0). + // + // An alternative algorithm should be considered that will actually return + // a uniform number in the interval (0,1). For example, since the default + // rand source provides a uniform distribution for Int63, this can be + // converted following the prototypical code of Mersenne Twister 64 (Takuji + // Nishimura and Makoto Matsumoto: + // http://www.math.sci.hiroshima-u.ac.jp/m-mat/MT/VERSIONS/C-LANG/mt19937-64.c) + // + // (float64(rng.Int63()>>11) + 0.5) * (1.0 / 4503599627370496.0) + // + // There are likely many other methods to explore here as well. 
+ + rngMu.Lock() + defer rngMu.Unlock() + + f := rng.Float64() + for f == 0 { + f = rng.Float64() + } + return f +} + +// FixedSize returns a [Reservoir] that samples at most k exemplars. If there +// are k or less measurements made, the Reservoir will sample each one. If +// there are more than k, the Reservoir will then randomly sample all +// additional measurement with a decreasing probability. +func FixedSize(k int) Reservoir { + r := &randRes{storage: newStorage(k)} + r.reset() + return r +} + +type randRes struct { + *storage + + // count is the number of measurement seen. + count int64 + // next is the next count that will store a measurement at a random index + // once the reservoir has been filled. + next int64 + // w is the largest random number in a distribution that is used to compute + // the next next. + w float64 +} + +func (r *randRes) Offer(ctx context.Context, t time.Time, n Value, a []attribute.KeyValue) { + // The following algorithm is "Algorithm L" from Li, Kim-Hung (4 December + // 1994). "Reservoir-Sampling Algorithms of Time Complexity + // O(n(1+log(N/n)))". ACM Transactions on Mathematical Software. 20 (4): + // 481–493 (https://dl.acm.org/doi/10.1145/198429.198435). + // + // A high-level overview of "Algorithm L": + // 0) Pre-calculate the random count greater than the storage size when + // an exemplar will be replaced. + // 1) Accept all measurements offered until the configured storage size is + // reached. + // 2) Loop: + // a) When the pre-calculate count is reached, replace a random + // existing exemplar with the offered measurement. + // b) Calculate the next random count greater than the existing one + // which will replace another exemplars + // + // The way a "replacement" count is computed is by looking at `n` number of + // independent random numbers each corresponding to an offered measurement. + // Of these numbers the smallest `k` (the same size as the storage + // capacity) of them are kept as a subset. The maximum value in this + // subset, called `w` is used to weight another random number generation + // for the next count that will be considered. + // + // By weighting the next count computation like described, it is able to + // perform a uniformly-weighted sampling algorithm based on the number of + // samples the reservoir has seen so far. The sampling will "slow down" as + // more and more samples are offered so as to reduce a bias towards those + // offered just prior to the end of the collection. + // + // This algorithm is preferred because of its balance of simplicity and + // performance. It will compute three random numbers (the bulk of + // computation time) for each item that becomes part of the reservoir, but + // it does not spend any time on items that do not. In particular it has an + // asymptotic runtime of O(k(1 + log(n/k)) where n is the number of + // measurements offered and k is the reservoir size. + // + // See https://en.wikipedia.org/wiki/Reservoir_sampling for an overview of + // this and other reservoir sampling algorithms. See + // https://github.com/MrAlias/reservoir-sampling for a performance + // comparison of reservoir sampling algorithms. + + if int(r.count) < cap(r.store) { + r.store[r.count] = newMeasurement(ctx, t, n, a) + } else { + if r.count == r.next { + // Overwrite a random existing measurement with the one offered. + idx := int(rng.Int63n(int64(cap(r.store)))) + r.store[idx] = newMeasurement(ctx, t, n, a) + r.advance() + } + } + r.count++ +} + +// reset resets r to the initial state. 
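+//
+// End-to-end, a FixedSize(2) reservoir offered four measurements keeps
+// exactly two exemplars, each offered measurement surviving with equal
+// probability (illustrative; ctx is assumed):
+//
+//	r := FixedSize(2)
+//	for i := 0; i < 4; i++ {
+//		r.Offer(ctx, time.Now(), NewValue(int64(i)), nil)
+//	}
+//	var dest []Exemplar
+//	r.Collect(&dest) // len(dest) == 2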
+func (r *randRes) reset() { + // This resets the number of exemplars known. + r.count = 0 + // Random index inserts should only happen after the storage is full. + r.next = int64(cap(r.store)) + + // Initial random number in the series used to generate r.next. + // + // This is set before r.advance to reset or initialize the random number + // series. Without doing so it would always be 0 or never restart a new + // random number series. + // + // This maps the uniform random number in (0,1) to a geometric distribution + // over the same interval. The mean of the distribution is inversely + // proportional to the storage capacity. + r.w = math.Exp(math.Log(random()) / float64(cap(r.store))) + + r.advance() +} + +// advance updates the count at which the offered measurement will overwrite an +// existing exemplar. +func (r *randRes) advance() { + // Calculate the next value in the random number series. + // + // The current value of r.w is based on the max of a distribution of random + // numbers (i.e. `w = max(u_1,u_2,...,u_k)` for `k` equal to the capacity + // of the storage and each `u` in the interval (0,w)). To calculate the + // next r.w we use the fact that when the next exemplar is selected to be + // included in the storage an existing one will be dropped, and the + // corresponding random number in the set used to calculate r.w will also + // be replaced. The replacement random number will also be within (0,w), + // therefore the next r.w will be based on the same distribution (i.e. + // `max(u_1,u_2,...,u_k)`). Therefore, we can sample the next r.w by + // computing the next random number `u` and take r.w as `w * u^(1/k)`. + r.w *= math.Exp(math.Log(random()) / float64(cap(r.store))) + // Use the new random number in the series to calculate the count of the + // next measurement that will be stored. + // + // Given 0 < r.w < 1, each iteration will result in subsequent r.w being + // smaller. This translates here into the next next being selected against + // a distribution with a higher mean (i.e. the expected value will increase + // and replacements become less likely) + // + // Important to note, the new r.next will always be at least 1 more than + // the last r.next. + r.next += int64(math.Log(random())/math.Log(1-r.w)) + 1 +} + +func (r *randRes) Collect(dest *[]Exemplar) { + r.storage.Collect(dest) + // Call reset here even though it will reset r.count and restart the random + // number series. This will persist any old exemplars as long as no new + // measurements are offered, but it will also prioritize those new + // measurements that are made over the older collection cycle ones. + r.reset() +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go new file mode 100644 index 0000000000..80fa59554f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go @@ -0,0 +1,32 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// Reservoir holds the sampled exemplar of measurements made. +type Reservoir interface { + // Offer accepts the parameters associated with a measurement. The + // parameters will be stored as an exemplar if the Reservoir decides to + // sample the measurement. 
+ // + // The passed ctx needs to contain any baggage or span that were active + // when the measurement was made. This information may be used by the + // Reservoir in making a sampling decision. + // + // The time t is the time when the measurement was made. The val and attr + // parameters are the value and dropped (filtered) attributes of the + // measurement respectively. + Offer(ctx context.Context, t time.Time, val Value, attr []attribute.KeyValue) + + // Collect returns all the held exemplars. + // + // The Reservoir state is preserved after this call. + Collect(dest *[]Exemplar) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go new file mode 100644 index 0000000000..10b2976f79 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go @@ -0,0 +1,95 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// storage is an exemplar storage for [Reservoir] implementations. +type storage struct { + // store are the measurements sampled. + // + // This does not use []metricdata.Exemplar because it potentially would + // require an allocation for trace and span IDs in the hot path of Offer. + store []measurement +} + +func newStorage(n int) *storage { + return &storage{store: make([]measurement, n)} +} + +// Collect returns all the held exemplars. +// +// The Reservoir state is preserved after this call. +func (r *storage) Collect(dest *[]Exemplar) { + *dest = reset(*dest, len(r.store), len(r.store)) + var n int + for _, m := range r.store { + if !m.valid { + continue + } + + m.Exemplar(&(*dest)[n]) + n++ + } + *dest = (*dest)[:n] +} + +// measurement is a measurement made by a telemetry system. +type measurement struct { + // FilteredAttributes are the attributes dropped during the measurement. + FilteredAttributes []attribute.KeyValue + // Time is the time when the measurement was made. + Time time.Time + // Value is the value of the measurement. + Value Value + // SpanContext is the SpanContext active when a measurement was made. + SpanContext trace.SpanContext + + valid bool +} + +// newMeasurement returns a new non-empty Measurement. +func newMeasurement(ctx context.Context, ts time.Time, v Value, droppedAttr []attribute.KeyValue) measurement { + return measurement{ + FilteredAttributes: droppedAttr, + Time: ts, + Value: v, + SpanContext: trace.SpanContextFromContext(ctx), + valid: true, + } +} + +// Exemplar returns m as an [Exemplar]. 
+func (m measurement) Exemplar(dest *Exemplar) { + dest.FilteredAttributes = m.FilteredAttributes + dest.Time = m.Time + dest.Value = m.Value + + if m.SpanContext.HasTraceID() { + traceID := m.SpanContext.TraceID() + dest.TraceID = traceID[:] + } else { + dest.TraceID = dest.TraceID[:0] + } + + if m.SpanContext.HasSpanID() { + spanID := m.SpanContext.SpanID() + dest.SpanID = spanID[:] + } else { + dest.SpanID = dest.SpanID[:0] + } +} + +func reset[T any](s []T, length, capacity int) []T { + if cap(s) < capacity { + return make([]T, length, capacity) + } + return s[:length] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go new file mode 100644 index 0000000000..1957d6b1e3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import "math" + +// ValueType identifies the type of value used in exemplar data. +type ValueType uint8 + +const ( + // UnknownValueType should not be used. It represents a misconfigured + // Value. + UnknownValueType ValueType = 0 + // Int64ValueType represents a Value with int64 data. + Int64ValueType ValueType = 1 + // Float64ValueType represents a Value with float64 data. + Float64ValueType ValueType = 2 +) + +// Value is the value of data held by an exemplar. +type Value struct { + t ValueType + val uint64 +} + +// NewValue returns a new [Value] for the provided value. +func NewValue[N int64 | float64](value N) Value { + switch v := any(value).(type) { + case int64: + return Value{t: Int64ValueType, val: uint64(v)} + case float64: + return Value{t: Float64ValueType, val: math.Float64bits(v)} + } + return Value{} +} + +// Type returns the [ValueType] of data held by v. +func (v Value) Type() ValueType { return v.t } + +// Int64 returns the value of v as an int64. If the ValueType of v is not an +// Int64ValueType, 0 is returned. +func (v Value) Int64() int64 { + if v.t == Int64ValueType { + // Assumes the correct int64 was stored in v.val based on type. + return int64(v.val) // nolint: gosec + } + return 0 +} + +// Float64 returns the value of v as an float64. If the ValueType of v is not +// an Float64ValueType, 0 is returned. +func (v Value) Float64() float64 { + if v.t == Float64ValueType { + return math.Float64frombits(v.val) + } + return 0 +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go new file mode 100644 index 0000000000..19ec6806ff --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go @@ -0,0 +1,13 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/sdk/metric/internal" + +// ReuseSlice returns a zeroed view of slice if its capacity is greater than or +// equal to n. Otherwise, it returns a new []T with capacity equal to n. 
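+//
+// For example (illustrative):
+//
+//	buf := make([]int, 0, 8)
+//	buf = ReuseSlice(buf, 4)  // capacity suffices: reslices to length 4
+//	buf = ReuseSlice(buf, 16) // capacity too small: allocates a new slice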
+func ReuseSlice[T any](slice []T, n int) []T {
+	if cap(slice) >= n {
+		return slice[:n]
+	}
+	return make([]T, n)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md
new file mode 100644
index 0000000000..aba69d6547
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md
@@ -0,0 +1,112 @@
+# Experimental Features
+
+The metric SDK contains features that have not yet stabilized in the OpenTelemetry specification.
+These features are added to the OpenTelemetry Go metric SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
+
+These features may change in backwards incompatible ways as feedback is applied.
+See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
+
+## Features
+
+- [Cardinality Limit](#cardinality-limit)
+- [Exemplars](#exemplars)
+
+### Cardinality Limit
+
+The cardinality limit is the hard limit on the number of metric streams that can be collected for a single instrument.
+
+This experimental feature can be enabled by setting the `OTEL_GO_X_CARDINALITY_LIMIT` environment variable.
+The value must be an integer.
+All other values are ignored.
+
+If the value set is less than or equal to `0`, no limit will be applied.
+
+#### Examples
+
+Set the cardinality limit to 2000.
+
+```console
+export OTEL_GO_X_CARDINALITY_LIMIT=2000
+```
+
+Set an infinite cardinality limit (functionally equivalent to disabling the feature).
+
+```console
+export OTEL_GO_X_CARDINALITY_LIMIT=-1
+```
+
+Disable the cardinality limit.
+
+```console
+unset OTEL_GO_X_CARDINALITY_LIMIT
+```
+
+### Exemplars
+
+A sample of measurements made may be exported directly as a set of exemplars.
+
+This experimental feature can be enabled by setting the `OTEL_GO_X_EXEMPLAR` environment variable.
+The value must be the case-insensitive string `"true"` to enable the feature.
+All other values are ignored.
+
+Exemplar filters are also supported.
+The exemplar filter applies to all measurements made.
+It filters these measurements, only allowing certain measurements to be passed to the underlying exemplar reservoir.
+
+To change the exemplar filter from the default `"trace_based"` filter, set the `OTEL_METRICS_EXEMPLAR_FILTER` environment variable.
+The value must be the case-sensitive string defined by the [OpenTelemetry specification].
+
+- `"always_on"`: allows all measurements
+- `"always_off"`: denies all measurements
+- `"trace_based"`: allows only sampled measurements
+
+All values other than these will result in the default, `"trace_based"`, exemplar filter being used.
+
+[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification/blob/a6ca2fd484c9e76fe1d8e1c79c99f08f4745b5ee/specification/configuration/sdk-environment-variables.md#exemplar
+
+#### Examples
+
+Enable exemplars to be exported.
+
+```console
+export OTEL_GO_X_EXEMPLAR=true
+```
+
+Disable exemplars from being exported.
+
+```console
+unset OTEL_GO_X_EXEMPLAR
+```
+
+Set the exemplar filter to allow all measurements.
+
+```console
+export OTEL_METRICS_EXEMPLAR_FILTER=always_on
+```
+
+Set the exemplar filter to deny all measurements.
+
+```console
+export OTEL_METRICS_EXEMPLAR_FILTER=always_off
+```
+
+Set the exemplar filter to only allow sampled measurements.
+ +```console +export OTEL_METRICS_EXEMPLAR_FILTER=trace_based +``` + +Revert to the default exemplar filter (`"trace_based"`) + +```console +unset OTEL_METRICS_EXEMPLAR_FILTER +``` + +## Compatibility and Stability + +Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md). +These features may be removed or modified in successive version releases, including patch versions. + +When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. +There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version. +If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go new file mode 100644 index 0000000000..8cd2f37417 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go @@ -0,0 +1,85 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x contains support for OTel metric SDK experimental features. +// +// This package should only be used for features defined in the specification. +// It should not be used for experiments or new project ideas. +package x // import "go.opentelemetry.io/otel/sdk/metric/internal/x" + +import ( + "os" + "strconv" + "strings" +) + +var ( + // Exemplars is an experimental feature flag that defines if exemplars + // should be recorded for metric data-points. + // + // To enable this feature set the OTEL_GO_X_EXEMPLAR environment variable + // to the case-insensitive string value of "true" (i.e. "True" and "TRUE" + // will also enable this). + Exemplars = newFeature("EXEMPLAR", func(v string) (string, bool) { + if strings.ToLower(v) == "true" { + return v, true + } + return "", false + }) + + // CardinalityLimit is an experimental feature flag that defines if + // cardinality limits should be applied to the recorded metric data-points. + // + // To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment + // variable to the integer limit value you want to use. + // + // Setting OTEL_GO_X_CARDINALITY_LIMIT to a value less than or equal to 0 + // will disable the cardinality limits. + CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) { + n, err := strconv.Atoi(v) + if err != nil { + return 0, false + } + return n, true + }) +) + +// Feature is an experimental feature control flag. It provides a uniform way +// to interact with these feature flags and parse their values. +type Feature[T any] struct { + key string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + return Feature[T]{ + key: envKeyRoot + suffix, + parse: parse, + } +} + +// Key returns the environment variable key that needs to be set to enable the +// feature. +func (f Feature[T]) Key() string { return f.key } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. 
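+//
+// For example (illustrative; assumes OTEL_GO_X_CARDINALITY_LIMIT=2000 is set
+// in the process environment):
+//
+//	if limit, ok := CardinalityLimit.Lookup(); ok {
+//		_ = limit // limit == 2000
+//	}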
+func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + vRaw := os.Getenv(f.key) + if vRaw == "" { + return v, ok + } + return f.parse(vRaw) +} + +// Enabled returns if the feature is enabled. +func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go new file mode 100644 index 0000000000..e0fd86ca78 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go @@ -0,0 +1,203 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// ManualReader is a simple Reader that allows an application to +// read metrics on demand. +type ManualReader struct { + sdkProducer atomic.Value + shutdownOnce sync.Once + + mu sync.Mutex + isShutdown bool + externalProducers atomic.Value + + temporalitySelector TemporalitySelector + aggregationSelector AggregationSelector +} + +// Compile time check the manualReader implements Reader and is comparable. +var _ = map[Reader]struct{}{&ManualReader{}: {}} + +// NewManualReader returns a Reader which is directly called to collect metrics. +func NewManualReader(opts ...ManualReaderOption) *ManualReader { + cfg := newManualReaderConfig(opts) + r := &ManualReader{ + temporalitySelector: cfg.temporalitySelector, + aggregationSelector: cfg.aggregationSelector, + } + r.externalProducers.Store(cfg.producers) + return r +} + +// register stores the sdkProducer which enables the caller +// to read metrics from the SDK on demand. +func (mr *ManualReader) register(p sdkProducer) { + // Only register once. If producer is already set, do nothing. + if !mr.sdkProducer.CompareAndSwap(nil, produceHolder{produce: p.produce}) { + msg := "did not register manual reader" + global.Error(errDuplicateRegister, msg) + } +} + +// temporality reports the Temporality for the instrument kind provided. +func (mr *ManualReader) temporality(kind InstrumentKind) metricdata.Temporality { + return mr.temporalitySelector(kind) +} + +// aggregation returns what Aggregation to use for kind. +func (mr *ManualReader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive // import-shadow for method scoped by type. + return mr.aggregationSelector(kind) +} + +// Shutdown closes any connections and frees any resources used by the reader. +// +// This method is safe to call concurrently. +func (mr *ManualReader) Shutdown(context.Context) error { + err := ErrReaderShutdown + mr.shutdownOnce.Do(func() { + // Any future call to Collect will now return ErrReaderShutdown. + mr.sdkProducer.Store(produceHolder{ + produce: shutdownProducer{}.produce, + }) + mr.mu.Lock() + defer mr.mu.Unlock() + mr.isShutdown = true + // release references to Producer(s) + mr.externalProducers.Store([]Producer{}) + err = nil + }) + return err +} + +// Collect gathers all metric data related to the Reader from +// the SDK and other Producers and stores the result in rm. 
+// +// Collect will return an error if called after shutdown. +// Collect will return an error if rm is a nil ResourceMetrics. +// Collect will return an error if the context's Done channel is closed. +// +// This method is safe to call concurrently. +func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error { + if rm == nil { + return errors.New("manual reader: *metricdata.ResourceMetrics is nil") + } + p := mr.sdkProducer.Load() + if p == nil { + return ErrReaderNotRegistered + } + + ph, ok := p.(produceHolder) + if !ok { + // The atomic.Value is entirely in the periodicReader's control so + // this should never happen. In the unforeseen case that this does + // happen, return an error instead of panicking so a users code does + // not halt in the processes. + err := fmt.Errorf("manual reader: invalid producer: %T", p) + return err + } + + err := ph.produce(ctx, rm) + if err != nil { + return err + } + var errs []error + for _, producer := range mr.externalProducers.Load().([]Producer) { + externalMetrics, err := producer.Produce(ctx) + if err != nil { + errs = append(errs, err) + } + rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...) + } + + global.Debug("ManualReader collection", "Data", rm) + + return unifyErrors(errs) +} + +// MarshalLog returns logging data about the ManualReader. +func (r *ManualReader) MarshalLog() interface{} { + r.mu.Lock() + down := r.isShutdown + r.mu.Unlock() + return struct { + Type string + Registered bool + Shutdown bool + }{ + Type: "ManualReader", + Registered: r.sdkProducer.Load() != nil, + Shutdown: down, + } +} + +// manualReaderConfig contains configuration options for a ManualReader. +type manualReaderConfig struct { + temporalitySelector TemporalitySelector + aggregationSelector AggregationSelector + producers []Producer +} + +// newManualReaderConfig returns a manualReaderConfig configured with options. +func newManualReaderConfig(opts []ManualReaderOption) manualReaderConfig { + cfg := manualReaderConfig{ + temporalitySelector: DefaultTemporalitySelector, + aggregationSelector: DefaultAggregationSelector, + } + for _, opt := range opts { + cfg = opt.applyManual(cfg) + } + return cfg +} + +// ManualReaderOption applies a configuration option value to a ManualReader. +type ManualReaderOption interface { + applyManual(manualReaderConfig) manualReaderConfig +} + +// WithTemporalitySelector sets the TemporalitySelector a reader will use to +// determine the Temporality of an instrument based on its kind. If this +// option is not used, the reader will use the DefaultTemporalitySelector. +func WithTemporalitySelector(selector TemporalitySelector) ManualReaderOption { + return temporalitySelectorOption{selector: selector} +} + +type temporalitySelectorOption struct { + selector func(instrument InstrumentKind) metricdata.Temporality +} + +// applyManual returns a manualReaderConfig with option applied. +func (t temporalitySelectorOption) applyManual(mrc manualReaderConfig) manualReaderConfig { + mrc.temporalitySelector = t.selector + return mrc +} + +// WithAggregationSelector sets the AggregationSelector a reader will use to +// determine the aggregation to use for an instrument based on its kind. If +// this option is not used, the reader will use the DefaultAggregationSelector +// or the aggregation explicitly passed for a view matching an instrument. 
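+//
+// A minimal wiring sketch for this package (the MeterProvider construction is
+// assumed from the public SDK surface):
+//
+//	reader := NewManualReader(
+//		WithTemporalitySelector(DefaultTemporalitySelector),
+//		WithAggregationSelector(DefaultAggregationSelector),
+//	)
+//	provider := NewMeterProvider(WithReader(reader))
+//	defer func() { _ = provider.Shutdown(context.Background()) }()
+//
+//	var rm metricdata.ResourceMetrics
+//	_ = reader.Collect(context.Background(), &rm)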
+func WithAggregationSelector(selector AggregationSelector) ManualReaderOption { + return aggregationSelectorOption{selector: selector} +} + +type aggregationSelectorOption struct { + selector AggregationSelector +} + +// applyManual returns a manualReaderConfig with option applied. +func (t aggregationSelectorOption) applyManual(c manualReaderConfig) manualReaderConfig { + c.aggregationSelector = t.selector + return c +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go new file mode 100644 index 0000000000..2309e5b2b0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go @@ -0,0 +1,729 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "errors" + "fmt" + + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" + "go.opentelemetry.io/otel/sdk/instrumentation" + + "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" +) + +// ErrInstrumentName indicates the created instrument has an invalid name. +// Valid names must consist of 255 or fewer characters including alphanumeric, _, ., -, / and start with a letter. +var ErrInstrumentName = errors.New("invalid instrument name") + +// meter handles the creation and coordination of all metric instruments. A +// meter represents a single instrumentation scope; all metric telemetry +// produced by an instrumentation scope will use metric instruments from a +// single meter. +type meter struct { + embedded.Meter + + scope instrumentation.Scope + pipes pipelines + + int64Insts *cacheWithErr[instID, *int64Inst] + float64Insts *cacheWithErr[instID, *float64Inst] + int64ObservableInsts *cacheWithErr[instID, int64Observable] + float64ObservableInsts *cacheWithErr[instID, float64Observable] + + int64Resolver resolver[int64] + float64Resolver resolver[float64] +} + +func newMeter(s instrumentation.Scope, p pipelines) *meter { + // viewCache ensures instrument conflicts, including number conflicts, this + // meter is asked to create are logged to the user. + var viewCache cache[string, instID] + + var int64Insts cacheWithErr[instID, *int64Inst] + var float64Insts cacheWithErr[instID, *float64Inst] + var int64ObservableInsts cacheWithErr[instID, int64Observable] + var float64ObservableInsts cacheWithErr[instID, float64Observable] + + return &meter{ + scope: s, + pipes: p, + int64Insts: &int64Insts, + float64Insts: &float64Insts, + int64ObservableInsts: &int64ObservableInsts, + float64ObservableInsts: &float64ObservableInsts, + int64Resolver: newResolver[int64](p, &viewCache), + float64Resolver: newResolver[float64](p, &viewCache), + } +} + +// Compile-time check meter implements metric.Meter. +var _ metric.Meter = (*meter)(nil) + +// Int64Counter returns a new instrument identified by name and configured with +// options. The instrument is used to synchronously record increasing int64 +// measurements during a computational operation. +func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { + cfg := metric.NewInt64CounterConfig(options...) 
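+	//
+	// Caller-side sketch using the public go.opentelemetry.io/otel/metric
+	// API (instrument name and attribute are illustrative):
+	//
+	//	counter, err := meter.Int64Counter("requests.total",
+	//		metric.WithDescription("Total requests handled."),
+	//		metric.WithUnit("{request}"))
+	//	if err != nil { /* handle */ }
+	//	counter.Add(ctx, 1, metric.WithAttributes(attribute.String("route", "/")))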
+ const kind = InstrumentKindCounter + p := int64InstProvider{m} + i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) + if err != nil { + return i, err + } + + return i, validateInstrumentName(name) +} + +// Int64UpDownCounter returns a new instrument identified by name and +// configured with options. The instrument is used to synchronously record +// int64 measurements during a computational operation. +func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { + cfg := metric.NewInt64UpDownCounterConfig(options...) + const kind = InstrumentKindUpDownCounter + p := int64InstProvider{m} + i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) + if err != nil { + return i, err + } + + return i, validateInstrumentName(name) +} + +// Int64Histogram returns a new instrument identified by name and configured +// with options. The instrument is used to synchronously record the +// distribution of int64 measurements during a computational operation. +func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { + cfg := metric.NewInt64HistogramConfig(options...) + p := int64InstProvider{m} + i, err := p.lookupHistogram(name, cfg) + if err != nil { + return i, err + } + + return i, validateInstrumentName(name) +} + +// Int64Gauge returns a new instrument identified by name and configured +// with options. The instrument is used to synchronously record the +// distribution of int64 measurements during a computational operation. +func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + cfg := metric.NewInt64GaugeConfig(options...) + const kind = InstrumentKindGauge + p := int64InstProvider{m} + i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) + if err != nil { + return i, err + } + + return i, validateInstrumentName(name) +} + +// int64ObservableInstrument returns a new observable identified by the Instrument. +// It registers callbacks for each reader's pipeline. +func (m *meter) int64ObservableInstrument(id Instrument, callbacks []metric.Int64Callback) (int64Observable, error) { + key := instID{ + Name: id.Name, + Description: id.Description, + Unit: id.Unit, + Kind: id.Kind, + } + if m.int64ObservableInsts.HasKey(key) && len(callbacks) > 0 { + warnRepeatedObservableCallbacks(id) + } + return m.int64ObservableInsts.Lookup(key, func() (int64Observable, error) { + inst := newInt64Observable(m, id.Kind, id.Name, id.Description, id.Unit) + for _, insert := range m.int64Resolver.inserters { + // Connect the measure functions for instruments in this pipeline with the + // callbacks for this pipeline. + in, err := insert.Instrument(id, insert.readerDefaultAggregation(id.Kind)) + if err != nil { + return inst, err + } + // Drop aggregation + if len(in) == 0 { + inst.dropAggregation = true + continue + } + inst.appendMeasures(in) + for _, cback := range callbacks { + inst := int64Observer{measures: in} + fn := cback + insert.addCallback(func(ctx context.Context) error { return fn(ctx, inst) }) + } + } + return inst, validateInstrumentName(id.Name) + }) +} + +// Int64ObservableCounter returns a new instrument identified by name and +// configured with options. The instrument is used to asynchronously record +// increasing int64 measurements once per a measurement collection cycle. +// Only the measurements recorded during the collection cycle are exported. 
+// +// If Int64ObservableCounter is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. +// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. +func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { + cfg := metric.NewInt64ObservableCounterConfig(options...) + id := Instrument{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindObservableCounter, + Scope: m.scope, + } + return m.int64ObservableInstrument(id, cfg.Callbacks()) +} + +// Int64ObservableUpDownCounter returns a new instrument identified by name and +// configured with options. The instrument is used to asynchronously record +// int64 measurements once per a measurement collection cycle. Only the +// measurements recorded during the collection cycle are exported. +// +// If Int64ObservableUpDownCounter is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. +// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. +func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { + cfg := metric.NewInt64ObservableUpDownCounterConfig(options...) + id := Instrument{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindObservableUpDownCounter, + Scope: m.scope, + } + return m.int64ObservableInstrument(id, cfg.Callbacks()) +} + +// Int64ObservableGauge returns a new instrument identified by name and +// configured with options. The instrument is used to asynchronously record +// instantaneous int64 measurements once per a measurement collection cycle. +// Only the measurements recorded during the collection cycle are exported. +// +// If Int64ObservableGauge is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. +// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. +func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { + cfg := metric.NewInt64ObservableGaugeConfig(options...) + id := Instrument{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindObservableGauge, + Scope: m.scope, + } + return m.int64ObservableInstrument(id, cfg.Callbacks()) +} + +// Float64Counter returns a new instrument identified by name and configured +// with options. The instrument is used to synchronously record increasing +// float64 measurements during a computational operation. +func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { + cfg := metric.NewFloat64CounterConfig(options...) + const kind = InstrumentKindCounter + p := float64InstProvider{m} + i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) + if err != nil { + return i, err + } + + return i, validateInstrumentName(name) +} + +// Float64UpDownCounter returns a new instrument identified by name and +// configured with options. 
The instrument is used to synchronously record +// float64 measurements during a computational operation. +func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { + cfg := metric.NewFloat64UpDownCounterConfig(options...) + const kind = InstrumentKindUpDownCounter + p := float64InstProvider{m} + i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) + if err != nil { + return i, err + } + + return i, validateInstrumentName(name) +} + +// Float64Histogram returns a new instrument identified by name and configured +// with options. The instrument is used to synchronously record the +// distribution of float64 measurements during a computational operation. +func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { + cfg := metric.NewFloat64HistogramConfig(options...) + p := float64InstProvider{m} + i, err := p.lookupHistogram(name, cfg) + if err != nil { + return i, err + } + + return i, validateInstrumentName(name) +} + +// Float64Gauge returns a new instrument identified by name and configured +// with options. The instrument is used to synchronously record the +// distribution of float64 measurements during a computational operation. +func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { + cfg := metric.NewFloat64GaugeConfig(options...) + const kind = InstrumentKindGauge + p := float64InstProvider{m} + i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) + if err != nil { + return i, err + } + + return i, validateInstrumentName(name) +} + +// float64ObservableInstrument returns a new observable identified by the Instrument. +// It registers callbacks for each reader's pipeline. +func (m *meter) float64ObservableInstrument(id Instrument, callbacks []metric.Float64Callback) (float64Observable, error) { + key := instID{ + Name: id.Name, + Description: id.Description, + Unit: id.Unit, + Kind: id.Kind, + } + if m.int64ObservableInsts.HasKey(key) && len(callbacks) > 0 { + warnRepeatedObservableCallbacks(id) + } + return m.float64ObservableInsts.Lookup(key, func() (float64Observable, error) { + inst := newFloat64Observable(m, id.Kind, id.Name, id.Description, id.Unit) + for _, insert := range m.float64Resolver.inserters { + // Connect the measure functions for instruments in this pipeline with the + // callbacks for this pipeline. + in, err := insert.Instrument(id, insert.readerDefaultAggregation(id.Kind)) + if err != nil { + return inst, err + } + // Drop aggregation + if len(in) == 0 { + inst.dropAggregation = true + continue + } + inst.appendMeasures(in) + for _, cback := range callbacks { + inst := float64Observer{measures: in} + fn := cback + insert.addCallback(func(ctx context.Context) error { return fn(ctx, inst) }) + } + } + return inst, validateInstrumentName(id.Name) + }) +} + +// Float64ObservableCounter returns a new instrument identified by name and +// configured with options. The instrument is used to asynchronously record +// increasing float64 measurements once per a measurement collection cycle. +// Only the measurements recorded during the collection cycle are exported. +// +// If Float64ObservableCounter is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. 
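Per the documentation above, callbacks attached through instrument options run once per collection cycle, and a repeated creation with the same name, description, and unit keeps only the first callbacks. A small sketch of a per-instrument callback, assuming the public API; the gauge name is a placeholder and runtime.NumGoroutine simply stands in for any cheap observation:

package main

import (
	"context"
	"log"
	"runtime"

	"go.opentelemetry.io/otel/metric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	provider := sdkmetric.NewMeterProvider()
	defer func() { _ = provider.Shutdown(context.Background()) }()

	meter := provider.Meter("example.com/runtime")

	// The callback runs once per collection cycle; only values observed
	// during that cycle are exported.
	_, err := meter.Int64ObservableGauge(
		"process.goroutines",
		metric.WithDescription("Number of live goroutines."),
		metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error {
			o.Observe(int64(runtime.NumGoroutine()))
			return nil
		}),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Creating the same gauge again with another callback would trigger
	// warnRepeatedObservableCallbacks and the new callback would be ignored;
	// RegisterCallback is the way to attach additional callbacks.
}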
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. +func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { + cfg := metric.NewFloat64ObservableCounterConfig(options...) + id := Instrument{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindObservableCounter, + Scope: m.scope, + } + return m.float64ObservableInstrument(id, cfg.Callbacks()) +} + +// Float64ObservableUpDownCounter returns a new instrument identified by name +// and configured with options. The instrument is used to asynchronously record +// float64 measurements once per a measurement collection cycle. Only the +// measurements recorded during the collection cycle are exported. +// +// If Float64ObservableUpDownCounter is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. +// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. +func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { + cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...) + id := Instrument{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindObservableUpDownCounter, + Scope: m.scope, + } + return m.float64ObservableInstrument(id, cfg.Callbacks()) +} + +// Float64ObservableGauge returns a new instrument identified by name and +// configured with options. The instrument is used to asynchronously record +// instantaneous float64 measurements once per a measurement collection cycle. +// Only the measurements recorded during the collection cycle are exported. +// +// If Float64ObservableGauge is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. +// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. +func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { + cfg := metric.NewFloat64ObservableGaugeConfig(options...) + id := Instrument{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindObservableGauge, + Scope: m.scope, + } + return m.float64ObservableInstrument(id, cfg.Callbacks()) +} + +func validateInstrumentName(name string) error { + if len(name) == 0 { + return fmt.Errorf("%w: %s: is empty", ErrInstrumentName, name) + } + if len(name) > 255 { + return fmt.Errorf("%w: %s: longer than 255 characters", ErrInstrumentName, name) + } + if !isAlpha([]rune(name)[0]) { + return fmt.Errorf("%w: %s: must start with a letter", ErrInstrumentName, name) + } + if len(name) == 1 { + return nil + } + for _, c := range name[1:] { + if !isAlphanumeric(c) && c != '_' && c != '.' 
&& c != '-' && c != '/' {
+ return fmt.Errorf("%w: %s: must only contain [A-Za-z0-9_.-/]", ErrInstrumentName, name)
+ }
+ }
+ return nil
+}
+
+func isAlpha(c rune) bool {
+ return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z')
+}
+
+func isAlphanumeric(c rune) bool {
+ return isAlpha(c) || ('0' <= c && c <= '9')
+}
+
+func warnRepeatedObservableCallbacks(id Instrument) {
+ inst := fmt.Sprintf(
+ "Instrument{Name: %q, Description: %q, Kind: %q, Unit: %q}",
+ id.Name, id.Description, "InstrumentKind"+id.Kind.String(), id.Unit,
+ )
+ global.Warn("Repeated observable instrument creation with callbacks. Ignoring new callbacks. Use meter.RegisterCallback and Registration.Unregister to manage callbacks.",
+ "instrument", inst,
+ )
+}
+
+// RegisterCallback registers f to be called each collection cycle so it will
+// make observations for insts during those cycles.
+//
+// The only instruments f can make observations for are insts. All other
+// observations will be dropped and an error will be logged.
+//
+// Only instruments from this meter can be registered with f; an error is
+// returned if other instruments are provided.
+//
+// Only observations made in the callback will be exported. Unlike synchronous
+// instruments, asynchronous callbacks can "forget" attribute sets that are no
+// longer relevant by omitting the observation during the callback.
+//
+// The returned Registration can be used to unregister f.
+func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) {
+ if len(insts) == 0 {
+ // Don't allocate an observer if not needed.
+ return noopRegister{}, nil
+ }
+
+ reg := newObserver()
+ var errs multierror
+ for _, inst := range insts {
+ // Unwrap any global.
+ if u, ok := inst.(interface {
+ Unwrap() metric.Observable
+ }); ok {
+ inst = u.Unwrap()
+ }
+
+ switch o := inst.(type) {
+ case int64Observable:
+ if err := o.registerable(m); err != nil {
+ if !errors.Is(err, errEmptyAgg) {
+ errs.append(err)
+ }
+ continue
+ }
+ reg.registerInt64(o.observablID)
+ case float64Observable:
+ if err := o.registerable(m); err != nil {
+ if !errors.Is(err, errEmptyAgg) {
+ errs.append(err)
+ }
+ continue
+ }
+ reg.registerFloat64(o.observablID)
+ default:
+ // Instrument external to the SDK.
+ return nil, fmt.Errorf("invalid observable: from different implementation")
+ }
+ }
+
+ err := errs.errorOrNil()
+ if reg.len() == 0 {
+ // All insts use drop aggregation or are invalid.
+ return noopRegister{}, err
+ }
+
+ // Some or all instruments were valid.
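RegisterCallback, implemented here, is the batch alternative to per-instrument callbacks: one function observes several instruments from the same meter each cycle, and the returned Registration detaches it again. A sketch under the same assumptions as the earlier examples; the connection-pool numbers are hard-coded stand-ins:

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/metric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	provider := sdkmetric.NewMeterProvider()
	defer func() { _ = provider.Shutdown(context.Background()) }()
	meter := provider.Meter("example.com/pool")

	used, err := meter.Int64ObservableUpDownCounter("pool.connections.used")
	if err != nil {
		log.Fatal(err)
	}
	idle, err := meter.Int64ObservableUpDownCounter("pool.connections.idle")
	if err != nil {
		log.Fatal(err)
	}

	// Both instruments must come from this meter; observations for any
	// instrument not listed here are dropped and logged as errors.
	reg, err := meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
		o.ObserveInt64(used, 7)
		o.ObserveInt64(idle, 3)
		return nil
	}, used, idle)
	if err != nil {
		log.Fatal(err)
	}

	// Unregister stops the callback from running on future collections.
	defer func() { _ = reg.Unregister() }()
}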
+ cback := func(ctx context.Context) error { return f(ctx, reg) } + return m.pipes.registerMultiCallback(cback), err +} + +type observer struct { + embedded.Observer + + float64 map[observablID[float64]]struct{} + int64 map[observablID[int64]]struct{} +} + +func newObserver() observer { + return observer{ + float64: make(map[observablID[float64]]struct{}), + int64: make(map[observablID[int64]]struct{}), + } +} + +func (r observer) len() int { + return len(r.float64) + len(r.int64) +} + +func (r observer) registerFloat64(id observablID[float64]) { + r.float64[id] = struct{}{} +} + +func (r observer) registerInt64(id observablID[int64]) { + r.int64[id] = struct{}{} +} + +var ( + errUnknownObserver = errors.New("unknown observable instrument") + errUnregObserver = errors.New("observable instrument not registered for callback") +) + +func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ...metric.ObserveOption) { + var oImpl float64Observable + switch conv := o.(type) { + case float64Observable: + oImpl = conv + case interface { + Unwrap() metric.Observable + }: + // Unwrap any global. + async := conv.Unwrap() + var ok bool + if oImpl, ok = async.(float64Observable); !ok { + global.Error(errUnknownObserver, "failed to record asynchronous") + return + } + default: + global.Error(errUnknownObserver, "failed to record") + return + } + + if _, registered := r.float64[oImpl.observablID]; !registered { + if !oImpl.dropAggregation { + global.Error(errUnregObserver, "failed to record", + "name", oImpl.name, + "description", oImpl.description, + "unit", oImpl.unit, + "number", fmt.Sprintf("%T", float64(0)), + ) + } + return + } + c := metric.NewObserveConfig(opts) + oImpl.observe(v, c.Attributes()) +} + +func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric.ObserveOption) { + var oImpl int64Observable + switch conv := o.(type) { + case int64Observable: + oImpl = conv + case interface { + Unwrap() metric.Observable + }: + // Unwrap any global. + async := conv.Unwrap() + var ok bool + if oImpl, ok = async.(int64Observable); !ok { + global.Error(errUnknownObserver, "failed to record asynchronous") + return + } + default: + global.Error(errUnknownObserver, "failed to record") + return + } + + if _, registered := r.int64[oImpl.observablID]; !registered { + if !oImpl.dropAggregation { + global.Error(errUnregObserver, "failed to record", + "name", oImpl.name, + "description", oImpl.description, + "unit", oImpl.unit, + "number", fmt.Sprintf("%T", int64(0)), + ) + } + return + } + c := metric.NewObserveConfig(opts) + oImpl.observe(v, c.Attributes()) +} + +type noopRegister struct{ embedded.Registration } + +func (noopRegister) Unregister() error { + return nil +} + +// int64InstProvider provides int64 OpenTelemetry instruments. +type int64InstProvider struct{ *meter } + +func (p int64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]aggregate.Measure[int64], error) { + inst := Instrument{ + Name: name, + Description: desc, + Unit: u, + Kind: kind, + Scope: p.scope, + } + return p.int64Resolver.Aggregators(inst) +} + +func (p int64InstProvider) histogramAggs(name string, cfg metric.Int64HistogramConfig) ([]aggregate.Measure[int64], error) { + boundaries := cfg.ExplicitBucketBoundaries() + aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err() + if aggError != nil { + // If boundaries are invalid, ignore them. 
+ boundaries = nil + } + inst := Instrument{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindHistogram, + Scope: p.scope, + } + measures, err := p.int64Resolver.HistogramAggregators(inst, boundaries) + return measures, errors.Join(aggError, err) +} + +// lookup returns the resolved instrumentImpl. +func (p int64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*int64Inst, error) { + return p.meter.int64Insts.Lookup(instID{ + Name: name, + Description: desc, + Unit: u, + Kind: kind, + }, func() (*int64Inst, error) { + aggs, err := p.aggs(kind, name, desc, u) + return &int64Inst{measures: aggs}, err + }) +} + +// lookupHistogram returns the resolved instrumentImpl. +func (p int64InstProvider) lookupHistogram(name string, cfg metric.Int64HistogramConfig) (*int64Inst, error) { + return p.meter.int64Insts.Lookup(instID{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindHistogram, + }, func() (*int64Inst, error) { + aggs, err := p.histogramAggs(name, cfg) + return &int64Inst{measures: aggs}, err + }) +} + +// float64InstProvider provides float64 OpenTelemetry instruments. +type float64InstProvider struct{ *meter } + +func (p float64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]aggregate.Measure[float64], error) { + inst := Instrument{ + Name: name, + Description: desc, + Unit: u, + Kind: kind, + Scope: p.scope, + } + return p.float64Resolver.Aggregators(inst) +} + +func (p float64InstProvider) histogramAggs(name string, cfg metric.Float64HistogramConfig) ([]aggregate.Measure[float64], error) { + boundaries := cfg.ExplicitBucketBoundaries() + aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err() + if aggError != nil { + // If boundaries are invalid, ignore them. + boundaries = nil + } + inst := Instrument{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindHistogram, + Scope: p.scope, + } + measures, err := p.float64Resolver.HistogramAggregators(inst, boundaries) + return measures, errors.Join(aggError, err) +} + +// lookup returns the resolved instrumentImpl. +func (p float64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*float64Inst, error) { + return p.meter.float64Insts.Lookup(instID{ + Name: name, + Description: desc, + Unit: u, + Kind: kind, + }, func() (*float64Inst, error) { + aggs, err := p.aggs(kind, name, desc, u) + return &float64Inst{measures: aggs}, err + }) +} + +// lookupHistogram returns the resolved instrumentImpl. 
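The lookup helpers in this file cache instruments by name, description, unit, and kind, so repeated creations with an identical identity resolve to the same backing instrument and feed a single metric stream. A sketch of that behavior, assuming the public API; the counter name and recorded values are illustrative:

package main

import (
	"context"
	"fmt"
	"log"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

func main() {
	ctx := context.Background()
	reader := sdkmetric.NewManualReader()
	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
	defer func() { _ = provider.Shutdown(ctx) }()

	meter := provider.Meter("example.com/jobs")

	// Two creations with the same identity resolve to the same cached instrument.
	a, _ := meter.Int64Counter("jobs.completed")
	b, _ := meter.Int64Counter("jobs.completed")
	a.Add(ctx, 2)
	b.Add(ctx, 3)

	var rm metricdata.ResourceMetrics
	if err := reader.Collect(ctx, &rm); err != nil {
		log.Fatal(err)
	}
	// Expect a single "jobs.completed" stream whose sum data point is 5.
	for _, sm := range rm.ScopeMetrics {
		for _, m := range sm.Metrics {
			fmt.Println(m.Name)
		}
	}
}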
+func (p float64InstProvider) lookupHistogram(name string, cfg metric.Float64HistogramConfig) (*float64Inst, error) { + return p.meter.float64Insts.Lookup(instID{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindHistogram, + }, func() (*float64Inst, error) { + aggs, err := p.histogramAggs(name, cfg) + return &float64Inst{measures: aggs}, err + }) +} + +type int64Observer struct { + embedded.Int64Observer + measures[int64] +} + +func (o int64Observer) Observe(val int64, opts ...metric.ObserveOption) { + c := metric.NewObserveConfig(opts) + o.observe(val, c.Attributes()) +} + +type float64Observer struct { + embedded.Float64Observer + measures[float64] +} + +func (o float64Observer) Observe(val float64, opts ...metric.ObserveOption) { + c := metric.NewObserveConfig(opts) + o.observe(val, c.Attributes()) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/README.md new file mode 100644 index 0000000000..d1390df1b5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/README.md @@ -0,0 +1,3 @@ +# SDK Metric data + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/metric/metricdata)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric/metricdata) diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go new file mode 100644 index 0000000000..d32cfc67d9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go @@ -0,0 +1,296 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata" + +import ( + "encoding/json" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/resource" +) + +// ResourceMetrics is a collection of ScopeMetrics and the associated Resource +// that created them. +type ResourceMetrics struct { + // Resource represents the entity that collected the metrics. + Resource *resource.Resource + // ScopeMetrics are the collection of metrics with unique Scopes. + ScopeMetrics []ScopeMetrics +} + +// ScopeMetrics is a collection of Metrics Produces by a Meter. +type ScopeMetrics struct { + // Scope is the Scope that the Meter was created with. + Scope instrumentation.Scope + // Metrics are a list of aggregations created by the Meter. + Metrics []Metrics +} + +// Metrics is a collection of one or more aggregated timeseries from an Instrument. +type Metrics struct { + // Name is the name of the Instrument that created this data. + Name string + // Description is the description of the Instrument, which can be used in documentation. + Description string + // Unit is the unit in which the Instrument reports. + Unit string + // Data is the aggregated data from an Instrument. + Data Aggregation +} + +// Aggregation is the store of data reported by an Instrument. +// It will be one of: Gauge, Sum, Histogram. +type Aggregation interface { + privateAggregation() +} + +// Gauge represents a measurement of the current value of an instrument. +type Gauge[N int64 | float64] struct { + // DataPoints are the individual aggregated measurements with unique + // Attributes. + DataPoints []DataPoint[N] +} + +func (Gauge[N]) privateAggregation() {} + +// Sum represents the sum of all measurements of values from an instrument. 
+type Sum[N int64 | float64] struct { + // DataPoints are the individual aggregated measurements with unique + // Attributes. + DataPoints []DataPoint[N] + // Temporality describes if the aggregation is reported as the change from the + // last report time, or the cumulative changes since a fixed start time. + Temporality Temporality + // IsMonotonic represents if this aggregation only increases or decreases. + IsMonotonic bool +} + +func (Sum[N]) privateAggregation() {} + +// DataPoint is a single data point in a timeseries. +type DataPoint[N int64 | float64] struct { + // Attributes is the set of key value pairs that uniquely identify the + // timeseries. + Attributes attribute.Set + // StartTime is when the timeseries was started. (optional) + StartTime time.Time `json:",omitempty"` + // Time is the time when the timeseries was recorded. (optional) + Time time.Time `json:",omitempty"` + // Value is the value of this data point. + Value N + + // Exemplars is the sampled Exemplars collected during the timeseries. + Exemplars []Exemplar[N] `json:",omitempty"` +} + +// Histogram represents the histogram of all measurements of values from an instrument. +type Histogram[N int64 | float64] struct { + // DataPoints are the individual aggregated measurements with unique + // Attributes. + DataPoints []HistogramDataPoint[N] + // Temporality describes if the aggregation is reported as the change from the + // last report time, or the cumulative changes since a fixed start time. + Temporality Temporality +} + +func (Histogram[N]) privateAggregation() {} + +// HistogramDataPoint is a single histogram data point in a timeseries. +type HistogramDataPoint[N int64 | float64] struct { + // Attributes is the set of key value pairs that uniquely identify the + // timeseries. + Attributes attribute.Set + // StartTime is when the timeseries was started. + StartTime time.Time + // Time is the time when the timeseries was recorded. + Time time.Time + + // Count is the number of updates this histogram has been calculated with. + Count uint64 + // Bounds are the upper bounds of the buckets of the histogram. Because the + // last boundary is +infinity this one is implied. + Bounds []float64 + // BucketCounts is the count of each of the buckets. + BucketCounts []uint64 + + // Min is the minimum value recorded. (optional) + Min Extrema[N] + // Max is the maximum value recorded. (optional) + Max Extrema[N] + // Sum is the sum of the values recorded. + Sum N + + // Exemplars is the sampled Exemplars collected during the timeseries. + Exemplars []Exemplar[N] `json:",omitempty"` +} + +// ExponentialHistogram represents the histogram of all measurements of values from an instrument. +type ExponentialHistogram[N int64 | float64] struct { + // DataPoints are the individual aggregated measurements with unique + // attributes. + DataPoints []ExponentialHistogramDataPoint[N] + // Temporality describes if the aggregation is reported as the change from the + // last report time, or the cumulative changes since a fixed start time. + Temporality Temporality +} + +func (ExponentialHistogram[N]) privateAggregation() {} + +// ExponentialHistogramDataPoint is a single exponential histogram data point in a timeseries. +type ExponentialHistogramDataPoint[N int64 | float64] struct { + // Attributes is the set of key value pairs that uniquely identify the + // timeseries. + Attributes attribute.Set + // StartTime is when the timeseries was started. + StartTime time.Time + // Time is the time when the timeseries was recorded. 
+ Time time.Time + + // Count is the number of updates this histogram has been calculated with. + Count uint64 + // Min is the minimum value recorded. (optional) + Min Extrema[N] + // Max is the maximum value recorded. (optional) + Max Extrema[N] + // Sum is the sum of the values recorded. + Sum N + + // Scale describes the resolution of the histogram. Boundaries are + // located at powers of the base, where: + // + // base = 2 ^ (2 ^ -Scale) + Scale int32 + // ZeroCount is the number of values whose absolute value + // is less than or equal to [ZeroThreshold]. + // When ZeroThreshold is 0, this is the number of values that + // cannot be expressed using the standard exponential formula + // as well as values that have been rounded to zero. + // ZeroCount represents the special zero count bucket. + ZeroCount uint64 + + // PositiveBucket is range of positive value bucket counts. + PositiveBucket ExponentialBucket + // NegativeBucket is range of negative value bucket counts. + NegativeBucket ExponentialBucket + + // ZeroThreshold is the width of the zero region. Where the zero region is + // defined as the closed interval [-ZeroThreshold, ZeroThreshold]. + ZeroThreshold float64 + + // Exemplars is the sampled Exemplars collected during the timeseries. + Exemplars []Exemplar[N] `json:",omitempty"` +} + +// ExponentialBucket are a set of bucket counts, encoded in a contiguous array +// of counts. +type ExponentialBucket struct { + // Offset is the bucket index of the first entry in the Counts slice. + Offset int32 + // Counts is an slice where Counts[i] carries the count of the bucket at + // index (Offset+i). Counts[i] is the count of values greater than + // base^(Offset+i) and less than or equal to base^(Offset+i+1). + Counts []uint64 +} + +// Extrema is the minimum or maximum value of a dataset. +type Extrema[N int64 | float64] struct { + value N + valid bool +} + +// MarshalText converts the Extrema value to text. +func (e Extrema[N]) MarshalText() ([]byte, error) { + if !e.valid { + return json.Marshal(nil) + } + return json.Marshal(e.value) +} + +// MarshalJSON converts the Extrema value to JSON number. +func (e *Extrema[N]) MarshalJSON() ([]byte, error) { + return e.MarshalText() +} + +// NewExtrema returns an Extrema set to v. +func NewExtrema[N int64 | float64](v N) Extrema[N] { + return Extrema[N]{value: v, valid: true} +} + +// Value returns the Extrema value and true if the Extrema is defined. +// Otherwise, if the Extrema is its zero-value, defined will be false. +func (e Extrema[N]) Value() (v N, defined bool) { + return e.value, e.valid +} + +// Exemplar is a measurement sampled from a timeseries providing a typical +// example. +type Exemplar[N int64 | float64] struct { + // FilteredAttributes are the attributes recorded with the measurement but + // filtered out of the timeseries' aggregated data. + FilteredAttributes []attribute.KeyValue + // Time is the time when the measurement was recorded. + Time time.Time + // Value is the measured value. + Value N + // SpanID is the ID of the span that was active during the measurement. If + // no span was active or the span was not sampled this will be empty. + SpanID []byte `json:",omitempty"` + // TraceID is the ID of the trace the active span belonged to during the + // measurement. If no span was active or the span was not sampled this will + // be empty. 
+ TraceID []byte `json:",omitempty"` +} + +// Summary metric data are used to convey quantile summaries, +// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary) +// data type. +// +// These data points cannot always be merged in a meaningful way. The Summary +// type is only used by bridges from other metrics libraries, and cannot be +// produced using OpenTelemetry instrumentation. +type Summary struct { + // DataPoints are the individual aggregated measurements with unique + // attributes. + DataPoints []SummaryDataPoint +} + +func (Summary) privateAggregation() {} + +// SummaryDataPoint is a single data point in a timeseries that describes the +// time-varying values of a Summary metric. +type SummaryDataPoint struct { + // Attributes is the set of key value pairs that uniquely identify the + // timeseries. + Attributes attribute.Set + + // StartTime is when the timeseries was started. + StartTime time.Time + // Time is the time when the timeseries was recorded. + Time time.Time + + // Count is the number of updates this summary has been calculated with. + Count uint64 + + // Sum is the sum of the values recorded. + Sum float64 + + // (Optional) list of values at different quantiles of the distribution calculated + // from the current snapshot. The quantiles must be strictly increasing. + QuantileValues []QuantileValue +} + +// QuantileValue is the value at a given quantile of a summary. +type QuantileValue struct { + // Quantile is the quantile of this value. + // + // Must be in the interval [0.0, 1.0]. + Quantile float64 + + // Value is the value at the given quantile of a summary. + // + // Quantile values must NOT be negative. + Value float64 +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go new file mode 100644 index 0000000000..187713dadf --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go @@ -0,0 +1,30 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate stringer -type=Temporality + +package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata" + +// Temporality defines the window that an aggregation was calculated over. +type Temporality uint8 + +const ( + // undefinedTemporality represents an unset Temporality. + //nolint:deadcode,unused,varcheck + undefinedTemporality Temporality = iota + + // CumulativeTemporality defines a measurement interval that continues to + // expand forward in time from a starting point. New measurements are + // added to all previous measurements since a start time. + CumulativeTemporality + + // DeltaTemporality defines a measurement interval that resets each cycle. + // Measurements from one cycle are recorded independently, measurements + // from other cycles do not affect them. + DeltaTemporality +) + +// MarshalText returns the byte encoded of t. +func (t Temporality) MarshalText() ([]byte, error) { + return []byte(t.String()), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go new file mode 100644 index 0000000000..4da833cdce --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=Temporality"; DO NOT EDIT. 
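The metricdata types above are the shape a Reader hands back from Collect. A sketch that collects into a ResourceMetrics and inspects a Sum's temporality and monotonicity, assuming a ManualReader configured with the WithTemporalitySelector option from this SDK; the instrument name is a placeholder:

package main

import (
	"context"
	"fmt"
	"log"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

func main() {
	ctx := context.Background()

	// Report deltas instead of the default cumulative temporality.
	reader := sdkmetric.NewManualReader(
		sdkmetric.WithTemporalitySelector(func(sdkmetric.InstrumentKind) metricdata.Temporality {
			return metricdata.DeltaTemporality
		}),
	)
	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
	defer func() { _ = provider.Shutdown(ctx) }()

	counter, err := provider.Meter("example.com/io").Int64Counter("bytes.read")
	if err != nil {
		log.Fatal(err)
	}
	counter.Add(ctx, 1024)

	var rm metricdata.ResourceMetrics
	if err := reader.Collect(ctx, &rm); err != nil {
		log.Fatal(err)
	}

	for _, sm := range rm.ScopeMetrics {
		for _, m := range sm.Metrics {
			// Data is one of Gauge, Sum, Histogram, ExponentialHistogram, or Summary.
			if sum, ok := m.Data.(metricdata.Sum[int64]); ok {
				for _, dp := range sum.DataPoints {
					fmt.Println(m.Name, sum.Temporality, sum.IsMonotonic, dp.Value)
				}
			}
		}
	}
}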
+ +package metricdata + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[undefinedTemporality-0] + _ = x[CumulativeTemporality-1] + _ = x[DeltaTemporality-2] +} + +const _Temporality_name = "undefinedTemporalityCumulativeTemporalityDeltaTemporality" + +var _Temporality_index = [...]uint8{0, 20, 41, 57} + +func (i Temporality) String() string { + if i >= Temporality(len(_Temporality_index)-1) { + return "Temporality(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Temporality_name[_Temporality_index[i]:_Temporality_index[i+1]] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go new file mode 100644 index 0000000000..67ee1b11a2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go @@ -0,0 +1,370 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// Default periodic reader timing. +const ( + defaultTimeout = time.Millisecond * 30000 + defaultInterval = time.Millisecond * 60000 +) + +// periodicReaderConfig contains configuration options for a PeriodicReader. +type periodicReaderConfig struct { + interval time.Duration + timeout time.Duration + producers []Producer +} + +// newPeriodicReaderConfig returns a periodicReaderConfig configured with +// options. +func newPeriodicReaderConfig(options []PeriodicReaderOption) periodicReaderConfig { + c := periodicReaderConfig{ + interval: envDuration(envInterval, defaultInterval), + timeout: envDuration(envTimeout, defaultTimeout), + } + for _, o := range options { + c = o.applyPeriodic(c) + } + return c +} + +// PeriodicReaderOption applies a configuration option value to a PeriodicReader. +type PeriodicReaderOption interface { + applyPeriodic(periodicReaderConfig) periodicReaderConfig +} + +// periodicReaderOptionFunc applies a set of options to a periodicReaderConfig. +type periodicReaderOptionFunc func(periodicReaderConfig) periodicReaderConfig + +// applyPeriodic returns a periodicReaderConfig with option(s) applied. +func (o periodicReaderOptionFunc) applyPeriodic(conf periodicReaderConfig) periodicReaderConfig { + return o(conf) +} + +// WithTimeout configures the time a PeriodicReader waits for an export to +// complete before canceling it. This includes an export which occurs as part +// of Shutdown or ForceFlush if the user passed context does not have a +// deadline. If the user passed context does have a deadline, it will be used +// instead. +// +// This option overrides any value set for the +// OTEL_METRIC_EXPORT_TIMEOUT environment variable. +// +// If this option is not used or d is less than or equal to zero, 30 seconds +// is used as the default. +func WithTimeout(d time.Duration) PeriodicReaderOption { + return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig { + if d <= 0 { + return conf + } + conf.timeout = d + return conf + }) +} + +// WithInterval configures the intervening time between exports for a +// PeriodicReader. 
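WithTimeout and WithInterval above are the options most deployments adjust. A sketch wiring a PeriodicReader to an exporter with both overridden, assuming the stdoutmetric exporter from go.opentelemetry.io/otel/exporters/stdout/stdoutmetric; any Exporter implementation is wired the same way:

package main

import (
	"context"
	"log"
	"time"

	"go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	ctx := context.Background()

	exporter, err := stdoutmetric.New()
	if err != nil {
		log.Fatal(err)
	}

	// Collect and export every 15s, canceling any attempt that exceeds 5s.
	// These options take precedence over OTEL_METRIC_EXPORT_INTERVAL and
	// OTEL_METRIC_EXPORT_TIMEOUT.
	reader := sdkmetric.NewPeriodicReader(
		exporter,
		sdkmetric.WithInterval(15*time.Second),
		sdkmetric.WithTimeout(5*time.Second),
	)

	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
	// Shutdown flushes any pending telemetry and stops the export loop.
	defer func() {
		if err := provider.Shutdown(ctx); err != nil {
			log.Print(err)
		}
	}()

	counter, err := provider.Meter("example.com/app").Int64Counter("app.events")
	if err != nil {
		log.Fatal(err)
	}
	counter.Add(ctx, 1)
}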
+// +// This option overrides any value set for the +// OTEL_METRIC_EXPORT_INTERVAL environment variable. +// +// If this option is not used or d is less than or equal to zero, 60 seconds +// is used as the default. +func WithInterval(d time.Duration) PeriodicReaderOption { + return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig { + if d <= 0 { + return conf + } + conf.interval = d + return conf + }) +} + +// NewPeriodicReader returns a Reader that collects and exports metric data to +// the exporter at a defined interval. By default, the returned Reader will +// collect and export data every 60 seconds, and will cancel any attempts that +// exceed 30 seconds, collect and export combined. The collect and export time +// are not counted towards the interval between attempts. +// +// The Collect method of the returned Reader continues to gather and return +// metric data to the user. It will not automatically send that data to the +// exporter. That is left to the user to accomplish. +func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) *PeriodicReader { + conf := newPeriodicReaderConfig(options) + ctx, cancel := context.WithCancel(context.Background()) + r := &PeriodicReader{ + interval: conf.interval, + timeout: conf.timeout, + exporter: exporter, + flushCh: make(chan chan error), + cancel: cancel, + done: make(chan struct{}), + rmPool: sync.Pool{ + New: func() interface{} { + return &metricdata.ResourceMetrics{} + }, + }, + } + r.externalProducers.Store(conf.producers) + + go func() { + defer func() { close(r.done) }() + r.run(ctx, conf.interval) + }() + + return r +} + +// PeriodicReader is a Reader that continuously collects and exports metric +// data at a set interval. +type PeriodicReader struct { + sdkProducer atomic.Value + + mu sync.Mutex + isShutdown bool + externalProducers atomic.Value + + interval time.Duration + timeout time.Duration + exporter Exporter + flushCh chan chan error + + done chan struct{} + cancel context.CancelFunc + shutdownOnce sync.Once + + rmPool sync.Pool +} + +// Compile time check the periodicReader implements Reader and is comparable. +var _ = map[Reader]struct{}{&PeriodicReader{}: {}} + +// newTicker allows testing override. +var newTicker = time.NewTicker + +// run continuously collects and exports metric data at the specified +// interval. This will run until ctx is canceled or times out. +func (r *PeriodicReader) run(ctx context.Context, interval time.Duration) { + ticker := newTicker(interval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + err := r.collectAndExport(ctx) + if err != nil { + otel.Handle(err) + } + case errCh := <-r.flushCh: + errCh <- r.collectAndExport(ctx) + ticker.Reset(interval) + case <-ctx.Done(): + return + } + } +} + +// register registers p as the producer of this reader. +func (r *PeriodicReader) register(p sdkProducer) { + // Only register once. If producer is already set, do nothing. + if !r.sdkProducer.CompareAndSwap(nil, produceHolder{produce: p.produce}) { + msg := "did not register periodic reader" + global.Error(errDuplicateRegister, msg) + } +} + +// temporality reports the Temporality for the instrument kind provided. +func (r *PeriodicReader) temporality(kind InstrumentKind) metricdata.Temporality { + return r.exporter.Temporality(kind) +} + +// aggregation returns what Aggregation to use for kind. +func (r *PeriodicReader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive // import-shadow for method scoped by type. 
+ return r.exporter.Aggregation(kind) +} + +// collectAndExport gather all metric data related to the periodicReader r from +// the SDK and exports it with r's exporter. +func (r *PeriodicReader) collectAndExport(ctx context.Context) error { + ctx, cancel := context.WithTimeout(ctx, r.timeout) + defer cancel() + + // TODO (#3047): Use a sync.Pool or persistent pointer instead of allocating rm every Collect. + rm := r.rmPool.Get().(*metricdata.ResourceMetrics) + err := r.Collect(ctx, rm) + if err == nil { + err = r.export(ctx, rm) + } + r.rmPool.Put(rm) + return err +} + +// Collect gathers all metric data related to the Reader from +// the SDK and other Producers and stores the result in rm. The metric +// data is not exported to the configured exporter, it is left to the caller to +// handle that if desired. +// +// Collect will return an error if called after shutdown. +// Collect will return an error if rm is a nil ResourceMetrics. +// Collect will return an error if the context's Done channel is closed. +// +// This method is safe to call concurrently. +func (r *PeriodicReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error { + if rm == nil { + return errors.New("periodic reader: *metricdata.ResourceMetrics is nil") + } + // TODO (#3047): When collect is updated to accept output as param, pass rm. + return r.collect(ctx, r.sdkProducer.Load(), rm) +} + +// collect unwraps p as a produceHolder and returns its produce results. +func (r *PeriodicReader) collect(ctx context.Context, p interface{}, rm *metricdata.ResourceMetrics) error { + if p == nil { + return ErrReaderNotRegistered + } + + ph, ok := p.(produceHolder) + if !ok { + // The atomic.Value is entirely in the periodicReader's control so + // this should never happen. In the unforeseen case that this does + // happen, return an error instead of panicking so a users code does + // not halt in the processes. + err := fmt.Errorf("periodic reader: invalid producer: %T", p) + return err + } + + err := ph.produce(ctx, rm) + if err != nil { + return err + } + var errs []error + for _, producer := range r.externalProducers.Load().([]Producer) { + externalMetrics, err := producer.Produce(ctx) + if err != nil { + errs = append(errs, err) + } + rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...) + } + + global.Debug("PeriodicReader collection", "Data", rm) + + return unifyErrors(errs) +} + +// export exports metric data m using r's exporter. +func (r *PeriodicReader) export(ctx context.Context, m *metricdata.ResourceMetrics) error { + return r.exporter.Export(ctx, m) +} + +// ForceFlush flushes pending telemetry. +// +// This method is safe to call concurrently. +func (r *PeriodicReader) ForceFlush(ctx context.Context) error { + // Prioritize the ctx timeout if it is set. + if _, ok := ctx.Deadline(); !ok { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, r.timeout) + defer cancel() + } + + errCh := make(chan error, 1) + select { + case r.flushCh <- errCh: + select { + case err := <-errCh: + if err != nil { + return err + } + close(errCh) + case <-ctx.Done(): + return ctx.Err() + } + case <-r.done: + return ErrReaderShutdown + case <-ctx.Done(): + return ctx.Err() + } + return r.exporter.ForceFlush(ctx) +} + +// Shutdown flushes pending telemetry and then stops the export pipeline. +// +// This method is safe to call concurrently. 
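ForceFlush and Shutdown, documented above, are the two lifecycle calls that push pending data out of a PeriodicReader. A short sketch, again assuming the stdoutmetric exporter; the two-second deadline is arbitrary:

package main

import (
	"context"
	"log"
	"time"

	"go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	exporter, err := stdoutmetric.New()
	if err != nil {
		log.Fatal(err)
	}
	provider := sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(sdkmetric.NewPeriodicReader(exporter)),
	)

	// ... record measurements ...

	// Push anything pending before a checkpoint (for example, just before a
	// serverless handler returns). A deadline already set on ctx takes
	// priority over the reader's configured timeout.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	if err := provider.ForceFlush(ctx); err != nil {
		log.Print(err)
	}

	// Shutdown flushes once more and stops the export loop; the provider
	// must not be used afterwards.
	if err := provider.Shutdown(ctx); err != nil {
		log.Print(err)
	}
}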
+func (r *PeriodicReader) Shutdown(ctx context.Context) error { + err := ErrReaderShutdown + r.shutdownOnce.Do(func() { + // Prioritize the ctx timeout if it is set. + if _, ok := ctx.Deadline(); !ok { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, r.timeout) + defer cancel() + } + + // Stop the run loop. + r.cancel() + <-r.done + + // Any future call to Collect will now return ErrReaderShutdown. + ph := r.sdkProducer.Swap(produceHolder{ + produce: shutdownProducer{}.produce, + }) + + if ph != nil { // Reader was registered. + // Flush pending telemetry. + m := r.rmPool.Get().(*metricdata.ResourceMetrics) + err = r.collect(ctx, ph, m) + if err == nil { + err = r.export(ctx, m) + } + r.rmPool.Put(m) + } + + sErr := r.exporter.Shutdown(ctx) + if err == nil || errors.Is(err, ErrReaderShutdown) { + err = sErr + } + + r.mu.Lock() + defer r.mu.Unlock() + r.isShutdown = true + // release references to Producer(s) + r.externalProducers.Store([]Producer{}) + }) + return err +} + +// MarshalLog returns logging data about the PeriodicReader. +func (r *PeriodicReader) MarshalLog() interface{} { + r.mu.Lock() + down := r.isShutdown + r.mu.Unlock() + return struct { + Type string + Exporter Exporter + Registered bool + Shutdown bool + Interval time.Duration + Timeout time.Duration + }{ + Type: "PeriodicReader", + Exporter: r.exporter, + Registered: r.sdkProducer.Load() != nil, + Shutdown: down, + Interval: r.interval, + Timeout: r.timeout, + } +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go new file mode 100644 index 0000000000..823bf2fe3d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go @@ -0,0 +1,655 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "container/list" + "context" + "errors" + "fmt" + "strings" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/metric/internal" + "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + "go.opentelemetry.io/otel/sdk/metric/internal/x" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/resource" +) + +var ( + errCreatingAggregators = errors.New("could not create all aggregators") + errIncompatibleAggregation = errors.New("incompatible aggregation") + errUnknownAggregation = errors.New("unrecognized aggregation") +) + +// instrumentSync is a synchronization point between a pipeline and an +// instrument's aggregate function. +type instrumentSync struct { + name string + description string + unit string + compAgg aggregate.ComputeAggregation +} + +func newPipeline(res *resource.Resource, reader Reader, views []View) *pipeline { + if res == nil { + res = resource.Empty() + } + return &pipeline{ + resource: res, + reader: reader, + views: views, + // aggregations is lazy allocated when needed. + } +} + +// pipeline connects all of the instruments created by a meter provider to a Reader. +// This is the object that will be `Reader.register()` when a meter provider is created. +// +// As instruments are created the instrument should be checked if it exists in +// the views of a the Reader, and if so each aggregate function should be added +// to the pipeline. 
+type pipeline struct { + resource *resource.Resource + + reader Reader + views []View + + sync.Mutex + aggregations map[instrumentation.Scope][]instrumentSync + callbacks []func(context.Context) error + multiCallbacks list.List +} + +// addSync adds the instrumentSync to pipeline p with scope. This method is not +// idempotent. Duplicate calls will result in duplicate additions, it is the +// callers responsibility to ensure this is called with unique values. +func (p *pipeline) addSync(scope instrumentation.Scope, iSync instrumentSync) { + p.Lock() + defer p.Unlock() + if p.aggregations == nil { + p.aggregations = map[instrumentation.Scope][]instrumentSync{ + scope: {iSync}, + } + return + } + p.aggregations[scope] = append(p.aggregations[scope], iSync) +} + +type multiCallback func(context.Context) error + +// addMultiCallback registers a multi-instrument callback to be run when +// `produce()` is called. +func (p *pipeline) addMultiCallback(c multiCallback) (unregister func()) { + p.Lock() + defer p.Unlock() + e := p.multiCallbacks.PushBack(c) + return func() { + p.Lock() + p.multiCallbacks.Remove(e) + p.Unlock() + } +} + +// produce returns aggregated metrics from a single collection. +// +// This method is safe to call concurrently. +func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) error { + p.Lock() + defer p.Unlock() + + var errs multierror + for _, c := range p.callbacks { + // TODO make the callbacks parallel. ( #3034 ) + if err := c(ctx); err != nil { + errs.append(err) + } + if err := ctx.Err(); err != nil { + rm.Resource = nil + rm.ScopeMetrics = rm.ScopeMetrics[:0] + return err + } + } + for e := p.multiCallbacks.Front(); e != nil; e = e.Next() { + // TODO make the callbacks parallel. ( #3034 ) + f := e.Value.(multiCallback) + if err := f(ctx); err != nil { + errs.append(err) + } + if err := ctx.Err(); err != nil { + // This means the context expired before we finished running callbacks. + rm.Resource = nil + rm.ScopeMetrics = rm.ScopeMetrics[:0] + return err + } + } + + rm.Resource = p.resource + rm.ScopeMetrics = internal.ReuseSlice(rm.ScopeMetrics, len(p.aggregations)) + + i := 0 + for scope, instruments := range p.aggregations { + rm.ScopeMetrics[i].Metrics = internal.ReuseSlice(rm.ScopeMetrics[i].Metrics, len(instruments)) + j := 0 + for _, inst := range instruments { + data := rm.ScopeMetrics[i].Metrics[j].Data + if n := inst.compAgg(&data); n > 0 { + rm.ScopeMetrics[i].Metrics[j].Name = inst.name + rm.ScopeMetrics[i].Metrics[j].Description = inst.description + rm.ScopeMetrics[i].Metrics[j].Unit = inst.unit + rm.ScopeMetrics[i].Metrics[j].Data = data + j++ + } + } + rm.ScopeMetrics[i].Metrics = rm.ScopeMetrics[i].Metrics[:j] + if len(rm.ScopeMetrics[i].Metrics) > 0 { + rm.ScopeMetrics[i].Scope = scope + i++ + } + } + + rm.ScopeMetrics = rm.ScopeMetrics[:i] + + return errs.errorOrNil() +} + +// inserter facilitates inserting of new instruments from a single scope into a +// pipeline. +type inserter[N int64 | float64] struct { + // aggregators is a cache that holds aggregate function inputs whose + // outputs have been inserted into the underlying reader pipeline. This + // cache ensures no duplicate aggregate functions are inserted into the + // reader pipeline and if a new request during an instrument creation asks + // for the same aggregate function input the same instance is returned. 
+ aggregators *cache[instID, aggVal[N]] + + // views is a cache that holds instrument identifiers for all the + // instruments a Meter has created, it is provided from the Meter that owns + // this inserter. This cache ensures during the creation of instruments + // with the same name but different options (e.g. description, unit) a + // warning message is logged. + views *cache[string, instID] + + pipeline *pipeline +} + +func newInserter[N int64 | float64](p *pipeline, vc *cache[string, instID]) *inserter[N] { + if vc == nil { + vc = &cache[string, instID]{} + } + return &inserter[N]{ + aggregators: &cache[instID, aggVal[N]]{}, + views: vc, + pipeline: p, + } +} + +// Instrument inserts the instrument inst with instUnit into a pipeline. All +// views the pipeline contains are matched against, and any matching view that +// creates a unique aggregate function will have its output inserted into the +// pipeline and its input included in the returned slice. +// +// The returned aggregate function inputs are ensured to be deduplicated and +// unique. If another view in another pipeline that is cached by this +// inserter's cache has already inserted the same aggregate function for the +// same instrument, that functions input instance is returned. +// +// If another instrument has already been inserted by this inserter, or any +// other using the same cache, and it conflicts with the instrument being +// inserted in this call, an aggregate function input matching the arguments +// will still be returned but an Info level log message will also be logged to +// the OTel global logger. +// +// If the passed instrument would result in an incompatible aggregate function, +// an error is returned and that aggregate function output is not inserted nor +// is its input returned. +// +// If an instrument is determined to use a Drop aggregation, that instrument is +// not inserted nor returned. +func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) ([]aggregate.Measure[N], error) { + var ( + matched bool + measures []aggregate.Measure[N] + ) + + errs := &multierror{wrapped: errCreatingAggregators} + seen := make(map[uint64]struct{}) + for _, v := range i.pipeline.views { + stream, match := v(inst) + if !match { + continue + } + matched = true + in, id, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) + if err != nil { + errs.append(err) + } + if in == nil { // Drop aggregation. + continue + } + if _, ok := seen[id]; ok { + // This aggregate function has already been added. + continue + } + seen[id] = struct{}{} + measures = append(measures, in) + } + + if matched { + return measures, errs.errorOrNil() + } + + // Apply implicit default view if no explicit matched. + stream := Stream{ + Name: inst.Name, + Description: inst.Description, + Unit: inst.Unit, + } + in, _, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) + if err != nil { + errs.append(err) + } + if in != nil { + // Ensured to have not seen given matched was false. + measures = append(measures, in) + } + return measures, errs.errorOrNil() +} + +// addCallback registers a single instrument callback to be run when +// `produce()` is called. +func (i *inserter[N]) addCallback(cback func(context.Context) error) { + i.pipeline.Lock() + defer i.pipeline.Unlock() + i.pipeline.callbacks = append(i.pipeline.callbacks, cback) +} + +var aggIDCount uint64 + +// aggVal is the cached value in an aggregators cache. 
+type aggVal[N int64 | float64] struct { + ID uint64 + Measure aggregate.Measure[N] + Err error +} + +// readerDefaultAggregation returns the default aggregation for the instrument +// kind based on the reader's aggregation preferences. This is used unless the +// aggregation is overridden with a view. +func (i *inserter[N]) readerDefaultAggregation(kind InstrumentKind) Aggregation { + aggregation := i.pipeline.reader.aggregation(kind) + switch aggregation.(type) { + case nil, AggregationDefault: + // If the reader returns default or nil use the default selector. + aggregation = DefaultAggregationSelector(kind) + default: + // Deep copy and validate before using. + aggregation = aggregation.copy() + if err := aggregation.err(); err != nil { + orig := aggregation + aggregation = DefaultAggregationSelector(kind) + global.Error( + err, "using default aggregation instead", + "aggregation", orig, + "replacement", aggregation, + ) + } + } + return aggregation +} + +// cachedAggregator returns the appropriate aggregate input and output +// functions for an instrument configuration. If the exact instrument has been +// created within the inst.Scope, those aggregate function instances will be +// returned. Otherwise, new computed aggregate functions will be cached and +// returned. +// +// If the instrument configuration conflicts with an instrument that has +// already been created (e.g. description, unit, data type) a warning will be +// logged at the "Info" level with the global OTel logger. Valid new aggregate +// functions for the instrument configuration will still be returned without an +// error. +// +// If the instrument defines an unknown or incompatible aggregation, an error +// is returned. +func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind InstrumentKind, stream Stream, readerAggregation Aggregation) (meas aggregate.Measure[N], aggID uint64, err error) { + switch stream.Aggregation.(type) { + case nil: + // The aggregation was not overridden with a view. Use the aggregation + // provided by the reader. + stream.Aggregation = readerAggregation + case AggregationDefault: + // The view explicitly requested the default aggregation. + stream.Aggregation = DefaultAggregationSelector(kind) + } + + if err := isAggregatorCompatible(kind, stream.Aggregation); err != nil { + return nil, 0, fmt.Errorf( + "creating aggregator with instrumentKind: %d, aggregation %v: %w", + kind, stream.Aggregation, err, + ) + } + + id := i.instID(kind, stream) + // If there is a conflict, the specification says the view should + // still be applied and a warning should be logged. + i.logConflict(id) + + // If there are requests for the same instrument with different name + // casing, the first-seen needs to be returned. Use a normalize ID for the + // cache lookup to ensure the correct comparison. + normID := id.normalize() + cv := i.aggregators.Lookup(normID, func() aggVal[N] { + b := aggregate.Builder[N]{ + Temporality: i.pipeline.reader.temporality(kind), + ReservoirFunc: reservoirFunc[N](stream.Aggregation), + } + b.Filter = stream.AttributeFilter + // A value less than or equal to zero will disable the aggregation + // limits for the builder (an all the created aggregates). + // CardinalityLimit.Lookup returns 0 by default if unset (or + // unrecognized input). Use that value directly. 
+ b.AggregationLimit, _ = x.CardinalityLimit.Lookup() + + in, out, err := i.aggregateFunc(b, stream.Aggregation, kind) + if err != nil { + return aggVal[N]{0, nil, err} + } + if in == nil { // Drop aggregator. + return aggVal[N]{0, nil, nil} + } + i.pipeline.addSync(scope, instrumentSync{ + // Use the first-seen name casing for this and all subsequent + // requests of this instrument. + name: stream.Name, + description: stream.Description, + unit: stream.Unit, + compAgg: out, + }) + id := atomic.AddUint64(&aggIDCount, 1) + return aggVal[N]{id, in, err} + }) + return cv.Measure, cv.ID, cv.Err +} + +// logConflict validates if an instrument with the same case-insensitive name +// as id has already been created. If that instrument conflicts with id, a +// warning is logged. +func (i *inserter[N]) logConflict(id instID) { + // The API specification defines names as case-insensitive. If there is a + // different casing of a name it needs to be a conflict. + name := id.normalize().Name + existing := i.views.Lookup(name, func() instID { return id }) + if id == existing { + return + } + + const msg = "duplicate metric stream definitions" + args := []interface{}{ + "names", fmt.Sprintf("%q, %q", existing.Name, id.Name), + "descriptions", fmt.Sprintf("%q, %q", existing.Description, id.Description), + "kinds", fmt.Sprintf("%s, %s", existing.Kind, id.Kind), + "units", fmt.Sprintf("%s, %s", existing.Unit, id.Unit), + "numbers", fmt.Sprintf("%s, %s", existing.Number, id.Number), + } + + // The specification recommends logging a suggested view to resolve + // conflicts if possible. + // + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#duplicate-instrument-registration + if id.Unit != existing.Unit || id.Number != existing.Number { + // There is no view resolution for these, don't make a suggestion. + global.Warn(msg, args...) + return + } + + var stream string + if id.Name != existing.Name || id.Kind != existing.Kind { + stream = `Stream{Name: "{{NEW_NAME}}"}` + } else if id.Description != existing.Description { + stream = fmt.Sprintf("Stream{Description: %q}", existing.Description) + } + + inst := fmt.Sprintf( + "Instrument{Name: %q, Description: %q, Kind: %q, Unit: %q}", + id.Name, id.Description, "InstrumentKind"+id.Kind.String(), id.Unit, + ) + args = append(args, "suggested.view", fmt.Sprintf("NewView(%s, %s)", inst, stream)) + + global.Warn(msg, args...) +} + +func (i *inserter[N]) instID(kind InstrumentKind, stream Stream) instID { + var zero N + return instID{ + Name: stream.Name, + Description: stream.Description, + Unit: stream.Unit, + Kind: kind, + Number: fmt.Sprintf("%T", zero), + } +} + +// aggregateFunc returns new aggregate functions matching agg, kind, and +// monotonic. If the agg is unknown or temporality is invalid, an error is +// returned. +func (i *inserter[N]) aggregateFunc(b aggregate.Builder[N], agg Aggregation, kind InstrumentKind) (meas aggregate.Measure[N], comp aggregate.ComputeAggregation, err error) { + switch a := agg.(type) { + case AggregationDefault: + return i.aggregateFunc(b, DefaultAggregationSelector(kind), kind) + case AggregationDrop: + // Return nil in and out to signify the drop aggregator. 
+ case AggregationLastValue: + switch kind { + case InstrumentKindGauge: + meas, comp = b.LastValue() + case InstrumentKindObservableGauge: + meas, comp = b.PrecomputedLastValue() + } + case AggregationSum: + switch kind { + case InstrumentKindObservableCounter: + meas, comp = b.PrecomputedSum(true) + case InstrumentKindObservableUpDownCounter: + meas, comp = b.PrecomputedSum(false) + case InstrumentKindCounter, InstrumentKindHistogram: + meas, comp = b.Sum(true) + default: + // InstrumentKindUpDownCounter, InstrumentKindObservableGauge, and + // instrumentKindUndefined or other invalid instrument kinds. + meas, comp = b.Sum(false) + } + case AggregationExplicitBucketHistogram: + var noSum bool + switch kind { + case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge, InstrumentKindGauge: + // The sum should not be collected for any instrument that can make + // negative measurements: + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations + noSum = true + } + meas, comp = b.ExplicitBucketHistogram(a.Boundaries, a.NoMinMax, noSum) + case AggregationBase2ExponentialHistogram: + var noSum bool + switch kind { + case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge, InstrumentKindGauge: + // The sum should not be collected for any instrument that can make + // negative measurements: + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations + noSum = true + } + meas, comp = b.ExponentialBucketHistogram(a.MaxSize, a.MaxScale, a.NoMinMax, noSum) + + default: + err = errUnknownAggregation + } + + return meas, comp, err +} + +// isAggregatorCompatible checks if the aggregation can be used by the instrument. +// Current compatibility: +// +// | Instrument Kind | Drop | LastValue | Sum | Histogram | Exponential Histogram | +// |--------------------------|------|-----------|-----|-----------|-----------------------| +// | Counter | ✓ | | ✓ | ✓ | ✓ | +// | UpDownCounter | ✓ | | ✓ | ✓ | ✓ | +// | Histogram | ✓ | | ✓ | ✓ | ✓ | +// | Gauge | ✓ | ✓ | | ✓ | ✓ | +// | Observable Counter | ✓ | | ✓ | ✓ | ✓ | +// | Observable UpDownCounter | ✓ | | ✓ | ✓ | ✓ | +// | Observable Gauge | ✓ | ✓ | | ✓ | ✓ |. 
+func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error { + switch agg.(type) { + case AggregationDefault: + return nil + case AggregationExplicitBucketHistogram, AggregationBase2ExponentialHistogram: + switch kind { + case InstrumentKindCounter, + InstrumentKindUpDownCounter, + InstrumentKindHistogram, + InstrumentKindGauge, + InstrumentKindObservableCounter, + InstrumentKindObservableUpDownCounter, + InstrumentKindObservableGauge: + return nil + default: + return errIncompatibleAggregation + } + case AggregationSum: + switch kind { + case InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter, InstrumentKindCounter, InstrumentKindHistogram, InstrumentKindUpDownCounter: + return nil + default: + // TODO: review need for aggregation check after + // https://github.com/open-telemetry/opentelemetry-specification/issues/2710 + return errIncompatibleAggregation + } + case AggregationLastValue: + switch kind { + case InstrumentKindObservableGauge, InstrumentKindGauge: + return nil + } + // TODO: review need for aggregation check after + // https://github.com/open-telemetry/opentelemetry-specification/issues/2710 + return errIncompatibleAggregation + case AggregationDrop: + return nil + default: + // This is used passed checking for default, it should be an error at this point. + return fmt.Errorf("%w: %v", errUnknownAggregation, agg) + } +} + +// pipelines is the group of pipelines connecting Readers with instrument +// measurement. +type pipelines []*pipeline + +func newPipelines(res *resource.Resource, readers []Reader, views []View) pipelines { + pipes := make([]*pipeline, 0, len(readers)) + for _, r := range readers { + p := newPipeline(res, r, views) + r.register(p) + pipes = append(pipes, p) + } + return pipes +} + +func (p pipelines) registerMultiCallback(c multiCallback) metric.Registration { + unregs := make([]func(), len(p)) + for i, pipe := range p { + unregs[i] = pipe.addMultiCallback(c) + } + return unregisterFuncs{f: unregs} +} + +type unregisterFuncs struct { + embedded.Registration + f []func() +} + +func (u unregisterFuncs) Unregister() error { + for _, f := range u.f { + f() + } + return nil +} + +// resolver facilitates resolving aggregate functions an instrument calls to +// aggregate measurements with while updating all pipelines that need to pull +// from those aggregations. +type resolver[N int64 | float64] struct { + inserters []*inserter[N] +} + +func newResolver[N int64 | float64](p pipelines, vc *cache[string, instID]) resolver[N] { + in := make([]*inserter[N], len(p)) + for i := range in { + in[i] = newInserter[N](p[i], vc) + } + return resolver[N]{in} +} + +// Aggregators returns the Aggregators that must be updated by the instrument +// defined by key. +func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error) { + var measures []aggregate.Measure[N] + + errs := &multierror{} + for _, i := range r.inserters { + in, err := i.Instrument(id, i.readerDefaultAggregation(id.Kind)) + if err != nil { + errs.append(err) + } + measures = append(measures, in...) + } + return measures, errs.errorOrNil() +} + +// HistogramAggregators returns the histogram Aggregators that must be updated by the instrument +// defined by key. If boundaries were provided on instrument instantiation, those take precedence +// over boundaries provided by the reader. 
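To make the compatibility rules above concrete, here is a minimal, illustrative sketch (not part of the vendored file) of selecting a compatible aggregation through the view API defined later in this patch; the instrument name "request.latency" and the bucket boundaries are made up:

package main

import (
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	// Per the compatibility table above, an explicit-bucket histogram is a
	// valid aggregation for Histogram instruments; an incompatible pairing
	// would be rejected by isAggregatorCompatible when the pipeline resolves
	// aggregate functions.
	view := sdkmetric.NewView(
		sdkmetric.Instrument{Name: "request.latency"},
		sdkmetric.Stream{
			Aggregation: sdkmetric.AggregationExplicitBucketHistogram{
				Boundaries: []float64{0, 10, 50, 100, 500, 1000},
			},
		},
	)
	_ = sdkmetric.NewMeterProvider(sdkmetric.WithView(view))
}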
+func (r resolver[N]) HistogramAggregators(id Instrument, boundaries []float64) ([]aggregate.Measure[N], error) { + var measures []aggregate.Measure[N] + + errs := &multierror{} + for _, i := range r.inserters { + agg := i.readerDefaultAggregation(id.Kind) + if histAgg, ok := agg.(AggregationExplicitBucketHistogram); ok && len(boundaries) > 0 { + histAgg.Boundaries = boundaries + agg = histAgg + } + in, err := i.Instrument(id, agg) + if err != nil { + errs.append(err) + } + measures = append(measures, in...) + } + return measures, errs.errorOrNil() +} + +type multierror struct { + wrapped error + errors []string +} + +func (m *multierror) errorOrNil() error { + if len(m.errors) == 0 { + return nil + } + if m.wrapped == nil { + return errors.New(strings.Join(m.errors, "; ")) + } + return fmt.Errorf("%w: %s", m.wrapped, strings.Join(m.errors, "; ")) +} + +func (m *multierror) append(err error) { + m.errors = append(m.errors, err.Error()) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go new file mode 100644 index 0000000000..a82af538e6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go @@ -0,0 +1,143 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "sync/atomic" + + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" + "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/sdk/instrumentation" +) + +// MeterProvider handles the creation and coordination of Meters. All Meters +// created by a MeterProvider will be associated with the same Resource, have +// the same Views applied to them, and have their produced metric telemetry +// passed to the configured Readers. +type MeterProvider struct { + embedded.MeterProvider + + pipes pipelines + meters cache[instrumentation.Scope, *meter] + + forceFlush, shutdown func(context.Context) error + stopped atomic.Bool +} + +// Compile-time check MeterProvider implements metric.MeterProvider. +var _ metric.MeterProvider = (*MeterProvider)(nil) + +// NewMeterProvider returns a new and configured MeterProvider. +// +// By default, the returned MeterProvider is configured with the default +// Resource and no Readers. Readers cannot be added after a MeterProvider is +// created. This means the returned MeterProvider, one created with no +// Readers, will perform no operations. +func NewMeterProvider(options ...Option) *MeterProvider { + conf := newConfig(options) + flush, sdown := conf.readerSignals() + + mp := &MeterProvider{ + pipes: newPipelines(conf.res, conf.readers, conf.views), + forceFlush: flush, + shutdown: sdown, + } + // Log after creation so all readers show correctly they are registered. + global.Info("MeterProvider created", + "Resource", conf.res, + "Readers", conf.readers, + "Views", len(conf.views), + ) + return mp +} + +// Meter returns a Meter with the given name and configured with options. +// +// The name should be the name of the instrumentation scope creating +// telemetry. This name may be the same as the instrumented code only if that +// code provides built-in instrumentation. +// +// Calls to the Meter method after Shutdown has been called will return Meters +// that perform no operations. +// +// This method is safe to call concurrently. 
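A minimal usage sketch of the provider and Meter flow described above, assuming the ManualReader and WithReader option defined elsewhere in this package; the scope name and counter are hypothetical, and the snippet is not part of the vendored file:

package main

import (
	"context"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

func main() {
	ctx := context.Background()

	// A manual reader keeps the sketch self-contained; a periodic reader plus
	// an exporter would normally sit here.
	reader := sdkmetric.NewManualReader()
	mp := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
	defer func() { _ = mp.Shutdown(ctx) }()

	meter := mp.Meter("example.com/hypothetical/instrumentation")
	counter, err := meter.Int64Counter("requests.total")
	if err != nil {
		panic(err)
	}
	counter.Add(ctx, 1)

	// Pull the aggregated data back out through the reader.
	var rm metricdata.ResourceMetrics
	_ = reader.Collect(ctx, &rm)
}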
+func (mp *MeterProvider) Meter(name string, options ...metric.MeterOption) metric.Meter { + if name == "" { + global.Warn("Invalid Meter name.", "name", name) + } + + if mp.stopped.Load() { + return noop.Meter{} + } + + c := metric.NewMeterConfig(options...) + s := instrumentation.Scope{ + Name: name, + Version: c.InstrumentationVersion(), + SchemaURL: c.SchemaURL(), + } + + global.Info("Meter created", + "Name", s.Name, + "Version", s.Version, + "SchemaURL", s.SchemaURL, + ) + + return mp.meters.Lookup(s, func() *meter { + return newMeter(s, mp.pipes) + }) +} + +// ForceFlush flushes all pending telemetry. +// +// This method honors the deadline or cancellation of ctx. An appropriate +// error will be returned in these situations. There is no guaranteed that all +// telemetry be flushed or all resources have been released in these +// situations. +// +// ForceFlush calls ForceFlush(context.Context) error +// on all Readers that implements this method. +// +// This method is safe to call concurrently. +func (mp *MeterProvider) ForceFlush(ctx context.Context) error { + if mp.forceFlush != nil { + return mp.forceFlush(ctx) + } + return nil +} + +// Shutdown shuts down the MeterProvider flushing all pending telemetry and +// releasing any held computational resources. +// +// This call is idempotent. The first call will perform all flush and +// releasing operations. Subsequent calls will perform no action and will +// return an error stating this. +// +// Measurements made by instruments from meters this MeterProvider created +// will not be exported after Shutdown is called. +// +// This method honors the deadline or cancellation of ctx. An appropriate +// error will be returned in these situations. There is no guaranteed that all +// telemetry be flushed or all resources have been released in these +// situations. +// +// This method is safe to call concurrently. +func (mp *MeterProvider) Shutdown(ctx context.Context) error { + // Even though it may seem like there is a synchronization issue between the + // call to `Store` and checking `shutdown`, the Go concurrency model ensures + // that is not the case, as all the atomic operations executed in a program + // behave as though executed in some sequentially consistent order. This + // definition provides the same semantics as C++'s sequentially consistent + // atomics and Java's volatile variables. + // See https://go.dev/ref/mem#atomic and https://pkg.go.dev/sync/atomic. + + mp.stopped.Store(true) + if mp.shutdown != nil { + return mp.shutdown(ctx) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go new file mode 100644 index 0000000000..d94bdee75b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// errDuplicateRegister is logged by a Reader when an attempt to registered it +// more than once occurs. +var errDuplicateRegister = fmt.Errorf("duplicate reader registration") + +// ErrReaderNotRegistered is returned if Collect or Shutdown are called before +// the reader is registered with a MeterProvider. 
+var ErrReaderNotRegistered = fmt.Errorf("reader is not registered") + +// ErrReaderShutdown is returned if Collect or Shutdown are called after a +// reader has been Shutdown once. +var ErrReaderShutdown = fmt.Errorf("reader is shutdown") + +// errNonPositiveDuration is logged when an environmental variable +// has non-positive value. +var errNonPositiveDuration = fmt.Errorf("non-positive duration") + +// Reader is the interface used between the SDK and an +// exporter. Control flow is bi-directional through the +// Reader, since the SDK initiates ForceFlush and Shutdown +// while the exporter initiates collection. The Register() method here +// informs the Reader that it can begin reading, signaling the +// start of bi-directional control flow. +// +// Typically, push-based exporters that are periodic will +// implement PeriodicExporter themselves and construct a +// PeriodicReader to satisfy this interface. +// +// Pull-based exporters will typically implement Register +// themselves, since they read on demand. +// +// Warning: methods may be added to this interface in minor releases. +type Reader interface { + // register registers a Reader with a MeterProvider. + // The producer argument allows the Reader to signal the sdk to collect + // and send aggregated metric measurements. + register(sdkProducer) + + // temporality reports the Temporality for the instrument kind provided. + // + // This method needs to be concurrent safe with itself and all the other + // Reader methods. + temporality(InstrumentKind) metricdata.Temporality + + // aggregation returns what Aggregation to use for an instrument kind. + // + // This method needs to be concurrent safe with itself and all the other + // Reader methods. + aggregation(InstrumentKind) Aggregation // nolint:revive // import-shadow for method scoped by type. + + // Collect gathers and returns all metric data related to the Reader from + // the SDK and stores it in out. An error is returned if this is called + // after Shutdown or if out is nil. + // + // This method needs to be concurrent safe, and the cancellation of the + // passed context is expected to be honored. + Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Shutdown flushes all metric measurements held in an export pipeline and releases any + // held computational resources. + // + // This deadline or cancellation of the passed context are honored. An appropriate + // error will be returned in these situations. There is no guaranteed that all + // telemetry be flushed or all resources have been released in these + // situations. + // + // After Shutdown is called, calls to Collect will perform no operation and instead will return + // an error indicating the shutdown state. + // + // This method needs to be concurrent safe. + Shutdown(context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// sdkProducer produces metrics for a Reader. +type sdkProducer interface { + // produce returns aggregated metrics from a single collection. + // + // This method is safe to call concurrently. + produce(context.Context, *metricdata.ResourceMetrics) error +} + +// Producer produces metrics for a Reader from an external source. 
+type Producer interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Produce returns aggregated metrics from an external source. + // + // This method should be safe to call concurrently. + Produce(context.Context) ([]metricdata.ScopeMetrics, error) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// produceHolder is used as an atomic.Value to wrap the non-concrete producer +// type. +type produceHolder struct { + produce func(context.Context, *metricdata.ResourceMetrics) error +} + +// shutdownProducer produces an ErrReaderShutdown error always. +type shutdownProducer struct{} + +// produce returns an ErrReaderShutdown error. +func (p shutdownProducer) produce(context.Context, *metricdata.ResourceMetrics) error { + return ErrReaderShutdown +} + +// TemporalitySelector selects the temporality to use based on the InstrumentKind. +type TemporalitySelector func(InstrumentKind) metricdata.Temporality + +// DefaultTemporalitySelector is the default TemporalitySelector used if +// WithTemporalitySelector is not provided. CumulativeTemporality will be used +// for all instrument kinds if this TemporalitySelector is used. +func DefaultTemporalitySelector(InstrumentKind) metricdata.Temporality { + return metricdata.CumulativeTemporality +} + +// AggregationSelector selects the aggregation and the parameters to use for +// that aggregation based on the InstrumentKind. +// +// If the Aggregation returned is nil or DefaultAggregation, the selection from +// DefaultAggregationSelector will be used. +type AggregationSelector func(InstrumentKind) Aggregation + +// DefaultAggregationSelector returns the default aggregation and parameters +// that will be used to summarize measurement made from an instrument of +// InstrumentKind. This AggregationSelector using the following selection +// mapping: Counter ⇨ Sum, Observable Counter ⇨ Sum, UpDownCounter ⇨ Sum, +// Observable UpDownCounter ⇨ Sum, Observable Gauge ⇨ LastValue, +// Histogram ⇨ ExplicitBucketHistogram. +func DefaultAggregationSelector(ik InstrumentKind) Aggregation { + switch ik { + case InstrumentKindCounter, InstrumentKindUpDownCounter, InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter: + return AggregationSum{} + case InstrumentKindObservableGauge, InstrumentKindGauge: + return AggregationLastValue{} + case InstrumentKindHistogram: + return AggregationExplicitBucketHistogram{ + Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}, + NoMinMax: false, + } + } + panic("unknown instrument kind") +} + +// ReaderOption is an option which can be applied to manual or Periodic +// readers. +type ReaderOption interface { + PeriodicReaderOption + ManualReaderOption +} + +// WithProducer registers producers as an external Producer of metric data +// for this Reader. +func WithProducer(p Producer) ReaderOption { + return producerOption{p: p} +} + +type producerOption struct { + p Producer +} + +// applyManual returns a manualReaderConfig with option applied. +func (o producerOption) applyManual(c manualReaderConfig) manualReaderConfig { + c.producers = append(c.producers, o.p) + return c +} + +// applyPeriodic returns a periodicReaderConfig with option applied. 
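The selector and producer hooks above can be illustrated with a small sketch (not vendored code); it assumes the manual reader's WithTemporalitySelector, WithAggregationSelector, and WithProducer options, which live outside this hunk, and the names deltaTemporality, smallBuckets, and staticProducer are hypothetical:

package main

import (
	"context"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

// deltaTemporality always selects delta temporality, regardless of kind.
func deltaTemporality(sdkmetric.InstrumentKind) metricdata.Temporality {
	return metricdata.DeltaTemporality
}

// smallBuckets overrides only histograms and defers to the default mapping
// (Counter -> Sum, Gauge -> LastValue, ...) for every other kind.
func smallBuckets(ik sdkmetric.InstrumentKind) sdkmetric.Aggregation {
	if ik == sdkmetric.InstrumentKindHistogram {
		return sdkmetric.AggregationExplicitBucketHistogram{Boundaries: []float64{0, 1, 5, 10}}
	}
	return sdkmetric.DefaultAggregationSelector(ik)
}

// staticProducer is a stand-in for an external source bridged in via Produce.
type staticProducer struct{}

func (staticProducer) Produce(context.Context) ([]metricdata.ScopeMetrics, error) {
	return nil, nil
}

func main() {
	reader := sdkmetric.NewManualReader(
		sdkmetric.WithTemporalitySelector(deltaTemporality),
		sdkmetric.WithAggregationSelector(smallBuckets),
		sdkmetric.WithProducer(staticProducer{}),
	)
	_ = sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
}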
+func (o producerOption) applyPeriodic(c periodicReaderConfig) periodicReaderConfig { + c.producers = append(c.producers, o.p) + return c +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go new file mode 100644 index 0000000000..44316caa11 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +// version is the current release version of the metric SDK in use. +func version() string { + return "1.29.0" +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/view.go b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go new file mode 100644 index 0000000000..cd08c67324 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go @@ -0,0 +1,117 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "errors" + "regexp" + "strings" + + "go.opentelemetry.io/otel/internal/global" +) + +var ( + errMultiInst = errors.New("name replacement for multiple instruments") + errEmptyView = errors.New("no criteria provided for view") + + emptyView = func(Instrument) (Stream, bool) { return Stream{}, false } +) + +// View is an override to the default behavior of the SDK. It defines how data +// should be collected for certain instruments. It returns true and the exact +// Stream to use for matching Instruments. Otherwise, if the view does not +// match, false is returned. +type View func(Instrument) (Stream, bool) + +// NewView returns a View that applies the Stream mask for all instruments that +// match criteria. The returned View will only apply mask if all non-zero-value +// fields of criteria match the corresponding Instrument passed to the view. If +// no criteria are provided, all field of criteria are their zero-values, a +// view that matches no instruments is returned. If you need to match a +// zero-value field, create a View directly. +// +// The Name field of criteria supports wildcard pattern matching. The "*" +// wildcard is recognized as matching zero or more characters, and "?" is +// recognized as matching exactly one character. For example, a pattern of "*" +// matches all instrument names. +// +// The Stream mask only applies updates for non-zero-value fields. By default, +// the Instrument the View matches against will be use for the Name, +// Description, and Unit of the returned Stream and no Aggregation or +// AttributeFilter are set. All non-zero-value fields of mask are used instead +// of the default. If you need to zero out an Stream field returned from a +// View, create a View directly. +func NewView(criteria Instrument, mask Stream) View { + if criteria.IsEmpty() { + global.Error( + errEmptyView, "dropping view", + "mask", mask, + ) + return emptyView + } + + var matchFunc func(Instrument) bool + if strings.ContainsAny(criteria.Name, "*?") { + if mask.Name != "" { + global.Error( + errMultiInst, "dropping view", + "criteria", criteria, + "mask", mask, + ) + return emptyView + } + + // Handle branching here in NewView instead of criteria.matches so + // criteria.matches remains inlinable for the simple case. 
+ pattern := regexp.QuoteMeta(criteria.Name) + pattern = "^" + pattern + "$" + pattern = strings.ReplaceAll(pattern, `\?`, ".") + pattern = strings.ReplaceAll(pattern, `\*`, ".*") + re := regexp.MustCompile(pattern) + matchFunc = func(i Instrument) bool { + return re.MatchString(i.Name) && + criteria.matchesDescription(i) && + criteria.matchesKind(i) && + criteria.matchesUnit(i) && + criteria.matchesScope(i) + } + } else { + matchFunc = criteria.matches + } + + var agg Aggregation + if mask.Aggregation != nil { + agg = mask.Aggregation.copy() + if err := agg.err(); err != nil { + global.Error( + err, "not using aggregation with view", + "criteria", criteria, + "mask", mask, + ) + agg = nil + } + } + + return func(i Instrument) (Stream, bool) { + if matchFunc(i) { + return Stream{ + Name: nonZero(mask.Name, i.Name), + Description: nonZero(mask.Description, i.Description), + Unit: nonZero(mask.Unit, i.Unit), + Aggregation: agg, + AttributeFilter: mask.AttributeFilter, + }, true + } + return Stream{}, false + } +} + +// nonZero returns v if it is non-zero-valued, otherwise alt. +func nonZero[T comparable](v, alt T) T { + var zero T + if v != zero { + return v + } + return alt +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/README.md b/vendor/go.opentelemetry.io/otel/sdk/resource/README.md new file mode 100644 index 0000000000..4ad864d716 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/README.md @@ -0,0 +1,3 @@ +# SDK Resource + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/resource)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go new file mode 100644 index 0000000000..95a61d61d4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go @@ -0,0 +1,118 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "errors" + "fmt" + "strings" +) + +// ErrPartialResource is returned by a detector when complete source +// information for a Resource is unavailable or the source information +// contains invalid values that are omitted from the returned Resource. +var ErrPartialResource = errors.New("partial resource") + +// Detector detects OpenTelemetry resource information. +type Detector interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Detect returns an initialized Resource based on gathered information. + // If the source information to construct a Resource contains invalid + // values, a Resource is returned with the valid parts of the source + // information used for initialization along with an appropriately + // wrapped ErrPartialResource error. + Detect(ctx context.Context) (*Resource, error) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// Detect returns a new [Resource] merged from all the Resources each of the +// detectors produces. Each of the detectors are called sequentially, in the +// order they are passed, merging the produced resource into the previous. +// +// This may return a partial Resource along with an error containing +// [ErrPartialResource] if that error is returned from a detector. 
It may also +// return a merge-conflicting Resource along with an error containing +// [ErrSchemaURLConflict] if merging Resources from different detectors results +// in a schema URL conflict. It is up to the caller to determine if this +// returned Resource should be used or not. +// +// If one of the detectors returns an error that is not [ErrPartialResource], +// the resource produced by the detector will not be merged and the returned +// error will wrap that detector's error. +func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) { + r := new(Resource) + return r, detect(ctx, r, detectors) +} + +// detect runs all detectors using ctx and merges the result into res. This +// assumes res is allocated and not nil, it will panic otherwise. +// +// If the detectors or merging resources produces any errors (i.e. +// [ErrPartialResource] [ErrSchemaURLConflict]), a single error wrapping all of +// these errors will be returned. Otherwise, nil is returned. +func detect(ctx context.Context, res *Resource, detectors []Detector) error { + var ( + r *Resource + errs detectErrs + err error + ) + + for _, detector := range detectors { + if detector == nil { + continue + } + r, err = detector.Detect(ctx) + if err != nil { + errs = append(errs, err) + if !errors.Is(err, ErrPartialResource) { + continue + } + } + r, err = Merge(res, r) + if err != nil { + errs = append(errs, err) + } + *res = *r + } + + if len(errs) == 0 { + return nil + } + if errors.Is(errs, ErrSchemaURLConflict) { + // If there has been a merge conflict, ensure the resource has no + // schema URL. + res.schemaURL = "" + } + return errs +} + +type detectErrs []error + +func (e detectErrs) Error() string { + errStr := make([]string, len(e)) + for i, err := range e { + errStr[i] = fmt.Sprintf("* %s", err) + } + + format := "%d errors occurred detecting resource:\n\t%s" + return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t")) +} + +func (e detectErrs) Unwrap() error { + switch len(e) { + case 0: + return nil + case 1: + return e[0] + } + return e[1:] +} + +func (e detectErrs) Is(target error) bool { + return len(e) != 0 && errors.Is(e[0], target) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go new file mode 100644 index 0000000000..6ac1cdbf7b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -0,0 +1,118 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/google/uuid" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type ( + // telemetrySDK is a Detector that provides information about + // the OpenTelemetry SDK used. This Detector is included as a + // builtin. If these resource attributes are not wanted, use + // the WithTelemetrySDK(nil) or WithoutBuiltin() options to + // explicitly disable them. + telemetrySDK struct{} + + // host is a Detector that provides information about the host + // being run on. This Detector is included as a builtin. If + // these resource attributes are not wanted, use the + // WithHost(nil) or WithoutBuiltin() options to explicitly + // disable them. 
+ host struct{} + + stringDetector struct { + schemaURL string + K attribute.Key + F func() (string, error) + } + + defaultServiceNameDetector struct{} + + defaultServiceInstanceIDDetector struct{} +) + +var ( + _ Detector = telemetrySDK{} + _ Detector = host{} + _ Detector = stringDetector{} + _ Detector = defaultServiceNameDetector{} + _ Detector = defaultServiceInstanceIDDetector{} +) + +// Detect returns a *Resource that describes the OpenTelemetry SDK used. +func (telemetrySDK) Detect(context.Context) (*Resource, error) { + return NewWithAttributes( + semconv.SchemaURL, + semconv.TelemetrySDKName("opentelemetry"), + semconv.TelemetrySDKLanguageGo, + semconv.TelemetrySDKVersion(sdk.Version()), + ), nil +} + +// Detect returns a *Resource that describes the host being run on. +func (host) Detect(ctx context.Context) (*Resource, error) { + return StringDetector(semconv.SchemaURL, semconv.HostNameKey, os.Hostname).Detect(ctx) +} + +// StringDetector returns a Detector that will produce a *Resource +// containing the string as a value corresponding to k. The resulting Resource +// will have the specified schemaURL. +func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) Detector { + return stringDetector{schemaURL: schemaURL, K: k, F: f} +} + +// Detect returns a *Resource that describes the string as a value +// corresponding to attribute.Key as well as the specific schemaURL. +func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) { + value, err := sd.F() + if err != nil { + return nil, fmt.Errorf("%s: %w", string(sd.K), err) + } + a := sd.K.String(value) + if !a.Valid() { + return nil, fmt.Errorf("invalid attribute: %q -> %q", a.Key, a.Value.Emit()) + } + return NewWithAttributes(sd.schemaURL, sd.K.String(value)), nil +} + +// Detect implements Detector. +func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) { + return StringDetector( + semconv.SchemaURL, + semconv.ServiceNameKey, + func() (string, error) { + executable, err := os.Executable() + if err != nil { + return "unknown_service:go", nil + } + return "unknown_service:" + filepath.Base(executable), nil + }, + ).Detect(ctx) +} + +// Detect implements Detector. +func (defaultServiceInstanceIDDetector) Detect(ctx context.Context) (*Resource, error) { + return StringDetector( + semconv.SchemaURL, + semconv.ServiceInstanceIDKey, + func() (string, error) { + version4Uuid, err := uuid.NewRandom() + if err != nil { + return "", err + } + + return version4Uuid.String(), nil + }, + ).Detect(ctx) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go new file mode 100644 index 0000000000..0d6e213d92 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go @@ -0,0 +1,195 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" +) + +// config contains configuration for Resource creation. +type config struct { + // detectors that will be evaluated. + detectors []Detector + // SchemaURL to associate with the Resource. + schemaURL string +} + +// Option is the interface that applies a configuration option. +type Option interface { + // apply sets the Option value of a config. + apply(config) config +} + +// WithAttributes adds attributes to the configured Resource. 
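A hedged sketch of plugging a custom Detector into Detect alongside a StringDetector; regionDetector, the attribute values, and the hostname are hypothetical, and the snippet is not part of the vendored file:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

// regionDetector is a made-up Detector reporting a deployment region.
type regionDetector struct{}

func (regionDetector) Detect(context.Context) (*resource.Resource, error) {
	return resource.NewSchemaless(attribute.String("cloud.region", "eu-west-1")), nil
}

func main() {
	ctx := context.Background()

	// StringDetector adapts a plain func() (string, error) into a Detector.
	hostname := resource.StringDetector("", attribute.Key("host.name"), func() (string, error) {
		return "example-host", nil
	})

	res, err := resource.Detect(ctx, regionDetector{}, hostname)
	if err != nil {
		// A wrapped ErrPartialResource still comes with a usable Resource.
		fmt.Println("partial detection:", err)
	}
	fmt.Println(res)
}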
+func WithAttributes(attributes ...attribute.KeyValue) Option { + return WithDetectors(detectAttributes{attributes}) +} + +type detectAttributes struct { + attributes []attribute.KeyValue +} + +func (d detectAttributes) Detect(context.Context) (*Resource, error) { + return NewSchemaless(d.attributes...), nil +} + +// WithDetectors adds detectors to be evaluated for the configured resource. +func WithDetectors(detectors ...Detector) Option { + return detectorsOption{detectors: detectors} +} + +type detectorsOption struct { + detectors []Detector +} + +func (o detectorsOption) apply(cfg config) config { + cfg.detectors = append(cfg.detectors, o.detectors...) + return cfg +} + +// WithFromEnv adds attributes from environment variables to the configured resource. +func WithFromEnv() Option { + return WithDetectors(fromEnv{}) +} + +// WithHost adds attributes from the host to the configured resource. +func WithHost() Option { + return WithDetectors(host{}) +} + +// WithHostID adds host ID information to the configured resource. +func WithHostID() Option { + return WithDetectors(hostIDDetector{}) +} + +// WithTelemetrySDK adds TelemetrySDK version info to the configured resource. +func WithTelemetrySDK() Option { + return WithDetectors(telemetrySDK{}) +} + +// WithSchemaURL sets the schema URL for the configured resource. +func WithSchemaURL(schemaURL string) Option { + return schemaURLOption(schemaURL) +} + +type schemaURLOption string + +func (o schemaURLOption) apply(cfg config) config { + cfg.schemaURL = string(o) + return cfg +} + +// WithOS adds all the OS attributes to the configured Resource. +// See individual WithOS* functions to configure specific attributes. +func WithOS() Option { + return WithDetectors( + osTypeDetector{}, + osDescriptionDetector{}, + ) +} + +// WithOSType adds an attribute with the operating system type to the configured Resource. +func WithOSType() Option { + return WithDetectors(osTypeDetector{}) +} + +// WithOSDescription adds an attribute with the operating system description to the +// configured Resource. The formatted string is equivalent to the output of the +// `uname -snrvm` command. +func WithOSDescription() Option { + return WithDetectors(osDescriptionDetector{}) +} + +// WithProcess adds all the Process attributes to the configured Resource. +// +// Warning! This option will include process command line arguments. If these +// contain sensitive information it will be included in the exported resource. +// +// This option is equivalent to calling WithProcessPID, +// WithProcessExecutableName, WithProcessExecutablePath, +// WithProcessCommandArgs, WithProcessOwner, WithProcessRuntimeName, +// WithProcessRuntimeVersion, and WithProcessRuntimeDescription. See each +// option function for information about what resource attributes each +// includes. +func WithProcess() Option { + return WithDetectors( + processPIDDetector{}, + processExecutableNameDetector{}, + processExecutablePathDetector{}, + processCommandArgsDetector{}, + processOwnerDetector{}, + processRuntimeNameDetector{}, + processRuntimeVersionDetector{}, + processRuntimeDescriptionDetector{}, + ) +} + +// WithProcessPID adds an attribute with the process identifier (PID) to the +// configured Resource. +func WithProcessPID() Option { + return WithDetectors(processPIDDetector{}) +} + +// WithProcessExecutableName adds an attribute with the name of the process +// executable to the configured Resource. 
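A short, non-authoritative example of combining the detector options above through the package's New constructor (defined outside this hunk):

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// WithProcess is left out on purpose: as warned above, it records the
	// full command line, which may contain sensitive values.
	res, err := resource.New(context.Background(),
		resource.WithFromEnv(),      // OTEL_RESOURCE_ATTRIBUTES / OTEL_SERVICE_NAME
		resource.WithTelemetrySDK(), // telemetry.sdk.* attributes
		resource.WithHost(),         // host.name
		resource.WithOS(),           // os.type and os.description
	)
	if err != nil {
		fmt.Println("resource detection:", err)
	}
	fmt.Println(res)
}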
+func WithProcessExecutableName() Option { + return WithDetectors(processExecutableNameDetector{}) +} + +// WithProcessExecutablePath adds an attribute with the full path to the process +// executable to the configured Resource. +func WithProcessExecutablePath() Option { + return WithDetectors(processExecutablePathDetector{}) +} + +// WithProcessCommandArgs adds an attribute with all the command arguments (including +// the command/executable itself) as received by the process to the configured +// Resource. +// +// Warning! This option will include process command line arguments. If these +// contain sensitive information it will be included in the exported resource. +func WithProcessCommandArgs() Option { + return WithDetectors(processCommandArgsDetector{}) +} + +// WithProcessOwner adds an attribute with the username of the user that owns the process +// to the configured Resource. +func WithProcessOwner() Option { + return WithDetectors(processOwnerDetector{}) +} + +// WithProcessRuntimeName adds an attribute with the name of the runtime of this +// process to the configured Resource. +func WithProcessRuntimeName() Option { + return WithDetectors(processRuntimeNameDetector{}) +} + +// WithProcessRuntimeVersion adds an attribute with the version of the runtime of +// this process to the configured Resource. +func WithProcessRuntimeVersion() Option { + return WithDetectors(processRuntimeVersionDetector{}) +} + +// WithProcessRuntimeDescription adds an attribute with an additional description +// about the runtime of the process to the configured Resource. +func WithProcessRuntimeDescription() Option { + return WithDetectors(processRuntimeDescriptionDetector{}) +} + +// WithContainer adds all the Container attributes to the configured Resource. +// See individual WithContainer* functions to configure specific attributes. +func WithContainer() Option { + return WithDetectors( + cgroupContainerIDDetector{}, + ) +} + +// WithContainerID adds an attribute with the id of the container to the configured Resource. +// Note: WithContainerID will not extract the correct container ID in an ECS environment. +// Please use the ECS resource detector instead (https://pkg.go.dev/go.opentelemetry.io/contrib/detectors/aws/ecs). +func WithContainerID() Option { + return WithDetectors(cgroupContainerIDDetector{}) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go new file mode 100644 index 0000000000..5ecd859a52 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -0,0 +1,89 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "bufio" + "context" + "errors" + "io" + "os" + "regexp" + + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type containerIDProvider func() (string, error) + +var ( + containerID containerIDProvider = getContainerIDFromCGroup + cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*[-:])?([0-9a-f]+)(?:\.|\s*$)`) +) + +type cgroupContainerIDDetector struct{} + +const cgroupPath = "/proc/self/cgroup" + +// Detect returns a *Resource that describes the id of the container. +// If no container id found, an empty resource will be returned. 
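For completeness, a tiny hedged example of the container detector wired through WithContainerID; it again assumes the New constructor from outside this hunk:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// On a non-container host the cgroup file yields no ID and the detector
	// returns an empty Resource rather than an error.
	res, err := resource.New(context.Background(), resource.WithContainerID())
	fmt.Println(res, err)
}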
+func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) { + containerID, err := containerID() + if err != nil { + return nil, err + } + + if containerID == "" { + return Empty(), nil + } + return NewWithAttributes(semconv.SchemaURL, semconv.ContainerID(containerID)), nil +} + +var ( + defaultOSStat = os.Stat + osStat = defaultOSStat + + defaultOSOpen = func(name string) (io.ReadCloser, error) { + return os.Open(name) + } + osOpen = defaultOSOpen +) + +// getContainerIDFromCGroup returns the id of the container from the cgroup file. +// If no container id found, an empty string will be returned. +func getContainerIDFromCGroup() (string, error) { + if _, err := osStat(cgroupPath); errors.Is(err, os.ErrNotExist) { + // File does not exist, skip + return "", nil + } + + file, err := osOpen(cgroupPath) + if err != nil { + return "", err + } + defer file.Close() + + return getContainerIDFromReader(file), nil +} + +// getContainerIDFromReader returns the id of the container from reader. +func getContainerIDFromReader(reader io.Reader) string { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + line := scanner.Text() + + if id := getContainerIDFromLine(line); id != "" { + return id + } + } + return "" +} + +// getContainerIDFromLine returns the id of the container from one string line. +func getContainerIDFromLine(line string) string { + matches := cgroupContainerIDRe.FindStringSubmatch(line) + if len(matches) <= 1 { + return "" + } + return matches[1] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go new file mode 100644 index 0000000000..64939a2713 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package resource provides detecting and representing resources. +// +// The fundamental struct is a Resource which holds identifying information +// about the entities for which telemetry is exported. +// +// To automatically construct Resources from an environment a Detector +// interface is defined. Implementations of this interface can be passed to +// the Detect function to generate a Resource from the merged information. +// +// To load a user defined Resource from the environment variable +// OTEL_RESOURCE_ATTRIBUTES the FromEnv Detector can be used. It will interpret +// the value as a list of comma delimited key/value pairs +// (e.g. `=,=,...`). +// +// While this package provides a stable API, +// the attributes added by resource detectors may change. +package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go new file mode 100644 index 0000000000..813f056242 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go @@ -0,0 +1,95 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "fmt" + "net/url" + "os" + "strings" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +const ( + // resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from. 
+ resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" //nolint:gosec // False positive G101: Potential hardcoded credentials + + // svcNameKey is the environment variable name that Service Name information will be read from. + svcNameKey = "OTEL_SERVICE_NAME" +) + +// errMissingValue is returned when a resource value is missing. +var errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource) + +// fromEnv is a Detector that implements the Detector and collects +// resources from environment. This Detector is included as a +// builtin. +type fromEnv struct{} + +// compile time assertion that FromEnv implements Detector interface. +var _ Detector = fromEnv{} + +// Detect collects resources from environment. +func (fromEnv) Detect(context.Context) (*Resource, error) { + attrs := strings.TrimSpace(os.Getenv(resourceAttrKey)) + svcName := strings.TrimSpace(os.Getenv(svcNameKey)) + + if attrs == "" && svcName == "" { + return Empty(), nil + } + + var res *Resource + + if svcName != "" { + res = NewSchemaless(semconv.ServiceName(svcName)) + } + + r2, err := constructOTResources(attrs) + + // Ensure that the resource with the service name from OTEL_SERVICE_NAME + // takes precedence, if it was defined. + res, err2 := Merge(r2, res) + + if err == nil { + err = err2 + } else if err2 != nil { + err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()}) + } + + return res, err +} + +func constructOTResources(s string) (*Resource, error) { + if s == "" { + return Empty(), nil + } + pairs := strings.Split(s, ",") + var attrs []attribute.KeyValue + var invalid []string + for _, p := range pairs { + k, v, found := strings.Cut(p, "=") + if !found { + invalid = append(invalid, p) + continue + } + key := strings.TrimSpace(k) + val, err := url.PathUnescape(strings.TrimSpace(v)) + if err != nil { + // Retain original value if decoding fails, otherwise it will be + // an empty string. + val = v + otel.Handle(err) + } + attrs = append(attrs, attribute.String(key, val)) + } + var err error + if len(invalid) > 0 { + err = fmt.Errorf("%w: %v", errMissingValue, invalid) + } + return NewSchemaless(attrs...), err +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go new file mode 100644 index 0000000000..2d0f65498a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go @@ -0,0 +1,109 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "errors" + "strings" + + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type hostIDProvider func() (string, error) + +var defaultHostIDProvider hostIDProvider = platformHostIDReader.read + +var hostID = defaultHostIDProvider + +type hostIDReader interface { + read() (string, error) +} + +type fileReader func(string) (string, error) + +type commandExecutor func(string, ...string) (string, error) + +// hostIDReaderBSD implements hostIDReader. +type hostIDReaderBSD struct { + execCommand commandExecutor + readFile fileReader +} + +// read attempts to read the machine-id from /etc/hostid. If not found it will +// execute `kenv -q smbios.system.uuid`. If neither location yields an id an +// error will be returned. 
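A sketch of the environment-variable path described in env.go above, assuming the New constructor from outside this hunk; the attribute values are invented:

package main

import (
	"context"
	"fmt"
	"os"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// Comma-separated key=value pairs; values may be URL-encoded and are
	// path-unescaped by constructOTResources. A pair with no '=' is reported
	// through the wrapped errMissingValue / ErrPartialResource error.
	os.Setenv("OTEL_RESOURCE_ATTRIBUTES", "deployment.environment=staging,team=platform%20eng")
	// OTEL_SERVICE_NAME wins over any service.name pair from the list above.
	os.Setenv("OTEL_SERVICE_NAME", "checkout")

	res, err := resource.New(context.Background(), resource.WithFromEnv())
	fmt.Println(res, err)
}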
+func (r *hostIDReaderBSD) read() (string, error) { + if result, err := r.readFile("/etc/hostid"); err == nil { + return strings.TrimSpace(result), nil + } + + if result, err := r.execCommand("kenv", "-q", "smbios.system.uuid"); err == nil { + return strings.TrimSpace(result), nil + } + + return "", errors.New("host id not found in: /etc/hostid or kenv") +} + +// hostIDReaderDarwin implements hostIDReader. +type hostIDReaderDarwin struct { + execCommand commandExecutor +} + +// read executes `ioreg -rd1 -c "IOPlatformExpertDevice"` and parses host id +// from the IOPlatformUUID line. If the command fails or the uuid cannot be +// parsed an error will be returned. +func (r *hostIDReaderDarwin) read() (string, error) { + result, err := r.execCommand("ioreg", "-rd1", "-c", "IOPlatformExpertDevice") + if err != nil { + return "", err + } + + lines := strings.Split(result, "\n") + for _, line := range lines { + if strings.Contains(line, "IOPlatformUUID") { + parts := strings.Split(line, " = ") + if len(parts) == 2 { + return strings.Trim(parts[1], "\""), nil + } + break + } + } + + return "", errors.New("could not parse IOPlatformUUID") +} + +type hostIDReaderLinux struct { + readFile fileReader +} + +// read attempts to read the machine-id from /etc/machine-id followed by +// /var/lib/dbus/machine-id. If neither location yields an ID an error will +// be returned. +func (r *hostIDReaderLinux) read() (string, error) { + if result, err := r.readFile("/etc/machine-id"); err == nil { + return strings.TrimSpace(result), nil + } + + if result, err := r.readFile("/var/lib/dbus/machine-id"); err == nil { + return strings.TrimSpace(result), nil + } + + return "", errors.New("host id not found in: /etc/machine-id or /var/lib/dbus/machine-id") +} + +type hostIDDetector struct{} + +// Detect returns a *Resource containing the platform specific host id. 
+func (hostIDDetector) Detect(ctx context.Context) (*Resource, error) { + hostID, err := hostID() + if err != nil { + return nil, err + } + + return NewWithAttributes( + semconv.SchemaURL, + semconv.HostID(hostID), + ), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go new file mode 100644 index 0000000000..cc8b8938ed --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go @@ -0,0 +1,12 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build dragonfly || freebsd || netbsd || openbsd || solaris +// +build dragonfly freebsd netbsd openbsd solaris + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +var platformHostIDReader hostIDReader = &hostIDReaderBSD{ + execCommand: execCommand, + readFile: readFile, +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go new file mode 100644 index 0000000000..b09fde3b73 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +var platformHostIDReader hostIDReader = &hostIDReaderDarwin{ + execCommand: execCommand, +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go new file mode 100644 index 0000000000..d9e5d1a8ff --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go @@ -0,0 +1,18 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build darwin || dragonfly || freebsd || netbsd || openbsd || solaris + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import "os/exec" + +func execCommand(name string, arg ...string) (string, error) { + cmd := exec.Command(name, arg...) 
+ b, err := cmd.Output() + if err != nil { + return "", err + } + + return string(b), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go new file mode 100644 index 0000000000..f84f173240 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go @@ -0,0 +1,11 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build linux +// +build linux + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +var platformHostIDReader hostIDReader = &hostIDReaderLinux{ + readFile: readFile, +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go new file mode 100644 index 0000000000..6354b35602 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build linux || dragonfly || freebsd || netbsd || openbsd || solaris + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import "os" + +func readFile(filename string) (string, error) { + b, err := os.ReadFile(filename) + if err != nil { + return "", err + } + + return string(b), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go new file mode 100644 index 0000000000..df12c44c56 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +// hostIDReaderUnsupported is a placeholder implementation for operating systems +// for which this project currently doesn't support host.id +// attribute detection. See build tags declaration early on this file +// for a list of unsupported OSes. 
+type hostIDReaderUnsupported struct{} + +func (*hostIDReaderUnsupported) read() (string, error) { + return "", nil +} + +var platformHostIDReader hostIDReader = &hostIDReaderUnsupported{} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go new file mode 100644 index 0000000000..71386e2da4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build windows +// +build windows + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "golang.org/x/sys/windows/registry" +) + +// implements hostIDReader +type hostIDReaderWindows struct{} + +// read reads MachineGuid from the windows registry key: +// SOFTWARE\Microsoft\Cryptography +func (*hostIDReaderWindows) read() (string, error) { + k, err := registry.OpenKey( + registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`, + registry.QUERY_VALUE|registry.WOW64_64KEY, + ) + + if err != nil { + return "", err + } + defer k.Close() + + guid, _, err := k.GetStringValue("MachineGuid") + if err != nil { + return "", err + } + + return guid, nil +} + +var platformHostIDReader hostIDReader = &hostIDReaderWindows{} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go new file mode 100644 index 0000000000..8a48ab4fa3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -0,0 +1,89 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type osDescriptionProvider func() (string, error) + +var defaultOSDescriptionProvider osDescriptionProvider = platformOSDescription + +var osDescription = defaultOSDescriptionProvider + +func setDefaultOSDescriptionProvider() { + setOSDescriptionProvider(defaultOSDescriptionProvider) +} + +func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) { + osDescription = osDescriptionProvider +} + +type ( + osTypeDetector struct{} + osDescriptionDetector struct{} +) + +// Detect returns a *Resource that describes the operating system type the +// service is running on. +func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { + osType := runtimeOS() + + osTypeAttribute := mapRuntimeOSToSemconvOSType(osType) + + return NewWithAttributes( + semconv.SchemaURL, + osTypeAttribute, + ), nil +} + +// Detect returns a *Resource that describes the operating system the +// service is running on. +func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { + description, err := osDescription() + if err != nil { + return nil, err + } + + return NewWithAttributes( + semconv.SchemaURL, + semconv.OSDescription(description), + ), nil +} + +// mapRuntimeOSToSemconvOSType translates the OS name as provided by the Go runtime +// into an OS type attribute with the corresponding value defined by the semantic +// conventions. In case the provided OS name isn't mapped, it's transformed to lowercase +// and used as the value for the returned OS type attribute. 
+func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue { + // the elements in this map are the intersection between + // available GOOS values and defined semconv OS types + osTypeAttributeMap := map[string]attribute.KeyValue{ + "aix": semconv.OSTypeAIX, + "darwin": semconv.OSTypeDarwin, + "dragonfly": semconv.OSTypeDragonflyBSD, + "freebsd": semconv.OSTypeFreeBSD, + "linux": semconv.OSTypeLinux, + "netbsd": semconv.OSTypeNetBSD, + "openbsd": semconv.OSTypeOpenBSD, + "solaris": semconv.OSTypeSolaris, + "windows": semconv.OSTypeWindows, + "zos": semconv.OSTypeZOS, + } + + var osTypeAttribute attribute.KeyValue + + if attr, ok := osTypeAttributeMap[osType]; ok { + osTypeAttribute = attr + } else { + osTypeAttribute = semconv.OSTypeKey.String(strings.ToLower(osType)) + } + + return osTypeAttribute +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go new file mode 100644 index 0000000000..ce455dc544 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go @@ -0,0 +1,91 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "encoding/xml" + "fmt" + "io" + "os" +) + +type plist struct { + XMLName xml.Name `xml:"plist"` + Dict dict `xml:"dict"` +} + +type dict struct { + Key []string `xml:"key"` + String []string `xml:"string"` +} + +// osRelease builds a string describing the operating system release based on the +// contents of the property list (.plist) system files. If no .plist files are found, +// or if the required properties to build the release description string are missing, +// an empty string is returned instead. The generated string resembles the output of +// the `sw_vers` commandline program, but in a single-line string. For more information +// about the `sw_vers` program, see: https://www.unix.com/man-page/osx/1/SW_VERS. +func osRelease() string { + file, err := getPlistFile() + if err != nil { + return "" + } + + defer file.Close() + + values, err := parsePlistFile(file) + if err != nil { + return "" + } + + return buildOSRelease(values) +} + +// getPlistFile returns a *os.File pointing to one of the well-known .plist files +// available on macOS. If no file can be opened, it returns an error. +func getPlistFile() (*os.File, error) { + return getFirstAvailableFile([]string{ + "/System/Library/CoreServices/SystemVersion.plist", + "/System/Library/CoreServices/ServerVersion.plist", + }) +} + +// parsePlistFile process the file pointed by `file` as a .plist file and returns +// a map with the key-values for each pair of correlated and elements +// contained in it. +func parsePlistFile(file io.Reader) (map[string]string, error) { + var v plist + + err := xml.NewDecoder(file).Decode(&v) + if err != nil { + return nil, err + } + + if len(v.Dict.Key) != len(v.Dict.String) { + return nil, fmt.Errorf("the number of and elements doesn't match") + } + + properties := make(map[string]string, len(v.Dict.Key)) + for i, key := range v.Dict.Key { + properties[key] = v.Dict.String[i] + } + + return properties, nil +} + +// buildOSRelease builds a string describing the OS release based on the properties +// available on the provided map. It tries to find the `ProductName`, `ProductVersion` +// and `ProductBuildVersion` properties. If some of these properties are not found, +// it returns an empty string. 
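A small hedged example of the OS detectors exposed through the WithOSType and WithOSDescription options defined earlier in this patch; the printed description is only indicative:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// os.type comes from the GOOS -> semconv mapping above (unmapped values
	// fall back to the lower-cased GOOS string); os.description combines the
	// platform release information with the uname output, for example
	// "Ubuntu 22.04.4 LTS (Linux 6.8.0 ... x86_64)".
	res, _ := resource.New(context.Background(),
		resource.WithOSType(),
		resource.WithOSDescription(),
	)
	fmt.Println(res)
}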
+func buildOSRelease(properties map[string]string) string { + productName := properties["ProductName"] + productVersion := properties["ProductVersion"] + productBuildVersion := properties["ProductBuildVersion"] + + if productName == "" || productVersion == "" || productBuildVersion == "" { + return "" + } + + return fmt.Sprintf("%s %s (%s)", productName, productVersion, productBuildVersion) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go new file mode 100644 index 0000000000..f537e5ca5c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go @@ -0,0 +1,143 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix dragonfly freebsd linux netbsd openbsd solaris zos + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +// osRelease builds a string describing the operating system release based on the +// properties of the os-release file. If no os-release file is found, or if the +// required properties to build the release description string are missing, an empty +// string is returned instead. For more information about os-release files, see: +// https://www.freedesktop.org/software/systemd/man/os-release.html +func osRelease() string { + file, err := getOSReleaseFile() + if err != nil { + return "" + } + + defer file.Close() + + values := parseOSReleaseFile(file) + + return buildOSRelease(values) +} + +// getOSReleaseFile returns a *os.File pointing to one of the well-known os-release +// files, according to their order of preference. If no file can be opened, it +// returns an error. +func getOSReleaseFile() (*os.File, error) { + return getFirstAvailableFile([]string{"/etc/os-release", "/usr/lib/os-release"}) +} + +// parseOSReleaseFile process the file pointed by `file` as an os-release file and +// returns a map with the key-values contained in it. Empty lines or lines starting +// with a '#' character are ignored, as well as lines with the missing key=value +// separator. Values are unquoted and unescaped. +func parseOSReleaseFile(file io.Reader) map[string]string { + values := make(map[string]string) + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + line := scanner.Text() + + if skip(line) { + continue + } + + key, value, ok := parse(line) + if ok { + values[key] = value + } + } + + return values +} + +// skip returns true if the line is blank or starts with a '#' character, and +// therefore should be skipped from processing. +func skip(line string) bool { + line = strings.TrimSpace(line) + + return len(line) == 0 || strings.HasPrefix(line, "#") +} + +// parse attempts to split the provided line on the first '=' character, and then +// sanitize each side of the split before returning them as a key-value pair. +func parse(line string) (string, string, bool) { + k, v, found := strings.Cut(line, "=") + + if !found || len(k) == 0 { + return "", "", false + } + + key := strings.TrimSpace(k) + value := unescape(unquote(strings.TrimSpace(v))) + + return key, value, true +} + +// unquote checks whether the string `s` is quoted with double or single quotes +// and, if so, returns a version of the string without them. Otherwise it returns +// the provided string unchanged. 
+func unquote(s string) string { + if len(s) < 2 { + return s + } + + if (s[0] == '"' || s[0] == '\'') && s[0] == s[len(s)-1] { + return s[1 : len(s)-1] + } + + return s +} + +// unescape removes the `\` prefix from some characters that are expected +// to have it added in front of them for escaping purposes. +func unescape(s string) string { + return strings.NewReplacer( + `\$`, `$`, + `\"`, `"`, + `\'`, `'`, + `\\`, `\`, + "\\`", "`", + ).Replace(s) +} + +// buildOSRelease builds a string describing the OS release based on the properties +// available on the provided map. It favors a combination of the `NAME` and `VERSION` +// properties as first option (falling back to `VERSION_ID` if `VERSION` isn't +// found), and using `PRETTY_NAME` alone if some of the previous are not present. If +// none of these properties are found, it returns an empty string. +// +// The rationale behind not using `PRETTY_NAME` as first choice was that, for some +// Linux distributions, it doesn't include the same detail that can be found on the +// individual `NAME` and `VERSION` properties, and combining `PRETTY_NAME` with +// other properties can produce "pretty" redundant strings in some cases. +func buildOSRelease(values map[string]string) string { + var osRelease string + + name := values["NAME"] + version := values["VERSION"] + + if version == "" { + version = values["VERSION_ID"] + } + + if name != "" && version != "" { + osRelease = fmt.Sprintf("%s %s", name, version) + } else { + osRelease = values["PRETTY_NAME"] + } + + return osRelease +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go new file mode 100644 index 0000000000..a6ff26a4d2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go @@ -0,0 +1,79 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "fmt" + "os" + + "golang.org/x/sys/unix" +) + +type unameProvider func(buf *unix.Utsname) (err error) + +var defaultUnameProvider unameProvider = unix.Uname + +var currentUnameProvider = defaultUnameProvider + +func setDefaultUnameProvider() { + setUnameProvider(defaultUnameProvider) +} + +func setUnameProvider(unameProvider unameProvider) { + currentUnameProvider = unameProvider +} + +// platformOSDescription returns a human readable OS version information string. +// The final string combines OS release information (where available) and the +// result of the `uname` system call. +func platformOSDescription() (string, error) { + uname, err := uname() + if err != nil { + return "", err + } + + osRelease := osRelease() + if osRelease != "" { + return fmt.Sprintf("%s (%s)", osRelease, uname), nil + } + + return uname, nil +} + +// uname issues a uname(2) system call (or equivalent on systems which doesn't +// have one) and formats the output in a single string, similar to the output +// of the `uname` commandline program. The final string resembles the one +// obtained with a call to `uname -snrvm`. 
+func uname() (string, error) { + var utsName unix.Utsname + + err := currentUnameProvider(&utsName) + if err != nil { + return "", err + } + + return fmt.Sprintf("%s %s %s %s %s", + unix.ByteSliceToString(utsName.Sysname[:]), + unix.ByteSliceToString(utsName.Nodename[:]), + unix.ByteSliceToString(utsName.Release[:]), + unix.ByteSliceToString(utsName.Version[:]), + unix.ByteSliceToString(utsName.Machine[:]), + ), nil +} + +// getFirstAvailableFile returns an *os.File of the first available +// file from a list of candidate file paths. +func getFirstAvailableFile(candidates []string) (*os.File, error) { + for _, c := range candidates { + file, err := os.Open(c) + if err == nil { + return file, nil + } + } + + return nil, fmt.Errorf("no candidate file available: %v", candidates) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go new file mode 100644 index 0000000000..a77742b077 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +// platformOSDescription is a placeholder implementation for OSes +// for which this project currently doesn't support os.description +// attribute detection. See build tags declaration early on this file +// for a list of unsupported OSes. +func platformOSDescription() (string, error) { + return "", nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go new file mode 100644 index 0000000000..5e3d199d78 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go @@ -0,0 +1,90 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "fmt" + "strconv" + + "golang.org/x/sys/windows/registry" +) + +// platformOSDescription returns a human readable OS version information string. +// It does so by querying registry values under the +// `SOFTWARE\Microsoft\Windows NT\CurrentVersion` key. The final string +// resembles the one displayed by the Version Reporter Applet (winver.exe). 
+func platformOSDescription() (string, error) { + k, err := registry.OpenKey( + registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) + + if err != nil { + return "", err + } + + defer k.Close() + + var ( + productName = readProductName(k) + displayVersion = readDisplayVersion(k) + releaseID = readReleaseID(k) + currentMajorVersionNumber = readCurrentMajorVersionNumber(k) + currentMinorVersionNumber = readCurrentMinorVersionNumber(k) + currentBuildNumber = readCurrentBuildNumber(k) + ubr = readUBR(k) + ) + + if displayVersion != "" { + displayVersion += " " + } + + return fmt.Sprintf("%s %s(%s) [Version %s.%s.%s.%s]", + productName, + displayVersion, + releaseID, + currentMajorVersionNumber, + currentMinorVersionNumber, + currentBuildNumber, + ubr, + ), nil +} + +func getStringValue(name string, k registry.Key) string { + value, _, _ := k.GetStringValue(name) + + return value +} + +func getIntegerValue(name string, k registry.Key) uint64 { + value, _, _ := k.GetIntegerValue(name) + + return value +} + +func readProductName(k registry.Key) string { + return getStringValue("ProductName", k) +} + +func readDisplayVersion(k registry.Key) string { + return getStringValue("DisplayVersion", k) +} + +func readReleaseID(k registry.Key) string { + return getStringValue("ReleaseID", k) +} + +func readCurrentMajorVersionNumber(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("CurrentMajorVersionNumber", k), 10) +} + +func readCurrentMinorVersionNumber(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("CurrentMinorVersionNumber", k), 10) +} + +func readCurrentBuildNumber(k registry.Key) string { + return getStringValue("CurrentBuildNumber", k) +} + +func readUBR(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("UBR", k), 10) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go new file mode 100644 index 0000000000..085fe68fd7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -0,0 +1,173 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "fmt" + "os" + "os/user" + "path/filepath" + "runtime" + + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type ( + pidProvider func() int + executablePathProvider func() (string, error) + commandArgsProvider func() []string + ownerProvider func() (*user.User, error) + runtimeNameProvider func() string + runtimeVersionProvider func() string + runtimeOSProvider func() string + runtimeArchProvider func() string +) + +var ( + defaultPidProvider pidProvider = os.Getpid + defaultExecutablePathProvider executablePathProvider = os.Executable + defaultCommandArgsProvider commandArgsProvider = func() []string { return os.Args } + defaultOwnerProvider ownerProvider = user.Current + defaultRuntimeNameProvider runtimeNameProvider = func() string { + if runtime.Compiler == "gc" { + return "go" + } + return runtime.Compiler + } + defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version + defaultRuntimeOSProvider runtimeOSProvider = func() string { return runtime.GOOS } + defaultRuntimeArchProvider runtimeArchProvider = func() string { return runtime.GOARCH } +) + +var ( + pid = defaultPidProvider + executablePath = defaultExecutablePathProvider + commandArgs = defaultCommandArgsProvider + owner = defaultOwnerProvider + runtimeName = 
defaultRuntimeNameProvider + runtimeVersion = defaultRuntimeVersionProvider + runtimeOS = defaultRuntimeOSProvider + runtimeArch = defaultRuntimeArchProvider +) + +func setDefaultOSProviders() { + setOSProviders( + defaultPidProvider, + defaultExecutablePathProvider, + defaultCommandArgsProvider, + ) +} + +func setOSProviders( + pidProvider pidProvider, + executablePathProvider executablePathProvider, + commandArgsProvider commandArgsProvider, +) { + pid = pidProvider + executablePath = executablePathProvider + commandArgs = commandArgsProvider +} + +func setDefaultRuntimeProviders() { + setRuntimeProviders( + defaultRuntimeNameProvider, + defaultRuntimeVersionProvider, + defaultRuntimeOSProvider, + defaultRuntimeArchProvider, + ) +} + +func setRuntimeProviders( + runtimeNameProvider runtimeNameProvider, + runtimeVersionProvider runtimeVersionProvider, + runtimeOSProvider runtimeOSProvider, + runtimeArchProvider runtimeArchProvider, +) { + runtimeName = runtimeNameProvider + runtimeVersion = runtimeVersionProvider + runtimeOS = runtimeOSProvider + runtimeArch = runtimeArchProvider +} + +func setDefaultUserProviders() { + setUserProviders(defaultOwnerProvider) +} + +func setUserProviders(ownerProvider ownerProvider) { + owner = ownerProvider +} + +type ( + processPIDDetector struct{} + processExecutableNameDetector struct{} + processExecutablePathDetector struct{} + processCommandArgsDetector struct{} + processOwnerDetector struct{} + processRuntimeNameDetector struct{} + processRuntimeVersionDetector struct{} + processRuntimeDescriptionDetector struct{} +) + +// Detect returns a *Resource that describes the process identifier (PID) of the +// executing process. +func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil +} + +// Detect returns a *Resource that describes the name of the process executable. +func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) { + executableName := filepath.Base(commandArgs()[0]) + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil +} + +// Detect returns a *Resource that describes the full path of the process executable. +func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) { + executablePath, err := executablePath() + if err != nil { + return nil, err + } + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePath(executablePath)), nil +} + +// Detect returns a *Resource that describes all the command arguments as received +// by the process. +func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil +} + +// Detect returns a *Resource that describes the username of the user that owns the +// process. +func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { + owner, err := owner() + if err != nil { + return nil, err + } + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwner(owner.Username)), nil +} + +// Detect returns a *Resource that describes the name of the compiler used to compile +// this process image. +func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil +} + +// Detect returns a *Resource that describes the version of the runtime of this process. 
+func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil +} + +// Detect returns a *Resource that describes the runtime of this process. +func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { + runtimeDescription := fmt.Sprintf( + "go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch()) + + return NewWithAttributes( + semconv.SchemaURL, + semconv.ProcessRuntimeDescription(runtimeDescription), + ), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go new file mode 100644 index 0000000000..ad4b50df40 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -0,0 +1,294 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "errors" + "fmt" + "sync" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/internal/x" +) + +// Resource describes an entity about which identifying information +// and metadata is exposed. Resource is an immutable object, +// equivalent to a map from key to unique value. +// +// Resources should be passed and stored as pointers +// (`*resource.Resource`). The `nil` value is equivalent to an empty +// Resource. +type Resource struct { + attrs attribute.Set + schemaURL string +} + +var ( + defaultResource *Resource + defaultResourceOnce sync.Once +) + +// ErrSchemaURLConflict is an error returned when two Resources are merged +// together that contain different, non-empty, schema URLs. +var ErrSchemaURLConflict = errors.New("conflicting Schema URL") + +// New returns a [Resource] built using opts. +// +// This may return a partial Resource along with an error containing +// [ErrPartialResource] if options that provide a [Detector] are used and that +// error is returned from one or more of the Detectors. It may also return a +// merge-conflict Resource along with an error containing +// [ErrSchemaURLConflict] if merging Resources from the opts results in a +// schema URL conflict (see [Resource.Merge] for more information). It is up to +// the caller to determine if this returned Resource should be used or not +// based on these errors. +func New(ctx context.Context, opts ...Option) (*Resource, error) { + cfg := config{} + for _, opt := range opts { + cfg = opt.apply(cfg) + } + + r := &Resource{schemaURL: cfg.schemaURL} + return r, detect(ctx, r, cfg.detectors) +} + +// NewWithAttributes creates a resource from attrs and associates the resource with a +// schema URL. If attrs contains duplicate keys, the last value will be used. If attrs +// contains any invalid items those items will be dropped. The attrs are assumed to be +// in a schema identified by schemaURL. +func NewWithAttributes(schemaURL string, attrs ...attribute.KeyValue) *Resource { + resource := NewSchemaless(attrs...) + resource.schemaURL = schemaURL + return resource +} + +// NewSchemaless creates a resource from attrs. If attrs contains duplicate keys, +// the last value will be used. If attrs contains any invalid items those items will +// be dropped. The resource will not be associated with a schema URL. If the schema +// of the attrs is known use NewWithAttributes instead. 
+func NewSchemaless(attrs ...attribute.KeyValue) *Resource { + if len(attrs) == 0 { + return &Resource{} + } + + // Ensure attributes comply with the specification: + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/common/README.md#attribute + s, _ := attribute.NewSetWithFiltered(attrs, func(kv attribute.KeyValue) bool { + return kv.Valid() + }) + + // If attrs only contains invalid entries do not allocate a new resource. + if s.Len() == 0 { + return &Resource{} + } + + return &Resource{attrs: s} //nolint +} + +// String implements the Stringer interface and provides a +// human-readable form of the resource. +// +// Avoid using this representation as the key in a map of resources, +// use Equivalent() as the key instead. +func (r *Resource) String() string { + if r == nil { + return "" + } + return r.attrs.Encoded(attribute.DefaultEncoder()) +} + +// MarshalLog is the marshaling function used by the logging system to represent this Resource. +func (r *Resource) MarshalLog() interface{} { + return struct { + Attributes attribute.Set + SchemaURL string + }{ + Attributes: r.attrs, + SchemaURL: r.schemaURL, + } +} + +// Attributes returns a copy of attributes from the resource in a sorted order. +// To avoid allocating a new slice, use an iterator. +func (r *Resource) Attributes() []attribute.KeyValue { + if r == nil { + r = Empty() + } + return r.attrs.ToSlice() +} + +// SchemaURL returns the schema URL associated with Resource r. +func (r *Resource) SchemaURL() string { + if r == nil { + return "" + } + return r.schemaURL +} + +// Iter returns an iterator of the Resource attributes. +// This is ideal to use if you do not want a copy of the attributes. +func (r *Resource) Iter() attribute.Iterator { + if r == nil { + r = Empty() + } + return r.attrs.Iter() +} + +// Equal returns true when a Resource is equivalent to this Resource. +func (r *Resource) Equal(eq *Resource) bool { + if r == nil { + r = Empty() + } + if eq == nil { + eq = Empty() + } + return r.Equivalent() == eq.Equivalent() +} + +// Merge creates a new [Resource] by merging a and b. +// +// If there are common keys between a and b, then the value from b will +// overwrite the value from a, even if b's value is empty. +// +// The SchemaURL of the resources will be merged according to the +// [OpenTelemetry specification rules]: +// +// - If a's schema URL is empty then the returned Resource's schema URL will +// be set to the schema URL of b, +// - Else if b's schema URL is empty then the returned Resource's schema URL +// will be set to the schema URL of a, +// - Else if the schema URLs of a and b are the same then that will be the +// schema URL of the returned Resource, +// - Else this is a merging error. If the resources have different, +// non-empty, schema URLs an error containing [ErrSchemaURLConflict] will +// be returned with the merged Resource. The merged Resource will have an +// empty schema URL. It may be the case that some unintended attributes +// have been overwritten or old semantic conventions persisted in the +// returned Resource. It is up to the caller to determine if this returned +// Resource should be used or not. 
+// +// [OpenTelemetry specification rules]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#merge +func Merge(a, b *Resource) (*Resource, error) { + if a == nil && b == nil { + return Empty(), nil + } + if a == nil { + return b, nil + } + if b == nil { + return a, nil + } + + // Note: 'b' attributes will overwrite 'a' with last-value-wins in attribute.Key() + // Meaning this is equivalent to: append(a.Attributes(), b.Attributes()...) + mi := attribute.NewMergeIterator(b.Set(), a.Set()) + combine := make([]attribute.KeyValue, 0, a.Len()+b.Len()) + for mi.Next() { + combine = append(combine, mi.Attribute()) + } + + switch { + case a.schemaURL == "": + return NewWithAttributes(b.schemaURL, combine...), nil + case b.schemaURL == "": + return NewWithAttributes(a.schemaURL, combine...), nil + case a.schemaURL == b.schemaURL: + return NewWithAttributes(a.schemaURL, combine...), nil + } + // Return the merged resource with an appropriate error. It is up to + // the user to decide if the returned resource can be used or not. + return NewSchemaless(combine...), fmt.Errorf( + "%w: %s and %s", + ErrSchemaURLConflict, + a.schemaURL, + b.schemaURL, + ) +} + +// Empty returns an instance of Resource with no attributes. It is +// equivalent to a `nil` Resource. +func Empty() *Resource { + return &Resource{} +} + +// Default returns an instance of Resource with a default +// "service.name" and OpenTelemetrySDK attributes. +func Default() *Resource { + defaultResourceOnce.Do(func() { + var err error + defaultDetectors := []Detector{ + defaultServiceNameDetector{}, + fromEnv{}, + telemetrySDK{}, + } + if x.Resource.Enabled() { + defaultDetectors = append([]Detector{defaultServiceInstanceIDDetector{}}, defaultDetectors...) + } + defaultResource, err = Detect( + context.Background(), + defaultDetectors..., + ) + if err != nil { + otel.Handle(err) + } + // If Detect did not return a valid resource, fall back to emptyResource. + if defaultResource == nil { + defaultResource = &Resource{} + } + }) + return defaultResource +} + +// Environment returns an instance of Resource with attributes +// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable. +func Environment() *Resource { + detector := &fromEnv{} + resource, err := detector.Detect(context.Background()) + if err != nil { + otel.Handle(err) + } + return resource +} + +// Equivalent returns an object that can be compared for equality +// between two resources. This value is suitable for use as a key in +// a map. +func (r *Resource) Equivalent() attribute.Distinct { + return r.Set().Equivalent() +} + +// Set returns the equivalent *attribute.Set of this resource's attributes. +func (r *Resource) Set() *attribute.Set { + if r == nil { + r = Empty() + } + return &r.attrs +} + +// MarshalJSON encodes the resource attributes as a JSON list of { "Key": +// "...", "Value": ... } pairs in order sorted by key. +func (r *Resource) MarshalJSON() ([]byte, error) { + if r == nil { + r = Empty() + } + return r.attrs.MarshalJSON() +} + +// Len returns the number of unique key-values in this Resource. +func (r *Resource) Len() int { + if r == nil { + return 0 + } + return r.attrs.Len() +} + +// Encoded returns an encoded representation of the resource. 
+func (r *Resource) Encoded(enc attribute.Encoder) string { + if r == nil { + return "" + } + return r.attrs.Encoded(enc) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go new file mode 100644 index 0000000000..b7cede891c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk // import "go.opentelemetry.io/otel/sdk" + +// Version is the current release version of the OpenTelemetry SDK in use. +func Version() string { + return "1.29.0" +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md new file mode 100644 index 0000000000..0b6cbe960c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.24.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.24.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.24.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go new file mode 100644 index 0000000000..6e688345cb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go @@ -0,0 +1,4387 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +import "go.opentelemetry.io/otel/attribute" + +// Describes FaaS attributes. +const ( + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. Since it's + // part of the endpoint being called, the region is always known to + // clients. In these cases, `faas.invoked_region` MUST be set accordingly. + // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) + // Stability: experimental + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. 
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") + + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function invocation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + FaaSTriggerKey = attribute.Key("faas.trigger") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// Attributes for Events represented using Log Records. +const ( + // EventNameKey is the attribute Key conforming to the "event.name" + // semantic conventions. It represents the identifies the class / type of + // event. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'browser.mouse.click', 'device.app.lifecycle' + // Note: Event names are subject to the same rules as [attribute + // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.26.0/specification/common/attribute-naming.md). + // Notably, event names are namespaced to avoid collisions and provide a + // clean separation of semantics for events in separate domains like + // browser, mobile, and kubernetes. + EventNameKey = attribute.Key("event.name") +) + +// EventName returns an attribute KeyValue conforming to the "event.name" +// semantic conventions. It represents the identifies the class / type of +// event. +func EventName(val string) attribute.KeyValue { + return EventNameKey.String(val) +} + +// The attributes described in this section are rather generic. They may be +// used in any Log Record they apply to. +const ( + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log + // Record. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means, that two + // distinguishable log records MUST have different values. + // The id MAY be an [Universally Unique Lexicographically Sortable + // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers + // (e.g. UUID) may be used as needed. + LogRecordUIDKey = attribute.Key("log.record.uid") +) + +// LogRecordUID returns an attribute KeyValue conforming to the +// "log.record.uid" semantic conventions. It represents a unique identifier for +// the Log Record. +func LogRecordUID(val string) attribute.KeyValue { + return LogRecordUIDKey.String(val) +} + +// Describes Log attributes +const ( + // LogIostreamKey is the attribute Key conforming to the "log.iostream" + // semantic conventions. It represents the stream associated with the log. + // See below for a list of well-known values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + LogIostreamKey = attribute.Key("log.iostream") +) + +var ( + // Logs from stdout stream + LogIostreamStdout = LogIostreamKey.String("stdout") + // Events from stderr stream + LogIostreamStderr = LogIostreamKey.String("stderr") +) + +// A file to which log was emitted. +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: experimental + // Examples: 'audit.log' + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the + // basename of the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'uuid.log' + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/log/mysql/audit.log' + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full + // path to the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/lib/docker/uuid.log' + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") +) + +// LogFileName returns an attribute KeyValue conforming to the +// "log.file.name" semantic conventions. It represents the basename of the +// file. +func LogFileName(val string) attribute.KeyValue { + return LogFileNameKey.String(val) +} + +// LogFileNameResolved returns an attribute KeyValue conforming to the +// "log.file.name_resolved" semantic conventions. It represents the basename of +// the file, with symlinks resolved. +func LogFileNameResolved(val string) attribute.KeyValue { + return LogFileNameResolvedKey.String(val) +} + +// LogFilePath returns an attribute KeyValue conforming to the +// "log.file.path" semantic conventions. It represents the full path to the +// file. 
+func LogFilePath(val string) attribute.KeyValue { + return LogFilePathKey.String(val) +} + +// LogFilePathResolved returns an attribute KeyValue conforming to the +// "log.file.path_resolved" semantic conventions. It represents the full path +// to the file, with symlinks resolved. +func LogFilePathResolved(val string) attribute.KeyValue { + return LogFilePathResolvedKey.String(val) +} + +// Describes Database attributes +const ( + // PoolNameKey is the attribute Key conforming to the "pool.name" semantic + // conventions. It represents the name of the connection pool; unique + // within the instrumented application. In case the connection pool + // implementation doesn't provide a name, then the + // [db.connection_string](/docs/database/database-spans.md#connection-level-attributes) + // should be used + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'myDataSource' + PoolNameKey = attribute.Key("pool.name") + + // StateKey is the attribute Key conforming to the "state" semantic + // conventions. It represents the state of a connection in the pool + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + // Examples: 'idle' + StateKey = attribute.Key("state") +) + +var ( + // idle + StateIdle = StateKey.String("idle") + // used + StateUsed = StateKey.String("used") +) + +// PoolName returns an attribute KeyValue conforming to the "pool.name" +// semantic conventions. It represents the name of the connection pool; unique +// within the instrumented application. In case the connection pool +// implementation doesn't provide a name, then the +// [db.connection_string](/docs/database/database-spans.md#connection-level-attributes) +// should be used +func PoolName(val string) attribute.KeyValue { + return PoolNameKey.String(val) +} + +// ASP.NET Core attributes +const ( + // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to + // the "aspnetcore.diagnostics.handler.type" semantic conventions. It + // represents the full type name of the + // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) + // implementation that handled the exception. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if and only if the exception + // was handled by this handler.) + // Stability: experimental + // Examples: 'Contoso.MyHandler' + AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") + + // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.policy" semantic conventions. It represents + // the rate limiting policy name. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if the matched endpoint for the + // request had a rate-limiting policy.) + // Stability: experimental + // Examples: 'fixed', 'sliding', 'token' + AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") + + // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.result" semantic conventions. 
It represents + // the rate-limiting result, shows whether the lease was acquired or + // contains a rejection reason + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + // Examples: 'acquired', 'request_canceled' + AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") + + // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the + // "aspnetcore.request.is_unhandled" semantic conventions. It represents + // the flag indicating if request was handled by the application pipeline. + // + // Type: boolean + // RequirementLevel: ConditionallyRequired (if and only if the request was + // not handled.) + // Stability: experimental + // Examples: True + AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") + + // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the + // "aspnetcore.routing.is_fallback" semantic conventions. It represents a + // value that indicates whether the matched route is a fallback route. + // + // Type: boolean + // RequirementLevel: ConditionallyRequired (If and only if a route was + // successfully matched.) + // Stability: experimental + // Examples: True + AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") +) + +var ( + // Lease was acquired + AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") + // Lease request was rejected by the endpoint limiter + AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") + // Lease request was rejected by the global limiter + AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") + // Lease request was canceled + AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") +) + +// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming +// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It +// represents the full type name of the +// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) +// implementation that handled the exception. +func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { + return AspnetcoreDiagnosticsHandlerTypeKey.String(val) +} + +// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to +// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents +// the rate limiting policy name. +func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { + return AspnetcoreRateLimitingPolicyKey.String(val) +} + +// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to +// the "aspnetcore.request.is_unhandled" semantic conventions. It represents +// the flag indicating if request was handled by the application pipeline. +func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { + return AspnetcoreRequestIsUnhandledKey.Bool(val) +} + +// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to +// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a +// value that indicates whether the matched route is a fallback route. +func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { + return AspnetcoreRoutingIsFallbackKey.Bool(val) +} + +// SignalR attributes +const ( + // SignalrConnectionStatusKey is the attribute Key conforming to the + // "signalr.connection.status" semantic conventions. 
It represents the + // signalR HTTP connection closure status. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'app_shutdown', 'timeout' + SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") + + // SignalrTransportKey is the attribute Key conforming to the + // "signalr.transport" semantic conventions. It represents the [SignalR + // transport + // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'web_sockets', 'long_polling' + SignalrTransportKey = attribute.Key("signalr.transport") +) + +var ( + // The connection was closed normally + SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") + // The connection was closed due to a timeout + SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") + // The connection was closed because the app is shutting down + SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") +) + +var ( + // ServerSentEvents protocol + SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") + // LongPolling protocol + SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") + // WebSockets protocol + SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") +) + +// Describes JVM buffer metric attributes. +const ( + // JvmBufferPoolNameKey is the attribute Key conforming to the + // "jvm.buffer.pool.name" semantic conventions. It represents the name of + // the buffer pool. + // + // Type: string + // RequirementLevel: Recommended + // Stability: experimental + // Examples: 'mapped', 'direct' + // Note: Pool names are generally obtained via + // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). + JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") +) + +// JvmBufferPoolName returns an attribute KeyValue conforming to the +// "jvm.buffer.pool.name" semantic conventions. It represents the name of the +// buffer pool. +func JvmBufferPoolName(val string) attribute.KeyValue { + return JvmBufferPoolNameKey.String(val) +} + +// Describes JVM memory metric attributes. +const ( + // JvmMemoryPoolNameKey is the attribute Key conforming to the + // "jvm.memory.pool.name" semantic conventions. It represents the name of + // the memory pool. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' + // Note: Pool names are generally obtained via + // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). + JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") + + // JvmMemoryTypeKey is the attribute Key conforming to the + // "jvm.memory.type" semantic conventions. It represents the type of + // memory. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'heap', 'non_heap' + JvmMemoryTypeKey = attribute.Key("jvm.memory.type") +) + +var ( + // Heap memory + JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap") + // Non-heap memory + JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap") +) + +// JvmMemoryPoolName returns an attribute KeyValue conforming to the +// "jvm.memory.pool.name" semantic conventions. It represents the name of the +// memory pool. +func JvmMemoryPoolName(val string) attribute.KeyValue { + return JvmMemoryPoolNameKey.String(val) +} + +// Describes System metric attributes +const ( + // SystemDeviceKey is the attribute Key conforming to the "system.device" + // semantic conventions. It represents the device identifier + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '(identifier)' + SystemDeviceKey = attribute.Key("system.device") +) + +// SystemDevice returns an attribute KeyValue conforming to the +// "system.device" semantic conventions. It represents the device identifier +func SystemDevice(val string) attribute.KeyValue { + return SystemDeviceKey.String(val) +} + +// Describes System CPU metric attributes +const ( + // SystemCPULogicalNumberKey is the attribute Key conforming to the + // "system.cpu.logical_number" semantic conventions. It represents the + // logical CPU number [0..n-1] + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1 + SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") + + // SystemCPUStateKey is the attribute Key conforming to the + // "system.cpu.state" semantic conventions. It represents the state of the + // CPU + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'idle', 'interrupt' + SystemCPUStateKey = attribute.Key("system.cpu.state") +) + +var ( + // user + SystemCPUStateUser = SystemCPUStateKey.String("user") + // system + SystemCPUStateSystem = SystemCPUStateKey.String("system") + // nice + SystemCPUStateNice = SystemCPUStateKey.String("nice") + // idle + SystemCPUStateIdle = SystemCPUStateKey.String("idle") + // iowait + SystemCPUStateIowait = SystemCPUStateKey.String("iowait") + // interrupt + SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") + // steal + SystemCPUStateSteal = SystemCPUStateKey.String("steal") +) + +// SystemCPULogicalNumber returns an attribute KeyValue conforming to the +// "system.cpu.logical_number" semantic conventions. It represents the logical +// CPU number [0..n-1] +func SystemCPULogicalNumber(val int) attribute.KeyValue { + return SystemCPULogicalNumberKey.Int(val) +} + +// Describes System Memory metric attributes +const ( + // SystemMemoryStateKey is the attribute Key conforming to the + // "system.memory.state" semantic conventions. 
It represents the memory + // state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free', 'cached' + SystemMemoryStateKey = attribute.Key("system.memory.state") +) + +var ( + // used + SystemMemoryStateUsed = SystemMemoryStateKey.String("used") + // free + SystemMemoryStateFree = SystemMemoryStateKey.String("free") + // shared + SystemMemoryStateShared = SystemMemoryStateKey.String("shared") + // buffers + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Describes System Memory Paging metric attributes +const ( + // SystemPagingDirectionKey is the attribute Key conforming to the + // "system.paging.direction" semantic conventions. It represents the paging + // access direction + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'in' + SystemPagingDirectionKey = attribute.Key("system.paging.direction") + + // SystemPagingStateKey is the attribute Key conforming to the + // "system.paging.state" semantic conventions. It represents the memory + // paging state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free' + SystemPagingStateKey = attribute.Key("system.paging.state") + + // SystemPagingTypeKey is the attribute Key conforming to the + // "system.paging.type" semantic conventions. It represents the memory + // paging type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'minor' + SystemPagingTypeKey = attribute.Key("system.paging.type") +) + +var ( + // in + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +var ( + // used + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +var ( + // major + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Describes Filesystem metric attributes +const ( + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the + // filesystem mode + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'rw, ro' + SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") + + // SystemFilesystemMountpointKey is the attribute Key conforming to the + // "system.filesystem.mountpoint" semantic conventions. It represents the + // filesystem mount path + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/mnt/data' + SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") + + // SystemFilesystemStateKey is the attribute Key conforming to the + // "system.filesystem.state" semantic conventions. It represents the + // filesystem state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'used' + SystemFilesystemStateKey = attribute.Key("system.filesystem.state") + + // SystemFilesystemTypeKey is the attribute Key conforming to the + // "system.filesystem.type" semantic conventions. 
It represents the + // filesystem type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ext4' + SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") +) + +var ( + // used + SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") + // free + SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") + // reserved + SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") +) + +var ( + // fat32 + SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") + // exfat + SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") + // ntfs + SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") + // refs + SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") + // hfsplus + SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") + // ext4 + SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") +) + +// SystemFilesystemMode returns an attribute KeyValue conforming to the +// "system.filesystem.mode" semantic conventions. It represents the filesystem +// mode +func SystemFilesystemMode(val string) attribute.KeyValue { + return SystemFilesystemModeKey.String(val) +} + +// SystemFilesystemMountpoint returns an attribute KeyValue conforming to +// the "system.filesystem.mountpoint" semantic conventions. It represents the +// filesystem mount path +func SystemFilesystemMountpoint(val string) attribute.KeyValue { + return SystemFilesystemMountpointKey.String(val) +} + +// Describes Network metric attributes +const ( + // SystemNetworkStateKey is the attribute Key conforming to the + // "system.network.state" semantic conventions. It represents a stateless + // protocol MUST NOT set this attribute + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'close_wait' + SystemNetworkStateKey = attribute.Key("system.network.state") +) + +var ( + // close + SystemNetworkStateClose = SystemNetworkStateKey.String("close") + // close_wait + SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait") + // closing + SystemNetworkStateClosing = SystemNetworkStateKey.String("closing") + // delete + SystemNetworkStateDelete = SystemNetworkStateKey.String("delete") + // established + SystemNetworkStateEstablished = SystemNetworkStateKey.String("established") + // fin_wait_1 + SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1") + // fin_wait_2 + SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2") + // last_ack + SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack") + // listen + SystemNetworkStateListen = SystemNetworkStateKey.String("listen") + // syn_recv + SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv") + // syn_sent + SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent") + // time_wait + SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait") +) + +// Describes System Process metric attributes +const ( + // SystemProcessesStatusKey is the attribute Key conforming to the + // "system.processes.status" semantic conventions. 
It represents the + // process state, e.g., [Linux Process State + // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'running' + SystemProcessesStatusKey = attribute.Key("system.processes.status") +) + +var ( + // running + SystemProcessesStatusRunning = SystemProcessesStatusKey.String("running") + // sleeping + SystemProcessesStatusSleeping = SystemProcessesStatusKey.String("sleeping") + // stopped + SystemProcessesStatusStopped = SystemProcessesStatusKey.String("stopped") + // defunct + SystemProcessesStatusDefunct = SystemProcessesStatusKey.String("defunct") +) + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ClientAddressKey is the attribute Key conforming to the "client.address" + // semantic conventions. It represents the client address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.address` SHOULD represent the client address + // behind any intermediaries, for example proxies, if it's available. + ClientAddressKey = attribute.Key("client.address") + + // ClientPortKey is the attribute Key conforming to the "client.port" + // semantic conventions. It represents the client port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.port` SHOULD represent the client port behind + // any intermediaries, for example proxies, if it's available. + ClientPortKey = attribute.Key("client.port") +) + +// ClientAddress returns an attribute KeyValue conforming to the +// "client.address" semantic conventions. It represents the client address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func ClientAddress(val string) attribute.KeyValue { + return ClientAddressKey.String(val) +} + +// ClientPort returns an attribute KeyValue conforming to the "client.port" +// semantic conventions. It represents the client port number. +func ClientPort(val int) attribute.KeyValue { + return ClientPortKey.Int(val) +} + +// The attributes used to describe telemetry in the context of databases. +const ( + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") + + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents + // whether or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + + // DBCassandraTableKey is the attribute Key conforming to the + // "db.cassandra.table" semantic conventions. It represents the name of the + // primary Cassandra table that the operation is acting upon, including the + // keyspace name (if applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra + // rather than sql. It is not recommended to attempt any client-side + // parsing of `db.statement` just to get this property, but it should be + // set if it is provided by the library being instrumented. If the + // operation is acting upon an anonymous table, or more than one table, + // this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + + // DBConnectionStringKey is the attribute Key conforming to the + // "db.connection_string" semantic conventions. It represents the + // connection string used to connect to the database. It is recommended to + // remove embedded credentials.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. It represents the unique + // Cosmos client instance id. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + + // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + + // DBCosmosDBContainerKey is the attribute Key conforming to the + // "db.cosmosdb.container" semantic conventions. It represents the cosmos + // DB container name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'anystring' + DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") + + // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // cosmosDB Operation Type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + + // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions. It represents the request units (RU) + // consumed for that operation + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 46.18, 1.0 + DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") + + // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the + // "db.cosmosdb.request_content_length" semantic conventions. It represents + // the request payload size in bytes + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") + + // DBCosmosDBStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos + // DB status code. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 200, 201 + DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") + + // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.sub_status_code" semantic conventions. It represents the + // cosmos DB sub status code. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000, 1002 + DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") + + // DBElasticsearchClusterNameKey is the attribute Key conforming to the + // "db.elasticsearch.cluster.name" semantic conventions. It represents the + // identifier of an Elasticsearch cluster.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' + DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name") + + // DBElasticsearchNodeNameKey is the attribute Key conforming to the + // "db.elasticsearch.node.name" semantic conventions. It represents the + // human-readable identifier of the node/instance to which a + // request was routed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-0000000001' + DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name") + + // DBInstanceIDKey is the attribute Key conforming to the "db.instance.id" + // semantic conventions. It represents an identifier (address, unique name, + // or any other identifier) of the database instance that is executing + // queries or mutations on the current connection. This is useful in cases + // where the database is running in a clustered environment and the + // instrumentation is able to record the node executing the query. The + // client may obtain this value in databases like MySQL using queries like + // `select @@hostname`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mysql-e26b99z.example.com' + DBInstanceIDKey = attribute.Key("db.instance.id") + + // DBJDBCDriverClassnameKey is the attribute Key conforming to the + // "db.jdbc.driver_classname" semantic conventions. It represents the + // fully-qualified class name of the [Java Database Connectivity + // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) + // driver used to connect. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") + + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the MongoDB + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") + + // DBMSSQLInstanceNameKey is the attribute Key conforming to the + // "db.mssql.instance_name" semantic conventions. It represents the + // Microsoft SQL Server [instance + // name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // connecting to. This name is used to determine the port of a named + // instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MSSQLSERVER' + // Note: If setting a `db.mssql.instance_name`, `server.port` is no longer + // required (but still recommended if non-standard). + DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") + + // DBNameKey is the attribute Key conforming to the "db.name" semantic + // conventions. It represents the name + // of the database being accessed. For commands that switch the database, + // this should be set to the target database (even if the command fails).
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema + // name), the database name to be used is the more specific layer (e.g. + // Oracle schema name). + DBNameKey = attribute.Key("db.name") + + // DBOperationKey is the attribute Key conforming to the "db.operation" + // semantic conventions. It represents the name of the operation being + // executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to + // attempt any client-side parsing of `db.statement` just to get this + // property, but it should be set if the operation name is provided by the + // library being instrumented. If the SQL statement has an ambiguous + // operation, or performs more than one operation, this value may be + // omitted. + DBOperationKey = attribute.Key("db.operation") + + // DBRedisDBIndexKey is the attribute Key conforming to the + // "db.redis.database_index" semantic conventions. It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") + + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") + + // DBStatementKey is the attribute Key conforming to the "db.statement" + // semantic conventions. It represents the database statement being + // executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + DBStatementKey = attribute.Key("db.statement") + + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents an identifier for the database management + // system (DBMS) product being used. See below for a list of well-known + // identifiers. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBSystemKey = attribute.Key("db.system") + + // DBUserKey is the attribute Key conforming to the "db.user" semantic + // conventions. It represents the username for accessing the database. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +var ( + // Gateway (HTTP) connections mode + DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") + // Direct connection + DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") +) + +var ( + // invalid + DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") + // create + DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") + // patch + DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") + // read + DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") + // read_feed + DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") + // delete + DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") + // replace + DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") + // execute + DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") + // query + DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") + // head + DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") + // head_feed + DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") + // upsert + DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") + // batch + DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") + // query_plan + DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") + // execute_javascript + DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") +) + +var ( + // Some other SQL database. Fallback only. 
See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") + // Cloud Spanner + DBSystemSpanner = DBSystemKey.String("spanner") + // Trino + DBSystemTrino = DBSystemKey.String("trino") +) + +// DBCassandraCoordinatorDC returns an 
attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. +func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. +func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// DBCassandraTable returns an attribute KeyValue conforming to the +// "db.cassandra.table" semantic conventions. It represents the name of the +// primary Cassandra table that the operation is acting upon, including the +// keyspace name (if applicable). +func DBCassandraTable(val string) attribute.KeyValue { + return DBCassandraTableKey.String(val) +} + +// DBConnectionString returns an attribute KeyValue conforming to the +// "db.connection_string" semantic conventions. It represents the connection +// string used to connect to the database. It is recommended to remove embedded +// credentials. +func DBConnectionString(val string) attribute.KeyValue { + return DBConnectionStringKey.String(val) +} + +// DBCosmosDBClientID returns an attribute KeyValue conforming to the +// "db.cosmosdb.client_id" semantic conventions. It represents the unique +// Cosmos client instance id. +func DBCosmosDBClientID(val string) attribute.KeyValue { + return DBCosmosDBClientIDKey.String(val) +} + +// DBCosmosDBContainer returns an attribute KeyValue conforming to the +// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB +// container name. +func DBCosmosDBContainer(val string) attribute.KeyValue { + return DBCosmosDBContainerKey.String(val) +} + +// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the +// "db.cosmosdb.request_charge" semantic conventions. It represents the request units (RU) +// consumed for that operation +func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { + return DBCosmosDBRequestChargeKey.Float64(val) +} + +// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming +// to the "db.cosmosdb.request_content_length" semantic conventions.
It +// represents the request payload size in bytes +func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { + return DBCosmosDBRequestContentLengthKey.Int(val) +} + +// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB +// status code. +func DBCosmosDBStatusCode(val int) attribute.KeyValue { + return DBCosmosDBStatusCodeKey.Int(val) +} + +// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos +// DB sub status code. +func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { + return DBCosmosDBSubStatusCodeKey.Int(val) +} + +// DBElasticsearchClusterName returns an attribute KeyValue conforming to +// the "db.elasticsearch.cluster.name" semantic conventions. It represents the +// identifier of an Elasticsearch cluster. +func DBElasticsearchClusterName(val string) attribute.KeyValue { + return DBElasticsearchClusterNameKey.String(val) +} + +// DBElasticsearchNodeName returns an attribute KeyValue conforming to the +// "db.elasticsearch.node.name" semantic conventions. It represents the +// human-readable identifier of the node/instance to which a +// request was routed. +func DBElasticsearchNodeName(val string) attribute.KeyValue { + return DBElasticsearchNodeNameKey.String(val) +} + +// DBInstanceID returns an attribute KeyValue conforming to the +// "db.instance.id" semantic conventions. It represents an identifier (address, +// unique name, or any other identifier) of the database instance that is +// executing queries or mutations on the current connection. This is useful in +// cases where the database is running in a clustered environment and the +// instrumentation is able to record the node executing the query. The client +// may obtain this value in databases like MySQL using queries like `select +// @@hostname`. +func DBInstanceID(val string) attribute.KeyValue { + return DBInstanceIDKey.String(val) +} + +// DBJDBCDriverClassname returns an attribute KeyValue conforming to the +// "db.jdbc.driver_classname" semantic conventions. It represents the +// fully-qualified class name of the [Java Database Connectivity +// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver +// used to connect. +func DBJDBCDriverClassname(val string) attribute.KeyValue { + return DBJDBCDriverClassnameKey.String(val) +} + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the MongoDB +// collection being accessed within the database stated in `db.name`. +func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// DBMSSQLInstanceName returns an attribute KeyValue conforming to the +// "db.mssql.instance_name" semantic conventions. It represents the Microsoft +// SQL Server [instance +// name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) +// connecting to. This name is used to determine the port of a named instance. +func DBMSSQLInstanceName(val string) attribute.KeyValue { + return DBMSSQLInstanceNameKey.String(val) +} + +// DBName returns an attribute KeyValue conforming to the "db.name" semantic +// conventions. It represents the name of +// the database being accessed.
For commands that switch the database, this +// should be set to the target database (even if the command fails). +func DBName(val string) attribute.KeyValue { + return DBNameKey.String(val) +} + +// DBOperation returns an attribute KeyValue conforming to the +// "db.operation" semantic conventions. It represents the name of the operation +// being executed, e.g. the [MongoDB command +// name](https://docs.mongodb.com/manual/reference/command/#database-operations) +// such as `findAndModify`, or the SQL keyword. +func DBOperation(val string) attribute.KeyValue { + return DBOperationKey.String(val) +} + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// DBStatement returns an attribute KeyValue conforming to the +// "db.statement" semantic conventions. It represents the database statement +// being executed. +func DBStatement(val string) attribute.KeyValue { + return DBStatementKey.String(val) +} + +// DBUser returns an attribute KeyValue conforming to the "db.user" semantic +// conventions. It represents the username for accessing the database. +func DBUser(val string) attribute.KeyValue { + return DBUserKey.String(val) +} + +// Describes deprecated HTTP attributes. +const ( + // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" + // semantic conventions. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: deprecated + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorKey = attribute.Key("http.flavor") + + // HTTPMethodKey is the attribute Key conforming to the "http.method" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'GET', 'POST', 'HEAD' + // Deprecated: use `http.request.method` instead. + HTTPMethodKey = attribute.Key("http.method") + + // HTTPRequestContentLengthKey is the attribute Key conforming to the + // "http.request_content_length" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 3495 + // Deprecated: use `http.request.header.content-length` instead. + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + + // HTTPResponseContentLengthKey is the attribute Key conforming to the + // "http.response_content_length" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 3495 + // Deprecated: use `http.response.header.content-length` instead. + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") + + // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'http', 'https' + // Deprecated: use `url.scheme` instead. 
+ HTTPSchemeKey = attribute.Key("http.scheme") + + // HTTPStatusCodeKey is the attribute Key conforming to the + // "http.status_code" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 200 + // Deprecated: use `http.response.status_code` instead. + HTTPStatusCodeKey = attribute.Key("http.status_code") + + // HTTPTargetKey is the attribute Key conforming to the "http.target" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '/search?q=OpenTelemetry#SemConv' + // Deprecated: use `url.path` and `url.query` instead. + HTTPTargetKey = attribute.Key("http.target") + + // HTTPURLKey is the attribute Key conforming to the "http.url" semantic + // conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Deprecated: use `url.full` instead. + HTTPURLKey = attribute.Key("http.url") + + // HTTPUserAgentKey is the attribute Key conforming to the + // "http.user_agent" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU + // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1' + // Deprecated: use `user_agent.original` instead. + HTTPUserAgentKey = attribute.Key("http.user_agent") +) + +var ( + // HTTP/1.0 + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") + // HTTP/1.1 + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") + // HTTP/2 + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") + // HTTP/3 + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") + // SPDY protocol + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") + // QUIC protocol + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") +) + +// HTTPMethod returns an attribute KeyValue conforming to the "http.method" +// semantic conventions. +// +// Deprecated: use `http.request.method` instead. +func HTTPMethod(val string) attribute.KeyValue { + return HTTPMethodKey.String(val) +} + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. +// +// Deprecated: use `http.request.header.content-length` instead. +func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. +// +// Deprecated: use `http.response.header.content-length` instead. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + +// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" +// semantic conventions. +// +// Deprecated: use `url.scheme` instead. +func HTTPScheme(val string) attribute.KeyValue { + return HTTPSchemeKey.String(val) +} + +// HTTPStatusCode returns an attribute KeyValue conforming to the +// "http.status_code" semantic conventions. 
+// +// Deprecated: use `http.response.status_code` instead. +func HTTPStatusCode(val int) attribute.KeyValue { + return HTTPStatusCodeKey.Int(val) +} + +// HTTPTarget returns an attribute KeyValue conforming to the "http.target" +// semantic conventions. +// +// Deprecated: use `url.path` and `url.query` instead. +func HTTPTarget(val string) attribute.KeyValue { + return HTTPTargetKey.String(val) +} + +// HTTPURL returns an attribute KeyValue conforming to the "http.url" +// semantic conventions. +// +// Deprecated: use `url.full` instead. +func HTTPURL(val string) attribute.KeyValue { + return HTTPURLKey.String(val) +} + +// HTTPUserAgent returns an attribute KeyValue conforming to the +// "http.user_agent" semantic conventions. +// +// Deprecated: use `user_agent.original` instead. +func HTTPUserAgent(val string) attribute.KeyValue { + return HTTPUserAgentKey.String(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'example.com' + // Deprecated: use `server.address`. + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 8080 + // Deprecated: use `server.port`. + NetHostPortKey = attribute.Key("net.host.port") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'example.com' + // Deprecated: use `server.address` on client spans and `client.address` on + // server spans. + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 8080 + // Deprecated: use `server.port` on client spans and `client.port` on + // server spans. + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetProtocolNameKey is the attribute Key conforming to the + // "net.protocol.name" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'amqp', 'http', 'mqtt' + // Deprecated: use `network.protocol.name`. + NetProtocolNameKey = attribute.Key("net.protocol.name") + + // NetProtocolVersionKey is the attribute Key conforming to the + // "net.protocol.version" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '3.1.1' + // Deprecated: use `network.protocol.version`. + NetProtocolVersionKey = attribute.Key("net.protocol.version") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: deprecated + // Deprecated: use `network.transport` and `network.type`. + NetSockFamilyKey = attribute.Key("net.sock.family") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '/var/my.sock' + // Deprecated: use `network.local.address`. 
+ NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 8080 + // Deprecated: use `network.local.port`. + NetSockHostPortKey = attribute.Key("net.sock.host.port") + + // NetSockPeerAddrKey is the attribute Key conforming to the + // "net.sock.peer.addr" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '192.168.0.1' + // Deprecated: use `network.peer.address`. + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + + // NetSockPeerNameKey is the attribute Key conforming to the + // "net.sock.peer.name" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '/var/my.sock' + // Deprecated: no replacement at this time. + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + + // NetSockPeerPortKey is the attribute Key conforming to the + // "net.sock.peer.port" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 65531 + // Deprecated: use `network.peer.port`. + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetTransportKey is the attribute Key conforming to the "net.transport" + // semantic conventions. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: deprecated + // Deprecated: use `network.transport`. + NetTransportKey = attribute.Key("net.transport") +) + +var ( + // IPv4 address + // + // Deprecated: use `network.transport` and `network.type`. + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + // + // Deprecated: use `network.transport` and `network.type`. + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + // + // Deprecated: use `network.transport` and `network.type`. + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +var ( + // ip_tcp + // + // Deprecated: use `network.transport`. + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + // + // Deprecated: use `network.transport`. + NetTransportUDP = NetTransportKey.String("ip_udp") + // Named or anonymous pipe + // + // Deprecated: use `network.transport`. + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + // + // Deprecated: use `network.transport`. + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + // + // Deprecated: use `network.transport`. + NetTransportOther = NetTransportKey.String("other") +) + +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. +// +// Deprecated: use `server.address`. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) +} + +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. +// +// Deprecated: use `server.port`. +func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) +} + +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. +// +// Deprecated: use `server.address` on client spans and `client.address` on +// server spans. 
+func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) +} + +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. +// +// Deprecated: use `server.port` on client spans and `client.port` on server +// spans. +func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) +} + +// NetProtocolName returns an attribute KeyValue conforming to the +// "net.protocol.name" semantic conventions. +// +// Deprecated: use `network.protocol.name`. +func NetProtocolName(val string) attribute.KeyValue { + return NetProtocolNameKey.String(val) +} + +// NetProtocolVersion returns an attribute KeyValue conforming to the +// "net.protocol.version" semantic conventions. +// +// Deprecated: use `network.protocol.version`. +func NetProtocolVersion(val string) attribute.KeyValue { + return NetProtocolVersionKey.String(val) +} + +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. +// +// Deprecated: use `network.local.address`. +func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) +} + +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. +// +// Deprecated: use `network.local.port`. +func NetSockHostPort(val int) attribute.KeyValue { + return NetSockHostPortKey.Int(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. +// +// Deprecated: use `network.peer.address`. +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. +// +// Deprecated: no replacement at this time. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. +// +// Deprecated: use `network.peer.port`. +func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) +} + +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. It represents the + // destination address - domain name if available without reverse DNS + // lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the source side, and when communicating through + // an intermediary, `destination.address` SHOULD represent the destination + // address behind any intermediaries, for example proxies, if it's + // available. 
+ DestinationAddressKey = attribute.Key("destination.address") + + // DestinationPortKey is the attribute Key conforming to the + // "destination.port" semantic conventions. It represents the destination + // port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + DestinationPortKey = attribute.Key("destination.port") +) + +// DestinationAddress returns an attribute KeyValue conforming to the +// "destination.address" semantic conventions. It represents the destination +// address - domain name if available without reverse DNS lookup; otherwise, IP +// address or Unix domain socket name. +func DestinationAddress(val string) attribute.KeyValue { + return DestinationAddressKey.String(val) +} + +// DestinationPort returns an attribute KeyValue conforming to the +// "destination.port" semantic conventions. It represents the destination port +// number +func DestinationPort(val int) attribute.KeyValue { + return DestinationPortKey.Int(val) +} + +// These attributes may be used for any disk related operation. +const ( + // DiskIoDirectionKey is the attribute Key conforming to the + // "disk.io.direction" semantic conventions. It represents the disk IO + // operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read' + DiskIoDirectionKey = attribute.Key("disk.io.direction") +) + +var ( + // read + DiskIoDirectionRead = DiskIoDirectionKey.String("read") + // write + DiskIoDirectionWrite = DiskIoDirectionKey.String("write") +) + +// The shared attributes used to report an error. +const ( + // ErrorTypeKey is the attribute Key conforming to the "error.type" + // semantic conventions. It describes a class of error the + // operation ended with. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'timeout', 'java.net.UnknownHostException', + // 'server_certificate_invalid', '500' + // Note: The `error.type` SHOULD be predictable and SHOULD have low + // cardinality. + // Instrumentations SHOULD document the list of errors they report. + // + // The cardinality of `error.type` within one instrumentation library + // SHOULD be low. + // Telemetry consumers that aggregate data from multiple instrumentation + // libraries and applications + // should be prepared for `error.type` to have high cardinality at query + // time when no + // additional filters are applied. + // + // If the operation has completed successfully, instrumentations SHOULD NOT + // set `error.type`. + // + // If a specific domain defines its own set of error identifiers (such as + // HTTP or gRPC status codes), + // it's RECOMMENDED to: + // + // * Use a domain-specific attribute + // * Set `error.type` to capture all errors, regardless of whether they are + // defined within the domain-specific set or not. + ErrorTypeKey = attribute.Key("error.type") +) + +var ( + // A fallback error value to be used when the instrumentation doesn't define a custom value + ErrorTypeOther = ErrorTypeKey.String("_OTHER") +) + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionEscapedKey is the attribute Key conforming to the + // "exception.escaped" semantic conventions. It SHOULD be + // set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span.
+ // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Note: An exception is considered to have escaped (or left) the scope of + // a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's `__exit__` method in Python) but will + // usually be caught at the point of recording the exception in most + // languages. + // + // It is usually not possible to determine at the point where an exception + // is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending + // the span, + // as done in the [example for recording span + // exceptions](#recording-an-exception). + // + // It follows that an exception may still escape the scope of the span + // even if the `exception.escaped` attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + ExceptionEscapedKey = attribute.Key("exception.escaped") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") +) + +// ExceptionEscaped returns an attribute KeyValue conforming to the +// "exception.escaped" semantic conventions. It SHOULD be set to +// true if the exception event is recorded at a point where it is known that +// the exception is escaping the scope of the span. +func ExceptionEscaped(val bool) attribute.KeyValue { + return ExceptionEscapedKey.Bool(val) +} + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message.
+func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// Semantic convention attributes in the HTTP namespace. +const ( + // HTTPRequestBodySizeKey is the attribute Key conforming to the + // "http.request.body.size" semantic conventions. It represents the size of + // the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3495 + HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") + + // HTTPRequestMethodKey is the attribute Key conforming to the + // "http.request.method" semantic conventions. It represents the HTTP + // request method. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + // Note: HTTP request method value SHOULD be "known" to the + // instrumentation. + // By default, this convention defines "known" methods as the ones listed + // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) + // and the PATCH method defined in + // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html). + // + // If the HTTP request method is not known to instrumentation, it MUST set + // the `http.request.method` attribute to `_OTHER`. + // + // If the HTTP instrumentation could end up converting valid HTTP request + // methods to `_OTHER`, then it MUST provide a way to override + // the list of known HTTP methods. If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated + // list of case-sensitive known HTTP methods + // (this list MUST be a full override of the default known methods, it is + // not a list of known methods in addition to the defaults). + // + // HTTP method names are case-sensitive and `http.request.method` attribute + // value MUST match a known HTTP method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods + // to be case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set + // `http.request.method_original` to the original value. + HTTPRequestMethodKey = attribute.Key("http.request.method") + + // HTTPRequestMethodOriginalKey is the attribute Key conforming to the + // "http.request.method_original" semantic conventions.
It represents the + // original HTTP method sent by the client in the request line. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'GeT', 'ACL', 'foo' + HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") + + // HTTPRequestResendCountKey is the attribute Key conforming to the + // "http.request.resend_count" semantic conventions. It represents the + // ordinal number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size + // of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3495 + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") + + // HTTPResponseStatusCodeKey is the attribute Key conforming to the + // "http.response.status_code" semantic conventions. It represents the + // [HTTP response status + // code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 200 + HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. It represents the matched route, that is, the path + // template in the format used by the respective server framework. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: MUST NOT be populated when this is not supported by the HTTP + // server framework as the route attribute should have low-cardinality and + // the URI path can NOT substitute it. + // SHOULD include the [application + // root](/docs/http/http-spans.md#http-server-definitions) if there is one. 
+ HTTPRouteKey = attribute.Key("http.route") +) + +var ( + // CONNECT method + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // Any HTTP method that the instrumentation has no prior knowledge of + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// HTTPRequestBodySize returns an attribute KeyValue conforming to the +// "http.request.body.size" semantic conventions. It represents the size of the +// request payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestBodySize(val int) attribute.KeyValue { + return HTTPRequestBodySizeKey.Int(val) +} + +// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the +// "http.request.method_original" semantic conventions. It represents the +// original HTTP method sent by the client in the request line. +func HTTPRequestMethodOriginal(val string) attribute.KeyValue { + return HTTPRequestMethodOriginalKey.String(val) +} + +// HTTPRequestResendCount returns an attribute KeyValue conforming to the +// "http.request.resend_count" semantic conventions. It represents the ordinal +// number of request resending attempt (for any reason, including redirects). +func HTTPRequestResendCount(val int) attribute.KeyValue { + return HTTPRequestResendCountKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of +// the response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// HTTPResponseStatusCode returns an attribute KeyValue conforming to the +// "http.response.status_code" semantic conventions. It represents the [HTTP +// response status code](https://tools.ietf.org/html/rfc7231#section-6). +func HTTPResponseStatusCode(val int) attribute.KeyValue { + return HTTPResponseStatusCodeKey.Int(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route, that is, the path +// template in the format used by the respective server framework. +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Attributes describing telemetry around messaging systems and messaging +// activities. 
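+//
+// Illustrative usage (a minimal sketch, not part of the generated
+// conventions; assumes an existing trace.Span named span): these attributes
+// are typically attached to producer or consumer spans, for example:
+//
+//	span.SetAttributes(
+//		MessagingSystemKafka,
+//		MessagingDestinationName("MyTopic"),
+//		MessagingOperationPublish,
+//	)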
+const ( + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") + + // MessagingClientIDKey is the attribute Key conforming to the + // "messaging.client_id" semantic conventions. It represents a unique + // identifier for the client that consumes or produces a message. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'client-5', 'myhost@8742@s8083jm' + MessagingClientIDKey = attribute.Key("messaging.client_id") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. 
+ // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationPublishAnonymousKey is the attribute Key conforming + // to the "messaging.destination_publish.anonymous" semantic conventions. + // It represents a boolean that is true if the publish message destination + // is anonymous (could be unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous") + + // MessagingDestinationPublishNameKey is the attribute Key conforming to + // the "messaging.destination_publish.name" semantic conventions. It + // represents the name of the original destination the message was + // published to + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: The name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker doesn't have such notion, the original destination name + // SHOULD uniquely identify the broker. + MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name") + + // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. + // It represents the ordering key for a given message. If the attribute is + // not present, the message does not have an ordering key. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ordering_key' + MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") + + // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the + // "messaging.kafka.consumer.group" semantic conventions. It represents the + // name of the Kafka Consumer Group that is handling the message. Only + // applies to consumers, not producers. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") + + // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to + // the "messaging.kafka.destination.partition" semantic conventions. It + // represents the partition the message is sent to. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2 + MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") + + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the + // message keys in Kafka are used for grouping alike messages to ensure + // they're processed on the same partition. They differ from + // `messaging.message.id` in that they're not unique. If the key is `null`, + // the attribute MUST NOT be set. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myKey' + // Note: If the key type is not string, it's string representation has to + // be supplied for the attribute. If the key has no unambiguous, canonical + // string form, don't include its value. 
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the + // "messaging.kafka.message.offset" semantic conventions. It represents the + // offset of a record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions. It represents + // a boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") + + // MessagingMessageBodySizeKey is the attribute Key conforming to the + // "messaging.message.body.size" semantic conventions. It represents the + // size of the message body in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1439 + // Note: This can refer to both the compressed or uncompressed body size. + // If both sizes are known, the uncompressed + // body size should be used. + MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the conversation ID identifying the conversation to which the message + // belongs, represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the + // "messaging.message.envelope.size" semantic conventions. It represents + // the size of the message body and metadata in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2738 + // Note: This can refer to both the compressed or uncompressed size. If + // both sizes are known, the uncompressed + // size should be used. + MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") + + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingOperationKey is the attribute Key conforming to the + // "messaging.operation" semantic conventions. It represents a string + // identifying the kind of messaging operation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationKey = attribute.Key("messaging.operation") + + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") + + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions. It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") + + // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delay_time_level" semantic + // conventions. It represents the delay time level for delay message, which + // determines the message delay time. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3 + MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delivery_timestamp" + // semantic conventions. It represents the timestamp in milliseconds that + // the delay message is expected to be delivered to consumer. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1665987217045 + MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + + // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents + // the it is essential for FIFO message. Messages that belong to the same + // message group are always processed one by one within the same consumer + // group. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myMessageGroup' + MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents + // the key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketmqMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of message besides topic. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents + // the type of message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketmqNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents an identifier for + // the messaging system being used. See below for a list of well-known + // identifiers. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingSystemKey = attribute.Key("messaging.system") +) + +var ( + // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created + MessagingOperationPublish = MessagingOperationKey.String("publish") + // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios + MessagingOperationCreate = MessagingOperationKey.String("create") + // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages + MessagingOperationReceive = MessagingOperationKey.String("receive") + // One or more messages are passed to a consumer. 
This operation refers to push-based scenarios, where consumer register callbacks which get called by messaging SDKs + MessagingOperationDeliver = MessagingOperationKey.String("deliver") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +var ( + // Apache ActiveMQ + MessagingSystemActivemq = MessagingSystemKey.String("activemq") + // Amazon Simple Queue Service (SQS) + MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + MessagingSystemAzureEventgrid = MessagingSystemKey.String("azure_eventgrid") + // Azure Event Hubs + MessagingSystemAzureEventhubs = MessagingSystemKey.String("azure_eventhubs") + // Azure Service Bus + MessagingSystemAzureServicebus = MessagingSystemKey.String("azure_servicebus") + // Google Cloud Pub/Sub + MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + MessagingSystemJms = MessagingSystemKey.String("jms") + // Apache Kafka + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") +) + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client_id" semantic conventions. It represents a unique +// identifier for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. 
It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationPublishAnonymous returns an attribute KeyValue +// conforming to the "messaging.destination_publish.anonymous" semantic +// conventions. It represents a boolean that is true if the publish message +// destination is anonymous (could be unnamed or have auto-generated name). +func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationPublishAnonymousKey.Bool(val) +} + +// MessagingDestinationPublishName returns an attribute KeyValue conforming +// to the "messaging.destination_publish.name" semantic conventions. It +// represents the name of the original destination the message was published to +func MessagingDestinationPublishName(val string) attribute.KeyValue { + return MessagingDestinationPublishNameKey.String(val) +} + +// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic +// conventions. It represents the ordering key for a given message. If the +// attribute is not present, the message does not have an ordering key. +func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue { + return MessagingGCPPubsubMessageOrderingKeyKey.String(val) +} + +// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to +// the "messaging.kafka.consumer.group" semantic conventions. It represents the +// name of the Kafka Consumer Group that is handling the message. Only applies +// to consumers, not producers. +func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { + return MessagingKafkaConsumerGroupKey.String(val) +} + +// MessagingKafkaDestinationPartition returns an attribute KeyValue +// conforming to the "messaging.kafka.destination.partition" semantic +// conventions. It represents the partition the message is sent to. +func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { + return MessagingKafkaDestinationPartitionKey.Int(val) +} + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the +// message keys in Kafka are used for grouping alike messages to ensure they're +// processed on the same partition. They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. It represents the +// offset of a record in the corresponding Kafka partition. 
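+//
+// For example (a minimal sketch; assumes an existing trace.Span named span):
+//
+//	span.SetAttributes(
+//		MessagingKafkaConsumerGroup("my-group"),
+//		MessagingKafkaMessageOffset(42),
+//	)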
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// MessagingMessageBodySize returns an attribute KeyValue conforming to the +// "messaging.message.body.size" semantic conventions. It represents the size +// of the message body in bytes. +func MessagingMessageBodySize(val int) attribute.KeyValue { + return MessagingMessageBodySizeKey.Int(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the conversation ID identifying the conversation to which the +// message belongs, represented as a string. Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to +// the "messaging.message.envelope.size" semantic conventions. It represents +// the size of the message body and metadata in bytes. +func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { + return MessagingMessageEnvelopeSizeKey.Int(val) +} + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + +// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.client_group" semantic conventions. It represents +// the name of the RocketMQ producer/consumer group that is handling the +// message. The client type is identified by the SpanKind. +func MessagingRocketmqClientGroup(val string) attribute.KeyValue { + return MessagingRocketmqClientGroupKey.String(val) +} + +// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. It represents the delay time level for delay message, which +// determines the message delay time. +func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. It represents the timestamp in milliseconds that the delay +// message is expected to be delivered to consumer. 
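+//
+// A minimal usage sketch (assumes an existing trace.Span named span):
+//
+//	span.SetAttributes(MessagingRocketmqMessageDeliveryTimestamp(1665987217045))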
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within
+// the same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of message, another way to mark message besides message id.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+	return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources, resources in different namespaces are
+// individual.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+	return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+	// NetworkCarrierIccKey is the attribute Key conforming to the
+	// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+	// alpha-2 2-character country code associated with the mobile carrier
+	// network.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'DE'
+	NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
+
+	// NetworkCarrierMccKey is the attribute Key conforming to the
+	// "network.carrier.mcc" semantic conventions. It represents the mobile
+	// carrier country code.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '310'
+	NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
+
+	// NetworkCarrierMncKey is the attribute Key conforming to the
+	// "network.carrier.mnc" semantic conventions. It represents the mobile
+	// carrier network code.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '001'
+	NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
+
+	// NetworkCarrierNameKey is the attribute Key conforming to the
+	// "network.carrier.name" semantic conventions. It represents the name of
+	// the mobile carrier.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'sprint'
+	NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+	// NetworkConnectionSubtypeKey is the attribute Key conforming to the
+	// "network.connection.subtype" semantic conventions. It describes more
+	// details regarding the connection.type. It may be the type of cell
+	// technology connection, but it could be used for describing details
+	// about a wifi connection.
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'LTE' + NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") + + // NetworkConnectionTypeKey is the attribute Key conforming to the + // "network.connection.type" semantic conventions. It represents the + // internet connection type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'wifi' + NetworkConnectionTypeKey = attribute.Key("network.connection.type") + + // NetworkIoDirectionKey is the attribute Key conforming to the + // "network.io.direction" semantic conventions. It represents the network + // IO operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'transmit' + NetworkIoDirectionKey = attribute.Key("network.io.direction") + + // NetworkLocalAddressKey is the attribute Key conforming to the + // "network.local.address" semantic conventions. It represents the local + // address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + NetworkLocalAddressKey = attribute.Key("network.local.address") + + // NetworkLocalPortKey is the attribute Key conforming to the + // "network.local.port" semantic conventions. It represents the local port + // number of the network connection. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + NetworkLocalPortKey = attribute.Key("network.local.port") + + // NetworkPeerAddressKey is the attribute Key conforming to the + // "network.peer.address" semantic conventions. It represents the peer + // address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + NetworkPeerAddressKey = attribute.Key("network.peer.address") + + // NetworkPeerPortKey is the attribute Key conforming to the + // "network.peer.port" semantic conventions. It represents the peer port + // number of the network connection. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + NetworkPeerPortKey = attribute.Key("network.peer.port") + + // NetworkProtocolNameKey is the attribute Key conforming to the + // "network.protocol.name" semantic conventions. It represents the [OSI + // application layer](https://osi-model.com/application-layer/) or non-OSI + // equivalent. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + // Note: The value SHOULD be normalized to lowercase. + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the + // version of the protocol specified in `network.protocol.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3.1.1' + // Note: `network.protocol.version` refers to the version of the protocol + // used and might be different from the protocol client's version. If the + // HTTP client has a version of `0.27.2`, but sends HTTP version `1.1`, + // this attribute should be set to `1.1`. 
+ NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the [OSI + // transport layer](https://osi-model.com/transport-layer/) or + // [inter-process communication + // method](https://wikipedia.org/wiki/Inter-process_communication). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tcp', 'udp' + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port + // 12345. + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" + // semantic conventions. It represents the [OSI network + // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ipv4', 'ipv6' + // Note: The value SHOULD be normalized to lowercase. + NetworkTypeKey = attribute.Key("network.type") +) + +var ( + // GPRS + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +var ( + // wifi + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +var ( + // transmit + NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") + // receive + NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") +) + +var ( + // TCP + NetworkTransportTCP = NetworkTransportKey.String("tcp") + // UDP + NetworkTransportUDP = NetworkTransportKey.String("udp") + // Named or anonymous pipe + NetworkTransportPipe = NetworkTransportKey.String("pipe") + // Unix domain socket + NetworkTransportUnix = NetworkTransportKey.String("unix") +) + +var ( + // IPv4 + NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") + // IPv6 + NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") +) + +// NetworkCarrierIcc returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierIcc(val string) attribute.KeyValue { + return NetworkCarrierIccKey.String(val) +} + +// NetworkCarrierMcc returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMcc(val string) attribute.KeyValue { + return NetworkCarrierMccKey.String(val) +} + +// NetworkCarrierMnc returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMnc(val string) attribute.KeyValue { + return NetworkCarrierMncKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local +// address of the network connection - IP address or Unix domain socket name. 
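+//
+// For example (a minimal sketch; assumes an existing trace.Span named span):
+//
+//	span.SetAttributes(
+//		NetworkTransportTCP,
+//		NetworkLocalAddress("10.1.2.80"),
+//		NetworkLocalPort(65123),
+//	)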
+func NetworkLocalAddress(val string) attribute.KeyValue { + return NetworkLocalAddressKey.String(val) +} + +// NetworkLocalPort returns an attribute KeyValue conforming to the +// "network.local.port" semantic conventions. It represents the local port +// number of the network connection. +func NetworkLocalPort(val int) attribute.KeyValue { + return NetworkLocalPortKey.Int(val) +} + +// NetworkPeerAddress returns an attribute KeyValue conforming to the +// "network.peer.address" semantic conventions. It represents the peer address +// of the network connection - IP address or Unix domain socket name. +func NetworkPeerAddress(val string) attribute.KeyValue { + return NetworkPeerAddressKey.String(val) +} + +// NetworkPeerPort returns an attribute KeyValue conforming to the +// "network.peer.port" semantic conventions. It represents the peer port number +// of the network connection. +func NetworkPeerPort(val int) attribute.KeyValue { + return NetworkPeerPortKey.Int(val) +} + +// NetworkProtocolName returns an attribute KeyValue conforming to the +// "network.protocol.name" semantic conventions. It represents the [OSI +// application layer](https://osi-model.com/application-layer/) or non-OSI +// equivalent. +func NetworkProtocolName(val string) attribute.KeyValue { + return NetworkProtocolNameKey.String(val) +} + +// NetworkProtocolVersion returns an attribute KeyValue conforming to the +// "network.protocol.version" semantic conventions. It represents the version +// of the protocol specified in `network.protocol.name`. +func NetworkProtocolVersion(val string) attribute.KeyValue { + return NetworkProtocolVersionKey.String(val) +} + +// Attributes for remote procedure calls. +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes](https://connect.build/docs/protocol/#error-codes) of the + // Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") + + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. 
Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // doesn't specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCSystemKey = attribute.Key("rpc.system") +) + +var ( + // cancelled + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + RPCSystemConnectRPC = 
RPCSystemKey.String("connect_rpc") +) + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// doesn't specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// These attributes may be used to describe the server in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. It represents the server domain name if available + // without reverse DNS lookup; otherwise, IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.address` SHOULD represent the server address + // behind any intermediaries, for example proxies, if it's available. 
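+	//
+	// A minimal usage sketch (assumes an existing trace.Span named span) for a
+	// client span:
+	//
+	//	span.SetAttributes(ServerAddress("example.com"), ServerPort(443))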
+ ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" + // semantic conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.port` SHOULD represent the server port behind + // any intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the +// "server.address" semantic conventions. It represents the server domain name +// if available without reverse DNS lookup; otherwise, IP address or Unix +// domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. +func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// These attributes may be used to describe the sender of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the destination side, and when communicating + // through an intermediary, `source.address` SHOULD represent the source + // address behind any intermediaries, for example proxies, if it's + // available. + SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" + // semantic conventions. It represents the source port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceAddress returns an attribute KeyValue conforming to the +// "source.address" semantic conventions. It represents the source address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number +func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// Semantic convention attributes in the TLS namespace. +const ( + // TLSCipherKey is the attribute Key conforming to the "tls.cipher" + // semantic conventions. 
It represents the string indicating the + // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) + // used during the current connection. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', + // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' + // Note: The values allowed for `tls.cipher` MUST be one of the + // `Descriptions` of the [registered TLS Cipher + // Suits](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4). + TLSCipherKey = attribute.Key("tls.cipher") + + // TLSClientCertificateKey is the attribute Key conforming to the + // "tls.client.certificate" semantic conventions. It represents the + // pEM-encoded stand-alone certificate offered by the client. This is + // usually mutually-exclusive of `client.certificate_chain` since this + // value also exists in that list. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...' + TLSClientCertificateKey = attribute.Key("tls.client.certificate") + + // TLSClientCertificateChainKey is the attribute Key conforming to the + // "tls.client.certificate_chain" semantic conventions. It represents the + // array of PEM-encoded certificates that make up the certificate chain + // offered by the client. This is usually mutually-exclusive of + // `client.certificate` since that value should be the first certificate in + // the chain. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") + + // TLSClientHashMd5Key is the attribute Key conforming to the + // "tls.client.hash.md5" semantic conventions. It represents the + // certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") + + // TLSClientHashSha1Key is the attribute Key conforming to the + // "tls.client.hash.sha1" semantic conventions. It represents the + // certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") + + // TLSClientHashSha256Key is the attribute Key conforming to the + // "tls.client.hash.sha256" semantic conventions. It represents the + // certificate fingerprint using the SHA256 digest of DER-encoded version + // of certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") + + // TLSClientIssuerKey is the attribute Key conforming to the + // "tls.client.issuer" semantic conventions. 
It represents the + // distinguished name of + // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) + // of the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, + // DC=com' + TLSClientIssuerKey = attribute.Key("tls.client.issuer") + + // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" + // semantic conventions. It represents a hash that identifies clients based + // on how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + TLSClientJa3Key = attribute.Key("tls.client.ja3") + + // TLSClientNotAfterKey is the attribute Key conforming to the + // "tls.client.not_after" semantic conventions. It represents the date/Time + // indicating when client certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + TLSClientNotAfterKey = attribute.Key("tls.client.not_after") + + // TLSClientNotBeforeKey is the attribute Key conforming to the + // "tls.client.not_before" semantic conventions. It represents the + // date/Time indicating when client certificate is first considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") + + // TLSClientServerNameKey is the attribute Key conforming to the + // "tls.client.server_name" semantic conventions. It represents the also + // called an SNI, this tells the server which hostname to which the client + // is attempting to connect to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry.io' + TLSClientServerNameKey = attribute.Key("tls.client.server_name") + + // TLSClientSubjectKey is the attribute Key conforming to the + // "tls.client.subject" semantic conventions. It represents the + // distinguished name of subject of the x.509 certificate presented by the + // client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com' + TLSClientSubjectKey = attribute.Key("tls.client.subject") + + // TLSClientSupportedCiphersKey is the attribute Key conforming to the + // "tls.client.supported_ciphers" semantic conventions. It represents the + // array of ciphers offered by the client during the client hello. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."' + TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") + + // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic + // conventions. It represents the string indicating the curve used for the + // given cipher, when applicable + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'secp256r1' + TLSCurveKey = attribute.Key("tls.curve") + + // TLSEstablishedKey is the attribute Key conforming to the + // "tls.established" semantic conventions. 
It represents the boolean flag + // indicating if the TLS negotiation was successful and transitioned to an + // encrypted tunnel. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Examples: True + TLSEstablishedKey = attribute.Key("tls.established") + + // TLSNextProtocolKey is the attribute Key conforming to the + // "tls.next_protocol" semantic conventions. It represents the string + // indicating the protocol being tunneled. Per the values in the [IANA + // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), + // this string should be lower case. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'http/1.1' + TLSNextProtocolKey = attribute.Key("tls.next_protocol") + + // TLSProtocolNameKey is the attribute Key conforming to the + // "tls.protocol.name" semantic conventions. It represents the normalized + // lowercase protocol name parsed from original string of the negotiated + // [SSL/TLS protocol + // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + TLSProtocolNameKey = attribute.Key("tls.protocol.name") + + // TLSProtocolVersionKey is the attribute Key conforming to the + // "tls.protocol.version" semantic conventions. It represents the numeric + // part of the version parsed from the original string of the negotiated + // [SSL/TLS protocol + // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2', '3' + TLSProtocolVersionKey = attribute.Key("tls.protocol.version") + + // TLSResumedKey is the attribute Key conforming to the "tls.resumed" + // semantic conventions. It represents the boolean flag indicating if this + // TLS connection was resumed from an existing TLS negotiation. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Examples: True + TLSResumedKey = attribute.Key("tls.resumed") + + // TLSServerCertificateKey is the attribute Key conforming to the + // "tls.server.certificate" semantic conventions. It represents the + // pEM-encoded stand-alone certificate offered by the server. This is + // usually mutually-exclusive of `server.certificate_chain` since this + // value also exists in that list. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...' + TLSServerCertificateKey = attribute.Key("tls.server.certificate") + + // TLSServerCertificateChainKey is the attribute Key conforming to the + // "tls.server.certificate_chain" semantic conventions. It represents the + // array of PEM-encoded certificates that make up the certificate chain + // offered by the server. This is usually mutually-exclusive of + // `server.certificate` since that value should be the first certificate in + // the chain. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") + + // TLSServerHashMd5Key is the attribute Key conforming to the + // "tls.server.hash.md5" semantic conventions. It represents the + // certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the server. 
For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") + + // TLSServerHashSha1Key is the attribute Key conforming to the + // "tls.server.hash.sha1" semantic conventions. It represents the + // certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") + + // TLSServerHashSha256Key is the attribute Key conforming to the + // "tls.server.hash.sha256" semantic conventions. It represents the + // certificate fingerprint using the SHA256 digest of DER-encoded version + // of certificate offered by the server. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") + + // TLSServerIssuerKey is the attribute Key conforming to the + // "tls.server.issuer" semantic conventions. It represents the + // distinguished name of + // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) + // of the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, + // DC=com' + TLSServerIssuerKey = attribute.Key("tls.server.issuer") + + // TLSServerJa3sKey is the attribute Key conforming to the + // "tls.server.ja3s" semantic conventions. It represents a hash that + // identifies servers based on how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + TLSServerJa3sKey = attribute.Key("tls.server.ja3s") + + // TLSServerNotAfterKey is the attribute Key conforming to the + // "tls.server.not_after" semantic conventions. It represents the date/Time + // indicating when server certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + TLSServerNotAfterKey = attribute.Key("tls.server.not_after") + + // TLSServerNotBeforeKey is the attribute Key conforming to the + // "tls.server.not_before" semantic conventions. It represents the + // date/Time indicating when server certificate is first considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") + + // TLSServerSubjectKey is the attribute Key conforming to the + // "tls.server.subject" semantic conventions. It represents the + // distinguished name of subject of the x.509 certificate presented by the + // server. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' + TLSServerSubjectKey = attribute.Key("tls.server.subject") +) + +var ( + // ssl + TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") + // tls + TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") +) + +// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" +// semantic conventions. It represents the string indicating the +// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used +// during the current connection. +func TLSCipher(val string) attribute.KeyValue { + return TLSCipherKey.String(val) +} + +// TLSClientCertificate returns an attribute KeyValue conforming to the +// "tls.client.certificate" semantic conventions. It represents the pEM-encoded +// stand-alone certificate offered by the client. This is usually +// mutually-exclusive of `client.certificate_chain` since this value also +// exists in that list. +func TLSClientCertificate(val string) attribute.KeyValue { + return TLSClientCertificateKey.String(val) +} + +// TLSClientCertificateChain returns an attribute KeyValue conforming to the +// "tls.client.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by +// the client. This is usually mutually-exclusive of `client.certificate` since +// that value should be the first certificate in the chain. +func TLSClientCertificateChain(val ...string) attribute.KeyValue { + return TLSClientCertificateChainKey.StringSlice(val) +} + +// TLSClientHashMd5 returns an attribute KeyValue conforming to the +// "tls.client.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashMd5(val string) attribute.KeyValue { + return TLSClientHashMd5Key.String(val) +} + +// TLSClientHashSha1 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha1(val string) attribute.KeyValue { + return TLSClientHashSha1Key.String(val) +} + +// TLSClientHashSha256 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha256(val string) attribute.KeyValue { + return TLSClientHashSha256Key.String(val) +} + +// TLSClientIssuer returns an attribute KeyValue conforming to the +// "tls.client.issuer" semantic conventions. It represents the distinguished +// name of +// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of +// the issuer of the x.509 certificate presented by the client. +func TLSClientIssuer(val string) attribute.KeyValue { + return TLSClientIssuerKey.String(val) +} + +// TLSClientJa3 returns an attribute KeyValue conforming to the +// "tls.client.ja3" semantic conventions. 
It represents a hash that identifies +// clients based on how they perform an SSL/TLS handshake. +func TLSClientJa3(val string) attribute.KeyValue { + return TLSClientJa3Key.String(val) +} + +// TLSClientNotAfter returns an attribute KeyValue conforming to the +// "tls.client.not_after" semantic conventions. It represents the date/Time +// indicating when client certificate is no longer considered valid. +func TLSClientNotAfter(val string) attribute.KeyValue { + return TLSClientNotAfterKey.String(val) +} + +// TLSClientNotBefore returns an attribute KeyValue conforming to the +// "tls.client.not_before" semantic conventions. It represents the date/Time +// indicating when client certificate is first considered valid. +func TLSClientNotBefore(val string) attribute.KeyValue { + return TLSClientNotBeforeKey.String(val) +} + +// TLSClientServerName returns an attribute KeyValue conforming to the +// "tls.client.server_name" semantic conventions. It represents the also called +// an SNI, this tells the server which hostname to which the client is +// attempting to connect to. +func TLSClientServerName(val string) attribute.KeyValue { + return TLSClientServerNameKey.String(val) +} + +// TLSClientSubject returns an attribute KeyValue conforming to the +// "tls.client.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the client. +func TLSClientSubject(val string) attribute.KeyValue { + return TLSClientSubjectKey.String(val) +} + +// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the +// "tls.client.supported_ciphers" semantic conventions. It represents the array +// of ciphers offered by the client during the client hello. +func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { + return TLSClientSupportedCiphersKey.StringSlice(val) +} + +// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" +// semantic conventions. It represents the string indicating the curve used for +// the given cipher, when applicable +func TLSCurve(val string) attribute.KeyValue { + return TLSCurveKey.String(val) +} + +// TLSEstablished returns an attribute KeyValue conforming to the +// "tls.established" semantic conventions. It represents the boolean flag +// indicating if the TLS negotiation was successful and transitioned to an +// encrypted tunnel. +func TLSEstablished(val bool) attribute.KeyValue { + return TLSEstablishedKey.Bool(val) +} + +// TLSNextProtocol returns an attribute KeyValue conforming to the +// "tls.next_protocol" semantic conventions. It represents the string +// indicating the protocol being tunneled. Per the values in the [IANA +// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), +// this string should be lower case. +func TLSNextProtocol(val string) attribute.KeyValue { + return TLSNextProtocolKey.String(val) +} + +// TLSProtocolVersion returns an attribute KeyValue conforming to the +// "tls.protocol.version" semantic conventions. It represents the numeric part +// of the version parsed from the original string of the negotiated [SSL/TLS +// protocol +// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) +func TLSProtocolVersion(val string) attribute.KeyValue { + return TLSProtocolVersionKey.String(val) +} + +// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" +// semantic conventions. 
It represents the boolean flag indicating if this TLS +// connection was resumed from an existing TLS negotiation. +func TLSResumed(val bool) attribute.KeyValue { + return TLSResumedKey.Bool(val) +} + +// TLSServerCertificate returns an attribute KeyValue conforming to the +// "tls.server.certificate" semantic conventions. It represents the pEM-encoded +// stand-alone certificate offered by the server. This is usually +// mutually-exclusive of `server.certificate_chain` since this value also +// exists in that list. +func TLSServerCertificate(val string) attribute.KeyValue { + return TLSServerCertificateKey.String(val) +} + +// TLSServerCertificateChain returns an attribute KeyValue conforming to the +// "tls.server.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by +// the server. This is usually mutually-exclusive of `server.certificate` since +// that value should be the first certificate in the chain. +func TLSServerCertificateChain(val ...string) attribute.KeyValue { + return TLSServerCertificateChainKey.StringSlice(val) +} + +// TLSServerHashMd5 returns an attribute KeyValue conforming to the +// "tls.server.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashMd5(val string) attribute.KeyValue { + return TLSServerHashMd5Key.String(val) +} + +// TLSServerHashSha1 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha1(val string) attribute.KeyValue { + return TLSServerHashSha1Key.String(val) +} + +// TLSServerHashSha256 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha256(val string) attribute.KeyValue { + return TLSServerHashSha256Key.String(val) +} + +// TLSServerIssuer returns an attribute KeyValue conforming to the +// "tls.server.issuer" semantic conventions. It represents the distinguished +// name of +// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of +// the issuer of the x.509 certificate presented by the client. +func TLSServerIssuer(val string) attribute.KeyValue { + return TLSServerIssuerKey.String(val) +} + +// TLSServerJa3s returns an attribute KeyValue conforming to the +// "tls.server.ja3s" semantic conventions. It represents a hash that identifies +// servers based on how they perform an SSL/TLS handshake. +func TLSServerJa3s(val string) attribute.KeyValue { + return TLSServerJa3sKey.String(val) +} + +// TLSServerNotAfter returns an attribute KeyValue conforming to the +// "tls.server.not_after" semantic conventions. It represents the date/Time +// indicating when server certificate is no longer considered valid. 
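Several of the TLS constructors line up with fields of Go's crypto/tls.ConnectionState. The sketch below is illustrative rather than part of the vendored file; it assumes the go.opentelemetry.io/otel/trace API, records only the fields with obvious counterparts, and the helper name recordTLSState is hypothetical.

// Illustrative sketch only; not part of the vendored file.
package example

import (
	"context"
	"crypto/tls"

	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// recordTLSState copies a few fields of the negotiated TLS state onto the
// current span using the semconv constructors defined above.
func recordTLSState(ctx context.Context, cs tls.ConnectionState) {
	span := trace.SpanFromContext(ctx)
	span.SetAttributes(
		semconv.TLSCipher(tls.CipherSuiteName(cs.CipherSuite)),
		semconv.TLSEstablished(cs.HandshakeComplete),
		semconv.TLSResumed(cs.DidResume),
		semconv.TLSNextProtocol(cs.NegotiatedProtocol), // ALPN value, e.g. "h2"
		semconv.TLSClientServerName(cs.ServerName),     // SNI, when observed server-side
	)
}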
+func TLSServerNotAfter(val string) attribute.KeyValue { + return TLSServerNotAfterKey.String(val) +} + +// TLSServerNotBefore returns an attribute KeyValue conforming to the +// "tls.server.not_before" semantic conventions. It represents the date/Time +// indicating when server certificate is first considered valid. +func TLSServerNotBefore(val string) attribute.KeyValue { + return TLSServerNotBeforeKey.String(val) +} + +// TLSServerSubject returns an attribute KeyValue conforming to the +// "tls.server.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the server. +func TLSServerSubject(val string) attribute.KeyValue { + return TLSServerSubjectKey.String(val) +} + +// Attributes describing URL. +const ( + // URLFragmentKey is the attribute Key conforming to the "url.fragment" + // semantic conventions. It represents the [URI + // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'SemConv' + URLFragmentKey = attribute.Key("url.fragment") + + // URLFullKey is the attribute Key conforming to the "url.full" semantic + // conventions. It represents the absolute URL describing a network + // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // '//localhost' + // Note: For network calls, URL usually has + // `scheme://host[:port][path][?query][#fragment]` format, where the + // fragment is not transmitted over HTTP, but if it is known, it SHOULD be + // included nevertheless. + // `url.full` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case username and + // password SHOULD be redacted and attribute's value SHOULD be + // `https://REDACTED:REDACTED@www.example.com/`. + // `url.full` SHOULD capture the absolute URL when it is available (or can + // be reconstructed) and SHOULD NOT be validated or modified except for + // sanitizing purposes. + URLFullKey = attribute.Key("url.full") + + // URLPathKey is the attribute Key conforming to the "url.path" semantic + // conventions. It represents the [URI + // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/search' + URLPathKey = attribute.Key("url.path") + + // URLQueryKey is the attribute Key conforming to the "url.query" semantic + // conventions. It represents the [URI + // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'q=OpenTelemetry' + // Note: Sensitive content provided in query string SHOULD be scrubbed when + // instrumentations can identify it. + URLQueryKey = attribute.Key("url.query") + + // URLSchemeKey is the attribute Key conforming to the "url.scheme" + // semantic conventions. It represents the [URI + // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component + // identifying the used protocol. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'https', 'ftp', 'telnet' + URLSchemeKey = attribute.Key("url.scheme") +) + +// URLFragment returns an attribute KeyValue conforming to the +// "url.fragment" semantic conventions. 
It represents the [URI +// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component +func URLFragment(val string) attribute.KeyValue { + return URLFragmentKey.String(val) +} + +// URLFull returns an attribute KeyValue conforming to the "url.full" +// semantic conventions. It represents the absolute URL describing a network +// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) +func URLFull(val string) attribute.KeyValue { + return URLFullKey.String(val) +} + +// URLPath returns an attribute KeyValue conforming to the "url.path" +// semantic conventions. It represents the [URI +// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component +func URLPath(val string) attribute.KeyValue { + return URLPathKey.String(val) +} + +// URLQuery returns an attribute KeyValue conforming to the "url.query" +// semantic conventions. It represents the [URI +// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component +func URLQuery(val string) attribute.KeyValue { + return URLQueryKey.String(val) +} + +// URLScheme returns an attribute KeyValue conforming to the "url.scheme" +// semantic conventions. It represents the [URI +// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component +// identifying the used protocol. +func URLScheme(val string) attribute.KeyValue { + return URLSchemeKey.String(val) +} + +// Describes user-agent attributes. +const ( + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of + // the [HTTP + // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) + // header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU + // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1' + UserAgentOriginalKey = attribute.Key("user_agent.original") +) + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client. +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} + +// Session is defined as the period of time encompassing all activities +// performed by the application and the actions executed by the end user. +// Consequently, a Session is represented as a collection of Logs, Events, and +// Spans emitted by the Client Application throughout the Session's duration. +// Each Session is assigned a unique identifier, which is included as an +// attribute in the Logs, Events, and Spans generated during the Session's +// lifecycle. +// When a session reaches end of life, typically due to user inactivity or +// session timeout, a new session identifier will be assigned. The previous +// session identifier may be provided by the instrumentation so that telemetry +// backends can link the two sessions. +const ( + // SessionIDKey is the attribute Key conforming to the "session.id" + // semantic conventions. It represents a unique id to identify a session. 
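The URL constructors above map onto the components produced by net/url parsing. A minimal sketch follows, again illustrative and not part of the vendored file; the helper name urlAttributes is hypothetical, and credential redaction for url.full is deliberately left out.

// Illustrative sketch only; not part of the vendored file.
package example

import (
	"net/url"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// urlAttributes splits a raw URL into the url.* components described above.
func urlAttributes(raw string) ([]attribute.KeyValue, error) {
	u, err := url.Parse(raw)
	if err != nil {
		return nil, err
	}
	return []attribute.KeyValue{
		semconv.URLScheme(u.Scheme),
		semconv.URLPath(u.Path),
		semconv.URLQuery(u.RawQuery),
		semconv.URLFragment(u.Fragment),
	}, nil
}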
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + SessionIDKey = attribute.Key("session.id") + + // SessionPreviousIDKey is the attribute Key conforming to the + // "session.previous_id" semantic conventions. It represents the previous + // `session.id` for this user, when known. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + SessionPreviousIDKey = attribute.Key("session.previous_id") +) + +// SessionID returns an attribute KeyValue conforming to the "session.id" +// semantic conventions. It represents a unique id to identify a session. +func SessionID(val string) attribute.KeyValue { + return SessionIDKey.String(val) +} + +// SessionPreviousID returns an attribute KeyValue conforming to the +// "session.previous_id" semantic conventions. It represents the previous +// `session.id` for this user, when known. +func SessionPreviousID(val string) attribute.KeyValue { + return SessionPreviousIDKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go new file mode 100644 index 0000000000..d27e8a8f8b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.24.0 +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go new file mode 100644 index 0000000000..6c019aafc3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go @@ -0,0 +1,200 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +import "go.opentelemetry.io/otel/attribute" + +// This event represents an occurrence of a lifecycle transition on the iOS +// platform. +const ( + // IosStateKey is the attribute Key conforming to the "ios.state" semantic + // conventions. It represents the this attribute represents the state the + // application has transitioned into at the occurrence of the event. + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + // Note: The iOS lifecycle states are defined in the [UIApplicationDelegate + // documentation](https://developer.apple.com/documentation/uikit/uiapplicationdelegate#1656902), + // and from which the `OS terminology` column values are derived. + IosStateKey = attribute.Key("ios.state") +) + +var ( + // The app has become `active`. Associated with UIKit notification `applicationDidBecomeActive` + IosStateActive = IosStateKey.String("active") + // The app is now `inactive`. Associated with UIKit notification `applicationWillResignActive` + IosStateInactive = IosStateKey.String("inactive") + // The app is now in the background. 
This value is associated with UIKit notification `applicationDidEnterBackground` + IosStateBackground = IosStateKey.String("background") + // The app is now in the foreground. This value is associated with UIKit notification `applicationWillEnterForeground` + IosStateForeground = IosStateKey.String("foreground") + // The app is about to terminate. Associated with UIKit notification `applicationWillTerminate` + IosStateTerminate = IosStateKey.String("terminate") +) + +// This event represents an occurrence of a lifecycle transition on the Android +// platform. +const ( + // AndroidStateKey is the attribute Key conforming to the "android.state" + // semantic conventions. It represents the this attribute represents the + // state the application has transitioned into at the occurrence of the + // event. + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + // Note: The Android lifecycle states are defined in [Activity lifecycle + // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc), + // and from which the `OS identifiers` are derived. + AndroidStateKey = attribute.Key("android.state") +) + +var ( + // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time + AndroidStateCreated = AndroidStateKey.String("created") + // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state + AndroidStateBackground = AndroidStateKey.String("background") + // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states + AndroidStateForeground = AndroidStateKey.String("foreground") +) + +// This semantic convention defines the attributes used to represent a feature +// flag evaluation as an event. +const ( + // FeatureFlagKeyKey is the attribute Key conforming to the + // "feature_flag.key" semantic conventions. It represents the unique + // identifier of the feature flag. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'logo-color' + FeatureFlagKeyKey = attribute.Key("feature_flag.key") + + // FeatureFlagProviderNameKey is the attribute Key conforming to the + // "feature_flag.provider_name" semantic conventions. It represents the + // name of the service provider that performs the flag evaluation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: experimental + // Examples: 'Flag Manager' + FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") + + // FeatureFlagVariantKey is the attribute Key conforming to the + // "feature_flag.variant" semantic conventions. It represents the sHOULD be + // a semantic identifier for a value. If one is unavailable, a stringified + // version of the value can be used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: experimental + // Examples: 'red', 'true', 'on' + // Note: A semantic identifier, commonly referred to as a variant, provides + // a means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant `red` maybe be used for the value `#c05543`. + // + // A stringified version of the value can be used in situations where a + // semantic identifier is unavailable. 
String representation of the value + // should be determined by the implementer. + FeatureFlagVariantKey = attribute.Key("feature_flag.variant") +) + +// FeatureFlagKey returns an attribute KeyValue conforming to the +// "feature_flag.key" semantic conventions. It represents the unique identifier +// of the feature flag. +func FeatureFlagKey(val string) attribute.KeyValue { + return FeatureFlagKeyKey.String(val) +} + +// FeatureFlagProviderName returns an attribute KeyValue conforming to the +// "feature_flag.provider_name" semantic conventions. It represents the name of +// the service provider that performs the flag evaluation. +func FeatureFlagProviderName(val string) attribute.KeyValue { + return FeatureFlagProviderNameKey.String(val) +} + +// FeatureFlagVariant returns an attribute KeyValue conforming to the +// "feature_flag.variant" semantic conventions. It represents the sHOULD be a +// semantic identifier for a value. If one is unavailable, a stringified +// version of the value can be used. +func FeatureFlagVariant(val string) attribute.KeyValue { + return FeatureFlagVariantKey.String(val) +} + +// RPC received/sent message. +const ( + // MessageCompressedSizeKey is the attribute Key conforming to the + // "message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + MessageCompressedSizeKey = attribute.Key("message.compressed_size") + + // MessageIDKey is the attribute Key conforming to the "message.id" + // semantic conventions. It represents the mUST be calculated as two + // different counters starting from `1` one for sent messages and one for + // received message. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Note: This way we guarantee that the values will be consistent between + // different implementations. + MessageIDKey = attribute.Key("message.id") + + // MessageTypeKey is the attribute Key conforming to the "message.type" + // semantic conventions. It represents the whether this is a received or + // sent message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessageTypeKey = attribute.Key("message.type") + + // MessageUncompressedSizeKey is the attribute Key conforming to the + // "message.uncompressed_size" semantic conventions. It represents the + // uncompressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") +) + +var ( + // sent + MessageTypeSent = MessageTypeKey.String("SENT") + // received + MessageTypeReceived = MessageTypeKey.String("RECEIVED") +) + +// MessageCompressedSize returns an attribute KeyValue conforming to the +// "message.compressed_size" semantic conventions. It represents the compressed +// size of the message in bytes. +func MessageCompressedSize(val int) attribute.KeyValue { + return MessageCompressedSizeKey.Int(val) +} + +// MessageID returns an attribute KeyValue conforming to the "message.id" +// semantic conventions. It represents the mUST be calculated as two different +// counters starting from `1` one for sent messages and one for received +// message. +func MessageID(val int) attribute.KeyValue { + return MessageIDKey.Int(val) +} + +// MessageUncompressedSize returns an attribute KeyValue conforming to the +// "message.uncompressed_size" semantic conventions. 
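The feature-flag constructors are normally attached to a span event rather than to the span itself. The sketch below is illustrative, not part of the vendored file; it assumes the go.opentelemetry.io/otel/trace API, and both the event name "feature_flag" and the helper name recordFlagEvaluation are assumptions rather than something this file defines.

// Illustrative sketch only; not part of the vendored file.
package example

import (
	"context"

	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// recordFlagEvaluation adds a feature-flag evaluation event to the current
// span. The event name and attribute values are illustrative.
func recordFlagEvaluation(ctx context.Context, key, provider, variant string) {
	trace.SpanFromContext(ctx).AddEvent("feature_flag",
		trace.WithAttributes(
			semconv.FeatureFlagKey(key),
			semconv.FeatureFlagProviderName(provider),
			semconv.FeatureFlagVariant(variant),
		),
	)
}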
It represents the +// uncompressed size of the message in bytes. +func MessageUncompressedSize(val int) attribute.KeyValue { + return MessageUncompressedSizeKey.Int(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go new file mode 100644 index 0000000000..7235bb51d9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go new file mode 100644 index 0000000000..a6b953f625 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go @@ -0,0 +1,1071 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +const ( + + // DBClientConnectionsUsage is the metric conforming to the + // "db.client.connections.usage" semantic conventions. It represents the number + // of connections that are currently in state described by the `state` + // attribute. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsUsageName = "db.client.connections.usage" + DBClientConnectionsUsageUnit = "{connection}" + DBClientConnectionsUsageDescription = "The number of connections that are currently in state described by the `state` attribute" + + // DBClientConnectionsIdleMax is the metric conforming to the + // "db.client.connections.idle.max" semantic conventions. It represents the + // maximum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsIdleMaxName = "db.client.connections.idle.max" + DBClientConnectionsIdleMaxUnit = "{connection}" + DBClientConnectionsIdleMaxDescription = "The maximum number of idle open connections allowed" + + // DBClientConnectionsIdleMin is the metric conforming to the + // "db.client.connections.idle.min" semantic conventions. It represents the + // minimum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsIdleMinName = "db.client.connections.idle.min" + DBClientConnectionsIdleMinUnit = "{connection}" + DBClientConnectionsIdleMinDescription = "The minimum number of idle open connections allowed" + + // DBClientConnectionsMax is the metric conforming to the + // "db.client.connections.max" semantic conventions. It represents the maximum + // number of open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsMaxName = "db.client.connections.max" + DBClientConnectionsMaxUnit = "{connection}" + DBClientConnectionsMaxDescription = "The maximum number of open connections allowed" + + // DBClientConnectionsPendingRequests is the metric conforming to the + // "db.client.connections.pending_requests" semantic conventions. It represents + // the number of pending requests for an open connection, cumulative for the + // entire pool. 
+ // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests" + DBClientConnectionsPendingRequestsUnit = "{request}" + DBClientConnectionsPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" + + // DBClientConnectionsTimeouts is the metric conforming to the + // "db.client.connections.timeouts" semantic conventions. It represents the + // number of connection timeouts that have occurred trying to obtain a + // connection from the pool. + // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + DBClientConnectionsTimeoutsName = "db.client.connections.timeouts" + DBClientConnectionsTimeoutsUnit = "{timeout}" + DBClientConnectionsTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool" + + // DBClientConnectionsCreateTime is the metric conforming to the + // "db.client.connections.create_time" semantic conventions. It represents the + // time it took to create a new connection. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsCreateTimeName = "db.client.connections.create_time" + DBClientConnectionsCreateTimeUnit = "ms" + DBClientConnectionsCreateTimeDescription = "The time it took to create a new connection" + + // DBClientConnectionsWaitTime is the metric conforming to the + // "db.client.connections.wait_time" semantic conventions. It represents the + // time it took to obtain an open connection from the pool. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsWaitTimeName = "db.client.connections.wait_time" + DBClientConnectionsWaitTimeUnit = "ms" + DBClientConnectionsWaitTimeDescription = "The time it took to obtain an open connection from the pool" + + // DBClientConnectionsUseTime is the metric conforming to the + // "db.client.connections.use_time" semantic conventions. It represents the + // time between borrowing a connection and returning it to the pool. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsUseTimeName = "db.client.connections.use_time" + DBClientConnectionsUseTimeUnit = "ms" + DBClientConnectionsUseTimeDescription = "The time between borrowing a connection and returning it to the pool" + + // AspnetcoreRoutingMatchAttempts is the metric conforming to the + // "aspnetcore.routing.match_attempts" semantic conventions. It represents the + // number of requests that were attempted to be matched to an endpoint. + // Instrument: counter + // Unit: {match_attempt} + // Stability: Experimental + AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" + AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" + AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." + + // AspnetcoreDiagnosticsExceptions is the metric conforming to the + // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the + // number of exceptions caught by exception handling middleware. + // Instrument: counter + // Unit: {exception} + // Stability: Experimental + AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" + AspnetcoreDiagnosticsExceptionsUnit = "{exception}" + AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." 
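The generated metric constants carry the instrument name, unit, and description as separate strings so they can be fed directly to instrument constructors. A minimal sketch, not part of the vendored file, assuming the go.opentelemetry.io/otel and go.opentelemetry.io/otel/metric APIs; the meter name and the function recordPoolUsage are hypothetical.

// Illustrative sketch only; not part of the vendored file.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// recordPoolUsage reports the current number of in-use connections through an
// updowncounter built from the generated name, unit, and description.
func recordPoolUsage(ctx context.Context, inUse int64) error {
	usage, err := otel.Meter("example/db").Int64UpDownCounter(
		semconv.DBClientConnectionsUsageName,
		metric.WithUnit(semconv.DBClientConnectionsUsageUnit),
		metric.WithDescription(semconv.DBClientConnectionsUsageDescription),
	)
	if err != nil {
		return err
	}
	usage.Add(ctx, inUse)
	return nil
}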
+ + // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the + // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It + // represents the number of requests that are currently active on the server + // that hold a rate limiting lease. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases" + AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}" + AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease." + + // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the + // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It + // represents the duration of rate limiting lease held by requests on the + // server. + // Instrument: histogram + // Unit: s + // Stability: Experimental + AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration" + AspnetcoreRateLimitingRequestLeaseDurationUnit = "s" + AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server." + + // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the + // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It + // represents the time the request spent in a queue waiting to acquire a rate + // limiting lease. + // Instrument: histogram + // Unit: s + // Stability: Experimental + AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue" + AspnetcoreRateLimitingRequestTimeInQueueUnit = "s" + AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the + // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It + // represents the number of requests that are currently queued, waiting to + // acquire a rate limiting lease. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests" + AspnetcoreRateLimitingQueuedRequestsUnit = "{request}" + AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingRequests is the metric conforming to the + // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the + // number of requests that tried to acquire a rate limiting lease. + // Instrument: counter + // Unit: {request} + // Stability: Experimental + AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests" + AspnetcoreRateLimitingRequestsUnit = "{request}" + AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease." + + // DNSLookupDuration is the metric conforming to the "dns.lookup.duration" + // semantic conventions. It represents the measures the time taken to perform a + // DNS lookup. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DNSLookupDurationName = "dns.lookup.duration" + DNSLookupDurationUnit = "s" + DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup." 
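The same pattern applies to histogram instruments such as dns.lookup.duration. Another illustrative sketch, not part of the vendored file, with the same assumptions about the metric API; the helper timeDNSLookup is hypothetical.

// Illustrative sketch only; not part of the vendored file.
package example

import (
	"context"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// timeDNSLookup records how long fn took into the dns.lookup.duration
// histogram, using the generated name, unit, and description constants.
func timeDNSLookup(ctx context.Context, fn func() error) error {
	hist, err := otel.Meter("example/dns").Float64Histogram(
		semconv.DNSLookupDurationName,
		metric.WithUnit(semconv.DNSLookupDurationUnit),
		metric.WithDescription(semconv.DNSLookupDurationDescription),
	)
	if err != nil {
		return err
	}
	start := time.Now()
	err = fn()
	hist.Record(ctx, time.Since(start).Seconds())
	return err
}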
+ + // HTTPClientOpenConnections is the metric conforming to the + // "http.client.open_connections" semantic conventions. It represents the + // number of outbound HTTP connections that are currently active or idle on the + // client. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + HTTPClientOpenConnectionsName = "http.client.open_connections" + HTTPClientOpenConnectionsUnit = "{connection}" + HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." + + // HTTPClientConnectionDuration is the metric conforming to the + // "http.client.connection.duration" semantic conventions. It represents the + // duration of the successfully established outbound HTTP connections. + // Instrument: histogram + // Unit: s + // Stability: Experimental + HTTPClientConnectionDurationName = "http.client.connection.duration" + HTTPClientConnectionDurationUnit = "s" + HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." + + // HTTPClientActiveRequests is the metric conforming to the + // "http.client.active_requests" semantic conventions. It represents the number + // of active HTTP requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPClientActiveRequestsName = "http.client.active_requests" + HTTPClientActiveRequestsUnit = "{request}" + HTTPClientActiveRequestsDescription = "Number of active HTTP requests." + + // HTTPClientRequestTimeInQueue is the metric conforming to the + // "http.client.request.time_in_queue" semantic conventions. It represents the + // amount of time requests spent on a queue waiting for an available + // connection. + // Instrument: histogram + // Unit: s + // Stability: Experimental + HTTPClientRequestTimeInQueueName = "http.client.request.time_in_queue" + HTTPClientRequestTimeInQueueUnit = "s" + HTTPClientRequestTimeInQueueDescription = "The amount of time requests spent on a queue waiting for an available connection." + + // KestrelActiveConnections is the metric conforming to the + // "kestrel.active_connections" semantic conventions. It represents the number + // of connections that are currently active on the server. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + KestrelActiveConnectionsName = "kestrel.active_connections" + KestrelActiveConnectionsUnit = "{connection}" + KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." + + // KestrelConnectionDuration is the metric conforming to the + // "kestrel.connection.duration" semantic conventions. It represents the + // duration of connections on the server. + // Instrument: histogram + // Unit: s + // Stability: Experimental + KestrelConnectionDurationName = "kestrel.connection.duration" + KestrelConnectionDurationUnit = "s" + KestrelConnectionDurationDescription = "The duration of connections on the server." + + // KestrelRejectedConnections is the metric conforming to the + // "kestrel.rejected_connections" semantic conventions. It represents the + // number of connections rejected by the server. + // Instrument: counter + // Unit: {connection} + // Stability: Experimental + KestrelRejectedConnectionsName = "kestrel.rejected_connections" + KestrelRejectedConnectionsUnit = "{connection}" + KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." 
+ + // KestrelQueuedConnections is the metric conforming to the + // "kestrel.queued_connections" semantic conventions. It represents the number + // of connections that are currently queued and are waiting to start. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + KestrelQueuedConnectionsName = "kestrel.queued_connections" + KestrelQueuedConnectionsUnit = "{connection}" + KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start." + + // KestrelQueuedRequests is the metric conforming to the + // "kestrel.queued_requests" semantic conventions. It represents the number of + // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are + // currently queued and are waiting to start. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + KestrelQueuedRequestsName = "kestrel.queued_requests" + KestrelQueuedRequestsUnit = "{request}" + KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start." + + // KestrelUpgradedConnections is the metric conforming to the + // "kestrel.upgraded_connections" semantic conventions. It represents the + // number of connections that are currently upgraded (WebSockets). . + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + KestrelUpgradedConnectionsName = "kestrel.upgraded_connections" + KestrelUpgradedConnectionsUnit = "{connection}" + KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets). ." + + // KestrelTLSHandshakeDuration is the metric conforming to the + // "kestrel.tls_handshake.duration" semantic conventions. It represents the + // duration of TLS handshakes on the server. + // Instrument: histogram + // Unit: s + // Stability: Experimental + KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration" + KestrelTLSHandshakeDurationUnit = "s" + KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server." + + // KestrelActiveTLSHandshakes is the metric conforming to the + // "kestrel.active_tls_handshakes" semantic conventions. It represents the + // number of TLS handshakes that are currently in progress on the server. + // Instrument: updowncounter + // Unit: {handshake} + // Stability: Experimental + KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes" + KestrelActiveTLSHandshakesUnit = "{handshake}" + KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server." + + // SignalrServerConnectionDuration is the metric conforming to the + // "signalr.server.connection.duration" semantic conventions. It represents the + // duration of connections on the server. + // Instrument: histogram + // Unit: s + // Stability: Experimental + SignalrServerConnectionDurationName = "signalr.server.connection.duration" + SignalrServerConnectionDurationUnit = "s" + SignalrServerConnectionDurationDescription = "The duration of connections on the server." + + // SignalrServerActiveConnections is the metric conforming to the + // "signalr.server.active_connections" semantic conventions. It represents the + // number of connections that are currently active on the server. 
+ // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + SignalrServerActiveConnectionsName = "signalr.server.active_connections" + SignalrServerActiveConnectionsUnit = "{connection}" + SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server." + + // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration" + // semantic conventions. It represents the measures the duration of the + // function's logic execution. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSInvokeDurationName = "faas.invoke_duration" + FaaSInvokeDurationUnit = "s" + FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution" + + // FaaSInitDuration is the metric conforming to the "faas.init_duration" + // semantic conventions. It represents the measures the duration of the + // function's initialization, such as a cold start. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSInitDurationName = "faas.init_duration" + FaaSInitDurationUnit = "s" + FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start" + + // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic + // conventions. It represents the number of invocation cold starts. + // Instrument: counter + // Unit: {coldstart} + // Stability: Experimental + FaaSColdstartsName = "faas.coldstarts" + FaaSColdstartsUnit = "{coldstart}" + FaaSColdstartsDescription = "Number of invocation cold starts" + + // FaaSErrors is the metric conforming to the "faas.errors" semantic + // conventions. It represents the number of invocation errors. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + FaaSErrorsName = "faas.errors" + FaaSErrorsUnit = "{error}" + FaaSErrorsDescription = "Number of invocation errors" + + // FaaSInvocations is the metric conforming to the "faas.invocations" semantic + // conventions. It represents the number of successful invocations. + // Instrument: counter + // Unit: {invocation} + // Stability: Experimental + FaaSInvocationsName = "faas.invocations" + FaaSInvocationsUnit = "{invocation}" + FaaSInvocationsDescription = "Number of successful invocations" + + // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic + // conventions. It represents the number of invocation timeouts. + // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + FaaSTimeoutsName = "faas.timeouts" + FaaSTimeoutsUnit = "{timeout}" + FaaSTimeoutsDescription = "Number of invocation timeouts" + + // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic + // conventions. It represents the distribution of max memory usage per + // invocation. + // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSMemUsageName = "faas.mem_usage" + FaaSMemUsageUnit = "By" + FaaSMemUsageDescription = "Distribution of max memory usage per invocation" + + // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic + // conventions. It represents the distribution of CPU usage per invocation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSCPUUsageName = "faas.cpu_usage" + FaaSCPUUsageUnit = "s" + FaaSCPUUsageDescription = "Distribution of CPU usage per invocation" + + // FaaSNetIo is the metric conforming to the "faas.net_io" semantic + // conventions. It represents the distribution of net I/O usage per invocation. 
+ // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSNetIoName = "faas.net_io" + FaaSNetIoUnit = "By" + FaaSNetIoDescription = "Distribution of net I/O usage per invocation" + + // HTTPServerRequestDuration is the metric conforming to the + // "http.server.request.duration" semantic conventions. It represents the + // duration of HTTP server requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPServerRequestDurationName = "http.server.request.duration" + HTTPServerRequestDurationUnit = "s" + HTTPServerRequestDurationDescription = "Duration of HTTP server requests." + + // HTTPServerActiveRequests is the metric conforming to the + // "http.server.active_requests" semantic conventions. It represents the number + // of active HTTP server requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPServerActiveRequestsName = "http.server.active_requests" + HTTPServerActiveRequestsUnit = "{request}" + HTTPServerActiveRequestsDescription = "Number of active HTTP server requests." + + // HTTPServerRequestBodySize is the metric conforming to the + // "http.server.request.body.size" semantic conventions. It represents the size + // of HTTP server request bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerRequestBodySizeName = "http.server.request.body.size" + HTTPServerRequestBodySizeUnit = "By" + HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies." + + // HTTPServerResponseBodySize is the metric conforming to the + // "http.server.response.body.size" semantic conventions. It represents the + // size of HTTP server response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerResponseBodySizeName = "http.server.response.body.size" + HTTPServerResponseBodySizeUnit = "By" + HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies." + + // HTTPClientRequestDuration is the metric conforming to the + // "http.client.request.duration" semantic conventions. It represents the + // duration of HTTP client requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPClientRequestDurationName = "http.client.request.duration" + HTTPClientRequestDurationUnit = "s" + HTTPClientRequestDurationDescription = "Duration of HTTP client requests." + + // HTTPClientRequestBodySize is the metric conforming to the + // "http.client.request.body.size" semantic conventions. It represents the size + // of HTTP client request bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientRequestBodySizeName = "http.client.request.body.size" + HTTPClientRequestBodySizeUnit = "By" + HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies." + + // HTTPClientResponseBodySize is the metric conforming to the + // "http.client.response.body.size" semantic conventions. It represents the + // size of HTTP client response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientResponseBodySizeName = "http.client.response.body.size" + HTTPClientResponseBodySizeUnit = "By" + HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." + + // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic + // conventions. It represents the measure of initial memory requested. 
+ // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmMemoryInitName = "jvm.memory.init" + JvmMemoryInitUnit = "By" + JvmMemoryInitDescription = "Measure of initial memory requested." + + // JvmSystemCPUUtilization is the metric conforming to the + // "jvm.system.cpu.utilization" semantic conventions. It represents the recent + // CPU utilization for the whole system as reported by the JVM. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization" + JvmSystemCPUUtilizationUnit = "1" + JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM." + + // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m" + // semantic conventions. It represents the average CPU load of the whole system + // for the last minute as reported by the JVM. + // Instrument: gauge + // Unit: {run_queue_item} + // Stability: Experimental + JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m" + JvmSystemCPULoad1mUnit = "{run_queue_item}" + JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM." + + // JvmBufferMemoryUsage is the metric conforming to the + // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of + // memory used by buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryUsageName = "jvm.buffer.memory.usage" + JvmBufferMemoryUsageUnit = "By" + JvmBufferMemoryUsageDescription = "Measure of memory used by buffers." + + // JvmBufferMemoryLimit is the metric conforming to the + // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of + // total memory capacity of buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryLimitName = "jvm.buffer.memory.limit" + JvmBufferMemoryLimitUnit = "By" + JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers." + + // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic + // conventions. It represents the number of buffers in the pool. + // Instrument: updowncounter + // Unit: {buffer} + // Stability: Experimental + JvmBufferCountName = "jvm.buffer.count" + JvmBufferCountUnit = "{buffer}" + JvmBufferCountDescription = "Number of buffers in the pool." + + // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic + // conventions. It represents the measure of memory used. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryUsedName = "jvm.memory.used" + JvmMemoryUsedUnit = "By" + JvmMemoryUsedDescription = "Measure of memory used." + + // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed" + // semantic conventions. It represents the measure of memory committed. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryCommittedName = "jvm.memory.committed" + JvmMemoryCommittedUnit = "By" + JvmMemoryCommittedDescription = "Measure of memory committed." + + // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic + // conventions. It represents the measure of max obtainable memory. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryLimitName = "jvm.memory.limit" + JvmMemoryLimitUnit = "By" + JvmMemoryLimitDescription = "Measure of max obtainable memory." 
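+
+ // Illustrative sketch only (assumes a metric.Meter named meter and a
+ // context.Context named ctx are already in scope, and that usedDelta is a
+ // hypothetical int64 change in used bytes): the stable JVM memory constants
+ // pair with an updowncounter in the same way, for example:
+ //
+ //	memUsed, _ := meter.Int64UpDownCounter(
+ //		JvmMemoryUsedName,
+ //		metric.WithUnit(JvmMemoryUsedUnit),
+ //		metric.WithDescription(JvmMemoryUsedDescription),
+ //	)
+ //	memUsed.Add(ctx, usedDelta)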
+ + // JvmMemoryUsedAfterLastGc is the metric conforming to the + // "jvm.memory.used_after_last_gc" semantic conventions. It represents the + // measure of memory used, as measured after the most recent garbage collection + // event on this pool. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc" + JvmMemoryUsedAfterLastGcUnit = "By" + JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool." + + // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic + // conventions. It represents the duration of JVM garbage collection actions. + // Instrument: histogram + // Unit: s + // Stability: Stable + JvmGcDurationName = "jvm.gc.duration" + JvmGcDurationUnit = "s" + JvmGcDurationDescription = "Duration of JVM garbage collection actions." + + // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic + // conventions. It represents the number of executing platform threads. + // Instrument: updowncounter + // Unit: {thread} + // Stability: Stable + JvmThreadCountName = "jvm.thread.count" + JvmThreadCountUnit = "{thread}" + JvmThreadCountDescription = "Number of executing platform threads." + + // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic + // conventions. It represents the number of classes loaded since JVM start. + // Instrument: counter + // Unit: {class} + // Stability: Stable + JvmClassLoadedName = "jvm.class.loaded" + JvmClassLoadedUnit = "{class}" + JvmClassLoadedDescription = "Number of classes loaded since JVM start." + + // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded" + // semantic conventions. It represents the number of classes unloaded since JVM + // start. + // Instrument: counter + // Unit: {class} + // Stability: Stable + JvmClassUnloadedName = "jvm.class.unloaded" + JvmClassUnloadedUnit = "{class}" + JvmClassUnloadedDescription = "Number of classes unloaded since JVM start." + + // JvmClassCount is the metric conforming to the "jvm.class.count" semantic + // conventions. It represents the number of classes currently loaded. + // Instrument: updowncounter + // Unit: {class} + // Stability: Stable + JvmClassCountName = "jvm.class.count" + JvmClassCountUnit = "{class}" + JvmClassCountDescription = "Number of classes currently loaded." + + // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic + // conventions. It represents the number of processors available to the Java + // virtual machine. + // Instrument: updowncounter + // Unit: {cpu} + // Stability: Stable + JvmCPUCountName = "jvm.cpu.count" + JvmCPUCountUnit = "{cpu}" + JvmCPUCountDescription = "Number of processors available to the Java virtual machine." + + // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic + // conventions. It represents the CPU time used by the process as reported by + // the JVM. + // Instrument: counter + // Unit: s + // Stability: Stable + JvmCPUTimeName = "jvm.cpu.time" + JvmCPUTimeUnit = "s" + JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM." + + // JvmCPURecentUtilization is the metric conforming to the + // "jvm.cpu.recent_utilization" semantic conventions. It represents the recent + // CPU utilization for the process as reported by the JVM.
+ // Instrument: gauge + // Unit: 1 + // Stability: Stable + JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization" + JvmCPURecentUtilizationUnit = "1" + JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM." + + // MessagingPublishDuration is the metric conforming to the + // "messaging.publish.duration" semantic conventions. It represents the + // measures the duration of publish operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingPublishDurationName = "messaging.publish.duration" + MessagingPublishDurationUnit = "s" + MessagingPublishDurationDescription = "Measures the duration of publish operation." + + // MessagingReceiveDuration is the metric conforming to the + // "messaging.receive.duration" semantic conventions. It represents the + // measures the duration of receive operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingReceiveDurationName = "messaging.receive.duration" + MessagingReceiveDurationUnit = "s" + MessagingReceiveDurationDescription = "Measures the duration of receive operation." + + // MessagingDeliverDuration is the metric conforming to the + // "messaging.deliver.duration" semantic conventions. It represents the + // measures the duration of deliver operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingDeliverDurationName = "messaging.deliver.duration" + MessagingDeliverDurationUnit = "s" + MessagingDeliverDurationDescription = "Measures the duration of deliver operation." + + // MessagingPublishMessages is the metric conforming to the + // "messaging.publish.messages" semantic conventions. It represents the + // measures the number of published messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingPublishMessagesName = "messaging.publish.messages" + MessagingPublishMessagesUnit = "{message}" + MessagingPublishMessagesDescription = "Measures the number of published messages." + + // MessagingReceiveMessages is the metric conforming to the + // "messaging.receive.messages" semantic conventions. It represents the + // measures the number of received messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingReceiveMessagesName = "messaging.receive.messages" + MessagingReceiveMessagesUnit = "{message}" + MessagingReceiveMessagesDescription = "Measures the number of received messages." + + // MessagingDeliverMessages is the metric conforming to the + // "messaging.deliver.messages" semantic conventions. It represents the + // measures the number of delivered messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingDeliverMessagesName = "messaging.deliver.messages" + MessagingDeliverMessagesUnit = "{message}" + MessagingDeliverMessagesDescription = "Measures the number of delivered messages." + + // RPCServerDuration is the metric conforming to the "rpc.server.duration" + // semantic conventions. It represents the measures the duration of inbound + // RPC. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + RPCServerDurationName = "rpc.server.duration" + RPCServerDurationUnit = "ms" + RPCServerDurationDescription = "Measures the duration of inbound RPC." + + // RPCServerRequestSize is the metric conforming to the + // "rpc.server.request.size" semantic conventions. It represents the measures + // the size of RPC request messages (uncompressed). 
+ // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCServerRequestSizeName = "rpc.server.request.size" + RPCServerRequestSizeUnit = "By" + RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." + + // RPCServerResponseSize is the metric conforming to the + // "rpc.server.response.size" semantic conventions. It represents the measures + // the size of RPC response messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCServerResponseSizeName = "rpc.server.response.size" + RPCServerResponseSizeUnit = "By" + RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." + + // RPCServerRequestsPerRPC is the metric conforming to the + // "rpc.server.requests_per_rpc" semantic conventions. It represents the + // measures the number of messages received per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc" + RPCServerRequestsPerRPCUnit = "{count}" + RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC." + + // RPCServerResponsesPerRPC is the metric conforming to the + // "rpc.server.responses_per_rpc" semantic conventions. It represents the + // measures the number of messages sent per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc" + RPCServerResponsesPerRPCUnit = "{count}" + RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC." + + // RPCClientDuration is the metric conforming to the "rpc.client.duration" + // semantic conventions. It represents the measures the duration of outbound + // RPC. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + RPCClientDurationName = "rpc.client.duration" + RPCClientDurationUnit = "ms" + RPCClientDurationDescription = "Measures the duration of outbound RPC." + + // RPCClientRequestSize is the metric conforming to the + // "rpc.client.request.size" semantic conventions. It represents the measures + // the size of RPC request messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCClientRequestSizeName = "rpc.client.request.size" + RPCClientRequestSizeUnit = "By" + RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." + + // RPCClientResponseSize is the metric conforming to the + // "rpc.client.response.size" semantic conventions. It represents the measures + // the size of RPC response messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCClientResponseSizeName = "rpc.client.response.size" + RPCClientResponseSizeUnit = "By" + RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." + + // RPCClientRequestsPerRPC is the metric conforming to the + // "rpc.client.requests_per_rpc" semantic conventions. It represents the + // measures the number of messages received per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc" + RPCClientRequestsPerRPCUnit = "{count}" + RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC." + + // RPCClientResponsesPerRPC is the metric conforming to the + // "rpc.client.responses_per_rpc" semantic conventions. 
It represents the + // measures the number of messages sent per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc" + RPCClientResponsesPerRPCUnit = "{count}" + RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC." + + // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic + // conventions. It represents the seconds each logical CPU spent on each mode. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemCPUTimeName = "system.cpu.time" + SystemCPUTimeUnit = "s" + SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode" + + // SystemCPUUtilization is the metric conforming to the + // "system.cpu.utilization" semantic conventions. It represents the difference + // in system.cpu.time since the last measurement, divided by the elapsed time + // and number of logical CPUs. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + SystemCPUUtilizationName = "system.cpu.utilization" + SystemCPUUtilizationUnit = "1" + SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs" + + // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency" + // semantic conventions. It represents the reports the current frequency of the + // CPU in Hz. + // Instrument: gauge + // Unit: {Hz} + // Stability: Experimental + SystemCPUFrequencyName = "system.cpu.frequency" + SystemCPUFrequencyUnit = "{Hz}" + SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz" + + // SystemCPUPhysicalCount is the metric conforming to the + // "system.cpu.physical.count" semantic conventions. It represents the reports + // the number of actual physical processor cores on the hardware. + // Instrument: updowncounter + // Unit: {cpu} + // Stability: Experimental + SystemCPUPhysicalCountName = "system.cpu.physical.count" + SystemCPUPhysicalCountUnit = "{cpu}" + SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware" + + // SystemCPULogicalCount is the metric conforming to the + // "system.cpu.logical.count" semantic conventions. It represents the reports + // the number of logical (virtual) processor cores created by the operating + // system to manage multitasking. + // Instrument: updowncounter + // Unit: {cpu} + // Stability: Experimental + SystemCPULogicalCountName = "system.cpu.logical.count" + SystemCPULogicalCountUnit = "{cpu}" + SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking" + + // SystemMemoryUsage is the metric conforming to the "system.memory.usage" + // semantic conventions. It represents the reports memory in use by state. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemoryUsageName = "system.memory.usage" + SystemMemoryUsageUnit = "By" + SystemMemoryUsageDescription = "Reports memory in use by state." + + // SystemMemoryLimit is the metric conforming to the "system.memory.limit" + // semantic conventions. It represents the total memory available in the + // system. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemoryLimitName = "system.memory.limit" + SystemMemoryLimitUnit = "By" + SystemMemoryLimitDescription = "Total memory available in the system." 
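+
+ // Illustrative sketch only (meter is an assumed metric.Meter already in scope;
+ // readCPUUtilization is a hypothetical helper returning a value in [0, 1]):
+ // gauge-style metrics such as system.cpu.utilization are typically registered
+ // as observable instruments with a callback, for example:
+ //
+ //	_, _ = meter.Float64ObservableGauge(
+ //		SystemCPUUtilizationName,
+ //		metric.WithUnit(SystemCPUUtilizationUnit),
+ //		metric.WithDescription(SystemCPUUtilizationDescription),
+ //		metric.WithFloat64Callback(func(ctx context.Context, o metric.Float64Observer) error {
+ //			o.Observe(readCPUUtilization())
+ //			return nil
+ //		}),
+ //	)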
+ + // SystemMemoryUtilization is the metric conforming to the + // "system.memory.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemMemoryUtilizationName = "system.memory.utilization" + SystemMemoryUtilizationUnit = "1" + + // SystemPagingUsage is the metric conforming to the "system.paging.usage" + // semantic conventions. It represents the unix swap or windows pagefile usage. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemPagingUsageName = "system.paging.usage" + SystemPagingUsageUnit = "By" + SystemPagingUsageDescription = "Unix swap or windows pagefile usage" + + // SystemPagingUtilization is the metric conforming to the + // "system.paging.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingUtilizationName = "system.paging.utilization" + SystemPagingUtilizationUnit = "1" + + // SystemPagingFaults is the metric conforming to the "system.paging.faults" + // semantic conventions. + // Instrument: counter + // Unit: {fault} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingFaultsName = "system.paging.faults" + SystemPagingFaultsUnit = "{fault}" + + // SystemPagingOperations is the metric conforming to the + // "system.paging.operations" semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingOperationsName = "system.paging.operations" + SystemPagingOperationsUnit = "{operation}" + + // SystemDiskIo is the metric conforming to the "system.disk.io" semantic + // conventions. + // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskIoName = "system.disk.io" + SystemDiskIoUnit = "By" + + // SystemDiskOperations is the metric conforming to the + // "system.disk.operations" semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskOperationsName = "system.disk.operations" + SystemDiskOperationsUnit = "{operation}" + + // SystemDiskIoTime is the metric conforming to the "system.disk.io_time" + // semantic conventions. It represents the time disk spent activated. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskIoTimeName = "system.disk.io_time" + SystemDiskIoTimeUnit = "s" + SystemDiskIoTimeDescription = "Time disk spent activated" + + // SystemDiskOperationTime is the metric conforming to the + // "system.disk.operation_time" semantic conventions. It represents the sum of + // the time each operation took to complete. 
+ // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskOperationTimeName = "system.disk.operation_time" + SystemDiskOperationTimeUnit = "s" + SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" + + // SystemDiskMerged is the metric conforming to the "system.disk.merged" + // semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskMergedName = "system.disk.merged" + SystemDiskMergedUnit = "{operation}" + + // SystemFilesystemUsage is the metric conforming to the + // "system.filesystem.usage" semantic conventions. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUsageName = "system.filesystem.usage" + SystemFilesystemUsageUnit = "By" + + // SystemFilesystemUtilization is the metric conforming to the + // "system.filesystem.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUtilizationName = "system.filesystem.utilization" + SystemFilesystemUtilizationUnit = "1" + + // SystemNetworkDropped is the metric conforming to the + // "system.network.dropped" semantic conventions. It represents the count of + // packets that are dropped or discarded even though there was no error. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + SystemNetworkDroppedName = "system.network.dropped" + SystemNetworkDroppedUnit = "{packet}" + SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" + + // SystemNetworkPackets is the metric conforming to the + // "system.network.packets" semantic conventions. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkPacketsName = "system.network.packets" + SystemNetworkPacketsUnit = "{packet}" + + // SystemNetworkErrors is the metric conforming to the "system.network.errors" + // semantic conventions. It represents the count of network errors detected. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + SystemNetworkErrorsName = "system.network.errors" + SystemNetworkErrorsUnit = "{error}" + SystemNetworkErrorsDescription = "Count of network errors detected" + + // SystemNetworkIo is the metric conforming to the "system.network.io" semantic + // conventions. + // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkIoName = "system.network.io" + SystemNetworkIoUnit = "By" + + // SystemNetworkConnections is the metric conforming to the + // "system.network.connections" semantic conventions. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
+ SystemNetworkConnectionsName = "system.network.connections" + SystemNetworkConnectionsUnit = "{connection}" + + // SystemProcessesCount is the metric conforming to the + // "system.processes.count" semantic conventions. It represents the total + // number of processes in each state. + // Instrument: updowncounter + // Unit: {process} + // Stability: Experimental + SystemProcessesCountName = "system.processes.count" + SystemProcessesCountUnit = "{process}" + SystemProcessesCountDescription = "Total number of processes in each state" + + // SystemProcessesCreated is the metric conforming to the + // "system.processes.created" semantic conventions. It represents the total + // number of processes created over uptime of the host. + // Instrument: counter + // Unit: {process} + // Stability: Experimental + SystemProcessesCreatedName = "system.processes.created" + SystemProcessesCreatedUnit = "{process}" + SystemProcessesCreatedDescription = "Total number of processes created over uptime of the host" + + // SystemLinuxMemoryAvailable is the metric conforming to the + // "system.linux.memory.available" semantic conventions. It represents an + // estimate of how much memory is available for starting new applications, + // without causing swapping. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemLinuxMemoryAvailableName = "system.linux.memory.available" + SystemLinuxMemoryAvailableUnit = "By" + SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go new file mode 100644 index 0000000000..d66bbe9c23 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go @@ -0,0 +1,2545 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +import "go.opentelemetry.io/otel/attribute" + +// A cloud environment (e.g. GCP, Azure, AWS). +const ( + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. It represents the cloud + // regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the + // resource is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. 
+ CloudPlatformKey = attribute.Key("cloud.platform") + + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://www.tencentcloud.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the + // "cloud.resource_id" semantic conventions. It represents the cloud + // provider-specific native identifier of the monitored cloud resource + // (e.g. an + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // on AWS, a [fully qualified resource + // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) + // on Azure, a [full resource + // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) + // on GCP) + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', + // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', + // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so it may be necessary to set `cloud.resource_id` as a span attribute + // instead. + // + // The exact value to use for `cloud.resource_id` depends on the cloud + // provider. + // The following well-known definitions MUST be used if you set this + // attribute and they apply: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with + // multiple different aliases. + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) + // of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider. 
+ CloudResourceIDKey = attribute.Key("cloud.resource_id") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Bare Metal Solution (BMS) + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + // IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + 
CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. It represents the cloud account ID +// the resource is assigned to. +func CloudAccountID(val string) attribute.KeyValue { + return CloudAccountIDKey.String(val) +} + +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. It represents the cloud +// regions often have multiple, isolated locations known as zones to increase +// availability. Availability zone represents the zone where the resource is +// running. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the +// "cloud.region" semantic conventions. It represents the geographical region +// the resource is running. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudResourceID returns an attribute KeyValue conforming to the +// "cloud.resource_id" semantic conventions. It represents the cloud +// provider-specific native identifier of the monitored cloud resource (e.g. an +// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) +// on AWS, a [fully qualified resource +// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on +// Azure, a [full resource +// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) +// on GCP) +func CloudResourceID(val string) attribute.KeyValue { + return CloudResourceIDKey.String(val) +} + +// A container instance. +const ( + // ContainerCommandKey is the attribute Key conforming to the + // "container.command" semantic conventions. It represents the command used + // to run the container (i.e. the command name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcontribcol' + // Note: If using embedded credentials or sensitive data, it is recommended + // to remove them to prevent potential leakage. + ContainerCommandKey = attribute.Key("container.command") + + // ContainerCommandArgsKey is the attribute Key conforming to the + // "container.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) run by the + // container. [2] + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcontribcol, --config, config.yaml' + ContainerCommandArgsKey = attribute.Key("container.command_args") + + // ContainerCommandLineKey is the attribute Key conforming to the + // "container.command_line" semantic conventions. It represents the full + // command run by the container as a single string representing the full + // command. [2] + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcontribcol --config config.yaml' + ContainerCommandLineKey = attribute.Key("container.command_line") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerImageIDKey is the attribute Key conforming to the + // "container.image.id" semantic conventions. It represents the runtime + // specific image identifier. Usually a hash algorithm followed by a UUID. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' + // Note: Docker defines a sha256 of the image id; `container.image.id` + // corresponds to the `Image` field from the Docker container inspect + // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) + // endpoint. + // K8S defines a link to the container registry repository with digest + // `"imageID": "registry.azurecr.io + // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. + // The ID is assigned by the container runtime and can vary in different + // environments. Consider using `oci.manifest.digest` if it is important to + // identify the same image in different environments/runtimes. + ContainerImageIDKey = attribute.Key("container.image.id") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageRepoDigestsKey is the attribute Key conforming to the + // "container.image.repo_digests" semantic conventions. It represents the + // repo digests of the container image as provided by the container + // runtime. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb', + // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' + // Note: + // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect) + // and + // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) + // report those under the `RepoDigests` field. + ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") + + // ContainerImageTagsKey is the attribute Key conforming to the + // "container.image.tags" semantic conventions. It represents the container + // image tags. An example can be found in [Docker Image + // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). + // Should be only the `` section of the full name for example from + // `registry.example.com/my-org/my-image:`. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'v1.27.1', '3.5.7-0' + ContainerImageTagsKey = attribute.Key("container.image.tags") + + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") +) + +// ContainerCommand returns an attribute KeyValue conforming to the +// "container.command" semantic conventions. It represents the command used to +// run the container (i.e. the command name). +func ContainerCommand(val string) attribute.KeyValue { + return ContainerCommandKey.String(val) +} + +// ContainerCommandArgs returns an attribute KeyValue conforming to the +// "container.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) run by the +// container. [2] +func ContainerCommandArgs(val ...string) attribute.KeyValue { + return ContainerCommandArgsKey.StringSlice(val) +} + +// ContainerCommandLine returns an attribute KeyValue conforming to the +// "container.command_line" semantic conventions. It represents the full +// command run by the container as a single string representing the full +// command. [2] +func ContainerCommandLine(val string) attribute.KeyValue { + return ContainerCommandLineKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerImageID returns an attribute KeyValue conforming to the +// "container.image.id" semantic conventions. It represents the runtime +// specific image identifier. Usually a hash algorithm followed by a UUID. +func ContainerImageID(val string) attribute.KeyValue { + return ContainerImageIDKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageRepoDigests returns an attribute KeyValue conforming to the +// "container.image.repo_digests" semantic conventions. It represents the repo +// digests of the container image as provided by the container runtime. +func ContainerImageRepoDigests(val ...string) attribute.KeyValue { + return ContainerImageRepoDigestsKey.StringSlice(val) +} + +// ContainerImageTags returns an attribute KeyValue conforming to the +// "container.image.tags" semantic conventions. It represents the container +// image tags. An example can be found in [Docker Image +// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). +// Should be only the `` section of the full name for example from +// `registry.example.com/my-org/my-image:`. 
+func ContainerImageTags(val ...string) attribute.KeyValue { + return ContainerImageTagsKey.StringSlice(val) +} + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// Describes device attributes. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine-readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human-readable version of + // the device model rather than a machine-readable alternative. 
+ DeviceModelNameKey = attribute.Key("device.model.name") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// A host is defined as a computing instance. For example, physical servers, +// virtual machines, switches or disk array. +const ( + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + HostArchKey = attribute.Key("host.arch") + + // HostCPUCacheL2SizeKey is the attribute Key conforming to the + // "host.cpu.cache.l2.size" semantic conventions. It represents the amount + // of level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 12288000 + HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") + + // HostCPUFamilyKey is the attribute Key conforming to the + // "host.cpu.family" semantic conventions. It represents the family or + // generation of the CPU. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '6', 'PA-RISC 1.1e' + HostCPUFamilyKey = attribute.Key("host.cpu.family") + + // HostCPUModelIDKey is the attribute Key conforming to the + // "host.cpu.model.id" semantic conventions. It represents the model + // identifier. It provides more granular information about the CPU, + // distinguishing it from other CPUs within the same family. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '6', '9000/778/B180L' + HostCPUModelIDKey = attribute.Key("host.cpu.model.id") + + // HostCPUModelNameKey is the attribute Key conforming to the + // "host.cpu.model.name" semantic conventions. It represents the model + // designation of the processor. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' + HostCPUModelNameKey = attribute.Key("host.cpu.model.name") + + // HostCPUSteppingKey is the attribute Key conforming to the + // "host.cpu.stepping" semantic conventions. It represents the stepping or + // core revisions. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1 + HostCPUSteppingKey = attribute.Key("host.cpu.stepping") + + // HostCPUVendorIDKey is the attribute Key conforming to the + // "host.cpu.vendor.id" semantic conventions. It represents the processor + // manufacturer identifier. A maximum 12-character string. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'GenuineIntel' + // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor + // ID string in EBX, EDX and ECX registers. Writing these to memory in this + // order results in a 12-character string. + HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") + + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // systems, this should be the `machine-id`. See the table below for the + // sources to use to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the vM image ID or host OS image ID. + // For Cloud, this value is from the provider. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageNameKey is the attribute Key conforming to the + // "host.image.name" semantic conventions. It represents the name of the VM + // image or OS install the host was instantiated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version + // string of the VM image or host OS as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") + + // HostIPKey is the attribute Key conforming to the "host.ip" semantic + // conventions. It represents the available IP addresses of the host, + // excluding loopback interfaces. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC + // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, + // excluding loopback interfaces. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal + // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): + // as hyphen-separated octets in uppercase hexadecimal form from most to + // least significant. + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or +// generation of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model +// identifier. It provides more granular information about the CPU, +// distinguishing it from other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. +func HostCPUModelName(val string) attribute.KeyValue { + return HostCPUModelNameKey.String(val) +} + +// HostCPUStepping returns an attribute KeyValue conforming to the +// "host.cpu.stepping" semantic conventions. It represents the stepping or core +// revisions. +func HostCPUStepping(val int) attribute.KeyValue { + return HostCPUSteppingKey.Int(val) +} + +// HostCPUVendorID returns an attribute KeyValue conforming to the +// "host.cpu.vendor.id" semantic conventions. It represents the processor +// manufacturer identifier. A maximum 12-character string. 
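A minimal sketch of how the host helpers above are typically attached to an OpenTelemetry SDK resource, mixing an enum member with constructor helpers. The semconv import path and its version suffix below are assumptions; they should match whichever versioned semconv package this file belongs to.

    package main

    import (
        "fmt"

        "go.opentelemetry.io/otel/sdk/resource"
        // The version suffix below is an assumption.
        semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
    )

    func main() {
        res := resource.NewWithAttributes(
            semconv.SchemaURL,
            semconv.HostArchAMD64,                // enum member for "host.arch"
            semconv.HostCPUFamily("6"),           // string-valued attribute
            semconv.HostCPUCacheL2Size(12288000), // int-valued attribute, in bytes
        )
        fmt.Println(res.Attributes())
    }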
+func HostCPUVendorID(val string) attribute.KeyValue { + return HostCPUVendorIDKey.String(val) +} + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use +// to determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the +// "host.image.id" semantic conventions. It represents the vM image ID or host +// OS image ID. For Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM +// image or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string +// of the VM image or host OS as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic +// conventions. It represents the available IP addresses of the host, excluding +// loopback interfaces. +func HostIP(val ...string) attribute.KeyValue { + return HostIPKey.StringSlice(val) +} + +// HostMac returns an attribute KeyValue conforming to the "host.mac" +// semantic conventions. It represents the available MAC addresses of the host, +// excluding loopback interfaces. +func HostMac(val ...string) attribute.KeyValue { + return HostMacKey.StringSlice(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" +// semantic conventions. It represents the name of the host. On Unix systems, +// it may contain what the hostname command returns, or the fully qualified +// hostname, or another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" +// semantic conventions. It represents the type of host. For Cloud, this must +// be the machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// Kubernetes resource attributes. +const ( + // K8SClusterNameKey is the attribute Key conforming to the + // "k8s.cluster.name" semantic conventions. It represents the name of the + // cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") + + // K8SClusterUIDKey is the attribute Key conforming to the + // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for + // the cluster, set to the UID of the `kube-system` namespace. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' + // Note: K8S doesn't have support for obtaining a cluster ID. 
If this is + // ever + // added, we will recommend collecting the `k8s.cluster.uid` through the + // official APIs. In the meantime, we are able to use the `uid` of the + // `kube-system` namespace as a proxy for cluster ID. Read on for the + // rationale. + // + // Every object created in a K8S cluster is assigned a distinct UID. The + // `kube-system` namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the `uid` of the `kube-system` + // namespace is a reasonable proxy for the K8S ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // [ISO/IEC 9834-8 and ITU-T + // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). + // Which states: + // + // > If generated according to one of the mechanisms defined in Rec. + // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // different from all other UUIDs generated before 3603 A.D., or is + // extremely likely to be different (depending on the mechanism chosen). + // + // Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") + + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") + + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") + + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// K8SClusterUID returns an attribute KeyValue conforming to the +// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the +// cluster, set to the UID of the `kube-system` namespace. +func K8SClusterUID(val string) attribute.KeyValue { + return K8SClusterUIDKey.String(val) +} + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. 
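For the Kubernetes attributes above, a small sketch that groups a workload's identity into a reusable attribute slice. The package name, function name, and semconv version suffix are assumptions.

    package kubemeta

    import (
        "go.opentelemetry.io/otel/attribute"
        semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // version suffix is an assumption
    )

    // WorkloadIdentity is a hypothetical helper bundling the usual
    // cluster/namespace/pod/container naming attributes.
    func WorkloadIdentity(cluster, namespace, pod, container string) []attribute.KeyValue {
        return []attribute.KeyValue{
            semconv.K8SClusterName(cluster),
            semconv.K8SNamespaceName(namespace),
            semconv.K8SPodName(pod),
            semconv.K8SContainerName(container),
        }
    }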
+func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// An OCI image manifest. +const ( + // OciManifestDigestKey is the attribute Key conforming to the + // "oci.manifest.digest" semantic conventions. 
It represents the digest of + // the OCI image manifest. For container images specifically is the digest + // by which the container image is known. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' + // Note: Follows [OCI Image Manifest + // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md), + // and specifically the [Digest + // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). + // An example can be found in [Example Image + // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest). + OciManifestDigestKey = attribute.Key("oci.manifest.digest") +) + +// OciManifestDigest returns an attribute KeyValue conforming to the +// "oci.manifest.digest" semantic conventions. It represents the digest of the +// OCI image manifest. For container images specifically is the digest by which +// the container image is known. +func OciManifestDigest(val string) attribute.KeyValue { + return OciManifestDigestKey.String(val) +} + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSBuildIDKey is the attribute Key conforming to the "os.build_id" + // semantic conventions. It represents the unique identifier for a + // particular build or compilation of the operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' + OSBuildIDKey = attribute.Key("os.build_id") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + OSTypeKey = attribute.Key("os.type") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the operating system. +func OSBuildID(val string) attribute.KeyValue { + return OSBuildIDKey.String(val) +} + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// An operating system process. +const ( + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. 
It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PPID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") +) + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PPID). 
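A sketch of a process describing itself with the helpers above, pulling values from the standard library. The package and function names are hypothetical and the semconv version suffix is an assumption.

    package procmeta

    import (
        "os"

        "go.opentelemetry.io/otel/attribute"
        semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // version suffix is an assumption
    )

    // SelfAttributes is a hypothetical helper describing the current process.
    func SelfAttributes() []attribute.KeyValue {
        attrs := []attribute.KeyValue{
            semconv.ProcessPID(os.Getpid()),
            semconv.ProcessCommandArgs(os.Args...),
        }
        if exe, err := os.Executable(); err == nil {
            attrs = append(attrs, semconv.ProcessExecutablePath(exe))
        }
        return attrs
    }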
+func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// The Android platform on which the Android application is running. +const ( + // AndroidOSAPILevelKey is the attribute Key conforming to the + // "android.os.api_level" semantic conventions. It represents the uniquely + // identifies the framework API revision offered by a version + // (`os.version`) of the android operating system. More information can be + // found + // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '33', '32' + AndroidOSAPILevelKey = attribute.Key("android.os.api_level") +) + +// AndroidOSAPILevel returns an attribute KeyValue conforming to the +// "android.os.api_level" semantic conventions. It represents the uniquely +// identifies the framework API revision offered by a version (`os.version`) of +// the android operating system. More information can be found +// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). +func AndroidOSAPILevel(val string) attribute.KeyValue { + return AndroidOSAPILevelKey.String(val) +} + +// The web browser in which the application represented by the resource is +// running. The `browser.*` attributes MUST be used only for resources that +// represent applications running in a web browser (regardless of whether +// running on a mobile or desktop device). +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. 
It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an + // [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the task + // definition family this task definition is a member of. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for this task definition. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). 
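A sketch combining the ECS attributes above, including the launch-type enum member. The package and function names are hypothetical and the semconv version suffix is an assumption.

    package ecsmeta

    import (
        "go.opentelemetry.io/otel/attribute"
        semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // version suffix is an assumption
    )

    // TaskAttributes is a hypothetical helper for an ECS task running on Fargate.
    func TaskAttributes(clusterARN, taskARN, family, revision string) []attribute.KeyValue {
        return []attribute.KeyValue{
            semconv.AWSECSClusterARN(clusterARN),
            semconv.AWSECSTaskARN(taskARN),
            semconv.AWSECSTaskFamily(family),
            semconv.AWSECSTaskRevision(revision),
            semconv.AWSECSLaunchtypeFargate, // enum member, not a constructor
        }
    }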
+func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Resources specific to Amazon Web Services. +const ( + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. 
It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") +) + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// Resource used by Google Cloud Run. +const ( + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the + // name of the Cloud Run + // [execution](https://cloud.google.com/run/docs/managing/job-executions) + // being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the + // index for a task within an execution as provided by the + // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1 + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") +) + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name +// of the Cloud Run +// [execution](https://cloud.google.com/run/docs/managing/job-executions) being +// run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index +// for a task within an execution as provided by the +// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// Resources used by Google Compute Engine (GCE). +const ( + // GCPGceInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the + // hostname of a GCE instance. This is the full value of the default or + // [custom + // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', + // 'sample-vm.us-west1-b.c.my-project.internal' + GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") + + // GCPGceInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. It represents the instance + // name of a GCE instance. This is the value provided by `host.name`, the + // visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the [default internal + // DNS + // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") +) + +// GCPGceInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. This is the full value of the default or [custom +// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). +func GCPGceInstanceHostname(val string) attribute.KeyValue { + return GCPGceInstanceHostnameKey.String(val) +} + +// GCPGceInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance +// name of a GCE instance. This is the value provided by `host.name`, the +// visible name of the instance in the Cloud Console UI, and the prefix for the +// default hostname of the instance as defined by the [default internal DNS +// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). +func GCPGceInstanceName(val string) attribute.KeyValue { + return GCPGceInstanceNameKey.String(val) +} + +// Heroku dyno metadata +const ( + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + HerokuAppIDKey = attribute.Key("heroku.app.id") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit + // hash for the current release + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. 
It represents + // the time and date the release was created + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2022-10-23T18:00:42Z' + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") +) + +// HerokuAppID returns an attribute KeyValue conforming to the +// "heroku.app.id" semantic conventions. It represents the unique identifier +// for the application +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming +// to the "heroku.release.creation_timestamp" semantic conventions. It +// represents the time and date the release was created +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// The software deployment. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'staging', 'production' + // Note: `deployment.environment` does not affect the uniqueness + // constraints defined through + // the `service.namespace`, `service.name` and `service.instance.id` + // resource attributes. + // This implies that resources carrying the following attribute + // combinations MUST be + // considered to be identifying the same service: + // + // * `service.name=frontend`, `deployment.environment=production` + // * `service.name=frontend`, `deployment.environment=staging`. + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment environment](https://wikipedia.org/wiki/Deployment_environment) +// (aka deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// A serverless instance. +const ( + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function converted to Bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. 
On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must + // be multiplied by 1,048,576). + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") + + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run (Services):** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") +) + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// A service instance. +const ( + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. The format is not defined by these + // conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2.0.0', 'a01dbef8a' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// A service instance. +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-k8s-pod-deployment-1', + // '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance, however it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. 
It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") +) + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'opentelemetry' + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute + // to `opentelemetry`. + // If another SDK, like a fork or a vendor-provided implementation, is + // used, this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module + // name of this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this + // case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of + // the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set + // the `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the + // version string of the auto instrumentation agent or distribution, if + // used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2.3' + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") +) + +// TelemetryDistroName returns an attribute KeyValue conforming to the +// "telemetry.distro.name" semantic conventions. It represents the name of the +// auto instrumentation agent or distribution, if used. +func TelemetryDistroName(val string) attribute.KeyValue { + return TelemetryDistroNameKey.String(val) +} + +// TelemetryDistroVersion returns an attribute KeyValue conforming to the +// "telemetry.distro.version" semantic conventions. It represents the version +// string of the auto instrumentation agent or distribution, if used. 
+func TelemetryDistroVersion(val string) attribute.KeyValue { + return TelemetryDistroVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OTelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.0.0' + OTelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. 
It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry +// Scope's concepts. +const ( + // OTelLibraryNameKey is the attribute Key conforming to the + // "otel.library.name" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'io.opentelemetry.contrib.mongodb' + // Deprecated: use the `otel.scope.name` attribute. + OTelLibraryNameKey = attribute.Key("otel.library.name") + + // OTelLibraryVersionKey is the attribute Key conforming to the + // "otel.library.version" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '1.0.0' + // Deprecated: use the `otel.scope.version` attribute. + OTelLibraryVersionKey = attribute.Key("otel.library.version") +) + +// OTelLibraryName returns an attribute KeyValue conforming to the +// "otel.library.name" semantic conventions. +// +// Deprecated: use the `otel.scope.name` attribute. +func OTelLibraryName(val string) attribute.KeyValue { + return OTelLibraryNameKey.String(val) +} + +// OTelLibraryVersion returns an attribute KeyValue conforming to the +// "otel.library.version" semantic conventions. +// +// Deprecated: use the `otel.scope.version` attribute. +func OTelLibraryVersion(val string) attribute.KeyValue { + return OTelLibraryVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go new file mode 100644 index 0000000000..fe80b1731d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/<version> +const SchemaURL = "https://opentelemetry.io/schemas/1.24.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go new file mode 100644 index 0000000000..c1718234e5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go @@ -0,0 +1,1323 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +import "go.opentelemetry.io/otel/attribute" + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](/docs/resource/README.md#service) of the remote + // service. SHOULD be equal to the actual `service.name` resource attribute + // of the remote service if any. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions.
It represents the +// [`service.name`](/docs/resource/README.md#service) of the remote service. +// SHOULD be equal to the actual `service.name` resource attribute of the +// remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. +func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). +func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions. 
It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeStacktraceKey is the attribute Key conforming to the + // "code.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'at + // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. 
It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). +func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeStacktrace returns an attribute KeyValue conforming to the +// "code.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func CodeStacktrace(val string) attribute.KeyValue { + return CodeStacktraceKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `cloud.resource_id` if an alias is + // involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. 
It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for CloudEvents. CloudEvents is a specification on how to define +// event data in a standard way. These attributes can be attached to spans when +// performing operations with CloudEvents, regardless of the protocol being +// used. +const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) + // uniquely identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by + // source). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") +) + +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) +// uniquely identifies the event. 
+func CloudeventsEventID(val string) attribute.KeyValue { + return CloudeventsEventIDKey.String(val) +} + +// CloudeventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the +// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) +// identifies the context in which an event happened. +func CloudeventsEventSource(val string) attribute.KeyValue { + return CloudeventsEventSourceKey.String(val) +} + +// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to +// the "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents +// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) +// which the event uses. +func CloudeventsEventSpecVersion(val string) attribute.KeyValue { + return CloudeventsEventSpecVersionKey.String(val) +} + +// CloudeventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the +// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) +// of the event in the context of the event producer (identified by source). +func CloudeventsEventSubject(val string) attribute.KeyValue { + return CloudeventsEventSubjectKey.String(val) +} + +// CloudeventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the +// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) +// contains a value describing the type of event related to the originating +// occurrence. +func CloudeventsEventType(val string) attribute.KeyValue { + return CloudeventsEventTypeKey.String(val) +} + +// Semantic conventions for the OpenTracing Shim +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The causal relationship between a child Span and a parent Span. + OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span doesn't depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // OTelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'resource not found' + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// This semantic convention describes an instance of a function that runs +// without provisioning or managing of servers (also known as serverless +// functions or Function as a Service (FaaS)) with spans. +const ( + // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation + // ID of the current function invocation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSInvocationIDKey = attribute.Key("faas.invocation_id") +) + +// FaaSInvocationID returns an attribute KeyValue conforming to the +// "faas.invocation_id" semantic conventions. It represents the invocation ID +// of the current function invocation. +func FaaSInvocationID(val string) attribute.KeyValue { + return FaaSInvocationIDKey.String(val) +} + +// Semantic Convention for FaaS triggered as a response to some data source +// operation such as a database or filesystem read/write. +const ( + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in + // Cosmos DB to the database name. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or + // S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It represents the + // describes the type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") + + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") +) + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). 
+func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// Contains additional attributes for incoming FaaS spans. +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// The `aws` conventions apply to operations using the AWS SDK. They map +// request or response parameters in AWS SDK API calls to attributes on a Span. +// The conventions have been collected over time based on feedback from AWS +// users of tracing and will continue to evolve as new interesting conventions +// are found. +// Some descriptions are also provided for populating general OpenTelemetry +// semantic conventions based on these APIs. +const ( + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
+ // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") +) + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. 
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// DynamoDB.CreateTable +const ( + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") +) + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. 
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// DynamoDB.ListTables +const ( + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the the + // number of items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") +) + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the the +// number of items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// DynamoDB.Query +const ( + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// DynamoDB.Scan +const ( + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") +) + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. +func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// DynamoDB.UpdateTable +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in the + // the `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. 
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the the +// `GlobalSecondaryIndexUpdates` request field. +func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// Attributes that exist for S3 request types. +const ( + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request + // refers to. Corresponds to the `--bucket` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'some-bucket-name' + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. + // This applies to almost all S3 operations except `list-buckets`. + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source + // object (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' + // Note: The `delete` attribute is only applicable to the + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e. that require the object key as a mandatory parameter. + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. +func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + GraphqlDocumentKey = attribute.Key("graphql.document") + + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} diff --git a/vendor/gocloud.dev/aws/aws.go b/vendor/gocloud.dev/aws/aws.go index 55dc38644d..4ee177c3be 100644 --- a/vendor/gocloud.dev/aws/aws.go +++ b/vendor/gocloud.dev/aws/aws.go @@ -22,10 +22,14 @@ import ( "strconv" awsv2 "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/ratelimit" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/config" awsv2cfg "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/session" "github.com/google/wire" ) @@ -79,8 +83,10 @@ func (co ConfigOverrider) ClientConfig(serviceName string, cfgs ...*aws.Config) // The following query options are supported: // - region: The AWS region for requests; sets aws.Config.Region. // - endpoint: The endpoint URL (hostname only or fully qualified URI); sets aws.Config.Endpoint. -// - disableSSL: A value of "true" disables SSL when sending requests; sets aws.Config.DisableSSL. -// - s3ForcePathStyle: A value of "true" forces the request to use path-style addressing; sets aws.Config.S3ForcePathStyle. +// - disable_ssl (or disableSSL): A value of "true" disables SSL when sending requests; sets aws.Config.DisableSSL. +// - s3_force_path_style (or s3ForcePathStyle): A value of "true" forces the request to use path-style addressing; sets aws.Config.S3ForcePathStyle. 
+// - dualstack: A value of "true" enables dual stack (IPv4 and IPv6) endpoints
+// - fips: A value of "true" enables the use of FIPS endpoints
 func ConfigFromURLParams(q url.Values) (*aws.Config, error) {
 	var cfg aws.Config
 	for param, values := range q {
@@ -90,18 +96,32 @@ func ConfigFromURLParams(q url.Values) (*aws.Config, error) {
 			cfg.Region = aws.String(value)
 		case "endpoint":
 			cfg.Endpoint = aws.String(value)
-		case "disableSSL":
+		case "disable_ssl", "disableSSL":
 			b, err := strconv.ParseBool(value)
 			if err != nil {
 				return nil, fmt.Errorf("invalid value for query parameter %q: %v", param, err)
 			}
 			cfg.DisableSSL = aws.Bool(b)
-		case "s3ForcePathStyle":
+		case "s3_force_path_style", "s3ForcePathStyle":
 			b, err := strconv.ParseBool(value)
 			if err != nil {
 				return nil, fmt.Errorf("invalid value for query parameter %q: %v", param, err)
 			}
 			cfg.S3ForcePathStyle = aws.Bool(b)
+		case "dualstack":
+			b, err := strconv.ParseBool(value)
+			if err != nil {
+				return nil, fmt.Errorf("invalid value for query parameter %q: %v", param, err)
+			}
+			cfg.UseDualStack = aws.Bool(b)
+		case "fips":
+			b, err := strconv.ParseBool(value)
+			if err != nil {
+				return nil, fmt.Errorf("invalid value for query parameter %q: %v", param, err)
+			}
+			if b {
+				cfg.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
+			}
 		case "awssdk":
 			// ignore, should be handled before this
 		default:
@@ -148,15 +168,14 @@ func NewSessionFromURLParams(q url.Values) (*session.Session, url.Values, error)
 //
 // "awssdk=v1" will force V1.
 // "awssdk=v2" will force V2.
-// No "awssdk" parameter (or any other value) will return the default, currently V1.
-// Note that the default may change in the future.
+// No "awssdk" parameter (or any other value) will return the default, currently V2.
 func UseV2(q url.Values) bool {
 	if values, ok := q["awssdk"]; ok {
-		if values[0] == "v2" || values[0] == "V2" {
-			return true
+		if values[0] == "v1" || values[0] == "V1" {
+			return false
 		}
 	}
-	return false
+	return true
 }
 
 // NewDefaultV2Config returns a aws.Config for AWS SDK v2, using the default options.
@@ -178,30 +197,83 @@ func NewDefaultV2Config(ctx context.Context) (awsv2.Config, error) {
 // - region: The AWS region for requests; sets WithRegion.
 // - profile: The shared config profile to use; sets SharedConfigProfile.
 // - endpoint: The AWS service endpoint to send HTTP request.
+// - hostname_immutable: Make the hostname immutable, only works if endpoint is also set.
+// - dualstack: A value of "true" enables dual stack (IPv4 and IPv6) endpoints.
+// - fips: A value of "true" enables the use of FIPS endpoints.
+// - rate_limiter_capacity: An integer value configures the capacity of a token bucket used
+// in client-side rate limits. If no value is set, the client-side rate limiting is disabled.
+// See https://aws.github.io/aws-sdk-go-v2/docs/configuring-sdk/retries-timeouts/#client-side-rate-limiting.
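[Editorial aside, not part of the patch: a minimal sketch of how the query options listed above might be exercised through V2ConfigFromURLParams; the parameter values are illustrative only.]

package main

import (
	"context"
	"fmt"
	"net/url"

	gcaws "gocloud.dev/aws"
)

func main() {
	q := url.Values{}
	q.Set("region", "us-west-2")          // sets WithRegion
	q.Set("fips", "true")                 // request FIPS endpoints
	q.Set("rate_limiter_capacity", "500") // enable client-side rate limiting with a 500-token bucket
	cfg, err := gcaws.V2ConfigFromURLParams(context.Background(), q)
	if err != nil {
		fmt.Println("config error:", err)
		return
	}
	fmt.Println("resolved region:", cfg.Region)
}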
func V2ConfigFromURLParams(ctx context.Context, q url.Values) (awsv2.Config, error) { + var endpoint string + var hostnameImmutable bool + var rateLimitCapacity int64 var opts []func(*awsv2cfg.LoadOptions) error for param, values := range q { value := values[0] switch param { + case "hostname_immutable": + var err error + hostnameImmutable, err = strconv.ParseBool(value) + if err != nil { + return awsv2.Config{}, fmt.Errorf("invalid value for hostname_immutable: %w", err) + } case "region": opts = append(opts, awsv2cfg.WithRegion(value)) case "endpoint": - customResolver := awsv2.EndpointResolverWithOptionsFunc( - func(service, region string, options ...interface{}) (awsv2.Endpoint, error) { - return awsv2.Endpoint{ - PartitionID: "aws", - URL: value, - SigningRegion: region, - }, nil - }) - opts = append(opts, awsv2cfg.WithEndpointResolverWithOptions(customResolver)) + endpoint = value case "profile": opts = append(opts, awsv2cfg.WithSharedConfigProfile(value)) + case "dualstack": + dualStack, err := strconv.ParseBool(value) + if err != nil { + return awsv2.Config{}, fmt.Errorf("invalid value for dualstack: %w", err) + } + if dualStack { + opts = append(opts, awsv2cfg.WithUseDualStackEndpoint(awsv2.DualStackEndpointStateEnabled)) + } + case "fips": + fips, err := strconv.ParseBool(value) + if err != nil { + return awsv2.Config{}, fmt.Errorf("invalid value for fips: %w", err) + } + if fips { + opts = append(opts, awsv2cfg.WithUseFIPSEndpoint(awsv2.FIPSEndpointStateEnabled)) + } + case "rate_limiter_capacity": + var err error + rateLimitCapacity, err = strconv.ParseInt(value, 10, 32) + if err != nil { + return awsv2.Config{}, fmt.Errorf("invalid value for capacity: %w", err) + } case "awssdk": // ignore, should be handled before this default: return awsv2.Config{}, fmt.Errorf("unknown query parameter %q", param) } } + if endpoint != "" { + customResolver := awsv2.EndpointResolverWithOptionsFunc( + func(service, region string, options ...interface{}) (awsv2.Endpoint, error) { + return awsv2.Endpoint{ + PartitionID: "aws", + URL: endpoint, + SigningRegion: region, + HostnameImmutable: hostnameImmutable, + }, nil + }) + opts = append(opts, awsv2cfg.WithEndpointResolverWithOptions(customResolver)) + } + + var rateLimiter retry.RateLimiter + rateLimiter = ratelimit.None + if rateLimitCapacity > 0 { + rateLimiter = ratelimit.NewTokenRateLimit(uint(rateLimitCapacity)) + } + opts = append(opts, config.WithRetryer(func() awsv2.Retryer { + return retry.NewStandard(func(so *retry.StandardOptions) { + so.RateLimiter = rateLimiter + }) + })) + return awsv2cfg.LoadDefaultConfig(ctx, opts...) 
} diff --git a/vendor/gocloud.dev/docstore/awsdynamodb/codec.go b/vendor/gocloud.dev/docstore/awsdynamodb/codec.go index 8fc3dc4aac..3f87c5123e 100644 --- a/vendor/gocloud.dev/docstore/awsdynamodb/codec.go +++ b/vendor/gocloud.dev/docstore/awsdynamodb/codec.go @@ -212,7 +212,6 @@ func (d decoder) AsFloat() (float64, bool) { return 0, false } return f, true - } func (d decoder) AsComplex() (complex128, bool) { diff --git a/vendor/gocloud.dev/docstore/awsdynamodb/dynamo.go b/vendor/gocloud.dev/docstore/awsdynamodb/dynamo.go index 6f71f15622..2df0aef081 100644 --- a/vendor/gocloud.dev/docstore/awsdynamodb/dynamo.go +++ b/vendor/gocloud.dev/docstore/awsdynamodb/dynamo.go @@ -180,17 +180,17 @@ func (c *collection) runGets(ctx context.Context, actions []*driver.Action, errs for i := 0; i < n; i++ { i := i t.Acquire() - go func() { + go func(group []*driver.Action) { defer t.Release() c.batchGet(ctx, group, errs, opts, batchSize*i, batchSize*(i+1)-1) - }() + }(group) } if n*batchSize < len(group) { t.Acquire() - go func() { + go func(group []*driver.Action) { defer t.Release() c.batchGet(ctx, group, errs, opts, batchSize*n, len(group)-1) - }() + }(group) } } t.Wait() diff --git a/vendor/gocloud.dev/docstore/awsdynamodb/query.go b/vendor/gocloud.dev/docstore/awsdynamodb/query.go index ae0eb61137..49d8dd31e7 100644 --- a/vendor/gocloud.dev/docstore/awsdynamodb/query.go +++ b/vendor/gocloud.dev/docstore/awsdynamodb/query.go @@ -471,17 +471,25 @@ type documentIterator struct { func (it *documentIterator) Next(ctx context.Context, doc driver.Document) error { // Skip the first 'n' documents where 'n' is the offset. - if it.offset > 0 && it.count < it.offset { - it.curr++ - it.count++ - return it.Next(ctx, doc) + for it.count < it.offset { + if err := it.next(ctx, doc, false); err != nil { + return err + } } + return it.next(ctx, doc, true) +} + +func (it *documentIterator) next(ctx context.Context, doc driver.Document, decode bool) error { // Only start counting towards the limit after the offset has been reached. - if it.limit > 0 && it.count >= it.offset+it.limit || it.curr >= len(it.items) && it.last == nil { + if it.limit > 0 && it.count >= it.offset+it.limit { return io.EOF } - if it.curr >= len(it.items) { + // it.items can be empty after a call to it.qr.run, but unless it.last is nil there may be more items. + for it.curr >= len(it.items) { // Make a new query request at the end of this page. + if it.last == nil { + return io.EOF + } var err error it.items, it.last, it.asFunc, err = it.qr.run(ctx, it.last) if err != nil { @@ -489,8 +497,10 @@ func (it *documentIterator) Next(ctx context.Context, doc driver.Document) error } it.curr = 0 } - if err := decodeDoc(&dyn.AttributeValue{M: it.items[it.curr]}, doc); err != nil { - return err + if decode { + if err := decodeDoc(&dyn.AttributeValue{M: it.items[it.curr]}, doc); err != nil { + return err + } } it.curr++ it.count++ diff --git a/vendor/gocloud.dev/docstore/driver/driver.go b/vendor/gocloud.dev/docstore/driver/driver.go index 285413f14f..bd2bbccb0e 100644 --- a/vendor/gocloud.dev/docstore/driver/driver.go +++ b/vendor/gocloud.dev/docstore/driver/driver.go @@ -225,7 +225,6 @@ type Filter struct { // A DocumentIterator iterates through the results (for Get action). type DocumentIterator interface { - // Next tries to get the next item in the query result and decodes into Document // with the driver's codec. // When there are no more results, it should return io.EOF. 
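[Editorial aside, not part of the patch: the query.go change above replaces the recursive offset handling with an explicit skip loop followed by a decoding read. The toy iterator below is a self-contained sketch of that skip-then-decode pattern; it is hypothetical and not a gocloud.dev type.]

package main

import (
	"fmt"
	"io"
)

type toyIterator struct {
	items         []string
	curr, count   int
	offset, limit int
}

// next advances the iterator; offset rows are consumed with decode=false,
// mirroring how the driver skips them without decoding into a document.
func (it *toyIterator) next(decode bool) (string, error) {
	if it.limit > 0 && it.count >= it.offset+it.limit {
		return "", io.EOF // limit only counts rows past the offset
	}
	if it.curr >= len(it.items) {
		return "", io.EOF // no further pages in this toy version
	}
	item := it.items[it.curr]
	it.curr++
	it.count++
	if !decode {
		return "", nil
	}
	return item, nil
}

func (it *toyIterator) Next() (string, error) {
	for it.count < it.offset { // skip the first 'offset' items
		if _, err := it.next(false); err != nil {
			return "", err
		}
	}
	return it.next(true)
}

func main() {
	it := &toyIterator{items: []string{"a", "b", "c", "d", "e"}, offset: 1, limit: 3}
	for {
		v, err := it.Next()
		if err != nil {
			break
		}
		fmt.Println(v) // prints b, c, d
	}
}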
diff --git a/vendor/gocloud.dev/docstore/gcpfirestore/codec.go b/vendor/gocloud.dev/docstore/gcpfirestore/codec.go index 3f26d6c9dc..63debb125b 100644 --- a/vendor/gocloud.dev/docstore/gcpfirestore/codec.go +++ b/vendor/gocloud.dev/docstore/gcpfirestore/codec.go @@ -277,6 +277,7 @@ func (d decoder) DecodeList(f func(int, driver.Decoder) bool) { } } } + func (d decoder) MapLen() (int, bool) { m := d.pv.GetMapValue() if m == nil { @@ -284,6 +285,7 @@ func (d decoder) MapLen() (int, bool) { } return len(m.Fields), true } + func (d decoder) DecodeMap(f func(string, driver.Decoder, bool) bool) { for k, v := range d.pv.GetMapValue().Fields { if !f(k, decoder{v}, true) { diff --git a/vendor/gocloud.dev/docstore/gcpfirestore/fs.go b/vendor/gocloud.dev/docstore/gcpfirestore/fs.go index 5469927778..076b50f48d 100644 --- a/vendor/gocloud.dev/docstore/gcpfirestore/fs.go +++ b/vendor/gocloud.dev/docstore/gcpfirestore/fs.go @@ -83,6 +83,7 @@ import ( "gocloud.dev/internal/useragent" "google.golang.org/api/option" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "google.golang.org/protobuf/proto" @@ -98,7 +99,8 @@ func Dial(ctx context.Context, ts gcp.TokenSource) (*vkit.Client, func(), error) useragent.ClientOption("docstore"), } if host := os.Getenv("FIRESTORE_EMULATOR_HOST"); host != "" { - conn, err := grpc.DialContext(ctx, host, grpc.WithInsecure()) + conn, err := grpc.DialContext(ctx, host, + grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, nil, err } @@ -602,7 +604,6 @@ func (c *collection) doCommitCall(ctx context.Context, call *commitCall, errs [] j++ } } - return } func hasFollowingTransform(writes []*pb.Write, i int) bool { diff --git a/vendor/gocloud.dev/docstore/gcpfirestore/query.go b/vendor/gocloud.dev/docstore/gcpfirestore/query.go index 0a551ff70d..ea714c134e 100644 --- a/vendor/gocloud.dev/docstore/gcpfirestore/query.go +++ b/vendor/gocloud.dev/docstore/gcpfirestore/query.go @@ -150,6 +150,17 @@ func evaluateFilter(f driver.Filter, doc driver.Document) bool { return applyComparison(f.Op, strings.Compare(lhs.String(), rhs.String())) } + if lhs.Kind() == reflect.Bool { + if rhs.Kind() != reflect.Bool { + return false + } + cmp := 0 + if lhs.Bool() != rhs.Bool() { + cmp = -1 + } + return applyComparison(f.Op, cmp) + } + cmp, err := driver.CompareNumbers(lhs, rhs) if err != nil { return false @@ -298,7 +309,8 @@ func (c *collection) filterToProto(f driver.Filter) (*pb.StructuredQuery_Filter, FilterType: &pb.StructuredQuery_Filter_UnaryFilter{ UnaryFilter: &pb.StructuredQuery_UnaryFilter{ OperandType: &pb.StructuredQuery_UnaryFilter_Field{ - Field: fieldRef(f.FieldPath)}, + Field: fieldRef(f.FieldPath), + }, Op: uop, }, }, diff --git a/vendor/gocloud.dev/docstore/query.go b/vendor/gocloud.dev/docstore/query.go index 14703f5ff9..847d3ae8cc 100644 --- a/vendor/gocloud.dev/docstore/query.go +++ b/vendor/gocloud.dev/docstore/query.go @@ -38,7 +38,7 @@ func (c *Collection) Query() *Query { // Where expresses a condition on the query. // Valid ops are: "=", ">", "<", ">=", "<=, "in", "not-in". -// Valid values are strings, integers, floating-point numbers, and time.Time values. +// Valid values are strings, integers, floating-point numbers, time.Time and boolean (only for "=", "in" and "not-in") values. 
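[Editorial aside, not part of the patch: with the validEqualValue change introduced above, docstore accepts boolean filter values for "=", "in" and "not-in". A short usage sketch; the package name, function name and "Archived" field are hypothetical.]

package docstoreexample

import (
	"context"
	"fmt"
	"io"

	"gocloud.dev/docstore"
)

// listActive prints up to 10 documents whose "Archived" field is false.
func listActive(ctx context.Context, coll *docstore.Collection) error {
	iter := coll.Query().Where("Archived", "=", false).Limit(10).Get(ctx)
	defer iter.Stop()
	for {
		doc := map[string]interface{}{}
		err := iter.Next(ctx, doc)
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println(doc)
	}
}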
func (q *Query) Where(fp FieldPath, op string, value interface{}) *Query { if q.err != nil { return q @@ -66,7 +66,7 @@ func (q *Query) Where(fp FieldPath, op string, value interface{}) *Query { type valueValidator func(interface{}) bool var validOp = map[string]valueValidator{ - "=": validFilterValue, + "=": validEqualValue, ">": validFilterValue, "<": validFilterValue, ">=": validFilterValue, @@ -75,6 +75,16 @@ var validOp = map[string]valueValidator{ "not-in": validFilterSlice, } +func validEqualValue(v interface{}) bool { + if v == nil { + return false + } + if reflect.TypeOf(v).Kind() == reflect.Bool { + return true + } + return validFilterValue(v) +} + func validFilterValue(v interface{}) bool { if v == nil { return false @@ -102,7 +112,7 @@ func validFilterSlice(v interface{}) bool { } vv := reflect.ValueOf(v) for i := 0; i < vv.Len(); i++ { - if !validFilterValue(vv.Index(i).Interface()) { + if !validEqualValue(vv.Index(i).Interface()) { return false } } diff --git a/vendor/gocloud.dev/internal/retry/retry.go b/vendor/gocloud.dev/internal/retry/retry.go index cc8aebf58e..d06f0dace1 100644 --- a/vendor/gocloud.dev/internal/retry/retry.go +++ b/vendor/gocloud.dev/internal/retry/retry.go @@ -42,7 +42,8 @@ func Call(ctx context.Context, bo gax.Backoff, isRetryable func(error) bool, f f // Split out for testing. func call(ctx context.Context, bo gax.Backoff, isRetryable func(error) bool, f func() error, - sleep func(context.Context, time.Duration) error) error { + sleep func(context.Context, time.Duration) error, +) error { // Do nothing if context is done on entry. if err := ctx.Err(); err != nil { return &ContextError{CtxErr: err} diff --git a/vendor/gocloud.dev/internal/useragent/useragent.go b/vendor/gocloud.dev/internal/useragent/useragent.go index 6f7a42b74d..3ef6bac983 100644 --- a/vendor/gocloud.dev/internal/useragent/useragent.go +++ b/vendor/gocloud.dev/internal/useragent/useragent.go @@ -26,7 +26,7 @@ import ( const ( prefix = "go-cloud" - version = "0.37.0" + version = "0.40.0" ) // ClientOption returns an option.ClientOption that sets a Go CDK User-Agent. diff --git a/vendor/gocloud.dev/pubsub/kafkapubsub/kafka.go b/vendor/gocloud.dev/pubsub/kafkapubsub/kafka.go index 45d28e88c6..4a20fff2ba 100644 --- a/vendor/gocloud.dev/pubsub/kafkapubsub/kafka.go +++ b/vendor/gocloud.dev/pubsub/kafkapubsub/kafka.go @@ -136,7 +136,9 @@ const Scheme = "kafka" // URLOpener opens Kafka URLs like "kafka://mytopic" for topics and // "kafka://group?topic=mytopic" for subscriptions. // -// For topics, the URL's host+path is used as the topic name. +// For topics, the URL's host+path is used as the topic name, +// and the "key_name" query parameter is used to extract the routing key +// from metadata. // // For subscriptions, the URL's host+path is used as the group name, // and the "topic" query parameter(s) are used as the set of topics to @@ -159,9 +161,19 @@ type URLOpener struct { // OpenTopicURL opens a pubsub.Topic based on u. 
func (o *URLOpener) OpenTopicURL(ctx context.Context, u *url.URL) (*pubsub.Topic, error) { - for param := range u.Query() { - return nil, fmt.Errorf("open topic %v: invalid query parameter %q", u, param) + for param, value := range u.Query() { + switch param { + case "key_name": + if len(value) != 1 || len(value[0]) == 0 { + return nil, fmt.Errorf("open topic %v: invalid query parameter %q", u, param) + } + + o.TopicOptions.KeyName = value[0] + default: + return nil, fmt.Errorf("open topic %v: invalid query parameter %q", u, param) + } } + topicName := path.Join(u.Host, u.Path) return OpenTopic(o.Brokers, o.Config, topicName, &o.TopicOptions) } diff --git a/vendor/gocloud.dev/pubsub/pubsub.go b/vendor/gocloud.dev/pubsub/pubsub.go index f68b17e03e..9569747cfe 100644 --- a/vendor/gocloud.dev/pubsub/pubsub.go +++ b/vendor/gocloud.dev/pubsub/pubsub.go @@ -66,6 +66,7 @@ package pubsub // import "gocloud.dev/pubsub" import ( "context" + "errors" "fmt" "log" "math" @@ -225,11 +226,6 @@ type Topic struct { cancel func() } -type msgErrChan struct { - msg *Message - errChan chan error -} - // Send publishes a message. It only returns after the message has been // sent, or failed to be sent. Send can be called from multiple goroutines // at once. @@ -276,7 +272,7 @@ func (t *Topic) Shutdown(ctx context.Context) (err error) { defer func() { t.tracer.End(ctx, err) }() t.mu.Lock() - if t.err == errTopicShutdown { + if errors.Is(t.err, errTopicShutdown) { defer t.mu.Unlock() return t.err } @@ -319,7 +315,6 @@ var NewTopic = newTopic // newSendBatcher creates a batcher for topics, for use with NewTopic. func newSendBatcher(ctx context.Context, t *Topic, dt driver.Topic, opts *batcher.Options) *batcher.Batcher { - const maxHandlers = 1 handler := func(items interface{}) error { dms := items.([]*driver.Message) err := retry.Call(ctx, gax.Backoff{}, dt.IsRetryable, func() (err error) { @@ -486,10 +481,10 @@ func (s *Subscription) updateBatchSize() int { // We first combine the previous value and the new value, with weighting // based on decay, and then cap the growth/shrinkage. newBatchSize := s.runningBatchSize*(1-decay) + idealBatchSize*decay - if max := s.runningBatchSize * maxGrowthFactor; newBatchSize > max { - s.runningBatchSize = max - } else if min := s.runningBatchSize * maxShrinkFactor; newBatchSize < min { - s.runningBatchSize = min + if maxSize := s.runningBatchSize * maxGrowthFactor; newBatchSize > maxSize { + s.runningBatchSize = maxSize + } else if minSize := s.runningBatchSize * maxShrinkFactor; newBatchSize < minSize { + s.runningBatchSize = minSize } else { s.runningBatchSize = newBatchSize } @@ -565,17 +560,28 @@ func (s *Subscription) Receive(ctx context.Context) (_ *Message, err error) { if s.preReceiveBatchHook != nil { s.preReceiveBatchHook(batchSize) } - msgs, err := s.getNextBatch(batchSize) - s.mu.Lock() - defer s.mu.Unlock() - if err != nil { - // Non-retryable error from ReceiveBatch -> permanent error. - s.err = err - } else if len(msgs) > 0 { - s.q = append(s.q, msgs...) + resultChannel := s.getNextBatch(batchSize) + for msgsOrError := range resultChannel { + if msgsOrError.msgs != nil && len(msgsOrError.msgs) > 0 { + // messages received from channel + s.mu.Lock() + s.q = append(s.q, msgsOrError.msgs...) 
+ s.mu.Unlock() + // notify that queue should now have messages + s.waitc <- struct{}{} + } else if msgsOrError.err != nil { + // err can receive message only after batch group completes + // Non-retryable error from ReceiveBatch -> permanent error + s.mu.Lock() + s.err = msgsOrError.err + s.mu.Unlock() + } } + // batch reception finished + s.mu.Lock() close(s.waitc) s.waitc = nil + s.mu.Unlock() }() } if len(s.q) > 0 { @@ -625,11 +631,11 @@ func (s *Subscription) Receive(ctx context.Context) (_ *Message, err error) { } // A call to ReceiveBatch must be in flight. Wait for it. waitc := s.waitc - s.mu.Unlock() + s.mu.Unlock() // unlock to allow message or error processing from background goroutine select { case <-waitc: - s.mu.Lock() // Continue to top of loop. + s.mu.Lock() case <-ctx.Done(): s.mu.Lock() return nil, ctx.Err() @@ -637,16 +643,19 @@ func (s *Subscription) Receive(ctx context.Context) (_ *Message, err error) { } } -// getNextBatch gets the next batch of messages from the server and returns it. -func (s *Subscription) getNextBatch(nMessages int) ([]*driver.Message, error) { - var mu sync.Mutex - var q []*driver.Message +type msgsOrError struct { + msgs []*driver.Message + err error +} +// getNextBatch gets the next batch of messages from the server. It will return a channel that will itself return the +// messages as they come from each independent batch, or an operation error +func (s *Subscription) getNextBatch(nMessages int) chan msgsOrError { // Split nMessages into batches based on recvBatchOpts; we'll make a // separate ReceiveBatch call for each batch, and aggregate the results in // msgs. batches := batcher.Split(nMessages, s.recvBatchOpts) - + result := make(chan msgsOrError, len(batches)) g, ctx := errgroup.WithContext(s.backgroundCtx) for _, maxMessagesInBatch := range batches { // Make a copy of the loop variable since it will be used by a goroutine. @@ -663,16 +672,18 @@ func (s *Subscription) getNextBatch(nMessages int) ([]*driver.Message, error) { if err != nil { return wrapError(s.driver, err) } - mu.Lock() - defer mu.Unlock() - q = append(q, msgs...) + result <- msgsOrError{msgs: msgs} return nil }) } - if err := g.Wait(); err != nil { - return nil, err - } - return q, nil + go func() { + // wait on group completion on the background and proper channel closing + if err := g.Wait(); err != nil { + result <- msgsOrError{err: err} + } + close(result) + }() + return result } var errSubscriptionShutdown = gcerr.Newf(gcerr.FailedPrecondition, nil, "pubsub: Subscription has been Shutdown") @@ -683,7 +694,7 @@ func (s *Subscription) Shutdown(ctx context.Context) (err error) { defer func() { s.tracer.End(ctx, err) }() s.mu.Lock() - if s.err == errSubscriptionShutdown { + if errors.Is(s.err, errSubscriptionShutdown) { // Already Shutdown. defer s.mu.Unlock() return s.err @@ -758,7 +769,6 @@ func newSubscription(ds driver.Subscription, recvBatchOpts, ackBatcherOpts *batc } func newAckBatcher(ctx context.Context, s *Subscription, ds driver.Subscription, opts *batcher.Options) *batcher.Batcher { - const maxHandlers = 1 handler := func(items interface{}) error { var acks, nacks []driver.AckID for _, a := range items.([]*driver.AckInfo) { diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/exp/LICENSE +++ b/vendor/golang.org/x/exp/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/xerrors/LICENSE b/vendor/golang.org/x/xerrors/LICENSE index e4a47e17f1..c3c8b7d2b8 100644 --- a/vendor/golang.org/x/xerrors/LICENSE +++ b/vendor/golang.org/x/xerrors/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2019 The Go Authors. All rights reserved. +Copyright 2019 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 3bfd292b39..fd2e72c12d 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.201.0" +const Version = "0.203.0" diff --git a/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go b/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go new file mode 100644 index 0000000000..6e01be017c --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go @@ -0,0 +1,892 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/distribution.proto + +package distribution + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// `Distribution` contains summary statistics for a population of values. It +// optionally contains a histogram representing the distribution of those values +// across a set of buckets. +// +// The summary statistics are the count, mean, sum of the squared deviation from +// the mean, the minimum, and the maximum of the set of population of values. +// The histogram is based on a sequence of buckets and gives a count of values +// that fall into each bucket. The boundaries of the buckets are given either +// explicitly or by formulas for buckets of fixed or exponentially increasing +// widths. +// +// Although it is not forbidden, it is generally a bad idea to include +// non-finite values (infinities or NaNs) in the population of values, as this +// will render the `mean` and `sum_of_squared_deviation` fields meaningless. +type Distribution struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of values in the population. Must be non-negative. This value + // must equal the sum of the values in `bucket_counts` if a histogram is + // provided. + Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + // The arithmetic mean of the values in the population. If `count` is zero + // then this field must be zero. + Mean float64 `protobuf:"fixed64,2,opt,name=mean,proto3" json:"mean,omitempty"` + // The sum of squared deviations from the mean of the values in the + // population. For values x_i this is: + // + // Sum[i=1..n]((x_i - mean)^2) + // + // Knuth, "The Art of Computer Programming", Vol. 2, page 232, 3rd edition + // describes Welford's method for accumulating this sum in one pass. + // + // If `count` is zero then this field must be zero. + SumOfSquaredDeviation float64 `protobuf:"fixed64,3,opt,name=sum_of_squared_deviation,json=sumOfSquaredDeviation,proto3" json:"sum_of_squared_deviation,omitempty"` + // If specified, contains the range of the population values. The field + // must not be present if the `count` is zero. + Range *Distribution_Range `protobuf:"bytes,4,opt,name=range,proto3" json:"range,omitempty"` + // Defines the histogram bucket boundaries. If the distribution does not + // contain a histogram, then omit this field. + BucketOptions *Distribution_BucketOptions `protobuf:"bytes,6,opt,name=bucket_options,json=bucketOptions,proto3" json:"bucket_options,omitempty"` + // The number of values in each bucket of the histogram, as described in + // `bucket_options`. If the distribution does not have a histogram, then omit + // this field. If there is a histogram, then the sum of the values in + // `bucket_counts` must equal the value in the `count` field of the + // distribution. + // + // If present, `bucket_counts` should contain N values, where N is the number + // of buckets specified in `bucket_options`. If you supply fewer than N + // values, the remaining values are assumed to be 0. + // + // The order of the values in `bucket_counts` follows the bucket numbering + // schemes described for the three bucket types. The first value must be the + // count for the underflow bucket (number 0). The next N-2 values are the + // counts for the finite buckets (number 1 through N-2). The N'th value in + // `bucket_counts` is the count for the overflow bucket (number N-1). 
+ BucketCounts []int64 `protobuf:"varint,7,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"` + // Must be in increasing order of `value` field. + Exemplars []*Distribution_Exemplar `protobuf:"bytes,10,rep,name=exemplars,proto3" json:"exemplars,omitempty"` +} + +func (x *Distribution) Reset() { + *x = Distribution{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_distribution_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Distribution) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Distribution) ProtoMessage() {} + +func (x *Distribution) ProtoReflect() protoreflect.Message { + mi := &file_google_api_distribution_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Distribution.ProtoReflect.Descriptor instead. +func (*Distribution) Descriptor() ([]byte, []int) { + return file_google_api_distribution_proto_rawDescGZIP(), []int{0} +} + +func (x *Distribution) GetCount() int64 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *Distribution) GetMean() float64 { + if x != nil { + return x.Mean + } + return 0 +} + +func (x *Distribution) GetSumOfSquaredDeviation() float64 { + if x != nil { + return x.SumOfSquaredDeviation + } + return 0 +} + +func (x *Distribution) GetRange() *Distribution_Range { + if x != nil { + return x.Range + } + return nil +} + +func (x *Distribution) GetBucketOptions() *Distribution_BucketOptions { + if x != nil { + return x.BucketOptions + } + return nil +} + +func (x *Distribution) GetBucketCounts() []int64 { + if x != nil { + return x.BucketCounts + } + return nil +} + +func (x *Distribution) GetExemplars() []*Distribution_Exemplar { + if x != nil { + return x.Exemplars + } + return nil +} + +// The range of the population values. +type Distribution_Range struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The minimum of the population values. + Min float64 `protobuf:"fixed64,1,opt,name=min,proto3" json:"min,omitempty"` + // The maximum of the population values. + Max float64 `protobuf:"fixed64,2,opt,name=max,proto3" json:"max,omitempty"` +} + +func (x *Distribution_Range) Reset() { + *x = Distribution_Range{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_distribution_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Distribution_Range) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Distribution_Range) ProtoMessage() {} + +func (x *Distribution_Range) ProtoReflect() protoreflect.Message { + mi := &file_google_api_distribution_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Distribution_Range.ProtoReflect.Descriptor instead. 
+func (*Distribution_Range) Descriptor() ([]byte, []int) { + return file_google_api_distribution_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Distribution_Range) GetMin() float64 { + if x != nil { + return x.Min + } + return 0 +} + +func (x *Distribution_Range) GetMax() float64 { + if x != nil { + return x.Max + } + return 0 +} + +// `BucketOptions` describes the bucket boundaries used to create a histogram +// for the distribution. The buckets can be in a linear sequence, an +// exponential sequence, or each bucket can be specified explicitly. +// `BucketOptions` does not include the number of values in each bucket. +// +// A bucket has an inclusive lower bound and exclusive upper bound for the +// values that are counted for that bucket. The upper bound of a bucket must +// be strictly greater than the lower bound. The sequence of N buckets for a +// distribution consists of an underflow bucket (number 0), zero or more +// finite buckets (number 1 through N - 2) and an overflow bucket (number N - +// 1). The buckets are contiguous: the lower bound of bucket i (i > 0) is the +// same as the upper bound of bucket i - 1. The buckets span the whole range +// of finite values: lower bound of the underflow bucket is -infinity and the +// upper bound of the overflow bucket is +infinity. The finite buckets are +// so-called because both bounds are finite. +type Distribution_BucketOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Exactly one of these three fields must be set. + // + // Types that are assignable to Options: + // + // *Distribution_BucketOptions_LinearBuckets + // *Distribution_BucketOptions_ExponentialBuckets + // *Distribution_BucketOptions_ExplicitBuckets + Options isDistribution_BucketOptions_Options `protobuf_oneof:"options"` +} + +func (x *Distribution_BucketOptions) Reset() { + *x = Distribution_BucketOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_distribution_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Distribution_BucketOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Distribution_BucketOptions) ProtoMessage() {} + +func (x *Distribution_BucketOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_api_distribution_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Distribution_BucketOptions.ProtoReflect.Descriptor instead. 
+func (*Distribution_BucketOptions) Descriptor() ([]byte, []int) { + return file_google_api_distribution_proto_rawDescGZIP(), []int{0, 1} +} + +func (m *Distribution_BucketOptions) GetOptions() isDistribution_BucketOptions_Options { + if m != nil { + return m.Options + } + return nil +} + +func (x *Distribution_BucketOptions) GetLinearBuckets() *Distribution_BucketOptions_Linear { + if x, ok := x.GetOptions().(*Distribution_BucketOptions_LinearBuckets); ok { + return x.LinearBuckets + } + return nil +} + +func (x *Distribution_BucketOptions) GetExponentialBuckets() *Distribution_BucketOptions_Exponential { + if x, ok := x.GetOptions().(*Distribution_BucketOptions_ExponentialBuckets); ok { + return x.ExponentialBuckets + } + return nil +} + +func (x *Distribution_BucketOptions) GetExplicitBuckets() *Distribution_BucketOptions_Explicit { + if x, ok := x.GetOptions().(*Distribution_BucketOptions_ExplicitBuckets); ok { + return x.ExplicitBuckets + } + return nil +} + +type isDistribution_BucketOptions_Options interface { + isDistribution_BucketOptions_Options() +} + +type Distribution_BucketOptions_LinearBuckets struct { + // The linear bucket. + LinearBuckets *Distribution_BucketOptions_Linear `protobuf:"bytes,1,opt,name=linear_buckets,json=linearBuckets,proto3,oneof"` +} + +type Distribution_BucketOptions_ExponentialBuckets struct { + // The exponential buckets. + ExponentialBuckets *Distribution_BucketOptions_Exponential `protobuf:"bytes,2,opt,name=exponential_buckets,json=exponentialBuckets,proto3,oneof"` +} + +type Distribution_BucketOptions_ExplicitBuckets struct { + // The explicit buckets. + ExplicitBuckets *Distribution_BucketOptions_Explicit `protobuf:"bytes,3,opt,name=explicit_buckets,json=explicitBuckets,proto3,oneof"` +} + +func (*Distribution_BucketOptions_LinearBuckets) isDistribution_BucketOptions_Options() {} + +func (*Distribution_BucketOptions_ExponentialBuckets) isDistribution_BucketOptions_Options() {} + +func (*Distribution_BucketOptions_ExplicitBuckets) isDistribution_BucketOptions_Options() {} + +// Exemplars are example points that may be used to annotate aggregated +// distribution values. They are metadata that gives information about a +// particular value added to a Distribution bucket, such as a trace ID that +// was active when a value was added. They may contain further information, +// such as a example values and timestamps, origin, etc. +type Distribution_Exemplar struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Value of the exemplar point. This value determines to which bucket the + // exemplar belongs. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + // The observation (sampling) time of the above value. + Timestamp *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Contextual information about the example value. Examples are: + // + // Trace: type.googleapis.com/google.monitoring.v3.SpanContext + // + // Literal string: type.googleapis.com/google.protobuf.StringValue + // + // Labels dropped during aggregation: + // type.googleapis.com/google.monitoring.v3.DroppedLabels + // + // There may be only a single attachment of any given message type in a + // single exemplar, and this is enforced by the system. 
+ Attachments []*anypb.Any `protobuf:"bytes,3,rep,name=attachments,proto3" json:"attachments,omitempty"` +} + +func (x *Distribution_Exemplar) Reset() { + *x = Distribution_Exemplar{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_distribution_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Distribution_Exemplar) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Distribution_Exemplar) ProtoMessage() {} + +func (x *Distribution_Exemplar) ProtoReflect() protoreflect.Message { + mi := &file_google_api_distribution_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Distribution_Exemplar.ProtoReflect.Descriptor instead. +func (*Distribution_Exemplar) Descriptor() ([]byte, []int) { + return file_google_api_distribution_proto_rawDescGZIP(), []int{0, 2} +} + +func (x *Distribution_Exemplar) GetValue() float64 { + if x != nil { + return x.Value + } + return 0 +} + +func (x *Distribution_Exemplar) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp + } + return nil +} + +func (x *Distribution_Exemplar) GetAttachments() []*anypb.Any { + if x != nil { + return x.Attachments + } + return nil +} + +// Specifies a linear sequence of buckets that all have the same width +// (except overflow and underflow). Each bucket represents a constant +// absolute uncertainty on the specific value in the bucket. +// +// There are `num_finite_buckets + 2` (= N) buckets. Bucket `i` has the +// following boundaries: +// +// Upper bound (0 <= i < N-1): offset + (width * i). +// +// Lower bound (1 <= i < N): offset + (width * (i - 1)). +type Distribution_BucketOptions_Linear struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Must be greater than 0. + NumFiniteBuckets int32 `protobuf:"varint,1,opt,name=num_finite_buckets,json=numFiniteBuckets,proto3" json:"num_finite_buckets,omitempty"` + // Must be greater than 0. + Width float64 `protobuf:"fixed64,2,opt,name=width,proto3" json:"width,omitempty"` + // Lower bound of the first bucket. + Offset float64 `protobuf:"fixed64,3,opt,name=offset,proto3" json:"offset,omitempty"` +} + +func (x *Distribution_BucketOptions_Linear) Reset() { + *x = Distribution_BucketOptions_Linear{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_distribution_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Distribution_BucketOptions_Linear) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Distribution_BucketOptions_Linear) ProtoMessage() {} + +func (x *Distribution_BucketOptions_Linear) ProtoReflect() protoreflect.Message { + mi := &file_google_api_distribution_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Distribution_BucketOptions_Linear.ProtoReflect.Descriptor instead. 
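+
+// For illustration (assumed values, not part of the generated schema): a
+// Linear spec with num_finite_buckets = 3, width = 10 and offset = 0 yields
+// N = 5 buckets: (-Inf, 0), [0, 10), [10, 20), [20, 30) and [30, +Inf),
+// following the upper/lower bound formulas documented on Linear above.
+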
+func (*Distribution_BucketOptions_Linear) Descriptor() ([]byte, []int) { + return file_google_api_distribution_proto_rawDescGZIP(), []int{0, 1, 0} +} + +func (x *Distribution_BucketOptions_Linear) GetNumFiniteBuckets() int32 { + if x != nil { + return x.NumFiniteBuckets + } + return 0 +} + +func (x *Distribution_BucketOptions_Linear) GetWidth() float64 { + if x != nil { + return x.Width + } + return 0 +} + +func (x *Distribution_BucketOptions_Linear) GetOffset() float64 { + if x != nil { + return x.Offset + } + return 0 +} + +// Specifies an exponential sequence of buckets that have a width that is +// proportional to the value of the lower bound. Each bucket represents a +// constant relative uncertainty on a specific value in the bucket. +// +// There are `num_finite_buckets + 2` (= N) buckets. Bucket `i` has the +// following boundaries: +// +// Upper bound (0 <= i < N-1): scale * (growth_factor ^ i). +// +// Lower bound (1 <= i < N): scale * (growth_factor ^ (i - 1)). +type Distribution_BucketOptions_Exponential struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Must be greater than 0. + NumFiniteBuckets int32 `protobuf:"varint,1,opt,name=num_finite_buckets,json=numFiniteBuckets,proto3" json:"num_finite_buckets,omitempty"` + // Must be greater than 1. + GrowthFactor float64 `protobuf:"fixed64,2,opt,name=growth_factor,json=growthFactor,proto3" json:"growth_factor,omitempty"` + // Must be greater than 0. + Scale float64 `protobuf:"fixed64,3,opt,name=scale,proto3" json:"scale,omitempty"` +} + +func (x *Distribution_BucketOptions_Exponential) Reset() { + *x = Distribution_BucketOptions_Exponential{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_distribution_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Distribution_BucketOptions_Exponential) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Distribution_BucketOptions_Exponential) ProtoMessage() {} + +func (x *Distribution_BucketOptions_Exponential) ProtoReflect() protoreflect.Message { + mi := &file_google_api_distribution_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Distribution_BucketOptions_Exponential.ProtoReflect.Descriptor instead. +func (*Distribution_BucketOptions_Exponential) Descriptor() ([]byte, []int) { + return file_google_api_distribution_proto_rawDescGZIP(), []int{0, 1, 1} +} + +func (x *Distribution_BucketOptions_Exponential) GetNumFiniteBuckets() int32 { + if x != nil { + return x.NumFiniteBuckets + } + return 0 +} + +func (x *Distribution_BucketOptions_Exponential) GetGrowthFactor() float64 { + if x != nil { + return x.GrowthFactor + } + return 0 +} + +func (x *Distribution_BucketOptions_Exponential) GetScale() float64 { + if x != nil { + return x.Scale + } + return 0 +} + +// Specifies a set of buckets with arbitrary widths. +// +// There are `size(bounds) + 1` (= N) buckets. Bucket `i` has the following +// boundaries: +// +// Upper bound (0 <= i < N-1): bounds[i] +// Lower bound (1 <= i < N); bounds[i - 1] +// +// The `bounds` field must contain at least one element. If `bounds` has +// only one element, then there are no finite buckets, and that single +// element is the common boundary of the overflow and underflow buckets. 
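+//
+// For illustration (assumed values, not part of the generated schema):
+// `bounds = [0, 10, 20]` gives N = 4 buckets: the underflow bucket
+// (-Inf, 0), the finite buckets [0, 10) and [10, 20), and the overflow
+// bucket [20, +Inf). A `bucket_counts` list on the parent Distribution
+// would then carry one count per bucket, in this same order.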
+type Distribution_BucketOptions_Explicit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The values must be monotonically increasing. + Bounds []float64 `protobuf:"fixed64,1,rep,packed,name=bounds,proto3" json:"bounds,omitempty"` +} + +func (x *Distribution_BucketOptions_Explicit) Reset() { + *x = Distribution_BucketOptions_Explicit{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_distribution_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Distribution_BucketOptions_Explicit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Distribution_BucketOptions_Explicit) ProtoMessage() {} + +func (x *Distribution_BucketOptions_Explicit) ProtoReflect() protoreflect.Message { + mi := &file_google_api_distribution_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Distribution_BucketOptions_Explicit.ProtoReflect.Descriptor instead. +func (*Distribution_BucketOptions_Explicit) Descriptor() ([]byte, []int) { + return file_google_api_distribution_proto_rawDescGZIP(), []int{0, 1, 2} +} + +func (x *Distribution_BucketOptions_Explicit) GetBounds() []float64 { + if x != nil { + return x.Bounds + } + return nil +} + +var File_google_api_distribution_proto protoreflect.FileDescriptor + +var file_google_api_distribution_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x64, 0x69, 0x73, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x19, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x08, 0x0a, 0x0c, 0x44, 0x69, 0x73, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x6d, 0x65, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x04, 0x6d, 0x65, + 0x61, 0x6e, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x75, 0x6d, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x71, 0x75, + 0x61, 0x72, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x76, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x15, 0x73, 0x75, 0x6d, 0x4f, 0x66, 0x53, 0x71, 0x75, 0x61, 0x72, + 0x65, 0x64, 0x44, 0x65, 0x76, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x05, 0x72, + 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x4d, 0x0a, 0x0e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 
0x69, 0x62, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x03, 0x52, 0x0c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x3f, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x72, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x1a, 0x2b, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x10, 0x0a, 0x03, 0x6d, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x6d, 0x69, + 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, + 0x6d, 0x61, 0x78, 0x1a, 0xb9, 0x04, 0x0a, 0x0d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x56, 0x0a, 0x0e, 0x6c, 0x69, 0x6e, 0x65, 0x61, 0x72, 0x5f, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4c, 0x69, 0x6e, 0x65, 0x61, 0x72, 0x48, 0x00, 0x52, 0x0d, + 0x6c, 0x69, 0x6e, 0x65, 0x61, 0x72, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x65, 0x0a, + 0x13, 0x65, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x48, 0x00, + 0x52, 0x12, 0x65, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x12, 0x5c, 0x0a, 0x10, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, + 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x69, 0x73, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x48, + 0x00, 0x52, 0x0f, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x1a, 0x64, 0x0a, 0x06, 0x4c, 0x69, 0x6e, 0x65, 0x61, 0x72, 0x12, 0x2c, 0x0a, 0x12, + 0x6e, 0x75, 0x6d, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x46, 0x69, 0x6e, + 0x69, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x77, 0x69, + 0x64, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x77, 0x69, 0x64, 0x74, 0x68, + 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, + 
0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x1a, 0x76, 0x0a, 0x0b, 0x45, 0x78, 0x70, 0x6f, + 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x75, 0x6d, 0x5f, 0x66, + 0x69, 0x6e, 0x69, 0x74, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x46, 0x69, 0x6e, 0x69, 0x74, 0x65, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x67, 0x72, 0x6f, 0x77, 0x74, 0x68, 0x5f, + 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x67, 0x72, + 0x6f, 0x77, 0x74, 0x68, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, + 0x61, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x73, 0x63, 0x61, 0x6c, 0x65, + 0x1a, 0x22, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x01, 0x52, 0x06, 0x62, 0x6f, + 0x75, 0x6e, 0x64, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, + 0x92, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x36, 0x0a, 0x0b, + 0x61, 0x74, 0x74, 0x61, 0x63, 0x68, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x74, 0x74, 0x61, 0x63, 0x68, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x42, 0x71, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x11, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x43, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, + 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_distribution_proto_rawDescOnce sync.Once + file_google_api_distribution_proto_rawDescData = file_google_api_distribution_proto_rawDesc +) + +func file_google_api_distribution_proto_rawDescGZIP() []byte { + file_google_api_distribution_proto_rawDescOnce.Do(func() { + file_google_api_distribution_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_distribution_proto_rawDescData) + }) + return file_google_api_distribution_proto_rawDescData +} + +var file_google_api_distribution_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_google_api_distribution_proto_goTypes = []interface{}{ + (*Distribution)(nil), // 0: google.api.Distribution + (*Distribution_Range)(nil), // 1: google.api.Distribution.Range + (*Distribution_BucketOptions)(nil), // 2: 
google.api.Distribution.BucketOptions + (*Distribution_Exemplar)(nil), // 3: google.api.Distribution.Exemplar + (*Distribution_BucketOptions_Linear)(nil), // 4: google.api.Distribution.BucketOptions.Linear + (*Distribution_BucketOptions_Exponential)(nil), // 5: google.api.Distribution.BucketOptions.Exponential + (*Distribution_BucketOptions_Explicit)(nil), // 6: google.api.Distribution.BucketOptions.Explicit + (*timestamppb.Timestamp)(nil), // 7: google.protobuf.Timestamp + (*anypb.Any)(nil), // 8: google.protobuf.Any +} +var file_google_api_distribution_proto_depIdxs = []int32{ + 1, // 0: google.api.Distribution.range:type_name -> google.api.Distribution.Range + 2, // 1: google.api.Distribution.bucket_options:type_name -> google.api.Distribution.BucketOptions + 3, // 2: google.api.Distribution.exemplars:type_name -> google.api.Distribution.Exemplar + 4, // 3: google.api.Distribution.BucketOptions.linear_buckets:type_name -> google.api.Distribution.BucketOptions.Linear + 5, // 4: google.api.Distribution.BucketOptions.exponential_buckets:type_name -> google.api.Distribution.BucketOptions.Exponential + 6, // 5: google.api.Distribution.BucketOptions.explicit_buckets:type_name -> google.api.Distribution.BucketOptions.Explicit + 7, // 6: google.api.Distribution.Exemplar.timestamp:type_name -> google.protobuf.Timestamp + 8, // 7: google.api.Distribution.Exemplar.attachments:type_name -> google.protobuf.Any + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_google_api_distribution_proto_init() } +func file_google_api_distribution_proto_init() { + if File_google_api_distribution_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_distribution_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Distribution); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_distribution_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Distribution_Range); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_distribution_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Distribution_BucketOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_distribution_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Distribution_Exemplar); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_distribution_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Distribution_BucketOptions_Linear); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_distribution_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Distribution_BucketOptions_Exponential); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return 
nil + } + } + file_google_api_distribution_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Distribution_BucketOptions_Explicit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_api_distribution_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Distribution_BucketOptions_LinearBuckets)(nil), + (*Distribution_BucketOptions_ExponentialBuckets)(nil), + (*Distribution_BucketOptions_ExplicitBuckets)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_distribution_proto_rawDesc, + NumEnums: 0, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_distribution_proto_goTypes, + DependencyIndexes: file_google_api_distribution_proto_depIdxs, + MessageInfos: file_google_api_distribution_proto_msgTypes, + }.Build() + File_google_api_distribution_proto = out.File + file_google_api_distribution_proto_rawDesc = nil + file_google_api_distribution_proto_goTypes = nil + file_google_api_distribution_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go b/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go new file mode 100644 index 0000000000..42bcacc363 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go @@ -0,0 +1,249 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/label.proto + +package label + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Value types that can be used as label values. +type LabelDescriptor_ValueType int32 + +const ( + // A variable-length string. This is the default. + LabelDescriptor_STRING LabelDescriptor_ValueType = 0 + // Boolean; true or false. + LabelDescriptor_BOOL LabelDescriptor_ValueType = 1 + // A 64-bit signed integer. + LabelDescriptor_INT64 LabelDescriptor_ValueType = 2 +) + +// Enum value maps for LabelDescriptor_ValueType. 
+var ( + LabelDescriptor_ValueType_name = map[int32]string{ + 0: "STRING", + 1: "BOOL", + 2: "INT64", + } + LabelDescriptor_ValueType_value = map[string]int32{ + "STRING": 0, + "BOOL": 1, + "INT64": 2, + } +) + +func (x LabelDescriptor_ValueType) Enum() *LabelDescriptor_ValueType { + p := new(LabelDescriptor_ValueType) + *p = x + return p +} + +func (x LabelDescriptor_ValueType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (LabelDescriptor_ValueType) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_label_proto_enumTypes[0].Descriptor() +} + +func (LabelDescriptor_ValueType) Type() protoreflect.EnumType { + return &file_google_api_label_proto_enumTypes[0] +} + +func (x LabelDescriptor_ValueType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use LabelDescriptor_ValueType.Descriptor instead. +func (LabelDescriptor_ValueType) EnumDescriptor() ([]byte, []int) { + return file_google_api_label_proto_rawDescGZIP(), []int{0, 0} +} + +// A description of a label. +type LabelDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The label key. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The type of data that can be assigned to the label. + ValueType LabelDescriptor_ValueType `protobuf:"varint,2,opt,name=value_type,json=valueType,proto3,enum=google.api.LabelDescriptor_ValueType" json:"value_type,omitempty"` + // A human-readable description for the label. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *LabelDescriptor) Reset() { + *x = LabelDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_label_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LabelDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LabelDescriptor) ProtoMessage() {} + +func (x *LabelDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_google_api_label_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LabelDescriptor.ProtoReflect.Descriptor instead. 
+func (*LabelDescriptor) Descriptor() ([]byte, []int) { + return file_google_api_label_proto_rawDescGZIP(), []int{0} +} + +func (x *LabelDescriptor) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *LabelDescriptor) GetValueType() LabelDescriptor_ValueType { + if x != nil { + return x.ValueType + } + return LabelDescriptor_STRING +} + +func (x *LabelDescriptor) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +var File_google_api_label_proto protoreflect.FileDescriptor + +var file_google_api_label_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x22, 0xb9, 0x01, 0x0a, 0x0f, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x44, 0x0a, 0x0a, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x2c, 0x0a, 0x09, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, + 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, + 0x42, 0x5f, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x42, 0x0a, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x35, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x3b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, + 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_label_proto_rawDescOnce sync.Once + file_google_api_label_proto_rawDescData = file_google_api_label_proto_rawDesc +) + +func file_google_api_label_proto_rawDescGZIP() []byte { + file_google_api_label_proto_rawDescOnce.Do(func() { + file_google_api_label_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_label_proto_rawDescData) + }) + return file_google_api_label_proto_rawDescData +} + +var file_google_api_label_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_label_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_api_label_proto_goTypes = []interface{}{ + (LabelDescriptor_ValueType)(0), // 0: google.api.LabelDescriptor.ValueType + (*LabelDescriptor)(nil), // 1: google.api.LabelDescriptor +} +var file_google_api_label_proto_depIdxs = []int32{ + 0, // 0: google.api.LabelDescriptor.value_type:type_name -> google.api.LabelDescriptor.ValueType + 1, // [1:1] 
is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_api_label_proto_init() } +func file_google_api_label_proto_init() { + if File_google_api_label_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_label_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LabelDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_label_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_label_proto_goTypes, + DependencyIndexes: file_google_api_label_proto_depIdxs, + EnumInfos: file_google_api_label_proto_enumTypes, + MessageInfos: file_google_api_label_proto_msgTypes, + }.Build() + File_google_api_label_proto = out.File + file_google_api_label_proto_rawDesc = nil + file_google_api_label_proto_goTypes = nil + file_google_api_label_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go new file mode 100644 index 0000000000..d4b89c98d1 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go @@ -0,0 +1,771 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/metric.proto + +package metric + +import ( + reflect "reflect" + sync "sync" + + api "google.golang.org/genproto/googleapis/api" + label "google.golang.org/genproto/googleapis/api/label" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The kind of measurement. It describes how the data is reported. +// For information on setting the start time and end time based on +// the MetricKind, see [TimeInterval][google.monitoring.v3.TimeInterval]. +type MetricDescriptor_MetricKind int32 + +const ( + // Do not use this default value. + MetricDescriptor_METRIC_KIND_UNSPECIFIED MetricDescriptor_MetricKind = 0 + // An instantaneous measurement of a value. 
+ MetricDescriptor_GAUGE MetricDescriptor_MetricKind = 1 + // The change in a value during a time interval. + MetricDescriptor_DELTA MetricDescriptor_MetricKind = 2 + // A value accumulated over a time interval. Cumulative + // measurements in a time series should have the same start time + // and increasing end times, until an event resets the cumulative + // value to zero and sets a new start time for the following + // points. + MetricDescriptor_CUMULATIVE MetricDescriptor_MetricKind = 3 +) + +// Enum value maps for MetricDescriptor_MetricKind. +var ( + MetricDescriptor_MetricKind_name = map[int32]string{ + 0: "METRIC_KIND_UNSPECIFIED", + 1: "GAUGE", + 2: "DELTA", + 3: "CUMULATIVE", + } + MetricDescriptor_MetricKind_value = map[string]int32{ + "METRIC_KIND_UNSPECIFIED": 0, + "GAUGE": 1, + "DELTA": 2, + "CUMULATIVE": 3, + } +) + +func (x MetricDescriptor_MetricKind) Enum() *MetricDescriptor_MetricKind { + p := new(MetricDescriptor_MetricKind) + *p = x + return p +} + +func (x MetricDescriptor_MetricKind) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (MetricDescriptor_MetricKind) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_metric_proto_enumTypes[0].Descriptor() +} + +func (MetricDescriptor_MetricKind) Type() protoreflect.EnumType { + return &file_google_api_metric_proto_enumTypes[0] +} + +func (x MetricDescriptor_MetricKind) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use MetricDescriptor_MetricKind.Descriptor instead. +func (MetricDescriptor_MetricKind) EnumDescriptor() ([]byte, []int) { + return file_google_api_metric_proto_rawDescGZIP(), []int{0, 0} +} + +// The value type of a metric. +type MetricDescriptor_ValueType int32 + +const ( + // Do not use this default value. + MetricDescriptor_VALUE_TYPE_UNSPECIFIED MetricDescriptor_ValueType = 0 + // The value is a boolean. + // This value type can be used only if the metric kind is `GAUGE`. + MetricDescriptor_BOOL MetricDescriptor_ValueType = 1 + // The value is a signed 64-bit integer. + MetricDescriptor_INT64 MetricDescriptor_ValueType = 2 + // The value is a double precision floating point number. + MetricDescriptor_DOUBLE MetricDescriptor_ValueType = 3 + // The value is a text string. + // This value type can be used only if the metric kind is `GAUGE`. + MetricDescriptor_STRING MetricDescriptor_ValueType = 4 + // The value is a [`Distribution`][google.api.Distribution]. + MetricDescriptor_DISTRIBUTION MetricDescriptor_ValueType = 5 + // The value is money. + MetricDescriptor_MONEY MetricDescriptor_ValueType = 6 +) + +// Enum value maps for MetricDescriptor_ValueType. 
+var ( + MetricDescriptor_ValueType_name = map[int32]string{ + 0: "VALUE_TYPE_UNSPECIFIED", + 1: "BOOL", + 2: "INT64", + 3: "DOUBLE", + 4: "STRING", + 5: "DISTRIBUTION", + 6: "MONEY", + } + MetricDescriptor_ValueType_value = map[string]int32{ + "VALUE_TYPE_UNSPECIFIED": 0, + "BOOL": 1, + "INT64": 2, + "DOUBLE": 3, + "STRING": 4, + "DISTRIBUTION": 5, + "MONEY": 6, + } +) + +func (x MetricDescriptor_ValueType) Enum() *MetricDescriptor_ValueType { + p := new(MetricDescriptor_ValueType) + *p = x + return p +} + +func (x MetricDescriptor_ValueType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (MetricDescriptor_ValueType) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_metric_proto_enumTypes[1].Descriptor() +} + +func (MetricDescriptor_ValueType) Type() protoreflect.EnumType { + return &file_google_api_metric_proto_enumTypes[1] +} + +func (x MetricDescriptor_ValueType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use MetricDescriptor_ValueType.Descriptor instead. +func (MetricDescriptor_ValueType) EnumDescriptor() ([]byte, []int) { + return file_google_api_metric_proto_rawDescGZIP(), []int{0, 1} +} + +// Defines a metric type and its schema. Once a metric descriptor is created, +// deleting or altering it stops data collection and makes the metric type's +// existing data unusable. +type MetricDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource name of the metric descriptor. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The metric type, including its DNS name prefix. The type is not + // URL-encoded. All user-defined metric types have the DNS name + // `custom.googleapis.com` or `external.googleapis.com`. Metric types should + // use a natural hierarchical grouping. For example: + // + // "custom.googleapis.com/invoice/paid/amount" + // "external.googleapis.com/prometheus/up" + // "appengine.googleapis.com/http/server/response_latencies" + Type string `protobuf:"bytes,8,opt,name=type,proto3" json:"type,omitempty"` + // The set of labels that can be used to describe a specific + // instance of this metric type. For example, the + // `appengine.googleapis.com/http/server/response_latencies` metric + // type has a label for the HTTP response code, `response_code`, so + // you can look at latencies for successful responses or just + // for responses that failed. + Labels []*label.LabelDescriptor `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty"` + // Whether the metric records instantaneous values, changes to a value, etc. + // Some combinations of `metric_kind` and `value_type` might not be supported. + MetricKind MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,proto3,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"` + // Whether the measurement is an integer, a floating-point number, etc. + // Some combinations of `metric_kind` and `value_type` might not be supported. + ValueType MetricDescriptor_ValueType `protobuf:"varint,4,opt,name=value_type,json=valueType,proto3,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"` + // The units in which the metric value is reported. It is only applicable + // if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` + // defines the representation of the stored metric values. 
+ // + // Different systems might scale the values to be more easily displayed (so a + // value of `0.02kBy` _might_ be displayed as `20By`, and a value of + // `3523kBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is + // `kBy`, then the value of the metric is always in thousands of bytes, no + // matter how it might be displayed. + // + // If you want a custom metric to record the exact number of CPU-seconds used + // by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is + // `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 + // CPU-seconds, then the value is written as `12005`. + // + // Alternatively, if you want a custom metric to record data in a more + // granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is + // `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), + // or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). + // + // The supported units are a subset of [The Unified Code for Units of + // Measure](https://unitsofmeasure.org/ucum.html) standard: + // + // **Basic units (UNIT)** + // + // * `bit` bit + // * `By` byte + // * `s` second + // * `min` minute + // * `h` hour + // * `d` day + // * `1` dimensionless + // + // **Prefixes (PREFIX)** + // + // * `k` kilo (10^3) + // * `M` mega (10^6) + // * `G` giga (10^9) + // * `T` tera (10^12) + // * `P` peta (10^15) + // * `E` exa (10^18) + // * `Z` zetta (10^21) + // * `Y` yotta (10^24) + // + // * `m` milli (10^-3) + // * `u` micro (10^-6) + // * `n` nano (10^-9) + // * `p` pico (10^-12) + // * `f` femto (10^-15) + // * `a` atto (10^-18) + // * `z` zepto (10^-21) + // * `y` yocto (10^-24) + // + // * `Ki` kibi (2^10) + // * `Mi` mebi (2^20) + // * `Gi` gibi (2^30) + // * `Ti` tebi (2^40) + // * `Pi` pebi (2^50) + // + // **Grammar** + // + // The grammar also includes these connectors: + // + // - `/` division or ratio (as an infix operator). For examples, + // `kBy/{email}` or `MiBy/10ms` (although you should almost never + // have `/s` in a metric `unit`; rates should always be computed at + // query time from the underlying cumulative or delta value). + // - `.` multiplication or composition (as an infix operator). For + // examples, `GBy.d` or `k{watt}.h`. + // + // The grammar for a unit is as follows: + // + // Expression = Component { "." Component } { "/" Component } ; + // + // Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] + // | Annotation + // | "1" + // ; + // + // Annotation = "{" NAME "}" ; + // + // Notes: + // + // - `Annotation` is just a comment if it follows a `UNIT`. If the annotation + // is used alone, then the unit is equivalent to `1`. For examples, + // `{request}/s == 1/s`, `By{transmitted}/s == By/s`. + // - `NAME` is a sequence of non-blank printable ASCII characters not + // containing `{` or `}`. + // - `1` represents a unitary [dimensionless + // unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such + // as in `1/s`. It is typically used when none of the basic units are + // appropriate. For example, "new users per day" can be represented as + // `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 new + // users). Alternatively, "thousands of page views per day" would be + // represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric + // value of `5.3` would mean "5300 page views per day"). 
+ // - `%` represents dimensionless value of 1/100, and annotates values giving + // a percentage (so the metric values are typically in the range of 0..100, + // and a metric value `3` means "3 percent"). + // - `10^2.%` indicates a metric contains a ratio, typically in the range + // 0..1, that will be multiplied by 100 and displayed as a percentage + // (so a metric value `0.03` means "3 percent"). + Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` + // A detailed description of the metric, which can be used in documentation. + Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` + // A concise name for the metric, which can be displayed in user interfaces. + // Use sentence case without an ending period, for example "Request count". + // This field is optional but it is recommended to be set for any metrics + // associated with user-visible concepts, such as Quota. + DisplayName string `protobuf:"bytes,7,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Optional. Metadata which can be used to guide usage of the metric. + Metadata *MetricDescriptor_MetricDescriptorMetadata `protobuf:"bytes,10,opt,name=metadata,proto3" json:"metadata,omitempty"` + // Optional. The launch stage of the metric definition. + LaunchStage api.LaunchStage `protobuf:"varint,12,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` + // Read-only. If present, then a [time + // series][google.monitoring.v3.TimeSeries], which is identified partially by + // a metric type and a + // [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor], that + // is associated with this metric type can only be associated with one of the + // monitored resource types listed here. + MonitoredResourceTypes []string `protobuf:"bytes,13,rep,name=monitored_resource_types,json=monitoredResourceTypes,proto3" json:"monitored_resource_types,omitempty"` +} + +func (x *MetricDescriptor) Reset() { + *x = MetricDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_metric_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetricDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetricDescriptor) ProtoMessage() {} + +func (x *MetricDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_google_api_metric_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetricDescriptor.ProtoReflect.Descriptor instead. 
+func (*MetricDescriptor) Descriptor() ([]byte, []int) { + return file_google_api_metric_proto_rawDescGZIP(), []int{0} +} + +func (x *MetricDescriptor) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *MetricDescriptor) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *MetricDescriptor) GetLabels() []*label.LabelDescriptor { + if x != nil { + return x.Labels + } + return nil +} + +func (x *MetricDescriptor) GetMetricKind() MetricDescriptor_MetricKind { + if x != nil { + return x.MetricKind + } + return MetricDescriptor_METRIC_KIND_UNSPECIFIED +} + +func (x *MetricDescriptor) GetValueType() MetricDescriptor_ValueType { + if x != nil { + return x.ValueType + } + return MetricDescriptor_VALUE_TYPE_UNSPECIFIED +} + +func (x *MetricDescriptor) GetUnit() string { + if x != nil { + return x.Unit + } + return "" +} + +func (x *MetricDescriptor) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *MetricDescriptor) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *MetricDescriptor) GetMetadata() *MetricDescriptor_MetricDescriptorMetadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *MetricDescriptor) GetLaunchStage() api.LaunchStage { + if x != nil { + return x.LaunchStage + } + return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED +} + +func (x *MetricDescriptor) GetMonitoredResourceTypes() []string { + if x != nil { + return x.MonitoredResourceTypes + } + return nil +} + +// A specific metric, identified by specifying values for all of the +// labels of a [`MetricDescriptor`][google.api.MetricDescriptor]. +type Metric struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An existing metric type, see + // [google.api.MetricDescriptor][google.api.MetricDescriptor]. For example, + // `custom.googleapis.com/invoice/paid/amount`. + Type string `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"` + // The set of label values that uniquely identify this metric. All + // labels listed in the `MetricDescriptor` must be assigned values. + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Metric) Reset() { + *x = Metric{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_metric_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metric) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metric) ProtoMessage() {} + +func (x *Metric) ProtoReflect() protoreflect.Message { + mi := &file_google_api_metric_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metric.ProtoReflect.Descriptor instead. +func (*Metric) Descriptor() ([]byte, []int) { + return file_google_api_metric_proto_rawDescGZIP(), []int{1} +} + +func (x *Metric) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Metric) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +// Additional annotations that can be used to guide the usage of a metric. 
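+//
+// For illustration (assumed values): a metric that is written once per
+// minute, and whose points become readable roughly two minutes after they
+// are written, would report sample_period = 60s and ingest_delay = 120s
+// in this metadata.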
+type MetricDescriptor_MetricDescriptorMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Deprecated. Must use the + // [MetricDescriptor.launch_stage][google.api.MetricDescriptor.launch_stage] + // instead. + // + // Deprecated: Do not use. + LaunchStage api.LaunchStage `protobuf:"varint,1,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` + // The sampling period of metric data points. For metrics which are written + // periodically, consecutive data points are stored at this time interval, + // excluding data loss due to errors. Metrics with a higher granularity have + // a smaller sampling period. + SamplePeriod *durationpb.Duration `protobuf:"bytes,2,opt,name=sample_period,json=samplePeriod,proto3" json:"sample_period,omitempty"` + // The delay of data points caused by ingestion. Data points older than this + // age are guaranteed to be ingested and available to be read, excluding + // data loss due to errors. + IngestDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=ingest_delay,json=ingestDelay,proto3" json:"ingest_delay,omitempty"` +} + +func (x *MetricDescriptor_MetricDescriptorMetadata) Reset() { + *x = MetricDescriptor_MetricDescriptorMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_metric_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetricDescriptor_MetricDescriptorMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetricDescriptor_MetricDescriptorMetadata) ProtoMessage() {} + +func (x *MetricDescriptor_MetricDescriptorMetadata) ProtoReflect() protoreflect.Message { + mi := &file_google_api_metric_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetricDescriptor_MetricDescriptorMetadata.ProtoReflect.Descriptor instead. +func (*MetricDescriptor_MetricDescriptorMetadata) Descriptor() ([]byte, []int) { + return file_google_api_metric_proto_rawDescGZIP(), []int{0, 0} +} + +// Deprecated: Do not use. 
+func (x *MetricDescriptor_MetricDescriptorMetadata) GetLaunchStage() api.LaunchStage { + if x != nil { + return x.LaunchStage + } + return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED +} + +func (x *MetricDescriptor_MetricDescriptorMetadata) GetSamplePeriod() *durationpb.Duration { + if x != nil { + return x.SamplePeriod + } + return nil +} + +func (x *MetricDescriptor_MetricDescriptorMetadata) GetIngestDelay() *durationpb.Duration { + if x != nil { + return x.IngestDelay + } + return nil +} + +var File_google_api_metric_proto protoreflect.FileDescriptor + +var file_google_api_metric_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, + 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc1, 0x07, 0x0a, + 0x10, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x48, + 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0a, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x45, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, + 0x6e, 0x69, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, + 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, + 
0x64, 0x61, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, + 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, + 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, + 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x38, 0x0a, 0x18, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, 0x6d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x73, 0x1a, 0xd8, 0x01, 0x0a, 0x18, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3e, + 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x42, 0x02, 0x18, + 0x01, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x3e, + 0x0a, 0x0d, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x3c, + 0x0a, 0x0c, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0b, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x22, 0x4f, 0x0a, 0x0a, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1b, 0x0a, 0x17, 0x4d, 0x45, + 0x54, 0x52, 0x49, 0x43, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, + 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x10, 0x02, 0x12, 0x0e, 0x0a, + 0x0a, 0x43, 0x55, 0x4d, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x56, 0x45, 0x10, 0x03, 0x22, 0x71, 0x0a, + 0x09, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x56, 0x41, + 0x4c, 0x55, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, + 0x12, 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x44, + 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, + 0x47, 0x10, 0x04, 0x12, 
0x10, 0x0a, 0x0c, 0x44, 0x49, 0x53, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, + 0x49, 0x4f, 0x4e, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x4f, 0x4e, 0x45, 0x59, 0x10, 0x06, + 0x22, 0x8f, 0x01, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x36, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x42, 0x5f, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x3b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0xa2, 0x02, 0x04, 0x47, + 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_metric_proto_rawDescOnce sync.Once + file_google_api_metric_proto_rawDescData = file_google_api_metric_proto_rawDesc +) + +func file_google_api_metric_proto_rawDescGZIP() []byte { + file_google_api_metric_proto_rawDescOnce.Do(func() { + file_google_api_metric_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_metric_proto_rawDescData) + }) + return file_google_api_metric_proto_rawDescData +} + +var file_google_api_metric_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_google_api_metric_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_api_metric_proto_goTypes = []interface{}{ + (MetricDescriptor_MetricKind)(0), // 0: google.api.MetricDescriptor.MetricKind + (MetricDescriptor_ValueType)(0), // 1: google.api.MetricDescriptor.ValueType + (*MetricDescriptor)(nil), // 2: google.api.MetricDescriptor + (*Metric)(nil), // 3: google.api.Metric + (*MetricDescriptor_MetricDescriptorMetadata)(nil), // 4: google.api.MetricDescriptor.MetricDescriptorMetadata + nil, // 5: google.api.Metric.LabelsEntry + (*label.LabelDescriptor)(nil), // 6: google.api.LabelDescriptor + (api.LaunchStage)(0), // 7: google.api.LaunchStage + (*durationpb.Duration)(nil), // 8: google.protobuf.Duration +} +var file_google_api_metric_proto_depIdxs = []int32{ + 6, // 0: google.api.MetricDescriptor.labels:type_name -> google.api.LabelDescriptor + 0, // 1: google.api.MetricDescriptor.metric_kind:type_name -> google.api.MetricDescriptor.MetricKind + 1, // 2: google.api.MetricDescriptor.value_type:type_name -> google.api.MetricDescriptor.ValueType + 4, // 3: google.api.MetricDescriptor.metadata:type_name -> google.api.MetricDescriptor.MetricDescriptorMetadata + 7, // 4: google.api.MetricDescriptor.launch_stage:type_name -> google.api.LaunchStage + 5, // 5: google.api.Metric.labels:type_name -> 
google.api.Metric.LabelsEntry + 7, // 6: google.api.MetricDescriptor.MetricDescriptorMetadata.launch_stage:type_name -> google.api.LaunchStage + 8, // 7: google.api.MetricDescriptor.MetricDescriptorMetadata.sample_period:type_name -> google.protobuf.Duration + 8, // 8: google.api.MetricDescriptor.MetricDescriptorMetadata.ingest_delay:type_name -> google.protobuf.Duration + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_google_api_metric_proto_init() } +func file_google_api_metric_proto_init() { + if File_google_api_metric_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_metric_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_metric_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metric); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_metric_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricDescriptor_MetricDescriptorMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_metric_proto_rawDesc, + NumEnums: 2, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_metric_proto_goTypes, + DependencyIndexes: file_google_api_metric_proto_depIdxs, + EnumInfos: file_google_api_metric_proto_enumTypes, + MessageInfos: file_google_api_metric_proto_msgTypes, + }.Build() + File_google_api_metric_proto = out.File + file_google_api_metric_proto_rawDesc = nil + file_google_api_metric_proto_goTypes = nil + file_google_api_metric_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go new file mode 100644 index 0000000000..b4cee29803 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go @@ -0,0 +1,476 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/monitored_resource.proto + +package monitoredres + +import ( + reflect "reflect" + sync "sync" + + api "google.golang.org/genproto/googleapis/api" + label "google.golang.org/genproto/googleapis/api/label" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// An object that describes the schema of a +// [MonitoredResource][google.api.MonitoredResource] object using a type name +// and a set of labels. For example, the monitored resource descriptor for +// Google Compute Engine VM instances has a type of +// `"gce_instance"` and specifies the use of the labels `"instance_id"` and +// `"zone"` to identify particular VM instances. +// +// Different APIs can support different monitored resource types. APIs generally +// provide a `list` method that returns the monitored resource descriptors used +// by the API. +type MonitoredResourceDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. The resource name of the monitored resource descriptor: + // `"projects/{project_id}/monitoredResourceDescriptors/{type}"` where + // {type} is the value of the `type` field in this object and + // {project_id} is a project ID that provides API-specific context for + // accessing the type. APIs that do not use project information can use the + // resource name format `"monitoredResourceDescriptors/{type}"`. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // Required. The monitored resource type. For example, the type + // `"cloudsql_database"` represents databases in Google Cloud SQL. + // + // For a list of types, see [Monitored resource + // types](https://cloud.google.com/monitoring/api/resources) + // + // and [Logging resource + // types](https://cloud.google.com/logging/docs/api/v2/resource-list). + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Optional. A concise name for the monitored resource type that might be + // displayed in user interfaces. It should be a Title Cased Noun Phrase, + // without any article or other determiners. For example, + // `"Google Cloud SQL Database"`. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Optional. A detailed description of the monitored resource type that might + // be used in documentation. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Required. A set of labels used to describe instances of this monitored + // resource type. For example, an individual Google Cloud SQL database is + // identified by values for the labels `"database_id"` and `"zone"`. + Labels []*label.LabelDescriptor `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"` + // Optional. The launch stage of the monitored resource definition. 
+ LaunchStage api.LaunchStage `protobuf:"varint,7,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` +} + +func (x *MonitoredResourceDescriptor) Reset() { + *x = MonitoredResourceDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_monitored_resource_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MonitoredResourceDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MonitoredResourceDescriptor) ProtoMessage() {} + +func (x *MonitoredResourceDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_google_api_monitored_resource_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MonitoredResourceDescriptor.ProtoReflect.Descriptor instead. +func (*MonitoredResourceDescriptor) Descriptor() ([]byte, []int) { + return file_google_api_monitored_resource_proto_rawDescGZIP(), []int{0} +} + +func (x *MonitoredResourceDescriptor) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *MonitoredResourceDescriptor) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *MonitoredResourceDescriptor) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *MonitoredResourceDescriptor) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *MonitoredResourceDescriptor) GetLabels() []*label.LabelDescriptor { + if x != nil { + return x.Labels + } + return nil +} + +func (x *MonitoredResourceDescriptor) GetLaunchStage() api.LaunchStage { + if x != nil { + return x.LaunchStage + } + return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED +} + +// An object representing a resource that can be used for monitoring, logging, +// billing, or other purposes. Examples include virtual machine instances, +// databases, and storage devices such as disks. The `type` field identifies a +// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object +// that describes the resource's schema. Information in the `labels` field +// identifies the actual resource and its attributes according to the schema. +// For example, a particular Compute Engine VM instance could be represented by +// the following object, because the +// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] for +// `"gce_instance"` has labels +// `"project_id"`, `"instance_id"` and `"zone"`: +// +// { "type": "gce_instance", +// "labels": { "project_id": "my-project", +// "instance_id": "12345678901234", +// "zone": "us-central1-a" }} +type MonitoredResource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The monitored resource type. This field must match + // the `type` field of a + // [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] + // object. For example, the type of a Compute Engine VM instance is + // `gce_instance`. Some descriptors include the service name in the type; for + // example, the type of a Datastream stream is + // `datastream.googleapis.com/Stream`. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Required. 
Values for all of the labels listed in the associated monitored + // resource descriptor. For example, Compute Engine VM instances use the + // labels `"project_id"`, `"instance_id"`, and `"zone"`. + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *MonitoredResource) Reset() { + *x = MonitoredResource{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_monitored_resource_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MonitoredResource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MonitoredResource) ProtoMessage() {} + +func (x *MonitoredResource) ProtoReflect() protoreflect.Message { + mi := &file_google_api_monitored_resource_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MonitoredResource.ProtoReflect.Descriptor instead. +func (*MonitoredResource) Descriptor() ([]byte, []int) { + return file_google_api_monitored_resource_proto_rawDescGZIP(), []int{1} +} + +func (x *MonitoredResource) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *MonitoredResource) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +// Auxiliary metadata for a [MonitoredResource][google.api.MonitoredResource] +// object. [MonitoredResource][google.api.MonitoredResource] objects contain the +// minimum set of information to uniquely identify a monitored resource +// instance. There is some other useful auxiliary metadata. Monitoring and +// Logging use an ingestion pipeline to extract metadata for cloud resources of +// all types, and store the metadata in this message. +type MonitoredResourceMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. Values for predefined system metadata labels. + // System labels are a kind of metadata extracted by Google, including + // "machine_image", "vpc", "subnet_id", + // "security_group", "name", etc. + // System label values can be only strings, Boolean values, or a list of + // strings. For example: + // + // { "name": "my-test-instance", + // "security_group": ["a", "b", "c"], + // "spot_instance": false } + SystemLabels *structpb.Struct `protobuf:"bytes,1,opt,name=system_labels,json=systemLabels,proto3" json:"system_labels,omitempty"` + // Output only. A map of user-defined metadata labels. 
+ UserLabels map[string]string `protobuf:"bytes,2,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *MonitoredResourceMetadata) Reset() { + *x = MonitoredResourceMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_monitored_resource_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MonitoredResourceMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MonitoredResourceMetadata) ProtoMessage() {} + +func (x *MonitoredResourceMetadata) ProtoReflect() protoreflect.Message { + mi := &file_google_api_monitored_resource_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MonitoredResourceMetadata.ProtoReflect.Descriptor instead. +func (*MonitoredResourceMetadata) Descriptor() ([]byte, []int) { + return file_google_api_monitored_resource_proto_rawDescGZIP(), []int{2} +} + +func (x *MonitoredResourceMetadata) GetSystemLabels() *structpb.Struct { + if x != nil { + return x.SystemLabels + } + return nil +} + +func (x *MonitoredResourceMetadata) GetUserLabels() map[string]string { + if x != nil { + return x.UserLabels + } + return nil +} + +var File_google_api_monitored_resource_proto protoreflect.FileDescriptor + +var file_google_api_monitored_resource_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, + 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x1a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, + 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xfb, 0x01, 0x0a, 0x1b, 0x4d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 
0x69, 0x70, 0x74, 0x6f, 0x72, + 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, + 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, + 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, + 0x74, 0x61, 0x67, 0x65, 0x22, 0xa5, 0x01, 0x0a, 0x11, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x41, + 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x4c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf0, 0x01, 0x0a, + 0x19, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3c, 0x0a, 0x0d, 0x73, 0x79, + 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0c, 0x73, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x56, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, + 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, + 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, + 0x79, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x42, 0x16, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x43, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, + 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 
0x64, + 0x72, 0x65, 0x73, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x72, 0x65, 0x73, + 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_google_api_monitored_resource_proto_rawDescOnce sync.Once + file_google_api_monitored_resource_proto_rawDescData = file_google_api_monitored_resource_proto_rawDesc +) + +func file_google_api_monitored_resource_proto_rawDescGZIP() []byte { + file_google_api_monitored_resource_proto_rawDescOnce.Do(func() { + file_google_api_monitored_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_monitored_resource_proto_rawDescData) + }) + return file_google_api_monitored_resource_proto_rawDescData +} + +var file_google_api_monitored_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_google_api_monitored_resource_proto_goTypes = []interface{}{ + (*MonitoredResourceDescriptor)(nil), // 0: google.api.MonitoredResourceDescriptor + (*MonitoredResource)(nil), // 1: google.api.MonitoredResource + (*MonitoredResourceMetadata)(nil), // 2: google.api.MonitoredResourceMetadata + nil, // 3: google.api.MonitoredResource.LabelsEntry + nil, // 4: google.api.MonitoredResourceMetadata.UserLabelsEntry + (*label.LabelDescriptor)(nil), // 5: google.api.LabelDescriptor + (api.LaunchStage)(0), // 6: google.api.LaunchStage + (*structpb.Struct)(nil), // 7: google.protobuf.Struct +} +var file_google_api_monitored_resource_proto_depIdxs = []int32{ + 5, // 0: google.api.MonitoredResourceDescriptor.labels:type_name -> google.api.LabelDescriptor + 6, // 1: google.api.MonitoredResourceDescriptor.launch_stage:type_name -> google.api.LaunchStage + 3, // 2: google.api.MonitoredResource.labels:type_name -> google.api.MonitoredResource.LabelsEntry + 7, // 3: google.api.MonitoredResourceMetadata.system_labels:type_name -> google.protobuf.Struct + 4, // 4: google.api.MonitoredResourceMetadata.user_labels:type_name -> google.api.MonitoredResourceMetadata.UserLabelsEntry + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_google_api_monitored_resource_proto_init() } +func file_google_api_monitored_resource_proto_init() { + if File_google_api_monitored_resource_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_monitored_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MonitoredResourceDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_monitored_resource_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MonitoredResource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_monitored_resource_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MonitoredResourceMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_google_api_monitored_resource_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_monitored_resource_proto_goTypes, + DependencyIndexes: file_google_api_monitored_resource_proto_depIdxs, + MessageInfos: file_google_api_monitored_resource_proto_msgTypes, + }.Build() + File_google_api_monitored_resource_proto = out.File + file_google_api_monitored_resource_proto_rawDesc = nil + file_google_api_monitored_resource_proto_goTypes = nil + file_google_api_monitored_resource_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go b/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go new file mode 100644 index 0000000000..cae02bce14 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go @@ -0,0 +1,189 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/type/calendar_period.proto + +package calendarperiod + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A `CalendarPeriod` represents the abstract concept of a time period that has +// a canonical start. Grammatically, "the start of the current +// `CalendarPeriod`." All calendar times begin at midnight UTC. +type CalendarPeriod int32 + +const ( + // Undefined period, raises an error. + CalendarPeriod_CALENDAR_PERIOD_UNSPECIFIED CalendarPeriod = 0 + // A day. + CalendarPeriod_DAY CalendarPeriod = 1 + // A week. Weeks begin on Monday, following + // [ISO 8601](https://en.wikipedia.org/wiki/ISO_week_date). + CalendarPeriod_WEEK CalendarPeriod = 2 + // A fortnight. The first calendar fortnight of the year begins at the start + // of week 1 according to + // [ISO 8601](https://en.wikipedia.org/wiki/ISO_week_date). + CalendarPeriod_FORTNIGHT CalendarPeriod = 3 + // A month. + CalendarPeriod_MONTH CalendarPeriod = 4 + // A quarter. Quarters start on dates 1-Jan, 1-Apr, 1-Jul, and 1-Oct of each + // year. + CalendarPeriod_QUARTER CalendarPeriod = 5 + // A half-year. Half-years start on dates 1-Jan and 1-Jul. + CalendarPeriod_HALF CalendarPeriod = 6 + // A year. + CalendarPeriod_YEAR CalendarPeriod = 7 +) + +// Enum value maps for CalendarPeriod. 
+var ( + CalendarPeriod_name = map[int32]string{ + 0: "CALENDAR_PERIOD_UNSPECIFIED", + 1: "DAY", + 2: "WEEK", + 3: "FORTNIGHT", + 4: "MONTH", + 5: "QUARTER", + 6: "HALF", + 7: "YEAR", + } + CalendarPeriod_value = map[string]int32{ + "CALENDAR_PERIOD_UNSPECIFIED": 0, + "DAY": 1, + "WEEK": 2, + "FORTNIGHT": 3, + "MONTH": 4, + "QUARTER": 5, + "HALF": 6, + "YEAR": 7, + } +) + +func (x CalendarPeriod) Enum() *CalendarPeriod { + p := new(CalendarPeriod) + *p = x + return p +} + +func (x CalendarPeriod) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CalendarPeriod) Descriptor() protoreflect.EnumDescriptor { + return file_google_type_calendar_period_proto_enumTypes[0].Descriptor() +} + +func (CalendarPeriod) Type() protoreflect.EnumType { + return &file_google_type_calendar_period_proto_enumTypes[0] +} + +func (x CalendarPeriod) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CalendarPeriod.Descriptor instead. +func (CalendarPeriod) EnumDescriptor() ([]byte, []int) { + return file_google_type_calendar_period_proto_rawDescGZIP(), []int{0} +} + +var File_google_type_calendar_period_proto protoreflect.FileDescriptor + +var file_google_type_calendar_period_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x63, 0x61, + 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2a, 0x7f, 0x0a, 0x0e, 0x43, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x50, 0x65, 0x72, 0x69, + 0x6f, 0x64, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x41, 0x4c, 0x45, 0x4e, 0x44, 0x41, 0x52, 0x5f, 0x50, + 0x45, 0x52, 0x49, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x41, 0x59, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, + 0x57, 0x45, 0x45, 0x4b, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x46, 0x4f, 0x52, 0x54, 0x4e, 0x49, + 0x47, 0x48, 0x54, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x4f, 0x4e, 0x54, 0x48, 0x10, 0x04, + 0x12, 0x0b, 0x0a, 0x07, 0x51, 0x55, 0x41, 0x52, 0x54, 0x45, 0x52, 0x10, 0x05, 0x12, 0x08, 0x0a, + 0x04, 0x48, 0x41, 0x4c, 0x46, 0x10, 0x06, 0x12, 0x08, 0x0a, 0x04, 0x59, 0x45, 0x41, 0x52, 0x10, + 0x07, 0x42, 0x78, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x42, 0x13, 0x43, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x50, 0x65, + 0x72, 0x69, 0x6f, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, + 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, + 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x3b, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x70, + 0x65, 0x72, 0x69, 0x6f, 0x64, 0xa2, 0x02, 0x03, 0x47, 0x54, 0x50, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_google_type_calendar_period_proto_rawDescOnce sync.Once + file_google_type_calendar_period_proto_rawDescData = file_google_type_calendar_period_proto_rawDesc +) + +func file_google_type_calendar_period_proto_rawDescGZIP() []byte { + file_google_type_calendar_period_proto_rawDescOnce.Do(func() { + file_google_type_calendar_period_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_google_type_calendar_period_proto_rawDescData) + }) + return file_google_type_calendar_period_proto_rawDescData +} + +var file_google_type_calendar_period_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_type_calendar_period_proto_goTypes = []interface{}{ + (CalendarPeriod)(0), // 0: google.type.CalendarPeriod +} +var file_google_type_calendar_period_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_type_calendar_period_proto_init() } +func file_google_type_calendar_period_proto_init() { + if File_google_type_calendar_period_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_type_calendar_period_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_type_calendar_period_proto_goTypes, + DependencyIndexes: file_google_type_calendar_period_proto_depIdxs, + EnumInfos: file_google_type_calendar_period_proto_enumTypes, + }.Build() + File_google_type_calendar_period_proto = out.File + file_google_type_calendar_period_proto_rawDesc = nil + file_google_type_calendar_period_proto_goTypes = nil + file_google_type_calendar_period_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/authz/audit/audit_logger.go b/vendor/google.golang.org/grpc/authz/audit/audit_logger.go new file mode 100644 index 0000000000..7ea79410ad --- /dev/null +++ b/vendor/google.golang.org/grpc/authz/audit/audit_logger.go @@ -0,0 +1,127 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package audit contains interfaces for audit logging during authorization. +package audit + +import ( + "encoding/json" + "sync" +) + +// loggerBuilderRegistry holds a map of audit logger builders and a mutex +// to facilitate thread-safe reading/writing operations. +type loggerBuilderRegistry struct { + mu sync.Mutex + builders map[string]LoggerBuilder +} + +var ( + registry = loggerBuilderRegistry{ + builders: make(map[string]LoggerBuilder), + } +) + +// RegisterLoggerBuilder registers the builder in a global map +// using b.Name() as the key. +// +// This should only be called during initialization time (i.e. in an init() +// function). If multiple builders are registered with the same name, +// the one registered last will take effect. +func RegisterLoggerBuilder(b LoggerBuilder) { + registry.mu.Lock() + defer registry.mu.Unlock() + registry.builders[b.Name()] = b +} + +// GetLoggerBuilder returns a builder with the given name. +// It returns nil if the builder is not found in the registry. 
+func GetLoggerBuilder(name string) LoggerBuilder { + registry.mu.Lock() + defer registry.mu.Unlock() + return registry.builders[name] +} + +// Event contains information passed to the audit logger as part of an +// audit logging event. +type Event struct { + // FullMethodName is the full method name of the audited RPC, in the format + // of "/pkg.Service/Method". For example, "/helloworld.Greeter/SayHello". + FullMethodName string + // Principal is the identity of the caller. Currently it will only be + // available in certificate-based TLS authentication. + Principal string + // PolicyName is the authorization policy name or the xDS RBAC filter name. + PolicyName string + // MatchedRule is the matched rule or policy name in the xDS RBAC filter. + // It will be empty if there is no match. + MatchedRule string + // Authorized indicates whether the audited RPC is authorized or not. + Authorized bool +} + +// LoggerConfig represents an opaque data structure holding an audit +// logger configuration. Concrete types representing configuration of specific +// audit loggers must embed this interface to implement it. +type LoggerConfig interface { + loggerConfig() +} + +// Logger is the interface to be implemented by audit loggers. +// +// An audit logger is a logger instance that can be configured via the +// authorization policy API or xDS HTTP RBAC filters. When the authorization +// decision meets the condition for audit, all the configured audit loggers' +// Log() method will be invoked to log that event. +// +// Please refer to +// https://github.com/grpc/proposal/blob/master/A59-audit-logging.md for more +// details about audit logging. +type Logger interface { + // Log performs audit logging for the provided audit event. + // + // This method is invoked in the RPC path and therefore implementations + // must not block. + Log(*Event) +} + +// LoggerBuilder is the interface to be implemented by audit logger +// builders that are used at runtime to configure and instantiate audit loggers. +// +// Users who want to implement their own audit logging logic should +// implement this interface, along with the Logger interface, and register +// it by calling RegisterLoggerBuilder() at init time. +// +// Please refer to +// https://github.com/grpc/proposal/blob/master/A59-audit-logging.md for more +// details about audit logging. +type LoggerBuilder interface { + // ParseLoggerConfig parses the given JSON bytes into a structured + // logger config this builder can use to build an audit logger. + ParseLoggerConfig(config json.RawMessage) (LoggerConfig, error) + // Build builds an audit logger with the given logger config. + // This will only be called with valid configs returned from + // ParseLoggerConfig() and any runtime issues such as failing to + // create a file should be handled by the logger implementation instead of + // failing the logger instantiation. So implementers need to make sure it + // can return a logger without error at this stage. + Build(LoggerConfig) Logger + // Name returns the name of logger built by this builder. + // This is used to register and pick the builder. + Name() string +} diff --git a/vendor/google.golang.org/grpc/authz/audit/stdout/stdout_logger.go b/vendor/google.golang.org/grpc/authz/audit/stdout/stdout_logger.go new file mode 100644 index 0000000000..f9bfc2d7d6 --- /dev/null +++ b/vendor/google.golang.org/grpc/authz/audit/stdout/stdout_logger.go @@ -0,0 +1,110 @@ +/* + * + * Copyright 2023 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package stdout defines a stdout audit logger. +package stdout + +import ( + "encoding/json" + "log" + "os" + "time" + + "google.golang.org/grpc/authz/audit" + "google.golang.org/grpc/grpclog" +) + +var grpcLogger = grpclog.Component("authz-audit") + +// Name is the string to identify this logger type in the registry +const Name = "stdout_logger" + +func init() { + audit.RegisterLoggerBuilder(&loggerBuilder{ + goLogger: log.New(os.Stdout, "", 0), + }) +} + +type event struct { + FullMethodName string `json:"rpc_method"` + Principal string `json:"principal"` + PolicyName string `json:"policy_name"` + MatchedRule string `json:"matched_rule"` + Authorized bool `json:"authorized"` + Timestamp string `json:"timestamp"` // Time when the audit event is logged via Log method +} + +// logger implements the audit.Logger interface by logging to standard output. +type logger struct { + goLogger *log.Logger +} + +// Log marshals the audit.Event to json and prints it to standard output. +func (l *logger) Log(event *audit.Event) { + jsonContainer := map[string]any{ + "grpc_audit_log": convertEvent(event), + } + jsonBytes, err := json.Marshal(jsonContainer) + if err != nil { + grpcLogger.Errorf("failed to marshal AuditEvent data to JSON: %v", err) + return + } + l.goLogger.Println(string(jsonBytes)) +} + +// loggerConfig represents the configuration for the stdout logger. +// It is currently empty and implements the audit.LoggerConfig interface by embedding it. +type loggerConfig struct { + audit.LoggerConfig +} + +type loggerBuilder struct { + goLogger *log.Logger +} + +func (loggerBuilder) Name() string { + return Name +} + +// Build returns a new instance of the stdout logger. +// Passed-in configuration is ignored as the stdout logger does not +// expect any configuration to be provided. +func (lb *loggerBuilder) Build(audit.LoggerConfig) audit.Logger { + return &logger{ + goLogger: lb.goLogger, + } +} + +// ParseLoggerConfig is a no-op since the stdout logger does not accept any configuration. +func (*loggerBuilder) ParseLoggerConfig(config json.RawMessage) (audit.LoggerConfig, error) { + if len(config) != 0 && string(config) != "{}" { + grpcLogger.Warningf("Stdout logger doesn't support custom configs. Ignoring:\n%s", string(config)) + } + return &loggerConfig{}, nil +} + +func convertEvent(auditEvent *audit.Event) *event { + return &event{ + FullMethodName: auditEvent.FullMethodName, + Principal: auditEvent.Principal, + PolicyName: auditEvent.PolicyName, + MatchedRule: auditEvent.MatchedRule, + Authorized: auditEvent.Authorized, + Timestamp: time.Now().Format(time.RFC3339Nano), + } +} diff --git a/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go b/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go new file mode 100644 index 0000000000..ddd9bd269b --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go @@ -0,0 +1,183 @@ +/* + * + * Copyright 2023 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package leastrequest implements a least request load balancer. +package leastrequest + +import ( + "encoding/json" + "fmt" + "math/rand" + "sync/atomic" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/serviceconfig" +) + +// randuint32 is a global to stub out in tests. +var randuint32 = rand.Uint32 + +// Name is the name of the least request balancer. +const Name = "least_request_experimental" + +var logger = grpclog.Component("least-request") + +func init() { + balancer.Register(bb{}) +} + +// LBConfig is the balancer config for least_request_experimental balancer. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // ChoiceCount is the number of random SubConns to sample to find the one + // with the fewest outstanding requests. If unset, defaults to 2. If set to + // < 2, the config will be rejected, and if set to > 10, will become 10. + ChoiceCount uint32 `json:"choiceCount,omitempty"` +} + +type bb struct{} + +func (bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + lbConfig := &LBConfig{ + ChoiceCount: 2, + } + if err := json.Unmarshal(s, lbConfig); err != nil { + return nil, fmt.Errorf("least-request: unable to unmarshal LBConfig: %v", err) + } + // "If `choice_count < 2`, the config will be rejected." - A48 + if lbConfig.ChoiceCount < 2 { // sweet + return nil, fmt.Errorf("least-request: lbConfig.choiceCount: %v, must be >= 2", lbConfig.ChoiceCount) + } + // "If a LeastRequestLoadBalancingConfig with a choice_count > 10 is + // received, the least_request_experimental policy will set choice_count = + // 10." - A48 + if lbConfig.ChoiceCount > 10 { + lbConfig.ChoiceCount = 10 + } + return lbConfig, nil +} + +func (bb) Name() string { + return Name +} + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + b := &leastRequestBalancer{scRPCCounts: make(map[balancer.SubConn]*atomic.Int32)} + baseBuilder := base.NewBalancerBuilder(Name, b, base.Config{HealthCheck: true}) + b.Balancer = baseBuilder.Build(cc, bOpts) + return b +} + +type leastRequestBalancer struct { + // Embeds balancer.Balancer because needs to intercept UpdateClientConnState + // to learn about choiceCount. + balancer.Balancer + + choiceCount uint32 + scRPCCounts map[balancer.SubConn]*atomic.Int32 // Hold onto RPC counts to keep track for subsequent picker updates. 
+} + +func (lrb *leastRequestBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + lrCfg, ok := s.BalancerConfig.(*LBConfig) + if !ok { + logger.Errorf("least-request: received config with unexpected type %T: %v", s.BalancerConfig, s.BalancerConfig) + return balancer.ErrBadResolverState + } + + lrb.choiceCount = lrCfg.ChoiceCount + return lrb.Balancer.UpdateClientConnState(s) +} + +type scWithRPCCount struct { + sc balancer.SubConn + numRPCs *atomic.Int32 +} + +func (lrb *leastRequestBalancer) Build(info base.PickerBuildInfo) balancer.Picker { + if logger.V(2) { + logger.Infof("least-request: Build called with info: %v", info) + } + if len(info.ReadySCs) == 0 { + return base.NewErrPicker(balancer.ErrNoSubConnAvailable) + } + + for sc := range lrb.scRPCCounts { + if _, ok := info.ReadySCs[sc]; !ok { // If no longer ready, no more need for the ref to count active RPCs. + delete(lrb.scRPCCounts, sc) + } + } + + // Create new refs if needed. + for sc := range info.ReadySCs { + if _, ok := lrb.scRPCCounts[sc]; !ok { + lrb.scRPCCounts[sc] = new(atomic.Int32) + } + } + + // Copy refs to counters into picker. + scs := make([]scWithRPCCount, 0, len(info.ReadySCs)) + for sc := range info.ReadySCs { + scs = append(scs, scWithRPCCount{ + sc: sc, + numRPCs: lrb.scRPCCounts[sc], // guaranteed to be present due to algorithm + }) + } + + return &picker{ + choiceCount: lrb.choiceCount, + subConns: scs, + } +} + +type picker struct { + // choiceCount is the number of random SubConns to find the one with + // the least request. + choiceCount uint32 + // Built out when receives list of ready RPCs. + subConns []scWithRPCCount +} + +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + var pickedSC *scWithRPCCount + var pickedSCNumRPCs int32 + for i := 0; i < int(p.choiceCount); i++ { + index := randuint32() % uint32(len(p.subConns)) + sc := p.subConns[index] + n := sc.numRPCs.Load() + if pickedSC == nil || n < pickedSCNumRPCs { + pickedSC = &sc + pickedSCNumRPCs = n + } + } + // "The counter for a subchannel should be atomically incremented by one + // after it has been successfully picked by the picker." - A48 + pickedSC.numRPCs.Add(1) + // "the picker should add a callback for atomically decrementing the + // subchannel counter once the RPC finishes (regardless of Status code)." - + // A48. + done := func(balancer.DoneInfo) { + pickedSC.numRPCs.Add(-1) + } + return balancer.PickResult{ + SubConn: pickedSC.sc, + Done: done, + }, nil +} diff --git a/vendor/google.golang.org/grpc/balancer/rls/balancer.go b/vendor/google.golang.org/grpc/balancer/rls/balancer.go new file mode 100644 index 0000000000..5ae4d2e131 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/rls/balancer.go @@ -0,0 +1,705 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package rls implements the RLS LB policy. 
+package rls + +import ( + "encoding/json" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + "unsafe" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + estats "google.golang.org/grpc/experimental/stats" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/balancergroup" + "google.golang.org/grpc/internal/buffer" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" +) + +const ( + // Name is the name of the RLS LB policy. + // + // It currently has an experimental suffix which would be removed once + // end-to-end testing of the policy is completed. + Name = internal.RLSLoadBalancingPolicyName + // Default frequency for data cache purging. + periodicCachePurgeFreq = time.Minute +) + +var ( + logger = grpclog.Component("rls") + errBalancerClosed = errors.New("rls LB policy is closed") + + // Below defined vars for overriding in unit tests. + + // Default exponential backoff strategy for data cache entries. + defaultBackoffStrategy = backoff.Strategy(backoff.DefaultExponential) + // Ticker used for periodic data cache purging. + dataCachePurgeTicker = func() *time.Ticker { return time.NewTicker(periodicCachePurgeFreq) } + // We want every cache entry to live in the cache for at least this + // duration. If we encounter a cache entry whose minimum expiration time is + // in the future, we abort the LRU pass, which may temporarily leave the + // cache being too large. This is necessary to ensure that in cases where + // the cache is too small, when we receive an RLS Response, we keep the + // resulting cache entry around long enough for the pending incoming + // requests to be re-processed through the new Picker. If we didn't do this, + // then we'd risk throwing away each RLS response as we receive it, in which + // case we would fail to actually route any of our incoming requests. + minEvictDuration = 5 * time.Second + + // Following functions are no-ops in actual code, but can be overridden in + // tests to give tests visibility into exactly when certain events happen. + clientConnUpdateHook = func() {} + dataCachePurgeHook = func() {} + resetBackoffHook = func() {} + + cacheEntriesMetric = estats.RegisterInt64Gauge(estats.MetricDescriptor{ + Name: "grpc.lb.rls.cache_entries", + Description: "EXPERIMENTAL. Number of entries in the RLS cache.", + Unit: "entry", + Labels: []string{"grpc.target", "grpc.lb.rls.server_target", "grpc.lb.rls.instance_uuid"}, + Default: false, + }) + cacheSizeMetric = estats.RegisterInt64Gauge(estats.MetricDescriptor{ + Name: "grpc.lb.rls.cache_size", + Description: "EXPERIMENTAL. The current size of the RLS cache.", + Unit: "By", + Labels: []string{"grpc.target", "grpc.lb.rls.server_target", "grpc.lb.rls.instance_uuid"}, + Default: false, + }) + defaultTargetPicksMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ + Name: "grpc.lb.rls.default_target_picks", + Description: "EXPERIMENTAL. Number of LB picks sent to the default target.", + Unit: "pick", + Labels: []string{"grpc.target", "grpc.lb.rls.server_target", "grpc.lb.rls.data_plane_target", "grpc.lb.pick_result"}, + Default: false, + }) + targetPicksMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ + Name: "grpc.lb.rls.target_picks", + Description: "EXPERIMENTAL. Number of LB picks sent to each RLS target. 
Note that if the default target is also returned by the RLS server, RPCs sent to that target from the cache will be counted in this metric, not in grpc.rls.default_target_picks.", + Unit: "pick", + Labels: []string{"grpc.target", "grpc.lb.rls.server_target", "grpc.lb.rls.data_plane_target", "grpc.lb.pick_result"}, + Default: false, + }) + failedPicksMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ + Name: "grpc.lb.rls.failed_picks", + Description: "EXPERIMENTAL. Number of LB picks failed due to either a failed RLS request or the RLS channel being throttled.", + Unit: "pick", + Labels: []string{"grpc.target", "grpc.lb.rls.server_target"}, + Default: false, + }) +) + +func init() { + balancer.Register(&rlsBB{}) +} + +type rlsBB struct{} + +func (rlsBB) Name() string { + return Name +} + +func (rlsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + lb := &rlsBalancer{ + closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + cc: cc, + bopts: opts, + purgeTicker: dataCachePurgeTicker(), + dataCachePurgeHook: dataCachePurgeHook, + lbCfg: &lbConfig{}, + pendingMap: make(map[cacheKey]*backoffState), + childPolicies: make(map[string]*childPolicyWrapper), + updateCh: buffer.NewUnbounded(), + } + lb.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-experimental-lb %p] ", lb)) + lb.dataCache = newDataCache(maxCacheSize, lb.logger, opts.MetricsRecorder, opts.Target.String()) + lb.bg = balancergroup.New(balancergroup.Options{ + CC: cc, + BuildOpts: opts, + StateAggregator: lb, + Logger: lb.logger, + SubBalancerCloseTimeout: time.Duration(0), // Disable caching of removed child policies + }) + lb.bg.Start() + go lb.run() + return lb +} + +// rlsBalancer implements the RLS LB policy. +type rlsBalancer struct { + closed *grpcsync.Event // Fires when Close() is invoked. Guarded by stateMu. + done *grpcsync.Event // Fires when Close() is done. + cc balancer.ClientConn + bopts balancer.BuildOptions + purgeTicker *time.Ticker + dataCachePurgeHook func() + logger *internalgrpclog.PrefixLogger + + // If both cacheMu and stateMu need to be acquired, the former must be + // acquired first to prevent a deadlock. This order restriction is due to the + // fact that in places where we need to acquire both the locks, we always + // start off reading the cache. + + // cacheMu guards access to the data cache and pending requests map. We + // cannot use an RWMutex here since even an operation like + // dataCache.getEntry() modifies the underlying LRU, which is implemented as + // a doubly linked list. + cacheMu sync.Mutex + dataCache *dataCache // Cache of RLS data. + pendingMap map[cacheKey]*backoffState // Map of pending RLS requests. + + // stateMu guards access to all LB policy state. + stateMu sync.Mutex + lbCfg *lbConfig // Most recently received service config. + childPolicyBuilder balancer.Builder // Cached child policy builder. + resolverState resolver.State // Cached resolver state. + ctrlCh *controlChannel // Control channel to the RLS server. + bg *balancergroup.BalancerGroup + childPolicies map[string]*childPolicyWrapper + defaultPolicy *childPolicyWrapper + // A reference to the most recent picker sent to gRPC as part of a state + // update is cached in this field so that we can release the reference to the + // default child policy wrapper when a new picker is created. See + // sendNewPickerLocked() for details. + lastPicker *rlsPicker + // Set during UpdateClientConnState when pushing updates to child policies. 
+ // Prevents state updates from child policies causing new pickers to be sent + // up the channel. Cleared after all child policies have processed the + // updates sent to them, after which a new picker is sent up the channel. + inhibitPickerUpdates bool + + // Channel on which all updates are pushed. Processed in run(). + updateCh *buffer.Unbounded +} + +type resumePickerUpdates struct { + done chan struct{} +} + +// childPolicyIDAndState wraps a child policy id and its state update. +type childPolicyIDAndState struct { + id string + state balancer.State +} + +type controlChannelReady struct{} + +// run is a long-running goroutine which handles all the updates that the +// balancer wishes to handle. The appropriate updateHandler will push the update +// on to a channel that this goroutine will select on, thereby the handling of +// the update will happen asynchronously. +func (b *rlsBalancer) run() { + // We exit out of the for loop below only after `Close()` has been invoked. + // Firing the done event here will ensure that Close() returns only after + // all goroutines are done. + defer func() { b.done.Fire() }() + + // Wait for purgeDataCache() goroutine to exit before returning from here. + doneCh := make(chan struct{}) + defer func() { + <-doneCh + }() + go b.purgeDataCache(doneCh) + + for { + select { + case u, ok := <-b.updateCh.Get(): + if !ok { + return + } + b.updateCh.Load() + switch update := u.(type) { + case childPolicyIDAndState: + b.handleChildPolicyStateUpdate(update.id, update.state) + case controlChannelReady: + b.logger.Infof("Resetting backoff state after control channel getting back to READY") + b.cacheMu.Lock() + updatePicker := b.dataCache.resetBackoffState(&backoffState{bs: defaultBackoffStrategy}) + b.cacheMu.Unlock() + if updatePicker { + b.sendNewPicker() + } + resetBackoffHook() + case resumePickerUpdates: + b.stateMu.Lock() + b.logger.Infof("Resuming picker updates after config propagation to child policies") + b.inhibitPickerUpdates = false + b.sendNewPickerLocked() + close(update.done) + b.stateMu.Unlock() + default: + b.logger.Errorf("Unsupported update type %T", update) + } + case <-b.closed.Done(): + return + } + } +} + +// purgeDataCache is a long-running goroutine which periodically deletes expired +// entries. An expired entry is one for which both the expiryTime and +// backoffExpiryTime are in the past. +func (b *rlsBalancer) purgeDataCache(doneCh chan struct{}) { + defer close(doneCh) + + for { + select { + case <-b.closed.Done(): + return + case <-b.purgeTicker.C: + b.cacheMu.Lock() + updatePicker := b.dataCache.evictExpiredEntries() + b.cacheMu.Unlock() + if updatePicker { + b.sendNewPicker() + } + b.dataCachePurgeHook() + } + } +} + +func (b *rlsBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + defer clientConnUpdateHook() + + b.stateMu.Lock() + if b.closed.HasFired() { + b.stateMu.Unlock() + b.logger.Warningf("Received service config after balancer close: %s", pretty.ToJSON(ccs.BalancerConfig)) + return errBalancerClosed + } + + newCfg := ccs.BalancerConfig.(*lbConfig) + if b.lbCfg.Equal(newCfg) { + b.stateMu.Unlock() + b.logger.Infof("New service config matches existing config") + return nil + } + + b.logger.Infof("Delaying picker updates until config is propagated to and processed by child policies") + b.inhibitPickerUpdates = true + + // When the RLS server name changes, the old control channel needs to be + // swapped out for a new one. 
All state associated with the throttling + // algorithm is stored on a per-control-channel basis; when we swap out + // channels, we also swap out the throttling state. + b.handleControlChannelUpdate(newCfg) + + // Any changes to child policy name or configuration needs to be handled by + // either creating new child policies or pushing updates to existing ones. + b.resolverState = ccs.ResolverState + b.handleChildPolicyConfigUpdate(newCfg, &ccs) + + // Resize the cache if the size in the config has changed. + resizeCache := newCfg.cacheSizeBytes != b.lbCfg.cacheSizeBytes + + // Update the copy of the config in the LB policy before releasing the lock. + b.lbCfg = newCfg + b.stateMu.Unlock() + + // We cannot do cache operations above because `cacheMu` needs to be grabbed + // before `stateMu` if we are to hold both locks at the same time. + b.cacheMu.Lock() + b.dataCache.updateRLSServerTarget(newCfg.lookupService) + if resizeCache { + // If the new config changes reduces the size of the data cache, we + // might have to evict entries to get the cache size down to the newly + // specified size. If we do evict an entry with valid backoff timer, + // the new picker needs to be sent to the channel to re-process any + // RPCs queued as a result of this backoff timer. + b.dataCache.resize(newCfg.cacheSizeBytes) + } + b.cacheMu.Unlock() + // Enqueue an event which will notify us when the above update has been + // propagated to all child policies, and the child policies have all + // processed their updates, and we have sent a picker update. + done := make(chan struct{}) + b.updateCh.Put(resumePickerUpdates{done: done}) + <-done + return nil +} + +// handleControlChannelUpdate handles updates to service config fields which +// influence the control channel to the RLS server. +// +// Caller must hold lb.stateMu. +func (b *rlsBalancer) handleControlChannelUpdate(newCfg *lbConfig) { + if newCfg.lookupService == b.lbCfg.lookupService && newCfg.lookupServiceTimeout == b.lbCfg.lookupServiceTimeout { + return + } + + // Create a new control channel and close the existing one. + b.logger.Infof("Creating control channel to RLS server at: %v", newCfg.lookupService) + backToReadyFn := func() { + b.updateCh.Put(controlChannelReady{}) + } + ctrlCh, err := newControlChannel(newCfg.lookupService, newCfg.controlChannelServiceConfig, newCfg.lookupServiceTimeout, b.bopts, backToReadyFn) + if err != nil { + // This is very uncommon and usually represents a non-transient error. + // There is not much we can do here other than wait for another update + // which might fix things. + b.logger.Errorf("Failed to create control channel to %q: %v", newCfg.lookupService, err) + return + } + if b.ctrlCh != nil { + b.ctrlCh.close() + } + b.ctrlCh = ctrlCh +} + +// handleChildPolicyConfigUpdate handles updates to service config fields which +// influence child policy configuration. +// +// Caller must hold lb.stateMu. +func (b *rlsBalancer) handleChildPolicyConfigUpdate(newCfg *lbConfig, ccs *balancer.ClientConnState) { + // Update child policy builder first since other steps are dependent on this. + if b.childPolicyBuilder == nil || b.childPolicyBuilder.Name() != newCfg.childPolicyName { + b.logger.Infof("Child policy changed to %q", newCfg.childPolicyName) + b.childPolicyBuilder = balancer.Get(newCfg.childPolicyName) + for _, cpw := range b.childPolicies { + // If the child policy has changed, we need to remove the old policy + // from the BalancerGroup and add a new one. 
The BalancerGroup takes + // care of closing the old one in this case. + b.bg.Remove(cpw.target) + b.bg.Add(cpw.target, b.childPolicyBuilder) + } + } + + configSentToDefault := false + if b.lbCfg.defaultTarget != newCfg.defaultTarget { + // If the default target has changed, create a new childPolicyWrapper for + // the new target if required. If a new wrapper is created, add it to the + // childPolicies map and the BalancerGroup. + b.logger.Infof("Default target in LB config changing from %q to %q", b.lbCfg.defaultTarget, newCfg.defaultTarget) + cpw := b.childPolicies[newCfg.defaultTarget] + if cpw == nil { + cpw = newChildPolicyWrapper(newCfg.defaultTarget) + b.childPolicies[newCfg.defaultTarget] = cpw + b.bg.Add(newCfg.defaultTarget, b.childPolicyBuilder) + b.logger.Infof("Child policy %q added to BalancerGroup", newCfg.defaultTarget) + } + if err := b.buildAndPushChildPolicyConfigs(newCfg.defaultTarget, newCfg, ccs); err != nil { + cpw.lamify(err) + } + + // If an old default exists, release its reference. If this was the last + // reference, remove the child policy from the BalancerGroup and remove the + // corresponding entry the childPolicies map. + if b.defaultPolicy != nil { + if b.defaultPolicy.releaseRef() { + delete(b.childPolicies, b.lbCfg.defaultTarget) + b.bg.Remove(b.defaultPolicy.target) + } + } + b.defaultPolicy = cpw + configSentToDefault = true + } + + // No change in configuration affecting child policies. Return early. + if b.lbCfg.childPolicyName == newCfg.childPolicyName && b.lbCfg.childPolicyTargetField == newCfg.childPolicyTargetField && childPolicyConfigEqual(b.lbCfg.childPolicyConfig, newCfg.childPolicyConfig) { + return + } + + // If fields affecting child policy configuration have changed, the changes + // are pushed to the childPolicyWrapper which handles them appropriately. + for _, cpw := range b.childPolicies { + if configSentToDefault && cpw.target == newCfg.defaultTarget { + // Default target has already been taken care of. + continue + } + if err := b.buildAndPushChildPolicyConfigs(cpw.target, newCfg, ccs); err != nil { + cpw.lamify(err) + } + } +} + +// buildAndPushChildPolicyConfigs builds the final child policy configuration by +// adding the `targetField` to the base child policy configuration received in +// RLS LB policy configuration. The `targetField` is set to target and +// configuration is pushed to the child policy through the BalancerGroup. +// +// Caller must hold lb.stateMu. 
+func (b *rlsBalancer) buildAndPushChildPolicyConfigs(target string, newCfg *lbConfig, ccs *balancer.ClientConnState) error { + jsonTarget, err := json.Marshal(target) + if err != nil { + return fmt.Errorf("failed to marshal child policy target %q: %v", target, err) + } + + config := newCfg.childPolicyConfig + targetField := newCfg.childPolicyTargetField + config[targetField] = jsonTarget + jsonCfg, err := json.Marshal(config) + if err != nil { + return fmt.Errorf("failed to marshal child policy config %+v: %v", config, err) + } + + parser, _ := b.childPolicyBuilder.(balancer.ConfigParser) + parsedCfg, err := parser.ParseConfig(jsonCfg) + if err != nil { + return fmt.Errorf("childPolicy config parsing failed: %v", err) + } + + state := balancer.ClientConnState{ResolverState: ccs.ResolverState, BalancerConfig: parsedCfg} + b.logger.Infof("Pushing new state to child policy %q: %+v", target, state) + if err := b.bg.UpdateClientConnState(target, state); err != nil { + b.logger.Warningf("UpdateClientConnState(%q, %+v) failed : %v", target, ccs, err) + } + return nil +} + +func (b *rlsBalancer) ResolverError(err error) { + b.bg.ResolverError(err) +} + +func (b *rlsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *rlsBalancer) Close() { + b.stateMu.Lock() + b.closed.Fire() + b.purgeTicker.Stop() + if b.ctrlCh != nil { + b.ctrlCh.close() + } + b.bg.Close() + b.stateMu.Unlock() + + b.cacheMu.Lock() + b.dataCache.stop() + b.cacheMu.Unlock() + + b.updateCh.Close() + + <-b.done.Done() +} + +func (b *rlsBalancer) ExitIdle() { + b.bg.ExitIdle() +} + +// sendNewPickerLocked pushes a new picker on to the channel. +// +// Note that regardless of what connectivity state is reported, the policy will +// return its own picker, and not a picker that unconditionally queues +// (typically used for IDLE or CONNECTING) or a picker that unconditionally +// fails (typically used for TRANSIENT_FAILURE). This is required because, +// irrespective of the connectivity state, we need to able to perform RLS +// lookups for incoming RPCs and affect the status of queued RPCs based on the +// receipt of RLS responses. +// +// Caller must hold lb.stateMu. +func (b *rlsBalancer) sendNewPickerLocked() { + aggregatedState := b.aggregatedConnectivityState() + + // Acquire a separate reference for the picker. This is required to ensure + // that the wrapper held by the old picker is not closed when the default + // target changes in the config, and a new wrapper is created for the new + // default target. See handleChildPolicyConfigUpdate() for how config changes + // affecting the default target are handled. 
+ if b.defaultPolicy != nil { + b.defaultPolicy.acquireRef() + } + + picker := &rlsPicker{ + kbm: b.lbCfg.kbMap, + origEndpoint: b.bopts.Target.Endpoint(), + lb: b, + defaultPolicy: b.defaultPolicy, + ctrlCh: b.ctrlCh, + maxAge: b.lbCfg.maxAge, + staleAge: b.lbCfg.staleAge, + bg: b.bg, + rlsServerTarget: b.lbCfg.lookupService, + grpcTarget: b.bopts.Target.String(), + metricsRecorder: b.bopts.MetricsRecorder, + } + picker.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-picker %p] ", picker)) + state := balancer.State{ + ConnectivityState: aggregatedState, + Picker: picker, + } + + if !b.inhibitPickerUpdates { + b.logger.Infof("New balancer.State: %+v", state) + b.cc.UpdateState(state) + } else { + b.logger.Infof("Delaying picker update: %+v", state) + } + + if b.lastPicker != nil { + if b.defaultPolicy != nil { + b.defaultPolicy.releaseRef() + } + } + b.lastPicker = picker +} + +func (b *rlsBalancer) sendNewPicker() { + b.stateMu.Lock() + defer b.stateMu.Unlock() + if b.closed.HasFired() { + return + } + b.sendNewPickerLocked() +} + +// The aggregated connectivity state reported is determined as follows: +// - If there is at least one child policy in state READY, the connectivity +// state is READY. +// - Otherwise, if there is at least one child policy in state CONNECTING, the +// connectivity state is CONNECTING. +// - Otherwise, if there is at least one child policy in state IDLE, the +// connectivity state is IDLE. +// - Otherwise, all child policies are in TRANSIENT_FAILURE, and the +// connectivity state is TRANSIENT_FAILURE. +// +// If the RLS policy has no child policies and no configured default target, +// then we will report connectivity state IDLE. +// +// Caller must hold lb.stateMu. +func (b *rlsBalancer) aggregatedConnectivityState() connectivity.State { + if len(b.childPolicies) == 0 && b.lbCfg.defaultTarget == "" { + return connectivity.Idle + } + + var readyN, connectingN, idleN int + for _, cpw := range b.childPolicies { + state := (*balancer.State)(atomic.LoadPointer(&cpw.state)) + switch state.ConnectivityState { + case connectivity.Ready: + readyN++ + case connectivity.Connecting: + connectingN++ + case connectivity.Idle: + idleN++ + } + } + + switch { + case readyN > 0: + return connectivity.Ready + case connectingN > 0: + return connectivity.Connecting + case idleN > 0: + return connectivity.Idle + default: + return connectivity.TransientFailure + } +} + +// UpdateState is a implementation of the balancergroup.BalancerStateAggregator +// interface. The actual state aggregation functionality is handled +// asynchronously. This method only pushes the state update on to channel read +// and dispatched by the run() goroutine. +func (b *rlsBalancer) UpdateState(id string, state balancer.State) { + b.updateCh.Put(childPolicyIDAndState{id: id, state: state}) +} + +// handleChildPolicyStateUpdate provides the state aggregator functionality for +// the BalancerGroup. +// +// This method is invoked by the BalancerGroup whenever a child policy sends a +// state update. We cache the child policy's connectivity state and picker for +// two reasons: +// - to suppress connectivity state transitions from TRANSIENT_FAILURE to states +// other than READY +// - to delegate picks to child policies +func (b *rlsBalancer) handleChildPolicyStateUpdate(id string, newState balancer.State) { + b.stateMu.Lock() + defer b.stateMu.Unlock() + + cpw := b.childPolicies[id] + if cpw == nil { + // All child policies start with an entry in the map. 
If ID is not in + // map, it's either been removed, or never existed. + b.logger.Warningf("Received state update %+v for missing child policy %q", newState, id) + return + } + + oldState := (*balancer.State)(atomic.LoadPointer(&cpw.state)) + if oldState.ConnectivityState == connectivity.TransientFailure && newState.ConnectivityState == connectivity.Connecting { + // Ignore state transitions from TRANSIENT_FAILURE to CONNECTING, and thus + // fail pending RPCs instead of queuing them indefinitely when all + // subChannels are failing, even if the subChannels are bouncing back and + // forth between CONNECTING and TRANSIENT_FAILURE. + return + } + atomic.StorePointer(&cpw.state, unsafe.Pointer(&newState)) + b.logger.Infof("Child policy %q has new state %+v", id, newState) + b.sendNewPickerLocked() +} + +// acquireChildPolicyReferences attempts to acquire references to +// childPolicyWrappers corresponding to the passed in targets. If there is no +// childPolicyWrapper corresponding to one of the targets, a new one is created +// and added to the BalancerGroup. +func (b *rlsBalancer) acquireChildPolicyReferences(targets []string) []*childPolicyWrapper { + b.stateMu.Lock() + var newChildPolicies []*childPolicyWrapper + for _, target := range targets { + // If the target exists in the LB policy's childPolicies map. a new + // reference is taken here and added to the new list. + if cpw := b.childPolicies[target]; cpw != nil { + cpw.acquireRef() + newChildPolicies = append(newChildPolicies, cpw) + continue + } + + // If the target does not exist in the child policy map, then a new + // child policy wrapper is created and added to the new list. + cpw := newChildPolicyWrapper(target) + b.childPolicies[target] = cpw + b.bg.Add(target, b.childPolicyBuilder) + b.logger.Infof("Child policy %q added to BalancerGroup", target) + newChildPolicies = append(newChildPolicies, cpw) + if err := b.buildAndPushChildPolicyConfigs(target, b.lbCfg, &balancer.ClientConnState{ + ResolverState: b.resolverState, + }); err != nil { + cpw.lamify(err) + } + } + b.stateMu.Unlock() + return newChildPolicies +} + +// releaseChildPolicyReferences releases references to childPolicyWrappers +// corresponding to the passed in targets. If the release reference was the last +// one, the child policy is removed from the BalancerGroup. +func (b *rlsBalancer) releaseChildPolicyReferences(targets []string) { + b.stateMu.Lock() + for _, target := range targets { + if cpw := b.childPolicies[target]; cpw.releaseRef() { + delete(b.childPolicies, cpw.target) + b.bg.Remove(cpw.target) + } + } + b.stateMu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/balancer/rls/cache.go b/vendor/google.golang.org/grpc/balancer/rls/cache.go new file mode 100644 index 0000000000..7fe796c958 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/rls/cache.go @@ -0,0 +1,383 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package rls + +import ( + "container/list" + "time" + + "github.com/google/uuid" + estats "google.golang.org/grpc/experimental/stats" + "google.golang.org/grpc/internal/backoff" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" +) + +// cacheKey represents the key used to uniquely identify an entry in the data +// cache and in the pending requests map. +type cacheKey struct { + // path is the full path of the incoming RPC request. + path string + // keys is a stringified version of the RLS request key map built using the + // RLS keyBuilder. Since maps are not a type which is comparable in Go, it + // cannot be part of the key for another map (entries in the data cache and + // pending requests map are stored in maps). + keys string +} + +// cacheEntry wraps all the data to be stored in a data cache entry. +type cacheEntry struct { + // childPolicyWrappers contains the list of child policy wrappers + // corresponding to the targets returned by the RLS server for this entry. + childPolicyWrappers []*childPolicyWrapper + // headerData is received in the RLS response and is to be sent in the + // X-Google-RLS-Data header for matching RPCs. + headerData string + // expiryTime is the absolute time at which this cache entry stops + // being valid. When an RLS request succeeds, this is set to the current + // time plus the max_age field from the LB policy config. + expiryTime time.Time + // staleTime is the absolute time after which this cache entry will be + // proactively refreshed if an incoming RPC matches this entry. When an RLS + // request succeeds, this is set to the current time plus the stale_age from + // the LB policy config. + staleTime time.Time + // earliestEvictTime is the absolute time before which this entry should not + // be evicted from the cache. When a cache entry is created, this is set to + // the current time plus a default value of 5 seconds. This is required to + // make sure that a new entry added to the cache is not evicted before the + // RLS response arrives (usually when the cache is too small). + earliestEvictTime time.Time + + // status stores the RPC status of the previous RLS request for this + // entry. Picks for entries with a non-nil value for this field are failed + // with the error stored here. + status error + // backoffState contains all backoff related state. When an RLS request + // succeeds, backoffState is reset. This state moves between the data cache + // and the pending requests map. + backoffState *backoffState + // backoffTime is the absolute time at which the backoff period for this + // entry ends. When an RLS request fails, this is set to the current time + // plus the backoff value returned by the backoffState. The backoff timer is + // also setup with this value. No new RLS requests are sent out for this + // entry until the backoff period ends. + // + // Set to zero time instant upon a successful RLS response. + backoffTime time.Time + // backoffExpiryTime is the absolute time at which an entry which has gone + // through backoff stops being valid. When an RLS request fails, this is + // set to the current time plus twice the backoff time. The cache expiry + // timer will only delete entries for which both expiryTime and + // backoffExpiryTime are in the past. + // + // Set to zero time instant upon a successful RLS response. + backoffExpiryTime time.Time + + // size stores the size of this cache entry. 
Used to enforce the cache size + // specified in the LB policy configuration. + size int64 +} + +// backoffState wraps all backoff related state associated with a cache entry. +type backoffState struct { + // retries keeps track of the number of RLS failures, to be able to + // determine the amount of time to backoff before the next attempt. + retries int + // bs is the exponential backoff implementation which returns the amount of + // time to backoff, given the number of retries. + bs backoff.Strategy + // timer fires when the backoff period ends and incoming requests after this + // will trigger a new RLS request. + timer *time.Timer +} + +// lru is a cache implementation with a least recently used eviction policy. +// Internally it uses a doubly linked list, with the least recently used element +// at the front of the list and the most recently used element at the back of +// the list. The value stored in this cache will be of type `cacheKey`. +// +// It is not safe for concurrent access. +type lru struct { + ll *list.List + + // A map from the value stored in the lru to its underlying list element is + // maintained to have a clean API. Without this, a subset of the lru's API + // would accept/return cacheKey while another subset would accept/return + // list elements. + m map[cacheKey]*list.Element +} + +// newLRU creates a new cache with a least recently used eviction policy. +func newLRU() *lru { + return &lru{ + ll: list.New(), + m: make(map[cacheKey]*list.Element), + } +} + +func (l *lru) addEntry(key cacheKey) { + e := l.ll.PushBack(key) + l.m[key] = e +} + +func (l *lru) makeRecent(key cacheKey) { + e := l.m[key] + l.ll.MoveToBack(e) +} + +func (l *lru) removeEntry(key cacheKey) { + e := l.m[key] + l.ll.Remove(e) + delete(l.m, key) +} + +func (l *lru) getLeastRecentlyUsed() cacheKey { + e := l.ll.Front() + if e == nil { + return cacheKey{} + } + return e.Value.(cacheKey) +} + +// dataCache contains a cache of RLS data used by the LB policy to make routing +// decisions. +// +// The dataCache will be keyed by the request's path and keys, represented by +// the `cacheKey` type. It will maintain the cache keys in an `lru` and the +// cache data, represented by the `cacheEntry` type, in a native map. +// +// It is not safe for concurrent access. +type dataCache struct { + maxSize int64 // Maximum allowed size. + currentSize int64 // Current size. + keys *lru // Cache keys maintained in lru order. + entries map[cacheKey]*cacheEntry + logger *internalgrpclog.PrefixLogger + shutdown *grpcsync.Event + rlsServerTarget string + + // Read only after initialization. + grpcTarget string + uuid string + metricsRecorder estats.MetricsRecorder +} + +func newDataCache(size int64, logger *internalgrpclog.PrefixLogger, metricsRecorder estats.MetricsRecorder, grpcTarget string) *dataCache { + return &dataCache{ + maxSize: size, + keys: newLRU(), + entries: make(map[cacheKey]*cacheEntry), + logger: logger, + shutdown: grpcsync.NewEvent(), + grpcTarget: grpcTarget, + uuid: uuid.New().String(), + metricsRecorder: metricsRecorder, + } +} + +// updateRLSServerTarget updates the RLS Server Target the RLS Balancer is +// configured with. +func (dc *dataCache) updateRLSServerTarget(rlsServerTarget string) { + dc.rlsServerTarget = rlsServerTarget +} + +// resize changes the maximum allowed size of the data cache. +// +// The return value indicates if an entry with a valid backoff timer was +// evicted. 
This is important to the RLS LB policy which would send a new picker +// on the channel to re-process any RPCs queued as a result of this backoff +// timer. +func (dc *dataCache) resize(size int64) (backoffCancelled bool) { + if dc.shutdown.HasFired() { + return false + } + + backoffCancelled = false + for dc.currentSize > size { + key := dc.keys.getLeastRecentlyUsed() + entry, ok := dc.entries[key] + if !ok { + // This should never happen. + dc.logger.Errorf("cacheKey %+v not found in the cache while attempting to resize it", key) + break + } + + // When we encounter a cache entry whose minimum expiration time is in + // the future, we abort the LRU pass, which may temporarily leave the + // cache being too large. This is necessary to ensure that in cases + // where the cache is too small, when we receive an RLS Response, we + // keep the resulting cache entry around long enough for the pending + // incoming requests to be re-processed through the new Picker. If we + // didn't do this, then we'd risk throwing away each RLS response as we + // receive it, in which case we would fail to actually route any of our + // incoming requests. + if entry.earliestEvictTime.After(time.Now()) { + dc.logger.Warningf("cachekey %+v is too recent to be evicted. Stopping cache resizing for now", key) + break + } + + // Stop the backoff timer before evicting the entry. + if entry.backoffState != nil && entry.backoffState.timer != nil { + if entry.backoffState.timer.Stop() { + entry.backoffState.timer = nil + backoffCancelled = true + } + } + dc.deleteAndCleanup(key, entry) + } + dc.maxSize = size + return backoffCancelled +} + +// evictExpiredEntries sweeps through the cache and deletes expired entries. An +// expired entry is one for which both the `expiryTime` and `backoffExpiryTime` +// fields are in the past. +// +// The return value indicates if any expired entries were evicted. +// +// The LB policy invokes this method periodically to purge expired entries. +func (dc *dataCache) evictExpiredEntries() bool { + if dc.shutdown.HasFired() { + return false + } + + evicted := false + for key, entry := range dc.entries { + // Only evict entries for which both the data expiration time and + // backoff expiration time fields are in the past. + now := time.Now() + if entry.expiryTime.After(now) || entry.backoffExpiryTime.After(now) { + continue + } + dc.deleteAndCleanup(key, entry) + evicted = true + } + return evicted +} + +// resetBackoffState sweeps through the cache and for entries with a backoff +// state, the backoff timer is cancelled and the backoff state is reset. The +// return value indicates if any entries were mutated in this fashion. +// +// The LB policy invokes this method when the control channel moves from READY +// to TRANSIENT_FAILURE back to READY. See `monitorConnectivityState` method on +// the `controlChannel` type for more details. +func (dc *dataCache) resetBackoffState(newBackoffState *backoffState) bool { + if dc.shutdown.HasFired() { + return false + } + + backoffReset := false + for _, entry := range dc.entries { + if entry.backoffState == nil { + continue + } + if entry.backoffState.timer != nil { + entry.backoffState.timer.Stop() + entry.backoffState.timer = nil + } + entry.backoffState = &backoffState{bs: newBackoffState.bs} + entry.backoffTime = time.Time{} + entry.backoffExpiryTime = time.Time{} + backoffReset = true + } + return backoffReset +} + +// addEntry adds a cache entry for the given key. 
+// +// Return value backoffCancelled indicates if a cache entry with a valid backoff +// timer was evicted to make space for the current entry. This is important to +// the RLS LB policy which would send a new picker on the channel to re-process +// any RPCs queued as a result of this backoff timer. +// +// Return value ok indicates if entry was successfully added to the cache. +func (dc *dataCache) addEntry(key cacheKey, entry *cacheEntry) (backoffCancelled bool, ok bool) { + if dc.shutdown.HasFired() { + return false, false + } + + // Handle the extremely unlikely case that a single entry is bigger than the + // size of the cache. + if entry.size > dc.maxSize { + return false, false + } + dc.entries[key] = entry + dc.currentSize += entry.size + dc.keys.addEntry(key) + // If the new entry makes the cache go over its configured size, remove some + // old entries. + if dc.currentSize > dc.maxSize { + backoffCancelled = dc.resize(dc.maxSize) + } + cacheSizeMetric.Record(dc.metricsRecorder, dc.currentSize, dc.grpcTarget, dc.rlsServerTarget, dc.uuid) + cacheEntriesMetric.Record(dc.metricsRecorder, int64(len(dc.entries)), dc.grpcTarget, dc.rlsServerTarget, dc.uuid) + return backoffCancelled, true +} + +// updateEntrySize updates the size of a cache entry and the current size of the +// data cache. An entry's size can change upon receipt of an RLS response. +func (dc *dataCache) updateEntrySize(entry *cacheEntry, newSize int64) { + dc.currentSize -= entry.size + entry.size = newSize + dc.currentSize += entry.size + cacheSizeMetric.Record(dc.metricsRecorder, dc.currentSize, dc.grpcTarget, dc.rlsServerTarget, dc.uuid) +} + +func (dc *dataCache) getEntry(key cacheKey) *cacheEntry { + if dc.shutdown.HasFired() { + return nil + } + + entry, ok := dc.entries[key] + if !ok { + return nil + } + dc.keys.makeRecent(key) + return entry +} + +func (dc *dataCache) removeEntryForTesting(key cacheKey) { + entry, ok := dc.entries[key] + if !ok { + return + } + dc.deleteAndCleanup(key, entry) +} + +// deleteAndCleanup performs actions required at the time of deleting an entry +// from the data cache. +// - the entry is removed from the map of entries +// - current size of the data cache is update +// - the key is removed from the LRU +func (dc *dataCache) deleteAndCleanup(key cacheKey, entry *cacheEntry) { + delete(dc.entries, key) + dc.currentSize -= entry.size + dc.keys.removeEntry(key) + cacheSizeMetric.Record(dc.metricsRecorder, dc.currentSize, dc.grpcTarget, dc.rlsServerTarget, dc.uuid) + cacheEntriesMetric.Record(dc.metricsRecorder, int64(len(dc.entries)), dc.grpcTarget, dc.rlsServerTarget, dc.uuid) +} + +func (dc *dataCache) stop() { + for key, entry := range dc.entries { + dc.deleteAndCleanup(key, entry) + } + dc.shutdown.Fire() +} diff --git a/vendor/google.golang.org/grpc/balancer/rls/child_policy.go b/vendor/google.golang.org/grpc/balancer/rls/child_policy.go new file mode 100644 index 0000000000..c74184cac2 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/rls/child_policy.go @@ -0,0 +1,109 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package rls + +import ( + "fmt" + "sync/atomic" + "unsafe" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +// childPolicyWrapper is a reference counted wrapper around a child policy. +// +// The LB policy maintains a map of these wrappers keyed by the target returned +// by RLS. When a target is seen for the first time, a child policy wrapper is +// created for it and the wrapper is added to the child policy map. Each entry +// in the data cache holds references to the corresponding child policy +// wrappers. The LB policy also holds a reference to the child policy wrapper +// for the default target specified in the LB Policy Configuration +// +// When a cache entry is evicted, it releases references to the child policy +// wrappers that it contains. When all references have been released, the +// wrapper is removed from the child policy map and is destroyed. +// +// The child policy wrapper also caches the connectivity state and most recent +// picker from the child policy. Once the child policy wrapper reports +// TRANSIENT_FAILURE, it will continue reporting that state until it goes READY; +// transitions from TRANSIENT_FAILURE to CONNECTING are ignored. +// +// Whenever a child policy wrapper changes its connectivity state, the LB policy +// returns a new picker to the channel, since the channel may need to re-process +// the picks for queued RPCs. +// +// It is not safe for concurrent access. +type childPolicyWrapper struct { + logger *internalgrpclog.PrefixLogger + target string // RLS target corresponding to this child policy. + refCnt int // Reference count. + + // Balancer state reported by the child policy. The RLS LB policy maintains + // these child policies in a BalancerGroup. The state reported by the child + // policy is pushed to the state aggregator (which is also implemented by the + // RLS LB policy) and cached here. See handleChildPolicyStateUpdate() for + // details on how the state aggregation is performed. + // + // While this field is written to by the LB policy, it is read by the picker + // at Pick time. Making this an atomic to enable the picker to read this value + // without a mutex. + state unsafe.Pointer // *balancer.State +} + +// newChildPolicyWrapper creates a child policy wrapper for the given target, +// and is initialized with one reference and starts off in CONNECTING state. +func newChildPolicyWrapper(target string) *childPolicyWrapper { + c := &childPolicyWrapper{ + target: target, + refCnt: 1, + state: unsafe.Pointer(&balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }), + } + c.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-child-policy-wrapper %s %p] ", c.target, c)) + c.logger.Infof("Created") + return c +} + +// acquireRef increments the reference count on the child policy wrapper. +func (c *childPolicyWrapper) acquireRef() { + c.refCnt++ +} + +// releaseRef decrements the reference count on the child policy wrapper. The +// return value indicates whether the released reference was the last one. +func (c *childPolicyWrapper) releaseRef() bool { + c.refCnt-- + return c.refCnt == 0 +} + +// lamify causes the child policy wrapper to return a picker which will always +// fail requests. 
This is used when the wrapper runs into errors when trying to +// build and parse the child policy configuration. +func (c *childPolicyWrapper) lamify(err error) { + c.logger.Warningf("Entering lame mode: %v", err) + atomic.StorePointer(&c.state, unsafe.Pointer(&balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(err), + })) +} diff --git a/vendor/google.golang.org/grpc/balancer/rls/config.go b/vendor/google.golang.org/grpc/balancer/rls/config.go new file mode 100644 index 0000000000..439581c78b --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/rls/config.go @@ -0,0 +1,311 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package rls + +import ( + "bytes" + "encoding/json" + "fmt" + "net/url" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/rls/internal/keys" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/pretty" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + // Default max_age if not specified (or greater than this value) in the + // service config. + maxMaxAge = 5 * time.Minute + // Upper limit for cache_size since we don't fully trust the service config. + maxCacheSize = 5 * 1024 * 1024 * 8 // 5MB in bytes + // Default lookup_service_timeout if not specified in the service config. + defaultLookupServiceTimeout = 10 * time.Second + // Default value for targetNameField in the child policy config during + // service config validation. + dummyChildPolicyTarget = "target_name_to_be_filled_in_later" +) + +// lbConfig is the internal representation of the RLS LB policy's config. +type lbConfig struct { + serviceconfig.LoadBalancingConfig + + cacheSizeBytes int64 // Keep this field 64-bit aligned. 
+ kbMap keys.BuilderMap + lookupService string + lookupServiceTimeout time.Duration + maxAge time.Duration + staleAge time.Duration + defaultTarget string + + childPolicyName string + childPolicyConfig map[string]json.RawMessage + childPolicyTargetField string + controlChannelServiceConfig string +} + +func (lbCfg *lbConfig) Equal(other *lbConfig) bool { + return lbCfg.kbMap.Equal(other.kbMap) && + lbCfg.lookupService == other.lookupService && + lbCfg.lookupServiceTimeout == other.lookupServiceTimeout && + lbCfg.maxAge == other.maxAge && + lbCfg.staleAge == other.staleAge && + lbCfg.cacheSizeBytes == other.cacheSizeBytes && + lbCfg.defaultTarget == other.defaultTarget && + lbCfg.childPolicyName == other.childPolicyName && + lbCfg.childPolicyTargetField == other.childPolicyTargetField && + lbCfg.controlChannelServiceConfig == other.controlChannelServiceConfig && + childPolicyConfigEqual(lbCfg.childPolicyConfig, other.childPolicyConfig) +} + +func childPolicyConfigEqual(a, b map[string]json.RawMessage) bool { + if (b == nil) != (a == nil) { + return false + } + if len(b) != len(a) { + return false + } + for k, jsonA := range a { + jsonB, ok := b[k] + if !ok { + return false + } + if !bytes.Equal(jsonA, jsonB) { + return false + } + } + return true +} + +// This struct resembles the JSON representation of the loadBalancing config +// and makes it easier to unmarshal. +type lbConfigJSON struct { + RouteLookupConfig json.RawMessage + RouteLookupChannelServiceConfig json.RawMessage + ChildPolicy []map[string]json.RawMessage + ChildPolicyConfigTargetFieldName string +} + +// ParseConfig parses the JSON load balancer config provided into an +// internal form or returns an error if the config is invalid. +// +// When parsing a config update, the following validations are performed: +// - routeLookupConfig: +// - grpc_keybuilders field: +// - must have at least one entry +// - must not have two entries with the same `Name` +// - within each entry: +// - must have at least one `Name` +// - must not have a `Name` with the `service` field unset or empty +// - within each `headers` entry: +// - must not have `required_match` set +// - must not have `key` unset or empty +// - across all `headers`, `constant_keys` and `extra_keys` fields: +// - must not have the same `key` specified twice +// - no `key` must be the empty string +// - `lookup_service` field must be set and must parse as a target URI +// - if `max_age` > 5m, it should be set to 5 minutes +// - if `stale_age` > `max_age`, ignore it +// - if `stale_age` is set, then `max_age` must also be set +// - ignore `valid_targets` field +// - `cache_size_bytes` field must have a value greater than 0, and if its +// value is greater than 5M, we cap it at 5M +// +// - routeLookupChannelServiceConfig: +// - if specified, must parse as valid service config +// +// - childPolicy: +// - must find a valid child policy with a valid config +// +// - childPolicyConfigTargetFieldName: +// - must be set and non-empty +func (rlsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + logger.Infof("Received JSON service config: %v", pretty.ToJSON(c)) + cfgJSON := &lbConfigJSON{} + if err := json.Unmarshal(c, cfgJSON); err != nil { + return nil, fmt.Errorf("rls: json unmarshal failed for service config %+v: %v", string(c), err) + } + + m := protojson.UnmarshalOptions{DiscardUnknown: true} + rlsProto := &rlspb.RouteLookupConfig{} + if err := m.Unmarshal(cfgJSON.RouteLookupConfig, rlsProto); err != nil { + return nil, fmt.Errorf("rls: bad 
RouteLookupConfig proto %+v: %v", string(cfgJSON.RouteLookupConfig), err) + } + lbCfg, err := parseRLSProto(rlsProto) + if err != nil { + return nil, err + } + + if sc := string(cfgJSON.RouteLookupChannelServiceConfig); sc != "" { + parsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(sc) + if parsed.Err != nil { + return nil, fmt.Errorf("rls: bad control channel service config %q: %v", sc, parsed.Err) + } + lbCfg.controlChannelServiceConfig = sc + } + + if cfgJSON.ChildPolicyConfigTargetFieldName == "" { + return nil, fmt.Errorf("rls: childPolicyConfigTargetFieldName field is not set in service config %+v", string(c)) + } + name, config, err := parseChildPolicyConfigs(cfgJSON.ChildPolicy, cfgJSON.ChildPolicyConfigTargetFieldName) + if err != nil { + return nil, err + } + lbCfg.childPolicyName = name + lbCfg.childPolicyConfig = config + lbCfg.childPolicyTargetField = cfgJSON.ChildPolicyConfigTargetFieldName + return lbCfg, nil +} + +func parseRLSProto(rlsProto *rlspb.RouteLookupConfig) (*lbConfig, error) { + // Validations specified on the `grpc_keybuilders` field are performed here. + kbMap, err := keys.MakeBuilderMap(rlsProto) + if err != nil { + return nil, err + } + + // `lookup_service` field must be set and must parse as a target URI. + lookupService := rlsProto.GetLookupService() + if lookupService == "" { + return nil, fmt.Errorf("rls: empty lookup_service in route lookup config %+v", rlsProto) + } + parsedTarget, err := url.Parse(lookupService) + if err != nil { + // url.Parse() fails if scheme is missing. Retry with default scheme. + parsedTarget, err = url.Parse(resolver.GetDefaultScheme() + ":///" + lookupService) + if err != nil { + return nil, fmt.Errorf("rls: invalid target URI in lookup_service %s", lookupService) + } + } + if parsedTarget.Scheme == "" { + parsedTarget.Scheme = resolver.GetDefaultScheme() + } + if resolver.Get(parsedTarget.Scheme) == nil { + return nil, fmt.Errorf("rls: unregistered scheme in lookup_service %s", lookupService) + } + + lookupServiceTimeout, err := convertDuration(rlsProto.GetLookupServiceTimeout()) + if err != nil { + return nil, fmt.Errorf("rls: failed to parse lookup_service_timeout in route lookup config %+v: %v", rlsProto, err) + } + if lookupServiceTimeout == 0 { + lookupServiceTimeout = defaultLookupServiceTimeout + } + + // Validations performed here: + // - if `max_age` > 5m, it should be set to 5 minutes + // - if `stale_age` > `max_age`, ignore it + // - if `stale_age` is set, then `max_age` must also be set + maxAge, err := convertDuration(rlsProto.GetMaxAge()) + if err != nil { + return nil, fmt.Errorf("rls: failed to parse max_age in route lookup config %+v: %v", rlsProto, err) + } + staleAge, err := convertDuration(rlsProto.GetStaleAge()) + if err != nil { + return nil, fmt.Errorf("rls: failed to parse staleAge in route lookup config %+v: %v", rlsProto, err) + } + if staleAge != 0 && maxAge == 0 { + return nil, fmt.Errorf("rls: stale_age is set, but max_age is not in route lookup config %+v", rlsProto) + } + if staleAge >= maxAge { + logger.Infof("rls: stale_age %v is not less than max_age %v, ignoring it", staleAge, maxAge) + staleAge = 0 + } + if maxAge == 0 || maxAge > maxMaxAge { + logger.Infof("rls: max_age in route lookup config is %v, using %v", maxAge, maxMaxAge) + maxAge = maxMaxAge + } + + // `cache_size_bytes` field must have a value greater than 0, and if its + // value is greater than 5M, we cap it at 5M + cacheSizeBytes := rlsProto.GetCacheSizeBytes() + if cacheSizeBytes <= 0 { + return 
nil, fmt.Errorf("rls: cache_size_bytes must be set to a non-zero value: %+v", rlsProto) + } + if cacheSizeBytes > maxCacheSize { + logger.Info("rls: cache_size_bytes %v is too large, setting it to: %v", cacheSizeBytes, maxCacheSize) + cacheSizeBytes = maxCacheSize + } + return &lbConfig{ + kbMap: kbMap, + lookupService: lookupService, + lookupServiceTimeout: lookupServiceTimeout, + maxAge: maxAge, + staleAge: staleAge, + cacheSizeBytes: cacheSizeBytes, + defaultTarget: rlsProto.GetDefaultTarget(), + }, nil +} + +// parseChildPolicyConfigs iterates through the list of child policies and picks +// the first registered policy and validates its config. +func parseChildPolicyConfigs(childPolicies []map[string]json.RawMessage, targetFieldName string) (string, map[string]json.RawMessage, error) { + for i, config := range childPolicies { + if len(config) != 1 { + return "", nil, fmt.Errorf("rls: invalid childPolicy: entry %v does not contain exactly 1 policy/config pair: %q", i, config) + } + + var name string + var rawCfg json.RawMessage + for name, rawCfg = range config { + } + builder := balancer.Get(name) + if builder == nil { + continue + } + parser, ok := builder.(balancer.ConfigParser) + if !ok { + return "", nil, fmt.Errorf("rls: childPolicy %q with config %q does not support config parsing", name, string(rawCfg)) + } + + // To validate child policy configs we do the following: + // - unmarshal the raw JSON bytes of the child policy config into a map + // - add an entry with key set to `target_field_name` and a dummy value + // - marshal the map back to JSON and parse the config using the parser + // retrieved previously + var childConfig map[string]json.RawMessage + if err := json.Unmarshal(rawCfg, &childConfig); err != nil { + return "", nil, fmt.Errorf("rls: json unmarshal failed for child policy config %q: %v", string(rawCfg), err) + } + childConfig[targetFieldName], _ = json.Marshal(dummyChildPolicyTarget) + jsonCfg, err := json.Marshal(childConfig) + if err != nil { + return "", nil, fmt.Errorf("rls: json marshal failed for child policy config {%+v}: %v", childConfig, err) + } + if _, err := parser.ParseConfig(jsonCfg); err != nil { + return "", nil, fmt.Errorf("rls: childPolicy config validation failed: %v", err) + } + return name, childConfig, nil + } + return "", nil, fmt.Errorf("rls: invalid childPolicy config: no supported policies found in %+v", childPolicies) +} + +func convertDuration(d *durationpb.Duration) (time.Duration, error) { + if d == nil { + return 0, nil + } + return d.AsDuration(), d.CheckValid() +} diff --git a/vendor/google.golang.org/grpc/balancer/rls/control_channel.go b/vendor/google.golang.org/grpc/balancer/rls/control_channel.go new file mode 100644 index 0000000000..4acc11d90e --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/rls/control_channel.go @@ -0,0 +1,220 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package rls + +import ( + "context" + "fmt" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/rls/internal/adaptive" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + rlsgrpc "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" +) + +var newAdaptiveThrottler = func() adaptiveThrottler { return adaptive.New() } + +type adaptiveThrottler interface { + ShouldThrottle() bool + RegisterBackendResponse(throttled bool) +} + +// controlChannel is a wrapper around the gRPC channel to the RLS server +// specified in the service config. +type controlChannel struct { + // rpcTimeout specifies the timeout for the RouteLookup RPC call. The LB + // policy receives this value in its service config. + rpcTimeout time.Duration + // backToReadyFunc is a callback to be invoked when the connectivity state + // changes from READY --> TRANSIENT_FAILURE --> READY. + backToReadyFunc func() + // throttler in an adaptive throttling implementation used to avoid + // hammering the RLS service while it is overloaded or down. + throttler adaptiveThrottler + + cc *grpc.ClientConn + client rlsgrpc.RouteLookupServiceClient + logger *internalgrpclog.PrefixLogger +} + +// newControlChannel creates a controlChannel to rlsServerName and uses +// serviceConfig, if non-empty, as the default service config for the underlying +// gRPC channel. +func newControlChannel(rlsServerName, serviceConfig string, rpcTimeout time.Duration, bOpts balancer.BuildOptions, backToReadyFunc func()) (*controlChannel, error) { + ctrlCh := &controlChannel{ + rpcTimeout: rpcTimeout, + backToReadyFunc: backToReadyFunc, + throttler: newAdaptiveThrottler(), + } + ctrlCh.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-control-channel %p] ", ctrlCh)) + + dopts, err := ctrlCh.dialOpts(bOpts, serviceConfig) + if err != nil { + return nil, err + } + ctrlCh.cc, err = grpc.Dial(rlsServerName, dopts...) + if err != nil { + return nil, err + } + ctrlCh.client = rlsgrpc.NewRouteLookupServiceClient(ctrlCh.cc) + ctrlCh.logger.Infof("Control channel created to RLS server at: %v", rlsServerName) + + go ctrlCh.monitorConnectivityState() + return ctrlCh, nil +} + +// dialOpts constructs the dial options for the control plane channel. +func (cc *controlChannel) dialOpts(bOpts balancer.BuildOptions, serviceConfig string) ([]grpc.DialOption, error) { + // The control plane channel will use the same authority as the parent + // channel for server authorization. This ensures that the identity of the + // RLS server and the identity of the backends is the same, so if the RLS + // config is injected by an attacker, it cannot cause leakage of private + // information contained in headers set by the application. + dopts := []grpc.DialOption{grpc.WithAuthority(bOpts.Authority)} + if bOpts.Dialer != nil { + dopts = append(dopts, grpc.WithContextDialer(bOpts.Dialer)) + } + + // The control channel will use the channel credentials from the parent + // channel, including any call creds associated with the channel creds. 
+ var credsOpt grpc.DialOption + switch { + case bOpts.DialCreds != nil: + credsOpt = grpc.WithTransportCredentials(bOpts.DialCreds.Clone()) + case bOpts.CredsBundle != nil: + // The "fallback" mode in google default credentials (which is the only + // type of credentials we expect to be used with RLS) uses TLS/ALTS + // creds for transport and uses the same call creds as that on the + // parent bundle. + bundle, err := bOpts.CredsBundle.NewWithMode(internal.CredsBundleModeFallback) + if err != nil { + return nil, err + } + credsOpt = grpc.WithCredentialsBundle(bundle) + default: + cc.logger.Warningf("no credentials available, using Insecure") + credsOpt = grpc.WithTransportCredentials(insecure.NewCredentials()) + } + dopts = append(dopts, credsOpt) + + // If the RLS LB policy's configuration specified a service config for the + // control channel, use that and disable service config fetching via the name + // resolver for the control channel. + if serviceConfig != "" { + cc.logger.Infof("Disabling service config from the name resolver and instead using: %s", serviceConfig) + dopts = append(dopts, grpc.WithDisableServiceConfig(), grpc.WithDefaultServiceConfig(serviceConfig)) + } + + return dopts, nil +} + +func (cc *controlChannel) monitorConnectivityState() { + cc.logger.Infof("Starting connectivity state monitoring goroutine") + // Since we use two mechanisms to deal with RLS server being down: + // - adaptive throttling for the channel as a whole + // - exponential backoff on a per-request basis + // we need a way to avoid double-penalizing requests by counting failures + // toward both mechanisms when the RLS server is unreachable. + // + // To accomplish this, we monitor the state of the control plane channel. If + // the state has been TRANSIENT_FAILURE since the last time it was in state + // READY, and it then transitions into state READY, we push on a channel + // which is being read by the LB policy. + // + // The LB the policy will iterate through the cache to reset the backoff + // timeouts in all cache entries. Specifically, this means that it will + // reset the backoff state and cancel the pending backoff timer. Note that + // when cancelling the backoff timer, just like when the backoff timer fires + // normally, a new picker is returned to the channel, to force it to + // re-process any wait-for-ready RPCs that may still be queued if we failed + // them while we were in backoff. However, we should optimize this case by + // returning only one new picker, regardless of how many backoff timers are + // cancelled. + + // Using the background context is fine here since we check for the ClientConn + // entering SHUTDOWN and return early in that case. + ctx := context.Background() + + first := true + for { + // Wait for the control channel to become READY. + for s := cc.cc.GetState(); s != connectivity.Ready; s = cc.cc.GetState() { + if s == connectivity.Shutdown { + return + } + cc.cc.WaitForStateChange(ctx, s) + } + cc.logger.Infof("Connectivity state is READY") + + if !first { + cc.logger.Infof("Control channel back to READY") + cc.backToReadyFunc() + } + first = false + + // Wait for the control channel to move out of READY. 
+ cc.cc.WaitForStateChange(ctx, connectivity.Ready) + if cc.cc.GetState() == connectivity.Shutdown { + return + } + cc.logger.Infof("Connectivity state is %s", cc.cc.GetState()) + } +} + +func (cc *controlChannel) close() { + cc.logger.Infof("Closing control channel") + cc.cc.Close() +} + +type lookupCallback func(targets []string, headerData string, err error) + +// lookup starts a RouteLookup RPC in a separate goroutine and returns the +// results (and error, if any) in the provided callback. +// +// The returned boolean indicates whether the request was throttled by the +// client-side adaptive throttling algorithm in which case the provided callback +// will not be invoked. +func (cc *controlChannel) lookup(reqKeys map[string]string, reason rlspb.RouteLookupRequest_Reason, staleHeaders string, cb lookupCallback) (throttled bool) { + if cc.throttler.ShouldThrottle() { + cc.logger.Infof("RLS request throttled by client-side adaptive throttling") + return true + } + go func() { + req := &rlspb.RouteLookupRequest{ + TargetType: "grpc", + KeyMap: reqKeys, + Reason: reason, + StaleHeaderData: staleHeaders, + } + cc.logger.Infof("Sending RLS request %+v", pretty.ToJSON(req)) + + ctx, cancel := context.WithTimeout(context.Background(), cc.rpcTimeout) + defer cancel() + resp, err := cc.client.RouteLookup(ctx, req) + cb(resp.GetTargets(), resp.GetHeaderData(), err) + }() + return false +} diff --git a/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/adaptive.go b/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/adaptive.go new file mode 100644 index 0000000000..8b17860434 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/adaptive.go @@ -0,0 +1,131 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package adaptive provides functionality for adaptive client-side throttling. +package adaptive + +import ( + "math/rand" + "sync" + "time" +) + +// For overriding in unittests. +var ( + timeNowFunc = func() time.Time { return time.Now() } + randFunc = func() float64 { return rand.Float64() } +) + +const ( + defaultDuration = 30 * time.Second + defaultBins = 100 + defaultRatioForAccepts = 2.0 + defaultRequestsPadding = 8.0 +) + +// Throttler implements a client-side throttling recommendation system. All +// methods are safe for concurrent use by multiple goroutines. +// +// The throttler has the following knobs for which we will use defaults for +// now. If there is a need to make them configurable at a later point in time, +// support for the same will be added. +// - Duration: amount of recent history that will be taken into account for +// making client-side throttling decisions. A default of 30 seconds is used. +// - Bins: number of bins to be used for bucketing historical data. A default +// of 100 is used. +// - RatioForAccepts: ratio by which accepts are multiplied, typically a value +// slightly larger than 1.0. 
This is used to make the throttler behave as if +// the backend had accepted more requests than it actually has, which lets us +// err on the side of sending to the backend more requests than we think it +// will accept for the sake of speeding up the propagation of state. A +// default of 2.0 is used. +// - RequestsPadding: is used to decrease the (client-side) throttling +// probability in the low QPS regime (to speed up propagation of state), as +// well as to safeguard against hitting a client-side throttling probability +// of 100%. The weight of this value decreases as the number of requests in +// recent history grows. A default of 8 is used. +// +// The adaptive throttler attempts to estimate the probability that a request +// will be throttled using recent history. Server requests (both throttled and +// accepted) are registered with the throttler (via the RegisterBackendResponse +// method), which then recommends client-side throttling (via the +// ShouldThrottle method) with probability given by: +// (requests - RatioForAccepts * accepts) / (requests + RequestsPadding) +type Throttler struct { + ratioForAccepts float64 + requestsPadding float64 + + // Number of total accepts and throttles in the lookback period. + mu sync.Mutex + accepts *lookback + throttles *lookback +} + +// New initializes a new adaptive throttler with the default values. +func New() *Throttler { + return newWithArgs(defaultDuration, defaultBins, defaultRatioForAccepts, defaultRequestsPadding) +} + +// newWithArgs initializes a new adaptive throttler with the provided values. +// Used only in unittests. +func newWithArgs(duration time.Duration, bins int64, ratioForAccepts, requestsPadding float64) *Throttler { + return &Throttler{ + ratioForAccepts: ratioForAccepts, + requestsPadding: requestsPadding, + accepts: newLookback(bins, duration), + throttles: newLookback(bins, duration), + } +} + +// ShouldThrottle returns a probabilistic estimate of whether the server would +// throttle the next request. This should be called for every request before +// allowing it to hit the network. If the returned value is true, the request +// should be aborted immediately (as if it had been throttled by the server). +func (t *Throttler) ShouldThrottle() bool { + randomProbability := randFunc() + now := timeNowFunc() + + t.mu.Lock() + defer t.mu.Unlock() + + accepts, throttles := float64(t.accepts.sum(now)), float64(t.throttles.sum(now)) + requests := accepts + throttles + throttleProbability := (requests - t.ratioForAccepts*accepts) / (requests + t.requestsPadding) + if throttleProbability <= randomProbability { + return false + } + + t.throttles.add(now, 1) + return true +} + +// RegisterBackendResponse registers a response received from the backend for a +// request allowed by ShouldThrottle. This should be called for every response +// received from the backend (i.e., once for each request for which +// ShouldThrottle returned false). +func (t *Throttler) RegisterBackendResponse(throttled bool) { + now := timeNowFunc() + + t.mu.Lock() + if throttled { + t.throttles.add(now, 1) + } else { + t.accepts.add(now, 1) + } + t.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/lookback.go b/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/lookback.go new file mode 100644 index 0000000000..1ab874c356 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/lookback.go @@ -0,0 +1,84 @@ +/* + * + * Copyright 2020 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package adaptive + +import "time" + +// lookback implements a moving sum over an int64 timeline. +type lookback struct { + bins int64 // Number of bins to use for lookback. + width time.Duration // Width of each bin. + + head int64 // Absolute bin index (time * bins / duration) of the current head bin. + total int64 // Sum over all the values in buf, within the lookback window behind head. + buf []int64 // Ring buffer for keeping track of the sum elements. +} + +// newLookback creates a new lookback for the given duration with a set number +// of bins. +func newLookback(bins int64, duration time.Duration) *lookback { + return &lookback{ + bins: bins, + width: duration / time.Duration(bins), + buf: make([]int64, bins), + } +} + +// add is used to increment the lookback sum. +func (l *lookback) add(t time.Time, v int64) { + pos := l.advance(t) + + if (l.head - pos) >= l.bins { + // Do not increment counters if pos is more than bins behind head. + return + } + l.buf[pos%l.bins] += v + l.total += v +} + +// sum returns the sum of the lookback buffer at the given time or head, +// whichever is greater. +func (l *lookback) sum(t time.Time) int64 { + l.advance(t) + return l.total +} + +// advance prepares the lookback buffer for calls to add() or sum() at time t. +// If head is greater than t then the lookback buffer will be untouched. The +// absolute bin index corresponding to t is returned. It will always be less +// than or equal to head. +func (l *lookback) advance(t time.Time) int64 { + ch := l.head // Current head bin index. + nh := t.UnixNano() / l.width.Nanoseconds() // New head bin index. + + if nh <= ch { + // Either head unchanged or clock jitter (time has moved backwards). Do + // not advance. + return nh + } + + jmax := min(l.bins, nh-ch) + for j := int64(0); j < jmax; j++ { + i := (ch + j + 1) % l.bins + l.total -= l.buf[i] + l.buf[i] = 0 + } + l.head = nh + return nh +} diff --git a/vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go b/vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go new file mode 100644 index 0000000000..cc5ce510ad --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go @@ -0,0 +1,267 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package keys provides functionality required to build RLS request keys. 
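+//
+// For illustration only (the names below are hypothetical, not taken from any
+// real config): a GrpcKeyBuilder that names the service "foo.Bar", matches the
+// header "x-user-id" into the key "user", and sets extra_keys.host to "host"
+// would, for an RPC to "/foo.Bar/Baz" carrying metadata "x-user-id: 123" on
+// host "example.com", yield the RLS key map
+// {"user": "123", "host": "example.com"}.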
+package keys + +import ( + "errors" + "fmt" + "sort" + "strings" + + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + "google.golang.org/grpc/metadata" +) + +// BuilderMap maps from request path to the key builder for that path. +type BuilderMap map[string]builder + +// MakeBuilderMap parses the provided RouteLookupConfig proto and returns a map +// from paths to key builders. +func MakeBuilderMap(cfg *rlspb.RouteLookupConfig) (BuilderMap, error) { + kbs := cfg.GetGrpcKeybuilders() + if len(kbs) == 0 { + return nil, errors.New("rls: RouteLookupConfig does not contain any GrpcKeyBuilder") + } + + bm := make(map[string]builder) + for _, kb := range kbs { + // Extract keys from `headers`, `constant_keys` and `extra_keys` fields + // and populate appropriate values in the builder struct. Also ensure + // that keys are not repeated. + var matchers []matcher + seenKeys := make(map[string]bool) + constantKeys := kb.GetConstantKeys() + for k := range kb.GetConstantKeys() { + seenKeys[k] = true + } + for _, h := range kb.GetHeaders() { + if h.GetRequiredMatch() { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig has required_match field set {%+v}", kbs) + } + key := h.GetKey() + if seenKeys[key] { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key %q across headers, constant_keys and extra_keys {%+v}", key, kbs) + } + seenKeys[key] = true + matchers = append(matchers, matcher{key: h.GetKey(), names: h.GetNames()}) + } + if seenKeys[kb.GetExtraKeys().GetHost()] { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key %q in extra_keys from constant_keys or headers {%+v}", kb.GetExtraKeys().GetHost(), kbs) + } + if seenKeys[kb.GetExtraKeys().GetService()] { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key %q in extra_keys from constant_keys or headers {%+v}", kb.GetExtraKeys().GetService(), kbs) + } + if seenKeys[kb.GetExtraKeys().GetMethod()] { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key %q in extra_keys from constant_keys or headers {%+v}", kb.GetExtraKeys().GetMethod(), kbs) + } + b := builder{ + headerKeys: matchers, + constantKeys: constantKeys, + hostKey: kb.GetExtraKeys().GetHost(), + serviceKey: kb.GetExtraKeys().GetService(), + methodKey: kb.GetExtraKeys().GetMethod(), + } + + // Store the builder created above in the BuilderMap based on the value + // of the `Names` field, which wraps incoming request's service and + // method. Also, ensure that there are no repeated `Names` field. + names := kb.GetNames() + if len(names) == 0 { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig does not contain any Name {%+v}", kbs) + } + for _, name := range names { + if name.GetService() == "" { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains a Name field with no Service {%+v}", kbs) + } + if strings.Contains(name.GetMethod(), `/`) { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains a method with a slash {%+v}", kbs) + } + path := "/" + name.GetService() + "/" + name.GetMethod() + if _, ok := bm[path]; ok { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated Name field {%+v}", kbs) + } + bm[path] = b + } + } + return bm, nil +} + +// KeyMap represents the RLS keys to be used for a request. +type KeyMap struct { + // Map is the representation of an RLS key as a Go map. 
This is used when + // an actual RLS request is to be sent out on the wire, since the + // RouteLookupRequest proto expects a Go map. + Map map[string]string + // Str is the representation of an RLS key as a string, sorted by keys. + // Since the RLS keys are part of the cache key in the request cache + // maintained by the RLS balancer, and Go maps cannot be used as keys for + // Go maps (the cache is implemented as a map), we need a stringified + // version of it. + Str string +} + +// RLSKey builds the RLS keys to be used for the given request, identified by +// the request path and the request headers stored in metadata. +func (bm BuilderMap) RLSKey(md metadata.MD, host, path string) KeyMap { + // The path passed in is of the form "/service/method". The keyBuilderMap is + // indexed with keys of the form "/service/" or "/service/method". The service + // that we set in the keyMap (to be sent out in the RLS request) should not + // include any slashes though. + i := strings.LastIndex(path, "/") + service, method := path[:i+1], path[i+1:] + b, ok := bm[path] + if !ok { + b, ok = bm[service] + if !ok { + return KeyMap{} + } + } + + kvMap := b.buildHeaderKeys(md) + if b.hostKey != "" { + kvMap[b.hostKey] = host + } + if b.serviceKey != "" { + kvMap[b.serviceKey] = strings.Trim(service, "/") + } + if b.methodKey != "" { + kvMap[b.methodKey] = method + } + for k, v := range b.constantKeys { + kvMap[k] = v + } + return KeyMap{Map: kvMap, Str: mapToString(kvMap)} +} + +// Equal reports whether bm and am represent equivalent BuilderMaps. +func (bm BuilderMap) Equal(am BuilderMap) bool { + if (bm == nil) != (am == nil) { + return false + } + if len(bm) != len(am) { + return false + } + + for key, bBuilder := range bm { + aBuilder, ok := am[key] + if !ok { + return false + } + if !bBuilder.Equal(aBuilder) { + return false + } + } + return true +} + +// builder provides the actual functionality of building RLS keys. +type builder struct { + headerKeys []matcher + constantKeys map[string]string + // The following keys mirror corresponding fields in `extra_keys`. + hostKey string + serviceKey string + methodKey string +} + +// Equal reports whether b and a represent equivalent key builders. +func (b builder) Equal(a builder) bool { + if len(b.headerKeys) != len(a.headerKeys) { + return false + } + // Protobuf serialization maintains the order of repeated fields. Matchers + // are specified as a repeated field inside the KeyBuilder proto. If the + // order changes, it means that the order in the protobuf changed. We report + // this case as not being equal even though the builders could possible be + // functionally equal. + for i, bMatcher := range b.headerKeys { + aMatcher := a.headerKeys[i] + if !bMatcher.Equal(aMatcher) { + return false + } + } + + if len(b.constantKeys) != len(a.constantKeys) { + return false + } + for k, v := range b.constantKeys { + if a.constantKeys[k] != v { + return false + } + } + + return b.hostKey == a.hostKey && b.serviceKey == a.serviceKey && b.methodKey == a.methodKey +} + +// matcher helps extract a key from request headers based on a given name. +type matcher struct { + // The key used in the keyMap sent as part of the RLS request. + key string + // List of header names which can supply the value for this key. + names []string +} + +// Equal reports if m and a are equivalent headerKeys. 
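+// The comparison is order-sensitive: matchers listing the same header names
+// in a different order are reported as not equal, mirroring builder.Equal's
+// treatment of headerKeys.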
+func (m matcher) Equal(a matcher) bool { + if m.key != a.key { + return false + } + if len(m.names) != len(a.names) { + return false + } + for i := 0; i < len(m.names); i++ { + if m.names[i] != a.names[i] { + return false + } + } + return true +} + +func (b builder) buildHeaderKeys(md metadata.MD) map[string]string { + kvMap := make(map[string]string) + if len(md) == 0 { + return kvMap + } + for _, m := range b.headerKeys { + for _, name := range m.names { + if vals := md.Get(name); vals != nil { + kvMap[m.key] = strings.Join(vals, ",") + break + } + } + } + return kvMap +} + +func mapToString(kv map[string]string) string { + keys := make([]string, 0, len(kv)) + for k := range kv { + keys = append(keys, k) + } + sort.Strings(keys) + var sb strings.Builder + for i, k := range keys { + if i != 0 { + fmt.Fprint(&sb, ",") + } + fmt.Fprintf(&sb, "%s=%s", k, kv[k]) + } + return sb.String() +} diff --git a/vendor/google.golang.org/grpc/balancer/rls/picker.go b/vendor/google.golang.org/grpc/balancer/rls/picker.go new file mode 100644 index 0000000000..e5c86f2906 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/rls/picker.go @@ -0,0 +1,397 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package rls + +import ( + "errors" + "fmt" + "strings" + "sync/atomic" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/rls/internal/keys" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + estats "google.golang.org/grpc/experimental/stats" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +var ( + errRLSThrottled = errors.New("RLS call throttled at client side") + + // Function to compute data cache entry size. + computeDataCacheEntrySize = dcEntrySize +) + +// exitIdler wraps the only method on the BalancerGroup that the picker calls. +type exitIdler interface { + ExitIdleOne(id string) +} + +// rlsPicker selects the subConn to be used for a particular RPC. It does not +// manage subConns directly and delegates to pickers provided by child policies. +type rlsPicker struct { + // The keyBuilder map used to generate RLS keys for the RPC. This is built + // by the LB policy based on the received ServiceConfig. + kbm keys.BuilderMap + // Endpoint from the user's original dial target. Used to set the `host_key` + // field in `extra_keys`. + origEndpoint string + + lb *rlsBalancer + + // The picker is given its own copy of the below fields from the RLS LB policy + // to avoid having to grab the mutex on the latter. + rlsServerTarget string + grpcTarget string + metricsRecorder estats.MetricsRecorder + defaultPolicy *childPolicyWrapper // Child policy for the default target. + ctrlCh *controlChannel // Control channel to the RLS server. + maxAge time.Duration // Cache max age from LB config. 
+	staleAge        time.Duration // Cache stale age from LB config.
+	bg              exitIdler
+	logger          *internalgrpclog.PrefixLogger
+}
+
+// isFullMethodNameValid returns true if name is of the form `/service/method`.
+func isFullMethodNameValid(name string) bool {
+	return strings.HasPrefix(name, "/") && strings.Count(name, "/") == 2
+}
+
+// Pick makes the routing decision for every outbound RPC.
+func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+	if name := info.FullMethodName; !isFullMethodNameValid(name) {
+		return balancer.PickResult{}, fmt.Errorf("rls: method name %q is not of the form '/service/method'", name)
+	}
+
+	// Build the request's keys using the key builders from LB config.
+	md, _ := metadata.FromOutgoingContext(info.Ctx)
+	reqKeys := p.kbm.RLSKey(md, p.origEndpoint, info.FullMethodName)
+
+	p.lb.cacheMu.Lock()
+	var pr balancer.PickResult
+	var err error
+
+	// Record metrics without the cache mutex held, to prevent lock contention
+	// between concurrent RPCs and their Pick calls. Metrics recording can
+	// potentially be expensive.
+	metricsCallback := func() {}
+	defer func() {
+		p.lb.cacheMu.Unlock()
+		metricsCallback()
+	}()
+
+	// Lookup data cache and pending request map using request path and keys.
+	cacheKey := cacheKey{path: info.FullMethodName, keys: reqKeys.Str}
+	dcEntry := p.lb.dataCache.getEntry(cacheKey)
+	pendingEntry := p.lb.pendingMap[cacheKey]
+	now := time.Now()
+
+	switch {
+	// No data cache entry. No pending request.
+	case dcEntry == nil && pendingEntry == nil:
+		throttled := p.sendRouteLookupRequestLocked(cacheKey, &backoffState{bs: defaultBackoffStrategy}, reqKeys.Map, rlspb.RouteLookupRequest_REASON_MISS, "")
+		if throttled {
+			pr, metricsCallback, err = p.useDefaultPickIfPossible(info, errRLSThrottled)
+			return pr, err
+		}
+		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
+
+	// No data cache entry. Pending request exists.
+	case dcEntry == nil && pendingEntry != nil:
+		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
+
+	// Data cache hit. No pending request.
+	case dcEntry != nil && pendingEntry == nil:
+		if dcEntry.expiryTime.After(now) {
+			if !dcEntry.staleTime.IsZero() && dcEntry.staleTime.Before(now) && dcEntry.backoffTime.Before(now) {
+				p.sendRouteLookupRequestLocked(cacheKey, dcEntry.backoffState, reqKeys.Map, rlspb.RouteLookupRequest_REASON_STALE, dcEntry.headerData)
+			}
+			// Delegate to child policies.
+			pr, metricsCallback, err = p.delegateToChildPoliciesLocked(dcEntry, info)
+			return pr, err
+		}
+
+		// We get here only if the data cache entry has expired. If entry is in
+		// backoff, delegate to default target or fail the pick.
+		if dcEntry.backoffState != nil && dcEntry.backoffTime.After(now) {
+			// Avoid propagating the status code received on control plane RPCs to the
+			// data plane which can lead to unexpected outcomes as we do not control
+			// the status code sent by the control plane. Propagating the status
+			// message received from the control plane is still fine, as it could be
+			// useful for debugging purposes.
+			st := dcEntry.status
+			pr, metricsCallback, err = p.useDefaultPickIfPossible(info, status.Error(codes.Unavailable, fmt.Sprintf("most recent error from RLS server: %v", st.Error())))
+			return pr, err
+		}
+
+		// We get here only if the entry has expired and is not in backoff.
+ throttled := p.sendRouteLookupRequestLocked(cacheKey, dcEntry.backoffState, reqKeys.Map, rlspb.RouteLookupRequest_REASON_MISS, "") + if throttled { + pr, metricsCallback, err = p.useDefaultPickIfPossible(info, errRLSThrottled) + return pr, err + } + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + + // Data cache hit. Pending request exists. + default: + if dcEntry.expiryTime.After(now) { + pr, metricsCallback, err = p.delegateToChildPoliciesLocked(dcEntry, info) + return pr, err + } + // Data cache entry has expired and pending request exists. Queue pick. + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } +} + +// errToPickResult is a helper function which converts the error value returned +// by Pick() to a string that represents the pick result. +func errToPickResult(err error) string { + if err == nil { + return "complete" + } + if errors.Is(err, balancer.ErrNoSubConnAvailable) { + return "queue" + } + if _, ok := status.FromError(err); ok { + return "drop" + } + return "fail" +} + +// delegateToChildPoliciesLocked is a helper function which iterates through the +// list of child policy wrappers in a cache entry and attempts to find a child +// policy to which this RPC can be routed to. If all child policies are in +// TRANSIENT_FAILURE, we delegate to the last child policy arbitrarily. Returns +// a function to be invoked to record metrics. +func (p *rlsPicker) delegateToChildPoliciesLocked(dcEntry *cacheEntry, info balancer.PickInfo) (balancer.PickResult, func(), error) { + const rlsDataHeaderName = "x-google-rls-data" + for i, cpw := range dcEntry.childPolicyWrappers { + state := (*balancer.State)(atomic.LoadPointer(&cpw.state)) + // Delegate to the child policy if it is not in TRANSIENT_FAILURE, or if + // it is the last one (which handles the case of delegating to the last + // child picker if all child policies are in TRANSIENT_FAILURE). + if state.ConnectivityState != connectivity.TransientFailure || i == len(dcEntry.childPolicyWrappers)-1 { + // Any header data received from the RLS server is stored in the + // cache entry and needs to be sent to the actual backend in the + // X-Google-RLS-Data header. + res, err := state.Picker.Pick(info) + if err != nil { + pr := errToPickResult(err) + return res, func() { + if pr == "queue" { + // Don't record metrics for queued Picks. + return + } + targetPicksMetric.Record(p.metricsRecorder, 1, p.grpcTarget, p.rlsServerTarget, cpw.target, pr) + }, err + } + + if res.Metadata == nil { + res.Metadata = metadata.Pairs(rlsDataHeaderName, dcEntry.headerData) + } else { + res.Metadata.Append(rlsDataHeaderName, dcEntry.headerData) + } + return res, func() { + targetPicksMetric.Record(p.metricsRecorder, 1, p.grpcTarget, p.rlsServerTarget, cpw.target, "complete") + }, nil + } + } + + // In the unlikely event that we have a cache entry with no targets, we end up + // queueing the RPC. + return balancer.PickResult{}, func() {}, balancer.ErrNoSubConnAvailable +} + +// useDefaultPickIfPossible is a helper method which delegates to the default +// target if one is configured, or fails the pick with the given error. Returns +// a function to be invoked to record metrics. 
+func (p *rlsPicker) useDefaultPickIfPossible(info balancer.PickInfo, errOnNoDefault error) (balancer.PickResult, func(), error) { + if p.defaultPolicy != nil { + state := (*balancer.State)(atomic.LoadPointer(&p.defaultPolicy.state)) + res, err := state.Picker.Pick(info) + pr := errToPickResult(err) + return res, func() { + if pr == "queue" { + // Don't record metrics for queued Picks. + return + } + defaultTargetPicksMetric.Record(p.metricsRecorder, 1, p.grpcTarget, p.rlsServerTarget, p.defaultPolicy.target, pr) + }, err + } + + return balancer.PickResult{}, func() { + failedPicksMetric.Record(p.metricsRecorder, 1, p.grpcTarget, p.rlsServerTarget) + }, errOnNoDefault +} + +// sendRouteLookupRequestLocked adds an entry to the pending request map and +// sends out an RLS request using the passed in arguments. Returns a value +// indicating if the request was throttled by the client-side adaptive +// throttler. +func (p *rlsPicker) sendRouteLookupRequestLocked(cacheKey cacheKey, bs *backoffState, reqKeys map[string]string, reason rlspb.RouteLookupRequest_Reason, staleHeaders string) bool { + if p.lb.pendingMap[cacheKey] != nil { + return false + } + + p.lb.pendingMap[cacheKey] = bs + throttled := p.ctrlCh.lookup(reqKeys, reason, staleHeaders, func(targets []string, headerData string, err error) { + p.handleRouteLookupResponse(cacheKey, targets, headerData, err) + }) + if throttled { + delete(p.lb.pendingMap, cacheKey) + } + return throttled +} + +// handleRouteLookupResponse is the callback invoked by the control channel upon +// receipt of an RLS response. Modifies the data cache and pending requests map +// and sends a new picker. +// +// Acquires the write-lock on the cache. Caller must not hold p.lb.cacheMu. +func (p *rlsPicker) handleRouteLookupResponse(cacheKey cacheKey, targets []string, headerData string, err error) { + p.logger.Infof("Received RLS response for key %+v with targets %+v, headerData %q, err: %v", cacheKey, targets, headerData, err) + + p.lb.cacheMu.Lock() + defer func() { + // Pending request map entry is unconditionally deleted since the request is + // no longer pending. + p.logger.Infof("Removing pending request entry for key %+v", cacheKey) + delete(p.lb.pendingMap, cacheKey) + p.lb.sendNewPicker() + p.lb.cacheMu.Unlock() + }() + + // Lookup the data cache entry or create a new one. + dcEntry := p.lb.dataCache.getEntry(cacheKey) + if dcEntry == nil { + dcEntry = &cacheEntry{} + if _, ok := p.lb.dataCache.addEntry(cacheKey, dcEntry); !ok { + // This is a very unlikely case where we are unable to add a + // data cache entry. Log and leave. + p.logger.Warningf("Failed to add data cache entry for %+v", cacheKey) + return + } + } + + // For failed requests, the data cache entry is modified as follows: + // - status is set to error returned from the control channel + // - current backoff state is available in the pending entry + // - `retries` field is incremented and + // - backoff state is moved to the data cache + // - backoffTime is set to the time indicated by the backoff state + // - backoffExpirationTime is set to twice the backoff time + // - backoffTimer is set to fire after backoffTime + // + // When a proactive cache refresh fails, this would leave the targets and the + // expiry time from the old entry unchanged. And this mean that the old valid + // entry would be used until expiration, and a new picker would be sent upon + // backoff expiry. 
+	now := time.Now()
+
+	// "An RLS request is considered to have failed if it returns a non-OK
+	// status or the RLS response's targets list is empty." - RLS LB Policy
+	// design.
+	if len(targets) == 0 && err == nil {
+		err = fmt.Errorf("RLS response's target list does not contain any entries for key %+v", cacheKey)
+		// When err is non-nil, the RPC error from the control plane already
+		// explains why no targets were received, so there is no need to also
+		// report the empty target list to the user.
+	}
+	if err != nil {
+		dcEntry.status = err
+		pendingEntry := p.lb.pendingMap[cacheKey]
+		pendingEntry.retries++
+		backoffTime := pendingEntry.bs.Backoff(pendingEntry.retries)
+		dcEntry.backoffState = pendingEntry
+		dcEntry.backoffTime = now.Add(backoffTime)
+		dcEntry.backoffExpiryTime = now.Add(2 * backoffTime)
+		if dcEntry.backoffState.timer != nil {
+			dcEntry.backoffState.timer.Stop()
+		}
+		dcEntry.backoffState.timer = time.AfterFunc(backoffTime, p.lb.sendNewPicker)
+		return
+	}
+
+	// For successful requests, the cache entry is modified as follows:
+	// - childPolicyWrappers is set to point to the child policy wrappers
+	//   associated with the targets specified in the received response
+	// - headerData is set to the value received in the response
+	// - expiryTime, staleTime and earliestEvictTime are set
+	// - status is set to nil (OK status)
+	// - backoff state is cleared
+	p.setChildPolicyWrappersInCacheEntry(dcEntry, targets)
+	dcEntry.headerData = headerData
+	dcEntry.expiryTime = now.Add(p.maxAge)
+	if p.staleAge != 0 {
+		dcEntry.staleTime = now.Add(p.staleAge)
+	}
+	dcEntry.earliestEvictTime = now.Add(minEvictDuration)
+	dcEntry.status = nil
+	dcEntry.backoffState = &backoffState{bs: defaultBackoffStrategy}
+	dcEntry.backoffTime = time.Time{}
+	dcEntry.backoffExpiryTime = time.Time{}
+	p.lb.dataCache.updateEntrySize(dcEntry, computeDataCacheEntrySize(cacheKey, dcEntry))
+}
+
+// setChildPolicyWrappersInCacheEntry sets up the childPolicyWrappers field in
+// the cache entry to point to the child policy wrappers for the targets
+// specified in the RLS response.
+//
+// Caller must hold a write-lock on p.lb.cacheMu.
+func (p *rlsPicker) setChildPolicyWrappersInCacheEntry(dcEntry *cacheEntry, newTargets []string) {
+	// If the childPolicyWrappers field is already pointing to the right targets,
+	// then the field's value does not need to change.
+	targetsChanged := true
+	func() {
+		if cpws := dcEntry.childPolicyWrappers; cpws != nil {
+			if len(newTargets) != len(cpws) {
+				return
+			}
+			for i, target := range newTargets {
+				if cpws[i].target != target {
+					return
+				}
+			}
+			targetsChanged = false
+		}
+	}()
+	if !targetsChanged {
+		return
+	}
+
+	// If the childPolicyWrappers field is not already set to the right targets,
+	// then it must be reset. We construct a new list of child policies and
+	// then swap out the old list for the new one.
+ newChildPolicies := p.lb.acquireChildPolicyReferences(newTargets) + oldChildPolicyTargets := make([]string, len(dcEntry.childPolicyWrappers)) + for i, cpw := range dcEntry.childPolicyWrappers { + oldChildPolicyTargets[i] = cpw.target + } + p.lb.releaseChildPolicyReferences(oldChildPolicyTargets) + dcEntry.childPolicyWrappers = newChildPolicies +} + +func dcEntrySize(key cacheKey, entry *cacheEntry) int64 { + return int64(len(key.path) + len(key.keys) + len(entry.headerData)) +} diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go new file mode 100644 index 0000000000..88bf64ec4e --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go @@ -0,0 +1,636 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package weightedroundrobin + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "math/rand" + "sync" + "sync/atomic" + "time" + "unsafe" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/balancer/weightedroundrobin/internal" + "google.golang.org/grpc/balancer/weightedtarget" + "google.golang.org/grpc/connectivity" + estats "google.golang.org/grpc/experimental/stats" + "google.golang.org/grpc/internal/grpclog" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/orca" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" +) + +// Name is the name of the weighted round robin balancer. +const Name = "weighted_round_robin" + +var ( + rrFallbackMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ + Name: "grpc.lb.wrr.rr_fallback", + Description: "EXPERIMENTAL. Number of scheduler updates in which there were not enough endpoints with valid weight, which caused the WRR policy to fall back to RR behavior.", + Unit: "update", + Labels: []string{"grpc.target"}, + OptionalLabels: []string{"grpc.lb.locality"}, + Default: false, + }) + + endpointWeightNotYetUsableMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ + Name: "grpc.lb.wrr.endpoint_weight_not_yet_usable", + Description: "EXPERIMENTAL. Number of endpoints from each scheduler update that don't yet have usable weight information (i.e., either the load report has not yet been received, or it is within the blackout period).", + Unit: "endpoint", + Labels: []string{"grpc.target"}, + OptionalLabels: []string{"grpc.lb.locality"}, + Default: false, + }) + + endpointWeightStaleMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ + Name: "grpc.lb.wrr.endpoint_weight_stale", + Description: "EXPERIMENTAL. 
Number of endpoints from each scheduler update whose latest weight is older than the expiration period.", + Unit: "endpoint", + Labels: []string{"grpc.target"}, + OptionalLabels: []string{"grpc.lb.locality"}, + Default: false, + }) + endpointWeightsMetric = estats.RegisterFloat64Histo(estats.MetricDescriptor{ + Name: "grpc.lb.wrr.endpoint_weights", + Description: "EXPERIMENTAL. Weight of each endpoint, recorded on every scheduler update. Endpoints without usable weights will be recorded as weight 0.", + Unit: "endpoint", + Labels: []string{"grpc.target"}, + OptionalLabels: []string{"grpc.lb.locality"}, + Default: false, + }) +) + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + b := &wrrBalancer{ + cc: cc, + subConns: resolver.NewAddressMap(), + csEvltr: &balancer.ConnectivityStateEvaluator{}, + scMap: make(map[balancer.SubConn]*weightedSubConn), + connectivityState: connectivity.Connecting, + target: bOpts.Target.String(), + metricsRecorder: bOpts.MetricsRecorder, + } + + b.logger = prefixLogger(b) + b.logger.Infof("Created") + return b +} + +func (bb) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + lbCfg := &lbConfig{ + // Default values as documented in A58. + OOBReportingPeriod: iserviceconfig.Duration(10 * time.Second), + BlackoutPeriod: iserviceconfig.Duration(10 * time.Second), + WeightExpirationPeriod: iserviceconfig.Duration(3 * time.Minute), + WeightUpdatePeriod: iserviceconfig.Duration(time.Second), + ErrorUtilizationPenalty: 1, + } + if err := json.Unmarshal(js, lbCfg); err != nil { + return nil, fmt.Errorf("wrr: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + + if lbCfg.ErrorUtilizationPenalty < 0 { + return nil, fmt.Errorf("wrr: errorUtilizationPenalty must be non-negative") + } + + // For easier comparisons later, ensure the OOB reporting period is unset + // (0s) when OOB reports are disabled. + if !lbCfg.EnableOOBLoadReport { + lbCfg.OOBReportingPeriod = 0 + } + + // Impose lower bound of 100ms on weightUpdatePeriod. + if !internal.AllowAnyWeightUpdatePeriod && lbCfg.WeightUpdatePeriod < iserviceconfig.Duration(100*time.Millisecond) { + lbCfg.WeightUpdatePeriod = iserviceconfig.Duration(100 * time.Millisecond) + } + + return lbCfg, nil +} + +func (bb) Name() string { + return Name +} + +// wrrBalancer implements the weighted round robin LB policy. +type wrrBalancer struct { + // The following fields are immutable. + cc balancer.ClientConn + logger *grpclog.PrefixLogger + target string + metricsRecorder estats.MetricsRecorder + + // The following fields are only accessed on calls into the LB policy, and + // do not need a mutex. 
+ cfg *lbConfig // active config + subConns *resolver.AddressMap // active weightedSubConns mapped by address + scMap map[balancer.SubConn]*weightedSubConn + connectivityState connectivity.State // aggregate state + csEvltr *balancer.ConnectivityStateEvaluator + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure + stopPicker func() + locality string +} + +func (b *wrrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + b.logger.Infof("UpdateCCS: %v", ccs) + b.resolverErr = nil + cfg, ok := ccs.BalancerConfig.(*lbConfig) + if !ok { + return fmt.Errorf("wrr: received nil or illegal BalancerConfig (type %T): %v", ccs.BalancerConfig, ccs.BalancerConfig) + } + + b.cfg = cfg + b.locality = weightedtarget.LocalityFromResolverState(ccs.ResolverState) + b.updateAddresses(ccs.ResolverState.Addresses) + + if len(ccs.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("resolver produced zero addresses")) // will call regeneratePicker + return balancer.ErrBadResolverState + } + + b.regeneratePicker() + + return nil +} + +func (b *wrrBalancer) updateAddresses(addrs []resolver.Address) { + addrsSet := resolver.NewAddressMap() + + // Loop through new address list and create subconns for any new addresses. + for _, addr := range addrs { + if _, ok := addrsSet.Get(addr); ok { + // Redundant address; skip. + continue + } + addrsSet.Set(addr, nil) + + var wsc *weightedSubConn + wsci, ok := b.subConns.Get(addr) + if ok { + wsc = wsci.(*weightedSubConn) + } else { + // addr is a new address (not existing in b.subConns). + var sc balancer.SubConn + sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(sc, state) + }, + }) + if err != nil { + b.logger.Warningf("Failed to create new SubConn for address %v: %v", addr, err) + continue + } + wsc = &weightedSubConn{ + SubConn: sc, + logger: b.logger, + connectivityState: connectivity.Idle, + // Initially, we set load reports to off, because they are not + // running upon initial weightedSubConn creation. + cfg: &lbConfig{EnableOOBLoadReport: false}, + + metricsRecorder: b.metricsRecorder, + target: b.target, + locality: b.locality, + } + b.subConns.Set(addr, wsc) + b.scMap[sc] = wsc + b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle) + sc.Connect() + } + // Update config for existing weightedSubConn or send update for first + // time to new one. Ensures an OOB listener is running if needed + // (and stops the existing one if applicable). + wsc.updateConfig(b.cfg) + } + + // Loop through existing subconns and remove ones that are not in addrs. + for _, addr := range b.subConns.Keys() { + if _, ok := addrsSet.Get(addr); ok { + // Existing address also in new address list; skip. + continue + } + // addr was removed by resolver. Remove. + wsci, _ := b.subConns.Get(addr) + wsc := wsci.(*weightedSubConn) + wsc.SubConn.Shutdown() + b.subConns.Delete(addr) + } +} + +func (b *wrrBalancer) ResolverError(err error) { + b.resolverErr = err + if b.subConns.Len() == 0 { + b.connectivityState = connectivity.TransientFailure + } + if b.connectivityState != connectivity.TransientFailure { + // No need to update the picker since no error is being returned. 
+ return + } + b.regeneratePicker() +} + +func (b *wrrBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *wrrBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + wsc := b.scMap[sc] + if wsc == nil { + b.logger.Errorf("UpdateSubConnState called with an unknown SubConn: %p, %v", sc, state) + return + } + if b.logger.V(2) { + logger.Infof("UpdateSubConnState(%+v, %+v)", sc, state) + } + + cs := state.ConnectivityState + + if cs == connectivity.TransientFailure { + // Save error to be reported via picker. + b.connErr = state.ConnectionError + } + + if cs == connectivity.Shutdown { + delete(b.scMap, sc) + // The subconn was removed from b.subConns when the address was removed + // in updateAddresses. + } + + oldCS := wsc.updateConnectivityState(cs) + b.connectivityState = b.csEvltr.RecordTransition(oldCS, cs) + + // Regenerate picker when one of the following happens: + // - this sc entered or left ready + // - the aggregated state of balancer is TransientFailure + // (may need to update error message) + if (cs == connectivity.Ready) != (oldCS == connectivity.Ready) || + b.connectivityState == connectivity.TransientFailure { + b.regeneratePicker() + } +} + +// Close stops the balancer. It cancels any ongoing scheduler updates and +// stops any ORCA listeners. +func (b *wrrBalancer) Close() { + if b.stopPicker != nil { + b.stopPicker() + b.stopPicker = nil + } + for _, wsc := range b.scMap { + // Ensure any lingering OOB watchers are stopped. + wsc.updateConnectivityState(connectivity.Shutdown) + } +} + +// ExitIdle is ignored; we always connect to all backends. +func (b *wrrBalancer) ExitIdle() {} + +func (b *wrrBalancer) readySubConns() []*weightedSubConn { + var ret []*weightedSubConn + for _, v := range b.subConns.Values() { + wsc := v.(*weightedSubConn) + if wsc.connectivityState == connectivity.Ready { + ret = append(ret, wsc) + } + } + return ret +} + +// mergeErrors builds an error from the last connection error and the last +// resolver error. Must only be called if b.connectivityState is +// TransientFailure. +func (b *wrrBalancer) mergeErrors() error { + // connErr must always be non-nil unless there are no SubConns, in which + // case resolverErr must be non-nil. + if b.connErr == nil { + return fmt.Errorf("last resolver error: %v", b.resolverErr) + } + if b.resolverErr == nil { + return fmt.Errorf("last connection error: %v", b.connErr) + } + return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) +} + +func (b *wrrBalancer) regeneratePicker() { + if b.stopPicker != nil { + b.stopPicker() + b.stopPicker = nil + } + + switch b.connectivityState { + case connectivity.TransientFailure: + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(b.mergeErrors()), + }) + return + case connectivity.Connecting, connectivity.Idle: + // Idle could happen very briefly if all subconns are Idle and we've + // asked them to connect but they haven't reported Connecting yet. + // Report the same as Connecting since this is temporary. 
+ b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }) + return + case connectivity.Ready: + b.connErr = nil + } + + p := &picker{ + v: rand.Uint32(), // start the scheduler at a random point + cfg: b.cfg, + subConns: b.readySubConns(), + metricsRecorder: b.metricsRecorder, + locality: b.locality, + target: b.target, + } + var ctx context.Context + ctx, b.stopPicker = context.WithCancel(context.Background()) + p.start(ctx) + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.connectivityState, + Picker: p, + }) +} + +// picker is the WRR policy's picker. It uses live-updating backend weights to +// update the scheduler periodically and ensure picks are routed proportional +// to those weights. +type picker struct { + scheduler unsafe.Pointer // *scheduler; accessed atomically + v uint32 // incrementing value used by the scheduler; accessed atomically + cfg *lbConfig // active config when picker created + subConns []*weightedSubConn // all READY subconns + + // The following fields are immutable. + target string + locality string + metricsRecorder estats.MetricsRecorder +} + +func (p *picker) scWeights(recordMetrics bool) []float64 { + ws := make([]float64, len(p.subConns)) + now := internal.TimeNow() + for i, wsc := range p.subConns { + ws[i] = wsc.weight(now, time.Duration(p.cfg.WeightExpirationPeriod), time.Duration(p.cfg.BlackoutPeriod), recordMetrics) + } + + return ws +} + +func (p *picker) inc() uint32 { + return atomic.AddUint32(&p.v, 1) +} + +func (p *picker) regenerateScheduler() { + s := p.newScheduler(true) + atomic.StorePointer(&p.scheduler, unsafe.Pointer(&s)) +} + +func (p *picker) start(ctx context.Context) { + p.regenerateScheduler() + if len(p.subConns) == 1 { + // No need to regenerate weights with only one backend. + return + } + + go func() { + ticker := time.NewTicker(time.Duration(p.cfg.WeightUpdatePeriod)) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + p.regenerateScheduler() + } + } + }() +} + +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + // Read the scheduler atomically. All scheduler operations are threadsafe, + // and if the scheduler is replaced during this usage, we want to use the + // scheduler that was live when the pick started. + sched := *(*scheduler)(atomic.LoadPointer(&p.scheduler)) + + pickedSC := p.subConns[sched.nextIndex()] + pr := balancer.PickResult{SubConn: pickedSC.SubConn} + if !p.cfg.EnableOOBLoadReport { + pr.Done = func(info balancer.DoneInfo) { + if load, ok := info.ServerLoad.(*v3orcapb.OrcaLoadReport); ok && load != nil { + pickedSC.OnLoadReport(load) + } + } + } + return pr, nil +} + +// weightedSubConn is the wrapper of a subconn that holds the subconn and its +// weight (and other parameters relevant to computing the effective weight). +// When needed, it also tracks connectivity state, listens for metrics updates +// by implementing the orca.OOBListener interface and manages that listener. +type weightedSubConn struct { + // The following fields are immutable. + balancer.SubConn + logger *grpclog.PrefixLogger + target string + metricsRecorder estats.MetricsRecorder + locality string + + // The following fields are only accessed on calls into the LB policy, and + // do not need a mutex. + connectivityState connectivity.State + stopORCAListener func() + + // The following fields are accessed asynchronously and are protected by + // mu. 
Note that mu may not be held when calling into the stopORCAListener + // or when registering a new listener, as those calls require the ORCA + // producer mu which is held when calling the listener, and the listener + // holds mu. + mu sync.Mutex + weightVal float64 + nonEmptySince time.Time + lastUpdated time.Time + cfg *lbConfig +} + +func (w *weightedSubConn) OnLoadReport(load *v3orcapb.OrcaLoadReport) { + if w.logger.V(2) { + w.logger.Infof("Received load report for subchannel %v: %v", w.SubConn, load) + } + // Update weights of this subchannel according to the reported load + utilization := load.ApplicationUtilization + if utilization == 0 { + utilization = load.CpuUtilization + } + if utilization == 0 || load.RpsFractional == 0 { + if w.logger.V(2) { + w.logger.Infof("Ignoring empty load report for subchannel %v", w.SubConn) + } + return + } + + w.mu.Lock() + defer w.mu.Unlock() + + errorRate := load.Eps / load.RpsFractional + w.weightVal = load.RpsFractional / (utilization + errorRate*w.cfg.ErrorUtilizationPenalty) + if w.logger.V(2) { + w.logger.Infof("New weight for subchannel %v: %v", w.SubConn, w.weightVal) + } + + w.lastUpdated = internal.TimeNow() + if w.nonEmptySince.Equal(time.Time{}) { + w.nonEmptySince = w.lastUpdated + } +} + +// updateConfig updates the parameters of the WRR policy and +// stops/starts/restarts the ORCA OOB listener. +func (w *weightedSubConn) updateConfig(cfg *lbConfig) { + w.mu.Lock() + oldCfg := w.cfg + w.cfg = cfg + w.mu.Unlock() + + newPeriod := cfg.OOBReportingPeriod + if cfg.EnableOOBLoadReport == oldCfg.EnableOOBLoadReport && + newPeriod == oldCfg.OOBReportingPeriod { + // Load reporting wasn't enabled before or after, or load reporting was + // enabled before and after, and had the same period. (Note that with + // load reporting disabled, OOBReportingPeriod is always 0.) + return + } + // (Optionally stop and) start the listener to use the new config's + // settings for OOB reporting. + + if w.stopORCAListener != nil { + w.stopORCAListener() + } + if !cfg.EnableOOBLoadReport { + w.stopORCAListener = nil + return + } + if w.logger.V(2) { + w.logger.Infof("Registering ORCA listener for %v with interval %v", w.SubConn, newPeriod) + } + opts := orca.OOBListenerOptions{ReportInterval: time.Duration(newPeriod)} + w.stopORCAListener = orca.RegisterOOBListener(w.SubConn, w, opts) +} + +func (w *weightedSubConn) updateConnectivityState(cs connectivity.State) connectivity.State { + switch cs { + case connectivity.Idle: + // Always reconnect when idle. + w.SubConn.Connect() + case connectivity.Ready: + // If we transition back to READY state, reset nonEmptySince so that we + // apply the blackout period after we start receiving load data. Also + // reset lastUpdated to trigger endpoint weight not yet usable in the + // case endpoint gets asked what weight it is before receiving a new + // load report. Note that we cannot guarantee that we will never receive + // lingering callbacks for backend metric reports from the previous + // connection after the new connection has been established, but they + // should be masked by new backend metric reports from the new + // connection by the time the blackout period ends. 
+ w.mu.Lock() + w.nonEmptySince = time.Time{} + w.lastUpdated = time.Time{} + w.mu.Unlock() + case connectivity.Shutdown: + if w.stopORCAListener != nil { + w.stopORCAListener() + } + } + + oldCS := w.connectivityState + + if oldCS == connectivity.TransientFailure && + (cs == connectivity.Connecting || cs == connectivity.Idle) { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or + // CONNECTING transitions to prevent the aggregated state from being + // always CONNECTING when many backends exist but are all down. + return oldCS + } + + w.connectivityState = cs + + return oldCS +} + +// weight returns the current effective weight of the subconn, taking into +// account the parameters. Returns 0 for blacked out or expired data, which +// will cause the backend weight to be treated as the mean of the weights of the +// other backends. If forScheduler is set to true, this function will emit +// metrics through the metrics registry. +func (w *weightedSubConn) weight(now time.Time, weightExpirationPeriod, blackoutPeriod time.Duration, recordMetrics bool) (weight float64) { + w.mu.Lock() + defer w.mu.Unlock() + + if recordMetrics { + defer func() { + endpointWeightsMetric.Record(w.metricsRecorder, weight, w.target, w.locality) + }() + } + + // The SubConn has not received a load report (i.e. just turned READY with + // no load report). + if w.lastUpdated.Equal(time.Time{}) { + endpointWeightNotYetUsableMetric.Record(w.metricsRecorder, 1, w.target, w.locality) + return 0 + } + + // If the most recent update was longer ago than the expiration period, + // reset nonEmptySince so that we apply the blackout period again if we + // start getting data again in the future, and return 0. + if now.Sub(w.lastUpdated) >= weightExpirationPeriod { + if recordMetrics { + endpointWeightStaleMetric.Record(w.metricsRecorder, 1, w.target, w.locality) + } + w.nonEmptySince = time.Time{} + return 0 + } + + // If we don't have at least blackoutPeriod worth of data, return 0. + if blackoutPeriod != 0 && (w.nonEmptySince.Equal(time.Time{}) || now.Sub(w.nonEmptySince) < blackoutPeriod) { + if recordMetrics { + endpointWeightNotYetUsableMetric.Record(w.metricsRecorder, 1, w.target, w.locality) + } + return 0 + } + + return w.weightVal +} diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/config.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/config.go new file mode 100644 index 0000000000..38f89d32fb --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/config.go @@ -0,0 +1,59 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package weightedroundrobin + +import ( + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +type lbConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // Whether to enable out-of-band utilization reporting collection from the + // endpoints. 
By default, per-request utilization reporting is used. + EnableOOBLoadReport bool `json:"enableOobLoadReport,omitempty"` + + // Load reporting interval to request from the server. Note that the + // server may not provide reports as frequently as the client requests. + // Used only when enable_oob_load_report is true. Default is 10 seconds. + OOBReportingPeriod iserviceconfig.Duration `json:"oobReportingPeriod,omitempty"` + + // A given endpoint must report load metrics continuously for at least this + // long before the endpoint weight will be used. This avoids churn when + // the set of endpoint addresses changes. Takes effect both immediately + // after we establish a connection to an endpoint and after + // weight_expiration_period has caused us to stop using the most recent + // load metrics. Default is 10 seconds. + BlackoutPeriod iserviceconfig.Duration `json:"blackoutPeriod,omitempty"` + + // If a given endpoint has not reported load metrics in this long, + // then we stop using the reported weight. This ensures that we do + // not continue to use very stale weights. Once we stop using a stale + // value, if we later start seeing fresh reports again, the + // blackout_period applies. Defaults to 3 minutes. + WeightExpirationPeriod iserviceconfig.Duration `json:"weightExpirationPeriod,omitempty"` + + // How often endpoint weights are recalculated. Default is 1 second. + WeightUpdatePeriod iserviceconfig.Duration `json:"weightUpdatePeriod,omitempty"` + + // The multiplier used to adjust endpoint weights with the error rate + // calculated as eps/qps. Default is 1.0. + ErrorUtilizationPenalty float64 `json:"errorUtilizationPenalty,omitempty"` +} diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/internal/internal.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/internal/internal.go new file mode 100644 index 0000000000..7b64fbf4e5 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/internal/internal.go @@ -0,0 +1,44 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal allows for easier testing of the weightedroundrobin +// package. +package internal + +import ( + "time" +) + +// AllowAnyWeightUpdatePeriod permits any setting of WeightUpdatePeriod for +// testing. Normally a minimum of 100ms is applied. +var AllowAnyWeightUpdatePeriod bool + +// LBConfig allows tests to produce a JSON form of the config from the struct +// instead of using a string. 
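+//
+// For example (illustrative values only), marshalling an LBConfig whose
+// EnableOOBLoadReport points to true and whose OOBReportingPeriod points to
+// "10s" yields {"enableOobLoadReport":true,"oobReportingPeriod":"10s"}.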
+type LBConfig struct { + EnableOOBLoadReport *bool `json:"enableOobLoadReport,omitempty"` + OOBReportingPeriod *string `json:"oobReportingPeriod,omitempty"` + BlackoutPeriod *string `json:"blackoutPeriod,omitempty"` + WeightExpirationPeriod *string `json:"weightExpirationPeriod,omitempty"` + WeightUpdatePeriod *string `json:"weightUpdatePeriod,omitempty"` + ErrorUtilizationPenalty *float64 `json:"errorUtilizationPenalty,omitempty"` +} + +// TimeNow can be overridden by tests to return a different value for the +// current iserviceconfig. +var TimeNow = time.Now diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/logging.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/logging.go new file mode 100644 index 0000000000..43184ca9ab --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/logging.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package weightedroundrobin + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[%p] " + +var logger = grpclog.Component("weighted-round-robin") + +func prefixLogger(p *wrrBalancer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go new file mode 100644 index 0000000000..56aa15da10 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go @@ -0,0 +1,146 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package weightedroundrobin + +import ( + "math" +) + +type scheduler interface { + nextIndex() int +} + +// newScheduler uses scWeights to create a new scheduler for selecting subconns +// in a picker. It will return a round robin implementation if at least +// len(scWeights)-1 are zero or there is only a single subconn, otherwise it +// will return an Earliest Deadline First (EDF) scheduler implementation that +// selects the subchannels according to their weights. 
+func (p *picker) newScheduler(recordMetrics bool) scheduler { + scWeights := p.scWeights(recordMetrics) + n := len(scWeights) + if n == 0 { + return nil + } + if n == 1 { + if recordMetrics { + rrFallbackMetric.Record(p.metricsRecorder, 1, p.target, p.locality) + } + return &rrScheduler{numSCs: 1, inc: p.inc} + } + sum := float64(0) + numZero := 0 + max := float64(0) + for _, w := range scWeights { + sum += w + if w > max { + max = w + } + if w == 0 { + numZero++ + } + } + + if numZero >= n-1 { + if recordMetrics { + rrFallbackMetric.Record(p.metricsRecorder, 1, p.target, p.locality) + } + return &rrScheduler{numSCs: uint32(n), inc: p.inc} + } + unscaledMean := sum / float64(n-numZero) + scalingFactor := maxWeight / max + mean := uint16(math.Round(scalingFactor * unscaledMean)) + + weights := make([]uint16, n) + allEqual := true + for i, w := range scWeights { + if w == 0 { + // Backends with weight = 0 use the mean. + weights[i] = mean + } else { + scaledWeight := uint16(math.Round(scalingFactor * w)) + weights[i] = scaledWeight + if scaledWeight != mean { + allEqual = false + } + } + } + + if allEqual { + return &rrScheduler{numSCs: uint32(n), inc: p.inc} + } + + logger.Infof("using edf scheduler with weights: %v", weights) + return &edfScheduler{weights: weights, inc: p.inc} +} + +const maxWeight = math.MaxUint16 + +// edfScheduler implements EDF using the same algorithm as grpc-c++ here: +// +// https://github.com/grpc/grpc/blob/master/src/core/ext/filters/client_channel/lb_policy/weighted_round_robin/static_stride_scheduler.cc +type edfScheduler struct { + inc func() uint32 + weights []uint16 +} + +// Returns the index in s.weights for the picker to choose. +func (s *edfScheduler) nextIndex() int { + const offset = maxWeight / 2 + + for { + idx := uint64(s.inc()) + + // The sequence number (idx) is split in two: the lower %n gives the + // index of the backend, and the rest gives the number of times we've + // iterated through all backends. `generation` is used to + // deterministically decide whether we pick or skip the backend on this + // iteration, in proportion to the backend's weight. + + backendIndex := idx % uint64(len(s.weights)) + generation := idx / uint64(len(s.weights)) + weight := uint64(s.weights[backendIndex]) + + // We pick a backend `weight` times per `maxWeight` generations. The + // multiply and modulus ~evenly spread out the picks for a given + // backend between different generations. The offset by `backendIndex` + // helps to reduce the chance of multiple consecutive non-picks: if we + // have two consecutive backends with an equal, say, 80% weight of the + // max, with no offset we would see 1/5 generations that skipped both. + // TODO(b/190488683): add test for offset efficacy. + mod := uint64(weight*generation+backendIndex*offset) % maxWeight + + if mod < maxWeight-weight { + continue + } + return int(backendIndex) + } +} + +// A simple RR scheduler to use for fallback when fewer than two backends have +// non-zero weights, or all backends have the same weight, or when only one +// subconn exists. 
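A standalone sketch (not part of the patch) of the static-stride rule implemented by nextIndex above, using invented weights, to show how picks distribute between backends of unequal weight:

    package main

    import "fmt"

    func main() {
        const maxWeight = 65535 // math.MaxUint16, as in the scheduler above
        const offset = maxWeight / 2
        weights := []uint64{65535, 21845} // backend 0 carries ~3x the weight of backend 1
        counts := make([]int, len(weights))
        var idx uint64
        for picks := 0; picks < 4000; {
            backend := idx % uint64(len(weights))
            generation := idx / uint64(len(weights))
            idx++
            mod := (weights[backend]*generation + backend*offset) % maxWeight
            if mod < maxWeight-weights[backend] {
                continue // not this backend's turn in this generation
            }
            counts[backend]++
            picks++
        }
        fmt.Println(counts) // roughly a 3:1 split, e.g. [3000 1000]
    }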
+type rrScheduler struct { + inc func() uint32 + numSCs uint32 +} + +func (s *rrScheduler) nextIndex() int { + idx := s.inc() + return int(idx % s.numSCs) +} diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go new file mode 100644 index 0000000000..8741fdad19 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go @@ -0,0 +1,69 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package weightedroundrobin provides an implementation of the weighted round +// robin LB policy, as defined in [gRFC A58]. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +// +// [gRFC A58]: https://github.com/grpc/proposal/blob/master/A58-client-side-weighted-round-robin-lb-policy.md +package weightedroundrobin + +import ( + "fmt" + + "google.golang.org/grpc/resolver" +) + +// attributeKey is the type used as the key to store AddrInfo in the +// BalancerAttributes field of resolver.Address. +type attributeKey struct{} + +// AddrInfo will be stored in the BalancerAttributes field of Address in order +// to use weighted roundrobin balancer. +type AddrInfo struct { + Weight uint32 +} + +// Equal allows the values to be compared by Attributes.Equal. +func (a AddrInfo) Equal(o any) bool { + oa, ok := o.(AddrInfo) + return ok && oa.Weight == a.Weight +} + +// SetAddrInfo returns a copy of addr in which the BalancerAttributes field is +// updated with addrInfo. +func SetAddrInfo(addr resolver.Address, addrInfo AddrInfo) resolver.Address { + addr.BalancerAttributes = addr.BalancerAttributes.WithValue(attributeKey{}, addrInfo) + return addr +} + +// GetAddrInfo returns the AddrInfo stored in the BalancerAttributes field of +// addr. +func GetAddrInfo(addr resolver.Address) AddrInfo { + v := addr.BalancerAttributes.Value(attributeKey{}) + ai, _ := v.(AddrInfo) + return ai +} + +func (a AddrInfo) String() string { + return fmt.Sprintf("Weight: %d", a.Weight) +} diff --git a/vendor/google.golang.org/grpc/balancer/weightedtarget/logging.go b/vendor/google.golang.org/grpc/balancer/weightedtarget/logging.go new file mode 100644 index 0000000000..e406139307 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/weightedtarget/logging.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
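A small sketch of attaching static weights to addresses with the AddrInfo/SetAddrInfo helpers defined in weightedroundrobin.go above, e.g. from a custom resolver; the package name, addresses, and weights are placeholders:

    package myresolver // hypothetical

    import (
        "google.golang.org/grpc/balancer/weightedroundrobin"
        "google.golang.org/grpc/resolver"
    )

    // weightedAddrs returns addresses annotated with static weights for
    // consumption by weight-aware LB policies.
    func weightedAddrs() []resolver.Address {
        a1 := weightedroundrobin.SetAddrInfo(
            resolver.Address{Addr: "10.0.0.1:443"},
            weightedroundrobin.AddrInfo{Weight: 3})
        a2 := weightedroundrobin.SetAddrInfo(
            resolver.Address{Addr: "10.0.0.2:443"},
            weightedroundrobin.AddrInfo{Weight: 1})
        return []resolver.Address{a1, a2}
    }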
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package weightedtarget + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[weighted-target-lb %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *weightedTargetBalancer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go new file mode 100644 index 0000000000..bcc8aca8b4 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go @@ -0,0 +1,309 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package weightedaggregator implements state aggregator for weighted_target +// balancer. +// +// This is a separate package so it can be shared by weighted_target and eds. +// The eds balancer will be refactored to use weighted_target directly. After +// that, all functions and structs in this package can be moved to package +// weightedtarget and unexported. +package weightedaggregator + +import ( + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/wrr" +) + +type weightedPickerState struct { + weight uint32 + state balancer.State + // stateToAggregate is the connectivity state used only for state + // aggregation. It could be different from state.ConnectivityState. For + // example when a sub-balancer transitions from TransientFailure to + // connecting, state.ConnectivityState is Connecting, but stateToAggregate + // is still TransientFailure. + stateToAggregate connectivity.State +} + +func (s *weightedPickerState) String() string { + return fmt.Sprintf("weight:%v,picker:%p,state:%v,stateToAggregate:%v", s.weight, s.state.Picker, s.state.ConnectivityState, s.stateToAggregate) +} + +// Aggregator is the weighted balancer state aggregator. +type Aggregator struct { + cc balancer.ClientConn + logger *grpclog.PrefixLogger + newWRR func() wrr.WRR + + csEvltr *balancer.ConnectivityStateEvaluator + + mu sync.Mutex + // If started is false, no updates should be sent to the parent cc. A closed + // sub-balancer could still send pickers to this aggregator. This makes sure + // that no updates will be forwarded to parent when the whole balancer group + // and states aggregator is closed. + started bool + // All balancer IDs exist as keys in this map, even if balancer group is not + // started. + // + // If an ID is not in map, it's either removed or never added. + idToPickerState map[string]*weightedPickerState + // Set when UpdateState call propagation is paused. 
+ pauseUpdateState bool + // Set when UpdateState call propagation is paused and an UpdateState call + // is suppressed. + needUpdateStateOnResume bool +} + +// New creates a new weighted balancer state aggregator. +func New(cc balancer.ClientConn, logger *grpclog.PrefixLogger, newWRR func() wrr.WRR) *Aggregator { + return &Aggregator{ + cc: cc, + logger: logger, + newWRR: newWRR, + csEvltr: &balancer.ConnectivityStateEvaluator{}, + idToPickerState: make(map[string]*weightedPickerState), + } +} + +// Start starts the aggregator. It can be called after Stop to restart the +// aggregator. +func (wbsa *Aggregator) Start() { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + wbsa.started = true +} + +// Stop stops the aggregator. When the aggregator is stopped, it won't call +// parent ClientConn to update balancer state. +func (wbsa *Aggregator) Stop() { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + wbsa.started = false + wbsa.clearStates() +} + +// Add adds a sub-balancer state with weight. It adds a place holder, and waits for +// the real sub-balancer to update state. +func (wbsa *Aggregator) Add(id string, weight uint32) { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + wbsa.idToPickerState[id] = &weightedPickerState{ + weight: weight, + // Start everything in CONNECTING, so if one of the sub-balancers + // reports TransientFailure, the RPCs will still wait for the other + // sub-balancers. + state: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + stateToAggregate: connectivity.Connecting, + } + wbsa.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Connecting) + + wbsa.buildAndUpdateLocked() +} + +// Remove removes the sub-balancer state. Future updates from this sub-balancer, +// if any, will be ignored. +func (wbsa *Aggregator) Remove(id string) { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + if _, ok := wbsa.idToPickerState[id]; !ok { + return + } + // Setting the state of the deleted sub-balancer to Shutdown will get csEvltr + // to remove the previous state for any aggregated state evaluations. + // transitions to and from connectivity.Shutdown are ignored by csEvltr. + wbsa.csEvltr.RecordTransition(wbsa.idToPickerState[id].stateToAggregate, connectivity.Shutdown) + // Remove id and picker from picker map. This also results in future updates + // for this ID to be ignored. + delete(wbsa.idToPickerState, id) + wbsa.buildAndUpdateLocked() +} + +// UpdateWeight updates the weight for the given id. Note that this doesn't +// trigger an update to the parent ClientConn. The caller should decide when +// it's necessary, and call BuildAndUpdate. +func (wbsa *Aggregator) UpdateWeight(id string, newWeight uint32) { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + pState, ok := wbsa.idToPickerState[id] + if !ok { + return + } + pState.weight = newWeight +} + +// PauseStateUpdates causes UpdateState calls to not propagate to the parent +// ClientConn. The last state will be remembered and propagated when +// ResumeStateUpdates is called. +func (wbsa *Aggregator) PauseStateUpdates() { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + wbsa.pauseUpdateState = true + wbsa.needUpdateStateOnResume = false +} + +// ResumeStateUpdates will resume propagating UpdateState calls to the parent, +// and call UpdateState on the parent if any UpdateState call was suppressed. 
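A short sketch of the intended pause/resume call pattern, mirroring how weighted_target drives the aggregator later in this patch; the locality IDs and weights are placeholders:

    package example // hypothetical

    import "google.golang.org/grpc/balancer/weightedtarget/weightedaggregator"

    // reconcile batches several membership/weight changes behind a pause so the
    // parent ClientConn sees at most one aggregated picker update on resume.
    func reconcile(agg *weightedaggregator.Aggregator) {
        agg.PauseStateUpdates()
        defer agg.ResumeStateUpdates()
        agg.Remove("locality-a")
        agg.Add("locality-b", 70)
        agg.UpdateWeight("locality-c", 30)
    }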
+func (wbsa *Aggregator) ResumeStateUpdates() { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + wbsa.pauseUpdateState = false + if wbsa.needUpdateStateOnResume { + wbsa.cc.UpdateState(wbsa.build()) + } +} + +// NeedUpdateStateOnResume sets the UpdateStateOnResume bool to true, letting a +// picker update be sent once ResumeStateUpdates is called. +func (wbsa *Aggregator) NeedUpdateStateOnResume() { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + wbsa.needUpdateStateOnResume = true +} + +// UpdateState is called to report a balancer state change from sub-balancer. +// It's usually called by the balancer group. +// +// It calls parent ClientConn's UpdateState with the new aggregated state. +func (wbsa *Aggregator) UpdateState(id string, newState balancer.State) { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + state, ok := wbsa.idToPickerState[id] + if !ok { + // All state starts with an entry in pickStateMap. If ID is not in map, + // it's either removed, or never existed. + return + } + + if !(state.state.ConnectivityState == connectivity.TransientFailure && newState.ConnectivityState == connectivity.Connecting) { + // If old state is TransientFailure, and new state is Connecting, don't + // update the state, to prevent the aggregated state from being always + // CONNECTING. Otherwise, stateToAggregate is the same as + // state.ConnectivityState. + wbsa.csEvltr.RecordTransition(state.stateToAggregate, newState.ConnectivityState) + state.stateToAggregate = newState.ConnectivityState + } + state.state = newState + + wbsa.buildAndUpdateLocked() +} + +// clearState Reset everything to init state (Connecting) but keep the entry in +// map (to keep the weight). +// +// Caller must hold wbsa.mu. +func (wbsa *Aggregator) clearStates() { + for _, pState := range wbsa.idToPickerState { + pState.state = balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + } + pState.stateToAggregate = connectivity.Connecting + } +} + +// buildAndUpdateLocked aggregates the connectivity states of the sub-balancers, +// builds a new picker and sends an update to the parent ClientConn. +// +// Caller must hold wbsa.mu. +func (wbsa *Aggregator) buildAndUpdateLocked() { + if !wbsa.started { + return + } + if wbsa.pauseUpdateState { + // If updates are paused, do not call UpdateState, but remember that we + // need to call it when they are resumed. + wbsa.needUpdateStateOnResume = true + return + } + + wbsa.cc.UpdateState(wbsa.build()) +} + +// build combines sub-states into one. +// +// Caller must hold wbsa.mu. +func (wbsa *Aggregator) build() balancer.State { + wbsa.logger.Infof("Child pickers with config: %+v", wbsa.idToPickerState) + + // Make sure picker's return error is consistent with the aggregatedState. + pickers := make([]weightedPickerState, 0, len(wbsa.idToPickerState)) + + switch aggState := wbsa.csEvltr.CurrentState(); aggState { + case connectivity.Connecting: + return balancer.State{ + ConnectivityState: aggState, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable)} + case connectivity.TransientFailure: + // this means that all sub-balancers are now in TransientFailure. 
+ for _, ps := range wbsa.idToPickerState { + pickers = append(pickers, *ps) + } + return balancer.State{ + ConnectivityState: aggState, + Picker: newWeightedPickerGroup(pickers, wbsa.newWRR)} + default: + for _, ps := range wbsa.idToPickerState { + if ps.stateToAggregate == connectivity.Ready { + pickers = append(pickers, *ps) + } + } + return balancer.State{ + ConnectivityState: aggState, + Picker: newWeightedPickerGroup(pickers, wbsa.newWRR)} + } + +} + +type weightedPickerGroup struct { + w wrr.WRR +} + +// newWeightedPickerGroup takes pickers with weights, and groups them into one +// picker. +// +// Note it only takes ready pickers. The map shouldn't contain non-ready +// pickers. +func newWeightedPickerGroup(readyWeightedPickers []weightedPickerState, newWRR func() wrr.WRR) *weightedPickerGroup { + w := newWRR() + for _, ps := range readyWeightedPickers { + w.Add(ps.state.Picker, int64(ps.weight)) + } + + return &weightedPickerGroup{ + w: w, + } +} + +func (pg *weightedPickerGroup) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + p, ok := pg.w.Next().(balancer.Picker) + if !ok { + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } + return p.Pick(info) +} diff --git a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go new file mode 100644 index 0000000000..dfd1ef26dc --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go @@ -0,0 +1,194 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package weightedtarget implements the weighted_target balancer. +// +// All APIs in this package are experimental. +package weightedtarget + +import ( + "encoding/json" + "fmt" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/weightedtarget/weightedaggregator" + "google.golang.org/grpc/internal/balancergroup" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/hierarchy" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/wrr" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// Name is the name of the weighted_target balancer. +const Name = "weighted_target_experimental" + +// NewRandomWRR is the WRR constructor used to pick sub-pickers from +// sub-balancers. It's to be modified in tests. 
+var NewRandomWRR = wrr.NewRandom + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + b := &weightedTargetBalancer{} + b.logger = prefixLogger(b) + b.stateAggregator = weightedaggregator.New(cc, b.logger, NewRandomWRR) + b.stateAggregator.Start() + b.bg = balancergroup.New(balancergroup.Options{ + CC: cc, + BuildOpts: bOpts, + StateAggregator: b.stateAggregator, + Logger: b.logger, + SubBalancerCloseTimeout: time.Duration(0), // Disable caching of removed child policies + }) + b.bg.Start() + b.logger.Infof("Created") + return b +} + +func (bb) Name() string { + return Name +} + +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return parseConfig(c) +} + +type weightedTargetBalancer struct { + logger *grpclog.PrefixLogger + + bg *balancergroup.BalancerGroup + stateAggregator *weightedaggregator.Aggregator + + targets map[string]Target +} + +type localityKeyType string + +const localityKey = localityKeyType("locality") + +// LocalityFromResolverState returns the locality from the resolver.State +// provided, or an empty string if not present. +func LocalityFromResolverState(state resolver.State) string { + locality, _ := state.Attributes.Value(localityKey).(string) + return locality +} + +// UpdateClientConnState takes the new targets in balancer group, +// creates/deletes sub-balancers and sends them update. addresses are split into +// groups based on hierarchy path. +func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) + newConfig, ok := s.BalancerConfig.(*LBConfig) + if !ok { + return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) + } + addressesSplit := hierarchy.Group(s.ResolverState.Addresses) + + b.stateAggregator.PauseStateUpdates() + defer b.stateAggregator.ResumeStateUpdates() + + // Remove sub-pickers and sub-balancers that are not in the new config. + for name := range b.targets { + if _, ok := newConfig.Targets[name]; !ok { + b.stateAggregator.Remove(name) + b.bg.Remove(name) + } + } + + // For sub-balancers in the new config + // - if it's new. add to balancer group, + // - if it's old, but has a new weight, update weight in balancer group. + // + // For all sub-balancers, forward the address/balancer config update. + for name, newT := range newConfig.Targets { + oldT, ok := b.targets[name] + if !ok { + // If this is a new sub-balancer, add weights to the picker map. + b.stateAggregator.Add(name, newT.Weight) + // Then add to the balancer group. + b.bg.Add(name, balancer.Get(newT.ChildPolicy.Name)) + // Not trigger a state/picker update. Wait for the new sub-balancer + // to send its updates. + } else if newT.ChildPolicy.Name != oldT.ChildPolicy.Name { + // If the child policy name is different, remove from balancer group + // and re-add. + b.stateAggregator.Remove(name) + b.bg.Remove(name) + b.stateAggregator.Add(name, newT.Weight) + b.bg.Add(name, balancer.Get(newT.ChildPolicy.Name)) + } else if newT.Weight != oldT.Weight { + // If this is an existing sub-balancer, update weight if necessary. + b.stateAggregator.UpdateWeight(name, newT.Weight) + } + + // Forwards all the update: + // - addresses are from the map after splitting with hierarchy path, + // - Top level service config and attributes are the same, + // - Balancer config comes from the targets map. 
+ // + // TODO: handle error? How to aggregate errors and return? + _ = b.bg.UpdateClientConnState(name, balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: addressesSplit[name], + ServiceConfig: s.ResolverState.ServiceConfig, + Attributes: s.ResolverState.Attributes.WithValue(localityKey, name), + }, + BalancerConfig: newT.ChildPolicy.Config, + }) + } + + b.targets = newConfig.Targets + + // If the targets length is zero, it means we have removed all child + // policies from the balancer group and aggregator. + // At the start of this UpdateClientConnState() operation, a call to + // b.stateAggregator.ResumeStateUpdates() is deferred. Thus, setting the + // needUpdateStateOnResume bool to true here will ensure a new picker is + // built as part of that deferred function. Since there are now no child + // policies, the aggregated connectivity state reported form the Aggregator + // will be TRANSIENT_FAILURE. + if len(b.targets) == 0 { + b.stateAggregator.NeedUpdateStateOnResume() + } + + return nil +} + +func (b *weightedTargetBalancer) ResolverError(err error) { + b.bg.ResolverError(err) +} + +func (b *weightedTargetBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *weightedTargetBalancer) Close() { + b.stateAggregator.Stop() + b.bg.Close() +} + +func (b *weightedTargetBalancer) ExitIdle() { + b.bg.ExitIdle() +} diff --git a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget_config.go b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget_config.go new file mode 100644 index 0000000000..52090cd67b --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget_config.go @@ -0,0 +1,49 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package weightedtarget + +import ( + "encoding/json" + + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +// Target represents one target with the weight and the child policy. +type Target struct { + // Weight is the weight of the child policy. + Weight uint32 `json:"weight,omitempty"` + // ChildPolicy is the child policy and it's config. + ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` +} + +// LBConfig is the balancer config for weighted_target. 
+type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + Targets map[string]Target `json:"targets,omitempty"` +} + +func parseConfig(c json.RawMessage) (*LBConfig, error) { + var cfg LBConfig + if err := json.Unmarshal(c, &cfg); err != nil { + return nil, err + } + return &cfg, nil +} diff --git a/vendor/google.golang.org/grpc/credentials/tls/certprovider/distributor.go b/vendor/google.golang.org/grpc/credentials/tls/certprovider/distributor.go new file mode 100644 index 0000000000..11fae92ada --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/tls/certprovider/distributor.go @@ -0,0 +1,114 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package certprovider + +import ( + "context" + "sync" + + "google.golang.org/grpc/internal/grpcsync" +) + +// Distributor makes it easy for provider implementations to furnish new key +// materials by handling synchronization between the producer and consumers of +// the key material. +// +// Provider implementations which choose to use a Distributor should do the +// following: +// - create a new Distributor using the NewDistributor() function. +// - invoke the Set() method whenever they have new key material or errors to +// report. +// - delegate to the distributor when handing calls to KeyMaterial(). +// - invoke the Stop() method when they are done using the distributor. +type Distributor struct { + // mu protects the underlying key material. + mu sync.Mutex + km *KeyMaterial + pErr error + + // ready channel to unblock KeyMaterial() invocations blocked on + // availability of key material. + ready *grpcsync.Event + // done channel to notify provider implementations and unblock any + // KeyMaterial() calls, once the Distributor is closed. + closed *grpcsync.Event +} + +// NewDistributor returns a new Distributor. +func NewDistributor() *Distributor { + return &Distributor{ + ready: grpcsync.NewEvent(), + closed: grpcsync.NewEvent(), + } +} + +// Set updates the key material in the distributor with km. +// +// Provider implementations which use the distributor must not modify the +// contents of the KeyMaterial struct pointed to by km. +// +// A non-nil err value indicates the error that the provider implementation ran +// into when trying to fetch key material, and makes it possible to surface the +// error to the user. A non-nil error value passed here causes distributor's +// KeyMaterial() method to return nil key material. +func (d *Distributor) Set(km *KeyMaterial, err error) { + d.mu.Lock() + d.km = km + d.pErr = err + if err != nil { + // If a non-nil err is passed, we ignore the key material being passed. + d.km = nil + } + d.ready.Fire() + d.mu.Unlock() +} + +// KeyMaterial returns the most recent key material provided to the Distributor. +// If no key material was provided at the time of this call, it will block until +// the deadline on the context expires or fresh key material arrives. 
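A test-style sketch of the JSON shape that the weighted_target parseConfig above accepts; the target names, weights, and the round_robin child policy are invented, and the blank import only ensures a child policy with that name is registered:

    package weightedtarget

    import (
        "encoding/json"
        "testing"

        _ "google.golang.org/grpc/balancer/roundrobin" // registers "round_robin"
    )

    func TestParseConfigSketch(t *testing.T) {
        cfg, err := parseConfig(json.RawMessage(`{
            "targets": {
                "cluster-a": {"weight": 75, "childPolicy": [{"round_robin": {}}]},
                "cluster-b": {"weight": 25, "childPolicy": [{"round_robin": {}}]}
            }
        }`))
        if err != nil {
            t.Fatalf("parseConfig() failed: %v", err)
        }
        if got := cfg.Targets["cluster-a"].Weight; got != 75 {
            t.Fatalf("weight = %d, want 75", got)
        }
    }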
+func (d *Distributor) KeyMaterial(ctx context.Context) (*KeyMaterial, error) { + if d.closed.HasFired() { + return nil, errProviderClosed + } + + if d.ready.HasFired() { + return d.keyMaterial() + } + + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-d.closed.Done(): + return nil, errProviderClosed + case <-d.ready.Done(): + return d.keyMaterial() + } +} + +func (d *Distributor) keyMaterial() (*KeyMaterial, error) { + d.mu.Lock() + defer d.mu.Unlock() + return d.km, d.pErr +} + +// Stop turns down the distributor, releases allocated resources and fails any +// active KeyMaterial() call waiting for new key material. +func (d *Distributor) Stop() { + d.closed.Fire() +} diff --git a/vendor/google.golang.org/grpc/credentials/tls/certprovider/pemfile/builder.go b/vendor/google.golang.org/grpc/credentials/tls/certprovider/pemfile/builder.go new file mode 100644 index 0000000000..8c15baeb59 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/tls/certprovider/pemfile/builder.go @@ -0,0 +1,96 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package pemfile + +import ( + "encoding/json" + "fmt" + "time" + + "google.golang.org/grpc/credentials/tls/certprovider" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + PluginName = "file_watcher" + defaultRefreshInterval = 10 * time.Minute +) + +func init() { + certprovider.Register(&pluginBuilder{}) +} + +type pluginBuilder struct{} + +func (p *pluginBuilder) ParseConfig(c any) (*certprovider.BuildableConfig, error) { + data, ok := c.(json.RawMessage) + if !ok { + return nil, fmt.Errorf("meshca: unsupported config type: %T", c) + } + opts, err := pluginConfigFromJSON(data) + if err != nil { + return nil, err + } + return certprovider.NewBuildableConfig(PluginName, opts.canonical(), func(certprovider.BuildOptions) certprovider.Provider { + return newProvider(opts) + }), nil +} + +func (p *pluginBuilder) Name() string { + return PluginName +} + +func pluginConfigFromJSON(jd json.RawMessage) (Options, error) { + // The only difference between this anonymous struct and the Options struct + // is that the refresh_interval is represented here as a duration proto, + // while in the latter a time.Duration is used. + cfg := &struct { + CertificateFile string `json:"certificate_file,omitempty"` + PrivateKeyFile string `json:"private_key_file,omitempty"` + CACertificateFile string `json:"ca_certificate_file,omitempty"` + RefreshInterval json.RawMessage `json:"refresh_interval,omitempty"` + }{} + if err := json.Unmarshal(jd, cfg); err != nil { + return Options{}, fmt.Errorf("pemfile: json.Unmarshal(%s) failed: %v", string(jd), err) + } + + opts := Options{ + CertFile: cfg.CertificateFile, + KeyFile: cfg.PrivateKeyFile, + RootFile: cfg.CACertificateFile, + // Refresh interval is the only field in the configuration for which we + // support a default value. 
We cannot possibly have valid defaults for + // file paths to watch. Also, it is valid to specify an empty path for + // some of those fields if the user does not want to watch them. + RefreshDuration: defaultRefreshInterval, + } + if cfg.RefreshInterval != nil { + dur := &durationpb.Duration{} + if err := protojson.Unmarshal(cfg.RefreshInterval, dur); err != nil { + return Options{}, fmt.Errorf("pemfile: protojson.Unmarshal(%+v) failed: %v", cfg.RefreshInterval, err) + } + opts.RefreshDuration = dur.AsDuration() + } + + if err := opts.validate(); err != nil { + return Options{}, err + } + return opts, nil +} diff --git a/vendor/google.golang.org/grpc/credentials/tls/certprovider/pemfile/watcher.go b/vendor/google.golang.org/grpc/credentials/tls/certprovider/pemfile/watcher.go new file mode 100644 index 0000000000..7ed5c53ba4 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/tls/certprovider/pemfile/watcher.go @@ -0,0 +1,260 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pemfile provides a file watching certificate provider plugin +// implementation which works for files with PEM contents. +// +// # Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. +package pemfile + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "os" + "path/filepath" + "time" + + "google.golang.org/grpc/credentials/tls/certprovider" + "google.golang.org/grpc/grpclog" +) + +const defaultCertRefreshDuration = 1 * time.Hour + +var ( + // For overriding from unit tests. + newDistributor = func() distributor { return certprovider.NewDistributor() } + + logger = grpclog.Component("pemfile") +) + +// Options configures a certificate provider plugin that watches a specified set +// of files that contain certificates and keys in PEM format. +type Options struct { + // CertFile is the file that holds the identity certificate. + // Optional. If this is set, KeyFile must also be set. + CertFile string + // KeyFile is the file that holds identity private key. + // Optional. If this is set, CertFile must also be set. + KeyFile string + // RootFile is the file that holds trusted root certificate(s). + // Optional. + RootFile string + // RefreshDuration is the amount of time the plugin waits before checking + // for updates in the specified files. + // Optional. If not set, a default value (1 hour) will be used. 
+ RefreshDuration time.Duration +} + +func (o Options) canonical() []byte { + return []byte(fmt.Sprintf("%s:%s:%s:%s", o.CertFile, o.KeyFile, o.RootFile, o.RefreshDuration)) +} + +func (o Options) validate() error { + if o.CertFile == "" && o.KeyFile == "" && o.RootFile == "" { + return fmt.Errorf("pemfile: at least one credential file needs to be specified") + } + if keySpecified, certSpecified := o.KeyFile != "", o.CertFile != ""; keySpecified != certSpecified { + return fmt.Errorf("pemfile: private key file and identity cert file should be both specified or not specified") + } + // C-core has a limitation that they cannot verify that a certificate file + // matches a key file. So, the only way to get around this is to make sure + // that both files are in the same directory and that they do an atomic + // read. Even though Java/Go do not have this limitation, we want the + // overall plugin behavior to be consistent across languages. + if certDir, keyDir := filepath.Dir(o.CertFile), filepath.Dir(o.KeyFile); certDir != keyDir { + return errors.New("pemfile: certificate and key file must be in the same directory") + } + return nil +} + +// NewProvider returns a new certificate provider plugin that is configured to +// watch the PEM files specified in the passed in options. +func NewProvider(o Options) (certprovider.Provider, error) { + if err := o.validate(); err != nil { + return nil, err + } + return newProvider(o), nil +} + +// newProvider is used to create a new certificate provider plugin after +// validating the options, and hence does not return an error. +func newProvider(o Options) certprovider.Provider { + if o.RefreshDuration == 0 { + o.RefreshDuration = defaultCertRefreshDuration + } + + provider := &watcher{opts: o} + if o.CertFile != "" && o.KeyFile != "" { + provider.identityDistributor = newDistributor() + } + if o.RootFile != "" { + provider.rootDistributor = newDistributor() + } + + ctx, cancel := context.WithCancel(context.Background()) + provider.cancel = cancel + go provider.run(ctx) + return provider +} + +// watcher is a certificate provider plugin that implements the +// certprovider.Provider interface. It watches a set of certificate and key +// files and provides the most up-to-date key material for consumption by +// credentials implementation. +type watcher struct { + identityDistributor distributor + rootDistributor distributor + opts Options + certFileContents []byte + keyFileContents []byte + rootFileContents []byte + cancel context.CancelFunc +} + +// distributor wraps the methods on certprovider.Distributor which are used by +// the plugin. This is very useful in tests which need to know exactly when the +// plugin updates its key material. +type distributor interface { + KeyMaterial(ctx context.Context) (*certprovider.KeyMaterial, error) + Set(km *certprovider.KeyMaterial, err error) + Stop() +} + +// updateIdentityDistributor checks if the cert/key files that the plugin is +// watching have changed, and if so, reads the new contents and updates the +// identityDistributor with the new key material. +// +// Skips updates when file reading or parsing fails. +// TODO(easwars): Retry with limit (on the number of retries or the amount of +// time) upon failures. 
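A minimal sketch of constructing the file-watching provider directly with the NewProvider/Options API above; the file paths and refresh interval are placeholders:

    package main

    import (
        "context"
        "log"
        "time"

        "google.golang.org/grpc/credentials/tls/certprovider/pemfile"
    )

    func main() {
        provider, err := pemfile.NewProvider(pemfile.Options{
            CertFile:        "/etc/certs/tls.crt",
            KeyFile:         "/etc/certs/tls.key",
            RootFile:        "/etc/certs/ca.crt",
            RefreshDuration: 30 * time.Minute,
        })
        if err != nil {
            log.Fatalf("pemfile.NewProvider failed: %v", err)
        }
        defer provider.Close()

        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        defer cancel()
        // Blocks until the first file read has populated the distributor.
        km, err := provider.KeyMaterial(ctx)
        if err != nil {
            log.Fatalf("KeyMaterial failed: %v", err)
        }
        log.Printf("loaded %d identity certificate(s)", len(km.Certs))
    }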
+func (w *watcher) updateIdentityDistributor() { + if w.identityDistributor == nil { + return + } + + certFileContents, err := os.ReadFile(w.opts.CertFile) + if err != nil { + logger.Warningf("certFile (%s) read failed: %v", w.opts.CertFile, err) + return + } + keyFileContents, err := os.ReadFile(w.opts.KeyFile) + if err != nil { + logger.Warningf("keyFile (%s) read failed: %v", w.opts.KeyFile, err) + return + } + // If the file contents have not changed, skip updating the distributor. + if bytes.Equal(w.certFileContents, certFileContents) && bytes.Equal(w.keyFileContents, keyFileContents) { + return + } + + cert, err := tls.X509KeyPair(certFileContents, keyFileContents) + if err != nil { + logger.Warningf("tls.X509KeyPair(%q, %q) failed: %v", certFileContents, keyFileContents, err) + return + } + w.certFileContents = certFileContents + w.keyFileContents = keyFileContents + w.identityDistributor.Set(&certprovider.KeyMaterial{Certs: []tls.Certificate{cert}}, nil) +} + +// updateRootDistributor checks if the root cert file that the plugin is +// watching hs changed, and if so, updates the rootDistributor with the new key +// material. +// +// Skips updates when root cert reading or parsing fails. +// TODO(easwars): Retry with limit (on the number of retries or the amount of +// time) upon failures. +func (w *watcher) updateRootDistributor() { + if w.rootDistributor == nil { + return + } + + rootFileContents, err := os.ReadFile(w.opts.RootFile) + if err != nil { + logger.Warningf("rootFile (%s) read failed: %v", w.opts.RootFile, err) + return + } + trustPool := x509.NewCertPool() + if !trustPool.AppendCertsFromPEM(rootFileContents) { + logger.Warning("failed to parse root certificate") + return + } + // If the file contents have not changed, skip updating the distributor. + if bytes.Equal(w.rootFileContents, rootFileContents) { + return + } + + w.rootFileContents = rootFileContents + w.rootDistributor.Set(&certprovider.KeyMaterial{Roots: trustPool}, nil) +} + +// run is a long running goroutine which watches the configured files for +// changes, and pushes new key material into the appropriate distributors which +// is returned from calls to KeyMaterial(). +func (w *watcher) run(ctx context.Context) { + ticker := time.NewTicker(w.opts.RefreshDuration) + for { + w.updateIdentityDistributor() + w.updateRootDistributor() + select { + case <-ctx.Done(): + ticker.Stop() + if w.identityDistributor != nil { + w.identityDistributor.Stop() + } + if w.rootDistributor != nil { + w.rootDistributor.Stop() + } + return + case <-ticker.C: + } + } +} + +// KeyMaterial returns the key material sourced by the watcher. +// Callers are expected to use the returned value as read-only. +func (w *watcher) KeyMaterial(ctx context.Context) (*certprovider.KeyMaterial, error) { + km := &certprovider.KeyMaterial{} + if w.identityDistributor != nil { + identityKM, err := w.identityDistributor.KeyMaterial(ctx) + if err != nil { + return nil, err + } + km.Certs = identityKM.Certs + } + if w.rootDistributor != nil { + rootKM, err := w.rootDistributor.KeyMaterial(ctx) + if err != nil { + return nil, err + } + km.Roots = rootKM.Roots + } + return km, nil +} + +// Close cleans up resources allocated by the watcher. 
+func (w *watcher) Close() { + w.cancel() +} diff --git a/vendor/google.golang.org/grpc/credentials/tls/certprovider/provider.go b/vendor/google.golang.org/grpc/credentials/tls/certprovider/provider.go new file mode 100644 index 0000000000..07ba05b107 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/tls/certprovider/provider.go @@ -0,0 +1,109 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package certprovider defines APIs for Certificate Providers in gRPC. +// +// # Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. +package certprovider + +import ( + "context" + "crypto/tls" + "crypto/x509" + "errors" + + "google.golang.org/grpc/internal" +) + +func init() { + internal.GetCertificateProviderBuilder = getBuilder +} + +var ( + // errProviderClosed is returned by Distributor.KeyMaterial when it is + // closed. + errProviderClosed = errors.New("provider instance is closed") + + // m is a map from name to Provider builder. + m = make(map[string]Builder) +) + +// Register registers the Provider builder, whose name as returned by its Name() +// method will be used as the name registered with this builder. Registered +// Builders are used by the Store to create Providers. +func Register(b Builder) { + m[b.Name()] = b +} + +// getBuilder returns the Provider builder registered with the given name. +// If no builder is registered with the provided name, nil will be returned. +func getBuilder(name string) Builder { + if b, ok := m[name]; ok { + return b + } + return nil +} + +// Builder creates a Provider. +type Builder interface { + // ParseConfig parses the given config, which is in a format specific to individual + // implementations, and returns a BuildableConfig on success. + ParseConfig(any) (*BuildableConfig, error) + + // Name returns the name of providers built by this builder. + Name() string +} + +// Provider makes it possible to keep channel credential implementations up to +// date with secrets that they rely on to secure communications on the +// underlying channel. +// +// Provider implementations are free to rely on local or remote sources to fetch +// the latest secrets, and free to share any state between different +// instantiations as they deem fit. +type Provider interface { + // KeyMaterial returns the key material sourced by the Provider. + // Callers are expected to use the returned value as read-only. + KeyMaterial(ctx context.Context) (*KeyMaterial, error) + + // Close cleans up resources allocated by the Provider. + Close() +} + +// KeyMaterial wraps the certificates and keys returned by a Provider instance. +type KeyMaterial struct { + // Certs contains a slice of cert/key pairs used to prove local identity. + Certs []tls.Certificate + // Roots contains the set of trusted roots to validate the peer's identity. + Roots *x509.CertPool +} + +// BuildOptions contains parameters passed to a Provider at build time. 
+type BuildOptions struct { + // CertName holds the certificate name, whose key material is of interest to + // the caller. + CertName string + // WantRoot indicates if the caller is interested in the root certificate. + WantRoot bool + // WantIdentity indicates if the caller is interested in the identity + // certificate. + WantIdentity bool +} diff --git a/vendor/google.golang.org/grpc/credentials/tls/certprovider/store.go b/vendor/google.golang.org/grpc/credentials/tls/certprovider/store.go new file mode 100644 index 0000000000..a4b99e3d4a --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/tls/certprovider/store.go @@ -0,0 +1,196 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package certprovider + +import ( + "context" + "fmt" + "sync" + "sync/atomic" +) + +// provStore is the global singleton certificate provider store. +var provStore = &store{ + providers: make(map[storeKey]*wrappedProvider), +} + +// storeKey acts as the key to the map of providers maintained by the store. A +// combination of provider name and configuration is used to uniquely identify +// every provider instance in the store. Go maps need to be indexed by +// comparable types, so the provider configuration is converted from `any` to +// `string` using the ParseConfig method while creating this key. +type storeKey struct { + // name of the certificate provider. + name string + // configuration of the certificate provider in string form. + config string + // opts contains the certificate name and other keyMaterial options. + opts BuildOptions +} + +// wrappedProvider wraps a provider instance with a reference count. +type wrappedProvider struct { + Provider + refCount int + + // A reference to the key and store are also kept here to override the + // Close method on the provider. + storeKey storeKey + store *store +} + +// closedProvider always returns errProviderClosed error. +type closedProvider struct{} + +func (c closedProvider) KeyMaterial(context.Context) (*KeyMaterial, error) { + return nil, errProviderClosed +} + +func (c closedProvider) Close() { +} + +// singleCloseWrappedProvider wraps a provider instance with a reference count +// to properly handle multiple calls to Close. +type singleCloseWrappedProvider struct { + provider atomic.Pointer[Provider] +} + +// store is a collection of provider instances, safe for concurrent access. +type store struct { + mu sync.Mutex + providers map[storeKey]*wrappedProvider +} + +// Close overrides the Close method of the embedded provider. It releases the +// reference held by the caller on the underlying provider and if the +// provider's reference count reaches zero, it is removed from the store, and +// its Close method is also invoked. 
+func (wp *wrappedProvider) Close() { + ps := wp.store + ps.mu.Lock() + defer ps.mu.Unlock() + + wp.refCount-- + if wp.refCount == 0 { + wp.Provider.Close() + delete(ps.providers, wp.storeKey) + } +} + +// Close overrides the Close method of the embedded provider to avoid release the +// already released reference. +func (w *singleCloseWrappedProvider) Close() { + newProvider := Provider(closedProvider{}) + oldProvider := w.provider.Swap(&newProvider) + (*oldProvider).Close() +} + +// KeyMaterial returns the key material sourced by the Provider. +// Callers are expected to use the returned value as read-only. +func (w *singleCloseWrappedProvider) KeyMaterial(ctx context.Context) (*KeyMaterial, error) { + return (*w.provider.Load()).KeyMaterial(ctx) +} + +// newSingleCloseWrappedProvider create wrapper a provider instance with a reference count +// to properly handle multiple calls to Close. +func newSingleCloseWrappedProvider(provider Provider) *singleCloseWrappedProvider { + w := &singleCloseWrappedProvider{} + w.provider.Store(&provider) + return w +} + +// BuildableConfig wraps parsed provider configuration and functionality to +// instantiate provider instances. +type BuildableConfig struct { + name string + config []byte + starter func(BuildOptions) Provider + pStore *store +} + +// NewBuildableConfig creates a new BuildableConfig with the given arguments. +// Provider implementations are expected to invoke this function after parsing +// the given configuration as part of their ParseConfig() method. +// Equivalent configurations are expected to invoke this function with the same +// config argument. +func NewBuildableConfig(name string, config []byte, starter func(BuildOptions) Provider) *BuildableConfig { + return &BuildableConfig{ + name: name, + config: config, + starter: starter, + pStore: provStore, + } +} + +// Build kicks off a provider instance with the wrapped configuration. Multiple +// invocations of this method with the same opts will result in provider +// instances being reused. +func (bc *BuildableConfig) Build(opts BuildOptions) (Provider, error) { + provStore.mu.Lock() + defer provStore.mu.Unlock() + + sk := storeKey{ + name: bc.name, + config: string(bc.config), + opts: opts, + } + if wp, ok := provStore.providers[sk]; ok { + wp.refCount++ + return newSingleCloseWrappedProvider(wp), nil + } + + provider := bc.starter(opts) + if provider == nil { + return nil, fmt.Errorf("provider(%q, %q).Build(%v) failed", sk.name, sk.config, opts) + } + wp := &wrappedProvider{ + Provider: provider, + refCount: 1, + storeKey: sk, + store: provStore, + } + provStore.providers[sk] = wp + return newSingleCloseWrappedProvider(wp), nil +} + +// String returns the provider name and config as a colon separated string. +func (bc *BuildableConfig) String() string { + return fmt.Sprintf("%s:%s", bc.name, string(bc.config)) +} + +// ParseConfig is a convenience function to create a BuildableConfig given a +// provider name and configuration. Returns an error if there is no registered +// builder for the given name or if the config parsing fails. +func ParseConfig(name string, config any) (*BuildableConfig, error) { + parser := getBuilder(name) + if parser == nil { + return nil, fmt.Errorf("no certificate provider builder found for %q", name) + } + return parser.ParseConfig(config) +} + +// GetProvider is a convenience function to create a provider given the name, +// config and build options. 
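A sketch of going through the registry with the GetProvider convenience helper documented above (its body follows); the config matches what the file_watcher (pemfile) builder earlier in this patch parses, and the CA path and cert name are placeholders:

    package main

    import (
        "encoding/json"
        "log"

        "google.golang.org/grpc/credentials/tls/certprovider"
        _ "google.golang.org/grpc/credentials/tls/certprovider/pemfile" // registers "file_watcher"
    )

    func main() {
        provider, err := certprovider.GetProvider(
            "file_watcher",
            json.RawMessage(`{"ca_certificate_file": "/etc/certs/ca.crt", "refresh_interval": "600s"}`),
            certprovider.BuildOptions{CertName: "default", WantRoot: true},
        )
        if err != nil {
            log.Fatalf("certprovider.GetProvider failed: %v", err)
        }
        defer provider.Close()
    }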
+func GetProvider(name string, config any, opts BuildOptions) (Provider, error) { + bc, err := ParseConfig(name, config) + if err != nil { + return nil, err + } + return bc.Build(opts) +} diff --git a/vendor/google.golang.org/grpc/encoding/gzip/gzip.go b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go new file mode 100644 index 0000000000..6306e8bb0f --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go @@ -0,0 +1,132 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gzip implements and registers the gzip compressor +// during the initialization. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package gzip + +import ( + "compress/gzip" + "encoding/binary" + "fmt" + "io" + "sync" + + "google.golang.org/grpc/encoding" +) + +// Name is the name registered for the gzip compressor. +const Name = "gzip" + +func init() { + c := &compressor{} + c.poolCompressor.New = func() any { + return &writer{Writer: gzip.NewWriter(io.Discard), pool: &c.poolCompressor} + } + encoding.RegisterCompressor(c) +} + +type writer struct { + *gzip.Writer + pool *sync.Pool +} + +// SetLevel updates the registered gzip compressor to use the compression level specified (gzip.HuffmanOnly is not supported). +// NOTE: this function must only be called during initialization time (i.e. in an init() function), +// and is not thread-safe. +// +// The error returned will be nil if the specified level is valid. +func SetLevel(level int) error { + if level < gzip.DefaultCompression || level > gzip.BestCompression { + return fmt.Errorf("grpc: invalid gzip compression level: %d", level) + } + c := encoding.GetCompressor(Name).(*compressor) + c.poolCompressor.New = func() any { + w, err := gzip.NewWriterLevel(io.Discard, level) + if err != nil { + panic(err) + } + return &writer{Writer: w, pool: &c.poolCompressor} + } + return nil +} + +func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) { + z := c.poolCompressor.Get().(*writer) + z.Writer.Reset(w) + return z, nil +} + +func (z *writer) Close() error { + defer z.pool.Put(z) + return z.Writer.Close() +} + +type reader struct { + *gzip.Reader + pool *sync.Pool +} + +func (c *compressor) Decompress(r io.Reader) (io.Reader, error) { + z, inPool := c.poolDecompressor.Get().(*reader) + if !inPool { + newZ, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + return &reader{Reader: newZ, pool: &c.poolDecompressor}, nil + } + if err := z.Reset(r); err != nil { + c.poolDecompressor.Put(z) + return nil, err + } + return z, nil +} + +func (z *reader) Read(p []byte) (n int, err error) { + n, err = z.Reader.Read(p) + if err == io.EOF { + z.pool.Put(z) + } + return n, err +} + +// RFC1952 specifies that the last four bytes "contains the size of +// the original (uncompressed) input data modulo 2^32." +// gRPC has a max message size of 2GB so we don't need to worry about wraparound. 
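A sketch of typical client-side use of the gzip package above: importing it registers the compressor, and grpc.UseCompressor opts calls in; the package name is a placeholder and the insecure credentials are illustrative only:

    package example // hypothetical

    import (
        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
        "google.golang.org/grpc/encoding/gzip" // importing registers the compressor
    )

    // dialWithGzip requests gzip compression for every RPC on the connection.
    // Individual calls could instead pass grpc.UseCompressor(gzip.Name) as a
    // per-RPC CallOption.
    func dialWithGzip(target string) (*grpc.ClientConn, error) {
        return grpc.Dial(target,
            grpc.WithTransportCredentials(insecure.NewCredentials()),
            grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
    }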
+func (c *compressor) DecompressedSize(buf []byte) int { + last := len(buf) + if last < 4 { + return -1 + } + return int(binary.LittleEndian.Uint32(buf[last-4 : last])) +} + +func (c *compressor) Name() string { + return Name +} + +type compressor struct { + poolCompressor sync.Pool + poolDecompressor sync.Pool +} diff --git a/vendor/google.golang.org/grpc/internal/admin/admin.go b/vendor/google.golang.org/grpc/internal/admin/admin.go new file mode 100644 index 0000000000..a9285ee748 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/admin/admin.go @@ -0,0 +1,60 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package admin contains internal implementation for admin service. +package admin + +import "google.golang.org/grpc" + +// services is a map from name to service register functions. +var services []func(grpc.ServiceRegistrar) (func(), error) + +// AddService adds a service to the list of admin services. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. +// +// If multiple services with the same service name are added (e.g. two services +// for `grpc.channelz.v1.Channelz`), the server will panic on `Register()`. +func AddService(f func(grpc.ServiceRegistrar) (func(), error)) { + services = append(services, f) +} + +// Register registers the set of admin services to the given server. +func Register(s grpc.ServiceRegistrar) (cleanup func(), _ error) { + var cleanups []func() + for _, f := range services { + cleanup, err := f(s) + if err != nil { + callFuncs(cleanups) + return nil, err + } + if cleanup != nil { + cleanups = append(cleanups, cleanup) + } + } + return func() { + callFuncs(cleanups) + }, nil +} + +func callFuncs(fs []func()) { + for _, f := range fs { + f() + } +} diff --git a/vendor/google.golang.org/grpc/internal/balancer/nop/nop.go b/vendor/google.golang.org/grpc/internal/balancer/nop/nop.go new file mode 100644 index 0000000000..0c96f1b811 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/nop/nop.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package nop implements a balancer with all of its balancer operations as +// no-ops, other than returning a Transient Failure Picker on a Client Conn +// update. 
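+//
+// A minimal, hypothetical usage sketch (cc is assumed to be an existing
+// balancer.ClientConn and err a configuration error; neither is defined here):
+//
+//	b := nop.NewBalancer(cc, err)
+//	b.UpdateClientConnState(balancer.ClientConnState{}) // cc observes TRANSIENT_FAILURE
+//	defer b.Close()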
+package nop + +import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" +) + +// bal is a balancer with all of its balancer operations as no-ops, other than +// returning a Transient Failure Picker on a Client Conn update. +type bal struct { + cc balancer.ClientConn + err error +} + +// NewBalancer returns a no-op balancer. +func NewBalancer(cc balancer.ClientConn, err error) balancer.Balancer { + return &bal{ + cc: cc, + err: err, + } +} + +// UpdateClientConnState updates the bal's Client Conn with an Error Picker +// and a Connectivity State of TRANSIENT_FAILURE. +func (b *bal) UpdateClientConnState(_ balancer.ClientConnState) error { + b.cc.UpdateState(balancer.State{ + Picker: base.NewErrPicker(b.err), + ConnectivityState: connectivity.TransientFailure, + }) + return nil +} + +// ResolverError is a no-op. +func (b *bal) ResolverError(_ error) {} + +// UpdateSubConnState is a no-op. +func (b *bal) UpdateSubConnState(_ balancer.SubConn, _ balancer.SubConnState) {} + +// Close is a no-op. +func (b *bal) Close() {} diff --git a/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go b/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go new file mode 100644 index 0000000000..31c9cdc9d0 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go @@ -0,0 +1,613 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package balancergroup implements a utility struct to bind multiple balancers +// into one balancer. +package balancergroup + +import ( + "encoding/json" + "fmt" + "sync" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" + "google.golang.org/grpc/internal/cache" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// subBalancerWrapper is used to keep the configurations that will be used to start +// the underlying balancer. It can be called to start/stop the underlying +// balancer. +// +// When the config changes, it will pass the update to the underlying balancer +// if it exists. +// +// TODO: move to a separate file? +type subBalancerWrapper struct { + // subBalancerWrapper is passed to the sub-balancer as a ClientConn + // wrapper, only to keep the state and picker. When sub-balancer is + // restarted while in cache, the picker needs to be resent. + // + // It also contains the sub-balancer ID, so the parent balancer group can + // keep track of SubConn/pickers and the sub-balancers they belong to. Some + // of the actions are forwarded to the parent ClientConn with no change. + // Some are forward to balancer group with the sub-balancer ID. + balancer.ClientConn + id string + group *BalancerGroup + + mu sync.Mutex + state balancer.State + + // The static part of sub-balancer. Keeps balancerBuilders and addresses. 
+ // To be used when restarting sub-balancer. + builder balancer.Builder + // Options to be passed to sub-balancer at the time of creation. + buildOpts balancer.BuildOptions + // ccState is a cache of the addresses/balancer config, so when the balancer + // is restarted after close, it will get the previous update. It's a pointer + // and is set to nil at init, so when the balancer is built for the first + // time (not a restart), it won't receive an empty update. Note that this + // isn't reset to nil when the underlying balancer is closed. + ccState *balancer.ClientConnState + // The dynamic part of sub-balancer. Only used when balancer group is + // started. Gets cleared when sub-balancer is closed. + balancer *gracefulswitch.Balancer +} + +// UpdateState overrides balancer.ClientConn, to keep state and picker. +func (sbc *subBalancerWrapper) UpdateState(state balancer.State) { + sbc.mu.Lock() + sbc.state = state + sbc.group.updateBalancerState(sbc.id, state) + sbc.mu.Unlock() +} + +// NewSubConn overrides balancer.ClientConn, so balancer group can keep track of +// the relation between subconns and sub-balancers. +func (sbc *subBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + return sbc.group.newSubConn(sbc, addrs, opts) +} + +func (sbc *subBalancerWrapper) updateBalancerStateWithCachedPicker() { + sbc.mu.Lock() + if sbc.state.Picker != nil { + sbc.group.updateBalancerState(sbc.id, sbc.state) + } + sbc.mu.Unlock() +} + +func (sbc *subBalancerWrapper) startBalancer() { + if sbc.balancer == nil { + sbc.balancer = gracefulswitch.NewBalancer(sbc, sbc.buildOpts) + } + sbc.group.logger.Infof("Creating child policy of type %q for child %q", sbc.builder.Name(), sbc.id) + sbc.balancer.SwitchTo(sbc.builder) + if sbc.ccState != nil { + sbc.balancer.UpdateClientConnState(*sbc.ccState) + } +} + +// exitIdle invokes the sub-balancer's ExitIdle method. Returns a boolean +// indicating whether or not the operation was completed. +func (sbc *subBalancerWrapper) exitIdle() (complete bool) { + b := sbc.balancer + if b == nil { + return true + } + b.ExitIdle() + return true +} + +func (sbc *subBalancerWrapper) updateClientConnState(s balancer.ClientConnState) error { + sbc.ccState = &s + b := sbc.balancer + if b == nil { + // A sub-balancer is closed when it is removed from the group or the + // group is closed as a whole, and is not expected to receive updates + // after that. But when used with the priority LB policy a sub-balancer + // (and the whole balancer group) could be closed because it's the lower + // priority, but it can still get address updates. + return nil + } + return b.UpdateClientConnState(s) +} + +func (sbc *subBalancerWrapper) resolverError(err error) { + b := sbc.balancer + if b == nil { + // A sub-balancer is closed when it is removed from the group or the + // group is closed as a whole, and is not expected to receive updates + // after that. But when used with the priority LB policy a sub-balancer + // (and the whole balancer group) could be closed because it's the lower + // priority, but it can still get address updates. + return + } + b.ResolverError(err) +} + +func (sbc *subBalancerWrapper) stopBalancer() { + if sbc.balancer == nil { + return + } + sbc.balancer.Close() + sbc.balancer = nil +} + +// BalancerGroup takes a list of balancers, each behind a gracefulswitch +// balancer, and make them into one balancer. 
+// +// Note that this struct doesn't implement balancer.Balancer, because it's not +// intended to be used directly as a balancer. It's expected to be used as a +// sub-balancer manager by a high level balancer. +// +// Updates from ClientConn are forwarded to sub-balancers +// - service config update +// - address update +// - subConn state change +// - find the corresponding balancer and forward +// +// Actions from sub-balances are forwarded to parent ClientConn +// - new/remove SubConn +// - picker update and health states change +// - sub-pickers are sent to an aggregator provided by the parent, which +// will group them into a group-picker. The aggregated connectivity state is +// also handled by the aggregator. +// - resolveNow +// +// Sub-balancers are only built when the balancer group is started. If the +// balancer group is closed, the sub-balancers are also closed. And it's +// guaranteed that no updates will be sent to parent ClientConn from a closed +// balancer group. +type BalancerGroup struct { + cc balancer.ClientConn + buildOpts balancer.BuildOptions + logger *grpclog.PrefixLogger + + // stateAggregator is where the state/picker updates will be sent to. It's + // provided by the parent balancer, to build a picker with all the + // sub-pickers. + stateAggregator BalancerStateAggregator + + // outgoingMu guards all operations in the direction: + // ClientConn-->Sub-balancer. Including start, stop, resolver updates and + // SubConn state changes. + // + // The corresponding boolean outgoingStarted is used to stop further updates + // to sub-balancers after they are closed. + outgoingMu sync.Mutex + outgoingStarted bool + idToBalancerConfig map[string]*subBalancerWrapper + // Cache for sub-balancers when they are removed. This is `nil` if caching + // is disabled by passing `0` for Options.SubBalancerCloseTimeout`. + deletedBalancerCache *cache.TimeoutCache + + // incomingMu is to make sure this balancer group doesn't send updates to cc + // after it's closed. + // + // We don't share the mutex to avoid deadlocks (e.g. a call to sub-balancer + // may call back to balancer group inline. It causes deadlock if they + // require the same mutex). + // + // We should never need to hold multiple locks at the same time in this + // struct. The case where two locks are held can only happen when the + // underlying balancer calls back into balancer group inline. So there's an + // implicit lock acquisition order that outgoingMu is locked before + // incomingMu. + + // incomingMu guards all operations in the direction: + // Sub-balancer-->ClientConn. Including NewSubConn, RemoveSubConn. It also + // guards the map from SubConn to balancer ID, so updateSubConnState needs + // to hold it shortly to potentially delete from the map. + // + // UpdateState is called by the balancer state aggregator, and it will + // decide when and whether to call. + // + // The corresponding boolean incomingStarted is used to stop further updates + // from sub-balancers after they are closed. + incomingMu sync.Mutex + incomingStarted bool // This boolean only guards calls back to ClientConn. + scToSubBalancer map[balancer.SubConn]*subBalancerWrapper +} + +// Options wraps the arguments to be passed to the BalancerGroup ctor. +type Options struct { + // CC is a reference to the parent balancer.ClientConn. + CC balancer.ClientConn + // BuildOpts contains build options to be used when creating sub-balancers. 
+ BuildOpts balancer.BuildOptions + // StateAggregator is an implementation of the BalancerStateAggregator + // interface to aggregate picker and connectivity states from sub-balancers. + StateAggregator BalancerStateAggregator + // Logger is a group specific prefix logger. + Logger *grpclog.PrefixLogger + // SubBalancerCloseTimeout is the amount of time deleted sub-balancers spend + // in the idle cache. A value of zero here disables caching of deleted + // sub-balancers. + SubBalancerCloseTimeout time.Duration +} + +// New creates a new BalancerGroup. Note that the BalancerGroup +// needs to be started to work. +func New(opts Options) *BalancerGroup { + var bc *cache.TimeoutCache + if opts.SubBalancerCloseTimeout != time.Duration(0) { + bc = cache.NewTimeoutCache(opts.SubBalancerCloseTimeout) + } + + return &BalancerGroup{ + cc: opts.CC, + buildOpts: opts.BuildOpts, + stateAggregator: opts.StateAggregator, + logger: opts.Logger, + + deletedBalancerCache: bc, + idToBalancerConfig: make(map[string]*subBalancerWrapper), + scToSubBalancer: make(map[balancer.SubConn]*subBalancerWrapper), + } +} + +// Start starts the balancer group, including building all the sub-balancers, +// and send the existing addresses to them. +// +// A BalancerGroup can be closed and started later. When a BalancerGroup is +// closed, it can still receive address updates, which will be applied when +// restarted. +func (bg *BalancerGroup) Start() { + bg.incomingMu.Lock() + bg.incomingStarted = true + bg.incomingMu.Unlock() + + bg.outgoingMu.Lock() + if bg.outgoingStarted { + bg.outgoingMu.Unlock() + return + } + + for _, config := range bg.idToBalancerConfig { + config.startBalancer() + } + bg.outgoingStarted = true + bg.outgoingMu.Unlock() +} + +// AddWithClientConn adds a balancer with the given id to the group. The +// balancer is built with a balancer builder registered with balancerName. The +// given ClientConn is passed to the newly built balancer instead of the +// one passed to balancergroup.New(). +// +// TODO: Get rid of the existing Add() API and replace it with this. +func (bg *BalancerGroup) AddWithClientConn(id, balancerName string, cc balancer.ClientConn) error { + bg.logger.Infof("Adding child policy of type %q for child %q", balancerName, id) + builder := balancer.Get(balancerName) + if builder == nil { + return fmt.Errorf("unregistered balancer name %q", balancerName) + } + + // Store data in static map, and then check to see if bg is started. + bg.outgoingMu.Lock() + defer bg.outgoingMu.Unlock() + var sbc *subBalancerWrapper + // If outgoingStarted is true, search in the cache. Otherwise, cache is + // guaranteed to be empty, searching is unnecessary. Also, skip the cache if + // caching is disabled. + if bg.outgoingStarted && bg.deletedBalancerCache != nil { + if old, ok := bg.deletedBalancerCache.Remove(id); ok { + if bg.logger.V(2) { + bg.logger.Infof("Removing and reusing child policy of type %q for child %q from the balancer cache", balancerName, id) + bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len()) + } + + sbc, _ = old.(*subBalancerWrapper) + if sbc != nil && sbc.builder != builder { + // If the sub-balancer in cache was built with a different + // balancer builder, don't use it, cleanup this old-balancer, + // and behave as sub-balancer is not found in cache. + // + // NOTE that this will also drop the cached addresses for this + // sub-balancer, which seems to be reasonable. 
+ sbc.stopBalancer() + // cleanupSubConns must be done before the new balancer starts, + // otherwise new SubConns created by the new balancer might be + // removed by mistake. + bg.cleanupSubConns(sbc) + sbc = nil + } + } + } + if sbc == nil { + sbc = &subBalancerWrapper{ + ClientConn: cc, + id: id, + group: bg, + builder: builder, + buildOpts: bg.buildOpts, + } + if bg.outgoingStarted { + // Only start the balancer if bg is started. Otherwise, we only keep the + // static data. + sbc.startBalancer() + } + } else { + // When brining back a sub-balancer from cache, re-send the cached + // picker and state. + sbc.updateBalancerStateWithCachedPicker() + } + bg.idToBalancerConfig[id] = sbc + return nil +} + +// Add adds a balancer built by builder to the group, with given id. +func (bg *BalancerGroup) Add(id string, builder balancer.Builder) { + bg.AddWithClientConn(id, builder.Name(), bg.cc) +} + +// Remove removes the balancer with id from the group. +// +// But doesn't close the balancer. The balancer is kept in a cache, and will be +// closed after timeout. Cleanup work (closing sub-balancer and removing +// subconns) will be done after timeout. +func (bg *BalancerGroup) Remove(id string) { + bg.logger.Infof("Removing child policy for child %q", id) + + bg.outgoingMu.Lock() + + sbToRemove, ok := bg.idToBalancerConfig[id] + if !ok { + bg.logger.Errorf("Child policy for child %q does not exist in the balancer group", id) + bg.outgoingMu.Unlock() + return + } + + // Unconditionally remove the sub-balancer config from the map. + delete(bg.idToBalancerConfig, id) + if !bg.outgoingStarted { + // Nothing needs to be done here, since we wouldn't have created the + // sub-balancer. + bg.outgoingMu.Unlock() + return + } + + if bg.deletedBalancerCache != nil { + if bg.logger.V(2) { + bg.logger.Infof("Adding child policy for child %q to the balancer cache", id) + bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len()) + } + + bg.deletedBalancerCache.Add(id, sbToRemove, func() { + if bg.logger.V(2) { + bg.logger.Infof("Removing child policy for child %q from the balancer cache after timeout", id) + bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len()) + } + + // A sub-balancer evicted from the timeout cache needs to closed + // and its subConns need to removed, unconditionally. There is a + // possibility that a sub-balancer might be removed (thereby + // moving it to the cache) around the same time that the + // balancergroup is closed, and by the time we get here the + // balancergroup might be closed. Check for `outgoingStarted == + // true` at that point can lead to a leaked sub-balancer. + bg.outgoingMu.Lock() + sbToRemove.stopBalancer() + bg.outgoingMu.Unlock() + bg.cleanupSubConns(sbToRemove) + }) + bg.outgoingMu.Unlock() + return + } + + // Remove the sub-balancer with immediate effect if we are not caching. + sbToRemove.stopBalancer() + bg.outgoingMu.Unlock() + bg.cleanupSubConns(sbToRemove) +} + +// bg.remove(id) doesn't do cleanup for the sub-balancer. This function does +// cleanup after the timeout. +func (bg *BalancerGroup) cleanupSubConns(config *subBalancerWrapper) { + bg.incomingMu.Lock() + // Remove SubConns. This is only done after the balancer is + // actually closed. + // + // NOTE: if NewSubConn is called by this (closed) balancer later, the + // SubConn will be leaked. This shouldn't happen if the balancer + // implementation is correct. 
To make sure this never happens, we need to + // add another layer (balancer manager) between balancer group and the + // sub-balancers. + for sc, b := range bg.scToSubBalancer { + if b == config { + delete(bg.scToSubBalancer, sc) + } + } + bg.incomingMu.Unlock() +} + +// connect attempts to connect to all subConns belonging to sb. +func (bg *BalancerGroup) connect(sb *subBalancerWrapper) { + bg.incomingMu.Lock() + for sc, b := range bg.scToSubBalancer { + if b == sb { + sc.Connect() + } + } + bg.incomingMu.Unlock() +} + +// Following are actions from the parent grpc.ClientConn, forward to sub-balancers. + +// updateSubConnState forwards the update to cb and updates scToSubBalancer if +// needed. +func (bg *BalancerGroup) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) { + bg.incomingMu.Lock() + if _, ok := bg.scToSubBalancer[sc]; !ok { + bg.incomingMu.Unlock() + return + } + if state.ConnectivityState == connectivity.Shutdown { + // Only delete sc from the map when state changed to Shutdown. + delete(bg.scToSubBalancer, sc) + } + bg.incomingMu.Unlock() + + bg.outgoingMu.Lock() + if cb != nil { + cb(state) + } + bg.outgoingMu.Unlock() +} + +// UpdateSubConnState handles the state for the subconn. It finds the +// corresponding balancer and forwards the update. +func (bg *BalancerGroup) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + bg.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +// UpdateClientConnState handles ClientState (including balancer config and +// addresses) from resolver. It finds the balancer and forwards the update. +func (bg *BalancerGroup) UpdateClientConnState(id string, s balancer.ClientConnState) error { + bg.outgoingMu.Lock() + defer bg.outgoingMu.Unlock() + if config, ok := bg.idToBalancerConfig[id]; ok { + return config.updateClientConnState(s) + } + return nil +} + +// ResolverError forwards resolver errors to all sub-balancers. +func (bg *BalancerGroup) ResolverError(err error) { + bg.outgoingMu.Lock() + for _, config := range bg.idToBalancerConfig { + config.resolverError(err) + } + bg.outgoingMu.Unlock() +} + +// Following are actions from sub-balancers, forward to ClientConn. + +// newSubConn: forward to ClientConn, and also create a map from sc to balancer, +// so state update will find the right balancer. +// +// One note about removing SubConn: only forward to ClientConn, but not delete +// from map. Delete sc from the map only when state changes to Shutdown. Since +// it's just forwarding the action, there's no need for a removeSubConn() +// wrapper function. +func (bg *BalancerGroup) newSubConn(config *subBalancerWrapper, addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + // NOTE: if balancer with id was already removed, this should also return + // error. But since we call balancer.stopBalancer when removing the balancer, this + // shouldn't happen. 
+ bg.incomingMu.Lock() + if !bg.incomingStarted { + bg.incomingMu.Unlock() + return nil, fmt.Errorf("NewSubConn is called after balancer group is closed") + } + var sc balancer.SubConn + oldListener := opts.StateListener + opts.StateListener = func(state balancer.SubConnState) { bg.updateSubConnState(sc, state, oldListener) } + sc, err := bg.cc.NewSubConn(addrs, opts) + if err != nil { + bg.incomingMu.Unlock() + return nil, err + } + bg.scToSubBalancer[sc] = config + bg.incomingMu.Unlock() + return sc, nil +} + +// updateBalancerState: forward the new state to balancer state aggregator. The +// aggregator will create an aggregated picker and an aggregated connectivity +// state, then forward to ClientConn. +func (bg *BalancerGroup) updateBalancerState(id string, state balancer.State) { + bg.logger.Infof("Balancer state update from child %v, new state: %+v", id, state) + + // Send new state to the aggregator, without holding the incomingMu. + // incomingMu is to protect all calls to the parent ClientConn, this update + // doesn't necessary trigger a call to ClientConn, and should already be + // protected by aggregator's mutex if necessary. + if bg.stateAggregator != nil { + bg.stateAggregator.UpdateState(id, state) + } +} + +// Close closes the balancer. It stops sub-balancers, and removes the subconns. +// The BalancerGroup can be restarted later. +func (bg *BalancerGroup) Close() { + bg.incomingMu.Lock() + if bg.incomingStarted { + bg.incomingStarted = false + // Also remove all SubConns. + for sc := range bg.scToSubBalancer { + sc.Shutdown() + delete(bg.scToSubBalancer, sc) + } + } + bg.incomingMu.Unlock() + + // Clear(true) runs clear function to close sub-balancers in cache. It + // must be called out of outgoing mutex. + if bg.deletedBalancerCache != nil { + bg.deletedBalancerCache.Clear(true) + } + + bg.outgoingMu.Lock() + if bg.outgoingStarted { + bg.outgoingStarted = false + for _, config := range bg.idToBalancerConfig { + config.stopBalancer() + } + } + bg.outgoingMu.Unlock() +} + +// ExitIdle should be invoked when the parent LB policy's ExitIdle is invoked. +// It will trigger this on all sub-balancers, or reconnect their subconns if +// not supported. +func (bg *BalancerGroup) ExitIdle() { + bg.outgoingMu.Lock() + for _, config := range bg.idToBalancerConfig { + if !config.exitIdle() { + bg.connect(config) + } + } + bg.outgoingMu.Unlock() +} + +// ExitIdleOne instructs the sub-balancer `id` to exit IDLE state, if +// appropriate and possible. +func (bg *BalancerGroup) ExitIdleOne(id string) { + bg.outgoingMu.Lock() + if config := bg.idToBalancerConfig[id]; config != nil { + if !config.exitIdle() { + bg.connect(config) + } + } + bg.outgoingMu.Unlock() +} + +// ParseConfig parses a child config list and returns a LB config for the +// gracefulswitch Balancer. +// +// cfg is expected to be a json.RawMessage containing a JSON array of LB policy +// names + configs as the format of the "loadBalancingConfig" field in +// ServiceConfig. It returns a type that should be passed to +// UpdateClientConnState in the BalancerConfig field. 
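+//
+// A hedged sketch of the expected shape; the policy name "round_robin" is
+// illustrative and not mandated by this package:
+//
+//	cfg := json.RawMessage(`[{"round_robin": {}}]`)
+//	lbCfg, err := ParseConfig(cfg)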
+func ParseConfig(cfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return gracefulswitch.ParseConfig(cfg) +} diff --git a/vendor/google.golang.org/grpc/internal/balancergroup/balancerstateaggregator.go b/vendor/google.golang.org/grpc/internal/balancergroup/balancerstateaggregator.go new file mode 100644 index 0000000000..8168695553 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancergroup/balancerstateaggregator.go @@ -0,0 +1,37 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package balancergroup + +import ( + "google.golang.org/grpc/balancer" +) + +// BalancerStateAggregator aggregates sub-picker and connectivity states into a +// state. +// +// It takes care of merging sub-picker into one picker. The picking config is +// passed directly from the parent to the aggregator implementation (instead +// via balancer group). +type BalancerStateAggregator interface { + // UpdateState updates the state of the id. + // + // It's up to the implementation whether this will trigger an update to the + // parent ClientConn. + UpdateState(id string, state balancer.State) +} diff --git a/vendor/google.golang.org/grpc/internal/cache/timeoutCache.go b/vendor/google.golang.org/grpc/internal/cache/timeoutCache.go new file mode 100644 index 0000000000..2fa4870102 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/cache/timeoutCache.go @@ -0,0 +1,151 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package cache implements caches to be used in gRPC. +package cache + +import ( + "sync" + "time" +) + +type cacheEntry struct { + item any + // Note that to avoid deadlocks (potentially caused by lock ordering), + // callback can only be called without holding cache's mutex. + callback func() + timer *time.Timer + // deleted is set to true in Remove() when the call to timer.Stop() fails. + // This can happen when the timer in the cache entry fires around the same + // time that timer.stop() is called in Remove(). + deleted bool +} + +// TimeoutCache is a cache with items to be deleted after a timeout. +type TimeoutCache struct { + mu sync.Mutex + timeout time.Duration + cache map[any]*cacheEntry +} + +// NewTimeoutCache creates a TimeoutCache with the given timeout. 
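+//
+// A minimal usage sketch (the key, item and timeout are illustrative):
+//
+//	c := NewTimeoutCache(time.Minute)
+//	c.Add("k", item, func() { /* runs only if the entry times out */ })
+//	if got, ok := c.Remove("k"); ok {
+//		// the timeout callback for "k" is now guaranteed not to run
+//		_ = got
+//	}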
+func NewTimeoutCache(timeout time.Duration) *TimeoutCache { + return &TimeoutCache{ + timeout: timeout, + cache: make(map[any]*cacheEntry), + } +} + +// Add adds an item to the cache, with the specified callback to be called when +// the item is removed from the cache upon timeout. If the item is removed from +// the cache using a call to Remove before the timeout expires, the callback +// will not be called. +// +// If the Add was successful, it returns (newly added item, true). If there is +// an existing entry for the specified key, the cache entry is not be updated +// with the specified item and it returns (existing item, false). +func (c *TimeoutCache) Add(key, item any, callback func()) (any, bool) { + c.mu.Lock() + defer c.mu.Unlock() + if e, ok := c.cache[key]; ok { + return e.item, false + } + + entry := &cacheEntry{ + item: item, + callback: callback, + } + entry.timer = time.AfterFunc(c.timeout, func() { + c.mu.Lock() + if entry.deleted { + c.mu.Unlock() + // Abort the delete since this has been taken care of in Remove(). + return + } + delete(c.cache, key) + c.mu.Unlock() + entry.callback() + }) + c.cache[key] = entry + return item, true +} + +// Remove the item with the key from the cache. +// +// If the specified key exists in the cache, it returns (item associated with +// key, true) and the callback associated with the item is guaranteed to be not +// called. If the given key is not found in the cache, it returns (nil, false) +func (c *TimeoutCache) Remove(key any) (item any, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + entry, ok := c.removeInternal(key) + if !ok { + return nil, false + } + return entry.item, true +} + +// removeInternal removes and returns the item with key. +// +// caller must hold c.mu. +func (c *TimeoutCache) removeInternal(key any) (*cacheEntry, bool) { + entry, ok := c.cache[key] + if !ok { + return nil, false + } + delete(c.cache, key) + if !entry.timer.Stop() { + // If stop was not successful, the timer has fired (this can only happen + // in a race). But the deleting function is blocked on c.mu because the + // mutex was held by the caller of this function. + // + // Set deleted to true to abort the deleting function. When the lock is + // released, the delete function will acquire the lock, check the value + // of deleted and return. + entry.deleted = true + } + return entry, true +} + +// Clear removes all entries, and runs the callbacks if runCallback is true. +func (c *TimeoutCache) Clear(runCallback bool) { + var entries []*cacheEntry + c.mu.Lock() + for key := range c.cache { + if e, ok := c.removeInternal(key); ok { + entries = append(entries, e) + } + } + c.mu.Unlock() + + if !runCallback { + return + } + + // removeInternal removes entries from cache, and also stops the timer, so + // the callback is guaranteed to be not called. If runCallback is true, + // manual execute all callbacks. + for _, entry := range entries { + entry.callback() + } +} + +// Len returns the number of entries in the cache. +func (c *TimeoutCache) Len() int { + c.mu.Lock() + defer c.mu.Unlock() + return len(c.cache) +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/xds/handshake_info.go b/vendor/google.golang.org/grpc/internal/credentials/xds/handshake_info.go new file mode 100644 index 0000000000..dcff7ad622 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/xds/handshake_info.go @@ -0,0 +1,297 @@ +/* + * + * Copyright 2020 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package xds contains non-user facing functionality of the xds credentials. +package xds + +import ( + "context" + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "strings" + "unsafe" + + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/credentials/tls/certprovider" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/xds/matcher" + "google.golang.org/grpc/resolver" +) + +func init() { + internal.GetXDSHandshakeInfoForTesting = GetHandshakeInfo +} + +// handshakeAttrKey is the type used as the key to store HandshakeInfo in +// the Attributes field of resolver.Address. +type handshakeAttrKey struct{} + +// Equal reports whether the handshake info structs are identical. +func (hi *HandshakeInfo) Equal(other *HandshakeInfo) bool { + if hi == nil && other == nil { + return true + } + if hi == nil || other == nil { + return false + } + if hi.rootProvider != other.rootProvider || + hi.identityProvider != other.identityProvider || + hi.requireClientCert != other.requireClientCert || + len(hi.sanMatchers) != len(other.sanMatchers) { + return false + } + for i := range hi.sanMatchers { + if !hi.sanMatchers[i].Equal(other.sanMatchers[i]) { + return false + } + } + return true +} + +// SetHandshakeInfo returns a copy of addr in which the Attributes field is +// updated with hiPtr. +func SetHandshakeInfo(addr resolver.Address, hiPtr *unsafe.Pointer) resolver.Address { + addr.Attributes = addr.Attributes.WithValue(handshakeAttrKey{}, hiPtr) + return addr +} + +// GetHandshakeInfo returns a pointer to the *HandshakeInfo stored in attr. +func GetHandshakeInfo(attr *attributes.Attributes) *unsafe.Pointer { + v := attr.Value(handshakeAttrKey{}) + hi, _ := v.(*unsafe.Pointer) + return hi +} + +// HandshakeInfo wraps all the security configuration required by client and +// server handshake methods in xds credentials. The xDS implementation will be +// responsible for populating these fields. +type HandshakeInfo struct { + // All fields written at init time and read only after that, so no + // synchronization needed. + rootProvider certprovider.Provider + identityProvider certprovider.Provider + sanMatchers []matcher.StringMatcher // Only on the client side. + requireClientCert bool // Only on server side. +} + +// NewHandshakeInfo returns a new handshake info configured with the provided +// options. +func NewHandshakeInfo(rootProvider certprovider.Provider, identityProvider certprovider.Provider, sanMatchers []matcher.StringMatcher, requireClientCert bool) *HandshakeInfo { + return &HandshakeInfo{ + rootProvider: rootProvider, + identityProvider: identityProvider, + sanMatchers: sanMatchers, + requireClientCert: requireClientCert, + } +} + +// UseFallbackCreds returns true when fallback credentials are to be used based +// on the contents of the HandshakeInfo. 
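+//
+// For example, a HandshakeInfo built without any certificate providers reports
+// that fallback credentials should be used:
+//
+//	hi := NewHandshakeInfo(nil, nil, nil, false)
+//	fallback := hi.UseFallbackCreds() // true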
+func (hi *HandshakeInfo) UseFallbackCreds() bool { + if hi == nil { + return true + } + return hi.identityProvider == nil && hi.rootProvider == nil +} + +// GetSANMatchersForTesting returns the SAN matchers stored in HandshakeInfo. +// To be used only for testing purposes. +func (hi *HandshakeInfo) GetSANMatchersForTesting() []matcher.StringMatcher { + return append([]matcher.StringMatcher{}, hi.sanMatchers...) +} + +// ClientSideTLSConfig constructs a tls.Config to be used in a client-side +// handshake based on the contents of the HandshakeInfo. +func (hi *HandshakeInfo) ClientSideTLSConfig(ctx context.Context) (*tls.Config, error) { + // On the client side, rootProvider is mandatory. IdentityProvider is + // optional based on whether the client is doing TLS or mTLS. + if hi.rootProvider == nil { + return nil, errors.New("xds: CertificateProvider to fetch trusted roots is missing, cannot perform TLS handshake. Please check configuration on the management server") + } + // Since the call to KeyMaterial() can block, we read the providers under + // the lock but call the actual function after releasing the lock. + rootProv, idProv := hi.rootProvider, hi.identityProvider + + // InsecureSkipVerify needs to be set to true because we need to perform + // custom verification to check the SAN on the received certificate. + // Currently the Go stdlib does complete verification of the cert (which + // includes hostname verification) or none. We are forced to go with the + // latter and perform the normal cert validation ourselves. + cfg := &tls.Config{ + InsecureSkipVerify: true, + NextProtos: []string{"h2"}, + } + + km, err := rootProv.KeyMaterial(ctx) + if err != nil { + return nil, fmt.Errorf("xds: fetching trusted roots from CertificateProvider failed: %v", err) + } + cfg.RootCAs = km.Roots + + if idProv != nil { + km, err := idProv.KeyMaterial(ctx) + if err != nil { + return nil, fmt.Errorf("xds: fetching identity certificates from CertificateProvider failed: %v", err) + } + cfg.Certificates = km.Certs + } + return cfg, nil +} + +// ServerSideTLSConfig constructs a tls.Config to be used in a server-side +// handshake based on the contents of the HandshakeInfo. +func (hi *HandshakeInfo) ServerSideTLSConfig(ctx context.Context) (*tls.Config, error) { + cfg := &tls.Config{ + ClientAuth: tls.NoClientCert, + NextProtos: []string{"h2"}, + } + // On the server side, identityProvider is mandatory. RootProvider is + // optional based on whether the server is doing TLS or mTLS. + if hi.identityProvider == nil { + return nil, errors.New("xds: CertificateProvider to fetch identity certificate is missing, cannot perform TLS handshake. Please check configuration on the management server") + } + // Since the call to KeyMaterial() can block, we read the providers under + // the lock but call the actual function after releasing the lock. + rootProv, idProv := hi.rootProvider, hi.identityProvider + if hi.requireClientCert { + cfg.ClientAuth = tls.RequireAndVerifyClientCert + } + + // identityProvider is mandatory on the server side. 
+ km, err := idProv.KeyMaterial(ctx) + if err != nil { + return nil, fmt.Errorf("xds: fetching identity certificates from CertificateProvider failed: %v", err) + } + cfg.Certificates = km.Certs + + if rootProv != nil { + km, err := rootProv.KeyMaterial(ctx) + if err != nil { + return nil, fmt.Errorf("xds: fetching trusted roots from CertificateProvider failed: %v", err) + } + cfg.ClientCAs = km.Roots + } + return cfg, nil +} + +// MatchingSANExists returns true if the SANs contained in cert match the +// criteria enforced by the list of SAN matchers in HandshakeInfo. +// +// If the list of SAN matchers in the HandshakeInfo is empty, this function +// returns true for all input certificates. +func (hi *HandshakeInfo) MatchingSANExists(cert *x509.Certificate) bool { + if len(hi.sanMatchers) == 0 { + return true + } + + // SANs can be specified in any of these four fields on the parsed cert. + for _, san := range cert.DNSNames { + if hi.matchSAN(san, true) { + return true + } + } + for _, san := range cert.EmailAddresses { + if hi.matchSAN(san, false) { + return true + } + } + for _, san := range cert.IPAddresses { + if hi.matchSAN(san.String(), false) { + return true + } + } + for _, san := range cert.URIs { + if hi.matchSAN(san.String(), false) { + return true + } + } + return false +} + +// Caller must hold mu. +func (hi *HandshakeInfo) matchSAN(san string, isDNS bool) bool { + for _, matcher := range hi.sanMatchers { + if em := matcher.ExactMatch(); em != "" && isDNS { + // This is a special case which is documented in the xDS protos. + // If the DNS SAN is a wildcard entry, and the match criteria is + // `exact`, then we need to perform DNS wildcard matching + // instead of regular string comparison. + if dnsMatch(em, san) { + return true + } + continue + } + if matcher.Match(san) { + return true + } + } + return false +} + +// dnsMatch implements a DNS wildcard matching algorithm based on RFC2828 and +// grpc-java's implementation in `OkHostnameVerifier` class. +// +// NOTE: Here the `host` argument is the one from the set of string matchers in +// the xDS proto and the `san` argument is a DNS SAN from the certificate, and +// this is the one which can potentially contain a wildcard pattern. +func dnsMatch(host, san string) bool { + // Add trailing "." and turn them into absolute domain names. + if !strings.HasSuffix(host, ".") { + host += "." + } + if !strings.HasSuffix(san, ".") { + san += "." + } + // Domain names are case-insensitive. + host = strings.ToLower(host) + san = strings.ToLower(san) + + // If san does not contain a wildcard, do exact match. + if !strings.Contains(san, "*") { + return host == san + } + + // Wildcard dns matching rules + // - '*' is only permitted in the left-most label and must be the only + // character in that label. For example, *.example.com is permitted, while + // *a.example.com, a*.example.com, a*b.example.com, a.*.example.com are + // not permitted. + // - '*' matches a single domain name component. For example, *.example.com + // matches test.example.com but does not match sub.test.example.com. + // - Wildcard patterns for single-label domain names are not permitted. + if san == "*." || !strings.HasPrefix(san, "*.") || strings.Contains(san[1:], "*") { + return false + } + // Optimization: at this point, we know that the san contains a '*' and + // is the first domain component of san. So, the host name must be at + // least as long as the san to be able to match. 
+ if len(host) < len(san) { + return false + } + // Hostname must end with the non-wildcard portion of san. + if !strings.HasSuffix(host, san[1:]) { + return false + } + // At this point we know that the hostName and san share the same suffix + // (the non-wildcard portion of san). Now, we just need to make sure + // that the '*' does not match across domain components. + hostPrefix := strings.TrimSuffix(host, san[1:]) + return !strings.Contains(hostPrefix, ".") +} diff --git a/vendor/google.golang.org/grpc/internal/hierarchy/hierarchy.go b/vendor/google.golang.org/grpc/internal/hierarchy/hierarchy.go new file mode 100644 index 0000000000..c3baac3643 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/hierarchy/hierarchy.go @@ -0,0 +1,112 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package hierarchy contains functions to set and get hierarchy string from +// addresses. +// +// This package is experimental. +package hierarchy + +import ( + "google.golang.org/grpc/resolver" +) + +type pathKeyType string + +const pathKey = pathKeyType("grpc.internal.address.hierarchical_path") + +type pathValue []string + +func (p pathValue) Equal(o any) bool { + op, ok := o.(pathValue) + if !ok { + return false + } + if len(op) != len(p) { + return false + } + for i, v := range p { + if v != op[i] { + return false + } + } + return true +} + +// Get returns the hierarchical path of addr. +func Get(addr resolver.Address) []string { + attrs := addr.BalancerAttributes + if attrs == nil { + return nil + } + path, _ := attrs.Value(pathKey).(pathValue) + return ([]string)(path) +} + +// Set overrides the hierarchical path in addr with path. +func Set(addr resolver.Address, path []string) resolver.Address { + addr.BalancerAttributes = addr.BalancerAttributes.WithValue(pathKey, pathValue(path)) + return addr +} + +// Group splits a slice of addresses into groups based on +// the first hierarchy path. The first hierarchy path will be removed from the +// result. +// +// Input: +// [ +// +// {addr0, path: [p0, wt0]} +// {addr1, path: [p0, wt1]} +// {addr2, path: [p1, wt2]} +// {addr3, path: [p1, wt3]} +// +// ] +// +// Addresses will be split into p0/p1, and the p0/p1 will be removed from the +// path. +// +// Output: +// +// { +// p0: [ +// {addr0, path: [wt0]}, +// {addr1, path: [wt1]}, +// ], +// p1: [ +// {addr2, path: [wt2]}, +// {addr3, path: [wt3]}, +// ], +// } +// +// If hierarchical path is not set, or has no path in it, the address is +// dropped. 
+func Group(addrs []resolver.Address) map[string][]resolver.Address { + ret := make(map[string][]resolver.Address) + for _, addr := range addrs { + oldPath := Get(addr) + if len(oldPath) == 0 { + continue + } + curPath := oldPath[0] + newPath := oldPath[1:] + newAddr := Set(addr, newPath) + ret[curPath] = append(ret[curPath], newAddr) + } + return ret +} diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go new file mode 100644 index 0000000000..703091047b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go @@ -0,0 +1,387 @@ +// Copyright 2020 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v5.27.1 +// source: grpc/lookup/v1/rls.proto + +package grpc_lookup_v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Possible reasons for making a request. +type RouteLookupRequest_Reason int32 + +const ( + RouteLookupRequest_REASON_UNKNOWN RouteLookupRequest_Reason = 0 // Unused + RouteLookupRequest_REASON_MISS RouteLookupRequest_Reason = 1 // No data available in local cache + RouteLookupRequest_REASON_STALE RouteLookupRequest_Reason = 2 // Data in local cache is stale +) + +// Enum value maps for RouteLookupRequest_Reason. +var ( + RouteLookupRequest_Reason_name = map[int32]string{ + 0: "REASON_UNKNOWN", + 1: "REASON_MISS", + 2: "REASON_STALE", + } + RouteLookupRequest_Reason_value = map[string]int32{ + "REASON_UNKNOWN": 0, + "REASON_MISS": 1, + "REASON_STALE": 2, + } +) + +func (x RouteLookupRequest_Reason) Enum() *RouteLookupRequest_Reason { + p := new(RouteLookupRequest_Reason) + *p = x + return p +} + +func (x RouteLookupRequest_Reason) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RouteLookupRequest_Reason) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_lookup_v1_rls_proto_enumTypes[0].Descriptor() +} + +func (RouteLookupRequest_Reason) Type() protoreflect.EnumType { + return &file_grpc_lookup_v1_rls_proto_enumTypes[0] +} + +func (x RouteLookupRequest_Reason) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RouteLookupRequest_Reason.Descriptor instead. 
+func (RouteLookupRequest_Reason) EnumDescriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_proto_rawDescGZIP(), []int{0, 0} +} + +type RouteLookupRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Target type allows the client to specify what kind of target format it + // would like from RLS to allow it to find the regional server, e.g. "grpc". + TargetType string `protobuf:"bytes,3,opt,name=target_type,json=targetType,proto3" json:"target_type,omitempty"` + // Reason for making this request. + Reason RouteLookupRequest_Reason `protobuf:"varint,5,opt,name=reason,proto3,enum=grpc.lookup.v1.RouteLookupRequest_Reason" json:"reason,omitempty"` + // For REASON_STALE, the header_data from the stale response, if any. + StaleHeaderData string `protobuf:"bytes,6,opt,name=stale_header_data,json=staleHeaderData,proto3" json:"stale_header_data,omitempty"` + // Map of key values extracted via key builders for the gRPC or HTTP request. + KeyMap map[string]string `protobuf:"bytes,4,rep,name=key_map,json=keyMap,proto3" json:"key_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Application-specific optional extensions. + Extensions []*anypb.Any `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty"` +} + +func (x *RouteLookupRequest) Reset() { + *x = RouteLookupRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteLookupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteLookupRequest) ProtoMessage() {} + +func (x *RouteLookupRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteLookupRequest.ProtoReflect.Descriptor instead. +func (*RouteLookupRequest) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_proto_rawDescGZIP(), []int{0} +} + +func (x *RouteLookupRequest) GetTargetType() string { + if x != nil { + return x.TargetType + } + return "" +} + +func (x *RouteLookupRequest) GetReason() RouteLookupRequest_Reason { + if x != nil { + return x.Reason + } + return RouteLookupRequest_REASON_UNKNOWN +} + +func (x *RouteLookupRequest) GetStaleHeaderData() string { + if x != nil { + return x.StaleHeaderData + } + return "" +} + +func (x *RouteLookupRequest) GetKeyMap() map[string]string { + if x != nil { + return x.KeyMap + } + return nil +} + +func (x *RouteLookupRequest) GetExtensions() []*anypb.Any { + if x != nil { + return x.Extensions + } + return nil +} + +type RouteLookupResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Prioritized list (best one first) of addressable entities to use + // for routing, using syntax requested by the request target_type. + // The targets will be tried in order until a healthy one is found. + Targets []string `protobuf:"bytes,3,rep,name=targets,proto3" json:"targets,omitempty"` + // Optional header value to pass along to AFE in the X-Google-RLS-Data header. + // Cached with "target" and sent with all requests that match the request key. 
+ // Allows the RLS to pass its work product to the eventual target. + HeaderData string `protobuf:"bytes,2,opt,name=header_data,json=headerData,proto3" json:"header_data,omitempty"` + // Application-specific optional extensions. + Extensions []*anypb.Any `protobuf:"bytes,4,rep,name=extensions,proto3" json:"extensions,omitempty"` +} + +func (x *RouteLookupResponse) Reset() { + *x = RouteLookupResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteLookupResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteLookupResponse) ProtoMessage() {} + +func (x *RouteLookupResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteLookupResponse.ProtoReflect.Descriptor instead. +func (*RouteLookupResponse) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_proto_rawDescGZIP(), []int{1} +} + +func (x *RouteLookupResponse) GetTargets() []string { + if x != nil { + return x.Targets + } + return nil +} + +func (x *RouteLookupResponse) GetHeaderData() string { + if x != nil { + return x.HeaderData + } + return "" +} + +func (x *RouteLookupResponse) GetExtensions() []*anypb.Any { + if x != nil { + return x.Extensions + } + return nil +} + +var File_grpc_lookup_v1_rls_proto protoreflect.FileDescriptor + +var file_grpc_lookup_v1_rls_proto_rawDesc = []byte{ + 0x0a, 0x18, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x76, 0x31, + 0x2f, 0x72, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb9, 0x03, 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, + 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, + 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x74, 0x61, + 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x12, 0x47, 0x0a, 0x07, + 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 
0x72, 0x79, 0x52, 0x06, 0x6b, + 0x65, 0x79, 0x4d, 0x61, 0x70, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4b, + 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3f, 0x0a, 0x06, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, + 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x4d, + 0x49, 0x53, 0x53, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, + 0x53, 0x54, 0x41, 0x4c, 0x45, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, + 0x02, 0x10, 0x03, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x22, 0x94, 0x01, 0x0a, 0x13, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, + 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x44, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0a, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, + 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x32, 0x6e, 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, + 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x12, 0x22, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, + 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4d, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, 0x08, 0x52, + 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, + 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_lookup_v1_rls_proto_rawDescOnce sync.Once + 
file_grpc_lookup_v1_rls_proto_rawDescData = file_grpc_lookup_v1_rls_proto_rawDesc +) + +func file_grpc_lookup_v1_rls_proto_rawDescGZIP() []byte { + file_grpc_lookup_v1_rls_proto_rawDescOnce.Do(func() { + file_grpc_lookup_v1_rls_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_lookup_v1_rls_proto_rawDescData) + }) + return file_grpc_lookup_v1_rls_proto_rawDescData +} + +var file_grpc_lookup_v1_rls_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_grpc_lookup_v1_rls_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_grpc_lookup_v1_rls_proto_goTypes = []any{ + (RouteLookupRequest_Reason)(0), // 0: grpc.lookup.v1.RouteLookupRequest.Reason + (*RouteLookupRequest)(nil), // 1: grpc.lookup.v1.RouteLookupRequest + (*RouteLookupResponse)(nil), // 2: grpc.lookup.v1.RouteLookupResponse + nil, // 3: grpc.lookup.v1.RouteLookupRequest.KeyMapEntry + (*anypb.Any)(nil), // 4: google.protobuf.Any +} +var file_grpc_lookup_v1_rls_proto_depIdxs = []int32{ + 0, // 0: grpc.lookup.v1.RouteLookupRequest.reason:type_name -> grpc.lookup.v1.RouteLookupRequest.Reason + 3, // 1: grpc.lookup.v1.RouteLookupRequest.key_map:type_name -> grpc.lookup.v1.RouteLookupRequest.KeyMapEntry + 4, // 2: grpc.lookup.v1.RouteLookupRequest.extensions:type_name -> google.protobuf.Any + 4, // 3: grpc.lookup.v1.RouteLookupResponse.extensions:type_name -> google.protobuf.Any + 1, // 4: grpc.lookup.v1.RouteLookupService.RouteLookup:input_type -> grpc.lookup.v1.RouteLookupRequest + 2, // 5: grpc.lookup.v1.RouteLookupService.RouteLookup:output_type -> grpc.lookup.v1.RouteLookupResponse + 5, // [5:6] is the sub-list for method output_type + 4, // [4:5] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_grpc_lookup_v1_rls_proto_init() } +func file_grpc_lookup_v1_rls_proto_init() { + if File_grpc_lookup_v1_rls_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_lookup_v1_rls_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*RouteLookupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lookup_v1_rls_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*RouteLookupResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_lookup_v1_rls_proto_rawDesc, + NumEnums: 1, + NumMessages: 3, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_lookup_v1_rls_proto_goTypes, + DependencyIndexes: file_grpc_lookup_v1_rls_proto_depIdxs, + EnumInfos: file_grpc_lookup_v1_rls_proto_enumTypes, + MessageInfos: file_grpc_lookup_v1_rls_proto_msgTypes, + }.Build() + File_grpc_lookup_v1_rls_proto = out.File + file_grpc_lookup_v1_rls_proto_rawDesc = nil + file_grpc_lookup_v1_rls_proto_goTypes = nil + file_grpc_lookup_v1_rls_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go new file mode 100644 index 0000000000..a0be3c8cb2 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go @@ -0,0 +1,949 @@ 
+// Copyright 2020 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v5.27.1 +// source: grpc/lookup/v1/rls_config.proto + +package grpc_lookup_v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Extract a key based on a given name (e.g. header name or query parameter +// name). The name must match one of the names listed in the "name" field. If +// the "required_match" field is true, one of the specified names must be +// present for the keybuilder to match. +type NameMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name that will be used in the RLS key_map to refer to this value. + // If required_match is true, you may omit this field or set it to an empty + // string, in which case the matcher will require a match, but won't update + // the key_map. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // Ordered list of names (headers or query parameter names) that can supply + // this value; the first one with a non-empty value is used. + Names []string `protobuf:"bytes,2,rep,name=names,proto3" json:"names,omitempty"` + // If true, make this extraction required; the key builder will not match + // if no value is found. + RequiredMatch bool `protobuf:"varint,3,opt,name=required_match,json=requiredMatch,proto3" json:"required_match,omitempty"` +} + +func (x *NameMatcher) Reset() { + *x = NameMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NameMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NameMatcher) ProtoMessage() {} + +func (x *NameMatcher) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NameMatcher.ProtoReflect.Descriptor instead. 
+func (*NameMatcher) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_config_proto_rawDescGZIP(), []int{0} +} + +func (x *NameMatcher) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *NameMatcher) GetNames() []string { + if x != nil { + return x.Names + } + return nil +} + +func (x *NameMatcher) GetRequiredMatch() bool { + if x != nil { + return x.RequiredMatch + } + return false +} + +// A GrpcKeyBuilder applies to a given gRPC service, name, and headers. +type GrpcKeyBuilder struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Names []*GrpcKeyBuilder_Name `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` + ExtraKeys *GrpcKeyBuilder_ExtraKeys `protobuf:"bytes,3,opt,name=extra_keys,json=extraKeys,proto3" json:"extra_keys,omitempty"` + // Extract keys from all listed headers. + // For gRPC, it is an error to specify "required_match" on the NameMatcher + // protos. + Headers []*NameMatcher `protobuf:"bytes,2,rep,name=headers,proto3" json:"headers,omitempty"` + // You can optionally set one or more specific key/value pairs to be added to + // the key_map. This can be useful to identify which builder built the key, + // for example if you are suppressing the actual method, but need to + // separately cache and request all the matched methods. + ConstantKeys map[string]string `protobuf:"bytes,4,rep,name=constant_keys,json=constantKeys,proto3" json:"constant_keys,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *GrpcKeyBuilder) Reset() { + *x = GrpcKeyBuilder{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcKeyBuilder) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcKeyBuilder) ProtoMessage() {} + +func (x *GrpcKeyBuilder) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcKeyBuilder.ProtoReflect.Descriptor instead. +func (*GrpcKeyBuilder) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_config_proto_rawDescGZIP(), []int{1} +} + +func (x *GrpcKeyBuilder) GetNames() []*GrpcKeyBuilder_Name { + if x != nil { + return x.Names + } + return nil +} + +func (x *GrpcKeyBuilder) GetExtraKeys() *GrpcKeyBuilder_ExtraKeys { + if x != nil { + return x.ExtraKeys + } + return nil +} + +func (x *GrpcKeyBuilder) GetHeaders() []*NameMatcher { + if x != nil { + return x.Headers + } + return nil +} + +func (x *GrpcKeyBuilder) GetConstantKeys() map[string]string { + if x != nil { + return x.ConstantKeys + } + return nil +} + +// An HttpKeyBuilder applies to a given HTTP URL and headers. +// +// Path and host patterns use the matching syntax from gRPC transcoding to +// extract named key/value pairs from the path and host components of the URL: +// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto +// +// It is invalid to specify the same key name in multiple places in a pattern. 
+// +// For a service where the project id can be expressed either as a subdomain or +// in the path, separate HttpKeyBuilders must be used: +// +// host_pattern: 'example.com' path_pattern: '/{id}/{object}/**' +// host_pattern: '{id}.example.com' path_pattern: '/{object}/**' +// +// If the host is exactly 'example.com', the first path segment will be used as +// the id and the second segment as the object. If the host has a subdomain, the +// subdomain will be used as the id and the first segment as the object. If +// neither pattern matches, no keys will be extracted. +type HttpKeyBuilder struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // host_pattern is an ordered list of host template patterns for the desired + // value. If any host_pattern values are specified, then at least one must + // match, and the last one wins and sets any specified variables. A host + // consists of labels separated by dots. Each label is matched against the + // label in the pattern as follows: + // - "*": Matches any single label. + // - "**": Matches zero or more labels (first or last part of host only). + // - "{=...}": One or more label capture, where "..." can be any + // template that does not include a capture. + // - "{}": A single label capture. Identical to {=*}. + // + // Examples: + // - "example.com": Only applies to the exact host example.com. + // - "*.example.com": Matches subdomains of example.com. + // - "**.example.com": matches example.com, and all levels of subdomains. + // - "{project}.example.com": Extracts the third level subdomain. + // - "{project=**}.example.com": Extracts the third level+ subdomains. + // - "{project=**}": Extracts the entire host. + HostPatterns []string `protobuf:"bytes,1,rep,name=host_patterns,json=hostPatterns,proto3" json:"host_patterns,omitempty"` + // path_pattern is an ordered list of path template patterns for the desired + // value. If any path_pattern values are specified, then at least one must + // match, and the last one wins and sets any specified variables. A path + // consists of segments separated by slashes. Each segment is matched against + // the segment in the pattern as follows: + // - "*": Matches any single segment. + // - "**": Matches zero or more segments (first or last part of path only). + // - "{=...}": One or more segment capture, where "..." can be any + // template that does not include a capture. + // - "{}": A single segment capture. Identical to {=*}. + // + // A custom method may also be specified by appending ":" and the custom + // method name or "*" to indicate any custom method (including no custom + // method). For example, "/*/projects/{project_id}/**:*" extracts + // `{project_id}` for any version, resource and custom method that includes + // it. By default, any custom method will be matched. + // + // Examples: + // - "/v1/{name=messages/*}": extracts a name like "messages/12345". + // - "/v1/messages/{message_id}": extracts a message_id like "12345". + // - "/v1/users/{user_id}/messages/{message_id}": extracts two key values. + PathPatterns []string `protobuf:"bytes,2,rep,name=path_patterns,json=pathPatterns,proto3" json:"path_patterns,omitempty"` + // List of query parameter names to try to match. + // For example: ["parent", "name", "resource.name"] + // We extract all the specified query_parameters (case-sensitively). If any + // are marked as "required_match" and are not present, this keybuilder fails + // to match. 
If a given parameter appears multiple times (?foo=a&foo=b) we + // will report it as a comma-separated string (foo=a,b). + QueryParameters []*NameMatcher `protobuf:"bytes,3,rep,name=query_parameters,json=queryParameters,proto3" json:"query_parameters,omitempty"` + // List of headers to try to match. + // We extract all the specified header values (case-insensitively). If any + // are marked as "required_match" and are not present, this keybuilder fails + // to match. If a given header appears multiple times in the request we will + // report it as a comma-separated string, in standard HTTP fashion. + Headers []*NameMatcher `protobuf:"bytes,4,rep,name=headers,proto3" json:"headers,omitempty"` + // You can optionally set one or more specific key/value pairs to be added to + // the key_map. This can be useful to identify which builder built the key, + // for example if you are suppressing a lot of information from the URL, but + // need to separately cache and request URLs with that content. + ConstantKeys map[string]string `protobuf:"bytes,5,rep,name=constant_keys,json=constantKeys,proto3" json:"constant_keys,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // If specified, the HTTP method/verb will be extracted under this key name. + Method string `protobuf:"bytes,6,opt,name=method,proto3" json:"method,omitempty"` +} + +func (x *HttpKeyBuilder) Reset() { + *x = HttpKeyBuilder{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpKeyBuilder) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpKeyBuilder) ProtoMessage() {} + +func (x *HttpKeyBuilder) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpKeyBuilder.ProtoReflect.Descriptor instead. +func (*HttpKeyBuilder) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_config_proto_rawDescGZIP(), []int{2} +} + +func (x *HttpKeyBuilder) GetHostPatterns() []string { + if x != nil { + return x.HostPatterns + } + return nil +} + +func (x *HttpKeyBuilder) GetPathPatterns() []string { + if x != nil { + return x.PathPatterns + } + return nil +} + +func (x *HttpKeyBuilder) GetQueryParameters() []*NameMatcher { + if x != nil { + return x.QueryParameters + } + return nil +} + +func (x *HttpKeyBuilder) GetHeaders() []*NameMatcher { + if x != nil { + return x.Headers + } + return nil +} + +func (x *HttpKeyBuilder) GetConstantKeys() map[string]string { + if x != nil { + return x.ConstantKeys + } + return nil +} + +func (x *HttpKeyBuilder) GetMethod() string { + if x != nil { + return x.Method + } + return "" +} + +type RouteLookupConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Ordered specifications for constructing keys for HTTP requests. Last + // match wins. If no HttpKeyBuilder matches, an empty key_map will be sent to + // the lookup service; it should likely reply with a global default route + // and raise an alert. 
+ HttpKeybuilders []*HttpKeyBuilder `protobuf:"bytes,1,rep,name=http_keybuilders,json=httpKeybuilders,proto3" json:"http_keybuilders,omitempty"` + // Unordered specifications for constructing keys for gRPC requests. All + // GrpcKeyBuilders on this list must have unique "name" fields so that the + // client is free to prebuild a hash map keyed by name. If no GrpcKeyBuilder + // matches, an empty key_map will be sent to the lookup service; it should + // likely reply with a global default route and raise an alert. + GrpcKeybuilders []*GrpcKeyBuilder `protobuf:"bytes,2,rep,name=grpc_keybuilders,json=grpcKeybuilders,proto3" json:"grpc_keybuilders,omitempty"` + // The name of the lookup service as a gRPC URI. Typically, this will be + // a subdomain of the target, such as "lookup.datastore.googleapis.com". + LookupService string `protobuf:"bytes,3,opt,name=lookup_service,json=lookupService,proto3" json:"lookup_service,omitempty"` + // Configure a timeout value for lookup service requests. + // Defaults to 10 seconds if not specified. + LookupServiceTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=lookup_service_timeout,json=lookupServiceTimeout,proto3" json:"lookup_service_timeout,omitempty"` + // How long are responses valid for (like HTTP Cache-Control). + // If omitted or zero, the longest valid cache time is used. + // This value is clamped to 5 minutes to avoid unflushable bad responses. + MaxAge *durationpb.Duration `protobuf:"bytes,5,opt,name=max_age,json=maxAge,proto3" json:"max_age,omitempty"` + // After a response has been in the client cache for this amount of time + // and is re-requested, start an asynchronous RPC to re-validate it. + // This value should be less than max_age by at least the length of a + // typical RTT to the Route Lookup Service to fully mask the RTT latency. + // If omitted, keys are only re-requested after they have expired. + StaleAge *durationpb.Duration `protobuf:"bytes,6,opt,name=stale_age,json=staleAge,proto3" json:"stale_age,omitempty"` + // Rough indicator of amount of memory to use for the client cache. Some of + // the data structure overhead is not accounted for, so actual memory consumed + // will be somewhat greater than this value. If this field is omitted or set + // to zero, a client default will be used. The value may be capped to a lower + // amount based on client configuration. + CacheSizeBytes int64 `protobuf:"varint,7,opt,name=cache_size_bytes,json=cacheSizeBytes,proto3" json:"cache_size_bytes,omitempty"` + // This is a list of all the possible targets that can be returned by the + // lookup service. If a target not on this list is returned, it will be + // treated the same as an unhealthy target. + ValidTargets []string `protobuf:"bytes,8,rep,name=valid_targets,json=validTargets,proto3" json:"valid_targets,omitempty"` + // This value provides a default target to use if needed. If set, it will be + // used if RLS returns an error, times out, or returns an invalid response. + // Note that requests can be routed only to a subdomain of the original + // target, e.g. "us_east_1.cloudbigtable.googleapis.com". 
+ DefaultTarget string `protobuf:"bytes,9,opt,name=default_target,json=defaultTarget,proto3" json:"default_target,omitempty"` +} + +func (x *RouteLookupConfig) Reset() { + *x = RouteLookupConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteLookupConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteLookupConfig) ProtoMessage() {} + +func (x *RouteLookupConfig) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteLookupConfig.ProtoReflect.Descriptor instead. +func (*RouteLookupConfig) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_config_proto_rawDescGZIP(), []int{3} +} + +func (x *RouteLookupConfig) GetHttpKeybuilders() []*HttpKeyBuilder { + if x != nil { + return x.HttpKeybuilders + } + return nil +} + +func (x *RouteLookupConfig) GetGrpcKeybuilders() []*GrpcKeyBuilder { + if x != nil { + return x.GrpcKeybuilders + } + return nil +} + +func (x *RouteLookupConfig) GetLookupService() string { + if x != nil { + return x.LookupService + } + return "" +} + +func (x *RouteLookupConfig) GetLookupServiceTimeout() *durationpb.Duration { + if x != nil { + return x.LookupServiceTimeout + } + return nil +} + +func (x *RouteLookupConfig) GetMaxAge() *durationpb.Duration { + if x != nil { + return x.MaxAge + } + return nil +} + +func (x *RouteLookupConfig) GetStaleAge() *durationpb.Duration { + if x != nil { + return x.StaleAge + } + return nil +} + +func (x *RouteLookupConfig) GetCacheSizeBytes() int64 { + if x != nil { + return x.CacheSizeBytes + } + return 0 +} + +func (x *RouteLookupConfig) GetValidTargets() []string { + if x != nil { + return x.ValidTargets + } + return nil +} + +func (x *RouteLookupConfig) GetDefaultTarget() string { + if x != nil { + return x.DefaultTarget + } + return "" +} + +// RouteLookupClusterSpecifier is used in xDS to represent a cluster specifier +// plugin for RLS. +type RouteLookupClusterSpecifier struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The RLS config for this cluster specifier plugin instance. + RouteLookupConfig *RouteLookupConfig `protobuf:"bytes,1,opt,name=route_lookup_config,json=routeLookupConfig,proto3" json:"route_lookup_config,omitempty"` +} + +func (x *RouteLookupClusterSpecifier) Reset() { + *x = RouteLookupClusterSpecifier{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteLookupClusterSpecifier) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteLookupClusterSpecifier) ProtoMessage() {} + +func (x *RouteLookupClusterSpecifier) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteLookupClusterSpecifier.ProtoReflect.Descriptor instead. 
+func (*RouteLookupClusterSpecifier) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_config_proto_rawDescGZIP(), []int{4} +} + +func (x *RouteLookupClusterSpecifier) GetRouteLookupConfig() *RouteLookupConfig { + if x != nil { + return x.RouteLookupConfig + } + return nil +} + +// To match, one of the given Name fields must match; the service and method +// fields are specified as fixed strings. The service name is required and +// includes the proto package name. The method name may be omitted, in +// which case any method on the given service is matched. +type GrpcKeyBuilder_Name struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"` +} + +func (x *GrpcKeyBuilder_Name) Reset() { + *x = GrpcKeyBuilder_Name{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcKeyBuilder_Name) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcKeyBuilder_Name) ProtoMessage() {} + +func (x *GrpcKeyBuilder_Name) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcKeyBuilder_Name.ProtoReflect.Descriptor instead. +func (*GrpcKeyBuilder_Name) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_config_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *GrpcKeyBuilder_Name) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *GrpcKeyBuilder_Name) GetMethod() string { + if x != nil { + return x.Method + } + return "" +} + +// If you wish to include the host, service, or method names as keys in the +// generated RouteLookupRequest, specify key names to use in the extra_keys +// submessage. If a key name is empty, no key will be set for that value. +// If this submessage is specified, the normal host/path fields will be left +// unset in the RouteLookupRequest. We are deprecating host/path in the +// RouteLookupRequest, so services should migrate to the ExtraKeys approach. 
+type GrpcKeyBuilder_ExtraKeys struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"` + Method string `protobuf:"bytes,3,opt,name=method,proto3" json:"method,omitempty"` +} + +func (x *GrpcKeyBuilder_ExtraKeys) Reset() { + *x = GrpcKeyBuilder_ExtraKeys{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcKeyBuilder_ExtraKeys) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcKeyBuilder_ExtraKeys) ProtoMessage() {} + +func (x *GrpcKeyBuilder_ExtraKeys) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcKeyBuilder_ExtraKeys.ProtoReflect.Descriptor instead. +func (*GrpcKeyBuilder_ExtraKeys) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_config_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *GrpcKeyBuilder_ExtraKeys) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (x *GrpcKeyBuilder_ExtraKeys) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *GrpcKeyBuilder_ExtraKeys) GetMethod() string { + if x != nil { + return x.Method + } + return "" +} + +var File_grpc_lookup_v1_rls_config_proto protoreflect.FileDescriptor + +var file_grpc_lookup_v1_rls_config_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x76, 0x31, + 0x2f, 0x72, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, + 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x5c, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x64, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x22, + 0xf0, 0x03, 0x0a, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, + 0x65, 0x72, 0x12, 0x39, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, + 0x72, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x47, 0x0a, + 0x0a, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 
0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, + 0x72, 0x2e, 0x45, 0x78, 0x74, 0x72, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x09, 0x65, 0x78, 0x74, + 0x72, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x35, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, + 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x55, 0x0a, + 0x0d, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, + 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x4b, 0x65, 0x79, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, + 0x4b, 0x65, 0x79, 0x73, 0x1a, 0x38, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x1a, 0x51, + 0x0a, 0x09, 0x45, 0x78, 0x74, 0x72, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x1a, 0x3f, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x4b, 0x65, 0x79, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0x89, 0x03, 0x0a, 0x0e, 0x48, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x42, 0x75, + 0x69, 0x6c, 0x64, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x61, + 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x68, 0x6f, + 0x73, 0x74, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x61, + 0x74, 0x68, 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0c, 0x70, 0x61, 0x74, 0x68, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x12, + 0x46, 0x0a, 0x10, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, + 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x35, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 
0x67, 0x72, 0x70, 0x63, 0x2e, + 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x55, + 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x42, 0x75, + 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x4b, 0x65, + 0x79, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x74, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x1a, 0x3f, 0x0a, + 0x11, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa6, + 0x04, 0x0a, 0x11, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x49, 0x0a, 0x10, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6b, 0x65, 0x79, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, + 0x48, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x52, 0x0f, + 0x68, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x12, + 0x49, 0x0a, 0x10, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4b, + 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x52, 0x0f, 0x67, 0x72, 0x70, 0x63, 0x4b, + 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x4f, 0x0a, 0x16, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x6c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, + 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x5f, + 0x61, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x41, 0x67, 0x65, 0x12, 0x28, + 0x0a, 0x10, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x25, 0x0a, + 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x52, 0x1b, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x73, + 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x22, 0x70, 0x0a, 0x1b, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, + 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, + 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x53, 0x0a, 0x11, 0x69, 0x6f, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, 0x0e, + 0x52, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, + 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_lookup_v1_rls_config_proto_rawDescOnce sync.Once + file_grpc_lookup_v1_rls_config_proto_rawDescData = file_grpc_lookup_v1_rls_config_proto_rawDesc +) + +func file_grpc_lookup_v1_rls_config_proto_rawDescGZIP() []byte { + file_grpc_lookup_v1_rls_config_proto_rawDescOnce.Do(func() { + file_grpc_lookup_v1_rls_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_lookup_v1_rls_config_proto_rawDescData) + }) + return file_grpc_lookup_v1_rls_config_proto_rawDescData +} + +var file_grpc_lookup_v1_rls_config_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_grpc_lookup_v1_rls_config_proto_goTypes = []any{ + (*NameMatcher)(nil), // 0: grpc.lookup.v1.NameMatcher + (*GrpcKeyBuilder)(nil), // 1: grpc.lookup.v1.GrpcKeyBuilder + (*HttpKeyBuilder)(nil), // 2: grpc.lookup.v1.HttpKeyBuilder + (*RouteLookupConfig)(nil), // 3: grpc.lookup.v1.RouteLookupConfig + (*RouteLookupClusterSpecifier)(nil), // 4: grpc.lookup.v1.RouteLookupClusterSpecifier + (*GrpcKeyBuilder_Name)(nil), // 5: grpc.lookup.v1.GrpcKeyBuilder.Name + (*GrpcKeyBuilder_ExtraKeys)(nil), // 6: grpc.lookup.v1.GrpcKeyBuilder.ExtraKeys + nil, // 7: 
grpc.lookup.v1.GrpcKeyBuilder.ConstantKeysEntry + nil, // 8: grpc.lookup.v1.HttpKeyBuilder.ConstantKeysEntry + (*durationpb.Duration)(nil), // 9: google.protobuf.Duration +} +var file_grpc_lookup_v1_rls_config_proto_depIdxs = []int32{ + 5, // 0: grpc.lookup.v1.GrpcKeyBuilder.names:type_name -> grpc.lookup.v1.GrpcKeyBuilder.Name + 6, // 1: grpc.lookup.v1.GrpcKeyBuilder.extra_keys:type_name -> grpc.lookup.v1.GrpcKeyBuilder.ExtraKeys + 0, // 2: grpc.lookup.v1.GrpcKeyBuilder.headers:type_name -> grpc.lookup.v1.NameMatcher + 7, // 3: grpc.lookup.v1.GrpcKeyBuilder.constant_keys:type_name -> grpc.lookup.v1.GrpcKeyBuilder.ConstantKeysEntry + 0, // 4: grpc.lookup.v1.HttpKeyBuilder.query_parameters:type_name -> grpc.lookup.v1.NameMatcher + 0, // 5: grpc.lookup.v1.HttpKeyBuilder.headers:type_name -> grpc.lookup.v1.NameMatcher + 8, // 6: grpc.lookup.v1.HttpKeyBuilder.constant_keys:type_name -> grpc.lookup.v1.HttpKeyBuilder.ConstantKeysEntry + 2, // 7: grpc.lookup.v1.RouteLookupConfig.http_keybuilders:type_name -> grpc.lookup.v1.HttpKeyBuilder + 1, // 8: grpc.lookup.v1.RouteLookupConfig.grpc_keybuilders:type_name -> grpc.lookup.v1.GrpcKeyBuilder + 9, // 9: grpc.lookup.v1.RouteLookupConfig.lookup_service_timeout:type_name -> google.protobuf.Duration + 9, // 10: grpc.lookup.v1.RouteLookupConfig.max_age:type_name -> google.protobuf.Duration + 9, // 11: grpc.lookup.v1.RouteLookupConfig.stale_age:type_name -> google.protobuf.Duration + 3, // 12: grpc.lookup.v1.RouteLookupClusterSpecifier.route_lookup_config:type_name -> grpc.lookup.v1.RouteLookupConfig + 13, // [13:13] is the sub-list for method output_type + 13, // [13:13] is the sub-list for method input_type + 13, // [13:13] is the sub-list for extension type_name + 13, // [13:13] is the sub-list for extension extendee + 0, // [0:13] is the sub-list for field type_name +} + +func init() { file_grpc_lookup_v1_rls_config_proto_init() } +func file_grpc_lookup_v1_rls_config_proto_init() { + if File_grpc_lookup_v1_rls_config_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_lookup_v1_rls_config_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*NameMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lookup_v1_rls_config_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*GrpcKeyBuilder); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lookup_v1_rls_config_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*HttpKeyBuilder); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lookup_v1_rls_config_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*RouteLookupConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lookup_v1_rls_config_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*RouteLookupClusterSpecifier); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lookup_v1_rls_config_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*GrpcKeyBuilder_Name); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_grpc_lookup_v1_rls_config_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*GrpcKeyBuilder_ExtraKeys); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_lookup_v1_rls_config_proto_rawDesc, + NumEnums: 0, + NumMessages: 9, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_grpc_lookup_v1_rls_config_proto_goTypes, + DependencyIndexes: file_grpc_lookup_v1_rls_config_proto_depIdxs, + MessageInfos: file_grpc_lookup_v1_rls_config_proto_msgTypes, + }.Build() + File_grpc_lookup_v1_rls_config_proto = out.File + file_grpc_lookup_v1_rls_config_proto_rawDesc = nil + file_grpc_lookup_v1_rls_config_proto_goTypes = nil + file_grpc_lookup_v1_rls_config_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go new file mode 100644 index 0000000000..23dcb2100c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go @@ -0,0 +1,137 @@ +// Copyright 2020 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 +// source: grpc/lookup/v1/rls.proto + +package grpc_lookup_v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + RouteLookupService_RouteLookup_FullMethodName = "/grpc.lookup.v1.RouteLookupService/RouteLookup" +) + +// RouteLookupServiceClient is the client API for RouteLookupService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type RouteLookupServiceClient interface { + // Lookup returns a target for a single key. + RouteLookup(ctx context.Context, in *RouteLookupRequest, opts ...grpc.CallOption) (*RouteLookupResponse, error) +} + +type routeLookupServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewRouteLookupServiceClient(cc grpc.ClientConnInterface) RouteLookupServiceClient { + return &routeLookupServiceClient{cc} +} + +func (c *routeLookupServiceClient) RouteLookup(ctx context.Context, in *RouteLookupRequest, opts ...grpc.CallOption) (*RouteLookupResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
+ out := new(RouteLookupResponse) + err := c.cc.Invoke(ctx, RouteLookupService_RouteLookup_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// RouteLookupServiceServer is the server API for RouteLookupService service. +// All implementations must embed UnimplementedRouteLookupServiceServer +// for forward compatibility. +type RouteLookupServiceServer interface { + // Lookup returns a target for a single key. + RouteLookup(context.Context, *RouteLookupRequest) (*RouteLookupResponse, error) + mustEmbedUnimplementedRouteLookupServiceServer() +} + +// UnimplementedRouteLookupServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedRouteLookupServiceServer struct{} + +func (UnimplementedRouteLookupServiceServer) RouteLookup(context.Context, *RouteLookupRequest) (*RouteLookupResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RouteLookup not implemented") +} +func (UnimplementedRouteLookupServiceServer) mustEmbedUnimplementedRouteLookupServiceServer() {} +func (UnimplementedRouteLookupServiceServer) testEmbeddedByValue() {} + +// UnsafeRouteLookupServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to RouteLookupServiceServer will +// result in compilation errors. +type UnsafeRouteLookupServiceServer interface { + mustEmbedUnimplementedRouteLookupServiceServer() +} + +func RegisterRouteLookupServiceServer(s grpc.ServiceRegistrar, srv RouteLookupServiceServer) { + // If the following call panics, it indicates UnimplementedRouteLookupServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&RouteLookupService_ServiceDesc, srv) +} + +func _RouteLookupService_RouteLookup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RouteLookupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RouteLookupServiceServer).RouteLookup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: RouteLookupService_RouteLookup_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RouteLookupServiceServer).RouteLookup(ctx, req.(*RouteLookupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// RouteLookupService_ServiceDesc is the grpc.ServiceDesc for RouteLookupService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var RouteLookupService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.lookup.v1.RouteLookupService", + HandlerType: (*RouteLookupServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "RouteLookup", + Handler: _RouteLookupService_RouteLookup_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "grpc/lookup/v1/rls.proto", +} diff --git a/vendor/google.golang.org/grpc/internal/wrr/edf.go b/vendor/google.golang.org/grpc/internal/wrr/edf.go new file mode 100644 index 0000000000..a06656f464 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/wrr/edf.go @@ -0,0 +1,92 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package wrr + +import ( + "container/heap" + "sync" +) + +// edfWrr is a struct for EDF weighted round robin implementation. +type edfWrr struct { + lock sync.Mutex + items edfPriorityQueue + currentOrderOffset uint64 + currentTime float64 +} + +// NewEDF creates Earliest Deadline First (EDF) +// (https://en.wikipedia.org/wiki/Earliest_deadline_first_scheduling) implementation for weighted round robin. +// Each pick from the schedule has the earliest deadline entry selected. Entries have deadlines set +// at current time + 1 / weight, providing weighted round robin behavior with O(log n) pick time. +func NewEDF() WRR { + return &edfWrr{} +} + +// edfEntry is an internal wrapper for item that also stores weight and relative position in the queue. +type edfEntry struct { + deadline float64 + weight int64 + orderOffset uint64 + item any +} + +// edfPriorityQueue is a heap.Interface implementation for edfEntry elements. 
+type edfPriorityQueue []*edfEntry + +func (pq edfPriorityQueue) Len() int { return len(pq) } +func (pq edfPriorityQueue) Less(i, j int) bool { + return pq[i].deadline < pq[j].deadline || pq[i].deadline == pq[j].deadline && pq[i].orderOffset < pq[j].orderOffset +} +func (pq edfPriorityQueue) Swap(i, j int) { pq[i], pq[j] = pq[j], pq[i] } + +func (pq *edfPriorityQueue) Push(x any) { + *pq = append(*pq, x.(*edfEntry)) +} + +func (pq *edfPriorityQueue) Pop() any { + old := *pq + *pq = old[0 : len(old)-1] + return old[len(old)-1] +} + +func (edf *edfWrr) Add(item any, weight int64) { + edf.lock.Lock() + defer edf.lock.Unlock() + entry := edfEntry{ + deadline: edf.currentTime + 1.0/float64(weight), + weight: weight, + item: item, + orderOffset: edf.currentOrderOffset, + } + edf.currentOrderOffset++ + heap.Push(&edf.items, &entry) +} + +func (edf *edfWrr) Next() any { + edf.lock.Lock() + defer edf.lock.Unlock() + if len(edf.items) == 0 { + return nil + } + item := edf.items[0] + edf.currentTime = item.deadline + item.deadline = edf.currentTime + 1.0/float64(item.weight) + heap.Fix(&edf.items, 0) + return item.item +} diff --git a/vendor/google.golang.org/grpc/internal/wrr/random.go b/vendor/google.golang.org/grpc/internal/wrr/random.go new file mode 100644 index 0000000000..3f611a3505 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/wrr/random.go @@ -0,0 +1,84 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package wrr + +import ( + "fmt" + "math/rand" + "sort" +) + +// weightedItem is a wrapped weighted item that is used to implement weighted random algorithm. +type weightedItem struct { + item any + weight int64 + accumulatedWeight int64 +} + +func (w *weightedItem) String() string { + return fmt.Sprint(*w) +} + +// randomWRR is a struct that contains weighted items implement weighted random algorithm. +type randomWRR struct { + items []*weightedItem + // Are all item's weights equal + equalWeights bool +} + +// NewRandom creates a new WRR with random. +func NewRandom() WRR { + return &randomWRR{} +} + +var randInt63n = rand.Int63n + +func (rw *randomWRR) Next() (item any) { + if len(rw.items) == 0 { + return nil + } + if rw.equalWeights { + return rw.items[randInt63n(int64(len(rw.items)))].item + } + + sumOfWeights := rw.items[len(rw.items)-1].accumulatedWeight + // Random number in [0, sumOfWeights). + randomWeight := randInt63n(sumOfWeights) + // Item's accumulated weights are in ascending order, because item's weight >= 0. 
+ // Binary search rw.items to find first item whose accumulatedWeight > randomWeight + // The return i is guaranteed to be in range [0, len(rw.items)) because randomWeight < last item's accumulatedWeight + i := sort.Search(len(rw.items), func(i int) bool { return rw.items[i].accumulatedWeight > randomWeight }) + return rw.items[i].item +} + +func (rw *randomWRR) Add(item any, weight int64) { + accumulatedWeight := weight + equalWeights := true + if len(rw.items) > 0 { + lastItem := rw.items[len(rw.items)-1] + accumulatedWeight = lastItem.accumulatedWeight + weight + equalWeights = rw.equalWeights && weight == lastItem.weight + } + rw.equalWeights = equalWeights + rItem := &weightedItem{item: item, weight: weight, accumulatedWeight: accumulatedWeight} + rw.items = append(rw.items, rItem) +} + +func (rw *randomWRR) String() string { + return fmt.Sprint(rw.items) +} diff --git a/vendor/google.golang.org/grpc/internal/wrr/wrr.go b/vendor/google.golang.org/grpc/internal/wrr/wrr.go new file mode 100644 index 0000000000..91a40d7357 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/wrr/wrr.go @@ -0,0 +1,32 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package wrr contains the interface and common implementations of wrr +// algorithms. +package wrr + +// WRR defines an interface that implements weighted round robin. +type WRR interface { + // Add adds an item with weight to the WRR set. Add must be only called + // before any calls to Next. + Add(item any, weight int64) + // Next returns the next picked item. + // + // Next needs to be thread safe. Add may not be called after any call to + // Next. + Next() any +} diff --git a/vendor/google.golang.org/grpc/internal/xds/bootstrap/bootstrap.go b/vendor/google.golang.org/grpc/internal/xds/bootstrap/bootstrap.go new file mode 100644 index 0000000000..8317859e1e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds/bootstrap/bootstrap.go @@ -0,0 +1,828 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package bootstrap provides the functionality to initialize certain aspects +// of an xDS client by reading a bootstrap file. 
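For illustration, a minimal sketch of how the WRR interface vendored above is typically driven from inside the grpc module (the item names and weights here are made up, and google.golang.org/grpc/internal/wrr is an internal package, so it is not importable from outside gRPC):

// Sketch only: hypothetical items and weights.
w := wrr.NewEDF() // or wrr.NewRandom()
w.Add("backend-a", 3)
w.Add("backend-b", 1)
for i := 0; i < 8; i++ {
	_ = w.Next() // returns "backend-a" roughly three times as often as "backend-b"
}

Per the interface contract above, Add must not be called after the first call to Next, and Next must be safe for concurrent use.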
+package bootstrap + +import ( + "bytes" + "encoding/json" + "fmt" + "maps" + "net/url" + "os" + "slices" + "strings" + "sync" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/tls/certprovider" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/xds/bootstrap" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/structpb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" +) + +const ( + serverFeaturesIgnoreResourceDeletion = "ignore_resource_deletion" + gRPCUserAgentName = "gRPC Go" + clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning" + clientFeatureResourceWrapper = "xds.config.resource-in-sotw" +) + +// For overriding in unit tests. +var bootstrapFileReadFunc = os.ReadFile + +// ChannelCreds contains the credentials to be used while communicating with an +// xDS server. It is also used to dedup servers with the same server URI. +// +// This type does not implement custom JSON marshal/unmarshal logic because it +// is straightforward to accomplish the same with json struct tags. +type ChannelCreds struct { + // Type contains a unique name identifying the credentials type. The only + // supported types currently are "google_default" and "insecure". + Type string `json:"type,omitempty"` + // Config contains the JSON configuration associated with the credentials. + Config json.RawMessage `json:"config,omitempty"` +} + +// Equal reports whether cc and other are considered equal. +func (cc ChannelCreds) Equal(other ChannelCreds) bool { + return cc.Type == other.Type && bytes.Equal(cc.Config, other.Config) +} + +// String returns a string representation of the credentials. It contains the +// type and the config (if non-nil) separated by a "-". +func (cc ChannelCreds) String() string { + if cc.Config == nil { + return cc.Type + } + + // We do not expect the Marshal call to fail since we wrote to cc.Config + // after a successful unmarshalling from JSON configuration. Therefore, + // it is safe to ignore the error here. + b, _ := json.Marshal(cc.Config) + return cc.Type + "-" + string(b) +} + +// ServerConfigs represents a collection of server configurations. +type ServerConfigs []*ServerConfig + +// Equal returns true if scs equals other. +func (scs *ServerConfigs) Equal(other *ServerConfigs) bool { + if len(*scs) != len(*other) { + return false + } + for i := range *scs { + if !(*scs)[i].Equal((*other)[i]) { + return false + } + } + return true +} + +// UnmarshalJSON takes the json data (a list of server configurations) and +// unmarshals it to the struct. +func (scs *ServerConfigs) UnmarshalJSON(data []byte) error { + servers := []*ServerConfig{} + if err := json.Unmarshal(data, &servers); err != nil { + return fmt.Errorf("xds: failed to JSON unmarshal server configurations during bootstrap: %v, config:\n%s", err, string(data)) + } + // Only use the first server config if fallback support is disabled. + if !envconfig.XDSFallbackSupport { + if len(servers) > 1 { + servers = servers[:1] + } + } + *scs = servers + return nil +} + +// String returns a string representation of the ServerConfigs, by concatenating +// the string representations of the underlying server configs. +func (scs *ServerConfigs) String() string { + ret := "" + for i, sc := range *scs { + if i > 0 { + ret += ", " + } + ret += sc.String() + } + return ret +} + +// Authority contains configuration for an xDS control plane authority. 
+// +// This type does not implement custom JSON marshal/unmarshal logic because it +// is straightforward to accomplish the same with json struct tags. +type Authority struct { + // ClientListenerResourceNameTemplate is template for the name of the + // Listener resource to subscribe to for a gRPC client channel. Used only + // when the channel is created using an "xds:" URI with this authority name. + // + // The token "%s", if present in this string, will be replaced + // with %-encoded service authority (i.e., the path part of the target + // URI used to create the gRPC channel). + // + // Must start with "xdstp:///". If it does not, + // that is considered a bootstrap file parsing error. + // + // If not present in the bootstrap file, defaults to + // "xdstp:///envoy.config.listener.v3.Listener/%s". + ClientListenerResourceNameTemplate string `json:"client_listener_resource_name_template,omitempty"` + // XDSServers contains the list of server configurations for this authority. + XDSServers ServerConfigs `json:"xds_servers,omitempty"` +} + +// Equal returns true if a equals other. +func (a *Authority) Equal(other *Authority) bool { + switch { + case a == nil && other == nil: + return true + case (a != nil) != (other != nil): + return false + case a.ClientListenerResourceNameTemplate != other.ClientListenerResourceNameTemplate: + return false + case !a.XDSServers.Equal(&other.XDSServers): + return false + } + return true +} + +// ServerConfig contains the configuration to connect to a server. +type ServerConfig struct { + serverURI string + channelCreds []ChannelCreds + serverFeatures []string + + // As part of unmarshalling the JSON config into this struct, we ensure that + // the credentials config is valid by building an instance of the specified + // credentials and store it here for easy access. + selectedCreds ChannelCreds + credsDialOption grpc.DialOption + + cleanups []func() +} + +// ServerURI returns the URI of the management server to connect to. +func (sc *ServerConfig) ServerURI() string { + return sc.serverURI +} + +// ChannelCreds returns the credentials configuration to use when communicating +// with this server. Also used to dedup servers with the same server URI. +func (sc *ServerConfig) ChannelCreds() []ChannelCreds { + return sc.channelCreds +} + +// ServerFeatures returns the list of features supported by this server. Also +// used to dedup servers with the same server URI and channel creds. +func (sc *ServerConfig) ServerFeatures() []string { + return sc.serverFeatures +} + +// ServerFeaturesIgnoreResourceDeletion returns true if this server supports a +// feature where the xDS client can ignore resource deletions from this server, +// as described in gRFC A53. +// +// This feature controls the behavior of the xDS client when the server deletes +// a previously sent Listener or Cluster resource. If set, the xDS client will +// not invoke the watchers' OnResourceDoesNotExist() method when a resource is +// deleted, nor will it remove the existing resource value from its cache. +func (sc *ServerConfig) ServerFeaturesIgnoreResourceDeletion() bool { + for _, sf := range sc.serverFeatures { + if sf == serverFeaturesIgnoreResourceDeletion { + return true + } + } + return false +} + +// CredsDialOption returns the first supported transport credentials from the +// configuration, as a dial option. 
+func (sc *ServerConfig) CredsDialOption() grpc.DialOption { + return sc.credsDialOption +} + +// Cleanups returns a collection of functions to be called when the xDS client +// for this server is closed. Allows cleaning up resources created specifically +// for this server. +func (sc *ServerConfig) Cleanups() []func() { + return sc.cleanups +} + +// Equal reports whether sc and other are considered equal. +func (sc *ServerConfig) Equal(other *ServerConfig) bool { + switch { + case sc == nil && other == nil: + return true + case (sc != nil) != (other != nil): + return false + case sc.serverURI != other.serverURI: + return false + case !slices.EqualFunc(sc.channelCreds, other.channelCreds, func(a, b ChannelCreds) bool { return a.Equal(b) }): + return false + case !slices.Equal(sc.serverFeatures, other.serverFeatures): + return false + case !sc.selectedCreds.Equal(other.selectedCreds): + return false + } + return true +} + +// String returns the string representation of the ServerConfig. +func (sc *ServerConfig) String() string { + if len(sc.serverFeatures) == 0 { + return fmt.Sprintf("%s-%s", sc.serverURI, sc.selectedCreds.String()) + } + features := strings.Join(sc.serverFeatures, "-") + return strings.Join([]string{sc.serverURI, sc.selectedCreds.String(), features}, "-") +} + +// The following fields correspond 1:1 with the JSON schema for ServerConfig. +type serverConfigJSON struct { + ServerURI string `json:"server_uri,omitempty"` + ChannelCreds []ChannelCreds `json:"channel_creds,omitempty"` + ServerFeatures []string `json:"server_features,omitempty"` +} + +// MarshalJSON returns marshaled JSON bytes corresponding to this server config. +func (sc *ServerConfig) MarshalJSON() ([]byte, error) { + server := &serverConfigJSON{ + ServerURI: sc.serverURI, + ChannelCreds: sc.channelCreds, + ServerFeatures: sc.serverFeatures, + } + return json.Marshal(server) +} + +// UnmarshalJSON takes the json data (a server) and unmarshals it to the struct. +func (sc *ServerConfig) UnmarshalJSON(data []byte) error { + server := serverConfigJSON{} + if err := json.Unmarshal(data, &server); err != nil { + return fmt.Errorf("xds: failed to JSON unmarshal server configuration during bootstrap: %v, config:\n%s", err, string(data)) + } + + sc.serverURI = server.ServerURI + sc.channelCreds = server.ChannelCreds + sc.serverFeatures = server.ServerFeatures + + for _, cc := range server.ChannelCreds { + // We stop at the first credential type that we support. + c := bootstrap.GetCredentials(cc.Type) + if c == nil { + continue + } + bundle, cancel, err := c.Build(cc.Config) + if err != nil { + return fmt.Errorf("failed to build credentials bundle from bootstrap for %q: %v", cc.Type, err) + } + sc.selectedCreds = cc + sc.credsDialOption = grpc.WithCredentialsBundle(bundle) + sc.cleanups = append(sc.cleanups, cancel) + break + } + if sc.serverURI == "" { + return fmt.Errorf("xds: `server_uri` field in server config cannot be empty: %s", string(data)) + } + if sc.credsDialOption == nil { + return fmt.Errorf("xds: `channel_creds` field in server config cannot be empty: %s", string(data)) + } + return nil +} + +// ServerConfigTestingOptions specifies options for creating a new ServerConfig +// for testing purposes. +// +// # Testing-Only +type ServerConfigTestingOptions struct { + // URI is the name of the server corresponding to this server config. + URI string + // ChannelCreds contains a list of channel credentials to use when talking + // to this server. If unspecified, `insecure` credentials will be used. 
+ ChannelCreds []ChannelCreds + // ServerFeatures represents the list of features supported by this server. + ServerFeatures []string +} + +// ServerConfigForTesting creates a new ServerConfig from the passed in options, +// for testing purposes. +// +// # Testing-Only +func ServerConfigForTesting(opts ServerConfigTestingOptions) (*ServerConfig, error) { + cc := opts.ChannelCreds + if cc == nil { + cc = []ChannelCreds{{Type: "insecure"}} + } + scInternal := &serverConfigJSON{ + ServerURI: opts.URI, + ChannelCreds: cc, + ServerFeatures: opts.ServerFeatures, + } + scJSON, err := json.Marshal(scInternal) + if err != nil { + return nil, err + } + + sc := new(ServerConfig) + if err := sc.UnmarshalJSON(scJSON); err != nil { + return nil, err + } + return sc, nil +} + +// Config is the internal representation of the bootstrap configuration provided +// to the xDS client. +type Config struct { + xDSServers ServerConfigs + cpcs map[string]certproviderNameAndConfig + serverListenerResourceNameTemplate string + clientDefaultListenerResourceNameTemplate string + authorities map[string]*Authority + node node + + // A map from certprovider instance names to parsed buildable configs. + certProviderConfigs map[string]*certprovider.BuildableConfig +} + +// XDSServers returns the top-level list of management servers to connect to, +// ordered by priority. +func (c *Config) XDSServers() ServerConfigs { + return c.xDSServers +} + +// CertProviderConfigs returns a map from certificate provider plugin instance +// name to their configuration. Callers must not modify the returned map. +func (c *Config) CertProviderConfigs() map[string]*certprovider.BuildableConfig { + return c.certProviderConfigs +} + +// ServerListenerResourceNameTemplate returns template for the name of the +// Listener resource to subscribe to for a gRPC server. +// +// If starts with "xdstp:", will be interpreted as a new-style name, +// in which case the authority of the URI will be used to select the +// relevant configuration in the "authorities" map. +// +// The token "%s", if present in this string, will be replaced with the IP +// and port on which the server is listening. (e.g., "0.0.0.0:8080", +// "[::]:8080"). For example, a value of "example/resource/%s" could become +// "example/resource/0.0.0.0:8080". If the template starts with "xdstp:", +// the replaced string will be %-encoded. +// +// There is no default; if unset, xDS-based server creation fails. +func (c *Config) ServerListenerResourceNameTemplate() string { + return c.serverListenerResourceNameTemplate +} + +// ClientDefaultListenerResourceNameTemplate returns a template for the name of +// the Listener resource to subscribe to for a gRPC client channel. Used only +// when the channel is created with an "xds:" URI with no authority. +// +// If starts with "xdstp:", will be interpreted as a new-style name, +// in which case the authority of the URI will be used to select the +// relevant configuration in the "authorities" map. +// +// The token "%s", if present in this string, will be replaced with +// the service authority (i.e., the path part of the target URI +// used to create the gRPC channel). If the template starts with +// "xdstp:", the replaced string will be %-encoded. +// +// Defaults to "%s". +func (c *Config) ClientDefaultListenerResourceNameTemplate() string { + return c.clientDefaultListenerResourceNameTemplate +} + +// Authorities returns a map of authority name to corresponding configuration. +// Callers must not modify the returned map. 
+// +// This is used in the following cases: +// - A gRPC client channel is created using an "xds:" URI that includes +// an authority. +// - A gRPC client channel is created using an "xds:" URI with no +// authority, but the "client_default_listener_resource_name_template" +// field above turns it into an "xdstp:" URI. +// - A gRPC server is created and the +// "server_listener_resource_name_template" field is an "xdstp:" URI. +// +// In any of those cases, it is an error if the specified authority is +// not present in this map. +func (c *Config) Authorities() map[string]*Authority { + return c.authorities +} + +// Node returns xDS a v3 Node proto corresponding to the node field in the +// bootstrap configuration, which identifies a specific gRPC instance. +func (c *Config) Node() *v3corepb.Node { + return c.node.toProto() +} + +// Equal returns true if c equals other. +func (c *Config) Equal(other *Config) bool { + switch { + case c == nil && other == nil: + return true + case (c != nil) != (other != nil): + return false + case !c.xDSServers.Equal(&other.xDSServers): + return false + case !maps.EqualFunc(c.certProviderConfigs, other.certProviderConfigs, func(a, b *certprovider.BuildableConfig) bool { return a.String() == b.String() }): + return false + case c.serverListenerResourceNameTemplate != other.serverListenerResourceNameTemplate: + return false + case c.clientDefaultListenerResourceNameTemplate != other.clientDefaultListenerResourceNameTemplate: + return false + case !maps.EqualFunc(c.authorities, other.authorities, func(a, b *Authority) bool { return a.Equal(b) }): + return false + case !c.node.Equal(other.node): + return false + } + return true +} + +// String returns a string representation of the Config. +func (c *Config) String() string { + s, _ := c.MarshalJSON() + return string(s) +} + +// The following fields correspond 1:1 with the JSON schema for Config. +type configJSON struct { + XDSServers ServerConfigs `json:"xds_servers,omitempty"` + CertificateProviders map[string]certproviderNameAndConfig `json:"certificate_providers,omitempty"` + ServerListenerResourceNameTemplate string `json:"server_listener_resource_name_template,omitempty"` + ClientDefaultListenerResourceNameTemplate string `json:"client_default_listener_resource_name_template,omitempty"` + Authorities map[string]*Authority `json:"authorities,omitempty"` + Node node `json:"node,omitempty"` +} + +// MarshalJSON returns marshaled JSON bytes corresponding to this config. +func (c *Config) MarshalJSON() ([]byte, error) { + config := &configJSON{ + XDSServers: c.xDSServers, + CertificateProviders: c.cpcs, + ServerListenerResourceNameTemplate: c.serverListenerResourceNameTemplate, + ClientDefaultListenerResourceNameTemplate: c.clientDefaultListenerResourceNameTemplate, + Authorities: c.authorities, + Node: c.node, + } + return json.MarshalIndent(config, " ", " ") +} + +// UnmarshalJSON takes the json data (the complete bootstrap configuration) and +// unmarshals it to the struct. +func (c *Config) UnmarshalJSON(data []byte) error { + // Initialize the node field with client controlled values. This ensures + // even if the bootstrap configuration did not contain the node field, we + // will have a node field with client controlled fields alone. 
+ config := configJSON{Node: newNode()} + if err := json.Unmarshal(data, &config); err != nil { + return fmt.Errorf("xds: json.Unmarshal(%s) failed during bootstrap: %v", string(data), err) + } + + c.xDSServers = config.XDSServers + c.cpcs = config.CertificateProviders + c.serverListenerResourceNameTemplate = config.ServerListenerResourceNameTemplate + c.clientDefaultListenerResourceNameTemplate = config.ClientDefaultListenerResourceNameTemplate + c.authorities = config.Authorities + c.node = config.Node + + // Build the certificate providers configuration to ensure that it is valid. + cpcCfgs := make(map[string]*certprovider.BuildableConfig) + getBuilder := internal.GetCertificateProviderBuilder.(func(string) certprovider.Builder) + for instance, nameAndConfig := range c.cpcs { + name := nameAndConfig.PluginName + parser := getBuilder(nameAndConfig.PluginName) + if parser == nil { + // We ignore plugins that we do not know about. + continue + } + bc, err := parser.ParseConfig(nameAndConfig.Config) + if err != nil { + return fmt.Errorf("xds: config parsing for certificate provider plugin %q failed during bootstrap: %v", name, err) + } + cpcCfgs[instance] = bc + } + c.certProviderConfigs = cpcCfgs + + // Default value of the default client listener name template is "%s". + if c.clientDefaultListenerResourceNameTemplate == "" { + c.clientDefaultListenerResourceNameTemplate = "%s" + } + if len(c.xDSServers) == 0 { + return fmt.Errorf("xds: required field `xds_servers` not found in bootstrap configuration: %s", string(data)) + } + + // Post-process the authorities' client listener resource template field: + // - if set, it must start with "xdstp:///" + // - if not set, it defaults to "xdstp:///envoy.config.listener.v3.Listener/%s" + for name, authority := range c.authorities { + prefix := fmt.Sprintf("xdstp://%s", url.PathEscape(name)) + if authority.ClientListenerResourceNameTemplate == "" { + authority.ClientListenerResourceNameTemplate = prefix + "/envoy.config.listener.v3.Listener/%s" + continue + } + if !strings.HasPrefix(authority.ClientListenerResourceNameTemplate, prefix) { + return fmt.Errorf("xds: field clientListenerResourceNameTemplate %q of authority %q doesn't start with prefix %q", authority.ClientListenerResourceNameTemplate, name, prefix) + } + } + return nil +} + +// GetConfiguration returns the bootstrap configuration initialized by reading +// the bootstrap file found at ${GRPC_XDS_BOOTSTRAP} or bootstrap contents +// specified at ${GRPC_XDS_BOOTSTRAP_CONFIG}. If both env vars are set, the +// former is preferred. +// +// If none of the env vars are set, this function returns the fallback +// configuration if it is not nil. Else, it returns an error. +// +// This function tries to process as much of the bootstrap file as possible (in +// the presence of the errors) and may return a Config object with certain +// fields left unspecified, in which case the caller should use some sane +// defaults. 
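As a rough illustration of the JSON these structs accept (field names are taken from the struct tags above; the server address and node id are made-up placeholders), a minimal bootstrap document could look like the following:

// Sketch only: placeholder server address and node id.
const exampleBootstrap = `{
  "xds_servers": [{
    "server_uri": "example-xds-server:443",
    "channel_creds": [{"type": "insecure"}],
    "server_features": ["ignore_resource_deletion"]
  }],
  "node": {"id": "example-node", "locality": {"zone": "example-zone"}}
}`

Feeding such contents through Config.UnmarshalJSON (or the testing helpers below) exercises the validation performed in this package.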
+func GetConfiguration() (*Config, error) { + fName := envconfig.XDSBootstrapFileName + fContent := envconfig.XDSBootstrapFileContent + + if fName != "" { + if logger.V(2) { + logger.Infof("Using bootstrap file with name %q from GRPC_XDS_BOOTSTRAP environment variable", fName) + } + cfg, err := bootstrapFileReadFunc(fName) + if err != nil { + return nil, fmt.Errorf("xds: failed to read bootstrap config from file %q: %v", fName, err) + } + return newConfigFromContents(cfg) + } + + if fContent != "" { + if logger.V(2) { + logger.Infof("Using bootstrap contents from GRPC_XDS_BOOTSTRAP_CONFIG environment variable") + } + return newConfigFromContents([]byte(fContent)) + } + + if cfg := fallbackBootstrapConfig(); cfg != nil { + if logger.V(2) { + logger.Infof("Using bootstrap contents from fallback config") + } + return cfg, nil + } + + return nil, fmt.Errorf("bootstrap environment variables (%q or %q) not defined, and no fallback config set", envconfig.XDSBootstrapFileNameEnv, envconfig.XDSBootstrapFileContentEnv) +} + +func newConfigFromContents(data []byte) (*Config, error) { + // Normalize the input configuration. + buf := bytes.Buffer{} + err := json.Indent(&buf, data, "", "") + if err != nil { + return nil, fmt.Errorf("xds: error normalizing JSON bootstrap configuration: %v", err) + } + data = bytes.TrimSpace(buf.Bytes()) + + config := &Config{} + if err := config.UnmarshalJSON(data); err != nil { + return nil, err + } + return config, nil +} + +// ConfigOptionsForTesting specifies options for creating a new bootstrap +// configuration for testing purposes. +// +// # Testing-Only +type ConfigOptionsForTesting struct { + // Servers is the top-level xDS server configuration. It contains a list of + // server configurations. + Servers json.RawMessage + // CertificateProviders is the certificate providers configuration. + CertificateProviders map[string]json.RawMessage + // ServerListenerResourceNameTemplate is the listener resource name template + // to be used on the gRPC server. + ServerListenerResourceNameTemplate string + // ClientDefaultListenerResourceNameTemplate is the default listener + // resource name template to be used on the gRPC client. + ClientDefaultListenerResourceNameTemplate string + // Authorities is a list of non-default authorities. + Authorities map[string]json.RawMessage + // Node identifies the gRPC client/server node in the + // proxyless service mesh. + Node json.RawMessage +} + +// NewContentsForTesting creates a new bootstrap configuration from the passed in +// options, for testing purposes. 
+// +// # Testing-Only +func NewContentsForTesting(opts ConfigOptionsForTesting) ([]byte, error) { + var servers ServerConfigs + if err := json.Unmarshal(opts.Servers, &servers); err != nil { + return nil, err + } + certProviders := make(map[string]certproviderNameAndConfig) + for k, v := range opts.CertificateProviders { + cp := certproviderNameAndConfig{} + if err := json.Unmarshal(v, &cp); err != nil { + return nil, fmt.Errorf("failed to unmarshal certificate provider configuration for %s: %s", k, string(v)) + } + certProviders[k] = cp + } + authorities := make(map[string]*Authority) + for k, v := range opts.Authorities { + a := &Authority{} + if err := json.Unmarshal(v, a); err != nil { + return nil, fmt.Errorf("failed to unmarshal authority configuration for %s: %s", k, string(v)) + } + authorities[k] = a + } + node := newNode() + if err := json.Unmarshal(opts.Node, &node); err != nil { + return nil, fmt.Errorf("failed to unmarshal node configuration %s: %v", string(opts.Node), err) + } + cfgJSON := configJSON{ + XDSServers: servers, + CertificateProviders: certProviders, + ServerListenerResourceNameTemplate: opts.ServerListenerResourceNameTemplate, + ClientDefaultListenerResourceNameTemplate: opts.ClientDefaultListenerResourceNameTemplate, + Authorities: authorities, + Node: node, + } + contents, err := json.MarshalIndent(cfgJSON, " ", " ") + if err != nil { + return nil, fmt.Errorf("failed to marshal bootstrap configuration for provided options %+v: %v", opts, err) + } + return contents, nil +} + +// NewConfigForTesting creates a new bootstrap configuration from the provided +// contents, for testing purposes. +// +// # Testing-Only +func NewConfigForTesting(contents []byte) (*Config, error) { + return newConfigFromContents(contents) +} + +// certproviderNameAndConfig is the internal representation of +// the`certificate_providers` field in the bootstrap configuration. +type certproviderNameAndConfig struct { + PluginName string `json:"plugin_name"` + Config json.RawMessage `json:"config"` +} + +// locality is the internal representation of the locality field within node. +type locality struct { + Region string `json:"region,omitempty"` + Zone string `json:"zone,omitempty"` + SubZone string `json:"sub_zone,omitempty"` +} + +func (l locality) Equal(other locality) bool { + return l.Region == other.Region && l.Zone == other.Zone && l.SubZone == other.SubZone +} + +func (l locality) isEmpty() bool { + return l.Equal(locality{}) +} + +type userAgentVersion struct { + UserAgentVersion string `json:"user_agent_version,omitempty"` +} + +// node is the internal representation of the node field in the bootstrap +// configuration. +type node struct { + ID string `json:"id,omitempty"` + Cluster string `json:"cluster,omitempty"` + Locality locality `json:"locality,omitempty"` + Metadata *structpb.Struct `json:"metadata,omitempty"` + + // The following fields are controlled by the client implementation and + // should not unmarshaled from JSON. + userAgentName string + userAgentVersionType userAgentVersion + clientFeatures []string +} + +// newNode is a convenience function to create a new node instance with fields +// controlled by the client implementation set to the desired values. 
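For illustration, a minimal sketch of the testing-only helpers above; the URI is a made-up placeholder, and per ServerConfigForTesting an omitted ChannelCreds falls back to insecure credentials:

// Sketch only: hypothetical management server address.
sc, err := bootstrap.ServerConfigForTesting(bootstrap.ServerConfigTestingOptions{
	URI: "localhost:9999",
})
if err != nil {
	// handle the error
}
_ = sc.ServerURI() // "localhost:9999"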
+func newNode() node { + return node{ + userAgentName: gRPCUserAgentName, + userAgentVersionType: userAgentVersion{UserAgentVersion: grpc.Version}, + clientFeatures: []string{clientFeatureNoOverprovisioning, clientFeatureResourceWrapper}, + } +} + +func (n node) Equal(other node) bool { + switch { + case n.ID != other.ID: + return false + case n.Cluster != other.Cluster: + return false + case !n.Locality.Equal(other.Locality): + return false + case n.userAgentName != other.userAgentName: + return false + case n.userAgentVersionType != other.userAgentVersionType: + return false + } + + // Consider failures in JSON marshaling as being unable to perform the + // comparison, and hence return false. + nMetadata, err := n.Metadata.MarshalJSON() + if err != nil { + return false + } + otherMetadata, err := other.Metadata.MarshalJSON() + if err != nil { + return false + } + if !bytes.Equal(nMetadata, otherMetadata) { + return false + } + + return slices.Equal(n.clientFeatures, other.clientFeatures) +} + +func (n node) toProto() *v3corepb.Node { + return &v3corepb.Node{ + Id: n.ID, + Cluster: n.Cluster, + Locality: func() *v3corepb.Locality { + if n.Locality.isEmpty() { + return nil + } + return &v3corepb.Locality{ + Region: n.Locality.Region, + Zone: n.Locality.Zone, + SubZone: n.Locality.SubZone, + } + }(), + Metadata: proto.Clone(n.Metadata).(*structpb.Struct), + UserAgentName: n.userAgentName, + UserAgentVersionType: &v3corepb.Node_UserAgentVersion{UserAgentVersion: n.userAgentVersionType.UserAgentVersion}, + ClientFeatures: slices.Clone(n.clientFeatures), + } +} + +// SetFallbackBootstrapConfig sets the fallback bootstrap configuration to be +// used when the bootstrap environment variables are unset. +// +// The provided configuration must be valid JSON. Returns a non-nil error if +// parsing the provided configuration fails. +func SetFallbackBootstrapConfig(cfgJSON []byte) error { + config, err := newConfigFromContents(cfgJSON) + if err != nil { + return err + } + + configMu.Lock() + defer configMu.Unlock() + fallbackBootstrapCfg = config + return nil +} + +// UnsetFallbackBootstrapConfigForTesting unsets the fallback bootstrap +// configuration to be used when the bootstrap environment variables are unset. +// +// # Testing-Only +func UnsetFallbackBootstrapConfigForTesting() { + configMu.Lock() + defer configMu.Unlock() + fallbackBootstrapCfg = nil +} + +// fallbackBootstrapConfig returns the fallback bootstrap configuration +// that will be used by the xDS client when the bootstrap environment +// variables are unset. +func fallbackBootstrapConfig() *Config { + configMu.Lock() + defer configMu.Unlock() + return fallbackBootstrapCfg +} + +var ( + configMu sync.Mutex + fallbackBootstrapCfg *Config +) diff --git a/vendor/google.golang.org/grpc/internal/xds/bootstrap/logging.go b/vendor/google.golang.org/grpc/internal/xds/bootstrap/logging.go new file mode 100644 index 0000000000..fdd811dd8a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds/bootstrap/logging.go @@ -0,0 +1,28 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package bootstrap + +import ( + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[xds-bootstrap] " + +var logger = internalgrpclog.NewPrefixLogger(grpclog.Component("xds"), prefix) diff --git a/vendor/google.golang.org/grpc/internal/xds/bootstrap/template.go b/vendor/google.golang.org/grpc/internal/xds/bootstrap/template.go new file mode 100644 index 0000000000..ec1a30919e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds/bootstrap/template.go @@ -0,0 +1,47 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package bootstrap + +import ( + "net/url" + "strings" +) + +// PopulateResourceTemplate populates the given template using the target +// string. "%s", if exists in the template, will be replaced with target. +// +// If the template starts with "xdstp:", the replaced string will be %-encoded. +// But note that "/" is not percent encoded. +func PopulateResourceTemplate(template, target string) string { + if !strings.Contains(template, "%s") { + return template + } + if strings.HasPrefix(template, "xdstp:") { + target = percentEncode(target) + } + return strings.ReplaceAll(template, "%s", target) +} + +// percentEncode percent encode t, except for "/". See the tests for examples. +func percentEncode(t string) string { + segs := strings.Split(t, "/") + for i := range segs { + segs[i] = url.PathEscape(segs[i]) + } + return strings.Join(segs, "/") +} diff --git a/vendor/google.golang.org/grpc/internal/xds/bootstrap/tlscreds/bundle.go b/vendor/google.golang.org/grpc/internal/xds/bootstrap/tlscreds/bundle.go new file mode 100644 index 0000000000..02da3dbf34 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds/bootstrap/tlscreds/bundle.go @@ -0,0 +1,138 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tlscreds implements mTLS Credentials in xDS Bootstrap File. +// See gRFC A65: github.com/grpc/proposal/blob/master/A65-xds-mtls-creds-in-bootstrap.md. 
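To make the template rules above concrete, two worked examples of PopulateResourceTemplate (the authority and target strings are made up):

// No "xdstp:" prefix, so the target is substituted verbatim:
//   PopulateResourceTemplate("%s", "example.com:50051")
//     -> "example.com:50051"
// With an "xdstp:" prefix, each target segment is percent-encoded but "/" is kept:
//   PopulateResourceTemplate("xdstp://auth.example/envoy.config.listener.v3.Listener/%s", "svc name")
//     -> "xdstp://auth.example/envoy.config.listener.v3.Listener/svc%20name"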
+package tlscreds + +import ( + "context" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "net" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/tls/certprovider" + "google.golang.org/grpc/credentials/tls/certprovider/pemfile" + "google.golang.org/grpc/internal/grpcsync" +) + +// bundle is an implementation of credentials.Bundle which implements mTLS +// Credentials in xDS Bootstrap File. +type bundle struct { + transportCredentials credentials.TransportCredentials +} + +// NewBundle returns a credentials.Bundle which implements mTLS Credentials in xDS +// Bootstrap File. It delegates certificate loading to a file_watcher provider +// if either client certificates or server root CA is specified. The second +// return value is a close func that should be called when the caller no longer +// needs this bundle. +// See gRFC A65: github.com/grpc/proposal/blob/master/A65-xds-mtls-creds-in-bootstrap.md +func NewBundle(jd json.RawMessage) (credentials.Bundle, func(), error) { + cfg := &struct { + CertificateFile string `json:"certificate_file"` + CACertificateFile string `json:"ca_certificate_file"` + PrivateKeyFile string `json:"private_key_file"` + }{} + + if jd != nil { + if err := json.Unmarshal(jd, cfg); err != nil { + return nil, nil, fmt.Errorf("failed to unmarshal config: %v", err) + } + } // Else the config field is absent. Treat it as an empty config. + + if cfg.CACertificateFile == "" && cfg.CertificateFile == "" && cfg.PrivateKeyFile == "" { + // We cannot use (and do not need) a file_watcher provider in this case, + // and can simply directly use the TLS transport credentials. + // Quoting A65: + // + // > The only difference between the file-watcher certificate provider + // > config and this one is that in the file-watcher certificate + // > provider, at least one of the "certificate_file" or + // > "ca_certificate_file" fields must be specified, whereas in this + // > configuration, it is acceptable to specify neither one. + return &bundle{transportCredentials: credentials.NewTLS(&tls.Config{})}, func() {}, nil + } + // Otherwise we need to use a file_watcher provider to watch the CA, + // private and public keys. + + // The pemfile plugin (file_watcher) currently ignores BuildOptions. + provider, err := certprovider.GetProvider(pemfile.PluginName, jd, certprovider.BuildOptions{}) + if err != nil { + return nil, nil, err + } + return &bundle{ + transportCredentials: &reloadingCreds{provider: provider}, + }, grpcsync.OnceFunc(func() { provider.Close() }), nil +} + +func (t *bundle) TransportCredentials() credentials.TransportCredentials { + return t.transportCredentials +} + +func (t *bundle) PerRPCCredentials() credentials.PerRPCCredentials { + // mTLS provides transport credentials only. There are no per-RPC + // credentials. + return nil +} + +func (t *bundle) NewWithMode(string) (credentials.Bundle, error) { + // This bundle has a single mode which only uses TLS transport credentials, + // so there is no legitimate case where callers would call NewWithMode. + return nil, fmt.Errorf("xDS TLS credentials only support one mode") +} + +// reloadingCreds is a credentials.TransportCredentials for client +// side mTLS that reloads the server root CA certificate and the client +// certificates from the provider on every client handshake. This is necessary +// because the standard TLS credentials do not support reloading CA +// certificates. 
+type reloadingCreds struct { + provider certprovider.Provider +} + +func (c *reloadingCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + km, err := c.provider.KeyMaterial(ctx) + if err != nil { + return nil, nil, err + } + config := &tls.Config{ + RootCAs: km.Roots, + Certificates: km.Certs, + } + return credentials.NewTLS(config).ClientHandshake(ctx, authority, rawConn) +} + +func (c *reloadingCreds) Info() credentials.ProtocolInfo { + return credentials.ProtocolInfo{SecurityProtocol: "tls"} +} + +func (c *reloadingCreds) Clone() credentials.TransportCredentials { + return &reloadingCreds{provider: c.provider} +} + +func (c *reloadingCreds) OverrideServerName(string) error { + return errors.New("overriding server name is not supported by xDS client TLS credentials") +} + +func (c *reloadingCreds) ServerHandshake(net.Conn) (net.Conn, credentials.AuthInfo, error) { + return nil, nil, errors.New("server handshake is not supported by xDS client TLS credentials") +} diff --git a/vendor/google.golang.org/grpc/internal/xds/matcher/matcher_header.go b/vendor/google.golang.org/grpc/internal/xds/matcher/matcher_header.go new file mode 100644 index 0000000000..01433f4122 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds/matcher/matcher_header.go @@ -0,0 +1,274 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package matcher + +import ( + "fmt" + "regexp" + "strconv" + "strings" + + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/metadata" +) + +// HeaderMatcher is an interface for header matchers. These are +// documented in (EnvoyProxy link here?). These matchers will match on different +// aspects of HTTP header name/value pairs. +type HeaderMatcher interface { + Match(metadata.MD) bool + String() string +} + +// mdValuesFromOutgoingCtx retrieves metadata from context. If there are +// multiple values, the values are concatenated with "," (comma and no space). +// +// All header matchers only match against the comma-concatenated string. +func mdValuesFromOutgoingCtx(md metadata.MD, key string) (string, bool) { + vs, ok := md[key] + if !ok { + return "", false + } + return strings.Join(vs, ","), true +} + +// HeaderExactMatcher matches on an exact match of the value of the header. +type HeaderExactMatcher struct { + key string + exact string + invert bool +} + +// NewHeaderExactMatcher returns a new HeaderExactMatcher. +func NewHeaderExactMatcher(key, exact string, invert bool) *HeaderExactMatcher { + return &HeaderExactMatcher{key: key, exact: exact, invert: invert} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderExactMatcher. 
+func (hem *HeaderExactMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hem.key) + if !ok { + return false + } + return (v == hem.exact) != hem.invert +} + +func (hem *HeaderExactMatcher) String() string { + return fmt.Sprintf("headerExact:%v:%v", hem.key, hem.exact) +} + +// HeaderRegexMatcher matches on whether the entire request header value matches +// the regex. +type HeaderRegexMatcher struct { + key string + re *regexp.Regexp + invert bool +} + +// NewHeaderRegexMatcher returns a new HeaderRegexMatcher. +func NewHeaderRegexMatcher(key string, re *regexp.Regexp, invert bool) *HeaderRegexMatcher { + return &HeaderRegexMatcher{key: key, re: re, invert: invert} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderRegexMatcher. +func (hrm *HeaderRegexMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hrm.key) + if !ok { + return false + } + return grpcutil.FullMatchWithRegex(hrm.re, v) != hrm.invert +} + +func (hrm *HeaderRegexMatcher) String() string { + return fmt.Sprintf("headerRegex:%v:%v", hrm.key, hrm.re.String()) +} + +// HeaderRangeMatcher matches on whether the request header value is within the +// range. The header value must be an integer in base 10 notation. +type HeaderRangeMatcher struct { + key string + start, end int64 // represents [start, end). + invert bool +} + +// NewHeaderRangeMatcher returns a new HeaderRangeMatcher. +func NewHeaderRangeMatcher(key string, start, end int64, invert bool) *HeaderRangeMatcher { + return &HeaderRangeMatcher{key: key, start: start, end: end, invert: invert} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderRangeMatcher. +func (hrm *HeaderRangeMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hrm.key) + if !ok { + return false + } + if i, err := strconv.ParseInt(v, 10, 64); err == nil && i >= hrm.start && i < hrm.end { + return !hrm.invert + } + return hrm.invert +} + +func (hrm *HeaderRangeMatcher) String() string { + return fmt.Sprintf("headerRange:%v:[%d,%d)", hrm.key, hrm.start, hrm.end) +} + +// HeaderPresentMatcher will match based on whether the header is present in the +// whole request. +type HeaderPresentMatcher struct { + key string + present bool +} + +// NewHeaderPresentMatcher returns a new HeaderPresentMatcher. +func NewHeaderPresentMatcher(key string, present bool, invert bool) *HeaderPresentMatcher { + if invert { + present = !present + } + return &HeaderPresentMatcher{key: key, present: present} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderPresentMatcher. +func (hpm *HeaderPresentMatcher) Match(md metadata.MD) bool { + vs, ok := mdValuesFromOutgoingCtx(md, hpm.key) + present := ok && len(vs) > 0 // TODO: Are we sure we need this len(vs) > 0? + return present == hpm.present +} + +func (hpm *HeaderPresentMatcher) String() string { + return fmt.Sprintf("headerPresent:%v:%v", hpm.key, hpm.present) +} + +// HeaderPrefixMatcher matches on whether the prefix of the header value matches +// the prefix passed into this struct. +type HeaderPrefixMatcher struct { + key string + prefix string + invert bool +} + +// NewHeaderPrefixMatcher returns a new HeaderPrefixMatcher. 
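For illustration, a minimal sketch of driving one of the header matchers above against gRPC metadata (the header key and value are made up):

// Sketch only: hypothetical header key/value.
md := metadata.Pairs("x-env", "prod")
m := matcher.NewHeaderExactMatcher("x-env", "prod", false)
_ = m.Match(md) // true; passing invert=true to the constructor would flip the result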
+func NewHeaderPrefixMatcher(key string, prefix string, invert bool) *HeaderPrefixMatcher { + return &HeaderPrefixMatcher{key: key, prefix: prefix, invert: invert} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderPrefixMatcher. +func (hpm *HeaderPrefixMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hpm.key) + if !ok { + return false + } + return strings.HasPrefix(v, hpm.prefix) != hpm.invert +} + +func (hpm *HeaderPrefixMatcher) String() string { + return fmt.Sprintf("headerPrefix:%v:%v", hpm.key, hpm.prefix) +} + +// HeaderSuffixMatcher matches on whether the suffix of the header value matches +// the suffix passed into this struct. +type HeaderSuffixMatcher struct { + key string + suffix string + invert bool +} + +// NewHeaderSuffixMatcher returns a new HeaderSuffixMatcher. +func NewHeaderSuffixMatcher(key string, suffix string, invert bool) *HeaderSuffixMatcher { + return &HeaderSuffixMatcher{key: key, suffix: suffix, invert: invert} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderSuffixMatcher. +func (hsm *HeaderSuffixMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hsm.key) + if !ok { + return false + } + return strings.HasSuffix(v, hsm.suffix) != hsm.invert +} + +func (hsm *HeaderSuffixMatcher) String() string { + return fmt.Sprintf("headerSuffix:%v:%v", hsm.key, hsm.suffix) +} + +// HeaderContainsMatcher matches on whether the header value contains the +// value passed into this struct. +type HeaderContainsMatcher struct { + key string + contains string + invert bool +} + +// NewHeaderContainsMatcher returns a new HeaderContainsMatcher. key is the HTTP +// Header key to match on, and contains is the value that the header should +// should contain for a successful match. An empty contains string does not +// work, use HeaderPresentMatcher in that case. +func NewHeaderContainsMatcher(key string, contains string, invert bool) *HeaderContainsMatcher { + return &HeaderContainsMatcher{key: key, contains: contains, invert: invert} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderContainsMatcher. +func (hcm *HeaderContainsMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hcm.key) + if !ok { + return false + } + return strings.Contains(v, hcm.contains) != hcm.invert +} + +func (hcm *HeaderContainsMatcher) String() string { + return fmt.Sprintf("headerContains:%v%v", hcm.key, hcm.contains) +} + +// HeaderStringMatcher matches on whether the header value matches against the +// StringMatcher specified. +type HeaderStringMatcher struct { + key string + stringMatcher StringMatcher + invert bool +} + +// NewHeaderStringMatcher returns a new HeaderStringMatcher. +func NewHeaderStringMatcher(key string, sm StringMatcher, invert bool) *HeaderStringMatcher { + return &HeaderStringMatcher{ + key: key, + stringMatcher: sm, + invert: invert, + } +} + +// Match returns whether the passed in HTTP Headers match according to the +// specified StringMatcher. 
+func (hsm *HeaderStringMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hsm.key) + if !ok { + return false + } + return hsm.stringMatcher.Match(v) != hsm.invert +} + +func (hsm *HeaderStringMatcher) String() string { + return fmt.Sprintf("headerString:%v:%v", hsm.key, hsm.stringMatcher) +} diff --git a/vendor/google.golang.org/grpc/internal/xds/matcher/string_matcher.go b/vendor/google.golang.org/grpc/internal/xds/matcher/string_matcher.go new file mode 100644 index 0000000000..c138f78735 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds/matcher/string_matcher.go @@ -0,0 +1,184 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package matcher contains types that need to be shared between code under +// google.golang.org/grpc/xds/... and the rest of gRPC. +package matcher + +import ( + "errors" + "fmt" + "regexp" + "strings" + + v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + "google.golang.org/grpc/internal/grpcutil" +) + +// StringMatcher contains match criteria for matching a string, and is an +// internal representation of the `StringMatcher` proto defined at +// https://github.com/envoyproxy/envoy/blob/main/api/envoy/type/matcher/v3/string.proto. +type StringMatcher struct { + // Since these match fields are part of a `oneof` in the corresponding xDS + // proto, only one of them is expected to be set. + exactMatch *string + prefixMatch *string + suffixMatch *string + regexMatch *regexp.Regexp + containsMatch *string + // If true, indicates the exact/prefix/suffix/contains matching should be + // case insensitive. This has no effect on the regex match. + ignoreCase bool +} + +// Match returns true if input matches the criteria in the given StringMatcher. +func (sm StringMatcher) Match(input string) bool { + if sm.ignoreCase { + input = strings.ToLower(input) + } + switch { + case sm.exactMatch != nil: + return input == *sm.exactMatch + case sm.prefixMatch != nil: + return strings.HasPrefix(input, *sm.prefixMatch) + case sm.suffixMatch != nil: + return strings.HasSuffix(input, *sm.suffixMatch) + case sm.regexMatch != nil: + return grpcutil.FullMatchWithRegex(sm.regexMatch, input) + case sm.containsMatch != nil: + return strings.Contains(input, *sm.containsMatch) + } + return false +} + +// StringMatcherFromProto is a helper function to create a StringMatcher from +// the corresponding StringMatcher proto. +// +// Returns a non-nil error if matcherProto is invalid. 
+func StringMatcherFromProto(matcherProto *v3matcherpb.StringMatcher) (StringMatcher, error) { + if matcherProto == nil { + return StringMatcher{}, errors.New("input StringMatcher proto is nil") + } + + matcher := StringMatcher{ignoreCase: matcherProto.GetIgnoreCase()} + switch mt := matcherProto.GetMatchPattern().(type) { + case *v3matcherpb.StringMatcher_Exact: + matcher.exactMatch = &mt.Exact + if matcher.ignoreCase { + *matcher.exactMatch = strings.ToLower(*matcher.exactMatch) + } + case *v3matcherpb.StringMatcher_Prefix: + if matcherProto.GetPrefix() == "" { + return StringMatcher{}, errors.New("empty prefix is not allowed in StringMatcher") + } + matcher.prefixMatch = &mt.Prefix + if matcher.ignoreCase { + *matcher.prefixMatch = strings.ToLower(*matcher.prefixMatch) + } + case *v3matcherpb.StringMatcher_Suffix: + if matcherProto.GetSuffix() == "" { + return StringMatcher{}, errors.New("empty suffix is not allowed in StringMatcher") + } + matcher.suffixMatch = &mt.Suffix + if matcher.ignoreCase { + *matcher.suffixMatch = strings.ToLower(*matcher.suffixMatch) + } + case *v3matcherpb.StringMatcher_SafeRegex: + regex := matcherProto.GetSafeRegex().GetRegex() + re, err := regexp.Compile(regex) + if err != nil { + return StringMatcher{}, fmt.Errorf("safe_regex matcher %q is invalid", regex) + } + matcher.regexMatch = re + case *v3matcherpb.StringMatcher_Contains: + if matcherProto.GetContains() == "" { + return StringMatcher{}, errors.New("empty contains is not allowed in StringMatcher") + } + matcher.containsMatch = &mt.Contains + if matcher.ignoreCase { + *matcher.containsMatch = strings.ToLower(*matcher.containsMatch) + } + default: + return StringMatcher{}, fmt.Errorf("unrecognized string matcher: %+v", matcherProto) + } + return matcher, nil +} + +// StringMatcherForTesting is a helper function to create a StringMatcher based +// on the given arguments. Intended only for testing purposes. +func StringMatcherForTesting(exact, prefix, suffix, contains *string, regex *regexp.Regexp, ignoreCase bool) StringMatcher { + sm := StringMatcher{ + exactMatch: exact, + prefixMatch: prefix, + suffixMatch: suffix, + regexMatch: regex, + containsMatch: contains, + ignoreCase: ignoreCase, + } + if ignoreCase { + switch { + case sm.exactMatch != nil: + *sm.exactMatch = strings.ToLower(*exact) + case sm.prefixMatch != nil: + *sm.prefixMatch = strings.ToLower(*prefix) + case sm.suffixMatch != nil: + *sm.suffixMatch = strings.ToLower(*suffix) + case sm.containsMatch != nil: + *sm.containsMatch = strings.ToLower(*contains) + } + } + return sm +} + +// ExactMatch returns the value of the configured exact match or an empty string +// if exact match criteria was not specified. +func (sm StringMatcher) ExactMatch() string { + if sm.exactMatch != nil { + return *sm.exactMatch + } + return "" +} + +// Equal returns true if other and sm are equivalent to each other. 
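As a usage sketch for the constructor above (the prefix value is a made-up example), building a case-insensitive prefix matcher from its proto form:

// Sketch only: hypothetical prefix.
sm, err := matcher.StringMatcherFromProto(&v3matcherpb.StringMatcher{
	MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "Spiffe://"},
	IgnoreCase:   true,
})
if err != nil {
	// handle the error
}
_ = sm.Match("spiffe://example.org/workload") // true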
+func (sm StringMatcher) Equal(other StringMatcher) bool { + if sm.ignoreCase != other.ignoreCase { + return false + } + + if (sm.exactMatch != nil) != (other.exactMatch != nil) || + (sm.prefixMatch != nil) != (other.prefixMatch != nil) || + (sm.suffixMatch != nil) != (other.suffixMatch != nil) || + (sm.regexMatch != nil) != (other.regexMatch != nil) || + (sm.containsMatch != nil) != (other.containsMatch != nil) { + return false + } + + switch { + case sm.exactMatch != nil: + return *sm.exactMatch == *other.exactMatch + case sm.prefixMatch != nil: + return *sm.prefixMatch == *other.prefixMatch + case sm.suffixMatch != nil: + return *sm.suffixMatch == *other.suffixMatch + case sm.regexMatch != nil: + return sm.regexMatch.String() == other.regexMatch.String() + case sm.containsMatch != nil: + return *sm.containsMatch == *other.containsMatch + } + return true +} diff --git a/vendor/google.golang.org/grpc/internal/xds/rbac/converter.go b/vendor/google.golang.org/grpc/internal/xds/rbac/converter.go new file mode 100644 index 0000000000..fb599954a6 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds/rbac/converter.go @@ -0,0 +1,101 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package rbac + +import ( + "encoding/json" + "fmt" + "strings" + + v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + v3auditloggersstreampb "github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3" + "google.golang.org/grpc/authz/audit" + "google.golang.org/grpc/authz/audit/stdout" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/structpb" +) + +func buildLogger(loggerConfig *v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig) (audit.Logger, error) { + if loggerConfig.GetAuditLogger().GetTypedConfig() == nil { + return nil, fmt.Errorf("missing required field: TypedConfig") + } + customConfig, loggerName, err := getCustomConfig(loggerConfig.AuditLogger.TypedConfig) + if err != nil { + return nil, err + } + if loggerName == "" { + return nil, fmt.Errorf("field TypedConfig.TypeURL cannot be an empty string") + } + factory := audit.GetLoggerBuilder(loggerName) + if factory == nil { + if loggerConfig.IsOptional { + return nil, nil + } + return nil, fmt.Errorf("no builder registered for %v", loggerName) + } + auditLoggerConfig, err := factory.ParseLoggerConfig(customConfig) + if err != nil { + return nil, fmt.Errorf("custom config could not be parsed by registered factory. 
error: %v", err) + } + auditLogger := factory.Build(auditLoggerConfig) + return auditLogger, nil +} + +func getCustomConfig(config *anypb.Any) (json.RawMessage, string, error) { + c, err := config.UnmarshalNew() + if err != nil { + return nil, "", err + } + switch m := c.(type) { + case *v1xdsudpatypepb.TypedStruct: + return convertCustomConfig(m.TypeUrl, m.Value) + case *v3xdsxdstypepb.TypedStruct: + return convertCustomConfig(m.TypeUrl, m.Value) + case *v3auditloggersstreampb.StdoutAuditLog: + return convertStdoutConfig(m) + } + return nil, "", fmt.Errorf("custom config not implemented for type [%v]", config.GetTypeUrl()) +} + +func convertStdoutConfig(config *v3auditloggersstreampb.StdoutAuditLog) (json.RawMessage, string, error) { + json, err := protojson.Marshal(config) + return json, stdout.Name, err +} + +func convertCustomConfig(typeURL string, s *structpb.Struct) (json.RawMessage, string, error) { + // The gRPC policy name will be the "type name" part of the value of the + // type_url field in the TypedStruct. We get this by using the part after + // the last / character. Can assume a valid type_url from the control plane. + urls := strings.Split(typeURL, "/") + if len(urls) == 0 { + return nil, "", fmt.Errorf("error converting custom audit logger %v for %v: typeURL must have a url-like format with the typeName being the value after the last /", typeURL, s) + } + name := urls[len(urls)-1] + + rawJSON := []byte("{}") + var err error + if s != nil { + rawJSON, err = json.Marshal(s) + if err != nil { + return nil, "", fmt.Errorf("error converting custom audit logger %v for %v: %v", typeURL, s, err) + } + } + return rawJSON, name, nil +} diff --git a/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go b/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go new file mode 100644 index 0000000000..e1c15018bd --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go @@ -0,0 +1,432 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package rbac + +import ( + "errors" + "fmt" + "net" + "regexp" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + v3route_componentspb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + internalmatcher "google.golang.org/grpc/internal/xds/matcher" +) + +// matcher is an interface that takes data about incoming RPC's and returns +// whether it matches with whatever matcher implements this interface. +type matcher interface { + match(data *rpcData) bool +} + +// policyMatcher helps determine whether an incoming RPC call matches a policy. +// A policy is a logical role (e.g. Service Admin), which is comprised of +// permissions and principals. 
A principal is an identity (or identities) for a +// downstream subject which are assigned the policy (role), and a permission is +// an action(s) that a principal(s) can take. A policy matches if both a +// permission and a principal match, which will be determined by the child or +// permissions and principal matchers. policyMatcher implements the matcher +// interface. +type policyMatcher struct { + permissions *orMatcher + principals *orMatcher +} + +func newPolicyMatcher(policy *v3rbacpb.Policy) (*policyMatcher, error) { + permissions, err := matchersFromPermissions(policy.Permissions) + if err != nil { + return nil, err + } + principals, err := matchersFromPrincipals(policy.Principals) + if err != nil { + return nil, err + } + return &policyMatcher{ + permissions: &orMatcher{matchers: permissions}, + principals: &orMatcher{matchers: principals}, + }, nil +} + +func (pm *policyMatcher) match(data *rpcData) bool { + // A policy matches if and only if at least one of its permissions match the + // action taking place AND at least one if its principals match the + // downstream peer. + return pm.permissions.match(data) && pm.principals.match(data) +} + +// matchersFromPermissions takes a list of permissions (can also be +// a single permission, e.g. from a not matcher which is logically !permission) +// and returns a list of matchers which correspond to that permission. This will +// be called in many instances throughout the initial construction of the RBAC +// engine from the AND and OR matchers and also from the NOT matcher. +func matchersFromPermissions(permissions []*v3rbacpb.Permission) ([]matcher, error) { + var matchers []matcher + for _, permission := range permissions { + switch permission.GetRule().(type) { + case *v3rbacpb.Permission_AndRules: + mList, err := matchersFromPermissions(permission.GetAndRules().Rules) + if err != nil { + return nil, err + } + matchers = append(matchers, &andMatcher{matchers: mList}) + case *v3rbacpb.Permission_OrRules: + mList, err := matchersFromPermissions(permission.GetOrRules().Rules) + if err != nil { + return nil, err + } + matchers = append(matchers, &orMatcher{matchers: mList}) + case *v3rbacpb.Permission_Any: + matchers = append(matchers, &alwaysMatcher{}) + case *v3rbacpb.Permission_Header: + m, err := newHeaderMatcher(permission.GetHeader()) + if err != nil { + return nil, err + } + matchers = append(matchers, m) + case *v3rbacpb.Permission_UrlPath: + m, err := newURLPathMatcher(permission.GetUrlPath()) + if err != nil { + return nil, err + } + matchers = append(matchers, m) + case *v3rbacpb.Permission_DestinationIp: + // Due to this being on server side, the destination IP is the local + // IP. + m, err := newLocalIPMatcher(permission.GetDestinationIp()) + if err != nil { + return nil, err + } + matchers = append(matchers, m) + case *v3rbacpb.Permission_DestinationPort: + matchers = append(matchers, newPortMatcher(permission.GetDestinationPort())) + case *v3rbacpb.Permission_NotRule: + mList, err := matchersFromPermissions([]*v3rbacpb.Permission{{Rule: permission.GetNotRule().Rule}}) + if err != nil { + return nil, err + } + matchers = append(matchers, ¬Matcher{matcherToNot: mList[0]}) + case *v3rbacpb.Permission_Metadata: + // Never matches - so no-op if not inverted, always match if + // inverted. 
+ if permission.GetMetadata().GetInvert() { // Test metadata being no-op and also metadata with invert always matching + matchers = append(matchers, &alwaysMatcher{}) + } + case *v3rbacpb.Permission_RequestedServerName: + // Not supported in gRPC RBAC currently - a permission typed as + // requested server name in the initial config will be a no-op. + } + } + return matchers, nil +} + +func matchersFromPrincipals(principals []*v3rbacpb.Principal) ([]matcher, error) { + var matchers []matcher + for _, principal := range principals { + switch principal.GetIdentifier().(type) { + case *v3rbacpb.Principal_AndIds: + mList, err := matchersFromPrincipals(principal.GetAndIds().Ids) + if err != nil { + return nil, err + } + matchers = append(matchers, &andMatcher{matchers: mList}) + case *v3rbacpb.Principal_OrIds: + mList, err := matchersFromPrincipals(principal.GetOrIds().Ids) + if err != nil { + return nil, err + } + matchers = append(matchers, &orMatcher{matchers: mList}) + case *v3rbacpb.Principal_Any: + matchers = append(matchers, &alwaysMatcher{}) + case *v3rbacpb.Principal_Authenticated_: + authenticatedMatcher, err := newAuthenticatedMatcher(principal.GetAuthenticated()) + if err != nil { + return nil, err + } + matchers = append(matchers, authenticatedMatcher) + case *v3rbacpb.Principal_DirectRemoteIp: + m, err := newRemoteIPMatcher(principal.GetDirectRemoteIp()) + if err != nil { + return nil, err + } + matchers = append(matchers, m) + case *v3rbacpb.Principal_Header: + // Do we need an error here? + m, err := newHeaderMatcher(principal.GetHeader()) + if err != nil { + return nil, err + } + matchers = append(matchers, m) + case *v3rbacpb.Principal_UrlPath: + m, err := newURLPathMatcher(principal.GetUrlPath()) + if err != nil { + return nil, err + } + matchers = append(matchers, m) + case *v3rbacpb.Principal_NotId: + mList, err := matchersFromPrincipals([]*v3rbacpb.Principal{{Identifier: principal.GetNotId().Identifier}}) + if err != nil { + return nil, err + } + matchers = append(matchers, ¬Matcher{matcherToNot: mList[0]}) + case *v3rbacpb.Principal_SourceIp: + // The source ip principal identifier is deprecated. Thus, a + // principal typed as a source ip in the identifier will be a no-op. + // The config should use DirectRemoteIp instead. + case *v3rbacpb.Principal_RemoteIp: + // RBAC in gRPC treats direct_remote_ip and remote_ip as logically + // equivalent, as per A41. + m, err := newRemoteIPMatcher(principal.GetRemoteIp()) + if err != nil { + return nil, err + } + matchers = append(matchers, m) + case *v3rbacpb.Principal_Metadata: + // Not supported in gRPC RBAC currently - a principal typed as + // Metadata in the initial config will be a no-op. + } + } + return matchers, nil +} + +// orMatcher is a matcher where it successfully matches if one of it's +// children successfully match. It also logically represents a principal or +// permission, but can also be it's own entity further down the tree of +// matchers. orMatcher implements the matcher interface. +type orMatcher struct { + matchers []matcher +} + +func (om *orMatcher) match(data *rpcData) bool { + // Range through child matchers and pass in data about incoming RPC, and + // only one child matcher has to match to be logically successful. + for _, m := range om.matchers { + if m.match(data) { + return true + } + } + return false +} + +// andMatcher is a matcher that is successful if every child matcher +// matches. andMatcher implements the matcher interface. 
+type andMatcher struct { + matchers []matcher +} + +func (am *andMatcher) match(data *rpcData) bool { + for _, m := range am.matchers { + if !m.match(data) { + return false + } + } + return true +} + +// alwaysMatcher is a matcher that will always match. This logically +// represents an any rule for a permission or a principal. alwaysMatcher +// implements the matcher interface. +type alwaysMatcher struct { +} + +func (am *alwaysMatcher) match(*rpcData) bool { + return true +} + +// notMatcher is a matcher that nots an underlying matcher. notMatcher +// implements the matcher interface. +type notMatcher struct { + matcherToNot matcher +} + +func (nm *notMatcher) match(data *rpcData) bool { + return !nm.matcherToNot.match(data) +} + +// headerMatcher is a matcher that matches on incoming HTTP Headers present +// in the incoming RPC. headerMatcher implements the matcher interface. +type headerMatcher struct { + matcher internalmatcher.HeaderMatcher +} + +func newHeaderMatcher(headerMatcherConfig *v3route_componentspb.HeaderMatcher) (*headerMatcher, error) { + var m internalmatcher.HeaderMatcher + switch headerMatcherConfig.HeaderMatchSpecifier.(type) { + case *v3route_componentspb.HeaderMatcher_ExactMatch: + m = internalmatcher.NewHeaderExactMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetExactMatch(), headerMatcherConfig.InvertMatch) + case *v3route_componentspb.HeaderMatcher_SafeRegexMatch: + regex, err := regexp.Compile(headerMatcherConfig.GetSafeRegexMatch().Regex) + if err != nil { + return nil, err + } + m = internalmatcher.NewHeaderRegexMatcher(headerMatcherConfig.Name, regex, headerMatcherConfig.InvertMatch) + case *v3route_componentspb.HeaderMatcher_RangeMatch: + m = internalmatcher.NewHeaderRangeMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetRangeMatch().Start, headerMatcherConfig.GetRangeMatch().End, headerMatcherConfig.InvertMatch) + case *v3route_componentspb.HeaderMatcher_PresentMatch: + m = internalmatcher.NewHeaderPresentMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetPresentMatch(), headerMatcherConfig.InvertMatch) + case *v3route_componentspb.HeaderMatcher_PrefixMatch: + m = internalmatcher.NewHeaderPrefixMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetPrefixMatch(), headerMatcherConfig.InvertMatch) + case *v3route_componentspb.HeaderMatcher_SuffixMatch: + m = internalmatcher.NewHeaderSuffixMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetSuffixMatch(), headerMatcherConfig.InvertMatch) + case *v3route_componentspb.HeaderMatcher_ContainsMatch: + m = internalmatcher.NewHeaderContainsMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetContainsMatch(), headerMatcherConfig.InvertMatch) + case *v3route_componentspb.HeaderMatcher_StringMatch: + sm, err := internalmatcher.StringMatcherFromProto(headerMatcherConfig.GetStringMatch()) + if err != nil { + return nil, fmt.Errorf("invalid string matcher %+v: %v", headerMatcherConfig.GetStringMatch(), err) + } + m = internalmatcher.NewHeaderStringMatcher(headerMatcherConfig.Name, sm, headerMatcherConfig.InvertMatch) + default: + return nil, errors.New("unknown header matcher type") + } + return &headerMatcher{matcher: m}, nil +} + +func (hm *headerMatcher) match(data *rpcData) bool { + return hm.matcher.Match(data.md) +} + +// urlPathMatcher matches on the URL Path of the incoming RPC. In gRPC, this +// logically maps to the full method name the RPC is calling on the server side. +// urlPathMatcher implements the matcher interface. 
+type urlPathMatcher struct { + stringMatcher internalmatcher.StringMatcher +} + +func newURLPathMatcher(pathMatcher *v3matcherpb.PathMatcher) (*urlPathMatcher, error) { + stringMatcher, err := internalmatcher.StringMatcherFromProto(pathMatcher.GetPath()) + if err != nil { + return nil, err + } + return &urlPathMatcher{stringMatcher: stringMatcher}, nil +} + +func (upm *urlPathMatcher) match(data *rpcData) bool { + return upm.stringMatcher.Match(data.fullMethod) +} + +// remoteIPMatcher and localIPMatcher both are matchers that match against +// a CIDR Range. Two different matchers are needed as the remote and destination +// ip addresses come from different parts of the data about incoming RPC's +// passed in. Matching a CIDR Range means to determine whether the IP Address +// falls within the CIDR Range or not. They both implement the matcher +// interface. +type remoteIPMatcher struct { + // ipNet represents the CidrRange that this matcher was configured with. + // This is what will remote and destination IP's will be matched against. + ipNet *net.IPNet +} + +func newRemoteIPMatcher(cidrRange *v3corepb.CidrRange) (*remoteIPMatcher, error) { + // Convert configuration to a cidrRangeString, as Go standard library has + // methods that parse cidr string. + cidrRangeString := fmt.Sprintf("%s/%d", cidrRange.AddressPrefix, cidrRange.PrefixLen.Value) + _, ipNet, err := net.ParseCIDR(cidrRangeString) + if err != nil { + return nil, err + } + return &remoteIPMatcher{ipNet: ipNet}, nil +} + +func (sim *remoteIPMatcher) match(data *rpcData) bool { + return sim.ipNet.Contains(net.IP(net.ParseIP(data.peerInfo.Addr.String()))) +} + +type localIPMatcher struct { + ipNet *net.IPNet +} + +func newLocalIPMatcher(cidrRange *v3corepb.CidrRange) (*localIPMatcher, error) { + cidrRangeString := fmt.Sprintf("%s/%d", cidrRange.AddressPrefix, cidrRange.PrefixLen.Value) + _, ipNet, err := net.ParseCIDR(cidrRangeString) + if err != nil { + return nil, err + } + return &localIPMatcher{ipNet: ipNet}, nil +} + +func (dim *localIPMatcher) match(data *rpcData) bool { + return dim.ipNet.Contains(net.IP(net.ParseIP(data.localAddr.String()))) +} + +// portMatcher matches on whether the destination port of the RPC matches the +// destination port this matcher was instantiated with. portMatcher +// implements the matcher interface. +type portMatcher struct { + destinationPort uint32 +} + +func newPortMatcher(destinationPort uint32) *portMatcher { + return &portMatcher{destinationPort: destinationPort} +} + +func (pm *portMatcher) match(data *rpcData) bool { + return data.destinationPort == pm.destinationPort +} + +// authenticatedMatcher matches on the name of the Principal. If set, the URI +// SAN or DNS SAN in that order is used from the certificate, otherwise the +// subject field is used. If unset, it applies to any user that is +// authenticated. authenticatedMatcher implements the matcher interface. +type authenticatedMatcher struct { + stringMatcher *internalmatcher.StringMatcher +} + +func newAuthenticatedMatcher(authenticatedMatcherConfig *v3rbacpb.Principal_Authenticated) (*authenticatedMatcher, error) { + // Represents this line in the RBAC documentation = "If unset, it applies to + // any user that is authenticated" (see package-level comments). 
+ if authenticatedMatcherConfig.PrincipalName == nil { + return &authenticatedMatcher{}, nil + } + stringMatcher, err := internalmatcher.StringMatcherFromProto(authenticatedMatcherConfig.PrincipalName) + if err != nil { + return nil, err + } + return &authenticatedMatcher{stringMatcher: &stringMatcher}, nil +} + +func (am *authenticatedMatcher) match(data *rpcData) bool { + if data.authType != "tls" { + // Connection is not authenticated. + return false + } + if am.stringMatcher == nil { + // Allows any authenticated user. + return true + } + // "If there is no client certificate (thus no SAN nor Subject), check if "" + // (empty string) matches. If it matches, the principal_name is said to + // match" - A41 + if len(data.certs) == 0 { + return am.stringMatcher.Match("") + } + cert := data.certs[0] + // The order of matching as per the RBAC documentation (see package-level comments) + // is as follows: URI SANs, DNS SANs, and then subject name. + for _, uriSAN := range cert.URIs { + if am.stringMatcher.Match(uriSAN.String()) { + return true + } + } + for _, dnsSAN := range cert.DNSNames { + if am.stringMatcher.Match(dnsSAN) { + return true + } + } + return am.stringMatcher.Match(cert.Subject.String()) +} diff --git a/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go b/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go new file mode 100644 index 0000000000..344052cb04 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go @@ -0,0 +1,316 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package rbac provides service-level and method-level access control for a +// service. See +// https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/rbac/v3/rbac.proto#role-based-access-control-rbac +// for documentation. +package rbac + +import ( + "context" + "crypto/x509" + "errors" + "fmt" + "net" + "strconv" + + "google.golang.org/grpc" + "google.golang.org/grpc/authz/audit" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" + + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" +) + +var logger = grpclog.Component("rbac") + +var getConnection = transport.GetConnection + +// ChainEngine represents a chain of RBAC Engines, used to make authorization +// decisions on incoming RPCs. +type ChainEngine struct { + chainedEngines []*engine +} + +// NewChainEngine returns a chain of RBAC engines, used to make authorization +// decisions on incoming RPCs. Returns a non-nil error for invalid policies. 
+func NewChainEngine(policies []*v3rbacpb.RBAC, policyName string) (*ChainEngine, error) { + engines := make([]*engine, 0, len(policies)) + for _, policy := range policies { + engine, err := newEngine(policy, policyName) + if err != nil { + return nil, err + } + engines = append(engines, engine) + } + return &ChainEngine{chainedEngines: engines}, nil +} + +func (cre *ChainEngine) logRequestDetails(rpcData *rpcData) { + if logger.V(2) { + logger.Infof("checking request: url path=%s", rpcData.fullMethod) + if len(rpcData.certs) > 0 { + cert := rpcData.certs[0] + logger.Infof("uri sans=%q, dns sans=%q, subject=%v", cert.URIs, cert.DNSNames, cert.Subject) + } + } +} + +// IsAuthorized determines if an incoming RPC is authorized based on the chain of RBAC +// engines and their associated actions. +// +// Errors returned by this function are compatible with the status package. +func (cre *ChainEngine) IsAuthorized(ctx context.Context) error { + // This conversion step (i.e. pulling things out of ctx) can be done once, + // and then be used for the whole chain of RBAC Engines. + rpcData, err := newRPCData(ctx) + if err != nil { + logger.Errorf("newRPCData: %v", err) + return status.Errorf(codes.Internal, "gRPC RBAC: %v", err) + } + for _, engine := range cre.chainedEngines { + matchingPolicyName, ok := engine.findMatchingPolicy(rpcData) + if logger.V(2) && ok { + logger.Infof("incoming RPC matched to policy %v in engine with action %v", matchingPolicyName, engine.action) + } + + switch { + case engine.action == v3rbacpb.RBAC_ALLOW && !ok: + cre.logRequestDetails(rpcData) + engine.doAuditLogging(rpcData, matchingPolicyName, false) + return status.Errorf(codes.PermissionDenied, "incoming RPC did not match an allow policy") + case engine.action == v3rbacpb.RBAC_DENY && ok: + cre.logRequestDetails(rpcData) + engine.doAuditLogging(rpcData, matchingPolicyName, false) + return status.Errorf(codes.PermissionDenied, "incoming RPC matched a deny policy %q", matchingPolicyName) + } + // Every policy in the engine list must be queried. Thus, iterate to the + // next policy. + engine.doAuditLogging(rpcData, matchingPolicyName, true) + } + // If the incoming RPC gets through all of the engines successfully (i.e. + // doesn't not match an allow or match a deny engine), the RPC is authorized + // to proceed. + return nil +} + +// engine is used for matching incoming RPCs to policies. +type engine struct { + // TODO(gtcooke94) - differentiate between `policyName`, `policies`, and `rules` + policyName string + policies map[string]*policyMatcher + // action must be ALLOW or DENY. + action v3rbacpb.RBAC_Action + auditLoggers []audit.Logger + auditCondition v3rbacpb.RBAC_AuditLoggingOptions_AuditCondition +} + +// newEngine creates an RBAC Engine based on the contents of a policy. Returns a +// non-nil error if the policy is invalid. 
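To make the engine semantics concrete, here is a sketch that builds a single ALLOW engine and applies it from a unary interceptor. The rbac package sits under gRPC's internal tree, so applications normally reach it only through gRPC's own authz/xDS plumbing; the policy name, interceptor wiring and elided imports (context, grpc, plus the v3rbacpb alias already used in this file) are assumptions.

func newAuthzInterceptor() (grpc.UnaryServerInterceptor, error) {
	// Allow every RPC, but only from authenticated (TLS) peers. A nil
	// PrincipalName means "any authenticated user" (see newAuthenticatedMatcher).
	policies := []*v3rbacpb.RBAC{{
		Action: v3rbacpb.RBAC_ALLOW,
		Policies: map[string]*v3rbacpb.Policy{
			"any-authenticated": {
				Permissions: []*v3rbacpb.Permission{{Rule: &v3rbacpb.Permission_Any{Any: true}}},
				Principals: []*v3rbacpb.Principal{{Identifier: &v3rbacpb.Principal_Authenticated_{
					Authenticated: &v3rbacpb.Principal_Authenticated{},
				}}},
			},
		},
	}}
	engine, err := rbac.NewChainEngine(policies, "example-authz")
	if err != nil {
		return nil, err
	}
	// IsAuthorized reads metadata, peer info and the connection out of the RPC
	// context, so it is meant to run inside a server interceptor.
	return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
		if err := engine.IsAuthorized(ctx); err != nil {
			return nil, err // a status error: PermissionDenied or Internal
		}
		return handler(ctx, req)
	}, nil
}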
+func newEngine(config *v3rbacpb.RBAC, policyName string) (*engine, error) { + a := config.GetAction() + if a != v3rbacpb.RBAC_ALLOW && a != v3rbacpb.RBAC_DENY { + return nil, fmt.Errorf("unsupported action %s", config.Action) + } + + policies := make(map[string]*policyMatcher, len(config.GetPolicies())) + for name, policy := range config.GetPolicies() { + matcher, err := newPolicyMatcher(policy) + if err != nil { + return nil, err + } + policies[name] = matcher + } + + auditLoggers, auditCondition, err := parseAuditOptions(config.GetAuditLoggingOptions()) + if err != nil { + return nil, err + } + return &engine{ + policyName: policyName, + policies: policies, + action: a, + auditLoggers: auditLoggers, + auditCondition: auditCondition, + }, nil +} + +func parseAuditOptions(opts *v3rbacpb.RBAC_AuditLoggingOptions) ([]audit.Logger, v3rbacpb.RBAC_AuditLoggingOptions_AuditCondition, error) { + if opts == nil { + return nil, v3rbacpb.RBAC_AuditLoggingOptions_NONE, nil + } + var auditLoggers []audit.Logger + for _, logger := range opts.LoggerConfigs { + auditLogger, err := buildLogger(logger) + if err != nil { + return nil, v3rbacpb.RBAC_AuditLoggingOptions_NONE, err + } + if auditLogger == nil { + // This occurs when the audit logger is not registered but also + // marked optional. + continue + } + auditLoggers = append(auditLoggers, auditLogger) + } + return auditLoggers, opts.GetAuditCondition(), nil + +} + +// findMatchingPolicy determines if an incoming RPC matches a policy. On a +// successful match, it returns the name of the matching policy and a true bool +// to specify that there was a matching policy found. It returns false in +// the case of not finding a matching policy. +func (e *engine) findMatchingPolicy(rpcData *rpcData) (string, bool) { + for policy, matcher := range e.policies { + if matcher.match(rpcData) { + return policy, true + } + } + return "", false +} + +// newRPCData takes an incoming context (should be a context representing state +// needed for server RPC Call with metadata, peer info (used for source ip/port +// and TLS information) and connection (used for destination ip/port) piped into +// it) and the method name of the Service being called server side and populates +// an rpcData struct ready to be passed to the RBAC Engine to find a matching +// policy. +func newRPCData(ctx context.Context) (*rpcData, error) { + // The caller should populate all of these fields (i.e. for empty headers, + // pipe an empty md into context). + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, errors.New("missing metadata in incoming context") + } + // ":method can be hard-coded to POST if unavailable" - A41 + md[":method"] = []string{"POST"} + // "If the transport exposes TE in Metadata, then RBAC must special-case the + // header to treat it as not present." - A41 + delete(md, "TE") + + pi, ok := peer.FromContext(ctx) + if !ok { + return nil, errors.New("missing peer info in incoming context") + } + + // The methodName will be available in the passed in ctx from a unary or streaming + // interceptor, as grpc.Server pipes in a transport stream which contains the methodName + // into contexts available in both unary or streaming interceptors. + mn, ok := grpc.Method(ctx) + if !ok { + return nil, errors.New("missing method in incoming context") + } + + // The connection is needed in order to find the destination address and + // port of the incoming RPC Call. 
+ conn := getConnection(ctx) + if conn == nil { + return nil, errors.New("missing connection in incoming context") + } + _, dPort, err := net.SplitHostPort(conn.LocalAddr().String()) + if err != nil { + return nil, fmt.Errorf("error parsing local address: %v", err) + } + dp, err := strconv.ParseUint(dPort, 10, 32) + if err != nil { + return nil, fmt.Errorf("error parsing local address: %v", err) + } + + var authType string + var peerCertificates []*x509.Certificate + if tlsInfo, ok := pi.AuthInfo.(credentials.TLSInfo); ok { + authType = pi.AuthInfo.AuthType() + peerCertificates = tlsInfo.State.PeerCertificates + } + + return &rpcData{ + md: md, + peerInfo: pi, + fullMethod: mn, + destinationPort: uint32(dp), + localAddr: conn.LocalAddr(), + authType: authType, + certs: peerCertificates, + }, nil +} + +// rpcData wraps data pulled from an incoming RPC that the RBAC engine needs to +// find a matching policy. +type rpcData struct { + // md is the HTTP Headers that are present in the incoming RPC. + md metadata.MD + // peerInfo is information about the downstream peer. + peerInfo *peer.Peer + // fullMethod is the method name being called on the upstream service. + fullMethod string + // destinationPort is the port that the RPC is being sent to on the + // server. + destinationPort uint32 + // localAddr is the address that the RPC is being sent to. + localAddr net.Addr + // authType is the type of authentication e.g. "tls". + authType string + // certs are the certificates presented by the peer during a TLS + // handshake. + certs []*x509.Certificate +} + +func (e *engine) doAuditLogging(rpcData *rpcData, rule string, authorized bool) { + // In the RBAC world, we need to have a SPIFFE ID as the principal for this + // to be meaningful + principal := "" + if rpcData.peerInfo != nil { + // If AuthType = tls, then we can cast AuthInfo to TLSInfo. + if tlsInfo, ok := rpcData.peerInfo.AuthInfo.(credentials.TLSInfo); ok { + if tlsInfo.SPIFFEID != nil { + principal = tlsInfo.SPIFFEID.String() + } + } + } + + //TODO(gtcooke94) check if we need to log before creating the event + event := &audit.Event{ + FullMethodName: rpcData.fullMethod, + Principal: principal, + PolicyName: e.policyName, + MatchedRule: rule, + Authorized: authorized, + } + for _, logger := range e.auditLoggers { + switch e.auditCondition { + case v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY: + if !authorized { + logger.Log(event) + } + case v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW: + if authorized { + logger.Log(event) + } + case v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY_AND_ALLOW: + logger.Log(event) + } + } +} + +// This is used when converting a custom config from raw JSON to a TypedStruct. +// The TypeURL of the TypeStruct will be "grpc.authz.audit_logging/". +const typeURLPrefix = "grpc.authz.audit_logging/" diff --git a/vendor/google.golang.org/grpc/orca/call_metrics.go b/vendor/google.golang.org/grpc/orca/call_metrics.go new file mode 100644 index 0000000000..9ae7721420 --- /dev/null +++ b/vendor/google.golang.org/grpc/orca/call_metrics.go @@ -0,0 +1,196 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package orca + +import ( + "context" + "sync" + + "google.golang.org/grpc" + grpcinternal "google.golang.org/grpc/internal" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/orca/internal" + "google.golang.org/protobuf/proto" +) + +// CallMetricsRecorder allows a service method handler to record per-RPC +// metrics. It contains all utilization-based metrics from +// ServerMetricsRecorder as well as additional request cost metrics. +type CallMetricsRecorder interface { + ServerMetricsRecorder + + // SetRequestCost sets the relevant server metric. + SetRequestCost(name string, val float64) + // DeleteRequestCost deletes the relevant server metric to prevent it + // from being sent. + DeleteRequestCost(name string) + + // SetNamedMetric sets the relevant server metric. + SetNamedMetric(name string, val float64) + // DeleteNamedMetric deletes the relevant server metric to prevent it + // from being sent. + DeleteNamedMetric(name string) +} + +type callMetricsRecorderCtxKey struct{} + +// CallMetricsRecorderFromContext returns the RPC-specific custom metrics +// recorder embedded in the provided RPC context. +// +// Returns nil if no custom metrics recorder is found in the provided context, +// which will be the case when custom metrics reporting is not enabled. +func CallMetricsRecorderFromContext(ctx context.Context) CallMetricsRecorder { + rw, ok := ctx.Value(callMetricsRecorderCtxKey{}).(*recorderWrapper) + if !ok { + return nil + } + return rw.recorder() +} + +// recorderWrapper is a wrapper around a CallMetricsRecorder to ensure that +// concurrent calls to CallMetricsRecorderFromContext() results in only one +// allocation of the underlying metrics recorder, while also allowing for lazy +// initialization of the recorder itself. +type recorderWrapper struct { + once sync.Once + r CallMetricsRecorder + smp ServerMetricsProvider +} + +func (rw *recorderWrapper) recorder() CallMetricsRecorder { + rw.once.Do(func() { + rw.r = newServerMetricsRecorder() + }) + return rw.r +} + +// setTrailerMetadata adds a trailer metadata entry with key being set to +// `internal.TrailerMetadataKey` and value being set to the binary-encoded +// orca.OrcaLoadReport protobuf message. +// +// This function is called from the unary and streaming interceptors defined +// above. Any errors encountered here are not propagated to the caller because +// they are ignored there. Hence we simply log any errors encountered here at +// warning level, and return nothing. 
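In application code the pieces fit together roughly as follows: the server is built with CallMetricsServerOption (documented just below) and handlers pull the per-RPC recorder out of their context. The echo service, the pb package and the port are placeholders; only the grpc and orca calls are taken from this file and server_metrics.go.

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/orca"

	pb "example.com/echo/proto" // hypothetical generated package
)

type echoServer struct {
	pb.UnimplementedEchoServer
}

func (e *echoServer) UnaryEcho(ctx context.Context, req *pb.EchoRequest) (*pb.EchoResponse, error) {
	// Nil when CallMetricsServerOption was not passed to grpc.NewServer.
	if r := orca.CallMetricsRecorderFromContext(ctx); r != nil {
		r.SetRequestCost("db_queries", 3)      // request costs are only sent per call
		r.SetNamedMetric("items_scanned", 128) // arbitrary named metric
	}
	return &pb.EchoResponse{Message: req.GetMessage()}, nil
}

func main() {
	// Optional process-wide provider; its values seed every per-call report
	// and can be overridden by the per-RPC recorder above.
	smr := orca.NewServerMetricsRecorder()
	s := grpc.NewServer(orca.CallMetricsServerOption(smr))
	pb.RegisterEchoServer(s, &echoServer{})

	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	log.Fatal(s.Serve(lis))
}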
+func (rw *recorderWrapper) setTrailerMetadata(ctx context.Context) { + var sm *ServerMetrics + if rw.smp != nil { + sm = rw.smp.ServerMetrics() + sm.merge(rw.r.ServerMetrics()) + } else { + sm = rw.r.ServerMetrics() + } + + b, err := proto.Marshal(sm.toLoadReportProto()) + if err != nil { + logger.Warningf("Failed to marshal load report: %v", err) + return + } + if err := grpc.SetTrailer(ctx, metadata.Pairs(internal.TrailerMetadataKey, string(b))); err != nil { + logger.Warningf("Failed to set trailer metadata: %v", err) + } +} + +var joinServerOptions = grpcinternal.JoinServerOptions.(func(...grpc.ServerOption) grpc.ServerOption) + +// CallMetricsServerOption returns a server option which enables the reporting +// of per-RPC custom backend metrics for unary and streaming RPCs. +// +// Server applications interested in injecting custom backend metrics should +// pass the server option returned from this function as the first argument to +// grpc.NewServer(). +// +// Subsequently, server RPC handlers can retrieve a reference to the RPC +// specific custom metrics recorder [CallMetricsRecorder] to be used, via a call +// to CallMetricsRecorderFromContext(), and inject custom metrics at any time +// during the RPC lifecycle. +// +// The injected custom metrics will be sent as part of trailer metadata, as a +// binary-encoded [ORCA LoadReport] protobuf message, with the metadata key +// being set be "endpoint-load-metrics-bin". +// +// If a non-nil ServerMetricsProvider is provided, the gRPC server will +// transmit the metrics it provides, overwritten by any per-RPC metrics given +// to the CallMetricsRecorder. A ServerMetricsProvider is typically obtained +// by calling NewServerMetricsRecorder. +// +// [ORCA LoadReport]: https://github.com/cncf/xds/blob/main/xds/data/orca/v3/orca_load_report.proto#L15 +func CallMetricsServerOption(smp ServerMetricsProvider) grpc.ServerOption { + return joinServerOptions(grpc.ChainUnaryInterceptor(unaryInt(smp)), grpc.ChainStreamInterceptor(streamInt(smp))) +} + +func unaryInt(smp ServerMetricsProvider) func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { + return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { + // We don't allocate the metric recorder here. It will be allocated the + // first time the user calls CallMetricsRecorderFromContext(). + rw := &recorderWrapper{smp: smp} + ctxWithRecorder := newContextWithRecorderWrapper(ctx, rw) + + resp, err := handler(ctxWithRecorder, req) + + // It is safe to access the underlying metric recorder inside the wrapper at + // this point, as the user's RPC handler is done executing, and therefore + // there will be no more calls to CallMetricsRecorderFromContext(), which is + // where the metric recorder is lazy allocated. + if rw.r != nil { + rw.setTrailerMetadata(ctx) + } + return resp, err + } +} + +func streamInt(smp ServerMetricsProvider) func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + return func(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + // We don't allocate the metric recorder here. It will be allocated the + // first time the user calls CallMetricsRecorderFromContext(). 
+ rw := &recorderWrapper{smp: smp} + ws := &wrappedStream{ + ServerStream: ss, + ctx: newContextWithRecorderWrapper(ss.Context(), rw), + } + + err := handler(srv, ws) + + // It is safe to access the underlying metric recorder inside the wrapper at + // this point, as the user's RPC handler is done executing, and therefore + // there will be no more calls to CallMetricsRecorderFromContext(), which is + // where the metric recorder is lazy allocated. + if rw.r != nil { + rw.setTrailerMetadata(ss.Context()) + } + return err + } +} + +func newContextWithRecorderWrapper(ctx context.Context, r *recorderWrapper) context.Context { + return context.WithValue(ctx, callMetricsRecorderCtxKey{}, r) +} + +// wrappedStream wraps the grpc.ServerStream received by the streaming +// interceptor. Overrides only the Context() method to return a context which +// contains a reference to the CallMetricsRecorder corresponding to this +// stream. +type wrappedStream struct { + grpc.ServerStream + ctx context.Context +} + +func (w *wrappedStream) Context() context.Context { + return w.ctx +} diff --git a/vendor/google.golang.org/grpc/orca/internal/internal.go b/vendor/google.golang.org/grpc/orca/internal/internal.go new file mode 100644 index 0000000000..d1425c3e71 --- /dev/null +++ b/vendor/google.golang.org/grpc/orca/internal/internal.go @@ -0,0 +1,71 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains orca-internal code, for testing purposes and to +// avoid polluting the godoc of the top-level orca package. +package internal + +import ( + "errors" + "fmt" + + ibackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" +) + +// AllowAnyMinReportingInterval prevents clamping of the MinReportingInterval +// configured via ServiceOptions, to a minimum of 30s. +// +// For testing purposes only. +var AllowAnyMinReportingInterval any // func(*ServiceOptions) + +// DefaultBackoffFunc is used by the producer to control its backoff behavior. +// +// For testing purposes only. +var DefaultBackoffFunc = ibackoff.DefaultExponential.Backoff + +// TrailerMetadataKey is the key in which the per-call backend metrics are +// transmitted. +const TrailerMetadataKey = "endpoint-load-metrics-bin" + +// ToLoadReport unmarshals a binary encoded [ORCA LoadReport] protobuf message +// from md and returns the corresponding struct. The load report is expected to +// be stored as the value for key "endpoint-load-metrics-bin". +// +// If no load report was found in the provided metadata, if multiple load +// reports are found, or if the load report found cannot be parsed, an error is +// returned. 
+//
+// [ORCA LoadReport]: (https://github.com/cncf/xds/blob/main/xds/data/orca/v3/orca_load_report.proto#L15)
+func ToLoadReport(md metadata.MD) (*v3orcapb.OrcaLoadReport, error) {
+	vs := md.Get(TrailerMetadataKey)
+	if len(vs) == 0 {
+		return nil, nil
+	}
+	if len(vs) != 1 {
+		return nil, errors.New("multiple orca load reports found in provided metadata")
+	}
+	ret := new(v3orcapb.OrcaLoadReport)
+	if err := proto.Unmarshal([]byte(vs[0]), ret); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal load report found in metadata: %v", err)
+	}
+	return ret, nil
+}
diff --git a/vendor/google.golang.org/grpc/orca/orca.go b/vendor/google.golang.org/grpc/orca/orca.go
new file mode 100644
index 0000000000..d0cb3720c8
--- /dev/null
+++ b/vendor/google.golang.org/grpc/orca/orca.go
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package orca implements Open Request Cost Aggregation, which is an open
+// standard for request cost aggregation and reporting by backends and the
+// corresponding aggregation of such reports by L7 load balancers (such as
+// Envoy) on the data plane. In a proxyless world with gRPC enabled
+// applications, aggregation of such reports will be done by the gRPC client.
+//
+// # Experimental
+//
+// Notice: All APIs in this package are EXPERIMENTAL and may be changed or
+// removed in a later release.
+package orca
+
+import (
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/balancerload"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/orca/internal"
+)
+
+var logger = grpclog.Component("orca-backend-metrics")
+
+// loadParser implements the Parser interface defined in `internal/balancerload`
+// package. This interface is used by the client stream to parse load reports
+// sent by the server in trailer metadata. The parsed loads are then sent to
+// balancers via balancer.DoneInfo.
+//
+// The grpc package cannot directly call toLoadReport() as that would cause an
+// import cycle. Hence this roundabout method is used.
+type loadParser struct{}
+
+func (loadParser) Parse(md metadata.MD) any {
+	lr, err := internal.ToLoadReport(md)
+	if err != nil {
+		logger.Infof("Parse failed: %v", err)
+	}
+	return lr
+}
+
+func init() {
+	balancerload.SetParser(loadParser{})
+}
diff --git a/vendor/google.golang.org/grpc/orca/producer.go b/vendor/google.golang.org/grpc/orca/producer.go
new file mode 100644
index 0000000000..6e7c4c9f30
--- /dev/null
+++ b/vendor/google.golang.org/grpc/orca/producer.go
@@ -0,0 +1,223 @@
+/*
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package orca + +import ( + "context" + "sync" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/orca/internal" + "google.golang.org/grpc/status" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" + v3orcaservicegrpc "github.com/cncf/xds/go/xds/service/orca/v3" + v3orcaservicepb "github.com/cncf/xds/go/xds/service/orca/v3" + "google.golang.org/protobuf/types/known/durationpb" +) + +type producerBuilder struct{} + +// Build constructs and returns a producer and its cleanup function +func (*producerBuilder) Build(cci any) (balancer.Producer, func()) { + p := &producer{ + client: v3orcaservicegrpc.NewOpenRcaServiceClient(cci.(grpc.ClientConnInterface)), + intervals: make(map[time.Duration]int), + listeners: make(map[OOBListener]struct{}), + backoff: internal.DefaultBackoffFunc, + } + return p, func() { + <-p.stopped + } +} + +var producerBuilderSingleton = &producerBuilder{} + +// OOBListener is used to receive out-of-band load reports as they arrive. +type OOBListener interface { + // OnLoadReport is called when a load report is received. + OnLoadReport(*v3orcapb.OrcaLoadReport) +} + +// OOBListenerOptions contains options to control how an OOBListener is called. +type OOBListenerOptions struct { + // ReportInterval specifies how often to request the server to provide a + // load report. May be provided less frequently if the server requires a + // longer interval, or may be provided more frequently if another + // subscriber requests a shorter interval. + ReportInterval time.Duration +} + +// RegisterOOBListener registers an out-of-band load report listener on sc. +// Any OOBListener may only be registered once per subchannel at a time. The +// returned stop function must be called when no longer needed. Do not +// register a single OOBListener more than once per SubConn. +func RegisterOOBListener(sc balancer.SubConn, l OOBListener, opts OOBListenerOptions) (stop func()) { + pr, closeFn := sc.GetOrBuildProducer(producerBuilderSingleton) + p := pr.(*producer) + + p.registerListener(l, opts.ReportInterval) + + // TODO: When we can register for SubConn state updates, automatically call + // stop() on SHUTDOWN. + + // If stop is called multiple times, prevent it from having any effect on + // subsequent calls. + return grpcsync.OnceFunc(func() { + p.unregisterListener(l, opts.ReportInterval) + closeFn() + }) +} + +type producer struct { + client v3orcaservicegrpc.OpenRcaServiceClient + + // backoff is called between stream attempts to determine how long to delay + // to avoid overloading a server experiencing problems. The attempt count + // is incremented when stream errors occur and is reset when the stream + // reports a result. 
+	backoff func(int) time.Duration
+
+	mu          sync.Mutex
+	intervals   map[time.Duration]int    // map from interval time to count of listeners requesting that time
+	listeners   map[OOBListener]struct{} // set of registered listeners
+	minInterval time.Duration
+	stop        func()        // stops the current run goroutine
+	stopped     chan struct{} // closed when the run goroutine exits
+}
+
+// registerListener adds the listener and its requested report interval to the
+// producer.
+func (p *producer) registerListener(l OOBListener, interval time.Duration) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	p.listeners[l] = struct{}{}
+	p.intervals[interval]++
+	if len(p.listeners) == 1 || interval < p.minInterval {
+		p.minInterval = interval
+		p.updateRunLocked()
+	}
+}
+
+// unregisterListener removes the listener and its requested report interval
+// from the producer.
+func (p *producer) unregisterListener(l OOBListener, interval time.Duration) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	delete(p.listeners, l)
+	p.intervals[interval]--
+	if p.intervals[interval] == 0 {
+		delete(p.intervals, interval)
+
+		if p.minInterval == interval {
+			p.recomputeMinInterval()
+			p.updateRunLocked()
+		}
+	}
+}
+
+// recomputeMinInterval sets p.minInterval to the smallest key in p.intervals.
+func (p *producer) recomputeMinInterval() {
+	first := true
+	for interval := range p.intervals {
+		if first || interval < p.minInterval {
+			p.minInterval = interval
+			first = false
+		}
+	}
+}
+
+// updateRunLocked is called whenever the run goroutine needs to be started /
+// stopped / restarted due to: 1. the initial listener being registered, 2. the
+// final listener being unregistered, or 3. the minimum registered interval
+// changing.
+func (p *producer) updateRunLocked() {
+	if p.stop != nil {
+		p.stop()
+		p.stop = nil
+	}
+	if len(p.listeners) > 0 {
+		var ctx context.Context
+		ctx, p.stop = context.WithCancel(context.Background())
+		p.stopped = make(chan struct{})
+		go p.run(ctx, p.stopped, p.minInterval)
+	}
+}
+
+// run manages the ORCA OOB stream on the subchannel.
+func (p *producer) run(ctx context.Context, done chan struct{}, interval time.Duration) {
+	defer close(done)
+
+	runStream := func() error {
+		resetBackoff, err := p.runStream(ctx, interval)
+		if status.Code(err) == codes.Unimplemented {
+			// Unimplemented; do not retry.
+			logger.Error("Server doesn't support ORCA OOB load reporting protocol; not listening for load reports.")
+			return err
+		}
+		// Retry for all other errors.
+		if code := status.Code(err); code != codes.Unavailable && code != codes.Canceled {
+			// TODO: Unavailable and Canceled should also ideally log an error,
+			// but for now we receive them when shutting down the ClientConn
+			// (Unavailable if the stream hasn't started yet, and Canceled if it
+			// happens mid-stream). Once we can determine the state or ensure
+			// the producer is stopped before the stream ends, we can log an
+			// error when it's not a natural shutdown.
+			logger.Error("Received unexpected stream error:", err)
+		}
+		if resetBackoff {
+			return backoff.ErrResetBackoff
+		}
+		return nil
+	}
+	backoff.RunF(ctx, runStream, p.backoff)
+}
+
+// runStream runs a single stream on the subchannel and returns the resulting
+// error, if any, and whether or not the run loop should reset the backoff
+// timer to zero or advance it.
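On the consuming side, the producer above is driven through RegisterOOBListener. Below is a sketch of how a custom load-balancing policy might subscribe to reports for one of its SubConns; the listener type, the address string and the ten-second interval are illustrative, and the SubConn is assumed to have been created by the balancer.

package example

import (
	"log"
	"time"

	v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3"
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/orca"
)

// loadListener receives out-of-band load reports for a single SubConn.
type loadListener struct {
	addr string
}

func (l *loadListener) OnLoadReport(r *v3orcapb.OrcaLoadReport) {
	log.Printf("OOB load from %s: cpu=%.2f app=%.2f qps=%.1f",
		l.addr, r.GetCpuUtilization(), r.GetApplicationUtilization(), r.GetRpsFractional())
}

// watchLoad subscribes to ORCA reports on sc. The returned stop function must
// be called once the listener is no longer needed (e.g. on SubConn shutdown);
// RegisterOOBListener guarantees that calling it repeatedly has no effect.
func watchLoad(sc balancer.SubConn, addr string) (stop func()) {
	return orca.RegisterOOBListener(sc, &loadListener{addr: addr}, orca.OOBListenerOptions{
		ReportInterval: 10 * time.Second,
	})
}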
+func (p *producer) runStream(ctx context.Context, interval time.Duration) (resetBackoff bool, err error) { + streamCtx, cancel := context.WithCancel(ctx) + defer cancel() + stream, err := p.client.StreamCoreMetrics(streamCtx, &v3orcaservicepb.OrcaLoadReportRequest{ + ReportInterval: durationpb.New(interval), + }) + if err != nil { + return false, err + } + + for { + report, err := stream.Recv() + if err != nil { + return resetBackoff, err + } + resetBackoff = true + p.mu.Lock() + for l := range p.listeners { + l.OnLoadReport(report) + } + p.mu.Unlock() + } +} diff --git a/vendor/google.golang.org/grpc/orca/server_metrics.go b/vendor/google.golang.org/grpc/orca/server_metrics.go new file mode 100644 index 0000000000..bb664d6a08 --- /dev/null +++ b/vendor/google.golang.org/grpc/orca/server_metrics.go @@ -0,0 +1,352 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package orca + +import ( + "sync/atomic" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" +) + +// ServerMetrics is the data returned from a server to a client to describe the +// current state of the server and/or the cost of a request when used per-call. +type ServerMetrics struct { + CPUUtilization float64 // CPU utilization: [0, inf); unset=-1 + MemUtilization float64 // Memory utilization: [0, 1.0]; unset=-1 + AppUtilization float64 // Application utilization: [0, inf); unset=-1 + QPS float64 // queries per second: [0, inf); unset=-1 + EPS float64 // errors per second: [0, inf); unset=-1 + + // The following maps must never be nil. + + Utilization map[string]float64 // Custom fields: [0, 1.0] + RequestCost map[string]float64 // Custom fields: [0, inf); not sent OOB + NamedMetrics map[string]float64 // Custom fields: [0, inf); not sent OOB +} + +// toLoadReportProto dumps sm as an OrcaLoadReport proto. +func (sm *ServerMetrics) toLoadReportProto() *v3orcapb.OrcaLoadReport { + ret := &v3orcapb.OrcaLoadReport{ + Utilization: sm.Utilization, + RequestCost: sm.RequestCost, + NamedMetrics: sm.NamedMetrics, + } + if sm.CPUUtilization != -1 { + ret.CpuUtilization = sm.CPUUtilization + } + if sm.MemUtilization != -1 { + ret.MemUtilization = sm.MemUtilization + } + if sm.AppUtilization != -1 { + ret.ApplicationUtilization = sm.AppUtilization + } + if sm.QPS != -1 { + ret.RpsFractional = sm.QPS + } + if sm.EPS != -1 { + ret.Eps = sm.EPS + } + return ret +} + +// merge merges o into sm, overwriting any values present in both. 
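merge (defined just below) and toLoadReportProto are unexported, so the following is an in-package sketch only (testing import elided); it illustrates the -1 "unset" sentinel used throughout this file: fields left at -1 neither overwrite anything during a merge nor appear in the serialized OrcaLoadReport.

// Hypothetical test in package orca, for illustration only.
func TestUnsetSentinelSketch(t *testing.T) {
	base := &ServerMetrics{
		CPUUtilization: 0.5, MemUtilization: -1, AppUtilization: -1, QPS: 100, EPS: -1,
		Utilization: map[string]float64{}, RequestCost: map[string]float64{}, NamedMetrics: map[string]float64{},
	}
	perCall := &ServerMetrics{
		CPUUtilization: -1, MemUtilization: -1, AppUtilization: -1, QPS: -1, EPS: -1,
		Utilization: map[string]float64{}, RequestCost: map[string]float64{"db": 2.0}, NamedMetrics: map[string]float64{},
	}
	// Only fields perCall actually set (none here) and its map entries
	// overwrite base; everything left at -1 is ignored.
	base.merge(perCall)
	// On the wire, -1 fields stay unset; CPU, QPS and the "db" cost are populated.
	lr := base.toLoadReportProto()
	if lr.GetCpuUtilization() != 0.5 || lr.GetRequestCost()["db"] != 2.0 {
		t.Errorf("unexpected load report: %+v", lr)
	}
}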
+func (sm *ServerMetrics) merge(o *ServerMetrics) { + mergeMap(sm.Utilization, o.Utilization) + mergeMap(sm.RequestCost, o.RequestCost) + mergeMap(sm.NamedMetrics, o.NamedMetrics) + if o.CPUUtilization != -1 { + sm.CPUUtilization = o.CPUUtilization + } + if o.MemUtilization != -1 { + sm.MemUtilization = o.MemUtilization + } + if o.AppUtilization != -1 { + sm.AppUtilization = o.AppUtilization + } + if o.QPS != -1 { + sm.QPS = o.QPS + } + if o.EPS != -1 { + sm.EPS = o.EPS + } +} + +func mergeMap(a, b map[string]float64) { + for k, v := range b { + a[k] = v + } +} + +// ServerMetricsRecorder allows for recording and providing out of band server +// metrics. +type ServerMetricsRecorder interface { + ServerMetricsProvider + + // SetCPUUtilization sets the CPU utilization server metric. Must be + // greater than zero. + SetCPUUtilization(float64) + // DeleteCPUUtilization deletes the CPU utilization server metric to + // prevent it from being sent. + DeleteCPUUtilization() + + // SetMemoryUtilization sets the memory utilization server metric. Must be + // in the range [0, 1]. + SetMemoryUtilization(float64) + // DeleteMemoryUtilization deletes the memory utilization server metric to + // prevent it from being sent. + DeleteMemoryUtilization() + + // SetApplicationUtilization sets the application utilization server + // metric. Must be greater than zero. + SetApplicationUtilization(float64) + // DeleteApplicationUtilization deletes the application utilization server + // metric to prevent it from being sent. + DeleteApplicationUtilization() + + // SetQPS sets the Queries Per Second server metric. Must be greater than + // zero. + SetQPS(float64) + // DeleteQPS deletes the Queries Per Second server metric to prevent it + // from being sent. + DeleteQPS() + + // SetEPS sets the Errors Per Second server metric. Must be greater than + // zero. + SetEPS(float64) + // DeleteEPS deletes the Errors Per Second server metric to prevent it from + // being sent. + DeleteEPS() + + // SetNamedUtilization sets the named utilization server metric for the + // name provided. val must be in the range [0, 1]. + SetNamedUtilization(name string, val float64) + // DeleteNamedUtilization deletes the named utilization server metric for + // the name provided to prevent it from being sent. + DeleteNamedUtilization(name string) +} + +type serverMetricsRecorder struct { + state atomic.Pointer[ServerMetrics] // the current metrics +} + +// NewServerMetricsRecorder returns an in-memory store for ServerMetrics and +// allows for safe setting and retrieving of ServerMetrics. Also implements +// ServerMetricsProvider for use with NewService. +func NewServerMetricsRecorder() ServerMetricsRecorder { + return newServerMetricsRecorder() +} + +func newServerMetricsRecorder() *serverMetricsRecorder { + s := new(serverMetricsRecorder) + s.state.Store(&ServerMetrics{ + CPUUtilization: -1, + MemUtilization: -1, + AppUtilization: -1, + QPS: -1, + EPS: -1, + Utilization: make(map[string]float64), + RequestCost: make(map[string]float64), + NamedMetrics: make(map[string]float64), + }) + return s +} + +// ServerMetrics returns a copy of the current ServerMetrics. 
+func (s *serverMetricsRecorder) ServerMetrics() *ServerMetrics { + return copyServerMetrics(s.state.Load()) +} + +func copyMap(m map[string]float64) map[string]float64 { + ret := make(map[string]float64, len(m)) + for k, v := range m { + ret[k] = v + } + return ret +} + +func copyServerMetrics(sm *ServerMetrics) *ServerMetrics { + return &ServerMetrics{ + CPUUtilization: sm.CPUUtilization, + MemUtilization: sm.MemUtilization, + AppUtilization: sm.AppUtilization, + QPS: sm.QPS, + EPS: sm.EPS, + Utilization: copyMap(sm.Utilization), + RequestCost: copyMap(sm.RequestCost), + NamedMetrics: copyMap(sm.NamedMetrics), + } +} + +// SetCPUUtilization records a measurement for the CPU utilization metric. +func (s *serverMetricsRecorder) SetCPUUtilization(val float64) { + if val < 0 { + if logger.V(2) { + logger.Infof("Ignoring CPU Utilization value out of range: %v", val) + } + return + } + smCopy := copyServerMetrics(s.state.Load()) + smCopy.CPUUtilization = val + s.state.Store(smCopy) +} + +// DeleteCPUUtilization deletes the relevant server metric to prevent it from +// being sent. +func (s *serverMetricsRecorder) DeleteCPUUtilization() { + smCopy := copyServerMetrics(s.state.Load()) + smCopy.CPUUtilization = -1 + s.state.Store(smCopy) +} + +// SetMemoryUtilization records a measurement for the memory utilization metric. +func (s *serverMetricsRecorder) SetMemoryUtilization(val float64) { + if val < 0 || val > 1 { + if logger.V(2) { + logger.Infof("Ignoring Memory Utilization value out of range: %v", val) + } + return + } + smCopy := copyServerMetrics(s.state.Load()) + smCopy.MemUtilization = val + s.state.Store(smCopy) +} + +// DeleteMemoryUtilization deletes the relevant server metric to prevent it +// from being sent. +func (s *serverMetricsRecorder) DeleteMemoryUtilization() { + smCopy := copyServerMetrics(s.state.Load()) + smCopy.MemUtilization = -1 + s.state.Store(smCopy) +} + +// SetApplicationUtilization records a measurement for a generic utilization +// metric. +func (s *serverMetricsRecorder) SetApplicationUtilization(val float64) { + if val < 0 { + if logger.V(2) { + logger.Infof("Ignoring Application Utilization value out of range: %v", val) + } + return + } + smCopy := copyServerMetrics(s.state.Load()) + smCopy.AppUtilization = val + s.state.Store(smCopy) +} + +// DeleteApplicationUtilization deletes the relevant server metric to prevent +// it from being sent. +func (s *serverMetricsRecorder) DeleteApplicationUtilization() { + smCopy := copyServerMetrics(s.state.Load()) + smCopy.AppUtilization = -1 + s.state.Store(smCopy) +} + +// SetQPS records a measurement for the QPS metric. +func (s *serverMetricsRecorder) SetQPS(val float64) { + if val < 0 { + if logger.V(2) { + logger.Infof("Ignoring QPS value out of range: %v", val) + } + return + } + smCopy := copyServerMetrics(s.state.Load()) + smCopy.QPS = val + s.state.Store(smCopy) +} + +// DeleteQPS deletes the relevant server metric to prevent it from being sent. +func (s *serverMetricsRecorder) DeleteQPS() { + smCopy := copyServerMetrics(s.state.Load()) + smCopy.QPS = -1 + s.state.Store(smCopy) +} + +// SetEPS records a measurement for the EPS metric. +func (s *serverMetricsRecorder) SetEPS(val float64) { + if val < 0 { + if logger.V(2) { + logger.Infof("Ignoring EPS value out of range: %v", val) + } + return + } + smCopy := copyServerMetrics(s.state.Load()) + smCopy.EPS = val + s.state.Store(smCopy) +} + +// DeleteEPS deletes the relevant server metric to prevent it from being sent. 
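Putting the recorder to use for out-of-band reporting, a background goroutine might keep it current while the ORCA service (service.go, later in this patch) streams snapshots to subscribed clients. The sampling helpers and interval are hypothetical, and how the constructed Service is registered on a gRPC server is outside the hunks shown here.

package main

import (
	"log"
	"time"

	"google.golang.org/grpc/orca"
)

func main() {
	smr := orca.NewServerMetricsRecorder()

	// Keep the recorder current; values outside the documented ranges are
	// ignored (and logged at verbosity 2).
	go func() {
		for range time.Tick(10 * time.Second) {
			smr.SetCPUUtilization(sampleCPU())
			smr.SetMemoryUtilization(sampleMem()) // must be within [0, 1]
		}
	}()

	// smr satisfies ServerMetricsProvider, so it can back the OOB service.
	svc, err := orca.NewService(orca.ServiceOptions{ServerMetricsProvider: smr})
	if err != nil {
		log.Fatalf("orca.NewService: %v", err)
	}
	_ = svc // registering svc with a grpc.Server is not shown in this sketch
}

// Hypothetical sampling helpers.
func sampleCPU() float64 { return 0.42 }
func sampleMem() float64 { return 0.27 }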
+func (s *serverMetricsRecorder) DeleteEPS() { + smCopy := copyServerMetrics(s.state.Load()) + smCopy.EPS = -1 + s.state.Store(smCopy) +} + +// SetNamedUtilization records a measurement for a utilization metric uniquely +// identifiable by name. +func (s *serverMetricsRecorder) SetNamedUtilization(name string, val float64) { + if val < 0 || val > 1 { + if logger.V(2) { + logger.Infof("Ignoring Named Utilization value out of range: %v", val) + } + return + } + smCopy := copyServerMetrics(s.state.Load()) + smCopy.Utilization[name] = val + s.state.Store(smCopy) +} + +// DeleteNamedUtilization deletes any previously recorded measurement for a +// utilization metric uniquely identifiable by name. +func (s *serverMetricsRecorder) DeleteNamedUtilization(name string) { + smCopy := copyServerMetrics(s.state.Load()) + delete(smCopy.Utilization, name) + s.state.Store(smCopy) +} + +// SetRequestCost records a measurement for a utilization metric uniquely +// identifiable by name. +func (s *serverMetricsRecorder) SetRequestCost(name string, val float64) { + smCopy := copyServerMetrics(s.state.Load()) + smCopy.RequestCost[name] = val + s.state.Store(smCopy) +} + +// DeleteRequestCost deletes any previously recorded measurement for a +// utilization metric uniquely identifiable by name. +func (s *serverMetricsRecorder) DeleteRequestCost(name string) { + smCopy := copyServerMetrics(s.state.Load()) + delete(smCopy.RequestCost, name) + s.state.Store(smCopy) +} + +// SetNamedMetric records a measurement for a utilization metric uniquely +// identifiable by name. +func (s *serverMetricsRecorder) SetNamedMetric(name string, val float64) { + smCopy := copyServerMetrics(s.state.Load()) + smCopy.NamedMetrics[name] = val + s.state.Store(smCopy) +} + +// DeleteNamedMetric deletes any previously recorded measurement for a +// utilization metric uniquely identifiable by name. +func (s *serverMetricsRecorder) DeleteNamedMetric(name string) { + smCopy := copyServerMetrics(s.state.Load()) + delete(smCopy.NamedMetrics, name) + s.state.Store(smCopy) +} diff --git a/vendor/google.golang.org/grpc/orca/service.go b/vendor/google.golang.org/grpc/orca/service.go new file mode 100644 index 0000000000..7461a6b05a --- /dev/null +++ b/vendor/google.golang.org/grpc/orca/service.go @@ -0,0 +1,163 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package orca
+
+import (
+	"fmt"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/internal"
+	ointernal "google.golang.org/grpc/orca/internal"
+	"google.golang.org/grpc/status"
+
+	v3orcaservicegrpc "github.com/cncf/xds/go/xds/service/orca/v3"
+	v3orcaservicepb "github.com/cncf/xds/go/xds/service/orca/v3"
+)
+
+func init() {
+	ointernal.AllowAnyMinReportingInterval = func(so *ServiceOptions) {
+		so.allowAnyMinReportingInterval = true
+	}
+	internal.ORCAAllowAnyMinReportingInterval = ointernal.AllowAnyMinReportingInterval
+}
+
+// minReportingInterval is the absolute minimum value supported for
+// out-of-band metrics reporting from the ORCA service implementation
+// provided by the orca package.
+const minReportingInterval = 30 * time.Second
+
+// Service provides an implementation of the OpenRcaService as defined in the
+// [ORCA] service protos. Instances of this type must be created via calls to
+// Register() or NewService().
+//
+// Server applications can use the SetXxx() and DeleteXxx() methods to record
+// measurements corresponding to backend metrics, which eventually get pushed to
+// clients who have initiated the StreamCoreMetrics streaming RPC.
+//
+// [ORCA]: https://github.com/cncf/xds/blob/main/xds/service/orca/v3/orca.proto
+type Service struct {
+	v3orcaservicegrpc.UnimplementedOpenRcaServiceServer
+
+	// Minimum reporting interval, as configured by the user, or the default.
+	minReportingInterval time.Duration
+
+	smProvider ServerMetricsProvider
+}
+
+// ServiceOptions contains options to configure the ORCA service implementation.
+type ServiceOptions struct {
+	// ServerMetricsProvider is the provider to be used by the service for
+	// reporting OOB server metrics to clients. Typically obtained via
+	// NewServerMetricsRecorder. This field is required.
+	ServerMetricsProvider ServerMetricsProvider
+
+	// MinReportingInterval sets the lower bound for how often out-of-band
+	// metrics are reported on the streaming RPC initiated by the client. If
+	// unspecified, negative or less than the default value of 30s, the default
+	// is used. Clients may request a higher value as part of the
+	// StreamCoreMetrics streaming RPC.
+	MinReportingInterval time.Duration
+
+	// Allow a minReportingInterval which is less than the default of 30s.
+	// Used for testing purposes only.
+	allowAnyMinReportingInterval bool
+}
+
+// A ServerMetricsProvider provides ServerMetrics upon request.
+type ServerMetricsProvider interface {
+	// ServerMetrics returns the current set of server metrics. It should
+	// return a read-only, immutable copy of the data that is active at the
+	// time of the call.
+	ServerMetrics() *ServerMetrics
+}
+
+// NewService creates a new ORCA service implementation configured using the
+// provided options.
+func NewService(opts ServiceOptions) (*Service, error) {
+	// The default minimum supported reporting interval value can be overridden
+	// for testing purposes through the orca internal package.
+ if opts.ServerMetricsProvider == nil { + return nil, fmt.Errorf("ServerMetricsProvider not specified") + } + if !opts.allowAnyMinReportingInterval { + if opts.MinReportingInterval < 0 || opts.MinReportingInterval < minReportingInterval { + opts.MinReportingInterval = minReportingInterval + } + } + service := &Service{ + minReportingInterval: opts.MinReportingInterval, + smProvider: opts.ServerMetricsProvider, + } + return service, nil +} + +// Register creates a new ORCA service implementation configured using the +// provided options and registers the same on the provided grpc Server. +func Register(s *grpc.Server, opts ServiceOptions) error { + // TODO(https://github.com/cncf/xds/issues/41): replace *grpc.Server with + // grpc.ServiceRegistrar when possible. + service, err := NewService(opts) + if err != nil { + return err + } + v3orcaservicegrpc.RegisterOpenRcaServiceServer(s, service) + return nil +} + +// determineReportingInterval determines the reporting interval for out-of-band +// metrics. If the reporting interval is not specified in the request, or is +// negative or is less than the configured minimum (via +// ServiceOptions.MinReportingInterval), the latter is used. Else the value from +// the incoming request is used. +func (s *Service) determineReportingInterval(req *v3orcaservicepb.OrcaLoadReportRequest) time.Duration { + if req.GetReportInterval() == nil { + return s.minReportingInterval + } + dur := req.GetReportInterval().AsDuration() + if dur < s.minReportingInterval { + logger.Warningf("Received reporting interval %q is less than configured minimum: %v. Using minimum", dur, s.minReportingInterval) + return s.minReportingInterval + } + return dur +} + +func (s *Service) sendMetricsResponse(stream v3orcaservicegrpc.OpenRcaService_StreamCoreMetricsServer) error { + return stream.Send(s.smProvider.ServerMetrics().toLoadReportProto()) +} + +// StreamCoreMetrics streams custom backend metrics injected by the server +// application. +func (s *Service) StreamCoreMetrics(req *v3orcaservicepb.OrcaLoadReportRequest, stream v3orcaservicegrpc.OpenRcaService_StreamCoreMetricsServer) error { + ticker := time.NewTicker(s.determineReportingInterval(req)) + defer ticker.Stop() + + for { + if err := s.sendMetricsResponse(stream); err != nil { + return err + } + // Send a response containing the currently recorded metrics + select { + case <-stream.Context().Done(): + return status.Error(codes.Canceled, "Stream has ended.") + case <-ticker.C: + } + } +} diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/LICENSE b/vendor/google.golang.org/grpc/stats/opentelemetry/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/opentelemetry/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
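
The ORCA pieces added above (the ServerMetricsRecorder in server_metrics.go and the OpenRcaService implementation in service.go) are wired together by the server application. A minimal sketch of that wiring, using only the NewServerMetricsRecorder, ServiceOptions, and Register APIs shown in this patch and a hypothetical listener address:

package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/orca"
)

func main() {
	// In-memory store for out-of-band server metrics; it also implements
	// ServerMetricsProvider, so it can back the ORCA service directly.
	smr := orca.NewServerMetricsRecorder()

	s := grpc.NewServer()

	// Register the OpenRcaService on the server so clients can open the
	// StreamCoreMetrics RPC and receive the recorded metrics periodically.
	if err := orca.Register(s, orca.ServiceOptions{
		ServerMetricsProvider: smr,
		MinReportingInterval:  30 * time.Second, // values below the 30s minimum are raised to it
	}); err != nil {
		log.Fatalf("orca.Register() failed: %v", err)
	}

	// Record some out-of-band metrics; the recorder drops values outside the
	// documented ranges instead of storing them.
	smr.SetCPUUtilization(0.5)
	smr.SetMemoryUtilization(0.4)
	smr.SetQPS(120)
	smr.SetNamedUtilization("db-pool", 0.3)

	lis, err := net.Listen("tcp", ":50051") // hypothetical address
	if err != nil {
		log.Fatalf("net.Listen() failed: %v", err)
	}
	if err := s.Serve(lis); err != nil {
		log.Fatalf("s.Serve() failed: %v", err)
	}
}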
diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/client_metrics.go b/vendor/google.golang.org/grpc/stats/opentelemetry/client_metrics.go new file mode 100644 index 0000000000..4af7f933c8 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/opentelemetry/client_metrics.go @@ -0,0 +1,277 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package opentelemetry + +import ( + "context" + "sync/atomic" + "time" + + "google.golang.org/grpc" + estats "google.golang.org/grpc/experimental/stats" + istats "google.golang.org/grpc/internal/stats" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + + otelattribute "go.opentelemetry.io/otel/attribute" + otelmetric "go.opentelemetry.io/otel/metric" +) + +type clientStatsHandler struct { + estats.MetricsRecorder + options Options + clientMetrics clientMetrics +} + +func (h *clientStatsHandler) initializeMetrics() { + // Will set no metrics to record, logically making this stats handler a + // no-op. + if h.options.MetricsOptions.MeterProvider == nil { + return + } + + meter := h.options.MetricsOptions.MeterProvider.Meter("grpc-go", otelmetric.WithInstrumentationVersion(grpc.Version)) + if meter == nil { + return + } + + metrics := h.options.MetricsOptions.Metrics + if metrics == nil { + metrics = DefaultMetrics() + } + + h.clientMetrics.attemptStarted = createInt64Counter(metrics.Metrics(), "grpc.client.attempt.started", meter, otelmetric.WithUnit("attempt"), otelmetric.WithDescription("Number of client call attempts started.")) + h.clientMetrics.attemptDuration = createFloat64Histogram(metrics.Metrics(), "grpc.client.attempt.duration", meter, otelmetric.WithUnit("s"), otelmetric.WithDescription("End-to-end time taken to complete a client call attempt."), otelmetric.WithExplicitBucketBoundaries(DefaultLatencyBounds...)) + h.clientMetrics.attemptSentTotalCompressedMessageSize = createInt64Histogram(metrics.Metrics(), "grpc.client.attempt.sent_total_compressed_message_size", meter, otelmetric.WithUnit("By"), otelmetric.WithDescription("Compressed message bytes sent per client call attempt."), otelmetric.WithExplicitBucketBoundaries(DefaultSizeBounds...)) + h.clientMetrics.attemptRcvdTotalCompressedMessageSize = createInt64Histogram(metrics.Metrics(), "grpc.client.attempt.rcvd_total_compressed_message_size", meter, otelmetric.WithUnit("By"), otelmetric.WithDescription("Compressed message bytes received per call attempt."), otelmetric.WithExplicitBucketBoundaries(DefaultSizeBounds...)) + h.clientMetrics.callDuration = createFloat64Histogram(metrics.Metrics(), "grpc.client.call.duration", meter, otelmetric.WithUnit("s"), otelmetric.WithDescription("Time taken by gRPC to complete an RPC from application's perspective."), otelmetric.WithExplicitBucketBoundaries(DefaultLatencyBounds...)) + + rm := ®istryMetrics{ + optionalLabels: h.options.MetricsOptions.OptionalLabels, + } + h.MetricsRecorder = rm + rm.registerMetrics(metrics, meter) +} + 
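
The instruments created by initializeMetrics above only record when the application supplies a MeterProvider through the Options type defined later in this package (opentelemetry.go); otherwise the handler is a no-op. A minimal sketch of enabling the client-side instrumentation, assuming the OpenTelemetry SDK meter provider from go.opentelemetry.io/otel/sdk/metric (not part of this dependency bump) and a hypothetical target address:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/stats/opentelemetry"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	// An SDK MeterProvider; a real setup would attach a reader/exporter and
	// may configure views using DefaultLatencyBounds/DefaultSizeBounds.
	mp := sdkmetric.NewMeterProvider()

	do := opentelemetry.DialOption(opentelemetry.Options{
		MetricsOptions: opentelemetry.MetricsOptions{
			MeterProvider: mp,                             // required; without it nothing is recorded
			Metrics:       opentelemetry.DefaultMetrics(), // per-call defaults plus registry defaults
		},
	})

	// hypothetical target; RPCs on this connection now record the
	// grpc.client.attempt.* and grpc.client.call.duration metrics.
	cc, err := grpc.NewClient("localhost:50051", do, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("grpc.NewClient() failed: %v", err)
	}
	defer cc.Close()
}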
+func (h *clientStatsHandler) unaryInterceptor(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + ci := &callInfo{ + target: cc.CanonicalTarget(), + method: h.determineMethod(method, opts...), + } + ctx = setCallInfo(ctx, ci) + + if h.options.MetricsOptions.pluginOption != nil { + md := h.options.MetricsOptions.pluginOption.GetMetadata() + for k, vs := range md { + for _, v := range vs { + ctx = metadata.AppendToOutgoingContext(ctx, k, v) + } + } + } + + startTime := time.Now() + err := invoker(ctx, method, req, reply, cc, opts...) + h.perCallMetrics(ctx, err, startTime, ci) + return err +} + +// determineMethod determines the method to record attributes with. This will be +// "other" if StaticMethod isn't specified or if method filter is set and +// specifies, the method name as is otherwise. +func (h *clientStatsHandler) determineMethod(method string, opts ...grpc.CallOption) string { + for _, opt := range opts { + if _, ok := opt.(grpc.StaticMethodCallOption); ok { + return removeLeadingSlash(method) + } + } + return "other" +} + +func (h *clientStatsHandler) streamInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + ci := &callInfo{ + target: cc.CanonicalTarget(), + method: h.determineMethod(method, opts...), + } + ctx = setCallInfo(ctx, ci) + + if h.options.MetricsOptions.pluginOption != nil { + md := h.options.MetricsOptions.pluginOption.GetMetadata() + for k, vs := range md { + for _, v := range vs { + ctx = metadata.AppendToOutgoingContext(ctx, k, v) + } + } + } + + startTime := time.Now() + + callback := func(err error) { + h.perCallMetrics(ctx, err, startTime, ci) + } + opts = append([]grpc.CallOption{grpc.OnFinish(callback)}, opts...) + return streamer(ctx, desc, cc, method, opts...) +} + +func (h *clientStatsHandler) perCallMetrics(ctx context.Context, err error, startTime time.Time, ci *callInfo) { + callLatency := float64(time.Since(startTime)) / float64(time.Second) // calculate ASAP + attrs := otelmetric.WithAttributeSet(otelattribute.NewSet( + otelattribute.String("grpc.method", ci.method), + otelattribute.String("grpc.target", ci.target), + otelattribute.String("grpc.status", canonicalString(status.Code(err))), + )) + h.clientMetrics.callDuration.Record(ctx, callLatency, attrs) +} + +// TagConn exists to satisfy stats.Handler. +func (h *clientStatsHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { + return ctx +} + +// HandleConn exists to satisfy stats.Handler. +func (h *clientStatsHandler) HandleConn(context.Context, stats.ConnStats) {} + +// TagRPC implements per RPC attempt context management. +func (h *clientStatsHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + // Numerous stats handlers can be used for the same channel. The cluster + // impl balancer which writes to this will only write once, thus have this + // stats handler's per attempt scoped context point to the same optional + // labels map if set. + var labels *istats.Labels + if labels = istats.GetLabels(ctx); labels == nil { + labels = &istats.Labels{ + // The defaults for all the per call labels from a plugin that + // executes on the callpath that this OpenTelemetry component + // currently supports. 
+ TelemetryLabels: map[string]string{ + "grpc.lb.locality": "", + }, + } + ctx = istats.SetLabels(ctx, labels) + } + ai := &attemptInfo{ // populates information about RPC start. + startTime: time.Now(), + xdsLabels: labels.TelemetryLabels, + method: info.FullMethodName, + } + ri := &rpcInfo{ + ai: ai, + } + return setRPCInfo(ctx, ri) +} + +func (h *clientStatsHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { + ri := getRPCInfo(ctx) + if ri == nil { + logger.Error("ctx passed into client side stats handler metrics event handling has no client attempt data present") + return + } + h.processRPCEvent(ctx, rs, ri.ai) +} + +func (h *clientStatsHandler) processRPCEvent(ctx context.Context, s stats.RPCStats, ai *attemptInfo) { + switch st := s.(type) { + case *stats.Begin: + ci := getCallInfo(ctx) + if ci == nil { + logger.Error("ctx passed into client side stats handler metrics event handling has no metrics data present") + return + } + + attrs := otelmetric.WithAttributeSet(otelattribute.NewSet( + otelattribute.String("grpc.method", ci.method), + otelattribute.String("grpc.target", ci.target), + )) + h.clientMetrics.attemptStarted.Add(ctx, 1, attrs) + case *stats.OutPayload: + atomic.AddInt64(&ai.sentCompressedBytes, int64(st.CompressedLength)) + case *stats.InPayload: + atomic.AddInt64(&ai.recvCompressedBytes, int64(st.CompressedLength)) + case *stats.InHeader: + h.setLabelsFromPluginOption(ai, st.Header) + case *stats.InTrailer: + h.setLabelsFromPluginOption(ai, st.Trailer) + case *stats.End: + h.processRPCEnd(ctx, ai, st) + default: + } +} + +func (h *clientStatsHandler) setLabelsFromPluginOption(ai *attemptInfo, incomingMetadata metadata.MD) { + if ai.pluginOptionLabels == nil && h.options.MetricsOptions.pluginOption != nil { + labels := h.options.MetricsOptions.pluginOption.GetLabels(incomingMetadata) + if labels == nil { + labels = map[string]string{} // Shouldn't return a nil map. Make it empty if so to ignore future Get Calls for this Attempt. + } + ai.pluginOptionLabels = labels + } +} + +func (h *clientStatsHandler) processRPCEnd(ctx context.Context, ai *attemptInfo, e *stats.End) { + ci := getCallInfo(ctx) + if ci == nil { + logger.Error("ctx passed into client side stats handler metrics event handling has no metrics data present") + return + } + latency := float64(time.Since(ai.startTime)) / float64(time.Second) + st := "OK" + if e.Error != nil { + s, _ := status.FromError(e.Error) + st = canonicalString(s.Code()) + } + + attributes := []otelattribute.KeyValue{ + otelattribute.String("grpc.method", ci.method), + otelattribute.String("grpc.target", ci.target), + otelattribute.String("grpc.status", st), + } + + for k, v := range ai.pluginOptionLabels { + attributes = append(attributes, otelattribute.String(k, v)) + } + + for _, o := range h.options.MetricsOptions.OptionalLabels { + // TODO: Add a filter for converting to unknown if not present in the + // CSM Plugin Option layer by adding an optional labels API. + if val, ok := ai.xdsLabels[o]; ok { + attributes = append(attributes, otelattribute.String(o, val)) + } + } + + // Allocate vararg slice once. + opts := []otelmetric.RecordOption{otelmetric.WithAttributeSet(otelattribute.NewSet(attributes...))} + h.clientMetrics.attemptDuration.Record(ctx, latency, opts...) + h.clientMetrics.attemptSentTotalCompressedMessageSize.Record(ctx, atomic.LoadInt64(&ai.sentCompressedBytes), opts...) + h.clientMetrics.attemptRcvdTotalCompressedMessageSize.Record(ctx, atomic.LoadInt64(&ai.recvCompressedBytes), opts...) 
+} + +const ( + // ClientAttemptStarted is the number of client call attempts started. + ClientAttemptStarted estats.Metric = "grpc.client.attempt.started" + // ClientAttemptDuration is the end-to-end time taken to complete a client + // call attempt. + ClientAttemptDuration estats.Metric = "grpc.client.attempt.duration" + // ClientAttemptSentCompressedTotalMessageSize is the compressed message + // bytes sent per client call attempt. + ClientAttemptSentCompressedTotalMessageSize estats.Metric = "grpc.client.attempt.sent_total_compressed_message_size" + // ClientAttemptRcvdCompressedTotalMessageSize is the compressed message + // bytes received per call attempt. + ClientAttemptRcvdCompressedTotalMessageSize estats.Metric = "grpc.client.attempt.rcvd_total_compressed_message_size" + // ClientCallDuration is the time taken by gRPC to complete an RPC from + // application's perspective. + ClientCallDuration estats.Metric = "grpc.client.call.duration" +) diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/internal/pluginoption.go b/vendor/google.golang.org/grpc/stats/opentelemetry/internal/pluginoption.go new file mode 100644 index 0000000000..b595aa85ff --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/opentelemetry/internal/pluginoption.go @@ -0,0 +1,41 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package internal defines the PluginOption interface. +package internal + +import ( + "google.golang.org/grpc/metadata" +) + +// SetPluginOption sets the plugin option on Options. +var SetPluginOption any // func(*Options, PluginOption) + +// PluginOption is the interface which represents a plugin option for the +// OpenTelemetry instrumentation component. This plugin option emits labels from +// metadata and also creates metadata containing labels. These labels are +// intended to be added to applicable OpenTelemetry metrics recorded in the +// OpenTelemetry instrumentation component. +// +// In the future, we hope to stabilize and expose this API to allow plugins to +// inject labels of their choosing into metrics recorded. +type PluginOption interface { + // GetMetadata creates a MD with metadata exchange labels. + GetMetadata() metadata.MD + // GetLabels emits labels to be attached to metrics for the RPC that + // contains the provided incomingMetadata. + GetLabels(incomingMetadata metadata.MD) map[string]string +} diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/opentelemetry.go b/vendor/google.golang.org/grpc/stats/opentelemetry/opentelemetry.go new file mode 100644 index 0000000000..cc5ad387fb --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/opentelemetry/opentelemetry.go @@ -0,0 +1,390 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package opentelemetry implements opentelemetry instrumentation code for +// gRPC-Go clients and servers. +package opentelemetry + +import ( + "context" + "strings" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + estats "google.golang.org/grpc/experimental/stats" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + otelinternal "google.golang.org/grpc/stats/opentelemetry/internal" + + otelattribute "go.opentelemetry.io/otel/attribute" + otelmetric "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +func init() { + otelinternal.SetPluginOption = func(o *Options, po otelinternal.PluginOption) { + o.MetricsOptions.pluginOption = po + } +} + +var logger = grpclog.Component("otel-plugin") + +var canonicalString = internal.CanonicalString.(func(codes.Code) string) + +var joinDialOptions = internal.JoinDialOptions.(func(...grpc.DialOption) grpc.DialOption) + +// Options are the options for OpenTelemetry instrumentation. +type Options struct { + // MetricsOptions are the metrics options for OpenTelemetry instrumentation. + MetricsOptions MetricsOptions +} + +// MetricsOptions are the metrics options for OpenTelemetry instrumentation. +type MetricsOptions struct { + // MeterProvider is the MeterProvider instance that will be used to create + // instruments. To enable metrics collection, set a meter provider. If + // unset, no metrics will be recorded. Any implementation knobs (i.e. views, + // bounds) set in the MeterProvider take precedence over the API calls from + // this interface. (i.e. it will create default views for unset views). + MeterProvider otelmetric.MeterProvider + + // Metrics are the metrics to instrument. Will create instrument and record telemetry + // for corresponding metric supported by the client and server + // instrumentation components if applicable. If not set, the default metrics + // will be recorded. + Metrics *estats.Metrics + + // MethodAttributeFilter is to record the method name of RPCs handled by + // grpc.UnknownServiceHandler, but take care to limit the values allowed, as + // allowing too many will increase cardinality and could cause severe memory + // or performance problems. On Client Side, pass a + // grpc.StaticMethodCallOption as a call option into Invoke or NewStream. + // This only applies for server side metrics. + MethodAttributeFilter func(string) bool + + // OptionalLabels are labels received from LB Policies that this component + // should add to metrics that record after receiving incoming metadata. + OptionalLabels []string + + // pluginOption is used to get labels to attach to certain metrics, if set. + pluginOption otelinternal.PluginOption +} + +// DialOption returns a dial option which enables OpenTelemetry instrumentation +// code for a grpc.ClientConn. +// +// Client applications interested in instrumenting their grpc.ClientConn should +// pass the dial option returned from this function as a dial option to +// grpc.NewClient(). 
+// +// For the metrics supported by this instrumentation code, specify the client +// metrics to record in metrics options. Also provide an implementation of a +// MeterProvider. If the passed in Meter Provider does not have the view +// configured for an individual metric turned on, the API call in this component +// will create a default view for that metric. +func DialOption(o Options) grpc.DialOption { + csh := &clientStatsHandler{options: o} + csh.initializeMetrics() + return joinDialOptions(grpc.WithChainUnaryInterceptor(csh.unaryInterceptor), grpc.WithChainStreamInterceptor(csh.streamInterceptor), grpc.WithStatsHandler(csh)) +} + +var joinServerOptions = internal.JoinServerOptions.(func(...grpc.ServerOption) grpc.ServerOption) + +// ServerOption returns a server option which enables OpenTelemetry +// instrumentation code for a grpc.Server. +// +// Server applications interested in instrumenting their grpc.Server should pass +// the server option returned from this function as an argument to +// grpc.NewServer(). +// +// For the metrics supported by this instrumentation code, specify the server +// metrics to record in metrics options. Also provide an implementation of a +// MeterProvider. If the passed in Meter Provider does not have the view +// configured for an individual metric turned on, the API call in this component +// will create a default view for that metric. +func ServerOption(o Options) grpc.ServerOption { + ssh := &serverStatsHandler{options: o} + ssh.initializeMetrics() + return joinServerOptions(grpc.ChainUnaryInterceptor(ssh.unaryInterceptor), grpc.ChainStreamInterceptor(ssh.streamInterceptor), grpc.StatsHandler(ssh)) +} + +// callInfo is information pertaining to the lifespan of the RPC client side. +type callInfo struct { + target string + + method string +} + +type callInfoKey struct{} + +func setCallInfo(ctx context.Context, ci *callInfo) context.Context { + return context.WithValue(ctx, callInfoKey{}, ci) +} + +// getCallInfo returns the callInfo stored in the context, or nil +// if there isn't one. +func getCallInfo(ctx context.Context) *callInfo { + ci, _ := ctx.Value(callInfoKey{}).(*callInfo) + return ci +} + +// rpcInfo is RPC information scoped to the RPC attempt life span client side, +// and the RPC life span server side. +type rpcInfo struct { + ai *attemptInfo +} + +type rpcInfoKey struct{} + +func setRPCInfo(ctx context.Context, ri *rpcInfo) context.Context { + return context.WithValue(ctx, rpcInfoKey{}, ri) +} + +// getRPCInfo returns the rpcInfo stored in the context, or nil +// if there isn't one. +func getRPCInfo(ctx context.Context) *rpcInfo { + ri, _ := ctx.Value(rpcInfoKey{}).(*rpcInfo) + return ri +} + +func removeLeadingSlash(mn string) string { + return strings.TrimLeft(mn, "/") +} + +// attemptInfo is RPC information scoped to the RPC attempt life span client +// side, and the RPC life span server side. +type attemptInfo struct { + // access these counts atomically for hedging in the future: + // number of bytes after compression (within each message) from side (client + // || server). + sentCompressedBytes int64 + // number of compressed bytes received (within each message) received on + // side (client || server). 
+ recvCompressedBytes int64 + + startTime time.Time + method string + + pluginOptionLabels map[string]string // pluginOptionLabels to attach to metrics emitted + xdsLabels map[string]string +} + +type clientMetrics struct { + // "grpc.client.attempt.started" + attemptStarted otelmetric.Int64Counter + // "grpc.client.attempt.duration" + attemptDuration otelmetric.Float64Histogram + // "grpc.client.attempt.sent_total_compressed_message_size" + attemptSentTotalCompressedMessageSize otelmetric.Int64Histogram + // "grpc.client.attempt.rcvd_total_compressed_message_size" + attemptRcvdTotalCompressedMessageSize otelmetric.Int64Histogram + // "grpc.client.call.duration" + callDuration otelmetric.Float64Histogram +} + +type serverMetrics struct { + // "grpc.server.call.started" + callStarted otelmetric.Int64Counter + // "grpc.server.call.sent_total_compressed_message_size" + callSentTotalCompressedMessageSize otelmetric.Int64Histogram + // "grpc.server.call.rcvd_total_compressed_message_size" + callRcvdTotalCompressedMessageSize otelmetric.Int64Histogram + // "grpc.server.call.duration" + callDuration otelmetric.Float64Histogram +} + +func createInt64Counter(setOfMetrics map[estats.Metric]bool, metricName estats.Metric, meter otelmetric.Meter, options ...otelmetric.Int64CounterOption) otelmetric.Int64Counter { + if _, ok := setOfMetrics[metricName]; !ok { + return noop.Int64Counter{} + } + ret, err := meter.Int64Counter(string(metricName), options...) + if err != nil { + logger.Errorf("failed to register metric \"%v\", will not record: %v", metricName, err) + return noop.Int64Counter{} + } + return ret +} + +func createFloat64Counter(setOfMetrics map[estats.Metric]bool, metricName estats.Metric, meter otelmetric.Meter, options ...otelmetric.Float64CounterOption) otelmetric.Float64Counter { + if _, ok := setOfMetrics[metricName]; !ok { + return noop.Float64Counter{} + } + ret, err := meter.Float64Counter(string(metricName), options...) + if err != nil { + logger.Errorf("failed to register metric \"%v\", will not record: %v", metricName, err) + return noop.Float64Counter{} + } + return ret +} + +func createInt64Histogram(setOfMetrics map[estats.Metric]bool, metricName estats.Metric, meter otelmetric.Meter, options ...otelmetric.Int64HistogramOption) otelmetric.Int64Histogram { + if _, ok := setOfMetrics[metricName]; !ok { + return noop.Int64Histogram{} + } + ret, err := meter.Int64Histogram(string(metricName), options...) + if err != nil { + logger.Errorf("failed to register metric \"%v\", will not record: %v", metricName, err) + return noop.Int64Histogram{} + } + return ret +} + +func createFloat64Histogram(setOfMetrics map[estats.Metric]bool, metricName estats.Metric, meter otelmetric.Meter, options ...otelmetric.Float64HistogramOption) otelmetric.Float64Histogram { + if _, ok := setOfMetrics[metricName]; !ok { + return noop.Float64Histogram{} + } + ret, err := meter.Float64Histogram(string(metricName), options...) + if err != nil { + logger.Errorf("failed to register metric \"%v\", will not record: %v", metricName, err) + return noop.Float64Histogram{} + } + return ret +} + +func createInt64Gauge(setOfMetrics map[estats.Metric]bool, metricName estats.Metric, meter otelmetric.Meter, options ...otelmetric.Int64GaugeOption) otelmetric.Int64Gauge { + if _, ok := setOfMetrics[metricName]; !ok { + return noop.Int64Gauge{} + } + ret, err := meter.Int64Gauge(string(metricName), options...) 
+ if err != nil { + logger.Errorf("failed to register metric \"%v\", will not record: %v", metricName, err) + return noop.Int64Gauge{} + } + return ret +} + +func optionFromLabels(labelKeys []string, optionalLabelKeys []string, optionalLabels []string, labelVals ...string) otelmetric.MeasurementOption { + var attributes []otelattribute.KeyValue + + // Once it hits here lower level has guaranteed length of labelVals matches + // labelKeys + optionalLabelKeys. + for i, label := range labelKeys { + attributes = append(attributes, otelattribute.String(label, labelVals[i])) + } + + for i, label := range optionalLabelKeys { + for _, optLabel := range optionalLabels { // o(n) could build out a set but n is currently capped at < 5 + if label == optLabel { + attributes = append(attributes, otelattribute.String(label, labelVals[i+len(labelKeys)])) + } + } + } + return otelmetric.WithAttributeSet(otelattribute.NewSet(attributes...)) +} + +// registryMetrics implements MetricsRecorder for the client and server stats +// handlers. +type registryMetrics struct { + intCounts map[*estats.MetricDescriptor]otelmetric.Int64Counter + floatCounts map[*estats.MetricDescriptor]otelmetric.Float64Counter + intHistos map[*estats.MetricDescriptor]otelmetric.Int64Histogram + floatHistos map[*estats.MetricDescriptor]otelmetric.Float64Histogram + intGauges map[*estats.MetricDescriptor]otelmetric.Int64Gauge + + optionalLabels []string +} + +func (rm *registryMetrics) registerMetrics(metrics *estats.Metrics, meter otelmetric.Meter) { + rm.intCounts = make(map[*estats.MetricDescriptor]otelmetric.Int64Counter) + rm.floatCounts = make(map[*estats.MetricDescriptor]otelmetric.Float64Counter) + rm.intHistos = make(map[*estats.MetricDescriptor]otelmetric.Int64Histogram) + rm.floatHistos = make(map[*estats.MetricDescriptor]otelmetric.Float64Histogram) + rm.intGauges = make(map[*estats.MetricDescriptor]otelmetric.Int64Gauge) + + for metric := range metrics.Metrics() { + desc := estats.DescriptorForMetric(metric) + if desc == nil { + // Either the metric was per call or the metric is not registered. + // Thus, if this component ever receives the desc as a handle in + // record it will be a no-op. 
+ continue + } + switch desc.Type { + case estats.MetricTypeIntCount: + rm.intCounts[desc] = createInt64Counter(metrics.Metrics(), desc.Name, meter, otelmetric.WithUnit(desc.Unit), otelmetric.WithDescription(desc.Description)) + case estats.MetricTypeFloatCount: + rm.floatCounts[desc] = createFloat64Counter(metrics.Metrics(), desc.Name, meter, otelmetric.WithUnit(desc.Unit), otelmetric.WithDescription(desc.Description)) + case estats.MetricTypeIntHisto: + rm.intHistos[desc] = createInt64Histogram(metrics.Metrics(), desc.Name, meter, otelmetric.WithUnit(desc.Unit), otelmetric.WithDescription(desc.Description), otelmetric.WithExplicitBucketBoundaries(desc.Bounds...)) + case estats.MetricTypeFloatHisto: + rm.floatHistos[desc] = createFloat64Histogram(metrics.Metrics(), desc.Name, meter, otelmetric.WithUnit(desc.Unit), otelmetric.WithDescription(desc.Description), otelmetric.WithExplicitBucketBoundaries(desc.Bounds...)) + case estats.MetricTypeIntGauge: + rm.intGauges[desc] = createInt64Gauge(metrics.Metrics(), desc.Name, meter, otelmetric.WithUnit(desc.Unit), otelmetric.WithDescription(desc.Description)) + } + } +} + +func (rm *registryMetrics) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) { + desc := handle.Descriptor() + if ic, ok := rm.intCounts[desc]; ok { + ao := optionFromLabels(desc.Labels, desc.OptionalLabels, rm.optionalLabels, labels...) + ic.Add(context.TODO(), incr, ao) + } +} + +func (rm *registryMetrics) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) { + desc := handle.Descriptor() + if fc, ok := rm.floatCounts[desc]; ok { + ao := optionFromLabels(desc.Labels, desc.OptionalLabels, rm.optionalLabels, labels...) + fc.Add(context.TODO(), incr, ao) + } +} + +func (rm *registryMetrics) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) { + desc := handle.Descriptor() + if ih, ok := rm.intHistos[desc]; ok { + ao := optionFromLabels(desc.Labels, desc.OptionalLabels, rm.optionalLabels, labels...) + ih.Record(context.TODO(), incr, ao) + } +} + +func (rm *registryMetrics) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) { + desc := handle.Descriptor() + if fh, ok := rm.floatHistos[desc]; ok { + ao := optionFromLabels(desc.Labels, desc.OptionalLabels, rm.optionalLabels, labels...) + fh.Record(context.TODO(), incr, ao) + } +} + +func (rm *registryMetrics) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) { + desc := handle.Descriptor() + if ig, ok := rm.intGauges[desc]; ok { + ao := optionFromLabels(desc.Labels, desc.OptionalLabels, rm.optionalLabels, labels...) + ig.Record(context.TODO(), incr, ao) + } +} + +// Users of this component should use these bucket boundaries as part of their +// SDK MeterProvider passed in. This component sends this as "advice" to the +// API, which works, however this stability is not guaranteed, so for safety the +// SDK Meter Provider provided should set these bounds for corresponding +// metrics. +var ( + // DefaultLatencyBounds are the default bounds for latency metrics. + DefaultLatencyBounds = []float64{0, 0.00001, 0.00005, 0.0001, 0.0003, 0.0006, 0.0008, 0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.008, 0.01, 0.013, 0.016, 0.02, 0.025, 0.03, 0.04, 0.05, 0.065, 0.08, 0.1, 0.13, 0.16, 0.2, 0.25, 0.3, 0.4, 0.5, 0.65, 0.8, 1, 2, 5, 10, 20, 50, 100} // provide "advice" through API, SDK should set this too + // DefaultSizeBounds are the default bounds for metrics which record size. 
+ DefaultSizeBounds = []float64{0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296} + // defaultPerCallMetrics are the default metrics provided by this module. + defaultPerCallMetrics = estats.NewMetrics(ClientAttemptStarted, ClientAttemptDuration, ClientAttemptSentCompressedTotalMessageSize, ClientAttemptRcvdCompressedTotalMessageSize, ClientCallDuration, ServerCallStarted, ServerCallSentCompressedTotalMessageSize, ServerCallRcvdCompressedTotalMessageSize, ServerCallDuration) +) + +// DefaultMetrics returns a set of default OpenTelemetry metrics. +// +// This should only be invoked after init time. +func DefaultMetrics() *estats.Metrics { + return defaultPerCallMetrics.Join(estats.DefaultMetrics) +} diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/server_metrics.go b/vendor/google.golang.org/grpc/stats/opentelemetry/server_metrics.go new file mode 100644 index 0000000000..eaea559b2c --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/opentelemetry/server_metrics.go @@ -0,0 +1,278 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package opentelemetry + +import ( + "context" + "sync/atomic" + "time" + + "google.golang.org/grpc" + estats "google.golang.org/grpc/experimental/stats" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + + otelattribute "go.opentelemetry.io/otel/attribute" + otelmetric "go.opentelemetry.io/otel/metric" +) + +type serverStatsHandler struct { + estats.MetricsRecorder + options Options + serverMetrics serverMetrics +} + +func (h *serverStatsHandler) initializeMetrics() { + // Will set no metrics to record, logically making this stats handler a + // no-op. 
+ if h.options.MetricsOptions.MeterProvider == nil { + return + } + + meter := h.options.MetricsOptions.MeterProvider.Meter("grpc-go", otelmetric.WithInstrumentationVersion(grpc.Version)) + if meter == nil { + return + } + metrics := h.options.MetricsOptions.Metrics + if metrics == nil { + metrics = DefaultMetrics() + } + + h.serverMetrics.callStarted = createInt64Counter(metrics.Metrics(), "grpc.server.call.started", meter, otelmetric.WithUnit("call"), otelmetric.WithDescription("Number of server calls started.")) + h.serverMetrics.callSentTotalCompressedMessageSize = createInt64Histogram(metrics.Metrics(), "grpc.server.call.sent_total_compressed_message_size", meter, otelmetric.WithUnit("By"), otelmetric.WithDescription("Compressed message bytes sent per server call."), otelmetric.WithExplicitBucketBoundaries(DefaultSizeBounds...)) + h.serverMetrics.callRcvdTotalCompressedMessageSize = createInt64Histogram(metrics.Metrics(), "grpc.server.call.rcvd_total_compressed_message_size", meter, otelmetric.WithUnit("By"), otelmetric.WithDescription("Compressed message bytes received per server call."), otelmetric.WithExplicitBucketBoundaries(DefaultSizeBounds...)) + h.serverMetrics.callDuration = createFloat64Histogram(metrics.Metrics(), "grpc.server.call.duration", meter, otelmetric.WithUnit("s"), otelmetric.WithDescription("End-to-end time taken to complete a call from server transport's perspective."), otelmetric.WithExplicitBucketBoundaries(DefaultLatencyBounds...)) + + rm := ®istryMetrics{ + optionalLabels: h.options.MetricsOptions.OptionalLabels, + } + h.MetricsRecorder = rm + rm.registerMetrics(metrics, meter) +} + +// attachLabelsTransportStream intercepts SetHeader and SendHeader calls of the +// underlying ServerTransportStream to attach metadataExchangeLabels. +type attachLabelsTransportStream struct { + grpc.ServerTransportStream + + attachedLabels atomic.Bool + metadataExchangeLabels metadata.MD +} + +func (s *attachLabelsTransportStream) SetHeader(md metadata.MD) error { + if !s.attachedLabels.Swap(true) { + s.ServerTransportStream.SetHeader(s.metadataExchangeLabels) + } + return s.ServerTransportStream.SetHeader(md) +} + +func (s *attachLabelsTransportStream) SendHeader(md metadata.MD) error { + if !s.attachedLabels.Swap(true) { + s.ServerTransportStream.SetHeader(s.metadataExchangeLabels) + } + + return s.ServerTransportStream.SendHeader(md) +} + +func (h *serverStatsHandler) unaryInterceptor(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { + var metadataExchangeLabels metadata.MD + if h.options.MetricsOptions.pluginOption != nil { + metadataExchangeLabels = h.options.MetricsOptions.pluginOption.GetMetadata() + } + + sts := grpc.ServerTransportStreamFromContext(ctx) + + alts := &attachLabelsTransportStream{ + ServerTransportStream: sts, + metadataExchangeLabels: metadataExchangeLabels, + } + ctx = grpc.NewContextWithServerTransportStream(ctx, alts) + + res, err := handler(ctx, req) + if err != nil { // maybe trailers-only if headers haven't already been sent + if !alts.attachedLabels.Swap(true) { + alts.SetTrailer(alts.metadataExchangeLabels) + } + } else { // headers will be written; a message was sent + if !alts.attachedLabels.Swap(true) { + alts.SetHeader(alts.metadataExchangeLabels) + } + } + + return res, err +} + +// attachLabelsStream embeds a grpc.ServerStream, and intercepts the +// SetHeader/SendHeader/SendMsg/SendTrailer call to attach metadata exchange +// labels. 
+type attachLabelsStream struct { + grpc.ServerStream + + attachedLabels atomic.Bool + metadataExchangeLabels metadata.MD +} + +func (s *attachLabelsStream) SetHeader(md metadata.MD) error { + if !s.attachedLabels.Swap(true) { + s.ServerStream.SetHeader(s.metadataExchangeLabels) + } + + return s.ServerStream.SetHeader(md) +} + +func (s *attachLabelsStream) SendHeader(md metadata.MD) error { + if !s.attachedLabels.Swap(true) { + s.ServerStream.SetHeader(s.metadataExchangeLabels) + } + + return s.ServerStream.SendHeader(md) +} + +func (s *attachLabelsStream) SendMsg(m any) error { + if !s.attachedLabels.Swap(true) { + s.ServerStream.SetHeader(s.metadataExchangeLabels) + } + return s.ServerStream.SendMsg(m) +} + +func (h *serverStatsHandler) streamInterceptor(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + var metadataExchangeLabels metadata.MD + if h.options.MetricsOptions.pluginOption != nil { + metadataExchangeLabels = h.options.MetricsOptions.pluginOption.GetMetadata() + } + als := &attachLabelsStream{ + ServerStream: ss, + metadataExchangeLabels: metadataExchangeLabels, + } + err := handler(srv, als) + + // Add metadata exchange labels to trailers if never sent in headers, + // irrespective of whether or not RPC failed. + if !als.attachedLabels.Load() { + als.SetTrailer(als.metadataExchangeLabels) + } + return err +} + +// TagConn exists to satisfy stats.Handler. +func (h *serverStatsHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { + return ctx +} + +// HandleConn exists to satisfy stats.Handler. +func (h *serverStatsHandler) HandleConn(context.Context, stats.ConnStats) {} + +// TagRPC implements per RPC context management. +func (h *serverStatsHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + method := info.FullMethodName + if h.options.MetricsOptions.MethodAttributeFilter != nil { + if !h.options.MetricsOptions.MethodAttributeFilter(method) { + method = "other" + } + } + server := internal.ServerFromContext.(func(context.Context) *grpc.Server)(ctx) + if server == nil { // Shouldn't happen, defensive programming. + logger.Error("ctx passed into server side stats handler has no grpc server ref") + method = "other" + } else { + isRegisteredMethod := internal.IsRegisteredMethod.(func(*grpc.Server, string) bool) + if !isRegisteredMethod(server, method) { + method = "other" + } + } + + ai := &attemptInfo{ + startTime: time.Now(), + method: removeLeadingSlash(method), + } + ri := &rpcInfo{ + ai: ai, + } + return setRPCInfo(ctx, ri) +} + +// HandleRPC implements per RPC tracing and stats implementation. +func (h *serverStatsHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { + ri := getRPCInfo(ctx) + if ri == nil { + logger.Error("ctx passed into server side stats handler metrics event handling has no server call data present") + return + } + h.processRPCData(ctx, rs, ri.ai) +} + +func (h *serverStatsHandler) processRPCData(ctx context.Context, s stats.RPCStats, ai *attemptInfo) { + switch st := s.(type) { + case *stats.InHeader: + if ai.pluginOptionLabels == nil && h.options.MetricsOptions.pluginOption != nil { + labels := h.options.MetricsOptions.pluginOption.GetLabels(st.Header) + if labels == nil { + labels = map[string]string{} // Shouldn't return a nil map. Make it empty if so to ignore future Get Calls for this Attempt. 
+ } + ai.pluginOptionLabels = labels + } + attrs := otelmetric.WithAttributeSet(otelattribute.NewSet( + otelattribute.String("grpc.method", ai.method), + )) + h.serverMetrics.callStarted.Add(ctx, 1, attrs) + case *stats.OutPayload: + atomic.AddInt64(&ai.sentCompressedBytes, int64(st.CompressedLength)) + case *stats.InPayload: + atomic.AddInt64(&ai.recvCompressedBytes, int64(st.CompressedLength)) + case *stats.End: + h.processRPCEnd(ctx, ai, st) + default: + } +} + +func (h *serverStatsHandler) processRPCEnd(ctx context.Context, ai *attemptInfo, e *stats.End) { + latency := float64(time.Since(ai.startTime)) / float64(time.Second) + st := "OK" + if e.Error != nil { + s, _ := status.FromError(e.Error) + st = canonicalString(s.Code()) + } + attributes := []otelattribute.KeyValue{ + otelattribute.String("grpc.method", ai.method), + otelattribute.String("grpc.status", st), + } + for k, v := range ai.pluginOptionLabels { + attributes = append(attributes, otelattribute.String(k, v)) + } + + // Allocate vararg slice once. + opts := []otelmetric.RecordOption{otelmetric.WithAttributeSet(otelattribute.NewSet(attributes...))} + h.serverMetrics.callDuration.Record(ctx, latency, opts...) + h.serverMetrics.callSentTotalCompressedMessageSize.Record(ctx, atomic.LoadInt64(&ai.sentCompressedBytes), opts...) + h.serverMetrics.callRcvdTotalCompressedMessageSize.Record(ctx, atomic.LoadInt64(&ai.recvCompressedBytes), opts...) +} + +const ( + // ServerCallStarted is the number of server calls started. + ServerCallStarted estats.Metric = "grpc.server.call.started" + // ServerCallSentCompressedTotalMessageSize is the compressed message bytes + // sent per server call. + ServerCallSentCompressedTotalMessageSize estats.Metric = "grpc.server.call.sent_total_compressed_message_size" + // ServerCallRcvdCompressedTotalMessageSize is the compressed message bytes + // received per server call. + ServerCallRcvdCompressedTotalMessageSize estats.Metric = "grpc.server.call.rcvd_total_compressed_message_size" + // ServerCallDuration is the end-to-end time taken to complete a call from + // server transport's perspective. + ServerCallDuration estats.Metric = "grpc.server.call.duration" +) diff --git a/vendor/google.golang.org/grpc/xds/bootstrap/bootstrap.go b/vendor/google.golang.org/grpc/xds/bootstrap/bootstrap.go new file mode 100644 index 0000000000..ef55ff0c02 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/bootstrap/bootstrap.go @@ -0,0 +1,66 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package bootstrap provides the functionality to register possible options +// for aspects of the xDS client through the bootstrap file. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed +// in a later release. +package bootstrap + +import ( + "encoding/json" + + "google.golang.org/grpc/credentials" +) + +// registry is a map from credential type name to Credential builder. 
+var registry = make(map[string]Credentials) + +// Credentials interface encapsulates a credentials.Bundle builder +// that can be used for communicating with the xDS Management server. +type Credentials interface { + // Build returns a credential bundle associated with this credential, and + // a function to cleans up additional resources associated with this bundle + // when it is no longer needed. + Build(config json.RawMessage) (credentials.Bundle, func(), error) + // Name returns the credential name associated with this credential. + Name() string +} + +// RegisterCredentials registers Credentials used for connecting to the xds +// management server. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple credentials are +// registered with the same name, the one registered last will take effect. +func RegisterCredentials(c Credentials) { + registry[c.Name()] = c +} + +// GetCredentials returns the credentials associated with a given name. +// If no credentials are registered with the name, nil will be returned. +func GetCredentials(name string) Credentials { + if c, ok := registry[name]; ok { + return c + } + + return nil +} diff --git a/vendor/google.golang.org/grpc/xds/bootstrap/credentials.go b/vendor/google.golang.org/grpc/xds/bootstrap/credentials.go new file mode 100644 index 0000000000..578e127897 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/bootstrap/credentials.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package bootstrap + +import ( + "encoding/json" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/google" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/xds/bootstrap/tlscreds" +) + +func init() { + RegisterCredentials(&insecureCredsBuilder{}) + RegisterCredentials(&googleDefaultCredsBuilder{}) + RegisterCredentials(&tlsCredsBuilder{}) +} + +// insecureCredsBuilder implements the `Credentials` interface defined in +// package `xds/bootstrap` and encapsulates an insecure credential. +type insecureCredsBuilder struct{} + +func (i *insecureCredsBuilder) Build(json.RawMessage) (credentials.Bundle, func(), error) { + return insecure.NewBundle(), func() {}, nil +} + +func (i *insecureCredsBuilder) Name() string { + return "insecure" +} + +// tlsCredsBuilder implements the `Credentials` interface defined in +// package `xds/bootstrap` and encapsulates a TLS credential. +type tlsCredsBuilder struct{} + +func (t *tlsCredsBuilder) Build(config json.RawMessage) (credentials.Bundle, func(), error) { + return tlscreds.NewBundle(config) +} + +func (t *tlsCredsBuilder) Name() string { + return "tls" +} + +// googleDefaultCredsBuilder implements the `Credentials` interface defined in +// package `xds/bootstrap` and encapsulates a Google Default credential. 
+type googleDefaultCredsBuilder struct{} + +func (d *googleDefaultCredsBuilder) Build(json.RawMessage) (credentials.Bundle, func(), error) { + return google.NewDefaultCredentials(), func() {}, nil +} + +func (d *googleDefaultCredsBuilder) Name() string { + return "google_default" +} diff --git a/vendor/google.golang.org/grpc/xds/csds/csds.go b/vendor/google.golang.org/grpc/xds/csds/csds.go new file mode 100644 index 0000000000..3d8398a72f --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/csds/csds.go @@ -0,0 +1,108 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package csds implements features to dump the status (xDS responses) the +// xds_client is using. +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a later +// release. +package csds + +import ( + "context" + "fmt" + "io" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/status" + "google.golang.org/grpc/xds/internal/xdsclient" + + v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" + v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" +) + +var logger = grpclog.Component("xds") + +const prefix = "[csds-server %p] " + +func prefixLogger(s *ClientStatusDiscoveryServer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, s)) +} + +// ClientStatusDiscoveryServer provides an implementation of the Client Status +// Discovery Service (CSDS) for exposing the xDS config of a given client. See +// https://github.com/envoyproxy/envoy/blob/main/api/envoy/service/status/v3/csds.proto. +// +// For more details about the gRPC implementation of CSDS, refer to gRPC A40 at: +// https://github.com/grpc/proposal/blob/master/A40-csds-support.md. +type ClientStatusDiscoveryServer struct { + logger *internalgrpclog.PrefixLogger +} + +// NewClientStatusDiscoveryServer returns an implementation of the CSDS server +// that can be registered on a gRPC server. +func NewClientStatusDiscoveryServer() (*ClientStatusDiscoveryServer, error) { + s := &ClientStatusDiscoveryServer{} + s.logger = prefixLogger(s) + s.logger.Infof("Created CSDS server") + return s, nil +} + +// StreamClientStatus implements interface ClientStatusDiscoveryServiceServer. +func (s *ClientStatusDiscoveryServer) StreamClientStatus(stream v3statusgrpc.ClientStatusDiscoveryService_StreamClientStatusServer) error { + for { + req, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + resp, err := s.buildClientStatusRespForReq(req) + if err != nil { + return err + } + if err := stream.Send(resp); err != nil { + return err + } + } +} + +// FetchClientStatus implements interface ClientStatusDiscoveryServiceServer. 
+func (s *ClientStatusDiscoveryServer) FetchClientStatus(_ context.Context, req *v3statuspb.ClientStatusRequest) (*v3statuspb.ClientStatusResponse, error) { + return s.buildClientStatusRespForReq(req) +} + +// buildClientStatusRespForReq fetches the status of xDS resources from the +// xdsclient, and returns the response to be sent back to the csds client. +// +// If it returns an error, the error is a status error. +func (s *ClientStatusDiscoveryServer) buildClientStatusRespForReq(req *v3statuspb.ClientStatusRequest) (*v3statuspb.ClientStatusResponse, error) { + // Field NodeMatchers is unsupported, by design + // https://github.com/grpc/proposal/blob/master/A40-csds-support.md#detail-node-matching. + if len(req.NodeMatchers) != 0 { + return nil, status.Errorf(codes.InvalidArgument, "node_matchers are not supported, request contains node_matchers: %v", req.NodeMatchers) + } + + return xdsclient.DumpResources(), nil +} + +// Close cleans up the resources. +func (s *ClientStatusDiscoveryServer) Close() {} diff --git a/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go b/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go new file mode 100644 index 0000000000..936bf2da32 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go @@ -0,0 +1,160 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package googledirectpath implements a resolver that configures xds to make +// cloud to prod directpath connection. +// +// It's a combo of DNS and xDS resolvers. It delegates to DNS if +// - not on GCE, or +// - xDS bootstrap env var is set (so this client needs to do normal xDS, not +// direct path, and clients with this scheme is not part of the xDS mesh). +package googledirectpath + +import ( + "encoding/json" + "fmt" + "math/rand" + "net/url" + "time" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/googlecloud" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/resolver" + + _ "google.golang.org/grpc/xds" // To register xds resolvers and balancers. +) + +const ( + c2pScheme = "google-c2p" + c2pAuthority = "traffic-director-c2p.xds.googleapis.com" + + tdURL = "dns:///directpath-pa.googleapis.com" + zoneURL = "http://metadata.google.internal/computeMetadata/v1/instance/zone" + ipv6URL = "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ipv6s" + ipv6CapableMetadataName = "TRAFFICDIRECTOR_DIRECTPATH_C2P_IPV6_CAPABLE" + httpReqTimeout = 10 * time.Second + + logPrefix = "[google-c2p-resolver]" + dnsName, xdsName = "dns", "xds" +) + +// For overriding in unittests. 
+var ( + onGCE = googlecloud.OnGCE + randInt = rand.Int + logger = internalgrpclog.NewPrefixLogger(grpclog.Component("directpath"), logPrefix) +) + +func init() { + resolver.Register(c2pResolverBuilder{}) +} + +type c2pResolverBuilder struct{} + +func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + if t.URL.Host != "" { + return nil, fmt.Errorf("google-c2p URI scheme does not support authorities") + } + + if !runDirectPath() { + // If not xDS, fallback to DNS. + t.URL.Scheme = dnsName + return resolver.Get(dnsName).Build(t, cc, opts) + } + + // Note that the following calls to getZone() and getIPv6Capable() does I/O, + // and has 10 seconds timeout each. + // + // This should be fine in most of the cases. In certain error cases, this + // could block Dial() for up to 10 seconds (each blocking call has its own + // goroutine). + zoneCh, ipv6CapableCh := make(chan string), make(chan bool) + go func() { zoneCh <- getZone(httpReqTimeout) }() + go func() { ipv6CapableCh <- getIPv6Capable(httpReqTimeout) }() + + xdsServerURI := envconfig.C2PResolverTestOnlyTrafficDirectorURI + if xdsServerURI == "" { + xdsServerURI = tdURL + } + + nodeCfg := newNodeConfig(<-zoneCh, <-ipv6CapableCh) + xdsServerCfg := newXdsServerConfig(xdsServerURI) + authoritiesCfg := newAuthoritiesConfig(xdsServerCfg) + + cfg := map[string]any{ + "xds_servers": []any{xdsServerCfg}, + "client_default_listener_resource_name_template": "%s", + "authorities": authoritiesCfg, + "node": nodeCfg, + } + cfgJSON, err := json.Marshal(cfg) + if err != nil { + return nil, fmt.Errorf("failed to marshal bootstrap configuration: %v", err) + } + if err := bootstrap.SetFallbackBootstrapConfig(cfgJSON); err != nil { + return nil, fmt.Errorf("failed to set fallback bootstrap configuration: %v", err) + } + + t = resolver.Target{ + URL: url.URL{ + Scheme: xdsName, + Host: c2pAuthority, + Path: t.URL.Path, + }, + } + return resolver.Get(xdsName).Build(t, cc, opts) +} + +func (b c2pResolverBuilder) Scheme() string { + return c2pScheme +} + +func newNodeConfig(zone string, ipv6Capable bool) map[string]any { + node := map[string]any{ + "id": fmt.Sprintf("C2P-%d", randInt()), + "locality": map[string]any{"zone": zone}, + } + if ipv6Capable { + node["metadata"] = map[string]any{ipv6CapableMetadataName: true} + } + return node +} + +func newAuthoritiesConfig(serverCfg map[string]any) map[string]any { + return map[string]any{ + c2pAuthority: map[string]any{"xds_servers": []any{serverCfg}}, + } +} + +func newXdsServerConfig(uri string) map[string]any { + return map[string]any{ + "server_uri": uri, + "channel_creds": []map[string]any{{"type": "google_default"}}, + "server_features": []any{"ignore_resource_deletion"}, + } +} + +// runDirectPath returns whether this resolver should use direct path. +// +// direct path is enabled if this client is running on GCE. +func runDirectPath() bool { + return onGCE() +} diff --git a/vendor/google.golang.org/grpc/xds/googledirectpath/utils.go b/vendor/google.golang.org/grpc/xds/googledirectpath/utils.go new file mode 100644 index 0000000000..32a28a1677 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/googledirectpath/utils.go @@ -0,0 +1,101 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package googledirectpath + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "sync" + "time" +) + +func getFromMetadata(timeout time.Duration, urlStr string) ([]byte, error) { + parsedURL, err := url.Parse(urlStr) + if err != nil { + return nil, err + } + client := &http.Client{Timeout: timeout} + req := &http.Request{ + Method: http.MethodGet, + URL: parsedURL, + Header: http.Header{"Metadata-Flavor": {"Google"}}, + } + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("failed communicating with metadata server: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("metadata server returned resp with non-OK: %v", resp) + } + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed reading from metadata server: %v", err) + } + return body, nil +} + +var ( + zone string + zoneOnce sync.Once +) + +// Defined as var to be overridden in tests. +var getZone = func(timeout time.Duration) string { + zoneOnce.Do(func() { + qualifiedZone, err := getFromMetadata(timeout, zoneURL) + if err != nil { + logger.Warningf("could not discover instance zone: %v", err) + return + } + i := bytes.LastIndexByte(qualifiedZone, '/') + if i == -1 { + logger.Warningf("could not parse zone from metadata server: %s", qualifiedZone) + return + } + zone = string(qualifiedZone[i+1:]) + }) + return zone +} + +var ( + ipv6Capable bool + ipv6CapableOnce sync.Once +) + +// Defined as var to be overridden in tests. +var getIPv6Capable = func(timeout time.Duration) bool { + ipv6CapableOnce.Do(func() { + addr, err := getFromMetadata(timeout, ipv6URL) + if err != nil { + logger.Warningf("could not discover ipv6 capability: %v", err) + return + } + if trimmedAddr := strings.TrimSpace(string(addr)); trimmedAddr == "" { + logger.Warningf("metadata server returned empty ipv6 address") + return + } + ipv6Capable = true + }) + return ipv6Capable +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/balancer.go new file mode 100644 index 0000000000..ff27af026d --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/balancer.go @@ -0,0 +1,31 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package balancer installs all the xds balancers. 
+package balancer + +import ( + _ "google.golang.org/grpc/balancer/leastrequest" // Register the least_request_experimental balancer + _ "google.golang.org/grpc/balancer/weightedtarget" // Register the weighted_target balancer + _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // Register the CDS balancer + _ "google.golang.org/grpc/xds/internal/balancer/clusterimpl" // Register the xds_cluster_impl balancer + _ "google.golang.org/grpc/xds/internal/balancer/clustermanager" // Register the xds_cluster_manager balancer + _ "google.golang.org/grpc/xds/internal/balancer/clusterresolver" // Register the xds_cluster_resolver balancer + _ "google.golang.org/grpc/xds/internal/balancer/outlierdetection" // Register the outlier_detection balancer + _ "google.golang.org/grpc/xds/internal/balancer/priority" // Register the priority balancer +) diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go new file mode 100644 index 0000000000..9a112e2769 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -0,0 +1,671 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package cdsbalancer implements a balancer to handle CDS responses. +package cdsbalancer + +import ( + "context" + "encoding/json" + "fmt" + "sync/atomic" + "unsafe" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/tls/certprovider" + "google.golang.org/grpc/internal/balancer/nop" + xdsinternal "google.golang.org/grpc/internal/credentials/xds" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/clusterresolver" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +const ( + cdsName = "cds_experimental" + aggregateClusterMaxDepth = 16 +) + +var ( + errBalancerClosed = fmt.Errorf("cds_experimental LB policy is closed") + errExceedsMaxDepth = fmt.Errorf("aggregate cluster graph exceeds max depth (%d)", aggregateClusterMaxDepth) + + // newChildBalancer is a helper function to build a new cluster_resolver + // balancer and will be overridden in unittests. + newChildBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions) (balancer.Balancer, error) { + builder := balancer.Get(clusterresolver.Name) + if builder == nil { + return nil, fmt.Errorf("xds: no balancer builder with name %v", clusterresolver.Name) + } + // We directly pass the parent clientConn to the underlying + // cluster_resolver balancer because the cdsBalancer does not deal with + // subConns. 
+ return builder.Build(cc, opts), nil + } + buildProvider = buildProviderFunc +) + +func init() { + balancer.Register(bb{}) +} + +// bb implements the balancer.Builder interface to help build a cdsBalancer. +// It also implements the balancer.ConfigParser interface to help parse the +// JSON service config, to be passed to the cdsBalancer. +type bb struct{} + +// Build creates a new CDS balancer with the ClientConn. +func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + builder := balancer.Get(clusterresolver.Name) + if builder == nil { + // Shouldn't happen, registered through imported Cluster Resolver, + // defensive programming. + logger.Errorf("%q LB policy is needed but not registered", clusterresolver.Name) + return nop.NewBalancer(cc, fmt.Errorf("%q LB policy is needed but not registered", clusterresolver.Name)) + } + parser, ok := builder.(balancer.ConfigParser) + if !ok { + // Shouldn't happen, imported Cluster Resolver builder has this method. + logger.Errorf("%q LB policy does not implement a config parser", clusterresolver.Name) + return nop.NewBalancer(cc, fmt.Errorf("%q LB policy does not implement a config parser", clusterresolver.Name)) + } + + ctx, cancel := context.WithCancel(context.Background()) + hi := xdsinternal.NewHandshakeInfo(nil, nil, nil, false) + xdsHIPtr := unsafe.Pointer(hi) + b := &cdsBalancer{ + bOpts: opts, + childConfigParser: parser, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + xdsHIPtr: &xdsHIPtr, + watchers: make(map[string]*watcherState), + } + b.ccw = &ccWrapper{ + ClientConn: cc, + xdsHIPtr: b.xdsHIPtr, + } + b.logger = prefixLogger(b) + b.logger.Infof("Created") + + var creds credentials.TransportCredentials + switch { + case opts.DialCreds != nil: + creds = opts.DialCreds + case opts.CredsBundle != nil: + creds = opts.CredsBundle.TransportCredentials() + } + if xc, ok := creds.(interface{ UsesXDS() bool }); ok && xc.UsesXDS() { + b.xdsCredsInUse = true + } + b.logger.Infof("xDS credentials in use: %v", b.xdsCredsInUse) + return b +} + +// Name returns the name of balancers built by this builder. +func (bb) Name() string { + return cdsName +} + +// lbConfig represents the loadBalancingConfig section of the service config +// for the cdsBalancer. +type lbConfig struct { + serviceconfig.LoadBalancingConfig + ClusterName string `json:"Cluster"` +} + +// ParseConfig parses the JSON load balancer config provided into an +// internal form or returns an error if the config is invalid. +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg lbConfig + if err := json.Unmarshal(c, &cfg); err != nil { + return nil, fmt.Errorf("xds: unable to unmarshal lbconfig: %s, error: %v", string(c), err) + } + return &cfg, nil +} + +// cdsBalancer implements a CDS based LB policy. It instantiates a +// cluster_resolver balancer to further resolve the serviceName received from +// CDS, into localities and endpoints. Implements the balancer.Balancer +// interface which is exposed to gRPC and implements the balancer.ClientConn +// interface which is exposed to the cluster_resolver balancer. +type cdsBalancer struct { + // The following fields are initialized at build time and are either + // read-only after that or provide their own synchronization, and therefore + // do not need to be guarded by a mutex. + ccw *ccWrapper // ClientConn interface passed to child LB. + bOpts balancer.BuildOptions // BuildOptions passed to child LB. 
+ childConfigParser balancer.ConfigParser // Config parser for cluster_resolver LB policy. + logger *grpclog.PrefixLogger // Prefix logger for all logging. + xdsCredsInUse bool + + xdsHIPtr *unsafe.Pointer // Accessed atomically. + + // The serializer and its cancel func are initialized at build time, and the + // rest of the fields here are only accessed from serializer callbacks (or + // from balancer.Balancer methods, which themselves are guaranteed to be + // mutually exclusive) and hence do not need to be guarded by a mutex. + serializer *grpcsync.CallbackSerializer // Serializes updates from gRPC and xDS client. + serializerCancel context.CancelFunc // Stops the above serializer. + childLB balancer.Balancer // Child policy, built upon resolution of the cluster graph. + xdsClient xdsclient.XDSClient // xDS client to watch Cluster resources. + watchers map[string]*watcherState // Set of watchers and associated state, keyed by cluster name. + lbCfg *lbConfig // Current load balancing configuration. + + // The certificate providers are cached here to that they can be closed when + // a new provider is to be created. + cachedRoot certprovider.Provider + cachedIdentity certprovider.Provider +} + +// handleSecurityConfig processes the security configuration received from the +// management server, creates appropriate certificate provider plugins, and +// updates the HandshakeInfo which is added as an address attribute in +// NewSubConn() calls. +// +// Only executed in the context of a serializer callback. +func (b *cdsBalancer) handleSecurityConfig(config *xdsresource.SecurityConfig) error { + // If xdsCredentials are not in use, i.e, the user did not want to get + // security configuration from an xDS server, we should not be acting on the + // received security config here. Doing so poses a security threat. + if !b.xdsCredsInUse { + return nil + } + var xdsHI *xdsinternal.HandshakeInfo + + // Security config being nil is a valid case where the management server has + // not sent any security configuration. The xdsCredentials implementation + // handles this by delegating to its fallback credentials. + if config == nil { + // We need to explicitly set the fields to nil here since this might be + // a case of switching from a good security configuration to an empty + // one where fallback credentials are to be used. + xdsHI = xdsinternal.NewHandshakeInfo(nil, nil, nil, false) + atomic.StorePointer(b.xdsHIPtr, unsafe.Pointer(xdsHI)) + return nil + + } + + // A root provider is required whether we are using TLS or mTLS. + cpc := b.xdsClient.BootstrapConfig().CertProviderConfigs() + rootProvider, err := buildProvider(cpc, config.RootInstanceName, config.RootCertName, false, true) + if err != nil { + return err + } + + // The identity provider is only present when using mTLS. + var identityProvider certprovider.Provider + if name, cert := config.IdentityInstanceName, config.IdentityCertName; name != "" { + var err error + identityProvider, err = buildProvider(cpc, name, cert, true, false) + if err != nil { + return err + } + } + + // Close the old providers and cache the new ones. 
+ if b.cachedRoot != nil { + b.cachedRoot.Close() + } + if b.cachedIdentity != nil { + b.cachedIdentity.Close() + } + b.cachedRoot = rootProvider + b.cachedIdentity = identityProvider + xdsHI = xdsinternal.NewHandshakeInfo(rootProvider, identityProvider, config.SubjectAltNameMatchers, false) + atomic.StorePointer(b.xdsHIPtr, unsafe.Pointer(xdsHI)) + return nil +} + +func buildProviderFunc(configs map[string]*certprovider.BuildableConfig, instanceName, certName string, wantIdentity, wantRoot bool) (certprovider.Provider, error) { + cfg, ok := configs[instanceName] + if !ok { + // Defensive programming. If a resource received from the management + // server contains a certificate provider instance name that is not + // found in the bootstrap, the resource is NACKed by the xDS client. + return nil, fmt.Errorf("certificate provider instance %q not found in bootstrap file", instanceName) + } + provider, err := cfg.Build(certprovider.BuildOptions{ + CertName: certName, + WantIdentity: wantIdentity, + WantRoot: wantRoot, + }) + if err != nil { + // This error is not expected since the bootstrap process parses the + // config and makes sure that it is acceptable to the plugin. Still, it + // is possible that the plugin parses the config successfully, but its + // Build() method errors out. + return nil, fmt.Errorf("xds: failed to get security plugin instance (%+v): %v", cfg, err) + } + return provider, nil +} + +// A convenience method to create a watcher for cluster `name`. It also +// registers the watch with the xDS client, and adds the newly created watcher +// to the list of watchers maintained by the LB policy. +func (b *cdsBalancer) createAndAddWatcherForCluster(name string) { + w := &clusterWatcher{ + name: name, + parent: b, + } + ws := &watcherState{ + watcher: w, + cancelWatch: xdsresource.WatchCluster(b.xdsClient, name, w), + } + b.watchers[name] = ws +} + +// UpdateClientConnState receives the serviceConfig (which contains the +// clusterName to watch for in CDS) and the xdsClient object from the +// xdsResolver. +func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + if b.xdsClient == nil { + c := xdsclient.FromResolverState(state.ResolverState) + if c == nil { + b.logger.Warningf("Received balancer config with no xDS client") + return balancer.ErrBadResolverState + } + b.xdsClient = c + } + b.logger.Infof("Received balancer config update: %s", pretty.ToJSON(state.BalancerConfig)) + + // The errors checked here should ideally never happen because the + // ServiceConfig in this case is prepared by the xdsResolver and is not + // something that is received on the wire. + lbCfg, ok := state.BalancerConfig.(*lbConfig) + if !ok { + b.logger.Warningf("Received unexpected balancer config type: %T", state.BalancerConfig) + return balancer.ErrBadResolverState + } + if lbCfg.ClusterName == "" { + b.logger.Warningf("Received balancer config with no cluster name") + return balancer.ErrBadResolverState + } + + // Do nothing and return early if configuration has not changed. + if b.lbCfg != nil && b.lbCfg.ClusterName == lbCfg.ClusterName { + return nil + } + b.lbCfg = lbCfg + + // Handle the update in a blocking fashion. + errCh := make(chan error, 1) + callback := func(context.Context) { + // A config update with a changed top-level cluster name means that none + // of our old watchers make any sense any more. + b.closeAllWatchers() + + // Create a new watcher for the top-level cluster. 
Upon resolution, it + // could end up creating more watchers if turns out to be an aggregate + // cluster. + b.createAndAddWatcherForCluster(lbCfg.ClusterName) + errCh <- nil + } + onFailure := func() { + // The call to Schedule returns false *only* if the serializer has been + // closed, which happens only when we receive an update after close. + errCh <- errBalancerClosed + } + b.serializer.ScheduleOr(callback, onFailure) + return <-errCh +} + +// ResolverError handles errors reported by the xdsResolver. +func (b *cdsBalancer) ResolverError(err error) { + b.serializer.TrySchedule(func(context.Context) { + // Resource not found error is reported by the resolver when the + // top-level cluster resource is removed by the management server. + if xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound { + b.closeAllWatchers() + } + var root string + if b.lbCfg != nil { + root = b.lbCfg.ClusterName + } + b.onClusterError(root, err) + }) +} + +// UpdateSubConnState handles subConn updates from gRPC. +func (b *cdsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +// Closes all registered cluster watchers and removes them from the internal map. +// +// Only executed in the context of a serializer callback. +func (b *cdsBalancer) closeAllWatchers() { + for name, state := range b.watchers { + state.cancelWatch() + delete(b.watchers, name) + } +} + +// Close cancels the CDS watch, closes the child policy and closes the +// cdsBalancer. +func (b *cdsBalancer) Close() { + b.serializer.TrySchedule(func(context.Context) { + b.closeAllWatchers() + + if b.childLB != nil { + b.childLB.Close() + b.childLB = nil + } + if b.cachedRoot != nil { + b.cachedRoot.Close() + } + if b.cachedIdentity != nil { + b.cachedIdentity.Close() + } + b.logger.Infof("Shutdown") + }) + b.serializerCancel() + <-b.serializer.Done() +} + +func (b *cdsBalancer) ExitIdle() { + b.serializer.TrySchedule(func(context.Context) { + if b.childLB == nil { + b.logger.Warningf("Received ExitIdle with no child policy") + return + } + // This implementation assumes the child balancer supports + // ExitIdle (but still checks for the interface's existence to + // avoid a panic if not). If the child does not, no subconns + // will be connected. + if ei, ok := b.childLB.(balancer.ExitIdler); ok { + ei.ExitIdle() + } + }) +} + +// Handles a good Cluster update from the xDS client. Kicks off the discovery +// mechanism generation process from the top-level cluster and if the cluster +// graph is resolved, generates child policy config and pushes it down. +// +// Only executed in the context of a serializer callback. +func (b *cdsBalancer) onClusterUpdate(name string, update xdsresource.ClusterUpdate) { + state := b.watchers[name] + if state == nil { + // We are currently not watching this cluster anymore. Return early. + return + } + + b.logger.Infof("Received Cluster resource: %s", pretty.ToJSON(update)) + + // Update the watchers map with the update for the cluster. + state.lastUpdate = &update + + // For an aggregate cluster, always use the security configuration on the + // root cluster. + if name == b.lbCfg.ClusterName { + // Process the security config from the received update before building the + // child policy or forwarding the update to it. We do this because the child + // policy may try to create a new subConn inline. 
Processing the security + // configuration here and setting up the handshakeInfo will make sure that + // such attempts are handled properly. + if err := b.handleSecurityConfig(update.SecurityCfg); err != nil { + // If the security config is invalid, for example, if the provider + // instance is not found in the bootstrap config, we need to put the + // channel in transient failure. + b.onClusterError(name, fmt.Errorf("received Cluster resource contains invalid security config: %v", err)) + return + } + } + + clustersSeen := make(map[string]bool) + dms, ok, err := b.generateDMsForCluster(b.lbCfg.ClusterName, 0, nil, clustersSeen) + if err != nil { + b.onClusterError(b.lbCfg.ClusterName, fmt.Errorf("failed to generate discovery mechanisms: %v", err)) + return + } + if ok { + if len(dms) == 0 { + b.onClusterError(b.lbCfg.ClusterName, fmt.Errorf("aggregate cluster graph has no leaf clusters")) + return + } + // Child policy is built the first time we resolve the cluster graph. + if b.childLB == nil { + childLB, err := newChildBalancer(b.ccw, b.bOpts) + if err != nil { + b.logger.Errorf("Failed to create child policy of type %s: %v", clusterresolver.Name, err) + return + } + b.childLB = childLB + b.logger.Infof("Created child policy %p of type %s", b.childLB, clusterresolver.Name) + } + + // Prepare the child policy configuration, convert it to JSON, have it + // parsed by the child policy to convert it into service config and push + // an update to it. + childCfg := &clusterresolver.LBConfig{ + DiscoveryMechanisms: dms, + // The LB policy is configured by the root cluster. + XDSLBPolicy: b.watchers[b.lbCfg.ClusterName].lastUpdate.LBPolicy, + } + cfgJSON, err := json.Marshal(childCfg) + if err != nil { + // Shouldn't happen, since we just prepared struct. + b.logger.Errorf("cds_balancer: error marshalling prepared config: %v", childCfg) + return + } + + var sc serviceconfig.LoadBalancingConfig + if sc, err = b.childConfigParser.ParseConfig(cfgJSON); err != nil { + b.logger.Errorf("cds_balancer: cluster_resolver config generated %v is invalid: %v", string(cfgJSON), err) + return + } + + ccState := balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{}, b.xdsClient), + BalancerConfig: sc, + } + if err := b.childLB.UpdateClientConnState(ccState); err != nil { + b.logger.Errorf("Encountered error when sending config {%+v} to child policy: %v", ccState, err) + } + } + // We no longer need the clusters that we did not see in this iteration of + // generateDMsForCluster(). + for cluster := range clustersSeen { + state, ok := b.watchers[cluster] + if ok { + continue + } + state.cancelWatch() + delete(b.watchers, cluster) + } +} + +// Handles an error Cluster update from the xDS client. Propagates the error +// down to the child policy if one exists, or puts the channel in +// TRANSIENT_FAILURE. +// +// Only executed in the context of a serializer callback. +func (b *cdsBalancer) onClusterError(name string, err error) { + b.logger.Warningf("Cluster resource %q received error update: %v", name, err) + + if b.childLB != nil { + if xdsresource.ErrType(err) != xdsresource.ErrorTypeConnection { + // Connection errors will be sent to the child balancers directly. + // There's no need to forward them. + b.childLB.ResolverError(err) + } + } else { + // If child balancer was never created, fail the RPCs with + // errors. 
+ b.ccw.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(fmt.Errorf("%q: %v", name, err)), + }) + } +} + +// Handles a resource-not-found error from the xDS client. Propagates the error +// down to the child policy if one exists, or puts the channel in +// TRANSIENT_FAILURE. +// +// Only executed in the context of a serializer callback. +func (b *cdsBalancer) onClusterResourceNotFound(name string) { + err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource name %q of type Cluster not found in received response", name) + if b.childLB != nil { + b.childLB.ResolverError(err) + } else { + // If child balancer was never created, fail the RPCs with errors. + b.ccw.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(err), + }) + } +} + +// Generates discovery mechanisms for the cluster graph rooted at `name`. This +// method is called recursively if `name` corresponds to an aggregate cluster, +// with the base case for recursion being a leaf cluster. If a new cluster is +// encountered when traversing the graph, a watcher is created for it. +// +// Inputs: +// - name: name of the cluster to start from +// - depth: recursion depth of the current cluster, starting from root +// - dms: prioritized list of current discovery mechanisms +// - clustersSeen: cluster names seen so far in the graph traversal +// +// Outputs: +// - new prioritized list of discovery mechanisms +// - boolean indicating if traversal of the aggregate cluster graph is +// complete. If false, the above list of discovery mechanisms is ignored. +// - error indicating if any error was encountered as part of the graph +// traversal. If error is non-nil, the other return values are ignored. +// +// Only executed in the context of a serializer callback. +func (b *cdsBalancer) generateDMsForCluster(name string, depth int, dms []clusterresolver.DiscoveryMechanism, clustersSeen map[string]bool) ([]clusterresolver.DiscoveryMechanism, bool, error) { + if depth >= aggregateClusterMaxDepth { + return dms, false, errExceedsMaxDepth + } + + if clustersSeen[name] { + // Discovery mechanism already seen through a different branch. + return dms, true, nil + } + clustersSeen[name] = true + + state, ok := b.watchers[name] + if !ok { + // If we have not seen this cluster so far, create a watcher for it, add + // it to the map, start the watch and return. + b.createAndAddWatcherForCluster(name) + + // And since we just created the watcher, we know that we haven't + // resolved the cluster graph yet. + return dms, false, nil + } + + // A watcher exists, but no update has been received yet. + if state.lastUpdate == nil { + return dms, false, nil + } + + var dm clusterresolver.DiscoveryMechanism + cluster := state.lastUpdate + switch cluster.ClusterType { + case xdsresource.ClusterTypeAggregate: + // This boolean is used to track if any of the clusters in the graph is + // not yet completely resolved or returns errors, thereby allowing us to + // traverse as much of the graph as possible (and start the associated + // watches where required) to ensure that clustersSeen contains all + // clusters in the graph that we can traverse to. 
+ missingCluster := false + var err error + for _, child := range cluster.PrioritizedClusterNames { + var ok bool + dms, ok, err = b.generateDMsForCluster(child, depth+1, dms, clustersSeen) + if err != nil || !ok { + missingCluster = true + } + } + return dms, !missingCluster, err + case xdsresource.ClusterTypeEDS: + dm = clusterresolver.DiscoveryMechanism{ + Type: clusterresolver.DiscoveryMechanismTypeEDS, + Cluster: cluster.ClusterName, + EDSServiceName: cluster.EDSServiceName, + MaxConcurrentRequests: cluster.MaxRequests, + LoadReportingServer: cluster.LRSServerConfig, + } + case xdsresource.ClusterTypeLogicalDNS: + dm = clusterresolver.DiscoveryMechanism{ + Type: clusterresolver.DiscoveryMechanismTypeLogicalDNS, + Cluster: cluster.ClusterName, + DNSHostname: cluster.DNSHostName, + } + } + odJSON := cluster.OutlierDetection + // "In the cds LB policy, if the outlier_detection field is not set in + // the Cluster resource, a "no-op" outlier_detection config will be + // generated in the corresponding DiscoveryMechanism config, with all + // fields unset." - A50 + if odJSON == nil { + // This will pick up top level defaults in Cluster Resolver + // ParseConfig, but sre and fpe will be nil still so still a + // "no-op" config. + odJSON = json.RawMessage(`{}`) + } + dm.OutlierDetection = odJSON + + dm.TelemetryLabels = cluster.TelemetryLabels + + return append(dms, dm), true, nil +} + +// ccWrapper wraps the balancer.ClientConn passed to the CDS balancer at +// creation and intercepts the NewSubConn() and UpdateAddresses() call from the +// child policy to add security configuration required by xDS credentials. +// +// Other methods of the balancer.ClientConn interface are not overridden and +// hence get the original implementation. +type ccWrapper struct { + balancer.ClientConn + + xdsHIPtr *unsafe.Pointer +} + +// NewSubConn intercepts NewSubConn() calls from the child policy and adds an +// address attribute which provides all information required by the xdsCreds +// handshaker to perform the TLS handshake. +func (ccw *ccWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + newAddrs := make([]resolver.Address, len(addrs)) + for i, addr := range addrs { + newAddrs[i] = xdsinternal.SetHandshakeInfo(addr, ccw.xdsHIPtr) + } + + // No need to override opts.StateListener; just forward all calls to the + // child that created the SubConn. + return ccw.ClientConn.NewSubConn(newAddrs, opts) +} + +func (ccw *ccWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + newAddrs := make([]resolver.Address, len(addrs)) + for i, addr := range addrs { + newAddrs[i] = xdsinternal.SetHandshakeInfo(addr, ccw.xdsHIPtr) + } + ccw.ClientConn.UpdateAddresses(sc, newAddrs) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go new file mode 100644 index 0000000000..835461d099 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go @@ -0,0 +1,55 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cdsbalancer + +import ( + "context" + + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// clusterWatcher implements the xdsresource.ClusterWatcher interface, and is +// passed to the xDS client as part of the WatchResource() API. +// +// It watches a single cluster and handles callbacks from the xDS client by +// scheduling them on the parent LB policy's serializer. +type clusterWatcher struct { + name string + parent *cdsBalancer +} + +func (cw *clusterWatcher) OnUpdate(u *xdsresource.ClusterResourceData, onDone xdsresource.OnDoneFunc) { + handleUpdate := func(context.Context) { cw.parent.onClusterUpdate(cw.name, u.Resource); onDone() } + cw.parent.serializer.ScheduleOr(handleUpdate, onDone) +} + +func (cw *clusterWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) { + handleError := func(context.Context) { cw.parent.onClusterError(cw.name, err); onDone() } + cw.parent.serializer.ScheduleOr(handleError, onDone) +} + +func (cw *clusterWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) { + handleNotFound := func(context.Context) { cw.parent.onClusterResourceNotFound(cw.name); onDone() } + cw.parent.serializer.ScheduleOr(handleNotFound, onDone) +} + +// watcherState groups the state associated with a clusterWatcher. +type watcherState struct { + watcher *clusterWatcher // The underlying watcher. + cancelWatch func() // Cancel func to cancel the watch. + lastUpdate *xdsresource.ClusterUpdate // Most recent update received for this cluster. +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/logging.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/logging.go new file mode 100644 index 0000000000..e179bb1bf1 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/logging.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package cdsbalancer + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[cds-lb %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *cdsBalancer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go new file mode 100644 index 0000000000..0dc71dfede --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -0,0 +1,480 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package clusterimpl implements the xds_cluster_impl balancing policy. It +// handles the cluster features (e.g. circuit_breaking, RPC dropping). +// +// Note that it doesn't handle name resolution, which is done by policy +// xds_cluster_resolver. +package clusterimpl + +import ( + "context" + "encoding/json" + "fmt" + "sync" + "sync/atomic" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancer/gracefulswitch" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/xds" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + xdsinternal "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/balancer/loadstore" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/load" +) + +const ( + // Name is the name of the cluster_impl balancer. 
+ Name = "xds_cluster_impl_experimental" + defaultRequestCountMax = 1024 +) + +var ( + connectedAddress = internal.ConnectedAddress.(func(balancer.SubConnState) resolver.Address) + errBalancerClosed = fmt.Errorf("%s LB policy is closed", Name) +) + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + ctx, cancel := context.WithCancel(context.Background()) + b := &clusterImplBalancer{ + ClientConn: cc, + bOpts: bOpts, + loadWrapper: loadstore.NewWrapper(), + requestCountMax: defaultRequestCountMax, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } + b.logger = prefixLogger(b) + b.child = gracefulswitch.NewBalancer(b, bOpts) + b.logger.Infof("Created") + return b +} + +func (bb) Name() string { + return Name +} + +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return parseConfig(c) +} + +type clusterImplBalancer struct { + balancer.ClientConn + + bOpts balancer.BuildOptions + logger *grpclog.PrefixLogger + xdsClient xdsclient.XDSClient + + config *LBConfig + child *gracefulswitch.Balancer + cancelLoadReport func() + edsServiceName string + lrsServer *bootstrap.ServerConfig + loadWrapper *loadstore.Wrapper + + clusterNameMu sync.Mutex + clusterName string + + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + // childState/drops/requestCounter keeps the state used by the most recently + // generated picker. + childState balancer.State + dropCategories []DropConfig // The categories for drops. + drops []*dropper + requestCounterCluster string // The cluster name for the request counter. + requestCounterService string // The service name for the request counter. + requestCounter *xdsclient.ClusterRequestsCounter + requestCountMax uint32 + telemetryLabels map[string]string +} + +// updateLoadStore checks the config for load store, and decides whether it +// needs to restart the load reporting stream. +func (b *clusterImplBalancer) updateLoadStore(newConfig *LBConfig) error { + var updateLoadClusterAndService bool + + // ClusterName is different, restart. ClusterName is from ClusterName and + // EDSServiceName. + clusterName := b.getClusterName() + if clusterName != newConfig.Cluster { + updateLoadClusterAndService = true + b.setClusterName(newConfig.Cluster) + clusterName = newConfig.Cluster + } + if b.edsServiceName != newConfig.EDSServiceName { + updateLoadClusterAndService = true + b.edsServiceName = newConfig.EDSServiceName + } + if updateLoadClusterAndService { + // This updates the clusterName and serviceName that will be reported + // for the loads. The update here is too early, the perfect timing is + // when the picker is updated with the new connection. But from this + // balancer's point of view, it's impossible to tell. + // + // On the other hand, this will almost never happen. Each LRS policy + // shouldn't get updated config. The parent should do a graceful switch + // when the clusterName or serviceName is changed. + b.loadWrapper.UpdateClusterAndService(clusterName, b.edsServiceName) + } + + var ( + stopOldLoadReport bool + startNewLoadReport bool + ) + + // Check if it's necessary to restart load report. + if b.lrsServer == nil { + if newConfig.LoadReportingServer != nil { + // Old is nil, new is not nil, start new LRS. + b.lrsServer = newConfig.LoadReportingServer + startNewLoadReport = true + } + // Old is nil, new is nil, do nothing. 
+ } else if newConfig.LoadReportingServer == nil { + // Old is not nil, new is nil, stop old, don't start new. + b.lrsServer = newConfig.LoadReportingServer + stopOldLoadReport = true + } else { + // Old is not nil, new is not nil, compare string values, if + // different, stop old and start new. + if !b.lrsServer.Equal(newConfig.LoadReportingServer) { + b.lrsServer = newConfig.LoadReportingServer + stopOldLoadReport = true + startNewLoadReport = true + } + } + + if stopOldLoadReport { + if b.cancelLoadReport != nil { + b.cancelLoadReport() + b.cancelLoadReport = nil + if !startNewLoadReport { + // If a new LRS stream will be started later, no need to update + // it to nil here. + b.loadWrapper.UpdateLoadStore(nil) + } + } + } + if startNewLoadReport { + var loadStore *load.Store + if b.xdsClient != nil { + loadStore, b.cancelLoadReport = b.xdsClient.ReportLoad(b.lrsServer) + } + b.loadWrapper.UpdateLoadStore(loadStore) + } + + return nil +} + +func (b *clusterImplBalancer) updateClientConnState(s balancer.ClientConnState) error { + if b.logger.V(2) { + b.logger.Infof("Received configuration: %s", pretty.ToJSON(s.BalancerConfig)) + } + newConfig, ok := s.BalancerConfig.(*LBConfig) + if !ok { + return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) + } + + // Need to check for potential errors at the beginning of this function, so + // that on errors, we reject the whole config, instead of applying part of + // it. + bb := balancer.Get(newConfig.ChildPolicy.Name) + if bb == nil { + return fmt.Errorf("child policy %q not registered", newConfig.ChildPolicy.Name) + } + + if b.xdsClient == nil { + c := xdsclient.FromResolverState(s.ResolverState) + if c == nil { + return balancer.ErrBadResolverState + } + b.xdsClient = c + } + + // Update load reporting config. This needs to be done before updating the + // child policy because we need the loadStore from the updated client to be + // passed to the ccWrapper, so that the next picker from the child policy + // will pick up the new loadStore. + if err := b.updateLoadStore(newConfig); err != nil { + return err + } + + if b.config == nil || b.config.ChildPolicy.Name != newConfig.ChildPolicy.Name { + if err := b.child.SwitchTo(bb); err != nil { + return fmt.Errorf("error switching to child of type %q: %v", newConfig.ChildPolicy.Name, err) + } + } + b.config = newConfig + + b.telemetryLabels = newConfig.TelemetryLabels + dc := b.handleDropAndRequestCount(newConfig) + if dc != nil && b.childState.Picker != nil { + b.ClientConn.UpdateState(balancer.State{ + ConnectivityState: b.childState.ConnectivityState, + Picker: b.newPicker(dc), + }) + } + + // Addresses and sub-balancer config are sent to sub-balancer. + return b.child.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: s.ResolverState, + BalancerConfig: b.config.ChildPolicy.Config, + }) +} + +func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + // Handle the update in a blocking fashion. + errCh := make(chan error, 1) + callback := func(context.Context) { + errCh <- b.updateClientConnState(s) + } + onFailure := func() { + // An attempt to schedule callback fails only when an update is received + // after Close(). 
+ errCh <- errBalancerClosed + } + b.serializer.ScheduleOr(callback, onFailure) + return <-errCh +} + +func (b *clusterImplBalancer) ResolverError(err error) { + b.serializer.TrySchedule(func(context.Context) { + b.child.ResolverError(err) + }) +} + +func (b *clusterImplBalancer) updateSubConnState(_ balancer.SubConn, s balancer.SubConnState, cb func(balancer.SubConnState)) { + // Trigger re-resolution when a SubConn turns transient failure. This is + // necessary for the LogicalDNS in cluster_resolver policy to re-resolve. + // + // Note that this happens not only for the addresses from DNS, but also for + // EDS (cluster_impl doesn't know if it's DNS or EDS, only the parent + // knows). The parent priority policy is configured to ignore re-resolution + // signal from the EDS children. + if s.ConnectivityState == connectivity.TransientFailure { + b.ClientConn.ResolveNow(resolver.ResolveNowOptions{}) + } + + if cb != nil { + cb(s) + } +} + +func (b *clusterImplBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, s) +} + +func (b *clusterImplBalancer) Close() { + b.serializer.TrySchedule(func(_ context.Context) { + b.child.Close() + b.childState = balancer.State{} + + if b.cancelLoadReport != nil { + b.cancelLoadReport() + b.cancelLoadReport = nil + } + b.logger.Infof("Shutdown") + }) + b.serializerCancel() + <-b.serializer.Done() +} + +func (b *clusterImplBalancer) ExitIdle() { + b.serializer.TrySchedule(func(context.Context) { + b.child.ExitIdle() + }) +} + +// Override methods to accept updates from the child LB. + +func (b *clusterImplBalancer) UpdateState(state balancer.State) { + b.serializer.TrySchedule(func(context.Context) { + b.childState = state + b.ClientConn.UpdateState(balancer.State{ + ConnectivityState: b.childState.ConnectivityState, + Picker: b.newPicker(&dropConfigs{ + drops: b.drops, + requestCounter: b.requestCounter, + requestCountMax: b.requestCountMax, + }), + }) + }) +} + +func (b *clusterImplBalancer) setClusterName(n string) { + b.clusterNameMu.Lock() + defer b.clusterNameMu.Unlock() + b.clusterName = n +} + +func (b *clusterImplBalancer) getClusterName() string { + b.clusterNameMu.Lock() + defer b.clusterNameMu.Unlock() + return b.clusterName +} + +// scWrapper is a wrapper of SubConn with locality ID. The locality ID can be +// retrieved from the addresses when creating SubConn. +// +// All SubConns passed to the child policies are wrapped in this, so that the +// picker can get the localityID from the picked SubConn, and do load reporting. +// +// After wrapping, all SubConns to and from the parent ClientConn (e.g. for +// SubConn state update, update/remove SubConn) must be the original SubConns. +// All SubConns to and from the child policy (NewSubConn, forwarding SubConn +// state update) must be the wrapper. The balancer keeps a map from the original +// SubConn to the wrapper for this purpose. +type scWrapper struct { + balancer.SubConn + // locality needs to be atomic because it can be updated while being read by + // the picker. 
+ locality atomic.Value // type xdsinternal.LocalityID +} + +func (scw *scWrapper) updateLocalityID(lID xdsinternal.LocalityID) { + scw.locality.Store(lID) +} + +func (scw *scWrapper) localityID() xdsinternal.LocalityID { + lID, _ := scw.locality.Load().(xdsinternal.LocalityID) + return lID +} + +func (b *clusterImplBalancer) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + clusterName := b.getClusterName() + newAddrs := make([]resolver.Address, len(addrs)) + for i, addr := range addrs { + newAddrs[i] = xds.SetXDSHandshakeClusterName(addr, clusterName) + } + var sc balancer.SubConn + scw := &scWrapper{} + oldListener := opts.StateListener + opts.StateListener = func(state balancer.SubConnState) { + b.serializer.TrySchedule(func(context.Context) { + b.updateSubConnState(sc, state, oldListener) + if state.ConnectivityState != connectivity.Ready { + return + } + // Read connected address and call updateLocalityID() based on the connected + // address's locality. https://github.com/grpc/grpc-go/issues/7339 + addr := connectedAddress(state) + lID := xdsinternal.GetLocalityID(addr) + if lID.Empty() { + if b.logger.V(2) { + b.logger.Infof("Locality ID for %s unexpectedly empty", addr) + } + return + } + scw.updateLocalityID(lID) + }) + } + sc, err := b.ClientConn.NewSubConn(newAddrs, opts) + if err != nil { + return nil, err + } + scw.SubConn = sc + return scw, nil +} + +func (b *clusterImplBalancer) RemoveSubConn(sc balancer.SubConn) { + b.logger.Errorf("RemoveSubConn(%v) called unexpectedly", sc) +} + +func (b *clusterImplBalancer) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + clusterName := b.getClusterName() + newAddrs := make([]resolver.Address, len(addrs)) + var lID xdsinternal.LocalityID + for i, addr := range addrs { + newAddrs[i] = xds.SetXDSHandshakeClusterName(addr, clusterName) + lID = xdsinternal.GetLocalityID(newAddrs[i]) + } + if scw, ok := sc.(*scWrapper); ok { + scw.updateLocalityID(lID) + // Need to get the original SubConn from the wrapper before calling + // parent ClientConn. + sc = scw.SubConn + } + b.ClientConn.UpdateAddresses(sc, newAddrs) +} + +type dropConfigs struct { + drops []*dropper + requestCounter *xdsclient.ClusterRequestsCounter + requestCountMax uint32 +} + +// handleDropAndRequestCount compares drop and request counter in newConfig with +// the one currently used by picker. It returns a new dropConfigs if a new +// picker needs to be generated, otherwise it returns nil. +func (b *clusterImplBalancer) handleDropAndRequestCount(newConfig *LBConfig) *dropConfigs { + // Compare new drop config. And update picker if it's changed. + var updatePicker bool + if !equalDropCategories(b.dropCategories, newConfig.DropCategories) { + b.dropCategories = newConfig.DropCategories + b.drops = make([]*dropper, 0, len(newConfig.DropCategories)) + for _, c := range newConfig.DropCategories { + b.drops = append(b.drops, newDropper(c)) + } + updatePicker = true + } + + // Compare cluster name. And update picker if it's changed, because circuit + // breaking's stream counter will be different. + if b.requestCounterCluster != newConfig.Cluster || b.requestCounterService != newConfig.EDSServiceName { + b.requestCounterCluster = newConfig.Cluster + b.requestCounterService = newConfig.EDSServiceName + b.requestCounter = xdsclient.GetClusterRequestsCounter(newConfig.Cluster, newConfig.EDSServiceName) + updatePicker = true + } + // Compare upper bound of stream count. And update picker if it's changed. 
+ // This is also for circuit breaking. + var newRequestCountMax uint32 = 1024 + if newConfig.MaxConcurrentRequests != nil { + newRequestCountMax = *newConfig.MaxConcurrentRequests + } + if b.requestCountMax != newRequestCountMax { + b.requestCountMax = newRequestCountMax + updatePicker = true + } + + if !updatePicker { + return nil + } + return &dropConfigs{ + drops: b.drops, + requestCounter: b.requestCounter, + requestCountMax: b.requestCountMax, + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/config.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/config.go new file mode 100644 index 0000000000..134a568e0f --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/config.go @@ -0,0 +1,69 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clusterimpl + +import ( + "encoding/json" + + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/serviceconfig" +) + +// DropConfig contains the category, and drop ratio. +type DropConfig struct { + Category string + RequestsPerMillion uint32 +} + +// LBConfig is the balancer config for cluster_impl balancer. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + Cluster string `json:"cluster,omitempty"` + EDSServiceName string `json:"edsServiceName,omitempty"` + // LoadReportingServer is the LRS server to send load reports to. If not + // present, load reporting will be disabled. + LoadReportingServer *bootstrap.ServerConfig `json:"lrsLoadReportingServer,omitempty"` + MaxConcurrentRequests *uint32 `json:"maxConcurrentRequests,omitempty"` + DropCategories []DropConfig `json:"dropCategories,omitempty"` + // TelemetryLabels are the telemetry Labels associated with this cluster. + TelemetryLabels map[string]string `json:"telemetryLabels,omitempty"` + ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` +} + +func parseConfig(c json.RawMessage) (*LBConfig, error) { + var cfg LBConfig + if err := json.Unmarshal(c, &cfg); err != nil { + return nil, err + } + return &cfg, nil +} + +func equalDropCategories(a, b []DropConfig) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/logging.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/logging.go new file mode 100644 index 0000000000..3bbd1b0d78 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/logging.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clusterimpl + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[xds-cluster-impl-lb %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *clusterImplBalancer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go new file mode 100644 index 0000000000..fbadbb92ba --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go @@ -0,0 +1,206 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clusterimpl + +import ( + "context" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/stats" + "google.golang.org/grpc/internal/wrr" + "google.golang.org/grpc/status" + "google.golang.org/grpc/xds/internal/xdsclient" +) + +// NewRandomWRR is used when calculating drops. It's exported so that tests can +// override it. +var NewRandomWRR = wrr.NewRandom + +const million = 1000000 + +type dropper struct { + category string + w wrr.WRR +} + +// greatest common divisor (GCD) via Euclidean algorithm +func gcd(a, b uint32) uint32 { + for b != 0 { + t := b + b = a % b + a = t + } + return a +} + +func newDropper(c DropConfig) *dropper { + w := NewRandomWRR() + gcdv := gcd(c.RequestsPerMillion, million) + // Return true for RequestPerMillion, false for the rest. + w.Add(true, int64(c.RequestsPerMillion/gcdv)) + w.Add(false, int64((million-c.RequestsPerMillion)/gcdv)) + + return &dropper{ + category: c.Category, + w: w, + } +} + +func (d *dropper) drop() (ret bool) { + return d.w.Next().(bool) +} + +// loadReporter wraps the methods from the loadStore that are used here. +type loadReporter interface { + CallStarted(locality string) + CallFinished(locality string, err error) + CallServerLoad(locality, name string, val float64) + CallDropped(locality string) +} + +// Picker implements RPC drop, circuit breaking drop and load reporting. 
+type picker struct { + drops []*dropper + s balancer.State + loadStore loadReporter + counter *xdsclient.ClusterRequestsCounter + countMax uint32 + telemetryLabels map[string]string +} + +func (b *clusterImplBalancer) newPicker(config *dropConfigs) *picker { + return &picker{ + drops: config.drops, + s: b.childState, + loadStore: b.loadWrapper, + counter: config.requestCounter, + countMax: config.requestCountMax, + telemetryLabels: b.telemetryLabels, + } +} + +func telemetryLabels(ctx context.Context) map[string]string { + if ctx == nil { + return nil + } + labels := stats.GetLabels(ctx) + if labels == nil { + return nil + } + return labels.TelemetryLabels +} + +func (d *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + // Unconditionally set labels if present, even dropped or queued RPC's can + // use these labels. + if labels := telemetryLabels(info.Ctx); labels != nil { + for key, value := range d.telemetryLabels { + labels[key] = value + } + } + + // Don't drop unless the inner picker is READY. Similar to + // https://github.com/grpc/grpc-go/issues/2622. + if d.s.ConnectivityState == connectivity.Ready { + // Check if this RPC should be dropped by category. + for _, dp := range d.drops { + if dp.drop() { + if d.loadStore != nil { + d.loadStore.CallDropped(dp.category) + } + return balancer.PickResult{}, status.Errorf(codes.Unavailable, "RPC is dropped") + } + } + } + + // Check if this RPC should be dropped by circuit breaking. + if d.counter != nil { + if err := d.counter.StartRequest(d.countMax); err != nil { + // Drops by circuit breaking are reported with empty category. They + // will be reported only in total drops, but not in per category. + if d.loadStore != nil { + d.loadStore.CallDropped("") + } + return balancer.PickResult{}, status.Errorf(codes.Unavailable, err.Error()) + } + } + + var lIDStr string + pr, err := d.s.Picker.Pick(info) + if scw, ok := pr.SubConn.(*scWrapper); ok { + // This OK check also covers the case err!=nil, because SubConn will be + // nil. + pr.SubConn = scw.SubConn + var e error + // If locality ID isn't found in the wrapper, an empty locality ID will + // be used. + lIDStr, e = scw.localityID().ToString() + if e != nil { + logger.Infof("failed to marshal LocalityID: %#v, loads won't be reported", scw.localityID()) + } + } + + if err != nil { + if d.counter != nil { + // Release one request count if this pick fails. + d.counter.EndRequest() + } + return pr, err + } + + if labels := telemetryLabels(info.Ctx); labels != nil { + labels["grpc.lb.locality"] = lIDStr + } + + if d.loadStore != nil { + d.loadStore.CallStarted(lIDStr) + oldDone := pr.Done + pr.Done = func(info balancer.DoneInfo) { + if oldDone != nil { + oldDone(info) + } + d.loadStore.CallFinished(lIDStr, info.Err) + + load, ok := info.ServerLoad.(*v3orcapb.OrcaLoadReport) + if !ok || load == nil { + return + } + for n, c := range load.NamedMetrics { + d.loadStore.CallServerLoad(lIDStr, n, c) + } + } + } + + if d.counter != nil { + // Update Done() so that when the RPC finishes, the request count will + // be released. 
+ oldDone := pr.Done + pr.Done = func(doneInfo balancer.DoneInfo) { + d.counter.EndRequest() + if oldDone != nil { + oldDone(doneInfo) + } + } + } + + return pr, err +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/balancerstateaggregator.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/balancerstateaggregator.go new file mode 100644 index 0000000000..92c69f5e1f --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/balancerstateaggregator.go @@ -0,0 +1,196 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clustermanager + +import ( + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/grpclog" +) + +type subBalancerState struct { + state balancer.State + // stateToAggregate is the connectivity state used only for state + // aggregation. It could be different from state.ConnectivityState. For + // example when a sub-balancer transitions from TransientFailure to + // connecting, state.ConnectivityState is Connecting, but stateToAggregate + // is still TransientFailure. + stateToAggregate connectivity.State +} + +func (s *subBalancerState) String() string { + return fmt.Sprintf("picker:%p,state:%v,stateToAggregate:%v", s.state.Picker, s.state.ConnectivityState, s.stateToAggregate) +} + +type balancerStateAggregator struct { + cc balancer.ClientConn + logger *grpclog.PrefixLogger + csEval *balancer.ConnectivityStateEvaluator + + mu sync.Mutex + // This field is used to ensure that no updates are forwarded to the parent + // CC once the aggregator is closed. A closed sub-balancer could still send + // pickers to this aggregator. + closed bool + // Map from child policy name to last reported state. + idToPickerState map[string]*subBalancerState + // Set when UpdateState call propagation is paused. + pauseUpdateState bool + // Set when UpdateState call propagation is paused and an UpdateState call + // is suppressed. + needUpdateStateOnResume bool +} + +func newBalancerStateAggregator(cc balancer.ClientConn, logger *grpclog.PrefixLogger) *balancerStateAggregator { + return &balancerStateAggregator{ + cc: cc, + logger: logger, + csEval: &balancer.ConnectivityStateEvaluator{}, + idToPickerState: make(map[string]*subBalancerState), + } +} + +func (bsa *balancerStateAggregator) close() { + bsa.mu.Lock() + defer bsa.mu.Unlock() + bsa.closed = true +} + +// add adds a sub-balancer in CONNECTING state. +// +// This is called when there's a new child. +func (bsa *balancerStateAggregator) add(id string) { + bsa.mu.Lock() + defer bsa.mu.Unlock() + + bsa.idToPickerState[id] = &subBalancerState{ + // Start everything in CONNECTING, so if one of the sub-balancers + // reports TransientFailure, the RPCs will still wait for the other + // sub-balancers. 
+ state: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + stateToAggregate: connectivity.Connecting, + } + bsa.csEval.RecordTransition(connectivity.Shutdown, connectivity.Connecting) + bsa.buildAndUpdateLocked() +} + +// remove removes the sub-balancer state. Future updates from this sub-balancer, +// if any, will be ignored. +// +// This is called when a child is removed. +func (bsa *balancerStateAggregator) remove(id string) { + bsa.mu.Lock() + defer bsa.mu.Unlock() + if _, ok := bsa.idToPickerState[id]; !ok { + return + } + // Setting the state of the deleted sub-balancer to Shutdown will get + // csEvltr to remove the previous state for any aggregated state + // evaluations. Transitions to and from connectivity.Shutdown are ignored + // by csEvltr. + bsa.csEval.RecordTransition(bsa.idToPickerState[id].stateToAggregate, connectivity.Shutdown) + // Remove id and picker from picker map. This also results in future updates + // for this ID to be ignored. + delete(bsa.idToPickerState, id) + bsa.buildAndUpdateLocked() +} + +// pauseStateUpdates causes UpdateState calls to not propagate to the parent +// ClientConn. The last state will be remembered and propagated when +// ResumeStateUpdates is called. +func (bsa *balancerStateAggregator) pauseStateUpdates() { + bsa.mu.Lock() + defer bsa.mu.Unlock() + bsa.pauseUpdateState = true + bsa.needUpdateStateOnResume = false +} + +// resumeStateUpdates will resume propagating UpdateState calls to the parent, +// and call UpdateState on the parent if any UpdateState call was suppressed. +func (bsa *balancerStateAggregator) resumeStateUpdates() { + bsa.mu.Lock() + defer bsa.mu.Unlock() + bsa.pauseUpdateState = false + if bsa.needUpdateStateOnResume { + bsa.cc.UpdateState(bsa.buildLocked()) + } +} + +// UpdateState is called to report a balancer state change from sub-balancer. +// It's usually called by the balancer group. +// +// It calls parent ClientConn's UpdateState with the new aggregated state. +func (bsa *balancerStateAggregator) UpdateState(id string, state balancer.State) { + bsa.logger.Infof("State update from sub-balancer %q: %+v", id, state) + + bsa.mu.Lock() + defer bsa.mu.Unlock() + pickerSt, ok := bsa.idToPickerState[id] + if !ok { + // All state starts with an entry in pickStateMap. If ID is not in map, + // it's either removed, or never existed. + return + } + if !(pickerSt.state.ConnectivityState == connectivity.TransientFailure && state.ConnectivityState == connectivity.Connecting) { + // If old state is TransientFailure, and new state is Connecting, don't + // update the state, to prevent the aggregated state from being always + // CONNECTING. Otherwise, stateToAggregate is the same as + // state.ConnectivityState. + bsa.csEval.RecordTransition(pickerSt.stateToAggregate, state.ConnectivityState) + pickerSt.stateToAggregate = state.ConnectivityState + } + pickerSt.state = state + bsa.buildAndUpdateLocked() +} + +// buildAndUpdateLocked combines the sub-state from each sub-balancer into one +// state, and sends a picker update to the parent ClientConn. +func (bsa *balancerStateAggregator) buildAndUpdateLocked() { + if bsa.closed { + return + } + if bsa.pauseUpdateState { + // If updates are paused, do not call UpdateState, but remember that we + // need to call it when they are resumed. + bsa.needUpdateStateOnResume = true + return + } + bsa.cc.UpdateState(bsa.buildLocked()) +} + +// buildLocked combines sub-states into one. 
+func (bsa *balancerStateAggregator) buildLocked() balancer.State { + // The picker's return error might not be consistent with the + // aggregatedState. Because for this LB policy, we want to always build + // picker with all sub-pickers (not only ready sub-pickers), so even if the + // overall state is Ready, pick for certain RPCs can behave like Connecting + // or TransientFailure. + bsa.logger.Infof("Child pickers: %+v", bsa.idToPickerState) + return balancer.State{ + ConnectivityState: bsa.csEval.CurrentState(), + Picker: newPickerGroup(bsa.idToPickerState), + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go new file mode 100644 index 0000000000..e6d751ecbe --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go @@ -0,0 +1,205 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package clustermanager implements the cluster manager LB policy for xds. +package clustermanager + +import ( + "encoding/json" + "fmt" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/balancergroup" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/hierarchy" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +const balancerName = "xds_cluster_manager_experimental" + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + b := &bal{} + b.logger = prefixLogger(b) + b.stateAggregator = newBalancerStateAggregator(cc, b.logger) + b.bg = balancergroup.New(balancergroup.Options{ + CC: cc, + BuildOpts: opts, + StateAggregator: b.stateAggregator, + Logger: b.logger, + SubBalancerCloseTimeout: time.Duration(0), // Disable caching of removed child policies + }) + b.bg.Start() + b.logger.Infof("Created") + return b +} + +func (bb) Name() string { + return balancerName +} + +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return parseConfig(c) +} + +type bal struct { + logger *internalgrpclog.PrefixLogger + bg *balancergroup.BalancerGroup + stateAggregator *balancerStateAggregator + + children map[string]childConfig +} + +func (b *bal) setErrorPickerForChild(childName string, err error) { + b.stateAggregator.UpdateState(childName, balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(err), + }) +} + +func (b *bal) updateChildren(s balancer.ClientConnState, newConfig *lbConfig) error { + // TODO: Get rid of handling hierarchy in addresses. This LB policy never + // gets addresses from the resolver. 
+ addressesSplit := hierarchy.Group(s.ResolverState.Addresses) + + // Remove sub-balancers that are not in the new list from the aggregator and + // balancergroup. + for name := range b.children { + if _, ok := newConfig.Children[name]; !ok { + b.stateAggregator.remove(name) + b.bg.Remove(name) + } + } + + var retErr error + for childName, childCfg := range newConfig.Children { + lbCfg := childCfg.ChildPolicy.Config + if _, ok := b.children[childName]; !ok { + // Add new sub-balancers to the aggregator and balancergroup. + b.stateAggregator.add(childName) + b.bg.Add(childName, balancer.Get(childCfg.ChildPolicy.Name)) + } else { + // If the child policy type has changed for existing sub-balancers, + // parse the new config and send down the config update to the + // balancergroup, which will take care of gracefully switching the + // child over to the new policy. + // + // If we run into errors here, we need to ensure that RPCs to this + // child fail, while RPCs to other children with good configs + // continue to succeed. + newPolicyName, oldPolicyName := childCfg.ChildPolicy.Name, b.children[childName].ChildPolicy.Name + if newPolicyName != oldPolicyName { + var err error + var cfgJSON []byte + cfgJSON, err = childCfg.ChildPolicy.MarshalJSON() + if err != nil { + retErr = fmt.Errorf("failed to JSON marshal load balancing policy for child %q: %v", childName, err) + b.setErrorPickerForChild(childName, retErr) + continue + } + // This overwrites lbCfg to be in the format expected by the + // gracefulswitch balancer. So, when this config is pushed to + // the child (below), it will result in a graceful switch to the + // new child policy. + lbCfg, err = balancergroup.ParseConfig(cfgJSON) + if err != nil { + retErr = fmt.Errorf("failed to parse load balancing policy for child %q: %v", childName, err) + b.setErrorPickerForChild(childName, retErr) + continue + } + } + } + + if err := b.bg.UpdateClientConnState(childName, balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: addressesSplit[childName], + ServiceConfig: s.ResolverState.ServiceConfig, + Attributes: s.ResolverState.Attributes, + }, + BalancerConfig: lbCfg, + }); err != nil { + retErr = fmt.Errorf("failed to push new configuration %v to child %q", childCfg.ChildPolicy.Config, childName) + b.setErrorPickerForChild(childName, retErr) + } + + // Picker update is sent to the parent ClientConn only after the + // new child policy returns a picker. So, there is no need to + // set needUpdateStateOnResume to true here. + } + + b.children = newConfig.Children + + // If multiple sub-balancers run into errors, we will return only the last + // one, which is still good enough, since the grpc channel will anyways + // return this error as balancer.ErrBadResolver to the name resolver, + // resulting in re-resolution attempts. + return retErr + + // Adding or removing a sub-balancer will result in the + // needUpdateStateOnResume bit to true which results in a picker update once + // resumeStateUpdates() is called. 
+} + +func (b *bal) UpdateClientConnState(s balancer.ClientConnState) error { + newConfig, ok := s.BalancerConfig.(*lbConfig) + if !ok { + return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) + } + b.logger.Infof("Update with config %+v, resolver state %+v", pretty.ToJSON(s.BalancerConfig), s.ResolverState) + + b.stateAggregator.pauseStateUpdates() + defer b.stateAggregator.resumeStateUpdates() + return b.updateChildren(s, newConfig) +} + +func (b *bal) ResolverError(err error) { + b.bg.ResolverError(err) +} + +func (b *bal) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *bal) Close() { + b.stateAggregator.close() + b.bg.Close() + b.logger.Infof("Shutdown") +} + +func (b *bal) ExitIdle() { + b.bg.ExitIdle() +} + +const prefix = "[xds-cluster-manager-lb %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *bal) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/config.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/config.go new file mode 100644 index 0000000000..3a5625ff19 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/config.go @@ -0,0 +1,46 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clustermanager + +import ( + "encoding/json" + + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +type childConfig struct { + // ChildPolicy is the child policy and it's config. + ChildPolicy *internalserviceconfig.BalancerConfig +} + +// lbConfig is the balancer config for xds routing policy. +type lbConfig struct { + serviceconfig.LoadBalancingConfig + Children map[string]childConfig +} + +func parseConfig(c json.RawMessage) (*lbConfig, error) { + cfg := &lbConfig{} + if err := json.Unmarshal(c, cfg); err != nil { + return nil, err + } + + return cfg, nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/picker.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/picker.go new file mode 100644 index 0000000000..015cd2b7af --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/picker.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clustermanager + +import ( + "context" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// pickerGroup contains a list of pickers. If the picker isn't ready, the pick +// will be queued. +type pickerGroup struct { + pickers map[string]balancer.Picker +} + +func newPickerGroup(idToPickerState map[string]*subBalancerState) *pickerGroup { + pickers := make(map[string]balancer.Picker) + for id, st := range idToPickerState { + pickers[id] = st.state.Picker + } + return &pickerGroup{ + pickers: pickers, + } +} + +func (pg *pickerGroup) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + cluster := getPickedCluster(info.Ctx) + if p := pg.pickers[cluster]; p != nil { + return p.Pick(info) + } + return balancer.PickResult{}, status.Errorf(codes.Unavailable, "unknown cluster selected for RPC: %q", cluster) +} + +type clusterKey struct{} + +func getPickedCluster(ctx context.Context) string { + cluster, _ := ctx.Value(clusterKey{}).(string) + return cluster +} + +// GetPickedClusterForTesting returns the cluster in the context; to be used +// for testing only. +func GetPickedClusterForTesting(ctx context.Context) string { + return getPickedCluster(ctx) +} + +// SetPickedCluster adds the selected cluster to the context for the +// xds_cluster_manager LB policy to pick. +func SetPickedCluster(ctx context.Context, cluster string) context.Context { + return context.WithValue(ctx, clusterKey{}, cluster) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go new file mode 100644 index 0000000000..749945059b --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -0,0 +1,400 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package clusterresolver contains the implementation of the +// cluster_resolver_experimental LB policy which resolves endpoint addresses +// using a list of one or more discovery mechanisms. 
+package clusterresolver + +import ( + "encoding/json" + "errors" + "fmt" + + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/nop" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" + "google.golang.org/grpc/xds/internal/balancer/priority" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// Name is the name of the cluster_resolver balancer. +const Name = "cluster_resolver_experimental" + +var ( + errBalancerClosed = errors.New("cdsBalancer is closed") + newChildBalancer = func(bb balancer.Builder, cc balancer.ClientConn, o balancer.BuildOptions) balancer.Balancer { + return bb.Build(cc, o) + } +) + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +// Build helps implement the balancer.Builder interface. +func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + priorityBuilder := balancer.Get(priority.Name) + if priorityBuilder == nil { + logger.Errorf("%q LB policy is needed but not registered", priority.Name) + return nop.NewBalancer(cc, fmt.Errorf("%q LB policy is needed but not registered", priority.Name)) + } + priorityConfigParser, ok := priorityBuilder.(balancer.ConfigParser) + if !ok { + logger.Errorf("%q LB policy does not implement a config parser", priority.Name) + return nop.NewBalancer(cc, fmt.Errorf("%q LB policy does not implement a config parser", priority.Name)) + } + + b := &clusterResolverBalancer{ + bOpts: opts, + updateCh: buffer.NewUnbounded(), + closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + + priorityBuilder: priorityBuilder, + priorityConfigParser: priorityConfigParser, + } + b.logger = prefixLogger(b) + b.logger.Infof("Created") + + b.resourceWatcher = newResourceResolver(b, b.logger) + b.cc = &ccWrapper{ + ClientConn: cc, + b: b, + resourceWatcher: b.resourceWatcher, + } + + go b.run() + return b +} + +func (bb) Name() string { + return Name +} + +func (bb) ParseConfig(j json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + odBuilder := balancer.Get(outlierdetection.Name) + if odBuilder == nil { + // Shouldn't happen, registered through imported Outlier Detection, + // defensive programming. + return nil, fmt.Errorf("%q LB policy is needed but not registered", outlierdetection.Name) + } + odParser, ok := odBuilder.(balancer.ConfigParser) + if !ok { + // Shouldn't happen, imported Outlier Detection builder has this method. + return nil, fmt.Errorf("%q LB policy does not implement a config parser", outlierdetection.Name) + } + + var cfg *LBConfig + if err := json.Unmarshal(j, &cfg); err != nil { + return nil, fmt.Errorf("unable to unmarshal balancer config %s into cluster-resolver config, error: %v", string(j), err) + } + + for i, dm := range cfg.DiscoveryMechanisms { + lbCfg, err := odParser.ParseConfig(dm.OutlierDetection) + if err != nil { + return nil, fmt.Errorf("error parsing Outlier Detection config %v: %v", dm.OutlierDetection, err) + } + odCfg, ok := lbCfg.(*outlierdetection.LBConfig) + if !ok { + // Shouldn't happen, Parser built at build time with Outlier Detection + // builder pulled from gRPC LB Registry. 
+ return nil, fmt.Errorf("odParser returned config with unexpected type %T: %v", lbCfg, lbCfg) + } + cfg.DiscoveryMechanisms[i].outlierDetection = *odCfg + } + if err := json.Unmarshal(cfg.XDSLBPolicy, &cfg.xdsLBPolicy); err != nil { + // This will never occur, valid configuration is emitted from the xDS + // Client. Validity is already checked in the xDS Client, however, this + // double validation is present because Unmarshalling and Validating are + // coupled into one json.Unmarshal operation). We will switch this in + // the future to two separate operations. + return nil, fmt.Errorf("error unmarshalling xDS LB Policy: %v", err) + } + return cfg, nil +} + +// ccUpdate wraps a clientConn update received from gRPC. +type ccUpdate struct { + state balancer.ClientConnState + err error +} + +type exitIdle struct{} + +// clusterResolverBalancer resolves endpoint addresses using a list of one or +// more discovery mechanisms. +type clusterResolverBalancer struct { + cc balancer.ClientConn + bOpts balancer.BuildOptions + updateCh *buffer.Unbounded // Channel for updates from gRPC. + resourceWatcher *resourceResolver + logger *grpclog.PrefixLogger + closed *grpcsync.Event + done *grpcsync.Event + + priorityBuilder balancer.Builder + priorityConfigParser balancer.ConfigParser + + config *LBConfig + configRaw *serviceconfig.ParseResult + xdsClient xdsclient.XDSClient // xDS client to watch EDS resource. + attrsWithClient *attributes.Attributes // Attributes with xdsClient attached to be passed to the child policies. + + child balancer.Balancer + priorities []priorityConfig + watchUpdateReceived bool +} + +// handleClientConnUpdate handles a ClientConnUpdate received from gRPC. +// +// A good update results in creation of endpoint resolvers for the configured +// discovery mechanisms. An update with an error results in cancellation of any +// existing endpoint resolution and propagation of the same to the child policy. +func (b *clusterResolverBalancer) handleClientConnUpdate(update *ccUpdate) { + if err := update.err; err != nil { + b.handleErrorFromUpdate(err, true) + return + } + + b.logger.Infof("Received new balancer config: %v", pretty.ToJSON(update.state.BalancerConfig)) + cfg, _ := update.state.BalancerConfig.(*LBConfig) + if cfg == nil { + b.logger.Warningf("Ignoring unsupported balancer configuration of type: %T", update.state.BalancerConfig) + return + } + + b.config = cfg + b.configRaw = update.state.ResolverState.ServiceConfig + b.resourceWatcher.updateMechanisms(cfg.DiscoveryMechanisms) + + // The child policy is created only after all configured discovery + // mechanisms have been successfully returned endpoints. If that is not the + // case, we return early. + if !b.watchUpdateReceived { + return + } + b.updateChildConfig() +} + +// handleResourceUpdate handles a resource update or error from the resource +// resolver by propagating the same to the child LB policy. +func (b *clusterResolverBalancer) handleResourceUpdate(update *resourceUpdate) { + b.watchUpdateReceived = true + b.priorities = update.priorities + + // An update from the resource resolver contains resolved endpoint addresses + // for all configured discovery mechanisms ordered by priority. This is used + // to generate configuration for the priority LB policy. 
+ b.updateChildConfig() + + if update.onDone != nil { + update.onDone() + } +} + +// updateChildConfig builds child policy configuration using endpoint addresses +// returned by the resource resolver and child policy configuration provided by +// parent LB policy. +// +// A child policy is created if one doesn't already exist. The newly built +// configuration is then pushed to the child policy. +func (b *clusterResolverBalancer) updateChildConfig() { + if b.child == nil { + b.child = newChildBalancer(b.priorityBuilder, b.cc, b.bOpts) + } + + childCfgBytes, addrs, err := buildPriorityConfigJSON(b.priorities, &b.config.xdsLBPolicy) + if err != nil { + b.logger.Warningf("Failed to build child policy config: %v", err) + return + } + childCfg, err := b.priorityConfigParser.ParseConfig(childCfgBytes) + if err != nil { + b.logger.Warningf("Failed to parse child policy config. This should never happen because the config was generated: %v", err) + return + } + if b.logger.V(2) { + b.logger.Infof("Built child policy config: %s", pretty.ToJSON(childCfg)) + } + + endpoints := make([]resolver.Endpoint, len(addrs)) + for i, a := range addrs { + endpoints[i].Attributes = a.BalancerAttributes + endpoints[i].Addresses = []resolver.Address{a} + endpoints[i].Addresses[0].BalancerAttributes = nil + } + if err := b.child.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Endpoints: endpoints, + Addresses: addrs, + ServiceConfig: b.configRaw, + Attributes: b.attrsWithClient, + }, + BalancerConfig: childCfg, + }); err != nil { + b.logger.Warningf("Failed to push config to child policy: %v", err) + } +} + +// handleErrorFromUpdate handles errors from the parent LB policy and endpoint +// resolvers. fromParent is true if error is from the parent LB policy. In both +// cases, the error is propagated to the child policy, if one exists. +func (b *clusterResolverBalancer) handleErrorFromUpdate(err error, fromParent bool) { + b.logger.Warningf("Received error: %v", err) + + // A resource-not-found error from the parent LB policy means that the LDS + // or CDS resource was removed. This should result in endpoint resolvers + // being stopped here. + // + // A resource-not-found error from the EDS endpoint resolver means that the + // EDS resource was removed. No action needs to be taken for this, and we + // should continue watching the same EDS resource. + if fromParent && xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound { + b.resourceWatcher.stop(false) + } + + if b.child != nil { + b.child.ResolverError(err) + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(err), + }) +} + +// run is a long-running goroutine that handles updates from gRPC and endpoint +// resolvers. The methods handling the individual updates simply push them onto +// a channel which is read and acted upon from here. +func (b *clusterResolverBalancer) run() { + for { + select { + case u, ok := <-b.updateCh.Get(): + if !ok { + return + } + b.updateCh.Load() + switch update := u.(type) { + case *ccUpdate: + b.handleClientConnUpdate(update) + case exitIdle: + if b.child == nil { + b.logger.Errorf("xds: received ExitIdle with no child balancer") + break + } + // This implementation assumes the child balancer supports + // ExitIdle (but still checks for the interface's existence to + // avoid a panic if not). If the child does not, no subconns + // will be connected. 
+ if ei, ok := b.child.(balancer.ExitIdler); ok { + ei.ExitIdle() + } + } + case u := <-b.resourceWatcher.updateChannel: + b.handleResourceUpdate(u) + + // Close results in stopping the endpoint resolvers and closing the + // underlying child policy and is the only way to exit this goroutine. + case <-b.closed.Done(): + b.resourceWatcher.stop(true) + + if b.child != nil { + b.child.Close() + b.child = nil + } + b.updateCh.Close() + // This is the *ONLY* point of return from this function. + b.logger.Infof("Shutdown") + b.done.Fire() + return + } + } +} + +// Following are methods to implement the balancer interface. + +func (b *clusterResolverBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + if b.closed.HasFired() { + b.logger.Warningf("Received update from gRPC {%+v} after close", state) + return errBalancerClosed + } + + if b.xdsClient == nil { + c := xdsclient.FromResolverState(state.ResolverState) + if c == nil { + return balancer.ErrBadResolverState + } + b.xdsClient = c + b.attrsWithClient = state.ResolverState.Attributes + } + + b.updateCh.Put(&ccUpdate{state: state}) + return nil +} + +// ResolverError handles errors reported by the xdsResolver. +func (b *clusterResolverBalancer) ResolverError(err error) { + if b.closed.HasFired() { + b.logger.Warningf("Received resolver error {%v} after close", err) + return + } + b.updateCh.Put(&ccUpdate{err: err}) +} + +// UpdateSubConnState handles subConn updates from gRPC. +func (b *clusterResolverBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +// Close closes the cdsBalancer and the underlying child balancer. +func (b *clusterResolverBalancer) Close() { + b.closed.Fire() + <-b.done.Done() +} + +func (b *clusterResolverBalancer) ExitIdle() { + b.updateCh.Put(exitIdle{}) +} + +// ccWrapper overrides ResolveNow(), so that re-resolution from the child +// policies will trigger the DNS resolver in cluster_resolver balancer. It +// also intercepts NewSubConn calls in case children don't set the +// StateListener, to allow redirection to happen via this cluster_resolver +// balancer. +type ccWrapper struct { + balancer.ClientConn + b *clusterResolverBalancer + resourceWatcher *resourceResolver +} + +func (c *ccWrapper) ResolveNow(resolver.ResolveNowOptions) { + c.resourceWatcher.resolveNow() +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/config.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/config.go new file mode 100644 index 0000000000..7614b0fc57 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/config.go @@ -0,0 +1,160 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package clusterresolver + +import ( + "bytes" + "encoding/json" + "fmt" + + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" +) + +// DiscoveryMechanismType is the type of discovery mechanism. +type DiscoveryMechanismType int + +const ( + // DiscoveryMechanismTypeEDS is eds. + DiscoveryMechanismTypeEDS DiscoveryMechanismType = iota // `json:"EDS"` + // DiscoveryMechanismTypeLogicalDNS is DNS. + DiscoveryMechanismTypeLogicalDNS // `json:"LOGICAL_DNS"` +) + +// MarshalJSON marshals a DiscoveryMechanismType to a quoted json string. +// +// This is necessary to handle enum (as strings) from JSON. +// +// Note that this needs to be defined on the type not pointer, otherwise the +// variables of this type will marshal to int not string. +func (t DiscoveryMechanismType) MarshalJSON() ([]byte, error) { + buffer := bytes.NewBufferString(`"`) + switch t { + case DiscoveryMechanismTypeEDS: + buffer.WriteString("EDS") + case DiscoveryMechanismTypeLogicalDNS: + buffer.WriteString("LOGICAL_DNS") + } + buffer.WriteString(`"`) + return buffer.Bytes(), nil +} + +// UnmarshalJSON unmarshals a quoted json string to the DiscoveryMechanismType. +func (t *DiscoveryMechanismType) UnmarshalJSON(b []byte) error { + var s string + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + switch s { + case "EDS": + *t = DiscoveryMechanismTypeEDS + case "LOGICAL_DNS": + *t = DiscoveryMechanismTypeLogicalDNS + default: + return fmt.Errorf("unable to unmarshal string %q to type DiscoveryMechanismType", s) + } + return nil +} + +// DiscoveryMechanism is the discovery mechanism, can be either EDS or DNS. +// +// For DNS, the ClientConn target will be used for name resolution. +// +// For EDS, if EDSServiceName is not empty, it will be used for watching. If +// EDSServiceName is empty, Cluster will be used. +type DiscoveryMechanism struct { + // Cluster is the cluster name. + Cluster string `json:"cluster,omitempty"` + // LoadReportingServer is the LRS server to send load reports to. If not + // present, load reporting will be disabled. + LoadReportingServer *bootstrap.ServerConfig `json:"lrsLoadReportingServer,omitempty"` + // MaxConcurrentRequests is the maximum number of outstanding requests can + // be made to the upstream cluster. Default is 1024. + MaxConcurrentRequests *uint32 `json:"maxConcurrentRequests,omitempty"` + // Type is the discovery mechanism type. + Type DiscoveryMechanismType `json:"type,omitempty"` + // EDSServiceName is the EDS service name, as returned in CDS. May be unset + // if not specified in CDS. For type EDS only. + // + // This is used for EDS watch if set. If unset, Cluster is used for EDS + // watch. + EDSServiceName string `json:"edsServiceName,omitempty"` + // DNSHostname is the DNS name to resolve in "host:port" form. For type + // LOGICAL_DNS only. + DNSHostname string `json:"dnsHostname,omitempty"` + // OutlierDetection is the Outlier Detection LB configuration for this + // priority. + OutlierDetection json.RawMessage `json:"outlierDetection,omitempty"` + // TelemetryLabels are the telemetry labels associated with this cluster. + TelemetryLabels map[string]string `json:"telemetryLabels,omitempty"` + outlierDetection outlierdetection.LBConfig +} + +// Equal returns whether the DiscoveryMechanism is the same with the parameter. 
+func (dm DiscoveryMechanism) Equal(b DiscoveryMechanism) bool { + od := &dm.outlierDetection + switch { + case dm.Cluster != b.Cluster: + return false + case !equalUint32P(dm.MaxConcurrentRequests, b.MaxConcurrentRequests): + return false + case dm.Type != b.Type: + return false + case dm.EDSServiceName != b.EDSServiceName: + return false + case dm.DNSHostname != b.DNSHostname: + return false + case !od.EqualIgnoringChildPolicy(&b.outlierDetection): + return false + } + + if dm.LoadReportingServer == nil && b.LoadReportingServer == nil { + return true + } + if (dm.LoadReportingServer != nil) != (b.LoadReportingServer != nil) { + return false + } + return dm.LoadReportingServer.String() == b.LoadReportingServer.String() +} + +func equalUint32P(a, b *uint32) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + return *a == *b +} + +// LBConfig is the config for cluster resolver balancer. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + // DiscoveryMechanisms is an ordered list of discovery mechanisms. + // + // Must have at least one element. Results from each discovery mechanism are + // concatenated together in successive priorities. + DiscoveryMechanisms []DiscoveryMechanism `json:"discoveryMechanisms,omitempty"` + + // XDSLBPolicy specifies the policy for locality picking and endpoint picking. + XDSLBPolicy json.RawMessage `json:"xdsLbPolicy,omitempty"` + xdsLBPolicy internalserviceconfig.BalancerConfig +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go new file mode 100644 index 0000000000..8740360eef --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go @@ -0,0 +1,291 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clusterresolver + +import ( + "encoding/json" + "fmt" + "sort" + + "google.golang.org/grpc/balancer/weightedroundrobin" + "google.golang.org/grpc/internal/hierarchy" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/balancer/clusterimpl" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" + "google.golang.org/grpc/xds/internal/balancer/priority" + "google.golang.org/grpc/xds/internal/balancer/wrrlocality" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +const million = 1000000 + +// priorityConfig is config for one priority. For example, if there an EDS and a +// DNS, the priority list will be [priorityConfig{EDS}, priorityConfig{DNS}]. +// +// Each priorityConfig corresponds to one discovery mechanism from the LBConfig +// generated by the CDS balancer. 
The CDS balancer resolves the cluster name to +// an ordered list of discovery mechanisms (if the top cluster is an aggregated +// cluster), one for each underlying cluster. +type priorityConfig struct { + mechanism DiscoveryMechanism + // edsResp is set only if type is EDS. + edsResp xdsresource.EndpointsUpdate + // addresses is set only if type is DNS. + addresses []string + // Each discovery mechanism has a name generator so that the child policies + // can reuse names between updates (EDS updates for example). + childNameGen *nameGenerator +} + +// buildPriorityConfigJSON builds balancer config for the passed in +// priorities. +// +// The built tree of balancers (see test for the output struct). +// +// ┌────────┐ +// │priority│ +// └┬──────┬┘ +// │ │ +// ┌──────────▼─┐ ┌─▼──────────┐ +// │cluster_impl│ │cluster_impl│ +// └──────┬─────┘ └─────┬──────┘ +// │ │ +// ┌──────▼─────┐ ┌─────▼──────┐ +// │xDSLBPolicy │ │xDSLBPolicy │ (Locality and Endpoint picking layer) +// └────────────┘ └────────────┘ +func buildPriorityConfigJSON(priorities []priorityConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]byte, []resolver.Address, error) { + pc, addrs, err := buildPriorityConfig(priorities, xdsLBPolicy) + if err != nil { + return nil, nil, fmt.Errorf("failed to build priority config: %v", err) + } + ret, err := json.Marshal(pc) + if err != nil { + return nil, nil, fmt.Errorf("failed to marshal built priority config struct into json: %v", err) + } + return ret, addrs, nil +} + +func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*priority.LBConfig, []resolver.Address, error) { + var ( + retConfig = &priority.LBConfig{Children: make(map[string]*priority.Child)} + retAddrs []resolver.Address + ) + for _, p := range priorities { + switch p.mechanism.Type { + case DiscoveryMechanismTypeEDS: + names, configs, addrs, err := buildClusterImplConfigForEDS(p.childNameGen, p.edsResp, p.mechanism, xdsLBPolicy) + if err != nil { + return nil, nil, err + } + retConfig.Priorities = append(retConfig.Priorities, names...) + retAddrs = append(retAddrs, addrs...) + odCfgs := convertClusterImplMapToOutlierDetection(configs, p.mechanism.outlierDetection) + for n, c := range odCfgs { + retConfig.Children[n] = &priority.Child{ + Config: &internalserviceconfig.BalancerConfig{Name: outlierdetection.Name, Config: c}, + // Ignore all re-resolution from EDS children. + IgnoreReresolutionRequests: true, + } + } + continue + case DiscoveryMechanismTypeLogicalDNS: + name, config, addrs := buildClusterImplConfigForDNS(p.childNameGen, p.addresses, p.mechanism) + retConfig.Priorities = append(retConfig.Priorities, name) + retAddrs = append(retAddrs, addrs...) + odCfg := makeClusterImplOutlierDetectionChild(config, p.mechanism.outlierDetection) + retConfig.Children[name] = &priority.Child{ + Config: &internalserviceconfig.BalancerConfig{Name: outlierdetection.Name, Config: odCfg}, + // Not ignore re-resolution from DNS children, they will trigger + // DNS to re-resolve. 
+ IgnoreReresolutionRequests: false, + } + continue + } + } + return retConfig, retAddrs, nil +} + +func convertClusterImplMapToOutlierDetection(ciCfgs map[string]*clusterimpl.LBConfig, odCfg outlierdetection.LBConfig) map[string]*outlierdetection.LBConfig { + odCfgs := make(map[string]*outlierdetection.LBConfig, len(ciCfgs)) + for n, c := range ciCfgs { + odCfgs[n] = makeClusterImplOutlierDetectionChild(c, odCfg) + } + return odCfgs +} + +func makeClusterImplOutlierDetectionChild(ciCfg *clusterimpl.LBConfig, odCfg outlierdetection.LBConfig) *outlierdetection.LBConfig { + odCfgRet := odCfg + odCfgRet.ChildPolicy = &internalserviceconfig.BalancerConfig{Name: clusterimpl.Name, Config: ciCfg} + return &odCfgRet +} + +func buildClusterImplConfigForDNS(g *nameGenerator, addrStrs []string, mechanism DiscoveryMechanism) (string, *clusterimpl.LBConfig, []resolver.Address) { + // Endpoint picking policy for DNS is hardcoded to pick_first. + const childPolicy = "pick_first" + retAddrs := make([]resolver.Address, 0, len(addrStrs)) + pName := fmt.Sprintf("priority-%v", g.prefix) + for _, addrStr := range addrStrs { + retAddrs = append(retAddrs, hierarchy.Set(resolver.Address{Addr: addrStr}, []string{pName})) + } + return pName, &clusterimpl.LBConfig{ + Cluster: mechanism.Cluster, + TelemetryLabels: mechanism.TelemetryLabels, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: childPolicy}, + }, retAddrs +} + +// buildClusterImplConfigForEDS returns a list of cluster_impl configs, one for +// each priority, sorted by priority, and the addresses for each priority (with +// hierarchy attributes set). +// +// For example, if there are two priorities, the returned values will be +// - ["p0", "p1"] +// - map{"p0":p0_config, "p1":p1_config} +// - [p0_address_0, p0_address_1, p1_address_0, p1_address_1] +// - p0 addresses' hierarchy attributes are set to p0 +func buildClusterImplConfigForEDS(g *nameGenerator, edsResp xdsresource.EndpointsUpdate, mechanism DiscoveryMechanism, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]string, map[string]*clusterimpl.LBConfig, []resolver.Address, error) { + drops := make([]clusterimpl.DropConfig, 0, len(edsResp.Drops)) + for _, d := range edsResp.Drops { + drops = append(drops, clusterimpl.DropConfig{ + Category: d.Category, + RequestsPerMillion: d.Numerator * million / d.Denominator, + }) + } + + // Localities of length 0 is triggered by an NACK or resource-not-found + // error before update, or a empty localities list in a update. In either + // case want to create a priority, and send down empty address list, causing + // TF for that priority. "If any discovery mechanism instance experiences an + // error retrieving data, and it has not previously reported any results, it + // should report a result that is a single priority with no endpoints." - + // A37 + priorities := [][]xdsresource.Locality{{}} + if len(edsResp.Localities) != 0 { + priorities = groupLocalitiesByPriority(edsResp.Localities) + } + retNames := g.generate(priorities) + retConfigs := make(map[string]*clusterimpl.LBConfig, len(retNames)) + var retAddrs []resolver.Address + for i, pName := range retNames { + priorityLocalities := priorities[i] + cfg, addrs, err := priorityLocalitiesToClusterImpl(priorityLocalities, pName, mechanism, drops, xdsLBPolicy) + if err != nil { + return nil, nil, nil, err + } + retConfigs[pName] = cfg + retAddrs = append(retAddrs, addrs...) + } + return retNames, retConfigs, retAddrs, nil +} + +// groupLocalitiesByPriority returns the localities grouped by priority. 
+// +// The returned list is sorted from higher priority to lower. Each item in the +// list is a group of localities. +// +// For example, for L0-p0, L1-p0, L2-p1, results will be +// - [[L0, L1], [L2]] +func groupLocalitiesByPriority(localities []xdsresource.Locality) [][]xdsresource.Locality { + var priorityIntSlice []int + priorities := make(map[int][]xdsresource.Locality) + for _, locality := range localities { + priority := int(locality.Priority) + priorities[priority] = append(priorities[priority], locality) + priorityIntSlice = append(priorityIntSlice, priority) + } + // Sort the priorities based on the int value, deduplicate, and then turn + // the sorted list into a string list. This will be child names, in priority + // order. + sort.Ints(priorityIntSlice) + priorityIntSliceDeduped := dedupSortedIntSlice(priorityIntSlice) + ret := make([][]xdsresource.Locality, 0, len(priorityIntSliceDeduped)) + for _, p := range priorityIntSliceDeduped { + ret = append(ret, priorities[p]) + } + return ret +} + +func dedupSortedIntSlice(a []int) []int { + if len(a) == 0 { + return a + } + i, j := 0, 1 + for ; j < len(a); j++ { + if a[i] == a[j] { + continue + } + i++ + if i != j { + a[i] = a[j] + } + } + return a[:i+1] +} + +// priorityLocalitiesToClusterImpl takes a list of localities (with the same +// priority), and generates a cluster impl policy config, and a list of +// addresses with their path hierarchy set to [priority-name, locality-name], so +// priority and the xDS LB Policy know which child policy each address is for. +func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priorityName string, mechanism DiscoveryMechanism, drops []clusterimpl.DropConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*clusterimpl.LBConfig, []resolver.Address, error) { + var addrs []resolver.Address + for _, locality := range localities { + var lw uint32 = 1 + if locality.Weight != 0 { + lw = locality.Weight + } + localityStr, err := locality.ID.ToString() + if err != nil { + localityStr = fmt.Sprintf("%+v", locality.ID) + } + for _, endpoint := range locality.Endpoints { + // Filter out all "unhealthy" endpoints (unknown and healthy are + // both considered to be healthy: + // https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/health_check.proto#envoy-api-enum-core-healthstatus). + if endpoint.HealthStatus != xdsresource.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsresource.EndpointHealthStatusUnknown { + continue + } + addr := resolver.Address{Addr: endpoint.Address} + addr = hierarchy.Set(addr, []string{priorityName, localityStr}) + addr = internal.SetLocalityID(addr, locality.ID) + // "To provide the xds_wrr_locality load balancer information about + // locality weights received from EDS, the cluster resolver will + // populate a new locality weight attribute for each address The + // attribute will have the weight (as an integer) of the locality + // the address is part of." 
- A52 + addr = wrrlocality.SetAddrInfo(addr, wrrlocality.AddrInfo{LocalityWeight: lw}) + var ew uint32 = 1 + if endpoint.Weight != 0 { + ew = endpoint.Weight + } + addr = weightedroundrobin.SetAddrInfo(addr, weightedroundrobin.AddrInfo{Weight: lw * ew}) + addrs = append(addrs, addr) + } + } + return &clusterimpl.LBConfig{ + Cluster: mechanism.Cluster, + EDSServiceName: mechanism.EDSServiceName, + LoadReportingServer: mechanism.LoadReportingServer, + MaxConcurrentRequests: mechanism.MaxConcurrentRequests, + TelemetryLabels: mechanism.TelemetryLabels, + DropCategories: drops, + ChildPolicy: xdsLBPolicy, + }, addrs, nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder_childname.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder_childname.go new file mode 100644 index 0000000000..119f4c4747 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder_childname.go @@ -0,0 +1,88 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package clusterresolver + +import ( + "fmt" + + "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// nameGenerator generates a child name for a list of priorities (each priority +// is a list of localities). +// +// The purpose of this generator is to reuse names between updates. So the +// struct keeps state between generate() calls, and a later generate() might +// return names returned by the previous call. +type nameGenerator struct { + existingNames map[internal.LocalityID]string + prefix uint64 + nextID uint64 +} + +func newNameGenerator(prefix uint64) *nameGenerator { + return &nameGenerator{prefix: prefix} +} + +// generate returns a list of names for the given list of priorities. +// +// Each priority is a list of localities. The name for the priority is picked as +// - for each locality in this priority, if it exists in the existing names, +// this priority will reuse the name +// - if no reusable name is found for this priority, a new name is generated +// +// For example: +// - update 1: [[L1], [L2], [L3]] --> ["0", "1", "2"] +// - update 2: [[L1], [L2], [L3]] --> ["0", "1", "2"] +// - update 3: [[L1, L2], [L3]] --> ["0", "2"] (Two priorities were merged) +// - update 4: [[L1], [L4]] --> ["0", "3",] (A priority was split, and a new priority was added) +func (ng *nameGenerator) generate(priorities [][]xdsresource.Locality) []string { + var ret []string + usedNames := make(map[string]bool) + newNames := make(map[internal.LocalityID]string) + for _, priority := range priorities { + var nameFound string + for _, locality := range priority { + if name, ok := ng.existingNames[locality.ID]; ok { + if !usedNames[name] { + nameFound = name + // Found a name to use. No need to process the remaining + // localities. + break + } + } + } + + if nameFound == "" { + // No appropriate used name is found. Make a new name. 
+ nameFound = fmt.Sprintf("priority-%d-%d", ng.prefix, ng.nextID) + ng.nextID++ + } + + ret = append(ret, nameFound) + // All localities in this priority share the same name. Add them all to + // the new map. + for _, l := range priority { + newNames[l.ID] = nameFound + } + usedNames[nameFound] = true + } + ng.existingNames = newNames + return ret +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/logging.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/logging.go new file mode 100644 index 0000000000..728f1f709c --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/logging.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clusterresolver + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[xds-cluster-resolver-lb %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *clusterResolverBalancer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go new file mode 100644 index 0000000000..5bc64b8630 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go @@ -0,0 +1,321 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clusterresolver + +import ( + "context" + "sync" + + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// resourceUpdate is a combined update from all the resources, in the order of +// priority. For example, it can be {EDS, EDS, DNS}. +type resourceUpdate struct { + // A discovery mechanism would return an empty update when it runs into + // errors, and this would result in the priority LB policy reporting + // TRANSIENT_FAILURE (if there was a single discovery mechanism), or would + // fallback to the next highest priority that is available. + priorities []priorityConfig + // To be invoked once the update is completely processed, or is dropped in + // favor of a newer update. 
+ onDone xdsresource.OnDoneFunc +} + +// topLevelResolver is used by concrete endpointsResolver implementations for +// reporting updates and errors. The `resourceResolver` type implements this +// interface and takes appropriate actions upon receipt of updates and errors +// from underlying concrete resolvers. +type topLevelResolver interface { + // onUpdate is called when a new update is received from the underlying + // endpointsResolver implementation. The onDone callback is to be invoked + // once the update is completely processed, or is dropped in favor of a + // newer update. + onUpdate(onDone xdsresource.OnDoneFunc) +} + +// endpointsResolver wraps the functionality to resolve a given resource name to +// a set of endpoints. The mechanism used by concrete implementations depend on +// the supported discovery mechanism type. +type endpointsResolver interface { + // lastUpdate returns endpoint results from the most recent resolution. + // + // The type of the first return result is dependent on the resolver + // implementation. + // + // The second return result indicates whether the resolver was able to + // successfully resolve the resource name to endpoints. If set to false, the + // first return result is invalid and must not be used. + lastUpdate() (any, bool) + + // resolverNow triggers re-resolution of the resource. + resolveNow() + + // stop stops resolution of the resource. Implementations must not invoke + // any methods on the topLevelResolver interface once `stop()` returns. + stop() +} + +// discoveryMechanismKey is {type+resource_name}, it's used as the map key, so +// that the same resource resolver can be reused (e.g. when there are two +// mechanisms, both for the same EDS resource, but has different circuit +// breaking config. +type discoveryMechanismKey struct { + typ DiscoveryMechanismType + name string +} + +// discoveryMechanismAndResolver is needed to keep the resolver and the +// discovery mechanism together, because resolvers can be shared. And we need +// the mechanism for fields like circuit breaking, LRS etc when generating the +// balancer config. +type discoveryMechanismAndResolver struct { + dm DiscoveryMechanism + r endpointsResolver + + childNameGen *nameGenerator +} + +type resourceResolver struct { + parent *clusterResolverBalancer + logger *grpclog.PrefixLogger + updateChannel chan *resourceUpdate + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + // mu protects the slice and map, and content of the resolvers in the slice. + mu sync.Mutex + mechanisms []DiscoveryMechanism + children []discoveryMechanismAndResolver + // childrenMap's value only needs the resolver implementation (type + // discoveryMechanism) and the childNameGen. The other two fields are not + // used. + // + // TODO(cleanup): maybe we can make a new type with just the necessary + // fields, and use it here instead. + childrenMap map[discoveryMechanismKey]discoveryMechanismAndResolver + // Each new discovery mechanism needs a child name generator to reuse child + // policy names. But to make sure the names across discover mechanism + // doesn't conflict, we need a seq ID. This ID is incremented for each new + // discover mechanism. 
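+	// For example, with two EDS mechanisms the first generator is seeded
+	// with 0 and produces child names "priority-0-0", "priority-0-1", ...,
+	// while the second is seeded with 1 and produces "priority-1-0", ...,
+	// so names generated by different mechanisms cannot collide.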
+ childNameGeneratorSeqID uint64 +} + +func newResourceResolver(parent *clusterResolverBalancer, logger *grpclog.PrefixLogger) *resourceResolver { + rr := &resourceResolver{ + parent: parent, + logger: logger, + updateChannel: make(chan *resourceUpdate, 1), + childrenMap: make(map[discoveryMechanismKey]discoveryMechanismAndResolver), + } + ctx, cancel := context.WithCancel(context.Background()) + rr.serializer = grpcsync.NewCallbackSerializer(ctx) + rr.serializerCancel = cancel + return rr +} + +func equalDiscoveryMechanisms(a, b []DiscoveryMechanism) bool { + if len(a) != len(b) { + return false + } + for i, aa := range a { + bb := b[i] + if !aa.Equal(bb) { + return false + } + } + return true +} + +func discoveryMechanismToKey(dm DiscoveryMechanism) discoveryMechanismKey { + switch dm.Type { + case DiscoveryMechanismTypeEDS: + nameToWatch := dm.EDSServiceName + if nameToWatch == "" { + nameToWatch = dm.Cluster + } + return discoveryMechanismKey{typ: dm.Type, name: nameToWatch} + case DiscoveryMechanismTypeLogicalDNS: + return discoveryMechanismKey{typ: dm.Type, name: dm.DNSHostname} + default: + return discoveryMechanismKey{} + } +} + +func (rr *resourceResolver) updateMechanisms(mechanisms []DiscoveryMechanism) { + rr.mu.Lock() + defer rr.mu.Unlock() + if equalDiscoveryMechanisms(rr.mechanisms, mechanisms) { + return + } + rr.mechanisms = mechanisms + rr.children = make([]discoveryMechanismAndResolver, len(mechanisms)) + newDMs := make(map[discoveryMechanismKey]bool) + + // Start one watch for each new discover mechanism {type+resource_name}. + for i, dm := range mechanisms { + dmKey := discoveryMechanismToKey(dm) + newDMs[dmKey] = true + dmAndResolver, ok := rr.childrenMap[dmKey] + if ok { + // If this is not new, keep the fields (especially childNameGen), + // and only update the DiscoveryMechanism. + // + // Note that the same dmKey doesn't mean the same + // DiscoveryMechanism. There are fields (e.g. + // MaxConcurrentRequests) in DiscoveryMechanism that are not copied + // to dmKey, we need to keep those updated. + dmAndResolver.dm = dm + rr.children[i] = dmAndResolver + continue + } + + // Create resolver for a newly seen resource. + var resolver endpointsResolver + switch dm.Type { + case DiscoveryMechanismTypeEDS: + resolver = newEDSResolver(dmKey.name, rr.parent.xdsClient, rr, rr.logger) + case DiscoveryMechanismTypeLogicalDNS: + resolver = newDNSResolver(dmKey.name, rr, rr.logger) + } + dmAndResolver = discoveryMechanismAndResolver{ + dm: dm, + r: resolver, + childNameGen: newNameGenerator(rr.childNameGeneratorSeqID), + } + rr.childrenMap[dmKey] = dmAndResolver + rr.children[i] = dmAndResolver + rr.childNameGeneratorSeqID++ + } + + // Stop the resources that were removed. + for dm, r := range rr.childrenMap { + if !newDMs[dm] { + delete(rr.childrenMap, dm) + go r.r.stop() + } + } + // Regenerate even if there's no change in discovery mechanism, in case + // priority order changed. + rr.generateLocked(func() {}) +} + +// resolveNow is typically called to trigger re-resolve of DNS. The EDS +// resolveNow() is a noop. +func (rr *resourceResolver) resolveNow() { + rr.mu.Lock() + defer rr.mu.Unlock() + for _, r := range rr.childrenMap { + r.r.resolveNow() + } +} + +func (rr *resourceResolver) stop(closing bool) { + rr.mu.Lock() + + // Save the previous childrenMap to stop the children outside the mutex, + // and reinitialize the map. We only need to reinitialize to allow for the + // policy to be reused if the resource comes back. 
In practice, this does + // not happen as the parent LB policy will also be closed, causing this to + // be removed entirely, but a future use case might want to reuse the + // policy instead. + cm := rr.childrenMap + rr.childrenMap = make(map[discoveryMechanismKey]discoveryMechanismAndResolver) + rr.mechanisms = nil + rr.children = nil + + rr.mu.Unlock() + + for _, r := range cm { + r.r.stop() + } + + if closing { + rr.serializerCancel() + <-rr.serializer.Done() + } + + // stop() is called when the LB policy is closed or when the underlying + // cluster resource is removed by the management server. In the latter case, + // an empty config update needs to be pushed to the child policy to ensure + // that a picker that fails RPCs is sent up to the channel. + // + // Resource resolver implementations are expected to not send any updates + // after they are stopped. Therefore, we don't have to worry about another + // write to this channel happening at the same time as this one. + select { + case ru := <-rr.updateChannel: + if ru.onDone != nil { + ru.onDone() + } + default: + } + rr.updateChannel <- &resourceUpdate{} +} + +// generateLocked collects updates from all resolvers. It pushes the combined +// result on the update channel if all child resolvers have received at least +// one update. Otherwise it returns early. +// +// The onDone callback is invoked inline if not all child resolvers have +// received at least one update. If all child resolvers have received at least +// one update, onDone is invoked when the combined update is processed by the +// clusterresolver LB policy. +// +// Caller must hold rr.mu. +func (rr *resourceResolver) generateLocked(onDone xdsresource.OnDoneFunc) { + var ret []priorityConfig + for _, rDM := range rr.children { + u, ok := rDM.r.lastUpdate() + if !ok { + // Don't send updates to parent until all resolvers have update to + // send. + onDone() + return + } + switch uu := u.(type) { + case xdsresource.EndpointsUpdate: + ret = append(ret, priorityConfig{mechanism: rDM.dm, edsResp: uu, childNameGen: rDM.childNameGen}) + case []string: + ret = append(ret, priorityConfig{mechanism: rDM.dm, addresses: uu, childNameGen: rDM.childNameGen}) + } + } + select { + // A previously unprocessed update is dropped in favor of the new one, and + // the former's onDone callback is invoked to unblock the xDS client's + // receive path. + case ru := <-rr.updateChannel: + if ru.onDone != nil { + ru.onDone() + } + default: + } + rr.updateChannel <- &resourceUpdate{priorities: ret, onDone: onDone} +} + +func (rr *resourceResolver) onUpdate(onDone xdsresource.OnDoneFunc) { + handleUpdate := func(context.Context) { + rr.mu.Lock() + rr.generateLocked(onDone) + rr.mu.Unlock() + } + rr.serializer.ScheduleOr(handleUpdate, func() { onDone() }) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go new file mode 100644 index 0000000000..cfc871d3b5 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go @@ -0,0 +1,188 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clusterresolver + +import ( + "fmt" + "net/url" + "sync" + + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +var ( + newDNS = func(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + // The dns resolver is registered by the grpc package. So, this call to + // resolver.Get() is never expected to return nil. + return resolver.Get("dns").Build(target, cc, opts) + } +) + +// dnsDiscoveryMechanism watches updates for the given DNS hostname. +// +// It implements resolver.ClientConn interface to work with the DNS resolver. +type dnsDiscoveryMechanism struct { + target string + topLevelResolver topLevelResolver + dnsR resolver.Resolver + logger *grpclog.PrefixLogger + + mu sync.Mutex + addrs []string + updateReceived bool +} + +// newDNSResolver creates an endpoints resolver which uses a DNS resolver under +// the hood. +// +// An error in parsing the provided target string or an error in creating a DNS +// resolver means that we will never be able to resolve the provided target +// strings to endpoints. The topLevelResolver propagates address updates to the +// clusterresolver LB policy **only** after it receives updates from all its +// child resolvers. Therefore, an error here means that the topLevelResolver +// will never send address updates to the clusterresolver LB policy. +// +// Calling the onError() callback will ensure that this error is +// propagated to the child policy which eventually move the channel to +// transient failure. +// +// The `dnsR` field is unset if we run into errors in this function. Therefore, a +// nil check is required wherever we access that field. +func newDNSResolver(target string, topLevelResolver topLevelResolver, logger *grpclog.PrefixLogger) *dnsDiscoveryMechanism { + ret := &dnsDiscoveryMechanism{ + target: target, + topLevelResolver: topLevelResolver, + logger: logger, + } + u, err := url.Parse("dns:///" + target) + if err != nil { + if ret.logger.V(2) { + ret.logger.Infof("Failed to parse dns hostname %q in clusterresolver LB policy", target) + } + ret.updateReceived = true + ret.topLevelResolver.onUpdate(func() {}) + return ret + } + + r, err := newDNS(resolver.Target{URL: *u}, ret, resolver.BuildOptions{}) + if err != nil { + if ret.logger.V(2) { + ret.logger.Infof("Failed to build DNS resolver for target %q: %v", target, err) + } + ret.updateReceived = true + ret.topLevelResolver.onUpdate(func() {}) + return ret + } + ret.dnsR = r + return ret +} + +func (dr *dnsDiscoveryMechanism) lastUpdate() (any, bool) { + dr.mu.Lock() + defer dr.mu.Unlock() + + if !dr.updateReceived { + return nil, false + } + return dr.addrs, true +} + +func (dr *dnsDiscoveryMechanism) resolveNow() { + if dr.dnsR != nil { + dr.dnsR.ResolveNow(resolver.ResolveNowOptions{}) + } +} + +// The definition of stop() mentions that implementations must not invoke any +// methods on the topLevelResolver once the call to `stop()` returns. 
The +// underlying dns resolver does not send any updates to the resolver.ClientConn +// interface passed to it (implemented by dnsDiscoveryMechanism in this case) +// after its `Close()` returns. Therefore, we can guarantee that no methods of +// the topLevelResolver are invoked after we return from this method. +func (dr *dnsDiscoveryMechanism) stop() { + if dr.dnsR != nil { + dr.dnsR.Close() + } +} + +// dnsDiscoveryMechanism needs to implement resolver.ClientConn interface to receive +// updates from the real DNS resolver. + +func (dr *dnsDiscoveryMechanism) UpdateState(state resolver.State) error { + if dr.logger.V(2) { + dr.logger.Infof("DNS discovery mechanism for resource %q reported an update: %s", dr.target, pretty.ToJSON(state)) + } + + dr.mu.Lock() + var addrs []string + if len(state.Endpoints) > 0 { + // Assume 1 address per endpoint, which is how DNS is expected to + // behave. The slice will grow as needed, however. + addrs = make([]string, 0, len(state.Endpoints)) + for _, e := range state.Endpoints { + for _, a := range e.Addresses { + addrs = append(addrs, a.Addr) + } + } + } else { + addrs = make([]string, len(state.Addresses)) + for i, a := range state.Addresses { + addrs[i] = a.Addr + } + } + dr.addrs = addrs + dr.updateReceived = true + dr.mu.Unlock() + + dr.topLevelResolver.onUpdate(func() {}) + return nil +} + +func (dr *dnsDiscoveryMechanism) ReportError(err error) { + if dr.logger.V(2) { + dr.logger.Infof("DNS discovery mechanism for resource %q reported error: %v", dr.target, err) + } + + dr.mu.Lock() + // If a previous good update was received, suppress the error and continue + // using the previous update. If RPCs were succeeding prior to this, they + // will continue to do so. Also suppress errors if we previously received an + // error, since there will be no downstream effects of propagating this + // error. + if dr.updateReceived { + dr.mu.Unlock() + return + } + dr.addrs = nil + dr.updateReceived = true + dr.mu.Unlock() + + dr.topLevelResolver.onUpdate(func() {}) +} + +func (dr *dnsDiscoveryMechanism) NewAddress(addresses []resolver.Address) { + dr.UpdateState(resolver.State{Addresses: addresses}) +} + +func (dr *dnsDiscoveryMechanism) ParseServiceConfig(string) *serviceconfig.ParseResult { + return &serviceconfig.ParseResult{Err: fmt.Errorf("service config not supported")} +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go new file mode 100644 index 0000000000..ddb949019e --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go @@ -0,0 +1,144 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package clusterresolver + +import ( + "sync" + + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +type edsDiscoveryMechanism struct { + nameToWatch string + cancelWatch func() + topLevelResolver topLevelResolver + stopped *grpcsync.Event + logger *grpclog.PrefixLogger + + mu sync.Mutex + update *xdsresource.EndpointsUpdate // Nil indicates no update received so far. +} + +func (er *edsDiscoveryMechanism) lastUpdate() (any, bool) { + er.mu.Lock() + defer er.mu.Unlock() + + if er.update == nil { + return nil, false + } + return *er.update, true +} + +func (er *edsDiscoveryMechanism) resolveNow() { +} + +// The definition of stop() mentions that implementations must not invoke any +// methods on the topLevelResolver once the call to `stop()` returns. +func (er *edsDiscoveryMechanism) stop() { + // Canceling a watch with the xDS client can race with an xDS response + // received around the same time, and can result in the watch callback being + // invoked after the watch is canceled. Callers need to handle this race, + // and we fire the stopped event here to ensure that a watch callback + // invocation around the same time becomes a no-op. + er.stopped.Fire() + er.cancelWatch() +} + +// newEDSResolver returns an implementation of the endpointsResolver interface +// that uses EDS to resolve the given name to endpoints. +func newEDSResolver(nameToWatch string, producer xdsresource.Producer, topLevelResolver topLevelResolver, logger *grpclog.PrefixLogger) *edsDiscoveryMechanism { + ret := &edsDiscoveryMechanism{ + nameToWatch: nameToWatch, + topLevelResolver: topLevelResolver, + logger: logger, + stopped: grpcsync.NewEvent(), + } + ret.cancelWatch = xdsresource.WatchEndpoints(producer, nameToWatch, ret) + return ret +} + +// OnUpdate is invoked to report an update for the resource being watched. +func (er *edsDiscoveryMechanism) OnUpdate(update *xdsresource.EndpointsResourceData, onDone xdsresource.OnDoneFunc) { + if er.stopped.HasFired() { + onDone() + return + } + + er.mu.Lock() + er.update = &update.Resource + er.mu.Unlock() + + er.topLevelResolver.onUpdate(onDone) +} + +func (er *edsDiscoveryMechanism) OnError(err error, onDone xdsresource.OnDoneFunc) { + if er.stopped.HasFired() { + onDone() + return + } + + if er.logger.V(2) { + er.logger.Infof("EDS discovery mechanism for resource %q reported error: %v", er.nameToWatch, err) + } + + er.mu.Lock() + if er.update != nil { + // Continue using a previously received good configuration if one + // exists. + er.mu.Unlock() + onDone() + return + } + + // Else report an empty update that would result in no priority child being + // created for this discovery mechanism. This would result in the priority + // LB policy reporting TRANSIENT_FAILURE (as there would be no priorities or + // localities) if this was the only discovery mechanism, or would result in + // the priority LB policy using a lower priority discovery mechanism when + // that becomes available. 
+ er.update = &xdsresource.EndpointsUpdate{} + er.mu.Unlock() + + er.topLevelResolver.onUpdate(onDone) +} + +func (er *edsDiscoveryMechanism) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) { + if er.stopped.HasFired() { + onDone() + return + } + + if er.logger.V(2) { + er.logger.Infof("EDS discovery mechanism for resource %q reported resource-does-not-exist error", er.nameToWatch) + } + + // Report an empty update that would result in no priority child being + // created for this discovery mechanism. This would result in the priority + // LB policy reporting TRANSIENT_FAILURE (as there would be no priorities or + // localities) if this was the only discovery mechanism, or would result in + // the priority LB policy using a lower priority discovery mechanism when + // that becomes available. + er.mu.Lock() + er.update = &xdsresource.EndpointsUpdate{} + er.mu.Unlock() + + er.topLevelResolver.onUpdate(onDone) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go new file mode 100644 index 0000000000..f5605df832 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go @@ -0,0 +1,120 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package loadstore contains the loadStoreWrapper shared by the balancers. +package loadstore + +import ( + "sync" + + "google.golang.org/grpc/xds/internal/xdsclient/load" +) + +// NewWrapper creates a Wrapper. +func NewWrapper() *Wrapper { + return &Wrapper{} +} + +// Wrapper wraps a load store with cluster and edsService. +// +// It's store and cluster/edsService can be updated separately. And it will +// update its internal perCluster store so that new stats will be added to the +// correct perCluster. +// +// Note that this struct is a temporary workaround before we implement graceful +// switch for EDS. Any update to the clusterName and serviceName is too early, +// the perfect timing is when the picker is updated with the new connection. +// This early update could cause picks for the old SubConn being reported to the +// new services. +// +// When the graceful switch in EDS is done, there should be no need for this +// struct. The policies that record/report load shouldn't need to handle update +// of lrsServerName/cluster/edsService. Its parent should do a graceful switch +// of the whole tree when one of that changes. +type Wrapper struct { + mu sync.RWMutex + cluster string + edsService string + // store and perCluster are initialized as nil. They are only set by the + // balancer when LRS is enabled. Before that, all functions to record loads + // are no-op. + store *load.Store + perCluster load.PerClusterReporter +} + +// UpdateClusterAndService updates the cluster name and eds service for this +// wrapper. 
If any one of them is changed from before, the perCluster store in +// this wrapper will also be updated. +func (lsw *Wrapper) UpdateClusterAndService(cluster, edsService string) { + lsw.mu.Lock() + defer lsw.mu.Unlock() + if cluster == lsw.cluster && edsService == lsw.edsService { + return + } + lsw.cluster = cluster + lsw.edsService = edsService + lsw.perCluster = lsw.store.PerCluster(lsw.cluster, lsw.edsService) +} + +// UpdateLoadStore updates the load store for this wrapper. If it is changed +// from before, the perCluster store in this wrapper will also be updated. +func (lsw *Wrapper) UpdateLoadStore(store *load.Store) { + lsw.mu.Lock() + defer lsw.mu.Unlock() + if store == lsw.store { + return + } + lsw.store = store + lsw.perCluster = lsw.store.PerCluster(lsw.cluster, lsw.edsService) +} + +// CallStarted records a call started in the store. +func (lsw *Wrapper) CallStarted(locality string) { + lsw.mu.RLock() + defer lsw.mu.RUnlock() + if lsw.perCluster != nil { + lsw.perCluster.CallStarted(locality) + } +} + +// CallFinished records a call finished in the store. +func (lsw *Wrapper) CallFinished(locality string, err error) { + lsw.mu.RLock() + defer lsw.mu.RUnlock() + if lsw.perCluster != nil { + lsw.perCluster.CallFinished(locality, err) + } +} + +// CallServerLoad records the server load in the store. +func (lsw *Wrapper) CallServerLoad(locality, name string, val float64) { + lsw.mu.RLock() + defer lsw.mu.RUnlock() + if lsw.perCluster != nil { + lsw.perCluster.CallServerLoad(locality, name, val) + } +} + +// CallDropped records a call dropped in the store. +func (lsw *Wrapper) CallDropped(category string) { + lsw.mu.RLock() + defer lsw.mu.RUnlock() + if lsw.perCluster != nil { + lsw.perCluster.CallDropped(category) + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go new file mode 100644 index 0000000000..53ba72c081 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go @@ -0,0 +1,918 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package outlierdetection provides an implementation of the outlier detection +// LB policy, as defined in +// https://github.com/grpc/proposal/blob/master/A50-xds-outlier-detection.md. 
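As an illustrative sketch rather than vendored source: this balancer is reached through the gRPC balancer registry rather than constructed directly. The hypothetical helper below assumes it compiles inside this package (reusing its balancer, encoding/json, and fmt imports), the exported Name constant, and the A50 defaults documented in ParseConfig further down:

func exampleOutlierDetectionLookup() {
	// Importing this package for side effects runs init(), which registers
	// the builder under Name ("outlier_detection_experimental").
	builder := balancer.Get(Name)
	if builder == nil {
		panic("outlier detection balancer not registered")
	}

	// The builder also implements balancer.ConfigParser; an empty JSON
	// object parses to the documented defaults (10s interval, 30s base
	// ejection time, 300s max ejection time, 10% max ejection).
	cfg, err := builder.(balancer.ConfigParser).ParseConfig(json.RawMessage(`{}`))
	if err != nil {
		panic(err)
	}
	fmt.Printf("parsed outlier detection config: %+v\n", cfg)
}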
+package outlierdetection + +import ( + "encoding/json" + "fmt" + "math" + "math/rand" + "strings" + "sync" + "sync/atomic" + "time" + "unsafe" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// Globals to stub out in tests. +var ( + afterFunc = time.AfterFunc + now = time.Now +) + +// Name is the name of the outlier detection balancer. +const Name = "outlier_detection_experimental" + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + b := &outlierDetectionBalancer{ + cc: cc, + closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + addrs: make(map[string]*addressInfo), + scWrappers: make(map[balancer.SubConn]*subConnWrapper), + scUpdateCh: buffer.NewUnbounded(), + pickerUpdateCh: buffer.NewUnbounded(), + channelzParent: bOpts.ChannelzParent, + } + b.logger = prefixLogger(b) + b.logger.Infof("Created") + b.child = gracefulswitch.NewBalancer(b, bOpts) + go b.run() + return b +} + +func (bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + lbCfg := &LBConfig{ + // Default top layer values as documented in A50. + Interval: iserviceconfig.Duration(10 * time.Second), + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + } + + // This unmarshalling handles underlying layers sre and fpe which have their + // own defaults for their fields if either sre or fpe are present. + if err := json.Unmarshal(s, lbCfg); err != nil { // Validates child config if present as well. + return nil, fmt.Errorf("xds: unable to unmarshal LBconfig: %s, error: %v", string(s), err) + } + + // Note: in the xds flow, these validations will never fail. The xdsclient + // performs the same validations as here on the xds Outlier Detection + // resource before parsing resource into JSON which this function gets + // called with. A50 defines two separate places for these validations to + // take place, the xdsclient and this ParseConfig method. "When parsing a + // config from JSON, if any of these requirements is violated, that should + // be treated as a parsing error." - A50 + switch { + // "The google.protobuf.Duration fields interval, base_ejection_time, and + // max_ejection_time must obey the restrictions in the + // google.protobuf.Duration documentation and they must have non-negative + // values." - A50 + // Approximately 290 years is the maximum time that time.Duration (int64) + // can represent. The restrictions on the protobuf.Duration field are to be + // within +-10000 years. Thus, just check for negative values. 
+ case lbCfg.Interval < 0: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.interval = %s; must be >= 0", lbCfg.Interval) + case lbCfg.BaseEjectionTime < 0: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.base_ejection_time = %s; must be >= 0", lbCfg.BaseEjectionTime) + case lbCfg.MaxEjectionTime < 0: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.max_ejection_time = %s; must be >= 0", lbCfg.MaxEjectionTime) + + // "The fields max_ejection_percent, + // success_rate_ejection.enforcement_percentage, + // failure_percentage_ejection.threshold, and + // failure_percentage.enforcement_percentage must have values less than or + // equal to 100." - A50 + case lbCfg.MaxEjectionPercent > 100: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.max_ejection_percent = %v; must be <= 100", lbCfg.MaxEjectionPercent) + case lbCfg.SuccessRateEjection != nil && lbCfg.SuccessRateEjection.EnforcementPercentage > 100: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.SuccessRateEjection.enforcement_percentage = %v; must be <= 100", lbCfg.SuccessRateEjection.EnforcementPercentage) + case lbCfg.FailurePercentageEjection != nil && lbCfg.FailurePercentageEjection.Threshold > 100: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.threshold = %v; must be <= 100", lbCfg.FailurePercentageEjection.Threshold) + case lbCfg.FailurePercentageEjection != nil && lbCfg.FailurePercentageEjection.EnforcementPercentage > 100: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.enforcement_percentage = %v; must be <= 100", lbCfg.FailurePercentageEjection.EnforcementPercentage) + } + return lbCfg, nil +} + +func (bb) Name() string { + return Name +} + +// scUpdate wraps a subConn update to be sent to the child balancer. +type scUpdate struct { + scw *subConnWrapper + state balancer.SubConnState +} + +type ejectionUpdate struct { + scw *subConnWrapper + isEjected bool // true for ejected, false for unejected +} + +type lbCfgUpdate struct { + lbCfg *LBConfig + // to make sure picker is updated synchronously. + done chan struct{} +} + +type outlierDetectionBalancer struct { + // These fields are safe to be accessed without holding any mutex because + // they are synchronized in run(), which makes these field accesses happen + // serially. + // + // childState is the latest balancer state received from the child. + childState balancer.State + // recentPickerNoop represents whether the most recent picker sent upward to + // the balancer.ClientConn is a noop picker, which doesn't count RPC's. Used + // to suppress redundant picker updates. + recentPickerNoop bool + + closed *grpcsync.Event + done *grpcsync.Event + cc balancer.ClientConn + logger *grpclog.PrefixLogger + channelzParent channelz.Identifier + + // childMu guards calls into child (to uphold the balancer.Balancer API + // guarantee of synchronous calls). + childMu sync.Mutex + child *gracefulswitch.Balancer + + // mu guards access to the following fields. It also helps to synchronize + // behaviors of the following events: config updates, firing of the interval + // timer, SubConn State updates, SubConn address updates, and child state + // updates. + // + // For example, when we receive a config update in the middle of the + // interval timer algorithm, which uses knobs present in the config, the + // balancer will wait for the interval timer algorithm to finish before + // persisting the new configuration. 
+ // + // Another example would be the updating of the addrs map, such as from a + // SubConn address update in the middle of the interval timer algorithm + // which uses addrs. This balancer waits for the interval timer algorithm to + // finish before making the update to the addrs map. + // + // This mutex is never held at the same time as childMu (within the context + // of a single goroutine). + mu sync.Mutex + addrs map[string]*addressInfo + cfg *LBConfig + scWrappers map[balancer.SubConn]*subConnWrapper + timerStartTime time.Time + intervalTimer *time.Timer + inhibitPickerUpdates bool + updateUnconditionally bool + numAddrsEjected int // For fast calculations of percentage of addrs ejected + + scUpdateCh *buffer.Unbounded + pickerUpdateCh *buffer.Unbounded +} + +// noopConfig returns whether this balancer is configured with a logical no-op +// configuration or not. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) noopConfig() bool { + return b.cfg.SuccessRateEjection == nil && b.cfg.FailurePercentageEjection == nil +} + +// onIntervalConfig handles logic required specifically on the receipt of a +// configuration which specifies to count RPC's and periodically perform passive +// health checking based on heuristics defined in configuration every configured +// interval. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) onIntervalConfig() { + var interval time.Duration + if b.timerStartTime.IsZero() { + b.timerStartTime = time.Now() + for _, addrInfo := range b.addrs { + addrInfo.callCounter.clear() + } + interval = time.Duration(b.cfg.Interval) + } else { + interval = time.Duration(b.cfg.Interval) - now().Sub(b.timerStartTime) + if interval < 0 { + interval = 0 + } + } + b.intervalTimer = afterFunc(interval, b.intervalTimerAlgorithm) +} + +// onNoopConfig handles logic required specifically on the receipt of a +// configuration which specifies the balancer to be a noop. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) onNoopConfig() { + // "If a config is provided with both the `success_rate_ejection` and + // `failure_percentage_ejection` fields unset, skip starting the timer and + // do the following:" + // "Unset the timer start timestamp." + b.timerStartTime = time.Time{} + for _, addrInfo := range b.addrs { + // "Uneject all currently ejected addresses." + if !addrInfo.latestEjectionTimestamp.IsZero() { + b.unejectAddress(addrInfo) + } + // "Reset each address's ejection time multiplier to 0." + addrInfo.ejectionTimeMultiplier = 0 + } +} + +func (b *outlierDetectionBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + lbCfg, ok := s.BalancerConfig.(*LBConfig) + if !ok { + b.logger.Errorf("received config with unexpected type %T: %v", s.BalancerConfig, s.BalancerConfig) + return balancer.ErrBadResolverState + } + + // Reject whole config if child policy doesn't exist, don't persist it for + // later. + bb := balancer.Get(lbCfg.ChildPolicy.Name) + if bb == nil { + return fmt.Errorf("outlier detection: child balancer %q not registered", lbCfg.ChildPolicy.Name) + } + + // It is safe to read b.cfg here without holding the mutex, as the only + // write to b.cfg happens later in this function. This function is part of + // the balancer.Balancer API, so it is guaranteed to be called in a + // synchronous manner, so it cannot race with this read. 
+ if b.cfg == nil || b.cfg.ChildPolicy.Name != lbCfg.ChildPolicy.Name { + b.childMu.Lock() + err := b.child.SwitchTo(bb) + if err != nil { + b.childMu.Unlock() + return fmt.Errorf("outlier detection: error switching to child of type %q: %v", lbCfg.ChildPolicy.Name, err) + } + b.childMu.Unlock() + } + + b.mu.Lock() + // Inhibit child picker updates until this UpdateClientConnState() call + // completes. If needed, a picker update containing the no-op config bit + // determined from this config and most recent state from the child will be + // sent synchronously upward at the end of this UpdateClientConnState() + // call. + b.inhibitPickerUpdates = true + b.updateUnconditionally = false + b.cfg = lbCfg + + addrs := make(map[string]bool, len(s.ResolverState.Addresses)) + for _, addr := range s.ResolverState.Addresses { + addrs[addr.Addr] = true + if _, ok := b.addrs[addr.Addr]; !ok { + b.addrs[addr.Addr] = newAddressInfo() + } + } + for addr := range b.addrs { + if !addrs[addr] { + delete(b.addrs, addr) + } + } + + if b.intervalTimer != nil { + b.intervalTimer.Stop() + } + + if b.noopConfig() { + b.onNoopConfig() + } else { + b.onIntervalConfig() + } + b.mu.Unlock() + + b.childMu.Lock() + err := b.child.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: s.ResolverState, + BalancerConfig: b.cfg.ChildPolicy.Config, + }) + b.childMu.Unlock() + + done := make(chan struct{}) + b.pickerUpdateCh.Put(lbCfgUpdate{ + lbCfg: lbCfg, + done: done, + }) + <-done + + return err +} + +func (b *outlierDetectionBalancer) ResolverError(err error) { + b.childMu.Lock() + defer b.childMu.Unlock() + b.child.ResolverError(err) +} + +func (b *outlierDetectionBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + scw, ok := b.scWrappers[sc] + if !ok { + // Shouldn't happen if passed down a SubConnWrapper to child on SubConn + // creation. + b.logger.Errorf("UpdateSubConnState called with SubConn that has no corresponding SubConnWrapper") + return + } + if state.ConnectivityState == connectivity.Shutdown { + delete(b.scWrappers, scw.SubConn) + } + b.scUpdateCh.Put(&scUpdate{ + scw: scw, + state: state, + }) +} + +func (b *outlierDetectionBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *outlierDetectionBalancer) Close() { + b.closed.Fire() + <-b.done.Done() + b.childMu.Lock() + b.child.Close() + b.childMu.Unlock() + + b.scUpdateCh.Close() + b.pickerUpdateCh.Close() + + b.mu.Lock() + defer b.mu.Unlock() + if b.intervalTimer != nil { + b.intervalTimer.Stop() + } +} + +func (b *outlierDetectionBalancer) ExitIdle() { + b.childMu.Lock() + defer b.childMu.Unlock() + b.child.ExitIdle() +} + +// wrappedPicker delegates to the child policy's picker, and when the request +// finishes, it increments the corresponding counter in the map entry referenced +// by the subConnWrapper that was picked. If both the `success_rate_ejection` +// and `failure_percentage_ejection` fields are unset in the configuration, this +// picker will not count. 
+type wrappedPicker struct { + childPicker balancer.Picker + noopPicker bool +} + +func (wp *wrappedPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + pr, err := wp.childPicker.Pick(info) + if err != nil { + return balancer.PickResult{}, err + } + + done := func(di balancer.DoneInfo) { + if !wp.noopPicker { + incrementCounter(pr.SubConn, di) + } + if pr.Done != nil { + pr.Done(di) + } + } + scw, ok := pr.SubConn.(*subConnWrapper) + if !ok { + // This can never happen, but check is present for defensive + // programming. + logger.Errorf("Picked SubConn from child picker is not a SubConnWrapper") + return balancer.PickResult{ + SubConn: pr.SubConn, + Done: done, + Metadata: pr.Metadata, + }, nil + } + return balancer.PickResult{ + SubConn: scw.SubConn, + Done: done, + Metadata: pr.Metadata, + }, nil +} + +func incrementCounter(sc balancer.SubConn, info balancer.DoneInfo) { + scw, ok := sc.(*subConnWrapper) + if !ok { + // Shouldn't happen, as comes from child + return + } + + // scw.addressInfo and callCounter.activeBucket can be written to + // concurrently (the pointers themselves). Thus, protect the reads here with + // atomics to prevent data corruption. There exists a race in which you read + // the addressInfo or active bucket pointer and then that pointer points to + // deprecated memory. If this goroutine yields the processor, in between + // reading the addressInfo pointer and writing to the active bucket, + // UpdateAddresses can switch the addressInfo the scw points to. Writing to + // an outdated addresses is a very small race and tolerable. After reading + // callCounter.activeBucket in this picker a swap call can concurrently + // change what activeBucket points to. A50 says to swap the pointer, which + // will cause this race to write to deprecated memory the interval timer + // algorithm will never read, which makes this race alright. + addrInfo := (*addressInfo)(atomic.LoadPointer(&scw.addressInfo)) + if addrInfo == nil { + return + } + ab := (*bucket)(atomic.LoadPointer(&addrInfo.callCounter.activeBucket)) + + if info.Err == nil { + atomic.AddUint32(&ab.numSuccesses, 1) + } else { + atomic.AddUint32(&ab.numFailures, 1) + } +} + +func (b *outlierDetectionBalancer) UpdateState(s balancer.State) { + b.pickerUpdateCh.Put(s) +} + +func (b *outlierDetectionBalancer) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + var sc balancer.SubConn + oldListener := opts.StateListener + opts.StateListener = func(state balancer.SubConnState) { b.updateSubConnState(sc, state) } + sc, err := b.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + scw := &subConnWrapper{ + SubConn: sc, + addresses: addrs, + scUpdateCh: b.scUpdateCh, + listener: oldListener, + } + b.mu.Lock() + defer b.mu.Unlock() + b.scWrappers[sc] = scw + if len(addrs) != 1 { + return scw, nil + } + addrInfo, ok := b.addrs[addrs[0].Addr] + if !ok { + return scw, nil + } + addrInfo.sws = append(addrInfo.sws, scw) + atomic.StorePointer(&scw.addressInfo, unsafe.Pointer(addrInfo)) + if !addrInfo.latestEjectionTimestamp.IsZero() { + scw.eject() + } + return scw, nil +} + +func (b *outlierDetectionBalancer) RemoveSubConn(sc balancer.SubConn) { + b.logger.Errorf("RemoveSubConn(%v) called unexpectedly", sc) +} + +// appendIfPresent appends the scw to the address, if the address is present in +// the Outlier Detection balancers address map. Returns nil if not present, and +// the map entry if present. +// +// Caller must hold b.mu. 
+func (b *outlierDetectionBalancer) appendIfPresent(addr string, scw *subConnWrapper) *addressInfo { + addrInfo, ok := b.addrs[addr] + if !ok { + return nil + } + + addrInfo.sws = append(addrInfo.sws, scw) + atomic.StorePointer(&scw.addressInfo, unsafe.Pointer(addrInfo)) + return addrInfo +} + +// removeSubConnFromAddressesMapEntry removes the scw from its map entry if +// present. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) removeSubConnFromAddressesMapEntry(scw *subConnWrapper) { + addrInfo := (*addressInfo)(atomic.LoadPointer(&scw.addressInfo)) + if addrInfo == nil { + return + } + for i, sw := range addrInfo.sws { + if scw == sw { + addrInfo.sws = append(addrInfo.sws[:i], addrInfo.sws[i+1:]...) + return + } + } +} + +func (b *outlierDetectionBalancer) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + scw, ok := sc.(*subConnWrapper) + if !ok { + // Return, shouldn't happen if passed up scw + return + } + + b.cc.UpdateAddresses(scw.SubConn, addrs) + b.mu.Lock() + defer b.mu.Unlock() + + // Note that 0 addresses is a valid update/state for a SubConn to be in. + // This is correctly handled by this algorithm (handled as part of a non singular + // old address/new address). + switch { + case len(scw.addresses) == 1 && len(addrs) == 1: // single address to single address + // If the updated address is the same, then there is nothing to do + // past this point. + if scw.addresses[0].Addr == addrs[0].Addr { + return + } + b.removeSubConnFromAddressesMapEntry(scw) + addrInfo := b.appendIfPresent(addrs[0].Addr, scw) + if addrInfo == nil { // uneject unconditionally because could have come from an ejected address + scw.uneject() + break + } + if addrInfo.latestEjectionTimestamp.IsZero() { // relay new updated subconn state + scw.uneject() + } else { + scw.eject() + } + case len(scw.addresses) == 1: // single address to multiple/no addresses + b.removeSubConnFromAddressesMapEntry(scw) + addrInfo := (*addressInfo)(atomic.LoadPointer(&scw.addressInfo)) + if addrInfo != nil { + addrInfo.callCounter.clear() + } + scw.uneject() + case len(addrs) == 1: // multiple/no addresses to single address + addrInfo := b.appendIfPresent(addrs[0].Addr, scw) + if addrInfo != nil && !addrInfo.latestEjectionTimestamp.IsZero() { + scw.eject() + } + } // otherwise multiple/no addresses to multiple/no addresses; ignore + + scw.addresses = addrs +} + +func (b *outlierDetectionBalancer) ResolveNow(opts resolver.ResolveNowOptions) { + b.cc.ResolveNow(opts) +} + +func (b *outlierDetectionBalancer) Target() string { + return b.cc.Target() +} + +// handleSubConnUpdate stores the recent state and forward the update +// if the SubConn is not ejected. +func (b *outlierDetectionBalancer) handleSubConnUpdate(u *scUpdate) { + scw := u.scw + scw.latestState = u.state + if !scw.ejected { + if scw.listener != nil { + b.childMu.Lock() + scw.listener(u.state) + b.childMu.Unlock() + } + } +} + +// handleEjectedUpdate handles any SubConns that get ejected/unejected, and +// forwards the appropriate corresponding subConnState to the child policy. +func (b *outlierDetectionBalancer) handleEjectedUpdate(u *ejectionUpdate) { + scw := u.scw + scw.ejected = u.isEjected + // If scw.latestState has never been written to will default to connectivity + // IDLE, which is fine. 
+	stateToUpdate := scw.latestState
+	if u.isEjected {
+		stateToUpdate = balancer.SubConnState{
+			ConnectivityState: connectivity.TransientFailure,
+		}
+	}
+	if scw.listener != nil {
+		b.childMu.Lock()
+		scw.listener(stateToUpdate)
+		b.childMu.Unlock()
+	}
+}
+
+// handleChildStateUpdate wraps the child's picker update in a wrappedPicker
+// carrying the noop picker bit and forwards it to the parent ClientConn.
+func (b *outlierDetectionBalancer) handleChildStateUpdate(u balancer.State) {
+	b.childState = u
+	b.mu.Lock()
+	if b.inhibitPickerUpdates {
+		// If a child's state is updated during the suppression of child
+		// updates, the synchronous handleLBConfigUpdate function with respect
+		// to UpdateClientConnState should return a picker unconditionally.
+		b.updateUnconditionally = true
+		b.mu.Unlock()
+		return
+	}
+	noopCfg := b.noopConfig()
+	b.mu.Unlock()
+	b.recentPickerNoop = noopCfg
+	b.cc.UpdateState(balancer.State{
+		ConnectivityState: b.childState.ConnectivityState,
+		Picker: &wrappedPicker{
+			childPicker: b.childState.Picker,
+			noopPicker:  noopCfg,
+		},
+	})
+}
+
+// handleLBConfigUpdate compares the noop-ness of the new config to the noop
+// bit of the picker currently in use, and updates the picker if this bit has
+// changed.
+func (b *outlierDetectionBalancer) handleLBConfigUpdate(u lbCfgUpdate) {
+	lbCfg := u.lbCfg
+	noopCfg := lbCfg.SuccessRateEjection == nil && lbCfg.FailurePercentageEjection == nil
+	// If the child has sent its first update and this config flips the noop
+	// bit compared to the most recent picker update sent upward, then a new
+	// picker with this updated bit needs to be forwarded upward. If a child
+	// update was received during the suppression of child updates within
+	// UpdateClientConnState(), then a new picker needs to be forwarded with
+	// this updated state, regardless of whether this new configuration flips
+	// the bit.
+	if b.childState.Picker != nil && noopCfg != b.recentPickerNoop || b.updateUnconditionally {
+		b.recentPickerNoop = noopCfg
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: b.childState.ConnectivityState,
+			Picker: &wrappedPicker{
+				childPicker: b.childState.Picker,
+				noopPicker:  noopCfg,
+			},
+		})
+	}
+	b.inhibitPickerUpdates = false
+	b.updateUnconditionally = false
+	close(u.done)
+}
+
+func (b *outlierDetectionBalancer) run() {
+	defer b.done.Fire()
+	for {
+		select {
+		case update, ok := <-b.scUpdateCh.Get():
+			if !ok {
+				return
+			}
+			b.scUpdateCh.Load()
+			if b.closed.HasFired() { // don't send SubConn updates to child after the balancer has been closed
+				return
+			}
+			switch u := update.(type) {
+			case *scUpdate:
+				b.handleSubConnUpdate(u)
+			case *ejectionUpdate:
+				b.handleEjectedUpdate(u)
+			}
+		case update, ok := <-b.pickerUpdateCh.Get():
+			if !ok {
+				return
+			}
+			b.pickerUpdateCh.Load()
+			if b.closed.HasFired() { // don't send picker updates to grpc after the balancer has been closed
+				return
+			}
+			switch u := update.(type) {
+			case balancer.State:
+				b.handleChildStateUpdate(u)
+			case lbCfgUpdate:
+				b.handleLBConfigUpdate(u)
+			}
+		case <-b.closed.Done():
+			return
+		}
+	}
+}
+
+// intervalTimerAlgorithm ejects and unejects addresses based on the Outlier
+// Detection configuration and data about each address from the previous
+// interval.
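+//
+// For illustration only (numbers chosen for exposition, not taken from any
+// real config): with BaseEjectionTime = 30s, MaxEjectionTime = 300s and an
+// ejection time multiplier of 3, an address ejected at time T becomes
+// eligible for unejection once now() is after T + min(30s*3, max(30s, 300s)),
+// i.e. T + 90s.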
+func (b *outlierDetectionBalancer) intervalTimerAlgorithm() { + b.mu.Lock() + defer b.mu.Unlock() + b.timerStartTime = time.Now() + + for _, addrInfo := range b.addrs { + addrInfo.callCounter.swap() + } + + if b.cfg.SuccessRateEjection != nil { + b.successRateAlgorithm() + } + + if b.cfg.FailurePercentageEjection != nil { + b.failurePercentageAlgorithm() + } + + for _, addrInfo := range b.addrs { + if addrInfo.latestEjectionTimestamp.IsZero() && addrInfo.ejectionTimeMultiplier > 0 { + addrInfo.ejectionTimeMultiplier-- + continue + } + if addrInfo.latestEjectionTimestamp.IsZero() { + // Address is already not ejected, so no need to check for whether + // to uneject the address below. + continue + } + et := time.Duration(b.cfg.BaseEjectionTime) * time.Duration(addrInfo.ejectionTimeMultiplier) + met := max(time.Duration(b.cfg.BaseEjectionTime), time.Duration(b.cfg.MaxEjectionTime)) + uet := addrInfo.latestEjectionTimestamp.Add(min(et, met)) + if now().After(uet) { + b.unejectAddress(addrInfo) + } + } + + // This conditional only for testing (since the interval timer algorithm is + // called manually), will never hit in production. + if b.intervalTimer != nil { + b.intervalTimer.Stop() + } + b.intervalTimer = afterFunc(time.Duration(b.cfg.Interval), b.intervalTimerAlgorithm) +} + +// addrsWithAtLeastRequestVolume returns a slice of address information of all +// addresses with at least request volume passed in. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) addrsWithAtLeastRequestVolume(requestVolume uint32) []*addressInfo { + var addrs []*addressInfo + for _, addrInfo := range b.addrs { + bucket := addrInfo.callCounter.inactiveBucket + rv := bucket.numSuccesses + bucket.numFailures + if rv >= requestVolume { + addrs = append(addrs, addrInfo) + } + } + return addrs +} + +// meanAndStdDev returns the mean and std dev of the fractions of successful +// requests of the addresses passed in. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) meanAndStdDev(addrs []*addressInfo) (float64, float64) { + var totalFractionOfSuccessfulRequests float64 + var mean float64 + for _, addrInfo := range addrs { + bucket := addrInfo.callCounter.inactiveBucket + rv := bucket.numSuccesses + bucket.numFailures + totalFractionOfSuccessfulRequests += float64(bucket.numSuccesses) / float64(rv) + } + mean = totalFractionOfSuccessfulRequests / float64(len(addrs)) + var sumOfSquares float64 + for _, addrInfo := range addrs { + bucket := addrInfo.callCounter.inactiveBucket + rv := bucket.numSuccesses + bucket.numFailures + devFromMean := (float64(bucket.numSuccesses) / float64(rv)) - mean + sumOfSquares += devFromMean * devFromMean + } + variance := sumOfSquares / float64(len(addrs)) + return mean, math.Sqrt(variance) +} + +// successRateAlgorithm ejects any addresses where the success rate falls below +// the other addresses according to mean and standard deviation, and if overall +// applicable from other set heuristics. +// +// Caller must hold b.mu. 
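+//
+// For illustration only (numbers invented for exposition): with a mean
+// success rate of 0.95 across the considered addresses, a standard deviation
+// of 0.02 and StdevFactor = 1900, the required success rate is
+// 0.95 - 0.02*(1900/1000) = 0.912; an address whose success rate for the
+// interval is below 0.912 is an ejection candidate, still subject to
+// EnforcementPercentage and MaxEjectionPercent.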
+func (b *outlierDetectionBalancer) successRateAlgorithm() { + addrsToConsider := b.addrsWithAtLeastRequestVolume(b.cfg.SuccessRateEjection.RequestVolume) + if len(addrsToConsider) < int(b.cfg.SuccessRateEjection.MinimumHosts) { + return + } + mean, stddev := b.meanAndStdDev(addrsToConsider) + for _, addrInfo := range addrsToConsider { + bucket := addrInfo.callCounter.inactiveBucket + ejectionCfg := b.cfg.SuccessRateEjection + if float64(b.numAddrsEjected)/float64(len(b.addrs))*100 >= float64(b.cfg.MaxEjectionPercent) { + return + } + successRate := float64(bucket.numSuccesses) / float64(bucket.numSuccesses+bucket.numFailures) + requiredSuccessRate := mean - stddev*(float64(ejectionCfg.StdevFactor)/1000) + if successRate < requiredSuccessRate { + channelz.Infof(logger, b.channelzParent, "SuccessRate algorithm detected outlier: %s. Parameters: successRate=%f, mean=%f, stddev=%f, requiredSuccessRate=%f", addrInfo, successRate, mean, stddev, requiredSuccessRate) + if uint32(rand.Int31n(100)) < ejectionCfg.EnforcementPercentage { + b.ejectAddress(addrInfo) + } + } + } +} + +// failurePercentageAlgorithm ejects any addresses where the failure percentage +// rate exceeds a set enforcement percentage, if overall applicable from other +// set heuristics. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) failurePercentageAlgorithm() { + addrsToConsider := b.addrsWithAtLeastRequestVolume(b.cfg.FailurePercentageEjection.RequestVolume) + if len(addrsToConsider) < int(b.cfg.FailurePercentageEjection.MinimumHosts) { + return + } + + for _, addrInfo := range addrsToConsider { + bucket := addrInfo.callCounter.inactiveBucket + ejectionCfg := b.cfg.FailurePercentageEjection + if float64(b.numAddrsEjected)/float64(len(b.addrs))*100 >= float64(b.cfg.MaxEjectionPercent) { + return + } + failurePercentage := (float64(bucket.numFailures) / float64(bucket.numSuccesses+bucket.numFailures)) * 100 + if failurePercentage > float64(b.cfg.FailurePercentageEjection.Threshold) { + channelz.Infof(logger, b.channelzParent, "FailurePercentage algorithm detected outlier: %s, failurePercentage=%f", addrInfo, failurePercentage) + if uint32(rand.Int31n(100)) < ejectionCfg.EnforcementPercentage { + b.ejectAddress(addrInfo) + } + } + } +} + +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) ejectAddress(addrInfo *addressInfo) { + b.numAddrsEjected++ + addrInfo.latestEjectionTimestamp = b.timerStartTime + addrInfo.ejectionTimeMultiplier++ + for _, sbw := range addrInfo.sws { + sbw.eject() + channelz.Infof(logger, b.channelzParent, "Subchannel ejected: %s", sbw) + } + +} + +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) unejectAddress(addrInfo *addressInfo) { + b.numAddrsEjected-- + addrInfo.latestEjectionTimestamp = time.Time{} + for _, sbw := range addrInfo.sws { + sbw.uneject() + channelz.Infof(logger, b.channelzParent, "Subchannel unejected: %s", sbw) + } +} + +// addressInfo contains the runtime information about an address that pertains +// to Outlier Detection. This struct and all of its fields is protected by +// outlierDetectionBalancer.mu in the case where it is accessed through the +// address map. In the case of Picker callbacks, the writes to the activeBucket +// of callCounter are protected by atomically loading and storing +// unsafe.Pointers (see further explanation in incrementCounter()). +type addressInfo struct { + // The call result counter object. + callCounter *callCounter + + // The latest ejection timestamp, or zero if the address is currently not + // ejected. 
+ latestEjectionTimestamp time.Time + + // The current ejection time multiplier, starting at 0. + ejectionTimeMultiplier int64 + + // A list of subchannel wrapper objects that correspond to this address. + sws []*subConnWrapper +} + +func (a *addressInfo) String() string { + var res strings.Builder + res.WriteString("[") + for _, sw := range a.sws { + res.WriteString(sw.String()) + } + res.WriteString("]") + return res.String() +} + +func newAddressInfo() *addressInfo { + return &addressInfo{ + callCounter: newCallCounter(), + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/callcounter.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/callcounter.go new file mode 100644 index 0000000000..4597f727b6 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/callcounter.go @@ -0,0 +1,66 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package outlierdetection + +import ( + "sync/atomic" + "unsafe" +) + +type bucket struct { + numSuccesses uint32 + numFailures uint32 +} + +func newCallCounter() *callCounter { + return &callCounter{ + activeBucket: unsafe.Pointer(&bucket{}), + inactiveBucket: &bucket{}, + } +} + +// callCounter has two buckets, which each count successful and failing RPC's. +// The activeBucket is used to actively count any finished RPC's, and the +// inactiveBucket is populated with this activeBucket's data every interval for +// use by the Outlier Detection algorithm. +type callCounter struct { + // activeBucket updates every time a call finishes (from picker passed to + // Client Conn), so protect pointer read with atomic load of unsafe.Pointer + // so picker does not have to grab a mutex per RPC, the critical path. + activeBucket unsafe.Pointer // bucket + inactiveBucket *bucket +} + +func (cc *callCounter) clear() { + atomic.StorePointer(&cc.activeBucket, unsafe.Pointer(&bucket{})) + cc.inactiveBucket = &bucket{} +} + +// "When the timer triggers, the inactive bucket is zeroed and swapped with the +// active bucket. Then the inactive bucket contains the number of successes and +// failures since the last time the timer triggered. Those numbers are used to +// evaluate the ejection criteria." - A50. +func (cc *callCounter) swap() { + ib := cc.inactiveBucket + *ib = bucket{} + ab := (*bucket)(atomic.SwapPointer(&cc.activeBucket, unsafe.Pointer(ib))) + cc.inactiveBucket = &bucket{ + numSuccesses: atomic.LoadUint32(&ab.numSuccesses), + numFailures: atomic.LoadUint32(&ab.numFailures), + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/config.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/config.go new file mode 100644 index 0000000000..196a562ed6 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/config.go @@ -0,0 +1,240 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package outlierdetection + +import ( + "encoding/json" + "time" + + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +// SuccessRateEjection is parameters for the success rate ejection algorithm. +// This algorithm monitors the request success rate for all endpoints and ejects +// individual endpoints whose success rates are statistical outliers. +type SuccessRateEjection struct { + // StddevFactor is used to determine the ejection threshold for + // success rate outlier ejection. The ejection threshold is the difference + // between the mean success rate, and the product of this factor and the + // standard deviation of the mean success rate: mean - (stdev * + // success_rate_stdev_factor). This factor is divided by a thousand to get a + // double. That is, if the desired factor is 1.9, the runtime value should + // be 1900. Defaults to 1900. + StdevFactor uint32 `json:"stdevFactor,omitempty"` + // EnforcementPercentage is the % chance that a host will be actually ejected + // when an outlier status is detected through success rate statistics. This + // setting can be used to disable ejection or to ramp it up slowly. Defaults + // to 100. + EnforcementPercentage uint32 `json:"enforcementPercentage,omitempty"` + // MinimumHosts is the number of hosts in a cluster that must have enough + // request volume to detect success rate outliers. If the number of hosts is + // less than this setting, outlier detection via success rate statistics is + // not performed for any host in the cluster. Defaults to 5. + MinimumHosts uint32 `json:"minimumHosts,omitempty"` + // RequestVolume is the minimum number of total requests that must be + // collected in one interval (as defined by the interval duration above) to + // include this host in success rate based outlier detection. If the volume + // is lower than this setting, outlier detection via success rate statistics + // is not performed for that host. Defaults to 100. + RequestVolume uint32 `json:"requestVolume,omitempty"` +} + +// For UnmarshalJSON to work correctly and set defaults without infinite +// recursion. +type successRateEjection SuccessRateEjection + +// UnmarshalJSON unmarshals JSON into SuccessRateEjection. If a +// SuccessRateEjection field is not set, that field will get its default value. +func (sre *SuccessRateEjection) UnmarshalJSON(j []byte) error { + sre.StdevFactor = 1900 + sre.EnforcementPercentage = 100 + sre.MinimumHosts = 5 + sre.RequestVolume = 100 + // Unmarshal JSON on a type with zero values for methods, including + // UnmarshalJSON. Overwrites defaults, leaves alone if not. typecast to + // avoid infinite recursion by not recalling this function and causing stack + // overflow. + return json.Unmarshal(j, (*successRateEjection)(sre)) +} + +// Equal returns whether the SuccessRateEjection is the same with the parameter. 
+func (sre *SuccessRateEjection) Equal(sre2 *SuccessRateEjection) bool { + if sre == nil && sre2 == nil { + return true + } + if (sre != nil) != (sre2 != nil) { + return false + } + if sre.StdevFactor != sre2.StdevFactor { + return false + } + if sre.EnforcementPercentage != sre2.EnforcementPercentage { + return false + } + if sre.MinimumHosts != sre2.MinimumHosts { + return false + } + return sre.RequestVolume == sre2.RequestVolume +} + +// FailurePercentageEjection is parameters for the failure percentage algorithm. +// This algorithm ejects individual endpoints whose failure rate is greater than +// some threshold, independently of any other endpoint. +type FailurePercentageEjection struct { + // Threshold is the failure percentage to use when determining failure + // percentage-based outlier detection. If the failure percentage of a given + // host is greater than or equal to this value, it will be ejected. Defaults + // to 85. + Threshold uint32 `json:"threshold,omitempty"` + // EnforcementPercentage is the % chance that a host will be actually + // ejected when an outlier status is detected through failure percentage + // statistics. This setting can be used to disable ejection or to ramp it up + // slowly. Defaults to 0. + EnforcementPercentage uint32 `json:"enforcementPercentage,omitempty"` + // MinimumHosts is the minimum number of hosts in a cluster in order to + // perform failure percentage-based ejection. If the total number of hosts + // in the cluster is less than this value, failure percentage-based ejection + // will not be performed. Defaults to 5. + MinimumHosts uint32 `json:"minimumHosts,omitempty"` + // RequestVolume is the minimum number of total requests that must be + // collected in one interval (as defined by the interval duration above) to + // perform failure percentage-based ejection for this host. If the volume is + // lower than this setting, failure percentage-based ejection will not be + // performed for this host. Defaults to 50. + RequestVolume uint32 `json:"requestVolume,omitempty"` +} + +// For UnmarshalJSON to work correctly and set defaults without infinite +// recursion. +type failurePercentageEjection FailurePercentageEjection + +// UnmarshalJSON unmarshals JSON into FailurePercentageEjection. If a +// FailurePercentageEjection field is not set, that field will get its default +// value. +func (fpe *FailurePercentageEjection) UnmarshalJSON(j []byte) error { + fpe.Threshold = 85 + fpe.EnforcementPercentage = 0 + fpe.MinimumHosts = 5 + fpe.RequestVolume = 50 + // Unmarshal JSON on a type with zero values for methods, including + // UnmarshalJSON. Overwrites defaults, leaves alone if not. typecast to + // avoid infinite recursion by not recalling this function and causing stack + // overflow. + return json.Unmarshal(j, (*failurePercentageEjection)(fpe)) +} + +// Equal returns whether the FailurePercentageEjection is the same with the +// parameter. +func (fpe *FailurePercentageEjection) Equal(fpe2 *FailurePercentageEjection) bool { + if fpe == nil && fpe2 == nil { + return true + } + if (fpe != nil) != (fpe2 != nil) { + return false + } + if fpe.Threshold != fpe2.Threshold { + return false + } + if fpe.EnforcementPercentage != fpe2.EnforcementPercentage { + return false + } + if fpe.MinimumHosts != fpe2.MinimumHosts { + return false + } + return fpe.RequestVolume == fpe2.RequestVolume +} + +// LBConfig is the config for the outlier detection balancer. 
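+//
+// A minimal sketch, purely for illustration, of a config with every value set
+// to its documented default (the top-level values match the defaults applied
+// by this type's UnmarshalJSON below; the SuccessRateEjection values match
+// that type's own defaults above):
+//
+//	cfg := &LBConfig{
+//		Interval:           iserviceconfig.Duration(10 * time.Second),
+//		BaseEjectionTime:   iserviceconfig.Duration(30 * time.Second),
+//		MaxEjectionTime:    iserviceconfig.Duration(300 * time.Second),
+//		MaxEjectionPercent: 10,
+//		SuccessRateEjection: &SuccessRateEjection{
+//			StdevFactor:           1900,
+//			EnforcementPercentage: 100,
+//			MinimumHosts:          5,
+//			RequestVolume:         100,
+//		},
+//	}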
+type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + // Interval is the time interval between ejection analysis sweeps. This can + // result in both new ejections as well as addresses being returned to + // service. Defaults to 10s. + Interval iserviceconfig.Duration `json:"interval,omitempty"` + // BaseEjectionTime is the base time that a host is ejected for. The real + // time is equal to the base time multiplied by the number of times the host + // has been ejected and is capped by MaxEjectionTime. Defaults to 30s. + BaseEjectionTime iserviceconfig.Duration `json:"baseEjectionTime,omitempty"` + // MaxEjectionTime is the maximum time that an address is ejected for. If + // not specified, the default value (300s) or the BaseEjectionTime value is + // applied, whichever is larger. + MaxEjectionTime iserviceconfig.Duration `json:"maxEjectionTime,omitempty"` + // MaxEjectionPercent is the maximum % of an upstream cluster that can be + // ejected due to outlier detection. Defaults to 10% but will eject at least + // one host regardless of the value. + MaxEjectionPercent uint32 `json:"maxEjectionPercent,omitempty"` + // SuccessRateEjection is the parameters for the success rate ejection + // algorithm. If set, success rate ejections will be performed. + SuccessRateEjection *SuccessRateEjection `json:"successRateEjection,omitempty"` + // FailurePercentageEjection is the parameters for the failure percentage + // algorithm. If set, failure rate ejections will be performed. + FailurePercentageEjection *FailurePercentageEjection `json:"failurePercentageEjection,omitempty"` + // ChildPolicy is the config for the child policy. + ChildPolicy *iserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` +} + +// For UnmarshalJSON to work correctly and set defaults without infinite +// recursion. +type lbConfig LBConfig + +// UnmarshalJSON unmarshals JSON into LBConfig. If a top level LBConfig field +// (i.e. not next layer sre or fpe) is not set, that field will get its default +// value. If sre or fpe is not set, it will stay unset, otherwise it will +// unmarshal on those types populating with default values for their fields if +// needed. +func (lbc *LBConfig) UnmarshalJSON(j []byte) error { + // Default top layer values as documented in A50. + lbc.Interval = iserviceconfig.Duration(10 * time.Second) + lbc.BaseEjectionTime = iserviceconfig.Duration(30 * time.Second) + lbc.MaxEjectionTime = iserviceconfig.Duration(300 * time.Second) + lbc.MaxEjectionPercent = 10 + // Unmarshal JSON on a type with zero values for methods, including + // UnmarshalJSON. Overwrites defaults, leaves alone if not. typecast to + // avoid infinite recursion by not recalling this function and causing stack + // overflow. + return json.Unmarshal(j, (*lbConfig)(lbc)) +} + +// EqualIgnoringChildPolicy returns whether the LBConfig is same with the +// parameter outside of the child policy, only comparing the Outlier Detection +// specific configuration. 
+func (lbc *LBConfig) EqualIgnoringChildPolicy(lbc2 *LBConfig) bool { + if lbc == nil && lbc2 == nil { + return true + } + if (lbc != nil) != (lbc2 != nil) { + return false + } + if lbc.Interval != lbc2.Interval { + return false + } + if lbc.BaseEjectionTime != lbc2.BaseEjectionTime { + return false + } + if lbc.MaxEjectionTime != lbc2.MaxEjectionTime { + return false + } + if lbc.MaxEjectionPercent != lbc2.MaxEjectionPercent { + return false + } + if !lbc.SuccessRateEjection.Equal(lbc2.SuccessRateEjection) { + return false + } + return lbc.FailurePercentageEjection.Equal(lbc2.FailurePercentageEjection) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/logging.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/logging.go new file mode 100644 index 0000000000..705b0cb691 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/logging.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package outlierdetection + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[outlier-detection-lb %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *outlierDetectionBalancer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go new file mode 100644 index 0000000000..0fa422d8f2 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package outlierdetection + +import ( + "fmt" + "unsafe" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/resolver" +) + +// subConnWrapper wraps every created SubConn in the Outlier Detection Balancer, +// to help track the latest state update from the underlying SubConn, and also +// whether or not this SubConn is ejected. +type subConnWrapper struct { + balancer.SubConn + listener func(balancer.SubConnState) + + // addressInfo is a pointer to the subConnWrapper's corresponding address + // map entry, if the map entry exists. 
+ addressInfo unsafe.Pointer // *addressInfo + // These two pieces of state will reach eventual consistency due to sync in + // run(), and child will always have the correctly updated SubConnState. + // latestState is the latest state update from the underlying SubConn. This + // is used whenever a SubConn gets unejected. + latestState balancer.SubConnState + ejected bool + + scUpdateCh *buffer.Unbounded + + // addresses is the list of address(es) this SubConn was created with to + // help support any change in address(es) + addresses []resolver.Address +} + +// eject causes the wrapper to report a state update with the TRANSIENT_FAILURE +// state, and to stop passing along updates from the underlying subchannel. +func (scw *subConnWrapper) eject() { + scw.scUpdateCh.Put(&ejectionUpdate{ + scw: scw, + isEjected: true, + }) +} + +// uneject causes the wrapper to report a state update with the latest update +// from the underlying subchannel, and resume passing along updates from the +// underlying subchannel. +func (scw *subConnWrapper) uneject() { + scw.scUpdateCh.Put(&ejectionUpdate{ + scw: scw, + isEjected: false, + }) +} + +func (scw *subConnWrapper) String() string { + return fmt.Sprintf("%+v", scw.addresses) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go new file mode 100644 index 0000000000..c17c62f23a --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go @@ -0,0 +1,289 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package priority implements the priority balancer. +// +// This balancer will be kept in internal until we use it in the xds balancers, +// and are confident its functionalities are stable. It will then be exported +// for more users. +package priority + +import ( + "encoding/json" + "fmt" + "sync" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancergroup" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/hierarchy" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// Name is the name of the priority balancer. +const Name = "priority_experimental" + +// DefaultSubBalancerCloseTimeout is defined as a variable instead of const for +// testing. 
+var DefaultSubBalancerCloseTimeout = 15 * time.Minute + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + b := &priorityBalancer{ + cc: cc, + done: grpcsync.NewEvent(), + children: make(map[string]*childBalancer), + childBalancerStateUpdate: buffer.NewUnbounded(), + } + + b.logger = prefixLogger(b) + b.bg = balancergroup.New(balancergroup.Options{ + CC: cc, + BuildOpts: bOpts, + StateAggregator: b, + Logger: b.logger, + SubBalancerCloseTimeout: DefaultSubBalancerCloseTimeout, + }) + b.bg.Start() + go b.run() + b.logger.Infof("Created") + return b +} + +func (b bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return parseConfig(s) +} + +func (bb) Name() string { + return Name +} + +// timerWrapper wraps a timer with a boolean. So that when a race happens +// between AfterFunc and Stop, the func is guaranteed to not execute. +type timerWrapper struct { + stopped bool + timer *time.Timer +} + +type priorityBalancer struct { + logger *grpclog.PrefixLogger + cc balancer.ClientConn + bg *balancergroup.BalancerGroup + done *grpcsync.Event + childBalancerStateUpdate *buffer.Unbounded + + mu sync.Mutex + childInUse string + // priorities is a list of child names from higher to lower priority. + priorities []string + // children is a map from child name to sub-balancers. + children map[string]*childBalancer + + // Set during UpdateClientConnState when calling into sub-balancers. + // Prevents child updates from recomputing the active priority or sending + // an update of the aggregated picker to the parent. Cleared after all + // sub-balancers have finished UpdateClientConnState, after which + // syncPriority is called manually. + inhibitPickerUpdates bool +} + +func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + if b.logger.V(2) { + b.logger.Infof("Received an update with balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) + } + newConfig, ok := s.BalancerConfig.(*LBConfig) + if !ok { + return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) + } + addressesSplit := hierarchy.Group(s.ResolverState.Addresses) + + b.mu.Lock() + // Create and remove children, since we know all children from the config + // are used by some priority. + for name, newSubConfig := range newConfig.Children { + bb := balancer.Get(newSubConfig.Config.Name) + if bb == nil { + b.logger.Errorf("balancer name %v from config is not registered", newSubConfig.Config.Name) + continue + } + + currentChild, ok := b.children[name] + if !ok { + // This is a new child, add it to the children list. But note that + // the balancer isn't built, because this child can be a low + // priority. If necessary, it will be built when syncing priorities. + cb := newChildBalancer(name, b, bb.Name(), b.cc) + cb.updateConfig(newSubConfig, resolver.State{ + Addresses: addressesSplit[name], + ServiceConfig: s.ResolverState.ServiceConfig, + Attributes: s.ResolverState.Attributes, + }) + b.children[name] = cb + continue + } + + // This is not a new child. But the config/addresses could change. + + // The balancing policy name is changed, close the old child. But don't + // rebuild, rebuild will happen when syncing priorities. 
+ if currentChild.balancerName != bb.Name() { + currentChild.stop() + currentChild.updateBalancerName(bb.Name()) + } + + // Update config and address, but note that this doesn't send the + // updates to non-started child balancers (the child balancer might not + // be built, if it's a low priority). + currentChild.updateConfig(newSubConfig, resolver.State{ + Addresses: addressesSplit[name], + ServiceConfig: s.ResolverState.ServiceConfig, + Attributes: s.ResolverState.Attributes, + }) + } + // Cleanup resources used by children removed from the config. + for name, oldChild := range b.children { + if _, ok := newConfig.Children[name]; !ok { + oldChild.stop() + delete(b.children, name) + } + } + + // Update priorities and handle priority changes. + b.priorities = newConfig.Priorities + + // Everything was removed by the update. + if len(b.priorities) == 0 { + b.childInUse = "" + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(ErrAllPrioritiesRemoved), + }) + b.mu.Unlock() + return nil + } + + // This will sync the states of all children to the new updated + // priorities. Includes starting/stopping child balancers when necessary. + // Block picker updates until all children have had a chance to call + // UpdateState to prevent races where, e.g., the active priority reports + // transient failure but a higher priority may have reported something that + // made it active, and if the transient failure update is handled first, + // RPCs could fail. + b.inhibitPickerUpdates = true + // Add an item to queue to notify us when the current items in the queue + // are done and syncPriority has been called. + done := make(chan struct{}) + b.childBalancerStateUpdate.Put(resumePickerUpdates{done: done}) + b.mu.Unlock() + <-done + + return nil +} + +func (b *priorityBalancer) ResolverError(err error) { + b.bg.ResolverError(err) +} + +func (b *priorityBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *priorityBalancer) Close() { + b.bg.Close() + b.childBalancerStateUpdate.Close() + + b.mu.Lock() + defer b.mu.Unlock() + b.done.Fire() + // Clear states of the current child in use, so if there's a race in picker + // update, it will be dropped. + b.childInUse = "" + // Stop the child policies, this is necessary to stop the init timers in the + // children. + for _, child := range b.children { + child.stop() + } +} + +func (b *priorityBalancer) ExitIdle() { + b.bg.ExitIdle() +} + +// UpdateState implements balancergroup.BalancerStateAggregator interface. The +// balancer group sends new connectivity state and picker here. +func (b *priorityBalancer) UpdateState(childName string, state balancer.State) { + b.childBalancerStateUpdate.Put(childBalancerState{ + name: childName, + s: state, + }) +} + +type childBalancerState struct { + name string + s balancer.State +} + +type resumePickerUpdates struct { + done chan struct{} +} + +// run handles child update in a separate goroutine, so if the child sends +// updates inline (when called by parent), it won't cause deadlocks (by trying +// to hold the same mutex). +func (b *priorityBalancer) run() { + for { + select { + case u, ok := <-b.childBalancerStateUpdate.Get(): + if !ok { + return + } + b.childBalancerStateUpdate.Load() + // Needs to handle state update in a goroutine, because each state + // update needs to start/close child policy, could result in + // deadlock. 
+ b.mu.Lock() + if b.done.HasFired() { + b.mu.Unlock() + return + } + switch s := u.(type) { + case childBalancerState: + b.handleChildStateUpdate(s.name, s.s) + case resumePickerUpdates: + b.inhibitPickerUpdates = false + b.syncPriority(b.childInUse) + close(s.done) + } + b.mu.Unlock() + case <-b.done.Done(): + return + } + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_child.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_child.go new file mode 100644 index 0000000000..7e8ccbd335 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_child.go @@ -0,0 +1,173 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package priority + +import ( + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +type childBalancer struct { + name string + parent *priorityBalancer + parentCC balancer.ClientConn + balancerName string + cc *ignoreResolveNowClientConn + + ignoreReresolutionRequests bool + config serviceconfig.LoadBalancingConfig + rState resolver.State + + started bool + // This is set when the child reports TransientFailure, and unset when it + // reports Ready or Idle. It is used to decide whether the failover timer + // should start when the child is transitioning into Connecting. The timer + // will be restarted if the child has not reported TF more recently than it + // reported Ready or Idle. + reportedTF bool + // The latest state the child balancer provided. + state balancer.State + // The timer to give a priority some time to connect. And if the priority + // doesn't go into Ready/Failure, the next priority will be started. + initTimer *timerWrapper +} + +// newChildBalancer creates a child balancer place holder, but doesn't +// build/start the child balancer. +func newChildBalancer(name string, parent *priorityBalancer, balancerName string, cc balancer.ClientConn) *childBalancer { + return &childBalancer{ + name: name, + parent: parent, + parentCC: cc, + balancerName: balancerName, + cc: newIgnoreResolveNowClientConn(cc, false), + started: false, + // Start with the connecting state and picker with re-pick error, so + // that when a priority switch causes this child picked before it's + // balancing policy is created, a re-pick will happen. + state: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + } +} + +// updateBalancerName updates balancer name for the child, but doesn't build a +// new one. The parent priority LB always closes the child policy before +// updating the balancer name, and the new balancer is built when it gets added +// to the balancergroup as part of start(). 
+func (cb *childBalancer) updateBalancerName(balancerName string) { + cb.balancerName = balancerName + cb.cc = newIgnoreResolveNowClientConn(cb.parentCC, cb.ignoreReresolutionRequests) +} + +// updateConfig sets childBalancer's config and state, but doesn't send update to +// the child balancer unless it is started. +func (cb *childBalancer) updateConfig(child *Child, rState resolver.State) { + cb.ignoreReresolutionRequests = child.IgnoreReresolutionRequests + cb.config = child.Config.Config + cb.rState = rState + if cb.started { + cb.sendUpdate() + } +} + +// start builds the child balancer if it's not already started. +// +// It doesn't do it directly. It asks the balancer group to build it. +func (cb *childBalancer) start() { + if cb.started { + return + } + cb.started = true + cb.parent.bg.AddWithClientConn(cb.name, cb.balancerName, cb.cc) + cb.startInitTimer() + cb.sendUpdate() +} + +// sendUpdate sends the addresses and config to the child balancer. +func (cb *childBalancer) sendUpdate() { + cb.cc.updateIgnoreResolveNow(cb.ignoreReresolutionRequests) + // TODO: return and aggregate the returned error in the parent. + err := cb.parent.bg.UpdateClientConnState(cb.name, balancer.ClientConnState{ + ResolverState: cb.rState, + BalancerConfig: cb.config, + }) + if err != nil { + cb.parent.logger.Warningf("Failed to update state for child policy %q: %v", cb.name, err) + } +} + +// stop stops the child balancer and resets the state. +// +// It doesn't do it directly. It asks the balancer group to remove it. +// +// Note that the underlying balancer group could keep the child in a cache. +func (cb *childBalancer) stop() { + if !cb.started { + return + } + cb.stopInitTimer() + cb.parent.bg.Remove(cb.name) + cb.started = false + cb.state = balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + } + // Clear child.reportedTF, so that if this child is started later, it will + // be given time to connect. + cb.reportedTF = false +} + +func (cb *childBalancer) startInitTimer() { + if cb.initTimer != nil { + return + } + // Need this local variable to capture timerW in the AfterFunc closure + // to check the stopped boolean. + timerW := &timerWrapper{} + cb.initTimer = timerW + timerW.timer = time.AfterFunc(DefaultPriorityInitTimeout, func() { + cb.parent.mu.Lock() + defer cb.parent.mu.Unlock() + if timerW.stopped { + return + } + cb.initTimer = nil + // Re-sync the priority. This will switch to the next priority if + // there's any. Note that it's important sync() is called after setting + // initTimer to nil. + cb.parent.syncPriority("") + }) +} + +func (cb *childBalancer) stopInitTimer() { + timerW := cb.initTimer + if timerW == nil { + return + } + cb.initTimer = nil + timerW.stopped = true + timerW.timer.Stop() +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go new file mode 100644 index 0000000000..0be807c134 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go @@ -0,0 +1,210 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package priority + +import ( + "errors" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" +) + +var ( + // ErrAllPrioritiesRemoved is returned by the picker when there's no priority available. + ErrAllPrioritiesRemoved = errors.New("no priority is provided, all priorities are removed") + // DefaultPriorityInitTimeout is the timeout after which if a priority is + // not READY, the next will be started. It's exported to be overridden by + // tests. + DefaultPriorityInitTimeout = 10 * time.Second +) + +// syncPriority handles priority after a config update or a child balancer +// connectivity state update. It makes sure the balancer state (started or not) +// is in sync with the priorities (even in tricky cases where a child is moved +// from a priority to another). +// +// It's guaranteed that after this function returns: +// +// If some child is READY, it is childInUse, and all lower priorities are +// closed. +// +// If some child is newly started(in Connecting for the first time), it is +// childInUse, and all lower priorities are closed. +// +// Otherwise, the lowest priority is childInUse (none of the children is +// ready, and the overall state is not ready). +// +// Steps: +// +// If all priorities were deleted, unset childInUse (to an empty string), and +// set parent ClientConn to TransientFailure +// +// Otherwise, Scan all children from p0, and check balancer stats: +// +// For any of the following cases: +// +// If balancer is not started (not built), this is either a new child with +// high priority, or a new builder for an existing child. +// +// If balancer is Connecting and has non-nil initTimer (meaning it +// transitioned from Ready or Idle to connecting, not from TF, so we +// should give it init-time to connect). +// +// If balancer is READY or IDLE +// +// If this is the lowest priority +// +// do the following: +// +// if this is not the old childInUse, override picker so old picker is no +// longer used. +// +// switch to it (because all higher priorities are neither new or Ready) +// +// forward the new addresses and config +// +// Caller must hold b.mu. +func (b *priorityBalancer) syncPriority(childUpdating string) { + if b.inhibitPickerUpdates { + if b.logger.V(2) { + b.logger.Infof("Skipping update from child policy %q", childUpdating) + } + return + } + for p, name := range b.priorities { + child, ok := b.children[name] + if !ok { + b.logger.Warningf("Priority name %q is not found in list of child policies", name) + continue + } + + if !child.started || + child.state.ConnectivityState == connectivity.Ready || + child.state.ConnectivityState == connectivity.Idle || + (child.state.ConnectivityState == connectivity.Connecting && child.initTimer != nil) || + p == len(b.priorities)-1 { + if b.childInUse != child.name || child.name == childUpdating { + if b.logger.V(2) { + b.logger.Infof("childInUse, childUpdating: %q, %q", b.childInUse, child.name) + } + // If we switch children or the child in use just updated its + // picker, push the child's picker to the parent. 
+				b.cc.UpdateState(child.state)
+			}
+			if b.logger.V(2) {
+				b.logger.Infof("Switching to (%q, %v) in syncPriority", child.name, p)
+			}
+			b.switchToChild(child, p)
+			break
+		}
+	}
+}
+
+// Stop priorities [p+1, lowest].
+//
+// Caller must hold b.mu.
+func (b *priorityBalancer) stopSubBalancersLowerThanPriority(p int) {
+	for i := p + 1; i < len(b.priorities); i++ {
+		name := b.priorities[i]
+		child, ok := b.children[name]
+		if !ok {
+			b.logger.Warningf("Priority name %q is not found in list of child policies", name)
+			continue
+		}
+		child.stop()
+	}
+}
+
+// switchToChild does the following:
+// - stop all children with lower priorities
+// - if childInUse is not this child
+//   - set childInUse to this child
+//   - if this child is not started, start it
+//
+// Note that it does NOT send the current child state (picker) to the parent
+// ClientConn. The caller needs to send it if necessary.
+//
+// This can be called when:
+// 1. first update, start p0
+// 2. an update moves a READY child from a lower priority to a higher one
+// 3. a different builder is updated for this child
+// 4. a high priority goes Failure, start next
+// 5. a high priority init timeout, start next
+//
+// Caller must hold b.mu.
+func (b *priorityBalancer) switchToChild(child *childBalancer, priority int) {
+	// Stop lower priorities even if childInUse is the same as this child. It's
+	// possible this child was moved from one priority to another.
+	b.stopSubBalancersLowerThanPriority(priority)
+
+	// If this child is already in use, do nothing.
+	//
+	// This can happen:
+	// - all priorities are not READY, a config update always triggers a switch
+	// to the lowest. In this case, the lowest child could still be connecting,
+	// so we don't stop the init timer.
+	// - a high priority is READY, a config update always triggers a switch to
+	// it.
+	if b.childInUse == child.name && child.started {
+		return
+	}
+	b.childInUse = child.name
+
+	if !child.started {
+		child.start()
+	}
+}
+
+// handleChildStateUpdate starts/closes priorities based on the connectivity
+// state.
+func (b *priorityBalancer) handleChildStateUpdate(childName string, s balancer.State) {
+	// Update state in child. The updated picker will be sent to parent later if
+	// necessary.
+	child, ok := b.children[childName]
+	if !ok {
+		b.logger.Warningf("Child policy not found for %q", childName)
+		return
+	}
+	if !child.started {
+		b.logger.Warningf("Ignoring update from child policy %q which is not in started state: %+v", childName, s)
+		return
+	}
+	child.state = s
+
+	// We start/stop the init timer of this child based on the new connectivity
+	// state. syncPriority() later will need the init timer (to check if it's
+	// nil or not) to decide which child to switch to.
+	switch s.ConnectivityState {
+	case connectivity.Ready, connectivity.Idle:
+		child.reportedTF = false
+		child.stopInitTimer()
+	case connectivity.TransientFailure:
+		child.reportedTF = true
+		child.stopInitTimer()
+	case connectivity.Connecting:
+		if !child.reportedTF {
+			child.startInitTimer()
+		}
+	default:
+		// New state is Shutdown, should never happen. Don't forward.
+	}
+
+	child.parent.syncPriority(childName)
+}
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/config.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/config.go
new file mode 100644
index 0000000000..37f1c9a829
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/config.go
@@ -0,0 +1,67 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package priority + +import ( + "encoding/json" + "fmt" + + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +// Child is a child of priority balancer. +type Child struct { + Config *internalserviceconfig.BalancerConfig `json:"config,omitempty"` + IgnoreReresolutionRequests bool `json:"ignoreReresolutionRequests,omitempty"` +} + +// LBConfig represents priority balancer's config. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // Children is a map from the child balancer names to their configs. Child + // names can be found in field Priorities. + Children map[string]*Child `json:"children,omitempty"` + // Priorities is a list of child balancer names. They are sorted from + // highest priority to low. The type/config for each child can be found in + // field Children, with the balancer name as the key. + Priorities []string `json:"priorities,omitempty"` +} + +func parseConfig(c json.RawMessage) (*LBConfig, error) { + var cfg LBConfig + if err := json.Unmarshal(c, &cfg); err != nil { + return nil, err + } + + prioritiesSet := make(map[string]bool) + for _, name := range cfg.Priorities { + if _, ok := cfg.Children[name]; !ok { + return nil, fmt.Errorf("LB policy name %q found in Priorities field (%v) is not found in Children field (%+v)", name, cfg.Priorities, cfg.Children) + } + prioritiesSet[name] = true + } + for name := range cfg.Children { + if _, ok := prioritiesSet[name]; !ok { + return nil, fmt.Errorf("LB policy name %q found in Children field (%v) is not found in Priorities field (%+v)", name, cfg.Children, cfg.Priorities) + } + } + return &cfg, nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/ignore_resolve_now.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/ignore_resolve_now.go new file mode 100644 index 0000000000..792ee4b3f2 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/ignore_resolve_now.go @@ -0,0 +1,58 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package priority + +import ( + "sync/atomic" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +// ignoreResolveNowClientConn wraps a balancer.ClientConn and overrides the +// ResolveNow() method to ignore those calls if the ignoreResolveNow bit is set. 
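+//
+// A short usage sketch (illustrative only; parentCC stands in for any
+// balancer.ClientConn):
+//
+//	cc := newIgnoreResolveNowClientConn(parentCC, true)
+//	cc.ResolveNow(resolver.ResolveNowOptions{}) // dropped while the bit is set
+//	cc.updateIgnoreResolveNow(false)
+//	cc.ResolveNow(resolver.ResolveNowOptions{}) // forwarded to parentCC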
+type ignoreResolveNowClientConn struct { + balancer.ClientConn + ignoreResolveNow *uint32 +} + +func newIgnoreResolveNowClientConn(cc balancer.ClientConn, ignore bool) *ignoreResolveNowClientConn { + ret := &ignoreResolveNowClientConn{ + ClientConn: cc, + ignoreResolveNow: new(uint32), + } + ret.updateIgnoreResolveNow(ignore) + return ret +} + +func (i *ignoreResolveNowClientConn) updateIgnoreResolveNow(b bool) { + if b { + atomic.StoreUint32(i.ignoreResolveNow, 1) + return + } + atomic.StoreUint32(i.ignoreResolveNow, 0) + +} + +func (i ignoreResolveNowClientConn) ResolveNow(o resolver.ResolveNowOptions) { + if atomic.LoadUint32(i.ignoreResolveNow) != 0 { + return + } + i.ClientConn.ResolveNow(o) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/logging.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/logging.go new file mode 100644 index 0000000000..2fb8d2d204 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/logging.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package priority + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[priority-lb %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *priorityBalancer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/utils.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/utils.go new file mode 100644 index 0000000000..45fbe76443 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/utils.go @@ -0,0 +1,31 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package priority + +func equalStringSlice(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/config.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/config.go new file mode 100644 index 0000000000..b4afcf1001 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/config.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2021 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ringhash + +import ( + "encoding/json" + "fmt" + + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/serviceconfig" +) + +// LBConfig is the balancer config for ring_hash balancer. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + MinRingSize uint64 `json:"minRingSize,omitempty"` + MaxRingSize uint64 `json:"maxRingSize,omitempty"` +} + +const ( + defaultMinSize = 1024 + defaultMaxSize = 4096 + ringHashSizeUpperBound = 8 * 1024 * 1024 // 8M +) + +func parseConfig(c json.RawMessage) (*LBConfig, error) { + var cfg LBConfig + if err := json.Unmarshal(c, &cfg); err != nil { + return nil, err + } + if cfg.MinRingSize > ringHashSizeUpperBound { + return nil, fmt.Errorf("min_ring_size value of %d is greater than max supported value %d for this field", cfg.MinRingSize, ringHashSizeUpperBound) + } + if cfg.MaxRingSize > ringHashSizeUpperBound { + return nil, fmt.Errorf("max_ring_size value of %d is greater than max supported value %d for this field", cfg.MaxRingSize, ringHashSizeUpperBound) + } + if cfg.MinRingSize == 0 { + cfg.MinRingSize = defaultMinSize + } + if cfg.MaxRingSize == 0 { + cfg.MaxRingSize = defaultMaxSize + } + if cfg.MinRingSize > cfg.MaxRingSize { + return nil, fmt.Errorf("min %v is greater than max %v", cfg.MinRingSize, cfg.MaxRingSize) + } + if cfg.MinRingSize > envconfig.RingHashCap { + cfg.MinRingSize = envconfig.RingHashCap + } + if cfg.MaxRingSize > envconfig.RingHashCap { + cfg.MaxRingSize = envconfig.RingHashCap + } + return &cfg, nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/logging.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/logging.go new file mode 100644 index 0000000000..3e0f0adf58 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/logging.go @@ -0,0 +1,38 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package ringhash + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[ring-hash-lb %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *ringhashBalancer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} + +func subConnPrefixLogger(p *ringhashBalancer, sc *subConn) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)+fmt.Sprintf("[subConn %p] ", sc)) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go new file mode 100644 index 0000000000..5ce72caded --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go @@ -0,0 +1,161 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ringhash + +import ( + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/status" +) + +type picker struct { + ring *ring + logger *grpclog.PrefixLogger + subConnStates map[*subConn]connectivity.State +} + +func newPicker(ring *ring, logger *grpclog.PrefixLogger) *picker { + states := make(map[*subConn]connectivity.State) + for _, e := range ring.items { + states[e.sc] = e.sc.effectiveState() + } + return &picker{ring: ring, logger: logger, subConnStates: states} +} + +// handleRICSResult is the return type of handleRICS. It's needed to wrap the +// returned error from Pick() in a struct. With this, if the return values are +// `balancer.PickResult, error, bool`, linter complains because error is not the +// last return value. +type handleRICSResult struct { + pr balancer.PickResult + err error +} + +// handleRICS generates pick result if the entry is in Ready, Idle, Connecting +// or Shutdown. TransientFailure will be handled specifically after this +// function returns. +// +// The first return value indicates if the state is in Ready, Idle, Connecting +// or Shutdown. If it's true, the PickResult and error should be returned from +// Pick() as is. +func (p *picker) handleRICS(e *ringEntry) (handleRICSResult, bool) { + switch state := p.subConnStates[e.sc]; state { + case connectivity.Ready: + return handleRICSResult{pr: balancer.PickResult{SubConn: e.sc.sc}}, true + case connectivity.Idle: + // Trigger Connect() and queue the pick. + e.sc.queueConnect() + return handleRICSResult{err: balancer.ErrNoSubConnAvailable}, true + case connectivity.Connecting: + return handleRICSResult{err: balancer.ErrNoSubConnAvailable}, true + case connectivity.TransientFailure: + // Return ok==false, so TransientFailure will be handled afterwards. + return handleRICSResult{}, false + case connectivity.Shutdown: + // Shutdown can happen in a race where the old picker is called. 
A new + // picker should already be sent. + return handleRICSResult{err: balancer.ErrNoSubConnAvailable}, true + default: + // Should never reach this. All the connectivity states are already + // handled in the cases. + p.logger.Errorf("SubConn has undefined connectivity state: %v", state) + return handleRICSResult{err: status.Errorf(codes.Unavailable, "SubConn has undefined connectivity state: %v", state)}, true + } +} + +func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + e := p.ring.pick(getRequestHash(info.Ctx)) + if hr, ok := p.handleRICS(e); ok { + return hr.pr, hr.err + } + // ok was false, the entry is in transient failure. + return p.handleTransientFailure(e) +} + +func (p *picker) handleTransientFailure(e *ringEntry) (balancer.PickResult, error) { + // Queue a connect on the first picked SubConn. + e.sc.queueConnect() + + // Find next entry in the ring, skipping duplicate SubConns. + e2 := nextSkippingDuplicates(p.ring, e) + if e2 == nil { + // There's no next entry available, fail the pick. + return balancer.PickResult{}, fmt.Errorf("the only SubConn is in Transient Failure") + } + + // For the second SubConn, also check Ready/Idle/Connecting as if it's the + // first entry. + if hr, ok := p.handleRICS(e2); ok { + return hr.pr, hr.err + } + + // The second SubConn is also in TransientFailure. Queue a connect on it. + e2.sc.queueConnect() + + // If it gets here, this is after the second SubConn, and the second SubConn + // was in TransientFailure. + // + // Loop over all other SubConns: + // - If all SubConns so far are all TransientFailure, trigger Connect() on + // the TransientFailure SubConns, and keep going. + // - If there's one SubConn that's not in TransientFailure, keep checking + // the remaining SubConns (in case there's a Ready, which will be returned), + // but don't not trigger Connect() on the other SubConns. + var firstNonFailedFound bool + for ee := nextSkippingDuplicates(p.ring, e2); ee != e; ee = nextSkippingDuplicates(p.ring, ee) { + scState := p.subConnStates[ee.sc] + if scState == connectivity.Ready { + return balancer.PickResult{SubConn: ee.sc.sc}, nil + } + if firstNonFailedFound { + continue + } + if scState == connectivity.TransientFailure { + // This will queue a connect. + ee.sc.queueConnect() + continue + } + // This is a SubConn in a non-failure state. We continue to check the + // other SubConns, but remember that there was a non-failed SubConn + // seen. After this, Pick() will never trigger any SubConn to Connect(). + firstNonFailedFound = true + if scState == connectivity.Idle { + // This is the first non-failed SubConn, and it is in a real Idle + // state. Trigger it to Connect(). + ee.sc.queueConnect() + } + } + return balancer.PickResult{}, fmt.Errorf("no connection is Ready") +} + +// nextSkippingDuplicates finds the next entry in the ring, with a different +// subconn from the given entry. +func nextSkippingDuplicates(ring *ring, entry *ringEntry) *ringEntry { + for next := ring.next(entry); next != entry; next = ring.next(next) { + if next.sc != entry.sc { + return next + } + } + // There's no qualifying next entry. + return nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go new file mode 100644 index 0000000000..45dbb2d2a8 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go @@ -0,0 +1,182 @@ +/* + * + * Copyright 2021 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ringhash + +import ( + "math" + "sort" + "strconv" + + xxhash "github.com/cespare/xxhash/v2" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/resolver" +) + +type ring struct { + items []*ringEntry +} + +type subConnWithWeight struct { + sc *subConn + weight float64 +} + +type ringEntry struct { + idx int + hash uint64 + sc *subConn +} + +// newRing creates a ring from the subConns stored in the AddressMap. The ring +// size is limited by the passed in max/min. +// +// ring entries will be created for each subConn, and subConn with high weight +// (specified by the address) may have multiple entries. +// +// For example, for subConns with weights {a:3, b:3, c:4}, a generated ring of +// size 10 could be: +// - {idx:0 hash:3689675255460411075 b} +// - {idx:1 hash:4262906501694543955 c} +// - {idx:2 hash:5712155492001633497 c} +// - {idx:3 hash:8050519350657643659 b} +// - {idx:4 hash:8723022065838381142 b} +// - {idx:5 hash:11532782514799973195 a} +// - {idx:6 hash:13157034721563383607 c} +// - {idx:7 hash:14468677667651225770 c} +// - {idx:8 hash:17336016884672388720 a} +// - {idx:9 hash:18151002094784932496 a} +// +// To pick from a ring, a binary search will be done for the given target hash, +// and first item with hash >= given hash will be returned. +// +// Must be called with a non-empty subConns map. +func newRing(subConns *resolver.AddressMap, minRingSize, maxRingSize uint64, logger *grpclog.PrefixLogger) *ring { + if logger.V(2) { + logger.Infof("newRing: number of subConns is %d, minRingSize is %d, maxRingSize is %d", subConns.Len(), minRingSize, maxRingSize) + } + + // https://github.com/envoyproxy/envoy/blob/765c970f06a4c962961a0e03a467e165b276d50f/source/common/upstream/ring_hash_lb.cc#L114 + normalizedWeights, minWeight := normalizeWeights(subConns) + if logger.V(2) { + logger.Infof("newRing: normalized subConn weights is %v", normalizedWeights) + } + + // Normalized weights for {3,3,4} is {0.3,0.3,0.4}. + + // Scale up the size of the ring such that the least-weighted host gets a + // whole number of hashes on the ring. + // + // Note that size is limited by the input max/min. + scale := math.Min(math.Ceil(minWeight*float64(minRingSize))/minWeight, float64(maxRingSize)) + ringSize := math.Ceil(scale) + items := make([]*ringEntry, 0, int(ringSize)) + if logger.V(2) { + logger.Infof("newRing: creating new ring of size %v", ringSize) + } + + // For each entry, scale*weight nodes are generated in the ring. + // + // Not all of these are whole numbers. E.g. for weights {a:3,b:3,c:4}, if + // ring size is 7, scale is 6.66. The numbers of nodes will be + // {a,a,b,b,c,c,c}. + // + // A hash is generated for each item, and later the results will be sorted + // based on the hash. 
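+	//
+	// For example, with the default minRingSize=1024 and maxRingSize=4096 and
+	// normalized weights {a:0.3, b:0.3, c:0.4}, scale is
+	// min(ceil(0.3*1024)/0.3, 4096), about 1026.67, so a and b each get 308
+	// entries and c gets 411, for a ring of 1027 entries in total.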
+ var currentHashes, targetHashes float64 + for _, scw := range normalizedWeights { + targetHashes += scale * scw.weight + // This index ensures that ring entries corresponding to the same + // address hash to different values. And since this index is + // per-address, these entries hash to the same value across address + // updates. + idx := 0 + for currentHashes < targetHashes { + h := xxhash.Sum64String(scw.sc.addr + "_" + strconv.Itoa(idx)) + items = append(items, &ringEntry{hash: h, sc: scw.sc}) + idx++ + currentHashes++ + } + } + + // Sort items based on hash, to prepare for binary search. + sort.Slice(items, func(i, j int) bool { return items[i].hash < items[j].hash }) + for i, ii := range items { + ii.idx = i + } + return &ring{items: items} +} + +// normalizeWeights calculates the normalized weights for each subConn in the +// given subConns map. It returns a slice of subConnWithWeight structs, where +// each struct contains a subConn and its corresponding weight. The function +// also returns the minimum weight among all subConns. +// +// The normalized weight of each subConn is calculated by dividing its weight +// attribute by the sum of all subConn weights. If the weight attribute is not +// found on the address, a default weight of 1 is used. +// +// The addresses are sorted in ascending order to ensure consistent results. +// +// Must be called with a non-empty subConns map. +func normalizeWeights(subConns *resolver.AddressMap) ([]subConnWithWeight, float64) { + var weightSum uint32 + // Since attributes are explicitly ignored in the AddressMap key, we need to + // iterate over the values to get the weights. + scVals := subConns.Values() + for _, a := range scVals { + weightSum += a.(*subConn).weight + } + ret := make([]subConnWithWeight, 0, subConns.Len()) + min := 1.0 + for _, a := range scVals { + scInfo := a.(*subConn) + // (*subConn).weight is set to 1 if the weight attribute is not found on + // the address. And since this function is guaranteed to be called with + // a non-empty subConns map, weightSum is guaranteed to be non-zero. So, + // we need not worry about divide by zero error here. + nw := float64(scInfo.weight) / float64(weightSum) + ret = append(ret, subConnWithWeight{sc: scInfo, weight: nw}) + min = math.Min(min, nw) + } + // Sort the addresses to return consistent results. + // + // Note: this might not be necessary, but this makes sure the ring is + // consistent as long as the addresses are the same, for example, in cases + // where an address is added and then removed, the RPCs will still pick the + // same old SubConn. + sort.Slice(ret, func(i, j int) bool { return ret[i].sc.addr < ret[j].sc.addr }) + return ret, min +} + +// pick does a binary search. It returns the item with smallest index i that +// r.items[i].hash >= h. +func (r *ring) pick(h uint64) *ringEntry { + i := sort.Search(len(r.items), func(i int) bool { return r.items[i].hash >= h }) + if i == len(r.items) { + // If not found, and h is greater than the largest hash, return the + // first item. + i = 0 + } + return r.items[i] +} + +// next returns the next entry. 
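+// It wraps around to the first item on the ring when e is the last entry.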
+func (r *ring) next(e *ringEntry) *ringEntry { + return r.items[(e.idx+1)%len(r.items)] +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go new file mode 100644 index 0000000000..ef054d48aa --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go @@ -0,0 +1,527 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package ringhash implements the ringhash balancer. +package ringhash + +import ( + "encoding/json" + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/balancer/weightedroundrobin" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// Name is the name of the ring_hash balancer. +const Name = "ring_hash_experimental" + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { + b := &ringhashBalancer{ + cc: cc, + subConns: resolver.NewAddressMap(), + scStates: make(map[balancer.SubConn]*subConn), + csEvltr: &connectivityStateEvaluator{}, + orderedSubConns: make([]*subConn, 0), + } + b.logger = prefixLogger(b) + b.logger.Infof("Created") + return b +} + +func (bb) Name() string { + return Name +} + +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return parseConfig(c) +} + +type subConn struct { + addr string + weight uint32 + sc balancer.SubConn + logger *grpclog.PrefixLogger + + mu sync.RWMutex + // This is the actual state of this SubConn (as updated by the ClientConn). + // The effective state can be different, see comment of attemptedToConnect. + state connectivity.State + // failing is whether this SubConn is in a failing state. A subConn is + // considered to be in a failing state if it was previously in + // TransientFailure. + // + // This affects the effective connectivity state of this SubConn, e.g. + // - if the actual state is Idle or Connecting, but this SubConn is failing, + // the effective state is TransientFailure. + // + // This is used in pick(). E.g. if a subConn is Idle, but has failing as + // true, pick() will + // - consider this SubConn as TransientFailure, and check the state of the + // next SubConn. + // - trigger Connect() (note that normally a SubConn in real + // TransientFailure cannot Connect()) + // + // A subConn starts in non-failing (failing is false). A transition to + // TransientFailure sets failing to true (and it stays true). A transition + // to Ready sets failing to false. + failing bool + // connectQueued is true if a Connect() was queued for this SubConn while + // it's not in Idle (most likely was in TransientFailure). 
A Connect() will + // be triggered on this SubConn when it turns Idle. + // + // When connectivity state is updated to Idle for this SubConn, if + // connectQueued is true, Connect() will be called on the SubConn. + connectQueued bool + // attemptingToConnect indicates if this subconn is attempting to connect. + // It's set when queueConnect is called. It's unset when the state is + // changed to Ready/Shutdown, or Idle (and if connectQueued is false). + attemptingToConnect bool +} + +// setState updates the state of this SubConn. +// +// It also handles the queued Connect(). If the new state is Idle, and a +// Connect() was queued, this SubConn will be triggered to Connect(). +func (sc *subConn) setState(s connectivity.State) { + sc.mu.Lock() + defer sc.mu.Unlock() + switch s { + case connectivity.Idle: + // Trigger Connect() if new state is Idle, and there is a queued connect. + if sc.connectQueued { + sc.connectQueued = false + sc.logger.Infof("Executing a queued connect for subConn moving to state: %v", sc.state) + sc.sc.Connect() + } else { + sc.attemptingToConnect = false + } + case connectivity.Connecting: + // Clear connectQueued if the SubConn isn't failing. This state + // transition is unlikely to happen, but handle this just in case. + sc.connectQueued = false + case connectivity.Ready: + // Clear connectQueued if the SubConn isn't failing. This state + // transition is unlikely to happen, but handle this just in case. + sc.connectQueued = false + sc.attemptingToConnect = false + // Set to a non-failing state. + sc.failing = false + case connectivity.TransientFailure: + // Set to a failing state. + sc.failing = true + case connectivity.Shutdown: + sc.attemptingToConnect = false + } + sc.state = s +} + +// effectiveState returns the effective state of this SubConn. It can be +// different from the actual state, e.g. Idle while the subConn is failing is +// considered TransientFailure. Read comment of field failing for other cases. +func (sc *subConn) effectiveState() connectivity.State { + sc.mu.RLock() + defer sc.mu.RUnlock() + if sc.failing && (sc.state == connectivity.Idle || sc.state == connectivity.Connecting) { + return connectivity.TransientFailure + } + return sc.state +} + +// queueConnect sets a boolean so that when the SubConn state changes to Idle, +// it's Connect() will be triggered. If the SubConn state is already Idle, it +// will just call Connect(). +func (sc *subConn) queueConnect() { + sc.mu.Lock() + defer sc.mu.Unlock() + sc.attemptingToConnect = true + if sc.state == connectivity.Idle { + sc.logger.Infof("Executing a queued connect for subConn in state: %v", sc.state) + sc.sc.Connect() + return + } + // Queue this connect, and when this SubConn switches back to Idle (happens + // after backoff in TransientFailure), it will Connect(). + sc.logger.Infof("Queueing a connect for subConn in state: %v", sc.state) + sc.connectQueued = true +} + +func (sc *subConn) isAttemptingToConnect() bool { + sc.mu.Lock() + defer sc.mu.Unlock() + return sc.attemptingToConnect +} + +type ringhashBalancer struct { + cc balancer.ClientConn + logger *grpclog.PrefixLogger + + config *LBConfig + subConns *resolver.AddressMap // Map from resolver.Address to `*subConn`. + scStates map[balancer.SubConn]*subConn + + // ring is always in sync with subConns. When subConns change, a new ring is + // generated. Note that address weights updates (they are keys in the + // subConns map) also regenerates the ring. 
+ ring *ring + picker balancer.Picker + csEvltr *connectivityStateEvaluator + state connectivity.State + + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure + + // orderedSubConns contains the list of subconns in the order that addresses + // appear from the resolver. Together with lastInternallyTriggeredSCIndex, + // this allows triggering connection attempts to all SubConns independently + // of the order they appear on the ring. Always in sync with ring and + // subConns. The index is reset when addresses change. + orderedSubConns []*subConn + lastInternallyTriggeredSCIndex int +} + +// updateAddresses creates new SubConns and removes SubConns, based on the +// address update. +// +// The return value is whether the new address list is different from the +// previous. True if +// - an address was added +// - an address was removed +// - an address's weight was updated +// +// Note that this function doesn't trigger SubConn connecting, so all the new +// SubConn states are Idle. +func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool { + var addrsUpdated bool + // addrsSet is the set converted from addrs, used for quick lookup. + addrsSet := resolver.NewAddressMap() + + b.orderedSubConns = b.orderedSubConns[:0] // reuse the underlying array. + + for _, addr := range addrs { + addrsSet.Set(addr, true) + newWeight := getWeightAttribute(addr) + if val, ok := b.subConns.Get(addr); !ok { + var sc balancer.SubConn + opts := balancer.NewSubConnOptions{ + HealthCheckEnabled: true, + StateListener: func(state balancer.SubConnState) { b.updateSubConnState(sc, state) }, + } + sc, err := b.cc.NewSubConn([]resolver.Address{addr}, opts) + if err != nil { + b.logger.Warningf("Failed to create new SubConn: %v", err) + continue + } + scs := &subConn{addr: addr.Addr, weight: newWeight, sc: sc} + scs.logger = subConnPrefixLogger(b, scs) + scs.setState(connectivity.Idle) + b.state = b.csEvltr.recordTransition(connectivity.Shutdown, connectivity.Idle) + b.subConns.Set(addr, scs) + b.scStates[sc] = scs + b.orderedSubConns = append(b.orderedSubConns, scs) + addrsUpdated = true + } else { + // We have seen this address before and created a subConn for it. If the + // weight associated with the address has changed, update the subConns map + // with the new weight. This will be used when a new ring is created. + // + // There is no need to call UpdateAddresses on the subConn at this point + // since *only* the weight attribute has changed, and that does not affect + // subConn uniqueness. + scInfo := val.(*subConn) + b.orderedSubConns = append(b.orderedSubConns, scInfo) + if oldWeight := scInfo.weight; oldWeight != newWeight { + scInfo.weight = newWeight + b.subConns.Set(addr, scInfo) + // Return true to force recreation of the ring. + addrsUpdated = true + } + } + } + for _, addr := range b.subConns.Keys() { + // addr was removed by resolver. + if _, ok := addrsSet.Get(addr); !ok { + v, _ := b.subConns.Get(addr) + scInfo := v.(*subConn) + scInfo.sc.Shutdown() + b.subConns.Delete(addr) + addrsUpdated = true + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. + // The entry will be deleted in updateSubConnState. 
+ } + } + if addrsUpdated { + b.lastInternallyTriggeredSCIndex = 0 + } + return addrsUpdated +} + +func (b *ringhashBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) + newConfig, ok := s.BalancerConfig.(*LBConfig) + if !ok { + return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) + } + + // If addresses were updated, whether it resulted in SubConn + // creation/deletion, or just weight update, we need to regenerate the ring + // and send a new picker. + regenerateRing := b.updateAddresses(s.ResolverState.Addresses) + + // If the ring configuration has changed, we need to regenerate the ring and + // send a new picker. + if b.config == nil || b.config.MinRingSize != newConfig.MinRingSize || b.config.MaxRingSize != newConfig.MaxRingSize { + regenerateRing = true + } + b.config = newConfig + + // If resolver state contains no addresses, return an error so ClientConn + // will trigger re-resolve. Also records this as an resolver error, so when + // the overall state turns transient failure, the error message will have + // the zero address information. + if len(s.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + + if regenerateRing { + // Ring creation is guaranteed to not fail because we call newRing() + // with a non-empty subConns map. + b.ring = newRing(b.subConns, b.config.MinRingSize, b.config.MaxRingSize, b.logger) + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) + } + + // Successful resolution; clear resolver error and return nil. + b.resolverErr = nil + return nil +} + +func (b *ringhashBalancer) ResolverError(err error) { + b.resolverErr = err + if b.subConns.Len() == 0 { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.state, + Picker: b.picker, + }) +} + +func (b *ringhashBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +// updateSubConnState updates the per-SubConn state stored in the ring, and also +// the aggregated state. +// +// It triggers an update to cc when: +// - the new state is TransientFailure, to update the error message +// - it's possible that this is a noop, but sending an extra update is easier +// than comparing errors +// +// - the aggregated state is changed +// - the same picker will be sent again, but this update may trigger a re-pick +// for some RPCs. 
+func (b *ringhashBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + s := state.ConnectivityState + if logger.V(2) { + b.logger.Infof("Handle SubConn state change: %p, %v", sc, s) + } + scs, ok := b.scStates[sc] + if !ok { + b.logger.Infof("Received state change for an unknown SubConn: %p, %v", sc, s) + return + } + oldSCState := scs.effectiveState() + scs.setState(s) + newSCState := scs.effectiveState() + b.logger.Infof("SubConn's effective old state was: %v, new state is %v", oldSCState, newSCState) + + b.state = b.csEvltr.recordTransition(oldSCState, newSCState) + + switch s { + case connectivity.TransientFailure: + // Save error to be reported via picker. + b.connErr = state.ConnectionError + case connectivity.Shutdown: + // When an address was removed by resolver, b called Shutdown but kept + // the sc's state in scStates. Remove state for this sc here. + delete(b.scStates, sc) + } + + if oldSCState != newSCState { + // Because the picker caches the state of the subconns, we always + // regenerate and update the picker when the effective SubConn state + // changes. + b.regeneratePicker() + b.logger.Infof("Pushing new state %v and picker %p", b.state, b.picker) + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) + } + + switch b.state { + case connectivity.Connecting, connectivity.TransientFailure: + // When overall state is TransientFailure, we need to make sure at least + // one SubConn is attempting to connect, otherwise this balancer may + // never get picks if the parent is priority. + // + // Because we report Connecting as the overall state when only one + // SubConn is in TransientFailure, we do the same check for Connecting + // here. + // + // Note that this check also covers deleting SubConns due to address + // change. E.g. if the SubConn attempting to connect is deleted, and the + // overall state is TF. Since there must be at least one SubConn + // attempting to connect, we need to trigger one. But since the deleted + // SubConn will eventually send a shutdown update, this code will run + // and trigger the next SubConn to connect. + for _, v := range b.subConns.Values() { + sc := v.(*subConn) + if sc.isAttemptingToConnect() { + return + } + } + + // Trigger a SubConn (the next in the order addresses appear in the + // resolver) to connect if nobody is attempting to connect. + b.lastInternallyTriggeredSCIndex = (b.lastInternallyTriggeredSCIndex + 1) % len(b.orderedSubConns) + b.orderedSubConns[b.lastInternallyTriggeredSCIndex].queueConnect() + } +} + +// mergeErrors builds an error from the last connection error and the last +// resolver error. Must only be called if b.state is TransientFailure. +func (b *ringhashBalancer) mergeErrors() error { + // connErr must always be non-nil unless there are no SubConns, in which + // case resolverErr must be non-nil. 
+ if b.connErr == nil { + return fmt.Errorf("last resolver error: %v", b.resolverErr) + } + if b.resolverErr == nil { + return fmt.Errorf("last connection error: %v", b.connErr) + } + return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) +} + +func (b *ringhashBalancer) regeneratePicker() { + if b.state == connectivity.TransientFailure { + b.picker = base.NewErrPicker(b.mergeErrors()) + return + } + b.picker = newPicker(b.ring, b.logger) +} + +func (b *ringhashBalancer) Close() { + b.logger.Infof("Shutdown") +} + +func (b *ringhashBalancer) ExitIdle() { + // ExitIdle implementation is a no-op because connections are either + // triggers from picks or from subConn state changes. +} + +// connectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type connectivityStateEvaluator struct { + sum uint64 + nums [5]uint64 +} + +// recordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If there is at least one subchannel in READY state, report READY. +// - If there are 2 or more subchannels in TRANSIENT_FAILURE state, report TRANSIENT_FAILURE. +// - If there is at least one subchannel in CONNECTING state, report CONNECTING. +// - If there is one subchannel in TRANSIENT_FAILURE and there is more than one subchannel, report state CONNECTING. +// - If there is at least one subchannel in Idle state, report Idle. +// - Otherwise, report TRANSIENT_FAILURE. +// +// Note that if there are 1 connecting, 2 transient failure, the overall state +// is transient failure. This is because the second transient failure is a +// fallback of the first failing SubConn, and we want to report transient +// failure to failover to the lower priority. +func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + cse.nums[state] += updateVal + } + if oldState == connectivity.Shutdown { + // There's technically no transition from Shutdown. But we record a + // Shutdown->Idle transition when a new SubConn is created. + cse.sum++ + } + if newState == connectivity.Shutdown { + cse.sum-- + } + + if cse.nums[connectivity.Ready] > 0 { + return connectivity.Ready + } + if cse.nums[connectivity.TransientFailure] > 1 { + return connectivity.TransientFailure + } + if cse.nums[connectivity.Connecting] > 0 { + return connectivity.Connecting + } + if cse.nums[connectivity.TransientFailure] > 0 && cse.sum > 1 { + return connectivity.Connecting + } + if cse.nums[connectivity.Idle] > 0 { + return connectivity.Idle + } + return connectivity.TransientFailure +} + +// getWeightAttribute is a convenience function which returns the value of the +// weight attribute stored in the BalancerAttributes field of addr, using the +// weightedroundrobin package. +// +// When used in the xDS context, the weight attribute is guaranteed to be +// non-zero. But, when used in a non-xDS context, the weight attribute could be +// unset. A Default of 1 is used in the latter case. 
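+//
+// For illustration (assuming the weightedroundrobin attribute helpers), a
+// resolver could attach a weight of 3 to an address with:
+//
+//	addr = weightedroundrobin.SetAddrInfo(addr, weightedroundrobin.AddrInfo{Weight: 3})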
+func getWeightAttribute(addr resolver.Address) uint32 { + w := weightedroundrobin.GetAddrInfo(addr).Weight + if w == 0 { + return 1 + } + return w +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/util.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/util.go new file mode 100644 index 0000000000..92bb3ae5b7 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/util.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ringhash + +import "context" + +type clusterKey struct{} + +func getRequestHash(ctx context.Context) uint64 { + requestHash, _ := ctx.Value(clusterKey{}).(uint64) + return requestHash +} + +// GetRequestHashForTesting returns the request hash in the context; to be used +// for testing only. +func GetRequestHashForTesting(ctx context.Context) uint64 { + return getRequestHash(ctx) +} + +// SetRequestHash adds the request hash to the context for use in Ring Hash Load +// Balancing. +func SetRequestHash(ctx context.Context, requestHash uint64) context.Context { + return context.WithValue(ctx, clusterKey{}, requestHash) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go new file mode 100644 index 0000000000..943ee7806b --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go @@ -0,0 +1,201 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package wrrlocality provides an implementation of the wrr locality LB policy, +// as defined in [A52 - xDS Custom LB Policies]. +// +// [A52 - xDS Custom LB Policies]: https://github.com/grpc/proposal/blob/master/A52-xds-custom-lb-policies.md +package wrrlocality + +import ( + "encoding/json" + "errors" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/weightedtarget" + "google.golang.org/grpc/internal/grpclog" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal" +) + +// Name is the name of wrr_locality balancer. 
+const Name = "xds_wrr_locality_experimental" + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Name() string { + return Name +} + +// LBConfig is the config for the wrr locality balancer. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + // ChildPolicy is the config for the child policy. + ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` +} + +// To plumb in a different child in tests. +var weightedTargetName = weightedtarget.Name + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + builder := balancer.Get(weightedTargetName) + if builder == nil { + // Shouldn't happen, registered through imported weighted target, + // defensive programming. + return nil + } + + // Doesn't need to intercept any balancer.ClientConn operations; pass + // through by just giving cc to child balancer. + wtb := builder.Build(cc, bOpts) + if wtb == nil { + // shouldn't happen, defensive programming. + return nil + } + wtbCfgParser, ok := builder.(balancer.ConfigParser) + if !ok { + // Shouldn't happen, imported weighted target builder has this method. + return nil + } + wrrL := &wrrLocalityBalancer{ + child: wtb, + childParser: wtbCfgParser, + } + + wrrL.logger = prefixLogger(wrrL) + wrrL.logger.Infof("Created") + return wrrL +} + +func (bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var lbCfg *LBConfig + if err := json.Unmarshal(s, &lbCfg); err != nil { + return nil, fmt.Errorf("xds_wrr_locality: invalid LBConfig: %s, error: %v", string(s), err) + } + if lbCfg == nil || lbCfg.ChildPolicy == nil { + return nil, errors.New("xds_wrr_locality: invalid LBConfig: child policy field must be set") + } + return lbCfg, nil +} + +type attributeKey struct{} + +// Equal allows the values to be compared by Attributes.Equal. +func (a AddrInfo) Equal(o any) bool { + oa, ok := o.(AddrInfo) + return ok && oa.LocalityWeight == a.LocalityWeight +} + +// AddrInfo is the locality weight of the locality an address is a part of. +type AddrInfo struct { + LocalityWeight uint32 +} + +// SetAddrInfo returns a copy of addr in which the BalancerAttributes field is +// updated with AddrInfo. +func SetAddrInfo(addr resolver.Address, addrInfo AddrInfo) resolver.Address { + addr.BalancerAttributes = addr.BalancerAttributes.WithValue(attributeKey{}, addrInfo) + return addr +} + +func (a AddrInfo) String() string { + return fmt.Sprintf("Locality Weight: %d", a.LocalityWeight) +} + +// getAddrInfo returns the AddrInfo stored in the BalancerAttributes field of +// addr. Returns false if no AddrInfo found. +func getAddrInfo(addr resolver.Address) (AddrInfo, bool) { + v := addr.BalancerAttributes.Value(attributeKey{}) + ai, ok := v.(AddrInfo) + return ai, ok +} + +// wrrLocalityBalancer wraps a weighted target balancer, and builds +// configuration for the weighted target once it receives configuration +// specifying the weighted target child balancer and locality weight +// information. +type wrrLocalityBalancer struct { + // child will be a weighted target balancer, and will be built it at + // wrrLocalityBalancer build time. Other than preparing configuration, other + // balancer operations are simply pass through. 
+ child balancer.Balancer + + childParser balancer.ConfigParser + + logger *grpclog.PrefixLogger +} + +func (b *wrrLocalityBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + lbCfg, ok := s.BalancerConfig.(*LBConfig) + if !ok { + b.logger.Errorf("Received config with unexpected type %T: %v", s.BalancerConfig, s.BalancerConfig) + return balancer.ErrBadResolverState + } + + weightedTargets := make(map[string]weightedtarget.Target) + for _, addr := range s.ResolverState.Addresses { + // This get of LocalityID could potentially return a zero value. This + // shouldn't happen though (this attribute that is set actually gets + // used to build localities in the first place), and thus don't error + // out, and just build a weighted target with undefined behavior. + locality, err := internal.GetLocalityID(addr).ToString() + if err != nil { + // Should never happen. + logger.Errorf("Failed to marshal LocalityID: %v, skipping this locality in weighted target") + } + ai, ok := getAddrInfo(addr) + if !ok { + return fmt.Errorf("xds_wrr_locality: missing locality weight information in address %q", addr) + } + weightedTargets[locality] = weightedtarget.Target{Weight: ai.LocalityWeight, ChildPolicy: lbCfg.ChildPolicy} + } + wtCfg := &weightedtarget.LBConfig{Targets: weightedTargets} + wtCfgJSON, err := json.Marshal(wtCfg) + if err != nil { + // Shouldn't happen. + return fmt.Errorf("xds_wrr_locality: error marshalling prepared config: %v", wtCfg) + } + var sc serviceconfig.LoadBalancingConfig + if sc, err = b.childParser.ParseConfig(wtCfgJSON); err != nil { + return fmt.Errorf("xds_wrr_locality: config generated %v is invalid: %v", wtCfgJSON, err) + } + + return b.child.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: s.ResolverState, + BalancerConfig: sc, + }) +} + +func (b *wrrLocalityBalancer) ResolverError(err error) { + b.child.ResolverError(err) +} + +func (b *wrrLocalityBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *wrrLocalityBalancer) Close() { + b.child.Close() +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/logging.go b/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/logging.go new file mode 100644 index 0000000000..42ccea0a92 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/logging.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package wrrlocality + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[wrrlocality-lb %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *wrrLocalityBalancer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/cluster_specifier.go b/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/cluster_specifier.go new file mode 100644 index 0000000000..f81e2cab0f --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/cluster_specifier.go @@ -0,0 +1,72 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package clusterspecifier contains the ClusterSpecifier interface and a registry for +// storing and retrieving their implementations. +package clusterspecifier + +import ( + "google.golang.org/protobuf/proto" +) + +// BalancerConfig is the Go Native JSON representation of a balancer +// configuration. +type BalancerConfig []map[string]any + +// ClusterSpecifier defines the parsing functionality of a Cluster Specifier. +type ClusterSpecifier interface { + // TypeURLs are the proto message types supported by this + // ClusterSpecifierPlugin. A ClusterSpecifierPlugin will be registered by + // each of its supported message types. + TypeURLs() []string + // ParseClusterSpecifierConfig parses the provided configuration + // proto.Message from the top level RDS configuration. The resulting + // BalancerConfig will be used as configuration for a child LB Policy of the + // Cluster Manager LB Policy. A nil BalancerConfig is invalid. + ParseClusterSpecifierConfig(proto.Message) (BalancerConfig, error) +} + +var ( + // m is a map from scheme to filter. + m = make(map[string]ClusterSpecifier) +) + +// Register registers the ClusterSpecifierPlugin to the ClusterSpecifier map. +// cs.TypeURLs() will be used as the types for this ClusterSpecifierPlugin. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple cluster specifier +// plugins are registered with the same type URL, the one registered last will +// take effect. +func Register(cs ClusterSpecifier) { + for _, u := range cs.TypeURLs() { + m[u] = cs + } +} + +// Get returns the ClusterSpecifier registered with typeURL. +// +// If no cluster specifier is registered with typeURL, nil will be returned. +func Get(typeURL string) ClusterSpecifier { + return m[typeURL] +} + +// UnregisterForTesting unregisters the ClusterSpecifier for testing purposes. 
+func UnregisterForTesting(typeURL string) { + delete(m, typeURL) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/rls/rls.go b/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/rls/rls.go new file mode 100644 index 0000000000..89837605c1 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/rls/rls.go @@ -0,0 +1,105 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package rls implements the RLS cluster specifier plugin. +package rls + +import ( + "encoding/json" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/internal" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + "google.golang.org/grpc/xds/internal/clusterspecifier" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +func init() { + clusterspecifier.Register(rls{}) + + // TODO: Remove these once the RLS env var is removed. + internal.RegisterRLSClusterSpecifierPluginForTesting = func() { + clusterspecifier.Register(rls{}) + } + internal.UnregisterRLSClusterSpecifierPluginForTesting = func() { + for _, typeURL := range rls.TypeURLs(rls{}) { + clusterspecifier.UnregisterForTesting(typeURL) + } + } +} + +type rls struct{} + +func (rls) TypeURLs() []string { + return []string{"type.googleapis.com/grpc.lookup.v1.RouteLookupClusterSpecifier"} +} + +// lbConfigJSON is the RLS LB Policies configuration in JSON format. +// RouteLookupConfig will be a raw JSON string from the passed in proto +// configuration, and the other fields will be hardcoded. 
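+//
+// The resulting configuration therefore looks roughly like:
+//
+//	{
+//	  "routeLookupConfig": { ... },
+//	  "childPolicy": [{"cds_experimental": {}}],
+//	  "childPolicyConfigTargetFieldName": "cluster"
+//	}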
+type lbConfigJSON struct { + RouteLookupConfig json.RawMessage `json:"routeLookupConfig"` + ChildPolicy []map[string]json.RawMessage `json:"childPolicy"` + ChildPolicyConfigTargetFieldName string `json:"childPolicyConfigTargetFieldName"` +} + +func (rls) ParseClusterSpecifierConfig(cfg proto.Message) (clusterspecifier.BalancerConfig, error) { + if cfg == nil { + return nil, fmt.Errorf("rls_csp: nil configuration message provided") + } + m, ok := cfg.(*anypb.Any) + if !ok { + return nil, fmt.Errorf("rls_csp: error parsing config %v: unknown type %T", cfg, cfg) + } + rlcs := new(rlspb.RouteLookupClusterSpecifier) + + if err := m.UnmarshalTo(rlcs); err != nil { + return nil, fmt.Errorf("rls_csp: error parsing config %v: %v", cfg, err) + } + rlcJSON, err := protojson.Marshal(rlcs.GetRouteLookupConfig()) + if err != nil { + return nil, fmt.Errorf("rls_csp: error marshaling route lookup config: %v: %v", rlcs.GetRouteLookupConfig(), err) + } + lbCfgJSON := &lbConfigJSON{ + RouteLookupConfig: rlcJSON, // "JSON form of RouteLookupClusterSpecifier.config" - RLS in xDS Design Doc + ChildPolicy: []map[string]json.RawMessage{ + { + "cds_experimental": json.RawMessage("{}"), + }, + }, + ChildPolicyConfigTargetFieldName: "cluster", + } + + rawJSON, err := json.Marshal(lbCfgJSON) + if err != nil { + return nil, fmt.Errorf("rls_csp: error marshaling load balancing config %v: %v", lbCfgJSON, err) + } + + rlsBB := balancer.Get(internal.RLSLoadBalancingPolicyName) + if rlsBB == nil { + return nil, fmt.Errorf("RLS LB policy not registered") + } + if _, err = rlsBB.(balancer.ConfigParser).ParseConfig(rawJSON); err != nil { + return nil, fmt.Errorf("rls_csp: validation error from rls lb policy parsing: %v", err) + } + + return clusterspecifier.BalancerConfig{{internal.RLSLoadBalancingPolicyName: lbCfgJSON}}, nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go b/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go new file mode 100644 index 0000000000..5a82490598 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go @@ -0,0 +1,300 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package fault implements the Envoy Fault Injection HTTP filter. 
+package fault + +import ( + "context" + "errors" + "fmt" + "io" + "math/rand" + "strconv" + "sync/atomic" + "time" + + "google.golang.org/grpc/codes" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + + cpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3" + fpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3" + tpb "github.com/envoyproxy/go-control-plane/envoy/type/v3" +) + +const headerAbortHTTPStatus = "x-envoy-fault-abort-request" +const headerAbortGRPCStatus = "x-envoy-fault-abort-grpc-request" +const headerAbortPercentage = "x-envoy-fault-abort-request-percentage" + +const headerDelayPercentage = "x-envoy-fault-delay-request-percentage" +const headerDelayDuration = "x-envoy-fault-delay-request" + +var statusMap = map[int]codes.Code{ + 400: codes.Internal, + 401: codes.Unauthenticated, + 403: codes.PermissionDenied, + 404: codes.Unimplemented, + 429: codes.Unavailable, + 502: codes.Unavailable, + 503: codes.Unavailable, + 504: codes.Unavailable, +} + +func init() { + httpfilter.Register(builder{}) +} + +type builder struct { +} + +type config struct { + httpfilter.FilterConfig + config *fpb.HTTPFault +} + +func (builder) TypeURLs() []string { + return []string{"type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault"} +} + +// Parsing is the same for the base config and the override config. +func parseConfig(cfg proto.Message) (httpfilter.FilterConfig, error) { + if cfg == nil { + return nil, fmt.Errorf("fault: nil configuration message provided") + } + m, ok := cfg.(*anypb.Any) + if !ok { + return nil, fmt.Errorf("fault: error parsing config %v: unknown type %T", cfg, cfg) + } + msg := new(fpb.HTTPFault) + if err := m.UnmarshalTo(msg); err != nil { + return nil, fmt.Errorf("fault: error parsing config %v: %v", cfg, err) + } + return config{config: msg}, nil +} + +func (builder) ParseFilterConfig(cfg proto.Message) (httpfilter.FilterConfig, error) { + return parseConfig(cfg) +} + +func (builder) ParseFilterConfigOverride(override proto.Message) (httpfilter.FilterConfig, error) { + return parseConfig(override) +} + +func (builder) IsTerminal() bool { + return false +} + +var _ httpfilter.ClientInterceptorBuilder = builder{} + +func (builder) BuildClientInterceptor(cfg, override httpfilter.FilterConfig) (iresolver.ClientInterceptor, error) { + if cfg == nil { + return nil, fmt.Errorf("fault: nil config provided") + } + + c, ok := cfg.(config) + if !ok { + return nil, fmt.Errorf("fault: incorrect config type provided (%T): %v", cfg, cfg) + } + + if override != nil { + // override completely replaces the listener configuration; but we + // still validate the listener config type. 
+ c, ok = override.(config) + if !ok { + return nil, fmt.Errorf("fault: incorrect override config type provided (%T): %v", override, override) + } + } + + icfg := c.config + if (icfg.GetMaxActiveFaults() != nil && icfg.GetMaxActiveFaults().GetValue() == 0) || + (icfg.GetDelay() == nil && icfg.GetAbort() == nil) { + return nil, nil + } + return &interceptor{config: icfg}, nil +} + +type interceptor struct { + config *fpb.HTTPFault +} + +var activeFaults uint32 // global active faults; accessed atomically + +func (i *interceptor) NewStream(ctx context.Context, _ iresolver.RPCInfo, done func(), newStream func(ctx context.Context, done func()) (iresolver.ClientStream, error)) (iresolver.ClientStream, error) { + if maxAF := i.config.GetMaxActiveFaults(); maxAF != nil { + defer atomic.AddUint32(&activeFaults, ^uint32(0)) // decrement counter + if af := atomic.AddUint32(&activeFaults, 1); af > maxAF.GetValue() { + // Would exceed maximum active fault limit. + return newStream(ctx, done) + } + } + + if err := injectDelay(ctx, i.config.GetDelay()); err != nil { + return nil, err + } + + if err := injectAbort(ctx, i.config.GetAbort()); err != nil { + if err == errOKStream { + return &okStream{ctx: ctx}, nil + } + return nil, err + } + return newStream(ctx, done) +} + +// For overriding in tests +var randIntn = rand.Intn +var newTimer = time.NewTimer + +func injectDelay(ctx context.Context, delayCfg *cpb.FaultDelay) error { + numerator, denominator := splitPct(delayCfg.GetPercentage()) + var delay time.Duration + switch delayType := delayCfg.GetFaultDelaySecifier().(type) { + case *cpb.FaultDelay_FixedDelay: + delay = delayType.FixedDelay.AsDuration() + case *cpb.FaultDelay_HeaderDelay_: + md, _ := metadata.FromOutgoingContext(ctx) + v := md[headerDelayDuration] + if v == nil { + // No delay configured for this RPC. + return nil + } + ms, ok := parseIntFromMD(v) + if !ok { + // Malformed header; no delay. + return nil + } + delay = time.Duration(ms) * time.Millisecond + if v := md[headerDelayPercentage]; v != nil { + if num, ok := parseIntFromMD(v); ok && num < numerator { + numerator = num + } + } + } + if delay == 0 || randIntn(denominator) >= numerator { + return nil + } + t := newTimer(delay) + select { + case <-t.C: + case <-ctx.Done(): + t.Stop() + return ctx.Err() + } + return nil +} + +func injectAbort(ctx context.Context, abortCfg *fpb.FaultAbort) error { + numerator, denominator := splitPct(abortCfg.GetPercentage()) + code := codes.OK + okCode := false + switch errType := abortCfg.GetErrorType().(type) { + case *fpb.FaultAbort_HttpStatus: + code, okCode = grpcFromHTTP(int(errType.HttpStatus)) + case *fpb.FaultAbort_GrpcStatus: + code, okCode = sanitizeGRPCCode(codes.Code(errType.GrpcStatus)), true + case *fpb.FaultAbort_HeaderAbort_: + md, _ := metadata.FromOutgoingContext(ctx) + if v := md[headerAbortHTTPStatus]; v != nil { + // HTTP status has priority over gRPC status. 
+ if httpStatus, ok := parseIntFromMD(v); ok { + code, okCode = grpcFromHTTP(httpStatus) + } + } else if v := md[headerAbortGRPCStatus]; v != nil { + if grpcStatus, ok := parseIntFromMD(v); ok { + code, okCode = sanitizeGRPCCode(codes.Code(grpcStatus)), true + } + } + if v := md[headerAbortPercentage]; v != nil { + if num, ok := parseIntFromMD(v); ok && num < numerator { + numerator = num + } + } + } + if !okCode || randIntn(denominator) >= numerator { + return nil + } + if code == codes.OK { + return errOKStream + } + return status.Errorf(code, "RPC terminated due to fault injection") +} + +var errOKStream = errors.New("stream terminated early with OK status") + +// parseIntFromMD returns the integer in the last header or nil if parsing +// failed. +func parseIntFromMD(header []string) (int, bool) { + if len(header) == 0 { + return 0, false + } + v, err := strconv.Atoi(header[len(header)-1]) + return v, err == nil +} + +func splitPct(fp *tpb.FractionalPercent) (num int, den int) { + if fp == nil { + return 0, 100 + } + num = int(fp.GetNumerator()) + switch fp.GetDenominator() { + case tpb.FractionalPercent_HUNDRED: + return num, 100 + case tpb.FractionalPercent_TEN_THOUSAND: + return num, 10 * 1000 + case tpb.FractionalPercent_MILLION: + return num, 1000 * 1000 + } + return num, 100 +} + +func grpcFromHTTP(httpStatus int) (codes.Code, bool) { + if httpStatus < 200 || httpStatus >= 600 { + // Malformed; ignore this fault type. + return codes.OK, false + } + if c := statusMap[httpStatus]; c != codes.OK { + // OK = 0/the default for the map. + return c, true + } + // All undefined HTTP status codes convert to Unknown. HTTP status of 200 + // is "success", but gRPC converts to Unknown due to missing grpc status. + return codes.Unknown, true +} + +func sanitizeGRPCCode(c codes.Code) codes.Code { + if c > codes.Code(16) { + return codes.Unknown + } + return c +} + +type okStream struct { + ctx context.Context +} + +func (*okStream) Header() (metadata.MD, error) { return nil, nil } +func (*okStream) Trailer() metadata.MD { return nil } +func (*okStream) CloseSend() error { return nil } +func (o *okStream) Context() context.Context { return o.ctx } +func (*okStream) SendMsg(any) error { return io.EOF } +func (*okStream) RecvMsg(any) error { return io.EOF } diff --git a/vendor/google.golang.org/grpc/xds/internal/httpfilter/httpfilter.go b/vendor/google.golang.org/grpc/xds/internal/httpfilter/httpfilter.go new file mode 100644 index 0000000000..4402f0ebff --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/httpfilter/httpfilter.go @@ -0,0 +1,108 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package httpfilter contains the HTTPFilter interface and a registry for +// storing and retrieving their implementations. 
+package httpfilter + +import ( + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/protobuf/proto" +) + +// FilterConfig represents an opaque data structure holding configuration for a +// filter. Embed this interface to implement it. +type FilterConfig interface { + isFilterConfig() +} + +// Filter defines the parsing functionality of an HTTP filter. A Filter may +// optionally implement either ClientInterceptorBuilder or +// ServerInterceptorBuilder or both, indicating it is capable of working on the +// client side or server side or both, respectively. +type Filter interface { + // TypeURLs are the proto message types supported by this filter. A filter + // will be registered by each of its supported message types. + TypeURLs() []string + // ParseFilterConfig parses the provided configuration proto.Message from + // the LDS configuration of this filter. This may be an anypb.Any, a + // udpa.type.v1.TypedStruct, or an xds.type.v3.TypedStruct for filters that + // do not accept a custom type. The resulting FilterConfig will later be + // passed to Build. + ParseFilterConfig(proto.Message) (FilterConfig, error) + // ParseFilterConfigOverride parses the provided override configuration + // proto.Message from the RDS override configuration of this filter. This + // may be an anypb.Any, a udpa.type.v1.TypedStruct, or an + // xds.type.v3.TypedStruct for filters that do not accept a custom type. + // The resulting FilterConfig will later be passed to Build. + ParseFilterConfigOverride(proto.Message) (FilterConfig, error) + // IsTerminal returns whether this Filter is terminal or not (i.e. it must + // be last filter in the filter chain). + IsTerminal() bool +} + +// ClientInterceptorBuilder constructs a Client Interceptor. If this type is +// implemented by a Filter, it is capable of working on a client. +type ClientInterceptorBuilder interface { + // BuildClientInterceptor uses the FilterConfigs produced above to produce + // an HTTP filter interceptor for clients. config will always be non-nil, + // but override may be nil if no override config exists for the filter. It + // is valid for Build to return a nil Interceptor and a nil error. In this + // case, the RPC will not be intercepted by this filter. + BuildClientInterceptor(config, override FilterConfig) (iresolver.ClientInterceptor, error) +} + +// ServerInterceptorBuilder constructs a Server Interceptor. If this type is +// implemented by a Filter, it is capable of working on a server. +type ServerInterceptorBuilder interface { + // BuildServerInterceptor uses the FilterConfigs produced above to produce + // an HTTP filter interceptor for servers. config will always be non-nil, + // but override may be nil if no override config exists for the filter. It + // is valid for Build to return a nil Interceptor and a nil error. In this + // case, the RPC will not be intercepted by this filter. + BuildServerInterceptor(config, override FilterConfig) (iresolver.ServerInterceptor, error) +} + +var ( + // m is a map from scheme to filter. + m = make(map[string]Filter) +) + +// Register registers the HTTP filter Builder to the filter map. b.TypeURLs() +// will be used as the types for this filter. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple filters are +// registered with the same type URL, the one registered last will take effect. 
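To make the registry contract above concrete, here is a hypothetical do-nothing filter that satisfies the Filter interface and registers itself by type URL; the package name and type URL are invented, and since httpfilter is internal to grpc-go the sketch only compiles inside that module.

```go
// Package noopfilter is a hypothetical illustration of the httpfilter
// registry contract, not part of the vendored code.
package noopfilter

import (
	"fmt"

	"google.golang.org/grpc/xds/internal/httpfilter"
	"google.golang.org/protobuf/proto"
)

func init() {
	// Registration must happen at init time; the registry map is not
	// synchronized for concurrent use.
	httpfilter.Register(noop{})
}

type noop struct{}

// cfg embeds httpfilter.FilterConfig to satisfy its unexported marker method.
type cfg struct {
	httpfilter.FilterConfig
}

func (noop) TypeURLs() []string {
	return []string{"type.googleapis.com/example.Noop"} // invented type URL
}

func (noop) ParseFilterConfig(m proto.Message) (httpfilter.FilterConfig, error) {
	if m == nil {
		return nil, fmt.Errorf("noop: nil configuration message provided")
	}
	return cfg{}, nil
}

func (noop) ParseFilterConfigOverride(proto.Message) (httpfilter.FilterConfig, error) {
	return cfg{}, nil
}

func (noop) IsTerminal() bool { return false }
```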
+func Register(b Filter) { + for _, u := range b.TypeURLs() { + m[u] = b + } +} + +// UnregisterForTesting unregisters the HTTP Filter for testing purposes. +func UnregisterForTesting(typeURL string) { + delete(m, typeURL) +} + +// Get returns the HTTPFilter registered with typeURL. +// +// If no filter is register with typeURL, nil will be returned. +func Get(typeURL string) Filter { + return m[typeURL] +} diff --git a/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go b/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go new file mode 100644 index 0000000000..bcda2ab05f --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go @@ -0,0 +1,214 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package rbac implements the Envoy RBAC HTTP filter. +package rbac + +import ( + "context" + "errors" + "fmt" + "strings" + + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/xds/rbac" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + rpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3" +) + +func init() { + httpfilter.Register(builder{}) + + // TODO: Remove these once the RBAC env var is removed. + internal.RegisterRBACHTTPFilterForTesting = func() { + httpfilter.Register(builder{}) + } + internal.UnregisterRBACHTTPFilterForTesting = func() { + for _, typeURL := range builder.TypeURLs(builder{}) { + httpfilter.UnregisterForTesting(typeURL) + } + } +} + +type builder struct { +} + +type config struct { + httpfilter.FilterConfig + chainEngine *rbac.ChainEngine +} + +func (builder) TypeURLs() []string { + return []string{ + "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC", + "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBACPerRoute", + } +} + +// Parsing is the same for the base config and the override config. +func parseConfig(rbacCfg *rpb.RBAC) (httpfilter.FilterConfig, error) { + // All the validation logic described in A41. + for _, policy := range rbacCfg.GetRules().GetPolicies() { + // "Policy.condition and Policy.checked_condition must cause a + // validation failure if present." - A41 + if policy.Condition != nil { + return nil, errors.New("rbac: Policy.condition is present") + } + if policy.CheckedCondition != nil { + return nil, errors.New("rbac: policy.CheckedCondition is present") + } + + // "It is also a validation failure if Permission or Principal has a + // header matcher for a grpc- prefixed header name or :scheme." 
- A41 + for _, principal := range policy.Principals { + name := principal.GetHeader().GetName() + if name == ":scheme" || strings.HasPrefix(name, "grpc-") { + return nil, fmt.Errorf("rbac: principal header matcher for %v is :scheme or starts with grpc", name) + } + } + for _, permission := range policy.Permissions { + name := permission.GetHeader().GetName() + if name == ":scheme" || strings.HasPrefix(name, "grpc-") { + return nil, fmt.Errorf("rbac: permission header matcher for %v is :scheme or starts with grpc", name) + } + } + } + + // "Envoy aliases :authority and Host in its header map implementation, so + // they should be treated equivalent for the RBAC matchers; there must be no + // behavior change depending on which of the two header names is used in the + // RBAC policy." - A41. Loop through config's principals and policies, change + // any header matcher with value "host" to :authority", as that is what + // grpc-go shifts both headers to in transport layer. + for _, policy := range rbacCfg.GetRules().GetPolicies() { + for _, principal := range policy.Principals { + if principal.GetHeader().GetName() == "host" { + principal.GetHeader().Name = ":authority" + } + } + for _, permission := range policy.Permissions { + if permission.GetHeader().GetName() == "host" { + permission.GetHeader().Name = ":authority" + } + } + } + + // Two cases where this HTTP Filter is a no op: + // "If absent, no enforcing RBAC policy will be applied" - RBAC + // Documentation for Rules field. + // "At this time, if the RBAC.action is Action.LOG then the policy will be + // completely ignored, as if RBAC was not configured." - A41 + if rbacCfg.Rules == nil || rbacCfg.GetRules().GetAction() == v3rbacpb.RBAC_LOG { + return config{}, nil + } + + // TODO(gregorycooke) - change the call chain to here so we have the filter + // name to input here instead of an empty string. It will come from here: + // https://github.com/grpc/grpc-go/blob/eff0942e95d93112921414aee758e619ec86f26f/xds/internal/xdsclient/xdsresource/unmarshal_lds.go#L199 + ce, err := rbac.NewChainEngine([]*v3rbacpb.RBAC{rbacCfg.GetRules()}, "") + if err != nil { + // "At this time, if the RBAC.action is Action.LOG then the policy will be + // completely ignored, as if RBAC was not configured." 
- A41 + if rbacCfg.GetRules().GetAction() != v3rbacpb.RBAC_LOG { + return nil, fmt.Errorf("rbac: error constructing matching engine: %v", err) + } + } + + return config{chainEngine: ce}, nil +} + +func (builder) ParseFilterConfig(cfg proto.Message) (httpfilter.FilterConfig, error) { + if cfg == nil { + return nil, fmt.Errorf("rbac: nil configuration message provided") + } + m, ok := cfg.(*anypb.Any) + if !ok { + return nil, fmt.Errorf("rbac: error parsing config %v: unknown type %T", cfg, cfg) + } + msg := new(rpb.RBAC) + if err := m.UnmarshalTo(msg); err != nil { + return nil, fmt.Errorf("rbac: error parsing config %v: %v", cfg, err) + } + return parseConfig(msg) +} + +func (builder) ParseFilterConfigOverride(override proto.Message) (httpfilter.FilterConfig, error) { + if override == nil { + return nil, fmt.Errorf("rbac: nil configuration message provided") + } + m, ok := override.(*anypb.Any) + if !ok { + return nil, fmt.Errorf("rbac: error parsing override config %v: unknown type %T", override, override) + } + msg := new(rpb.RBACPerRoute) + if err := m.UnmarshalTo(msg); err != nil { + return nil, fmt.Errorf("rbac: error parsing override config %v: %v", override, err) + } + return parseConfig(msg.Rbac) +} + +func (builder) IsTerminal() bool { + return false +} + +var _ httpfilter.ServerInterceptorBuilder = builder{} + +// BuildServerInterceptor is an optional interface builder implements in order +// to signify it works server side. +func (builder) BuildServerInterceptor(cfg httpfilter.FilterConfig, override httpfilter.FilterConfig) (resolver.ServerInterceptor, error) { + if cfg == nil { + return nil, fmt.Errorf("rbac: nil config provided") + } + + c, ok := cfg.(config) + if !ok { + return nil, fmt.Errorf("rbac: incorrect config type provided (%T): %v", cfg, cfg) + } + + if override != nil { + // override completely replaces the listener configuration; but we + // still validate the listener config type. + c, ok = override.(config) + if !ok { + return nil, fmt.Errorf("rbac: incorrect override config type provided (%T): %v", override, override) + } + } + + // RBAC HTTP Filter is a no op from one of these two cases: + // "If absent, no enforcing RBAC policy will be applied" - RBAC + // Documentation for Rules field. + // "At this time, if the RBAC.action is Action.LOG then the policy will be + // completely ignored, as if RBAC was not configured." - A41 + if c.chainEngine == nil { + return nil, nil + } + return &interceptor{chainEngine: c.chainEngine}, nil +} + +type interceptor struct { + chainEngine *rbac.ChainEngine +} + +func (i *interceptor) AllowRPC(ctx context.Context) error { + return i.chainEngine.IsAuthorized(ctx) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/httpfilter/router/router.go b/vendor/google.golang.org/grpc/xds/internal/httpfilter/router/router.go new file mode 100644 index 0000000000..a781523d37 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/httpfilter/router/router.go @@ -0,0 +1,113 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package router implements the Envoy Router HTTP filter. +package router + +import ( + "fmt" + + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + + pb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3" +) + +// TypeURL is the message type for the Router configuration. +const TypeURL = "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + +func init() { + httpfilter.Register(builder{}) +} + +// IsRouterFilter returns true iff a HTTP filter is a Router filter. +func IsRouterFilter(b httpfilter.Filter) bool { + _, ok := b.(builder) + return ok +} + +type builder struct { +} + +func (builder) TypeURLs() []string { return []string{TypeURL} } + +func (builder) ParseFilterConfig(cfg proto.Message) (httpfilter.FilterConfig, error) { + // The gRPC router filter does not currently use any fields from the + // config. Verify type only. + if cfg == nil { + return nil, fmt.Errorf("router: nil configuration message provided") + } + m, ok := cfg.(*anypb.Any) + if !ok { + return nil, fmt.Errorf("router: error parsing config %v: unknown type %T", cfg, cfg) + } + msg := new(pb.Router) + if err := m.UnmarshalTo(msg); err != nil { + return nil, fmt.Errorf("router: error parsing config %v: %v", cfg, err) + } + return config{}, nil +} + +func (builder) ParseFilterConfigOverride(override proto.Message) (httpfilter.FilterConfig, error) { + if override != nil { + return nil, fmt.Errorf("router: unexpected config override specified: %v", override) + } + return config{}, nil +} + +func (builder) IsTerminal() bool { + return true +} + +var ( + _ httpfilter.ClientInterceptorBuilder = builder{} + _ httpfilter.ServerInterceptorBuilder = builder{} +) + +func (builder) BuildClientInterceptor(cfg, override httpfilter.FilterConfig) (iresolver.ClientInterceptor, error) { + if _, ok := cfg.(config); !ok { + return nil, fmt.Errorf("router: incorrect config type provided (%T): %v", cfg, cfg) + } + if override != nil { + return nil, fmt.Errorf("router: unexpected override configuration specified: %v", override) + } + // The gRPC router is implemented within the xds resolver's config + // selector, not as a separate plugin. So we return a nil HTTPFilter, + // which will not be invoked. + return nil, nil +} + +func (builder) BuildServerInterceptor(cfg, override httpfilter.FilterConfig) (iresolver.ServerInterceptor, error) { + if _, ok := cfg.(config); !ok { + return nil, fmt.Errorf("router: incorrect config type provided (%T): %v", cfg, cfg) + } + if override != nil { + return nil, fmt.Errorf("router: unexpected override configuration specified: %v", override) + } + // The gRPC router is currently unimplemented on the server side. So we + // return a nil HTTPFilter, which will not be invoked. + return nil, nil +} + +// The gRPC router filter does not currently support any configuration. Verify +// type only. +type config struct { + httpfilter.FilterConfig +} diff --git a/vendor/google.golang.org/grpc/xds/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/internal.go new file mode 100644 index 0000000000..1d8a6b03f1 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/internal.go @@ -0,0 +1,97 @@ +/* + * + * Copyright 2019 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package internal contains functions/structs shared by xds +// balancers/resolvers. +package internal + +import ( + "encoding/json" + "fmt" + + "google.golang.org/grpc/resolver" +) + +// LocalityID is xds.Locality without XXX fields, so it can be used as map +// keys. +// +// xds.Locality cannot be map keys because one of the XXX fields is a slice. +type LocalityID struct { + Region string `json:"region,omitempty"` + Zone string `json:"zone,omitempty"` + SubZone string `json:"subZone,omitempty"` +} + +// ToString generates a string representation of LocalityID by marshalling it into +// json. Not calling it String() so printf won't call it. +func (l LocalityID) ToString() (string, error) { + b, err := json.Marshal(l) + if err != nil { + return "", err + } + return string(b), nil +} + +// Equal allows the values to be compared by Attributes.Equal. +func (l LocalityID) Equal(o any) bool { + ol, ok := o.(LocalityID) + if !ok { + return false + } + return l.Region == ol.Region && l.Zone == ol.Zone && l.SubZone == ol.SubZone +} + +// Empty returns whether or not the locality ID is empty. +func (l LocalityID) Empty() bool { + return l.Region == "" && l.Zone == "" && l.SubZone == "" +} + +// LocalityIDFromString converts a json representation of locality, into a +// LocalityID struct. +func LocalityIDFromString(s string) (ret LocalityID, _ error) { + err := json.Unmarshal([]byte(s), &ret) + if err != nil { + return LocalityID{}, fmt.Errorf("%s is not a well formatted locality ID, error: %v", s, err) + } + return ret, nil +} + +type localityKeyType string + +const localityKey = localityKeyType("grpc.xds.internal.address.locality") + +// GetLocalityID returns the locality ID of addr. +func GetLocalityID(addr resolver.Address) LocalityID { + path, _ := addr.BalancerAttributes.Value(localityKey).(LocalityID) + return path +} + +// SetLocalityID sets locality ID in addr to l. +func SetLocalityID(addr resolver.Address, l LocalityID) resolver.Address { + addr.BalancerAttributes = addr.BalancerAttributes.WithValue(localityKey, l) + return addr +} + +// ResourceTypeMapForTesting maps TypeUrl to corresponding ResourceType. +var ResourceTypeMapForTesting map[string]any + +// UnknownCSMLabels are TelemetryLabels emitted from CDS if CSM Telemetry Label +// data is not present in the CDS Resource. +var UnknownCSMLabels = map[string]string{ + "csm.service_name": "unknown", + "csm.service_namespace_name": "unknown", +} diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/resolver/internal/internal.go new file mode 100644 index 0000000000..d9c2327828 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/internal/internal.go @@ -0,0 +1,30 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains functionality internal to the xDS resolver. +package internal + +// The following variables are overridden in tests. +var ( + // NewWRR is the function used to create a new weighted round robin + // implementation. + NewWRR any // func() wrr.WRR + + // NewXDSClient is the function used to create a new xDS client. + NewXDSClient any // func(string) (xdsclient.XDSClient, func(), error) +) diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/logging.go b/vendor/google.golang.org/grpc/xds/internal/resolver/logging.go new file mode 100644 index 0000000000..746b85af25 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/logging.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package resolver + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[xds-resolver %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *xdsResolver) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go new file mode 100644 index 0000000000..36776f3deb --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go @@ -0,0 +1,348 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package resolver + +import ( + "context" + "encoding/json" + "fmt" + "math/bits" + "math/rand" + "strings" + "sync/atomic" + "time" + + xxhash "github.com/cespare/xxhash/v2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/grpcutil" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/wrr" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/grpc/xds/internal/balancer/clustermanager" + "google.golang.org/grpc/xds/internal/balancer/ringhash" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +const ( + cdsName = "cds_experimental" + xdsClusterManagerName = "xds_cluster_manager_experimental" + clusterPrefix = "cluster:" + clusterSpecifierPluginPrefix = "cluster_specifier_plugin:" +) + +type serviceConfig struct { + LoadBalancingConfig balancerConfig `json:"loadBalancingConfig"` +} + +type balancerConfig []map[string]any + +func newBalancerConfig(name string, config any) balancerConfig { + return []map[string]any{{name: config}} +} + +type cdsBalancerConfig struct { + Cluster string `json:"cluster"` +} + +type xdsChildConfig struct { + ChildPolicy balancerConfig `json:"childPolicy"` +} + +type xdsClusterManagerConfig struct { + Children map[string]xdsChildConfig `json:"children"` +} + +// serviceConfigJSON produces a service config in JSON format representing all +// the clusters referenced in activeClusters. This includes clusters with zero +// references, so they must be pruned first. +func serviceConfigJSON(activeClusters map[string]*clusterInfo) ([]byte, error) { + // Generate children (all entries in activeClusters). + children := make(map[string]xdsChildConfig) + for cluster, ci := range activeClusters { + children[cluster] = ci.cfg + } + + sc := serviceConfig{ + LoadBalancingConfig: newBalancerConfig( + xdsClusterManagerName, xdsClusterManagerConfig{Children: children}, + ), + } + + bs, err := json.Marshal(sc) + if err != nil { + return nil, fmt.Errorf("failed to marshal json: %v", err) + } + return bs, nil +} + +type virtualHost struct { + // map from filter name to its config + httpFilterConfigOverride map[string]httpfilter.FilterConfig + // retry policy present in virtual host + retryConfig *xdsresource.RetryConfig +} + +// routeCluster holds information about a cluster as referenced by a route. 
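As a rough illustration of the service config that serviceConfigJSON assembles, the following standalone sketch marshals a config for a single hypothetical cluster named "backend"; active clusters are keyed with the "cluster:" prefix, and each child points the cds policy at its cluster.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// One hypothetical active cluster, "backend", wired to the cds policy
	// under the xds cluster manager, mirroring the structs defined above.
	sc := map[string]any{
		"loadBalancingConfig": []map[string]any{{
			"xds_cluster_manager_experimental": map[string]any{
				"children": map[string]any{
					"cluster:backend": map[string]any{
						"childPolicy": []map[string]any{{
							"cds_experimental": map[string]string{"cluster": "backend"},
						}},
					},
				},
			},
		}},
	}
	b, err := json.MarshalIndent(sc, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```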
+type routeCluster struct { + name string + // map from filter name to its config + httpFilterConfigOverride map[string]httpfilter.FilterConfig +} + +type route struct { + m *xdsresource.CompositeMatcher // converted from route matchers + actionType xdsresource.RouteActionType // holds route action type + clusters wrr.WRR // holds *routeCluster entries + maxStreamDuration time.Duration + // map from filter name to its config + httpFilterConfigOverride map[string]httpfilter.FilterConfig + retryConfig *xdsresource.RetryConfig + hashPolicies []*xdsresource.HashPolicy +} + +func (r route) String() string { + return fmt.Sprintf("%s -> { clusters: %v, maxStreamDuration: %v }", r.m.String(), r.clusters, r.maxStreamDuration) +} + +type configSelector struct { + r *xdsResolver + virtualHost virtualHost + routes []route + clusters map[string]*clusterInfo + httpFilterConfig []xdsresource.HTTPFilter +} + +var errNoMatchedRouteFound = status.Errorf(codes.Unavailable, "no matched route was found") +var errUnsupportedClientRouteAction = status.Errorf(codes.Unavailable, "matched route does not have a supported route action type") + +func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) { + if cs == nil { + return nil, status.Errorf(codes.Unavailable, "no valid clusters") + } + var rt *route + // Loop through routes in order and select first match. + for _, r := range cs.routes { + if r.m.Match(rpcInfo) { + rt = &r + break + } + } + + if rt == nil || rt.clusters == nil { + return nil, errNoMatchedRouteFound + } + + if rt.actionType != xdsresource.RouteActionRoute { + return nil, errUnsupportedClientRouteAction + } + + cluster, ok := rt.clusters.Next().(*routeCluster) + if !ok { + return nil, status.Errorf(codes.Internal, "error retrieving cluster for match: %v (%T)", cluster, cluster) + } + + // Add a ref to the selected cluster, as this RPC needs this cluster until + // it is committed. + ref := &cs.clusters[cluster.name].refCount + atomic.AddInt32(ref, 1) + + interceptor, err := cs.newInterceptor(rt, cluster) + if err != nil { + return nil, err + } + + lbCtx := clustermanager.SetPickedCluster(rpcInfo.Context, cluster.name) + lbCtx = ringhash.SetRequestHash(lbCtx, cs.generateHash(rpcInfo, rt.hashPolicies)) + + config := &iresolver.RPCConfig{ + // Communicate to the LB policy the chosen cluster and request hash, if Ring Hash LB policy. + Context: lbCtx, + OnCommitted: func() { + // When the RPC is committed, the cluster is no longer required. + // Decrease its ref. + if v := atomic.AddInt32(ref, -1); v == 0 { + // This entry will be removed from activeClusters when + // producing the service config for the empty update. 
+ cs.r.serializer.TrySchedule(func(context.Context) { + cs.r.onClusterRefDownToZero() + }) + } + }, + Interceptor: interceptor, + } + + if rt.maxStreamDuration != 0 { + config.MethodConfig.Timeout = &rt.maxStreamDuration + } + if rt.retryConfig != nil { + config.MethodConfig.RetryPolicy = retryConfigToPolicy(rt.retryConfig) + } else if cs.virtualHost.retryConfig != nil { + config.MethodConfig.RetryPolicy = retryConfigToPolicy(cs.virtualHost.retryConfig) + } + + return config, nil +} + +func retryConfigToPolicy(config *xdsresource.RetryConfig) *serviceconfig.RetryPolicy { + return &serviceconfig.RetryPolicy{ + MaxAttempts: int(config.NumRetries) + 1, + InitialBackoff: config.RetryBackoff.BaseInterval, + MaxBackoff: config.RetryBackoff.MaxInterval, + BackoffMultiplier: 2, + RetryableStatusCodes: config.RetryOn, + } +} + +func (cs *configSelector) generateHash(rpcInfo iresolver.RPCInfo, hashPolicies []*xdsresource.HashPolicy) uint64 { + var hash uint64 + var generatedHash bool + var md, emd metadata.MD + var mdRead bool + for _, policy := range hashPolicies { + var policyHash uint64 + var generatedPolicyHash bool + switch policy.HashPolicyType { + case xdsresource.HashPolicyTypeHeader: + if strings.HasSuffix(policy.HeaderName, "-bin") { + continue + } + if !mdRead { + md, _ = metadata.FromOutgoingContext(rpcInfo.Context) + emd, _ = grpcutil.ExtraMetadata(rpcInfo.Context) + mdRead = true + } + values := emd.Get(policy.HeaderName) + if len(values) == 0 { + // Extra metadata (e.g. the "content-type" header) takes + // precedence over the user's metadata. + values = md.Get(policy.HeaderName) + if len(values) == 0 { + // If the header isn't present at all, this policy is a no-op. + continue + } + } + joinedValues := strings.Join(values, ",") + if policy.Regex != nil { + joinedValues = policy.Regex.ReplaceAllString(joinedValues, policy.RegexSubstitution) + } + policyHash = xxhash.Sum64String(joinedValues) + generatedHash = true + generatedPolicyHash = true + case xdsresource.HashPolicyTypeChannelID: + // Use the static channel ID as the hash for this policy. + policyHash = cs.r.channelID + generatedHash = true + generatedPolicyHash = true + } + + // Deterministically combine the hash policies. Rotating prevents + // duplicate hash policies from cancelling each other out and preserves + // the 64 bits of entropy. + if generatedPolicyHash { + hash = bits.RotateLeft64(hash, 1) + hash = hash ^ policyHash + } + + // If terminal policy and a hash has already been generated, ignore the + // rest of the policies and use that hash already generated. + if policy.Terminal && generatedHash { + break + } + } + + if generatedHash { + return hash + } + // If no generated hash return a random long. In the grand scheme of things + // this logically will map to choosing a random backend to route request to. 
+ return rand.Uint64() +} + +func (cs *configSelector) newInterceptor(rt *route, cluster *routeCluster) (iresolver.ClientInterceptor, error) { + if len(cs.httpFilterConfig) == 0 { + return nil, nil + } + interceptors := make([]iresolver.ClientInterceptor, 0, len(cs.httpFilterConfig)) + for _, filter := range cs.httpFilterConfig { + override := cluster.httpFilterConfigOverride[filter.Name] // cluster is highest priority + if override == nil { + override = rt.httpFilterConfigOverride[filter.Name] // route is second priority + } + if override == nil { + override = cs.virtualHost.httpFilterConfigOverride[filter.Name] // VH is third & lowest priority + } + ib, ok := filter.Filter.(httpfilter.ClientInterceptorBuilder) + if !ok { + // Should not happen if it passed xdsClient validation. + return nil, fmt.Errorf("filter does not support use in client") + } + i, err := ib.BuildClientInterceptor(filter.Config, override) + if err != nil { + return nil, fmt.Errorf("error constructing filter: %v", err) + } + if i != nil { + interceptors = append(interceptors, i) + } + } + return &interceptorList{interceptors: interceptors}, nil +} + +// stop decrements refs of all clusters referenced by this config selector. +func (cs *configSelector) stop() { + // The resolver's old configSelector may be nil. Handle that here. + if cs == nil { + return + } + // If any refs drop to zero, we'll need a service config update to delete + // the cluster. + needUpdate := false + // Loops over cs.clusters, but these are pointers to entries in + // activeClusters. + for _, ci := range cs.clusters { + if v := atomic.AddInt32(&ci.refCount, -1); v == 0 { + needUpdate = true + } + } + // We stop the old config selector immediately after sending a new config + // selector; we need another update to delete clusters from the config (if + // we don't have another update pending already). + if needUpdate { + cs.r.serializer.TrySchedule(func(context.Context) { + cs.r.onClusterRefDownToZero() + }) + } +} + +type interceptorList struct { + interceptors []iresolver.ClientInterceptor +} + +func (il *interceptorList) NewStream(ctx context.Context, ri iresolver.RPCInfo, _ func(), newStream func(ctx context.Context, _ func()) (iresolver.ClientStream, error)) (iresolver.ClientStream, error) { + for i := len(il.interceptors) - 1; i >= 0; i-- { + ns := newStream + interceptor := il.interceptors[i] + newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + return interceptor.NewStream(ctx, ri, done, ns) + } + } + return newStream(ctx, func() {}) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go b/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go new file mode 100644 index 0000000000..0de6604484 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go @@ -0,0 +1,92 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package resolver + +import ( + "context" + + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +type listenerWatcher struct { + resourceName string + cancel func() + parent *xdsResolver +} + +func newListenerWatcher(resourceName string, parent *xdsResolver) *listenerWatcher { + lw := &listenerWatcher{resourceName: resourceName, parent: parent} + lw.cancel = xdsresource.WatchListener(parent.xdsClient, resourceName, lw) + return lw +} + +func (l *listenerWatcher) OnUpdate(update *xdsresource.ListenerResourceData, onDone xdsresource.OnDoneFunc) { + handleUpdate := func(context.Context) { l.parent.onListenerResourceUpdate(update.Resource); onDone() } + l.parent.serializer.ScheduleOr(handleUpdate, onDone) +} + +func (l *listenerWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) { + handleError := func(context.Context) { l.parent.onListenerResourceError(err); onDone() } + l.parent.serializer.ScheduleOr(handleError, onDone) +} + +func (l *listenerWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) { + handleNotFound := func(context.Context) { l.parent.onListenerResourceNotFound(); onDone() } + l.parent.serializer.ScheduleOr(handleNotFound, onDone) +} + +func (l *listenerWatcher) stop() { + l.cancel() + l.parent.logger.Infof("Canceling watch on Listener resource %q", l.resourceName) +} + +type routeConfigWatcher struct { + resourceName string + cancel func() + parent *xdsResolver +} + +func newRouteConfigWatcher(resourceName string, parent *xdsResolver) *routeConfigWatcher { + rw := &routeConfigWatcher{resourceName: resourceName, parent: parent} + rw.cancel = xdsresource.WatchRouteConfig(parent.xdsClient, resourceName, rw) + return rw +} + +func (r *routeConfigWatcher) OnUpdate(u *xdsresource.RouteConfigResourceData, onDone xdsresource.OnDoneFunc) { + handleUpdate := func(context.Context) { + r.parent.onRouteConfigResourceUpdate(r.resourceName, u.Resource) + onDone() + } + r.parent.serializer.ScheduleOr(handleUpdate, onDone) +} + +func (r *routeConfigWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) { + handleError := func(context.Context) { r.parent.onRouteConfigResourceError(r.resourceName, err); onDone() } + r.parent.serializer.ScheduleOr(handleError, onDone) +} + +func (r *routeConfigWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) { + handleNotFound := func(context.Context) { r.parent.onRouteConfigResourceNotFound(r.resourceName); onDone() } + r.parent.serializer.ScheduleOr(handleNotFound, onDone) +} + +func (r *routeConfigWatcher) stop() { + r.cancel() + r.parent.logger.Infof("Canceling watch on RouteConfiguration resource %q", r.resourceName) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go b/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go new file mode 100644 index 0000000000..b5d24e4bf2 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go @@ -0,0 +1,570 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver implements the xds resolver, that does LDS and RDS to find +// the cluster to use. +package resolver + +import ( + "context" + "fmt" + "math/rand" + "sync/atomic" + + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/wrr" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/resolver" + rinternal "google.golang.org/grpc/xds/internal/resolver/internal" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// Scheme is the xDS resolver's scheme. +// +// TODO(easwars): Rename this package as xdsresolver so that this is accessed as +// xdsresolver.Scheme +const Scheme = "xds" + +// newBuilderForTesting creates a new xds resolver builder using a specific xds +// bootstrap config, so tests can use multiple xds clients in different +// ClientConns at the same time. +func newBuilderForTesting(config []byte) (resolver.Builder, error) { + return &xdsResolverBuilder{ + newXDSClient: func(name string) (xdsclient.XDSClient, func(), error) { + return xdsclient.NewForTesting(xdsclient.OptionsForTesting{Name: name, Contents: config}) + }, + }, nil +} + +func init() { + resolver.Register(&xdsResolverBuilder{}) + internal.NewXDSResolverWithConfigForTesting = newBuilderForTesting + + rinternal.NewWRR = wrr.NewRandom + rinternal.NewXDSClient = xdsclient.New +} + +type xdsResolverBuilder struct { + newXDSClient func(string) (xdsclient.XDSClient, func(), error) +} + +// Build helps implement the resolver.Builder interface. +// +// The xds bootstrap process is performed (and a new xds client is built) every +// time an xds resolver is built. +func (b *xdsResolverBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (_ resolver.Resolver, retErr error) { + r := &xdsResolver{ + cc: cc, + activeClusters: make(map[string]*clusterInfo), + channelID: rand.Uint64(), + } + defer func() { + if retErr != nil { + r.Close() + } + }() + r.logger = prefixLogger(r) + r.logger.Infof("Creating resolver for target: %+v", target) + + // Initialize the serializer used to synchronize the following: + // - updates from the xDS client. This could lead to generation of new + // service config if resolution is complete. + // - completion of an RPC to a removed cluster causing the associated ref + // count to become zero, resulting in generation of new service config. + // - stopping of a config selector that results in generation of new service + // config. + ctx, cancel := context.WithCancel(context.Background()) + r.serializer = grpcsync.NewCallbackSerializer(ctx) + r.serializerCancel = cancel + + // Initialize the xDS client. + newXDSClient := rinternal.NewXDSClient.(func(string) (xdsclient.XDSClient, func(), error)) + if b.newXDSClient != nil { + newXDSClient = b.newXDSClient + } + client, closeFn, err := newXDSClient(target.String()) + if err != nil { + return nil, fmt.Errorf("xds: failed to create xds-client: %v", err) + } + r.xdsClient = client + r.xdsClientClose = closeFn + + // Determine the listener resource name and start a watcher for it. 
+ template, err := r.sanityChecksOnBootstrapConfig(target, opts, r.xdsClient) + if err != nil { + return nil, err + } + r.dataplaneAuthority = opts.Authority + r.ldsResourceName = bootstrap.PopulateResourceTemplate(template, target.Endpoint()) + r.listenerWatcher = newListenerWatcher(r.ldsResourceName, r) + return r, nil +} + +// Performs the following sanity checks: +// - Verifies that the bootstrap configuration is not empty. +// - Verifies that if xDS credentials are specified by the user, the +// bootstrap configuration contains certificate providers. +// - Verifies that if the provided dial target contains an authority, the +// bootstrap configuration contains server config for that authority. +// +// Returns the listener resource name template to use. If any of the above +// validations fail, a non-nil error is returned. +func (r *xdsResolver) sanityChecksOnBootstrapConfig(target resolver.Target, _ resolver.BuildOptions, client xdsclient.XDSClient) (string, error) { + bootstrapConfig := client.BootstrapConfig() + if bootstrapConfig == nil { + // This is never expected to happen after a successful xDS client + // creation. Defensive programming. + return "", fmt.Errorf("xds: bootstrap configuration is empty") + } + + // Find the client listener template to use from the bootstrap config: + // - If authority is not set in the target, use the top level template + // - If authority is set, use the template from the authority map. + template := bootstrapConfig.ClientDefaultListenerResourceNameTemplate() + if authority := target.URL.Host; authority != "" { + authorities := bootstrapConfig.Authorities() + if authorities == nil { + return "", fmt.Errorf("xds: authority %q specified in dial target %q is not found in the bootstrap file", authority, target) + } + a := authorities[authority] + if a == nil { + return "", fmt.Errorf("xds: authority %q specified in dial target %q is not found in the bootstrap file", authority, target) + } + if a.ClientListenerResourceNameTemplate != "" { + // This check will never be false, because + // ClientListenerResourceNameTemplate is required to start with + // xdstp://, and has a default value (not an empty string) if unset. + template = a.ClientListenerResourceNameTemplate + } + } + return template, nil +} + +// Name helps implement the resolver.Builder interface. +func (*xdsResolverBuilder) Scheme() string { + return Scheme +} + +// xdsResolver implements the resolver.Resolver interface. +// +// It registers a watcher for ServiceConfig updates with the xdsClient object +// (which performs LDS/RDS queries for the same), and passes the received +// updates to the ClientConn. +type xdsResolver struct { + cc resolver.ClientConn + logger *grpclog.PrefixLogger + // The underlying xdsClient which performs all xDS requests and responses. + xdsClient xdsclient.XDSClient + xdsClientClose func() + // A random number which uniquely identifies the channel which owns this + // resolver. + channelID uint64 + + // All methods on the xdsResolver type except for the ones invoked by gRPC, + // i.e ResolveNow() and Close(), are guaranteed to execute in the context of + // this serializer's callback. And since the serializer guarantees mutual + // exclusion among these callbacks, we can get by without any mutexes to + // access all of the below defined state. The only exception is Close(), + // which does access some of this shared state, but it does so after + // cancelling the context passed to the serializer. 
+ serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + // dataplaneAuthority is the authority used for the data plane connections, + // which is also used to select the VirtualHost within the xDS + // RouteConfiguration. This is %-encoded to match with VirtualHost Domain + // in xDS RouteConfiguration. + dataplaneAuthority string + + ldsResourceName string + listenerWatcher *listenerWatcher + listenerUpdateRecvd bool + currentListener xdsresource.ListenerUpdate + + rdsResourceName string + routeConfigWatcher *routeConfigWatcher + routeConfigUpdateRecvd bool + currentRouteConfig xdsresource.RouteConfigUpdate + currentVirtualHost *xdsresource.VirtualHost // Matched virtual host for quick access. + + // activeClusters is a map from cluster name to information about the + // cluster that includes a ref count and load balancing configuration. + activeClusters map[string]*clusterInfo + + curConfigSelector *configSelector +} + +// ResolveNow is a no-op at this point. +func (*xdsResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (r *xdsResolver) Close() { + // Cancel the context passed to the serializer and wait for any scheduled + // callbacks to complete. Canceling the context ensures that no new + // callbacks will be scheduled. + r.serializerCancel() + <-r.serializer.Done() + + // Note that Close needs to check for nils even if some of them are always + // set in the constructor. This is because the constructor defers Close() in + // error cases, and the fields might not be set when the error happens. + + if r.listenerWatcher != nil { + r.listenerWatcher.stop() + } + if r.routeConfigWatcher != nil { + r.routeConfigWatcher.stop() + } + if r.xdsClientClose != nil { + r.xdsClientClose() + } + r.logger.Infof("Shutdown") +} + +// sendNewServiceConfig prunes active clusters, generates a new service config +// based on the current set of active clusters, and sends an update to the +// channel with that service config and the provided config selector. Returns +// false if an error occurs while generating the service config and the update +// cannot be sent. +// +// Only executed in the context of a serializer callback. +func (r *xdsResolver) sendNewServiceConfig(cs *configSelector) bool { + // Delete entries from r.activeClusters with zero references; + // otherwise serviceConfigJSON will generate a config including + // them. + r.pruneActiveClusters() + + if cs == nil && len(r.activeClusters) == 0 { + // There are no clusters and we are sending a failing configSelector. + // Send an empty config, which picks pick-first, with no address, and + // puts the ClientConn into transient failure. + r.cc.UpdateState(resolver.State{ServiceConfig: r.cc.ParseServiceConfig("{}")}) + return true + } + + sc, err := serviceConfigJSON(r.activeClusters) + if err != nil { + // JSON marshal error; should never happen. + r.logger.Errorf("For Listener resource %q and RouteConfiguration resource %q, failed to marshal newly built service config: %v", r.ldsResourceName, r.rdsResourceName, err) + r.cc.ReportError(err) + return false + } + r.logger.Infof("For Listener resource %q and RouteConfiguration resource %q, generated service config: %v", r.ldsResourceName, r.rdsResourceName, pretty.FormatJSON(sc)) + + // Send the update to the ClientConn. 
+ state := iresolver.SetConfigSelector(resolver.State{ + ServiceConfig: r.cc.ParseServiceConfig(string(sc)), + }, cs) + r.cc.UpdateState(xdsclient.SetClient(state, r.xdsClient)) + return true +} + +// newConfigSelector creates a new config selector using the most recently +// received listener and route config updates. May add entries to +// r.activeClusters for previously-unseen clusters. +// +// Only executed in the context of a serializer callback. +func (r *xdsResolver) newConfigSelector() (*configSelector, error) { + cs := &configSelector{ + r: r, + virtualHost: virtualHost{ + httpFilterConfigOverride: r.currentVirtualHost.HTTPFilterConfigOverride, + retryConfig: r.currentVirtualHost.RetryConfig, + }, + routes: make([]route, len(r.currentVirtualHost.Routes)), + clusters: make(map[string]*clusterInfo), + httpFilterConfig: r.currentListener.HTTPFilters, + } + + for i, rt := range r.currentVirtualHost.Routes { + clusters := rinternal.NewWRR.(func() wrr.WRR)() + if rt.ClusterSpecifierPlugin != "" { + clusterName := clusterSpecifierPluginPrefix + rt.ClusterSpecifierPlugin + clusters.Add(&routeCluster{ + name: clusterName, + }, 1) + ci := r.addOrGetActiveClusterInfo(clusterName) + ci.cfg = xdsChildConfig{ChildPolicy: balancerConfig(r.currentRouteConfig.ClusterSpecifierPlugins[rt.ClusterSpecifierPlugin])} + cs.clusters[clusterName] = ci + } else { + for cluster, wc := range rt.WeightedClusters { + clusterName := clusterPrefix + cluster + clusters.Add(&routeCluster{ + name: clusterName, + httpFilterConfigOverride: wc.HTTPFilterConfigOverride, + }, int64(wc.Weight)) + ci := r.addOrGetActiveClusterInfo(clusterName) + ci.cfg = xdsChildConfig{ChildPolicy: newBalancerConfig(cdsName, cdsBalancerConfig{Cluster: cluster})} + cs.clusters[clusterName] = ci + } + } + cs.routes[i].clusters = clusters + + var err error + cs.routes[i].m, err = xdsresource.RouteToMatcher(rt) + if err != nil { + return nil, err + } + cs.routes[i].actionType = rt.ActionType + if rt.MaxStreamDuration == nil { + cs.routes[i].maxStreamDuration = r.currentListener.MaxStreamDuration + } else { + cs.routes[i].maxStreamDuration = *rt.MaxStreamDuration + } + + cs.routes[i].httpFilterConfigOverride = rt.HTTPFilterConfigOverride + cs.routes[i].retryConfig = rt.RetryConfig + cs.routes[i].hashPolicies = rt.HashPolicies + } + + // Account for this config selector's clusters. Do this after no further + // errors may occur. Note: cs.clusters are pointers to entries in + // activeClusters. + for _, ci := range cs.clusters { + atomic.AddInt32(&ci.refCount, 1) + } + + return cs, nil +} + +// pruneActiveClusters deletes entries in r.activeClusters with zero +// references. +func (r *xdsResolver) pruneActiveClusters() { + for cluster, ci := range r.activeClusters { + if atomic.LoadInt32(&ci.refCount) == 0 { + delete(r.activeClusters, cluster) + } + } +} + +func (r *xdsResolver) addOrGetActiveClusterInfo(name string) *clusterInfo { + ci := r.activeClusters[name] + if ci != nil { + return ci + } + + ci = &clusterInfo{refCount: 0} + r.activeClusters[name] = ci + return ci +} + +type clusterInfo struct { + // number of references to this cluster; accessed atomically + refCount int32 + // cfg is the child configuration for this cluster, containing either the + // csp config or the cds cluster config. 
+ cfg xdsChildConfig +} + +// Determines if the xdsResolver has received all required configuration, i.e +// Listener and RouteConfiguration resources, from the management server, and +// whether a matching virtual host was found in the RouteConfiguration resource. +func (r *xdsResolver) resolutionComplete() bool { + return r.listenerUpdateRecvd && r.routeConfigUpdateRecvd && r.currentVirtualHost != nil +} + +// onResolutionComplete performs the following actions when resolution is +// complete, i.e Listener and RouteConfiguration resources have been received +// from the management server and a matching virtual host is found in the +// latter. +// - creates a new config selector (this involves incrementing references to +// clusters owned by this config selector). +// - stops the old config selector (this involves decrementing references to +// clusters owned by this config selector). +// - prunes active clusters and pushes a new service config to the channel. +// - updates the current config selector used by the resolver. +// +// Only executed in the context of a serializer callback. +func (r *xdsResolver) onResolutionComplete() { + if !r.resolutionComplete() { + return + } + + cs, err := r.newConfigSelector() + if err != nil { + r.logger.Warningf("Failed to build a config selector for resource %q: %v", r.ldsResourceName, err) + r.cc.ReportError(err) + return + } + + if !r.sendNewServiceConfig(cs) { + // JSON error creating the service config (unexpected); erase + // this config selector and ignore this update, continuing with + // the previous config selector. + cs.stop() + return + } + + r.curConfigSelector.stop() + r.curConfigSelector = cs +} + +func (r *xdsResolver) applyRouteConfigUpdate(update xdsresource.RouteConfigUpdate) { + matchVh := xdsresource.FindBestMatchingVirtualHost(r.dataplaneAuthority, update.VirtualHosts) + if matchVh == nil { + r.onError(fmt.Errorf("no matching virtual host found for %q", r.dataplaneAuthority)) + return + } + r.currentRouteConfig = update + r.currentVirtualHost = matchVh + r.routeConfigUpdateRecvd = true + + r.onResolutionComplete() +} + +// onError propagates the error up to the channel. And since this is invoked +// only for non resource-not-found errors, we don't have to update resolver +// state and we can keep using the old config. +// +// Only executed in the context of a serializer callback. +func (r *xdsResolver) onError(err error) { + r.cc.ReportError(err) +} + +// Contains common functionality to be executed when resources of either type +// are removed. +// +// Only executed in the context of a serializer callback. +func (r *xdsResolver) onResourceNotFound() { + // We cannot remove clusters from the service config that have ongoing RPCs. + // Instead, what we can do is to send an erroring (nil) config selector + // along with normal service config. This will ensure that new RPCs will + // fail, and once the active RPCs complete, the reference counts on the + // clusters will come down to zero. At that point, we will send an empty + // service config with no addresses. This results in the pick-first + // LB policy being configured on the channel, and since there are no + // address, pick-first will put the channel in TRANSIENT_FAILURE. + r.sendNewServiceConfig(nil) + + // Stop and dereference the active config selector, if one exists. + r.curConfigSelector.stop() + r.curConfigSelector = nil +} + +// Only executed in the context of a serializer callback. 
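+//
+// Handles both inline route configuration (carried within the Listener
+// resource itself) and dynamic route configuration (referenced by name in the
+// Listener and fetched through a separate RouteConfiguration watch).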
+func (r *xdsResolver) onListenerResourceUpdate(update xdsresource.ListenerUpdate) { + if r.logger.V(2) { + r.logger.Infof("Received update for Listener resource %q: %v", r.ldsResourceName, pretty.ToJSON(update)) + } + + r.currentListener = update + r.listenerUpdateRecvd = true + + if update.InlineRouteConfig != nil { + // If there was a previous route config watcher because of a non-inline + // route configuration, cancel it. + r.rdsResourceName = "" + if r.routeConfigWatcher != nil { + r.routeConfigWatcher.stop() + r.routeConfigWatcher = nil + } + + r.applyRouteConfigUpdate(*update.InlineRouteConfig) + return + } + + // We get here only if there was no inline route configuration. + + // If the route config name has not changed, send an update with existing + // route configuration and the newly received listener configuration. + if r.rdsResourceName == update.RouteConfigName { + r.onResolutionComplete() + return + } + + // If the route config name has changed, cancel the old watcher and start a + // new one. At this point, since we have not yet resolved the new route + // config name, we don't send an update to the channel, and therefore + // continue using the old route configuration (if received) until the new + // one is received. + r.rdsResourceName = update.RouteConfigName + if r.routeConfigWatcher != nil { + r.routeConfigWatcher.stop() + r.currentVirtualHost = nil + r.routeConfigUpdateRecvd = false + } + r.routeConfigWatcher = newRouteConfigWatcher(r.rdsResourceName, r) +} + +func (r *xdsResolver) onListenerResourceError(err error) { + if r.logger.V(2) { + r.logger.Infof("Received error for Listener resource %q: %v", r.ldsResourceName, err) + } + r.onError(err) +} + +// Only executed in the context of a serializer callback. +func (r *xdsResolver) onListenerResourceNotFound() { + if r.logger.V(2) { + r.logger.Infof("Received resource-not-found-error for Listener resource %q", r.ldsResourceName) + } + + r.listenerUpdateRecvd = false + + if r.routeConfigWatcher != nil { + r.routeConfigWatcher.stop() + } + r.rdsResourceName = "" + r.currentVirtualHost = nil + r.routeConfigUpdateRecvd = false + r.routeConfigWatcher = nil + + r.onResourceNotFound() +} + +// Only executed in the context of a serializer callback. +func (r *xdsResolver) onRouteConfigResourceUpdate(name string, update xdsresource.RouteConfigUpdate) { + if r.logger.V(2) { + r.logger.Infof("Received update for RouteConfiguration resource %q: %v", name, pretty.ToJSON(update)) + } + + if r.rdsResourceName != name { + // Drop updates from canceled watchers. + return + } + + r.applyRouteConfigUpdate(update) +} + +// Only executed in the context of a serializer callback. +func (r *xdsResolver) onRouteConfigResourceError(name string, err error) { + if r.logger.V(2) { + r.logger.Infof("Received error for RouteConfiguration resource %q: %v", name, err) + } + r.onError(err) +} + +// Only executed in the context of a serializer callback. +func (r *xdsResolver) onRouteConfigResourceNotFound(name string) { + if r.logger.V(2) { + r.logger.Infof("Received resource-not-found-error for RouteConfiguration resource %q", name) + } + + if r.rdsResourceName != name { + return + } + r.onResourceNotFound() +} + +// Only executed in the context of a serializer callback. 
+func (r *xdsResolver) onClusterRefDownToZero() { + r.sendNewServiceConfig(r.curConfigSelector) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go new file mode 100644 index 0000000000..92d07e7fb6 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go @@ -0,0 +1,189 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package server + +import ( + "fmt" + "net" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/credentials/tls/certprovider" + xdsinternal "google.golang.org/grpc/internal/credentials/xds" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// connWrapper is a thin wrapper around a net.Conn returned by Accept(). It +// provides the following additional functionality: +// 1. A way to retrieve the configured deadline. This is required by the +// ServerHandshake() method of the xdsCredentials when it attempts to read +// key material from the certificate providers. +// 2. Implements the XDSHandshakeInfo() method used by the xdsCredentials to +// retrieve the configured certificate providers. +// 3. xDS filter_chain configuration determines security configuration. +// 4. Dynamically reads routing configuration in UsableRouteConfiguration(), called +// to process incoming RPC's. (LDS + RDS configuration). +type connWrapper struct { + net.Conn + + // The specific filter chain picked for handling this connection. + filterChain *xdsresource.FilterChain + + // A reference to the listenerWrapper on which this connection was accepted. + parent *listenerWrapper + + // The certificate providers created for this connection. + rootProvider, identityProvider certprovider.Provider + + // The connection deadline as configured by the grpc.Server on the rawConn + // that is returned by a call to Accept(). This is set to the connection + // timeout value configured by the user (or to a default value) before + // initiating the transport credential handshake, and set to zero after + // completing the HTTP2 handshake. + deadlineMu sync.Mutex + deadline time.Time + + mu sync.Mutex + st transport.ServerTransport + draining bool + + // The virtual hosts with matchable routes and instantiated HTTP Filters per + // route, or an error. + urc *atomic.Pointer[xdsresource.UsableRouteConfiguration] +} + +// UsableRouteConfiguration returns the UsableRouteConfiguration to be used for +// server side routing. +func (c *connWrapper) UsableRouteConfiguration() xdsresource.UsableRouteConfiguration { + return *c.urc.Load() +} + +// SetDeadline makes a copy of the passed in deadline and forwards the call to +// the underlying rawConn. +func (c *connWrapper) SetDeadline(t time.Time) error { + c.deadlineMu.Lock() + c.deadline = t + c.deadlineMu.Unlock() + return c.Conn.SetDeadline(t) +} + +// GetDeadline returns the configured deadline. 
This will be invoked by the +// ServerHandshake() method of the XdsCredentials, which needs a deadline to +// pass to the certificate provider. +func (c *connWrapper) GetDeadline() time.Time { + c.deadlineMu.Lock() + t := c.deadline + c.deadlineMu.Unlock() + return t +} + +// XDSHandshakeInfo returns a HandshakeInfo with appropriate security +// configuration for this connection. This method is invoked by the +// ServerHandshake() method of the XdsCredentials. +func (c *connWrapper) XDSHandshakeInfo() (*xdsinternal.HandshakeInfo, error) { + if c.filterChain.SecurityCfg == nil { + // If the security config is empty, this means that the control plane + // did not provide any security configuration and therefore we should + // return an empty HandshakeInfo here so that the xdsCreds can use the + // configured fallback credentials. + return xdsinternal.NewHandshakeInfo(nil, nil, nil, false), nil + } + + cpc := c.parent.xdsC.BootstrapConfig().CertProviderConfigs() + // Identity provider name is mandatory on the server-side, and this is + // enforced when the resource is received at the XDSClient layer. + secCfg := c.filterChain.SecurityCfg + ip, err := buildProviderFunc(cpc, secCfg.IdentityInstanceName, secCfg.IdentityCertName, true, false) + if err != nil { + return nil, err + } + // Root provider name is optional and required only when doing mTLS. + var rp certprovider.Provider + if instance, cert := secCfg.RootInstanceName, secCfg.RootCertName; instance != "" { + rp, err = buildProviderFunc(cpc, instance, cert, false, true) + if err != nil { + return nil, err + } + } + c.identityProvider = ip + c.rootProvider = rp + + return xdsinternal.NewHandshakeInfo(c.rootProvider, c.identityProvider, nil, secCfg.RequireClientCert), nil +} + +// PassServerTransport drains the passed in ServerTransport if draining is set, +// or persists it to be drained once drained is called. +func (c *connWrapper) PassServerTransport(st transport.ServerTransport) { + c.mu.Lock() + defer c.mu.Unlock() + if c.draining { + st.Drain("draining") + } else { + c.st = st + } +} + +// Drain drains the associated ServerTransport, or sets draining to true so it +// will be drained after it is created. +func (c *connWrapper) Drain() { + c.mu.Lock() + defer c.mu.Unlock() + if c.st == nil { + c.draining = true + } else { + c.st.Drain("draining") + } +} + +// Close closes the providers and the underlying connection. +func (c *connWrapper) Close() error { + if c.identityProvider != nil { + c.identityProvider.Close() + } + if c.rootProvider != nil { + c.rootProvider.Close() + } + c.parent.removeConn(c) + return c.Conn.Close() +} + +func buildProviderFunc(configs map[string]*certprovider.BuildableConfig, instanceName, certName string, wantIdentity, wantRoot bool) (certprovider.Provider, error) { + cfg, ok := configs[instanceName] + if !ok { + // Defensive programming. If a resource received from the management + // server contains a certificate provider instance name that is not + // found in the bootstrap, the resource is NACKed by the xDS client. + return nil, fmt.Errorf("certificate provider instance %q not found in bootstrap file", instanceName) + } + provider, err := cfg.Build(certprovider.BuildOptions{ + CertName: certName, + WantIdentity: wantIdentity, + WantRoot: wantRoot, + }) + if err != nil { + // This error is not expected since the bootstrap process parses the + // config and makes sure that it is acceptable to the plugin. 
Still, it + // is possible that the plugin parses the config successfully, but its + // Build() method errors out. + return nil, fmt.Errorf("failed to get security plugin instance (%+v): %v", cfg, err) + } + return provider, nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go new file mode 100644 index 0000000000..09d320018a --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go @@ -0,0 +1,454 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package server contains internal server-side functionality used by the public +// facing xds package. +package server + +import ( + "fmt" + "net" + "sync" + "time" + + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + internalbackoff "google.golang.org/grpc/internal/backoff" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +var ( + logger = grpclog.Component("xds") + + // Backoff strategy for temporary errors received from Accept(). If this + // needs to be configurable, we can inject it through ListenerWrapperParams. + bs = internalbackoff.Exponential{Config: backoff.Config{ + BaseDelay: 5 * time.Millisecond, + Multiplier: 2.0, + MaxDelay: 1 * time.Second, + }} + backoffFunc = bs.Backoff +) + +// ServingModeCallback is the callback that users can register to get notified +// about the server's serving mode changes. The callback is invoked with the +// address of the listener and its new mode. The err parameter is set to a +// non-nil error if the server has transitioned into not-serving mode. +type ServingModeCallback func(addr net.Addr, mode connectivity.ServingMode, err error) + +// XDSClient wraps the methods on the XDSClient which are required by +// the listenerWrapper. +type XDSClient interface { + WatchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) (cancel func()) + BootstrapConfig() *bootstrap.Config +} + +// ListenerWrapperParams wraps parameters required to create a listenerWrapper. +type ListenerWrapperParams struct { + // Listener is the net.Listener passed by the user that is to be wrapped. + Listener net.Listener + // ListenerResourceName is the xDS Listener resource to request. + ListenerResourceName string + // XDSClient provides the functionality from the XDSClient required here. + XDSClient XDSClient + // ModeCallback is the callback to invoke when the serving mode changes. + ModeCallback ServingModeCallback +} + +// NewListenerWrapper creates a new listenerWrapper with params. It returns a +// net.Listener. It starts in state not serving, which triggers Accept() + +// Close() on any incoming connections. +// +// Only TCP listeners are supported. 
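+//
+// A minimal usage sketch (illustrative only; the net.Listener, the XDSClient
+// implementation and the resource name shown here are assumed to be provided
+// by the caller):
+//
+//	lis, _ := net.Listen("tcp", ":50051")
+//	lw := NewListenerWrapper(ListenerWrapperParams{
+//		Listener:             lis,
+//		ListenerResourceName: "example-listener-resource",
+//		XDSClient:            xdsC,
+//		ModeCallback: func(addr net.Addr, mode connectivity.ServingMode, err error) {
+//			// React to serving-mode changes here.
+//		},
+//	})
+//	defer lw.Close()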
+func NewListenerWrapper(params ListenerWrapperParams) net.Listener { + lw := &listenerWrapper{ + Listener: params.Listener, + name: params.ListenerResourceName, + xdsC: params.XDSClient, + modeCallback: params.ModeCallback, + isUnspecifiedAddr: params.Listener.Addr().(*net.TCPAddr).IP.IsUnspecified(), + conns: make(map[*connWrapper]bool), + + mode: connectivity.ServingModeNotServing, + closed: grpcsync.NewEvent(), + } + lw.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[xds-server-listener %p] ", lw)) + + // Serve() verifies that Addr() returns a valid TCPAddr. So, it is safe to + // ignore the error from SplitHostPort(). + lisAddr := lw.Listener.Addr().String() + lw.addr, lw.port, _ = net.SplitHostPort(lisAddr) + + lw.rdsHandler = newRDSHandler(lw.handleRDSUpdate, lw.xdsC, lw.logger) + lw.cancelWatch = xdsresource.WatchListener(lw.xdsC, lw.name, &ldsWatcher{ + parent: lw, + logger: lw.logger, + name: lw.name, + }) + return lw +} + +// listenerWrapper wraps the net.Listener associated with the listening address +// passed to Serve(). It also contains all other state associated with this +// particular invocation of Serve(). +type listenerWrapper struct { + net.Listener + logger *internalgrpclog.PrefixLogger + + name string + xdsC XDSClient + cancelWatch func() + modeCallback ServingModeCallback + + // Set to true if the listener is bound to the IP_ANY address (which is + // "0.0.0.0" for IPv4 and "::" for IPv6). + isUnspecifiedAddr bool + // Listening address and port. Used to validate the socket address in the + // Listener resource received from the control plane. + addr, port string + + // A small race exists in the XDSClient code between the receipt of an xDS + // response and the user cancelling the associated watch. In this window, + // the registered callback may be invoked after the watch is canceled, and + // the user is expected to work around this. This event signifies that the + // listener is closed (and hence the watch is cancelled), and we drop any + // updates received in the callback if this event has fired. + closed *grpcsync.Event + + // mu guards access to the current serving mode and the active filter chain + // manager. + mu sync.Mutex + // Current serving mode. + mode connectivity.ServingMode + // Filter chain manager currently serving. + activeFilterChainManager *xdsresource.FilterChainManager + // conns accepted with configuration from activeFilterChainManager. + conns map[*connWrapper]bool + + // These fields are read/written to in the context of xDS updates, which are + // guaranteed to be emitted synchronously from the xDS Client. Thus, they do + // not need further synchronization. + + // Pending filter chain manager. Will go active once rdsHandler has received + // all the RDS resources this filter chain manager needs. + pendingFilterChainManager *xdsresource.FilterChainManager + + // rdsHandler is used for any dynamic RDS resources specified in a LDS + // update. + rdsHandler *rdsHandler +} + +func (l *listenerWrapper) handleLDSUpdate(update xdsresource.ListenerUpdate) { + ilc := update.InboundListenerCfg + // Make sure that the socket address on the received Listener resource + // matches the address of the net.Listener passed to us by the user. This + // check is done here instead of at the XDSClient layer because of the + // following couple of reasons: + // - XDSClient cannot know the listening address of every listener in the + // system, and hence cannot perform this check. 
+	// - this is a very context-dependent check and only the server has the
+	//   appropriate context to perform this check.
+	//
+	// What this means is that the XDSClient has ACKed a resource which can push
+	// the server into a "not serving" mode. This is not ideal, but this is
+	// what we have decided to do.
+	if ilc.Address != l.addr || ilc.Port != l.port {
+		l.mu.Lock()
+		l.switchModeLocked(connectivity.ServingModeNotServing, fmt.Errorf("address (%s:%s) in Listener update does not match listening address: (%s:%s)", ilc.Address, ilc.Port, l.addr, l.port))
+		l.mu.Unlock()
+		return
+	}
+
+	l.pendingFilterChainManager = ilc.FilterChains
+	l.rdsHandler.updateRouteNamesToWatch(ilc.FilterChains.RouteConfigNames)
+
+	if l.rdsHandler.determineRouteConfigurationReady() {
+		l.maybeUpdateFilterChains()
+	}
+}
+
+// maybeUpdateFilterChains swaps in the pending filter chain manager to the
+// active one if the pending filter chain manager is present. If a swap occurs,
+// it also drains (gracefully stops) any connections that were accepted on the
+// old active filter chain manager, and puts this listener in state SERVING.
+// Must be called within an xDS Client Callback.
+func (l *listenerWrapper) maybeUpdateFilterChains() {
+	if l.pendingFilterChainManager == nil {
+		// Nothing to update, return early.
+		return
+	}
+
+	l.mu.Lock()
+	l.switchModeLocked(connectivity.ServingModeServing, nil)
+	// "Updates to a Listener cause all older connections on that Listener to be
+	// gracefully shut down with a grace period of 10 minutes for long-lived
+	// RPC's, such that clients will reconnect and have the updated
+	// configuration apply." - A36
+	connsToClose := l.conns
+	l.conns = make(map[*connWrapper]bool)
+	l.activeFilterChainManager = l.pendingFilterChainManager
+	l.pendingFilterChainManager = nil
+	l.instantiateFilterChainRoutingConfigurationsLocked()
+	l.mu.Unlock()
+	go func() {
+		for conn := range connsToClose {
+			conn.Drain()
+		}
+	}()
+
+}
+
+// handleRDSUpdate rebuilds any routing configuration server side for any filter
+// chains that point to this RDS, and potentially swaps the pending LDS
+// configuration to active.
+func (l *listenerWrapper) handleRDSUpdate(routeName string, rcu rdsWatcherUpdate) {
+	// Update any filter chains that point to this route configuration.
+	if l.activeFilterChainManager != nil {
+		for _, fc := range l.activeFilterChainManager.FilterChains() {
+			if fc.RouteConfigName != routeName {
+				continue
+			}
+			if rcu.err != nil && rcu.data == nil { // Either NACK before update, or resource not found triggers this conditional.
+				fc.UsableRouteConfiguration.Store(&xdsresource.UsableRouteConfiguration{
+					Err: rcu.err,
+				})
+				continue
+			}
+			fc.UsableRouteConfiguration.Store(fc.ConstructUsableRouteConfiguration(*rcu.data))
+		}
+	}
+	if l.rdsHandler.determineRouteConfigurationReady() {
+		l.maybeUpdateFilterChains()
+	}
+}
+
+// instantiateFilterChainRoutingConfigurationsLocked instantiates all of the
+// routing configuration for the newly active filter chains. For any inline
+// route configuration, uses that; otherwise uses cached rdsHandler updates.
+// Must be called within an xDS Client Callback.
+func (l *listenerWrapper) instantiateFilterChainRoutingConfigurationsLocked() {
+	for _, fc := range l.activeFilterChainManager.FilterChains() {
+		if fc.InlineRouteConfig != nil {
+			fc.UsableRouteConfiguration.Store(fc.ConstructUsableRouteConfiguration(*fc.InlineRouteConfig)) // Can't race with an RPC coming in but no harm making atomic.
+ continue + } // Inline configuration constructed once here, will remain for lifetime of filter chain. + rcu := l.rdsHandler.updates[fc.RouteConfigName] + if rcu.err != nil && rcu.data == nil { + fc.UsableRouteConfiguration.Store(&xdsresource.UsableRouteConfiguration{Err: rcu.err}) + continue + } + fc.UsableRouteConfiguration.Store(fc.ConstructUsableRouteConfiguration(*rcu.data)) // Can't race with an RPC coming in but no harm making atomic. + } +} + +// Accept blocks on an Accept() on the underlying listener, and wraps the +// returned net.connWrapper with the configured certificate providers. +func (l *listenerWrapper) Accept() (net.Conn, error) { + var retries int + for { + conn, err := l.Listener.Accept() + if err != nil { + // Temporary() method is implemented by certain error types returned + // from the net package, and it is useful for us to not shutdown the + // server in these conditions. The listen queue being full is one + // such case. + if ne, ok := err.(interface{ Temporary() bool }); !ok || !ne.Temporary() { + return nil, err + } + retries++ + timer := time.NewTimer(backoffFunc(retries)) + select { + case <-timer.C: + case <-l.closed.Done(): + timer.Stop() + // Continuing here will cause us to call Accept() again + // which will return a non-temporary error. + continue + } + continue + } + // Reset retries after a successful Accept(). + retries = 0 + + // Since the net.Conn represents an incoming connection, the source and + // destination address can be retrieved from the local address and + // remote address of the net.Conn respectively. + destAddr, ok1 := conn.LocalAddr().(*net.TCPAddr) + srcAddr, ok2 := conn.RemoteAddr().(*net.TCPAddr) + if !ok1 || !ok2 { + // If the incoming connection is not a TCP connection, which is + // really unexpected since we check whether the provided listener is + // a TCP listener in Serve(), we return an error which would cause + // us to stop serving. + return nil, fmt.Errorf("received connection with non-TCP address (local: %T, remote %T)", conn.LocalAddr(), conn.RemoteAddr()) + } + + l.mu.Lock() + if l.mode == connectivity.ServingModeNotServing { + // Close connections as soon as we accept them when we are in + // "not-serving" mode. Since we accept a net.Listener from the user + // in Serve(), we cannot close the listener when we move to + // "not-serving". Closing the connection immediately upon accepting + // is one of the other ways to implement the "not-serving" mode as + // outlined in gRFC A36. + l.mu.Unlock() + conn.Close() + continue + } + + fc, err := l.activeFilterChainManager.Lookup(xdsresource.FilterChainLookupParams{ + IsUnspecifiedListener: l.isUnspecifiedAddr, + DestAddr: destAddr.IP, + SourceAddr: srcAddr.IP, + SourcePort: srcAddr.Port, + }) + if err != nil { + l.mu.Unlock() + // When a matching filter chain is not found, we close the + // connection right away, but do not return an error back to + // `grpc.Serve()` from where this Accept() was invoked. Returning an + // error to `grpc.Serve()` causes the server to shutdown. If we want + // to avoid the server from shutting down, we would need to return + // an error type which implements the `Temporary() bool` method, + // which is invoked by `grpc.Serve()` to see if the returned error + // represents a temporary condition. In the case of a temporary + // error, `grpc.Serve()` method sleeps for a small duration and + // therefore ends up blocking all connection attempts during that + // time frame, which is also not ideal for an error like this. 
+			l.logger.Warningf("Connection from %s to %s failed to find any matching filter chain", conn.RemoteAddr(), conn.LocalAddr())
+			conn.Close()
+			continue
+		}
+		cw := &connWrapper{Conn: conn, filterChain: fc, parent: l, urc: fc.UsableRouteConfiguration}
+		l.conns[cw] = true
+		l.mu.Unlock()
+		return cw, nil
+	}
+}
+
+func (l *listenerWrapper) removeConn(conn *connWrapper) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	delete(l.conns, conn)
+}
+
+// Close closes the underlying listener. It also cancels the xDS watch
+// registered in Serve() and closes any certificate provider instances created
+// based on security configuration received in the LDS response.
+func (l *listenerWrapper) Close() error {
+	l.closed.Fire()
+	l.Listener.Close()
+	if l.cancelWatch != nil {
+		l.cancelWatch()
+	}
+	l.rdsHandler.close()
+	return nil
+}
+
+// switchModeLocked switches the current mode of the listener wrapper. It also
+// gracefully closes any connections if the listener wrapper transitions into
+// not serving. If the serving mode has changed, it invokes the registered mode
+// change callback.
+func (l *listenerWrapper) switchModeLocked(newMode connectivity.ServingMode, err error) {
+	if l.mode == newMode && l.mode == connectivity.ServingModeServing {
+		// Redundant updates are suppressed only when we are SERVING and the new
+		// mode is also SERVING. In the other case (where we are NOT_SERVING and the
+		// new mode is also NOT_SERVING), the update is not suppressed as:
+		// 1. the error may have changed
+		// 2. it provides a timestamp of the last backoff attempt
+		return
+	}
+	l.mode = newMode
+	if l.mode == connectivity.ServingModeNotServing {
+		connsToClose := l.conns
+		l.conns = make(map[*connWrapper]bool)
+		go func() {
+			for conn := range connsToClose {
+				conn.Drain()
+			}
+		}()
+	}
+	// The XdsServer API will allow applications to register a "serving state"
+	// callback to be invoked when the server begins serving and when the
+	// server encounters errors that force it to be "not serving". If "not
+	// serving", the callback must be provided error information, for
+	// debugging use by developers - A36.
+	if l.modeCallback != nil {
+		l.modeCallback(l.Listener.Addr(), newMode, err)
+	}
+}
+
+func (l *listenerWrapper) onLDSResourceDoesNotExist(err error) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	l.switchModeLocked(connectivity.ServingModeNotServing, err)
+	l.activeFilterChainManager = nil
+	l.pendingFilterChainManager = nil
+	l.rdsHandler.updateRouteNamesToWatch(make(map[string]bool))
+}
+
+// ldsWatcher implements the xdsresource.ListenerWatcher interface and is
+// passed to the WatchListener API.
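+// Callbacks delivered after the wrapped listener has been closed are dropped.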
+type ldsWatcher struct { + parent *listenerWrapper + logger *internalgrpclog.PrefixLogger + name string +} + +func (lw *ldsWatcher) OnUpdate(update *xdsresource.ListenerResourceData, onDone xdsresource.OnDoneFunc) { + defer onDone() + if lw.parent.closed.HasFired() { + lw.logger.Warningf("Resource %q received update: %#v after listener was closed", lw.name, update) + return + } + if lw.logger.V(2) { + lw.logger.Infof("LDS watch for resource %q received update: %#v", lw.name, update.Resource) + } + lw.parent.handleLDSUpdate(update.Resource) +} + +func (lw *ldsWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) { + defer onDone() + if lw.parent.closed.HasFired() { + lw.logger.Warningf("Resource %q received error: %v after listener was closed", lw.name, err) + return + } + if lw.logger.V(2) { + lw.logger.Infof("LDS watch for resource %q reported error: %v", lw.name, err) + } + // For errors which are anything other than "resource-not-found", we + // continue to use the old configuration. +} + +func (lw *ldsWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) { + defer onDone() + if lw.parent.closed.HasFired() { + lw.logger.Warningf("Resource %q received resource-does-not-exist error after listener was closed", lw.name) + return + } + if lw.logger.V(2) { + lw.logger.Infof("LDS watch for resource %q reported resource-does-not-exist error", lw.name) + } + + err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource name %q of type Listener not found in received response", lw.name) + lw.parent.onLDSResourceDoesNotExist(err) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go b/vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go new file mode 100644 index 0000000000..bcd3938e6f --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go @@ -0,0 +1,191 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package server + +import ( + "sync" + + igrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// rdsHandler handles any RDS queries that need to be started for a given server +// side listeners Filter Chains (i.e. not inline). It persists rdsWatcher +// updates for later use and also determines whether all the rdsWatcher updates +// needed have been received or not. +type rdsHandler struct { + xdsC XDSClient + logger *igrpclog.PrefixLogger + + callback func(string, rdsWatcherUpdate) + + // updates is a map from routeName to rdsWatcher update, including + // RouteConfiguration resources and any errors received. If not written in + // this map, no RouteConfiguration or error for that route name yet. If + // update set in value, use that as valid route configuration, otherwise + // treat as an error case and fail at L7 level. 
+ updates map[string]rdsWatcherUpdate + + mu sync.Mutex + cancels map[string]func() +} + +// newRDSHandler creates a new rdsHandler to watch for RouteConfiguration +// resources. listenerWrapper updates the list of route names to watch by +// calling updateRouteNamesToWatch() upon receipt of new Listener configuration. +func newRDSHandler(cb func(string, rdsWatcherUpdate), xdsC XDSClient, logger *igrpclog.PrefixLogger) *rdsHandler { + return &rdsHandler{ + xdsC: xdsC, + logger: logger, + callback: cb, + updates: make(map[string]rdsWatcherUpdate), + cancels: make(map[string]func()), + } +} + +// updateRouteNamesToWatch handles a list of route names to watch for a given +// server side listener (if a filter chain specifies dynamic +// RouteConfiguration). This function handles all the logic with respect to any +// routes that may have been added or deleted as compared to what was previously +// present. Must be called within an xDS Client callback. +func (rh *rdsHandler) updateRouteNamesToWatch(routeNamesToWatch map[string]bool) { + rh.mu.Lock() + defer rh.mu.Unlock() + // Add and start watches for any new routes in routeNamesToWatch. + for routeName := range routeNamesToWatch { + if _, ok := rh.cancels[routeName]; !ok { + // The xDS client keeps a reference to the watcher until the cancel + // func is invoked. So, we don't need to keep a reference for fear + // of it being garbage collected. + w := &rdsWatcher{parent: rh, routeName: routeName} + cancel := xdsresource.WatchRouteConfig(rh.xdsC, routeName, w) + // Set bit on cancel function to eat any RouteConfiguration calls + // for this watcher after it has been canceled. + rh.cancels[routeName] = func() { + w.mu.Lock() + w.canceled = true + w.mu.Unlock() + cancel() + } + } + } + + // Delete and cancel watches for any routes from persisted routeNamesToWatch + // that are no longer present. + for routeName := range rh.cancels { + if _, ok := routeNamesToWatch[routeName]; !ok { + rh.cancels[routeName]() + delete(rh.cancels, routeName) + delete(rh.updates, routeName) + } + } +} + +// determines if all dynamic RouteConfiguration needed has received +// configuration or update. Must be called from an xDS Client Callback. +func (rh *rdsHandler) determineRouteConfigurationReady() bool { + // Safe to read cancels because only written to in other parts of xDS Client + // Callbacks, which are sync. + return len(rh.updates) == len(rh.cancels) +} + +// Must be called from an xDS Client Callback. +func (rh *rdsHandler) handleRouteUpdate(routeName string, update rdsWatcherUpdate) { + rwu := rh.updates[routeName] + + // Accept the new update if any of the following are true: + // 1. we had no valid update data. + // 2. the update is valid. + // 3. the update error is ResourceNotFound. + if rwu.data == nil || update.err == nil || xdsresource.ErrType(update.err) == xdsresource.ErrorTypeResourceNotFound { + rwu = update + } + rh.updates[routeName] = rwu + rh.callback(routeName, rwu) +} + +// close() is meant to be called by wrapped listener when the wrapped listener +// is closed, and it cleans up resources by canceling all the active RDS +// watches. +func (rh *rdsHandler) close() { + rh.mu.Lock() + defer rh.mu.Unlock() + for _, cancel := range rh.cancels { + cancel() + } +} + +type rdsWatcherUpdate struct { + data *xdsresource.RouteConfigUpdate + err error +} + +// rdsWatcher implements the xdsresource.RouteConfigWatcher interface and is +// passed to the WatchRouteConfig API. 
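+// Callbacks delivered after the corresponding watch has been canceled are
+// dropped, using the canceled flag below.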
+type rdsWatcher struct {
+	parent    *rdsHandler
+	logger    *igrpclog.PrefixLogger
+	routeName string
+
+	mu       sync.Mutex
+	canceled bool // eats callbacks if true
+}
+
+func (rw *rdsWatcher) OnUpdate(update *xdsresource.RouteConfigResourceData, onDone xdsresource.OnDoneFunc) {
+	defer onDone()
+	rw.mu.Lock()
+	if rw.canceled {
+		rw.mu.Unlock()
+		return
+	}
+	rw.mu.Unlock()
+	if rw.logger.V(2) {
+		rw.logger.Infof("RDS watch for resource %q received update: %#v", rw.routeName, update.Resource)
+	}
+	rw.parent.handleRouteUpdate(rw.routeName, rdsWatcherUpdate{data: &update.Resource})
+}
+
+func (rw *rdsWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) {
+	defer onDone()
+	rw.mu.Lock()
+	if rw.canceled {
+		rw.mu.Unlock()
+		return
+	}
+	rw.mu.Unlock()
+	if rw.logger.V(2) {
+		rw.logger.Infof("RDS watch for resource %q reported error: %v", rw.routeName, err)
+	}
+	rw.parent.handleRouteUpdate(rw.routeName, rdsWatcherUpdate{err: err})
+}
+
+func (rw *rdsWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) {
+	defer onDone()
+	rw.mu.Lock()
+	if rw.canceled {
+		rw.mu.Unlock()
+		return
+	}
+	rw.mu.Unlock()
+	if rw.logger.V(2) {
+		rw.logger.Infof("RDS watch for resource %q reported resource-does-not-exist error", rw.routeName)
+	}
+	err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource name %q of type RouteConfiguration not found in received response", rw.routeName)
+	rw.parent.handleRouteUpdate(rw.routeName, rdsWatcherUpdate{err: err})
+}
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/attributes.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/attributes.go
new file mode 100644
index 0000000000..9076a76fd0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/attributes.go
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package xdsclient
+
+import "google.golang.org/grpc/resolver"
+
+type clientKeyType string
+
+const clientKey = clientKeyType("grpc.xds.internal.client.Client")
+
+// FromResolverState returns the Client from state, or nil if not present.
+func FromResolverState(state resolver.State) XDSClient {
+	cs, _ := state.Attributes.Value(clientKey).(XDSClient)
+	return cs
+}
+
+// SetClient sets c in state and returns the new state.
+func SetClient(state resolver.State, c XDSClient) resolver.State {
+	state.Attributes = state.Attributes.WithValue(clientKey, c)
+	return state
+}
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go
new file mode 100644
index 0000000000..3251737f18
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go
@@ -0,0 +1,703 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsclient + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/grpc/xds/internal/xdsclient/transport" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/timestamppb" + + v3adminpb "github.com/envoyproxy/go-control-plane/envoy/admin/v3" + v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" +) + +type watchState int + +const ( + watchStateStarted watchState = iota // Watch started, request not yet set. + watchStateRequested // Request sent for resource being watched. + watchStateReceived // Response received for resource being watched. + watchStateTimeout // Watch timer expired, no response. + watchStateCanceled // Watch cancelled. +) + +type resourceState struct { + watchers map[xdsresource.ResourceWatcher]bool // Set of watchers for this resource + cache xdsresource.ResourceData // Most recent ACKed update for this resource + md xdsresource.UpdateMetadata // Metadata for the most recent update + deletionIgnored bool // True if resource deletion was ignored for a prior update + + // Common watch state for all watchers of this resource. + wTimer *time.Timer // Expiry timer + wState watchState // State of the watch +} + +// authority wraps all state associated with a single management server. It +// contains the transport used to communicate with the management server and a +// cache of resource state for resources requested from the management server. +// +// Bootstrap configuration could contain multiple entries in the authorities map +// that share the same server config (server address and credentials to use). We +// share the same authority instance amongst these entries, and the reference +// counting is taken care of by the `clientImpl` type. +type authority struct { + serverCfg *bootstrap.ServerConfig // Server config for this authority + bootstrapCfg *bootstrap.Config // Full bootstrap configuration + refCount int // Reference count of watches referring to this authority + serializer *grpcsync.CallbackSerializer // Callback serializer for invoking watch callbacks + resourceTypeGetter func(string) xdsresource.Type // ResourceType registry lookup + transport *transport.Transport // Underlying xDS transport to the management server + watchExpiryTimeout time.Duration // Resource watch expiry timeout + logger *grpclog.PrefixLogger + + // A two level map containing the state of all the resources being watched. + // + // The first level map key is the ResourceType (Listener, Route etc). This + // allows us to have a single map for all resources instead of having per + // resource-type maps. + // + // The second level map key is the resource name, with the value being the + // actual state of the resource. 
+ resourcesMu sync.Mutex + resources map[xdsresource.Type]map[string]*resourceState + closed bool +} + +// authorityArgs is a convenience struct to wrap arguments required to create a +// new authority. All fields here correspond directly to appropriate fields +// stored in the authority struct. +type authorityArgs struct { + // The reason for passing server config and bootstrap config separately + // (although the former is part of the latter) is because authorities in the + // bootstrap config might contain an empty server config, and in this case, + // the top-level server config is to be used. + serverCfg *bootstrap.ServerConfig + bootstrapCfg *bootstrap.Config + serializer *grpcsync.CallbackSerializer + resourceTypeGetter func(string) xdsresource.Type + watchExpiryTimeout time.Duration + logger *grpclog.PrefixLogger +} + +func newAuthority(args authorityArgs) (*authority, error) { + ret := &authority{ + serverCfg: args.serverCfg, + bootstrapCfg: args.bootstrapCfg, + serializer: args.serializer, + resourceTypeGetter: args.resourceTypeGetter, + watchExpiryTimeout: args.watchExpiryTimeout, + logger: args.logger, + resources: make(map[xdsresource.Type]map[string]*resourceState), + } + + tr, err := transport.New(transport.Options{ + ServerCfg: args.serverCfg, + OnRecvHandler: ret.handleResourceUpdate, + OnErrorHandler: ret.newConnectionError, + OnSendHandler: ret.transportOnSendHandler, + Logger: args.logger, + NodeProto: args.bootstrapCfg.Node(), + }) + if err != nil { + return nil, fmt.Errorf("creating new transport to %q: %v", args.serverCfg, err) + } + ret.transport = tr + return ret, nil +} + +// transportOnSendHandler is called by the underlying transport when it sends a +// resource request successfully. Timers are activated for resources waiting for +// a response. +func (a *authority) transportOnSendHandler(u *transport.ResourceSendInfo) { + rType := a.resourceTypeGetter(u.URL) + // Resource type not found is not expected under normal circumstances, since + // the resource type url passed to the transport is determined by the authority. + if rType == nil { + a.logger.Warningf("Unknown resource type url: %s.", u.URL) + return + } + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + a.startWatchTimersLocked(rType, u.ResourceNames) +} + +func (a *authority) handleResourceUpdate(resourceUpdate transport.ResourceUpdate, onDone func()) error { + rType := a.resourceTypeGetter(resourceUpdate.URL) + if rType == nil { + return xdsresource.NewErrorf(xdsresource.ErrorTypeResourceTypeUnsupported, "Resource URL %v unknown in response from server", resourceUpdate.URL) + } + + opts := &xdsresource.DecodeOptions{ + BootstrapConfig: a.bootstrapCfg, + ServerConfig: a.serverCfg, + } + updates, md, err := decodeAllResources(opts, rType, resourceUpdate) + a.updateResourceStateAndScheduleCallbacks(rType, updates, md, onDone) + return err +} + +func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Type, updates map[string]resourceDataErrTuple, md xdsresource.UpdateMetadata, onDone func()) { + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + + // We build a list of callback funcs to invoke, and invoke them at the end + // of this method instead of inline (when handling the update for a + // particular resource), because we want to make sure that all calls to + // increment watcherCnt happen before any callbacks are invoked. 
This will + // ensure that the onDone callback is never invoked before all watcher + // callbacks are invoked, and the watchers have processed the update. + watcherCnt := new(atomic.Int64) + done := func() { + watcherCnt.Add(-1) + if watcherCnt.Load() == 0 { + onDone() + } + } + funcsToSchedule := []func(context.Context){} + defer func() { + if len(funcsToSchedule) == 0 { + // When there are no watchers for the resources received as part of + // this update, invoke onDone explicitly to unblock the next read on + // the ADS stream. + onDone() + } + for _, f := range funcsToSchedule { + a.serializer.ScheduleOr(f, onDone) + } + }() + + resourceStates := a.resources[rType] + for name, uErr := range updates { + if state, ok := resourceStates[name]; ok { + // Cancel the expiry timer associated with the resource once a + // response is received, irrespective of whether the update is a + // good one or not. + // + // We check for watch states `started` and `requested` here to + // accommodate for a race which can happen in the following + // scenario: + // - When a watch is registered, it is possible that the ADS stream + // is not yet created. In this case, the request for the resource + // is not sent out immediately. An entry in the `resourceStates` + // map is created with a watch state of `started`. + // - Once the stream is created, it is possible that the management + // server might respond with the requested resource before we send + // out request for the same. If we don't check for `started` here, + // and move the state to `received`, we will end up starting the + // timer when the request gets sent out. And since the management + // server already sent us the resource, there is a good chance + // that it will not send it again. This would eventually lead to + // the timer firing, even though we have the resource in the + // cache. + if state.wState == watchStateStarted || state.wState == watchStateRequested { + // It is OK to ignore the return value from Stop() here because + // if the timer has already fired, it means that the timer watch + // expiry callback is blocked on the same lock that we currently + // hold. Since we move the state to `received` here, the timer + // callback will be a no-op. + if state.wTimer != nil { + state.wTimer.Stop() + } + state.wState = watchStateReceived + } + + if uErr.err != nil { + // On error, keep previous version of the resource. But update + // status and error. + state.md.ErrState = md.ErrState + state.md.Status = md.Status + for watcher := range state.watchers { + watcher := watcher + err := uErr.err + watcherCnt.Add(1) + funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnError(err, done) }) + } + continue + } + + if state.deletionIgnored { + state.deletionIgnored = false + a.logger.Infof("A valid update was received for resource %q of type %q after previously ignoring a deletion", name, rType.TypeName()) + } + // Notify watchers only if this is a first time update or it is different + // from the one currently cached. + if state.cache == nil || !state.cache.Equal(uErr.resource) { + for watcher := range state.watchers { + watcher := watcher + resource := uErr.resource + watcherCnt.Add(1) + funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnUpdate(resource, done) }) + } + } + // Sync cache. + if a.logger.V(2) { + a.logger.Infof("Resource type %q with name %q added to cache", rType.TypeName(), name) + } + state.cache = uErr.resource + // Set status to ACK, and clear error state. 
The metadata might be a + // NACK metadata because some other resources in the same response + // are invalid. + state.md = md + state.md.ErrState = nil + state.md.Status = xdsresource.ServiceStatusACKed + if md.ErrState != nil { + state.md.Version = md.ErrState.Version + } + } + } + + // If this resource type requires that all resources be present in every + // SotW response from the server, a response that does not include a + // previously seen resource will be interpreted as a deletion of that + // resource unless ignore_resource_deletion option was set in the server + // config. + if !rType.AllResourcesRequiredInSotW() { + return + } + for name, state := range resourceStates { + if state.cache == nil { + // If the resource state does not contain a cached update, which can + // happen when: + // - resource was newly requested but has not yet been received, or, + // - resource was removed as part of a previous update, + // we don't want to generate an error for the watchers. + // + // For the first of the above two conditions, this ADS response may + // be in reaction to an earlier request that did not yet request the + // new resource, so its absence from the response does not + // necessarily indicate that the resource does not exist. For that + // case, we rely on the request timeout instead. + // + // For the second of the above two conditions, we already generated + // an error when we received the first response which removed this + // resource. So, there is no need to generate another one. + continue + } + if _, ok := updates[name]; !ok { + // The metadata status is set to "ServiceStatusNotExist" if a + // previous update deleted this resource, in which case we do not + // want to repeatedly call the watch callbacks with a + // "resource-not-found" error. + if state.md.Status == xdsresource.ServiceStatusNotExist { + continue + } + // Per A53, resource deletions are ignored if the `ignore_resource_deletion` + // server feature is enabled through the bootstrap configuration. If the + // resource deletion is to be ignored, the resource is not removed from + // the cache and the corresponding OnResourceDoesNotExist() callback is + // not invoked on the watchers. + if a.serverCfg.ServerFeaturesIgnoreResourceDeletion() { + if !state.deletionIgnored { + state.deletionIgnored = true + a.logger.Warningf("Ignoring resource deletion for resource %q of type %q", name, rType.TypeName()) + } + continue + } + // If resource exists in cache, but not in the new update, delete + // the resource from cache, and also send a resource not found error + // to indicate resource removed. Metadata for the resource is still + // maintained, as this is required by CSDS. + state.cache = nil + state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} + for watcher := range state.watchers { + watcher := watcher + watcherCnt.Add(1) + funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnResourceDoesNotExist(done) }) + } + } + } +} + +type resourceDataErrTuple struct { + resource xdsresource.ResourceData + err error +} + +func decodeAllResources(opts *xdsresource.DecodeOptions, rType xdsresource.Type, update transport.ResourceUpdate) (map[string]resourceDataErrTuple, xdsresource.UpdateMetadata, error) { + timestamp := time.Now() + md := xdsresource.UpdateMetadata{ + Version: update.Version, + Timestamp: timestamp, + } + + topLevelErrors := make([]error, 0) // Tracks deserialization errors, where we don't have a resource name. 
+ perResourceErrors := make(map[string]error) // Tracks resource validation errors, where we have a resource name. + ret := make(map[string]resourceDataErrTuple) // Return result, a map from resource name to either resource data or error. + for _, r := range update.Resources { + result, err := rType.Decode(opts, r) + + // Name field of the result is left unpopulated only when resource + // deserialization fails. + name := "" + if result != nil { + name = xdsresource.ParseName(result.Name).String() + } + if err == nil { + ret[name] = resourceDataErrTuple{resource: result.Resource} + continue + } + if name == "" { + topLevelErrors = append(topLevelErrors, err) + continue + } + perResourceErrors[name] = err + // Add place holder in the map so we know this resource name was in + // the response. + ret[name] = resourceDataErrTuple{err: err} + } + + if len(topLevelErrors) == 0 && len(perResourceErrors) == 0 { + md.Status = xdsresource.ServiceStatusACKed + return ret, md, nil + } + + md.Status = xdsresource.ServiceStatusNACKed + errRet := combineErrors(rType.TypeName(), topLevelErrors, perResourceErrors) + md.ErrState = &xdsresource.UpdateErrorMetadata{ + Version: update.Version, + Err: errRet, + Timestamp: timestamp, + } + return ret, md, errRet +} + +// startWatchTimersLocked is invoked upon transport.OnSend() callback with resources +// requested on the underlying ADS stream. This satisfies the conditions to start +// watch timers per A57 [https://github.com/grpc/proposal/blob/master/A57-xds-client-failure-mode-behavior.md#handling-resources-that-do-not-exist] +// +// Caller must hold a.resourcesMu. +func (a *authority) startWatchTimersLocked(rType xdsresource.Type, resourceNames []string) { + resourceStates := a.resources[rType] + for _, resourceName := range resourceNames { + if state, ok := resourceStates[resourceName]; ok { + if state.wState != watchStateStarted { + continue + } + state.wTimer = time.AfterFunc(a.watchExpiryTimeout, func() { + a.resourcesMu.Lock() + a.handleWatchTimerExpiryLocked(rType, resourceName, state) + a.resourcesMu.Unlock() + }) + state.wState = watchStateRequested + } + } +} + +// stopWatchTimersLocked is invoked upon connection errors to stops watch timers +// for resources that have been requested, but not yet responded to by the management +// server. +// +// Caller must hold a.resourcesMu. +func (a *authority) stopWatchTimersLocked() { + for _, rType := range a.resources { + for resourceName, state := range rType { + if state.wState != watchStateRequested { + continue + } + if !state.wTimer.Stop() { + // If the timer has already fired, it means that the timer watch expiry + // callback is blocked on the same lock that we currently hold. Don't change + // the watch state and instead let the watch expiry callback handle it. + a.logger.Warningf("Watch timer for resource %v already fired. Ignoring here.", resourceName) + continue + } + state.wTimer = nil + state.wState = watchStateStarted + } + } +} + +// newConnectionError is called by the underlying transport when it receives a +// connection error. The error will be forwarded to all the resource watchers. +func (a *authority) newConnectionError(err error) { + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + + a.stopWatchTimersLocked() + + // We do not consider it an error if the ADS stream was closed after having received + // a response on the stream. 
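The watch-expiry handling above boils down to one time.AfterFunc per requested resource: the timer is stopped if a response arrives first, and otherwise fires a not-found notification. A small self-contained sketch of that pattern (the resourceWatch type and its fields are illustrative, not the real state struct):

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    type resourceWatch struct {
        mu       sync.Mutex
        received bool
        timer    *time.Timer
    }

    // startExpiryTimer arranges for onNotFound to run if no response is seen
    // within timeout. onResponse cancels the pending timer.
    func (w *resourceWatch) startExpiryTimer(timeout time.Duration, onNotFound func()) {
        w.mu.Lock()
        defer w.mu.Unlock()
        w.timer = time.AfterFunc(timeout, func() {
            w.mu.Lock()
            defer w.mu.Unlock()
            if w.received {
                // A response raced with the timer firing; nothing to do.
                return
            }
            onNotFound()
        })
    }

    func (w *resourceWatch) onResponse() {
        w.mu.Lock()
        defer w.mu.Unlock()
        w.received = true
        if w.timer != nil {
            w.timer.Stop()
        }
    }

    func main() {
        w := &resourceWatch{}
        w.startExpiryTimer(50*time.Millisecond, func() { fmt.Println("resource does not exist") })
        w.onResponse() // simulate the response arriving before expiry
        time.Sleep(100 * time.Millisecond)
        fmt.Println("done")
    }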
This is because there are legitimate reasons why the server + // may need to close the stream during normal operations, such as needing to rebalance + // load or the underlying connection hitting its max connection age limit. + // See gRFC A57 for more details. + if xdsresource.ErrType(err) == xdsresource.ErrTypeStreamFailedAfterRecv { + a.logger.Warningf("Watchers not notified since ADS stream failed after having received at least one response: %v", err) + return + } + + for _, rType := range a.resources { + for _, state := range rType { + // Propagate the connection error from the transport layer to all watchers. + for watcher := range state.watchers { + watcher := watcher + a.serializer.TrySchedule(func(context.Context) { + watcher.OnError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "xds: error received from xDS stream: %v", err), func() {}) + }) + } + } + } +} + +// Increments the reference count. Caller must hold parent's authorityMu. +func (a *authority) refLocked() { + a.refCount++ +} + +// Decrements the reference count. Caller must hold parent's authorityMu. +func (a *authority) unrefLocked() int { + a.refCount-- + return a.refCount +} + +func (a *authority) close() { + a.transport.Close() + + a.resourcesMu.Lock() + a.closed = true + a.resourcesMu.Unlock() +} + +func (a *authority) watchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) func() { + if a.logger.V(2) { + a.logger.Infof("New watch for type %q, resource name %q", rType.TypeName(), resourceName) + } + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + + // Lookup the ResourceType specific resources from the top-level map. If + // there is no entry for this ResourceType, create one. + resources := a.resources[rType] + if resources == nil { + resources = make(map[string]*resourceState) + a.resources[rType] = resources + } + + // Lookup the resourceState for the particular resource that the watch is + // being registered for. If this is the first watch for this resource, + // instruct the transport layer to send a DiscoveryRequest for the same. + state := resources[resourceName] + if state == nil { + if a.logger.V(2) { + a.logger.Infof("First watch for type %q, resource name %q", rType.TypeName(), resourceName) + } + state = &resourceState{ + watchers: make(map[xdsresource.ResourceWatcher]bool), + md: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}, + wState: watchStateStarted, + } + resources[resourceName] = state + a.sendDiscoveryRequestLocked(rType, resources) + } + // Always add the new watcher to the set of watchers. + state.watchers[watcher] = true + + // If we have a cached copy of the resource, notify the new watcher. + if state.cache != nil { + if a.logger.V(2) { + a.logger.Infof("Resource type %q with resource name %q found in cache: %s", rType.TypeName(), resourceName, state.cache.ToJSON()) + } + resource := state.cache + a.serializer.TrySchedule(func(context.Context) { watcher.OnUpdate(resource, func() {}) }) + } + + return func() { + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + + // We already have a reference to the resourceState for this particular + // resource. Avoid indexing into the two-level map to figure this out. + + // Delete this particular watcher from the list of watchers, so that its + // callback will not be invoked in the future. 
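The cancel function returned by watchResource above captures the per-resource state directly, so canceling removes just that watcher and tears the state down only when the last watcher goes away. A stripped-down sketch of the same shape (registry, Watcher, and the onChange callback are illustrative names):

    package main

    import (
        "fmt"
        "sync"
    )

    type Watcher interface{ OnUpdate(string) }

    type registry struct {
        mu       sync.Mutex
        watchers map[string]map[Watcher]bool
        onChange func(names []string) // e.g. re-send the requested resource name list
    }

    // watch registers w for name and returns a cancel func that unregisters it,
    // dropping the per-resource entry when the last watcher is removed.
    func (r *registry) watch(name string, w Watcher) func() {
        r.mu.Lock()
        defer r.mu.Unlock()
        set := r.watchers[name]
        if set == nil {
            set = make(map[Watcher]bool)
            r.watchers[name] = set
            r.onChange(r.names())
        }
        set[w] = true
        return func() {
            r.mu.Lock()
            defer r.mu.Unlock()
            delete(set, w)
            if len(set) == 0 {
                delete(r.watchers, name)
                r.onChange(r.names())
            }
        }
    }

    func (r *registry) names() []string {
        out := make([]string, 0, len(r.watchers))
        for n := range r.watchers {
            out = append(out, n)
        }
        return out
    }

    type printWatcher struct{}

    func (printWatcher) OnUpdate(s string) { fmt.Println("update:", s) }

    func main() {
        r := &registry{
            watchers: make(map[string]map[Watcher]bool),
            onChange: func(names []string) { fmt.Println("requesting:", names) },
        }
        cancel := r.watch("listener.example.com", printWatcher{})
        cancel() // last watcher removed: the resource is no longer requested
    }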
+ state.wState = watchStateCanceled + delete(state.watchers, watcher) + if len(state.watchers) > 0 { + return + } + + // There are no more watchers for this resource, delete the state + // associated with it, and instruct the transport to send a request + // which does not include this resource name. + if a.logger.V(2) { + a.logger.Infof("Removing last watch for type %q, resource name %q", rType.TypeName(), resourceName) + } + delete(resources, resourceName) + a.sendDiscoveryRequestLocked(rType, resources) + } +} + +func (a *authority) handleWatchTimerExpiryLocked(rType xdsresource.Type, resourceName string, state *resourceState) { + if a.closed { + return + } + a.logger.Warningf("Watch for resource %q of type %s timed out", resourceName, rType.TypeName()) + + switch state.wState { + case watchStateRequested: + // This is the only state where we need to handle the timer expiry by + // invoking appropriate watch callbacks. This is handled outside the switch. + case watchStateCanceled: + return + default: + a.logger.Warningf("Unexpected watch state %q for resource %q.", state.wState, resourceName) + return + } + + state.wState = watchStateTimeout + // With the watch timer firing, it is safe to assume that the resource does + // not exist on the management server. + state.cache = nil + state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} + for watcher := range state.watchers { + watcher := watcher + a.serializer.TrySchedule(func(context.Context) { watcher.OnResourceDoesNotExist(func() {}) }) + } +} + +func (a *authority) triggerResourceNotFoundForTesting(rType xdsresource.Type, resourceName string) { + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + + if a.closed { + return + } + resourceStates := a.resources[rType] + state, ok := resourceStates[resourceName] + if !ok { + return + } + // if watchStateTimeout already triggered resource not found above from + // normal watch expiry. + if state.wState == watchStateCanceled || state.wState == watchStateTimeout { + return + } + state.wState = watchStateTimeout + state.cache = nil + state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} + for watcher := range state.watchers { + watcher := watcher + a.serializer.TrySchedule(func(context.Context) { watcher.OnResourceDoesNotExist(func() {}) }) + } +} + +// sendDiscoveryRequestLocked sends a discovery request for the specified +// resource type and resource names. Even though this method does not directly +// access the resource cache, it is important that `resourcesMu` be held when +// calling this method to ensure that a consistent snapshot of resource names is +// being requested. 
+func (a *authority) sendDiscoveryRequestLocked(rType xdsresource.Type, resources map[string]*resourceState) { + resourcesToRequest := make([]string, len(resources)) + i := 0 + for name := range resources { + resourcesToRequest[i] = name + i++ + } + a.transport.SendRequest(rType.TypeURL(), resourcesToRequest) +} + +func (a *authority) reportLoad() (*load.Store, func()) { + return a.transport.ReportLoad() +} + +func (a *authority) dumpResources() []*v3statuspb.ClientConfig_GenericXdsConfig { + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + + var ret []*v3statuspb.ClientConfig_GenericXdsConfig + for rType, resourceStates := range a.resources { + typeURL := rType.TypeURL() + for name, state := range resourceStates { + var raw *anypb.Any + if state.cache != nil { + raw = state.cache.Raw() + } + config := &v3statuspb.ClientConfig_GenericXdsConfig{ + TypeUrl: typeURL, + Name: name, + VersionInfo: state.md.Version, + XdsConfig: raw, + LastUpdated: timestamppb.New(state.md.Timestamp), + ClientStatus: serviceStatusToProto(state.md.Status), + } + if errState := state.md.ErrState; errState != nil { + config.ErrorState = &v3adminpb.UpdateFailureState{ + LastUpdateAttempt: timestamppb.New(errState.Timestamp), + Details: errState.Err.Error(), + VersionInfo: errState.Version, + } + } + ret = append(ret, config) + } + } + return ret +} + +func serviceStatusToProto(serviceStatus xdsresource.ServiceStatus) v3adminpb.ClientResourceStatus { + switch serviceStatus { + case xdsresource.ServiceStatusUnknown: + return v3adminpb.ClientResourceStatus_UNKNOWN + case xdsresource.ServiceStatusRequested: + return v3adminpb.ClientResourceStatus_REQUESTED + case xdsresource.ServiceStatusNotExist: + return v3adminpb.ClientResourceStatus_DOES_NOT_EXIST + case xdsresource.ServiceStatusACKed: + return v3adminpb.ClientResourceStatus_ACKED + case xdsresource.ServiceStatusNACKed: + return v3adminpb.ClientResourceStatus_NACKED + default: + return v3adminpb.ClientResourceStatus_UNKNOWN + } +} + +func combineErrors(rType string, topLevelErrors []error, perResourceErrors map[string]error) error { + var errStrB strings.Builder + errStrB.WriteString(fmt.Sprintf("error parsing %q response: ", rType)) + if len(topLevelErrors) > 0 { + errStrB.WriteString("top level errors: ") + for i, err := range topLevelErrors { + if i != 0 { + errStrB.WriteString(";\n") + } + errStrB.WriteString(err.Error()) + } + } + if len(perResourceErrors) > 0 { + var i int + for name, err := range perResourceErrors { + if i != 0 { + errStrB.WriteString(";\n") + } + i++ + errStrB.WriteString(fmt.Sprintf("resource %q: %v", name, err.Error())) + } + } + return errors.New(errStrB.String()) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go new file mode 100644 index 0000000000..144cb5bd76 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package xdsclient implements a full fledged gRPC client for the xDS API used +// by the xds resolver and balancer implementations. +package xdsclient + +import ( + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// XDSClient is a full fledged gRPC client which queries a set of discovery APIs +// (collectively termed as xDS) on a remote management server, to discover +// various dynamic resources. +type XDSClient interface { + // WatchResource uses xDS to discover the resource associated with the + // provided resource name. The resource type implementation determines how + // xDS requests are sent out and how responses are deserialized and + // validated. Upon receipt of a response from the management server, an + // appropriate callback on the watcher is invoked. + // + // Most callers will not have a need to use this API directly. They will + // instead use a resource-type-specific wrapper API provided by the relevant + // resource type implementation. + // + // + // During a race (e.g. an xDS response is received while the user is calling + // cancel()), there's a small window where the callback can be called after + // the watcher is canceled. Callers need to handle this case. + WatchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) (cancel func()) + + ReportLoad(*bootstrap.ServerConfig) (*load.Store, func()) + + BootstrapConfig() *bootstrap.Config +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go new file mode 100644 index 0000000000..6097e86925 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go @@ -0,0 +1,158 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient + +import ( + "context" + "fmt" + "sync" + "time" + + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/cache" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// NameForServer represents the value to be passed as name when creating an xDS +// client from xDS-enabled gRPC servers. This is a well-known dedicated key +// value, and is defined in gRFC A71. 
+const NameForServer = "#server" + +// New returns an xDS client configured with bootstrap configuration specified +// by the ordered list: +// - file name containing the configuration specified by GRPC_XDS_BOOTSTRAP +// - actual configuration specified by GRPC_XDS_BOOTSTRAP_CONFIG +// - fallback configuration set using bootstrap.SetFallbackBootstrapConfig +// +// gRPC client implementations are expected to pass the channel's target URI for +// the name field, while server implementations are expected to pass a dedicated +// well-known value "#server", as specified in gRFC A71. The returned client is +// a reference counted implementation shared among callers using the same name. +// +// The second return value represents a close function which releases the +// caller's reference on the returned client. The caller is expected to invoke +// it once they are done using the client. The underlying client will be closed +// only when all references are released, and it is safe for the caller to +// invoke this close function multiple times. +func New(name string) (XDSClient, func(), error) { + return newRefCounted(name, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) +} + +// newClientImpl returns a new xdsClient with the given config. +func newClientImpl(config *bootstrap.Config, watchExpiryTimeout time.Duration, idleAuthorityDeleteTimeout time.Duration) (*clientImpl, error) { + ctx, cancel := context.WithCancel(context.Background()) + c := &clientImpl{ + done: grpcsync.NewEvent(), + config: config, + watchExpiryTimeout: watchExpiryTimeout, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerClose: cancel, + resourceTypes: newResourceTypeRegistry(), + authorities: make(map[string]*authority), + idleAuthorities: cache.NewTimeoutCache(idleAuthorityDeleteTimeout), + } + + c.logger = prefixLogger(c) + return c, nil +} + +// OptionsForTesting contains options to configure xDS client creation for +// testing purposes only. +type OptionsForTesting struct { + // Name is a unique name for this xDS client. + Name string + // Contents contain a JSON representation of the bootstrap configuration to + // be used when creating the xDS client. + Contents []byte + + // WatchExpiryTimeout is the timeout for xDS resource watch expiry. If + // unspecified, uses the default value used in non-test code. + WatchExpiryTimeout time.Duration + + // AuthorityIdleTimeout is the timeout before idle authorities are deleted. + // If unspecified, uses the default value used in non-test code. + AuthorityIdleTimeout time.Duration +} + +// NewForTesting returns an xDS client configured with the provided options. +// +// The second return value represents a close function which the caller is +// expected to invoke once they are done using the client. It is safe for the +// caller to invoke this close function multiple times. +// +// # Testing Only +// +// This function should ONLY be used for testing purposes. 
+func NewForTesting(opts OptionsForTesting) (XDSClient, func(), error) { + if opts.Name == "" { + return nil, nil, fmt.Errorf("opts.Name field must be non-empty") + } + if opts.WatchExpiryTimeout == 0 { + opts.WatchExpiryTimeout = defaultWatchExpiryTimeout + } + if opts.AuthorityIdleTimeout == 0 { + opts.AuthorityIdleTimeout = defaultIdleAuthorityDeleteTimeout + } + + if err := bootstrap.SetFallbackBootstrapConfig(opts.Contents); err != nil { + return nil, nil, err + } + client, cancel, err := newRefCounted(opts.Name, opts.WatchExpiryTimeout, opts.AuthorityIdleTimeout) + return client, func() { bootstrap.UnsetFallbackBootstrapConfigForTesting(); cancel() }, err +} + +// GetForTesting returns an xDS client created earlier using the given name. +// +// The second return value represents a close function which the caller is +// expected to invoke once they are done using the client. It is safe for the +// caller to invoke this close function multiple times. +// +// # Testing Only +// +// This function should ONLY be used for testing purposes. +func GetForTesting(name string) (XDSClient, func(), error) { + clientsMu.Lock() + defer clientsMu.Unlock() + + c, ok := clients[name] + if !ok { + return nil, nil, fmt.Errorf("xDS client with name %q not found", name) + } + c.incrRef() + return c, grpcsync.OnceFunc(func() { clientRefCountedClose(name) }), nil +} + +func init() { + internal.TriggerXDSResourceNotFoundForTesting = triggerXDSResourceNotFoundForTesting +} + +func triggerXDSResourceNotFoundForTesting(client XDSClient, typ xdsresource.Type, name string) error { + crc, ok := client.(*clientRefCounted) + if !ok { + return fmt.Errorf("xDS client is of type %T, want %T", client, &clientRefCounted{}) + } + return crc.clientImpl.triggerResourceNotFoundForTesting(typ, name) +} + +var ( + clients = map[string]*clientRefCounted{} + clientsMu sync.Mutex +) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go new file mode 100644 index 0000000000..1efb4de42e --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go @@ -0,0 +1,104 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient + +import ( + "fmt" + "sync/atomic" + "time" + + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/xds/bootstrap" +) + +const ( + defaultWatchExpiryTimeout = 15 * time.Second + defaultIdleAuthorityDeleteTimeout = 5 * time.Minute +) + +var ( + // The following functions are no-ops in the actual code, but can be + // overridden in tests to give them visibility into certain events. 
+ xdsClientImplCreateHook = func(string) {} + xdsClientImplCloseHook = func(string) {} +) + +func clientRefCountedClose(name string) { + clientsMu.Lock() + defer clientsMu.Unlock() + + client, ok := clients[name] + if !ok { + logger.Errorf("Attempt to close a non-existent xDS client with name %s", name) + return + } + if client.decrRef() != 0 { + return + } + client.clientImpl.close() + xdsClientImplCloseHook(name) + delete(clients, name) + +} + +// newRefCounted creates a new reference counted xDS client implementation for +// name, if one does not exist already. If an xDS client for the given name +// exists, it gets a reference to it and returns it. +func newRefCounted(name string, watchExpiryTimeout, idleAuthorityTimeout time.Duration) (XDSClient, func(), error) { + clientsMu.Lock() + defer clientsMu.Unlock() + + if c := clients[name]; c != nil { + c.incrRef() + return c, grpcsync.OnceFunc(func() { clientRefCountedClose(name) }), nil + } + + // Create the new client implementation. + config, err := bootstrap.GetConfiguration() + if err != nil { + return nil, nil, fmt.Errorf("xds: failed to get xDS bootstrap config: %v", err) + } + c, err := newClientImpl(config, watchExpiryTimeout, idleAuthorityTimeout) + if err != nil { + return nil, nil, err + } + c.logger.Infof("Created client with name %q and bootstrap configuration:\n %s", name, config) + client := &clientRefCounted{clientImpl: c, refCount: 1} + clients[name] = client + xdsClientImplCreateHook(name) + + logger.Infof("xDS node ID: %s", config.Node().GetId()) + return client, grpcsync.OnceFunc(func() { clientRefCountedClose(name) }), nil +} + +// clientRefCounted is ref-counted, and to be shared by the xds resolver and +// balancer implementations, across multiple ClientConns and Servers. +type clientRefCounted struct { + *clientImpl + + refCount int32 // accessed atomically +} + +func (c *clientRefCounted) incrRef() int32 { + return atomic.AddInt32(&c.refCount, 1) +} + +func (c *clientRefCounted) decrRef() int32 { + return atomic.AddInt32(&c.refCount, -1) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go new file mode 100644 index 0000000000..9f619016a0 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go @@ -0,0 +1,101 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient + +import ( + "sync" + "time" + + "google.golang.org/grpc/internal/cache" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/xds/bootstrap" +) + +var _ XDSClient = &clientImpl{} + +// clientImpl is the real implementation of the xds client. The exported Client +// is a wrapper of this struct with a ref count. 
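The sharing scheme above keeps one client per name in a package-level map, hands out references through a counter, and tears the client down only when the last reference is released; each caller's close func is also made idempotent. A reduced sketch of that lifecycle (sharedClient, acquire, and release are illustrative names, not the real types):

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    type sharedClient struct {
        name     string
        refCount int32 // accessed atomically
    }

    func (c *sharedClient) close() { fmt.Println("closing client", c.name) }

    var (
        mu      sync.Mutex
        clients = map[string]*sharedClient{}
    )

    // acquire returns the client for name, creating it on first use, plus a
    // release func. The client is closed only when every release has been called.
    func acquire(name string) (*sharedClient, func()) {
        mu.Lock()
        defer mu.Unlock()
        c, ok := clients[name]
        if !ok {
            c = &sharedClient{name: name}
            clients[name] = c
        }
        atomic.AddInt32(&c.refCount, 1)
        var once sync.Once
        return c, func() { once.Do(func() { release(name) }) }
    }

    func release(name string) {
        mu.Lock()
        defer mu.Unlock()
        c, ok := clients[name]
        if !ok {
            return
        }
        if atomic.AddInt32(&c.refCount, -1) != 0 {
            return
        }
        delete(clients, name)
        c.close()
    }

    func main() {
        c1, rel1 := acquire("example.com:443")
        c2, rel2 := acquire("example.com:443")
        fmt.Println(c1 == c2) // true: same underlying client is shared
        rel1()
        rel2() // last reference released: client is closed
        rel2() // safe to call again: release runs at most once per caller
    }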
+type clientImpl struct { + done *grpcsync.Event + config *bootstrap.Config + logger *grpclog.PrefixLogger + watchExpiryTimeout time.Duration + serializer *grpcsync.CallbackSerializer + serializerClose func() + resourceTypes *resourceTypeRegistry + + // authorityMu protects the authority fields. It's necessary because an + // authority is created when it's used. + authorityMu sync.Mutex + // authorities is a map from ServerConfig to authority. So that + // different authorities sharing the same ServerConfig can share the + // authority. + // + // The key is **ServerConfig.String()**, not the authority name. + // + // An authority is either in authorities, or idleAuthorities, + // never both. + authorities map[string]*authority + // idleAuthorities keeps the authorities that are not used (the last + // watch on it was canceled). They are kept in the cache and will be deleted + // after a timeout. The key is ServerConfig.String(). + // + // An authority is either in authorities, or idleAuthorities, + // never both. + idleAuthorities *cache.TimeoutCache +} + +// BootstrapConfig returns the configuration read from the bootstrap file. +// Callers must treat the return value as read-only. +func (c *clientImpl) BootstrapConfig() *bootstrap.Config { + return c.config +} + +// close closes the gRPC connection to the management server. +func (c *clientImpl) close() { + if c.done.HasFired() { + return + } + c.done.Fire() + // TODO: Should we invoke the registered callbacks here with an error that + // the client is closed? + + c.authorityMu.Lock() + for _, a := range c.authorities { + a.close() + } + c.idleAuthorities.Clear(true) + c.authorityMu.Unlock() + c.serializerClose() + + for _, s := range c.config.XDSServers() { + for _, f := range s.Cleanups() { + f() + } + } + for _, a := range c.config.Authorities() { + for _, s := range a.XDSServers { + for _, f := range s.Cleanups() { + f() + } + } + } + c.logger.Infof("Shutdown") +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go new file mode 100644 index 0000000000..1ce20fabdf --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go @@ -0,0 +1,145 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsclient + +import ( + "errors" + "fmt" + + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// findAuthority returns the authority for this name. If it doesn't already +// exist, one will be created. +// +// Note that this doesn't always create new authority. authorities with the same +// config but different names are shared. +// +// The returned unref function must be called when the caller is done using this +// authority, without holding c.authorityMu. +// +// Caller must not hold c.authorityMu. 
+func (c *clientImpl) findAuthority(n *xdsresource.Name) (*authority, func(), error) { + scheme, authority := n.Scheme, n.Authority + + c.authorityMu.Lock() + defer c.authorityMu.Unlock() + if c.done.HasFired() { + return nil, nil, errors.New("the xds-client is closed") + } + + config := c.config.XDSServers()[0] + if scheme == xdsresource.FederationScheme { + authorities := c.config.Authorities() + if authorities == nil { + return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority) + } + cfg, ok := authorities[authority] + if !ok { + return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority) + } + if len(cfg.XDSServers) >= 1 { + config = cfg.XDSServers[0] + } + } + + a, err := c.newAuthorityLocked(config) + if err != nil { + return nil, nil, fmt.Errorf("xds: failed to connect to the control plane for authority %q: %v", authority, err) + } + // All returned authority from this function will be used by a watch, + // holding the ref here. + // + // Note that this must be done while c.authorityMu is held, to avoid the + // race that an authority is returned, but before the watch starts, the + // old last watch is canceled (in another goroutine), causing this + // authority to be removed, and then a watch will start on a removed + // authority. + // + // unref() will be done when the watch is canceled. + a.refLocked() + return a, func() { c.unrefAuthority(a) }, nil +} + +// newAuthorityLocked creates a new authority for the given config. If an +// authority for the given config exists in the cache, it is returned instead of +// creating a new one. +// +// The caller must take a reference of the returned authority before using, and +// unref afterwards. +// +// caller must hold c.authorityMu +func (c *clientImpl) newAuthorityLocked(config *bootstrap.ServerConfig) (_ *authority, retErr error) { + // First check if there's already an authority for this config. If found, it + // means this authority is used by other watches (could be the same + // authority name, or a different authority name but the same server + // config). Return it. + configStr := config.String() + if a, ok := c.authorities[configStr]; ok { + return a, nil + } + // Second check if there's an authority in the idle cache. If found, it + // means this authority was created, but moved to the idle cache because the + // watch was canceled. Move it from idle cache to the authority cache, and + // return. + if old, ok := c.idleAuthorities.Remove(configStr); ok { + oldA, _ := old.(*authority) + if oldA != nil { + c.authorities[configStr] = oldA + return oldA, nil + } + } + + // Make a new authority since there's no existing authority for this config. + ret, err := newAuthority(authorityArgs{ + serverCfg: config, + bootstrapCfg: c.config, + serializer: c.serializer, + resourceTypeGetter: c.resourceTypes.get, + watchExpiryTimeout: c.watchExpiryTimeout, + logger: grpclog.NewPrefixLogger(logger, authorityPrefix(c, config.ServerURI())), + }) + if err != nil { + return nil, fmt.Errorf("creating new authority for config %q: %v", config.String(), err) + } + // Add it to the cache, so it will be reused. + c.authorities[configStr] = ret + return ret, nil +} + +// unrefAuthority unrefs the authority. It also moves the authority to idle +// cache if it's ref count is 0. +// +// This function doesn't need to called explicitly. It's called by the returned +// unref from findAuthority(). +// +// Caller must not hold c.authorityMu. 
+func (c *clientImpl) unrefAuthority(a *authority) { + c.authorityMu.Lock() + defer c.authorityMu.Unlock() + if a.unrefLocked() > 0 { + return + } + configStr := a.serverCfg.String() + delete(c.authorities, configStr) + c.idleAuthorities.Add(configStr, a, func() { + a.close() + }) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go new file mode 100644 index 0000000000..f4d7b0a011 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go @@ -0,0 +1,53 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient + +import ( + v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" +) + +// dumpResources returns the status and contents of all xDS resources. +func (c *clientImpl) dumpResources() *v3statuspb.ClientConfig { + c.authorityMu.Lock() + defer c.authorityMu.Unlock() + + var retCfg []*v3statuspb.ClientConfig_GenericXdsConfig + for _, a := range c.authorities { + retCfg = append(retCfg, a.dumpResources()...) + } + + return &v3statuspb.ClientConfig{ + Node: c.config.Node(), + GenericXdsConfigs: retCfg, + } +} + +// DumpResources returns the status and contents of all xDS resources. +func DumpResources() *v3statuspb.ClientStatusResponse { + clientsMu.Lock() + defer clientsMu.Unlock() + + resp := &v3statuspb.ClientStatusResponse{} + for key, client := range clients { + cfg := client.dumpResources() + cfg.ClientScope = key + resp.Config = append(resp.Config, cfg) + } + return resp +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go new file mode 100644 index 0000000000..b42e43a569 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go @@ -0,0 +1,47 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsclient + +import ( + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/load" +) + +// ReportLoad starts a load reporting stream to the given server. All load +// reports to the same server share the LRS stream. +// +// It returns a Store for the user to report loads, a function to cancel the +// load reporting stream. 
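unrefAuthority above does not close an authority immediately; it parks it in a timeout cache so a quick re-watch can revive it without reconnecting. A simplified sketch of that idle-then-close behavior using a plain timer (idlePool and its methods are illustrative; the real code uses an internal TimeoutCache):

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    type conn struct{ key string }

    func (c *conn) close() { fmt.Println("closed", c.key) }

    type idlePool struct {
        mu      sync.Mutex
        active  map[string]*conn
        idle    map[string]*time.Timer
        timeout time.Duration
    }

    // get returns an existing connection, reviving it from the idle set if
    // needed, or creates a new one.
    func (p *idlePool) get(key string) *conn {
        p.mu.Lock()
        defer p.mu.Unlock()
        if t, ok := p.idle[key]; ok {
            t.Stop() // cancel the pending close; the connection is in use again
            delete(p.idle, key)
        }
        c, ok := p.active[key]
        if !ok {
            c = &conn{key: key}
            p.active[key] = c
        }
        return c
    }

    // release marks the connection idle; it is closed for real only if it stays
    // unused for the configured timeout.
    func (p *idlePool) release(key string) {
        p.mu.Lock()
        defer p.mu.Unlock()
        c, ok := p.active[key]
        if !ok {
            return
        }
        p.idle[key] = time.AfterFunc(p.timeout, func() {
            p.mu.Lock()
            defer p.mu.Unlock()
            if _, stillIdle := p.idle[key]; !stillIdle {
                return // revived by get() before the close ran
            }
            delete(p.idle, key)
            delete(p.active, key)
            c.close()
        })
    }

    func main() {
        p := &idlePool{active: map[string]*conn{}, idle: map[string]*time.Timer{}, timeout: 50 * time.Millisecond}
        p.get("server-a")
        p.release("server-a")
        p.get("server-a") // revived before the idle timeout; not closed
        p.release("server-a")
        time.Sleep(100 * time.Millisecond) // now it times out and closes
    }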
+func (c *clientImpl) ReportLoad(server *bootstrap.ServerConfig) (*load.Store, func()) { + c.authorityMu.Lock() + a, err := c.newAuthorityLocked(server) + if err != nil { + c.authorityMu.Unlock() + c.logger.Warningf("Failed to connect to the management server to report load for authority %q: %v", server, err) + return nil, func() {} + } + // Hold the ref before starting load reporting. + a.refLocked() + c.authorityMu.Unlock() + + store, cancelF := a.reportLoad() + return store, func() { + cancelF() + c.unrefAuthority(a) + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go new file mode 100644 index 0000000000..b9af85db63 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go @@ -0,0 +1,111 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsclient + +import ( + "context" + "fmt" + "sync" + + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// WatchResource uses xDS to discover the resource associated with the provided +// resource name. The resource type implementation determines how xDS requests +// are sent out and how responses are deserialized and validated. Upon receipt +// of a response from the management server, an appropriate callback on the +// watcher is invoked. +func (c *clientImpl) WatchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) (cancel func()) { + // Return early if the client is already closed. + // + // The client returned from the top-level API is a ref-counted client which + // contains a pointer to `clientImpl`. When all references are released, the + // ref-counted client sets its pointer to `nil`. And if any watch APIs are + // made on such a closed client, we will get here with a `nil` receiver. + if c == nil || c.done.HasFired() { + logger.Warningf("Watch registered for name %q of type %q, but client is closed", rType.TypeName(), resourceName) + return func() {} + } + + if err := c.resourceTypes.maybeRegister(rType); err != nil { + logger.Warningf("Watch registered for name %q of type %q which is already registered", rType.TypeName(), resourceName) + c.serializer.TrySchedule(func(context.Context) { watcher.OnError(err, func() {}) }) + return func() {} + } + + // TODO: Make ParseName return an error if parsing fails, and + // schedule the OnError callback in that case. 
+ n := xdsresource.ParseName(resourceName) + a, unref, err := c.findAuthority(n) + if err != nil { + logger.Warningf("Watch registered for name %q of type %q, authority %q is not found", rType.TypeName(), resourceName, n.Authority) + c.serializer.TrySchedule(func(context.Context) { watcher.OnError(err, func() {}) }) + return func() {} + } + cancelF := a.watchResource(rType, n.String(), watcher) + return func() { + cancelF() + unref() + } +} + +// A registry of xdsresource.Type implementations indexed by their corresponding +// type URLs. Registration of an xdsresource.Type happens the first time a watch +// for a resource of that type is invoked. +type resourceTypeRegistry struct { + mu sync.Mutex + types map[string]xdsresource.Type +} + +func newResourceTypeRegistry() *resourceTypeRegistry { + return &resourceTypeRegistry{types: make(map[string]xdsresource.Type)} +} + +func (r *resourceTypeRegistry) get(url string) xdsresource.Type { + r.mu.Lock() + defer r.mu.Unlock() + return r.types[url] +} + +func (r *resourceTypeRegistry) maybeRegister(rType xdsresource.Type) error { + r.mu.Lock() + defer r.mu.Unlock() + + url := rType.TypeURL() + typ, ok := r.types[url] + if ok && typ != rType { + return fmt.Errorf("attempt to re-register a resource type implementation for %v", rType.TypeName()) + } + r.types[url] = rType + return nil +} + +func (c *clientImpl) triggerResourceNotFoundForTesting(rType xdsresource.Type, resourceName string) error { + if c == nil || c.done.HasFired() { + return fmt.Errorf("attempt to trigger resource-not-found-error for resource %q of type %q, but client is closed", rType.TypeName(), resourceName) + } + + n := xdsresource.ParseName(resourceName) + a, unref, err := c.findAuthority(n) + if err != nil { + return fmt.Errorf("attempt to trigger resource-not-found-error for resource %q of type %q, but authority %q is not found", rType.TypeName(), resourceName, n.Authority) + } + defer unref() + a.triggerResourceNotFoundForTesting(rType, n.String()) + return nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go new file mode 100644 index 0000000000..e126107441 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go @@ -0,0 +1,25 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package internal contains functionality internal to the xdsclient package. +package internal + +// The following vars can be overridden by tests. +var ( + // NewADSStream is a function that returns a new ADS stream. 
+ NewADSStream any // func(context.Context, *grpc.ClientConn) (v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient, error) +) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/reporter.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/reporter.go new file mode 100644 index 0000000000..67e29e5bae --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/reporter.go @@ -0,0 +1,27 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package load + +// PerClusterReporter wraps the methods from the loadStore that are used here. +type PerClusterReporter interface { + CallStarted(locality string) + CallFinished(locality string, err error) + CallServerLoad(locality, name string, val float64) + CallDropped(category string) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go new file mode 100644 index 0000000000..f1e265ee7d --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go @@ -0,0 +1,441 @@ +/* + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package load provides functionality to record and maintain load data. +package load + +import ( + "sync" + "sync/atomic" + "time" +) + +const negativeOneUInt64 = ^uint64(0) + +// Store keeps the loads for multiple clusters and services to be reported via +// LRS. It contains loads to reported to one LRS server. Create multiple stores +// for multiple servers. +// +// It is safe for concurrent use. +type Store struct { + // mu only protects the map (2 layers). The read/write to *perClusterStore + // doesn't need to hold the mu. + mu sync.Mutex + // clusters is a map with cluster name as the key. The second layer is a map + // with service name as the key. Each value (perClusterStore) contains data + // for a (cluster, service) pair. + // + // Note that new entries are added to this map, but never removed. This is + // potentially a memory leak. But the memory is allocated for each new + // (cluster,service) pair, and the memory allocated is just pointers and + // maps. So this shouldn't get too bad. + clusters map[string]map[string]*perClusterStore +} + +// NewStore creates a Store. +func NewStore() *Store { + return &Store{ + clusters: make(map[string]map[string]*perClusterStore), + } +} + +// Stats returns the load data for the given cluster names. 
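The two-level (cluster, service) map described above is populated lazily and only ever grows, so every caller asking for the same pair gets the same reporter back. A compact sketch of that lookup pattern (store and perCluster are illustrative names):

    package main

    import (
        "fmt"
        "sync"
    )

    // store keeps one entry per (cluster, service) pair in a two-level map that
    // only ever grows; entries are created on first use.
    type store struct {
        mu       sync.Mutex
        clusters map[string]map[string]*perCluster
    }

    type perCluster struct{ cluster, service string }

    func (s *store) perCluster(cluster, service string) *perCluster {
        s.mu.Lock()
        defer s.mu.Unlock()
        c, ok := s.clusters[cluster]
        if !ok {
            c = make(map[string]*perCluster)
            s.clusters[cluster] = c
        }
        p, ok := c[service]
        if !ok {
            p = &perCluster{cluster: cluster, service: service}
            c[service] = p
        }
        return p
    }

    func main() {
        s := &store{clusters: make(map[string]map[string]*perCluster)}
        a := s.perCluster("cluster-a", "eds-a")
        b := s.perCluster("cluster-a", "eds-a")
        fmt.Println(a == b) // true: the same reporter is reused
    }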
Data is returned in +// a slice with no specific order. +// +// If no clusterName is given (an empty slice), all data for all known clusters +// is returned. +// +// If a cluster's Data is empty (no load to report), it's not appended to the +// returned slice. +func (s *Store) Stats(clusterNames []string) []*Data { + var ret []*Data + s.mu.Lock() + defer s.mu.Unlock() + + if len(clusterNames) == 0 { + for _, c := range s.clusters { + ret = appendClusterStats(ret, c) + } + return ret + } + + for _, n := range clusterNames { + if c, ok := s.clusters[n]; ok { + ret = appendClusterStats(ret, c) + } + } + return ret +} + +// appendClusterStats gets Data for the given cluster, append to ret, and return +// the new slice. +// +// Data is only appended to ret if it's not empty. +func appendClusterStats(ret []*Data, cluster map[string]*perClusterStore) []*Data { + for _, d := range cluster { + data := d.stats() + if data == nil { + // Skip this data if it doesn't contain any information. + continue + } + ret = append(ret, data) + } + return ret +} + +// PerCluster returns the perClusterStore for the given clusterName + +// serviceName. +func (s *Store) PerCluster(clusterName, serviceName string) PerClusterReporter { + if s == nil { + return nil + } + + s.mu.Lock() + defer s.mu.Unlock() + c, ok := s.clusters[clusterName] + if !ok { + c = make(map[string]*perClusterStore) + s.clusters[clusterName] = c + } + + if p, ok := c[serviceName]; ok { + return p + } + p := &perClusterStore{ + cluster: clusterName, + service: serviceName, + } + c[serviceName] = p + return p +} + +// perClusterStore is a repository for LB policy implementations to report store +// load data. It contains load for a (cluster, edsService) pair. +// +// It is safe for concurrent use. +// +// TODO(easwars): Use regular maps with mutexes instead of sync.Map here. The +// latter is optimized for two common use cases: (1) when the entry for a given +// key is only ever written once but read many times, as in caches that only +// grow, or (2) when multiple goroutines read, write, and overwrite entries for +// disjoint sets of keys. In these two cases, use of a Map may significantly +// reduce lock contention compared to a Go map paired with a separate Mutex or +// RWMutex. +// Neither of these conditions are met here, and we should transition to a +// regular map with a mutex for better type safety. +type perClusterStore struct { + cluster, service string + drops sync.Map // map[string]*uint64 + localityRPCCount sync.Map // map[string]*rpcCountData + + mu sync.Mutex + lastLoadReportAt time.Time +} + +// Update functions are called by picker for each RPC. To avoid contention, all +// updates are done atomically. + +// CallDropped adds one drop record with the given category to store. +func (ls *perClusterStore) CallDropped(category string) { + if ls == nil { + return + } + + p, ok := ls.drops.Load(category) + if !ok { + tp := new(uint64) + p, _ = ls.drops.LoadOrStore(category, tp) + } + atomic.AddUint64(p.(*uint64), 1) +} + +// CallStarted adds one call started record for the given locality. +func (ls *perClusterStore) CallStarted(locality string) { + if ls == nil { + return + } + + p, ok := ls.localityRPCCount.Load(locality) + if !ok { + tp := newRPCCountData() + p, _ = ls.localityRPCCount.LoadOrStore(locality, tp) + } + p.(*rpcCountData).incrInProgress() + p.(*rpcCountData).incrIssued() +} + +// CallFinished adds one call finished record for the given locality. +// For successful calls, err needs to be nil. 
+func (ls *perClusterStore) CallFinished(locality string, err error) { + if ls == nil { + return + } + + p, ok := ls.localityRPCCount.Load(locality) + if !ok { + // The map is never cleared, only values in the map are reset. So the + // case where entry for call-finish is not found should never happen. + return + } + p.(*rpcCountData).decrInProgress() + if err == nil { + p.(*rpcCountData).incrSucceeded() + } else { + p.(*rpcCountData).incrErrored() + } +} + +// CallServerLoad adds one server load record for the given locality. The +// load type is specified by desc, and its value by val. +func (ls *perClusterStore) CallServerLoad(locality, name string, d float64) { + if ls == nil { + return + } + + p, ok := ls.localityRPCCount.Load(locality) + if !ok { + // The map is never cleared, only values in the map are reset. So the + // case where entry for callServerLoad is not found should never happen. + return + } + p.(*rpcCountData).addServerLoad(name, d) +} + +// Data contains all load data reported to the Store since the most recent call +// to stats(). +type Data struct { + // Cluster is the name of the cluster this data is for. + Cluster string + // Service is the name of the EDS service this data is for. + Service string + // TotalDrops is the total number of dropped requests. + TotalDrops uint64 + // Drops is the number of dropped requests per category. + Drops map[string]uint64 + // LocalityStats contains load reports per locality. + LocalityStats map[string]LocalityData + // ReportInternal is the duration since last time load was reported (stats() + // was called). + ReportInterval time.Duration +} + +// LocalityData contains load data for a single locality. +type LocalityData struct { + // RequestStats contains counts of requests made to the locality. + RequestStats RequestData + // LoadStats contains server load data for requests made to the locality, + // indexed by the load type. + LoadStats map[string]ServerLoadData +} + +// RequestData contains request counts. +type RequestData struct { + // Succeeded is the number of succeeded requests. + Succeeded uint64 + // Errored is the number of requests which ran into errors. + Errored uint64 + // InProgress is the number of requests in flight. + InProgress uint64 + // Issued is the total number requests that were sent. + Issued uint64 +} + +// ServerLoadData contains server load data. +type ServerLoadData struct { + // Count is the number of load reports. + Count uint64 + // Sum is the total value of all load reports. + Sum float64 +} + +func newData(cluster, service string) *Data { + return &Data{ + Cluster: cluster, + Service: service, + Drops: make(map[string]uint64), + LocalityStats: make(map[string]LocalityData), + } +} + +// stats returns and resets all loads reported to the store, except inProgress +// rpc counts. +// +// It returns nil if the store doesn't contain any (new) data. +func (ls *perClusterStore) stats() *Data { + if ls == nil { + return nil + } + + sd := newData(ls.cluster, ls.service) + ls.drops.Range(func(key, val any) bool { + d := atomic.SwapUint64(val.(*uint64), 0) + if d == 0 { + return true + } + sd.TotalDrops += d + keyStr := key.(string) + if keyStr != "" { + // Skip drops without category. They are counted in total_drops, but + // not in per category. One example is drops by circuit breaking. 
+ sd.Drops[keyStr] = d + } + return true + }) + ls.localityRPCCount.Range(func(key, val any) bool { + countData := val.(*rpcCountData) + succeeded := countData.loadAndClearSucceeded() + inProgress := countData.loadInProgress() + errored := countData.loadAndClearErrored() + issued := countData.loadAndClearIssued() + if succeeded == 0 && inProgress == 0 && errored == 0 && issued == 0 { + return true + } + + ld := LocalityData{ + RequestStats: RequestData{ + Succeeded: succeeded, + Errored: errored, + InProgress: inProgress, + Issued: issued, + }, + LoadStats: make(map[string]ServerLoadData), + } + countData.serverLoads.Range(func(key, val any) bool { + sum, count := val.(*rpcLoadData).loadAndClear() + if count == 0 { + return true + } + ld.LoadStats[key.(string)] = ServerLoadData{ + Count: count, + Sum: sum, + } + return true + }) + sd.LocalityStats[key.(string)] = ld + return true + }) + + ls.mu.Lock() + sd.ReportInterval = time.Since(ls.lastLoadReportAt) + ls.lastLoadReportAt = time.Now() + ls.mu.Unlock() + + if sd.TotalDrops == 0 && len(sd.Drops) == 0 && len(sd.LocalityStats) == 0 { + return nil + } + return sd +} + +type rpcCountData struct { + // Only atomic accesses are allowed for the fields. + succeeded *uint64 + errored *uint64 + inProgress *uint64 + issued *uint64 + + // Map from load desc to load data (sum+count). Loading data from map is + // atomic, but updating data takes a lock, which could cause contention when + // multiple RPCs try to report loads for the same desc. + // + // To fix the contention, shard this map. + serverLoads sync.Map // map[string]*rpcLoadData +} + +func newRPCCountData() *rpcCountData { + return &rpcCountData{ + succeeded: new(uint64), + errored: new(uint64), + inProgress: new(uint64), + issued: new(uint64), + } +} + +func (rcd *rpcCountData) incrSucceeded() { + atomic.AddUint64(rcd.succeeded, 1) +} + +func (rcd *rpcCountData) loadAndClearSucceeded() uint64 { + return atomic.SwapUint64(rcd.succeeded, 0) +} + +func (rcd *rpcCountData) incrErrored() { + atomic.AddUint64(rcd.errored, 1) +} + +func (rcd *rpcCountData) loadAndClearErrored() uint64 { + return atomic.SwapUint64(rcd.errored, 0) +} + +func (rcd *rpcCountData) incrInProgress() { + atomic.AddUint64(rcd.inProgress, 1) +} + +func (rcd *rpcCountData) decrInProgress() { + atomic.AddUint64(rcd.inProgress, negativeOneUInt64) // atomic.Add(x, -1) +} + +func (rcd *rpcCountData) loadInProgress() uint64 { + return atomic.LoadUint64(rcd.inProgress) // InProgress count is not clear when reading. +} + +func (rcd *rpcCountData) incrIssued() { + atomic.AddUint64(rcd.issued, 1) +} + +func (rcd *rpcCountData) loadAndClearIssued() uint64 { + return atomic.SwapUint64(rcd.issued, 0) +} + +func (rcd *rpcCountData) addServerLoad(name string, d float64) { + loads, ok := rcd.serverLoads.Load(name) + if !ok { + tl := newRPCLoadData() + loads, _ = rcd.serverLoads.LoadOrStore(name, tl) + } + loads.(*rpcLoadData).add(d) +} + +// Data for server loads (from trailers or oob). Fields in this struct must be +// updated consistently. +// +// The current solution is to hold a lock, which could cause contention. To fix, +// shard serverLoads map in rpcCountData. 
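The counters above are drained with atomic.SwapUint64 so that each reporting interval only picks up increments made since the previous report, without ever dropping a concurrent increment. A compact sketch of that read-and-reset pattern (counter, incr, and drain are illustrative names):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // counter is incremented by request handlers and drained by the reporter.
    type counter struct{ n uint64 }

    func (c *counter) incr() { atomic.AddUint64(&c.n, 1) }

    // drain returns the count accumulated since the last drain and resets it to
    // zero in one atomic step, so concurrent increments are never lost.
    func (c *counter) drain() uint64 { return atomic.SwapUint64(&c.n, 0) }

    func main() {
        var succeeded counter
        succeeded.incr()
        succeeded.incr()
        fmt.Println(succeeded.drain()) // 2: reported for this interval
        fmt.Println(succeeded.drain()) // 0: nothing new since the last report
    }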
+type rpcLoadData struct { + mu sync.Mutex + sum float64 + count uint64 +} + +func newRPCLoadData() *rpcLoadData { + return &rpcLoadData{} +} + +func (rld *rpcLoadData) add(v float64) { + rld.mu.Lock() + rld.sum += v + rld.count++ + rld.mu.Unlock() +} + +func (rld *rpcLoadData) loadAndClear() (s float64, c uint64) { + rld.mu.Lock() + s = rld.sum + rld.sum = 0 + c = rld.count + rld.count = 0 + rld.mu.Unlock() + return +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/logging.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/logging.go new file mode 100644 index 0000000000..2269cb293d --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/logging.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +var logger = grpclog.Component("xds") + +func prefixLogger(p *clientImpl) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, clientPrefix(p)) +} + +func clientPrefix(p *clientImpl) string { + return fmt.Sprintf("[xds-client %p] ", p) +} + +func authorityPrefix(p *clientImpl, serverURI string) string { + return fmt.Sprintf("%s[%s] ", clientPrefix(p), serverURI) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/requests_counter.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/requests_counter.go new file mode 100644 index 0000000000..bd08c7e18a --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/requests_counter.go @@ -0,0 +1,107 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient + +import ( + "fmt" + "sync" + "sync/atomic" +) + +type clusterNameAndServiceName struct { + clusterName, edsServiceName string +} + +type clusterRequestsCounter struct { + mu sync.Mutex + clusters map[clusterNameAndServiceName]*ClusterRequestsCounter +} + +var src = &clusterRequestsCounter{ + clusters: make(map[clusterNameAndServiceName]*ClusterRequestsCounter), +} + +// ClusterRequestsCounter is used to track the total inflight requests for a +// service with the provided name. +type ClusterRequestsCounter struct { + ClusterName string + EDSServiceName string + numRequests uint32 +} + +// GetClusterRequestsCounter returns the ClusterRequestsCounter with the +// provided serviceName. 
If one does not exist, it creates it.
+func GetClusterRequestsCounter(clusterName, edsServiceName string) *ClusterRequestsCounter {
+	src.mu.Lock()
+	defer src.mu.Unlock()
+	k := clusterNameAndServiceName{
+		clusterName:    clusterName,
+		edsServiceName: edsServiceName,
+	}
+	c, ok := src.clusters[k]
+	if !ok {
+		c = &ClusterRequestsCounter{ClusterName: clusterName, EDSServiceName: edsServiceName}
+		src.clusters[k] = c
+	}
+	return c
+}
+
+// StartRequest starts a request for a cluster, incrementing its number of
+// requests by 1. Returns an error if the max number of requests is exceeded.
+func (c *ClusterRequestsCounter) StartRequest(max uint32) error {
+	// Note that during a race, the limits could be exceeded. This is allowed:
+	// "Since the implementation is eventually consistent, races between threads
+	// may allow limits to be potentially exceeded."
+	// https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/circuit_breaking#arch-overview-circuit-break.
+	if atomic.LoadUint32(&c.numRequests) >= max {
+		return fmt.Errorf("max requests %v exceeded on service %v", max, c.ClusterName)
+	}
+	atomic.AddUint32(&c.numRequests, 1)
+	return nil
+}
+
+// EndRequest ends a request for a service, decrementing its number of requests
+// by 1.
+func (c *ClusterRequestsCounter) EndRequest() {
+	atomic.AddUint32(&c.numRequests, ^uint32(0))
+}
+
+// ClearCounterForTesting clears the counter for the service. Should only be
+// used in tests.
+func ClearCounterForTesting(clusterName, edsServiceName string) {
+	src.mu.Lock()
+	defer src.mu.Unlock()
+	k := clusterNameAndServiceName{
+		clusterName:    clusterName,
+		edsServiceName: edsServiceName,
+	}
+	c, ok := src.clusters[k]
+	if !ok {
+		return
+	}
+	c.numRequests = 0
+}
+
+// ClearAllCountersForTesting clears all the counters. Should only be used in
+// tests.
+func ClearAllCountersForTesting() {
+	src.mu.Lock()
+	defer src.mu.Unlock()
+	src.clusters = make(map[clusterNameAndServiceName]*ClusterRequestsCounter)
+}
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go
new file mode 100644
index 0000000000..9acc33cbbf
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go
@@ -0,0 +1,25 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package internal contains functionality internal to the transport package.
+package internal
+
+// The following vars can be overridden by tests.
+var (
+	// GRPCNewClient creates a new gRPC Client.
+ GRPCNewClient any // func(string, ...grpc.DialOption) (*grpc.ClientConn, error) +) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go new file mode 100644 index 0000000000..e47fdd9846 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go @@ -0,0 +1,259 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package transport + +import ( + "context" + "errors" + "fmt" + "io" + "time" + + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/durationpb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3lrsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" + v3lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" +) + +type lrsStream = v3lrsgrpc.LoadReportingService_StreamLoadStatsClient + +// ReportLoad starts reporting loads to the management server the transport is +// configured to use. +// +// It returns a Store for the user to report loads and a function to cancel the +// load reporting. +func (t *Transport) ReportLoad() (*load.Store, func()) { + t.lrsStartStream() + return t.lrsStore, grpcsync.OnceFunc(func() { t.lrsStopStream() }) +} + +// lrsStartStream starts an LRS stream to the server, if none exists. +func (t *Transport) lrsStartStream() { + t.lrsMu.Lock() + defer t.lrsMu.Unlock() + + t.lrsRefCount++ + if t.lrsRefCount != 1 { + // Return early if the stream has already been started. + return + } + + ctx, cancel := context.WithCancel(context.Background()) + t.lrsCancelStream = cancel + + // Create a new done channel everytime a new stream is created. This ensures + // that we don't close the same channel multiple times (from lrsRunner() + // goroutine) when multiple streams are created and closed. + t.lrsRunnerDoneCh = make(chan struct{}) + go t.lrsRunner(ctx) +} + +// lrsStopStream closes the LRS stream, if this is the last user of the stream. +func (t *Transport) lrsStopStream() { + t.lrsMu.Lock() + defer t.lrsMu.Unlock() + + t.lrsRefCount-- + if t.lrsRefCount != 0 { + // Return early if the stream has other references. + return + } + + t.lrsCancelStream() + t.logger.Infof("Stopping LRS stream") + + // Wait for the runner goroutine to exit. The done channel will be + // recreated when a new stream is created. + <-t.lrsRunnerDoneCh +} + +// lrsRunner starts an LRS stream to report load data to the management server. +// It reports load at constant intervals (as configured by the management +// server) until the context is cancelled. 
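+// NOTE (illustrative sketch, not part of the vendored file): a typical caller
+// of the ReportLoad method defined above obtains the load.Store, records
+// per-call stats against it, and cancels reporting when done. The method names
+// used on the store below are the editor's understanding of the load package
+// and should be read as assumptions:
+//
+//	store, stop := t.ReportLoad() // t is an existing *Transport
+//	defer stop()
+//	reporter := store.PerCluster("example-cluster", "example-eds-service")
+//	reporter.CallStarted("locality-a")
+//	// ... RPC completes ...
+//	reporter.CallFinished("locality-a", nil)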
+func (t *Transport) lrsRunner(ctx context.Context) {
+	defer close(t.lrsRunnerDoneCh)
+
+	// This feature indicates that the client supports the
+	// LoadStatsResponse.send_all_clusters field in the LRS response.
+	node := proto.Clone(t.nodeProto).(*v3corepb.Node)
+	node.ClientFeatures = append(node.ClientFeatures, "envoy.lrs.supports_send_all_clusters")
+
+	runLoadReportStream := func() error {
+		// streamCtx is created and canceled in case we terminate the stream
+		// early for any reason, to avoid gRPC-Go leaking the RPC's monitoring
+		// goroutine.
+		streamCtx, cancel := context.WithCancel(ctx)
+		defer cancel()
+		stream, err := v3lrsgrpc.NewLoadReportingServiceClient(t.cc).StreamLoadStats(streamCtx)
+		if err != nil {
+			t.logger.Warningf("Creating LRS stream to server %q failed: %v", t.serverURI, err)
+			return nil
+		}
+		t.logger.Infof("Created LRS stream to server %q", t.serverURI)
+
+		if err := t.sendFirstLoadStatsRequest(stream, node); err != nil {
+			t.logger.Warningf("Sending first LRS request failed: %v", err)
+			return nil
+		}
+
+		clusters, interval, err := t.recvFirstLoadStatsResponse(stream)
+		if err != nil {
+			t.logger.Warningf("Reading from LRS stream failed: %v", err)
+			return nil
+		}
+
+		// We reset backoff state when we successfully receive at least one
+		// message from the server.
+		t.sendLoads(streamCtx, stream, clusters, interval)
+		return backoff.ErrResetBackoff
+	}
+	backoff.RunF(ctx, runLoadReportStream, t.backoff)
+}
+
+func (t *Transport) sendLoads(ctx context.Context, stream lrsStream, clusterNames []string, interval time.Duration) {
+	tick := time.NewTicker(interval)
+	defer tick.Stop()
+	for {
+		select {
+		case <-tick.C:
+		case <-ctx.Done():
+			return
+		}
+		if err := t.sendLoadStatsRequest(stream, t.lrsStore.Stats(clusterNames)); err != nil {
+			t.logger.Warningf("Writing to LRS stream failed: %v", err)
+			return
+		}
+	}
+}
+
+func (t *Transport) sendFirstLoadStatsRequest(stream lrsStream, node *v3corepb.Node) error {
+	req := &v3lrspb.LoadStatsRequest{Node: node}
+	if t.logger.V(perRPCVerbosityLevel) {
+		t.logger.Infof("Sending initial LoadStatsRequest: %s", pretty.ToJSON(req))
+	}
+	err := stream.Send(req)
+	if err == io.EOF {
+		return getStreamError(stream)
+	}
+	return err
+}
+
+func (t *Transport) recvFirstLoadStatsResponse(stream lrsStream) ([]string, time.Duration, error) {
+	resp, err := stream.Recv()
+	if err != nil {
+		return nil, 0, fmt.Errorf("failed to receive first LoadStatsResponse: %v", err)
+	}
+	if t.logger.V(perRPCVerbosityLevel) {
+		t.logger.Infof("Received first LoadStatsResponse: %s", pretty.ToJSON(resp))
+	}
+
+	rInterval := resp.GetLoadReportingInterval()
+	if err := rInterval.CheckValid(); err != nil {
+		return nil, 0, fmt.Errorf("invalid load_reporting_interval: %v", err)
+	}
+	interval := rInterval.AsDuration()
+
+	if resp.ReportEndpointGranularity {
+		// TODO(easwars): Support per endpoint loads.
+		return nil, 0, errors.New("lrs: endpoint loads requested, but not supported by current implementation")
+	}
+
+	clusters := resp.Clusters
+	if resp.SendAllClusters {
+		// Return nil to send stats for all clusters.
+ clusters = nil + } + + return clusters, interval, nil +} + +func (t *Transport) sendLoadStatsRequest(stream lrsStream, loads []*load.Data) error { + clusterStats := make([]*v3endpointpb.ClusterStats, 0, len(loads)) + for _, sd := range loads { + droppedReqs := make([]*v3endpointpb.ClusterStats_DroppedRequests, 0, len(sd.Drops)) + for category, count := range sd.Drops { + droppedReqs = append(droppedReqs, &v3endpointpb.ClusterStats_DroppedRequests{ + Category: category, + DroppedCount: count, + }) + } + localityStats := make([]*v3endpointpb.UpstreamLocalityStats, 0, len(sd.LocalityStats)) + for l, localityData := range sd.LocalityStats { + lid, err := internal.LocalityIDFromString(l) + if err != nil { + return err + } + loadMetricStats := make([]*v3endpointpb.EndpointLoadMetricStats, 0, len(localityData.LoadStats)) + for name, loadData := range localityData.LoadStats { + loadMetricStats = append(loadMetricStats, &v3endpointpb.EndpointLoadMetricStats{ + MetricName: name, + NumRequestsFinishedWithMetric: loadData.Count, + TotalMetricValue: loadData.Sum, + }) + } + localityStats = append(localityStats, &v3endpointpb.UpstreamLocalityStats{ + Locality: &v3corepb.Locality{ + Region: lid.Region, + Zone: lid.Zone, + SubZone: lid.SubZone, + }, + TotalSuccessfulRequests: localityData.RequestStats.Succeeded, + TotalRequestsInProgress: localityData.RequestStats.InProgress, + TotalErrorRequests: localityData.RequestStats.Errored, + TotalIssuedRequests: localityData.RequestStats.Issued, + LoadMetricStats: loadMetricStats, + UpstreamEndpointStats: nil, // TODO: populate for per endpoint loads. + }) + } + + clusterStats = append(clusterStats, &v3endpointpb.ClusterStats{ + ClusterName: sd.Cluster, + ClusterServiceName: sd.Service, + UpstreamLocalityStats: localityStats, + TotalDroppedRequests: sd.TotalDrops, + DroppedRequests: droppedReqs, + LoadReportInterval: durationpb.New(sd.ReportInterval), + }) + } + + req := &v3lrspb.LoadStatsRequest{ClusterStats: clusterStats} + if t.logger.V(perRPCVerbosityLevel) { + t.logger.Infof("Sending LRS loads: %s", pretty.ToJSON(req)) + } + err := stream.Send(req) + if err == io.EOF { + return getStreamError(stream) + } + return err +} + +func getStreamError(stream lrsStream) error { + for { + if _, err := stream.Recv(); err != nil { + return err + } + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go new file mode 100644 index 0000000000..0bc0d38680 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go @@ -0,0 +1,704 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package transport implements the xDS transport protocol functionality +// required by the xdsclient. 
+package transport + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/keepalive" + xdsclientinternal "google.golang.org/grpc/xds/internal/xdsclient/internal" + "google.golang.org/grpc/xds/internal/xdsclient/load" + transportinternal "google.golang.org/grpc/xds/internal/xdsclient/transport/internal" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/protobuf/types/known/anypb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + statuspb "google.golang.org/genproto/googleapis/rpc/status" +) + +type adsStream = v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient + +func init() { + transportinternal.GRPCNewClient = grpc.NewClient + xdsclientinternal.NewADSStream = func(ctx context.Context, cc *grpc.ClientConn) (adsStream, error) { + return v3adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx) + } +} + +// Any per-RPC level logs which print complete request or response messages +// should be gated at this verbosity level. Other per-RPC level logs which print +// terse output should be at `INFO` and verbosity 2. +const perRPCVerbosityLevel = 9 + +// Transport provides a resource-type agnostic implementation of the xDS +// transport protocol. At this layer, resource contents are supposed to be +// opaque blobs which should be meaningful only to the xDS data model layer +// which is implemented by the `xdsresource` package. +// +// Under the hood, it owns the gRPC connection to a single management server and +// manages the lifecycle of ADS/LRS streams. It uses the xDS v3 transport +// protocol version. +type Transport struct { + // These fields are initialized at creation time and are read-only afterwards. + cc *grpc.ClientConn // ClientConn to the management server. + serverURI string // URI of the management server. + onRecvHandler OnRecvHandlerFunc // Resource update handler. xDS data model layer. + onErrorHandler func(error) // To report underlying stream errors. + onSendHandler OnSendHandlerFunc // To report resources requested on ADS stream. + lrsStore *load.Store // Store returned to user for pushing loads. + backoff func(int) time.Duration // Backoff after stream failures. + nodeProto *v3corepb.Node // Identifies the gRPC application. + logger *grpclog.PrefixLogger // Prefix logger for transport logs. + adsRunnerCancel context.CancelFunc // CancelFunc for the ADS goroutine. + adsRunnerDoneCh chan struct{} // To notify exit of ADS goroutine. + lrsRunnerDoneCh chan struct{} // To notify exit of LRS goroutine. + + // These channels enable synchronization amongst the different goroutines + // spawned by the transport, and between asynchronous events resulting from + // receipt of responses from the management server. + adsStreamCh chan adsStream // New ADS streams are pushed here. + adsRequestCh *buffer.Unbounded // Resource and ack requests are pushed here. + + // mu guards the following runtime state maintained by the transport. 
+ mu sync.Mutex + // resources is map from resource type URL to the set of resource names + // being requested for that type. When the ADS stream is restarted, the + // transport requests all these resources again from the management server. + resources map[string]map[string]bool + // versions is a map from resource type URL to the most recently ACKed + // version for that resource. Resource versions are a property of the + // resource type and not the stream, and will not be reset upon stream + // restarts. + versions map[string]string + // nonces is a map from resource type URL to the most recently received + // nonce for that resource type. Nonces are a property of the ADS stream and + // will be reset upon stream restarts. + nonces map[string]string + + lrsMu sync.Mutex // Protects all LRS state. + lrsCancelStream context.CancelFunc // CancelFunc for the LRS stream. + lrsRefCount int // Reference count on the load store. +} + +// OnRecvHandlerFunc is the implementation at the xDS data model layer, which +// determines if the configuration received from the management server can be +// applied locally or not. +// +// A nil error is returned from this function when the data model layer believes +// that the received configuration is good and can be applied locally. This will +// cause the transport layer to send an ACK to the management server. A non-nil +// error is returned from this function when the data model layer believes +// otherwise, and this will cause the transport layer to send a NACK. +// +// The implementation is expected to invoke onDone when local processing of the +// update is complete, i.e. it is consumed by all watchers. +type OnRecvHandlerFunc func(update ResourceUpdate, onDone func()) error + +// OnSendHandlerFunc is the implementation at the authority, which handles state +// changes for the resource watch and stop watch timers accordingly. +type OnSendHandlerFunc func(update *ResourceSendInfo) + +// ResourceUpdate is a representation of the configuration update received from +// the management server. It only contains fields which are useful to the data +// model layer, and layers above it. +type ResourceUpdate struct { + // Resources is the list of resources received from the management server. + Resources []*anypb.Any + // URL is the resource type URL for the above resources. + URL string + // Version is the resource version, for the above resources, as specified by + // the management server. + Version string +} + +// Options specifies configuration knobs used when creating a new Transport. +type Options struct { + // ServerCfg contains all the configuration required to connect to the xDS + // management server. + ServerCfg *bootstrap.ServerConfig + // OnRecvHandler is the component which makes ACK/NACK decisions based on + // the received resources. + // + // Invoked inline and implementations must not block. + OnRecvHandler OnRecvHandlerFunc + // OnErrorHandler provides a way for the transport layer to report + // underlying stream errors. These can be bubbled all the way up to the user + // of the xdsClient. + // + // Invoked inline and implementations must not block. + OnErrorHandler func(error) + // OnSendHandler provides a way for the transport layer to report underlying + // resource requests sent on the stream. However, Send() on the ADS stream will + // return successfully as long as: + // 1. there is enough flow control quota to send the message. + // 2. the message is added to the send buffer. 
+ // However, the connection may fail after the callback is invoked and before + // the message is actually sent on the wire. This is accepted. + // + // Invoked inline and implementations must not block. + OnSendHandler func(*ResourceSendInfo) + // Backoff controls the amount of time to backoff before recreating failed + // ADS streams. If unspecified, a default exponential backoff implementation + // is used. For more details, see: + // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. + Backoff func(retries int) time.Duration + // Logger does logging with a prefix. + Logger *grpclog.PrefixLogger + // NodeProto contains the Node proto to be used in xDS requests. This will be + // of type *v3corepb.Node. + NodeProto *v3corepb.Node +} + +// New creates a new Transport. +func New(opts Options) (*Transport, error) { + switch { + case opts.OnRecvHandler == nil: + return nil, errors.New("missing OnRecv callback handler when creating a new transport") + case opts.OnErrorHandler == nil: + return nil, errors.New("missing OnError callback handler when creating a new transport") + case opts.OnSendHandler == nil: + return nil, errors.New("missing OnSend callback handler when creating a new transport") + } + + // Dial the xDS management with the passed in credentials. + dopts := []grpc.DialOption{ + opts.ServerCfg.CredsDialOption(), + grpc.WithKeepaliveParams(keepalive.ClientParameters{ + // We decided to use these sane defaults in all languages, and + // kicked the can down the road as far making these configurable. + Time: 5 * time.Minute, + Timeout: 20 * time.Second, + }), + } + grpcNewClient := transportinternal.GRPCNewClient.(func(string, ...grpc.DialOption) (*grpc.ClientConn, error)) + cc, err := grpcNewClient(opts.ServerCfg.ServerURI(), dopts...) + if err != nil { + // An error from a non-blocking dial indicates something serious. + return nil, fmt.Errorf("failed to create a transport to the management server %q: %v", opts.ServerCfg.ServerURI(), err) + } + cc.Connect() + + boff := opts.Backoff + if boff == nil { + boff = backoff.DefaultExponential.Backoff + } + ret := &Transport{ + cc: cc, + serverURI: opts.ServerCfg.ServerURI(), + onRecvHandler: opts.OnRecvHandler, + onErrorHandler: opts.OnErrorHandler, + onSendHandler: opts.OnSendHandler, + lrsStore: load.NewStore(), + backoff: boff, + nodeProto: opts.NodeProto, + logger: opts.Logger, + + adsStreamCh: make(chan adsStream, 1), + adsRequestCh: buffer.NewUnbounded(), + resources: make(map[string]map[string]bool), + versions: make(map[string]string), + nonces: make(map[string]string), + adsRunnerDoneCh: make(chan struct{}), + } + + // This context is used for sending and receiving RPC requests and + // responses. It is also used by all the goroutines spawned by this + // Transport. Therefore, cancelling this context when the transport is + // closed will essentially cancel any pending RPCs, and cause the goroutines + // to terminate. + ctx, cancel := context.WithCancel(context.Background()) + ret.adsRunnerCancel = cancel + go ret.adsRunner(ctx) + + ret.logger.Infof("Created transport to server %q", ret.serverURI) + return ret, nil +} + +// resourceRequest wraps the resource type url and the resource names requested +// by the user of this transport. +type resourceRequest struct { + resources []string + url string +} + +// SendRequest sends out an ADS request for the provided resources of the +// specified resource type. +// +// The request is sent out asynchronously. 
If no valid stream exists at the time +// of processing this request, it is queued and will be sent out once a valid +// stream exists. +// +// If a successful response is received, the update handler callback provided at +// creation time is invoked. If an error is encountered, the stream error +// handler callback provided at creation time is invoked. +func (t *Transport) SendRequest(url string, resources []string) { + t.adsRequestCh.Put(&resourceRequest{ + url: url, + resources: resources, + }) +} + +// ResourceSendInfo wraps the names and url of resources sent to the management +// server. This is used by the `authority` type to start/stop the watch timer +// associated with every resource in the update. +type ResourceSendInfo struct { + ResourceNames []string + URL string +} + +func (t *Transport) sendAggregatedDiscoveryServiceRequest(stream adsStream, sendNodeProto bool, resourceNames []string, resourceURL, version, nonce string, nackErr error) error { + req := &v3discoverypb.DiscoveryRequest{ + TypeUrl: resourceURL, + ResourceNames: resourceNames, + VersionInfo: version, + ResponseNonce: nonce, + } + if sendNodeProto { + req.Node = t.nodeProto + } + if nackErr != nil { + req.ErrorDetail = &statuspb.Status{ + Code: int32(codes.InvalidArgument), Message: nackErr.Error(), + } + } + if err := stream.Send(req); err != nil { + return err + } + if t.logger.V(perRPCVerbosityLevel) { + t.logger.Infof("ADS request sent: %v", pretty.ToJSON(req)) + } else { + if t.logger.V(2) { + t.logger.Infof("ADS request sent for type %q, resources: %v, version %q, nonce %q", resourceURL, resourceNames, version, nonce) + } + } + t.onSendHandler(&ResourceSendInfo{URL: resourceURL, ResourceNames: resourceNames}) + return nil +} + +func (t *Transport) recvAggregatedDiscoveryServiceResponse(stream adsStream) (resources []*anypb.Any, resourceURL, version, nonce string, err error) { + resp, err := stream.Recv() + if err != nil { + return nil, "", "", "", err + } + if t.logger.V(perRPCVerbosityLevel) { + t.logger.Infof("ADS response received: %v", pretty.ToJSON(resp)) + } else if t.logger.V(2) { + t.logger.Infof("ADS response received for type %q, version %q, nonce %q", resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce()) + } + return resp.GetResources(), resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce(), nil +} + +// adsRunner starts an ADS stream (and backs off exponentially, if the previous +// stream failed without receiving a single reply) and runs the sender and +// receiver routines to send and receive data from the stream respectively. +func (t *Transport) adsRunner(ctx context.Context) { + defer close(t.adsRunnerDoneCh) + + go t.send(ctx) + + // We reset backoff state when we successfully receive at least one + // message from the server. + runStreamWithBackoff := func() error { + newStream := xdsclientinternal.NewADSStream.(func(context.Context, *grpc.ClientConn) (adsStream, error)) + stream, err := newStream(ctx, t.cc) + if err != nil { + t.onErrorHandler(err) + t.logger.Warningf("Creating new ADS stream failed: %v", err) + return nil + } + t.logger.Infof("ADS stream created") + + select { + case <-t.adsStreamCh: + default: + } + t.adsStreamCh <- stream + msgReceived := t.recv(ctx, stream) + if msgReceived { + return backoff.ErrResetBackoff + } + return nil + } + backoff.RunF(ctx, runStreamWithBackoff, t.backoff) +} + +// send is a separate goroutine for sending resource requests on the ADS stream. 
+// +// For every new stream received on the stream channel, all existing resources +// are re-requested from the management server. +// +// For every new resource request received on the resources channel, the +// resources map is updated (this ensures that resend will pick them up when +// there are new streams) and the appropriate request is sent out. +func (t *Transport) send(ctx context.Context) { + var stream adsStream + // The xDS protocol only requires that we send the node proto in the first + // discovery request on every stream. Sending the node proto in every + // request message wastes CPU resources on the client and the server. + sentNodeProto := false + for { + select { + case <-ctx.Done(): + return + case stream = <-t.adsStreamCh: + // We have a new stream and we've to ensure that the node proto gets + // sent out in the first request on the stream. + var err error + if sentNodeProto, err = t.sendExisting(stream); err != nil { + // Send failed, clear the current stream. Attempt to resend will + // only be made after a new stream is created. + stream = nil + continue + } + case u, ok := <-t.adsRequestCh.Get(): + if !ok { + // No requests will be sent after the adsRequestCh buffer is closed. + return + } + t.adsRequestCh.Load() + + var ( + resources []string + url, version, nonce string + send bool + nackErr error + ) + switch update := u.(type) { + case *resourceRequest: + resources, url, version, nonce = t.processResourceRequest(update) + case *ackRequest: + resources, url, version, nonce, send = t.processAckRequest(update, stream) + if !send { + continue + } + nackErr = update.nackErr + } + if stream == nil { + // There's no stream yet. Skip the request. This request + // will be resent to the new streams. If no stream is + // created, the watcher will timeout (same as server not + // sending response back). + continue + } + if err := t.sendAggregatedDiscoveryServiceRequest(stream, !sentNodeProto, resources, url, version, nonce, nackErr); err != nil { + t.logger.Warningf("Sending ADS request for resources: %q, url: %q, version: %q, nonce: %q failed: %v", resources, url, version, nonce, err) + // Send failed, clear the current stream. + stream = nil + } + sentNodeProto = true + } + } +} + +// sendExisting sends out xDS requests for existing resources when recovering +// from a broken stream. +// +// We call stream.Send() here with the lock being held. It should be OK to do +// that here because the stream has just started and Send() usually returns +// quickly (once it pushes the message onto the transport layer) and is only +// ever blocked if we don't have enough flow control quota. +// +// Returns true if the node proto was sent. +func (t *Transport) sendExisting(stream adsStream) (sentNodeProto bool, err error) { + t.mu.Lock() + defer t.mu.Unlock() + + // Reset only the nonces map when the stream restarts. + // + // xDS spec says the following. See section: + // https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol#ack-nack-and-resource-type-instance-version + // + // Note that the version for a resource type is not a property of an + // individual xDS stream but rather a property of the resources themselves. If + // the stream becomes broken and the client creates a new stream, the client’s + // initial request on the new stream should indicate the most recent version + // seen by the client on the previous stream + t.nonces = make(map[string]string) + + // Send node proto only in the first request on the stream. 
+ for url, resources := range t.resources { + if len(resources) == 0 { + continue + } + if err := t.sendAggregatedDiscoveryServiceRequest(stream, !sentNodeProto, mapToSlice(resources), url, t.versions[url], "", nil); err != nil { + t.logger.Warningf("Sending ADS request for resources: %q, url: %q, version: %q, nonce: %q failed: %v", resources, url, t.versions[url], "", err) + return false, err + } + sentNodeProto = true + } + + return sentNodeProto, nil +} + +// recv receives xDS responses on the provided ADS stream and branches out to +// message specific handlers. Returns true if at least one message was +// successfully received. +func (t *Transport) recv(ctx context.Context, stream adsStream) bool { + // Initialize the flow control quota for the stream. This helps to block the + // next read until the previous one is consumed by all watchers. + fc := newADSFlowControl() + + msgReceived := false + for { + // Wait for ADS stream level flow control to be available. + if !fc.wait(ctx) { + if t.logger.V(2) { + t.logger.Infof("ADS stream context canceled") + } + return msgReceived + } + + resources, url, rVersion, nonce, err := t.recvAggregatedDiscoveryServiceResponse(stream) + if err != nil { + // Note that we do not consider it an error if the ADS stream was closed + // after having received a response on the stream. This is because there + // are legitimate reasons why the server may need to close the stream during + // normal operations, such as needing to rebalance load or the underlying + // connection hitting its max connection age limit. + // (see [gRFC A9](https://github.com/grpc/proposal/blob/master/A9-server-side-conn-mgt.md)). + if msgReceived { + err = xdsresource.NewErrorf(xdsresource.ErrTypeStreamFailedAfterRecv, err.Error()) + } + t.onErrorHandler(err) + t.logger.Warningf("ADS stream closed: %v", err) + return msgReceived + } + msgReceived = true + + u := ResourceUpdate{ + Resources: resources, + URL: url, + Version: rVersion, + } + fc.setPending() + if err = t.onRecvHandler(u, fc.onDone); xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceTypeUnsupported { + t.logger.Warningf("%v", err) + continue + } + // If the data model layer returned an error, we need to NACK the + // response in which case we need to set the version to the most + // recently accepted version of this resource type. + if err != nil { + t.mu.Lock() + t.adsRequestCh.Put(&ackRequest{ + url: url, + nonce: nonce, + stream: stream, + version: t.versions[url], + nackErr: err, + }) + t.mu.Unlock() + t.logger.Warningf("Sending NACK for resource type: %q, version: %q, nonce: %q, reason: %v", url, rVersion, nonce, err) + continue + } + t.adsRequestCh.Put(&ackRequest{ + url: url, + nonce: nonce, + stream: stream, + version: rVersion, + }) + if t.logger.V(2) { + t.logger.Infof("Sending ACK for resource type: %q, version: %q, nonce: %q", url, rVersion, nonce) + } + } +} + +func mapToSlice(m map[string]bool) []string { + ret := make([]string, 0, len(m)) + for i := range m { + ret = append(ret, i) + } + return ret +} + +func sliceToMap(ss []string) map[string]bool { + ret := make(map[string]bool, len(ss)) + for _, s := range ss { + ret[s] = true + } + return ret +} + +// processResourceRequest pulls the fields needed to send out an ADS request. +// The resource type and the list of resources to request are provided by the +// user, while the version and nonce are maintained internally. +// +// The resources map, which keeps track of the resources being requested, is +// updated here. 
Any subsequent stream failure will re-request resources stored +// in this map. +// +// Returns the list of resources, resource type url, version and nonce. +func (t *Transport) processResourceRequest(req *resourceRequest) ([]string, string, string, string) { + t.mu.Lock() + defer t.mu.Unlock() + + resources := sliceToMap(req.resources) + t.resources[req.url] = resources + return req.resources, req.url, t.versions[req.url], t.nonces[req.url] +} + +type ackRequest struct { + url string // Resource type URL. + version string // NACK if version is an empty string. + nonce string + nackErr error // nil for ACK, non-nil for NACK. + // ACK/NACK are tagged with the stream it's for. When the stream is down, + // all the ACK/NACK for this stream will be dropped, and the version/nonce + // won't be updated. + stream grpc.ClientStream +} + +// processAckRequest pulls the fields needed to send out an ADS ACK. The nonces +// and versions map is updated. +// +// Returns the list of resources, resource type url, version, nonce, and an +// indication of whether an ACK should be sent on the wire or not. +func (t *Transport) processAckRequest(ack *ackRequest, stream grpc.ClientStream) ([]string, string, string, string, bool) { + if ack.stream != stream { + // If ACK's stream isn't the current sending stream, this means the ACK + // was pushed to queue before the old stream broke, and a new stream has + // been started since. Return immediately here so we don't update the + // nonce for the new stream. + return nil, "", "", "", false + } + + t.mu.Lock() + defer t.mu.Unlock() + + // Update the nonce irrespective of whether we send the ACK request on wire. + // An up-to-date nonce is required for the next request. + nonce := ack.nonce + t.nonces[ack.url] = nonce + + s, ok := t.resources[ack.url] + if !ok || len(s) == 0 { + // We don't send the ACK request if there are no resources of this type + // in our resources map. This can be either when the server sends + // responses before any request, or the resources are removed while the + // ackRequest was in queue). If we send a request with an empty + // resource name list, the server may treat it as a wild card and send + // us everything. + return nil, "", "", "", false + } + resources := mapToSlice(s) + + // Update the versions map only when we plan to send an ACK. + if ack.nackErr == nil { + t.versions[ack.url] = ack.version + } + + return resources, ack.url, ack.version, nonce, true +} + +// Close closes the Transport and frees any associated resources. +func (t *Transport) Close() { + t.adsRunnerCancel() + <-t.adsRunnerDoneCh + t.adsRequestCh.Close() + t.cc.Close() +} + +// ChannelConnectivityStateForTesting returns the connectivity state of the gRPC +// channel to the management server. +// +// Only for testing purposes. +func (t *Transport) ChannelConnectivityStateForTesting() connectivity.State { + return t.cc.GetState() +} + +// adsFlowControl implements ADS stream level flow control that enables the +// transport to block the reading of the next message off of the stream until +// the previous update is consumed by all watchers. +// +// The lifetime of the flow control is tied to the lifetime of the stream. +type adsFlowControl struct { + logger *grpclog.PrefixLogger + + // Whether the most recent update is pending consumption by all watchers. + pending atomic.Bool + // Channel used to notify when all the watchers have consumed the most + // recent update. Wait() blocks on reading a value from this channel. 
+ readyCh chan struct{} +} + +// newADSFlowControl returns a new adsFlowControl. +func newADSFlowControl() *adsFlowControl { + return &adsFlowControl{readyCh: make(chan struct{}, 1)} +} + +// setPending changes the internal state to indicate that there is an update +// pending consumption by all watchers. +func (fc *adsFlowControl) setPending() { + fc.pending.Store(true) +} + +// wait blocks until all the watchers have consumed the most recent update and +// returns true. If the context expires before that, it returns false. +func (fc *adsFlowControl) wait(ctx context.Context) bool { + // If there is no pending update, there is no need to block. + if !fc.pending.Load() { + // If all watchers finished processing the most recent update before the + // `recv` goroutine made the next call to `Wait()`, there would be an + // entry in the readyCh channel that needs to be drained to ensure that + // the next call to `Wait()` doesn't unblock before it actually should. + select { + case <-fc.readyCh: + default: + } + return true + } + + select { + case <-ctx.Done(): + return false + case <-fc.readyCh: + return true + } +} + +// onDone indicates that all watchers have consumed the most recent update. +func (fc *adsFlowControl) onDone() { + fc.pending.Store(false) + + select { + // Writes to the readyCh channel should not block ideally. The default + // branch here is to appease the paranoid mind. + case fc.readyCh <- struct{}{}: + default: + if fc.logger.V(2) { + fc.logger.Infof("ADS stream flow control readyCh is full") + } + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter/converter.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter/converter.go new file mode 100644 index 0000000000..3c48f1bdea --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter/converter.go @@ -0,0 +1,255 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package converter provides converters to convert proto load balancing +// configuration, defined by the xDS API spec, to JSON load balancing +// configuration. These converters are registered by proto type in a registry, +// which gets pulled from based off proto type passed in. 
+package converter + +import ( + "encoding/json" + "fmt" + "strings" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/leastrequest" + "google.golang.org/grpc/balancer/pickfirst" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/balancer/weightedroundrobin" + "google.golang.org/grpc/internal/envconfig" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/ringhash" + "google.golang.org/grpc/xds/internal/balancer/wrrlocality" + "google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/structpb" + + v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" + v3clientsideweightedroundrobinpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3" + v3leastrequestpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3" + v3pickfirstpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3" + v3ringhashpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3" + v3wrrlocalitypb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3" +) + +func init() { + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin", convertWeightedRoundRobinProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash", convertRingHashProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.pick_first.v3.PickFirst", convertPickFirstProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.round_robin.v3.RoundRobin", convertRoundRobinProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality", convertWRRLocalityProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.least_request.v3.LeastRequest", convertLeastRequestProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/udpa.type.v1.TypedStruct", convertV1TypedStructToServiceConfig) + xdslbregistry.Register("type.googleapis.com/xds.type.v3.TypedStruct", convertV3TypedStructToServiceConfig) +} + +const ( + defaultRingHashMinSize = 1024 + defaultRingHashMaxSize = 8 * 1024 * 1024 // 8M + defaultLeastRequestChoiceCount = 2 +) + +func convertRingHashProtoToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) { + rhProto := &v3ringhashpb.RingHash{} + if err := proto.Unmarshal(rawProto, rhProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + if rhProto.GetHashFunction() != v3ringhashpb.RingHash_XX_HASH { + return nil, fmt.Errorf("unsupported ring_hash hash function %v", rhProto.GetHashFunction()) + } + + var minSize, maxSize uint64 = defaultRingHashMinSize, defaultRingHashMaxSize + if min := rhProto.GetMinimumRingSize(); min != nil { + minSize = min.GetValue() + } + if max := rhProto.GetMaximumRingSize(); max != nil { + maxSize = max.GetValue() + } + + rhCfg := &ringhash.LBConfig{ + MinRingSize: minSize, + MaxRingSize: maxSize, + } + + rhCfgJSON, err := 
json.Marshal(rhCfg) + if err != nil { + return nil, fmt.Errorf("error marshaling JSON for type %T: %v", rhCfg, err) + } + return makeBalancerConfigJSON(ringhash.Name, rhCfgJSON), nil +} + +type pfConfig struct { + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +func convertPickFirstProtoToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) { + pfProto := &v3pickfirstpb.PickFirst{} + if err := proto.Unmarshal(rawProto, pfProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + + pfCfg := &pfConfig{ShuffleAddressList: pfProto.GetShuffleAddressList()} + js, err := json.Marshal(pfCfg) + if err != nil { + return nil, fmt.Errorf("error marshaling JSON for type %T: %v", pfCfg, err) + } + return makeBalancerConfigJSON(pickfirst.Name, js), nil +} + +func convertRoundRobinProtoToServiceConfig([]byte, int) (json.RawMessage, error) { + return makeBalancerConfigJSON(roundrobin.Name, json.RawMessage("{}")), nil +} + +type wrrLocalityLBConfig struct { + ChildPolicy json.RawMessage `json:"childPolicy,omitempty"` +} + +func convertWRRLocalityProtoToServiceConfig(rawProto []byte, depth int) (json.RawMessage, error) { + wrrlProto := &v3wrrlocalitypb.WrrLocality{} + if err := proto.Unmarshal(rawProto, wrrlProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + epJSON, err := xdslbregistry.ConvertToServiceConfig(wrrlProto.GetEndpointPickingPolicy(), depth+1) + if err != nil { + return nil, fmt.Errorf("error converting endpoint picking policy: %v for %+v", err, wrrlProto) + } + wrrLCfg := wrrLocalityLBConfig{ + ChildPolicy: epJSON, + } + + lbCfgJSON, err := json.Marshal(wrrLCfg) + if err != nil { + return nil, fmt.Errorf("error marshaling JSON for type %T: %v", wrrLCfg, err) + } + return makeBalancerConfigJSON(wrrlocality.Name, lbCfgJSON), nil +} + +func convertWeightedRoundRobinProtoToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) { + cswrrProto := &v3clientsideweightedroundrobinpb.ClientSideWeightedRoundRobin{} + if err := proto.Unmarshal(rawProto, cswrrProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + wrrLBCfg := &wrrLBConfig{} + // Only set fields if specified in proto. If not set, ParseConfig of the WRR + // will populate the config with defaults. 
+	if enableOOBLoadReportCfg := cswrrProto.GetEnableOobLoadReport(); enableOOBLoadReportCfg != nil {
+		wrrLBCfg.EnableOOBLoadReport = enableOOBLoadReportCfg.GetValue()
+	}
+	if oobReportingPeriodCfg := cswrrProto.GetOobReportingPeriod(); oobReportingPeriodCfg != nil {
+		wrrLBCfg.OOBReportingPeriod = internalserviceconfig.Duration(oobReportingPeriodCfg.AsDuration())
+	}
+	if blackoutPeriodCfg := cswrrProto.GetBlackoutPeriod(); blackoutPeriodCfg != nil {
+		wrrLBCfg.BlackoutPeriod = internalserviceconfig.Duration(blackoutPeriodCfg.AsDuration())
+	}
+	if weightExpirationPeriodCfg := cswrrProto.GetWeightExpirationPeriod(); weightExpirationPeriodCfg != nil {
+		wrrLBCfg.WeightExpirationPeriod = internalserviceconfig.Duration(weightExpirationPeriodCfg.AsDuration())
+	}
+	if weightUpdatePeriodCfg := cswrrProto.GetWeightUpdatePeriod(); weightUpdatePeriodCfg != nil {
+		wrrLBCfg.WeightUpdatePeriod = internalserviceconfig.Duration(weightUpdatePeriodCfg.AsDuration())
+	}
+	if errorUtilizationPenaltyCfg := cswrrProto.GetErrorUtilizationPenalty(); errorUtilizationPenaltyCfg != nil {
+		wrrLBCfg.ErrorUtilizationPenalty = float64(errorUtilizationPenaltyCfg.GetValue())
+	}
+
+	lbCfgJSON, err := json.Marshal(wrrLBCfg)
+	if err != nil {
+		return nil, fmt.Errorf("error marshaling JSON for type %T: %v", wrrLBCfg, err)
+	}
+	return makeBalancerConfigJSON(weightedroundrobin.Name, lbCfgJSON), nil
+}
+
+func convertLeastRequestProtoToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) {
+	if !envconfig.LeastRequestLB {
+		return nil, nil
+	}
+	lrProto := &v3leastrequestpb.LeastRequest{}
+	if err := proto.Unmarshal(rawProto, lrProto); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal resource: %v", err)
+	}
+	// "The configuration for the Least Request LB policy is the
+	// least_request_lb_config field. The field is optional; if not present,
+	// defaults will be assumed for all of its values." - A48
+	choiceCount := uint32(defaultLeastRequestChoiceCount)
+	if cc := lrProto.GetChoiceCount(); cc != nil {
+		choiceCount = cc.GetValue()
+	}
+	lrCfg := &leastrequest.LBConfig{ChoiceCount: choiceCount}
+	js, err := json.Marshal(lrCfg)
+	if err != nil {
+		return nil, fmt.Errorf("error marshaling JSON for type %T: %v", lrCfg, err)
+	}
+	return makeBalancerConfigJSON(leastrequest.Name, js), nil
+}
+
+func convertV1TypedStructToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) {
+	tsProto := &v1xdsudpatypepb.TypedStruct{}
+	if err := proto.Unmarshal(rawProto, tsProto); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal resource: %v", err)
+	}
+	return convertCustomPolicy(tsProto.GetTypeUrl(), tsProto.GetValue())
+}
+
+func convertV3TypedStructToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) {
+	tsProto := &v3xdsxdstypepb.TypedStruct{}
+	if err := proto.Unmarshal(rawProto, tsProto); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal resource: %v", err)
+	}
+	return convertCustomPolicy(tsProto.GetTypeUrl(), tsProto.GetValue())
+}
+
+// convertCustomPolicy attempts to prepare json configuration for a custom lb
+// proto, which specifies the gRPC balancer type and configuration. Returns the
+// converted json and an error which should cause the caller to fail if the
+// conversion errors. If both the returned json and error are nil, it means the
+// gRPC Balancer registry does not contain that balancer type, and the caller
+// should continue to the next policy.
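+// NOTE (illustrative sketch, not part of the vendored file): for a TypedStruct
+// whose type_url is, say, "type.googleapis.com/myorg.MyCustomPolicy"
+// (hypothetical), the conversion below would emit configuration of the shape
+// produced by makeBalancerConfigJSON:
+//
+//	[{"myorg.MyCustomPolicy": { ...the TypedStruct's embedded Struct as JSON... }}]
+//
+// i.e. a single-element JSON list keyed by the gRPC balancer name taken from
+// the portion of the type_url after the last "/".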
+func convertCustomPolicy(typeURL string, s *structpb.Struct) (json.RawMessage, error) { + // The gRPC policy name will be the "type name" part of the value of the + // type_url field in the TypedStruct. We get this by using the part after + // the last / character. Can assume a valid type_url from the control plane. + pos := strings.LastIndex(typeURL, "/") + name := typeURL[pos+1:] + + if balancer.Get(name) == nil { + return nil, nil + } + + rawJSON, err := json.Marshal(s) + if err != nil { + return nil, fmt.Errorf("error converting custom lb policy %v: %v for %+v", err, typeURL, s) + } + + // The Struct contained in the TypedStruct will be returned as-is as the + // configuration JSON object. + return makeBalancerConfigJSON(name, rawJSON), nil +} + +type wrrLBConfig struct { + EnableOOBLoadReport bool `json:"enableOobLoadReport,omitempty"` + OOBReportingPeriod internalserviceconfig.Duration `json:"oobReportingPeriod,omitempty"` + BlackoutPeriod internalserviceconfig.Duration `json:"blackoutPeriod,omitempty"` + WeightExpirationPeriod internalserviceconfig.Duration `json:"weightExpirationPeriod,omitempty"` + WeightUpdatePeriod internalserviceconfig.Duration `json:"weightUpdatePeriod,omitempty"` + ErrorUtilizationPenalty float64 `json:"errorUtilizationPenalty,omitempty"` +} + +func makeBalancerConfigJSON(name string, value json.RawMessage) []byte { + return []byte(fmt.Sprintf(`[{%q: %s}]`, name, value)) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/xdslbregistry.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/xdslbregistry.go new file mode 100644 index 0000000000..a7782709d4 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/xdslbregistry.go @@ -0,0 +1,84 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package xdslbregistry provides a registry of converters that convert proto +// from load balancing configuration, defined by the xDS API spec, to JSON load +// balancing configuration. +package xdslbregistry + +import ( + "encoding/json" + "fmt" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" +) + +var ( + // m is a map from proto type to Converter. + m = make(map[string]Converter) +) + +// Register registers the converter to the map keyed on a proto type. Must be +// called at init time. Not thread safe. +func Register(protoType string, c Converter) { + m[protoType] = c +} + +// SetRegistry sets the xDS LB registry. Must be called at init time. Not thread +// safe. +func SetRegistry(registry map[string]Converter) { + m = registry +} + +// Converter converts raw proto bytes into the internal Go JSON representation +// of the proto passed. Returns the json message, and an error. If both +// returned are nil, it represents continuing to the next proto. 
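+// NOTE (illustrative sketch, not part of the vendored file): a converter for a
+// hypothetical policy proto would be registered at init time against its type
+// URL, matching the Converter signature declared below. The type URL and the
+// emitted balancer name here are made up for illustration:
+//
+//	func init() {
+//		xdslbregistry.Register("type.googleapis.com/example.MyPolicy",
+//			func(rawProto []byte, _ int) (json.RawMessage, error) {
+//				// Unmarshal rawProto into the policy proto here, then return
+//				// its gRPC service-config form.
+//				return json.RawMessage(`[{"my_policy": {}}]`), nil
+//			})
+//	}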
+type Converter func([]byte, int) (json.RawMessage, error) + +// ConvertToServiceConfig converts a proto Load Balancing Policy configuration +// into a json string. Returns an error if: +// - no supported policy found +// - there is more than 16 layers of recursion in the configuration +// - a failure occurs when converting the policy +func ConvertToServiceConfig(lbPolicy *v3clusterpb.LoadBalancingPolicy, depth int) (json.RawMessage, error) { + // "Configurations that require more than 16 levels of recursion are + // considered invalid and should result in a NACK response." - A51 + if depth > 15 { + return nil, fmt.Errorf("lb policy %v exceeds max depth supported: 16 layers", lbPolicy) + } + + // "This function iterate over the list of policy messages in + // LoadBalancingPolicy, attempting to convert each one to gRPC form, + // stopping at the first supported policy." - A52 + for _, policy := range lbPolicy.GetPolicies() { + converter := m[policy.GetTypedExtensionConfig().GetTypedConfig().GetTypeUrl()] + // "Any entry not in the above list is unsupported and will be skipped." + // - A52 + // This includes Least Request as well, since grpc-go does not support + // the Least Request Load Balancing Policy. + if converter == nil { + continue + } + json, err := converter(policy.GetTypedExtensionConfig().GetTypedConfig().GetValue(), depth) + if json == nil && err == nil { + continue + } + return json, err + } + return nil, fmt.Errorf("no supported policy found in policy list +%v", lbPolicy) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go new file mode 100644 index 0000000000..18d47cbc10 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go @@ -0,0 +1,153 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // ClusterResourceTypeName represents the transport agnostic name for the + // cluster resource. + ClusterResourceTypeName = "ClusterResource" +) + +var ( + // Compile time interface checks. + _ Type = clusterResourceType{} + + // Singleton instantiation of the resource type implementation. + clusterType = clusterResourceType{ + resourceTypeState: resourceTypeState{ + typeURL: version.V3ClusterURL, + typeName: ClusterResourceTypeName, + allResourcesRequiredInSotW: true, + }, + } +) + +// clusterResourceType provides the resource-type specific functionality for a +// Cluster resource. +// +// Implements the Type interface. 
+type clusterResourceType struct { + resourceTypeState +} + +// Decode deserializes and validates an xDS resource serialized inside the +// provided `Any` proto, as received from the xDS management server. +func (clusterResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { + name, cluster, err := unmarshalClusterResource(resource, opts.ServerConfig) + switch { + case name == "": + // Name is unset only when protobuf deserialization fails. + return nil, err + case err != nil: + // Protobuf deserialization succeeded, but resource validation failed. + return &DecodeResult{Name: name, Resource: &ClusterResourceData{Resource: ClusterUpdate{}}}, err + } + + // Perform extra validation here. + if err := securityConfigValidator(opts.BootstrapConfig, cluster.SecurityCfg); err != nil { + return &DecodeResult{Name: name, Resource: &ClusterResourceData{Resource: ClusterUpdate{}}}, err + } + + return &DecodeResult{Name: name, Resource: &ClusterResourceData{Resource: cluster}}, nil + +} + +// ClusterResourceData wraps the configuration of a Cluster resource as received +// from the management server. +// +// Implements the ResourceData interface. +type ClusterResourceData struct { + ResourceData + + // TODO: We have always stored update structs by value. See if this can be + // switched to a pointer? + Resource ClusterUpdate +} + +// Equal returns true if other is equal to r. +func (c *ClusterResourceData) Equal(other ResourceData) bool { + if c == nil && other == nil { + return true + } + if (c == nil) != (other == nil) { + return false + } + return proto.Equal(c.Resource.Raw, other.Raw()) +} + +// ToJSON returns a JSON string representation of the resource data. +func (c *ClusterResourceData) ToJSON() string { + return pretty.ToJSON(c.Resource) +} + +// Raw returns the underlying raw protobuf form of the cluster resource. +func (c *ClusterResourceData) Raw() *anypb.Any { + return c.Resource.Raw +} + +// ClusterWatcher wraps the callbacks to be invoked for different events +// corresponding to the cluster resource being watched. +type ClusterWatcher interface { + // OnUpdate is invoked to report an update for the resource being watched. + OnUpdate(*ClusterResourceData, OnDoneFunc) + + // OnError is invoked under different error conditions including but not + // limited to the following: + // - authority mentioned in the resource is not found + // - resource name parsing error + // - resource deserialization error + // - resource validation error + // - ADS stream failure + // - connection failure + OnError(error, OnDoneFunc) + + // OnResourceDoesNotExist is invoked for a specific error condition where + // the requested resource is not found on the xDS management server. + OnResourceDoesNotExist(OnDoneFunc) +} + +type delegatingClusterWatcher struct { + watcher ClusterWatcher +} + +func (d *delegatingClusterWatcher) OnUpdate(data ResourceData, onDone OnDoneFunc) { + c := data.(*ClusterResourceData) + d.watcher.OnUpdate(c, onDone) +} + +func (d *delegatingClusterWatcher) OnError(err error, onDone OnDoneFunc) { + d.watcher.OnError(err, onDone) +} + +func (d *delegatingClusterWatcher) OnResourceDoesNotExist(onDone OnDoneFunc) { + d.watcher.OnResourceDoesNotExist(onDone) +} + +// WatchCluster uses xDS to discover the configuration associated with the +// provided cluster resource name. 
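+// NOTE (illustrative sketch, not part of the vendored file): a minimal caller
+// of the WatchCluster function declared below might register a watcher like
+// this, assuming an xDS client value `client` that satisfies the Producer
+// interface:
+//
+//	type clusterLogger struct{}
+//
+//	func (clusterLogger) OnUpdate(d *xdsresource.ClusterResourceData, done xdsresource.OnDoneFunc) {
+//		defer done()
+//		// Consume d.Resource (a ClusterUpdate) here.
+//	}
+//	func (clusterLogger) OnError(err error, done xdsresource.OnDoneFunc)     { done() }
+//	func (clusterLogger) OnResourceDoesNotExist(done xdsresource.OnDoneFunc) { done() }
+//
+//	cancel := xdsresource.WatchCluster(client, "example-cluster", clusterLogger{})
+//	defer cancel()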
+func WatchCluster(p Producer, name string, w ClusterWatcher) (cancel func()) { + delegator := &delegatingClusterWatcher{watcher: w} + return p.WatchResource(clusterType, name, delegator) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go new file mode 100644 index 0000000000..66c0ae0b20 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go @@ -0,0 +1,149 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // EndpointsResourceTypeName represents the transport agnostic name for the + // endpoint resource. + EndpointsResourceTypeName = "EndpointsResource" +) + +var ( + // Compile time interface checks. + _ Type = endpointsResourceType{} + + // Singleton instantiation of the resource type implementation. + endpointsType = endpointsResourceType{ + resourceTypeState: resourceTypeState{ + typeURL: version.V3EndpointsURL, + typeName: "EndpointsResource", + allResourcesRequiredInSotW: false, + }, + } +) + +// endpointsResourceType provides the resource-type specific functionality for a +// ClusterLoadAssignment (or Endpoints) resource. +// +// Implements the Type interface. +type endpointsResourceType struct { + resourceTypeState +} + +// Decode deserializes and validates an xDS resource serialized inside the +// provided `Any` proto, as received from the xDS management server. +func (endpointsResourceType) Decode(_ *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { + name, rc, err := unmarshalEndpointsResource(resource) + switch { + case name == "": + // Name is unset only when protobuf deserialization fails. + return nil, err + case err != nil: + // Protobuf deserialization succeeded, but resource validation failed. + return &DecodeResult{Name: name, Resource: &EndpointsResourceData{Resource: EndpointsUpdate{}}}, err + } + + return &DecodeResult{Name: name, Resource: &EndpointsResourceData{Resource: rc}}, nil + +} + +// EndpointsResourceData wraps the configuration of an Endpoints resource as +// received from the management server. +// +// Implements the ResourceData interface. +type EndpointsResourceData struct { + ResourceData + + // TODO: We have always stored update structs by value. See if this can be + // switched to a pointer? + Resource EndpointsUpdate +} + +// Equal returns true if other is equal to r. 
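The Decode contract used by all resource types in this package has three outcomes, which a caller distinguishes roughly as sketched below (anyProto stands for an *anypb.Any taken from a management server response; this is an illustration, not code from the vendored client):

    res, err := endpointsType.Decode(&DecodeOptions{}, anyProto)
    switch {
    case res == nil:
        // Deserialization failed; the resource name is unknown, so the
        // client can only NACK the enclosing response.
    case err != nil:
        // The name is known but validation failed; the client NACKs and can
        // surface the error to watchers of res.Name.
    default:
        // res.Resource wraps a valid EndpointsUpdate.
    }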
+func (e *EndpointsResourceData) Equal(other ResourceData) bool { + if e == nil && other == nil { + return true + } + if (e == nil) != (other == nil) { + return false + } + return proto.Equal(e.Resource.Raw, other.Raw()) + +} + +// ToJSON returns a JSON string representation of the resource data. +func (e *EndpointsResourceData) ToJSON() string { + return pretty.ToJSON(e.Resource) +} + +// Raw returns the underlying raw protobuf form of the listener resource. +func (e *EndpointsResourceData) Raw() *anypb.Any { + return e.Resource.Raw +} + +// EndpointsWatcher wraps the callbacks to be invoked for different +// events corresponding to the endpoints resource being watched. +type EndpointsWatcher interface { + // OnUpdate is invoked to report an update for the resource being watched. + OnUpdate(*EndpointsResourceData, OnDoneFunc) + + // OnError is invoked under different error conditions including but not + // limited to the following: + // - authority mentioned in the resource is not found + // - resource name parsing error + // - resource deserialization error + // - resource validation error + // - ADS stream failure + // - connection failure + OnError(error, OnDoneFunc) + + // OnResourceDoesNotExist is invoked for a specific error condition where + // the requested resource is not found on the xDS management server. + OnResourceDoesNotExist(OnDoneFunc) +} + +type delegatingEndpointsWatcher struct { + watcher EndpointsWatcher +} + +func (d *delegatingEndpointsWatcher) OnUpdate(data ResourceData, onDone OnDoneFunc) { + e := data.(*EndpointsResourceData) + d.watcher.OnUpdate(e, onDone) +} + +func (d *delegatingEndpointsWatcher) OnError(err error, onDone OnDoneFunc) { + d.watcher.OnError(err, onDone) +} + +func (d *delegatingEndpointsWatcher) OnResourceDoesNotExist(onDone OnDoneFunc) { + d.watcher.OnResourceDoesNotExist(onDone) +} + +// WatchEndpoints uses xDS to discover the configuration associated with the +// provided endpoints resource name. +func WatchEndpoints(p Producer, name string, w EndpointsWatcher) (cancel func()) { + delegator := &delegatingEndpointsWatcher{watcher: w} + return p.WatchResource(endpointsType, name, delegator) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/errors.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/errors.go new file mode 100644 index 0000000000..7bac4469b7 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/errors.go @@ -0,0 +1,66 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsresource + +import "fmt" + +// ErrorType is the type of the error that the watcher will receive from the xds +// client. +type ErrorType int + +const ( + // ErrorTypeUnknown indicates the error doesn't have a specific type. It is + // the default value, and is returned if the error is not an xds error. + ErrorTypeUnknown ErrorType = iota + // ErrorTypeConnection indicates a connection error from the gRPC client. 
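The helpers defined in the rest of this file (NewErrorf and ErrType, below) let watcher callbacks branch on the failure category rather than on error strings. A hedged sketch of how an OnError implementation might use them (watcherImpl is an invented type):

    func (w *watcherImpl) OnError(err error, done OnDoneFunc) {
        defer done()
        switch ErrType(err) {
        case ErrorTypeResourceNotFound:
            // The resource was removed on the server; fall back to defaults.
        case ErrorTypeConnection, ErrTypeStreamFailedAfterRecv:
            // Transport-level trouble; keep serving the last good update.
        default:
            // ErrorTypeUnknown and everything else: log and wait for a
            // subsequent update.
        }
    }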
+ ErrorTypeConnection + // ErrorTypeResourceNotFound indicates a resource is not found from the xds + // response. It's typically returned if the resource is removed in the xds + // server. + ErrorTypeResourceNotFound + // ErrorTypeResourceTypeUnsupported indicates the receipt of a message from + // the management server with resources of an unsupported resource type. + ErrorTypeResourceTypeUnsupported + // ErrTypeStreamFailedAfterRecv indicates an ADS stream error, after + // successful receipt of at least one message from the server. + ErrTypeStreamFailedAfterRecv +) + +type xdsClientError struct { + t ErrorType + desc string +} + +func (e *xdsClientError) Error() string { + return e.desc +} + +// NewErrorf creates an xds client error. The callbacks are called with this +// error, to pass additional information about the error. +func NewErrorf(t ErrorType, format string, args ...any) error { + return &xdsClientError{t: t, desc: fmt.Sprintf(format, args...)} +} + +// ErrType returns the error's type. +func ErrType(e error) ErrorType { + if xe, ok := e.(*xdsClientError); ok { + return xe.t + } + return ErrorTypeUnknown +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/filter_chain.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/filter_chain.go new file mode 100644 index 0000000000..196bb9f873 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/filter_chain.go @@ -0,0 +1,895 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "errors" + "fmt" + "net" + "sync/atomic" + + "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/proto" + + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" +) + +const ( + // Used as the map key for unspecified prefixes. The actual value of this + // key is immaterial. + unspecifiedPrefixMapKey = "unspecified" + + // An unspecified destination or source prefix should be considered a less + // specific match than a wildcard prefix, `0.0.0.0/0` or `::/0`. Also, an + // unspecified prefix should match most v4 and v6 addresses compared to the + // wildcard prefixes which match only a specific network (v4 or v6). + // + // We use these constants when looking up the most specific prefix match. A + // wildcard prefix will match 0 bits, and to make sure that a wildcard + // prefix is considered a more specific match than an unspecified prefix, we + // use a value of -1 for the latter. 
+ noPrefixMatch = -2 + unspecifiedPrefixMatch = -1 +) + +// FilterChain captures information from within a FilterChain message in a +// Listener resource. +type FilterChain struct { + // SecurityCfg contains transport socket security configuration. + SecurityCfg *SecurityConfig + // HTTPFilters represent the HTTP Filters that comprise this FilterChain. + HTTPFilters []HTTPFilter + // RouteConfigName is the route configuration name for this FilterChain. + // + // Exactly one of RouteConfigName and InlineRouteConfig is set. + RouteConfigName string + // InlineRouteConfig is the inline route configuration (RDS response) + // returned for this filter chain. + // + // Exactly one of RouteConfigName and InlineRouteConfig is set. + InlineRouteConfig *RouteConfigUpdate + // UsableRouteConfiguration is the routing configuration for this filter + // chain (LDS + RDS). + UsableRouteConfiguration *atomic.Pointer[UsableRouteConfiguration] +} + +// VirtualHostWithInterceptors captures information present in a VirtualHost +// update, and also contains routes with instantiated HTTP Filters. +type VirtualHostWithInterceptors struct { + // Domains are the domain names which map to this Virtual Host. On the + // server side, this will be dictated by the :authority header of the + // incoming RPC. + Domains []string + // Routes are the Routes for this Virtual Host. + Routes []RouteWithInterceptors +} + +// RouteWithInterceptors captures information in a Route, and contains +// a usable matcher and also instantiated HTTP Filters. +type RouteWithInterceptors struct { + // M is the matcher used to match to this route. + M *CompositeMatcher + // ActionType is the type of routing action to initiate once matched to. + ActionType RouteActionType + // Interceptors are interceptors instantiated for this route. These will be + // constructed from a combination of the top level configuration and any + // HTTP Filter overrides present in Virtual Host or Route. + Interceptors []resolver.ServerInterceptor +} + +// UsableRouteConfiguration contains a matchable route configuration, with +// instantiated HTTP Filters per route. +type UsableRouteConfiguration struct { + VHS []VirtualHostWithInterceptors + Err error +} + +// ConstructUsableRouteConfiguration takes Route Configuration and converts it +// into matchable route configuration, with instantiated HTTP Filters per route. +func (fc *FilterChain) ConstructUsableRouteConfiguration(config RouteConfigUpdate) *UsableRouteConfiguration { + vhs := make([]VirtualHostWithInterceptors, 0, len(config.VirtualHosts)) + for _, vh := range config.VirtualHosts { + vhwi, err := fc.convertVirtualHost(vh) + if err != nil { + // Non nil if (lds + rds) fails, shouldn't happen since validated by + // xDS Client, treat as L7 error but shouldn't happen. + return &UsableRouteConfiguration{Err: fmt.Errorf("virtual host construction: %v", err)} + } + vhs = append(vhs, vhwi) + } + return &UsableRouteConfiguration{VHS: vhs} +} + +func (fc *FilterChain) convertVirtualHost(virtualHost *VirtualHost) (VirtualHostWithInterceptors, error) { + rs := make([]RouteWithInterceptors, len(virtualHost.Routes)) + for i, r := range virtualHost.Routes { + var err error + rs[i].ActionType = r.ActionType + rs[i].M, err = RouteToMatcher(r) + if err != nil { + return VirtualHostWithInterceptors{}, fmt.Errorf("matcher construction: %v", err) + } + for _, filter := range fc.HTTPFilters { + // Route is highest priority on server side, as there is no concept + // of an upstream cluster on server side. 
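The override lookup in the lines that follow encodes the server-side precedence: a per-route override shadows a per-virtual-host override, and whichever is found (possibly nil) is handed to the filter builder together with the listener-level filter config. As an isolated restatement of that order (a sketch, assuming the FilterConfig interface from the imported httpfilter package):

    func pickOverride(routeOverride, vhostOverride httpfilter.FilterConfig) httpfilter.FilterConfig {
        if routeOverride != nil {
            return routeOverride // most specific: per-route override
        }
        return vhostOverride // otherwise the virtual-host override, or nil
    }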
+ override := r.HTTPFilterConfigOverride[filter.Name] + if override == nil { + // Virtual Host is second priority. + override = virtualHost.HTTPFilterConfigOverride[filter.Name] + } + sb, ok := filter.Filter.(httpfilter.ServerInterceptorBuilder) + if !ok { + // Should not happen if it passed xdsClient validation. + return VirtualHostWithInterceptors{}, fmt.Errorf("filter does not support use in server") + } + si, err := sb.BuildServerInterceptor(filter.Config, override) + if err != nil { + return VirtualHostWithInterceptors{}, fmt.Errorf("filter construction: %v", err) + } + if si != nil { + rs[i].Interceptors = append(rs[i].Interceptors, si) + } + } + } + return VirtualHostWithInterceptors{Domains: virtualHost.Domains, Routes: rs}, nil +} + +// SourceType specifies the connection source IP match type. +type SourceType int + +const ( + // SourceTypeAny matches connection attempts from any source. + SourceTypeAny SourceType = iota + // SourceTypeSameOrLoopback matches connection attempts from the same host. + SourceTypeSameOrLoopback + // SourceTypeExternal matches connection attempts from a different host. + SourceTypeExternal +) + +// FilterChainManager contains all the match criteria specified through all +// filter chains in a single Listener resource. It also contains the default +// filter chain specified in the Listener resource. It provides two important +// pieces of functionality: +// 1. Validate the filter chains in an incoming Listener resource to make sure +// that there aren't filter chains which contain the same match criteria. +// 2. As part of performing the above validation, it builds an internal data +// structure which will if used to look up the matching filter chain at +// connection time. +// +// The logic specified in the documentation around the xDS FilterChainMatch +// proto mentions 8 criteria to match on. +// The following order applies: +// +// 1. Destination port. +// 2. Destination IP address. +// 3. Server name (e.g. SNI for TLS protocol), +// 4. Transport protocol. +// 5. Application protocols (e.g. ALPN for TLS protocol). +// 6. Source type (e.g. any, local or external network). +// 7. Source IP address. +// 8. Source port. +type FilterChainManager struct { + // Destination prefix is the first match criteria that we support. + // Therefore, this multi-stage map is indexed on destination prefixes + // specified in the match criteria. + // Unspecified destination prefix matches end up as a wildcard entry here + // with a key of 0.0.0.0/0. + dstPrefixMap map[string]*destPrefixEntry + + // At connection time, we do not have the actual destination prefix to match + // on. We only have the real destination address of the incoming connection. + // This means that we cannot use the above map at connection time. This list + // contains the map entries from the above map that we can use at connection + // time to find matching destination prefixes in O(n) time. + // + // TODO: Implement LC-trie to support logarithmic time lookups. If that + // involves too much time/effort, sort this slice based on the netmask size. + dstPrefixes []*destPrefixEntry + + def *FilterChain // Default filter chain, if specified. + + // Slice of filter chains managed by this filter chain manager. + fcs []*FilterChain + + // RouteConfigNames are the route configuration names which need to be + // dynamically queried for RDS Configuration for any FilterChains which + // specify to load RDS Configuration dynamically. 
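Concretely, the multi-stage structure documented above means a fully specified FilterChainMatch becomes addressable along a chain of lookups like the one sketched here (the prefixes and port are invented; each stage falls back to its wildcard or unspecified entry when the criterion is absent):

    fc := fcm.dstPrefixMap["192.168.0.0/16"]. // destination prefix
        srcTypeArr[int(SourceTypeExternal)].  // source type
        srcPrefixMap["10.0.0.0/8"].           // source prefix
        srcPortMap[50051]                     // source port -> *FilterChain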
+ RouteConfigNames map[string]bool +} + +// destPrefixEntry is the value type of the map indexed on destination prefixes. +type destPrefixEntry struct { + // The actual destination prefix. Set to nil for unspecified prefixes. + net *net.IPNet + // We need to keep track of the transport protocols seen as part of the + // config validation (and internal structure building) phase. The only two + // values that we support are empty string and "raw_buffer", with the latter + // taking preference. Once we have seen one filter chain with "raw_buffer", + // we can drop everything other filter chain with an empty transport + // protocol. + rawBufferSeen bool + // For each specified source type in the filter chain match criteria, this + // array points to the set of specified source prefixes. + // Unspecified source type matches end up as a wildcard entry here with an + // index of 0, which actually represents the source type `ANY`. + srcTypeArr sourceTypesArray +} + +// An array for the fixed number of source types that we have. +type sourceTypesArray [3]*sourcePrefixes + +// sourcePrefixes contains source prefix related information specified in the +// match criteria. These are pointed to by the array of source types. +type sourcePrefixes struct { + // These are very similar to the 'dstPrefixMap' and 'dstPrefixes' field of + // FilterChainManager. Go there for more info. + srcPrefixMap map[string]*sourcePrefixEntry + srcPrefixes []*sourcePrefixEntry +} + +// sourcePrefixEntry contains match criteria per source prefix. +type sourcePrefixEntry struct { + // The actual destination prefix. Set to nil for unspecified prefixes. + net *net.IPNet + // Mapping from source ports specified in the match criteria to the actual + // filter chain. Unspecified source port matches en up as a wildcard entry + // here with a key of 0. + srcPortMap map[int]*FilterChain +} + +// NewFilterChainManager parses the received Listener resource and builds a +// FilterChainManager. Returns a non-nil error on validation failures. +// +// This function is only exported so that tests outside of this package can +// create a FilterChainManager. +func NewFilterChainManager(lis *v3listenerpb.Listener) (*FilterChainManager, error) { + // Parse all the filter chains and build the internal data structures. + fci := &FilterChainManager{ + dstPrefixMap: make(map[string]*destPrefixEntry), + RouteConfigNames: make(map[string]bool), + } + if err := fci.addFilterChains(lis.GetFilterChains()); err != nil { + return nil, err + } + // Build the source and dest prefix slices used by Lookup(). + fcSeen := false + for _, dstPrefix := range fci.dstPrefixMap { + fci.dstPrefixes = append(fci.dstPrefixes, dstPrefix) + for _, st := range dstPrefix.srcTypeArr { + if st == nil { + continue + } + for _, srcPrefix := range st.srcPrefixMap { + st.srcPrefixes = append(st.srcPrefixes, srcPrefix) + for _, fc := range srcPrefix.srcPortMap { + if fc != nil { + fcSeen = true + } + } + } + } + } + + // Retrieve the default filter chain. The match criteria specified on the + // default filter chain is never used. The default filter chain simply gets + // used when none of the other filter chains match. + var def *FilterChain + if dfc := lis.GetDefaultFilterChain(); dfc != nil { + var err error + if def, err = fci.filterChainFromProto(dfc); err != nil { + return nil, err + } + } + fci.def = def + if fci.def != nil { + fci.fcs = append(fci.fcs, fci.def) + } + + // If there are no supported filter chains and no default filter chain, we + // fail here. 
This will call the Listener resource to be NACK'ed. + if !fcSeen && fci.def == nil { + return nil, fmt.Errorf("no supported filter chains and no default filter chain") + } + return fci, nil +} + +// addFilterChains parses the filter chains in fcs and adds the required +// internal data structures corresponding to the match criteria. +func (fcm *FilterChainManager) addFilterChains(fcs []*v3listenerpb.FilterChain) error { + for _, fc := range fcs { + fcMatch := fc.GetFilterChainMatch() + if fcMatch.GetDestinationPort().GetValue() != 0 { + // Destination port is the first match criteria and we do not + // support filter chains which contains this match criteria. + logger.Warningf("Dropping filter chain %+v since it contains unsupported destination_port match field", fc) + continue + } + + // Build the internal representation of the filter chain match fields. + if err := fcm.addFilterChainsForDestPrefixes(fc); err != nil { + return err + } + } + + return nil +} + +func (fcm *FilterChainManager) addFilterChainsForDestPrefixes(fc *v3listenerpb.FilterChain) error { + ranges := fc.GetFilterChainMatch().GetPrefixRanges() + dstPrefixes := make([]*net.IPNet, 0, len(ranges)) + for _, pr := range ranges { + cidr := fmt.Sprintf("%s/%d", pr.GetAddressPrefix(), pr.GetPrefixLen().GetValue()) + _, ipnet, err := net.ParseCIDR(cidr) + if err != nil { + return fmt.Errorf("failed to parse destination prefix range: %+v", pr) + } + dstPrefixes = append(dstPrefixes, ipnet) + } + + if len(dstPrefixes) == 0 { + // Use the unspecified entry when destination prefix is unspecified, and + // set the `net` field to nil. + if fcm.dstPrefixMap[unspecifiedPrefixMapKey] == nil { + fcm.dstPrefixMap[unspecifiedPrefixMapKey] = &destPrefixEntry{} + } + return fcm.addFilterChainsForServerNames(fcm.dstPrefixMap[unspecifiedPrefixMapKey], fc) + } + for _, prefix := range dstPrefixes { + p := prefix.String() + if fcm.dstPrefixMap[p] == nil { + fcm.dstPrefixMap[p] = &destPrefixEntry{net: prefix} + } + if err := fcm.addFilterChainsForServerNames(fcm.dstPrefixMap[p], fc); err != nil { + return err + } + } + return nil +} + +func (fcm *FilterChainManager) addFilterChainsForServerNames(dstEntry *destPrefixEntry, fc *v3listenerpb.FilterChain) error { + // Filter chains specifying server names in their match criteria always fail + // a match at connection time. So, these filter chains can be dropped now. + if len(fc.GetFilterChainMatch().GetServerNames()) != 0 { + logger.Warningf("Dropping filter chain %+v since it contains unsupported server_names match field", fc) + return nil + } + + return fcm.addFilterChainsForTransportProtocols(dstEntry, fc) +} + +func (fcm *FilterChainManager) addFilterChainsForTransportProtocols(dstEntry *destPrefixEntry, fc *v3listenerpb.FilterChain) error { + tp := fc.GetFilterChainMatch().GetTransportProtocol() + switch { + case tp != "" && tp != "raw_buffer": + // Only allow filter chains with transport protocol set to empty string + // or "raw_buffer". + logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) + return nil + case tp == "" && dstEntry.rawBufferSeen: + // If we have already seen filter chains with transport protocol set to + // "raw_buffer", we can drop filter chains with transport protocol set + // to empty string, since the former takes precedence. 
+ logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) + return nil + case tp != "" && !dstEntry.rawBufferSeen: + // This is the first "raw_buffer" that we are seeing. Set the bit and + // reset the source types array which might contain entries for filter + // chains with transport protocol set to empty string. + dstEntry.rawBufferSeen = true + dstEntry.srcTypeArr = sourceTypesArray{} + } + return fcm.addFilterChainsForApplicationProtocols(dstEntry, fc) +} + +func (fcm *FilterChainManager) addFilterChainsForApplicationProtocols(dstEntry *destPrefixEntry, fc *v3listenerpb.FilterChain) error { + if len(fc.GetFilterChainMatch().GetApplicationProtocols()) != 0 { + logger.Warningf("Dropping filter chain %+v since it contains unsupported application_protocols match field", fc) + return nil + } + return fcm.addFilterChainsForSourceType(dstEntry, fc) +} + +// addFilterChainsForSourceType adds source types to the internal data +// structures and delegates control to addFilterChainsForSourcePrefixes to +// continue building the internal data structure. +func (fcm *FilterChainManager) addFilterChainsForSourceType(dstEntry *destPrefixEntry, fc *v3listenerpb.FilterChain) error { + var srcType SourceType + switch st := fc.GetFilterChainMatch().GetSourceType(); st { + case v3listenerpb.FilterChainMatch_ANY: + srcType = SourceTypeAny + case v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK: + srcType = SourceTypeSameOrLoopback + case v3listenerpb.FilterChainMatch_EXTERNAL: + srcType = SourceTypeExternal + default: + return fmt.Errorf("unsupported source type: %v", st) + } + + st := int(srcType) + if dstEntry.srcTypeArr[st] == nil { + dstEntry.srcTypeArr[st] = &sourcePrefixes{srcPrefixMap: make(map[string]*sourcePrefixEntry)} + } + return fcm.addFilterChainsForSourcePrefixes(dstEntry.srcTypeArr[st].srcPrefixMap, fc) +} + +// addFilterChainsForSourcePrefixes adds source prefixes to the internal data +// structures and delegates control to addFilterChainsForSourcePorts to continue +// building the internal data structure. +func (fcm *FilterChainManager) addFilterChainsForSourcePrefixes(srcPrefixMap map[string]*sourcePrefixEntry, fc *v3listenerpb.FilterChain) error { + ranges := fc.GetFilterChainMatch().GetSourcePrefixRanges() + srcPrefixes := make([]*net.IPNet, 0, len(ranges)) + for _, pr := range fc.GetFilterChainMatch().GetSourcePrefixRanges() { + cidr := fmt.Sprintf("%s/%d", pr.GetAddressPrefix(), pr.GetPrefixLen().GetValue()) + _, ipnet, err := net.ParseCIDR(cidr) + if err != nil { + return fmt.Errorf("failed to parse source prefix range: %+v", pr) + } + srcPrefixes = append(srcPrefixes, ipnet) + } + + if len(srcPrefixes) == 0 { + // Use the unspecified entry when destination prefix is unspecified, and + // set the `net` field to nil. 
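One detail shared by the destination- and source-prefix paths: the map key is ipnet.String(), and net.ParseCIDR has already normalized the network, so differently written but equivalent ranges collapse into a single entry. A standalone check:

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        _, a, _ := net.ParseCIDR("10.1.2.3/8") // host bits are dropped
        _, b, _ := net.ParseCIDR("10.0.0.0/8")
        fmt.Println(a, b, a.String() == b.String()) // 10.0.0.0/8 10.0.0.0/8 true
    }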
+ if srcPrefixMap[unspecifiedPrefixMapKey] == nil { + srcPrefixMap[unspecifiedPrefixMapKey] = &sourcePrefixEntry{ + srcPortMap: make(map[int]*FilterChain), + } + } + return fcm.addFilterChainsForSourcePorts(srcPrefixMap[unspecifiedPrefixMapKey], fc) + } + for _, prefix := range srcPrefixes { + p := prefix.String() + if srcPrefixMap[p] == nil { + srcPrefixMap[p] = &sourcePrefixEntry{ + net: prefix, + srcPortMap: make(map[int]*FilterChain), + } + } + if err := fcm.addFilterChainsForSourcePorts(srcPrefixMap[p], fc); err != nil { + return err + } + } + return nil +} + +// addFilterChainsForSourcePorts adds source ports to the internal data +// structures and completes the process of building the internal data structure. +// It is here that we determine if there are multiple filter chains with +// overlapping matching rules. +func (fcm *FilterChainManager) addFilterChainsForSourcePorts(srcEntry *sourcePrefixEntry, fcProto *v3listenerpb.FilterChain) error { + ports := fcProto.GetFilterChainMatch().GetSourcePorts() + srcPorts := make([]int, 0, len(ports)) + for _, port := range ports { + srcPorts = append(srcPorts, int(port)) + } + + fc, err := fcm.filterChainFromProto(fcProto) + if err != nil { + return err + } + + if len(srcPorts) == 0 { + // Use the wildcard port '0', when source ports are unspecified. + if curFC := srcEntry.srcPortMap[0]; curFC != nil { + return errors.New("multiple filter chains with overlapping matching rules are defined") + } + srcEntry.srcPortMap[0] = fc + fcm.fcs = append(fcm.fcs, fc) + return nil + } + for _, port := range srcPorts { + if curFC := srcEntry.srcPortMap[port]; curFC != nil { + return errors.New("multiple filter chains with overlapping matching rules are defined") + } + srcEntry.srcPortMap[port] = fc + } + fcm.fcs = append(fcm.fcs, fc) + return nil +} + +// FilterChains returns the filter chains for this filter chain manager. +func (fcm *FilterChainManager) FilterChains() []*FilterChain { + return fcm.fcs +} + +// filterChainFromProto extracts the relevant information from the FilterChain +// proto and stores it in our internal representation. It also persists any +// RouteNames which need to be queried dynamically via RDS. +func (fcm *FilterChainManager) filterChainFromProto(fc *v3listenerpb.FilterChain) (*FilterChain, error) { + filterChain, err := processNetworkFilters(fc.GetFilters()) + if err != nil { + return nil, err + } + // These route names will be dynamically queried via RDS in the wrapped + // listener, which receives the LDS response, if specified for the filter + // chain. + if filterChain.RouteConfigName != "" { + fcm.RouteConfigNames[filterChain.RouteConfigName] = true + } + // If the transport_socket field is not specified, it means that the control + // plane has not sent us any security config. This is fine and the server + // will use the fallback credentials configured as part of the + // xdsCredentials. 
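That fallback is something the server opts into when constructing its xDS-aware credentials; the hedged sketch below uses the public credentials/xds and xds packages (error handling shortened, and the exact NewGRPCServer signature may differ slightly between grpc-go releases):

    import (
        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
        xdscreds "google.golang.org/grpc/credentials/xds"
        "google.golang.org/grpc/xds"
    )

    func newXDSServer() (*xds.GRPCServer, error) {
        // FallbackCreds are used whenever the control plane sends no
        // transport_socket for the matched filter chain.
        creds, err := xdscreds.NewServerCredentials(xdscreds.ServerOptions{
            FallbackCreds: insecure.NewCredentials(),
        })
        if err != nil {
            return nil, err
        }
        return xds.NewGRPCServer(grpc.Creds(creds))
    }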
+ ts := fc.GetTransportSocket() + if ts == nil { + return filterChain, nil + } + if name := ts.GetName(); name != transportSocketName { + return nil, fmt.Errorf("transport_socket field has unexpected name: %s", name) + } + tc := ts.GetTypedConfig() + if tc == nil || tc.TypeUrl != version.V3DownstreamTLSContextURL { + return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", tc.TypeUrl) + } + downstreamCtx := &v3tlspb.DownstreamTlsContext{} + if err := proto.Unmarshal(tc.GetValue(), downstreamCtx); err != nil { + return nil, fmt.Errorf("failed to unmarshal DownstreamTlsContext in LDS response: %v", err) + } + if downstreamCtx.GetRequireSni().GetValue() { + return nil, fmt.Errorf("require_sni field set to true in DownstreamTlsContext message: %v", downstreamCtx) + } + if downstreamCtx.GetOcspStaplePolicy() != v3tlspb.DownstreamTlsContext_LENIENT_STAPLING { + return nil, fmt.Errorf("ocsp_staple_policy field set to unsupported value in DownstreamTlsContext message: %v", downstreamCtx) + } + // The following fields from `DownstreamTlsContext` are ignore: + // - disable_stateless_session_resumption + // - session_ticket_keys + // - session_ticket_keys_sds_secret_config + // - session_timeout + if downstreamCtx.GetCommonTlsContext() == nil { + return nil, errors.New("DownstreamTlsContext in LDS response does not contain a CommonTlsContext") + } + sc, err := securityConfigFromCommonTLSContext(downstreamCtx.GetCommonTlsContext(), true) + if err != nil { + return nil, err + } + if sc == nil { + // sc == nil is a valid case where the control plane has not sent us any + // security configuration. xDS creds will use fallback creds. + return filterChain, nil + } + sc.RequireClientCert = downstreamCtx.GetRequireClientCertificate().GetValue() + if sc.RequireClientCert && sc.RootInstanceName == "" { + return nil, errors.New("security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set") + } + filterChain.SecurityCfg = sc + return filterChain, nil +} + +// Validate takes a function to validate the FilterChains in this manager. +func (fcm *FilterChainManager) Validate(f func(fc *FilterChain) error) error { + for _, dst := range fcm.dstPrefixMap { + for _, srcType := range dst.srcTypeArr { + if srcType == nil { + continue + } + for _, src := range srcType.srcPrefixMap { + for _, fc := range src.srcPortMap { + if err := f(fc); err != nil { + return err + } + } + } + } + } + return f(fcm.def) +} + +func processNetworkFilters(filters []*v3listenerpb.Filter) (*FilterChain, error) { + rc := &UsableRouteConfiguration{} + filterChain := &FilterChain{ + UsableRouteConfiguration: &atomic.Pointer[UsableRouteConfiguration]{}, + } + filterChain.UsableRouteConfiguration.Store(rc) + seenNames := make(map[string]bool, len(filters)) + seenHCM := false + for _, filter := range filters { + name := filter.GetName() + if name == "" { + return nil, fmt.Errorf("network filters {%+v} is missing name field in filter: {%+v}", filters, filter) + } + if seenNames[name] { + return nil, fmt.Errorf("network filters {%+v} has duplicate filter name %q", filters, name) + } + seenNames[name] = true + + // Network filters have a oneof field named `config_type` where we + // only support `TypedConfig` variant. 
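For orientation, the only shape this loop accepts today is a filter whose typed_config wraps a v3 HttpConnectionManager that in turn carries valid http_filters (typically ending in the terminal router filter) and a route specifier. A rough sketch of building such a filter, for example in a test; v3corepb and v3routerpb are assumed aliases for the go-control-plane config/core/v3 and filters/http/router/v3 packages, and error handling is elided:

    routerAny, _ := anypb.New(&v3routerpb.Router{})
    hcmAny, _ := anypb.New(&v3httppb.HttpConnectionManager{
        RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{Rds: &v3httppb.Rds{
            ConfigSource: &v3corepb.ConfigSource{
                ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}},
            },
            RouteConfigName: "example-route",
        }},
        HttpFilters: []*v3httppb.HttpFilter{{
            Name:       "router",
            ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: routerAny},
        }},
    })
    filter := &v3listenerpb.Filter{
        Name:       "envoy.filters.network.http_connection_manager",
        ConfigType: &v3listenerpb.Filter_TypedConfig{TypedConfig: hcmAny},
    }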
+ switch typ := filter.GetConfigType().(type) { + case *v3listenerpb.Filter_TypedConfig: + // The typed_config field has an `anypb.Any` proto which could + // directly contain the serialized bytes of the actual filter + // configuration, or it could be encoded as a `TypedStruct`. + // TODO: Add support for `TypedStruct`. + tc := filter.GetTypedConfig() + + // The only network filter that we currently support is the v3 + // HttpConnectionManager. So, we can directly check the type_url + // and unmarshal the config. + // TODO: Implement a registry of supported network filters (like + // we have for HTTP filters), when we have to support network + // filters other than HttpConnectionManager. + if tc.GetTypeUrl() != version.V3HTTPConnManagerURL { + return nil, fmt.Errorf("network filters {%+v} has unsupported network filter %q in filter {%+v}", filters, tc.GetTypeUrl(), filter) + } + hcm := &v3httppb.HttpConnectionManager{} + if err := tc.UnmarshalTo(hcm); err != nil { + return nil, fmt.Errorf("network filters {%+v} failed unmarshalling of network filter {%+v}: %v", filters, filter, err) + } + // "Any filters after HttpConnectionManager should be ignored during + // connection processing but still be considered for validity. + // HTTPConnectionManager must have valid http_filters." - A36 + filters, err := processHTTPFilters(hcm.GetHttpFilters(), true) + if err != nil { + return nil, fmt.Errorf("network filters {%+v} had invalid server side HTTP Filters {%+v}: %v", filters, hcm.GetHttpFilters(), err) + } + if !seenHCM { + // Validate for RBAC in only the HCM that will be used, since this isn't a logical validation failure, + // it's simply a validation to support RBAC HTTP Filter. + // "HttpConnectionManager.xff_num_trusted_hops must be unset or zero and + // HttpConnectionManager.original_ip_detection_extensions must be empty. If + // either field has an incorrect value, the Listener must be NACKed." - A41 + if hcm.XffNumTrustedHops != 0 { + return nil, fmt.Errorf("xff_num_trusted_hops must be unset or zero %+v", hcm) + } + if len(hcm.OriginalIpDetectionExtensions) != 0 { + return nil, fmt.Errorf("original_ip_detection_extensions must be empty %+v", hcm) + } + + // TODO: Implement terminal filter logic, as per A36. + filterChain.HTTPFilters = filters + seenHCM = true + switch hcm.RouteSpecifier.(type) { + case *v3httppb.HttpConnectionManager_Rds: + if hcm.GetRds().GetConfigSource().GetAds() == nil { + return nil, fmt.Errorf("ConfigSource is not ADS: %+v", hcm) + } + name := hcm.GetRds().GetRouteConfigName() + if name == "" { + return nil, fmt.Errorf("empty route_config_name: %+v", hcm) + } + filterChain.RouteConfigName = name + case *v3httppb.HttpConnectionManager_RouteConfig: + // "RouteConfiguration validation logic inherits all + // previous validations made for client-side usage as RDS + // does not distinguish between client-side and + // server-side." - A36 + // Can specify v3 here, as will never get to this function + // if v2. 
+ routeU, err := generateRDSUpdateFromRouteConfiguration(hcm.GetRouteConfig()) + if err != nil { + return nil, fmt.Errorf("failed to parse inline RDS resp: %v", err) + } + filterChain.InlineRouteConfig = &routeU + case nil: + return nil, fmt.Errorf("no RouteSpecifier: %+v", hcm) + default: + return nil, fmt.Errorf("unsupported type %T for RouteSpecifier", hcm.RouteSpecifier) + } + } + default: + return nil, fmt.Errorf("network filters {%+v} has unsupported config_type %T in filter %s", filters, typ, filter.GetName()) + } + } + if !seenHCM { + return nil, fmt.Errorf("network filters {%+v} missing HttpConnectionManager filter", filters) + } + return filterChain, nil +} + +// FilterChainLookupParams wraps parameters to be passed to Lookup. +type FilterChainLookupParams struct { + // IsUnspecified indicates whether the server is listening on a wildcard + // address, "0.0.0.0" for IPv4 and "::" for IPv6. Only when this is set to + // true, do we consider the destination prefixes specified in the filter + // chain match criteria. + IsUnspecifiedListener bool + // DestAddr is the local address of an incoming connection. + DestAddr net.IP + // SourceAddr is the remote address of an incoming connection. + SourceAddr net.IP + // SourcePort is the remote port of an incoming connection. + SourcePort int +} + +// Lookup returns the most specific matching filter chain to be used for an +// incoming connection on the server side. +// +// Returns a non-nil error if no matching filter chain could be found or +// multiple matching filter chains were found, and in both cases, the incoming +// connection must be dropped. +func (fcm *FilterChainManager) Lookup(params FilterChainLookupParams) (*FilterChain, error) { + dstPrefixes := filterByDestinationPrefixes(fcm.dstPrefixes, params.IsUnspecifiedListener, params.DestAddr) + if len(dstPrefixes) == 0 { + if fcm.def != nil { + return fcm.def, nil + } + return nil, fmt.Errorf("no matching filter chain based on destination prefix match for %+v", params) + } + + srcType := SourceTypeExternal + if params.SourceAddr.Equal(params.DestAddr) || params.SourceAddr.IsLoopback() { + srcType = SourceTypeSameOrLoopback + } + srcPrefixes := filterBySourceType(dstPrefixes, srcType) + if len(srcPrefixes) == 0 { + if fcm.def != nil { + return fcm.def, nil + } + return nil, fmt.Errorf("no matching filter chain based on source type match for %+v", params) + } + srcPrefixEntry, err := filterBySourcePrefixes(srcPrefixes, params.SourceAddr) + if err != nil { + return nil, err + } + if fc := filterBySourcePorts(srcPrefixEntry, params.SourcePort); fc != nil { + return fc, nil + } + if fcm.def != nil { + return fcm.def, nil + } + return nil, fmt.Errorf("no matching filter chain after all match criteria for %+v", params) +} + +// filterByDestinationPrefixes is the first stage of the filter chain +// matching algorithm. It takes the complete set of configured filter chain +// matchers and returns the most specific matchers based on the destination +// prefix match criteria (the prefixes which match the most number of bits). +func filterByDestinationPrefixes(dstPrefixes []*destPrefixEntry, isUnspecified bool, dstAddr net.IP) []*destPrefixEntry { + if !isUnspecified { + // Destination prefix matchers are considered only when the listener is + // bound to the wildcard address. 
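The specificity rule applied in the rest of this function can be tried in isolation: among candidate prefixes that contain the destination address, the one matching the most bits wins, and the -1 sentinel for an unspecified matcher only wins when nothing else matches at all. A minimal standalone sketch:

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        dst := net.ParseIP("192.168.5.10")
        candidates := []string{"0.0.0.0/0", "192.168.0.0/16", "10.0.0.0/8"}

        best, bestBits := "", -1 // -1 plays the role of the unspecified matcher
        for _, c := range candidates {
            _, ipnet, _ := net.ParseCIDR(c)
            if !ipnet.Contains(dst) {
                continue // 10.0.0.0/8 is skipped here
            }
            if bits, _ := ipnet.Mask.Size(); bits > bestBits {
                best, bestBits = c, bits
            }
        }
        fmt.Println(best) // 192.168.0.0/16 beats the /0 wildcard
    }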
+ return dstPrefixes + } + + var matchingDstPrefixes []*destPrefixEntry + maxSubnetMatch := noPrefixMatch + for _, prefix := range dstPrefixes { + if prefix.net != nil && !prefix.net.Contains(dstAddr) { + // Skip prefixes which don't match. + continue + } + // For unspecified prefixes, since we do not store a real net.IPNet + // inside prefix, we do not perform a match. Instead we simply set + // the matchSize to -1, which is less than the matchSize (0) for a + // wildcard prefix. + matchSize := unspecifiedPrefixMatch + if prefix.net != nil { + matchSize, _ = prefix.net.Mask.Size() + } + if matchSize < maxSubnetMatch { + continue + } + if matchSize > maxSubnetMatch { + maxSubnetMatch = matchSize + matchingDstPrefixes = make([]*destPrefixEntry, 0, 1) + } + matchingDstPrefixes = append(matchingDstPrefixes, prefix) + } + return matchingDstPrefixes +} + +// filterBySourceType is the second stage of the matching algorithm. It +// trims the filter chains based on the most specific source type match. +func filterBySourceType(dstPrefixes []*destPrefixEntry, srcType SourceType) []*sourcePrefixes { + var ( + srcPrefixes []*sourcePrefixes + bestSrcTypeMatch int + ) + for _, prefix := range dstPrefixes { + var ( + srcPrefix *sourcePrefixes + match int + ) + switch srcType { + case SourceTypeExternal: + match = int(SourceTypeExternal) + srcPrefix = prefix.srcTypeArr[match] + case SourceTypeSameOrLoopback: + match = int(SourceTypeSameOrLoopback) + srcPrefix = prefix.srcTypeArr[match] + } + if srcPrefix == nil { + match = int(SourceTypeAny) + srcPrefix = prefix.srcTypeArr[match] + } + if match < bestSrcTypeMatch { + continue + } + if match > bestSrcTypeMatch { + bestSrcTypeMatch = match + srcPrefixes = make([]*sourcePrefixes, 0) + } + if srcPrefix != nil { + // The source type array always has 3 entries, but these could be + // nil if the appropriate source type match was not specified. + srcPrefixes = append(srcPrefixes, srcPrefix) + } + } + return srcPrefixes +} + +// filterBySourcePrefixes is the third stage of the filter chain matching +// algorithm. It trims the filter chains based on the source prefix. At most one +// filter chain with the most specific match progress to the next stage. +func filterBySourcePrefixes(srcPrefixes []*sourcePrefixes, srcAddr net.IP) (*sourcePrefixEntry, error) { + var matchingSrcPrefixes []*sourcePrefixEntry + maxSubnetMatch := noPrefixMatch + for _, sp := range srcPrefixes { + for _, prefix := range sp.srcPrefixes { + if prefix.net != nil && !prefix.net.Contains(srcAddr) { + // Skip prefixes which don't match. + continue + } + // For unspecified prefixes, since we do not store a real net.IPNet + // inside prefix, we do not perform a match. Instead we simply set + // the matchSize to -1, which is less than the matchSize (0) for a + // wildcard prefix. + matchSize := unspecifiedPrefixMatch + if prefix.net != nil { + matchSize, _ = prefix.net.Mask.Size() + } + if matchSize < maxSubnetMatch { + continue + } + if matchSize > maxSubnetMatch { + maxSubnetMatch = matchSize + matchingSrcPrefixes = make([]*sourcePrefixEntry, 0, 1) + } + matchingSrcPrefixes = append(matchingSrcPrefixes, prefix) + } + } + if len(matchingSrcPrefixes) == 0 { + // Finding no match is not an error condition. The caller will end up + // using the default filter chain if one was configured. + return nil, nil + } + // We expect at most a single matching source prefix entry at this point. 
If + // we have multiple entries here, and some of their source port matchers had + // wildcard entries, we could be left with more than one matching filter + // chain and hence would have been flagged as an invalid configuration at + // config validation time. + if len(matchingSrcPrefixes) != 1 { + return nil, errors.New("multiple matching filter chains") + } + return matchingSrcPrefixes[0], nil +} + +// filterBySourcePorts is the last stage of the filter chain matching +// algorithm. It trims the filter chains based on the source ports. +func filterBySourcePorts(spe *sourcePrefixEntry, srcPort int) *FilterChain { + if spe == nil { + return nil + } + // A match could be a wildcard match (this happens when the match + // criteria does not specify source ports) or a specific port match (this + // happens when the match criteria specifies a set of ports and the source + // port of the incoming connection matches one of the specified ports). The + // latter is considered to be a more specific match. + if fc := spe.srcPortMap[srcPort]; fc != nil { + return fc + } + if fc := spe.srcPortMap[0]; fc != nil { + return fc + } + return nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go new file mode 100644 index 0000000000..80fa5e6a21 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go @@ -0,0 +1,186 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "fmt" + + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // ListenerResourceTypeName represents the transport agnostic name for the + // listener resource. + ListenerResourceTypeName = "ListenerResource" +) + +var ( + // Compile time interface checks. + _ Type = listenerResourceType{} + + // Singleton instantiation of the resource type implementation. + listenerType = listenerResourceType{ + resourceTypeState: resourceTypeState{ + typeURL: version.V3ListenerURL, + typeName: ListenerResourceTypeName, + allResourcesRequiredInSotW: true, + }, + } +) + +// listenerResourceType provides the resource-type specific functionality for a +// Listener resource. +// +// Implements the Type interface. 
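Source-port selection, the final stage above, prefers an exact port entry and only then falls back to the wildcard key 0. Reduced to its essence (keys and values invented):

    srcPortMap := map[int]string{0: "wildcard-chain", 50051: "port-50051-chain"}
    pick := func(port int) string {
        if fc, ok := srcPortMap[port]; ok {
            return fc // a specific port entry is the more specific match
        }
        return srcPortMap[0] // otherwise the wildcard entry, if any
    }
    _, _ = pick(50051), pick(9999) // "port-50051-chain", "wildcard-chain"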
+type listenerResourceType struct { + resourceTypeState +} + +func securityConfigValidator(bc *bootstrap.Config, sc *SecurityConfig) error { + if sc == nil { + return nil + } + if sc.IdentityInstanceName != "" { + if _, ok := bc.CertProviderConfigs()[sc.IdentityInstanceName]; !ok { + return fmt.Errorf("identity certificate provider instance name %q missing in bootstrap configuration", sc.IdentityInstanceName) + } + } + if sc.RootInstanceName != "" { + if _, ok := bc.CertProviderConfigs()[sc.RootInstanceName]; !ok { + return fmt.Errorf("root certificate provider instance name %q missing in bootstrap configuration", sc.RootInstanceName) + } + } + return nil +} + +func listenerValidator(bc *bootstrap.Config, lis ListenerUpdate) error { + if lis.InboundListenerCfg == nil || lis.InboundListenerCfg.FilterChains == nil { + return nil + } + return lis.InboundListenerCfg.FilterChains.Validate(func(fc *FilterChain) error { + if fc == nil { + return nil + } + return securityConfigValidator(bc, fc.SecurityCfg) + }) +} + +// Decode deserializes and validates an xDS resource serialized inside the +// provided `Any` proto, as received from the xDS management server. +func (listenerResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { + name, listener, err := unmarshalListenerResource(resource) + switch { + case name == "": + // Name is unset only when protobuf deserialization fails. + return nil, err + case err != nil: + // Protobuf deserialization succeeded, but resource validation failed. + return &DecodeResult{Name: name, Resource: &ListenerResourceData{Resource: ListenerUpdate{}}}, err + } + + // Perform extra validation here. + if err := listenerValidator(opts.BootstrapConfig, listener); err != nil { + return &DecodeResult{Name: name, Resource: &ListenerResourceData{Resource: ListenerUpdate{}}}, err + } + + return &DecodeResult{Name: name, Resource: &ListenerResourceData{Resource: listener}}, nil + +} + +// ListenerResourceData wraps the configuration of a Listener resource as +// received from the management server. +// +// Implements the ResourceData interface. +type ListenerResourceData struct { + ResourceData + + // TODO: We have always stored update structs by value. See if this can be + // switched to a pointer? + Resource ListenerUpdate +} + +// Equal returns true if other is equal to l. +func (l *ListenerResourceData) Equal(other ResourceData) bool { + if l == nil && other == nil { + return true + } + if (l == nil) != (other == nil) { + return false + } + return proto.Equal(l.Resource.Raw, other.Raw()) + +} + +// ToJSON returns a JSON string representation of the resource data. +func (l *ListenerResourceData) ToJSON() string { + return pretty.ToJSON(l.Resource) +} + +// Raw returns the underlying raw protobuf form of the listener resource. +func (l *ListenerResourceData) Raw() *anypb.Any { + return l.Resource.Raw +} + +// ListenerWatcher wraps the callbacks to be invoked for different +// events corresponding to the listener resource being watched. +type ListenerWatcher interface { + // OnUpdate is invoked to report an update for the resource being watched. 
+ OnUpdate(*ListenerResourceData, OnDoneFunc) + + // OnError is invoked under different error conditions including but not + // limited to the following: + // - authority mentioned in the resource is not found + // - resource name parsing error + // - resource deserialization error + // - resource validation error + // - ADS stream failure + // - connection failure + OnError(error, OnDoneFunc) + + // OnResourceDoesNotExist is invoked for a specific error condition where + // the requested resource is not found on the xDS management server. + OnResourceDoesNotExist(OnDoneFunc) +} + +type delegatingListenerWatcher struct { + watcher ListenerWatcher +} + +func (d *delegatingListenerWatcher) OnUpdate(data ResourceData, onDone OnDoneFunc) { + l := data.(*ListenerResourceData) + d.watcher.OnUpdate(l, onDone) +} + +func (d *delegatingListenerWatcher) OnError(err error, onDone OnDoneFunc) { + d.watcher.OnError(err, onDone) +} + +func (d *delegatingListenerWatcher) OnResourceDoesNotExist(onDone OnDoneFunc) { + d.watcher.OnResourceDoesNotExist(onDone) +} + +// WatchListener uses xDS to discover the configuration associated with the +// provided listener resource name. +func WatchListener(p Producer, name string, w ListenerWatcher) (cancel func()) { + delegator := &delegatingListenerWatcher{watcher: w} + return p.WatchResource(listenerType, name, delegator) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/logging.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/logging.go new file mode 100644 index 0000000000..62bcb016ba --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/logging.go @@ -0,0 +1,28 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsresource + +import ( + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[xds-resource] " + +var logger = internalgrpclog.NewPrefixLogger(grpclog.Component("xds"), prefix) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher.go new file mode 100644 index 0000000000..796e9e3008 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher.go @@ -0,0 +1,280 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xdsresource + +import ( + "fmt" + "math/rand" + "strings" + + "google.golang.org/grpc/internal/grpcutil" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/xds/matcher" + "google.golang.org/grpc/metadata" +) + +// RouteToMatcher converts a route to a Matcher to match incoming RPC's against. +func RouteToMatcher(r *Route) (*CompositeMatcher, error) { + var pm pathMatcher + switch { + case r.Regex != nil: + pm = newPathRegexMatcher(r.Regex) + case r.Path != nil: + pm = newPathExactMatcher(*r.Path, r.CaseInsensitive) + case r.Prefix != nil: + pm = newPathPrefixMatcher(*r.Prefix, r.CaseInsensitive) + default: + return nil, fmt.Errorf("illegal route: missing path_matcher") + } + + headerMatchers := make([]matcher.HeaderMatcher, 0, len(r.Headers)) + for _, h := range r.Headers { + var matcherT matcher.HeaderMatcher + invert := h.InvertMatch != nil && *h.InvertMatch + switch { + case h.ExactMatch != nil && *h.ExactMatch != "": + matcherT = matcher.NewHeaderExactMatcher(h.Name, *h.ExactMatch, invert) + case h.RegexMatch != nil: + matcherT = matcher.NewHeaderRegexMatcher(h.Name, h.RegexMatch, invert) + case h.PrefixMatch != nil && *h.PrefixMatch != "": + matcherT = matcher.NewHeaderPrefixMatcher(h.Name, *h.PrefixMatch, invert) + case h.SuffixMatch != nil && *h.SuffixMatch != "": + matcherT = matcher.NewHeaderSuffixMatcher(h.Name, *h.SuffixMatch, invert) + case h.RangeMatch != nil: + matcherT = matcher.NewHeaderRangeMatcher(h.Name, h.RangeMatch.Start, h.RangeMatch.End, invert) + case h.PresentMatch != nil: + matcherT = matcher.NewHeaderPresentMatcher(h.Name, *h.PresentMatch, invert) + case h.StringMatch != nil: + matcherT = matcher.NewHeaderStringMatcher(h.Name, *h.StringMatch, invert) + default: + return nil, fmt.Errorf("illegal route: missing header_match_specifier") + } + headerMatchers = append(headerMatchers, matcherT) + } + + var fractionMatcher *fractionMatcher + if r.Fraction != nil { + fractionMatcher = newFractionMatcher(*r.Fraction) + } + return newCompositeMatcher(pm, headerMatchers, fractionMatcher), nil +} + +// CompositeMatcher is a matcher that holds onto many matchers and aggregates +// the matching results. +type CompositeMatcher struct { + pm pathMatcher + hms []matcher.HeaderMatcher + fm *fractionMatcher +} + +func newCompositeMatcher(pm pathMatcher, hms []matcher.HeaderMatcher, fm *fractionMatcher) *CompositeMatcher { + return &CompositeMatcher{pm: pm, hms: hms, fm: fm} +} + +// Match returns true if all matchers return true. +func (a *CompositeMatcher) Match(info iresolver.RPCInfo) bool { + if a.pm != nil && !a.pm.match(info.Method) { + return false + } + + // Call headerMatchers even if md is nil, because routes may match + // non-presence of some headers. + var md metadata.MD + if info.Context != nil { + md, _ = metadata.FromOutgoingContext(info.Context) + if extraMD, ok := grpcutil.ExtraMetadata(info.Context); ok { + md = metadata.Join(md, extraMD) + // Remove all binary headers. They are hard to match with. May need + // to add back if asked by users. 
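gRPC metadata keys for binary-valued headers always carry the "-bin" suffix, which is what the loop that follows keys on. A standalone illustration using the public metadata package:

    md := metadata.Pairs(
        "grpc-trace-bin", string([]byte{0x01}),
        "x-user", "alice",
    )
    for k := range md {
        if strings.HasSuffix(k, "-bin") {
            delete(md, k) // binary headers are not matchable; drop them
        }
    }
    // md now contains only "x-user".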
+ for k := range md { + if strings.HasSuffix(k, "-bin") { + delete(md, k) + } + } + } + } + for _, m := range a.hms { + if !m.Match(md) { + return false + } + } + + if a.fm != nil && !a.fm.match() { + return false + } + return true +} + +func (a *CompositeMatcher) String() string { + var ret string + if a.pm != nil { + ret += a.pm.String() + } + for _, m := range a.hms { + ret += m.String() + } + if a.fm != nil { + ret += a.fm.String() + } + return ret +} + +type fractionMatcher struct { + fraction int64 // real fraction is fraction/1,000,000. +} + +func newFractionMatcher(fraction uint32) *fractionMatcher { + return &fractionMatcher{fraction: int64(fraction)} +} + +// RandInt63n overwrites rand for control in tests. +var RandInt63n = rand.Int63n + +func (fm *fractionMatcher) match() bool { + t := RandInt63n(1000000) + return t <= fm.fraction +} + +func (fm *fractionMatcher) String() string { + return fmt.Sprintf("fraction:%v", fm.fraction) +} + +type domainMatchType int + +const ( + domainMatchTypeInvalid domainMatchType = iota + domainMatchTypeUniversal + domainMatchTypePrefix + domainMatchTypeSuffix + domainMatchTypeExact +) + +// Exact > Suffix > Prefix > Universal > Invalid. +func (t domainMatchType) betterThan(b domainMatchType) bool { + return t > b +} + +func matchTypeForDomain(d string) domainMatchType { + if d == "" { + return domainMatchTypeInvalid + } + if d == "*" { + return domainMatchTypeUniversal + } + if strings.HasPrefix(d, "*") { + return domainMatchTypeSuffix + } + if strings.HasSuffix(d, "*") { + return domainMatchTypePrefix + } + if strings.Contains(d, "*") { + return domainMatchTypeInvalid + } + return domainMatchTypeExact +} + +func match(domain, host string) (domainMatchType, bool) { + switch typ := matchTypeForDomain(domain); typ { + case domainMatchTypeInvalid: + return typ, false + case domainMatchTypeUniversal: + return typ, true + case domainMatchTypePrefix: + // abc.* + return typ, strings.HasPrefix(host, strings.TrimSuffix(domain, "*")) + case domainMatchTypeSuffix: + // *.123 + return typ, strings.HasSuffix(host, strings.TrimPrefix(domain, "*")) + case domainMatchTypeExact: + return typ, domain == host + default: + return domainMatchTypeInvalid, false + } +} + +// FindBestMatchingVirtualHost returns the virtual host whose domains field best +// matches host +// +// The domains field support 4 different matching pattern types: +// +// - Exact match +// - Suffix match (e.g. “*ABC”) +// - Prefix match (e.g. “ABC*) +// - Universal match (e.g. “*”) +// +// The best match is defined as: +// - A match is better if it’s matching pattern type is better. +// * Exact match > suffix match > prefix match > universal match. +// +// - If two matches are of the same pattern type, the longer match is +// better. +// * This is to compare the length of the matching pattern, e.g. “*ABCDE” > +// “*ABC” +func FindBestMatchingVirtualHost(host string, vHosts []*VirtualHost) *VirtualHost { // Maybe move this crap to client + var ( + matchVh *VirtualHost + matchType = domainMatchTypeInvalid + matchLen int + ) + for _, vh := range vHosts { + for _, domain := range vh.Domains { + typ, matched := match(domain, host) + if typ == domainMatchTypeInvalid { + // The rds response is invalid. + return nil + } + if matchType.betterThan(typ) || matchType == typ && matchLen >= len(domain) || !matched { + // The previous match has better type, or the previous match has + // better length, or this domain isn't a match. 
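The precedence encoded above (exact > suffix > prefix > universal, with the longer pattern winning within a type) can be exercised on its own; the standalone sketch below reimplements it in simplified form with the standard library only:

    package main

    import (
        "fmt"
        "strings"
    )

    // rank returns (matchType, length): 3 exact, 2 suffix ("*abc"),
    // 1 prefix ("abc*"), 0 universal ("*"), -1 no match.
    func rank(domain, host string) (int, int) {
        switch {
        case domain == "*":
            return 0, 1
        case strings.HasPrefix(domain, "*") && strings.HasSuffix(host, domain[1:]):
            return 2, len(domain)
        case strings.HasSuffix(domain, "*") && strings.HasPrefix(host, domain[:len(domain)-1]):
            return 1, len(domain)
        case domain == host:
            return 3, len(domain)
        }
        return -1, 0
    }

    func main() {
        host, best, bestType, bestLen := "foo.example.com", "", -1, 0
        for _, d := range []string{"*", "*.com", "*.example.com", "foo.example.com"} {
            if t, l := rank(d, host); t > bestType || (t == bestType && l > bestLen) {
                best, bestType, bestLen = d, t, l
            }
        }
        fmt.Println(best) // foo.example.com: the exact match wins
    }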
+ continue + } + matchVh = vh + matchType = typ + matchLen = len(domain) + } + } + return matchVh +} + +// FindBestMatchingVirtualHostServer returns the virtual host whose domains field best +// matches authority. +func FindBestMatchingVirtualHostServer(authority string, vHosts []VirtualHostWithInterceptors) *VirtualHostWithInterceptors { + var ( + matchVh *VirtualHostWithInterceptors + matchType = domainMatchTypeInvalid + matchLen int + ) + for _, vh := range vHosts { + for _, domain := range vh.Domains { + typ, matched := match(domain, authority) + if typ == domainMatchTypeInvalid { + // The rds response is invalid. + return nil + } + if matchType.betterThan(typ) || matchType == typ && matchLen >= len(domain) || !matched { + // The previous match has better type, or the previous match has + // better length, or this domain isn't a match. + continue + } + matchVh = &vh + matchType = typ + matchLen = len(domain) + } + } + return matchVh +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher_path.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher_path.go new file mode 100644 index 0000000000..da487e20c5 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher_path.go @@ -0,0 +1,102 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "regexp" + "strings" + + "google.golang.org/grpc/internal/grpcutil" +) + +type pathMatcher interface { + match(path string) bool + String() string +} + +type pathExactMatcher struct { + // fullPath is all upper case if caseInsensitive is true. + fullPath string + caseInsensitive bool +} + +func newPathExactMatcher(p string, caseInsensitive bool) *pathExactMatcher { + ret := &pathExactMatcher{ + fullPath: p, + caseInsensitive: caseInsensitive, + } + if caseInsensitive { + ret.fullPath = strings.ToUpper(p) + } + return ret +} + +func (pem *pathExactMatcher) match(path string) bool { + if pem.caseInsensitive { + return pem.fullPath == strings.ToUpper(path) + } + return pem.fullPath == path +} + +func (pem *pathExactMatcher) String() string { + return "pathExact:" + pem.fullPath +} + +type pathPrefixMatcher struct { + // prefix is all upper case if caseInsensitive is true. 
+ prefix string + caseInsensitive bool +} + +func newPathPrefixMatcher(p string, caseInsensitive bool) *pathPrefixMatcher { + ret := &pathPrefixMatcher{ + prefix: p, + caseInsensitive: caseInsensitive, + } + if caseInsensitive { + ret.prefix = strings.ToUpper(p) + } + return ret +} + +func (ppm *pathPrefixMatcher) match(path string) bool { + if ppm.caseInsensitive { + return strings.HasPrefix(strings.ToUpper(path), ppm.prefix) + } + return strings.HasPrefix(path, ppm.prefix) +} + +func (ppm *pathPrefixMatcher) String() string { + return "pathPrefix:" + ppm.prefix +} + +type pathRegexMatcher struct { + re *regexp.Regexp +} + +func newPathRegexMatcher(re *regexp.Regexp) *pathRegexMatcher { + return &pathRegexMatcher{re: re} +} + +func (prm *pathRegexMatcher) match(path string) bool { + return grpcutil.FullMatchWithRegex(prm.re, path) +} + +func (prm *pathRegexMatcher) String() string { + return "pathRegex:" + prm.re.String() +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/name.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/name.go new file mode 100644 index 0000000000..24200ea8db --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/name.go @@ -0,0 +1,127 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "net/url" + "sort" + "strings" +) + +// FederationScheme is the scheme of a federation resource name. +const FederationScheme = "xdstp" + +// Name contains the parsed component of an xDS resource name. +// +// An xDS resource name is in the format of +// xdstp://[{authority}]/{resource type}/{id/*}?{context parameters}{#processing directive,*} +// +// See +// https://github.com/cncf/xds/blob/main/proposals/TP1-xds-transport-next.md#uri-based-xds-resource-names +// for details, and examples. +type Name struct { + Scheme string + Authority string + Type string + ID string + + ContextParams map[string]string + + processingDirective string +} + +// ParseName splits the name and returns a struct representation of the Name. +// +// If the name isn't a valid new-style xDS name, field ID is set to the input. +// Note that this is not an error, because we still support the old-style +// resource names (those not starting with "xdstp:"). +// +// The caller can tell if the parsing is successful by checking the returned +// Scheme. +func ParseName(name string) *Name { + if !strings.Contains(name, "://") { + // Only the long form URL, with ://, is valid. + return &Name{ID: name} + } + parsed, err := url.Parse(name) + if err != nil { + return &Name{ID: name} + } + + ret := &Name{ + Scheme: parsed.Scheme, + Authority: parsed.Host, + } + split := strings.SplitN(parsed.Path, "/", 3) + if len(split) < 3 { + // Path is in the format of "/type/id". There must be at least 3 + // segments after splitting. 
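+ // For example, "xdstp://auth.example/envoy.config.route.v3.RouteConfiguration/rt"
+ // has path "/envoy.config.route.v3.RouteConfiguration/rt", which splits
+ // into ["", "envoy.config.route.v3.RouteConfiguration", "rt"].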
+ return &Name{ID: name} + } + ret.Type = split[1] + ret.ID = split[2] + if len(parsed.Query()) != 0 { + ret.ContextParams = make(map[string]string) + for k, vs := range parsed.Query() { + if len(vs) > 0 { + // We only keep one value of each key. Behavior for multiple values + // is undefined. + ret.ContextParams[k] = vs[0] + } + } + } + // TODO: processing directive (the part comes after "#" in the URL, stored + // in parsed.RawFragment) is kept but not processed. Add support for that + // when it's needed. + ret.processingDirective = parsed.RawFragment + return ret +} + +// String returns a canonicalized string of name. The context parameters are +// sorted by the keys. +func (n *Name) String() string { + if n.Scheme == "" { + return n.ID + } + + // Sort and build query. + keys := make([]string, 0, len(n.ContextParams)) + for k := range n.ContextParams { + keys = append(keys, k) + } + sort.Strings(keys) + var pairs []string + for _, k := range keys { + pairs = append(pairs, strings.Join([]string{k, n.ContextParams[k]}, "=")) + } + rawQuery := strings.Join(pairs, "&") + + path := n.Type + if n.ID != "" { + path = "/" + path + "/" + n.ID + } + + tempURL := &url.URL{ + Scheme: n.Scheme, + Host: n.Authority, + Path: path, + RawQuery: rawQuery, + RawFragment: n.processingDirective, + } + return tempURL.String() +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go new file mode 100644 index 0000000000..55cfd6fbb1 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go @@ -0,0 +1,174 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package xdsresource implements the xDS data model layer. +// +// Provides resource-type specific functionality to unmarshal xDS protos into +// internal data structures that contain only fields gRPC is interested in. +// These internal data structures are passed to components in the xDS stack +// (resolver/balancers/server) that have expressed interest in receiving +// updates to specific resources. +package xdsresource + +import ( + "google.golang.org/grpc/internal/xds/bootstrap" + xdsinternal "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/anypb" +) + +func init() { + xdsinternal.ResourceTypeMapForTesting = make(map[string]any) + xdsinternal.ResourceTypeMapForTesting[version.V3ListenerURL] = listenerType + xdsinternal.ResourceTypeMapForTesting[version.V3RouteConfigURL] = routeConfigType + xdsinternal.ResourceTypeMapForTesting[version.V3ClusterURL] = clusterType + xdsinternal.ResourceTypeMapForTesting[version.V3EndpointsURL] = endpointsType +} + +// Producer contains a single method to discover resource configuration from a +// remote management server using xDS APIs. 
+// +// The xdsclient package provides a concrete implementation of this interface. +type Producer interface { + // WatchResource uses xDS to discover the resource associated with the + // provided resource name. The resource type implementation determines how + // xDS requests are sent out and how responses are deserialized and + // validated. Upon receipt of a response from the management server, an + // appropriate callback on the watcher is invoked. + WatchResource(rType Type, resourceName string, watcher ResourceWatcher) (cancel func()) +} + +// OnDoneFunc is a function to be invoked by watcher implementations upon +// completing the processing of a callback from the xDS client. Failure to +// invoke this callback prevents the xDS client from reading further messages +// from the xDS server. +type OnDoneFunc func() + +// ResourceWatcher wraps the callbacks to be invoked for different events +// corresponding to the resource being watched. +type ResourceWatcher interface { + // OnUpdate is invoked to report an update for the resource being watched. + // The ResourceData parameter needs to be type asserted to the appropriate + // type for the resource being watched. + OnUpdate(ResourceData, OnDoneFunc) + + // OnError is invoked under different error conditions including but not + // limited to the following: + // - authority mentioned in the resource is not found + // - resource name parsing error + // - resource deserialization error + // - resource validation error + // - ADS stream failure + // - connection failure + OnError(error, OnDoneFunc) + + // OnResourceDoesNotExist is invoked for a specific error condition where + // the requested resource is not found on the xDS management server. + OnResourceDoesNotExist(OnDoneFunc) +} + +// TODO: Once the implementation is complete, rename this interface as +// ResourceType and get rid of the existing ResourceType enum. + +// Type wraps all resource-type specific functionality. Each supported resource +// type will provide an implementation of this interface. +type Type interface { + // TypeURL is the xDS type URL of this resource type for v3 transport. + TypeURL() string + + // TypeName identifies resources in a transport protocol agnostic way. This + // can be used for logging/debugging purposes, as well in cases where the + // resource type name is to be uniquely identified but the actual + // functionality provided by the resource type is not required. + // + // TODO: once Type is renamed to ResourceType, rename TypeName to + // ResourceTypeName. + TypeName() string + + // AllResourcesRequiredInSotW indicates whether this resource type requires + // that all resources be present in every SotW response from the server. If + // true, a response that does not include a previously seen resource will be + // interpreted as a deletion of that resource. + AllResourcesRequiredInSotW() bool + + // Decode deserializes and validates an xDS resource serialized inside the + // provided `Any` proto, as received from the xDS management server. + // + // If protobuf deserialization fails or resource validation fails, + // returns a non-nil error. Otherwise, returns a fully populated + // DecodeResult. + Decode(*DecodeOptions, *anypb.Any) (*DecodeResult, error) +} + +// ResourceData contains the configuration data sent by the xDS management +// server, associated with the resource being watched. Every resource type must +// provide an implementation of this interface to represent the configuration +// received from the xDS management server. 
+type ResourceData interface { + isResourceData() + + // Equal returns true if the passed in resource data is equal to that of the + // receiver. + Equal(ResourceData) bool + + // ToJSON returns a JSON string representation of the resource data. + ToJSON() string + + Raw() *anypb.Any +} + +// DecodeOptions wraps the options required by ResourceType implementation for +// decoding configuration received from the xDS management server. +type DecodeOptions struct { + // BootstrapConfig contains the complete bootstrap configuration passed to + // the xDS client. This contains useful data for resource validation. + BootstrapConfig *bootstrap.Config + // ServerConfig contains the server config (from the above bootstrap + // configuration) of the xDS server from which the current resource, for + // which Decode() is being invoked, was received. + ServerConfig *bootstrap.ServerConfig +} + +// DecodeResult is the result of a decode operation. +type DecodeResult struct { + // Name is the name of the resource being watched. + Name string + // Resource contains the configuration associated with the resource being + // watched. + Resource ResourceData +} + +// resourceTypeState wraps the static state associated with concrete resource +// type implementations, which can then embed this struct and get the methods +// implemented here for free. +type resourceTypeState struct { + typeURL string + typeName string + allResourcesRequiredInSotW bool +} + +func (r resourceTypeState) TypeURL() string { + return r.typeURL +} + +func (r resourceTypeState) TypeName() string { + return r.typeName +} + +func (r resourceTypeState) AllResourcesRequiredInSotW() bool { + return r.allResourcesRequiredInSotW +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go new file mode 100644 index 0000000000..ed32abb833 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go @@ -0,0 +1,150 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // RouteConfigTypeName represents the transport agnostic name for the + // route config resource. + RouteConfigTypeName = "RouteConfigResource" +) + +var ( + // Compile time interface checks. + _ Type = routeConfigResourceType{} + + // Singleton instantiation of the resource type implementation. 
+ routeConfigType = routeConfigResourceType{ + resourceTypeState: resourceTypeState{ + typeURL: version.V3RouteConfigURL, + typeName: "RouteConfigResource", + allResourcesRequiredInSotW: false, + }, + } +) + +// routeConfigResourceType provides the resource-type specific functionality for +// a RouteConfiguration resource. +// +// Implements the Type interface. +type routeConfigResourceType struct { + resourceTypeState +} + +// Decode deserializes and validates an xDS resource serialized inside the +// provided `Any` proto, as received from the xDS management server. +func (routeConfigResourceType) Decode(_ *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { + name, rc, err := unmarshalRouteConfigResource(resource) + switch { + case name == "": + // Name is unset only when protobuf deserialization fails. + return nil, err + case err != nil: + // Protobuf deserialization succeeded, but resource validation failed. + return &DecodeResult{Name: name, Resource: &RouteConfigResourceData{Resource: RouteConfigUpdate{}}}, err + } + + return &DecodeResult{Name: name, Resource: &RouteConfigResourceData{Resource: rc}}, nil + +} + +// RouteConfigResourceData wraps the configuration of a RouteConfiguration +// resource as received from the management server. +// +// Implements the ResourceData interface. +type RouteConfigResourceData struct { + ResourceData + + // TODO: We have always stored update structs by value. See if this can be + // switched to a pointer? + Resource RouteConfigUpdate +} + +// Equal returns true if other is equal to r. +func (r *RouteConfigResourceData) Equal(other ResourceData) bool { + if r == nil && other == nil { + return true + } + if (r == nil) != (other == nil) { + return false + } + return proto.Equal(r.Resource.Raw, other.Raw()) + +} + +// ToJSON returns a JSON string representation of the resource data. +func (r *RouteConfigResourceData) ToJSON() string { + return pretty.ToJSON(r.Resource) +} + +// Raw returns the underlying raw protobuf form of the route configuration +// resource. +func (r *RouteConfigResourceData) Raw() *anypb.Any { + return r.Resource.Raw +} + +// RouteConfigWatcher wraps the callbacks to be invoked for different +// events corresponding to the route configuration resource being watched. +type RouteConfigWatcher interface { + // OnUpdate is invoked to report an update for the resource being watched. + OnUpdate(*RouteConfigResourceData, OnDoneFunc) + + // OnError is invoked under different error conditions including but not + // limited to the following: + // - authority mentioned in the resource is not found + // - resource name parsing error + // - resource deserialization error + // - resource validation error + // - ADS stream failure + // - connection failure + OnError(error, OnDoneFunc) + + // OnResourceDoesNotExist is invoked for a specific error condition where + // the requested resource is not found on the xDS management server. 
+ OnResourceDoesNotExist(OnDoneFunc) +} + +type delegatingRouteConfigWatcher struct { + watcher RouteConfigWatcher +} + +func (d *delegatingRouteConfigWatcher) OnUpdate(data ResourceData, onDone OnDoneFunc) { + rc := data.(*RouteConfigResourceData) + d.watcher.OnUpdate(rc, onDone) +} + +func (d *delegatingRouteConfigWatcher) OnError(err error, onDone OnDoneFunc) { + d.watcher.OnError(err, onDone) +} + +func (d *delegatingRouteConfigWatcher) OnResourceDoesNotExist(onDone OnDoneFunc) { + d.watcher.OnResourceDoesNotExist(onDone) +} + +// WatchRouteConfig uses xDS to discover the configuration associated with the +// provided route configuration resource name. +func WatchRouteConfig(p Producer, name string, w RouteConfigWatcher) (cancel func()) { + delegator := &delegatingRouteConfigWatcher{watcher: w} + return p.WatchResource(routeConfigType, name, delegator) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type.go new file mode 100644 index 0000000000..9942041018 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type.go @@ -0,0 +1,135 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "time" + + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +// UpdateValidatorFunc performs validations on update structs using +// context/logic available at the xdsClient layer. Since these validation are +// performed on internal update structs, they can be shared between different +// API clients. +type UpdateValidatorFunc func(any) error + +// UpdateMetadata contains the metadata for each update, including timestamp, +// raw message, and so on. +type UpdateMetadata struct { + // Status is the status of this resource, e.g. ACKed, NACKed, or + // Not_exist(removed). + Status ServiceStatus + // Version is the version of the xds response. Note that this is the version + // of the resource in use (previous ACKed). If a response is NACKed, the + // NACKed version is in ErrState. + Version string + // Timestamp is when the response is received. + Timestamp time.Time + // ErrState is set when the update is NACKed. + ErrState *UpdateErrorMetadata +} + +// IsListenerResource returns true if the provider URL corresponds to an xDS +// Listener resource. +func IsListenerResource(url string) bool { + return url == version.V3ListenerURL +} + +// IsHTTPConnManagerResource returns true if the provider URL corresponds to an xDS +// HTTPConnManager resource. +func IsHTTPConnManagerResource(url string) bool { + return url == version.V3HTTPConnManagerURL +} + +// IsRouteConfigResource returns true if the provider URL corresponds to an xDS +// RouteConfig resource. 
+func IsRouteConfigResource(url string) bool { + return url == version.V3RouteConfigURL +} + +// IsClusterResource returns true if the provider URL corresponds to an xDS +// Cluster resource. +func IsClusterResource(url string) bool { + return url == version.V3ClusterURL +} + +// IsEndpointsResource returns true if the provider URL corresponds to an xDS +// Endpoints resource. +func IsEndpointsResource(url string) bool { + return url == version.V3EndpointsURL +} + +// UnwrapResource unwraps and returns the inner resource if it's in a resource +// wrapper. The original resource is returned if it's not wrapped. +func UnwrapResource(r *anypb.Any) (*anypb.Any, error) { + url := r.GetTypeUrl() + if url != version.V3ResourceWrapperURL { + // Not wrapped. + return r, nil + } + inner := &v3discoverypb.Resource{} + if err := proto.Unmarshal(r.GetValue(), inner); err != nil { + return nil, err + } + return inner.Resource, nil +} + +// ServiceStatus is the status of the update. +type ServiceStatus int + +const ( + // ServiceStatusUnknown is the default state, before a watch is started for + // the resource. + ServiceStatusUnknown ServiceStatus = iota + // ServiceStatusRequested is when the watch is started, but before and + // response is received. + ServiceStatusRequested + // ServiceStatusNotExist is when the resource doesn't exist in + // state-of-the-world responses (e.g. LDS and CDS), which means the resource + // is removed by the management server. + ServiceStatusNotExist // Resource is removed in the server, in LDS/CDS. + // ServiceStatusACKed is when the resource is ACKed. + ServiceStatusACKed + // ServiceStatusNACKed is when the resource is NACKed. + ServiceStatusNACKed +) + +// UpdateErrorMetadata is part of UpdateMetadata. It contains the error state +// when a response is NACKed. +type UpdateErrorMetadata struct { + // Version is the version of the NACKed response. + Version string + // Err contains why the response was NACKed. + Err error + // Timestamp is when the NACKed response was received. + Timestamp time.Time +} + +// UpdateWithMD contains the raw message of the update and the metadata, +// including version, raw message, timestamp. +// +// This is to be used for config dump and CSDS, not directly by users (like +// resolvers/balancers). +type UpdateWithMD struct { + MD UpdateMetadata + Raw *anypb.Any +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_cds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_cds.go new file mode 100644 index 0000000000..ae4cb1e465 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_cds.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "encoding/json" + + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/protobuf/types/known/anypb" +) + +// ClusterType is the type of cluster from a received CDS response. 
+type ClusterType int + +const ( + // ClusterTypeEDS represents the EDS cluster type, which will delegate endpoint + // discovery to the management server. + ClusterTypeEDS ClusterType = iota + // ClusterTypeLogicalDNS represents the Logical DNS cluster type, which essentially + // maps to the gRPC behavior of using the DNS resolver with pick_first LB policy. + ClusterTypeLogicalDNS + // ClusterTypeAggregate represents the Aggregate Cluster type, which provides a + // prioritized list of clusters to use. It is used for failover between clusters + // with a different configuration. + ClusterTypeAggregate +) + +// ClusterUpdate contains information from a received CDS response, which is of +// interest to the registered CDS watcher. +type ClusterUpdate struct { + ClusterType ClusterType + // ClusterName is the clusterName being watched for through CDS. + ClusterName string + // EDSServiceName is an optional name for EDS. If it's not set, the balancer + // should watch ClusterName for the EDS resources. + EDSServiceName string + // LRSServerConfig contains configuration about the xDS server that sent + // this cluster resource. This is also the server where load reports are to + // be sent, for this cluster. + LRSServerConfig *bootstrap.ServerConfig + // SecurityCfg contains security configuration sent by the control plane. + SecurityCfg *SecurityConfig + // MaxRequests for circuit breaking, if any (otherwise nil). + MaxRequests *uint32 + // DNSHostName is used only for cluster type DNS. It's the DNS name to + // resolve in "host:port" form + DNSHostName string + // PrioritizedClusterNames is used only for cluster type aggregate. It represents + // a prioritized list of cluster names. + PrioritizedClusterNames []string + + // LBPolicy represents the locality and endpoint picking policy in JSON, + // which will be the child policy of xds_cluster_impl. + LBPolicy json.RawMessage + + // OutlierDetection is the outlier detection configuration for this cluster. + // If nil, it means this cluster does not use the outlier detection feature. + OutlierDetection json.RawMessage + + // Raw is the resource from the xds response. + Raw *anypb.Any + // TelemetryLabels are the string valued metadata of filter_metadata type + // "com.google.csm.telemetry_labels" with keys "service_name" or + // "service_namespace". + TelemetryLabels map[string]string +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go new file mode 100644 index 0000000000..1254d250c9 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "google.golang.org/grpc/xds/internal" + "google.golang.org/protobuf/types/known/anypb" +) + +// OverloadDropConfig contains the config to drop overloads. 
+type OverloadDropConfig struct { + Category string + Numerator uint32 + Denominator uint32 +} + +// EndpointHealthStatus represents the health status of an endpoint. +type EndpointHealthStatus int32 + +const ( + // EndpointHealthStatusUnknown represents HealthStatus UNKNOWN. + EndpointHealthStatusUnknown EndpointHealthStatus = iota + // EndpointHealthStatusHealthy represents HealthStatus HEALTHY. + EndpointHealthStatusHealthy + // EndpointHealthStatusUnhealthy represents HealthStatus UNHEALTHY. + EndpointHealthStatusUnhealthy + // EndpointHealthStatusDraining represents HealthStatus DRAINING. + EndpointHealthStatusDraining + // EndpointHealthStatusTimeout represents HealthStatus TIMEOUT. + EndpointHealthStatusTimeout + // EndpointHealthStatusDegraded represents HealthStatus DEGRADED. + EndpointHealthStatusDegraded +) + +// Endpoint contains information of an endpoint. +type Endpoint struct { + Address string + HealthStatus EndpointHealthStatus + Weight uint32 +} + +// Locality contains information of a locality. +type Locality struct { + Endpoints []Endpoint + ID internal.LocalityID + Priority uint32 + Weight uint32 +} + +// EndpointsUpdate contains an EDS update. +type EndpointsUpdate struct { + Drops []OverloadDropConfig + // Localities in the EDS response with `load_balancing_weight` field not set + // or explicitly set to 0 are ignored while parsing the resource, and + // therefore do not show up here. + Localities []Locality + + // Raw is the resource from the xds response. + Raw *anypb.Any +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_lds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_lds.go new file mode 100644 index 0000000000..a71e38ea9a --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_lds.go @@ -0,0 +1,79 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "time" + + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/protobuf/types/known/anypb" +) + +// ListenerUpdate contains information received in an LDS response, which is of +// interest to the registered LDS watcher. +type ListenerUpdate struct { + // RouteConfigName is the route configuration name corresponding to the + // target which is being watched through LDS. + // + // Exactly one of RouteConfigName and InlineRouteConfig is set. + RouteConfigName string + // InlineRouteConfig is the inline route configuration (RDS response) + // returned inside LDS. + // + // Exactly one of RouteConfigName and InlineRouteConfig is set. + InlineRouteConfig *RouteConfigUpdate + + // MaxStreamDuration contains the HTTP connection manager's + // common_http_protocol_options.max_stream_duration field, or zero if + // unset. + MaxStreamDuration time.Duration + // HTTPFilters is a list of HTTP filters (name, config) from the LDS + // response. 
+ HTTPFilters []HTTPFilter + // InboundListenerCfg contains inbound listener configuration. + InboundListenerCfg *InboundListenerConfig + + // Raw is the resource from the xds response. + Raw *anypb.Any +} + +// HTTPFilter represents one HTTP filter from an LDS response's HTTP connection +// manager field. +type HTTPFilter struct { + // Name is an arbitrary name of the filter. Used for applying override + // settings in virtual host / route / weighted cluster configuration (not + // yet supported). + Name string + // Filter is the HTTP filter found in the registry for the config type. + Filter httpfilter.Filter + // Config contains the filter's configuration + Config httpfilter.FilterConfig +} + +// InboundListenerConfig contains information about the inbound listener, i.e +// the server-side listener. +type InboundListenerConfig struct { + // Address is the local address on which the inbound listener is expected to + // accept incoming connections. + Address string + // Port is the local port on which the inbound listener is expected to + // accept incoming connections. + Port string + // FilterChains is the list of filter chains associated with this listener. + FilterChains *FilterChainManager +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_rds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_rds.go new file mode 100644 index 0000000000..45d84d6706 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_rds.go @@ -0,0 +1,248 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "regexp" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/xds/matcher" + "google.golang.org/grpc/xds/internal/clusterspecifier" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/protobuf/types/known/anypb" +) + +// RouteConfigUpdate contains information received in an RDS response, which is +// of interest to the registered RDS watcher. +type RouteConfigUpdate struct { + VirtualHosts []*VirtualHost + // ClusterSpecifierPlugins are the LB Configurations for any + // ClusterSpecifierPlugins referenced by the Route Table. + ClusterSpecifierPlugins map[string]clusterspecifier.BalancerConfig + // Raw is the resource from the xds response. + Raw *anypb.Any +} + +// VirtualHost contains the routes for a list of Domains. +// +// Note that the domains in this slice can be a wildcard, not an exact string. +// The consumer of this struct needs to find the best match for its hostname. +type VirtualHost struct { + Domains []string + // Routes contains a list of routes, each containing matchers and + // corresponding action. + Routes []*Route + // HTTPFilterConfigOverride contains any HTTP filter config overrides for + // the virtual host which may be present. An individual filter's override + // may be unused if the matching Route contains an override for that + // filter. 
+ HTTPFilterConfigOverride map[string]httpfilter.FilterConfig + RetryConfig *RetryConfig +} + +// RetryConfig contains all retry-related configuration in either a VirtualHost +// or Route. +type RetryConfig struct { + // RetryOn is a set of status codes on which to retry. Only Canceled, + // DeadlineExceeded, Internal, ResourceExhausted, and Unavailable are + // supported; any other values will be omitted. + RetryOn map[codes.Code]bool + NumRetries uint32 // maximum number of retry attempts + RetryBackoff RetryBackoff // retry backoff policy +} + +// RetryBackoff describes the backoff policy for retries. +type RetryBackoff struct { + BaseInterval time.Duration // initial backoff duration between attempts + MaxInterval time.Duration // maximum backoff duration +} + +// HashPolicyType specifies the type of HashPolicy from a received RDS Response. +type HashPolicyType int + +const ( + // HashPolicyTypeHeader specifies to hash a Header in the incoming request. + HashPolicyTypeHeader HashPolicyType = iota + // HashPolicyTypeChannelID specifies to hash a unique Identifier of the + // Channel. This is a 64-bit random int computed at initialization time. + HashPolicyTypeChannelID +) + +// HashPolicy specifies the HashPolicy if the upstream cluster uses a hashing +// load balancer. +type HashPolicy struct { + HashPolicyType HashPolicyType + Terminal bool + // Fields used for type HEADER. + HeaderName string + Regex *regexp.Regexp + RegexSubstitution string +} + +// RouteActionType is the action of the route from a received RDS response. +type RouteActionType int + +const ( + // RouteActionUnsupported are routing types currently unsupported by grpc. + // According to A36, "A Route with an inappropriate action causes RPCs + // matching that route to fail." + RouteActionUnsupported RouteActionType = iota + // RouteActionRoute is the expected route type on the client side. Route + // represents routing a request to some upstream cluster. On the client + // side, if an RPC matches to a route that is not RouteActionRoute, the RPC + // will fail according to A36. + RouteActionRoute + // RouteActionNonForwardingAction is the expected route type on the server + // side. NonForwardingAction represents when a route will generate a + // response directly, without forwarding to an upstream host. + RouteActionNonForwardingAction +) + +// Route is both a specification of how to match a request as well as an +// indication of the action to take upon match. +type Route struct { + Path *string + Prefix *string + Regex *regexp.Regexp + // Indicates if prefix/path matching should be case insensitive. The default + // is false (case sensitive). + CaseInsensitive bool + Headers []*HeaderMatcher + Fraction *uint32 + + HashPolicies []*HashPolicy + + // If the matchers above indicate a match, the below configuration is used. + // If MaxStreamDuration is nil, it indicates neither of the route action's + // max_stream_duration fields (grpc_timeout_header_max nor + // max_stream_duration) were set. In this case, the ListenerUpdate's + // MaxStreamDuration field should be used. If MaxStreamDuration is set to + // an explicit zero duration, the application's deadline should be used. + MaxStreamDuration *time.Duration + // HTTPFilterConfigOverride contains any HTTP filter config overrides for + // the route which may be present. An individual filter's override may be + // unused if the matching WeightedCluster contains an override for that + // filter. 
+ HTTPFilterConfigOverride map[string]httpfilter.FilterConfig + RetryConfig *RetryConfig + + ActionType RouteActionType + + // Only one of the following fields (WeightedClusters or + // ClusterSpecifierPlugin) will be set for a route. + WeightedClusters map[string]WeightedCluster + // ClusterSpecifierPlugin is the name of the Cluster Specifier Plugin that + // this Route is linked to, if specified by xDS. + ClusterSpecifierPlugin string +} + +// WeightedCluster contains settings for an xds ActionType.WeightedCluster. +type WeightedCluster struct { + // Weight is the relative weight of the cluster. It will never be zero. + Weight uint32 + // HTTPFilterConfigOverride contains any HTTP filter config overrides for + // the weighted cluster which may be present. + HTTPFilterConfigOverride map[string]httpfilter.FilterConfig +} + +// HeaderMatcher represents header matchers. +type HeaderMatcher struct { + Name string + InvertMatch *bool + ExactMatch *string + RegexMatch *regexp.Regexp + PrefixMatch *string + SuffixMatch *string + RangeMatch *Int64Range + PresentMatch *bool + StringMatch *matcher.StringMatcher +} + +// Int64Range is a range for header range match. +type Int64Range struct { + Start int64 + End int64 +} + +// SecurityConfig contains the security configuration received as part of the +// Cluster resource on the client-side, and as part of the Listener resource on +// the server-side. +type SecurityConfig struct { + // RootInstanceName identifies the certProvider plugin to be used to fetch + // root certificates. This instance name will be resolved to the plugin name + // and its associated configuration from the certificate_providers field of + // the bootstrap file. + RootInstanceName string + // RootCertName is the certificate name to be passed to the plugin (looked + // up from the bootstrap file) while fetching root certificates. + RootCertName string + // IdentityInstanceName identifies the certProvider plugin to be used to + // fetch identity certificates. This instance name will be resolved to the + // plugin name and its associated configuration from the + // certificate_providers field of the bootstrap file. + IdentityInstanceName string + // IdentityCertName is the certificate name to be passed to the plugin + // (looked up from the bootstrap file) while fetching identity certificates. + IdentityCertName string + // SubjectAltNameMatchers is an optional list of match criteria for SANs + // specified on the peer certificate. Used only on the client-side. + // + // Some intricacies: + // - If this field is empty, then any peer certificate is accepted. + // - If the peer certificate contains a wildcard DNS SAN, and an `exact` + // matcher is configured, a wildcard DNS match is performed instead of a + // regular string comparison. + SubjectAltNameMatchers []matcher.StringMatcher + // RequireClientCert indicates if the server handshake process expects the + // client to present a certificate. Set to true when performing mTLS. Used + // only on the server-side. + RequireClientCert bool +} + +// Equal returns true if sc is equal to other. 
+func (sc *SecurityConfig) Equal(other *SecurityConfig) bool { + switch { + case sc == nil && other == nil: + return true + case (sc != nil) != (other != nil): + return false + } + switch { + case sc.RootInstanceName != other.RootInstanceName: + return false + case sc.RootCertName != other.RootCertName: + return false + case sc.IdentityInstanceName != other.IdentityInstanceName: + return false + case sc.IdentityCertName != other.IdentityCertName: + return false + case sc.RequireClientCert != other.RequireClientCert: + return false + default: + if len(sc.SubjectAltNameMatchers) != len(other.SubjectAltNameMatchers) { + return false + } + for i := 0; i < len(sc.SubjectAltNameMatchers); i++ { + if !sc.SubjectAltNameMatchers[i].Equal(other.SubjectAltNameMatchers[i]) { + return false + } + } + } + return true +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go new file mode 100644 index 0000000000..ab024b57c4 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -0,0 +1,699 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "encoding/json" + "errors" + "fmt" + "net" + "strconv" + "strings" + "time" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3aggregateclusterpb "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3" + v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/pretty" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/internal/xds/matcher" + "google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/structpb" +) + +// ValidateClusterAndConstructClusterUpdateForTesting exports the +// validateClusterAndConstructClusterUpdate function for testing purposes. +var ValidateClusterAndConstructClusterUpdateForTesting = validateClusterAndConstructClusterUpdate + +// TransportSocket proto message has a `name` field which is expected to be set +// to this value by the management server. 
+const transportSocketName = "envoy.transport_sockets.tls" + +func unmarshalClusterResource(r *anypb.Any, serverCfg *bootstrap.ServerConfig) (string, ClusterUpdate, error) { + r, err := UnwrapResource(r) + if err != nil { + return "", ClusterUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) + } + + if !IsClusterResource(r.GetTypeUrl()) { + return "", ClusterUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) + } + + cluster := &v3clusterpb.Cluster{} + if err := proto.Unmarshal(r.GetValue(), cluster); err != nil { + return "", ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) + } + cu, err := validateClusterAndConstructClusterUpdate(cluster, serverCfg) + if err != nil { + return cluster.GetName(), ClusterUpdate{}, err + } + cu.Raw = r + + return cluster.GetName(), cu, nil +} + +const ( + defaultRingHashMinSize = 1024 + defaultRingHashMaxSize = 8 * 1024 * 1024 // 8M + ringHashSizeUpperBound = 8 * 1024 * 1024 // 8M + + defaultLeastRequestChoiceCount = 2 +) + +func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster, serverCfg *bootstrap.ServerConfig) (ClusterUpdate, error) { + telemetryLabels := make(map[string]string) + if fmd := cluster.GetMetadata().GetFilterMetadata(); fmd != nil { + if val, ok := fmd["com.google.csm.telemetry_labels"]; ok { + if fields := val.GetFields(); fields != nil { + if val, ok := fields["service_name"]; ok { + if _, ok := val.GetKind().(*structpb.Value_StringValue); ok { + telemetryLabels["csm.service_name"] = val.GetStringValue() + } + } + if val, ok := fields["service_namespace"]; ok { + if _, ok := val.GetKind().(*structpb.Value_StringValue); ok { + telemetryLabels["csm.service_namespace_name"] = val.GetStringValue() + } + } + } + } + } + // "The values for the service labels csm.service_name and + // csm.service_namespace_name come from xDS, “unknown” if not present." - + // CSM Design. + if _, ok := telemetryLabels["csm.service_name"]; !ok { + telemetryLabels["csm.service_name"] = "unknown" + } + if _, ok := telemetryLabels["csm.service_namespace_name"]; !ok { + telemetryLabels["csm.service_namespace_name"] = "unknown" + } + + var lbPolicy json.RawMessage + var err error + switch cluster.GetLbPolicy() { + case v3clusterpb.Cluster_ROUND_ROBIN: + lbPolicy = []byte(`[{"xds_wrr_locality_experimental": {"childPolicy": [{"round_robin": {}}]}}]`) + case v3clusterpb.Cluster_RING_HASH: + rhc := cluster.GetRingHashLbConfig() + if rhc.GetHashFunction() != v3clusterpb.Cluster_RingHashLbConfig_XX_HASH { + return ClusterUpdate{}, fmt.Errorf("unsupported ring_hash hash function %v in response: %+v", rhc.GetHashFunction(), cluster) + } + // Minimum defaults to 1024 entries, and limited to 8M entries Maximum + // defaults to 8M entries, and limited to 8M entries + var minSize, maxSize uint64 = defaultRingHashMinSize, defaultRingHashMaxSize + if min := rhc.GetMinimumRingSize(); min != nil { + minSize = min.GetValue() + } + if max := rhc.GetMaximumRingSize(); max != nil { + maxSize = max.GetValue() + } + + rhLBCfg := []byte(fmt.Sprintf("{\"minRingSize\": %d, \"maxRingSize\": %d}", minSize, maxSize)) + lbPolicy = []byte(fmt.Sprintf(`[{"ring_hash_experimental": %s}]`, rhLBCfg)) + case v3clusterpb.Cluster_LEAST_REQUEST: + if !envconfig.LeastRequestLB { + return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) + } + + // "The configuration for the Least Request LB policy is the + // least_request_lb_config field. 
The field is optional; if not present, + // defaults will be assumed for all of its values." - A48 + lr := cluster.GetLeastRequestLbConfig() + var choiceCount uint32 = defaultLeastRequestChoiceCount + if cc := lr.GetChoiceCount(); cc != nil { + choiceCount = cc.GetValue() + } + // "If choice_count < 2, the config will be rejected." - A48 + if choiceCount < 2 { + return ClusterUpdate{}, fmt.Errorf("Cluster_LeastRequestLbConfig.ChoiceCount must be >= 2, got: %v", choiceCount) + } + + lrLBCfg := []byte(fmt.Sprintf("{\"choiceCount\": %d}", choiceCount)) + lbPolicy = []byte(fmt.Sprintf(`[{"least_request_experimental": %s}]`, lrLBCfg)) + default: + return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) + } + // Process security configuration received from the control plane iff the + // corresponding environment variable is set. + var sc *SecurityConfig + if sc, err = securityConfigFromCluster(cluster); err != nil { + return ClusterUpdate{}, err + } + + // Process outlier detection received from the control plane iff the + // corresponding environment variable is set. + var od json.RawMessage + if od, err = outlierConfigFromCluster(cluster); err != nil { + return ClusterUpdate{}, err + } + + if cluster.GetLoadBalancingPolicy() != nil { + lbPolicy, err = xdslbregistry.ConvertToServiceConfig(cluster.GetLoadBalancingPolicy(), 0) + if err != nil { + return ClusterUpdate{}, fmt.Errorf("error converting LoadBalancingPolicy %v in response: %+v: %v", cluster.GetLoadBalancingPolicy(), cluster, err) + } + // "It will be the responsibility of the XdsClient to validate the + // converted configuration. It will do this by having the gRPC LB policy + // registry parse the configuration." - A52 + bc := &iserviceconfig.BalancerConfig{} + if err := json.Unmarshal(lbPolicy, bc); err != nil { + return ClusterUpdate{}, fmt.Errorf("JSON generated from xDS LB policy registry: %s is invalid: %v", pretty.FormatJSON(lbPolicy), err) + } + } + + ret := ClusterUpdate{ + ClusterName: cluster.GetName(), + SecurityCfg: sc, + MaxRequests: circuitBreakersFromCluster(cluster), + LBPolicy: lbPolicy, + OutlierDetection: od, + TelemetryLabels: telemetryLabels, + } + + if lrs := cluster.GetLrsServer(); lrs != nil { + if lrs.GetSelf() == nil { + return ClusterUpdate{}, fmt.Errorf("unsupported config_source_specifier %T in lrs_server field", lrs.ConfigSourceSpecifier) + } + ret.LRSServerConfig = serverCfg + } + + // Validate and set cluster type from the response. 
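+ // Only EDS, LOGICAL_DNS and aggregate clusters are supported; any other
+ // type is rejected.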
+ switch { + case cluster.GetType() == v3clusterpb.Cluster_EDS: + if configsource := cluster.GetEdsClusterConfig().GetEdsConfig(); configsource.GetAds() == nil && configsource.GetSelf() == nil { + return ClusterUpdate{}, fmt.Errorf("CDS's EDS config source is not ADS or Self: %+v", cluster) + } + ret.ClusterType = ClusterTypeEDS + ret.EDSServiceName = cluster.GetEdsClusterConfig().GetServiceName() + if strings.HasPrefix(ret.ClusterName, "xdstp:") && ret.EDSServiceName == "" { + return ClusterUpdate{}, fmt.Errorf("CDS's EDS service name is not set with a new-style cluster name: %+v", cluster) + } + return ret, nil + case cluster.GetType() == v3clusterpb.Cluster_LOGICAL_DNS: + ret.ClusterType = ClusterTypeLogicalDNS + dnsHN, err := dnsHostNameFromCluster(cluster) + if err != nil { + return ClusterUpdate{}, err + } + ret.DNSHostName = dnsHN + return ret, nil + case cluster.GetClusterType() != nil && cluster.GetClusterType().Name == "envoy.clusters.aggregate": + clusters := &v3aggregateclusterpb.ClusterConfig{} + if err := proto.Unmarshal(cluster.GetClusterType().GetTypedConfig().GetValue(), clusters); err != nil { + return ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) + } + if len(clusters.Clusters) == 0 { + return ClusterUpdate{}, fmt.Errorf("xds: aggregate cluster has empty clusters field in response: %+v", cluster) + } + ret.ClusterType = ClusterTypeAggregate + ret.PrioritizedClusterNames = clusters.Clusters + return ret, nil + default: + return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster) + } +} + +// dnsHostNameFromCluster extracts the DNS host name from the cluster's load +// assignment. +// +// There should be exactly one locality, with one endpoint, whose address +// contains the address and port. +func dnsHostNameFromCluster(cluster *v3clusterpb.Cluster) (string, error) { + loadAssignment := cluster.GetLoadAssignment() + if loadAssignment == nil { + return "", fmt.Errorf("load_assignment not present for LOGICAL_DNS cluster") + } + if len(loadAssignment.GetEndpoints()) != 1 { + return "", fmt.Errorf("load_assignment for LOGICAL_DNS cluster must have exactly one locality, got: %+v", loadAssignment) + } + endpoints := loadAssignment.GetEndpoints()[0].GetLbEndpoints() + if len(endpoints) != 1 { + return "", fmt.Errorf("locality for LOGICAL_DNS cluster must have exactly one endpoint, got: %+v", endpoints) + } + endpoint := endpoints[0].GetEndpoint() + if endpoint == nil { + return "", fmt.Errorf("endpoint for LOGICAL_DNS cluster not set") + } + socketAddr := endpoint.GetAddress().GetSocketAddress() + if socketAddr == nil { + return "", fmt.Errorf("socket address for endpoint for LOGICAL_DNS cluster not set") + } + if socketAddr.GetResolverName() != "" { + return "", fmt.Errorf("socket address for endpoint for LOGICAL_DNS cluster not set has unexpected custom resolver name: %v", socketAddr.GetResolverName()) + } + host := socketAddr.GetAddress() + if host == "" { + return "", fmt.Errorf("host for endpoint for LOGICAL_DNS cluster not set") + } + port := socketAddr.GetPortValue() + if port == 0 { + return "", fmt.Errorf("port for endpoint for LOGICAL_DNS cluster not set") + } + return net.JoinHostPort(host, strconv.Itoa(int(port))), nil +} + +// securityConfigFromCluster extracts the relevant security configuration from +// the received Cluster resource. 
+func securityConfigFromCluster(cluster *v3clusterpb.Cluster) (*SecurityConfig, error) { + if tsm := cluster.GetTransportSocketMatches(); len(tsm) != 0 { + return nil, fmt.Errorf("unsupported transport_socket_matches field is non-empty: %+v", tsm) + } + // The Cluster resource contains a `transport_socket` field, which contains + // a oneof `typed_config` field of type `protobuf.Any`. The any proto + // contains a marshaled representation of an `UpstreamTlsContext` message. + ts := cluster.GetTransportSocket() + if ts == nil { + return nil, nil + } + if name := ts.GetName(); name != transportSocketName { + return nil, fmt.Errorf("transport_socket field has unexpected name: %s", name) + } + tc := ts.GetTypedConfig() + if tc == nil || tc.TypeUrl != version.V3UpstreamTLSContextURL { + return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", tc.TypeUrl) + } + upstreamCtx := &v3tlspb.UpstreamTlsContext{} + if err := proto.Unmarshal(tc.GetValue(), upstreamCtx); err != nil { + return nil, fmt.Errorf("failed to unmarshal UpstreamTlsContext in CDS response: %v", err) + } + // The following fields from `UpstreamTlsContext` are ignored: + // - sni + // - allow_renegotiation + // - max_session_keys + if upstreamCtx.GetCommonTlsContext() == nil { + return nil, errors.New("UpstreamTlsContext in CDS response does not contain a CommonTlsContext") + } + + return securityConfigFromCommonTLSContext(upstreamCtx.GetCommonTlsContext(), false) +} + +// common is expected to be not nil. +// The `alpn_protocols` field is ignored. +func securityConfigFromCommonTLSContext(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) { + if common.GetTlsParams() != nil { + return nil, fmt.Errorf("unsupported tls_params field in CommonTlsContext message: %+v", common) + } + if common.GetCustomHandshaker() != nil { + return nil, fmt.Errorf("unsupported custom_handshaker field in CommonTlsContext message: %+v", common) + } + + // For now, if we can't get a valid security config from the new fields, we + // fallback to the old deprecated fields. + // TODO: Drop support for deprecated fields. NACK if err != nil here. + sc, _ := securityConfigFromCommonTLSContextUsingNewFields(common, server) + if sc == nil || sc.Equal(&SecurityConfig{}) { + var err error + sc, err = securityConfigFromCommonTLSContextWithDeprecatedFields(common, server) + if err != nil { + return nil, err + } + } + if sc != nil { + // sc == nil is a valid case where the control plane has not sent us any + // security configuration. xDS creds will use fallback creds. + if server { + if sc.IdentityInstanceName == "" { + return nil, errors.New("security configuration on the server-side does not contain identity certificate provider instance name") + } + } else { + if sc.RootInstanceName == "" { + return nil, errors.New("security configuration on the client-side does not contain root certificate provider instance name") + } + } + } + return sc, nil +} + +func securityConfigFromCommonTLSContextWithDeprecatedFields(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) { + // The `CommonTlsContext` contains a + // `tls_certificate_certificate_provider_instance` field of type + // `CertificateProviderInstance`, which contains the provider instance name + // and the certificate name to fetch identity certs. 
+ sc := &SecurityConfig{} + if identity := common.GetTlsCertificateCertificateProviderInstance(); identity != nil { + sc.IdentityInstanceName = identity.GetInstanceName() + sc.IdentityCertName = identity.GetCertificateName() + } + + // The `CommonTlsContext` contains a `validation_context_type` field which + // is a oneof. We can get the values that we are interested in from two of + // those possible values: + // - combined validation context: + // - contains a default validation context which holds the list of + // matchers for accepted SANs. + // - contains certificate provider instance configuration + // - certificate provider instance configuration + // - in this case, we do not get a list of accepted SANs. + switch t := common.GetValidationContextType().(type) { + case *v3tlspb.CommonTlsContext_CombinedValidationContext: + combined := common.GetCombinedValidationContext() + var matchers []matcher.StringMatcher + if def := combined.GetDefaultValidationContext(); def != nil { + for _, m := range def.GetMatchSubjectAltNames() { + matcher, err := matcher.StringMatcherFromProto(m) + if err != nil { + return nil, err + } + matchers = append(matchers, matcher) + } + } + if server && len(matchers) != 0 { + return nil, fmt.Errorf("match_subject_alt_names field in validation context is not supported on the server: %v", common) + } + sc.SubjectAltNameMatchers = matchers + if pi := combined.GetValidationContextCertificateProviderInstance(); pi != nil { + sc.RootInstanceName = pi.GetInstanceName() + sc.RootCertName = pi.GetCertificateName() + } + case *v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance: + pi := common.GetValidationContextCertificateProviderInstance() + sc.RootInstanceName = pi.GetInstanceName() + sc.RootCertName = pi.GetCertificateName() + case nil: + // It is valid for the validation context to be nil on the server side. + default: + return nil, fmt.Errorf("validation context contains unexpected type: %T", t) + } + return sc, nil +} + +// gRFC A29 https://github.com/grpc/proposal/blob/master/A29-xds-tls-security.md +// specifies the new way to fetch security configuration and says the following: +// +// Although there are various ways to obtain certificates as per this proto +// (which are supported by Envoy), gRPC supports only one of them and that is +// the `CertificateProviderPluginInstance` proto. +// +// This helper function attempts to fetch security configuration from the +// `CertificateProviderPluginInstance` message, given a CommonTlsContext. +func securityConfigFromCommonTLSContextUsingNewFields(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) { + // The `tls_certificate_provider_instance` field of type + // `CertificateProviderPluginInstance` is used to fetch the identity + // certificate provider. 
+ sc := &SecurityConfig{} + identity := common.GetTlsCertificateProviderInstance() + if identity == nil && len(common.GetTlsCertificates()) != 0 { + return nil, fmt.Errorf("expected field tls_certificate_provider_instance is not set, while unsupported field tls_certificates is set in CommonTlsContext message: %+v", common) + } + if identity == nil && common.GetTlsCertificateSdsSecretConfigs() != nil { + return nil, fmt.Errorf("expected field tls_certificate_provider_instance is not set, while unsupported field tls_certificate_sds_secret_configs is set in CommonTlsContext message: %+v", common) + } + sc.IdentityInstanceName = identity.GetInstanceName() + sc.IdentityCertName = identity.GetCertificateName() + + // The `CommonTlsContext` contains a oneof field `validation_context_type`, + // which contains the `CertificateValidationContext` message in one of the + // following ways: + // - `validation_context` field + // - this is directly of type `CertificateValidationContext` + // - `combined_validation_context` field + // - this is of type `CombinedCertificateValidationContext` and contains + // a `default validation context` field of type + // `CertificateValidationContext` + // + // The `CertificateValidationContext` message has the following fields that + // we are interested in: + // - `ca_certificate_provider_instance` + // - this is of type `CertificateProviderPluginInstance` + // - `match_subject_alt_names` + // - this is a list of string matchers + // + // The `CertificateProviderPluginInstance` message contains two fields + // - instance_name + // - this is the certificate provider instance name to be looked up in + // the bootstrap configuration + // - certificate_name + // - this is an opaque name passed to the certificate provider + var validationCtx *v3tlspb.CertificateValidationContext + switch typ := common.GetValidationContextType().(type) { + case *v3tlspb.CommonTlsContext_ValidationContext: + validationCtx = common.GetValidationContext() + case *v3tlspb.CommonTlsContext_CombinedValidationContext: + validationCtx = common.GetCombinedValidationContext().GetDefaultValidationContext() + case nil: + // It is valid for the validation context to be nil on the server side. + return sc, nil + default: + return nil, fmt.Errorf("validation context contains unexpected type: %T", typ) + } + // If we get here, it means that the `CertificateValidationContext` message + // was found through one of the supported ways. It is an error if the + // validation context is specified, but it does not contain the + // ca_certificate_provider_instance field which contains information about + // the certificate provider to be used for the root certificates. 
+ if validationCtx.GetCaCertificateProviderInstance() == nil { + return nil, fmt.Errorf("expected field ca_certificate_provider_instance is missing in CommonTlsContext message: %+v", common) + } + // The following fields are ignored: + // - trusted_ca + // - watched_directory + // - allow_expired_certificate + // - trust_chain_verification + switch { + case len(validationCtx.GetVerifyCertificateSpki()) != 0: + return nil, fmt.Errorf("unsupported verify_certificate_spki field in CommonTlsContext message: %+v", common) + case len(validationCtx.GetVerifyCertificateHash()) != 0: + return nil, fmt.Errorf("unsupported verify_certificate_hash field in CommonTlsContext message: %+v", common) + case validationCtx.GetRequireSignedCertificateTimestamp().GetValue(): + return nil, fmt.Errorf("unsupported require_signed_certificate_timestamp field in CommonTlsContext message: %+v", common) + case validationCtx.GetCrl() != nil: + return nil, fmt.Errorf("unsupported crl field in CommonTlsContext message: %+v", common) + case validationCtx.GetCustomValidatorConfig() != nil: + return nil, fmt.Errorf("unsupported custom_validator_config field in CommonTlsContext message: %+v", common) + } + + if rootProvider := validationCtx.GetCaCertificateProviderInstance(); rootProvider != nil { + sc.RootInstanceName = rootProvider.GetInstanceName() + sc.RootCertName = rootProvider.GetCertificateName() + } + var matchers []matcher.StringMatcher + for _, m := range validationCtx.GetMatchSubjectAltNames() { + matcher, err := matcher.StringMatcherFromProto(m) + if err != nil { + return nil, err + } + matchers = append(matchers, matcher) + } + if server && len(matchers) != 0 { + return nil, fmt.Errorf("match_subject_alt_names field in validation context is not supported on the server: %v", common) + } + sc.SubjectAltNameMatchers = matchers + return sc, nil +} + +// circuitBreakersFromCluster extracts the circuit breakers configuration from +// the received cluster resource. Returns nil if no CircuitBreakers or no +// Thresholds in CircuitBreakers. +func circuitBreakersFromCluster(cluster *v3clusterpb.Cluster) *uint32 { + for _, threshold := range cluster.GetCircuitBreakers().GetThresholds() { + if threshold.GetPriority() != v3corepb.RoutingPriority_DEFAULT { + continue + } + maxRequestsPb := threshold.GetMaxRequests() + if maxRequestsPb == nil { + return nil + } + maxRequests := maxRequestsPb.GetValue() + return &maxRequests + } + return nil +} + +// idurationp takes a time.Duration and converts it to an internal duration, and +// returns a pointer to that internal duration. +func idurationp(d time.Duration) *iserviceconfig.Duration { + id := iserviceconfig.Duration(d) + return &id +} + +func uint32p(i uint32) *uint32 { + return &i +} + +// Helper types to prepare Outlier Detection JSON. Pointer types to distinguish +// between unset and a zero value. 
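As an illustrative sketch (hypothetical code, not part of the vendored files), the snippet below walks a Cluster's circuit-breaker thresholds the same way circuitBreakersFromCluster above does: only the DEFAULT-priority threshold's max_requests is used, and 512 is an arbitrary example value. The helper types for the Outlier Detection JSON continue below.

package main

import (
	"fmt"

	v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
	v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	cluster := &v3clusterpb.Cluster{
		CircuitBreakers: &v3clusterpb.CircuitBreakers{
			Thresholds: []*v3clusterpb.CircuitBreakers_Thresholds{{
				Priority:    v3corepb.RoutingPriority_DEFAULT,
				MaxRequests: wrapperspb.UInt32(512),
			}},
		},
	}
	// Only the DEFAULT-priority threshold's max_requests is honored; other
	// priorities are skipped, matching circuitBreakersFromCluster above.
	for _, t := range cluster.GetCircuitBreakers().GetThresholds() {
		if t.GetPriority() != v3corepb.RoutingPriority_DEFAULT {
			continue
		}
		fmt.Println(t.GetMaxRequests().GetValue()) // 512
	}
}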
+type successRateEjection struct { + StdevFactor *uint32 `json:"stdevFactor,omitempty"` + EnforcementPercentage *uint32 `json:"enforcementPercentage,omitempty"` + MinimumHosts *uint32 `json:"minimumHosts,omitempty"` + RequestVolume *uint32 `json:"requestVolume,omitempty"` +} + +type failurePercentageEjection struct { + Threshold *uint32 `json:"threshold,omitempty"` + EnforcementPercentage *uint32 `json:"enforcementPercentage,omitempty"` + MinimumHosts *uint32 `json:"minimumHosts,omitempty"` + RequestVolume *uint32 `json:"requestVolume,omitempty"` +} + +type odLBConfig struct { + Interval *iserviceconfig.Duration `json:"interval,omitempty"` + BaseEjectionTime *iserviceconfig.Duration `json:"baseEjectionTime,omitempty"` + MaxEjectionTime *iserviceconfig.Duration `json:"maxEjectionTime,omitempty"` + MaxEjectionPercent *uint32 `json:"maxEjectionPercent,omitempty"` + SuccessRateEjection *successRateEjection `json:"successRateEjection,omitempty"` + FailurePercentageEjection *failurePercentageEjection `json:"failurePercentageEjection,omitempty"` +} + +// outlierConfigFromCluster converts the received Outlier Detection +// configuration into JSON configuration for Outlier Detection, taking into +// account xDS Defaults. Returns nil if no OutlierDetection field set in the +// cluster resource. +func outlierConfigFromCluster(cluster *v3clusterpb.Cluster) (json.RawMessage, error) { + od := cluster.GetOutlierDetection() + if od == nil { + return nil, nil + } + + // "The outlier_detection field of the Cluster resource should have its fields + // validated according to the rules for the corresponding LB policy config + // fields in the above "Validation" section. If any of these requirements is + // violated, the Cluster resource should be NACKed." - A50 + // "The google.protobuf.Duration fields interval, base_ejection_time, and + // max_ejection_time must obey the restrictions in the + // google.protobuf.Duration documentation and they must have non-negative + // values." - A50 + var interval *iserviceconfig.Duration + if i := od.GetInterval(); i != nil { + if err := i.CheckValid(); err != nil { + return nil, fmt.Errorf("outlier_detection.interval is invalid with error: %v", err) + } + if interval = idurationp(i.AsDuration()); *interval < 0 { + return nil, fmt.Errorf("outlier_detection.interval = %v; must be a valid duration and >= 0", *interval) + } + } + + var baseEjectionTime *iserviceconfig.Duration + if bet := od.GetBaseEjectionTime(); bet != nil { + if err := bet.CheckValid(); err != nil { + return nil, fmt.Errorf("outlier_detection.base_ejection_time is invalid with error: %v", err) + } + if baseEjectionTime = idurationp(bet.AsDuration()); *baseEjectionTime < 0 { + return nil, fmt.Errorf("outlier_detection.base_ejection_time = %v; must be >= 0", *baseEjectionTime) + } + } + + var maxEjectionTime *iserviceconfig.Duration + if met := od.GetMaxEjectionTime(); met != nil { + if err := met.CheckValid(); err != nil { + return nil, fmt.Errorf("outlier_detection.max_ejection_time is invalid: %v", err) + } + if maxEjectionTime = idurationp(met.AsDuration()); *maxEjectionTime < 0 { + return nil, fmt.Errorf("outlier_detection.max_ejection_time = %v; must be >= 0", *maxEjectionTime) + } + } + + // "The fields max_ejection_percent, enforcing_success_rate, + // failure_percentage_threshold, and enforcing_failure_percentage must have + // values less than or equal to 100. If any of these requirements is + // violated, the Cluster resource should be NACKed." 
- A50 + var maxEjectionPercent *uint32 + if mep := od.GetMaxEjectionPercent(); mep != nil { + if maxEjectionPercent = uint32p(mep.GetValue()); *maxEjectionPercent > 100 { + return nil, fmt.Errorf("outlier_detection.max_ejection_percent = %v; must be <= 100", *maxEjectionPercent) + } + } + // "if the enforcing_success_rate field is set to 0, the config + // success_rate_ejection field will be null and all success_rate_* fields + // will be ignored." - A50 + var enforcingSuccessRate *uint32 + if esr := od.GetEnforcingSuccessRate(); esr != nil { + if enforcingSuccessRate = uint32p(esr.GetValue()); *enforcingSuccessRate > 100 { + return nil, fmt.Errorf("outlier_detection.enforcing_success_rate = %v; must be <= 100", *enforcingSuccessRate) + } + } + var failurePercentageThreshold *uint32 + if fpt := od.GetFailurePercentageThreshold(); fpt != nil { + if failurePercentageThreshold = uint32p(fpt.GetValue()); *failurePercentageThreshold > 100 { + return nil, fmt.Errorf("outlier_detection.failure_percentage_threshold = %v; must be <= 100", *failurePercentageThreshold) + } + } + // "If the enforcing_failure_percent field is set to 0 or null, the config + // failure_percent_ejection field will be null and all failure_percent_* + // fields will be ignored." - A50 + var enforcingFailurePercentage *uint32 + if efp := od.GetEnforcingFailurePercentage(); efp != nil { + if enforcingFailurePercentage = uint32p(efp.GetValue()); *enforcingFailurePercentage > 100 { + return nil, fmt.Errorf("outlier_detection.enforcing_failure_percentage = %v; must be <= 100", *enforcingFailurePercentage) + } + } + + var successRateStdevFactor *uint32 + if srsf := od.GetSuccessRateStdevFactor(); srsf != nil { + successRateStdevFactor = uint32p(srsf.GetValue()) + } + var successRateMinimumHosts *uint32 + if srmh := od.GetSuccessRateMinimumHosts(); srmh != nil { + successRateMinimumHosts = uint32p(srmh.GetValue()) + } + var successRateRequestVolume *uint32 + if srrv := od.GetSuccessRateRequestVolume(); srrv != nil { + successRateRequestVolume = uint32p(srrv.GetValue()) + } + var failurePercentageMinimumHosts *uint32 + if fpmh := od.GetFailurePercentageMinimumHosts(); fpmh != nil { + failurePercentageMinimumHosts = uint32p(fpmh.GetValue()) + } + var failurePercentageRequestVolume *uint32 + if fprv := od.GetFailurePercentageRequestVolume(); fprv != nil { + failurePercentageRequestVolume = uint32p(fprv.GetValue()) + } + + // "if the enforcing_success_rate field is set to 0, the config + // success_rate_ejection field will be null and all success_rate_* fields + // will be ignored." - A50 + var sre *successRateEjection + if enforcingSuccessRate == nil || *enforcingSuccessRate != 0 { + sre = &successRateEjection{ + StdevFactor: successRateStdevFactor, + EnforcementPercentage: enforcingSuccessRate, + MinimumHosts: successRateMinimumHosts, + RequestVolume: successRateRequestVolume, + } + } + + // "If the enforcing_failure_percent field is set to 0 or null, the config + // failure_percent_ejection field will be null and all failure_percent_* + // fields will be ignored." 
- A50 + var fpe *failurePercentageEjection + if enforcingFailurePercentage != nil && *enforcingFailurePercentage != 0 { + fpe = &failurePercentageEjection{ + Threshold: failurePercentageThreshold, + EnforcementPercentage: enforcingFailurePercentage, + MinimumHosts: failurePercentageMinimumHosts, + RequestVolume: failurePercentageRequestVolume, + } + } + + odLBCfg := &odLBConfig{ + Interval: interval, + BaseEjectionTime: baseEjectionTime, + MaxEjectionTime: maxEjectionTime, + MaxEjectionPercent: maxEjectionPercent, + SuccessRateEjection: sre, + FailurePercentageEjection: fpe, + } + return json.Marshal(odLBCfg) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go new file mode 100644 index 0000000000..f65845b702 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go @@ -0,0 +1,176 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "fmt" + "math" + "net" + "strconv" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +func unmarshalEndpointsResource(r *anypb.Any) (string, EndpointsUpdate, error) { + r, err := UnwrapResource(r) + if err != nil { + return "", EndpointsUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) + } + + if !IsEndpointsResource(r.GetTypeUrl()) { + return "", EndpointsUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) + } + + cla := &v3endpointpb.ClusterLoadAssignment{} + if err := proto.Unmarshal(r.GetValue(), cla); err != nil { + return "", EndpointsUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) + } + + u, err := parseEDSRespProto(cla) + if err != nil { + return cla.GetClusterName(), EndpointsUpdate{}, err + } + u.Raw = r + return cla.GetClusterName(), u, nil +} + +func parseAddress(socketAddress *v3corepb.SocketAddress) string { + return net.JoinHostPort(socketAddress.GetAddress(), strconv.Itoa(int(socketAddress.GetPortValue()))) +} + +func parseDropPolicy(dropPolicy *v3endpointpb.ClusterLoadAssignment_Policy_DropOverload) OverloadDropConfig { + percentage := dropPolicy.GetDropPercentage() + var ( + numerator = percentage.GetNumerator() + denominator uint32 + ) + switch percentage.GetDenominator() { + case v3typepb.FractionalPercent_HUNDRED: + denominator = 100 + case v3typepb.FractionalPercent_TEN_THOUSAND: + denominator = 10000 + case v3typepb.FractionalPercent_MILLION: + denominator = 1000000 + } + return OverloadDropConfig{ + Category: dropPolicy.GetCategory(), + Numerator: numerator, + Denominator: denominator, + } +} + +func 
parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint, uniqueEndpointAddrs map[string]bool) ([]Endpoint, error) { + endpoints := make([]Endpoint, 0, len(lbEndpoints)) + for _, lbEndpoint := range lbEndpoints { + // If the load_balancing_weight field is specified, it must be set to a + // value of at least 1. If unspecified, each host is presumed to have + // equal weight in a locality. + weight := uint32(1) + if w := lbEndpoint.GetLoadBalancingWeight(); w != nil { + if w.GetValue() == 0 { + return nil, fmt.Errorf("EDS response contains an endpoint with zero weight: %+v", lbEndpoint) + } + weight = w.GetValue() + } + addr := parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress()) + if uniqueEndpointAddrs[addr] { + return nil, fmt.Errorf("duplicate endpoint with the same address %s", addr) + } + uniqueEndpointAddrs[addr] = true + endpoints = append(endpoints, Endpoint{ + HealthStatus: EndpointHealthStatus(lbEndpoint.GetHealthStatus()), + Address: addr, + Weight: weight, + }) + } + return endpoints, nil +} + +func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment) (EndpointsUpdate, error) { + ret := EndpointsUpdate{} + for _, dropPolicy := range m.GetPolicy().GetDropOverloads() { + ret.Drops = append(ret.Drops, parseDropPolicy(dropPolicy)) + } + priorities := make(map[uint32]map[string]bool) + sumOfWeights := make(map[uint32]uint64) + uniqueEndpointAddrs := make(map[string]bool) + for _, locality := range m.Endpoints { + l := locality.GetLocality() + if l == nil { + return EndpointsUpdate{}, fmt.Errorf("EDS response contains a locality without ID, locality: %+v", locality) + } + weight := locality.GetLoadBalancingWeight().GetValue() + if weight == 0 { + logger.Warningf("Ignoring locality %s with weight 0", pretty.ToJSON(l)) + continue + } + priority := locality.GetPriority() + sumOfWeights[priority] += uint64(weight) + if sumOfWeights[priority] > math.MaxUint32 { + return EndpointsUpdate{}, fmt.Errorf("sum of weights of localities at the same priority %d exceeded maximal value", priority) + } + localitiesWithPriority := priorities[priority] + if localitiesWithPriority == nil { + localitiesWithPriority = make(map[string]bool) + priorities[priority] = localitiesWithPriority + } + lid := internal.LocalityID{ + Region: l.Region, + Zone: l.Zone, + SubZone: l.SubZone, + } + lidStr, _ := lid.ToString() + + // "Since an xDS configuration can place a given locality under multiple + // priorities, it is possible to see locality weight attributes with + // different values for the same locality." - A52 + // + // This is handled in the client by emitting the locality weight + // specified for the priority it is specified in. If the same locality + // has a different weight in two priorities, each priority will specify + // a locality with the locality weight specified for that priority, and + // thus the subsequent tree of balancers linked to that priority will + // use that locality weight as well. 
+ if localitiesWithPriority[lidStr] { + return EndpointsUpdate{}, fmt.Errorf("duplicate locality %s with the same priority %v", lidStr, priority) + } + localitiesWithPriority[lidStr] = true + endpoints, err := parseEndpoints(locality.GetLbEndpoints(), uniqueEndpointAddrs) + if err != nil { + return EndpointsUpdate{}, err + } + ret.Localities = append(ret.Localities, Locality{ + ID: lid, + Endpoints: endpoints, + Weight: weight, + Priority: priority, + }) + } + for i := 0; i < len(priorities); i++ { + if _, ok := priorities[uint32(i)]; !ok { + return EndpointsUpdate{}, fmt.Errorf("priority %v missing (with different priorities %v received)", i, priorities) + } + } + return ret, nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_lds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_lds.go new file mode 100644 index 0000000000..e9a29b9387 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_lds.go @@ -0,0 +1,277 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "errors" + "fmt" + "strconv" + + v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +func unmarshalListenerResource(r *anypb.Any) (string, ListenerUpdate, error) { + r, err := UnwrapResource(r) + if err != nil { + return "", ListenerUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) + } + + if !IsListenerResource(r.GetTypeUrl()) { + return "", ListenerUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) + } + lis := &v3listenerpb.Listener{} + if err := proto.Unmarshal(r.GetValue(), lis); err != nil { + return "", ListenerUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) + } + + lu, err := processListener(lis) + if err != nil { + return lis.GetName(), ListenerUpdate{}, err + } + lu.Raw = r + return lis.GetName(), *lu, nil +} + +func processListener(lis *v3listenerpb.Listener) (*ListenerUpdate, error) { + if lis.GetApiListener() != nil { + return processClientSideListener(lis) + } + return processServerSideListener(lis) +} + +// processClientSideListener checks if the provided Listener proto meets +// the expected criteria. If so, it returns a non-empty routeConfigName. 
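As a small standalone sketch (hypothetical code, not part of the vendored files), the priority-contiguity rule enforced at the end of parseEDSRespProto above can be shown in isolation: locality priorities must form the dense range 0..N-1, so a response that uses priorities 0 and 2 but skips 1 is rejected. processClientSideListener continues below.

package main

import "fmt"

func main() {
	// Priorities observed in a ClusterLoadAssignment, keyed by priority value.
	priorities := map[uint32]bool{0: true, 2: true} // priority 1 is missing
	for i := 0; i < len(priorities); i++ {
		if !priorities[uint32(i)] {
			fmt.Printf("priority %d missing: the EDS resource would be rejected\n", i)
			return
		}
	}
	fmt.Println("priorities are contiguous")
}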
+func processClientSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, error) { + update := &ListenerUpdate{} + + apiLisAny := lis.GetApiListener().GetApiListener() + if !IsHTTPConnManagerResource(apiLisAny.GetTypeUrl()) { + return nil, fmt.Errorf("unexpected resource type: %q", apiLisAny.GetTypeUrl()) + } + apiLis := &v3httppb.HttpConnectionManager{} + if err := proto.Unmarshal(apiLisAny.GetValue(), apiLis); err != nil { + return nil, fmt.Errorf("failed to unmarshal api_listener: %v", err) + } + // "HttpConnectionManager.xff_num_trusted_hops must be unset or zero and + // HttpConnectionManager.original_ip_detection_extensions must be empty. If + // either field has an incorrect value, the Listener must be NACKed." - A41 + if apiLis.XffNumTrustedHops != 0 { + return nil, fmt.Errorf("xff_num_trusted_hops must be unset or zero %+v", apiLis) + } + if len(apiLis.OriginalIpDetectionExtensions) != 0 { + return nil, fmt.Errorf("original_ip_detection_extensions must be empty %+v", apiLis) + } + + switch apiLis.RouteSpecifier.(type) { + case *v3httppb.HttpConnectionManager_Rds: + if configsource := apiLis.GetRds().GetConfigSource(); configsource.GetAds() == nil && configsource.GetSelf() == nil { + return nil, fmt.Errorf("LDS's RDS configSource is not ADS or Self: %+v", lis) + } + name := apiLis.GetRds().GetRouteConfigName() + if name == "" { + return nil, fmt.Errorf("empty route_config_name: %+v", lis) + } + update.RouteConfigName = name + case *v3httppb.HttpConnectionManager_RouteConfig: + routeU, err := generateRDSUpdateFromRouteConfiguration(apiLis.GetRouteConfig()) + if err != nil { + return nil, fmt.Errorf("failed to parse inline RDS resp: %v", err) + } + update.InlineRouteConfig = &routeU + case nil: + return nil, fmt.Errorf("no RouteSpecifier: %+v", apiLis) + default: + return nil, fmt.Errorf("unsupported type %T for RouteSpecifier", apiLis.RouteSpecifier) + } + + // The following checks and fields only apply to xDS protocol versions v3+. + + update.MaxStreamDuration = apiLis.GetCommonHttpProtocolOptions().GetMaxStreamDuration().AsDuration() + + var err error + if update.HTTPFilters, err = processHTTPFilters(apiLis.GetHttpFilters(), false); err != nil { + return nil, err + } + + return update, nil +} + +func unwrapHTTPFilterConfig(config *anypb.Any) (proto.Message, string, error) { + switch { + case config.MessageIs(&v3xdsxdstypepb.TypedStruct{}): + // The real type name is inside the new TypedStruct message. + s := new(v3xdsxdstypepb.TypedStruct) + if err := config.UnmarshalTo(s); err != nil { + return nil, "", fmt.Errorf("error unmarshalling TypedStruct filter config: %v", err) + } + return s, s.GetTypeUrl(), nil + case config.MessageIs(&v1xdsudpatypepb.TypedStruct{}): + // The real type name is inside the old TypedStruct message. 
+ s := new(v1xdsudpatypepb.TypedStruct) + if err := config.UnmarshalTo(s); err != nil { + return nil, "", fmt.Errorf("error unmarshalling TypedStruct filter config: %v", err) + } + return s, s.GetTypeUrl(), nil + default: + return config, config.GetTypeUrl(), nil + } +} + +func validateHTTPFilterConfig(cfg *anypb.Any, lds, optional bool) (httpfilter.Filter, httpfilter.FilterConfig, error) { + config, typeURL, err := unwrapHTTPFilterConfig(cfg) + if err != nil { + return nil, nil, err + } + filterBuilder := httpfilter.Get(typeURL) + if filterBuilder == nil { + if optional { + return nil, nil, nil + } + return nil, nil, fmt.Errorf("no filter implementation found for %q", typeURL) + } + parseFunc := filterBuilder.ParseFilterConfig + if !lds { + parseFunc = filterBuilder.ParseFilterConfigOverride + } + filterConfig, err := parseFunc(config) + if err != nil { + return nil, nil, fmt.Errorf("error parsing config for filter %q: %v", typeURL, err) + } + return filterBuilder, filterConfig, nil +} + +func processHTTPFilterOverrides(cfgs map[string]*anypb.Any) (map[string]httpfilter.FilterConfig, error) { + if len(cfgs) == 0 { + return nil, nil + } + m := make(map[string]httpfilter.FilterConfig) + for name, cfg := range cfgs { + optional := false + s := new(v3routepb.FilterConfig) + if cfg.MessageIs(s) { + if err := cfg.UnmarshalTo(s); err != nil { + return nil, fmt.Errorf("filter override %q: error unmarshalling FilterConfig: %v", name, err) + } + cfg = s.GetConfig() + optional = s.GetIsOptional() + } + + httpFilter, config, err := validateHTTPFilterConfig(cfg, false, optional) + if err != nil { + return nil, fmt.Errorf("filter override %q: %v", name, err) + } + if httpFilter == nil { + // Optional configs are ignored. + continue + } + m[name] = config + } + return m, nil +} + +func processHTTPFilters(filters []*v3httppb.HttpFilter, server bool) ([]HTTPFilter, error) { + ret := make([]HTTPFilter, 0, len(filters)) + seenNames := make(map[string]bool, len(filters)) + for _, filter := range filters { + name := filter.GetName() + if name == "" { + return nil, errors.New("filter missing name field") + } + if seenNames[name] { + return nil, fmt.Errorf("duplicate filter name %q", name) + } + seenNames[name] = true + + httpFilter, config, err := validateHTTPFilterConfig(filter.GetTypedConfig(), true, filter.GetIsOptional()) + if err != nil { + return nil, err + } + if httpFilter == nil { + // Optional configs are ignored. + continue + } + if server { + if _, ok := httpFilter.(httpfilter.ServerInterceptorBuilder); !ok { + if filter.GetIsOptional() { + continue + } + return nil, fmt.Errorf("HTTP filter %q not supported server-side", name) + } + } else if _, ok := httpFilter.(httpfilter.ClientInterceptorBuilder); !ok { + if filter.GetIsOptional() { + continue + } + return nil, fmt.Errorf("HTTP filter %q not supported client-side", name) + } + + // Save name/config + ret = append(ret, HTTPFilter{Name: name, Filter: httpFilter, Config: config}) + } + // "Validation will fail if a terminal filter is not the last filter in the + // chain or if a non-terminal filter is the last filter in the chain." 
- A39 + if len(ret) == 0 { + return nil, fmt.Errorf("http filters list is empty") + } + var i int + for ; i < len(ret)-1; i++ { + if ret[i].Filter.IsTerminal() { + return nil, fmt.Errorf("http filter %q is a terminal filter but it is not last in the filter chain", ret[i].Name) + } + } + if !ret[i].Filter.IsTerminal() { + return nil, fmt.Errorf("http filter %q is not a terminal filter", ret[len(ret)-1].Name) + } + return ret, nil +} + +func processServerSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, error) { + if n := len(lis.ListenerFilters); n != 0 { + return nil, fmt.Errorf("unsupported field 'listener_filters' contains %d entries", n) + } + if useOrigDst := lis.GetUseOriginalDst(); useOrigDst != nil && useOrigDst.GetValue() { + return nil, errors.New("unsupported field 'use_original_dst' is present and set to true") + } + addr := lis.GetAddress() + if addr == nil { + return nil, fmt.Errorf("no address field in LDS response: %+v", lis) + } + sockAddr := addr.GetSocketAddress() + if sockAddr == nil { + return nil, fmt.Errorf("no socket_address field in LDS response: %+v", lis) + } + lu := &ListenerUpdate{ + InboundListenerCfg: &InboundListenerConfig{ + Address: sockAddr.GetAddress(), + Port: strconv.Itoa(int(sockAddr.GetPortValue())), + }, + } + + fcMgr, err := NewFilterChainManager(lis) + if err != nil { + return nil, err + } + lu.InboundListenerCfg.FilterChains = fcMgr + return lu, nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_rds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_rds.go new file mode 100644 index 0000000000..db862514eb --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_rds.go @@ -0,0 +1,436 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xdsresource + +import ( + "fmt" + "math" + "regexp" + "strings" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/xds/matcher" + "google.golang.org/grpc/xds/internal/clusterspecifier" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" +) + +func unmarshalRouteConfigResource(r *anypb.Any) (string, RouteConfigUpdate, error) { + r, err := UnwrapResource(r) + if err != nil { + return "", RouteConfigUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) + } + + if !IsRouteConfigResource(r.GetTypeUrl()) { + return "", RouteConfigUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) + } + rc := &v3routepb.RouteConfiguration{} + if err := proto.Unmarshal(r.GetValue(), rc); err != nil { + return "", RouteConfigUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) + } + + u, err := generateRDSUpdateFromRouteConfiguration(rc) + if err != nil { + return rc.GetName(), RouteConfigUpdate{}, err + } + u.Raw = r + return rc.GetName(), u, nil +} + +// generateRDSUpdateFromRouteConfiguration checks if the provided +// RouteConfiguration meets the expected criteria. If so, it returns a +// RouteConfigUpdate with nil error. +// +// A RouteConfiguration resource is considered valid only if it contains a +// VirtualHost whose domain field matches the server name from the URI passed +// to the gRPC channel, and it contains a clusterName or a weighted cluster. +// +// The RouteConfiguration includes a list of virtualHosts, which may have zero +// or more elements. We are interested in the element whose domains field +// matches the server name specified in the "xds:" URI. The only field in the +// VirtualHost proto that we are interested in is the list of routes. We +// only look at the last route in the list (the default route), whose match +// field must be empty and whose route field must be set. Inside that route +// message, the cluster field will contain the clusterName or weighted clusters +// we are looking for. +func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration) (RouteConfigUpdate, error) { + vhs := make([]*VirtualHost, 0, len(rc.GetVirtualHosts())) + csps, err := processClusterSpecifierPlugins(rc.ClusterSpecifierPlugins) + if err != nil { + return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) + } + // cspNames represents all the cluster specifiers referenced by Route + // Actions - any cluster specifiers not referenced by a Route Action can be + // ignored and not emitted by the xdsclient.

+ var cspNames = make(map[string]bool) + for _, vh := range rc.GetVirtualHosts() { + routes, cspNs, err := routesProtoToSlice(vh.Routes, csps) + if err != nil { + return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) + } + for n := range cspNs { + cspNames[n] = true + } + rc, err := generateRetryConfig(vh.GetRetryPolicy()) + if err != nil { + return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) + } + vhOut := &VirtualHost{ + Domains: vh.GetDomains(), + Routes: routes, + RetryConfig: rc, + } + cfgs, err := processHTTPFilterOverrides(vh.GetTypedPerFilterConfig()) + if err != nil { + return RouteConfigUpdate{}, fmt.Errorf("virtual host %+v: %v", vh, err) + } + vhOut.HTTPFilterConfigOverride = cfgs + vhs = append(vhs, vhOut) + } + + // "For any entry in the RouteConfiguration.cluster_specifier_plugins not + // referenced by an enclosed ActionType's cluster_specifier_plugin, the xDS + // client should not provide it to its consumers." - RLS in xDS Design + for name := range csps { + if !cspNames[name] { + delete(csps, name) + } + } + + return RouteConfigUpdate{VirtualHosts: vhs, ClusterSpecifierPlugins: csps}, nil +} + +func processClusterSpecifierPlugins(csps []*v3routepb.ClusterSpecifierPlugin) (map[string]clusterspecifier.BalancerConfig, error) { + cspCfgs := make(map[string]clusterspecifier.BalancerConfig) + // "The xDS client will inspect all elements of the + // cluster_specifier_plugins field looking up a plugin based on the + // extension.typed_config of each." - RLS in xDS design + for _, csp := range csps { + cs := clusterspecifier.Get(csp.GetExtension().GetTypedConfig().GetTypeUrl()) + if cs == nil { + if csp.GetIsOptional() { + // "If a plugin is not supported but has is_optional set, then + // we will ignore any routes that point to that plugin" + cspCfgs[csp.GetExtension().GetName()] = nil + continue + } + // "If no plugin is registered for it, the resource will be NACKed." + // - RLS in xDS design + return nil, fmt.Errorf("cluster specifier %q of type %q was not found", csp.GetExtension().GetName(), csp.GetExtension().GetTypedConfig().GetTypeUrl()) + } + lbCfg, err := cs.ParseClusterSpecifierConfig(csp.GetExtension().GetTypedConfig()) + if err != nil { + // "If a plugin is found, the value of the typed_config field will + // be passed to it's conversion method, and if an error is + // encountered, the resource will be NACKED." - RLS in xDS design + return nil, fmt.Errorf("error: %q parsing config %q for cluster specifier %q of type %q", err, csp.GetExtension().GetTypedConfig(), csp.GetExtension().GetName(), csp.GetExtension().GetTypedConfig().GetTypeUrl()) + } + // "If all cluster specifiers are valid, the xDS client will store the + // configurations in a map keyed by the name of the extension instance." 
- + // RLS in xDS Design + cspCfgs[csp.GetExtension().GetName()] = lbCfg + } + return cspCfgs, nil +} + +func generateRetryConfig(rp *v3routepb.RetryPolicy) (*RetryConfig, error) { + if rp == nil { + return nil, nil + } + + cfg := &RetryConfig{RetryOn: make(map[codes.Code]bool)} + for _, s := range strings.Split(rp.GetRetryOn(), ",") { + switch strings.TrimSpace(strings.ToLower(s)) { + case "cancelled": + cfg.RetryOn[codes.Canceled] = true + case "deadline-exceeded": + cfg.RetryOn[codes.DeadlineExceeded] = true + case "internal": + cfg.RetryOn[codes.Internal] = true + case "resource-exhausted": + cfg.RetryOn[codes.ResourceExhausted] = true + case "unavailable": + cfg.RetryOn[codes.Unavailable] = true + } + } + + if rp.NumRetries == nil { + cfg.NumRetries = 1 + } else { + cfg.NumRetries = rp.GetNumRetries().Value + if cfg.NumRetries < 1 { + return nil, fmt.Errorf("retry_policy.num_retries = %v; must be >= 1", cfg.NumRetries) + } + } + + backoff := rp.GetRetryBackOff() + if backoff == nil { + cfg.RetryBackoff.BaseInterval = 25 * time.Millisecond + } else { + cfg.RetryBackoff.BaseInterval = backoff.GetBaseInterval().AsDuration() + if cfg.RetryBackoff.BaseInterval <= 0 { + return nil, fmt.Errorf("retry_policy.base_interval = %v; must be > 0", cfg.RetryBackoff.BaseInterval) + } + } + if max := backoff.GetMaxInterval(); max == nil { + cfg.RetryBackoff.MaxInterval = 10 * cfg.RetryBackoff.BaseInterval + } else { + cfg.RetryBackoff.MaxInterval = max.AsDuration() + if cfg.RetryBackoff.MaxInterval <= 0 { + return nil, fmt.Errorf("retry_policy.max_interval = %v; must be > 0", cfg.RetryBackoff.MaxInterval) + } + } + + if len(cfg.RetryOn) == 0 { + return &RetryConfig{}, nil + } + return cfg, nil +} + +func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecifier.BalancerConfig) ([]*Route, map[string]bool, error) { + var routesRet []*Route + var cspNames = make(map[string]bool) + for _, r := range routes { + match := r.GetMatch() + if match == nil { + return nil, nil, fmt.Errorf("route %+v doesn't have a match", r) + } + + if len(match.GetQueryParameters()) != 0 { + // Ignore route with query parameters. 
+ logger.Warningf("Ignoring route %+v with query parameter matchers", r) + continue + } + + pathSp := match.GetPathSpecifier() + if pathSp == nil { + return nil, nil, fmt.Errorf("route %+v doesn't have a path specifier", r) + } + + var route Route + switch pt := pathSp.(type) { + case *v3routepb.RouteMatch_Prefix: + route.Prefix = &pt.Prefix + case *v3routepb.RouteMatch_Path: + route.Path = &pt.Path + case *v3routepb.RouteMatch_SafeRegex: + regex := pt.SafeRegex.GetRegex() + re, err := regexp.Compile(regex) + if err != nil { + return nil, nil, fmt.Errorf("route %+v contains an invalid regex %q", r, regex) + } + route.Regex = re + default: + return nil, nil, fmt.Errorf("route %+v has an unrecognized path specifier: %+v", r, pt) + } + + if caseSensitive := match.GetCaseSensitive(); caseSensitive != nil { + route.CaseInsensitive = !caseSensitive.Value + } + + for _, h := range match.GetHeaders() { + var header HeaderMatcher + switch ht := h.GetHeaderMatchSpecifier().(type) { + case *v3routepb.HeaderMatcher_ExactMatch: + header.ExactMatch = &ht.ExactMatch + case *v3routepb.HeaderMatcher_SafeRegexMatch: + regex := ht.SafeRegexMatch.GetRegex() + re, err := regexp.Compile(regex) + if err != nil { + return nil, nil, fmt.Errorf("route %+v contains an invalid regex %q", r, regex) + } + header.RegexMatch = re + case *v3routepb.HeaderMatcher_RangeMatch: + header.RangeMatch = &Int64Range{ + Start: ht.RangeMatch.Start, + End: ht.RangeMatch.End, + } + case *v3routepb.HeaderMatcher_PresentMatch: + header.PresentMatch = &ht.PresentMatch + case *v3routepb.HeaderMatcher_PrefixMatch: + header.PrefixMatch = &ht.PrefixMatch + case *v3routepb.HeaderMatcher_SuffixMatch: + header.SuffixMatch = &ht.SuffixMatch + case *v3routepb.HeaderMatcher_StringMatch: + sm, err := matcher.StringMatcherFromProto(ht.StringMatch) + if err != nil { + return nil, nil, fmt.Errorf("route %+v has an invalid string matcher: %v", err, ht.StringMatch) + } + header.StringMatch = &sm + default: + return nil, nil, fmt.Errorf("route %+v has an unrecognized header matcher: %+v", r, ht) + } + header.Name = h.GetName() + invert := h.GetInvertMatch() + header.InvertMatch = &invert + route.Headers = append(route.Headers, &header) + } + + if fr := match.GetRuntimeFraction(); fr != nil { + d := fr.GetDefaultValue() + n := d.GetNumerator() + switch d.GetDenominator() { + case v3typepb.FractionalPercent_HUNDRED: + n *= 10000 + case v3typepb.FractionalPercent_TEN_THOUSAND: + n *= 100 + case v3typepb.FractionalPercent_MILLION: + } + route.Fraction = &n + } + + switch r.GetAction().(type) { + case *v3routepb.Route_Route: + route.WeightedClusters = make(map[string]WeightedCluster) + action := r.GetRoute() + + // Hash Policies are only applicable for a Ring Hash LB. 
+ hp, err := hashPoliciesProtoToSlice(action.HashPolicy) + if err != nil { + return nil, nil, err + } + route.HashPolicies = hp + + switch a := action.GetClusterSpecifier().(type) { + case *v3routepb.RouteAction_Cluster: + route.WeightedClusters[a.Cluster] = WeightedCluster{Weight: 1} + case *v3routepb.RouteAction_WeightedClusters: + wcs := a.WeightedClusters + var totalWeight uint64 + for _, c := range wcs.Clusters { + w := c.GetWeight().GetValue() + if w == 0 { + continue + } + totalWeight += uint64(w) + if totalWeight > math.MaxUint32 { + return nil, nil, fmt.Errorf("xds: total weight of clusters exceeds MaxUint32") + } + wc := WeightedCluster{Weight: w} + cfgs, err := processHTTPFilterOverrides(c.GetTypedPerFilterConfig()) + if err != nil { + return nil, nil, fmt.Errorf("route %+v, action %+v: %v", r, a, err) + } + wc.HTTPFilterConfigOverride = cfgs + route.WeightedClusters[c.GetName()] = wc + } + if totalWeight == 0 { + return nil, nil, fmt.Errorf("route %+v, action %+v, has no valid cluster in WeightedCluster action", r, a) + } + case *v3routepb.RouteAction_ClusterSpecifierPlugin: + // gRFC A28 was updated to say the following: + // + // The route’s action field must be route, and its + // cluster_specifier: + // - Can be Cluster + // - Can be Weighted_clusters + // - Can be unset or an unsupported field. The route containing + // this action will be ignored. + // + // This means that if this env var is not set, we should treat + // it as if it we didn't know about the cluster_specifier_plugin + // at all. + if _, ok := csps[a.ClusterSpecifierPlugin]; !ok { + // "When processing RouteActions, if any action includes a + // cluster_specifier_plugin value that is not in + // RouteConfiguration.cluster_specifier_plugins, the + // resource will be NACKed." - RLS in xDS design + return nil, nil, fmt.Errorf("route %+v, action %+v, specifies a cluster specifier plugin %+v that is not in Route Configuration", r, a, a.ClusterSpecifierPlugin) + } + if csps[a.ClusterSpecifierPlugin] == nil { + logger.Warningf("Ignoring route %+v with optional and unsupported cluster specifier plugin %+v", r, a.ClusterSpecifierPlugin) + continue + } + cspNames[a.ClusterSpecifierPlugin] = true + route.ClusterSpecifierPlugin = a.ClusterSpecifierPlugin + default: + logger.Warningf("Ignoring route %+v with unknown ClusterSpecifier %+v", r, a) + continue + } + + msd := action.GetMaxStreamDuration() + // Prefer grpc_timeout_header_max, if set. + dur := msd.GetGrpcTimeoutHeaderMax() + if dur == nil { + dur = msd.GetMaxStreamDuration() + } + if dur != nil { + d := dur.AsDuration() + route.MaxStreamDuration = &d + } + + route.RetryConfig, err = generateRetryConfig(action.GetRetryPolicy()) + if err != nil { + return nil, nil, fmt.Errorf("route %+v, action %+v: %v", r, action, err) + } + + route.ActionType = RouteActionRoute + + case *v3routepb.Route_NonForwardingAction: + // Expected to be used on server side. 
+ route.ActionType = RouteActionNonForwardingAction + default: + route.ActionType = RouteActionUnsupported + } + + cfgs, err := processHTTPFilterOverrides(r.GetTypedPerFilterConfig()) + if err != nil { + return nil, nil, fmt.Errorf("route %+v: %v", r, err) + } + route.HTTPFilterConfigOverride = cfgs + routesRet = append(routesRet, &route) + } + return routesRet, cspNames, nil +} + +func hashPoliciesProtoToSlice(policies []*v3routepb.RouteAction_HashPolicy) ([]*HashPolicy, error) { + var hashPoliciesRet []*HashPolicy + for _, p := range policies { + policy := HashPolicy{Terminal: p.Terminal} + switch p.GetPolicySpecifier().(type) { + case *v3routepb.RouteAction_HashPolicy_Header_: + policy.HashPolicyType = HashPolicyTypeHeader + policy.HeaderName = p.GetHeader().GetHeaderName() + if rr := p.GetHeader().GetRegexRewrite(); rr != nil { + regex := rr.GetPattern().GetRegex() + re, err := regexp.Compile(regex) + if err != nil { + return nil, fmt.Errorf("hash policy %+v contains an invalid regex %q", p, regex) + } + policy.Regex = re + policy.RegexSubstitution = rr.GetSubstitution() + } + case *v3routepb.RouteAction_HashPolicy_FilterState_: + if p.GetFilterState().GetKey() != "io.grpc.channel_id" { + logger.Warningf("Ignoring hash policy %+v with invalid key for filter state policy %q", p, p.GetFilterState().GetKey()) + continue + } + policy.HashPolicyType = HashPolicyTypeChannelID + default: + logger.Warningf("Ignoring unsupported hash policy %T", p.GetPolicySpecifier()) + continue + } + + hashPoliciesRet = append(hashPoliciesRet, &policy) + } + return hashPoliciesRet, nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version/version.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version/version.go new file mode 100644 index 0000000000..82ad5fe52c --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version/version.go @@ -0,0 +1,41 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package version defines constants to distinguish between supported xDS API +// versions. +package version + +// Resource URLs. We need to be able to accept either version of the resource +// regardless of the version of the transport protocol in use. 
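As a brief sketch (hypothetical code, not part of the vendored files), the constants defined below are formed by joining the googleapis prefix with a fully-qualified proto message name, and incoming anypb.Any resources are matched against them by type URL:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/anypb"
)

func main() {
	// Mirrors how the type URLs below are formed: prefix + message name.
	const googleapiPrefix = "type.googleapis.com/"
	const v3ClusterURL = googleapiPrefix + "envoy.config.cluster.v3.Cluster"

	res := &anypb.Any{TypeUrl: v3ClusterURL}
	fmt.Println(res.GetTypeUrl() == v3ClusterURL) // true
}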
+const ( + googleapiPrefix = "type.googleapis.com/" + + V3ListenerType = "envoy.config.listener.v3.Listener" + V3RouteConfigType = "envoy.config.route.v3.RouteConfiguration" + V3ClusterType = "envoy.config.cluster.v3.Cluster" + V3EndpointsType = "envoy.config.endpoint.v3.ClusterLoadAssignment" + + V3ResourceWrapperURL = googleapiPrefix + "envoy.service.discovery.v3.Resource" + V3ListenerURL = googleapiPrefix + V3ListenerType + V3RouteConfigURL = googleapiPrefix + V3RouteConfigType + V3ClusterURL = googleapiPrefix + V3ClusterType + V3EndpointsURL = googleapiPrefix + V3EndpointsType + V3HTTPConnManagerURL = googleapiPrefix + "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager" + V3UpstreamTLSContextURL = googleapiPrefix + "envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext" + V3DownstreamTLSContextURL = googleapiPrefix + "envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext" +) diff --git a/vendor/google.golang.org/grpc/xds/server.go b/vendor/google.golang.org/grpc/xds/server.go new file mode 100644 index 0000000000..1fea8c8309 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/server.go @@ -0,0 +1,321 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xds + +import ( + "context" + "errors" + "fmt" + "net" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/grpc/xds/internal/server" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +const serverPrefix = "[xds-server %p] " + +var ( + // These new functions will be overridden in unit tests. + newXDSClient = func(name string) (xdsclient.XDSClient, func(), error) { + return xdsclient.New(name) + } + newGRPCServer = func(opts ...grpc.ServerOption) grpcServer { + return grpc.NewServer(opts...) + } +) + +// grpcServer contains methods from grpc.Server which are used by the +// GRPCServer type here. This is useful for overriding in unit tests. +type grpcServer interface { + RegisterService(*grpc.ServiceDesc, any) + Serve(net.Listener) error + Stop() + GracefulStop() + GetServiceInfo() map[string]grpc.ServiceInfo +} + +// GRPCServer wraps a gRPC server and provides server-side xDS functionality, by +// communication with a management server using xDS APIs. It implements the +// grpc.ServiceRegistrar interface and can be passed to service registration +// functions in IDL generated code. 
+type GRPCServer struct { + gs grpcServer + quit *grpcsync.Event + logger *internalgrpclog.PrefixLogger + opts *serverOptions + xdsC xdsclient.XDSClient + xdsClientClose func() +} + +// NewGRPCServer creates an xDS-enabled gRPC server using the passed in opts. +// The underlying gRPC server has no service registered and has not started to +// accept requests yet. +func NewGRPCServer(opts ...grpc.ServerOption) (*GRPCServer, error) { + newOpts := []grpc.ServerOption{ + grpc.ChainUnaryInterceptor(xdsUnaryInterceptor), + grpc.ChainStreamInterceptor(xdsStreamInterceptor), + } + newOpts = append(newOpts, opts...) + s := &GRPCServer{ + gs: newGRPCServer(newOpts...), + quit: grpcsync.NewEvent(), + } + s.handleServerOptions(opts) + + // Initializing the xDS client upfront (instead of at serving time) + // simplifies the code by eliminating the need for a mutex to protect the + // xdsC and xdsClientClose fields. + newXDSClient := newXDSClient + if s.opts.bootstrapContentsForTesting != nil { + // Bootstrap file contents may be specified as a server option for tests. + newXDSClient = func(name string) (xdsclient.XDSClient, func(), error) { + return xdsclient.NewForTesting(xdsclient.OptionsForTesting{ + Name: name, + Contents: s.opts.bootstrapContentsForTesting, + }) + } + } + xdsClient, xdsClientClose, err := newXDSClient(xdsclient.NameForServer) + if err != nil { + return nil, fmt.Errorf("xDS client creation failed: %v", err) + } + + // Validate the bootstrap configuration for server specific fields. + + // Listener resource name template is mandatory on the server side. + cfg := xdsClient.BootstrapConfig() + if cfg.ServerListenerResourceNameTemplate() == "" { + xdsClientClose() + return nil, errors.New("missing server_listener_resource_name_template in the bootstrap configuration") + } + + s.xdsC = xdsClient + s.xdsClientClose = xdsClientClose + + s.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(serverPrefix, s)) + s.logger.Infof("Created xds.GRPCServer") + + return s, nil +} + +// handleServerOptions iterates through the list of server options passed in by +// the user, and handles the xDS server specific options. +func (s *GRPCServer) handleServerOptions(opts []grpc.ServerOption) { + so := s.defaultServerOptions() + for _, opt := range opts { + if o, ok := opt.(*serverOption); ok { + o.apply(so) + } + } + s.opts = so +} + +func (s *GRPCServer) defaultServerOptions() *serverOptions { + return &serverOptions{ + // A default serving mode change callback which simply logs at the + // default-visible log level. This will be used if the application does not + // register a mode change callback. + // + // Note that this means that `s.opts.modeCallback` will never be nil and can + // safely be invoked directly from `handleServingModeChanges`. + modeCallback: s.loggingServerModeChangeCallback, + } +} + +func (s *GRPCServer) loggingServerModeChangeCallback(addr net.Addr, args ServingModeChangeArgs) { + switch args.Mode { + case connectivity.ServingModeServing: + s.logger.Errorf("Listener %q entering mode: %q", addr.String(), args.Mode) + case connectivity.ServingModeNotServing: + s.logger.Errorf("Listener %q entering mode: %q due to error: %v", addr.String(), args.Mode, args.Err) + } +} + +// RegisterService registers a service and its implementation to the underlying +// gRPC server. It is called from the IDL generated code. This must be called +// before invoking Serve. 
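As a usage sketch (hypothetical application code, not part of the vendored files; the port ":50051" and the log messages are made-up), an application would combine NewGRPCServer above with the RegisterService and Serve methods that follow roughly like this, assuming a valid xDS bootstrap configuration is available through the standard environment variables:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc/xds"
)

func main() {
	// Notified whenever a listener flips between SERVING and NOT_SERVING.
	modeCB := xds.ServingModeCallback(func(addr net.Addr, args xds.ServingModeChangeArgs) {
		log.Printf("listener %s entered mode %s (err: %v)", addr, args.Mode, args.Err)
	})

	server, err := xds.NewGRPCServer(modeCB)
	if err != nil {
		log.Fatalf("failed to create xDS-enabled gRPC server: %v", err)
	}
	// Register generated services here, e.g. pb.RegisterGreeterServer(server, &greeterImpl{}).

	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	if err := server.Serve(lis); err != nil {
		log.Fatalf("Serve() failed: %v", err)
	}
}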
+func (s *GRPCServer) RegisterService(sd *grpc.ServiceDesc, ss any) { + s.gs.RegisterService(sd, ss) +} + +// GetServiceInfo returns a map from service names to ServiceInfo. +// Service names include the package names, in the form of <package>.<service>. +func (s *GRPCServer) GetServiceInfo() map[string]grpc.ServiceInfo { + return s.gs.GetServiceInfo() +} + +// Serve gets the underlying gRPC server to accept incoming connections on the +// listener lis, which is expected to be listening on a TCP port. +// +// A connection to the management server, to receive xDS configuration, is +// initiated here. +// +// Serve will return a non-nil error unless Stop or GracefulStop is called. +func (s *GRPCServer) Serve(lis net.Listener) error { + s.logger.Infof("Serve() passed a net.Listener on %s", lis.Addr().String()) + if _, ok := lis.Addr().(*net.TCPAddr); !ok { + return fmt.Errorf("xds: GRPCServer expects listener to return a net.TCPAddr. Got %T", lis.Addr()) + } + + if s.quit.HasFired() { + return grpc.ErrServerStopped + } + + // The server listener resource name template from the bootstrap + // configuration contains a template for the name of the Listener resource + // to subscribe to for a gRPC server. If the token `%s` is present in the + // string, it will be replaced with the server's listening "IP:port" (e.g., + // "0.0.0.0:8080", "[::]:8080"). + cfg := s.xdsC.BootstrapConfig() + name := bootstrap.PopulateResourceTemplate(cfg.ServerListenerResourceNameTemplate(), lis.Addr().String()) + + // Create a listenerWrapper which handles all functionality required by + // this particular instance of Serve(). + lw := server.NewListenerWrapper(server.ListenerWrapperParams{ + Listener: lis, + ListenerResourceName: name, + XDSClient: s.xdsC, + ModeCallback: func(addr net.Addr, mode connectivity.ServingMode, err error) { + s.opts.modeCallback(addr, ServingModeChangeArgs{ + Mode: mode, + Err: err, + }) + }, + }) + return s.gs.Serve(lw) +} + +// Stop stops the underlying gRPC server. It immediately closes all open +// connections. It cancels all active RPCs on the server side and the +// corresponding pending RPCs on the client side will get notified by connection +// errors. +func (s *GRPCServer) Stop() { + s.quit.Fire() + s.gs.Stop() + if s.xdsC != nil { + s.xdsClientClose() + } +} + +// GracefulStop stops the underlying gRPC server gracefully. It stops the server +// from accepting new connections and RPCs and blocks until all the pending RPCs +// are finished. +func (s *GRPCServer) GracefulStop() { + s.quit.Fire() + s.gs.GracefulStop() + if s.xdsC != nil { + s.xdsClientClose() + } +} + +// routeAndProcess routes the incoming RPC to a configured route in the route +// table and also processes the RPC by running the incoming RPC through any HTTP +// Filters configured. +func routeAndProcess(ctx context.Context) error { + conn := transport.GetConnection(ctx) + cw, ok := conn.(interface { + UsableRouteConfiguration() xdsresource.UsableRouteConfiguration + }) + if !ok { + return errors.New("missing virtual hosts in incoming context") + } + + rc := cw.UsableRouteConfiguration() + // Error out at the L7 routing level with status code UNAVAILABLE; this + // represents a NACK before a usable route configuration, a resource not + // found for RDS, or an error combining LDS + RDS (shouldn't happen).
+ if rc.Err != nil { + if logger.V(2) { + logger.Infof("RPC on connection with xDS Configuration error: %v", rc.Err) + } + return status.Error(codes.Unavailable, "error from xDS configuration for matched route configuration") + } + + mn, ok := grpc.Method(ctx) + if !ok { + return errors.New("missing method name in incoming context") + } + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return errors.New("missing metadata in incoming context") + } + // A41 added logic to the core grpc implementation to guarantee that once + // the RPC gets to this point, there will be a single, unambiguous authority + // present in the header map. + authority := md.Get(":authority") + vh := xdsresource.FindBestMatchingVirtualHostServer(authority[0], rc.VHS) + if vh == nil { + return status.Error(codes.Unavailable, "the incoming RPC did not match a configured Virtual Host") + } + + var rwi *xdsresource.RouteWithInterceptors + rpcInfo := iresolver.RPCInfo{ + Context: ctx, + Method: mn, + } + for _, r := range vh.Routes { + if r.M.Match(rpcInfo) { + // "NonForwardingAction is expected for all Routes used on server-side; a route with an inappropriate action causes + // RPCs matching that route to fail with UNAVAILABLE." - A36 + if r.ActionType != xdsresource.RouteActionNonForwardingAction { + return status.Error(codes.Unavailable, "the incoming RPC matched to a route that was not of action type non forwarding") + } + rwi = &r + break + } + } + if rwi == nil { + return status.Error(codes.Unavailable, "the incoming RPC did not match a configured Route") + } + for _, interceptor := range rwi.Interceptors { + if err := interceptor.AllowRPC(ctx); err != nil { + return status.Errorf(codes.PermissionDenied, "Incoming RPC is not allowed: %v", err) + } + } + return nil +} + +// xdsUnaryInterceptor is the unary interceptor added to the gRPC server to +// perform any xDS specific functionality on unary RPCs. +func xdsUnaryInterceptor(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { + if err := routeAndProcess(ctx); err != nil { + return nil, err + } + return handler(ctx, req) +} + +// xdsStreamInterceptor is the stream interceptor added to the gRPC server to +// perform any xDS specific functionality on streaming RPCs. +func xdsStreamInterceptor(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + if err := routeAndProcess(ss.Context()); err != nil { + return err + } + return handler(srv, ss) +} diff --git a/vendor/google.golang.org/grpc/xds/server_options.go b/vendor/google.golang.org/grpc/xds/server_options.go new file mode 100644 index 0000000000..9b9700cf3b --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/server_options.go @@ -0,0 +1,76 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package xds + +import ( + "net" + + "google.golang.org/grpc" + "google.golang.org/grpc/connectivity" +) + +type serverOptions struct { + modeCallback ServingModeCallbackFunc + bootstrapContentsForTesting []byte +} + +type serverOption struct { + grpc.EmptyServerOption + apply func(*serverOptions) +} + +// ServingModeCallback returns a grpc.ServerOption which allows users to +// register a callback to get notified about serving mode changes. +func ServingModeCallback(cb ServingModeCallbackFunc) grpc.ServerOption { + return &serverOption{apply: func(o *serverOptions) { o.modeCallback = cb }} +} + +// ServingModeCallbackFunc is the callback that users can register to get +// notified about the server's serving mode changes. The callback is invoked +// with the address of the listener and its new mode. +// +// Users must not perform any blocking operations in this callback. +type ServingModeCallbackFunc func(addr net.Addr, args ServingModeChangeArgs) + +// ServingModeChangeArgs wraps the arguments passed to the serving mode callback +// function. +type ServingModeChangeArgs struct { + // Mode is the new serving mode of the server listener. + Mode connectivity.ServingMode + // Err is set to a non-nil error if the server has transitioned into + // not-serving mode. + Err error +} + +// BootstrapContentsForTesting returns a grpc.ServerOption which allows users +// to inject a bootstrap configuration used by only this server, instead of the +// global configuration from the environment variables. +// +// # Testing Only +// +// This function should ONLY be used for testing and may not work with some +// other features, including the CSDS service. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func BootstrapContentsForTesting(contents []byte) grpc.ServerOption { + return &serverOption{apply: func(o *serverOptions) { o.bootstrapContentsForTesting = contents }} +} diff --git a/vendor/google.golang.org/grpc/xds/xds.go b/vendor/google.golang.org/grpc/xds/xds.go new file mode 100644 index 0000000000..943d09f17e --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/xds.go @@ -0,0 +1,99 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package xds contains an implementation of the xDS suite of protocols, to be +// used by gRPC client and server applications. +// +// On the client-side, users simply need to import this package to get all xDS +// functionality. On the server-side, users need to use the GRPCServer type +// exported by this package instead of the regular grpc.Server. +// +// See https://github.com/grpc/grpc-go/tree/master/examples/features/xds for +// example. 
+package xds + +import ( + "fmt" + + "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + internaladmin "google.golang.org/grpc/internal/admin" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/xds/csds" + + _ "google.golang.org/grpc/credentials/tls/certprovider/pemfile" // Register the file watcher certificate provider plugin. + _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. + _ "google.golang.org/grpc/xds/internal/clusterspecifier/rls" // Register the RLS cluster specifier plugin. Note that this does not register the RLS LB policy. + _ "google.golang.org/grpc/xds/internal/httpfilter/fault" // Register the fault injection filter. + _ "google.golang.org/grpc/xds/internal/httpfilter/rbac" // Register the RBAC filter. + _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. + _ "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver. + _ "google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter" // Register the xDS LB Registry Converters. + + v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" +) + +var logger = grpclog.Component("xds") + +func init() { + internaladmin.AddService(func(registrar grpc.ServiceRegistrar) (func(), error) { + var grpcServer *grpc.Server + switch ss := registrar.(type) { + case *grpc.Server: + grpcServer = ss + case *GRPCServer: + sss, ok := ss.gs.(*grpc.Server) + if !ok { + logger.Warning("grpc server within xds.GRPCServer is not *grpc.Server, CSDS will not be registered") + return nil, nil + } + grpcServer = sss + default: + // Returning an error would cause the top level admin.Register() to + // fail. Log a warning instead. + logger.Error("Server to register service on is neither a *grpc.Server or a *xds.GRPCServer, CSDS will not be registered") + return nil, nil + } + + csdss, err := csds.NewClientStatusDiscoveryServer() + if err != nil { + return nil, fmt.Errorf("failed to create csds server: %v", err) + } + v3statusgrpc.RegisterClientStatusDiscoveryServiceServer(grpcServer, csdss) + return csdss.Close, nil + }) +} + +// NewXDSResolverWithConfigForTesting creates a new xDS resolver builder using +// the provided xDS bootstrap config instead of the global configuration from +// the supported environment variables. The resolver.Builder is meant to be +// used in conjunction with the grpc.WithResolvers DialOption. +// +// # Testing Only +// +// This function should ONLY be used for testing and may not work with some +// other features, including the CSDS service. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func NewXDSResolverWithConfigForTesting(bootstrapConfig []byte) (resolver.Builder, error) { + return internal.NewXDSResolverWithConfigForTesting.(func([]byte) (resolver.Builder, error))(bootstrapConfig) +} diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/.gitignore b/vendor/gopkg.in/evanphx/json-patch.v4/.gitignore new file mode 100644 index 0000000000..b7ed7f956d --- /dev/null +++ b/vendor/gopkg.in/evanphx/json-patch.v4/.gitignore @@ -0,0 +1,6 @@ +# editor and IDE paraphernalia +.idea +.vscode + +# macOS paraphernalia +.DS_Store diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/LICENSE b/vendor/gopkg.in/evanphx/json-patch.v4/LICENSE new file mode 100644 index 0000000000..df76d7d771 --- /dev/null +++ b/vendor/gopkg.in/evanphx/json-patch.v4/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2014, Evan Phoenix +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the Evan Phoenix nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/README.md b/vendor/gopkg.in/evanphx/json-patch.v4/README.md new file mode 100644 index 0000000000..28e3516937 --- /dev/null +++ b/vendor/gopkg.in/evanphx/json-patch.v4/README.md @@ -0,0 +1,317 @@ +# JSON-Patch +`jsonpatch` is a library which provides functionality for both applying +[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as +well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). + +[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) +[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) +[![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch) + +# Get It! + +**Latest and greatest**: +```bash +go get -u github.com/evanphx/json-patch/v5 +``` + +**Stable Versions**: +* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5` +* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4` + +(previous versions below `v3` are unavailable) + +# Use It! 
+* [Create and apply a merge patch](#create-and-apply-a-merge-patch) +* [Create and apply a JSON Patch](#create-and-apply-a-json-patch) +* [Comparing JSON documents](#comparing-json-documents) +* [Combine merge patches](#combine-merge-patches) + + +# Configuration + +* There is a global configuration variable `jsonpatch.SupportNegativeIndices`. + This defaults to `true` and enables the non-standard practice of allowing + negative indices to mean indices starting at the end of an array. This + functionality can be disabled by setting `jsonpatch.SupportNegativeIndices = + false`. + +* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`, + which limits the total size increase in bytes caused by "copy" operations in a + patch. It defaults to 0, which means there is no limit. + +These global variables control the behavior of `jsonpatch.Apply`. + +An alternative to `jsonpatch.Apply` is `jsonpatch.ApplyWithOptions` whose behavior +is controlled by an `options` parameter of type `*jsonpatch.ApplyOptions`. + +Structure `jsonpatch.ApplyOptions` includes the configuration options above +and adds two new options: `AllowMissingPathOnRemove` and `EnsurePathExistsOnAdd`. + +When `AllowMissingPathOnRemove` is set to `true`, `jsonpatch.ApplyWithOptions` will ignore +`remove` operations whose `path` points to a non-existent location in the JSON document. +`AllowMissingPathOnRemove` defaults to `false` which will lead to `jsonpatch.ApplyWithOptions` +returning an error when hitting a missing `path` on `remove`. + +When `EnsurePathExistsOnAdd` is set to `true`, `jsonpatch.ApplyWithOptions` will make sure +that `add` operations produce all the `path` elements that are missing from the target object. + +Use `jsonpatch.NewApplyOptions` to create an instance of `jsonpatch.ApplyOptions` +whose values are populated from the global configuration variables. + +## Create and apply a merge patch +Given both an original JSON document and a modified JSON document, you can create +a [Merge Patch](https://tools.ietf.org/html/rfc7396) document. + +It can describe the changes needed to convert from the original to the +modified JSON document. + +Once you have a merge patch, you can apply it to other JSON documents using the +`jsonpatch.MergePatch(document, patch)` function. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + // Let's create a merge patch from these two documents... + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + target := []byte(`{"name": "Jane", "age": 24}`) + + patch, err := jsonpatch.CreateMergePatch(original, target) + if err != nil { + panic(err) + } + + // Now lets apply the patch against a different JSON document... + + alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`) + modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch) + + fmt.Printf("patch document: %s\n", patch) + fmt.Printf("updated alternative doc: %s\n", modifiedAlternative) +} +``` + +When ran, you get the following output: + +```bash +$ go run main.go +patch document: {"height":null,"name":"Jane"} +updated alternative doc: {"age":28,"name":"Jane"} +``` + +## Create and apply a JSON Patch +You can create patch objects using `DecodePatch([]byte)`, which can then +be applied against JSON documents. + +The following is an example of creating a patch from two operations, and +applying it against a JSON document. 
+ +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + patchJSON := []byte(`[ + {"op": "replace", "path": "/name", "value": "Jane"}, + {"op": "remove", "path": "/height"} + ]`) + + patch, err := jsonpatch.DecodePatch(patchJSON) + if err != nil { + panic(err) + } + + modified, err := patch.Apply(original) + if err != nil { + panic(err) + } + + fmt.Printf("Original document: %s\n", original) + fmt.Printf("Modified document: %s\n", modified) +} +``` + +When ran, you get the following output: + +```bash +$ go run main.go +Original document: {"name": "John", "age": 24, "height": 3.21} +Modified document: {"age":24,"name":"Jane"} +``` + +## Comparing JSON documents +Due to potential whitespace and ordering differences, one cannot simply compare +JSON strings or byte-arrays directly. + +As such, you can instead use `jsonpatch.Equal(document1, document2)` to +determine if two JSON documents are _structurally_ equal. This ignores +whitespace differences, and key-value ordering. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + similar := []byte(` + { + "age": 24, + "height": 3.21, + "name": "John" + } + `) + different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`) + + if jsonpatch.Equal(original, similar) { + fmt.Println(`"original" is structurally equal to "similar"`) + } + + if !jsonpatch.Equal(original, different) { + fmt.Println(`"original" is _not_ structurally equal to "different"`) + } +} +``` + +When ran, you get the following output: +```bash +$ go run main.go +"original" is structurally equal to "similar" +"original" is _not_ structurally equal to "different" +``` + +## Combine merge patches +Given two JSON merge patch documents, it is possible to combine them into a +single merge patch which can describe both set of changes. + +The resulting merge patch can be used such that applying it results in a +document structurally similar as merging each merge patch to the document +in succession. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + + nameAndHeight := []byte(`{"height":null,"name":"Jane"}`) + ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`) + + // Let's combine these merge patch documents... + combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes) + if err != nil { + panic(err) + } + + // Apply each patch individual against the original document + withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight) + if err != nil { + panic(err) + } + + withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes) + if err != nil { + panic(err) + } + + // Apply the combined patch against the original document + + withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch) + if err != nil { + panic(err) + } + + // Do both result in the same thing? They should! + if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) { + fmt.Println("Both JSON documents are structurally the same!") + } + + fmt.Printf("combined merge patch: %s", combinedPatch) +} +``` + +When ran, you get the following output: +```bash +$ go run main.go +Both JSON documents are structurally the same! 
+combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"} +``` + +# CLI for comparing JSON documents +You can install the commandline program `json-patch`. + +This program can take multiple JSON patch documents as arguments, +and fed a JSON document from `stdin`. It will apply the patch(es) against +the document and output the modified doc. + +**patch.1.json** +```json +[ + {"op": "replace", "path": "/name", "value": "Jane"}, + {"op": "remove", "path": "/height"} +] +``` + +**patch.2.json** +```json +[ + {"op": "add", "path": "/address", "value": "123 Main St"}, + {"op": "replace", "path": "/age", "value": "21"} +] +``` + +**document.json** +```json +{ + "name": "John", + "age": 24, + "height": 3.21 +} +``` + +You can then run: + +```bash +$ go install github.com/evanphx/json-patch/cmd/json-patch +$ cat document.json | json-patch -p patch.1.json -p patch.2.json +{"address":"123 Main St","age":"21","name":"Jane"} +``` + +# Help It! +Contributions are welcomed! Leave [an issue](https://github.com/evanphx/json-patch/issues) +or [create a PR](https://github.com/evanphx/json-patch/compare). + + +Before creating a pull request, we'd ask that you make sure tests are passing +and that you have added new tests when applicable. + +Contributors can run tests using: + +```bash +go test -cover ./... +``` + +Builds for pull requests are tested automatically +using [TravisCI](https://travis-ci.org/evanphx/json-patch). diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/errors.go b/vendor/gopkg.in/evanphx/json-patch.v4/errors.go new file mode 100644 index 0000000000..75304b4437 --- /dev/null +++ b/vendor/gopkg.in/evanphx/json-patch.v4/errors.go @@ -0,0 +1,38 @@ +package jsonpatch + +import "fmt" + +// AccumulatedCopySizeError is an error type returned when the accumulated size +// increase caused by copy operations in a patch operation has exceeded the +// limit. +type AccumulatedCopySizeError struct { + limit int64 + accumulated int64 +} + +// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError. +func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError { + return &AccumulatedCopySizeError{limit: l, accumulated: a} +} + +// Error implements the error interface. +func (a *AccumulatedCopySizeError) Error() string { + return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit) +} + +// ArraySizeError is an error type returned when the array size has exceeded +// the limit. +type ArraySizeError struct { + limit int + size int +} + +// NewArraySizeError returns an ArraySizeError. +func NewArraySizeError(l, s int) *ArraySizeError { + return &ArraySizeError{limit: l, size: s} +} + +// Error implements the error interface. 
+func (a *ArraySizeError) Error() string { + return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit) +} diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/merge.go b/vendor/gopkg.in/evanphx/json-patch.v4/merge.go new file mode 100644 index 0000000000..ad88d40181 --- /dev/null +++ b/vendor/gopkg.in/evanphx/json-patch.v4/merge.go @@ -0,0 +1,389 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" +) + +func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { + curDoc, err := cur.intoDoc() + + if err != nil { + pruneNulls(patch) + return patch + } + + patchDoc, err := patch.intoDoc() + + if err != nil { + return patch + } + + mergeDocs(curDoc, patchDoc, mergeMerge) + + return cur +} + +func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { + for k, v := range *patch { + if v == nil { + if mergeMerge { + (*doc)[k] = nil + } else { + delete(*doc, k) + } + } else { + cur, ok := (*doc)[k] + + if !ok || cur == nil { + if !mergeMerge { + pruneNulls(v) + } + + (*doc)[k] = v + } else { + (*doc)[k] = merge(cur, v, mergeMerge) + } + } + } +} + +func pruneNulls(n *lazyNode) { + sub, err := n.intoDoc() + + if err == nil { + pruneDocNulls(sub) + } else { + ary, err := n.intoAry() + + if err == nil { + pruneAryNulls(ary) + } + } +} + +func pruneDocNulls(doc *partialDoc) *partialDoc { + for k, v := range *doc { + if v == nil { + delete(*doc, k) + } else { + pruneNulls(v) + } + } + + return doc +} + +func pruneAryNulls(ary *partialArray) *partialArray { + newAry := []*lazyNode{} + + for _, v := range *ary { + if v != nil { + pruneNulls(v) + } + newAry = append(newAry, v) + } + + *ary = newAry + + return ary +} + +var ErrBadJSONDoc = fmt.Errorf("Invalid JSON Document") +var ErrBadJSONPatch = fmt.Errorf("Invalid JSON Patch") +var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents") + +// MergeMergePatches merges two merge patches together, such that +// applying this resulting merged merge patch to a document yields the same +// as merging each merge patch to the document in succession. +func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) { + return doMergePatch(patch1Data, patch2Data, true) +} + +// MergePatch merges the patchData into the docData. 
+func MergePatch(docData, patchData []byte) ([]byte, error) { + return doMergePatch(docData, patchData, false) +} + +func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { + doc := &partialDoc{} + + docErr := json.Unmarshal(docData, doc) + + patch := &partialDoc{} + + patchErr := json.Unmarshal(patchData, patch) + + if _, ok := docErr.(*json.SyntaxError); ok { + return nil, ErrBadJSONDoc + } + + if _, ok := patchErr.(*json.SyntaxError); ok { + return nil, ErrBadJSONPatch + } + + if docErr == nil && *doc == nil { + return nil, ErrBadJSONDoc + } + + if patchErr == nil && *patch == nil { + return nil, ErrBadJSONPatch + } + + if docErr != nil || patchErr != nil { + // Not an error, just not a doc, so we turn straight into the patch + if patchErr == nil { + if mergeMerge { + doc = patch + } else { + doc = pruneDocNulls(patch) + } + } else { + patchAry := &partialArray{} + patchErr = json.Unmarshal(patchData, patchAry) + + if patchErr != nil { + return nil, ErrBadJSONPatch + } + + pruneAryNulls(patchAry) + + out, patchErr := json.Marshal(patchAry) + + if patchErr != nil { + return nil, ErrBadJSONPatch + } + + return out, nil + } + } else { + mergeDocs(doc, patch, mergeMerge) + } + + return json.Marshal(doc) +} + +// resemblesJSONArray indicates whether the byte-slice "appears" to be +// a JSON array or not. +// False-positives are possible, as this function does not check the internal +// structure of the array. It only checks that the outer syntax is present and +// correct. +func resemblesJSONArray(input []byte) bool { + input = bytes.TrimSpace(input) + + hasPrefix := bytes.HasPrefix(input, []byte("[")) + hasSuffix := bytes.HasSuffix(input, []byte("]")) + + return hasPrefix && hasSuffix +} + +// CreateMergePatch will return a merge patch document capable of converting +// the original document(s) to the modified document(s). +// The parameters can be bytes of either two JSON Documents, or two arrays of +// JSON documents. +// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 +func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalResemblesArray := resemblesJSONArray(originalJSON) + modifiedResemblesArray := resemblesJSONArray(modifiedJSON) + + // Do both byte-slices seem like JSON arrays? + if originalResemblesArray && modifiedResemblesArray { + return createArrayMergePatch(originalJSON, modifiedJSON) + } + + // Are both byte-slices are not arrays? Then they are likely JSON objects... + if !originalResemblesArray && !modifiedResemblesArray { + return createObjectMergePatch(originalJSON, modifiedJSON) + } + + // None of the above? Then return an error because of mismatched types. + return nil, errBadMergeTypes +} + +// createObjectMergePatch will return a merge-patch document capable of +// converting the original document to the modified document. 
+func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDoc := map[string]interface{}{} + modifiedDoc := map[string]interface{}{} + + err := json.Unmarshal(originalJSON, &originalDoc) + if err != nil { + return nil, ErrBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDoc) + if err != nil { + return nil, ErrBadJSONDoc + } + + dest, err := getDiff(originalDoc, modifiedDoc) + if err != nil { + return nil, err + } + + return json.Marshal(dest) +} + +// createArrayMergePatch will return an array of merge-patch documents capable +// of converting the original document to the modified document for each +// pair of JSON documents provided in the arrays. +// Arrays of mismatched sizes will result in an error. +func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDocs := []json.RawMessage{} + modifiedDocs := []json.RawMessage{} + + err := json.Unmarshal(originalJSON, &originalDocs) + if err != nil { + return nil, ErrBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDocs) + if err != nil { + return nil, ErrBadJSONDoc + } + + total := len(originalDocs) + if len(modifiedDocs) != total { + return nil, ErrBadJSONDoc + } + + result := []json.RawMessage{} + for i := 0; i < len(originalDocs); i++ { + original := originalDocs[i] + modified := modifiedDocs[i] + + patch, err := createObjectMergePatch(original, modified) + if err != nil { + return nil, err + } + + result = append(result, json.RawMessage(patch)) + } + + return json.Marshal(result) +} + +// Returns true if the array matches (must be json types). +// As is idiomatic for go, an empty array is not the same as a nil array. +func matchesArray(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + if (a == nil && b != nil) || (a != nil && b == nil) { + return false + } + for i := range a { + if !matchesValue(a[i], b[i]) { + return false + } + } + return true +} + +// Returns true if the values matches (must be json types) +// The types of the values must match, otherwise it will always return false +// If two map[string]interface{} are given, all elements must match. +func matchesValue(av, bv interface{}) bool { + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + return false + } + switch at := av.(type) { + case string: + bt := bv.(string) + if bt == at { + return true + } + case float64: + bt := bv.(float64) + if bt == at { + return true + } + case bool: + bt := bv.(bool) + if bt == at { + return true + } + case nil: + // Both nil, fine. + return true + case map[string]interface{}: + bt := bv.(map[string]interface{}) + if len(bt) != len(at) { + return false + } + for key := range bt { + av, aOK := at[key] + bv, bOK := bt[key] + if aOK != bOK { + return false + } + if !matchesValue(av, bv) { + return false + } + } + return true + case []interface{}: + bt := bv.([]interface{}) + return matchesArray(at, bt) + } + return false +} + +// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. 
+func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { + into := map[string]interface{}{} + for key, bv := range b { + av, ok := a[key] + // value was added + if !ok { + into[key] = bv + continue + } + // If types have changed, replace completely + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + into[key] = bv + continue + } + // Types are the same, compare values + switch at := av.(type) { + case map[string]interface{}: + bt := bv.(map[string]interface{}) + dst := make(map[string]interface{}, len(bt)) + dst, err := getDiff(at, bt) + if err != nil { + return nil, err + } + if len(dst) > 0 { + into[key] = dst + } + case string, float64, bool: + if !matchesValue(av, bv) { + into[key] = bv + } + case []interface{}: + bt := bv.([]interface{}) + if !matchesArray(at, bt) { + into[key] = bv + } + case nil: + switch bv.(type) { + case nil: + // Both nil, fine. + default: + into[key] = bv + } + default: + panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) + } + } + // Now add all deleted values as nil + for key := range a { + _, found := b[key] + if !found { + into[key] = nil + } + } + return into, nil +} diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/patch.go b/vendor/gopkg.in/evanphx/json-patch.v4/patch.go new file mode 100644 index 0000000000..dc2b7e51e6 --- /dev/null +++ b/vendor/gopkg.in/evanphx/json-patch.v4/patch.go @@ -0,0 +1,851 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/pkg/errors" +) + +const ( + eRaw = iota + eDoc + eAry +) + +var ( + // SupportNegativeIndices decides whether to support non-standard practice of + // allowing negative indices to mean indices starting at the end of an array. + // Default to true. + SupportNegativeIndices bool = true + // AccumulatedCopySizeLimit limits the total size increase in bytes caused by + // "copy" operations in a patch. + AccumulatedCopySizeLimit int64 = 0 +) + +var ( + ErrTestFailed = errors.New("test failed") + ErrMissing = errors.New("missing value") + ErrUnknownType = errors.New("unknown object type") + ErrInvalid = errors.New("invalid state detected") + ErrInvalidIndex = errors.New("invalid index referenced") +) + +type lazyNode struct { + raw *json.RawMessage + doc partialDoc + ary partialArray + which int +} + +// Operation is a single JSON-Patch step, such as a single 'add' operation. +type Operation map[string]*json.RawMessage + +// Patch is an ordered collection of Operations. 
+type Patch []Operation + +type partialDoc map[string]*lazyNode +type partialArray []*lazyNode + +type container interface { + get(key string) (*lazyNode, error) + set(key string, val *lazyNode) error + add(key string, val *lazyNode) error + remove(key string) error +} + +func newLazyNode(raw *json.RawMessage) *lazyNode { + return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} +} + +func (n *lazyNode) MarshalJSON() ([]byte, error) { + switch n.which { + case eRaw: + return json.Marshal(n.raw) + case eDoc: + return json.Marshal(n.doc) + case eAry: + return json.Marshal(n.ary) + default: + return nil, ErrUnknownType + } +} + +func (n *lazyNode) UnmarshalJSON(data []byte) error { + dest := make(json.RawMessage, len(data)) + copy(dest, data) + n.raw = &dest + n.which = eRaw + return nil +} + +func deepCopy(src *lazyNode) (*lazyNode, int, error) { + if src == nil { + return nil, 0, nil + } + a, err := src.MarshalJSON() + if err != nil { + return nil, 0, err + } + sz := len(a) + ra := make(json.RawMessage, sz) + copy(ra, a) + return newLazyNode(&ra), sz, nil +} + +func (n *lazyNode) intoDoc() (*partialDoc, error) { + if n.which == eDoc { + return &n.doc, nil + } + + if n.raw == nil { + return nil, ErrInvalid + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return nil, err + } + + n.which = eDoc + return &n.doc, nil +} + +func (n *lazyNode) intoAry() (*partialArray, error) { + if n.which == eAry { + return &n.ary, nil + } + + if n.raw == nil { + return nil, ErrInvalid + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return nil, err + } + + n.which = eAry + return &n.ary, nil +} + +func (n *lazyNode) compact() []byte { + buf := &bytes.Buffer{} + + if n.raw == nil { + return nil + } + + err := json.Compact(buf, *n.raw) + + if err != nil { + return *n.raw + } + + return buf.Bytes() +} + +func (n *lazyNode) tryDoc() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return false + } + + n.which = eDoc + return true +} + +func (n *lazyNode) tryAry() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return false + } + + n.which = eAry + return true +} + +func (n *lazyNode) equal(o *lazyNode) bool { + if n.which == eRaw { + if !n.tryDoc() && !n.tryAry() { + if o.which != eRaw { + return false + } + + return bytes.Equal(n.compact(), o.compact()) + } + } + + if n.which == eDoc { + if o.which == eRaw { + if !o.tryDoc() { + return false + } + } + + if o.which != eDoc { + return false + } + + if len(n.doc) != len(o.doc) { + return false + } + + for k, v := range n.doc { + ov, ok := o.doc[k] + + if !ok { + return false + } + + if (v == nil) != (ov == nil) { + return false + } + + if v == nil && ov == nil { + continue + } + + if !v.equal(ov) { + return false + } + } + + return true + } + + if o.which != eAry && !o.tryAry() { + return false + } + + if len(n.ary) != len(o.ary) { + return false + } + + for idx, val := range n.ary { + if !val.equal(o.ary[idx]) { + return false + } + } + + return true +} + +// Kind reads the "op" field of the Operation. +func (o Operation) Kind() string { + if obj, ok := o["op"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +// Path reads the "path" field of the Operation. 
+func (o Operation) Path() (string, error) { + if obj, ok := o["path"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown", err + } + + return op, nil + } + + return "unknown", errors.Wrapf(ErrMissing, "operation missing path field") +} + +// From reads the "from" field of the Operation. +func (o Operation) From() (string, error) { + if obj, ok := o["from"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown", err + } + + return op, nil + } + + return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field") +} + +func (o Operation) value() *lazyNode { + if obj, ok := o["value"]; ok { + return newLazyNode(obj) + } + + return nil +} + +// ValueInterface decodes the operation value into an interface. +func (o Operation) ValueInterface() (interface{}, error) { + if obj, ok := o["value"]; ok && obj != nil { + var v interface{} + + err := json.Unmarshal(*obj, &v) + + if err != nil { + return nil, err + } + + return v, nil + } + + return nil, errors.Wrapf(ErrMissing, "operation, missing value field") +} + +func isArray(buf []byte) bool { +Loop: + for _, c := range buf { + switch c { + case ' ': + case '\n': + case '\t': + continue + case '[': + return true + default: + break Loop + } + } + + return false +} + +func findObject(pd *container, path string) (container, string) { + doc := *pd + + split := strings.Split(path, "/") + + if len(split) < 2 { + return nil, "" + } + + parts := split[1 : len(split)-1] + + key := split[len(split)-1] + + var err error + + for _, part := range parts { + + next, ok := doc.get(decodePatchKey(part)) + + if next == nil || ok != nil { + return nil, "" + } + + if isArray(*next.raw) { + doc, err = next.intoAry() + + if err != nil { + return nil, "" + } + } else { + doc, err = next.intoDoc() + + if err != nil { + return nil, "" + } + } + } + + return doc, decodePatchKey(key) +} + +func (d *partialDoc) set(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) add(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) get(key string) (*lazyNode, error) { + return (*d)[key], nil +} + +func (d *partialDoc) remove(key string) error { + _, ok := (*d)[key] + if !ok { + return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key) + } + + delete(*d, key) + return nil +} + +// set should only be used to implement the "replace" operation, so "key" must +// be an already existing index in "d". 
+func (d *partialArray) set(key string, val *lazyNode) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + if idx < 0 { + if !SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(*d) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(*d) + } + + (*d)[idx] = val + return nil +} + +func (d *partialArray) add(key string, val *lazyNode) error { + if key == "-" { + *d = append(*d, val) + return nil + } + + idx, err := strconv.Atoi(key) + if err != nil { + return errors.Wrapf(err, "value was not a proper array index: '%s'", key) + } + + sz := len(*d) + 1 + + ary := make([]*lazyNode, sz) + + cur := *d + + if idx >= len(ary) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if idx < 0 { + if !SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(ary) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(ary) + } + + copy(ary[0:idx], cur[0:idx]) + ary[idx] = val + copy(ary[idx+1:], cur[idx:]) + + *d = ary + return nil +} + +func (d *partialArray) get(key string) (*lazyNode, error) { + idx, err := strconv.Atoi(key) + + if err != nil { + return nil, err + } + + if idx < 0 { + if !SupportNegativeIndices { + return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(*d) { + return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(*d) + } + + if idx >= len(*d) { + return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + return (*d)[idx], nil +} + +func (d *partialArray) remove(key string) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + cur := *d + + if idx >= len(cur) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if idx < 0 { + if !SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(cur) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(cur) + } + + ary := make([]*lazyNode, len(cur)-1) + + copy(ary[0:idx], cur[0:idx]) + copy(ary[idx:], cur[idx+1:]) + + *d = ary + return nil + +} + +func (p Patch) add(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "add operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path) + } + + err = con.add(key, op.value()) + if err != nil { + return errors.Wrapf(err, "error in add for path: '%s'", path) + } + + return nil +} + +func (p Patch) remove(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "remove operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path) + } + + err = con.remove(key) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) + } + + return nil +} + +func (p Patch) replace(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "replace 
operation failed to decode path") + } + + if path == "" { + val := op.value() + + if val.which == eRaw { + if !val.tryDoc() { + if !val.tryAry() { + return errors.Wrapf(err, "replace operation value must be object or array") + } + } + } + + switch val.which { + case eAry: + *doc = &val.ary + case eDoc: + *doc = &val.doc + case eRaw: + return errors.Wrapf(err, "replace operation hit impossible case") + } + + return nil + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path) + } + + _, ok := con.get(key) + if ok != nil { + return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path) + } + + err = con.set(key, op.value()) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) + } + + return nil +} + +func (p Patch) move(doc *container, op Operation) error { + from, err := op.From() + if err != nil { + return errors.Wrapf(err, "move operation failed to decode from") + } + + con, key := findObject(doc, from) + + if con == nil { + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", key) + } + + err = con.remove(key) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", key) + } + + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "move operation failed to decode path") + } + + con, key = findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path) + } + + err = con.add(key, val) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", path) + } + + return nil +} + +func (p Patch) test(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "test operation failed to decode path") + } + + if path == "" { + var self lazyNode + + switch sv := (*doc).(type) { + case *partialDoc: + self.doc = *sv + self.which = eDoc + case *partialArray: + self.ary = *sv + self.which = eAry + } + + if self.equal(op.value()) { + return nil + } + + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in test for path: '%s'", path) + } + + if val == nil { + if op.value().raw == nil { + return nil + } + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } else if op.value() == nil { + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } + + if val.equal(op.value()) { + return nil + } + + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) +} + +func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error { + from, err := op.From() + if err != nil { + return errors.Wrapf(err, "copy operation failed to decode from") + } + + con, key := findObject(doc, from) + + if con == nil { + return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in copy for from: '%s'", from) + } + + path, err := op.Path() + if err != nil { + return 
errors.Wrapf(ErrMissing, "copy operation failed to decode path") + } + + con, key = findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) + } + + valCopy, sz, err := deepCopy(val) + if err != nil { + return errors.Wrapf(err, "error while performing deep copy") + } + + (*accumulatedCopySize) += int64(sz) + if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit { + return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize) + } + + err = con.add(key, valCopy) + if err != nil { + return errors.Wrapf(err, "error while adding value during copy") + } + + return nil +} + +// Equal indicates if 2 JSON documents have the same structural equality. +func Equal(a, b []byte) bool { + ra := make(json.RawMessage, len(a)) + copy(ra, a) + la := newLazyNode(&ra) + + rb := make(json.RawMessage, len(b)) + copy(rb, b) + lb := newLazyNode(&rb) + + return la.equal(lb) +} + +// DecodePatch decodes the passed JSON document as an RFC 6902 patch. +func DecodePatch(buf []byte) (Patch, error) { + var p Patch + + err := json.Unmarshal(buf, &p) + + if err != nil { + return nil, err + } + + return p, nil +} + +// Apply mutates a JSON document according to the patch, and returns the new +// document. +func (p Patch) Apply(doc []byte) ([]byte, error) { + return p.ApplyIndent(doc, "") +} + +// ApplyIndent mutates a JSON document according to the patch, and returns the new +// document indented. +func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { + if len(doc) == 0 { + return doc, nil + } + + var pd container + if doc[0] == '[' { + pd = &partialArray{} + } else { + pd = &partialDoc{} + } + + err := json.Unmarshal(doc, pd) + + if err != nil { + return nil, err + } + + err = nil + + var accumulatedCopySize int64 + + for _, op := range p { + switch op.Kind() { + case "add": + err = p.add(&pd, op) + case "remove": + err = p.remove(&pd, op) + case "replace": + err = p.replace(&pd, op) + case "move": + err = p.move(&pd, op) + case "test": + err = p.test(&pd, op) + case "copy": + err = p.copy(&pd, op, &accumulatedCopySize) + default: + err = fmt.Errorf("Unexpected kind: %s", op.Kind()) + } + + if err != nil { + return nil, err + } + } + + if indent != "" { + return json.MarshalIndent(pd, "", indent) + } + + return json.Marshal(pd) +} + +// From http://tools.ietf.org/html/rfc6901#section-4 : +// +// Evaluation of each reference token begins by decoding any escaped +// character sequence. This is performed by first transforming any +// occurrence of the sequence '~1' to '/', and then transforming any +// occurrence of the sequence '~0' to '~'. + +var ( + rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") +) + +func decodePatchKey(k string) string { + return rfc6901Decoder.Replace(k) +} diff --git a/vendor/k8s.io/api/admissionregistration/v1/doc.go b/vendor/k8s.io/api/admissionregistration/v1/doc.go index c3940f090c..ca0086188a 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/doc.go +++ b/vendor/k8s.io/api/admissionregistration/v1/doc.go @@ -17,6 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=admissionregistration.k8s.io // Package v1 is the v1 version of the API. 
diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go index 9a2d0bccdd..09295734df 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1/generated.proto +// source: k8s.io/api/admissionregistration/v1/generated.proto package v1 @@ -25,6 +25,7 @@ import ( io "io" proto "github.com/gogo/protobuf/proto" + k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" @@ -44,10 +45,66 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *AuditAnnotation) Reset() { *m = AuditAnnotation{} } +func (*AuditAnnotation) ProtoMessage() {} +func (*AuditAnnotation) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{0} +} +func (m *AuditAnnotation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditAnnotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditAnnotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditAnnotation.Merge(m, src) +} +func (m *AuditAnnotation) XXX_Size() int { + return m.Size() +} +func (m *AuditAnnotation) XXX_DiscardUnknown() { + xxx_messageInfo_AuditAnnotation.DiscardUnknown(m) +} + +var xxx_messageInfo_AuditAnnotation proto.InternalMessageInfo + +func (m *ExpressionWarning) Reset() { *m = ExpressionWarning{} } +func (*ExpressionWarning) ProtoMessage() {} +func (*ExpressionWarning) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{1} +} +func (m *ExpressionWarning) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExpressionWarning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExpressionWarning) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExpressionWarning.Merge(m, src) +} +func (m *ExpressionWarning) XXX_Size() int { + return m.Size() +} +func (m *ExpressionWarning) XXX_DiscardUnknown() { + xxx_messageInfo_ExpressionWarning.DiscardUnknown(m) +} + +var xxx_messageInfo_ExpressionWarning proto.InternalMessageInfo + func (m *MatchCondition) Reset() { *m = MatchCondition{} } func (*MatchCondition) ProtoMessage() {} func (*MatchCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{0} + return fileDescriptor_3205c7dc5bf0c9bf, []int{2} } func (m *MatchCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -72,10 +129,38 @@ func (m *MatchCondition) XXX_DiscardUnknown() { var xxx_messageInfo_MatchCondition proto.InternalMessageInfo +func (m *MatchResources) Reset() { *m = MatchResources{} } +func (*MatchResources) ProtoMessage() {} +func (*MatchResources) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{3} +} +func (m *MatchResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MatchResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MatchResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_MatchResources.Merge(m, src) +} +func (m *MatchResources) XXX_Size() int { + return m.Size() +} +func (m *MatchResources) XXX_DiscardUnknown() { + xxx_messageInfo_MatchResources.DiscardUnknown(m) +} + +var xxx_messageInfo_MatchResources proto.InternalMessageInfo + func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } func (*MutatingWebhook) ProtoMessage() {} func (*MutatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{1} + return fileDescriptor_3205c7dc5bf0c9bf, []int{4} } func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +188,7 @@ var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } func (*MutatingWebhookConfiguration) ProtoMessage() {} func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{2} + return fileDescriptor_3205c7dc5bf0c9bf, []int{5} } func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +216,7 @@ var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } func (*MutatingWebhookConfigurationList) ProtoMessage() {} func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{3} + return fileDescriptor_3205c7dc5bf0c9bf, []int{6} } func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -156,10 +241,94 @@ func (m *MutatingWebhookConfigurationList) XXX_DiscardUnknown() { var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo +func (m *NamedRuleWithOperations) Reset() { *m = NamedRuleWithOperations{} } +func (*NamedRuleWithOperations) ProtoMessage() {} +func (*NamedRuleWithOperations) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{7} +} +func (m *NamedRuleWithOperations) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamedRuleWithOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamedRuleWithOperations) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedRuleWithOperations.Merge(m, src) +} +func (m *NamedRuleWithOperations) XXX_Size() int { + return m.Size() +} +func (m *NamedRuleWithOperations) XXX_DiscardUnknown() { + xxx_messageInfo_NamedRuleWithOperations.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedRuleWithOperations proto.InternalMessageInfo + +func (m *ParamKind) Reset() { *m = ParamKind{} } +func (*ParamKind) ProtoMessage() {} +func (*ParamKind) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{8} +} +func (m *ParamKind) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParamKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ParamKind) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParamKind.Merge(m, src) +} +func (m *ParamKind) XXX_Size() int { + return m.Size() +} +func (m *ParamKind) 
XXX_DiscardUnknown() { + xxx_messageInfo_ParamKind.DiscardUnknown(m) +} + +var xxx_messageInfo_ParamKind proto.InternalMessageInfo + +func (m *ParamRef) Reset() { *m = ParamRef{} } +func (*ParamRef) ProtoMessage() {} +func (*ParamRef) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{9} +} +func (m *ParamRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParamRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ParamRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParamRef.Merge(m, src) +} +func (m *ParamRef) XXX_Size() int { + return m.Size() +} +func (m *ParamRef) XXX_DiscardUnknown() { + xxx_messageInfo_ParamRef.DiscardUnknown(m) +} + +var xxx_messageInfo_ParamRef proto.InternalMessageInfo + func (m *Rule) Reset() { *m = Rule{} } func (*Rule) ProtoMessage() {} func (*Rule) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{4} + return fileDescriptor_3205c7dc5bf0c9bf, []int{10} } func (m *Rule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -187,7 +356,7 @@ var xxx_messageInfo_Rule proto.InternalMessageInfo func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } func (*RuleWithOperations) ProtoMessage() {} func (*RuleWithOperations) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{5} + return fileDescriptor_3205c7dc5bf0c9bf, []int{11} } func (m *RuleWithOperations) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -215,7 +384,7 @@ var xxx_messageInfo_RuleWithOperations proto.InternalMessageInfo func (m *ServiceReference) Reset() { *m = ServiceReference{} } func (*ServiceReference) ProtoMessage() {} func (*ServiceReference) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{6} + return fileDescriptor_3205c7dc5bf0c9bf, []int{12} } func (m *ServiceReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -240,10 +409,234 @@ func (m *ServiceReference) XXX_DiscardUnknown() { var xxx_messageInfo_ServiceReference proto.InternalMessageInfo +func (m *TypeChecking) Reset() { *m = TypeChecking{} } +func (*TypeChecking) ProtoMessage() {} +func (*TypeChecking) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{13} +} +func (m *TypeChecking) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypeChecking) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TypeChecking) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeChecking.Merge(m, src) +} +func (m *TypeChecking) XXX_Size() int { + return m.Size() +} +func (m *TypeChecking) XXX_DiscardUnknown() { + xxx_messageInfo_TypeChecking.DiscardUnknown(m) +} + +var xxx_messageInfo_TypeChecking proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicy) Reset() { *m = ValidatingAdmissionPolicy{} } +func (*ValidatingAdmissionPolicy) ProtoMessage() {} +func (*ValidatingAdmissionPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{14} +} +func (m *ValidatingAdmissionPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + 
return b[:n], nil +} +func (m *ValidatingAdmissionPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicy.Merge(m, src) +} +func (m *ValidatingAdmissionPolicy) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicy proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyBinding) Reset() { *m = ValidatingAdmissionPolicyBinding{} } +func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {} +func (*ValidatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{15} +} +func (m *ValidatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyBinding.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyBinding) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyBinding) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyBinding proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyBindingList) Reset() { *m = ValidatingAdmissionPolicyBindingList{} } +func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {} +func (*ValidatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{16} +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyBindingList.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyBindingList proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyBindingSpec) Reset() { *m = ValidatingAdmissionPolicyBindingSpec{} } +func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {} +func (*ValidatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{17} +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_DiscardUnknown() { + 
xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyList) Reset() { *m = ValidatingAdmissionPolicyList{} } +func (*ValidatingAdmissionPolicyList) ProtoMessage() {} +func (*ValidatingAdmissionPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{18} +} +func (m *ValidatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyList.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyList) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyList proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicySpec) Reset() { *m = ValidatingAdmissionPolicySpec{} } +func (*ValidatingAdmissionPolicySpec) ProtoMessage() {} +func (*ValidatingAdmissionPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{19} +} +func (m *ValidatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicySpec.Merge(m, src) +} +func (m *ValidatingAdmissionPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicySpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicySpec proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyStatus) Reset() { *m = ValidatingAdmissionPolicyStatus{} } +func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {} +func (*ValidatingAdmissionPolicyStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{20} +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyStatus.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyStatus proto.InternalMessageInfo + func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } func (*ValidatingWebhook) ProtoMessage() {} func (*ValidatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{7} + return fileDescriptor_3205c7dc5bf0c9bf, []int{21} } func (m *ValidatingWebhook) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -271,7 +664,7 @@ var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } func (*ValidatingWebhookConfiguration) ProtoMessage() {} func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{8} + return fileDescriptor_3205c7dc5bf0c9bf, []int{22} } func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -299,7 +692,7 @@ var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } func (*ValidatingWebhookConfigurationList) ProtoMessage() {} func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{9} + return fileDescriptor_3205c7dc5bf0c9bf, []int{23} } func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -324,10 +717,66 @@ func (m *ValidatingWebhookConfigurationList) XXX_DiscardUnknown() { var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo +func (m *Validation) Reset() { *m = Validation{} } +func (*Validation) ProtoMessage() {} +func (*Validation) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{24} +} +func (m *Validation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Validation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Validation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Validation.Merge(m, src) +} +func (m *Validation) XXX_Size() int { + return m.Size() +} +func (m *Validation) XXX_DiscardUnknown() { + xxx_messageInfo_Validation.DiscardUnknown(m) +} + +var xxx_messageInfo_Validation proto.InternalMessageInfo + +func (m *Variable) Reset() { *m = Variable{} } +func (*Variable) ProtoMessage() {} +func (*Variable) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{25} +} +func (m *Variable) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Variable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Variable) XXX_Merge(src proto.Message) { + xxx_messageInfo_Variable.Merge(m, src) +} +func (m *Variable) XXX_Size() int { + return m.Size() +} +func (m *Variable) XXX_DiscardUnknown() { + xxx_messageInfo_Variable.DiscardUnknown(m) +} + +var xxx_messageInfo_Variable proto.InternalMessageInfo + func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } func (*WebhookClientConfig) ProtoMessage() {} func (*WebhookClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{10} + return fileDescriptor_3205c7dc5bf0c9bf, []int{26} } func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -353,99 +802,237 @@ func (m *WebhookClientConfig) XXX_DiscardUnknown() { var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo func init() { + proto.RegisterType((*AuditAnnotation)(nil), "k8s.io.api.admissionregistration.v1.AuditAnnotation") + proto.RegisterType((*ExpressionWarning)(nil), "k8s.io.api.admissionregistration.v1.ExpressionWarning") 
proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1.MatchCondition") + proto.RegisterType((*MatchResources)(nil), "k8s.io.api.admissionregistration.v1.MatchResources") proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhook") proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhookConfiguration") proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhookConfigurationList") + proto.RegisterType((*NamedRuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1.NamedRuleWithOperations") + proto.RegisterType((*ParamKind)(nil), "k8s.io.api.admissionregistration.v1.ParamKind") + proto.RegisterType((*ParamRef)(nil), "k8s.io.api.admissionregistration.v1.ParamRef") proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1.Rule") proto.RegisterType((*RuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1.RuleWithOperations") proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1.ServiceReference") + proto.RegisterType((*TypeChecking)(nil), "k8s.io.api.admissionregistration.v1.TypeChecking") + proto.RegisterType((*ValidatingAdmissionPolicy)(nil), "k8s.io.api.admissionregistration.v1.ValidatingAdmissionPolicy") + proto.RegisterType((*ValidatingAdmissionPolicyBinding)(nil), "k8s.io.api.admissionregistration.v1.ValidatingAdmissionPolicyBinding") + proto.RegisterType((*ValidatingAdmissionPolicyBindingList)(nil), "k8s.io.api.admissionregistration.v1.ValidatingAdmissionPolicyBindingList") + proto.RegisterType((*ValidatingAdmissionPolicyBindingSpec)(nil), "k8s.io.api.admissionregistration.v1.ValidatingAdmissionPolicyBindingSpec") + proto.RegisterType((*ValidatingAdmissionPolicyList)(nil), "k8s.io.api.admissionregistration.v1.ValidatingAdmissionPolicyList") + proto.RegisterType((*ValidatingAdmissionPolicySpec)(nil), "k8s.io.api.admissionregistration.v1.ValidatingAdmissionPolicySpec") + proto.RegisterType((*ValidatingAdmissionPolicyStatus)(nil), "k8s.io.api.admissionregistration.v1.ValidatingAdmissionPolicyStatus") proto.RegisterType((*ValidatingWebhook)(nil), "k8s.io.api.admissionregistration.v1.ValidatingWebhook") proto.RegisterType((*ValidatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1.ValidatingWebhookConfiguration") proto.RegisterType((*ValidatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1.ValidatingWebhookConfigurationList") + proto.RegisterType((*Validation)(nil), "k8s.io.api.admissionregistration.v1.Validation") + proto.RegisterType((*Variable)(nil), "k8s.io.api.admissionregistration.v1.Variable") proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.admissionregistration.v1.WebhookClientConfig") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1/generated.proto", fileDescriptor_aaac5994f79683e8) -} - -var fileDescriptor_aaac5994f79683e8 = []byte{ - // 1169 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4d, 0x6f, 0x1b, 0x45, - 0x18, 0xce, 0xc6, 0x36, 0xb1, 0xc7, 0x4e, 0xd2, 0x0c, 0xd0, 0x2e, 0xa5, 0xf2, 0x5a, 0xae, 0x84, - 0x82, 0x00, 0x6f, 0x9b, 0x96, 0x52, 0x71, 0x41, 0xb1, 0x29, 0x28, 0x22, 0x69, 0xa3, 0x49, 0x3f, - 0x10, 0xea, 0xa1, 0xe3, 0xf5, 0xd8, 0x1e, 0x62, 0xef, 0xac, 0x66, 0x66, 0x4d, 0x7b, 0xe3, 0x27, - 0xf0, 0x17, 0xe0, 0x4f, 0xc0, 0x95, 0x5b, 0x8f, 0xbd, 0x91, 0x03, 0x5a, 0x91, 0xe5, 0xc2, 0x81, - 
0x5f, 0x90, 0x13, 0x9a, 0xd9, 0xf5, 0xae, 0xbf, 0x12, 0x56, 0x39, 0xe4, 0x94, 0x5b, 0xe6, 0x79, - 0xdf, 0xf7, 0x79, 0xe7, 0x19, 0xbf, 0x1f, 0xab, 0x80, 0xdd, 0xc3, 0xfb, 0xa2, 0x41, 0x99, 0x7d, - 0xe8, 0xb7, 0x09, 0x77, 0x89, 0x24, 0xc2, 0x1e, 0x11, 0xb7, 0xc3, 0xb8, 0x1d, 0x1b, 0xb0, 0x47, - 0x6d, 0xdc, 0x19, 0x52, 0x21, 0x28, 0x73, 0x39, 0xe9, 0x51, 0x21, 0x39, 0x96, 0x94, 0xb9, 0xf6, - 0xe8, 0xb6, 0xdd, 0x23, 0x2e, 0xe1, 0x58, 0x92, 0x4e, 0xc3, 0xe3, 0x4c, 0x32, 0x78, 0x33, 0x0a, - 0x6a, 0x60, 0x8f, 0x36, 0x16, 0x06, 0x35, 0x46, 0xb7, 0xaf, 0x7f, 0xd2, 0xa3, 0xb2, 0xef, 0xb7, - 0x1b, 0x0e, 0x1b, 0xda, 0x3d, 0xd6, 0x63, 0xb6, 0x8e, 0x6d, 0xfb, 0x5d, 0x7d, 0xd2, 0x07, 0xfd, - 0x57, 0xc4, 0x79, 0xfd, 0x6e, 0x7a, 0x91, 0x21, 0x76, 0xfa, 0xd4, 0x25, 0xfc, 0x95, 0xed, 0x1d, - 0xf6, 0x14, 0x20, 0xec, 0x21, 0x91, 0x78, 0xc1, 0x4d, 0xae, 0xdb, 0xa7, 0x45, 0x71, 0xdf, 0x95, - 0x74, 0x48, 0xe6, 0x02, 0xee, 0xfd, 0x5f, 0x80, 0x70, 0xfa, 0x64, 0x88, 0x67, 0xe3, 0xea, 0x5d, - 0xb0, 0xb6, 0x87, 0xa5, 0xd3, 0x6f, 0x31, 0xb7, 0x43, 0x95, 0x44, 0x58, 0x03, 0x79, 0x17, 0x0f, - 0x89, 0x69, 0xd4, 0x8c, 0xcd, 0x52, 0xb3, 0xf2, 0x3a, 0xb0, 0x96, 0xc2, 0xc0, 0xca, 0x3f, 0xc4, - 0x43, 0x82, 0xb4, 0x05, 0x6e, 0x01, 0x40, 0x5e, 0x7a, 0x9c, 0xe8, 0xe7, 0x31, 0x97, 0xb5, 0x1f, - 0x8c, 0xfd, 0xc0, 0x83, 0xc4, 0x82, 0x26, 0xbc, 0xea, 0xbf, 0x16, 0xc1, 0xfa, 0x9e, 0x2f, 0xb1, - 0xa4, 0x6e, 0xef, 0x19, 0x69, 0xf7, 0x19, 0x3b, 0xcc, 0x90, 0x89, 0x83, 0x8a, 0x33, 0xa0, 0xc4, - 0x95, 0x2d, 0xe6, 0x76, 0x69, 0x4f, 0xe7, 0x2a, 0x6f, 0xdd, 0x6f, 0x64, 0xf8, 0x9d, 0x1a, 0x71, - 0x96, 0xd6, 0x44, 0x7c, 0xf3, 0x9d, 0x38, 0x47, 0x65, 0x12, 0x45, 0x53, 0x39, 0xe0, 0x73, 0x50, - 0xe0, 0xfe, 0x80, 0x08, 0x33, 0x57, 0xcb, 0x6d, 0x96, 0xb7, 0x3e, 0xcb, 0x94, 0x0c, 0xf9, 0x03, - 0xf2, 0x8c, 0xca, 0xfe, 0x23, 0x8f, 0x44, 0xa0, 0x68, 0xae, 0xc6, 0xb9, 0x0a, 0xca, 0x26, 0x50, - 0x44, 0x0a, 0x77, 0xc1, 0x6a, 0x17, 0xd3, 0x81, 0xcf, 0xc9, 0x3e, 0x1b, 0x50, 0xe7, 0x95, 0x99, - 0xd7, 0xe2, 0x3f, 0x08, 0x03, 0x6b, 0xf5, 0xab, 0x49, 0xc3, 0x49, 0x60, 0x6d, 0x4c, 0x01, 0x8f, - 0x5f, 0x79, 0x04, 0x4d, 0x07, 0xc3, 0x2f, 0x41, 0x79, 0xa8, 0x7e, 0xbd, 0x98, 0xab, 0xa4, 0xb9, - 0xea, 0x61, 0x60, 0x95, 0xf7, 0x52, 0xf8, 0x24, 0xb0, 0xd6, 0x27, 0x8e, 0x9a, 0x67, 0x32, 0x0c, - 0xbe, 0x04, 0x1b, 0xea, 0xb5, 0x85, 0x87, 0x1d, 0x72, 0x40, 0x06, 0xc4, 0x91, 0x8c, 0x9b, 0x05, - 0xfd, 0xd4, 0x77, 0x26, 0xd4, 0x27, 0x75, 0xd5, 0xf0, 0x0e, 0x7b, 0x0a, 0x10, 0x0d, 0x55, 0xbe, - 0x4a, 0xfe, 0x2e, 0x6e, 0x93, 0xc1, 0x38, 0xb4, 0xf9, 0x6e, 0x18, 0x58, 0x1b, 0x0f, 0x67, 0x19, - 0xd1, 0x7c, 0x12, 0xc8, 0xc0, 0x1a, 0x6b, 0x7f, 0x4f, 0x1c, 0x99, 0xa4, 0x2d, 0x9f, 0x3f, 0x2d, - 0x0c, 0x03, 0x6b, 0xed, 0xd1, 0x14, 0x1d, 0x9a, 0xa1, 0x57, 0x0f, 0x26, 0x68, 0x87, 0x3c, 0xe8, - 0x76, 0x89, 0x23, 0x85, 0xf9, 0x56, 0xfa, 0x60, 0x07, 0x29, 0xac, 0x1e, 0x2c, 0x3d, 0xb6, 0x06, - 0x58, 0x08, 0x34, 0x19, 0x06, 0x3f, 0x07, 0x6b, 0xaa, 0xa7, 0x98, 0x2f, 0x0f, 0x88, 0xc3, 0xdc, - 0x8e, 0x30, 0x57, 0x6a, 0xc6, 0x66, 0x21, 0xba, 0xc1, 0xe3, 0x29, 0x0b, 0x9a, 0xf1, 0x84, 0x4f, - 0xc0, 0xb5, 0xa4, 0x8a, 0x10, 0x19, 0x51, 0xf2, 0xc3, 0x53, 0xc2, 0xd5, 0x41, 0x98, 0xc5, 0x5a, - 0x6e, 0xb3, 0xd4, 0x7c, 0x3f, 0x0c, 0xac, 0x6b, 0xdb, 0x8b, 0x5d, 0xd0, 0x69, 0xb1, 0xf0, 0x05, - 0x80, 0x9c, 0x50, 0x77, 0xc4, 0x1c, 0x5d, 0x7e, 0x71, 0x41, 0x00, 0xad, 0xef, 0x56, 0x18, 0x58, - 0x10, 0xcd, 0x59, 0x4f, 0x02, 0xeb, 0xea, 0x3c, 0xaa, 0xcb, 0x63, 0x01, 0x17, 0x1c, 0x81, 0xf5, - 0xe1, 0xd4, 0xa4, 0x10, 0x66, 0x45, 0x77, 0xc8, 0x9d, 0x4c, 0x1d, 0x32, 0x3d, 0x65, 0x9a, 0xd7, - 0xe2, 0xee, 0x58, 0x9f, 
0xc6, 0x05, 0x9a, 0x4d, 0x52, 0x3f, 0x32, 0xc0, 0x8d, 0x99, 0xc9, 0x11, - 0x75, 0xaa, 0x1f, 0x91, 0xc3, 0x17, 0xa0, 0xa8, 0x0a, 0xa2, 0x83, 0x25, 0xd6, 0xa3, 0xa4, 0xbc, - 0x75, 0x2b, 0x5b, 0xf9, 0x44, 0xb5, 0xb2, 0x47, 0x24, 0x4e, 0xc7, 0x57, 0x8a, 0xa1, 0x84, 0x15, - 0x3e, 0x05, 0xc5, 0x38, 0xb3, 0x30, 0x97, 0xb5, 0xe6, 0xbb, 0xd9, 0x34, 0x4f, 0x5f, 0xbb, 0x99, - 0x57, 0x59, 0x50, 0xc2, 0x55, 0xff, 0xc7, 0x00, 0xb5, 0xb3, 0xa4, 0xed, 0x52, 0x21, 0xe1, 0xf3, - 0x39, 0x79, 0x8d, 0x8c, 0xdd, 0x41, 0x45, 0x24, 0xee, 0x4a, 0x2c, 0xae, 0x38, 0x46, 0x26, 0xa4, - 0x75, 0x41, 0x81, 0x4a, 0x32, 0x1c, 0xeb, 0xda, 0x3e, 0x8f, 0xae, 0xa9, 0x3b, 0xa7, 0x73, 0x6f, - 0x47, 0xf1, 0xa2, 0x88, 0xbe, 0xfe, 0xbb, 0x01, 0xf2, 0x6a, 0x10, 0xc2, 0x8f, 0x40, 0x09, 0x7b, - 0xf4, 0x6b, 0xce, 0x7c, 0x4f, 0x98, 0x86, 0xae, 0xf8, 0xd5, 0x30, 0xb0, 0x4a, 0xdb, 0xfb, 0x3b, - 0x11, 0x88, 0x52, 0x3b, 0xbc, 0x0d, 0xca, 0xd8, 0xa3, 0x49, 0x83, 0x2c, 0x6b, 0xf7, 0x75, 0xd5, - 0xae, 0xdb, 0xfb, 0x3b, 0x49, 0x53, 0x4c, 0xfa, 0x28, 0x7e, 0x4e, 0x04, 0xf3, 0xb9, 0x13, 0x8f, - 0xf0, 0x98, 0x1f, 0x8d, 0x41, 0x94, 0xda, 0xe1, 0xc7, 0xa0, 0x20, 0x1c, 0xe6, 0x91, 0x78, 0x0a, - 0x5f, 0x55, 0xd7, 0x3e, 0x50, 0xc0, 0x49, 0x60, 0x95, 0xf4, 0x1f, 0xba, 0x1d, 0x22, 0xa7, 0xfa, - 0x2f, 0x06, 0x80, 0xf3, 0x83, 0x1e, 0x7e, 0x01, 0x00, 0x4b, 0x4e, 0xb1, 0x24, 0x4b, 0xd7, 0x52, - 0x82, 0x9e, 0x04, 0xd6, 0x6a, 0x72, 0xd2, 0x94, 0x13, 0x21, 0xf0, 0x1b, 0x90, 0x57, 0xcb, 0x21, - 0xde, 0x6e, 0x1f, 0x66, 0x5e, 0x38, 0xe9, 0xca, 0x54, 0x27, 0xa4, 0x49, 0xea, 0x3f, 0x1b, 0xe0, - 0xca, 0x01, 0xe1, 0x23, 0xea, 0x10, 0x44, 0xba, 0x84, 0x13, 0xd7, 0x21, 0xd0, 0x06, 0xa5, 0x64, - 0xf8, 0xc6, 0xeb, 0x76, 0x23, 0x8e, 0x2d, 0x25, 0x83, 0x1a, 0xa5, 0x3e, 0xc9, 0x6a, 0x5e, 0x3e, - 0x75, 0x35, 0xdf, 0x00, 0x79, 0x0f, 0xcb, 0xbe, 0x99, 0xd3, 0x1e, 0x45, 0x65, 0xdd, 0xc7, 0xb2, - 0x8f, 0x34, 0xaa, 0xad, 0x8c, 0x4b, 0xfd, 0xae, 0x85, 0xd8, 0xca, 0xb8, 0x44, 0x1a, 0xad, 0xff, - 0xb1, 0x02, 0x36, 0x9e, 0xe2, 0x01, 0xed, 0x5c, 0x7e, 0x0e, 0x5c, 0x7e, 0x0e, 0x9c, 0xf9, 0x39, - 0x00, 0x2e, 0x3f, 0x07, 0xce, 0xf5, 0x39, 0xb0, 0x60, 0x59, 0x97, 0x2f, 0x62, 0x59, 0xff, 0x69, - 0x80, 0xea, 0x5c, 0x67, 0x5f, 0xf4, 0xba, 0xfe, 0x76, 0x6e, 0x5d, 0xdf, 0xcb, 0xa4, 0x7a, 0xee, - 0xe2, 0x73, 0x0b, 0xfb, 0x5f, 0x03, 0xd4, 0xcf, 0x96, 0x77, 0x01, 0x2b, 0xbb, 0x3f, 0xbd, 0xb2, - 0x5b, 0xe7, 0xd3, 0x96, 0x65, 0x69, 0xff, 0x66, 0x80, 0xb7, 0x17, 0xcc, 0x4d, 0xf8, 0x1e, 0xc8, - 0xf9, 0x7c, 0x10, 0x8f, 0xfe, 0x95, 0x30, 0xb0, 0x72, 0x4f, 0xd0, 0x2e, 0x52, 0x18, 0x7c, 0x0e, - 0x56, 0x44, 0xb4, 0x7d, 0x62, 0xe5, 0x9f, 0x66, 0xba, 0xde, 0xec, 0xc6, 0x6a, 0x96, 0xc3, 0xc0, - 0x5a, 0x19, 0xa3, 0x63, 0x4a, 0xb8, 0x09, 0x8a, 0x0e, 0x6e, 0xfa, 0x6e, 0x27, 0xde, 0x96, 0x95, - 0x66, 0x45, 0x3d, 0x52, 0x6b, 0x3b, 0xc2, 0x50, 0x62, 0x6d, 0xee, 0xbc, 0x3e, 0xae, 0x2e, 0xbd, - 0x39, 0xae, 0x2e, 0x1d, 0x1d, 0x57, 0x97, 0x7e, 0x0c, 0xab, 0xc6, 0xeb, 0xb0, 0x6a, 0xbc, 0x09, - 0xab, 0xc6, 0x51, 0x58, 0x35, 0xfe, 0x0a, 0xab, 0xc6, 0x4f, 0x7f, 0x57, 0x97, 0xbe, 0xbb, 0x99, - 0xe1, 0xbf, 0x04, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x7f, 0xe1, 0x3a, 0x73, 0x64, 0x10, 0x00, - 0x00, + proto.RegisterFile("k8s.io/api/admissionregistration/v1/generated.proto", fileDescriptor_3205c7dc5bf0c9bf) +} + +var fileDescriptor_3205c7dc5bf0c9bf = []byte{ + // 2075 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1b, 0xc7, + 0x15, 0xf7, 0x8a, 0x94, 0x44, 0x3e, 0xea, 0x8b, 0x13, 0x27, 0xa2, 0x1d, 0x87, 0x2b, 0x6c, 0x82, + 0xc2, 0x46, 0x63, 0x32, 
0xb2, 0x53, 0x27, 0x08, 0x8a, 0x06, 0xa2, 0xfc, 0x01, 0xc5, 0x96, 0x2d, + 0x8c, 0x12, 0xa9, 0x68, 0xdd, 0x22, 0xab, 0xdd, 0x21, 0xb9, 0x11, 0xb9, 0xbb, 0xd8, 0xd9, 0x65, + 0xac, 0x9e, 0x8a, 0xf6, 0x5e, 0x14, 0xe8, 0x5f, 0xd0, 0xfe, 0x09, 0xbd, 0xb4, 0x40, 0x4f, 0xbd, + 0xf9, 0x52, 0x20, 0x3d, 0xd5, 0x87, 0x62, 0x51, 0xb3, 0x97, 0x1e, 0x7a, 0x68, 0xaf, 0x02, 0x8a, + 0x16, 0x33, 0x3b, 0xfb, 0xc9, 0xa5, 0xb5, 0x96, 0x6d, 0xf5, 0xe2, 0x9b, 0xf6, 0x7d, 0xfc, 0xde, + 0xbc, 0x37, 0x6f, 0xe6, 0xbd, 0x79, 0x14, 0x5c, 0x3f, 0xfc, 0x98, 0xb6, 0x0c, 0xab, 0xad, 0xda, + 0x46, 0x5b, 0xd5, 0x87, 0x06, 0xa5, 0x86, 0x65, 0x3a, 0xa4, 0x67, 0x50, 0xd7, 0x51, 0x5d, 0xc3, + 0x32, 0xdb, 0xa3, 0xf5, 0x76, 0x8f, 0x98, 0xc4, 0x51, 0x5d, 0xa2, 0xb7, 0x6c, 0xc7, 0x72, 0x2d, + 0xf4, 0x6e, 0xa0, 0xd4, 0x52, 0x6d, 0xa3, 0x95, 0xab, 0xd4, 0x1a, 0xad, 0x5f, 0xbc, 0xda, 0x33, + 0xdc, 0xbe, 0x77, 0xd0, 0xd2, 0xac, 0x61, 0xbb, 0x67, 0xf5, 0xac, 0x36, 0xd7, 0x3d, 0xf0, 0xba, + 0xfc, 0x8b, 0x7f, 0xf0, 0xbf, 0x02, 0xcc, 0x8b, 0x1f, 0xc6, 0x0b, 0x19, 0xaa, 0x5a, 0xdf, 0x30, + 0x89, 0x73, 0xd4, 0xb6, 0x0f, 0x7b, 0x8c, 0x40, 0xdb, 0x43, 0xe2, 0xaa, 0x39, 0x2b, 0xb9, 0xd8, + 0x9e, 0xa6, 0xe5, 0x78, 0xa6, 0x6b, 0x0c, 0xc9, 0x84, 0xc2, 0x8d, 0x93, 0x14, 0xa8, 0xd6, 0x27, + 0x43, 0x35, 0xab, 0xa7, 0x50, 0x58, 0xde, 0xf0, 0x74, 0xc3, 0xdd, 0x30, 0x4d, 0xcb, 0xe5, 0x3e, + 0xa2, 0x77, 0xa0, 0x74, 0x48, 0x8e, 0x1a, 0xd2, 0x9a, 0x74, 0xb9, 0xda, 0xa9, 0x3d, 0xf6, 0xe5, + 0x73, 0x63, 0x5f, 0x2e, 0xdd, 0x25, 0x47, 0x98, 0xd1, 0xd1, 0x06, 0x2c, 0x8f, 0xd4, 0x81, 0x47, + 0x6e, 0x3d, 0xb2, 0x1d, 0xc2, 0x23, 0xd4, 0x98, 0xe1, 0xa2, 0xab, 0x42, 0x74, 0x79, 0x2f, 0xcd, + 0xc6, 0x59, 0x79, 0x65, 0x00, 0xf5, 0xf8, 0x6b, 0x5f, 0x75, 0x4c, 0xc3, 0xec, 0xa1, 0xf7, 0xa1, + 0xd2, 0x35, 0xc8, 0x40, 0xc7, 0xa4, 0x2b, 0x00, 0x57, 0x04, 0x60, 0xe5, 0xb6, 0xa0, 0xe3, 0x48, + 0x02, 0x5d, 0x81, 0xf9, 0xaf, 0x03, 0xc5, 0x46, 0x89, 0x0b, 0x2f, 0x0b, 0xe1, 0x79, 0x81, 0x87, + 0x43, 0xbe, 0xd2, 0x85, 0xa5, 0x6d, 0xd5, 0xd5, 0xfa, 0x9b, 0x96, 0xa9, 0x1b, 0xdc, 0xc3, 0x35, + 0x28, 0x9b, 0xea, 0x90, 0x08, 0x17, 0x17, 0x84, 0x66, 0xf9, 0xbe, 0x3a, 0x24, 0x98, 0x73, 0xd0, + 0x35, 0x00, 0x92, 0xf5, 0x0f, 0x09, 0x39, 0x48, 0xb8, 0x96, 0x90, 0x52, 0xfe, 0x54, 0x16, 0x86, + 0x30, 0xa1, 0x96, 0xe7, 0x68, 0x84, 0xa2, 0x47, 0x50, 0x67, 0x70, 0xd4, 0x56, 0x35, 0xb2, 0x4b, + 0x06, 0x44, 0x73, 0x2d, 0x87, 0x5b, 0xad, 0x5d, 0xbb, 0xde, 0x8a, 0x93, 0x2d, 0xda, 0xb1, 0x96, + 0x7d, 0xd8, 0x63, 0x04, 0xda, 0x62, 0x89, 0xd1, 0x1a, 0xad, 0xb7, 0xee, 0xa9, 0x07, 0x64, 0x10, + 0xaa, 0x76, 0xde, 0x1c, 0xfb, 0x72, 0xfd, 0x7e, 0x16, 0x11, 0x4f, 0x1a, 0x41, 0x16, 0x2c, 0x59, + 0x07, 0x5f, 0x11, 0xcd, 0x8d, 0xcc, 0xce, 0x9c, 0xde, 0x2c, 0x1a, 0xfb, 0xf2, 0xd2, 0x83, 0x14, + 0x1c, 0xce, 0xc0, 0xa3, 0x23, 0x58, 0x74, 0x84, 0xdf, 0xd8, 0x1b, 0x10, 0xda, 0x28, 0xad, 0x95, + 0x2e, 0xd7, 0xae, 0x7d, 0xb7, 0x55, 0xe0, 0x4c, 0xb5, 0x98, 0x4b, 0x3a, 0x53, 0xdb, 0x37, 0xdc, + 0xfe, 0x03, 0x9b, 0x04, 0x1c, 0xda, 0x79, 0x53, 0x84, 0x7c, 0x11, 0x27, 0xa1, 0x71, 0xda, 0x12, + 0xfa, 0x85, 0x04, 0xe7, 0xc9, 0x23, 0x6d, 0xe0, 0xe9, 0x24, 0x25, 0xd7, 0x28, 0xbf, 0x84, 0x25, + 0x5c, 0x12, 0x4b, 0x38, 0x7f, 0x2b, 0xc7, 0x02, 0xce, 0xb5, 0x8b, 0x6e, 0x42, 0x6d, 0xc8, 0x12, + 0x61, 0xc7, 0x1a, 0x18, 0xda, 0x51, 0x63, 0x9e, 0xa7, 0x8f, 0x32, 0xf6, 0xe5, 0xda, 0x76, 0x4c, + 0x3e, 0xf6, 0xe5, 0xe5, 0xc4, 0xe7, 0xe7, 0x47, 0x36, 0xc1, 0x49, 0x35, 0xe5, 0x77, 0x15, 0x58, + 0xde, 0xf6, 0xd8, 0xa1, 0x34, 0x7b, 0xfb, 0xe4, 0xa0, 0x6f, 0x59, 0x87, 0x05, 0x32, 0xd7, 0x81, + 0x05, 0x6d, 0x60, 0x10, 0xd3, 0xdd, 0xb4, 0xcc, 
0xae, 0xd1, 0x13, 0xdb, 0xfe, 0x71, 0xa1, 0x18, + 0x08, 0x2b, 0x9b, 0x09, 0xfd, 0xce, 0x79, 0x61, 0x63, 0x21, 0x49, 0xc5, 0x29, 0x1b, 0xe8, 0x21, + 0xcc, 0x3a, 0x89, 0x3d, 0xff, 0xa8, 0x90, 0xb1, 0x9c, 0x58, 0x2f, 0x0a, 0x5b, 0xb3, 0x41, 0x70, + 0x03, 0x50, 0x74, 0x0f, 0x16, 0xbb, 0xaa, 0x31, 0xf0, 0x1c, 0x22, 0xe2, 0x59, 0xe6, 0xce, 0x7f, + 0x8b, 0xe5, 0xc5, 0xed, 0x24, 0xe3, 0xd8, 0x97, 0xeb, 0x29, 0x02, 0x8f, 0x69, 0x5a, 0x39, 0xbb, + 0x37, 0xd5, 0x53, 0xed, 0x4d, 0xfe, 0xc1, 0x9e, 0xfd, 0xff, 0x1c, 0xec, 0xda, 0xab, 0x3d, 0xd8, + 0x37, 0xa1, 0x46, 0x0d, 0x9d, 0xdc, 0xea, 0x76, 0x89, 0xe6, 0xd2, 0xc6, 0x5c, 0x1c, 0xb0, 0xdd, + 0x98, 0xcc, 0x02, 0x16, 0x7f, 0x6e, 0x0e, 0x54, 0x4a, 0x71, 0x52, 0x0d, 0x7d, 0x02, 0x4b, 0xac, + 0x0c, 0x59, 0x9e, 0xbb, 0x4b, 0x34, 0xcb, 0xd4, 0x29, 0x3f, 0x15, 0xb3, 0xc1, 0x0a, 0x3e, 0x4f, + 0x71, 0x70, 0x46, 0x12, 0x7d, 0x01, 0xab, 0x51, 0x16, 0x61, 0x32, 0x32, 0xc8, 0xd7, 0x7b, 0xc4, + 0x61, 0x1f, 0xb4, 0x51, 0x59, 0x2b, 0x5d, 0xae, 0x76, 0xde, 0x1e, 0xfb, 0xf2, 0xea, 0x46, 0xbe, + 0x08, 0x9e, 0xa6, 0x8b, 0xbe, 0x04, 0xe4, 0x10, 0xc3, 0x1c, 0x59, 0x1a, 0x4f, 0x3f, 0x91, 0x10, + 0xc0, 0xfd, 0xfb, 0x60, 0xec, 0xcb, 0x08, 0x4f, 0x70, 0x8f, 0x7d, 0xf9, 0xad, 0x49, 0x2a, 0x4f, + 0x8f, 0x1c, 0x2c, 0x34, 0x82, 0xe5, 0x61, 0xaa, 0xf2, 0xd0, 0xc6, 0x02, 0x3f, 0x21, 0xd7, 0x0b, + 0x9d, 0x90, 0x74, 0xd5, 0x8a, 0xeb, 0x6b, 0x9a, 0x4e, 0x71, 0xd6, 0x88, 0xf2, 0x44, 0x82, 0x4b, + 0x99, 0x9b, 0x23, 0x38, 0xa9, 0x5e, 0x00, 0x8e, 0xbe, 0x84, 0x0a, 0x4b, 0x08, 0x5d, 0x75, 0x55, + 0x51, 0x8e, 0x3e, 0x28, 0x96, 0x3e, 0x41, 0xae, 0x6c, 0x13, 0x57, 0x8d, 0xcb, 0x61, 0x4c, 0xc3, + 0x11, 0x2a, 0xda, 0x83, 0x8a, 0xb0, 0x4c, 0x1b, 0x33, 0xdc, 0xe7, 0x0f, 0x8b, 0xf9, 0x9c, 0x5e, + 0x76, 0xa7, 0xcc, 0xac, 0xe0, 0x08, 0x4b, 0xf9, 0x87, 0x04, 0x6b, 0xcf, 0x72, 0xed, 0x9e, 0x41, + 0x5d, 0xf4, 0x70, 0xc2, 0xbd, 0x56, 0xc1, 0xd3, 0x61, 0xd0, 0xc0, 0xb9, 0xa8, 0xf5, 0x08, 0x29, + 0x09, 0xd7, 0xba, 0x30, 0x6b, 0xb8, 0x64, 0x18, 0xfa, 0xb5, 0x71, 0x1a, 0xbf, 0x52, 0x6b, 0x8e, + 0xef, 0xbd, 0x2d, 0x86, 0x8b, 0x03, 0x78, 0xb6, 0x8b, 0xab, 0x53, 0xaa, 0x12, 0xfa, 0x28, 0xae, + 0xb6, 0xfc, 0xd6, 0x68, 0x48, 0xfc, 0x20, 0xd4, 0x93, 0xb5, 0x92, 0x33, 0x70, 0x5a, 0x0e, 0xfd, + 0x5c, 0x02, 0xe4, 0x4c, 0xe0, 0x89, 0x2a, 0x71, 0xea, 0x8b, 0xfb, 0xa2, 0x70, 0x00, 0x4d, 0xf2, + 0x70, 0x8e, 0x39, 0x45, 0x85, 0xea, 0x8e, 0xea, 0xa8, 0xc3, 0xbb, 0x86, 0xa9, 0xb3, 0x5e, 0x4b, + 0xb5, 0x0d, 0x71, 0x2c, 0x45, 0x65, 0x8b, 0x92, 0x6b, 0x63, 0x67, 0x4b, 0x70, 0x70, 0x42, 0x8a, + 0xd5, 0xc1, 0x43, 0xc3, 0xd4, 0x45, 0x67, 0x16, 0xd5, 0x41, 0x86, 0x87, 0x39, 0x47, 0xf9, 0xed, + 0x0c, 0x54, 0xb8, 0x0d, 0xd6, 0x2d, 0x9e, 0x5c, 0x36, 0xdb, 0x50, 0x8d, 0xee, 0x5a, 0x81, 0x5a, + 0x17, 0x62, 0xd5, 0xe8, 0x5e, 0xc6, 0xb1, 0x0c, 0xfa, 0x11, 0x54, 0x68, 0x78, 0x03, 0x97, 0x4e, + 0x7f, 0x03, 0x2f, 0xb0, 0x24, 0x8b, 0xee, 0xde, 0x08, 0x12, 0xb9, 0xb0, 0x6a, 0xb3, 0xd5, 0x13, + 0x97, 0x38, 0xf7, 0x2d, 0xf7, 0xb6, 0xe5, 0x99, 0xfa, 0x86, 0xc6, 0xa2, 0x27, 0xca, 0xdf, 0x27, + 0xec, 0xce, 0xdb, 0xc9, 0x17, 0x39, 0xf6, 0xe5, 0xb7, 0xa7, 0xb0, 0xf8, 0x5d, 0x35, 0x0d, 0x5a, + 0xf9, 0xa3, 0x04, 0x65, 0xb6, 0x85, 0xe8, 0xdb, 0x50, 0x55, 0x6d, 0xe3, 0x8e, 0x63, 0x79, 0x76, + 0x98, 0x5b, 0x8b, 0x2c, 0x14, 0x1b, 0x3b, 0x5b, 0x01, 0x11, 0xc7, 0x7c, 0xb4, 0x0e, 0xb5, 0x78, + 0x6b, 0x82, 0x63, 0x51, 0xed, 0x2c, 0xb3, 0x0a, 0x11, 0xef, 0x1e, 0xc5, 0x49, 0x19, 0x86, 0x1f, + 0xe6, 0x65, 0xd0, 0x35, 0x08, 0xfc, 0xa8, 0x75, 0xc6, 0x31, 0x1f, 0xbd, 0x0f, 0xb3, 0x54, 0xb3, + 0x6c, 0x22, 0x3c, 0x7f, 0x8b, 0x9d, 0x94, 0x5d, 0x46, 0x38, 0xf6, 0xe5, 
0x2a, 0xff, 0x83, 0x7b, + 0x15, 0x08, 0x29, 0xbf, 0x91, 0x20, 0x27, 0x0d, 0xd1, 0xa7, 0x00, 0x56, 0x9c, 0xef, 0x81, 0x4b, + 0x32, 0xbf, 0xbe, 0x22, 0xea, 0xb1, 0x2f, 0x2f, 0x46, 0x5f, 0x1c, 0x32, 0xa1, 0x82, 0xee, 0x42, + 0x99, 0x65, 0xb2, 0x38, 0x2a, 0x57, 0x0a, 0x1f, 0x95, 0x38, 0xdd, 0xd8, 0x17, 0xe6, 0x20, 0xca, + 0xaf, 0x25, 0x58, 0xd9, 0x25, 0xce, 0xc8, 0xd0, 0x08, 0x26, 0x5d, 0xe2, 0x10, 0x53, 0xcb, 0xe4, + 0xa0, 0x54, 0x20, 0x07, 0xc3, 0xb4, 0x9e, 0x99, 0x9a, 0xd6, 0x97, 0xa0, 0x6c, 0xab, 0x6e, 0x5f, + 0xbc, 0x91, 0x2a, 0x8c, 0xbb, 0xa3, 0xba, 0x7d, 0xcc, 0xa9, 0x9c, 0x6b, 0x39, 0x2e, 0x8f, 0xeb, + 0xac, 0xe0, 0x5a, 0x8e, 0x8b, 0x39, 0x55, 0xf9, 0x95, 0x04, 0x0b, 0x2c, 0x0a, 0x9b, 0x7d, 0xa2, + 0x1d, 0xb2, 0x17, 0xda, 0xcf, 0x24, 0x40, 0x24, 0xfb, 0x6e, 0x0b, 0x62, 0x59, 0xbb, 0x76, 0xa3, + 0x50, 0x40, 0x26, 0x9e, 0x7d, 0xf1, 0xd5, 0x31, 0xc1, 0xa2, 0x38, 0xc7, 0x9a, 0xf2, 0xe7, 0x19, + 0xb8, 0xb0, 0xa7, 0x0e, 0x0c, 0x9d, 0x5f, 0xa7, 0x51, 0xd1, 0x17, 0x15, 0xf7, 0xd5, 0x17, 0x36, + 0x1d, 0xca, 0xd4, 0x26, 0x9a, 0x48, 0x83, 0x4e, 0x21, 0xaf, 0xa7, 0xae, 0x77, 0xd7, 0x26, 0x5a, + 0xbc, 0x6f, 0xec, 0x0b, 0x73, 0x74, 0x34, 0x80, 0x39, 0xea, 0xaa, 0xae, 0x47, 0xc5, 0xdd, 0x72, + 0xf3, 0x05, 0xed, 0x70, 0xac, 0xce, 0x92, 0xb0, 0x34, 0x17, 0x7c, 0x63, 0x61, 0x43, 0xf9, 0xb7, + 0x04, 0x6b, 0x53, 0x75, 0x3b, 0x86, 0xa9, 0xb3, 0xdd, 0x7f, 0xf5, 0xa1, 0x3d, 0x4c, 0x85, 0x76, + 0xeb, 0xc5, 0x5c, 0x16, 0xcb, 0x9e, 0x16, 0x61, 0xe5, 0x5f, 0x12, 0xbc, 0x77, 0x92, 0xf2, 0x19, + 0x34, 0x13, 0x5f, 0xa5, 0x9b, 0x89, 0x5b, 0x2f, 0xc5, 0xe9, 0x29, 0x0d, 0xc5, 0x7f, 0x66, 0x4e, + 0x76, 0x99, 0x45, 0x88, 0x55, 0x64, 0x9b, 0x13, 0xef, 0xc7, 0x45, 0x33, 0xda, 0xba, 0x9d, 0x88, + 0x83, 0x13, 0x52, 0x68, 0x1f, 0x2a, 0xb6, 0x28, 0xb7, 0x62, 0x03, 0xaf, 0x16, 0xf2, 0x25, 0xac, + 0xd1, 0x41, 0x25, 0x0c, 0xbf, 0x70, 0x04, 0xc6, 0x1e, 0x3c, 0xc3, 0xd4, 0x54, 0x25, 0xa7, 0xdc, + 0x9e, 0xd0, 0x43, 0x47, 0xaa, 0xc1, 0x73, 0x23, 0x4d, 0xc3, 0x19, 0x78, 0xb4, 0x0f, 0xf5, 0x91, + 0x88, 0x92, 0x65, 0x06, 0x85, 0x31, 0x18, 0x25, 0x54, 0x3b, 0x57, 0xd8, 0x33, 0x6d, 0x2f, 0xcb, + 0x3c, 0xf6, 0xe5, 0x95, 0x2c, 0x11, 0x4f, 0x62, 0x28, 0x63, 0x09, 0xde, 0x99, 0x1a, 0xff, 0x33, + 0xc8, 0x35, 0x2d, 0x9d, 0x6b, 0xdf, 0x7b, 0xc1, 0x5c, 0x9b, 0x92, 0x64, 0xb3, 0xcf, 0x70, 0x92, + 0x67, 0xd7, 0x0f, 0xa1, 0x6a, 0x87, 0xcd, 0x5f, 0x8e, 0x97, 0x27, 0xa4, 0x0a, 0xd3, 0x0a, 0x7a, + 0x85, 0xe8, 0x13, 0xc7, 0x78, 0xc8, 0x83, 0x95, 0xf0, 0x35, 0xc4, 0x54, 0x0d, 0xd3, 0xa5, 0x39, + 0x93, 0xaf, 0xc2, 0xf9, 0x72, 0x7e, 0xec, 0xcb, 0x2b, 0xdb, 0x19, 0x40, 0x3c, 0x61, 0x02, 0x75, + 0xa1, 0x16, 0xef, 0x77, 0x38, 0x07, 0x69, 0x3f, 0x57, 0x80, 0x2d, 0xb3, 0xf3, 0x86, 0x88, 0x68, + 0x2d, 0xa6, 0x51, 0x9c, 0x04, 0x7e, 0xc9, 0xb3, 0x90, 0x9f, 0xc0, 0x8a, 0x9a, 0x1e, 0xfe, 0xd2, + 0xc6, 0xec, 0x73, 0x3c, 0xd6, 0x32, 0x93, 0xe3, 0x4e, 0x43, 0xac, 0x7f, 0x25, 0xc3, 0xa0, 0x78, + 0xc2, 0x4e, 0xde, 0xdb, 0x78, 0xee, 0x0c, 0xde, 0xc6, 0xe8, 0xc7, 0x50, 0x1d, 0xa9, 0x8e, 0xa1, + 0x1e, 0x0c, 0x08, 0x6d, 0xcc, 0x73, 0x8b, 0x57, 0x0b, 0xee, 0x53, 0xa0, 0x15, 0xf7, 0x64, 0x21, + 0x85, 0xe2, 0x18, 0x52, 0xf9, 0xc3, 0x0c, 0xc8, 0x27, 0xd4, 0x61, 0xf4, 0x19, 0x20, 0xeb, 0x80, + 0x12, 0x67, 0x44, 0xf4, 0x3b, 0xc1, 0x3c, 0x3e, 0x7c, 0xf9, 0x94, 0xe2, 0x7e, 0xe8, 0xc1, 0x84, + 0x04, 0xce, 0xd1, 0x42, 0x3d, 0x58, 0x70, 0x13, 0x4d, 0x9a, 0x48, 0xf6, 0xf5, 0x42, 0x2e, 0x25, + 0xbb, 0xbb, 0xce, 0xca, 0xd8, 0x97, 0x53, 0xfd, 0x1e, 0x4e, 0x01, 0x23, 0x0d, 0x40, 0x8b, 0xf7, + 0x6a, 0x32, 0xc3, 0x9f, 0x71, 0x3b, 0xc5, 0xfb, 0x14, 0x55, 0x91, 0xc4, 0x16, 0x25, 0x60, 0x95, + 
0xbf, 0xcc, 0x43, 0x3d, 0x8e, 0xde, 0xeb, 0xa9, 0xe7, 0xeb, 0xa9, 0xe7, 0xb4, 0xa9, 0x27, 0xbc, + 0x9e, 0x7a, 0x9e, 0x6a, 0xea, 0x99, 0x73, 0xef, 0xd6, 0xce, 0x62, 0x26, 0xf9, 0x57, 0x09, 0x9a, + 0x13, 0x27, 0xfb, 0xac, 0xa7, 0x92, 0xdf, 0x9f, 0x98, 0x4a, 0xde, 0x78, 0xce, 0x26, 0x68, 0xda, + 0x5c, 0xf2, 0x9f, 0x12, 0x28, 0xcf, 0x76, 0xef, 0x0c, 0x1a, 0xbc, 0x7e, 0xba, 0xc1, 0xdb, 0x3c, + 0x9d, 0x6f, 0x45, 0x66, 0x93, 0xff, 0x95, 0x00, 0xe2, 0x26, 0x05, 0xbd, 0x07, 0x89, 0x1f, 0x45, + 0xc5, 0x35, 0x1d, 0x44, 0x28, 0x41, 0x47, 0x57, 0x60, 0x7e, 0x48, 0x28, 0x55, 0x7b, 0xe1, 0xc4, + 0x22, 0xfa, 0xcd, 0x76, 0x3b, 0x20, 0xe3, 0x90, 0x8f, 0xf6, 0x61, 0xce, 0x21, 0x2a, 0xb5, 0x4c, + 0x31, 0xb9, 0xf8, 0x94, 0xbd, 0x5a, 0x31, 0xa7, 0x1c, 0xfb, 0xf2, 0x7a, 0x91, 0xdf, 0xd4, 0x5b, + 0xe2, 0x91, 0xcb, 0x95, 0xb0, 0x80, 0x43, 0x77, 0xa0, 0x2e, 0x6c, 0x24, 0x16, 0x1c, 0x5c, 0xad, + 0x17, 0xc4, 0x6a, 0xea, 0xdb, 0x59, 0x01, 0x3c, 0xa9, 0xa3, 0x7c, 0x06, 0x95, 0xb0, 0xfe, 0xa3, + 0x06, 0x94, 0x13, 0x2f, 0xa5, 0xc0, 0x71, 0x4e, 0xc9, 0x04, 0x66, 0x26, 0x3f, 0x30, 0xca, 0xef, + 0x25, 0x78, 0x23, 0xa7, 0x0a, 0xa1, 0x0b, 0x50, 0xf2, 0x9c, 0x81, 0x08, 0xc1, 0xfc, 0xd8, 0x97, + 0x4b, 0x5f, 0xe0, 0x7b, 0x98, 0xd1, 0xd0, 0x43, 0x98, 0xa7, 0xc1, 0xfc, 0x48, 0xe4, 0xd1, 0x77, + 0x0a, 0x6d, 0x76, 0x76, 0xe6, 0xd4, 0xa9, 0xb1, 0xf0, 0x87, 0xd4, 0x10, 0x12, 0x5d, 0x86, 0x8a, + 0xa6, 0x76, 0x3c, 0x53, 0x17, 0xf3, 0xae, 0x85, 0xe0, 0x75, 0xb6, 0xb9, 0x11, 0xd0, 0x70, 0xc4, + 0xed, 0x6c, 0x3d, 0x7e, 0xda, 0x3c, 0xf7, 0xcd, 0xd3, 0xe6, 0xb9, 0x27, 0x4f, 0x9b, 0xe7, 0x7e, + 0x3a, 0x6e, 0x4a, 0x8f, 0xc7, 0x4d, 0xe9, 0x9b, 0x71, 0x53, 0x7a, 0x32, 0x6e, 0x4a, 0x7f, 0x1b, + 0x37, 0xa5, 0x5f, 0xfe, 0xbd, 0x79, 0xee, 0x07, 0xef, 0x16, 0xf8, 0x6f, 0x8c, 0xff, 0x05, 0x00, + 0x00, 0xff, 0xff, 0x1e, 0x59, 0xab, 0xd9, 0xb3, 0x21, 0x00, 0x00, +} + +func (m *AuditAnnotation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditAnnotation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditAnnotation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ValueExpression) + copy(dAtA[i:], m.ValueExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValueExpression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExpressionWarning) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExpressionWarning) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExpressionWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Warning) + copy(dAtA[i:], m.Warning) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Warning))) + i-- + dAtA[i] = 0x1a + i -= len(m.FieldRef) + copy(dAtA[i:], m.FieldRef) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldRef))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil } func (m *MatchCondition) Marshal() (dAtA []byte, err error) { @@ -481,6 +1068,88 @@ func (m *MatchCondition) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { return len(dAtA) - i, nil } +func (m *MatchResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MatchResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x3a + } + if len(m.ExcludeResourceRules) > 0 { + for iNdEx := len(m.ExcludeResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ExcludeResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.ResourceRules) > 0 { + for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -707,6 +1376,133 @@ func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (in return len(dAtA) - i, nil } +func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamedRuleWithOperations) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.RuleWithOperations.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ParamKind) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParamKind) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParamKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + 
i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ParamRef) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParamRef) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ParameterNotFoundAction != nil { + i -= len(*m.ParameterNotFoundAction) + copy(dAtA[i:], *m.ParameterNotFoundAction) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ParameterNotFoundAction))) + i-- + dAtA[i] = 0x22 + } + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *Rule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -851,7 +1647,7 @@ func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { +func (m *TypeChecking) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -861,20 +1657,20 @@ func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) { +func (m *TypeChecking) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.MatchConditions) > 0 { - for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + if len(m.ExpressionWarnings) > 0 { + for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -882,84 +1678,87 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x5a - } - } - if m.ObjectSelector != nil { - { - size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa } - i-- - dAtA[i] = 0x52 } - if m.MatchPolicy != nil { - i -= len(*m.MatchPolicy) - copy(dAtA[i:], *m.MatchPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) - i-- - dAtA[i] = 0x4a + return len(dAtA) - i, nil +} + +func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.AdmissionReviewVersions) > 0 { - for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AdmissionReviewVersions[iNdEx]) - copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) - i-- - dAtA[i] = 0x42 + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.TimeoutSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) - i-- - dAtA[i] = 0x38 - } - if m.SideEffects != nil { - i -= len(*m.SideEffects) - copy(dAtA[i:], *m.SideEffects) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) - i-- - dAtA[i] = 0x32 - } - if m.NamespaceSelector != nil { - { - size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x2a - } - if m.FailurePolicy != nil { - i -= len(*m.FailurePolicy) - copy(dAtA[i:], *m.FailurePolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) - i-- - dAtA[i] = 0x22 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.Rules) > 0 { - for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l { - size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -968,15 +1767,20 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { +func (m *ValidatingAdmissionPolicyBindingList) 
Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -986,20 +1790,20 @@ func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Webhooks) > 0 { - for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1011,7 +1815,7 @@ func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, } } { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1023,7 +1827,7 @@ func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, return len(dAtA) - i, nil } -func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { +func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1033,12 +1837,73 @@ func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) return dAtA[:n], nil } -func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ValidationActions) > 0 { + for iNdEx := len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ValidationActions[iNdEx]) + copy(dAtA[i:], m.ValidationActions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if m.MatchResources != nil { + { + size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ParamRef != nil { + { + size, err := m.ParamRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.PolicyName) + copy(dAtA[i:], m.PolicyName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -1070,7 +1935,7 @@ func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) ( return len(dAtA) - i, nil } -func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { +func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1080,33 +1945,94 @@ func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.URL != nil { - i -= len(*m.URL) - copy(dAtA[i:], *m.URL) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + if len(m.Variables) > 0 { + for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.AuditAnnotations) > 0 { + for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x22 } - if m.CABundle != nil { - i -= len(m.CABundle) - copy(dAtA[i:], m.CABundle) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + if len(m.Validations) > 0 { + for iNdEx := len(m.Validations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Validations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.MatchConstraints != nil { + { + size, err := m.MatchConstraints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0x12 } - if m.Service != nil { + if m.ParamKind != nil { { - size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ParamKind.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1119,191 +2045,488 @@ func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, 
err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *MatchCondition) Size() (n int) { - if m == nil { - return 0 - } + +func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Expression) - n += 1 + l + sovGenerated(uint64(l)) - return n + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.TypeChecking != nil { + { + size, err := m.TypeChecking.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *MutatingWebhook) Size() (n int) { - if m == nil { - return 0 +func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ClientConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a } } - if m.FailurePolicy != nil { - l = len(*m.FailurePolicy) - n += 1 + l + sovGenerated(uint64(l)) + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 } - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a } - if m.SideEffects != nil { - l = len(*m.SideEffects) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.AdmissionReviewVersions) > 0 { + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 + } } if m.TimeoutSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + i = 
encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 } - if len(m.AdmissionReviewVersions) > 0 { - for _, s := range m.AdmissionReviewVersions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 } - if m.MatchPolicy != nil { - l = len(*m.MatchPolicy) - n += 1 + l + sovGenerated(uint64(l)) + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - if m.ReinvocationPolicy != nil { - l = len(*m.ReinvocationPolicy) - n += 1 + l + sovGenerated(uint64(l)) + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 } - if m.ObjectSelector != nil { - l = m.ObjectSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } - if len(m.MatchConditions) > 0 { - for _, e := range m.MatchConditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *MutatingWebhookConfiguration) Size() (n int) { - if m == nil { - return 0 +func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) if len(m.Webhooks) > 0 { - for _, e := range m.Webhooks { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - return n + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *MutatingWebhookConfigurationList) Size() (n int) { - if m == nil { - return 0 +func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA 
[]byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - return n + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Rule) Size() (n int) { - if m == nil { - return 0 +func (m *Validation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *Validation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + i -= len(m.MessageExpression) + copy(dAtA[i:], m.MessageExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression))) + i-- + dAtA[i] = 0x22 + if m.Reason != nil { + i -= len(*m.Reason) + copy(dAtA[i:], *m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) + i-- + dAtA[i] = 0x1a } - if len(m.APIVersions) > 0 { - for _, s := range m.APIVersions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Variable) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + return dAtA[:n], nil +} + +func (m *Variable) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookClientConfig) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0x1a + } + if m.CABundle != nil { + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i-- + dAtA[i] = 0x12 + } + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - if m.Scope != nil { - l = len(*m.Scope) - n += 1 + l + sovGenerated(uint64(l)) + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AuditAnnotation) Size() (n int) { + if m == nil { + return 0 } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ValueExpression) + n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *RuleWithOperations) Size() (n int) { +func (m *ExpressionWarning) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Operations) > 0 { - for _, s := range m.Operations { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = m.Rule.Size() + l = len(m.FieldRef) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Warning) n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *ServiceReference) Size() (n int) { +func (m *MatchCondition) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - if m.Path != nil { - l = len(*m.Path) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *MatchResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.Port != nil { - n += 1 + sovGenerated(uint64(*m.Port)) + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ResourceRules) > 0 { + for _, e := range m.ResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ExcludeResourceRules) > 0 { + for _, e := range m.ExcludeResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *ValidatingWebhook) Size() (n int) { +func (m *MutatingWebhook) Size() (n int) { if m == nil { return 0 } @@ -1344,6 +2567,10 @@ func (m *ValidatingWebhook) Size() (n int) { l = len(*m.MatchPolicy) n += 1 + l + sovGenerated(uint64(l)) } + if m.ReinvocationPolicy != nil { + l = len(*m.ReinvocationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } if m.ObjectSelector != nil { l = m.ObjectSelector.Size() n += 1 + l + sovGenerated(uint64(l)) @@ -1357,7 +2584,7 @@ func (m *ValidatingWebhook) Size() (n int) { return n } -func (m *ValidatingWebhookConfiguration) Size() (n int) { +func (m *MutatingWebhookConfiguration) Size() (n int) { if m == nil { return 0 } @@ -1374,7 +2601,7 @@ func (m *ValidatingWebhookConfiguration) Size() (n int) { return n } -func (m *ValidatingWebhookConfigurationList) Size() (n int) { +func (m 
*MutatingWebhookConfigurationList) Size() (n int) { if m == nil { return 0 } @@ -1391,227 +2618,2818 @@ func (m *ValidatingWebhookConfigurationList) Size() (n int) { return n } -func (m *WebhookClientConfig) Size() (n int) { +func (m *NamedRuleWithOperations) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Service != nil { - l = m.Service.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - if m.CABundle != nil { - l = len(m.CABundle) + l = m.RuleWithOperations.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ParamKind) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ParamRef) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.URL != nil { - l = len(*m.URL) + if m.ParameterNotFoundAction != nil { + l = len(*m.ParameterNotFoundAction) n += 1 + l + sovGenerated(uint64(l)) } return n } -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *MatchCondition) String() string { - if this == nil { - return "nil" +func (m *Rule) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&MatchCondition{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, - `}`, - }, "") - return s -} -func (this *MutatingWebhook) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForRules := "[]RuleWithOperations{" - for _, f := range this.Rules { - repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + if len(m.APIVersions) > 0 { + for _, s := range m.APIVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForRules += "}" - repeatedStringForMatchConditions := "[]MatchCondition{" - for _, f := range this.MatchConditions { - repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForMatchConditions += "}" - s := strings.Join([]string{`&MutatingWebhook{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, - `Rules:` + repeatedStringForRules + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `AdmissionReviewVersions:` + 
fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, - `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, - `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, - `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `MatchConditions:` + repeatedStringForMatchConditions + `,`, - `}`, - }, "") - return s + if m.Scope != nil { + l = len(*m.Scope) + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *MutatingWebhookConfiguration) String() string { - if this == nil { - return "nil" + +func (m *RuleWithOperations) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForWebhooks := "[]MutatingWebhook{" - for _, f := range this.Webhooks { - repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + "," + var l int + _ = l + if len(m.Operations) > 0 { + for _, s := range m.Operations { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForWebhooks += "}" - s := strings.Join([]string{`&MutatingWebhookConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Webhooks:` + repeatedStringForWebhooks + `,`, - `}`, - }, "") - return s + l = m.Rule.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *MutatingWebhookConfigurationList) String() string { - if this == nil { - return "nil" + +func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForItems := "[]MutatingWebhookConfiguration{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + "," + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForItems += "}" - s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *Rule) String() string { - if this == nil { - return "nil" + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) } - s := strings.Join([]string{`&Rule{`, - `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, - `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, - `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, - `Scope:` + valueToStringGenerated(this.Scope) + `,`, - `}`, - }, "") - return s + return n } -func (this *RuleWithOperations) String() string { - if this == nil { - return "nil" + +func (m *TypeChecking) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&RuleWithOperations{`, - `Operations:` + fmt.Sprintf("%v", this.Operations) + `,`, - `Rule:` + strings.Replace(strings.Replace(this.Rule.String(), "Rule", "Rule", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if len(m.ExpressionWarnings) > 0 { + for _, e := range m.ExpressionWarnings { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (this *ServiceReference) String() string { - if this == nil { - return "nil" + +func (m *ValidatingAdmissionPolicy) Size() (n int) { + 
if m == nil { + return 0 } - s := strings.Join([]string{`&ServiceReference{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Path:` + valueToStringGenerated(this.Path) + `,`, - `Port:` + valueToStringGenerated(this.Port) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ValidatingWebhook) String() string { - if this == nil { - return "nil" + +func (m *ValidatingAdmissionPolicyBinding) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForRules := "[]RuleWithOperations{" - for _, f := range this.Rules { - repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForRules += "}" - repeatedStringForMatchConditions := "[]MatchCondition{" - for _, f := range this.MatchConditions { - repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForMatchConditions += "}" - s := strings.Join([]string{`&ValidatingWebhook{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, - `Rules:` + repeatedStringForRules + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, - `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, - `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `MatchConditions:` + repeatedStringForMatchConditions + `,`, - `}`, - }, "") - return s + return n } -func (this *ValidatingWebhookConfiguration) String() string { - if this == nil { - return "nil" + +func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForWebhooks := "[]ValidatingWebhook{" - for _, f := range this.Webhooks { - repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + "," + var l int + _ = l + l = len(m.PolicyName) + n += 1 + l + sovGenerated(uint64(l)) + if m.ParamRef != nil { + l = m.ParamRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForWebhooks += "}" - s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - 
`Webhooks:` + repeatedStringForWebhooks + `,`, - `}`, - }, "") - return s -} -func (this *ValidatingWebhookConfigurationList) String() string { - if this == nil { - return "nil" + if m.MatchResources != nil { + l = m.MatchResources.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForItems := "[]ValidatingWebhookConfiguration{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + "," + if len(m.ValidationActions) > 0 { + for _, s := range m.ValidationActions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + return n +} + +func (m *ValidatingAdmissionPolicyList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicySpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ParamKind != nil { + l = m.ParamKind.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MatchConstraints != nil { + l = m.MatchConstraints.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Validations) > 0 { + for _, e := range m.Validations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AuditAnnotations) > 0 { + for _, e := range m.AuditAnnotations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Variables) > 0 { + for _, e := range m.Variables { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicyStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + if m.TypeChecking != nil { + l = m.TypeChecking.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingWebhook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SideEffects != nil { + l = len(*m.SideEffects) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if len(m.AdmissionReviewVersions) > 0 { + for _, s := range m.AdmissionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = 
len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Validation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if m.Reason != nil { + l = len(*m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.MessageExpression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Variable) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CABundle != nil { + l = len(m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.URL != nil { + l = len(*m.URL) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AuditAnnotation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AuditAnnotation{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`, `}`, }, "") return s } -func (this *WebhookClientConfig) String() string { +func (this *ExpressionWarning) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExpressionWarning{`, + `FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`, + `Warning:` + fmt.Sprintf("%v", this.Warning) + `,`, + `}`, + }, "") + return s +} +func (this *MatchCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MatchCondition{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *MatchResources) String() string { + if this == nil { + return "nil" + } + repeatedStringForResourceRules := "[]NamedRuleWithOperations{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForExcludeResourceRules := "[]NamedRuleWithOperations{" + for _, f := range this.ExcludeResourceRules { + repeatedStringForExcludeResourceRules += 
strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForExcludeResourceRules += "}" + s := strings.Join([]string{`&MatchResources{`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `ExcludeResourceRules:` + repeatedStringForExcludeResourceRules + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhook) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&WebhookClientConfig{`, - `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, - `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, - `URL:` + valueToStringGenerated(this.URL) + `,`, - `}`, - }, "") - return s + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + s := strings.Join([]string{`&MutatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + repeatedStringForWebhooks := "[]MutatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" + s := strings.Join([]string{`&MutatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]MutatingWebhookConfiguration{" + for _, f := range this.Items { + 
repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *NamedRuleWithOperations) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamedRuleWithOperations{`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `RuleWithOperations:` + strings.Replace(strings.Replace(this.RuleWithOperations.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ParamKind) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParamKind{`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *ParamRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParamRef{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ParameterNotFoundAction:` + valueToStringGenerated(this.ParameterNotFoundAction) + `,`, + `}`, + }, "") + return s +} +func (this *Rule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Rule{`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `Scope:` + valueToStringGenerated(this.Scope) + `,`, + `}`, + }, "") + return s +} +func (this *RuleWithOperations) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuleWithOperations{`, + `Operations:` + fmt.Sprintf("%v", this.Operations) + `,`, + `Rule:` + strings.Replace(strings.Replace(this.Rule.String(), "Rule", "Rule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Path:` + valueToStringGenerated(this.Path) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *TypeChecking) String() string { + if this == nil { + return "nil" + } + repeatedStringForExpressionWarnings := "[]ExpressionWarning{" + for _, f := range this.ExpressionWarnings { + repeatedStringForExpressionWarnings += strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + "," + } + repeatedStringForExpressionWarnings += "}" + s := strings.Join([]string{`&TypeChecking{`, + `ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + 
strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicyBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicyBindingSpec", "ValidatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBindingList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingAdmissionPolicyBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicyBinding", "ValidatingAdmissionPolicyBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBindingSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingSpec{`, + `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`, + `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`, + `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`, + `ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingAdmissionPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicy", "ValidatingAdmissionPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicySpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForValidations := "[]Validation{" + for _, f := range this.Validations { + repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + "," + } + repeatedStringForValidations += "}" + repeatedStringForAuditAnnotations := "[]AuditAnnotation{" + for _, f := range this.AuditAnnotations { + repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + "," + } + repeatedStringForAuditAnnotations += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), 
"MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + repeatedStringForVariables := "[]Variable{" + for _, f := range this.Variables { + repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + "," + } + repeatedStringForVariables += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`, + `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`, + `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`, + `Validations:` + repeatedStringForValidations + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `Variables:` + repeatedStringForVariables + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhook) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + s := strings.Join([]string{`&ValidatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + repeatedStringForWebhooks := "[]ValidatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + "," + } + 
repeatedStringForWebhooks += "}" + s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Validation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Validation{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + valueToStringGenerated(this.Reason) + `,`, + `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`, + `}`, + }, "") + return s +} +func (this *Variable) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Variable{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookClientConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookClientConfig{`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `URL:` + valueToStringGenerated(this.URL) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditAnnotation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValueExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValueExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExpressionWarning: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldRef = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Warning = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*MatchCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MatchResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceRules = append(m.ResourceRules, NamedRuleWithOperations{}) + if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExcludeResourceRules = append(m.ExcludeResourceRules, NamedRuleWithOperations{}) + if err := m.ExcludeResourceRules[len(m.ExcludeResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) 
< 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := SideEffectClass(dAtA[iNdEx:postIndex]) + m.SideEffects = &s + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + case 
10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ReinvocationPolicyType(dAtA[iNdEx:postIndex]) + m.ReinvocationPolicy = &s + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Webhooks = append(m.Webhooks, MutatingWebhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Items = append(m.Items, MutatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedRuleWithOperations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedRuleWithOperations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedRuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuleWithOperations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RuleWithOperations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParamKind) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParamKind: wiretype end 
group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParamKind: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParamRef) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParamRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParamRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParameterNotFoundAction", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ParameterNotFoundActionType(dAtA[iNdEx:postIndex]) + m.ParameterNotFoundAction = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Rule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Rule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, 
string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ScopeType(dAtA[iNdEx:postIndex]) + m.Scope = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" +func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } -func (m *MatchCondition) Unmarshal(dAtA []byte) error { +func (m *ServiceReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1634,13 +5452,45 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } @@ -1667,16 +5517,119 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error { if postIndex < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TypeChecking) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TypeChecking: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TypeChecking: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExpressionWarnings", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1686,23 +5639,25 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Expression = string(dAtA[iNdEx:postIndex]) + m.ExpressionWarnings = append(m.ExpressionWarnings, ExpressionWarning{}) + if err := m.ExpressionWarnings[len(m.ExpressionWarnings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -1725,7 +5680,7 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error { } return nil } -func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { +func (m *ValidatingAdmissionPolicy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1748,17 +5703,17 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhook: 
wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingAdmissionPolicy: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingAdmissionPolicy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1768,27 +5723,28 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1815,13 +5771,13 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1848,47 +5804,63 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Rules = append(m.Rules, RuleWithOperations{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicyBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - s := FailurePolicyType(dAtA[iNdEx:postIndex]) - m.FailurePolicy = &s - iNdEx = postIndex - case 5: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1915,71 +5887,15 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NamespaceSelector == nil { - m.NamespaceSelector = &v1.LabelSelector{} - } - if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := SideEffectClass(dAtA[iNdEx:postIndex]) - m.SideEffects = &s - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TimeoutSeconds = &v - case 8: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1989,93 +5905,78 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx 
>= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - s := MatchPolicyType(dAtA[iNdEx:postIndex]) - m.MatchPolicy = &s iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicyBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - s := ReinvocationPolicyType(dAtA[iNdEx:postIndex]) - m.ReinvocationPolicy = &s - iNdEx = postIndex - case 11: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2102,16 +6003,13 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ObjectSelector == nil { - m.ObjectSelector = &v1.LabelSelector{} - } - if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 12: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2138,8 +6036,8 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.MatchConditions = append(m.MatchConditions, MatchCondition{}) - if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ValidatingAdmissionPolicyBinding{}) + 
if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2164,7 +6062,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } return nil } -func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { +func (m *ValidatingAdmissionPolicyBindingSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2187,15 +6085,47 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PolicyName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PolicyName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParamRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2222,13 +6152,16 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ParamRef == nil { + m.ParamRef = &ParamRef{} + } + if err := m.ParamRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchResources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2255,11 +6188,45 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Webhooks = append(m.Webhooks, MutatingWebhook{}) - if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.MatchResources == nil { + m.MatchResources = &MatchResources{} + } + if err := m.MatchResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidationActions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex 
:= iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidationActions = append(m.ValidationActions, ValidationAction(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2281,7 +6248,7 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } return nil } -func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { +func (m *ValidatingAdmissionPolicyList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2304,10 +6271,10 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingAdmissionPolicyList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingAdmissionPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -2372,7 +6339,7 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, MutatingWebhookConfiguration{}) + m.Items = append(m.Items, ValidatingAdmissionPolicy{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2398,7 +6365,7 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } return nil } -func (m *Rule) Unmarshal(dAtA []byte) error { +func (m *ValidatingAdmissionPolicySpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2421,17 +6388,17 @@ func (m *Rule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Rule: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingAdmissionPolicySpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingAdmissionPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ParamKind", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2441,29 +6408,33 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + if m.ParamKind == nil { + m.ParamKind = &ParamKind{} + } + if err := m.ParamKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) + return 
fmt.Errorf("proto: wrong wireType = %d for field MatchConstraints", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2473,29 +6444,33 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) + if m.MatchConstraints == nil { + m.MatchConstraints = &MatchResources{} + } + if err := m.MatchConstraints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Validations", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2505,27 +6480,29 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + m.Validations = append(m.Validations, Validation{}) + if err := m.Validations[len(m.Validations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2553,64 +6530,48 @@ func (m *Rule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := ScopeType(dAtA[iNdEx:postIndex]) - m.Scope = &s + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType) } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift 
- if b < 0x80 { - break + if msglen < 0 { + return ErrInvalidLengthGenerated } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AuditAnnotations = append(m.AuditAnnotations, AuditAnnotation{}) + if err := m.AuditAnnotations[len(m.AuditAnnotations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2620,27 +6581,29 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Variables", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2667,7 +6630,8 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Variables = append(m.Variables, Variable{}) + if err := m.Variables[len(m.Variables)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2692,7 +6656,7 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } return nil } -func (m *ServiceReference) Unmarshal(dAtA []byte) error { +func (m *ValidatingAdmissionPolicyStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2715,17 +6679,17 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingAdmissionPolicyStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingAdmissionPolicyStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) } - var stringLen uint64 
+ m.ObservedGeneration = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2735,29 +6699,16 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TypeChecking", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2767,29 +6718,33 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if m.TypeChecking == nil { + m.TypeChecking = &TypeChecking{} + } + if err := m.TypeChecking.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2799,45 +6754,26 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Path = &s - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Port = &v + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3108,12 +7044,197 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { break } } - m.TimeoutSeconds = &v - case 8: + m.TimeoutSeconds = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3123,29 +7244,30 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 9: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3155,28 +7277,79 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := MatchPolicyType(dAtA[iNdEx:postIndex]) - m.MatchPolicy = &s + m.Webhooks = append(m.Webhooks, ValidatingWebhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 10: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d 
for field ObjectSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3203,16 +7376,13 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ObjectSelector == nil { - m.ObjectSelector = &v1.LabelSelector{} - } - if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 11: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3239,8 +7409,8 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.MatchConditions = append(m.MatchConditions, MatchCondition{}) - if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ValidatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3265,7 +7435,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } return nil } -func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { +func (m *Validation) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3288,17 +7458,17 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") + return fmt.Errorf("proto: Validation: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Validation: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3308,30 +7478,29 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Expression = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3341,25 +7510,88 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << 
shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Webhooks = append(m.Webhooks, ValidatingWebhook{}) - if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } + s := k8s_io_apimachinery_pkg_apis_meta_v1.StatusReason(dAtA[iNdEx:postIndex]) + m.Reason = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MessageExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MessageExpression = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -3382,7 +7614,7 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } return nil } -func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { +func (m *Variable) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3405,17 +7637,17 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") + return fmt.Errorf("proto: Variable: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Variable: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3425,30 +7657,29 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - 
postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3458,25 +7689,23 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ValidatingWebhookConfiguration{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Expression = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1/generated.proto index a8903621c8..e856e9eaf2 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.proto @@ -28,6 +28,56 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/admissionregistration/v1"; +// AuditAnnotation describes how to produce an audit annotation for an API request. +message AuditAnnotation { + // key specifies the audit annotation key. The audit annotation keys of + // a ValidatingAdmissionPolicy must be unique. The key must be a qualified + // name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length. + // + // The key is combined with the resource name of the + // ValidatingAdmissionPolicy to construct an audit annotation key: + // "{ValidatingAdmissionPolicy name}/{key}". + // + // If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy + // and the same audit annotation key, the annotation key will be identical. + // In this case, the first annotation written with the key will be included + // in the audit event and all subsequent annotations with the same key + // will be discarded. + // + // Required. + optional string key = 1; + + // valueExpression represents the expression which is evaluated by CEL to + // produce an audit annotation value. The expression must evaluate to either + // a string or null value. If the expression evaluates to a string, the + // audit annotation is included with the string value. If the expression + // evaluates to null or empty string the audit annotation will be omitted. + // The valueExpression may be no longer than 5kb in length. + // If the result of the valueExpression is more than 10kb in length, it + // will be truncated to 10kb. + // + // If multiple ValidatingAdmissionPolicyBinding resources match an + // API request, then the valueExpression will be evaluated for + // each binding. 
All unique values produced by the valueExpressions + // will be joined together in a comma-separated list. + // + // Required. + optional string valueExpression = 2; +} + +// ExpressionWarning is a warning information that targets a specific expression. +message ExpressionWarning { + // The path to the field that refers the expression. + // For example, the reference to the expression of the first item of + // validations is "spec.validations[0].expression" + optional string fieldRef = 2; + + // The content of type checking information in a human-readable form. + // Each line of the warning contains the type that the expression is checked + // against, followed by the type check error from the compiler. + optional string warning = 3; +} + // MatchCondition represents a condition which must by fulfilled for a request to be sent to a webhook. message MatchCondition { // Name is an identifier for this match condition, used for strategic merging of MatchConditions, @@ -57,6 +107,101 @@ message MatchCondition { optional string expression = 2; } +// MatchResources decides whether to run the admission control policy on an object based +// on whether it meets the match criteria. +// The exclude rules take precedence over include rules (if a resource matches both, it is excluded) +// +structType=atomic +message MatchResources { + // NamespaceSelector decides whether to run the admission control policy on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the policy. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the policy on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1; + + // ObjectSelector decides whether to run the validation based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the cel validation, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. 
+ // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2; + + // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. + // The policy cares about an operation if it matches _any_ Rule. + // +listType=atomic + // +optional + repeated NamedRuleWithOperations resourceRules = 3; + + // ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. + // The exclude rules take precedence over include rules (if a resource matches both, it is excluded) + // +listType=atomic + // +optional + repeated NamedRuleWithOperations excludeResourceRules = 4; + + // matchPolicy defines how the "MatchResources" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy. + // + // Defaults to "Equivalent" + // +optional + optional string matchPolicy = 7; +} + // MutatingWebhook describes an admission webhook and the resources and operations it applies to. message MutatingWebhook { // The name of the admission webhook. @@ -76,6 +221,7 @@ message MutatingWebhook { // from putting the cluster in a state which cannot be recovered from without completely // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + // +listType=atomic repeated RuleWithOperations rules = 3; // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - @@ -144,7 +290,7 @@ message MutatingWebhook { // // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; // ObjectSelector decides whether to run the webhook based on if the // object has matching labels. objectSelector is evaluated against both @@ -158,7 +304,7 @@ message MutatingWebhook { // users may skip the admission webhook by setting the labels. // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; // SideEffects states whether this webhook has side effects. // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). 
@@ -183,6 +329,7 @@ message MutatingWebhook { // If a persisted webhook configuration specifies allowed versions and does not // include any versions known to the API Server, calls to the webhook will fail // and be subject to the failure policy. + // +listType=atomic repeated string admissionReviewVersions = 8; // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. @@ -215,13 +362,10 @@ message MutatingWebhook { // - If failurePolicy=Fail, reject the request // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped // - // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate. - // // +patchMergeKey=name // +patchStrategy=merge // +listType=map // +listMapKey=name - // +featureGate=AdmissionWebhookMatchConditions // +optional repeated MatchCondition matchConditions = 12; } @@ -230,12 +374,14 @@ message MutatingWebhook { message MutatingWebhookConfiguration { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Webhooks is a list of webhooks and the affected resources and operations. // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated MutatingWebhook Webhooks = 2; } @@ -244,12 +390,94 @@ message MutatingWebhookConfigurationList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of MutatingWebhookConfiguration. repeated MutatingWebhookConfiguration items = 2; } +// NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames. +// +structType=atomic +message NamedRuleWithOperations { + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +listType=atomic + // +optional + repeated string resourceNames = 1; + + // RuleWithOperations is a tuple of Operations and Resources. + optional RuleWithOperations ruleWithOperations = 2; +} + +// ParamKind is a tuple of Group Kind and Version. +// +structType=atomic +message ParamKind { + // APIVersion is the API group version the resources belong to. + // In format of "group/version". + // Required. + optional string apiVersion = 1; + + // Kind is the API kind the resources belong to. + // Required. + optional string kind = 2; +} + +// ParamRef describes how to locate the params to be used as input to +// expressions of rules applied by a policy binding. +// +structType=atomic +message ParamRef { + // name is the name of the resource being referenced. + // + // One of `name` or `selector` must be set, but `name` and `selector` are + // mutually exclusive properties. If one is set, the other must be unset. + // + // A single parameter used for all admission requests can be configured + // by setting the `name` field, leaving `selector` blank, and setting namespace + // if `paramKind` is namespace-scoped. + optional string name = 1; + + // namespace is the namespace of the referenced resource. Allows limiting + // the search for params to a specific namespace. 
Applies to both `name` and + // `selector` fields. + // + // A per-namespace parameter may be used by specifying a namespace-scoped + // `paramKind` in the policy and leaving this field empty. + // + // - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this + // field results in a configuration error. + // + // - If `paramKind` is namespace-scoped, the namespace of the object being + // evaluated for admission will be used when this field is left unset. Take + // care that if this is left empty the binding must not match any cluster-scoped + // resources, which will result in an error. + // + // +optional + optional string namespace = 2; + + // selector can be used to match multiple param objects based on their labels. + // Supply selector: {} to match all resources of the ParamKind. + // + // If multiple params are found, they are all evaluated with the policy expressions + // and the results are ANDed together. + // + // One of `name` or `selector` must be set, but `name` and `selector` are + // mutually exclusive properties. If one is set, the other must be unset. + // + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + + // `parameterNotFoundAction` controls the behavior of the binding when the resource + // exists, and name or selector is valid, but there are no parameters + // matched by the binding. If the value is set to `Allow`, then no + // matched parameters will be treated as successful validation by the binding. + // If set to `Deny`, then no matched parameters will be subject to the + // `failurePolicy` of the policy. + // + // Allowed values are `Allow` or `Deny` + // + // Required + optional string parameterNotFoundAction = 4; +} + // Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended // to make sure that all the tuple expansions are valid. message Rule { @@ -333,6 +561,241 @@ message ServiceReference { optional int32 port = 4; } +// TypeChecking contains results of type checking the expressions in the +// ValidatingAdmissionPolicy +message TypeChecking { + // The type checking warnings for each expression. + // +optional + // +listType=atomic + repeated ExpressionWarning expressionWarnings = 1; +} + +// ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it. +message ValidatingAdmissionPolicy { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the ValidatingAdmissionPolicy. + optional ValidatingAdmissionPolicySpec spec = 2; + + // The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy + // behaves in the expected way. + // Populated by the system. + // Read-only. + // +optional + optional ValidatingAdmissionPolicyStatus status = 3; +} + +// ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. +// ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters. +// +// For a given admission request, each binding will cause its policy to be +// evaluated N times, where N is 1 for policies/bindings that don't use +// params, otherwise N is the number of parameters selected by the binding. 
+// +// The CEL expressions of a policy must have a computed CEL cost below the maximum +// CEL budget. Each evaluation of the policy is given an independent CEL cost budget. +// Adding/removing policies, bindings, or params can not affect whether a +// given (policy, binding, param) combination is within its own CEL budget. +message ValidatingAdmissionPolicyBinding { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding. + optional ValidatingAdmissionPolicyBindingSpec spec = 2; +} + +// ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding. +message ValidatingAdmissionPolicyBindingList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of PolicyBinding. + repeated ValidatingAdmissionPolicyBinding items = 2; +} + +// ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding. +message ValidatingAdmissionPolicyBindingSpec { + // PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. + // If the referenced resource does not exist, this binding is considered invalid and will be ignored + // Required. + optional string policyName = 1; + + // paramRef specifies the parameter resource used to configure the admission control policy. + // It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. + // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied. + // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param. + // +optional + optional ParamRef paramRef = 2; + + // MatchResources declares what resources match this binding and will be validated by it. + // Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. + // If this is unset, all resources matched by the policy are validated by this binding + // When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. + // Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required. + // +optional + optional MatchResources matchResources = 3; + + // validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. + // If a validation evaluates to false it is always enforced according to these actions. + // + // Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according + // to these actions only if the FailurePolicy is set to Fail, otherwise the failures are + // ignored. This includes compilation errors, runtime errors and misconfigurations of the policy. + // + // validationActions is declared as a set of action values. Order does + // not matter. validationActions may not contain duplicates of the same action. 
+ // + // The supported actions values are: + // + // "Deny" specifies that a validation failure results in a denied request. + // + // "Warn" specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. + // + // "Audit" specifies that a validation failure is included in the published + // audit event for the request. The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failures, formatted as + // a JSON list of objects, each with the following fields: + // - message: The validation failure message string + // - policy: The resource name of the ValidatingAdmissionPolicy + // - binding: The resource name of the ValidatingAdmissionPolicyBinding + // - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy + // - validationActions: The enforcement actions enacted for the validation failure + // Example audit annotation: + // `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]"` + // + // Clients should expect to handle additional values by ignoring + // any values not recognized. + // + // "Deny" and "Warn" may not be used together since this combination + // needlessly duplicates the validation failure both in the + // API response body and the HTTP warning headers. + // + // Required. + // +listType=set + repeated string validationActions = 4; +} + +// ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy. +message ValidatingAdmissionPolicyList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ValidatingAdmissionPolicy. + repeated ValidatingAdmissionPolicy items = 2; +} + +// ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy. +message ValidatingAdmissionPolicySpec { + // ParamKind specifies the kind of resources used to parameterize this policy. + // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. + // If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. + // If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null. + // +optional + optional ParamKind paramKind = 1; + + // MatchConstraints specifies what resources this policy is designed to validate. + // The AdmissionPolicy cares about a request if it matches _all_ Constraints. + // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API + // ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. + // Required. + optional MatchResources matchConstraints = 2; + + // Validations contain CEL expressions which is used to apply the validation. + // Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is + // required. 
+ // +listType=atomic + // +optional + repeated Validation validations = 3; + + // failurePolicy defines how to handle failures for the admission policy. Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. + // + // A policy is invalid if spec.paramKind refers to a non-existent Kind. + // A binding is invalid if spec.paramRef.name refers to a non-existent resource. + // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions + // define how failures are enforced. + // + // Allowed values are Ignore or Fail. Defaults to Fail. + // +optional + optional string failurePolicy = 4; + + // auditAnnotations contains CEL expressions which are used to produce audit + // annotations for the audit event of the API request. + // validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is + // required. + // +listType=atomic + // +optional + repeated AuditAnnotation auditAnnotations = 5; + + // MatchConditions is a list of conditions that must be met for a request to be validated. + // Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + repeated MatchCondition matchConditions = 6; + + // Variables contain definitions of variables that can be used in composition of other expressions. + // Each variable is defined as a named CEL expression. + // The variables defined here will be available under `variables` in other expressions of the policy + // except MatchConditions because MatchConditions are evaluated before the rest of the policy. + // + // The expression of a variable can refer to other variables defined earlier in the list but not those after. + // Thus, Variables must be sorted by the order of first appearance and acyclic. + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + repeated Variable variables = 7; +} + +// ValidatingAdmissionPolicyStatus represents the status of an admission validation policy. +message ValidatingAdmissionPolicyStatus { + // The generation observed by the controller. + // +optional + optional int64 observedGeneration = 1; + + // The results of type checking for each expression. + // Presence of this field indicates the completion of the type checking. + // +optional + optional TypeChecking typeChecking = 2; + + // The conditions represent the latest available observations of a policy's current state. 
+ // +optional + // +listType=map + // +listMapKey=type + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3; +} + // ValidatingWebhook describes an admission webhook and the resources and operations it applies to. message ValidatingWebhook { // The name of the admission webhook. @@ -352,6 +815,7 @@ message ValidatingWebhook { // from putting the cluster in a state which cannot be recovered from without completely // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + // +listType=atomic repeated RuleWithOperations rules = 3; // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - @@ -420,7 +884,7 @@ message ValidatingWebhook { // // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; // ObjectSelector decides whether to run the webhook based on if the // object has matching labels. objectSelector is evaluated against both @@ -434,7 +898,7 @@ message ValidatingWebhook { // users may skip the admission webhook by setting the labels. // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; // SideEffects states whether this webhook has side effects. // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). @@ -459,6 +923,7 @@ message ValidatingWebhook { // If a persisted webhook configuration specifies allowed versions and does not // include any versions known to the API Server, calls to the webhook will fail // and be subject to the failure policy. + // +listType=atomic repeated string admissionReviewVersions = 8; // MatchConditions is a list of conditions that must be met for a request to be sent to this @@ -473,13 +938,10 @@ message ValidatingWebhook { // - If failurePolicy=Fail, reject the request // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped // - // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate. - // // +patchMergeKey=name // +patchStrategy=merge // +listType=map // +listMapKey=name - // +featureGate=AdmissionWebhookMatchConditions // +optional repeated MatchCondition matchConditions = 11; } @@ -488,12 +950,14 @@ message ValidatingWebhook { message ValidatingWebhookConfiguration { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Webhooks is a list of webhooks and the affected resources and operations. // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated ValidatingWebhook Webhooks = 2; } @@ -502,12 +966,103 @@ message ValidatingWebhookConfigurationList { // Standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ValidatingWebhookConfiguration. repeated ValidatingWebhookConfiguration items = 2; } +// Validation specifies the CEL expression which is used to apply the validation. +message Validation { + // Expression represents the expression which will be evaluated by CEL. + // ref: https://github.com/google/cel-spec + // CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables: + // + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. + // - 'variables' - Map of composited variables, from its name to its lazily evaluated value. + // For example, a variable named 'foo' can be accessed as 'variables.foo'. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // + // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the + // object. No other metadata properties are accessible. + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Accessible property names are escaped according to the following rules when accessed in the expression: + // - '__' escapes to '__underscores__' + // - '.' escapes to '__dot__' + // - '-' escapes to '__dash__' + // - '/' escapes to '__slash__' + // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: + // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if", + // "import", "let", "loop", "package", "namespace", "return". + // Examples: + // - Expression accessing a property named "namespace": {"Expression": "object.__namespace__ > 0"} + // - Expression accessing a property named "x-prop": {"Expression": "object.x__dash__prop > 0"} + // - Expression accessing a property named "redact__d": {"Expression": "object.redact__underscores__d > 0"} + // + // Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. + // Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: + // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and + // non-intersecting elements in `Y` are appended, retaining their partial order. + // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values + // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. 
Elements in `Y` with + // non-intersecting keys are appended, retaining their partial order. + // Required. + optional string Expression = 1; + + // Message represents the message displayed when validation fails. The message is required if the Expression contains + // line breaks. The message must not contain line breaks. + // If unset, the message is "failed rule: {Rule}". + // e.g. "must be a URL with the host matching spec.host" + // If the Expression contains line breaks. Message is required. + // The message must not contain line breaks. + // If unset, the message is "failed Expression: {Expression}". + // +optional + optional string message = 2; + + // Reason represents a machine-readable description of why this validation failed. + // If this is the first validation in the list to fail, this reason, as well as the + // corresponding HTTP response code, are used in the + // HTTP response to the client. + // The currently supported reasons are: "Unauthorized", "Forbidden", "Invalid", "RequestEntityTooLarge". + // If not set, StatusReasonInvalid is used in the response to the client. + // +optional + optional string reason = 3; + + // messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. + // If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. + // If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. + // Example: + // "object.x must be less than max ("+string(params.max)+")" + // +optional + optional string messageExpression = 4; +} + +// Variable is the definition of a variable that is used for composition. A variable is defined as a named expression. +// +structType=atomic +message Variable { + // Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. + // The variable can be accessed in other expressions through `variables` + // For example, if name is "foo", the variable will be available as `variables.foo` + optional string Name = 1; + + // Expression is the expression that will be evaluated as the value of the variable. + // The CEL expression has access to the same identifiers as the CEL expressions in Validation. 
+ optional string Expression = 2; +} + // WebhookClientConfig contains the information to make a TLS // connection with the webhook message WebhookClientConfig { diff --git a/vendor/k8s.io/api/admissionregistration/v1/register.go b/vendor/k8s.io/api/admissionregistration/v1/register.go index e42a8bce3b..da74379ce2 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/register.go +++ b/vendor/k8s.io/api/admissionregistration/v1/register.go @@ -50,6 +50,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ValidatingWebhookConfigurationList{}, &MutatingWebhookConfiguration{}, &MutatingWebhookConfigurationList{}, + &ValidatingAdmissionPolicy{}, + &ValidatingAdmissionPolicyList{}, + &ValidatingAdmissionPolicyBinding{}, + &ValidatingAdmissionPolicyBindingList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/api/admissionregistration/v1/types.go b/vendor/k8s.io/api/admissionregistration/v1/types.go index 07ed7a6246..4efeb26748 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1/types.go @@ -91,6 +91,18 @@ const ( Fail FailurePolicyType = "Fail" ) +// ParameterNotFoundActionType specifies a failure policy that defines how a binding +// is evaluated when the param referred by its perNamespaceParamRef is not found. +type ParameterNotFoundActionType string + +const ( + // Allow means all requests will be admitted if no param resources + // could be found. + AllowAction ParameterNotFoundActionType = "Allow" + // Deny means all requests will be denied if no param resources are found. + DenyAction ParameterNotFoundActionType = "Deny" +) + // MatchPolicyType specifies the type of match policy. // +enum type MatchPolicyType string @@ -120,9 +132,590 @@ const ( SideEffectClassNoneOnDryRun SideEffectClass = "NoneOnDryRun" ) +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.30 + +// ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it. +type ValidatingAdmissionPolicy struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Specification of the desired behavior of the ValidatingAdmissionPolicy. + Spec ValidatingAdmissionPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy + // behaves in the expected way. + // Populated by the system. + // Read-only. + // +optional + Status ValidatingAdmissionPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ValidatingAdmissionPolicyStatus represents the status of an admission validation policy. +type ValidatingAdmissionPolicyStatus struct { + // The generation observed by the controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + // The results of type checking for each expression. + // Presence of this field indicates the completion of the type checking. 
+ // +optional + TypeChecking *TypeChecking `json:"typeChecking,omitempty" protobuf:"bytes,2,opt,name=typeChecking"` + // The conditions represent the latest available observations of a policy's current state. + // +optional + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"` +} + +// ValidatingAdmissionPolicyConditionType is the condition type of admission validation policy. +type ValidatingAdmissionPolicyConditionType string + +// TypeChecking contains results of type checking the expressions in the +// ValidatingAdmissionPolicy +type TypeChecking struct { + // The type checking warnings for each expression. + // +optional + // +listType=atomic + ExpressionWarnings []ExpressionWarning `json:"expressionWarnings,omitempty" protobuf:"bytes,1,rep,name=expressionWarnings"` +} + +// ExpressionWarning is a warning information that targets a specific expression. +type ExpressionWarning struct { + // The path to the field that refers the expression. + // For example, the reference to the expression of the first item of + // validations is "spec.validations[0].expression" + FieldRef string `json:"fieldRef" protobuf:"bytes,2,opt,name=fieldRef"` + // The content of type checking information in a human-readable form. + // Each line of the warning contains the type that the expression is checked + // against, followed by the type check error from the compiler. + Warning string `json:"warning" protobuf:"bytes,3,opt,name=warning"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.30 + +// ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy. +type ValidatingAdmissionPolicyList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of ValidatingAdmissionPolicy. + Items []ValidatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy. +type ValidatingAdmissionPolicySpec struct { + // ParamKind specifies the kind of resources used to parameterize this policy. + // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. + // If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. + // If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null. + // +optional + ParamKind *ParamKind `json:"paramKind,omitempty" protobuf:"bytes,1,rep,name=paramKind"` + + // MatchConstraints specifies what resources this policy is designed to validate. + // The AdmissionPolicy cares about a request if it matches _all_ Constraints. + // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API + // ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. + // Required. + MatchConstraints *MatchResources `json:"matchConstraints,omitempty" protobuf:"bytes,2,rep,name=matchConstraints"` + + // Validations contain CEL expressions which is used to apply the validation. 
+ // Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is + // required. + // +listType=atomic + // +optional + Validations []Validation `json:"validations,omitempty" protobuf:"bytes,3,rep,name=validations"` + + // failurePolicy defines how to handle failures for the admission policy. Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. + // + // A policy is invalid if spec.paramKind refers to a non-existent Kind. + // A binding is invalid if spec.paramRef.name refers to a non-existent resource. + // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions + // define how failures are enforced. + // + // Allowed values are Ignore or Fail. Defaults to Fail. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // auditAnnotations contains CEL expressions which are used to produce audit + // annotations for the audit event of the API request. + // validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is + // required. + // +listType=atomic + // +optional + AuditAnnotations []AuditAnnotation `json:"auditAnnotations,omitempty" protobuf:"bytes,5,rep,name=auditAnnotations"` + + // MatchConditions is a list of conditions that must be met for a request to be validated. + // Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=matchConditions"` + + // Variables contain definitions of variables that can be used in composition of other expressions. + // Each variable is defined as a named CEL expression. + // The variables defined here will be available under `variables` in other expressions of the policy + // except MatchConditions because MatchConditions are evaluated before the rest of the policy. + // + // The expression of a variable can refer to other variables defined earlier in the list but not those after. + // Thus, Variables must be sorted by the order of first appearance and acyclic. + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + Variables []Variable `json:"variables,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"` +} + +// ParamKind is a tuple of Group Kind and Version. 
+// +structType=atomic +type ParamKind struct { + // APIVersion is the API group version the resources belong to. + // In format of "group/version". + // Required. + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,rep,name=apiVersion"` + + // Kind is the API kind the resources belong to. + // Required. + Kind string `json:"kind,omitempty" protobuf:"bytes,2,rep,name=kind"` +} + +// Validation specifies the CEL expression which is used to apply the validation. +type Validation struct { + // Expression represents the expression which will be evaluated by CEL. + // ref: https://github.com/google/cel-spec + // CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables: + // + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. + // - 'variables' - Map of composited variables, from its name to its lazily evaluated value. + // For example, a variable named 'foo' can be accessed as 'variables.foo'. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // + // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the + // object. No other metadata properties are accessible. + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Accessible property names are escaped according to the following rules when accessed in the expression: + // - '__' escapes to '__underscores__' + // - '.' escapes to '__dot__' + // - '-' escapes to '__dash__' + // - '/' escapes to '__slash__' + // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: + // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if", + // "import", "let", "loop", "package", "namespace", "return". + // Examples: + // - Expression accessing a property named "namespace": {"Expression": "object.__namespace__ > 0"} + // - Expression accessing a property named "x-prop": {"Expression": "object.x__dash__prop > 0"} + // - Expression accessing a property named "redact__d": {"Expression": "object.redact__underscores__d > 0"} + // + // Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. + // Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: + // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and + // non-intersecting elements in `Y` are appended, retaining their partial order. + // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values + // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. 
Elements in `Y` with + // non-intersecting keys are appended, retaining their partial order. + // Required. + Expression string `json:"expression" protobuf:"bytes,1,opt,name=Expression"` + // Message represents the message displayed when validation fails. The message is required if the Expression contains + // line breaks. The message must not contain line breaks. + // If unset, the message is "failed Expression: {Expression}". + // e.g. "must be a URL with the host matching spec.host" + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + // Reason represents a machine-readable description of why this validation failed. + // If this is the first validation in the list to fail, this reason, as well as the + // corresponding HTTP response code, are used in the + // HTTP response to the client. + // The currently supported reasons are: "Unauthorized", "Forbidden", "Invalid", "RequestEntityTooLarge". + // If not set, StatusReasonInvalid is used in the response to the client. + // +optional + Reason *metav1.StatusReason `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + // messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. + // If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. + // If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. + // Example: + // "object.x must be less than max ("+string(params.max)+")" + // +optional + MessageExpression string `json:"messageExpression,omitempty" protobuf:"bytes,4,opt,name=messageExpression"` +} + +// Variable is the definition of a variable that is used for composition. A variable is defined as a named expression. +// +structType=atomic +type Variable struct { + // Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. + // The variable can be accessed in other expressions through `variables`. + // For example, if name is "foo", the variable will be available as `variables.foo`. + Name string `json:"name" protobuf:"bytes,1,opt,name=Name"` + + // Expression is the expression that will be evaluated as the value of the variable. + // The CEL expression has access to the same identifiers as the CEL expressions in Validation. + Expression string `json:"expression" protobuf:"bytes,2,opt,name=Expression"` +} + +// AuditAnnotation describes how to produce an audit annotation for an API request. +type AuditAnnotation struct { + // key specifies the audit annotation key. The audit annotation keys of + // a ValidatingAdmissionPolicy must be unique.
The key must be a qualified + // name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length. + // + // The key is combined with the resource name of the + // ValidatingAdmissionPolicy to construct an audit annotation key: + // "{ValidatingAdmissionPolicy name}/{key}". + // + // If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy + // and the same audit annotation key, the annotation key will be identical. + // In this case, the first annotation written with the key will be included + // in the audit event and all subsequent annotations with the same key + // will be discarded. + // + // Required. + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + + // valueExpression represents the expression which is evaluated by CEL to + // produce an audit annotation value. The expression must evaluate to either + // a string or null value. If the expression evaluates to a string, the + // audit annotation is included with the string value. If the expression + // evaluates to null or empty string the audit annotation will be omitted. + // The valueExpression may be no longer than 5kb in length. + // If the result of the valueExpression is more than 10kb in length, it + // will be truncated to 10kb. + // + // If multiple ValidatingAdmissionPolicyBinding resources match an + // API request, then the valueExpression will be evaluated for + // each binding. All unique values produced by the valueExpressions + // will be joined together in a comma-separated list. + // + // Required. + ValueExpression string `json:"valueExpression" protobuf:"bytes,2,opt,name=valueExpression"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.30 + +// ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with parameterized resources. +// ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters. +// +// For a given admission request, each binding will cause its policy to be +// evaluated N times, where N is 1 for policies/bindings that don't use +// params, otherwise N is the number of parameters selected by the binding. +// +// The CEL expressions of a policy must have a computed CEL cost below the maximum +// CEL budget. Each evaluation of the policy is given an independent CEL cost budget. +// Adding/removing policies, bindings, or params can not affect whether a +// given (policy, binding, param) combination is within its own CEL budget. +type ValidatingAdmissionPolicyBinding struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding. + Spec ValidatingAdmissionPolicyBindingSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.30 + +// ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding. +type ValidatingAdmissionPolicyBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of PolicyBinding. + Items []ValidatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding. +type ValidatingAdmissionPolicyBindingSpec struct { + // PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. + // If the referenced resource does not exist, this binding is considered invalid and will be ignored. + // Required. + PolicyName string `json:"policyName,omitempty" protobuf:"bytes,1,rep,name=policyName"` + + // paramRef specifies the parameter resource used to configure the admission control policy. + // It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. + // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy is applied. + // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param. + // +optional + ParamRef *ParamRef `json:"paramRef,omitempty" protobuf:"bytes,2,rep,name=paramRef"` + + // MatchResources declares what resources match this binding and will be validated by it. + // Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. + // If this is unset, all resources matched by the policy are validated by this binding. + // When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. + // Note that this differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required. + // +optional + MatchResources *MatchResources `json:"matchResources,omitempty" protobuf:"bytes,3,rep,name=matchResources"` + + // validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. + // If a validation evaluates to false it is always enforced according to these actions. + // + // Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according + // to these actions only if the FailurePolicy is set to Fail, otherwise the failures are + // ignored. This includes compilation errors, runtime errors and misconfigurations of the policy. + // + // validationActions is declared as a set of action values. Order does + // not matter. validationActions may not contain duplicates of the same action. + // + // The supported action values are: + // + // "Deny" specifies that a validation failure results in a denied request. + // + // "Warn" specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. + // + // "Audit" specifies that a validation failure is included in the published + // audit event for the request.
The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failures, formatted as + // a JSON list of objects, each with the following fields: + // - message: The validation failure message string + // - policy: The resource name of the ValidatingAdmissionPolicy + // - binding: The resource name of the ValidatingAdmissionPolicyBinding + // - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy + // - validationActions: The enforcement actions enacted for the validation failure + // Example audit annotation: + // `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]"` + // + // Clients should expect to handle additional values by ignoring + // any values not recognized. + // + // "Deny" and "Warn" may not be used together since this combination + // needlessly duplicates the validation failure both in the + // API response body and the HTTP warning headers. + // + // Required. + // +listType=set + ValidationActions []ValidationAction `json:"validationActions,omitempty" protobuf:"bytes,4,rep,name=validationActions"` +} + +// ParamRef describes how to locate the params to be used as input to +// expressions of rules applied by a policy binding. +// +structType=atomic +type ParamRef struct { + // name is the name of the resource being referenced. + // + // One of `name` or `selector` must be set, but `name` and `selector` are + // mutually exclusive properties. If one is set, the other must be unset. + // + // A single parameter used for all admission requests can be configured + // by setting the `name` field, leaving `selector` blank, and setting namespace + // if `paramKind` is namespace-scoped. + // + Name string `json:"name,omitempty" protobuf:"bytes,1,rep,name=name"` + + // namespace is the namespace of the referenced resource. Allows limiting + // the search for params to a specific namespace. Applies to both `name` and + // `selector` fields. + // + // A per-namespace parameter may be used by specifying a namespace-scoped + // `paramKind` in the policy and leaving this field empty. + // + // - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this + // field results in a configuration error. + // + // - If `paramKind` is namespace-scoped, the namespace of the object being + // evaluated for admission will be used when this field is left unset. Take + // care that if this is left empty the binding must not match any cluster-scoped + // resources, which will result in an error. + // + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,rep,name=namespace"` + + // selector can be used to match multiple param objects based on their labels. + // Supply selector: {} to match all resources of the ParamKind. + // + // If multiple params are found, they are all evaluated with the policy expressions + // and the results are ANDed together. + // + // One of `name` or `selector` must be set, but `name` and `selector` are + // mutually exclusive properties. If one is set, the other must be unset. 
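+ //
+ // As an illustrative sketch only (not part of the upstream API documentation), a
+ // ParamRef selecting params by label within an assumed "test" namespace could be:
+ //
+ //	ParamRef{
+ //		Namespace: "test",
+ //		Selector:  &metav1.LabelSelector{MatchLabels: map[string]string{"environment": "test"}},
+ //	}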
+ // + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,3,rep,name=selector"` + + // `parameterNotFoundAction` controls the behavior of the binding when the resource + // exists, and name or selector is valid, but there are no parameters + // matched by the binding. If the value is set to `Allow`, then no + // matched parameters will be treated as successful validation by the binding. + // If set to `Deny`, then no matched parameters will be subject to the + // `failurePolicy` of the policy. + // + // Allowed values are `Allow` or `Deny` + // + // Required + ParameterNotFoundAction *ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty" protobuf:"bytes,4,rep,name=parameterNotFoundAction"` +} + +// MatchResources decides whether to run the admission control policy on an object based +// on whether it meets the match criteria. +// The exclude rules take precedence over include rules (if a resource matches both, it is excluded) +// +structType=atomic +type MatchResources struct { + // NamespaceSelector decides whether to run the admission control policy on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the policy. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the policy on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,1,opt,name=namespaceSelector"` + // ObjectSelector decides whether to run the validation based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the cel validation, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,2,opt,name=objectSelector"` + // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. + // The policy cares about an operation if it matches _any_ Rule. 
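+ //
+ // As an illustrative sketch only (not part of the upstream API documentation), a
+ // resource rule matching CREATE and UPDATE of apps/v1 deployments could be:
+ //
+ //	NamedRuleWithOperations{
+ //		RuleWithOperations: RuleWithOperations{
+ //			Operations: []OperationType{Create, Update},
+ //			Rule:       Rule{APIGroups: []string{"apps"}, APIVersions: []string{"v1"}, Resources: []string{"deployments"}},
+ //		},
+ //	}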
+ // +listType=atomic + // +optional + ResourceRules []NamedRuleWithOperations `json:"resourceRules,omitempty" protobuf:"bytes,3,rep,name=resourceRules"` + // ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. + // The exclude rules take precedence over include rules (if a resource matches both, it is excluded) + // +listType=atomic + // +optional + ExcludeResourceRules []NamedRuleWithOperations `json:"excludeResourceRules,omitempty" protobuf:"bytes,4,rep,name=excludeResourceRules"` + // matchPolicy defines how the "MatchResources" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy. + // + // - Equivalent: match a request if it modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy. + // + // Defaults to "Equivalent" + // +optional + MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,7,opt,name=matchPolicy,casttype=MatchPolicyType"` +} + +// ValidationAction specifies a policy enforcement action. +// +enum +type ValidationAction string + +const ( + // Deny specifies that a validation failure results in a denied request. + Deny ValidationAction = "Deny" + // Warn specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. + Warn ValidationAction = "Warn" + // Audit specifies that a validation failure is included in the published + // audit event for the request. The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failure. + Audit ValidationAction = "Audit" +) + +// NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames. +// +structType=atomic +type NamedRuleWithOperations struct { + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +listType=atomic + // +optional + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,1,rep,name=resourceNames"` + // RuleWithOperations is a tuple of Operations and Resources. + RuleWithOperations `json:",inline" protobuf:"bytes,2,opt,name=ruleWithOperations"` +} + // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.16 // ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it.
type ValidatingWebhookConfiguration struct { @@ -134,10 +727,13 @@ type ValidatingWebhookConfiguration struct { // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name Webhooks []ValidatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.16 // ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. type ValidatingWebhookConfigurationList struct { @@ -153,6 +749,7 @@ type ValidatingWebhookConfigurationList struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.16 // MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. type MutatingWebhookConfiguration struct { @@ -164,10 +761,13 @@ type MutatingWebhookConfiguration struct { // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name Webhooks []MutatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.16 // MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. type MutatingWebhookConfigurationList struct { @@ -199,6 +799,7 @@ type ValidatingWebhook struct { // from putting the cluster in a state which cannot be recovered from without completely // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + // +listType=atomic Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - @@ -306,6 +907,7 @@ type ValidatingWebhook struct { // If a persisted webhook configuration specifies allowed versions and does not // include any versions known to the API Server, calls to the webhook will fail // and be subject to the failure policy. + // +listType=atomic AdmissionReviewVersions []string `json:"admissionReviewVersions" protobuf:"bytes,8,rep,name=admissionReviewVersions"` // MatchConditions is a list of conditions that must be met for a request to be sent to this @@ -320,13 +922,10 @@ type ValidatingWebhook struct { // - If failurePolicy=Fail, reject the request // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped // - // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate. - // // +patchMergeKey=name // +patchStrategy=merge // +listType=map // +listMapKey=name - // +featureGate=AdmissionWebhookMatchConditions // +optional MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,11,opt,name=matchConditions"` } @@ -350,6 +949,7 @@ type MutatingWebhook struct { // from putting the cluster in a state which cannot be recovered from without completely // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. 
+ // +listType=atomic Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - @@ -457,6 +1057,7 @@ type MutatingWebhook struct { // If a persisted webhook configuration specifies allowed versions and does not // include any versions known to the API Server, calls to the webhook will fail // and be subject to the failure policy. + // +listType=atomic AdmissionReviewVersions []string `json:"admissionReviewVersions" protobuf:"bytes,8,rep,name=admissionReviewVersions"` // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. @@ -489,13 +1090,10 @@ type MutatingWebhook struct { // - If failurePolicy=Fail, reject the request // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped // - // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate. - // // +patchMergeKey=name // +patchStrategy=merge // +listType=map // +listMapKey=name - // +featureGate=AdmissionWebhookMatchConditions // +optional MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,12,opt,name=matchConditions"` } diff --git a/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go index c41cceb2f2..f43139505d 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go @@ -27,6 +27,26 @@ package v1 // Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AuditAnnotation = map[string]string{ + "": "AuditAnnotation describes how to produce an audit annotation for an API request.", + "key": "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.", + "valueExpression": "valueExpression represents the expression which is evaluated by CEL to produce an audit annotation value. The expression must evaluate to either a string or null value. If the expression evaluates to a string, the audit annotation is included with the string value. If the expression evaluates to null or empty string the audit annotation will be omitted. The valueExpression may be no longer than 5kb in length. If the result of the valueExpression is more than 10kb in length, it will be truncated to 10kb.\n\nIf multiple ValidatingAdmissionPolicyBinding resources match an API request, then the valueExpression will be evaluated for each binding. 
All unique values produced by the valueExpressions will be joined together in a comma-separated list.\n\nRequired.", +} + +func (AuditAnnotation) SwaggerDoc() map[string]string { + return map_AuditAnnotation +} + +var map_ExpressionWarning = map[string]string{ + "": "ExpressionWarning is a warning information that targets a specific expression.", + "fieldRef": "The path to the field that refers the expression. For example, the reference to the expression of the first item of validations is \"spec.validations[0].expression\"", + "warning": "The content of type checking information in a human-readable form. Each line of the warning contains the type that the expression is checked against, followed by the type check error from the compiler.", +} + +func (ExpressionWarning) SwaggerDoc() map[string]string { + return map_ExpressionWarning +} + var map_MatchCondition = map[string]string{ "": "MatchCondition represents a condition which must by fulfilled for a request to be sent to a webhook.", "name": "Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\n\nRequired.", @@ -37,6 +57,19 @@ func (MatchCondition) SwaggerDoc() map[string]string { return map_MatchCondition } +var map_MatchResources = map[string]string{ + "": "MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", + "namespaceSelector": "NamespaceSelector decides whether to run the admission control policy on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the policy.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the policy on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "objectSelector": "ObjectSelector decides whether to run the validation based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the cel validation, and is considered to match if either object matches the selector. 
A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "resourceRules": "ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule.", + "excludeResourceRules": "ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", + "matchPolicy": "matchPolicy defines how the \"MatchResources\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\n\nDefaults to \"Equivalent\"", +} + +func (MatchResources) SwaggerDoc() map[string]string { + return map_MatchResources +} + var map_MutatingWebhook = map[string]string{ "": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", @@ -50,7 +83,7 @@ var map_MutatingWebhook = map[string]string{ "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.", "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", "reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. 
Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".", - "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped", } func (MutatingWebhook) SwaggerDoc() map[string]string { @@ -77,6 +110,37 @@ func (MutatingWebhookConfigurationList) SwaggerDoc() map[string]string { return map_MutatingWebhookConfigurationList } +var map_NamedRuleWithOperations = map[string]string{ + "": "NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.", + "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", +} + +func (NamedRuleWithOperations) SwaggerDoc() map[string]string { + return map_NamedRuleWithOperations +} + +var map_ParamKind = map[string]string{ + "": "ParamKind is a tuple of Group Kind and Version.", + "apiVersion": "APIVersion is the API group version the resources belong to. In format of \"group/version\". Required.", + "kind": "Kind is the API kind the resources belong to. 
Required.", +} + +func (ParamKind) SwaggerDoc() map[string]string { + return map_ParamKind +} + +var map_ParamRef = map[string]string{ + "": "ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding.", + "name": "name is the name of the resource being referenced.\n\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.\n\nA single parameter used for all admission requests can be configured by setting the `name` field, leaving `selector` blank, and setting namespace if `paramKind` is namespace-scoped.", + "namespace": "namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\n\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\n\n- If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error.\n\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error.", + "selector": "selector can be used to match multiple param objects based on their labels. Supply selector: {} to match all resources of the ParamKind.\n\nIf multiple params are found, they are all evaluated with the policy expressions and the results are ANDed together.\n\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.", + "parameterNotFoundAction": "`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\n\nAllowed values are `Allow` or `Deny`\n\nRequired", +} + +func (ParamRef) SwaggerDoc() map[string]string { + return map_ParamRef +} + var map_Rule = map[string]string{ "": "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.", "apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. 
Required.", @@ -110,6 +174,94 @@ func (ServiceReference) SwaggerDoc() map[string]string { return map_ServiceReference } +var map_TypeChecking = map[string]string{ + "": "TypeChecking contains results of type checking the expressions in the ValidatingAdmissionPolicy", + "expressionWarnings": "The type checking warnings for each expression.", +} + +func (TypeChecking) SwaggerDoc() map[string]string { + return map_TypeChecking +} + +var map_ValidatingAdmissionPolicy = map[string]string{ + "": "ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "Specification of the desired behavior of the ValidatingAdmissionPolicy.", + "status": "The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy behaves in the expected way. Populated by the system. Read-only.", +} + +func (ValidatingAdmissionPolicy) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicy +} + +var map_ValidatingAdmissionPolicyBinding = map[string]string{ + "": "ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.\n\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding.\n\nThe CEL expressions of a policy must have a computed CEL cost below the maximum CEL budget. Each evaluation of the policy is given an independent CEL cost budget. Adding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "Specification of the desired behavior of the ValidatingAdmissionPolicyBinding.", +} + +func (ValidatingAdmissionPolicyBinding) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyBinding +} + +var map_ValidatingAdmissionPolicyBindingList = map[string]string{ + "": "ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of PolicyBinding.", +} + +func (ValidatingAdmissionPolicyBindingList) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyBindingList +} + +var map_ValidatingAdmissionPolicyBindingSpec = map[string]string{ + "": "ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.", + "policyName": "PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.", + "paramRef": "paramRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. 
If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied. If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.", + "matchResources": "MatchResources declares what resources match this binding and will be validated by it. Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. If this is unset, all resources matched by the policy are validated by this binding When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.", + "validationActions": "validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\n\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\n\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\n\nThe supported actions values are:\n\n\"Deny\" specifies that a validation failure results in a denied request.\n\n\"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\n\n\"Audit\" specifies that a validation failure is included in the published audit event for the request. The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]\"`\n\nClients should expect to handle additional values by ignoring any values not recognized.\n\n\"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\n\nRequired.", +} + +func (ValidatingAdmissionPolicyBindingSpec) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyBindingSpec +} + +var map_ValidatingAdmissionPolicyList = map[string]string{ + "": "ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy.", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of ValidatingAdmissionPolicy.", +} + +func (ValidatingAdmissionPolicyList) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyList +} + +var map_ValidatingAdmissionPolicySpec = map[string]string{ + "": "ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.", + "paramKind": "ParamKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null.", + "matchConstraints": "MatchConstraints specifies what resources this policy is designed to validate. The AdmissionPolicy cares about a request if it matches _all_ Constraints. However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. Required.", + "validations": "Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required.", + "failurePolicy": "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\n\nAllowed values are Ignore or Fail. Defaults to Fail.", + "auditAnnotations": "auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required.", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped", + "variables": "Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. 
The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\n\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic.", +} + +func (ValidatingAdmissionPolicySpec) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicySpec +} + +var map_ValidatingAdmissionPolicyStatus = map[string]string{ + "": "ValidatingAdmissionPolicyStatus represents the status of an admission validation policy.", + "observedGeneration": "The generation observed by the controller.", + "typeChecking": "The results of type checking for each expression. Presence of this field indicates the completion of the type checking.", + "conditions": "The conditions represent the latest available observations of a policy's current state.", +} + +func (ValidatingAdmissionPolicyStatus) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyStatus +} + var map_ValidatingWebhook = map[string]string{ "": "ValidatingWebhook describes an admission webhook and the resources and operations it applies to.", "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", @@ -122,7 +274,7 @@ var map_ValidatingWebhook = map[string]string{ "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.", "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.", "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", - "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. 
If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped", } func (ValidatingWebhook) SwaggerDoc() map[string]string { @@ -149,6 +301,28 @@ func (ValidatingWebhookConfigurationList) SwaggerDoc() map[string]string { return map_ValidatingWebhookConfigurationList } +var map_Validation = map[string]string{ + "": "Validation specifies the CEL expression which is used to apply the validation.", + "expression": "Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. 
The keywords are:\n\t \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\",\n\t \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\".\nExamples:\n - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ > 0\"}\n - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop > 0\"}\n - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d > 0\"}\n\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\n non-intersecting elements in `Y` are appended, retaining their partial order.\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with\n non-intersecting keys are appended, retaining their partial order.\nRequired.", + "message": "Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \"failed Expression: {Expression}\".", + "reason": "Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client.", + "messageExpression": "messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. Example: \"object.x must be less than max (\"+string(params.max)+\")\"", +} + +func (Validation) SwaggerDoc() map[string]string { + return map_Validation +} + +var map_Variable = map[string]string{ + "": "Variable is the definition of a variable that is used for composition. A variable is defined as a named expression.", + "name": "Name is the name of the variable. 
The name must be a valid CEL identifier and unique among all variables. The variable can be accessed in other expressions through `variables` For example, if name is \"foo\", the variable will be available as `variables.foo`", + "expression": "Expression is the expression that will be evaluated as the value of the variable. The CEL expression has access to the same identifiers as the CEL expressions in Validation.", +} + +func (Variable) SwaggerDoc() map[string]string { + return map_Variable +} + var map_WebhookClientConfig = map[string]string{ "": "WebhookClientConfig contains the information to make a TLS connection with the webhook", "url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", diff --git a/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go index b956099138..bfe599c1d3 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go @@ -26,6 +26,38 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditAnnotation) DeepCopyInto(out *AuditAnnotation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditAnnotation. +func (in *AuditAnnotation) DeepCopy() *AuditAnnotation { + if in == nil { + return nil + } + out := new(AuditAnnotation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressionWarning) DeepCopyInto(out *ExpressionWarning) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressionWarning. +func (in *ExpressionWarning) DeepCopy() *ExpressionWarning { + if in == nil { + return nil + } + out := new(ExpressionWarning) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MatchCondition) DeepCopyInto(out *MatchCondition) { *out = *in @@ -42,6 +74,51 @@ func (in *MatchCondition) DeepCopy() *MatchCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *MatchResources) DeepCopyInto(out *MatchResources) { + *out = *in + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectSelector != nil { + in, out := &in.ObjectSelector, &out.ObjectSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ResourceRules != nil { + in, out := &in.ResourceRules, &out.ResourceRules + *out = make([]NamedRuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExcludeResourceRules != nil { + in, out := &in.ExcludeResourceRules, &out.ExcludeResourceRules + *out = make([]NamedRuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MatchPolicy != nil { + in, out := &in.MatchPolicy, &out.MatchPolicy + *out = new(MatchPolicyType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchResources. +func (in *MatchResources) DeepCopy() *MatchResources { + if in == nil { + return nil + } + out := new(MatchResources) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { *out = *in @@ -177,6 +254,70 @@ func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedRuleWithOperations) DeepCopyInto(out *NamedRuleWithOperations) { + *out = *in + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.RuleWithOperations.DeepCopyInto(&out.RuleWithOperations) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedRuleWithOperations. +func (in *NamedRuleWithOperations) DeepCopy() *NamedRuleWithOperations { + if in == nil { + return nil + } + out := new(NamedRuleWithOperations) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParamKind) DeepCopyInto(out *ParamKind) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamKind. +func (in *ParamKind) DeepCopy() *ParamKind { + if in == nil { + return nil + } + out := new(ParamKind) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParamRef) DeepCopyInto(out *ParamRef) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ParameterNotFoundAction != nil { + in, out := &in.ParameterNotFoundAction, &out.ParameterNotFoundAction + *out = new(ParameterNotFoundActionType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamRef. +func (in *ParamRef) DeepCopy() *ParamRef { + if in == nil { + return nil + } + out := new(ParamRef) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. func (in *Rule) DeepCopyInto(out *Rule) { *out = *in @@ -261,6 +402,260 @@ func (in *ServiceReference) DeepCopy() *ServiceReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TypeChecking) DeepCopyInto(out *TypeChecking) { + *out = *in + if in.ExpressionWarnings != nil { + in, out := &in.ExpressionWarnings, &out.ExpressionWarnings + *out = make([]ExpressionWarning, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypeChecking. +func (in *TypeChecking) DeepCopy() *TypeChecking { + if in == nil { + return nil + } + out := new(TypeChecking) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicy) DeepCopyInto(out *ValidatingAdmissionPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicy. +func (in *ValidatingAdmissionPolicy) DeepCopy() *ValidatingAdmissionPolicy { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingAdmissionPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyBinding) DeepCopyInto(out *ValidatingAdmissionPolicyBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBinding. +func (in *ValidatingAdmissionPolicyBinding) DeepCopy() *ValidatingAdmissionPolicyBinding { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingAdmissionPolicyBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyBindingList) DeepCopyInto(out *ValidatingAdmissionPolicyBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ValidatingAdmissionPolicyBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBindingList. 
+func (in *ValidatingAdmissionPolicyBindingList) DeepCopy() *ValidatingAdmissionPolicyBindingList { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingAdmissionPolicyBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyBindingSpec) DeepCopyInto(out *ValidatingAdmissionPolicyBindingSpec) { + *out = *in + if in.ParamRef != nil { + in, out := &in.ParamRef, &out.ParamRef + *out = new(ParamRef) + (*in).DeepCopyInto(*out) + } + if in.MatchResources != nil { + in, out := &in.MatchResources, &out.MatchResources + *out = new(MatchResources) + (*in).DeepCopyInto(*out) + } + if in.ValidationActions != nil { + in, out := &in.ValidationActions, &out.ValidationActions + *out = make([]ValidationAction, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBindingSpec. +func (in *ValidatingAdmissionPolicyBindingSpec) DeepCopy() *ValidatingAdmissionPolicyBindingSpec { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyList) DeepCopyInto(out *ValidatingAdmissionPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ValidatingAdmissionPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyList. +func (in *ValidatingAdmissionPolicyList) DeepCopy() *ValidatingAdmissionPolicyList { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingAdmissionPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValidatingAdmissionPolicySpec) DeepCopyInto(out *ValidatingAdmissionPolicySpec) { + *out = *in + if in.ParamKind != nil { + in, out := &in.ParamKind, &out.ParamKind + *out = new(ParamKind) + **out = **in + } + if in.MatchConstraints != nil { + in, out := &in.MatchConstraints, &out.MatchConstraints + *out = new(MatchResources) + (*in).DeepCopyInto(*out) + } + if in.Validations != nil { + in, out := &in.Validations, &out.Validations + *out = make([]Validation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.AuditAnnotations != nil { + in, out := &in.AuditAnnotations, &out.AuditAnnotations + *out = make([]AuditAnnotation, len(*in)) + copy(*out, *in) + } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make([]Variable, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicySpec. +func (in *ValidatingAdmissionPolicySpec) DeepCopy() *ValidatingAdmissionPolicySpec { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyStatus) DeepCopyInto(out *ValidatingAdmissionPolicyStatus) { + *out = *in + if in.TypeChecking != nil { + in, out := &in.TypeChecking, &out.TypeChecking + *out = new(TypeChecking) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyStatus. +func (in *ValidatingAdmissionPolicyStatus) DeepCopy() *ValidatingAdmissionPolicyStatus { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) { *out = *in @@ -391,6 +786,43 @@ func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Validation) DeepCopyInto(out *Validation) { + *out = *in + if in.Reason != nil { + in, out := &in.Reason, &out.Reason + *out = new(metav1.StatusReason) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Validation. +func (in *Validation) DeepCopy() *Validation { + if in == nil { + return nil + } + out := new(Validation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Variable) DeepCopyInto(out *Variable) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Variable. 
+func (in *Variable) DeepCopy() *Variable { + if in == nil { + return nil + } + out := new(Variable) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { *out = *in diff --git a/vendor/k8s.io/api/admissionregistration/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..0862bb1f2d --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,70 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *MutatingWebhookConfiguration) APILifecycleIntroduced() (major, minor int) { + return 1, 16 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *MutatingWebhookConfigurationList) APILifecycleIntroduced() (major, minor int) { + return 1, 16 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicy) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicyBinding) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicyBindingList) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *ValidatingAdmissionPolicyList) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingWebhookConfiguration) APILifecycleIntroduced() (major, minor int) { + return 1, 16 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingWebhookConfigurationList) APILifecycleIntroduced() (major, minor int) { + return 1, 16 +} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go index 4f1373ec5a..111cc72874 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto +// source: k8s.io/api/admissionregistration/v1alpha1/generated.proto package v1alpha1 @@ -48,7 +48,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *AuditAnnotation) Reset() { *m = AuditAnnotation{} } func (*AuditAnnotation) ProtoMessage() {} func (*AuditAnnotation) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{0} + return fileDescriptor_2c49182728ae0af5, []int{0} } func (m *AuditAnnotation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -76,7 +76,7 @@ var xxx_messageInfo_AuditAnnotation proto.InternalMessageInfo func (m *ExpressionWarning) Reset() { *m = ExpressionWarning{} } func (*ExpressionWarning) ProtoMessage() {} func (*ExpressionWarning) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{1} + return fileDescriptor_2c49182728ae0af5, []int{1} } func (m *ExpressionWarning) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -104,7 +104,7 @@ var xxx_messageInfo_ExpressionWarning proto.InternalMessageInfo func (m *MatchCondition) Reset() { *m = MatchCondition{} } func (*MatchCondition) ProtoMessage() {} func (*MatchCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{2} + return fileDescriptor_2c49182728ae0af5, []int{2} } func (m *MatchCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -132,7 +132,7 @@ var xxx_messageInfo_MatchCondition proto.InternalMessageInfo func (m *MatchResources) Reset() { *m = MatchResources{} } func (*MatchResources) ProtoMessage() {} func (*MatchResources) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{3} + return fileDescriptor_2c49182728ae0af5, []int{3} } func (m *MatchResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -160,7 +160,7 @@ var xxx_messageInfo_MatchResources proto.InternalMessageInfo func (m *NamedRuleWithOperations) Reset() { *m = NamedRuleWithOperations{} } func (*NamedRuleWithOperations) ProtoMessage() {} func (*NamedRuleWithOperations) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{4} + return fileDescriptor_2c49182728ae0af5, []int{4} } func (m 
*NamedRuleWithOperations) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -188,7 +188,7 @@ var xxx_messageInfo_NamedRuleWithOperations proto.InternalMessageInfo func (m *ParamKind) Reset() { *m = ParamKind{} } func (*ParamKind) ProtoMessage() {} func (*ParamKind) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{5} + return fileDescriptor_2c49182728ae0af5, []int{5} } func (m *ParamKind) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -216,7 +216,7 @@ var xxx_messageInfo_ParamKind proto.InternalMessageInfo func (m *ParamRef) Reset() { *m = ParamRef{} } func (*ParamRef) ProtoMessage() {} func (*ParamRef) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{6} + return fileDescriptor_2c49182728ae0af5, []int{6} } func (m *ParamRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -244,7 +244,7 @@ var xxx_messageInfo_ParamRef proto.InternalMessageInfo func (m *TypeChecking) Reset() { *m = TypeChecking{} } func (*TypeChecking) ProtoMessage() {} func (*TypeChecking) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{7} + return fileDescriptor_2c49182728ae0af5, []int{7} } func (m *TypeChecking) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -272,7 +272,7 @@ var xxx_messageInfo_TypeChecking proto.InternalMessageInfo func (m *ValidatingAdmissionPolicy) Reset() { *m = ValidatingAdmissionPolicy{} } func (*ValidatingAdmissionPolicy) ProtoMessage() {} func (*ValidatingAdmissionPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{8} + return fileDescriptor_2c49182728ae0af5, []int{8} } func (m *ValidatingAdmissionPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -300,7 +300,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicy proto.InternalMessageInfo func (m *ValidatingAdmissionPolicyBinding) Reset() { *m = ValidatingAdmissionPolicyBinding{} } func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {} func (*ValidatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{9} + return fileDescriptor_2c49182728ae0af5, []int{9} } func (m *ValidatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -328,7 +328,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBinding proto.InternalMessageInfo func (m *ValidatingAdmissionPolicyBindingList) Reset() { *m = ValidatingAdmissionPolicyBindingList{} } func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {} func (*ValidatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{10} + return fileDescriptor_2c49182728ae0af5, []int{10} } func (m *ValidatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -356,7 +356,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingList proto.InternalMessageIn func (m *ValidatingAdmissionPolicyBindingSpec) Reset() { *m = ValidatingAdmissionPolicyBindingSpec{} } func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {} func (*ValidatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{11} + return fileDescriptor_2c49182728ae0af5, []int{11} } func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -384,7 +384,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec proto.InternalMessageIn func (m *ValidatingAdmissionPolicyList) Reset() { *m = ValidatingAdmissionPolicyList{} } func 
(*ValidatingAdmissionPolicyList) ProtoMessage() {} func (*ValidatingAdmissionPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{12} + return fileDescriptor_2c49182728ae0af5, []int{12} } func (m *ValidatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -412,7 +412,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyList proto.InternalMessageInfo func (m *ValidatingAdmissionPolicySpec) Reset() { *m = ValidatingAdmissionPolicySpec{} } func (*ValidatingAdmissionPolicySpec) ProtoMessage() {} func (*ValidatingAdmissionPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{13} + return fileDescriptor_2c49182728ae0af5, []int{13} } func (m *ValidatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -440,7 +440,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicySpec proto.InternalMessageInfo func (m *ValidatingAdmissionPolicyStatus) Reset() { *m = ValidatingAdmissionPolicyStatus{} } func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {} func (*ValidatingAdmissionPolicyStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{14} + return fileDescriptor_2c49182728ae0af5, []int{14} } func (m *ValidatingAdmissionPolicyStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -468,7 +468,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyStatus proto.InternalMessageInfo func (m *Validation) Reset() { *m = Validation{} } func (*Validation) ProtoMessage() {} func (*Validation) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{15} + return fileDescriptor_2c49182728ae0af5, []int{15} } func (m *Validation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -496,7 +496,7 @@ var xxx_messageInfo_Validation proto.InternalMessageInfo func (m *Variable) Reset() { *m = Variable{} } func (*Variable) ProtoMessage() {} func (*Variable) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{16} + return fileDescriptor_2c49182728ae0af5, []int{16} } func (m *Variable) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -542,106 +542,105 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto", fileDescriptor_c3be8d256e3ae3cf) -} - -var fileDescriptor_c3be8d256e3ae3cf = []byte{ - // 1509 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcb, 0x6f, 0x1b, 0x45, - 0x18, 0xcf, 0xc6, 0x6e, 0x12, 0x8f, 0xf3, 0xf2, 0xd0, 0x2a, 0x6e, 0xa0, 0xde, 0x68, 0x55, 0xa1, - 0x46, 0x82, 0x35, 0x49, 0x0b, 0x85, 0x0a, 0x09, 0x65, 0xfb, 0xa2, 0x8f, 0x3c, 0x34, 0x45, 0x89, - 0x84, 0x40, 0x62, 0xb2, 0x3b, 0x71, 0xa6, 0xf6, 0x3e, 0xd8, 0x59, 0x9b, 0x46, 0x20, 0x51, 0x89, - 0x0b, 0xdc, 0x38, 0x70, 0xe1, 0xca, 0x9f, 0xc0, 0x7f, 0xc0, 0xad, 0xc7, 0x1e, 0xcb, 0x01, 0x8b, - 0x9a, 0x0b, 0x7f, 0x01, 0x48, 0xb9, 0x80, 0x66, 0x76, 0xf6, 0x69, 0x9b, 0xd8, 0x25, 0x70, 0xf3, - 0x7c, 0x8f, 0xdf, 0xf7, 0x98, 0xef, 0xfb, 0xf6, 0x1b, 0x03, 0xd4, 0x7c, 0x9b, 0xe9, 0xd4, 0xad, - 0x37, 0xdb, 0xfb, 0xc4, 0x77, 0x48, 0x40, 0x58, 0xbd, 0x43, 0x1c, 0xcb, 0xf5, 0xeb, 0x92, 0x81, - 0x3d, 0x5a, 0xc7, 0x96, 0x4d, 0x19, 0xa3, 0xae, 0xe3, 0x93, 0x06, 0x65, 0x81, 0x8f, 0x03, 0xea, - 0x3a, 0xf5, 0xce, 0x1a, 0x6e, 0x79, 0x87, 0x78, 0xad, 0xde, 0x20, 0x0e, 0xf1, 0x71, 0x40, 0x2c, - 0xdd, 0xf3, 0xdd, 0xc0, 0x85, 0xab, 0xa1, 0xaa, 0x8e, 0x3d, 0xaa, 0x0f, 0x54, 0xd5, 0x23, 0xd5, - 0xe5, 0xd7, 0x1b, 0x34, 
0x38, 0x6c, 0xef, 0xeb, 0xa6, 0x6b, 0xd7, 0x1b, 0x6e, 0xc3, 0xad, 0x0b, - 0x84, 0xfd, 0xf6, 0x81, 0x38, 0x89, 0x83, 0xf8, 0x15, 0x22, 0x2f, 0x5f, 0x1e, 0xc1, 0xa9, 0xbc, - 0x3b, 0xcb, 0x57, 0x12, 0x25, 0x1b, 0x9b, 0x87, 0xd4, 0x21, 0xfe, 0x51, 0xdd, 0x6b, 0x36, 0x38, - 0x81, 0xd5, 0x6d, 0x12, 0xe0, 0x41, 0x5a, 0xf5, 0x61, 0x5a, 0x7e, 0xdb, 0x09, 0xa8, 0x4d, 0xfa, - 0x14, 0xde, 0x3a, 0x49, 0x81, 0x99, 0x87, 0xc4, 0xc6, 0x79, 0x3d, 0x8d, 0x81, 0x85, 0x8d, 0xb6, - 0x45, 0x83, 0x0d, 0xc7, 0x71, 0x03, 0x11, 0x04, 0xbc, 0x00, 0x0a, 0x4d, 0x72, 0x54, 0x55, 0x56, - 0x94, 0x4b, 0x25, 0xa3, 0xfc, 0xa4, 0xab, 0x4e, 0xf4, 0xba, 0x6a, 0xe1, 0x1e, 0x39, 0x42, 0x9c, - 0x0e, 0x37, 0xc0, 0x42, 0x07, 0xb7, 0xda, 0xe4, 0xe6, 0x23, 0xcf, 0x27, 0x22, 0x05, 0xd5, 0x49, - 0x21, 0xba, 0x24, 0x45, 0x17, 0x76, 0xb3, 0x6c, 0x94, 0x97, 0xd7, 0x5a, 0xa0, 0x92, 0x9c, 0xf6, - 0xb0, 0xef, 0x50, 0xa7, 0x01, 0x5f, 0x03, 0x33, 0x07, 0x94, 0xb4, 0x2c, 0x44, 0x0e, 0x24, 0xe0, - 0xa2, 0x04, 0x9c, 0xb9, 0x25, 0xe9, 0x28, 0x96, 0x80, 0xab, 0x60, 0xfa, 0xb3, 0x50, 0xb1, 0x5a, - 0x10, 0xc2, 0x0b, 0x52, 0x78, 0x5a, 0xe2, 0xa1, 0x88, 0xaf, 0x1d, 0x80, 0xf9, 0x4d, 0x1c, 0x98, - 0x87, 0xd7, 0x5d, 0xc7, 0xa2, 0x22, 0xc2, 0x15, 0x50, 0x74, 0xb0, 0x4d, 0x64, 0x88, 0xb3, 0x52, - 0xb3, 0xb8, 0x85, 0x6d, 0x82, 0x04, 0x07, 0xae, 0x03, 0x40, 0xf2, 0xf1, 0x41, 0x29, 0x07, 0x52, - 0xa1, 0xa5, 0xa4, 0xb4, 0x9f, 0x8b, 0xd2, 0x10, 0x22, 0xcc, 0x6d, 0xfb, 0x26, 0x61, 0xf0, 0x11, - 0xa8, 0x70, 0x38, 0xe6, 0x61, 0x93, 0x3c, 0x20, 0x2d, 0x62, 0x06, 0xae, 0x2f, 0xac, 0x96, 0xd7, - 0x2f, 0xeb, 0x49, 0x9d, 0xc6, 0x37, 0xa6, 0x7b, 0xcd, 0x06, 0x27, 0x30, 0x9d, 0x17, 0x86, 0xde, - 0x59, 0xd3, 0xef, 0xe3, 0x7d, 0xd2, 0x8a, 0x54, 0x8d, 0x73, 0xbd, 0xae, 0x5a, 0xd9, 0xca, 0x23, - 0xa2, 0x7e, 0x23, 0xd0, 0x05, 0xf3, 0xee, 0xfe, 0x43, 0x62, 0x06, 0xb1, 0xd9, 0xc9, 0x17, 0x37, - 0x0b, 0x7b, 0x5d, 0x75, 0x7e, 0x3b, 0x03, 0x87, 0x72, 0xf0, 0xf0, 0x4b, 0x30, 0xe7, 0xcb, 0xb8, - 0x51, 0xbb, 0x45, 0x58, 0xb5, 0xb0, 0x52, 0xb8, 0x54, 0x5e, 0x37, 0xf4, 0x91, 0xdb, 0x51, 0xe7, - 0x81, 0x59, 0x5c, 0x79, 0x8f, 0x06, 0x87, 0xdb, 0x1e, 0x09, 0xf9, 0xcc, 0x38, 0x27, 0x13, 0x3f, - 0x87, 0xd2, 0x06, 0x50, 0xd6, 0x1e, 0xfc, 0x4e, 0x01, 0x67, 0xc9, 0x23, 0xb3, 0xd5, 0xb6, 0x48, - 0x46, 0xae, 0x5a, 0x3c, 0x35, 0x47, 0x5e, 0x91, 0x8e, 0x9c, 0xbd, 0x39, 0xc0, 0x0e, 0x1a, 0x68, - 0x1d, 0xde, 0x00, 0x65, 0x9b, 0x17, 0xc5, 0x8e, 0xdb, 0xa2, 0xe6, 0x51, 0x75, 0x5a, 0x94, 0x92, - 0xd6, 0xeb, 0xaa, 0xe5, 0xcd, 0x84, 0x7c, 0xdc, 0x55, 0x17, 0x52, 0xc7, 0x0f, 0x8e, 0x3c, 0x82, - 0xd2, 0x6a, 0xda, 0x33, 0x05, 0x2c, 0x0d, 0xf1, 0x0a, 0x5e, 0x4d, 0x32, 0x2f, 0x4a, 0xa3, 0xaa, - 0xac, 0x14, 0x2e, 0x95, 0x8c, 0x4a, 0x3a, 0x63, 0x82, 0x81, 0xb2, 0x72, 0xf0, 0x2b, 0x05, 0x40, - 0xbf, 0x0f, 0x4f, 0x16, 0xca, 0xd5, 0x51, 0xf2, 0xa5, 0x0f, 0x48, 0xd2, 0xb2, 0x4c, 0x12, 0xec, - 0xe7, 0xa1, 0x01, 0xe6, 0x34, 0x0c, 0x4a, 0x3b, 0xd8, 0xc7, 0xf6, 0x3d, 0xea, 0x58, 0xbc, 0xef, - 0xb0, 0x47, 0x77, 0x89, 0x2f, 0xfa, 0x4e, 0xc9, 0xf6, 0xdd, 0xc6, 0xce, 0x1d, 0xc9, 0x41, 0x29, - 0x29, 0xde, 0xcd, 0x4d, 0xea, 0x58, 0xb2, 0x4b, 0xe3, 0x6e, 0xe6, 0x78, 0x48, 0x70, 0xb4, 0x1f, - 0x27, 0xc1, 0x8c, 0xb0, 0xc1, 0x27, 0xc7, 0xc9, 0xcd, 0x5f, 0x07, 0xa5, 0xb8, 0xa1, 0x24, 0x6a, - 0x45, 0x8a, 0x95, 0xe2, 0xe6, 0x43, 0x89, 0x0c, 0xfc, 0x18, 0xcc, 0xb0, 0xa8, 0xcd, 0x0a, 0x2f, - 0xde, 0x66, 0xb3, 0x7c, 0xd6, 0xc5, 0x0d, 0x16, 0x43, 0xc2, 0x00, 0x2c, 0x79, 0xdc, 0x7b, 0x12, - 0x10, 0x7f, 0xcb, 0x0d, 0x6e, 0xb9, 0x6d, 0xc7, 0xda, 0x30, 0x79, 0xf6, 0xaa, 0x45, 0xe1, 0xdd, - 0xb5, 0x5e, 0x57, 0x5d, 0xda, 0x19, 0x2c, 0x72, 
0xdc, 0x55, 0x5f, 0x1e, 0xc2, 0x12, 0x65, 0x36, - 0x0c, 0x5a, 0xfb, 0x5e, 0x01, 0xb3, 0x5c, 0xe2, 0xfa, 0x21, 0x31, 0x9b, 0x7c, 0x40, 0x7f, 0xad, - 0x00, 0x48, 0xf2, 0x63, 0x3b, 0xac, 0xb6, 0xf2, 0xfa, 0xbb, 0x63, 0xb4, 0x57, 0xdf, 0xec, 0x4f, - 0x6a, 0xa6, 0x8f, 0xc5, 0xd0, 0x00, 0x9b, 0xda, 0x2f, 0x93, 0xe0, 0xfc, 0x2e, 0x6e, 0x51, 0x0b, - 0x07, 0xd4, 0x69, 0x6c, 0x44, 0xe6, 0xc2, 0x66, 0x81, 0x9f, 0x80, 0x19, 0x9e, 0x60, 0x0b, 0x07, - 0x58, 0x0e, 0xdb, 0x37, 0x46, 0xbb, 0x8e, 0x70, 0xc4, 0x6d, 0x92, 0x00, 0x27, 0x45, 0x97, 0xd0, - 0x50, 0x8c, 0x0a, 0x1f, 0x82, 0x22, 0xf3, 0x88, 0x29, 0x5b, 0xe5, 0xfd, 0x31, 0x62, 0x1f, 0xea, - 0xf5, 0x03, 0x8f, 0x98, 0x49, 0x35, 0xf2, 0x13, 0x12, 0x36, 0xa0, 0x0f, 0xa6, 0x58, 0x80, 0x83, - 0x36, 0x93, 0xa5, 0x75, 0xf7, 0x54, 0xac, 0x09, 0x44, 0x63, 0x5e, 0xda, 0x9b, 0x0a, 0xcf, 0x48, - 0x5a, 0xd2, 0xfe, 0x54, 0xc0, 0xca, 0x50, 0x5d, 0x83, 0x3a, 0x16, 0xaf, 0x87, 0xff, 0x3e, 0xcd, - 0x9f, 0x66, 0xd2, 0xbc, 0x7d, 0x1a, 0x81, 0x4b, 0xe7, 0x87, 0x65, 0x5b, 0xfb, 0x43, 0x01, 0x17, - 0x4f, 0x52, 0xbe, 0x4f, 0x59, 0x00, 0x3f, 0xea, 0x8b, 0x5e, 0x1f, 0xb1, 0xe7, 0x29, 0x0b, 0x63, - 0x8f, 0xd7, 0x9b, 0x88, 0x92, 0x8a, 0xdc, 0x03, 0x67, 0x68, 0x40, 0x6c, 0x3e, 0x8c, 0x79, 0x77, - 0xdd, 0x3b, 0xc5, 0xd0, 0x8d, 0x39, 0x69, 0xf7, 0xcc, 0x1d, 0x6e, 0x01, 0x85, 0x86, 0xb4, 0x6f, - 0x0a, 0x27, 0x07, 0xce, 0xf3, 0xc4, 0x47, 0xb4, 0x27, 0x88, 0x5b, 0xc9, 0x14, 0x8d, 0xaf, 0x71, - 0x27, 0xe6, 0xa0, 0x94, 0x14, 0x1f, 0x90, 0x9e, 0x9c, 0xbf, 0x03, 0xf6, 0x90, 0x93, 0x22, 0x8a, - 0x46, 0x77, 0x38, 0x20, 0xa3, 0x13, 0x8a, 0x21, 0x61, 0x1b, 0xcc, 0xdb, 0x99, 0xc5, 0x4b, 0xb6, - 0xca, 0x3b, 0x63, 0x18, 0xc9, 0x6e, 0x6e, 0xe1, 0xca, 0x93, 0xa5, 0xa1, 0x9c, 0x11, 0xb8, 0x07, - 0x2a, 0x1d, 0x99, 0x31, 0xd7, 0x09, 0xa7, 0x66, 0xb8, 0x6d, 0x94, 0x8c, 0x55, 0xbe, 0xa8, 0xed, - 0xe6, 0x99, 0xc7, 0x5d, 0x75, 0x31, 0x4f, 0x44, 0xfd, 0x18, 0xda, 0xef, 0x0a, 0xb8, 0x30, 0xf4, - 0x2e, 0xfe, 0x87, 0xea, 0xa3, 0xd9, 0xea, 0xbb, 0x71, 0x2a, 0xd5, 0x37, 0xb8, 0xec, 0x7e, 0x98, - 0xfa, 0x87, 0x50, 0x45, 0xbd, 0x61, 0x50, 0xf2, 0xa2, 0xfd, 0x40, 0xc6, 0x7a, 0x65, 0xdc, 0xe2, - 0xe1, 0xba, 0xc6, 0x1c, 0xff, 0x7e, 0xc7, 0x47, 0x94, 0xa0, 0xc2, 0xcf, 0xc1, 0xa2, 0x2d, 0x5f, - 0x08, 0x1c, 0x80, 0x3a, 0x41, 0xb4, 0x05, 0xfd, 0x8b, 0x0a, 0x3a, 0xdb, 0xeb, 0xaa, 0x8b, 0x9b, - 0x39, 0x58, 0xd4, 0x67, 0x08, 0xb6, 0x40, 0x39, 0xa9, 0x80, 0x68, 0x6d, 0x7e, 0xf3, 0x05, 0x52, - 0xee, 0x3a, 0xc6, 0x4b, 0x32, 0xc7, 0xe5, 0x84, 0xc6, 0x50, 0x1a, 0x1e, 0xde, 0x07, 0x73, 0x07, - 0x98, 0xb6, 0xda, 0x3e, 0x91, 0x0b, 0x69, 0xb8, 0x41, 0xbc, 0xca, 0x97, 0xc5, 0x5b, 0x69, 0xc6, - 0x71, 0x57, 0xad, 0x64, 0x08, 0x62, 0x5b, 0xc8, 0x2a, 0xc3, 0xc7, 0x0a, 0x58, 0xc4, 0xd9, 0xe7, - 0x23, 0xab, 0x9e, 0x11, 0x11, 0x5c, 0x1b, 0x23, 0x82, 0xdc, 0x0b, 0xd4, 0xa8, 0xca, 0x30, 0x16, - 0x73, 0x0c, 0x86, 0xfa, 0xac, 0xc1, 0x2f, 0xc0, 0x82, 0x9d, 0x79, 0xdd, 0xb1, 0xea, 0x94, 0x70, - 0x60, 0xec, 0xab, 0x8b, 0x11, 0x92, 0x97, 0x6c, 0x96, 0xce, 0x50, 0xde, 0x14, 0xb4, 0x40, 0xa9, - 0x83, 0x7d, 0x8a, 0xf7, 0xf9, 0x43, 0x63, 0x5a, 0xd8, 0xbd, 0x3c, 0xd6, 0xd5, 0x85, 0xba, 0xc9, - 0x7e, 0x19, 0x51, 0x18, 0x4a, 0x80, 0xb5, 0x9f, 0x26, 0x81, 0x7a, 0xc2, 0xa7, 0x1c, 0xde, 0x05, - 0xd0, 0xdd, 0x67, 0xc4, 0xef, 0x10, 0xeb, 0x76, 0xf8, 0xc6, 0x8f, 0x36, 0xe8, 0x42, 0xb2, 0x5e, - 0x6d, 0xf7, 0x49, 0xa0, 0x01, 0x5a, 0xd0, 0x06, 0xb3, 0x41, 0x6a, 0xf3, 0x1b, 0xe7, 0x45, 0x20, - 0x03, 0x4b, 0x2f, 0x8e, 0xc6, 0x62, 0xaf, 0xab, 0x66, 0x56, 0x49, 0x94, 0x81, 0x87, 0x26, 0x00, - 0x66, 0x72, 0x7b, 0x61, 0x03, 0xd4, 0x47, 0x1b, 0x67, 0xc9, 0x9d, 0xc5, 
0x9f, 0xa0, 0xd4, 0x75, - 0xa5, 0x60, 0xb5, 0xbf, 0x14, 0x00, 0x92, 0xae, 0x80, 0x17, 0x41, 0xea, 0x19, 0x2f, 0xbf, 0x62, - 0x45, 0x0e, 0x81, 0x52, 0x74, 0xb8, 0x0a, 0xa6, 0x6d, 0xc2, 0x18, 0x6e, 0x44, 0xef, 0x80, 0xf8, - 0x5f, 0x86, 0xcd, 0x90, 0x8c, 0x22, 0x3e, 0xdc, 0x03, 0x53, 0x3e, 0xc1, 0xcc, 0x75, 0xe4, 0xff, - 0x11, 0xef, 0xf1, 0xb5, 0x0a, 0x09, 0xca, 0x71, 0x57, 0x5d, 0x1b, 0xe5, 0x5f, 0x20, 0x5d, 0x6e, - 0x61, 0x42, 0x09, 0x49, 0x38, 0x78, 0x1b, 0x54, 0xa4, 0x8d, 0x94, 0xc3, 0x61, 0xd7, 0x9e, 0x97, - 0xde, 0x54, 0x36, 0xf3, 0x02, 0xa8, 0x5f, 0x47, 0xbb, 0x0b, 0x66, 0xa2, 0xea, 0x82, 0x55, 0x50, - 0x4c, 0x7d, 0xbe, 0xc3, 0xc0, 0x05, 0x25, 0x97, 0x98, 0xc9, 0xc1, 0x89, 0x31, 0xb6, 0x9f, 0x3c, - 0xaf, 0x4d, 0x3c, 0x7d, 0x5e, 0x9b, 0x78, 0xf6, 0xbc, 0x36, 0xf1, 0xb8, 0x57, 0x53, 0x9e, 0xf4, - 0x6a, 0xca, 0xd3, 0x5e, 0x4d, 0x79, 0xd6, 0xab, 0x29, 0xbf, 0xf6, 0x6a, 0xca, 0xb7, 0xbf, 0xd5, - 0x26, 0x3e, 0x5c, 0x1d, 0xf9, 0x5f, 0xbc, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xad, 0xe2, 0x61, - 0x96, 0x0a, 0x14, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/admissionregistration/v1alpha1/generated.proto", fileDescriptor_2c49182728ae0af5) +} + +var fileDescriptor_2c49182728ae0af5 = []byte{ + // 1498 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x5b, 0x6f, 0x1b, 0xc5, + 0x17, 0xcf, 0xc6, 0x6e, 0x12, 0x8f, 0x73, 0xb1, 0xe7, 0xdf, 0x2a, 0x6e, 0xfe, 0xd4, 0x1b, 0xad, + 0x2a, 0xd4, 0x48, 0xb0, 0x26, 0x69, 0xa1, 0xb4, 0x42, 0x42, 0xd9, 0xde, 0xe8, 0x25, 0x17, 0x4d, + 0x51, 0x22, 0x21, 0x90, 0x98, 0xec, 0x4e, 0xec, 0x69, 0xbc, 0x17, 0x76, 0xd6, 0xa1, 0x11, 0x48, + 0x54, 0xe2, 0x05, 0xde, 0x78, 0xe0, 0x85, 0x57, 0x3e, 0x02, 0xdf, 0x80, 0xb7, 0x3e, 0xf6, 0xb1, + 0x3c, 0x60, 0x51, 0xf3, 0xc2, 0x27, 0x00, 0x29, 0x2f, 0xa0, 0x99, 0x9d, 0xbd, 0xda, 0x26, 0x76, + 0x09, 0xbc, 0x79, 0xce, 0x9c, 0xf3, 0xfb, 0xcd, 0x39, 0x73, 0xce, 0xd9, 0x33, 0x06, 0xd7, 0x0e, + 0xde, 0x66, 0x3a, 0x75, 0x1b, 0xd8, 0xa3, 0x0d, 0x6c, 0xd9, 0x94, 0x31, 0xea, 0x3a, 0x3e, 0x69, + 0x52, 0x16, 0xf8, 0x38, 0xa0, 0xae, 0xd3, 0x38, 0x5c, 0xc5, 0x6d, 0xaf, 0x85, 0x57, 0x1b, 0x4d, + 0xe2, 0x10, 0x1f, 0x07, 0xc4, 0xd2, 0x3d, 0xdf, 0x0d, 0x5c, 0xb8, 0x12, 0x9a, 0xea, 0xd8, 0xa3, + 0xfa, 0x40, 0x53, 0x3d, 0x32, 0x5d, 0x7a, 0xbd, 0x49, 0x83, 0x56, 0x67, 0x4f, 0x37, 0x5d, 0xbb, + 0xd1, 0x74, 0x9b, 0x6e, 0x43, 0x20, 0xec, 0x75, 0xf6, 0xc5, 0x4a, 0x2c, 0xc4, 0xaf, 0x10, 0x79, + 0xe9, 0xf2, 0x08, 0x87, 0xca, 0x1f, 0x67, 0xe9, 0x4a, 0x62, 0x64, 0x63, 0xb3, 0x45, 0x1d, 0xe2, + 0x1f, 0x35, 0xbc, 0x83, 0x26, 0x17, 0xb0, 0x86, 0x4d, 0x02, 0x3c, 0xc8, 0xaa, 0x31, 0xcc, 0xca, + 0xef, 0x38, 0x01, 0xb5, 0x49, 0x9f, 0xc1, 0x5b, 0x27, 0x19, 0x30, 0xb3, 0x45, 0x6c, 0x9c, 0xb7, + 0xd3, 0x18, 0x58, 0x58, 0xef, 0x58, 0x34, 0x58, 0x77, 0x1c, 0x37, 0x10, 0x4e, 0xc0, 0x0b, 0xa0, + 0x70, 0x40, 0x8e, 0x6a, 0xca, 0xb2, 0x72, 0xa9, 0x64, 0x94, 0x9f, 0x76, 0xd5, 0x89, 0x5e, 0x57, + 0x2d, 0xdc, 0x27, 0x47, 0x88, 0xcb, 0xe1, 0x3a, 0x58, 0x38, 0xc4, 0xed, 0x0e, 0xb9, 0xf5, 0xd8, + 0xf3, 0x89, 0x08, 0x41, 0x6d, 0x52, 0xa8, 0x2e, 0x4a, 0xd5, 0x85, 0x9d, 0xec, 0x36, 0xca, 0xeb, + 0x6b, 0x6d, 0x50, 0x4d, 0x56, 0xbb, 0xd8, 0x77, 0xa8, 0xd3, 0x84, 0xaf, 0x81, 0x99, 0x7d, 0x4a, + 0xda, 0x16, 0x22, 0xfb, 0x12, 0xb0, 0x22, 0x01, 0x67, 0x6e, 0x4b, 0x39, 0x8a, 0x35, 0xe0, 0x0a, + 0x98, 0xfe, 0x34, 0x34, 0xac, 0x15, 0x84, 0xf2, 0x82, 0x54, 0x9e, 0x96, 0x78, 0x28, 0xda, 0xd7, + 0xf6, 0xc1, 0xfc, 0x06, 0x0e, 0xcc, 0xd6, 0x0d, 0xd7, 0xb1, 0xa8, 0xf0, 0x70, 0x19, 0x14, 0x1d, + 0x6c, 0x13, 0xe9, 0xe2, 0xac, 0xb4, 0x2c, 
0x6e, 0x62, 0x9b, 0x20, 0xb1, 0x03, 0xd7, 0x00, 0x20, + 0x79, 0xff, 0xa0, 0xd4, 0x03, 0x29, 0xd7, 0x52, 0x5a, 0xda, 0x4f, 0x45, 0x49, 0x84, 0x08, 0x73, + 0x3b, 0xbe, 0x49, 0x18, 0x7c, 0x0c, 0xaa, 0x1c, 0x8e, 0x79, 0xd8, 0x24, 0x0f, 0x49, 0x9b, 0x98, + 0x81, 0xeb, 0x0b, 0xd6, 0xf2, 0xda, 0x65, 0x3d, 0xc9, 0xd3, 0xf8, 0xc6, 0x74, 0xef, 0xa0, 0xc9, + 0x05, 0x4c, 0xe7, 0x89, 0xa1, 0x1f, 0xae, 0xea, 0x0f, 0xf0, 0x1e, 0x69, 0x47, 0xa6, 0xc6, 0xb9, + 0x5e, 0x57, 0xad, 0x6e, 0xe6, 0x11, 0x51, 0x3f, 0x09, 0x74, 0xc1, 0xbc, 0xbb, 0xf7, 0x88, 0x98, + 0x41, 0x4c, 0x3b, 0xf9, 0xf2, 0xb4, 0xb0, 0xd7, 0x55, 0xe7, 0xb7, 0x32, 0x70, 0x28, 0x07, 0x0f, + 0xbf, 0x00, 0x73, 0xbe, 0xf4, 0x1b, 0x75, 0xda, 0x84, 0xd5, 0x0a, 0xcb, 0x85, 0x4b, 0xe5, 0x35, + 0x43, 0x1f, 0xb9, 0x1c, 0x75, 0xee, 0x98, 0xc5, 0x8d, 0x77, 0x69, 0xd0, 0xda, 0xf2, 0x48, 0xb8, + 0xcf, 0x8c, 0x73, 0x32, 0xf0, 0x73, 0x28, 0x4d, 0x80, 0xb2, 0x7c, 0xf0, 0x5b, 0x05, 0x9c, 0x25, + 0x8f, 0xcd, 0x76, 0xc7, 0x22, 0x19, 0xbd, 0x5a, 0xf1, 0xd4, 0x0e, 0xf2, 0x8a, 0x3c, 0xc8, 0xd9, + 0x5b, 0x03, 0x78, 0xd0, 0x40, 0x76, 0x78, 0x13, 0x94, 0x6d, 0x9e, 0x14, 0xdb, 0x6e, 0x9b, 0x9a, + 0x47, 0xb5, 0x69, 0x91, 0x4a, 0x5a, 0xaf, 0xab, 0x96, 0x37, 0x12, 0xf1, 0x71, 0x57, 0x5d, 0x48, + 0x2d, 0xdf, 0x3f, 0xf2, 0x08, 0x4a, 0x9b, 0x69, 0xcf, 0x15, 0xb0, 0x38, 0xe4, 0x54, 0xf0, 0x6a, + 0x12, 0x79, 0x91, 0x1a, 0x35, 0x65, 0xb9, 0x70, 0xa9, 0x64, 0x54, 0xd3, 0x11, 0x13, 0x1b, 0x28, + 0xab, 0x07, 0xbf, 0x54, 0x00, 0xf4, 0xfb, 0xf0, 0x64, 0xa2, 0x5c, 0x1d, 0x25, 0x5e, 0xfa, 0x80, + 0x20, 0x2d, 0xc9, 0x20, 0xc1, 0xfe, 0x3d, 0x34, 0x80, 0x4e, 0xc3, 0xa0, 0xb4, 0x8d, 0x7d, 0x6c, + 0xdf, 0xa7, 0x8e, 0xc5, 0xeb, 0x0e, 0x7b, 0x74, 0x87, 0xf8, 0xa2, 0xee, 0x94, 0x6c, 0xdd, 0xad, + 0x6f, 0xdf, 0x95, 0x3b, 0x28, 0xa5, 0xc5, 0xab, 0xf9, 0x80, 0x3a, 0x96, 0xac, 0xd2, 0xb8, 0x9a, + 0x39, 0x1e, 0x12, 0x3b, 0xda, 0x0f, 0x93, 0x60, 0x46, 0x70, 0xf0, 0xce, 0x71, 0x72, 0xf1, 0x37, + 0x40, 0x29, 0x2e, 0x28, 0x89, 0x5a, 0x95, 0x6a, 0xa5, 0xb8, 0xf8, 0x50, 0xa2, 0x03, 0x3f, 0x02, + 0x33, 0x2c, 0x2a, 0xb3, 0xc2, 0xcb, 0x97, 0xd9, 0x2c, 0xef, 0x75, 0x71, 0x81, 0xc5, 0x90, 0x30, + 0x00, 0x8b, 0x1e, 0x3f, 0x3d, 0x09, 0x88, 0xbf, 0xe9, 0x06, 0xb7, 0xdd, 0x8e, 0x63, 0xad, 0x9b, + 0x3c, 0x7a, 0xb5, 0xa2, 0x38, 0xdd, 0xf5, 0x5e, 0x57, 0x5d, 0xdc, 0x1e, 0xac, 0x72, 0xdc, 0x55, + 0xff, 0x3f, 0x64, 0x4b, 0xa4, 0xd9, 0x30, 0x68, 0xed, 0x3b, 0x05, 0xcc, 0x72, 0x8d, 0x1b, 0x2d, + 0x62, 0x1e, 0xf0, 0x06, 0xfd, 0x95, 0x02, 0x20, 0xc9, 0xb7, 0xed, 0x30, 0xdb, 0xca, 0x6b, 0xef, + 0x8c, 0x51, 0x5e, 0x7d, 0xbd, 0x3f, 0xc9, 0x99, 0xbe, 0x2d, 0x86, 0x06, 0x70, 0x6a, 0x3f, 0x4f, + 0x82, 0xf3, 0x3b, 0xb8, 0x4d, 0x2d, 0x1c, 0x50, 0xa7, 0xb9, 0x1e, 0xd1, 0x85, 0xc5, 0x02, 0x3f, + 0x06, 0x33, 0x3c, 0xc0, 0x16, 0x0e, 0xb0, 0x6c, 0xb6, 0x6f, 0x8c, 0x76, 0x1d, 0x61, 0x8b, 0xdb, + 0x20, 0x01, 0x4e, 0x92, 0x2e, 0x91, 0xa1, 0x18, 0x15, 0x3e, 0x02, 0x45, 0xe6, 0x11, 0x53, 0x96, + 0xca, 0x7b, 0x63, 0xf8, 0x3e, 0xf4, 0xd4, 0x0f, 0x3d, 0x62, 0x26, 0xd9, 0xc8, 0x57, 0x48, 0x70, + 0x40, 0x1f, 0x4c, 0xb1, 0x00, 0x07, 0x1d, 0x26, 0x53, 0xeb, 0xde, 0xa9, 0xb0, 0x09, 0x44, 0x63, + 0x5e, 0xf2, 0x4d, 0x85, 0x6b, 0x24, 0x99, 0xb4, 0x3f, 0x14, 0xb0, 0x3c, 0xd4, 0xd6, 0xa0, 0x8e, + 0xc5, 0xf3, 0xe1, 0xdf, 0x0f, 0xf3, 0x27, 0x99, 0x30, 0x6f, 0x9d, 0x86, 0xe3, 0xf2, 0xf0, 0xc3, + 0xa2, 0xad, 0xfd, 0xae, 0x80, 0x8b, 0x27, 0x19, 0x3f, 0xa0, 0x2c, 0x80, 0x1f, 0xf6, 0x79, 0xaf, + 0x8f, 0x58, 0xf3, 0x94, 0x85, 0xbe, 0xc7, 0xe3, 0x4d, 0x24, 0x49, 0x79, 0xee, 0x81, 0x33, 0x34, + 0x20, 0x36, 0x6f, 0xc6, 0xbc, 0xba, 0xee, 0x9f, 0xa2, 0xeb, 0xc6, 
0x9c, 0xe4, 0x3d, 0x73, 0x97, + 0x33, 0xa0, 0x90, 0x48, 0xfb, 0xba, 0x70, 0xb2, 0xe3, 0x3c, 0x4e, 0xbc, 0x45, 0x7b, 0x42, 0xb8, + 0x99, 0x74, 0xd1, 0xf8, 0x1a, 0xb7, 0xe3, 0x1d, 0x94, 0xd2, 0xe2, 0x0d, 0xd2, 0x93, 0xfd, 0x77, + 0xc0, 0x1c, 0x72, 0x92, 0x47, 0x51, 0xeb, 0x0e, 0x1b, 0x64, 0xb4, 0x42, 0x31, 0x24, 0xec, 0x80, + 0x79, 0x3b, 0x33, 0x78, 0xc9, 0x52, 0xb9, 0x36, 0x06, 0x49, 0x76, 0x72, 0x0b, 0x47, 0x9e, 0xac, + 0x0c, 0xe5, 0x48, 0xe0, 0x2e, 0xa8, 0x1e, 0xca, 0x88, 0xb9, 0x4e, 0xd8, 0x35, 0xc3, 0x69, 0xa3, + 0x64, 0xac, 0xf0, 0x41, 0x6d, 0x27, 0xbf, 0x79, 0xdc, 0x55, 0x2b, 0x79, 0x21, 0xea, 0xc7, 0xd0, + 0x7e, 0x53, 0xc0, 0x85, 0xa1, 0x77, 0xf1, 0x1f, 0x64, 0x1f, 0xcd, 0x66, 0xdf, 0xcd, 0x53, 0xc9, + 0xbe, 0xc1, 0x69, 0xf7, 0xfd, 0xd4, 0xdf, 0xb8, 0x2a, 0xf2, 0x0d, 0x83, 0x92, 0x17, 0xcd, 0x07, + 0xd2, 0xd7, 0x2b, 0xe3, 0x26, 0x0f, 0xb7, 0x35, 0xe6, 0xf8, 0xf7, 0x3b, 0x5e, 0xa2, 0x04, 0x15, + 0x7e, 0x06, 0x2a, 0xb6, 0x7c, 0x21, 0x70, 0x00, 0xea, 0x04, 0xd1, 0x14, 0xf4, 0x0f, 0x32, 0xe8, + 0x6c, 0xaf, 0xab, 0x56, 0x36, 0x72, 0xb0, 0xa8, 0x8f, 0x08, 0xb6, 0x41, 0x39, 0xc9, 0x80, 0x68, + 0x6c, 0x7e, 0xf3, 0x25, 0x42, 0xee, 0x3a, 0xc6, 0xff, 0x64, 0x8c, 0xcb, 0x89, 0x8c, 0xa1, 0x34, + 0x3c, 0x7c, 0x00, 0xe6, 0xf6, 0x31, 0x6d, 0x77, 0x7c, 0x22, 0x07, 0xd2, 0x70, 0x82, 0x78, 0x95, + 0x0f, 0x8b, 0xb7, 0xd3, 0x1b, 0xc7, 0x5d, 0xb5, 0x9a, 0x11, 0x88, 0x69, 0x21, 0x6b, 0x0c, 0x9f, + 0x28, 0xa0, 0x82, 0xb3, 0xcf, 0x47, 0x56, 0x3b, 0x23, 0x3c, 0xb8, 0x3e, 0x86, 0x07, 0xb9, 0x17, + 0xa8, 0x51, 0x93, 0x6e, 0x54, 0x72, 0x1b, 0x0c, 0xf5, 0xb1, 0xc1, 0xcf, 0xc1, 0x82, 0x9d, 0x79, + 0xdd, 0xb1, 0xda, 0x94, 0x38, 0xc0, 0xd8, 0x57, 0x17, 0x23, 0x24, 0x2f, 0xd9, 0xac, 0x9c, 0xa1, + 0x3c, 0x15, 0xb4, 0x40, 0xe9, 0x10, 0xfb, 0x14, 0xef, 0xf1, 0x87, 0xc6, 0xb4, 0xe0, 0xbd, 0x3c, + 0xd6, 0xd5, 0x85, 0xb6, 0xc9, 0x7c, 0x19, 0x49, 0x18, 0x4a, 0x80, 0xb5, 0x1f, 0x27, 0x81, 0x7a, + 0xc2, 0xa7, 0x1c, 0xde, 0x03, 0xd0, 0xdd, 0x63, 0xc4, 0x3f, 0x24, 0xd6, 0x9d, 0xf0, 0x8d, 0x1f, + 0x4d, 0xd0, 0x85, 0x64, 0xbc, 0xda, 0xea, 0xd3, 0x40, 0x03, 0xac, 0xa0, 0x0d, 0x66, 0x83, 0xd4, + 0xe4, 0x37, 0xce, 0x8b, 0x40, 0x3a, 0x96, 0x1e, 0x1c, 0x8d, 0x4a, 0xaf, 0xab, 0x66, 0x46, 0x49, + 0x94, 0x81, 0x87, 0x26, 0x00, 0x66, 0x72, 0x7b, 0x61, 0x01, 0x34, 0x46, 0x6b, 0x67, 0xc9, 0x9d, + 0xc5, 0x9f, 0xa0, 0xd4, 0x75, 0xa5, 0x60, 0xb5, 0x3f, 0x15, 0x00, 0x92, 0xaa, 0x80, 0x17, 0x41, + 0xea, 0x19, 0x2f, 0xbf, 0x62, 0x45, 0x0e, 0x81, 0x52, 0x72, 0xb8, 0x02, 0xa6, 0x6d, 0xc2, 0x18, + 0x6e, 0x46, 0xef, 0x80, 0xf8, 0x5f, 0x86, 0x8d, 0x50, 0x8c, 0xa2, 0x7d, 0xb8, 0x0b, 0xa6, 0x7c, + 0x82, 0x99, 0xeb, 0xc8, 0xff, 0x23, 0xde, 0xe5, 0x63, 0x15, 0x12, 0x92, 0xe3, 0xae, 0xba, 0x3a, + 0xca, 0xbf, 0x40, 0xba, 0x9c, 0xc2, 0x84, 0x11, 0x92, 0x70, 0xf0, 0x0e, 0xa8, 0x4a, 0x8e, 0xd4, + 0x81, 0xc3, 0xaa, 0x3d, 0x2f, 0x4f, 0x53, 0xdd, 0xc8, 0x2b, 0xa0, 0x7e, 0x1b, 0xed, 0x1e, 0x98, + 0x89, 0xb2, 0x0b, 0xd6, 0x40, 0x31, 0xf5, 0xf9, 0x0e, 0x1d, 0x17, 0x92, 0x5c, 0x60, 0x26, 0x07, + 0x07, 0xc6, 0xd8, 0x7a, 0xfa, 0xa2, 0x3e, 0xf1, 0xec, 0x45, 0x7d, 0xe2, 0xf9, 0x8b, 0xfa, 0xc4, + 0x93, 0x5e, 0x5d, 0x79, 0xda, 0xab, 0x2b, 0xcf, 0x7a, 0x75, 0xe5, 0x79, 0xaf, 0xae, 0xfc, 0xd2, + 0xab, 0x2b, 0xdf, 0xfc, 0x5a, 0x9f, 0xf8, 0x60, 0x65, 0xe4, 0x7f, 0xf1, 0xfe, 0x0a, 0x00, 0x00, + 0xff, 0xff, 0x22, 0xbd, 0xc5, 0xc7, 0xf1, 0x13, 0x00, 0x00, } func (m *AuditAnnotation) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto index 
db02dd929f..d5974d5ec4 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto @@ -156,7 +156,7 @@ message MatchResources { // // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1; // ObjectSelector decides whether to run the validation based on if the // object has matching labels. objectSelector is evaluated against both @@ -170,7 +170,7 @@ message MatchResources { // users may skip the admission webhook by setting the labels. // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2; // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. // The policy cares about an operation if it matches _any_ Rule. @@ -211,7 +211,7 @@ message NamedRuleWithOperations { repeated string resourceNames = 1; // RuleWithOperations is a tuple of Operations and Resources. - optional k8s.io.api.admissionregistration.v1.RuleWithOperations ruleWithOperations = 2; + optional .k8s.io.api.admissionregistration.v1.RuleWithOperations ruleWithOperations = 2; } // ParamKind is a tuple of Group Kind and Version. @@ -267,7 +267,7 @@ message ParamRef { // mutually exclusive properties. If one is set, the other must be unset. // // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; // `parameterNotFoundAction` controls the behavior of the binding when the resource // exists, and name or selector is valid, but there are no parameters @@ -295,7 +295,7 @@ message TypeChecking { message ValidatingAdmissionPolicy { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the ValidatingAdmissionPolicy. optional ValidatingAdmissionPolicySpec spec = 2; @@ -322,7 +322,7 @@ message ValidatingAdmissionPolicy { message ValidatingAdmissionPolicyBinding { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding. optional ValidatingAdmissionPolicyBindingSpec spec = 2; @@ -333,7 +333,7 @@ message ValidatingAdmissionPolicyBindingList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of PolicyBinding. repeated ValidatingAdmissionPolicyBinding items = 2; @@ -409,7 +409,7 @@ message ValidatingAdmissionPolicyList { // Standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ValidatingAdmissionPolicy. repeated ValidatingAdmissionPolicy items = 2; @@ -514,7 +514,7 @@ message ValidatingAdmissionPolicyStatus { // +optional // +listType=map // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3; } // Validation specifies the CEL expression which is used to apply the validation. diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go index 267ddc1cbd..261ae41bd0 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +// source: k8s.io/api/admissionregistration/v1beta1/generated.proto package v1beta1 @@ -49,7 +49,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *AuditAnnotation) Reset() { *m = AuditAnnotation{} } func (*AuditAnnotation) ProtoMessage() {} func (*AuditAnnotation) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{0} + return fileDescriptor_7f7c65a4f012fb19, []int{0} } func (m *AuditAnnotation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -77,7 +77,7 @@ var xxx_messageInfo_AuditAnnotation proto.InternalMessageInfo func (m *ExpressionWarning) Reset() { *m = ExpressionWarning{} } func (*ExpressionWarning) ProtoMessage() {} func (*ExpressionWarning) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{1} + return fileDescriptor_7f7c65a4f012fb19, []int{1} } func (m *ExpressionWarning) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -105,7 +105,7 @@ var xxx_messageInfo_ExpressionWarning proto.InternalMessageInfo func (m *MatchCondition) Reset() { *m = MatchCondition{} } func (*MatchCondition) ProtoMessage() {} func (*MatchCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{2} + return fileDescriptor_7f7c65a4f012fb19, []int{2} } func (m *MatchCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -133,7 +133,7 @@ var xxx_messageInfo_MatchCondition proto.InternalMessageInfo func (m *MatchResources) Reset() { *m = MatchResources{} } func (*MatchResources) ProtoMessage() {} func (*MatchResources) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{3} + return fileDescriptor_7f7c65a4f012fb19, []int{3} } func (m *MatchResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -161,7 +161,7 @@ var xxx_messageInfo_MatchResources proto.InternalMessageInfo func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } func (*MutatingWebhook) ProtoMessage() {} func (*MutatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{4} + return fileDescriptor_7f7c65a4f012fb19, []int{4} } func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -189,7 +189,7 @@ var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo func (m *MutatingWebhookConfiguration) 
Reset() { *m = MutatingWebhookConfiguration{} } func (*MutatingWebhookConfiguration) ProtoMessage() {} func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{5} + return fileDescriptor_7f7c65a4f012fb19, []int{5} } func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -217,7 +217,7 @@ var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } func (*MutatingWebhookConfigurationList) ProtoMessage() {} func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{6} + return fileDescriptor_7f7c65a4f012fb19, []int{6} } func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -245,7 +245,7 @@ var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo func (m *NamedRuleWithOperations) Reset() { *m = NamedRuleWithOperations{} } func (*NamedRuleWithOperations) ProtoMessage() {} func (*NamedRuleWithOperations) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{7} + return fileDescriptor_7f7c65a4f012fb19, []int{7} } func (m *NamedRuleWithOperations) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -273,7 +273,7 @@ var xxx_messageInfo_NamedRuleWithOperations proto.InternalMessageInfo func (m *ParamKind) Reset() { *m = ParamKind{} } func (*ParamKind) ProtoMessage() {} func (*ParamKind) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{8} + return fileDescriptor_7f7c65a4f012fb19, []int{8} } func (m *ParamKind) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -301,7 +301,7 @@ var xxx_messageInfo_ParamKind proto.InternalMessageInfo func (m *ParamRef) Reset() { *m = ParamRef{} } func (*ParamRef) ProtoMessage() {} func (*ParamRef) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{9} + return fileDescriptor_7f7c65a4f012fb19, []int{9} } func (m *ParamRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -329,7 +329,7 @@ var xxx_messageInfo_ParamRef proto.InternalMessageInfo func (m *ServiceReference) Reset() { *m = ServiceReference{} } func (*ServiceReference) ProtoMessage() {} func (*ServiceReference) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{10} + return fileDescriptor_7f7c65a4f012fb19, []int{10} } func (m *ServiceReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -357,7 +357,7 @@ var xxx_messageInfo_ServiceReference proto.InternalMessageInfo func (m *TypeChecking) Reset() { *m = TypeChecking{} } func (*TypeChecking) ProtoMessage() {} func (*TypeChecking) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{11} + return fileDescriptor_7f7c65a4f012fb19, []int{11} } func (m *TypeChecking) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -385,7 +385,7 @@ var xxx_messageInfo_TypeChecking proto.InternalMessageInfo func (m *ValidatingAdmissionPolicy) Reset() { *m = ValidatingAdmissionPolicy{} } func (*ValidatingAdmissionPolicy) ProtoMessage() {} func (*ValidatingAdmissionPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{12} + return fileDescriptor_7f7c65a4f012fb19, []int{12} } func (m *ValidatingAdmissionPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -413,7 +413,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicy 
proto.InternalMessageInfo func (m *ValidatingAdmissionPolicyBinding) Reset() { *m = ValidatingAdmissionPolicyBinding{} } func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {} func (*ValidatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{13} + return fileDescriptor_7f7c65a4f012fb19, []int{13} } func (m *ValidatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -441,7 +441,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBinding proto.InternalMessageInfo func (m *ValidatingAdmissionPolicyBindingList) Reset() { *m = ValidatingAdmissionPolicyBindingList{} } func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {} func (*ValidatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{14} + return fileDescriptor_7f7c65a4f012fb19, []int{14} } func (m *ValidatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -469,7 +469,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingList proto.InternalMessageIn func (m *ValidatingAdmissionPolicyBindingSpec) Reset() { *m = ValidatingAdmissionPolicyBindingSpec{} } func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {} func (*ValidatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{15} + return fileDescriptor_7f7c65a4f012fb19, []int{15} } func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -497,7 +497,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec proto.InternalMessageIn func (m *ValidatingAdmissionPolicyList) Reset() { *m = ValidatingAdmissionPolicyList{} } func (*ValidatingAdmissionPolicyList) ProtoMessage() {} func (*ValidatingAdmissionPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{16} + return fileDescriptor_7f7c65a4f012fb19, []int{16} } func (m *ValidatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -525,7 +525,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyList proto.InternalMessageInfo func (m *ValidatingAdmissionPolicySpec) Reset() { *m = ValidatingAdmissionPolicySpec{} } func (*ValidatingAdmissionPolicySpec) ProtoMessage() {} func (*ValidatingAdmissionPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{17} + return fileDescriptor_7f7c65a4f012fb19, []int{17} } func (m *ValidatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -553,7 +553,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicySpec proto.InternalMessageInfo func (m *ValidatingAdmissionPolicyStatus) Reset() { *m = ValidatingAdmissionPolicyStatus{} } func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {} func (*ValidatingAdmissionPolicyStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{18} + return fileDescriptor_7f7c65a4f012fb19, []int{18} } func (m *ValidatingAdmissionPolicyStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -581,7 +581,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyStatus proto.InternalMessageInfo func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } func (*ValidatingWebhook) ProtoMessage() {} func (*ValidatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{19} + return fileDescriptor_7f7c65a4f012fb19, []int{19} } func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-609,7 +609,7 @@ var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } func (*ValidatingWebhookConfiguration) ProtoMessage() {} func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{20} + return fileDescriptor_7f7c65a4f012fb19, []int{20} } func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -637,7 +637,7 @@ var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } func (*ValidatingWebhookConfigurationList) ProtoMessage() {} func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{21} + return fileDescriptor_7f7c65a4f012fb19, []int{21} } func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -665,7 +665,7 @@ var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo func (m *Validation) Reset() { *m = Validation{} } func (*Validation) ProtoMessage() {} func (*Validation) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{22} + return fileDescriptor_7f7c65a4f012fb19, []int{22} } func (m *Validation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -693,7 +693,7 @@ var xxx_messageInfo_Validation proto.InternalMessageInfo func (m *Variable) Reset() { *m = Variable{} } func (*Variable) ProtoMessage() {} func (*Variable) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{23} + return fileDescriptor_7f7c65a4f012fb19, []int{23} } func (m *Variable) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -721,7 +721,7 @@ var xxx_messageInfo_Variable proto.InternalMessageInfo func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } func (*WebhookClientConfig) ProtoMessage() {} func (*WebhookClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{24} + return fileDescriptor_7f7c65a4f012fb19, []int{24} } func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -775,135 +775,134 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptor_abeea74cbc46f55a) -} - -var fileDescriptor_abeea74cbc46f55a = []byte{ - // 1973 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x1a, 0x4d, 0x6f, 0x23, 0x49, - 0x35, 0x1d, 0xe7, 0xc3, 0x7e, 0xce, 0x97, 0x6b, 0x67, 0x89, 0x77, 0x76, 0xd6, 0x8e, 0x5a, 0x2b, - 0x94, 0x91, 0xc0, 0xde, 0xc9, 0xae, 0x76, 0x97, 0x59, 0x21, 0x14, 0x67, 0x67, 0x86, 0x99, 0x9d, - 0x64, 0x42, 0x65, 0x37, 0x91, 0x60, 0x57, 0x9a, 0x72, 0x77, 0xd9, 0x6e, 0x6c, 0x77, 0x37, 0x5d, - 0x6d, 0xcf, 0x04, 0x24, 0x40, 0xe2, 0xb0, 0x57, 0x24, 0x2e, 0x48, 0x9c, 0xf8, 0x0b, 0xdc, 0x91, - 0xe0, 0x36, 0xc7, 0xbd, 0x31, 0x12, 0xc2, 0x22, 0xe6, 0xc0, 0x89, 0x03, 0x07, 0x38, 0xe4, 0x02, - 0xaa, 0xea, 0xea, 0x4f, 0xb7, 0x27, 0x9d, 0x90, 0x09, 0x97, 0xb9, 0xa5, 0xdf, 0x67, 0xbd, 0x57, - 0xef, 0xab, 0x9e, 0x03, 0xdf, 0xeb, 0x7e, 0xc8, 0x6a, 0x86, 0x55, 0xef, 0x0e, 0x9a, 0xd4, 0x31, - 0xa9, 0x4b, 0x59, 0x7d, 0x48, 0x4d, 0xdd, 0x72, 0xea, 0x12, 0x41, 0x6c, 0xa3, 0x4e, 0xf4, 0xbe, - 0xc1, 0x98, 0x61, 0x99, 0x0e, 0x6d, 0x1b, 0xcc, 0x75, 0x88, 0x6b, 0x58, 0x66, 0x7d, 0x78, 0xab, 
- 0x49, 0x5d, 0x72, 0xab, 0xde, 0xa6, 0x26, 0x75, 0x88, 0x4b, 0xf5, 0x9a, 0xed, 0x58, 0xae, 0x85, - 0x36, 0x3d, 0xce, 0x1a, 0xb1, 0x8d, 0x5a, 0x2a, 0x67, 0x4d, 0x72, 0x5e, 0xff, 0x66, 0xdb, 0x70, - 0x3b, 0x83, 0x66, 0x4d, 0xb3, 0xfa, 0xf5, 0xb6, 0xd5, 0xb6, 0xea, 0x42, 0x40, 0x73, 0xd0, 0x12, - 0x5f, 0xe2, 0x43, 0xfc, 0xe5, 0x09, 0xbe, 0xfe, 0x6e, 0x86, 0x23, 0x25, 0x4f, 0x73, 0xfd, 0xbd, - 0x90, 0xa9, 0x4f, 0xb4, 0x8e, 0x61, 0x52, 0xe7, 0xb8, 0x6e, 0x77, 0xdb, 0x1c, 0xc0, 0xea, 0x7d, - 0xea, 0x92, 0x34, 0xae, 0xfa, 0x34, 0x2e, 0x67, 0x60, 0xba, 0x46, 0x9f, 0x4e, 0x30, 0xbc, 0x7f, - 0x16, 0x03, 0xd3, 0x3a, 0xb4, 0x4f, 0x92, 0x7c, 0x2a, 0x83, 0xd5, 0xed, 0x81, 0x6e, 0xb8, 0xdb, - 0xa6, 0x69, 0xb9, 0xc2, 0x08, 0xf4, 0x16, 0xe4, 0xba, 0xf4, 0xb8, 0xac, 0x6c, 0x28, 0x9b, 0x85, - 0x46, 0xf1, 0xd9, 0xa8, 0x3a, 0x33, 0x1e, 0x55, 0x73, 0x9f, 0xd0, 0x63, 0xcc, 0xe1, 0x68, 0x1b, - 0x56, 0x87, 0xa4, 0x37, 0xa0, 0x77, 0x9e, 0xda, 0x0e, 0x15, 0x2e, 0x28, 0xcf, 0x0a, 0xd2, 0x75, - 0x49, 0xba, 0x7a, 0x18, 0x47, 0xe3, 0x24, 0xbd, 0xda, 0x83, 0x52, 0xf8, 0x75, 0x44, 0x1c, 0xd3, - 0x30, 0xdb, 0xe8, 0x1b, 0x90, 0x6f, 0x19, 0xb4, 0xa7, 0x63, 0xda, 0x92, 0x02, 0xd7, 0xa4, 0xc0, - 0xfc, 0x5d, 0x09, 0xc7, 0x01, 0x05, 0xba, 0x09, 0x8b, 0x4f, 0x3c, 0xc6, 0x72, 0x4e, 0x10, 0xaf, - 0x4a, 0xe2, 0x45, 0x29, 0x0f, 0xfb, 0x78, 0xb5, 0x05, 0x2b, 0xbb, 0xc4, 0xd5, 0x3a, 0x3b, 0x96, - 0xa9, 0x1b, 0xc2, 0xc2, 0x0d, 0x98, 0x33, 0x49, 0x9f, 0x4a, 0x13, 0x97, 0x24, 0xe7, 0xdc, 0x1e, - 0xe9, 0x53, 0x2c, 0x30, 0x68, 0x0b, 0x80, 0x26, 0xed, 0x43, 0x92, 0x0e, 0x22, 0xa6, 0x45, 0xa8, - 0xd4, 0x3f, 0xcd, 0x49, 0x45, 0x98, 0x32, 0x6b, 0xe0, 0x68, 0x94, 0xa1, 0xa7, 0x50, 0xe2, 0xe2, - 0x98, 0x4d, 0x34, 0x7a, 0x40, 0x7b, 0x54, 0x73, 0x2d, 0x47, 0x68, 0x2d, 0x6e, 0xbd, 0x5b, 0x0b, - 0xc3, 0x34, 0xb8, 0xb1, 0x9a, 0xdd, 0x6d, 0x73, 0x00, 0xab, 0xf1, 0xc0, 0xa8, 0x0d, 0x6f, 0xd5, - 0x1e, 0x92, 0x26, 0xed, 0xf9, 0xac, 0x8d, 0xd7, 0xc7, 0xa3, 0x6a, 0x69, 0x2f, 0x29, 0x11, 0x4f, - 0x2a, 0x41, 0x16, 0xac, 0x58, 0xcd, 0x1f, 0x52, 0xcd, 0x0d, 0xd4, 0xce, 0x5e, 0x5c, 0x2d, 0x1a, - 0x8f, 0xaa, 0x2b, 0x8f, 0x62, 0xe2, 0x70, 0x42, 0x3c, 0xfa, 0x29, 0x2c, 0x3b, 0xd2, 0x6e, 0x3c, - 0xe8, 0x51, 0x56, 0xce, 0x6d, 0xe4, 0x36, 0x8b, 0x5b, 0xdb, 0xb5, 0xac, 0xd9, 0x58, 0xe3, 0x76, - 0xe9, 0x9c, 0xf7, 0xc8, 0x70, 0x3b, 0x8f, 0x6c, 0xea, 0xa1, 0x59, 0xe3, 0x75, 0xe9, 0xf7, 0x65, - 0x1c, 0x95, 0x8f, 0xe3, 0xea, 0xd0, 0xaf, 0x14, 0xb8, 0x46, 0x9f, 0x6a, 0xbd, 0x81, 0x4e, 0x63, - 0x74, 0xe5, 0xb9, 0xcb, 0x3a, 0xc7, 0x0d, 0x79, 0x8e, 0x6b, 0x77, 0x52, 0xd4, 0xe0, 0x54, 0xe5, - 0xe8, 0x63, 0x28, 0xf6, 0x79, 0x48, 0xec, 0x5b, 0x3d, 0x43, 0x3b, 0x2e, 0x2f, 0x8a, 0x40, 0x52, - 0xc7, 0xa3, 0x6a, 0x71, 0x37, 0x04, 0x9f, 0x8e, 0xaa, 0xab, 0x91, 0xcf, 0x4f, 0x8f, 0x6d, 0x8a, - 0xa3, 0x6c, 0xea, 0x1f, 0xf3, 0xb0, 0xba, 0x3b, 0xe0, 0xe9, 0x69, 0xb6, 0x8f, 0x68, 0xb3, 0x63, - 0x59, 0xdd, 0x0c, 0x31, 0xfc, 0x04, 0x96, 0xb4, 0x9e, 0x41, 0x4d, 0x77, 0xc7, 0x32, 0x5b, 0x46, - 0x5b, 0x06, 0xc0, 0xb7, 0xb3, 0x3b, 0x42, 0xaa, 0xda, 0x89, 0x08, 0x69, 0x5c, 0x93, 0x8a, 0x96, - 0xa2, 0x50, 0x1c, 0x53, 0x84, 0x3e, 0x87, 0x79, 0x27, 0x12, 0x02, 0x1f, 0x64, 0xd1, 0x58, 0x4b, - 0x71, 0xf8, 0xb2, 0xd4, 0x35, 0xef, 0x79, 0xd8, 0x13, 0x8a, 0x1e, 0xc2, 0x72, 0x8b, 0x18, 0xbd, - 0x81, 0x43, 0xa5, 0x53, 0xe7, 0x84, 0x07, 0xbe, 0xce, 0x23, 0xe4, 0x6e, 0x14, 0x71, 0x3a, 0xaa, - 0x96, 0x62, 0x00, 0xe1, 0xd8, 0x38, 0x73, 0xf2, 0x82, 0x0a, 0x17, 0xba, 0xa0, 0xf4, 0x3c, 0x9f, - 0xff, 0xff, 0xe4, 0x79, 0xf1, 0xe5, 0xe6, 0xf9, 0xc7, 0x50, 0x64, 0x86, 0x4e, 0xef, 0xb4, 0x5a, - 0x54, 0x73, 0x59, 0x79, 
0x21, 0x74, 0xd8, 0x41, 0x08, 0xe6, 0x0e, 0x0b, 0x3f, 0x77, 0x7a, 0x84, - 0x31, 0x1c, 0x65, 0x43, 0xb7, 0x61, 0x85, 0x77, 0x25, 0x6b, 0xe0, 0x1e, 0x50, 0xcd, 0x32, 0x75, - 0x26, 0x52, 0x63, 0xde, 0x3b, 0xc1, 0xa7, 0x31, 0x0c, 0x4e, 0x50, 0xa2, 0xcf, 0x60, 0x3d, 0x88, - 0x22, 0x4c, 0x87, 0x06, 0x7d, 0x72, 0x48, 0x1d, 0xfe, 0xc1, 0xca, 0xf9, 0x8d, 0xdc, 0x66, 0xa1, - 0xf1, 0xe6, 0x78, 0x54, 0x5d, 0xdf, 0x4e, 0x27, 0xc1, 0xd3, 0x78, 0xd1, 0x63, 0x40, 0x0e, 0x35, - 0xcc, 0xa1, 0xa5, 0x89, 0xf0, 0x93, 0x01, 0x01, 0xc2, 0xbe, 0x77, 0xc6, 0xa3, 0x2a, 0xc2, 0x13, - 0xd8, 0xd3, 0x51, 0xf5, 0x6b, 0x93, 0x50, 0x11, 0x1e, 0x29, 0xb2, 0xd0, 0x4f, 0x60, 0xb5, 0x1f, - 0x6b, 0x44, 0xac, 0xbc, 0x24, 0x32, 0xe4, 0xc3, 0xec, 0x39, 0x19, 0xef, 0x64, 0x61, 0xcf, 0x8d, - 0xc3, 0x19, 0x4e, 0x6a, 0x52, 0xff, 0xa2, 0xc0, 0x8d, 0x44, 0x0d, 0xf1, 0xd2, 0x75, 0xe0, 0x69, - 0x40, 0x8f, 0x21, 0xcf, 0xa3, 0x42, 0x27, 0x2e, 0x91, 0x2d, 0xea, 0x9d, 0x6c, 0x31, 0xe4, 0x05, - 0xcc, 0x2e, 0x75, 0x49, 0xd8, 0x22, 0x43, 0x18, 0x0e, 0xa4, 0xa2, 0x1f, 0x40, 0x5e, 0x6a, 0x66, - 0xe5, 0x59, 0x61, 0xf8, 0xb7, 0xce, 0x61, 0x78, 0xfc, 0xec, 0x8d, 0x39, 0xae, 0x0a, 0x07, 0x02, - 0xd5, 0x7f, 0x28, 0xb0, 0xf1, 0x22, 0xfb, 0x1e, 0x1a, 0xcc, 0x45, 0x9f, 0x4f, 0xd8, 0x58, 0xcb, - 0x98, 0x27, 0x06, 0xf3, 0x2c, 0x0c, 0x66, 0x12, 0x1f, 0x12, 0xb1, 0xaf, 0x0b, 0xf3, 0x86, 0x4b, - 0xfb, 0xbe, 0x71, 0x77, 0x2f, 0x6c, 0x5c, 0xec, 0xe0, 0x61, 0x19, 0xbc, 0xcf, 0x85, 0x63, 0x4f, - 0x87, 0xfa, 0x5c, 0x81, 0xf5, 0x29, 0x9d, 0x0a, 0x7d, 0x10, 0xf6, 0x62, 0x51, 0x44, 0xca, 0x8a, - 0xc8, 0x8b, 0x52, 0xb4, 0x89, 0x0a, 0x04, 0x8e, 0xd3, 0xa1, 0x5f, 0x28, 0x80, 0x9c, 0x09, 0x79, - 0xb2, 0x73, 0x5c, 0xb8, 0x8e, 0x5f, 0x97, 0x06, 0xa0, 0x49, 0x1c, 0x4e, 0x51, 0xa7, 0x12, 0x28, - 0xec, 0x13, 0x87, 0xf4, 0x3f, 0x31, 0x4c, 0x9d, 0x4f, 0x62, 0xc4, 0x36, 0x64, 0x96, 0xca, 0x6e, - 0x17, 0x84, 0xd9, 0xf6, 0xfe, 0x7d, 0x89, 0xc1, 0x11, 0x2a, 0xde, 0x1b, 0xbb, 0x86, 0xa9, 0xcb, - 0xb9, 0x2d, 0xe8, 0x8d, 0x5c, 0x1e, 0x16, 0x18, 0xf5, 0x77, 0xb3, 0x90, 0x17, 0x3a, 0xf8, 0x2c, - 0x79, 0x76, 0x2b, 0xad, 0x43, 0x21, 0x28, 0xbd, 0x52, 0x6a, 0x49, 0x92, 0x15, 0x82, 0x32, 0x8d, - 0x43, 0x1a, 0xf4, 0x05, 0xe4, 0x99, 0x5f, 0x90, 0x73, 0x17, 0x2f, 0xc8, 0x4b, 0x3c, 0xd2, 0x82, - 0x52, 0x1c, 0x88, 0x44, 0x2e, 0xac, 0xdb, 0xfc, 0xf4, 0xd4, 0xa5, 0xce, 0x9e, 0xe5, 0xde, 0xb5, - 0x06, 0xa6, 0xbe, 0xad, 0x71, 0xef, 0xc9, 0x6e, 0x78, 0x9b, 0x97, 0xc0, 0xfd, 0x74, 0x92, 0xd3, - 0x51, 0xf5, 0xcd, 0x29, 0x28, 0x51, 0xba, 0xa6, 0x89, 0x56, 0x7f, 0xab, 0xc0, 0xda, 0x01, 0x75, - 0x86, 0x86, 0x46, 0x31, 0x6d, 0x51, 0x87, 0x9a, 0x5a, 0xc2, 0x35, 0x4a, 0x06, 0xd7, 0xf8, 0xde, - 0x9e, 0x9d, 0xea, 0xed, 0x1b, 0x30, 0x67, 0x13, 0xb7, 0x23, 0x07, 0xfb, 0x3c, 0xc7, 0xee, 0x13, - 0xb7, 0x83, 0x05, 0x54, 0x60, 0x2d, 0xc7, 0x15, 0x86, 0xce, 0x4b, 0xac, 0xe5, 0xb8, 0x58, 0x40, - 0xd5, 0x5f, 0x2b, 0xb0, 0xc4, 0xad, 0xd8, 0xe9, 0x50, 0xad, 0xcb, 0x9f, 0x15, 0x5f, 0x2a, 0x80, - 0x68, 0xf2, 0xb1, 0xe1, 0x65, 0x44, 0x71, 0xeb, 0xa3, 0xec, 0x29, 0x3a, 0xf1, 0x60, 0x09, 0xc3, - 0x7a, 0x02, 0xc5, 0x70, 0x8a, 0x4a, 0xf5, 0xcf, 0xb3, 0xf0, 0xc6, 0x21, 0xe9, 0x19, 0xba, 0x48, - 0xf5, 0xa0, 0x3f, 0xc9, 0xe6, 0xf0, 0xf2, 0xcb, 0xaf, 0x01, 0x73, 0xcc, 0xa6, 0x9a, 0xcc, 0xe6, - 0x7b, 0xd9, 0x4d, 0x9f, 0x7a, 0xe8, 0x03, 0x9b, 0x6a, 0xe1, 0x0d, 0xf2, 0x2f, 0x2c, 0x54, 0xa0, - 0x1f, 0xc1, 0x02, 0x73, 0x89, 0x3b, 0x60, 0x32, 0xf8, 0xef, 0x5f, 0x86, 0x32, 0x21, 0xb0, 0xb1, - 0x22, 0xd5, 0x2d, 0x78, 0xdf, 0x58, 0x2a, 0x52, 0xff, 0xad, 0xc0, 0xc6, 0x54, 0xde, 0x86, 0x61, - 0xea, 0x3c, 0x18, 0x5e, 0xbe, 0x93, 0xed, 0x98, 
0x93, 0xf7, 0x2e, 0xc1, 0x6e, 0x79, 0xf6, 0x69, - 0xbe, 0x56, 0xff, 0xa5, 0xc0, 0xdb, 0x67, 0x31, 0x5f, 0x41, 0xf3, 0xb3, 0xe2, 0xcd, 0xef, 0xc1, - 0xe5, 0x59, 0x3e, 0xa5, 0x01, 0x7e, 0x99, 0x3b, 0xdb, 0x6e, 0xee, 0x26, 0xde, 0x41, 0x6c, 0x01, - 0xdc, 0x0b, 0x8b, 0x7c, 0x70, 0x89, 0xfb, 0x01, 0x06, 0x47, 0xa8, 0xb8, 0xaf, 0x6c, 0xd9, 0x1e, - 0xe4, 0x55, 0x6e, 0x65, 0x37, 0xc8, 0x6f, 0x2c, 0x5e, 0xf9, 0xf6, 0xbf, 0x70, 0x20, 0x11, 0xb9, - 0xb0, 0xd2, 0x8f, 0x2d, 0x0a, 0x64, 0x9a, 0x9c, 0x77, 0x0e, 0x0c, 0xf8, 0xbd, 0xb9, 0x39, 0x0e, - 0xc3, 0x09, 0x1d, 0xe8, 0x08, 0x4a, 0x43, 0xe9, 0x2f, 0xcb, 0xf4, 0x4a, 0xba, 0xf7, 0x3a, 0x2e, - 0x34, 0x6e, 0xf2, 0xf7, 0xc6, 0x61, 0x12, 0x79, 0x3a, 0xaa, 0xae, 0x25, 0x81, 0x78, 0x52, 0x86, - 0xfa, 0x77, 0x05, 0xde, 0x9a, 0x7a, 0x13, 0x57, 0x10, 0x7a, 0x9d, 0x78, 0xe8, 0xed, 0x5c, 0x46, - 0xe8, 0xa5, 0xc7, 0xdc, 0x6f, 0x16, 0x5e, 0x60, 0xa9, 0x08, 0xb6, 0xc7, 0x50, 0xb0, 0xfd, 0xd9, - 0x25, 0x65, 0xd3, 0x93, 0x25, 0x72, 0x38, 0x6b, 0x63, 0x99, 0xf7, 0xcf, 0xe0, 0x13, 0x87, 0x42, - 0xd1, 0x8f, 0x61, 0xcd, 0x9f, 0xed, 0x39, 0xbf, 0x61, 0xba, 0xfe, 0x80, 0x76, 0xf1, 0xf0, 0xb9, - 0x36, 0x1e, 0x55, 0xd7, 0x76, 0x13, 0x52, 0xf1, 0x84, 0x1e, 0xd4, 0x85, 0x62, 0x78, 0xfd, 0xfe, - 0xfb, 0xfe, 0xbd, 0xf3, 0xfb, 0xdb, 0x32, 0x1b, 0xaf, 0x49, 0x07, 0x17, 0x43, 0x18, 0xc3, 0x51, - 0xe9, 0x97, 0xfc, 0xd0, 0xff, 0x19, 0xac, 0x91, 0xf8, 0xa2, 0x93, 0x95, 0xe7, 0xcf, 0xfb, 0x08, - 0x49, 0xac, 0x4a, 0x1b, 0x65, 0x69, 0xc4, 0x5a, 0x02, 0xc1, 0xf0, 0x84, 0xb2, 0xb4, 0xd7, 0xdf, - 0xc2, 0x55, 0xbd, 0xfe, 0x90, 0x06, 0x85, 0x21, 0x71, 0x0c, 0xd2, 0xec, 0x51, 0xfe, 0xd4, 0xce, - 0x9d, 0xaf, 0xa0, 0x1d, 0x4a, 0xd6, 0x70, 0xb2, 0xf3, 0x21, 0x0c, 0x87, 0x72, 0xd5, 0x3f, 0xcc, - 0x42, 0xf5, 0x8c, 0xf6, 0x8d, 0x1e, 0x00, 0xb2, 0x9a, 0x8c, 0x3a, 0x43, 0xaa, 0xdf, 0xf3, 0x56, - 0xd1, 0xfe, 0x58, 0x9f, 0x0b, 0x07, 0xaa, 0x47, 0x13, 0x14, 0x38, 0x85, 0x0b, 0xf5, 0x60, 0xc9, - 0x8d, 0x8c, 0x7a, 0x32, 0x0b, 0xde, 0xcf, 0x6e, 0x57, 0x74, 0x50, 0x6c, 0xac, 0x8d, 0x47, 0xd5, - 0xd8, 0xe8, 0x88, 0x63, 0xd2, 0x91, 0x06, 0xa0, 0x85, 0x57, 0xe7, 0x85, 0x7e, 0x3d, 0x5b, 0x15, - 0x0b, 0x6f, 0x2c, 0xe8, 0x3b, 0x91, 0xcb, 0x8a, 0x88, 0x55, 0x4f, 0x16, 0xa1, 0x14, 0xba, 0xf0, - 0xd5, 0xae, 0xef, 0xd5, 0xae, 0xef, 0x85, 0xbb, 0x3e, 0x78, 0xb5, 0xeb, 0xbb, 0xd0, 0xae, 0x2f, - 0xa5, 0x16, 0x17, 0xaf, 0x6c, 0x13, 0x77, 0xa2, 0x40, 0x65, 0x22, 0xc7, 0xaf, 0x7a, 0x17, 0xf7, - 0xc5, 0xc4, 0x2e, 0xee, 0xa3, 0x8b, 0x8c, 0x4d, 0xd3, 0xb6, 0x71, 0xff, 0x54, 0x40, 0x7d, 0xb1, - 0x8d, 0x57, 0x30, 0x17, 0xf6, 0xe3, 0x73, 0xe1, 0x77, 0xff, 0x07, 0x03, 0xb3, 0x6c, 0xe4, 0xfe, - 0xa3, 0x00, 0x84, 0xc3, 0x0c, 0x7a, 0x1b, 0x22, 0x3f, 0x14, 0xca, 0xd2, 0xed, 0xb9, 0x29, 0x02, - 0x47, 0x37, 0x61, 0xb1, 0x4f, 0x19, 0x23, 0x6d, 0x7f, 0x21, 0x12, 0xfc, 0x8e, 0xb9, 0xeb, 0x81, - 0xb1, 0x8f, 0x47, 0x47, 0xb0, 0xe0, 0x50, 0xc2, 0x2c, 0x53, 0x2e, 0x46, 0xbe, 0xc3, 0x5f, 0xc1, - 0x58, 0x40, 0x4e, 0x47, 0xd5, 0x5b, 0x59, 0x7e, 0x67, 0xae, 0xc9, 0x47, 0xb3, 0x60, 0xc2, 0x52, - 0x1c, 0xba, 0x07, 0x25, 0xa9, 0x23, 0x72, 0x60, 0xaf, 0xd2, 0xbe, 0x21, 0x4f, 0x53, 0xda, 0x4d, - 0x12, 0xe0, 0x49, 0x1e, 0xf5, 0x01, 0xe4, 0xfd, 0xc1, 0x00, 0x95, 0x61, 0x2e, 0xf2, 0xde, 0xf2, - 0x0c, 0x17, 0x90, 0x84, 0x63, 0x66, 0xd3, 0x1d, 0xa3, 0xfe, 0x5e, 0x81, 0xd7, 0x52, 0x9a, 0x12, - 0x7a, 0x03, 0x72, 0x03, 0xa7, 0x27, 0x5d, 0xb0, 0x38, 0x1e, 0x55, 0x73, 0x9f, 0xe1, 0x87, 0x98, - 0xc3, 0x10, 0x81, 0x45, 0xe6, 0xad, 0xa7, 0x64, 0x30, 0xdd, 0xce, 0x7e, 0xe3, 0xc9, 0xbd, 0x56, - 0xa3, 0xc8, 0xef, 0xc0, 0x87, 0xfa, 0x72, 0xd1, 0x26, 0xe4, 0x35, 0xd2, 
0x18, 0x98, 0x7a, 0xcf, - 0xbb, 0xaf, 0x25, 0xef, 0x8d, 0xb7, 0xb3, 0xed, 0xc1, 0x70, 0x80, 0x6d, 0xec, 0x3d, 0x3b, 0xa9, - 0xcc, 0x7c, 0x75, 0x52, 0x99, 0x79, 0x7e, 0x52, 0x99, 0xf9, 0xf9, 0xb8, 0xa2, 0x3c, 0x1b, 0x57, - 0x94, 0xaf, 0xc6, 0x15, 0xe5, 0xf9, 0xb8, 0xa2, 0xfc, 0x75, 0x5c, 0x51, 0x7e, 0xf9, 0xb7, 0xca, - 0xcc, 0xf7, 0x37, 0xb3, 0xfe, 0x97, 0xc3, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x71, 0x54, 0x54, - 0xe6, 0x29, 0x21, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptor_7f7c65a4f012fb19) +} + +var fileDescriptor_7f7c65a4f012fb19 = []byte{ + // 1957 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x1a, 0x4d, 0x6f, 0x1b, 0xc7, + 0xd5, 0x2b, 0x52, 0x12, 0xf9, 0xa8, 0x2f, 0x4e, 0x9c, 0x8a, 0x76, 0x1c, 0x52, 0x58, 0x04, 0x85, + 0x0c, 0xb4, 0x64, 0xac, 0x04, 0x89, 0xeb, 0xa0, 0x28, 0x44, 0xc5, 0x76, 0xed, 0x58, 0xb2, 0x30, + 0x4a, 0x24, 0xa0, 0x4d, 0x00, 0x8f, 0x76, 0x87, 0xe4, 0x96, 0xe4, 0xee, 0x76, 0x67, 0x49, 0x5b, + 0x2d, 0xd0, 0x16, 0xe8, 0x21, 0xd7, 0x02, 0xbd, 0x14, 0xe8, 0xa9, 0x7f, 0xa1, 0xf7, 0x02, 0xed, + 0xcd, 0xc7, 0xdc, 0x6a, 0xa0, 0x28, 0x51, 0xb1, 0x87, 0x9e, 0x7a, 0xe8, 0xa1, 0x3d, 0xe8, 0xd2, + 0x62, 0x66, 0x67, 0x3f, 0xb9, 0xb4, 0x56, 0xaa, 0xac, 0x5c, 0x7c, 0xd3, 0xbe, 0xcf, 0x79, 0x6f, + 0xde, 0xd7, 0x3c, 0x0a, 0x6e, 0x77, 0x6f, 0xb3, 0xba, 0x61, 0x35, 0x88, 0x6d, 0x34, 0x88, 0xde, + 0x37, 0x18, 0x33, 0x2c, 0xd3, 0xa1, 0x6d, 0x83, 0xb9, 0x0e, 0x71, 0x0d, 0xcb, 0x6c, 0x0c, 0x6f, + 0x1d, 0x52, 0x97, 0xdc, 0x6a, 0xb4, 0xa9, 0x49, 0x1d, 0xe2, 0x52, 0xbd, 0x6e, 0x3b, 0x96, 0x6b, + 0xa1, 0x75, 0x8f, 0xb3, 0x4e, 0x6c, 0xa3, 0x9e, 0xca, 0x59, 0x97, 0x9c, 0xd7, 0xbf, 0xdd, 0x36, + 0xdc, 0xce, 0xe0, 0xb0, 0xae, 0x59, 0xfd, 0x46, 0xdb, 0x6a, 0x5b, 0x0d, 0x21, 0xe0, 0x70, 0xd0, + 0x12, 0x5f, 0xe2, 0x43, 0xfc, 0xe5, 0x09, 0xbe, 0xfe, 0x5e, 0x86, 0x23, 0x25, 0x4f, 0x73, 0xfd, + 0xfd, 0x90, 0xa9, 0x4f, 0xb4, 0x8e, 0x61, 0x52, 0xe7, 0xa8, 0x61, 0x77, 0xdb, 0x1c, 0xc0, 0x1a, + 0x7d, 0xea, 0x92, 0x34, 0xae, 0xc6, 0x34, 0x2e, 0x67, 0x60, 0xba, 0x46, 0x9f, 0x4e, 0x30, 0x7c, + 0x70, 0x1a, 0x03, 0xd3, 0x3a, 0xb4, 0x4f, 0x92, 0x7c, 0x2a, 0x83, 0xe5, 0xcd, 0x81, 0x6e, 0xb8, + 0x9b, 0xa6, 0x69, 0xb9, 0xc2, 0x08, 0xf4, 0x36, 0xe4, 0xba, 0xf4, 0xa8, 0xa2, 0xac, 0x29, 0xeb, + 0xc5, 0x66, 0xe9, 0xf9, 0xa8, 0x76, 0x65, 0x3c, 0xaa, 0xe5, 0x3e, 0xa1, 0x47, 0x98, 0xc3, 0xd1, + 0x26, 0x2c, 0x0f, 0x49, 0x6f, 0x40, 0xef, 0x3e, 0xb3, 0x1d, 0x2a, 0x5c, 0x50, 0x99, 0x11, 0xa4, + 0xab, 0x92, 0x74, 0x79, 0x3f, 0x8e, 0xc6, 0x49, 0x7a, 0xb5, 0x07, 0xe5, 0xf0, 0xeb, 0x80, 0x38, + 0xa6, 0x61, 0xb6, 0xd1, 0xb7, 0xa0, 0xd0, 0x32, 0x68, 0x4f, 0xc7, 0xb4, 0x25, 0x05, 0xae, 0x48, + 0x81, 0x85, 0x7b, 0x12, 0x8e, 0x03, 0x0a, 0x74, 0x13, 0xe6, 0x9f, 0x7a, 0x8c, 0x95, 0x9c, 0x20, + 0x5e, 0x96, 0xc4, 0xf3, 0x52, 0x1e, 0xf6, 0xf1, 0x6a, 0x0b, 0x96, 0xb6, 0x89, 0xab, 0x75, 0xb6, + 0x2c, 0x53, 0x37, 0x84, 0x85, 0x6b, 0x90, 0x37, 0x49, 0x9f, 0x4a, 0x13, 0x17, 0x24, 0x67, 0x7e, + 0x87, 0xf4, 0x29, 0x16, 0x18, 0xb4, 0x01, 0x40, 0x93, 0xf6, 0x21, 0x49, 0x07, 0x11, 0xd3, 0x22, + 0x54, 0xea, 0x9f, 0xf3, 0x52, 0x11, 0xa6, 0xcc, 0x1a, 0x38, 0x1a, 0x65, 0xe8, 0x19, 0x94, 0xb9, + 0x38, 0x66, 0x13, 0x8d, 0xee, 0xd1, 0x1e, 0xd5, 0x5c, 0xcb, 0x11, 0x5a, 0x4b, 0x1b, 0xef, 0xd5, + 0xc3, 0x30, 0x0d, 0x6e, 0xac, 0x6e, 0x77, 0xdb, 0x1c, 0xc0, 0xea, 0x3c, 0x30, 0xea, 0xc3, 0x5b, + 0xf5, 0x47, 0xe4, 0x90, 0xf6, 0x7c, 0xd6, 0xe6, 0x9b, 0xe3, 0x51, 0xad, 0xbc, 0x93, 0x94, 0x88, + 0x27, 0x95, 0x20, 0x0b, 0x96, 0xac, 0xc3, 0x1f, 
0x51, 0xcd, 0x0d, 0xd4, 0xce, 0x9c, 0x5f, 0x2d, + 0x1a, 0x8f, 0x6a, 0x4b, 0x8f, 0x63, 0xe2, 0x70, 0x42, 0x3c, 0xfa, 0x19, 0x2c, 0x3a, 0xd2, 0x6e, + 0x3c, 0xe8, 0x51, 0x56, 0xc9, 0xad, 0xe5, 0xd6, 0x4b, 0x1b, 0x9b, 0xf5, 0xac, 0xd9, 0x58, 0xe7, + 0x76, 0xe9, 0x9c, 0xf7, 0xc0, 0x70, 0x3b, 0x8f, 0x6d, 0xea, 0xa1, 0x59, 0xf3, 0x4d, 0xe9, 0xf7, + 0x45, 0x1c, 0x95, 0x8f, 0xe3, 0xea, 0xd0, 0xaf, 0x15, 0xb8, 0x4a, 0x9f, 0x69, 0xbd, 0x81, 0x4e, + 0x63, 0x74, 0x95, 0xfc, 0x45, 0x9d, 0xe3, 0x86, 0x3c, 0xc7, 0xd5, 0xbb, 0x29, 0x6a, 0x70, 0xaa, + 0x72, 0xf4, 0x31, 0x94, 0xfa, 0x3c, 0x24, 0x76, 0xad, 0x9e, 0xa1, 0x1d, 0x55, 0xe6, 0x45, 0x20, + 0xa9, 0xe3, 0x51, 0xad, 0xb4, 0x1d, 0x82, 0x4f, 0x46, 0xb5, 0xe5, 0xc8, 0xe7, 0xa7, 0x47, 0x36, + 0xc5, 0x51, 0x36, 0xf5, 0x4f, 0x05, 0x58, 0xde, 0x1e, 0xf0, 0xf4, 0x34, 0xdb, 0x07, 0xf4, 0xb0, + 0x63, 0x59, 0xdd, 0x0c, 0x31, 0xfc, 0x14, 0x16, 0xb4, 0x9e, 0x41, 0x4d, 0x77, 0xcb, 0x32, 0x5b, + 0x46, 0x5b, 0x06, 0xc0, 0x77, 0xb3, 0x3b, 0x42, 0xaa, 0xda, 0x8a, 0x08, 0x69, 0x5e, 0x95, 0x8a, + 0x16, 0xa2, 0x50, 0x1c, 0x53, 0x84, 0x3e, 0x87, 0x59, 0x27, 0x12, 0x02, 0x1f, 0x66, 0xd1, 0x58, + 0x4f, 0x71, 0xf8, 0xa2, 0xd4, 0x35, 0xeb, 0x79, 0xd8, 0x13, 0x8a, 0x1e, 0xc1, 0x62, 0x8b, 0x18, + 0xbd, 0x81, 0x43, 0xa5, 0x53, 0xf3, 0xc2, 0x03, 0xdf, 0xe4, 0x11, 0x72, 0x2f, 0x8a, 0x38, 0x19, + 0xd5, 0xca, 0x31, 0x80, 0x70, 0x6c, 0x9c, 0x39, 0x79, 0x41, 0xc5, 0x73, 0x5d, 0x50, 0x7a, 0x9e, + 0xcf, 0x7e, 0x3d, 0x79, 0x5e, 0x7a, 0xb5, 0x79, 0xfe, 0x31, 0x94, 0x98, 0xa1, 0xd3, 0xbb, 0xad, + 0x16, 0xd5, 0x5c, 0x56, 0x99, 0x0b, 0x1d, 0xb6, 0x17, 0x82, 0xb9, 0xc3, 0xc2, 0xcf, 0xad, 0x1e, + 0x61, 0x0c, 0x47, 0xd9, 0xd0, 0x1d, 0x58, 0xe2, 0x5d, 0xc9, 0x1a, 0xb8, 0x7b, 0x54, 0xb3, 0x4c, + 0x9d, 0x89, 0xd4, 0x98, 0xf5, 0x4e, 0xf0, 0x69, 0x0c, 0x83, 0x13, 0x94, 0xe8, 0x33, 0x58, 0x0d, + 0xa2, 0x08, 0xd3, 0xa1, 0x41, 0x9f, 0xee, 0x53, 0x87, 0x7f, 0xb0, 0x4a, 0x61, 0x2d, 0xb7, 0x5e, + 0x6c, 0xbe, 0x35, 0x1e, 0xd5, 0x56, 0x37, 0xd3, 0x49, 0xf0, 0x34, 0x5e, 0xf4, 0x04, 0x90, 0x43, + 0x0d, 0x73, 0x68, 0x69, 0x22, 0xfc, 0x64, 0x40, 0x80, 0xb0, 0xef, 0xdd, 0xf1, 0xa8, 0x86, 0xf0, + 0x04, 0xf6, 0x64, 0x54, 0xfb, 0xc6, 0x24, 0x54, 0x84, 0x47, 0x8a, 0x2c, 0xf4, 0x53, 0x58, 0xee, + 0xc7, 0x1a, 0x11, 0xab, 0x2c, 0x88, 0x0c, 0xb9, 0x9d, 0x3d, 0x27, 0xe3, 0x9d, 0x2c, 0xec, 0xb9, + 0x71, 0x38, 0xc3, 0x49, 0x4d, 0xea, 0x5f, 0x15, 0xb8, 0x91, 0xa8, 0x21, 0x5e, 0xba, 0x0e, 0x3c, + 0x0d, 0xe8, 0x09, 0x14, 0x78, 0x54, 0xe8, 0xc4, 0x25, 0xb2, 0x45, 0xbd, 0x9b, 0x2d, 0x86, 0xbc, + 0x80, 0xd9, 0xa6, 0x2e, 0x09, 0x5b, 0x64, 0x08, 0xc3, 0x81, 0x54, 0xf4, 0x43, 0x28, 0x48, 0xcd, + 0xac, 0x32, 0x23, 0x0c, 0xff, 0xce, 0x19, 0x0c, 0x8f, 0x9f, 0xbd, 0x99, 0xe7, 0xaa, 0x70, 0x20, + 0x50, 0xfd, 0xa7, 0x02, 0x6b, 0x2f, 0xb3, 0xef, 0x91, 0xc1, 0x5c, 0xf4, 0xf9, 0x84, 0x8d, 0xf5, + 0x8c, 0x79, 0x62, 0x30, 0xcf, 0xc2, 0x60, 0x26, 0xf1, 0x21, 0x11, 0xfb, 0xba, 0x30, 0x6b, 0xb8, + 0xb4, 0xef, 0x1b, 0x77, 0xef, 0xdc, 0xc6, 0xc5, 0x0e, 0x1e, 0x96, 0xc1, 0x07, 0x5c, 0x38, 0xf6, + 0x74, 0xa8, 0x2f, 0x14, 0x58, 0x9d, 0xd2, 0xa9, 0xd0, 0x87, 0x61, 0x2f, 0x16, 0x45, 0xa4, 0xa2, + 0x88, 0xbc, 0x28, 0x47, 0x9b, 0xa8, 0x40, 0xe0, 0x38, 0x1d, 0xfa, 0xa5, 0x02, 0xc8, 0x99, 0x90, + 0x27, 0x3b, 0xc7, 0xb9, 0xeb, 0xf8, 0x75, 0x69, 0x00, 0x9a, 0xc4, 0xe1, 0x14, 0x75, 0x2a, 0x81, + 0xe2, 0x2e, 0x71, 0x48, 0xff, 0x13, 0xc3, 0xd4, 0xf9, 0x24, 0x46, 0x6c, 0x43, 0x66, 0xa9, 0xec, + 0x76, 0x41, 0x98, 0x6d, 0xee, 0x3e, 0x90, 0x18, 0x1c, 0xa1, 0xe2, 0xbd, 0xb1, 0x6b, 0x98, 0xba, + 0x9c, 0xdb, 0x82, 0xde, 0xc8, 0xe5, 0x61, 0x81, 0x51, 0x7f, 0x3f, 0x03, 
0x05, 0xa1, 0x83, 0xcf, + 0x92, 0xa7, 0xb7, 0xd2, 0x06, 0x14, 0x83, 0xd2, 0x2b, 0xa5, 0x96, 0x25, 0x59, 0x31, 0x28, 0xd3, + 0x38, 0xa4, 0x41, 0x5f, 0x40, 0x81, 0xf9, 0x05, 0x39, 0x77, 0xfe, 0x82, 0xbc, 0xc0, 0x23, 0x2d, + 0x28, 0xc5, 0x81, 0x48, 0xe4, 0xc2, 0xaa, 0xcd, 0x4f, 0x4f, 0x5d, 0xea, 0xec, 0x58, 0xee, 0x3d, + 0x6b, 0x60, 0xea, 0x9b, 0x1a, 0xf7, 0x9e, 0xec, 0x86, 0x77, 0x78, 0x09, 0xdc, 0x4d, 0x27, 0x39, + 0x19, 0xd5, 0xde, 0x9a, 0x82, 0x12, 0xa5, 0x6b, 0x9a, 0x68, 0xf5, 0x77, 0x0a, 0xac, 0xec, 0x51, + 0x67, 0x68, 0x68, 0x14, 0xd3, 0x16, 0x75, 0xa8, 0xa9, 0x25, 0x5c, 0xa3, 0x64, 0x70, 0x8d, 0xef, + 0xed, 0x99, 0xa9, 0xde, 0xbe, 0x01, 0x79, 0x9b, 0xb8, 0x1d, 0x39, 0xd8, 0x17, 0x38, 0x76, 0x97, + 0xb8, 0x1d, 0x2c, 0xa0, 0x02, 0x6b, 0x39, 0xae, 0x30, 0x74, 0x56, 0x62, 0x2d, 0xc7, 0xc5, 0x02, + 0xaa, 0xfe, 0x46, 0x81, 0x05, 0x6e, 0xc5, 0x56, 0x87, 0x6a, 0x5d, 0xfe, 0xac, 0xf8, 0x52, 0x01, + 0x44, 0x93, 0x8f, 0x0d, 0x2f, 0x23, 0x4a, 0x1b, 0x1f, 0x65, 0x4f, 0xd1, 0x89, 0x07, 0x4b, 0x18, + 0xd6, 0x13, 0x28, 0x86, 0x53, 0x54, 0xaa, 0x7f, 0x99, 0x81, 0x6b, 0xfb, 0xa4, 0x67, 0xe8, 0x22, + 0xd5, 0x83, 0xfe, 0x24, 0x9b, 0xc3, 0xab, 0x2f, 0xbf, 0x06, 0xe4, 0x99, 0x4d, 0x35, 0x99, 0xcd, + 0xf7, 0xb3, 0x9b, 0x3e, 0xf5, 0xd0, 0x7b, 0x36, 0xd5, 0xc2, 0x1b, 0xe4, 0x5f, 0x58, 0xa8, 0x40, + 0x3f, 0x86, 0x39, 0xe6, 0x12, 0x77, 0xc0, 0x64, 0xf0, 0x3f, 0xb8, 0x08, 0x65, 0x42, 0x60, 0x73, + 0x49, 0xaa, 0x9b, 0xf3, 0xbe, 0xb1, 0x54, 0xa4, 0xfe, 0x47, 0x81, 0xb5, 0xa9, 0xbc, 0x4d, 0xc3, + 0xd4, 0x79, 0x30, 0xbc, 0x7a, 0x27, 0xdb, 0x31, 0x27, 0xef, 0x5c, 0x80, 0xdd, 0xf2, 0xec, 0xd3, + 0x7c, 0xad, 0xfe, 0x5b, 0x81, 0x77, 0x4e, 0x63, 0xbe, 0x84, 0xe6, 0x67, 0xc5, 0x9b, 0xdf, 0xc3, + 0x8b, 0xb3, 0x7c, 0x4a, 0x03, 0xfc, 0x32, 0x77, 0xba, 0xdd, 0xdc, 0x4d, 0xbc, 0x83, 0xd8, 0x02, + 0xb8, 0x13, 0x16, 0xf9, 0xe0, 0x12, 0x77, 0x03, 0x0c, 0x8e, 0x50, 0x71, 0x5f, 0xd9, 0xb2, 0x3d, + 0xc8, 0xab, 0xdc, 0xc8, 0x6e, 0x90, 0xdf, 0x58, 0xbc, 0xf2, 0xed, 0x7f, 0xe1, 0x40, 0x22, 0x72, + 0x61, 0xa9, 0x1f, 0x5b, 0x14, 0xc8, 0x34, 0x39, 0xeb, 0x1c, 0x18, 0xf0, 0x7b, 0x73, 0x73, 0x1c, + 0x86, 0x13, 0x3a, 0xd0, 0x01, 0x94, 0x87, 0xd2, 0x5f, 0x96, 0xe9, 0x95, 0x74, 0xef, 0x75, 0x5c, + 0x6c, 0xde, 0xe4, 0xef, 0x8d, 0xfd, 0x24, 0xf2, 0x64, 0x54, 0x5b, 0x49, 0x02, 0xf1, 0xa4, 0x0c, + 0xf5, 0x1f, 0x0a, 0xbc, 0x3d, 0xf5, 0x26, 0x2e, 0x21, 0xf4, 0x3a, 0xf1, 0xd0, 0xdb, 0xba, 0x88, + 0xd0, 0x4b, 0x8f, 0xb9, 0xdf, 0xce, 0xbd, 0xc4, 0x52, 0x11, 0x6c, 0x4f, 0xa0, 0x68, 0xfb, 0xb3, + 0x4b, 0xca, 0xa6, 0x27, 0x4b, 0xe4, 0x70, 0xd6, 0xe6, 0x22, 0xef, 0x9f, 0xc1, 0x27, 0x0e, 0x85, + 0xa2, 0x9f, 0xc0, 0x8a, 0x3f, 0xdb, 0x73, 0x7e, 0xc3, 0x74, 0xfd, 0x01, 0xed, 0xfc, 0xe1, 0x73, + 0x75, 0x3c, 0xaa, 0xad, 0x6c, 0x27, 0xa4, 0xe2, 0x09, 0x3d, 0xa8, 0x0b, 0xa5, 0xf0, 0xfa, 0xfd, + 0xf7, 0xfd, 0xfb, 0x67, 0xf7, 0xb7, 0x65, 0x36, 0xdf, 0x90, 0x0e, 0x2e, 0x85, 0x30, 0x86, 0xa3, + 0xd2, 0x2f, 0xf8, 0xa1, 0xff, 0x73, 0x58, 0x21, 0xf1, 0x45, 0x27, 0xab, 0xcc, 0x9e, 0xf5, 0x11, + 0x92, 0x58, 0x95, 0x36, 0x2b, 0xd2, 0x88, 0x95, 0x04, 0x82, 0xe1, 0x09, 0x65, 0x69, 0xaf, 0xbf, + 0xb9, 0xcb, 0x7a, 0xfd, 0x21, 0x0d, 0x8a, 0x43, 0xe2, 0x18, 0xe4, 0xb0, 0x47, 0xf9, 0x53, 0x3b, + 0x77, 0xb6, 0x82, 0xb6, 0x2f, 0x59, 0xc3, 0xc9, 0xce, 0x87, 0x30, 0x1c, 0xca, 0x55, 0xff, 0x38, + 0x03, 0xb5, 0x53, 0xda, 0x37, 0x7a, 0x08, 0xc8, 0x3a, 0x64, 0xd4, 0x19, 0x52, 0xfd, 0xbe, 0xb7, + 0x8a, 0xf6, 0xc7, 0xfa, 0x5c, 0x38, 0x50, 0x3d, 0x9e, 0xa0, 0xc0, 0x29, 0x5c, 0xa8, 0x07, 0x0b, + 0x6e, 0x64, 0xd4, 0x93, 0x59, 0xf0, 0x41, 0x76, 0xbb, 0xa2, 0x83, 0x62, 0x73, 0x65, 0x3c, 0xaa, + 
0xc5, 0x46, 0x47, 0x1c, 0x93, 0x8e, 0x34, 0x00, 0x2d, 0xbc, 0x3a, 0x2f, 0xf4, 0x1b, 0xd9, 0xaa, + 0x58, 0x78, 0x63, 0x41, 0xdf, 0x89, 0x5c, 0x56, 0x44, 0xac, 0x7a, 0x3c, 0x0f, 0xe5, 0xd0, 0x85, + 0xaf, 0x77, 0x7d, 0xaf, 0x77, 0x7d, 0x2f, 0xdd, 0xf5, 0xc1, 0xeb, 0x5d, 0xdf, 0xb9, 0x76, 0x7d, + 0x29, 0xb5, 0xb8, 0x74, 0x69, 0x9b, 0xb8, 0x63, 0x05, 0xaa, 0x13, 0x39, 0x7e, 0xd9, 0xbb, 0xb8, + 0x2f, 0x26, 0x76, 0x71, 0x1f, 0x9d, 0x67, 0x6c, 0x9a, 0xb6, 0x8d, 0xfb, 0x97, 0x02, 0xea, 0xcb, + 0x6d, 0xbc, 0x84, 0xb9, 0xb0, 0x1f, 0x9f, 0x0b, 0xbf, 0xff, 0x7f, 0x18, 0x98, 0x65, 0x23, 0xf7, + 0x5f, 0x05, 0x20, 0x1c, 0x66, 0xd0, 0x3b, 0x10, 0xf9, 0xa1, 0x50, 0x96, 0x6e, 0xcf, 0x4d, 0x11, + 0x38, 0xba, 0x09, 0xf3, 0x7d, 0xca, 0x18, 0x69, 0xfb, 0x0b, 0x91, 0xe0, 0x77, 0xcc, 0x6d, 0x0f, + 0x8c, 0x7d, 0x3c, 0x3a, 0x80, 0x39, 0x87, 0x12, 0x66, 0x99, 0x72, 0x31, 0xf2, 0x3d, 0xfe, 0x0a, + 0xc6, 0x02, 0x72, 0x32, 0xaa, 0xdd, 0xca, 0xf2, 0x3b, 0x73, 0x5d, 0x3e, 0x9a, 0x05, 0x13, 0x96, + 0xe2, 0xd0, 0x7d, 0x28, 0x4b, 0x1d, 0x91, 0x03, 0x7b, 0x95, 0xf6, 0x9a, 0x3c, 0x4d, 0x79, 0x3b, + 0x49, 0x80, 0x27, 0x79, 0xd4, 0x87, 0x50, 0xf0, 0x07, 0x03, 0x54, 0x81, 0x7c, 0xe4, 0xbd, 0xe5, + 0x19, 0x2e, 0x20, 0x09, 0xc7, 0xcc, 0xa4, 0x3b, 0x46, 0xfd, 0x83, 0x02, 0x6f, 0xa4, 0x34, 0x25, + 0x74, 0x0d, 0x72, 0x03, 0xa7, 0x27, 0x5d, 0x30, 0x3f, 0x1e, 0xd5, 0x72, 0x9f, 0xe1, 0x47, 0x98, + 0xc3, 0x10, 0x81, 0x79, 0xe6, 0xad, 0xa7, 0x64, 0x30, 0xdd, 0xc9, 0x7e, 0xe3, 0xc9, 0xbd, 0x56, + 0xb3, 0xc4, 0xef, 0xc0, 0x87, 0xfa, 0x72, 0xd1, 0x3a, 0x14, 0x34, 0xd2, 0x1c, 0x98, 0x7a, 0xcf, + 0xbb, 0xaf, 0x05, 0xef, 0x8d, 0xb7, 0xb5, 0xe9, 0xc1, 0x70, 0x80, 0x6d, 0xee, 0x3c, 0x3f, 0xae, + 0x5e, 0xf9, 0xea, 0xb8, 0x7a, 0xe5, 0xc5, 0x71, 0xf5, 0xca, 0x2f, 0xc6, 0x55, 0xe5, 0xf9, 0xb8, + 0xaa, 0x7c, 0x35, 0xae, 0x2a, 0x2f, 0xc6, 0x55, 0xe5, 0x6f, 0xe3, 0xaa, 0xf2, 0xab, 0xbf, 0x57, + 0xaf, 0xfc, 0x60, 0x3d, 0xeb, 0x7f, 0x39, 0xfc, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x6f, 0xf2, 0xe8, + 0x4a, 0x10, 0x21, 0x00, 0x00, } func (m *AuditAnnotation) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto index 1855cdfc4f..30f99f64d0 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -157,7 +157,7 @@ message MatchResources { // // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1; // ObjectSelector decides whether to run the validation based on if the // object has matching labels. objectSelector is evaluated against both @@ -171,7 +171,7 @@ message MatchResources { // users may skip the admission webhook by setting the labels. // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2; // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. // The policy cares about an operation if it matches _any_ Rule. 
@@ -222,7 +222,8 @@ message MutatingWebhook { // from putting the cluster in a state which cannot be recovered from without completely // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. - repeated k8s.io.api.admissionregistration.v1.RuleWithOperations rules = 3; + // +listType=atomic + repeated .k8s.io.api.admissionregistration.v1.RuleWithOperations rules = 3; // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - // allowed values are Ignore or Fail. Defaults to Ignore. @@ -290,7 +291,7 @@ message MutatingWebhook { // // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; // ObjectSelector decides whether to run the webhook based on if the // object has matching labels. objectSelector is evaluated against both @@ -304,7 +305,7 @@ message MutatingWebhook { // users may skip the admission webhook by setting the labels. // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; // SideEffects states whether this webhook has side effects. // Acceptable values are: Unknown, None, Some, NoneOnDryRun @@ -332,6 +333,7 @@ message MutatingWebhook { // and be subject to the failure policy. // Default to `['v1beta1']`. // +optional + // +listType=atomic repeated string admissionReviewVersions = 8; // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. @@ -364,13 +366,10 @@ message MutatingWebhook { // - If failurePolicy=Fail, reject the request // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped // - // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate. - // // +patchMergeKey=name // +patchStrategy=merge // +listType=map // +listMapKey=name - // +featureGate=AdmissionWebhookMatchConditions // +optional repeated MatchCondition matchConditions = 12; } @@ -380,12 +379,14 @@ message MutatingWebhook { message MutatingWebhookConfiguration { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Webhooks is a list of webhooks and the affected resources and operations. // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated MutatingWebhook Webhooks = 2; } @@ -394,7 +395,7 @@ message MutatingWebhookConfigurationList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of MutatingWebhookConfiguration. 
repeated MutatingWebhookConfiguration items = 2; @@ -409,7 +410,7 @@ message NamedRuleWithOperations { repeated string resourceNames = 1; // RuleWithOperations is a tuple of Operations and Resources. - optional k8s.io.api.admissionregistration.v1.RuleWithOperations ruleWithOperations = 2; + optional .k8s.io.api.admissionregistration.v1.RuleWithOperations ruleWithOperations = 2; } // ParamKind is a tuple of Group Kind and Version. @@ -467,7 +468,7 @@ message ParamRef { // mutually exclusive properties. If one is set, the other must be unset. // // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; // `parameterNotFoundAction` controls the behavior of the binding when the resource // exists, and name or selector is valid, but there are no parameters @@ -522,7 +523,7 @@ message TypeChecking { message ValidatingAdmissionPolicy { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the ValidatingAdmissionPolicy. optional ValidatingAdmissionPolicySpec spec = 2; @@ -549,7 +550,7 @@ message ValidatingAdmissionPolicy { message ValidatingAdmissionPolicyBinding { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding. optional ValidatingAdmissionPolicyBindingSpec spec = 2; @@ -560,7 +561,7 @@ message ValidatingAdmissionPolicyBindingList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of PolicyBinding. repeated ValidatingAdmissionPolicyBinding items = 2; @@ -638,7 +639,7 @@ message ValidatingAdmissionPolicyList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ValidatingAdmissionPolicy. repeated ValidatingAdmissionPolicy items = 2; @@ -743,7 +744,7 @@ message ValidatingAdmissionPolicyStatus { // +optional // +listType=map // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3; } // ValidatingWebhook describes an admission webhook and the resources and operations it applies to. @@ -765,7 +766,8 @@ message ValidatingWebhook { // from putting the cluster in a state which cannot be recovered from without completely // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. 
- repeated k8s.io.api.admissionregistration.v1.RuleWithOperations rules = 3; + // +listType=atomic + repeated .k8s.io.api.admissionregistration.v1.RuleWithOperations rules = 3; // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - // allowed values are Ignore or Fail. Defaults to Ignore. @@ -833,7 +835,7 @@ message ValidatingWebhook { // // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; // ObjectSelector decides whether to run the webhook based on if the // object has matching labels. objectSelector is evaluated against both @@ -847,7 +849,7 @@ message ValidatingWebhook { // users may skip the admission webhook by setting the labels. // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; // SideEffects states whether this webhook has side effects. // Acceptable values are: Unknown, None, Some, NoneOnDryRun @@ -856,6 +858,7 @@ message ValidatingWebhook { // Requests with the dryRun attribute will be auto-rejected if they match a webhook with // sideEffects == Unknown or Some. Defaults to Unknown. // +optional + // +listType=atomic optional string sideEffects = 6; // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, @@ -875,6 +878,7 @@ message ValidatingWebhook { // and be subject to the failure policy. // Default to `['v1beta1']`. // +optional + // +listType=atomic repeated string admissionReviewVersions = 8; // MatchConditions is a list of conditions that must be met for a request to be sent to this @@ -889,13 +893,10 @@ message ValidatingWebhook { // - If failurePolicy=Fail, reject the request // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped // - // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate. - // // +patchMergeKey=name // +patchStrategy=merge // +listType=map // +listMapKey=name - // +featureGate=AdmissionWebhookMatchConditions // +optional repeated MatchCondition matchConditions = 11; } @@ -905,12 +906,14 @@ message ValidatingWebhook { message ValidatingWebhookConfiguration { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Webhooks is a list of webhooks and the affected resources and operations. // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated ValidatingWebhook Webhooks = 2; } @@ -919,7 +922,7 @@ message ValidatingWebhookConfigurationList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ValidatingWebhookConfiguration. 
repeated ValidatingWebhookConfiguration items = 2; diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go index 27085e056a..0f59031239 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -684,6 +684,8 @@ type ValidatingWebhookConfiguration struct { // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name Webhooks []ValidatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` } @@ -723,6 +725,8 @@ type MutatingWebhookConfiguration struct { // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name Webhooks []MutatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` } @@ -762,6 +766,7 @@ type ValidatingWebhook struct { // from putting the cluster in a state which cannot be recovered from without completely // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + // +listType=atomic Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - @@ -853,6 +858,7 @@ type ValidatingWebhook struct { // Requests with the dryRun attribute will be auto-rejected if they match a webhook with // sideEffects == Unknown or Some. Defaults to Unknown. // +optional + // +listType=atomic SideEffects *SideEffectClass `json:"sideEffects,omitempty" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"` // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, @@ -872,6 +878,7 @@ type ValidatingWebhook struct { // and be subject to the failure policy. // Default to `['v1beta1']`. // +optional + // +listType=atomic AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty" protobuf:"bytes,8,rep,name=admissionReviewVersions"` // MatchConditions is a list of conditions that must be met for a request to be sent to this @@ -886,13 +893,10 @@ type ValidatingWebhook struct { // - If failurePolicy=Fail, reject the request // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped // - // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate. - // // +patchMergeKey=name // +patchStrategy=merge // +listType=map // +listMapKey=name - // +featureGate=AdmissionWebhookMatchConditions // +optional MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,11,rep,name=matchConditions"` } @@ -916,6 +920,7 @@ type MutatingWebhook struct { // from putting the cluster in a state which cannot be recovered from without completely // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + // +listType=atomic Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - @@ -1026,6 +1031,7 @@ type MutatingWebhook struct { // and be subject to the failure policy. 
// Default to `['v1beta1']`. // +optional + // +listType=atomic AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty" protobuf:"bytes,8,rep,name=admissionReviewVersions"` // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. @@ -1058,13 +1064,10 @@ type MutatingWebhook struct { // - If failurePolicy=Fail, reject the request // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped // - // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate. - // // +patchMergeKey=name // +patchStrategy=merge // +listType=map // +listMapKey=name - // +featureGate=AdmissionWebhookMatchConditions // +optional MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,12,rep,name=matchConditions"` } diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go index adaf4bc11d..cc1509b539 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go @@ -83,7 +83,7 @@ var map_MutatingWebhook = map[string]string{ "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", "reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".", - "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. 
There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped", } func (MutatingWebhook) SwaggerDoc() map[string]string { @@ -253,7 +253,7 @@ var map_ValidatingWebhook = map[string]string{ "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", - "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. 
If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped", } func (ValidatingWebhook) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apidiscovery/v2/doc.go b/vendor/k8s.io/api/apidiscovery/v2/doc.go new file mode 100644 index 0000000000..4f3ad5f139 --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true +// +groupName=apidiscovery.k8s.io + +package v2 // import "k8s.io/api/apidiscovery/v2" diff --git a/vendor/k8s.io/api/apidiscovery/v2/generated.pb.go b/vendor/k8s.io/api/apidiscovery/v2/generated.pb.go new file mode 100644 index 0000000000..5c37feaa2e --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2/generated.pb.go @@ -0,0 +1,1742 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/apidiscovery/v2/generated.proto + +package v2 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *APIGroupDiscovery) Reset() { *m = APIGroupDiscovery{} } +func (*APIGroupDiscovery) ProtoMessage() {} +func (*APIGroupDiscovery) Descriptor() ([]byte, []int) { + return fileDescriptor_e0b7287280068d8f, []int{0} +} +func (m *APIGroupDiscovery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIGroupDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIGroupDiscovery) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIGroupDiscovery.Merge(m, src) +} +func (m *APIGroupDiscovery) XXX_Size() int { + return m.Size() +} +func (m *APIGroupDiscovery) XXX_DiscardUnknown() { + xxx_messageInfo_APIGroupDiscovery.DiscardUnknown(m) +} + +var xxx_messageInfo_APIGroupDiscovery proto.InternalMessageInfo + +func (m *APIGroupDiscoveryList) Reset() { *m = APIGroupDiscoveryList{} } +func (*APIGroupDiscoveryList) ProtoMessage() {} +func (*APIGroupDiscoveryList) Descriptor() ([]byte, []int) { + return fileDescriptor_e0b7287280068d8f, []int{1} +} +func (m *APIGroupDiscoveryList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIGroupDiscoveryList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIGroupDiscoveryList) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIGroupDiscoveryList.Merge(m, src) +} +func (m *APIGroupDiscoveryList) XXX_Size() int { + return m.Size() +} +func (m *APIGroupDiscoveryList) XXX_DiscardUnknown() { + xxx_messageInfo_APIGroupDiscoveryList.DiscardUnknown(m) +} + +var xxx_messageInfo_APIGroupDiscoveryList proto.InternalMessageInfo + +func (m *APIResourceDiscovery) Reset() { *m = APIResourceDiscovery{} } +func (*APIResourceDiscovery) ProtoMessage() {} +func (*APIResourceDiscovery) Descriptor() ([]byte, []int) { + return fileDescriptor_e0b7287280068d8f, []int{2} +} +func (m *APIResourceDiscovery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIResourceDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIResourceDiscovery) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIResourceDiscovery.Merge(m, src) +} +func (m *APIResourceDiscovery) XXX_Size() int { + return m.Size() +} +func (m *APIResourceDiscovery) XXX_DiscardUnknown() { + xxx_messageInfo_APIResourceDiscovery.DiscardUnknown(m) +} + +var xxx_messageInfo_APIResourceDiscovery proto.InternalMessageInfo + +func (m *APISubresourceDiscovery) Reset() { *m = APISubresourceDiscovery{} } +func (*APISubresourceDiscovery) ProtoMessage() {} +func (*APISubresourceDiscovery) Descriptor() ([]byte, []int) { + return fileDescriptor_e0b7287280068d8f, []int{3} +} +func (m *APISubresourceDiscovery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APISubresourceDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APISubresourceDiscovery) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_APISubresourceDiscovery.Merge(m, src) +} +func (m *APISubresourceDiscovery) XXX_Size() int { + return m.Size() +} +func (m *APISubresourceDiscovery) XXX_DiscardUnknown() { + xxx_messageInfo_APISubresourceDiscovery.DiscardUnknown(m) +} + +var xxx_messageInfo_APISubresourceDiscovery proto.InternalMessageInfo + +func (m *APIVersionDiscovery) Reset() { *m = APIVersionDiscovery{} } +func (*APIVersionDiscovery) ProtoMessage() {} +func (*APIVersionDiscovery) Descriptor() ([]byte, []int) { + return fileDescriptor_e0b7287280068d8f, []int{4} +} +func (m *APIVersionDiscovery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIVersionDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIVersionDiscovery) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIVersionDiscovery.Merge(m, src) +} +func (m *APIVersionDiscovery) XXX_Size() int { + return m.Size() +} +func (m *APIVersionDiscovery) XXX_DiscardUnknown() { + xxx_messageInfo_APIVersionDiscovery.DiscardUnknown(m) +} + +var xxx_messageInfo_APIVersionDiscovery proto.InternalMessageInfo + +func init() { + proto.RegisterType((*APIGroupDiscovery)(nil), "k8s.io.api.apidiscovery.v2.APIGroupDiscovery") + proto.RegisterType((*APIGroupDiscoveryList)(nil), "k8s.io.api.apidiscovery.v2.APIGroupDiscoveryList") + proto.RegisterType((*APIResourceDiscovery)(nil), "k8s.io.api.apidiscovery.v2.APIResourceDiscovery") + proto.RegisterType((*APISubresourceDiscovery)(nil), "k8s.io.api.apidiscovery.v2.APISubresourceDiscovery") + proto.RegisterType((*APIVersionDiscovery)(nil), "k8s.io.api.apidiscovery.v2.APIVersionDiscovery") +} + +func init() { + proto.RegisterFile("k8s.io/api/apidiscovery/v2/generated.proto", fileDescriptor_e0b7287280068d8f) +} + +var fileDescriptor_e0b7287280068d8f = []byte{ + // 736 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x4e, 0xdb, 0x4c, + 0x14, 0x8d, 0x09, 0xf9, 0x48, 0x26, 0xc9, 0xd7, 0x30, 0x80, 0x6a, 0x65, 0xe1, 0xa0, 0x6c, 0x4a, + 0xab, 0x32, 0x86, 0x94, 0xa2, 0x2e, 0x9b, 0x94, 0xb6, 0x8a, 0xfa, 0x87, 0x26, 0x15, 0x8b, 0xaa, + 0x95, 0xea, 0x38, 0x83, 0xe3, 0x82, 0x7f, 0x34, 0xe3, 0x44, 0x62, 0xd7, 0x47, 0xe8, 0x13, 0xf4, + 0x79, 0xe8, 0x8e, 0x05, 0x0b, 0x56, 0x51, 0x49, 0x77, 0x7d, 0x04, 0x56, 0xd5, 0x8c, 0xc7, 0x3f, + 0x21, 0x44, 0x41, 0x5d, 0x74, 0x81, 0x84, 0xcf, 0x9c, 0x73, 0xee, 0x3d, 0xd7, 0xd7, 0x13, 0xf0, + 0xe0, 0xe8, 0x09, 0x43, 0xb6, 0xa7, 0x1b, 0xbe, 0xcd, 0xff, 0x7a, 0x36, 0x33, 0xbd, 0x21, 0xa1, + 0x27, 0xfa, 0xb0, 0xa1, 0x5b, 0xc4, 0x25, 0xd4, 0x08, 0x48, 0x0f, 0xf9, 0xd4, 0x0b, 0x3c, 0x58, + 0x0d, 0xb9, 0xc8, 0xf0, 0x6d, 0x94, 0xe6, 0xa2, 0x61, 0xa3, 0xba, 0x69, 0xd9, 0x41, 0x7f, 0xd0, + 0x45, 0xa6, 0xe7, 0xe8, 0x96, 0x67, 0x79, 0xba, 0x90, 0x74, 0x07, 0x87, 0xe2, 0x49, 0x3c, 0x88, + 0xff, 0x42, 0xab, 0xea, 0x4e, 0x52, 0xd6, 0x31, 0xcc, 0xbe, 0xed, 0xf2, 0x92, 0xfe, 0x91, 0xc5, + 0x01, 0xa6, 0x3b, 0x24, 0x30, 0xf4, 0xe1, 0xf6, 0xf5, 0x06, 0xaa, 0xfa, 0x2c, 0x15, 0x1d, 0xb8, + 0x81, 0xed, 0x90, 0x29, 0xc1, 0xee, 0x3c, 0x01, 0x33, 0xfb, 0xc4, 0x31, 0xae, 0xeb, 0xea, 0xe7, + 0x0a, 0x58, 0x6e, 0xee, 0xb7, 0x5f, 0x52, 0x6f, 0xe0, 0xef, 0x45, 0x31, 0xe1, 0x67, 0x90, 0xe7, + 0x9d, 0xf5, 0x8c, 0xc0, 0x50, 0x95, 0x75, 0x65, 0xa3, 0xd8, 0xd8, 0x42, 0xc9, 0x48, 0xe2, 0x02, + 0xc8, 0x3f, 0xb2, 0x38, 0xc0, 0x10, 0x67, 0xa3, 0xe1, 0x36, 0x7a, 0xd7, 0xfd, 0x42, 0xcc, 0xe0, + 0x0d, 0x09, 
0x8c, 0x16, 0x3c, 0x1d, 0xd5, 0x32, 0xe3, 0x51, 0x0d, 0x24, 0x18, 0x8e, 0x5d, 0xe1, + 0x27, 0x90, 0x1f, 0x12, 0xca, 0x6c, 0xcf, 0x65, 0xea, 0xc2, 0x7a, 0x76, 0xa3, 0xd8, 0xd0, 0xd1, + 0xec, 0xa1, 0xa3, 0xe6, 0x7e, 0xfb, 0x20, 0xa4, 0xc7, 0x4d, 0xb6, 0x2a, 0xb2, 0x40, 0x5e, 0x9e, + 0x30, 0x1c, 0x5b, 0xd6, 0x7f, 0x28, 0x60, 0x6d, 0x2a, 0xd6, 0x6b, 0x9b, 0x05, 0xf0, 0xe3, 0x54, + 0x34, 0x74, 0xbb, 0x68, 0x5c, 0x2d, 0x82, 0xc5, 0x75, 0x23, 0x24, 0x15, 0x0b, 0x83, 0x9c, 0x1d, + 0x10, 0x27, 0xca, 0xb4, 0x39, 0x27, 0xd3, 0x64, 0x7f, 0xad, 0xb2, 0x74, 0xce, 0xb5, 0xb9, 0x07, + 0x0e, 0xad, 0xea, 0xdf, 0x17, 0xc1, 0x6a, 0x73, 0xbf, 0x8d, 0x09, 0xf3, 0x06, 0xd4, 0x24, 0xc9, + 0x5b, 0x7a, 0x08, 0xf2, 0x54, 0x82, 0x22, 0x4a, 0x21, 0x69, 0x2d, 0x22, 0xe3, 0x98, 0x01, 0x8f, + 0x41, 0x89, 0x12, 0xe6, 0x7b, 0x2e, 0x23, 0xaf, 0x6c, 0xb7, 0xa7, 0x2e, 0x88, 0xf0, 0xbb, 0xb7, + 0x0b, 0x2f, 0x1a, 0x95, 0x73, 0xe6, 0xea, 0x56, 0x65, 0x3c, 0xaa, 0x95, 0x70, 0xca, 0x0f, 0x4f, + 0xb8, 0xc3, 0x1d, 0x90, 0x63, 0xa6, 0xe7, 0x13, 0x35, 0x2b, 0x1a, 0xd3, 0xa2, 0x64, 0x1d, 0x0e, + 0x5e, 0x8d, 0x6a, 0xe5, 0xa8, 0x43, 0x01, 0xe0, 0x90, 0x0c, 0xf7, 0x40, 0x85, 0xd9, 0xae, 0x35, + 0x38, 0x36, 0x68, 0x74, 0xae, 0x2e, 0x0a, 0x03, 0x55, 0x1a, 0x54, 0x3a, 0xd7, 0xce, 0xf1, 0x94, + 0x02, 0xd6, 0x40, 0x6e, 0x48, 0x68, 0x97, 0xa9, 0xb9, 0xf5, 0xec, 0x46, 0xa1, 0x55, 0xe0, 0x75, + 0x0f, 0x38, 0x80, 0x43, 0x1c, 0x22, 0x00, 0x58, 0xdf, 0xa3, 0xc1, 0x5b, 0xc3, 0x21, 0x4c, 0xfd, + 0x4f, 0xb0, 0xfe, 0xe7, 0xab, 0xda, 0x89, 0x51, 0x9c, 0x62, 0x70, 0xbe, 0x69, 0x04, 0xc4, 0xf2, + 0xa8, 0x4d, 0x98, 0xba, 0x94, 0xf0, 0x9f, 0xc5, 0x28, 0x4e, 0x31, 0xa0, 0x03, 0x4a, 0x6c, 0xd0, + 0x8d, 0x26, 0xcf, 0xd4, 0xbc, 0x58, 0x86, 0x47, 0x73, 0x96, 0xa1, 0x93, 0x48, 0x92, 0x95, 0x58, + 0x95, 0xb9, 0x4b, 0xa9, 0x53, 0x86, 0x27, 0xec, 0xeb, 0xe7, 0x0b, 0xe0, 0xee, 0x0c, 0x3d, 0x7c, + 0x0c, 0x8a, 0x29, 0xae, 0x5c, 0x93, 0x15, 0x69, 0x5a, 0x4c, 0x49, 0x70, 0x9a, 0xf7, 0x8f, 0x97, + 0x85, 0x81, 0xb2, 0x61, 0x9a, 0xc4, 0x0f, 0x48, 0xef, 0xfd, 0x89, 0x4f, 0x98, 0x9a, 0x15, 0x03, + 0xfb, 0xdb, 0x72, 0x6b, 0x32, 0x5e, 0xb9, 0x99, 0x36, 0xc5, 0x93, 0x35, 0x92, 0x2d, 0x59, 0xbc, + 0x79, 0x4b, 0xea, 0xbf, 0x15, 0xb0, 0x72, 0xc3, 0xbd, 0x03, 0xef, 0x83, 0x25, 0x79, 0xcf, 0xc8, + 0x71, 0xde, 0x91, 0xf5, 0x96, 0x24, 0x15, 0x47, 0xe7, 0xd0, 0x00, 0x85, 0x64, 0x0b, 0xc2, 0x2b, + 0x61, 0x6b, 0xce, 0x16, 0x4c, 0x7d, 0xe6, 0xad, 0x65, 0x69, 0x5f, 0xc0, 0xf1, 0xfb, 0x4f, 0x5c, + 0xe1, 0x73, 0x50, 0x38, 0xa4, 0x84, 0xf5, 0x5d, 0xc2, 0x98, 0xfc, 0xd8, 0xee, 0x45, 0x82, 0x17, + 0xd1, 0xc1, 0xd5, 0xa8, 0x06, 0x63, 0xc3, 0x18, 0xc5, 0x89, 0xb2, 0xf5, 0xf4, 0xf4, 0x52, 0xcb, + 0x9c, 0x5d, 0x6a, 0x99, 0x8b, 0x4b, 0x2d, 0xf3, 0x75, 0xac, 0x29, 0xa7, 0x63, 0x4d, 0x39, 0x1b, + 0x6b, 0xca, 0xc5, 0x58, 0x53, 0x7e, 0x8e, 0x35, 0xe5, 0xdb, 0x2f, 0x2d, 0xf3, 0xa1, 0x3a, 0xfb, + 0x37, 0xf4, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5d, 0x35, 0x6a, 0x0f, 0x60, 0x07, 0x00, 0x00, +} + +func (m *APIGroupDiscovery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIGroupDiscovery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIGroupDiscovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *APIGroupDiscoveryList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIGroupDiscoveryList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIGroupDiscoveryList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *APIResourceDiscovery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIResourceDiscovery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIResourceDiscovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Subresources) > 0 { + for iNdEx := len(m.Subresources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subresources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if len(m.Categories) > 0 { + for iNdEx := len(m.Categories) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Categories[iNdEx]) + copy(dAtA[i:], m.Categories[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Categories[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if len(m.ShortNames) > 0 { + for iNdEx := len(m.ShortNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ShortNames[iNdEx]) + copy(dAtA[i:], m.ShortNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShortNames[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i -= len(m.SingularResource) + copy(dAtA[i:], m.SingularResource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SingularResource))) + i-- + dAtA[i] = 0x22 + i -= len(m.Scope) + copy(dAtA[i:], m.Scope) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scope))) + i-- + dAtA[i] = 0x1a + if m.ResponseKind != nil { + { + size, err := m.ResponseKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *APISubresourceDiscovery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APISubresourceDiscovery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APISubresourceDiscovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.AcceptedTypes) > 0 { + for iNdEx := len(m.AcceptedTypes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AcceptedTypes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.ResponseKind != nil { + { + size, err := m.ResponseKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Subresource) + copy(dAtA[i:], m.Subresource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *APIVersionDiscovery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIVersionDiscovery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIVersionDiscovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Freshness) + copy(dAtA[i:], m.Freshness) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Freshness))) + i-- + dAtA[i] = 0x1a + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *APIGroupDiscovery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Versions) > 0 { + for _, e := range m.Versions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIGroupDiscoveryList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIResourceDiscovery) Size() (n int) { + if m == nil { + 
return 0 + } + var l int + _ = l + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + if m.ResponseKind != nil { + l = m.ResponseKind.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Scope) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SingularResource) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ShortNames) > 0 { + for _, s := range m.ShortNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Categories) > 0 { + for _, s := range m.Categories { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Subresources) > 0 { + for _, e := range m.Subresources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APISubresourceDiscovery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Subresource) + n += 1 + l + sovGenerated(uint64(l)) + if m.ResponseKind != nil { + l = m.ResponseKind.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AcceptedTypes) > 0 { + for _, e := range m.AcceptedTypes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIVersionDiscovery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Freshness) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *APIGroupDiscovery) String() string { + if this == nil { + return "nil" + } + repeatedStringForVersions := "[]APIVersionDiscovery{" + for _, f := range this.Versions { + repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "APIVersionDiscovery", "APIVersionDiscovery", 1), `&`, ``, 1) + "," + } + repeatedStringForVersions += "}" + s := strings.Join([]string{`&APIGroupDiscovery{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Versions:` + repeatedStringForVersions + `,`, + `}`, + }, "") + return s +} +func (this *APIGroupDiscoveryList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]APIGroupDiscovery{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "APIGroupDiscovery", "APIGroupDiscovery", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&APIGroupDiscoveryList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *APIResourceDiscovery) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubresources := "[]APISubresourceDiscovery{" + for _, f := range this.Subresources { + repeatedStringForSubresources += strings.Replace(strings.Replace(f.String(), "APISubresourceDiscovery", "APISubresourceDiscovery", 1), `&`, ``, 1) + "," + } + repeatedStringForSubresources += "}" + s := 
strings.Join([]string{`&APIResourceDiscovery{`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `ResponseKind:` + strings.Replace(fmt.Sprintf("%v", this.ResponseKind), "GroupVersionKind", "v1.GroupVersionKind", 1) + `,`, + `Scope:` + fmt.Sprintf("%v", this.Scope) + `,`, + `SingularResource:` + fmt.Sprintf("%v", this.SingularResource) + `,`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `ShortNames:` + fmt.Sprintf("%v", this.ShortNames) + `,`, + `Categories:` + fmt.Sprintf("%v", this.Categories) + `,`, + `Subresources:` + repeatedStringForSubresources + `,`, + `}`, + }, "") + return s +} +func (this *APISubresourceDiscovery) String() string { + if this == nil { + return "nil" + } + repeatedStringForAcceptedTypes := "[]GroupVersionKind{" + for _, f := range this.AcceptedTypes { + repeatedStringForAcceptedTypes += fmt.Sprintf("%v", f) + "," + } + repeatedStringForAcceptedTypes += "}" + s := strings.Join([]string{`&APISubresourceDiscovery{`, + `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, + `ResponseKind:` + strings.Replace(fmt.Sprintf("%v", this.ResponseKind), "GroupVersionKind", "v1.GroupVersionKind", 1) + `,`, + `AcceptedTypes:` + repeatedStringForAcceptedTypes + `,`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `}`, + }, "") + return s +} +func (this *APIVersionDiscovery) String() string { + if this == nil { + return "nil" + } + repeatedStringForResources := "[]APIResourceDiscovery{" + for _, f := range this.Resources { + repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "APIResourceDiscovery", "APIResourceDiscovery", 1), `&`, ``, 1) + "," + } + repeatedStringForResources += "}" + s := strings.Join([]string{`&APIVersionDiscovery{`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Resources:` + repeatedStringForResources + `,`, + `Freshness:` + fmt.Sprintf("%v", this.Freshness) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *APIGroupDiscovery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIGroupDiscovery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIGroupDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, APIVersionDiscovery{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIGroupDiscoveryList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIGroupDiscoveryList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIGroupDiscoveryList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, APIGroupDiscovery{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIResourceDiscovery) Unmarshal(dAtA 
[]byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIResourceDiscovery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIResourceDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseKind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResponseKind == nil { + m.ResponseKind = &v1.GroupVersionKind{} + } + if err := m.ResponseKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scope = ResourceScope(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SingularResource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SingularResource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShortNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShortNames = append(m.ShortNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Categories", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Categories = append(m.Categories, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subresources = append(m.Subresources, APISubresourceDiscovery{}) + if err := m.Subresources[len(m.Subresources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APISubresourceDiscovery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APISubresourceDiscovery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APISubresourceDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subresource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseKind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResponseKind == nil { + m.ResponseKind = &v1.GroupVersionKind{} + } + if err := m.ResponseKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AcceptedTypes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AcceptedTypes = append(m.AcceptedTypes, v1.GroupVersionKind{}) + if err := m.AcceptedTypes[len(m.AcceptedTypes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIVersionDiscovery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIVersionDiscovery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIVersionDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, APIResourceDiscovery{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Freshness", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Freshness = DiscoveryFreshness(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/apidiscovery/v2/generated.proto b/vendor/k8s.io/api/apidiscovery/v2/generated.proto new file mode 100644 index 0000000000..62f2d7f2c1 --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2/generated.proto @@ -0,0 +1,156 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.apidiscovery.v2; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/apidiscovery/v2"; + +// APIGroupDiscovery holds information about which resources are being served for all version of the API Group. +// It contains a list of APIVersionDiscovery that holds a list of APIResourceDiscovery types served for a version. +// Versions are in descending order of preference, with the first version being the preferred entry. +message APIGroupDiscovery { + // Standard object's metadata. + // The only field completed will be name. For instance, resourceVersion will be empty. + // name is the name of the API group whose discovery information is presented here. + // name is allowed to be "" to represent the legacy, ungroupified resources. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // versions are the versions supported in this group. They are sorted in descending order of preference, + // with the preferred version being the first entry. + // +listType=map + // +listMapKey=version + repeated APIVersionDiscovery versions = 2; +} + +// APIGroupDiscoveryList is a resource containing a list of APIGroupDiscovery. +// This is one of the types able to be returned from the /api and /apis endpoint and contains an aggregated +// list of API resources (built-ins, Custom Resource Definitions, resources from aggregated servers) +// that a cluster supports. +message APIGroupDiscoveryList { + // ResourceVersion will not be set, because this does not have a replayable ordering among multiple apiservers. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of groups for discovery. The groups are listed in priority order. + repeated APIGroupDiscovery items = 2; +} + +// APIResourceDiscovery provides information about an API resource for discovery. +message APIResourceDiscovery { + // resource is the plural name of the resource. This is used in the URL path and is the unique identifier + // for this resource across all versions in the API group. + // Resources with non-empty groups are located at /apis/// + // Resources with empty groups are located at /api/v1/ + optional string resource = 1; + + // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. + // APIs may return other objects types at their discretion, such as error conditions, requests for alternate representations, or other operation specific behavior. + // This value will be null or empty if an APIService reports subresources but supports no operations on the parent resource + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; + + // scope indicates the scope of a resource, either Cluster or Namespaced + optional string scope = 3; + + // singularResource is the singular name of the resource. This allows clients to handle plural and singular opaquely. + // For many clients the singular form of the resource will be more understandable to users reading messages and should be used when integrating the name of the resource into a sentence. + // The command line tool kubectl, for example, allows use of the singular resource name in place of plurals. + // The singular form of a resource should always be an optional element - when in doubt use the canonical resource name. + optional string singularResource = 4; + + // verbs is a list of supported API operation types (this includes + // but is not limited to get, list, watch, create, update, patch, + // delete, deletecollection, and proxy). + // +listType=set + repeated string verbs = 5; + + // shortNames is a list of suggested short names of the resource. + // +listType=set + repeated string shortNames = 6; + + // categories is a list of the grouped resources this resource belongs to (e.g. 'all'). + // Clients may use this to simplify acting on multiple resource types at once. + // +listType=set + repeated string categories = 7; + + // subresources is a list of subresources provided by this resource. 
Subresources are located at /apis////name-of-instance/ + // +listType=map + // +listMapKey=subresource + repeated APISubresourceDiscovery subresources = 8; +} + +// APISubresourceDiscovery provides information about an API subresource for discovery. +message APISubresourceDiscovery { + // subresource is the name of the subresource. This is used in the URL path and is the unique identifier + // for this resource across all versions. + optional string subresource = 1; + + // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. + // Some subresources do not return normal resources, these will have null or empty return types. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; + + // acceptedTypes describes the kinds that this endpoint accepts. + // Subresources may accept the standard content types or define + // custom negotiation schemes. The list may not be exhaustive for + // all operations. + // +listType=map + // +listMapKey=group + // +listMapKey=version + // +listMapKey=kind + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind acceptedTypes = 3; + + // verbs is a list of supported API operation types (this includes + // but is not limited to get, list, watch, create, update, patch, + // delete, deletecollection, and proxy). Subresources may define + // custom verbs outside the standard Kubernetes verb set. Clients + // should expect the behavior of standard verbs to align with + // Kubernetes interaction conventions. + // +listType=set + repeated string verbs = 4; +} + +// APIVersionDiscovery holds a list of APIResourceDiscovery types that are served for a particular version within an API Group. +message APIVersionDiscovery { + // version is the name of the version within a group version. + optional string version = 1; + + // resources is a list of APIResourceDiscovery objects for the corresponding group version. + // +listType=map + // +listMapKey=resource + repeated APIResourceDiscovery resources = 2; + + // freshness marks whether a group version's discovery document is up to date. + // "Current" indicates the discovery document was recently + // refreshed. "Stale" indicates the discovery document could not + // be retrieved and the returned discovery document may be + // significantly out of date. Clients that require the latest + // version of the discovery information be retrieved before + // performing an operation should not use the aggregated document + optional string freshness = 3; +} + diff --git a/vendor/k8s.io/api/apidiscovery/v2/register.go b/vendor/k8s.io/api/apidiscovery/v2/register.go new file mode 100644 index 0000000000..dd759defce --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. +const GroupName = "apidiscovery.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder installs the api group to a scheme + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme adds api to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &APIGroupDiscoveryList{}, + &APIGroupDiscovery{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/apidiscovery/v2/types.go b/vendor/k8s.io/api/apidiscovery/v2/types.go new file mode 100644 index 0000000000..449679b61d --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2/types.go @@ -0,0 +1,157 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.30 + +// APIGroupDiscoveryList is a resource containing a list of APIGroupDiscovery. +// This is one of the types able to be returned from the /api and /apis endpoint and contains an aggregated +// list of API resources (built-ins, Custom Resource Definitions, resources from aggregated servers) +// that a cluster supports. +type APIGroupDiscoveryList struct { + v1.TypeMeta `json:",inline"` + // ResourceVersion will not be set, because this does not have a replayable ordering among multiple apiservers. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // items is the list of groups for discovery. The groups are listed in priority order. + Items []APIGroupDiscovery `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.30 + +// APIGroupDiscovery holds information about which resources are being served for all version of the API Group. +// It contains a list of APIVersionDiscovery that holds a list of APIResourceDiscovery types served for a version. 
+// Versions are in descending order of preference, with the first version being the preferred entry. +type APIGroupDiscovery struct { + v1.TypeMeta `json:",inline"` + // Standard object's metadata. + // The only field completed will be name. For instance, resourceVersion will be empty. + // name is the name of the API group whose discovery information is presented here. + // name is allowed to be "" to represent the legacy, ungroupified resources. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // versions are the versions supported in this group. They are sorted in descending order of preference, + // with the preferred version being the first entry. + // +listType=map + // +listMapKey=version + Versions []APIVersionDiscovery `json:"versions,omitempty" protobuf:"bytes,2,rep,name=versions"` +} + +// APIVersionDiscovery holds a list of APIResourceDiscovery types that are served for a particular version within an API Group. +type APIVersionDiscovery struct { + // version is the name of the version within a group version. + Version string `json:"version" protobuf:"bytes,1,opt,name=version"` + // resources is a list of APIResourceDiscovery objects for the corresponding group version. + // +listType=map + // +listMapKey=resource + Resources []APIResourceDiscovery `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"` + // freshness marks whether a group version's discovery document is up to date. + // "Current" indicates the discovery document was recently + // refreshed. "Stale" indicates the discovery document could not + // be retrieved and the returned discovery document may be + // significantly out of date. Clients that require the latest + // version of the discovery information be retrieved before + // performing an operation should not use the aggregated document + Freshness DiscoveryFreshness `json:"freshness,omitempty" protobuf:"bytes,3,opt,name=freshness"` +} + +// APIResourceDiscovery provides information about an API resource for discovery. +type APIResourceDiscovery struct { + // resource is the plural name of the resource. This is used in the URL path and is the unique identifier + // for this resource across all versions in the API group. + // Resources with non-empty groups are located at /apis/<APIGroupDiscovery.objectMeta.name>/<APIVersionDiscovery.version>/<APIResourceDiscovery.Resource> + // Resources with empty groups are located at /api/v1/<APIResourceDiscovery.Resource> + Resource string `json:"resource" protobuf:"bytes,1,opt,name=resource"` + // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. + // APIs may return other objects types at their discretion, such as error conditions, requests for alternate representations, or other operation specific behavior. + // This value will be null or empty if an APIService reports subresources but supports no operations on the parent resource + ResponseKind *v1.GroupVersionKind `json:"responseKind,omitempty" protobuf:"bytes,2,opt,name=responseKind"` + // scope indicates the scope of a resource, either Cluster or Namespaced + Scope ResourceScope `json:"scope" protobuf:"bytes,3,opt,name=scope"` + // singularResource is the singular name of the resource. This allows clients to handle plural and singular opaquely. + // For many clients the singular form of the resource will be more understandable to users reading messages and should be used when integrating the name of the resource into a sentence.
+ // The command line tool kubectl, for example, allows use of the singular resource name in place of plurals. + // The singular form of a resource should always be an optional element - when in doubt use the canonical resource name. + SingularResource string `json:"singularResource" protobuf:"bytes,4,opt,name=singularResource"` + // verbs is a list of supported API operation types (this includes + // but is not limited to get, list, watch, create, update, patch, + // delete, deletecollection, and proxy). + // +listType=set + Verbs []string `json:"verbs" protobuf:"bytes,5,opt,name=verbs"` + // shortNames is a list of suggested short names of the resource. + // +listType=set + ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,6,rep,name=shortNames"` + // categories is a list of the grouped resources this resource belongs to (e.g. 'all'). + // Clients may use this to simplify acting on multiple resource types at once. + // +listType=set + Categories []string `json:"categories,omitempty" protobuf:"bytes,7,rep,name=categories"` + // subresources is a list of subresources provided by this resource. Subresources are located at /apis/<APIGroupDiscovery.objectMeta.name>/<APIVersionDiscovery.version>/<APIResourceDiscovery.Resource>/name-of-instance/<APISubresourceDiscovery.subresource> + // +listType=map + // +listMapKey=subresource + Subresources []APISubresourceDiscovery `json:"subresources,omitempty" protobuf:"bytes,8,rep,name=subresources"` +} + +// ResourceScope is an enum defining the different scopes available to a resource. +type ResourceScope string + +const ( + ScopeCluster ResourceScope = "Cluster" + ScopeNamespace ResourceScope = "Namespaced" +) + +// DiscoveryFreshness is an enum defining whether the Discovery document published by an apiservice is up to date (fresh). +type DiscoveryFreshness string + +const ( + DiscoveryFreshnessCurrent DiscoveryFreshness = "Current" + DiscoveryFreshnessStale DiscoveryFreshness = "Stale" +) + +// APISubresourceDiscovery provides information about an API subresource for discovery. +type APISubresourceDiscovery struct { + // subresource is the name of the subresource. This is used in the URL path and is the unique identifier + // for this resource across all versions. + Subresource string `json:"subresource" protobuf:"bytes,1,opt,name=subresource"` + // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. + // Some subresources do not return normal resources, these will have null or empty return types. + ResponseKind *v1.GroupVersionKind `json:"responseKind,omitempty" protobuf:"bytes,2,opt,name=responseKind"` + // acceptedTypes describes the kinds that this endpoint accepts. + // Subresources may accept the standard content types or define + // custom negotiation schemes. The list may not be exhaustive for + // all operations. + // +listType=map + // +listMapKey=group + // +listMapKey=version + // +listMapKey=kind + AcceptedTypes []v1.GroupVersionKind `json:"acceptedTypes,omitempty" protobuf:"bytes,3,rep,name=acceptedTypes"` + // verbs is a list of supported API operation types (this includes + // but is not limited to get, list, watch, create, update, patch, + // delete, deletecollection, and proxy). Subresources may define + // custom verbs outside the standard Kubernetes verb set. Clients + // should expect the behavior of standard verbs to align with + // Kubernetes interaction conventions.
+ // +listType=set + Verbs []string `json:"verbs" protobuf:"bytes,4,opt,name=verbs"` +} diff --git a/vendor/k8s.io/api/apidiscovery/v2/zz_generated.deepcopy.go b/vendor/k8s.io/api/apidiscovery/v2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..029aeeab8c --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2/zz_generated.deepcopy.go @@ -0,0 +1,190 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v2 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIGroupDiscovery) DeepCopyInto(out *APIGroupDiscovery) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]APIVersionDiscovery, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIGroupDiscovery. +func (in *APIGroupDiscovery) DeepCopy() *APIGroupDiscovery { + if in == nil { + return nil + } + out := new(APIGroupDiscovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIGroupDiscovery) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIGroupDiscoveryList) DeepCopyInto(out *APIGroupDiscoveryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]APIGroupDiscovery, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIGroupDiscoveryList. +func (in *APIGroupDiscoveryList) DeepCopy() *APIGroupDiscoveryList { + if in == nil { + return nil + } + out := new(APIGroupDiscoveryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIGroupDiscoveryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *APIResourceDiscovery) DeepCopyInto(out *APIResourceDiscovery) { + *out = *in + if in.ResponseKind != nil { + in, out := &in.ResponseKind, &out.ResponseKind + *out = new(v1.GroupVersionKind) + **out = **in + } + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ShortNames != nil { + in, out := &in.ShortNames, &out.ShortNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Categories != nil { + in, out := &in.Categories, &out.Categories + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Subresources != nil { + in, out := &in.Subresources, &out.Subresources + *out = make([]APISubresourceDiscovery, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResourceDiscovery. +func (in *APIResourceDiscovery) DeepCopy() *APIResourceDiscovery { + if in == nil { + return nil + } + out := new(APIResourceDiscovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APISubresourceDiscovery) DeepCopyInto(out *APISubresourceDiscovery) { + *out = *in + if in.ResponseKind != nil { + in, out := &in.ResponseKind, &out.ResponseKind + *out = new(v1.GroupVersionKind) + **out = **in + } + if in.AcceptedTypes != nil { + in, out := &in.AcceptedTypes, &out.AcceptedTypes + *out = make([]v1.GroupVersionKind, len(*in)) + copy(*out, *in) + } + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APISubresourceDiscovery. +func (in *APISubresourceDiscovery) DeepCopy() *APISubresourceDiscovery { + if in == nil { + return nil + } + out := new(APISubresourceDiscovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIVersionDiscovery) DeepCopyInto(out *APIVersionDiscovery) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]APIResourceDiscovery, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIVersionDiscovery. +func (in *APIVersionDiscovery) DeepCopy() *APIVersionDiscovery { + if in == nil { + return nil + } + out := new(APIVersionDiscovery) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/apidiscovery/v2/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/apidiscovery/v2/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..b7132c647d --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v2 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *APIGroupDiscovery) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *APIGroupDiscoveryList) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} diff --git a/vendor/k8s.io/api/apidiscovery/v2beta1/generated.pb.go b/vendor/k8s.io/api/apidiscovery/v2beta1/generated.pb.go index ba6eee1b32..398c5f94f2 100644 --- a/vendor/k8s.io/api/apidiscovery/v2beta1/generated.pb.go +++ b/vendor/k8s.io/api/apidiscovery/v2beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto +// source: k8s.io/api/apidiscovery/v2beta1/generated.proto package v2beta1 @@ -47,7 +47,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *APIGroupDiscovery) Reset() { *m = APIGroupDiscovery{} } func (*APIGroupDiscovery) ProtoMessage() {} func (*APIGroupDiscovery) Descriptor() ([]byte, []int) { - return fileDescriptor_0442b7af4d680cb7, []int{0} + return fileDescriptor_48661e6ba3d554f3, []int{0} } func (m *APIGroupDiscovery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +75,7 @@ var xxx_messageInfo_APIGroupDiscovery proto.InternalMessageInfo func (m *APIGroupDiscoveryList) Reset() { *m = APIGroupDiscoveryList{} } func (*APIGroupDiscoveryList) ProtoMessage() {} func (*APIGroupDiscoveryList) Descriptor() ([]byte, []int) { - return fileDescriptor_0442b7af4d680cb7, []int{1} + return fileDescriptor_48661e6ba3d554f3, []int{1} } func (m *APIGroupDiscoveryList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +103,7 @@ var xxx_messageInfo_APIGroupDiscoveryList proto.InternalMessageInfo func (m *APIResourceDiscovery) Reset() { *m = APIResourceDiscovery{} } func (*APIResourceDiscovery) ProtoMessage() {} func (*APIResourceDiscovery) Descriptor() ([]byte, []int) { - return fileDescriptor_0442b7af4d680cb7, []int{2} + return fileDescriptor_48661e6ba3d554f3, []int{2} } func (m *APIResourceDiscovery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +131,7 @@ var xxx_messageInfo_APIResourceDiscovery proto.InternalMessageInfo func (m *APISubresourceDiscovery) Reset() { *m = APISubresourceDiscovery{} } func (*APISubresourceDiscovery) ProtoMessage() {} func (*APISubresourceDiscovery) Descriptor() ([]byte, []int) { - return fileDescriptor_0442b7af4d680cb7, []int{3} + return fileDescriptor_48661e6ba3d554f3, []int{3} } func (m *APISubresourceDiscovery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +159,7 @@ var xxx_messageInfo_APISubresourceDiscovery proto.InternalMessageInfo func (m *APIVersionDiscovery) Reset() { *m = APIVersionDiscovery{} } func (*APIVersionDiscovery) ProtoMessage() {} func (*APIVersionDiscovery) Descriptor() ([]byte, []int) { - return 
fileDescriptor_0442b7af4d680cb7, []int{4} + return fileDescriptor_48661e6ba3d554f3, []int{4} } func (m *APIVersionDiscovery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -193,59 +193,58 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto", fileDescriptor_0442b7af4d680cb7) + proto.RegisterFile("k8s.io/api/apidiscovery/v2beta1/generated.proto", fileDescriptor_48661e6ba3d554f3) } -var fileDescriptor_0442b7af4d680cb7 = []byte{ - // 754 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x4e, 0xdb, 0x4c, - 0x14, 0x8d, 0x09, 0xf9, 0x48, 0x26, 0xc9, 0xf7, 0x85, 0x01, 0xf4, 0x59, 0x2c, 0x6c, 0x94, 0x4d, - 0xa9, 0xd4, 0xda, 0x25, 0x02, 0xc4, 0x36, 0x29, 0xb4, 0x8d, 0xfa, 0x87, 0x26, 0x15, 0x95, 0xaa, - 0x2e, 0x6a, 0x3b, 0x17, 0xc7, 0x0d, 0xb1, 0xad, 0x99, 0x71, 0x24, 0x76, 0x7d, 0x84, 0xbe, 0x43, - 0x5f, 0x86, 0x55, 0xc5, 0xa2, 0x0b, 0xba, 0x89, 0x4a, 0xfa, 0x00, 0xdd, 0xb3, 0xaa, 0xec, 0x8c, - 0x7f, 0x42, 0x40, 0x44, 0x5d, 0x74, 0x81, 0x84, 0xcf, 0x3d, 0xe7, 0xdc, 0x7b, 0x2e, 0xd7, 0x06, - 0x3d, 0xeb, 0xef, 0x31, 0xcd, 0xf1, 0xf4, 0x7e, 0x60, 0x02, 0x75, 0x81, 0x03, 0xd3, 0x87, 0xe0, - 0x76, 0x3d, 0xaa, 0x8b, 0x82, 0xe1, 0x3b, 0xe1, 0x4f, 0xd7, 0x61, 0x96, 0x37, 0x04, 0x7a, 0xaa, - 0x0f, 0x1b, 0x26, 0x70, 0x63, 0x4b, 0xb7, 0xc1, 0x05, 0x6a, 0x70, 0xe8, 0x6a, 0x3e, 0xf5, 0xb8, - 0x87, 0xd5, 0x89, 0x40, 0x33, 0x7c, 0x47, 0xcb, 0x0a, 0x34, 0x21, 0x58, 0x7f, 0x68, 0x3b, 0xbc, - 0x17, 0x98, 0x9a, 0xe5, 0x0d, 0x74, 0xdb, 0xb3, 0x3d, 0x3d, 0xd2, 0x99, 0xc1, 0x71, 0xf4, 0x14, - 0x3d, 0x44, 0xbf, 0x4d, 0xfc, 0xd6, 0xb7, 0xd3, 0x01, 0x06, 0x86, 0xd5, 0x73, 0xdc, 0xb0, 0xb9, - 0xdf, 0xb7, 0x43, 0x80, 0xe9, 0x03, 0xe0, 0x86, 0x3e, 0x9c, 0x99, 0x62, 0x5d, 0xbf, 0x4d, 0x45, - 0x03, 0x97, 0x3b, 0x03, 0x98, 0x11, 0xec, 0xde, 0x25, 0x60, 0x56, 0x0f, 0x06, 0xc6, 0x75, 0x5d, - 0xfd, 0xbb, 0x84, 0x96, 0x9b, 0x87, 0xed, 0xa7, 0xd4, 0x0b, 0xfc, 0xfd, 0x38, 0x2b, 0xfe, 0x80, - 0x8a, 0xe1, 0x64, 0x5d, 0x83, 0x1b, 0xb2, 0xb4, 0x21, 0x6d, 0x96, 0x1b, 0x8f, 0xb4, 0x74, 0x2f, - 0x49, 0x03, 0xcd, 0xef, 0xdb, 0x21, 0xc0, 0xb4, 0x90, 0xad, 0x0d, 0xb7, 0xb4, 0xd7, 0xe6, 0x47, - 0xb0, 0xf8, 0x4b, 0xe0, 0x46, 0x0b, 0x9f, 0x8d, 0xd4, 0xdc, 0x78, 0xa4, 0xa2, 0x14, 0x23, 0x89, - 0x2b, 0x36, 0x51, 0x71, 0x08, 0x94, 0x39, 0x9e, 0xcb, 0xe4, 0x85, 0x8d, 0xfc, 0x66, 0xb9, 0xb1, - 0xad, 0xdd, 0xb1, 0x79, 0xad, 0x79, 0xd8, 0x3e, 0x9a, 0x68, 0x92, 0x49, 0x5b, 0x35, 0xd1, 0xa5, - 0x28, 0x2a, 0x8c, 0x24, 0xbe, 0xf5, 0xaf, 0x12, 0x5a, 0x9b, 0xc9, 0xf6, 0xc2, 0x61, 0x1c, 0xbf, - 0x9f, 0xc9, 0xa7, 0xcd, 0x97, 0x2f, 0x54, 0x47, 0xe9, 0x92, 0xbe, 0x31, 0x92, 0xc9, 0xf6, 0x16, - 0x15, 0x1c, 0x0e, 0x83, 0x38, 0x58, 0x63, 0x9e, 0x60, 0xd3, 0x43, 0xb6, 0xaa, 0xc2, 0xbe, 0xd0, - 0x0e, 0x8d, 0xc8, 0xc4, 0xaf, 0xfe, 0x65, 0x11, 0xad, 0x36, 0x0f, 0xdb, 0x04, 0x98, 0x17, 0x50, - 0x0b, 0xd2, 0xbf, 0xd7, 0x03, 0x54, 0xa4, 0x02, 0x8c, 0xf2, 0x94, 0xd2, 0xf9, 0x62, 0x32, 0x49, - 0x18, 0xf8, 0x04, 0x55, 0x28, 0x30, 0xdf, 0x73, 0x19, 0x3c, 0x77, 0xdc, 0xae, 0xbc, 0x10, 0x6d, - 0x60, 0x77, 0xbe, 0x0d, 0x44, 0x83, 0x8a, 0x65, 0x87, 0xea, 0x56, 0x6d, 0x3c, 0x52, 0x2b, 0x24, - 0xe3, 0x47, 0xa6, 0xdc, 0xf1, 0x36, 0x2a, 0x30, 0xcb, 0xf3, 0x41, 0xce, 0x47, 0x83, 0x29, 0x71, - 0xb2, 0x4e, 0x08, 0x5e, 0x8d, 0xd4, 0x6a, 0x3c, 0x61, 0x04, 0x90, 0x09, 0x19, 0xef, 0xa3, 0x1a, - 0x73, 0x5c, 0x3b, 0x38, 0x31, 0x68, 0x5c, 0x97, 0x17, 0x23, 0x03, 0x59, 0x18, 0xd4, 0x3a, 0xd7, - 0xea, 0x64, 0x46, 0x81, 0x55, 0x54, 0x18, 0x02, 
0x35, 0x99, 0x5c, 0xd8, 0xc8, 0x6f, 0x96, 0x5a, - 0xa5, 0xb0, 0xef, 0x51, 0x08, 0x90, 0x09, 0x8e, 0x35, 0x84, 0x58, 0xcf, 0xa3, 0xfc, 0x95, 0x31, - 0x00, 0x26, 0xff, 0x13, 0xb1, 0xfe, 0x0d, 0x8f, 0xb6, 0x93, 0xa0, 0x24, 0xc3, 0x08, 0xf9, 0x96, - 0xc1, 0xc1, 0xf6, 0xa8, 0x03, 0x4c, 0x5e, 0x4a, 0xf9, 0x8f, 0x13, 0x94, 0x64, 0x18, 0x98, 0xa2, - 0x0a, 0x0b, 0xcc, 0x78, 0xf3, 0x4c, 0x2e, 0x46, 0x17, 0xb1, 0x37, 0xcf, 0x45, 0x74, 0x52, 0x5d, - 0x7a, 0x17, 0xab, 0x22, 0x7c, 0x25, 0x53, 0x65, 0x64, 0xaa, 0x47, 0xfd, 0xdb, 0x02, 0xfa, 0xff, - 0x16, 0x3d, 0xde, 0x41, 0xe5, 0x0c, 0x57, 0xdc, 0xca, 0x8a, 0x30, 0x2d, 0x67, 0x24, 0x24, 0xcb, - 0xfb, 0xcb, 0x17, 0xc3, 0x50, 0xd5, 0xb0, 0x2c, 0xf0, 0x39, 0x74, 0xdf, 0x9c, 0xfa, 0xc0, 0xe4, - 0x7c, 0xb4, 0xb5, 0x3f, 0x6d, 0xb7, 0x26, 0xe2, 0x55, 0x9b, 0x59, 0x53, 0x32, 0xdd, 0x23, 0x3d, - 0x95, 0xc5, 0x9b, 0x4f, 0xa5, 0xfe, 0x4b, 0x42, 0x2b, 0x37, 0x7c, 0x81, 0xf0, 0x7d, 0xb4, 0x24, - 0xbe, 0x38, 0x62, 0x9d, 0xff, 0x89, 0x7e, 0x4b, 0x82, 0x4a, 0xe2, 0x3a, 0x3e, 0x46, 0xa5, 0xf4, - 0x14, 0x26, 0x1f, 0x87, 0x9d, 0x79, 0x4e, 0x61, 0xe6, 0x85, 0x6f, 0x2d, 0x8b, 0x1e, 0x25, 0x92, - 0x1c, 0x41, 0x6a, 0x8d, 0x0f, 0x50, 0xe9, 0x98, 0x02, 0xeb, 0xb9, 0xc0, 0x98, 0x78, 0xed, 0xee, - 0xc5, 0x82, 0x27, 0x71, 0xe1, 0x6a, 0xa4, 0xe2, 0xc4, 0x30, 0x41, 0x49, 0xaa, 0x6c, 0x1d, 0x9c, - 0x5d, 0x2a, 0xb9, 0xf3, 0x4b, 0x25, 0x77, 0x71, 0xa9, 0xe4, 0x3e, 0x8d, 0x15, 0xe9, 0x6c, 0xac, - 0x48, 0xe7, 0x63, 0x45, 0xba, 0x18, 0x2b, 0xd2, 0x8f, 0xb1, 0x22, 0x7d, 0xfe, 0xa9, 0xe4, 0xde, - 0xa9, 0x77, 0xfc, 0x87, 0xfd, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x66, 0x3b, 0x84, 0x9c, 0x07, - 0x00, 0x00, +var fileDescriptor_48661e6ba3d554f3 = []byte{ + // 740 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x4e, 0xdb, 0x4a, + 0x18, 0x8d, 0x09, 0xb9, 0x24, 0x93, 0xe4, 0xde, 0x30, 0x80, 0xae, 0xc5, 0xc2, 0x46, 0xd9, 0x5c, + 0xae, 0xd4, 0x8e, 0x4b, 0x04, 0x88, 0x6d, 0x52, 0x68, 0x15, 0xf5, 0x0f, 0x4d, 0x2a, 0x2a, 0x55, + 0x5d, 0xd4, 0x71, 0x06, 0xc7, 0x85, 0xd8, 0xd6, 0xcc, 0x24, 0x12, 0xbb, 0x3e, 0x42, 0xdf, 0xa1, + 0x2f, 0xc3, 0xaa, 0x62, 0xd1, 0x05, 0xdd, 0x44, 0x25, 0x7d, 0x80, 0xee, 0x59, 0x55, 0x33, 0x1e, + 0xff, 0x84, 0x80, 0x88, 0xba, 0xe8, 0x22, 0x52, 0x7c, 0xe6, 0x9c, 0xf3, 0x7d, 0xe7, 0xcb, 0xe7, + 0x09, 0xb0, 0x4e, 0xf6, 0x18, 0xf2, 0x02, 0xcb, 0x0e, 0x3d, 0xf1, 0xe9, 0x79, 0xcc, 0x09, 0x46, + 0x84, 0x9e, 0x59, 0xa3, 0x46, 0x97, 0x70, 0x7b, 0xcb, 0x72, 0x89, 0x4f, 0xa8, 0xcd, 0x49, 0x0f, + 0x85, 0x34, 0xe0, 0x01, 0x34, 0x23, 0x01, 0xb2, 0x43, 0x0f, 0x65, 0x05, 0x48, 0x09, 0xd6, 0x1f, + 0xba, 0x1e, 0xef, 0x0f, 0xbb, 0xc8, 0x09, 0x06, 0x96, 0x1b, 0xb8, 0x81, 0x25, 0x75, 0xdd, 0xe1, + 0xb1, 0x7c, 0x92, 0x0f, 0xf2, 0x5b, 0xe4, 0xb7, 0xbe, 0x9d, 0x36, 0x30, 0xb0, 0x9d, 0xbe, 0xe7, + 0x8b, 0xe2, 0xe1, 0x89, 0x2b, 0x00, 0x66, 0x0d, 0x08, 0xb7, 0xad, 0xd1, 0x4c, 0x17, 0xeb, 0xd6, + 0x5d, 0x2a, 0x3a, 0xf4, 0xb9, 0x37, 0x20, 0x33, 0x82, 0xdd, 0xfb, 0x04, 0xcc, 0xe9, 0x93, 0x81, + 0x7d, 0x53, 0x57, 0xff, 0xa6, 0x81, 0xe5, 0xe6, 0x61, 0xfb, 0x29, 0x0d, 0x86, 0xe1, 0x7e, 0x9c, + 0x15, 0xbe, 0x07, 0x45, 0xd1, 0x59, 0xcf, 0xe6, 0xb6, 0xae, 0x6d, 0x68, 0x9b, 0xe5, 0xc6, 0x23, + 0x94, 0xce, 0x25, 0x29, 0x80, 0xc2, 0x13, 0x57, 0x00, 0x0c, 0x09, 0x36, 0x1a, 0x6d, 0xa1, 0x57, + 0xdd, 0x0f, 0xc4, 0xe1, 0x2f, 0x08, 0xb7, 0x5b, 0xf0, 0x7c, 0x6c, 0xe6, 0x26, 0x63, 0x13, 0xa4, + 0x18, 0x4e, 0x5c, 0x61, 0x17, 0x14, 0x47, 0x84, 0x32, 0x2f, 0xf0, 0x99, 0xbe, 0xb0, 0x91, 0xdf, + 0x2c, 0x37, 0xb6, 0xd1, 0x3d, 0x93, 0x47, 0xcd, 0xc3, 0xf6, 
0x51, 0xa4, 0x49, 0x3a, 0x6d, 0xd5, + 0x54, 0x95, 0xa2, 0x3a, 0x61, 0x38, 0xf1, 0xad, 0x7f, 0xd1, 0xc0, 0xda, 0x4c, 0xb6, 0xe7, 0x1e, + 0xe3, 0xf0, 0xdd, 0x4c, 0x3e, 0x34, 0x5f, 0x3e, 0xa1, 0x96, 0xe9, 0x92, 0xba, 0x31, 0x92, 0xc9, + 0xf6, 0x06, 0x14, 0x3c, 0x4e, 0x06, 0x71, 0xb0, 0xc6, 0x3c, 0xc1, 0xa6, 0x9b, 0x6c, 0x55, 0x95, + 0x7d, 0xa1, 0x2d, 0x8c, 0x70, 0xe4, 0x57, 0xff, 0xbc, 0x08, 0x56, 0x9b, 0x87, 0x6d, 0x4c, 0x58, + 0x30, 0xa4, 0x0e, 0x49, 0x7f, 0xaf, 0x07, 0xa0, 0x48, 0x15, 0x28, 0xf3, 0x94, 0xd2, 0xfe, 0x62, + 0x32, 0x4e, 0x18, 0xf0, 0x14, 0x54, 0x28, 0x61, 0x61, 0xe0, 0x33, 0xf2, 0xcc, 0xf3, 0x7b, 0xfa, + 0x82, 0x9c, 0xc0, 0xee, 0x7c, 0x13, 0x90, 0x8d, 0xaa, 0x61, 0x0b, 0x75, 0xab, 0x36, 0x19, 0x9b, + 0x15, 0x9c, 0xf1, 0xc3, 0x53, 0xee, 0x70, 0x1b, 0x14, 0x98, 0x13, 0x84, 0x44, 0xcf, 0xcb, 0xc6, + 0x8c, 0x38, 0x59, 0x47, 0x80, 0xd7, 0x63, 0xb3, 0x1a, 0x77, 0x28, 0x01, 0x1c, 0x91, 0xe1, 0x3e, + 0xa8, 0x31, 0xcf, 0x77, 0x87, 0xa7, 0x36, 0x8d, 0xcf, 0xf5, 0x45, 0x69, 0xa0, 0x2b, 0x83, 0x5a, + 0xe7, 0xc6, 0x39, 0x9e, 0x51, 0x40, 0x13, 0x14, 0x46, 0x84, 0x76, 0x99, 0x5e, 0xd8, 0xc8, 0x6f, + 0x96, 0x5a, 0x25, 0x51, 0xf7, 0x48, 0x00, 0x38, 0xc2, 0x21, 0x02, 0x80, 0xf5, 0x03, 0xca, 0x5f, + 0xda, 0x03, 0xc2, 0xf4, 0xbf, 0x24, 0xeb, 0x6f, 0xb1, 0xb4, 0x9d, 0x04, 0xc5, 0x19, 0x86, 0xe0, + 0x3b, 0x36, 0x27, 0x6e, 0x40, 0x3d, 0xc2, 0xf4, 0xa5, 0x94, 0xff, 0x38, 0x41, 0x71, 0x86, 0x01, + 0x29, 0xa8, 0xb0, 0x61, 0x37, 0x9e, 0x3c, 0xd3, 0x8b, 0x72, 0x23, 0xf6, 0xe6, 0xd9, 0x88, 0x4e, + 0xaa, 0x4b, 0xf7, 0x62, 0x55, 0x85, 0xaf, 0x64, 0x4e, 0x19, 0x9e, 0xaa, 0x51, 0xff, 0xba, 0x00, + 0xfe, 0xbd, 0x43, 0x0f, 0x77, 0x40, 0x39, 0xc3, 0x55, 0xbb, 0xb2, 0xa2, 0x4c, 0xcb, 0x19, 0x09, + 0xce, 0xf2, 0xfe, 0xf0, 0xc6, 0x30, 0x50, 0xb5, 0x1d, 0x87, 0x84, 0x9c, 0xf4, 0x5e, 0x9f, 0x85, + 0x84, 0xe9, 0x79, 0x39, 0xb5, 0xdf, 0x2d, 0xb7, 0xa6, 0xe2, 0x55, 0x9b, 0x59, 0x53, 0x3c, 0x5d, + 0x23, 0x5d, 0x95, 0xc5, 0xdb, 0x57, 0xa5, 0xfe, 0x53, 0x03, 0x2b, 0xb7, 0xdc, 0x40, 0xf0, 0x7f, + 0xb0, 0xa4, 0x6e, 0x1c, 0x35, 0xce, 0x7f, 0x54, 0xbd, 0x25, 0x45, 0xc5, 0xf1, 0x39, 0x3c, 0x06, + 0xa5, 0x74, 0x15, 0xa2, 0xcb, 0x61, 0x67, 0x9e, 0x55, 0x98, 0x79, 0xe1, 0x5b, 0xcb, 0xaa, 0x46, + 0x09, 0x27, 0x4b, 0x90, 0x5a, 0xc3, 0x03, 0x50, 0x3a, 0xa6, 0x84, 0xf5, 0x7d, 0xc2, 0x98, 0x7a, + 0xed, 0xfe, 0x8b, 0x05, 0x4f, 0xe2, 0x83, 0xeb, 0xb1, 0x09, 0x13, 0xc3, 0x04, 0xc5, 0xa9, 0xb2, + 0x75, 0x70, 0x7e, 0x65, 0xe4, 0x2e, 0xae, 0x8c, 0xdc, 0xe5, 0x95, 0x91, 0xfb, 0x38, 0x31, 0xb4, + 0xf3, 0x89, 0xa1, 0x5d, 0x4c, 0x0c, 0xed, 0x72, 0x62, 0x68, 0xdf, 0x27, 0x86, 0xf6, 0xe9, 0x87, + 0x91, 0x7b, 0x6b, 0xde, 0xf3, 0x0f, 0xfb, 0x2b, 0x00, 0x00, 0xff, 0xff, 0xe4, 0x85, 0x3b, 0x06, + 0x83, 0x07, 0x00, 0x00, } func (m *APIGroupDiscovery) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto b/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto index a09af750ba..e9ae88072a 100644 --- a/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto +++ b/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto @@ -38,7 +38,7 @@ message APIGroupDiscovery { // name is allowed to be "" to represent the legacy, ungroupified resources. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // versions are the versions supported in this group. 
They are sorted in descending order of preference, // with the preferred version being the first entry. @@ -55,7 +55,7 @@ message APIGroupDiscoveryList { // ResourceVersion will not be set, because this does not have a replayable ordering among multiple apiservers. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of groups for discovery. The groups are listed in priority order. repeated APIGroupDiscovery items = 2; @@ -72,7 +72,7 @@ message APIResourceDiscovery { // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. // APIs may return other objects types at their discretion, such as error conditions, requests for alternate representations, or other operation specific behavior. // This value will be null or empty if an APIService reports subresources but supports no operations on the parent resource - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; // scope indicates the scope of a resource, either Cluster or Namespaced optional string scope = 3; @@ -112,7 +112,7 @@ message APISubresourceDiscovery { // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. // Some subresources do not return normal resources, these will have null or empty return types. - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; // acceptedTypes describes the kinds that this endpoint accepts. // Subresources may accept the standard content types or define @@ -122,7 +122,7 @@ message APISubresourceDiscovery { // +listMapKey=group // +listMapKey=version // +listMapKey=kind - repeated k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind acceptedTypes = 3; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind acceptedTypes = 3; // verbs is a list of supported API operation types (this includes // but is not limited to get, list, watch, create, update, patch, diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go index 6871da414c..b0343ffcfb 100644 --- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto +// source: k8s.io/api/apiserverinternal/v1alpha1/generated.proto package v1alpha1 @@ -46,7 +46,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ServerStorageVersion) Reset() { *m = ServerStorageVersion{} } func (*ServerStorageVersion) ProtoMessage() {} func (*ServerStorageVersion) Descriptor() ([]byte, []int) { - return fileDescriptor_a3903ff5e3cc7a03, []int{0} + return fileDescriptor_126bcbf538b54729, []int{0} } func (m *ServerStorageVersion) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -74,7 +74,7 @@ var xxx_messageInfo_ServerStorageVersion proto.InternalMessageInfo func (m *StorageVersion) Reset() { *m = StorageVersion{} } func (*StorageVersion) ProtoMessage() {} func (*StorageVersion) Descriptor() ([]byte, []int) { - return fileDescriptor_a3903ff5e3cc7a03, []int{1} + return fileDescriptor_126bcbf538b54729, []int{1} } func (m *StorageVersion) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -102,7 +102,7 @@ var xxx_messageInfo_StorageVersion proto.InternalMessageInfo func (m *StorageVersionCondition) Reset() { *m = StorageVersionCondition{} } func (*StorageVersionCondition) ProtoMessage() {} func (*StorageVersionCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_a3903ff5e3cc7a03, []int{2} + return fileDescriptor_126bcbf538b54729, []int{2} } func (m *StorageVersionCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -130,7 +130,7 @@ var xxx_messageInfo_StorageVersionCondition proto.InternalMessageInfo func (m *StorageVersionList) Reset() { *m = StorageVersionList{} } func (*StorageVersionList) ProtoMessage() {} func (*StorageVersionList) Descriptor() ([]byte, []int) { - return fileDescriptor_a3903ff5e3cc7a03, []int{3} + return fileDescriptor_126bcbf538b54729, []int{3} } func (m *StorageVersionList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -158,7 +158,7 @@ var xxx_messageInfo_StorageVersionList proto.InternalMessageInfo func (m *StorageVersionSpec) Reset() { *m = StorageVersionSpec{} } func (*StorageVersionSpec) ProtoMessage() {} func (*StorageVersionSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_a3903ff5e3cc7a03, []int{4} + return fileDescriptor_126bcbf538b54729, []int{4} } func (m *StorageVersionSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -186,7 +186,7 @@ var xxx_messageInfo_StorageVersionSpec proto.InternalMessageInfo func (m *StorageVersionStatus) Reset() { *m = StorageVersionStatus{} } func (*StorageVersionStatus) ProtoMessage() {} func (*StorageVersionStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_a3903ff5e3cc7a03, []int{5} + return fileDescriptor_126bcbf538b54729, []int{5} } func (m *StorageVersionStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -221,61 +221,60 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto", fileDescriptor_a3903ff5e3cc7a03) + proto.RegisterFile("k8s.io/api/apiserverinternal/v1alpha1/generated.proto", fileDescriptor_126bcbf538b54729) } -var fileDescriptor_a3903ff5e3cc7a03 = []byte{ - // 790 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x41, 0x4f, 0xdb, 0x48, - 0x14, 0x8e, 0x49, 0x08, 0x30, 0xd9, 0x4d, 0x96, 0x59, 0x10, 0xd9, 0xac, 0xe4, 0xb0, 0x91, 0x58, - 0xb1, 0xbb, 0x5a, 0x7b, 0x89, 0x96, 0xaa, 0xb4, 0x52, 0x2b, 0x0c, 0xa8, 0xa2, 0x85, 0x52, 0x4d, 
- 0x50, 0x0f, 0xb4, 0x87, 0x4e, 0xec, 0xa9, 0xe3, 0x26, 0xf6, 0x58, 0x9e, 0x49, 0x24, 0x2e, 0x55, - 0x7f, 0x42, 0xfb, 0x3f, 0x7a, 0xec, 0x8f, 0xe0, 0x54, 0x71, 0x44, 0xaa, 0x14, 0x15, 0xf7, 0x5f, - 0x70, 0xaa, 0x66, 0xec, 0x38, 0x38, 0x09, 0x6a, 0xc4, 0x21, 0x52, 0xe6, 0xbd, 0xf7, 0x7d, 0xef, - 0xcd, 0x37, 0xdf, 0x8c, 0xc1, 0xd3, 0xf6, 0x5d, 0xa6, 0x39, 0x54, 0x6f, 0x77, 0x9b, 0x24, 0xf0, - 0x08, 0x27, 0x4c, 0xef, 0x11, 0xcf, 0xa2, 0x81, 0x1e, 0x27, 0xb0, 0xef, 0x88, 0x1f, 0x23, 0x41, - 0x8f, 0x04, 0x8e, 0xc7, 0x49, 0xe0, 0xe1, 0x8e, 0xde, 0xdb, 0xc0, 0x1d, 0xbf, 0x85, 0x37, 0x74, - 0x9b, 0x78, 0x24, 0xc0, 0x9c, 0x58, 0x9a, 0x1f, 0x50, 0x4e, 0xe1, 0x5a, 0x04, 0xd3, 0xb0, 0xef, - 0x68, 0x63, 0x30, 0x6d, 0x00, 0xab, 0xfc, 0x6b, 0x3b, 0xbc, 0xd5, 0x6d, 0x6a, 0x26, 0x75, 0x75, - 0x9b, 0xda, 0x54, 0x97, 0xe8, 0x66, 0xf7, 0xb5, 0x5c, 0xc9, 0x85, 0xfc, 0x17, 0xb1, 0x56, 0xfe, - 0x1f, 0x0e, 0xe3, 0x62, 0xb3, 0xe5, 0x78, 0x24, 0x38, 0xd5, 0xfd, 0xb6, 0x2d, 0x27, 0xd3, 0x5d, - 0xc2, 0xb1, 0xde, 0x1b, 0x9b, 0xa5, 0xa2, 0xdf, 0x84, 0x0a, 0xba, 0x1e, 0x77, 0x5c, 0x32, 0x06, - 0xb8, 0xf3, 0x23, 0x00, 0x33, 0x5b, 0xc4, 0xc5, 0xa3, 0xb8, 0xda, 0x87, 0x19, 0xb0, 0xd4, 0x90, - 0x3b, 0x6d, 0x70, 0x1a, 0x60, 0x9b, 0x3c, 0x27, 0x01, 0x73, 0xa8, 0x07, 0x37, 0x41, 0x01, 0xfb, - 0x4e, 0x94, 0xda, 0xdf, 0x2d, 0x2b, 0xab, 0xca, 0xfa, 0x82, 0xf1, 0xeb, 0x59, 0xbf, 0x9a, 0x09, - 0xfb, 0xd5, 0xc2, 0xf6, 0xb3, 0xfd, 0x41, 0x0a, 0x5d, 0xaf, 0x83, 0xdb, 0xa0, 0x44, 0x3c, 0x93, - 0x5a, 0x8e, 0x67, 0xc7, 0x4c, 0xe5, 0x19, 0x09, 0x5d, 0x89, 0xa1, 0xa5, 0xbd, 0x74, 0x1a, 0x8d, - 0xd6, 0xc3, 0x1d, 0xb0, 0x68, 0x11, 0x93, 0x5a, 0xb8, 0xd9, 0x19, 0x4c, 0xc3, 0xca, 0xd9, 0xd5, - 0xec, 0xfa, 0x82, 0xb1, 0x1c, 0xf6, 0xab, 0x8b, 0xbb, 0xa3, 0x49, 0x34, 0x5e, 0x0f, 0xef, 0x81, - 0xa2, 0x3c, 0x40, 0x2b, 0x61, 0xc8, 0x49, 0x06, 0x18, 0xf6, 0xab, 0xc5, 0x46, 0x2a, 0x83, 0x46, - 0x2a, 0x6b, 0x9f, 0x66, 0x40, 0x71, 0x44, 0x8d, 0x57, 0x60, 0x5e, 0x1c, 0x95, 0x85, 0x39, 0x96, - 0x52, 0x14, 0xea, 0xff, 0x69, 0x43, 0xbb, 0x24, 0x8a, 0x6b, 0x7e, 0xdb, 0x96, 0xde, 0xd1, 0x44, - 0xb5, 0xd6, 0xdb, 0xd0, 0x8e, 0x9a, 0x6f, 0x88, 0xc9, 0x0f, 0x09, 0xc7, 0x06, 0x8c, 0x15, 0x00, - 0xc3, 0x18, 0x4a, 0x58, 0xe1, 0x0b, 0x90, 0x63, 0x3e, 0x31, 0xa5, 0x5a, 0x85, 0xfa, 0x96, 0x36, - 0x95, 0x19, 0xb5, 0xf4, 0x98, 0x0d, 0x9f, 0x98, 0xc6, 0x4f, 0x71, 0x9b, 0x9c, 0x58, 0x21, 0x49, - 0x0a, 0x4d, 0x90, 0x67, 0x1c, 0xf3, 0xae, 0xd0, 0x51, 0xd0, 0xdf, 0xbf, 0x1d, 0xbd, 0xa4, 0x30, - 0x8a, 0x71, 0x83, 0x7c, 0xb4, 0x46, 0x31, 0x75, 0xed, 0x63, 0x16, 0xac, 0xa4, 0x01, 0x3b, 0xd4, - 0xb3, 0x1c, 0x2e, 0xf4, 0x7b, 0x08, 0x72, 0xfc, 0xd4, 0x27, 0xb1, 0x8d, 0xfe, 0x19, 0x8c, 0x78, - 0x7c, 0xea, 0x93, 0xab, 0x7e, 0xf5, 0xf7, 0x1b, 0x60, 0x22, 0x8d, 0x24, 0x10, 0x6e, 0x25, 0x3b, - 0x88, 0xec, 0xf4, 0x47, 0x7a, 0x88, 0xab, 0x7e, 0xb5, 0x94, 0xc0, 0xd2, 0x73, 0xc1, 0xc7, 0x00, - 0xd2, 0x66, 0x74, 0xc4, 0x8f, 0x22, 0xf7, 0x0b, 0x57, 0x0a, 0x21, 0xb2, 0x46, 0x25, 0xa6, 0x81, - 0x47, 0x63, 0x15, 0x68, 0x02, 0x0a, 0xf6, 0x00, 0xec, 0x60, 0xc6, 0x8f, 0x03, 0xec, 0xb1, 0x68, - 0x44, 0xc7, 0x25, 0xe5, 0x9c, 0x14, 0xf5, 0xef, 0xe9, 0x1c, 0x21, 0x10, 0xc3, 0xbe, 0x07, 0x63, - 0x6c, 0x68, 0x42, 0x07, 0xf8, 0x27, 0xc8, 0x07, 0x04, 0x33, 0xea, 0x95, 0x67, 0xe5, 0xf6, 0x93, - 0x33, 0x40, 0x32, 0x8a, 0xe2, 0x2c, 0xfc, 0x0b, 0xcc, 0xb9, 0x84, 0x31, 0x6c, 0x93, 0x72, 0x5e, - 0x16, 0x96, 0xe2, 0xc2, 0xb9, 0xc3, 0x28, 0x8c, 0x06, 0xf9, 0xda, 0x67, 0x05, 0xc0, 0xb4, 0xee, - 0x07, 0x0e, 0xe3, 0xf0, 0xe5, 0x98, 0xd3, 0xb5, 0xe9, 0xf6, 0x25, 0xd0, 0xd2, 0xe7, 0xbf, 0xc4, - 0x2d, 0xe7, 0x07, 0x91, 
0x6b, 0x2e, 0x3f, 0x01, 0xb3, 0x0e, 0x27, 0xae, 0x38, 0xc5, 0xec, 0x7a, - 0xa1, 0xbe, 0x79, 0x2b, 0x1f, 0x1a, 0x3f, 0xc7, 0x1d, 0x66, 0xf7, 0x05, 0x17, 0x8a, 0x28, 0x6b, - 0x4b, 0xa3, 0xfb, 0x11, 0x17, 0xa0, 0xf6, 0x45, 0x3c, 0x70, 0x13, 0x6c, 0x0c, 0xdf, 0x82, 0x12, - 0x4b, 0xc5, 0x59, 0x59, 0x91, 0x43, 0x4d, 0x7d, 0x39, 0x26, 0x3c, 0x9b, 0xc3, 0x67, 0x2e, 0x1d, - 0x67, 0x68, 0xb4, 0x19, 0x3c, 0x02, 0xcb, 0x26, 0x75, 0x5d, 0xea, 0xed, 0x4d, 0x7c, 0x2f, 0x7f, - 0x0b, 0xfb, 0xd5, 0xe5, 0x9d, 0x49, 0x05, 0x68, 0x32, 0x0e, 0x06, 0x00, 0x98, 0x83, 0x2b, 0x10, - 0x3d, 0x98, 0x85, 0xfa, 0x83, 0x5b, 0x09, 0x9c, 0xdc, 0xa4, 0xe1, 0x9b, 0x95, 0x84, 0x18, 0xba, - 0xd6, 0xc5, 0x78, 0x72, 0x76, 0xa9, 0x66, 0xce, 0x2f, 0xd5, 0xcc, 0xc5, 0xa5, 0x9a, 0x79, 0x17, - 0xaa, 0xca, 0x59, 0xa8, 0x2a, 0xe7, 0xa1, 0xaa, 0x5c, 0x84, 0xaa, 0xf2, 0x35, 0x54, 0x95, 0xf7, - 0xdf, 0xd4, 0xcc, 0xc9, 0xda, 0x54, 0x1f, 0xe4, 0xef, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x3a, - 0x2e, 0x07, 0xd1, 0x07, 0x00, 0x00, +var fileDescriptor_126bcbf538b54729 = []byte{ + // 770 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x41, 0x4f, 0x13, 0x41, + 0x14, 0xee, 0xd2, 0x52, 0x60, 0xaa, 0xad, 0x8c, 0x10, 0x6a, 0x4d, 0xb6, 0xd8, 0x04, 0x83, 0x1a, + 0x77, 0xa5, 0x11, 0x23, 0x9a, 0x68, 0x58, 0x20, 0x06, 0x85, 0x60, 0xa6, 0xc4, 0x03, 0x7a, 0x70, + 0xba, 0x1d, 0xb7, 0x2b, 0xdd, 0x9d, 0xcd, 0xce, 0xb4, 0x09, 0x17, 0xe3, 0x4f, 0xd0, 0xff, 0xe1, + 0xd1, 0x1f, 0xc1, 0xc9, 0x70, 0x24, 0x31, 0x69, 0x64, 0xfd, 0x17, 0x9c, 0xcc, 0xcc, 0x6e, 0xb7, + 0x6c, 0x5b, 0x62, 0xc3, 0xa1, 0x49, 0xe7, 0xbd, 0xf7, 0x7d, 0xef, 0xcd, 0x37, 0xdf, 0xcc, 0x82, + 0xd5, 0xc3, 0xa7, 0x4c, 0xb3, 0xa9, 0x8e, 0x3d, 0x5b, 0xfc, 0x18, 0xf1, 0x3b, 0xc4, 0xb7, 0x5d, + 0x4e, 0x7c, 0x17, 0xb7, 0xf4, 0xce, 0x0a, 0x6e, 0x79, 0x4d, 0xbc, 0xa2, 0x5b, 0xc4, 0x25, 0x3e, + 0xe6, 0xa4, 0xa1, 0x79, 0x3e, 0xe5, 0x14, 0x2e, 0x85, 0x30, 0x0d, 0x7b, 0xb6, 0x36, 0x04, 0xd3, + 0x7a, 0xb0, 0xd2, 0x43, 0xcb, 0xe6, 0xcd, 0x76, 0x5d, 0x33, 0xa9, 0xa3, 0x5b, 0xd4, 0xa2, 0xba, + 0x44, 0xd7, 0xdb, 0x9f, 0xe4, 0x4a, 0x2e, 0xe4, 0xbf, 0x90, 0xb5, 0xf4, 0xb8, 0x3f, 0x8c, 0x83, + 0xcd, 0xa6, 0xed, 0x12, 0xff, 0x48, 0xf7, 0x0e, 0x2d, 0x39, 0x99, 0xee, 0x10, 0x8e, 0xf5, 0xce, + 0xd0, 0x2c, 0x25, 0xfd, 0x32, 0x94, 0xdf, 0x76, 0xb9, 0xed, 0x90, 0x21, 0xc0, 0x93, 0xff, 0x01, + 0x98, 0xd9, 0x24, 0x0e, 0x1e, 0xc4, 0x55, 0xbe, 0x4f, 0x80, 0xb9, 0x9a, 0xdc, 0x69, 0x8d, 0x53, + 0x1f, 0x5b, 0xe4, 0x1d, 0xf1, 0x99, 0x4d, 0x5d, 0xb8, 0x0a, 0x72, 0xd8, 0xb3, 0xc3, 0xd4, 0xf6, + 0x66, 0x51, 0x59, 0x54, 0x96, 0x67, 0x8c, 0x9b, 0xc7, 0xdd, 0x72, 0x2a, 0xe8, 0x96, 0x73, 0xeb, + 0x6f, 0xb7, 0x7b, 0x29, 0x74, 0xb1, 0x0e, 0xae, 0x83, 0x02, 0x71, 0x4d, 0xda, 0xb0, 0x5d, 0x2b, + 0x62, 0x2a, 0x4e, 0x48, 0xe8, 0x42, 0x04, 0x2d, 0x6c, 0x25, 0xd3, 0x68, 0xb0, 0x1e, 0x6e, 0x80, + 0xd9, 0x06, 0x31, 0x69, 0x03, 0xd7, 0x5b, 0xbd, 0x69, 0x58, 0x31, 0xbd, 0x98, 0x5e, 0x9e, 0x31, + 0xe6, 0x83, 0x6e, 0x79, 0x76, 0x73, 0x30, 0x89, 0x86, 0xeb, 0xe1, 0x33, 0x90, 0x97, 0x07, 0xd8, + 0x88, 0x19, 0x32, 0x92, 0x01, 0x06, 0xdd, 0x72, 0xbe, 0x96, 0xc8, 0xa0, 0x81, 0xca, 0xca, 0xcf, + 0x09, 0x90, 0x1f, 0x50, 0xe3, 0x23, 0x98, 0x16, 0x47, 0xd5, 0xc0, 0x1c, 0x4b, 0x29, 0x72, 0xd5, + 0x47, 0x5a, 0xdf, 0x2e, 0xb1, 0xe2, 0x9a, 0x77, 0x68, 0x49, 0xef, 0x68, 0xa2, 0x5a, 0xeb, 0xac, + 0x68, 0x7b, 0xf5, 0xcf, 0xc4, 0xe4, 0xbb, 0x84, 0x63, 0x03, 0x46, 0x0a, 0x80, 0x7e, 0x0c, 0xc5, + 0xac, 0xf0, 0x3d, 0xc8, 0x30, 0x8f, 0x98, 0x52, 0xad, 0x5c, 0x75, 0x4d, 0x1b, 0xcb, 0x8c, 0x5a, + 0x72, 0xcc, 
0x9a, 0x47, 0x4c, 0xe3, 0x5a, 0xd4, 0x26, 0x23, 0x56, 0x48, 0x92, 0x42, 0x13, 0x64, + 0x19, 0xc7, 0xbc, 0x2d, 0x74, 0x14, 0xf4, 0xcf, 0xaf, 0x46, 0x2f, 0x29, 0x8c, 0x7c, 0xd4, 0x20, + 0x1b, 0xae, 0x51, 0x44, 0x5d, 0xf9, 0x91, 0x06, 0x0b, 0x49, 0xc0, 0x06, 0x75, 0x1b, 0x36, 0x17, + 0xfa, 0xbd, 0x04, 0x19, 0x7e, 0xe4, 0x91, 0xc8, 0x46, 0x0f, 0x7a, 0x23, 0xee, 0x1f, 0x79, 0xe4, + 0xbc, 0x5b, 0xbe, 0x7d, 0x09, 0x4c, 0xa4, 0x91, 0x04, 0xc2, 0xb5, 0x78, 0x07, 0xa1, 0x9d, 0xee, + 0x24, 0x87, 0x38, 0xef, 0x96, 0x0b, 0x31, 0x2c, 0x39, 0x17, 0x7c, 0x0d, 0x20, 0xad, 0x87, 0x47, + 0xfc, 0x2a, 0x74, 0xbf, 0x70, 0xa5, 0x10, 0x22, 0x6d, 0x94, 0x22, 0x1a, 0xb8, 0x37, 0x54, 0x81, + 0x46, 0xa0, 0x60, 0x07, 0xc0, 0x16, 0x66, 0x7c, 0xdf, 0xc7, 0x2e, 0x0b, 0x47, 0xb4, 0x1d, 0x52, + 0xcc, 0x48, 0x51, 0xef, 0x8f, 0xe7, 0x08, 0x81, 0xe8, 0xf7, 0xdd, 0x19, 0x62, 0x43, 0x23, 0x3a, + 0xc0, 0xbb, 0x20, 0xeb, 0x13, 0xcc, 0xa8, 0x5b, 0x9c, 0x94, 0xdb, 0x8f, 0xcf, 0x00, 0xc9, 0x28, + 0x8a, 0xb2, 0xf0, 0x1e, 0x98, 0x72, 0x08, 0x63, 0xd8, 0x22, 0xc5, 0xac, 0x2c, 0x2c, 0x44, 0x85, + 0x53, 0xbb, 0x61, 0x18, 0xf5, 0xf2, 0x95, 0x5f, 0x0a, 0x80, 0x49, 0xdd, 0x77, 0x6c, 0xc6, 0xe1, + 0x87, 0x21, 0xa7, 0x6b, 0xe3, 0xed, 0x4b, 0xa0, 0xa5, 0xcf, 0x6f, 0x44, 0x2d, 0xa7, 0x7b, 0x91, + 0x0b, 0x2e, 0x3f, 0x00, 0x93, 0x36, 0x27, 0x8e, 0x38, 0xc5, 0xf4, 0x72, 0xae, 0xba, 0x7a, 0x25, + 0x1f, 0x1a, 0xd7, 0xa3, 0x0e, 0x93, 0xdb, 0x82, 0x0b, 0x85, 0x94, 0x95, 0xb9, 0xc1, 0xfd, 0x88, + 0x0b, 0x50, 0xf9, 0x2d, 0x1e, 0xb8, 0x11, 0x36, 0x86, 0x5f, 0x40, 0x81, 0x25, 0xe2, 0xac, 0xa8, + 0xc8, 0xa1, 0xc6, 0xbe, 0x1c, 0x23, 0x9e, 0xcd, 0xfe, 0x33, 0x97, 0x8c, 0x33, 0x34, 0xd8, 0x0c, + 0xee, 0x81, 0x79, 0x93, 0x3a, 0x0e, 0x75, 0xb7, 0x46, 0xbe, 0x97, 0xb7, 0x82, 0x6e, 0x79, 0x7e, + 0x63, 0x54, 0x01, 0x1a, 0x8d, 0x83, 0x3e, 0x00, 0x66, 0xef, 0x0a, 0x84, 0x0f, 0x66, 0xae, 0xfa, + 0xe2, 0x4a, 0x02, 0xc7, 0x37, 0xa9, 0xff, 0x66, 0xc5, 0x21, 0x86, 0x2e, 0x74, 0x31, 0xde, 0x1c, + 0x9f, 0xa9, 0xa9, 0x93, 0x33, 0x35, 0x75, 0x7a, 0xa6, 0xa6, 0xbe, 0x06, 0xaa, 0x72, 0x1c, 0xa8, + 0xca, 0x49, 0xa0, 0x2a, 0xa7, 0x81, 0xaa, 0xfc, 0x09, 0x54, 0xe5, 0xdb, 0x5f, 0x35, 0x75, 0xb0, + 0x34, 0xd6, 0x07, 0xf9, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x79, 0x04, 0x7d, 0x78, 0xb8, 0x07, + 0x00, 0x00, } func (m *ServerStorageVersion) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto index 6e6bab5218..8a77860720 100644 --- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto @@ -52,7 +52,7 @@ message ServerStorageVersion { // Storage version of a specific resource. message StorageVersion { // The name is .. - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec is an empty spec. It is here to comply with Kubernetes API style. optional StorageVersionSpec spec = 2; @@ -77,8 +77,7 @@ message StorageVersionCondition { optional int64 observedGeneration = 3; // Last time the condition transitioned from one status to another. - // +required - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // The reason for the condition's last transition. // +required @@ -94,7 +93,7 @@ message StorageVersionList { // Standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items holds a list of StorageVersion repeated StorageVersion items = 2; diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go index 0ffcf95f06..31a419abf1 100644 --- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go @@ -111,7 +111,6 @@ type StorageVersionCondition struct { // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` // Last time the condition transitioned from one status to another. - // +required LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` // The reason for the condition's last transition. // +required diff --git a/vendor/k8s.io/api/apps/v1/doc.go b/vendor/k8s.io/api/apps/v1/doc.go index 61dc97bde5..d189e860f2 100644 --- a/vendor/k8s.io/api/apps/v1/doc.go +++ b/vendor/k8s.io/api/apps/v1/doc.go @@ -17,5 +17,6 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true package v1 // import "k8s.io/api/apps/v1" diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go index 84a7af5994..ea62a099fe 100644 --- a/vendor/k8s.io/api/apps/v1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto +// source: k8s.io/api/apps/v1/generated.proto package v1 @@ -51,7 +51,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } func (*ControllerRevision) ProtoMessage() {} func (*ControllerRevision) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{0} + return fileDescriptor_5b781835628d5338, []int{0} } func (m *ControllerRevision) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -79,7 +79,7 @@ var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } func (*ControllerRevisionList) ProtoMessage() {} func (*ControllerRevisionList) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{1} + return fileDescriptor_5b781835628d5338, []int{1} } func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,7 +107,7 @@ var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (*DaemonSet) ProtoMessage() {} func (*DaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{2} + return fileDescriptor_5b781835628d5338, []int{2} } func (m *DaemonSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,7 +135,7 @@ var xxx_messageInfo_DaemonSet proto.InternalMessageInfo func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } func (*DaemonSetCondition) ProtoMessage() {} func (*DaemonSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{3} + return fileDescriptor_5b781835628d5338, []int{3} } func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -163,7 +163,7 @@ var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } func (*DaemonSetList) ProtoMessage() {} func (*DaemonSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{4} + return fileDescriptor_5b781835628d5338, []int{4} } func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -191,7 +191,7 @@ var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (*DaemonSetSpec) ProtoMessage() {} func (*DaemonSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{5} + return fileDescriptor_5b781835628d5338, []int{5} } func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -219,7 +219,7 @@ var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (*DaemonSetStatus) ProtoMessage() {} func (*DaemonSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{6} + return fileDescriptor_5b781835628d5338, []int{6} } func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -247,7 +247,7 @@ var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } func (*DaemonSetUpdateStrategy) ProtoMessage() {} func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{7} + return fileDescriptor_5b781835628d5338, []int{7} } func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b 
[]byte) error { return m.Unmarshal(b) @@ -275,7 +275,7 @@ var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo func (m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} func (*Deployment) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{8} + return fileDescriptor_5b781835628d5338, []int{8} } func (m *Deployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -303,7 +303,7 @@ var xxx_messageInfo_Deployment proto.InternalMessageInfo func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (*DeploymentCondition) ProtoMessage() {} func (*DeploymentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{9} + return fileDescriptor_5b781835628d5338, []int{9} } func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -331,7 +331,7 @@ var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (*DeploymentList) ProtoMessage() {} func (*DeploymentList) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{10} + return fileDescriptor_5b781835628d5338, []int{10} } func (m *DeploymentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -359,7 +359,7 @@ var xxx_messageInfo_DeploymentList proto.InternalMessageInfo func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (*DeploymentSpec) ProtoMessage() {} func (*DeploymentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{11} + return fileDescriptor_5b781835628d5338, []int{11} } func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -387,7 +387,7 @@ var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (*DeploymentStatus) ProtoMessage() {} func (*DeploymentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{12} + return fileDescriptor_5b781835628d5338, []int{12} } func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -415,7 +415,7 @@ var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} func (*DeploymentStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{13} + return fileDescriptor_5b781835628d5338, []int{13} } func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -443,7 +443,7 @@ var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } func (*ReplicaSet) ProtoMessage() {} func (*ReplicaSet) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{14} + return fileDescriptor_5b781835628d5338, []int{14} } func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -471,7 +471,7 @@ var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } func (*ReplicaSetCondition) ProtoMessage() {} func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{15} + return fileDescriptor_5b781835628d5338, []int{15} } func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -499,7 +499,7 @@ var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo func (m 
*ReplicaSetList) Reset() { *m = ReplicaSetList{} } func (*ReplicaSetList) ProtoMessage() {} func (*ReplicaSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{16} + return fileDescriptor_5b781835628d5338, []int{16} } func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -527,7 +527,7 @@ var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } func (*ReplicaSetSpec) ProtoMessage() {} func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{17} + return fileDescriptor_5b781835628d5338, []int{17} } func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -555,7 +555,7 @@ var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } func (*ReplicaSetStatus) ProtoMessage() {} func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{18} + return fileDescriptor_5b781835628d5338, []int{18} } func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -583,7 +583,7 @@ var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (*RollingUpdateDaemonSet) ProtoMessage() {} func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{19} + return fileDescriptor_5b781835628d5338, []int{19} } func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -611,7 +611,7 @@ var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{20} + return fileDescriptor_5b781835628d5338, []int{20} } func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -639,7 +639,7 @@ var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{21} + return fileDescriptor_5b781835628d5338, []int{21} } func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -667,7 +667,7 @@ var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo func (m *StatefulSet) Reset() { *m = StatefulSet{} } func (*StatefulSet) ProtoMessage() {} func (*StatefulSet) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{22} + return fileDescriptor_5b781835628d5338, []int{22} } func (m *StatefulSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -695,7 +695,7 @@ var xxx_messageInfo_StatefulSet proto.InternalMessageInfo func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } func (*StatefulSetCondition) ProtoMessage() {} func (*StatefulSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{23} + return fileDescriptor_5b781835628d5338, []int{23} } func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -723,7 +723,7 @@ var 
xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } func (*StatefulSetList) ProtoMessage() {} func (*StatefulSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{24} + return fileDescriptor_5b781835628d5338, []int{24} } func (m *StatefulSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -751,7 +751,7 @@ var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo func (m *StatefulSetOrdinals) Reset() { *m = StatefulSetOrdinals{} } func (*StatefulSetOrdinals) ProtoMessage() {} func (*StatefulSetOrdinals) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{25} + return fileDescriptor_5b781835628d5338, []int{25} } func (m *StatefulSetOrdinals) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -781,7 +781,7 @@ func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Reset() { } func (*StatefulSetPersistentVolumeClaimRetentionPolicy) ProtoMessage() {} func (*StatefulSetPersistentVolumeClaimRetentionPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{26} + return fileDescriptor_5b781835628d5338, []int{26} } func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -809,7 +809,7 @@ var xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy proto.Intern func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } func (*StatefulSetSpec) ProtoMessage() {} func (*StatefulSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{27} + return fileDescriptor_5b781835628d5338, []int{27} } func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -837,7 +837,7 @@ var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } func (*StatefulSetStatus) ProtoMessage() {} func (*StatefulSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{28} + return fileDescriptor_5b781835628d5338, []int{28} } func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -865,7 +865,7 @@ var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{29} + return fileDescriptor_5b781835628d5338, []int{29} } func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -924,150 +924,149 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto", fileDescriptor_e1014cab6f31e43b) + proto.RegisterFile("k8s.io/api/apps/v1/generated.proto", fileDescriptor_5b781835628d5338) } -var fileDescriptor_e1014cab6f31e43b = []byte{ - // 2211 bytes of a gzipped FileDescriptorProto +var fileDescriptor_5b781835628d5338 = []byte{ + // 2194 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1b, 0xc7, 0x15, 0xd7, 0xf2, 0x43, 0xa2, 0x86, 0x96, 0x64, 0x8f, 0x54, 0x89, 0xb1, 0x1b, 0xd2, 0xdd, 0xb8, - 0xb6, 0x12, 0xc7, 0x64, 0xed, 0x38, 0x41, 0x60, 0x17, 0x09, 0x44, 0x2a, 0x4d, 0xd3, 0xe8, 0xab, - 0x43, 0xcb, 0x01, 0xdc, 0xb4, 0xe8, 0x88, 0x1c, 0x53, 0x1b, 0xed, 0x17, 0x76, 0x87, 0x8a, 0x89, - 0x5e, 0x8a, 0x02, 0xbd, 
0xf5, 0xd0, 0xbf, 0xa1, 0xff, 0x40, 0x51, 0x14, 0xcd, 0x2d, 0x08, 0x82, - 0x5e, 0x7c, 0x29, 0x10, 0xf4, 0xd2, 0x9c, 0x88, 0x9a, 0x39, 0x15, 0x45, 0x6f, 0xed, 0xc5, 0x97, - 0x16, 0x33, 0x3b, 0xfb, 0x3d, 0x2b, 0x52, 0x72, 0xac, 0x34, 0x81, 0x6f, 0xdc, 0x99, 0xdf, 0xfb, - 0xed, 0x9b, 0x99, 0xf7, 0xe6, 0xfd, 0x66, 0x96, 0xe0, 0xf6, 0xc1, 0xeb, 0x6e, 0x5d, 0xb3, 0x1a, - 0x07, 0xfd, 0x3d, 0xe2, 0x98, 0x84, 0x12, 0xb7, 0x71, 0x48, 0xcc, 0xae, 0xe5, 0x34, 0x44, 0x07, - 0xb6, 0xb5, 0x06, 0xb6, 0x6d, 0xb7, 0x71, 0x78, 0xbd, 0xd1, 0x23, 0x26, 0x71, 0x30, 0x25, 0xdd, - 0xba, 0xed, 0x58, 0xd4, 0x82, 0xd0, 0xc3, 0xd4, 0xb1, 0xad, 0xd5, 0x19, 0xa6, 0x7e, 0x78, 0xfd, - 0xfc, 0xb5, 0x9e, 0x46, 0xf7, 0xfb, 0x7b, 0xf5, 0x8e, 0x65, 0x34, 0x7a, 0x56, 0xcf, 0x6a, 0x70, - 0xe8, 0x5e, 0xff, 0x3e, 0x7f, 0xe2, 0x0f, 0xfc, 0x97, 0x47, 0x71, 0x5e, 0x8d, 0xbc, 0xa6, 0x63, - 0x39, 0x44, 0xf2, 0x9a, 0xf3, 0x37, 0x43, 0x8c, 0x81, 0x3b, 0xfb, 0x9a, 0x49, 0x9c, 0x41, 0xc3, - 0x3e, 0xe8, 0xb1, 0x06, 0xb7, 0x61, 0x10, 0x8a, 0x65, 0x56, 0x8d, 0x2c, 0x2b, 0xa7, 0x6f, 0x52, - 0xcd, 0x20, 0x29, 0x83, 0xd7, 0xc6, 0x19, 0xb8, 0x9d, 0x7d, 0x62, 0xe0, 0x94, 0xdd, 0x2b, 0x59, - 0x76, 0x7d, 0xaa, 0xe9, 0x0d, 0xcd, 0xa4, 0x2e, 0x75, 0x92, 0x46, 0xea, 0x7f, 0x14, 0x00, 0x5b, - 0x96, 0x49, 0x1d, 0x4b, 0xd7, 0x89, 0x83, 0xc8, 0xa1, 0xe6, 0x6a, 0x96, 0x09, 0x7f, 0x0e, 0x4a, - 0x6c, 0x3c, 0x5d, 0x4c, 0x71, 0x45, 0xb9, 0xa8, 0xac, 0x96, 0x6f, 0x7c, 0xaf, 0x1e, 0x4e, 0x72, - 0x40, 0x5f, 0xb7, 0x0f, 0x7a, 0xac, 0xc1, 0xad, 0x33, 0x74, 0xfd, 0xf0, 0x7a, 0x7d, 0x7b, 0xef, - 0x03, 0xd2, 0xa1, 0x9b, 0x84, 0xe2, 0x26, 0x7c, 0x38, 0xac, 0x4d, 0x8d, 0x86, 0x35, 0x10, 0xb6, - 0xa1, 0x80, 0x15, 0x6e, 0x83, 0x02, 0x67, 0xcf, 0x71, 0xf6, 0x6b, 0x99, 0xec, 0x62, 0xd0, 0x75, - 0x84, 0x3f, 0x7c, 0xeb, 0x01, 0x25, 0x26, 0x73, 0xaf, 0x79, 0x46, 0x50, 0x17, 0xd6, 0x31, 0xc5, - 0x88, 0x13, 0xc1, 0x97, 0x41, 0xc9, 0x11, 0xee, 0x57, 0xf2, 0x17, 0x95, 0xd5, 0x7c, 0xf3, 0xac, - 0x40, 0x95, 0xfc, 0x61, 0xa1, 0x00, 0xa1, 0xfe, 0x59, 0x01, 0xcb, 0xe9, 0x71, 0x6f, 0x68, 0x2e, - 0x85, 0xef, 0xa7, 0xc6, 0x5e, 0x9f, 0x6c, 0xec, 0xcc, 0x9a, 0x8f, 0x3c, 0x78, 0xb1, 0xdf, 0x12, - 0x19, 0xf7, 0xbb, 0xa0, 0xa8, 0x51, 0x62, 0xb8, 0x95, 0xdc, 0xc5, 0xfc, 0x6a, 0xf9, 0xc6, 0xe5, - 0x7a, 0x3a, 0x76, 0xeb, 0x69, 0xc7, 0x9a, 0x73, 0x82, 0xb2, 0xf8, 0x0e, 0x33, 0x46, 0x1e, 0x87, - 0xfa, 0x5f, 0x05, 0xcc, 0xae, 0x63, 0x62, 0x58, 0x66, 0x9b, 0xd0, 0x53, 0x58, 0xb4, 0x16, 0x28, - 0xb8, 0x36, 0xe9, 0x88, 0x45, 0xfb, 0x8e, 0xcc, 0xf7, 0xc0, 0x9d, 0xb6, 0x4d, 0x3a, 0xe1, 0x42, - 0xb1, 0x27, 0xc4, 0x8d, 0xe1, 0xbb, 0x60, 0xda, 0xa5, 0x98, 0xf6, 0x5d, 0xbe, 0x4c, 0xe5, 0x1b, - 0x2f, 0x1c, 0x4d, 0xc3, 0xa1, 0xcd, 0x79, 0x41, 0x34, 0xed, 0x3d, 0x23, 0x41, 0xa1, 0xfe, 0x23, - 0x07, 0x60, 0x80, 0x6d, 0x59, 0x66, 0x57, 0xa3, 0x2c, 0x7e, 0x6f, 0x81, 0x02, 0x1d, 0xd8, 0x84, - 0x4f, 0xc3, 0x6c, 0xf3, 0xb2, 0xef, 0xc5, 0x9d, 0x81, 0x4d, 0x1e, 0x0f, 0x6b, 0xcb, 0x69, 0x0b, - 0xd6, 0x83, 0xb8, 0x0d, 0xdc, 0x08, 0xfc, 0xcb, 0x71, 0xeb, 0x9b, 0xf1, 0x57, 0x3f, 0x1e, 0xd6, - 0x24, 0x9b, 0x45, 0x3d, 0x60, 0x8a, 0x3b, 0x08, 0x0f, 0x01, 0xd4, 0xb1, 0x4b, 0xef, 0x38, 0xd8, - 0x74, 0xbd, 0x37, 0x69, 0x06, 0x11, 0x23, 0x7f, 0x69, 0xb2, 0xe5, 0x61, 0x16, 0xcd, 0xf3, 0xc2, - 0x0b, 0xb8, 0x91, 0x62, 0x43, 0x92, 0x37, 0xc0, 0xcb, 0x60, 0xda, 0x21, 0xd8, 0xb5, 0xcc, 0x4a, - 0x81, 0x8f, 0x22, 0x98, 0x40, 0xc4, 0x5b, 0x91, 0xe8, 0x85, 0x2f, 0x82, 0x19, 0x83, 0xb8, 0x2e, - 0xee, 0x91, 0x4a, 0x91, 0x03, 0x17, 0x04, 0x70, 0x66, 0xd3, 0x6b, 0x46, 0x7e, 0xbf, 0xfa, 0x07, - 0x05, 0xcc, 0x05, 0x33, 0x77, 0x0a, 0xa9, 0xd2, 
0x8c, 0xa7, 0xca, 0xf3, 0x47, 0xc6, 0x49, 0x46, - 0x86, 0x7c, 0x92, 0x8f, 0xf8, 0xcc, 0x82, 0x10, 0xfe, 0x14, 0x94, 0x5c, 0xa2, 0x93, 0x0e, 0xb5, - 0x1c, 0xe1, 0xf3, 0x2b, 0x13, 0xfa, 0x8c, 0xf7, 0x88, 0xde, 0x16, 0xa6, 0xcd, 0x33, 0xcc, 0x69, - 0xff, 0x09, 0x05, 0x94, 0xf0, 0xc7, 0xa0, 0x44, 0x89, 0x61, 0xeb, 0x98, 0x12, 0x91, 0x26, 0xb1, - 0xf8, 0x66, 0xe1, 0xc2, 0xc8, 0x76, 0xac, 0xee, 0x1d, 0x01, 0xe3, 0x89, 0x12, 0xcc, 0x83, 0xdf, - 0x8a, 0x02, 0x1a, 0x78, 0x00, 0xe6, 0xfb, 0x76, 0x97, 0x21, 0x29, 0xdb, 0xba, 0x7b, 0x03, 0x11, - 0x3e, 0x57, 0x8f, 0x9c, 0x90, 0xdd, 0x98, 0x49, 0x73, 0x59, 0xbc, 0x60, 0x3e, 0xde, 0x8e, 0x12, - 0xd4, 0x70, 0x0d, 0x2c, 0x18, 0x9a, 0x89, 0x08, 0xee, 0x0e, 0xda, 0xa4, 0x63, 0x99, 0x5d, 0x97, - 0x07, 0x50, 0xb1, 0xb9, 0x22, 0x08, 0x16, 0x36, 0xe3, 0xdd, 0x28, 0x89, 0x87, 0x1b, 0x60, 0xc9, - 0xdf, 0x67, 0x7f, 0xa8, 0xb9, 0xd4, 0x72, 0x06, 0x1b, 0x9a, 0xa1, 0xd1, 0xca, 0x34, 0xe7, 0xa9, - 0x8c, 0x86, 0xb5, 0x25, 0x24, 0xe9, 0x47, 0x52, 0x2b, 0xf5, 0x37, 0xd3, 0x60, 0x21, 0xb1, 0x1b, - 0xc0, 0xbb, 0x60, 0xb9, 0xd3, 0x77, 0x1c, 0x62, 0xd2, 0xad, 0xbe, 0xb1, 0x47, 0x9c, 0x76, 0x67, - 0x9f, 0x74, 0xfb, 0x3a, 0xe9, 0xf2, 0x15, 0x2d, 0x36, 0xab, 0xc2, 0xd7, 0xe5, 0x96, 0x14, 0x85, - 0x32, 0xac, 0xe1, 0x8f, 0x00, 0x34, 0x79, 0xd3, 0xa6, 0xe6, 0xba, 0x01, 0x67, 0x8e, 0x73, 0x06, - 0x09, 0xb8, 0x95, 0x42, 0x20, 0x89, 0x15, 0xf3, 0xb1, 0x4b, 0x5c, 0xcd, 0x21, 0xdd, 0xa4, 0x8f, - 0xf9, 0xb8, 0x8f, 0xeb, 0x52, 0x14, 0xca, 0xb0, 0x86, 0xaf, 0x82, 0xb2, 0xf7, 0x36, 0x3e, 0xe7, - 0x62, 0x71, 0x16, 0x05, 0x59, 0x79, 0x2b, 0xec, 0x42, 0x51, 0x1c, 0x1b, 0x9a, 0xb5, 0xe7, 0x12, - 0xe7, 0x90, 0x74, 0xdf, 0xf6, 0x34, 0x00, 0x2b, 0x94, 0x45, 0x5e, 0x28, 0x83, 0xa1, 0x6d, 0xa7, - 0x10, 0x48, 0x62, 0xc5, 0x86, 0xe6, 0x45, 0x4d, 0x6a, 0x68, 0xd3, 0xf1, 0xa1, 0xed, 0x4a, 0x51, - 0x28, 0xc3, 0x9a, 0xc5, 0x9e, 0xe7, 0xf2, 0xda, 0x21, 0xd6, 0x74, 0xbc, 0xa7, 0x93, 0xca, 0x4c, - 0x3c, 0xf6, 0xb6, 0xe2, 0xdd, 0x28, 0x89, 0x87, 0x6f, 0x83, 0x73, 0x5e, 0xd3, 0xae, 0x89, 0x03, - 0x92, 0x12, 0x27, 0x79, 0x4e, 0x90, 0x9c, 0xdb, 0x4a, 0x02, 0x50, 0xda, 0x06, 0xde, 0x02, 0xf3, - 0x1d, 0x4b, 0xd7, 0x79, 0x3c, 0xb6, 0xac, 0xbe, 0x49, 0x2b, 0xb3, 0x9c, 0x05, 0xb2, 0x1c, 0x6a, - 0xc5, 0x7a, 0x50, 0x02, 0x09, 0xef, 0x01, 0xd0, 0xf1, 0xcb, 0x81, 0x5b, 0x01, 0xd9, 0x85, 0x3e, - 0x5d, 0x87, 0xc2, 0x02, 0x1c, 0x34, 0xb9, 0x28, 0xc2, 0xa6, 0x7e, 0xa2, 0x80, 0x95, 0x8c, 0x1c, - 0x87, 0x6f, 0xc6, 0xaa, 0xde, 0xd5, 0x44, 0xd5, 0xbb, 0x90, 0x61, 0x16, 0x29, 0x7d, 0x1d, 0x30, - 0xc7, 0x74, 0x87, 0x66, 0xf6, 0x3c, 0x88, 0xd8, 0xc1, 0x5e, 0x92, 0xf9, 0x8e, 0xa2, 0xc0, 0x70, - 0x1b, 0x3e, 0x37, 0x1a, 0xd6, 0xe6, 0x62, 0x7d, 0x28, 0xce, 0xa9, 0xfe, 0x2a, 0x07, 0xc0, 0x3a, - 0xb1, 0x75, 0x6b, 0x60, 0x10, 0xf3, 0x34, 0x54, 0xcb, 0x7a, 0x4c, 0xb5, 0xa8, 0xd2, 0x85, 0x08, - 0xfc, 0xc9, 0x94, 0x2d, 0x1b, 0x09, 0xd9, 0x72, 0x69, 0x0c, 0xcf, 0xd1, 0xba, 0xe5, 0x6f, 0x79, - 0xb0, 0x18, 0x82, 0x43, 0xe1, 0x72, 0x3b, 0xb6, 0x84, 0x57, 0x12, 0x4b, 0xb8, 0x22, 0x31, 0x79, - 0x6a, 0xca, 0xe5, 0x03, 0x30, 0xcf, 0x74, 0x85, 0xb7, 0x6a, 0x5c, 0xb5, 0x4c, 0x1f, 0x5b, 0xb5, - 0x04, 0x55, 0x67, 0x23, 0xc6, 0x84, 0x12, 0xcc, 0x19, 0x2a, 0x69, 0xe6, 0xeb, 0xa8, 0x92, 0xfe, - 0xa8, 0x80, 0xf9, 0x70, 0x99, 0x4e, 0x41, 0x26, 0xb5, 0xe2, 0x32, 0xa9, 0x7a, 0x74, 0x5c, 0x66, - 0xe8, 0xa4, 0xbf, 0x16, 0xa2, 0x5e, 0x73, 0xa1, 0xb4, 0xca, 0x0e, 0x54, 0xb6, 0xae, 0x75, 0xb0, - 0x2b, 0xca, 0xea, 0x19, 0xef, 0x30, 0xe5, 0xb5, 0xa1, 0xa0, 0x37, 0x26, 0xa9, 0x72, 0x4f, 0x57, - 0x52, 0xe5, 0xbf, 0x1c, 0x49, 0x75, 0x07, 0x94, 0x5c, 0x5f, 0x4c, 0x15, 
0x38, 0xe5, 0xe5, 0x71, - 0xe9, 0x2c, 0x74, 0x54, 0xc0, 0x1a, 0x28, 0xa8, 0x80, 0x49, 0xa6, 0x9d, 0x8a, 0x5f, 0xa5, 0x76, - 0x62, 0xe1, 0x6d, 0xe3, 0xbe, 0x4b, 0xba, 0x3c, 0x95, 0x4a, 0x61, 0x78, 0xef, 0xf0, 0x56, 0x24, - 0x7a, 0xe1, 0x2e, 0x58, 0xb1, 0x1d, 0xab, 0xe7, 0x10, 0xd7, 0x5d, 0x27, 0xb8, 0xab, 0x6b, 0x26, - 0xf1, 0x07, 0xe0, 0x55, 0xbd, 0x0b, 0xa3, 0x61, 0x6d, 0x65, 0x47, 0x0e, 0x41, 0x59, 0xb6, 0xea, - 0xc7, 0x05, 0x70, 0x36, 0xb9, 0x23, 0x66, 0x08, 0x11, 0xe5, 0x44, 0x42, 0xe4, 0xe5, 0x48, 0x88, - 0x7a, 0x2a, 0x2d, 0x72, 0xe6, 0x4f, 0x85, 0xe9, 0x1a, 0x58, 0x10, 0xc2, 0xc3, 0xef, 0x14, 0x52, - 0x2c, 0x58, 0x9e, 0xdd, 0x78, 0x37, 0x4a, 0xe2, 0xe1, 0x6d, 0x30, 0xe7, 0x70, 0x6d, 0xe5, 0x13, - 0x78, 0xfa, 0xe4, 0x5b, 0x82, 0x60, 0x0e, 0x45, 0x3b, 0x51, 0x1c, 0xcb, 0xb4, 0x49, 0x28, 0x39, - 0x7c, 0x82, 0x42, 0x5c, 0x9b, 0xac, 0x25, 0x01, 0x28, 0x6d, 0x03, 0x37, 0xc1, 0x62, 0xdf, 0x4c, - 0x53, 0x79, 0xb1, 0x76, 0x41, 0x50, 0x2d, 0xee, 0xa6, 0x21, 0x48, 0x66, 0x07, 0x7f, 0x12, 0x93, - 0x2b, 0xd3, 0x7c, 0x17, 0xb9, 0x72, 0x74, 0x3a, 0x4c, 0xac, 0x57, 0x24, 0x3a, 0xaa, 0x34, 0xa9, - 0x8e, 0x52, 0x3f, 0x52, 0x00, 0x4c, 0xa7, 0xe0, 0xd8, 0xc3, 0x7d, 0xca, 0x22, 0x52, 0x22, 0xbb, - 0x72, 0x85, 0x73, 0x75, 0xbc, 0xc2, 0x09, 0x77, 0xd0, 0xc9, 0x24, 0x8e, 0x98, 0xde, 0xd3, 0xb9, - 0x98, 0x99, 0x40, 0xe2, 0x84, 0xfe, 0x3c, 0x99, 0xc4, 0x89, 0xf0, 0x1c, 0x2d, 0x71, 0xfe, 0x99, - 0x03, 0x8b, 0x21, 0x78, 0x62, 0x89, 0x23, 0x31, 0x79, 0x76, 0x39, 0x33, 0x99, 0xec, 0x08, 0xa7, - 0xee, 0xff, 0x44, 0x76, 0x84, 0x0e, 0x65, 0xc8, 0x8e, 0xdf, 0xe7, 0xa2, 0x5e, 0x1f, 0x53, 0x76, - 0x7c, 0x09, 0x57, 0x15, 0x5f, 0x3b, 0xe5, 0xa2, 0x7e, 0x9a, 0x07, 0x67, 0x93, 0x29, 0x18, 0xab, - 0x83, 0xca, 0xd8, 0x3a, 0xb8, 0x03, 0x96, 0xee, 0xf7, 0x75, 0x7d, 0xc0, 0xc7, 0x10, 0x29, 0x86, - 0x5e, 0x05, 0xfd, 0xb6, 0xb0, 0x5c, 0xfa, 0x81, 0x04, 0x83, 0xa4, 0x96, 0xe9, 0xb2, 0x58, 0x78, - 0xd2, 0xb2, 0x58, 0x3c, 0x41, 0x59, 0x94, 0x2b, 0x8b, 0xfc, 0x89, 0x94, 0xc5, 0xc4, 0x35, 0x51, - 0xb2, 0x5d, 0x8d, 0x3d, 0xc3, 0x8f, 0x14, 0xb0, 0x2c, 0x3f, 0x3e, 0x43, 0x1d, 0xcc, 0x1b, 0xf8, - 0x41, 0xf4, 0xf2, 0x62, 0x5c, 0xc1, 0xe8, 0x53, 0x4d, 0xaf, 0x7b, 0x5f, 0x77, 0xea, 0xef, 0x98, - 0x74, 0xdb, 0x69, 0x53, 0x47, 0x33, 0x7b, 0x5e, 0x81, 0xdd, 0x8c, 0x71, 0xa1, 0x04, 0x37, 0xbc, - 0x07, 0x4a, 0x06, 0x7e, 0xd0, 0xee, 0x3b, 0x3d, 0xbf, 0x10, 0x1e, 0xff, 0x3d, 0x3c, 0xf6, 0x37, - 0x05, 0x0b, 0x0a, 0xf8, 0xd4, 0x2f, 0x14, 0xb0, 0x92, 0x51, 0x41, 0xbf, 0x41, 0xa3, 0xfc, 0x58, - 0x01, 0x17, 0x63, 0xa3, 0x64, 0x19, 0x49, 0xee, 0xf7, 0x75, 0x9e, 0x9c, 0x42, 0xb0, 0x5c, 0x05, - 0xb3, 0x36, 0x76, 0xa8, 0x16, 0x28, 0xdd, 0x62, 0x73, 0x6e, 0x34, 0xac, 0xcd, 0xee, 0xf8, 0x8d, - 0x28, 0xec, 0x97, 0xcc, 0x4d, 0xee, 0xe9, 0xcd, 0x8d, 0xfa, 0xeb, 0x1c, 0x28, 0x47, 0x5c, 0x3e, - 0x05, 0xa9, 0xf2, 0x56, 0x4c, 0xaa, 0x48, 0x3f, 0xfe, 0x44, 0xe7, 0x30, 0x4b, 0xab, 0x6c, 0x26, - 0xb4, 0xca, 0x77, 0xc7, 0x11, 0x1d, 0x2d, 0x56, 0xfe, 0x95, 0x03, 0x4b, 0x11, 0x74, 0xa8, 0x56, - 0xbe, 0x1f, 0x53, 0x2b, 0xab, 0x09, 0xb5, 0x52, 0x91, 0xd9, 0x3c, 0x93, 0x2b, 0xe3, 0xe5, 0xca, - 0x9f, 0x14, 0xb0, 0x10, 0x99, 0xbb, 0x53, 0xd0, 0x2b, 0xeb, 0x71, 0xbd, 0x52, 0x1b, 0x13, 0x2f, - 0x19, 0x82, 0xe5, 0x16, 0x58, 0x8c, 0x80, 0xb6, 0x9d, 0xae, 0x66, 0x62, 0xdd, 0x85, 0x2f, 0x80, - 0xa2, 0x4b, 0xb1, 0x43, 0xfd, 0xec, 0xf6, 0x6d, 0xdb, 0xac, 0x11, 0x79, 0x7d, 0xea, 0xbf, 0x15, - 0xd0, 0x88, 0x18, 0xef, 0x10, 0xc7, 0xd5, 0x5c, 0x4a, 0x4c, 0x7a, 0xd7, 0xd2, 0xfb, 0x06, 0x69, - 0xe9, 0x58, 0x33, 0x10, 0x61, 0x0d, 0x9a, 0x65, 0xee, 0x58, 0xba, 0xd6, 0x19, 0x40, 0x0c, 0xca, - 
0x1f, 0xee, 0x13, 0x73, 0x9d, 0xe8, 0x84, 0x8a, 0xcf, 0x1b, 0xb3, 0xcd, 0x37, 0xfd, 0xdb, 0xfe, - 0xf7, 0xc2, 0xae, 0xc7, 0xc3, 0xda, 0xea, 0x24, 0x8c, 0x3c, 0x38, 0xa3, 0x9c, 0xf0, 0x67, 0x00, - 0xb0, 0xc7, 0x76, 0x07, 0xfb, 0x1f, 0x3b, 0x66, 0x9b, 0x6f, 0xf8, 0x29, 0xfc, 0x5e, 0xd0, 0x73, - 0xac, 0x17, 0x44, 0x18, 0xd5, 0xdf, 0x95, 0x62, 0x4b, 0xfd, 0x8d, 0xbf, 0x5b, 0xfa, 0x05, 0x58, - 0x3a, 0x0c, 0x67, 0xc7, 0x07, 0x30, 0x4d, 0xc4, 0xe2, 0xee, 0x45, 0x29, 0xbd, 0x6c, 0x5e, 0x43, - 0x25, 0x76, 0x57, 0x42, 0x87, 0xa4, 0x2f, 0x81, 0xaf, 0x82, 0x32, 0xd3, 0x32, 0x5a, 0x87, 0x6c, - 0x61, 0xc3, 0x4f, 0xc3, 0xe0, 0xeb, 0x50, 0x3b, 0xec, 0x42, 0x51, 0x1c, 0xdc, 0x07, 0x8b, 0xb6, - 0xd5, 0xdd, 0xc4, 0x26, 0xee, 0x11, 0x56, 0xa1, 0xbd, 0xa5, 0xe4, 0xb7, 0x4e, 0xb3, 0xcd, 0xd7, - 0xfc, 0x1b, 0x85, 0x9d, 0x34, 0x84, 0x9d, 0xd8, 0x24, 0xcd, 0x3c, 0x08, 0x64, 0x94, 0xd0, 0x48, - 0x7d, 0xcc, 0x9c, 0x49, 0xfd, 0x03, 0x44, 0x96, 0x8f, 0x27, 0xfc, 0x9c, 0x99, 0x75, 0x9f, 0x56, - 0x3a, 0xd1, 0x7d, 0x9a, 0xe4, 0xc4, 0x31, 0x7b, 0xcc, 0x13, 0xc7, 0xa7, 0x0a, 0xb8, 0x64, 0x4f, - 0x90, 0x46, 0x15, 0xc0, 0xa7, 0xa5, 0x35, 0x66, 0x5a, 0x26, 0xc9, 0xc8, 0xe6, 0xea, 0x68, 0x58, - 0xbb, 0x34, 0x09, 0x12, 0x4d, 0xe4, 0x1a, 0x4b, 0x1a, 0x4b, 0xec, 0x7c, 0x95, 0x32, 0x77, 0xf3, - 0xca, 0x18, 0x37, 0xfd, 0x8d, 0xd2, 0xcb, 0x43, 0xff, 0x09, 0x05, 0x34, 0xea, 0x47, 0x45, 0x70, - 0x2e, 0x55, 0xad, 0xbf, 0xc2, 0xbb, 0xc2, 0xd4, 0x89, 0x26, 0x7f, 0x8c, 0x13, 0xcd, 0x1a, 0x58, - 0x10, 0x1f, 0x98, 0x13, 0x07, 0xa2, 0x20, 0x4c, 0x5a, 0xf1, 0x6e, 0x94, 0xc4, 0xcb, 0xee, 0x2a, - 0x8b, 0xc7, 0xbc, 0xab, 0x8c, 0x7a, 0x21, 0xfe, 0x17, 0xe5, 0xe5, 0x73, 0xda, 0x0b, 0xf1, 0xf7, - 0xa8, 0x24, 0x1e, 0xbe, 0xe1, 0x27, 0x6b, 0xc0, 0x30, 0xc3, 0x19, 0x12, 0xd9, 0x17, 0x10, 0x24, - 0xd0, 0x4f, 0xf4, 0x11, 0xf5, 0x7d, 0xc9, 0x47, 0xd4, 0xd5, 0x31, 0x61, 0x36, 0xf9, 0xb5, 0xa4, - 0xf4, 0xd0, 0x59, 0x3e, 0xfe, 0xa1, 0x53, 0xfd, 0x8b, 0x02, 0x9e, 0xcb, 0xdc, 0xa6, 0xe0, 0x5a, - 0x4c, 0x3d, 0x5e, 0x4b, 0xa8, 0xc7, 0xe7, 0x33, 0x0d, 0x23, 0x12, 0xd2, 0x90, 0xdf, 0x58, 0xde, - 0x1c, 0x7b, 0x63, 0x29, 0x39, 0x89, 0x8c, 0xbf, 0xba, 0x6c, 0xbe, 0xfe, 0xf0, 0x51, 0x75, 0xea, - 0xb3, 0x47, 0xd5, 0xa9, 0xcf, 0x1f, 0x55, 0xa7, 0x7e, 0x39, 0xaa, 0x2a, 0x0f, 0x47, 0x55, 0xe5, - 0xb3, 0x51, 0x55, 0xf9, 0x7c, 0x54, 0x55, 0xfe, 0x3e, 0xaa, 0x2a, 0xbf, 0xfd, 0xa2, 0x3a, 0x75, - 0x0f, 0xa6, 0xff, 0x95, 0xf9, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xae, 0x39, 0x4c, 0x13, 0xc3, - 0x29, 0x00, 0x00, + 0xb6, 0x12, 0xc7, 0x64, 0xed, 0x38, 0x41, 0xe0, 0x14, 0x09, 0x44, 0x2a, 0x4d, 0xd3, 0xe8, 0xab, + 0x43, 0xcb, 0x01, 0xdc, 0xb4, 0xe8, 0x68, 0x39, 0xa6, 0x36, 0xde, 0x2f, 0xec, 0x0e, 0x15, 0x0b, + 0xbd, 0x14, 0x05, 0x7a, 0xeb, 0xa1, 0x7f, 0x43, 0xff, 0x81, 0xa2, 0x28, 0x9a, 0x5b, 0x10, 0x04, + 0xbd, 0xf8, 0x52, 0x20, 0xe8, 0xa5, 0x39, 0x11, 0x35, 0x73, 0x2a, 0x8a, 0xde, 0xda, 0x8b, 0x2f, + 0x2d, 0x66, 0x76, 0xf6, 0x7b, 0x56, 0xa4, 0xe4, 0x58, 0x69, 0x82, 0xdc, 0xb8, 0x33, 0xbf, 0xf7, + 0xdb, 0x37, 0x33, 0xef, 0xcd, 0xfb, 0xcd, 0x2c, 0x81, 0x7a, 0xff, 0x55, 0xaf, 0xa9, 0xdb, 0x2d, + 0xec, 0xe8, 0x2d, 0xec, 0x38, 0x5e, 0xeb, 0xe0, 0x7a, 0xab, 0x4f, 0x2c, 0xe2, 0x62, 0x4a, 0x7a, + 0x4d, 0xc7, 0xb5, 0xa9, 0x0d, 0xa1, 0x8f, 0x69, 0x62, 0x47, 0x6f, 0x32, 0x4c, 0xf3, 0xe0, 0xfa, + 0xf9, 0x6b, 0x7d, 0x9d, 0xee, 0x0f, 0xf6, 0x9a, 0x9a, 0x6d, 0xb6, 0xfa, 0x76, 0xdf, 0x6e, 0x71, + 0xe8, 0xde, 0xe0, 0x1e, 0x7f, 0xe2, 0x0f, 0xfc, 0x97, 0x4f, 0x71, 0x3e, 0xfe, 0x1a, 0xcd, 0x76, + 0x89, 0xe4, 0x35, 0xe7, 0x6f, 0x46, 0x18, 0x13, 0x6b, 0xfb, 0xba, 0x45, 0xdc, 0xc3, 0x96, 0x73, + 0xbf, 
0xcf, 0x1a, 0xbc, 0x96, 0x49, 0x28, 0x96, 0x59, 0xb5, 0xf2, 0xac, 0xdc, 0x81, 0x45, 0x75, + 0x93, 0x64, 0x0c, 0x5e, 0x19, 0x67, 0xe0, 0x69, 0xfb, 0xc4, 0xc4, 0x19, 0xbb, 0x97, 0xf2, 0xec, + 0x06, 0x54, 0x37, 0x5a, 0xba, 0x45, 0x3d, 0xea, 0xa6, 0x8d, 0xd4, 0xff, 0x28, 0x00, 0x76, 0x6c, + 0x8b, 0xba, 0xb6, 0x61, 0x10, 0x17, 0x91, 0x03, 0xdd, 0xd3, 0x6d, 0x0b, 0xfe, 0x1c, 0x54, 0xd8, + 0x78, 0x7a, 0x98, 0xe2, 0x9a, 0x72, 0x51, 0x59, 0xad, 0xde, 0xf8, 0x5e, 0x33, 0x9a, 0xe4, 0x90, + 0xbe, 0xe9, 0xdc, 0xef, 0xb3, 0x06, 0xaf, 0xc9, 0xd0, 0xcd, 0x83, 0xeb, 0xcd, 0xed, 0xbd, 0xf7, + 0x89, 0x46, 0x37, 0x09, 0xc5, 0x6d, 0xf8, 0x70, 0xd8, 0x98, 0x1a, 0x0d, 0x1b, 0x20, 0x6a, 0x43, + 0x21, 0x2b, 0xdc, 0x06, 0x25, 0xce, 0x5e, 0xe0, 0xec, 0xd7, 0x72, 0xd9, 0xc5, 0xa0, 0x9b, 0x08, + 0x7f, 0xf0, 0xe6, 0x03, 0x4a, 0x2c, 0xe6, 0x5e, 0xfb, 0x8c, 0xa0, 0x2e, 0xad, 0x63, 0x8a, 0x11, + 0x27, 0x82, 0x2f, 0x82, 0x8a, 0x2b, 0xdc, 0xaf, 0x15, 0x2f, 0x2a, 0xab, 0xc5, 0xf6, 0x59, 0x81, + 0xaa, 0x04, 0xc3, 0x42, 0x21, 0x42, 0xfd, 0xb3, 0x02, 0x96, 0xb3, 0xe3, 0xde, 0xd0, 0x3d, 0x0a, + 0xdf, 0xcb, 0x8c, 0xbd, 0x39, 0xd9, 0xd8, 0x99, 0x35, 0x1f, 0x79, 0xf8, 0xe2, 0xa0, 0x25, 0x36, + 0xee, 0x77, 0x40, 0x59, 0xa7, 0xc4, 0xf4, 0x6a, 0x85, 0x8b, 0xc5, 0xd5, 0xea, 0x8d, 0xcb, 0xcd, + 0x6c, 0xec, 0x36, 0xb3, 0x8e, 0xb5, 0xe7, 0x04, 0x65, 0xf9, 0x6d, 0x66, 0x8c, 0x7c, 0x0e, 0xf5, + 0xbf, 0x0a, 0x98, 0x5d, 0xc7, 0xc4, 0xb4, 0xad, 0x2e, 0xa1, 0xa7, 0xb0, 0x68, 0x1d, 0x50, 0xf2, + 0x1c, 0xa2, 0x89, 0x45, 0xfb, 0x8e, 0xcc, 0xf7, 0xd0, 0x9d, 0xae, 0x43, 0xb4, 0x68, 0xa1, 0xd8, + 0x13, 0xe2, 0xc6, 0xf0, 0x1d, 0x30, 0xed, 0x51, 0x4c, 0x07, 0x1e, 0x5f, 0xa6, 0xea, 0x8d, 0xe7, + 0x8e, 0xa6, 0xe1, 0xd0, 0xf6, 0xbc, 0x20, 0x9a, 0xf6, 0x9f, 0x91, 0xa0, 0x50, 0xff, 0x51, 0x00, + 0x30, 0xc4, 0x76, 0x6c, 0xab, 0xa7, 0x53, 0x16, 0xbf, 0xb7, 0x40, 0x89, 0x1e, 0x3a, 0x84, 0x4f, + 0xc3, 0x6c, 0xfb, 0x72, 0xe0, 0xc5, 0xed, 0x43, 0x87, 0x3c, 0x1e, 0x36, 0x96, 0xb3, 0x16, 0xac, + 0x07, 0x71, 0x1b, 0xb8, 0x11, 0xfa, 0x57, 0xe0, 0xd6, 0x37, 0x93, 0xaf, 0x7e, 0x3c, 0x6c, 0x48, + 0x36, 0x8b, 0x66, 0xc8, 0x94, 0x74, 0x10, 0x1e, 0x00, 0x68, 0x60, 0x8f, 0xde, 0x76, 0xb1, 0xe5, + 0xf9, 0x6f, 0xd2, 0x4d, 0x22, 0x46, 0xfe, 0xc2, 0x64, 0xcb, 0xc3, 0x2c, 0xda, 0xe7, 0x85, 0x17, + 0x70, 0x23, 0xc3, 0x86, 0x24, 0x6f, 0x80, 0x97, 0xc1, 0xb4, 0x4b, 0xb0, 0x67, 0x5b, 0xb5, 0x12, + 0x1f, 0x45, 0x38, 0x81, 0x88, 0xb7, 0x22, 0xd1, 0x0b, 0x9f, 0x07, 0x33, 0x26, 0xf1, 0x3c, 0xdc, + 0x27, 0xb5, 0x32, 0x07, 0x2e, 0x08, 0xe0, 0xcc, 0xa6, 0xdf, 0x8c, 0x82, 0x7e, 0xf5, 0x0f, 0x0a, + 0x98, 0x0b, 0x67, 0xee, 0x14, 0x52, 0xa5, 0x9d, 0x4c, 0x95, 0x67, 0x8f, 0x8c, 0x93, 0x9c, 0x0c, + 0xf9, 0xb8, 0x18, 0xf3, 0x99, 0x05, 0x21, 0xfc, 0x29, 0xa8, 0x78, 0xc4, 0x20, 0x1a, 0xb5, 0x5d, + 0xe1, 0xf3, 0x4b, 0x13, 0xfa, 0x8c, 0xf7, 0x88, 0xd1, 0x15, 0xa6, 0xed, 0x33, 0xcc, 0xe9, 0xe0, + 0x09, 0x85, 0x94, 0xf0, 0xc7, 0xa0, 0x42, 0x89, 0xe9, 0x18, 0x98, 0x12, 0x91, 0x26, 0x89, 0xf8, + 0x66, 0xe1, 0xc2, 0xc8, 0x76, 0xec, 0xde, 0x6d, 0x01, 0xe3, 0x89, 0x12, 0xce, 0x43, 0xd0, 0x8a, + 0x42, 0x1a, 0x78, 0x1f, 0xcc, 0x0f, 0x9c, 0x1e, 0x43, 0x52, 0xb6, 0x75, 0xf7, 0x0f, 0x45, 0xf8, + 0x5c, 0x3d, 0x72, 0x42, 0x76, 0x13, 0x26, 0xed, 0x65, 0xf1, 0x82, 0xf9, 0x64, 0x3b, 0x4a, 0x51, + 0xc3, 0x35, 0xb0, 0x60, 0xea, 0x16, 0x22, 0xb8, 0x77, 0xd8, 0x25, 0x9a, 0x6d, 0xf5, 0x3c, 0x1e, + 0x40, 0xe5, 0xf6, 0x8a, 0x20, 0x58, 0xd8, 0x4c, 0x76, 0xa3, 0x34, 0x1e, 0x6e, 0x80, 0xa5, 0x60, + 0x9f, 0xfd, 0xa1, 0xee, 0x51, 0xdb, 0x3d, 0xdc, 0xd0, 0x4d, 0x9d, 0xd6, 0xa6, 0x39, 0x4f, 0x6d, + 0x34, 0x6c, 0x2c, 0x21, 0x49, 
0x3f, 0x92, 0x5a, 0xa9, 0xbf, 0x99, 0x06, 0x0b, 0xa9, 0xdd, 0x00, + 0xde, 0x01, 0xcb, 0xda, 0xc0, 0x75, 0x89, 0x45, 0xb7, 0x06, 0xe6, 0x1e, 0x71, 0xbb, 0xda, 0x3e, + 0xe9, 0x0d, 0x0c, 0xd2, 0xe3, 0x2b, 0x5a, 0x6e, 0xd7, 0x85, 0xaf, 0xcb, 0x1d, 0x29, 0x0a, 0xe5, + 0x58, 0xc3, 0x1f, 0x01, 0x68, 0xf1, 0xa6, 0x4d, 0xdd, 0xf3, 0x42, 0xce, 0x02, 0xe7, 0x0c, 0x13, + 0x70, 0x2b, 0x83, 0x40, 0x12, 0x2b, 0xe6, 0x63, 0x8f, 0x78, 0xba, 0x4b, 0x7a, 0x69, 0x1f, 0x8b, + 0x49, 0x1f, 0xd7, 0xa5, 0x28, 0x94, 0x63, 0x0d, 0x5f, 0x06, 0x55, 0xff, 0x6d, 0x7c, 0xce, 0xc5, + 0xe2, 0x2c, 0x0a, 0xb2, 0xea, 0x56, 0xd4, 0x85, 0xe2, 0x38, 0x36, 0x34, 0x7b, 0xcf, 0x23, 0xee, + 0x01, 0xe9, 0xbd, 0xe5, 0x6b, 0x00, 0x56, 0x28, 0xcb, 0xbc, 0x50, 0x86, 0x43, 0xdb, 0xce, 0x20, + 0x90, 0xc4, 0x8a, 0x0d, 0xcd, 0x8f, 0x9a, 0xcc, 0xd0, 0xa6, 0x93, 0x43, 0xdb, 0x95, 0xa2, 0x50, + 0x8e, 0x35, 0x8b, 0x3d, 0xdf, 0xe5, 0xb5, 0x03, 0xac, 0x1b, 0x78, 0xcf, 0x20, 0xb5, 0x99, 0x64, + 0xec, 0x6d, 0x25, 0xbb, 0x51, 0x1a, 0x0f, 0xdf, 0x02, 0xe7, 0xfc, 0xa6, 0x5d, 0x0b, 0x87, 0x24, + 0x15, 0x4e, 0xf2, 0x8c, 0x20, 0x39, 0xb7, 0x95, 0x06, 0xa0, 0xac, 0x0d, 0xbc, 0x05, 0xe6, 0x35, + 0xdb, 0x30, 0x78, 0x3c, 0x76, 0xec, 0x81, 0x45, 0x6b, 0xb3, 0x9c, 0x05, 0xb2, 0x1c, 0xea, 0x24, + 0x7a, 0x50, 0x0a, 0x09, 0xef, 0x02, 0xa0, 0x05, 0xe5, 0xc0, 0xab, 0x81, 0xfc, 0x42, 0x9f, 0xad, + 0x43, 0x51, 0x01, 0x0e, 0x9b, 0x3c, 0x14, 0x63, 0x53, 0x3f, 0x56, 0xc0, 0x4a, 0x4e, 0x8e, 0xc3, + 0x37, 0x12, 0x55, 0xef, 0x6a, 0xaa, 0xea, 0x5d, 0xc8, 0x31, 0x8b, 0x95, 0x3e, 0x0d, 0xcc, 0x31, + 0xdd, 0xa1, 0x5b, 0x7d, 0x1f, 0x22, 0x76, 0xb0, 0x17, 0x64, 0xbe, 0xa3, 0x38, 0x30, 0xda, 0x86, + 0xcf, 0x8d, 0x86, 0x8d, 0xb9, 0x44, 0x1f, 0x4a, 0x72, 0xaa, 0xbf, 0x2a, 0x00, 0xb0, 0x4e, 0x1c, + 0xc3, 0x3e, 0x34, 0x89, 0x75, 0x1a, 0xaa, 0x65, 0x3d, 0xa1, 0x5a, 0x54, 0xe9, 0x42, 0x84, 0xfe, + 0xe4, 0xca, 0x96, 0x8d, 0x94, 0x6c, 0xb9, 0x34, 0x86, 0xe7, 0x68, 0xdd, 0xf2, 0xb7, 0x22, 0x58, + 0x8c, 0xc0, 0x91, 0x70, 0x79, 0x2d, 0xb1, 0x84, 0x57, 0x52, 0x4b, 0xb8, 0x22, 0x31, 0x79, 0x6a, + 0xca, 0xe5, 0x7d, 0x30, 0xcf, 0x74, 0x85, 0xbf, 0x6a, 0x5c, 0xb5, 0x4c, 0x1f, 0x5b, 0xb5, 0x84, + 0x55, 0x67, 0x23, 0xc1, 0x84, 0x52, 0xcc, 0x39, 0x2a, 0x69, 0xe6, 0xab, 0xa8, 0x92, 0xfe, 0xa8, + 0x80, 0xf9, 0x68, 0x99, 0x4e, 0x41, 0x26, 0x75, 0x92, 0x32, 0xa9, 0x7e, 0x74, 0x5c, 0xe6, 0xe8, + 0xa4, 0xbf, 0x96, 0xe2, 0x5e, 0x73, 0xa1, 0xb4, 0xca, 0x0e, 0x54, 0x8e, 0xa1, 0x6b, 0xd8, 0x13, + 0x65, 0xf5, 0x8c, 0x7f, 0x98, 0xf2, 0xdb, 0x50, 0xd8, 0x9b, 0x90, 0x54, 0x85, 0xa7, 0x2b, 0xa9, + 0x8a, 0x5f, 0x8c, 0xa4, 0xba, 0x0d, 0x2a, 0x5e, 0x20, 0xa6, 0x4a, 0x9c, 0xf2, 0xf2, 0xb8, 0x74, + 0x16, 0x3a, 0x2a, 0x64, 0x0d, 0x15, 0x54, 0xc8, 0x24, 0xd3, 0x4e, 0xe5, 0x2f, 0x53, 0x3b, 0xb1, + 0xf0, 0x76, 0xf0, 0xc0, 0x23, 0x3d, 0x9e, 0x4a, 0x95, 0x28, 0xbc, 0x77, 0x78, 0x2b, 0x12, 0xbd, + 0x70, 0x17, 0xac, 0x38, 0xae, 0xdd, 0x77, 0x89, 0xe7, 0xad, 0x13, 0xdc, 0x33, 0x74, 0x8b, 0x04, + 0x03, 0xf0, 0xab, 0xde, 0x85, 0xd1, 0xb0, 0xb1, 0xb2, 0x23, 0x87, 0xa0, 0x3c, 0x5b, 0xf5, 0xa3, + 0x12, 0x38, 0x9b, 0xde, 0x11, 0x73, 0x84, 0x88, 0x72, 0x22, 0x21, 0xf2, 0x62, 0x2c, 0x44, 0x7d, + 0x95, 0x16, 0x3b, 0xf3, 0x67, 0xc2, 0x74, 0x0d, 0x2c, 0x08, 0xe1, 0x11, 0x74, 0x0a, 0x29, 0x16, + 0x2e, 0xcf, 0x6e, 0xb2, 0x1b, 0xa5, 0xf1, 0xf0, 0x35, 0x30, 0xe7, 0x72, 0x6d, 0x15, 0x10, 0xf8, + 0xfa, 0xe4, 0x5b, 0x82, 0x60, 0x0e, 0xc5, 0x3b, 0x51, 0x12, 0xcb, 0xb4, 0x49, 0x24, 0x39, 0x02, + 0x82, 0x52, 0x52, 0x9b, 0xac, 0xa5, 0x01, 0x28, 0x6b, 0x03, 0x37, 0xc1, 0xe2, 0xc0, 0xca, 0x52, + 0xf9, 0xb1, 0x76, 0x41, 0x50, 0x2d, 0xee, 0x66, 0x21, 
0x48, 0x66, 0x07, 0x7f, 0x92, 0x90, 0x2b, + 0xd3, 0x7c, 0x17, 0xb9, 0x72, 0x74, 0x3a, 0x4c, 0xac, 0x57, 0x24, 0x3a, 0xaa, 0x32, 0xa9, 0x8e, + 0x52, 0x3f, 0x54, 0x00, 0xcc, 0xa6, 0xe0, 0xd8, 0xc3, 0x7d, 0xc6, 0x22, 0x56, 0x22, 0x7b, 0x72, + 0x85, 0x73, 0x75, 0xbc, 0xc2, 0x89, 0x76, 0xd0, 0xc9, 0x24, 0x8e, 0x98, 0xde, 0xd3, 0xb9, 0x98, + 0x99, 0x40, 0xe2, 0x44, 0xfe, 0x3c, 0x99, 0xc4, 0x89, 0xf1, 0x1c, 0x2d, 0x71, 0xfe, 0x59, 0x00, + 0x8b, 0x11, 0x78, 0x62, 0x89, 0x23, 0x31, 0xf9, 0xe6, 0x72, 0x66, 0x32, 0xd9, 0x11, 0x4d, 0xdd, + 0xff, 0x89, 0xec, 0x88, 0x1c, 0xca, 0x91, 0x1d, 0xbf, 0x2f, 0xc4, 0xbd, 0x3e, 0xa6, 0xec, 0xf8, + 0x02, 0xae, 0x2a, 0xbe, 0x72, 0xca, 0x45, 0xfd, 0xa4, 0x08, 0xce, 0xa6, 0x53, 0x30, 0x51, 0x07, + 0x95, 0xb1, 0x75, 0x70, 0x07, 0x2c, 0xdd, 0x1b, 0x18, 0xc6, 0x21, 0x1f, 0x43, 0xac, 0x18, 0xfa, + 0x15, 0xf4, 0xdb, 0xc2, 0x72, 0xe9, 0x07, 0x12, 0x0c, 0x92, 0x5a, 0x66, 0xcb, 0x62, 0xe9, 0x49, + 0xcb, 0x62, 0xf9, 0x04, 0x65, 0x51, 0xae, 0x2c, 0x8a, 0x27, 0x52, 0x16, 0x13, 0xd7, 0x44, 0xc9, + 0x76, 0x35, 0xf6, 0x0c, 0x3f, 0x52, 0xc0, 0xb2, 0xfc, 0xf8, 0x0c, 0x0d, 0x30, 0x6f, 0xe2, 0x07, + 0xf1, 0xcb, 0x8b, 0x71, 0x05, 0x63, 0x40, 0x75, 0xa3, 0xe9, 0x7f, 0xdd, 0x69, 0xbe, 0x6d, 0xd1, + 0x6d, 0xb7, 0x4b, 0x5d, 0xdd, 0xea, 0xfb, 0x05, 0x76, 0x33, 0xc1, 0x85, 0x52, 0xdc, 0xf0, 0x2e, + 0xa8, 0x98, 0xf8, 0x41, 0x77, 0xe0, 0xf6, 0x83, 0x42, 0x78, 0xfc, 0xf7, 0xf0, 0xd8, 0xdf, 0x14, + 0x2c, 0x28, 0xe4, 0x53, 0x3f, 0x57, 0xc0, 0x4a, 0x4e, 0x05, 0xfd, 0x1a, 0x8d, 0xf2, 0x23, 0x05, + 0x5c, 0x4c, 0x8c, 0x92, 0x65, 0x24, 0xb9, 0x37, 0x30, 0x78, 0x72, 0x0a, 0xc1, 0x72, 0x15, 0xcc, + 0x3a, 0xd8, 0xa5, 0x7a, 0xa8, 0x74, 0xcb, 0xed, 0xb9, 0xd1, 0xb0, 0x31, 0xbb, 0x13, 0x34, 0xa2, + 0xa8, 0x5f, 0x32, 0x37, 0x85, 0xa7, 0x37, 0x37, 0xea, 0xaf, 0x0b, 0xa0, 0x1a, 0x73, 0xf9, 0x14, + 0xa4, 0xca, 0x9b, 0x09, 0xa9, 0x22, 0xfd, 0xf8, 0x13, 0x9f, 0xc3, 0x3c, 0xad, 0xb2, 0x99, 0xd2, + 0x2a, 0xdf, 0x1d, 0x47, 0x74, 0xb4, 0x58, 0xf9, 0x57, 0x01, 0x2c, 0xc5, 0xd0, 0x91, 0x5a, 0xf9, + 0x7e, 0x42, 0xad, 0xac, 0xa6, 0xd4, 0x4a, 0x4d, 0x66, 0xf3, 0x8d, 0x5c, 0x19, 0x2f, 0x57, 0xfe, + 0xa4, 0x80, 0x85, 0xd8, 0xdc, 0x9d, 0x82, 0x5e, 0x59, 0x4f, 0xea, 0x95, 0xc6, 0x98, 0x78, 0xc9, + 0x11, 0x2c, 0xb7, 0xc0, 0x62, 0x0c, 0xb4, 0xed, 0xf6, 0x74, 0x0b, 0x1b, 0x1e, 0x7c, 0x0e, 0x94, + 0x3d, 0x8a, 0x5d, 0x1a, 0x64, 0x77, 0x60, 0xdb, 0x65, 0x8d, 0xc8, 0xef, 0x53, 0xff, 0xad, 0x80, + 0x56, 0xcc, 0x78, 0x87, 0xb8, 0x9e, 0xee, 0x51, 0x62, 0xd1, 0x3b, 0xb6, 0x31, 0x30, 0x49, 0xc7, + 0xc0, 0xba, 0x89, 0x08, 0x6b, 0xd0, 0x6d, 0x6b, 0xc7, 0x36, 0x74, 0xed, 0x10, 0x62, 0x50, 0xfd, + 0x60, 0x9f, 0x58, 0xeb, 0xc4, 0x20, 0x54, 0x7c, 0xde, 0x98, 0x6d, 0xbf, 0x11, 0xdc, 0xf6, 0xbf, + 0x1b, 0x75, 0x3d, 0x1e, 0x36, 0x56, 0x27, 0x61, 0xe4, 0xc1, 0x19, 0xe7, 0x84, 0x3f, 0x03, 0x80, + 0x3d, 0x76, 0x35, 0x1c, 0x7c, 0xec, 0x98, 0x6d, 0xbf, 0x1e, 0xa4, 0xf0, 0xbb, 0x61, 0xcf, 0xb1, + 0x5e, 0x10, 0x63, 0x54, 0x7f, 0x57, 0x49, 0x2c, 0xf5, 0xd7, 0xfe, 0x6e, 0xe9, 0x17, 0x60, 0xe9, + 0x20, 0x9a, 0x9d, 0x00, 0xc0, 0x34, 0x11, 0x8b, 0xbb, 0xe7, 0xa5, 0xf4, 0xb2, 0x79, 0x8d, 0x94, + 0xd8, 0x1d, 0x09, 0x1d, 0x92, 0xbe, 0x04, 0xbe, 0x0c, 0xaa, 0x4c, 0xcb, 0xe8, 0x1a, 0xd9, 0xc2, + 0x66, 0x90, 0x86, 0xe1, 0xd7, 0xa1, 0x6e, 0xd4, 0x85, 0xe2, 0x38, 0xb8, 0x0f, 0x16, 0x1d, 0xbb, + 0xb7, 0x89, 0x2d, 0xdc, 0x27, 0xac, 0x42, 0xfb, 0x4b, 0xc9, 0x6f, 0x9d, 0x66, 0xdb, 0xaf, 0x04, + 0x37, 0x0a, 0x3b, 0x59, 0x08, 0x3b, 0xb1, 0x49, 0x9a, 0x79, 0x10, 0xc8, 0x28, 0xa1, 0x99, 0xf9, + 0x98, 0x39, 0x93, 0xf9, 0x07, 0x88, 0x2c, 0x1f, 0x4f, 0xf8, 0x39, 0x33, 0xef, 
0x3e, 0xad, 0x72, + 0xa2, 0xfb, 0x34, 0xc9, 0x89, 0x63, 0xf6, 0x98, 0x27, 0x8e, 0x4f, 0x14, 0x70, 0xc9, 0x99, 0x20, + 0x8d, 0x6a, 0x80, 0x4f, 0x4b, 0x67, 0xcc, 0xb4, 0x4c, 0x92, 0x91, 0xed, 0xd5, 0xd1, 0xb0, 0x71, + 0x69, 0x12, 0x24, 0x9a, 0xc8, 0x35, 0x96, 0x34, 0xb6, 0xd8, 0xf9, 0x6a, 0x55, 0xee, 0xe6, 0x95, + 0x31, 0x6e, 0x06, 0x1b, 0xa5, 0x9f, 0x87, 0xc1, 0x13, 0x0a, 0x69, 0xd4, 0x0f, 0xcb, 0xe0, 0x5c, + 0xa6, 0x5a, 0x7f, 0x89, 0x77, 0x85, 0x99, 0x13, 0x4d, 0xf1, 0x18, 0x27, 0x9a, 0x35, 0xb0, 0x20, + 0x3e, 0x30, 0xa7, 0x0e, 0x44, 0x61, 0x98, 0x74, 0x92, 0xdd, 0x28, 0x8d, 0x97, 0xdd, 0x55, 0x96, + 0x8f, 0x79, 0x57, 0x19, 0xf7, 0x42, 0xfc, 0x2f, 0xca, 0xcf, 0xe7, 0xac, 0x17, 0xe2, 0xef, 0x51, + 0x69, 0x3c, 0x7c, 0x3d, 0x48, 0xd6, 0x90, 0x61, 0x86, 0x33, 0xa4, 0xb2, 0x2f, 0x24, 0x48, 0xa1, + 0x9f, 0xe8, 0x23, 0xea, 0x7b, 0x92, 0x8f, 0xa8, 0xab, 0x63, 0xc2, 0x6c, 0xf2, 0x6b, 0x49, 0xe9, + 0xa1, 0xb3, 0x7a, 0xfc, 0x43, 0xa7, 0xfa, 0x17, 0x05, 0x3c, 0x93, 0xbb, 0x4d, 0xc1, 0xb5, 0x84, + 0x7a, 0xbc, 0x96, 0x52, 0x8f, 0xcf, 0xe6, 0x1a, 0xc6, 0x24, 0xa4, 0x29, 0xbf, 0xb1, 0xbc, 0x39, + 0xf6, 0xc6, 0x52, 0x72, 0x12, 0x19, 0x7f, 0x75, 0xd9, 0x7e, 0xf5, 0xe1, 0xa3, 0xfa, 0xd4, 0xa7, + 0x8f, 0xea, 0x53, 0x9f, 0x3d, 0xaa, 0x4f, 0xfd, 0x72, 0x54, 0x57, 0x1e, 0x8e, 0xea, 0xca, 0xa7, + 0xa3, 0xba, 0xf2, 0xd9, 0xa8, 0xae, 0xfc, 0x7d, 0x54, 0x57, 0x7e, 0xfb, 0x79, 0x7d, 0xea, 0x2e, + 0xcc, 0xfe, 0x2b, 0xf3, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xd3, 0xfa, 0xed, 0x70, 0xaa, 0x29, + 0x00, 0x00, } func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto index a7a7e7c547..d864f2eebf 100644 --- a/vendor/k8s.io/api/apps/v1/generated.proto +++ b/vendor/k8s.io/api/apps/v1/generated.proto @@ -43,10 +43,10 @@ message ControllerRevision { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Data is the serialized representation of the state. - optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; // Revision indicates the revision of the state represented by Data. optional int64 revision = 3; @@ -56,7 +56,7 @@ message ControllerRevision { message ControllerRevisionList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of ControllerRevisions repeated ControllerRevision items = 2; @@ -67,7 +67,7 @@ message DaemonSet { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The desired behavior of this daemon set. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -93,7 +93,7 @@ message DaemonSetCondition { // Last time the condition transitioned from one status to another. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -109,7 +109,7 @@ message DaemonSetList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // A list of daemon sets. repeated DaemonSet items = 2; @@ -121,7 +121,7 @@ message DaemonSetSpec { // Must match in order to be controlled. // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; // An object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node @@ -129,7 +129,7 @@ message DaemonSetSpec { // selector is specified). // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - optional k8s.io.api.core.v1.PodTemplateSpec template = 2; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 2; // An update strategy to replace existing DaemonSet pods with new pods. // +optional @@ -200,6 +200,8 @@ message DaemonSetStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated DaemonSetCondition conditions = 10; } @@ -223,7 +225,7 @@ message Deployment { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the Deployment. // +optional @@ -243,10 +245,10 @@ message DeploymentCondition { optional string status = 2; // The last time this condition was updated. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; // Last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; // The reason for the condition's last transition. optional string reason = 4; @@ -259,7 +261,7 @@ message DeploymentCondition { message DeploymentList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Deployments. repeated Deployment items = 2; @@ -275,11 +277,11 @@ message DeploymentSpec { // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // It must match the pod template's labels. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. 
// The only allowed template.spec.restartPolicy value is "Always". - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. // +optional @@ -341,6 +343,8 @@ message DeploymentStatus { // Represents the latest available observations of a deployment's current state. // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated DeploymentCondition conditions = 6; // Count of hash collisions for the Deployment. The Deployment controller uses this @@ -372,7 +376,7 @@ message ReplicaSet { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the ReplicaSet. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -398,7 +402,7 @@ message ReplicaSetCondition { // The last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -414,7 +418,7 @@ message ReplicaSetList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ReplicaSets. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller @@ -440,13 +444,13 @@ message ReplicaSetSpec { // Label keys and values that must match in order to be controlled by this replica set. // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template is the object that describes the pod that will be created if // insufficient replicas are detected. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; } // ReplicaSetStatus represents the current status of a ReplicaSet. @@ -475,6 +479,8 @@ message ReplicaSetStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated ReplicaSetCondition conditions = 6; } @@ -495,7 +501,7 @@ message RollingUpdateDaemonSet { // 70% of original number of DaemonSet pods are available at all times during // the update. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of nodes with an existing available DaemonSet pod that // can have an updated DaemonSet pod during during an update. 
@@ -516,7 +522,7 @@ message RollingUpdateDaemonSet { // so resource intensive daemonsets should take into account that they may // cause evictions during disruption. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // Spec to control the desired behavior of rolling update. @@ -532,7 +538,7 @@ message RollingUpdateDeployment { // that the total number of pods available at all times during the update is at // least 70% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of pods that can be scheduled above the desired number of // pods. @@ -546,7 +552,7 @@ message RollingUpdateDeployment { // new ReplicaSet can be scaled up further, ensuring that total number of pods running // at any time during the update is at most 130% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. @@ -566,7 +572,7 @@ message RollingUpdateStatefulSetStrategy { // Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it // will be counted towards MaxUnavailable. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2; } // StatefulSet represents a set of pods with consistent identities. @@ -580,7 +586,7 @@ message StatefulSet { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the desired identities of pods in this set. // +optional @@ -602,7 +608,7 @@ message StatefulSetCondition { // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -618,7 +624,7 @@ message StatefulSetList { // Standard list's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of stateful sets. repeated StatefulSet items = 2; @@ -669,7 +675,7 @@ message StatefulSetSpec { // selector is a label query over pods that should match the replica count. // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet @@ -678,7 +684,7 @@ message StatefulSetSpec { // -. 
For example, a pod in a StatefulSet named // "web" with index number "3" would be named "web-3". // The only allowed template.spec.restartPolicy value is "Always". - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // volumeClaimTemplates is a list of claims that pods are allowed to reference. // The StatefulSet controller is responsible for mapping network identities to @@ -688,7 +694,8 @@ message StatefulSetSpec { // any volumes in the template, with the same name. // TODO: Define the behavior if a claim already exists with the same name. // +optional - repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; + // +listType=atomic + repeated .k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; // serviceName is the name of the service that governs this StatefulSet. // This service must exist before the StatefulSet, and is responsible for @@ -731,14 +738,13 @@ message StatefulSetSpec { // policy allows the lifecycle to be altered, for example by deleting persistent // volume claims when their stateful set is deleted, or when their pod is scaled // down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, - // which is alpha. +optional + // which is beta. + // +optional optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10; // ordinals controls the numbering of replica indices in a StatefulSet. The // default ordinals behavior assigns a "0" index to the first replica and - // increments the index by one for each additional replica requested. Using - // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is beta. + // increments the index by one for each additional replica requested. // +optional optional StatefulSetOrdinals ordinals = 11; } @@ -782,6 +788,8 @@ message StatefulSetStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated StatefulSetCondition conditions = 10; // Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset. diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go index 644d368fe4..e942cd526e 100644 --- a/vendor/k8s.io/api/apps/v1/types.go +++ b/vendor/k8s.io/api/apps/v1/types.go @@ -37,6 +37,7 @@ const ( // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +genclient:method=ApplyScale,verb=apply,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // StatefulSet represents a set of pods with consistent identities. // Identities are defined as: @@ -211,6 +212,7 @@ type StatefulSetSpec struct { // any volumes in the template, with the same name. // TODO: Define the behavior if a claim already exists with the same name. // +optional + // +listType=atomic VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"` // serviceName is the name of the service that governs this StatefulSet. @@ -254,14 +256,13 @@ type StatefulSetSpec struct { // policy allows the lifecycle to be altered, for example by deleting persistent // volume claims when their stateful set is deleted, or when their pod is scaled // down. 
This requires the StatefulSetAutoDeletePVC feature gate to be enabled, - // which is alpha. +optional + // which is beta. + // +optional PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"` // ordinals controls the numbering of replica indices in a StatefulSet. The // default ordinals behavior assigns a "0" index to the first replica and - // increments the index by one for each additional replica requested. Using - // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is beta. + // increments the index by one for each additional replica requested. // +optional Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"` } @@ -305,6 +306,8 @@ type StatefulSetStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` // Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset. @@ -332,6 +335,7 @@ type StatefulSetCondition struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // StatefulSetList is a collection of StatefulSets. type StatefulSetList struct { @@ -350,6 +354,7 @@ type StatefulSetList struct { // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +genclient:method=ApplyScale,verb=apply,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // Deployment enables declarative updates for Pods and ReplicaSets. type Deployment struct { @@ -507,6 +512,8 @@ type DeploymentStatus struct { // Represents the latest available observations of a deployment's current state. // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` // Count of hash collisions for the Deployment. The Deployment controller uses this @@ -550,6 +557,7 @@ type DeploymentCondition struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // DeploymentList is a list of Deployments. type DeploymentList struct { @@ -714,6 +722,8 @@ type DaemonSetStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` } @@ -740,6 +750,7 @@ type DaemonSetCondition struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // DaemonSet represents the configuration of a daemon set. type DaemonSet struct { @@ -771,6 +782,7 @@ const ( ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // DaemonSetList is a collection of daemon sets. 
type DaemonSetList struct { @@ -789,6 +801,7 @@ type DaemonSetList struct { // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +genclient:method=ApplyScale,verb=apply,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // ReplicaSet ensures that a specified number of pod replicas are running at any given time. type ReplicaSet struct { @@ -816,6 +829,7 @@ type ReplicaSet struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // ReplicaSetList is a collection of ReplicaSets. type ReplicaSetList struct { @@ -884,6 +898,8 @@ type ReplicaSetStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` } @@ -916,6 +932,7 @@ type ReplicaSetCondition struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // ControllerRevision implements an immutable snapshot of state data. Clients // are responsible for serializing and deserializing the objects that contain @@ -941,6 +958,7 @@ type ControllerRevision struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // ControllerRevisionList is a resource containing a list of ControllerRevision objects. type ControllerRevisionList struct { diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go index 6676da0640..f3e221a0e9 100644 --- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -354,8 +354,8 @@ var map_StatefulSetSpec = map[string]string{ "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "persistentVolumeClaimRetentionPolicy": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. +optional", - "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. 
The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.", + "persistentVolumeClaimRetentionPolicy": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is beta.", + "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/apps/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..34a036b625 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,82 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ControllerRevision) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ControllerRevisionList) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DaemonSet) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *DaemonSetList) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Deployment) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DeploymentList) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ReplicaSet) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ReplicaSetList) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *StatefulSet) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *StatefulSetList) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go index 2f1e7c00a1..76e755b4a3 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto +// source: k8s.io/api/apps/v1beta1/generated.proto package v1beta1 @@ -52,7 +52,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } func (*ControllerRevision) ProtoMessage() {} func (*ControllerRevision) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{0} + return fileDescriptor_2747f709ac7c95e7, []int{0} } func (m *ControllerRevision) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -80,7 +80,7 @@ var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } func (*ControllerRevisionList) ProtoMessage() {} func (*ControllerRevisionList) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{1} + return fileDescriptor_2747f709ac7c95e7, []int{1} } func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -108,7 +108,7 @@ var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo func (m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} func (*Deployment) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{2} + return fileDescriptor_2747f709ac7c95e7, []int{2} } func (m *Deployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -136,7 +136,7 @@ var xxx_messageInfo_Deployment proto.InternalMessageInfo func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (*DeploymentCondition) ProtoMessage() {} func (*DeploymentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{3} + return fileDescriptor_2747f709ac7c95e7, []int{3} } func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -164,7 +164,7 @@ var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (*DeploymentList) ProtoMessage() {} func (*DeploymentList) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{4} + return fileDescriptor_2747f709ac7c95e7, []int{4} } func (m *DeploymentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -192,7 +192,7 @@ var xxx_messageInfo_DeploymentList proto.InternalMessageInfo func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } func (*DeploymentRollback) ProtoMessage() {} func (*DeploymentRollback) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{5} + return fileDescriptor_2747f709ac7c95e7, []int{5} } func (m *DeploymentRollback) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -220,7 +220,7 @@ var xxx_messageInfo_DeploymentRollback proto.InternalMessageInfo func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (*DeploymentSpec) ProtoMessage() {} func (*DeploymentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{6} + return fileDescriptor_2747f709ac7c95e7, []int{6} } func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -248,7 +248,7 @@ var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (*DeploymentStatus) ProtoMessage() {} func (*DeploymentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{7} + return fileDescriptor_2747f709ac7c95e7, []int{7} } func (m 
*DeploymentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -276,7 +276,7 @@ var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} func (*DeploymentStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{8} + return fileDescriptor_2747f709ac7c95e7, []int{8} } func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -304,7 +304,7 @@ var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } func (*RollbackConfig) ProtoMessage() {} func (*RollbackConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{9} + return fileDescriptor_2747f709ac7c95e7, []int{9} } func (m *RollbackConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -332,7 +332,7 @@ var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{10} + return fileDescriptor_2747f709ac7c95e7, []int{10} } func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -360,7 +360,7 @@ var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{11} + return fileDescriptor_2747f709ac7c95e7, []int{11} } func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -388,7 +388,7 @@ var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{12} + return fileDescriptor_2747f709ac7c95e7, []int{12} } func (m *Scale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -416,7 +416,7 @@ var xxx_messageInfo_Scale proto.InternalMessageInfo func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} func (*ScaleSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{13} + return fileDescriptor_2747f709ac7c95e7, []int{13} } func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -444,7 +444,7 @@ var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} func (*ScaleStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{14} + return fileDescriptor_2747f709ac7c95e7, []int{14} } func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -472,7 +472,7 @@ var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo func (m *StatefulSet) Reset() { *m = StatefulSet{} } func (*StatefulSet) ProtoMessage() {} func (*StatefulSet) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{15} + return fileDescriptor_2747f709ac7c95e7, []int{15} } func (m *StatefulSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -500,7 +500,7 @@ var xxx_messageInfo_StatefulSet 
proto.InternalMessageInfo func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } func (*StatefulSetCondition) ProtoMessage() {} func (*StatefulSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{16} + return fileDescriptor_2747f709ac7c95e7, []int{16} } func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -528,7 +528,7 @@ var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } func (*StatefulSetList) ProtoMessage() {} func (*StatefulSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{17} + return fileDescriptor_2747f709ac7c95e7, []int{17} } func (m *StatefulSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -556,7 +556,7 @@ var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo func (m *StatefulSetOrdinals) Reset() { *m = StatefulSetOrdinals{} } func (*StatefulSetOrdinals) ProtoMessage() {} func (*StatefulSetOrdinals) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{18} + return fileDescriptor_2747f709ac7c95e7, []int{18} } func (m *StatefulSetOrdinals) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -586,7 +586,7 @@ func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Reset() { } func (*StatefulSetPersistentVolumeClaimRetentionPolicy) ProtoMessage() {} func (*StatefulSetPersistentVolumeClaimRetentionPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{19} + return fileDescriptor_2747f709ac7c95e7, []int{19} } func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -614,7 +614,7 @@ var xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy proto.Intern func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } func (*StatefulSetSpec) ProtoMessage() {} func (*StatefulSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{20} + return fileDescriptor_2747f709ac7c95e7, []int{20} } func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -642,7 +642,7 @@ var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } func (*StatefulSetStatus) ProtoMessage() {} func (*StatefulSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{21} + return fileDescriptor_2747f709ac7c95e7, []int{21} } func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -670,7 +670,7 @@ var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{22} + return fileDescriptor_2747f709ac7c95e7, []int{22} } func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,138 +724,137 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto", fileDescriptor_2a07313e8f66e805) + proto.RegisterFile("k8s.io/api/apps/v1beta1/generated.proto", fileDescriptor_2747f709ac7c95e7) } -var fileDescriptor_2a07313e8f66e805 = []byte{ - // 2034 bytes of a gzipped FileDescriptorProto +var fileDescriptor_2747f709ac7c95e7 = []byte{ + // 2018 bytes of a gzipped 
FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0xcd, 0x6f, 0x1b, 0xc7, - 0x15, 0xd7, 0x52, 0xa2, 0x44, 0x3d, 0x45, 0x54, 0x3c, 0x52, 0x2d, 0x46, 0x69, 0x25, 0x61, 0x63, - 0x24, 0x4a, 0x62, 0x2f, 0x63, 0x25, 0x0d, 0x12, 0xbb, 0x75, 0x2b, 0x4a, 0x6e, 0xec, 0x40, 0x8a, + 0x15, 0xf7, 0x52, 0xa2, 0x44, 0x3d, 0x45, 0x94, 0x3d, 0x52, 0x2d, 0x46, 0x69, 0x25, 0x61, 0x63, + 0xc4, 0x4a, 0x62, 0x2f, 0x63, 0x25, 0x0d, 0x12, 0xbb, 0x75, 0x21, 0x4a, 0x6e, 0xec, 0x40, 0x8a, 0x94, 0x91, 0x64, 0xa3, 0xe9, 0x07, 0x32, 0x22, 0xc7, 0xd4, 0x46, 0xfb, 0x85, 0xdd, 0x21, 0x63, 0xa2, 0x97, 0xfe, 0x01, 0x05, 0xd2, 0x73, 0xff, 0x8a, 0xf6, 0xd4, 0xa2, 0x45, 0x2f, 0x3d, 0x14, 0x3e, 0x06, 0xbd, 0x34, 0x27, 0xa2, 0x66, 0xae, 0xed, 0xad, 0xbd, 0x18, 0x28, 0x50, 0xcc, 0xec, - 0xec, 0xf7, 0xae, 0xb4, 0x2c, 0x60, 0x01, 0xed, 0x8d, 0x3b, 0xef, 0xbd, 0xdf, 0x7b, 0xf3, 0xe6, - 0xbd, 0x37, 0xef, 0x0d, 0xe1, 0xfb, 0x67, 0xef, 0x79, 0x9a, 0x6e, 0x37, 0xcf, 0x7a, 0x27, 0xd4, - 0xb5, 0x28, 0xa3, 0x5e, 0xb3, 0x4f, 0xad, 0x8e, 0xed, 0x36, 0x25, 0x81, 0x38, 0x7a, 0x93, 0x38, - 0x8e, 0xd7, 0xec, 0xdf, 0x3c, 0xa1, 0x8c, 0xdc, 0x6c, 0x76, 0xa9, 0x45, 0x5d, 0xc2, 0x68, 0x47, - 0x73, 0x5c, 0x9b, 0xd9, 0x68, 0xd9, 0x67, 0xd4, 0x88, 0xa3, 0x6b, 0x9c, 0x51, 0x93, 0x8c, 0x2b, - 0x37, 0xba, 0x3a, 0x3b, 0xed, 0x9d, 0x68, 0x6d, 0xdb, 0x6c, 0x76, 0xed, 0xae, 0xdd, 0x14, 0xfc, - 0x27, 0xbd, 0x47, 0xe2, 0x4b, 0x7c, 0x88, 0x5f, 0x3e, 0xce, 0x8a, 0x1a, 0x53, 0xd8, 0xb6, 0x5d, - 0xda, 0xec, 0x67, 0x74, 0xad, 0xbc, 0x13, 0xf1, 0x98, 0xa4, 0x7d, 0xaa, 0x5b, 0xd4, 0x1d, 0x34, - 0x9d, 0xb3, 0x2e, 0x5f, 0xf0, 0x9a, 0x26, 0x65, 0x24, 0x4f, 0xaa, 0x59, 0x24, 0xe5, 0xf6, 0x2c, - 0xa6, 0x9b, 0x34, 0x23, 0xf0, 0xee, 0x45, 0x02, 0x5e, 0xfb, 0x94, 0x9a, 0x24, 0x23, 0xf7, 0x76, - 0x91, 0x5c, 0x8f, 0xe9, 0x46, 0x53, 0xb7, 0x98, 0xc7, 0xdc, 0xb4, 0x90, 0xfa, 0x2f, 0x05, 0xd0, - 0xb6, 0x6d, 0x31, 0xd7, 0x36, 0x0c, 0xea, 0x62, 0xda, 0xd7, 0x3d, 0xdd, 0xb6, 0xd0, 0xa7, 0x50, - 0xe3, 0xfb, 0xe9, 0x10, 0x46, 0x1a, 0xca, 0xba, 0xb2, 0x31, 0xb7, 0xf9, 0x96, 0x16, 0x79, 0x3a, - 0x84, 0xd7, 0x9c, 0xb3, 0x2e, 0x5f, 0xf0, 0x34, 0xce, 0xad, 0xf5, 0x6f, 0x6a, 0xfb, 0x27, 0x9f, - 0xd1, 0x36, 0xdb, 0xa3, 0x8c, 0xb4, 0xd0, 0x93, 0xe1, 0xda, 0xc4, 0x68, 0xb8, 0x06, 0xd1, 0x1a, - 0x0e, 0x51, 0xd1, 0x3e, 0x4c, 0x09, 0xf4, 0x8a, 0x40, 0xbf, 0x51, 0x88, 0x2e, 0x37, 0xad, 0x61, - 0xf2, 0xf9, 0xdd, 0xc7, 0x8c, 0x5a, 0xdc, 0xbc, 0xd6, 0x0b, 0x12, 0x7a, 0x6a, 0x87, 0x30, 0x82, - 0x05, 0x10, 0xba, 0x0e, 0x35, 0x57, 0x9a, 0xdf, 0x98, 0x5c, 0x57, 0x36, 0x26, 0x5b, 0x2f, 0x4a, - 0xae, 0x5a, 0xb0, 0x2d, 0x1c, 0x72, 0xa8, 0x4f, 0x14, 0xb8, 0x9a, 0xdd, 0xf7, 0xae, 0xee, 0x31, - 0xf4, 0xe3, 0xcc, 0xde, 0xb5, 0x72, 0x7b, 0xe7, 0xd2, 0x62, 0xe7, 0xa1, 0xe2, 0x60, 0x25, 0xb6, - 0xef, 0x03, 0xa8, 0xea, 0x8c, 0x9a, 0x5e, 0xa3, 0xb2, 0x3e, 0xb9, 0x31, 0xb7, 0xf9, 0xa6, 0x56, - 0x10, 0xc0, 0x5a, 0xd6, 0xba, 0xd6, 0xbc, 0xc4, 0xad, 0xde, 0xe7, 0x08, 0xd8, 0x07, 0x52, 0x7f, - 0x51, 0x01, 0xd8, 0xa1, 0x8e, 0x61, 0x0f, 0x4c, 0x6a, 0xb1, 0x4b, 0x38, 0xba, 0xfb, 0x30, 0xe5, - 0x39, 0xb4, 0x2d, 0x8f, 0xee, 0xb5, 0xc2, 0x1d, 0x44, 0x46, 0x1d, 0x3a, 0xb4, 0x1d, 0x1d, 0x1a, - 0xff, 0xc2, 0x02, 0x02, 0x7d, 0x0c, 0xd3, 0x1e, 0x23, 0xac, 0xe7, 0x89, 0x23, 0x9b, 0xdb, 0x7c, - 0xbd, 0x0c, 0x98, 0x10, 0x68, 0xd5, 0x25, 0xdc, 0xb4, 0xff, 0x8d, 0x25, 0x90, 0xfa, 0xd7, 0x49, - 0x58, 0x8c, 0x98, 0xb7, 0x6d, 0xab, 0xa3, 0x33, 0x1e, 0xd2, 0xb7, 0x61, 0x8a, 0x0d, 0x1c, 0x2a, - 0x7c, 0x32, 0xdb, 0x7a, 0x2d, 0x30, 0xe6, 0x68, 0xe0, 0xd0, 0x67, 0xc3, 0xb5, 0xe5, 0x1c, 0x11, - 0x4e, 0xc2, 
0x42, 0x08, 0xed, 0x86, 0x76, 0x56, 0x84, 0xf8, 0x3b, 0x49, 0xe5, 0xcf, 0x86, 0x6b, - 0x39, 0x05, 0x44, 0x0b, 0x91, 0x92, 0x26, 0xa2, 0xcf, 0xa0, 0x6e, 0x10, 0x8f, 0x1d, 0x3b, 0x1d, - 0xc2, 0xe8, 0x91, 0x6e, 0xd2, 0xc6, 0xb4, 0xd8, 0xfd, 0x1b, 0xe5, 0x0e, 0x8a, 0x4b, 0xb4, 0xae, - 0x4a, 0x0b, 0xea, 0xbb, 0x09, 0x24, 0x9c, 0x42, 0x46, 0x7d, 0x40, 0x7c, 0xe5, 0xc8, 0x25, 0x96, - 0xe7, 0xef, 0x8a, 0xeb, 0x9b, 0x19, 0x5b, 0xdf, 0x8a, 0xd4, 0x87, 0x76, 0x33, 0x68, 0x38, 0x47, - 0x03, 0x7a, 0x15, 0xa6, 0x5d, 0x4a, 0x3c, 0xdb, 0x6a, 0x4c, 0x09, 0x8f, 0x85, 0xc7, 0x85, 0xc5, - 0x2a, 0x96, 0x54, 0xf4, 0x3a, 0xcc, 0x98, 0xd4, 0xf3, 0x48, 0x97, 0x36, 0xaa, 0x82, 0x71, 0x41, - 0x32, 0xce, 0xec, 0xf9, 0xcb, 0x38, 0xa0, 0xab, 0xbf, 0x53, 0xa0, 0x1e, 0x1d, 0xd3, 0x25, 0xe4, - 0xea, 0xbd, 0x64, 0xae, 0xbe, 0x52, 0x22, 0x38, 0x0b, 0x72, 0xf4, 0xef, 0x15, 0x40, 0x11, 0x13, - 0xb6, 0x0d, 0xe3, 0x84, 0xb4, 0xcf, 0xd0, 0x3a, 0x4c, 0x59, 0xc4, 0x0c, 0x62, 0x32, 0x4c, 0x90, - 0x8f, 0x88, 0x49, 0xb1, 0xa0, 0xa0, 0x2f, 0x14, 0x40, 0x3d, 0x71, 0x9a, 0x9d, 0x2d, 0xcb, 0xb2, - 0x19, 0xe1, 0x0e, 0x0e, 0x0c, 0xda, 0x2e, 0x61, 0x50, 0xa0, 0x4b, 0x3b, 0xce, 0xa0, 0xdc, 0xb5, - 0x98, 0x3b, 0x88, 0x0e, 0x36, 0xcb, 0x80, 0x73, 0x54, 0xa3, 0x1f, 0x01, 0xb8, 0x12, 0xf3, 0xc8, - 0x96, 0x69, 0x5b, 0x5c, 0x03, 0x02, 0xf5, 0xdb, 0xb6, 0xf5, 0x48, 0xef, 0x46, 0x85, 0x05, 0x87, - 0x10, 0x38, 0x06, 0xb7, 0x72, 0x17, 0x96, 0x0b, 0xec, 0x44, 0x2f, 0xc2, 0xe4, 0x19, 0x1d, 0xf8, - 0xae, 0xc2, 0xfc, 0x27, 0x5a, 0x82, 0x6a, 0x9f, 0x18, 0x3d, 0xea, 0xe7, 0x24, 0xf6, 0x3f, 0x6e, - 0x55, 0xde, 0x53, 0xd4, 0x5f, 0x57, 0xe3, 0x91, 0xc2, 0xeb, 0x0d, 0xda, 0xe0, 0xd7, 0x83, 0x63, - 0xe8, 0x6d, 0xe2, 0x09, 0x8c, 0x6a, 0xeb, 0x05, 0xff, 0x6a, 0xf0, 0xd7, 0x70, 0x48, 0x45, 0x3f, - 0x81, 0x9a, 0x47, 0x0d, 0xda, 0x66, 0xb6, 0x2b, 0x4b, 0xdc, 0xdb, 0x25, 0x63, 0x8a, 0x9c, 0x50, - 0xe3, 0x50, 0x8a, 0xfa, 0xf0, 0xc1, 0x17, 0x0e, 0x21, 0xd1, 0xc7, 0x50, 0x63, 0xd4, 0x74, 0x0c, - 0xc2, 0xa8, 0xf4, 0x5e, 0x22, 0xae, 0x78, 0xed, 0xe0, 0x60, 0x07, 0x76, 0xe7, 0x48, 0xb2, 0x89, - 0xea, 0x19, 0xc6, 0x69, 0xb0, 0x8a, 0x43, 0x18, 0xf4, 0x43, 0xa8, 0x79, 0x8c, 0xdf, 0xea, 0xdd, - 0x81, 0xc8, 0xb6, 0xf3, 0xae, 0x95, 0x78, 0x1d, 0xf5, 0x45, 0x22, 0xe8, 0x60, 0x05, 0x87, 0x70, - 0x68, 0x0b, 0x16, 0x4c, 0xdd, 0xc2, 0x94, 0x74, 0x06, 0x87, 0xb4, 0x6d, 0x5b, 0x1d, 0x4f, 0xa4, - 0x69, 0xb5, 0xb5, 0x2c, 0x85, 0x16, 0xf6, 0x92, 0x64, 0x9c, 0xe6, 0x47, 0xbb, 0xb0, 0x14, 0x5c, - 0xbb, 0xf7, 0x74, 0x8f, 0xd9, 0xee, 0x60, 0x57, 0x37, 0x75, 0x26, 0x6a, 0x5e, 0xb5, 0xd5, 0x18, - 0x0d, 0xd7, 0x96, 0x70, 0x0e, 0x1d, 0xe7, 0x4a, 0xf1, 0xba, 0xe2, 0x90, 0x9e, 0x47, 0x3b, 0xa2, - 0x86, 0xd5, 0xa2, 0xba, 0x72, 0x20, 0x56, 0xb1, 0xa4, 0xa2, 0x87, 0x89, 0x30, 0xad, 0x8d, 0x17, - 0xa6, 0xf5, 0xe2, 0x10, 0x45, 0xc7, 0xb0, 0xec, 0xb8, 0x76, 0xd7, 0xa5, 0x9e, 0xb7, 0x43, 0x49, - 0xc7, 0xd0, 0x2d, 0x1a, 0x78, 0x66, 0x56, 0xec, 0xe8, 0xe5, 0xd1, 0x70, 0x6d, 0xf9, 0x20, 0x9f, - 0x05, 0x17, 0xc9, 0xaa, 0x7f, 0x9a, 0x82, 0x17, 0xd3, 0x77, 0x1c, 0xfa, 0x10, 0x90, 0x7d, 0xe2, - 0x51, 0xb7, 0x4f, 0x3b, 0x1f, 0xf8, 0x8d, 0x1b, 0xef, 0x6e, 0x14, 0xd1, 0xdd, 0x84, 0x79, 0xbb, - 0x9f, 0xe1, 0xc0, 0x39, 0x52, 0x7e, 0x7f, 0x24, 0x13, 0xa0, 0x22, 0x0c, 0x8d, 0xf5, 0x47, 0x99, - 0x24, 0xd8, 0x82, 0x05, 0x99, 0xfb, 0x01, 0x51, 0x04, 0x6b, 0xec, 0xdc, 0x8f, 0x93, 0x64, 0x9c, - 0xe6, 0x47, 0xb7, 0x61, 0xde, 0xe5, 0x71, 0x10, 0x02, 0xcc, 0x08, 0x80, 0x6f, 0x48, 0x80, 0x79, - 0x1c, 0x27, 0xe2, 0x24, 0x2f, 0xfa, 0x00, 0xae, 0x90, 0x3e, 0xd1, 0x0d, 0x72, 0x62, 0xd0, 0x10, - 0x60, 0x4a, 0x00, 0xbc, 0x24, 0x01, 
0xae, 0x6c, 0xa5, 0x19, 0x70, 0x56, 0x06, 0xed, 0xc1, 0x62, - 0xcf, 0xca, 0x42, 0xf9, 0x41, 0xfc, 0xb2, 0x84, 0x5a, 0x3c, 0xce, 0xb2, 0xe0, 0x3c, 0x39, 0xf4, - 0x29, 0x40, 0x3b, 0xb8, 0xd5, 0xbd, 0xc6, 0xb4, 0x28, 0xc3, 0xd7, 0x4b, 0x24, 0x5b, 0xd8, 0x0a, - 0x44, 0x25, 0x30, 0x5c, 0xf2, 0x70, 0x0c, 0x13, 0xdd, 0x82, 0x7a, 0xdb, 0x36, 0x0c, 0x11, 0xf9, - 0xdb, 0x76, 0xcf, 0x62, 0x22, 0x78, 0xab, 0x2d, 0xc4, 0x2f, 0xfb, 0xed, 0x04, 0x05, 0xa7, 0x38, - 0xd5, 0x3f, 0x28, 0xf1, 0x6b, 0x26, 0x48, 0x67, 0x74, 0x2b, 0xd1, 0xfa, 0xbc, 0x9a, 0x6a, 0x7d, - 0xae, 0x66, 0x25, 0x62, 0x9d, 0x8f, 0x0e, 0xf3, 0x3c, 0xf8, 0x75, 0xab, 0xeb, 0x1f, 0xb8, 0x2c, - 0x89, 0x6f, 0x9d, 0x9b, 0x4a, 0x21, 0x77, 0xec, 0x62, 0xbc, 0x22, 0xce, 0x3c, 0x4e, 0xc4, 0x49, - 0x64, 0xf5, 0x0e, 0xd4, 0x93, 0x79, 0x98, 0xe8, 0xe9, 0x95, 0x0b, 0x7b, 0xfa, 0xaf, 0x15, 0x58, - 0x2e, 0xd0, 0x8e, 0x0c, 0xa8, 0x9b, 0xe4, 0x71, 0xec, 0x98, 0x2f, 0xec, 0x8d, 0xf9, 0xd4, 0xa4, - 0xf9, 0x53, 0x93, 0x76, 0xdf, 0x62, 0xfb, 0xee, 0x21, 0x73, 0x75, 0xab, 0xeb, 0x9f, 0xc3, 0x5e, - 0x02, 0x0b, 0xa7, 0xb0, 0xd1, 0x27, 0x50, 0x33, 0xc9, 0xe3, 0xc3, 0x9e, 0xdb, 0xcd, 0xf3, 0x57, - 0x39, 0x3d, 0xe2, 0xfe, 0xd8, 0x93, 0x28, 0x38, 0xc4, 0x53, 0xff, 0xa8, 0xc0, 0x7a, 0x62, 0x97, - 0xbc, 0x56, 0xd0, 0x47, 0x3d, 0xe3, 0x90, 0x46, 0x27, 0xfe, 0x26, 0xcc, 0x3a, 0xc4, 0x65, 0x7a, - 0x58, 0x2f, 0xaa, 0xad, 0xf9, 0xd1, 0x70, 0x6d, 0xf6, 0x20, 0x58, 0xc4, 0x11, 0x3d, 0xc7, 0x37, - 0x95, 0xe7, 0xe7, 0x1b, 0xf5, 0xdf, 0x0a, 0x54, 0x0f, 0xdb, 0xc4, 0xa0, 0x97, 0x30, 0xa9, 0xec, - 0x24, 0x26, 0x15, 0xb5, 0x30, 0x66, 0x85, 0x3d, 0x85, 0x43, 0xca, 0x6e, 0x6a, 0x48, 0xb9, 0x76, - 0x01, 0xce, 0xf9, 0xf3, 0xc9, 0xfb, 0x30, 0x1b, 0xaa, 0x4b, 0x14, 0x65, 0xe5, 0xa2, 0xa2, 0xac, - 0xfe, 0xaa, 0x02, 0x73, 0x31, 0x15, 0xe3, 0x49, 0x73, 0x77, 0xc7, 0xfa, 0x1a, 0x5e, 0xb8, 0x36, - 0xcb, 0x6c, 0x44, 0x0b, 0x7a, 0x18, 0xbf, 0x5d, 0x8c, 0x9a, 0x85, 0x6c, 0x6b, 0x73, 0x07, 0xea, - 0x8c, 0xb8, 0x5d, 0xca, 0x02, 0x9a, 0x70, 0xd8, 0x6c, 0x34, 0xab, 0x1c, 0x25, 0xa8, 0x38, 0xc5, - 0xbd, 0x72, 0x1b, 0xe6, 0x13, 0xca, 0xc6, 0xea, 0xf9, 0xbe, 0xe0, 0xce, 0x89, 0x52, 0xe1, 0x12, - 0xa2, 0xeb, 0xc3, 0x44, 0x74, 0x6d, 0x14, 0x3b, 0x33, 0x96, 0xa0, 0x45, 0x31, 0x86, 0x53, 0x31, - 0xf6, 0x46, 0x29, 0xb4, 0xf3, 0x23, 0xed, 0x1f, 0x15, 0x58, 0x8a, 0x71, 0x47, 0xa3, 0xf0, 0x77, - 0x12, 0xf7, 0xc1, 0x46, 0xea, 0x3e, 0x68, 0xe4, 0xc9, 0x3c, 0xb7, 0x59, 0x38, 0x7f, 0x3e, 0x9d, - 0xfc, 0x5f, 0x9c, 0x4f, 0x7f, 0xaf, 0xc0, 0x42, 0xcc, 0x77, 0x97, 0x30, 0xa0, 0xde, 0x4f, 0x0e, - 0xa8, 0xd7, 0xca, 0x04, 0x4d, 0xc1, 0x84, 0x7a, 0x0b, 0x16, 0x63, 0x4c, 0xfb, 0x6e, 0x47, 0xb7, - 0x88, 0xe1, 0xa1, 0x57, 0xa0, 0xea, 0x31, 0xe2, 0xb2, 0xe0, 0x12, 0x09, 0x64, 0x0f, 0xf9, 0x22, - 0xf6, 0x69, 0xea, 0x3f, 0x15, 0x68, 0xc6, 0x84, 0x0f, 0xa8, 0xeb, 0xe9, 0x1e, 0xa3, 0x16, 0x7b, - 0x60, 0x1b, 0x3d, 0x93, 0x6e, 0x1b, 0x44, 0x37, 0x31, 0xe5, 0x0b, 0xba, 0x6d, 0x1d, 0xd8, 0x86, - 0xde, 0x1e, 0x20, 0x02, 0x73, 0x9f, 0x9f, 0x52, 0x6b, 0x87, 0x1a, 0x94, 0xd1, 0x8e, 0x0c, 0xc5, - 0xef, 0x49, 0xf8, 0xb9, 0x87, 0x11, 0xe9, 0xd9, 0x70, 0x6d, 0xa3, 0x0c, 0xa2, 0x88, 0xd0, 0x38, - 0x26, 0xfa, 0x29, 0x00, 0xff, 0x14, 0xb5, 0xac, 0x23, 0x83, 0xf5, 0x4e, 0x90, 0xd1, 0x0f, 0x43, - 0xca, 0x58, 0x0a, 0x62, 0x88, 0xea, 0x6f, 0x6a, 0x89, 0xf3, 0xfe, 0xbf, 0x1f, 0x33, 0x7f, 0x06, - 0x4b, 0xfd, 0xc8, 0x3b, 0x01, 0x03, 0x6f, 0xcb, 0x27, 0xd3, 0x4f, 0x77, 0x21, 0x7c, 0x9e, 0x5f, - 0x5b, 0xdf, 0x94, 0x4a, 0x96, 0x1e, 0xe4, 0xc0, 0xe1, 0x5c, 0x25, 0xe8, 0xdb, 0x30, 0xc7, 0x47, - 0x1a, 0xbd, 0x4d, 0x3f, 0x22, 0x66, 0x90, 0x8b, 0x8b, 0x41, 
0xbc, 0x1c, 0x46, 0x24, 0x1c, 0xe7, - 0x43, 0xa7, 0xb0, 0xe8, 0xd8, 0x9d, 0x3d, 0x62, 0x91, 0x2e, 0xe5, 0x8d, 0xa0, 0x7f, 0x94, 0x62, - 0xf6, 0x9c, 0x6d, 0xbd, 0x1b, 0xb4, 0xff, 0x07, 0x59, 0x96, 0x67, 0x7c, 0x88, 0xcb, 0x2e, 0x8b, - 0x20, 0xc8, 0x83, 0x44, 0x2e, 0xd4, 0x7b, 0xb2, 0x1f, 0x93, 0xa3, 0xb8, 0xff, 0xc8, 0xb6, 0x59, - 0x26, 0x29, 0x8f, 0x13, 0x92, 0xd1, 0x85, 0x99, 0x5c, 0xc7, 0x29, 0x0d, 0x85, 0xa3, 0x75, 0xed, - 0xbf, 0x1a, 0xad, 0x73, 0x66, 0xfd, 0xd9, 0x31, 0x67, 0xfd, 0x3f, 0x2b, 0x70, 0xcd, 0x29, 0x91, - 0x4b, 0x0d, 0x10, 0xbe, 0xb9, 0x57, 0xc6, 0x37, 0x65, 0x72, 0xb3, 0xb5, 0x31, 0x1a, 0xae, 0x5d, - 0x2b, 0xc3, 0x89, 0x4b, 0xd9, 0x87, 0x1e, 0x40, 0xcd, 0x96, 0x35, 0xb0, 0x31, 0x27, 0x6c, 0xbd, - 0x5e, 0xc6, 0xd6, 0xa0, 0x6e, 0xfa, 0x69, 0x19, 0x7c, 0xe1, 0x10, 0x4b, 0xfd, 0x6d, 0x15, 0xae, - 0x64, 0x6e, 0x70, 0xf4, 0x83, 0x73, 0xe6, 0xfc, 0xab, 0xcf, 0x6d, 0xc6, 0xcf, 0x0c, 0xe8, 0x93, - 0x63, 0x0c, 0xe8, 0x5b, 0xb0, 0xd0, 0xee, 0xb9, 0x2e, 0xb5, 0x58, 0x6a, 0x3c, 0x0f, 0x83, 0x65, - 0x3b, 0x49, 0xc6, 0x69, 0xfe, 0xbc, 0x37, 0x86, 0xea, 0x98, 0x6f, 0x0c, 0x71, 0x2b, 0xe4, 0x9c, - 0xe8, 0xa7, 0x76, 0xd6, 0x0a, 0x39, 0x2e, 0xa6, 0xf9, 0x79, 0xd3, 0xea, 0xa3, 0x86, 0x08, 0x33, - 0xc9, 0xa6, 0xf5, 0x38, 0x41, 0xc5, 0x29, 0xee, 0x9c, 0x79, 0x7d, 0xb6, 0xec, 0xbc, 0x8e, 0x48, - 0xe2, 0x35, 0x01, 0x44, 0x1d, 0xbd, 0x51, 0x26, 0xce, 0xca, 0x3f, 0x27, 0xe4, 0x3e, 0xa4, 0xcc, - 0x8d, 0xff, 0x90, 0xa2, 0xfe, 0x45, 0x81, 0x97, 0x0a, 0x2b, 0x16, 0xda, 0x4a, 0xb4, 0x94, 0x37, - 0x52, 0x2d, 0xe5, 0xb7, 0x0a, 0x05, 0x63, 0x7d, 0xa5, 0x9b, 0xff, 0xd2, 0xf0, 0x7e, 0xb9, 0x97, - 0x86, 0x9c, 0x29, 0xf8, 0xe2, 0x27, 0x87, 0xd6, 0x77, 0x9f, 0x3c, 0x5d, 0x9d, 0xf8, 0xf2, 0xe9, - 0xea, 0xc4, 0x57, 0x4f, 0x57, 0x27, 0x7e, 0x3e, 0x5a, 0x55, 0x9e, 0x8c, 0x56, 0x95, 0x2f, 0x47, - 0xab, 0xca, 0x57, 0xa3, 0x55, 0xe5, 0x6f, 0xa3, 0x55, 0xe5, 0x97, 0x5f, 0xaf, 0x4e, 0x7c, 0xb2, - 0x5c, 0xf0, 0x6f, 0xf4, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xaf, 0x0a, 0xd6, 0x32, 0xc0, 0x1e, + 0xec, 0xf7, 0xae, 0xb4, 0x2c, 0x60, 0x01, 0xcd, 0x8d, 0x3b, 0xef, 0xbd, 0xdf, 0x7b, 0xf3, 0xe6, + 0xbd, 0x37, 0xef, 0x0d, 0xe1, 0xfa, 0xe9, 0x7b, 0x9e, 0xa6, 0xdb, 0x4d, 0xe2, 0xe8, 0x4d, 0xe2, + 0x38, 0x5e, 0xb3, 0x7f, 0xeb, 0x98, 0x32, 0x72, 0xab, 0xd9, 0xa5, 0x16, 0x75, 0x09, 0xa3, 0x1d, + 0xcd, 0x71, 0x6d, 0x66, 0xa3, 0x25, 0x9f, 0x51, 0x23, 0x8e, 0xae, 0x71, 0x46, 0x4d, 0x32, 0x2e, + 0xdf, 0xec, 0xea, 0xec, 0xa4, 0x77, 0xac, 0xb5, 0x6d, 0xb3, 0xd9, 0xb5, 0xbb, 0x76, 0x53, 0xf0, + 0x1f, 0xf7, 0x1e, 0x8b, 0x2f, 0xf1, 0x21, 0x7e, 0xf9, 0x38, 0xcb, 0x6a, 0x4c, 0x61, 0xdb, 0x76, + 0x69, 0xb3, 0x9f, 0xd1, 0xb5, 0xfc, 0x4e, 0xc4, 0x63, 0x92, 0xf6, 0x89, 0x6e, 0x51, 0x77, 0xd0, + 0x74, 0x4e, 0xbb, 0x7c, 0xc1, 0x6b, 0x9a, 0x94, 0x91, 0x3c, 0xa9, 0x66, 0x91, 0x94, 0xdb, 0xb3, + 0x98, 0x6e, 0xd2, 0x8c, 0xc0, 0xbb, 0xe7, 0x09, 0x78, 0xed, 0x13, 0x6a, 0x92, 0x8c, 0xdc, 0xdb, + 0x45, 0x72, 0x3d, 0xa6, 0x1b, 0x4d, 0xdd, 0x62, 0x1e, 0x73, 0xd3, 0x42, 0xea, 0xbf, 0x15, 0x40, + 0x5b, 0xb6, 0xc5, 0x5c, 0xdb, 0x30, 0xa8, 0x8b, 0x69, 0x5f, 0xf7, 0x74, 0xdb, 0x42, 0x9f, 0x42, + 0x8d, 0xef, 0xa7, 0x43, 0x18, 0x69, 0x28, 0x6b, 0xca, 0xfa, 0xec, 0xc6, 0x5b, 0x5a, 0xe4, 0xe9, + 0x10, 0x5e, 0x73, 0x4e, 0xbb, 0x7c, 0xc1, 0xd3, 0x38, 0xb7, 0xd6, 0xbf, 0xa5, 0xed, 0x1d, 0x7f, + 0x46, 0xdb, 0x6c, 0x97, 0x32, 0xd2, 0x42, 0x4f, 0x87, 0xab, 0x97, 0x46, 0xc3, 0x55, 0x88, 0xd6, + 0x70, 0x88, 0x8a, 0xf6, 0x60, 0x52, 0xa0, 0x57, 0x04, 0xfa, 0xcd, 0x42, 0x74, 0xb9, 0x69, 0x0d, + 0x93, 0xcf, 0xef, 0x3d, 0x61, 0xd4, 0xe2, 0xe6, 0xb5, 0x5e, 0x92, 0xd0, 0x93, 0xdb, 
0x84, 0x11, + 0x2c, 0x80, 0xd0, 0x0d, 0xa8, 0xb9, 0xd2, 0xfc, 0xc6, 0xc4, 0x9a, 0xb2, 0x3e, 0xd1, 0xba, 0x2c, + 0xb9, 0x6a, 0xc1, 0xb6, 0x70, 0xc8, 0xa1, 0x3e, 0x55, 0xe0, 0x6a, 0x76, 0xdf, 0x3b, 0xba, 0xc7, + 0xd0, 0x4f, 0x32, 0x7b, 0xd7, 0xca, 0xed, 0x9d, 0x4b, 0x8b, 0x9d, 0x87, 0x8a, 0x83, 0x95, 0xd8, + 0xbe, 0xf7, 0xa1, 0xaa, 0x33, 0x6a, 0x7a, 0x8d, 0xca, 0xda, 0xc4, 0xfa, 0xec, 0xc6, 0x9b, 0x5a, + 0x41, 0x00, 0x6b, 0x59, 0xeb, 0x5a, 0x73, 0x12, 0xb7, 0xfa, 0x80, 0x23, 0x60, 0x1f, 0x48, 0xfd, + 0x65, 0x05, 0x60, 0x9b, 0x3a, 0x86, 0x3d, 0x30, 0xa9, 0xc5, 0x2e, 0xe0, 0xe8, 0x1e, 0xc0, 0xa4, + 0xe7, 0xd0, 0xb6, 0x3c, 0xba, 0xeb, 0x85, 0x3b, 0x88, 0x8c, 0x3a, 0x70, 0x68, 0x3b, 0x3a, 0x34, + 0xfe, 0x85, 0x05, 0x04, 0xfa, 0x18, 0xa6, 0x3c, 0x46, 0x58, 0xcf, 0x13, 0x47, 0x36, 0xbb, 0xf1, + 0x7a, 0x19, 0x30, 0x21, 0xd0, 0xaa, 0x4b, 0xb8, 0x29, 0xff, 0x1b, 0x4b, 0x20, 0xf5, 0x6f, 0x13, + 0xb0, 0x10, 0x31, 0x6f, 0xd9, 0x56, 0x47, 0x67, 0x3c, 0xa4, 0xef, 0xc0, 0x24, 0x1b, 0x38, 0x54, + 0xf8, 0x64, 0xa6, 0x75, 0x3d, 0x30, 0xe6, 0x70, 0xe0, 0xd0, 0xe7, 0xc3, 0xd5, 0xa5, 0x1c, 0x11, + 0x4e, 0xc2, 0x42, 0x08, 0xed, 0x84, 0x76, 0x56, 0x84, 0xf8, 0x3b, 0x49, 0xe5, 0xcf, 0x87, 0xab, + 0x39, 0x05, 0x44, 0x0b, 0x91, 0x92, 0x26, 0xa2, 0xcf, 0xa0, 0x6e, 0x10, 0x8f, 0x1d, 0x39, 0x1d, + 0xc2, 0xe8, 0xa1, 0x6e, 0xd2, 0xc6, 0x94, 0xd8, 0xfd, 0x1b, 0xe5, 0x0e, 0x8a, 0x4b, 0xb4, 0xae, + 0x4a, 0x0b, 0xea, 0x3b, 0x09, 0x24, 0x9c, 0x42, 0x46, 0x7d, 0x40, 0x7c, 0xe5, 0xd0, 0x25, 0x96, + 0xe7, 0xef, 0x8a, 0xeb, 0x9b, 0x1e, 0x5b, 0xdf, 0xb2, 0xd4, 0x87, 0x76, 0x32, 0x68, 0x38, 0x47, + 0x03, 0x7a, 0x0d, 0xa6, 0x5c, 0x4a, 0x3c, 0xdb, 0x6a, 0x4c, 0x0a, 0x8f, 0x85, 0xc7, 0x85, 0xc5, + 0x2a, 0x96, 0x54, 0xf4, 0x3a, 0x4c, 0x9b, 0xd4, 0xf3, 0x48, 0x97, 0x36, 0xaa, 0x82, 0x71, 0x5e, + 0x32, 0x4e, 0xef, 0xfa, 0xcb, 0x38, 0xa0, 0xab, 0xbf, 0x57, 0xa0, 0x1e, 0x1d, 0xd3, 0x05, 0xe4, + 0xea, 0xfd, 0x64, 0xae, 0xbe, 0x5a, 0x22, 0x38, 0x0b, 0x72, 0xf4, 0x1f, 0x15, 0x40, 0x11, 0x13, + 0xb6, 0x0d, 0xe3, 0x98, 0xb4, 0x4f, 0xd1, 0x1a, 0x4c, 0x5a, 0xc4, 0x0c, 0x62, 0x32, 0x4c, 0x90, + 0x8f, 0x88, 0x49, 0xb1, 0xa0, 0xa0, 0x2f, 0x14, 0x40, 0x3d, 0x71, 0x9a, 0x9d, 0x4d, 0xcb, 0xb2, + 0x19, 0xe1, 0x0e, 0x0e, 0x0c, 0xda, 0x2a, 0x61, 0x50, 0xa0, 0x4b, 0x3b, 0xca, 0xa0, 0xdc, 0xb3, + 0x98, 0x3b, 0x88, 0x0e, 0x36, 0xcb, 0x80, 0x73, 0x54, 0xa3, 0x1f, 0x03, 0xb8, 0x12, 0xf3, 0xd0, + 0x96, 0x69, 0x5b, 0x5c, 0x03, 0x02, 0xf5, 0x5b, 0xb6, 0xf5, 0x58, 0xef, 0x46, 0x85, 0x05, 0x87, + 0x10, 0x38, 0x06, 0xb7, 0x7c, 0x0f, 0x96, 0x0a, 0xec, 0x44, 0x97, 0x61, 0xe2, 0x94, 0x0e, 0x7c, + 0x57, 0x61, 0xfe, 0x13, 0x2d, 0x42, 0xb5, 0x4f, 0x8c, 0x1e, 0xf5, 0x73, 0x12, 0xfb, 0x1f, 0xb7, + 0x2b, 0xef, 0x29, 0xea, 0x6f, 0xaa, 0xf1, 0x48, 0xe1, 0xf5, 0x06, 0xad, 0xf3, 0xeb, 0xc1, 0x31, + 0xf4, 0x36, 0xf1, 0x04, 0x46, 0xb5, 0xf5, 0x92, 0x7f, 0x35, 0xf8, 0x6b, 0x38, 0xa4, 0xa2, 0x9f, + 0x42, 0xcd, 0xa3, 0x06, 0x6d, 0x33, 0xdb, 0x95, 0x25, 0xee, 0xed, 0x92, 0x31, 0x45, 0x8e, 0xa9, + 0x71, 0x20, 0x45, 0x7d, 0xf8, 0xe0, 0x0b, 0x87, 0x90, 0xe8, 0x63, 0xa8, 0x31, 0x6a, 0x3a, 0x06, + 0x61, 0x54, 0x7a, 0x2f, 0x11, 0x57, 0xbc, 0x76, 0x70, 0xb0, 0x7d, 0xbb, 0x73, 0x28, 0xd9, 0x44, + 0xf5, 0x0c, 0xe3, 0x34, 0x58, 0xc5, 0x21, 0x0c, 0xfa, 0x11, 0xd4, 0x3c, 0xc6, 0x6f, 0xf5, 0xee, + 0x40, 0x64, 0xdb, 0x59, 0xd7, 0x4a, 0xbc, 0x8e, 0xfa, 0x22, 0x11, 0x74, 0xb0, 0x82, 0x43, 0x38, + 0xb4, 0x09, 0xf3, 0xa6, 0x6e, 0x61, 0x4a, 0x3a, 0x83, 0x03, 0xda, 0xb6, 0xad, 0x8e, 0x27, 0xd2, + 0xb4, 0xda, 0x5a, 0x92, 0x42, 0xf3, 0xbb, 0x49, 0x32, 0x4e, 0xf3, 0xa3, 0x1d, 0x58, 0x0c, 0xae, + 0xdd, 0xfb, 
0xba, 0xc7, 0x6c, 0x77, 0xb0, 0xa3, 0x9b, 0x3a, 0x13, 0x35, 0xaf, 0xda, 0x6a, 0x8c, + 0x86, 0xab, 0x8b, 0x38, 0x87, 0x8e, 0x73, 0xa5, 0x78, 0x5d, 0x71, 0x48, 0xcf, 0xa3, 0x1d, 0x51, + 0xc3, 0x6a, 0x51, 0x5d, 0xd9, 0x17, 0xab, 0x58, 0x52, 0xd1, 0xa3, 0x44, 0x98, 0xd6, 0xc6, 0x0b, + 0xd3, 0x7a, 0x71, 0x88, 0xa2, 0x23, 0x58, 0x72, 0x5c, 0xbb, 0xeb, 0x52, 0xcf, 0xdb, 0xa6, 0xa4, + 0x63, 0xe8, 0x16, 0x0d, 0x3c, 0x33, 0x23, 0x76, 0xf4, 0xca, 0x68, 0xb8, 0xba, 0xb4, 0x9f, 0xcf, + 0x82, 0x8b, 0x64, 0xd5, 0x3f, 0x4f, 0xc2, 0xe5, 0xf4, 0x1d, 0x87, 0x3e, 0x04, 0x64, 0x1f, 0x7b, + 0xd4, 0xed, 0xd3, 0xce, 0x07, 0x7e, 0xe3, 0xc6, 0xbb, 0x1b, 0x45, 0x74, 0x37, 0x61, 0xde, 0xee, + 0x65, 0x38, 0x70, 0x8e, 0x94, 0xdf, 0x1f, 0xc9, 0x04, 0xa8, 0x08, 0x43, 0x63, 0xfd, 0x51, 0x26, + 0x09, 0x36, 0x61, 0x5e, 0xe6, 0x7e, 0x40, 0x14, 0xc1, 0x1a, 0x3b, 0xf7, 0xa3, 0x24, 0x19, 0xa7, + 0xf9, 0xd1, 0x1d, 0x98, 0x73, 0x79, 0x1c, 0x84, 0x00, 0xd3, 0x02, 0xe0, 0x5b, 0x12, 0x60, 0x0e, + 0xc7, 0x89, 0x38, 0xc9, 0x8b, 0x3e, 0x80, 0x2b, 0xa4, 0x4f, 0x74, 0x83, 0x1c, 0x1b, 0x34, 0x04, + 0x98, 0x14, 0x00, 0x2f, 0x4b, 0x80, 0x2b, 0x9b, 0x69, 0x06, 0x9c, 0x95, 0x41, 0xbb, 0xb0, 0xd0, + 0xb3, 0xb2, 0x50, 0x7e, 0x10, 0xbf, 0x22, 0xa1, 0x16, 0x8e, 0xb2, 0x2c, 0x38, 0x4f, 0x0e, 0x7d, + 0x0a, 0xd0, 0x0e, 0x6e, 0x75, 0xaf, 0x31, 0x25, 0xca, 0xf0, 0x8d, 0x12, 0xc9, 0x16, 0xb6, 0x02, + 0x51, 0x09, 0x0c, 0x97, 0x3c, 0x1c, 0xc3, 0x44, 0xb7, 0xa1, 0xde, 0xb6, 0x0d, 0x43, 0x44, 0xfe, + 0x96, 0xdd, 0xb3, 0x98, 0x08, 0xde, 0x6a, 0x0b, 0xf1, 0xcb, 0x7e, 0x2b, 0x41, 0xc1, 0x29, 0x4e, + 0xf5, 0x8f, 0x4a, 0xfc, 0x9a, 0x09, 0xd2, 0x19, 0xdd, 0x4e, 0xb4, 0x3e, 0xaf, 0xa5, 0x5a, 0x9f, + 0xab, 0x59, 0x89, 0x58, 0xe7, 0xa3, 0xc3, 0x1c, 0x0f, 0x7e, 0xdd, 0xea, 0xfa, 0x07, 0x2e, 0x4b, + 0xe2, 0x5b, 0x67, 0xa6, 0x52, 0xc8, 0x1d, 0xbb, 0x18, 0xaf, 0x88, 0x33, 0x8f, 0x13, 0x71, 0x12, + 0x59, 0xbd, 0x0b, 0xf5, 0x64, 0x1e, 0x26, 0x7a, 0x7a, 0xe5, 0xdc, 0x9e, 0xfe, 0x6b, 0x05, 0x96, + 0x0a, 0xb4, 0x23, 0x03, 0xea, 0x26, 0x79, 0x12, 0x3b, 0xe6, 0x73, 0x7b, 0x63, 0x3e, 0x35, 0x69, + 0xfe, 0xd4, 0xa4, 0x3d, 0xb0, 0xd8, 0x9e, 0x7b, 0xc0, 0x5c, 0xdd, 0xea, 0xfa, 0xe7, 0xb0, 0x9b, + 0xc0, 0xc2, 0x29, 0x6c, 0xf4, 0x09, 0xd4, 0x4c, 0xf2, 0xe4, 0xa0, 0xe7, 0x76, 0xf3, 0xfc, 0x55, + 0x4e, 0x8f, 0xb8, 0x3f, 0x76, 0x25, 0x0a, 0x0e, 0xf1, 0xd4, 0x3f, 0x29, 0xb0, 0x96, 0xd8, 0x25, + 0xaf, 0x15, 0xf4, 0x71, 0xcf, 0x38, 0xa0, 0xd1, 0x89, 0xbf, 0x09, 0x33, 0x0e, 0x71, 0x99, 0x1e, + 0xd6, 0x8b, 0x6a, 0x6b, 0x6e, 0x34, 0x5c, 0x9d, 0xd9, 0x0f, 0x16, 0x71, 0x44, 0xcf, 0xf1, 0x4d, + 0xe5, 0xc5, 0xf9, 0x46, 0xfd, 0x8f, 0x02, 0xd5, 0x83, 0x36, 0x31, 0xe8, 0x05, 0x4c, 0x2a, 0xdb, + 0x89, 0x49, 0x45, 0x2d, 0x8c, 0x59, 0x61, 0x4f, 0xe1, 0x90, 0xb2, 0x93, 0x1a, 0x52, 0xae, 0x9d, + 0x83, 0x73, 0xf6, 0x7c, 0xf2, 0x3e, 0xcc, 0x84, 0xea, 0x12, 0x45, 0x59, 0x39, 0xaf, 0x28, 0xab, + 0xbf, 0xae, 0xc0, 0x6c, 0x4c, 0xc5, 0x78, 0xd2, 0xdc, 0xdd, 0xb1, 0xbe, 0x86, 0x17, 0xae, 0x8d, + 0x32, 0x1b, 0xd1, 0x82, 0x1e, 0xc6, 0x6f, 0x17, 0xa3, 0x66, 0x21, 0xdb, 0xda, 0xdc, 0x85, 0x3a, + 0x23, 0x6e, 0x97, 0xb2, 0x80, 0x26, 0x1c, 0x36, 0x13, 0xcd, 0x2a, 0x87, 0x09, 0x2a, 0x4e, 0x71, + 0x2f, 0xdf, 0x81, 0xb9, 0x84, 0xb2, 0xb1, 0x7a, 0xbe, 0x2f, 0xb8, 0x73, 0xa2, 0x54, 0xb8, 0x80, + 0xe8, 0xfa, 0x30, 0x11, 0x5d, 0xeb, 0xc5, 0xce, 0x8c, 0x25, 0x68, 0x51, 0x8c, 0xe1, 0x54, 0x8c, + 0xbd, 0x51, 0x0a, 0xed, 0xec, 0x48, 0xfb, 0x67, 0x05, 0x16, 0x63, 0xdc, 0xd1, 0x28, 0xfc, 0xbd, + 0xc4, 0x7d, 0xb0, 0x9e, 0xba, 0x0f, 0x1a, 0x79, 0x32, 0x2f, 0x6c, 0x16, 0xce, 0x9f, 0x4f, 0x27, + 0xfe, 0x1f, 0xe7, 0xd3, 0x3f, 0x28, 
0x30, 0x1f, 0xf3, 0xdd, 0x05, 0x0c, 0xa8, 0x0f, 0x92, 0x03, + 0xea, 0xb5, 0x32, 0x41, 0x53, 0x30, 0xa1, 0xde, 0x86, 0x85, 0x18, 0xd3, 0x9e, 0xdb, 0xd1, 0x2d, + 0x62, 0x78, 0xe8, 0x55, 0xa8, 0x7a, 0x8c, 0xb8, 0x2c, 0xb8, 0x44, 0x02, 0xd9, 0x03, 0xbe, 0x88, + 0x7d, 0x9a, 0xfa, 0x2f, 0x05, 0x9a, 0x31, 0xe1, 0x7d, 0xea, 0x7a, 0xba, 0xc7, 0xa8, 0xc5, 0x1e, + 0xda, 0x46, 0xcf, 0xa4, 0x5b, 0x06, 0xd1, 0x4d, 0x4c, 0xf9, 0x82, 0x6e, 0x5b, 0xfb, 0xb6, 0xa1, + 0xb7, 0x07, 0x88, 0xc0, 0xec, 0xe7, 0x27, 0xd4, 0xda, 0xa6, 0x06, 0x65, 0xb4, 0x23, 0x43, 0xf1, + 0x07, 0x12, 0x7e, 0xf6, 0x51, 0x44, 0x7a, 0x3e, 0x5c, 0x5d, 0x2f, 0x83, 0x28, 0x22, 0x34, 0x8e, + 0x89, 0x7e, 0x06, 0xc0, 0x3f, 0x45, 0x2d, 0xeb, 0xc8, 0x60, 0xbd, 0x1b, 0x64, 0xf4, 0xa3, 0x90, + 0x32, 0x96, 0x82, 0x18, 0xa2, 0xfa, 0xdb, 0x5a, 0xe2, 0xbc, 0xbf, 0xf1, 0x63, 0xe6, 0xcf, 0x61, + 0xb1, 0x1f, 0x79, 0x27, 0x60, 0xe0, 0x6d, 0xf9, 0x44, 0xfa, 0xe9, 0x2e, 0x84, 0xcf, 0xf3, 0x6b, + 0xeb, 0xdb, 0x52, 0xc9, 0xe2, 0xc3, 0x1c, 0x38, 0x9c, 0xab, 0x04, 0x7d, 0x17, 0x66, 0xf9, 0x48, + 0xa3, 0xb7, 0xe9, 0x47, 0xc4, 0x0c, 0x72, 0x71, 0x21, 0x88, 0x97, 0x83, 0x88, 0x84, 0xe3, 0x7c, + 0xe8, 0x04, 0x16, 0x1c, 0xbb, 0xb3, 0x4b, 0x2c, 0xd2, 0xa5, 0xbc, 0x11, 0xf4, 0x8f, 0x52, 0xcc, + 0x9e, 0x33, 0xad, 0x77, 0x83, 0xf6, 0x7f, 0x3f, 0xcb, 0xf2, 0x9c, 0x0f, 0x71, 0xd9, 0x65, 0x11, + 0x04, 0x79, 0x90, 0xc8, 0x85, 0x7a, 0x4f, 0xf6, 0x63, 0x72, 0x14, 0xf7, 0x1f, 0xd9, 0x36, 0xca, + 0x24, 0xe5, 0x51, 0x42, 0x32, 0xba, 0x30, 0x93, 0xeb, 0x38, 0xa5, 0xa1, 0x70, 0xb4, 0xae, 0xfd, + 0x4f, 0xa3, 0x75, 0xce, 0xac, 0x3f, 0x33, 0xe6, 0xac, 0xff, 0x17, 0x05, 0xae, 0x39, 0x25, 0x72, + 0xa9, 0x01, 0xc2, 0x37, 0xf7, 0xcb, 0xf8, 0xa6, 0x4c, 0x6e, 0xb6, 0xd6, 0x47, 0xc3, 0xd5, 0x6b, + 0x65, 0x38, 0x71, 0x29, 0xfb, 0xd0, 0x43, 0xa8, 0xd9, 0xb2, 0x06, 0x36, 0x66, 0x85, 0xad, 0x37, + 0xca, 0xd8, 0x1a, 0xd4, 0x4d, 0x3f, 0x2d, 0x83, 0x2f, 0x1c, 0x62, 0xa9, 0xbf, 0xab, 0xc2, 0x95, + 0xcc, 0x0d, 0x8e, 0x7e, 0x78, 0xc6, 0x9c, 0x7f, 0xf5, 0x85, 0xcd, 0xf8, 0x99, 0x01, 0x7d, 0x62, + 0x8c, 0x01, 0x7d, 0x13, 0xe6, 0xdb, 0x3d, 0xd7, 0xa5, 0x16, 0x4b, 0x8d, 0xe7, 0x61, 0xb0, 0x6c, + 0x25, 0xc9, 0x38, 0xcd, 0x9f, 0xf7, 0xc6, 0x50, 0x1d, 0xf3, 0x8d, 0x21, 0x6e, 0x85, 0x9c, 0x13, + 0xfd, 0xd4, 0xce, 0x5a, 0x21, 0xc7, 0xc5, 0x34, 0x3f, 0x6f, 0x5a, 0x7d, 0xd4, 0x10, 0x61, 0x3a, + 0xd9, 0xb4, 0x1e, 0x25, 0xa8, 0x38, 0xc5, 0x9d, 0x33, 0xaf, 0xcf, 0x94, 0x9d, 0xd7, 0x11, 0x49, + 0xbc, 0x26, 0x80, 0xa8, 0xa3, 0x37, 0xcb, 0xc4, 0x59, 0xf9, 0xe7, 0x84, 0xdc, 0x87, 0x94, 0xd9, + 0xf1, 0x1f, 0x52, 0xd4, 0xbf, 0x2a, 0xf0, 0x72, 0x61, 0xc5, 0x42, 0x9b, 0x89, 0x96, 0xf2, 0x66, + 0xaa, 0xa5, 0xfc, 0x4e, 0xa1, 0x60, 0xac, 0xaf, 0x74, 0xf3, 0x5f, 0x1a, 0xde, 0x2f, 0xf7, 0xd2, + 0x90, 0x33, 0x05, 0x9f, 0xff, 0xe4, 0xd0, 0xfa, 0xfe, 0xd3, 0x67, 0x2b, 0x97, 0xbe, 0x7c, 0xb6, + 0x72, 0xe9, 0xab, 0x67, 0x2b, 0x97, 0x7e, 0x31, 0x5a, 0x51, 0x9e, 0x8e, 0x56, 0x94, 0x2f, 0x47, + 0x2b, 0xca, 0x57, 0xa3, 0x15, 0xe5, 0xef, 0xa3, 0x15, 0xe5, 0x57, 0x5f, 0xaf, 0x5c, 0xfa, 0x64, + 0xa9, 0xe0, 0xdf, 0xe8, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xb9, 0xc9, 0xe6, 0x8c, 0xa7, 0x1e, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto index 245ec30f42..4b0fa366cf 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -45,10 +45,10 @@ message ControllerRevision { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // data is the serialized representation of the state. - optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; // revision indicates the revision of the state represented by Data. optional int64 revision = 3; @@ -58,7 +58,7 @@ message ControllerRevision { message ControllerRevisionList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of ControllerRevisions repeated ControllerRevision items = 2; @@ -70,7 +70,7 @@ message ControllerRevisionList { message Deployment { // Standard object metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the Deployment. // +optional @@ -90,10 +90,10 @@ message DeploymentCondition { optional string status = 2; // The last time this condition was updated. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; // Last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; // The reason for the condition's last transition. optional string reason = 4; @@ -106,7 +106,7 @@ message DeploymentCondition { message DeploymentList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Deployments. repeated Deployment items = 2; @@ -136,11 +136,11 @@ message DeploymentSpec { // selector is the label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. // The only allowed template.spec.restartPolicy value is "Always". - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. // +optional @@ -208,6 +208,8 @@ message DeploymentStatus { // Conditions represent the latest available observations of a deployment's current state. // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated DeploymentCondition conditions = 6; // collisionCount is the count of hash collisions for the Deployment. The Deployment controller uses this @@ -252,7 +254,7 @@ message RollingUpdateDeployment { // that the total number of pods available at all times during the update is at // least 70% of desired pods. 
// +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of pods that can be scheduled above the desired number of // pods. @@ -266,7 +268,7 @@ message RollingUpdateDeployment { // new ReplicaSet can be scaled up further, ensuring that total number of pods running // at any time during the update is at most 130% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. @@ -285,14 +287,14 @@ message RollingUpdateStatefulSetStrategy { // Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it // will be counted towards MaxUnavailable. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2; } // Scale represents a scaling request for a resource. message Scale { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional @@ -340,7 +342,7 @@ message ScaleStatus { // map to the same storage identity. message StatefulSet { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the desired identities of pods in this set. // +optional @@ -362,7 +364,7 @@ message StatefulSetCondition { // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -376,7 +378,7 @@ message StatefulSetCondition { // StatefulSetList is a collection of StatefulSets. message StatefulSetList { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; repeated StatefulSet items = 2; } @@ -427,7 +429,7 @@ message StatefulSetSpec { // If empty, defaulted to labels on the pod template. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet @@ -435,7 +437,7 @@ message StatefulSetSpec { // of the StatefulSet. Each pod will be named with the format // -. For example, a pod in a StatefulSet named // "web" with index number "3" would be named "web-3". 
- optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // volumeClaimTemplates is a list of claims that pods are allowed to reference. // The StatefulSet controller is responsible for mapping network identities to @@ -445,7 +447,8 @@ message StatefulSetSpec { // any volumes in the template, with the same name. // TODO: Define the behavior if a claim already exists with the same name. // +optional - repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; + // +listType=atomic + repeated .k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; // serviceName is the name of the service that governs this StatefulSet. // This service must exist before the StatefulSet, and is responsible for @@ -490,9 +493,7 @@ message StatefulSetSpec { // ordinals controls the numbering of replica indices in a StatefulSet. The // default ordinals behavior assigns a "0" index to the first replica and - // increments the index by one for each additional replica requested. Using - // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is beta. + // increments the index by one for each additional replica requested. // +optional optional StatefulSetOrdinals ordinals = 11; } @@ -536,6 +537,8 @@ message StatefulSetStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated StatefulSetCondition conditions = 10; // availableReplicas is the total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go index 59ed9c2ac3..07bfa88c5f 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/vendor/k8s.io/api/apps/v1beta1/types.go @@ -251,6 +251,7 @@ type StatefulSetSpec struct { // any volumes in the template, with the same name. // TODO: Define the behavior if a claim already exists with the same name. // +optional + // +listType=atomic VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"` // serviceName is the name of the service that governs this StatefulSet. @@ -296,9 +297,7 @@ type StatefulSetSpec struct { // ordinals controls the numbering of replica indices in a StatefulSet. The // default ordinals behavior assigns a "0" index to the first replica and - // increments the index by one for each additional replica requested. Using - // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is beta. + // increments the index by one for each additional replica requested. // +optional Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"` } @@ -342,6 +341,8 @@ type StatefulSetStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` // availableReplicas is the total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. @@ -577,6 +578,8 @@ type DeploymentStatus struct { // Conditions represent the latest available observations of a deployment's current state. 
// +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` // collisionCount is the count of hash collisions for the Deployment. The Deployment controller uses this diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go index a62e9869d6..9e7fb1adc2 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -259,7 +259,7 @@ var map_StatefulSetSpec = map[string]string{ "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", "minReadySeconds": "minReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.", - "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.", + "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go index 6dfb4d5d2a..1c3d3be5bc 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto +// source: k8s.io/api/apps/v1beta2/generated.proto package v1beta2 @@ -52,7 +52,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } func (*ControllerRevision) ProtoMessage() {} func (*ControllerRevision) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{0} + return fileDescriptor_c423c016abf485d4, []int{0} } func (m *ControllerRevision) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -80,7 +80,7 @@ var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } func (*ControllerRevisionList) ProtoMessage() {} func (*ControllerRevisionList) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{1} + return fileDescriptor_c423c016abf485d4, []int{1} } func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -108,7 +108,7 @@ var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (*DaemonSet) ProtoMessage() {} func (*DaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{2} + return fileDescriptor_c423c016abf485d4, []int{2} } func (m *DaemonSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -136,7 +136,7 @@ var xxx_messageInfo_DaemonSet proto.InternalMessageInfo func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } func (*DaemonSetCondition) ProtoMessage() {} func (*DaemonSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{3} + return fileDescriptor_c423c016abf485d4, []int{3} } func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -164,7 +164,7 @@ var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } func (*DaemonSetList) ProtoMessage() {} func (*DaemonSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{4} + return fileDescriptor_c423c016abf485d4, []int{4} } func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -192,7 +192,7 @@ var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (*DaemonSetSpec) ProtoMessage() {} func (*DaemonSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{5} + return fileDescriptor_c423c016abf485d4, []int{5} } func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -220,7 +220,7 @@ var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (*DaemonSetStatus) ProtoMessage() {} func (*DaemonSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{6} + return fileDescriptor_c423c016abf485d4, []int{6} } func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -248,7 +248,7 @@ var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } func (*DaemonSetUpdateStrategy) ProtoMessage() {} func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{7} + return fileDescriptor_c423c016abf485d4, []int{7} } func (m *DaemonSetUpdateStrategy) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -276,7 +276,7 @@ var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo func (m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} func (*Deployment) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{8} + return fileDescriptor_c423c016abf485d4, []int{8} } func (m *Deployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -304,7 +304,7 @@ var xxx_messageInfo_Deployment proto.InternalMessageInfo func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (*DeploymentCondition) ProtoMessage() {} func (*DeploymentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{9} + return fileDescriptor_c423c016abf485d4, []int{9} } func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -332,7 +332,7 @@ var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (*DeploymentList) ProtoMessage() {} func (*DeploymentList) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{10} + return fileDescriptor_c423c016abf485d4, []int{10} } func (m *DeploymentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -360,7 +360,7 @@ var xxx_messageInfo_DeploymentList proto.InternalMessageInfo func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (*DeploymentSpec) ProtoMessage() {} func (*DeploymentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{11} + return fileDescriptor_c423c016abf485d4, []int{11} } func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -388,7 +388,7 @@ var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (*DeploymentStatus) ProtoMessage() {} func (*DeploymentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{12} + return fileDescriptor_c423c016abf485d4, []int{12} } func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -416,7 +416,7 @@ var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} func (*DeploymentStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{13} + return fileDescriptor_c423c016abf485d4, []int{13} } func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -444,7 +444,7 @@ var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } func (*ReplicaSet) ProtoMessage() {} func (*ReplicaSet) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{14} + return fileDescriptor_c423c016abf485d4, []int{14} } func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -472,7 +472,7 @@ var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } func (*ReplicaSetCondition) ProtoMessage() {} func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{15} + return fileDescriptor_c423c016abf485d4, []int{15} } func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -500,7 +500,7 @@ var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo func 
(m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } func (*ReplicaSetList) ProtoMessage() {} func (*ReplicaSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{16} + return fileDescriptor_c423c016abf485d4, []int{16} } func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -528,7 +528,7 @@ var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } func (*ReplicaSetSpec) ProtoMessage() {} func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{17} + return fileDescriptor_c423c016abf485d4, []int{17} } func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -556,7 +556,7 @@ var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } func (*ReplicaSetStatus) ProtoMessage() {} func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{18} + return fileDescriptor_c423c016abf485d4, []int{18} } func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -584,7 +584,7 @@ var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (*RollingUpdateDaemonSet) ProtoMessage() {} func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{19} + return fileDescriptor_c423c016abf485d4, []int{19} } func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -612,7 +612,7 @@ var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{20} + return fileDescriptor_c423c016abf485d4, []int{20} } func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -640,7 +640,7 @@ var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{21} + return fileDescriptor_c423c016abf485d4, []int{21} } func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -668,7 +668,7 @@ var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{22} + return fileDescriptor_c423c016abf485d4, []int{22} } func (m *Scale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -696,7 +696,7 @@ var xxx_messageInfo_Scale proto.InternalMessageInfo func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} func (*ScaleSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{23} + return fileDescriptor_c423c016abf485d4, []int{23} } func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +724,7 @@ var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func 
(*ScaleStatus) ProtoMessage() {} func (*ScaleStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{24} + return fileDescriptor_c423c016abf485d4, []int{24} } func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -752,7 +752,7 @@ var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo func (m *StatefulSet) Reset() { *m = StatefulSet{} } func (*StatefulSet) ProtoMessage() {} func (*StatefulSet) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{25} + return fileDescriptor_c423c016abf485d4, []int{25} } func (m *StatefulSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -780,7 +780,7 @@ var xxx_messageInfo_StatefulSet proto.InternalMessageInfo func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } func (*StatefulSetCondition) ProtoMessage() {} func (*StatefulSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{26} + return fileDescriptor_c423c016abf485d4, []int{26} } func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -808,7 +808,7 @@ var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } func (*StatefulSetList) ProtoMessage() {} func (*StatefulSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{27} + return fileDescriptor_c423c016abf485d4, []int{27} } func (m *StatefulSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -836,7 +836,7 @@ var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo func (m *StatefulSetOrdinals) Reset() { *m = StatefulSetOrdinals{} } func (*StatefulSetOrdinals) ProtoMessage() {} func (*StatefulSetOrdinals) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{28} + return fileDescriptor_c423c016abf485d4, []int{28} } func (m *StatefulSetOrdinals) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -866,7 +866,7 @@ func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Reset() { } func (*StatefulSetPersistentVolumeClaimRetentionPolicy) ProtoMessage() {} func (*StatefulSetPersistentVolumeClaimRetentionPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{29} + return fileDescriptor_c423c016abf485d4, []int{29} } func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -894,7 +894,7 @@ var xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy proto.Intern func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } func (*StatefulSetSpec) ProtoMessage() {} func (*StatefulSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{30} + return fileDescriptor_c423c016abf485d4, []int{30} } func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -922,7 +922,7 @@ var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } func (*StatefulSetStatus) ProtoMessage() {} func (*StatefulSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{31} + return fileDescriptor_c423c016abf485d4, []int{31} } func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -950,7 +950,7 @@ var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) 
ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{32} + return fileDescriptor_c423c016abf485d4, []int{32} } func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1013,158 +1013,157 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto", fileDescriptor_42fe616264472f7e) + proto.RegisterFile("k8s.io/api/apps/v1beta2/generated.proto", fileDescriptor_c423c016abf485d4) } -var fileDescriptor_42fe616264472f7e = []byte{ - // 2345 bytes of a gzipped FileDescriptorProto +var fileDescriptor_c423c016abf485d4 = []byte{ + // 2328 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1b, 0xc7, - 0x15, 0xf7, 0xf2, 0x43, 0x26, 0x87, 0x96, 0x64, 0x8f, 0x54, 0x89, 0x91, 0x5b, 0xd2, 0x58, 0x1b, - 0x8e, 0x12, 0xdb, 0xa4, 0xad, 0x7c, 0x20, 0xb1, 0xdb, 0xa4, 0xa2, 0x94, 0xda, 0x0e, 0xf4, 0xc1, - 0x0c, 0x2d, 0x07, 0x0d, 0xfa, 0xe1, 0x11, 0x39, 0xa6, 0x36, 0x5a, 0xee, 0x2e, 0x76, 0x87, 0x8c, - 0x89, 0x5e, 0x7a, 0x2d, 0x50, 0xa0, 0xed, 0xb5, 0xff, 0x44, 0xd1, 0x4b, 0x51, 0x34, 0xe8, 0xa5, - 0x08, 0x02, 0x1f, 0x83, 0x5e, 0x92, 0x13, 0x51, 0x33, 0xa7, 0xa2, 0xe8, 0xad, 0xbd, 0x18, 0x28, - 0x50, 0xcc, 0xec, 0xec, 0xf7, 0xae, 0xb9, 0x54, 0x6c, 0xe5, 0x03, 0xb9, 0x71, 0xe7, 0xbd, 0xf7, - 0x9b, 0x37, 0x33, 0xef, 0xcd, 0xfb, 0xcd, 0x0c, 0xc1, 0x0f, 0x0f, 0x5f, 0xb3, 0x6a, 0x8a, 0x5e, - 0x3f, 0xec, 0xef, 0x13, 0x53, 0x23, 0x94, 0x58, 0xf5, 0x01, 0xd1, 0x3a, 0xba, 0x59, 0x17, 0x02, - 0x6c, 0x28, 0x75, 0x6c, 0x18, 0x56, 0x7d, 0x70, 0x6d, 0x9f, 0x50, 0xbc, 0x56, 0xef, 0x12, 0x8d, - 0x98, 0x98, 0x92, 0x4e, 0xcd, 0x30, 0x75, 0xaa, 0xc3, 0x65, 0x5b, 0xb1, 0x86, 0x0d, 0xa5, 0xc6, - 0x14, 0x6b, 0x42, 0x71, 0xe5, 0x4a, 0x57, 0xa1, 0x07, 0xfd, 0xfd, 0x5a, 0x5b, 0xef, 0xd5, 0xbb, - 0x7a, 0x57, 0xaf, 0x73, 0xfd, 0xfd, 0xfe, 0x7d, 0xfe, 0xc5, 0x3f, 0xf8, 0x2f, 0x1b, 0x67, 0x45, - 0xf6, 0x75, 0xd8, 0xd6, 0x4d, 0x52, 0x1f, 0x5c, 0x0b, 0xf7, 0xb5, 0xf2, 0xb2, 0xa7, 0xd3, 0xc3, - 0xed, 0x03, 0x45, 0x23, 0xe6, 0xb0, 0x6e, 0x1c, 0x76, 0x59, 0x83, 0x55, 0xef, 0x11, 0x8a, 0xe3, - 0xac, 0xea, 0x49, 0x56, 0x66, 0x5f, 0xa3, 0x4a, 0x8f, 0x44, 0x0c, 0x5e, 0x9d, 0x64, 0x60, 0xb5, - 0x0f, 0x48, 0x0f, 0x47, 0xec, 0x5e, 0x4a, 0xb2, 0xeb, 0x53, 0x45, 0xad, 0x2b, 0x1a, 0xb5, 0xa8, - 0x19, 0x36, 0x92, 0xff, 0x2b, 0x01, 0xb8, 0xa1, 0x6b, 0xd4, 0xd4, 0x55, 0x95, 0x98, 0x88, 0x0c, - 0x14, 0x4b, 0xd1, 0x35, 0x78, 0x0f, 0x14, 0xd8, 0x78, 0x3a, 0x98, 0xe2, 0xb2, 0x74, 0x4e, 0x5a, - 0x2d, 0xad, 0x5d, 0xad, 0x79, 0x33, 0xed, 0xc2, 0xd7, 0x8c, 0xc3, 0x2e, 0x6b, 0xb0, 0x6a, 0x4c, - 0xbb, 0x36, 0xb8, 0x56, 0xdb, 0xdd, 0x7f, 0x9f, 0xb4, 0xe9, 0x36, 0xa1, 0xb8, 0x01, 0x1f, 0x8e, - 0xaa, 0x27, 0xc6, 0xa3, 0x2a, 0xf0, 0xda, 0x90, 0x8b, 0x0a, 0x77, 0x41, 0x8e, 0xa3, 0x67, 0x38, - 0xfa, 0x95, 0x44, 0x74, 0x31, 0xe8, 0x1a, 0xc2, 0x1f, 0xbc, 0xf5, 0x80, 0x12, 0x8d, 0xb9, 0xd7, - 0x38, 0x25, 0xa0, 0x73, 0x9b, 0x98, 0x62, 0xc4, 0x81, 0xe0, 0x65, 0x50, 0x30, 0x85, 0xfb, 0xe5, - 0xec, 0x39, 0x69, 0x35, 0xdb, 0x38, 0x2d, 0xb4, 0x0a, 0xce, 0xb0, 0x90, 0xab, 0x21, 0x3f, 0x94, - 0xc0, 0x52, 0x74, 0xdc, 0x5b, 0x8a, 0x45, 0xe1, 0x4f, 0x22, 0x63, 0xaf, 0xa5, 0x1b, 0x3b, 0xb3, - 0xe6, 0x23, 0x77, 0x3b, 0x76, 0x5a, 0x7c, 0xe3, 0x6e, 0x82, 0xbc, 0x42, 0x49, 0xcf, 0x2a, 0x67, - 0xce, 0x65, 0x57, 0x4b, 0x6b, 0x97, 0x6a, 0x09, 0x01, 0x5c, 0x8b, 0x7a, 0xd7, 0x98, 0x15, 0xb8, - 0xf9, 0xdb, 0x0c, 0x01, 0xd9, 0x40, 0xf2, 0xaf, 0x32, 0xa0, 
0xb8, 0x89, 0x49, 0x4f, 0xd7, 0x5a, - 0x84, 0x1e, 0xc3, 0xca, 0xdd, 0x02, 0x39, 0xcb, 0x20, 0x6d, 0xb1, 0x72, 0x17, 0x13, 0x07, 0xe0, - 0xfa, 0xd4, 0x32, 0x48, 0xdb, 0x5b, 0x32, 0xf6, 0x85, 0x38, 0x02, 0x6c, 0x82, 0x19, 0x8b, 0x62, - 0xda, 0xb7, 0xf8, 0x82, 0x95, 0xd6, 0x56, 0x53, 0x60, 0x71, 0xfd, 0xc6, 0x9c, 0x40, 0x9b, 0xb1, - 0xbf, 0x91, 0xc0, 0x91, 0xff, 0x99, 0x01, 0xd0, 0xd5, 0xdd, 0xd0, 0xb5, 0x8e, 0x42, 0x59, 0x38, - 0x5f, 0x07, 0x39, 0x3a, 0x34, 0x08, 0x9f, 0x90, 0x62, 0xe3, 0xa2, 0xe3, 0xca, 0x9d, 0xa1, 0x41, - 0x1e, 0x8f, 0xaa, 0x4b, 0x51, 0x0b, 0x26, 0x41, 0xdc, 0x06, 0x6e, 0xb9, 0x4e, 0x66, 0xb8, 0xf5, - 0xcb, 0xc1, 0xae, 0x1f, 0x8f, 0xaa, 0x31, 0x7b, 0x47, 0xcd, 0x45, 0x0a, 0x3a, 0x08, 0x07, 0x00, - 0xaa, 0xd8, 0xa2, 0x77, 0x4c, 0xac, 0x59, 0x76, 0x4f, 0x4a, 0x8f, 0x88, 0xe1, 0xbf, 0x98, 0x6e, - 0xa1, 0x98, 0x45, 0x63, 0x45, 0x78, 0x01, 0xb7, 0x22, 0x68, 0x28, 0xa6, 0x07, 0x78, 0x11, 0xcc, - 0x98, 0x04, 0x5b, 0xba, 0x56, 0xce, 0xf1, 0x51, 0xb8, 0x13, 0x88, 0x78, 0x2b, 0x12, 0x52, 0xf8, - 0x02, 0x38, 0xd9, 0x23, 0x96, 0x85, 0xbb, 0xa4, 0x9c, 0xe7, 0x8a, 0xf3, 0x42, 0xf1, 0xe4, 0xb6, - 0xdd, 0x8c, 0x1c, 0xb9, 0xfc, 0x27, 0x09, 0xcc, 0xba, 0x33, 0x77, 0x0c, 0x99, 0x73, 0x33, 0x98, - 0x39, 0xf2, 0xe4, 0x60, 0x49, 0x48, 0x98, 0x8f, 0xb2, 0x3e, 0xc7, 0x59, 0x38, 0xc2, 0x9f, 0x82, - 0x82, 0x45, 0x54, 0xd2, 0xa6, 0xba, 0x29, 0x1c, 0x7f, 0x29, 0xa5, 0xe3, 0x78, 0x9f, 0xa8, 0x2d, - 0x61, 0xda, 0x38, 0xc5, 0x3c, 0x77, 0xbe, 0x90, 0x0b, 0x09, 0xdf, 0x01, 0x05, 0x4a, 0x7a, 0x86, - 0x8a, 0x29, 0x11, 0x59, 0x73, 0xde, 0xef, 0x3c, 0x8b, 0x19, 0x06, 0xd6, 0xd4, 0x3b, 0x77, 0x84, - 0x1a, 0x4f, 0x19, 0x77, 0x32, 0x9c, 0x56, 0xe4, 0xc2, 0x40, 0x03, 0xcc, 0xf5, 0x8d, 0x0e, 0xd3, - 0xa4, 0x6c, 0x3b, 0xef, 0x0e, 0x45, 0x0c, 0x5d, 0x9d, 0x3c, 0x2b, 0x7b, 0x01, 0xbb, 0xc6, 0x92, - 0xe8, 0x65, 0x2e, 0xd8, 0x8e, 0x42, 0xf8, 0x70, 0x1d, 0xcc, 0xf7, 0x14, 0x0d, 0x11, 0xdc, 0x19, - 0xb6, 0x48, 0x5b, 0xd7, 0x3a, 0x16, 0x0f, 0xa5, 0x7c, 0x63, 0x59, 0x00, 0xcc, 0x6f, 0x07, 0xc5, - 0x28, 0xac, 0x0f, 0xb7, 0xc0, 0xa2, 0xb3, 0x01, 0xdf, 0x52, 0x2c, 0xaa, 0x9b, 0xc3, 0x2d, 0xa5, - 0xa7, 0xd0, 0xf2, 0x0c, 0xc7, 0x29, 0x8f, 0x47, 0xd5, 0x45, 0x14, 0x23, 0x47, 0xb1, 0x56, 0xf2, - 0xef, 0x66, 0xc0, 0x7c, 0x68, 0x5f, 0x80, 0x77, 0xc1, 0x52, 0xbb, 0x6f, 0x9a, 0x44, 0xa3, 0x3b, - 0xfd, 0xde, 0x3e, 0x31, 0x5b, 0xed, 0x03, 0xd2, 0xe9, 0xab, 0xa4, 0xc3, 0x97, 0x35, 0xdf, 0xa8, - 0x08, 0x5f, 0x97, 0x36, 0x62, 0xb5, 0x50, 0x82, 0x35, 0x7c, 0x1b, 0x40, 0x8d, 0x37, 0x6d, 0x2b, - 0x96, 0xe5, 0x62, 0x66, 0x38, 0xa6, 0x9b, 0x8a, 0x3b, 0x11, 0x0d, 0x14, 0x63, 0xc5, 0x7c, 0xec, - 0x10, 0x4b, 0x31, 0x49, 0x27, 0xec, 0x63, 0x36, 0xe8, 0xe3, 0x66, 0xac, 0x16, 0x4a, 0xb0, 0x86, - 0xaf, 0x80, 0x92, 0xdd, 0x1b, 0x9f, 0x73, 0xb1, 0x38, 0x0b, 0x02, 0xac, 0xb4, 0xe3, 0x89, 0x90, - 0x5f, 0x8f, 0x0d, 0x4d, 0xdf, 0xb7, 0x88, 0x39, 0x20, 0x9d, 0x9b, 0x36, 0x39, 0x60, 0x15, 0x34, - 0xcf, 0x2b, 0xa8, 0x3b, 0xb4, 0xdd, 0x88, 0x06, 0x8a, 0xb1, 0x62, 0x43, 0xb3, 0xa3, 0x26, 0x32, - 0xb4, 0x99, 0xe0, 0xd0, 0xf6, 0x62, 0xb5, 0x50, 0x82, 0x35, 0x8b, 0x3d, 0xdb, 0xe5, 0xf5, 0x01, - 0x56, 0x54, 0xbc, 0xaf, 0x92, 0xf2, 0xc9, 0x60, 0xec, 0xed, 0x04, 0xc5, 0x28, 0xac, 0x0f, 0x6f, - 0x82, 0x33, 0x76, 0xd3, 0x9e, 0x86, 0x5d, 0x90, 0x02, 0x07, 0x79, 0x4e, 0x80, 0x9c, 0xd9, 0x09, - 0x2b, 0xa0, 0xa8, 0x0d, 0xbc, 0x0e, 0xe6, 0xda, 0xba, 0xaa, 0xf2, 0x78, 0xdc, 0xd0, 0xfb, 0x1a, - 0x2d, 0x17, 0x39, 0x0a, 0x64, 0x39, 0xb4, 0x11, 0x90, 0xa0, 0x90, 0x26, 0xfc, 0x39, 0x00, 0x6d, - 0xa7, 0x30, 0x58, 0x65, 0x30, 0x81, 0x01, 0x44, 0xcb, 0x92, 0x57, 0x99, 0xdd, 0x26, 
0x0b, 0xf9, - 0x20, 0xe5, 0x8f, 0x24, 0xb0, 0x9c, 0x90, 0xe8, 0xf0, 0xcd, 0x40, 0x11, 0xbc, 0x14, 0x2a, 0x82, - 0x67, 0x13, 0xcc, 0x7c, 0x95, 0xf0, 0x00, 0xcc, 0x32, 0x42, 0xa2, 0x68, 0x5d, 0x5b, 0x45, 0xec, - 0x65, 0xf5, 0xc4, 0x01, 0x20, 0xbf, 0xb6, 0xb7, 0x2b, 0x9f, 0x19, 0x8f, 0xaa, 0xb3, 0x01, 0x19, - 0x0a, 0x02, 0xcb, 0xbf, 0xce, 0x00, 0xb0, 0x49, 0x0c, 0x55, 0x1f, 0xf6, 0x88, 0x76, 0x1c, 0x9c, - 0xe6, 0x76, 0x80, 0xd3, 0x3c, 0x9f, 0xbc, 0x24, 0xae, 0x53, 0x89, 0xa4, 0xe6, 0x9d, 0x10, 0xa9, - 0x79, 0x21, 0x0d, 0xd8, 0x93, 0x59, 0xcd, 0xa7, 0x59, 0xb0, 0xe0, 0x29, 0x7b, 0xb4, 0xe6, 0x46, - 0x60, 0x45, 0x9f, 0x0f, 0xad, 0xe8, 0x72, 0x8c, 0xc9, 0x33, 0xe3, 0x35, 0xef, 0x83, 0x39, 0xc6, - 0x3a, 0xec, 0xf5, 0xe3, 0x9c, 0x66, 0x66, 0x6a, 0x4e, 0xe3, 0x56, 0xa2, 0xad, 0x00, 0x12, 0x0a, - 0x21, 0x27, 0x70, 0xa8, 0x93, 0x5f, 0x47, 0x0e, 0xf5, 0x67, 0x09, 0xcc, 0x79, 0xcb, 0x74, 0x0c, - 0x24, 0xea, 0x56, 0x90, 0x44, 0x9d, 0x4f, 0x11, 0x9c, 0x09, 0x2c, 0xea, 0xd3, 0x9c, 0xdf, 0x75, - 0x4e, 0xa3, 0x56, 0xd9, 0x11, 0xcc, 0x50, 0x95, 0x36, 0xb6, 0x44, 0xbd, 0x3d, 0x65, 0x1f, 0xbf, - 0xec, 0x36, 0xe4, 0x4a, 0x03, 0x84, 0x2b, 0xf3, 0x6c, 0x09, 0x57, 0xf6, 0xe9, 0x10, 0xae, 0x1f, - 0x83, 0x82, 0xe5, 0x50, 0xad, 0x1c, 0x87, 0xbc, 0x94, 0x2a, 0xb1, 0x05, 0xcb, 0x72, 0xa1, 0x5d, - 0x7e, 0xe5, 0xc2, 0xc5, 0x31, 0xab, 0xfc, 0x97, 0xc9, 0xac, 0x58, 0xa0, 0x1b, 0xb8, 0x6f, 0x91, - 0x0e, 0x4f, 0xaa, 0x82, 0x17, 0xe8, 0x4d, 0xde, 0x8a, 0x84, 0x14, 0xee, 0x81, 0x65, 0xc3, 0xd4, - 0xbb, 0x26, 0xb1, 0xac, 0x4d, 0x82, 0x3b, 0xaa, 0xa2, 0x11, 0x67, 0x00, 0x76, 0x4d, 0x3c, 0x3b, - 0x1e, 0x55, 0x97, 0x9b, 0xf1, 0x2a, 0x28, 0xc9, 0x56, 0xfe, 0x5b, 0x0e, 0x9c, 0x0e, 0xef, 0x8d, - 0x09, 0x34, 0x45, 0x3a, 0x12, 0x4d, 0xb9, 0xec, 0x8b, 0x53, 0x9b, 0xc3, 0xf9, 0xae, 0x0a, 0x22, - 0xb1, 0xba, 0x0e, 0xe6, 0x05, 0x2d, 0x71, 0x84, 0x82, 0xa8, 0xb9, 0xcb, 0xb3, 0x17, 0x14, 0xa3, - 0xb0, 0x3e, 0xbc, 0x01, 0x66, 0x4d, 0xce, 0xbc, 0x1c, 0x00, 0x9b, 0xbd, 0x7c, 0x47, 0x00, 0xcc, - 0x22, 0xbf, 0x10, 0x05, 0x75, 0x19, 0x73, 0xf1, 0x08, 0x89, 0x03, 0x90, 0x0b, 0x32, 0x97, 0xf5, - 0xb0, 0x02, 0x8a, 0xda, 0xc0, 0x6d, 0xb0, 0xd0, 0xd7, 0xa2, 0x50, 0x76, 0xac, 0x9d, 0x15, 0x50, - 0x0b, 0x7b, 0x51, 0x15, 0x14, 0x67, 0x07, 0xef, 0x05, 0xc8, 0xcc, 0x0c, 0xdf, 0x4f, 0x2e, 0xa7, - 0xc8, 0x89, 0xd4, 0x6c, 0x26, 0x86, 0x6a, 0x15, 0xd2, 0x52, 0x2d, 0xf9, 0x43, 0x09, 0xc0, 0x68, - 0x1e, 0x4e, 0xbc, 0x09, 0x88, 0x58, 0xf8, 0x2a, 0xa6, 0x12, 0xcf, 0x7f, 0xae, 0xa6, 0xe4, 0x3f, - 0xde, 0x86, 0x9a, 0x8e, 0x00, 0x89, 0x89, 0x3e, 0x9e, 0x4b, 0x9d, 0xb4, 0x04, 0xc8, 0x73, 0xea, - 0x29, 0x10, 0x20, 0x1f, 0xd8, 0x93, 0x09, 0xd0, 0xbf, 0x32, 0x60, 0xc1, 0x53, 0x4e, 0x4d, 0x80, - 0x62, 0x4c, 0xbe, 0xbd, 0xd8, 0x49, 0x47, 0x4a, 0xbc, 0xa9, 0xfb, 0x2a, 0x91, 0x12, 0xcf, 0xab, - 0x04, 0x52, 0xf2, 0x87, 0x8c, 0xdf, 0xf5, 0x29, 0x49, 0xc9, 0x53, 0xb8, 0xe1, 0xf8, 0xda, 0xf1, - 0x1a, 0xf9, 0xe3, 0x2c, 0x38, 0x1d, 0xce, 0xc3, 0x40, 0x81, 0x94, 0x26, 0x16, 0xc8, 0x26, 0x58, - 0xbc, 0xdf, 0x57, 0xd5, 0x21, 0x1f, 0x83, 0xaf, 0x4a, 0xda, 0xa5, 0xf5, 0xbb, 0xc2, 0x72, 0xf1, - 0x47, 0x31, 0x3a, 0x28, 0xd6, 0x32, 0x5a, 0x2f, 0x73, 0x5f, 0xb4, 0x5e, 0xe6, 0x8f, 0x50, 0x2f, - 0xe3, 0x29, 0x47, 0xf6, 0x48, 0x94, 0x63, 0xba, 0x62, 0x19, 0xb3, 0x71, 0x4d, 0x3c, 0xfa, 0x8f, - 0x25, 0xb0, 0x14, 0x7f, 0xe0, 0x86, 0x2a, 0x98, 0xeb, 0xe1, 0x07, 0xfe, 0x8b, 0x8f, 0x49, 0x45, - 0xa4, 0x4f, 0x15, 0xb5, 0x66, 0x3f, 0x19, 0xd5, 0x6e, 0x6b, 0x74, 0xd7, 0x6c, 0x51, 0x53, 0xd1, - 0xba, 0x76, 0xe5, 0xdd, 0x0e, 0x60, 0xa1, 0x10, 0x36, 0x7c, 0x0f, 0x14, 0x7a, 0xf8, 0x41, 0xab, - 0x6f, 0x76, 
0xe3, 0x2a, 0x64, 0xba, 0x7e, 0x78, 0x02, 0x6c, 0x0b, 0x14, 0xe4, 0xe2, 0xc9, 0x9f, - 0x4b, 0x60, 0x39, 0xa1, 0xaa, 0x7e, 0x83, 0x46, 0xf9, 0x57, 0x09, 0x9c, 0x0b, 0x8c, 0x92, 0xa5, - 0x25, 0xb9, 0xdf, 0x57, 0x79, 0x86, 0x0a, 0x26, 0x73, 0x09, 0x14, 0x0d, 0x6c, 0x52, 0xc5, 0xe5, - 0xc1, 0xf9, 0xc6, 0xec, 0x78, 0x54, 0x2d, 0x36, 0x9d, 0x46, 0xe4, 0xc9, 0x63, 0xe6, 0x26, 0xf3, - 0xec, 0xe6, 0x46, 0xfe, 0x9f, 0x04, 0xf2, 0xad, 0x36, 0x56, 0xc9, 0x31, 0x10, 0x97, 0xcd, 0x00, - 0x71, 0x49, 0x7e, 0x14, 0xe0, 0xfe, 0x24, 0x72, 0x96, 0xad, 0x10, 0x67, 0xb9, 0x30, 0x01, 0xe7, - 0xc9, 0x74, 0xe5, 0x75, 0x50, 0x74, 0xbb, 0x9b, 0x6e, 0x2f, 0x95, 0x7f, 0x9f, 0x01, 0x25, 0x5f, - 0x17, 0x53, 0xee, 0xc4, 0xf7, 0x02, 0xe5, 0x87, 0xed, 0x31, 0x6b, 0x69, 0x06, 0x52, 0x73, 0x4a, - 0xcd, 0x5b, 0x1a, 0x35, 0xfd, 0x67, 0xd5, 0x68, 0x05, 0x7a, 0x03, 0xcc, 0x51, 0x6c, 0x76, 0x09, - 0x75, 0x64, 0x7c, 0xc2, 0x8a, 0xde, 0xdd, 0xcd, 0x9d, 0x80, 0x14, 0x85, 0xb4, 0x57, 0x6e, 0x80, - 0xd9, 0x40, 0x67, 0xf0, 0x34, 0xc8, 0x1e, 0x92, 0xa1, 0xcd, 0xe0, 0x10, 0xfb, 0x09, 0x17, 0x41, - 0x7e, 0x80, 0xd5, 0xbe, 0x1d, 0xa2, 0x45, 0x64, 0x7f, 0x5c, 0xcf, 0xbc, 0x26, 0xc9, 0xbf, 0x61, - 0x93, 0xe3, 0xa5, 0xc2, 0x31, 0x44, 0xd7, 0xdb, 0x81, 0xe8, 0x4a, 0x7e, 0x9f, 0xf4, 0x27, 0x68, - 0x52, 0x8c, 0xa1, 0x50, 0x8c, 0xbd, 0x98, 0x0a, 0xed, 0xc9, 0x91, 0xf6, 0xef, 0x0c, 0x58, 0xf4, - 0x69, 0x7b, 0xcc, 0xf8, 0xfb, 0x01, 0x66, 0xbc, 0x1a, 0x62, 0xc6, 0xe5, 0x38, 0x9b, 0x6f, 0xa9, - 0xf1, 0x64, 0x6a, 0xfc, 0x17, 0x09, 0xcc, 0xfb, 0xe6, 0xee, 0x18, 0xb8, 0xf1, 0xed, 0x20, 0x37, - 0xbe, 0x90, 0x26, 0x68, 0x12, 0xc8, 0xf1, 0x75, 0xb0, 0xe0, 0x53, 0xda, 0x35, 0x3b, 0x8a, 0x86, - 0x55, 0x0b, 0x9e, 0x07, 0x79, 0x8b, 0x62, 0x93, 0x3a, 0x45, 0xc4, 0xb1, 0x6d, 0xb1, 0x46, 0x64, - 0xcb, 0xe4, 0xff, 0x48, 0xa0, 0xee, 0x33, 0x6e, 0x12, 0xd3, 0x52, 0x2c, 0x4a, 0x34, 0x7a, 0x57, - 0x57, 0xfb, 0x3d, 0xb2, 0xa1, 0x62, 0xa5, 0x87, 0x08, 0x6b, 0x50, 0x74, 0xad, 0xa9, 0xab, 0x4a, - 0x7b, 0x08, 0x31, 0x28, 0x7d, 0x70, 0x40, 0xb4, 0x4d, 0xa2, 0x12, 0x2a, 0x5e, 0xe0, 0x8a, 0x8d, - 0x37, 0x9d, 0x07, 0xa9, 0x77, 0x3d, 0xd1, 0xe3, 0x51, 0x75, 0x35, 0x0d, 0x22, 0x8f, 0x50, 0x3f, - 0x26, 0xfc, 0x19, 0x00, 0xec, 0x93, 0xef, 0x65, 0x1d, 0x11, 0xac, 0x6f, 0x38, 0x19, 0xfd, 0xae, - 0x2b, 0x99, 0xaa, 0x03, 0x1f, 0xa2, 0xfc, 0xc7, 0x42, 0x60, 0xbd, 0xbf, 0xf1, 0xb7, 0x9c, 0xbf, - 0x00, 0x8b, 0x03, 0x6f, 0x76, 0x1c, 0x05, 0xc6, 0xbf, 0xb3, 0xe1, 0x93, 0xbc, 0x0b, 0x1f, 0x37, - 0xaf, 0x1e, 0xeb, 0xbf, 0x1b, 0x03, 0x87, 0x62, 0x3b, 0x81, 0xaf, 0x80, 0x12, 0xe3, 0xcd, 0x4a, - 0x9b, 0xec, 0xe0, 0x9e, 0x93, 0x8b, 0xee, 0x03, 0x66, 0xcb, 0x13, 0x21, 0xbf, 0x1e, 0x3c, 0x00, - 0x0b, 0x86, 0xde, 0xd9, 0xc6, 0x1a, 0xee, 0x12, 0x46, 0x04, 0xed, 0xa5, 0xe4, 0x57, 0x9f, 0xc5, - 0xc6, 0xab, 0xce, 0xb5, 0x56, 0x33, 0xaa, 0xf2, 0x78, 0x54, 0x5d, 0x8e, 0x69, 0xe6, 0x41, 0x10, - 0x07, 0x09, 0xcd, 0xc8, 0xa3, 0xbb, 0xfd, 0xe8, 0xb0, 0x96, 0x26, 0x29, 0x8f, 0xf8, 0xec, 0x9e, - 0x74, 0xb3, 0x5b, 0x38, 0xd2, 0xcd, 0x6e, 0xcc, 0x11, 0xb7, 0x38, 0xe5, 0x11, 0xf7, 0x63, 0x09, - 0x5c, 0x30, 0x52, 0xe4, 0x52, 0x19, 0xf0, 0xb9, 0xb9, 0x95, 0x66, 0x6e, 0xd2, 0xe4, 0x66, 0x63, - 0x75, 0x3c, 0xaa, 0x5e, 0x48, 0xa3, 0x89, 0x52, 0xf9, 0x07, 0xef, 0x82, 0x82, 0x2e, 0xf6, 0xc0, - 0x72, 0x89, 0xfb, 0x7a, 0x39, 0x8d, 0xaf, 0xce, 0xbe, 0x69, 0xa7, 0xa5, 0xf3, 0x85, 0x5c, 0x2c, - 0xf9, 0xc3, 0x3c, 0x38, 0x13, 0xa9, 0xe0, 0x5f, 0xe2, 0xfd, 0x75, 0xe4, 0x30, 0x9d, 0x9d, 0xe2, - 0x30, 0xbd, 0x0e, 0xe6, 0xc5, 0x5f, 0x22, 0x42, 0x67, 0x71, 0x37, 0x60, 0x36, 0x82, 0x62, 0x14, - 0xd6, 0x8f, 0xbb, 0x3f, 0xcf, 0x4f, 
0x79, 0x7f, 0xee, 0xf7, 0x42, 0xfc, 0xc5, 0xcf, 0x4e, 0xef, - 0xa8, 0x17, 0xe2, 0x9f, 0x7e, 0x61, 0x7d, 0x46, 0x5c, 0x6d, 0x54, 0x17, 0xe1, 0x64, 0x90, 0xb8, - 0xee, 0x05, 0xa4, 0x28, 0xa4, 0xfd, 0x85, 0x9e, 0xfd, 0x71, 0xcc, 0xb3, 0xff, 0x95, 0x34, 0xb1, - 0x96, 0xfe, 0xaa, 0x3c, 0xf6, 0xd2, 0xa3, 0x34, 0xfd, 0xa5, 0x87, 0xfc, 0x77, 0x09, 0x3c, 0x97, - 0xb8, 0x6b, 0xc1, 0xf5, 0x00, 0xad, 0xbc, 0x12, 0xa2, 0x95, 0xdf, 0x4b, 0x34, 0xf4, 0x71, 0x4b, - 0x33, 0xfe, 0x16, 0xfd, 0xf5, 0x74, 0xb7, 0xe8, 0x31, 0x27, 0xe1, 0xc9, 0xd7, 0xe9, 0x8d, 0x1f, - 0x3c, 0x7c, 0x54, 0x39, 0xf1, 0xc9, 0xa3, 0xca, 0x89, 0xcf, 0x1e, 0x55, 0x4e, 0xfc, 0x72, 0x5c, - 0x91, 0x1e, 0x8e, 0x2b, 0xd2, 0x27, 0xe3, 0x8a, 0xf4, 0xd9, 0xb8, 0x22, 0xfd, 0x63, 0x5c, 0x91, - 0x7e, 0xfb, 0x79, 0xe5, 0xc4, 0x7b, 0xcb, 0x09, 0x7f, 0x3a, 0xfe, 0x7f, 0x00, 0x00, 0x00, 0xff, - 0xff, 0xd1, 0xcb, 0x7d, 0xc7, 0xa7, 0x2c, 0x00, 0x00, + 0x15, 0xf7, 0xf2, 0x43, 0x26, 0x87, 0x96, 0x64, 0x8f, 0x54, 0x89, 0xb1, 0x5b, 0xd2, 0x58, 0x1b, + 0xb6, 0x12, 0xdb, 0xa4, 0xad, 0x7c, 0x20, 0xb1, 0xdb, 0x04, 0xa2, 0x94, 0xda, 0x0e, 0xf4, 0xc1, + 0x0c, 0x2d, 0x07, 0x0d, 0xfa, 0xe1, 0x11, 0x39, 0xa6, 0x36, 0xde, 0x2f, 0xec, 0x0e, 0x15, 0x13, + 0xbd, 0xf4, 0x5a, 0xa0, 0x40, 0xdb, 0x6b, 0xff, 0x89, 0xa2, 0x97, 0xa2, 0x68, 0xd0, 0x4b, 0x11, + 0x04, 0x3e, 0x06, 0xbd, 0x24, 0x27, 0xa2, 0x66, 0x4e, 0x45, 0xd1, 0x5b, 0x7b, 0x31, 0x50, 0xa0, + 0x98, 0xd9, 0xd9, 0xef, 0x5d, 0x73, 0xa9, 0xd8, 0x4a, 0x13, 0xe4, 0xc6, 0x9d, 0xf7, 0xde, 0x6f, + 0xde, 0xcc, 0xbc, 0x37, 0xef, 0x37, 0x33, 0x04, 0x17, 0x1f, 0xbc, 0x6e, 0x37, 0x14, 0xa3, 0x89, + 0x4d, 0xa5, 0x89, 0x4d, 0xd3, 0x6e, 0x1e, 0x5c, 0xdb, 0x23, 0x14, 0xaf, 0x36, 0xfb, 0x44, 0x27, + 0x16, 0xa6, 0xa4, 0xd7, 0x30, 0x2d, 0x83, 0x1a, 0x70, 0xd9, 0x51, 0x6c, 0x60, 0x53, 0x69, 0x30, + 0xc5, 0x86, 0x50, 0x3c, 0x7d, 0xa5, 0xaf, 0xd0, 0xfd, 0xc1, 0x5e, 0xa3, 0x6b, 0x68, 0xcd, 0xbe, + 0xd1, 0x37, 0x9a, 0x5c, 0x7f, 0x6f, 0x70, 0x9f, 0x7f, 0xf1, 0x0f, 0xfe, 0xcb, 0xc1, 0x39, 0x2d, + 0x07, 0x3a, 0xec, 0x1a, 0x16, 0x69, 0x1e, 0x5c, 0x8b, 0xf6, 0x75, 0xfa, 0x15, 0x5f, 0x47, 0xc3, + 0xdd, 0x7d, 0x45, 0x27, 0xd6, 0xb0, 0x69, 0x3e, 0xe8, 0xb3, 0x06, 0xbb, 0xa9, 0x11, 0x8a, 0x93, + 0xac, 0x9a, 0x69, 0x56, 0xd6, 0x40, 0xa7, 0x8a, 0x46, 0x62, 0x06, 0xaf, 0x4d, 0x32, 0xb0, 0xbb, + 0xfb, 0x44, 0xc3, 0x31, 0xbb, 0x97, 0xd3, 0xec, 0x06, 0x54, 0x51, 0x9b, 0x8a, 0x4e, 0x6d, 0x6a, + 0x45, 0x8d, 0xe4, 0xff, 0x48, 0x00, 0xae, 0x1b, 0x3a, 0xb5, 0x0c, 0x55, 0x25, 0x16, 0x22, 0x07, + 0x8a, 0xad, 0x18, 0x3a, 0xbc, 0x07, 0x4a, 0x6c, 0x3c, 0x3d, 0x4c, 0x71, 0x55, 0x3a, 0x2b, 0xad, + 0x54, 0x56, 0xaf, 0x36, 0xfc, 0x99, 0xf6, 0xe0, 0x1b, 0xe6, 0x83, 0x3e, 0x6b, 0xb0, 0x1b, 0x4c, + 0xbb, 0x71, 0x70, 0xad, 0xb1, 0xb3, 0xf7, 0x01, 0xe9, 0xd2, 0x2d, 0x42, 0x71, 0x0b, 0x3e, 0x1a, + 0xd5, 0x8f, 0x8d, 0x47, 0x75, 0xe0, 0xb7, 0x21, 0x0f, 0x15, 0xee, 0x80, 0x02, 0x47, 0xcf, 0x71, + 0xf4, 0x2b, 0xa9, 0xe8, 0x62, 0xd0, 0x0d, 0x84, 0x3f, 0x7c, 0xfb, 0x21, 0x25, 0x3a, 0x73, 0xaf, + 0x75, 0x42, 0x40, 0x17, 0x36, 0x30, 0xc5, 0x88, 0x03, 0xc1, 0xcb, 0xa0, 0x64, 0x09, 0xf7, 0xab, + 0xf9, 0xb3, 0xd2, 0x4a, 0xbe, 0x75, 0x52, 0x68, 0x95, 0xdc, 0x61, 0x21, 0x4f, 0x43, 0x7e, 0x24, + 0x81, 0xa5, 0xf8, 0xb8, 0x37, 0x15, 0x9b, 0xc2, 0x1f, 0xc7, 0xc6, 0xde, 0xc8, 0x36, 0x76, 0x66, + 0xcd, 0x47, 0xee, 0x75, 0xec, 0xb6, 0x04, 0xc6, 0xdd, 0x06, 0x45, 0x85, 0x12, 0xcd, 0xae, 0xe6, + 0xce, 0xe6, 0x57, 0x2a, 0xab, 0x97, 0x1a, 0x29, 0x01, 0xdc, 0x88, 0x7b, 0xd7, 0x9a, 0x15, 0xb8, + 0xc5, 0xdb, 0x0c, 0x01, 0x39, 0x40, 0xf2, 0x2f, 0x73, 0xa0, 0xbc, 0x81, 0x89, 0x66, 0xe8, 0x1d, + 0x42, 
0x8f, 0x60, 0xe5, 0x6e, 0x81, 0x82, 0x6d, 0x92, 0xae, 0x58, 0xb9, 0x0b, 0xa9, 0x03, 0xf0, + 0x7c, 0xea, 0x98, 0xa4, 0xeb, 0x2f, 0x19, 0xfb, 0x42, 0x1c, 0x01, 0xb6, 0xc1, 0x8c, 0x4d, 0x31, + 0x1d, 0xd8, 0x7c, 0xc1, 0x2a, 0xab, 0x2b, 0x19, 0xb0, 0xb8, 0x7e, 0x6b, 0x4e, 0xa0, 0xcd, 0x38, + 0xdf, 0x48, 0xe0, 0xc8, 0xff, 0xc8, 0x01, 0xe8, 0xe9, 0xae, 0x1b, 0x7a, 0x4f, 0xa1, 0x2c, 0x9c, + 0xaf, 0x83, 0x02, 0x1d, 0x9a, 0x84, 0x4f, 0x48, 0xb9, 0x75, 0xc1, 0x75, 0xe5, 0xce, 0xd0, 0x24, + 0x4f, 0x46, 0xf5, 0xa5, 0xb8, 0x05, 0x93, 0x20, 0x6e, 0x03, 0x37, 0x3d, 0x27, 0x73, 0xdc, 0xfa, + 0x95, 0x70, 0xd7, 0x4f, 0x46, 0xf5, 0x84, 0xbd, 0xa3, 0xe1, 0x21, 0x85, 0x1d, 0x84, 0x07, 0x00, + 0xaa, 0xd8, 0xa6, 0x77, 0x2c, 0xac, 0xdb, 0x4e, 0x4f, 0x8a, 0x46, 0xc4, 0xf0, 0x5f, 0xca, 0xb6, + 0x50, 0xcc, 0xa2, 0x75, 0x5a, 0x78, 0x01, 0x37, 0x63, 0x68, 0x28, 0xa1, 0x07, 0x78, 0x01, 0xcc, + 0x58, 0x04, 0xdb, 0x86, 0x5e, 0x2d, 0xf0, 0x51, 0x78, 0x13, 0x88, 0x78, 0x2b, 0x12, 0x52, 0xf8, + 0x22, 0x38, 0xae, 0x11, 0xdb, 0xc6, 0x7d, 0x52, 0x2d, 0x72, 0xc5, 0x79, 0xa1, 0x78, 0x7c, 0xcb, + 0x69, 0x46, 0xae, 0x5c, 0xfe, 0xa3, 0x04, 0x66, 0xbd, 0x99, 0x3b, 0x82, 0xcc, 0xb9, 0x19, 0xce, + 0x1c, 0x79, 0x72, 0xb0, 0xa4, 0x24, 0xcc, 0xc7, 0xf9, 0x80, 0xe3, 0x2c, 0x1c, 0xe1, 0x4f, 0x40, + 0xc9, 0x26, 0x2a, 0xe9, 0x52, 0xc3, 0x12, 0x8e, 0xbf, 0x9c, 0xd1, 0x71, 0xbc, 0x47, 0xd4, 0x8e, + 0x30, 0x6d, 0x9d, 0x60, 0x9e, 0xbb, 0x5f, 0xc8, 0x83, 0x84, 0xef, 0x82, 0x12, 0x25, 0x9a, 0xa9, + 0x62, 0x4a, 0x44, 0xd6, 0x9c, 0x0b, 0x3a, 0xcf, 0x62, 0x86, 0x81, 0xb5, 0x8d, 0xde, 0x1d, 0xa1, + 0xc6, 0x53, 0xc6, 0x9b, 0x0c, 0xb7, 0x15, 0x79, 0x30, 0xd0, 0x04, 0x73, 0x03, 0xb3, 0xc7, 0x34, + 0x29, 0xdb, 0xce, 0xfb, 0x43, 0x11, 0x43, 0x57, 0x27, 0xcf, 0xca, 0x6e, 0xc8, 0xae, 0xb5, 0x24, + 0x7a, 0x99, 0x0b, 0xb7, 0xa3, 0x08, 0x3e, 0x5c, 0x03, 0xf3, 0x9a, 0xa2, 0x23, 0x82, 0x7b, 0xc3, + 0x0e, 0xe9, 0x1a, 0x7a, 0xcf, 0xe6, 0xa1, 0x54, 0x6c, 0x2d, 0x0b, 0x80, 0xf9, 0xad, 0xb0, 0x18, + 0x45, 0xf5, 0xe1, 0x26, 0x58, 0x74, 0x37, 0xe0, 0x5b, 0x8a, 0x4d, 0x0d, 0x6b, 0xb8, 0xa9, 0x68, + 0x0a, 0xad, 0xce, 0x70, 0x9c, 0xea, 0x78, 0x54, 0x5f, 0x44, 0x09, 0x72, 0x94, 0x68, 0x25, 0xff, + 0x76, 0x06, 0xcc, 0x47, 0xf6, 0x05, 0x78, 0x17, 0x2c, 0x75, 0x07, 0x96, 0x45, 0x74, 0xba, 0x3d, + 0xd0, 0xf6, 0x88, 0xd5, 0xe9, 0xee, 0x93, 0xde, 0x40, 0x25, 0x3d, 0xbe, 0xac, 0xc5, 0x56, 0x4d, + 0xf8, 0xba, 0xb4, 0x9e, 0xa8, 0x85, 0x52, 0xac, 0xe1, 0x3b, 0x00, 0xea, 0xbc, 0x69, 0x4b, 0xb1, + 0x6d, 0x0f, 0x33, 0xc7, 0x31, 0xbd, 0x54, 0xdc, 0x8e, 0x69, 0xa0, 0x04, 0x2b, 0xe6, 0x63, 0x8f, + 0xd8, 0x8a, 0x45, 0x7a, 0x51, 0x1f, 0xf3, 0x61, 0x1f, 0x37, 0x12, 0xb5, 0x50, 0x8a, 0x35, 0x7c, + 0x15, 0x54, 0x9c, 0xde, 0xf8, 0x9c, 0x8b, 0xc5, 0x59, 0x10, 0x60, 0x95, 0x6d, 0x5f, 0x84, 0x82, + 0x7a, 0x6c, 0x68, 0xc6, 0x9e, 0x4d, 0xac, 0x03, 0xd2, 0xbb, 0xe9, 0x90, 0x03, 0x56, 0x41, 0x8b, + 0xbc, 0x82, 0x7a, 0x43, 0xdb, 0x89, 0x69, 0xa0, 0x04, 0x2b, 0x36, 0x34, 0x27, 0x6a, 0x62, 0x43, + 0x9b, 0x09, 0x0f, 0x6d, 0x37, 0x51, 0x0b, 0xa5, 0x58, 0xb3, 0xd8, 0x73, 0x5c, 0x5e, 0x3b, 0xc0, + 0x8a, 0x8a, 0xf7, 0x54, 0x52, 0x3d, 0x1e, 0x8e, 0xbd, 0xed, 0xb0, 0x18, 0x45, 0xf5, 0xe1, 0x4d, + 0x70, 0xca, 0x69, 0xda, 0xd5, 0xb1, 0x07, 0x52, 0xe2, 0x20, 0x2f, 0x08, 0x90, 0x53, 0xdb, 0x51, + 0x05, 0x14, 0xb7, 0x81, 0xd7, 0xc1, 0x5c, 0xd7, 0x50, 0x55, 0x1e, 0x8f, 0xeb, 0xc6, 0x40, 0xa7, + 0xd5, 0x32, 0x47, 0x81, 0x2c, 0x87, 0xd6, 0x43, 0x12, 0x14, 0xd1, 0x84, 0x3f, 0x03, 0xa0, 0xeb, + 0x16, 0x06, 0xbb, 0x0a, 0x26, 0x30, 0x80, 0x78, 0x59, 0xf2, 0x2b, 0xb3, 0xd7, 0x64, 0xa3, 0x00, + 0xa4, 0xfc, 0xb1, 0x04, 0x96, 
0x53, 0x12, 0x1d, 0xbe, 0x15, 0x2a, 0x82, 0x97, 0x22, 0x45, 0xf0, + 0x4c, 0x8a, 0x59, 0xa0, 0x12, 0xee, 0x83, 0x59, 0x46, 0x48, 0x14, 0xbd, 0xef, 0xa8, 0x88, 0xbd, + 0xac, 0x99, 0x3a, 0x00, 0x14, 0xd4, 0xf6, 0x77, 0xe5, 0x53, 0xe3, 0x51, 0x7d, 0x36, 0x24, 0x43, + 0x61, 0x60, 0xf9, 0x57, 0x39, 0x00, 0x36, 0x88, 0xa9, 0x1a, 0x43, 0x8d, 0xe8, 0x47, 0xc1, 0x69, + 0x6e, 0x87, 0x38, 0xcd, 0xc5, 0xf4, 0x25, 0xf1, 0x9c, 0x4a, 0x25, 0x35, 0xef, 0x46, 0x48, 0xcd, + 0x8b, 0x59, 0xc0, 0x9e, 0xce, 0x6a, 0x3e, 0xcb, 0x83, 0x05, 0x5f, 0xd9, 0xa7, 0x35, 0x37, 0x42, + 0x2b, 0x7a, 0x31, 0xb2, 0xa2, 0xcb, 0x09, 0x26, 0xcf, 0x8d, 0xd7, 0x7c, 0x00, 0xe6, 0x18, 0xeb, + 0x70, 0xd6, 0x8f, 0x73, 0x9a, 0x99, 0xa9, 0x39, 0x8d, 0x57, 0x89, 0x36, 0x43, 0x48, 0x28, 0x82, + 0x9c, 0xc2, 0xa1, 0x8e, 0x7f, 0x1d, 0x39, 0xd4, 0x9f, 0x24, 0x30, 0xe7, 0x2f, 0xd3, 0x11, 0x90, + 0xa8, 0x5b, 0x61, 0x12, 0x75, 0x2e, 0x43, 0x70, 0xa6, 0xb0, 0xa8, 0xcf, 0x0a, 0x41, 0xd7, 0x39, + 0x8d, 0x5a, 0x61, 0x47, 0x30, 0x53, 0x55, 0xba, 0xd8, 0x16, 0xf5, 0xf6, 0x84, 0x73, 0xfc, 0x72, + 0xda, 0x90, 0x27, 0x0d, 0x11, 0xae, 0xdc, 0xf3, 0x25, 0x5c, 0xf9, 0x67, 0x43, 0xb8, 0x7e, 0x04, + 0x4a, 0xb6, 0x4b, 0xb5, 0x0a, 0x1c, 0xf2, 0x52, 0xa6, 0xc4, 0x16, 0x2c, 0xcb, 0x83, 0xf6, 0xf8, + 0x95, 0x07, 0x97, 0xc4, 0xac, 0x8a, 0x5f, 0x25, 0xb3, 0x62, 0x81, 0x6e, 0xe2, 0x81, 0x4d, 0x7a, + 0x3c, 0xa9, 0x4a, 0x7e, 0xa0, 0xb7, 0x79, 0x2b, 0x12, 0x52, 0xb8, 0x0b, 0x96, 0x4d, 0xcb, 0xe8, + 0x5b, 0xc4, 0xb6, 0x37, 0x08, 0xee, 0xa9, 0x8a, 0x4e, 0xdc, 0x01, 0x38, 0x35, 0xf1, 0xcc, 0x78, + 0x54, 0x5f, 0x6e, 0x27, 0xab, 0xa0, 0x34, 0x5b, 0xf9, 0xaf, 0x05, 0x70, 0x32, 0xba, 0x37, 0xa6, + 0xd0, 0x14, 0xe9, 0x50, 0x34, 0xe5, 0x72, 0x20, 0x4e, 0x1d, 0x0e, 0x17, 0xb8, 0x2a, 0x88, 0xc5, + 0xea, 0x1a, 0x98, 0x17, 0xb4, 0xc4, 0x15, 0x0a, 0xa2, 0xe6, 0x2d, 0xcf, 0x6e, 0x58, 0x8c, 0xa2, + 0xfa, 0xf0, 0x06, 0x98, 0xb5, 0x38, 0xf3, 0x72, 0x01, 0x1c, 0xf6, 0xf2, 0x1d, 0x01, 0x30, 0x8b, + 0x82, 0x42, 0x14, 0xd6, 0x65, 0xcc, 0xc5, 0x27, 0x24, 0x2e, 0x40, 0x21, 0xcc, 0x5c, 0xd6, 0xa2, + 0x0a, 0x28, 0x6e, 0x03, 0xb7, 0xc0, 0xc2, 0x40, 0x8f, 0x43, 0x39, 0xb1, 0x76, 0x46, 0x40, 0x2d, + 0xec, 0xc6, 0x55, 0x50, 0x92, 0x1d, 0xbc, 0x17, 0x22, 0x33, 0x33, 0x7c, 0x3f, 0xb9, 0x9c, 0x21, + 0x27, 0x32, 0xb3, 0x99, 0x04, 0xaa, 0x55, 0xca, 0x4a, 0xb5, 0xe4, 0x8f, 0x24, 0x00, 0xe3, 0x79, + 0x38, 0xf1, 0x26, 0x20, 0x66, 0x11, 0xa8, 0x98, 0x4a, 0x32, 0xff, 0xb9, 0x9a, 0x91, 0xff, 0xf8, + 0x1b, 0x6a, 0x36, 0x02, 0x24, 0x26, 0xfa, 0x68, 0x2e, 0x75, 0xb2, 0x12, 0x20, 0xdf, 0xa9, 0x67, + 0x40, 0x80, 0x02, 0x60, 0x4f, 0x27, 0x40, 0xff, 0xcc, 0x81, 0x05, 0x5f, 0x39, 0x33, 0x01, 0x4a, + 0x30, 0xf9, 0xf6, 0x62, 0x27, 0x1b, 0x29, 0xf1, 0xa7, 0xee, 0xff, 0x89, 0x94, 0xf8, 0x5e, 0xa5, + 0x90, 0x92, 0xdf, 0xe7, 0x82, 0xae, 0x4f, 0x49, 0x4a, 0x9e, 0xc1, 0x0d, 0xc7, 0xd7, 0x8e, 0xd7, + 0xc8, 0x9f, 0xe4, 0xc1, 0xc9, 0x68, 0x1e, 0x86, 0x0a, 0xa4, 0x34, 0xb1, 0x40, 0xb6, 0xc1, 0xe2, + 0xfd, 0x81, 0xaa, 0x0e, 0xf9, 0x18, 0x02, 0x55, 0xd2, 0x29, 0xad, 0xdf, 0x15, 0x96, 0x8b, 0x3f, + 0x4c, 0xd0, 0x41, 0x89, 0x96, 0xf1, 0x7a, 0x59, 0xf8, 0xb2, 0xf5, 0xb2, 0x78, 0x88, 0x7a, 0x99, + 0x4c, 0x39, 0xf2, 0x87, 0xa2, 0x1c, 0xd3, 0x15, 0xcb, 0x84, 0x8d, 0x6b, 0xe2, 0xd1, 0x7f, 0x2c, + 0x81, 0xa5, 0xe4, 0x03, 0x37, 0x54, 0xc1, 0x9c, 0x86, 0x1f, 0x06, 0x2f, 0x3e, 0x26, 0x15, 0x91, + 0x01, 0x55, 0xd4, 0x86, 0xf3, 0x64, 0xd4, 0xb8, 0xad, 0xd3, 0x1d, 0xab, 0x43, 0x2d, 0x45, 0xef, + 0x3b, 0x95, 0x77, 0x2b, 0x84, 0x85, 0x22, 0xd8, 0xf0, 0x7d, 0x50, 0xd2, 0xf0, 0xc3, 0xce, 0xc0, + 0xea, 0x27, 0x55, 0xc8, 0x6c, 0xfd, 0xf0, 0x04, 0xd8, 
0x12, 0x28, 0xc8, 0xc3, 0x93, 0xbf, 0x90, + 0xc0, 0x72, 0x4a, 0x55, 0xfd, 0x06, 0x8d, 0xf2, 0x2f, 0x12, 0x38, 0x1b, 0x1a, 0x25, 0x4b, 0x4b, + 0x72, 0x7f, 0xa0, 0xf2, 0x0c, 0x15, 0x4c, 0xe6, 0x12, 0x28, 0x9b, 0xd8, 0xa2, 0x8a, 0xc7, 0x83, + 0x8b, 0xad, 0xd9, 0xf1, 0xa8, 0x5e, 0x6e, 0xbb, 0x8d, 0xc8, 0x97, 0x27, 0xcc, 0x4d, 0xee, 0xf9, + 0xcd, 0x8d, 0xfc, 0x5f, 0x09, 0x14, 0x3b, 0x5d, 0xac, 0x92, 0x23, 0x20, 0x2e, 0x1b, 0x21, 0xe2, + 0x92, 0xfe, 0x28, 0xc0, 0xfd, 0x49, 0xe5, 0x2c, 0x9b, 0x11, 0xce, 0x72, 0x7e, 0x02, 0xce, 0xd3, + 0xe9, 0xca, 0x1b, 0xa0, 0xec, 0x75, 0x37, 0xdd, 0x5e, 0x2a, 0xff, 0x2e, 0x07, 0x2a, 0x81, 0x2e, + 0xa6, 0xdc, 0x89, 0xef, 0x85, 0xca, 0x0f, 0xdb, 0x63, 0x56, 0xb3, 0x0c, 0xa4, 0xe1, 0x96, 0x9a, + 0xb7, 0x75, 0x6a, 0x05, 0xcf, 0xaa, 0xf1, 0x0a, 0xf4, 0x26, 0x98, 0xa3, 0xd8, 0xea, 0x13, 0xea, + 0xca, 0xf8, 0x84, 0x95, 0xfd, 0xbb, 0x9b, 0x3b, 0x21, 0x29, 0x8a, 0x68, 0x9f, 0xbe, 0x01, 0x66, + 0x43, 0x9d, 0xc1, 0x93, 0x20, 0xff, 0x80, 0x0c, 0x1d, 0x06, 0x87, 0xd8, 0x4f, 0xb8, 0x08, 0x8a, + 0x07, 0x58, 0x1d, 0x38, 0x21, 0x5a, 0x46, 0xce, 0xc7, 0xf5, 0xdc, 0xeb, 0x92, 0xfc, 0x6b, 0x36, + 0x39, 0x7e, 0x2a, 0x1c, 0x41, 0x74, 0xbd, 0x13, 0x8a, 0xae, 0xf4, 0xf7, 0xc9, 0x60, 0x82, 0xa6, + 0xc5, 0x18, 0x8a, 0xc4, 0xd8, 0x4b, 0x99, 0xd0, 0x9e, 0x1e, 0x69, 0xff, 0xca, 0x81, 0xc5, 0x80, + 0xb6, 0xcf, 0x8c, 0xbf, 0x1f, 0x62, 0xc6, 0x2b, 0x11, 0x66, 0x5c, 0x4d, 0xb2, 0xf9, 0x96, 0x1a, + 0x4f, 0xa6, 0xc6, 0x7f, 0x96, 0xc0, 0x7c, 0x60, 0xee, 0x8e, 0x80, 0x1b, 0xdf, 0x0e, 0x73, 0xe3, + 0xf3, 0x59, 0x82, 0x26, 0x85, 0x1c, 0x5f, 0x07, 0x0b, 0x01, 0xa5, 0x1d, 0xab, 0xa7, 0xe8, 0x58, + 0xb5, 0xe1, 0x39, 0x50, 0xb4, 0x29, 0xb6, 0xa8, 0x5b, 0x44, 0x5c, 0xdb, 0x0e, 0x6b, 0x44, 0x8e, + 0x4c, 0xfe, 0xb7, 0x04, 0x9a, 0x01, 0xe3, 0x36, 0xb1, 0x6c, 0xc5, 0xa6, 0x44, 0xa7, 0x77, 0x0d, + 0x75, 0xa0, 0x91, 0x75, 0x15, 0x2b, 0x1a, 0x22, 0xac, 0x41, 0x31, 0xf4, 0xb6, 0xa1, 0x2a, 0xdd, + 0x21, 0xc4, 0xa0, 0xf2, 0xe1, 0x3e, 0xd1, 0x37, 0x88, 0x4a, 0xa8, 0x78, 0x81, 0x2b, 0xb7, 0xde, + 0x72, 0x1f, 0xa4, 0xde, 0xf3, 0x45, 0x4f, 0x46, 0xf5, 0x95, 0x2c, 0x88, 0x3c, 0x42, 0x83, 0x98, + 0xf0, 0xa7, 0x00, 0xb0, 0x4f, 0xbe, 0x97, 0xf5, 0x44, 0xb0, 0xbe, 0xe9, 0x66, 0xf4, 0x7b, 0x9e, + 0x64, 0xaa, 0x0e, 0x02, 0x88, 0xf2, 0x1f, 0x4a, 0xa1, 0xf5, 0xfe, 0xc6, 0xdf, 0x72, 0xfe, 0x1c, + 0x2c, 0x1e, 0xf8, 0xb3, 0xe3, 0x2a, 0x30, 0xfe, 0x9d, 0x8f, 0x9e, 0xe4, 0x3d, 0xf8, 0xa4, 0x79, + 0xf5, 0x59, 0xff, 0xdd, 0x04, 0x38, 0x94, 0xd8, 0x09, 0x7c, 0x15, 0x54, 0x18, 0x6f, 0x56, 0xba, + 0x64, 0x1b, 0x6b, 0x6e, 0x2e, 0x7a, 0x0f, 0x98, 0x1d, 0x5f, 0x84, 0x82, 0x7a, 0x70, 0x1f, 0x2c, + 0x98, 0x46, 0x6f, 0x0b, 0xeb, 0xb8, 0x4f, 0x18, 0x11, 0x74, 0x96, 0x92, 0x5f, 0x7d, 0x96, 0x5b, + 0xaf, 0xb9, 0xd7, 0x5a, 0xed, 0xb8, 0xca, 0x93, 0x51, 0x7d, 0x39, 0xa1, 0x99, 0x07, 0x41, 0x12, + 0x24, 0xb4, 0x62, 0x8f, 0xee, 0xce, 0xa3, 0xc3, 0x6a, 0x96, 0xa4, 0x3c, 0xe4, 0xb3, 0x7b, 0xda, + 0xcd, 0x6e, 0xe9, 0x50, 0x37, 0xbb, 0x09, 0x47, 0xdc, 0xf2, 0x94, 0x47, 0xdc, 0x4f, 0x24, 0x70, + 0xde, 0xcc, 0x90, 0x4b, 0x55, 0xc0, 0xe7, 0xe6, 0x56, 0x96, 0xb9, 0xc9, 0x92, 0x9b, 0xad, 0x95, + 0xf1, 0xa8, 0x7e, 0x3e, 0x8b, 0x26, 0xca, 0xe4, 0x1f, 0xbc, 0x0b, 0x4a, 0x86, 0xd8, 0x03, 0xab, + 0x15, 0xee, 0xeb, 0xe5, 0x2c, 0xbe, 0xba, 0xfb, 0xa6, 0x93, 0x96, 0xee, 0x17, 0xf2, 0xb0, 0xe4, + 0x8f, 0x8a, 0xe0, 0x54, 0xac, 0x82, 0x7f, 0x85, 0xf7, 0xd7, 0xb1, 0xc3, 0x74, 0x7e, 0x8a, 0xc3, + 0xf4, 0x1a, 0x98, 0x17, 0x7f, 0x89, 0x88, 0x9c, 0xc5, 0xbd, 0x80, 0x59, 0x0f, 0x8b, 0x51, 0x54, + 0x3f, 0xe9, 0xfe, 0xbc, 0x38, 0xe5, 0xfd, 0x79, 0xd0, 0x0b, 0xf1, 0x17, 0x3f, 
0x27, 0xbd, 0xe3, + 0x5e, 0x88, 0x7f, 0xfa, 0x45, 0xf5, 0x19, 0x71, 0x75, 0x50, 0x3d, 0x84, 0xe3, 0x61, 0xe2, 0xba, + 0x1b, 0x92, 0xa2, 0x88, 0xf6, 0x97, 0x7a, 0xf6, 0xc7, 0x09, 0xcf, 0xfe, 0x57, 0xb2, 0xc4, 0x5a, + 0xf6, 0xab, 0xf2, 0xc4, 0x4b, 0x8f, 0xca, 0xf4, 0x97, 0x1e, 0xf2, 0xdf, 0x24, 0xf0, 0x42, 0xea, + 0xae, 0x05, 0xd7, 0x42, 0xb4, 0xf2, 0x4a, 0x84, 0x56, 0x7e, 0x2f, 0xd5, 0x30, 0xc0, 0x2d, 0xad, + 0xe4, 0x5b, 0xf4, 0x37, 0xb2, 0xdd, 0xa2, 0x27, 0x9c, 0x84, 0x27, 0x5f, 0xa7, 0xb7, 0x7e, 0xf0, + 0xe8, 0x71, 0xed, 0xd8, 0xa7, 0x8f, 0x6b, 0xc7, 0x3e, 0x7f, 0x5c, 0x3b, 0xf6, 0x8b, 0x71, 0x4d, + 0x7a, 0x34, 0xae, 0x49, 0x9f, 0x8e, 0x6b, 0xd2, 0xe7, 0xe3, 0x9a, 0xf4, 0xf7, 0x71, 0x4d, 0xfa, + 0xcd, 0x17, 0xb5, 0x63, 0xef, 0x2f, 0xa7, 0xfc, 0xe9, 0xf8, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, + 0xa4, 0x79, 0xcd, 0x52, 0x8e, 0x2c, 0x00, 0x00, } func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto index ddbe354411..d3db8956e8 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -45,10 +45,10 @@ message ControllerRevision { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Data is the serialized representation of the state. - optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; // Revision indicates the revision of the state represented by Data. optional int64 revision = 3; @@ -58,7 +58,7 @@ message ControllerRevision { message ControllerRevisionList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of ControllerRevisions repeated ControllerRevision items = 2; @@ -71,7 +71,7 @@ message DaemonSet { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The desired behavior of this daemon set. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -97,7 +97,7 @@ message DaemonSetCondition { // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -113,7 +113,7 @@ message DaemonSetList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // A list of daemon sets. repeated DaemonSet items = 2; @@ -125,7 +125,7 @@ message DaemonSetSpec { // Must match in order to be controlled. 
// It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; // An object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node @@ -133,7 +133,7 @@ message DaemonSetSpec { // selector is specified). // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - optional k8s.io.api.core.v1.PodTemplateSpec template = 2; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 2; // An update strategy to replace existing DaemonSet pods with new pods. // +optional @@ -204,6 +204,8 @@ message DaemonSetStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated DaemonSetCondition conditions = 10; } @@ -228,7 +230,7 @@ message DaemonSetUpdateStrategy { message Deployment { // Standard object metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the Deployment. // +optional @@ -248,10 +250,10 @@ message DeploymentCondition { optional string status = 2; // The last time this condition was updated. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; // Last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; // The reason for the condition's last transition. optional string reason = 4; @@ -264,7 +266,7 @@ message DeploymentCondition { message DeploymentList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Deployments. repeated Deployment items = 2; @@ -280,11 +282,11 @@ message DeploymentSpec { // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // It must match the pod template's labels. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. // The only allowed template.spec.restartPolicy value is "Always". - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. // +optional @@ -346,6 +348,8 @@ message DeploymentStatus { // Represents the latest available observations of a deployment's current state. // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated DeploymentCondition conditions = 6; // Count of hash collisions for the Deployment. The Deployment controller uses this @@ -378,7 +382,7 @@ message ReplicaSet { // be the same as the Pod(s) that the ReplicaSet manages. // Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the ReplicaSet. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -404,7 +408,7 @@ message ReplicaSetCondition { // The last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -420,7 +424,7 @@ message ReplicaSetList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ReplicaSets. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller @@ -446,13 +450,13 @@ message ReplicaSetSpec { // Label keys and values that must match in order to be controlled by this replica set. // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template is the object that describes the pod that will be created if // insufficient replicas are detected. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; } // ReplicaSetStatus represents the current status of a ReplicaSet. @@ -481,6 +485,8 @@ message ReplicaSetStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated ReplicaSetCondition conditions = 6; } @@ -501,7 +507,7 @@ message RollingUpdateDaemonSet { // 70% of original number of DaemonSet pods are available at all times during // the update. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of nodes with an existing available DaemonSet pod that // can have an updated DaemonSet pod during during an update. @@ -522,7 +528,7 @@ message RollingUpdateDaemonSet { // so resource intensive daemonsets should take into account that they may // cause evictions during disruption. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // Spec to control the desired behavior of rolling update. @@ -538,7 +544,7 @@ message RollingUpdateDeployment { // that the total number of pods available at all times during the update is at // least 70% of desired pods. 
// +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of pods that can be scheduled above the desired number of // pods. @@ -552,7 +558,7 @@ message RollingUpdateDeployment { // new ReplicaSet can be scaled up further, ensuring that total number of pods running // at any time during the update is at most 130% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. @@ -572,14 +578,14 @@ message RollingUpdateStatefulSetStrategy { // Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it // will be counted towards MaxUnavailable. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2; } // Scale represents a scaling request for a resource. message Scale { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional @@ -628,7 +634,7 @@ message ScaleStatus { // map to the same storage identity. message StatefulSet { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the desired identities of pods in this set. // +optional @@ -650,7 +656,7 @@ message StatefulSetCondition { // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -664,7 +670,7 @@ message StatefulSetCondition { // StatefulSetList is a collection of StatefulSets. message StatefulSetList { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; repeated StatefulSet items = 2; } @@ -714,7 +720,7 @@ message StatefulSetSpec { // selector is a label query over pods that should match the replica count. // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet @@ -723,7 +729,7 @@ message StatefulSetSpec { // -. For example, a pod in a StatefulSet named // "web" with index number "3" would be named "web-3". // The only allowed template.spec.restartPolicy value is "Always". 
- optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // volumeClaimTemplates is a list of claims that pods are allowed to reference. // The StatefulSet controller is responsible for mapping network identities to @@ -733,7 +739,8 @@ message StatefulSetSpec { // any volumes in the template, with the same name. // TODO: Define the behavior if a claim already exists with the same name. // +optional - repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; + // +listType=atomic + repeated .k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; // serviceName is the name of the service that governs this StatefulSet. // This service must exist before the StatefulSet, and is responsible for @@ -778,9 +785,7 @@ message StatefulSetSpec { // ordinals controls the numbering of replica indices in a StatefulSet. The // default ordinals behavior assigns a "0" index to the first replica and - // increments the index by one for each additional replica requested. Using - // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is beta. + // increments the index by one for each additional replica requested. // +optional optional StatefulSetOrdinals ordinals = 11; } @@ -824,6 +829,8 @@ message StatefulSetStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated StatefulSetCondition conditions = 10; // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go index a97ac6fcf0..f93a5bea7e 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/vendor/k8s.io/api/apps/v1beta2/types.go @@ -261,6 +261,7 @@ type StatefulSetSpec struct { // any volumes in the template, with the same name. // TODO: Define the behavior if a claim already exists with the same name. // +optional + // +listType=atomic VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"` // serviceName is the name of the service that governs this StatefulSet. @@ -306,9 +307,7 @@ type StatefulSetSpec struct { // ordinals controls the numbering of replica indices in a StatefulSet. The // default ordinals behavior assigns a "0" index to the first replica and - // increments the index by one for each additional replica requested. Using - // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is beta. + // increments the index by one for each additional replica requested. // +optional Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"` } @@ -352,6 +351,8 @@ type StatefulSetStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. @@ -555,6 +556,8 @@ type DeploymentStatus struct { // Represents the latest available observations of a deployment's current state. 
// +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` // Count of hash collisions for the Deployment. The Deployment controller uses this @@ -765,6 +768,8 @@ type DaemonSetStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` } @@ -951,6 +956,8 @@ type ReplicaSetStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` } diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go index d7e9209915..0b8fe34af1 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -383,7 +383,7 @@ var map_StatefulSetSpec = map[string]string{ "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.", - "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.", + "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/authentication/v1/doc.go b/vendor/k8s.io/api/authentication/v1/doc.go index 1614265bdf..3bdc89badc 100644 --- a/vendor/k8s.io/api/authentication/v1/doc.go +++ b/vendor/k8s.io/api/authentication/v1/doc.go @@ -18,5 +18,6 @@ limitations under the License. // +k8s:protobuf-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true package v1 // import "k8s.io/api/authentication/v1" diff --git a/vendor/k8s.io/api/authentication/v1/generated.pb.go b/vendor/k8s.io/api/authentication/v1/generated.pb.go index 304bbd0744..6d922030c1 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto +// source: k8s.io/api/authentication/v1/generated.proto package v1 @@ -49,7 +49,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *BoundObjectReference) Reset() { *m = BoundObjectReference{} } func (*BoundObjectReference) ProtoMessage() {} func (*BoundObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{0} + return fileDescriptor_d1237cbf54dccd53, []int{0} } func (m *BoundObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -77,7 +77,7 @@ var xxx_messageInfo_BoundObjectReference proto.InternalMessageInfo func (m *ExtraValue) Reset() { *m = ExtraValue{} } func (*ExtraValue) ProtoMessage() {} func (*ExtraValue) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{1} + return fileDescriptor_d1237cbf54dccd53, []int{1} } func (m *ExtraValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -105,7 +105,7 @@ var xxx_messageInfo_ExtraValue proto.InternalMessageInfo func (m *SelfSubjectReview) Reset() { *m = SelfSubjectReview{} } func (*SelfSubjectReview) ProtoMessage() {} func (*SelfSubjectReview) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{2} + return fileDescriptor_d1237cbf54dccd53, []int{2} } func (m *SelfSubjectReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -133,7 +133,7 @@ var xxx_messageInfo_SelfSubjectReview proto.InternalMessageInfo func (m *SelfSubjectReviewStatus) Reset() { *m = SelfSubjectReviewStatus{} } func (*SelfSubjectReviewStatus) ProtoMessage() {} func (*SelfSubjectReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{3} + return fileDescriptor_d1237cbf54dccd53, []int{3} } func (m *SelfSubjectReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -161,7 +161,7 @@ var xxx_messageInfo_SelfSubjectReviewStatus proto.InternalMessageInfo func (m *TokenRequest) Reset() { *m = TokenRequest{} } func (*TokenRequest) ProtoMessage() {} func (*TokenRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{4} + return fileDescriptor_d1237cbf54dccd53, []int{4} } func (m *TokenRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -189,7 +189,7 @@ var xxx_messageInfo_TokenRequest proto.InternalMessageInfo func (m *TokenRequestSpec) Reset() { *m = TokenRequestSpec{} } func (*TokenRequestSpec) ProtoMessage() {} func (*TokenRequestSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{5} + return fileDescriptor_d1237cbf54dccd53, []int{5} } func (m *TokenRequestSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -217,7 +217,7 @@ var xxx_messageInfo_TokenRequestSpec proto.InternalMessageInfo func (m *TokenRequestStatus) Reset() { *m = TokenRequestStatus{} } func (*TokenRequestStatus) ProtoMessage() {} func (*TokenRequestStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{6} + return fileDescriptor_d1237cbf54dccd53, []int{6} } func (m *TokenRequestStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -245,7 +245,7 @@ var xxx_messageInfo_TokenRequestStatus proto.InternalMessageInfo func (m *TokenReview) Reset() { *m = TokenReview{} } func (*TokenReview) ProtoMessage() {} func (*TokenReview) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{7} + return fileDescriptor_d1237cbf54dccd53, []int{7} } func (m *TokenReview) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -273,7 +273,7 @@ var xxx_messageInfo_TokenReview proto.InternalMessageInfo func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } func (*TokenReviewSpec) ProtoMessage() {} func (*TokenReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{8} + return fileDescriptor_d1237cbf54dccd53, []int{8} } func (m *TokenReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -301,7 +301,7 @@ var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } func (*TokenReviewStatus) ProtoMessage() {} func (*TokenReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{9} + return fileDescriptor_d1237cbf54dccd53, []int{9} } func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -329,7 +329,7 @@ var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo func (m *UserInfo) Reset() { *m = UserInfo{} } func (*UserInfo) ProtoMessage() {} func (*UserInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{10} + return fileDescriptor_d1237cbf54dccd53, []int{10} } func (m *UserInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -370,71 +370,71 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto", fileDescriptor_2953ea822e7ffe1e) -} - -var fileDescriptor_2953ea822e7ffe1e = []byte{ - // 958 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4b, 0x6f, 0x23, 0x45, - 0x10, 0xf6, 0xf8, 0x11, 0xd9, 0xe5, 0x4d, 0x48, 0x7a, 0x59, 0x61, 0x85, 0xc5, 0x0e, 0xb3, 0x12, - 0x8a, 0x80, 0x9d, 0xd9, 0x58, 0x3c, 0x56, 0x8b, 0x84, 0x94, 0x21, 0x16, 0x58, 0x08, 0x76, 0xd5, - 0x4e, 0x02, 0x42, 0x42, 0xa2, 0x3d, 0xae, 0x38, 0x83, 0x77, 0x1e, 0xcc, 0xf4, 0x98, 0xf5, 0x6d, - 0x7f, 0x02, 0x47, 0x90, 0x38, 0xf0, 0x23, 0x90, 0xf8, 0x0b, 0x39, 0xae, 0x10, 0x87, 0x3d, 0x20, - 0x8b, 0x0c, 0x57, 0x8e, 0x9c, 0x38, 0xa1, 0xee, 0xe9, 0xf8, 0x99, 0x4c, 0x7c, 0xda, 0x9b, 0xa7, - 0x1e, 0x5f, 0x55, 0x7d, 0x55, 0x5d, 0x65, 0x68, 0x0d, 0xee, 0x47, 0x86, 0xe3, 0x9b, 0x83, 0xb8, - 0x8b, 0xa1, 0x87, 0x1c, 0x23, 0x73, 0x88, 0x5e, 0xcf, 0x0f, 0x4d, 0xa5, 0x60, 0x81, 0x63, 0xb2, - 0x98, 0x9f, 0xa2, 0xc7, 0x1d, 0x9b, 0x71, 0xc7, 0xf7, 0xcc, 0xe1, 0x9e, 0xd9, 0x47, 0x0f, 0x43, - 0xc6, 0xb1, 0x67, 0x04, 0xa1, 0xcf, 0x7d, 0x72, 0x3b, 0xb5, 0x36, 0x58, 0xe0, 0x18, 0xf3, 0xd6, - 0xc6, 0x70, 0x6f, 0xfb, 0x6e, 0xdf, 0xe1, 0xa7, 0x71, 0xd7, 0xb0, 0x7d, 0xd7, 0xec, 0xfb, 0x7d, - 0xdf, 0x94, 0x4e, 0xdd, 0xf8, 0x44, 0x7e, 0xc9, 0x0f, 0xf9, 0x2b, 0x05, 0xdb, 0x7e, 0x67, 0x1a, - 0xda, 0x65, 0xf6, 0xa9, 0xe3, 0x61, 0x38, 0x32, 0x83, 0x41, 0x5f, 0x08, 0x22, 0xd3, 0x45, 0xce, - 0x2e, 0x49, 0x61, 0xdb, 0xbc, 0xca, 0x2b, 0x8c, 0x3d, 0xee, 0xb8, 0xb8, 0xe4, 0xf0, 0xde, 0x75, - 0x0e, 0x91, 0x7d, 0x8a, 0x2e, 0x5b, 0xf4, 0xd3, 0x7f, 0xd7, 0xe0, 0x65, 0xcb, 0x8f, 0xbd, 0xde, - 0xc3, 0xee, 0xb7, 0x68, 0x73, 0x8a, 0x27, 0x18, 0xa2, 0x67, 0x23, 0xd9, 0x81, 0xe2, 0xc0, 0xf1, - 0x7a, 0x35, 0x6d, 0x47, 0xdb, 0xad, 0x58, 0x37, 0xce, 0xc6, 0x8d, 0x5c, 0x32, 0x6e, 0x14, 0x3f, - 0x75, 0xbc, 0x1e, 0x95, 0x1a, 0xd2, 0x04, 0x60, 0x81, 0x73, 0x8c, 0x61, 0xe4, 0xf8, 0x5e, 0x2d, - 0x2f, 0xed, 0x88, 0xb2, 0x83, 0xfd, 0x47, 0x6d, 0xa5, 0xa1, 0x33, 0x56, 0x02, 0xd5, 0x63, 0x2e, - 0xd6, 0x0a, 0xf3, 0xa8, 0x9f, 0x33, 0x17, 0xa9, 0xd4, 0x10, 0x0b, 0x0a, 0x71, 0xfb, 0xa0, 0x56, - 0x94, 0x06, 
0xf7, 0x94, 0x41, 0xe1, 0xa8, 0x7d, 0xf0, 0xdf, 0xb8, 0xf1, 0xfa, 0x55, 0x45, 0xf2, - 0x51, 0x80, 0x91, 0x71, 0xd4, 0x3e, 0xa0, 0xc2, 0x59, 0x7f, 0x1f, 0xa0, 0xf5, 0x84, 0x87, 0xec, - 0x98, 0x3d, 0x8e, 0x91, 0x34, 0xa0, 0xe4, 0x70, 0x74, 0xa3, 0x9a, 0xb6, 0x53, 0xd8, 0xad, 0x58, - 0x95, 0x64, 0xdc, 0x28, 0xb5, 0x85, 0x80, 0xa6, 0xf2, 0x07, 0xe5, 0x1f, 0x7f, 0x69, 0xe4, 0x9e, - 0xfe, 0xb9, 0x93, 0xd3, 0xff, 0xd0, 0x60, 0xab, 0x83, 0x8f, 0x4f, 0x3a, 0xb1, 0x62, 0x63, 0xe8, - 0xe0, 0xf7, 0xe4, 0x1b, 0x28, 0x8b, 0x3e, 0xf5, 0x18, 0x67, 0x92, 0x8e, 0x6a, 0xf3, 0x9e, 0x31, - 0x1d, 0x91, 0x49, 0x26, 0x46, 0x30, 0xe8, 0x0b, 0x41, 0x64, 0x08, 0x6b, 0x63, 0xb8, 0x67, 0xa4, - 0x9c, 0x7e, 0x86, 0x9c, 0x4d, 0x89, 0x99, 0xca, 0xe8, 0x04, 0x95, 0x7c, 0x0d, 0x6b, 0x11, 0x67, - 0x3c, 0x8e, 0x24, 0x8d, 0xd5, 0xe6, 0xbb, 0x46, 0xd6, 0x08, 0x1a, 0x4b, 0x29, 0x76, 0xa4, 0xb3, - 0xb5, 0xa1, 0x82, 0xac, 0xa5, 0xdf, 0x54, 0x81, 0xea, 0x3e, 0xbc, 0x72, 0x85, 0x0b, 0x39, 0x84, - 0x72, 0x1c, 0x61, 0xd8, 0xf6, 0x4e, 0x7c, 0x55, 0xdb, 0x1b, 0xd9, 0xb1, 0x8f, 0x94, 0xb5, 0xb5, - 0xa9, 0x82, 0x95, 0x2f, 0x24, 0x74, 0x82, 0xa4, 0xff, 0x9c, 0x87, 0x1b, 0x87, 0xfe, 0x00, 0x3d, - 0x8a, 0xdf, 0xc5, 0x18, 0xf1, 0x17, 0x40, 0xe1, 0x23, 0x28, 0x46, 0x01, 0xda, 0x8a, 0x40, 0x23, - 0xbb, 0x88, 0xd9, 0xdc, 0x3a, 0x01, 0xda, 0xd3, 0x49, 0x14, 0x5f, 0x54, 0x22, 0x91, 0x2f, 0x27, - 0x4d, 0x29, 0x2c, 0x65, 0x7c, 0x1d, 0x66, 0x76, 0x3f, 0xfe, 0xd5, 0x60, 0x73, 0x31, 0x05, 0xf2, - 0x16, 0x54, 0x58, 0xdc, 0x73, 0xc4, 0xe3, 0xbb, 0x18, 0xd5, 0xf5, 0x64, 0xdc, 0xa8, 0xec, 0x5f, - 0x08, 0xe9, 0x54, 0x4f, 0x3e, 0x82, 0x2d, 0x7c, 0x12, 0x38, 0xa1, 0x8c, 0xde, 0x41, 0xdb, 0xf7, - 0x7a, 0x91, 0x7c, 0x33, 0x05, 0xeb, 0x56, 0x32, 0x6e, 0x6c, 0xb5, 0x16, 0x95, 0x74, 0xd9, 0x9e, - 0x78, 0xb0, 0xd1, 0x9d, 0x7b, 0xfa, 0xaa, 0xd0, 0x66, 0x76, 0xa1, 0x97, 0xad, 0x0b, 0x8b, 0x24, - 0xe3, 0xc6, 0xc6, 0xbc, 0x86, 0x2e, 0xa0, 0xeb, 0xbf, 0x6a, 0x40, 0x96, 0x59, 0x22, 0x77, 0xa0, - 0xc4, 0x85, 0x54, 0xad, 0x9a, 0x75, 0x45, 0x5a, 0x29, 0x35, 0x4d, 0x75, 0x64, 0x04, 0x37, 0xa7, - 0x05, 0x1c, 0x3a, 0x2e, 0x46, 0x9c, 0xb9, 0x81, 0xea, 0xf6, 0x9b, 0xab, 0xcd, 0x92, 0x70, 0xb3, - 0x5e, 0x55, 0xf0, 0x37, 0x5b, 0xcb, 0x70, 0xf4, 0xb2, 0x18, 0xfa, 0x4f, 0x79, 0xa8, 0xaa, 0xb4, - 0x5f, 0xd0, 0x3a, 0x78, 0x38, 0x37, 0xcb, 0x77, 0x57, 0x9a, 0x3b, 0xf9, 0xa6, 0xaf, 0x1a, 0xe5, - 0x2f, 0x16, 0x46, 0xd9, 0x5c, 0x1d, 0x32, 0x7b, 0x92, 0x6d, 0x78, 0x69, 0x21, 0xfe, 0x6a, 0xed, - 0x9c, 0x1b, 0xf6, 0x7c, 0xf6, 0xb0, 0xeb, 0xff, 0x68, 0xb0, 0xb5, 0x94, 0x12, 0xf9, 0x00, 0xd6, - 0x67, 0x32, 0xc7, 0xf4, 0x52, 0x95, 0xad, 0x5b, 0x2a, 0xde, 0xfa, 0xfe, 0xac, 0x92, 0xce, 0xdb, - 0x92, 0x4f, 0xa0, 0x28, 0x96, 0x95, 0x62, 0x78, 0xd5, 0x95, 0x37, 0xa1, 0x56, 0x48, 0xa8, 0x44, - 0x98, 0xaf, 0xa4, 0x78, 0xcd, 0xb3, 0xbd, 0x03, 0x25, 0x0c, 0x43, 0x3f, 0x54, 0xf7, 0x6f, 0xc2, - 0x4d, 0x4b, 0x08, 0x69, 0xaa, 0xd3, 0x7f, 0xcb, 0xc3, 0x64, 0xa7, 0x92, 0xb7, 0xd3, 0xfd, 0x2c, - 0x8f, 0x66, 0x4a, 0xe8, 0xdc, 0xde, 0x15, 0x72, 0x3a, 0xb1, 0x20, 0xaf, 0x41, 0x21, 0x76, 0x7a, - 0xea, 0x16, 0x57, 0x67, 0x8e, 0x27, 0x15, 0x72, 0xa2, 0xc3, 0x5a, 0x3f, 0xf4, 0xe3, 0x40, 0x8c, - 0x81, 0x48, 0x14, 0x44, 0x47, 0x3f, 0x96, 0x12, 0xaa, 0x34, 0xe4, 0x18, 0x4a, 0x28, 0x6e, 0xa7, - 0xac, 0xa5, 0xda, 0xdc, 0x5b, 0x8d, 0x1a, 0x43, 0xde, 0xdb, 0x96, 0xc7, 0xc3, 0xd1, 0x4c, 0x55, - 0x42, 0x46, 0x53, 0xb8, 0xed, 0xae, 0xba, 0xc9, 0xd2, 0x86, 0x6c, 0x42, 0x61, 0x80, 0xa3, 0xb4, - 0x22, 0x2a, 0x7e, 0x92, 0x0f, 0xa1, 0x34, 0x14, 0xe7, 0x5a, 0xb5, 0x64, 0x37, 0x3b, 0xee, 0xf4, - 0xbc, 0xd3, 0xd4, 0xed, 0x41, 0xfe, 
0xbe, 0x66, 0x59, 0x67, 0xe7, 0xf5, 0xdc, 0xb3, 0xf3, 0x7a, - 0xee, 0xf9, 0x79, 0x3d, 0xf7, 0x34, 0xa9, 0x6b, 0x67, 0x49, 0x5d, 0x7b, 0x96, 0xd4, 0xb5, 0xe7, - 0x49, 0x5d, 0xfb, 0x2b, 0xa9, 0x6b, 0x3f, 0xfc, 0x5d, 0xcf, 0x7d, 0x75, 0x3b, 0xeb, 0xcf, 0xe0, - 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x9a, 0x38, 0x17, 0x44, 0x0a, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/authentication/v1/generated.proto", fileDescriptor_d1237cbf54dccd53) +} + +var fileDescriptor_d1237cbf54dccd53 = []byte{ + // 947 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4b, 0x6f, 0x23, 0xc5, + 0x13, 0xf7, 0xf8, 0x11, 0xd9, 0xe5, 0x4d, 0xfe, 0x49, 0xef, 0x7f, 0x85, 0x15, 0x16, 0x4f, 0x98, + 0x95, 0x50, 0x04, 0xbb, 0x33, 0x1b, 0x8b, 0xc7, 0x6a, 0x91, 0x90, 0x32, 0xc4, 0x02, 0x0b, 0xc1, + 0xae, 0xda, 0x49, 0x40, 0x48, 0x48, 0xb4, 0xc7, 0x1d, 0xa7, 0xf1, 0xce, 0x83, 0x99, 0x1e, 0xb3, + 0xbe, 0xed, 0x47, 0xe0, 0x08, 0x12, 0x07, 0x3e, 0x04, 0x12, 0x5f, 0x21, 0xc7, 0x15, 0xe2, 0xb0, + 0x07, 0x64, 0x91, 0xe1, 0xca, 0x91, 0x13, 0x27, 0xd4, 0x3d, 0x1d, 0xdb, 0x63, 0x27, 0x13, 0x9f, + 0xf6, 0xe6, 0xa9, 0xc7, 0xaf, 0xaa, 0x7e, 0x55, 0x5d, 0x65, 0xb8, 0x3b, 0x7c, 0x10, 0x99, 0xcc, + 0xb7, 0x48, 0xc0, 0x2c, 0x12, 0xf3, 0x53, 0xea, 0x71, 0xe6, 0x10, 0xce, 0x7c, 0xcf, 0x1a, 0xed, + 0x59, 0x03, 0xea, 0xd1, 0x90, 0x70, 0xda, 0x37, 0x83, 0xd0, 0xe7, 0x3e, 0xba, 0x9d, 0x5a, 0x9b, + 0x24, 0x60, 0x66, 0xd6, 0xda, 0x1c, 0xed, 0x6d, 0xdf, 0x1b, 0x30, 0x7e, 0x1a, 0xf7, 0x4c, 0xc7, + 0x77, 0xad, 0x81, 0x3f, 0xf0, 0x2d, 0xe9, 0xd4, 0x8b, 0x4f, 0xe4, 0x97, 0xfc, 0x90, 0xbf, 0x52, + 0xb0, 0xed, 0xb7, 0x67, 0xa1, 0x5d, 0xe2, 0x9c, 0x32, 0x8f, 0x86, 0x63, 0x2b, 0x18, 0x0e, 0x84, + 0x20, 0xb2, 0x5c, 0xca, 0xc9, 0x25, 0x29, 0x6c, 0x5b, 0x57, 0x79, 0x85, 0xb1, 0xc7, 0x99, 0x4b, + 0x97, 0x1c, 0xde, 0xbd, 0xce, 0x21, 0x72, 0x4e, 0xa9, 0x4b, 0x16, 0xfd, 0x8c, 0xdf, 0x34, 0xf8, + 0xbf, 0xed, 0xc7, 0x5e, 0xff, 0x51, 0xef, 0x1b, 0xea, 0x70, 0x4c, 0x4f, 0x68, 0x48, 0x3d, 0x87, + 0xa2, 0x1d, 0x28, 0x0f, 0x99, 0xd7, 0x6f, 0x68, 0x3b, 0xda, 0x6e, 0xcd, 0xbe, 0x71, 0x36, 0xd1, + 0x0b, 0xc9, 0x44, 0x2f, 0x7f, 0xc2, 0xbc, 0x3e, 0x96, 0x1a, 0xd4, 0x02, 0x20, 0x01, 0x3b, 0xa6, + 0x61, 0xc4, 0x7c, 0xaf, 0x51, 0x94, 0x76, 0x48, 0xd9, 0xc1, 0xfe, 0xe3, 0x8e, 0xd2, 0xe0, 0x39, + 0x2b, 0x81, 0xea, 0x11, 0x97, 0x36, 0x4a, 0x59, 0xd4, 0xcf, 0x88, 0x4b, 0xb1, 0xd4, 0x20, 0x1b, + 0x4a, 0x71, 0xe7, 0xa0, 0x51, 0x96, 0x06, 0xf7, 0x95, 0x41, 0xe9, 0xa8, 0x73, 0xf0, 0xef, 0x44, + 0x7f, 0xfd, 0xaa, 0x22, 0xf9, 0x38, 0xa0, 0x91, 0x79, 0xd4, 0x39, 0xc0, 0xc2, 0xd9, 0x78, 0x0f, + 0xa0, 0xfd, 0x94, 0x87, 0xe4, 0x98, 0x3c, 0x89, 0x29, 0xd2, 0xa1, 0xc2, 0x38, 0x75, 0xa3, 0x86, + 0xb6, 0x53, 0xda, 0xad, 0xd9, 0xb5, 0x64, 0xa2, 0x57, 0x3a, 0x42, 0x80, 0x53, 0xf9, 0xc3, 0xea, + 0x0f, 0x3f, 0xeb, 0x85, 0x67, 0x7f, 0xec, 0x14, 0x8c, 0xdf, 0x35, 0xd8, 0xea, 0xd2, 0x27, 0x27, + 0xdd, 0x58, 0xb1, 0x31, 0x62, 0xf4, 0x3b, 0xf4, 0x35, 0x54, 0x45, 0x9f, 0xfa, 0x84, 0x13, 0x49, + 0x47, 0xbd, 0x75, 0xdf, 0x9c, 0x8d, 0xc8, 0x34, 0x13, 0x33, 0x18, 0x0e, 0x84, 0x20, 0x32, 0x85, + 0xb5, 0x39, 0xda, 0x33, 0x53, 0x4e, 0x3f, 0xa5, 0x9c, 0xcc, 0x88, 0x99, 0xc9, 0xf0, 0x14, 0x15, + 0x7d, 0x05, 0x6b, 0x11, 0x27, 0x3c, 0x8e, 0x24, 0x8d, 0xf5, 0xd6, 0x3b, 0x66, 0xde, 0x08, 0x9a, + 0x4b, 0x29, 0x76, 0xa5, 0xb3, 0xbd, 0xa1, 0x82, 0xac, 0xa5, 0xdf, 0x58, 0x81, 0x1a, 0x3e, 0xbc, + 0x72, 0x85, 0x0b, 0x3a, 0x84, 0x6a, 0x1c, 0xd1, 0xb0, 0xe3, 0x9d, 0xf8, 0xaa, 0xb6, 0x37, 0xf2, + 0x63, 0x1f, 0x29, 0x6b, 0x7b, 0x53, 0x05, 0xab, 0x5e, 0x48, 0xf0, 
0x14, 0xc9, 0xf8, 0xa9, 0x08, + 0x37, 0x0e, 0xfd, 0x21, 0xf5, 0x30, 0xfd, 0x36, 0xa6, 0x11, 0x7f, 0x09, 0x14, 0x3e, 0x86, 0x72, + 0x14, 0x50, 0x47, 0x11, 0x68, 0xe6, 0x17, 0x31, 0x9f, 0x5b, 0x37, 0xa0, 0xce, 0x6c, 0x12, 0xc5, + 0x17, 0x96, 0x48, 0xe8, 0x8b, 0x69, 0x53, 0x4a, 0x4b, 0x19, 0x5f, 0x87, 0x99, 0xdf, 0x8f, 0x7f, + 0x34, 0xd8, 0x5c, 0x4c, 0x01, 0xbd, 0x05, 0x35, 0x12, 0xf7, 0x99, 0x78, 0x7c, 0x17, 0xa3, 0xba, + 0x9e, 0x4c, 0xf4, 0xda, 0xfe, 0x85, 0x10, 0xcf, 0xf4, 0xe8, 0x43, 0xd8, 0xa2, 0x4f, 0x03, 0x16, + 0xca, 0xe8, 0x5d, 0xea, 0xf8, 0x5e, 0x3f, 0x92, 0x6f, 0xa6, 0x64, 0xdf, 0x4a, 0x26, 0xfa, 0x56, + 0x7b, 0x51, 0x89, 0x97, 0xed, 0x91, 0x07, 0x1b, 0xbd, 0xcc, 0xd3, 0x57, 0x85, 0xb6, 0xf2, 0x0b, + 0xbd, 0x6c, 0x5d, 0xd8, 0x28, 0x99, 0xe8, 0x1b, 0x59, 0x0d, 0x5e, 0x40, 0x37, 0x7e, 0xd1, 0x00, + 0x2d, 0xb3, 0x84, 0xee, 0x40, 0x85, 0x0b, 0xa9, 0x5a, 0x35, 0xeb, 0x8a, 0xb4, 0x4a, 0x6a, 0x9a, + 0xea, 0xd0, 0x18, 0x6e, 0xce, 0x0a, 0x38, 0x64, 0x2e, 0x8d, 0x38, 0x71, 0x03, 0xd5, 0xed, 0x37, + 0x57, 0x9b, 0x25, 0xe1, 0x66, 0xbf, 0xaa, 0xe0, 0x6f, 0xb6, 0x97, 0xe1, 0xf0, 0x65, 0x31, 0x8c, + 0x1f, 0x8b, 0x50, 0x57, 0x69, 0xbf, 0xa4, 0x75, 0xf0, 0x28, 0x33, 0xcb, 0xf7, 0x56, 0x9a, 0x3b, + 0xf9, 0xa6, 0xaf, 0x1a, 0xe5, 0xcf, 0x17, 0x46, 0xd9, 0x5a, 0x1d, 0x32, 0x7f, 0x92, 0x1d, 0xf8, + 0xdf, 0x42, 0xfc, 0xd5, 0xda, 0x99, 0x19, 0xf6, 0x62, 0xfe, 0xb0, 0x1b, 0x7f, 0x6b, 0xb0, 0xb5, + 0x94, 0x12, 0x7a, 0x1f, 0xd6, 0xe7, 0x32, 0xa7, 0xe9, 0xa5, 0xaa, 0xda, 0xb7, 0x54, 0xbc, 0xf5, + 0xfd, 0x79, 0x25, 0xce, 0xda, 0xa2, 0x8f, 0xa1, 0x2c, 0x96, 0x95, 0x62, 0x78, 0xd5, 0x95, 0x37, + 0xa5, 0x56, 0x48, 0xb0, 0x44, 0xc8, 0x56, 0x52, 0xbe, 0xe6, 0xd9, 0xde, 0x81, 0x0a, 0x0d, 0x43, + 0x3f, 0x54, 0xf7, 0x6f, 0xca, 0x4d, 0x5b, 0x08, 0x71, 0xaa, 0x33, 0x7e, 0x2d, 0xc2, 0x74, 0xa7, + 0xa2, 0xbb, 0xe9, 0x7e, 0x96, 0x47, 0x33, 0x25, 0x34, 0xb3, 0x77, 0x85, 0x1c, 0x4f, 0x2d, 0xd0, + 0x6b, 0x50, 0x8a, 0x59, 0x5f, 0xdd, 0xe2, 0xfa, 0xdc, 0xf1, 0xc4, 0x42, 0x8e, 0x0c, 0x58, 0x1b, + 0x84, 0x7e, 0x1c, 0x88, 0x31, 0x10, 0x89, 0x82, 0xe8, 0xe8, 0x47, 0x52, 0x82, 0x95, 0x06, 0x1d, + 0x43, 0x85, 0x8a, 0xdb, 0x29, 0x6b, 0xa9, 0xb7, 0xf6, 0x56, 0xa3, 0xc6, 0x94, 0xf7, 0xb6, 0xed, + 0xf1, 0x70, 0x3c, 0x57, 0x95, 0x90, 0xe1, 0x14, 0x6e, 0xbb, 0xa7, 0x6e, 0xb2, 0xb4, 0x41, 0x9b, + 0x50, 0x1a, 0xd2, 0x71, 0x5a, 0x11, 0x16, 0x3f, 0xd1, 0x07, 0x50, 0x19, 0x89, 0x73, 0xad, 0x5a, + 0xb2, 0x9b, 0x1f, 0x77, 0x76, 0xde, 0x71, 0xea, 0xf6, 0xb0, 0xf8, 0x40, 0xb3, 0xed, 0xb3, 0xf3, + 0x66, 0xe1, 0xf9, 0x79, 0xb3, 0xf0, 0xe2, 0xbc, 0x59, 0x78, 0x96, 0x34, 0xb5, 0xb3, 0xa4, 0xa9, + 0x3d, 0x4f, 0x9a, 0xda, 0x8b, 0xa4, 0xa9, 0xfd, 0x99, 0x34, 0xb5, 0xef, 0xff, 0x6a, 0x16, 0xbe, + 0xbc, 0x9d, 0xf7, 0x67, 0xf0, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf0, 0xb7, 0xc1, 0xa0, 0x2b, + 0x0a, 0x00, 0x00, } func (m *BoundObjectReference) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/authentication/v1/generated.proto b/vendor/k8s.io/api/authentication/v1/generated.proto index 1632070c87..ae9763576c 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1/generated.proto @@ -63,7 +63,7 @@ message SelfSubjectReview { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Status is filled in by the server with the user attributes. 
optional SelfSubjectReviewStatus status = 2; @@ -81,7 +81,7 @@ message TokenRequest { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated optional TokenRequestSpec spec = 2; @@ -99,6 +99,7 @@ message TokenRequestSpec { // token issued for multiple audiences may be used to authenticate // against any of the audiences listed but implies a high degree of // trust between the target audiences. + // +listType=atomic repeated string audiences = 1; // ExpirationSeconds is the requested duration of validity of the request. The @@ -122,7 +123,7 @@ message TokenRequestStatus { optional string token = 1; // ExpirationTimestamp is the time of expiration of the returned token. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time expirationTimestamp = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time expirationTimestamp = 2; } // TokenReview attempts to authenticate a token to a known user. @@ -132,7 +133,7 @@ message TokenReview { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated optional TokenReviewSpec spec = 2; @@ -154,6 +155,7 @@ message TokenReviewSpec { // this list. If no audiences are provided, the audience will default to the // audience of the Kubernetes apiserver. // +optional + // +listType=atomic repeated string audiences = 2; } @@ -177,6 +179,7 @@ message TokenReviewStatus { // status.audience field where status.authenticated is "true", the token is // valid against the audience of the Kubernetes API server. // +optional + // +listType=atomic repeated string audiences = 4; // Error indicates that the token couldn't be checked @@ -199,6 +202,7 @@ message UserInfo { // The names of groups this user is a part of. // +optional + // +listType=atomic repeated string groups = 3; // Any additional information provided by the authenticator. diff --git a/vendor/k8s.io/api/authentication/v1/types.go b/vendor/k8s.io/api/authentication/v1/types.go index b498007c00..2dc0707c4f 100644 --- a/vendor/k8s.io/api/authentication/v1/types.go +++ b/vendor/k8s.io/api/authentication/v1/types.go @@ -45,6 +45,7 @@ const ( // +genclient:nonNamespaced // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.6 // TokenReview attempts to authenticate a token to a known user. // Note: TokenReview requests may be cached by the webhook token authenticator @@ -75,6 +76,7 @@ type TokenReviewSpec struct { // this list. If no audiences are provided, the audience will default to the // audience of the Kubernetes apiserver. // +optional + // +listType=atomic Audiences []string `json:"audiences,omitempty" protobuf:"bytes,2,rep,name=audiences"` } @@ -96,6 +98,7 @@ type TokenReviewStatus struct { // status.audience field where status.authenticated is "true", the token is // valid against the audience of the Kubernetes API server. 
// +optional + // +listType=atomic Audiences []string `json:"audiences,omitempty" protobuf:"bytes,4,rep,name=audiences"` // Error indicates that the token couldn't be checked // +optional @@ -115,6 +118,7 @@ type UserInfo struct { UID string `json:"uid,omitempty" protobuf:"bytes,2,opt,name=uid"` // The names of groups this user is a part of. // +optional + // +listType=atomic Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"` // Any additional information provided by the authenticator. // +optional @@ -131,6 +135,7 @@ func (t ExtraValue) String() string { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.10 // TokenRequest requests a token for a given service account. type TokenRequest struct { @@ -156,6 +161,7 @@ type TokenRequestSpec struct { // token issued for multiple audiences may be used to authenticate // against any of the audiences listed but implies a high degree of // trust between the target audiences. + // +listType=atomic Audiences []string `json:"audiences" protobuf:"bytes,1,rep,name=audiences"` // ExpirationSeconds is the requested duration of validity of the request. The @@ -202,6 +208,7 @@ type BoundObjectReference struct { // +genclient:nonNamespaced // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.28 // SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. // When using impersonation, users will receive the user info of the user being impersonated. If impersonation or diff --git a/vendor/k8s.io/api/authentication/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/authentication/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..b612bdec48 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,40 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SelfSubjectReview) APILifecycleIntroduced() (major, minor int) { + return 1, 28 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *TokenRequest) APILifecycleIntroduced() (major, minor int) { + return 1, 10 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *TokenReview) APILifecycleIntroduced() (major, minor int) { + return 1, 6 +} diff --git a/vendor/k8s.io/api/authentication/v1alpha1/generated.pb.go b/vendor/k8s.io/api/authentication/v1alpha1/generated.pb.go index ea274ac07b..98c106ec65 100644 --- a/vendor/k8s.io/api/authentication/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1alpha1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1alpha1/generated.proto +// source: k8s.io/api/authentication/v1alpha1/generated.proto package v1alpha1 @@ -46,7 +46,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *SelfSubjectReview) Reset() { *m = SelfSubjectReview{} } func (*SelfSubjectReview) ProtoMessage() {} func (*SelfSubjectReview) Descriptor() ([]byte, []int) { - return fileDescriptor_05a77aeb710b43c2, []int{0} + return fileDescriptor_f003acd72d3d5efb, []int{0} } func (m *SelfSubjectReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -74,7 +74,7 @@ var xxx_messageInfo_SelfSubjectReview proto.InternalMessageInfo func (m *SelfSubjectReviewStatus) Reset() { *m = SelfSubjectReviewStatus{} } func (*SelfSubjectReviewStatus) ProtoMessage() {} func (*SelfSubjectReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_05a77aeb710b43c2, []int{1} + return fileDescriptor_f003acd72d3d5efb, []int{1} } func (m *SelfSubjectReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -105,35 +105,34 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1alpha1/generated.proto", fileDescriptor_05a77aeb710b43c2) + proto.RegisterFile("k8s.io/api/authentication/v1alpha1/generated.proto", fileDescriptor_f003acd72d3d5efb) } -var fileDescriptor_05a77aeb710b43c2 = []byte{ - // 384 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xbd, 0x6e, 0xdb, 0x30, - 0x14, 0x85, 0xc5, 0x0e, 0x86, 0xa1, 0x02, 0x45, 0xab, 0xa5, 0x86, 0x07, 0xba, 0xd0, 0x50, 0x74, - 0x68, 0xc9, 0xba, 0x28, 0x8a, 0x02, 0xdd, 0x34, 0x35, 0x08, 0x82, 0x00, 0x72, 0xb2, 0x64, 0x0a, - 0x25, 0x5f, 0x4b, 0x8c, 0x2c, 0x52, 0x10, 0x49, 0x05, 0xd9, 0xf2, 0x08, 0x79, 0x2c, 0x8f, 0x1e, - 0x8d, 0x0c, 0x46, 0xac, 0xbc, 0x48, 0x20, 0x59, 0xb6, 0x11, 0x3b, 0xb6, 0x37, 0xde, 0xc3, 0xfb, - 0x9d, 0x7b, 0xf8, 0x63, 0x9f, 0x26, 0x7f, 0x15, 0xe1, 0x92, 0x26, 0x26, 0x80, 0x5c, 0x80, 0x06, - 0x45, 0x0b, 0x10, 0x43, 0x99, 0xd3, 0x66, 0x83, 0x65, 0x9c, 0x32, 0xa3, 0x63, 0x10, 0x9a, 0x87, - 0x4c, 0x73, 0x29, 0x68, 0xd1, 0x67, 0xe3, 0x2c, 0x66, 0x7d, 0x1a, 0x81, 0x80, 0x9c, 0x69, 0x18, - 0x92, 0x2c, 0x97, 0x5a, 0x3a, 0xee, 0x92, 0x21, 0x2c, 0xe3, 0xe4, 0x35, 0x43, 0x56, 0x4c, 0xf7, - 0x47, 0xc4, 0x75, 0x6c, 0x02, 0x12, 0xca, 0x94, 0x46, 0x32, 0x92, 0xb4, 0x46, 0x03, 0x33, 0xaa, - 0xab, 0xba, 0xa8, 0x57, 0x4b, 0xcb, 0xee, 0xf7, 0x43, 0x31, 0xb6, 0x03, 0x74, 0x7f, 0x6f, 0xba, - 0x53, 0x16, 0xc6, 0x5c, 0x40, 0x7e, 0x47, 0xb3, 0x24, 0xaa, 0x04, 0x45, 0x53, 0xd0, 0xec, 0x2d, - 0x8a, 0xee, 0xa3, 0x72, 0x23, 0x34, 0x4f, 0x61, 0x07, 0xf8, 0x73, 0x0c, 
0x50, 0x61, 0x0c, 0x29, - 0xdb, 0xe6, 0xdc, 0x47, 0x64, 0x7f, 0x1a, 0xc0, 0x78, 0x34, 0x30, 0xc1, 0x0d, 0x84, 0xda, 0x87, - 0x82, 0xc3, 0xad, 0x73, 0x6d, 0xb7, 0xab, 0x64, 0x43, 0xa6, 0x59, 0x07, 0x7d, 0x41, 0xdf, 0xde, - 0xff, 0xfa, 0x49, 0x36, 0x17, 0xb9, 0x1e, 0x40, 0xb2, 0x24, 0xaa, 0x04, 0x45, 0xaa, 0x6e, 0x52, - 0xf4, 0xc9, 0x79, 0xed, 0x72, 0x06, 0x9a, 0x79, 0xce, 0x64, 0xde, 0xb3, 0xca, 0x79, 0xcf, 0xde, - 0x68, 0xfe, 0xda, 0xd5, 0x09, 0xed, 0x96, 0xd2, 0x4c, 0x1b, 0xd5, 0x79, 0x57, 0xfb, 0xff, 0x23, - 0xc7, 0x1f, 0x8a, 0xec, 0x04, 0x1d, 0xd4, 0x16, 0xde, 0x87, 0x66, 0x54, 0x6b, 0x59, 0xfb, 0x8d, - 0xb5, 0x2b, 0xed, 0xcf, 0x7b, 0x10, 0xe7, 0xc2, 0x6e, 0x1b, 0x05, 0xf9, 0x89, 0x18, 0xc9, 0xe6, - 0x84, 0x5f, 0x0f, 0x26, 0x20, 0x97, 0x4d, 0xb7, 0xf7, 0xb1, 0x19, 0xd6, 0x5e, 0x29, 0xfe, 0xda, - 0xc9, 0xfb, 0x3f, 0x59, 0x60, 0x6b, 0xba, 0xc0, 0xd6, 0x6c, 0x81, 0xad, 0xfb, 0x12, 0xa3, 0x49, - 0x89, 0xd1, 0xb4, 0xc4, 0x68, 0x56, 0x62, 0xf4, 0x54, 0x62, 0xf4, 0xf0, 0x8c, 0xad, 0x2b, 0xf7, - 0xf8, 0x3f, 0x7e, 0x09, 0x00, 0x00, 0xff, 0xff, 0xec, 0xf9, 0xa3, 0xcd, 0x05, 0x03, 0x00, 0x00, +var fileDescriptor_f003acd72d3d5efb = []byte{ + // 368 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x41, 0x4f, 0xe2, 0x40, + 0x14, 0xc7, 0x3b, 0x7b, 0x20, 0xa4, 0x9b, 0x6c, 0x76, 0x7b, 0x59, 0xc2, 0x61, 0x30, 0x3d, 0x18, + 0x0f, 0x3a, 0x23, 0xc4, 0x18, 0x13, 0x6f, 0x3d, 0xe9, 0xc1, 0x98, 0x14, 0xbd, 0x78, 0xf2, 0x51, + 0x1e, 0xed, 0x08, 0xed, 0x34, 0xed, 0x14, 0xe3, 0xcd, 0x8f, 0xe0, 0xc7, 0xe2, 0xc8, 0x91, 0x78, + 0x20, 0x52, 0xbf, 0x88, 0xe9, 0x50, 0x20, 0x82, 0xc0, 0xad, 0xef, 0xe5, 0xfd, 0x7e, 0xef, 0xdf, + 0x99, 0x31, 0x5b, 0xfd, 0x8b, 0x94, 0x09, 0xc9, 0x21, 0x16, 0x1c, 0x32, 0x15, 0x60, 0xa4, 0x84, + 0x07, 0x4a, 0xc8, 0x88, 0x0f, 0x9b, 0x30, 0x88, 0x03, 0x68, 0x72, 0x1f, 0x23, 0x4c, 0x40, 0x61, + 0x97, 0xc5, 0x89, 0x54, 0xd2, 0xb2, 0xe7, 0x0c, 0x83, 0x58, 0xb0, 0xef, 0x0c, 0x5b, 0x30, 0xf5, + 0x13, 0x5f, 0xa8, 0x20, 0xeb, 0x30, 0x4f, 0x86, 0xdc, 0x97, 0xbe, 0xe4, 0x1a, 0xed, 0x64, 0x3d, + 0x5d, 0xe9, 0x42, 0x7f, 0xcd, 0x95, 0xf5, 0xe3, 0x5d, 0x31, 0xd6, 0x03, 0xd4, 0xcf, 0x56, 0xd3, + 0x21, 0x78, 0x81, 0x88, 0x30, 0x79, 0xe1, 0x71, 0xdf, 0x2f, 0x1a, 0x29, 0x0f, 0x51, 0xc1, 0x4f, + 0x14, 0xdf, 0x46, 0x25, 0x59, 0xa4, 0x44, 0x88, 0x1b, 0xc0, 0xf9, 0x3e, 0x20, 0xf5, 0x02, 0x0c, + 0x61, 0x9d, 0xb3, 0xdf, 0x89, 0xf9, 0xaf, 0x8d, 0x83, 0x5e, 0x3b, 0xeb, 0x3c, 0xa1, 0xa7, 0x5c, + 0x1c, 0x0a, 0x7c, 0xb6, 0x1e, 0xcd, 0x6a, 0x91, 0xac, 0x0b, 0x0a, 0x6a, 0xe4, 0x80, 0x1c, 0xfd, + 0x6e, 0x9d, 0xb2, 0xd5, 0x41, 0x2e, 0x17, 0xb0, 0xb8, 0xef, 0x17, 0x8d, 0x94, 0x15, 0xd3, 0x6c, + 0xd8, 0x64, 0xb7, 0xda, 0x72, 0x83, 0x0a, 0x1c, 0x6b, 0x34, 0x6d, 0x18, 0xf9, 0xb4, 0x61, 0xae, + 0x7a, 0xee, 0xd2, 0x6a, 0x79, 0x66, 0x25, 0x55, 0xa0, 0xb2, 0xb4, 0xf6, 0x4b, 0xfb, 0x2f, 0xd9, + 0xfe, 0x8b, 0x62, 0x1b, 0x41, 0xdb, 0x5a, 0xe1, 0xfc, 0x29, 0x57, 0x55, 0xe6, 0xb5, 0x5b, 0xaa, + 0x6d, 0x69, 0xfe, 0xdf, 0x82, 0x58, 0x77, 0x66, 0x35, 0x4b, 0x31, 0xb9, 0x8e, 0x7a, 0xb2, 0xfc, + 0xc3, 0xc3, 0x9d, 0x09, 0xd8, 0x7d, 0x39, 0xed, 0xfc, 0x2d, 0x97, 0x55, 0x17, 0x1d, 0x77, 0x69, + 0x72, 0xae, 0x46, 0x33, 0x6a, 0x8c, 0x67, 0xd4, 0x98, 0xcc, 0xa8, 0xf1, 0x9a, 0x53, 0x32, 0xca, + 0x29, 0x19, 0xe7, 0x94, 0x4c, 0x72, 0x4a, 0x3e, 0x72, 0x4a, 0xde, 0x3e, 0xa9, 0xf1, 0x60, 0xef, + 0x7f, 0xc7, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x04, 0xfb, 0xb6, 0xfb, 0xec, 0x02, 0x00, 0x00, } func (m *SelfSubjectReview) Marshal() (dAtA []byte, err error) { diff --git 
a/vendor/k8s.io/api/authentication/v1alpha1/generated.proto b/vendor/k8s.io/api/authentication/v1alpha1/generated.proto index 51d9252440..4585e5cdd3 100644 --- a/vendor/k8s.io/api/authentication/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1alpha1/generated.proto @@ -36,7 +36,7 @@ message SelfSubjectReview { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Status is filled in by the server with the user attributes. optional SelfSubjectReviewStatus status = 2; @@ -46,6 +46,6 @@ message SelfSubjectReview { message SelfSubjectReviewStatus { // User attributes of the user making this request. // +optional - optional k8s.io.api.authentication.v1.UserInfo userInfo = 1; + optional .k8s.io.api.authentication.v1.UserInfo userInfo = 1; } diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go index 7f1d5ca6ce..4153926447 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto +// source: k8s.io/api/authentication/v1beta1/generated.proto package v1beta1 @@ -47,7 +47,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ExtraValue) Reset() { *m = ExtraValue{} } func (*ExtraValue) ProtoMessage() {} func (*ExtraValue) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{0} + return fileDescriptor_fdc2de40fd7f3b21, []int{0} } func (m *ExtraValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +75,7 @@ var xxx_messageInfo_ExtraValue proto.InternalMessageInfo func (m *SelfSubjectReview) Reset() { *m = SelfSubjectReview{} } func (*SelfSubjectReview) ProtoMessage() {} func (*SelfSubjectReview) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{1} + return fileDescriptor_fdc2de40fd7f3b21, []int{1} } func (m *SelfSubjectReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +103,7 @@ var xxx_messageInfo_SelfSubjectReview proto.InternalMessageInfo func (m *SelfSubjectReviewStatus) Reset() { *m = SelfSubjectReviewStatus{} } func (*SelfSubjectReviewStatus) ProtoMessage() {} func (*SelfSubjectReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{2} + return fileDescriptor_fdc2de40fd7f3b21, []int{2} } func (m *SelfSubjectReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +131,7 @@ var xxx_messageInfo_SelfSubjectReviewStatus proto.InternalMessageInfo func (m *TokenReview) Reset() { *m = TokenReview{} } func (*TokenReview) ProtoMessage() {} func (*TokenReview) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{3} + return fileDescriptor_fdc2de40fd7f3b21, []int{3} } func (m *TokenReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +159,7 @@ var xxx_messageInfo_TokenReview proto.InternalMessageInfo func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } func (*TokenReviewSpec) ProtoMessage() {} func (*TokenReviewSpec) Descriptor() ([]byte, []int) { - return 
fileDescriptor_77c9b20d3ad27844, []int{4} + return fileDescriptor_fdc2de40fd7f3b21, []int{4} } func (m *TokenReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -187,7 +187,7 @@ var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } func (*TokenReviewStatus) ProtoMessage() {} func (*TokenReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{5} + return fileDescriptor_fdc2de40fd7f3b21, []int{5} } func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -215,7 +215,7 @@ var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo func (m *UserInfo) Reset() { *m = UserInfo{} } func (*UserInfo) ProtoMessage() {} func (*UserInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{6} + return fileDescriptor_fdc2de40fd7f3b21, []int{6} } func (m *UserInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -252,57 +252,56 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto", fileDescriptor_77c9b20d3ad27844) -} - -var fileDescriptor_77c9b20d3ad27844 = []byte{ - // 725 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0x4f, 0x4f, 0x13, 0x41, - 0x14, 0xef, 0xf6, 0x0f, 0x69, 0xa7, 0x56, 0x61, 0x12, 0x23, 0x69, 0xe2, 0x16, 0x6a, 0x62, 0x48, - 0x80, 0x59, 0x21, 0x04, 0x09, 0x9e, 0x58, 0x25, 0x04, 0x13, 0x62, 0x32, 0x05, 0x0f, 0xea, 0xc1, - 0xe9, 0xf6, 0xb1, 0x5d, 0x4b, 0x77, 0x37, 0xbb, 0xb3, 0x55, 0x6e, 0x7c, 0x04, 0x8f, 0x1e, 0x4d, - 0xfc, 0x24, 0xde, 0x38, 0x72, 0xc4, 0xc4, 0x34, 0xb2, 0x7e, 0x02, 0xbf, 0x81, 0x99, 0xd9, 0x61, - 0xdb, 0x82, 0x14, 0xb8, 0x78, 0xdb, 0xf9, 0xcd, 0xfb, 0xfd, 0xde, 0x7b, 0xbf, 0xf7, 0x32, 0x8b, - 0x5e, 0x76, 0xd6, 0x42, 0xe2, 0x78, 0x46, 0x27, 0x6a, 0x42, 0xe0, 0x02, 0x87, 0xd0, 0xe8, 0x81, - 0xdb, 0xf2, 0x02, 0x43, 0x5d, 0x30, 0xdf, 0x31, 0x58, 0xc4, 0xdb, 0xe0, 0x72, 0xc7, 0x62, 0xdc, - 0xf1, 0x5c, 0xa3, 0xb7, 0xd4, 0x04, 0xce, 0x96, 0x0c, 0x1b, 0x5c, 0x08, 0x18, 0x87, 0x16, 0xf1, - 0x03, 0x8f, 0x7b, 0x78, 0x36, 0xa1, 0x10, 0xe6, 0x3b, 0x64, 0x94, 0x42, 0x14, 0xa5, 0xba, 0x68, - 0x3b, 0xbc, 0x1d, 0x35, 0x89, 0xe5, 0x75, 0x0d, 0xdb, 0xb3, 0x3d, 0x43, 0x32, 0x9b, 0xd1, 0xbe, - 0x3c, 0xc9, 0x83, 0xfc, 0x4a, 0x14, 0xab, 0x0b, 0xe3, 0x8a, 0xb8, 0x98, 0xbf, 0xba, 0x32, 0x88, - 0xee, 0x32, 0xab, 0xed, 0xb8, 0x10, 0x1c, 0x1a, 0x7e, 0xc7, 0x16, 0x40, 0x68, 0x74, 0x81, 0xb3, - 0x7f, 0xb1, 0x8c, 0xab, 0x58, 0x41, 0xe4, 0x72, 0xa7, 0x0b, 0x97, 0x08, 0xab, 0xd7, 0x11, 0x42, - 0xab, 0x0d, 0x5d, 0x76, 0x91, 0x57, 0x7f, 0x8a, 0xd0, 0xe6, 0x27, 0x1e, 0xb0, 0xd7, 0xec, 0x20, - 0x02, 0x5c, 0x43, 0x05, 0x87, 0x43, 0x37, 0x9c, 0xd6, 0x66, 0x72, 0x73, 0x25, 0xb3, 0x14, 0xf7, - 0x6b, 0x85, 0x6d, 0x01, 0xd0, 0x04, 0x5f, 0x2f, 0x7e, 0xf9, 0x5a, 0xcb, 0x1c, 0xfd, 0x9c, 0xc9, - 0xd4, 0x7f, 0x68, 0x68, 0xaa, 0x01, 0x07, 0xfb, 0x8d, 0xa8, 0xf9, 0x01, 0x2c, 0x4e, 0xa1, 0xe7, - 0xc0, 0x47, 0xfc, 0x1e, 0x15, 0x45, 0x4b, 0x2d, 0xc6, 0xd9, 0xb4, 0x36, 0xa3, 0xcd, 0x95, 0x97, - 0x9f, 0x90, 0xc1, 0x00, 0xd2, 0xca, 0x88, 0xdf, 0xb1, 0x05, 0x10, 0x12, 0x11, 0x4d, 0x7a, 0x4b, - 0xe4, 0x95, 0x54, 0xd9, 0x01, 0xce, 0x4c, 0x7c, 0xdc, 0xaf, 0x65, 0xe2, 0x7e, 0x0d, 0x0d, 0x30, - 0x9a, 0xaa, 0xe2, 0x26, 0x9a, 0x08, 0x39, 0xe3, 0x51, 0x38, 0x9d, 0x95, 0xfa, 0xeb, 0xe4, 0xda, - 0x01, 0x93, 0x4b, 0x75, 0x36, 0xa4, 0x82, 0x79, 0x57, 0x65, 0x9a, 0x48, 0xce, 0x54, 0x29, 0xd7, - 0x3d, 0xf4, 
0xe0, 0x0a, 0x0a, 0xde, 0x45, 0xc5, 0x28, 0x84, 0x60, 0xdb, 0xdd, 0xf7, 0x54, 0x83, - 0x8f, 0xc7, 0x16, 0x40, 0xf6, 0x54, 0xb4, 0x39, 0xa9, 0x92, 0x15, 0xcf, 0x11, 0x9a, 0x2a, 0xd5, - 0xbf, 0x65, 0x51, 0x79, 0xd7, 0xeb, 0x80, 0xfb, 0xdf, 0x6c, 0xdc, 0x45, 0xf9, 0xd0, 0x07, 0x4b, - 0x99, 0xb8, 0x7c, 0x03, 0x13, 0x87, 0xea, 0x6b, 0xf8, 0x60, 0x99, 0x77, 0x94, 0x7e, 0x5e, 0x9c, - 0xa8, 0x54, 0xc3, 0xef, 0xd2, 0xe1, 0xe4, 0xa4, 0xee, 0xca, 0x2d, 0x75, 0xc7, 0x8f, 0xc5, 0x42, - 0xf7, 0x2e, 0x14, 0x81, 0x1f, 0xa1, 0x02, 0x17, 0x90, 0x74, 0xa9, 0x64, 0x56, 0x14, 0xb3, 0x90, - 0xc4, 0x25, 0x77, 0x78, 0x1e, 0x95, 0x58, 0xd4, 0x72, 0xc0, 0xb5, 0x40, 0x6c, 0x8d, 0xd8, 0xec, - 0x4a, 0xdc, 0xaf, 0x95, 0x36, 0xce, 0x41, 0x3a, 0xb8, 0xaf, 0xff, 0xd1, 0xd0, 0xd4, 0xa5, 0x92, - 0xf0, 0x33, 0x54, 0x19, 0x2a, 0x1f, 0x5a, 0x32, 0x5f, 0xd1, 0xbc, 0xaf, 0xf2, 0x55, 0x36, 0x86, - 0x2f, 0xe9, 0x68, 0x2c, 0xde, 0x41, 0x79, 0x31, 0x69, 0xe5, 0xf5, 0xfc, 0x0d, 0x3c, 0x49, 0x97, - 0x26, 0x35, 0x59, 0x20, 0x54, 0xca, 0x8c, 0xb6, 0x93, 0x1f, 0xdf, 0x8e, 0x30, 0x08, 0x82, 0xc0, - 0x0b, 0xe4, 0x40, 0x86, 0x0c, 0xda, 0x14, 0x20, 0x4d, 0xee, 0xea, 0xdf, 0xb3, 0x28, 0xdd, 0x4a, - 0xbc, 0x90, 0x6c, 0xb8, 0xcb, 0xba, 0xa0, 0x5c, 0x1d, 0xd9, 0x5c, 0x81, 0xd3, 0x34, 0x02, 0x3f, - 0x44, 0xb9, 0xc8, 0x69, 0xc9, 0xd6, 0x4a, 0x66, 0x59, 0x05, 0xe6, 0xf6, 0xb6, 0x5f, 0x50, 0x81, - 0xe3, 0x3a, 0x9a, 0xb0, 0x03, 0x2f, 0xf2, 0xc5, 0x42, 0x88, 0x42, 0x91, 0x18, 0xeb, 0x96, 0x44, - 0xa8, 0xba, 0xc1, 0x6f, 0x51, 0x01, 0xc4, 0x13, 0x24, 0x7b, 0x29, 0x2f, 0xaf, 0xde, 0xc2, 0x1f, - 0x22, 0xdf, 0xae, 0x4d, 0x97, 0x07, 0x87, 0x43, 0xad, 0x09, 0x8c, 0x26, 0x9a, 0x55, 0x5b, 0xbd, - 0x6f, 0x32, 0x06, 0x4f, 0xa2, 0x5c, 0x07, 0x0e, 0x93, 0xb6, 0xa8, 0xf8, 0xc4, 0xcf, 0x51, 0xa1, - 0x27, 0x9e, 0x3e, 0x35, 0x9c, 0xc5, 0x1b, 0x24, 0x1f, 0xbc, 0x97, 0x34, 0xe1, 0xae, 0x67, 0xd7, - 0x34, 0x73, 0xeb, 0xf8, 0x4c, 0xcf, 0x9c, 0x9c, 0xe9, 0x99, 0xd3, 0x33, 0x3d, 0x73, 0x14, 0xeb, - 0xda, 0x71, 0xac, 0x6b, 0x27, 0xb1, 0xae, 0x9d, 0xc6, 0xba, 0xf6, 0x2b, 0xd6, 0xb5, 0xcf, 0xbf, - 0xf5, 0xcc, 0x9b, 0xd9, 0x6b, 0x7f, 0x60, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x19, 0x49, - 0x3f, 0xfd, 0x06, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/authentication/v1beta1/generated.proto", fileDescriptor_fdc2de40fd7f3b21) +} + +var fileDescriptor_fdc2de40fd7f3b21 = []byte{ + // 711 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x4e, 0xdb, 0x4e, + 0x10, 0x8e, 0xf3, 0x07, 0x25, 0x9b, 0x5f, 0x7e, 0x85, 0x95, 0xaa, 0xa2, 0x48, 0x75, 0x20, 0x95, + 0x2a, 0x24, 0x60, 0xdd, 0x20, 0x44, 0x11, 0x3d, 0xe1, 0x16, 0x21, 0x0e, 0xa8, 0xd2, 0x06, 0x7a, + 0x68, 0x7b, 0xe8, 0xc6, 0x19, 0x1c, 0x37, 0xc4, 0xb6, 0xec, 0x75, 0x5a, 0x6e, 0x3c, 0x42, 0x8f, + 0x3d, 0x56, 0xea, 0x93, 0xf4, 0xc6, 0x91, 0x23, 0x95, 0xaa, 0xa8, 0xb8, 0x4f, 0xd0, 0x37, 0xa8, + 0x76, 0xbd, 0x38, 0x09, 0x94, 0x00, 0x97, 0xde, 0xbc, 0xdf, 0xce, 0xf7, 0xcd, 0xcc, 0x37, 0xa3, + 0x35, 0x6a, 0x74, 0xd7, 0x43, 0xe2, 0x78, 0x06, 0xf3, 0x1d, 0x83, 0x45, 0xbc, 0x03, 0x2e, 0x77, + 0x2c, 0xc6, 0x1d, 0xcf, 0x35, 0xfa, 0x8d, 0x16, 0x70, 0xd6, 0x30, 0x6c, 0x70, 0x21, 0x60, 0x1c, + 0xda, 0xc4, 0x0f, 0x3c, 0xee, 0xe1, 0xf9, 0x84, 0x42, 0x98, 0xef, 0x90, 0x71, 0x0a, 0x51, 0x94, + 0xea, 0xb2, 0xed, 0xf0, 0x4e, 0xd4, 0x22, 0x96, 0xd7, 0x33, 0x6c, 0xcf, 0xf6, 0x0c, 0xc9, 0x6c, + 0x45, 0x07, 0xf2, 0x24, 0x0f, 0xf2, 0x2b, 0x51, 0xac, 0x2e, 0x4d, 0x2a, 0xe2, 0x72, 0xfe, 0xea, + 0xea, 0x30, 0xba, 0xc7, 0xac, 0x8e, 0xe3, 0x42, 0x70, 0x64, 0xf8, 0x5d, 0x5b, 0x00, 0xa1, 
0xd1, + 0x03, 0xce, 0xfe, 0xc6, 0x32, 0xae, 0x63, 0x05, 0x91, 0xcb, 0x9d, 0x1e, 0x5c, 0x21, 0xac, 0xdd, + 0x44, 0x08, 0xad, 0x0e, 0xf4, 0xd8, 0x65, 0x5e, 0xfd, 0x29, 0x42, 0x5b, 0x1f, 0x79, 0xc0, 0x5e, + 0xb1, 0xc3, 0x08, 0x70, 0x0d, 0x15, 0x1c, 0x0e, 0xbd, 0x70, 0x56, 0x9b, 0xcb, 0x2d, 0x94, 0xcc, + 0x52, 0x3c, 0xa8, 0x15, 0x76, 0x04, 0x40, 0x13, 0x7c, 0xa3, 0xf8, 0xf9, 0x4b, 0x2d, 0x73, 0xfc, + 0x63, 0x2e, 0x53, 0xff, 0xae, 0xa1, 0x99, 0x26, 0x1c, 0x1e, 0x34, 0xa3, 0xd6, 0x7b, 0xb0, 0x38, + 0x85, 0xbe, 0x03, 0x1f, 0xf0, 0x3b, 0x54, 0x14, 0x2d, 0xb5, 0x19, 0x67, 0xb3, 0xda, 0x9c, 0xb6, + 0x50, 0x5e, 0x79, 0x42, 0x86, 0x03, 0x48, 0x2b, 0x23, 0x7e, 0xd7, 0x16, 0x40, 0x48, 0x44, 0x34, + 0xe9, 0x37, 0xc8, 0x4b, 0xa9, 0xb2, 0x0b, 0x9c, 0x99, 0xf8, 0x64, 0x50, 0xcb, 0xc4, 0x83, 0x1a, + 0x1a, 0x62, 0x34, 0x55, 0xc5, 0x2d, 0x34, 0x15, 0x72, 0xc6, 0xa3, 0x70, 0x36, 0x2b, 0xf5, 0x37, + 0xc8, 0x8d, 0x03, 0x26, 0x57, 0xea, 0x6c, 0x4a, 0x05, 0xf3, 0x7f, 0x95, 0x69, 0x2a, 0x39, 0x53, + 0xa5, 0x5c, 0xf7, 0xd0, 0x83, 0x6b, 0x28, 0x78, 0x0f, 0x15, 0xa3, 0x10, 0x82, 0x1d, 0xf7, 0xc0, + 0x53, 0x0d, 0x3e, 0x9e, 0x58, 0x00, 0xd9, 0x57, 0xd1, 0xe6, 0xb4, 0x4a, 0x56, 0xbc, 0x40, 0x68, + 0xaa, 0x54, 0xff, 0x9a, 0x45, 0xe5, 0x3d, 0xaf, 0x0b, 0xee, 0x3f, 0xb3, 0x71, 0x0f, 0xe5, 0x43, + 0x1f, 0x2c, 0x65, 0xe2, 0xca, 0x2d, 0x4c, 0x1c, 0xa9, 0xaf, 0xe9, 0x83, 0x65, 0xfe, 0xa7, 0xf4, + 0xf3, 0xe2, 0x44, 0xa5, 0x1a, 0x7e, 0x9b, 0x0e, 0x27, 0x27, 0x75, 0x57, 0xef, 0xa8, 0x3b, 0x79, + 0x2c, 0x16, 0xba, 0x77, 0xa9, 0x08, 0xfc, 0x08, 0x15, 0xb8, 0x80, 0xa4, 0x4b, 0x25, 0xb3, 0xa2, + 0x98, 0x85, 0x24, 0x2e, 0xb9, 0xc3, 0x8b, 0xa8, 0xc4, 0xa2, 0xb6, 0x03, 0xae, 0x05, 0x62, 0x6b, + 0xc4, 0x66, 0x57, 0xe2, 0x41, 0xad, 0xb4, 0x79, 0x01, 0xd2, 0xe1, 0x7d, 0xfd, 0xb7, 0x86, 0x66, + 0xae, 0x94, 0x84, 0x9f, 0xa1, 0xca, 0x48, 0xf9, 0xd0, 0x96, 0xf9, 0x8a, 0xe6, 0x7d, 0x95, 0xaf, + 0xb2, 0x39, 0x7a, 0x49, 0xc7, 0x63, 0xf1, 0x2e, 0xca, 0x8b, 0x49, 0x2b, 0xaf, 0x17, 0x6f, 0xe1, + 0x49, 0xba, 0x34, 0xa9, 0xc9, 0x02, 0xa1, 0x52, 0x66, 0xbc, 0x9d, 0xfc, 0xe4, 0x76, 0x84, 0x41, + 0x10, 0x04, 0x5e, 0x20, 0x07, 0x32, 0x62, 0xd0, 0x96, 0x00, 0x69, 0x72, 0x57, 0xff, 0x96, 0x45, + 0xe9, 0x56, 0xe2, 0xa5, 0x64, 0xc3, 0x5d, 0xd6, 0x03, 0xe5, 0xea, 0xd8, 0xe6, 0x0a, 0x9c, 0xa6, + 0x11, 0xf8, 0x21, 0xca, 0x45, 0x4e, 0x5b, 0xb6, 0x56, 0x32, 0xcb, 0x2a, 0x30, 0xb7, 0xbf, 0xf3, + 0x82, 0x0a, 0x1c, 0xd7, 0xd1, 0x94, 0x1d, 0x78, 0x91, 0x2f, 0x16, 0x42, 0x14, 0x8a, 0xc4, 0x58, + 0xb7, 0x25, 0x42, 0xd5, 0x0d, 0x7e, 0x83, 0x0a, 0x20, 0x9e, 0x20, 0xd9, 0x4b, 0x79, 0x65, 0xed, + 0x0e, 0xfe, 0x10, 0xf9, 0x76, 0x6d, 0xb9, 0x3c, 0x38, 0x1a, 0x69, 0x4d, 0x60, 0x34, 0xd1, 0xac, + 0xda, 0xea, 0x7d, 0x93, 0x31, 0x78, 0x1a, 0xe5, 0xba, 0x70, 0x94, 0xb4, 0x45, 0xc5, 0x27, 0x7e, + 0x8e, 0x0a, 0x7d, 0xf1, 0xf4, 0xa9, 0xe1, 0x2c, 0xdf, 0x22, 0xf9, 0xf0, 0xbd, 0xa4, 0x09, 0x77, + 0x23, 0xbb, 0xae, 0x99, 0xdb, 0x27, 0xe7, 0x7a, 0xe6, 0xf4, 0x5c, 0xcf, 0x9c, 0x9d, 0xeb, 0x99, + 0xe3, 0x58, 0xd7, 0x4e, 0x62, 0x5d, 0x3b, 0x8d, 0x75, 0xed, 0x2c, 0xd6, 0xb5, 0x9f, 0xb1, 0xae, + 0x7d, 0xfa, 0xa5, 0x67, 0x5e, 0xcf, 0xdf, 0xf8, 0x03, 0xfb, 0x13, 0x00, 0x00, 0xff, 0xff, 0x45, + 0x72, 0x2b, 0xf2, 0xe4, 0x06, 0x00, 0x00, } func (m ExtraValue) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.proto b/vendor/k8s.io/api/authentication/v1beta1/generated.proto index 53b4635d7e..d0f6fe4402 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.proto @@ -45,7 +45,7 @@ 
message SelfSubjectReview { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Status is filled in by the server with the user attributes. optional SelfSubjectReviewStatus status = 2; @@ -55,7 +55,7 @@ message SelfSubjectReview { message SelfSubjectReviewStatus { // User attributes of the user making this request. // +optional - optional k8s.io.api.authentication.v1.UserInfo userInfo = 1; + optional .k8s.io.api.authentication.v1.UserInfo userInfo = 1; } // TokenReview attempts to authenticate a token to a known user. @@ -65,7 +65,7 @@ message TokenReview { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated optional TokenReviewSpec spec = 2; @@ -87,6 +87,7 @@ message TokenReviewSpec { // this list. If no audiences are provided, the audience will default to the // audience of the Kubernetes apiserver. // +optional + // +listType=atomic repeated string audiences = 2; } @@ -110,6 +111,7 @@ message TokenReviewStatus { // status.audience field where status.authenticated is "true", the token is // valid against the audience of the Kubernetes API server. // +optional + // +listType=atomic repeated string audiences = 4; // Error indicates that the token couldn't be checked @@ -132,6 +134,7 @@ message UserInfo { // The names of groups this user is a part of. // +optional + // +listType=atomic repeated string groups = 3; // Any additional information provided by the authenticator. diff --git a/vendor/k8s.io/api/authentication/v1beta1/types.go b/vendor/k8s.io/api/authentication/v1beta1/types.go index 5bce82e7cf..8038ef7d34 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/types.go +++ b/vendor/k8s.io/api/authentication/v1beta1/types.go @@ -60,6 +60,7 @@ type TokenReviewSpec struct { // this list. If no audiences are provided, the audience will default to the // audience of the Kubernetes apiserver. // +optional + // +listType=atomic Audiences []string `json:"audiences,omitempty" protobuf:"bytes,2,rep,name=audiences"` } @@ -81,6 +82,7 @@ type TokenReviewStatus struct { // status.audience field where status.authenticated is "true", the token is // valid against the audience of the Kubernetes API server. // +optional + // +listType=atomic Audiences []string `json:"audiences,omitempty" protobuf:"bytes,4,rep,name=audiences"` // Error indicates that the token couldn't be checked // +optional @@ -100,6 +102,7 @@ type UserInfo struct { UID string `json:"uid,omitempty" protobuf:"bytes,2,opt,name=uid"` // The names of groups this user is a part of. // +optional + // +listType=atomic Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"` // Any additional information provided by the authenticator. // +optional diff --git a/vendor/k8s.io/api/authorization/v1/doc.go b/vendor/k8s.io/api/authorization/v1/doc.go index cf100e6b75..77e5a19c4c 100644 --- a/vendor/k8s.io/api/authorization/v1/doc.go +++ b/vendor/k8s.io/api/authorization/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=authorization.k8s.io package v1 // import "k8s.io/api/authorization/v1" diff --git a/vendor/k8s.io/api/authorization/v1/generated.pb.go b/vendor/k8s.io/api/authorization/v1/generated.pb.go index 2e8e35a551..aed9a3a476 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto +// source: k8s.io/api/authorization/v1/generated.proto package v1 @@ -26,6 +26,7 @@ import ( proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" math_bits "math/bits" @@ -47,7 +48,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ExtraValue) Reset() { *m = ExtraValue{} } func (*ExtraValue) ProtoMessage() {} func (*ExtraValue) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{0} + return fileDescriptor_aafd0e5e70cec678, []int{0} } func (m *ExtraValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -72,10 +73,66 @@ func (m *ExtraValue) XXX_DiscardUnknown() { var xxx_messageInfo_ExtraValue proto.InternalMessageInfo +func (m *FieldSelectorAttributes) Reset() { *m = FieldSelectorAttributes{} } +func (*FieldSelectorAttributes) ProtoMessage() {} +func (*FieldSelectorAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_aafd0e5e70cec678, []int{1} +} +func (m *FieldSelectorAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FieldSelectorAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FieldSelectorAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldSelectorAttributes.Merge(m, src) +} +func (m *FieldSelectorAttributes) XXX_Size() int { + return m.Size() +} +func (m *FieldSelectorAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_FieldSelectorAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldSelectorAttributes proto.InternalMessageInfo + +func (m *LabelSelectorAttributes) Reset() { *m = LabelSelectorAttributes{} } +func (*LabelSelectorAttributes) ProtoMessage() {} +func (*LabelSelectorAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_aafd0e5e70cec678, []int{2} +} +func (m *LabelSelectorAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelSelectorAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LabelSelectorAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelSelectorAttributes.Merge(m, src) +} +func (m *LabelSelectorAttributes) XXX_Size() int { + return m.Size() +} +func (m *LabelSelectorAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_LabelSelectorAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelSelectorAttributes proto.InternalMessageInfo + func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } func (*LocalSubjectAccessReview) ProtoMessage() {} func (*LocalSubjectAccessReview) Descriptor() 
([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{1} + return fileDescriptor_aafd0e5e70cec678, []int{3} } func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +160,7 @@ var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } func (*NonResourceAttributes) ProtoMessage() {} func (*NonResourceAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{2} + return fileDescriptor_aafd0e5e70cec678, []int{4} } func (m *NonResourceAttributes) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +188,7 @@ var xxx_messageInfo_NonResourceAttributes proto.InternalMessageInfo func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } func (*NonResourceRule) ProtoMessage() {} func (*NonResourceRule) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{3} + return fileDescriptor_aafd0e5e70cec678, []int{5} } func (m *NonResourceRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +216,7 @@ var xxx_messageInfo_NonResourceRule proto.InternalMessageInfo func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } func (*ResourceAttributes) ProtoMessage() {} func (*ResourceAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{4} + return fileDescriptor_aafd0e5e70cec678, []int{6} } func (m *ResourceAttributes) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -187,7 +244,7 @@ var xxx_messageInfo_ResourceAttributes proto.InternalMessageInfo func (m *ResourceRule) Reset() { *m = ResourceRule{} } func (*ResourceRule) ProtoMessage() {} func (*ResourceRule) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{5} + return fileDescriptor_aafd0e5e70cec678, []int{7} } func (m *ResourceRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -215,7 +272,7 @@ var xxx_messageInfo_ResourceRule proto.InternalMessageInfo func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } func (*SelfSubjectAccessReview) ProtoMessage() {} func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{6} + return fileDescriptor_aafd0e5e70cec678, []int{8} } func (m *SelfSubjectAccessReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -243,7 +300,7 @@ var xxx_messageInfo_SelfSubjectAccessReview proto.InternalMessageInfo func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{7} + return fileDescriptor_aafd0e5e70cec678, []int{9} } func (m *SelfSubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -271,7 +328,7 @@ var xxx_messageInfo_SelfSubjectAccessReviewSpec proto.InternalMessageInfo func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } func (*SelfSubjectRulesReview) ProtoMessage() {} func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{8} + return fileDescriptor_aafd0e5e70cec678, []int{10} } func (m *SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -299,7 +356,7 @@ var xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } 
func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{9} + return fileDescriptor_aafd0e5e70cec678, []int{11} } func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -327,7 +384,7 @@ var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } func (*SubjectAccessReview) ProtoMessage() {} func (*SubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{10} + return fileDescriptor_aafd0e5e70cec678, []int{12} } func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -355,7 +412,7 @@ var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } func (*SubjectAccessReviewSpec) ProtoMessage() {} func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{11} + return fileDescriptor_aafd0e5e70cec678, []int{13} } func (m *SubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -383,7 +440,7 @@ var xxx_messageInfo_SubjectAccessReviewSpec proto.InternalMessageInfo func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } func (*SubjectAccessReviewStatus) ProtoMessage() {} func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{12} + return fileDescriptor_aafd0e5e70cec678, []int{14} } func (m *SubjectAccessReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -411,7 +468,7 @@ var xxx_messageInfo_SubjectAccessReviewStatus proto.InternalMessageInfo func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } func (*SubjectRulesReviewStatus) ProtoMessage() {} func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{13} + return fileDescriptor_aafd0e5e70cec678, []int{15} } func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -438,6 +495,8 @@ var xxx_messageInfo_SubjectRulesReviewStatus proto.InternalMessageInfo func init() { proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authorization.v1.ExtraValue") + proto.RegisterType((*FieldSelectorAttributes)(nil), "k8s.io.api.authorization.v1.FieldSelectorAttributes") + proto.RegisterType((*LabelSelectorAttributes)(nil), "k8s.io.api.authorization.v1.LabelSelectorAttributes") proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.api.authorization.v1.LocalSubjectAccessReview") proto.RegisterType((*NonResourceAttributes)(nil), "k8s.io.api.authorization.v1.NonResourceAttributes") proto.RegisterType((*NonResourceRule)(nil), "k8s.io.api.authorization.v1.NonResourceRule") @@ -455,83 +514,89 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto", fileDescriptor_e50da13573e369bd) -} - -var fileDescriptor_e50da13573e369bd = []byte{ - // 1140 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xf7, 0xfa, 0x4f, 0x62, 0x8f, 0x1b, 0x92, 0x4e, 0x94, 0x66, 0x9b, 0x08, 0x3b, 0x5a, 0x24, - 0x48, 0x45, 0xd9, 0x25, 0x56, 0xdb, 0x44, 0x95, 0x2a, 0x64, 0x2b, 0x11, 0x8a, 0xd4, 0x96, 0x6a, - 0xa2, 0x44, 0xa2, 0x08, 0xc4, 0x78, 0x3d, 0xb1, 0x97, 0xd8, 0xbb, 
0xcb, 0xcc, 0xac, 0x43, 0x38, - 0x55, 0xe2, 0x0b, 0x70, 0xe4, 0xc0, 0x81, 0x6f, 0xc0, 0x05, 0x89, 0x1b, 0x07, 0x0e, 0x28, 0xc7, - 0x1e, 0x8b, 0x84, 0x2c, 0xb2, 0x9c, 0xf9, 0x0e, 0x68, 0x66, 0xc7, 0xde, 0x75, 0xb2, 0x76, 0x13, - 0x0e, 0xed, 0xa5, 0x37, 0xef, 0xfb, 0xfd, 0xde, 0x9b, 0x37, 0xef, 0xdf, 0x3c, 0x83, 0xed, 0xa3, - 0x2d, 0x66, 0x3a, 0x9e, 0x75, 0x14, 0x34, 0x09, 0x75, 0x09, 0x27, 0xcc, 0xea, 0x13, 0xb7, 0xe5, - 0x51, 0x4b, 0x01, 0xd8, 0x77, 0x2c, 0x1c, 0xf0, 0x8e, 0x47, 0x9d, 0x6f, 0x31, 0x77, 0x3c, 0xd7, - 0xea, 0x6f, 0x58, 0x6d, 0xe2, 0x12, 0x8a, 0x39, 0x69, 0x99, 0x3e, 0xf5, 0xb8, 0x07, 0x57, 0x23, - 0xb2, 0x89, 0x7d, 0xc7, 0x1c, 0x23, 0x9b, 0xfd, 0x8d, 0x95, 0x0f, 0xda, 0x0e, 0xef, 0x04, 0x4d, - 0xd3, 0xf6, 0x7a, 0x56, 0xdb, 0x6b, 0x7b, 0x96, 0xd4, 0x69, 0x06, 0x87, 0xf2, 0x4b, 0x7e, 0xc8, - 0x5f, 0x91, 0xad, 0x95, 0x3b, 0xf1, 0xc1, 0x3d, 0x6c, 0x77, 0x1c, 0x97, 0xd0, 0x13, 0xcb, 0x3f, - 0x6a, 0x0b, 0x01, 0xb3, 0x7a, 0x84, 0xe3, 0x14, 0x0f, 0x56, 0xac, 0x49, 0x5a, 0x34, 0x70, 0xb9, - 0xd3, 0x23, 0x17, 0x14, 0xee, 0xbd, 0x4c, 0x81, 0xd9, 0x1d, 0xd2, 0xc3, 0xe7, 0xf5, 0x8c, 0x4d, - 0x00, 0x76, 0xbe, 0xe1, 0x14, 0x1f, 0xe0, 0x6e, 0x40, 0x60, 0x15, 0x14, 0x1c, 0x4e, 0x7a, 0x4c, - 0xd7, 0xd6, 0x72, 0xeb, 0xa5, 0x46, 0x29, 0x1c, 0x54, 0x0b, 0xbb, 0x42, 0x80, 0x22, 0xf9, 0xfd, - 0xe2, 0x0f, 0x3f, 0x55, 0x33, 0xcf, 0xfe, 0x5a, 0xcb, 0x18, 0xbf, 0x64, 0x81, 0xfe, 0xd0, 0xb3, - 0x71, 0x77, 0x2f, 0x68, 0x7e, 0x45, 0x6c, 0x5e, 0xb7, 0x6d, 0xc2, 0x18, 0x22, 0x7d, 0x87, 0x1c, - 0xc3, 0x2f, 0x41, 0x51, 0xdc, 0xac, 0x85, 0x39, 0xd6, 0xb5, 0x35, 0x6d, 0xbd, 0x5c, 0xfb, 0xd0, - 0x8c, 0x63, 0x3a, 0x72, 0xd0, 0xf4, 0x8f, 0xda, 0x42, 0xc0, 0x4c, 0xc1, 0x36, 0xfb, 0x1b, 0xe6, - 0x27, 0xd2, 0xd6, 0x23, 0xc2, 0x71, 0x03, 0x9e, 0x0e, 0xaa, 0x99, 0x70, 0x50, 0x05, 0xb1, 0x0c, - 0x8d, 0xac, 0xc2, 0x03, 0x90, 0x67, 0x3e, 0xb1, 0xf5, 0xac, 0xb4, 0x7e, 0xc7, 0x9c, 0x92, 0x31, - 0x33, 0xc5, 0xc3, 0x3d, 0x9f, 0xd8, 0x8d, 0x6b, 0xea, 0x84, 0xbc, 0xf8, 0x42, 0xd2, 0x1e, 0xfc, - 0x02, 0xcc, 0x30, 0x8e, 0x79, 0xc0, 0xf4, 0x9c, 0xb4, 0x7c, 0xef, 0xca, 0x96, 0xa5, 0x76, 0xe3, - 0x2d, 0x65, 0x7b, 0x26, 0xfa, 0x46, 0xca, 0xaa, 0xf1, 0x19, 0x58, 0x7a, 0xec, 0xb9, 0x88, 0x30, - 0x2f, 0xa0, 0x36, 0xa9, 0x73, 0x4e, 0x9d, 0x66, 0xc0, 0x09, 0x83, 0x6b, 0x20, 0xef, 0x63, 0xde, - 0x91, 0xe1, 0x2a, 0xc5, 0xae, 0x3d, 0xc1, 0xbc, 0x83, 0x24, 0x22, 0x18, 0x7d, 0x42, 0x9b, 0xf2, - 0xca, 0x09, 0xc6, 0x01, 0xa1, 0x4d, 0x24, 0x11, 0xe3, 0x6b, 0x30, 0x9f, 0x30, 0x8e, 0x82, 0xae, - 0xcc, 0xa8, 0x80, 0xc6, 0x32, 0x2a, 0x34, 0x18, 0x8a, 0xe4, 0xf0, 0x01, 0x98, 0x77, 0x63, 0x9d, - 0x7d, 0xf4, 0x90, 0xe9, 0x59, 0x49, 0x5d, 0x0c, 0x07, 0xd5, 0xa4, 0x39, 0x01, 0xa1, 0xf3, 0x5c, - 0xe3, 0xb7, 0x2c, 0x80, 0x29, 0xb7, 0xb1, 0x40, 0xc9, 0xc5, 0x3d, 0xc2, 0x7c, 0x6c, 0x13, 0x75, - 0xa5, 0xeb, 0xca, 0xe1, 0xd2, 0xe3, 0x21, 0x80, 0x62, 0xce, 0xcb, 0x2f, 0x07, 0xdf, 0x01, 0x85, - 0x36, 0xf5, 0x02, 0x5f, 0x26, 0xa6, 0xd4, 0x98, 0x53, 0x94, 0xc2, 0xc7, 0x42, 0x88, 0x22, 0x0c, - 0xde, 0x02, 0xb3, 0x7d, 0x42, 0x99, 0xe3, 0xb9, 0x7a, 0x5e, 0xd2, 0xe6, 0x15, 0x6d, 0xf6, 0x20, - 0x12, 0xa3, 0x21, 0x0e, 0x6f, 0x83, 0x22, 0x55, 0x8e, 0xeb, 0x05, 0xc9, 0x5d, 0x50, 0xdc, 0xe2, - 0x28, 0x82, 0x23, 0x06, 0xbc, 0x0b, 0xca, 0x2c, 0x68, 0x8e, 0x14, 0x66, 0xa4, 0xc2, 0xa2, 0x52, - 0x28, 0xef, 0xc5, 0x10, 0x4a, 0xf2, 0xc4, 0xb5, 0xc4, 0x1d, 0xf5, 0xd9, 0xf1, 0x6b, 0x89, 0x10, - 0x20, 0x89, 0x18, 0xbf, 0x6b, 0xe0, 0xda, 0xd5, 0x32, 0xf6, 0x3e, 0x28, 0x61, 0xdf, 0x91, 0xd7, - 0x1e, 0xe6, 0x6a, 0x4e, 0xc4, 0xb5, 0xfe, 0x64, 0x37, 0x12, 0xa2, 0x18, 0x17, 0xe4, 0xa1, 
0x33, - 0xa2, 0xa4, 0x47, 0xe4, 0xe1, 0x91, 0x0c, 0xc5, 0x38, 0xdc, 0x04, 0x73, 0xc3, 0x0f, 0x99, 0x24, - 0x3d, 0x2f, 0x15, 0xae, 0x87, 0x83, 0xea, 0x1c, 0x4a, 0x02, 0x68, 0x9c, 0x67, 0xfc, 0x9a, 0x05, - 0xcb, 0x7b, 0xa4, 0x7b, 0xf8, 0x7a, 0x66, 0xc1, 0xd3, 0xb1, 0x59, 0xb0, 0x35, 0xbd, 0x63, 0xd3, - 0xbd, 0x7c, 0x6d, 0xf3, 0xe0, 0xc7, 0x2c, 0x58, 0x9d, 0xe2, 0x13, 0x3c, 0x06, 0x90, 0x5e, 0x68, - 0x2f, 0x15, 0x47, 0x6b, 0xaa, 0x2f, 0x17, 0xbb, 0xb2, 0x71, 0x23, 0x1c, 0x54, 0x53, 0xba, 0x15, - 0xa5, 0x1c, 0x01, 0xbf, 0xd3, 0xc0, 0x92, 0x9b, 0x36, 0xa9, 0x54, 0x98, 0x6b, 0x53, 0x0f, 0x4f, - 0x9d, 0x71, 0x8d, 0x9b, 0xe1, 0xa0, 0x9a, 0x3e, 0xfe, 0x50, 0xfa, 0x59, 0xe2, 0x95, 0xb9, 0x91, - 0x08, 0x8f, 0x68, 0x90, 0x57, 0x57, 0x57, 0x9f, 0x8e, 0xd5, 0xd5, 0xe6, 0x65, 0xeb, 0x2a, 0xe1, - 0xe4, 0xc4, 0xb2, 0xfa, 0xfc, 0x5c, 0x59, 0xdd, 0xbd, 0x4c, 0x59, 0x25, 0x0d, 0x4f, 0xaf, 0xaa, - 0x47, 0x60, 0x65, 0xb2, 0x43, 0x57, 0x1e, 0xce, 0xc6, 0xcf, 0x59, 0xb0, 0xf8, 0xe6, 0x99, 0xbf, - 0x4a, 0x5b, 0xff, 0x91, 0x07, 0xcb, 0x6f, 0x5a, 0x7a, 0xd2, 0xa2, 0x13, 0x30, 0x42, 0xd5, 0x33, - 0x3e, 0x4a, 0xce, 0x3e, 0x23, 0x14, 0x49, 0x04, 0x1a, 0x60, 0xa6, 0x1d, 0xbd, 0x6e, 0xd1, 0xfb, - 0x03, 0x44, 0x80, 0xd5, 0xd3, 0xa6, 0x10, 0xd8, 0x02, 0x05, 0x22, 0xf6, 0x56, 0xbd, 0xb0, 0x96, - 0x5b, 0x2f, 0xd7, 0x3e, 0xfa, 0x3f, 0x95, 0x61, 0xca, 0xcd, 0x77, 0xc7, 0xe5, 0xf4, 0x24, 0x5e, - 0x27, 0xa4, 0x0c, 0x45, 0xc6, 0xe1, 0xdb, 0x20, 0x17, 0x38, 0x2d, 0xf5, 0xda, 0x97, 0x15, 0x25, - 0xb7, 0xbf, 0xbb, 0x8d, 0x84, 0x7c, 0x05, 0xab, 0xe5, 0x59, 0x9a, 0x80, 0x0b, 0x20, 0x77, 0x44, - 0x4e, 0xa2, 0x86, 0x42, 0xe2, 0x27, 0x7c, 0x00, 0x0a, 0x7d, 0xb1, 0x57, 0xab, 0xf8, 0xbe, 0x37, - 0xd5, 0xc9, 0x78, 0x0d, 0x47, 0x91, 0xd6, 0xfd, 0xec, 0x96, 0x66, 0xfc, 0xa9, 0x81, 0x9b, 0x13, - 0xcb, 0x4f, 0xac, 0x3b, 0xb8, 0xdb, 0xf5, 0x8e, 0x49, 0x4b, 0x1e, 0x5b, 0x8c, 0xd7, 0x9d, 0x7a, - 0x24, 0x46, 0x43, 0x1c, 0xbe, 0x0b, 0x66, 0x5a, 0xc4, 0x75, 0x48, 0x4b, 0x2e, 0x46, 0xc5, 0xb8, - 0x72, 0xb7, 0xa5, 0x14, 0x29, 0x54, 0xf0, 0x28, 0xc1, 0xcc, 0x73, 0xd5, 0x2a, 0x36, 0xe2, 0x21, - 0x29, 0x45, 0x0a, 0x85, 0x75, 0x30, 0x4f, 0x84, 0x9b, 0xd2, 0xff, 0x1d, 0x4a, 0xbd, 0x61, 0x46, - 0x97, 0x95, 0xc2, 0xfc, 0xce, 0x38, 0x8c, 0xce, 0xf3, 0x8d, 0x7f, 0xb3, 0x40, 0x9f, 0x34, 0xda, - 0xe0, 0x61, 0xbc, 0x8b, 0x48, 0x50, 0xae, 0x43, 0xe5, 0xda, 0xad, 0x4b, 0x35, 0x88, 0xd0, 0x68, - 0x2c, 0x29, 0x47, 0xe6, 0x92, 0xd2, 0xc4, 0xea, 0x22, 0x3f, 0x21, 0x05, 0x0b, 0xee, 0xf8, 0xce, - 0x1c, 0x2d, 0x55, 0xe5, 0xda, 0xed, 0xcb, 0xb6, 0x83, 0x3c, 0x4d, 0x57, 0xa7, 0x2d, 0x9c, 0x03, - 0x18, 0xba, 0x60, 0x1f, 0xd6, 0x00, 0x70, 0x5c, 0xdb, 0xeb, 0xf9, 0x5d, 0xc2, 0x89, 0x0c, 0x5b, - 0x31, 0x9e, 0x83, 0xbb, 0x23, 0x04, 0x25, 0x58, 0x69, 0xf1, 0xce, 0x5f, 0x2d, 0xde, 0x8d, 0xfa, - 0xe9, 0x59, 0x25, 0xf3, 0xfc, 0xac, 0x92, 0x79, 0x71, 0x56, 0xc9, 0x3c, 0x0b, 0x2b, 0xda, 0x69, - 0x58, 0xd1, 0x9e, 0x87, 0x15, 0xed, 0x45, 0x58, 0xd1, 0xfe, 0x0e, 0x2b, 0xda, 0xf7, 0xff, 0x54, - 0x32, 0x4f, 0x57, 0xa7, 0xfc, 0x53, 0xfe, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xea, 0x67, 0x63, 0x89, - 0x60, 0x0f, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/authorization/v1/generated.proto", fileDescriptor_aafd0e5e70cec678) +} + +var fileDescriptor_aafd0e5e70cec678 = []byte{ + // 1247 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcf, 0x6f, 0x1b, 0xc5, + 0x17, 0xf7, 0xfa, 0x47, 0x62, 0x8f, 0xe3, 0x6f, 0xd2, 0xc9, 0x37, 0xcd, 0x36, 0x11, 0x76, 0x64, + 0x24, 0x48, 0xd5, 0xb2, 0x26, 0x51, 0xdb, 0x44, 0x95, 0x0a, 0xf2, 0xaa, 0x01, 0x45, 
0x4a, 0x4b, + 0x35, 0x51, 0x22, 0x51, 0x04, 0x62, 0xbc, 0x9e, 0xd8, 0x4b, 0xec, 0xdd, 0xed, 0xcc, 0xac, 0xd3, + 0x70, 0xaa, 0xc4, 0x3f, 0xc0, 0x91, 0x43, 0x0f, 0xfc, 0x07, 0x5c, 0x90, 0xb8, 0x73, 0x40, 0x11, + 0xa7, 0x1e, 0x8b, 0x84, 0x2c, 0x62, 0xce, 0xfc, 0x0f, 0x68, 0x66, 0xc7, 0xde, 0xdd, 0xc4, 0x76, + 0x6d, 0x0e, 0x94, 0x43, 0x6f, 0x9e, 0xf7, 0x79, 0xbf, 0xe7, 0xbd, 0xb7, 0x6f, 0x0c, 0x6e, 0x1c, + 0x6f, 0x33, 0xc3, 0x76, 0x2b, 0xd8, 0xb3, 0x2b, 0xd8, 0xe7, 0x4d, 0x97, 0xda, 0x5f, 0x63, 0x6e, + 0xbb, 0x4e, 0xa5, 0xb3, 0x51, 0x69, 0x10, 0x87, 0x50, 0xcc, 0x49, 0xdd, 0xf0, 0xa8, 0xcb, 0x5d, + 0xb8, 0x1a, 0x30, 0x1b, 0xd8, 0xb3, 0x8d, 0x18, 0xb3, 0xd1, 0xd9, 0x58, 0x79, 0xaf, 0x61, 0xf3, + 0xa6, 0x5f, 0x33, 0x2c, 0xb7, 0x5d, 0x69, 0xb8, 0x0d, 0xb7, 0x22, 0x65, 0x6a, 0xfe, 0x91, 0x3c, + 0xc9, 0x83, 0xfc, 0x15, 0xe8, 0x5a, 0xb9, 0x15, 0x1a, 0x6e, 0x63, 0xab, 0x69, 0x3b, 0x84, 0x9e, + 0x56, 0xbc, 0xe3, 0x86, 0x20, 0xb0, 0x4a, 0x9b, 0x70, 0x3c, 0xc4, 0x83, 0x95, 0xca, 0x28, 0x29, + 0xea, 0x3b, 0xdc, 0x6e, 0x93, 0x4b, 0x02, 0x77, 0x5e, 0x25, 0xc0, 0xac, 0x26, 0x69, 0xe3, 0x8b, + 0x72, 0xe5, 0x2d, 0x00, 0x76, 0x9e, 0x72, 0x8a, 0x0f, 0x71, 0xcb, 0x27, 0xb0, 0x04, 0x32, 0x36, + 0x27, 0x6d, 0xa6, 0x6b, 0x6b, 0xa9, 0xf5, 0x9c, 0x99, 0xeb, 0x75, 0x4b, 0x99, 0x5d, 0x41, 0x40, + 0x01, 0xfd, 0x6e, 0xf6, 0xbb, 0xef, 0x4b, 0x89, 0x67, 0xbf, 0xaf, 0x25, 0xca, 0xbf, 0x6a, 0x60, + 0xf9, 0x23, 0x9b, 0xb4, 0xea, 0xfb, 0xa4, 0x45, 0x2c, 0xee, 0xd2, 0x2a, 0xe7, 0xd4, 0xae, 0xf9, + 0x9c, 0x30, 0x78, 0x1b, 0xe4, 0x29, 0x3e, 0xe9, 0x03, 0xba, 0xb6, 0xa6, 0xad, 0xe7, 0xcc, 0xc5, + 0xb3, 0x6e, 0x29, 0xd1, 0xeb, 0x96, 0xf2, 0x28, 0x84, 0x50, 0x94, 0x0f, 0x3e, 0x05, 0x73, 0x94, + 0x3c, 0xf1, 0x6d, 0x4a, 0xda, 0xc4, 0xe1, 0x4c, 0x4f, 0xae, 0xa5, 0xd6, 0xf3, 0x9b, 0x1f, 0x18, + 0xe1, 0x6d, 0x0c, 0x42, 0x33, 0xbc, 0xe3, 0x86, 0x20, 0x30, 0x43, 0x64, 0xd0, 0xe8, 0x6c, 0x18, + 0x31, 0x5f, 0x50, 0xa8, 0xc6, 0xfc, 0xbf, 0xb2, 0x3b, 0x17, 0x21, 0x32, 0x14, 0xb3, 0x24, 0x83, + 0xd9, 0xc3, 0x35, 0xd2, 0xfa, 0x8f, 0x04, 0x13, 0xf3, 0x65, 0xda, 0x60, 0x7e, 0x4c, 0x02, 0x7d, + 0xcf, 0xb5, 0x70, 0x6b, 0xdf, 0xaf, 0x7d, 0x45, 0x2c, 0x5e, 0xb5, 0x2c, 0xc2, 0x18, 0x22, 0x1d, + 0x9b, 0x9c, 0xc0, 0x2f, 0x41, 0x56, 0x18, 0xa9, 0x63, 0x8e, 0x65, 0x28, 0xf9, 0xcd, 0xf7, 0x27, + 0x73, 0xe9, 0x13, 0xa9, 0xeb, 0x01, 0xe1, 0xd8, 0x84, 0xca, 0x09, 0x10, 0xd2, 0xd0, 0x40, 0x2b, + 0x3c, 0x04, 0x69, 0xe6, 0x11, 0x4b, 0x4f, 0x4a, 0xed, 0xb7, 0x8c, 0x31, 0xbd, 0x64, 0x0c, 0xf1, + 0x70, 0xdf, 0x23, 0x96, 0x39, 0xa7, 0x2c, 0xa4, 0xc5, 0x09, 0x49, 0x7d, 0xf0, 0x0b, 0x30, 0xc3, + 0x38, 0xe6, 0x3e, 0xd3, 0x53, 0x52, 0xf3, 0x9d, 0xa9, 0x35, 0x4b, 0x69, 0xf3, 0x7f, 0x4a, 0xf7, + 0x4c, 0x70, 0x46, 0x4a, 0x6b, 0xf9, 0x33, 0xb0, 0xf4, 0xd0, 0x75, 0x10, 0x61, 0xae, 0x4f, 0x2d, + 0x12, 0x29, 0x80, 0x35, 0x90, 0xf6, 0x30, 0x6f, 0xaa, 0x9b, 0x1f, 0xb8, 0xf6, 0x08, 0xf3, 0x26, + 0x92, 0x88, 0xe0, 0xe8, 0x10, 0x5a, 0x93, 0x21, 0x47, 0x38, 0x0e, 0x09, 0xad, 0x21, 0x89, 0x94, + 0x9f, 0x80, 0xf9, 0x88, 0x72, 0xe4, 0xb7, 0x64, 0xaf, 0x09, 0x28, 0xd6, 0x6b, 0x42, 0x82, 0xa1, + 0x80, 0x0e, 0xef, 0x81, 0x79, 0x27, 0x94, 0x39, 0x40, 0x7b, 0x41, 0x11, 0xe5, 0xcc, 0xc5, 0x5e, + 0xb7, 0x14, 0x55, 0x27, 0x20, 0x74, 0x91, 0xb7, 0xfc, 0x3c, 0x0d, 0xe0, 0x90, 0x68, 0x2a, 0x20, + 0xe7, 0xe0, 0x36, 0x61, 0x1e, 0xb6, 0x88, 0x0a, 0xe9, 0x8a, 0x72, 0x38, 0xf7, 0xb0, 0x0f, 0xa0, + 0x90, 0xe7, 0xd5, 0xc1, 0xc1, 0xb7, 0x41, 0xa6, 0x41, 0x5d, 0xdf, 0x93, 0x17, 0x93, 0x33, 0x0b, + 0x8a, 0x25, 0xf3, 0xb1, 0x20, 0xa2, 0x00, 0x83, 0xd7, 0xc1, 0x6c, 0x87, 0x50, 0x66, 0xbb, 0x8e, + 0x9e, 0x96, 
0x6c, 0xf3, 0x8a, 0x6d, 0xf6, 0x30, 0x20, 0xa3, 0x3e, 0x0e, 0x6f, 0x82, 0x2c, 0x55, + 0x8e, 0xeb, 0x19, 0xc9, 0xbb, 0xa0, 0x78, 0xb3, 0x83, 0x0c, 0x0e, 0x38, 0x44, 0x7f, 0x32, 0xbf, + 0x36, 0x10, 0x98, 0x89, 0xf7, 0xe7, 0x7e, 0x08, 0xa1, 0x28, 0x9f, 0x08, 0x4b, 0xc4, 0xa8, 0xcf, + 0xc6, 0xc3, 0x12, 0x29, 0x40, 0x12, 0x81, 0x6d, 0x50, 0x38, 0x8a, 0x0e, 0x15, 0x3d, 0x3b, 0x41, + 0x45, 0x8f, 0x18, 0x89, 0xe6, 0x95, 0x5e, 0xb7, 0x54, 0x88, 0xcf, 0xa8, 0xb8, 0x76, 0x61, 0xae, + 0x15, 0x6d, 0x7b, 0x3d, 0x37, 0x81, 0xb9, 0x11, 0x43, 0x2b, 0x30, 0x17, 0x9f, 0x22, 0x71, 0xed, + 0xe5, 0x9f, 0x35, 0x30, 0x37, 0x5d, 0x3d, 0xde, 0x00, 0x39, 0xec, 0xd9, 0xf2, 0x52, 0xfb, 0x95, + 0x58, 0x10, 0x55, 0x53, 0x7d, 0xb4, 0x1b, 0x10, 0x51, 0x88, 0x0b, 0xe6, 0x7e, 0xaa, 0x45, 0xc3, + 0x0e, 0x98, 0xfb, 0x26, 0x19, 0x0a, 0x71, 0xb8, 0x05, 0x0a, 0xfd, 0x83, 0x2c, 0x41, 0x3d, 0x2d, + 0x05, 0x64, 0x10, 0x28, 0x0a, 0xa0, 0x38, 0x5f, 0xf9, 0xa7, 0x24, 0x58, 0xde, 0x27, 0xad, 0xa3, + 0xd7, 0x33, 0xe9, 0x1e, 0xc7, 0x26, 0xdd, 0xf6, 0xf8, 0x79, 0x34, 0xdc, 0xcb, 0xd7, 0x36, 0xed, + 0x9e, 0x27, 0xc1, 0xea, 0x18, 0x9f, 0xe0, 0x09, 0x80, 0xf4, 0xd2, 0xf0, 0x50, 0x79, 0xac, 0x8c, + 0xf5, 0xe5, 0xf2, 0xcc, 0x31, 0xaf, 0xf6, 0xba, 0xa5, 0x21, 0xb3, 0x08, 0x0d, 0x31, 0x01, 0xbf, + 0xd1, 0xc0, 0x92, 0x33, 0x6c, 0x0e, 0xab, 0x34, 0x6f, 0x8e, 0x35, 0x3e, 0x74, 0x82, 0x9b, 0xd7, + 0x7a, 0xdd, 0xd2, 0xf0, 0xe1, 0x8e, 0x86, 0xdb, 0x12, 0xdf, 0xd0, 0xab, 0x91, 0xf4, 0x88, 0x06, + 0xf9, 0xf7, 0xea, 0xea, 0xd3, 0x58, 0x5d, 0x6d, 0x4d, 0x5a, 0x57, 0x11, 0x27, 0x47, 0x96, 0xd5, + 0xe7, 0x17, 0xca, 0xea, 0xf6, 0x24, 0x65, 0x15, 0x55, 0x3c, 0xbe, 0xaa, 0x1e, 0x80, 0x95, 0xd1, + 0x0e, 0x4d, 0xfd, 0xe9, 0x29, 0xff, 0x90, 0x04, 0x8b, 0x6f, 0x96, 0x98, 0x69, 0xda, 0xfa, 0x97, + 0x34, 0x58, 0x7e, 0xd3, 0xd2, 0xa3, 0xd6, 0x38, 0x9f, 0x11, 0xaa, 0x96, 0x94, 0xc1, 0xe5, 0x1c, + 0x30, 0x42, 0x91, 0x44, 0x60, 0x19, 0xcc, 0x34, 0x82, 0xaf, 0x5b, 0xf0, 0xfd, 0x01, 0x22, 0xc1, + 0xea, 0xd3, 0xa6, 0x10, 0x58, 0x07, 0x19, 0x22, 0xde, 0x4b, 0x7a, 0x46, 0xee, 0xf3, 0x1f, 0xfe, + 0x93, 0xca, 0x30, 0xe4, 0x8b, 0x6b, 0xc7, 0xe1, 0xf4, 0x34, 0x5c, 0x96, 0x24, 0x0d, 0x05, 0xca, + 0xe1, 0x5b, 0x20, 0xe5, 0xdb, 0x75, 0xb5, 0xcb, 0xe4, 0x15, 0x4b, 0xea, 0x60, 0xf7, 0x3e, 0x12, + 0xf4, 0x15, 0xac, 0x1e, 0x6d, 0x52, 0x05, 0x5c, 0x00, 0xa9, 0x63, 0x72, 0x1a, 0x34, 0x14, 0x12, + 0x3f, 0xe1, 0x3d, 0x90, 0xe9, 0x88, 0xf7, 0x9c, 0xca, 0xef, 0xbb, 0x63, 0x9d, 0x0c, 0x9f, 0x7f, + 0x28, 0x90, 0xba, 0x9b, 0xdc, 0xd6, 0xca, 0xbf, 0x69, 0xe0, 0xda, 0xc8, 0xf2, 0x13, 0xcb, 0x1c, + 0x6e, 0xb5, 0xdc, 0x13, 0x52, 0x97, 0x66, 0xb3, 0xe1, 0x32, 0x57, 0x0d, 0xc8, 0xa8, 0x8f, 0xc3, + 0x77, 0xc0, 0x4c, 0x9d, 0x38, 0x36, 0xa9, 0xcb, 0xb5, 0x2f, 0x1b, 0x56, 0xee, 0x7d, 0x49, 0x45, + 0x0a, 0x15, 0x7c, 0x94, 0x60, 0xe6, 0x3a, 0x6a, 0xd1, 0x1c, 0xf0, 0x21, 0x49, 0x45, 0x0a, 0x85, + 0x55, 0x30, 0x4f, 0x84, 0x9b, 0xd2, 0xff, 0x1d, 0x4a, 0xdd, 0xfe, 0x8d, 0x2e, 0x2b, 0x81, 0xf9, + 0x9d, 0x38, 0x8c, 0x2e, 0xf2, 0x97, 0xff, 0x4a, 0x02, 0x7d, 0xd4, 0x68, 0x83, 0x47, 0xe1, 0x2e, + 0x22, 0x41, 0xb9, 0x0e, 0xe5, 0x37, 0xaf, 0x4f, 0xd4, 0x20, 0x42, 0xc2, 0x5c, 0x52, 0x8e, 0x14, + 0xa2, 0xd4, 0xc8, 0xea, 0x22, 0x8f, 0x90, 0x82, 0x05, 0x27, 0xfe, 0x22, 0xe8, 0xbf, 0x11, 0x6f, + 0x4e, 0xda, 0x0e, 0xd2, 0x9a, 0xae, 0xac, 0x2d, 0x5c, 0x00, 0x18, 0xba, 0xa4, 0x1f, 0x6e, 0x02, + 0x60, 0x3b, 0x96, 0xdb, 0xf6, 0x5a, 0x84, 0x13, 0x99, 0xb6, 0x6c, 0x38, 0x07, 0x77, 0x07, 0x08, + 0x8a, 0x70, 0x0d, 0xcb, 0x77, 0x7a, 0xba, 0x7c, 0x9b, 0xd5, 0xb3, 0xf3, 0x62, 0xe2, 0xc5, 0x79, + 0x31, 0xf1, 0xf2, 0xbc, 0x98, 0x78, 
0xd6, 0x2b, 0x6a, 0x67, 0xbd, 0xa2, 0xf6, 0xa2, 0x57, 0xd4, + 0x5e, 0xf6, 0x8a, 0xda, 0x1f, 0xbd, 0xa2, 0xf6, 0xed, 0x9f, 0xc5, 0xc4, 0xe3, 0xd5, 0x31, 0xff, + 0xd0, 0xfc, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x8c, 0x77, 0x0f, 0xbf, 0x11, 0x00, 0x00, } func (m ExtraValue) Marshal() (dAtA []byte, err error) { @@ -566,6 +631,90 @@ func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *FieldSelectorAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FieldSelectorAttributes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FieldSelectorAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Requirements) > 0 { + for iNdEx := len(m.Requirements) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requirements[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.RawSelector) + copy(dAtA[i:], m.RawSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RawSelector))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LabelSelectorAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelSelectorAttributes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelSelectorAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Requirements) > 0 { + for iNdEx := len(m.Requirements) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requirements[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.RawSelector) + copy(dAtA[i:], m.RawSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RawSelector))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -713,6 +862,30 @@ func (m *ResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.FieldSelector != nil { + { + size, err := m.FieldSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) @@ -1278,6 +1451,40 @@ func (m ExtraValue) Size() (n int) { return n } +func (m *FieldSelectorAttributes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RawSelector) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Requirements) > 0 { + for _, e := range m.Requirements { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + 
return n +} + +func (m *LabelSelectorAttributes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RawSelector) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Requirements) > 0 { + for _, e := range m.Requirements { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *LocalSubjectAccessReview) Size() (n int) { if m == nil { return 0 @@ -1347,6 +1554,14 @@ func (m *ResourceAttributes) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) + if m.FieldSelector != nil { + l = m.FieldSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -1537,6 +1752,38 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *FieldSelectorAttributes) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequirements := "[]FieldSelectorRequirement{" + for _, f := range this.Requirements { + repeatedStringForRequirements += fmt.Sprintf("%v", f) + "," + } + repeatedStringForRequirements += "}" + s := strings.Join([]string{`&FieldSelectorAttributes{`, + `RawSelector:` + fmt.Sprintf("%v", this.RawSelector) + `,`, + `Requirements:` + repeatedStringForRequirements + `,`, + `}`, + }, "") + return s +} +func (this *LabelSelectorAttributes) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequirements := "[]LabelSelectorRequirement{" + for _, f := range this.Requirements { + repeatedStringForRequirements += fmt.Sprintf("%v", f) + "," + } + repeatedStringForRequirements += "}" + s := strings.Join([]string{`&LabelSelectorAttributes{`, + `RawSelector:` + fmt.Sprintf("%v", this.RawSelector) + `,`, + `Requirements:` + repeatedStringForRequirements + `,`, + `}`, + }, "") + return s +} func (this *LocalSubjectAccessReview) String() string { if this == nil { return "nil" @@ -1583,6 +1830,8 @@ func (this *ResourceAttributes) String() string { `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `FieldSelector:` + strings.Replace(this.FieldSelector.String(), "FieldSelectorAttributes", "FieldSelectorAttributes", 1) + `,`, + `LabelSelector:` + strings.Replace(this.LabelSelector.String(), "LabelSelectorAttributes", "LabelSelectorAttributes", 1) + `,`, `}`, }, "") return s @@ -1808,6 +2057,238 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } return nil } +func (m *FieldSelectorAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FieldSelectorAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FieldSelectorAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RawSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RawSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requirements", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requirements = append(m.Requirements, v1.FieldSelectorRequirement{}) + if err := m.Requirements[len(m.Requirements)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelectorAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelectorAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelectorAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RawSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RawSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requirements", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requirements = append(m.Requirements, v1.LabelSelectorRequirement{}) + if err := m.Requirements[len(m.Requirements)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -2438,6 +2919,78 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FieldSelector == nil { + m.FieldSelector = &FieldSelectorAttributes{} + } + if err := m.FieldSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = &LabelSelectorAttributes{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/authorization/v1/generated.proto b/vendor/k8s.io/api/authorization/v1/generated.proto index 47d3a57a07..37b05b8552 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.proto +++ b/vendor/k8s.io/api/authorization/v1/generated.proto @@ -37,6 +37,60 @@ message ExtraValue { repeated string items = 1; } +// FieldSelectorAttributes indicates a field limited access. +// Webhook authors are encouraged to +// * ensure rawSelector and requirements are not both set +// * consider the requirements field if set +// * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. +// For the *SubjectAccessReview endpoints of the kube-apiserver: +// * If rawSelector is empty and requirements are empty, the request is not limited. 
+// * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. +// * If rawSelector is empty and requirements are present, the requirements should be honored +// * If rawSelector is present and requirements are present, the request is invalid. +message FieldSelectorAttributes { + // rawSelector is the serialization of a field selector that would be included in a query parameter. + // Webhook implementations are encouraged to ignore rawSelector. + // The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present. + // +optional + optional string rawSelector = 1; + + // requirements is the parsed interpretation of a field selector. + // All requirements must be met for a resource instance to match the selector. + // Webhook implementations should handle requirements, but how to handle them is up to the webhook. + // Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements + // are not understood. + // +optional + // +listType=atomic + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.FieldSelectorRequirement requirements = 2; +} + +// LabelSelectorAttributes indicates a label limited access. +// Webhook authors are encouraged to +// * ensure rawSelector and requirements are not both set +// * consider the requirements field if set +// * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. +// For the *SubjectAccessReview endpoints of the kube-apiserver: +// * If rawSelector is empty and requirements are empty, the request is not limited. +// * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. +// * If rawSelector is empty and requirements are present, the requirements should be honored +// * If rawSelector is present and requirements are present, the request is invalid. +message LabelSelectorAttributes { + // rawSelector is the serialization of a field selector that would be included in a query parameter. + // Webhook implementations are encouraged to ignore rawSelector. + // The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present. + // +optional + optional string rawSelector = 1; + + // requirements is the parsed interpretation of a label selector. + // All requirements must be met for a resource instance to match the selector. + // Webhook implementations should handle requirements, but how to handle them is up to the webhook. + // Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements + // are not understood. + // +optional + // +listType=atomic + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement requirements = 2; +} + // LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. // Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions // checking. @@ -44,7 +98,7 @@ message LocalSubjectAccessReview { // Standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace // you made the request against. If empty, it is defaulted. @@ -69,11 +123,13 @@ message NonResourceAttributes { // NonResourceRule holds information that describes a rule for the non-resource message NonResourceRule { // Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all. + // +listType=atomic repeated string verbs = 1; // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, // final step in the path. "*" means all. // +optional + // +listType=atomic repeated string nonResourceURLs = 2; } @@ -109,26 +165,44 @@ message ResourceAttributes { // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. // +optional optional string name = 7; + + // fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it. + // + // This field is alpha-level. To use this field, you must enable the + // `AuthorizeWithSelectors` feature gate (disabled by default). + // +optional + optional FieldSelectorAttributes fieldSelector = 8; + + // labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it. + // + // This field is alpha-level. To use this field, you must enable the + // `AuthorizeWithSelectors` feature gate (disabled by default). + // +optional + optional LabelSelectorAttributes labelSelector = 9; } // ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, // may contain duplicates, and possibly be incomplete. message ResourceRule { // Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all. + // +listType=atomic repeated string verbs = 1; // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of // the enumerated resources in any API group will be allowed. "*" means all. // +optional + // +listType=atomic repeated string apiGroups = 2; // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. // +optional + // +listType=atomic repeated string resources = 3; // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. // +optional + // +listType=atomic repeated string resourceNames = 4; } @@ -139,7 +213,7 @@ message SelfSubjectAccessReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. user and groups must be empty optional SelfSubjectAccessReviewSpec spec = 2; @@ -171,7 +245,7 @@ message SelfSubjectRulesReview { // Standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. optional SelfSubjectRulesReviewSpec spec = 2; @@ -192,7 +266,7 @@ message SubjectAccessReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated optional SubjectAccessReviewSpec spec = 2; @@ -220,6 +294,7 @@ message SubjectAccessReviewSpec { // Groups is the groups you're testing for. // +optional + // +listType=atomic repeated string groups = 4; // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer @@ -262,10 +337,12 @@ message SubjectAccessReviewStatus { message SubjectRulesReviewStatus { // ResourceRules is the list of actions the subject is allowed to perform on resources. // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + // +listType=atomic repeated ResourceRule resourceRules = 1; // NonResourceRules is the list of actions the subject is allowed to perform on non-resources. // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + // +listType=atomic repeated NonResourceRule nonResourceRules = 2; // Incomplete is true when the rules returned by this call are incomplete. This is most commonly diff --git a/vendor/k8s.io/api/authorization/v1/types.go b/vendor/k8s.io/api/authorization/v1/types.go index d1fe483f96..36f5fa4107 100644 --- a/vendor/k8s.io/api/authorization/v1/types.go +++ b/vendor/k8s.io/api/authorization/v1/types.go @@ -26,6 +26,7 @@ import ( // +genclient:nonNamespaced // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.6 // SubjectAccessReview checks whether or not a user or group can perform an action. type SubjectAccessReview struct { @@ -47,6 +48,7 @@ type SubjectAccessReview struct { // +genclient:nonNamespaced // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a // spec.namespace means "in all namespaces". Self is a special case, because users should always be able @@ -69,6 +71,7 @@ type SelfSubjectAccessReview struct { // +genclient // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. // Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions @@ -115,6 +118,72 @@ type ResourceAttributes struct { // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. 
// +optional Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"` + // fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it. + // + // This field is alpha-level. To use this field, you must enable the + // `AuthorizeWithSelectors` feature gate (disabled by default). + // +optional + FieldSelector *FieldSelectorAttributes `json:"fieldSelector,omitempty" protobuf:"bytes,8,opt,name=fieldSelector"` + // labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it. + // + // This field is alpha-level. To use this field, you must enable the + // `AuthorizeWithSelectors` feature gate (disabled by default). + // +optional + LabelSelector *LabelSelectorAttributes `json:"labelSelector,omitempty" protobuf:"bytes,9,opt,name=labelSelector"` +} + +// LabelSelectorAttributes indicates a label limited access. +// Webhook authors are encouraged to +// * ensure rawSelector and requirements are not both set +// * consider the requirements field if set +// * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. +// For the *SubjectAccessReview endpoints of the kube-apiserver: +// * If rawSelector is empty and requirements are empty, the request is not limited. +// * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. +// * If rawSelector is empty and requirements are present, the requirements should be honored +// * If rawSelector is present and requirements are present, the request is invalid. +type LabelSelectorAttributes struct { + // rawSelector is the serialization of a field selector that would be included in a query parameter. + // Webhook implementations are encouraged to ignore rawSelector. + // The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present. + // +optional + RawSelector string `json:"rawSelector,omitempty" protobuf:"bytes,1,opt,name=rawSelector"` + + // requirements is the parsed interpretation of a label selector. + // All requirements must be met for a resource instance to match the selector. + // Webhook implementations should handle requirements, but how to handle them is up to the webhook. + // Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements + // are not understood. + // +optional + // +listType=atomic + Requirements []metav1.LabelSelectorRequirement `json:"requirements,omitempty" protobuf:"bytes,2,rep,name=requirements"` +} + +// FieldSelectorAttributes indicates a field limited access. +// Webhook authors are encouraged to +// * ensure rawSelector and requirements are not both set +// * consider the requirements field if set +// * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. +// For the *SubjectAccessReview endpoints of the kube-apiserver: +// * If rawSelector is empty and requirements are empty, the request is not limited. 
+// * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. +// * If rawSelector is empty and requirements are present, the requirements should be honored +// * If rawSelector is present and requirements are present, the request is invalid. +type FieldSelectorAttributes struct { + // rawSelector is the serialization of a field selector that would be included in a query parameter. + // Webhook implementations are encouraged to ignore rawSelector. + // The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present. + // +optional + RawSelector string `json:"rawSelector,omitempty" protobuf:"bytes,1,opt,name=rawSelector"` + + // requirements is the parsed interpretation of a field selector. + // All requirements must be met for a resource instance to match the selector. + // Webhook implementations should handle requirements, but how to handle them is up to the webhook. + // Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements + // are not understood. + // +optional + // +listType=atomic + Requirements []metav1.FieldSelectorRequirement `json:"requirements,omitempty" protobuf:"bytes,2,rep,name=requirements"` } // NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface @@ -143,6 +212,7 @@ type SubjectAccessReviewSpec struct { User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` // Groups is the groups you're testing for. // +optional + // +listType=atomic Groups []string `json:"groups,omitempty" protobuf:"bytes,4,rep,name=groups"` // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer // it needs a reflection here. @@ -197,6 +267,7 @@ type SubjectAccessReviewStatus struct { // +genclient:nonNamespaced // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. // The returned list of actions may be incomplete depending on the server's authorization mode, @@ -232,9 +303,11 @@ type SelfSubjectRulesReviewSpec struct { type SubjectRulesReviewStatus struct { // ResourceRules is the list of actions the subject is allowed to perform on resources. // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + // +listType=atomic ResourceRules []ResourceRule `json:"resourceRules" protobuf:"bytes,1,rep,name=resourceRules"` // NonResourceRules is the list of actions the subject is allowed to perform on non-resources. // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + // +listType=atomic NonResourceRules []NonResourceRule `json:"nonResourceRules" protobuf:"bytes,2,rep,name=nonResourceRules"` // Incomplete is true when the rules returned by this call are incomplete. This is most commonly // encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation. @@ -250,28 +323,34 @@ type SubjectRulesReviewStatus struct { // may contain duplicates, and possibly be incomplete. type ResourceRule struct { // Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all. 
+ // +listType=atomic Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of // the enumerated resources in any API group will be allowed. "*" means all. // +optional + // +listType=atomic APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. // +optional + // +listType=atomic Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. // +optional + // +listType=atomic ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"` } // NonResourceRule holds information that describes a rule for the non-resource type NonResourceRule struct { // Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all. + // +listType=atomic Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, // final step in the path. "*" means all. // +optional + // +listType=atomic NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,2,rep,name=nonResourceURLs"` } diff --git a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go index 93229485cc..dc6b8a89ec 100644 --- a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go @@ -27,6 +27,26 @@ package v1 // Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_FieldSelectorAttributes = map[string]string{ + "": "FieldSelectorAttributes indicates a field limited access. Webhook authors are encouraged to * ensure rawSelector and requirements are not both set * consider the requirements field if set * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. For the *SubjectAccessReview endpoints of the kube-apiserver: * If rawSelector is empty and requirements are empty, the request is not limited. * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. * If rawSelector is empty and requirements are present, the requirements should be honored * If rawSelector is present and requirements are present, the request is invalid.", + "rawSelector": "rawSelector is the serialization of a field selector that would be included in a query parameter. Webhook implementations are encouraged to ignore rawSelector. The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present.", + "requirements": "requirements is the parsed interpretation of a field selector. All requirements must be met for a resource instance to match the selector. 
Webhook implementations should handle requirements, but how to handle them is up to the webhook. Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements are not understood.", +} + +func (FieldSelectorAttributes) SwaggerDoc() map[string]string { + return map_FieldSelectorAttributes +} + +var map_LabelSelectorAttributes = map[string]string{ + "": "LabelSelectorAttributes indicates a label limited access. Webhook authors are encouraged to * ensure rawSelector and requirements are not both set * consider the requirements field if set * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. For the *SubjectAccessReview endpoints of the kube-apiserver: * If rawSelector is empty and requirements are empty, the request is not limited. * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. * If rawSelector is empty and requirements are present, the requirements should be honored * If rawSelector is present and requirements are present, the request is invalid.", + "rawSelector": "rawSelector is the serialization of a field selector that would be included in a query parameter. Webhook implementations are encouraged to ignore rawSelector. The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present.", + "requirements": "requirements is the parsed interpretation of a label selector. All requirements must be met for a resource instance to match the selector. Webhook implementations should handle requirements, but how to handle them is up to the webhook. Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements are not understood.", +} + +func (LabelSelectorAttributes) SwaggerDoc() map[string]string { + return map_LabelSelectorAttributes +} + var map_LocalSubjectAccessReview = map[string]string{ "": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -59,14 +79,16 @@ func (NonResourceRule) SwaggerDoc() map[string]string { } var map_ResourceAttributes = map[string]string{ - "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", - "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", - "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", - "group": "Group is the API Group of the Resource. \"*\" means all.", - "version": "Version is the API Version of the Resource. \"*\" means all.", - "resource": "Resource is one of the existing resource types. 
\"*\" means all.", - "subresource": "Subresource is one of the existing resource types. \"\" means none.", - "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", + "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", + "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", + "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", + "group": "Group is the API Group of the Resource. \"*\" means all.", + "version": "Version is the API Version of the Resource. \"*\" means all.", + "resource": "Resource is one of the existing resource types. \"*\" means all.", + "subresource": "Subresource is one of the existing resource types. \"\" means none.", + "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", + "fieldSelector": "fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it.\n\nThis field is alpha-level. To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default).", + "labelSelector": "labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it.\n\nThis field is alpha-level. To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default).", } func (ResourceAttributes) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go index f1d49eb386..7f040f5c56 100644 --- a/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go @@ -22,6 +22,7 @@ limitations under the License. package v1 import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -45,6 +46,52 @@ func (in ExtraValue) DeepCopy() ExtraValue { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldSelectorAttributes) DeepCopyInto(out *FieldSelectorAttributes) { + *out = *in + if in.Requirements != nil { + in, out := &in.Requirements, &out.Requirements + *out = make([]metav1.FieldSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldSelectorAttributes. +func (in *FieldSelectorAttributes) DeepCopy() *FieldSelectorAttributes { + if in == nil { + return nil + } + out := new(FieldSelectorAttributes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LabelSelectorAttributes) DeepCopyInto(out *LabelSelectorAttributes) { + *out = *in + if in.Requirements != nil { + in, out := &in.Requirements, &out.Requirements + *out = make([]metav1.LabelSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelSelectorAttributes. +func (in *LabelSelectorAttributes) DeepCopy() *LabelSelectorAttributes { + if in == nil { + return nil + } + out := new(LabelSelectorAttributes) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) { *out = *in @@ -118,6 +165,16 @@ func (in *NonResourceRule) DeepCopy() *NonResourceRule { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceAttributes) DeepCopyInto(out *ResourceAttributes) { *out = *in + if in.FieldSelector != nil { + in, out := &in.FieldSelector, &out.FieldSelector + *out = new(FieldSelectorAttributes) + (*in).DeepCopyInto(*out) + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(LabelSelectorAttributes) + (*in).DeepCopyInto(*out) + } return } @@ -201,7 +258,7 @@ func (in *SelfSubjectAccessReviewSpec) DeepCopyInto(out *SelfSubjectAccessReview if in.ResourceAttributes != nil { in, out := &in.ResourceAttributes, &out.ResourceAttributes *out = new(ResourceAttributes) - **out = **in + (*in).DeepCopyInto(*out) } if in.NonResourceAttributes != nil { in, out := &in.NonResourceAttributes, &out.NonResourceAttributes @@ -299,7 +356,7 @@ func (in *SubjectAccessReviewSpec) DeepCopyInto(out *SubjectAccessReviewSpec) { if in.ResourceAttributes != nil { in, out := &in.ResourceAttributes, &out.ResourceAttributes *out = new(ResourceAttributes) - **out = **in + (*in).DeepCopyInto(*out) } if in.NonResourceAttributes != nil { in, out := &in.NonResourceAttributes, &out.NonResourceAttributes diff --git a/vendor/k8s.io/api/authorization/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/authorization/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..b0c0475b48 --- /dev/null +++ b/vendor/k8s.io/api/authorization/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,46 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *LocalSubjectAccessReview) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SelfSubjectAccessReview) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SelfSubjectRulesReview) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SubjectAccessReview) APILifecycleIntroduced() (major, minor int) { + return 1, 6 +} diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go index aadcf82404..5007d1b496 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto +// source: k8s.io/api/authorization/v1beta1/generated.proto package v1beta1 @@ -26,6 +26,7 @@ import ( proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v11 "k8s.io/api/authorization/v1" math "math" math_bits "math/bits" @@ -47,7 +48,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ExtraValue) Reset() { *m = ExtraValue{} } func (*ExtraValue) ProtoMessage() {} func (*ExtraValue) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{0} + return fileDescriptor_8eab727787743457, []int{0} } func (m *ExtraValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +76,7 @@ var xxx_messageInfo_ExtraValue proto.InternalMessageInfo func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } func (*LocalSubjectAccessReview) ProtoMessage() {} func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{1} + return fileDescriptor_8eab727787743457, []int{1} } func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +104,7 @@ var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } func (*NonResourceAttributes) ProtoMessage() {} func (*NonResourceAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{2} + return fileDescriptor_8eab727787743457, []int{2} } func (m *NonResourceAttributes) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +132,7 @@ var xxx_messageInfo_NonResourceAttributes proto.InternalMessageInfo func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } func (*NonResourceRule) ProtoMessage() {} func (*NonResourceRule) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{3} 
+ return fileDescriptor_8eab727787743457, []int{3} } func (m *NonResourceRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +160,7 @@ var xxx_messageInfo_NonResourceRule proto.InternalMessageInfo func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } func (*ResourceAttributes) ProtoMessage() {} func (*ResourceAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{4} + return fileDescriptor_8eab727787743457, []int{4} } func (m *ResourceAttributes) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -187,7 +188,7 @@ var xxx_messageInfo_ResourceAttributes proto.InternalMessageInfo func (m *ResourceRule) Reset() { *m = ResourceRule{} } func (*ResourceRule) ProtoMessage() {} func (*ResourceRule) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{5} + return fileDescriptor_8eab727787743457, []int{5} } func (m *ResourceRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -215,7 +216,7 @@ var xxx_messageInfo_ResourceRule proto.InternalMessageInfo func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } func (*SelfSubjectAccessReview) ProtoMessage() {} func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{6} + return fileDescriptor_8eab727787743457, []int{6} } func (m *SelfSubjectAccessReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -243,7 +244,7 @@ var xxx_messageInfo_SelfSubjectAccessReview proto.InternalMessageInfo func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{7} + return fileDescriptor_8eab727787743457, []int{7} } func (m *SelfSubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -271,7 +272,7 @@ var xxx_messageInfo_SelfSubjectAccessReviewSpec proto.InternalMessageInfo func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } func (*SelfSubjectRulesReview) ProtoMessage() {} func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{8} + return fileDescriptor_8eab727787743457, []int{8} } func (m *SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -299,7 +300,7 @@ var xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{9} + return fileDescriptor_8eab727787743457, []int{9} } func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -327,7 +328,7 @@ var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } func (*SubjectAccessReview) ProtoMessage() {} func (*SubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{10} + return fileDescriptor_8eab727787743457, []int{10} } func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -355,7 +356,7 @@ var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } func (*SubjectAccessReviewSpec) 
ProtoMessage() {} func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{11} + return fileDescriptor_8eab727787743457, []int{11} } func (m *SubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -383,7 +384,7 @@ var xxx_messageInfo_SubjectAccessReviewSpec proto.InternalMessageInfo func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } func (*SubjectAccessReviewStatus) ProtoMessage() {} func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{12} + return fileDescriptor_8eab727787743457, []int{12} } func (m *SubjectAccessReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -411,7 +412,7 @@ var xxx_messageInfo_SubjectAccessReviewStatus proto.InternalMessageInfo func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } func (*SubjectRulesReviewStatus) ProtoMessage() {} func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{13} + return fileDescriptor_8eab727787743457, []int{13} } func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -455,83 +456,86 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto", fileDescriptor_43130d8376f09103) -} - -var fileDescriptor_43130d8376f09103 = []byte{ - // 1143 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xf7, 0xfa, 0x4f, 0x62, 0x8f, 0x1b, 0x92, 0x4e, 0x94, 0x66, 0x1b, 0x84, 0x6d, 0x19, 0x09, - 0x05, 0x51, 0x76, 0x49, 0x54, 0x48, 0x09, 0xf4, 0x10, 0x2b, 0x01, 0x45, 0x6a, 0x4b, 0x35, 0x51, - 0x72, 0xa0, 0x12, 0x30, 0xbb, 0x9e, 0xd8, 0x8b, 0xed, 0xdd, 0x65, 0x66, 0xd6, 0x21, 0x88, 0x43, - 0x8f, 0x1c, 0x39, 0x72, 0xe4, 0xc4, 0x77, 0xe0, 0x82, 0x04, 0xa7, 0x1c, 0x7b, 0x0c, 0x12, 0xb2, - 0xc8, 0xf2, 0x21, 0xb8, 0xa2, 0x99, 0x1d, 0x7b, 0xd7, 0xc9, 0x26, 0x8e, 0x73, 0xa0, 0x97, 0xde, - 0x3c, 0xef, 0xf7, 0x7b, 0x6f, 0xde, 0x7b, 0xf3, 0xde, 0xdb, 0x67, 0xb0, 0xdb, 0x79, 0xc0, 0x0c, - 0xc7, 0x33, 0x3b, 0x81, 0x45, 0xa8, 0x4b, 0x38, 0x61, 0x66, 0x9f, 0xb8, 0x4d, 0x8f, 0x9a, 0x0a, - 0xc0, 0xbe, 0x63, 0xe2, 0x80, 0xb7, 0x3d, 0xea, 0x7c, 0x87, 0xb9, 0xe3, 0xb9, 0x66, 0x7f, 0xcd, - 0x22, 0x1c, 0xaf, 0x99, 0x2d, 0xe2, 0x12, 0x8a, 0x39, 0x69, 0x1a, 0x3e, 0xf5, 0xb8, 0x07, 0x6b, - 0x91, 0x86, 0x81, 0x7d, 0xc7, 0x18, 0xd3, 0x30, 0x94, 0xc6, 0xca, 0xbb, 0x2d, 0x87, 0xb7, 0x03, - 0xcb, 0xb0, 0xbd, 0x9e, 0xd9, 0xf2, 0x5a, 0x9e, 0x29, 0x15, 0xad, 0xe0, 0x50, 0x9e, 0xe4, 0x41, - 0xfe, 0x8a, 0x0c, 0xae, 0xdc, 0x8f, 0x5d, 0xe8, 0x61, 0xbb, 0xed, 0xb8, 0x84, 0x1e, 0x9b, 0x7e, - 0xa7, 0x25, 0x04, 0xcc, 0xec, 0x11, 0x8e, 0xcd, 0xfe, 0x05, 0x37, 0x56, 0xcc, 0xcb, 0xb4, 0x68, - 0xe0, 0x72, 0xa7, 0x47, 0x2e, 0x28, 0x7c, 0x30, 0x49, 0x81, 0xd9, 0x6d, 0xd2, 0xc3, 0xe7, 0xf5, - 0xea, 0x1b, 0x00, 0xec, 0x7c, 0xcb, 0x29, 0x3e, 0xc0, 0xdd, 0x80, 0xc0, 0x2a, 0x28, 0x38, 0x9c, - 0xf4, 0x98, 0xae, 0xd5, 0x72, 0xab, 0xa5, 0x46, 0x29, 0x1c, 0x54, 0x0b, 0xbb, 0x42, 0x80, 0x22, - 0xf9, 0x66, 0xf1, 0xa7, 0x9f, 0xab, 0x99, 0xe7, 0x7f, 0xd5, 0x32, 0xf5, 0xdf, 0xb2, 0x40, 0x7f, - 0xe4, 0xd9, 0xb8, 0xbb, 0x17, 0x58, 0x5f, 0x13, 0x9b, 0x6f, 0xd9, 0x36, 0x61, 0x0c, 0x91, 0xbe, - 0x43, 0x8e, 0xe0, 0x57, 0xa0, 0x28, 0x22, 0x6b, 0x62, 0x8e, 0x75, 0xad, 0xa6, 0xad, 0x96, 0xd7, - 0xdf, 0x33, 0xe2, 0xc4, 0x8e, 0x1c, 0x34, 0xfc, 0x4e, 0x4b, 0x08, 
0x98, 0x21, 0xd8, 0x46, 0x7f, - 0xcd, 0xf8, 0x4c, 0xda, 0x7a, 0x4c, 0x38, 0x6e, 0xc0, 0x93, 0x41, 0x35, 0x13, 0x0e, 0xaa, 0x20, - 0x96, 0xa1, 0x91, 0x55, 0xf8, 0x0c, 0xe4, 0x99, 0x4f, 0x6c, 0x3d, 0x2b, 0xad, 0x7f, 0x68, 0x4c, - 0x7a, 0x36, 0x23, 0xc5, 0xcd, 0x3d, 0x9f, 0xd8, 0x8d, 0x5b, 0xea, 0x9a, 0xbc, 0x38, 0x21, 0x69, - 0x14, 0xda, 0x60, 0x86, 0x71, 0xcc, 0x03, 0xa6, 0xe7, 0xa4, 0xf9, 0x8f, 0x6e, 0x66, 0x5e, 0x9a, - 0x68, 0xbc, 0xa6, 0x2e, 0x98, 0x89, 0xce, 0x48, 0x99, 0xae, 0x3f, 0x03, 0x4b, 0x4f, 0x3c, 0x17, - 0x11, 0xe6, 0x05, 0xd4, 0x26, 0x5b, 0x9c, 0x53, 0xc7, 0x0a, 0x38, 0x61, 0xb0, 0x06, 0xf2, 0x3e, - 0xe6, 0x6d, 0x99, 0xb8, 0x52, 0xec, 0xdf, 0x53, 0xcc, 0xdb, 0x48, 0x22, 0x82, 0xd1, 0x27, 0xd4, - 0x92, 0xc1, 0x27, 0x18, 0x07, 0x84, 0x5a, 0x48, 0x22, 0xf5, 0x6f, 0xc0, 0x7c, 0xc2, 0x38, 0x0a, - 0xba, 0xf2, 0x6d, 0x05, 0x34, 0xf6, 0xb6, 0x42, 0x83, 0xa1, 0x48, 0x0e, 0x1f, 0x82, 0x79, 0x37, - 0xd6, 0xd9, 0x47, 0x8f, 0x98, 0x9e, 0x95, 0xd4, 0xc5, 0x70, 0x50, 0x4d, 0x9a, 0x13, 0x10, 0x3a, - 0xcf, 0x15, 0x05, 0x01, 0x53, 0xa2, 0x31, 0x41, 0xc9, 0xc5, 0x3d, 0xc2, 0x7c, 0x6c, 0x13, 0x15, - 0xd2, 0x6d, 0xe5, 0x70, 0xe9, 0xc9, 0x10, 0x40, 0x31, 0x67, 0x72, 0x70, 0xf0, 0x4d, 0x50, 0x68, - 0x51, 0x2f, 0xf0, 0xe5, 0xeb, 0x94, 0x1a, 0x73, 0x8a, 0x52, 0xf8, 0x54, 0x08, 0x51, 0x84, 0xc1, - 0xb7, 0xc1, 0x6c, 0x9f, 0x50, 0xe6, 0x78, 0xae, 0x9e, 0x97, 0xb4, 0x79, 0x45, 0x9b, 0x3d, 0x88, - 0xc4, 0x68, 0x88, 0xc3, 0x7b, 0xa0, 0x48, 0x95, 0xe3, 0x7a, 0x41, 0x72, 0x17, 0x14, 0xb7, 0x38, - 0xca, 0xe0, 0x88, 0x01, 0xdf, 0x07, 0x65, 0x16, 0x58, 0x23, 0x85, 0x19, 0xa9, 0xb0, 0xa8, 0x14, - 0xca, 0x7b, 0x31, 0x84, 0x92, 0x3c, 0x11, 0x96, 0x88, 0x51, 0x9f, 0x1d, 0x0f, 0x4b, 0xa4, 0x00, - 0x49, 0xa4, 0xfe, 0x87, 0x06, 0x6e, 0x4d, 0xf7, 0x62, 0xef, 0x80, 0x12, 0xf6, 0x1d, 0x19, 0xf6, - 0xf0, 0xad, 0xe6, 0x44, 0x5e, 0xb7, 0x9e, 0xee, 0x46, 0x42, 0x14, 0xe3, 0x82, 0x3c, 0x74, 0x46, - 0xd4, 0xf5, 0x88, 0x3c, 0xbc, 0x92, 0xa1, 0x18, 0x87, 0x1b, 0x60, 0x6e, 0x78, 0x90, 0x8f, 0xa4, - 0xe7, 0xa5, 0xc2, 0xed, 0x70, 0x50, 0x9d, 0x43, 0x49, 0x00, 0x8d, 0xf3, 0xea, 0xbf, 0x67, 0xc1, - 0xf2, 0x1e, 0xe9, 0x1e, 0xbe, 0x9c, 0xa9, 0xf0, 0xe5, 0xd8, 0x54, 0x78, 0x78, 0x8d, 0xb6, 0x4d, - 0x77, 0xf5, 0xe5, 0x4e, 0x86, 0x5f, 0xb2, 0xe0, 0xf5, 0x2b, 0x1c, 0x83, 0xdf, 0x03, 0x48, 0x2f, - 0x34, 0x9a, 0xca, 0xe8, 0xfd, 0xc9, 0x0e, 0x5d, 0x6c, 0xd2, 0xc6, 0x9d, 0x70, 0x50, 0x4d, 0x69, - 0x5e, 0x94, 0x72, 0x0f, 0xfc, 0x41, 0x03, 0x4b, 0x6e, 0xda, 0xe0, 0x52, 0x59, 0xdf, 0x98, 0xec, - 0x41, 0xea, 0xdc, 0x6b, 0xdc, 0x0d, 0x07, 0xd5, 0xf4, 0x91, 0x88, 0xd2, 0x2f, 0x14, 0x23, 0xe7, - 0x4e, 0x22, 0x51, 0xa2, 0x69, 0xfe, 0xbf, 0x5a, 0xfb, 0x62, 0xac, 0xd6, 0x3e, 0x9e, 0xaa, 0xd6, - 0x12, 0x9e, 0x5e, 0x5a, 0x6a, 0xd6, 0xb9, 0x52, 0xdb, 0xbc, 0x76, 0xa9, 0x25, 0xad, 0x5f, 0x5d, - 0x69, 0x8f, 0xc1, 0xca, 0xe5, 0x5e, 0x4d, 0x3d, 0xba, 0xeb, 0xbf, 0x66, 0xc1, 0xe2, 0xab, 0x75, - 0xe0, 0x66, 0x4d, 0x7f, 0x9a, 0x07, 0xcb, 0xaf, 0x1a, 0xfe, 0xea, 0x86, 0x17, 0x1f, 0xd1, 0x80, - 0x11, 0xaa, 0x3e, 0xfc, 0xa3, 0xb7, 0xda, 0x67, 0x84, 0x22, 0x89, 0xc0, 0xda, 0x70, 0x37, 0x88, - 0x3e, 0x58, 0x40, 0x64, 0x5a, 0x7d, 0x0b, 0xd5, 0x62, 0xe0, 0x80, 0x02, 0x11, 0x1b, 0xaf, 0x5e, - 0xa8, 0xe5, 0x56, 0xcb, 0xeb, 0xdb, 0x37, 0xae, 0x15, 0x43, 0x2e, 0xce, 0x3b, 0x2e, 0xa7, 0xc7, - 0xf1, 0x0e, 0x22, 0x65, 0x28, 0xba, 0x01, 0xbe, 0x01, 0x72, 0x81, 0xd3, 0x54, 0x2b, 0x42, 0x59, - 0x51, 0x72, 0xfb, 0xbb, 0xdb, 0x48, 0xc8, 0x57, 0x0e, 0xd5, 0xee, 0x2d, 0x4d, 0xc0, 0x05, 0x90, - 0xeb, 0x90, 0xe3, 0xa8, 0xcf, 0x90, 0xf8, 0x09, 0x1b, 0xa0, 0xd0, 0x17, 0x6b, 0xb9, 0xca, 
0xf3, - 0xbd, 0xc9, 0x9e, 0xc6, 0xab, 0x3c, 0x8a, 0x54, 0x37, 0xb3, 0x0f, 0xb4, 0xfa, 0x9f, 0x1a, 0xb8, - 0x7b, 0x69, 0x41, 0x8a, 0x45, 0x09, 0x77, 0xbb, 0xde, 0x11, 0x69, 0xca, 0xbb, 0x8b, 0xf1, 0xa2, - 0xb4, 0x15, 0x89, 0xd1, 0x10, 0x87, 0x6f, 0x81, 0x99, 0x26, 0x71, 0x1d, 0xd2, 0x94, 0x2b, 0x55, - 0x31, 0xae, 0xe5, 0x6d, 0x29, 0x45, 0x0a, 0x15, 0x3c, 0x4a, 0x30, 0xf3, 0x5c, 0xb5, 0xc4, 0x8d, - 0x78, 0x48, 0x4a, 0x91, 0x42, 0xe1, 0x16, 0x98, 0x27, 0xc2, 0x4d, 0x19, 0xc4, 0x0e, 0xa5, 0xde, - 0xf0, 0x65, 0x97, 0x95, 0xc2, 0xfc, 0xce, 0x38, 0x8c, 0xce, 0xf3, 0xeb, 0xff, 0x66, 0x81, 0x7e, - 0xd9, 0xd8, 0x83, 0x9d, 0x78, 0x8b, 0x91, 0xa0, 0x5c, 0xa4, 0xca, 0xeb, 0xc6, 0xf5, 0x5b, 0x46, - 0xa8, 0x35, 0x96, 0x94, 0x37, 0x73, 0x49, 0x69, 0x62, 0xf3, 0x91, 0x47, 0x78, 0x04, 0x16, 0xdc, - 0xf1, 0x95, 0x3b, 0xda, 0xc9, 0xca, 0xeb, 0x6b, 0x53, 0x35, 0x88, 0xbc, 0x52, 0x57, 0x57, 0x2e, - 0x9c, 0x03, 0x18, 0xba, 0x70, 0x09, 0x5c, 0x07, 0xc0, 0x71, 0x6d, 0xaf, 0xe7, 0x77, 0x09, 0x27, - 0x32, 0x81, 0xc5, 0x78, 0x5a, 0xee, 0x8e, 0x10, 0x94, 0x60, 0xa5, 0x65, 0x3e, 0x3f, 0x5d, 0xe6, - 0x1b, 0x9f, 0x9c, 0x9c, 0x55, 0x32, 0x2f, 0xce, 0x2a, 0x99, 0xd3, 0xb3, 0x4a, 0xe6, 0x79, 0x58, - 0xd1, 0x4e, 0xc2, 0x8a, 0xf6, 0x22, 0xac, 0x68, 0xa7, 0x61, 0x45, 0xfb, 0x3b, 0xac, 0x68, 0x3f, - 0xfe, 0x53, 0xc9, 0x7c, 0x5e, 0x9b, 0xf4, 0x0f, 0xfc, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x58, - 0x16, 0x3a, 0xdf, 0xbd, 0x0f, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/authorization/v1beta1/generated.proto", fileDescriptor_8eab727787743457) +} + +var fileDescriptor_8eab727787743457 = []byte{ + // 1192 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4f, 0x6f, 0xe3, 0x44, + 0x14, 0x8f, 0xf3, 0xa7, 0x4d, 0x26, 0x1b, 0xda, 0x9d, 0xaa, 0x5b, 0x6f, 0x11, 0x49, 0x14, 0x24, + 0x54, 0xb4, 0x8b, 0xb3, 0xad, 0x0a, 0x5d, 0x0a, 0x7b, 0xa8, 0xd5, 0x2e, 0xaa, 0xd4, 0x5d, 0x56, + 0x53, 0xb5, 0x07, 0x56, 0x02, 0x26, 0xce, 0x34, 0x31, 0x75, 0x6c, 0xe3, 0x19, 0xa7, 0x14, 0x71, + 0xd8, 0x23, 0x47, 0x8e, 0x1c, 0x38, 0x70, 0xe2, 0x3b, 0x70, 0x41, 0x82, 0x53, 0x8f, 0x7b, 0x2c, + 0x12, 0x8a, 0xa8, 0xf9, 0x10, 0x5c, 0xd1, 0x8c, 0x27, 0xb1, 0x9d, 0xba, 0x4d, 0xdb, 0x03, 0x7b, + 0xd9, 0x5b, 0xe6, 0xfd, 0x7e, 0xef, 0xcf, 0xbc, 0x79, 0xf3, 0xfc, 0x26, 0xe0, 0xc1, 0xe1, 0x43, + 0xaa, 0x99, 0x4e, 0x13, 0xbb, 0x66, 0x13, 0xfb, 0xac, 0xeb, 0x78, 0xe6, 0xb7, 0x98, 0x99, 0x8e, + 0xdd, 0xec, 0x2f, 0xb7, 0x08, 0xc3, 0xcb, 0xcd, 0x0e, 0xb1, 0x89, 0x87, 0x19, 0x69, 0x6b, 0xae, + 0xe7, 0x30, 0x07, 0xd6, 0x43, 0x0d, 0x0d, 0xbb, 0xa6, 0x96, 0xd0, 0xd0, 0xa4, 0xc6, 0xe2, 0x7b, + 0x1d, 0x93, 0x75, 0xfd, 0x96, 0x66, 0x38, 0xbd, 0x66, 0xc7, 0xe9, 0x38, 0x4d, 0xa1, 0xd8, 0xf2, + 0x0f, 0xc4, 0x4a, 0x2c, 0xc4, 0xaf, 0xd0, 0xe0, 0xe2, 0xbd, 0x4b, 0x42, 0x18, 0xf7, 0xbe, 0xb8, + 0x1a, 0x91, 0x7b, 0xd8, 0xe8, 0x9a, 0x36, 0xf1, 0x8e, 0x9b, 0xee, 0x61, 0x87, 0x0b, 0x68, 0xb3, + 0x47, 0x18, 0x4e, 0xd3, 0x6a, 0x5e, 0xa4, 0xe5, 0xf9, 0x36, 0x33, 0x7b, 0xe4, 0x9c, 0xc2, 0x07, + 0x93, 0x14, 0xa8, 0xd1, 0x25, 0x3d, 0x3c, 0xae, 0xd7, 0x58, 0x03, 0x60, 0xeb, 0x1b, 0xe6, 0xe1, + 0x7d, 0x6c, 0xf9, 0x04, 0xd6, 0x40, 0xc1, 0x64, 0xa4, 0x47, 0x55, 0xa5, 0x9e, 0x5b, 0x2a, 0xe9, + 0xa5, 0x60, 0x50, 0x2b, 0x6c, 0x73, 0x01, 0x0a, 0xe5, 0xeb, 0xc5, 0x1f, 0x7f, 0xae, 0x65, 0x5e, + 0xfc, 0x55, 0xcf, 0x34, 0x7e, 0xcb, 0x02, 0x75, 0xc7, 0x31, 0xb0, 0xb5, 0xeb, 0xb7, 0xbe, 0x22, + 0x06, 0xdb, 0x30, 0x0c, 0x42, 0x29, 0x22, 0x7d, 0x93, 0x1c, 0xc1, 0x2f, 0x41, 0x91, 0xef, 0xac, + 0x8d, 0x19, 0x56, 0x95, 0xba, 0xb2, 0x54, 0x5e, 0x79, 0xa0, 
0x45, 0xa7, 0x30, 0x0a, 0x50, 0x73, + 0x0f, 0x3b, 0x5c, 0x40, 0x35, 0xce, 0xd6, 0xfa, 0xcb, 0xda, 0xa7, 0xc2, 0xd6, 0x13, 0xc2, 0xb0, + 0x0e, 0x4f, 0x06, 0xb5, 0x4c, 0x30, 0xa8, 0x81, 0x48, 0x86, 0x46, 0x56, 0xe1, 0x73, 0x90, 0xa7, + 0x2e, 0x31, 0xd4, 0xac, 0xb0, 0xfe, 0xa1, 0x36, 0xe9, 0x8c, 0xb5, 0x94, 0x30, 0x77, 0x5d, 0x62, + 0xe8, 0xb7, 0xa4, 0x9b, 0x3c, 0x5f, 0x21, 0x61, 0x14, 0x1a, 0x60, 0x8a, 0x32, 0xcc, 0x7c, 0xaa, + 0xe6, 0x84, 0xf9, 0x8f, 0x6e, 0x66, 0x5e, 0x98, 0xd0, 0xdf, 0x90, 0x0e, 0xa6, 0xc2, 0x35, 0x92, + 0xa6, 0x1b, 0xcf, 0xc1, 0xfc, 0x53, 0xc7, 0x46, 0x84, 0x3a, 0xbe, 0x67, 0x90, 0x0d, 0xc6, 0x3c, + 0xb3, 0xe5, 0x33, 0x42, 0x61, 0x1d, 0xe4, 0x5d, 0xcc, 0xba, 0x22, 0x71, 0xa5, 0x28, 0xbe, 0x67, + 0x98, 0x75, 0x91, 0x40, 0x38, 0xa3, 0x4f, 0xbc, 0x96, 0xd8, 0x7c, 0x8c, 0xb1, 0x4f, 0xbc, 0x16, + 0x12, 0x48, 0xe3, 0x6b, 0x30, 0x13, 0x33, 0x8e, 0x7c, 0x4b, 0x9c, 0x2d, 0x87, 0x12, 0x67, 0xcb, + 0x35, 0x28, 0x0a, 0xe5, 0xf0, 0x11, 0x98, 0xb1, 0x23, 0x9d, 0x3d, 0xb4, 0x43, 0xd5, 0xac, 0xa0, + 0xce, 0x05, 0x83, 0x5a, 0xdc, 0x1c, 0x87, 0xd0, 0x38, 0xb7, 0xf1, 0x53, 0x1e, 0xc0, 0x94, 0xdd, + 0x34, 0x41, 0xc9, 0xc6, 0x3d, 0x42, 0x5d, 0x6c, 0x10, 0xb9, 0xa5, 0xdb, 0x32, 0xe0, 0xd2, 0xd3, + 0x21, 0x80, 0x22, 0xce, 0xe4, 0xcd, 0xc1, 0xb7, 0x41, 0xa1, 0xe3, 0x39, 0xbe, 0x2b, 0x4e, 0xa7, + 0xa4, 0x57, 0x24, 0xa5, 0xf0, 0x09, 0x17, 0xa2, 0x10, 0x83, 0xef, 0x82, 0xe9, 0x3e, 0xf1, 0xa8, + 0xe9, 0xd8, 0x6a, 0x5e, 0xd0, 0x66, 0x24, 0x6d, 0x7a, 0x3f, 0x14, 0xa3, 0x21, 0x0e, 0xef, 0x83, + 0xa2, 0x27, 0x03, 0x57, 0x0b, 0x82, 0x3b, 0x2b, 0xb9, 0xc5, 0x51, 0x06, 0x47, 0x0c, 0xf8, 0x3e, + 0x28, 0x53, 0xbf, 0x35, 0x52, 0x98, 0x12, 0x0a, 0x73, 0x52, 0xa1, 0xbc, 0x1b, 0x41, 0x28, 0xce, + 0xe3, 0xdb, 0xe2, 0x7b, 0x54, 0xa7, 0x93, 0xdb, 0xe2, 0x29, 0x40, 0x02, 0x81, 0x3d, 0x50, 0x39, + 0x30, 0x89, 0xd5, 0xde, 0x25, 0x16, 0x31, 0x98, 0xe3, 0xa9, 0x45, 0x51, 0x7c, 0xab, 0x97, 0x15, + 0x9f, 0xf6, 0x38, 0xae, 0x11, 0xa5, 0x5d, 0xbf, 0x1d, 0x0c, 0x6a, 0x95, 0x04, 0x88, 0x92, 0xd6, + 0xb9, 0x3b, 0x0b, 0xb7, 0x88, 0x35, 0x72, 0x57, 0xba, 0x82, 0xbb, 0x9d, 0xb8, 0xc6, 0xb8, 0xbb, + 0x04, 0x88, 0x92, 0xd6, 0x1b, 0x7f, 0x28, 0xe0, 0xd6, 0xf5, 0xea, 0xf1, 0x1e, 0x28, 0x61, 0xd7, + 0x14, 0x87, 0x3a, 0xac, 0xc4, 0x0a, 0xaf, 0x9a, 0x8d, 0x67, 0xdb, 0xa1, 0x10, 0x45, 0x38, 0x27, + 0x0f, 0x53, 0xcd, 0x6f, 0xed, 0x88, 0x3c, 0x74, 0x49, 0x51, 0x84, 0xc3, 0x35, 0x50, 0x19, 0x2e, + 0x44, 0x09, 0xaa, 0x79, 0xa1, 0x20, 0x36, 0x81, 0xe2, 0x00, 0x4a, 0xf2, 0x1a, 0xbf, 0x67, 0xc1, + 0xc2, 0x2e, 0xb1, 0x0e, 0x5e, 0x4d, 0xcf, 0xfb, 0x22, 0xd1, 0xf3, 0x1e, 0x5d, 0xa1, 0x29, 0xa5, + 0x87, 0xfa, 0x6a, 0xfb, 0xde, 0x2f, 0x59, 0xf0, 0xe6, 0x25, 0x81, 0xc1, 0xef, 0x00, 0xf4, 0xce, + 0xb5, 0x11, 0x99, 0xd1, 0xd5, 0xc9, 0x01, 0x9d, 0x6f, 0x41, 0xfa, 0x9d, 0x60, 0x50, 0x4b, 0x69, + 0x4d, 0x28, 0xc5, 0x0f, 0xfc, 0x5e, 0x01, 0xf3, 0x76, 0x5a, 0x5b, 0x96, 0x59, 0x5f, 0x9b, 0x1c, + 0x41, 0x6a, 0x57, 0xd7, 0xef, 0x06, 0x83, 0x5a, 0x7a, 0xc3, 0x47, 0xe9, 0x0e, 0xf9, 0x17, 0xf6, + 0x4e, 0x2c, 0x51, 0xfc, 0xd2, 0xfc, 0x7f, 0xb5, 0xf6, 0x79, 0xa2, 0xd6, 0x3e, 0xbe, 0x56, 0xad, + 0xc5, 0x22, 0xbd, 0xb0, 0xd4, 0x5a, 0x63, 0xa5, 0xb6, 0x7e, 0xe5, 0x52, 0x8b, 0x5b, 0xbf, 0xbc, + 0xd2, 0x9e, 0x80, 0xc5, 0x8b, 0xa3, 0xba, 0xf6, 0x87, 0xa9, 0xf1, 0x6b, 0x16, 0xcc, 0xbd, 0x1e, + 0x76, 0x6e, 0x76, 0xe9, 0x4f, 0xf3, 0x60, 0xe1, 0xf5, 0x85, 0xbf, 0xfc, 0xc2, 0xf3, 0x11, 0xc1, + 0xa7, 0xc4, 0x93, 0x63, 0xcd, 0xe8, 0xac, 0xf6, 0x28, 0xf1, 0x90, 0x40, 0x60, 0x7d, 0x38, 0xf9, + 0x84, 0x1f, 0x2c, 0xc0, 0x33, 0x2d, 0xbf, 0x85, 0x72, 0xec, 0x31, 0x41, 0x81, 0xf0, 
0x79, 0x5e, + 0x2d, 0xd4, 0x73, 0x4b, 0xe5, 0x95, 0xcd, 0x1b, 0xd7, 0x8a, 0x26, 0x9e, 0x05, 0x5b, 0x36, 0xf3, + 0x8e, 0xa3, 0x09, 0x4b, 0xc8, 0x50, 0xe8, 0x01, 0xbe, 0x05, 0x72, 0xbe, 0xd9, 0x96, 0x03, 0x50, + 0x59, 0x52, 0x72, 0x7b, 0xdb, 0x9b, 0x88, 0xcb, 0x17, 0x0f, 0xe4, 0xcb, 0x42, 0x98, 0x80, 0xb3, + 0x20, 0x77, 0x48, 0x8e, 0xc3, 0x7b, 0x86, 0xf8, 0x4f, 0xa8, 0x83, 0x42, 0x9f, 0x3f, 0x3a, 0x64, + 0x9e, 0xef, 0x4f, 0x8e, 0x34, 0x7a, 0xa8, 0xa0, 0x50, 0x75, 0x3d, 0xfb, 0x50, 0x69, 0xfc, 0xa9, + 0x80, 0xbb, 0x17, 0x16, 0x24, 0x1f, 0x03, 0xb1, 0x65, 0x39, 0x47, 0xa4, 0x2d, 0x7c, 0x17, 0xa3, + 0x31, 0x70, 0x23, 0x14, 0xa3, 0x21, 0x0e, 0xdf, 0x01, 0x53, 0x6d, 0x62, 0x9b, 0xa4, 0x2d, 0x06, + 0xc6, 0x62, 0x54, 0xcb, 0x9b, 0x42, 0x8a, 0x24, 0xca, 0x79, 0x1e, 0xc1, 0xd4, 0xb1, 0xe5, 0x88, + 0x3a, 0xe2, 0x21, 0x21, 0x45, 0x12, 0x85, 0x1b, 0x60, 0x86, 0xf0, 0x30, 0xc5, 0x26, 0xb6, 0x3c, + 0xcf, 0x19, 0x9e, 0xec, 0x82, 0x54, 0x98, 0xd9, 0x4a, 0xc2, 0x68, 0x9c, 0xdf, 0xf8, 0x37, 0x0b, + 0xd4, 0x8b, 0xda, 0x1e, 0x3c, 0x8c, 0xa6, 0x18, 0x01, 0x8a, 0x41, 0xaa, 0xbc, 0xa2, 0x5d, 0xfd, + 0xca, 0x70, 0x35, 0x7d, 0x5e, 0x46, 0x53, 0x89, 0x4b, 0x63, 0x93, 0x8f, 0x58, 0xc2, 0x23, 0x30, + 0x6b, 0x27, 0x1f, 0x14, 0xe1, 0x4c, 0x56, 0x5e, 0x59, 0xbe, 0xd6, 0x05, 0x11, 0x2e, 0x55, 0xe9, + 0x72, 0x76, 0x0c, 0xa0, 0xe8, 0x9c, 0x13, 0xb8, 0x02, 0x80, 0x69, 0x1b, 0x4e, 0xcf, 0xb5, 0x08, + 0x23, 0x22, 0x81, 0xc5, 0xa8, 0x5b, 0x6e, 0x8f, 0x10, 0x14, 0x63, 0xa5, 0x65, 0x3e, 0x7f, 0xbd, + 0xcc, 0xeb, 0x8f, 0x4f, 0xce, 0xaa, 0x99, 0x97, 0x67, 0xd5, 0xcc, 0xe9, 0x59, 0x35, 0xf3, 0x22, + 0xa8, 0x2a, 0x27, 0x41, 0x55, 0x79, 0x19, 0x54, 0x95, 0xd3, 0xa0, 0xaa, 0xfc, 0x1d, 0x54, 0x95, + 0x1f, 0xfe, 0xa9, 0x66, 0x3e, 0xab, 0x4f, 0xfa, 0x33, 0xe2, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x46, 0xf7, 0xe0, 0x3d, 0xaf, 0x10, 0x00, 0x00, } func (m ExtraValue) Marshal() (dAtA []byte, err error) { @@ -713,6 +717,30 @@ func (m *ResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.FieldSelector != nil { + { + size, err := m.FieldSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) @@ -1347,6 +1375,14 @@ func (m *ResourceAttributes) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) + if m.FieldSelector != nil { + l = m.FieldSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -1583,6 +1619,8 @@ func (this *ResourceAttributes) String() string { `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `FieldSelector:` + strings.Replace(fmt.Sprintf("%v", this.FieldSelector), "FieldSelectorAttributes", "v11.FieldSelectorAttributes", 1) + `,`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelectorAttributes", "v11.LabelSelectorAttributes", 1) + `,`, `}`, }, "") return s @@ -2438,6 +2476,78 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error 
{ } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FieldSelector == nil { + m.FieldSelector = &v11.FieldSelectorAttributes{} + } + if err := m.FieldSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = &v11.LabelSelectorAttributes{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.proto b/vendor/k8s.io/api/authorization/v1beta1/generated.proto index 01736202f8..8738768b89 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.proto @@ -21,6 +21,7 @@ syntax = "proto2"; package k8s.io.api.authorization.v1beta1; +import "k8s.io/api/authorization/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -44,7 +45,7 @@ message LocalSubjectAccessReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace // you made the request against. If empty, it is defaulted. @@ -69,11 +70,13 @@ message NonResourceAttributes { // NonResourceRule holds information that describes a rule for the non-resource message NonResourceRule { // Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all. + // +listType=atomic repeated string verbs = 1; // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, // final step in the path. "*" means all. // +optional + // +listType=atomic repeated string nonResourceURLs = 2; } @@ -109,26 +112,38 @@ message ResourceAttributes { // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. 
// +optional optional string name = 7; + + // fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it. + // +optional + optional .k8s.io.api.authorization.v1.FieldSelectorAttributes fieldSelector = 8; + + // labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it. + // +optional + optional .k8s.io.api.authorization.v1.LabelSelectorAttributes labelSelector = 9; } // ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, // may contain duplicates, and possibly be incomplete. message ResourceRule { // Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all. + // +listType=atomic repeated string verbs = 1; // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of // the enumerated resources in any API group will be allowed. "*" means all. // +optional + // +listType=atomic repeated string apiGroups = 2; // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. // +optional + // +listType=atomic repeated string resources = 3; // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. // +optional + // +listType=atomic repeated string resourceNames = 4; } @@ -139,7 +154,7 @@ message SelfSubjectAccessReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. user and groups must be empty optional SelfSubjectAccessReviewSpec spec = 2; @@ -171,7 +186,7 @@ message SelfSubjectRulesReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. optional SelfSubjectRulesReviewSpec spec = 2; @@ -192,7 +207,7 @@ message SubjectAccessReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated optional SubjectAccessReviewSpec spec = 2; @@ -220,6 +235,7 @@ message SubjectAccessReviewSpec { // Groups is the groups you're testing for. // +optional + // +listType=atomic repeated string group = 4; // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer @@ -262,10 +278,12 @@ message SubjectAccessReviewStatus { message SubjectRulesReviewStatus { // ResourceRules is the list of actions the subject is allowed to perform on resources. // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. 
+ // +listType=atomic repeated ResourceRule resourceRules = 1; // NonResourceRules is the list of actions the subject is allowed to perform on non-resources. // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + // +listType=atomic repeated NonResourceRule nonResourceRules = 2; // Incomplete is true when the rules returned by this call are incomplete. This is most commonly diff --git a/vendor/k8s.io/api/authorization/v1beta1/types.go b/vendor/k8s.io/api/authorization/v1beta1/types.go index 2653098655..8b8e5a9867 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/types.go +++ b/vendor/k8s.io/api/authorization/v1beta1/types.go @@ -19,6 +19,7 @@ package v1beta1 import ( "fmt" + authorizationv1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -124,6 +125,12 @@ type ResourceAttributes struct { // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. // +optional Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"` + // fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it. + // +optional + FieldSelector *authorizationv1.FieldSelectorAttributes `json:"fieldSelector,omitempty" protobuf:"bytes,8,opt,name=fieldSelector"` + // labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it. + // +optional + LabelSelector *authorizationv1.LabelSelectorAttributes `json:"labelSelector,omitempty" protobuf:"bytes,9,opt,name=labelSelector"` } // NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface @@ -152,6 +159,7 @@ type SubjectAccessReviewSpec struct { User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` // Groups is the groups you're testing for. // +optional + // +listType=atomic Groups []string `json:"group,omitempty" protobuf:"bytes,4,rep,name=group"` // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer // it needs a reflection here. @@ -244,9 +252,11 @@ type SelfSubjectRulesReviewSpec struct { type SubjectRulesReviewStatus struct { // ResourceRules is the list of actions the subject is allowed to perform on resources. // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + // +listType=atomic ResourceRules []ResourceRule `json:"resourceRules" protobuf:"bytes,1,rep,name=resourceRules"` // NonResourceRules is the list of actions the subject is allowed to perform on non-resources. // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + // +listType=atomic NonResourceRules []NonResourceRule `json:"nonResourceRules" protobuf:"bytes,2,rep,name=nonResourceRules"` // Incomplete is true when the rules returned by this call are incomplete. This is most commonly // encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation. @@ -262,28 +272,34 @@ type SubjectRulesReviewStatus struct { // may contain duplicates, and possibly be incomplete. type ResourceRule struct { // Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all. + // +listType=atomic Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` // APIGroups is the name of the APIGroup that contains the resources. 
If multiple API groups are specified, any action requested against one of // the enumerated resources in any API group will be allowed. "*" means all. // +optional + // +listType=atomic APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. // +optional + // +listType=atomic Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. // +optional + // +listType=atomic ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"` } // NonResourceRule holds information that describes a rule for the non-resource type NonResourceRule struct { // Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all. + // +listType=atomic Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, // final step in the path. "*" means all. // +optional + // +listType=atomic NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,2,rep,name=nonResourceURLs"` } diff --git a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go index e0846be7a4..bb1352a2d9 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go @@ -59,14 +59,16 @@ func (NonResourceRule) SwaggerDoc() map[string]string { } var map_ResourceAttributes = map[string]string{ - "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", - "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", - "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", - "group": "Group is the API Group of the Resource. \"*\" means all.", - "version": "Version is the API Version of the Resource. \"*\" means all.", - "resource": "Resource is one of the existing resource types. \"*\" means all.", - "subresource": "Subresource is one of the existing resource types. \"\" means none.", - "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", + "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", + "namespace": "Namespace is the namespace of the action being requested. 
Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", + "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", + "group": "Group is the API Group of the Resource. \"*\" means all.", + "version": "Version is the API Version of the Resource. \"*\" means all.", + "resource": "Resource is one of the existing resource types. \"*\" means all.", + "subresource": "Subresource is one of the existing resource types. \"\" means none.", + "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", + "fieldSelector": "fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it.", + "labelSelector": "labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it.", } func (ResourceAttributes) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go index 13f09cf2d2..d76993dba4 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go @@ -22,6 +22,7 @@ limitations under the License. package v1beta1 import ( + v1 "k8s.io/api/authorization/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -118,6 +119,16 @@ func (in *NonResourceRule) DeepCopy() *NonResourceRule { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceAttributes) DeepCopyInto(out *ResourceAttributes) { *out = *in + if in.FieldSelector != nil { + in, out := &in.FieldSelector, &out.FieldSelector + *out = new(v1.FieldSelectorAttributes) + (*in).DeepCopyInto(*out) + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(v1.LabelSelectorAttributes) + (*in).DeepCopyInto(*out) + } return } @@ -201,7 +212,7 @@ func (in *SelfSubjectAccessReviewSpec) DeepCopyInto(out *SelfSubjectAccessReview if in.ResourceAttributes != nil { in, out := &in.ResourceAttributes, &out.ResourceAttributes *out = new(ResourceAttributes) - **out = **in + (*in).DeepCopyInto(*out) } if in.NonResourceAttributes != nil { in, out := &in.NonResourceAttributes, &out.NonResourceAttributes @@ -299,7 +310,7 @@ func (in *SubjectAccessReviewSpec) DeepCopyInto(out *SubjectAccessReviewSpec) { if in.ResourceAttributes != nil { in, out := &in.ResourceAttributes, &out.ResourceAttributes *out = new(ResourceAttributes) - **out = **in + (*in).DeepCopyInto(*out) } if in.NonResourceAttributes != nil { in, out := &in.NonResourceAttributes, &out.NonResourceAttributes diff --git a/vendor/k8s.io/api/autoscaling/v1/doc.go b/vendor/k8s.io/api/autoscaling/v1/doc.go index 8c9c09b5cb..d64c9cbc1a 100644 --- a/vendor/k8s.io/api/autoscaling/v1/doc.go +++ b/vendor/k8s.io/api/autoscaling/v1/doc.go @@ -17,5 +17,6 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true package v1 // import "k8s.io/api/autoscaling/v1" diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go index 289d1b827f..3e3c231351 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto +// source: k8s.io/api/autoscaling/v1/generated.proto package v1 @@ -50,7 +50,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} } func (*ContainerResourceMetricSource) ProtoMessage() {} func (*ContainerResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{0} + return fileDescriptor_1972394c0c7aac8b, []int{0} } func (m *ContainerResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -78,7 +78,7 @@ var xxx_messageInfo_ContainerResourceMetricSource proto.InternalMessageInfo func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} } func (*ContainerResourceMetricStatus) ProtoMessage() {} func (*ContainerResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{1} + return fileDescriptor_1972394c0c7aac8b, []int{1} } func (m *ContainerResourceMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,7 +106,7 @@ var xxx_messageInfo_ContainerResourceMetricStatus proto.InternalMessageInfo func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{2} + return fileDescriptor_1972394c0c7aac8b, []int{2} } func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,7 +134,7 @@ var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } func (*ExternalMetricSource) ProtoMessage() {} func (*ExternalMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{3} + return fileDescriptor_1972394c0c7aac8b, []int{3} } func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -162,7 +162,7 @@ var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } func (*ExternalMetricStatus) ProtoMessage() {} func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{4} + return fileDescriptor_1972394c0c7aac8b, []int{4} } func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -190,7 +190,7 @@ var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } func (*HorizontalPodAutoscaler) ProtoMessage() {} func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{5} + return fileDescriptor_1972394c0c7aac8b, []int{5} } func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { 
return m.Unmarshal(b) @@ -218,7 +218,7 @@ var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{6} + return fileDescriptor_1972394c0c7aac8b, []int{6} } func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -246,7 +246,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{7} + return fileDescriptor_1972394c0c7aac8b, []int{7} } func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -274,7 +274,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{8} + return fileDescriptor_1972394c0c7aac8b, []int{8} } func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -302,7 +302,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{9} + return fileDescriptor_1972394c0c7aac8b, []int{9} } func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -330,7 +330,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo func (m *MetricSpec) Reset() { *m = MetricSpec{} } func (*MetricSpec) ProtoMessage() {} func (*MetricSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{10} + return fileDescriptor_1972394c0c7aac8b, []int{10} } func (m *MetricSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -358,7 +358,7 @@ var xxx_messageInfo_MetricSpec proto.InternalMessageInfo func (m *MetricStatus) Reset() { *m = MetricStatus{} } func (*MetricStatus) ProtoMessage() {} func (*MetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{11} + return fileDescriptor_1972394c0c7aac8b, []int{11} } func (m *MetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -386,7 +386,7 @@ var xxx_messageInfo_MetricStatus proto.InternalMessageInfo func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } func (*ObjectMetricSource) ProtoMessage() {} func (*ObjectMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{12} + return fileDescriptor_1972394c0c7aac8b, []int{12} } func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -414,7 +414,7 @@ var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } func (*ObjectMetricStatus) ProtoMessage() {} func (*ObjectMetricStatus) Descriptor() ([]byte, []int) 
{ - return fileDescriptor_2bb1f2101a7f10e2, []int{13} + return fileDescriptor_1972394c0c7aac8b, []int{13} } func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -442,7 +442,7 @@ var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } func (*PodsMetricSource) ProtoMessage() {} func (*PodsMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{14} + return fileDescriptor_1972394c0c7aac8b, []int{14} } func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -470,7 +470,7 @@ var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } func (*PodsMetricStatus) ProtoMessage() {} func (*PodsMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{15} + return fileDescriptor_1972394c0c7aac8b, []int{15} } func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -498,7 +498,7 @@ var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } func (*ResourceMetricSource) ProtoMessage() {} func (*ResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{16} + return fileDescriptor_1972394c0c7aac8b, []int{16} } func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -526,7 +526,7 @@ var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } func (*ResourceMetricStatus) ProtoMessage() {} func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{17} + return fileDescriptor_1972394c0c7aac8b, []int{17} } func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -554,7 +554,7 @@ var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{18} + return fileDescriptor_1972394c0c7aac8b, []int{18} } func (m *Scale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -582,7 +582,7 @@ var xxx_messageInfo_Scale proto.InternalMessageInfo func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} func (*ScaleSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{19} + return fileDescriptor_1972394c0c7aac8b, []int{19} } func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -610,7 +610,7 @@ var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} func (*ScaleStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{20} + return fileDescriptor_1972394c0c7aac8b, []int{20} } func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -660,112 +660,111 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto", fileDescriptor_2bb1f2101a7f10e2) -} - -var fileDescriptor_2bb1f2101a7f10e2 = []byte{ - // 1608 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x59, 0x4d, 0x6c, 0xd4, 0xc6, - 0x17, 0xcf, 0x7e, 0x24, 0x24, 0x6f, 0x43, 
0x3e, 0x06, 0xfe, 0x90, 0x84, 0x3f, 0xeb, 0xc8, 0x7f, - 0x84, 0xf2, 0x6f, 0x8b, 0xdd, 0x6c, 0x29, 0xa2, 0xa7, 0x2a, 0xde, 0x96, 0x82, 0x9a, 0x85, 0x30, - 0x09, 0x94, 0x7e, 0x8a, 0x89, 0x77, 0xd8, 0x98, 0xac, 0xed, 0x95, 0xed, 0x5d, 0x11, 0x24, 0xa4, - 0xf6, 0xd0, 0x7b, 0x2f, 0xb4, 0xd7, 0x56, 0xea, 0xb5, 0x67, 0xce, 0xbd, 0x71, 0xe4, 0x80, 0x54, - 0x4e, 0xab, 0xe2, 0x1e, 0x7a, 0xe8, 0xa9, 0x57, 0x4e, 0x95, 0xc7, 0x63, 0xaf, 0xbd, 0xbb, 0x76, - 0x36, 0x9b, 0x10, 0xb5, 0x15, 0xb7, 0x78, 0xe7, 0xbd, 0xdf, 0x9b, 0x79, 0xdf, 0xef, 0x05, 0x94, - 0xed, 0x8b, 0xb6, 0xa4, 0x99, 0xf2, 0x76, 0x73, 0x93, 0x5a, 0x06, 0x75, 0xa8, 0x2d, 0xb7, 0xa8, - 0x51, 0x35, 0x2d, 0x99, 0x1f, 0x90, 0x86, 0x26, 0x93, 0xa6, 0x63, 0xda, 0x2a, 0xa9, 0x6b, 0x46, - 0x4d, 0x6e, 0x2d, 0xcb, 0x35, 0x6a, 0x50, 0x8b, 0x38, 0xb4, 0x2a, 0x35, 0x2c, 0xd3, 0x31, 0xd1, - 0xbc, 0x4f, 0x2a, 0x91, 0x86, 0x26, 0x45, 0x48, 0xa5, 0xd6, 0xf2, 0xc2, 0xb9, 0x9a, 0xe6, 0x6c, - 0x35, 0x37, 0x25, 0xd5, 0xd4, 0xe5, 0x9a, 0x59, 0x33, 0x65, 0xc6, 0xb1, 0xd9, 0xbc, 0xc3, 0xbe, - 0xd8, 0x07, 0xfb, 0xcb, 0x47, 0x5a, 0x10, 0x23, 0x42, 0x55, 0xd3, 0xa2, 0x7d, 0xa4, 0x2d, 0x9c, - 0xef, 0xd0, 0xe8, 0x44, 0xdd, 0xd2, 0x0c, 0x6a, 0xed, 0xc8, 0x8d, 0xed, 0x1a, 0x63, 0xb2, 0xa8, - 0x6d, 0x36, 0x2d, 0x95, 0xee, 0x89, 0xcb, 0x96, 0x75, 0xea, 0x90, 0x7e, 0xb2, 0xe4, 0x24, 0x2e, - 0xab, 0x69, 0x38, 0x9a, 0xde, 0x2b, 0xe6, 0xc2, 0x6e, 0x0c, 0xb6, 0xba, 0x45, 0x75, 0xd2, 0xcd, - 0x27, 0xfe, 0x9e, 0x85, 0xd3, 0x65, 0xd3, 0x70, 0x88, 0xc7, 0x81, 0xf9, 0x23, 0x2a, 0xd4, 0xb1, - 0x34, 0x75, 0x9d, 0xfd, 0x8d, 0xca, 0x90, 0x37, 0x88, 0x4e, 0xe7, 0x32, 0x8b, 0x99, 0xa5, 0x09, - 0x45, 0x7e, 0xdc, 0x16, 0x46, 0xdc, 0xb6, 0x90, 0xbf, 0x4a, 0x74, 0xfa, 0xa2, 0x2d, 0x08, 0xbd, - 0x8a, 0x93, 0x02, 0x18, 0x8f, 0x04, 0x33, 0x66, 0x74, 0x0b, 0xe6, 0x1c, 0x62, 0xd5, 0xa8, 0xb3, - 0xd2, 0xa2, 0x16, 0xa9, 0xd1, 0x1b, 0x8e, 0x56, 0xd7, 0xee, 0x13, 0x47, 0x33, 0x8d, 0xb9, 0xec, - 0x62, 0x66, 0x69, 0x54, 0xf9, 0xaf, 0xdb, 0x16, 0xe6, 0x36, 0x12, 0x68, 0x70, 0x22, 0x37, 0x6a, - 0x01, 0x8a, 0x9d, 0xdd, 0x24, 0xf5, 0x26, 0x9d, 0xcb, 0x2d, 0x66, 0x96, 0x0a, 0x25, 0x49, 0xea, - 0x38, 0x48, 0xa8, 0x15, 0xa9, 0xb1, 0x5d, 0x63, 0x1e, 0x13, 0x98, 0x4c, 0xba, 0xde, 0x24, 0x86, - 0xa3, 0x39, 0x3b, 0xca, 0x09, 0xb7, 0x2d, 0xa0, 0x8d, 0x1e, 0x34, 0xdc, 0x47, 0x02, 0x92, 0x61, - 0x42, 0x0d, 0xf4, 0x36, 0x37, 0xca, 0x74, 0x33, 0xcb, 0x75, 0x33, 0xd1, 0x51, 0x68, 0x87, 0x46, - 0xfc, 0x33, 0x45, 0xd3, 0x0e, 0x71, 0x9a, 0xf6, 0xc1, 0x68, 0xfa, 0x53, 0x98, 0x57, 0x9b, 0x96, - 0x45, 0x8d, 0x64, 0x55, 0x9f, 0x76, 0xdb, 0xc2, 0x7c, 0x39, 0x89, 0x08, 0x27, 0xf3, 0xa3, 0x07, - 0x70, 0x2c, 0x7e, 0xb8, 0x1f, 0x6d, 0x9f, 0xe2, 0x0f, 0x3c, 0x56, 0xee, 0x85, 0xc4, 0xfd, 0xe4, - 0xc4, 0x75, 0x9e, 0x1f, 0x40, 0xe7, 0x0f, 0x33, 0x70, 0xaa, 0x6c, 0x99, 0xb6, 0x7d, 0x93, 0x5a, - 0xb6, 0x66, 0x1a, 0xd7, 0x36, 0xef, 0x52, 0xd5, 0xc1, 0xf4, 0x0e, 0xb5, 0xa8, 0xa1, 0x52, 0xb4, - 0x08, 0xf9, 0x6d, 0xcd, 0xa8, 0x72, 0x8d, 0x4f, 0x06, 0x1a, 0xff, 0x50, 0x33, 0xaa, 0x98, 0x9d, - 0x78, 0x14, 0xcc, 0x26, 0xd9, 0x38, 0x45, 0x44, 0xe1, 0x25, 0x00, 0xd2, 0xd0, 0xb8, 0x00, 0xa6, - 0x8a, 0x09, 0x05, 0x71, 0x3a, 0x58, 0x59, 0xbb, 0xc2, 0x4f, 0x70, 0x84, 0x4a, 0xfc, 0x36, 0x07, - 0xc7, 0xdf, 0xbf, 0xe7, 0x50, 0xcb, 0x20, 0xf5, 0x58, 0xb0, 0x95, 0x00, 0x74, 0xf6, 0x7d, 0xb5, - 0xe3, 0x08, 0x21, 0x58, 0x25, 0x3c, 0xc1, 0x11, 0x2a, 0x64, 0xc2, 0x94, 0xff, 0xb5, 0x4e, 0xeb, - 0x54, 0x75, 0x4c, 0x8b, 0x5d, 0xb6, 0x50, 0x7a, 0x2b, 0xcd, 0x1e, 0xb6, 0xe4, 0xa5, 0x1e, 0xa9, - 0xb5, 0x2c, 0xad, 0x92, 0x4d, 0x5a, 0x0f, 0x58, 0x15, 0xe4, 0xb6, 
0x85, 0xa9, 0x4a, 0x0c, 0x0e, - 0x77, 0xc1, 0x23, 0x02, 0x05, 0x3f, 0x20, 0xf6, 0x63, 0xfd, 0x69, 0xb7, 0x2d, 0x14, 0x36, 0x3a, - 0x30, 0x38, 0x8a, 0x99, 0x10, 0xd5, 0xf9, 0x97, 0x1d, 0xd5, 0xe2, 0xf7, 0xbd, 0x86, 0xf1, 0x63, - 0xf3, 0x1f, 0x61, 0x98, 0x2d, 0x98, 0xe4, 0x61, 0xb3, 0x1f, 0xcb, 0x1c, 0xe7, 0xcf, 0x9a, 0x2c, - 0x47, 0xb0, 0x70, 0x0c, 0x19, 0xed, 0xf4, 0x4f, 0x04, 0xc3, 0x19, 0xe8, 0xe4, 0x5e, 0x92, 0x80, - 0xf8, 0x28, 0x0b, 0x27, 0x2f, 0x9b, 0x96, 0x76, 0xdf, 0x8b, 0xf2, 0xfa, 0x9a, 0x59, 0x5d, 0xe1, - 0x95, 0x9f, 0x5a, 0xe8, 0x36, 0x8c, 0x7b, 0xda, 0xab, 0x12, 0x87, 0x30, 0x1b, 0x15, 0x4a, 0x6f, - 0x0e, 0xa6, 0x6b, 0x3f, 0x31, 0x54, 0xa8, 0x43, 0x3a, 0x56, 0xed, 0xfc, 0x86, 0x43, 0x54, 0x74, - 0x0b, 0xf2, 0x76, 0x83, 0xaa, 0xdc, 0x92, 0x17, 0xa4, 0xc4, 0x0e, 0x44, 0x4a, 0xb8, 0xe3, 0x7a, - 0x83, 0xaa, 0x9d, 0x3c, 0xe2, 0x7d, 0x61, 0x86, 0x88, 0x6e, 0xc3, 0x98, 0xcd, 0x7c, 0x8d, 0x9b, - 0xed, 0xe2, 0x10, 0xd8, 0x8c, 0x5f, 0x99, 0xe2, 0xe8, 0x63, 0xfe, 0x37, 0xe6, 0xb8, 0xe2, 0xd7, - 0x39, 0x58, 0x4c, 0xe0, 0x2c, 0x9b, 0x46, 0x55, 0x63, 0x29, 0xfe, 0x32, 0xe4, 0x9d, 0x9d, 0x46, - 0xe0, 0xe2, 0xe7, 0x83, 0x8b, 0x6e, 0xec, 0x34, 0xbc, 0x22, 0x74, 0x66, 0x37, 0x7e, 0x8f, 0x0e, - 0x33, 0x04, 0xb4, 0x1a, 0x3e, 0x28, 0x1b, 0xc3, 0xe2, 0xd7, 0x7a, 0xd1, 0x16, 0xfa, 0x74, 0x5d, - 0x52, 0x88, 0x14, 0xbf, 0xbc, 0x97, 0x11, 0xea, 0xc4, 0x76, 0x36, 0x2c, 0x62, 0xd8, 0xbe, 0x24, - 0x4d, 0x0f, 0x3c, 0xfc, 0xb5, 0xc1, 0x8c, 0xec, 0x71, 0x28, 0x0b, 0xfc, 0x16, 0x68, 0xb5, 0x07, - 0x0d, 0xf7, 0x91, 0x80, 0xce, 0xc2, 0x98, 0x45, 0x89, 0x6d, 0x1a, 0xbc, 0xe0, 0x84, 0xca, 0xc5, - 0xec, 0x57, 0xcc, 0x4f, 0xd1, 0xff, 0xe1, 0x88, 0x4e, 0x6d, 0x9b, 0xd4, 0x28, 0xef, 0x06, 0xa6, - 0x39, 0xe1, 0x91, 0x8a, 0xff, 0x33, 0x0e, 0xce, 0xc5, 0xa7, 0x19, 0x38, 0x95, 0xa0, 0xc7, 0x55, - 0xcd, 0x76, 0xd0, 0x67, 0x3d, 0x5e, 0x2c, 0x0d, 0x98, 0x31, 0x34, 0xdb, 0xf7, 0xe1, 0x19, 0x2e, - 0x7b, 0x3c, 0xf8, 0x25, 0xe2, 0xc1, 0x1f, 0xc1, 0xa8, 0xe6, 0x50, 0xdd, 0xb3, 0x4a, 0x6e, 0xa9, - 0x50, 0x2a, 0xed, 0xdd, 0xcd, 0x94, 0xa3, 0x1c, 0x7e, 0xf4, 0x8a, 0x07, 0x84, 0x7d, 0x3c, 0xf1, - 0x8f, 0x6c, 0xe2, 0xb3, 0x3c, 0x37, 0x47, 0x2d, 0x98, 0x62, 0x5f, 0x7e, 0x2a, 0xc6, 0xf4, 0x0e, - 0x7f, 0x5c, 0x5a, 0x10, 0xa5, 0x14, 0x6f, 0xe5, 0x04, 0xbf, 0xc5, 0xd4, 0x7a, 0x0c, 0x15, 0x77, - 0x49, 0x41, 0xcb, 0x50, 0xd0, 0x35, 0x03, 0xd3, 0x46, 0x5d, 0x53, 0x89, 0xcd, 0x7b, 0x20, 0x56, - 0x7e, 0x2a, 0x9d, 0x9f, 0x71, 0x94, 0x06, 0xbd, 0x0d, 0x05, 0x9d, 0xdc, 0x0b, 0x59, 0x72, 0x8c, - 0xe5, 0x18, 0x97, 0x57, 0xa8, 0x74, 0x8e, 0x70, 0x94, 0x0e, 0xdd, 0x85, 0xa2, 0x5f, 0x53, 0xca, - 0x6b, 0x37, 0x22, 0x6d, 0xd3, 0x1a, 0xb5, 0x54, 0x6a, 0x38, 0x9e, 0x6b, 0xe4, 0x19, 0x92, 0xe8, - 0xb6, 0x85, 0xe2, 0x46, 0x2a, 0x25, 0xde, 0x05, 0x49, 0xfc, 0x39, 0x07, 0xa7, 0x53, 0xd3, 0x00, - 0xba, 0x04, 0xc8, 0xdc, 0xb4, 0xa9, 0xd5, 0xa2, 0xd5, 0x0f, 0xfc, 0xae, 0xdf, 0x6b, 0x50, 0x3c, - 0x9d, 0xe7, 0xfc, 0x9a, 0x78, 0xad, 0xe7, 0x14, 0xf7, 0xe1, 0x40, 0x2a, 0x1c, 0xf5, 0xe2, 0xc2, - 0xd7, 0xb2, 0xc6, 0x7b, 0xa1, 0xbd, 0x05, 0xdd, 0xac, 0xdb, 0x16, 0x8e, 0xae, 0x46, 0x41, 0x70, - 0x1c, 0x13, 0xad, 0xc0, 0x34, 0x4f, 0xf6, 0x5d, 0x5a, 0x3f, 0xc9, 0xb5, 0x3e, 0x5d, 0x8e, 0x1f, - 0xe3, 0x6e, 0x7a, 0x0f, 0xa2, 0x4a, 0x6d, 0xcd, 0xa2, 0xd5, 0x10, 0x22, 0x1f, 0x87, 0x78, 0x2f, - 0x7e, 0x8c, 0xbb, 0xe9, 0x91, 0x0e, 0x02, 0x47, 0x4d, 0xb4, 0xe0, 0x28, 0x83, 0xfc, 0x9f, 0xdb, - 0x16, 0x84, 0x72, 0x3a, 0x29, 0xde, 0x0d, 0x4b, 0x7c, 0x98, 0x07, 0xde, 0x3b, 0xb0, 0x00, 0x39, - 0x1f, 0x4b, 0xbd, 0x8b, 0x5d, 0xa9, 0x77, 0x26, 0xda, 0x28, 0x46, 0xd2, 0xec, 0x75, 0x18, 
0x33, - 0x59, 0x64, 0x70, 0xbb, 0x9c, 0x4b, 0x09, 0xa7, 0xb0, 0xa4, 0x85, 0x40, 0x0a, 0x78, 0xb9, 0x8c, - 0x87, 0x16, 0x07, 0x42, 0x57, 0x20, 0xdf, 0x30, 0xab, 0x41, 0x21, 0x7a, 0x3d, 0x05, 0x70, 0xcd, - 0xac, 0xda, 0x31, 0xb8, 0x71, 0xef, 0xc6, 0xde, 0xaf, 0x98, 0x41, 0xa0, 0x8f, 0x61, 0x3c, 0x28, - 0xf8, 0xbc, 0x3b, 0x90, 0x53, 0xe0, 0xfa, 0x0d, 0xa0, 0xca, 0xa4, 0x97, 0xc8, 0x82, 0x13, 0x1c, - 0xc2, 0xa1, 0x07, 0x30, 0xab, 0x76, 0xcf, 0x53, 0x73, 0x47, 0x76, 0xad, 0x9d, 0xa9, 0xd3, 0xae, - 0xf2, 0x1f, 0xb7, 0x2d, 0xcc, 0xf6, 0x90, 0xe0, 0x5e, 0x49, 0xde, 0xcb, 0x28, 0xef, 0x14, 0x99, - 0x53, 0xa4, 0xbf, 0xac, 0x5f, 0xb7, 0xef, 0xbf, 0x2c, 0x38, 0xc1, 0x21, 0x9c, 0xf8, 0x5d, 0x1e, - 0x26, 0x63, 0xdd, 0xe7, 0x21, 0x7b, 0x86, 0xdf, 0x46, 0x1c, 0x98, 0x67, 0xf8, 0x70, 0x07, 0xea, - 0x19, 0x3e, 0xe4, 0x21, 0x79, 0x86, 0x2f, 0xec, 0x90, 0x3c, 0x23, 0xf2, 0xb2, 0x3e, 0x9e, 0xf1, - 0x34, 0x07, 0xa8, 0x37, 0x88, 0xd1, 0x17, 0x30, 0xe6, 0x97, 0x8b, 0x7d, 0x96, 0xd4, 0xb0, 0xb9, - 0xe1, 0xd5, 0x93, 0xa3, 0x76, 0x4d, 0x3f, 0xd9, 0x81, 0xa6, 0x1f, 0x7a, 0x10, 0x53, 0x62, 0x58, - 0x73, 0x13, 0x27, 0xc5, 0xcf, 0x61, 0xdc, 0x0e, 0xc6, 0xab, 0xfc, 0xf0, 0xe3, 0x15, 0x53, 0x78, - 0x38, 0x58, 0x85, 0x90, 0xa8, 0x0a, 0x93, 0x24, 0x3a, 0xe1, 0x8c, 0x0e, 0xf5, 0x8c, 0x19, 0x6f, - 0x9c, 0x8a, 0x8d, 0x36, 0x31, 0x54, 0xf1, 0x97, 0x6e, 0xb3, 0xfa, 0x61, 0xff, 0x77, 0x34, 0xeb, - 0xe1, 0xcd, 0x98, 0xff, 0x0a, 0xcb, 0xfe, 0x90, 0x85, 0x99, 0xee, 0x22, 0x39, 0xd4, 0x32, 0xe1, - 0x7e, 0xdf, 0x8d, 0x48, 0x76, 0xa8, 0x4b, 0x87, 0x33, 0xd0, 0x80, 0xbb, 0xce, 0xa8, 0x25, 0x72, - 0x07, 0x6e, 0x09, 0xf1, 0xc7, 0xb8, 0x8e, 0x86, 0x5f, 0xb8, 0x24, 0xac, 0x27, 0xb3, 0x87, 0xb4, - 0x9e, 0x7c, 0xc9, 0x6a, 0xfa, 0x29, 0x0b, 0xc7, 0x5f, 0x6d, 0xe8, 0x07, 0xdf, 0xe5, 0x3d, 0xea, - 0xd5, 0xd7, 0xab, 0x3d, 0xfb, 0x40, 0x2b, 0xb6, 0xaf, 0xb2, 0x30, 0xca, 0x46, 0xb3, 0x43, 0x58, - 0xa8, 0x5d, 0x8a, 0x2d, 0xd4, 0xce, 0xa4, 0x54, 0x38, 0x76, 0xa3, 0xc4, 0xf5, 0xd9, 0xd5, 0xae, - 0xf5, 0xd9, 0xd9, 0x5d, 0x91, 0xd2, 0x97, 0x65, 0xef, 0xc0, 0x44, 0x28, 0x10, 0xbd, 0xe1, 0xf5, - 0xaa, 0x7c, 0xa6, 0xcc, 0x30, 0xdb, 0x86, 0x1b, 0x96, 0x70, 0x98, 0x0c, 0x29, 0x44, 0x0d, 0x0a, - 0x11, 0x09, 0x7b, 0x63, 0xf6, 0xa8, 0xed, 0xe8, 0xba, 0x78, 0xa2, 0x43, 0xdd, 0x9b, 0x13, 0x94, - 0x77, 0x1f, 0x3f, 0x2f, 0x8e, 0x3c, 0x79, 0x5e, 0x1c, 0x79, 0xf6, 0xbc, 0x38, 0xf2, 0xa5, 0x5b, - 0xcc, 0x3c, 0x76, 0x8b, 0x99, 0x27, 0x6e, 0x31, 0xf3, 0xcc, 0x2d, 0x66, 0x7e, 0x75, 0x8b, 0x99, - 0x6f, 0x7e, 0x2b, 0x8e, 0x7c, 0x32, 0x9f, 0xf8, 0x2f, 0xd5, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, - 0x5d, 0x92, 0x55, 0x29, 0x87, 0x1d, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/autoscaling/v1/generated.proto", fileDescriptor_1972394c0c7aac8b) +} + +var fileDescriptor_1972394c0c7aac8b = []byte{ + // 1593 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x59, 0x4d, 0x6c, 0x13, 0xd7, + 0x16, 0x8e, 0x7f, 0x12, 0x92, 0xe3, 0x90, 0x9f, 0x0b, 0x0f, 0x92, 0xf0, 0xf0, 0x44, 0xf3, 0x10, + 0x0a, 0xef, 0x3d, 0xc6, 0x8d, 0x4b, 0x11, 0x5d, 0x55, 0xb1, 0x5b, 0x0a, 0x6a, 0x0c, 0xe1, 0x26, + 0x50, 0xfa, 0x2b, 0x6e, 0xc6, 0x17, 0x67, 0x88, 0x67, 0xc6, 0x9a, 0x19, 0x5b, 0x04, 0x09, 0xa9, + 0x5d, 0x74, 0xdf, 0x0d, 0xed, 0xb6, 0x95, 0xba, 0xed, 0x9a, 0x75, 0x77, 0x2c, 0x59, 0x20, 0x95, + 0x95, 0x55, 0xa6, 0x8b, 0x2e, 0xba, 0xea, 0x96, 0x55, 0x75, 0xef, 0xdc, 0x19, 0xcf, 0xd8, 0x9e, + 0x89, 0xe3, 0x84, 0xa8, 0xad, 0xd8, 0x65, 0x7c, 0xcf, 0xf9, 0xce, 0xbd, 0xe7, 0xff, 0x9c, 0xc0, + 0xb9, 0xed, 0x4b, 0xb6, 0xa2, 0x99, 0x05, 0xd2, 0xd0, 0x0a, 
0xa4, 0xe9, 0x98, 0xb6, 0x4a, 0xea, + 0x9a, 0x51, 0x2b, 0xb4, 0x96, 0x0b, 0x35, 0x6a, 0x50, 0x8b, 0x38, 0xb4, 0xaa, 0x34, 0x2c, 0xd3, + 0x31, 0xd1, 0xbc, 0x47, 0xaa, 0x90, 0x86, 0xa6, 0x84, 0x48, 0x95, 0xd6, 0xf2, 0xc2, 0xf9, 0x9a, + 0xe6, 0x6c, 0x35, 0x37, 0x15, 0xd5, 0xd4, 0x0b, 0x35, 0xb3, 0x66, 0x16, 0x38, 0xc7, 0x66, 0xf3, + 0x2e, 0xff, 0xe2, 0x1f, 0xfc, 0x2f, 0x0f, 0x69, 0x41, 0x0e, 0x09, 0x55, 0x4d, 0x8b, 0xf6, 0x91, + 0xb6, 0x70, 0xa1, 0x43, 0xa3, 0x13, 0x75, 0x4b, 0x33, 0xa8, 0xb5, 0x53, 0x68, 0x6c, 0xd7, 0x38, + 0x93, 0x45, 0x6d, 0xb3, 0x69, 0xa9, 0x74, 0x4f, 0x5c, 0x76, 0x41, 0xa7, 0x0e, 0xe9, 0x27, 0xab, + 0x10, 0xc7, 0x65, 0x35, 0x0d, 0x47, 0xd3, 0x7b, 0xc5, 0x5c, 0xdc, 0x8d, 0xc1, 0x56, 0xb7, 0xa8, + 0x4e, 0xba, 0xf9, 0xe4, 0xdf, 0xd2, 0x70, 0xba, 0x6c, 0x1a, 0x0e, 0x61, 0x1c, 0x58, 0x3c, 0xa2, + 0x42, 0x1d, 0x4b, 0x53, 0xd7, 0xf9, 0xdf, 0xa8, 0x0c, 0x59, 0x83, 0xe8, 0x74, 0x2e, 0xb5, 0x98, + 0x5a, 0x9a, 0x28, 0x15, 0x9e, 0xb4, 0xa5, 0x11, 0xb7, 0x2d, 0x65, 0xaf, 0x11, 0x9d, 0xbe, 0x6c, + 0x4b, 0x52, 0xaf, 0xe2, 0x14, 0x1f, 0x86, 0x91, 0x60, 0xce, 0x8c, 0x6e, 0xc3, 0x9c, 0x43, 0xac, + 0x1a, 0x75, 0x56, 0x5a, 0xd4, 0x22, 0x35, 0x7a, 0xd3, 0xd1, 0xea, 0xda, 0x03, 0xe2, 0x68, 0xa6, + 0x31, 0x97, 0x5e, 0x4c, 0x2d, 0x8d, 0x96, 0xfe, 0xed, 0xb6, 0xa5, 0xb9, 0x8d, 0x18, 0x1a, 0x1c, + 0xcb, 0x8d, 0x5a, 0x80, 0x22, 0x67, 0xb7, 0x48, 0xbd, 0x49, 0xe7, 0x32, 0x8b, 0xa9, 0xa5, 0x5c, + 0x51, 0x51, 0x3a, 0x0e, 0x12, 0x68, 0x45, 0x69, 0x6c, 0xd7, 0xb8, 0xc7, 0xf8, 0x26, 0x53, 0x6e, + 0x34, 0x89, 0xe1, 0x68, 0xce, 0x4e, 0xe9, 0x84, 0xdb, 0x96, 0xd0, 0x46, 0x0f, 0x1a, 0xee, 0x23, + 0x01, 0x15, 0x60, 0x42, 0xf5, 0xf5, 0x36, 0x37, 0xca, 0x75, 0x33, 0x2b, 0x74, 0x33, 0xd1, 0x51, + 0x68, 0x87, 0x46, 0xfe, 0x23, 0x41, 0xd3, 0x0e, 0x71, 0x9a, 0xf6, 0xc1, 0x68, 0xfa, 0x13, 0x98, + 0x57, 0x9b, 0x96, 0x45, 0x8d, 0x78, 0x55, 0x9f, 0x76, 0xdb, 0xd2, 0x7c, 0x39, 0x8e, 0x08, 0xc7, + 0xf3, 0xa3, 0x87, 0x70, 0x2c, 0x7a, 0xb8, 0x1f, 0x6d, 0x9f, 0x12, 0x0f, 0x3c, 0x56, 0xee, 0x85, + 0xc4, 0xfd, 0xe4, 0x44, 0x75, 0x9e, 0x1d, 0x40, 0xe7, 0x8f, 0x52, 0x70, 0xaa, 0x6c, 0x99, 0xb6, + 0x7d, 0x8b, 0x5a, 0xb6, 0x66, 0x1a, 0xd7, 0x37, 0xef, 0x51, 0xd5, 0xc1, 0xf4, 0x2e, 0xb5, 0xa8, + 0xa1, 0x52, 0xb4, 0x08, 0xd9, 0x6d, 0xcd, 0xa8, 0x0a, 0x8d, 0x4f, 0xfa, 0x1a, 0xff, 0x40, 0x33, + 0xaa, 0x98, 0x9f, 0x30, 0x0a, 0x6e, 0x93, 0x74, 0x94, 0x22, 0xa4, 0xf0, 0x22, 0x00, 0x69, 0x68, + 0x42, 0x00, 0x57, 0xc5, 0x44, 0x09, 0x09, 0x3a, 0x58, 0x59, 0xbb, 0x2a, 0x4e, 0x70, 0x88, 0x4a, + 0xfe, 0x26, 0x03, 0xc7, 0xdf, 0xbb, 0xef, 0x50, 0xcb, 0x20, 0xf5, 0x48, 0xb0, 0x15, 0x01, 0x74, + 0xfe, 0x7d, 0xad, 0xe3, 0x08, 0x01, 0x58, 0x25, 0x38, 0xc1, 0x21, 0x2a, 0x64, 0xc2, 0x94, 0xf7, + 0xb5, 0x4e, 0xeb, 0x54, 0x75, 0x4c, 0x8b, 0x5f, 0x36, 0x57, 0x7c, 0x33, 0xc9, 0x1e, 0xb6, 0xc2, + 0x52, 0x8f, 0xd2, 0x5a, 0x56, 0x56, 0xc9, 0x26, 0xad, 0xfb, 0xac, 0x25, 0xe4, 0xb6, 0xa5, 0xa9, + 0x4a, 0x04, 0x0e, 0x77, 0xc1, 0x23, 0x02, 0x39, 0x2f, 0x20, 0xf6, 0x63, 0xfd, 0x69, 0xb7, 0x2d, + 0xe5, 0x36, 0x3a, 0x30, 0x38, 0x8c, 0x19, 0x13, 0xd5, 0xd9, 0x57, 0x1d, 0xd5, 0xf2, 0x77, 0xbd, + 0x86, 0xf1, 0x62, 0xf3, 0x6f, 0x61, 0x98, 0x2d, 0x98, 0x14, 0x61, 0xb3, 0x1f, 0xcb, 0x1c, 0x17, + 0xcf, 0x9a, 0x2c, 0x87, 0xb0, 0x70, 0x04, 0x19, 0xed, 0xf4, 0x4f, 0x04, 0xc3, 0x19, 0xe8, 0xe4, + 0x5e, 0x92, 0x80, 0xfc, 0x38, 0x0d, 0x27, 0xaf, 0x98, 0x96, 0xf6, 0x80, 0x45, 0x79, 0x7d, 0xcd, + 0xac, 0xae, 0x88, 0xca, 0x4f, 0x2d, 0x74, 0x07, 0xc6, 0x99, 0xf6, 0xaa, 0xc4, 0x21, 0xdc, 0x46, + 0xb9, 0xe2, 0x1b, 0x83, 0xe9, 0xda, 0x4b, 0x0c, 0x15, 0xea, 0x90, 0x8e, 0x55, 0x3b, 
0xbf, 0xe1, + 0x00, 0x15, 0xdd, 0x86, 0xac, 0xdd, 0xa0, 0xaa, 0xb0, 0xe4, 0x45, 0x25, 0xb6, 0x03, 0x51, 0x62, + 0xee, 0xb8, 0xde, 0xa0, 0x6a, 0x27, 0x8f, 0xb0, 0x2f, 0xcc, 0x11, 0xd1, 0x1d, 0x18, 0xb3, 0xb9, + 0xaf, 0x09, 0xb3, 0x5d, 0x1a, 0x02, 0x9b, 0xf3, 0x97, 0xa6, 0x04, 0xfa, 0x98, 0xf7, 0x8d, 0x05, + 0xae, 0xfc, 0x55, 0x06, 0x16, 0x63, 0x38, 0xcb, 0xa6, 0x51, 0xd5, 0x78, 0x8a, 0xbf, 0x02, 0x59, + 0x67, 0xa7, 0xe1, 0xbb, 0xf8, 0x05, 0xff, 0xa2, 0x1b, 0x3b, 0x0d, 0x56, 0x84, 0xce, 0xec, 0xc6, + 0xcf, 0xe8, 0x30, 0x47, 0x40, 0xab, 0xc1, 0x83, 0xd2, 0x11, 0x2c, 0x71, 0xad, 0x97, 0x6d, 0xa9, + 0x4f, 0xd7, 0xa5, 0x04, 0x48, 0xd1, 0xcb, 0xb3, 0x8c, 0x50, 0x27, 0xb6, 0xb3, 0x61, 0x11, 0xc3, + 0xf6, 0x24, 0x69, 0xba, 0xef, 0xe1, 0xff, 0x1d, 0xcc, 0xc8, 0x8c, 0xa3, 0xb4, 0x20, 0x6e, 0x81, + 0x56, 0x7b, 0xd0, 0x70, 0x1f, 0x09, 0xe8, 0x2c, 0x8c, 0x59, 0x94, 0xd8, 0xa6, 0x21, 0x0a, 0x4e, + 0xa0, 0x5c, 0xcc, 0x7f, 0xc5, 0xe2, 0x14, 0x9d, 0x83, 0x23, 0x3a, 0xb5, 0x6d, 0x52, 0xa3, 0xa2, + 0x1b, 0x98, 0x16, 0x84, 0x47, 0x2a, 0xde, 0xcf, 0xd8, 0x3f, 0x97, 0x9f, 0xa5, 0xe0, 0x54, 0x8c, + 0x1e, 0x57, 0x35, 0xdb, 0x41, 0x9f, 0xf6, 0x78, 0xb1, 0x32, 0x60, 0xc6, 0xd0, 0x6c, 0xcf, 0x87, + 0x67, 0x84, 0xec, 0x71, 0xff, 0x97, 0x90, 0x07, 0x7f, 0x08, 0xa3, 0x9a, 0x43, 0x75, 0x66, 0x95, + 0xcc, 0x52, 0xae, 0x58, 0xdc, 0xbb, 0x9b, 0x95, 0x8e, 0x0a, 0xf8, 0xd1, 0xab, 0x0c, 0x08, 0x7b, + 0x78, 0xf2, 0xef, 0xe9, 0xd8, 0x67, 0x31, 0x37, 0x47, 0x2d, 0x98, 0xe2, 0x5f, 0x5e, 0x2a, 0xc6, + 0xf4, 0xae, 0x78, 0x5c, 0x52, 0x10, 0x25, 0x14, 0xef, 0xd2, 0x09, 0x71, 0x8b, 0xa9, 0xf5, 0x08, + 0x2a, 0xee, 0x92, 0x82, 0x96, 0x21, 0xa7, 0x6b, 0x06, 0xa6, 0x8d, 0xba, 0xa6, 0x12, 0x5b, 0xf4, + 0x40, 0xbc, 0xfc, 0x54, 0x3a, 0x3f, 0xe3, 0x30, 0x0d, 0x7a, 0x0b, 0x72, 0x3a, 0xb9, 0x1f, 0xb0, + 0x64, 0x38, 0xcb, 0x31, 0x21, 0x2f, 0x57, 0xe9, 0x1c, 0xe1, 0x30, 0x1d, 0xba, 0x07, 0x79, 0xaf, + 0xa6, 0x94, 0xd7, 0x6e, 0x86, 0xda, 0xa6, 0x35, 0x6a, 0xa9, 0xd4, 0x70, 0x98, 0x6b, 0x64, 0x39, + 0x92, 0xec, 0xb6, 0xa5, 0xfc, 0x46, 0x22, 0x25, 0xde, 0x05, 0x49, 0xfe, 0x29, 0x03, 0xa7, 0x13, + 0xd3, 0x00, 0xba, 0x0c, 0xc8, 0xdc, 0xb4, 0xa9, 0xd5, 0xa2, 0xd5, 0xf7, 0xbd, 0xae, 0x9f, 0x35, + 0x28, 0x4c, 0xe7, 0x19, 0xaf, 0x26, 0x5e, 0xef, 0x39, 0xc5, 0x7d, 0x38, 0x90, 0x0a, 0x47, 0x59, + 0x5c, 0x78, 0x5a, 0xd6, 0x44, 0x2f, 0xb4, 0xb7, 0xa0, 0x9b, 0x75, 0xdb, 0xd2, 0xd1, 0xd5, 0x30, + 0x08, 0x8e, 0x62, 0xa2, 0x15, 0x98, 0x16, 0xc9, 0xbe, 0x4b, 0xeb, 0x27, 0x85, 0xd6, 0xa7, 0xcb, + 0xd1, 0x63, 0xdc, 0x4d, 0xcf, 0x20, 0xaa, 0xd4, 0xd6, 0x2c, 0x5a, 0x0d, 0x20, 0xb2, 0x51, 0x88, + 0x77, 0xa3, 0xc7, 0xb8, 0x9b, 0x1e, 0xe9, 0x20, 0x09, 0xd4, 0x58, 0x0b, 0x8e, 0x72, 0xc8, 0xff, + 0xb8, 0x6d, 0x49, 0x2a, 0x27, 0x93, 0xe2, 0xdd, 0xb0, 0xe4, 0x47, 0x59, 0x10, 0xbd, 0x03, 0x0f, + 0x90, 0x0b, 0x91, 0xd4, 0xbb, 0xd8, 0x95, 0x7a, 0x67, 0xc2, 0x8d, 0x62, 0x28, 0xcd, 0xde, 0x80, + 0x31, 0x93, 0x47, 0x86, 0xb0, 0xcb, 0xf9, 0x84, 0x70, 0x0a, 0x4a, 0x5a, 0x00, 0x54, 0x02, 0x96, + 0xcb, 0x44, 0x68, 0x09, 0x20, 0x74, 0x15, 0xb2, 0x0d, 0xb3, 0xea, 0x17, 0xa2, 0xff, 0x25, 0x00, + 0xae, 0x99, 0x55, 0x3b, 0x02, 0x37, 0xce, 0x6e, 0xcc, 0x7e, 0xc5, 0x1c, 0x02, 0x7d, 0x04, 0xe3, + 0x7e, 0xc1, 0x17, 0xdd, 0x41, 0x21, 0x01, 0xae, 0xdf, 0x00, 0x5a, 0x9a, 0x64, 0x89, 0xcc, 0x3f, + 0xc1, 0x01, 0x1c, 0x7a, 0x08, 0xb3, 0x6a, 0xf7, 0x3c, 0x35, 0x77, 0x64, 0xd7, 0xda, 0x99, 0x38, + 0xed, 0x96, 0xfe, 0xe5, 0xb6, 0xa5, 0xd9, 0x1e, 0x12, 0xdc, 0x2b, 0x89, 0xbd, 0x8c, 0x8a, 0x4e, + 0x91, 0x3b, 0x45, 0xf2, 0xcb, 0xfa, 0x75, 0xfb, 0xde, 0xcb, 0xfc, 0x13, 0x1c, 0xc0, 0xc9, 0xdf, + 0x66, 0x61, 
0x32, 0xd2, 0x7d, 0x1e, 0xb2, 0x67, 0x78, 0x6d, 0xc4, 0x81, 0x79, 0x86, 0x07, 0x77, + 0xa0, 0x9e, 0xe1, 0x41, 0x1e, 0x92, 0x67, 0x78, 0xc2, 0x0e, 0xc9, 0x33, 0x42, 0x2f, 0xeb, 0xe3, + 0x19, 0xcf, 0x32, 0x80, 0x7a, 0x83, 0x18, 0x7d, 0x0e, 0x63, 0x5e, 0xb9, 0xd8, 0x67, 0x49, 0x0d, + 0x9a, 0x1b, 0x51, 0x3d, 0x05, 0x6a, 0xd7, 0xf4, 0x93, 0x1e, 0x68, 0xfa, 0xa1, 0x07, 0x31, 0x25, + 0x06, 0x35, 0x37, 0x76, 0x52, 0xfc, 0x0c, 0xc6, 0x6d, 0x7f, 0xbc, 0xca, 0x0e, 0x3f, 0x5e, 0x71, + 0x85, 0x07, 0x83, 0x55, 0x00, 0x89, 0xaa, 0x30, 0x49, 0xc2, 0x13, 0xce, 0xe8, 0x50, 0xcf, 0x98, + 0x61, 0xe3, 0x54, 0x64, 0xb4, 0x89, 0xa0, 0xca, 0x3f, 0x77, 0x9b, 0xd5, 0x0b, 0xfb, 0xbf, 0xa2, + 0x59, 0x0f, 0x6f, 0xc6, 0xfc, 0x47, 0x58, 0xf6, 0xfb, 0x34, 0xcc, 0x74, 0x17, 0xc9, 0xa1, 0x96, + 0x09, 0x0f, 0xfa, 0x6e, 0x44, 0xd2, 0x43, 0x5d, 0x3a, 0x98, 0x81, 0x06, 0xdc, 0x75, 0x86, 0x2d, + 0x91, 0x39, 0x70, 0x4b, 0xc8, 0x3f, 0x44, 0x75, 0x34, 0xfc, 0xc2, 0x25, 0x66, 0x3d, 0x99, 0x3e, + 0xa4, 0xf5, 0xe4, 0x2b, 0x56, 0xd3, 0x8f, 0x69, 0x38, 0xfe, 0x7a, 0x43, 0x3f, 0xf8, 0x2e, 0xef, + 0x71, 0xaf, 0xbe, 0x5e, 0xef, 0xd9, 0x07, 0x5a, 0xb1, 0x7d, 0x99, 0x86, 0x51, 0x3e, 0x9a, 0x1d, + 0xc2, 0x42, 0xed, 0x72, 0x64, 0xa1, 0x76, 0x26, 0xa1, 0xc2, 0xf1, 0x1b, 0xc5, 0xae, 0xcf, 0xae, + 0x75, 0xad, 0xcf, 0xce, 0xee, 0x8a, 0x94, 0xbc, 0x2c, 0x7b, 0x1b, 0x26, 0x02, 0x81, 0xe8, 0xff, + 0xac, 0x57, 0x15, 0x33, 0x65, 0x8a, 0xdb, 0x36, 0xd8, 0xb0, 0x04, 0xc3, 0x64, 0x40, 0x21, 0x6b, + 0x90, 0x0b, 0x49, 0xd8, 0x1b, 0x33, 0xa3, 0xb6, 0xc3, 0xeb, 0xe2, 0x89, 0x0e, 0x75, 0x6f, 0x4e, + 0x28, 0xbd, 0xf3, 0xe4, 0x45, 0x7e, 0xe4, 0xe9, 0x8b, 0xfc, 0xc8, 0xf3, 0x17, 0xf9, 0x91, 0x2f, + 0xdc, 0x7c, 0xea, 0x89, 0x9b, 0x4f, 0x3d, 0x75, 0xf3, 0xa9, 0xe7, 0x6e, 0x3e, 0xf5, 0x8b, 0x9b, + 0x4f, 0x7d, 0xfd, 0x6b, 0x7e, 0xe4, 0xe3, 0xf9, 0xd8, 0x7f, 0xa9, 0xfe, 0x19, 0x00, 0x00, 0xff, + 0xff, 0xd7, 0x67, 0xd4, 0x08, 0x6e, 0x1d, 0x00, 0x00, } func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto index 1dbafd1a53..0a961312f4 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto @@ -51,7 +51,7 @@ message ContainerResourceMetricSource { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; // container is the name of the container in the pods of the scaling target. optional string container = 5; @@ -78,7 +78,7 @@ message ContainerResourceMetricStatus { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // It will always be set, regardless of the corresponding metric specification. - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; // container is the name of the container in the pods of the scaling taget optional string container = 4; @@ -108,17 +108,17 @@ message ExternalMetricSource { // metricSelector is used to identify a specific time series // within a given metric. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; // targetValue is the target value of the metric (as a quantity). // Mutually exclusive with TargetAverageValue. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; // targetAverageValue is the target per-pod value of global metric (as a quantity). // Mutually exclusive with TargetValue. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4; } // ExternalMetricStatus indicates the current value of a global metric @@ -131,21 +131,21 @@ message ExternalMetricStatus { // metricSelector is used to identify a specific time series // within a given metric. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; // currentValue is the current value of the metric (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; // currentAverageValue is the current value of metric averaged over autoscaled pods. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4; } // configuration of a horizontal pod autoscaler. message HorizontalPodAutoscaler { // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec defines the behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional @@ -168,7 +168,7 @@ message HorizontalPodAutoscalerCondition { // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // reason is the reason for the condition's last transition. // +optional @@ -184,7 +184,7 @@ message HorizontalPodAutoscalerCondition { message HorizontalPodAutoscalerList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of horizontal pod autoscaler objects. repeated HorizontalPodAutoscaler items = 2; @@ -222,7 +222,7 @@ message HorizontalPodAutoscalerStatus { // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods; // used by the autoscaler to control how often the number of pods is changed. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; // currentReplicas is the current number of replicas of pods managed by this autoscaler. 
optional int32 currentReplicas = 3; @@ -336,18 +336,18 @@ message ObjectMetricSource { optional string metricName = 2; // targetValue is the target value of the metric (as a quantity). - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; // selector is the string-encoded form of a standard kubernetes label selector for the given metric. // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; } // ObjectMetricStatus indicates the current value of a metric describing a @@ -360,18 +360,18 @@ message ObjectMetricStatus { optional string metricName = 2; // currentValue is the current value of the metric (as a quantity). - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; } // PodsMetricSource indicates how to scale on a metric describing each pod in @@ -384,13 +384,13 @@ message PodsMetricSource { // targetAverageValue is the target value of the average of the // metric across all relevant pods (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping // When unset, just the metricName will be used to gather metrics. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; } // PodsMetricStatus indicates the current value of a metric describing each pod in @@ -401,13 +401,13 @@ message PodsMetricStatus { // currentAverageValue is the current value of the average of the // metric across all relevant pods (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; } // ResourceMetricSource indicates how to scale on a resource metric known to @@ -431,7 +431,7 @@ message ResourceMetricSource { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; } // ResourceMetricStatus indicates the current value of a resource metric known to @@ -455,14 +455,14 @@ message ResourceMetricStatus { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // It will always be set, regardless of the corresponding metric specification. - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; } // Scale represents a scaling request for a resource. message Scale { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go index 4508290176..b31425b3b7 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types.go +++ b/vendor/k8s.io/api/autoscaling/v1/types.go @@ -83,6 +83,7 @@ type HorizontalPodAutoscalerStatus struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // configuration of a horizontal pod autoscaler. type HorizontalPodAutoscaler struct { @@ -101,6 +102,7 @@ type HorizontalPodAutoscaler struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // list of horizontal pod autoscaler objects. 
type HorizontalPodAutoscalerList struct { @@ -114,6 +116,7 @@ type HorizontalPodAutoscalerList struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // Scale represents a scaling request for a resource. type Scale struct { diff --git a/vendor/k8s.io/api/autoscaling/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/autoscaling/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..36d86a5ec3 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,40 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *HorizontalPodAutoscaler) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *HorizontalPodAutoscalerList) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Scale) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} diff --git a/vendor/k8s.io/api/autoscaling/v2/doc.go b/vendor/k8s.io/api/autoscaling/v2/doc.go index f96a059b6c..aafa2d4de2 100644 --- a/vendor/k8s.io/api/autoscaling/v2/doc.go +++ b/vendor/k8s.io/api/autoscaling/v2/doc.go @@ -17,5 +17,6 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true package v2 // import "k8s.io/api/autoscaling/v2" diff --git a/vendor/k8s.io/api/autoscaling/v2/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2/generated.pb.go index 9f57916d7c..ece6dedadb 100644 --- a/vendor/k8s.io/api/autoscaling/v2/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2/generated.proto +// source: k8s.io/api/autoscaling/v2/generated.proto package v2 @@ -50,7 +50,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} } func (*ContainerResourceMetricSource) ProtoMessage() {} func (*ContainerResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{0} + return fileDescriptor_4d5f2c8767749221, []int{0} } func (m *ContainerResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -78,7 +78,7 @@ var xxx_messageInfo_ContainerResourceMetricSource proto.InternalMessageInfo func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} } func (*ContainerResourceMetricStatus) ProtoMessage() {} func (*ContainerResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{1} + return fileDescriptor_4d5f2c8767749221, []int{1} } func (m *ContainerResourceMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,7 +106,7 @@ var xxx_messageInfo_ContainerResourceMetricStatus proto.InternalMessageInfo func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{2} + return fileDescriptor_4d5f2c8767749221, []int{2} } func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,7 +134,7 @@ var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } func (*ExternalMetricSource) ProtoMessage() {} func (*ExternalMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{3} + return fileDescriptor_4d5f2c8767749221, []int{3} } func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -162,7 +162,7 @@ var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } func (*ExternalMetricStatus) ProtoMessage() {} func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{4} + return fileDescriptor_4d5f2c8767749221, []int{4} } func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -190,7 +190,7 @@ var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo func (m *HPAScalingPolicy) Reset() { *m = HPAScalingPolicy{} } func (*HPAScalingPolicy) ProtoMessage() {} func (*HPAScalingPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{5} + return fileDescriptor_4d5f2c8767749221, []int{5} } func (m *HPAScalingPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -218,7 +218,7 @@ var xxx_messageInfo_HPAScalingPolicy proto.InternalMessageInfo func (m *HPAScalingRules) Reset() { *m = HPAScalingRules{} } func (*HPAScalingRules) ProtoMessage() {} func (*HPAScalingRules) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{6} + return fileDescriptor_4d5f2c8767749221, []int{6} } func (m *HPAScalingRules) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -246,7 +246,7 @@ var xxx_messageInfo_HPAScalingRules proto.InternalMessageInfo func (m *HorizontalPodAutoscaler) Reset() 
{ *m = HorizontalPodAutoscaler{} } func (*HorizontalPodAutoscaler) ProtoMessage() {} func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{7} + return fileDescriptor_4d5f2c8767749221, []int{7} } func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -274,7 +274,7 @@ var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerBehavior) Reset() { *m = HorizontalPodAutoscalerBehavior{} } func (*HorizontalPodAutoscalerBehavior) ProtoMessage() {} func (*HorizontalPodAutoscalerBehavior) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{8} + return fileDescriptor_4d5f2c8767749221, []int{8} } func (m *HorizontalPodAutoscalerBehavior) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -302,7 +302,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerBehavior proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{9} + return fileDescriptor_4d5f2c8767749221, []int{9} } func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -330,7 +330,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{10} + return fileDescriptor_4d5f2c8767749221, []int{10} } func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -358,7 +358,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{11} + return fileDescriptor_4d5f2c8767749221, []int{11} } func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -386,7 +386,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{12} + return fileDescriptor_4d5f2c8767749221, []int{12} } func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -414,7 +414,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} } func (*MetricIdentifier) ProtoMessage() {} func (*MetricIdentifier) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{13} + return fileDescriptor_4d5f2c8767749221, []int{13} } func (m *MetricIdentifier) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -442,7 +442,7 @@ var xxx_messageInfo_MetricIdentifier proto.InternalMessageInfo func (m *MetricSpec) Reset() { *m = MetricSpec{} } func (*MetricSpec) ProtoMessage() {} func (*MetricSpec) Descriptor() ([]byte, []int) { - 
return fileDescriptor_b14d4df4b5f3935e, []int{14} + return fileDescriptor_4d5f2c8767749221, []int{14} } func (m *MetricSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -470,7 +470,7 @@ var xxx_messageInfo_MetricSpec proto.InternalMessageInfo func (m *MetricStatus) Reset() { *m = MetricStatus{} } func (*MetricStatus) ProtoMessage() {} func (*MetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{15} + return fileDescriptor_4d5f2c8767749221, []int{15} } func (m *MetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -498,7 +498,7 @@ var xxx_messageInfo_MetricStatus proto.InternalMessageInfo func (m *MetricTarget) Reset() { *m = MetricTarget{} } func (*MetricTarget) ProtoMessage() {} func (*MetricTarget) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{16} + return fileDescriptor_4d5f2c8767749221, []int{16} } func (m *MetricTarget) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -526,7 +526,7 @@ var xxx_messageInfo_MetricTarget proto.InternalMessageInfo func (m *MetricValueStatus) Reset() { *m = MetricValueStatus{} } func (*MetricValueStatus) ProtoMessage() {} func (*MetricValueStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{17} + return fileDescriptor_4d5f2c8767749221, []int{17} } func (m *MetricValueStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -554,7 +554,7 @@ var xxx_messageInfo_MetricValueStatus proto.InternalMessageInfo func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } func (*ObjectMetricSource) ProtoMessage() {} func (*ObjectMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{18} + return fileDescriptor_4d5f2c8767749221, []int{18} } func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -582,7 +582,7 @@ var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } func (*ObjectMetricStatus) ProtoMessage() {} func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{19} + return fileDescriptor_4d5f2c8767749221, []int{19} } func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -610,7 +610,7 @@ var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } func (*PodsMetricSource) ProtoMessage() {} func (*PodsMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{20} + return fileDescriptor_4d5f2c8767749221, []int{20} } func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -638,7 +638,7 @@ var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } func (*PodsMetricStatus) ProtoMessage() {} func (*PodsMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{21} + return fileDescriptor_4d5f2c8767749221, []int{21} } func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -666,7 +666,7 @@ var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } func (*ResourceMetricSource) ProtoMessage() {} func (*ResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{22} + return fileDescriptor_4d5f2c8767749221, []int{22} } func (m 
*ResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -694,7 +694,7 @@ var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } func (*ResourceMetricStatus) ProtoMessage() {} func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{23} + return fileDescriptor_4d5f2c8767749221, []int{23} } func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -747,120 +747,119 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2/generated.proto", fileDescriptor_b14d4df4b5f3935e) -} - -var fileDescriptor_b14d4df4b5f3935e = []byte{ - // 1738 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcb, 0x8f, 0x13, 0xc9, - 0x19, 0x9f, 0xb6, 0x3d, 0xaf, 0xf2, 0x3c, 0x8b, 0x97, 0x19, 0x84, 0x3d, 0xea, 0x90, 0x40, 0x48, - 0x68, 0x07, 0x87, 0x20, 0x14, 0x0e, 0xd1, 0xf4, 0x90, 0x84, 0x11, 0x33, 0x19, 0x53, 0x06, 0x26, - 0x89, 0x92, 0x88, 0x72, 0x77, 0x8d, 0xa7, 0x32, 0x76, 0xb7, 0xd5, 0xdd, 0x36, 0x0c, 0x52, 0xa4, - 0x5c, 0x72, 0x8f, 0x12, 0xa1, 0x28, 0xff, 0x03, 0xca, 0x29, 0x11, 0x39, 0xec, 0x4a, 0x2b, 0xed, - 0x1e, 0xb8, 0xac, 0xc4, 0x61, 0x0f, 0x9c, 0xac, 0xc5, 0x2b, 0xed, 0x71, 0xff, 0x00, 0x4e, 0xab, - 0x7a, 0xf4, 0xd3, 0xaf, 0x31, 0x3b, 0x8c, 0x34, 0x37, 0x57, 0xd5, 0xf7, 0xfd, 0xbe, 0x47, 0x7d, - 0xaf, 0x6a, 0x03, 0x7d, 0xff, 0x96, 0xab, 0x51, 0xbb, 0xb8, 0xdf, 0xaa, 0x12, 0xc7, 0x22, 0x1e, - 0x71, 0x8b, 0x6d, 0x62, 0x99, 0xb6, 0x53, 0x94, 0x07, 0xb8, 0x49, 0x8b, 0xb8, 0xe5, 0xd9, 0xae, - 0x81, 0xeb, 0xd4, 0xaa, 0x15, 0xdb, 0xa5, 0x62, 0x8d, 0x58, 0xc4, 0xc1, 0x1e, 0x31, 0xb5, 0xa6, - 0x63, 0x7b, 0x36, 0x3c, 0x2f, 0x48, 0x35, 0xdc, 0xa4, 0x5a, 0x84, 0x54, 0x6b, 0x97, 0x56, 0xae, - 0xd5, 0xa8, 0xb7, 0xd7, 0xaa, 0x6a, 0x86, 0xdd, 0x28, 0xd6, 0xec, 0x9a, 0x5d, 0xe4, 0x1c, 0xd5, - 0xd6, 0x2e, 0x5f, 0xf1, 0x05, 0xff, 0x25, 0x90, 0x56, 0xd4, 0x88, 0x50, 0xc3, 0x76, 0x48, 0xb1, - 0x7d, 0x3d, 0x29, 0x6d, 0xe5, 0x46, 0x48, 0xd3, 0xc0, 0xc6, 0x1e, 0xb5, 0x88, 0x73, 0x50, 0x6c, - 0xee, 0xd7, 0x38, 0x93, 0x43, 0x5c, 0xbb, 0xe5, 0x18, 0x64, 0x2c, 0x2e, 0xb7, 0xd8, 0x20, 0x1e, - 0xee, 0x27, 0xab, 0x38, 0x88, 0xcb, 0x69, 0x59, 0x1e, 0x6d, 0xf4, 0x8a, 0xb9, 0x39, 0x8a, 0xc1, - 0x35, 0xf6, 0x48, 0x03, 0x27, 0xf9, 0xd4, 0xaf, 0x15, 0x70, 0x71, 0xdd, 0xb6, 0x3c, 0xcc, 0x38, - 0x90, 0x34, 0x62, 0x8b, 0x78, 0x0e, 0x35, 0x2a, 0xfc, 0x37, 0x5c, 0x07, 0x19, 0x0b, 0x37, 0x48, - 0x4e, 0x59, 0x55, 0xae, 0xcc, 0xea, 0xc5, 0x57, 0x9d, 0xc2, 0x44, 0xb7, 0x53, 0xc8, 0xfc, 0x06, - 0x37, 0xc8, 0xbb, 0x4e, 0xa1, 0xd0, 0xeb, 0x38, 0xcd, 0x87, 0x61, 0x24, 0x88, 0x33, 0xc3, 0x6d, - 0x30, 0xe5, 0x61, 0xa7, 0x46, 0xbc, 0x5c, 0x6a, 0x55, 0xb9, 0x92, 0x2d, 0x5d, 0xd6, 0x06, 0x5e, - 0x9d, 0x26, 0xa4, 0x3f, 0xe0, 0xe4, 0xfa, 0x82, 0x94, 0x37, 0x25, 0xd6, 0x48, 0xc2, 0xc0, 0x22, - 0x98, 0x35, 0x7c, 0xb5, 0x73, 0x69, 0xae, 0xda, 0xb2, 0x24, 0x9d, 0x0d, 0xed, 0x09, 0x69, 0xd4, - 0x6f, 0x86, 0x18, 0xea, 0x61, 0xaf, 0xe5, 0x1e, 0x8d, 0xa1, 0x3b, 0x60, 0xda, 0x68, 0x39, 0x0e, - 0xb1, 0x7c, 0x4b, 0x7f, 0x3c, 0xd2, 0xd2, 0x47, 0xb8, 0xde, 0x22, 0x42, 0x07, 0x7d, 0x51, 0x4a, - 0x9d, 0x5e, 0x17, 0x20, 0xc8, 0x47, 0x1b, 0xdf, 0xe0, 0xe7, 0x0a, 0xb8, 0xb0, 0xee, 0xd8, 0xae, - 0xfb, 0x88, 0x38, 0x2e, 0xb5, 0xad, 0xed, 0xea, 0x9f, 0x89, 0xe1, 0x21, 0xb2, 0x4b, 0x1c, 0x62, - 0x19, 0x04, 0xae, 0x82, 0xcc, 0x3e, 0xb5, 0x4c, 0x69, 0xee, 0x9c, 0x6f, 0xee, 0x3d, 0x6a, 
0x99, - 0x88, 0x9f, 0x30, 0x0a, 0xee, 0x90, 0x54, 0x9c, 0x22, 0x62, 0x6d, 0x09, 0x00, 0xdc, 0xa4, 0x52, - 0x80, 0xd4, 0x0a, 0x4a, 0x3a, 0xb0, 0x56, 0xde, 0x90, 0x27, 0x28, 0x42, 0xa5, 0x7e, 0xa4, 0x80, - 0xd3, 0xbf, 0x7c, 0xea, 0x11, 0xc7, 0xc2, 0xf5, 0x58, 0xa0, 0x55, 0xc0, 0x54, 0x83, 0xaf, 0xb9, - 0x4a, 0xd9, 0xd2, 0x8f, 0x46, 0x7a, 0x6e, 0xc3, 0x24, 0x96, 0x47, 0x77, 0x29, 0x71, 0xc2, 0x38, - 0x11, 0x27, 0x48, 0x42, 0x1d, 0x79, 0xe0, 0xa9, 0x9f, 0xf5, 0xaa, 0x2f, 0xc2, 0xe7, 0x83, 0xa8, - 0xff, 0xa1, 0xc2, 0x49, 0xfd, 0x8f, 0x02, 0x96, 0xee, 0x96, 0xd7, 0x2a, 0x82, 0xbb, 0x6c, 0xd7, - 0xa9, 0x71, 0x00, 0x6f, 0x81, 0x8c, 0x77, 0xd0, 0xf4, 0x33, 0xe0, 0x92, 0x7f, 0xe1, 0x0f, 0x0e, - 0x9a, 0x2c, 0x03, 0x4e, 0x27, 0xe9, 0xd9, 0x3e, 0xe2, 0x1c, 0xf0, 0x7b, 0x60, 0xb2, 0xcd, 0xe4, - 0x72, 0x2d, 0x27, 0xf5, 0x79, 0xc9, 0x3a, 0xc9, 0x95, 0x41, 0xe2, 0x0c, 0xde, 0x06, 0xf3, 0x4d, - 0xe2, 0x50, 0xdb, 0xac, 0x10, 0xc3, 0xb6, 0x4c, 0x97, 0x07, 0xcc, 0xa4, 0x7e, 0x46, 0x12, 0xcf, - 0x97, 0xa3, 0x87, 0x28, 0x4e, 0xab, 0xfe, 0x3b, 0x05, 0x16, 0x43, 0x05, 0x50, 0xab, 0x4e, 0x5c, - 0xf8, 0x27, 0xb0, 0xe2, 0x7a, 0xb8, 0x4a, 0xeb, 0xf4, 0x19, 0xf6, 0xa8, 0x6d, 0xed, 0x50, 0xcb, - 0xb4, 0x9f, 0xc4, 0xd1, 0xf3, 0xdd, 0x4e, 0x61, 0xa5, 0x32, 0x90, 0x0a, 0x0d, 0x41, 0x80, 0xf7, - 0xc0, 0x9c, 0x4b, 0xea, 0xc4, 0xf0, 0x84, 0xbd, 0xd2, 0x2f, 0x97, 0xbb, 0x9d, 0xc2, 0x5c, 0x25, - 0xb2, 0xff, 0xae, 0x53, 0x38, 0x15, 0x73, 0x8c, 0x38, 0x44, 0x31, 0x66, 0xf8, 0x3b, 0x30, 0xd3, - 0x64, 0xbf, 0x28, 0x71, 0x73, 0xa9, 0xd5, 0xf4, 0x88, 0x08, 0x49, 0xfa, 0x5a, 0x5f, 0x92, 0x5e, - 0x9a, 0x29, 0x4b, 0x10, 0x14, 0xc0, 0xa9, 0x2f, 0x53, 0xe0, 0xdc, 0x5d, 0xdb, 0xa1, 0xcf, 0x58, - 0xf2, 0xd7, 0xcb, 0xb6, 0xb9, 0x26, 0xc1, 0x88, 0x03, 0x1f, 0x83, 0x19, 0xd6, 0x64, 0x4c, 0xec, - 0x61, 0x19, 0x98, 0x3f, 0x89, 0x88, 0x0d, 0x7a, 0x85, 0xd6, 0xdc, 0xaf, 0xb1, 0x0d, 0x57, 0x63, - 0xd4, 0x5a, 0xfb, 0xba, 0x26, 0xea, 0xc5, 0x16, 0xf1, 0x70, 0x98, 0xd2, 0xe1, 0x1e, 0x0a, 0x50, - 0xe1, 0x6f, 0x41, 0xc6, 0x6d, 0x12, 0x43, 0x06, 0xe8, 0xcd, 0x61, 0x46, 0xf5, 0xd7, 0xb1, 0xd2, - 0x24, 0x46, 0x58, 0x5e, 0xd8, 0x0a, 0x71, 0x44, 0xf8, 0x18, 0x4c, 0xb9, 0x3c, 0x90, 0xf9, 0x5d, - 0x66, 0x4b, 0xb7, 0xde, 0x03, 0x5b, 0x24, 0x42, 0x90, 0x5f, 0x62, 0x8d, 0x24, 0xae, 0xfa, 0xb9, - 0x02, 0x0a, 0x03, 0x38, 0x75, 0xb2, 0x87, 0xdb, 0xd4, 0x76, 0xe0, 0x7d, 0x30, 0xcd, 0x77, 0x1e, - 0x36, 0xa5, 0x03, 0xaf, 0x1e, 0xea, 0xde, 0x78, 0x88, 0xea, 0x59, 0x96, 0x7d, 0x15, 0xc1, 0x8e, - 0x7c, 0x1c, 0xb8, 0x03, 0x66, 0xf9, 0xcf, 0x3b, 0xf6, 0x13, 0x4b, 0xfa, 0x6d, 0x1c, 0xd0, 0x79, - 0x56, 0xf4, 0x2b, 0x3e, 0x00, 0x0a, 0xb1, 0xd4, 0xbf, 0xa5, 0xc1, 0xea, 0x00, 0x7b, 0xd6, 0x6d, - 0xcb, 0xa4, 0x2c, 0xc6, 0xe1, 0xdd, 0x58, 0x9a, 0xdf, 0x48, 0xa4, 0xf9, 0xa5, 0x51, 0xfc, 0x91, - 0xb4, 0xdf, 0x0c, 0x2e, 0x28, 0x15, 0xc3, 0x92, 0x6e, 0x7e, 0xd7, 0x29, 0xf4, 0x19, 0xac, 0xb4, - 0x00, 0x29, 0x7e, 0x19, 0xb0, 0x0d, 0x60, 0x1d, 0xbb, 0xde, 0x03, 0x07, 0x5b, 0xae, 0x90, 0x44, - 0x1b, 0x44, 0x5e, 0xfd, 0xd5, 0xc3, 0x05, 0x2d, 0xe3, 0xd0, 0x57, 0xa4, 0x16, 0x70, 0xb3, 0x07, - 0x0d, 0xf5, 0x91, 0x00, 0x7f, 0x00, 0xa6, 0x1c, 0x82, 0x5d, 0xdb, 0xca, 0x65, 0xb8, 0x15, 0x41, - 0xb0, 0x20, 0xbe, 0x8b, 0xe4, 0x29, 0xfc, 0x21, 0x98, 0x6e, 0x10, 0xd7, 0xc5, 0x35, 0x92, 0x9b, - 0xe4, 0x84, 0x41, 0x79, 0xdd, 0x12, 0xdb, 0xc8, 0x3f, 0x57, 0xbf, 0x50, 0xc0, 0x85, 0x01, 0x7e, - 0xdc, 0xa4, 0xae, 0x07, 0xff, 0xd0, 0x93, 0x95, 0xda, 0xe1, 0x0c, 0x64, 0xdc, 0x3c, 0x27, 0x83, + proto.RegisterFile("k8s.io/api/autoscaling/v2/generated.proto", fileDescriptor_4d5f2c8767749221) +} + +var 
fileDescriptor_4d5f2c8767749221 = []byte{ + // 1722 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcb, 0x8f, 0x1b, 0x49, + 0x19, 0x9f, 0xb6, 0x3d, 0xaf, 0xf2, 0x3c, 0x2b, 0x2f, 0x67, 0xa2, 0xd8, 0xa3, 0x26, 0x90, 0x07, + 0xa4, 0x4d, 0x4c, 0x88, 0x22, 0x72, 0x40, 0xd3, 0x13, 0x20, 0xa3, 0xcc, 0x30, 0x4e, 0x39, 0xc9, + 0x00, 0x02, 0x94, 0x72, 0x77, 0x8d, 0xa7, 0x18, 0xbb, 0xdb, 0xea, 0x6e, 0x3b, 0x99, 0x48, 0x48, + 0x5c, 0xb8, 0x23, 0x50, 0x84, 0xf8, 0x1f, 0x22, 0x4e, 0xa0, 0x70, 0x00, 0x09, 0x69, 0xf7, 0x90, + 0xcb, 0x4a, 0x39, 0xec, 0x21, 0x27, 0x6b, 0xe3, 0x95, 0xf6, 0xb8, 0x7f, 0x40, 0x4e, 0xab, 0x7a, + 0xf4, 0xd3, 0xaf, 0x71, 0x76, 0x32, 0xd2, 0xdc, 0x5c, 0x55, 0xdf, 0xf7, 0xfb, 0x1e, 0xf5, 0xbd, + 0xaa, 0x0d, 0xae, 0xee, 0xdf, 0x76, 0x35, 0x6a, 0x17, 0x71, 0x93, 0x16, 0x71, 0xcb, 0xb3, 0x5d, + 0x03, 0xd7, 0xa9, 0x55, 0x2b, 0xb6, 0x4b, 0xc5, 0x1a, 0xb1, 0x88, 0x83, 0x3d, 0x62, 0x6a, 0x4d, + 0xc7, 0xf6, 0x6c, 0x78, 0x5e, 0x90, 0x6a, 0xb8, 0x49, 0xb5, 0x08, 0xa9, 0xd6, 0x2e, 0xad, 0x5c, + 0xaf, 0x51, 0x6f, 0xaf, 0x55, 0xd5, 0x0c, 0xbb, 0x51, 0xac, 0xd9, 0x35, 0xbb, 0xc8, 0x39, 0xaa, + 0xad, 0x5d, 0xbe, 0xe2, 0x0b, 0xfe, 0x4b, 0x20, 0xad, 0xa8, 0x11, 0xa1, 0x86, 0xed, 0x90, 0x62, + 0xfb, 0x46, 0x52, 0xda, 0xca, 0xcd, 0x90, 0xa6, 0x81, 0x8d, 0x3d, 0x6a, 0x11, 0xe7, 0xa0, 0xd8, + 0xdc, 0xaf, 0x71, 0x26, 0x87, 0xb8, 0x76, 0xcb, 0x31, 0xc8, 0x58, 0x5c, 0x6e, 0xb1, 0x41, 0x3c, + 0xdc, 0x4f, 0x56, 0x71, 0x10, 0x97, 0xd3, 0xb2, 0x3c, 0xda, 0xe8, 0x15, 0x73, 0x6b, 0x14, 0x83, + 0x6b, 0xec, 0x91, 0x06, 0x4e, 0xf2, 0xa9, 0x5f, 0x29, 0xe0, 0xe2, 0xba, 0x6d, 0x79, 0x98, 0x71, + 0x20, 0x69, 0xc4, 0x16, 0xf1, 0x1c, 0x6a, 0x54, 0xf8, 0x6f, 0xb8, 0x0e, 0x32, 0x16, 0x6e, 0x90, + 0x9c, 0xb2, 0xaa, 0x5c, 0x99, 0xd5, 0x8b, 0xaf, 0x3b, 0x85, 0x89, 0x6e, 0xa7, 0x90, 0xf9, 0x25, + 0x6e, 0x90, 0xf7, 0x9d, 0x42, 0xa1, 0xd7, 0x71, 0x9a, 0x0f, 0xc3, 0x48, 0x10, 0x67, 0x86, 0xdb, + 0x60, 0xca, 0xc3, 0x4e, 0x8d, 0x78, 0xb9, 0xd4, 0xaa, 0x72, 0x25, 0x5b, 0xba, 0xac, 0x0d, 0xbc, + 0x3a, 0x4d, 0x48, 0x7f, 0xc8, 0xc9, 0xf5, 0x05, 0x29, 0x6f, 0x4a, 0xac, 0x91, 0x84, 0x81, 0x45, + 0x30, 0x6b, 0xf8, 0x6a, 0xe7, 0xd2, 0x5c, 0xb5, 0x65, 0x49, 0x3a, 0x1b, 0xda, 0x13, 0xd2, 0xa8, + 0x5f, 0x0f, 0x31, 0xd4, 0xc3, 0x5e, 0xcb, 0x3d, 0x1a, 0x43, 0x77, 0xc0, 0xb4, 0xd1, 0x72, 0x1c, + 0x62, 0xf9, 0x96, 0xfe, 0x60, 0xa4, 0xa5, 0x8f, 0x71, 0xbd, 0x45, 0x84, 0x0e, 0xfa, 0xa2, 0x94, + 0x3a, 0xbd, 0x2e, 0x40, 0x90, 0x8f, 0x36, 0xbe, 0xc1, 0x2f, 0x14, 0x70, 0x61, 0xdd, 0xb1, 0x5d, + 0xf7, 0x31, 0x71, 0x5c, 0x6a, 0x5b, 0xdb, 0xd5, 0x3f, 0x10, 0xc3, 0x43, 0x64, 0x97, 0x38, 0xc4, + 0x32, 0x08, 0x5c, 0x05, 0x99, 0x7d, 0x6a, 0x99, 0xd2, 0xdc, 0x39, 0xdf, 0xdc, 0xfb, 0xd4, 0x32, + 0x11, 0x3f, 0x61, 0x14, 0xdc, 0x21, 0xa9, 0x38, 0x45, 0xc4, 0xda, 0x12, 0x00, 0xb8, 0x49, 0xa5, + 0x00, 0xa9, 0x15, 0x94, 0x74, 0x60, 0xad, 0xbc, 0x21, 0x4f, 0x50, 0x84, 0x4a, 0xfd, 0xaf, 0x02, + 0x4e, 0xff, 0xec, 0x99, 0x47, 0x1c, 0x0b, 0xd7, 0x63, 0x81, 0x56, 0x01, 0x53, 0x0d, 0xbe, 0xe6, + 0x2a, 0x65, 0x4b, 0xdf, 0x1f, 0xe9, 0xb9, 0x0d, 0x93, 0x58, 0x1e, 0xdd, 0xa5, 0xc4, 0x09, 0xe3, + 0x44, 0x9c, 0x20, 0x09, 0x75, 0xe4, 0x81, 0xa7, 0x7e, 0xda, 0xab, 0xbe, 0x08, 0x9f, 0x8f, 0xa2, + 0xfe, 0xc7, 0x0a, 0x27, 0xf5, 0x9f, 0x0a, 0x58, 0xba, 0x57, 0x5e, 0xab, 0x08, 0xee, 0xb2, 0x5d, + 0xa7, 0xc6, 0x01, 0xbc, 0x0d, 0x32, 0xde, 0x41, 0xd3, 0xcf, 0x80, 0x4b, 0xfe, 0x85, 0x3f, 0x3c, + 0x68, 0xb2, 0x0c, 0x38, 0x9d, 0xa4, 0x67, 0xfb, 0x88, 0x73, 0xc0, 0xef, 0x80, 0xc9, 0x36, 0x93, + 0xcb, 0xb5, 0x9c, 0xd4, 0xe7, 
0x25, 0xeb, 0x24, 0x57, 0x06, 0x89, 0x33, 0x78, 0x07, 0xcc, 0x37, + 0x89, 0x43, 0x6d, 0xb3, 0x42, 0x0c, 0xdb, 0x32, 0x5d, 0x1e, 0x30, 0x93, 0xfa, 0x19, 0x49, 0x3c, + 0x5f, 0x8e, 0x1e, 0xa2, 0x38, 0xad, 0xfa, 0x8f, 0x14, 0x58, 0x0c, 0x15, 0x40, 0xad, 0x3a, 0x71, + 0xe1, 0xef, 0xc1, 0x8a, 0xeb, 0xe1, 0x2a, 0xad, 0xd3, 0xe7, 0xd8, 0xa3, 0xb6, 0xb5, 0x43, 0x2d, + 0xd3, 0x7e, 0x1a, 0x47, 0xcf, 0x77, 0x3b, 0x85, 0x95, 0xca, 0x40, 0x2a, 0x34, 0x04, 0x01, 0xde, + 0x07, 0x73, 0x2e, 0xa9, 0x13, 0xc3, 0x13, 0xf6, 0x4a, 0xbf, 0x5c, 0xee, 0x76, 0x0a, 0x73, 0x95, + 0xc8, 0xfe, 0xfb, 0x4e, 0xe1, 0x54, 0xcc, 0x31, 0xe2, 0x10, 0xc5, 0x98, 0xe1, 0xaf, 0xc1, 0x4c, + 0x93, 0xfd, 0xa2, 0xc4, 0xcd, 0xa5, 0x56, 0xd3, 0x23, 0x22, 0x24, 0xe9, 0x6b, 0x7d, 0x49, 0x7a, + 0x69, 0xa6, 0x2c, 0x41, 0x50, 0x00, 0xa7, 0xbe, 0x4a, 0x81, 0x73, 0xf7, 0x6c, 0x87, 0x3e, 0x67, + 0xc9, 0x5f, 0x2f, 0xdb, 0xe6, 0x9a, 0x04, 0x23, 0x0e, 0x7c, 0x02, 0x66, 0x58, 0x93, 0x31, 0xb1, + 0x87, 0x65, 0x60, 0xfe, 0x30, 0x22, 0x36, 0xe8, 0x15, 0x5a, 0x73, 0xbf, 0xc6, 0x36, 0x5c, 0x8d, + 0x51, 0x6b, 0xed, 0x1b, 0x9a, 0xa8, 0x17, 0x5b, 0xc4, 0xc3, 0x61, 0x4a, 0x87, 0x7b, 0x28, 0x40, + 0x85, 0xbf, 0x02, 0x19, 0xb7, 0x49, 0x0c, 0x19, 0xa0, 0xb7, 0x86, 0x19, 0xd5, 0x5f, 0xc7, 0x4a, + 0x93, 0x18, 0x61, 0x79, 0x61, 0x2b, 0xc4, 0x11, 0xe1, 0x13, 0x30, 0xe5, 0xf2, 0x40, 0xe6, 0x77, + 0x99, 0x2d, 0xdd, 0xfe, 0x00, 0x6c, 0x91, 0x08, 0x41, 0x7e, 0x89, 0x35, 0x92, 0xb8, 0xea, 0x67, + 0x0a, 0x28, 0x0c, 0xe0, 0xd4, 0xc9, 0x1e, 0x6e, 0x53, 0xdb, 0x81, 0x0f, 0xc0, 0x34, 0xdf, 0x79, + 0xd4, 0x94, 0x0e, 0xbc, 0x76, 0xa8, 0x7b, 0xe3, 0x21, 0xaa, 0x67, 0x59, 0xf6, 0x55, 0x04, 0x3b, + 0xf2, 0x71, 0xe0, 0x0e, 0x98, 0xe5, 0x3f, 0xef, 0xda, 0x4f, 0x2d, 0xe9, 0xb7, 0x71, 0x40, 0xe7, + 0x59, 0xd1, 0xaf, 0xf8, 0x00, 0x28, 0xc4, 0x52, 0xff, 0x9c, 0x06, 0xab, 0x03, 0xec, 0x59, 0xb7, + 0x2d, 0x93, 0xb2, 0x18, 0x87, 0xf7, 0x62, 0x69, 0x7e, 0x33, 0x91, 0xe6, 0x97, 0x46, 0xf1, 0x47, + 0xd2, 0x7e, 0x33, 0xb8, 0xa0, 0x54, 0x0c, 0x4b, 0xba, 0xf9, 0x7d, 0xa7, 0xd0, 0x67, 0xb0, 0xd2, + 0x02, 0xa4, 0xf8, 0x65, 0xc0, 0x36, 0x80, 0x75, 0xec, 0x7a, 0x0f, 0x1d, 0x6c, 0xb9, 0x42, 0x12, + 0x6d, 0x10, 0x79, 0xf5, 0xd7, 0x0e, 0x17, 0xb4, 0x8c, 0x43, 0x5f, 0x91, 0x5a, 0xc0, 0xcd, 0x1e, + 0x34, 0xd4, 0x47, 0x02, 0xfc, 0x1e, 0x98, 0x72, 0x08, 0x76, 0x6d, 0x2b, 0x97, 0xe1, 0x56, 0x04, + 0xc1, 0x82, 0xf8, 0x2e, 0x92, 0xa7, 0xf0, 0x2a, 0x98, 0x6e, 0x10, 0xd7, 0xc5, 0x35, 0x92, 0x9b, + 0xe4, 0x84, 0x41, 0x79, 0xdd, 0x12, 0xdb, 0xc8, 0x3f, 0x57, 0x3f, 0x57, 0xc0, 0x85, 0x01, 0x7e, + 0xdc, 0xa4, 0xae, 0x07, 0x7f, 0xdb, 0x93, 0x95, 0xda, 0xe1, 0x0c, 0x64, 0xdc, 0x3c, 0x27, 0x83, 0x7a, 0xe0, 0xef, 0x44, 0x32, 0x72, 0x07, 0x4c, 0x52, 0x8f, 0x34, 0xfc, 0x3a, 0x53, 0x1a, 0x3f, - 0x6d, 0xc2, 0x0a, 0xbe, 0xc1, 0x80, 0x90, 0xc0, 0x53, 0x5f, 0xa6, 0x07, 0x9a, 0xc5, 0xd2, 0x16, + 0x6d, 0xc2, 0x0a, 0xbe, 0xc1, 0x80, 0x90, 0xc0, 0x53, 0x5f, 0xa5, 0x07, 0x9a, 0xc5, 0xd2, 0x16, 0xb6, 0xc1, 0x02, 0x5f, 0xc9, 0x9e, 0x49, 0x76, 0xa5, 0x71, 0xc3, 0x8a, 0xc2, 0x90, 0x19, 0x45, - 0x3f, 0x2b, 0xb5, 0x58, 0xa8, 0xc4, 0x50, 0x51, 0x42, 0x0a, 0xbc, 0x0e, 0xb2, 0x0d, 0x6a, 0x21, + 0x3f, 0x2b, 0xb5, 0x58, 0xa8, 0xc4, 0x50, 0x51, 0x42, 0x0a, 0xbc, 0x01, 0xb2, 0x0d, 0x6a, 0x21, 0xd2, 0xac, 0x53, 0x03, 0xbb, 0xb2, 0x09, 0x2d, 0x76, 0x3b, 0x85, 0xec, 0x56, 0xb8, 0x8d, 0xa2, - 0x34, 0xf0, 0x67, 0x20, 0xdb, 0xc0, 0x4f, 0x03, 0x16, 0xd1, 0x2c, 0x4e, 0x49, 0x79, 0xd9, 0xad, - 0xf0, 0x08, 0x45, 0xe9, 0x60, 0x99, 0xc5, 0x00, 0x6b, 0xb3, 0x6e, 0x2e, 0xc3, 0x9d, 0xfb, 0xfd, + 0x34, 0xf0, 0xc7, 0x20, 0xdb, 0xc0, 0xcf, 0x02, 0x16, 0xd1, 
0x2c, 0x4e, 0x49, 0x79, 0xd9, 0xad, + 0xf0, 0x08, 0x45, 0xe9, 0x60, 0x99, 0xc5, 0x00, 0x6b, 0xb3, 0x6e, 0x2e, 0xc3, 0x9d, 0xfb, 0xdd, 0x91, 0x0d, 0x99, 0x97, 0xb7, 0x48, 0xa8, 0x70, 0x6e, 0xe4, 0xc3, 0x40, 0x13, 0xcc, 0x54, 0x65, - 0xa9, 0xe1, 0x61, 0x95, 0x2d, 0xfd, 0xfc, 0x3d, 0xee, 0x4b, 0x22, 0xe8, 0x73, 0x2c, 0x24, 0xfc, - 0x15, 0x0a, 0x90, 0xd5, 0x17, 0x19, 0x70, 0x71, 0x68, 0x89, 0x84, 0xbf, 0x02, 0xd0, 0xae, 0xba, - 0xc4, 0x69, 0x13, 0xf3, 0xd7, 0xe2, 0x91, 0xc0, 0x66, 0x3a, 0x76, 0x7f, 0x69, 0xfd, 0x2c, 0xcb, + 0xa9, 0xe1, 0x61, 0x95, 0x2d, 0xfd, 0xe4, 0x03, 0xee, 0x4b, 0x22, 0xe8, 0x73, 0x2c, 0x24, 0xfc, + 0x15, 0x0a, 0x90, 0xd5, 0x97, 0x19, 0x70, 0x71, 0x68, 0x89, 0x84, 0x3f, 0x07, 0xd0, 0xae, 0xba, + 0xc4, 0x69, 0x13, 0xf3, 0x17, 0xe2, 0x91, 0xc0, 0x66, 0x3a, 0x76, 0x7f, 0x69, 0xfd, 0x2c, 0xcb, 0xa6, 0xed, 0x9e, 0x53, 0xd4, 0x87, 0x03, 0x1a, 0x60, 0x9e, 0xe5, 0x98, 0xb8, 0x31, 0x2a, 0xc7, 0xc7, 0xf1, 0x12, 0x78, 0x99, 0x4d, 0x03, 0x9b, 0x51, 0x10, 0x14, 0xc7, 0x84, 0x6b, 0x60, 0x51, 0x4e, 0x32, 0x89, 0x1b, 0x3c, 0x27, 0xfd, 0xbc, 0xb8, 0x1e, 0x3f, 0x46, 0x49, 0x7a, 0x06, 0x61, - 0x12, 0x97, 0x3a, 0xc4, 0x0c, 0x20, 0x32, 0x71, 0x88, 0x3b, 0xf1, 0x63, 0x94, 0xa4, 0x87, 0x35, + 0x12, 0x97, 0x3a, 0xc4, 0x0c, 0x20, 0x32, 0x71, 0x88, 0xbb, 0xf1, 0x63, 0x94, 0xa4, 0x87, 0x35, 0xb0, 0x20, 0x51, 0xe5, 0xad, 0xe6, 0x26, 0x79, 0x4c, 0x8c, 0x1e, 0x32, 0x65, 0x5b, 0x0a, 0xe2, 0x7b, 0x3d, 0x06, 0x83, 0x12, 0xb0, 0xd0, 0x06, 0xc0, 0xf0, 0x8b, 0xa6, 0x9b, 0x9b, 0xe2, 0x42, - 0x6e, 0x8f, 0x1f, 0x25, 0x41, 0xe1, 0x0d, 0x3b, 0x7a, 0xb0, 0xe5, 0xa2, 0x88, 0x08, 0xf5, 0x9f, - 0x0a, 0x58, 0x4a, 0x0e, 0xa9, 0xc1, 0x7b, 0x40, 0x19, 0xf8, 0x1e, 0xf8, 0x23, 0x98, 0x11, 0x33, - 0x8f, 0xed, 0xc8, 0x6b, 0xff, 0xe9, 0x21, 0xcb, 0x1a, 0xae, 0x92, 0x7a, 0x45, 0xb2, 0x8a, 0x20, - 0xf6, 0x57, 0x28, 0x80, 0x54, 0x9f, 0x67, 0x00, 0x08, 0x73, 0x0a, 0xde, 0x88, 0xf5, 0xb1, 0xd5, - 0x44, 0x1f, 0x5b, 0x8a, 0x3e, 0x2e, 0x22, 0x3d, 0xeb, 0x3e, 0x98, 0xb2, 0x79, 0x99, 0x91, 0x1a, - 0x5e, 0x1b, 0xe2, 0xc7, 0x60, 0xde, 0x09, 0x80, 0x74, 0xc0, 0x1a, 0x83, 0xac, 0x53, 0x12, 0x08, + 0xee, 0x8c, 0x1f, 0x25, 0x41, 0xe1, 0x0d, 0x3b, 0x7a, 0xb0, 0xe5, 0xa2, 0x88, 0x08, 0xf5, 0x6f, + 0x0a, 0x58, 0x4a, 0x0e, 0xa9, 0xc1, 0x7b, 0x40, 0x19, 0xf8, 0x1e, 0xf8, 0x1d, 0x98, 0x11, 0x33, + 0x8f, 0xed, 0xc8, 0x6b, 0xff, 0xd1, 0x21, 0xcb, 0x1a, 0xae, 0x92, 0x7a, 0x45, 0xb2, 0x8a, 0x20, + 0xf6, 0x57, 0x28, 0x80, 0x54, 0x5f, 0x64, 0x00, 0x08, 0x73, 0x0a, 0xde, 0x8c, 0xf5, 0xb1, 0xd5, + 0x44, 0x1f, 0x5b, 0x8a, 0x3e, 0x2e, 0x22, 0x3d, 0xeb, 0x01, 0x98, 0xb2, 0x79, 0x99, 0x91, 0x1a, + 0x5e, 0x1f, 0xe2, 0xc7, 0x60, 0xde, 0x09, 0x80, 0x74, 0xc0, 0x1a, 0x83, 0xac, 0x53, 0x12, 0x08, 0x6e, 0x80, 0x4c, 0xd3, 0x36, 0xfd, 0x29, 0x65, 0xd8, 0x58, 0x57, 0xb6, 0x4d, 0x37, 0x06, 0x37, 0xc3, 0x34, 0x66, 0xbb, 0x88, 0x43, 0xb0, 0x29, 0xd1, 0xff, 0x94, 0xc0, 0xc3, 0x31, 0x5b, 0x2a, - 0x0e, 0x81, 0xeb, 0xf7, 0x60, 0x17, 0xde, 0xf3, 0x4f, 0x50, 0x00, 0x07, 0xff, 0x02, 0x96, 0x8d, + 0x0e, 0x81, 0xeb, 0xf7, 0x60, 0x17, 0xde, 0xf3, 0x4f, 0x50, 0x00, 0x07, 0xff, 0x08, 0x96, 0x8d, 0xe4, 0x03, 0x38, 0x37, 0x3d, 0x72, 0xb0, 0x1a, 0xfa, 0x75, 0x40, 0x3f, 0xd3, 0xed, 0x14, 0x96, 0x7b, 0x48, 0x50, 0xaf, 0x24, 0x66, 0x19, 0x91, 0xef, 0x26, 0x59, 0xe7, 0x86, 0x59, 0xd6, 0xef, - 0x85, 0x28, 0x2c, 0xf3, 0x4f, 0x50, 0x00, 0xa7, 0xfe, 0x2b, 0x03, 0xe6, 0x62, 0x6f, 0xb1, 0x63, + 0x85, 0x28, 0x2c, 0xf3, 0x4f, 0x50, 0x00, 0xa7, 0xfe, 0x3d, 0x03, 0xe6, 0x62, 0x6f, 0xb1, 0x63, 0x8e, 0x0c, 0x91, 0xcc, 0x47, 0x16, 0x19, 0x02, 0xee, 0x48, 0x23, 0x43, 0x40, 0x1e, 0x53, 0x64, - 0x08, 
0x61, 0xc7, 0x14, 0x19, 0x11, 0xcb, 0xfa, 0x44, 0xc6, 0xa7, 0x29, 0x3f, 0x32, 0xc4, 0xb0, + 0x08, 0x61, 0xc7, 0x14, 0x19, 0x11, 0xcb, 0xfa, 0x44, 0xc6, 0x27, 0x29, 0x3f, 0x32, 0xc4, 0xb0, 0x70, 0xb8, 0xc8, 0x10, 0xb4, 0x91, 0xc8, 0xd8, 0x8e, 0x3e, 0x6f, 0x47, 0xcc, 0x6a, 0x9a, 0xef, - 0x56, 0xed, 0x7e, 0x0b, 0x5b, 0x1e, 0xf5, 0x0e, 0xf4, 0xd9, 0x9e, 0xa7, 0xb0, 0x09, 0xe6, 0x70, + 0x56, 0xed, 0x41, 0x0b, 0x5b, 0x1e, 0xf5, 0x0e, 0xf4, 0xd9, 0x9e, 0xa7, 0xb0, 0x09, 0xe6, 0x70, 0x9b, 0x38, 0xb8, 0x46, 0xf8, 0xb6, 0x8c, 0x8f, 0x71, 0x71, 0x97, 0xd8, 0x4b, 0x74, 0x2d, 0x82, - 0x83, 0x62, 0xa8, 0xac, 0xa5, 0xcb, 0xf5, 0x43, 0x2f, 0x78, 0xe2, 0xca, 0x2e, 0xc7, 0x5b, 0xfa, - 0x5a, 0xcf, 0x29, 0xea, 0xc3, 0xa1, 0xfe, 0x23, 0x05, 0x96, 0x7b, 0x3e, 0x2e, 0x84, 0x4e, 0x51, - 0x3e, 0x90, 0x53, 0x52, 0xc7, 0xe8, 0x94, 0xf4, 0xd8, 0x4e, 0xf9, 0x5f, 0x0a, 0xc0, 0xde, 0xfe, - 0x00, 0x0f, 0xf8, 0x58, 0x61, 0x38, 0xb4, 0x4a, 0x4c, 0x71, 0xfc, 0x1d, 0x67, 0xe0, 0xe8, 0x38, + 0x83, 0x62, 0xa8, 0xac, 0xa5, 0xcb, 0xf5, 0x23, 0x2f, 0x78, 0xe2, 0xca, 0x2e, 0xc7, 0x5b, 0xfa, + 0x5a, 0xcf, 0x29, 0xea, 0xc3, 0xa1, 0xfe, 0x35, 0x05, 0x96, 0x7b, 0x3e, 0x2e, 0x84, 0x4e, 0x51, + 0x3e, 0x92, 0x53, 0x52, 0xc7, 0xe8, 0x94, 0xf4, 0xd8, 0x4e, 0xf9, 0x77, 0x0a, 0xc0, 0xde, 0xfe, + 0x00, 0x0f, 0xf8, 0x58, 0x61, 0x38, 0xb4, 0x4a, 0x4c, 0x71, 0xfc, 0x2d, 0x67, 0xe0, 0xe8, 0x38, 0x12, 0x85, 0x45, 0x49, 0x39, 0x47, 0xff, 0x91, 0x35, 0xfc, 0xa4, 0x95, 0x3e, 0xb2, 0x4f, 0x5a, - 0xea, 0xc7, 0x49, 0xbf, 0x9d, 0xc0, 0xcf, 0x67, 0xfd, 0x6e, 0x39, 0x7d, 0x3c, 0xb7, 0xac, 0xfe, - 0x5f, 0x01, 0x4b, 0xc9, 0x31, 0xe2, 0x84, 0x7c, 0x3b, 0xfd, 0x24, 0xae, 0xfa, 0x49, 0xfc, 0x6e, - 0xfa, 0x42, 0x01, 0xa7, 0x4f, 0xce, 0xdf, 0x24, 0xea, 0x7f, 0x7b, 0xd5, 0x3d, 0x01, 0x7f, 0x76, - 0xe8, 0xbf, 0x78, 0xf5, 0x36, 0x3f, 0xf1, 0xfa, 0x6d, 0x7e, 0xe2, 0xcd, 0xdb, 0xfc, 0xc4, 0x5f, - 0xbb, 0x79, 0xe5, 0x55, 0x37, 0xaf, 0xbc, 0xee, 0xe6, 0x95, 0x37, 0xdd, 0xbc, 0xf2, 0x65, 0x37, - 0xaf, 0xfc, 0xfd, 0xab, 0xfc, 0xc4, 0xef, 0xcf, 0x0f, 0xfc, 0xa7, 0xf0, 0xdb, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xc5, 0xb7, 0xf9, 0x52, 0x5e, 0x1c, 0x00, 0x00, + 0xea, 0xff, 0x92, 0x7e, 0x3b, 0x81, 0x9f, 0xcf, 0xfa, 0xdd, 0x72, 0xfa, 0x78, 0x6e, 0x59, 0xfd, + 0x8f, 0x02, 0x96, 0x92, 0x63, 0xc4, 0x09, 0xf9, 0x76, 0xfa, 0xff, 0xb8, 0xea, 0x27, 0xf1, 0xbb, + 0xe9, 0x4b, 0x05, 0x9c, 0x3e, 0x39, 0x7f, 0x93, 0xa8, 0xff, 0xea, 0x55, 0xf7, 0x04, 0xfc, 0xd9, + 0xa1, 0xff, 0xf4, 0xf5, 0xbb, 0xfc, 0xc4, 0x9b, 0x77, 0xf9, 0x89, 0xb7, 0xef, 0xf2, 0x13, 0x7f, + 0xea, 0xe6, 0x95, 0xd7, 0xdd, 0xbc, 0xf2, 0xa6, 0x9b, 0x57, 0xde, 0x76, 0xf3, 0xca, 0x17, 0xdd, + 0xbc, 0xf2, 0x97, 0x2f, 0xf3, 0x13, 0xbf, 0x39, 0x3f, 0xf0, 0x9f, 0xc2, 0x6f, 0x02, 0x00, 0x00, + 0xff, 0xff, 0xca, 0x8b, 0x47, 0xba, 0x45, 0x1c, 0x00, 0x00, } func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/autoscaling/v2/generated.proto b/vendor/k8s.io/api/autoscaling/v2/generated.proto index a9e36975fc..8f2ee58031 100644 --- a/vendor/k8s.io/api/autoscaling/v2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2/generated.proto @@ -147,7 +147,7 @@ message HorizontalPodAutoscaler { // metadata is the standard object metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification for the behaviour of the autoscaler. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. @@ -190,7 +190,7 @@ message HorizontalPodAutoscalerCondition { // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // reason is the reason for the condition's last transition. // +optional @@ -206,7 +206,7 @@ message HorizontalPodAutoscalerCondition { message HorizontalPodAutoscalerList { // metadata is the standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of horizontal pod autoscaler objects. repeated HorizontalPodAutoscaler items = 2; @@ -258,7 +258,7 @@ message HorizontalPodAutoscalerStatus { // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, // used by the autoscaler to control how often the number of pods is changed. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; // currentReplicas is current number of replicas of pods managed by this autoscaler, // as last seen by the autoscaler. @@ -293,7 +293,7 @@ message MetricIdentifier { // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; } // MetricSpec specifies how to scale based on a single metric @@ -393,12 +393,12 @@ message MetricTarget { // value is the target value of the metric (as a quantity). // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3; // averageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of @@ -412,12 +412,12 @@ message MetricTarget { message MetricValueStatus { // value is the current value of the metric (as a quantity). 
// +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 1; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 1; // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2; // currentAverageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of diff --git a/vendor/k8s.io/api/autoscaling/v2/types.go b/vendor/k8s.io/api/autoscaling/v2/types.go index c12a83df1b..69a7b27012 100644 --- a/vendor/k8s.io/api/autoscaling/v2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2/types.go @@ -26,6 +26,7 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.23 // HorizontalPodAutoscaler is the configuration for a horizontal pod // autoscaler, which automatically manages the replica count of any resource @@ -573,6 +574,7 @@ type MetricValueStatus struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.23 // HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects. type HorizontalPodAutoscalerList struct { diff --git a/vendor/k8s.io/api/autoscaling/v2/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/autoscaling/v2/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..99ae748651 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v2 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *HorizontalPodAutoscaler) APILifecycleIntroduced() (major, minor int) { + return 1, 23 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *HorizontalPodAutoscalerList) APILifecycleIntroduced() (major, minor int) { + return 1, 23 +} diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go index edda3581e7..69567089b6 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +// source: k8s.io/api/autoscaling/v2beta1/generated.proto package v2beta1 @@ -50,7 +50,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} } func (*ContainerResourceMetricSource) ProtoMessage() {} func (*ContainerResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{0} + return fileDescriptor_ea74040359c1ed83, []int{0} } func (m *ContainerResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -78,7 +78,7 @@ var xxx_messageInfo_ContainerResourceMetricSource proto.InternalMessageInfo func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} } func (*ContainerResourceMetricStatus) ProtoMessage() {} func (*ContainerResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{1} + return fileDescriptor_ea74040359c1ed83, []int{1} } func (m *ContainerResourceMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,7 +106,7 @@ var xxx_messageInfo_ContainerResourceMetricStatus proto.InternalMessageInfo func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{2} + return fileDescriptor_ea74040359c1ed83, []int{2} } func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,7 +134,7 @@ var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } func (*ExternalMetricSource) ProtoMessage() {} func (*ExternalMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{3} + return fileDescriptor_ea74040359c1ed83, []int{3} } func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -162,7 +162,7 @@ var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } func (*ExternalMetricStatus) ProtoMessage() {} func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{4} + return fileDescriptor_ea74040359c1ed83, []int{4} } func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -190,7 +190,7 @@ var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } func (*HorizontalPodAutoscaler) ProtoMessage() {} func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{5} + return fileDescriptor_ea74040359c1ed83, []int{5} } func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -218,7 +218,7 @@ var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{6} + return fileDescriptor_ea74040359c1ed83, []int{6} } func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { 
return m.Unmarshal(b) @@ -246,7 +246,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{7} + return fileDescriptor_ea74040359c1ed83, []int{7} } func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -274,7 +274,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{8} + return fileDescriptor_ea74040359c1ed83, []int{8} } func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -302,7 +302,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{9} + return fileDescriptor_ea74040359c1ed83, []int{9} } func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -330,7 +330,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo func (m *MetricSpec) Reset() { *m = MetricSpec{} } func (*MetricSpec) ProtoMessage() {} func (*MetricSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{10} + return fileDescriptor_ea74040359c1ed83, []int{10} } func (m *MetricSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -358,7 +358,7 @@ var xxx_messageInfo_MetricSpec proto.InternalMessageInfo func (m *MetricStatus) Reset() { *m = MetricStatus{} } func (*MetricStatus) ProtoMessage() {} func (*MetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{11} + return fileDescriptor_ea74040359c1ed83, []int{11} } func (m *MetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -386,7 +386,7 @@ var xxx_messageInfo_MetricStatus proto.InternalMessageInfo func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } func (*ObjectMetricSource) ProtoMessage() {} func (*ObjectMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{12} + return fileDescriptor_ea74040359c1ed83, []int{12} } func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -414,7 +414,7 @@ var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } func (*ObjectMetricStatus) ProtoMessage() {} func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{13} + return fileDescriptor_ea74040359c1ed83, []int{13} } func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -442,7 +442,7 @@ var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } func (*PodsMetricSource) ProtoMessage() {} func (*PodsMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{14} + return 
fileDescriptor_ea74040359c1ed83, []int{14} } func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -470,7 +470,7 @@ var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } func (*PodsMetricStatus) ProtoMessage() {} func (*PodsMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{15} + return fileDescriptor_ea74040359c1ed83, []int{15} } func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -498,7 +498,7 @@ var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } func (*ResourceMetricSource) ProtoMessage() {} func (*ResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{16} + return fileDescriptor_ea74040359c1ed83, []int{16} } func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -526,7 +526,7 @@ var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } func (*ResourceMetricStatus) ProtoMessage() {} func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{17} + return fileDescriptor_ea74040359c1ed83, []int{17} } func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -573,109 +573,108 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto", fileDescriptor_26c1bfc7a52d0478) -} - -var fileDescriptor_26c1bfc7a52d0478 = []byte{ - // 1565 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x59, 0x4d, 0x6c, 0x1b, 0x45, - 0x1b, 0x8e, 0xed, 0x4d, 0x9a, 0xbc, 0x4e, 0xf3, 0x33, 0xed, 0xd7, 0xba, 0xe9, 0x57, 0x3b, 0x5a, - 0x7d, 0xfa, 0x94, 0xaf, 0xfa, 0xd8, 0x6d, 0x4d, 0xf8, 0x91, 0x10, 0x12, 0xb1, 0x0b, 0x6d, 0x45, - 0xd2, 0x96, 0x49, 0x5a, 0x21, 0x68, 0x11, 0x93, 0xf5, 0xd4, 0x59, 0x62, 0xef, 0x5a, 0x3b, 0x63, - 0xab, 0x29, 0x42, 0x42, 0x48, 0xdc, 0xb9, 0xc0, 0x19, 0x24, 0xae, 0x08, 0x71, 0x81, 0x33, 0xb7, - 0x1e, 0x7b, 0x6c, 0x05, 0xb2, 0xa8, 0x39, 0x70, 0xe6, 0xda, 0x13, 0x9a, 0xd9, 0xd9, 0xf5, 0xae, - 0xff, 0xe3, 0xa6, 0xe1, 0x47, 0xbd, 0x79, 0x77, 0xde, 0xf7, 0x79, 0x67, 0x9e, 0xf7, 0x6f, 0xde, - 0x35, 0x5c, 0xdc, 0x7d, 0x99, 0x19, 0xb6, 0x6b, 0xee, 0xd6, 0xb7, 0xa9, 0xe7, 0x50, 0x4e, 0x99, - 0xd9, 0xa0, 0x4e, 0xc9, 0xf5, 0x4c, 0xb5, 0x40, 0x6a, 0xb6, 0x49, 0xea, 0xdc, 0x65, 0x16, 0xa9, - 0xd8, 0x4e, 0xd9, 0x6c, 0xe4, 0xb7, 0x29, 0x27, 0xe7, 0xcd, 0x32, 0x75, 0xa8, 0x47, 0x38, 0x2d, - 0x19, 0x35, 0xcf, 0xe5, 0x2e, 0xca, 0xfa, 0xf2, 0x06, 0xa9, 0xd9, 0x46, 0x44, 0xde, 0x50, 0xf2, - 0x4b, 0xcf, 0x95, 0x6d, 0xbe, 0x53, 0xdf, 0x36, 0x2c, 0xb7, 0x6a, 0x96, 0xdd, 0xb2, 0x6b, 0x4a, - 0xb5, 0xed, 0xfa, 0x6d, 0xf9, 0x24, 0x1f, 0xe4, 0x2f, 0x1f, 0x6e, 0x49, 0x8f, 0x98, 0xb7, 0x5c, - 0x8f, 0x9a, 0x8d, 0x2e, 0x93, 0x4b, 0xab, 0x6d, 0x99, 0x2a, 0xb1, 0x76, 0x6c, 0x87, 0x7a, 0x7b, - 0x66, 0x6d, 0xb7, 0x2c, 0x95, 0x3c, 0xca, 0xdc, 0xba, 0x67, 0xd1, 0x7d, 0x69, 0x31, 0xb3, 0x4a, - 0x39, 0xe9, 0x65, 0xcb, 0xec, 0xa7, 0xe5, 0xd5, 0x1d, 0x6e, 0x57, 0xbb, 0xcd, 0xbc, 0x38, 0x4c, - 0x81, 0x59, 0x3b, 0xb4, 0x4a, 0x3a, 0xf5, 0xf4, 0xdf, 0x92, 0x70, 0xa6, 0xe8, 0x3a, 0x9c, 0x08, - 0x0d, 0xac, 0x0e, 0xb1, 0x41, 0xb9, 0x67, 0x5b, 0x9b, 0xf2, 0x37, 0x2a, 0x82, 0xe6, 0x90, 0x2a, - 0xcd, 0x24, 0x96, 0x13, 0x2b, 0x33, 
0x05, 0xf3, 0x5e, 0x33, 0x37, 0xd1, 0x6a, 0xe6, 0xb4, 0x2b, - 0xa4, 0x4a, 0x1f, 0x37, 0x73, 0xb9, 0x6e, 0xe2, 0x8c, 0x00, 0x46, 0x88, 0x60, 0xa9, 0x8c, 0xde, - 0x86, 0x0c, 0x27, 0x5e, 0x99, 0xf2, 0xb5, 0x06, 0xf5, 0x48, 0x99, 0x5e, 0xe7, 0x76, 0xc5, 0xbe, - 0x4b, 0xb8, 0xed, 0x3a, 0x99, 0xe4, 0x72, 0x62, 0x65, 0xb2, 0xf0, 0xef, 0x56, 0x33, 0x97, 0xd9, - 0xea, 0x23, 0x83, 0xfb, 0x6a, 0xa3, 0x06, 0xa0, 0xd8, 0xda, 0x0d, 0x52, 0xa9, 0xd3, 0x4c, 0x6a, - 0x39, 0xb1, 0x92, 0xce, 0x1b, 0x46, 0x3b, 0x4a, 0x42, 0x56, 0x8c, 0xda, 0x6e, 0x59, 0x86, 0x4d, - 0xe0, 0x32, 0xe3, 0xad, 0x3a, 0x71, 0xb8, 0xcd, 0xf7, 0x0a, 0x27, 0x5a, 0xcd, 0x1c, 0xda, 0xea, - 0x42, 0xc3, 0x3d, 0x2c, 0x20, 0x13, 0x66, 0xac, 0x80, 0xb7, 0x8c, 0x26, 0xb9, 0x59, 0x54, 0xdc, - 0xcc, 0xb4, 0x09, 0x6d, 0xcb, 0xe8, 0xbf, 0x0f, 0x60, 0x9a, 0x13, 0x5e, 0x67, 0x07, 0xc3, 0xf4, - 0xbb, 0x70, 0xca, 0xaa, 0x7b, 0x1e, 0x75, 0xfa, 0x53, 0x7d, 0xa6, 0xd5, 0xcc, 0x9d, 0x2a, 0xf6, - 0x13, 0xc2, 0xfd, 0xf5, 0xd1, 0x47, 0x70, 0x2c, 0xbe, 0xf8, 0x24, 0x6c, 0x9f, 0x56, 0x07, 0x3c, - 0x56, 0xec, 0x86, 0xc4, 0xbd, 0xec, 0xec, 0x9f, 0xf3, 0xcf, 0x13, 0x70, 0xba, 0xe8, 0xb9, 0x8c, - 0xdd, 0xa0, 0x1e, 0xb3, 0x5d, 0xe7, 0xea, 0xf6, 0x07, 0xd4, 0xe2, 0x98, 0xde, 0xa6, 0x1e, 0x75, - 0x2c, 0x8a, 0x96, 0x41, 0xdb, 0xb5, 0x9d, 0x92, 0x62, 0x7c, 0x36, 0x60, 0xfc, 0x4d, 0xdb, 0x29, - 0x61, 0xb9, 0x22, 0x24, 0xa4, 0x4f, 0x92, 0x71, 0x89, 0x08, 0xe1, 0x79, 0x00, 0x52, 0xb3, 0x95, - 0x01, 0x49, 0xc5, 0x4c, 0x01, 0x29, 0x39, 0x58, 0xbb, 0x76, 0x59, 0xad, 0xe0, 0x88, 0x94, 0xfe, - 0x45, 0x0a, 0x8e, 0xbf, 0x7e, 0x87, 0x53, 0xcf, 0x21, 0x95, 0x58, 0xb2, 0xe5, 0x01, 0xaa, 0xf2, - 0xf9, 0x4a, 0x3b, 0x10, 0x42, 0xb0, 0x8d, 0x70, 0x05, 0x47, 0xa4, 0x90, 0x0b, 0x73, 0xfe, 0xd3, - 0x26, 0xad, 0x50, 0x8b, 0xbb, 0x9e, 0xdc, 0x6c, 0x3a, 0xff, 0xfc, 0x20, 0x7f, 0x30, 0x43, 0x94, - 0x1e, 0xa3, 0x71, 0xde, 0x58, 0x27, 0xdb, 0xb4, 0x12, 0xa8, 0x16, 0x50, 0xab, 0x99, 0x9b, 0xdb, - 0x88, 0xc1, 0xe1, 0x0e, 0x78, 0x44, 0x20, 0xed, 0x27, 0xc4, 0x93, 0x78, 0x7f, 0xbe, 0xd5, 0xcc, - 0xa5, 0xb7, 0xda, 0x30, 0x38, 0x8a, 0xd9, 0x27, 0xab, 0xb5, 0xa7, 0x9d, 0xd5, 0xfa, 0x97, 0xdd, - 0x8e, 0xf1, 0x73, 0xf3, 0x6f, 0xe1, 0x98, 0x1d, 0x98, 0x55, 0x69, 0xf3, 0x24, 0x9e, 0x39, 0xae, - 0x8e, 0x35, 0x5b, 0x8c, 0x60, 0xe1, 0x18, 0x32, 0xda, 0xeb, 0x5d, 0x08, 0xc6, 0x73, 0xd0, 0xc9, - 0xfd, 0x14, 0x01, 0xfd, 0xc7, 0x24, 0x9c, 0xbc, 0xe4, 0x7a, 0xf6, 0x5d, 0x91, 0xe5, 0x95, 0x6b, - 0x6e, 0x69, 0x4d, 0xb5, 0x7f, 0xea, 0xa1, 0xf7, 0x61, 0x5a, 0xb0, 0x57, 0x22, 0x9c, 0x48, 0x1f, - 0xa5, 0xf3, 0xe7, 0x46, 0xe3, 0xda, 0x2f, 0x0c, 0x1b, 0x94, 0x93, 0xb6, 0x57, 0xdb, 0xef, 0x70, - 0x88, 0x8a, 0x6e, 0x81, 0xc6, 0x6a, 0xd4, 0x52, 0x9e, 0x7c, 0xc5, 0x18, 0x7c, 0x0d, 0x31, 0xfa, - 0x6c, 0x74, 0xb3, 0x46, 0xad, 0x76, 0x31, 0x11, 0x4f, 0x58, 0xc2, 0x22, 0x0a, 0x53, 0x4c, 0x06, - 0x9c, 0xf2, 0xdd, 0xab, 0xe3, 0x1a, 0x90, 0x20, 0x85, 0x39, 0x65, 0x62, 0xca, 0x7f, 0xc6, 0x0a, - 0x5c, 0xff, 0x34, 0x05, 0xcb, 0x7d, 0x34, 0x8b, 0xae, 0x53, 0xb2, 0x65, 0xb1, 0xbf, 0x04, 0x1a, - 0xdf, 0xab, 0x05, 0xc1, 0xbe, 0x1a, 0xec, 0x76, 0x6b, 0xaf, 0x26, 0xda, 0xd1, 0x7f, 0x86, 0xe9, - 0x0b, 0x39, 0x2c, 0x11, 0xd0, 0x7a, 0x78, 0xaa, 0x64, 0x0c, 0x4b, 0x6d, 0xeb, 0x71, 0x33, 0xd7, - 0xe3, 0xfe, 0x65, 0x84, 0x48, 0xf1, 0xcd, 0x8b, 0xda, 0x50, 0x21, 0x8c, 0x6f, 0x79, 0xc4, 0x61, - 0xbe, 0x25, 0xbb, 0x1a, 0xc4, 0xfa, 0xd9, 0xd1, 0xdc, 0x2d, 0x34, 0x0a, 0x4b, 0x6a, 0x17, 0x68, - 0xbd, 0x0b, 0x0d, 0xf7, 0xb0, 0x80, 0xfe, 0x0b, 0x53, 0x1e, 0x25, 0xcc, 0x75, 0x54, 0xeb, 0x09, - 0xc9, 0xc5, 0xf2, 0x2d, 0x56, 0xab, 0xe8, 0x7f, 0x70, 0xa4, 
0x4a, 0x19, 0x23, 0x65, 0x9a, 0x99, - 0x94, 0x82, 0xf3, 0x4a, 0xf0, 0xc8, 0x86, 0xff, 0x1a, 0x07, 0xeb, 0xfa, 0xc3, 0x04, 0x9c, 0xee, - 0xc3, 0xe3, 0xba, 0xcd, 0x38, 0xba, 0xd9, 0x15, 0xcf, 0xc6, 0x88, 0xb5, 0xc3, 0x66, 0x7e, 0x34, - 0x2f, 0x28, 0xdb, 0xd3, 0xc1, 0x9b, 0x48, 0x2c, 0xdf, 0x84, 0x49, 0x9b, 0xd3, 0xaa, 0xf0, 0x4a, - 0x6a, 0x25, 0x9d, 0x7f, 0x69, 0xcc, 0x58, 0x2b, 0x1c, 0x55, 0x36, 0x26, 0x2f, 0x0b, 0x34, 0xec, - 0x83, 0xea, 0x3f, 0x25, 0xfb, 0x9e, 0x4d, 0x04, 0x3c, 0xfa, 0x10, 0xe6, 0xe4, 0x93, 0x5f, 0x99, - 0x31, 0xbd, 0xad, 0x4e, 0x38, 0x34, 0xa7, 0x06, 0x34, 0xf4, 0xc2, 0x09, 0xb5, 0x95, 0xb9, 0xcd, - 0x18, 0x34, 0xee, 0x30, 0x85, 0xce, 0x43, 0xba, 0x6a, 0x3b, 0x98, 0xd6, 0x2a, 0xb6, 0x45, 0x98, - 0xba, 0x17, 0xc9, 0x96, 0xb4, 0xd1, 0x7e, 0x8d, 0xa3, 0x32, 0xe8, 0x05, 0x48, 0x57, 0xc9, 0x9d, - 0x50, 0x25, 0x25, 0x55, 0x8e, 0x29, 0x7b, 0xe9, 0x8d, 0xf6, 0x12, 0x8e, 0xca, 0xa1, 0xeb, 0x22, - 0x1a, 0x44, 0x95, 0x66, 0x19, 0x4d, 0xd2, 0x7c, 0x76, 0xd8, 0xf9, 0x54, 0x91, 0x17, 0x25, 0x22, - 0x12, 0x39, 0x12, 0x02, 0x07, 0x58, 0xfa, 0xf7, 0x1a, 0x9c, 0x19, 0x98, 0xfb, 0xe8, 0x0d, 0x40, - 0xee, 0x36, 0xa3, 0x5e, 0x83, 0x96, 0x2e, 0xfa, 0x97, 0x7e, 0x71, 0x3f, 0x11, 0x1c, 0xa7, 0xfc, - 0x96, 0x78, 0xb5, 0x6b, 0x15, 0xf7, 0xd0, 0x40, 0x16, 0x1c, 0x15, 0xc9, 0xe0, 0x13, 0x6a, 0xab, - 0xab, 0xd0, 0xfe, 0x32, 0x6d, 0xb1, 0xd5, 0xcc, 0x1d, 0x5d, 0x8f, 0x82, 0xe0, 0x38, 0x26, 0x5a, - 0x83, 0x79, 0x55, 0xeb, 0x3b, 0x08, 0x3e, 0xa9, 0x18, 0x98, 0x2f, 0xc6, 0x97, 0x71, 0xa7, 0xbc, - 0x80, 0x28, 0x51, 0x66, 0x7b, 0xb4, 0x14, 0x42, 0x68, 0x71, 0x88, 0x0b, 0xf1, 0x65, 0xdc, 0x29, - 0x8f, 0x2a, 0x30, 0xa7, 0x50, 0x15, 0xdf, 0x99, 0x49, 0xe9, 0xb2, 0xff, 0x8f, 0xe8, 0x32, 0xbf, - 0xe8, 0x86, 0x31, 0x58, 0x8c, 0x61, 0xe1, 0x0e, 0x6c, 0xc4, 0x01, 0xac, 0xa0, 0xc4, 0xb1, 0xcc, - 0x94, 0xb4, 0xf4, 0xda, 0x98, 0x39, 0x18, 0xd6, 0xca, 0x76, 0xfb, 0x0a, 0x5f, 0x31, 0x1c, 0xb1, - 0xa3, 0x7f, 0xab, 0x01, 0xb4, 0x23, 0x0c, 0xad, 0xc6, 0x8a, 0xfc, 0x72, 0x47, 0x91, 0x5f, 0x88, - 0x5e, 0x4e, 0x23, 0x05, 0xfd, 0x06, 0x4c, 0xb9, 0x32, 0xf3, 0x54, 0x30, 0xe4, 0x87, 0x6d, 0x3b, - 0xec, 0xa5, 0x21, 0x5a, 0x01, 0x44, 0xe9, 0x54, 0xf9, 0xab, 0xd0, 0xd0, 0x15, 0xd0, 0x6a, 0x6e, - 0x29, 0x68, 0x7e, 0xe7, 0x86, 0xa1, 0x5e, 0x73, 0x4b, 0x2c, 0x86, 0x39, 0x2d, 0xf6, 0x2e, 0xde, - 0x62, 0x89, 0x83, 0xde, 0x83, 0xe9, 0xe0, 0xba, 0xa1, 0xee, 0x26, 0xab, 0xc3, 0x30, 0x7b, 0xcd, - 0xc0, 0x85, 0x59, 0x51, 0x41, 0x83, 0x15, 0x1c, 0x62, 0xa2, 0x4f, 0x12, 0xb0, 0x68, 0x75, 0xce, - 0x74, 0x99, 0x23, 0xa3, 0xb5, 0xee, 0x81, 0x63, 0x77, 0xe1, 0x5f, 0xad, 0x66, 0x6e, 0xb1, 0x4b, - 0x04, 0x77, 0x9b, 0x13, 0x87, 0xa4, 0xea, 0xca, 0x2a, 0x1b, 0xce, 0x08, 0x87, 0xec, 0x35, 0x7b, - 0xf8, 0x87, 0x0c, 0x56, 0x70, 0x88, 0xa9, 0x7f, 0xa7, 0xc1, 0x6c, 0xec, 0x2e, 0xfc, 0x67, 0xc4, - 0x8c, 0x9f, 0x5a, 0x07, 0x1b, 0x33, 0x3e, 0xe6, 0xc1, 0xc7, 0x8c, 0x8f, 0x7b, 0xa8, 0x31, 0xe3, - 0x9b, 0x3c, 0xcc, 0x98, 0x89, 0x1c, 0xb2, 0x47, 0xcc, 0x3c, 0x4c, 0x01, 0xea, 0xce, 0x79, 0x64, - 0xc1, 0x94, 0x3f, 0x74, 0x1d, 0x44, 0xaf, 0x0f, 0xef, 0x5f, 0xaa, 0xad, 0x2b, 0xe8, 0x8e, 0x51, - 0x2d, 0x39, 0xd2, 0xa8, 0x46, 0x0f, 0x62, 0xa4, 0x0d, 0x2f, 0x03, 0x7d, 0xc7, 0xda, 0x5b, 0x30, - 0xcd, 0x82, 0x59, 0x50, 0x1b, 0x7f, 0x16, 0x94, 0xac, 0x87, 0x53, 0x60, 0x08, 0x89, 0x4a, 0x30, - 0x4b, 0xa2, 0xe3, 0xd8, 0xe4, 0x58, 0xc7, 0x58, 0x10, 0xb3, 0x5f, 0x6c, 0x0e, 0x8b, 0xa1, 0xea, - 0x3f, 0x77, 0xfa, 0xd6, 0xaf, 0x0a, 0x7f, 0x59, 0xdf, 0x1e, 0xde, 0x54, 0xfc, 0x8f, 0x70, 0xef, - 0x57, 0x49, 0x58, 0xe8, 0x6c, 0xac, 0x63, 0x7d, 0xfe, 0xb8, 0xdb, 0xf3, 0x1b, 0x4e, 
0x72, 0xac, - 0x4d, 0x87, 0xb3, 0xda, 0x88, 0x5f, 0x67, 0xa3, 0x9e, 0x48, 0x1d, 0xb8, 0x27, 0xf4, 0xaf, 0xe3, - 0x1c, 0x8d, 0xff, 0x89, 0xa8, 0xcf, 0x07, 0xd5, 0xe4, 0x21, 0x7d, 0x50, 0x7d, 0xca, 0x34, 0x7d, - 0x93, 0x84, 0xe3, 0xcf, 0xfe, 0x53, 0x18, 0xfd, 0xeb, 0xe3, 0x0f, 0xdd, 0x7c, 0x3d, 0xfb, 0x67, - 0x60, 0x94, 0x40, 0x2e, 0x5c, 0xb8, 0xf7, 0x28, 0x3b, 0x71, 0xff, 0x51, 0x76, 0xe2, 0xc1, 0xa3, - 0xec, 0xc4, 0xc7, 0xad, 0x6c, 0xe2, 0x5e, 0x2b, 0x9b, 0xb8, 0xdf, 0xca, 0x26, 0x1e, 0xb4, 0xb2, - 0x89, 0x5f, 0x5a, 0xd9, 0xc4, 0x67, 0xbf, 0x66, 0x27, 0xde, 0xc9, 0x0e, 0xfe, 0x93, 0xf1, 0x8f, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x09, 0x76, 0xa2, 0x69, 0x9e, 0x1c, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/autoscaling/v2beta1/generated.proto", fileDescriptor_ea74040359c1ed83) +} + +var fileDescriptor_ea74040359c1ed83 = []byte{ + // 1549 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x59, 0x4d, 0x6c, 0x1b, 0xc5, + 0x17, 0x8f, 0xed, 0x4d, 0x9a, 0x3c, 0xa7, 0xf9, 0x98, 0xf6, 0xdf, 0xba, 0xe9, 0xbf, 0x76, 0xb4, + 0xfa, 0xeb, 0xaf, 0x50, 0xc1, 0xba, 0x35, 0xe1, 0x43, 0x42, 0x48, 0xc4, 0x2e, 0xd0, 0x8a, 0xb8, + 0x2d, 0x93, 0xb4, 0x42, 0xd0, 0x22, 0x26, 0xeb, 0xa9, 0xb3, 0xc4, 0xde, 0xb5, 0x76, 0xc6, 0x51, + 0x53, 0x84, 0x84, 0x90, 0xb8, 0x73, 0x81, 0x33, 0x48, 0x5c, 0x11, 0xe2, 0x02, 0x67, 0x6e, 0x3d, + 0xf6, 0xd8, 0x0a, 0x64, 0x51, 0x73, 0xe0, 0xcc, 0xb5, 0x27, 0x34, 0xb3, 0xb3, 0xeb, 0x5d, 0xdb, + 0x6b, 0x3b, 0x6e, 0x1a, 0x3e, 0xd4, 0x9b, 0x77, 0xe7, 0xbd, 0xdf, 0x9b, 0xf9, 0xbd, 0xaf, 0x79, + 0x6b, 0x30, 0x76, 0x5e, 0x66, 0x86, 0xe5, 0xe4, 0x49, 0xc3, 0xca, 0x93, 0x26, 0x77, 0x98, 0x49, + 0x6a, 0x96, 0x5d, 0xcd, 0xef, 0x16, 0xb6, 0x28, 0x27, 0xe7, 0xf3, 0x55, 0x6a, 0x53, 0x97, 0x70, + 0x5a, 0x31, 0x1a, 0xae, 0xc3, 0x1d, 0x94, 0xf5, 0xe4, 0x0d, 0xd2, 0xb0, 0x8c, 0x90, 0xbc, 0xa1, + 0xe4, 0x97, 0x9e, 0xab, 0x5a, 0x7c, 0xbb, 0xb9, 0x65, 0x98, 0x4e, 0x3d, 0x5f, 0x75, 0xaa, 0x4e, + 0x5e, 0xaa, 0x6d, 0x35, 0x6f, 0xc9, 0x27, 0xf9, 0x20, 0x7f, 0x79, 0x70, 0x4b, 0x7a, 0xc8, 0xbc, + 0xe9, 0xb8, 0x34, 0xbf, 0xdb, 0x63, 0x72, 0x69, 0xb5, 0x23, 0x53, 0x27, 0xe6, 0xb6, 0x65, 0x53, + 0x77, 0x2f, 0xdf, 0xd8, 0xa9, 0x4a, 0x25, 0x97, 0x32, 0xa7, 0xe9, 0x9a, 0x74, 0x5f, 0x5a, 0x2c, + 0x5f, 0xa7, 0x9c, 0xf4, 0xb3, 0x95, 0x8f, 0xd3, 0x72, 0x9b, 0x36, 0xb7, 0xea, 0xbd, 0x66, 0x5e, + 0x1c, 0xa6, 0xc0, 0xcc, 0x6d, 0x5a, 0x27, 0xdd, 0x7a, 0xfa, 0xef, 0x49, 0x38, 0x53, 0x72, 0x6c, + 0x4e, 0x84, 0x06, 0x56, 0x87, 0x28, 0x53, 0xee, 0x5a, 0xe6, 0x86, 0xfc, 0x8d, 0x4a, 0xa0, 0xd9, + 0xa4, 0x4e, 0x33, 0x89, 0xe5, 0xc4, 0xca, 0x4c, 0x31, 0x7f, 0xb7, 0x95, 0x9b, 0x68, 0xb7, 0x72, + 0xda, 0x65, 0x52, 0xa7, 0x8f, 0x5a, 0xb9, 0x5c, 0x2f, 0x71, 0x86, 0x0f, 0x23, 0x44, 0xb0, 0x54, + 0x46, 0xef, 0x40, 0x86, 0x13, 0xb7, 0x4a, 0xf9, 0xda, 0x2e, 0x75, 0x49, 0x95, 0x5e, 0xe3, 0x56, + 0xcd, 0xba, 0x43, 0xb8, 0xe5, 0xd8, 0x99, 0xe4, 0x72, 0x62, 0x65, 0xb2, 0xf8, 0xdf, 0x76, 0x2b, + 0x97, 0xd9, 0x8c, 0x91, 0xc1, 0xb1, 0xda, 0x68, 0x17, 0x50, 0x64, 0xed, 0x3a, 0xa9, 0x35, 0x69, + 0x26, 0xb5, 0x9c, 0x58, 0x49, 0x17, 0x0c, 0xa3, 0x13, 0x25, 0x01, 0x2b, 0x46, 0x63, 0xa7, 0x2a, + 0xc3, 0xc6, 0x77, 0x99, 0xf1, 0x76, 0x93, 0xd8, 0xdc, 0xe2, 0x7b, 0xc5, 0x13, 0xed, 0x56, 0x0e, + 0x6d, 0xf6, 0xa0, 0xe1, 0x3e, 0x16, 0x50, 0x1e, 0x66, 0x4c, 0x9f, 0xb7, 0x8c, 0x26, 0xb9, 0x59, + 0x54, 0xdc, 0xcc, 0x74, 0x08, 0xed, 0xc8, 0xe8, 0x7f, 0x0c, 0x60, 0x9a, 0x13, 0xde, 0x64, 0x07, + 0xc3, 0xf4, 0x7b, 0x70, 0xca, 0x6c, 0xba, 0x2e, 0xb5, 0xe3, 0xa9, 0x3e, 0xd3, 0x6e, 0xe5, 0x4e, + 0x95, 0xe2, 0x84, 
0x70, 0xbc, 0x3e, 0xfa, 0x18, 0x8e, 0x45, 0x17, 0x1f, 0x87, 0xed, 0xd3, 0xea, + 0x80, 0xc7, 0x4a, 0xbd, 0x90, 0xb8, 0x9f, 0x9d, 0xfd, 0x73, 0xfe, 0x45, 0x02, 0x4e, 0x97, 0x5c, + 0x87, 0xb1, 0xeb, 0xd4, 0x65, 0x96, 0x63, 0x5f, 0xd9, 0xfa, 0x90, 0x9a, 0x1c, 0xd3, 0x5b, 0xd4, + 0xa5, 0xb6, 0x49, 0xd1, 0x32, 0x68, 0x3b, 0x96, 0x5d, 0x51, 0x8c, 0xcf, 0xfa, 0x8c, 0xbf, 0x65, + 0xd9, 0x15, 0x2c, 0x57, 0x84, 0x84, 0xf4, 0x49, 0x32, 0x2a, 0x11, 0x22, 0xbc, 0x00, 0x40, 0x1a, + 0x96, 0x32, 0x20, 0xa9, 0x98, 0x29, 0x22, 0x25, 0x07, 0x6b, 0x57, 0x2f, 0xa9, 0x15, 0x1c, 0x92, + 0xd2, 0xbf, 0x4c, 0xc1, 0xf1, 0xd7, 0x6f, 0x73, 0xea, 0xda, 0xa4, 0x16, 0x49, 0xb6, 0x02, 0x40, + 0x5d, 0x3e, 0x5f, 0xee, 0x04, 0x42, 0x00, 0x56, 0x0e, 0x56, 0x70, 0x48, 0x0a, 0x39, 0x30, 0xe7, + 0x3d, 0x6d, 0xd0, 0x1a, 0x35, 0xb9, 0xe3, 0xca, 0xcd, 0xa6, 0x0b, 0xcf, 0x0f, 0xf2, 0x07, 0x33, + 0x44, 0xe9, 0x31, 0x76, 0xcf, 0x1b, 0xeb, 0x64, 0x8b, 0xd6, 0x7c, 0xd5, 0x22, 0x6a, 0xb7, 0x72, + 0x73, 0xe5, 0x08, 0x1c, 0xee, 0x82, 0x47, 0x04, 0xd2, 0x5e, 0x42, 0x3c, 0x8e, 0xf7, 0xe7, 0xdb, + 0xad, 0x5c, 0x7a, 0xb3, 0x03, 0x83, 0xc3, 0x98, 0x31, 0x59, 0xad, 0x3d, 0xe9, 0xac, 0xd6, 0xbf, + 0xea, 0x75, 0x8c, 0x97, 0x9b, 0xff, 0x08, 0xc7, 0x6c, 0xc3, 0xac, 0x4a, 0x9b, 0xc7, 0xf1, 0xcc, + 0x71, 0x75, 0xac, 0xd9, 0x52, 0x08, 0x0b, 0x47, 0x90, 0xd1, 0x5e, 0xff, 0x42, 0x30, 0x9e, 0x83, + 0x4e, 0xee, 0xa7, 0x08, 0xe8, 0x3f, 0x25, 0xe1, 0xe4, 0x45, 0xc7, 0xb5, 0xee, 0x88, 0x2c, 0xaf, + 0x5d, 0x75, 0x2a, 0x6b, 0xaa, 0xfd, 0x53, 0x17, 0x7d, 0x00, 0xd3, 0x82, 0xbd, 0x0a, 0xe1, 0x44, + 0xfa, 0x28, 0x5d, 0x38, 0x37, 0x1a, 0xd7, 0x5e, 0x61, 0x28, 0x53, 0x4e, 0x3a, 0x5e, 0xed, 0xbc, + 0xc3, 0x01, 0x2a, 0xba, 0x09, 0x1a, 0x6b, 0x50, 0x53, 0x79, 0xf2, 0x15, 0x63, 0xf0, 0x35, 0xc4, + 0x88, 0xd9, 0xe8, 0x46, 0x83, 0x9a, 0x9d, 0x62, 0x22, 0x9e, 0xb0, 0x84, 0x45, 0x14, 0xa6, 0x98, + 0x0c, 0x38, 0xe5, 0xbb, 0x57, 0xc7, 0x35, 0x20, 0x41, 0x8a, 0x73, 0xca, 0xc4, 0x94, 0xf7, 0x8c, + 0x15, 0xb8, 0xfe, 0x59, 0x0a, 0x96, 0x63, 0x34, 0x4b, 0x8e, 0x5d, 0xb1, 0x64, 0xb1, 0xbf, 0x08, + 0x1a, 0xdf, 0x6b, 0xf8, 0xc1, 0xbe, 0xea, 0xef, 0x76, 0x73, 0xaf, 0x21, 0xda, 0xd1, 0xff, 0x86, + 0xe9, 0x0b, 0x39, 0x2c, 0x11, 0xd0, 0x7a, 0x70, 0xaa, 0x64, 0x04, 0x4b, 0x6d, 0xeb, 0x51, 0x2b, + 0xd7, 0xe7, 0xfe, 0x65, 0x04, 0x48, 0xd1, 0xcd, 0x8b, 0xda, 0x50, 0x23, 0x8c, 0x6f, 0xba, 0xc4, + 0x66, 0x9e, 0x25, 0xab, 0xee, 0xc7, 0xfa, 0xd9, 0xd1, 0xdc, 0x2d, 0x34, 0x8a, 0x4b, 0x6a, 0x17, + 0x68, 0xbd, 0x07, 0x0d, 0xf7, 0xb1, 0x80, 0xfe, 0x0f, 0x53, 0x2e, 0x25, 0xcc, 0xb1, 0x55, 0xeb, + 0x09, 0xc8, 0xc5, 0xf2, 0x2d, 0x56, 0xab, 0xe8, 0x19, 0x38, 0x52, 0xa7, 0x8c, 0x91, 0x2a, 0xcd, + 0x4c, 0x4a, 0xc1, 0x79, 0x25, 0x78, 0xa4, 0xec, 0xbd, 0xc6, 0xfe, 0xba, 0xfe, 0x20, 0x01, 0xa7, + 0x63, 0x78, 0x5c, 0xb7, 0x18, 0x47, 0x37, 0x7a, 0xe2, 0xd9, 0x18, 0xb1, 0x76, 0x58, 0xcc, 0x8b, + 0xe6, 0x05, 0x65, 0x7b, 0xda, 0x7f, 0x13, 0x8a, 0xe5, 0x1b, 0x30, 0x69, 0x71, 0x5a, 0x17, 0x5e, + 0x49, 0xad, 0xa4, 0x0b, 0x2f, 0x8d, 0x19, 0x6b, 0xc5, 0xa3, 0xca, 0xc6, 0xe4, 0x25, 0x81, 0x86, + 0x3d, 0x50, 0xfd, 0xe7, 0x64, 0xec, 0xd9, 0x44, 0xc0, 0xa3, 0x8f, 0x60, 0x4e, 0x3e, 0x79, 0x95, + 0x19, 0xd3, 0x5b, 0xea, 0x84, 0x43, 0x73, 0x6a, 0x40, 0x43, 0x2f, 0x9e, 0x50, 0x5b, 0x99, 0xdb, + 0x88, 0x40, 0xe3, 0x2e, 0x53, 0xe8, 0x3c, 0xa4, 0xeb, 0x96, 0x8d, 0x69, 0xa3, 0x66, 0x99, 0x84, + 0xa9, 0x7b, 0x91, 0x6c, 0x49, 0xe5, 0xce, 0x6b, 0x1c, 0x96, 0x41, 0x2f, 0x40, 0xba, 0x4e, 0x6e, + 0x07, 0x2a, 0x29, 0xa9, 0x72, 0x4c, 0xd9, 0x4b, 0x97, 0x3b, 0x4b, 0x38, 0x2c, 0x87, 0xae, 0x89, + 0x68, 0x10, 0x55, 0x9a, 0x65, 0x34, 0x49, 
0xf3, 0xd9, 0x61, 0xe7, 0x53, 0x45, 0x5e, 0x94, 0x88, + 0x50, 0xe4, 0x48, 0x08, 0xec, 0x63, 0xe9, 0x3f, 0x68, 0x70, 0x66, 0x60, 0xee, 0xa3, 0x37, 0x00, + 0x39, 0x5b, 0x8c, 0xba, 0xbb, 0xb4, 0xf2, 0xa6, 0x77, 0xe9, 0x17, 0xf7, 0x13, 0xc1, 0x71, 0xca, + 0x6b, 0x89, 0x57, 0x7a, 0x56, 0x71, 0x1f, 0x0d, 0x64, 0xc2, 0x51, 0x91, 0x0c, 0x1e, 0xa1, 0x96, + 0xba, 0x0a, 0xed, 0x2f, 0xd3, 0x16, 0xdb, 0xad, 0xdc, 0xd1, 0xf5, 0x30, 0x08, 0x8e, 0x62, 0xa2, + 0x35, 0x98, 0x57, 0xb5, 0xbe, 0x8b, 0xe0, 0x93, 0x8a, 0x81, 0xf9, 0x52, 0x74, 0x19, 0x77, 0xcb, + 0x0b, 0x88, 0x0a, 0x65, 0x96, 0x4b, 0x2b, 0x01, 0x84, 0x16, 0x85, 0xb8, 0x10, 0x5d, 0xc6, 0xdd, + 0xf2, 0xa8, 0x06, 0x73, 0x0a, 0x55, 0xf1, 0x9d, 0x99, 0x94, 0x2e, 0x7b, 0x76, 0x44, 0x97, 0x79, + 0x45, 0x37, 0x88, 0xc1, 0x52, 0x04, 0x0b, 0x77, 0x61, 0x23, 0x0e, 0x60, 0xfa, 0x25, 0x8e, 0x65, + 0xa6, 0xa4, 0xa5, 0xd7, 0xc6, 0xcc, 0xc1, 0xa0, 0x56, 0x76, 0xda, 0x57, 0xf0, 0x8a, 0xe1, 0x90, + 0x1d, 0xfd, 0x3b, 0x0d, 0xa0, 0x13, 0x61, 0x68, 0x35, 0x52, 0xe4, 0x97, 0xbb, 0x8a, 0xfc, 0x42, + 0xf8, 0x72, 0x1a, 0x2a, 0xe8, 0xd7, 0x61, 0xca, 0x91, 0x99, 0xa7, 0x82, 0xa1, 0x30, 0x6c, 0xdb, + 0x41, 0x2f, 0x0d, 0xd0, 0x8a, 0x20, 0x4a, 0xa7, 0xca, 0x5f, 0x85, 0x86, 0x2e, 0x83, 0xd6, 0x70, + 0x2a, 0x7e, 0xf3, 0x3b, 0x37, 0x0c, 0xf5, 0xaa, 0x53, 0x61, 0x11, 0xcc, 0x69, 0xb1, 0x77, 0xf1, + 0x16, 0x4b, 0x1c, 0xf4, 0x3e, 0x4c, 0xfb, 0xd7, 0x0d, 0x75, 0x37, 0x59, 0x1d, 0x86, 0xd9, 0x6f, + 0x06, 0x2e, 0xce, 0x8a, 0x0a, 0xea, 0xaf, 0xe0, 0x00, 0x13, 0x7d, 0x9a, 0x80, 0x45, 0xb3, 0x7b, + 0xa6, 0xcb, 0x1c, 0x19, 0xad, 0x75, 0x0f, 0x1c, 0xbb, 0x8b, 0xff, 0x69, 0xb7, 0x72, 0x8b, 0x3d, + 0x22, 0xb8, 0xd7, 0x9c, 0x38, 0x24, 0x55, 0x57, 0x56, 0xd9, 0x70, 0x46, 0x38, 0x64, 0xbf, 0xd9, + 0xc3, 0x3b, 0xa4, 0xbf, 0x82, 0x03, 0x4c, 0xfd, 0x7b, 0x0d, 0x66, 0x23, 0x77, 0xe1, 0xbf, 0x22, + 0x66, 0xbc, 0xd4, 0x3a, 0xd8, 0x98, 0xf1, 0x30, 0x0f, 0x3e, 0x66, 0x3c, 0xdc, 0x43, 0x8d, 0x19, + 0xcf, 0xe4, 0x61, 0xc6, 0x4c, 0xe8, 0x90, 0x7d, 0x62, 0xe6, 0x41, 0x0a, 0x50, 0x6f, 0xce, 0x23, + 0x13, 0xa6, 0xbc, 0xa1, 0xeb, 0x20, 0x7a, 0x7d, 0x70, 0xff, 0x52, 0x6d, 0x5d, 0x41, 0x77, 0x8d, + 0x6a, 0xc9, 0x91, 0x46, 0x35, 0x7a, 0x10, 0x23, 0x6d, 0x70, 0x19, 0x88, 0x1d, 0x6b, 0x6f, 0xc2, + 0x34, 0xf3, 0x67, 0x41, 0x6d, 0xfc, 0x59, 0x50, 0xb2, 0x1e, 0x4c, 0x81, 0x01, 0x24, 0xaa, 0xc0, + 0x2c, 0x09, 0x8f, 0x63, 0x93, 0x63, 0x1d, 0x63, 0x41, 0xcc, 0x7e, 0x91, 0x39, 0x2c, 0x82, 0xaa, + 0xff, 0xd2, 0xed, 0x5b, 0xaf, 0x2a, 0xfc, 0x6d, 0x7d, 0x7b, 0x78, 0x53, 0xf1, 0xbf, 0xc2, 0xbd, + 0x5f, 0x27, 0x61, 0xa1, 0xbb, 0xb1, 0x8e, 0xf5, 0xf9, 0xe3, 0x4e, 0xdf, 0x6f, 0x38, 0xc9, 0xb1, + 0x36, 0x1d, 0xcc, 0x6a, 0x23, 0x7e, 0x9d, 0x0d, 0x7b, 0x22, 0x75, 0xe0, 0x9e, 0xd0, 0xbf, 0x89, + 0x72, 0x34, 0xfe, 0x27, 0xa2, 0x98, 0x0f, 0xaa, 0xc9, 0x43, 0xfa, 0xa0, 0xfa, 0x84, 0x69, 0xfa, + 0x36, 0x09, 0xc7, 0x9f, 0xfe, 0xa7, 0x30, 0xfa, 0xd7, 0xc7, 0x1f, 0x7b, 0xf9, 0x7a, 0xfa, 0xcf, + 0xc0, 0x28, 0x81, 0x5c, 0xbc, 0x70, 0xf7, 0x61, 0x76, 0xe2, 0xde, 0xc3, 0xec, 0xc4, 0xfd, 0x87, + 0xd9, 0x89, 0x4f, 0xda, 0xd9, 0xc4, 0xdd, 0x76, 0x36, 0x71, 0xaf, 0x9d, 0x4d, 0xdc, 0x6f, 0x67, + 0x13, 0xbf, 0xb6, 0xb3, 0x89, 0xcf, 0x7f, 0xcb, 0x4e, 0xbc, 0x9b, 0x1d, 0xfc, 0x27, 0xe3, 0x9f, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x5b, 0x05, 0xaa, 0x18, 0x85, 0x1c, 0x00, 0x00, } func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto index 6b3d415212..232a598158 100644 --- 
a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto @@ -51,7 +51,7 @@ message ContainerResourceMetricSource { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; // container is the name of the container in the pods of the scaling target optional string container = 4; @@ -78,7 +78,7 @@ message ContainerResourceMetricStatus { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // It will always be set, regardless of the corresponding metric specification. - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; // container is the name of the container in the pods of the scaling target optional string container = 4; @@ -108,17 +108,17 @@ message ExternalMetricSource { // metricSelector is used to identify a specific time series // within a given metric. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; // targetValue is the target value of the metric (as a quantity). // Mutually exclusive with TargetAverageValue. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; // targetAverageValue is the target per-pod value of global metric (as a quantity). // Mutually exclusive with TargetValue. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4; } // ExternalMetricStatus indicates the current value of a global metric @@ -131,14 +131,14 @@ message ExternalMetricStatus { // metricSelector is used to identify a specific time series // within a given metric. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; // currentValue is the current value of the metric (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; // currentAverageValue is the current value of metric averaged over autoscaled pods. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4; } // HorizontalPodAutoscaler is the configuration for a horizontal pod @@ -148,7 +148,7 @@ message HorizontalPodAutoscaler { // metadata is the standard object metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification for the behaviour of the autoscaler. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. 
@@ -172,7 +172,7 @@ message HorizontalPodAutoscalerCondition { // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // reason is the reason for the condition's last transition. // +optional @@ -188,7 +188,7 @@ message HorizontalPodAutoscalerCondition { message HorizontalPodAutoscalerList { // metadata is the standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of horizontal pod autoscaler objects. repeated HorizontalPodAutoscaler items = 2; @@ -220,6 +220,7 @@ message HorizontalPodAutoscalerSpec { // increased, and vice-versa. See the individual metric source types for // more information about how each type of metric must respond. // +optional + // +listType=atomic repeated MetricSpec metrics = 4; } @@ -232,7 +233,7 @@ message HorizontalPodAutoscalerStatus { // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, // used by the autoscaler to control how often the number of pods is changed. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; // currentReplicas is current number of replicas of pods managed by this autoscaler, // as last seen by the autoscaler. @@ -244,11 +245,13 @@ message HorizontalPodAutoscalerStatus { // currentMetrics is the last read state of the metrics used by this autoscaler. // +optional + // +listType=atomic repeated MetricStatus currentMetrics = 5; // conditions is the set of conditions required for this autoscaler to scale its target, // and indicates whether or not those conditions are met. // +optional + // +listType=atomic repeated HorizontalPodAutoscalerCondition conditions = 6; } @@ -352,18 +355,18 @@ message ObjectMetricSource { optional string metricName = 2; // targetValue is the target value of the metric (as a quantity). - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; } // ObjectMetricStatus indicates the current value of a metric describing a @@ -376,18 +379,18 @@ message ObjectMetricStatus { optional string metricName = 2; // currentValue is the current value of the metric (as a quantity). 
- optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; } // PodsMetricSource indicates how to scale on a metric describing each pod in @@ -400,13 +403,13 @@ message PodsMetricSource { // targetAverageValue is the target value of the average of the // metric across all relevant pods (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; } // PodsMetricStatus indicates the current value of a metric describing each pod in @@ -417,13 +420,13 @@ message PodsMetricStatus { // currentAverageValue is the current value of the average of the // metric across all relevant pods (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; } // ResourceMetricSource indicates how to scale on a resource metric known to @@ -447,7 +450,7 @@ message ResourceMetricSource { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; } // ResourceMetricStatus indicates the current value of a resource metric known to @@ -471,6 +474,6 @@ message ResourceMetricStatus { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // It will always be set, regardless of the corresponding metric specification. 
- optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; } diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/vendor/k8s.io/api/autoscaling/v2beta1/types.go index 842284072d..193cc43549 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types.go @@ -56,6 +56,7 @@ type HorizontalPodAutoscalerSpec struct { // increased, and vice-versa. See the individual metric source types for // more information about how each type of metric must respond. // +optional + // +listType=atomic Metrics []MetricSpec `json:"metrics,omitempty" protobuf:"bytes,4,rep,name=metrics"` } @@ -260,11 +261,13 @@ type HorizontalPodAutoscalerStatus struct { // currentMetrics is the last read state of the metrics used by this autoscaler. // +optional + // +listType=atomic CurrentMetrics []MetricStatus `json:"currentMetrics" protobuf:"bytes,5,rep,name=currentMetrics"` // conditions is the set of conditions required for this autoscaler to scale its target, // and indicates whether or not those conditions are met. // +optional + // +listType=atomic Conditions []HorizontalPodAutoscalerCondition `json:"conditions" protobuf:"bytes,6,rep,name=conditions"` } diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go index 211acd1ae3..741979505d 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +// source: k8s.io/api/autoscaling/v2beta2/generated.proto package v2beta2 @@ -50,7 +50,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} } func (*ContainerResourceMetricSource) ProtoMessage() {} func (*ContainerResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{0} + return fileDescriptor_1076ab1fac987148, []int{0} } func (m *ContainerResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -78,7 +78,7 @@ var xxx_messageInfo_ContainerResourceMetricSource proto.InternalMessageInfo func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} } func (*ContainerResourceMetricStatus) ProtoMessage() {} func (*ContainerResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{1} + return fileDescriptor_1076ab1fac987148, []int{1} } func (m *ContainerResourceMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,7 +106,7 @@ var xxx_messageInfo_ContainerResourceMetricStatus proto.InternalMessageInfo func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{2} + return fileDescriptor_1076ab1fac987148, []int{2} } func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,7 +134,7 @@ var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } func (*ExternalMetricSource) 
ProtoMessage() {} func (*ExternalMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{3} + return fileDescriptor_1076ab1fac987148, []int{3} } func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -162,7 +162,7 @@ var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } func (*ExternalMetricStatus) ProtoMessage() {} func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{4} + return fileDescriptor_1076ab1fac987148, []int{4} } func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -190,7 +190,7 @@ var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo func (m *HPAScalingPolicy) Reset() { *m = HPAScalingPolicy{} } func (*HPAScalingPolicy) ProtoMessage() {} func (*HPAScalingPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{5} + return fileDescriptor_1076ab1fac987148, []int{5} } func (m *HPAScalingPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -218,7 +218,7 @@ var xxx_messageInfo_HPAScalingPolicy proto.InternalMessageInfo func (m *HPAScalingRules) Reset() { *m = HPAScalingRules{} } func (*HPAScalingRules) ProtoMessage() {} func (*HPAScalingRules) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{6} + return fileDescriptor_1076ab1fac987148, []int{6} } func (m *HPAScalingRules) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -246,7 +246,7 @@ var xxx_messageInfo_HPAScalingRules proto.InternalMessageInfo func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } func (*HorizontalPodAutoscaler) ProtoMessage() {} func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{7} + return fileDescriptor_1076ab1fac987148, []int{7} } func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -274,7 +274,7 @@ var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerBehavior) Reset() { *m = HorizontalPodAutoscalerBehavior{} } func (*HorizontalPodAutoscalerBehavior) ProtoMessage() {} func (*HorizontalPodAutoscalerBehavior) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{8} + return fileDescriptor_1076ab1fac987148, []int{8} } func (m *HorizontalPodAutoscalerBehavior) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -302,7 +302,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerBehavior proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{9} + return fileDescriptor_1076ab1fac987148, []int{9} } func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -330,7 +330,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{10} + return fileDescriptor_1076ab1fac987148, []int{10} } func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) 
error { return m.Unmarshal(b) @@ -358,7 +358,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{11} + return fileDescriptor_1076ab1fac987148, []int{11} } func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -386,7 +386,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{12} + return fileDescriptor_1076ab1fac987148, []int{12} } func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -414,7 +414,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} } func (*MetricIdentifier) ProtoMessage() {} func (*MetricIdentifier) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{13} + return fileDescriptor_1076ab1fac987148, []int{13} } func (m *MetricIdentifier) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -442,7 +442,7 @@ var xxx_messageInfo_MetricIdentifier proto.InternalMessageInfo func (m *MetricSpec) Reset() { *m = MetricSpec{} } func (*MetricSpec) ProtoMessage() {} func (*MetricSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{14} + return fileDescriptor_1076ab1fac987148, []int{14} } func (m *MetricSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -470,7 +470,7 @@ var xxx_messageInfo_MetricSpec proto.InternalMessageInfo func (m *MetricStatus) Reset() { *m = MetricStatus{} } func (*MetricStatus) ProtoMessage() {} func (*MetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{15} + return fileDescriptor_1076ab1fac987148, []int{15} } func (m *MetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -498,7 +498,7 @@ var xxx_messageInfo_MetricStatus proto.InternalMessageInfo func (m *MetricTarget) Reset() { *m = MetricTarget{} } func (*MetricTarget) ProtoMessage() {} func (*MetricTarget) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{16} + return fileDescriptor_1076ab1fac987148, []int{16} } func (m *MetricTarget) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -526,7 +526,7 @@ var xxx_messageInfo_MetricTarget proto.InternalMessageInfo func (m *MetricValueStatus) Reset() { *m = MetricValueStatus{} } func (*MetricValueStatus) ProtoMessage() {} func (*MetricValueStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{17} + return fileDescriptor_1076ab1fac987148, []int{17} } func (m *MetricValueStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -554,7 +554,7 @@ var xxx_messageInfo_MetricValueStatus proto.InternalMessageInfo func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } func (*ObjectMetricSource) ProtoMessage() {} func (*ObjectMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{18} + return fileDescriptor_1076ab1fac987148, []int{18} } func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { 
return m.Unmarshal(b) @@ -582,7 +582,7 @@ var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } func (*ObjectMetricStatus) ProtoMessage() {} func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{19} + return fileDescriptor_1076ab1fac987148, []int{19} } func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -610,7 +610,7 @@ var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } func (*PodsMetricSource) ProtoMessage() {} func (*PodsMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{20} + return fileDescriptor_1076ab1fac987148, []int{20} } func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -638,7 +638,7 @@ var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } func (*PodsMetricStatus) ProtoMessage() {} func (*PodsMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{21} + return fileDescriptor_1076ab1fac987148, []int{21} } func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -666,7 +666,7 @@ var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } func (*ResourceMetricSource) ProtoMessage() {} func (*ResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{22} + return fileDescriptor_1076ab1fac987148, []int{22} } func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -694,7 +694,7 @@ var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } func (*ResourceMetricStatus) ProtoMessage() {} func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{23} + return fileDescriptor_1076ab1fac987148, []int{23} } func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -747,120 +747,119 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto", fileDescriptor_592ad94d7d6be24f) + proto.RegisterFile("k8s.io/api/autoscaling/v2beta2/generated.proto", fileDescriptor_1076ab1fac987148) } -var fileDescriptor_592ad94d7d6be24f = []byte{ - // 1743 bytes of a gzipped FileDescriptorProto +var fileDescriptor_1076ab1fac987148 = []byte{ + // 1727 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcd, 0x6f, 0x1b, 0xc7, 0x15, 0xd7, 0x92, 0xd4, 0xd7, 0x50, 0x9f, 0xe3, 0x2f, 0x42, 0x86, 0x49, 0x61, 0x6b, 0xb4, 0xae, - 0xd1, 0x2e, 0x2b, 0x56, 0x6d, 0x0d, 0x18, 0x45, 0xab, 0x95, 0x5b, 0xdb, 0xb0, 0x64, 0xab, 0x43, - 0x59, 0x2d, 0x0a, 0xd9, 0xe8, 0x70, 0x77, 0x44, 0x4d, 0x45, 0xee, 0x12, 0xbb, 0x4b, 0xda, 0x72, + 0xd1, 0x2e, 0x2b, 0x56, 0x6d, 0x0d, 0x18, 0x45, 0xab, 0x95, 0xdb, 0xda, 0xb0, 0x64, 0xab, 0x43, + 0x59, 0x2d, 0x02, 0xd9, 0xc8, 0x70, 0x77, 0x44, 0x4d, 0x44, 0xee, 0x12, 0xbb, 0x4b, 0xda, 0x72, 0x80, 0x20, 0x08, 0x90, 0x7b, 0x90, 0x20, 0xd7, 0xfc, 0x09, 0x09, 0x7c, 0x09, 0x90, 0x63, 0x3e, 0x60, 0x18, 0x41, 0x10, 0xf8, 0x16, 0xe7, 0x42, 0xc4, 0xcc, 0x31, 0xc7, 0xdc, 0x7c, 0x0a, 0xe6, 0x63, 0x3f, 0x49, 0x89, 0x94, 0x20, 0x29, 0xd0, 0x8d, 
0x3b, 0xf3, 0xde, 0xef, 0xcd, 0x7b, 0xf3, - 0x7b, 0x6f, 0xde, 0x0c, 0xc1, 0xcd, 0x9d, 0x6b, 0xae, 0x46, 0xed, 0xe2, 0x4e, 0xb3, 0x42, 0x1c, - 0x8b, 0x78, 0xc4, 0x2d, 0xb6, 0x88, 0x65, 0xda, 0x4e, 0x51, 0x4e, 0xe0, 0x06, 0x2d, 0xe2, 0xa6, - 0x67, 0xbb, 0x06, 0xae, 0x51, 0xab, 0x5a, 0x6c, 0x95, 0x2a, 0xc4, 0xc3, 0xa5, 0x62, 0x95, 0x58, - 0xc4, 0xc1, 0x1e, 0x31, 0xb5, 0x86, 0x63, 0x7b, 0x36, 0xcc, 0x0b, 0x79, 0x0d, 0x37, 0xa8, 0x16, - 0x91, 0xd7, 0xa4, 0xfc, 0xdc, 0xef, 0xab, 0xd4, 0xdb, 0x6e, 0x56, 0x34, 0xc3, 0xae, 0x17, 0xab, - 0x76, 0xd5, 0x2e, 0x72, 0xb5, 0x4a, 0x73, 0x8b, 0x7f, 0xf1, 0x0f, 0xfe, 0x4b, 0xc0, 0xcd, 0xa9, - 0x11, 0xf3, 0x86, 0xed, 0x90, 0x62, 0x6b, 0x21, 0x69, 0x72, 0x6e, 0x31, 0x94, 0xa9, 0x63, 0x63, - 0x9b, 0x5a, 0xc4, 0xd9, 0x2d, 0x36, 0x76, 0xaa, 0x5c, 0xc9, 0x21, 0xae, 0xdd, 0x74, 0x0c, 0x72, - 0x20, 0x2d, 0xb7, 0x58, 0x27, 0x1e, 0xee, 0x65, 0xab, 0xb8, 0x97, 0x96, 0xd3, 0xb4, 0x3c, 0x5a, - 0xef, 0x36, 0xf3, 0xe7, 0x7e, 0x0a, 0xae, 0xb1, 0x4d, 0xea, 0x38, 0xa9, 0xa7, 0xfe, 0xa8, 0x80, - 0x4b, 0xcb, 0xb6, 0xe5, 0x61, 0xa6, 0x81, 0xa4, 0x13, 0xab, 0xc4, 0x73, 0xa8, 0x51, 0xe6, 0xbf, - 0xe1, 0x32, 0xc8, 0x58, 0xb8, 0x4e, 0x72, 0xca, 0xbc, 0x72, 0x65, 0x5c, 0x2f, 0x3e, 0x6f, 0x17, - 0x86, 0x3a, 0xed, 0x42, 0xe6, 0x2e, 0xae, 0x93, 0xd7, 0xed, 0x42, 0xa1, 0x3b, 0x70, 0x9a, 0x0f, - 0xc3, 0x44, 0x10, 0x57, 0x86, 0xeb, 0x60, 0xc4, 0xc3, 0x4e, 0x95, 0x78, 0xb9, 0xd4, 0xbc, 0x72, - 0x25, 0x5b, 0xfa, 0x9d, 0xb6, 0xff, 0xfe, 0x69, 0x62, 0x09, 0xeb, 0x5c, 0x47, 0x9f, 0x92, 0x46, - 0x47, 0xc4, 0x37, 0x92, 0x58, 0xb0, 0x08, 0xc6, 0x0d, 0x7f, 0xed, 0xb9, 0x34, 0x5f, 0xdf, 0xac, - 0x14, 0x1d, 0x0f, 0x9d, 0x0a, 0x65, 0xd4, 0x9f, 0xf6, 0xf1, 0xd6, 0xc3, 0x5e, 0xd3, 0x3d, 0x1a, - 0x6f, 0x37, 0xc1, 0xa8, 0xd1, 0x74, 0x1c, 0x62, 0xf9, 0xee, 0x2e, 0x0c, 0xe6, 0xee, 0x06, 0xae, - 0x35, 0x89, 0x58, 0x88, 0x3e, 0x2d, 0x4d, 0x8f, 0x2e, 0x0b, 0x24, 0xe4, 0x43, 0x1e, 0xdc, 0xeb, - 0x0f, 0x14, 0x70, 0x71, 0xd9, 0xb1, 0x5d, 0x77, 0x83, 0x38, 0x2e, 0xb5, 0xad, 0x7b, 0x95, 0xff, - 0x13, 0xc3, 0x43, 0x64, 0x8b, 0x38, 0xc4, 0x32, 0x08, 0x9c, 0x07, 0x99, 0x1d, 0x6a, 0x99, 0xd2, - 0xe7, 0x09, 0xdf, 0xe7, 0x3b, 0xd4, 0x32, 0x11, 0x9f, 0x61, 0x12, 0x3c, 0x2a, 0xa9, 0xb8, 0x44, - 0xc4, 0xe5, 0x12, 0x00, 0xb8, 0x41, 0xa5, 0x01, 0xb9, 0x2a, 0x28, 0xe5, 0xc0, 0xd2, 0xda, 0x6d, - 0x39, 0x83, 0x22, 0x52, 0xea, 0x33, 0x05, 0x9c, 0xfd, 0xc7, 0x63, 0x8f, 0x38, 0x16, 0xae, 0xc5, - 0x28, 0xf7, 0x1f, 0x30, 0x52, 0xe7, 0xdf, 0x7c, 0x49, 0xd9, 0xd2, 0x1f, 0x06, 0x0b, 0xdf, 0x6d, + 0x7b, 0x6f, 0xde, 0x0c, 0x81, 0xb6, 0x73, 0xcd, 0xd5, 0xa8, 0x5d, 0xc4, 0x0d, 0x5a, 0xc4, 0x4d, + 0xcf, 0x76, 0x0d, 0x5c, 0xa3, 0x56, 0xb5, 0xd8, 0x2a, 0x55, 0x88, 0x87, 0x4b, 0xc5, 0x2a, 0xb1, + 0x88, 0x83, 0x3d, 0x62, 0x6a, 0x0d, 0xc7, 0xf6, 0x6c, 0x98, 0x17, 0xf2, 0x1a, 0x6e, 0x50, 0x2d, + 0x22, 0xaf, 0x49, 0xf9, 0xb9, 0x3f, 0x56, 0xa9, 0xb7, 0xdd, 0xac, 0x68, 0x86, 0x5d, 0x2f, 0x56, + 0xed, 0xaa, 0x5d, 0xe4, 0x6a, 0x95, 0xe6, 0x16, 0xff, 0xe2, 0x1f, 0xfc, 0x97, 0x80, 0x9b, 0x53, + 0x23, 0xe6, 0x0d, 0xdb, 0x21, 0xc5, 0xd6, 0x42, 0xd2, 0xe4, 0xdc, 0x62, 0x28, 0x53, 0xc7, 0xc6, + 0x36, 0xb5, 0x88, 0xb3, 0x5b, 0x6c, 0xec, 0x54, 0xb9, 0x92, 0x43, 0x5c, 0xbb, 0xe9, 0x18, 0xe4, + 0x40, 0x5a, 0x6e, 0xb1, 0x4e, 0x3c, 0xdc, 0xcb, 0x56, 0x71, 0x2f, 0x2d, 0xa7, 0x69, 0x79, 0xb4, + 0xde, 0x6d, 0xe6, 0xaf, 0xfd, 0x14, 0x5c, 0x63, 0x9b, 0xd4, 0x71, 0x52, 0x4f, 0xfd, 0x49, 0x01, + 0x97, 0x96, 0x6d, 0xcb, 0xc3, 0x4c, 0x03, 0x49, 0x27, 0x56, 0x89, 0xe7, 0x50, 0xa3, 0xcc, 0x7f, + 0xc3, 0x65, 0x90, 0xb1, 0x70, 0x9d, 0xe4, 0x94, 0x79, 0xe5, 0xca, 0xb8, 0x5e, 
0x7c, 0xd6, 0x2e, + 0x0c, 0x75, 0xda, 0x85, 0xcc, 0x1d, 0x5c, 0x27, 0xaf, 0xda, 0x85, 0x42, 0x77, 0xe0, 0x34, 0x1f, + 0x86, 0x89, 0x20, 0xae, 0x0c, 0xd7, 0xc1, 0x88, 0x87, 0x9d, 0x2a, 0xf1, 0x72, 0xa9, 0x79, 0xe5, + 0x4a, 0xb6, 0xf4, 0x07, 0x6d, 0xff, 0xfd, 0xd3, 0xc4, 0x12, 0xd6, 0xb9, 0x8e, 0x3e, 0x25, 0x8d, + 0x8e, 0x88, 0x6f, 0x24, 0xb1, 0x60, 0x11, 0x8c, 0x1b, 0xfe, 0xda, 0x73, 0x69, 0xbe, 0xbe, 0x59, + 0x29, 0x3a, 0x1e, 0x3a, 0x15, 0xca, 0xa8, 0x3f, 0xef, 0xe3, 0xad, 0x87, 0xbd, 0xa6, 0x7b, 0x34, + 0xde, 0x6e, 0x82, 0x51, 0xa3, 0xe9, 0x38, 0xc4, 0xf2, 0xdd, 0x5d, 0x18, 0xcc, 0xdd, 0x0d, 0x5c, + 0x6b, 0x12, 0xb1, 0x10, 0x7d, 0x5a, 0x9a, 0x1e, 0x5d, 0x16, 0x48, 0xc8, 0x87, 0x3c, 0xb8, 0xd7, + 0x1f, 0x2a, 0xe0, 0xe2, 0xb2, 0x63, 0xbb, 0xee, 0x06, 0x71, 0x5c, 0x6a, 0x5b, 0x77, 0x2b, 0x6f, + 0x10, 0xc3, 0x43, 0x64, 0x8b, 0x38, 0xc4, 0x32, 0x08, 0x9c, 0x07, 0x99, 0x1d, 0x6a, 0x99, 0xd2, + 0xe7, 0x09, 0xdf, 0xe7, 0xdb, 0xd4, 0x32, 0x11, 0x9f, 0x61, 0x12, 0x3c, 0x2a, 0xa9, 0xb8, 0x44, + 0xc4, 0xe5, 0x12, 0x00, 0xb8, 0x41, 0xa5, 0x01, 0xb9, 0x2a, 0x28, 0xe5, 0xc0, 0xd2, 0xda, 0x2d, + 0x39, 0x83, 0x22, 0x52, 0xea, 0x53, 0x05, 0x9c, 0xfd, 0xd7, 0x23, 0x8f, 0x38, 0x16, 0xae, 0xc5, + 0x28, 0xf7, 0x7f, 0x30, 0x52, 0xe7, 0xdf, 0x7c, 0x49, 0xd9, 0xd2, 0x9f, 0x06, 0x0b, 0xdf, 0x2d, 0x93, 0x58, 0x1e, 0xdd, 0xa2, 0xc4, 0x09, 0x19, 0x23, 0x66, 0x90, 0xc4, 0x3b, 0x1e, 0x1e, 0xaa, - 0xdf, 0x74, 0x3b, 0x22, 0xd8, 0x74, 0x7c, 0x8e, 0x1c, 0x2b, 0xc5, 0xd4, 0x8f, 0x14, 0x30, 0x73, - 0x6b, 0x6d, 0xa9, 0x2c, 0x20, 0xd6, 0xec, 0x1a, 0x35, 0x76, 0xe1, 0x35, 0x90, 0xf1, 0x76, 0x1b, + 0xdf, 0x76, 0x3b, 0x22, 0xd8, 0x74, 0x7c, 0x8e, 0x1c, 0x2b, 0xc5, 0xd4, 0x8f, 0x15, 0x30, 0x73, + 0x73, 0x6d, 0xa9, 0x2c, 0x20, 0xd6, 0xec, 0x1a, 0x35, 0x76, 0xe1, 0x35, 0x90, 0xf1, 0x76, 0x1b, 0x7e, 0x6a, 0x5c, 0xf6, 0x49, 0xb0, 0xbe, 0xdb, 0x60, 0xa9, 0x71, 0x36, 0x29, 0xcf, 0xc6, 0x11, - 0xd7, 0x80, 0xbf, 0x02, 0xc3, 0x2d, 0x66, 0x97, 0x2f, 0x75, 0x58, 0x9f, 0x94, 0xaa, 0xc3, 0x7c, + 0xd7, 0x80, 0xbf, 0x01, 0xc3, 0x2d, 0x66, 0x97, 0x2f, 0x75, 0x58, 0x9f, 0x94, 0xaa, 0xc3, 0x7c, 0x31, 0x48, 0xcc, 0xc1, 0xeb, 0x60, 0xb2, 0x41, 0x1c, 0x6a, 0x9b, 0x65, 0x62, 0xd8, 0x96, 0xe9, - 0x72, 0x12, 0x0d, 0xeb, 0xe7, 0xa4, 0xf0, 0xe4, 0x5a, 0x74, 0x12, 0xc5, 0x65, 0xd5, 0x0f, 0x53, - 0x60, 0x3a, 0x5c, 0x00, 0x6a, 0xd6, 0x88, 0x0b, 0x1f, 0x82, 0x39, 0xd7, 0xc3, 0x15, 0x5a, 0xa3, - 0x4f, 0xb0, 0x47, 0x6d, 0xeb, 0xdf, 0xd4, 0x32, 0xed, 0x47, 0x71, 0xf4, 0x7c, 0xa7, 0x5d, 0x98, - 0x2b, 0xef, 0x29, 0x85, 0xf6, 0x41, 0x80, 0x77, 0xc0, 0x84, 0x4b, 0x6a, 0xc4, 0xf0, 0x84, 0xbf, - 0x32, 0x2e, 0xbf, 0xe9, 0xb4, 0x0b, 0x13, 0xe5, 0xc8, 0xf8, 0xeb, 0x76, 0xe1, 0x4c, 0x2c, 0x30, - 0x62, 0x12, 0xc5, 0x94, 0xe1, 0x43, 0x30, 0xd6, 0x60, 0xbf, 0x28, 0x71, 0x73, 0xa9, 0xf9, 0xf4, + 0x72, 0x12, 0x0d, 0xeb, 0xe7, 0xa4, 0xf0, 0xe4, 0x5a, 0x74, 0x12, 0xc5, 0x65, 0xd5, 0x8f, 0x52, + 0x60, 0x3a, 0x5c, 0x00, 0x6a, 0xd6, 0x88, 0x0b, 0x1f, 0x80, 0x39, 0xd7, 0xc3, 0x15, 0x5a, 0xa3, + 0x8f, 0xb1, 0x47, 0x6d, 0xeb, 0x7f, 0xd4, 0x32, 0xed, 0x87, 0x71, 0xf4, 0x7c, 0xa7, 0x5d, 0x98, + 0x2b, 0xef, 0x29, 0x85, 0xf6, 0x41, 0x80, 0xb7, 0xc1, 0x84, 0x4b, 0x6a, 0xc4, 0xf0, 0x84, 0xbf, + 0x32, 0x2e, 0xbf, 0xeb, 0xb4, 0x0b, 0x13, 0xe5, 0xc8, 0xf8, 0xab, 0x76, 0xe1, 0x4c, 0x2c, 0x30, + 0x62, 0x12, 0xc5, 0x94, 0xe1, 0x03, 0x30, 0xd6, 0x60, 0xbf, 0x28, 0x71, 0x73, 0xa9, 0xf9, 0xf4, 0x20, 0x5c, 0x49, 0x06, 0x5c, 0x9f, 0x91, 0xa1, 0x1a, 0x5b, 0x93, 0x48, 0x28, 0xc0, 0x54, 0x3f, - 0x4b, 0x81, 0x0b, 0xb7, 0x6c, 0x87, 0x3e, 0x61, 0x55, 0xa1, 0xb6, 0x66, 0x9b, 0x4b, 0x12, 0x91, - 0x38, 0xf0, 
0x7f, 0x60, 0x8c, 0x9d, 0x43, 0x26, 0xf6, 0x70, 0x0f, 0x9e, 0x06, 0xc7, 0x89, 0xd6, - 0xd8, 0xa9, 0xb2, 0x01, 0x57, 0x63, 0xd2, 0x5a, 0x6b, 0x41, 0x13, 0x85, 0x64, 0x95, 0x78, 0x38, - 0xcc, 0xf5, 0x70, 0x0c, 0x05, 0xa8, 0xf0, 0x01, 0xc8, 0xb8, 0x0d, 0x62, 0x48, 0xaa, 0x5e, 0xef, - 0xeb, 0x59, 0xef, 0x85, 0x96, 0x1b, 0xc4, 0x08, 0x8b, 0x0f, 0xfb, 0x42, 0x1c, 0x16, 0x12, 0x30, - 0xe2, 0x72, 0x4a, 0xf3, 0x5d, 0xcd, 0x96, 0xfe, 0x7a, 0x58, 0x03, 0x22, 0x2f, 0x82, 0x9c, 0x13, - 0xdf, 0x48, 0x82, 0xab, 0xdf, 0x2a, 0xa0, 0xb0, 0x87, 0xa6, 0x4e, 0xb6, 0x71, 0x8b, 0xda, 0x0e, - 0xdc, 0x00, 0xa3, 0x7c, 0xe4, 0x7e, 0x43, 0x86, 0xb2, 0x38, 0xf8, 0x36, 0x72, 0xda, 0xea, 0x59, - 0x96, 0x91, 0x65, 0x81, 0x81, 0x7c, 0x30, 0xb8, 0x09, 0xc6, 0xf9, 0xcf, 0x1b, 0xf6, 0x23, 0x4b, - 0x86, 0xf1, 0xc0, 0xc8, 0x93, 0xec, 0x84, 0x28, 0xfb, 0x28, 0x28, 0x04, 0x54, 0xdf, 0x49, 0x83, - 0xf9, 0x3d, 0x3c, 0x5b, 0xb6, 0x2d, 0x93, 0x32, 0xf2, 0xc3, 0x5b, 0xb1, 0xfc, 0x5f, 0x4c, 0xe4, - 0xff, 0xe5, 0x7e, 0xfa, 0x91, 0x7a, 0xb0, 0x12, 0xec, 0x57, 0x2a, 0x86, 0x25, 0x03, 0xfe, 0xba, - 0x5d, 0xe8, 0xd1, 0x8f, 0x69, 0x01, 0x52, 0x7c, 0x5b, 0x60, 0x0b, 0xc0, 0x1a, 0x76, 0xbd, 0x75, - 0x07, 0x5b, 0xae, 0xb0, 0x44, 0xeb, 0x44, 0x32, 0xe1, 0xea, 0x60, 0x44, 0x66, 0x1a, 0xfa, 0x9c, - 0x5c, 0x05, 0x5c, 0xe9, 0x42, 0x43, 0x3d, 0x2c, 0xc0, 0x5f, 0x83, 0x11, 0x87, 0x60, 0xd7, 0xb6, - 0x72, 0x19, 0xee, 0x45, 0x40, 0x1b, 0xc4, 0x47, 0x91, 0x9c, 0x85, 0xbf, 0x05, 0xa3, 0x75, 0xe2, - 0xba, 0xb8, 0x4a, 0x72, 0xc3, 0x5c, 0x30, 0xa8, 0xbb, 0xab, 0x62, 0x18, 0xf9, 0xf3, 0xea, 0x77, - 0x0a, 0xb8, 0xb8, 0x47, 0x1c, 0x57, 0xa8, 0xeb, 0xc1, 0xcd, 0xae, 0x4c, 0xd5, 0x06, 0x73, 0x90, - 0x69, 0xf3, 0x3c, 0x0d, 0x6a, 0x84, 0x3f, 0x12, 0xc9, 0xd2, 0x4d, 0x30, 0x4c, 0x3d, 0x52, 0xf7, - 0x0b, 0xd0, 0x5f, 0x0e, 0x99, 0x45, 0x61, 0x7d, 0xbf, 0xcd, 0xd0, 0x90, 0x00, 0x55, 0x9f, 0xa5, - 0xf7, 0xf4, 0x8d, 0xa5, 0x32, 0x7c, 0x03, 0x4c, 0xf1, 0x2f, 0x79, 0xb6, 0x92, 0x2d, 0xe9, 0x61, - 0xdf, 0x6a, 0xb1, 0x4f, 0x6b, 0xa3, 0x9f, 0x97, 0x4b, 0x99, 0x2a, 0xc7, 0xa0, 0x51, 0xc2, 0x14, - 0x5c, 0x00, 0xd9, 0x3a, 0xb5, 0x10, 0x69, 0xd4, 0xa8, 0x81, 0x5d, 0x79, 0x4e, 0x4d, 0x77, 0xda, - 0x85, 0xec, 0x6a, 0x38, 0x8c, 0xa2, 0x32, 0xf0, 0x4f, 0x20, 0x5b, 0xc7, 0x8f, 0x03, 0x15, 0x71, - 0x9e, 0x9c, 0x91, 0xf6, 0xb2, 0xab, 0xe1, 0x14, 0x8a, 0xca, 0xc1, 0xfb, 0x8c, 0x0d, 0xec, 0x24, - 0x76, 0x73, 0x19, 0x1e, 0xe6, 0xab, 0x83, 0x1d, 0xdc, 0xbc, 0xf8, 0x45, 0x98, 0xc3, 0x21, 0x90, - 0x8f, 0x05, 0x29, 0x18, 0xab, 0xc8, 0x1a, 0xc4, 0x59, 0x96, 0x2d, 0xfd, 0xed, 0xb0, 0xdb, 0x27, - 0x61, 0xf4, 0x09, 0x46, 0x13, 0xff, 0x0b, 0x05, 0xf0, 0xea, 0x27, 0x19, 0x70, 0x69, 0xdf, 0x02, - 0x0a, 0xff, 0x09, 0xa0, 0x5d, 0x71, 0x89, 0xd3, 0x22, 0xe6, 0x4d, 0x71, 0xdf, 0x60, 0x4d, 0x21, - 0xdb, 0xce, 0xb4, 0x7e, 0x9e, 0x65, 0xd8, 0xbd, 0xae, 0x59, 0xd4, 0x43, 0x03, 0x1a, 0x60, 0x92, - 0xe5, 0x9d, 0xd8, 0x3b, 0x2a, 0xfb, 0xcf, 0x83, 0x25, 0xf5, 0x2c, 0x6b, 0x1d, 0x56, 0xa2, 0x20, - 0x28, 0x8e, 0x09, 0x97, 0xc0, 0xb4, 0x6c, 0x7b, 0x12, 0x7b, 0x79, 0x41, 0x06, 0x7b, 0x7a, 0x39, - 0x3e, 0x8d, 0x92, 0xf2, 0x0c, 0xc2, 0x24, 0x2e, 0x75, 0x88, 0x19, 0x40, 0x64, 0xe2, 0x10, 0x37, - 0xe2, 0xd3, 0x28, 0x29, 0x0f, 0x6b, 0x60, 0x4a, 0xa2, 0xca, 0xad, 0xcd, 0x0d, 0x73, 0x76, 0x0c, - 0xd8, 0xa0, 0xca, 0x93, 0x2b, 0xa0, 0xfb, 0x72, 0x0c, 0x0b, 0x25, 0xb0, 0xa1, 0x07, 0x80, 0xe1, - 0x57, 0x53, 0x37, 0x37, 0xc2, 0x2d, 0xfd, 0xfd, 0x90, 0x7c, 0x09, 0xca, 0x72, 0xd8, 0x03, 0x04, - 0x43, 0x2e, 0x8a, 0xd8, 0x51, 0xdf, 0x57, 0xc0, 0x4c, 0xb2, 0xc1, 0x0d, 0xae, 0x16, 0xca, 0x9e, - 0x57, 0x8b, 0x07, 0x60, 0x4c, 0xb4, 
0x4a, 0xb6, 0x23, 0x09, 0xf0, 0xc7, 0x01, 0x8b, 0x1e, 0xae, - 0x90, 0x5a, 0x59, 0xaa, 0x0a, 0x3a, 0xfb, 0x5f, 0x28, 0x80, 0x54, 0x3f, 0xce, 0x00, 0x10, 0xa6, - 0x18, 0x5c, 0x8c, 0x9d, 0x72, 0xf3, 0x89, 0x53, 0x6e, 0x26, 0x7a, 0x4f, 0x89, 0x9c, 0x68, 0x1b, - 0x60, 0xc4, 0xe6, 0xa5, 0x47, 0xae, 0xb0, 0xd4, 0x2f, 0x98, 0x41, 0x9b, 0x14, 0xa0, 0xe9, 0x80, - 0x9d, 0x1d, 0xb2, 0x80, 0x49, 0x34, 0x78, 0x17, 0x64, 0x1a, 0xb6, 0xe9, 0xf7, 0x35, 0x7d, 0x5b, - 0xc2, 0x35, 0xdb, 0x74, 0x63, 0x98, 0x63, 0x6c, 0xed, 0x6c, 0x14, 0x71, 0x1c, 0xd6, 0x66, 0xfa, - 0x2f, 0x15, 0x9c, 0xa2, 0xd9, 0xd2, 0x62, 0x3f, 0xcc, 0x5e, 0x8f, 0x02, 0x22, 0x98, 0xfe, 0x0c, - 0x0a, 0x30, 0xe1, 0xdb, 0x0a, 0x98, 0x35, 0x92, 0x17, 0xec, 0xdc, 0xe8, 0x60, 0x5d, 0xd9, 0xbe, - 0xef, 0x10, 0xfa, 0xb9, 0x4e, 0xbb, 0x30, 0xdb, 0x25, 0x82, 0xba, 0xcd, 0x31, 0x27, 0x89, 0xbc, - 0x8d, 0xc9, 0x5a, 0xd8, 0xd7, 0xc9, 0x5e, 0xd7, 0x50, 0xe1, 0xa4, 0x3f, 0x83, 0x02, 0x4c, 0xf5, - 0x69, 0x06, 0x4c, 0xc4, 0xae, 0x79, 0xbf, 0x04, 0x67, 0x44, 0xc2, 0x1f, 0x2d, 0x67, 0x04, 0xe6, - 0xd1, 0x73, 0x46, 0xe0, 0x9e, 0x28, 0x67, 0x84, 0xc9, 0x93, 0xe4, 0x4c, 0xc4, 0xc9, 0x1e, 0x9c, - 0xf9, 0x22, 0xe5, 0x73, 0x46, 0x34, 0x1d, 0x83, 0x71, 0x46, 0xc8, 0x46, 0x38, 0x73, 0x2f, 0x7a, - 0x93, 0xee, 0xd3, 0xfd, 0x69, 0x7e, 0x84, 0xb5, 0x7f, 0x35, 0xb1, 0xe5, 0x51, 0x6f, 0x57, 0x1f, - 0xef, 0xba, 0x75, 0x9b, 0x60, 0x02, 0xb7, 0x88, 0x83, 0xab, 0x84, 0x0f, 0x4b, 0xd2, 0x1c, 0x14, - 0x77, 0x86, 0x5d, 0x7a, 0x97, 0x22, 0x38, 0x28, 0x86, 0xca, 0x1a, 0x02, 0xf9, 0x7d, 0xdf, 0x0b, - 0x6e, 0xd3, 0xf2, 0x8c, 0xe4, 0x0d, 0xc1, 0x52, 0xd7, 0x2c, 0xea, 0xa1, 0xa1, 0xbe, 0x97, 0x02, - 0xb3, 0x5d, 0xef, 0x18, 0x61, 0x50, 0x94, 0x63, 0x0a, 0x4a, 0xea, 0x04, 0x83, 0x92, 0x3e, 0x70, - 0x50, 0xbe, 0x4c, 0x01, 0xd8, 0x7d, 0x9c, 0xc0, 0x37, 0x79, 0x53, 0x62, 0x38, 0xb4, 0x42, 0x4c, - 0x31, 0x7d, 0x14, 0x0d, 0x75, 0xb4, 0xa3, 0x89, 0x62, 0xa3, 0xa4, 0xb1, 0x63, 0x7a, 0xf2, 0x0d, - 0x5f, 0xd4, 0xd2, 0x47, 0xfb, 0xa2, 0xa6, 0x7e, 0x9d, 0x0c, 0xe3, 0xa9, 0x7e, 0xc2, 0xeb, 0xb5, - 0xfd, 0xe9, 0x13, 0xdc, 0x7e, 0xf5, 0x73, 0x05, 0xcc, 0x24, 0xdb, 0x91, 0x53, 0xf7, 0xb0, 0xfb, - 0x55, 0xdc, 0x89, 0xd3, 0xfd, 0xa8, 0xfb, 0x54, 0x01, 0x67, 0x4f, 0xd9, 0x3f, 0x3c, 0xea, 0xa7, - 0xdd, 0x6b, 0x3e, 0x2d, 0xff, 0xd3, 0xe8, 0x37, 0x9e, 0xbf, 0xca, 0x0f, 0xbd, 0x78, 0x95, 0x1f, - 0x7a, 0xf9, 0x2a, 0x3f, 0xf4, 0x56, 0x27, 0xaf, 0x3c, 0xef, 0xe4, 0x95, 0x17, 0x9d, 0xbc, 0xf2, - 0xb2, 0x93, 0x57, 0xbe, 0xef, 0xe4, 0x95, 0x77, 0x7f, 0xc8, 0x0f, 0xfd, 0x37, 0xbf, 0xff, 0x1f, - 0x9f, 0x3f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x10, 0x14, 0x03, 0x76, 0x32, 0x1d, 0x00, 0x00, + 0x4f, 0x81, 0x0b, 0x37, 0x6d, 0x87, 0x3e, 0x66, 0x55, 0xa1, 0xb6, 0x66, 0x9b, 0x4b, 0x12, 0x91, + 0x38, 0xf0, 0x75, 0x30, 0xc6, 0xce, 0x21, 0x13, 0x7b, 0xb8, 0x07, 0x4f, 0x83, 0xe3, 0x44, 0x6b, + 0xec, 0x54, 0xd9, 0x80, 0xab, 0x31, 0x69, 0xad, 0xb5, 0xa0, 0x89, 0x42, 0xb2, 0x4a, 0x3c, 0x1c, + 0xe6, 0x7a, 0x38, 0x86, 0x02, 0x54, 0x78, 0x1f, 0x64, 0xdc, 0x06, 0x31, 0x24, 0x55, 0xaf, 0xf7, + 0xf5, 0xac, 0xf7, 0x42, 0xcb, 0x0d, 0x62, 0x84, 0xc5, 0x87, 0x7d, 0x21, 0x0e, 0x0b, 0x09, 0x18, + 0x71, 0x39, 0xa5, 0xf9, 0xae, 0x66, 0x4b, 0x7f, 0x3f, 0xac, 0x01, 0x91, 0x17, 0x41, 0xce, 0x89, + 0x6f, 0x24, 0xc1, 0xd5, 0xef, 0x14, 0x50, 0xd8, 0x43, 0x53, 0x27, 0xdb, 0xb8, 0x45, 0x6d, 0x07, + 0x6e, 0x80, 0x51, 0x3e, 0x72, 0xaf, 0x21, 0x43, 0x59, 0x1c, 0x7c, 0x1b, 0x39, 0x6d, 0xf5, 0x2c, + 0xcb, 0xc8, 0xb2, 0xc0, 0x40, 0x3e, 0x18, 0xdc, 0x04, 0xe3, 0xfc, 0xe7, 0x0d, 0xfb, 0xa1, 0x25, + 0xc3, 0x78, 0x60, 0xe4, 0x49, 0x76, 0x42, 0x94, 0x7d, 0x14, 0x14, 
0x02, 0xaa, 0xef, 0xa6, 0xc1, + 0xfc, 0x1e, 0x9e, 0x2d, 0xdb, 0x96, 0x49, 0x19, 0xf9, 0xe1, 0xcd, 0x58, 0xfe, 0x2f, 0x26, 0xf2, + 0xff, 0x72, 0x3f, 0xfd, 0x48, 0x3d, 0x58, 0x09, 0xf6, 0x2b, 0x15, 0xc3, 0x92, 0x01, 0x7f, 0xd5, + 0x2e, 0xf4, 0xe8, 0xc7, 0xb4, 0x00, 0x29, 0xbe, 0x2d, 0xb0, 0x05, 0x60, 0x0d, 0xbb, 0xde, 0xba, + 0x83, 0x2d, 0x57, 0x58, 0xa2, 0x75, 0x22, 0x99, 0x70, 0x75, 0x30, 0x22, 0x33, 0x0d, 0x7d, 0x4e, + 0xae, 0x02, 0xae, 0x74, 0xa1, 0xa1, 0x1e, 0x16, 0xe0, 0x6f, 0xc1, 0x88, 0x43, 0xb0, 0x6b, 0x5b, + 0xb9, 0x0c, 0xf7, 0x22, 0xa0, 0x0d, 0xe2, 0xa3, 0x48, 0xce, 0xc2, 0xdf, 0x83, 0xd1, 0x3a, 0x71, + 0x5d, 0x5c, 0x25, 0xb9, 0x61, 0x2e, 0x18, 0xd4, 0xdd, 0x55, 0x31, 0x8c, 0xfc, 0x79, 0xf5, 0x7b, + 0x05, 0x5c, 0xdc, 0x23, 0x8e, 0x2b, 0xd4, 0xf5, 0xe0, 0x66, 0x57, 0xa6, 0x6a, 0x83, 0x39, 0xc8, + 0xb4, 0x79, 0x9e, 0x06, 0x35, 0xc2, 0x1f, 0x89, 0x64, 0xe9, 0x26, 0x18, 0xa6, 0x1e, 0xa9, 0xfb, + 0x05, 0xe8, 0x6f, 0x87, 0xcc, 0xa2, 0xb0, 0xbe, 0xdf, 0x62, 0x68, 0x48, 0x80, 0xaa, 0x4f, 0xd3, + 0x7b, 0xfa, 0xc6, 0x52, 0x19, 0xbe, 0x09, 0xa6, 0xf8, 0x97, 0x3c, 0x5b, 0xc9, 0x96, 0xf4, 0xb0, + 0x6f, 0xb5, 0xd8, 0xa7, 0xb5, 0xd1, 0xcf, 0xcb, 0xa5, 0x4c, 0x95, 0x63, 0xd0, 0x28, 0x61, 0x0a, + 0x2e, 0x80, 0x6c, 0x9d, 0x5a, 0x88, 0x34, 0x6a, 0xd4, 0xc0, 0xae, 0x3c, 0xa7, 0xa6, 0x3b, 0xed, + 0x42, 0x76, 0x35, 0x1c, 0x46, 0x51, 0x19, 0xf8, 0x17, 0x90, 0xad, 0xe3, 0x47, 0x81, 0x8a, 0x38, + 0x4f, 0xce, 0x48, 0x7b, 0xd9, 0xd5, 0x70, 0x0a, 0x45, 0xe5, 0xe0, 0x3d, 0xc6, 0x06, 0x76, 0x12, + 0xbb, 0xb9, 0x0c, 0x0f, 0xf3, 0xd5, 0xc1, 0x0e, 0x6e, 0x5e, 0xfc, 0x22, 0xcc, 0xe1, 0x10, 0xc8, + 0xc7, 0x82, 0x14, 0x8c, 0x55, 0x64, 0x0d, 0xe2, 0x2c, 0xcb, 0x96, 0xfe, 0x71, 0xd8, 0xed, 0x93, + 0x30, 0xfa, 0x04, 0xa3, 0x89, 0xff, 0x85, 0x02, 0x78, 0xf5, 0xd3, 0x0c, 0xb8, 0xb4, 0x6f, 0x01, + 0x85, 0xff, 0x06, 0xd0, 0xae, 0xb8, 0xc4, 0x69, 0x11, 0xf3, 0x3f, 0xe2, 0xbe, 0xc1, 0x9a, 0x42, + 0xb6, 0x9d, 0x69, 0xfd, 0x3c, 0xcb, 0xb0, 0xbb, 0x5d, 0xb3, 0xa8, 0x87, 0x06, 0x34, 0xc0, 0x24, + 0xcb, 0x3b, 0xb1, 0x77, 0x54, 0xf6, 0x9f, 0x07, 0x4b, 0xea, 0x59, 0xd6, 0x3a, 0xac, 0x44, 0x41, + 0x50, 0x1c, 0x13, 0x2e, 0x81, 0x69, 0xd9, 0xf6, 0x24, 0xf6, 0xf2, 0x82, 0x0c, 0xf6, 0xf4, 0x72, + 0x7c, 0x1a, 0x25, 0xe5, 0x19, 0x84, 0x49, 0x5c, 0xea, 0x10, 0x33, 0x80, 0xc8, 0xc4, 0x21, 0x6e, + 0xc4, 0xa7, 0x51, 0x52, 0x1e, 0xd6, 0xc0, 0x94, 0x44, 0x95, 0x5b, 0x9b, 0x1b, 0xe6, 0xec, 0x18, + 0xb0, 0x41, 0x95, 0x27, 0x57, 0x40, 0xf7, 0xe5, 0x18, 0x16, 0x4a, 0x60, 0x43, 0x0f, 0x00, 0xc3, + 0xaf, 0xa6, 0x6e, 0x6e, 0x84, 0x5b, 0xfa, 0xe7, 0x21, 0xf9, 0x12, 0x94, 0xe5, 0xb0, 0x07, 0x08, + 0x86, 0x5c, 0x14, 0xb1, 0xa3, 0x7e, 0xa0, 0x80, 0x99, 0x64, 0x83, 0x1b, 0x5c, 0x2d, 0x94, 0x3d, + 0xaf, 0x16, 0xf7, 0xc1, 0x98, 0x68, 0x95, 0x6c, 0x47, 0x12, 0xe0, 0xcf, 0x03, 0x16, 0x3d, 0x5c, + 0x21, 0xb5, 0xb2, 0x54, 0x15, 0x74, 0xf6, 0xbf, 0x50, 0x00, 0xa9, 0x7e, 0x92, 0x01, 0x20, 0x4c, + 0x31, 0xb8, 0x18, 0x3b, 0xe5, 0xe6, 0x13, 0xa7, 0xdc, 0x4c, 0xf4, 0x9e, 0x12, 0x39, 0xd1, 0x36, + 0xc0, 0x88, 0xcd, 0x4b, 0x8f, 0x5c, 0x61, 0xa9, 0x5f, 0x30, 0x83, 0x36, 0x29, 0x40, 0xd3, 0x01, + 0x3b, 0x3b, 0x64, 0x01, 0x93, 0x68, 0xf0, 0x0e, 0xc8, 0x34, 0x6c, 0xd3, 0xef, 0x6b, 0xfa, 0xb6, + 0x84, 0x6b, 0xb6, 0xe9, 0xc6, 0x30, 0xc7, 0xd8, 0xda, 0xd9, 0x28, 0xe2, 0x38, 0xac, 0xcd, 0xf4, + 0x5f, 0x2a, 0x38, 0x45, 0xb3, 0xa5, 0xc5, 0x7e, 0x98, 0xbd, 0x1e, 0x05, 0x44, 0x30, 0xfd, 0x19, + 0x14, 0x60, 0xc2, 0x77, 0x14, 0x30, 0x6b, 0x24, 0x2f, 0xd8, 0xb9, 0xd1, 0xc1, 0xba, 0xb2, 0x7d, + 0xdf, 0x21, 0xf4, 0x73, 0x9d, 0x76, 0x61, 0xb6, 0x4b, 0x04, 0x75, 0x9b, 0x63, 0x4e, 0x12, 
0x79, + 0x1b, 0x93, 0xb5, 0xb0, 0xaf, 0x93, 0xbd, 0xae, 0xa1, 0xc2, 0x49, 0x7f, 0x06, 0x05, 0x98, 0xea, + 0x93, 0x0c, 0x98, 0x88, 0x5d, 0xf3, 0x7e, 0x0d, 0xce, 0x88, 0x84, 0x3f, 0x5a, 0xce, 0x08, 0xcc, + 0xa3, 0xe7, 0x8c, 0xc0, 0x3d, 0x51, 0xce, 0x08, 0x93, 0x27, 0xc9, 0x99, 0x88, 0x93, 0x3d, 0x38, + 0xf3, 0x65, 0xca, 0xe7, 0x8c, 0x68, 0x3a, 0x06, 0xe3, 0x8c, 0x90, 0x8d, 0x70, 0xe6, 0x6e, 0xf4, + 0x26, 0xdd, 0xa7, 0xfb, 0xd3, 0xfc, 0x08, 0x6b, 0xff, 0x6d, 0x62, 0xcb, 0xa3, 0xde, 0xae, 0x3e, + 0xde, 0x75, 0xeb, 0x36, 0xc1, 0x04, 0x6e, 0x11, 0x07, 0x57, 0x09, 0x1f, 0x96, 0xa4, 0x39, 0x28, + 0xee, 0x0c, 0xbb, 0xf4, 0x2e, 0x45, 0x70, 0x50, 0x0c, 0x95, 0x35, 0x04, 0xf2, 0xfb, 0x9e, 0x17, + 0xdc, 0xa6, 0xe5, 0x19, 0xc9, 0x1b, 0x82, 0xa5, 0xae, 0x59, 0xd4, 0x43, 0x43, 0x7d, 0x3f, 0x05, + 0x66, 0xbb, 0xde, 0x31, 0xc2, 0xa0, 0x28, 0xc7, 0x14, 0x94, 0xd4, 0x09, 0x06, 0x25, 0x7d, 0xe0, + 0xa0, 0x7c, 0x95, 0x02, 0xb0, 0xfb, 0x38, 0x81, 0x6f, 0xf1, 0xa6, 0xc4, 0x70, 0x68, 0x85, 0x98, + 0x62, 0xfa, 0x28, 0x1a, 0xea, 0x68, 0x47, 0x13, 0xc5, 0x46, 0x49, 0x63, 0xc7, 0xf4, 0xe4, 0x1b, + 0xbe, 0xa8, 0xa5, 0x8f, 0xf6, 0x45, 0x4d, 0xfd, 0x26, 0x19, 0xc6, 0x53, 0xfd, 0x84, 0xd7, 0x6b, + 0xfb, 0xd3, 0x27, 0xb8, 0xfd, 0xea, 0x17, 0x0a, 0x98, 0x49, 0xb6, 0x23, 0xa7, 0xee, 0x61, 0xf7, + 0xeb, 0xb8, 0x13, 0xa7, 0xfb, 0x51, 0xf7, 0x89, 0x02, 0xce, 0x9e, 0xb2, 0x7f, 0x78, 0xd4, 0xcf, + 0xba, 0xd7, 0x7c, 0x5a, 0xfe, 0xa7, 0xd1, 0x6f, 0x3c, 0x7b, 0x99, 0x1f, 0x7a, 0xfe, 0x32, 0x3f, + 0xf4, 0xe2, 0x65, 0x7e, 0xe8, 0xed, 0x4e, 0x5e, 0x79, 0xd6, 0xc9, 0x2b, 0xcf, 0x3b, 0x79, 0xe5, + 0x45, 0x27, 0xaf, 0xfc, 0xd0, 0xc9, 0x2b, 0xef, 0xfd, 0x98, 0x1f, 0x7a, 0x2d, 0xbf, 0xff, 0x1f, + 0x9f, 0xbf, 0x04, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x27, 0xde, 0xc0, 0x19, 0x1d, 0x00, 0x00, } func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto index 5b2fe9442a..c88fc1fe26 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto @@ -136,6 +136,7 @@ message HPAScalingRules { // policies is a list of potential scaling polices which can be used during scaling. // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid // +optional + // +listType=atomic repeated HPAScalingPolicy policies = 2; } @@ -146,7 +147,7 @@ message HorizontalPodAutoscaler { // metadata is the standard object metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification for the behaviour of the autoscaler. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. @@ -189,7 +190,7 @@ message HorizontalPodAutoscalerCondition { // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // reason is the reason for the condition's last transition. // +optional @@ -205,7 +206,7 @@ message HorizontalPodAutoscalerCondition { message HorizontalPodAutoscalerList { // metadata is the standard list metadata. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of horizontal pod autoscaler objects. repeated HorizontalPodAutoscaler items = 2; @@ -238,6 +239,7 @@ message HorizontalPodAutoscalerSpec { // more information about how each type of metric must respond. // If not set, the default metric will be set to 80% average CPU utilization. // +optional + // +listType=atomic repeated MetricSpec metrics = 4; // behavior configures the scaling behavior of the target @@ -256,7 +258,7 @@ message HorizontalPodAutoscalerStatus { // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, // used by the autoscaler to control how often the number of pods is changed. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; // currentReplicas is current number of replicas of pods managed by this autoscaler, // as last seen by the autoscaler. @@ -268,11 +270,13 @@ message HorizontalPodAutoscalerStatus { // currentMetrics is the last read state of the metrics used by this autoscaler. // +optional + // +listType=atomic repeated MetricStatus currentMetrics = 5; // conditions is the set of conditions required for this autoscaler to scale its target, // and indicates whether or not those conditions are met. // +optional + // +listType=atomic repeated HorizontalPodAutoscalerCondition conditions = 6; } @@ -285,7 +289,7 @@ message MetricIdentifier { // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; } // MetricSpec specifies how to scale based on a single metric @@ -385,12 +389,12 @@ message MetricTarget { // value is the target value of the metric (as a quantity). // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3; // averageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of @@ -404,12 +408,12 @@ message MetricTarget { message MetricValueStatus { // value is the current value of the metric (as a quantity). 
// +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 1; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 1; // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2; // averageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/vendor/k8s.io/api/autoscaling/v2beta2/types.go index b0b7681c0e..2fee0b8a0f 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types.go @@ -76,6 +76,7 @@ type HorizontalPodAutoscalerSpec struct { // more information about how each type of metric must respond. // If not set, the default metric will be set to 80% average CPU utilization. // +optional + // +listType=atomic Metrics []MetricSpec `json:"metrics,omitempty" protobuf:"bytes,4,rep,name=metrics"` // behavior configures the scaling behavior of the target @@ -199,6 +200,7 @@ type HPAScalingRules struct { // policies is a list of potential scaling polices which can be used during scaling. // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid // +optional + // +listType=atomic Policies []HPAScalingPolicy `json:"policies,omitempty" protobuf:"bytes,2,rep,name=policies"` } @@ -393,11 +395,13 @@ type HorizontalPodAutoscalerStatus struct { // currentMetrics is the last read state of the metrics used by this autoscaler. // +optional + // +listType=atomic CurrentMetrics []MetricStatus `json:"currentMetrics" protobuf:"bytes,5,rep,name=currentMetrics"` // conditions is the set of conditions required for this autoscaler to scale its target, // and indicates whether or not those conditions are met. // +optional + // +listType=atomic Conditions []HorizontalPodAutoscalerCondition `json:"conditions" protobuf:"bytes,6,rep,name=conditions"` } diff --git a/vendor/k8s.io/api/batch/v1/doc.go b/vendor/k8s.io/api/batch/v1/doc.go index c4a8db6e78..cb5cbb6002 100644 --- a/vendor/k8s.io/api/batch/v1/doc.go +++ b/vendor/k8s.io/api/batch/v1/doc.go @@ -17,5 +17,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true package v1 // import "k8s.io/api/batch/v1" diff --git a/vendor/k8s.io/api/batch/v1/generated.pb.go b/vendor/k8s.io/api/batch/v1/generated.pb.go index 59a7482a0d..6108a60839 100644 --- a/vendor/k8s.io/api/batch/v1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto +// source: k8s.io/api/batch/v1/generated.proto package v1 @@ -51,7 +51,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *CronJob) Reset() { *m = CronJob{} } func (*CronJob) ProtoMessage() {} func (*CronJob) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{0} + return fileDescriptor_79228dc2c4001a22, []int{0} } func (m *CronJob) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -79,7 +79,7 @@ var xxx_messageInfo_CronJob proto.InternalMessageInfo func (m *CronJobList) Reset() { *m = CronJobList{} } func (*CronJobList) ProtoMessage() {} func (*CronJobList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{1} + return fileDescriptor_79228dc2c4001a22, []int{1} } func (m *CronJobList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,7 +107,7 @@ var xxx_messageInfo_CronJobList proto.InternalMessageInfo func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } func (*CronJobSpec) ProtoMessage() {} func (*CronJobSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{2} + return fileDescriptor_79228dc2c4001a22, []int{2} } func (m *CronJobSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,7 +135,7 @@ var xxx_messageInfo_CronJobSpec proto.InternalMessageInfo func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } func (*CronJobStatus) ProtoMessage() {} func (*CronJobStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{3} + return fileDescriptor_79228dc2c4001a22, []int{3} } func (m *CronJobStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -163,7 +163,7 @@ var xxx_messageInfo_CronJobStatus proto.InternalMessageInfo func (m *Job) Reset() { *m = Job{} } func (*Job) ProtoMessage() {} func (*Job) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{4} + return fileDescriptor_79228dc2c4001a22, []int{4} } func (m *Job) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -191,7 +191,7 @@ var xxx_messageInfo_Job proto.InternalMessageInfo func (m *JobCondition) Reset() { *m = JobCondition{} } func (*JobCondition) ProtoMessage() {} func (*JobCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{5} + return fileDescriptor_79228dc2c4001a22, []int{5} } func (m *JobCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -219,7 +219,7 @@ var xxx_messageInfo_JobCondition proto.InternalMessageInfo func (m *JobList) Reset() { *m = JobList{} } func (*JobList) ProtoMessage() {} func (*JobList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{6} + return fileDescriptor_79228dc2c4001a22, []int{6} } func (m *JobList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -247,7 +247,7 @@ var xxx_messageInfo_JobList proto.InternalMessageInfo func (m *JobSpec) Reset() { *m = JobSpec{} } func (*JobSpec) ProtoMessage() {} func (*JobSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{7} + return fileDescriptor_79228dc2c4001a22, []int{7} } func (m *JobSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -275,7 +275,7 @@ var xxx_messageInfo_JobSpec proto.InternalMessageInfo func (m *JobStatus) Reset() { *m = JobStatus{} } func (*JobStatus) ProtoMessage() {} func (*JobStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{8} + return fileDescriptor_79228dc2c4001a22, 
[]int{8} } func (m *JobStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -303,7 +303,7 @@ var xxx_messageInfo_JobStatus proto.InternalMessageInfo func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } func (*JobTemplateSpec) ProtoMessage() {} func (*JobTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{9} + return fileDescriptor_79228dc2c4001a22, []int{9} } func (m *JobTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -331,7 +331,7 @@ var xxx_messageInfo_JobTemplateSpec proto.InternalMessageInfo func (m *PodFailurePolicy) Reset() { *m = PodFailurePolicy{} } func (*PodFailurePolicy) ProtoMessage() {} func (*PodFailurePolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{10} + return fileDescriptor_79228dc2c4001a22, []int{10} } func (m *PodFailurePolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -361,7 +361,7 @@ func (m *PodFailurePolicyOnExitCodesRequirement) Reset() { } func (*PodFailurePolicyOnExitCodesRequirement) ProtoMessage() {} func (*PodFailurePolicyOnExitCodesRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{11} + return fileDescriptor_79228dc2c4001a22, []int{11} } func (m *PodFailurePolicyOnExitCodesRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -391,7 +391,7 @@ func (m *PodFailurePolicyOnPodConditionsPattern) Reset() { } func (*PodFailurePolicyOnPodConditionsPattern) ProtoMessage() {} func (*PodFailurePolicyOnPodConditionsPattern) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{12} + return fileDescriptor_79228dc2c4001a22, []int{12} } func (m *PodFailurePolicyOnPodConditionsPattern) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -419,7 +419,7 @@ var xxx_messageInfo_PodFailurePolicyOnPodConditionsPattern proto.InternalMessage func (m *PodFailurePolicyRule) Reset() { *m = PodFailurePolicyRule{} } func (*PodFailurePolicyRule) ProtoMessage() {} func (*PodFailurePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{13} + return fileDescriptor_79228dc2c4001a22, []int{13} } func (m *PodFailurePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -444,10 +444,66 @@ func (m *PodFailurePolicyRule) XXX_DiscardUnknown() { var xxx_messageInfo_PodFailurePolicyRule proto.InternalMessageInfo +func (m *SuccessPolicy) Reset() { *m = SuccessPolicy{} } +func (*SuccessPolicy) ProtoMessage() {} +func (*SuccessPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_79228dc2c4001a22, []int{14} +} +func (m *SuccessPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SuccessPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SuccessPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_SuccessPolicy.Merge(m, src) +} +func (m *SuccessPolicy) XXX_Size() int { + return m.Size() +} +func (m *SuccessPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_SuccessPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_SuccessPolicy proto.InternalMessageInfo + +func (m *SuccessPolicyRule) Reset() { *m = SuccessPolicyRule{} } +func (*SuccessPolicyRule) ProtoMessage() {} +func (*SuccessPolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_79228dc2c4001a22, []int{15} +} +func (m *SuccessPolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} 
+func (m *SuccessPolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SuccessPolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_SuccessPolicyRule.Merge(m, src) +} +func (m *SuccessPolicyRule) XXX_Size() int { + return m.Size() +} +func (m *SuccessPolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_SuccessPolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_SuccessPolicyRule proto.InternalMessageInfo + func (m *UncountedTerminatedPods) Reset() { *m = UncountedTerminatedPods{} } func (*UncountedTerminatedPods) ProtoMessage() {} func (*UncountedTerminatedPods) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{14} + return fileDescriptor_79228dc2c4001a22, []int{16} } func (m *UncountedTerminatedPods) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -487,128 +543,135 @@ func init() { proto.RegisterType((*PodFailurePolicyOnExitCodesRequirement)(nil), "k8s.io.api.batch.v1.PodFailurePolicyOnExitCodesRequirement") proto.RegisterType((*PodFailurePolicyOnPodConditionsPattern)(nil), "k8s.io.api.batch.v1.PodFailurePolicyOnPodConditionsPattern") proto.RegisterType((*PodFailurePolicyRule)(nil), "k8s.io.api.batch.v1.PodFailurePolicyRule") + proto.RegisterType((*SuccessPolicy)(nil), "k8s.io.api.batch.v1.SuccessPolicy") + proto.RegisterType((*SuccessPolicyRule)(nil), "k8s.io.api.batch.v1.SuccessPolicyRule") proto.RegisterType((*UncountedTerminatedPods)(nil), "k8s.io.api.batch.v1.UncountedTerminatedPods") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto", fileDescriptor_3b52da57c93de713) -} - -var fileDescriptor_3b52da57c93de713 = []byte{ - // 1797 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xcd, 0x6f, 0x23, 0x49, - 0x15, 0x8f, 0x93, 0x38, 0xb1, 0xcb, 0xf9, 0xf0, 0xd4, 0x64, 0x66, 0x4c, 0x58, 0xb9, 0xb3, 0x9e, - 0xdd, 0x55, 0x16, 0x2d, 0xed, 0x9d, 0xec, 0x88, 0xe5, 0x5b, 0x3b, 0x9d, 0x61, 0x96, 0x09, 0xce, - 0x8e, 0x29, 0x67, 0x40, 0x5a, 0x16, 0x44, 0xb9, 0xbb, 0xec, 0xf4, 0xa6, 0xdd, 0xd5, 0x74, 0x55, - 0x47, 0x93, 0x0b, 0x42, 0xe2, 0x0f, 0x80, 0xbf, 0x82, 0x23, 0x17, 0x38, 0xc3, 0x0d, 0xcd, 0x71, - 0xc5, 0x69, 0xc5, 0xa1, 0xc5, 0x34, 0x7f, 0x00, 0xf7, 0x20, 0x24, 0x54, 0xd5, 0xe5, 0xfe, 0x72, - 0x77, 0xc8, 0xac, 0xc4, 0x88, 0x5b, 0xfa, 0xbd, 0xdf, 0xfb, 0xd5, 0xc7, 0x7b, 0xf5, 0x7b, 0x2f, - 0x06, 0xdf, 0x3e, 0xfb, 0x3a, 0xd3, 0x6d, 0xda, 0x3f, 0x0b, 0xc6, 0xc4, 0x77, 0x09, 0x27, 0xac, - 0x7f, 0x4e, 0x5c, 0x8b, 0xfa, 0x7d, 0xe5, 0xc0, 0x9e, 0xdd, 0x1f, 0x63, 0x6e, 0x9e, 0xf6, 0xcf, - 0xef, 0xf5, 0xa7, 0xc4, 0x25, 0x3e, 0xe6, 0xc4, 0xd2, 0x3d, 0x9f, 0x72, 0x0a, 0x6f, 0xc6, 0x20, - 0x1d, 0x7b, 0xb6, 0x2e, 0x41, 0xfa, 0xf9, 0xbd, 0xdd, 0xaf, 0x4e, 0x6d, 0x7e, 0x1a, 0x8c, 0x75, - 0x93, 0xce, 0xfa, 0x53, 0x3a, 0xa5, 0x7d, 0x89, 0x1d, 0x07, 0x13, 0xf9, 0x25, 0x3f, 0xe4, 0x5f, - 0x31, 0xc7, 0x6e, 0x2f, 0xb3, 0x90, 0x49, 0x7d, 0x52, 0xb2, 0xce, 0xee, 0xfd, 0x14, 0x33, 0xc3, - 0xe6, 0xa9, 0xed, 0x12, 0xff, 0xa2, 0xef, 0x9d, 0x4d, 0x85, 0x81, 0xf5, 0x67, 0x84, 0xe3, 0xb2, - 0xa8, 0x7e, 0x55, 0x94, 0x1f, 0xb8, 0xdc, 0x9e, 0x91, 0x85, 0x80, 0xaf, 0xfd, 0xb7, 0x00, 0x66, - 0x9e, 0x92, 0x19, 0x2e, 0xc6, 0xf5, 0xfe, 0x55, 0x03, 0xeb, 0x87, 0x3e, 0x75, 0x8f, 0xe8, 0x18, - 0xfe, 0x1c, 0x34, 0xc4, 0x7e, 0x2c, 0xcc, 0x71, 0xa7, 0xb6, 0x57, 0xdb, 0x6f, 0x1d, 0xbc, 0xab, - 0xa7, 0xb7, 0x94, 0xd0, 0xea, 0xde, 0xd9, 
0x54, 0x18, 0x98, 0x2e, 0xd0, 0xfa, 0xf9, 0x3d, 0xfd, - 0xc9, 0xf8, 0x53, 0x62, 0xf2, 0x63, 0xc2, 0xb1, 0x01, 0x9f, 0x87, 0xda, 0x52, 0x14, 0x6a, 0x20, - 0xb5, 0xa1, 0x84, 0x15, 0x1a, 0x60, 0x95, 0x79, 0xc4, 0xec, 0x2c, 0x4b, 0xf6, 0x3d, 0xbd, 0x24, - 0x07, 0xba, 0xda, 0xcd, 0xc8, 0x23, 0xa6, 0xb1, 0xa1, 0xd8, 0x56, 0xc5, 0x17, 0x92, 0xb1, 0xf0, - 0x08, 0xac, 0x31, 0x8e, 0x79, 0xc0, 0x3a, 0x2b, 0x92, 0xa5, 0x77, 0x25, 0x8b, 0x44, 0x1a, 0x5b, - 0x8a, 0x67, 0x2d, 0xfe, 0x46, 0x8a, 0xa1, 0xf7, 0xfb, 0x1a, 0x68, 0x29, 0xe4, 0xc0, 0x66, 0x1c, - 0x7e, 0xb2, 0x70, 0x03, 0xfa, 0xf5, 0x6e, 0x40, 0x44, 0xcb, 0xf3, 0xb7, 0xd5, 0x4a, 0x8d, 0xb9, - 0x25, 0x73, 0xfa, 0x07, 0xa0, 0x6e, 0x73, 0x32, 0x63, 0x9d, 0xe5, 0xbd, 0x95, 0xfd, 0xd6, 0xc1, - 0x6b, 0x57, 0x6d, 0xdc, 0xd8, 0x54, 0x44, 0xf5, 0xc7, 0x22, 0x04, 0xc5, 0x91, 0xbd, 0xbf, 0xae, - 0x26, 0x1b, 0x16, 0x57, 0x02, 0xdf, 0x01, 0x0d, 0x91, 0x58, 0x2b, 0x70, 0x88, 0xdc, 0x70, 0x33, - 0xdd, 0xc0, 0x48, 0xd9, 0x51, 0x82, 0x80, 0xfb, 0xa0, 0x21, 0x6a, 0xe1, 0x63, 0xea, 0x92, 0x4e, - 0x43, 0xa2, 0x37, 0x04, 0xf2, 0x44, 0xd9, 0x50, 0xe2, 0x85, 0x4f, 0xc1, 0x1d, 0xc6, 0xb1, 0xcf, - 0x6d, 0x77, 0xfa, 0x90, 0x60, 0xcb, 0xb1, 0x5d, 0x32, 0x22, 0x26, 0x75, 0x2d, 0x26, 0x73, 0xb7, - 0x62, 0x7c, 0x39, 0x0a, 0xb5, 0x3b, 0xa3, 0x72, 0x08, 0xaa, 0x8a, 0x85, 0x9f, 0x80, 0x1b, 0x26, - 0x75, 0xcd, 0xc0, 0xf7, 0x89, 0x6b, 0x5e, 0x0c, 0xa9, 0x63, 0x9b, 0x17, 0x32, 0x8d, 0x4d, 0x43, - 0x57, 0xfb, 0xbe, 0x71, 0x58, 0x04, 0x5c, 0x96, 0x19, 0xd1, 0x22, 0x11, 0x7c, 0x13, 0xac, 0xb3, - 0x80, 0x79, 0xc4, 0xb5, 0x3a, 0xab, 0x7b, 0xb5, 0xfd, 0x86, 0xd1, 0x8a, 0x42, 0x6d, 0x7d, 0x14, - 0x9b, 0xd0, 0xdc, 0x07, 0x7f, 0x02, 0x5a, 0x9f, 0xd2, 0xf1, 0x09, 0x99, 0x79, 0x0e, 0xe6, 0xa4, - 0x53, 0x97, 0x79, 0x7e, 0xa3, 0x34, 0x19, 0x47, 0x29, 0x4e, 0xd6, 0xe3, 0x4d, 0xb5, 0xc9, 0x56, - 0xc6, 0x81, 0xb2, 0x6c, 0xf0, 0x67, 0x60, 0x97, 0x05, 0xa6, 0x49, 0x18, 0x9b, 0x04, 0xce, 0x11, - 0x1d, 0xb3, 0xef, 0xdb, 0x8c, 0x53, 0xff, 0x62, 0x60, 0xcf, 0x6c, 0xde, 0x59, 0xdb, 0xab, 0xed, - 0xd7, 0x8d, 0x6e, 0x14, 0x6a, 0xbb, 0xa3, 0x4a, 0x14, 0xba, 0x82, 0x01, 0x22, 0x70, 0x7b, 0x82, - 0x6d, 0x87, 0x58, 0x0b, 0xdc, 0xeb, 0x92, 0x7b, 0x37, 0x0a, 0xb5, 0xdb, 0x8f, 0x4a, 0x11, 0xa8, - 0x22, 0xb2, 0xf7, 0xa7, 0x65, 0xb0, 0x99, 0x7b, 0x2f, 0xf0, 0x07, 0x60, 0x0d, 0x9b, 0xdc, 0x3e, - 0x17, 0x45, 0x25, 0x4a, 0xf5, 0x6e, 0xf6, 0x76, 0x84, 0xd2, 0xa5, 0xaf, 0x1e, 0x91, 0x09, 0x11, - 0x49, 0x20, 0xe9, 0x23, 0x7b, 0x20, 0x43, 0x91, 0xa2, 0x80, 0x0e, 0x68, 0x3b, 0x98, 0xf1, 0x79, - 0x3d, 0x8a, 0x6a, 0x93, 0xf9, 0x69, 0x1d, 0x7c, 0xe5, 0x7a, 0x8f, 0x4b, 0x44, 0x18, 0x3b, 0x51, - 0xa8, 0xb5, 0x07, 0x05, 0x1e, 0xb4, 0xc0, 0x0c, 0x7d, 0x00, 0xa5, 0x2d, 0xb9, 0x42, 0xb9, 0x5e, - 0xfd, 0xa5, 0xd7, 0xbb, 0x1d, 0x85, 0x1a, 0x1c, 0x2c, 0x30, 0xa1, 0x12, 0xf6, 0xde, 0x3f, 0x6b, - 0x60, 0xe5, 0xd5, 0x08, 0xe8, 0x77, 0x73, 0x02, 0xfa, 0x5a, 0x55, 0xd1, 0x56, 0x8a, 0xe7, 0xa3, - 0x82, 0x78, 0x76, 0x2b, 0x19, 0xae, 0x16, 0xce, 0xbf, 0xac, 0x80, 0x8d, 0x23, 0x3a, 0x3e, 0xa4, - 0xae, 0x65, 0x73, 0x9b, 0xba, 0xf0, 0x3e, 0x58, 0xe5, 0x17, 0xde, 0x5c, 0x84, 0xf6, 0xe6, 0x4b, - 0x9f, 0x5c, 0x78, 0xe4, 0x32, 0xd4, 0xda, 0x59, 0xac, 0xb0, 0x21, 0x89, 0x86, 0x83, 0x64, 0x3b, - 0xcb, 0x32, 0xee, 0x7e, 0x7e, 0xb9, 0xcb, 0x50, 0x2b, 0x69, 0xb1, 0x7a, 0xc2, 0x94, 0xdf, 0x14, - 0x9c, 0x82, 0x4d, 0x91, 0x9c, 0xa1, 0x4f, 0xc7, 0x71, 0x95, 0xad, 0xbc, 0x74, 0xd6, 0x6f, 0xa9, - 0x0d, 0x6c, 0x0e, 0xb2, 0x44, 0x28, 0xcf, 0x0b, 0xcf, 0xe3, 0x1a, 0x3b, 0xf1, 0xb1, 0xcb, 0xe2, - 0x23, 0x7d, 0xb1, 0x9a, 0xde, 0x55, 0xab, 0xc9, 0x3a, 0xcb, 0xb3, 
0xa1, 0x92, 0x15, 0xe0, 0x5b, - 0x60, 0xcd, 0x27, 0x98, 0x51, 0x57, 0xd6, 0x73, 0x33, 0xcd, 0x0e, 0x92, 0x56, 0xa4, 0xbc, 0xf0, - 0x6d, 0xb0, 0x3e, 0x23, 0x8c, 0xe1, 0x29, 0x91, 0x8a, 0xd3, 0x34, 0xb6, 0x15, 0x70, 0xfd, 0x38, - 0x36, 0xa3, 0xb9, 0xbf, 0xf7, 0xbb, 0x1a, 0x58, 0x7f, 0x35, 0xdd, 0xef, 0x3b, 0xf9, 0xee, 0xd7, - 0xa9, 0xaa, 0xbc, 0x8a, 0xce, 0xf7, 0x9b, 0x86, 0xdc, 0xa8, 0xec, 0x7a, 0xf7, 0x40, 0xcb, 0xc3, - 0x3e, 0x76, 0x1c, 0xe2, 0xd8, 0x6c, 0x26, 0xf7, 0x5a, 0x37, 0xb6, 0x85, 0x2e, 0x0f, 0x53, 0x33, - 0xca, 0x62, 0x44, 0x88, 0x49, 0x67, 0x9e, 0x43, 0xc4, 0x65, 0xc6, 0xe5, 0xa6, 0x42, 0x0e, 0x53, - 0x33, 0xca, 0x62, 0xe0, 0x13, 0x70, 0x2b, 0x56, 0xb0, 0x62, 0x07, 0x5c, 0x91, 0x1d, 0xf0, 0x4b, - 0x51, 0xa8, 0xdd, 0x7a, 0x50, 0x06, 0x40, 0xe5, 0x71, 0x70, 0x0a, 0xda, 0x1e, 0xb5, 0x84, 0x38, - 0x07, 0x3e, 0x51, 0xcd, 0xaf, 0x25, 0xef, 0xf9, 0xcd, 0xd2, 0xcb, 0x18, 0x16, 0xc0, 0xb1, 0x06, - 0x16, 0xad, 0x68, 0x81, 0x14, 0xde, 0x07, 0x1b, 0x63, 0x6c, 0x9e, 0xd1, 0xc9, 0x24, 0xdb, 0x1a, - 0xda, 0x51, 0xa8, 0x6d, 0x18, 0x19, 0x3b, 0xca, 0xa1, 0xe0, 0x00, 0xec, 0x64, 0xbf, 0x87, 0xc4, - 0x7f, 0xec, 0x5a, 0xe4, 0x59, 0x67, 0x43, 0x46, 0x77, 0xa2, 0x50, 0xdb, 0x31, 0x4a, 0xfc, 0xa8, - 0x34, 0x0a, 0x7e, 0x00, 0xda, 0x33, 0xfc, 0x2c, 0xee, 0x44, 0xd2, 0x42, 0x58, 0x67, 0x53, 0x32, - 0xc9, 0x53, 0x1c, 0x17, 0x7c, 0x68, 0x01, 0x0d, 0x7f, 0x0a, 0x1a, 0x8c, 0x38, 0xc4, 0xe4, 0xd4, - 0x57, 0x6f, 0xeb, 0xbd, 0x6b, 0x96, 0x23, 0x1e, 0x13, 0x67, 0xa4, 0x42, 0xe3, 0x11, 0x67, 0xfe, - 0x85, 0x12, 0x4a, 0xf8, 0x4d, 0xb0, 0x35, 0xc3, 0x6e, 0x80, 0x13, 0xa4, 0x7c, 0x54, 0x0d, 0x03, - 0x46, 0xa1, 0xb6, 0x75, 0x9c, 0xf3, 0xa0, 0x02, 0x12, 0xfe, 0x10, 0x34, 0xf8, 0x7c, 0x7e, 0x58, - 0x93, 0x5b, 0x2b, 0xed, 0x90, 0x43, 0x6a, 0xe5, 0xc6, 0x87, 0xe4, 0x79, 0x24, 0xb3, 0x43, 0x42, - 0x23, 0x26, 0x2e, 0xce, 0x1d, 0x55, 0x2a, 0x0f, 0x26, 0x9c, 0xf8, 0x8f, 0x6c, 0xd7, 0x66, 0xa7, - 0xc4, 0x92, 0xa3, 0x5a, 0x3d, 0x9e, 0xb8, 0x4e, 0x4e, 0x06, 0x65, 0x10, 0x54, 0x15, 0x0b, 0x07, - 0x60, 0x2b, 0xad, 0xe9, 0x63, 0x6a, 0x91, 0x4e, 0x53, 0x2a, 0xc2, 0x1b, 0xe2, 0x94, 0x87, 0x39, - 0xcf, 0xe5, 0x82, 0x05, 0x15, 0x62, 0xb3, 0x13, 0x16, 0xb8, 0x62, 0xc2, 0xb2, 0xc0, 0x8e, 0x47, - 0x2d, 0x44, 0x3c, 0x07, 0x9b, 0x64, 0x46, 0x5c, 0xae, 0x8a, 0x7d, 0x4b, 0x2e, 0xfd, 0xae, 0xa8, - 0xa4, 0x61, 0x89, 0xff, 0xb2, 0xc2, 0x8e, 0x4a, 0xd9, 0x7a, 0xff, 0xae, 0x83, 0x66, 0x3a, 0xb2, - 0x3c, 0x05, 0xc0, 0x9c, 0xf7, 0x05, 0xa6, 0xc6, 0x96, 0xd7, 0xab, 0x34, 0x26, 0xe9, 0x20, 0x69, - 0xbb, 0x4d, 0x4c, 0x0c, 0x65, 0x88, 0xe0, 0x8f, 0x41, 0x53, 0x0e, 0xb3, 0x52, 0xe1, 0x97, 0x5f, - 0x5a, 0xe1, 0x37, 0xa3, 0x50, 0x6b, 0x8e, 0xe6, 0x04, 0x28, 0xe5, 0x82, 0x93, 0x6c, 0x62, 0xbe, - 0x60, 0xb7, 0x82, 0xf9, 0x24, 0xca, 0x25, 0x0a, 0xac, 0xa2, 0x67, 0xa8, 0x51, 0x6e, 0x55, 0x96, - 0x51, 0xd5, 0x94, 0xd6, 0x07, 0x4d, 0x39, 0x76, 0x12, 0x8b, 0x58, 0xf2, 0x25, 0xd4, 0x8d, 0x1b, - 0x0a, 0xda, 0x1c, 0xcd, 0x1d, 0x28, 0xc5, 0x08, 0xe2, 0x78, 0x9e, 0x54, 0x53, 0x6d, 0x42, 0x1c, - 0xbf, 0x62, 0xa4, 0xbc, 0x42, 0x79, 0x39, 0xf1, 0x67, 0xb6, 0x8b, 0xc5, 0x7f, 0x04, 0x52, 0xf0, - 0x94, 0xf2, 0x9e, 0xa4, 0x66, 0x94, 0xc5, 0xc0, 0x87, 0xa0, 0xad, 0x4e, 0x91, 0x6a, 0xc7, 0xba, - 0xac, 0x9d, 0x8e, 0x5a, 0xa4, 0x7d, 0x58, 0xf0, 0xa3, 0x85, 0x08, 0xf8, 0x3e, 0xd8, 0x9c, 0xe4, - 0xe4, 0x07, 0x48, 0x8a, 0x1b, 0xa2, 0xbd, 0xe7, 0xb5, 0x27, 0x8f, 0x83, 0xbf, 0xae, 0x81, 0x3b, - 0x81, 0x6b, 0xd2, 0xc0, 0xe5, 0xc4, 0x9a, 0x6f, 0x92, 0x58, 0x43, 0x6a, 0x31, 0xf9, 0x16, 0x5b, - 0x07, 0xef, 0x94, 0x16, 0xd6, 0xd3, 0xf2, 0x98, 0xf8, 0xe5, 0x56, 0x38, 0x51, 0xd5, 0x4a, 
0x50, - 0x03, 0x75, 0x9f, 0x60, 0xeb, 0x42, 0x3e, 0xd8, 0xba, 0xd1, 0x14, 0x1d, 0x11, 0x09, 0x03, 0x8a, - 0xed, 0xbd, 0x3f, 0xd4, 0xc0, 0x76, 0xe1, 0x1f, 0x94, 0xff, 0xff, 0x09, 0xb4, 0x37, 0x06, 0x0b, - 0x1d, 0x0c, 0x7e, 0x04, 0xea, 0x7e, 0xe0, 0x90, 0xf9, 0xb3, 0x7d, 0xfb, 0x5a, 0xdd, 0x10, 0x05, - 0x0e, 0x49, 0x67, 0x05, 0xf1, 0xc5, 0x50, 0x4c, 0xd3, 0xfb, 0x5b, 0x0d, 0xbc, 0x55, 0x84, 0x3f, - 0x71, 0xbf, 0xf7, 0xcc, 0xe6, 0x87, 0xd4, 0x22, 0x0c, 0x91, 0x5f, 0x04, 0xb6, 0x2f, 0xa5, 0x44, - 0x14, 0x89, 0x49, 0x5d, 0x8e, 0xc5, 0xb5, 0x7c, 0x84, 0x67, 0xf3, 0x01, 0x56, 0x16, 0xc9, 0x61, - 0xd6, 0x81, 0xf2, 0x38, 0x38, 0x02, 0x0d, 0xea, 0x11, 0x1f, 0x8b, 0xc6, 0x11, 0x0f, 0xaf, 0xef, - 0xcf, 0xd5, 0xfd, 0x89, 0xb2, 0x5f, 0x86, 0xda, 0xdd, 0x2b, 0xb6, 0x31, 0x87, 0xa1, 0x84, 0x08, - 0xf6, 0xc0, 0xda, 0x39, 0x76, 0x02, 0x22, 0x66, 0x8c, 0x95, 0xfd, 0xba, 0x01, 0xc4, 0x7b, 0xfa, - 0x91, 0xb4, 0x20, 0xe5, 0xe9, 0xfd, 0xb9, 0xf4, 0x70, 0x43, 0x6a, 0xa5, 0x0a, 0x36, 0xc4, 0x9c, - 0x13, 0xdf, 0x85, 0x1f, 0xe6, 0x86, 0xf2, 0xf7, 0x0a, 0x43, 0xf9, 0xdd, 0x92, 0xd1, 0x3a, 0x4b, - 0xf3, 0xbf, 0x9a, 0xd3, 0x7b, 0xcf, 0x97, 0xc1, 0x4e, 0x59, 0x36, 0xe1, 0x07, 0xb1, 0x56, 0x51, - 0x57, 0xed, 0x78, 0x3f, 0xab, 0x55, 0xd4, 0xbd, 0x0c, 0xb5, 0xdb, 0xc5, 0xb8, 0xd8, 0x83, 0x54, - 0x1c, 0x74, 0x41, 0x8b, 0xa6, 0x37, 0xac, 0x8a, 0xf4, 0x5b, 0xd7, 0xaa, 0xa7, 0xf2, 0x02, 0x89, - 0x95, 0x2a, 0xeb, 0xcb, 0x2e, 0x00, 0x7f, 0x09, 0xb6, 0x69, 0xfe, 0xee, 0x65, 0xe6, 0xae, 0xbf, - 0x66, 0x59, 0xde, 0x8c, 0x3b, 0xea, 0xdc, 0xdb, 0x05, 0x3f, 0x2a, 0x2e, 0xd6, 0xfb, 0x63, 0x0d, - 0x54, 0x29, 0x0b, 0x1c, 0x66, 0x15, 0x5d, 0xbc, 0xac, 0xa6, 0x71, 0x90, 0x53, 0xf3, 0xcb, 0x50, - 0x7b, 0xbd, 0xea, 0x67, 0x43, 0x91, 0x76, 0xa6, 0x3f, 0x7d, 0xfc, 0x30, 0x2b, 0xf9, 0x1f, 0x26, - 0x92, 0xbf, 0x2c, 0xe9, 0xfa, 0xa9, 0xdc, 0x5f, 0x8f, 0x4b, 0x85, 0x1b, 0xdf, 0x78, 0xfe, 0xa2, - 0xbb, 0xf4, 0xd9, 0x8b, 0xee, 0xd2, 0xe7, 0x2f, 0xba, 0x4b, 0xbf, 0x8a, 0xba, 0xb5, 0xe7, 0x51, - 0xb7, 0xf6, 0x59, 0xd4, 0xad, 0x7d, 0x1e, 0x75, 0x6b, 0x7f, 0x8f, 0xba, 0xb5, 0xdf, 0xfe, 0xa3, - 0xbb, 0xf4, 0xf1, 0xcd, 0x92, 0xdf, 0x71, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0x43, 0xdf, 0xa6, - 0x7c, 0xf6, 0x15, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/batch/v1/generated.proto", fileDescriptor_79228dc2c4001a22) +} + +var fileDescriptor_79228dc2c4001a22 = []byte{ + // 1882 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xcd, 0x6f, 0xdb, 0xc8, + 0x15, 0x37, 0x6d, 0xcb, 0x96, 0x46, 0xfe, 0x90, 0x27, 0x4e, 0xa2, 0xba, 0x0b, 0xd1, 0xab, 0xec, + 0x06, 0xde, 0x76, 0x2b, 0x6d, 0xbc, 0x41, 0xb7, 0x1f, 0x68, 0xb1, 0xa1, 0xd2, 0x6c, 0xe3, 0x95, + 0x37, 0xea, 0xc8, 0x69, 0x81, 0xdd, 0xb4, 0xe8, 0x88, 0x1c, 0xc9, 0xdc, 0x50, 0x1c, 0x96, 0x1c, + 0x1a, 0xf1, 0xa5, 0x28, 0xd0, 0x7f, 0xa0, 0x3d, 0xf6, 0x1f, 0xe8, 0xb1, 0x97, 0xf6, 0xdc, 0xde, + 0x8a, 0x1c, 0x17, 0x3d, 0x2d, 0x7a, 0x20, 0x1a, 0xf6, 0x0f, 0xe8, 0xdd, 0x45, 0x81, 0x62, 0x86, + 0xc3, 0x4f, 0x91, 0x5e, 0x67, 0x81, 0x06, 0xbd, 0x89, 0xef, 0xfd, 0xde, 0x6f, 0x1e, 0xe7, 0x7d, + 0x52, 0xe0, 0xd6, 0xd3, 0x6f, 0x79, 0x3d, 0x93, 0xf6, 0xb1, 0x63, 0xf6, 0x27, 0x98, 0xe9, 0xa7, + 0xfd, 0xb3, 0x3b, 0xfd, 0x19, 0xb1, 0x89, 0x8b, 0x19, 0x31, 0x7a, 0x8e, 0x4b, 0x19, 0x85, 0xd7, + 0x22, 0x50, 0x0f, 0x3b, 0x66, 0x4f, 0x80, 0x7a, 0x67, 0x77, 0xf6, 0xbe, 0x31, 0x33, 0xd9, 0xa9, + 0x3f, 0xe9, 0xe9, 0x74, 0xde, 0x9f, 0xd1, 0x19, 0xed, 0x0b, 0xec, 0xc4, 0x9f, 0x8a, 0x27, 0xf1, + 0x20, 0x7e, 0x45, 0x1c, 0x7b, 0xdd, 0xcc, 0x41, 0x3a, 0x75, 0x49, 0xc9, 0x39, 0x7b, 
0x77, 0x53, + 0xcc, 0x1c, 0xeb, 0xa7, 0xa6, 0x4d, 0xdc, 0xf3, 0xbe, 0xf3, 0x74, 0xc6, 0x05, 0x5e, 0x7f, 0x4e, + 0x18, 0x2e, 0xb3, 0xea, 0x57, 0x59, 0xb9, 0xbe, 0xcd, 0xcc, 0x39, 0x59, 0x30, 0xf8, 0xe6, 0x17, + 0x19, 0x78, 0xfa, 0x29, 0x99, 0xe3, 0xa2, 0x5d, 0xf7, 0xdf, 0x0a, 0x58, 0x1f, 0xb8, 0xd4, 0x3e, + 0xa2, 0x13, 0xf8, 0x73, 0x50, 0xe7, 0xfe, 0x18, 0x98, 0xe1, 0xb6, 0xb2, 0xaf, 0x1c, 0x34, 0x0f, + 0xdf, 0xe9, 0xa5, 0xb7, 0x94, 0xd0, 0xf6, 0x9c, 0xa7, 0x33, 0x2e, 0xf0, 0x7a, 0x1c, 0xdd, 0x3b, + 0xbb, 0xd3, 0x7b, 0x34, 0xf9, 0x94, 0xe8, 0xec, 0x98, 0x30, 0xac, 0xc1, 0xe7, 0x81, 0xba, 0x14, + 0x06, 0x2a, 0x48, 0x65, 0x28, 0x61, 0x85, 0x1a, 0x58, 0xf5, 0x1c, 0xa2, 0xb7, 0x97, 0x05, 0xfb, + 0x7e, 0xaf, 0x24, 0x06, 0x3d, 0xe9, 0xcd, 0xd8, 0x21, 0xba, 0xb6, 0x21, 0xd9, 0x56, 0xf9, 0x13, + 0x12, 0xb6, 0xf0, 0x08, 0xac, 0x79, 0x0c, 0x33, 0xdf, 0x6b, 0xaf, 0x08, 0x96, 0xee, 0xa5, 0x2c, + 0x02, 0xa9, 0x6d, 0x49, 0x9e, 0xb5, 0xe8, 0x19, 0x49, 0x86, 0xee, 0x1f, 0x14, 0xd0, 0x94, 0xc8, + 0xa1, 0xe9, 0x31, 0xf8, 0x64, 0xe1, 0x06, 0x7a, 0x57, 0xbb, 0x01, 0x6e, 0x2d, 0xde, 0xbf, 0x25, + 0x4f, 0xaa, 0xc7, 0x92, 0xcc, 0xdb, 0xdf, 0x03, 0x35, 0x93, 0x91, 0xb9, 0xd7, 0x5e, 0xde, 0x5f, + 0x39, 0x68, 0x1e, 0xbe, 0x76, 0x99, 0xe3, 0xda, 0xa6, 0x24, 0xaa, 0x3d, 0xe4, 0x26, 0x28, 0xb2, + 0xec, 0xfe, 0x6d, 0x35, 0x71, 0x98, 0x5f, 0x09, 0x7c, 0x1b, 0xd4, 0x79, 0x60, 0x0d, 0xdf, 0x22, + 0xc2, 0xe1, 0x46, 0xea, 0xc0, 0x58, 0xca, 0x51, 0x82, 0x80, 0x07, 0xa0, 0xce, 0x73, 0xe1, 0x63, + 0x6a, 0x93, 0x76, 0x5d, 0xa0, 0x37, 0x38, 0xf2, 0x44, 0xca, 0x50, 0xa2, 0x85, 0x8f, 0xc1, 0x4d, + 0x8f, 0x61, 0x97, 0x99, 0xf6, 0xec, 0x3e, 0xc1, 0x86, 0x65, 0xda, 0x64, 0x4c, 0x74, 0x6a, 0x1b, + 0x9e, 0x88, 0xdd, 0x8a, 0xf6, 0xd5, 0x30, 0x50, 0x6f, 0x8e, 0xcb, 0x21, 0xa8, 0xca, 0x16, 0x3e, + 0x01, 0x3b, 0x3a, 0xb5, 0x75, 0xdf, 0x75, 0x89, 0xad, 0x9f, 0x8f, 0xa8, 0x65, 0xea, 0xe7, 0x22, + 0x8c, 0x0d, 0xad, 0x27, 0xfd, 0xde, 0x19, 0x14, 0x01, 0x17, 0x65, 0x42, 0xb4, 0x48, 0x04, 0xdf, + 0x04, 0xeb, 0x9e, 0xef, 0x39, 0xc4, 0x36, 0xda, 0xab, 0xfb, 0xca, 0x41, 0x5d, 0x6b, 0x86, 0x81, + 0xba, 0x3e, 0x8e, 0x44, 0x28, 0xd6, 0xc1, 0x4f, 0x40, 0xf3, 0x53, 0x3a, 0x39, 0x21, 0x73, 0xc7, + 0xc2, 0x8c, 0xb4, 0x6b, 0x22, 0xce, 0x6f, 0x94, 0x06, 0xe3, 0x28, 0xc5, 0x89, 0x7c, 0xbc, 0x26, + 0x9d, 0x6c, 0x66, 0x14, 0x28, 0xcb, 0x06, 0x7f, 0x06, 0xf6, 0x3c, 0x5f, 0xd7, 0x89, 0xe7, 0x4d, + 0x7d, 0xeb, 0x88, 0x4e, 0xbc, 0x1f, 0x9a, 0x1e, 0xa3, 0xee, 0xf9, 0xd0, 0x9c, 0x9b, 0xac, 0xbd, + 0xb6, 0xaf, 0x1c, 0xd4, 0xb4, 0x4e, 0x18, 0xa8, 0x7b, 0xe3, 0x4a, 0x14, 0xba, 0x84, 0x01, 0x22, + 0x70, 0x63, 0x8a, 0x4d, 0x8b, 0x18, 0x0b, 0xdc, 0xeb, 0x82, 0x7b, 0x2f, 0x0c, 0xd4, 0x1b, 0x0f, + 0x4a, 0x11, 0xa8, 0xc2, 0xb2, 0xfb, 0xe7, 0x65, 0xb0, 0x99, 0xab, 0x17, 0xf8, 0x21, 0x58, 0xc3, + 0x3a, 0x33, 0xcf, 0x78, 0x52, 0xf1, 0x54, 0xbd, 0x95, 0xbd, 0x1d, 0xde, 0xe9, 0xd2, 0xaa, 0x47, + 0x64, 0x4a, 0x78, 0x10, 0x48, 0x5a, 0x64, 0xf7, 0x84, 0x29, 0x92, 0x14, 0xd0, 0x02, 0x2d, 0x0b, + 0x7b, 0x2c, 0xce, 0x47, 0x9e, 0x6d, 0x22, 0x3e, 0xcd, 0xc3, 0xaf, 0x5d, 0xad, 0xb8, 0xb8, 0x85, + 0xb6, 0x1b, 0x06, 0x6a, 0x6b, 0x58, 0xe0, 0x41, 0x0b, 0xcc, 0xd0, 0x05, 0x50, 0xc8, 0x92, 0x2b, + 0x14, 0xe7, 0xd5, 0x5e, 0xfa, 0xbc, 0x1b, 0x61, 0xa0, 0xc2, 0xe1, 0x02, 0x13, 0x2a, 0x61, 0xef, + 0xfe, 0x4b, 0x01, 0x2b, 0xaf, 0xa6, 0x81, 0x7e, 0x3f, 0xd7, 0x40, 0x5f, 0xab, 0x4a, 0xda, 0xca, + 0xe6, 0xf9, 0xa0, 0xd0, 0x3c, 0x3b, 0x95, 0x0c, 0x97, 0x37, 0xce, 0xbf, 0xae, 0x80, 0x8d, 0x23, + 0x3a, 0x19, 0x50, 0xdb, 0x30, 0x99, 0x49, 0x6d, 0x78, 0x17, 0xac, 0xb2, 0x73, 0x27, 0x6e, 0x42, + 0xfb, 0xf1, 
0xd1, 0x27, 0xe7, 0x0e, 0xb9, 0x08, 0xd4, 0x56, 0x16, 0xcb, 0x65, 0x48, 0xa0, 0xe1, + 0x30, 0x71, 0x67, 0x59, 0xd8, 0xdd, 0xcd, 0x1f, 0x77, 0x11, 0xa8, 0x25, 0x23, 0xb6, 0x97, 0x30, + 0xe5, 0x9d, 0x82, 0x33, 0xb0, 0xc9, 0x83, 0x33, 0x72, 0xe9, 0x24, 0xca, 0xb2, 0x95, 0x97, 0x8e, + 0xfa, 0x75, 0xe9, 0xc0, 0xe6, 0x30, 0x4b, 0x84, 0xf2, 0xbc, 0xf0, 0x2c, 0xca, 0xb1, 0x13, 0x17, + 0xdb, 0x5e, 0xf4, 0x4a, 0x5f, 0x2e, 0xa7, 0xf7, 0xe4, 0x69, 0x22, 0xcf, 0xf2, 0x6c, 0xa8, 0xe4, + 0x04, 0x78, 0x1b, 0xac, 0xb9, 0x04, 0x7b, 0xd4, 0x16, 0xf9, 0xdc, 0x48, 0xa3, 0x83, 0x84, 0x14, + 0x49, 0x2d, 0x7c, 0x0b, 0xac, 0xcf, 0x89, 0xe7, 0xe1, 0x19, 0x11, 0x1d, 0xa7, 0xa1, 0x6d, 0x4b, + 0xe0, 0xfa, 0x71, 0x24, 0x46, 0xb1, 0xbe, 0xfb, 0x7b, 0x05, 0xac, 0xbf, 0x9a, 0xe9, 0xf7, 0xbd, + 0xfc, 0xf4, 0x6b, 0x57, 0x65, 0x5e, 0xc5, 0xe4, 0xfb, 0x5d, 0x43, 0x38, 0x2a, 0xa6, 0xde, 0x1d, + 0xd0, 0x74, 0xb0, 0x8b, 0x2d, 0x8b, 0x58, 0xa6, 0x37, 0x17, 0xbe, 0xd6, 0xb4, 0x6d, 0xde, 0x97, + 0x47, 0xa9, 0x18, 0x65, 0x31, 0xdc, 0x44, 0xa7, 0x73, 0xc7, 0x22, 0xfc, 0x32, 0xa3, 0x74, 0x93, + 0x26, 0x83, 0x54, 0x8c, 0xb2, 0x18, 0xf8, 0x08, 0x5c, 0x8f, 0x3a, 0x58, 0x71, 0x02, 0xae, 0x88, + 0x09, 0xf8, 0x95, 0x30, 0x50, 0xaf, 0xdf, 0x2b, 0x03, 0xa0, 0x72, 0x3b, 0x38, 0x03, 0x2d, 0x87, + 0x1a, 0xbc, 0x39, 0xfb, 0x2e, 0x91, 0xc3, 0xaf, 0x29, 0xee, 0xf9, 0xcd, 0xd2, 0xcb, 0x18, 0x15, + 0xc0, 0x51, 0x0f, 0x2c, 0x4a, 0xd1, 0x02, 0x29, 0xfc, 0x04, 0x6c, 0xca, 0x11, 0x22, 0x4f, 0x69, + 0x5d, 0xb2, 0x29, 0x8d, 0xb3, 0x48, 0x6d, 0x87, 0x27, 0x7f, 0x4e, 0x84, 0xf2, 0x5c, 0xf0, 0x2e, + 0xd8, 0x98, 0x60, 0xfd, 0x29, 0x9d, 0x4e, 0xb3, 0x73, 0xa7, 0x15, 0x06, 0xea, 0x86, 0x96, 0x91, + 0xa3, 0x1c, 0x0a, 0x0e, 0xc1, 0x6e, 0xf6, 0x79, 0x44, 0xdc, 0x87, 0xb6, 0x41, 0x9e, 0xb5, 0x37, + 0x84, 0x75, 0x3b, 0x0c, 0xd4, 0x5d, 0xad, 0x44, 0x8f, 0x4a, 0xad, 0xe0, 0xfb, 0xa0, 0x35, 0xc7, + 0xcf, 0xa2, 0x31, 0x27, 0x24, 0xc4, 0x6b, 0x6f, 0x0a, 0x26, 0x71, 0x45, 0xc7, 0x05, 0x1d, 0x5a, + 0x40, 0xc3, 0x9f, 0x82, 0xba, 0x47, 0x2c, 0xa2, 0x33, 0xea, 0xca, 0xc2, 0x7d, 0xf7, 0x8a, 0xb9, + 0x8e, 0x27, 0xc4, 0x1a, 0x4b, 0xd3, 0x68, 0x7f, 0x8a, 0x9f, 0x50, 0x42, 0x09, 0xbf, 0x03, 0xb6, + 0xe6, 0xd8, 0xf6, 0x71, 0x82, 0x14, 0x15, 0x5b, 0xd7, 0x60, 0x18, 0xa8, 0x5b, 0xc7, 0x39, 0x0d, + 0x2a, 0x20, 0xe1, 0x8f, 0x40, 0x9d, 0xc5, 0xcb, 0xc9, 0x9a, 0x70, 0xad, 0x74, 0xfc, 0x8e, 0xa8, + 0x91, 0xdb, 0x4d, 0x92, 0xda, 0x4b, 0x16, 0x93, 0x84, 0x86, 0xaf, 0x73, 0x8c, 0x59, 0x32, 0x0f, + 0xef, 0x4d, 0x19, 0x71, 0x1f, 0x98, 0xb6, 0xe9, 0x9d, 0x12, 0x43, 0xec, 0x81, 0xb5, 0x68, 0x9d, + 0x3b, 0x39, 0x19, 0x96, 0x41, 0x50, 0x95, 0x2d, 0x1c, 0x82, 0xad, 0xb4, 0x60, 0x8e, 0xa9, 0x41, + 0xda, 0x0d, 0xd1, 0x6e, 0xde, 0xe0, 0x6f, 0x39, 0xc8, 0x69, 0x2e, 0x16, 0x24, 0xa8, 0x60, 0x9b, + 0x5d, 0xdf, 0xc0, 0x25, 0xeb, 0x9b, 0x01, 0x76, 0x1d, 0x6a, 0x20, 0xe2, 0x58, 0x58, 0x27, 0x73, + 0x62, 0x33, 0x99, 0xe3, 0x5b, 0xe2, 0xe8, 0x77, 0x78, 0x26, 0x8d, 0x4a, 0xf4, 0x17, 0x15, 0x72, + 0x54, 0xca, 0x06, 0xbf, 0x0e, 0x1a, 0x73, 0x6c, 0xe3, 0x19, 0x31, 0xb4, 0xf3, 0xf6, 0xb6, 0xa0, + 0xde, 0x0c, 0x03, 0xb5, 0x71, 0x1c, 0x0b, 0x51, 0xaa, 0xef, 0xfe, 0xa7, 0x06, 0x1a, 0xe9, 0xf2, + 0xf4, 0x18, 0x00, 0x3d, 0x9e, 0x50, 0x9e, 0x5c, 0xa0, 0x5e, 0xaf, 0xea, 0x76, 0xc9, 0x2c, 0x4b, + 0x07, 0x7f, 0x22, 0xf2, 0x50, 0x86, 0x08, 0xfe, 0x04, 0x34, 0xc4, 0x5a, 0x2d, 0x66, 0xcd, 0xf2, + 0x4b, 0xcf, 0x1a, 0xe1, 0xfd, 0x38, 0x26, 0x40, 0x29, 0x17, 0x9c, 0x66, 0xa3, 0xf8, 0x25, 0xe7, + 0x26, 0xcc, 0x47, 0x5c, 0x1c, 0x51, 0x60, 0xe5, 0xd3, 0x4b, 0x2e, 0x95, 0xab, 0x22, 0xe7, 0xaa, + 0xf6, 0xc5, 0x3e, 0x68, 0x88, 0x8e, 
0x43, 0x0c, 0x62, 0x88, 0xb2, 0xa9, 0x69, 0x3b, 0x12, 0xda, + 0x18, 0xc7, 0x0a, 0x94, 0x62, 0x38, 0x71, 0xb4, 0xd9, 0xca, 0xfd, 0x3a, 0x21, 0x8e, 0x4a, 0x1e, + 0x49, 0x2d, 0x9f, 0x01, 0x8c, 0xb8, 0x73, 0xd3, 0xc6, 0xfc, 0xdb, 0x44, 0xb4, 0x5e, 0x39, 0x03, + 0x4e, 0x52, 0x31, 0xca, 0x62, 0xe0, 0x7d, 0xd0, 0x92, 0x6f, 0x91, 0x36, 0x9a, 0x75, 0x91, 0x0d, + 0x6d, 0x79, 0x48, 0x6b, 0x50, 0xd0, 0xa3, 0x05, 0x0b, 0xf8, 0x1e, 0xd8, 0x9c, 0xe6, 0x7a, 0x15, + 0x10, 0x14, 0xa2, 0xd7, 0xe6, 0x1b, 0x55, 0x1e, 0x07, 0x7f, 0xad, 0x80, 0x9b, 0xbe, 0xad, 0x53, + 0xdf, 0x66, 0xc4, 0x88, 0x9d, 0x24, 0xc6, 0x88, 0x1a, 0x9e, 0x28, 0xdc, 0xe6, 0xe1, 0xdb, 0xa5, + 0x89, 0xf5, 0xb8, 0xdc, 0x26, 0x2a, 0xf3, 0x0a, 0x25, 0xaa, 0x3a, 0x09, 0xaa, 0xa0, 0xe6, 0x12, + 0x6c, 0x9c, 0x8b, 0xea, 0xae, 0x69, 0x0d, 0x3e, 0x9b, 0x11, 0x17, 0xa0, 0x48, 0xde, 0xfd, 0xa3, + 0x02, 0xb6, 0x0b, 0x9f, 0x4a, 0xff, 0xff, 0xbb, 0x70, 0x77, 0x02, 0x16, 0x66, 0x29, 0xfc, 0x08, + 0xd4, 0x5c, 0xdf, 0x22, 0x71, 0xd9, 0xbe, 0x75, 0xa5, 0xb9, 0x8c, 0x7c, 0x8b, 0xa4, 0x5b, 0x0b, + 0x7f, 0xf2, 0x50, 0x44, 0xd3, 0xfd, 0xbb, 0x02, 0x6e, 0x17, 0xe1, 0x8f, 0xec, 0x1f, 0x3c, 0x33, + 0xd9, 0x80, 0x1a, 0xc4, 0x43, 0xe4, 0x17, 0xbe, 0xe9, 0x8a, 0xbe, 0xc3, 0x93, 0x44, 0xa7, 0x36, + 0xc3, 0xfc, 0x5a, 0x3e, 0xc2, 0xf3, 0x78, 0x95, 0x16, 0x49, 0x32, 0xc8, 0x2a, 0x50, 0x1e, 0x07, + 0xc7, 0xa0, 0x4e, 0x1d, 0xe2, 0x62, 0x3e, 0x65, 0xa2, 0x35, 0xfa, 0xbd, 0x78, 0x14, 0x3c, 0x92, + 0xf2, 0x8b, 0x40, 0xbd, 0x75, 0x89, 0x1b, 0x31, 0x0c, 0x25, 0x44, 0xb0, 0x0b, 0xd6, 0xce, 0xb0, + 0xe5, 0x13, 0xbe, 0xed, 0xac, 0x1c, 0xd4, 0x34, 0xc0, 0xeb, 0xe9, 0xc7, 0x42, 0x82, 0xa4, 0xa6, + 0xfb, 0x97, 0xd2, 0x97, 0x1b, 0x51, 0x23, 0xed, 0x60, 0x23, 0xcc, 0x18, 0x71, 0x6d, 0xf8, 0x41, + 0xee, 0xf3, 0xe0, 0xdd, 0xc2, 0xe7, 0xc1, 0xad, 0x92, 0x25, 0x3f, 0x4b, 0xf3, 0xbf, 0xfa, 0x62, + 0xe8, 0x3e, 0x5f, 0x06, 0xbb, 0x65, 0xd1, 0x84, 0xef, 0x47, 0xbd, 0x8a, 0xda, 0xd2, 0xe3, 0x83, + 0x6c, 0xaf, 0xa2, 0xf6, 0x45, 0xa0, 0xde, 0x28, 0xda, 0x45, 0x1a, 0x24, 0xed, 0xa0, 0x0d, 0x9a, + 0x34, 0xbd, 0x61, 0x99, 0xa4, 0xdf, 0xbd, 0x52, 0x3e, 0x95, 0x27, 0x48, 0xd4, 0xa9, 0xb2, 0xba, + 0xec, 0x01, 0xf0, 0x97, 0x60, 0x9b, 0xe6, 0xef, 0x5e, 0x44, 0xee, 0xea, 0x67, 0x96, 0xc5, 0x4d, + 0xbb, 0x29, 0xdf, 0x7b, 0xbb, 0xa0, 0x47, 0xc5, 0xc3, 0xba, 0x4f, 0x40, 0x7e, 0x6d, 0x84, 0x1f, + 0xe6, 0x4b, 0xe9, 0xf6, 0x17, 0x2f, 0x9f, 0x97, 0xd4, 0xd1, 0x6f, 0x15, 0xb0, 0xb3, 0x80, 0xe5, + 0x6b, 0x60, 0x32, 0x05, 0xe2, 0xd6, 0x1a, 0xc5, 0x4b, 0xac, 0x81, 0xe3, 0x82, 0x0e, 0x2d, 0xa0, + 0xf9, 0x9e, 0x96, 0xc8, 0x06, 0xbc, 0xf9, 0xc9, 0x2f, 0x03, 0x31, 0xcf, 0xc6, 0x39, 0x0d, 0x2a, + 0x20, 0xbb, 0x7f, 0x52, 0x40, 0x55, 0x2f, 0x85, 0xa3, 0xec, 0x0c, 0xe3, 0x17, 0xd0, 0xd0, 0x0e, + 0x73, 0xf3, 0xeb, 0x22, 0x50, 0x5f, 0xaf, 0xfa, 0xcb, 0x96, 0x27, 0xba, 0xd7, 0x7b, 0xfc, 0xf0, + 0x7e, 0x76, 0xc8, 0x7d, 0x90, 0x0c, 0xb9, 0x65, 0x41, 0xd7, 0x4f, 0x07, 0xdc, 0xd5, 0xb8, 0xa4, + 0xb9, 0xf6, 0xed, 0xe7, 0x2f, 0x3a, 0x4b, 0x9f, 0xbd, 0xe8, 0x2c, 0x7d, 0xfe, 0xa2, 0xb3, 0xf4, + 0xab, 0xb0, 0xa3, 0x3c, 0x0f, 0x3b, 0xca, 0x67, 0x61, 0x47, 0xf9, 0x3c, 0xec, 0x28, 0xff, 0x08, + 0x3b, 0xca, 0x6f, 0xfe, 0xd9, 0x59, 0xfa, 0xf8, 0x5a, 0xc9, 0x7f, 0xe8, 0xff, 0x0d, 0x00, 0x00, + 0xff, 0xff, 0x1e, 0x70, 0x68, 0xe1, 0x59, 0x17, 0x00, 0x00, } func (m *CronJob) Marshal() (dAtA []byte, err error) { @@ -1030,6 +1093,27 @@ func (m *JobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.SuccessPolicy != nil { + { + size, err := m.SuccessPolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, 
err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.ManagedBy != nil { + i -= len(*m.ManagedBy) + copy(dAtA[i:], *m.ManagedBy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ManagedBy))) + i-- + dAtA[i] = 0x7a + } if m.PodReplacementPolicy != nil { i -= len(*m.PodReplacementPolicy) copy(dAtA[i:], *m.PodReplacementPolicy) @@ -1449,6 +1533,78 @@ func (m *PodFailurePolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SuccessPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SuccessPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SuccessPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SuccessPolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SuccessPolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SuccessPolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SucceededCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SucceededCount)) + i-- + dAtA[i] = 0x10 + } + if m.SucceededIndexes != nil { + i -= len(*m.SucceededIndexes) + copy(dAtA[i:], *m.SucceededIndexes) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SucceededIndexes))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *UncountedTerminatedPods) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1691,6 +1847,14 @@ func (m *JobSpec) Size() (n int) { l = len(*m.PodReplacementPolicy) n += 1 + l + sovGenerated(uint64(l)) } + if m.ManagedBy != nil { + l = len(*m.ManagedBy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SuccessPolicy != nil { + l = m.SuccessPolicy.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -1818,6 +1982,37 @@ func (m *PodFailurePolicyRule) Size() (n int) { return n } +func (m *SuccessPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SuccessPolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SucceededIndexes != nil { + l = len(*m.SucceededIndexes) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SucceededCount != nil { + n += 1 + sovGenerated(uint64(*m.SucceededCount)) + } + return n +} + func (m *UncountedTerminatedPods) Size() (n int) { if m == nil { return 0 @@ -1969,6 +2164,8 @@ func (this *JobSpec) String() string { `BackoffLimitPerIndex:` + valueToStringGenerated(this.BackoffLimitPerIndex) + `,`, `MaxFailedIndexes:` + valueToStringGenerated(this.MaxFailedIndexes) + `,`, `PodReplacementPolicy:` + 
valueToStringGenerated(this.PodReplacementPolicy) + `,`, + `ManagedBy:` + valueToStringGenerated(this.ManagedBy) + `,`, + `SuccessPolicy:` + strings.Replace(this.SuccessPolicy.String(), "SuccessPolicy", "SuccessPolicy", 1) + `,`, `}`, }, "") return s @@ -2064,6 +2261,32 @@ func (this *PodFailurePolicyRule) String() string { }, "") return s } +func (this *SuccessPolicy) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]SuccessPolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "SuccessPolicyRule", "SuccessPolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&SuccessPolicy{`, + `Rules:` + repeatedStringForRules + `,`, + `}`, + }, "") + return s +} +func (this *SuccessPolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SuccessPolicyRule{`, + `SucceededIndexes:` + valueToStringGenerated(this.SucceededIndexes) + `,`, + `SucceededCount:` + valueToStringGenerated(this.SucceededCount) + `,`, + `}`, + }, "") + return s +} func (this *UncountedTerminatedPods) String() string { if this == nil { return "nil" @@ -3658,6 +3881,75 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { s := PodReplacementPolicy(dAtA[iNdEx:postIndex]) m.PodReplacementPolicy = &s iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ManagedBy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ManagedBy = &s + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SuccessPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SuccessPolicy == nil { + m.SuccessPolicy = &SuccessPolicy{} + } + if err := m.SuccessPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4690,6 +4982,193 @@ func (m *PodFailurePolicyRule) Unmarshal(dAtA []byte) error { } return nil } +func (m *SuccessPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SuccessPolicy: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SuccessPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, SuccessPolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SuccessPolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SuccessPolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SuccessPolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SucceededIndexes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.SucceededIndexes = &s + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SucceededCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SucceededCount = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *UncountedTerminatedPods) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/batch/v1/generated.proto 
b/vendor/k8s.io/api/batch/v1/generated.proto index f899779889..f5a9385f5e 100644 --- a/vendor/k8s.io/api/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -34,7 +34,7 @@ message CronJob { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a cron job, including the schedule. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -52,7 +52,7 @@ message CronJobList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CronJobs. repeated CronJob items = 2; @@ -113,15 +113,15 @@ message CronJobStatus { // A list of pointers to currently running jobs. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.ObjectReference active = 1; + repeated .k8s.io.api.core.v1.ObjectReference active = 1; // Information when was the last time the job was successfully scheduled. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4; // Information when was the last time the job successfully completed. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastSuccessfulTime = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastSuccessfulTime = 5; } // Job represents the configuration of a single job. @@ -129,7 +129,7 @@ message Job { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a job. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -152,11 +152,11 @@ message JobCondition { // Last time the condition was checked. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; // Last time the condition transit from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // (brief) reason for the condition's last transition. // +optional @@ -172,7 +172,7 @@ message JobList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of Jobs. repeated Job items = 2; @@ -213,11 +213,20 @@ message JobSpec { // checked against the backoffLimit. This field cannot be used in combination // with restartPolicy=OnFailure. // - // This field is beta-level. 
It can be used when the `JobPodFailurePolicy` - // feature gate is enabled (enabled by default). // +optional optional PodFailurePolicy podFailurePolicy = 11; + // successPolicy specifies the policy when the Job can be declared as succeeded. + // If empty, the default behavior applies - the Job is declared as succeeded + // only when the number of succeeded pods equals to the completions. + // When the field is specified, it must be immutable and works only for the Indexed Jobs. + // Once the Job meets the SuccessPolicy, the lingering pods are terminated. + // + // This field is beta-level. To use this field, you must enable the + // `JobSuccessPolicy` feature gate (enabled by default). + // +optional + optional SuccessPolicy successPolicy = 16; + // Specifies the number of retries before marking this job failed. // Defaults to 6 // +optional @@ -251,7 +260,7 @@ message JobSpec { // Normally, the system sets this field for you. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // manualSelector controls generation of pod labels and pod selectors. // Leave `manualSelector` unset unless you are certain what you are doing. @@ -269,7 +278,7 @@ message JobSpec { // Describes the pod that will be created when executing a job. // The only allowed template.spec.restartPolicy values are "Never" or "OnFailure". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ - optional k8s.io.api.core.v1.PodTemplateSpec template = 6; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 6; // ttlSecondsAfterFinished limits the lifetime of a Job that has finished // execution (either Complete or Failed). If this field is set, @@ -330,6 +339,21 @@ message JobSpec { // This is on by default. // +optional optional string podReplacementPolicy = 14; + + // ManagedBy field indicates the controller that manages a Job. The k8s Job + // controller reconciles jobs which don't have this field at all or the field + // value is the reserved string `kubernetes.io/job-controller`, but skips + // reconciling Jobs with a custom value for this field. + // The value must be a valid domain-prefixed path (e.g. acme.io/foo) - + // all characters before the first "/" must be a valid subdomain as defined + // by RFC 1123. All characters trailing the first "/" must be valid HTTP Path + // characters as defined by RFC 3986. The value cannot exceed 63 characters. + // This field is immutable. + // + // This field is alpha-level. The job controller accepts setting the field + // when the feature gate JobManagedBy is enabled (disabled by default). + // +optional + optional string managedBy = 15; } // JobStatus represents the current state of a Job. @@ -340,6 +364,12 @@ message JobStatus { // status true; when the Job is resumed, the status of this condition will // become false. When a Job is completed, one of the conditions will have // type "Complete" and status true. + // + // A job is considered finished when it is in a terminal condition, either + // "Complete" or "Failed". A Job cannot have both the "Complete" and "Failed" conditions. + // Additionally, it cannot be in the "Complete" and "FailureTarget" conditions. + // The "Complete", "Failed" and "FailureTarget" conditions cannot be disabled. 
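[Editor's note, not part of the vendored patch] The successPolicy (proto field 16) and managedBy (proto field 15) additions above also account for the tag bytes in the generated marshal code earlier in this hunk: a protobuf key is (field number << 3) | wire type, so field 15 with the length-delimited wire type 2 encodes as 0x7a, and field 16 encodes as the two-byte varint 0x82 0x01. As a hedged illustration of how these new fields might be used with the vendored batch/v1 types, consider the following sketch; the int32Ptr and strPtr helpers and the object name are assumptions for the example, not part of the patch.

// Illustrative sketch only. Shows how the new successPolicy and managedBy
// fields might be populated using the batch/v1 types introduced above.
package main

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func int32Ptr(i int32) *int32 { return &i }
func strPtr(s string) *string { return &s }

func exampleJob() batchv1.Job {
	mode := batchv1.IndexedCompletion // successPolicy only works for Indexed Jobs
	return batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "indexed-job"},
		Spec: batchv1.JobSpec{
			CompletionMode: &mode,
			Completions:    int32Ptr(10),
			Parallelism:    int32Ptr(5),
			// Declare the Job succeeded as soon as indexes 0-2 have finished,
			// even if other indexes are still running.
			SuccessPolicy: &batchv1.SuccessPolicy{
				Rules: []batchv1.SuccessPolicyRule{
					{SucceededIndexes: strPtr("0-2")},
				},
			},
			// Leaving ManagedBy nil (or setting it to the reserved
			// "kubernetes.io/job-controller" value) keeps the built-in
			// Job controller reconciling this Job.
			Template: corev1.PodTemplateSpec{ /* pod spec elided */ },
		},
	}
}

func main() { _ = exampleJob() }
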
+ // // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ // +optional // +patchMergeKey=type @@ -351,25 +381,36 @@ message JobStatus { // Job is created in the suspended state, this field is not set until the // first time it is resumed. This field is reset every time a Job is resumed // from suspension. It is represented in RFC3339 form and is in UTC. + // + // Once set, the field can only be removed when the job is suspended. + // The field cannot be modified while the job is unsuspended or finished. + // // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2; // Represents time when the job was completed. It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. - // The completion time is only set when the job finishes successfully. + // The completion time is set when the job finishes successfully, and only then. + // The value cannot be updated or removed. The value indicates the same or + // later point in time as the startTime field. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTime = 3; - // The number of pending and running pods. + // The number of pending and running pods which are not terminating (without + // a deletionTimestamp). + // The value is zero for finished jobs. // +optional optional int32 active = 4; // The number of pods which reached phase Succeeded. + // The value increases monotonically for a given spec. However, it may + // decrease in reaction to scale down of elastic indexed jobs. // +optional optional int32 succeeded = 5; // The number of pods which reached phase Failed. + // The value increases monotonically. // +optional optional int32 failed = 6; @@ -391,7 +432,7 @@ message JobStatus { // +optional optional string completedIndexes = 7; - // FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. + // FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. // The indexes are represented in the text format analogous as for the // `completedIndexes` field, ie. they are kept as decimal integers // separated by commas. The numbers are listed in increasing order. Three or @@ -399,6 +440,8 @@ message JobStatus { // last element of the series, separated by a hyphen. // For example, if the failed indexes are 1, 3, 4, 5 and 7, they are // represented as "1,3-5,7". + // The set of failed indexes cannot overlap with the set of completed indexes. + // // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` // feature gate is enabled (enabled by default). // +optional @@ -418,11 +461,12 @@ message JobStatus { // // Old jobs might not be tracked using this field, in which case the field // remains null. + // The structure is empty for finished jobs. // +optional optional UncountedTerminatedPods uncountedTerminatedPods = 8; - // The number of pods which have a Ready condition. - // +optional + // The number of active pods which have a Ready condition and are not + // terminating (without a deletionTimestamp). optional int32 ready = 9; } @@ -431,7 +475,7 @@ message JobTemplateSpec { // Standard object's metadata of the jobs created from this template. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the job. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -532,6 +576,51 @@ message PodFailurePolicyRule { repeated PodFailurePolicyOnPodConditionsPattern onPodConditions = 3; } +// SuccessPolicy describes when a Job can be declared as succeeded based on the success of some indexes. +message SuccessPolicy { + // rules represents the list of alternative rules for the declaring the Jobs + // as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, + // the "SucceededCriteriaMet" condition is added, and the lingering pods are removed. + // The terminal state for such a Job has the "Complete" condition. + // Additionally, these rules are evaluated in order; Once the Job meets one of the rules, + // other rules are ignored. At most 20 elements are allowed. + // +listType=atomic + repeated SuccessPolicyRule rules = 1; +} + +// SuccessPolicyRule describes rule for declaring a Job as succeeded. +// Each rule must have at least one of the "succeededIndexes" or "succeededCount" specified. +message SuccessPolicyRule { + // succeededIndexes specifies the set of indexes + // which need to be contained in the actual set of the succeeded indexes for the Job. + // The list of indexes must be within 0 to ".spec.completions-1" and + // must not contain duplicates. At least one element is required. + // The indexes are represented as intervals separated by commas. + // The intervals can be a decimal integer or a pair of decimal integers separated by a hyphen. + // The number are listed in represented by the first and last element of the series, + // separated by a hyphen. + // For example, if the completed indexes are 1, 3, 4, 5 and 7, they are + // represented as "1,3-5,7". + // When this field is null, this field doesn't default to any value + // and is never evaluated at any time. + // + // +optional + optional string succeededIndexes = 1; + + // succeededCount specifies the minimal required size of the actual set of the succeeded indexes + // for the Job. When succeededCount is used along with succeededIndexes, the check is + // constrained only to the set of indexes specified by succeededIndexes. + // For example, given that succeededIndexes is "1-4", succeededCount is "3", + // and completed indexes are "1", "3", and "5", the Job isn't declared as succeeded + // because only "1" and "3" indexes are considered in that rules. + // When this field is null, this doesn't default to any value and + // is never evaluated at any time. + // When specified it needs to be a positive integer. + // + // +optional + optional int32 succeededCount = 2; +} + // UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't // been accounted in Job status counters. message UncountedTerminatedPods { diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go index 65e1d3c592..b42ec231e4 100644 --- a/vendor/k8s.io/api/batch/v1/types.go +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -57,10 +57,14 @@ const ( // to the pod, which don't count towards the backoff limit, according to the // pod failure policy. When the annotation is absent zero is implied. 
JobIndexIgnoredFailureCountAnnotation = labelPrefix + "job-index-ignored-failure-count" + // JobControllerName reserved value for the managedBy field for the built-in + // Job controller. + JobControllerName = "kubernetes.io/job-controller" ) // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // Job represents the configuration of a single job. type Job struct { @@ -82,6 +86,7 @@ type Job struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // JobList is a collection of jobs. type JobList struct { @@ -252,6 +257,51 @@ type PodFailurePolicy struct { Rules []PodFailurePolicyRule `json:"rules" protobuf:"bytes,1,opt,name=rules"` } +// SuccessPolicy describes when a Job can be declared as succeeded based on the success of some indexes. +type SuccessPolicy struct { + // rules represents the list of alternative rules for the declaring the Jobs + // as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, + // the "SucceededCriteriaMet" condition is added, and the lingering pods are removed. + // The terminal state for such a Job has the "Complete" condition. + // Additionally, these rules are evaluated in order; Once the Job meets one of the rules, + // other rules are ignored. At most 20 elements are allowed. + // +listType=atomic + Rules []SuccessPolicyRule `json:"rules" protobuf:"bytes,1,opt,name=rules"` +} + +// SuccessPolicyRule describes rule for declaring a Job as succeeded. +// Each rule must have at least one of the "succeededIndexes" or "succeededCount" specified. +type SuccessPolicyRule struct { + // succeededIndexes specifies the set of indexes + // which need to be contained in the actual set of the succeeded indexes for the Job. + // The list of indexes must be within 0 to ".spec.completions-1" and + // must not contain duplicates. At least one element is required. + // The indexes are represented as intervals separated by commas. + // The intervals can be a decimal integer or a pair of decimal integers separated by a hyphen. + // The number are listed in represented by the first and last element of the series, + // separated by a hyphen. + // For example, if the completed indexes are 1, 3, 4, 5 and 7, they are + // represented as "1,3-5,7". + // When this field is null, this field doesn't default to any value + // and is never evaluated at any time. + // + // +optional + SucceededIndexes *string `json:"succeededIndexes,omitempty" protobuf:"bytes,1,opt,name=succeededIndexes"` + + // succeededCount specifies the minimal required size of the actual set of the succeeded indexes + // for the Job. When succeededCount is used along with succeededIndexes, the check is + // constrained only to the set of indexes specified by succeededIndexes. + // For example, given that succeededIndexes is "1-4", succeededCount is "3", + // and completed indexes are "1", "3", and "5", the Job isn't declared as succeeded + // because only "1" and "3" indexes are considered in that rules. + // When this field is null, this doesn't default to any value and + // is never evaluated at any time. + // When specified it needs to be a positive integer. + // + // +optional + SucceededCount *int32 `json:"succeededCount,omitempty" protobuf:"varint,2,opt,name=succeededCount"` +} + // JobSpec describes how the job execution will look like. 
type JobSpec struct { @@ -288,11 +338,20 @@ type JobSpec struct { // checked against the backoffLimit. This field cannot be used in combination // with restartPolicy=OnFailure. // - // This field is beta-level. It can be used when the `JobPodFailurePolicy` - // feature gate is enabled (enabled by default). // +optional PodFailurePolicy *PodFailurePolicy `json:"podFailurePolicy,omitempty" protobuf:"bytes,11,opt,name=podFailurePolicy"` + // successPolicy specifies the policy when the Job can be declared as succeeded. + // If empty, the default behavior applies - the Job is declared as succeeded + // only when the number of succeeded pods equals to the completions. + // When the field is specified, it must be immutable and works only for the Indexed Jobs. + // Once the Job meets the SuccessPolicy, the lingering pods are terminated. + // + // This field is beta-level. To use this field, you must enable the + // `JobSuccessPolicy` feature gate (enabled by default). + // +optional + SuccessPolicy *SuccessPolicy `json:"successPolicy,omitempty" protobuf:"bytes,16,opt,name=successPolicy"` + // Specifies the number of retries before marking this job failed. // Defaults to 6 // +optional @@ -410,6 +469,21 @@ type JobSpec struct { // This is on by default. // +optional PodReplacementPolicy *PodReplacementPolicy `json:"podReplacementPolicy,omitempty" protobuf:"bytes,14,opt,name=podReplacementPolicy,casttype=podReplacementPolicy"` + + // ManagedBy field indicates the controller that manages a Job. The k8s Job + // controller reconciles jobs which don't have this field at all or the field + // value is the reserved string `kubernetes.io/job-controller`, but skips + // reconciling Jobs with a custom value for this field. + // The value must be a valid domain-prefixed path (e.g. acme.io/foo) - + // all characters before the first "/" must be a valid subdomain as defined + // by RFC 1123. All characters trailing the first "/" must be valid HTTP Path + // characters as defined by RFC 3986. The value cannot exceed 63 characters. + // This field is immutable. + // + // This field is alpha-level. The job controller accepts setting the field + // when the feature gate JobManagedBy is enabled (disabled by default). + // +optional + ManagedBy *string `json:"managedBy,omitempty" protobuf:"bytes,15,opt,name=managedBy"` } // JobStatus represents the current state of a Job. @@ -420,6 +494,12 @@ type JobStatus struct { // status true; when the Job is resumed, the status of this condition will // become false. When a Job is completed, one of the conditions will have // type "Complete" and status true. + // + // A job is considered finished when it is in a terminal condition, either + // "Complete" or "Failed". A Job cannot have both the "Complete" and "Failed" conditions. + // Additionally, it cannot be in the "Complete" and "FailureTarget" conditions. + // The "Complete", "Failed" and "FailureTarget" conditions cannot be disabled. + // // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ // +optional // +patchMergeKey=type @@ -431,25 +511,36 @@ type JobStatus struct { // Job is created in the suspended state, this field is not set until the // first time it is resumed. This field is reset every time a Job is resumed // from suspension. It is represented in RFC3339 form and is in UTC. + // + // Once set, the field can only be removed when the job is suspended. + // The field cannot be modified while the job is unsuspended or finished. 
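[Editor's note, not part of the vendored patch] To make the worked example in the succeededIndexes/succeededCount documentation above concrete: with succeededIndexes "1-4", succeededCount 3, and succeeded indexes {1, 3, 5}, only indexes 1 and 3 fall inside the range, so the rule is not met. The following is a rough, self-contained sketch of that documented behavior against the SuccessPolicyRule type added in this file; parseIndexes and ruleMet are hypothetical helpers, not the actual Job controller logic.

// Hypothetical sketch; demonstrates the documented SuccessPolicyRule
// semantics, not the real controller implementation.
package main

import (
	"fmt"
	"strconv"
	"strings"

	batchv1 "k8s.io/api/batch/v1"
)

// parseIndexes expands an interval list such as "1,3-5,7" into a set of
// integers. Error handling is elided for brevity.
func parseIndexes(s string) map[int]bool {
	out := map[int]bool{}
	for _, part := range strings.Split(s, ",") {
		bounds := strings.SplitN(part, "-", 2)
		lo, _ := strconv.Atoi(bounds[0])
		hi := lo
		if len(bounds) == 2 {
			hi, _ = strconv.Atoi(bounds[1])
		}
		for i := lo; i <= hi; i++ {
			out[i] = true
		}
	}
	return out
}

// ruleMet follows the documented semantics: with only succeededIndexes, every
// listed index must have succeeded; with succeededCount, at least that many of
// the considered indexes (all of them, or only those listed) must have succeeded.
func ruleMet(rule batchv1.SuccessPolicyRule, succeeded map[int]bool) bool {
	considered := succeeded
	if rule.SucceededIndexes != nil {
		wanted := parseIndexes(*rule.SucceededIndexes)
		considered = map[int]bool{}
		for i := range wanted {
			if succeeded[i] {
				considered[i] = true
			}
		}
		if rule.SucceededCount == nil {
			return len(considered) == len(wanted)
		}
	}
	if rule.SucceededCount != nil {
		return len(considered) >= int(*rule.SucceededCount)
	}
	return false
}

func main() {
	indexes, count := "1-4", int32(3)
	rule := batchv1.SuccessPolicyRule{SucceededIndexes: &indexes, SucceededCount: &count}
	// Prints "false": only indexes 1 and 3 from the 1-4 range have succeeded.
	fmt.Println(ruleMet(rule, map[int]bool{1: true, 3: true, 5: true}))
}
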
+ // // +optional StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` // Represents time when the job was completed. It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. - // The completion time is only set when the job finishes successfully. + // The completion time is set when the job finishes successfully, and only then. + // The value cannot be updated or removed. The value indicates the same or + // later point in time as the startTime field. // +optional CompletionTime *metav1.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` - // The number of pending and running pods. + // The number of pending and running pods which are not terminating (without + // a deletionTimestamp). + // The value is zero for finished jobs. // +optional Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"` // The number of pods which reached phase Succeeded. + // The value increases monotonically for a given spec. However, it may + // decrease in reaction to scale down of elastic indexed jobs. // +optional Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"` // The number of pods which reached phase Failed. + // The value increases monotonically. // +optional Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` @@ -471,7 +562,7 @@ type JobStatus struct { // +optional CompletedIndexes string `json:"completedIndexes,omitempty" protobuf:"bytes,7,opt,name=completedIndexes"` - // FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. + // FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. // The indexes are represented in the text format analogous as for the // `completedIndexes` field, ie. they are kept as decimal integers // separated by commas. The numbers are listed in increasing order. Three or @@ -479,6 +570,8 @@ type JobStatus struct { // last element of the series, separated by a hyphen. // For example, if the failed indexes are 1, 3, 4, 5 and 7, they are // represented as "1,3-5,7". + // The set of failed indexes cannot overlap with the set of completed indexes. + // // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` // feature gate is enabled (enabled by default). // +optional @@ -498,11 +591,12 @@ type JobStatus struct { // // Old jobs might not be tracked using this field, in which case the field // remains null. + // The structure is empty for finished jobs. // +optional UncountedTerminatedPods *UncountedTerminatedPods `json:"uncountedTerminatedPods,omitempty" protobuf:"bytes,8,opt,name=uncountedTerminatedPods"` - // The number of pods which have a Ready condition. - // +optional + // The number of active pods which have a Ready condition and are not + // terminating (without a deletionTimestamp). Ready *int32 `json:"ready,omitempty" protobuf:"varint,9,opt,name=ready"` } @@ -532,13 +626,14 @@ const ( JobFailed JobConditionType = "Failed" // FailureTarget means the job is about to fail its execution. JobFailureTarget JobConditionType = "FailureTarget" + // JobSuccessCriteriaMet means the Job has been succeeded. + JobSuccessCriteriaMet JobConditionType = "SuccessCriteriaMet" ) const ( // JobReasonPodFailurePolicy reason indicates a job failure condition is added due to // a failed pod matching a pod failure policy rule // https://kep.k8s.io/3329 - // This is currently a beta field. 
JobReasonPodFailurePolicy string = "PodFailurePolicy" // JobReasonBackOffLimitExceeded reason indicates that pods within a job have failed a number of // times higher than backOffLimit times. @@ -551,6 +646,16 @@ const ( // JobReasonFailedIndexes means Job has failed indexes. // This const is used in beta-level feature: https://kep.k8s.io/3850. JobReasonFailedIndexes string = "FailedIndexes" + // JobReasonSuccessPolicy reason indicates a SuccessCriteriaMet condition is added due to + // a Job met successPolicy. + // https://kep.k8s.io/3998 + // This is currently a beta field. + JobReasonSuccessPolicy string = "SuccessPolicy" + // JobReasonCompletionsReached reason indicates a SuccessCriteriaMet condition is added due to + // a number of succeeded Job pods met completions. + // - https://kep.k8s.io/3998 + // This is currently a beta field. + JobReasonCompletionsReached string = "CompletionsReached" ) // JobCondition describes current state of a job. @@ -588,6 +693,7 @@ type JobTemplateSpec struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.21 // CronJob represents the configuration of a single cron job. type CronJob struct { @@ -609,6 +715,7 @@ type CronJob struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.21 // CronJobList is a collection of cron jobs. type CronJobList struct { diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index 980f1e4750..d504887884 100644 --- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -115,7 +115,8 @@ var map_JobSpec = map[string]string{ "parallelism": "Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.", - "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is beta-level. 
It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default).", + "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.", + "successPolicy": "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.\n\nThis field is beta-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (enabled by default).", "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6", "backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", "maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", @@ -126,6 +127,7 @@ var map_JobSpec = map[string]string{ "completionMode": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.", "suspend": "suspend specifies whether the Job controller should create Pods or not. 
If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.", "podReplacementPolicy": "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default.", + "managedBy": "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 63 characters. This field is immutable.\n\nThis field is alpha-level. The job controller accepts setting the field when the feature gate JobManagedBy is enabled (disabled by default).", } func (JobSpec) SwaggerDoc() map[string]string { @@ -134,17 +136,17 @@ func (JobSpec) SwaggerDoc() map[string]string { var map_JobStatus = map[string]string{ "": "JobStatus represents the current state of a Job.", - "conditions": "The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", - "startTime": "Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC.", - "completionTime": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is only set when the job finishes successfully.", - "active": "The number of pending and running pods.", - "succeeded": "The number of pods which reached phase Succeeded.", - "failed": "The number of pods which reached phase Failed.", + "conditions": "The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. 
When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true.\n\nA job is considered finished when it is in a terminal condition, either \"Complete\" or \"Failed\". A Job cannot have both the \"Complete\" and \"Failed\" conditions. Additionally, it cannot be in the \"Complete\" and \"FailureTarget\" conditions. The \"Complete\", \"Failed\" and \"FailureTarget\" conditions cannot be disabled.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "startTime": "Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC.\n\nOnce set, the field can only be removed when the job is suspended. The field cannot be modified while the job is unsuspended or finished.", + "completionTime": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is set when the job finishes successfully, and only then. The value cannot be updated or removed. The value indicates the same or later point in time as the startTime field.", + "active": "The number of pending and running pods which are not terminating (without a deletionTimestamp). The value is zero for finished jobs.", + "succeeded": "The number of pods which reached phase Succeeded. The value increases monotonically for a given spec. However, it may decrease in reaction to scale down of elastic indexed jobs.", + "failed": "The number of pods which reached phase Failed. The value increases monotonically.", "terminating": "The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp).\n\nThis field is beta-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (enabled by default).", "completedIndexes": "completedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", - "failedIndexes": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is beta-level. 
It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", - "uncountedTerminatedPods": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null.", - "ready": "The number of pods which have a Ready condition.", + "failedIndexes": "FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". The set of failed indexes cannot overlap with the set of completed indexes.\n\nThis field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", + "uncountedTerminatedPods": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null. The structure is empty for finished jobs.", + "ready": "The number of active pods which have a Ready condition and are not terminating (without a deletionTimestamp).", } func (JobStatus) SwaggerDoc() map[string]string { @@ -202,6 +204,25 @@ func (PodFailurePolicyRule) SwaggerDoc() map[string]string { return map_PodFailurePolicyRule } +var map_SuccessPolicy = map[string]string{ + "": "SuccessPolicy describes when a Job can be declared as succeeded based on the success of some indexes.", + "rules": "rules represents the list of alternative rules for the declaring the Jobs as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, the \"SucceededCriteriaMet\" condition is added, and the lingering pods are removed. The terminal state for such a Job has the \"Complete\" condition. Additionally, these rules are evaluated in order; Once the Job meets one of the rules, other rules are ignored. At most 20 elements are allowed.", +} + +func (SuccessPolicy) SwaggerDoc() map[string]string { + return map_SuccessPolicy +} + +var map_SuccessPolicyRule = map[string]string{ + "": "SuccessPolicyRule describes rule for declaring a Job as succeeded. Each rule must have at least one of the \"succeededIndexes\" or \"succeededCount\" specified.", + "succeededIndexes": "succeededIndexes specifies the set of indexes which need to be contained in the actual set of the succeeded indexes for the Job. 
The list of indexes must be within 0 to \".spec.completions-1\" and must not contain duplicates. At least one element is required. The indexes are represented as intervals separated by commas. The intervals can be a decimal integer or a pair of decimal integers separated by a hyphen. The number are listed in represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". When this field is null, this field doesn't default to any value and is never evaluated at any time.", + "succeededCount": "succeededCount specifies the minimal required size of the actual set of the succeeded indexes for the Job. When succeededCount is used along with succeededIndexes, the check is constrained only to the set of indexes specified by succeededIndexes. For example, given that succeededIndexes is \"1-4\", succeededCount is \"3\", and completed indexes are \"1\", \"3\", and \"5\", the Job isn't declared as succeeded because only \"1\" and \"3\" indexes are considered in that rules. When this field is null, this doesn't default to any value and is never evaluated at any time. When specified it needs to be a positive integer.", +} + +func (SuccessPolicyRule) SwaggerDoc() map[string]string { + return map_SuccessPolicyRule +} + var map_UncountedTerminatedPods = map[string]string{ "": "UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted in Job status counters.", "succeeded": "succeeded holds UIDs of succeeded Pods.", diff --git a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go index 43fc41515b..88c58b3d11 100644 --- a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go @@ -262,6 +262,11 @@ func (in *JobSpec) DeepCopyInto(out *JobSpec) { *out = new(PodFailurePolicy) (*in).DeepCopyInto(*out) } + if in.SuccessPolicy != nil { + in, out := &in.SuccessPolicy, &out.SuccessPolicy + *out = new(SuccessPolicy) + (*in).DeepCopyInto(*out) + } if in.BackoffLimit != nil { in, out := &in.BackoffLimit, &out.BackoffLimit *out = new(int32) @@ -308,6 +313,11 @@ func (in *JobSpec) DeepCopyInto(out *JobSpec) { *out = new(PodReplacementPolicy) **out = **in } + if in.ManagedBy != nil { + in, out := &in.ManagedBy, &out.ManagedBy + *out = new(string) + **out = **in + } return } @@ -481,6 +491,55 @@ func (in *PodFailurePolicyRule) DeepCopy() *PodFailurePolicyRule { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SuccessPolicy) DeepCopyInto(out *SuccessPolicy) { + *out = *in + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]SuccessPolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuccessPolicy. +func (in *SuccessPolicy) DeepCopy() *SuccessPolicy { + if in == nil { + return nil + } + out := new(SuccessPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SuccessPolicyRule) DeepCopyInto(out *SuccessPolicyRule) { + *out = *in + if in.SucceededIndexes != nil { + in, out := &in.SucceededIndexes, &out.SucceededIndexes + *out = new(string) + **out = **in + } + if in.SucceededCount != nil { + in, out := &in.SucceededCount, &out.SucceededCount + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuccessPolicyRule. +func (in *SuccessPolicyRule) DeepCopy() *SuccessPolicyRule { + if in == nil { + return nil + } + out := new(SuccessPolicyRule) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *UncountedTerminatedPods) DeepCopyInto(out *UncountedTerminatedPods) { *out = *in diff --git a/vendor/k8s.io/api/batch/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/batch/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..b76cb09249 --- /dev/null +++ b/vendor/k8s.io/api/batch/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,46 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CronJob) APILifecycleIntroduced() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CronJobList) APILifecycleIntroduced() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Job) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *JobList) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go index 03feb2ceaf..895d9c9196 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. 
DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto +// source: k8s.io/api/batch/v1beta1/generated.proto package v1beta1 @@ -48,7 +48,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *CronJob) Reset() { *m = CronJob{} } func (*CronJob) ProtoMessage() {} func (*CronJob) Descriptor() ([]byte, []int) { - return fileDescriptor_e57b277b05179ae7, []int{0} + return fileDescriptor_ed95843ae7b4086b, []int{0} } func (m *CronJob) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -76,7 +76,7 @@ var xxx_messageInfo_CronJob proto.InternalMessageInfo func (m *CronJobList) Reset() { *m = CronJobList{} } func (*CronJobList) ProtoMessage() {} func (*CronJobList) Descriptor() ([]byte, []int) { - return fileDescriptor_e57b277b05179ae7, []int{1} + return fileDescriptor_ed95843ae7b4086b, []int{1} } func (m *CronJobList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -104,7 +104,7 @@ var xxx_messageInfo_CronJobList proto.InternalMessageInfo func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } func (*CronJobSpec) ProtoMessage() {} func (*CronJobSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e57b277b05179ae7, []int{2} + return fileDescriptor_ed95843ae7b4086b, []int{2} } func (m *CronJobSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -132,7 +132,7 @@ var xxx_messageInfo_CronJobSpec proto.InternalMessageInfo func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } func (*CronJobStatus) ProtoMessage() {} func (*CronJobStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e57b277b05179ae7, []int{3} + return fileDescriptor_ed95843ae7b4086b, []int{3} } func (m *CronJobStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -160,7 +160,7 @@ var xxx_messageInfo_CronJobStatus proto.InternalMessageInfo func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } func (*JobTemplateSpec) ProtoMessage() {} func (*JobTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e57b277b05179ae7, []int{4} + return fileDescriptor_ed95843ae7b4086b, []int{4} } func (m *JobTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -194,60 +194,59 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto", fileDescriptor_e57b277b05179ae7) + proto.RegisterFile("k8s.io/api/batch/v1beta1/generated.proto", fileDescriptor_ed95843ae7b4086b) } -var fileDescriptor_e57b277b05179ae7 = []byte{ - // 787 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x95, 0x41, 0x6f, 0x1b, 0x45, - 0x14, 0xc7, 0xbd, 0x49, 0x9c, 0xb8, 0xe3, 0x16, 0xd2, 0x01, 0xa5, 0x2b, 0x83, 0xd6, 0xc1, 0x55, - 0x85, 0x41, 0x30, 0x4b, 0x22, 0x84, 0x38, 0x55, 0xea, 0x16, 0x15, 0x08, 0x41, 0x45, 0xe3, 0x72, - 0xa9, 0x2a, 0xd4, 0xd9, 0xd9, 0x17, 0x67, 0x9a, 0xdd, 0x9d, 0xd5, 0xce, 0x6c, 0xa4, 0xdc, 0xb8, - 0x70, 0xe7, 0xbb, 0x70, 0xe7, 0x9c, 0x63, 0x6f, 0xf4, 0xb4, 0x22, 0xcb, 0xb7, 0xe0, 0x84, 0x66, - 0xbc, 0xb1, 0x5d, 0x7b, 0xdd, 0x84, 0x4b, 0x6f, 0x9e, 0x37, 0xff, 0xff, 0x6f, 0x9e, 0xde, 0x7b, - 0xfb, 0x8c, 0x1e, 0x9c, 0x7c, 0xad, 0x88, 0x90, 0xfe, 0x49, 0x11, 0x42, 0x9e, 0x82, 0x06, 0xe5, - 0x9f, 0x42, 0x1a, 0xc9, 0xdc, 0xaf, 0x2f, 0x58, 0x26, 0xfc, 0x90, 0x69, 0x7e, 0xec, 0x9f, 0xee, - 0x85, 0xa0, 0xd9, 0x9e, 0x3f, 0x86, 0x14, 0x72, 0xa6, 0x21, 0x22, 0x59, 0x2e, 0xb5, 0xc4, 0xee, - 0x44, 0x49, 0x58, 0x26, 0x88, 0x55, 0x92, 0x5a, 0xd9, 0xfb, 0x7c, 0x2c, 0xf4, 0x71, 0x11, 0x12, - 0x2e, 0x13, 0x7f, 
0x2c, 0xc7, 0xd2, 0xb7, 0x86, 0xb0, 0x38, 0xb2, 0x27, 0x7b, 0xb0, 0xbf, 0x26, - 0xa0, 0xde, 0xdd, 0x86, 0x27, 0x17, 0x5f, 0xeb, 0x0d, 0xe6, 0x44, 0x5c, 0xe6, 0xd0, 0xa4, 0xf9, - 0x72, 0xa6, 0x49, 0x18, 0x3f, 0x16, 0x29, 0xe4, 0x67, 0x7e, 0x76, 0x32, 0x36, 0x01, 0xe5, 0x27, - 0xa0, 0x59, 0x93, 0xcb, 0x5f, 0xe5, 0xca, 0x8b, 0x54, 0x8b, 0x04, 0x96, 0x0c, 0x5f, 0x5d, 0x65, - 0x50, 0xfc, 0x18, 0x12, 0xb6, 0xe8, 0x1b, 0xfc, 0xb6, 0x86, 0xb6, 0x1e, 0xe6, 0x32, 0x3d, 0x90, - 0x21, 0x7e, 0x8e, 0x3a, 0x26, 0x9f, 0x88, 0x69, 0xe6, 0x3a, 0xbb, 0xce, 0xb0, 0xbb, 0xff, 0x05, - 0x99, 0xd5, 0x73, 0x8a, 0x25, 0xd9, 0xc9, 0xd8, 0x04, 0x14, 0x31, 0x6a, 0x72, 0xba, 0x47, 0x1e, - 0x87, 0x2f, 0x80, 0xeb, 0x1f, 0x41, 0xb3, 0x00, 0x9f, 0x97, 0xfd, 0x56, 0x55, 0xf6, 0xd1, 0x2c, - 0x46, 0xa7, 0x54, 0xfc, 0x2d, 0xda, 0x50, 0x19, 0x70, 0x77, 0xcd, 0xd2, 0xef, 0x91, 0x55, 0xdd, - 0x22, 0x75, 0x4a, 0xa3, 0x0c, 0x78, 0x70, 0xb3, 0x46, 0x6e, 0x98, 0x13, 0xb5, 0x00, 0xfc, 0x18, - 0x6d, 0x2a, 0xcd, 0x74, 0xa1, 0xdc, 0x75, 0x8b, 0xfa, 0xf8, 0x6a, 0x94, 0x95, 0x07, 0xef, 0xd4, - 0xb0, 0xcd, 0xc9, 0x99, 0xd6, 0x98, 0xc1, 0x1f, 0x0e, 0xea, 0xd6, 0xca, 0x43, 0xa1, 0x34, 0x7e, - 0xb6, 0x54, 0x0b, 0x72, 0xbd, 0x5a, 0x18, 0xb7, 0xad, 0xc4, 0x76, 0xfd, 0x52, 0xe7, 0x32, 0x32, - 0x57, 0x87, 0x47, 0xa8, 0x2d, 0x34, 0x24, 0xca, 0x5d, 0xdb, 0x5d, 0x1f, 0x76, 0xf7, 0x3f, 0xba, - 0x32, 0xfb, 0xe0, 0x56, 0x4d, 0x6b, 0x7f, 0x6f, 0x7c, 0x74, 0x62, 0x1f, 0xfc, 0xb5, 0x31, 0xcd, - 0xda, 0x14, 0x07, 0x7f, 0x86, 0x3a, 0xa6, 0xcf, 0x51, 0x11, 0x83, 0xcd, 0xfa, 0xc6, 0x2c, 0x8b, - 0x51, 0x1d, 0xa7, 0x53, 0x05, 0x1e, 0xa2, 0x8e, 0x19, 0x8d, 0xa7, 0x32, 0x05, 0xb7, 0x63, 0xd5, - 0x37, 0x8d, 0xf2, 0x49, 0x1d, 0xa3, 0xd3, 0x5b, 0xfc, 0x33, 0xba, 0xa3, 0x34, 0xcb, 0xb5, 0x48, - 0xc7, 0xdf, 0x00, 0x8b, 0x62, 0x91, 0xc2, 0x08, 0xb8, 0x4c, 0x23, 0x65, 0x5b, 0xb9, 0x1e, 0x7c, - 0x50, 0x95, 0xfd, 0x3b, 0xa3, 0x66, 0x09, 0x5d, 0xe5, 0xc5, 0xcf, 0xd0, 0x6d, 0x2e, 0x53, 0x5e, - 0xe4, 0x39, 0xa4, 0xfc, 0xec, 0x27, 0x19, 0x0b, 0x7e, 0x66, 0x1b, 0x7a, 0x23, 0x20, 0x75, 0xde, - 0xb7, 0x1f, 0x2e, 0x0a, 0xfe, 0x6d, 0x0a, 0xd2, 0x65, 0x10, 0xbe, 0x87, 0xb6, 0x54, 0xa1, 0x32, - 0x48, 0x23, 0x77, 0x63, 0xd7, 0x19, 0x76, 0x82, 0x6e, 0x55, 0xf6, 0xb7, 0x46, 0x93, 0x10, 0xbd, - 0xbc, 0xc3, 0xcf, 0x51, 0xf7, 0x85, 0x0c, 0x9f, 0x40, 0x92, 0xc5, 0x4c, 0x83, 0xdb, 0xb6, 0xcd, - 0xfe, 0x64, 0x75, 0x47, 0x0e, 0x66, 0x62, 0x3b, 0x9e, 0xef, 0xd5, 0x99, 0x76, 0xe7, 0x2e, 0xe8, - 0x3c, 0x12, 0xff, 0x82, 0x7a, 0xaa, 0xe0, 0x1c, 0x94, 0x3a, 0x2a, 0xe2, 0x03, 0x19, 0xaa, 0xef, - 0x84, 0xd2, 0x32, 0x3f, 0x3b, 0x14, 0x89, 0xd0, 0xee, 0xe6, 0xae, 0x33, 0x6c, 0x07, 0x5e, 0x55, - 0xf6, 0x7b, 0xa3, 0x95, 0x2a, 0xfa, 0x06, 0x02, 0xa6, 0x68, 0xe7, 0x88, 0x89, 0x18, 0xa2, 0x25, - 0xf6, 0x96, 0x65, 0xf7, 0xaa, 0xb2, 0xbf, 0xf3, 0xa8, 0x51, 0x41, 0x57, 0x38, 0x07, 0x7f, 0xae, - 0xa1, 0x5b, 0xaf, 0x7d, 0x39, 0xf8, 0x07, 0xb4, 0xc9, 0xb8, 0x16, 0xa7, 0x66, 0xb2, 0xcc, 0xd0, - 0xde, 0x9d, 0x2f, 0x91, 0xd9, 0x7e, 0xb3, 0x4d, 0x40, 0xe1, 0x08, 0x4c, 0x27, 0x60, 0xf6, 0xb9, - 0x3d, 0xb0, 0x56, 0x5a, 0x23, 0x70, 0x8c, 0xb6, 0x63, 0xa6, 0xf4, 0xe5, 0x50, 0x9a, 0x91, 0xb3, - 0x4d, 0xea, 0xee, 0x7f, 0x7a, 0xbd, 0xcf, 0xcc, 0x38, 0x82, 0xf7, 0xab, 0xb2, 0xbf, 0x7d, 0xb8, - 0xc0, 0xa1, 0x4b, 0x64, 0x9c, 0x23, 0x6c, 0x63, 0xd3, 0x12, 0xda, 0xf7, 0xda, 0xff, 0xfb, 0xbd, - 0x9d, 0xaa, 0xec, 0xe3, 0xc3, 0x25, 0x12, 0x6d, 0xa0, 0x9b, 0x85, 0xf2, 0xee, 0xc2, 0xa8, 0xbc, - 0x85, 0x05, 0x7b, 0xff, 0xb5, 0x05, 0xfb, 0x61, 0xd3, 0x14, 0x93, 0x37, 0xec, 0xd5, 0xe0, 0xfe, - 0xf9, 0x85, 0xd7, 0x7a, 0x79, 0xe1, 0xb5, 
0x5e, 0x5d, 0x78, 0xad, 0x5f, 0x2b, 0xcf, 0x39, 0xaf, - 0x3c, 0xe7, 0x65, 0xe5, 0x39, 0xaf, 0x2a, 0xcf, 0xf9, 0xbb, 0xf2, 0x9c, 0xdf, 0xff, 0xf1, 0x5a, - 0x4f, 0xdd, 0x55, 0xff, 0xc7, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x61, 0x72, 0xc3, 0xe0, 0xc3, +var fileDescriptor_ed95843ae7b4086b = []byte{ + // 771 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x95, 0xcf, 0x8f, 0xdb, 0x44, + 0x14, 0xc7, 0xe3, 0x6c, 0x7e, 0x75, 0xd2, 0xc2, 0x76, 0x40, 0x5b, 0x2b, 0x20, 0x3b, 0xa4, 0xaa, + 0x08, 0x08, 0xc6, 0xec, 0x0a, 0x21, 0x4e, 0x95, 0x70, 0x51, 0x81, 0x25, 0xa8, 0x68, 0x52, 0x2e, + 0x55, 0x85, 0x3a, 0x9e, 0x4c, 0x92, 0xe9, 0xda, 0x1e, 0xcb, 0x33, 0x5e, 0x29, 0x37, 0x2e, 0xdc, + 0xf9, 0x5f, 0xb8, 0x73, 0xde, 0x63, 0x6f, 0xf4, 0x64, 0xb1, 0xe6, 0xbf, 0xe0, 0x84, 0x66, 0xe2, + 0x4d, 0xd2, 0xc4, 0xe9, 0x96, 0x0b, 0xb7, 0xcc, 0xf3, 0xf7, 0xfb, 0x99, 0xa7, 0xf7, 0xde, 0xbc, + 0x80, 0xe1, 0xd9, 0x97, 0x12, 0x71, 0xe1, 0x91, 0x84, 0x7b, 0x01, 0x51, 0x74, 0xee, 0x9d, 0x1f, + 0x07, 0x4c, 0x91, 0x63, 0x6f, 0xc6, 0x62, 0x96, 0x12, 0xc5, 0x26, 0x28, 0x49, 0x85, 0x12, 0xd0, + 0x5e, 0x2a, 0x11, 0x49, 0x38, 0x32, 0x4a, 0x54, 0x2a, 0x7b, 0x9f, 0xce, 0xb8, 0x9a, 0x67, 0x01, + 0xa2, 0x22, 0xf2, 0x66, 0x62, 0x26, 0x3c, 0x63, 0x08, 0xb2, 0xa9, 0x39, 0x99, 0x83, 0xf9, 0xb5, + 0x04, 0xf5, 0xee, 0x56, 0x5c, 0xb9, 0x7d, 0x5b, 0x6f, 0xb0, 0x21, 0xa2, 0x22, 0x65, 0x55, 0x9a, + 0xcf, 0xd7, 0x9a, 0x88, 0xd0, 0x39, 0x8f, 0x59, 0xba, 0xf0, 0x92, 0xb3, 0x99, 0x0e, 0x48, 0x2f, + 0x62, 0x8a, 0x54, 0xb9, 0xbc, 0x7d, 0xae, 0x34, 0x8b, 0x15, 0x8f, 0xd8, 0x8e, 0xe1, 0x8b, 0xeb, + 0x0c, 0x92, 0xce, 0x59, 0x44, 0xb6, 0x7d, 0x83, 0x5f, 0xeb, 0xa0, 0xfd, 0x20, 0x15, 0xf1, 0xa9, + 0x08, 0xe0, 0x33, 0xd0, 0xd1, 0xf9, 0x4c, 0x88, 0x22, 0xb6, 0xd5, 0xb7, 0x86, 0xdd, 0x93, 0xcf, + 0xd0, 0xba, 0x9e, 0x2b, 0x2c, 0x4a, 0xce, 0x66, 0x3a, 0x20, 0x91, 0x56, 0xa3, 0xf3, 0x63, 0xf4, + 0x28, 0x78, 0xce, 0xa8, 0xfa, 0x81, 0x29, 0xe2, 0xc3, 0x8b, 0xdc, 0xad, 0x15, 0xb9, 0x0b, 0xd6, + 0x31, 0xbc, 0xa2, 0xc2, 0x6f, 0x40, 0x43, 0x26, 0x8c, 0xda, 0x75, 0x43, 0xbf, 0x87, 0xf6, 0x75, + 0x0b, 0x95, 0x29, 0x8d, 0x13, 0x46, 0xfd, 0x9b, 0x25, 0xb2, 0xa1, 0x4f, 0xd8, 0x00, 0xe0, 0x23, + 0xd0, 0x92, 0x8a, 0xa8, 0x4c, 0xda, 0x07, 0x06, 0xf5, 0xe1, 0xf5, 0x28, 0x23, 0xf7, 0xdf, 0x2a, + 0x61, 0xad, 0xe5, 0x19, 0x97, 0x98, 0xc1, 0xef, 0x16, 0xe8, 0x96, 0xca, 0x11, 0x97, 0x0a, 0x3e, + 0xdd, 0xa9, 0x05, 0x7a, 0xb3, 0x5a, 0x68, 0xb7, 0xa9, 0xc4, 0x61, 0x79, 0x53, 0xe7, 0x2a, 0xb2, + 0x51, 0x87, 0x87, 0xa0, 0xc9, 0x15, 0x8b, 0xa4, 0x5d, 0xef, 0x1f, 0x0c, 0xbb, 0x27, 0x1f, 0x5c, + 0x9b, 0xbd, 0x7f, 0xab, 0xa4, 0x35, 0xbf, 0xd3, 0x3e, 0xbc, 0xb4, 0x0f, 0xfe, 0x6c, 0xac, 0xb2, + 0xd6, 0xc5, 0x81, 0x9f, 0x80, 0x8e, 0xee, 0xf3, 0x24, 0x0b, 0x99, 0xc9, 0xfa, 0xc6, 0x3a, 0x8b, + 0x71, 0x19, 0xc7, 0x2b, 0x05, 0x1c, 0x82, 0x8e, 0x1e, 0x8d, 0x27, 0x22, 0x66, 0x76, 0xc7, 0xa8, + 0x6f, 0x6a, 0xe5, 0xe3, 0x32, 0x86, 0x57, 0x5f, 0xe1, 0x4f, 0xe0, 0x8e, 0x54, 0x24, 0x55, 0x3c, + 0x9e, 0x7d, 0xcd, 0xc8, 0x24, 0xe4, 0x31, 0x1b, 0x33, 0x2a, 0xe2, 0x89, 0x34, 0xad, 0x3c, 0xf0, + 0xdf, 0x2b, 0x72, 0xf7, 0xce, 0xb8, 0x5a, 0x82, 0xf7, 0x79, 0xe1, 0x53, 0x70, 0x9b, 0x8a, 0x98, + 0x66, 0x69, 0xca, 0x62, 0xba, 0xf8, 0x51, 0x84, 0x9c, 0x2e, 0x4c, 0x43, 0x6f, 0xf8, 0xa8, 0xcc, + 0xfb, 0xf6, 0x83, 0x6d, 0xc1, 0x3f, 0x55, 0x41, 0xbc, 0x0b, 0x82, 0xf7, 0x40, 0x5b, 0x66, 0x32, + 0x61, 0xf1, 0xc4, 0x6e, 0xf4, 0xad, 0x61, 0xc7, 0xef, 0x16, 0xb9, 0xdb, 0x1e, 0x2f, 0x43, 0xf8, + 0xea, 0x1b, 0x7c, 0x06, 0xba, 0xcf, 0x45, 0xf0, 0x98, 0x45, 0x49, 0x48, 
0x14, 0xb3, 0x9b, 0xa6, + 0xd9, 0x1f, 0xed, 0xef, 0xc8, 0xe9, 0x5a, 0x6c, 0xc6, 0xf3, 0x9d, 0x32, 0xd3, 0xee, 0xc6, 0x07, + 0xbc, 0x89, 0x84, 0x3f, 0x83, 0x9e, 0xcc, 0x28, 0x65, 0x52, 0x4e, 0xb3, 0xf0, 0x54, 0x04, 0xf2, + 0x5b, 0x2e, 0x95, 0x48, 0x17, 0x23, 0x1e, 0x71, 0x65, 0xb7, 0xfa, 0xd6, 0xb0, 0xe9, 0x3b, 0x45, + 0xee, 0xf6, 0xc6, 0x7b, 0x55, 0xf8, 0x35, 0x04, 0x88, 0xc1, 0xd1, 0x94, 0xf0, 0x90, 0x4d, 0x76, + 0xd8, 0x6d, 0xc3, 0xee, 0x15, 0xb9, 0x7b, 0xf4, 0xb0, 0x52, 0x81, 0xf7, 0x38, 0x07, 0x7f, 0xd4, + 0xc1, 0xad, 0x57, 0x5e, 0x0e, 0xfc, 0x1e, 0xb4, 0x08, 0x55, 0xfc, 0x5c, 0x4f, 0x96, 0x1e, 0xda, + 0xbb, 0x9b, 0x25, 0xd2, 0xdb, 0x6f, 0xbd, 0x09, 0x30, 0x9b, 0x32, 0xdd, 0x09, 0xb6, 0x7e, 0x6e, + 0x5f, 0x19, 0x2b, 0x2e, 0x11, 0x30, 0x04, 0x87, 0x21, 0x91, 0xea, 0x6a, 0x28, 0xf5, 0xc8, 0x99, + 0x26, 0x75, 0x4f, 0x3e, 0x7e, 0xb3, 0x67, 0xa6, 0x1d, 0xfe, 0xbb, 0x45, 0xee, 0x1e, 0x8e, 0xb6, + 0x38, 0x78, 0x87, 0x0c, 0x53, 0x00, 0x4d, 0x6c, 0x55, 0x42, 0x73, 0x5f, 0xf3, 0x3f, 0xdf, 0x77, + 0x54, 0xe4, 0x2e, 0x1c, 0xed, 0x90, 0x70, 0x05, 0x5d, 0x2f, 0x94, 0xb7, 0xb7, 0x46, 0xe5, 0x7f, + 0x58, 0xb0, 0xf7, 0x5f, 0x59, 0xb0, 0xef, 0x57, 0x4d, 0x31, 0x7a, 0xcd, 0x5e, 0xf5, 0xef, 0x5f, + 0x5c, 0x3a, 0xb5, 0x17, 0x97, 0x4e, 0xed, 0xe5, 0xa5, 0x53, 0xfb, 0xa5, 0x70, 0xac, 0x8b, 0xc2, + 0xb1, 0x5e, 0x14, 0x8e, 0xf5, 0xb2, 0x70, 0xac, 0xbf, 0x0a, 0xc7, 0xfa, 0xed, 0x6f, 0xa7, 0xf6, + 0xc4, 0xde, 0xf7, 0x7f, 0xfc, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9e, 0xaa, 0x2c, 0x86, 0xaa, 0x07, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.proto b/vendor/k8s.io/api/batch/v1beta1/generated.proto index ac774f19ad..6dd322128d 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.proto +++ b/vendor/k8s.io/api/batch/v1beta1/generated.proto @@ -35,7 +35,7 @@ message CronJob { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a cron job, including the schedule. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -53,7 +53,7 @@ message CronJobList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CronJobs. repeated CronJob items = 2; @@ -116,15 +116,15 @@ message CronJobStatus { // A list of pointers to currently running jobs. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.ObjectReference active = 1; + repeated .k8s.io.api.core.v1.ObjectReference active = 1; // Information when was the last time the job was successfully scheduled. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4; // Information when was the last time the job successfully completed. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastSuccessfulTime = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastSuccessfulTime = 5; } // JobTemplateSpec describes the data a Job should have when created from a template @@ -132,11 +132,11 @@ message JobTemplateSpec { // Standard object's metadata of the jobs created from this template. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the job. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional - optional k8s.io.api.batch.v1.JobSpec spec = 2; + optional .k8s.io.api.batch.v1.JobSpec spec = 2; } diff --git a/vendor/k8s.io/api/certificates/v1/doc.go b/vendor/k8s.io/api/certificates/v1/doc.go index fe3ea3af87..78434478e8 100644 --- a/vendor/k8s.io/api/certificates/v1/doc.go +++ b/vendor/k8s.io/api/certificates/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=certificates.k8s.io package v1 // import "k8s.io/api/certificates/v1" diff --git a/vendor/k8s.io/api/certificates/v1/generated.pb.go b/vendor/k8s.io/api/certificates/v1/generated.pb.go index 37859babcd..cba4a8ea49 100644 --- a/vendor/k8s.io/api/certificates/v1/generated.pb.go +++ b/vendor/k8s.io/api/certificates/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1/generated.proto +// source: k8s.io/api/certificates/v1/generated.proto package v1 @@ -49,7 +49,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *CertificateSigningRequest) Reset() { *m = CertificateSigningRequest{} } func (*CertificateSigningRequest) ProtoMessage() {} func (*CertificateSigningRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_17e045d0de66f3c7, []int{0} + return fileDescriptor_5f7d41da689f96f7, []int{0} } func (m *CertificateSigningRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -77,7 +77,7 @@ var xxx_messageInfo_CertificateSigningRequest proto.InternalMessageInfo func (m *CertificateSigningRequestCondition) Reset() { *m = CertificateSigningRequestCondition{} } func (*CertificateSigningRequestCondition) ProtoMessage() {} func (*CertificateSigningRequestCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_17e045d0de66f3c7, []int{1} + return fileDescriptor_5f7d41da689f96f7, []int{1} } func (m *CertificateSigningRequestCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -105,7 +105,7 @@ var xxx_messageInfo_CertificateSigningRequestCondition proto.InternalMessageInfo func (m *CertificateSigningRequestList) Reset() { *m = CertificateSigningRequestList{} } func (*CertificateSigningRequestList) ProtoMessage() {} func (*CertificateSigningRequestList) Descriptor() ([]byte, []int) { - return fileDescriptor_17e045d0de66f3c7, []int{2} + return fileDescriptor_5f7d41da689f96f7, []int{2} } func (m *CertificateSigningRequestList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -133,7 +133,7 @@ var xxx_messageInfo_CertificateSigningRequestList proto.InternalMessageInfo func (m 
*CertificateSigningRequestSpec) Reset() { *m = CertificateSigningRequestSpec{} } func (*CertificateSigningRequestSpec) ProtoMessage() {} func (*CertificateSigningRequestSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_17e045d0de66f3c7, []int{3} + return fileDescriptor_5f7d41da689f96f7, []int{3} } func (m *CertificateSigningRequestSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -161,7 +161,7 @@ var xxx_messageInfo_CertificateSigningRequestSpec proto.InternalMessageInfo func (m *CertificateSigningRequestStatus) Reset() { *m = CertificateSigningRequestStatus{} } func (*CertificateSigningRequestStatus) ProtoMessage() {} func (*CertificateSigningRequestStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_17e045d0de66f3c7, []int{4} + return fileDescriptor_5f7d41da689f96f7, []int{4} } func (m *CertificateSigningRequestStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -189,7 +189,7 @@ var xxx_messageInfo_CertificateSigningRequestStatus proto.InternalMessageInfo func (m *ExtraValue) Reset() { *m = ExtraValue{} } func (*ExtraValue) ProtoMessage() {} func (*ExtraValue) Descriptor() ([]byte, []int) { - return fileDescriptor_17e045d0de66f3c7, []int{5} + return fileDescriptor_5f7d41da689f96f7, []int{5} } func (m *ExtraValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -225,68 +225,67 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1/generated.proto", fileDescriptor_17e045d0de66f3c7) + proto.RegisterFile("k8s.io/api/certificates/v1/generated.proto", fileDescriptor_5f7d41da689f96f7) } -var fileDescriptor_17e045d0de66f3c7 = []byte{ - // 910 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdf, 0x6e, 0x1b, 0xc5, - 0x17, 0xf6, 0xc6, 0x7f, 0x62, 0x8f, 0xf3, 0x4b, 0xdb, 0xd1, 0x8f, 0x6a, 0xb1, 0x54, 0xaf, 0xb5, - 0x82, 0x2a, 0x20, 0xd8, 0x25, 0x51, 0x81, 0x50, 0x10, 0x42, 0x9b, 0x46, 0xa8, 0x22, 0x05, 0x69, - 0x92, 0x70, 0x51, 0xb8, 0xe8, 0x64, 0x7d, 0xba, 0x99, 0xba, 0xfb, 0x87, 0x99, 0x59, 0xab, 0xbe, - 0xeb, 0x23, 0x70, 0xc9, 0x25, 0x2f, 0xc0, 0x33, 0x70, 0x9b, 0xcb, 0x5e, 0x16, 0x09, 0x59, 0xc4, - 0x7d, 0x8b, 0x5c, 0xa1, 0x99, 0x1d, 0xaf, 0x1d, 0x27, 0x6e, 0x4b, 0xee, 0x76, 0xce, 0xf9, 0xce, - 0xf7, 0x9d, 0x73, 0xe6, 0x9c, 0xd1, 0xa2, 0x9d, 0xc1, 0xb6, 0xf0, 0x58, 0xea, 0x0f, 0xf2, 0x23, - 0xe0, 0x09, 0x48, 0x10, 0xfe, 0x10, 0x92, 0x7e, 0xca, 0x7d, 0xe3, 0xa0, 0x19, 0xf3, 0x43, 0xe0, - 0x92, 0x3d, 0x66, 0x21, 0xd5, 0xee, 0x4d, 0x3f, 0x82, 0x04, 0x38, 0x95, 0xd0, 0xf7, 0x32, 0x9e, - 0xca, 0x14, 0x77, 0x0a, 0xac, 0x47, 0x33, 0xe6, 0xcd, 0x63, 0xbd, 0xe1, 0x66, 0xe7, 0xe3, 0x88, - 0xc9, 0xe3, 0xfc, 0xc8, 0x0b, 0xd3, 0xd8, 0x8f, 0xd2, 0x28, 0xf5, 0x75, 0xc8, 0x51, 0xfe, 0x58, - 0x9f, 0xf4, 0x41, 0x7f, 0x15, 0x54, 0x1d, 0x77, 0x5e, 0x36, 0xe5, 0x70, 0x89, 0x5c, 0xe7, 0xce, - 0x0c, 0x13, 0xd3, 0xf0, 0x98, 0x25, 0xc0, 0x47, 0x7e, 0x36, 0x88, 0x94, 0x41, 0xf8, 0x31, 0x48, - 0x7a, 0x59, 0x94, 0xbf, 0x2c, 0x8a, 0xe7, 0x89, 0x64, 0x31, 0x5c, 0x08, 0xf8, 0xec, 0x4d, 0x01, - 0x22, 0x3c, 0x86, 0x98, 0x2e, 0xc6, 0xb9, 0x7f, 0xae, 0xa0, 0x77, 0x77, 0x66, 0x5d, 0xd8, 0x67, - 0x51, 0xc2, 0x92, 0x88, 0xc0, 0x2f, 0x39, 0x08, 0x89, 0x1f, 0xa1, 0xa6, 0xca, 0xb0, 0x4f, 0x25, - 0xb5, 0xad, 0x9e, 0xb5, 0xd1, 0xde, 0xfa, 0xc4, 0x9b, 0xb5, 0xaf, 0x14, 0xf2, 0xb2, 0x41, 0xa4, - 0x0c, 0xc2, 0x53, 0x68, 0x6f, 0xb8, 0xe9, 0xfd, 0x70, 0xf4, 0x04, 0x42, 0xf9, 0x00, 0x24, 0x0d, - 0xf0, 0xc9, 0xd8, 0xa9, 0x4c, 0xc6, 0x0e, 0x9a, 0xd9, 0x48, 0xc9, 0x8a, 0x7f, 
0x42, 0x35, 0x91, - 0x41, 0x68, 0xaf, 0x68, 0xf6, 0x2f, 0xbc, 0xe5, 0x97, 0xe3, 0x2d, 0x4d, 0x73, 0x3f, 0x83, 0x30, - 0x58, 0x33, 0x32, 0x35, 0x75, 0x22, 0x9a, 0x14, 0x87, 0xa8, 0x21, 0x24, 0x95, 0xb9, 0xb0, 0xab, - 0x9a, 0xfe, 0xcb, 0xab, 0xd1, 0x6b, 0x8a, 0x60, 0xdd, 0x08, 0x34, 0x8a, 0x33, 0x31, 0xd4, 0xee, - 0xab, 0x2a, 0x72, 0x97, 0xc6, 0xee, 0xa4, 0x49, 0x9f, 0x49, 0x96, 0x26, 0x78, 0x1b, 0xd5, 0xe4, - 0x28, 0x03, 0xdd, 0xc6, 0x56, 0xf0, 0xde, 0x34, 0xdb, 0x83, 0x51, 0x06, 0x67, 0x63, 0xe7, 0xff, - 0x8b, 0x78, 0x65, 0x27, 0x3a, 0x02, 0xef, 0x95, 0x55, 0x34, 0x74, 0xec, 0x9d, 0xf3, 0x89, 0x9c, - 0x8d, 0x9d, 0x4b, 0xe6, 0xd0, 0x2b, 0x99, 0xce, 0xa7, 0x8b, 0x6f, 0xa3, 0x06, 0x07, 0x2a, 0xd2, - 0x44, 0xb7, 0xbc, 0x35, 0x2b, 0x8b, 0x68, 0x2b, 0x31, 0x5e, 0xfc, 0x01, 0x5a, 0x8d, 0x41, 0x08, - 0x1a, 0x81, 0x6e, 0x5e, 0x2b, 0xb8, 0x66, 0x80, 0xab, 0x0f, 0x0a, 0x33, 0x99, 0xfa, 0xf1, 0x13, - 0xb4, 0xfe, 0x94, 0x0a, 0x79, 0x98, 0xf5, 0xa9, 0x84, 0x03, 0x16, 0x83, 0x5d, 0xd3, 0xed, 0xfe, - 0xf0, 0xed, 0x66, 0x45, 0x45, 0x04, 0x37, 0x0d, 0xfb, 0xfa, 0xde, 0x39, 0x26, 0xb2, 0xc0, 0x8c, - 0x87, 0x08, 0x2b, 0xcb, 0x01, 0xa7, 0x89, 0x28, 0x1a, 0xa5, 0xf4, 0xea, 0xff, 0x59, 0xaf, 0x63, - 0xf4, 0xf0, 0xde, 0x05, 0x36, 0x72, 0x89, 0x82, 0xfb, 0x97, 0x85, 0x6e, 0x2d, 0xbd, 0xe5, 0x3d, - 0x26, 0x24, 0xfe, 0xf9, 0xc2, 0xae, 0x78, 0x6f, 0x97, 0x8f, 0x8a, 0xd6, 0x9b, 0x72, 0xdd, 0xe4, - 0xd4, 0x9c, 0x5a, 0xe6, 0xf6, 0xe4, 0x21, 0xaa, 0x33, 0x09, 0xb1, 0xb0, 0x57, 0x7a, 0xd5, 0x8d, - 0xf6, 0xd6, 0xa7, 0x57, 0x9a, 0xe4, 0xe0, 0x7f, 0x46, 0xa1, 0x7e, 0x5f, 0x71, 0x91, 0x82, 0xd2, - 0xfd, 0xa3, 0xf6, 0x9a, 0xda, 0xd4, 0x3a, 0xe1, 0xf7, 0xd1, 0x2a, 0x2f, 0x8e, 0xba, 0xb4, 0xb5, - 0xa0, 0xad, 0x06, 0xc1, 0x20, 0xc8, 0xd4, 0x87, 0xb7, 0x10, 0x12, 0x2c, 0x4a, 0x80, 0x7f, 0x4f, - 0x63, 0xb0, 0x57, 0xf5, 0xd8, 0x94, 0xeb, 0xbf, 0x5f, 0x7a, 0xc8, 0x1c, 0x0a, 0xef, 0xa0, 0x1b, - 0xf0, 0x2c, 0x63, 0x9c, 0xea, 0x59, 0x85, 0x30, 0x4d, 0xfa, 0xc2, 0x6e, 0xf6, 0xac, 0x8d, 0x7a, - 0xf0, 0xce, 0x64, 0xec, 0xdc, 0xd8, 0x5d, 0x74, 0x92, 0x8b, 0x78, 0xec, 0xa1, 0x46, 0xae, 0x46, - 0x51, 0xd8, 0xf5, 0x5e, 0x75, 0xa3, 0x15, 0xdc, 0x54, 0x03, 0x7d, 0xa8, 0x2d, 0x67, 0x63, 0xa7, - 0xf9, 0x1d, 0x8c, 0xf4, 0x81, 0x18, 0x14, 0xfe, 0x08, 0x35, 0x73, 0x01, 0x3c, 0x51, 0x69, 0x16, - 0x6b, 0x50, 0xf6, 0xfe, 0xd0, 0xd8, 0x49, 0x89, 0xc0, 0xb7, 0x50, 0x35, 0x67, 0x7d, 0xb3, 0x06, - 0x6d, 0x03, 0xac, 0x1e, 0xde, 0xbf, 0x47, 0x94, 0x1d, 0xbb, 0xa8, 0x11, 0xf1, 0x34, 0xcf, 0x84, - 0x5d, 0xd3, 0xe2, 0x48, 0x89, 0x7f, 0xab, 0x2d, 0xc4, 0x78, 0x30, 0x43, 0x75, 0x78, 0x26, 0x39, - 0xb5, 0x1b, 0xfa, 0xfa, 0xee, 0x5d, 0xf9, 0x9d, 0xf3, 0x76, 0x15, 0xcd, 0x6e, 0x22, 0xf9, 0x68, - 0x76, 0x9b, 0xda, 0x46, 0x0a, 0x85, 0xce, 0x23, 0x84, 0x66, 0x18, 0x7c, 0x1d, 0x55, 0x07, 0x30, - 0x2a, 0x5e, 0x1d, 0xa2, 0x3e, 0xf1, 0x57, 0xa8, 0x3e, 0xa4, 0x4f, 0x73, 0x30, 0x4f, 0xee, 0xed, - 0xd7, 0xa5, 0xa2, 0x89, 0x7e, 0x54, 0x68, 0x52, 0x04, 0xdd, 0x5d, 0xd9, 0xb6, 0xdc, 0x13, 0x0b, - 0x39, 0x6f, 0x78, 0x2d, 0x31, 0x47, 0x28, 0x9c, 0xbe, 0x40, 0xc2, 0xb6, 0x74, 0xd5, 0x5f, 0x5f, - 0xa9, 0xea, 0xf2, 0x21, 0x9b, 0x8d, 0x52, 0x69, 0x12, 0x64, 0x4e, 0x05, 0x6f, 0xa2, 0xf6, 0x1c, - 0xab, 0xae, 0x6f, 0x2d, 0xb8, 0x36, 0x19, 0x3b, 0xed, 0x39, 0x72, 0x32, 0x8f, 0x71, 0x3f, 0x37, - 0xcd, 0xd2, 0x35, 0x62, 0x67, 0xba, 0x64, 0x96, 0xbe, 0xc8, 0xd6, 0xe2, 0xa6, 0xdc, 0x6d, 0xfe, - 0xf6, 0xbb, 0x53, 0x79, 0xfe, 0x77, 0xaf, 0x12, 0x7c, 0x73, 0x72, 0xda, 0xad, 0xbc, 0x38, 0xed, - 0x56, 0x5e, 0x9e, 0x76, 0x2b, 0xcf, 0x27, 0x5d, 0xeb, 0x64, 0xd2, 0xb5, 0x5e, 0x4c, 0xba, 0xd6, - 0xcb, 
0x49, 0xd7, 0xfa, 0x67, 0xd2, 0xb5, 0x7e, 0x7d, 0xd5, 0xad, 0x3c, 0xec, 0x2c, 0xff, 0x2f, - 0xf9, 0x37, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x00, 0x0c, 0x1b, 0xcd, 0x08, 0x00, 0x00, +var fileDescriptor_5f7d41da689f96f7 = []byte{ + // 896 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xf7, 0xc6, 0x7f, 0x62, 0x8f, 0x43, 0xda, 0x8e, 0xa0, 0x5a, 0x2c, 0xd5, 0x6b, 0xad, 0xa0, + 0x0a, 0x15, 0xcc, 0x92, 0xa8, 0x40, 0x28, 0x08, 0xa1, 0x4d, 0x23, 0x54, 0x91, 0x82, 0x34, 0x49, + 0x38, 0x14, 0x0e, 0x9d, 0xac, 0x5f, 0x37, 0xd3, 0x74, 0xff, 0xb0, 0x33, 0x6b, 0xd5, 0xb7, 0x7e, + 0x04, 0x8e, 0x1c, 0xf9, 0x02, 0x7c, 0x06, 0xae, 0x39, 0xf6, 0x58, 0x24, 0x64, 0x11, 0xf7, 0x5b, + 0xe4, 0x84, 0x66, 0x76, 0xbc, 0x76, 0x9c, 0xb8, 0x0d, 0xb9, 0x79, 0x7e, 0xf3, 0x7b, 0xbf, 0xdf, + 0x7b, 0x6f, 0xdf, 0x1b, 0x19, 0xdd, 0x39, 0xda, 0x14, 0x84, 0x27, 0x1e, 0x4b, 0xb9, 0x17, 0x40, + 0x26, 0xf9, 0x13, 0x1e, 0x30, 0x09, 0xc2, 0x1b, 0xac, 0x7b, 0x21, 0xc4, 0x90, 0x31, 0x09, 0x7d, + 0x92, 0x66, 0x89, 0x4c, 0x70, 0xa7, 0xe0, 0x12, 0x96, 0x72, 0x32, 0xcb, 0x25, 0x83, 0xf5, 0xce, + 0x27, 0x21, 0x97, 0x87, 0xf9, 0x01, 0x09, 0x92, 0xc8, 0x0b, 0x93, 0x30, 0xf1, 0x74, 0xc8, 0x41, + 0xfe, 0x44, 0x9f, 0xf4, 0x41, 0xff, 0x2a, 0xa4, 0x3a, 0xee, 0xac, 0x6d, 0x92, 0xc1, 0x05, 0x76, + 0x9d, 0xbb, 0x53, 0x4e, 0xc4, 0x82, 0x43, 0x1e, 0x43, 0x36, 0xf4, 0xd2, 0xa3, 0x50, 0x01, 0xc2, + 0x8b, 0x40, 0xb2, 0x8b, 0xa2, 0xbc, 0x45, 0x51, 0x59, 0x1e, 0x4b, 0x1e, 0xc1, 0xb9, 0x80, 0xcf, + 0xdf, 0x16, 0x20, 0x82, 0x43, 0x88, 0xd8, 0x7c, 0x9c, 0xfb, 0xd7, 0x12, 0x7a, 0x7f, 0x6b, 0xda, + 0x85, 0x5d, 0x1e, 0xc6, 0x3c, 0x0e, 0x29, 0xfc, 0x9a, 0x83, 0x90, 0xf8, 0x31, 0x6a, 0xaa, 0x0c, + 0xfb, 0x4c, 0x32, 0xdb, 0xea, 0x59, 0x6b, 0xed, 0x8d, 0x4f, 0xc9, 0xb4, 0x7d, 0xa5, 0x11, 0x49, + 0x8f, 0x42, 0x05, 0x08, 0xa2, 0xd8, 0x64, 0xb0, 0x4e, 0x7e, 0x3c, 0x78, 0x0a, 0x81, 0x7c, 0x08, + 0x92, 0xf9, 0xf8, 0x78, 0xe4, 0x54, 0xc6, 0x23, 0x07, 0x4d, 0x31, 0x5a, 0xaa, 0xe2, 0x9f, 0x51, + 0x4d, 0xa4, 0x10, 0xd8, 0x4b, 0x5a, 0xfd, 0x4b, 0xb2, 0xf8, 0xe3, 0x90, 0x85, 0x69, 0xee, 0xa6, + 0x10, 0xf8, 0x2b, 0xc6, 0xa6, 0xa6, 0x4e, 0x54, 0x8b, 0xe2, 0x00, 0x35, 0x84, 0x64, 0x32, 0x17, + 0x76, 0x55, 0xcb, 0x7f, 0x75, 0x35, 0x79, 0x2d, 0xe1, 0xaf, 0x1a, 0x83, 0x46, 0x71, 0xa6, 0x46, + 0xda, 0x7d, 0x5d, 0x45, 0xee, 0xc2, 0xd8, 0xad, 0x24, 0xee, 0x73, 0xc9, 0x93, 0x18, 0x6f, 0xa2, + 0x9a, 0x1c, 0xa6, 0xa0, 0xdb, 0xd8, 0xf2, 0x3f, 0x98, 0x64, 0xbb, 0x37, 0x4c, 0xe1, 0x74, 0xe4, + 0xbc, 0x3b, 0xcf, 0x57, 0x38, 0xd5, 0x11, 0x78, 0xa7, 0xac, 0xa2, 0xa1, 0x63, 0xef, 0x9e, 0x4d, + 0xe4, 0x74, 0xe4, 0x5c, 0x30, 0x87, 0xa4, 0x54, 0x3a, 0x9b, 0x2e, 0xbe, 0x8d, 0x1a, 0x19, 0x30, + 0x91, 0xc4, 0xba, 0xe5, 0xad, 0x69, 0x59, 0x54, 0xa3, 0xd4, 0xdc, 0xe2, 0x8f, 0xd0, 0x72, 0x04, + 0x42, 0xb0, 0x10, 0x74, 0xf3, 0x5a, 0xfe, 0x35, 0x43, 0x5c, 0x7e, 0x58, 0xc0, 0x74, 0x72, 0x8f, + 0x9f, 0xa2, 0xd5, 0x67, 0x4c, 0xc8, 0xfd, 0xb4, 0xcf, 0x24, 0xec, 0xf1, 0x08, 0xec, 0x9a, 0x6e, + 0xf7, 0x9d, 0xcb, 0xcd, 0x8a, 0x8a, 0xf0, 0x6f, 0x1a, 0xf5, 0xd5, 0x9d, 0x33, 0x4a, 0x74, 0x4e, + 0x19, 0x0f, 0x10, 0x56, 0xc8, 0x5e, 0xc6, 0x62, 0x51, 0x34, 0x4a, 0xf9, 0xd5, 0xff, 0xb7, 0x5f, + 0xc7, 0xf8, 0xe1, 0x9d, 0x73, 0x6a, 0xf4, 0x02, 0x07, 0xf7, 0x6f, 0x0b, 0xdd, 0x5a, 0xf8, 0x95, + 0x77, 0xb8, 0x90, 0xf8, 0x97, 0x73, 0xbb, 0x42, 0x2e, 0x97, 0x8f, 0x8a, 0xd6, 0x9b, 0x72, 0xdd, + 0xe4, 0xd4, 0x9c, 0x20, 0x33, 0x7b, 0xf2, 0x08, 0xd5, 0xb9, 0x84, 0x48, 0xd8, 0x4b, 0xbd, 0xea, + 0x5a, 0x7b, 0xe3, 0xb3, 0x2b, 0x4d, 0xb2, 0xff, 
0x8e, 0x71, 0xa8, 0x3f, 0x50, 0x5a, 0xb4, 0x90, + 0x74, 0xff, 0xac, 0xbd, 0xa1, 0x36, 0xb5, 0x4e, 0xf8, 0x43, 0xb4, 0x9c, 0x15, 0x47, 0x5d, 0xda, + 0x8a, 0xdf, 0x56, 0x83, 0x60, 0x18, 0x74, 0x72, 0x87, 0x37, 0x10, 0x12, 0x3c, 0x8c, 0x21, 0xfb, + 0x81, 0x45, 0x60, 0x2f, 0xeb, 0xb1, 0x29, 0xd7, 0x7f, 0xb7, 0xbc, 0xa1, 0x33, 0x2c, 0xbc, 0x85, + 0x6e, 0xc0, 0xf3, 0x94, 0x67, 0x4c, 0xcf, 0x2a, 0x04, 0x49, 0xdc, 0x17, 0x76, 0xb3, 0x67, 0xad, + 0xd5, 0xfd, 0xf7, 0xc6, 0x23, 0xe7, 0xc6, 0xf6, 0xfc, 0x25, 0x3d, 0xcf, 0xc7, 0x04, 0x35, 0x72, + 0x35, 0x8a, 0xc2, 0xae, 0xf7, 0xaa, 0x6b, 0x2d, 0xff, 0xa6, 0x1a, 0xe8, 0x7d, 0x8d, 0x9c, 0x8e, + 0x9c, 0xe6, 0xf7, 0x30, 0xd4, 0x07, 0x6a, 0x58, 0xf8, 0x63, 0xd4, 0xcc, 0x05, 0x64, 0xb1, 0x4a, + 0xb3, 0x58, 0x83, 0xb2, 0xf7, 0xfb, 0x06, 0xa7, 0x25, 0x03, 0xdf, 0x42, 0xd5, 0x9c, 0xf7, 0xcd, + 0x1a, 0xb4, 0x0d, 0xb1, 0xba, 0xff, 0xe0, 0x3e, 0x55, 0x38, 0x76, 0x51, 0x23, 0xcc, 0x92, 0x3c, + 0x15, 0x76, 0x4d, 0x9b, 0x23, 0x65, 0xfe, 0x9d, 0x46, 0xa8, 0xb9, 0xc1, 0x1c, 0xd5, 0xe1, 0xb9, + 0xcc, 0x98, 0xdd, 0xd0, 0x9f, 0xef, 0xfe, 0x95, 0xdf, 0x39, 0xb2, 0xad, 0x64, 0xb6, 0x63, 0x99, + 0x0d, 0xa7, 0x5f, 0x53, 0x63, 0xb4, 0x70, 0xe8, 0x3c, 0x46, 0x68, 0xca, 0xc1, 0xd7, 0x51, 0xf5, + 0x08, 0x86, 0xc5, 0xab, 0x43, 0xd5, 0x4f, 0xfc, 0x35, 0xaa, 0x0f, 0xd8, 0xb3, 0x1c, 0xcc, 0x93, + 0x7b, 0xfb, 0x4d, 0xa9, 0x68, 0xa1, 0x9f, 0x14, 0x9b, 0x16, 0x41, 0xf7, 0x96, 0x36, 0x2d, 0xf7, + 0xd8, 0x42, 0xce, 0x5b, 0x5e, 0x4b, 0x9c, 0x21, 0x14, 0x4c, 0x5e, 0x20, 0x61, 0x5b, 0xba, 0xea, + 0x6f, 0xae, 0x54, 0x75, 0xf9, 0x90, 0x4d, 0x47, 0xa9, 0x84, 0x04, 0x9d, 0x71, 0xc1, 0xeb, 0xa8, + 0x3d, 0xa3, 0xaa, 0xeb, 0x5b, 0xf1, 0xaf, 0x8d, 0x47, 0x4e, 0x7b, 0x46, 0x9c, 0xce, 0x72, 0xdc, + 0x2f, 0x4c, 0xb3, 0x74, 0x8d, 0xd8, 0x99, 0x2c, 0x99, 0xa5, 0x3f, 0x64, 0x6b, 0x7e, 0x53, 0xee, + 0x35, 0x7f, 0xff, 0xc3, 0xa9, 0xbc, 0xf8, 0xa7, 0x57, 0xf1, 0xbf, 0x3d, 0x3e, 0xe9, 0x56, 0x5e, + 0x9e, 0x74, 0x2b, 0xaf, 0x4e, 0xba, 0x95, 0x17, 0xe3, 0xae, 0x75, 0x3c, 0xee, 0x5a, 0x2f, 0xc7, + 0x5d, 0xeb, 0xd5, 0xb8, 0x6b, 0xfd, 0x3b, 0xee, 0x5a, 0xbf, 0xbd, 0xee, 0x56, 0x1e, 0x75, 0x16, + 0xff, 0x2f, 0xf9, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xd9, 0x4a, 0x4f, 0xbc, 0xb4, 0x08, 0x00, 0x00, } func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/certificates/v1/generated.proto b/vendor/k8s.io/api/certificates/v1/generated.proto index 968cc2564c..dac7c7f5f2 100644 --- a/vendor/k8s.io/api/certificates/v1/generated.proto +++ b/vendor/k8s.io/api/certificates/v1/generated.proto @@ -41,7 +41,7 @@ option go_package = "k8s.io/api/certificates/v1"; // or to obtain certificates from custom non-Kubernetes signers. message CertificateSigningRequest { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec contains the certificate request, and is immutable after creation. // Only the request, signerName, expirationSeconds, and usages fields can be set on creation. @@ -87,19 +87,19 @@ message CertificateSigningRequestCondition { // lastUpdateTime is the time of the last update to this condition // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4; // lastTransitionTime is the time the condition last transitioned from one status to another. 
// If unset, when a new condition type is added or an existing condition's status is changed, // the server defaults this to the current time. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 5; } // CertificateSigningRequestList is a collection of CertificateSigningRequest objects message CertificateSigningRequestList { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a collection of CertificateSigningRequest objects repeated CertificateSigningRequest items = 2; diff --git a/vendor/k8s.io/api/certificates/v1/types.go b/vendor/k8s.io/api/certificates/v1/types.go index 92b2018e76..ba8009840d 100644 --- a/vendor/k8s.io/api/certificates/v1/types.go +++ b/vendor/k8s.io/api/certificates/v1/types.go @@ -27,6 +27,7 @@ import ( // +genclient:nonNamespaced // +genclient:method=UpdateApproval,verb=update,subresource=approval,input=k8s.io/api/certificates/v1.CertificateSigningRequest,result=k8s.io/api/certificates/v1.CertificateSigningRequest // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // CertificateSigningRequest objects provide a mechanism to obtain x509 certificates // by submitting a certificate signing request, and having it asynchronously approved and issued. @@ -262,6 +263,7 @@ type CertificateSigningRequestCondition struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // CertificateSigningRequestList is a collection of CertificateSigningRequest objects type CertificateSigningRequestList struct { diff --git a/vendor/k8s.io/api/certificates/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/certificates/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..3a2b274030 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CertificateSigningRequest) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *CertificateSigningRequestList) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} diff --git a/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go b/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go index 546ecbefbf..a62a400596 100644 --- a/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1alpha1/generated.proto +// source: k8s.io/api/certificates/v1alpha1/generated.proto package v1alpha1 @@ -46,7 +46,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ClusterTrustBundle) Reset() { *m = ClusterTrustBundle{} } func (*ClusterTrustBundle) ProtoMessage() {} func (*ClusterTrustBundle) Descriptor() ([]byte, []int) { - return fileDescriptor_8915b0d419f9eda6, []int{0} + return fileDescriptor_f73d5fe56c015bb8, []int{0} } func (m *ClusterTrustBundle) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -74,7 +74,7 @@ var xxx_messageInfo_ClusterTrustBundle proto.InternalMessageInfo func (m *ClusterTrustBundleList) Reset() { *m = ClusterTrustBundleList{} } func (*ClusterTrustBundleList) ProtoMessage() {} func (*ClusterTrustBundleList) Descriptor() ([]byte, []int) { - return fileDescriptor_8915b0d419f9eda6, []int{1} + return fileDescriptor_f73d5fe56c015bb8, []int{1} } func (m *ClusterTrustBundleList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -102,7 +102,7 @@ var xxx_messageInfo_ClusterTrustBundleList proto.InternalMessageInfo func (m *ClusterTrustBundleSpec) Reset() { *m = ClusterTrustBundleSpec{} } func (*ClusterTrustBundleSpec) ProtoMessage() {} func (*ClusterTrustBundleSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_8915b0d419f9eda6, []int{2} + return fileDescriptor_f73d5fe56c015bb8, []int{2} } func (m *ClusterTrustBundleSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,39 +134,39 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1alpha1/generated.proto", fileDescriptor_8915b0d419f9eda6) -} - -var fileDescriptor_8915b0d419f9eda6 = []byte{ - // 448 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcf, 0x6b, 0x13, 0x41, - 0x14, 0xc7, 0x77, 0x6a, 0x0b, 0xed, 0x44, 0x41, 0x56, 0x90, 0x90, 0xc3, 0x34, 0xe4, 0xd4, 0x8b, - 0x33, 0x26, 0x54, 0xe9, 0x79, 0x05, 0xa1, 0xe0, 0x0f, 0xd8, 0x7a, 0xb1, 0x78, 0x70, 0x32, 0x79, - 0xdd, 0x8c, 0xc9, 0xee, 0x0e, 0x33, 0xb3, 0x01, 0x6f, 0x82, 0xff, 0x80, 0x7f, 0x56, 0x8e, 0xd5, - 0x53, 0x4f, 0xc5, 0xac, 0xff, 0x88, 0xcc, 0x64, 0x93, 0x5d, 0x5c, 0x25, 0xd2, 0xdb, 0xbe, 0x1f, - 0x9f, 0xef, 0x7b, 0xdf, 0xb7, 0x0c, 0x3e, 0x9f, 0x9d, 0x19, 0x2a, 0x73, 0x36, 0x2b, 0xc6, 0xa0, - 0x33, 0xb0, 0x60, 0xd8, 0x02, 0xb2, 0x49, 0xae, 0x59, 0x55, 0xe0, 0x4a, 0x32, 0x01, 0xda, 0xca, - 0x2b, 0x29, 0xb8, 0x2f, 0x0f, 0xf9, 0x5c, 0x4d, 0xf9, 0x90, 0x25, 0x90, 0x81, 0xe6, 0x16, 0x26, - 0x54, 0xe9, 0xdc, 0xe6, 0x61, 0x7f, 0x4d, 0x50, 0xae, 0x24, 0x6d, 0x12, 0x74, 0x43, 0xf4, 0x9e, - 0x24, 0xd2, 0x4e, 0x8b, 0x31, 0x15, 0x79, 0xca, 0x92, 0x3c, 0xc9, 0x99, 0x07, 0xc7, 0xc5, 0x95, - 0x8f, 0x7c, 0xe0, 0xbf, 0xd6, 0x82, 0xbd, 0xd3, 0x7a, 0x85, 0x94, 0x8b, 0xa9, 0xcc, 0x40, 0x7f, - 0x66, 0x6a, 0x96, 0xb8, 0x84, 0x61, 0x29, 0x58, 0xce, 0x16, 0xad, 0x35, 0x7a, 0xec, 0x5f, 0x94, - 0x2e, 0x32, 0x2b, 0x53, 0x68, 0x01, 0xcf, 0x77, 0x01, 
0x46, 0x4c, 0x21, 0xe5, 0x7f, 0x72, 0x83, - 0x1f, 0x08, 0x87, 0x2f, 0xe6, 0x85, 0xb1, 0xa0, 0xdf, 0xe9, 0xc2, 0xd8, 0xa8, 0xc8, 0x26, 0x73, - 0x08, 0x3f, 0xe2, 0x43, 0xb7, 0xda, 0x84, 0x5b, 0xde, 0x45, 0x7d, 0x74, 0xd2, 0x19, 0x3d, 0xa5, - 0xf5, 0x65, 0xb6, 0x13, 0xa8, 0x9a, 0x25, 0x2e, 0x61, 0xa8, 0xeb, 0xa6, 0x8b, 0x21, 0x7d, 0x3b, - 0xfe, 0x04, 0xc2, 0xbe, 0x06, 0xcb, 0xa3, 0x70, 0x79, 0x7b, 0x1c, 0x94, 0xb7, 0xc7, 0xb8, 0xce, - 0xc5, 0x5b, 0xd5, 0xf0, 0x12, 0xef, 0x1b, 0x05, 0xa2, 0xbb, 0xe7, 0xd5, 0xcf, 0xe8, 0xae, 0xbb, - 0xd3, 0xf6, 0x96, 0x17, 0x0a, 0x44, 0x74, 0xbf, 0x9a, 0xb2, 0xef, 0xa2, 0xd8, 0x6b, 0x0e, 0xbe, - 0x23, 0xfc, 0xb8, 0xdd, 0xfe, 0x4a, 0x1a, 0x1b, 0x7e, 0x68, 0x19, 0xa3, 0xff, 0x67, 0xcc, 0xd1, - 0xde, 0xd6, 0xc3, 0x6a, 0xe0, 0xe1, 0x26, 0xd3, 0x30, 0xf5, 0x1e, 0x1f, 0x48, 0x0b, 0xa9, 0xe9, - 0xee, 0xf5, 0xef, 0x9d, 0x74, 0x46, 0xa7, 0x77, 0x71, 0x15, 0x3d, 0xa8, 0x06, 0x1c, 0x9c, 0x3b, - 0xa9, 0x78, 0xad, 0x38, 0xf8, 0xfa, 0x57, 0x4f, 0xce, 0x74, 0x38, 0xc2, 0xd8, 0xc8, 0x24, 0x03, - 0xfd, 0x86, 0xa7, 0xe0, 0x5d, 0x1d, 0xd5, 0xc7, 0xbf, 0xd8, 0x56, 0xe2, 0x46, 0x57, 0xf8, 0x0c, - 0x77, 0x6c, 0x2d, 0xe3, 0xff, 0xc2, 0x51, 0xf4, 0xa8, 0x82, 0x3a, 0x8d, 0x09, 0x71, 0xb3, 0x2f, - 0x7a, 0xb9, 0x5c, 0x91, 0xe0, 0x7a, 0x45, 0x82, 0x9b, 0x15, 0x09, 0xbe, 0x94, 0x04, 0x2d, 0x4b, - 0x82, 0xae, 0x4b, 0x82, 0x6e, 0x4a, 0x82, 0x7e, 0x96, 0x04, 0x7d, 0xfb, 0x45, 0x82, 0xcb, 0xfe, - 0xae, 0x67, 0xf7, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x05, 0xe9, 0xaa, 0x07, 0xb2, 0x03, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/certificates/v1alpha1/generated.proto", fileDescriptor_f73d5fe56c015bb8) +} + +var fileDescriptor_f73d5fe56c015bb8 = []byte{ + // 437 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcf, 0x6a, 0xdb, 0x40, + 0x10, 0xc6, 0xb5, 0x69, 0x02, 0xc9, 0xba, 0x85, 0xa2, 0x42, 0x31, 0x3e, 0x6c, 0x8c, 0x4f, 0xb9, + 0x74, 0x37, 0x36, 0x69, 0xc9, 0x59, 0x85, 0x42, 0xa1, 0x7f, 0x40, 0xe9, 0xa5, 0xa1, 0x87, 0xae, + 0xd7, 0x13, 0x79, 0x6b, 0x4b, 0x5a, 0x76, 0x57, 0x86, 0xde, 0x0a, 0x7d, 0x81, 0x3e, 0x96, 0x8f, + 0x69, 0x4f, 0x39, 0x85, 0x5a, 0x7d, 0x91, 0xb2, 0x6b, 0xd9, 0x12, 0x55, 0x8b, 0x4b, 0x6e, 0x9a, + 0xd1, 0xfc, 0xbe, 0x6f, 0xbe, 0x11, 0xc2, 0xa7, 0xb3, 0x73, 0x43, 0x65, 0xce, 0xb8, 0x92, 0x4c, + 0x80, 0xb6, 0xf2, 0x4a, 0x0a, 0x6e, 0xc1, 0xb0, 0xc5, 0x90, 0xcf, 0xd5, 0x94, 0x0f, 0x59, 0x02, + 0x19, 0x68, 0x6e, 0x61, 0x42, 0x95, 0xce, 0x6d, 0x1e, 0xf6, 0xd7, 0x04, 0xe5, 0x4a, 0xd2, 0x26, + 0x41, 0x37, 0x44, 0xef, 0x49, 0x22, 0xed, 0xb4, 0x18, 0x53, 0x91, 0xa7, 0x2c, 0xc9, 0x93, 0x9c, + 0x79, 0x70, 0x5c, 0x5c, 0xf9, 0xca, 0x17, 0xfe, 0x69, 0x2d, 0xd8, 0x3b, 0xab, 0x57, 0x48, 0xb9, + 0x98, 0xca, 0x0c, 0xf4, 0x67, 0xa6, 0x66, 0x89, 0x6b, 0x18, 0x96, 0x82, 0xe5, 0x6c, 0xd1, 0x5a, + 0xa3, 0xc7, 0xfe, 0x45, 0xe9, 0x22, 0xb3, 0x32, 0x85, 0x16, 0xf0, 0x6c, 0x17, 0x60, 0xc4, 0x14, + 0x52, 0xfe, 0x27, 0x37, 0xf8, 0x81, 0x70, 0xf8, 0x7c, 0x5e, 0x18, 0x0b, 0xfa, 0x9d, 0x2e, 0x8c, + 0x8d, 0x8a, 0x6c, 0x32, 0x87, 0xf0, 0x23, 0x3e, 0x74, 0xab, 0x4d, 0xb8, 0xe5, 0x5d, 0xd4, 0x47, + 0x27, 0x9d, 0xd1, 0x29, 0xad, 0x2f, 0xb3, 0x75, 0xa0, 0x6a, 0x96, 0xb8, 0x86, 0xa1, 0x6e, 0x9a, + 0x2e, 0x86, 0xf4, 0xed, 0xf8, 0x13, 0x08, 0xfb, 0x1a, 0x2c, 0x8f, 0xc2, 0xe5, 0xed, 0x71, 0x50, + 0xde, 0x1e, 0xe3, 0xba, 0x17, 0x6f, 0x55, 0xc3, 0x4b, 0xbc, 0x6f, 0x14, 0x88, 0xee, 0x9e, 0x57, + 0x3f, 0xa7, 0xbb, 0xee, 0x4e, 0xdb, 0x5b, 0x5e, 0x28, 0x10, 0xd1, 0xfd, 0xca, 0x65, 0xdf, 0x55, + 0xb1, 0xd7, 0x1c, 0x7c, 0x47, 0xf8, 0x71, 0x7b, 0xfc, 0x95, 0x34, 
0x36, 0xfc, 0xd0, 0x0a, 0x46, + 0xff, 0x2f, 0x98, 0xa3, 0x7d, 0xac, 0x87, 0x95, 0xe1, 0xe1, 0xa6, 0xd3, 0x08, 0xf5, 0x1e, 0x1f, + 0x48, 0x0b, 0xa9, 0xe9, 0xee, 0xf5, 0xef, 0x9d, 0x74, 0x46, 0x67, 0x77, 0x49, 0x15, 0x3d, 0xa8, + 0x0c, 0x0e, 0x5e, 0x3a, 0xa9, 0x78, 0xad, 0x38, 0xf8, 0xfa, 0xd7, 0x4c, 0x2e, 0x74, 0x38, 0xc2, + 0xd8, 0xc8, 0x24, 0x03, 0xfd, 0x86, 0xa7, 0xe0, 0x53, 0x1d, 0xd5, 0xc7, 0xbf, 0xd8, 0xbe, 0x89, + 0x1b, 0x53, 0xe1, 0x53, 0xdc, 0xb1, 0xb5, 0x8c, 0xff, 0x0a, 0x47, 0xd1, 0xa3, 0x0a, 0xea, 0x34, + 0x1c, 0xe2, 0xe6, 0x5c, 0xf4, 0x62, 0xb9, 0x22, 0xc1, 0xf5, 0x8a, 0x04, 0x37, 0x2b, 0x12, 0x7c, + 0x29, 0x09, 0x5a, 0x96, 0x04, 0x5d, 0x97, 0x04, 0xdd, 0x94, 0x04, 0xfd, 0x2c, 0x09, 0xfa, 0xf6, + 0x8b, 0x04, 0x97, 0xfd, 0x5d, 0xbf, 0xdd, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd4, 0x1c, 0xcb, + 0xdd, 0x99, 0x03, 0x00, 0x00, } func (m *ClusterTrustBundle) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/certificates/v1alpha1/generated.proto b/vendor/k8s.io/api/certificates/v1alpha1/generated.proto index b0ebc4bd45..7155f778cf 100644 --- a/vendor/k8s.io/api/certificates/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/certificates/v1alpha1/generated.proto @@ -46,7 +46,7 @@ option go_package = "k8s.io/api/certificates/v1alpha1"; message ClusterTrustBundle { // metadata contains the object metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec contains the signer (if any) and trust anchors. optional ClusterTrustBundleSpec spec = 2; @@ -57,7 +57,7 @@ message ClusterTrustBundleList { // metadata contains the list metadata. // // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a collection of ClusterTrustBundle objects repeated ClusterTrustBundle items = 2; diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go index 352b9faa7a..b6d8ab3f59 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto +// source: k8s.io/api/certificates/v1beta1/generated.proto package v1beta1 @@ -49,7 +49,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *CertificateSigningRequest) Reset() { *m = CertificateSigningRequest{} } func (*CertificateSigningRequest) ProtoMessage() {} func (*CertificateSigningRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_09d156762b8218ef, []int{0} + return fileDescriptor_6529c11a462c48a5, []int{0} } func (m *CertificateSigningRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -77,7 +77,7 @@ var xxx_messageInfo_CertificateSigningRequest proto.InternalMessageInfo func (m *CertificateSigningRequestCondition) Reset() { *m = CertificateSigningRequestCondition{} } func (*CertificateSigningRequestCondition) ProtoMessage() {} func (*CertificateSigningRequestCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_09d156762b8218ef, []int{1} + return fileDescriptor_6529c11a462c48a5, []int{1} } func (m *CertificateSigningRequestCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -105,7 +105,7 @@ var xxx_messageInfo_CertificateSigningRequestCondition proto.InternalMessageInfo func (m *CertificateSigningRequestList) Reset() { *m = CertificateSigningRequestList{} } func (*CertificateSigningRequestList) ProtoMessage() {} func (*CertificateSigningRequestList) Descriptor() ([]byte, []int) { - return fileDescriptor_09d156762b8218ef, []int{2} + return fileDescriptor_6529c11a462c48a5, []int{2} } func (m *CertificateSigningRequestList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -133,7 +133,7 @@ var xxx_messageInfo_CertificateSigningRequestList proto.InternalMessageInfo func (m *CertificateSigningRequestSpec) Reset() { *m = CertificateSigningRequestSpec{} } func (*CertificateSigningRequestSpec) ProtoMessage() {} func (*CertificateSigningRequestSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_09d156762b8218ef, []int{3} + return fileDescriptor_6529c11a462c48a5, []int{3} } func (m *CertificateSigningRequestSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -161,7 +161,7 @@ var xxx_messageInfo_CertificateSigningRequestSpec proto.InternalMessageInfo func (m *CertificateSigningRequestStatus) Reset() { *m = CertificateSigningRequestStatus{} } func (*CertificateSigningRequestStatus) ProtoMessage() {} func (*CertificateSigningRequestStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_09d156762b8218ef, []int{4} + return fileDescriptor_6529c11a462c48a5, []int{4} } func (m *CertificateSigningRequestStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -189,7 +189,7 @@ var xxx_messageInfo_CertificateSigningRequestStatus proto.InternalMessageInfo func (m *ExtraValue) Reset() { *m = ExtraValue{} } func (*ExtraValue) ProtoMessage() {} func (*ExtraValue) Descriptor() ([]byte, []int) { - return fileDescriptor_09d156762b8218ef, []int{5} + return fileDescriptor_6529c11a462c48a5, []int{5} } func (m *ExtraValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -225,69 +225,68 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto", fileDescriptor_09d156762b8218ef) + proto.RegisterFile("k8s.io/api/certificates/v1beta1/generated.proto", fileDescriptor_6529c11a462c48a5) } -var fileDescriptor_09d156762b8218ef = []byte{ - // 915 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x5d, 0x6f, 0x1b, 0x45, - 0x17, 0xf6, 0xc6, 0x1f, 0xb1, 0xc7, 0x79, 0xd3, 0x76, 0xf4, 0x52, 0x2d, 0x96, 0xea, 0xb5, 0x2c, - 0x40, 0xe1, 0x6b, 0x96, 0x54, 0x15, 0x44, 0xb9, 0x40, 0xb0, 0x21, 0x82, 0x88, 0x14, 0xa4, 0x49, - 0xc2, 0x05, 0x42, 0xa2, 0xe3, 0xf5, 0xe9, 0x66, 0xea, 0xee, 0x07, 0x3b, 0xb3, 0xa6, 0xbe, 0xeb, - 0x4f, 0xe0, 0x92, 0x4b, 0xfe, 0x03, 0x7f, 0x22, 0x5c, 0x20, 0xf5, 0xb2, 0x17, 0xc8, 0x22, 0xee, - 0xbf, 0xc8, 0x15, 0x9a, 0xd9, 0xf1, 0xda, 0xb1, 0x13, 0x5c, 0xda, 0xbb, 0x9d, 0x67, 0xce, 0xf3, - 0x3c, 0x67, 0xce, 0x9c, 0x33, 0x36, 0xfa, 0x6a, 0xb0, 0x23, 0x08, 0x8f, 0xdd, 0x41, 0xd6, 0x83, - 0x34, 0x02, 0x09, 0xc2, 0x1d, 0x42, 0xd4, 0x8f, 0x53, 0xd7, 0x6c, 0xb0, 0x84, 0xbb, 0x3e, 0xa4, - 0x92, 0x3f, 0xe4, 0x3e, 0xd3, 0xdb, 0xdb, 0x3d, 0x90, 0x6c, 0xdb, 0x0d, 0x20, 0x82, 0x94, 0x49, - 0xe8, 0x93, 0x24, 0x8d, 0x65, 0x8c, 0x9d, 0x9c, 0x40, 0x58, 0xc2, 0xc9, 0x3c, 0x81, 0x18, 0x42, - 0xeb, 0xc3, 0x80, 0xcb, 0xd3, 0xac, 0x47, 0xfc, 0x38, 0x74, 0x83, 0x38, 0x88, 0x5d, 0xcd, 0xeb, - 0x65, 0x0f, 0xf5, 0x4a, 0x2f, 0xf4, 0x57, 0xae, 0xd7, 0xea, 0xce, 0x27, 0x10, 0xa7, 0xe0, 0x0e, - 0x97, 0x3c, 0x5b, 0xf7, 0x66, 0x31, 0x21, 0xf3, 0x4f, 0x79, 0x04, 0xe9, 0xc8, 0x4d, 0x06, 0x81, - 0x02, 0x84, 0x1b, 0x82, 0x64, 0x57, 0xb1, 0xdc, 0xeb, 0x58, 0x69, 0x16, 0x49, 0x1e, 0xc2, 0x12, - 0xe1, 0xe3, 0x55, 0x04, 0xe1, 0x9f, 0x42, 0xc8, 0x16, 0x79, 0xdd, 0x3f, 0xd6, 0xd0, 0x9b, 0x7b, - 0xb3, 0x52, 0x1c, 0xf1, 0x20, 0xe2, 0x51, 0x40, 0xe1, 0xa7, 0x0c, 0x84, 0xc4, 0x0f, 0x50, 0x5d, - 0x65, 0xd8, 0x67, 0x92, 0xd9, 0x56, 0xc7, 0xda, 0x6a, 0xde, 0xfd, 0x88, 0xcc, 0x6a, 0x58, 0x18, - 0x91, 0x64, 0x10, 0x28, 0x40, 0x10, 0x15, 0x4d, 0x86, 0xdb, 0xe4, 0xdb, 0xde, 0x23, 0xf0, 0xe5, - 0x7d, 0x90, 0xcc, 0xc3, 0x67, 0x63, 0xa7, 0x34, 0x19, 0x3b, 0x68, 0x86, 0xd1, 0x42, 0x15, 0x3f, - 0x40, 0x15, 0x91, 0x80, 0x6f, 0xaf, 0x69, 0xf5, 0x4f, 0xc9, 0x8a, 0x1b, 0x22, 0xd7, 0xe6, 0x7a, - 0x94, 0x80, 0xef, 0x6d, 0x18, 0xaf, 0x8a, 0x5a, 0x51, 0xad, 0x8c, 0x4f, 0x51, 0x4d, 0x48, 0x26, - 0x33, 0x61, 0x97, 0xb5, 0xc7, 0x67, 0xaf, 0xe1, 0xa1, 0x75, 0xbc, 0x4d, 0xe3, 0x52, 0xcb, 0xd7, - 0xd4, 0xe8, 0x77, 0x5f, 0x94, 0x51, 0xf7, 0x5a, 0xee, 0x5e, 0x1c, 0xf5, 0xb9, 0xe4, 0x71, 0x84, - 0x77, 0x50, 0x45, 0x8e, 0x12, 0xd0, 0x05, 0x6d, 0x78, 0x6f, 0x4d, 0x53, 0x3e, 0x1e, 0x25, 0x70, - 0x31, 0x76, 0xfe, 0xbf, 0x18, 0xaf, 0x70, 0xaa, 0x19, 0xf8, 0xb0, 0x38, 0x4a, 0x4d, 0x73, 0xef, - 0x5d, 0x4e, 0xe4, 0x62, 0xec, 0x5c, 0xd1, 0x91, 0xa4, 0x50, 0xba, 0x9c, 0x2e, 0x7e, 0x07, 0xd5, - 0x52, 0x60, 0x22, 0x8e, 0x74, 0xf1, 0x1b, 0xb3, 0x63, 0x51, 0x8d, 0x52, 0xb3, 0x8b, 0xdf, 0x45, - 0xeb, 0x21, 0x08, 0xc1, 0x02, 0xd0, 0x15, 0x6c, 0x78, 0x37, 0x4c, 0xe0, 0xfa, 0xfd, 0x1c, 0xa6, - 0xd3, 0x7d, 0xfc, 0x08, 0x6d, 0x3e, 0x66, 0x42, 0x9e, 0x24, 0x7d, 0x26, 0xe1, 0x98, 0x87, 0x60, - 0x57, 0x74, 0xcd, 0xdf, 0x7b, 0xb9, 0xae, 0x51, 0x0c, 0xef, 0xb6, 0x51, 0xdf, 0x3c, 0xbc, 0xa4, - 0x44, 0x17, 0x94, 0xf1, 0x10, 0x61, 0x85, 0x1c, 0xa7, 0x2c, 0x12, 0x79, 0xa1, 0x94, 0x5f, 0xf5, - 0x3f, 0xfb, 0xb5, 0x8c, 0x1f, 0x3e, 0x5c, 0x52, 0xa3, 0x57, 0x38, 0x74, 0xc7, 0x16, 0xba, 0x73, - 0xed, 0x2d, 0x1f, 0x72, 0x21, 0xf1, 0x0f, 0x4b, 0x53, 0x43, 0x5e, 0x2e, 0x1f, 0xc5, 0xd6, 0x33, - 0x73, 0xd3, 0xe4, 0x54, 0x9f, 0x22, 0x73, 0x13, 0xf3, 0x23, 0xaa, 0x72, 0x09, 0xa1, 0xb0, 0xd7, - 0x3a, 0xe5, 0xad, 0xe6, 0xdd, 0xdd, 0x57, 0x6f, 0x67, 0xef, 0x7f, 0xc6, 0xa6, 0x7a, 0xa0, 0x04, - 0x69, 0xae, 0xdb, 0xfd, 0xbd, 0xf2, 0x2f, 0x07, 0x54, 0x83, 0x85, 0xdf, 0x46, 0xeb, 0x69, 0xbe, - 0xd4, 0xe7, 0xdb, 0xf0, 0x9a, 0xaa, 0x1b, 0x4c, 0x04, 0x9d, 
0xee, 0x61, 0x82, 0x90, 0xe0, 0x41, - 0x04, 0xe9, 0x37, 0x2c, 0x04, 0x7b, 0x3d, 0x6f, 0x32, 0xf5, 0x12, 0x1c, 0x15, 0x28, 0x9d, 0x8b, - 0xc0, 0x7b, 0xe8, 0x16, 0x3c, 0x49, 0x78, 0xca, 0x74, 0xb3, 0x82, 0x1f, 0x47, 0x7d, 0x61, 0xd7, - 0x3b, 0xd6, 0x56, 0xd5, 0x7b, 0x63, 0x32, 0x76, 0x6e, 0xed, 0x2f, 0x6e, 0xd2, 0xe5, 0x78, 0x4c, - 0x50, 0x2d, 0x53, 0xbd, 0x28, 0xec, 0x6a, 0xa7, 0xbc, 0xd5, 0xf0, 0x6e, 0xab, 0x8e, 0x3e, 0xd1, - 0xc8, 0xc5, 0xd8, 0xa9, 0x7f, 0x0d, 0x23, 0xbd, 0xa0, 0x26, 0x0a, 0x7f, 0x80, 0xea, 0x99, 0x80, - 0x34, 0x52, 0x29, 0xe6, 0x73, 0x50, 0x14, 0xff, 0xc4, 0xe0, 0xb4, 0x88, 0xc0, 0x77, 0x50, 0x39, - 0xe3, 0x7d, 0x33, 0x07, 0x4d, 0x13, 0x58, 0x3e, 0x39, 0xf8, 0x82, 0x2a, 0x1c, 0x77, 0x51, 0x2d, - 0x48, 0xe3, 0x2c, 0x11, 0x76, 0x45, 0x9b, 0x23, 0x65, 0xfe, 0xa5, 0x46, 0xa8, 0xd9, 0xc1, 0x11, - 0xaa, 0xc2, 0x13, 0x99, 0x32, 0xbb, 0xa6, 0xef, 0xef, 0xe0, 0xf5, 0x9e, 0x3c, 0xb2, 0xaf, 0xb4, - 0xf6, 0x23, 0x99, 0x8e, 0x66, 0xd7, 0xa9, 0x31, 0x9a, 0xdb, 0xb4, 0x00, 0xa1, 0x59, 0x0c, 0xbe, - 0x89, 0xca, 0x03, 0x18, 0xe5, 0x6f, 0x0f, 0x55, 0x9f, 0xf8, 0x73, 0x54, 0x1d, 0xb2, 0xc7, 0x19, - 0x98, 0x27, 0xf8, 0xfd, 0x95, 0xf9, 0x68, 0xb5, 0xef, 0x14, 0x85, 0xe6, 0xcc, 0xdd, 0xb5, 0x1d, - 0xab, 0xfb, 0xa7, 0x85, 0x9c, 0x15, 0x0f, 0x27, 0xfe, 0x19, 0x21, 0x7f, 0xfa, 0x18, 0x09, 0xdb, - 0xd2, 0xe7, 0xdf, 0x7b, 0xf5, 0xf3, 0x17, 0x0f, 0xdb, 0xec, 0x37, 0xa6, 0x80, 0x04, 0x9d, 0xb3, - 0xc2, 0xdb, 0xa8, 0x39, 0x27, 0xad, 0x4f, 0xba, 0xe1, 0xdd, 0x98, 0x8c, 0x9d, 0xe6, 0x9c, 0x38, - 0x9d, 0x8f, 0xe9, 0x7e, 0x62, 0xca, 0xa6, 0x0f, 0x8a, 0x9d, 0xe9, 0xd0, 0x59, 0xfa, 0x5e, 0x1b, - 0x8b, 0x43, 0xb3, 0x5b, 0xff, 0xf5, 0x37, 0xa7, 0xf4, 0xf4, 0xaf, 0x4e, 0xc9, 0xdb, 0x3f, 0x3b, - 0x6f, 0x97, 0x9e, 0x9d, 0xb7, 0x4b, 0xcf, 0xcf, 0xdb, 0xa5, 0xa7, 0x93, 0xb6, 0x75, 0x36, 0x69, - 0x5b, 0xcf, 0x26, 0x6d, 0xeb, 0xf9, 0xa4, 0x6d, 0xfd, 0x3d, 0x69, 0x5b, 0xbf, 0xbc, 0x68, 0x97, - 0xbe, 0x77, 0x56, 0xfc, 0x77, 0xf9, 0x27, 0x00, 0x00, 0xff, 0xff, 0x7b, 0xe7, 0x9b, 0x78, 0xf6, - 0x08, 0x00, 0x00, +var fileDescriptor_6529c11a462c48a5 = []byte{ + // 901 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x4d, 0x6f, 0x1b, 0x45, + 0x18, 0xf6, 0xc6, 0x1f, 0xb1, 0xc7, 0x21, 0x6d, 0x47, 0x50, 0x2d, 0x96, 0xea, 0xb5, 0x56, 0x80, + 0xc2, 0xd7, 0x2c, 0xa9, 0x2a, 0x88, 0x72, 0x40, 0xb0, 0x21, 0x42, 0x11, 0x29, 0x48, 0x93, 0x84, + 0x03, 0x42, 0xa2, 0x93, 0xf5, 0xdb, 0xcd, 0x34, 0xdd, 0x0f, 0x76, 0x66, 0x4d, 0x7d, 0xeb, 0x4f, + 0xe0, 0xc8, 0x91, 0xff, 0xc0, 0x9f, 0x08, 0x07, 0xa4, 0x1e, 0x7b, 0x40, 0x16, 0x71, 0xff, 0x45, + 0x4e, 0x68, 0x66, 0xc7, 0x6b, 0xc7, 0x4e, 0x70, 0x69, 0x6f, 0x3b, 0xcf, 0xbc, 0xcf, 0xf3, 0xbc, + 0xf3, 0xce, 0xfb, 0x8e, 0x8d, 0xbc, 0xd3, 0x2d, 0x41, 0x78, 0xe2, 0xb1, 0x94, 0x7b, 0x01, 0x64, + 0x92, 0x3f, 0xe4, 0x01, 0x93, 0x20, 0xbc, 0xc1, 0xe6, 0x31, 0x48, 0xb6, 0xe9, 0x85, 0x10, 0x43, + 0xc6, 0x24, 0xf4, 0x49, 0x9a, 0x25, 0x32, 0xc1, 0x4e, 0x41, 0x20, 0x2c, 0xe5, 0x64, 0x96, 0x40, + 0x0c, 0xa1, 0xf3, 0x71, 0xc8, 0xe5, 0x49, 0x7e, 0x4c, 0x82, 0x24, 0xf2, 0xc2, 0x24, 0x4c, 0x3c, + 0xcd, 0x3b, 0xce, 0x1f, 0xea, 0x95, 0x5e, 0xe8, 0xaf, 0x42, 0xaf, 0xe3, 0xce, 0x26, 0x90, 0x64, + 0xe0, 0x0d, 0x16, 0x3c, 0x3b, 0xf7, 0xa6, 0x31, 0x11, 0x0b, 0x4e, 0x78, 0x0c, 0xd9, 0xd0, 0x4b, + 0x4f, 0x43, 0x05, 0x08, 0x2f, 0x02, 0xc9, 0xae, 0x62, 0x79, 0xd7, 0xb1, 0xb2, 0x3c, 0x96, 0x3c, + 0x82, 0x05, 0xc2, 0xa7, 0xcb, 0x08, 0x22, 0x38, 0x81, 0x88, 0xcd, 0xf3, 0xdc, 0x3f, 0x57, 0xd0, + 0xdb, 0x3b, 0xd3, 0x52, 0x1c, 0xf0, 0x30, 0xe6, 0x71, 0x48, 0xe1, 
0xe7, 0x1c, 0x84, 0xc4, 0x0f, + 0x50, 0x53, 0x65, 0xd8, 0x67, 0x92, 0xd9, 0x56, 0xcf, 0xda, 0x68, 0xdf, 0xfd, 0x84, 0x4c, 0x6b, + 0x58, 0x1a, 0x91, 0xf4, 0x34, 0x54, 0x80, 0x20, 0x2a, 0x9a, 0x0c, 0x36, 0xc9, 0x77, 0xc7, 0x8f, + 0x20, 0x90, 0xf7, 0x41, 0x32, 0x1f, 0x9f, 0x8d, 0x9c, 0xca, 0x78, 0xe4, 0xa0, 0x29, 0x46, 0x4b, + 0x55, 0xfc, 0x00, 0xd5, 0x44, 0x0a, 0x81, 0xbd, 0xa2, 0xd5, 0x3f, 0x27, 0x4b, 0x6e, 0x88, 0x5c, + 0x9b, 0xeb, 0x41, 0x0a, 0x81, 0xbf, 0x66, 0xbc, 0x6a, 0x6a, 0x45, 0xb5, 0x32, 0x3e, 0x41, 0x0d, + 0x21, 0x99, 0xcc, 0x85, 0x5d, 0xd5, 0x1e, 0x5f, 0xbc, 0x86, 0x87, 0xd6, 0xf1, 0xd7, 0x8d, 0x4b, + 0xa3, 0x58, 0x53, 0xa3, 0xef, 0xbe, 0xa8, 0x22, 0xf7, 0x5a, 0xee, 0x4e, 0x12, 0xf7, 0xb9, 0xe4, + 0x49, 0x8c, 0xb7, 0x50, 0x4d, 0x0e, 0x53, 0xd0, 0x05, 0x6d, 0xf9, 0xef, 0x4c, 0x52, 0x3e, 0x1c, + 0xa6, 0x70, 0x31, 0x72, 0xde, 0x9c, 0x8f, 0x57, 0x38, 0xd5, 0x0c, 0xbc, 0x5f, 0x1e, 0xa5, 0xa1, + 0xb9, 0xf7, 0x2e, 0x27, 0x72, 0x31, 0x72, 0xae, 0xe8, 0x48, 0x52, 0x2a, 0x5d, 0x4e, 0x17, 0xbf, + 0x87, 0x1a, 0x19, 0x30, 0x91, 0xc4, 0xba, 0xf8, 0xad, 0xe9, 0xb1, 0xa8, 0x46, 0xa9, 0xd9, 0xc5, + 0xef, 0xa3, 0xd5, 0x08, 0x84, 0x60, 0x21, 0xe8, 0x0a, 0xb6, 0xfc, 0x1b, 0x26, 0x70, 0xf5, 0x7e, + 0x01, 0xd3, 0xc9, 0x3e, 0x7e, 0x84, 0xd6, 0x1f, 0x33, 0x21, 0x8f, 0xd2, 0x3e, 0x93, 0x70, 0xc8, + 0x23, 0xb0, 0x6b, 0xba, 0xe6, 0x1f, 0xbc, 0x5c, 0xd7, 0x28, 0x86, 0x7f, 0xdb, 0xa8, 0xaf, 0xef, + 0x5f, 0x52, 0xa2, 0x73, 0xca, 0x78, 0x80, 0xb0, 0x42, 0x0e, 0x33, 0x16, 0x8b, 0xa2, 0x50, 0xca, + 0xaf, 0xfe, 0xbf, 0xfd, 0x3a, 0xc6, 0x0f, 0xef, 0x2f, 0xa8, 0xd1, 0x2b, 0x1c, 0xdc, 0x91, 0x85, + 0xee, 0x5c, 0x7b, 0xcb, 0xfb, 0x5c, 0x48, 0xfc, 0xe3, 0xc2, 0xd4, 0x90, 0x97, 0xcb, 0x47, 0xb1, + 0xf5, 0xcc, 0xdc, 0x34, 0x39, 0x35, 0x27, 0xc8, 0xcc, 0xc4, 0xfc, 0x84, 0xea, 0x5c, 0x42, 0x24, + 0xec, 0x95, 0x5e, 0x75, 0xa3, 0x7d, 0x77, 0xfb, 0xd5, 0xdb, 0xd9, 0x7f, 0xc3, 0xd8, 0xd4, 0xf7, + 0x94, 0x20, 0x2d, 0x74, 0xdd, 0x3f, 0x6a, 0xff, 0x71, 0x40, 0x35, 0x58, 0xf8, 0x5d, 0xb4, 0x9a, + 0x15, 0x4b, 0x7d, 0xbe, 0x35, 0xbf, 0xad, 0xba, 0xc1, 0x44, 0xd0, 0xc9, 0x1e, 0x26, 0x08, 0x09, + 0x1e, 0xc6, 0x90, 0x7d, 0xcb, 0x22, 0xb0, 0x57, 0x8b, 0x26, 0x53, 0x2f, 0xc1, 0x41, 0x89, 0xd2, + 0x99, 0x08, 0xbc, 0x83, 0x6e, 0xc1, 0x93, 0x94, 0x67, 0x4c, 0x37, 0x2b, 0x04, 0x49, 0xdc, 0x17, + 0x76, 0xb3, 0x67, 0x6d, 0xd4, 0xfd, 0xb7, 0xc6, 0x23, 0xe7, 0xd6, 0xee, 0xfc, 0x26, 0x5d, 0x8c, + 0xc7, 0x04, 0x35, 0x72, 0xd5, 0x8b, 0xc2, 0xae, 0xf7, 0xaa, 0x1b, 0x2d, 0xff, 0xb6, 0xea, 0xe8, + 0x23, 0x8d, 0x5c, 0x8c, 0x9c, 0xe6, 0x37, 0x30, 0xd4, 0x0b, 0x6a, 0xa2, 0xf0, 0x47, 0xa8, 0x99, + 0x0b, 0xc8, 0x62, 0x95, 0x62, 0x31, 0x07, 0x65, 0xf1, 0x8f, 0x0c, 0x4e, 0xcb, 0x08, 0x7c, 0x07, + 0x55, 0x73, 0xde, 0x37, 0x73, 0xd0, 0x36, 0x81, 0xd5, 0xa3, 0xbd, 0xaf, 0xa8, 0xc2, 0xb1, 0x8b, + 0x1a, 0x61, 0x96, 0xe4, 0xa9, 0xb0, 0x6b, 0xda, 0x1c, 0x29, 0xf3, 0xaf, 0x35, 0x42, 0xcd, 0x0e, + 0x8e, 0x51, 0x1d, 0x9e, 0xc8, 0x8c, 0xd9, 0x0d, 0x7d, 0x7f, 0x7b, 0xaf, 0xf7, 0xe4, 0x91, 0x5d, + 0xa5, 0xb5, 0x1b, 0xcb, 0x6c, 0x38, 0xbd, 0x4e, 0x8d, 0xd1, 0xc2, 0xa6, 0x03, 0x08, 0x4d, 0x63, + 0xf0, 0x4d, 0x54, 0x3d, 0x85, 0x61, 0xf1, 0xf6, 0x50, 0xf5, 0x89, 0xbf, 0x44, 0xf5, 0x01, 0x7b, + 0x9c, 0x83, 0x79, 0x82, 0x3f, 0x5c, 0x9a, 0x8f, 0x56, 0xfb, 0x5e, 0x51, 0x68, 0xc1, 0xdc, 0x5e, + 0xd9, 0xb2, 0xdc, 0xbf, 0x2c, 0xe4, 0x2c, 0x79, 0x38, 0xf1, 0x2f, 0x08, 0x05, 0x93, 0xc7, 0x48, + 0xd8, 0x96, 0x3e, 0xff, 0xce, 0xab, 0x9f, 0xbf, 0x7c, 0xd8, 0xa6, 0xbf, 0x31, 0x25, 0x24, 0xe8, + 0x8c, 0x15, 0xde, 0x44, 0xed, 0x19, 0x69, 0x7d, 0xd2, 0x35, 0xff, 0xc6, 0x78, 0xe4, 0xb4, 
0x67, + 0xc4, 0xe9, 0x6c, 0x8c, 0xfb, 0x99, 0x29, 0x9b, 0x3e, 0x28, 0x76, 0x26, 0x43, 0x67, 0xe9, 0x7b, + 0x6d, 0xcd, 0x0f, 0xcd, 0x76, 0xf3, 0xb7, 0xdf, 0x9d, 0xca, 0xd3, 0xbf, 0x7b, 0x15, 0x7f, 0xf7, + 0xec, 0xbc, 0x5b, 0x79, 0x76, 0xde, 0xad, 0x3c, 0x3f, 0xef, 0x56, 0x9e, 0x8e, 0xbb, 0xd6, 0xd9, + 0xb8, 0x6b, 0x3d, 0x1b, 0x77, 0xad, 0xe7, 0xe3, 0xae, 0xf5, 0xcf, 0xb8, 0x6b, 0xfd, 0xfa, 0xa2, + 0x5b, 0xf9, 0xc1, 0x59, 0xf2, 0xdf, 0xe5, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x35, 0x2f, 0x11, + 0xe8, 0xdd, 0x08, 0x00, 0x00, } func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto index f70f01ef7a..f3ec4c06e4 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.proto +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.proto @@ -32,7 +32,7 @@ option go_package = "k8s.io/api/certificates/v1beta1"; // Describes a certificate signing request message CertificateSigningRequest { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec contains the certificate request, and is immutable after creation. // Only the request, signerName, expirationSeconds, and usages fields can be set on creation. @@ -65,18 +65,18 @@ message CertificateSigningRequestCondition { // timestamp for the last update to this condition // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4; // lastTransitionTime is the time the condition last transitioned from one status to another. // If unset, when a new condition type is added or an existing condition's status is changed, // the server defaults this to the current time. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 5; } message CertificateSigningRequestList { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; repeated CertificateSigningRequest items = 2; } diff --git a/vendor/k8s.io/api/coordination/v1/doc.go b/vendor/k8s.io/api/coordination/v1/doc.go index fc2f4f2c6e..9b2fbbda3a 100644 --- a/vendor/k8s.io/api/coordination/v1/doc.go +++ b/vendor/k8s.io/api/coordination/v1/doc.go @@ -17,6 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=coordination.k8s.io diff --git a/vendor/k8s.io/api/coordination/v1/generated.pb.go b/vendor/k8s.io/api/coordination/v1/generated.pb.go index de06106013..cf6702aef3 100644 --- a/vendor/k8s.io/api/coordination/v1/generated.pb.go +++ b/vendor/k8s.io/api/coordination/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto +// source: k8s.io/api/coordination/v1/generated.proto package v1 @@ -47,7 +47,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *Lease) Reset() { *m = Lease{} } func (*Lease) ProtoMessage() {} func (*Lease) Descriptor() ([]byte, []int) { - return fileDescriptor_929e1148ad9baca3, []int{0} + return fileDescriptor_239d5a4df3139dce, []int{0} } func (m *Lease) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +75,7 @@ var xxx_messageInfo_Lease proto.InternalMessageInfo func (m *LeaseList) Reset() { *m = LeaseList{} } func (*LeaseList) ProtoMessage() {} func (*LeaseList) Descriptor() ([]byte, []int) { - return fileDescriptor_929e1148ad9baca3, []int{1} + return fileDescriptor_239d5a4df3139dce, []int{1} } func (m *LeaseList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +103,7 @@ var xxx_messageInfo_LeaseList proto.InternalMessageInfo func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } func (*LeaseSpec) ProtoMessage() {} func (*LeaseSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_929e1148ad9baca3, []int{2} + return fileDescriptor_239d5a4df3139dce, []int{2} } func (m *LeaseSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,45 +135,48 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto", fileDescriptor_929e1148ad9baca3) -} - -var fileDescriptor_929e1148ad9baca3 = []byte{ - // 539 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xcf, 0x6e, 0xd3, 0x4c, - 0x14, 0xc5, 0xe3, 0x36, 0x91, 0x9a, 0xc9, 0xd7, 0x7e, 0x91, 0x95, 0x85, 0x95, 0x85, 0x5d, 0x22, - 0x21, 0x75, 0xc3, 0x0c, 0xa9, 0x10, 0x42, 0xac, 0x8a, 0x41, 0x40, 0xa5, 0x54, 0x48, 0x6e, 0x57, - 0xa8, 0x0b, 0x26, 0xf6, 0xc5, 0x19, 0x52, 0x7b, 0xcc, 0xcc, 0x38, 0xa8, 0x3b, 0x1e, 0x81, 0x2d, - 0x8f, 0x01, 0x4f, 0x91, 0x65, 0x97, 0x5d, 0x59, 0xc4, 0xbc, 0x08, 0x9a, 0x49, 0xda, 0x84, 0xfc, - 0x51, 0x2b, 0x76, 0x9e, 0x7b, 0xef, 0xf9, 0xdd, 0x73, 0xcf, 0xc2, 0xe8, 0xe5, 0xf0, 0x99, 0xc4, - 0x8c, 0x93, 0x61, 0xde, 0x07, 0x91, 0x82, 0x02, 0x49, 0x46, 0x90, 0x46, 0x5c, 0x90, 0x59, 0x83, - 0x66, 0x8c, 0x84, 0x9c, 0x8b, 0x88, 0xa5, 0x54, 0x31, 0x9e, 0x92, 0x51, 0x97, 0xc4, 0x90, 0x82, - 0xa0, 0x0a, 0x22, 0x9c, 0x09, 0xae, 0xb8, 0xdd, 0x9e, 0xce, 0x62, 0x9a, 0x31, 0xbc, 0x38, 0x8b, - 0x47, 0xdd, 0xf6, 0xa3, 0x98, 0xa9, 0x41, 0xde, 0xc7, 0x21, 0x4f, 0x48, 0xcc, 0x63, 0x4e, 0x8c, - 0xa4, 0x9f, 0x7f, 0x34, 0x2f, 0xf3, 0x30, 0x5f, 0x53, 0x54, 0xfb, 0xc9, 0x7c, 0x6d, 0x42, 0xc3, - 0x01, 0x4b, 0x41, 0x5c, 0x92, 0x6c, 0x18, 0xeb, 0x82, 0x24, 0x09, 0x28, 0xba, 0xc6, 0x40, 0x9b, - 0x6c, 0x52, 0x89, 0x3c, 0x55, 0x2c, 0x81, 0x15, 0xc1, 0xd3, 0xbb, 0x04, 0x32, 0x1c, 0x40, 0x42, - 0x97, 0x75, 0x9d, 0x9f, 0x16, 0xaa, 0xf5, 0x80, 0x4a, 0xb0, 0x3f, 0xa0, 0x1d, 0xed, 0x26, 0xa2, - 0x8a, 0x3a, 0xd6, 0xbe, 0x75, 0xd0, 0x38, 0x7c, 0x8c, 0xe7, 0x31, 0xdc, 0x42, 0x71, 0x36, 0x8c, - 0x75, 0x41, 0x62, 0x3d, 0x8d, 0x47, 0x5d, 0xfc, 0xae, 0xff, 0x09, 0x42, 0x75, 0x02, 0x8a, 0xfa, - 0xf6, 0xb8, 0xf0, 0x2a, 0x65, 0xe1, 0xa1, 0x79, 0x2d, 0xb8, 0xa5, 0xda, 0x6f, 0x50, 0x55, 0x66, - 0x10, 0x3a, 0x5b, 0x86, 0xfe, 0x10, 0x6f, 0x0e, 0x19, 0x1b, 0x4b, 0xa7, 0x19, 0x84, 0xfe, 0x7f, - 0x33, 0x64, 0x55, 0xbf, 0x02, 0x03, 0xe8, 0xfc, 0xb0, 0x50, 0xdd, 0x4c, 0xf4, 0x98, 0x54, 0xf6, - 0xf9, 0x8a, 0x71, 0x7c, 0x3f, 0xe3, 0x5a, 0x6d, 0x6c, 0x37, 0x67, 0x3b, 0x76, 0x6e, 0x2a, 0x0b, - 0xa6, 
0x5f, 0xa3, 0x1a, 0x53, 0x90, 0x48, 0x67, 0x6b, 0x7f, 0xfb, 0xa0, 0x71, 0xf8, 0xe0, 0x4e, - 0xd7, 0xfe, 0xee, 0x8c, 0x56, 0x3b, 0xd6, 0xba, 0x60, 0x2a, 0xef, 0x7c, 0xdf, 0x9e, 0x79, 0xd6, - 0x77, 0xd8, 0xcf, 0xd1, 0xde, 0x80, 0x5f, 0x44, 0x20, 0x8e, 0x23, 0x48, 0x15, 0x53, 0x97, 0xc6, - 0x79, 0xdd, 0xb7, 0xcb, 0xc2, 0xdb, 0x7b, 0xfb, 0x57, 0x27, 0x58, 0x9a, 0xb4, 0x7b, 0xa8, 0x75, - 0xa1, 0x41, 0xaf, 0x72, 0x61, 0x36, 0x9f, 0x42, 0xc8, 0xd3, 0x48, 0x9a, 0x58, 0x6b, 0xbe, 0x53, - 0x16, 0x5e, 0xab, 0xb7, 0xa6, 0x1f, 0xac, 0x55, 0xd9, 0x7d, 0xd4, 0xa0, 0xe1, 0xe7, 0x9c, 0x09, - 0x38, 0x63, 0x09, 0x38, 0xdb, 0x26, 0x40, 0x72, 0xbf, 0x00, 0x4f, 0x58, 0x28, 0xb8, 0x96, 0xf9, - 0xff, 0x97, 0x85, 0xd7, 0x78, 0x31, 0xe7, 0x04, 0x8b, 0x50, 0xfb, 0x1c, 0xd5, 0x05, 0xa4, 0xf0, - 0xc5, 0x6c, 0xa8, 0xfe, 0xdb, 0x86, 0xdd, 0xb2, 0xf0, 0xea, 0xc1, 0x0d, 0x25, 0x98, 0x03, 0xed, - 0x23, 0xd4, 0x34, 0x97, 0x9d, 0x09, 0x9a, 0x4a, 0xa6, 0x6f, 0x93, 0x4e, 0xcd, 0x64, 0xd1, 0x2a, - 0x0b, 0xaf, 0xd9, 0x5b, 0xea, 0x05, 0x2b, 0xd3, 0xfe, 0xd1, 0x78, 0xe2, 0x56, 0xae, 0x26, 0x6e, - 0xe5, 0x7a, 0xe2, 0x56, 0xbe, 0x96, 0xae, 0x35, 0x2e, 0x5d, 0xeb, 0xaa, 0x74, 0xad, 0xeb, 0xd2, - 0xb5, 0x7e, 0x95, 0xae, 0xf5, 0xed, 0xb7, 0x5b, 0x79, 0xdf, 0xde, 0xfc, 0x03, 0xf9, 0x13, 0x00, - 0x00, 0xff, 0xff, 0xef, 0xe4, 0x75, 0x3b, 0x76, 0x04, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/coordination/v1/generated.proto", fileDescriptor_239d5a4df3139dce) +} + +var fileDescriptor_239d5a4df3139dce = []byte{ + // 588 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xdf, 0x4e, 0xd4, 0x40, + 0x14, 0xc6, 0xb7, 0xb0, 0xab, 0xec, 0xac, 0xfc, 0xc9, 0xc8, 0x45, 0xb3, 0x17, 0x2d, 0x92, 0x98, + 0x10, 0x13, 0xa7, 0x42, 0x8c, 0x31, 0x26, 0x26, 0x58, 0x89, 0x4a, 0xb2, 0x44, 0x53, 0xb8, 0x32, + 0x5c, 0x38, 0xdb, 0x1e, 0xba, 0x23, 0xb4, 0x53, 0x67, 0x66, 0x31, 0xdc, 0xf9, 0x08, 0x3e, 0x81, + 0xef, 0xa0, 0x4f, 0xc1, 0x25, 0x97, 0x5c, 0x35, 0x32, 0xbe, 0x85, 0x57, 0x66, 0x66, 0x0b, 0x0b, + 0xcb, 0x6e, 0x20, 0xde, 0x75, 0xce, 0x39, 0xdf, 0xef, 0x7c, 0x73, 0x4e, 0x5b, 0xf4, 0x68, 0xff, + 0xb9, 0x24, 0x8c, 0x07, 0xb4, 0x60, 0x41, 0xcc, 0xb9, 0x48, 0x58, 0x4e, 0x15, 0xe3, 0x79, 0x70, + 0xb8, 0x1a, 0xa4, 0x90, 0x83, 0xa0, 0x0a, 0x12, 0x52, 0x08, 0xae, 0x38, 0x6e, 0x0f, 0x6a, 0x09, + 0x2d, 0x18, 0xb9, 0x5c, 0x4b, 0x0e, 0x57, 0xdb, 0x8f, 0x53, 0xa6, 0x7a, 0xfd, 0x2e, 0x89, 0x79, + 0x16, 0xa4, 0x3c, 0xe5, 0x81, 0x95, 0x74, 0xfb, 0x7b, 0xf6, 0x64, 0x0f, 0xf6, 0x69, 0x80, 0x6a, + 0x3f, 0x1d, 0xb6, 0xcd, 0x68, 0xdc, 0x63, 0x39, 0x88, 0xa3, 0xa0, 0xd8, 0x4f, 0x4d, 0x40, 0x06, + 0x19, 0x28, 0x3a, 0xc6, 0x40, 0x3b, 0x98, 0xa4, 0x12, 0xfd, 0x5c, 0xb1, 0x0c, 0xae, 0x09, 0x9e, + 0xdd, 0x24, 0x90, 0x71, 0x0f, 0x32, 0x3a, 0xaa, 0x5b, 0xfe, 0xe5, 0xa0, 0x46, 0x07, 0xa8, 0x04, + 0xfc, 0x09, 0xcd, 0x18, 0x37, 0x09, 0x55, 0xd4, 0x75, 0x96, 0x9c, 0x95, 0xd6, 0xda, 0x13, 0x32, + 0x1c, 0xc3, 0x05, 0x94, 0x14, 0xfb, 0xa9, 0x09, 0x48, 0x62, 0xaa, 0xc9, 0xe1, 0x2a, 0x79, 0xdf, + 0xfd, 0x0c, 0xb1, 0xda, 0x02, 0x45, 0x43, 0x7c, 0x5c, 0xfa, 0x35, 0x5d, 0xfa, 0x68, 0x18, 0x8b, + 0x2e, 0xa8, 0xf8, 0x2d, 0xaa, 0xcb, 0x02, 0x62, 0x77, 0xca, 0xd2, 0x1f, 0x92, 0xc9, 0x43, 0x26, + 0xd6, 0xd2, 0x76, 0x01, 0x71, 0x78, 0xaf, 0x42, 0xd6, 0xcd, 0x29, 0xb2, 0x80, 0xe5, 0x9f, 0x0e, + 0x6a, 0xda, 0x8a, 0x0e, 0x93, 0x0a, 0xef, 0x5e, 0x33, 0x4e, 0x6e, 0x67, 0xdc, 0xa8, 0xad, 0xed, + 0x85, 0xaa, 0xc7, 0xcc, 0x79, 0xe4, 0x92, 0xe9, 0x37, 0xa8, 0xc1, 0x14, 0x64, 0xd2, 0x9d, 0x5a, + 0x9a, 0x5e, 0x69, 0xad, 0x3d, 0xb8, 0xd1, 0x75, 0x38, 
0x5b, 0xd1, 0x1a, 0x9b, 0x46, 0x17, 0x0d, + 0xe4, 0xcb, 0x3f, 0xea, 0x95, 0x67, 0x73, 0x0f, 0xfc, 0x02, 0xcd, 0xf5, 0xf8, 0x41, 0x02, 0x62, + 0x33, 0x81, 0x5c, 0x31, 0x75, 0x64, 0x9d, 0x37, 0x43, 0xac, 0x4b, 0x7f, 0xee, 0xdd, 0x95, 0x4c, + 0x34, 0x52, 0x89, 0x3b, 0x68, 0xf1, 0xc0, 0x80, 0x36, 0xfa, 0xc2, 0x76, 0xde, 0x86, 0x98, 0xe7, + 0x89, 0xb4, 0x63, 0x6d, 0x84, 0xae, 0x2e, 0xfd, 0xc5, 0xce, 0x98, 0x7c, 0x34, 0x56, 0x85, 0xbb, + 0xa8, 0x45, 0xe3, 0x2f, 0x7d, 0x26, 0x60, 0x87, 0x65, 0xe0, 0x4e, 0xdb, 0x01, 0x06, 0xb7, 0x1b, + 0xe0, 0x16, 0x8b, 0x05, 0x37, 0xb2, 0x70, 0x5e, 0x97, 0x7e, 0xeb, 0xd5, 0x90, 0x13, 0x5d, 0x86, + 0xe2, 0x5d, 0xd4, 0x14, 0x90, 0xc3, 0x57, 0xdb, 0xa1, 0xfe, 0x7f, 0x1d, 0x66, 0x75, 0xe9, 0x37, + 0xa3, 0x73, 0x4a, 0x34, 0x04, 0xe2, 0x75, 0xb4, 0x60, 0x6f, 0xb6, 0x23, 0x68, 0x2e, 0x99, 0xb9, + 0x9b, 0x74, 0x1b, 0x76, 0x16, 0x8b, 0xba, 0xf4, 0x17, 0x3a, 0x23, 0xb9, 0xe8, 0x5a, 0x35, 0xde, + 0x40, 0x33, 0x52, 0x99, 0xaf, 0x22, 0x3d, 0x72, 0xef, 0xd8, 0x3d, 0xac, 0x98, 0xb7, 0x61, 0xbb, + 0x8a, 0xfd, 0x2d, 0x7d, 0xf7, 0xf5, 0xf9, 0xaa, 0x21, 0x19, 0x6c, 0xb1, 0xca, 0x45, 0x17, 0x4a, + 0xfc, 0x12, 0xcd, 0x17, 0x02, 0xf6, 0x40, 0x08, 0x48, 0x06, 0x2b, 0x74, 0xef, 0x5a, 0xd8, 0x7d, + 0x5d, 0xfa, 0xf3, 0x1f, 0xae, 0xa6, 0xa2, 0xd1, 0xda, 0x70, 0xfd, 0xf8, 0xcc, 0xab, 0x9d, 0x9c, + 0x79, 0xb5, 0xd3, 0x33, 0xaf, 0xf6, 0x4d, 0x7b, 0xce, 0xb1, 0xf6, 0x9c, 0x13, 0xed, 0x39, 0xa7, + 0xda, 0x73, 0x7e, 0x6b, 0xcf, 0xf9, 0xfe, 0xc7, 0xab, 0x7d, 0x6c, 0x4f, 0xfe, 0x8b, 0xfd, 0x0b, + 0x00, 0x00, 0xff, 0xff, 0xf8, 0xf4, 0xd4, 0x78, 0xe2, 0x04, 0x00, 0x00, } func (m *Lease) Marshal() (dAtA []byte, err error) { @@ -286,6 +289,20 @@ func (m *LeaseSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PreferredHolder != nil { + i -= len(*m.PreferredHolder) + copy(dAtA[i:], *m.PreferredHolder) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreferredHolder))) + i-- + dAtA[i] = 0x3a + } + if m.Strategy != nil { + i -= len(*m.Strategy) + copy(dAtA[i:], *m.Strategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Strategy))) + i-- + dAtA[i] = 0x32 + } if m.LeaseTransitions != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) i-- @@ -395,6 +412,14 @@ func (m *LeaseSpec) Size() (n int) { if m.LeaseTransitions != nil { n += 1 + sovGenerated(uint64(*m.LeaseTransitions)) } + if m.Strategy != nil { + l = len(*m.Strategy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PreferredHolder != nil { + l = len(*m.PreferredHolder) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -441,6 +466,8 @@ func (this *LeaseSpec) String() string { `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "v1.MicroTime", 1) + `,`, `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, `LeaseTransitions:` + valueToStringGenerated(this.LeaseTransitions) + `,`, + `Strategy:` + valueToStringGenerated(this.Strategy) + `,`, + `PreferredHolder:` + valueToStringGenerated(this.PreferredHolder) + `,`, `}`, }, "") return s @@ -860,6 +887,72 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } } m.LeaseTransitions = &v + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + 
} + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := CoordinatedLeaseStrategy(dAtA[iNdEx:postIndex]) + m.Strategy = &s + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredHolder", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.PreferredHolder = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/coordination/v1/generated.proto b/vendor/k8s.io/api/coordination/v1/generated.proto index 36fce60f2d..4d4f7e08f4 100644 --- a/vendor/k8s.io/api/coordination/v1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1/generated.proto @@ -32,7 +32,7 @@ option go_package = "k8s.io/api/coordination/v1"; message Lease { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec contains the specification of the Lease. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -45,7 +45,7 @@ message LeaseList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated Lease items = 2; @@ -54,27 +54,43 @@ message LeaseList { // LeaseSpec is a specification of a Lease. message LeaseSpec { // holderIdentity contains the identity of the holder of a current lease. + // If Coordinated Leader Election is used, the holder identity must be + // equal to the elected LeaseCandidate.metadata.name field. // +optional optional string holderIdentity = 1; // leaseDurationSeconds is a duration that candidates for a lease need - // to wait to force acquire it. This is measure against time of last + // to wait to force acquire it. This is measured against the time of last // observed renewTime. // +optional optional int32 leaseDurationSeconds = 2; // acquireTime is a time when the current lease was acquired. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3; // renewTime is a time when the current holder of a lease has last // updated the lease. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4; // leaseTransitions is the number of transitions of a lease between // holders. 
// +optional optional int32 leaseTransitions = 5; + + // Strategy indicates the strategy for picking the leader for coordinated leader election. + // If the field is not specified, there is no active coordination for this lease. + // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. + // +featureGate=CoordinatedLeaderElection + // +optional + optional string strategy = 6; + + // PreferredHolder signals to a lease holder that the lease has a + // more optimal holder and should be given up. + // This field can only be set if Strategy is also set. + // +featureGate=CoordinatedLeaderElection + // +optional + optional string preferredHolder = 7; } diff --git a/vendor/k8s.io/api/coordination/v1/types.go b/vendor/k8s.io/api/coordination/v1/types.go index b0e1d06829..5307cea88f 100644 --- a/vendor/k8s.io/api/coordination/v1/types.go +++ b/vendor/k8s.io/api/coordination/v1/types.go @@ -20,8 +20,21 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +type CoordinatedLeaseStrategy string + +// CoordinatedLeaseStrategy defines the strategy for picking the leader for coordinated leader election. +const ( + // OldestEmulationVersion picks the oldest LeaseCandidate, where "oldest" is defined as follows + // 1) Select the candidate(s) with the lowest emulation version + // 2) If multiple candidates have the same emulation version, select the candidate(s) with the lowest binary version. (Note that binary version must be greater or equal to emulation version) + // 3) If multiple candidates have the same binary version, select the candidate with the oldest creationTimestamp. + // If a candidate does not specify the emulationVersion and binaryVersion fields, it will not be considered a candidate for the lease. + OldestEmulationVersion CoordinatedLeaseStrategy = "OldestEmulationVersion" +) + // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.14 // Lease defines a lease concept. type Lease struct { @@ -39,10 +52,12 @@ type Lease struct { // LeaseSpec is a specification of a Lease. type LeaseSpec struct { // holderIdentity contains the identity of the holder of a current lease. + // If Coordinated Leader Election is used, the holder identity must be + // equal to the elected LeaseCandidate.metadata.name field. // +optional HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"` // leaseDurationSeconds is a duration that candidates for a lease need - // to wait to force acquire it. This is measure against time of last + // to wait to force acquire it. This is measured against the time of last // observed renewTime. // +optional LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty" protobuf:"varint,2,opt,name=leaseDurationSeconds"` @@ -57,9 +72,22 @@ type LeaseSpec struct { // holders. // +optional LeaseTransitions *int32 `json:"leaseTransitions,omitempty" protobuf:"varint,5,opt,name=leaseTransitions"` + // Strategy indicates the strategy for picking the leader for coordinated leader election. + // If the field is not specified, there is no active coordination for this lease. + // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. 
+ // +featureGate=CoordinatedLeaderElection + // +optional + Strategy *CoordinatedLeaseStrategy `json:"strategy,omitempty" protobuf:"bytes,6,opt,name=strategy"` + // PreferredHolder signals to a lease holder that the lease has a + // more optimal holder and should be given up. + // This field can only be set if Strategy is also set. + // +featureGate=CoordinatedLeaderElection + // +optional + PreferredHolder *string `json:"preferredHolder,omitempty" protobuf:"bytes,7,opt,name=preferredHolder"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // LeaseList is a list of Lease objects. type LeaseList struct { diff --git a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go index f3720eca02..6c1a7ea8b9 100644 --- a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go @@ -49,11 +49,13 @@ func (LeaseList) SwaggerDoc() map[string]string { var map_LeaseSpec = map[string]string{ "": "LeaseSpec is a specification of a Lease.", - "holderIdentity": "holderIdentity contains the identity of the holder of a current lease.", - "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed renewTime.", + "holderIdentity": "holderIdentity contains the identity of the holder of a current lease. If Coordinated Leader Election is used, the holder identity must be equal to the elected LeaseCandidate.metadata.name field.", + "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measured against the time of last observed renewTime.", "acquireTime": "acquireTime is a time when the current lease was acquired.", "renewTime": "renewTime is a time when the current holder of a lease has last updated the lease.", "leaseTransitions": "leaseTransitions is the number of transitions of a lease between holders.", + "strategy": "Strategy indicates the strategy for picking the leader for coordinated leader election. If the field is not specified, there is no active coordination for this lease. (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.", + "preferredHolder": "PreferredHolder signals to a lease holder that the lease has a more optimal holder and should be given up. 
This field can only be set if Strategy is also set.", } func (LeaseSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go index 99f6b0be7a..4d549cc99f 100644 --- a/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go @@ -111,6 +111,16 @@ func (in *LeaseSpec) DeepCopyInto(out *LeaseSpec) { *out = new(int32) **out = **in } + if in.Strategy != nil { + in, out := &in.Strategy, &out.Strategy + *out = new(CoordinatedLeaseStrategy) + **out = **in + } + if in.PreferredHolder != nil { + in, out := &in.PreferredHolder, &out.PreferredHolder + *out = new(string) + **out = **in + } return } diff --git a/vendor/k8s.io/api/coordination/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/coordination/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..a22632cba9 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Lease) APILifecycleIntroduced() (major, minor int) { + return 1, 14 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *LeaseList) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} diff --git a/vendor/k8s.io/api/coordination/v1alpha1/doc.go b/vendor/k8s.io/api/coordination/v1alpha1/doc.go new file mode 100644 index 0000000000..33a0b0ea97 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1alpha1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true + +// +groupName=coordination.k8s.io + +package v1alpha1 // import "k8s.io/api/coordination/v1alpha1" diff --git a/vendor/k8s.io/api/coordination/v1alpha1/generated.pb.go b/vendor/k8s.io/api/coordination/v1alpha1/generated.pb.go new file mode 100644 index 0000000000..9e072e62d0 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1alpha1/generated.pb.go @@ -0,0 +1,1036 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/coordination/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + k8s_io_api_coordination_v1 "k8s.io/api/coordination/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *LeaseCandidate) Reset() { *m = LeaseCandidate{} } +func (*LeaseCandidate) ProtoMessage() {} +func (*LeaseCandidate) Descriptor() ([]byte, []int) { + return fileDescriptor_cb9e87df9da593c2, []int{0} +} +func (m *LeaseCandidate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseCandidate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseCandidate) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseCandidate.Merge(m, src) +} +func (m *LeaseCandidate) XXX_Size() int { + return m.Size() +} +func (m *LeaseCandidate) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseCandidate.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseCandidate proto.InternalMessageInfo + +func (m *LeaseCandidateList) Reset() { *m = LeaseCandidateList{} } +func (*LeaseCandidateList) ProtoMessage() {} +func (*LeaseCandidateList) Descriptor() ([]byte, []int) { + return fileDescriptor_cb9e87df9da593c2, []int{1} +} +func (m *LeaseCandidateList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseCandidateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseCandidateList) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseCandidateList.Merge(m, src) +} +func (m *LeaseCandidateList) XXX_Size() int { + return m.Size() +} +func (m *LeaseCandidateList) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseCandidateList.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseCandidateList proto.InternalMessageInfo + +func (m *LeaseCandidateSpec) Reset() { *m = LeaseCandidateSpec{} } +func (*LeaseCandidateSpec) ProtoMessage() {} +func (*LeaseCandidateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cb9e87df9da593c2, []int{2} +} +func (m *LeaseCandidateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseCandidateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseCandidateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseCandidateSpec.Merge(m, src) +} +func (m *LeaseCandidateSpec) XXX_Size() int { + return m.Size() +} +func (m *LeaseCandidateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseCandidateSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseCandidateSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*LeaseCandidate)(nil), "k8s.io.api.coordination.v1alpha1.LeaseCandidate") + proto.RegisterType((*LeaseCandidateList)(nil), "k8s.io.api.coordination.v1alpha1.LeaseCandidateList") + proto.RegisterType((*LeaseCandidateSpec)(nil), "k8s.io.api.coordination.v1alpha1.LeaseCandidateSpec") +} + +func init() { + proto.RegisterFile("k8s.io/api/coordination/v1alpha1/generated.proto", fileDescriptor_cb9e87df9da593c2) +} + +var fileDescriptor_cb9e87df9da593c2 = []byte{ + // 570 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xcb, 0x6e, 0xd3, 0x4c, + 0x14, 0xc7, 0xe3, 0x36, 0xf9, 0x94, 0xcc, 0xd7, 0xa0, 0x30, 0x15, 0x92, 0x95, 0x85, 0x13, 0x65, + 0x55, 0x21, 0x31, 0x6e, 0xa0, 0x42, 0x48, 0xec, 0x5c, 0x40, 0x42, 0x4a, 0x4b, 0xe5, 0x42, 0x25, + 0x50, 0x17, 
0x4c, 0xec, 0x53, 0x67, 0x48, 0x7c, 0xd1, 0x78, 0x52, 0x94, 0x5d, 0x17, 0x3c, 0x00, + 0x8f, 0x15, 0x58, 0x75, 0xd9, 0x55, 0x44, 0xcc, 0x5b, 0xb0, 0x42, 0x33, 0xb1, 0x73, 0x6d, 0x94, + 0x88, 0x5d, 0xce, 0xe5, 0xf7, 0x3f, 0xe7, 0x7f, 0xac, 0x0c, 0x3a, 0xec, 0xbe, 0x88, 0x09, 0x0b, + 0x4d, 0x1a, 0x31, 0xd3, 0x09, 0x43, 0xee, 0xb2, 0x80, 0x0a, 0x16, 0x06, 0xe6, 0x75, 0x93, 0xf6, + 0xa2, 0x0e, 0x6d, 0x9a, 0x1e, 0x04, 0xc0, 0xa9, 0x00, 0x97, 0x44, 0x3c, 0x14, 0x21, 0xae, 0x4f, + 0x08, 0x42, 0x23, 0x46, 0xe6, 0x09, 0x92, 0x11, 0xd5, 0x27, 0x1e, 0x13, 0x9d, 0x7e, 0x9b, 0x38, + 0xa1, 0x6f, 0x7a, 0xa1, 0x17, 0x9a, 0x0a, 0x6c, 0xf7, 0xaf, 0x54, 0xa4, 0x02, 0xf5, 0x6b, 0x22, + 0x58, 0x7d, 0xbc, 0x7e, 0x85, 0xe5, 0xe1, 0xd5, 0xa3, 0x59, 0xaf, 0x4f, 0x9d, 0x0e, 0x0b, 0x80, + 0x0f, 0xcc, 0xa8, 0xeb, 0xc9, 0x44, 0x6c, 0xfa, 0x20, 0xe8, 0x7d, 0x94, 0xb9, 0x8e, 0xe2, 0xfd, + 0x40, 0x30, 0x1f, 0x56, 0x80, 0xe7, 0x9b, 0x80, 0xd8, 0xe9, 0x80, 0x4f, 0x97, 0xb9, 0xc6, 0x4f, + 0x0d, 0x3d, 0x68, 0x01, 0x8d, 0xe1, 0x98, 0x06, 0x2e, 0x73, 0xa9, 0x00, 0xfc, 0x19, 0x15, 0xe5, + 0x5a, 0x2e, 0x15, 0x54, 0xd7, 0xea, 0xda, 0xc1, 0xff, 0x4f, 0x0f, 0xc9, 0xec, 0x82, 0x53, 0x75, + 0x12, 0x75, 0x3d, 0x99, 0x88, 0x89, 0xec, 0x26, 0xd7, 0x4d, 0xf2, 0xae, 0xfd, 0x05, 0x1c, 0x71, + 0x02, 0x82, 0x5a, 0x78, 0x38, 0xaa, 0xe5, 0x92, 0x51, 0x0d, 0xcd, 0x72, 0xf6, 0x54, 0x15, 0x5f, + 0xa0, 0x7c, 0x1c, 0x81, 0xa3, 0xef, 0x28, 0xf5, 0x23, 0xb2, 0xe9, 0xfb, 0x90, 0xc5, 0x0d, 0xcf, + 0x23, 0x70, 0xac, 0xbd, 0x74, 0x42, 0x5e, 0x46, 0xb6, 0xd2, 0x6b, 0xfc, 0xd0, 0x10, 0x5e, 0x6c, + 0x6d, 0xb1, 0x58, 0xe0, 0xcb, 0x15, 0x43, 0x64, 0x3b, 0x43, 0x92, 0x56, 0x76, 0x2a, 0xe9, 0xb0, + 0x62, 0x96, 0x99, 0x33, 0xf3, 0x01, 0x15, 0x98, 0x00, 0x3f, 0xd6, 0x77, 0xea, 0xbb, 0x4b, 0xb7, + 0xda, 0xca, 0x8d, 0x55, 0x4e, 0xc5, 0x0b, 0x6f, 0xa5, 0x8c, 0x3d, 0x51, 0x6b, 0x7c, 0xcb, 0x2f, + 0x7b, 0x91, 0x46, 0xb1, 0x89, 0x4a, 0x3d, 0x99, 0x3d, 0xa5, 0x3e, 0x28, 0x33, 0x25, 0xeb, 0x61, + 0xca, 0x97, 0x5a, 0x59, 0xc1, 0x9e, 0xf5, 0xe0, 0x8f, 0xa8, 0x18, 0xb1, 0xc0, 0x7b, 0xcf, 0x7c, + 0x48, 0xef, 0x6d, 0x6e, 0x67, 0xfe, 0x84, 0x39, 0x3c, 0x94, 0x98, 0xb5, 0x27, 0x9d, 0x9f, 0xa5, + 0x22, 0xf6, 0x54, 0x0e, 0x5f, 0xa2, 0x12, 0x87, 0x00, 0xbe, 0x2a, 0xed, 0xdd, 0x7f, 0xd3, 0x2e, + 0xcb, 0xc5, 0xed, 0x4c, 0xc5, 0x9e, 0x09, 0xe2, 0x97, 0xa8, 0xdc, 0x66, 0x01, 0xe5, 0x83, 0x0b, + 0xe0, 0x31, 0x0b, 0x03, 0x3d, 0xaf, 0xdc, 0x3e, 0x4a, 0xdd, 0x96, 0xad, 0xf9, 0xa2, 0xbd, 0xd8, + 0x8b, 0x5f, 0xa1, 0x0a, 0xf8, 0xfd, 0x9e, 0x3a, 0x7c, 0xc6, 0x17, 0x14, 0xaf, 0xa7, 0x7c, 0xe5, + 0xf5, 0x52, 0xdd, 0x5e, 0x21, 0xf0, 0x8d, 0x86, 0xf6, 0x23, 0x0e, 0x57, 0xc0, 0x39, 0xb8, 0xe7, + 0x42, 0xfe, 0x6f, 0x3c, 0x06, 0xb1, 0xfe, 0x5f, 0x7d, 0xf7, 0xa0, 0x64, 0x9d, 0x26, 0xa3, 0xda, + 0xfe, 0xd9, 0x6a, 0xf9, 0xcf, 0xa8, 0xf6, 0x6c, 0xfd, 0x03, 0x41, 0x8e, 0xb3, 0x18, 0x5c, 0xf5, + 0xc1, 0x52, 0x70, 0x60, 0xdf, 0x37, 0xca, 0x7a, 0x33, 0x1c, 0x1b, 0xb9, 0xdb, 0xb1, 0x91, 0xbb, + 0x1b, 0x1b, 0xb9, 0x9b, 0xc4, 0xd0, 0x86, 0x89, 0xa1, 0xdd, 0x26, 0x86, 0x76, 0x97, 0x18, 0xda, + 0xaf, 0xc4, 0xd0, 0xbe, 0xff, 0x36, 0x72, 0x9f, 0xea, 0x9b, 0xde, 0xc4, 0xbf, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x05, 0x28, 0x49, 0xd9, 0x36, 0x05, 0x00, 0x00, +} + +func (m *LeaseCandidate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseCandidate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func 
(m *LeaseCandidate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LeaseCandidateList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseCandidateList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseCandidateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LeaseCandidateSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseCandidateSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseCandidateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PreferredStrategies) > 0 { + for iNdEx := len(m.PreferredStrategies) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PreferredStrategies[iNdEx]) + copy(dAtA[i:], m.PreferredStrategies[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PreferredStrategies[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + i -= len(m.EmulationVersion) + copy(dAtA[i:], m.EmulationVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EmulationVersion))) + i-- + dAtA[i] = 0x2a + i -= len(m.BinaryVersion) + copy(dAtA[i:], m.BinaryVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BinaryVersion))) + i-- + dAtA[i] = 0x22 + if m.RenewTime != nil { + { + size, err := m.RenewTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.PingTime != nil { + { + size, err := m.PingTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.LeaseName) + copy(dAtA[i:], m.LeaseName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LeaseName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *LeaseCandidate) Size() (n int) { + if m == nil { + return 0 + } + var l 
int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LeaseCandidateList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LeaseCandidateSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LeaseName) + n += 1 + l + sovGenerated(uint64(l)) + if m.PingTime != nil { + l = m.PingTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RenewTime != nil { + l = m.RenewTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.BinaryVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.EmulationVersion) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.PreferredStrategies) > 0 { + for _, s := range m.PreferredStrategies { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *LeaseCandidate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LeaseCandidate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LeaseCandidateSpec", "LeaseCandidateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LeaseCandidateList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]LeaseCandidate{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "LeaseCandidate", "LeaseCandidate", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&LeaseCandidateList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *LeaseCandidateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LeaseCandidateSpec{`, + `LeaseName:` + fmt.Sprintf("%v", this.LeaseName) + `,`, + `PingTime:` + strings.Replace(fmt.Sprintf("%v", this.PingTime), "MicroTime", "v1.MicroTime", 1) + `,`, + `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, + `BinaryVersion:` + fmt.Sprintf("%v", this.BinaryVersion) + `,`, + `EmulationVersion:` + fmt.Sprintf("%v", this.EmulationVersion) + `,`, + `PreferredStrategies:` + fmt.Sprintf("%v", this.PreferredStrategies) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *LeaseCandidate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCandidate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCandidate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseCandidateList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCandidateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCandidateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, LeaseCandidate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseCandidateSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCandidateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCandidateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LeaseName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PingTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PingTime == nil { + m.PingTime = &v1.MicroTime{} + } + if err := m.PingTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RenewTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RenewTime == nil { + m.RenewTime = &v1.MicroTime{} + } + if err := m.RenewTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BinaryVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BinaryVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EmulationVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EmulationVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredStrategies", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferredStrategies = append(m.PreferredStrategies, k8s_io_api_coordination_v1.CoordinatedLeaseStrategy(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/coordination/v1alpha1/generated.proto b/vendor/k8s.io/api/coordination/v1alpha1/generated.proto new file mode 100644 index 0000000000..57895ad569 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1alpha1/generated.proto @@ -0,0 +1,105 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.coordination.v1alpha1; + +import "k8s.io/api/coordination/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/coordination/v1alpha1"; + +// LeaseCandidate defines a candidate for a Lease object. +// Candidates are created such that coordinated leader election will pick the best leader from the list of candidates. +message LeaseCandidate { + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec contains the specification of the Lease. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional LeaseCandidateSpec spec = 2; +} + +// LeaseCandidateList is a list of Lease objects. +message LeaseCandidateList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of schema objects. + repeated LeaseCandidate items = 2; +} + +// LeaseCandidateSpec is a specification of a Lease. +message LeaseCandidateSpec { + // LeaseName is the name of the lease for which this candidate is contending. + // This field is immutable. 
+ // +required + optional string leaseName = 1; + + // PingTime is the last time that the server has requested the LeaseCandidate + // to renew. It is only done during leader election to check if any + // LeaseCandidates have become ineligible. When PingTime is updated, the + // LeaseCandidate will respond by updating RenewTime. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime pingTime = 2; + + // RenewTime is the time that the LeaseCandidate was last updated. + // Any time a Lease needs to do leader election, the PingTime field + // is updated to signal to the LeaseCandidate that they should update + // the RenewTime. + // Old LeaseCandidate objects are also garbage collected if it has been hours + // since the last renew. The PingTime field is updated regularly to prevent + // garbage collection for still active LeaseCandidates. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 3; + + // BinaryVersion is the binary version. It must be in a semver format without leading `v`. + // This field is required when strategy is "OldestEmulationVersion" + // +optional + optional string binaryVersion = 4; + + // EmulationVersion is the emulation version. It must be in a semver format without leading `v`. + // EmulationVersion must be less than or equal to BinaryVersion. + // This field is required when strategy is "OldestEmulationVersion" + // +optional + optional string emulationVersion = 5; + + // PreferredStrategies indicates the list of strategies for picking the leader for coordinated leader election. + // The list is ordered, and the first strategy supersedes all other strategies. The list is used by coordinated + // leader election to make a decision about the final election strategy. This follows as + // - If all clients have strategy X as the first element in this list, strategy X will be used. + // - If a candidate has strategy [X] and another candidate has strategy [Y, X], Y supersedes X and strategy Y + // will be used. + // - If a candidate has strategy [X, Y] and another candidate has strategy [Y, X], this is a user error and leader + // election will not operate the Lease until resolved. + // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. + // +featureGate=CoordinatedLeaderElection + // +listType=atomic + // +required + repeated string preferredStrategies = 6; +} + diff --git a/vendor/k8s.io/api/coordination/v1alpha1/register.go b/vendor/k8s.io/api/coordination/v1alpha1/register.go new file mode 100644 index 0000000000..6e57905a19 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1alpha1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "coordination.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &LeaseCandidate{}, + &LeaseCandidateList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/coordination/v1alpha1/types.go b/vendor/k8s.io/api/coordination/v1alpha1/types.go new file mode 100644 index 0000000000..14066600cf --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1alpha1/types.go @@ -0,0 +1,100 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/api/coordination/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// LeaseCandidate defines a candidate for a Lease object. +// Candidates are created such that coordinated leader election will pick the best leader from the list of candidates. +type LeaseCandidate struct { + metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec contains the specification of the Lease. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec LeaseCandidateSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// LeaseCandidateSpec is a specification of a Lease. +type LeaseCandidateSpec struct { + // LeaseName is the name of the lease for which this candidate is contending. + // This field is immutable. + // +required + LeaseName string `json:"leaseName" protobuf:"bytes,1,name=leaseName"` + // PingTime is the last time that the server has requested the LeaseCandidate + // to renew. It is only done during leader election to check if any + // LeaseCandidates have become ineligible. 
When PingTime is updated, the + // LeaseCandidate will respond by updating RenewTime. + // +optional + PingTime *metav1.MicroTime `json:"pingTime,omitempty" protobuf:"bytes,2,opt,name=pingTime"` + // RenewTime is the time that the LeaseCandidate was last updated. + // Any time a Lease needs to do leader election, the PingTime field + // is updated to signal to the LeaseCandidate that they should update + // the RenewTime. + // Old LeaseCandidate objects are also garbage collected if it has been hours + // since the last renew. The PingTime field is updated regularly to prevent + // garbage collection for still active LeaseCandidates. + // +optional + RenewTime *metav1.MicroTime `json:"renewTime,omitempty" protobuf:"bytes,3,opt,name=renewTime"` + // BinaryVersion is the binary version. It must be in a semver format without leading `v`. + // This field is required when strategy is "OldestEmulationVersion" + // +optional + BinaryVersion string `json:"binaryVersion,omitempty" protobuf:"bytes,4,opt,name=binaryVersion"` + // EmulationVersion is the emulation version. It must be in a semver format without leading `v`. + // EmulationVersion must be less than or equal to BinaryVersion. + // This field is required when strategy is "OldestEmulationVersion" + // +optional + EmulationVersion string `json:"emulationVersion,omitempty" protobuf:"bytes,5,opt,name=emulationVersion"` + // PreferredStrategies indicates the list of strategies for picking the leader for coordinated leader election. + // The list is ordered, and the first strategy supersedes all other strategies. The list is used by coordinated + // leader election to make a decision about the final election strategy. This follows as + // - If all clients have strategy X as the first element in this list, strategy X will be used. + // - If a candidate has strategy [X] and another candidate has strategy [Y, X], Y supersedes X and strategy Y + // will be used. + // - If a candidate has strategy [X, Y] and another candidate has strategy [Y, X], this is a user error and leader + // election will not operate the Lease until resolved. + // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. + // +featureGate=CoordinatedLeaderElection + // +listType=atomic + // +required + PreferredStrategies []v1.CoordinatedLeaseStrategy `json:"preferredStrategies,omitempty" protobuf:"bytes,6,opt,name=preferredStrategies"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// LeaseCandidateList is a list of Lease objects. +type LeaseCandidateList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of schema objects. + Items []LeaseCandidate `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/coordination/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..0e52809c8c --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,64 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_LeaseCandidate = map[string]string{ + "": "LeaseCandidate defines a candidate for a Lease object. Candidates are created such that coordinated leader election will pick the best leader from the list of candidates.", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec contains the specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (LeaseCandidate) SwaggerDoc() map[string]string { + return map_LeaseCandidate +} + +var map_LeaseCandidateList = map[string]string{ + "": "LeaseCandidateList is a list of Lease objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of schema objects.", +} + +func (LeaseCandidateList) SwaggerDoc() map[string]string { + return map_LeaseCandidateList +} + +var map_LeaseCandidateSpec = map[string]string{ + "": "LeaseCandidateSpec is a specification of a Lease.", + "leaseName": "LeaseName is the name of the lease for which this candidate is contending. This field is immutable.", + "pingTime": "PingTime is the last time that the server has requested the LeaseCandidate to renew. It is only done during leader election to check if any LeaseCandidates have become ineligible. When PingTime is updated, the LeaseCandidate will respond by updating RenewTime.", + "renewTime": "RenewTime is the time that the LeaseCandidate was last updated. Any time a Lease needs to do leader election, the PingTime field is updated to signal to the LeaseCandidate that they should update the RenewTime. Old LeaseCandidate objects are also garbage collected if it has been hours since the last renew. The PingTime field is updated regularly to prevent garbage collection for still active LeaseCandidates.", + "binaryVersion": "BinaryVersion is the binary version. It must be in a semver format without leading `v`. This field is required when strategy is \"OldestEmulationVersion\"", + "emulationVersion": "EmulationVersion is the emulation version. It must be in a semver format without leading `v`. EmulationVersion must be less than or equal to BinaryVersion. This field is required when strategy is \"OldestEmulationVersion\"", + "preferredStrategies": "PreferredStrategies indicates the list of strategies for picking the leader for coordinated leader election. 
The list is ordered, and the first strategy supersedes all other strategies. The list is used by coordinated leader election to make a decision about the final election strategy. This follows as - If all clients have strategy X as the first element in this list, strategy X will be used. - If a candidate has strategy [X] and another candidate has strategy [Y, X], Y supersedes X and strategy Y\n will be used.\n- If a candidate has strategy [X, Y] and another candidate has strategy [Y, X], this is a user error and leader\n election will not operate the Lease until resolved.\n(Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.", +} + +func (LeaseCandidateSpec) SwaggerDoc() map[string]string { + return map_LeaseCandidateSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..9cf15d21dc --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,116 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/coordination/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaseCandidate) DeepCopyInto(out *LeaseCandidate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidate. +func (in *LeaseCandidate) DeepCopy() *LeaseCandidate { + if in == nil { + return nil + } + out := new(LeaseCandidate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LeaseCandidate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaseCandidateList) DeepCopyInto(out *LeaseCandidateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LeaseCandidate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidateList. 
+func (in *LeaseCandidateList) DeepCopy() *LeaseCandidateList { + if in == nil { + return nil + } + out := new(LeaseCandidateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LeaseCandidateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaseCandidateSpec) DeepCopyInto(out *LeaseCandidateSpec) { + *out = *in + if in.PingTime != nil { + in, out := &in.PingTime, &out.PingTime + *out = (*in).DeepCopy() + } + if in.RenewTime != nil { + in, out := &in.RenewTime, &out.RenewTime + *out = (*in).DeepCopy() + } + if in.PreferredStrategies != nil { + in, out := &in.PreferredStrategies, &out.PreferredStrategies + *out = make([]v1.CoordinatedLeaseStrategy, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidateSpec. +func (in *LeaseCandidateSpec) DeepCopy() *LeaseCandidateSpec { + if in == nil { + return nil + } + out := new(LeaseCandidateSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..f42bef65c9 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,58 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1alpha1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *LeaseCandidate) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *LeaseCandidate) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
+func (in *LeaseCandidate) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *LeaseCandidateList) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *LeaseCandidateList) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *LeaseCandidateList) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go index 7334290fbb..bea9b8146a 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto +// source: k8s.io/api/coordination/v1beta1/generated.proto package v1beta1 @@ -25,6 +25,8 @@ import ( io "io" proto "github.com/gogo/protobuf/proto" + + k8s_io_api_coordination_v1 "k8s.io/api/coordination/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" @@ -47,7 +49,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *Lease) Reset() { *m = Lease{} } func (*Lease) ProtoMessage() {} func (*Lease) Descriptor() ([]byte, []int) { - return fileDescriptor_daca6bcd2ff63a80, []int{0} + return fileDescriptor_8d4e223b8bb23da3, []int{0} } func (m *Lease) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +77,7 @@ var xxx_messageInfo_Lease proto.InternalMessageInfo func (m *LeaseList) Reset() { *m = LeaseList{} } func (*LeaseList) ProtoMessage() {} func (*LeaseList) Descriptor() ([]byte, []int) { - return fileDescriptor_daca6bcd2ff63a80, []int{1} + return fileDescriptor_8d4e223b8bb23da3, []int{1} } func (m *LeaseList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +105,7 @@ var xxx_messageInfo_LeaseList proto.InternalMessageInfo func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } func (*LeaseSpec) ProtoMessage() {} func (*LeaseSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_daca6bcd2ff63a80, []int{2} + return fileDescriptor_8d4e223b8bb23da3, []int{2} } func (m *LeaseSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,45 +137,49 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto", fileDescriptor_daca6bcd2ff63a80) -} - -var fileDescriptor_daca6bcd2ff63a80 = []byte{ - // 543 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xc1, 0x6e, 0xd3, 0x4e, - 0x10, 0xc6, 
0xe3, 0xb6, 0x91, 0x9a, 0xcd, 0xbf, 0xfd, 0x47, 0x56, 0x0e, 0x56, 0x0e, 0x76, 0x95, - 0x03, 0xaa, 0x90, 0xd8, 0x25, 0x15, 0x42, 0x88, 0x13, 0x58, 0x20, 0xb5, 0xc2, 0x15, 0x92, 0xdb, - 0x13, 0xea, 0x81, 0xb5, 0x3d, 0x38, 0x4b, 0x6a, 0xaf, 0xd9, 0x5d, 0x07, 0xf5, 0xc6, 0x23, 0x70, - 0xe5, 0x45, 0xe0, 0x15, 0x72, 0xec, 0xb1, 0x27, 0x8b, 0x98, 0x17, 0x41, 0xde, 0xb8, 0x4d, 0x48, - 0x8a, 0x12, 0x71, 0xf3, 0xce, 0xcc, 0xf7, 0x9b, 0x6f, 0xbe, 0x83, 0xd1, 0xf1, 0xe8, 0x99, 0xc4, - 0x8c, 0x93, 0x51, 0x1e, 0x80, 0x48, 0x41, 0x81, 0x24, 0x63, 0x48, 0x23, 0x2e, 0x48, 0xdd, 0xa0, - 0x19, 0x23, 0x21, 0xe7, 0x22, 0x62, 0x29, 0x55, 0x8c, 0xa7, 0x64, 0x3c, 0x08, 0x40, 0xd1, 0x01, - 0x89, 0x21, 0x05, 0x41, 0x15, 0x44, 0x38, 0x13, 0x5c, 0x71, 0xd3, 0x99, 0x09, 0x30, 0xcd, 0x18, - 0x5e, 0x14, 0xe0, 0x5a, 0xd0, 0x7b, 0x14, 0x33, 0x35, 0xcc, 0x03, 0x1c, 0xf2, 0x84, 0xc4, 0x3c, - 0xe6, 0x44, 0xeb, 0x82, 0xfc, 0x83, 0x7e, 0xe9, 0x87, 0xfe, 0x9a, 0xf1, 0x7a, 0x4f, 0xe6, 0x06, - 0x12, 0x1a, 0x0e, 0x59, 0x0a, 0xe2, 0x8a, 0x64, 0xa3, 0xb8, 0x2a, 0x48, 0x92, 0x80, 0xa2, 0x64, - 0xbc, 0xe2, 0xa2, 0x47, 0xfe, 0xa6, 0x12, 0x79, 0xaa, 0x58, 0x02, 0x2b, 0x82, 0xa7, 0xeb, 0x04, - 0x32, 0x1c, 0x42, 0x42, 0x97, 0x75, 0xfd, 0x1f, 0x06, 0x6a, 0x7a, 0x40, 0x25, 0x98, 0xef, 0xd1, - 0x6e, 0xe5, 0x26, 0xa2, 0x8a, 0x5a, 0xc6, 0x81, 0x71, 0xd8, 0x3e, 0x7a, 0x8c, 0xe7, 0x59, 0xdc, - 0x41, 0x71, 0x36, 0x8a, 0xab, 0x82, 0xc4, 0xd5, 0x34, 0x1e, 0x0f, 0xf0, 0xdb, 0xe0, 0x23, 0x84, - 0xea, 0x14, 0x14, 0x75, 0xcd, 0x49, 0xe1, 0x34, 0xca, 0xc2, 0x41, 0xf3, 0x9a, 0x7f, 0x47, 0x35, - 0x3d, 0xb4, 0x23, 0x33, 0x08, 0xad, 0x2d, 0x4d, 0x7f, 0x88, 0xd7, 0x24, 0x8d, 0xb5, 0xaf, 0xb3, - 0x0c, 0x42, 0xf7, 0xbf, 0x9a, 0xbb, 0x53, 0xbd, 0x7c, 0x4d, 0xe9, 0x7f, 0x37, 0x50, 0x4b, 0x4f, - 0x78, 0x4c, 0x2a, 0xf3, 0x62, 0xc5, 0x3d, 0xde, 0xcc, 0x7d, 0xa5, 0xd6, 0xde, 0x3b, 0xf5, 0x8e, - 0xdd, 0xdb, 0xca, 0x82, 0xf3, 0x37, 0xa8, 0xc9, 0x14, 0x24, 0xd2, 0xda, 0x3a, 0xd8, 0x3e, 0x6c, - 0x1f, 0x3d, 0xd8, 0xcc, 0xba, 0xbb, 0x57, 0x23, 0x9b, 0x27, 0x95, 0xd8, 0x9f, 0x31, 0xfa, 0xdf, - 0xb6, 0x6b, 0xe3, 0xd5, 0x31, 0xe6, 0x73, 0xb4, 0x3f, 0xe4, 0x97, 0x11, 0x88, 0x93, 0x08, 0x52, - 0xc5, 0xd4, 0x95, 0xb6, 0xdf, 0x72, 0xcd, 0xb2, 0x70, 0xf6, 0x8f, 0xff, 0xe8, 0xf8, 0x4b, 0x93, - 0xa6, 0x87, 0xba, 0x97, 0x15, 0xe8, 0x55, 0x2e, 0xf4, 0xfa, 0x33, 0x08, 0x79, 0x1a, 0x49, 0x1d, - 0x70, 0xd3, 0xb5, 0xca, 0xc2, 0xe9, 0x7a, 0xf7, 0xf4, 0xfd, 0x7b, 0x55, 0x66, 0x80, 0xda, 0x34, - 0xfc, 0x94, 0x33, 0x01, 0xe7, 0x2c, 0x01, 0x6b, 0x5b, 0xa7, 0x48, 0x36, 0x4b, 0xf1, 0x94, 0x85, - 0x82, 0x57, 0x32, 0xf7, 0xff, 0xb2, 0x70, 0xda, 0x2f, 0xe7, 0x1c, 0x7f, 0x11, 0x6a, 0x5e, 0xa0, - 0x96, 0x80, 0x14, 0x3e, 0xeb, 0x0d, 0x3b, 0xff, 0xb6, 0x61, 0xaf, 0x2c, 0x9c, 0x96, 0x7f, 0x4b, - 0xf1, 0xe7, 0x40, 0xf3, 0x05, 0xea, 0xe8, 0xcb, 0xce, 0x05, 0x4d, 0x25, 0xab, 0x6e, 0x93, 0x56, - 0x53, 0x67, 0xd1, 0x2d, 0x0b, 0xa7, 0xe3, 0x2d, 0xf5, 0xfc, 0x95, 0x69, 0xf7, 0xf5, 0x64, 0x6a, - 0x37, 0xae, 0xa7, 0x76, 0xe3, 0x66, 0x6a, 0x37, 0xbe, 0x94, 0xb6, 0x31, 0x29, 0x6d, 0xe3, 0xba, - 0xb4, 0x8d, 0x9b, 0xd2, 0x36, 0x7e, 0x96, 0xb6, 0xf1, 0xf5, 0x97, 0xdd, 0x78, 0xe7, 0xac, 0xf9, - 0xa9, 0xfc, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x19, 0x0e, 0xd7, 0x8f, 0x04, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/coordination/v1beta1/generated.proto", fileDescriptor_8d4e223b8bb23da3) +} + +var fileDescriptor_8d4e223b8bb23da3 = []byte{ + // 600 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xdf, 0x4e, 0xd4, 0x4e, + 0x14, 0xc7, 0xb7, 0xb0, 0xfb, 0xfb, 
0xb1, 0xb3, 0xf2, 0x27, 0x23, 0x17, 0x0d, 0x17, 0x2d, 0xe1, + 0xc2, 0x10, 0x12, 0xa7, 0x82, 0xc6, 0x18, 0x13, 0x13, 0x2d, 0x9a, 0x48, 0x2c, 0xd1, 0x14, 0xae, + 0x0c, 0x89, 0xce, 0xb6, 0x87, 0xee, 0x08, 0xed, 0xd4, 0x99, 0x59, 0x0c, 0x77, 0x3e, 0x82, 0x4f, + 0xa3, 0xf1, 0x0d, 0xb8, 0xe4, 0x92, 0xab, 0x46, 0xc6, 0xb7, 0xf0, 0xca, 0xcc, 0x6c, 0x61, 0x61, + 0x81, 0xb0, 0xf1, 0x6e, 0xe7, 0x9c, 0xf3, 0xfd, 0x9c, 0xef, 0x9c, 0xb3, 0x53, 0x14, 0xec, 0x3d, + 0x91, 0x84, 0xf1, 0x80, 0x96, 0x2c, 0x48, 0x38, 0x17, 0x29, 0x2b, 0xa8, 0x62, 0xbc, 0x08, 0x0e, + 0x56, 0xbb, 0xa0, 0xe8, 0x6a, 0x90, 0x41, 0x01, 0x82, 0x2a, 0x48, 0x49, 0x29, 0xb8, 0xe2, 0xd8, + 0x1f, 0x08, 0x08, 0x2d, 0x19, 0xb9, 0x28, 0x20, 0xb5, 0x60, 0xe1, 0x7e, 0xc6, 0x54, 0xaf, 0xdf, + 0x25, 0x09, 0xcf, 0x83, 0x8c, 0x67, 0x3c, 0xb0, 0xba, 0x6e, 0x7f, 0xd7, 0x9e, 0xec, 0xc1, 0xfe, + 0x1a, 0xf0, 0x16, 0x56, 0x6e, 0x36, 0x30, 0xda, 0x7b, 0xe1, 0xd1, 0xb0, 0x36, 0xa7, 0x49, 0x8f, + 0x15, 0x20, 0x0e, 0x83, 0x72, 0x2f, 0x33, 0x01, 0x19, 0xe4, 0xa0, 0xe8, 0x75, 0xaa, 0xe0, 0x26, + 0x95, 0xe8, 0x17, 0x8a, 0xe5, 0x70, 0x45, 0xf0, 0xf8, 0x36, 0x81, 0x4c, 0x7a, 0x90, 0xd3, 0x51, + 0xdd, 0xd2, 0x0f, 0x07, 0xb5, 0x22, 0xa0, 0x12, 0xf0, 0x47, 0x34, 0x65, 0xdc, 0xa4, 0x54, 0x51, + 0xd7, 0x59, 0x74, 0x96, 0x3b, 0x6b, 0x0f, 0xc8, 0x70, 0x6e, 0xe7, 0x50, 0x52, 0xee, 0x65, 0x26, + 0x20, 0x89, 0xa9, 0x26, 0x07, 0xab, 0xe4, 0x6d, 0xf7, 0x13, 0x24, 0x6a, 0x13, 0x14, 0x0d, 0xf1, + 0x51, 0xe5, 0x37, 0x74, 0xe5, 0xa3, 0x61, 0x2c, 0x3e, 0xa7, 0xe2, 0x08, 0x35, 0x65, 0x09, 0x89, + 0x3b, 0x61, 0xe9, 0x2b, 0xe4, 0x96, 0xad, 0x10, 0xeb, 0x6b, 0xab, 0x84, 0x24, 0xbc, 0x53, 0x73, + 0x9b, 0xe6, 0x14, 0x5b, 0xca, 0xd2, 0x77, 0x07, 0xb5, 0x6d, 0x45, 0xc4, 0xa4, 0xc2, 0x3b, 0x57, + 0xdc, 0x93, 0xf1, 0xdc, 0x1b, 0xb5, 0xf5, 0x3e, 0x57, 0xf7, 0x98, 0x3a, 0x8b, 0x5c, 0x70, 0xfe, + 0x06, 0xb5, 0x98, 0x82, 0x5c, 0xba, 0x13, 0x8b, 0x93, 0xcb, 0x9d, 0xb5, 0x7b, 0xe3, 0x59, 0x0f, + 0xa7, 0x6b, 0x64, 0x6b, 0xc3, 0x88, 0xe3, 0x01, 0x63, 0xe9, 0x67, 0xb3, 0x36, 0x6e, 0x2e, 0x83, + 0x9f, 0xa2, 0x99, 0x1e, 0xdf, 0x4f, 0x41, 0x6c, 0xa4, 0x50, 0x28, 0xa6, 0x0e, 0xad, 0xfd, 0x76, + 0x88, 0x75, 0xe5, 0xcf, 0xbc, 0xbe, 0x94, 0x89, 0x47, 0x2a, 0x71, 0x84, 0xe6, 0xf7, 0x0d, 0xe8, + 0x65, 0x5f, 0xd8, 0xf6, 0x5b, 0x90, 0xf0, 0x22, 0x95, 0x76, 0xc0, 0xad, 0xd0, 0xd5, 0x95, 0x3f, + 0x1f, 0x5d, 0x93, 0x8f, 0xaf, 0x55, 0xe1, 0x2e, 0xea, 0xd0, 0xe4, 0x73, 0x9f, 0x09, 0xd8, 0x66, + 0x39, 0xb8, 0x93, 0x76, 0x8a, 0xc1, 0x78, 0x53, 0xdc, 0x64, 0x89, 0xe0, 0x46, 0x16, 0xce, 0xea, + 0xca, 0xef, 0xbc, 0x18, 0x72, 0xe2, 0x8b, 0x50, 0xbc, 0x83, 0xda, 0x02, 0x0a, 0xf8, 0x62, 0x3b, + 0x34, 0xff, 0xad, 0xc3, 0xb4, 0xae, 0xfc, 0x76, 0x7c, 0x46, 0x89, 0x87, 0x40, 0xfc, 0x1c, 0xcd, + 0xd9, 0x9b, 0x6d, 0x0b, 0x5a, 0x48, 0x66, 0xee, 0x26, 0xdd, 0x96, 0x9d, 0xc5, 0xbc, 0xae, 0xfc, + 0xb9, 0x68, 0x24, 0x17, 0x5f, 0xa9, 0xc6, 0x1f, 0xd0, 0x94, 0x54, 0xe6, 0x7d, 0x64, 0x87, 0xee, + 0x7f, 0x76, 0x0f, 0xeb, 0xe6, 0x2f, 0xb1, 0x55, 0xc7, 0xfe, 0x54, 0xfe, 0xc3, 0x9b, 0xdf, 0x3e, + 0x59, 0x3f, 0x3b, 0x43, 0x3a, 0x58, 0x70, 0x2d, 0x8b, 0xcf, 0xa1, 0xf8, 0x19, 0x9a, 0x2d, 0x05, + 0xec, 0x82, 0x10, 0x90, 0x0e, 0xb6, 0xeb, 0xfe, 0x6f, 0xfb, 0xdc, 0xd5, 0x95, 0x3f, 0xfb, 0xee, + 0x72, 0x2a, 0x1e, 0xad, 0x0d, 0x5f, 0x1d, 0x9d, 0x7a, 0x8d, 0xe3, 0x53, 0xaf, 0x71, 0x72, 0xea, + 0x35, 0xbe, 0x6a, 0xcf, 0x39, 0xd2, 0x9e, 0x73, 0xac, 0x3d, 0xe7, 0x44, 0x7b, 0xce, 0x2f, 0xed, + 0x39, 0xdf, 0x7e, 0x7b, 0x8d, 0xf7, 0xfe, 0x2d, 0x1f, 0xc8, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x57, 0x93, 0xf3, 0xef, 0x42, 0x05, 0x00, 0x00, } func (m 
*Lease) Marshal() (dAtA []byte, err error) { @@ -286,6 +292,20 @@ func (m *LeaseSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PreferredHolder != nil { + i -= len(*m.PreferredHolder) + copy(dAtA[i:], *m.PreferredHolder) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreferredHolder))) + i-- + dAtA[i] = 0x3a + } + if m.Strategy != nil { + i -= len(*m.Strategy) + copy(dAtA[i:], *m.Strategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Strategy))) + i-- + dAtA[i] = 0x32 + } if m.LeaseTransitions != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) i-- @@ -395,6 +415,14 @@ func (m *LeaseSpec) Size() (n int) { if m.LeaseTransitions != nil { n += 1 + sovGenerated(uint64(*m.LeaseTransitions)) } + if m.Strategy != nil { + l = len(*m.Strategy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PreferredHolder != nil { + l = len(*m.PreferredHolder) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -441,6 +469,8 @@ func (this *LeaseSpec) String() string { `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "v1.MicroTime", 1) + `,`, `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, `LeaseTransitions:` + valueToStringGenerated(this.LeaseTransitions) + `,`, + `Strategy:` + valueToStringGenerated(this.Strategy) + `,`, + `PreferredHolder:` + valueToStringGenerated(this.PreferredHolder) + `,`, `}`, }, "") return s @@ -860,6 +890,72 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } } m.LeaseTransitions = &v + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_coordination_v1.CoordinatedLeaseStrategy(dAtA[iNdEx:postIndex]) + m.Strategy = &s + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredHolder", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.PreferredHolder = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.proto b/vendor/k8s.io/api/coordination/v1beta1/generated.proto index 92c8918b80..088811a74b 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.proto @@ -21,6 +21,7 @@ syntax = "proto2"; package k8s.io.api.coordination.v1beta1; +import "k8s.io/api/coordination/v1/generated.proto"; import 
"k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -32,7 +33,7 @@ option go_package = "k8s.io/api/coordination/v1beta1"; message Lease { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec contains the specification of the Lease. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -45,7 +46,7 @@ message LeaseList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated Lease items = 2; @@ -54,6 +55,8 @@ message LeaseList { // LeaseSpec is a specification of a Lease. message LeaseSpec { // holderIdentity contains the identity of the holder of a current lease. + // If Coordinated Leader Election is used, the holder identity must be + // equal to the elected LeaseCandidate.metadata.name field. // +optional optional string holderIdentity = 1; @@ -65,16 +68,28 @@ message LeaseSpec { // acquireTime is a time when the current lease was acquired. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3; // renewTime is a time when the current holder of a lease has last // updated the lease. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4; // leaseTransitions is the number of transitions of a lease between // holders. // +optional optional int32 leaseTransitions = 5; + + // Strategy indicates the strategy for picking the leader for coordinated leader election + // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. + // +featureGate=CoordinatedLeaderElection + // +optional + optional string strategy = 6; + + // PreferredHolder signals to a lease holder that the lease has a + // more optimal holder and should be given up. + // +featureGate=CoordinatedLeaderElection + // +optional + optional string preferredHolder = 7; } diff --git a/vendor/k8s.io/api/coordination/v1beta1/types.go b/vendor/k8s.io/api/coordination/v1beta1/types.go index 3a3d5f32e2..d63fc30a9e 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/types.go +++ b/vendor/k8s.io/api/coordination/v1beta1/types.go @@ -17,6 +17,7 @@ limitations under the License. package v1beta1 import ( + v1 "k8s.io/api/coordination/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -42,6 +43,8 @@ type Lease struct { // LeaseSpec is a specification of a Lease. type LeaseSpec struct { // holderIdentity contains the identity of the holder of a current lease. + // If Coordinated Leader Election is used, the holder identity must be + // equal to the elected LeaseCandidate.metadata.name field. 
// +optional HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"` // leaseDurationSeconds is a duration that candidates for a lease need @@ -60,6 +63,16 @@ type LeaseSpec struct { // holders. // +optional LeaseTransitions *int32 `json:"leaseTransitions,omitempty" protobuf:"varint,5,opt,name=leaseTransitions"` + // Strategy indicates the strategy for picking the leader for coordinated leader election + // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. + // +featureGate=CoordinatedLeaderElection + // +optional + Strategy *v1.CoordinatedLeaseStrategy `json:"strategy,omitempty" protobuf:"bytes,6,opt,name=strategy"` + // PreferredHolder signals to a lease holder that the lease has a + // more optimal holder and should be given up. + // +featureGate=CoordinatedLeaderElection + // +optional + PreferredHolder *string `json:"preferredHolder,omitempty" protobuf:"bytes,7,opt,name=preferredHolder"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go index 78ca4e393f..50fe8ea189 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go @@ -49,11 +49,13 @@ func (LeaseList) SwaggerDoc() map[string]string { var map_LeaseSpec = map[string]string{ "": "LeaseSpec is a specification of a Lease.", - "holderIdentity": "holderIdentity contains the identity of the holder of a current lease.", + "holderIdentity": "holderIdentity contains the identity of the holder of a current lease. If Coordinated Leader Election is used, the holder identity must be equal to the elected LeaseCandidate.metadata.name field.", "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed renewTime.", "acquireTime": "acquireTime is a time when the current lease was acquired.", "renewTime": "renewTime is a time when the current holder of a lease has last updated the lease.", "leaseTransitions": "leaseTransitions is the number of transitions of a lease between holders.", + "strategy": "Strategy indicates the strategy for picking the leader for coordinated leader election (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.", + "preferredHolder": "PreferredHolder signals to a lease holder that the lease has a more optimal holder and should be given up.", } func (LeaseSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go index 3adfd87203..dcef1e346a 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go @@ -22,6 +22,7 @@ limitations under the License. 
package v1beta1 import ( + v1 "k8s.io/api/coordination/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -111,6 +112,16 @@ func (in *LeaseSpec) DeepCopyInto(out *LeaseSpec) { *out = new(int32) **out = **in } + if in.Strategy != nil { + in, out := &in.Strategy, &out.Strategy + *out = new(v1.CoordinatedLeaseStrategy) + **out = **in + } + if in.PreferredHolder != nil { + in, out := &in.PreferredHolder, &out.PreferredHolder + *out = new(string) + **out = **in + } return } diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go index 106ba14c3d..5cf6f329f1 100644 --- a/vendor/k8s.io/api/core/v1/annotation_key_constants.go +++ b/vendor/k8s.io/api/core/v1/annotation_key_constants.go @@ -54,21 +54,18 @@ const ( // SeccompLocalhostProfileNamePrefix is the prefix for specifying profiles loaded from the node's disk. SeccompLocalhostProfileNamePrefix = "localhost/" - // AppArmorBetaContainerAnnotationKeyPrefix is the prefix to an annotation key specifying a container's apparmor profile. - AppArmorBetaContainerAnnotationKeyPrefix = "container.apparmor.security.beta.kubernetes.io/" - // AppArmorBetaDefaultProfileAnnotationKey is the annotation key specifying the default AppArmor profile. - AppArmorBetaDefaultProfileAnnotationKey = "apparmor.security.beta.kubernetes.io/defaultProfileName" - // AppArmorBetaAllowedProfilesAnnotationKey is the annotation key specifying the allowed AppArmor profiles. - AppArmorBetaAllowedProfilesAnnotationKey = "apparmor.security.beta.kubernetes.io/allowedProfileNames" + // DeprecatedAppArmorBetaContainerAnnotationKeyPrefix is the prefix to an annotation key specifying a container's apparmor profile. + // Deprecated: use a pod or container security context `appArmorProfile` field instead. + DeprecatedAppArmorBetaContainerAnnotationKeyPrefix = "container.apparmor.security.beta.kubernetes.io/" - // AppArmorBetaProfileRuntimeDefault is the profile specifying the runtime default. - AppArmorBetaProfileRuntimeDefault = "runtime/default" + // DeprecatedAppArmorBetaProfileRuntimeDefault is the profile specifying the runtime default. + DeprecatedAppArmorBetaProfileRuntimeDefault = "runtime/default" - // AppArmorBetaProfileNamePrefix is the prefix for specifying profiles loaded on the node. - AppArmorBetaProfileNamePrefix = "localhost/" + // DeprecatedAppArmorBetaProfileNamePrefix is the prefix for specifying profiles loaded on the node. + DeprecatedAppArmorBetaProfileNamePrefix = "localhost/" - // AppArmorBetaProfileNameUnconfined is the Unconfined AppArmor profile - AppArmorBetaProfileNameUnconfined = "unconfined" + // DeprecatedAppArmorBetaProfileNameUnconfined is the Unconfined AppArmor profile + DeprecatedAppArmorBetaProfileNameUnconfined = "unconfined" // DeprecatedSeccompProfileDockerDefault represents the default seccomp profile used by docker. // Deprecated: set a pod or container security context `seccompProfile` of type "RuntimeDefault" instead. diff --git a/vendor/k8s.io/api/core/v1/doc.go b/vendor/k8s.io/api/core/v1/doc.go index 1bdf0b25b1..bc0041b331 100644 --- a/vendor/k8s.io/api/core/v1/doc.go +++ b/vendor/k8s.io/api/core/v1/doc.go @@ -17,6 +17,8 @@ limitations under the License. // +k8s:openapi-gen=true // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package +// +k8s:prerelease-lifecycle-gen=true +// +groupName= // Package v1 is the v1 version of the core API. 
package v1 // import "k8s.io/api/core/v1" diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index 04c7939e0d..5654ee4829 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto +// source: k8s.io/api/core/v1/generated.proto package v1 @@ -52,7 +52,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *AWSElasticBlockStoreVolumeSource) Reset() { *m = AWSElasticBlockStoreVolumeSource{} } func (*AWSElasticBlockStoreVolumeSource) ProtoMessage() {} func (*AWSElasticBlockStoreVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{0} + return fileDescriptor_6c07b07c062484ab, []int{0} } func (m *AWSElasticBlockStoreVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -80,7 +80,7 @@ var xxx_messageInfo_AWSElasticBlockStoreVolumeSource proto.InternalMessageInfo func (m *Affinity) Reset() { *m = Affinity{} } func (*Affinity) ProtoMessage() {} func (*Affinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{1} + return fileDescriptor_6c07b07c062484ab, []int{1} } func (m *Affinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -105,10 +105,38 @@ func (m *Affinity) XXX_DiscardUnknown() { var xxx_messageInfo_Affinity proto.InternalMessageInfo +func (m *AppArmorProfile) Reset() { *m = AppArmorProfile{} } +func (*AppArmorProfile) ProtoMessage() {} +func (*AppArmorProfile) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{2} +} +func (m *AppArmorProfile) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AppArmorProfile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AppArmorProfile) XXX_Merge(src proto.Message) { + xxx_messageInfo_AppArmorProfile.Merge(m, src) +} +func (m *AppArmorProfile) XXX_Size() int { + return m.Size() +} +func (m *AppArmorProfile) XXX_DiscardUnknown() { + xxx_messageInfo_AppArmorProfile.DiscardUnknown(m) +} + +var xxx_messageInfo_AppArmorProfile proto.InternalMessageInfo + func (m *AttachedVolume) Reset() { *m = AttachedVolume{} } func (*AttachedVolume) ProtoMessage() {} func (*AttachedVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{2} + return fileDescriptor_6c07b07c062484ab, []int{3} } func (m *AttachedVolume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -136,7 +164,7 @@ var xxx_messageInfo_AttachedVolume proto.InternalMessageInfo func (m *AvoidPods) Reset() { *m = AvoidPods{} } func (*AvoidPods) ProtoMessage() {} func (*AvoidPods) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{3} + return fileDescriptor_6c07b07c062484ab, []int{4} } func (m *AvoidPods) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -164,7 +192,7 @@ var xxx_messageInfo_AvoidPods proto.InternalMessageInfo func (m *AzureDiskVolumeSource) Reset() { *m = AzureDiskVolumeSource{} } func (*AzureDiskVolumeSource) ProtoMessage() {} func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{4} + return fileDescriptor_6c07b07c062484ab, []int{5} } func (m *AzureDiskVolumeSource) XXX_Unmarshal(b []byte) 
error { return m.Unmarshal(b) @@ -192,7 +220,7 @@ var xxx_messageInfo_AzureDiskVolumeSource proto.InternalMessageInfo func (m *AzureFilePersistentVolumeSource) Reset() { *m = AzureFilePersistentVolumeSource{} } func (*AzureFilePersistentVolumeSource) ProtoMessage() {} func (*AzureFilePersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{5} + return fileDescriptor_6c07b07c062484ab, []int{6} } func (m *AzureFilePersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -220,7 +248,7 @@ var xxx_messageInfo_AzureFilePersistentVolumeSource proto.InternalMessageInfo func (m *AzureFileVolumeSource) Reset() { *m = AzureFileVolumeSource{} } func (*AzureFileVolumeSource) ProtoMessage() {} func (*AzureFileVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{6} + return fileDescriptor_6c07b07c062484ab, []int{7} } func (m *AzureFileVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -248,7 +276,7 @@ var xxx_messageInfo_AzureFileVolumeSource proto.InternalMessageInfo func (m *Binding) Reset() { *m = Binding{} } func (*Binding) ProtoMessage() {} func (*Binding) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{7} + return fileDescriptor_6c07b07c062484ab, []int{8} } func (m *Binding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -276,7 +304,7 @@ var xxx_messageInfo_Binding proto.InternalMessageInfo func (m *CSIPersistentVolumeSource) Reset() { *m = CSIPersistentVolumeSource{} } func (*CSIPersistentVolumeSource) ProtoMessage() {} func (*CSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{8} + return fileDescriptor_6c07b07c062484ab, []int{9} } func (m *CSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -304,7 +332,7 @@ var xxx_messageInfo_CSIPersistentVolumeSource proto.InternalMessageInfo func (m *CSIVolumeSource) Reset() { *m = CSIVolumeSource{} } func (*CSIVolumeSource) ProtoMessage() {} func (*CSIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{9} + return fileDescriptor_6c07b07c062484ab, []int{10} } func (m *CSIVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -332,7 +360,7 @@ var xxx_messageInfo_CSIVolumeSource proto.InternalMessageInfo func (m *Capabilities) Reset() { *m = Capabilities{} } func (*Capabilities) ProtoMessage() {} func (*Capabilities) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{10} + return fileDescriptor_6c07b07c062484ab, []int{11} } func (m *Capabilities) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -360,7 +388,7 @@ var xxx_messageInfo_Capabilities proto.InternalMessageInfo func (m *CephFSPersistentVolumeSource) Reset() { *m = CephFSPersistentVolumeSource{} } func (*CephFSPersistentVolumeSource) ProtoMessage() {} func (*CephFSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{11} + return fileDescriptor_6c07b07c062484ab, []int{12} } func (m *CephFSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -388,7 +416,7 @@ var xxx_messageInfo_CephFSPersistentVolumeSource proto.InternalMessageInfo func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } func (*CephFSVolumeSource) ProtoMessage() {} func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{12} + return 
fileDescriptor_6c07b07c062484ab, []int{13} } func (m *CephFSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -416,7 +444,7 @@ var xxx_messageInfo_CephFSVolumeSource proto.InternalMessageInfo func (m *CinderPersistentVolumeSource) Reset() { *m = CinderPersistentVolumeSource{} } func (*CinderPersistentVolumeSource) ProtoMessage() {} func (*CinderPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{13} + return fileDescriptor_6c07b07c062484ab, []int{14} } func (m *CinderPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -444,7 +472,7 @@ var xxx_messageInfo_CinderPersistentVolumeSource proto.InternalMessageInfo func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } func (*CinderVolumeSource) ProtoMessage() {} func (*CinderVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{14} + return fileDescriptor_6c07b07c062484ab, []int{15} } func (m *CinderVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -469,38 +497,10 @@ func (m *CinderVolumeSource) XXX_DiscardUnknown() { var xxx_messageInfo_CinderVolumeSource proto.InternalMessageInfo -func (m *ClaimSource) Reset() { *m = ClaimSource{} } -func (*ClaimSource) ProtoMessage() {} -func (*ClaimSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{15} -} -func (m *ClaimSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClaimSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClaimSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClaimSource.Merge(m, src) -} -func (m *ClaimSource) XXX_Size() int { - return m.Size() -} -func (m *ClaimSource) XXX_DiscardUnknown() { - xxx_messageInfo_ClaimSource.DiscardUnknown(m) -} - -var xxx_messageInfo_ClaimSource proto.InternalMessageInfo - func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} } func (*ClientIPConfig) ProtoMessage() {} func (*ClientIPConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{16} + return fileDescriptor_6c07b07c062484ab, []int{16} } func (m *ClientIPConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -528,7 +528,7 @@ var xxx_messageInfo_ClientIPConfig proto.InternalMessageInfo func (m *ClusterTrustBundleProjection) Reset() { *m = ClusterTrustBundleProjection{} } func (*ClusterTrustBundleProjection) ProtoMessage() {} func (*ClusterTrustBundleProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{17} + return fileDescriptor_6c07b07c062484ab, []int{17} } func (m *ClusterTrustBundleProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -556,7 +556,7 @@ var xxx_messageInfo_ClusterTrustBundleProjection proto.InternalMessageInfo func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } func (*ComponentCondition) ProtoMessage() {} func (*ComponentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{18} + return fileDescriptor_6c07b07c062484ab, []int{18} } func (m *ComponentCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -584,7 +584,7 @@ var xxx_messageInfo_ComponentCondition proto.InternalMessageInfo func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } func (*ComponentStatus) ProtoMessage() {} func (*ComponentStatus) Descriptor() ([]byte, []int) { - return 
fileDescriptor_83c10c24ec417dc9, []int{19} + return fileDescriptor_6c07b07c062484ab, []int{19} } func (m *ComponentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -612,7 +612,7 @@ var xxx_messageInfo_ComponentStatus proto.InternalMessageInfo func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } func (*ComponentStatusList) ProtoMessage() {} func (*ComponentStatusList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{20} + return fileDescriptor_6c07b07c062484ab, []int{20} } func (m *ComponentStatusList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -640,7 +640,7 @@ var xxx_messageInfo_ComponentStatusList proto.InternalMessageInfo func (m *ConfigMap) Reset() { *m = ConfigMap{} } func (*ConfigMap) ProtoMessage() {} func (*ConfigMap) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{21} + return fileDescriptor_6c07b07c062484ab, []int{21} } func (m *ConfigMap) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -668,7 +668,7 @@ var xxx_messageInfo_ConfigMap proto.InternalMessageInfo func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } func (*ConfigMapEnvSource) ProtoMessage() {} func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{22} + return fileDescriptor_6c07b07c062484ab, []int{22} } func (m *ConfigMapEnvSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -696,7 +696,7 @@ var xxx_messageInfo_ConfigMapEnvSource proto.InternalMessageInfo func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } func (*ConfigMapKeySelector) ProtoMessage() {} func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{23} + return fileDescriptor_6c07b07c062484ab, []int{23} } func (m *ConfigMapKeySelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +724,7 @@ var xxx_messageInfo_ConfigMapKeySelector proto.InternalMessageInfo func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } func (*ConfigMapList) ProtoMessage() {} func (*ConfigMapList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{24} + return fileDescriptor_6c07b07c062484ab, []int{24} } func (m *ConfigMapList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -752,7 +752,7 @@ var xxx_messageInfo_ConfigMapList proto.InternalMessageInfo func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} } func (*ConfigMapNodeConfigSource) ProtoMessage() {} func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{25} + return fileDescriptor_6c07b07c062484ab, []int{25} } func (m *ConfigMapNodeConfigSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -780,7 +780,7 @@ var xxx_messageInfo_ConfigMapNodeConfigSource proto.InternalMessageInfo func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } func (*ConfigMapProjection) ProtoMessage() {} func (*ConfigMapProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{26} + return fileDescriptor_6c07b07c062484ab, []int{26} } func (m *ConfigMapProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -808,7 +808,7 @@ var xxx_messageInfo_ConfigMapProjection proto.InternalMessageInfo func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } func (*ConfigMapVolumeSource) ProtoMessage() {} func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { - return 
fileDescriptor_83c10c24ec417dc9, []int{27} + return fileDescriptor_6c07b07c062484ab, []int{27} } func (m *ConfigMapVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -836,7 +836,7 @@ var xxx_messageInfo_ConfigMapVolumeSource proto.InternalMessageInfo func (m *Container) Reset() { *m = Container{} } func (*Container) ProtoMessage() {} func (*Container) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{28} + return fileDescriptor_6c07b07c062484ab, []int{28} } func (m *Container) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -864,7 +864,7 @@ var xxx_messageInfo_Container proto.InternalMessageInfo func (m *ContainerImage) Reset() { *m = ContainerImage{} } func (*ContainerImage) ProtoMessage() {} func (*ContainerImage) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{29} + return fileDescriptor_6c07b07c062484ab, []int{29} } func (m *ContainerImage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -892,7 +892,7 @@ var xxx_messageInfo_ContainerImage proto.InternalMessageInfo func (m *ContainerPort) Reset() { *m = ContainerPort{} } func (*ContainerPort) ProtoMessage() {} func (*ContainerPort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{30} + return fileDescriptor_6c07b07c062484ab, []int{30} } func (m *ContainerPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -920,7 +920,7 @@ var xxx_messageInfo_ContainerPort proto.InternalMessageInfo func (m *ContainerResizePolicy) Reset() { *m = ContainerResizePolicy{} } func (*ContainerResizePolicy) ProtoMessage() {} func (*ContainerResizePolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{31} + return fileDescriptor_6c07b07c062484ab, []int{31} } func (m *ContainerResizePolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -948,7 +948,7 @@ var xxx_messageInfo_ContainerResizePolicy proto.InternalMessageInfo func (m *ContainerState) Reset() { *m = ContainerState{} } func (*ContainerState) ProtoMessage() {} func (*ContainerState) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{32} + return fileDescriptor_6c07b07c062484ab, []int{32} } func (m *ContainerState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -976,7 +976,7 @@ var xxx_messageInfo_ContainerState proto.InternalMessageInfo func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } func (*ContainerStateRunning) ProtoMessage() {} func (*ContainerStateRunning) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{33} + return fileDescriptor_6c07b07c062484ab, []int{33} } func (m *ContainerStateRunning) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1004,7 +1004,7 @@ var xxx_messageInfo_ContainerStateRunning proto.InternalMessageInfo func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } func (*ContainerStateTerminated) ProtoMessage() {} func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{34} + return fileDescriptor_6c07b07c062484ab, []int{34} } func (m *ContainerStateTerminated) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1032,7 +1032,7 @@ var xxx_messageInfo_ContainerStateTerminated proto.InternalMessageInfo func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } func (*ContainerStateWaiting) ProtoMessage() {} func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, 
[]int{35} + return fileDescriptor_6c07b07c062484ab, []int{35} } func (m *ContainerStateWaiting) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1060,7 +1060,7 @@ var xxx_messageInfo_ContainerStateWaiting proto.InternalMessageInfo func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } func (*ContainerStatus) ProtoMessage() {} func (*ContainerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{36} + return fileDescriptor_6c07b07c062484ab, []int{36} } func (m *ContainerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1085,10 +1085,38 @@ func (m *ContainerStatus) XXX_DiscardUnknown() { var xxx_messageInfo_ContainerStatus proto.InternalMessageInfo +func (m *ContainerUser) Reset() { *m = ContainerUser{} } +func (*ContainerUser) ProtoMessage() {} +func (*ContainerUser) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{37} +} +func (m *ContainerUser) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerUser) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerUser) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerUser.Merge(m, src) +} +func (m *ContainerUser) XXX_Size() int { + return m.Size() +} +func (m *ContainerUser) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerUser.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerUser proto.InternalMessageInfo + func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } func (*DaemonEndpoint) ProtoMessage() {} func (*DaemonEndpoint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{37} + return fileDescriptor_6c07b07c062484ab, []int{38} } func (m *DaemonEndpoint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1116,7 +1144,7 @@ var xxx_messageInfo_DaemonEndpoint proto.InternalMessageInfo func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } func (*DownwardAPIProjection) ProtoMessage() {} func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{38} + return fileDescriptor_6c07b07c062484ab, []int{39} } func (m *DownwardAPIProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1144,7 +1172,7 @@ var xxx_messageInfo_DownwardAPIProjection proto.InternalMessageInfo func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } func (*DownwardAPIVolumeFile) ProtoMessage() {} func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{39} + return fileDescriptor_6c07b07c062484ab, []int{40} } func (m *DownwardAPIVolumeFile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1172,7 +1200,7 @@ var xxx_messageInfo_DownwardAPIVolumeFile proto.InternalMessageInfo func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } func (*DownwardAPIVolumeSource) ProtoMessage() {} func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{40} + return fileDescriptor_6c07b07c062484ab, []int{41} } func (m *DownwardAPIVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1200,7 +1228,7 @@ var xxx_messageInfo_DownwardAPIVolumeSource proto.InternalMessageInfo func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } func (*EmptyDirVolumeSource) ProtoMessage() {} func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { - 
return fileDescriptor_83c10c24ec417dc9, []int{41} + return fileDescriptor_6c07b07c062484ab, []int{42} } func (m *EmptyDirVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1228,7 +1256,7 @@ var xxx_messageInfo_EmptyDirVolumeSource proto.InternalMessageInfo func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } func (*EndpointAddress) ProtoMessage() {} func (*EndpointAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{42} + return fileDescriptor_6c07b07c062484ab, []int{43} } func (m *EndpointAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1256,7 +1284,7 @@ var xxx_messageInfo_EndpointAddress proto.InternalMessageInfo func (m *EndpointPort) Reset() { *m = EndpointPort{} } func (*EndpointPort) ProtoMessage() {} func (*EndpointPort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{43} + return fileDescriptor_6c07b07c062484ab, []int{44} } func (m *EndpointPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1284,7 +1312,7 @@ var xxx_messageInfo_EndpointPort proto.InternalMessageInfo func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } func (*EndpointSubset) ProtoMessage() {} func (*EndpointSubset) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{44} + return fileDescriptor_6c07b07c062484ab, []int{45} } func (m *EndpointSubset) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1312,7 +1340,7 @@ var xxx_messageInfo_EndpointSubset proto.InternalMessageInfo func (m *Endpoints) Reset() { *m = Endpoints{} } func (*Endpoints) ProtoMessage() {} func (*Endpoints) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{45} + return fileDescriptor_6c07b07c062484ab, []int{46} } func (m *Endpoints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1340,7 +1368,7 @@ var xxx_messageInfo_Endpoints proto.InternalMessageInfo func (m *EndpointsList) Reset() { *m = EndpointsList{} } func (*EndpointsList) ProtoMessage() {} func (*EndpointsList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{46} + return fileDescriptor_6c07b07c062484ab, []int{47} } func (m *EndpointsList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1368,7 +1396,7 @@ var xxx_messageInfo_EndpointsList proto.InternalMessageInfo func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } func (*EnvFromSource) ProtoMessage() {} func (*EnvFromSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{47} + return fileDescriptor_6c07b07c062484ab, []int{48} } func (m *EnvFromSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1396,7 +1424,7 @@ var xxx_messageInfo_EnvFromSource proto.InternalMessageInfo func (m *EnvVar) Reset() { *m = EnvVar{} } func (*EnvVar) ProtoMessage() {} func (*EnvVar) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{48} + return fileDescriptor_6c07b07c062484ab, []int{49} } func (m *EnvVar) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1424,7 +1452,7 @@ var xxx_messageInfo_EnvVar proto.InternalMessageInfo func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } func (*EnvVarSource) ProtoMessage() {} func (*EnvVarSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{49} + return fileDescriptor_6c07b07c062484ab, []int{50} } func (m *EnvVarSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1452,7 +1480,7 @@ var xxx_messageInfo_EnvVarSource proto.InternalMessageInfo func (m 
*EphemeralContainer) Reset() { *m = EphemeralContainer{} } func (*EphemeralContainer) ProtoMessage() {} func (*EphemeralContainer) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{50} + return fileDescriptor_6c07b07c062484ab, []int{51} } func (m *EphemeralContainer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1480,7 +1508,7 @@ var xxx_messageInfo_EphemeralContainer proto.InternalMessageInfo func (m *EphemeralContainerCommon) Reset() { *m = EphemeralContainerCommon{} } func (*EphemeralContainerCommon) ProtoMessage() {} func (*EphemeralContainerCommon) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{51} + return fileDescriptor_6c07b07c062484ab, []int{52} } func (m *EphemeralContainerCommon) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1508,7 +1536,7 @@ var xxx_messageInfo_EphemeralContainerCommon proto.InternalMessageInfo func (m *EphemeralVolumeSource) Reset() { *m = EphemeralVolumeSource{} } func (*EphemeralVolumeSource) ProtoMessage() {} func (*EphemeralVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{52} + return fileDescriptor_6c07b07c062484ab, []int{53} } func (m *EphemeralVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1536,7 +1564,7 @@ var xxx_messageInfo_EphemeralVolumeSource proto.InternalMessageInfo func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{53} + return fileDescriptor_6c07b07c062484ab, []int{54} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1564,7 +1592,7 @@ var xxx_messageInfo_Event proto.InternalMessageInfo func (m *EventList) Reset() { *m = EventList{} } func (*EventList) ProtoMessage() {} func (*EventList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{54} + return fileDescriptor_6c07b07c062484ab, []int{55} } func (m *EventList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1592,7 +1620,7 @@ var xxx_messageInfo_EventList proto.InternalMessageInfo func (m *EventSeries) Reset() { *m = EventSeries{} } func (*EventSeries) ProtoMessage() {} func (*EventSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{55} + return fileDescriptor_6c07b07c062484ab, []int{56} } func (m *EventSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1620,7 +1648,7 @@ var xxx_messageInfo_EventSeries proto.InternalMessageInfo func (m *EventSource) Reset() { *m = EventSource{} } func (*EventSource) ProtoMessage() {} func (*EventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{56} + return fileDescriptor_6c07b07c062484ab, []int{57} } func (m *EventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1648,7 +1676,7 @@ var xxx_messageInfo_EventSource proto.InternalMessageInfo func (m *ExecAction) Reset() { *m = ExecAction{} } func (*ExecAction) ProtoMessage() {} func (*ExecAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{57} + return fileDescriptor_6c07b07c062484ab, []int{58} } func (m *ExecAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1676,7 +1704,7 @@ var xxx_messageInfo_ExecAction proto.InternalMessageInfo func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } func (*FCVolumeSource) ProtoMessage() {} func (*FCVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, 
[]int{58} + return fileDescriptor_6c07b07c062484ab, []int{59} } func (m *FCVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1704,7 +1732,7 @@ var xxx_messageInfo_FCVolumeSource proto.InternalMessageInfo func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } func (*FlexPersistentVolumeSource) ProtoMessage() {} func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{59} + return fileDescriptor_6c07b07c062484ab, []int{60} } func (m *FlexPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1732,7 +1760,7 @@ var xxx_messageInfo_FlexPersistentVolumeSource proto.InternalMessageInfo func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } func (*FlexVolumeSource) ProtoMessage() {} func (*FlexVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{60} + return fileDescriptor_6c07b07c062484ab, []int{61} } func (m *FlexVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1760,7 +1788,7 @@ var xxx_messageInfo_FlexVolumeSource proto.InternalMessageInfo func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } func (*FlockerVolumeSource) ProtoMessage() {} func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{61} + return fileDescriptor_6c07b07c062484ab, []int{62} } func (m *FlockerVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1788,7 +1816,7 @@ var xxx_messageInfo_FlockerVolumeSource proto.InternalMessageInfo func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{62} + return fileDescriptor_6c07b07c062484ab, []int{63} } func (m *GCEPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1816,7 +1844,7 @@ var xxx_messageInfo_GCEPersistentDiskVolumeSource proto.InternalMessageInfo func (m *GRPCAction) Reset() { *m = GRPCAction{} } func (*GRPCAction) ProtoMessage() {} func (*GRPCAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{63} + return fileDescriptor_6c07b07c062484ab, []int{64} } func (m *GRPCAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1844,7 +1872,7 @@ var xxx_messageInfo_GRPCAction proto.InternalMessageInfo func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } func (*GitRepoVolumeSource) ProtoMessage() {} func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{64} + return fileDescriptor_6c07b07c062484ab, []int{65} } func (m *GitRepoVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1872,7 +1900,7 @@ var xxx_messageInfo_GitRepoVolumeSource proto.InternalMessageInfo func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} } func (*GlusterfsPersistentVolumeSource) ProtoMessage() {} func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{65} + return fileDescriptor_6c07b07c062484ab, []int{66} } func (m *GlusterfsPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1900,7 +1928,7 @@ var xxx_messageInfo_GlusterfsPersistentVolumeSource proto.InternalMessageInfo func (m *GlusterfsVolumeSource) Reset() { *m = 
GlusterfsVolumeSource{} } func (*GlusterfsVolumeSource) ProtoMessage() {} func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{66} + return fileDescriptor_6c07b07c062484ab, []int{67} } func (m *GlusterfsVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1928,7 +1956,7 @@ var xxx_messageInfo_GlusterfsVolumeSource proto.InternalMessageInfo func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } func (*HTTPGetAction) ProtoMessage() {} func (*HTTPGetAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{67} + return fileDescriptor_6c07b07c062484ab, []int{68} } func (m *HTTPGetAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1956,7 +1984,7 @@ var xxx_messageInfo_HTTPGetAction proto.InternalMessageInfo func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } func (*HTTPHeader) ProtoMessage() {} func (*HTTPHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{68} + return fileDescriptor_6c07b07c062484ab, []int{69} } func (m *HTTPHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1984,7 +2012,7 @@ var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo func (m *HostAlias) Reset() { *m = HostAlias{} } func (*HostAlias) ProtoMessage() {} func (*HostAlias) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{69} + return fileDescriptor_6c07b07c062484ab, []int{70} } func (m *HostAlias) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2012,7 +2040,7 @@ var xxx_messageInfo_HostAlias proto.InternalMessageInfo func (m *HostIP) Reset() { *m = HostIP{} } func (*HostIP) ProtoMessage() {} func (*HostIP) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{70} + return fileDescriptor_6c07b07c062484ab, []int{71} } func (m *HostIP) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2040,7 +2068,7 @@ var xxx_messageInfo_HostIP proto.InternalMessageInfo func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } func (*HostPathVolumeSource) ProtoMessage() {} func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{71} + return fileDescriptor_6c07b07c062484ab, []int{72} } func (m *HostPathVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2068,7 +2096,7 @@ var xxx_messageInfo_HostPathVolumeSource proto.InternalMessageInfo func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } func (*ISCSIPersistentVolumeSource) ProtoMessage() {} func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{72} + return fileDescriptor_6c07b07c062484ab, []int{73} } func (m *ISCSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2096,7 +2124,7 @@ var xxx_messageInfo_ISCSIPersistentVolumeSource proto.InternalMessageInfo func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } func (*ISCSIVolumeSource) ProtoMessage() {} func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{73} + return fileDescriptor_6c07b07c062484ab, []int{74} } func (m *ISCSIVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2121,10 +2149,38 @@ func (m *ISCSIVolumeSource) XXX_DiscardUnknown() { var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo +func (m *ImageVolumeSource) Reset() { *m = ImageVolumeSource{} } +func (*ImageVolumeSource) 
ProtoMessage() {} +func (*ImageVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{75} +} +func (m *ImageVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageVolumeSource.Merge(m, src) +} +func (m *ImageVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ImageVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ImageVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageVolumeSource proto.InternalMessageInfo + func (m *KeyToPath) Reset() { *m = KeyToPath{} } func (*KeyToPath) ProtoMessage() {} func (*KeyToPath) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{74} + return fileDescriptor_6c07b07c062484ab, []int{76} } func (m *KeyToPath) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2152,7 +2208,7 @@ var xxx_messageInfo_KeyToPath proto.InternalMessageInfo func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (*Lifecycle) ProtoMessage() {} func (*Lifecycle) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{75} + return fileDescriptor_6c07b07c062484ab, []int{77} } func (m *Lifecycle) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2180,7 +2236,7 @@ var xxx_messageInfo_Lifecycle proto.InternalMessageInfo func (m *LifecycleHandler) Reset() { *m = LifecycleHandler{} } func (*LifecycleHandler) ProtoMessage() {} func (*LifecycleHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{76} + return fileDescriptor_6c07b07c062484ab, []int{78} } func (m *LifecycleHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2208,7 +2264,7 @@ var xxx_messageInfo_LifecycleHandler proto.InternalMessageInfo func (m *LimitRange) Reset() { *m = LimitRange{} } func (*LimitRange) ProtoMessage() {} func (*LimitRange) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{77} + return fileDescriptor_6c07b07c062484ab, []int{79} } func (m *LimitRange) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2236,7 +2292,7 @@ var xxx_messageInfo_LimitRange proto.InternalMessageInfo func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } func (*LimitRangeItem) ProtoMessage() {} func (*LimitRangeItem) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{78} + return fileDescriptor_6c07b07c062484ab, []int{80} } func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2264,7 +2320,7 @@ var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } func (*LimitRangeList) ProtoMessage() {} func (*LimitRangeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{79} + return fileDescriptor_6c07b07c062484ab, []int{81} } func (m *LimitRangeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2292,7 +2348,7 @@ var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } func (*LimitRangeSpec) ProtoMessage() {} func (*LimitRangeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{80} + return fileDescriptor_6c07b07c062484ab, []int{82} } func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) 
error { return m.Unmarshal(b) @@ -2317,10 +2373,38 @@ func (m *LimitRangeSpec) XXX_DiscardUnknown() { var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo +func (m *LinuxContainerUser) Reset() { *m = LinuxContainerUser{} } +func (*LinuxContainerUser) ProtoMessage() {} +func (*LinuxContainerUser) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{83} +} +func (m *LinuxContainerUser) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LinuxContainerUser) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LinuxContainerUser) XXX_Merge(src proto.Message) { + xxx_messageInfo_LinuxContainerUser.Merge(m, src) +} +func (m *LinuxContainerUser) XXX_Size() int { + return m.Size() +} +func (m *LinuxContainerUser) XXX_DiscardUnknown() { + xxx_messageInfo_LinuxContainerUser.DiscardUnknown(m) +} + +var xxx_messageInfo_LinuxContainerUser proto.InternalMessageInfo + func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} func (*List) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{81} + return fileDescriptor_6c07b07c062484ab, []int{84} } func (m *List) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2348,7 +2432,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } func (*LoadBalancerIngress) ProtoMessage() {} func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{82} + return fileDescriptor_6c07b07c062484ab, []int{85} } func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2376,7 +2460,7 @@ var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } func (*LoadBalancerStatus) ProtoMessage() {} func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{83} + return fileDescriptor_6c07b07c062484ab, []int{86} } func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2404,7 +2488,7 @@ var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } func (*LocalObjectReference) ProtoMessage() {} func (*LocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{84} + return fileDescriptor_6c07b07c062484ab, []int{87} } func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2432,7 +2516,7 @@ var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } func (*LocalVolumeSource) ProtoMessage() {} func (*LocalVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{85} + return fileDescriptor_6c07b07c062484ab, []int{88} } func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2460,7 +2544,7 @@ var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo func (m *ModifyVolumeStatus) Reset() { *m = ModifyVolumeStatus{} } func (*ModifyVolumeStatus) ProtoMessage() {} func (*ModifyVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{86} + return fileDescriptor_6c07b07c062484ab, []int{89} } func (m *ModifyVolumeStatus) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -2488,7 +2572,7 @@ var xxx_messageInfo_ModifyVolumeStatus proto.InternalMessageInfo func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } func (*NFSVolumeSource) ProtoMessage() {} func (*NFSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{87} + return fileDescriptor_6c07b07c062484ab, []int{90} } func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2516,7 +2600,7 @@ var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo func (m *Namespace) Reset() { *m = Namespace{} } func (*Namespace) ProtoMessage() {} func (*Namespace) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{88} + return fileDescriptor_6c07b07c062484ab, []int{91} } func (m *Namespace) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2544,7 +2628,7 @@ var xxx_messageInfo_Namespace proto.InternalMessageInfo func (m *NamespaceCondition) Reset() { *m = NamespaceCondition{} } func (*NamespaceCondition) ProtoMessage() {} func (*NamespaceCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{89} + return fileDescriptor_6c07b07c062484ab, []int{92} } func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2572,7 +2656,7 @@ var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo func (m *NamespaceList) Reset() { *m = NamespaceList{} } func (*NamespaceList) ProtoMessage() {} func (*NamespaceList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{90} + return fileDescriptor_6c07b07c062484ab, []int{93} } func (m *NamespaceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2600,7 +2684,7 @@ var xxx_messageInfo_NamespaceList proto.InternalMessageInfo func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } func (*NamespaceSpec) ProtoMessage() {} func (*NamespaceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{91} + return fileDescriptor_6c07b07c062484ab, []int{94} } func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2628,7 +2712,7 @@ var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } func (*NamespaceStatus) ProtoMessage() {} func (*NamespaceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{92} + return fileDescriptor_6c07b07c062484ab, []int{95} } func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2656,7 +2740,7 @@ var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo func (m *Node) Reset() { *m = Node{} } func (*Node) ProtoMessage() {} func (*Node) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{93} + return fileDescriptor_6c07b07c062484ab, []int{96} } func (m *Node) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2684,7 +2768,7 @@ var xxx_messageInfo_Node proto.InternalMessageInfo func (m *NodeAddress) Reset() { *m = NodeAddress{} } func (*NodeAddress) ProtoMessage() {} func (*NodeAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{94} + return fileDescriptor_6c07b07c062484ab, []int{97} } func (m *NodeAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2712,7 +2796,7 @@ var xxx_messageInfo_NodeAddress proto.InternalMessageInfo func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } func (*NodeAffinity) ProtoMessage() {} func (*NodeAffinity) Descriptor() ([]byte, []int) { - return 
fileDescriptor_83c10c24ec417dc9, []int{95} + return fileDescriptor_6c07b07c062484ab, []int{98} } func (m *NodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2740,7 +2824,7 @@ var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo func (m *NodeCondition) Reset() { *m = NodeCondition{} } func (*NodeCondition) ProtoMessage() {} func (*NodeCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{96} + return fileDescriptor_6c07b07c062484ab, []int{99} } func (m *NodeCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2768,7 +2852,7 @@ var xxx_messageInfo_NodeCondition proto.InternalMessageInfo func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } func (*NodeConfigSource) ProtoMessage() {} func (*NodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{97} + return fileDescriptor_6c07b07c062484ab, []int{100} } func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2796,7 +2880,7 @@ var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } func (*NodeConfigStatus) ProtoMessage() {} func (*NodeConfigStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{98} + return fileDescriptor_6c07b07c062484ab, []int{101} } func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2824,7 +2908,7 @@ var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } func (*NodeDaemonEndpoints) ProtoMessage() {} func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{99} + return fileDescriptor_6c07b07c062484ab, []int{102} } func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2849,10 +2933,38 @@ func (m *NodeDaemonEndpoints) XXX_DiscardUnknown() { var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo +func (m *NodeFeatures) Reset() { *m = NodeFeatures{} } +func (*NodeFeatures) ProtoMessage() {} +func (*NodeFeatures) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{103} +} +func (m *NodeFeatures) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeFeatures) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeFeatures) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeFeatures.Merge(m, src) +} +func (m *NodeFeatures) XXX_Size() int { + return m.Size() +} +func (m *NodeFeatures) XXX_DiscardUnknown() { + xxx_messageInfo_NodeFeatures.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeFeatures proto.InternalMessageInfo + func (m *NodeList) Reset() { *m = NodeList{} } func (*NodeList) ProtoMessage() {} func (*NodeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{100} + return fileDescriptor_6c07b07c062484ab, []int{104} } func (m *NodeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2880,7 +2992,7 @@ var xxx_messageInfo_NodeList proto.InternalMessageInfo func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } func (*NodeProxyOptions) ProtoMessage() {} func (*NodeProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{101} + return fileDescriptor_6c07b07c062484ab, []int{105} } func (m *NodeProxyOptions) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2905,15 +3017,15 @@ func (m *NodeProxyOptions) XXX_DiscardUnknown() { var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo -func (m *NodeResources) Reset() { *m = NodeResources{} } -func (*NodeResources) ProtoMessage() {} -func (*NodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{102} +func (m *NodeRuntimeHandler) Reset() { *m = NodeRuntimeHandler{} } +func (*NodeRuntimeHandler) ProtoMessage() {} +func (*NodeRuntimeHandler) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{106} } -func (m *NodeResources) XXX_Unmarshal(b []byte) error { +func (m *NodeRuntimeHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *NodeResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *NodeRuntimeHandler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -2921,22 +3033,50 @@ func (m *NodeResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error } return b[:n], nil } -func (m *NodeResources) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeResources.Merge(m, src) +func (m *NodeRuntimeHandler) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeRuntimeHandler.Merge(m, src) } -func (m *NodeResources) XXX_Size() int { +func (m *NodeRuntimeHandler) XXX_Size() int { return m.Size() } -func (m *NodeResources) XXX_DiscardUnknown() { - xxx_messageInfo_NodeResources.DiscardUnknown(m) +func (m *NodeRuntimeHandler) XXX_DiscardUnknown() { + xxx_messageInfo_NodeRuntimeHandler.DiscardUnknown(m) } -var xxx_messageInfo_NodeResources proto.InternalMessageInfo +var xxx_messageInfo_NodeRuntimeHandler proto.InternalMessageInfo + +func (m *NodeRuntimeHandlerFeatures) Reset() { *m = NodeRuntimeHandlerFeatures{} } +func (*NodeRuntimeHandlerFeatures) ProtoMessage() {} +func (*NodeRuntimeHandlerFeatures) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{107} +} +func (m *NodeRuntimeHandlerFeatures) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeRuntimeHandlerFeatures) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeRuntimeHandlerFeatures) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeRuntimeHandlerFeatures.Merge(m, src) +} +func (m *NodeRuntimeHandlerFeatures) XXX_Size() int { + return m.Size() +} +func (m *NodeRuntimeHandlerFeatures) XXX_DiscardUnknown() { + xxx_messageInfo_NodeRuntimeHandlerFeatures.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeRuntimeHandlerFeatures proto.InternalMessageInfo func (m *NodeSelector) Reset() { *m = NodeSelector{} } func (*NodeSelector) ProtoMessage() {} func (*NodeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{103} + return fileDescriptor_6c07b07c062484ab, []int{108} } func (m *NodeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2964,7 +3104,7 @@ var xxx_messageInfo_NodeSelector proto.InternalMessageInfo func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } func (*NodeSelectorRequirement) ProtoMessage() {} func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{104} + return fileDescriptor_6c07b07c062484ab, []int{109} } func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error 
{ return m.Unmarshal(b) @@ -2992,7 +3132,7 @@ var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } func (*NodeSelectorTerm) ProtoMessage() {} func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{105} + return fileDescriptor_6c07b07c062484ab, []int{110} } func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3020,7 +3160,7 @@ var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo func (m *NodeSpec) Reset() { *m = NodeSpec{} } func (*NodeSpec) ProtoMessage() {} func (*NodeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{106} + return fileDescriptor_6c07b07c062484ab, []int{111} } func (m *NodeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3048,7 +3188,7 @@ var xxx_messageInfo_NodeSpec proto.InternalMessageInfo func (m *NodeStatus) Reset() { *m = NodeStatus{} } func (*NodeStatus) ProtoMessage() {} func (*NodeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{107} + return fileDescriptor_6c07b07c062484ab, []int{112} } func (m *NodeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3076,7 +3216,7 @@ var xxx_messageInfo_NodeStatus proto.InternalMessageInfo func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } func (*NodeSystemInfo) ProtoMessage() {} func (*NodeSystemInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{108} + return fileDescriptor_6c07b07c062484ab, []int{113} } func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3104,7 +3244,7 @@ var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } func (*ObjectFieldSelector) ProtoMessage() {} func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{109} + return fileDescriptor_6c07b07c062484ab, []int{114} } func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3132,7 +3272,7 @@ var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo func (m *ObjectReference) Reset() { *m = ObjectReference{} } func (*ObjectReference) ProtoMessage() {} func (*ObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{110} + return fileDescriptor_6c07b07c062484ab, []int{115} } func (m *ObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3160,7 +3300,7 @@ var xxx_messageInfo_ObjectReference proto.InternalMessageInfo func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } func (*PersistentVolume) ProtoMessage() {} func (*PersistentVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{111} + return fileDescriptor_6c07b07c062484ab, []int{116} } func (m *PersistentVolume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3188,7 +3328,7 @@ var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } func (*PersistentVolumeClaim) ProtoMessage() {} func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{112} + return fileDescriptor_6c07b07c062484ab, []int{117} } func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3216,7 +3356,7 @@ var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo func 
(m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } func (*PersistentVolumeClaimCondition) ProtoMessage() {} func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{113} + return fileDescriptor_6c07b07c062484ab, []int{118} } func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3244,7 +3384,7 @@ var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } func (*PersistentVolumeClaimList) ProtoMessage() {} func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{114} + return fileDescriptor_6c07b07c062484ab, []int{119} } func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3272,7 +3412,7 @@ var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } func (*PersistentVolumeClaimSpec) ProtoMessage() {} func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{115} + return fileDescriptor_6c07b07c062484ab, []int{120} } func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3300,7 +3440,7 @@ var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } func (*PersistentVolumeClaimStatus) ProtoMessage() {} func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{116} + return fileDescriptor_6c07b07c062484ab, []int{121} } func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3328,7 +3468,7 @@ var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo func (m *PersistentVolumeClaimTemplate) Reset() { *m = PersistentVolumeClaimTemplate{} } func (*PersistentVolumeClaimTemplate) ProtoMessage() {} func (*PersistentVolumeClaimTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{117} + return fileDescriptor_6c07b07c062484ab, []int{122} } func (m *PersistentVolumeClaimTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3356,7 +3496,7 @@ var xxx_messageInfo_PersistentVolumeClaimTemplate proto.InternalMessageInfo func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{118} + return fileDescriptor_6c07b07c062484ab, []int{123} } func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3384,7 +3524,7 @@ var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } func (*PersistentVolumeList) ProtoMessage() {} func (*PersistentVolumeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{119} + return fileDescriptor_6c07b07c062484ab, []int{124} } func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3412,7 +3552,7 @@ var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo func (m *PersistentVolumeSource) Reset() { *m 
= PersistentVolumeSource{} } func (*PersistentVolumeSource) ProtoMessage() {} func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{120} + return fileDescriptor_6c07b07c062484ab, []int{125} } func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3440,7 +3580,7 @@ var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } func (*PersistentVolumeSpec) ProtoMessage() {} func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{121} + return fileDescriptor_6c07b07c062484ab, []int{126} } func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3468,7 +3608,7 @@ var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } func (*PersistentVolumeStatus) ProtoMessage() {} func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{122} + return fileDescriptor_6c07b07c062484ab, []int{127} } func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3496,7 +3636,7 @@ var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{123} + return fileDescriptor_6c07b07c062484ab, []int{128} } func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3524,7 +3664,7 @@ var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo func (m *Pod) Reset() { *m = Pod{} } func (*Pod) ProtoMessage() {} func (*Pod) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{124} + return fileDescriptor_6c07b07c062484ab, []int{129} } func (m *Pod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3552,7 +3692,7 @@ var xxx_messageInfo_Pod proto.InternalMessageInfo func (m *PodAffinity) Reset() { *m = PodAffinity{} } func (*PodAffinity) ProtoMessage() {} func (*PodAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{125} + return fileDescriptor_6c07b07c062484ab, []int{130} } func (m *PodAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3580,7 +3720,7 @@ var xxx_messageInfo_PodAffinity proto.InternalMessageInfo func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } func (*PodAffinityTerm) ProtoMessage() {} func (*PodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{126} + return fileDescriptor_6c07b07c062484ab, []int{131} } func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3608,7 +3748,7 @@ var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } func (*PodAntiAffinity) ProtoMessage() {} func (*PodAntiAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{127} + return fileDescriptor_6c07b07c062484ab, []int{132} } func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3636,7 +3776,7 @@ var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo func (m *PodAttachOptions) Reset() { *m 
= PodAttachOptions{} } func (*PodAttachOptions) ProtoMessage() {} func (*PodAttachOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{128} + return fileDescriptor_6c07b07c062484ab, []int{133} } func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3664,7 +3804,7 @@ var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo func (m *PodCondition) Reset() { *m = PodCondition{} } func (*PodCondition) ProtoMessage() {} func (*PodCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{129} + return fileDescriptor_6c07b07c062484ab, []int{134} } func (m *PodCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3692,7 +3832,7 @@ var xxx_messageInfo_PodCondition proto.InternalMessageInfo func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } func (*PodDNSConfig) ProtoMessage() {} func (*PodDNSConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{130} + return fileDescriptor_6c07b07c062484ab, []int{135} } func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3720,7 +3860,7 @@ var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } func (*PodDNSConfigOption) ProtoMessage() {} func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{131} + return fileDescriptor_6c07b07c062484ab, []int{136} } func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3748,7 +3888,7 @@ var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } func (*PodExecOptions) ProtoMessage() {} func (*PodExecOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{132} + return fileDescriptor_6c07b07c062484ab, []int{137} } func (m *PodExecOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3776,7 +3916,7 @@ var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo func (m *PodIP) Reset() { *m = PodIP{} } func (*PodIP) ProtoMessage() {} func (*PodIP) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{133} + return fileDescriptor_6c07b07c062484ab, []int{138} } func (m *PodIP) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3804,7 +3944,7 @@ var xxx_messageInfo_PodIP proto.InternalMessageInfo func (m *PodList) Reset() { *m = PodList{} } func (*PodList) ProtoMessage() {} func (*PodList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{134} + return fileDescriptor_6c07b07c062484ab, []int{139} } func (m *PodList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3832,7 +3972,7 @@ var xxx_messageInfo_PodList proto.InternalMessageInfo func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } func (*PodLogOptions) ProtoMessage() {} func (*PodLogOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{135} + return fileDescriptor_6c07b07c062484ab, []int{140} } func (m *PodLogOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3860,7 +4000,7 @@ var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo func (m *PodOS) Reset() { *m = PodOS{} } func (*PodOS) ProtoMessage() {} func (*PodOS) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{136} + return fileDescriptor_6c07b07c062484ab, []int{141} } func (m *PodOS) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -3888,7 +4028,7 @@ var xxx_messageInfo_PodOS proto.InternalMessageInfo func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } func (*PodPortForwardOptions) ProtoMessage() {} func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{137} + return fileDescriptor_6c07b07c062484ab, []int{142} } func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3916,7 +4056,7 @@ var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } func (*PodProxyOptions) ProtoMessage() {} func (*PodProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{138} + return fileDescriptor_6c07b07c062484ab, []int{143} } func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3944,7 +4084,7 @@ var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } func (*PodReadinessGate) ProtoMessage() {} func (*PodReadinessGate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{139} + return fileDescriptor_6c07b07c062484ab, []int{144} } func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3972,7 +4112,7 @@ var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo func (m *PodResourceClaim) Reset() { *m = PodResourceClaim{} } func (*PodResourceClaim) ProtoMessage() {} func (*PodResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{140} + return fileDescriptor_6c07b07c062484ab, []int{145} } func (m *PodResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4000,7 +4140,7 @@ var xxx_messageInfo_PodResourceClaim proto.InternalMessageInfo func (m *PodResourceClaimStatus) Reset() { *m = PodResourceClaimStatus{} } func (*PodResourceClaimStatus) ProtoMessage() {} func (*PodResourceClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{141} + return fileDescriptor_6c07b07c062484ab, []int{146} } func (m *PodResourceClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4028,7 +4168,7 @@ var xxx_messageInfo_PodResourceClaimStatus proto.InternalMessageInfo func (m *PodSchedulingGate) Reset() { *m = PodSchedulingGate{} } func (*PodSchedulingGate) ProtoMessage() {} func (*PodSchedulingGate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{142} + return fileDescriptor_6c07b07c062484ab, []int{147} } func (m *PodSchedulingGate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4056,7 +4196,7 @@ var xxx_messageInfo_PodSchedulingGate proto.InternalMessageInfo func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } func (*PodSecurityContext) ProtoMessage() {} func (*PodSecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{143} + return fileDescriptor_6c07b07c062484ab, []int{148} } func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4084,7 +4224,7 @@ var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo func (m *PodSignature) Reset() { *m = PodSignature{} } func (*PodSignature) ProtoMessage() {} func (*PodSignature) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{144} + return fileDescriptor_6c07b07c062484ab, []int{149} } func (m *PodSignature) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-4112,7 +4252,7 @@ var xxx_messageInfo_PodSignature proto.InternalMessageInfo func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} func (*PodSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{145} + return fileDescriptor_6c07b07c062484ab, []int{150} } func (m *PodSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4140,7 +4280,7 @@ var xxx_messageInfo_PodSpec proto.InternalMessageInfo func (m *PodStatus) Reset() { *m = PodStatus{} } func (*PodStatus) ProtoMessage() {} func (*PodStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{146} + return fileDescriptor_6c07b07c062484ab, []int{151} } func (m *PodStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4168,7 +4308,7 @@ var xxx_messageInfo_PodStatus proto.InternalMessageInfo func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } func (*PodStatusResult) ProtoMessage() {} func (*PodStatusResult) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{147} + return fileDescriptor_6c07b07c062484ab, []int{152} } func (m *PodStatusResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4196,7 +4336,7 @@ var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo func (m *PodTemplate) Reset() { *m = PodTemplate{} } func (*PodTemplate) ProtoMessage() {} func (*PodTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{148} + return fileDescriptor_6c07b07c062484ab, []int{153} } func (m *PodTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4224,7 +4364,7 @@ var xxx_messageInfo_PodTemplate proto.InternalMessageInfo func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } func (*PodTemplateList) ProtoMessage() {} func (*PodTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{149} + return fileDescriptor_6c07b07c062484ab, []int{154} } func (m *PodTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4252,7 +4392,7 @@ var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (*PodTemplateSpec) ProtoMessage() {} func (*PodTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{150} + return fileDescriptor_6c07b07c062484ab, []int{155} } func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4280,7 +4420,7 @@ var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo func (m *PortStatus) Reset() { *m = PortStatus{} } func (*PortStatus) ProtoMessage() {} func (*PortStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{151} + return fileDescriptor_6c07b07c062484ab, []int{156} } func (m *PortStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4308,7 +4448,7 @@ var xxx_messageInfo_PortStatus proto.InternalMessageInfo func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{152} + return fileDescriptor_6c07b07c062484ab, []int{157} } func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4336,7 +4476,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} func (*Preconditions) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_83c10c24ec417dc9, []int{153} + return fileDescriptor_6c07b07c062484ab, []int{158} } func (m *Preconditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4364,7 +4504,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{154} + return fileDescriptor_6c07b07c062484ab, []int{159} } func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4392,7 +4532,7 @@ var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{155} + return fileDescriptor_6c07b07c062484ab, []int{160} } func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4420,7 +4560,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} func (*Probe) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{156} + return fileDescriptor_6c07b07c062484ab, []int{161} } func (m *Probe) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4448,7 +4588,7 @@ var xxx_messageInfo_Probe proto.InternalMessageInfo func (m *ProbeHandler) Reset() { *m = ProbeHandler{} } func (*ProbeHandler) ProtoMessage() {} func (*ProbeHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{157} + return fileDescriptor_6c07b07c062484ab, []int{162} } func (m *ProbeHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4476,7 +4616,7 @@ var xxx_messageInfo_ProbeHandler proto.InternalMessageInfo func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{158} + return fileDescriptor_6c07b07c062484ab, []int{163} } func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4504,7 +4644,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{159} + return fileDescriptor_6c07b07c062484ab, []int{164} } func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4532,7 +4672,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } func (*RBDPersistentVolumeSource) ProtoMessage() {} func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{160} + return fileDescriptor_6c07b07c062484ab, []int{165} } func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4560,7 +4700,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} func (*RBDVolumeSource) Descriptor() 
([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{161} + return fileDescriptor_6c07b07c062484ab, []int{166} } func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4588,7 +4728,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} func (*RangeAllocation) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{162} + return fileDescriptor_6c07b07c062484ab, []int{167} } func (m *RangeAllocation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4616,7 +4756,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} func (*ReplicationController) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{163} + return fileDescriptor_6c07b07c062484ab, []int{168} } func (m *ReplicationController) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4644,7 +4784,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{164} + return fileDescriptor_6c07b07c062484ab, []int{169} } func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4672,7 +4812,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{165} + return fileDescriptor_6c07b07c062484ab, []int{170} } func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4700,7 +4840,7 @@ var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{166} + return fileDescriptor_6c07b07c062484ab, []int{171} } func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4728,7 +4868,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{167} + return fileDescriptor_6c07b07c062484ab, []int{172} } func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4756,7 +4896,7 @@ var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } func (*ResourceClaim) ProtoMessage() {} func (*ResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{168} + return fileDescriptor_6c07b07c062484ab, []int{173} } func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4784,7 +4924,7 @@ var 
xxx_messageInfo_ResourceClaim proto.InternalMessageInfo func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{169} + return fileDescriptor_6c07b07c062484ab, []int{174} } func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4809,10 +4949,38 @@ func (m *ResourceFieldSelector) XXX_DiscardUnknown() { var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo +func (m *ResourceHealth) Reset() { *m = ResourceHealth{} } +func (*ResourceHealth) ProtoMessage() {} +func (*ResourceHealth) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{175} +} +func (m *ResourceHealth) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceHealth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceHealth) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceHealth.Merge(m, src) +} +func (m *ResourceHealth) XXX_Size() int { + return m.Size() +} +func (m *ResourceHealth) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceHealth.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceHealth proto.InternalMessageInfo + func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} func (*ResourceQuota) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{170} + return fileDescriptor_6c07b07c062484ab, []int{176} } func (m *ResourceQuota) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4840,7 +5008,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} func (*ResourceQuotaList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{171} + return fileDescriptor_6c07b07c062484ab, []int{177} } func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4868,7 +5036,7 @@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (*ResourceQuotaSpec) ProtoMessage() {} func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{172} + return fileDescriptor_6c07b07c062484ab, []int{178} } func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4896,7 +5064,7 @@ var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{173} + return fileDescriptor_6c07b07c062484ab, []int{179} } func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4924,7 +5092,7 @@ var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} func (*ResourceRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{174} + return fileDescriptor_6c07b07c062484ab, []int{180} } func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-4949,10 +5117,38 @@ func (m *ResourceRequirements) XXX_DiscardUnknown() { var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo +func (m *ResourceStatus) Reset() { *m = ResourceStatus{} } +func (*ResourceStatus) ProtoMessage() {} +func (*ResourceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{181} +} +func (m *ResourceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceStatus.Merge(m, src) +} +func (m *ResourceStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceStatus proto.InternalMessageInfo + func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} func (*SELinuxOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{175} + return fileDescriptor_6c07b07c062484ab, []int{182} } func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4980,7 +5176,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{176} + return fileDescriptor_6c07b07c062484ab, []int{183} } func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5008,7 +5204,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (*ScaleIOVolumeSource) ProtoMessage() {} func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{177} + return fileDescriptor_6c07b07c062484ab, []int{184} } func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5036,7 +5232,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } func (*ScopeSelector) ProtoMessage() {} func (*ScopeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{178} + return fileDescriptor_6c07b07c062484ab, []int{185} } func (m *ScopeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5064,7 +5260,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } func (*ScopedResourceSelectorRequirement) ProtoMessage() {} func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{179} + return fileDescriptor_6c07b07c062484ab, []int{186} } func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5092,7 +5288,7 @@ var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo func (m *SeccompProfile) Reset() { *m = SeccompProfile{} } func (*SeccompProfile) ProtoMessage() {} func (*SeccompProfile) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{180} + return 
fileDescriptor_6c07b07c062484ab, []int{187} } func (m *SeccompProfile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5120,7 +5316,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} func (*Secret) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{181} + return fileDescriptor_6c07b07c062484ab, []int{188} } func (m *Secret) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5148,7 +5344,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (*SecretEnvSource) ProtoMessage() {} func (*SecretEnvSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{182} + return fileDescriptor_6c07b07c062484ab, []int{189} } func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5176,7 +5372,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} func (*SecretKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{183} + return fileDescriptor_6c07b07c062484ab, []int{190} } func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5204,7 +5400,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} func (*SecretList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{184} + return fileDescriptor_6c07b07c062484ab, []int{191} } func (m *SecretList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5232,7 +5428,7 @@ var xxx_messageInfo_SecretList proto.InternalMessageInfo func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (*SecretProjection) ProtoMessage() {} func (*SecretProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{185} + return fileDescriptor_6c07b07c062484ab, []int{192} } func (m *SecretProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5260,7 +5456,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo func (m *SecretReference) Reset() { *m = SecretReference{} } func (*SecretReference) ProtoMessage() {} func (*SecretReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{186} + return fileDescriptor_6c07b07c062484ab, []int{193} } func (m *SecretReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5288,7 +5484,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} func (*SecretVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{187} + return fileDescriptor_6c07b07c062484ab, []int{194} } func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5316,7 +5512,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) ProtoMessage() {} func (*SecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{188} + return fileDescriptor_6c07b07c062484ab, []int{195} } func (m *SecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5344,7 +5540,7 @@ var 
xxx_messageInfo_SecurityContext proto.InternalMessageInfo func (m *SerializedReference) Reset() { *m = SerializedReference{} } func (*SerializedReference) ProtoMessage() {} func (*SerializedReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{189} + return fileDescriptor_6c07b07c062484ab, []int{196} } func (m *SerializedReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5372,7 +5568,7 @@ var xxx_messageInfo_SerializedReference proto.InternalMessageInfo func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} func (*Service) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{190} + return fileDescriptor_6c07b07c062484ab, []int{197} } func (m *Service) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5400,7 +5596,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} func (*ServiceAccount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{191} + return fileDescriptor_6c07b07c062484ab, []int{198} } func (m *ServiceAccount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5428,7 +5624,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) ProtoMessage() {} func (*ServiceAccountList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{192} + return fileDescriptor_6c07b07c062484ab, []int{199} } func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5456,7 +5652,7 @@ var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } func (*ServiceAccountTokenProjection) ProtoMessage() {} func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{193} + return fileDescriptor_6c07b07c062484ab, []int{200} } func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5484,7 +5680,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} func (*ServiceList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{194} + return fileDescriptor_6c07b07c062484ab, []int{201} } func (m *ServiceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5512,7 +5708,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} func (*ServicePort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{195} + return fileDescriptor_6c07b07c062484ab, []int{202} } func (m *ServicePort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5540,7 +5736,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) ProtoMessage() {} func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{196} + return fileDescriptor_6c07b07c062484ab, []int{203} } func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5568,7 +5764,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo func (m 
*ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} func (*ServiceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{197} + return fileDescriptor_6c07b07c062484ab, []int{204} } func (m *ServiceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5596,7 +5792,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} func (*ServiceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{198} + return fileDescriptor_6c07b07c062484ab, []int{205} } func (m *ServiceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5624,7 +5820,7 @@ var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } func (*SessionAffinityConfig) ProtoMessage() {} func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{199} + return fileDescriptor_6c07b07c062484ab, []int{206} } func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5652,7 +5848,7 @@ var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo func (m *SleepAction) Reset() { *m = SleepAction{} } func (*SleepAction) ProtoMessage() {} func (*SleepAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{200} + return fileDescriptor_6c07b07c062484ab, []int{207} } func (m *SleepAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5680,7 +5876,7 @@ var xxx_messageInfo_SleepAction proto.InternalMessageInfo func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } func (*StorageOSPersistentVolumeSource) ProtoMessage() {} func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{201} + return fileDescriptor_6c07b07c062484ab, []int{208} } func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5708,7 +5904,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } func (*StorageOSVolumeSource) ProtoMessage() {} func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{202} + return fileDescriptor_6c07b07c062484ab, []int{209} } func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5736,7 +5932,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} func (*Sysctl) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{203} + return fileDescriptor_6c07b07c062484ab, []int{210} } func (m *Sysctl) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5764,7 +5960,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} func (*TCPSocketAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{204} + return fileDescriptor_6c07b07c062484ab, []int{211} } func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5792,7 +5988,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo func (m *Taint) Reset() { *m = Taint{} } func (*Taint) 
ProtoMessage() {} func (*Taint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{205} + return fileDescriptor_6c07b07c062484ab, []int{212} } func (m *Taint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5820,7 +6016,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} func (*Toleration) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{206} + return fileDescriptor_6c07b07c062484ab, []int{213} } func (m *Toleration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5848,7 +6044,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } func (*TopologySelectorLabelRequirement) ProtoMessage() {} func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{207} + return fileDescriptor_6c07b07c062484ab, []int{214} } func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5876,7 +6072,7 @@ var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } func (*TopologySelectorTerm) ProtoMessage() {} func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{208} + return fileDescriptor_6c07b07c062484ab, []int{215} } func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5904,7 +6100,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} } func (*TopologySpreadConstraint) ProtoMessage() {} func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{209} + return fileDescriptor_6c07b07c062484ab, []int{216} } func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5932,7 +6128,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } func (*TypedLocalObjectReference) ProtoMessage() {} func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{210} + return fileDescriptor_6c07b07c062484ab, []int{217} } func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5960,7 +6156,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo func (m *TypedObjectReference) Reset() { *m = TypedObjectReference{} } func (*TypedObjectReference) ProtoMessage() {} func (*TypedObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{211} + return fileDescriptor_6c07b07c062484ab, []int{218} } func (m *TypedObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5988,7 +6184,7 @@ var xxx_messageInfo_TypedObjectReference proto.InternalMessageInfo func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} func (*Volume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{212} + return fileDescriptor_6c07b07c062484ab, []int{219} } func (m *Volume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6016,7 +6212,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo func (m *VolumeDevice) Reset() { *m = 
VolumeDevice{} } func (*VolumeDevice) ProtoMessage() {} func (*VolumeDevice) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{213} + return fileDescriptor_6c07b07c062484ab, []int{220} } func (m *VolumeDevice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6044,7 +6240,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} func (*VolumeMount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{214} + return fileDescriptor_6c07b07c062484ab, []int{221} } func (m *VolumeMount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6069,10 +6265,38 @@ func (m *VolumeMount) XXX_DiscardUnknown() { var xxx_messageInfo_VolumeMount proto.InternalMessageInfo +func (m *VolumeMountStatus) Reset() { *m = VolumeMountStatus{} } +func (*VolumeMountStatus) ProtoMessage() {} +func (*VolumeMountStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{222} +} +func (m *VolumeMountStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeMountStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeMountStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeMountStatus.Merge(m, src) +} +func (m *VolumeMountStatus) XXX_Size() int { + return m.Size() +} +func (m *VolumeMountStatus) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeMountStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeMountStatus proto.InternalMessageInfo + func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } func (*VolumeNodeAffinity) ProtoMessage() {} func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{215} + return fileDescriptor_6c07b07c062484ab, []int{223} } func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6100,7 +6324,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (*VolumeProjection) ProtoMessage() {} func (*VolumeProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{216} + return fileDescriptor_6c07b07c062484ab, []int{224} } func (m *VolumeProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6128,7 +6352,7 @@ var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo func (m *VolumeResourceRequirements) Reset() { *m = VolumeResourceRequirements{} } func (*VolumeResourceRequirements) ProtoMessage() {} func (*VolumeResourceRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{217} + return fileDescriptor_6c07b07c062484ab, []int{225} } func (m *VolumeResourceRequirements) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6156,7 +6380,7 @@ var xxx_messageInfo_VolumeResourceRequirements proto.InternalMessageInfo func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} func (*VolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{218} + return fileDescriptor_6c07b07c062484ab, []int{226} } func (m *VolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6184,7 +6408,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = 
VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{219} + return fileDescriptor_6c07b07c062484ab, []int{227} } func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6212,7 +6436,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{220} + return fileDescriptor_6c07b07c062484ab, []int{228} } func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6240,7 +6464,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} } func (*WindowsSecurityContextOptions) ProtoMessage() {} func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{221} + return fileDescriptor_6c07b07c062484ab, []int{229} } func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6268,6 +6492,7 @@ var xxx_messageInfo_WindowsSecurityContextOptions proto.InternalMessageInfo func init() { proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource") proto.RegisterType((*Affinity)(nil), "k8s.io.api.core.v1.Affinity") + proto.RegisterType((*AppArmorProfile)(nil), "k8s.io.api.core.v1.AppArmorProfile") proto.RegisterType((*AttachedVolume)(nil), "k8s.io.api.core.v1.AttachedVolume") proto.RegisterType((*AvoidPods)(nil), "k8s.io.api.core.v1.AvoidPods") proto.RegisterType((*AzureDiskVolumeSource)(nil), "k8s.io.api.core.v1.AzureDiskVolumeSource") @@ -6283,7 +6508,6 @@ func init() { proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource") proto.RegisterType((*CinderPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CinderPersistentVolumeSource") proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource") - proto.RegisterType((*ClaimSource)(nil), "k8s.io.api.core.v1.ClaimSource") proto.RegisterType((*ClientIPConfig)(nil), "k8s.io.api.core.v1.ClientIPConfig") proto.RegisterType((*ClusterTrustBundleProjection)(nil), "k8s.io.api.core.v1.ClusterTrustBundleProjection") proto.RegisterType((*ComponentCondition)(nil), "k8s.io.api.core.v1.ComponentCondition") @@ -6308,6 +6532,7 @@ func init() { proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.api.core.v1.ContainerStateWaiting") proto.RegisterType((*ContainerStatus)(nil), "k8s.io.api.core.v1.ContainerStatus") proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ContainerStatus.AllocatedResourcesEntry") + proto.RegisterType((*ContainerUser)(nil), "k8s.io.api.core.v1.ContainerUser") proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.api.core.v1.DaemonEndpoint") proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.api.core.v1.DownwardAPIProjection") proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeFile") @@ -6347,6 +6572,7 @@ func init() { proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource") proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource") 
proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIVolumeSource") + proto.RegisterType((*ImageVolumeSource)(nil), "k8s.io.api.core.v1.ImageVolumeSource") proto.RegisterType((*KeyToPath)(nil), "k8s.io.api.core.v1.KeyToPath") proto.RegisterType((*Lifecycle)(nil), "k8s.io.api.core.v1.Lifecycle") proto.RegisterType((*LifecycleHandler)(nil), "k8s.io.api.core.v1.LifecycleHandler") @@ -6359,6 +6585,7 @@ func init() { proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MinEntry") proto.RegisterType((*LimitRangeList)(nil), "k8s.io.api.core.v1.LimitRangeList") proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.api.core.v1.LimitRangeSpec") + proto.RegisterType((*LinuxContainerUser)(nil), "k8s.io.api.core.v1.LinuxContainerUser") proto.RegisterType((*List)(nil), "k8s.io.api.core.v1.List") proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.api.core.v1.LoadBalancerIngress") proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.api.core.v1.LoadBalancerStatus") @@ -6378,10 +6605,11 @@ func init() { proto.RegisterType((*NodeConfigSource)(nil), "k8s.io.api.core.v1.NodeConfigSource") proto.RegisterType((*NodeConfigStatus)(nil), "k8s.io.api.core.v1.NodeConfigStatus") proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.api.core.v1.NodeDaemonEndpoints") + proto.RegisterType((*NodeFeatures)(nil), "k8s.io.api.core.v1.NodeFeatures") proto.RegisterType((*NodeList)(nil), "k8s.io.api.core.v1.NodeList") proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.api.core.v1.NodeProxyOptions") - proto.RegisterType((*NodeResources)(nil), "k8s.io.api.core.v1.NodeResources") - proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeResources.CapacityEntry") + proto.RegisterType((*NodeRuntimeHandler)(nil), "k8s.io.api.core.v1.NodeRuntimeHandler") + proto.RegisterType((*NodeRuntimeHandlerFeatures)(nil), "k8s.io.api.core.v1.NodeRuntimeHandlerFeatures") proto.RegisterType((*NodeSelector)(nil), "k8s.io.api.core.v1.NodeSelector") proto.RegisterType((*NodeSelectorRequirement)(nil), "k8s.io.api.core.v1.NodeSelectorRequirement") proto.RegisterType((*NodeSelectorTerm)(nil), "k8s.io.api.core.v1.NodeSelectorTerm") @@ -6458,6 +6686,7 @@ func init() { proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.api.core.v1.ReplicationControllerStatus") proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.core.v1.ResourceClaim") proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.api.core.v1.ResourceFieldSelector") + proto.RegisterType((*ResourceHealth)(nil), "k8s.io.api.core.v1.ResourceHealth") proto.RegisterType((*ResourceQuota)(nil), "k8s.io.api.core.v1.ResourceQuota") proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.api.core.v1.ResourceQuotaList") proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec") @@ -6468,6 +6697,7 @@ func init() { proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.api.core.v1.ResourceRequirements") proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceRequirements.LimitsEntry") proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceRequirements.RequestsEntry") + proto.RegisterType((*ResourceStatus)(nil), "k8s.io.api.core.v1.ResourceStatus") proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.api.core.v1.SELinuxOptions") proto.RegisterType((*ScaleIOPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOPersistentVolumeSource") proto.RegisterType((*ScaleIOVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOVolumeSource") @@ -6511,6 +6741,7 @@ func init() { 
proto.RegisterType((*Volume)(nil), "k8s.io.api.core.v1.Volume") proto.RegisterType((*VolumeDevice)(nil), "k8s.io.api.core.v1.VolumeDevice") proto.RegisterType((*VolumeMount)(nil), "k8s.io.api.core.v1.VolumeMount") + proto.RegisterType((*VolumeMountStatus)(nil), "k8s.io.api.core.v1.VolumeMountStatus") proto.RegisterType((*VolumeNodeAffinity)(nil), "k8s.io.api.core.v1.VolumeNodeAffinity") proto.RegisterType((*VolumeProjection)(nil), "k8s.io.api.core.v1.VolumeProjection") proto.RegisterType((*VolumeResourceRequirements)(nil), "k8s.io.api.core.v1.VolumeResourceRequirements") @@ -6523,978 +6754,1015 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto", fileDescriptor_83c10c24ec417dc9) -} - -var fileDescriptor_83c10c24ec417dc9 = []byte{ - // 15465 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x90, 0x1c, 0xc9, - 0x75, 0x18, 0xcc, 0xea, 0x9e, 0xab, 0xdf, 0xdc, 0x39, 0x00, 0x76, 0x30, 0x0b, 0xa0, 0xb1, 0xb5, - 0xbb, 0x58, 0xec, 0x35, 0x20, 0xf6, 0x20, 0x97, 0xbb, 0xcb, 0x15, 0xe7, 0x04, 0x66, 0x31, 0x33, - 0xe8, 0xcd, 0x1e, 0x00, 0xe4, 0x72, 0xc9, 0x8f, 0x85, 0xee, 0x9c, 0x99, 0xe2, 0x74, 0x57, 0xf5, - 0x56, 0x55, 0x0f, 0x30, 0xf8, 0xc8, 0x90, 0x44, 0x7d, 0xa2, 0x44, 0x4a, 0x5f, 0x04, 0xe3, 0x0b, - 0x7d, 0x47, 0x50, 0x0a, 0xc5, 0x17, 0x92, 0xac, 0xc3, 0xb4, 0x64, 0xd3, 0x94, 0x25, 0x59, 0xd4, - 0xe5, 0x2b, 0x2c, 0x29, 0x1c, 0xb2, 0xac, 0x08, 0x8b, 0x0a, 0x2b, 0x3c, 0x32, 0x21, 0x47, 0x28, - 0xf4, 0xc3, 0x92, 0x7c, 0xfc, 0xb0, 0x61, 0xd9, 0x72, 0xe4, 0x59, 0x99, 0x75, 0x74, 0xf7, 0x60, - 0x07, 0xc3, 0x25, 0x63, 0xff, 0x75, 0xbf, 0xf7, 0xf2, 0x65, 0x56, 0x9e, 0x2f, 0xdf, 0x7b, 0xf9, - 0x1e, 0xbc, 0xb2, 0xf3, 0x52, 0x38, 0xeb, 0xfa, 0x17, 0x76, 0xda, 0x37, 0x49, 0xe0, 0x91, 0x88, - 0x84, 0x17, 0x76, 0x89, 0x57, 0xf7, 0x83, 0x0b, 0x02, 0xe1, 0xb4, 0xdc, 0x0b, 0x35, 0x3f, 0x20, - 0x17, 0x76, 0x2f, 0x5e, 0xd8, 0x22, 0x1e, 0x09, 0x9c, 0x88, 0xd4, 0x67, 0x5b, 0x81, 0x1f, 0xf9, - 0x08, 0x71, 0x9a, 0x59, 0xa7, 0xe5, 0xce, 0x52, 0x9a, 0xd9, 0xdd, 0x8b, 0x33, 0xcf, 0x6e, 0xb9, - 0xd1, 0x76, 0xfb, 0xe6, 0x6c, 0xcd, 0x6f, 0x5e, 0xd8, 0xf2, 0xb7, 0xfc, 0x0b, 0x8c, 0xf4, 0x66, - 0x7b, 0x93, 0xfd, 0x63, 0x7f, 0xd8, 0x2f, 0xce, 0x62, 0xe6, 0x85, 0xb8, 0x9a, 0xa6, 0x53, 0xdb, - 0x76, 0x3d, 0x12, 0xec, 0x5d, 0x68, 0xed, 0x6c, 0xb1, 0x7a, 0x03, 0x12, 0xfa, 0xed, 0xa0, 0x46, - 0x92, 0x15, 0x77, 0x2c, 0x15, 0x5e, 0x68, 0x92, 0xc8, 0xc9, 0x68, 0xee, 0xcc, 0x85, 0xbc, 0x52, - 0x41, 0xdb, 0x8b, 0xdc, 0x66, 0xba, 0x9a, 0x0f, 0x74, 0x2b, 0x10, 0xd6, 0xb6, 0x49, 0xd3, 0x49, - 0x95, 0x7b, 0x3e, 0xaf, 0x5c, 0x3b, 0x72, 0x1b, 0x17, 0x5c, 0x2f, 0x0a, 0xa3, 0x20, 0x59, 0xc8, - 0xfe, 0x86, 0x05, 0x67, 0xe7, 0x6e, 0x54, 0x97, 0x1a, 0x4e, 0x18, 0xb9, 0xb5, 0xf9, 0x86, 0x5f, - 0xdb, 0xa9, 0x46, 0x7e, 0x40, 0xae, 0xfb, 0x8d, 0x76, 0x93, 0x54, 0x59, 0x47, 0xa0, 0x67, 0x60, - 0x68, 0x97, 0xfd, 0x5f, 0x59, 0x9c, 0xb6, 0xce, 0x5a, 0xe7, 0x4b, 0xf3, 0x13, 0xbf, 0xbd, 0x5f, - 0x7e, 0xdf, 0xdd, 0xfd, 0xf2, 0xd0, 0x75, 0x01, 0xc7, 0x8a, 0x02, 0x9d, 0x83, 0x81, 0xcd, 0x70, - 0x63, 0xaf, 0x45, 0xa6, 0x0b, 0x8c, 0x76, 0x4c, 0xd0, 0x0e, 0x2c, 0x57, 0x29, 0x14, 0x0b, 0x2c, - 0xba, 0x00, 0xa5, 0x96, 0x13, 0x44, 0x6e, 0xe4, 0xfa, 0xde, 0x74, 0xf1, 0xac, 0x75, 0xbe, 0x7f, - 0x7e, 0x52, 0x90, 0x96, 0x2a, 0x12, 0x81, 0x63, 0x1a, 0xda, 0x8c, 0x80, 0x38, 0xf5, 0xab, 0x5e, - 0x63, 0x6f, 0xba, 0xef, 0xac, 0x75, 0x7e, 0x28, 0x6e, 0x06, 0x16, 0x70, 0xac, 0x28, 0xec, 0x2f, - 0x17, 0x60, 0x68, 0x6e, 0x73, 0xd3, 0xf5, 0xdc, 
0x68, 0x0f, 0x5d, 0x87, 0x11, 0xcf, 0xaf, 0x13, - 0xf9, 0x9f, 0x7d, 0xc5, 0xf0, 0x73, 0x67, 0x67, 0xd3, 0x53, 0x69, 0x76, 0x5d, 0xa3, 0x9b, 0x9f, - 0xb8, 0xbb, 0x5f, 0x1e, 0xd1, 0x21, 0xd8, 0xe0, 0x83, 0x30, 0x0c, 0xb7, 0xfc, 0xba, 0x62, 0x5b, - 0x60, 0x6c, 0xcb, 0x59, 0x6c, 0x2b, 0x31, 0xd9, 0xfc, 0xf8, 0xdd, 0xfd, 0xf2, 0xb0, 0x06, 0xc0, - 0x3a, 0x13, 0x74, 0x13, 0xc6, 0xe9, 0x5f, 0x2f, 0x72, 0x15, 0xdf, 0x22, 0xe3, 0xfb, 0x68, 0x1e, - 0x5f, 0x8d, 0x74, 0x7e, 0xea, 0xee, 0x7e, 0x79, 0x3c, 0x01, 0xc4, 0x49, 0x86, 0xf6, 0x1d, 0x18, - 0x9b, 0x8b, 0x22, 0xa7, 0xb6, 0x4d, 0xea, 0x7c, 0x04, 0xd1, 0x0b, 0xd0, 0xe7, 0x39, 0x4d, 0x22, - 0xc6, 0xf7, 0xac, 0xe8, 0xd8, 0xbe, 0x75, 0xa7, 0x49, 0xee, 0xed, 0x97, 0x27, 0xae, 0x79, 0xee, - 0xdb, 0x6d, 0x31, 0x2b, 0x28, 0x0c, 0x33, 0x6a, 0xf4, 0x1c, 0x40, 0x9d, 0xec, 0xba, 0x35, 0x52, - 0x71, 0xa2, 0x6d, 0x31, 0xde, 0x48, 0x94, 0x85, 0x45, 0x85, 0xc1, 0x1a, 0x95, 0x7d, 0x1b, 0x4a, - 0x73, 0xbb, 0xbe, 0x5b, 0xaf, 0xf8, 0xf5, 0x10, 0xed, 0xc0, 0x78, 0x2b, 0x20, 0x9b, 0x24, 0x50, - 0xa0, 0x69, 0xeb, 0x6c, 0xf1, 0xfc, 0xf0, 0x73, 0xe7, 0x33, 0x3f, 0xd6, 0x24, 0x5d, 0xf2, 0xa2, - 0x60, 0x6f, 0xfe, 0x21, 0x51, 0xdf, 0x78, 0x02, 0x8b, 0x93, 0x9c, 0xed, 0x7f, 0x5a, 0x80, 0xe3, - 0x73, 0x77, 0xda, 0x01, 0x59, 0x74, 0xc3, 0x9d, 0xe4, 0x0c, 0xaf, 0xbb, 0xe1, 0xce, 0x7a, 0xdc, - 0x03, 0x6a, 0x6a, 0x2d, 0x0a, 0x38, 0x56, 0x14, 0xe8, 0x59, 0x18, 0xa4, 0xbf, 0xaf, 0xe1, 0x15, - 0xf1, 0xc9, 0x53, 0x82, 0x78, 0x78, 0xd1, 0x89, 0x9c, 0x45, 0x8e, 0xc2, 0x92, 0x06, 0xad, 0xc1, - 0x70, 0x8d, 0x2d, 0xc8, 0xad, 0x35, 0xbf, 0x4e, 0xd8, 0x60, 0x96, 0xe6, 0x9f, 0xa6, 0xe4, 0x0b, - 0x31, 0xf8, 0xde, 0x7e, 0x79, 0x9a, 0xb7, 0x4d, 0xb0, 0xd0, 0x70, 0x58, 0x2f, 0x8f, 0x6c, 0xb5, - 0xbe, 0xfa, 0x18, 0x27, 0xc8, 0x58, 0x5b, 0xe7, 0xb5, 0xa5, 0xd2, 0xcf, 0x96, 0xca, 0x48, 0xf6, - 0x32, 0x41, 0x17, 0xa1, 0x6f, 0xc7, 0xf5, 0xea, 0xd3, 0x03, 0x8c, 0xd7, 0x69, 0x3a, 0xe6, 0x57, - 0x5c, 0xaf, 0x7e, 0x6f, 0xbf, 0x3c, 0x69, 0x34, 0x87, 0x02, 0x31, 0x23, 0xb5, 0xff, 0xb3, 0x05, - 0x65, 0x86, 0x5b, 0x76, 0x1b, 0xa4, 0x42, 0x82, 0xd0, 0x0d, 0x23, 0xe2, 0x45, 0x46, 0x87, 0x3e, - 0x07, 0x10, 0x92, 0x5a, 0x40, 0x22, 0xad, 0x4b, 0xd5, 0xc4, 0xa8, 0x2a, 0x0c, 0xd6, 0xa8, 0xe8, - 0x86, 0x10, 0x6e, 0x3b, 0x01, 0x9b, 0x5f, 0xa2, 0x63, 0xd5, 0x86, 0x50, 0x95, 0x08, 0x1c, 0xd3, - 0x18, 0x1b, 0x42, 0xb1, 0xdb, 0x86, 0x80, 0x3e, 0x0c, 0xe3, 0x71, 0x65, 0x61, 0xcb, 0xa9, 0xc9, - 0x0e, 0x64, 0x4b, 0xa6, 0x6a, 0xa2, 0x70, 0x92, 0xd6, 0xfe, 0xdb, 0x96, 0x98, 0x3c, 0xf4, 0xab, - 0xdf, 0xe5, 0xdf, 0x6a, 0xff, 0x8a, 0x05, 0x83, 0xf3, 0xae, 0x57, 0x77, 0xbd, 0x2d, 0xf4, 0x29, - 0x18, 0xa2, 0x67, 0x53, 0xdd, 0x89, 0x1c, 0xb1, 0xef, 0xbd, 0x5f, 0x5b, 0x5b, 0xea, 0xa8, 0x98, - 0x6d, 0xed, 0x6c, 0x51, 0x40, 0x38, 0x4b, 0xa9, 0xe9, 0x6a, 0xbb, 0x7a, 0xf3, 0xd3, 0xa4, 0x16, - 0xad, 0x91, 0xc8, 0x89, 0x3f, 0x27, 0x86, 0x61, 0xc5, 0x15, 0x5d, 0x81, 0x81, 0xc8, 0x09, 0xb6, - 0x48, 0x24, 0x36, 0xc0, 0xcc, 0x8d, 0x8a, 0x97, 0xc4, 0x74, 0x45, 0x12, 0xaf, 0x46, 0xe2, 0x63, - 0x61, 0x83, 0x15, 0xc5, 0x82, 0x85, 0xfd, 0x3f, 0x06, 0xe1, 0xe4, 0x42, 0x75, 0x25, 0x67, 0x5e, - 0x9d, 0x83, 0x81, 0x7a, 0xe0, 0xee, 0x92, 0x40, 0xf4, 0xb3, 0xe2, 0xb2, 0xc8, 0xa0, 0x58, 0x60, - 0xd1, 0x4b, 0x30, 0xc2, 0x0f, 0xa4, 0xcb, 0x8e, 0x57, 0x6f, 0xc8, 0x2e, 0x3e, 0x26, 0xa8, 0x47, - 0xae, 0x6b, 0x38, 0x6c, 0x50, 0x1e, 0x70, 0x52, 0x9d, 0x4b, 0x2c, 0xc6, 0xbc, 0xc3, 0xee, 0x0b, - 0x16, 0x4c, 0xf0, 0x6a, 0xe6, 0xa2, 0x28, 0x70, 0x6f, 0xb6, 0x23, 0x12, 0x4e, 0xf7, 0xb3, 0x9d, - 0x6e, 0x21, 0xab, 0xb7, 0x72, 0x7b, 0x60, 0xf6, 0x7a, 0x82, 0x0b, 0xdf, 
0x04, 0xa7, 0x45, 0xbd, - 0x13, 0x49, 0x34, 0x4e, 0x55, 0x8b, 0xbe, 0xcf, 0x82, 0x99, 0x9a, 0xef, 0x45, 0x81, 0xdf, 0x68, - 0x90, 0xa0, 0xd2, 0xbe, 0xd9, 0x70, 0xc3, 0x6d, 0x3e, 0x4f, 0x31, 0xd9, 0x64, 0x3b, 0x41, 0xce, - 0x18, 0x2a, 0x22, 0x31, 0x86, 0x67, 0xee, 0xee, 0x97, 0x67, 0x16, 0x72, 0x59, 0xe1, 0x0e, 0xd5, - 0xa0, 0x1d, 0x40, 0xf4, 0x28, 0xad, 0x46, 0xce, 0x16, 0x89, 0x2b, 0x1f, 0xec, 0xbd, 0xf2, 0x13, - 0x77, 0xf7, 0xcb, 0x68, 0x3d, 0xc5, 0x02, 0x67, 0xb0, 0x45, 0x6f, 0xc3, 0x31, 0x0a, 0x4d, 0x7d, - 0xeb, 0x50, 0xef, 0xd5, 0x4d, 0xdf, 0xdd, 0x2f, 0x1f, 0x5b, 0xcf, 0x60, 0x82, 0x33, 0x59, 0xa3, - 0xef, 0xb1, 0xe0, 0x64, 0xfc, 0xf9, 0x4b, 0xb7, 0x5b, 0x8e, 0x57, 0x8f, 0x2b, 0x2e, 0xf5, 0x5e, - 0x31, 0xdd, 0x93, 0x4f, 0x2e, 0xe4, 0x71, 0xc2, 0xf9, 0x95, 0x20, 0x0f, 0xa6, 0x68, 0xd3, 0x92, - 0x75, 0x43, 0xef, 0x75, 0x3f, 0x74, 0x77, 0xbf, 0x3c, 0xb5, 0x9e, 0xe6, 0x81, 0xb3, 0x18, 0xcf, - 0x2c, 0xc0, 0xf1, 0xcc, 0xd9, 0x89, 0x26, 0xa0, 0xb8, 0x43, 0xb8, 0xd4, 0x55, 0xc2, 0xf4, 0x27, - 0x3a, 0x06, 0xfd, 0xbb, 0x4e, 0xa3, 0x2d, 0x16, 0x26, 0xe6, 0x7f, 0x5e, 0x2e, 0xbc, 0x64, 0xd9, - 0xff, 0xac, 0x08, 0xe3, 0x0b, 0xd5, 0x95, 0xfb, 0x5a, 0xf5, 0xfa, 0xb1, 0x57, 0xe8, 0x78, 0xec, - 0xc5, 0x87, 0x68, 0x31, 0xf7, 0x10, 0xfd, 0xee, 0x8c, 0x25, 0xdb, 0xc7, 0x96, 0xec, 0x87, 0x72, - 0x96, 0xec, 0x21, 0x2f, 0xd4, 0xdd, 0x9c, 0x59, 0xdb, 0xcf, 0x06, 0x30, 0x53, 0x42, 0x5a, 0xf5, - 0x6b, 0x4e, 0x23, 0xb9, 0xd5, 0x1e, 0x70, 0xea, 0x1e, 0xce, 0x38, 0xd6, 0x60, 0x64, 0xc1, 0x69, - 0x39, 0x37, 0xdd, 0x86, 0x1b, 0xb9, 0x24, 0x44, 0x4f, 0x40, 0xd1, 0xa9, 0xd7, 0x99, 0x74, 0x57, - 0x9a, 0x3f, 0x7e, 0x77, 0xbf, 0x5c, 0x9c, 0xab, 0x53, 0x31, 0x03, 0x14, 0xd5, 0x1e, 0xa6, 0x14, - 0xe8, 0x29, 0xe8, 0xab, 0x07, 0x7e, 0x6b, 0xba, 0xc0, 0x28, 0xe9, 0x2a, 0xef, 0x5b, 0x0c, 0xfc, - 0x56, 0x82, 0x94, 0xd1, 0xd8, 0xbf, 0x55, 0x80, 0x53, 0x0b, 0xa4, 0xb5, 0xbd, 0x5c, 0xcd, 0x39, - 0x2f, 0xce, 0xc3, 0x50, 0xd3, 0xf7, 0xdc, 0xc8, 0x0f, 0x42, 0x51, 0x35, 0x9b, 0x11, 0x6b, 0x02, - 0x86, 0x15, 0x16, 0x9d, 0x85, 0xbe, 0x56, 0x2c, 0xc4, 0x8e, 0x48, 0x01, 0x98, 0x89, 0xaf, 0x0c, - 0x43, 0x29, 0xda, 0x21, 0x09, 0xc4, 0x8c, 0x51, 0x14, 0xd7, 0x42, 0x12, 0x60, 0x86, 0x89, 0x25, - 0x01, 0x2a, 0x23, 0x88, 0x13, 0x21, 0x21, 0x09, 0x50, 0x0c, 0xd6, 0xa8, 0x50, 0x05, 0x4a, 0x61, - 0x62, 0x64, 0x7b, 0x5a, 0x9a, 0xa3, 0x4c, 0x54, 0x50, 0x23, 0x19, 0x33, 0x31, 0x4e, 0xb0, 0x81, - 0xae, 0xa2, 0xc2, 0xd7, 0x0b, 0x80, 0x78, 0x17, 0x7e, 0x9b, 0x75, 0xdc, 0xb5, 0x74, 0xc7, 0xf5, - 0xbe, 0x24, 0x0e, 0xab, 0xf7, 0xfe, 0x8b, 0x05, 0xa7, 0x16, 0x5c, 0xaf, 0x4e, 0x82, 0x9c, 0x09, - 0xf8, 0x60, 0xee, 0xce, 0x07, 0x13, 0x52, 0x8c, 0x29, 0xd6, 0x77, 0x08, 0x53, 0xcc, 0xfe, 0x4b, - 0x0b, 0x10, 0xff, 0xec, 0x77, 0xdd, 0xc7, 0x5e, 0x4b, 0x7f, 0xec, 0x21, 0x4c, 0x0b, 0xfb, 0xef, - 0x5a, 0x30, 0xbc, 0xd0, 0x70, 0xdc, 0xa6, 0xf8, 0xd4, 0x05, 0x98, 0x94, 0x8a, 0x22, 0x06, 0xd6, - 0x64, 0x7f, 0xba, 0xb9, 0x4d, 0xe2, 0x24, 0x12, 0xa7, 0xe9, 0xd1, 0xc7, 0xe1, 0xa4, 0x01, 0xdc, - 0x20, 0xcd, 0x56, 0xc3, 0x89, 0xf4, 0x5b, 0x01, 0x3b, 0xfd, 0x71, 0x1e, 0x11, 0xce, 0x2f, 0x6f, - 0xaf, 0xc2, 0xd8, 0x42, 0xc3, 0x25, 0x5e, 0xb4, 0x52, 0x59, 0xf0, 0xbd, 0x4d, 0x77, 0x0b, 0xbd, - 0x0c, 0x63, 0x91, 0xdb, 0x24, 0x7e, 0x3b, 0xaa, 0x92, 0x9a, 0xef, 0xb1, 0xbb, 0xb6, 0x75, 0xbe, - 0x7f, 0x1e, 0xdd, 0xdd, 0x2f, 0x8f, 0x6d, 0x18, 0x18, 0x9c, 0xa0, 0xb4, 0x7f, 0x9a, 0xee, 0xb4, - 0x8d, 0x76, 0x18, 0x91, 0x60, 0x23, 0x68, 0x87, 0xd1, 0x7c, 0x9b, 0x4a, 0xcb, 0x95, 0xc0, 0xa7, - 0x1d, 0xe8, 0xfa, 0x1e, 0x3a, 0x65, 0x28, 0x10, 0x86, 0xa4, 0xf2, 0x40, 0x28, 0x0a, 0x66, 0x01, - 
0x42, 0x77, 0xcb, 0x23, 0x81, 0xf6, 0x69, 0x63, 0x6c, 0x71, 0x2b, 0x28, 0xd6, 0x28, 0x50, 0x03, - 0x46, 0x1b, 0xce, 0x4d, 0xd2, 0xa8, 0x92, 0x06, 0xa9, 0x45, 0x7e, 0x20, 0x54, 0x20, 0xcf, 0xf7, - 0x76, 0x73, 0x59, 0xd5, 0x8b, 0xce, 0x4f, 0xde, 0xdd, 0x2f, 0x8f, 0x1a, 0x20, 0x6c, 0x32, 0xa7, - 0x9b, 0x9d, 0xdf, 0xa2, 0x5f, 0xe1, 0x34, 0xf4, 0xeb, 0xf2, 0x55, 0x01, 0xc3, 0x0a, 0xab, 0x36, - 0xbb, 0xbe, 0xbc, 0xcd, 0xce, 0xfe, 0x63, 0xba, 0x34, 0xfc, 0x66, 0xcb, 0xf7, 0x88, 0x17, 0x2d, - 0xf8, 0x5e, 0x9d, 0x2b, 0xaf, 0x5e, 0x86, 0xbe, 0x88, 0x4e, 0x75, 0xde, 0x3d, 0xe7, 0x64, 0x41, - 0x3a, 0xc1, 0xef, 0xed, 0x97, 0x4f, 0xa4, 0x4b, 0xb0, 0x25, 0xc0, 0xca, 0xa0, 0x0f, 0xc1, 0x40, - 0x18, 0x39, 0x51, 0x3b, 0x14, 0x1d, 0xf7, 0x88, 0x5c, 0x28, 0x55, 0x06, 0xbd, 0xb7, 0x5f, 0x1e, - 0x57, 0xc5, 0x38, 0x08, 0x8b, 0x02, 0xe8, 0x49, 0x18, 0x6c, 0x92, 0x30, 0x74, 0xb6, 0xa4, 0xa0, - 0x33, 0x2e, 0xca, 0x0e, 0xae, 0x71, 0x30, 0x96, 0x78, 0xf4, 0x28, 0xf4, 0x93, 0x20, 0xf0, 0x03, - 0xf1, 0x6d, 0xa3, 0x82, 0xb0, 0x7f, 0x89, 0x02, 0x31, 0xc7, 0xd9, 0xff, 0xd2, 0x82, 0x71, 0xd5, - 0x56, 0x5e, 0xd7, 0x11, 0x5c, 0x30, 0xdf, 0x04, 0xa8, 0xc9, 0x0f, 0x0c, 0x99, 0x60, 0x30, 0xfc, - 0xdc, 0xb9, 0x4c, 0x19, 0x2c, 0xd5, 0x8d, 0x31, 0x67, 0x05, 0x0a, 0xb1, 0xc6, 0xcd, 0xfe, 0x75, - 0x0b, 0xa6, 0x12, 0x5f, 0xb4, 0xea, 0x86, 0x11, 0x7a, 0x2b, 0xf5, 0x55, 0xb3, 0x3d, 0x4e, 0x3e, - 0x37, 0xe4, 0xdf, 0xa4, 0x76, 0x29, 0x09, 0xd1, 0xbe, 0xe8, 0x32, 0xf4, 0xbb, 0x11, 0x69, 0xca, - 0x8f, 0x79, 0xb4, 0xe3, 0xc7, 0xf0, 0x56, 0xc5, 0x23, 0xb2, 0x42, 0x4b, 0x62, 0xce, 0xc0, 0xfe, - 0xad, 0x22, 0x94, 0xf8, 0xfa, 0x5e, 0x73, 0x5a, 0x47, 0x30, 0x16, 0x4f, 0x43, 0xc9, 0x6d, 0x36, - 0xdb, 0x91, 0x73, 0x53, 0x9c, 0xd4, 0x43, 0x7c, 0xd7, 0x5c, 0x91, 0x40, 0x1c, 0xe3, 0xd1, 0x0a, - 0xf4, 0xb1, 0xa6, 0xf0, 0xaf, 0x7c, 0x22, 0xfb, 0x2b, 0x45, 0xdb, 0x67, 0x17, 0x9d, 0xc8, 0xe1, - 0x42, 0xb2, 0x5a, 0x57, 0x14, 0x84, 0x19, 0x0b, 0xe4, 0x00, 0xdc, 0x74, 0x3d, 0x27, 0xd8, 0xa3, - 0xb0, 0xe9, 0x22, 0x63, 0xf8, 0x6c, 0x67, 0x86, 0xf3, 0x8a, 0x9e, 0xb3, 0x55, 0x1f, 0x16, 0x23, - 0xb0, 0xc6, 0x74, 0xe6, 0x83, 0x50, 0x52, 0xc4, 0x07, 0x91, 0x75, 0x67, 0x3e, 0x0c, 0xe3, 0x89, - 0xba, 0xba, 0x15, 0x1f, 0xd1, 0x45, 0xe5, 0x5f, 0x65, 0x5b, 0x86, 0x68, 0xf5, 0x92, 0xb7, 0x2b, - 0x8e, 0x98, 0x3b, 0x70, 0xac, 0x91, 0x71, 0x48, 0x89, 0x71, 0xed, 0xfd, 0x50, 0x3b, 0x25, 0x3e, - 0xfb, 0x58, 0x16, 0x16, 0x67, 0xd6, 0x61, 0xec, 0x88, 0x85, 0x4e, 0x3b, 0x22, 0xdd, 0xef, 0x8e, - 0xa9, 0xc6, 0x5f, 0x21, 0x7b, 0x6a, 0x53, 0xfd, 0x56, 0x36, 0xff, 0x34, 0xef, 0x7d, 0xbe, 0x5d, - 0x0e, 0x0b, 0x06, 0xc5, 0x2b, 0x64, 0x8f, 0x0f, 0x85, 0xfe, 0x75, 0xc5, 0x8e, 0x5f, 0xf7, 0x55, - 0x0b, 0x46, 0xd5, 0xd7, 0x1d, 0xc1, 0xbe, 0x30, 0x6f, 0xee, 0x0b, 0xa7, 0x3b, 0x4e, 0xf0, 0x9c, - 0x1d, 0xe1, 0xeb, 0x05, 0x38, 0xa9, 0x68, 0xe8, 0xb5, 0x8f, 0xff, 0x11, 0xb3, 0xea, 0x02, 0x94, - 0x3c, 0xa5, 0x00, 0xb5, 0x4c, 0xcd, 0x63, 0xac, 0xfe, 0x8c, 0x69, 0xe8, 0x91, 0xe7, 0xc5, 0x87, - 0xf6, 0x88, 0x6e, 0x19, 0x10, 0x87, 0xfb, 0x3c, 0x14, 0xdb, 0x6e, 0x5d, 0x1c, 0x30, 0xef, 0x97, - 0xbd, 0x7d, 0x6d, 0x65, 0xf1, 0xde, 0x7e, 0xf9, 0x91, 0x3c, 0xab, 0x14, 0x3d, 0xd9, 0xc2, 0xd9, - 0x6b, 0x2b, 0x8b, 0x98, 0x16, 0x46, 0x73, 0x30, 0x2e, 0x45, 0x99, 0xeb, 0x54, 0x92, 0xf6, 0x3d, - 0x71, 0x0e, 0x29, 0xf5, 0x3e, 0x36, 0xd1, 0x38, 0x49, 0x8f, 0x16, 0x61, 0x62, 0xa7, 0x7d, 0x93, - 0x34, 0x48, 0xc4, 0x3f, 0xf8, 0x0a, 0xe1, 0xca, 0xef, 0x52, 0x7c, 0xe9, 0xbe, 0x92, 0xc0, 0xe3, - 0x54, 0x09, 0xfb, 0x6f, 0xd8, 0x79, 0x20, 0x7a, 0x4f, 0x93, 0x6f, 0xbe, 0x95, 0xd3, 0xb9, 0x97, - 0x59, 0x71, 0x85, 0xec, 
0x6d, 0xf8, 0x54, 0x0e, 0xc9, 0x9e, 0x15, 0xc6, 0x9c, 0xef, 0xeb, 0x38, - 0xe7, 0x7f, 0xb1, 0x00, 0xc7, 0x55, 0x0f, 0x18, 0xf2, 0xfd, 0xb7, 0x7b, 0x1f, 0x5c, 0x84, 0xe1, - 0x3a, 0xd9, 0x74, 0xda, 0x8d, 0x48, 0x59, 0x62, 0xfa, 0xb9, 0x35, 0x6e, 0x31, 0x06, 0x63, 0x9d, - 0xe6, 0x00, 0xdd, 0xf6, 0xf3, 0xa3, 0xec, 0x20, 0x8e, 0x1c, 0x3a, 0xc7, 0xd5, 0xaa, 0xb1, 0x72, - 0x57, 0xcd, 0xa3, 0xd0, 0xef, 0x36, 0xa9, 0x60, 0x56, 0x30, 0xe5, 0xad, 0x15, 0x0a, 0xc4, 0x1c, - 0x87, 0x1e, 0x87, 0xc1, 0x9a, 0xdf, 0x6c, 0x3a, 0x5e, 0x9d, 0x1d, 0x79, 0xa5, 0xf9, 0x61, 0x2a, - 0xbb, 0x2d, 0x70, 0x10, 0x96, 0x38, 0x2a, 0x7c, 0x3b, 0xc1, 0x16, 0x57, 0x4f, 0x09, 0xe1, 0x7b, - 0x2e, 0xd8, 0x0a, 0x31, 0x83, 0xd2, 0xdb, 0xf5, 0x2d, 0x3f, 0xd8, 0x71, 0xbd, 0xad, 0x45, 0x37, - 0x10, 0x4b, 0x42, 0x9d, 0x85, 0x37, 0x14, 0x06, 0x6b, 0x54, 0x68, 0x19, 0xfa, 0x5b, 0x7e, 0x10, - 0x85, 0xd3, 0x03, 0xac, 0xbb, 0x1f, 0xc9, 0xd9, 0x88, 0xf8, 0xd7, 0x56, 0xfc, 0x20, 0x8a, 0x3f, - 0x80, 0xfe, 0x0b, 0x31, 0x2f, 0x8e, 0x56, 0x61, 0x90, 0x78, 0xbb, 0xcb, 0x81, 0xdf, 0x9c, 0x9e, - 0xca, 0xe7, 0xb4, 0xc4, 0x49, 0xf8, 0x34, 0x8b, 0x65, 0x54, 0x01, 0xc6, 0x92, 0x05, 0xfa, 0x10, - 0x14, 0x89, 0xb7, 0x3b, 0x3d, 0xc8, 0x38, 0xcd, 0xe4, 0x70, 0xba, 0xee, 0x04, 0xf1, 0x9e, 0xbf, - 0xe4, 0xed, 0x62, 0x5a, 0x06, 0x7d, 0x0c, 0x4a, 0x72, 0xc3, 0x08, 0x85, 0xde, 0x37, 0x73, 0xc2, - 0xca, 0x6d, 0x06, 0x93, 0xb7, 0xdb, 0x6e, 0x40, 0x9a, 0xc4, 0x8b, 0xc2, 0x78, 0x87, 0x94, 0xd8, - 0x10, 0xc7, 0xdc, 0x50, 0x0d, 0x46, 0x02, 0x12, 0xba, 0x77, 0x48, 0xc5, 0x6f, 0xb8, 0xb5, 0xbd, - 0xe9, 0x87, 0x58, 0xf3, 0x9e, 0xec, 0xd8, 0x65, 0x58, 0x2b, 0x10, 0xdb, 0x25, 0x74, 0x28, 0x36, - 0x98, 0xa2, 0x37, 0x60, 0x34, 0x20, 0x61, 0xe4, 0x04, 0x91, 0xa8, 0x65, 0x5a, 0xd9, 0x11, 0x47, - 0xb1, 0x8e, 0xe0, 0xd7, 0x89, 0xb8, 0x9a, 0x18, 0x83, 0x4d, 0x0e, 0xe8, 0x63, 0xd2, 0x48, 0xb2, - 0xe6, 0xb7, 0xbd, 0x28, 0x9c, 0x2e, 0xb1, 0x76, 0x67, 0x9a, 0xaf, 0xaf, 0xc7, 0x74, 0x49, 0x2b, - 0x0a, 0x2f, 0x8c, 0x0d, 0x56, 0xe8, 0x13, 0x30, 0xca, 0xff, 0x73, 0x23, 0x70, 0x38, 0x7d, 0x9c, - 0xf1, 0x3e, 0x9b, 0xcf, 0x9b, 0x13, 0xce, 0x1f, 0x17, 0xcc, 0x47, 0x75, 0x68, 0x88, 0x4d, 0x6e, - 0x08, 0xc3, 0x68, 0xc3, 0xdd, 0x25, 0x1e, 0x09, 0xc3, 0x4a, 0xe0, 0xdf, 0x24, 0x42, 0xa7, 0x7d, - 0x32, 0xdb, 0x68, 0xec, 0xdf, 0x24, 0xe2, 0x12, 0xa8, 0x97, 0xc1, 0x26, 0x0b, 0x74, 0x0d, 0xc6, - 0x02, 0xe2, 0xd4, 0xdd, 0x98, 0xe9, 0x70, 0x37, 0xa6, 0xec, 0xe2, 0x8c, 0x8d, 0x42, 0x38, 0xc1, - 0x04, 0x5d, 0x85, 0x11, 0xd6, 0xe7, 0xed, 0x16, 0x67, 0x7a, 0xa2, 0x1b, 0x53, 0xe6, 0x73, 0x50, - 0xd5, 0x8a, 0x60, 0x83, 0x01, 0x7a, 0x1d, 0x4a, 0x0d, 0x77, 0x93, 0xd4, 0xf6, 0x6a, 0x0d, 0x32, - 0x3d, 0xc2, 0xb8, 0x65, 0x6e, 0x86, 0xab, 0x92, 0x88, 0xcb, 0xe7, 0xea, 0x2f, 0x8e, 0x8b, 0xa3, - 0xeb, 0x70, 0x22, 0x22, 0x41, 0xd3, 0xf5, 0x1c, 0xba, 0x89, 0x89, 0x2b, 0x21, 0xb3, 0xe5, 0x8f, - 0xb2, 0xd9, 0x75, 0x46, 0x8c, 0xc6, 0x89, 0x8d, 0x4c, 0x2a, 0x9c, 0x53, 0x1a, 0xdd, 0x86, 0xe9, - 0x0c, 0x0c, 0x9f, 0xb7, 0xc7, 0x18, 0xe7, 0x57, 0x05, 0xe7, 0xe9, 0x8d, 0x1c, 0xba, 0x7b, 0x1d, - 0x70, 0x38, 0x97, 0x3b, 0xba, 0x0a, 0xe3, 0x6c, 0xe7, 0xac, 0xb4, 0x1b, 0x0d, 0x51, 0xe1, 0x18, - 0xab, 0xf0, 0x71, 0x29, 0x47, 0xac, 0x98, 0xe8, 0x7b, 0xfb, 0x65, 0x88, 0xff, 0xe1, 0x64, 0x69, - 0x74, 0x93, 0x99, 0x8d, 0xdb, 0x81, 0x1b, 0xed, 0xd1, 0x55, 0x45, 0x6e, 0x47, 0xd3, 0xe3, 0x1d, - 0x55, 0x68, 0x3a, 0xa9, 0xb2, 0x2d, 0xeb, 0x40, 0x9c, 0x64, 0x48, 0x8f, 0x82, 0x30, 0xaa, 0xbb, - 0xde, 0xf4, 0x04, 0xbf, 0x4f, 0xc9, 0x9d, 0xb4, 0x4a, 0x81, 0x98, 0xe3, 0x98, 0xc9, 0x98, 0xfe, - 0xb8, 0x4a, 0x4f, 0xdc, 0x49, 0x46, 0x18, 0x9b, 
0x8c, 0x25, 0x02, 0xc7, 0x34, 0x54, 0x08, 0x8e, - 0xa2, 0xbd, 0x69, 0xc4, 0x48, 0xd5, 0x86, 0xb8, 0xb1, 0xf1, 0x31, 0x4c, 0xe1, 0xf6, 0x4d, 0x18, - 0x53, 0xdb, 0x04, 0xeb, 0x13, 0x54, 0x86, 0x7e, 0x26, 0xf6, 0x09, 0x85, 0x6f, 0x89, 0x36, 0x81, - 0x89, 0x84, 0x98, 0xc3, 0x59, 0x13, 0xdc, 0x3b, 0x64, 0x7e, 0x2f, 0x22, 0x5c, 0x17, 0x51, 0xd4, - 0x9a, 0x20, 0x11, 0x38, 0xa6, 0xb1, 0xff, 0x27, 0x17, 0x9f, 0xe3, 0x53, 0xa2, 0x87, 0x73, 0xf1, - 0x19, 0x18, 0xda, 0xf6, 0xc3, 0x88, 0x52, 0xb3, 0x3a, 0xfa, 0x63, 0x81, 0xf9, 0xb2, 0x80, 0x63, - 0x45, 0x81, 0x5e, 0x81, 0xd1, 0x9a, 0x5e, 0x81, 0x38, 0xd4, 0xd5, 0x36, 0x62, 0xd4, 0x8e, 0x4d, - 0x5a, 0xf4, 0x12, 0x0c, 0x31, 0x37, 0xa8, 0x9a, 0xdf, 0x10, 0xd2, 0xa6, 0x94, 0x4c, 0x86, 0x2a, - 0x02, 0x7e, 0x4f, 0xfb, 0x8d, 0x15, 0x35, 0x3a, 0x07, 0x03, 0xb4, 0x09, 0x2b, 0x15, 0x71, 0x9c, - 0x2a, 0xdd, 0xe5, 0x65, 0x06, 0xc5, 0x02, 0x6b, 0xff, 0xba, 0xc5, 0x64, 0xa9, 0xf4, 0x9e, 0x8f, - 0x2e, 0xb3, 0x43, 0x83, 0x9d, 0x20, 0x9a, 0xee, 0xf0, 0x31, 0xed, 0x24, 0x50, 0xb8, 0x7b, 0x89, - 0xff, 0xd8, 0x28, 0x89, 0xde, 0x4c, 0x9e, 0x0c, 0x5c, 0xa0, 0x78, 0x41, 0x76, 0x41, 0xf2, 0x74, - 0x78, 0x38, 0x3e, 0xe2, 0x68, 0x7b, 0x3a, 0x1d, 0x11, 0xf6, 0xff, 0x55, 0xd0, 0x66, 0x49, 0x35, - 0x72, 0x22, 0x82, 0x2a, 0x30, 0x78, 0xcb, 0x71, 0x23, 0xd7, 0xdb, 0x12, 0x72, 0x5f, 0xe7, 0x83, - 0x8e, 0x15, 0xba, 0xc1, 0x0b, 0x70, 0xe9, 0x45, 0xfc, 0xc1, 0x92, 0x0d, 0xe5, 0x18, 0xb4, 0x3d, - 0x8f, 0x72, 0x2c, 0xf4, 0xca, 0x11, 0xf3, 0x02, 0x9c, 0xa3, 0xf8, 0x83, 0x25, 0x1b, 0xf4, 0x16, - 0x80, 0xdc, 0x21, 0x48, 0x5d, 0xe8, 0x0e, 0x9f, 0xe9, 0xce, 0x74, 0x43, 0x95, 0xe1, 0xca, 0xc9, - 0xf8, 0x3f, 0xd6, 0xf8, 0xd9, 0x91, 0x36, 0xa6, 0x7a, 0x63, 0xd0, 0xc7, 0xe9, 0x12, 0x75, 0x82, - 0x88, 0xd4, 0xe7, 0x22, 0xd1, 0x39, 0x4f, 0xf5, 0x76, 0x39, 0xdc, 0x70, 0x9b, 0x44, 0x5f, 0xce, - 0x82, 0x09, 0x8e, 0xf9, 0xd9, 0xbf, 0x5c, 0x84, 0xe9, 0xbc, 0xe6, 0xd2, 0x45, 0x43, 0x6e, 0xbb, - 0xd1, 0x02, 0x15, 0x6b, 0x2d, 0x73, 0xd1, 0x2c, 0x09, 0x38, 0x56, 0x14, 0x74, 0xf6, 0x86, 0xee, - 0x96, 0xbc, 0xdb, 0xf7, 0xc7, 0xb3, 0xb7, 0xca, 0xa0, 0x58, 0x60, 0x29, 0x5d, 0x40, 0x9c, 0x50, - 0xf8, 0xe7, 0x69, 0xb3, 0x1c, 0x33, 0x28, 0x16, 0x58, 0x5d, 0xcb, 0xd8, 0xd7, 0x45, 0xcb, 0x68, - 0x74, 0x51, 0xff, 0xe1, 0x76, 0x11, 0xfa, 0x24, 0xc0, 0xa6, 0xeb, 0xb9, 0xe1, 0x36, 0xe3, 0x3e, - 0x70, 0x60, 0xee, 0x4a, 0x28, 0x5e, 0x56, 0x5c, 0xb0, 0xc6, 0x11, 0xbd, 0x08, 0xc3, 0x6a, 0x03, - 0x59, 0x59, 0x64, 0xce, 0x0a, 0x9a, 0xf3, 0x57, 0xbc, 0x9b, 0x2e, 0x62, 0x9d, 0xce, 0xfe, 0x74, - 0x72, 0xbe, 0x88, 0x15, 0xa0, 0xf5, 0xaf, 0xd5, 0x6b, 0xff, 0x16, 0x3a, 0xf7, 0xaf, 0xfd, 0xcd, - 0x01, 0x18, 0x37, 0x2a, 0x6b, 0x87, 0x3d, 0xec, 0xb9, 0x97, 0xe8, 0x01, 0xe4, 0x44, 0x44, 0xac, - 0x3f, 0xbb, 0xfb, 0x52, 0xd1, 0x0f, 0x29, 0xba, 0x02, 0x78, 0x79, 0xf4, 0x49, 0x28, 0x35, 0x9c, - 0x90, 0x69, 0x2c, 0x89, 0x58, 0x77, 0xbd, 0x30, 0x8b, 0x2f, 0x84, 0x4e, 0x18, 0x69, 0xa7, 0x3e, - 0xe7, 0x1d, 0xb3, 0xa4, 0x27, 0x25, 0x95, 0xaf, 0xa4, 0x03, 0xa8, 0x6a, 0x04, 0x15, 0xc2, 0xf6, - 0x30, 0xc7, 0xa1, 0x97, 0xd8, 0xd6, 0x4a, 0x67, 0xc5, 0x02, 0x95, 0x46, 0xd9, 0x34, 0xeb, 0x37, - 0x84, 0x6c, 0x85, 0xc3, 0x06, 0x65, 0x7c, 0x27, 0x1b, 0xe8, 0x70, 0x27, 0x7b, 0x12, 0x06, 0xd9, - 0x0f, 0x35, 0x03, 0xd4, 0x68, 0xac, 0x70, 0x30, 0x96, 0xf8, 0xe4, 0x84, 0x19, 0xea, 0x6d, 0xc2, - 0xd0, 0x5b, 0x9f, 0x98, 0xd4, 0xcc, 0x51, 0x64, 0x88, 0xef, 0x72, 0x62, 0xca, 0x63, 0x89, 0x43, - 0x3f, 0x63, 0x01, 0x72, 0x1a, 0xf4, 0xb6, 0x4c, 0xc1, 0xea, 0x72, 0x03, 0x4c, 0xd4, 0x7e, 0xa5, - 0x6b, 0xb7, 0xb7, 0xc3, 0xd9, 0xb9, 0x54, 0x69, 0xae, 0x29, 0x7d, 0x59, 
0x34, 0x11, 0xa5, 0x09, - 0xf4, 0xc3, 0x68, 0xd5, 0x0d, 0xa3, 0xcf, 0xfd, 0x49, 0xe2, 0x70, 0xca, 0x68, 0x12, 0xba, 0xa6, - 0x5f, 0xbe, 0x86, 0x0f, 0x78, 0xf9, 0x1a, 0xcd, 0xbb, 0x78, 0xcd, 0xb4, 0xe1, 0xa1, 0x9c, 0x2f, - 0xc8, 0xd0, 0xbf, 0x2e, 0xea, 0xfa, 0xd7, 0x2e, 0x5a, 0xbb, 0x59, 0x59, 0xc7, 0xec, 0x1b, 0x6d, - 0xc7, 0x8b, 0xdc, 0x68, 0x4f, 0xd7, 0xd7, 0x3e, 0x05, 0x63, 0x8b, 0x0e, 0x69, 0xfa, 0xde, 0x92, - 0x57, 0x6f, 0xf9, 0xae, 0x17, 0xa1, 0x69, 0xe8, 0x63, 0xc2, 0x07, 0xdf, 0x7a, 0xfb, 0x68, 0xef, - 0x61, 0x06, 0xb1, 0xb7, 0xe0, 0xf8, 0xa2, 0x7f, 0xcb, 0xbb, 0xe5, 0x04, 0xf5, 0xb9, 0xca, 0x8a, - 0xa6, 0x4f, 0x5a, 0x97, 0xfa, 0x0c, 0x2b, 0xff, 0xb6, 0xa8, 0x95, 0xe4, 0xd7, 0xa1, 0x65, 0xb7, - 0x41, 0x72, 0xb4, 0x7e, 0xff, 0x6f, 0xc1, 0xa8, 0x29, 0xa6, 0x57, 0x36, 0x2b, 0x2b, 0xd7, 0x40, - 0xff, 0x06, 0x0c, 0x6d, 0xba, 0xa4, 0x51, 0xc7, 0x64, 0x53, 0xf4, 0xce, 0x13, 0xf9, 0x2e, 0x7c, - 0xcb, 0x94, 0x52, 0x19, 0xd7, 0x98, 0x36, 0x64, 0x59, 0x14, 0xc6, 0x8a, 0x0d, 0xda, 0x81, 0x09, - 0xd9, 0x87, 0x12, 0x2b, 0xf6, 0x83, 0x27, 0x3b, 0x0d, 0xbc, 0xc9, 0xfc, 0xd8, 0xdd, 0xfd, 0xf2, - 0x04, 0x4e, 0xb0, 0xc1, 0x29, 0xc6, 0xe8, 0x14, 0xf4, 0x35, 0xe9, 0xc9, 0xd7, 0xc7, 0xba, 0x9f, - 0xa9, 0x3f, 0x98, 0x26, 0x87, 0x41, 0xed, 0x1f, 0xb3, 0xe0, 0xa1, 0x54, 0xcf, 0x08, 0x8d, 0xd6, - 0x21, 0x8f, 0x42, 0x52, 0xc3, 0x54, 0xe8, 0xae, 0x61, 0xb2, 0xff, 0x8e, 0x05, 0xc7, 0x96, 0x9a, - 0xad, 0x68, 0x6f, 0xd1, 0x35, 0xad, 0xe9, 0x1f, 0x84, 0x81, 0x26, 0xa9, 0xbb, 0xed, 0xa6, 0x18, - 0xb9, 0xb2, 0x3c, 0x1d, 0xd6, 0x18, 0xf4, 0xde, 0x7e, 0x79, 0xb4, 0x1a, 0xf9, 0x81, 0xb3, 0x45, - 0x38, 0x00, 0x0b, 0x72, 0x76, 0xc6, 0xba, 0x77, 0xc8, 0xaa, 0xdb, 0x74, 0xa3, 0xfb, 0x9b, 0xed, - 0xc2, 0x10, 0x2e, 0x99, 0xe0, 0x98, 0x9f, 0xfd, 0x0d, 0x0b, 0xc6, 0xe5, 0xbc, 0x9f, 0xab, 0xd7, - 0x03, 0x12, 0x86, 0x68, 0x06, 0x0a, 0x6e, 0x4b, 0xb4, 0x12, 0x44, 0x2b, 0x0b, 0x2b, 0x15, 0x5c, - 0x70, 0x5b, 0x52, 0x9c, 0x67, 0x07, 0x50, 0xd1, 0xf4, 0x09, 0xb8, 0x2c, 0xe0, 0x58, 0x51, 0xa0, - 0xf3, 0x30, 0xe4, 0xf9, 0x75, 0x2e, 0x11, 0x0b, 0x1b, 0x2b, 0xa5, 0x5c, 0x17, 0x30, 0xac, 0xb0, - 0xa8, 0x02, 0x25, 0xee, 0x31, 0x1a, 0x4f, 0xda, 0x9e, 0xfc, 0x4e, 0xd9, 0x97, 0x6d, 0xc8, 0x92, - 0x38, 0x66, 0x62, 0xff, 0xa6, 0x05, 0x23, 0xf2, 0xcb, 0x7a, 0xbc, 0xab, 0xd0, 0xa5, 0x15, 0xdf, - 0x53, 0xe2, 0xa5, 0x45, 0xef, 0x1a, 0x0c, 0x63, 0x5c, 0x31, 0x8a, 0x07, 0xba, 0x62, 0x5c, 0x84, - 0x61, 0xa7, 0xd5, 0xaa, 0x98, 0xf7, 0x13, 0x36, 0x95, 0xe6, 0x62, 0x30, 0xd6, 0x69, 0xec, 0x1f, - 0x2d, 0xc0, 0x98, 0xfc, 0x82, 0x6a, 0xfb, 0x66, 0x48, 0x22, 0xb4, 0x01, 0x25, 0x87, 0x8f, 0x12, - 0x91, 0x93, 0xfc, 0xd1, 0x6c, 0xbd, 0x99, 0x31, 0xa4, 0xb1, 0xa0, 0x35, 0x27, 0x4b, 0xe3, 0x98, - 0x11, 0x6a, 0xc0, 0xa4, 0xe7, 0x47, 0xec, 0xd0, 0x55, 0xf8, 0x4e, 0xa6, 0xcc, 0x24, 0xf7, 0x93, - 0x82, 0xfb, 0xe4, 0x7a, 0x92, 0x0b, 0x4e, 0x33, 0x46, 0x4b, 0x52, 0x17, 0x59, 0xcc, 0x57, 0x22, - 0xe9, 0x03, 0x97, 0xad, 0x8a, 0xb4, 0x7f, 0xcd, 0x82, 0x92, 0x24, 0x3b, 0x0a, 0xab, 0xf5, 0x1a, - 0x0c, 0x86, 0x6c, 0x10, 0x64, 0xd7, 0xd8, 0x9d, 0x1a, 0xce, 0xc7, 0x2b, 0x96, 0x25, 0xf8, 0xff, - 0x10, 0x4b, 0x1e, 0xcc, 0x14, 0xa5, 0x9a, 0xff, 0x2e, 0x31, 0x45, 0xa9, 0xf6, 0xe4, 0x1c, 0x4a, - 0x7f, 0xc6, 0xda, 0xac, 0xe9, 0x76, 0xa9, 0xc8, 0xdb, 0x0a, 0xc8, 0xa6, 0x7b, 0x3b, 0x29, 0xf2, - 0x56, 0x18, 0x14, 0x0b, 0x2c, 0x7a, 0x0b, 0x46, 0x6a, 0xd2, 0x06, 0x11, 0xaf, 0xf0, 0x73, 0x1d, - 0xed, 0x61, 0xca, 0x74, 0xca, 0x75, 0x68, 0x0b, 0x5a, 0x79, 0x6c, 0x70, 0x33, 0x3d, 0xa2, 0x8a, - 0xdd, 0x3c, 0xa2, 0x62, 0xbe, 0xf9, 0xfe, 0x41, 0x3f, 0x6e, 0xc1, 0x00, 0xd7, 0x3d, 0xf7, 0xa6, - 
0xfa, 0xd7, 0x2c, 0xc9, 0x71, 0xdf, 0x5d, 0xa7, 0x40, 0x21, 0x69, 0xa0, 0x35, 0x28, 0xb1, 0x1f, - 0x4c, 0x77, 0x5e, 0xcc, 0x7f, 0xb0, 0xc4, 0x6b, 0xd5, 0x1b, 0x78, 0x5d, 0x16, 0xc3, 0x31, 0x07, - 0xfb, 0x47, 0x8a, 0x74, 0x77, 0x8b, 0x49, 0x8d, 0x43, 0xdf, 0x7a, 0x70, 0x87, 0x7e, 0xe1, 0x41, - 0x1d, 0xfa, 0x5b, 0x30, 0x5e, 0xd3, 0xec, 0xce, 0xf1, 0x48, 0x9e, 0xef, 0x38, 0x49, 0x34, 0x13, - 0x35, 0xd7, 0xce, 0x2d, 0x98, 0x4c, 0x70, 0x92, 0x2b, 0xfa, 0x38, 0x8c, 0xf0, 0x71, 0x16, 0xb5, - 0x70, 0xa7, 0xb2, 0xc7, 0xf3, 0xe7, 0x8b, 0x5e, 0x05, 0xd7, 0xe6, 0x6a, 0xc5, 0xb1, 0xc1, 0xcc, - 0xfe, 0x2b, 0x0b, 0xd0, 0x52, 0x6b, 0x9b, 0x34, 0x49, 0xe0, 0x34, 0x62, 0xf3, 0xd1, 0x17, 0x2d, - 0x98, 0x26, 0x29, 0xf0, 0x82, 0xdf, 0x6c, 0x8a, 0xcb, 0x62, 0x8e, 0x3e, 0x63, 0x29, 0xa7, 0x8c, - 0x7a, 0xd1, 0x35, 0x9d, 0x47, 0x81, 0x73, 0xeb, 0x43, 0x6b, 0x30, 0xc5, 0x4f, 0x49, 0x85, 0xd0, - 0xbc, 0xb8, 0x1e, 0x16, 0x8c, 0xa7, 0x36, 0xd2, 0x24, 0x38, 0xab, 0x9c, 0xfd, 0x6b, 0xa3, 0x90, - 0xdb, 0x8a, 0xf7, 0xec, 0x66, 0xef, 0xd9, 0xcd, 0xde, 0xb3, 0x9b, 0xbd, 0x67, 0x37, 0x7b, 0xcf, - 0x6e, 0xf6, 0x9e, 0xdd, 0xec, 0x5d, 0x6a, 0x37, 0xfb, 0xbf, 0x2d, 0x38, 0xae, 0x8e, 0x2f, 0xe3, - 0xc2, 0xfe, 0x19, 0x98, 0xe2, 0xcb, 0xcd, 0x70, 0xc6, 0x16, 0xc7, 0xf5, 0xc5, 0xcc, 0x99, 0x9b, - 0x78, 0x34, 0x60, 0x14, 0xe4, 0xaf, 0xaf, 0x32, 0x10, 0x38, 0xab, 0x1a, 0xfb, 0x97, 0x87, 0xa0, - 0x7f, 0x69, 0x97, 0x78, 0xd1, 0x11, 0x5c, 0x6d, 0x6a, 0x30, 0xe6, 0x7a, 0xbb, 0x7e, 0x63, 0x97, - 0xd4, 0x39, 0xfe, 0x20, 0x37, 0xf0, 0x13, 0x82, 0xf5, 0xd8, 0x8a, 0xc1, 0x02, 0x27, 0x58, 0x3e, - 0x08, 0xeb, 0xc3, 0x25, 0x18, 0xe0, 0x87, 0x8f, 0x30, 0x3d, 0x64, 0xee, 0xd9, 0xac, 0x13, 0xc5, - 0x91, 0x1a, 0x5b, 0x46, 0xf8, 0xe1, 0x26, 0x8a, 0xa3, 0x4f, 0xc3, 0xd8, 0xa6, 0x1b, 0x84, 0xd1, - 0x86, 0xdb, 0xa4, 0x47, 0x43, 0xb3, 0x75, 0x1f, 0xd6, 0x06, 0xd5, 0x0f, 0xcb, 0x06, 0x27, 0x9c, - 0xe0, 0x8c, 0xb6, 0x60, 0xb4, 0xe1, 0xe8, 0x55, 0x0d, 0x1e, 0xb8, 0x2a, 0x75, 0x3a, 0xac, 0xea, - 0x8c, 0xb0, 0xc9, 0x97, 0x2e, 0xa7, 0x1a, 0x53, 0x98, 0x0f, 0x31, 0x75, 0x86, 0x5a, 0x4e, 0x5c, - 0x53, 0xce, 0x71, 0x54, 0x40, 0x63, 0x8e, 0xec, 0x25, 0x53, 0x40, 0xd3, 0xdc, 0xd5, 0x3f, 0x05, - 0x25, 0x42, 0xbb, 0x90, 0x32, 0x16, 0x07, 0xcc, 0x85, 0xde, 0xda, 0xba, 0xe6, 0xd6, 0x02, 0xdf, - 0xb4, 0xf3, 0x2c, 0x49, 0x4e, 0x38, 0x66, 0x8a, 0x16, 0x60, 0x20, 0x24, 0x81, 0xab, 0x74, 0xc9, - 0x1d, 0x86, 0x91, 0x91, 0xf1, 0xe7, 0x7d, 0xfc, 0x37, 0x16, 0x45, 0xe9, 0xf4, 0x72, 0x98, 0x2a, - 0x96, 0x1d, 0x06, 0xda, 0xf4, 0x9a, 0x63, 0x50, 0x2c, 0xb0, 0xe8, 0x75, 0x18, 0x0c, 0x48, 0x83, - 0x19, 0x12, 0x47, 0x7b, 0x9f, 0xe4, 0xdc, 0x2e, 0xc9, 0xcb, 0x61, 0xc9, 0x00, 0x5d, 0x01, 0x14, - 0x10, 0x2a, 0xe0, 0xb9, 0xde, 0x96, 0x72, 0xef, 0x16, 0x1b, 0xad, 0x12, 0xa4, 0x71, 0x4c, 0x21, - 0x5f, 0x76, 0xe2, 0x8c, 0x62, 0xe8, 0x12, 0x4c, 0x2a, 0xe8, 0x8a, 0x17, 0x46, 0x0e, 0xdd, 0xe0, - 0xc6, 0x19, 0x2f, 0xa5, 0x5f, 0xc1, 0x49, 0x02, 0x9c, 0x2e, 0x63, 0xff, 0x9c, 0x05, 0xbc, 0x9f, - 0x8f, 0x40, 0xab, 0xf0, 0x9a, 0xa9, 0x55, 0x38, 0x99, 0x3b, 0x72, 0x39, 0x1a, 0x85, 0x9f, 0xb3, - 0x60, 0x58, 0x1b, 0xd9, 0x78, 0xce, 0x5a, 0x1d, 0xe6, 0x6c, 0x1b, 0x26, 0xe8, 0x4c, 0xbf, 0x7a, - 0x33, 0x24, 0xc1, 0x2e, 0xa9, 0xb3, 0x89, 0x59, 0xb8, 0xbf, 0x89, 0xa9, 0x5c, 0x49, 0x57, 0x13, - 0x0c, 0x71, 0xaa, 0x0a, 0xfb, 0x53, 0xb2, 0xa9, 0xca, 0xf3, 0xb6, 0xa6, 0xc6, 0x3c, 0xe1, 0x79, - 0xab, 0x46, 0x15, 0xc7, 0x34, 0x74, 0xa9, 0x6d, 0xfb, 0x61, 0x94, 0xf4, 0xbc, 0xbd, 0xec, 0x87, - 0x11, 0x66, 0x18, 0xfb, 0x79, 0x80, 0xa5, 0xdb, 0xa4, 0xc6, 0x67, 0xac, 0x7e, 0xe9, 0xb1, 0xf2, - 0x2f, 0x3d, 0xf6, 0x1f, 
0x58, 0x30, 0xb6, 0xbc, 0x60, 0x9c, 0x5c, 0xb3, 0x00, 0xfc, 0xa6, 0x76, - 0xe3, 0xc6, 0xba, 0x74, 0xff, 0xe0, 0x16, 0x70, 0x05, 0xc5, 0x1a, 0x05, 0x3a, 0x09, 0xc5, 0x46, - 0xdb, 0x13, 0x6a, 0xcf, 0x41, 0x7a, 0x3c, 0xae, 0xb6, 0x3d, 0x4c, 0x61, 0xda, 0xab, 0xae, 0x62, - 0xcf, 0xaf, 0xba, 0xba, 0x46, 0x73, 0x41, 0x65, 0xe8, 0xbf, 0x75, 0xcb, 0xad, 0xf3, 0x37, 0xf3, - 0xc2, 0x35, 0xe5, 0xc6, 0x8d, 0x95, 0xc5, 0x10, 0x73, 0xb8, 0xfd, 0xa5, 0x22, 0xcc, 0x2c, 0x37, - 0xc8, 0xed, 0x77, 0x18, 0x37, 0xa0, 0xd7, 0x37, 0x69, 0x07, 0x53, 0x20, 0x1d, 0xf4, 0xdd, 0x61, - 0xf7, 0xfe, 0xd8, 0x84, 0x41, 0xee, 0x78, 0x2a, 0xa3, 0x08, 0x64, 0x9a, 0xfb, 0xf2, 0x3b, 0x64, - 0x96, 0x3b, 0xb0, 0x0a, 0x73, 0x9f, 0x3a, 0x30, 0x05, 0x14, 0x4b, 0xe6, 0x33, 0x2f, 0xc3, 0x88, - 0x4e, 0x79, 0xa0, 0x17, 0xc0, 0xdf, 0x5b, 0x84, 0x09, 0xda, 0x82, 0x07, 0x3a, 0x10, 0xd7, 0xd2, - 0x03, 0x71, 0xd8, 0xaf, 0x40, 0xbb, 0x8f, 0xc6, 0x5b, 0xc9, 0xd1, 0xb8, 0x98, 0x37, 0x1a, 0x47, - 0x3d, 0x06, 0xdf, 0x67, 0xc1, 0xd4, 0x72, 0xc3, 0xaf, 0xed, 0x24, 0x5e, 0x6a, 0xbe, 0x08, 0xc3, - 0x74, 0x3b, 0x0e, 0x8d, 0xa0, 0x25, 0x46, 0x18, 0x1b, 0x81, 0xc2, 0x3a, 0x9d, 0x56, 0xec, 0xda, - 0xb5, 0x95, 0xc5, 0xac, 0xe8, 0x37, 0x02, 0x85, 0x75, 0x3a, 0xfb, 0xf7, 0x2c, 0x38, 0x7d, 0x69, - 0x61, 0x29, 0x9e, 0x8a, 0xa9, 0x00, 0x3c, 0xe7, 0x60, 0xa0, 0x55, 0xd7, 0x9a, 0x12, 0xab, 0x85, - 0x17, 0x59, 0x2b, 0x04, 0xf6, 0xdd, 0x12, 0x5c, 0xea, 0x1a, 0xc0, 0x25, 0x5c, 0x59, 0x10, 0xfb, - 0xae, 0xb4, 0x02, 0x59, 0xb9, 0x56, 0xa0, 0xc7, 0x61, 0x90, 0x9e, 0x0b, 0x6e, 0x4d, 0xb6, 0x9b, - 0x1b, 0xf4, 0x39, 0x08, 0x4b, 0x9c, 0xfd, 0xb3, 0x16, 0x4c, 0x5d, 0x72, 0x23, 0x7a, 0x68, 0x27, - 0x23, 0xcc, 0xd0, 0x53, 0x3b, 0x74, 0x23, 0x3f, 0xd8, 0x4b, 0x46, 0x98, 0xc1, 0x0a, 0x83, 0x35, - 0x2a, 0xfe, 0x41, 0xbb, 0x2e, 0x7b, 0x49, 0x51, 0x30, 0xed, 0x6e, 0x58, 0xc0, 0xb1, 0xa2, 0xa0, - 0xfd, 0x55, 0x77, 0x03, 0xa6, 0xb2, 0xdc, 0x13, 0x1b, 0xb7, 0xea, 0xaf, 0x45, 0x89, 0xc0, 0x31, - 0x8d, 0xfd, 0x17, 0x16, 0x94, 0x2f, 0xf1, 0xf7, 0xa0, 0x9b, 0x61, 0xce, 0xa6, 0xfb, 0x3c, 0x94, - 0x88, 0x34, 0x10, 0xc8, 0xb7, 0xb1, 0x52, 0x10, 0x55, 0x96, 0x03, 0x1e, 0xe8, 0x46, 0xd1, 0xf5, - 0xf0, 0x9c, 0xfc, 0x60, 0xef, 0x81, 0x97, 0x01, 0x11, 0xbd, 0x2e, 0x3d, 0xf2, 0x0f, 0x0b, 0x21, - 0xb2, 0x94, 0xc2, 0xe2, 0x8c, 0x12, 0xf6, 0x8f, 0x59, 0x70, 0x5c, 0x7d, 0xf0, 0xbb, 0xee, 0x33, - 0xed, 0xaf, 0x15, 0x60, 0xf4, 0xf2, 0xc6, 0x46, 0xe5, 0x12, 0x89, 0xb4, 0x59, 0xd9, 0xd9, 0xec, - 0x8f, 0x35, 0xeb, 0x65, 0xa7, 0x3b, 0x62, 0x3b, 0x72, 0x1b, 0xb3, 0x3c, 0x80, 0xdc, 0xec, 0x8a, - 0x17, 0x5d, 0x0d, 0xaa, 0x51, 0xe0, 0x7a, 0x5b, 0x99, 0x33, 0x5d, 0xca, 0x2c, 0xc5, 0x3c, 0x99, - 0x05, 0x3d, 0x0f, 0x03, 0x2c, 0x82, 0x9d, 0x1c, 0x84, 0x87, 0xd5, 0x15, 0x8b, 0x41, 0xef, 0xed, - 0x97, 0x4b, 0xd7, 0xf0, 0x0a, 0xff, 0x83, 0x05, 0x29, 0xba, 0x06, 0xc3, 0xdb, 0x51, 0xd4, 0xba, - 0x4c, 0x9c, 0x3a, 0x09, 0xe4, 0x2e, 0x7b, 0x26, 0x6b, 0x97, 0xa5, 0x9d, 0xc0, 0xc9, 0xe2, 0x8d, - 0x29, 0x86, 0x85, 0x58, 0xe7, 0x63, 0x57, 0x01, 0x62, 0xdc, 0x21, 0x19, 0x6e, 0xec, 0x0d, 0x28, - 0xd1, 0xcf, 0x9d, 0x6b, 0xb8, 0x4e, 0x67, 0xd3, 0xf8, 0xd3, 0x50, 0x92, 0x86, 0xef, 0x50, 0x84, - 0xbb, 0x60, 0x27, 0x92, 0xb4, 0x8b, 0x87, 0x38, 0xc6, 0xdb, 0x8f, 0x81, 0xf0, 0x2d, 0xed, 0xc4, - 0xd2, 0xde, 0x84, 0x63, 0xcc, 0x49, 0xd6, 0x89, 0xb6, 0x8d, 0x39, 0xda, 0x7d, 0x32, 0x3c, 0x23, - 0xee, 0x75, 0xfc, 0xcb, 0xa6, 0xb5, 0xc7, 0xc9, 0x23, 0x92, 0x63, 0x7c, 0xc7, 0xb3, 0xff, 0xbc, - 0x0f, 0x1e, 0x5e, 0xa9, 0xe6, 0xc7, 0x69, 0x7a, 0x09, 0x46, 0xb8, 0xb8, 0x48, 0xa7, 0x86, 0xd3, - 0x10, 0xf5, 0x2a, 0x0d, 0xe8, 0x86, 0x86, 0xc3, 
0x06, 0x25, 0x3a, 0x0d, 0x45, 0xf7, 0x6d, 0x2f, - 0xf9, 0x74, 0x6f, 0xe5, 0x8d, 0x75, 0x4c, 0xe1, 0x14, 0x4d, 0x25, 0x4f, 0xbe, 0xa5, 0x2b, 0xb4, - 0x92, 0x3e, 0x5f, 0x83, 0x31, 0x37, 0xac, 0x85, 0xee, 0x8a, 0x47, 0xd7, 0xa9, 0xb6, 0xd2, 0x95, - 0xce, 0x81, 0x36, 0x5a, 0x61, 0x71, 0x82, 0x5a, 0x3b, 0x5f, 0xfa, 0x7b, 0x96, 0x5e, 0xbb, 0x46, - 0x89, 0xa0, 0xdb, 0x7f, 0x8b, 0x7d, 0x5d, 0xc8, 0x54, 0xf0, 0x62, 0xfb, 0xe7, 0x1f, 0x1c, 0x62, - 0x89, 0xa3, 0x17, 0xba, 0xda, 0xb6, 0xd3, 0x9a, 0x6b, 0x47, 0xdb, 0x8b, 0x6e, 0x58, 0xf3, 0x77, - 0x49, 0xb0, 0xc7, 0xee, 0xe2, 0x43, 0xf1, 0x85, 0x4e, 0x21, 0x16, 0x2e, 0xcf, 0x55, 0x28, 0x25, - 0x4e, 0x97, 0x41, 0x73, 0x30, 0x2e, 0x81, 0x55, 0x12, 0xb2, 0x23, 0x60, 0x98, 0xb1, 0x51, 0x8f, - 0xe9, 0x04, 0x58, 0x31, 0x49, 0xd2, 0x9b, 0x02, 0x2e, 0x1c, 0x86, 0x80, 0xfb, 0x41, 0x18, 0x75, - 0x3d, 0x37, 0x72, 0x9d, 0xc8, 0xe7, 0xf6, 0x23, 0x7e, 0xed, 0x66, 0x0a, 0xe6, 0x15, 0x1d, 0x81, - 0x4d, 0x3a, 0xfb, 0xdf, 0xf7, 0xc1, 0x24, 0x1b, 0xb6, 0xf7, 0x66, 0xd8, 0x77, 0xd2, 0x0c, 0xbb, - 0x96, 0x9e, 0x61, 0x87, 0x21, 0xb9, 0xdf, 0xf7, 0x34, 0xfb, 0x34, 0x94, 0xd4, 0xfb, 0x41, 0xf9, - 0x80, 0xd8, 0xca, 0x79, 0x40, 0xdc, 0xfd, 0xf4, 0x96, 0x2e, 0x69, 0xc5, 0x4c, 0x97, 0xb4, 0xaf, - 0x58, 0x10, 0x1b, 0x16, 0xd0, 0x1b, 0x50, 0x6a, 0xf9, 0xcc, 0xc3, 0x35, 0x90, 0x6e, 0xe3, 0x8f, - 0x75, 0xb4, 0x4c, 0xf0, 0x50, 0x75, 0x01, 0xef, 0x85, 0x8a, 0x2c, 0x8a, 0x63, 0x2e, 0xe8, 0x0a, - 0x0c, 0xb6, 0x02, 0x52, 0x8d, 0x58, 0x1c, 0xa5, 0xde, 0x19, 0xf2, 0x59, 0xc3, 0x0b, 0x62, 0xc9, - 0xc1, 0xfe, 0x85, 0x02, 0x4c, 0x24, 0x49, 0xd1, 0xab, 0xd0, 0x47, 0x6e, 0x93, 0x9a, 0x68, 0x6f, - 0xe6, 0x51, 0x1c, 0xab, 0x26, 0x78, 0x07, 0xd0, 0xff, 0x98, 0x95, 0x42, 0x97, 0x61, 0x90, 0x9e, - 0xc3, 0x97, 0x54, 0xcc, 0xc0, 0x47, 0xf2, 0xce, 0x72, 0x25, 0xd0, 0xf0, 0xc6, 0x09, 0x10, 0x96, - 0xc5, 0x99, 0x1f, 0x58, 0xad, 0x55, 0xa5, 0x57, 0x9c, 0xa8, 0xd3, 0x4d, 0x7c, 0x63, 0xa1, 0xc2, - 0x89, 0x04, 0x37, 0xee, 0x07, 0x26, 0x81, 0x38, 0x66, 0x82, 0x3e, 0x02, 0xfd, 0x61, 0x83, 0x90, - 0x96, 0x30, 0xf4, 0x67, 0x2a, 0x17, 0xab, 0x94, 0x40, 0x70, 0x62, 0xca, 0x08, 0x06, 0xc0, 0xbc, - 0xa0, 0xfd, 0x8b, 0x16, 0x00, 0x77, 0x9c, 0x73, 0xbc, 0x2d, 0x72, 0x04, 0xfa, 0xf8, 0x45, 0xe8, - 0x0b, 0x5b, 0xa4, 0xd6, 0xc9, 0x7d, 0x3b, 0x6e, 0x4f, 0xb5, 0x45, 0x6a, 0xf1, 0x9c, 0xa5, 0xff, - 0x30, 0x2b, 0x6d, 0x7f, 0x3f, 0xc0, 0x58, 0x4c, 0xb6, 0x12, 0x91, 0x26, 0x7a, 0xd6, 0x08, 0x5b, - 0x72, 0x32, 0x11, 0xb6, 0xa4, 0xc4, 0xa8, 0x35, 0xd5, 0xef, 0xa7, 0xa1, 0xd8, 0x74, 0x6e, 0x0b, - 0xdd, 0xde, 0xd3, 0x9d, 0x9b, 0x41, 0xf9, 0xcf, 0xae, 0x39, 0xb7, 0xf9, 0xf5, 0xf7, 0x69, 0xb9, - 0xc6, 0xd6, 0x9c, 0xdb, 0x5d, 0x5d, 0x8c, 0x69, 0x25, 0xac, 0x2e, 0xd7, 0x13, 0x3e, 0x61, 0x3d, - 0xd5, 0xe5, 0x7a, 0xc9, 0xba, 0x5c, 0xaf, 0x87, 0xba, 0x5c, 0x0f, 0xdd, 0x81, 0x41, 0xe1, 0xb2, - 0x29, 0x22, 0xc0, 0x5d, 0xe8, 0xa1, 0x3e, 0xe1, 0xf1, 0xc9, 0xeb, 0xbc, 0x20, 0xaf, 0xf7, 0x02, - 0xda, 0xb5, 0x5e, 0x59, 0x21, 0xfa, 0x7f, 0x2c, 0x18, 0x13, 0xbf, 0x31, 0x79, 0xbb, 0x4d, 0xc2, - 0x48, 0x88, 0xbf, 0x1f, 0xe8, 0xbd, 0x0d, 0xa2, 0x20, 0x6f, 0xca, 0x07, 0xe4, 0x49, 0x65, 0x22, - 0xbb, 0xb6, 0x28, 0xd1, 0x0a, 0xf4, 0x0b, 0x16, 0x1c, 0x6b, 0x3a, 0xb7, 0x79, 0x8d, 0x1c, 0x86, - 0x9d, 0xc8, 0xf5, 0x85, 0xeb, 0xc3, 0xab, 0xbd, 0x0d, 0x7f, 0xaa, 0x38, 0x6f, 0xa4, 0xb4, 0x73, - 0x1e, 0xcb, 0x22, 0xe9, 0xda, 0xd4, 0xcc, 0x76, 0xcd, 0x6c, 0xc2, 0x90, 0x9c, 0x6f, 0x0f, 0xd2, - 0x3f, 0x9c, 0xd5, 0x23, 0xe6, 0xda, 0x03, 0xad, 0xe7, 0xd3, 0x30, 0xa2, 0xcf, 0xb1, 0x07, 0x5a, - 0xd7, 0xdb, 0x30, 0x95, 0x31, 0x97, 0x1e, 0x68, 0x95, 0xb7, 0xe0, 0x64, 
0xee, 0xfc, 0x78, 0xa0, - 0xfe, 0xfd, 0x5f, 0xb3, 0xf4, 0x7d, 0xf0, 0x08, 0x8c, 0x22, 0x0b, 0xa6, 0x51, 0xe4, 0x4c, 0xe7, - 0x95, 0x93, 0x63, 0x19, 0x79, 0x4b, 0x6f, 0x34, 0xdd, 0xd5, 0xd1, 0xeb, 0x30, 0xd0, 0xa0, 0x10, - 0xe9, 0xf8, 0x6b, 0x77, 0x5f, 0x91, 0xb1, 0x38, 0xca, 0xe0, 0x21, 0x16, 0x1c, 0xec, 0x5f, 0xb1, - 0xa0, 0xef, 0x08, 0x7a, 0x02, 0x9b, 0x3d, 0xf1, 0x6c, 0x2e, 0x6b, 0x11, 0x0c, 0x7f, 0x16, 0x3b, - 0xb7, 0x96, 0x6e, 0x47, 0xc4, 0x0b, 0xd9, 0x99, 0x9e, 0xd9, 0x31, 0xfb, 0x16, 0x4c, 0xad, 0xfa, - 0x4e, 0x7d, 0xde, 0x69, 0x38, 0x5e, 0x8d, 0x04, 0x2b, 0xde, 0xd6, 0x81, 0xbc, 0xd6, 0x0b, 0x5d, - 0xbd, 0xd6, 0x5f, 0x82, 0x01, 0xb7, 0xa5, 0x05, 0xf7, 0x3e, 0x4b, 0x3b, 0x70, 0xa5, 0x22, 0xe2, - 0x7a, 0x23, 0xa3, 0x72, 0x06, 0xc5, 0x82, 0x9e, 0x8e, 0x3c, 0x77, 0x17, 0xeb, 0xcb, 0x1f, 0x79, - 0x2a, 0xc5, 0x27, 0x43, 0x40, 0x19, 0x8e, 0xcd, 0xdb, 0x60, 0x54, 0x21, 0x5e, 0x7d, 0x61, 0x18, - 0x74, 0xf9, 0x97, 0x8a, 0xe1, 0x7f, 0x22, 0x5b, 0xba, 0x4e, 0x75, 0x8c, 0xf6, 0x9e, 0x89, 0x03, - 0xb0, 0x64, 0x64, 0xbf, 0x04, 0x99, 0x21, 0x3b, 0xba, 0x6b, 0x4e, 0xec, 0x8f, 0xc1, 0x24, 0x2b, - 0x79, 0x40, 0xad, 0x84, 0x9d, 0xd0, 0xf7, 0x66, 0xc4, 0x69, 0xb5, 0xff, 0x8d, 0x05, 0x68, 0xcd, - 0xaf, 0xbb, 0x9b, 0x7b, 0x82, 0x39, 0xff, 0xfe, 0xb7, 0xa1, 0xcc, 0xaf, 0x7d, 0xc9, 0x58, 0xa6, - 0x0b, 0x0d, 0x27, 0x0c, 0x35, 0x5d, 0xf3, 0x13, 0xa2, 0xde, 0xf2, 0x46, 0x67, 0x72, 0xdc, 0x8d, - 0x1f, 0x7a, 0x23, 0x11, 0xa8, 0xed, 0x43, 0xa9, 0x40, 0x6d, 0x4f, 0x64, 0x7a, 0x7c, 0xa4, 0x5b, - 0x2f, 0x03, 0xb8, 0xd9, 0x5f, 0xb0, 0x60, 0x7c, 0x3d, 0x11, 0x9b, 0xf3, 0x1c, 0x33, 0x7f, 0x67, - 0xd8, 0x50, 0xaa, 0x0c, 0x8a, 0x05, 0xf6, 0xd0, 0x75, 0x8c, 0x7f, 0x63, 0x41, 0x1c, 0x22, 0xe8, - 0x08, 0xa4, 0xda, 0x05, 0x43, 0xaa, 0xcd, 0xbc, 0x21, 0xa8, 0xe6, 0xe4, 0x09, 0xb5, 0xe8, 0x8a, - 0x1a, 0x93, 0x0e, 0x97, 0x83, 0x98, 0x0d, 0x5f, 0x67, 0x63, 0xe6, 0xc0, 0xa9, 0xd1, 0xf8, 0xc3, - 0x02, 0x20, 0x45, 0xdb, 0x73, 0x70, 0xbf, 0x74, 0x89, 0xc3, 0x09, 0xee, 0xb7, 0x0b, 0x88, 0x39, - 0x70, 0x04, 0x8e, 0x17, 0x72, 0xb6, 0xae, 0xd0, 0xaa, 0x1e, 0xcc, 0x3b, 0x64, 0x46, 0xbe, 0xf6, - 0x5b, 0x4d, 0x71, 0xc3, 0x19, 0x35, 0x68, 0x8e, 0x39, 0xfd, 0xbd, 0x3a, 0xe6, 0x0c, 0x74, 0x79, - 0xb6, 0xfa, 0x55, 0x0b, 0x46, 0x55, 0x37, 0xbd, 0x4b, 0x1e, 0x37, 0xa8, 0xf6, 0xe4, 0x9c, 0x2b, - 0x15, 0xad, 0xc9, 0xec, 0xbc, 0xfd, 0x2e, 0xf6, 0xfc, 0xd8, 0x69, 0xb8, 0x77, 0x88, 0x8a, 0x9a, - 0x5b, 0x16, 0xcf, 0x89, 0x05, 0xf4, 0xde, 0x7e, 0x79, 0x54, 0xfd, 0xe3, 0x51, 0x2f, 0xe3, 0x22, - 0xf6, 0x4f, 0xd1, 0xc5, 0x6e, 0x4e, 0x45, 0xf4, 0x22, 0xf4, 0xb7, 0xb6, 0x9d, 0x90, 0x24, 0x1e, - 0x81, 0xf5, 0x57, 0x28, 0xf0, 0xde, 0x7e, 0x79, 0x4c, 0x15, 0x60, 0x10, 0xcc, 0xa9, 0x7b, 0x0f, - 0x99, 0x98, 0x9e, 0x9c, 0x5d, 0x43, 0x26, 0xfe, 0x95, 0x05, 0x7d, 0xeb, 0xf4, 0xf4, 0x7a, 0xf0, - 0x5b, 0xc0, 0x6b, 0xc6, 0x16, 0x70, 0x2a, 0x2f, 0x61, 0x4b, 0xee, 0xea, 0x5f, 0x4e, 0xac, 0xfe, - 0x33, 0xb9, 0x1c, 0x3a, 0x2f, 0xfc, 0x26, 0x0c, 0xb3, 0x34, 0x30, 0xe2, 0xc1, 0xdb, 0xf3, 0xc6, - 0x82, 0x2f, 0x27, 0x16, 0xfc, 0xb8, 0x46, 0xaa, 0xad, 0xf4, 0x27, 0x61, 0x50, 0xbc, 0xa0, 0x4a, - 0xbe, 0xe2, 0x16, 0xb4, 0x58, 0xe2, 0xed, 0x1f, 0x2f, 0x82, 0x91, 0x76, 0x06, 0xfd, 0x9a, 0x05, - 0xb3, 0x01, 0xf7, 0xac, 0xae, 0x2f, 0xb6, 0x03, 0xd7, 0xdb, 0xaa, 0xd6, 0xb6, 0x49, 0xbd, 0xdd, - 0x70, 0xbd, 0xad, 0x95, 0x2d, 0xcf, 0x57, 0xe0, 0xa5, 0xdb, 0xa4, 0xd6, 0x66, 0x56, 0xcf, 0x2e, - 0x39, 0x6e, 0xd4, 0x0b, 0x85, 0xe7, 0xee, 0xee, 0x97, 0x67, 0xf1, 0x81, 0x78, 0xe3, 0x03, 0xb6, - 0x05, 0xfd, 0x9e, 0x05, 0x17, 0x78, 0x36, 0x96, 0xde, 0xdb, 0xdf, 0x41, 0x89, 0x50, 0x91, 0xac, - 
0x62, 0x26, 0x1b, 0x24, 0x68, 0xce, 0x7f, 0x50, 0x74, 0xe8, 0x85, 0xca, 0xc1, 0xea, 0xc2, 0x07, - 0x6d, 0x9c, 0xfd, 0x8f, 0x8a, 0x30, 0x2a, 0x42, 0xeb, 0x89, 0x33, 0xe0, 0x45, 0x63, 0x4a, 0x3c, - 0x92, 0x98, 0x12, 0x93, 0x06, 0xf1, 0xe1, 0x6c, 0xff, 0x21, 0x4c, 0xd2, 0xcd, 0xf9, 0x32, 0x71, - 0x82, 0xe8, 0x26, 0x71, 0xb8, 0xbf, 0x5d, 0xf1, 0xc0, 0xbb, 0xbf, 0x52, 0xfc, 0xae, 0x26, 0x99, - 0xe1, 0x34, 0xff, 0xef, 0xa4, 0x33, 0xc7, 0x83, 0x89, 0x54, 0x74, 0xc4, 0x37, 0xa1, 0xa4, 0x9e, - 0xff, 0x88, 0x4d, 0xa7, 0x73, 0x90, 0xd1, 0x24, 0x07, 0xae, 0x57, 0x8c, 0x9f, 0x9e, 0xc5, 0xec, - 0xec, 0xbf, 0x57, 0x30, 0x2a, 0xe4, 0x83, 0xb8, 0x0e, 0x43, 0x4e, 0xc8, 0x02, 0x1f, 0xd7, 0x3b, - 0xa9, 0x7e, 0x53, 0xd5, 0xb0, 0x27, 0x58, 0x73, 0xa2, 0x24, 0x56, 0x3c, 0xd0, 0x65, 0xee, 0xd5, - 0xb8, 0x4b, 0x3a, 0xe9, 0x7d, 0x53, 0xdc, 0x40, 0xfa, 0x3d, 0xee, 0x12, 0x2c, 0xca, 0xa3, 0x4f, - 0x70, 0xb7, 0xd3, 0x2b, 0x9e, 0x7f, 0xcb, 0xbb, 0xe4, 0xfb, 0x32, 0x8c, 0x4a, 0x6f, 0x0c, 0x27, - 0xa5, 0xb3, 0xa9, 0x2a, 0x8e, 0x4d, 0x6e, 0xbd, 0x85, 0x1b, 0xfe, 0x0c, 0xb0, 0xec, 0x13, 0xe6, - 0x6b, 0xfb, 0x10, 0x11, 0x18, 0x17, 0x71, 0x1b, 0x25, 0x4c, 0xf4, 0x5d, 0xe6, 0x0d, 0xd7, 0x2c, - 0x1d, 0x5b, 0x28, 0xae, 0x98, 0x2c, 0x70, 0x92, 0xa7, 0xfd, 0x33, 0x16, 0xb0, 0x97, 0xc7, 0x47, - 0x20, 0x8f, 0x7c, 0xd8, 0x94, 0x47, 0xa6, 0xf3, 0x3a, 0x39, 0x47, 0x14, 0x79, 0x81, 0xcf, 0xac, - 0x4a, 0xe0, 0xdf, 0xde, 0x13, 0xbe, 0x42, 0xdd, 0x2f, 0x57, 0xf6, 0x7f, 0xb7, 0xf8, 0x26, 0x16, - 0xc7, 0x69, 0xf8, 0x2c, 0x0c, 0xd5, 0x9c, 0x96, 0x53, 0xe3, 0x39, 0xd2, 0x72, 0x15, 0x9d, 0x46, - 0xa1, 0xd9, 0x05, 0x51, 0x82, 0x2b, 0xee, 0x64, 0xfc, 0xcf, 0x21, 0x09, 0xee, 0xaa, 0xac, 0x53, - 0x55, 0xce, 0xec, 0xc0, 0xa8, 0xc1, 0xec, 0x81, 0x6a, 0x79, 0x3e, 0xcb, 0x8f, 0x58, 0x15, 0xaf, - 0xb6, 0x09, 0x93, 0x9e, 0xf6, 0x9f, 0x1e, 0x28, 0xf2, 0xe6, 0xfc, 0x58, 0xb7, 0x43, 0x94, 0x9d, - 0x3e, 0xda, 0xa3, 0xe6, 0x04, 0x1b, 0x9c, 0xe6, 0x6c, 0xff, 0x84, 0x05, 0x0f, 0xe9, 0x84, 0xda, - 0xbb, 0xa9, 0x6e, 0xd6, 0xa7, 0x45, 0x18, 0xf2, 0x5b, 0x24, 0x70, 0x22, 0x3f, 0x10, 0xa7, 0xc6, - 0x79, 0xd9, 0xe9, 0x57, 0x05, 0xfc, 0x9e, 0xc8, 0xf8, 0x21, 0xb9, 0x4b, 0x38, 0x56, 0x25, 0xe9, - 0xd5, 0x9a, 0x75, 0x46, 0x28, 0x5e, 0xc8, 0xb1, 0x3d, 0x80, 0x39, 0x32, 0x84, 0x58, 0x60, 0xec, - 0x3f, 0xb7, 0xf8, 0xc4, 0xd2, 0x9b, 0x8e, 0xde, 0x86, 0x89, 0xa6, 0x13, 0xd5, 0xb6, 0x97, 0x6e, - 0xb7, 0x02, 0x6e, 0xcb, 0x93, 0xfd, 0xf4, 0x74, 0xb7, 0x7e, 0xd2, 0x3e, 0x32, 0xf6, 0xa4, 0x5d, - 0x4b, 0x30, 0xc3, 0x29, 0xf6, 0xe8, 0x26, 0x0c, 0x33, 0x18, 0x7b, 0xfc, 0x19, 0x76, 0x12, 0x0d, - 0xf2, 0x6a, 0x53, 0xbe, 0x20, 0x6b, 0x31, 0x1f, 0xac, 0x33, 0xb5, 0xbf, 0x52, 0xe4, 0xab, 0x9d, - 0x89, 0xf2, 0x4f, 0xc2, 0x60, 0xcb, 0xaf, 0x2f, 0xac, 0x2c, 0x62, 0x31, 0x0a, 0xea, 0x18, 0xa9, - 0x70, 0x30, 0x96, 0x78, 0x74, 0x1e, 0x86, 0xc4, 0x4f, 0x69, 0x7b, 0x65, 0x7b, 0xb3, 0xa0, 0x0b, - 0xb1, 0xc2, 0xa2, 0xe7, 0x00, 0x5a, 0x81, 0xbf, 0xeb, 0xd6, 0x59, 0x30, 0x98, 0xa2, 0xe9, 0xc6, - 0x55, 0x51, 0x18, 0xac, 0x51, 0xa1, 0x57, 0x60, 0xb4, 0xed, 0x85, 0x5c, 0x1c, 0xd1, 0x42, 0x6e, - 0x2b, 0x07, 0xa3, 0x6b, 0x3a, 0x12, 0x9b, 0xb4, 0x68, 0x0e, 0x06, 0x22, 0x87, 0xb9, 0x25, 0xf5, - 0xe7, 0x7b, 0x5b, 0x6f, 0x50, 0x0a, 0x3d, 0x1d, 0x17, 0x2d, 0x80, 0x45, 0x41, 0xf4, 0xa6, 0x7c, - 0x87, 0xcd, 0x37, 0x76, 0xf1, 0xcc, 0xa1, 0xb7, 0x43, 0x40, 0x7b, 0x85, 0x2d, 0x9e, 0x4f, 0x18, - 0xbc, 0xd0, 0xcb, 0x00, 0xe4, 0x76, 0x44, 0x02, 0xcf, 0x69, 0x28, 0x67, 0x42, 0x25, 0x17, 0x2c, - 0xfa, 0xeb, 0x7e, 0x74, 0x2d, 0x24, 0x4b, 0x8a, 0x02, 0x6b, 0xd4, 0xf6, 0xef, 0x95, 0x00, 0x62, - 0xb9, 0x1d, 0xdd, 0x49, 
0x6d, 0x5c, 0xcf, 0x74, 0x96, 0xf4, 0x0f, 0x6f, 0xd7, 0x42, 0x9f, 0xb7, - 0x60, 0x58, 0xc4, 0xbc, 0x61, 0x23, 0x54, 0xe8, 0xbc, 0x71, 0x9a, 0xa1, 0x77, 0x68, 0x09, 0xde, - 0x84, 0xe7, 0xe5, 0x0c, 0xd5, 0x30, 0x5d, 0x5b, 0xa1, 0x57, 0x8c, 0xde, 0x2f, 0xaf, 0x8a, 0x45, - 0xa3, 0x2b, 0xd5, 0x55, 0xb1, 0xc4, 0xce, 0x08, 0xfd, 0x96, 0x78, 0xcd, 0xb8, 0x25, 0xf6, 0xe5, - 0x3f, 0x34, 0x35, 0xc4, 0xd7, 0x6e, 0x17, 0x44, 0x54, 0xd1, 0x83, 0x4e, 0xf4, 0xe7, 0xbf, 0x8e, - 0xd4, 0xee, 0x49, 0x5d, 0x02, 0x4e, 0x7c, 0x1a, 0xc6, 0xeb, 0xa6, 0x10, 0x20, 0x66, 0xe2, 0x13, - 0x79, 0x7c, 0x13, 0x32, 0x43, 0x7c, 0xec, 0x27, 0x10, 0x38, 0xc9, 0x18, 0x55, 0x78, 0x0c, 0x92, - 0x15, 0x6f, 0xd3, 0x17, 0x4f, 0x6d, 0xec, 0xdc, 0xb1, 0xdc, 0x0b, 0x23, 0xd2, 0xa4, 0x94, 0xf1, - 0xe9, 0xbe, 0x2e, 0xca, 0x62, 0xc5, 0x05, 0xbd, 0x0e, 0x03, 0xec, 0x79, 0x5c, 0x38, 0x3d, 0x94, - 0xaf, 0x88, 0x37, 0x83, 0x31, 0xc6, 0x0b, 0x92, 0xfd, 0x0d, 0xb1, 0xe0, 0x80, 0x2e, 0xcb, 0xc7, - 0xa7, 0xe1, 0x8a, 0x77, 0x2d, 0x24, 0xec, 0xf1, 0x69, 0x69, 0xfe, 0xb1, 0xf8, 0x5d, 0x29, 0x87, - 0x67, 0x26, 0xed, 0x34, 0x4a, 0x52, 0x29, 0x4a, 0xfc, 0x97, 0xb9, 0x40, 0x45, 0xe8, 0xa8, 0xcc, - 0xe6, 0x99, 0xf9, 0x42, 0xe3, 0xee, 0xbc, 0x6e, 0xb2, 0xc0, 0x49, 0x9e, 0x54, 0x22, 0xe5, 0xab, - 0x5e, 0x3c, 0xd6, 0xe9, 0xb6, 0x77, 0xf0, 0x8b, 0x38, 0x3b, 0x8d, 0x38, 0x04, 0x8b, 0xf2, 0x47, - 0x2a, 0x1e, 0xcc, 0x78, 0x30, 0x91, 0x5c, 0xa2, 0x0f, 0x54, 0x1c, 0xf9, 0xd3, 0x3e, 0x18, 0x33, - 0xa7, 0x14, 0xba, 0x00, 0x25, 0xc1, 0x44, 0xe5, 0xd3, 0x51, 0xab, 0x64, 0x4d, 0x22, 0x70, 0x4c, - 0xc3, 0xd2, 0x28, 0xb1, 0xe2, 0x9a, 0x77, 0x76, 0x9c, 0x46, 0x49, 0x61, 0xb0, 0x46, 0x45, 0x2f, - 0x56, 0x37, 0x7d, 0x3f, 0x52, 0x07, 0x92, 0x9a, 0x77, 0xf3, 0x0c, 0x8a, 0x05, 0x96, 0x1e, 0x44, - 0x3b, 0x24, 0xf0, 0x48, 0xc3, 0x0c, 0xcf, 0xae, 0x0e, 0xa2, 0x2b, 0x3a, 0x12, 0x9b, 0xb4, 0xf4, - 0x38, 0xf5, 0x43, 0x36, 0x91, 0xc5, 0xf5, 0x2d, 0xf6, 0x76, 0xaf, 0xf2, 0x77, 0xfb, 0x12, 0x8f, - 0x3e, 0x06, 0x0f, 0xa9, 0x50, 0x68, 0x98, 0x1b, 0x79, 0x64, 0x8d, 0x03, 0x86, 0xb6, 0xe5, 0xa1, - 0x85, 0x6c, 0x32, 0x9c, 0x57, 0x1e, 0xbd, 0x06, 0x63, 0x42, 0xc4, 0x97, 0x1c, 0x07, 0x4d, 0xd7, - 0xad, 0x2b, 0x06, 0x16, 0x27, 0xa8, 0x65, 0x80, 0x79, 0x26, 0x65, 0x4b, 0x0e, 0x43, 0xe9, 0x00, - 0xf3, 0x3a, 0x1e, 0xa7, 0x4a, 0xa0, 0x39, 0x18, 0xe7, 0x32, 0x98, 0xeb, 0x6d, 0xf1, 0x31, 0x11, - 0x6f, 0xe9, 0xd4, 0x92, 0xba, 0x6a, 0xa2, 0x71, 0x92, 0x1e, 0xbd, 0x04, 0x23, 0x4e, 0x50, 0xdb, - 0x76, 0x23, 0x52, 0x8b, 0xda, 0x01, 0x7f, 0x64, 0xa7, 0xf9, 0xbe, 0xcd, 0x69, 0x38, 0x6c, 0x50, - 0xda, 0x77, 0x60, 0x2a, 0x23, 0xa0, 0x07, 0x9d, 0x38, 0x4e, 0xcb, 0x95, 0xdf, 0x94, 0x70, 0x30, - 0x9f, 0xab, 0xac, 0xc8, 0xaf, 0xd1, 0xa8, 0xe8, 0xec, 0x64, 0x81, 0x3f, 0xb4, 0xd4, 0xbf, 0x6a, - 0x76, 0x2e, 0x4b, 0x04, 0x8e, 0x69, 0xec, 0xff, 0x54, 0x80, 0xf1, 0x0c, 0xc3, 0x11, 0x4b, 0x3f, - 0x9b, 0xb8, 0xa4, 0xc4, 0xd9, 0x66, 0xcd, 0x7c, 0x05, 0x85, 0x03, 0xe4, 0x2b, 0x28, 0x76, 0xcb, - 0x57, 0xd0, 0xf7, 0x4e, 0xf2, 0x15, 0x98, 0x3d, 0xd6, 0xdf, 0x53, 0x8f, 0x65, 0xe4, 0x38, 0x18, - 0x38, 0x60, 0x8e, 0x03, 0xa3, 0xd3, 0x07, 0x7b, 0xe8, 0xf4, 0x1f, 0x29, 0xc0, 0x44, 0xd2, 0xe6, - 0x74, 0x04, 0x7a, 0xdb, 0xd7, 0x0d, 0xbd, 0xed, 0xf9, 0x5e, 0xde, 0x3e, 0xe7, 0xea, 0x70, 0x71, - 0x42, 0x87, 0xfb, 0x54, 0x4f, 0xdc, 0x3a, 0xeb, 0x73, 0x7f, 0xb2, 0x00, 0xc7, 0x33, 0x4d, 0x71, - 0x47, 0xd0, 0x37, 0x57, 0x8d, 0xbe, 0x79, 0xb6, 0xe7, 0x77, 0xe1, 0xb9, 0x1d, 0x74, 0x23, 0xd1, - 0x41, 0x17, 0x7a, 0x67, 0xd9, 0xb9, 0x97, 0xbe, 0x51, 0x84, 0x33, 0x99, 0xe5, 0x62, 0xb5, 0xe7, - 0xb2, 0xa1, 0xf6, 0x7c, 0x2e, 0xa1, 0xf6, 0xb4, 
0x3b, 0x97, 0x3e, 0x1c, 0x3d, 0xa8, 0x78, 0x1f, - 0xcd, 0xa2, 0x3c, 0xdc, 0xa7, 0x0e, 0xd4, 0x78, 0x1f, 0xad, 0x18, 0x61, 0x93, 0xef, 0x77, 0x92, - 0xee, 0xf3, 0x77, 0x2c, 0x38, 0x99, 0x39, 0x36, 0x47, 0xa0, 0xeb, 0x5a, 0x37, 0x75, 0x5d, 0x4f, - 0xf6, 0x3c, 0x5b, 0x73, 0x94, 0x5f, 0x5f, 0x18, 0xc8, 0xf9, 0x16, 0x76, 0x93, 0xbf, 0x0a, 0xc3, - 0x4e, 0xad, 0x46, 0xc2, 0x70, 0xcd, 0xaf, 0xab, 0xd0, 0xe6, 0xcf, 0xb2, 0x7b, 0x56, 0x0c, 0xbe, - 0xb7, 0x5f, 0x9e, 0x49, 0xb2, 0x88, 0xd1, 0x58, 0xe7, 0x80, 0x3e, 0x01, 0x43, 0xa1, 0xcc, 0x4a, - 0xd7, 0x77, 0xff, 0x59, 0xe9, 0x98, 0x92, 0x40, 0x69, 0x2a, 0x14, 0x4b, 0xf4, 0xbf, 0xe9, 0xf1, - 0x76, 0xd2, 0x52, 0x65, 0x22, 0xfa, 0xcb, 0x7d, 0x44, 0xdd, 0x79, 0x0e, 0x60, 0x57, 0x5d, 0x09, - 0x92, 0x5a, 0x08, 0xed, 0xb2, 0xa0, 0x51, 0xa1, 0x8f, 0xc0, 0x44, 0xc8, 0x43, 0x4d, 0xc6, 0xce, - 0x13, 0x7c, 0x2e, 0xb2, 0x68, 0x5d, 0xd5, 0x04, 0x0e, 0xa7, 0xa8, 0xd1, 0xb2, 0xac, 0x95, 0xb9, - 0xc9, 0xf0, 0xe9, 0x79, 0x2e, 0xae, 0x51, 0xb8, 0xca, 0x1c, 0x4b, 0x0e, 0x02, 0xeb, 0x7e, 0xad, - 0x24, 0xfa, 0x04, 0x00, 0x9d, 0x44, 0x42, 0x1b, 0x31, 0x98, 0xbf, 0x85, 0xd2, 0xbd, 0xa5, 0x9e, - 0xe9, 0x3b, 0xce, 0x1e, 0x36, 0x2f, 0x2a, 0x26, 0x58, 0x63, 0x88, 0x1c, 0x18, 0x8d, 0xff, 0xc5, - 0x19, 0xa2, 0xcf, 0xe7, 0xd6, 0x90, 0x64, 0xce, 0x14, 0xdf, 0x8b, 0x3a, 0x0b, 0x6c, 0x72, 0x44, - 0x1f, 0x87, 0x93, 0xbb, 0xb9, 0x1e, 0x29, 0xa5, 0x38, 0xe9, 0x63, 0xbe, 0x1f, 0x4a, 0x7e, 0x79, - 0xfb, 0x77, 0x01, 0x1e, 0xee, 0xb0, 0xd3, 0xa3, 0x39, 0xd3, 0x9a, 0xfc, 0x74, 0x52, 0x45, 0x30, - 0x93, 0x59, 0xd8, 0xd0, 0x19, 0x24, 0x16, 0x54, 0xe1, 0x1d, 0x2f, 0xa8, 0x1f, 0xb2, 0x34, 0xe5, - 0x0d, 0x77, 0xe7, 0xfd, 0xf0, 0x01, 0x4f, 0xb0, 0x43, 0xd4, 0xe6, 0x6c, 0x66, 0xa8, 0x44, 0x9e, - 0xeb, 0xb9, 0x39, 0xbd, 0xeb, 0x48, 0xbe, 0x96, 0x1d, 0xbc, 0x99, 0x6b, 0x4b, 0x2e, 0x1d, 0xf4, - 0xfb, 0x8f, 0x2a, 0x90, 0xf3, 0x1f, 0x5a, 0x70, 0x32, 0x05, 0xe6, 0x6d, 0x20, 0xa1, 0x88, 0x2f, - 0xb6, 0xfe, 0x8e, 0x1b, 0x2f, 0x19, 0xf2, 0x6f, 0xb8, 0x2c, 0xbe, 0xe1, 0x64, 0x2e, 0x5d, 0xb2, - 0xe9, 0x5f, 0xfc, 0x93, 0xf2, 0x14, 0xab, 0xc0, 0x24, 0xc4, 0xf9, 0x4d, 0x47, 0x2d, 0x38, 0x5b, - 0x6b, 0x07, 0x41, 0x3c, 0x59, 0x33, 0x16, 0x27, 0xbf, 0xeb, 0x3d, 0x76, 0x77, 0xbf, 0x7c, 0x76, - 0xa1, 0x0b, 0x2d, 0xee, 0xca, 0x0d, 0x79, 0x80, 0x9a, 0x29, 0xbf, 0x2f, 0x91, 0x18, 0x3e, 0xd3, - 0x53, 0x23, 0xed, 0x25, 0xc6, 0x1f, 0xb0, 0x66, 0x78, 0x8f, 0x65, 0x70, 0x3e, 0x5a, 0xed, 0xc9, - 0xb7, 0x26, 0x32, 0xf7, 0xcc, 0x2a, 0x9c, 0xe9, 0x3c, 0x99, 0x0e, 0xf4, 0x78, 0xfe, 0x0f, 0x2c, - 0x38, 0xdd, 0x31, 0x42, 0xd3, 0xb7, 0xe1, 0x65, 0xc1, 0xfe, 0x9c, 0x05, 0x8f, 0x64, 0x96, 0x30, - 0x5c, 0x0c, 0x2f, 0x40, 0xa9, 0x96, 0x48, 0x6b, 0x1c, 0xc7, 0x2a, 0x51, 0x29, 0x8d, 0x63, 0x1a, - 0xc3, 0x93, 0xb0, 0xd0, 0xd5, 0x93, 0xf0, 0x37, 0x2d, 0x48, 0x1d, 0xf5, 0x47, 0x20, 0x79, 0xae, - 0x98, 0x92, 0xe7, 0x63, 0xbd, 0xf4, 0x66, 0x8e, 0xd0, 0xf9, 0x97, 0xe3, 0x70, 0x22, 0xe7, 0xed, - 0xeb, 0x2e, 0x4c, 0x6e, 0xd5, 0x88, 0x19, 0xec, 0xa0, 0x53, 0x10, 0xb0, 0x8e, 0x91, 0x11, 0x78, - 0x36, 0xe9, 0x14, 0x09, 0x4e, 0x57, 0x81, 0x3e, 0x67, 0xc1, 0x31, 0xe7, 0x56, 0xb8, 0x44, 0x6f, - 0x10, 0x6e, 0x6d, 0xbe, 0xe1, 0xd7, 0x76, 0xa8, 0x60, 0x26, 0x97, 0xd5, 0x0b, 0x99, 0x5a, 0xdd, - 0x1b, 0xd5, 0x14, 0xbd, 0x51, 0xfd, 0xf4, 0xdd, 0xfd, 0xf2, 0xb1, 0x2c, 0x2a, 0x9c, 0x59, 0x17, - 0xc2, 0x22, 0x7b, 0x8f, 0x13, 0x6d, 0x77, 0x0a, 0xc7, 0x91, 0xf5, 0x48, 0x99, 0x8b, 0xc4, 0x12, - 0x83, 0x15, 0x1f, 0xf4, 0x29, 0x28, 0x6d, 0xc9, 0x97, 0xf7, 0x19, 0x22, 0x77, 0xdc, 0x91, 0x9d, - 0xe3, 0x11, 0x70, 0xd7, 0x0c, 0x45, 0x84, 0x63, 0xa6, 0xe8, 0x35, 0x28, 
0x7a, 0x9b, 0x61, 0xa7, - 0xf4, 0xfb, 0x09, 0x1f, 0x5c, 0x1e, 0xf4, 0x66, 0x7d, 0xb9, 0x8a, 0x69, 0x41, 0x74, 0x19, 0x8a, - 0xc1, 0xcd, 0xba, 0x30, 0x49, 0x64, 0x2e, 0x52, 0x3c, 0xbf, 0x98, 0xd3, 0x2a, 0xc6, 0x09, 0xcf, - 0x2f, 0x62, 0xca, 0x02, 0x55, 0xa0, 0x9f, 0x3d, 0x18, 0x15, 0xa2, 0x6d, 0xe6, 0x55, 0xbe, 0xc3, - 0xc3, 0x6b, 0xfe, 0x18, 0x8d, 0x11, 0x60, 0xce, 0x08, 0x6d, 0xc0, 0x40, 0x8d, 0xa5, 0x6a, 0x17, - 0xb2, 0xec, 0xfb, 0x33, 0x8d, 0x0f, 0x1d, 0x72, 0xd8, 0x0b, 0x5d, 0x3c, 0xa3, 0xc0, 0x82, 0x17, - 0xe3, 0x4a, 0x5a, 0xdb, 0x9b, 0xf2, 0xc4, 0xca, 0xe6, 0x4a, 0x5a, 0xdb, 0xcb, 0xd5, 0x8e, 0x5c, - 0x19, 0x05, 0x16, 0xbc, 0xd0, 0xcb, 0x50, 0xd8, 0xac, 0x89, 0xc7, 0xa0, 0x99, 0x56, 0x08, 0x33, - 0x6e, 0xd1, 0xfc, 0xc0, 0xdd, 0xfd, 0x72, 0x61, 0x79, 0x01, 0x17, 0x36, 0x6b, 0x68, 0x1d, 0x06, - 0x37, 0x79, 0xa4, 0x13, 0x61, 0x68, 0x78, 0x22, 0x3b, 0x08, 0x4b, 0x2a, 0x18, 0x0a, 0x7f, 0x58, - 0x28, 0x10, 0x58, 0x32, 0x61, 0xc9, 0x64, 0x54, 0xc4, 0x16, 0x11, 0x30, 0x72, 0xf6, 0x60, 0x51, - 0x76, 0xf8, 0x55, 0x23, 0x8e, 0xfb, 0x82, 0x35, 0x8e, 0x74, 0x56, 0x3b, 0x77, 0xda, 0x01, 0xcb, - 0x26, 0x20, 0x22, 0x8b, 0x65, 0xce, 0xea, 0x39, 0x49, 0xd4, 0x69, 0x56, 0x2b, 0x22, 0x1c, 0x33, - 0x45, 0x3b, 0x30, 0xba, 0x1b, 0xb6, 0xb6, 0x89, 0x5c, 0xd2, 0x2c, 0xd0, 0x58, 0x8e, 0x34, 0x7b, - 0x5d, 0x10, 0xba, 0x41, 0xd4, 0x76, 0x1a, 0xa9, 0x5d, 0x88, 0x5d, 0x6b, 0xae, 0xeb, 0xcc, 0xb0, - 0xc9, 0x9b, 0x76, 0xff, 0xdb, 0x6d, 0xff, 0xe6, 0x5e, 0x44, 0x44, 0x9c, 0xc7, 0xcc, 0xee, 0x7f, - 0x83, 0x93, 0xa4, 0xbb, 0x5f, 0x20, 0xb0, 0x64, 0x82, 0xae, 0x8b, 0xee, 0x61, 0xbb, 0xe7, 0x44, - 0x7e, 0x10, 0xe9, 0x39, 0x49, 0x94, 0xd3, 0x29, 0x6c, 0xb7, 0x8c, 0x59, 0xb1, 0x5d, 0xb2, 0xb5, - 0xed, 0x47, 0xbe, 0x97, 0xd8, 0xa1, 0x27, 0xf3, 0x77, 0xc9, 0x4a, 0x06, 0x7d, 0x7a, 0x97, 0xcc, - 0xa2, 0xc2, 0x99, 0x75, 0xa1, 0x3a, 0x8c, 0xb5, 0xfc, 0x20, 0xba, 0xe5, 0x07, 0x72, 0x7e, 0xa1, - 0x0e, 0x8a, 0x52, 0x83, 0x52, 0xd4, 0xc8, 0x42, 0xa8, 0x9a, 0x18, 0x9c, 0xe0, 0x89, 0x3e, 0x0a, - 0x83, 0x61, 0xcd, 0x69, 0x90, 0x95, 0xab, 0xd3, 0x53, 0xf9, 0xc7, 0x4f, 0x95, 0x93, 0xe4, 0xcc, - 0x2e, 0x1e, 0xa8, 0x86, 0x93, 0x60, 0xc9, 0x0e, 0x2d, 0x43, 0x3f, 0x4b, 0xd2, 0xca, 0x82, 0x92, - 0xe6, 0xc4, 0xc2, 0x4e, 0x3d, 0xf7, 0xe0, 0x7b, 0x13, 0x03, 0x63, 0x5e, 0x9c, 0xae, 0x01, 0xa1, - 0x29, 0xf0, 0xc3, 0xe9, 0xe3, 0xf9, 0x6b, 0x40, 0x28, 0x18, 0xae, 0x56, 0x3b, 0xad, 0x01, 0x45, - 0x84, 0x63, 0xa6, 0x74, 0x67, 0xa6, 0xbb, 0xe9, 0x89, 0x0e, 0xae, 0x7c, 0xb9, 0x7b, 0x29, 0xdb, - 0x99, 0xe9, 0x4e, 0x4a, 0x59, 0xd8, 0xbf, 0x3e, 0x94, 0x96, 0x59, 0x98, 0x86, 0xe9, 0xff, 0xb0, - 0x52, 0xce, 0x07, 0x1f, 0xe8, 0x55, 0xe1, 0x7d, 0x88, 0x17, 0xd7, 0xcf, 0x59, 0x70, 0xa2, 0x95, - 0xf9, 0x21, 0x42, 0x00, 0xe8, 0x4d, 0x6f, 0xce, 0x3f, 0x5d, 0x05, 0xb0, 0xcd, 0xc6, 0xe3, 0x9c, - 0x9a, 0x92, 0xca, 0x81, 0xe2, 0x3b, 0x56, 0x0e, 0xac, 0xc1, 0x50, 0x8d, 0xdf, 0xe4, 0x64, 0xe0, - 0xf5, 0x9e, 0xc2, 0x2f, 0x32, 0x51, 0x42, 0x5c, 0x01, 0x37, 0xb1, 0x62, 0x81, 0x7e, 0xd8, 0x82, - 0xd3, 0xc9, 0xa6, 0x63, 0xc2, 0xd0, 0x22, 0xea, 0x2d, 0x57, 0x6b, 0x2d, 0x8b, 0xef, 0x4f, 0xc9, - 0xff, 0x06, 0xf1, 0xbd, 0x6e, 0x04, 0xb8, 0x73, 0x65, 0x68, 0x31, 0x43, 0xaf, 0x36, 0x60, 0x5a, - 0x14, 0x7b, 0xd0, 0xad, 0xbd, 0x00, 0x23, 0x4d, 0xbf, 0xed, 0x45, 0xc2, 0xf3, 0x4f, 0x78, 0x21, - 0x31, 0xef, 0x9b, 0x35, 0x0d, 0x8e, 0x0d, 0xaa, 0x84, 0x46, 0x6e, 0xe8, 0xbe, 0x35, 0x72, 0x6f, - 0xc1, 0x88, 0xa7, 0xb9, 0xaa, 0x77, 0xba, 0xc1, 0x0a, 0xed, 0xa2, 0x46, 0xcd, 0x5b, 0xa9, 0x43, - 0xb0, 0xc1, 0xad, 0xb3, 0xb6, 0x0c, 0xde, 0x99, 0xb6, 0xec, 0x68, 0xfd, 0x0d, 0x7f, 0xbe, 0x90, - 
0x71, 0x63, 0xe0, 0x5a, 0xb9, 0x57, 0x4d, 0xad, 0xdc, 0xb9, 0xa4, 0x56, 0x2e, 0x65, 0xaa, 0x32, - 0x14, 0x72, 0xbd, 0x67, 0x87, 0xeb, 0x39, 0xa4, 0xee, 0xf7, 0x5a, 0xf0, 0x10, 0xb3, 0x7d, 0xd0, - 0x0a, 0xde, 0xb1, 0xbd, 0xe3, 0xe1, 0xbb, 0xfb, 0xe5, 0x87, 0x56, 0xb3, 0xd9, 0xe1, 0xbc, 0x7a, - 0xec, 0x06, 0x9c, 0xed, 0x76, 0xee, 0x32, 0x1f, 0xd7, 0xba, 0x72, 0x8e, 0x88, 0x7d, 0x5c, 0xeb, - 0x2b, 0x8b, 0x98, 0x61, 0x7a, 0x0d, 0x18, 0x67, 0xff, 0x07, 0x0b, 0x8a, 0x15, 0xbf, 0x7e, 0x04, - 0x37, 0xfa, 0x0f, 0x1b, 0x37, 0xfa, 0x87, 0xb3, 0x4f, 0xfc, 0x7a, 0xae, 0xb1, 0x6f, 0x29, 0x61, - 0xec, 0x3b, 0x9d, 0xc7, 0xa0, 0xb3, 0x69, 0xef, 0xa7, 0x8a, 0x30, 0x5c, 0xf1, 0xeb, 0x6a, 0x9d, - 0xfd, 0x93, 0xfb, 0x79, 0x60, 0x92, 0x9b, 0xef, 0x47, 0xe3, 0xcc, 0x5c, 0x63, 0x65, 0xc8, 0x81, - 0x6f, 0xb3, 0x77, 0x26, 0x37, 0x88, 0xbb, 0xb5, 0x1d, 0x91, 0x7a, 0xf2, 0x73, 0x8e, 0xee, 0x9d, - 0xc9, 0x37, 0x8b, 0x30, 0x9e, 0xa8, 0x1d, 0x35, 0x60, 0xb4, 0xa1, 0x9b, 0x92, 0xc4, 0x3c, 0xbd, - 0x2f, 0x2b, 0x94, 0xf0, 0xd3, 0xd7, 0x40, 0xd8, 0x64, 0x8e, 0x66, 0x01, 0x94, 0x6f, 0x85, 0xd4, - 0xf6, 0xb3, 0x6b, 0x8d, 0x72, 0xbe, 0x08, 0xb1, 0x46, 0x81, 0x5e, 0x84, 0xe1, 0xc8, 0x6f, 0xf9, - 0x0d, 0x7f, 0x6b, 0xef, 0x0a, 0x91, 0xb1, 0x04, 0x95, 0xf7, 0xed, 0x46, 0x8c, 0xc2, 0x3a, 0x1d, - 0xba, 0x0d, 0x93, 0x8a, 0x49, 0xf5, 0x10, 0xcc, 0x6b, 0x4c, 0x6d, 0xb2, 0x9e, 0xe4, 0x88, 0xd3, - 0x95, 0xa0, 0x97, 0x61, 0x8c, 0xb9, 0x01, 0xb3, 0xf2, 0x57, 0xc8, 0x9e, 0x8c, 0x31, 0xcb, 0x24, - 0xec, 0x35, 0x03, 0x83, 0x13, 0x94, 0x68, 0x01, 0x26, 0x9b, 0x6e, 0x98, 0x28, 0x3e, 0xc0, 0x8a, - 0xb3, 0x06, 0xac, 0x25, 0x91, 0x38, 0x4d, 0x6f, 0xff, 0xac, 0x18, 0x63, 0x2f, 0x72, 0xdf, 0x5b, - 0x8e, 0xef, 0xee, 0xe5, 0xf8, 0x0d, 0x0b, 0x26, 0x68, 0xed, 0xcc, 0xb7, 0x51, 0x0a, 0x52, 0x2a, - 0x0b, 0x81, 0xd5, 0x21, 0x0b, 0xc1, 0x39, 0xba, 0x6d, 0xd7, 0xfd, 0x76, 0x24, 0xb4, 0xa3, 0xda, - 0xbe, 0x4c, 0xa1, 0x58, 0x60, 0x05, 0x1d, 0x09, 0x02, 0xf1, 0x1e, 0x5b, 0xa7, 0x23, 0x41, 0x80, - 0x05, 0x56, 0x26, 0x29, 0xe8, 0xcb, 0x4e, 0x52, 0xc0, 0x63, 0x4d, 0x0b, 0x2f, 0x38, 0x21, 0xd2, - 0x6a, 0xb1, 0xa6, 0xa5, 0x7b, 0x5c, 0x4c, 0x63, 0x7f, 0xad, 0x08, 0x23, 0x15, 0xbf, 0x1e, 0x3b, - 0x76, 0xbc, 0x60, 0x38, 0x76, 0x9c, 0x4d, 0x38, 0x76, 0x4c, 0xe8, 0xb4, 0xef, 0xb9, 0x71, 0x7c, - 0xab, 0xdc, 0x38, 0x7e, 0xc3, 0x62, 0xa3, 0xb6, 0xb8, 0x5e, 0xe5, 0xae, 0xb2, 0xe8, 0x22, 0x0c, - 0xb3, 0x1d, 0x8e, 0x05, 0x00, 0x90, 0xde, 0x0e, 0x2c, 0x69, 0xe0, 0x7a, 0x0c, 0xc6, 0x3a, 0x0d, - 0x3a, 0x0f, 0x43, 0x21, 0x71, 0x82, 0xda, 0xb6, 0xda, 0xde, 0x85, 0x6b, 0x02, 0x87, 0x61, 0x85, - 0x45, 0x6f, 0xc4, 0x61, 0x8e, 0x8b, 0xf9, 0x0f, 0x8a, 0xf5, 0xf6, 0xf0, 0x25, 0x92, 0x1f, 0xdb, - 0xd8, 0xbe, 0x01, 0x28, 0x4d, 0xdf, 0x43, 0x20, 0xce, 0xb2, 0x19, 0x88, 0xb3, 0x94, 0x0a, 0xc2, - 0xf9, 0xd7, 0x16, 0x8c, 0x55, 0xfc, 0x3a, 0x5d, 0xba, 0xdf, 0x49, 0xeb, 0x54, 0x8f, 0xf1, 0x3e, - 0xd0, 0x21, 0xc6, 0xfb, 0xa3, 0xd0, 0x5f, 0xf1, 0xeb, 0x5d, 0x82, 0x85, 0xfe, 0x2d, 0x0b, 0x06, - 0x2b, 0x7e, 0xfd, 0x08, 0x0c, 0x2f, 0xaf, 0x9a, 0x86, 0x97, 0x87, 0x72, 0xe6, 0x4d, 0x8e, 0xad, - 0xe5, 0xff, 0xef, 0x83, 0x51, 0xda, 0x4e, 0x7f, 0x4b, 0x0e, 0xa5, 0xd1, 0x6d, 0x56, 0x0f, 0xdd, - 0x46, 0xaf, 0x01, 0x7e, 0xa3, 0xe1, 0xdf, 0x4a, 0x0e, 0xeb, 0x32, 0x83, 0x62, 0x81, 0x45, 0xcf, - 0xc0, 0x50, 0x2b, 0x20, 0xbb, 0xae, 0x2f, 0xe4, 0x6b, 0xcd, 0x8c, 0x55, 0x11, 0x70, 0xac, 0x28, - 0xe8, 0xc5, 0x3b, 0x74, 0x3d, 0x2a, 0x4b, 0xd4, 0x7c, 0xaf, 0xce, 0x6d, 0x13, 0x45, 0x91, 0x88, - 0x48, 0x83, 0x63, 0x83, 0x0a, 0xdd, 0x80, 0x12, 0xfb, 0xcf, 0xb6, 0x9d, 0x83, 0xa7, 0x40, 0x17, - 0xa9, 0x59, 0x05, 0x03, 
0x1c, 0xf3, 0x42, 0xcf, 0x01, 0x44, 0x32, 0x99, 0x47, 0x28, 0x82, 0x46, - 0xaa, 0xbb, 0x88, 0x4a, 0xf3, 0x11, 0x62, 0x8d, 0x0a, 0x3d, 0x0d, 0xa5, 0xc8, 0x71, 0x1b, 0xab, - 0xae, 0xc7, 0xec, 0xf7, 0xb4, 0xfd, 0x22, 0x43, 0xaa, 0x00, 0xe2, 0x18, 0x4f, 0x65, 0x41, 0x16, - 0x0e, 0x68, 0x7e, 0x2f, 0x12, 0xc9, 0xc0, 0x8a, 0x5c, 0x16, 0x5c, 0x55, 0x50, 0xac, 0x51, 0xa0, - 0x6d, 0x38, 0xe5, 0x7a, 0x2c, 0x69, 0x0f, 0xa9, 0xee, 0xb8, 0xad, 0x8d, 0xd5, 0xea, 0x75, 0x12, - 0xb8, 0x9b, 0x7b, 0xf3, 0x4e, 0x6d, 0x87, 0x78, 0x32, 0xb9, 0xf5, 0x63, 0xa2, 0x89, 0xa7, 0x56, - 0x3a, 0xd0, 0xe2, 0x8e, 0x9c, 0xec, 0xe7, 0xd9, 0x7c, 0xbf, 0x5a, 0x45, 0x4f, 0x19, 0x5b, 0xc7, - 0x09, 0x7d, 0xeb, 0xb8, 0xb7, 0x5f, 0x1e, 0xb8, 0x5a, 0xd5, 0x62, 0xd2, 0xbc, 0x04, 0xc7, 0x2b, - 0x7e, 0xbd, 0xe2, 0x07, 0xd1, 0xb2, 0x1f, 0xdc, 0x72, 0x82, 0xba, 0x9c, 0x5e, 0x65, 0x19, 0x95, - 0x87, 0xee, 0x9f, 0xfd, 0x7c, 0x77, 0x31, 0x22, 0xee, 0x3c, 0xcf, 0x24, 0xb6, 0x03, 0x3e, 0xb7, - 0xac, 0x31, 0xd9, 0x41, 0xa5, 0xbd, 0xba, 0xe4, 0x44, 0x04, 0x5d, 0x85, 0xd1, 0x9a, 0x7e, 0x8c, - 0x8a, 0xe2, 0x4f, 0xca, 0x83, 0xcc, 0x38, 0x63, 0x33, 0xcf, 0x5d, 0xb3, 0xbc, 0xfd, 0x59, 0x51, - 0x09, 0x57, 0x44, 0x70, 0x97, 0xd6, 0x5e, 0xf2, 0xbf, 0xcb, 0xbc, 0x38, 0x85, 0xfc, 0x98, 0x87, - 0xdc, 0xae, 0xdc, 0x31, 0x2f, 0x8e, 0xfd, 0xdd, 0x70, 0x22, 0x59, 0x7d, 0xcf, 0x49, 0xe8, 0x17, - 0x60, 0x32, 0xd0, 0x0b, 0x6a, 0x49, 0x06, 0x8f, 0xf3, 0x5c, 0x26, 0x09, 0x24, 0x4e, 0xd3, 0xdb, - 0x2f, 0xc2, 0x24, 0xbd, 0xfc, 0x2a, 0x41, 0x8e, 0xf5, 0x72, 0xf7, 0xf0, 0x44, 0xff, 0xb1, 0x9f, - 0x1d, 0x44, 0x89, 0x8c, 0x53, 0xe8, 0x93, 0x30, 0x16, 0x92, 0x55, 0xd7, 0x6b, 0xdf, 0x96, 0xba, - 0xb5, 0x0e, 0xef, 0x8c, 0xab, 0x4b, 0x3a, 0x25, 0xbf, 0x3f, 0x98, 0x30, 0x9c, 0xe0, 0x86, 0x9a, - 0x30, 0x76, 0xcb, 0xf5, 0xea, 0xfe, 0xad, 0x50, 0xf2, 0x1f, 0xca, 0x57, 0xd4, 0xdf, 0xe0, 0x94, - 0x89, 0x36, 0x1a, 0xd5, 0xdd, 0x30, 0x98, 0xe1, 0x04, 0x73, 0xba, 0xd8, 0x83, 0xb6, 0x37, 0x17, - 0x5e, 0x0b, 0x09, 0x7f, 0x39, 0x2a, 0x16, 0x3b, 0x96, 0x40, 0x1c, 0xe3, 0xe9, 0x62, 0x67, 0x7f, - 0x2e, 0x05, 0x7e, 0x9b, 0xa7, 0x37, 0x12, 0x8b, 0x1d, 0x2b, 0x28, 0xd6, 0x28, 0xe8, 0x66, 0xc8, - 0xfe, 0xad, 0xfb, 0x1e, 0xf6, 0xfd, 0x48, 0x6e, 0x9f, 0x2c, 0x3d, 0x9f, 0x06, 0xc7, 0x06, 0x15, - 0x5a, 0x06, 0x14, 0xb6, 0x5b, 0xad, 0x06, 0x73, 0x5d, 0x74, 0x1a, 0x8c, 0x15, 0x77, 0xbb, 0x2a, - 0x72, 0xef, 0x96, 0x6a, 0x0a, 0x8b, 0x33, 0x4a, 0xd0, 0x73, 0x71, 0x53, 0x34, 0xb5, 0x9f, 0x35, - 0x95, 0x1b, 0xf5, 0xaa, 0xbc, 0x9d, 0x12, 0x87, 0x96, 0x60, 0x30, 0xdc, 0x0b, 0x6b, 0x51, 0x23, - 0xec, 0x94, 0x0c, 0xb1, 0xca, 0x48, 0xb4, 0x5c, 0xbc, 0xbc, 0x08, 0x96, 0x65, 0x51, 0x0d, 0xa6, - 0x04, 0xc7, 0x85, 0x6d, 0xc7, 0x53, 0x29, 0xda, 0xb8, 0xf7, 0xde, 0xc5, 0xbb, 0xfb, 0xe5, 0x29, - 0x51, 0xb3, 0x8e, 0xbe, 0xb7, 0x5f, 0xa6, 0x8b, 0x23, 0x03, 0x83, 0xb3, 0xb8, 0xf1, 0xc9, 0x57, - 0xab, 0xf9, 0xcd, 0x56, 0x25, 0xf0, 0x37, 0xdd, 0x06, 0xe9, 0x64, 0x18, 0xad, 0x1a, 0x94, 0x62, - 0xf2, 0x19, 0x30, 0x9c, 0xe0, 0x66, 0x7f, 0x96, 0xc9, 0x8e, 0x55, 0x77, 0xcb, 0x73, 0xa2, 0x76, - 0x40, 0x50, 0x13, 0x46, 0x5b, 0x6c, 0x77, 0x11, 0x49, 0x87, 0xc4, 0x5c, 0x7f, 0xa1, 0x47, 0xfd, - 0xd7, 0x2d, 0x96, 0x36, 0xd1, 0xf0, 0x83, 0xac, 0xe8, 0xec, 0xb0, 0xc9, 0xdd, 0xfe, 0x17, 0x27, - 0x99, 0xf4, 0x51, 0xe5, 0x4a, 0xad, 0x41, 0xf1, 0x6c, 0x4c, 0x5c, 0x63, 0x67, 0xf2, 0xd5, 0xc7, - 0xf1, 0xb0, 0x88, 0xa7, 0x67, 0x58, 0x96, 0x45, 0x9f, 0x80, 0x31, 0x7a, 0x2b, 0x54, 0x12, 0x40, - 0x38, 0x7d, 0x2c, 0x3f, 0xbc, 0x8f, 0xa2, 0xd2, 0x13, 0x92, 0xe9, 0x85, 0x71, 0x82, 0x19, 0x7a, - 0x83, 0xb9, 0x06, 0x4a, 0xd6, 0x85, 0x5e, 0x58, 
0xeb, 0x5e, 0x80, 0x92, 0xad, 0xc6, 0x04, 0xb5, - 0x61, 0x2a, 0x9d, 0x76, 0x35, 0x9c, 0xb6, 0xf3, 0xc5, 0xeb, 0x74, 0xe6, 0xd4, 0x38, 0x73, 0x54, - 0x1a, 0x17, 0xe2, 0x2c, 0xfe, 0x68, 0x35, 0x99, 0x14, 0xb3, 0x68, 0x28, 0x9e, 0x53, 0x89, 0x31, - 0x47, 0x3b, 0xe6, 0xc3, 0xdc, 0x82, 0xd3, 0x5a, 0x5e, 0xc1, 0x4b, 0x81, 0xc3, 0x5c, 0x53, 0x5c, - 0xb6, 0x9d, 0x6a, 0x72, 0xd1, 0x23, 0x77, 0xf7, 0xcb, 0xa7, 0x37, 0x3a, 0x11, 0xe2, 0xce, 0x7c, - 0xd0, 0x55, 0x38, 0xce, 0x83, 0x53, 0x2c, 0x12, 0xa7, 0xde, 0x70, 0x3d, 0x25, 0x78, 0xf1, 0x25, - 0x7f, 0xf2, 0xee, 0x7e, 0xf9, 0xf8, 0x5c, 0x16, 0x01, 0xce, 0x2e, 0x87, 0x5e, 0x85, 0x52, 0xdd, - 0x0b, 0x45, 0x1f, 0x0c, 0x18, 0xa9, 0x1b, 0x4b, 0x8b, 0xeb, 0x55, 0xf5, 0xfd, 0xf1, 0x1f, 0x1c, - 0x17, 0x40, 0x5b, 0xdc, 0xf2, 0xa1, 0xd4, 0x55, 0x83, 0xa9, 0x98, 0x85, 0x49, 0x8d, 0xae, 0xf1, - 0x3c, 0x9d, 0x9b, 0xfc, 0xd4, 0xab, 0x2d, 0xe3, 0xe5, 0xba, 0xc1, 0x18, 0xbd, 0x0e, 0x48, 0xa4, - 0x08, 0x99, 0xab, 0xb1, 0x8c, 0x56, 0x9a, 0x3b, 0xa2, 0xba, 0x85, 0x56, 0x53, 0x14, 0x38, 0xa3, - 0x14, 0xba, 0x4c, 0x77, 0x15, 0x1d, 0x2a, 0x76, 0x2d, 0x95, 0x20, 0x78, 0x91, 0xb4, 0x02, 0xc2, - 0x3c, 0xe8, 0x4c, 0x8e, 0x38, 0x51, 0x0e, 0xd5, 0xe1, 0x94, 0xd3, 0x8e, 0x7c, 0x66, 0x54, 0x32, - 0x49, 0x37, 0xfc, 0x1d, 0xe2, 0x31, 0x7b, 0xee, 0x10, 0x8b, 0x85, 0x78, 0x6a, 0xae, 0x03, 0x1d, - 0xee, 0xc8, 0x85, 0x4a, 0xe4, 0x32, 0xe7, 0xbf, 0xb0, 0xf7, 0x18, 0x2f, 0x6d, 0xb9, 0x11, 0x54, - 0x52, 0xa0, 0x17, 0x61, 0x78, 0xdb, 0x0f, 0xa3, 0x75, 0x12, 0xdd, 0xf2, 0x83, 0x1d, 0x11, 0x93, - 0x3c, 0xce, 0x03, 0x11, 0xa3, 0xb0, 0x4e, 0x47, 0xaf, 0xdc, 0xcc, 0xdb, 0x68, 0x65, 0x91, 0x39, - 0x7a, 0x0c, 0xc5, 0x7b, 0xcc, 0x65, 0x0e, 0xc6, 0x12, 0x2f, 0x49, 0x57, 0x2a, 0x0b, 0xcc, 0x69, - 0x23, 0x41, 0xba, 0x52, 0x59, 0xc0, 0x12, 0x4f, 0xa7, 0x6b, 0xb8, 0xed, 0x04, 0xa4, 0x12, 0xf8, - 0x35, 0x12, 0x6a, 0xd9, 0x47, 0x1e, 0xe6, 0x11, 0xd7, 0xe9, 0x74, 0xad, 0x66, 0x11, 0xe0, 0xec, - 0x72, 0x88, 0xa4, 0x73, 0x6a, 0x8e, 0xe5, 0x5b, 0xdb, 0xd2, 0xf2, 0x4c, 0x8f, 0x69, 0x35, 0x3d, - 0x98, 0x50, 0xd9, 0x3c, 0x79, 0x8c, 0xf5, 0x70, 0x7a, 0x9c, 0xcd, 0xed, 0xde, 0x03, 0xb4, 0x2b, - 0xfb, 0xe5, 0x4a, 0x82, 0x13, 0x4e, 0xf1, 0x36, 0x82, 0x6d, 0x4e, 0x74, 0x0d, 0xb6, 0x79, 0x01, - 0x4a, 0x61, 0xfb, 0x66, 0xdd, 0x6f, 0x3a, 0xae, 0xc7, 0x9c, 0x36, 0xb4, 0xbb, 0x5f, 0x55, 0x22, - 0x70, 0x4c, 0x83, 0x96, 0x61, 0xc8, 0x91, 0xc6, 0x49, 0x94, 0x1f, 0x47, 0x4c, 0x99, 0x24, 0x79, - 0x68, 0x1d, 0x69, 0x8e, 0x54, 0x65, 0xd1, 0x2b, 0x30, 0x2a, 0x82, 0x2b, 0x88, 0x04, 0xd8, 0x53, - 0xe6, 0x0b, 0xd8, 0xaa, 0x8e, 0xc4, 0x26, 0x2d, 0xba, 0x06, 0xc3, 0x91, 0xdf, 0x60, 0xcf, 0x38, - 0xa9, 0x98, 0x77, 0x22, 0x3f, 0xdc, 0xe7, 0x86, 0x22, 0xd3, 0xd5, 0xe6, 0xaa, 0x28, 0xd6, 0xf9, - 0xa0, 0x0d, 0x3e, 0xdf, 0x59, 0xae, 0x11, 0x12, 0x8a, 0x0c, 0xca, 0xa7, 0xf3, 0x3c, 0xee, 0x18, - 0x99, 0xb9, 0x1c, 0x44, 0x49, 0xac, 0xb3, 0x41, 0x97, 0x60, 0xb2, 0x15, 0xb8, 0x3e, 0x9b, 0x13, - 0xca, 0xd8, 0x3a, 0x6d, 0x66, 0x16, 0xac, 0x24, 0x09, 0x70, 0xba, 0x0c, 0x8b, 0x8d, 0x21, 0x80, - 0xd3, 0x27, 0x79, 0x76, 0x24, 0x7e, 0x95, 0xe6, 0x30, 0xac, 0xb0, 0x68, 0x8d, 0xed, 0xc4, 0x5c, - 0x0b, 0x34, 0x3d, 0x93, 0x1f, 0xba, 0x4c, 0xd7, 0x16, 0x71, 0xe1, 0x55, 0xfd, 0xc5, 0x31, 0x07, - 0x54, 0xd7, 0x92, 0x12, 0xd3, 0x2b, 0x40, 0x38, 0x7d, 0xaa, 0x83, 0xcb, 0x67, 0xe2, 0x56, 0x16, - 0x0b, 0x04, 0x06, 0x38, 0xc4, 0x09, 0x9e, 0xe8, 0x23, 0x30, 0x21, 0xe2, 0xd0, 0xc6, 0xdd, 0x74, - 0x3a, 0x7e, 0x16, 0x83, 0x13, 0x38, 0x9c, 0xa2, 0xe6, 0xd9, 0x89, 0x9c, 0x9b, 0x0d, 0x22, 0xb6, - 0xbe, 0x55, 0xd7, 0xdb, 0x09, 0xa7, 0xcf, 0xb0, 0xfd, 0x41, 0x64, 0x27, 
0x4a, 0x62, 0x71, 0x46, - 0x09, 0xb4, 0x01, 0x13, 0xad, 0x80, 0x90, 0x26, 0x13, 0xf4, 0xc5, 0x79, 0x56, 0xe6, 0xa1, 0x61, - 0x68, 0x4b, 0x2a, 0x09, 0xdc, 0xbd, 0x0c, 0x18, 0x4e, 0x71, 0x40, 0xb7, 0x60, 0xc8, 0xdf, 0x25, - 0xc1, 0x36, 0x71, 0xea, 0xd3, 0x67, 0x3b, 0x3c, 0xd6, 0x12, 0x87, 0xdb, 0x55, 0x41, 0x9b, 0xf0, - 0x65, 0x91, 0xe0, 0xee, 0xbe, 0x2c, 0xb2, 0x32, 0xf4, 0x7f, 0x5a, 0x70, 0x52, 0x5a, 0x87, 0xaa, - 0x2d, 0xda, 0xeb, 0x0b, 0xbe, 0x17, 0x46, 0x01, 0x0f, 0x66, 0xf2, 0x48, 0x7e, 0x80, 0x8f, 0x8d, - 0x9c, 0x42, 0x4a, 0x11, 0x7d, 0x32, 0x8f, 0x22, 0xc4, 0xf9, 0x35, 0xd2, 0xab, 0x69, 0x48, 0x22, - 0xb9, 0x19, 0xcd, 0x85, 0xcb, 0x6f, 0x2c, 0xae, 0x4f, 0x3f, 0xca, 0x23, 0xb1, 0xd0, 0xc5, 0x50, - 0x4d, 0x22, 0x71, 0x9a, 0x1e, 0x5d, 0x84, 0x82, 0x1f, 0x4e, 0x3f, 0xd6, 0x21, 0x8f, 0xb5, 0x5f, - 0xbf, 0x5a, 0xe5, 0x3e, 0x8d, 0x57, 0xab, 0xb8, 0xe0, 0x87, 0x32, 0x43, 0x10, 0xbd, 0x8f, 0x85, - 0xd3, 0x8f, 0x73, 0xb5, 0xa5, 0xcc, 0x10, 0xc4, 0x80, 0x38, 0xc6, 0xa3, 0x6d, 0x18, 0x0f, 0x8d, - 0x7b, 0x6f, 0x38, 0x7d, 0x8e, 0xf5, 0xd4, 0xe3, 0x79, 0x83, 0x66, 0x50, 0x6b, 0xa9, 0x3b, 0x4c, - 0x2e, 0x38, 0xc9, 0x96, 0xaf, 0x2e, 0xed, 0xe6, 0x1d, 0x4e, 0x3f, 0xd1, 0x65, 0x75, 0x69, 0xc4, - 0xfa, 0xea, 0xd2, 0x79, 0xe0, 0x04, 0xcf, 0x99, 0xef, 0x82, 0xc9, 0x94, 0xb8, 0x74, 0x10, 0xff, - 0xfd, 0x99, 0x1d, 0x18, 0x35, 0xa6, 0xe4, 0x03, 0x75, 0xef, 0xf8, 0x9d, 0x12, 0x94, 0x94, 0xd9, - 0x1d, 0x5d, 0x30, 0x3d, 0x3a, 0x4e, 0x26, 0x3d, 0x3a, 0x86, 0x2a, 0x7e, 0xdd, 0x70, 0xe2, 0xd8, - 0xc8, 0x88, 0xd7, 0x99, 0xb7, 0x01, 0xf6, 0xfe, 0xc8, 0x48, 0x33, 0x25, 0x14, 0x7b, 0x76, 0x0d, - 0xe9, 0xeb, 0x68, 0x9d, 0xb8, 0x04, 0x93, 0x9e, 0xcf, 0x64, 0x74, 0x52, 0x97, 0x02, 0x18, 0x93, - 0xb3, 0x4a, 0x7a, 0x00, 0xac, 0x04, 0x01, 0x4e, 0x97, 0xa1, 0x15, 0x72, 0x41, 0x29, 0x69, 0x0e, - 0xe1, 0x72, 0x14, 0x16, 0x58, 0x7a, 0x37, 0xe4, 0xbf, 0xc2, 0xe9, 0x89, 0xfc, 0xbb, 0x21, 0x2f, - 0x94, 0x14, 0xc6, 0x42, 0x29, 0x8c, 0x31, 0xed, 0x7f, 0xcb, 0xaf, 0xaf, 0x54, 0x84, 0x98, 0xaf, - 0x45, 0xd2, 0xae, 0xaf, 0x54, 0x30, 0xc7, 0xa1, 0x39, 0x18, 0x60, 0x3f, 0xc2, 0xe9, 0x91, 0xfc, - 0x80, 0x49, 0xac, 0x84, 0x96, 0xa1, 0x90, 0x15, 0xc0, 0xa2, 0x20, 0xd3, 0xee, 0xd2, 0xbb, 0x11, - 0xd3, 0xee, 0x0e, 0xde, 0xa7, 0x76, 0x57, 0x32, 0xc0, 0x31, 0x2f, 0x74, 0x1b, 0x8e, 0x1b, 0xf7, - 0x51, 0xf5, 0xea, 0x0a, 0xf2, 0x0d, 0xbf, 0x09, 0xe2, 0xf9, 0xd3, 0xa2, 0xd1, 0xc7, 0x57, 0xb2, - 0x38, 0xe1, 0xec, 0x0a, 0x50, 0x03, 0x26, 0x6b, 0xa9, 0x5a, 0x87, 0x7a, 0xaf, 0x55, 0xcd, 0x8b, - 0x74, 0x8d, 0x69, 0xc6, 0xe8, 0x15, 0x18, 0x7a, 0xdb, 0xe7, 0x4e, 0x5a, 0xe2, 0x6a, 0x22, 0x23, - 0x7e, 0x0c, 0xbd, 0x71, 0xb5, 0xca, 0xe0, 0xf7, 0xf6, 0xcb, 0xc3, 0x15, 0xbf, 0x2e, 0xff, 0x62, - 0x55, 0x00, 0xfd, 0x80, 0x05, 0x33, 0xe9, 0x0b, 0xaf, 0x6a, 0xf4, 0x68, 0xef, 0x8d, 0xb6, 0x45, - 0xa5, 0x33, 0x4b, 0xb9, 0xec, 0x70, 0x87, 0xaa, 0xd0, 0x87, 0xe8, 0x7a, 0x0a, 0xdd, 0x3b, 0x44, - 0xa4, 0x77, 0x7e, 0x24, 0x5e, 0x4f, 0x14, 0x7a, 0x6f, 0xbf, 0x3c, 0xce, 0x77, 0x46, 0xf7, 0x8e, - 0x8a, 0xf9, 0xcd, 0x0b, 0xa0, 0xef, 0x86, 0xe3, 0x41, 0x5a, 0x83, 0x4a, 0xa4, 0x10, 0xfe, 0x54, - 0x2f, 0xbb, 0x6c, 0x72, 0xc0, 0x71, 0x16, 0x43, 0x9c, 0x5d, 0x8f, 0xfd, 0xab, 0x16, 0xd3, 0x6f, - 0x8b, 0x66, 0x91, 0xb0, 0xdd, 0x38, 0x8a, 0xa4, 0xf2, 0x4b, 0x86, 0xed, 0xf8, 0xbe, 0x3d, 0x9b, - 0xfe, 0xb1, 0xc5, 0x3c, 0x9b, 0x8e, 0xf0, 0x8d, 0xd6, 0x1b, 0x30, 0x14, 0xc9, 0x64, 0xff, 0x1d, - 0xf2, 0xe0, 0x6b, 0x8d, 0x62, 0xde, 0x5d, 0xea, 0x92, 0xa3, 0xf2, 0xfa, 0x2b, 0x36, 0xf6, 0x3f, - 0xe0, 0x23, 0x20, 0x31, 0x47, 0x60, 0xa2, 0x5b, 0x34, 0x4d, 0x74, 0xe5, 0x2e, 0x5f, 0x90, 0x63, - 
0xaa, 0xfb, 0xfb, 0x66, 0xbb, 0x99, 0x72, 0xef, 0xdd, 0xee, 0x52, 0x67, 0x7f, 0xc1, 0x02, 0x88, - 0x93, 0x2c, 0xf4, 0x90, 0xce, 0xf5, 0x25, 0x7a, 0xad, 0xf1, 0x23, 0xbf, 0xe6, 0x37, 0x84, 0x81, - 0xe2, 0x54, 0x6c, 0x25, 0xe4, 0xf0, 0x7b, 0xda, 0x6f, 0xac, 0xa8, 0x51, 0x59, 0x46, 0x3d, 0x2d, - 0xc6, 0x76, 0x6b, 0x23, 0xe2, 0xe9, 0x97, 0x2d, 0x38, 0x96, 0xe5, 0xf0, 0x4f, 0x2f, 0xc9, 0x5c, - 0xcd, 0xa9, 0xdc, 0x1d, 0xd5, 0x68, 0x5e, 0x17, 0x70, 0xac, 0x28, 0x7a, 0xce, 0x93, 0x7b, 0xb0, - 0x04, 0x00, 0x57, 0x61, 0xb4, 0x12, 0x10, 0x4d, 0xbe, 0x78, 0x8d, 0x47, 0xd2, 0xe1, 0xed, 0x79, - 0xe6, 0xc0, 0x51, 0x74, 0xec, 0xaf, 0x14, 0xe0, 0x18, 0x77, 0xda, 0x99, 0xdb, 0xf5, 0xdd, 0x7a, - 0xc5, 0xaf, 0x8b, 0x67, 0x9a, 0x6f, 0xc2, 0x48, 0x4b, 0xd3, 0x4d, 0x77, 0x0a, 0x66, 0xad, 0xeb, - 0xb0, 0x63, 0x6d, 0x9a, 0x0e, 0xc5, 0x06, 0x2f, 0x54, 0x87, 0x11, 0xb2, 0xeb, 0xd6, 0x94, 0xe7, - 0x47, 0xe1, 0xc0, 0x87, 0xb4, 0xaa, 0x65, 0x49, 0xe3, 0x83, 0x0d, 0xae, 0x3d, 0xbb, 0xda, 0x6a, - 0x22, 0x5a, 0x5f, 0x17, 0x6f, 0x8f, 0x1f, 0xb5, 0xe0, 0xa1, 0x9c, 0xd0, 0xd7, 0xb4, 0xba, 0x5b, - 0xcc, 0x3d, 0x4a, 0x4c, 0x5b, 0x55, 0x1d, 0x77, 0x9a, 0xc2, 0x02, 0x8b, 0x3e, 0x0a, 0xc0, 0x9d, - 0x9e, 0x88, 0x57, 0xeb, 0x1a, 0x23, 0xd8, 0x08, 0x6f, 0xaa, 0x45, 0xaa, 0x94, 0xe5, 0xb1, 0xc6, - 0xcb, 0xfe, 0x72, 0x1f, 0xf4, 0x33, 0x27, 0x1b, 0x54, 0x81, 0xc1, 0x6d, 0x9e, 0x25, 0xae, 0xe3, - 0xb8, 0x51, 0x5a, 0x99, 0x78, 0x2e, 0x1e, 0x37, 0x0d, 0x8a, 0x25, 0x1b, 0xb4, 0x06, 0x53, 0x3c, - 0x59, 0x5f, 0x63, 0x91, 0x34, 0x9c, 0x3d, 0xa9, 0xf6, 0xe5, 0xf9, 0xe7, 0x95, 0xfa, 0x7b, 0x25, - 0x4d, 0x82, 0xb3, 0xca, 0xa1, 0xd7, 0x60, 0x8c, 0x5e, 0xc3, 0xfd, 0x76, 0x24, 0x39, 0xf1, 0x34, - 0x7d, 0xea, 0x66, 0xb2, 0x61, 0x60, 0x71, 0x82, 0x1a, 0xbd, 0x02, 0xa3, 0xad, 0x94, 0x82, 0xbb, - 0x3f, 0xd6, 0x04, 0x99, 0x4a, 0x6d, 0x93, 0x96, 0xf9, 0xfc, 0xb7, 0xd9, 0x0b, 0x87, 0x8d, 0xed, - 0x80, 0x84, 0xdb, 0x7e, 0xa3, 0xce, 0x24, 0xe0, 0x7e, 0xcd, 0xe7, 0x3f, 0x81, 0xc7, 0xa9, 0x12, - 0x94, 0xcb, 0xa6, 0xe3, 0x36, 0xda, 0x01, 0x89, 0xb9, 0x0c, 0x98, 0x5c, 0x96, 0x13, 0x78, 0x9c, - 0x2a, 0xd1, 0x5d, 0x73, 0x3f, 0x78, 0x38, 0x9a, 0x7b, 0xfb, 0xa7, 0x0b, 0x60, 0x0c, 0xed, 0x77, - 0x70, 0xfa, 0xc0, 0x57, 0xa1, 0x6f, 0x2b, 0x68, 0xd5, 0x84, 0x43, 0x59, 0xe6, 0x97, 0xc5, 0xb9, - 0xc3, 0xf9, 0x97, 0xd1, 0xff, 0x98, 0x95, 0xa2, 0x6b, 0xfc, 0x78, 0x25, 0xf0, 0xe9, 0x21, 0x27, - 0x63, 0x2d, 0xaa, 0xa7, 0x35, 0x83, 0x32, 0x48, 0x44, 0x87, 0xa8, 0xc4, 0xe2, 0x7d, 0x00, 0xe7, - 0x60, 0xf8, 0x5e, 0x55, 0x45, 0x28, 0x18, 0xc9, 0x05, 0x5d, 0x84, 0x61, 0x91, 0xd1, 0x8d, 0xbd, - 0x00, 0xe1, 0x8b, 0x89, 0xf9, 0x8a, 0x2d, 0xc6, 0x60, 0xac, 0xd3, 0xd8, 0x3f, 0x58, 0x80, 0xa9, - 0x8c, 0x27, 0x7c, 0xfc, 0x18, 0xd9, 0x72, 0xc3, 0x48, 0xa5, 0x27, 0xd7, 0x8e, 0x11, 0x0e, 0xc7, - 0x8a, 0x82, 0xee, 0x55, 0xfc, 0xa0, 0x4a, 0x1e, 0x4e, 0xe2, 0x89, 0x8c, 0xc0, 0x1e, 0x30, 0xd1, - 0xf7, 0x59, 0xe8, 0x6b, 0x87, 0x44, 0xc6, 0x13, 0x57, 0xc7, 0x36, 0x33, 0x6b, 0x33, 0x0c, 0xbd, - 0x02, 0x6e, 0x29, 0x0b, 0xb1, 0x76, 0x05, 0xe4, 0x36, 0x62, 0x8e, 0xa3, 0x8d, 0x8b, 0x88, 0xe7, - 0x78, 0x91, 0xb8, 0x28, 0xc6, 0x81, 0x71, 0x19, 0x14, 0x0b, 0xac, 0xfd, 0xa5, 0x22, 0x9c, 0xcc, - 0x7d, 0xd4, 0x4b, 0x9b, 0xde, 0xf4, 0x3d, 0x37, 0xf2, 0x95, 0x13, 0x1e, 0x0f, 0x86, 0x4b, 0x5a, - 0xdb, 0x6b, 0x02, 0x8e, 0x15, 0x05, 0x3a, 0x07, 0xfd, 0x4c, 0x29, 0x9e, 0x4a, 0xd4, 0x3e, 0xbf, - 0xc8, 0xa3, 0x23, 0x72, 0xb4, 0x76, 0xaa, 0x17, 0x3b, 0x9e, 0xea, 0x8f, 0x52, 0x09, 0xc6, 0x6f, - 0x24, 0x0f, 0x14, 0xda, 0x5c, 0xdf, 0x6f, 0x60, 0x86, 0x44, 0x8f, 0x8b, 0xfe, 0x4a, 0x78, 0x9d, - 0x61, 0xa7, 0xee, 0x87, 
0x5a, 0xa7, 0x3d, 0x09, 0x83, 0x3b, 0x64, 0x2f, 0x70, 0xbd, 0xad, 0xa4, - 0x37, 0xe2, 0x15, 0x0e, 0xc6, 0x12, 0x6f, 0xe6, 0x0c, 0x1e, 0x3c, 0x8c, 0x9c, 0xc1, 0xfa, 0x0c, - 0x18, 0xea, 0x2a, 0x9e, 0xfc, 0x50, 0x11, 0xc6, 0xf1, 0xfc, 0xe2, 0x7b, 0x03, 0x71, 0x2d, 0x3d, - 0x10, 0x87, 0x91, 0x5a, 0xf7, 0x60, 0xa3, 0xf1, 0x4b, 0x16, 0x8c, 0xb3, 0xbc, 0x72, 0x22, 0x22, - 0x87, 0xeb, 0x7b, 0x47, 0x70, 0x15, 0x78, 0x14, 0xfa, 0x03, 0x5a, 0x69, 0x32, 0x43, 0x3b, 0x6b, - 0x09, 0xe6, 0x38, 0x74, 0x0a, 0xfa, 0x58, 0x13, 0xe8, 0xe0, 0x8d, 0xf0, 0x2d, 0x78, 0xd1, 0x89, - 0x1c, 0xcc, 0xa0, 0x2c, 0x36, 0x20, 0x26, 0xad, 0x86, 0xcb, 0x1b, 0x1d, 0xbb, 0x2c, 0xbc, 0x3b, - 0xc2, 0x7d, 0x64, 0x36, 0xed, 0x9d, 0xc5, 0x06, 0xcc, 0x66, 0xd9, 0xf9, 0x9a, 0xfd, 0x17, 0x05, - 0x38, 0x93, 0x59, 0xae, 0xe7, 0xd8, 0x80, 0x9d, 0x4b, 0x3f, 0xc8, 0x14, 0x59, 0xc5, 0x23, 0xf4, - 0xf5, 0xee, 0xeb, 0x55, 0xfa, 0xef, 0xef, 0x21, 0x64, 0x5f, 0x66, 0x97, 0xbd, 0x4b, 0x42, 0xf6, - 0x65, 0xb6, 0x2d, 0x47, 0x4d, 0xf0, 0x37, 0x85, 0x9c, 0x6f, 0x61, 0x0a, 0x83, 0xf3, 0x74, 0x9f, - 0x61, 0xc8, 0x50, 0x5e, 0xc2, 0xf9, 0x1e, 0xc3, 0x61, 0x58, 0x61, 0xd1, 0x1c, 0x8c, 0x37, 0x5d, - 0x8f, 0x6e, 0x3e, 0x7b, 0xa6, 0x28, 0xae, 0x6c, 0x19, 0x6b, 0x26, 0x1a, 0x27, 0xe9, 0x91, 0xab, - 0x85, 0xf3, 0xe3, 0x5f, 0xf7, 0xca, 0x81, 0x56, 0xdd, 0xac, 0xe9, 0xce, 0xa1, 0x7a, 0x31, 0x23, - 0xb4, 0xdf, 0x9a, 0xa6, 0x27, 0x2a, 0xf6, 0xae, 0x27, 0x1a, 0xc9, 0xd6, 0x11, 0xcd, 0xbc, 0x02, - 0xa3, 0xf7, 0x6d, 0x1b, 0xb1, 0xbf, 0x51, 0x84, 0x87, 0x3b, 0x2c, 0x7b, 0xbe, 0xd7, 0x1b, 0x63, - 0xa0, 0xed, 0xf5, 0xa9, 0x71, 0xa8, 0xc0, 0xb1, 0xcd, 0x76, 0xa3, 0xb1, 0xc7, 0x1e, 0x35, 0x91, - 0xba, 0xa4, 0x10, 0x32, 0xa5, 0x54, 0x8e, 0x1c, 0x5b, 0xce, 0xa0, 0xc1, 0x99, 0x25, 0xe9, 0x15, - 0x8b, 0x9e, 0x24, 0x7b, 0x8a, 0x55, 0xe2, 0x8a, 0x85, 0x75, 0x24, 0x36, 0x69, 0xd1, 0x25, 0x98, - 0x74, 0x76, 0x1d, 0x97, 0xe7, 0x44, 0x90, 0x0c, 0xf8, 0x1d, 0x4b, 0xe9, 0xa2, 0xe7, 0x92, 0x04, - 0x38, 0x5d, 0x06, 0xbd, 0x0e, 0xc8, 0xbf, 0xc9, 0x1e, 0x4a, 0xd4, 0x2f, 0x11, 0x4f, 0x58, 0xdd, - 0xd9, 0xd8, 0x15, 0xe3, 0x2d, 0xe1, 0x6a, 0x8a, 0x02, 0x67, 0x94, 0x4a, 0x04, 0x96, 0x1b, 0xc8, - 0x0f, 0x2c, 0xd7, 0x79, 0x5f, 0xec, 0x9a, 0x9d, 0xed, 0x22, 0x8c, 0x1e, 0xd0, 0xfd, 0xd7, 0xfe, - 0xb7, 0x16, 0x28, 0x05, 0xb1, 0x19, 0x18, 0xfa, 0x15, 0xe6, 0x9f, 0xcc, 0x55, 0xdb, 0x5a, 0x2c, - 0xa8, 0xe3, 0x9a, 0x7f, 0x72, 0x8c, 0xc4, 0x26, 0x2d, 0x9f, 0x43, 0x9a, 0x5f, 0xb1, 0x71, 0x2b, - 0x10, 0x71, 0x2b, 0x15, 0x05, 0xfa, 0x18, 0x0c, 0xd6, 0xdd, 0x5d, 0x37, 0x14, 0xca, 0xb1, 0x03, - 0x1b, 0xe3, 0xe2, 0xad, 0x73, 0x91, 0xb3, 0xc1, 0x92, 0x9f, 0xfd, 0x43, 0x85, 0xb8, 0x4f, 0xde, - 0x68, 0xfb, 0x91, 0x73, 0x04, 0x27, 0xf9, 0x25, 0xe3, 0x24, 0x7f, 0x3c, 0x7b, 0xa0, 0xb5, 0x26, - 0xe5, 0x9e, 0xe0, 0x57, 0x13, 0x27, 0xf8, 0x13, 0xdd, 0x59, 0x75, 0x3e, 0xb9, 0xff, 0xa1, 0x05, - 0x93, 0x06, 0xfd, 0x11, 0x1c, 0x20, 0xcb, 0xe6, 0x01, 0xf2, 0x48, 0xd7, 0x6f, 0xc8, 0x39, 0x38, - 0xbe, 0xbf, 0x98, 0x68, 0x3b, 0x3b, 0x30, 0xde, 0x86, 0xbe, 0x6d, 0x27, 0xa8, 0x77, 0x4a, 0x59, - 0x94, 0x2a, 0x34, 0x7b, 0xd9, 0x09, 0x84, 0xa7, 0xc2, 0x33, 0xb2, 0xd7, 0x29, 0xa8, 0xab, 0x97, - 0x02, 0xab, 0x0a, 0xbd, 0x04, 0x03, 0x61, 0xcd, 0x6f, 0xa9, 0x37, 0x53, 0x2c, 0xe5, 0x6f, 0x95, - 0x41, 0xee, 0xed, 0x97, 0x91, 0x59, 0x1d, 0x05, 0x63, 0x41, 0x8f, 0xde, 0x84, 0x51, 0xf6, 0x4b, - 0xb9, 0x0d, 0x16, 0xf3, 0x35, 0x18, 0x55, 0x9d, 0x90, 0xfb, 0xd4, 0x1a, 0x20, 0x6c, 0xb2, 0x9a, - 0xd9, 0x82, 0x92, 0xfa, 0xac, 0x07, 0x6a, 0xed, 0xfe, 0x57, 0x45, 0x98, 0xca, 0x98, 0x73, 0x28, - 0x34, 0x46, 0xe2, 0x62, 0x8f, 0x53, 0xf5, 0x1d, 
0x8e, 0x45, 0xc8, 0x2e, 0x50, 0x75, 0x31, 0xb7, - 0x7a, 0xae, 0xf4, 0x5a, 0x48, 0x92, 0x95, 0x52, 0x50, 0xf7, 0x4a, 0x69, 0x65, 0x47, 0xd6, 0xd5, - 0xb4, 0x22, 0xd5, 0xd2, 0x07, 0x3a, 0xa6, 0xbf, 0xd1, 0x07, 0xc7, 0xb2, 0xe2, 0x09, 0xa3, 0xcf, - 0x24, 0xf2, 0x88, 0xbf, 0xd0, 0xa9, 0x87, 0xf5, 0x92, 0x3c, 0xb9, 0xb8, 0x08, 0xe3, 0x39, 0x6b, - 0x66, 0x16, 0xef, 0xda, 0xcd, 0xa2, 0x4e, 0x16, 0x5e, 0x27, 0xe0, 0xf9, 0xdf, 0xe5, 0xf6, 0xf1, - 0x81, 0x9e, 0x1b, 0x20, 0x12, 0xc7, 0x87, 0x09, 0x97, 0x24, 0x09, 0xee, 0xee, 0x92, 0x24, 0x6b, - 0x46, 0x2b, 0x30, 0x50, 0xe3, 0xbe, 0x2e, 0xc5, 0xee, 0x5b, 0x18, 0x77, 0x74, 0x51, 0x1b, 0xb0, - 0x70, 0x70, 0x11, 0x0c, 0x66, 0x5c, 0x18, 0xd6, 0x3a, 0xe6, 0x81, 0x4e, 0x9e, 0x1d, 0x7a, 0xf0, - 0x69, 0x5d, 0xf0, 0x40, 0x27, 0xd0, 0x8f, 0x5a, 0x90, 0x78, 0xf0, 0xa2, 0x94, 0x72, 0x56, 0xae, - 0x52, 0xee, 0x2c, 0xf4, 0x05, 0x7e, 0x83, 0x24, 0x93, 0x54, 0x63, 0xbf, 0x41, 0x30, 0xc3, 0x50, - 0x8a, 0x28, 0x56, 0xb5, 0x8c, 0xe8, 0xd7, 0x48, 0x71, 0x41, 0x7c, 0x14, 0xfa, 0x1b, 0x64, 0x97, - 0x34, 0x92, 0xb9, 0x04, 0x57, 0x29, 0x10, 0x73, 0x9c, 0xfd, 0x4b, 0x7d, 0x70, 0xba, 0x63, 0xac, - 0x2b, 0x7a, 0x19, 0xdb, 0x72, 0x22, 0x72, 0xcb, 0xd9, 0x4b, 0x26, 0xfd, 0xba, 0xc4, 0xc1, 0x58, - 0xe2, 0xd9, 0xf3, 0x4f, 0x9e, 0xbb, 0x23, 0xa1, 0xc2, 0x14, 0x29, 0x3b, 0x04, 0xd6, 0x54, 0x89, - 0x15, 0x0f, 0x43, 0x25, 0xf6, 0x1c, 0x40, 0x18, 0x36, 0xb8, 0x5b, 0x60, 0x5d, 0xbc, 0x2b, 0x8d, - 0x73, 0xbc, 0x54, 0x57, 0x05, 0x06, 0x6b, 0x54, 0x68, 0x11, 0x26, 0x5a, 0x81, 0x1f, 0x71, 0x8d, - 0xf0, 0x22, 0xf7, 0x9c, 0xed, 0x37, 0xc3, 0x0c, 0x55, 0x12, 0x78, 0x9c, 0x2a, 0x81, 0x5e, 0x84, - 0x61, 0x11, 0x7a, 0xa8, 0xe2, 0xfb, 0x0d, 0xa1, 0x84, 0x52, 0xce, 0xa4, 0xd5, 0x18, 0x85, 0x75, - 0x3a, 0xad, 0x18, 0x53, 0x33, 0x0f, 0x66, 0x16, 0xe3, 0xaa, 0x66, 0x8d, 0x2e, 0x11, 0xa6, 0x7c, - 0xa8, 0xa7, 0x30, 0xe5, 0xb1, 0x5a, 0xae, 0xd4, 0xb3, 0xd5, 0x13, 0xba, 0x2a, 0xb2, 0xbe, 0xda, - 0x07, 0x53, 0x62, 0xe2, 0x3c, 0xe8, 0xe9, 0x72, 0x2d, 0x3d, 0x5d, 0x0e, 0x43, 0x71, 0xf7, 0xde, - 0x9c, 0x39, 0xea, 0x39, 0xf3, 0xc3, 0x16, 0x98, 0x92, 0x1a, 0xfa, 0xdf, 0x73, 0xb3, 0x26, 0xbe, - 0x98, 0x2b, 0xf9, 0xc5, 0x31, 0x8c, 0xdf, 0x59, 0xfe, 0x44, 0xfb, 0x5f, 0x5b, 0xf0, 0x48, 0x57, - 0x8e, 0x68, 0x09, 0x4a, 0x4c, 0x9c, 0xd4, 0x2e, 0x7a, 0x4f, 0x28, 0xcf, 0x7a, 0x89, 0xc8, 0x91, - 0x6e, 0xe3, 0x92, 0x68, 0x29, 0x95, 0x9e, 0xf2, 0xc9, 0x8c, 0xf4, 0x94, 0xc7, 0x8d, 0xee, 0xb9, - 0xcf, 0xfc, 0x94, 0x5f, 0xa4, 0x27, 0x8e, 0xf1, 0xaa, 0x0d, 0x7d, 0xc0, 0x50, 0x3a, 0xda, 0x09, - 0xa5, 0x23, 0x32, 0xa9, 0xb5, 0x33, 0xe4, 0x23, 0x30, 0xc1, 0x62, 0x12, 0xb2, 0x77, 0x1e, 0xe2, - 0xbd, 0x5d, 0x21, 0xf6, 0xe5, 0x5e, 0x4d, 0xe0, 0x70, 0x8a, 0xda, 0xfe, 0xb3, 0x22, 0x0c, 0xf0, - 0xe5, 0x77, 0x04, 0xd7, 0xcb, 0xa7, 0xa1, 0xe4, 0x36, 0x9b, 0x6d, 0x9e, 0x71, 0xb0, 0x3f, 0xf6, - 0x0c, 0x5e, 0x91, 0x40, 0x1c, 0xe3, 0xd1, 0xb2, 0xd0, 0x77, 0x77, 0x08, 0x7b, 0xcc, 0x1b, 0x3e, - 0xbb, 0xe8, 0x44, 0x0e, 0x97, 0x95, 0xd4, 0x39, 0x1b, 0x6b, 0xc6, 0xd1, 0x27, 0x01, 0xc2, 0x28, - 0x70, 0xbd, 0x2d, 0x0a, 0x13, 0xb1, 0xf1, 0x9f, 0xea, 0xc0, 0xad, 0xaa, 0x88, 0x39, 0xcf, 0x78, - 0xcf, 0x51, 0x08, 0xac, 0x71, 0x44, 0xb3, 0xc6, 0x49, 0x3f, 0x93, 0x18, 0x3b, 0xe0, 0x5c, 0xe3, - 0x31, 0x9b, 0xf9, 0x20, 0x94, 0x14, 0xf3, 0x6e, 0xda, 0xaf, 0x11, 0x5d, 0x2c, 0xfa, 0x30, 0x8c, - 0x27, 0xda, 0x76, 0x20, 0xe5, 0xd9, 0x2f, 0x5b, 0x30, 0xce, 0x1b, 0xb3, 0xe4, 0xed, 0x8a, 0xd3, - 0xe0, 0x0e, 0x1c, 0x6b, 0x64, 0xec, 0xca, 0x62, 0xf8, 0x7b, 0xdf, 0xc5, 0x95, 0xb2, 0x2c, 0x0b, - 0x8b, 0x33, 0xeb, 0x40, 0xe7, 0xe9, 0x8a, 0xa3, 0xbb, 0xae, 0xd3, 0x10, 
0xf1, 0x0d, 0x46, 0xf8, - 0x6a, 0xe3, 0x30, 0xac, 0xb0, 0xf6, 0x1f, 0x59, 0x30, 0xc9, 0x5b, 0x7e, 0x85, 0xec, 0xa9, 0xbd, - 0xe9, 0x5b, 0xd9, 0x76, 0x91, 0xeb, 0xb6, 0x90, 0x93, 0xeb, 0x56, 0xff, 0xb4, 0x62, 0xc7, 0x4f, - 0xfb, 0x8a, 0x05, 0x62, 0x86, 0x1c, 0x81, 0x3e, 0xe3, 0xbb, 0x4c, 0x7d, 0xc6, 0x4c, 0xfe, 0x22, - 0xc8, 0x51, 0x64, 0xfc, 0xb5, 0x05, 0x13, 0x9c, 0x20, 0xb6, 0xd5, 0x7f, 0x4b, 0xc7, 0x61, 0xde, - 0xfc, 0xa2, 0x4c, 0xe7, 0xcb, 0x2b, 0x64, 0x6f, 0xc3, 0xaf, 0x38, 0xd1, 0x76, 0xf6, 0x47, 0x19, - 0x83, 0xd5, 0xd7, 0x71, 0xb0, 0xea, 0x72, 0x01, 0x19, 0xa9, 0xe0, 0xba, 0x04, 0x08, 0x38, 0x68, - 0x2a, 0x38, 0xfb, 0xcf, 0x2d, 0x40, 0xbc, 0x1a, 0x43, 0x70, 0xa3, 0xe2, 0x10, 0x83, 0x6a, 0x07, - 0x5d, 0xbc, 0x35, 0x29, 0x0c, 0xd6, 0xa8, 0x0e, 0xa5, 0x7b, 0x12, 0x0e, 0x17, 0xc5, 0xee, 0x0e, - 0x17, 0x07, 0xe8, 0xd1, 0x7f, 0x3e, 0x00, 0xc9, 0x97, 0x7d, 0xe8, 0x3a, 0x8c, 0xd4, 0x9c, 0x96, - 0x73, 0xd3, 0x6d, 0xb8, 0x91, 0x4b, 0xc2, 0x4e, 0xde, 0x58, 0x0b, 0x1a, 0x9d, 0x30, 0x91, 0x6b, - 0x10, 0x6c, 0xf0, 0x41, 0xb3, 0x00, 0xad, 0xc0, 0xdd, 0x75, 0x1b, 0x64, 0x8b, 0xa9, 0x5d, 0x58, - 0x44, 0x15, 0xee, 0x1a, 0x26, 0xa1, 0x58, 0xa3, 0xc8, 0x08, 0xa3, 0x50, 0x7c, 0xc0, 0x61, 0x14, - 0xe0, 0xc8, 0xc2, 0x28, 0xf4, 0x1d, 0x28, 0x8c, 0xc2, 0xd0, 0x81, 0xc3, 0x28, 0xf4, 0xf7, 0x14, - 0x46, 0x01, 0xc3, 0x09, 0x29, 0x7b, 0xd2, 0xff, 0xcb, 0x6e, 0x83, 0x88, 0x0b, 0x07, 0x0f, 0x03, - 0x33, 0x73, 0x77, 0xbf, 0x7c, 0x02, 0x67, 0x52, 0xe0, 0x9c, 0x92, 0xe8, 0xa3, 0x30, 0xed, 0x34, - 0x1a, 0xfe, 0x2d, 0x35, 0xa8, 0x4b, 0x61, 0xcd, 0x69, 0x70, 0x13, 0xc8, 0x20, 0xe3, 0x7a, 0xea, - 0xee, 0x7e, 0x79, 0x7a, 0x2e, 0x87, 0x06, 0xe7, 0x96, 0x46, 0xaf, 0x42, 0xa9, 0x15, 0xf8, 0xb5, - 0x35, 0xed, 0xf9, 0xf1, 0x19, 0xda, 0x81, 0x15, 0x09, 0xbc, 0xb7, 0x5f, 0x1e, 0x55, 0x7f, 0xd8, - 0x81, 0x1f, 0x17, 0xc8, 0x88, 0x8b, 0x30, 0x7c, 0xa8, 0x71, 0x11, 0x76, 0x60, 0xaa, 0x4a, 0x02, - 0xd7, 0x69, 0xb8, 0x77, 0xa8, 0xbc, 0x2c, 0xf7, 0xa7, 0x0d, 0x28, 0x05, 0x89, 0x1d, 0xb9, 0xa7, - 0x50, 0xc4, 0x5a, 0x36, 0x2e, 0xb9, 0x03, 0xc7, 0x8c, 0xec, 0xff, 0x66, 0xc1, 0xa0, 0x78, 0xc9, - 0x77, 0x04, 0x52, 0xe3, 0x9c, 0x61, 0x94, 0x28, 0x67, 0x77, 0x18, 0x6b, 0x4c, 0xae, 0x39, 0x62, - 0x25, 0x61, 0x8e, 0x78, 0xa4, 0x13, 0x93, 0xce, 0x86, 0x88, 0xff, 0xaf, 0x48, 0xa5, 0x77, 0xe3, - 0x4d, 0xf9, 0x83, 0xef, 0x82, 0x75, 0x18, 0x0c, 0xc5, 0x9b, 0xe6, 0x42, 0xfe, 0x6b, 0x90, 0xe4, - 0x20, 0xc6, 0x5e, 0x74, 0xe2, 0x15, 0xb3, 0x64, 0x92, 0xf9, 0x58, 0xba, 0xf8, 0x00, 0x1f, 0x4b, - 0x77, 0x7b, 0x75, 0xdf, 0x77, 0x18, 0xaf, 0xee, 0xed, 0xaf, 0xb3, 0x93, 0x53, 0x87, 0x1f, 0x81, - 0x50, 0x75, 0xc9, 0x3c, 0x63, 0xed, 0x0e, 0x33, 0x4b, 0x34, 0x2a, 0x47, 0xb8, 0xfa, 0x45, 0x0b, - 0x4e, 0x67, 0x7c, 0x95, 0x26, 0x69, 0x3d, 0x03, 0x43, 0x4e, 0xbb, 0xee, 0xaa, 0xb5, 0xac, 0x99, - 0x26, 0xe7, 0x04, 0x1c, 0x2b, 0x0a, 0xb4, 0x00, 0x93, 0xe4, 0x76, 0xcb, 0xe5, 0x86, 0x5c, 0xdd, - 0xf9, 0xb8, 0xc8, 0x9f, 0x7f, 0x2e, 0x25, 0x91, 0x38, 0x4d, 0xaf, 0x02, 0x44, 0x15, 0x73, 0x03, - 0x44, 0xfd, 0xbc, 0x05, 0xc3, 0xea, 0x55, 0xef, 0x03, 0xef, 0xed, 0x8f, 0x98, 0xbd, 0xfd, 0x70, - 0x87, 0xde, 0xce, 0xe9, 0xe6, 0x3f, 0x28, 0xa8, 0xf6, 0x56, 0xfc, 0x20, 0xea, 0x41, 0x82, 0xbb, - 0xff, 0x87, 0x13, 0x17, 0x61, 0xd8, 0x69, 0xb5, 0x24, 0x42, 0x7a, 0xc0, 0xb1, 0xc0, 0xf2, 0x31, - 0x18, 0xeb, 0x34, 0xea, 0x1d, 0x47, 0x31, 0xf7, 0x1d, 0x47, 0x1d, 0x20, 0x72, 0x82, 0x2d, 0x12, - 0x51, 0x98, 0x70, 0xd8, 0xcd, 0xdf, 0x6f, 0xda, 0x91, 0xdb, 0x98, 0x75, 0xbd, 0x28, 0x8c, 0x82, - 0xd9, 0x15, 0x2f, 0xba, 0x1a, 0xf0, 0x2b, 0xa4, 0x16, 0x62, 0x4d, 0xf1, 0xc2, 0x1a, 0x5f, 0x19, - 
0xc1, 0x82, 0xd5, 0xd1, 0x6f, 0xba, 0x52, 0xac, 0x0b, 0x38, 0x56, 0x14, 0xf6, 0x07, 0xd9, 0xe9, - 0xc3, 0xfa, 0xf4, 0x60, 0xe1, 0xc5, 0x7e, 0x72, 0x44, 0x8d, 0x06, 0x33, 0x8a, 0x2e, 0xea, 0x41, - 0xcc, 0x3a, 0x6f, 0xf6, 0xb4, 0x62, 0xfd, 0x45, 0x64, 0x1c, 0xe9, 0x0c, 0x7d, 0x3c, 0xe5, 0x1e, - 0xf3, 0x6c, 0x97, 0x53, 0xe3, 0x00, 0x0e, 0x31, 0x2c, 0xcb, 0x14, 0xcb, 0xc1, 0xb3, 0x52, 0x11, - 0xeb, 0x42, 0xcb, 0x32, 0x25, 0x10, 0x38, 0xa6, 0xa1, 0xc2, 0x94, 0xfa, 0x13, 0x4e, 0xa3, 0x38, - 0x18, 0xb1, 0xa2, 0x0e, 0xb1, 0x46, 0x81, 0x2e, 0x08, 0x85, 0x02, 0xb7, 0x0b, 0x3c, 0x9c, 0x50, - 0x28, 0xc8, 0xee, 0xd2, 0xb4, 0x40, 0x17, 0x61, 0x98, 0xdc, 0x8e, 0x48, 0xe0, 0x39, 0x0d, 0x5a, - 0x43, 0x7f, 0x1c, 0x3f, 0x73, 0x29, 0x06, 0x63, 0x9d, 0x06, 0x6d, 0xc0, 0x78, 0xc8, 0xf5, 0x6c, - 0x2a, 0x04, 0x3e, 0xd7, 0x57, 0x3e, 0xa5, 0xde, 0x53, 0x9b, 0xe8, 0x7b, 0x0c, 0xc4, 0x77, 0x27, - 0x19, 0x65, 0x22, 0xc9, 0x02, 0xbd, 0x06, 0x63, 0x0d, 0xdf, 0xa9, 0xcf, 0x3b, 0x0d, 0xc7, 0xab, - 0xb1, 0xfe, 0x19, 0x32, 0x73, 0x95, 0xaf, 0x1a, 0x58, 0x9c, 0xa0, 0xa6, 0xc2, 0x9b, 0x0e, 0x11, - 0x61, 0xda, 0x1c, 0x6f, 0x8b, 0x84, 0xd3, 0x25, 0xf6, 0x55, 0x4c, 0x78, 0x5b, 0xcd, 0xa1, 0xc1, - 0xb9, 0xa5, 0xd1, 0x4b, 0x30, 0x22, 0x3f, 0x5f, 0x0b, 0xca, 0x12, 0x3f, 0x89, 0xd1, 0x70, 0xd8, - 0xa0, 0x44, 0x21, 0x1c, 0x97, 0xff, 0x37, 0x02, 0x67, 0x73, 0xd3, 0xad, 0x89, 0x48, 0x05, 0xfc, - 0xf9, 0xf0, 0x87, 0xe5, 0x5b, 0xc5, 0xa5, 0x2c, 0xa2, 0x7b, 0xfb, 0xe5, 0x53, 0xa2, 0xd7, 0x32, - 0xf1, 0x38, 0x9b, 0x37, 0x5a, 0x83, 0xa9, 0x6d, 0xe2, 0x34, 0xa2, 0xed, 0x85, 0x6d, 0x52, 0xdb, - 0x91, 0x0b, 0x8e, 0x85, 0x79, 0xd1, 0x9e, 0x8e, 0x5c, 0x4e, 0x93, 0xe0, 0xac, 0x72, 0xe8, 0x2d, - 0x98, 0x6e, 0xb5, 0x6f, 0x36, 0xdc, 0x70, 0x7b, 0xdd, 0x8f, 0x98, 0x13, 0xd2, 0x5c, 0xbd, 0x1e, - 0x90, 0x90, 0xbf, 0x2e, 0x65, 0x47, 0xaf, 0x0c, 0xa4, 0x53, 0xc9, 0xa1, 0xc3, 0xb9, 0x1c, 0xd0, - 0x1d, 0x38, 0x9e, 0x98, 0x08, 0x22, 0x22, 0xc6, 0x58, 0x7e, 0x02, 0x9c, 0x6a, 0x56, 0x01, 0x11, - 0x5c, 0x26, 0x0b, 0x85, 0xb3, 0xab, 0x40, 0x2f, 0x03, 0xb8, 0xad, 0x65, 0xa7, 0xe9, 0x36, 0xe8, - 0x55, 0x71, 0x8a, 0xcd, 0x11, 0x7a, 0x6d, 0x80, 0x95, 0x8a, 0x84, 0xd2, 0xbd, 0x59, 0xfc, 0xdb, - 0xc3, 0x1a, 0x35, 0x5a, 0x85, 0x31, 0xf1, 0x6f, 0x4f, 0x0c, 0xe9, 0xa4, 0xca, 0x95, 0x38, 0x26, - 0x4b, 0xa8, 0x71, 0x4c, 0x40, 0x70, 0xa2, 0x2c, 0xda, 0x82, 0xd3, 0x32, 0x51, 0xa3, 0x3e, 0x3f, - 0xe5, 0x18, 0x84, 0x2c, 0xeb, 0xcc, 0x10, 0x7f, 0x95, 0x32, 0xd7, 0x89, 0x10, 0x77, 0xe6, 0x43, - 0xcf, 0x75, 0x7d, 0x9a, 0xf3, 0x37, 0xc7, 0xc7, 0xe3, 0x88, 0x83, 0xab, 0x49, 0x24, 0x4e, 0xd3, - 0x23, 0x1f, 0x8e, 0xbb, 0x5e, 0xd6, 0xac, 0x3e, 0xc1, 0x18, 0x7d, 0x88, 0x3f, 0xb7, 0xee, 0x3c, - 0xa3, 0x33, 0xf1, 0x38, 0x9b, 0xef, 0x3b, 0xf3, 0xfb, 0xfb, 0x43, 0x8b, 0x96, 0xd6, 0xa4, 0x73, - 0xf4, 0x29, 0x18, 0xd1, 0x3f, 0x4a, 0x48, 0x1a, 0xe7, 0xb2, 0x85, 0x57, 0x6d, 0x4f, 0xe0, 0xb2, - 0xbd, 0x5a, 0xf7, 0x3a, 0x0e, 0x1b, 0x1c, 0x51, 0x2d, 0x23, 0xb6, 0xc1, 0x85, 0xde, 0x24, 0x99, - 0xde, 0xdd, 0xde, 0x08, 0x64, 0x4f, 0x77, 0xb4, 0x0a, 0x43, 0xb5, 0x86, 0x4b, 0xbc, 0x68, 0xa5, - 0xd2, 0x29, 0x7a, 0xe3, 0x82, 0xa0, 0x11, 0xeb, 0x47, 0x24, 0x90, 0xe1, 0x30, 0xac, 0x38, 0xd8, - 0x2f, 0xc1, 0x70, 0xb5, 0x41, 0x48, 0x8b, 0x3f, 0xdf, 0x41, 0x4f, 0xb2, 0xdb, 0x04, 0x93, 0x07, - 0x2d, 0x26, 0x0f, 0xea, 0x17, 0x05, 0x26, 0x09, 0x4a, 0xbc, 0xfd, 0x5b, 0x05, 0x28, 0x77, 0xc9, - 0x63, 0x94, 0x30, 0x60, 0x59, 0x3d, 0x19, 0xb0, 0xe6, 0x60, 0x3c, 0xfe, 0xa7, 0xeb, 0xc6, 0x94, - 0x0f, 0xec, 0x75, 0x13, 0x8d, 0x93, 0xf4, 0x3d, 0x3f, 0x67, 0xd0, 0x6d, 0x60, 0x7d, 0x5d, 0x1f, - 0xe4, 0x18, 0xb6, 0xef, 
0xfe, 0xde, 0x2f, 0xcc, 0xb9, 0x76, 0x4c, 0xfb, 0xeb, 0x05, 0x38, 0xae, - 0xba, 0xf0, 0x3b, 0xb7, 0xe3, 0xae, 0xa5, 0x3b, 0xee, 0x10, 0xac, 0xc0, 0xf6, 0x55, 0x18, 0xe0, - 0x81, 0x2c, 0x7b, 0x10, 0xd4, 0x1f, 0x35, 0xe3, 0x6b, 0x2b, 0xd9, 0xd0, 0x88, 0xb1, 0xfd, 0x03, - 0x16, 0x8c, 0x27, 0xde, 0xc5, 0x21, 0xac, 0x3d, 0x9e, 0xbe, 0x1f, 0x61, 0x3a, 0x4b, 0x4c, 0x3f, - 0x0b, 0x7d, 0xdb, 0x7e, 0x18, 0x25, 0x5d, 0x44, 0x2e, 0xfb, 0x61, 0x84, 0x19, 0xc6, 0xfe, 0x63, - 0x0b, 0xfa, 0x37, 0x1c, 0xd7, 0x8b, 0xa4, 0x39, 0xc1, 0xca, 0x31, 0x27, 0xf4, 0xf2, 0x5d, 0xe8, - 0x45, 0x18, 0x20, 0x9b, 0x9b, 0xa4, 0x16, 0x89, 0x51, 0x95, 0x41, 0x14, 0x06, 0x96, 0x18, 0x94, - 0x4a, 0x8e, 0xac, 0x32, 0xfe, 0x17, 0x0b, 0x62, 0x74, 0x03, 0x4a, 0x91, 0xdb, 0x24, 0x73, 0xf5, - 0xba, 0x30, 0xb2, 0xdf, 0x47, 0xe4, 0x8f, 0x0d, 0xc9, 0x00, 0xc7, 0xbc, 0xec, 0x2f, 0x15, 0x00, - 0xe2, 0x08, 0x60, 0xdd, 0x3e, 0x71, 0x3e, 0x65, 0x7e, 0x3d, 0x97, 0x61, 0x7e, 0x45, 0x31, 0xc3, - 0x0c, 0xdb, 0xab, 0xea, 0xa6, 0x62, 0x4f, 0xdd, 0xd4, 0x77, 0x90, 0x6e, 0x5a, 0x80, 0xc9, 0x38, - 0x82, 0x99, 0x19, 0xc0, 0x91, 0x1d, 0xba, 0x1b, 0x49, 0x24, 0x4e, 0xd3, 0xdb, 0x04, 0xce, 0xaa, - 0x40, 0x4e, 0xe2, 0x2c, 0x64, 0x1e, 0xe4, 0xba, 0x39, 0xbb, 0x4b, 0x3f, 0xc5, 0xf6, 0xe5, 0x42, - 0xae, 0x7d, 0xf9, 0x27, 0x2c, 0x38, 0x96, 0xac, 0x87, 0x3d, 0xb7, 0xfe, 0x82, 0x05, 0xc7, 0xe3, - 0x34, 0x1e, 0x69, 0x9b, 0xfe, 0x0b, 0x1d, 0x83, 0x53, 0xe5, 0xb4, 0x38, 0x8e, 0xd6, 0xb1, 0x96, - 0xc5, 0x1a, 0x67, 0xd7, 0x68, 0xff, 0xd7, 0x3e, 0x98, 0xce, 0x8b, 0x6a, 0xc5, 0x1e, 0x98, 0x38, - 0xb7, 0xab, 0x3b, 0xe4, 0x96, 0x70, 0xe3, 0x8f, 0x1f, 0x98, 0x70, 0x30, 0x96, 0xf8, 0x64, 0xe6, - 0x96, 0x42, 0x8f, 0x99, 0x5b, 0xb6, 0x61, 0xf2, 0xd6, 0x36, 0xf1, 0xae, 0x79, 0xa1, 0x13, 0xb9, - 0xe1, 0xa6, 0xcb, 0x2c, 0xd2, 0x7c, 0xde, 0xc8, 0xec, 0xe3, 0x93, 0x37, 0x92, 0x04, 0xf7, 0xf6, - 0xcb, 0xa7, 0x0d, 0x40, 0xdc, 0x64, 0xbe, 0x91, 0xe0, 0x34, 0xd3, 0x74, 0xe2, 0x9b, 0xbe, 0x07, - 0x9c, 0xf8, 0xa6, 0xe9, 0x0a, 0x3f, 0x16, 0xf9, 0x7a, 0x80, 0xdd, 0x35, 0xd7, 0x14, 0x14, 0x6b, - 0x14, 0xe8, 0x13, 0x80, 0xf4, 0xcc, 0x65, 0x46, 0x50, 0xd1, 0x67, 0xef, 0xee, 0x97, 0xd1, 0x7a, - 0x0a, 0x7b, 0x6f, 0xbf, 0x3c, 0x45, 0xa1, 0x2b, 0x1e, 0xbd, 0xb3, 0xc6, 0x91, 0xd8, 0x32, 0x18, - 0xa1, 0x1b, 0x30, 0x41, 0xa1, 0x6c, 0x45, 0xc9, 0x88, 0xa5, 0xfc, 0x9e, 0xf9, 0xf4, 0xdd, 0xfd, - 0xf2, 0xc4, 0x7a, 0x02, 0x97, 0xc7, 0x3a, 0xc5, 0x24, 0x23, 0xff, 0xcd, 0x50, 0xaf, 0xf9, 0x6f, - 0xec, 0x2f, 0x58, 0x70, 0x92, 0x1e, 0x70, 0xf5, 0xd5, 0x1c, 0xb3, 0xb4, 0xd3, 0x72, 0xb9, 0xe1, - 0x43, 0x1c, 0x35, 0x4c, 0xc1, 0x56, 0x59, 0xe1, 0x66, 0x0f, 0x85, 0xa5, 0x3b, 0xfc, 0x8e, 0xeb, - 0xd5, 0x93, 0x3b, 0xfc, 0x15, 0xd7, 0xab, 0x63, 0x86, 0x51, 0x47, 0x56, 0x31, 0xf7, 0x11, 0xc3, - 0x57, 0xe9, 0x5a, 0xa5, 0x6d, 0xf9, 0x96, 0x36, 0x03, 0x3d, 0xad, 0x1b, 0x29, 0x85, 0x3f, 0x62, - 0xae, 0x81, 0xf2, 0xf3, 0x16, 0x88, 0x47, 0xcf, 0x3d, 0x9c, 0xc9, 0x6f, 0xc2, 0xc8, 0x6e, 0x3a, - 0xab, 0xe3, 0xd9, 0xfc, 0x57, 0xe0, 0x22, 0x56, 0xbb, 0x12, 0xd1, 0x8d, 0x0c, 0x8e, 0x06, 0x2f, - 0xbb, 0x0e, 0x02, 0xbb, 0x48, 0x98, 0x29, 0xa2, 0x7b, 0x6b, 0x9e, 0x03, 0xa8, 0x33, 0x5a, 0x96, - 0xea, 0xb9, 0x60, 0x4a, 0x5c, 0x8b, 0x0a, 0x83, 0x35, 0x2a, 0xfb, 0x77, 0x0b, 0x30, 0x2c, 0xb3, - 0x08, 0xb6, 0xbd, 0x5e, 0x14, 0x86, 0x07, 0x4a, 0x2b, 0x8e, 0x2e, 0x40, 0x89, 0x69, 0xb4, 0x2b, - 0xb1, 0x9e, 0x55, 0xe9, 0x93, 0xd6, 0x24, 0x02, 0xc7, 0x34, 0x4c, 0x7c, 0x6f, 0xdf, 0x64, 0xe4, - 0x89, 0x27, 0xba, 0x55, 0x0e, 0xc6, 0x12, 0x8f, 0x3e, 0x0a, 0x13, 0xbc, 0x5c, 0xe0, 0xb7, 0x9c, - 0x2d, 0x6e, 0x05, 0xeb, 0x57, 0x71, 0x4f, 0x26, 
0xd6, 0x12, 0xb8, 0x7b, 0xfb, 0xe5, 0x63, 0x49, - 0x18, 0x33, 0xef, 0xa6, 0xb8, 0x30, 0x67, 0x37, 0x5e, 0x09, 0xdd, 0xd5, 0x53, 0x3e, 0x72, 0x31, - 0x0a, 0xeb, 0x74, 0xf6, 0xa7, 0x00, 0xa5, 0xf3, 0x29, 0xa2, 0xd7, 0xb9, 0xb3, 0xb4, 0x1b, 0x90, - 0x7a, 0x27, 0x73, 0xaf, 0x1e, 0xdd, 0x43, 0xbe, 0xae, 0xe3, 0xa5, 0xb0, 0x2a, 0x6f, 0xff, 0x60, - 0x1f, 0x4c, 0x24, 0xe3, 0x09, 0xa0, 0xcb, 0x30, 0xc0, 0x45, 0x4a, 0xc1, 0xbe, 0x83, 0x37, 0x91, - 0x16, 0x85, 0x80, 0x1d, 0xae, 0x42, 0x2a, 0x15, 0xe5, 0xd1, 0x5b, 0x30, 0x5c, 0xf7, 0x6f, 0x79, - 0xb7, 0x9c, 0xa0, 0x3e, 0x57, 0x59, 0x11, 0xd3, 0x39, 0x53, 0xc5, 0xb1, 0x18, 0x93, 0xe9, 0x91, - 0x0d, 0x98, 0xe5, 0x3c, 0x46, 0x61, 0x9d, 0x1d, 0xda, 0x60, 0x29, 0x42, 0x36, 0xdd, 0xad, 0x35, - 0xa7, 0xd5, 0xe9, 0xe5, 0xcc, 0x82, 0x24, 0xd2, 0x38, 0x8f, 0x8a, 0x3c, 0x22, 0x1c, 0x81, 0x63, - 0x46, 0xe8, 0x33, 0x30, 0x15, 0xe6, 0x18, 0x5d, 0xf2, 0xd2, 0xeb, 0x76, 0xb2, 0x43, 0xcc, 0x3f, - 0x74, 0x77, 0xbf, 0x3c, 0x95, 0x65, 0x9e, 0xc9, 0xaa, 0x06, 0xdd, 0x06, 0x24, 0x94, 0x9b, 0x1b, - 0x41, 0x3b, 0x8c, 0xe6, 0xdb, 0x5e, 0xbd, 0x21, 0x53, 0x88, 0x64, 0x27, 0xe0, 0x4e, 0x51, 0x6b, - 0x75, 0xb3, 0xf8, 0xa2, 0x69, 0x0a, 0x9c, 0x51, 0x87, 0xfd, 0xf9, 0x3e, 0x98, 0x91, 0x09, 0x4c, - 0x33, 0x5e, 0x08, 0x7c, 0xce, 0x4a, 0x3c, 0x11, 0x78, 0x39, 0x7f, 0x57, 0x7a, 0x60, 0x0f, 0x05, - 0xbe, 0x98, 0x7e, 0x28, 0xf0, 0xea, 0x01, 0x9b, 0x71, 0x68, 0xcf, 0x05, 0xbe, 0x63, 0x7d, 0xfc, - 0xbf, 0x7c, 0x0c, 0x8c, 0x73, 0xc4, 0x48, 0xf8, 0x6f, 0x1d, 0x52, 0xc2, 0x7f, 0x0c, 0x43, 0xa4, - 0xd9, 0x8a, 0xf6, 0x16, 0xdd, 0x40, 0xb4, 0x38, 0x93, 0xe7, 0x92, 0xa0, 0x49, 0xf3, 0x94, 0x18, - 0xac, 0xf8, 0xa0, 0x5d, 0x98, 0xdc, 0xaa, 0x91, 0x44, 0xce, 0xef, 0x62, 0xfe, 0xba, 0xbd, 0xb4, - 0xb0, 0xd4, 0x21, 0xe1, 0x37, 0xbb, 0xa9, 0xa4, 0x48, 0x70, 0xba, 0x0a, 0x96, 0x6f, 0xdc, 0xb9, - 0x15, 0x2e, 0x35, 0x9c, 0x30, 0x72, 0x6b, 0xf3, 0x0d, 0xbf, 0xb6, 0x53, 0x8d, 0xfc, 0x40, 0x26, - 0x1c, 0xcb, 0xbc, 0x28, 0xcc, 0xdd, 0xa8, 0xa6, 0xe8, 0xd3, 0xf9, 0xc6, 0xb3, 0xa8, 0x70, 0x66, - 0x5d, 0x68, 0x1d, 0x06, 0xb7, 0xdc, 0x08, 0x93, 0x96, 0x2f, 0x76, 0x8b, 0xcc, 0xad, 0xf0, 0x12, - 0x27, 0x49, 0xe7, 0xff, 0x16, 0x08, 0x2c, 0x99, 0xa0, 0xd7, 0xd5, 0x21, 0x30, 0x90, 0xaf, 0x2d, - 0x4c, 0x7b, 0x5e, 0x65, 0x1e, 0x03, 0xaf, 0x41, 0xd1, 0xdb, 0x0c, 0x3b, 0xc5, 0x0b, 0x59, 0x5f, - 0xae, 0xa6, 0xf3, 0x72, 0xaf, 0x2f, 0x57, 0x31, 0x2d, 0xc8, 0x9e, 0x16, 0x86, 0xb5, 0xd0, 0x15, - 0xa9, 0x53, 0x32, 0x5f, 0x5a, 0xae, 0x54, 0x17, 0xaa, 0x2b, 0xe9, 0x5c, 0xe4, 0x0c, 0x8c, 0x79, - 0x71, 0x74, 0x1d, 0x4a, 0x5b, 0x7c, 0xe3, 0xdb, 0x0c, 0x45, 0x12, 0xe3, 0xcc, 0xc3, 0xe8, 0x92, - 0x24, 0x4a, 0x67, 0x20, 0x57, 0x28, 0x1c, 0xb3, 0x42, 0x9f, 0xb7, 0xe0, 0x78, 0x32, 0x0b, 0x34, - 0x7b, 0x10, 0x24, 0x9c, 0x94, 0x5e, 0xec, 0x25, 0x2d, 0x37, 0x2b, 0x60, 0x54, 0xc8, 0x14, 0xfc, - 0x99, 0x64, 0x38, 0xbb, 0x3a, 0xda, 0xd1, 0xc1, 0xcd, 0xba, 0x70, 0x96, 0xc9, 0xec, 0xe8, 0x44, - 0xf0, 0x14, 0xde, 0xd1, 0x78, 0x7e, 0x11, 0xd3, 0x82, 0x68, 0x03, 0x60, 0xb3, 0x41, 0x64, 0xc2, - 0xfa, 0x91, 0xfc, 0xd3, 0x7f, 0x59, 0x51, 0xc9, 0x6c, 0x41, 0x54, 0x26, 0x8c, 0xa1, 0x58, 0xe3, - 0x43, 0xa7, 0x52, 0xcd, 0xf5, 0xea, 0x24, 0x60, 0xe6, 0x93, 0x9c, 0xa9, 0xb4, 0xc0, 0x28, 0xd2, - 0x53, 0x89, 0xc3, 0xb1, 0xe0, 0xc0, 0x78, 0x91, 0xd6, 0xf6, 0x66, 0xd8, 0x29, 0x2c, 0xfe, 0x02, - 0x69, 0x6d, 0x27, 0x26, 0x14, 0xe7, 0xc5, 0xe0, 0x58, 0x70, 0xa0, 0x4b, 0x66, 0x93, 0x2e, 0x20, - 0x12, 0x4c, 0x8f, 0xe7, 0x2f, 0x99, 0x65, 0x4e, 0x92, 0x5e, 0x32, 0x02, 0x81, 0x25, 0x13, 0xf4, - 0x49, 0x53, 0xda, 0x99, 0x60, 0x3c, 0x9f, 0xee, 0x22, 0xed, 0x18, 0x7c, 
0x3b, 0xcb, 0x3b, 0x2f, - 0x43, 0x61, 0xb3, 0xc6, 0xcc, 0x2e, 0x39, 0x0a, 0xee, 0xe5, 0x05, 0x83, 0x1b, 0x0b, 0x33, 0xbd, - 0xbc, 0x80, 0x0b, 0x9b, 0x35, 0x3a, 0xf5, 0x9d, 0x3b, 0xed, 0x80, 0x2c, 0xbb, 0x0d, 0x22, 0x42, - 0xe4, 0x67, 0x4e, 0xfd, 0x39, 0x49, 0x94, 0x9e, 0xfa, 0x0a, 0x85, 0x63, 0x56, 0x94, 0x6f, 0x2c, - 0x83, 0x4d, 0xe5, 0xf3, 0x55, 0xa2, 0x56, 0x9a, 0x6f, 0xa6, 0x14, 0xb6, 0x03, 0xa3, 0xbb, 0x61, - 0x6b, 0x9b, 0xc8, 0x5d, 0x91, 0x19, 0x84, 0x72, 0x5e, 0xd3, 0x5f, 0x17, 0x84, 0x6e, 0x10, 0xb5, - 0x9d, 0x46, 0x6a, 0x23, 0x67, 0x7a, 0x80, 0xeb, 0x3a, 0x33, 0x6c, 0xf2, 0xa6, 0x13, 0xe1, 0x6d, - 0x1e, 0xf2, 0x8a, 0x99, 0x86, 0x72, 0x26, 0x42, 0x46, 0x54, 0x2c, 0x3e, 0x11, 0x04, 0x02, 0x4b, - 0x26, 0xaa, 0xb3, 0xd9, 0x01, 0x74, 0xa2, 0x4b, 0x67, 0xa7, 0xda, 0x1b, 0x77, 0x36, 0x3b, 0x70, - 0x62, 0x56, 0xec, 0xa0, 0x69, 0x65, 0x24, 0xcc, 0x9e, 0x7e, 0x28, 0xff, 0xa0, 0xe9, 0x96, 0x60, - 0x9b, 0x1f, 0x34, 0x59, 0x54, 0x38, 0xb3, 0x2e, 0xfa, 0x71, 0x2d, 0x19, 0xbd, 0x4c, 0x84, 0xf1, - 0x7f, 0x32, 0x27, 0xf8, 0x5f, 0x3a, 0xc4, 0x19, 0xff, 0x38, 0x85, 0xc2, 0x31, 0x2b, 0x54, 0x87, - 0xb1, 0x96, 0x11, 0x15, 0x93, 0xa5, 0x23, 0xc8, 0x91, 0x0b, 0xb2, 0xe2, 0x67, 0x72, 0x75, 0x86, - 0x89, 0xc1, 0x09, 0x9e, 0xcc, 0x37, 0x8c, 0x3f, 0xf4, 0x62, 0xd9, 0x0a, 0x72, 0x86, 0x3a, 0xe3, - 0x2d, 0x18, 0x1f, 0x6a, 0x81, 0xc0, 0x92, 0x09, 0xed, 0x0d, 0xf1, 0x3c, 0xc9, 0x0f, 0x59, 0xd2, - 0x8f, 0x3c, 0x13, 0x6e, 0x96, 0x4d, 0x43, 0x86, 0x82, 0x16, 0x28, 0x1c, 0xb3, 0xa2, 0x3b, 0x39, - 0x3d, 0xf0, 0x4e, 0xe5, 0xef, 0xe4, 0xc9, 0xe3, 0x8e, 0xed, 0xe4, 0xf4, 0xb0, 0x2b, 0x8a, 0xa3, - 0x4e, 0x45, 0x2e, 0x66, 0x09, 0x0b, 0x72, 0xda, 0xa5, 0x42, 0x1f, 0xa7, 0xdb, 0xa5, 0x50, 0x38, - 0x66, 0x65, 0xff, 0x60, 0x01, 0xce, 0x74, 0x5e, 0x6f, 0xb1, 0xa1, 0xa6, 0x12, 0x7b, 0xb3, 0x24, - 0x0c, 0x35, 0x5c, 0x6d, 0x10, 0x53, 0xf5, 0x1c, 0xcc, 0xf4, 0x12, 0x4c, 0xaa, 0x47, 0x64, 0x0d, - 0xb7, 0xb6, 0xb7, 0x1e, 0x6b, 0x6a, 0x54, 0xd8, 0x8f, 0x6a, 0x92, 0x00, 0xa7, 0xcb, 0xa0, 0x39, - 0x18, 0x37, 0x80, 0x2b, 0x8b, 0x42, 0x3d, 0x10, 0x87, 0xc8, 0x37, 0xd1, 0x38, 0x49, 0x6f, 0xff, - 0x9c, 0x05, 0x0f, 0xe5, 0xe4, 0x2b, 0xee, 0x39, 0x56, 0xe7, 0x26, 0x8c, 0xb7, 0xcc, 0xa2, 0x5d, - 0xc2, 0x0b, 0x1b, 0x59, 0x91, 0x55, 0x5b, 0x13, 0x08, 0x9c, 0x64, 0x6a, 0xff, 0x4c, 0x01, 0x4e, - 0x77, 0xf4, 0x8a, 0x46, 0x18, 0x4e, 0x6c, 0x35, 0x43, 0x67, 0x21, 0x20, 0x75, 0xe2, 0x45, 0xae, - 0xd3, 0xa8, 0xb6, 0x48, 0x4d, 0x33, 0xb5, 0x31, 0xf7, 0xe2, 0x4b, 0x6b, 0xd5, 0xb9, 0x34, 0x05, - 0xce, 0x29, 0x89, 0x96, 0x01, 0xa5, 0x31, 0x62, 0x84, 0xd9, 0xd5, 0x34, 0xcd, 0x0f, 0x67, 0x94, - 0x40, 0x1f, 0x84, 0x51, 0xe5, 0x6d, 0xad, 0x8d, 0x38, 0xdb, 0xd8, 0xb1, 0x8e, 0xc0, 0x26, 0x1d, - 0xba, 0xc8, 0x73, 0xa7, 0x88, 0x2c, 0x3b, 0xc2, 0x2e, 0x37, 0x2e, 0x13, 0xa3, 0x08, 0x30, 0xd6, - 0x69, 0xe6, 0x5f, 0xfa, 0xed, 0x6f, 0x9e, 0x79, 0xdf, 0xef, 0x7f, 0xf3, 0xcc, 0xfb, 0xfe, 0xe8, - 0x9b, 0x67, 0xde, 0xf7, 0x3d, 0x77, 0xcf, 0x58, 0xbf, 0x7d, 0xf7, 0x8c, 0xf5, 0xfb, 0x77, 0xcf, - 0x58, 0x7f, 0x74, 0xf7, 0x8c, 0xf5, 0xef, 0xee, 0x9e, 0xb1, 0xbe, 0xf4, 0xa7, 0x67, 0xde, 0xf7, - 0x26, 0x8a, 0xa3, 0xdf, 0x5e, 0xa0, 0xa3, 0x73, 0x61, 0xf7, 0xe2, 0xff, 0x0a, 0x00, 0x00, 0xff, - 0xff, 0x31, 0xd1, 0xcb, 0x48, 0xf3, 0x1a, 0x01, 0x00, + proto.RegisterFile("k8s.io/api/core/v1/generated.proto", fileDescriptor_6c07b07c062484ab) +} + +var fileDescriptor_6c07b07c062484ab = []byte{ + // 16056 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x79, 0x90, 0x1c, 0xd7, + 0x79, 0x18, 0xae, 0x9e, 0xd9, 0xf3, 0xdb, 
0xfb, 0x2d, 0x8e, 0xc5, 0x12, 0xc0, 0x80, 0x4d, 0x12, + 0x04, 0xaf, 0x85, 0xc0, 0x43, 0x84, 0x48, 0x8a, 0xe6, 0x9e, 0xc0, 0x12, 0xd8, 0xc5, 0xf0, 0xcd, + 0x02, 0x90, 0x28, 0x4a, 0x56, 0x63, 0xe6, 0xed, 0x6e, 0x6b, 0x67, 0xba, 0x87, 0xdd, 0x3d, 0x0b, + 0x2c, 0x7e, 0x72, 0xd9, 0x96, 0x7f, 0x96, 0x2d, 0xd9, 0xa9, 0x52, 0xb9, 0x9c, 0x38, 0x25, 0xbb, + 0x5c, 0x29, 0xc7, 0xf1, 0x11, 0xc5, 0x4e, 0x14, 0x39, 0xb6, 0x63, 0xf9, 0xca, 0x55, 0x65, 0x27, + 0x29, 0xc7, 0x71, 0x55, 0x24, 0x57, 0x5c, 0x59, 0x47, 0xeb, 0x54, 0xb9, 0xfc, 0x47, 0x6c, 0x97, + 0x93, 0x3f, 0x92, 0x2d, 0x27, 0x4e, 0xbd, 0xb3, 0xdf, 0xeb, 0x63, 0x66, 0x16, 0x04, 0x56, 0x14, + 0x8b, 0xff, 0xcd, 0xbc, 0xef, 0x7b, 0xdf, 0x7b, 0xfd, 0xce, 0xef, 0x7d, 0x27, 0xd8, 0x5b, 0x17, + 0xc3, 0x19, 0xd7, 0x3f, 0xef, 0x34, 0xdd, 0xf3, 0x55, 0x3f, 0x20, 0xe7, 0xb7, 0x2f, 0x9c, 0xdf, + 0x20, 0x1e, 0x09, 0x9c, 0x88, 0xd4, 0x66, 0x9a, 0x81, 0x1f, 0xf9, 0x08, 0x71, 0x9c, 0x19, 0xa7, + 0xe9, 0xce, 0x50, 0x9c, 0x99, 0xed, 0x0b, 0xd3, 0xcf, 0x6c, 0xb8, 0xd1, 0x66, 0xeb, 0xd6, 0x4c, + 0xd5, 0x6f, 0x9c, 0xdf, 0xf0, 0x37, 0xfc, 0xf3, 0x0c, 0xf5, 0x56, 0x6b, 0x9d, 0xfd, 0x63, 0x7f, + 0xd8, 0x2f, 0x4e, 0x62, 0xfa, 0xf9, 0xb8, 0x99, 0x86, 0x53, 0xdd, 0x74, 0x3d, 0x12, 0xec, 0x9c, + 0x6f, 0x6e, 0x6d, 0xb0, 0x76, 0x03, 0x12, 0xfa, 0xad, 0xa0, 0x4a, 0x92, 0x0d, 0xb7, 0xad, 0x15, + 0x9e, 0x6f, 0x90, 0xc8, 0xc9, 0xe8, 0xee, 0xf4, 0xf9, 0xbc, 0x5a, 0x41, 0xcb, 0x8b, 0xdc, 0x46, + 0xba, 0x99, 0x0f, 0x75, 0xaa, 0x10, 0x56, 0x37, 0x49, 0xc3, 0x49, 0xd5, 0x7b, 0x2e, 0xaf, 0x5e, + 0x2b, 0x72, 0xeb, 0xe7, 0x5d, 0x2f, 0x0a, 0xa3, 0x20, 0x59, 0xc9, 0xfe, 0x86, 0x05, 0x67, 0x66, + 0x6f, 0x56, 0x16, 0xeb, 0x4e, 0x18, 0xb9, 0xd5, 0xb9, 0xba, 0x5f, 0xdd, 0xaa, 0x44, 0x7e, 0x40, + 0x6e, 0xf8, 0xf5, 0x56, 0x83, 0x54, 0xd8, 0x40, 0xa0, 0xa7, 0x61, 0x60, 0x9b, 0xfd, 0x5f, 0x5e, + 0x98, 0xb2, 0xce, 0x58, 0xe7, 0x06, 0xe7, 0xc6, 0x7f, 0x67, 0xb7, 0xf4, 0x81, 0xbd, 0xdd, 0xd2, + 0xc0, 0x0d, 0x51, 0x8e, 0x15, 0x06, 0x3a, 0x0b, 0x7d, 0xeb, 0xe1, 0xda, 0x4e, 0x93, 0x4c, 0x15, + 0x18, 0xee, 0xa8, 0xc0, 0xed, 0x5b, 0xaa, 0xd0, 0x52, 0x2c, 0xa0, 0xe8, 0x3c, 0x0c, 0x36, 0x9d, + 0x20, 0x72, 0x23, 0xd7, 0xf7, 0xa6, 0x8a, 0x67, 0xac, 0x73, 0xbd, 0x73, 0x13, 0x02, 0x75, 0xb0, + 0x2c, 0x01, 0x38, 0xc6, 0xa1, 0xdd, 0x08, 0x88, 0x53, 0xbb, 0xe6, 0xd5, 0x77, 0xa6, 0x7a, 0xce, + 0x58, 0xe7, 0x06, 0xe2, 0x6e, 0x60, 0x51, 0x8e, 0x15, 0x86, 0xfd, 0xa5, 0x02, 0x0c, 0xcc, 0xae, + 0xaf, 0xbb, 0x9e, 0x1b, 0xed, 0xa0, 0x1b, 0x30, 0xec, 0xf9, 0x35, 0x22, 0xff, 0xb3, 0xaf, 0x18, + 0x7a, 0xf6, 0xcc, 0x4c, 0x7a, 0x29, 0xcd, 0xac, 0x6a, 0x78, 0x73, 0xe3, 0x7b, 0xbb, 0xa5, 0x61, + 0xbd, 0x04, 0x1b, 0x74, 0x10, 0x86, 0xa1, 0xa6, 0x5f, 0x53, 0x64, 0x0b, 0x8c, 0x6c, 0x29, 0x8b, + 0x6c, 0x39, 0x46, 0x9b, 0x1b, 0xdb, 0xdb, 0x2d, 0x0d, 0x69, 0x05, 0x58, 0x27, 0x82, 0x6e, 0xc1, + 0x18, 0xfd, 0xeb, 0x45, 0xae, 0xa2, 0x5b, 0x64, 0x74, 0x1f, 0xc9, 0xa3, 0xab, 0xa1, 0xce, 0x4d, + 0xee, 0xed, 0x96, 0xc6, 0x12, 0x85, 0x38, 0x49, 0xd0, 0xfe, 0x61, 0x0b, 0xc6, 0x66, 0x9b, 0xcd, + 0xd9, 0xa0, 0xe1, 0x07, 0xe5, 0xc0, 0x5f, 0x77, 0xeb, 0x04, 0xbd, 0x08, 0x3d, 0x11, 0x9d, 0x35, + 0x3e, 0xc3, 0x8f, 0x88, 0xa1, 0xed, 0xa1, 0x73, 0xb5, 0xbf, 0x5b, 0x9a, 0x4c, 0xa0, 0xb3, 0xa9, + 0x64, 0x15, 0xd0, 0x6b, 0x30, 0x5e, 0xf7, 0xab, 0x4e, 0x7d, 0xd3, 0x0f, 0x23, 0x01, 0x15, 0x53, + 0x7f, 0x64, 0x6f, 0xb7, 0x34, 0x7e, 0x35, 0x01, 0xc3, 0x29, 0x6c, 0xfb, 0x2e, 0x8c, 0xce, 0x46, + 0x91, 0x53, 0xdd, 0x24, 0x35, 0xbe, 0xa0, 0xd0, 0xf3, 0xd0, 0xe3, 0x39, 0x0d, 0xd9, 0x99, 0x33, + 0xb2, 0x33, 0xab, 0x4e, 0x83, 0x76, 0x66, 0xfc, 0xba, 0xe7, 0xbe, 
0xdd, 0x12, 0x8b, 0x94, 0x96, + 0x61, 0x86, 0x8d, 0x9e, 0x05, 0xa8, 0x91, 0x6d, 0xb7, 0x4a, 0xca, 0x4e, 0xb4, 0x29, 0xfa, 0x80, + 0x44, 0x5d, 0x58, 0x50, 0x10, 0xac, 0x61, 0xd9, 0x77, 0x60, 0x70, 0x76, 0xdb, 0x77, 0x6b, 0x65, + 0xbf, 0x16, 0xa2, 0x2d, 0x18, 0x6b, 0x06, 0x64, 0x9d, 0x04, 0xaa, 0x68, 0xca, 0x3a, 0x53, 0x3c, + 0x37, 0xf4, 0xec, 0xb9, 0xcc, 0xb1, 0x37, 0x51, 0x17, 0xbd, 0x28, 0xd8, 0x99, 0x3b, 0x2e, 0xda, + 0x1b, 0x4b, 0x40, 0x71, 0x92, 0xb2, 0xfd, 0xaf, 0x0b, 0x70, 0x74, 0xf6, 0x6e, 0x2b, 0x20, 0x0b, + 0x6e, 0xb8, 0x95, 0xdc, 0x70, 0x35, 0x37, 0xdc, 0x5a, 0x8d, 0x47, 0x40, 0xad, 0xf4, 0x05, 0x51, + 0x8e, 0x15, 0x06, 0x7a, 0x06, 0xfa, 0xe9, 0xef, 0xeb, 0x78, 0x59, 0x7c, 0xf2, 0xa4, 0x40, 0x1e, + 0x5a, 0x70, 0x22, 0x67, 0x81, 0x83, 0xb0, 0xc4, 0x41, 0x2b, 0x30, 0x54, 0x65, 0xe7, 0xc3, 0xc6, + 0x8a, 0x5f, 0x23, 0x6c, 0x6d, 0x0d, 0xce, 0x3d, 0x45, 0xd1, 0xe7, 0xe3, 0xe2, 0xfd, 0xdd, 0xd2, + 0x14, 0xef, 0x9b, 0x20, 0xa1, 0xc1, 0xb0, 0x5e, 0x1f, 0xd9, 0x6a, 0xbb, 0xf7, 0x30, 0x4a, 0x90, + 0xb1, 0xd5, 0xcf, 0x69, 0x3b, 0xb7, 0x97, 0xed, 0xdc, 0xe1, 0xec, 0x5d, 0x8b, 0x2e, 0x40, 0xcf, + 0x96, 0xeb, 0xd5, 0xa6, 0xfa, 0x18, 0xad, 0x53, 0x74, 0xce, 0xaf, 0xb8, 0x5e, 0x6d, 0x7f, 0xb7, + 0x34, 0x61, 0x74, 0x87, 0x16, 0x62, 0x86, 0x6a, 0xff, 0x0f, 0x0b, 0x4a, 0x0c, 0xb6, 0xe4, 0xd6, + 0x49, 0x99, 0x04, 0xa1, 0x1b, 0x46, 0xc4, 0x8b, 0x8c, 0x01, 0x7d, 0x16, 0x20, 0x24, 0xd5, 0x80, + 0x44, 0xda, 0x90, 0xaa, 0x85, 0x51, 0x51, 0x10, 0xac, 0x61, 0xd1, 0xf3, 0x29, 0xdc, 0x74, 0x02, + 0xb6, 0xbe, 0xc4, 0xc0, 0xaa, 0xf3, 0xa9, 0x22, 0x01, 0x38, 0xc6, 0x31, 0xce, 0xa7, 0x62, 0xa7, + 0xf3, 0x09, 0x7d, 0x04, 0xc6, 0xe2, 0xc6, 0xc2, 0xa6, 0x53, 0x95, 0x03, 0xc8, 0x76, 0x70, 0xc5, + 0x04, 0xe1, 0x24, 0xae, 0xfd, 0x0f, 0x2d, 0xb1, 0x78, 0xe8, 0x57, 0xbf, 0xcb, 0xbf, 0xd5, 0xfe, + 0x55, 0x0b, 0xfa, 0xe7, 0x5c, 0xaf, 0xe6, 0x7a, 0x1b, 0xe8, 0x53, 0x30, 0x40, 0xaf, 0xca, 0x9a, + 0x13, 0x39, 0xe2, 0x18, 0xfe, 0xa0, 0xb6, 0xb7, 0xd4, 0xcd, 0x35, 0xd3, 0xdc, 0xda, 0xa0, 0x05, + 0xe1, 0x0c, 0xc5, 0xa6, 0xbb, 0xed, 0xda, 0xad, 0x4f, 0x93, 0x6a, 0xb4, 0x42, 0x22, 0x27, 0xfe, + 0x9c, 0xb8, 0x0c, 0x2b, 0xaa, 0xe8, 0x0a, 0xf4, 0x45, 0x4e, 0xb0, 0x41, 0x22, 0x71, 0x1e, 0x67, + 0x9e, 0x9b, 0xbc, 0x26, 0xa6, 0x3b, 0x92, 0x78, 0x55, 0x12, 0xdf, 0x52, 0x6b, 0xac, 0x2a, 0x16, + 0x24, 0xec, 0xff, 0xd3, 0x0f, 0x27, 0xe6, 0x2b, 0xcb, 0x39, 0xeb, 0xea, 0x2c, 0xf4, 0xd5, 0x02, + 0x77, 0x9b, 0x04, 0x62, 0x9c, 0x15, 0x95, 0x05, 0x56, 0x8a, 0x05, 0x14, 0x5d, 0x84, 0x61, 0x7e, + 0x3f, 0x5e, 0x76, 0xbc, 0x5a, 0x7c, 0x3c, 0x0a, 0xec, 0xe1, 0x1b, 0x1a, 0x0c, 0x1b, 0x98, 0x07, + 0x5c, 0x54, 0x67, 0x13, 0x9b, 0x31, 0xef, 0xee, 0xfd, 0xbc, 0x05, 0xe3, 0xbc, 0x99, 0xd9, 0x28, + 0x0a, 0xdc, 0x5b, 0xad, 0x88, 0x84, 0x53, 0xbd, 0xec, 0xa4, 0x9b, 0xcf, 0x1a, 0xad, 0xdc, 0x11, + 0x98, 0xb9, 0x91, 0xa0, 0xc2, 0x0f, 0xc1, 0x29, 0xd1, 0xee, 0x78, 0x12, 0x8c, 0x53, 0xcd, 0xa2, + 0xef, 0xb3, 0x60, 0xba, 0xea, 0x7b, 0x51, 0xe0, 0xd7, 0xeb, 0x24, 0x28, 0xb7, 0x6e, 0xd5, 0xdd, + 0x70, 0x93, 0xaf, 0x53, 0x4c, 0xd6, 0xd9, 0x49, 0x90, 0x33, 0x87, 0x0a, 0x49, 0xcc, 0xe1, 0xe9, + 0xbd, 0xdd, 0xd2, 0xf4, 0x7c, 0x2e, 0x29, 0xdc, 0xa6, 0x19, 0xb4, 0x05, 0x88, 0xde, 0xec, 0x95, + 0xc8, 0xd9, 0x20, 0x71, 0xe3, 0xfd, 0xdd, 0x37, 0x7e, 0x6c, 0x6f, 0xb7, 0x84, 0x56, 0x53, 0x24, + 0x70, 0x06, 0x59, 0xf4, 0x36, 0x1c, 0xa1, 0xa5, 0xa9, 0x6f, 0x1d, 0xe8, 0xbe, 0xb9, 0xa9, 0xbd, + 0xdd, 0xd2, 0x91, 0xd5, 0x0c, 0x22, 0x38, 0x93, 0x34, 0xfa, 0x1e, 0x0b, 0x4e, 0xc4, 0x9f, 0xbf, + 0x78, 0xa7, 0xe9, 0x78, 0xb5, 0xb8, 0xe1, 0xc1, 0xee, 0x1b, 0xa6, 0x67, 0xf2, 0x89, 0xf9, 
0x3c, + 0x4a, 0x38, 0xbf, 0x11, 0xe4, 0xc1, 0x24, 0xed, 0x5a, 0xb2, 0x6d, 0xe8, 0xbe, 0xed, 0xe3, 0x7b, + 0xbb, 0xa5, 0xc9, 0xd5, 0x34, 0x0d, 0x9c, 0x45, 0x78, 0x7a, 0x1e, 0x8e, 0x66, 0xae, 0x4e, 0x34, + 0x0e, 0xc5, 0x2d, 0xc2, 0x99, 0xc0, 0x41, 0x4c, 0x7f, 0xa2, 0x23, 0xd0, 0xbb, 0xed, 0xd4, 0x5b, + 0x62, 0x63, 0x62, 0xfe, 0xe7, 0xa5, 0xc2, 0x45, 0xcb, 0xfe, 0x37, 0x45, 0x18, 0x9b, 0xaf, 0x2c, + 0xdf, 0xd3, 0xae, 0xd7, 0xaf, 0xbd, 0x42, 0xdb, 0x6b, 0x2f, 0xbe, 0x44, 0x8b, 0xb9, 0x97, 0xe8, + 0x77, 0x67, 0x6c, 0xd9, 0x1e, 0xb6, 0x65, 0x3f, 0x9c, 0xb3, 0x65, 0xef, 0xf3, 0x46, 0xdd, 0xce, + 0x59, 0xb5, 0xbd, 0x6c, 0x02, 0x33, 0x39, 0x24, 0xc6, 0xfb, 0x25, 0x8f, 0xda, 0x03, 0x2e, 0xdd, + 0xfb, 0x33, 0x8f, 0x55, 0x18, 0x9e, 0x77, 0x9a, 0xce, 0x2d, 0xb7, 0xee, 0x46, 0x2e, 0x09, 0xd1, + 0xe3, 0x50, 0x74, 0x6a, 0x35, 0xc6, 0xdd, 0x0d, 0xce, 0x1d, 0xdd, 0xdb, 0x2d, 0x15, 0x67, 0x6b, + 0x94, 0xcd, 0x00, 0x85, 0xb5, 0x83, 0x29, 0x06, 0x7a, 0x12, 0x7a, 0x6a, 0x81, 0xdf, 0x9c, 0x2a, + 0x30, 0x4c, 0xba, 0xcb, 0x7b, 0x16, 0x02, 0xbf, 0x99, 0x40, 0x65, 0x38, 0xf6, 0x6f, 0x17, 0xe0, + 0xe4, 0x3c, 0x69, 0x6e, 0x2e, 0x55, 0x72, 0xee, 0x8b, 0x73, 0x30, 0xd0, 0xf0, 0x3d, 0x37, 0xf2, + 0x83, 0x50, 0x34, 0xcd, 0x56, 0xc4, 0x8a, 0x28, 0xc3, 0x0a, 0x8a, 0xce, 0x40, 0x4f, 0x33, 0x66, + 0x62, 0x87, 0x25, 0x03, 0xcc, 0xd8, 0x57, 0x06, 0xa1, 0x18, 0xad, 0x90, 0x04, 0x62, 0xc5, 0x28, + 0x8c, 0xeb, 0x21, 0x09, 0x30, 0x83, 0xc4, 0x9c, 0x00, 0xe5, 0x11, 0xc4, 0x8d, 0x90, 0xe0, 0x04, + 0x28, 0x04, 0x6b, 0x58, 0xa8, 0x0c, 0x83, 0x61, 0x62, 0x66, 0xbb, 0xda, 0x9a, 0x23, 0x8c, 0x55, + 0x50, 0x33, 0x19, 0x13, 0x31, 0x6e, 0xb0, 0xbe, 0x8e, 0xac, 0xc2, 0xd7, 0x0a, 0x80, 0xf8, 0x10, + 0x7e, 0x9b, 0x0d, 0xdc, 0xf5, 0xf4, 0xc0, 0x75, 0xbf, 0x25, 0xee, 0xd7, 0xe8, 0xfd, 0x4f, 0x0b, + 0x4e, 0xce, 0xbb, 0x5e, 0x8d, 0x04, 0x39, 0x0b, 0xf0, 0xc1, 0x3c, 0xe5, 0x0f, 0xc6, 0xa4, 0x18, + 0x4b, 0xac, 0xe7, 0x3e, 0x2c, 0x31, 0xfb, 0x2f, 0x2c, 0x40, 0xfc, 0xb3, 0xdf, 0x75, 0x1f, 0x7b, + 0x3d, 0xfd, 0xb1, 0xf7, 0x61, 0x59, 0xd8, 0x57, 0x61, 0x74, 0xbe, 0xee, 0x12, 0x2f, 0x5a, 0x2e, + 0xcf, 0xfb, 0xde, 0xba, 0xbb, 0x81, 0x5e, 0x82, 0xd1, 0xc8, 0x6d, 0x10, 0xbf, 0x15, 0x55, 0x48, + 0xd5, 0xf7, 0xd8, 0xcb, 0xd5, 0x3a, 0xd7, 0x3b, 0x87, 0xf6, 0x76, 0x4b, 0xa3, 0x6b, 0x06, 0x04, + 0x27, 0x30, 0xed, 0x9f, 0xa1, 0xe7, 0x56, 0xbd, 0x15, 0x46, 0x24, 0x58, 0x0b, 0x5a, 0x61, 0x34, + 0xd7, 0xa2, 0xbc, 0x67, 0x39, 0xf0, 0x69, 0x77, 0x5c, 0xdf, 0x43, 0x27, 0x8d, 0xe7, 0xf8, 0x80, + 0x7c, 0x8a, 0x8b, 0x67, 0xf7, 0x0c, 0x40, 0xe8, 0x6e, 0x78, 0x24, 0xd0, 0x9e, 0x0f, 0xa3, 0x6c, + 0xab, 0xa8, 0x52, 0xac, 0x61, 0xa0, 0x3a, 0x8c, 0xd4, 0x9d, 0x5b, 0xa4, 0x5e, 0x21, 0x75, 0x52, + 0x8d, 0xfc, 0x40, 0xc8, 0x37, 0x9e, 0xeb, 0xee, 0x1d, 0x70, 0x55, 0xaf, 0x3a, 0x37, 0xb1, 0xb7, + 0x5b, 0x1a, 0x31, 0x8a, 0xb0, 0x49, 0x9c, 0x1e, 0x1d, 0x7e, 0x93, 0x7e, 0x85, 0x53, 0xd7, 0x1f, + 0x9f, 0xd7, 0x44, 0x19, 0x56, 0x50, 0x75, 0x74, 0xf4, 0xe4, 0x1d, 0x1d, 0xf6, 0x1f, 0xd1, 0x85, + 0xe6, 0x37, 0x9a, 0xbe, 0x47, 0xbc, 0x68, 0xde, 0xf7, 0x6a, 0x5c, 0x32, 0xf5, 0x92, 0x21, 0x3a, + 0x39, 0x9b, 0x10, 0x9d, 0x1c, 0x4b, 0xd7, 0xd0, 0xa4, 0x27, 0x1f, 0x86, 0xbe, 0x30, 0x72, 0xa2, + 0x56, 0x28, 0x06, 0xee, 0x61, 0xb9, 0xec, 0x2a, 0xac, 0x74, 0x7f, 0xb7, 0x34, 0xa6, 0xaa, 0xf1, + 0x22, 0x2c, 0x2a, 0xa0, 0x27, 0xa0, 0xbf, 0x41, 0xc2, 0xd0, 0xd9, 0x90, 0x6c, 0xc3, 0x98, 0xa8, + 0xdb, 0xbf, 0xc2, 0x8b, 0xb1, 0x84, 0xa3, 0x47, 0xa0, 0x97, 0x04, 0x81, 0x1f, 0x88, 0x6f, 0x1b, + 0x11, 0x88, 0xbd, 0x8b, 0xb4, 0x10, 0x73, 0x98, 0xfd, 0x1f, 0x2c, 0x18, 0x53, 0x7d, 0xe5, 0x6d, + 0x1d, 0xc2, 0x73, 
0xed, 0x4d, 0x80, 0xaa, 0xfc, 0xc0, 0x90, 0x5d, 0xb3, 0x43, 0xcf, 0x9e, 0xcd, + 0xe4, 0x68, 0x52, 0xc3, 0x18, 0x53, 0x56, 0x45, 0x21, 0xd6, 0xa8, 0xd9, 0xbf, 0x61, 0xc1, 0x64, + 0xe2, 0x8b, 0xae, 0xba, 0x61, 0x84, 0xde, 0x4a, 0x7d, 0xd5, 0x4c, 0x97, 0x8b, 0xcf, 0x0d, 0xf9, + 0x37, 0xa9, 0x3d, 0x2f, 0x4b, 0xb4, 0x2f, 0xba, 0x0c, 0xbd, 0x6e, 0x44, 0x1a, 0xf2, 0x63, 0x1e, + 0x69, 0xfb, 0x31, 0xbc, 0x57, 0xf1, 0x8c, 0x2c, 0xd3, 0x9a, 0x98, 0x13, 0xb0, 0x7f, 0xbb, 0x08, + 0x83, 0x7c, 0x7f, 0xaf, 0x38, 0xcd, 0x43, 0x98, 0x8b, 0xa7, 0x60, 0xd0, 0x6d, 0x34, 0x5a, 0x91, + 0x73, 0x4b, 0xdc, 0x7b, 0x03, 0xfc, 0x0c, 0x5a, 0x96, 0x85, 0x38, 0x86, 0xa3, 0x65, 0xe8, 0x61, + 0x5d, 0xe1, 0x5f, 0xf9, 0x78, 0xf6, 0x57, 0x8a, 0xbe, 0xcf, 0x2c, 0x38, 0x91, 0xc3, 0x59, 0x4e, + 0xb5, 0xaf, 0x68, 0x11, 0x66, 0x24, 0x90, 0x03, 0x70, 0xcb, 0xf5, 0x9c, 0x60, 0x87, 0x96, 0x4d, + 0x15, 0x19, 0xc1, 0x67, 0xda, 0x13, 0x9c, 0x53, 0xf8, 0x9c, 0xac, 0xfa, 0xb0, 0x18, 0x80, 0x35, + 0xa2, 0xd3, 0x2f, 0xc2, 0xa0, 0x42, 0x3e, 0x08, 0xe7, 0x38, 0xfd, 0x11, 0x18, 0x4b, 0xb4, 0xd5, + 0xa9, 0xfa, 0xb0, 0xce, 0x78, 0xfe, 0x1a, 0x3b, 0x32, 0x44, 0xaf, 0x17, 0xbd, 0x6d, 0x71, 0x37, + 0xdd, 0x85, 0x23, 0xf5, 0x8c, 0x23, 0x5f, 0xcc, 0x6b, 0xf7, 0x57, 0xc4, 0x49, 0xf1, 0xd9, 0x47, + 0xb2, 0xa0, 0x38, 0xb3, 0x0d, 0xe3, 0x44, 0x2c, 0xb4, 0x3b, 0x11, 0xe9, 0x79, 0x77, 0x44, 0x75, + 0xfe, 0x0a, 0xd9, 0x51, 0x87, 0xea, 0xb7, 0xb2, 0xfb, 0xa7, 0xf8, 0xe8, 0xf3, 0xe3, 0x72, 0x48, + 0x10, 0x28, 0x5e, 0x21, 0x3b, 0x7c, 0x2a, 0xf4, 0xaf, 0x2b, 0xb6, 0xfd, 0xba, 0xaf, 0x58, 0x30, + 0xa2, 0xbe, 0xee, 0x10, 0xce, 0x85, 0x39, 0xf3, 0x5c, 0x38, 0xd5, 0x76, 0x81, 0xe7, 0x9c, 0x08, + 0x5f, 0x2b, 0xc0, 0x09, 0x85, 0x43, 0x1f, 0x51, 0xfc, 0x8f, 0x58, 0x55, 0xe7, 0x61, 0xd0, 0x53, + 0xe2, 0x44, 0xcb, 0x94, 0xe3, 0xc5, 0xc2, 0xc4, 0x18, 0x87, 0x5e, 0x79, 0x5e, 0x7c, 0x69, 0x0f, + 0xeb, 0x72, 0x76, 0x71, 0xb9, 0xcf, 0x41, 0xb1, 0xe5, 0xd6, 0xc4, 0x05, 0xf3, 0x41, 0x39, 0xda, + 0xd7, 0x97, 0x17, 0xf6, 0x77, 0x4b, 0x0f, 0xe7, 0xa9, 0x9c, 0xe8, 0xcd, 0x16, 0xce, 0x5c, 0x5f, + 0x5e, 0xc0, 0xb4, 0x32, 0x9a, 0x85, 0x31, 0xa9, 0x55, 0xbb, 0x41, 0xf9, 0x52, 0xdf, 0x13, 0xf7, + 0x90, 0x12, 0x96, 0x63, 0x13, 0x8c, 0x93, 0xf8, 0x68, 0x01, 0xc6, 0xb7, 0x5a, 0xb7, 0x48, 0x9d, + 0x44, 0xfc, 0x83, 0xaf, 0x10, 0x2e, 0x4a, 0x1e, 0x8c, 0x9f, 0xb0, 0x57, 0x12, 0x70, 0x9c, 0xaa, + 0x61, 0xff, 0x0d, 0xbb, 0x0f, 0xc4, 0xe8, 0x69, 0xfc, 0xcd, 0xb7, 0x72, 0x39, 0x77, 0xb3, 0x2a, + 0xae, 0x90, 0x9d, 0x35, 0x9f, 0xf2, 0x21, 0xd9, 0xab, 0xc2, 0x58, 0xf3, 0x3d, 0x6d, 0xd7, 0xfc, + 0x2f, 0x15, 0xe0, 0xa8, 0x1a, 0x01, 0x83, 0x5b, 0xfe, 0x76, 0x1f, 0x83, 0x0b, 0x30, 0x54, 0x23, + 0xeb, 0x4e, 0xab, 0x1e, 0x29, 0xbd, 0x46, 0x2f, 0x57, 0xb5, 0x2d, 0xc4, 0xc5, 0x58, 0xc7, 0x39, + 0xc0, 0xb0, 0xfd, 0xc2, 0x08, 0xbb, 0x88, 0x23, 0x87, 0xae, 0x71, 0xb5, 0x6b, 0xac, 0xdc, 0x5d, + 0xf3, 0x08, 0xf4, 0xba, 0x0d, 0xca, 0x98, 0x15, 0x4c, 0x7e, 0x6b, 0x99, 0x16, 0x62, 0x0e, 0x43, + 0x8f, 0x41, 0x7f, 0xd5, 0x6f, 0x34, 0x1c, 0xaf, 0xc6, 0xae, 0xbc, 0xc1, 0xb9, 0x21, 0xca, 0xbb, + 0xcd, 0xf3, 0x22, 0x2c, 0x61, 0x94, 0xf9, 0x76, 0x82, 0x0d, 0x2e, 0xec, 0x11, 0xcc, 0xf7, 0x6c, + 0xb0, 0x11, 0x62, 0x56, 0x4a, 0xdf, 0xaa, 0xb7, 0xfd, 0x60, 0xcb, 0xf5, 0x36, 0x16, 0xdc, 0x40, + 0x6c, 0x09, 0x75, 0x17, 0xde, 0x54, 0x10, 0xac, 0x61, 0xa1, 0x25, 0xe8, 0x6d, 0xfa, 0x41, 0x14, + 0x4e, 0xf5, 0xb1, 0xe1, 0x7e, 0x38, 0xe7, 0x20, 0xe2, 0x5f, 0x5b, 0xf6, 0x83, 0x28, 0xfe, 0x00, + 0xfa, 0x2f, 0xc4, 0xbc, 0x3a, 0xba, 0x0a, 0xfd, 0xc4, 0xdb, 0x5e, 0x0a, 0xfc, 0xc6, 0xd4, 0x64, + 0x3e, 0xa5, 0x45, 0x8e, 0xc2, 0x97, 0x59, 
0xcc, 0xa3, 0x8a, 0x62, 0x2c, 0x49, 0xa0, 0x0f, 0x43, + 0x91, 0x78, 0xdb, 0x53, 0xfd, 0x8c, 0xd2, 0x74, 0x0e, 0xa5, 0x1b, 0x4e, 0x10, 0x9f, 0xf9, 0x8b, + 0xde, 0x36, 0xa6, 0x75, 0xd0, 0xc7, 0x60, 0x50, 0x1e, 0x18, 0xa1, 0x90, 0xa2, 0x66, 0x2e, 0x58, + 0x79, 0xcc, 0x60, 0xf2, 0x76, 0xcb, 0x0d, 0x48, 0x83, 0x78, 0x51, 0x18, 0x9f, 0x90, 0x12, 0x1a, + 0xe2, 0x98, 0x1a, 0xaa, 0xc2, 0x70, 0x40, 0x42, 0xf7, 0x2e, 0x29, 0xfb, 0x75, 0xb7, 0xba, 0x33, + 0x75, 0x9c, 0x75, 0xef, 0x89, 0xb6, 0x43, 0x86, 0xb5, 0x0a, 0xb1, 0x94, 0x5f, 0x2f, 0xc5, 0x06, + 0x51, 0xf4, 0x06, 0x8c, 0x04, 0x24, 0x8c, 0x9c, 0x20, 0x12, 0xad, 0x4c, 0x29, 0xad, 0xdc, 0x08, + 0xd6, 0x01, 0xfc, 0x39, 0x11, 0x37, 0x13, 0x43, 0xb0, 0x49, 0x01, 0x7d, 0x4c, 0xaa, 0x1c, 0x56, + 0xfc, 0x96, 0x17, 0x85, 0x53, 0x83, 0xac, 0xdf, 0x99, 0xba, 0xe9, 0x1b, 0x31, 0x5e, 0x52, 0x27, + 0xc1, 0x2b, 0x63, 0x83, 0x14, 0xfa, 0x04, 0x8c, 0xf0, 0xff, 0x5c, 0xa5, 0x1a, 0x4e, 0x1d, 0x65, + 0xb4, 0xcf, 0xe4, 0xd3, 0xe6, 0x88, 0x73, 0x47, 0x05, 0xf1, 0x11, 0xbd, 0x34, 0xc4, 0x26, 0x35, + 0x84, 0x61, 0xa4, 0xee, 0x6e, 0x13, 0x8f, 0x84, 0x61, 0x39, 0xf0, 0x6f, 0x11, 0x21, 0x21, 0x3e, + 0x91, 0xad, 0x82, 0xf5, 0x6f, 0x11, 0xf1, 0x08, 0xd4, 0xeb, 0x60, 0x93, 0x04, 0xba, 0x0e, 0xa3, + 0xf4, 0x49, 0xee, 0xc6, 0x44, 0x87, 0x3a, 0x11, 0x65, 0x0f, 0x67, 0x6c, 0x54, 0xc2, 0x09, 0x22, + 0xe8, 0x1a, 0x0c, 0xb3, 0x31, 0x6f, 0x35, 0x39, 0xd1, 0x63, 0x9d, 0x88, 0x32, 0x83, 0x82, 0x8a, + 0x56, 0x05, 0x1b, 0x04, 0xd0, 0xeb, 0x30, 0x58, 0x77, 0xd7, 0x49, 0x75, 0xa7, 0x5a, 0x27, 0x53, + 0xc3, 0x8c, 0x5a, 0xe6, 0x61, 0x78, 0x55, 0x22, 0x71, 0xfe, 0x5c, 0xfd, 0xc5, 0x71, 0x75, 0x74, + 0x03, 0x8e, 0x45, 0x24, 0x68, 0xb8, 0x9e, 0x43, 0x0f, 0x31, 0xf1, 0x24, 0x64, 0x9a, 0xf1, 0x11, + 0xb6, 0xba, 0x4e, 0x8b, 0xd9, 0x38, 0xb6, 0x96, 0x89, 0x85, 0x73, 0x6a, 0xa3, 0x3b, 0x30, 0x95, + 0x01, 0xe1, 0xeb, 0xf6, 0x08, 0xa3, 0xfc, 0x8a, 0xa0, 0x3c, 0xb5, 0x96, 0x83, 0xb7, 0xdf, 0x06, + 0x86, 0x73, 0xa9, 0xa3, 0x6b, 0x30, 0xc6, 0x4e, 0xce, 0x72, 0xab, 0x5e, 0x17, 0x0d, 0x8e, 0xb2, + 0x06, 0x1f, 0x93, 0x7c, 0xc4, 0xb2, 0x09, 0xde, 0xdf, 0x2d, 0x41, 0xfc, 0x0f, 0x27, 0x6b, 0xa3, + 0x5b, 0x4c, 0x09, 0xdb, 0x0a, 0xdc, 0x68, 0x87, 0xee, 0x2a, 0x72, 0x27, 0x9a, 0x1a, 0x6b, 0x2b, + 0x90, 0xd2, 0x51, 0x95, 0xa6, 0x56, 0x2f, 0xc4, 0x49, 0x82, 0xf4, 0x2a, 0x08, 0xa3, 0x9a, 0xeb, + 0x4d, 0x8d, 0xf3, 0xf7, 0x94, 0x3c, 0x49, 0x2b, 0xb4, 0x10, 0x73, 0x18, 0x53, 0xc0, 0xd2, 0x1f, + 0xd7, 0xe8, 0x8d, 0x3b, 0xc1, 0x10, 0x63, 0x05, 0xac, 0x04, 0xe0, 0x18, 0x87, 0x32, 0xc1, 0x51, + 0xb4, 0x33, 0x85, 0x18, 0xaa, 0x3a, 0x10, 0xd7, 0xd6, 0x3e, 0x86, 0x69, 0xb9, 0x7d, 0x0b, 0x46, + 0xd5, 0x31, 0xc1, 0xc6, 0x04, 0x95, 0xa0, 0x97, 0xb1, 0x7d, 0x42, 0x7c, 0x3a, 0x48, 0xbb, 0xc0, + 0x58, 0x42, 0xcc, 0xcb, 0x59, 0x17, 0xdc, 0xbb, 0x64, 0x6e, 0x27, 0x22, 0x5c, 0x16, 0x51, 0xd4, + 0xba, 0x20, 0x01, 0x38, 0xc6, 0xb1, 0xff, 0x2f, 0x67, 0x9f, 0xe3, 0x5b, 0xa2, 0x8b, 0x7b, 0xf1, + 0x69, 0x18, 0x60, 0x86, 0x1f, 0x7e, 0xc0, 0xb5, 0xb3, 0xbd, 0x31, 0xc3, 0x7c, 0x59, 0x94, 0x63, + 0x85, 0x81, 0x5e, 0x86, 0x91, 0xaa, 0xde, 0x80, 0xb8, 0xd4, 0xd5, 0x31, 0x62, 0xb4, 0x8e, 0x4d, + 0x5c, 0x74, 0x11, 0x06, 0x98, 0x8d, 0x53, 0xd5, 0xaf, 0x0b, 0x6e, 0x53, 0x72, 0x26, 0x03, 0x65, + 0x51, 0xbe, 0xaf, 0xfd, 0xc6, 0x0a, 0x1b, 0x9d, 0x85, 0x3e, 0xda, 0x85, 0xe5, 0xb2, 0xb8, 0x4e, + 0x95, 0x24, 0xf0, 0x32, 0x2b, 0xc5, 0x02, 0x6a, 0xff, 0x86, 0xc5, 0x78, 0xa9, 0xf4, 0x99, 0x8f, + 0x2e, 0xb3, 0x4b, 0x83, 0xdd, 0x20, 0x9a, 0x16, 0xfe, 0x51, 0xed, 0x26, 0x50, 0xb0, 0xfd, 0xc4, + 0x7f, 0x6c, 0xd4, 0x44, 0x6f, 0x26, 0x6f, 0x06, 0xce, 0x50, 0x3c, 
0x2f, 0x87, 0x20, 0x79, 0x3b, + 0x3c, 0x14, 0x5f, 0x71, 0xb4, 0x3f, 0xed, 0xae, 0x08, 0xfb, 0x47, 0x0a, 0xda, 0x2a, 0xa9, 0x44, + 0x4e, 0x44, 0x50, 0x19, 0xfa, 0x6f, 0x3b, 0x6e, 0xe4, 0x7a, 0x1b, 0x82, 0xef, 0x6b, 0x7f, 0xd1, + 0xb1, 0x4a, 0x37, 0x79, 0x05, 0xce, 0xbd, 0x88, 0x3f, 0x58, 0x92, 0xa1, 0x14, 0x83, 0x96, 0xe7, + 0x51, 0x8a, 0x85, 0x6e, 0x29, 0x62, 0x5e, 0x81, 0x53, 0x14, 0x7f, 0xb0, 0x24, 0x83, 0xde, 0x02, + 0x90, 0x27, 0x04, 0xa9, 0x09, 0xd9, 0xe1, 0xd3, 0x9d, 0x89, 0xae, 0xa9, 0x3a, 0x5c, 0x38, 0x19, + 0xff, 0xc7, 0x1a, 0x3d, 0x3b, 0xd2, 0xe6, 0x54, 0xef, 0x0c, 0xfa, 0x38, 0xdd, 0xa2, 0x4e, 0x10, + 0x91, 0xda, 0x6c, 0x24, 0x06, 0xe7, 0xc9, 0xee, 0x1e, 0x87, 0x6b, 0x6e, 0x83, 0xe8, 0xdb, 0x59, + 0x10, 0xc1, 0x31, 0x3d, 0xfb, 0x57, 0x8a, 0x30, 0x95, 0xd7, 0x5d, 0xba, 0x69, 0xc8, 0x1d, 0x37, + 0x9a, 0xa7, 0x6c, 0xad, 0x65, 0x6e, 0x9a, 0x45, 0x51, 0x8e, 0x15, 0x06, 0x5d, 0xbd, 0xa1, 0xbb, + 0x21, 0xdf, 0xf6, 0xbd, 0xf1, 0xea, 0xad, 0xb0, 0x52, 0x2c, 0xa0, 0x14, 0x2f, 0x20, 0x4e, 0x28, + 0x8c, 0xef, 0xb4, 0x55, 0x8e, 0x59, 0x29, 0x16, 0x50, 0x5d, 0xca, 0xd8, 0xd3, 0x41, 0xca, 0x68, + 0x0c, 0x51, 0xef, 0xfd, 0x1d, 0x22, 0xf4, 0x49, 0x80, 0x75, 0xd7, 0x73, 0xc3, 0x4d, 0x46, 0xbd, + 0xef, 0xc0, 0xd4, 0x15, 0x53, 0xbc, 0xa4, 0xa8, 0x60, 0x8d, 0x22, 0x7a, 0x01, 0x86, 0xd4, 0x01, + 0xb2, 0xbc, 0xc0, 0x54, 0xff, 0x9a, 0x29, 0x55, 0x7c, 0x9a, 0x2e, 0x60, 0x1d, 0xcf, 0xfe, 0x74, + 0x72, 0xbd, 0x88, 0x1d, 0xa0, 0x8d, 0xaf, 0xd5, 0xed, 0xf8, 0x16, 0xda, 0x8f, 0xaf, 0xfd, 0x63, + 0x83, 0x30, 0x66, 0x34, 0xd6, 0x0a, 0xbb, 0x38, 0x73, 0x2f, 0xd1, 0x0b, 0xc8, 0x89, 0x88, 0xd8, + 0x7f, 0x76, 0xe7, 0xad, 0xa2, 0x5f, 0x52, 0x74, 0x07, 0xf0, 0xfa, 0xe8, 0x93, 0x30, 0x58, 0x77, + 0x42, 0x26, 0xb1, 0x24, 0x62, 0xdf, 0x75, 0x43, 0x2c, 0x7e, 0x10, 0x3a, 0x61, 0xa4, 0xdd, 0xfa, + 0x9c, 0x76, 0x4c, 0x92, 0xde, 0x94, 0x94, 0xbf, 0x92, 0xd6, 0x9d, 0xaa, 0x13, 0x94, 0x09, 0xdb, + 0xc1, 0x1c, 0x86, 0x2e, 0xb2, 0xa3, 0x95, 0xae, 0x8a, 0x79, 0xca, 0x8d, 0xb2, 0x65, 0xd6, 0x6b, + 0x30, 0xd9, 0x0a, 0x86, 0x0d, 0xcc, 0xf8, 0x4d, 0xd6, 0xd7, 0xe6, 0x4d, 0xf6, 0x04, 0xf4, 0xb3, + 0x1f, 0x6a, 0x05, 0xa8, 0xd9, 0x58, 0xe6, 0xc5, 0x58, 0xc2, 0x93, 0x0b, 0x66, 0xa0, 0xbb, 0x05, + 0x43, 0x5f, 0x7d, 0x62, 0x51, 0x33, 0xb3, 0x8b, 0x01, 0x7e, 0xca, 0x89, 0x25, 0x8f, 0x25, 0x0c, + 0xfd, 0xac, 0x05, 0xc8, 0xa9, 0xd3, 0xd7, 0x32, 0x2d, 0x56, 0x8f, 0x1b, 0x60, 0xac, 0xf6, 0xcb, + 0x1d, 0x87, 0xbd, 0x15, 0xce, 0xcc, 0xa6, 0x6a, 0x73, 0x49, 0xe9, 0x4b, 0xa2, 0x8b, 0x28, 0x8d, + 0xa0, 0x5f, 0x46, 0x57, 0xdd, 0x30, 0xfa, 0xec, 0x1f, 0x27, 0x2e, 0xa7, 0x8c, 0x2e, 0xa1, 0xeb, + 0xfa, 0xe3, 0x6b, 0xe8, 0x80, 0x8f, 0xaf, 0x91, 0xdc, 0x87, 0xd7, 0x77, 0x26, 0x1e, 0x30, 0xc3, + 0xec, 0xcb, 0x1f, 0xeb, 0xf0, 0x80, 0x11, 0xe2, 0xf4, 0x6e, 0x9e, 0x31, 0x65, 0xa1, 0x07, 0x1e, + 0x61, 0x5d, 0x6e, 0xff, 0x08, 0xbe, 0x1e, 0x92, 0x60, 0xee, 0x84, 0x54, 0x13, 0xef, 0xeb, 0xbc, + 0x87, 0xa6, 0x37, 0xfe, 0x1e, 0x0b, 0xa6, 0xd2, 0x03, 0xc4, 0xbb, 0x34, 0x35, 0xca, 0xfa, 0x6f, + 0xb7, 0x1b, 0x19, 0xd1, 0x79, 0x69, 0xee, 0x3a, 0x35, 0x9b, 0x43, 0x0b, 0xe7, 0xb6, 0x32, 0xdd, + 0x82, 0xe3, 0x39, 0xf3, 0x9e, 0x21, 0xb5, 0x5e, 0xd0, 0xa5, 0xd6, 0x1d, 0x64, 0x9d, 0x33, 0x72, + 0x66, 0x66, 0xde, 0x68, 0x39, 0x5e, 0xe4, 0x46, 0x3b, 0xba, 0x94, 0xdb, 0x03, 0x73, 0x40, 0xd0, + 0x27, 0xa0, 0xb7, 0xee, 0x7a, 0xad, 0x3b, 0xe2, 0xa6, 0x3c, 0x9b, 0xfd, 0x88, 0xf1, 0x5a, 0x77, + 0xcc, 0x21, 0x2e, 0xd1, 0x0d, 0xc9, 0xca, 0xf7, 0x77, 0x4b, 0x28, 0x8d, 0x80, 0x39, 0x55, 0xfb, + 0x49, 0x18, 0x5d, 0x70, 0x48, 0xc3, 0xf7, 0x16, 0xbd, 0x5a, 0xd3, 0x77, 0xbd, 0x08, 0x4d, 
0x41, + 0x0f, 0x63, 0x11, 0xf9, 0x05, 0xd9, 0x43, 0x87, 0x10, 0xb3, 0x12, 0x7b, 0x03, 0x8e, 0x2e, 0xf8, + 0xb7, 0xbd, 0xdb, 0x4e, 0x50, 0x9b, 0x2d, 0x2f, 0x6b, 0x52, 0xbf, 0x55, 0x29, 0x75, 0xb2, 0xf2, + 0xdf, 0xf4, 0x5a, 0x4d, 0xbe, 0x94, 0x96, 0xdc, 0x3a, 0xc9, 0x91, 0xcd, 0xfe, 0x58, 0xc1, 0x68, + 0x29, 0xc6, 0x57, 0x9a, 0x45, 0x2b, 0xd7, 0x28, 0xe1, 0x0d, 0x18, 0x58, 0x77, 0x49, 0xbd, 0x86, + 0xc9, 0xba, 0x98, 0x8d, 0xc7, 0xf3, 0xcd, 0x16, 0x97, 0x28, 0xa6, 0x52, 0x81, 0x32, 0x99, 0xd5, + 0x92, 0xa8, 0x8c, 0x15, 0x19, 0xb4, 0x05, 0xe3, 0x72, 0xce, 0x24, 0x54, 0x9c, 0xda, 0x4f, 0xb4, + 0x5b, 0x84, 0x26, 0x71, 0x66, 0xc2, 0x8d, 0x13, 0x64, 0x70, 0x8a, 0x30, 0x3a, 0x09, 0x3d, 0x0d, + 0xca, 0x9f, 0xf4, 0xb0, 0xe1, 0x67, 0x42, 0x2a, 0x26, 0x6f, 0x63, 0xa5, 0xf6, 0x4f, 0x58, 0x70, + 0x3c, 0x35, 0x32, 0x42, 0xee, 0x78, 0x9f, 0x67, 0x21, 0x29, 0x07, 0x2c, 0x74, 0x96, 0x03, 0xda, + 0xff, 0xc8, 0x82, 0x23, 0x8b, 0x8d, 0x66, 0xb4, 0xb3, 0xe0, 0x9a, 0x16, 0x04, 0x2f, 0x42, 0x5f, + 0x83, 0xd4, 0xdc, 0x56, 0x43, 0xcc, 0x5c, 0x49, 0xde, 0xe1, 0x2b, 0xac, 0x94, 0x9e, 0x03, 0x95, + 0xc8, 0x0f, 0x9c, 0x0d, 0xc2, 0x0b, 0xb0, 0x40, 0x67, 0x9c, 0x90, 0x7b, 0x97, 0x5c, 0x75, 0x1b, + 0x6e, 0x74, 0x6f, 0xbb, 0x4b, 0x28, 0xff, 0x25, 0x11, 0x1c, 0xd3, 0xb3, 0xbf, 0x61, 0xc1, 0x98, + 0x5c, 0xf7, 0xb3, 0xb5, 0x5a, 0x40, 0xc2, 0x10, 0x4d, 0x43, 0xc1, 0x6d, 0x8a, 0x5e, 0x82, 0xe8, + 0x65, 0x61, 0xb9, 0x8c, 0x0b, 0x6e, 0x53, 0x3e, 0xba, 0x18, 0x9b, 0x50, 0x34, 0xed, 0x20, 0x2e, + 0x8b, 0x72, 0xac, 0x30, 0xd0, 0x39, 0x18, 0xf0, 0xfc, 0x1a, 0x7f, 0xb7, 0x08, 0x4d, 0x38, 0xc5, + 0x5c, 0x15, 0x65, 0x58, 0x41, 0x51, 0x19, 0x06, 0xb9, 0x95, 0x6c, 0xbc, 0x68, 0xbb, 0xb2, 0xb5, + 0x65, 0x5f, 0xb6, 0x26, 0x6b, 0xe2, 0x98, 0x88, 0xfd, 0x5b, 0x16, 0x0c, 0xcb, 0x2f, 0xeb, 0xf2, + 0x45, 0x49, 0xb7, 0x56, 0xfc, 0x9a, 0x8c, 0xb7, 0x16, 0x7d, 0x11, 0x32, 0x88, 0xf1, 0x10, 0x2c, + 0x1e, 0xe8, 0x21, 0x78, 0x01, 0x86, 0x9c, 0x66, 0xb3, 0x6c, 0xbe, 0x22, 0xd9, 0x52, 0x9a, 0x8d, + 0x8b, 0xb1, 0x8e, 0x63, 0xff, 0x78, 0x01, 0x46, 0xe5, 0x17, 0x54, 0x5a, 0xb7, 0x42, 0x12, 0xa1, + 0x35, 0x18, 0x74, 0xf8, 0x2c, 0x11, 0xb9, 0xc8, 0x1f, 0xc9, 0x96, 0x6e, 0x1a, 0x53, 0x1a, 0xb3, + 0xc3, 0xb3, 0xb2, 0x36, 0x8e, 0x09, 0xa1, 0x3a, 0x4c, 0x78, 0x7e, 0xc4, 0x58, 0x23, 0x05, 0x6f, + 0xa7, 0x70, 0x4e, 0x52, 0x3f, 0x21, 0xa8, 0x4f, 0xac, 0x26, 0xa9, 0xe0, 0x34, 0x61, 0xb4, 0x28, + 0x25, 0xc6, 0xc5, 0x7c, 0x51, 0x9f, 0x3e, 0x71, 0xd9, 0x02, 0x63, 0xfb, 0xd7, 0x2d, 0x18, 0x94, + 0x68, 0x87, 0x61, 0x5b, 0xb0, 0x02, 0xfd, 0x21, 0x9b, 0x04, 0x39, 0x34, 0x76, 0xbb, 0x8e, 0xf3, + 0xf9, 0x8a, 0x39, 0x3e, 0xfe, 0x3f, 0xc4, 0x92, 0x06, 0x53, 0x18, 0xaa, 0xee, 0xbf, 0x4b, 0x14, + 0x86, 0xaa, 0x3f, 0x39, 0x97, 0xd2, 0x9f, 0xb2, 0x3e, 0x6b, 0x12, 0x78, 0xfa, 0x30, 0x69, 0x06, + 0x64, 0xdd, 0xbd, 0x93, 0x7c, 0x98, 0x94, 0x59, 0x29, 0x16, 0x50, 0xf4, 0x16, 0x0c, 0x57, 0xa5, + 0xa6, 0x28, 0xde, 0xe1, 0x67, 0xdb, 0x6a, 0x2d, 0x95, 0x82, 0x9b, 0x4b, 0x3a, 0xe7, 0xb5, 0xfa, + 0xd8, 0xa0, 0x66, 0x5a, 0x81, 0x15, 0x3b, 0x59, 0x81, 0xc5, 0x74, 0xf3, 0x6d, 0xa2, 0x7e, 0xd2, + 0x82, 0x3e, 0xae, 0x21, 0xe8, 0x4e, 0x41, 0xa3, 0xe9, 0xfb, 0xe3, 0xb1, 0xbb, 0x41, 0x0b, 0x05, + 0x67, 0x83, 0x56, 0x60, 0x90, 0xfd, 0x60, 0x1a, 0x8e, 0x62, 0xbe, 0xcf, 0x18, 0x6f, 0x55, 0xef, + 0xe0, 0x0d, 0x59, 0x0d, 0xc7, 0x14, 0xec, 0x1f, 0x2d, 0xd2, 0xd3, 0x2d, 0x46, 0x35, 0x2e, 0x7d, + 0xeb, 0xc1, 0x5d, 0xfa, 0x85, 0x07, 0x75, 0xe9, 0x6f, 0xc0, 0x58, 0x55, 0xb3, 0x0e, 0x88, 0x67, + 0xf2, 0x5c, 0xdb, 0x45, 0xa2, 0x19, 0x12, 0x70, 0x19, 0xea, 0xbc, 0x49, 0x04, 0x27, 0xa9, 0xa2, + 0x8f, 0xc3, 0x30, 
0x9f, 0x67, 0xd1, 0x0a, 0x37, 0xa4, 0x7b, 0x2c, 0x7f, 0xbd, 0xe8, 0x4d, 0x70, + 0x99, 0xbb, 0x56, 0x1d, 0x1b, 0xc4, 0xec, 0xbf, 0xb4, 0x00, 0x2d, 0x36, 0x37, 0x49, 0x83, 0x04, + 0x4e, 0x3d, 0x56, 0xf2, 0x7d, 0xc1, 0x82, 0x29, 0x92, 0x2a, 0x9e, 0xf7, 0x1b, 0x0d, 0xf1, 0xa4, + 0xcf, 0x91, 0x3a, 0x2d, 0xe6, 0xd4, 0x89, 0xd9, 0xfa, 0x3c, 0x0c, 0x9c, 0xdb, 0x1e, 0x5a, 0x81, + 0x49, 0x7e, 0x4b, 0x2a, 0x80, 0x66, 0x6b, 0xf7, 0x90, 0x20, 0x3c, 0xb9, 0x96, 0x46, 0xc1, 0x59, + 0xf5, 0xec, 0x5f, 0x1f, 0x81, 0xdc, 0x5e, 0xbc, 0xaf, 0xdd, 0x7c, 0x5f, 0xbb, 0xf9, 0xbe, 0x76, + 0xf3, 0x7d, 0xed, 0xe6, 0xfb, 0xda, 0xcd, 0xf7, 0xb5, 0x9b, 0xef, 0x52, 0xed, 0xe6, 0xdf, 0xb6, + 0xe0, 0xa8, 0xba, 0xbe, 0x8c, 0x07, 0xfb, 0x67, 0x60, 0x92, 0x6f, 0xb7, 0xf9, 0xba, 0xe3, 0x36, + 0xd6, 0x48, 0xa3, 0x59, 0x77, 0x22, 0x69, 0xc3, 0x74, 0x21, 0x73, 0xe5, 0x26, 0x1c, 0x25, 0x8c, + 0x8a, 0xdc, 0xe3, 0x2c, 0x03, 0x80, 0xb3, 0x9a, 0xb1, 0x7f, 0x65, 0x00, 0x7a, 0x17, 0xb7, 0x89, + 0x17, 0x1d, 0xc2, 0xd3, 0xa6, 0x0a, 0xa3, 0xae, 0xb7, 0xed, 0xd7, 0xb7, 0x49, 0x8d, 0xc3, 0x0f, + 0xf2, 0x02, 0x3f, 0x26, 0x48, 0x8f, 0x2e, 0x1b, 0x24, 0x70, 0x82, 0xe4, 0x83, 0xd0, 0x11, 0x5d, + 0x82, 0x3e, 0x7e, 0xf9, 0x08, 0x05, 0x51, 0xe6, 0x99, 0xcd, 0x06, 0x51, 0x5c, 0xa9, 0xb1, 0xfe, + 0x8a, 0x5f, 0x6e, 0xa2, 0x3a, 0xfa, 0x34, 0x8c, 0xae, 0xbb, 0x41, 0x18, 0xad, 0xb9, 0x0d, 0x7a, + 0x35, 0x34, 0x9a, 0xf7, 0xa0, 0x13, 0x52, 0xe3, 0xb0, 0x64, 0x50, 0xc2, 0x09, 0xca, 0x68, 0x03, + 0x46, 0xea, 0x8e, 0xde, 0x54, 0xff, 0x81, 0x9b, 0x52, 0xb7, 0xc3, 0x55, 0x9d, 0x10, 0x36, 0xe9, + 0xd2, 0xed, 0x54, 0x65, 0x6a, 0x8d, 0x01, 0x26, 0xce, 0x50, 0xdb, 0x89, 0xeb, 0x33, 0x38, 0x8c, + 0x32, 0x68, 0xcc, 0xdd, 0x60, 0xd0, 0x64, 0xd0, 0x34, 0xa7, 0x82, 0x4f, 0xc1, 0x20, 0xa1, 0x43, + 0x48, 0x09, 0x8b, 0x0b, 0xe6, 0x7c, 0x77, 0x7d, 0x5d, 0x71, 0xab, 0x81, 0x6f, 0x6a, 0xe3, 0x16, + 0x25, 0x25, 0x1c, 0x13, 0x45, 0xf3, 0xd0, 0x17, 0x92, 0xc0, 0x55, 0x12, 0xff, 0x36, 0xd3, 0xc8, + 0xd0, 0xb8, 0x4b, 0x23, 0xff, 0x8d, 0x45, 0x55, 0xba, 0xbc, 0x1c, 0x26, 0x8a, 0x65, 0x97, 0x81, + 0xb6, 0xbc, 0x66, 0x59, 0x29, 0x16, 0x50, 0xf4, 0x3a, 0xf4, 0x07, 0xa4, 0xce, 0xd4, 0xbd, 0x23, + 0xdd, 0x2f, 0x72, 0xae, 0x3d, 0xe6, 0xf5, 0xb0, 0x24, 0x80, 0xae, 0x00, 0x0a, 0x08, 0x65, 0xf0, + 0x5c, 0x6f, 0x43, 0x19, 0xe1, 0x8b, 0x83, 0x56, 0x31, 0xd2, 0x38, 0xc6, 0x90, 0xde, 0xac, 0x38, + 0xa3, 0x1a, 0xba, 0x04, 0x13, 0xaa, 0x74, 0xd9, 0x0b, 0x23, 0x87, 0x1e, 0x70, 0x63, 0x8c, 0x96, + 0x92, 0xaf, 0xe0, 0x24, 0x02, 0x4e, 0xd7, 0xb1, 0x7f, 0xde, 0x02, 0x3e, 0xce, 0x87, 0x20, 0x55, + 0x78, 0xd5, 0x94, 0x2a, 0x9c, 0xc8, 0x9d, 0xb9, 0x1c, 0x89, 0xc2, 0xcf, 0x5b, 0x30, 0xa4, 0xcd, + 0x6c, 0xbc, 0x66, 0xad, 0x36, 0x6b, 0xb6, 0x05, 0xe3, 0x74, 0xa5, 0x5f, 0xbb, 0x15, 0x92, 0x60, + 0x9b, 0xd4, 0xd8, 0xc2, 0x2c, 0xdc, 0xdb, 0xc2, 0x54, 0x06, 0xbf, 0x57, 0x13, 0x04, 0x71, 0xaa, + 0x09, 0xfb, 0x53, 0xb2, 0xab, 0xca, 0x3e, 0xba, 0xaa, 0xe6, 0x3c, 0x61, 0x1f, 0xad, 0x66, 0x15, + 0xc7, 0x38, 0x74, 0xab, 0x6d, 0xfa, 0x61, 0x94, 0xb4, 0x8f, 0xbe, 0xec, 0x87, 0x11, 0x66, 0x10, + 0xfb, 0x39, 0x80, 0xc5, 0x3b, 0xa4, 0xca, 0x57, 0xac, 0xfe, 0xe8, 0xb1, 0xf2, 0x1f, 0x3d, 0xf6, + 0x1f, 0x58, 0x30, 0xba, 0x34, 0x6f, 0xdc, 0x5c, 0x33, 0x00, 0xfc, 0xa5, 0x76, 0xf3, 0xe6, 0xaa, + 0x34, 0xd2, 0xe1, 0x76, 0x0a, 0xaa, 0x14, 0x6b, 0x18, 0xe8, 0x04, 0x14, 0xeb, 0x2d, 0x4f, 0x88, + 0x3d, 0xfb, 0xe9, 0xf5, 0x78, 0xb5, 0xe5, 0x61, 0x5a, 0xa6, 0x79, 0xb2, 0x15, 0xbb, 0xf6, 0x64, + 0xeb, 0x18, 0x50, 0x07, 0x95, 0xa0, 0xf7, 0xf6, 0x6d, 0xb7, 0xc6, 0xe3, 0x04, 0x08, 0x03, 0xa2, + 0x9b, 0x37, 0x97, 0x17, 0x42, 0xcc, 0xcb, 
0xed, 0x2f, 0x16, 0x61, 0x7a, 0xa9, 0x4e, 0xee, 0xbc, + 0xc3, 0x58, 0x09, 0xdd, 0xfa, 0xe1, 0x1d, 0x4c, 0x80, 0x74, 0x50, 0x5f, 0xcb, 0xce, 0xe3, 0xb1, + 0x0e, 0xfd, 0xdc, 0x3c, 0x58, 0x46, 0x4e, 0xc8, 0x54, 0xca, 0xe6, 0x0f, 0xc8, 0x0c, 0x37, 0x33, + 0x16, 0x4a, 0x59, 0x75, 0x61, 0x8a, 0x52, 0x2c, 0x89, 0x4f, 0xbf, 0x04, 0xc3, 0x3a, 0xe6, 0x81, + 0xbc, 0x9e, 0xbf, 0xb7, 0x08, 0xe3, 0xb4, 0x07, 0x0f, 0x74, 0x22, 0xae, 0xa7, 0x27, 0xe2, 0x7e, + 0x7b, 0xbe, 0x76, 0x9e, 0x8d, 0xb7, 0x92, 0xb3, 0x71, 0x21, 0x6f, 0x36, 0x0e, 0x7b, 0x0e, 0xbe, + 0xcf, 0x82, 0xc9, 0xa5, 0xba, 0x5f, 0xdd, 0x4a, 0x78, 0xa7, 0xbe, 0x00, 0x43, 0xf4, 0x38, 0x0e, + 0x8d, 0x40, 0x2d, 0x46, 0xe8, 0x1e, 0x01, 0xc2, 0x3a, 0x9e, 0x56, 0xed, 0xfa, 0xf5, 0xe5, 0x85, + 0xac, 0x88, 0x3f, 0x02, 0x84, 0x75, 0x3c, 0xfb, 0xf7, 0x2c, 0x38, 0x75, 0x69, 0x7e, 0x31, 0x5e, + 0x8a, 0xa9, 0xa0, 0x43, 0x67, 0xa1, 0xaf, 0x59, 0xd3, 0xba, 0x12, 0x8b, 0x85, 0x17, 0x58, 0x2f, + 0x04, 0xf4, 0xdd, 0x12, 0xdf, 0xeb, 0x3a, 0xc0, 0x25, 0x5c, 0x9e, 0x17, 0xe7, 0xae, 0xd4, 0x02, + 0x59, 0xb9, 0x5a, 0xa0, 0xc7, 0xa0, 0x9f, 0xde, 0x0b, 0x6e, 0x55, 0xf6, 0x9b, 0x9b, 0x5d, 0xf0, + 0x22, 0x2c, 0x61, 0xf6, 0xcf, 0x59, 0x30, 0x79, 0xc9, 0x8d, 0xe8, 0xa5, 0x9d, 0x8c, 0xaa, 0x43, + 0x6f, 0xed, 0xd0, 0x8d, 0xfc, 0x60, 0x27, 0x19, 0x55, 0x07, 0x2b, 0x08, 0xd6, 0xb0, 0xf8, 0x07, + 0x6d, 0xbb, 0xcc, 0xdf, 0xa5, 0x60, 0xea, 0xdd, 0xb0, 0x28, 0xc7, 0x0a, 0x83, 0x8e, 0x57, 0xcd, + 0x0d, 0x98, 0xc8, 0x72, 0x47, 0x1c, 0xdc, 0x6a, 0xbc, 0x16, 0x24, 0x00, 0xc7, 0x38, 0xf6, 0x9f, + 0x5b, 0x50, 0xba, 0xc4, 0xbd, 0x76, 0xd7, 0xc3, 0x9c, 0x43, 0xf7, 0x39, 0x18, 0x24, 0x52, 0x41, + 0x20, 0x7a, 0xad, 0x18, 0x51, 0xa5, 0x39, 0xe0, 0xc1, 0x7d, 0x14, 0x5e, 0x17, 0x2e, 0xf4, 0x07, + 0xf3, 0x81, 0x5e, 0x02, 0x44, 0xf4, 0xb6, 0xf4, 0x68, 0x47, 0x2c, 0x6c, 0xca, 0x62, 0x0a, 0x8a, + 0x33, 0x6a, 0xd8, 0x3f, 0x61, 0xc1, 0x51, 0xf5, 0xc1, 0xef, 0xba, 0xcf, 0xb4, 0xbf, 0x5a, 0x80, + 0x91, 0xcb, 0x6b, 0x6b, 0xe5, 0x4b, 0x24, 0xd2, 0x56, 0x65, 0x7b, 0xb5, 0x3f, 0xd6, 0xb4, 0x97, + 0xed, 0xde, 0x88, 0xad, 0xc8, 0xad, 0xcf, 0xf0, 0x18, 0x7e, 0x33, 0xcb, 0x5e, 0x74, 0x2d, 0xa8, + 0x44, 0x81, 0xeb, 0x6d, 0x64, 0xae, 0x74, 0xc9, 0xb3, 0x14, 0xf3, 0x78, 0x16, 0xf4, 0x1c, 0xf4, + 0xb1, 0x20, 0x82, 0x72, 0x12, 0x1e, 0x52, 0x4f, 0x2c, 0x56, 0xba, 0xbf, 0x5b, 0x1a, 0xbc, 0x8e, + 0x97, 0xf9, 0x1f, 0x2c, 0x50, 0xd1, 0x75, 0x18, 0xda, 0x8c, 0xa2, 0xe6, 0x65, 0xe2, 0xd4, 0x48, + 0x20, 0x4f, 0xd9, 0xd3, 0x59, 0xa7, 0x2c, 0x1d, 0x04, 0x8e, 0x16, 0x1f, 0x4c, 0x71, 0x59, 0x88, + 0x75, 0x3a, 0x76, 0x05, 0x20, 0x86, 0xdd, 0x27, 0xc5, 0x8d, 0xbd, 0x06, 0x83, 0xf4, 0x73, 0x67, + 0xeb, 0xae, 0xd3, 0x5e, 0x35, 0xfe, 0x14, 0x0c, 0x4a, 0xc5, 0x77, 0x28, 0x42, 0x7c, 0xb0, 0x1b, + 0x49, 0xea, 0xc5, 0x43, 0x1c, 0xc3, 0xed, 0x47, 0x41, 0x58, 0x00, 0xb7, 0x23, 0x69, 0xaf, 0xc3, + 0x11, 0x66, 0xca, 0xec, 0x44, 0x9b, 0xc6, 0x1a, 0xed, 0xbc, 0x18, 0x9e, 0x16, 0xef, 0x3a, 0xfe, + 0x65, 0x53, 0x9a, 0x0b, 0xf9, 0xb0, 0xa4, 0x18, 0xbf, 0xf1, 0xec, 0x3f, 0xeb, 0x81, 0x87, 0x96, + 0x2b, 0xf9, 0xb1, 0xa9, 0x2e, 0xc2, 0x30, 0x67, 0x17, 0xe9, 0xd2, 0x70, 0xea, 0xa2, 0x5d, 0x25, + 0x01, 0x5d, 0xd3, 0x60, 0xd8, 0xc0, 0x44, 0xa7, 0xa0, 0xe8, 0xbe, 0xed, 0x25, 0x1d, 0x2c, 0x97, + 0xdf, 0x58, 0xc5, 0xb4, 0x9c, 0x82, 0x29, 0xe7, 0xc9, 0x8f, 0x74, 0x05, 0x56, 0xdc, 0xe7, 0xab, + 0x30, 0xea, 0x86, 0xd5, 0xd0, 0x5d, 0xf6, 0xe8, 0x3e, 0xd5, 0x76, 0xba, 0x92, 0x39, 0xd0, 0x4e, + 0x2b, 0x28, 0x4e, 0x60, 0x6b, 0xf7, 0x4b, 0x6f, 0xd7, 0xdc, 0x6b, 0xc7, 0xc8, 0x18, 0xf4, 0xf8, + 0x6f, 0xb2, 0xaf, 0x0b, 0x99, 0x08, 0x5e, 0x1c, 0xff, 0xfc, 0x83, 
0x43, 0x2c, 0x61, 0xf4, 0x41, + 0x57, 0xdd, 0x74, 0x9a, 0xb3, 0xad, 0x68, 0x73, 0xc1, 0x0d, 0xab, 0xfe, 0x36, 0x09, 0x76, 0xd8, + 0x5b, 0x7c, 0x20, 0x7e, 0xd0, 0x29, 0xc0, 0xfc, 0xe5, 0xd9, 0x32, 0xc5, 0xc4, 0xe9, 0x3a, 0x68, + 0x16, 0xc6, 0x64, 0x61, 0x85, 0x84, 0xec, 0x0a, 0x18, 0x62, 0x64, 0x94, 0xcb, 0xa3, 0x28, 0x56, + 0x44, 0x92, 0xf8, 0x26, 0x83, 0x0b, 0xf7, 0x83, 0xc1, 0x7d, 0x11, 0x46, 0x5c, 0xcf, 0x8d, 0x5c, + 0x27, 0xf2, 0xb9, 0xfe, 0x88, 0x3f, 0xbb, 0x99, 0x80, 0x79, 0x59, 0x07, 0x60, 0x13, 0xcf, 0xfe, + 0x6f, 0x3d, 0x30, 0xc1, 0xa6, 0xed, 0xfd, 0x15, 0xf6, 0x5e, 0x5a, 0x61, 0xd7, 0xd3, 0x2b, 0xec, + 0x7e, 0x70, 0xee, 0xf7, 0xbc, 0xcc, 0x3e, 0x67, 0xc1, 0x04, 0x93, 0x71, 0x1b, 0xcb, 0xec, 0x3c, + 0x0c, 0x06, 0x86, 0x37, 0xea, 0xa0, 0xae, 0xd4, 0x92, 0x8e, 0xa5, 0x31, 0x0e, 0x7a, 0x0d, 0xa0, + 0x19, 0xcb, 0xd0, 0x0b, 0x46, 0x08, 0x51, 0xc8, 0x15, 0x9f, 0x6b, 0x75, 0xec, 0x4f, 0xc3, 0xa0, + 0x72, 0x37, 0x95, 0xfe, 0xe6, 0x56, 0x8e, 0xbf, 0x79, 0x67, 0x36, 0x42, 0xda, 0xc6, 0x15, 0x33, + 0x6d, 0xe3, 0xbe, 0x6c, 0x41, 0xac, 0xe1, 0x40, 0x6f, 0xc0, 0x60, 0xd3, 0x67, 0x06, 0xd1, 0x81, + 0xf4, 0x32, 0x78, 0xb4, 0xad, 0x8a, 0x84, 0xc7, 0x09, 0x0c, 0xf8, 0x74, 0x94, 0x65, 0x55, 0x1c, + 0x53, 0x41, 0x57, 0xa0, 0xbf, 0x19, 0x90, 0x4a, 0xc4, 0x82, 0x58, 0x75, 0x4f, 0x90, 0x2f, 0x5f, + 0x5e, 0x11, 0x4b, 0x0a, 0xf6, 0x2f, 0x16, 0x60, 0x3c, 0x89, 0x8a, 0x5e, 0x81, 0x1e, 0x72, 0x87, + 0x54, 0x45, 0x7f, 0x33, 0x79, 0x82, 0x58, 0x46, 0xc2, 0x07, 0x80, 0xfe, 0xc7, 0xac, 0x16, 0xba, + 0x0c, 0xfd, 0x94, 0x21, 0xb8, 0xa4, 0x02, 0x36, 0x3e, 0x9c, 0xc7, 0x54, 0x28, 0xce, 0x8a, 0x77, + 0x4e, 0x14, 0x61, 0x59, 0x9d, 0x19, 0xa4, 0x55, 0x9b, 0x15, 0xfa, 0xd6, 0x8a, 0xda, 0x89, 0x04, + 0xd6, 0xe6, 0xcb, 0x1c, 0x49, 0x50, 0xe3, 0x06, 0x69, 0xb2, 0x10, 0xc7, 0x44, 0xd0, 0x6b, 0xd0, + 0x1b, 0xd6, 0x09, 0x69, 0x0a, 0x8b, 0x83, 0x4c, 0x29, 0x67, 0x85, 0x22, 0x08, 0x4a, 0x4c, 0x2a, + 0xc2, 0x0a, 0x30, 0xaf, 0x68, 0xff, 0x92, 0x05, 0xc0, 0x2d, 0xf8, 0x1c, 0x6f, 0x83, 0x1c, 0x82, + 0x62, 0x60, 0x01, 0x7a, 0xc2, 0x26, 0xa9, 0xb6, 0xb3, 0xf6, 0x8f, 0xfb, 0x53, 0x69, 0x92, 0x6a, + 0xbc, 0x66, 0xe9, 0x3f, 0xcc, 0x6a, 0xdb, 0xdf, 0x0f, 0x30, 0x1a, 0xa3, 0x2d, 0x47, 0xa4, 0x81, + 0x9e, 0x31, 0xa2, 0xdc, 0x9c, 0x48, 0x44, 0xb9, 0x19, 0x64, 0xd8, 0x9a, 0x0c, 0xfa, 0xd3, 0x50, + 0x6c, 0x38, 0x77, 0x84, 0x90, 0xf1, 0xa9, 0xf6, 0xdd, 0xa0, 0xf4, 0x67, 0x56, 0x9c, 0x3b, 0xfc, + 0x1d, 0xfe, 0x94, 0xdc, 0x63, 0x2b, 0xce, 0x9d, 0x8e, 0x16, 0xe9, 0xb4, 0x11, 0xd6, 0x96, 0xeb, + 0x09, 0xe3, 0xb4, 0xae, 0xda, 0x72, 0xbd, 0x64, 0x5b, 0xae, 0xd7, 0x45, 0x5b, 0xae, 0x87, 0xee, + 0x42, 0xbf, 0xb0, 0x1d, 0x15, 0xe1, 0xf7, 0xce, 0x77, 0xd1, 0x9e, 0x30, 0x3d, 0xe5, 0x6d, 0x9e, + 0x97, 0x72, 0x06, 0x51, 0xda, 0xb1, 0x5d, 0xd9, 0x20, 0xfa, 0x3b, 0x16, 0x8c, 0x8a, 0xdf, 0x98, + 0xbc, 0xdd, 0x22, 0x61, 0x24, 0xf8, 0xf0, 0x0f, 0x75, 0xdf, 0x07, 0x51, 0x91, 0x77, 0xe5, 0x43, + 0xf2, 0xca, 0x34, 0x81, 0x1d, 0x7b, 0x94, 0xe8, 0x05, 0xfa, 0x45, 0x0b, 0x8e, 0x34, 0x9c, 0x3b, + 0xbc, 0x45, 0x5e, 0x86, 0x9d, 0xc8, 0xf5, 0x85, 0x0d, 0xc6, 0x2b, 0xdd, 0x4d, 0x7f, 0xaa, 0x3a, + 0xef, 0xa4, 0x54, 0xb8, 0x1e, 0xc9, 0x42, 0xe9, 0xd8, 0xd5, 0xcc, 0x7e, 0x4d, 0xaf, 0xc3, 0x80, + 0x5c, 0x6f, 0x0f, 0xd2, 0x30, 0x9e, 0xb5, 0x23, 0xd6, 0xda, 0x03, 0x6d, 0xe7, 0xd3, 0x30, 0xac, + 0xaf, 0xb1, 0x07, 0xda, 0xd6, 0xdb, 0x30, 0x99, 0xb1, 0x96, 0x1e, 0x68, 0x93, 0xb7, 0xe1, 0x44, + 0xee, 0xfa, 0x78, 0xa0, 0x8e, 0x0d, 0x5f, 0xb5, 0xf4, 0x73, 0xf0, 0x10, 0xb4, 0x33, 0xf3, 0xa6, + 0x76, 0xe6, 0x74, 0xfb, 0x9d, 0x93, 0xa3, 0xa2, 0x79, 0x4b, 0xef, 0x34, 0x3d, 0xd5, 0xd1, 
0xeb, + 0xd0, 0x57, 0xa7, 0x25, 0xd2, 0x02, 0xd9, 0xee, 0xbc, 0x23, 0x63, 0xbe, 0x98, 0x95, 0x87, 0x58, + 0x50, 0xb0, 0xbf, 0x64, 0x41, 0x86, 0x6b, 0x06, 0xe5, 0x93, 0x5a, 0x6e, 0x8d, 0x0d, 0x49, 0x31, + 0xe6, 0x93, 0x54, 0x10, 0x98, 0x53, 0x50, 0xdc, 0x70, 0x6b, 0xc2, 0xb3, 0x58, 0x81, 0x2f, 0x51, + 0xf0, 0x86, 0x5b, 0x43, 0x4b, 0x80, 0xc2, 0x56, 0xb3, 0x59, 0x67, 0x66, 0x4b, 0x4e, 0xfd, 0x52, + 0xe0, 0xb7, 0x9a, 0xdc, 0xdc, 0xb8, 0xc8, 0x85, 0x44, 0x95, 0x14, 0x14, 0x67, 0xd4, 0xb0, 0x7f, + 0xd5, 0x82, 0x9e, 0x43, 0x98, 0x26, 0x6c, 0x4e, 0xd3, 0x33, 0xb9, 0xa4, 0x45, 0xd6, 0x86, 0x19, + 0xec, 0xdc, 0x5e, 0xbc, 0x13, 0x11, 0x2f, 0x64, 0x0c, 0x47, 0xe6, 0xac, 0xed, 0x5a, 0x30, 0x79, + 0xd5, 0x77, 0x6a, 0x73, 0x4e, 0xdd, 0xf1, 0xaa, 0x24, 0x58, 0xf6, 0x36, 0x0e, 0x64, 0xdb, 0x5f, + 0xe8, 0x68, 0xdb, 0x7f, 0x11, 0xfa, 0xdc, 0xa6, 0x16, 0xf6, 0xfd, 0x0c, 0x9d, 0xdd, 0xe5, 0xb2, + 0x88, 0xf8, 0x8e, 0x8c, 0xc6, 0x59, 0x29, 0x16, 0xf8, 0x74, 0x59, 0x72, 0xa3, 0xba, 0x9e, 0xfc, + 0x65, 0x49, 0xdf, 0x3a, 0xc9, 0x70, 0x66, 0x86, 0xf9, 0xf7, 0x26, 0x18, 0x4d, 0x08, 0x0f, 0x46, + 0x0c, 0xfd, 0x2e, 0xff, 0x52, 0xb1, 0x36, 0x1f, 0xcf, 0x7e, 0x83, 0xa4, 0x06, 0x46, 0xf3, 0xcd, + 0xe3, 0x05, 0x58, 0x12, 0xb2, 0x2f, 0x42, 0x66, 0xf8, 0x99, 0xce, 0xf2, 0x25, 0xfb, 0x63, 0x30, + 0xc1, 0x6a, 0x1e, 0x50, 0x76, 0x63, 0x27, 0xa4, 0xe2, 0x19, 0x11, 0x7c, 0xed, 0xff, 0x6c, 0x01, + 0x5a, 0xf1, 0x6b, 0xee, 0xfa, 0x8e, 0x20, 0xce, 0xbf, 0xff, 0x6d, 0x28, 0xf1, 0xc7, 0x71, 0x32, + 0xca, 0xed, 0x7c, 0xdd, 0x09, 0x43, 0x4d, 0x22, 0xff, 0xb8, 0x68, 0xb7, 0xb4, 0xd6, 0x1e, 0x1d, + 0x77, 0xa2, 0x87, 0xde, 0x48, 0x04, 0x1d, 0xfc, 0x70, 0x2a, 0xe8, 0xe0, 0xe3, 0x99, 0x76, 0x31, + 0xe9, 0xde, 0xcb, 0x60, 0x84, 0xf6, 0xe7, 0x2d, 0x18, 0x5b, 0x4d, 0x44, 0x6d, 0x3d, 0xcb, 0x8c, + 0x04, 0x32, 0x34, 0x4d, 0x15, 0x56, 0x8a, 0x05, 0xf4, 0xbe, 0x4b, 0x62, 0xff, 0xc6, 0x82, 0x38, + 0xdc, 0xd5, 0x21, 0xb0, 0xdc, 0xf3, 0x06, 0xcb, 0x9d, 0xf9, 0x7c, 0x51, 0xdd, 0xc9, 0xe3, 0xb8, + 0xd1, 0x15, 0x35, 0x27, 0x6d, 0x5e, 0x2e, 0x31, 0x19, 0xbe, 0xcf, 0x46, 0xcd, 0x89, 0x53, 0xb3, + 0xf1, 0xf5, 0x02, 0x20, 0x85, 0xdb, 0x75, 0xa0, 0xca, 0x74, 0x8d, 0xfb, 0x13, 0xa8, 0x72, 0x1b, + 0x10, 0x33, 0x73, 0x09, 0x1c, 0x2f, 0xe4, 0x64, 0x5d, 0x21, 0x7b, 0x3e, 0x98, 0x0d, 0xcd, 0xb4, + 0xf4, 0x5c, 0xbd, 0x9a, 0xa2, 0x86, 0x33, 0x5a, 0xd0, 0xcc, 0x97, 0x7a, 0xbb, 0x35, 0x5f, 0xea, + 0xeb, 0xe0, 0x82, 0xfd, 0x15, 0x0b, 0x46, 0xd4, 0x30, 0xbd, 0x4b, 0x5c, 0x40, 0x54, 0x7f, 0x72, + 0xee, 0x95, 0xb2, 0xd6, 0x65, 0xc6, 0x0c, 0x7c, 0x07, 0x73, 0xa5, 0x77, 0xea, 0xee, 0x5d, 0xa2, + 0xe2, 0x29, 0x97, 0x84, 0x6b, 0xbc, 0x28, 0xdd, 0xdf, 0x2d, 0x8d, 0xa8, 0x7f, 0x3c, 0x82, 0x6b, + 0x5c, 0xc5, 0xfe, 0x69, 0xba, 0xd9, 0xcd, 0xa5, 0x88, 0x5e, 0x80, 0xde, 0xe6, 0xa6, 0x13, 0x92, + 0x84, 0xab, 0x5c, 0x6f, 0x99, 0x16, 0xee, 0xef, 0x96, 0x46, 0x55, 0x05, 0x56, 0x82, 0x39, 0x76, + 0xf7, 0xe1, 0x3f, 0xd3, 0x8b, 0xb3, 0x63, 0xf8, 0xcf, 0xbf, 0xb4, 0xa0, 0x67, 0x95, 0xde, 0x5e, + 0x0f, 0xfe, 0x08, 0x78, 0xd5, 0x38, 0x02, 0x4e, 0xe6, 0x65, 0x16, 0xca, 0xdd, 0xfd, 0x4b, 0x89, + 0xdd, 0x7f, 0x3a, 0x97, 0x42, 0xfb, 0x8d, 0xdf, 0x80, 0x21, 0x96, 0xaf, 0x48, 0xb8, 0x05, 0x3e, + 0x67, 0x6c, 0xf8, 0x52, 0x62, 0xc3, 0x8f, 0x69, 0xa8, 0xda, 0x4e, 0x7f, 0x02, 0xfa, 0x85, 0x9f, + 0x59, 0x32, 0x22, 0x81, 0xc0, 0xc5, 0x12, 0x6e, 0xff, 0x64, 0x11, 0x8c, 0xfc, 0x48, 0xe8, 0xd7, + 0x2d, 0x98, 0x09, 0xb8, 0xfd, 0x79, 0x6d, 0xa1, 0x15, 0xb8, 0xde, 0x46, 0xa5, 0xba, 0x49, 0x6a, + 0xad, 0xba, 0xeb, 0x6d, 0x2c, 0x6f, 0x78, 0xbe, 0x2a, 0x5e, 0xbc, 0x43, 0xaa, 0x2d, 0xa6, 0x1b, + 0xee, 0x90, 0x8c, 
0x49, 0xf9, 0x71, 0x3c, 0xbb, 0xb7, 0x5b, 0x9a, 0xc1, 0x07, 0xa2, 0x8d, 0x0f, + 0xd8, 0x17, 0xf4, 0x7b, 0x16, 0x9c, 0xe7, 0x79, 0x7a, 0xba, 0xef, 0x7f, 0x1b, 0x09, 0x47, 0x59, + 0x92, 0x8a, 0x89, 0xac, 0x91, 0xa0, 0x31, 0xf7, 0xa2, 0x18, 0xd0, 0xf3, 0xe5, 0x83, 0xb5, 0x85, + 0x0f, 0xda, 0x39, 0xfb, 0x5f, 0x14, 0x61, 0x44, 0x84, 0x89, 0x14, 0x77, 0xc0, 0x0b, 0xc6, 0x92, + 0x78, 0x38, 0xb1, 0x24, 0x26, 0x0c, 0xe4, 0xfb, 0x73, 0xfc, 0x87, 0x30, 0x41, 0x0f, 0xe7, 0xcb, + 0xc4, 0x09, 0xa2, 0x5b, 0xc4, 0xe1, 0x56, 0x89, 0xc5, 0x03, 0x9f, 0xfe, 0x4a, 0x3c, 0x7e, 0x35, + 0x49, 0x0c, 0xa7, 0xe9, 0xbf, 0x97, 0xee, 0x1c, 0x0f, 0xc6, 0x53, 0x91, 0x3e, 0xdf, 0x84, 0x41, + 0xe5, 0x24, 0x25, 0x0e, 0x9d, 0xf6, 0x01, 0x73, 0x93, 0x14, 0xb8, 0xd0, 0x33, 0x76, 0xd0, 0x8b, + 0xc9, 0xd9, 0xff, 0xa4, 0x60, 0x34, 0xc8, 0x27, 0x71, 0x15, 0x06, 0x9c, 0x90, 0x05, 0xf1, 0xae, + 0xb5, 0x93, 0x4b, 0xa7, 0x9a, 0x61, 0x8e, 0x6a, 0xb3, 0xa2, 0x26, 0x56, 0x34, 0xd0, 0x65, 0x6e, + 0xfb, 0xb9, 0x4d, 0xda, 0x09, 0xa5, 0x53, 0xd4, 0x40, 0x5a, 0x87, 0x6e, 0x13, 0x2c, 0xea, 0xa3, + 0x4f, 0x70, 0xe3, 0xdc, 0x2b, 0x9e, 0x7f, 0xdb, 0xbb, 0xe4, 0xfb, 0x32, 0x24, 0x50, 0x77, 0x04, + 0x27, 0xa4, 0x49, 0xae, 0xaa, 0x8e, 0x4d, 0x6a, 0xdd, 0x85, 0xce, 0xfe, 0x0c, 0xb0, 0xbc, 0x24, + 0x66, 0x4c, 0x82, 0x10, 0x11, 0x18, 0x13, 0x31, 0x48, 0x65, 0x99, 0x18, 0xbb, 0xcc, 0xe7, 0xb7, + 0x59, 0x3b, 0xd6, 0xe3, 0x5c, 0x31, 0x49, 0xe0, 0x24, 0x4d, 0x7b, 0x93, 0x1f, 0xc2, 0x4b, 0xc4, + 0x89, 0x5a, 0x01, 0x09, 0xd1, 0x47, 0x61, 0x2a, 0xfd, 0x32, 0x16, 0xea, 0x10, 0x8b, 0x71, 0xcf, + 0x27, 0xf7, 0x76, 0x4b, 0x53, 0x95, 0x1c, 0x1c, 0x9c, 0x5b, 0xdb, 0xfe, 0x59, 0x0b, 0x98, 0x27, + 0xf8, 0x21, 0x70, 0x3e, 0x1f, 0x31, 0x39, 0x9f, 0xa9, 0xbc, 0xe9, 0xcc, 0x61, 0x7a, 0x9e, 0xe7, + 0x6b, 0xb8, 0x1c, 0xf8, 0x77, 0x76, 0x84, 0xed, 0x56, 0xe7, 0x67, 0x9c, 0xfd, 0x45, 0x0b, 0x58, + 0x12, 0x1f, 0xcc, 0x5f, 0xed, 0x52, 0xc1, 0xd1, 0xd9, 0x2c, 0xe1, 0xa3, 0x30, 0xb0, 0x2e, 0x86, + 0x3f, 0x43, 0xe8, 0x64, 0x74, 0xd8, 0xa4, 0x2d, 0x27, 0x4d, 0x78, 0x74, 0x8a, 0x7f, 0x58, 0x51, + 0xb3, 0xff, 0xb1, 0x05, 0xd3, 0xf9, 0xd5, 0xd0, 0x75, 0x38, 0x1e, 0x90, 0x6a, 0x2b, 0x08, 0xe9, + 0x96, 0x10, 0x0f, 0x20, 0xe1, 0x14, 0xc5, 0xa7, 0xfa, 0xa1, 0xbd, 0xdd, 0xd2, 0x71, 0x9c, 0x8d, + 0x82, 0xf3, 0xea, 0xa2, 0x97, 0x60, 0xb4, 0x15, 0x72, 0xce, 0x8f, 0x31, 0x5d, 0xa1, 0x88, 0x14, + 0xcd, 0xfc, 0x86, 0xae, 0x1b, 0x10, 0x9c, 0xc0, 0xb4, 0xbf, 0x8b, 0x2f, 0x47, 0x15, 0x2c, 0xba, + 0x01, 0x13, 0x9e, 0xf6, 0x9f, 0xde, 0x80, 0xf2, 0xa9, 0xff, 0x68, 0xa7, 0x5b, 0x9f, 0x5d, 0x97, + 0x9a, 0xaf, 0x7a, 0x82, 0x0c, 0x4e, 0x53, 0xb6, 0x7f, 0xca, 0x82, 0xe3, 0x3a, 0xa2, 0xe6, 0x0e, + 0xd7, 0x49, 0x97, 0xb7, 0x00, 0x03, 0x7e, 0x93, 0x04, 0x4e, 0xe4, 0x07, 0xe2, 0x9a, 0x3b, 0x27, + 0x57, 0xe8, 0x35, 0x51, 0xbe, 0x2f, 0x92, 0xd7, 0x48, 0xea, 0xb2, 0x1c, 0xab, 0x9a, 0xc8, 0x86, + 0x3e, 0x26, 0x40, 0x0c, 0x85, 0xe3, 0x23, 0x3b, 0xb4, 0x98, 0x7d, 0x4a, 0x88, 0x05, 0xc4, 0xfe, + 0x33, 0x8b, 0xaf, 0x4f, 0xbd, 0xeb, 0xe8, 0x6d, 0x18, 0x6f, 0x38, 0x51, 0x75, 0x73, 0xf1, 0x4e, + 0x33, 0xe0, 0x2a, 0x5a, 0x39, 0x4e, 0x4f, 0x75, 0x1a, 0x27, 0xed, 0x23, 0x63, 0x03, 0xe9, 0x95, + 0x04, 0x31, 0x9c, 0x22, 0x8f, 0x6e, 0xc1, 0x10, 0x2b, 0x63, 0x3e, 0xbd, 0x61, 0x3b, 0x5e, 0x26, + 0xaf, 0x35, 0x65, 0xe2, 0xb3, 0x12, 0xd3, 0xc1, 0x3a, 0x51, 0xfb, 0xcb, 0x45, 0x7e, 0x68, 0xb0, + 0xb7, 0xc7, 0x13, 0xd0, 0xdf, 0xf4, 0x6b, 0xf3, 0xcb, 0x0b, 0x58, 0xcc, 0x82, 0xba, 0xf7, 0xca, + 0xbc, 0x18, 0x4b, 0x38, 0x3a, 0x07, 0x03, 0xe2, 0xa7, 0x54, 0xa9, 0xb3, 0x3d, 0x22, 0xf0, 0x42, + 0xac, 0xa0, 0xe8, 0x59, 0x80, 0x66, 0xe0, 
0x6f, 0xbb, 0x35, 0x16, 0x89, 0xa9, 0x68, 0x5a, 0xe7, + 0x95, 0x15, 0x04, 0x6b, 0x58, 0xe8, 0x65, 0x18, 0x69, 0x79, 0x21, 0xe7, 0x9f, 0xb4, 0x78, 0xf7, + 0xca, 0x6e, 0xec, 0xba, 0x0e, 0xc4, 0x26, 0x2e, 0x9a, 0x85, 0xbe, 0xc8, 0x61, 0xd6, 0x66, 0xbd, + 0xf9, 0x46, 0xf4, 0x6b, 0x14, 0x43, 0xcf, 0x2c, 0x47, 0x2b, 0x60, 0x51, 0x11, 0xbd, 0x29, 0xdd, + 0xeb, 0xf9, 0x4d, 0x24, 0xbc, 0x57, 0xba, 0xbb, 0xb5, 0x34, 0xe7, 0x7a, 0xe1, 0x15, 0x63, 0xd0, + 0x42, 0x2f, 0x01, 0x90, 0x3b, 0x11, 0x09, 0x3c, 0xa7, 0xae, 0x6c, 0x44, 0x15, 0x23, 0xb3, 0xe0, + 0xaf, 0xfa, 0xd1, 0xf5, 0x90, 0x2c, 0x2a, 0x0c, 0xac, 0x61, 0xdb, 0xdf, 0x3f, 0x04, 0x10, 0x3f, + 0x34, 0xd0, 0x5d, 0x18, 0xa8, 0x3a, 0x4d, 0xa7, 0xca, 0xd3, 0xa6, 0x16, 0xf3, 0xbc, 0x9e, 0xe3, + 0x1a, 0x33, 0xf3, 0x02, 0x9d, 0x2b, 0x6f, 0x64, 0xc8, 0xf0, 0x01, 0x59, 0xdc, 0x51, 0x61, 0xa3, + 0xda, 0x43, 0x9f, 0xb3, 0x60, 0x48, 0x44, 0x3a, 0x62, 0x33, 0x54, 0xc8, 0xd7, 0xb7, 0x69, 0xed, + 0xcf, 0xc6, 0x35, 0x78, 0x17, 0x9e, 0x93, 0x2b, 0x54, 0x83, 0x74, 0xec, 0x85, 0xde, 0x30, 0xfa, + 0xa0, 0x7c, 0xdb, 0x16, 0x8d, 0xa1, 0x54, 0x6f, 0xdb, 0x41, 0x76, 0xd5, 0xe8, 0xcf, 0xda, 0xeb, + 0xc6, 0xb3, 0xb6, 0x27, 0xdf, 0x7f, 0xd8, 0xe0, 0xb7, 0x3b, 0xbd, 0x68, 0x51, 0x59, 0x8f, 0x25, + 0xd2, 0x9b, 0xef, 0xf4, 0xaa, 0x3d, 0xec, 0x3a, 0xc4, 0x11, 0xf9, 0x34, 0x8c, 0xd5, 0x4c, 0xae, + 0x45, 0xac, 0xc4, 0xc7, 0xf3, 0xe8, 0x26, 0x98, 0x9c, 0x98, 0x4f, 0x49, 0x00, 0x70, 0x92, 0x30, + 0x2a, 0xf3, 0xd0, 0x32, 0xcb, 0xde, 0xba, 0x2f, 0x3c, 0xa8, 0xec, 0xdc, 0xb9, 0xdc, 0x09, 0x23, + 0xd2, 0xa0, 0x98, 0x31, 0x93, 0xb0, 0x2a, 0xea, 0x62, 0x45, 0x05, 0xbd, 0x0e, 0x7d, 0xcc, 0xeb, + 0x31, 0x9c, 0x1a, 0xc8, 0x57, 0x6b, 0x98, 0x91, 0x50, 0xe3, 0x0d, 0xc9, 0xfe, 0x86, 0x58, 0x50, + 0x40, 0x97, 0xa5, 0x4f, 0x71, 0xb8, 0xec, 0x5d, 0x0f, 0x09, 0xf3, 0x29, 0x1e, 0x9c, 0x7b, 0x34, + 0x76, 0x17, 0xe6, 0xe5, 0x99, 0xf9, 0x67, 0x8d, 0x9a, 0x94, 0xed, 0x13, 0xff, 0x65, 0x5a, 0x5b, + 0x11, 0xb7, 0x2d, 0xb3, 0x7b, 0x66, 0xea, 0xdb, 0x78, 0x38, 0x6f, 0x98, 0x24, 0x70, 0x92, 0x26, + 0x65, 0xa1, 0xf9, 0xae, 0x17, 0x3e, 0x58, 0x9d, 0xce, 0x0e, 0x2e, 0x39, 0x60, 0xb7, 0x11, 0x2f, + 0xc1, 0xa2, 0x3e, 0x72, 0x61, 0x2c, 0x30, 0xd8, 0x0b, 0x19, 0x6e, 0xed, 0x6c, 0x77, 0x4c, 0x8c, + 0x16, 0xc8, 0xdf, 0x24, 0x83, 0x93, 0x74, 0xd1, 0xeb, 0x1a, 0xa3, 0x34, 0xd2, 0xfe, 0xe5, 0xdf, + 0x89, 0x35, 0x9a, 0xde, 0x82, 0x11, 0xe3, 0xb0, 0x79, 0xa0, 0x2a, 0x48, 0x0f, 0xc6, 0x93, 0x27, + 0xcb, 0x03, 0xd5, 0x3c, 0xfe, 0x49, 0x0f, 0x8c, 0x9a, 0x3b, 0x01, 0x9d, 0x87, 0x41, 0x41, 0x44, + 0x65, 0xb4, 0x52, 0x9b, 0x7b, 0x45, 0x02, 0x70, 0x8c, 0xc3, 0x12, 0x99, 0xb1, 0xea, 0x9a, 0xaf, + 0x40, 0x9c, 0xc8, 0x4c, 0x41, 0xb0, 0x86, 0x45, 0x1f, 0xb0, 0xb7, 0x7c, 0x3f, 0x52, 0xf7, 0xa8, + 0xda, 0x2e, 0x73, 0xac, 0x14, 0x0b, 0x28, 0xbd, 0x3f, 0xb7, 0x48, 0xe0, 0x91, 0xba, 0x99, 0xd2, + 0x41, 0xdd, 0x9f, 0x57, 0x74, 0x20, 0x36, 0x71, 0x29, 0x17, 0xe0, 0x87, 0x6c, 0xff, 0x89, 0x67, + 0x72, 0xec, 0x7b, 0x51, 0xe1, 0x51, 0x24, 0x24, 0x1c, 0x7d, 0x0c, 0x8e, 0xab, 0xf0, 0x89, 0x62, + 0x75, 0xc9, 0x16, 0xfb, 0x0c, 0xa9, 0xd6, 0xf1, 0xf9, 0x6c, 0x34, 0x9c, 0x57, 0x1f, 0xbd, 0x0a, + 0xa3, 0xe2, 0x29, 0x25, 0x29, 0xf6, 0x9b, 0x86, 0x84, 0x57, 0x0c, 0x28, 0x4e, 0x60, 0xcb, 0xa4, + 0x14, 0xec, 0x8d, 0x21, 0x29, 0x0c, 0xa4, 0x93, 0x52, 0xe8, 0x70, 0x9c, 0xaa, 0x81, 0x66, 0x61, + 0x8c, 0xb3, 0x8e, 0xae, 0xb7, 0xc1, 0xe7, 0x44, 0x78, 0x76, 0xaa, 0x4d, 0x75, 0xcd, 0x04, 0xe3, + 0x24, 0x3e, 0xba, 0x08, 0xc3, 0x4e, 0x50, 0xdd, 0x74, 0x23, 0x52, 0xa5, 0x3b, 0x83, 0xd9, 0xf2, + 0x69, 0x96, 0x98, 0xb3, 0x1a, 0x0c, 0x1b, 0x98, 0xf6, 0x5d, 0x98, 
0xcc, 0x08, 0x2f, 0x43, 0x17, + 0x8e, 0xd3, 0x74, 0xe5, 0x37, 0x25, 0xdc, 0x1d, 0x66, 0xcb, 0xcb, 0xf2, 0x6b, 0x34, 0x2c, 0xba, + 0x3a, 0x59, 0x18, 0x1a, 0x2d, 0xf9, 0xb6, 0x5a, 0x9d, 0x4b, 0x12, 0x80, 0x63, 0x1c, 0xfb, 0xaf, + 0x0a, 0x30, 0x96, 0xa1, 0xa0, 0x63, 0x09, 0xa0, 0x13, 0x2f, 0xad, 0x38, 0xdf, 0xb3, 0x99, 0xe3, + 0xa4, 0x70, 0x80, 0x1c, 0x27, 0xc5, 0x4e, 0x39, 0x4e, 0x7a, 0xde, 0x49, 0x8e, 0x13, 0x73, 0xc4, + 0x7a, 0xbb, 0x1a, 0xb1, 0x8c, 0xbc, 0x28, 0x7d, 0x07, 0xcc, 0x8b, 0x62, 0x0c, 0x7a, 0x7f, 0x17, + 0x83, 0xfe, 0xa3, 0x05, 0x18, 0x4f, 0xea, 0xf6, 0x0e, 0x41, 0x3e, 0xfe, 0xba, 0x21, 0x1f, 0x3f, + 0xd7, 0x8d, 0x27, 0x7e, 0xae, 0xac, 0x1c, 0x27, 0x64, 0xe5, 0x4f, 0x76, 0x45, 0xad, 0xbd, 0xdc, + 0xfc, 0xef, 0x17, 0xe0, 0x68, 0xa6, 0xca, 0xf3, 0x10, 0xc6, 0xe6, 0x9a, 0x31, 0x36, 0xcf, 0x74, + 0x1d, 0xa5, 0x20, 0x77, 0x80, 0x6e, 0x26, 0x06, 0xe8, 0x7c, 0xf7, 0x24, 0xdb, 0x8f, 0xd2, 0x37, + 0x8a, 0x70, 0x3a, 0xb3, 0x5e, 0x2c, 0x5e, 0x5e, 0x32, 0xc4, 0xcb, 0xcf, 0x26, 0xc4, 0xcb, 0x76, + 0xfb, 0xda, 0xf7, 0x47, 0xde, 0x2c, 0xbc, 0xf5, 0x59, 0xcc, 0x91, 0x7b, 0x94, 0x35, 0x1b, 0xde, + 0xfa, 0x8a, 0x10, 0x36, 0xe9, 0xbe, 0x97, 0x64, 0xcc, 0xbf, 0x6b, 0xc1, 0x89, 0xcc, 0xb9, 0x39, + 0x04, 0x49, 0xdf, 0xaa, 0x29, 0xe9, 0x7b, 0xa2, 0xeb, 0xd5, 0x9a, 0x23, 0xfa, 0xfb, 0x7c, 0x5f, + 0xce, 0xb7, 0x30, 0x01, 0xc4, 0x35, 0x18, 0x72, 0xaa, 0x55, 0x12, 0x86, 0x2b, 0x7e, 0x4d, 0xa5, + 0x43, 0x78, 0x86, 0x3d, 0x0f, 0xe3, 0xe2, 0xfd, 0xdd, 0xd2, 0x74, 0x92, 0x44, 0x0c, 0xc6, 0x3a, + 0x05, 0xf4, 0x09, 0x18, 0x08, 0x65, 0x26, 0xcb, 0x9e, 0x7b, 0xcf, 0x64, 0xc9, 0x98, 0x5c, 0x25, + 0x60, 0x51, 0x24, 0xd1, 0x77, 0xea, 0xd1, 0x9f, 0xda, 0x88, 0x16, 0x79, 0x27, 0xef, 0x21, 0x06, + 0xd4, 0xb3, 0x00, 0xdb, 0xea, 0x25, 0x93, 0x14, 0x9e, 0x68, 0x6f, 0x1c, 0x0d, 0x0b, 0xbd, 0x06, + 0xe3, 0x21, 0x0f, 0x7c, 0x1a, 0x1b, 0xa9, 0xf0, 0xb5, 0xc8, 0x62, 0xc7, 0x55, 0x12, 0x30, 0x9c, + 0xc2, 0x46, 0x4b, 0xb2, 0x55, 0x66, 0x8e, 0xc4, 0x97, 0xe7, 0xd9, 0xb8, 0x45, 0x61, 0x92, 0x74, + 0x24, 0x39, 0x09, 0x6c, 0xf8, 0xb5, 0x9a, 0xe8, 0x13, 0x00, 0x74, 0x11, 0x09, 0x21, 0x4a, 0x7f, + 0xfe, 0x11, 0x4a, 0xcf, 0x96, 0x5a, 0xa6, 0x27, 0x03, 0x73, 0xb3, 0x5f, 0x50, 0x44, 0xb0, 0x46, + 0x10, 0x39, 0x30, 0x12, 0xff, 0x8b, 0x73, 0xb4, 0x9f, 0xcb, 0x6d, 0x21, 0x49, 0x9c, 0x29, 0x18, + 0x16, 0x74, 0x12, 0xd8, 0xa4, 0x88, 0x3e, 0x0e, 0x27, 0xb6, 0x73, 0x2d, 0x7f, 0x38, 0x27, 0xc8, + 0x92, 0xae, 0xe7, 0xdb, 0xfb, 0xe4, 0xd7, 0xb7, 0xff, 0x2d, 0xc0, 0x43, 0x6d, 0x4e, 0x7a, 0x34, + 0x6b, 0x6a, 0xed, 0x9f, 0x4a, 0x4a, 0x36, 0xa6, 0x33, 0x2b, 0x1b, 0xa2, 0x8e, 0xc4, 0x86, 0x2a, + 0xbc, 0xe3, 0x0d, 0xf5, 0x43, 0x96, 0x26, 0x73, 0xe2, 0x36, 0xdd, 0x1f, 0x39, 0xe0, 0x0d, 0x76, + 0x1f, 0x85, 0x50, 0xeb, 0x19, 0x92, 0x9c, 0x67, 0xbb, 0xee, 0x4e, 0xf7, 0xa2, 0x9d, 0xaf, 0x66, + 0x07, 0x7c, 0xe7, 0x42, 0x9e, 0x4b, 0x07, 0xfd, 0xfe, 0xc3, 0x0a, 0xfe, 0xfe, 0x75, 0x0b, 0x4e, + 0xa4, 0x8a, 0x79, 0x1f, 0x48, 0x28, 0xa2, 0xdd, 0xad, 0xbe, 0xe3, 0xce, 0x4b, 0x82, 0xfc, 0x1b, + 0x2e, 0x8b, 0x6f, 0x38, 0x91, 0x8b, 0x97, 0xec, 0xfa, 0x17, 0xfe, 0xb8, 0x34, 0xc9, 0x1a, 0x30, + 0x11, 0x71, 0x7e, 0xd7, 0x51, 0x13, 0xce, 0x54, 0x5b, 0x41, 0x10, 0x2f, 0xd6, 0x8c, 0xcd, 0xc9, + 0xdf, 0x7a, 0x8f, 0xee, 0xed, 0x96, 0xce, 0xcc, 0x77, 0xc0, 0xc5, 0x1d, 0xa9, 0x21, 0x0f, 0x50, + 0x23, 0x65, 0x5f, 0xc7, 0x0e, 0x80, 0x1c, 0x39, 0x4c, 0xda, 0x1a, 0x8f, 0x5b, 0xca, 0x66, 0x58, + 0xe9, 0x65, 0x50, 0x3e, 0x5c, 0xe9, 0xc9, 0xb7, 0x26, 0x2e, 0xfd, 0xf4, 0x55, 0x38, 0xdd, 0x7e, + 0x31, 0x1d, 0x28, 0x94, 0xc3, 0x1f, 0x58, 0x70, 0xaa, 0x6d, 0xbc, 0xb0, 0x6f, 0xc3, 0xc7, 
0x82, + 0xfd, 0x59, 0x0b, 0x1e, 0xce, 0xac, 0x91, 0x74, 0xc2, 0xab, 0xd2, 0x42, 0xcd, 0x1c, 0x35, 0x8e, + 0x9c, 0x23, 0x01, 0x38, 0xc6, 0x31, 0x2c, 0x36, 0x0b, 0x1d, 0x2d, 0x36, 0x7f, 0xcb, 0x82, 0xd4, + 0x55, 0x7f, 0x08, 0x9c, 0xe7, 0xb2, 0xc9, 0x79, 0x3e, 0xda, 0xcd, 0x68, 0xe6, 0x30, 0x9d, 0x7f, + 0x31, 0x06, 0xc7, 0x72, 0x3c, 0xb1, 0xb7, 0x61, 0x62, 0xa3, 0x4a, 0xcc, 0xd0, 0x1b, 0xed, 0x42, + 0xd2, 0xb5, 0x8d, 0xd3, 0x31, 0x77, 0x74, 0x6f, 0xb7, 0x34, 0x91, 0x42, 0xc1, 0xe9, 0x26, 0xd0, + 0x67, 0x2d, 0x38, 0xe2, 0xdc, 0x0e, 0x17, 0xe9, 0x0b, 0xc2, 0xad, 0xce, 0xd5, 0xfd, 0xea, 0x16, + 0x65, 0xcc, 0xe4, 0xb6, 0x7a, 0x3e, 0x53, 0x18, 0x7d, 0xb3, 0x92, 0xc2, 0x37, 0x9a, 0x9f, 0xda, + 0xdb, 0x2d, 0x1d, 0xc9, 0xc2, 0xc2, 0x99, 0x6d, 0x21, 0x2c, 0x32, 0x7e, 0x39, 0xd1, 0x66, 0xbb, + 0xe0, 0x30, 0x59, 0x2e, 0xf3, 0x9c, 0x25, 0x96, 0x10, 0xac, 0xe8, 0xa0, 0x4f, 0xc1, 0xe0, 0x86, + 0x8c, 0x03, 0x91, 0xc1, 0x72, 0xc7, 0x03, 0xd9, 0x3e, 0x3a, 0x06, 0x37, 0x81, 0x51, 0x48, 0x38, + 0x26, 0x8a, 0x5e, 0x85, 0xa2, 0xb7, 0x1e, 0x8a, 0x10, 0x75, 0xd9, 0x96, 0xb8, 0xa6, 0xad, 0x33, + 0x0f, 0xc1, 0xb4, 0xba, 0x54, 0xc1, 0xb4, 0x22, 0xba, 0x0c, 0xc5, 0xe0, 0x56, 0x4d, 0x68, 0x52, + 0x32, 0x37, 0x29, 0x9e, 0x5b, 0xc8, 0xe9, 0x15, 0xa3, 0x84, 0xe7, 0x16, 0x30, 0x25, 0x81, 0xca, + 0xd0, 0xcb, 0xdc, 0x97, 0x05, 0x6b, 0x9b, 0xf9, 0x94, 0x6f, 0x13, 0x06, 0x80, 0x7b, 0x24, 0x32, + 0x04, 0xcc, 0x09, 0xa1, 0x35, 0xe8, 0xab, 0xba, 0x5e, 0x8d, 0x04, 0x82, 0x97, 0xfd, 0x60, 0xa6, + 0xce, 0x84, 0x61, 0xe4, 0xd0, 0xe4, 0x2a, 0x04, 0x86, 0x81, 0x05, 0x2d, 0x46, 0x95, 0x34, 0x37, + 0xd7, 0xe5, 0x8d, 0x95, 0x4d, 0x95, 0x34, 0x37, 0x97, 0x2a, 0x6d, 0xa9, 0x32, 0x0c, 0x2c, 0x68, + 0xa1, 0x97, 0xa0, 0xb0, 0x5e, 0x15, 0xae, 0xc9, 0x99, 0xca, 0x13, 0x33, 0x8a, 0xd6, 0x5c, 0xdf, + 0xde, 0x6e, 0xa9, 0xb0, 0x34, 0x8f, 0x0b, 0xeb, 0x55, 0xb4, 0x0a, 0xfd, 0xeb, 0x3c, 0xee, 0x8e, + 0xd0, 0x8f, 0x3c, 0x9e, 0x1d, 0x12, 0x28, 0x15, 0x9a, 0x87, 0x7b, 0x97, 0x0a, 0x00, 0x96, 0x44, + 0x58, 0x02, 0x2a, 0x15, 0x3f, 0x48, 0x84, 0x2f, 0x9d, 0x39, 0x58, 0xcc, 0x27, 0xfe, 0xd4, 0x88, + 0xa3, 0x10, 0x61, 0x8d, 0x22, 0x5d, 0xd5, 0xce, 0xdd, 0x56, 0xc0, 0x72, 0x5b, 0x08, 0xd5, 0x48, + 0xe6, 0xaa, 0x9e, 0x95, 0x48, 0xed, 0x56, 0xb5, 0x42, 0xc2, 0x31, 0x51, 0xb4, 0x05, 0x23, 0xdb, + 0x61, 0x73, 0x93, 0xc8, 0x2d, 0xcd, 0xc2, 0xde, 0xe5, 0x70, 0xb3, 0x37, 0x04, 0xa2, 0x1b, 0x44, + 0x2d, 0xa7, 0x9e, 0x3a, 0x85, 0xd8, 0xb3, 0xe6, 0x86, 0x4e, 0x0c, 0x9b, 0xb4, 0xe9, 0xf0, 0xbf, + 0xdd, 0xf2, 0x6f, 0xed, 0x44, 0x44, 0x44, 0x1d, 0xcd, 0x1c, 0xfe, 0x37, 0x38, 0x4a, 0x7a, 0xf8, + 0x05, 0x00, 0x4b, 0x22, 0xe8, 0x86, 0x18, 0x1e, 0x76, 0x7a, 0x8e, 0xe7, 0x87, 0x34, 0x9f, 0x95, + 0x48, 0x39, 0x83, 0xc2, 0x4e, 0xcb, 0x98, 0x14, 0x3b, 0x25, 0x9b, 0x9b, 0x7e, 0xe4, 0x7b, 0x89, + 0x13, 0x7a, 0x22, 0xff, 0x94, 0x2c, 0x67, 0xe0, 0xa7, 0x4f, 0xc9, 0x2c, 0x2c, 0x9c, 0xd9, 0x16, + 0xaa, 0xc1, 0x68, 0xd3, 0x0f, 0xa2, 0xdb, 0x7e, 0x20, 0xd7, 0x17, 0x6a, 0x23, 0x28, 0x35, 0x30, + 0x45, 0x8b, 0xcc, 0x30, 0xc7, 0x84, 0xe0, 0x04, 0x4d, 0xf4, 0x51, 0xe8, 0x0f, 0xab, 0x4e, 0x9d, + 0x2c, 0x5f, 0x9b, 0x9a, 0xcc, 0xbf, 0x7e, 0x2a, 0x1c, 0x25, 0x67, 0x75, 0xf1, 0xb0, 0x49, 0x1c, + 0x05, 0x4b, 0x72, 0x68, 0x09, 0x7a, 0x59, 0x62, 0x67, 0x16, 0x22, 0x37, 0x27, 0x32, 0x7b, 0xca, + 0xad, 0x86, 0x9f, 0x4d, 0xac, 0x18, 0xf3, 0xea, 0x74, 0x0f, 0x08, 0x49, 0x81, 0x1f, 0x4e, 0x1d, + 0xcd, 0xdf, 0x03, 0x42, 0xc0, 0x70, 0xad, 0xd2, 0x6e, 0x0f, 0x28, 0x24, 0x1c, 0x13, 0xa5, 0x27, + 0x33, 0x3d, 0x4d, 0x8f, 0xb5, 0x31, 0x99, 0xcc, 0x3d, 0x4b, 0xd9, 0xc9, 0x4c, 0x4f, 0x52, 0x4a, + 0xc2, 0xfe, 0x8d, 
0x81, 0x34, 0xcf, 0xc2, 0x24, 0x4c, 0xff, 0xbf, 0x95, 0xb2, 0x99, 0xf8, 0x50, + 0xb7, 0x02, 0xef, 0xfb, 0xf8, 0x70, 0xfd, 0xac, 0x05, 0xc7, 0x9a, 0x99, 0x1f, 0x22, 0x18, 0x80, + 0xee, 0xe4, 0xe6, 0xfc, 0xd3, 0x55, 0x38, 0xe5, 0x6c, 0x38, 0xce, 0x69, 0x29, 0x29, 0x1c, 0x28, + 0xbe, 0x63, 0xe1, 0xc0, 0x0a, 0x0c, 0x54, 0xf9, 0x4b, 0x4e, 0xa6, 0x01, 0xe8, 0x2a, 0x18, 0x28, + 0x63, 0x25, 0xc4, 0x13, 0x70, 0x1d, 0x2b, 0x12, 0xe8, 0x87, 0x2d, 0x38, 0x95, 0xec, 0x3a, 0x26, + 0x0c, 0x2c, 0x0c, 0x26, 0xb9, 0x58, 0x6b, 0x49, 0x7c, 0x7f, 0x8a, 0xff, 0x37, 0x90, 0xf7, 0x3b, + 0x21, 0xe0, 0xf6, 0x8d, 0xa1, 0x85, 0x0c, 0xb9, 0x5a, 0x9f, 0xa9, 0x51, 0xec, 0x42, 0xb6, 0xf6, + 0x3c, 0x0c, 0x37, 0xfc, 0x96, 0x17, 0x09, 0xbb, 0x47, 0x61, 0x3c, 0xc5, 0x8c, 0x86, 0x56, 0xb4, + 0x72, 0x6c, 0x60, 0x25, 0x24, 0x72, 0x03, 0xf7, 0x2c, 0x91, 0x7b, 0x0b, 0x86, 0x3d, 0xcd, 0x25, + 0xa0, 0xdd, 0x0b, 0x56, 0x48, 0x17, 0x35, 0x6c, 0xde, 0x4b, 0xbd, 0x04, 0x1b, 0xd4, 0xda, 0x4b, + 0xcb, 0xe0, 0x9d, 0x49, 0xcb, 0x0e, 0xf5, 0x49, 0x6c, 0xff, 0x42, 0x21, 0xe3, 0xc5, 0xc0, 0xa5, + 0x72, 0xaf, 0x98, 0x52, 0xb9, 0xb3, 0x49, 0xa9, 0x5c, 0x4a, 0x55, 0x65, 0x08, 0xe4, 0xba, 0xcf, + 0x28, 0xd9, 0x75, 0x80, 0xe7, 0xef, 0xb5, 0xe0, 0x38, 0xd3, 0x7d, 0xd0, 0x06, 0xde, 0xb1, 0xbe, + 0x83, 0x99, 0xa4, 0x5e, 0xcd, 0x26, 0x87, 0xf3, 0xda, 0xb1, 0xeb, 0x70, 0xa6, 0xd3, 0xbd, 0xcb, + 0x2c, 0x7c, 0x6b, 0xca, 0x38, 0x22, 0xb6, 0xf0, 0xad, 0x2d, 0x2f, 0x60, 0x06, 0xe9, 0x36, 0x7c, + 0xa1, 0xfd, 0xdf, 0x2d, 0x28, 0x96, 0xfd, 0xda, 0x21, 0xbc, 0xe8, 0x3f, 0x62, 0xbc, 0xe8, 0x1f, + 0xca, 0xbe, 0xf1, 0x6b, 0xb9, 0xca, 0xbe, 0xc5, 0x84, 0xb2, 0xef, 0x54, 0x1e, 0x81, 0xf6, 0xaa, + 0xbd, 0x9f, 0x2e, 0xc2, 0x50, 0xd9, 0xaf, 0xa9, 0x7d, 0xf6, 0xaf, 0xee, 0xc5, 0x91, 0x27, 0x37, + 0xfb, 0x94, 0x46, 0x99, 0x59, 0xf4, 0xca, 0xb8, 0x13, 0xdf, 0x66, 0xfe, 0x3c, 0x37, 0x89, 0xbb, + 0xb1, 0x19, 0x91, 0x5a, 0xf2, 0x73, 0x0e, 0xcf, 0x9f, 0xe7, 0x9b, 0x45, 0x18, 0x4b, 0xb4, 0x8e, + 0xea, 0x30, 0x52, 0xd7, 0x55, 0x49, 0x62, 0x9d, 0xde, 0x93, 0x16, 0x4a, 0xf8, 0x43, 0x68, 0x45, + 0xd8, 0x24, 0x8e, 0x66, 0x00, 0x3c, 0xdd, 0x2a, 0x5c, 0x05, 0x2a, 0xd6, 0x2c, 0xc2, 0x35, 0x0c, + 0xf4, 0x02, 0x0c, 0x45, 0x7e, 0xd3, 0xaf, 0xfb, 0x1b, 0x3b, 0x57, 0x88, 0x8c, 0x6c, 0xa9, 0x8c, + 0x86, 0xd7, 0x62, 0x10, 0xd6, 0xf1, 0xd0, 0x1d, 0x98, 0x50, 0x44, 0x2a, 0xf7, 0x41, 0xbd, 0xc6, + 0xc4, 0x26, 0xab, 0x49, 0x8a, 0x38, 0xdd, 0x08, 0x7a, 0x09, 0x46, 0x99, 0xf5, 0x32, 0xab, 0x7f, + 0x85, 0xec, 0xc8, 0x88, 0xc7, 0x8c, 0xc3, 0x5e, 0x31, 0x20, 0x38, 0x81, 0x89, 0xe6, 0x61, 0xa2, + 0xe1, 0x86, 0x89, 0xea, 0x7d, 0xac, 0x3a, 0xeb, 0xc0, 0x4a, 0x12, 0x88, 0xd3, 0xf8, 0xf6, 0xcf, + 0x89, 0x39, 0xf6, 0x22, 0xf7, 0xfd, 0xed, 0xf8, 0xee, 0xde, 0x8e, 0xdf, 0xb0, 0x60, 0x9c, 0xb6, + 0xce, 0x4c, 0x32, 0x25, 0x23, 0xa5, 0x72, 0x62, 0x58, 0x6d, 0x72, 0x62, 0x9c, 0xa5, 0xc7, 0x76, + 0xcd, 0x6f, 0x45, 0x42, 0x3a, 0xaa, 0x9d, 0xcb, 0xb4, 0x14, 0x0b, 0xa8, 0xc0, 0x23, 0x41, 0x20, + 0xfc, 0xde, 0x75, 0x3c, 0x12, 0x04, 0x58, 0x40, 0x65, 0xca, 0x8c, 0x9e, 0xec, 0x94, 0x19, 0x3c, + 0xf2, 0xb9, 0xb0, 0x82, 0x13, 0x2c, 0xad, 0x16, 0xf9, 0x5c, 0x9a, 0xc7, 0xc5, 0x38, 0xf6, 0x57, + 0x8b, 0x30, 0x5c, 0xf6, 0x6b, 0xb1, 0x61, 0xc7, 0xf3, 0x86, 0x61, 0xc7, 0x99, 0x84, 0x61, 0xc7, + 0xb8, 0x8e, 0xfb, 0xbe, 0x19, 0xc7, 0xb7, 0xca, 0x8c, 0xe3, 0x37, 0x2d, 0x36, 0x6b, 0x0b, 0xab, + 0x15, 0x6e, 0xe1, 0x8b, 0x2e, 0xc0, 0x10, 0x3b, 0xe1, 0x58, 0xa0, 0x05, 0x69, 0xed, 0xc0, 0x52, + 0x58, 0xae, 0xc6, 0xc5, 0x58, 0xc7, 0x41, 0xe7, 0x60, 0x20, 0x24, 0x4e, 0x50, 0xdd, 0x54, 0xc7, + 0xbb, 0x30, 0x4d, 0xe0, 0x65, 0x58, 0x41, 
0xd1, 0x1b, 0x71, 0xd0, 0xed, 0x62, 0xbe, 0xb9, 0xb0, + 0xde, 0x1f, 0xbe, 0x45, 0xf2, 0x23, 0x6d, 0xdb, 0x37, 0x01, 0xa5, 0xf1, 0xbb, 0xf0, 0xbf, 0x2a, + 0x99, 0x61, 0x61, 0x07, 0x53, 0x21, 0x61, 0xff, 0xda, 0x82, 0xd1, 0xb2, 0x5f, 0xa3, 0x5b, 0xf7, + 0xbd, 0xb4, 0x4f, 0xf5, 0x8c, 0x03, 0x7d, 0x6d, 0x32, 0x0e, 0x3c, 0x02, 0xbd, 0x65, 0xbf, 0xd6, + 0x21, 0x74, 0xed, 0x3f, 0xb0, 0xa0, 0xbf, 0xec, 0xd7, 0x0e, 0x41, 0xf1, 0xf2, 0x8a, 0xa9, 0x78, + 0x39, 0x9e, 0xb3, 0x6e, 0x72, 0x74, 0x2d, 0x7f, 0xaf, 0x07, 0x46, 0x68, 0x3f, 0xfd, 0x0d, 0x39, + 0x95, 0xc6, 0xb0, 0x59, 0x5d, 0x0c, 0x1b, 0x7d, 0x06, 0xf8, 0xf5, 0xba, 0x7f, 0x3b, 0x39, 0xad, + 0x4b, 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x1a, 0x06, 0x9a, 0x01, 0xd9, 0x76, 0x7d, 0xc1, 0x5f, 0x6b, + 0x6a, 0xac, 0xb2, 0x28, 0xc7, 0x0a, 0x83, 0x3e, 0xbc, 0x43, 0xd7, 0xa3, 0xbc, 0x44, 0xd5, 0xf7, + 0x6a, 0x5c, 0x37, 0x51, 0x14, 0x69, 0xb1, 0xb4, 0x72, 0x6c, 0x60, 0xa1, 0x9b, 0x30, 0xc8, 0xfe, + 0xb3, 0x63, 0xa7, 0xf7, 0xc0, 0xc7, 0x8e, 0x48, 0x14, 0x2c, 0x08, 0xe0, 0x98, 0x16, 0x7a, 0x16, + 0x20, 0x92, 0xa9, 0x65, 0x42, 0x11, 0xc2, 0x54, 0xbd, 0x45, 0x54, 0xd2, 0x99, 0x10, 0x6b, 0x58, + 0xe8, 0x29, 0x18, 0x8c, 0x1c, 0xb7, 0x7e, 0xd5, 0xf5, 0x98, 0xfe, 0x9e, 0xf6, 0x5f, 0xe4, 0xeb, + 0x15, 0x85, 0x38, 0x86, 0x53, 0x5e, 0x90, 0xc5, 0x84, 0x9a, 0xdb, 0x89, 0x44, 0x6a, 0xba, 0x22, + 0xe7, 0x05, 0xaf, 0xaa, 0x52, 0xac, 0x61, 0xa0, 0x4d, 0x38, 0xe9, 0x7a, 0x2c, 0x85, 0x14, 0xa9, + 0x6c, 0xb9, 0xcd, 0xb5, 0xab, 0x95, 0x1b, 0x24, 0x70, 0xd7, 0x77, 0xe6, 0x9c, 0xea, 0x16, 0xf1, + 0x64, 0x42, 0xfc, 0x47, 0x45, 0x17, 0x4f, 0x2e, 0xb7, 0xc1, 0xc5, 0x6d, 0x29, 0xd9, 0xcf, 0xb1, + 0xf5, 0x7e, 0xad, 0x82, 0x9e, 0x34, 0x8e, 0x8e, 0x63, 0xfa, 0xd1, 0xb1, 0xbf, 0x5b, 0xea, 0xbb, + 0x56, 0xd1, 0x62, 0xff, 0x5c, 0x84, 0xa3, 0x65, 0xbf, 0x56, 0xf6, 0x83, 0x68, 0xc9, 0x0f, 0x6e, + 0x3b, 0x41, 0x4d, 0x2e, 0xaf, 0x92, 0x8c, 0x7e, 0x44, 0xcf, 0xcf, 0x5e, 0x7e, 0xba, 0x18, 0x91, + 0x8d, 0x9e, 0x63, 0x1c, 0xdb, 0x01, 0x9d, 0x4d, 0xab, 0x8c, 0x77, 0x50, 0x49, 0xd8, 0x2e, 0x39, + 0x11, 0x41, 0xd7, 0x60, 0xa4, 0xaa, 0x5f, 0xa3, 0xa2, 0xfa, 0x13, 0xf2, 0x22, 0x33, 0xee, 0xd8, + 0xcc, 0x7b, 0xd7, 0xac, 0x6f, 0x7f, 0xdd, 0x12, 0xad, 0x70, 0x49, 0x04, 0xb7, 0x69, 0xed, 0x7c, + 0x9e, 0xce, 0xc3, 0x44, 0xa0, 0x57, 0xd1, 0x6c, 0xc3, 0x8e, 0xf2, 0xac, 0x36, 0x09, 0x20, 0x4e, + 0xe3, 0xa3, 0x8f, 0xc3, 0x09, 0xa3, 0x50, 0xaa, 0xc9, 0xb5, 0xdc, 0xd2, 0x4c, 0x56, 0x83, 0xf3, + 0x90, 0x70, 0x7e, 0x7d, 0xfb, 0xbb, 0xe1, 0x58, 0xf2, 0xbb, 0x84, 0xf4, 0xe4, 0x1e, 0xbf, 0xae, + 0x70, 0xb0, 0xaf, 0xb3, 0x5f, 0x80, 0x09, 0xfa, 0xac, 0x56, 0x2c, 0x22, 0x9b, 0xbf, 0xce, 0x01, + 0xa6, 0xfe, 0x5d, 0x3f, 0xbb, 0xe2, 0x12, 0x99, 0xd5, 0xd0, 0x27, 0x61, 0x34, 0x24, 0x2c, 0xaa, + 0x9a, 0x94, 0xda, 0xb5, 0xf1, 0x14, 0xaf, 0x2c, 0xea, 0x98, 0xfc, 0x65, 0x62, 0x96, 0xe1, 0x04, + 0x35, 0xd4, 0x80, 0xd1, 0xdb, 0xae, 0x57, 0xf3, 0x6f, 0x87, 0x92, 0xfe, 0x40, 0xbe, 0x0a, 0xe0, + 0x26, 0xc7, 0x4c, 0xf4, 0xd1, 0x68, 0xee, 0xa6, 0x41, 0x0c, 0x27, 0x88, 0xd3, 0x63, 0x24, 0x68, + 0x79, 0xb3, 0xe1, 0xf5, 0x90, 0x04, 0x22, 0xe6, 0x1b, 0x3b, 0x46, 0xb0, 0x2c, 0xc4, 0x31, 0x9c, + 0x1e, 0x23, 0xec, 0x0f, 0x73, 0x35, 0x67, 0xe7, 0x94, 0x38, 0x46, 0xb0, 0x2a, 0xc5, 0x1a, 0x06, + 0x3d, 0x66, 0xd9, 0xbf, 0x55, 0xdf, 0xc3, 0xbe, 0x1f, 0xc9, 0x83, 0x99, 0xa5, 0xa1, 0xd4, 0xca, + 0xb1, 0x81, 0x95, 0x13, 0x61, 0xae, 0xe7, 0xa0, 0x11, 0xe6, 0x50, 0xd4, 0xc6, 0xbb, 0x9e, 0x47, + 0x3a, 0xbe, 0xd8, 0xce, 0xbb, 0x7e, 0xff, 0x9e, 0x3c, 0xef, 0xe9, 0x3d, 0xbf, 0x2e, 0x06, 0xa8, + 0x97, 0x87, 0xd0, 0x63, 0x4a, 0xca, 0x0a, 0x1f, 0x1d, 0x09, 0x43, 
0x8b, 0xd0, 0x1f, 0xee, 0x84, + 0xd5, 0xa8, 0x1e, 0xb6, 0x4b, 0x35, 0x5a, 0x61, 0x28, 0x5a, 0xa6, 0x6b, 0x5e, 0x05, 0xcb, 0xba, + 0xa8, 0x0a, 0x93, 0x82, 0xe2, 0xfc, 0xa6, 0xe3, 0xa9, 0x04, 0x88, 0xdc, 0x1a, 0xf1, 0xc2, 0xde, + 0x6e, 0x69, 0x52, 0xb4, 0xac, 0x83, 0xf7, 0x77, 0x4b, 0x74, 0x4b, 0x66, 0x40, 0x70, 0x16, 0x35, + 0xbe, 0xe4, 0xab, 0x55, 0xbf, 0xd1, 0x2c, 0x07, 0xfe, 0xba, 0x5b, 0x27, 0xed, 0x14, 0xbd, 0x15, + 0x03, 0x53, 0x2c, 0x79, 0xa3, 0x0c, 0x27, 0xa8, 0xa1, 0x5b, 0x30, 0xe6, 0x34, 0x9b, 0xb3, 0x41, + 0xc3, 0x0f, 0x64, 0x03, 0x43, 0xf9, 0x1a, 0x83, 0x59, 0x13, 0x95, 0xe7, 0x3f, 0x4c, 0x14, 0xe2, + 0x24, 0x41, 0xfb, 0xbb, 0x18, 0xbf, 0x5d, 0x71, 0x37, 0x3c, 0xe6, 0x93, 0x86, 0x1a, 0x30, 0xd2, + 0x64, 0x27, 0xb2, 0x48, 0x1b, 0x26, 0x76, 0xf1, 0xf3, 0x5d, 0xca, 0x0c, 0x6f, 0xb3, 0xc4, 0xa7, + 0x86, 0xed, 0x68, 0x59, 0x27, 0x87, 0x4d, 0xea, 0xf6, 0xbf, 0x3f, 0xc1, 0x38, 0xb6, 0x0a, 0x17, + 0x04, 0xf6, 0x0b, 0x0f, 0x41, 0xf1, 0xf4, 0x9f, 0xce, 0x17, 0xb9, 0xc7, 0x53, 0x2f, 0xbc, 0x0c, + 0xb1, 0xac, 0x8b, 0x3e, 0x01, 0xa3, 0xf4, 0x25, 0xad, 0xb8, 0xa6, 0x70, 0xea, 0x48, 0x7e, 0xe8, + 0x29, 0x85, 0xa5, 0xa7, 0x14, 0xd4, 0x2b, 0xe3, 0x04, 0x31, 0xf4, 0x06, 0x33, 0xa7, 0x94, 0xa4, + 0x0b, 0xdd, 0x90, 0xd6, 0x2d, 0x27, 0x25, 0x59, 0x8d, 0x08, 0x6a, 0xc1, 0x64, 0x3a, 0x71, 0x72, + 0x38, 0x65, 0xe7, 0x3f, 0x49, 0xd2, 0xb9, 0x8f, 0xe3, 0xdc, 0x6f, 0x69, 0x58, 0x88, 0xb3, 0xe8, + 0xa3, 0xab, 0xc9, 0xb4, 0xb6, 0x45, 0x43, 0x58, 0x9f, 0x4a, 0x6d, 0x3b, 0xd2, 0x36, 0xa3, 0xed, + 0x06, 0x9c, 0xd2, 0x32, 0x83, 0x5e, 0x0a, 0x1c, 0x66, 0xce, 0xe3, 0xb2, 0x8b, 0x42, 0xe3, 0x25, + 0x1f, 0xde, 0xdb, 0x2d, 0x9d, 0x5a, 0x6b, 0x87, 0x88, 0xdb, 0xd3, 0x41, 0xd7, 0xe0, 0x28, 0x0f, + 0x9c, 0xb2, 0x40, 0x9c, 0x5a, 0xdd, 0xf5, 0x14, 0xb3, 0xca, 0x8f, 0x95, 0x13, 0x7b, 0xbb, 0xa5, + 0xa3, 0xb3, 0x59, 0x08, 0x38, 0xbb, 0x1e, 0x7a, 0x05, 0x06, 0x6b, 0x9e, 0x3c, 0x00, 0xfb, 0x8c, + 0xe4, 0xab, 0x83, 0x0b, 0xab, 0x15, 0xf5, 0xfd, 0xf1, 0x1f, 0x1c, 0x57, 0x40, 0x1b, 0x5c, 0x5b, + 0xa4, 0x44, 0x7c, 0xfd, 0xa9, 0x78, 0x9a, 0x49, 0x29, 0xb8, 0x11, 0x89, 0x80, 0xab, 0x49, 0x95, + 0xa7, 0x9b, 0x11, 0xa4, 0xc0, 0x20, 0x8c, 0x5e, 0x07, 0x24, 0x92, 0xfc, 0xcc, 0x56, 0x59, 0x4e, + 0x3a, 0xcd, 0x84, 0x53, 0xbd, 0xdc, 0x2b, 0x29, 0x0c, 0x9c, 0x51, 0x0b, 0x5d, 0xa6, 0x27, 0x97, + 0x5e, 0x2a, 0x4e, 0x46, 0x95, 0xe2, 0x7b, 0x81, 0x34, 0x03, 0xc2, 0xac, 0x0e, 0x4d, 0x8a, 0x38, + 0x51, 0x0f, 0xd5, 0xe0, 0xa4, 0xd3, 0x8a, 0x7c, 0xa6, 0x88, 0x33, 0x51, 0xd7, 0xfc, 0x2d, 0xe2, + 0x31, 0x1d, 0xf8, 0x00, 0x8b, 0xd3, 0x79, 0x72, 0xb6, 0x0d, 0x1e, 0x6e, 0x4b, 0x85, 0xbe, 0x62, + 0xe8, 0x58, 0x68, 0x3a, 0x32, 0xc3, 0xa9, 0x9a, 0x2b, 0x8e, 0x25, 0x06, 0x7a, 0x01, 0x86, 0x36, + 0xfd, 0x30, 0x5a, 0x25, 0xd1, 0x6d, 0x3f, 0xd8, 0x12, 0x59, 0x05, 0xe2, 0x4c, 0x2e, 0x31, 0x08, + 0xeb, 0x78, 0xe8, 0x09, 0xe8, 0x67, 0x16, 0x5a, 0xcb, 0x0b, 0xec, 0x1a, 0x1c, 0x88, 0xcf, 0x98, + 0xcb, 0xbc, 0x18, 0x4b, 0xb8, 0x44, 0x5d, 0x2e, 0xcf, 0x33, 0x43, 0x97, 0x04, 0xea, 0x72, 0x79, + 0x1e, 0x4b, 0x38, 0x5d, 0xae, 0xe1, 0xa6, 0x13, 0x90, 0x72, 0xe0, 0x57, 0x49, 0xa8, 0xe5, 0x0f, + 0x7a, 0x88, 0xe7, 0x4c, 0xa0, 0xcb, 0xb5, 0x92, 0x85, 0x80, 0xb3, 0xeb, 0x21, 0x92, 0xce, 0x8a, + 0x3b, 0x9a, 0xaf, 0xa1, 0x4c, 0x73, 0x6a, 0x5d, 0x26, 0xc6, 0xf5, 0x60, 0x5c, 0xe5, 0xe3, 0xe5, + 0x59, 0x12, 0xc2, 0xa9, 0x31, 0xb6, 0xb6, 0xbb, 0x4f, 0xb1, 0xa0, 0x74, 0xbe, 0xcb, 0x09, 0x4a, + 0x38, 0x45, 0xdb, 0x08, 0x04, 0x3b, 0xde, 0x31, 0x10, 0xec, 0x79, 0x18, 0x0c, 0x5b, 0xb7, 0x6a, + 0x7e, 0xc3, 0x71, 0x3d, 0x66, 0xe8, 0xa2, 0xbd, 0x97, 0x2b, 0x12, 0x80, 0x63, 0x1c, 0xb4, 
0x04, + 0x03, 0x8e, 0x54, 0xe8, 0xa2, 0xfc, 0x18, 0x77, 0x4a, 0x8d, 0xcb, 0xc3, 0x3e, 0x49, 0x15, 0xae, + 0xaa, 0x8b, 0x5e, 0x86, 0x11, 0x11, 0x47, 0x43, 0xa4, 0xb0, 0x9f, 0x34, 0xbd, 0x86, 0x2b, 0x3a, + 0x10, 0x9b, 0xb8, 0xe8, 0x3a, 0x0c, 0x45, 0x7e, 0x9d, 0xb9, 0xbe, 0x52, 0x06, 0xf6, 0x58, 0x7e, + 0x28, 0xda, 0x35, 0x85, 0xa6, 0xab, 0x1a, 0x54, 0x55, 0xac, 0xd3, 0x41, 0x6b, 0x7c, 0xbd, 0xb3, + 0x6c, 0x41, 0x24, 0x14, 0x39, 0xd0, 0x4f, 0xe5, 0x59, 0x29, 0x32, 0x34, 0x73, 0x3b, 0x88, 0x9a, + 0x58, 0x27, 0x83, 0x2e, 0xc1, 0x44, 0x33, 0x70, 0x7d, 0xb6, 0x26, 0x94, 0x82, 0x7a, 0xca, 0xcc, + 0x0d, 0x5a, 0x4e, 0x22, 0xe0, 0x74, 0x1d, 0x16, 0x06, 0x45, 0x14, 0x4e, 0x9d, 0xe0, 0xf9, 0xcd, + 0xb8, 0xf8, 0x81, 0x97, 0x61, 0x05, 0x45, 0x2b, 0xec, 0x24, 0xe6, 0x92, 0xb3, 0xa9, 0xe9, 0x7c, + 0xe7, 0x7a, 0x5d, 0xc2, 0xc6, 0xd9, 0x72, 0xf5, 0x17, 0xc7, 0x14, 0x50, 0x4d, 0x4b, 0x2b, 0x4e, + 0x1f, 0x37, 0xe1, 0xd4, 0xc9, 0x36, 0x66, 0xb2, 0x89, 0x97, 0x6c, 0xcc, 0x10, 0x18, 0xc5, 0x21, + 0x4e, 0xd0, 0x44, 0xaf, 0xc1, 0xb8, 0x88, 0x11, 0x10, 0x0f, 0xd3, 0xa9, 0xd8, 0x95, 0x08, 0x27, + 0x60, 0x38, 0x85, 0xcd, 0xf3, 0x8b, 0x39, 0xb7, 0xea, 0x44, 0x1c, 0x7d, 0x57, 0x5d, 0x6f, 0x2b, + 0x9c, 0x3a, 0xcd, 0xce, 0x07, 0x91, 0x5f, 0x2c, 0x09, 0xc5, 0x19, 0x35, 0xd0, 0x1a, 0x8c, 0x37, + 0x03, 0x42, 0x1a, 0xec, 0x09, 0x23, 0xee, 0xb3, 0x12, 0x8f, 0x02, 0x44, 0x7b, 0x52, 0x4e, 0xc0, + 0xf6, 0x33, 0xca, 0x70, 0x8a, 0x02, 0xba, 0x0d, 0x03, 0xfe, 0x36, 0x09, 0x36, 0x89, 0x53, 0x9b, + 0x3a, 0xd3, 0xc6, 0xc1, 0x4d, 0x5c, 0x6e, 0xd7, 0x04, 0x6e, 0xc2, 0xfe, 0x47, 0x16, 0x77, 0xb6, + 0xff, 0x91, 0x8d, 0xa1, 0xbf, 0x65, 0xc1, 0x09, 0xa9, 0x51, 0xab, 0x34, 0xe9, 0xa8, 0xcf, 0xfb, + 0x5e, 0x18, 0x05, 0x3c, 0x6e, 0xcd, 0xc3, 0xf9, 0xb1, 0x5c, 0xd6, 0x72, 0x2a, 0x29, 0xe1, 0xfd, + 0x89, 0x3c, 0x8c, 0x10, 0xe7, 0xb7, 0x48, 0x1f, 0xdd, 0x21, 0x89, 0xe4, 0x61, 0x34, 0x1b, 0x2e, + 0xbd, 0xb1, 0xb0, 0x3a, 0xf5, 0x08, 0x0f, 0xba, 0x43, 0x37, 0x43, 0x25, 0x09, 0xc4, 0x69, 0x7c, + 0x74, 0x01, 0x0a, 0x7e, 0x38, 0xf5, 0x68, 0x9b, 0x4c, 0xf4, 0x7e, 0xed, 0x5a, 0x85, 0xdb, 0x81, + 0x5e, 0xab, 0xe0, 0x82, 0x1f, 0xca, 0x1c, 0x5f, 0xf4, 0xa5, 0x19, 0x4e, 0x3d, 0xc6, 0x45, 0xbd, + 0x32, 0xc7, 0x17, 0x2b, 0xc4, 0x31, 0x1c, 0x6d, 0xc2, 0x58, 0x68, 0xbc, 0xe8, 0xc3, 0xa9, 0xb3, + 0x6c, 0xa4, 0x1e, 0xcb, 0x9b, 0x34, 0x03, 0x5b, 0x4b, 0xbe, 0x63, 0x52, 0xc1, 0x49, 0xb2, 0x7c, + 0x77, 0x69, 0x32, 0x85, 0x70, 0xea, 0xf1, 0x0e, 0xbb, 0x4b, 0x43, 0xd6, 0x77, 0x97, 0x4e, 0x03, + 0x27, 0x68, 0x4e, 0x7f, 0x07, 0x4c, 0xa4, 0xd8, 0xa5, 0x83, 0xf8, 0x3c, 0x4c, 0x6f, 0xc1, 0x88, + 0xb1, 0x24, 0x1f, 0xa8, 0x49, 0xcc, 0xef, 0x0e, 0xc2, 0xa0, 0x32, 0x55, 0x40, 0xe7, 0x4d, 0x2b, + 0x98, 0x13, 0x49, 0x2b, 0x98, 0x81, 0xb2, 0x5f, 0x33, 0x0c, 0x5f, 0xd6, 0x32, 0x62, 0xc9, 0xe6, + 0x1d, 0x80, 0xdd, 0x3b, 0x66, 0x69, 0xea, 0x97, 0x62, 0xd7, 0xe6, 0x34, 0x3d, 0x6d, 0x35, 0x3a, + 0x97, 0x60, 0xc2, 0xf3, 0x19, 0x8f, 0x4e, 0x6a, 0x92, 0x01, 0x63, 0x7c, 0xd6, 0xa0, 0x1e, 0xeb, + 0x2c, 0x81, 0x80, 0xd3, 0x75, 0x68, 0x83, 0x9c, 0x51, 0x4a, 0xaa, 0x90, 0x38, 0x1f, 0x85, 0x05, + 0x94, 0xbe, 0x0d, 0xf9, 0xaf, 0x70, 0x6a, 0x3c, 0xff, 0x6d, 0xc8, 0x2b, 0x25, 0x99, 0xb1, 0x50, + 0x32, 0x63, 0x4c, 0x63, 0xd2, 0xf4, 0x6b, 0xcb, 0x65, 0xc1, 0xe6, 0x6b, 0x51, 0xde, 0x6b, 0xcb, + 0x65, 0xcc, 0x61, 0x68, 0x16, 0xfa, 0xd8, 0x0f, 0x19, 0x43, 0x26, 0x6f, 0x9b, 0x2e, 0x97, 0xb5, + 0x1c, 0xa3, 0xac, 0x02, 0x16, 0x15, 0x99, 0x44, 0x9c, 0xbe, 0x8d, 0x98, 0x44, 0xbc, 0xff, 0x1e, + 0x25, 0xe2, 0x92, 0x00, 0x8e, 0x69, 0xa1, 0x3b, 0x70, 0xd4, 0x78, 0x8f, 0x2a, 0x4f, 0x35, 0xc8, + 0x57, 0x96, 0x27, 
0x90, 0xe7, 0x4e, 0x89, 0x4e, 0x1f, 0x5d, 0xce, 0xa2, 0x84, 0xb3, 0x1b, 0x40, + 0x75, 0x98, 0xa8, 0xa6, 0x5a, 0x1d, 0xe8, 0xbe, 0x55, 0xb5, 0x2e, 0xd2, 0x2d, 0xa6, 0x09, 0xa3, + 0x97, 0x61, 0xe0, 0x6d, 0x9f, 0x1b, 0xb6, 0x89, 0xa7, 0x89, 0x8c, 0x92, 0x32, 0xf0, 0xc6, 0xb5, + 0x0a, 0x2b, 0xdf, 0xdf, 0x2d, 0x0d, 0x95, 0xfd, 0x9a, 0xfc, 0x8b, 0x55, 0x05, 0xf4, 0x03, 0x16, + 0x4c, 0xa7, 0x1f, 0xbc, 0xaa, 0xd3, 0x23, 0xdd, 0x77, 0xda, 0x16, 0x8d, 0x4e, 0x2f, 0xe6, 0x92, + 0xc3, 0x6d, 0x9a, 0x42, 0x1f, 0xa6, 0xfb, 0x29, 0x74, 0xef, 0x12, 0x91, 0xa0, 0xfd, 0xe1, 0x78, + 0x3f, 0xd1, 0xd2, 0xfd, 0xdd, 0xd2, 0x18, 0x3f, 0x19, 0xdd, 0xbb, 0x2a, 0x1e, 0x3d, 0xaf, 0x80, + 0xbe, 0x1b, 0x8e, 0x06, 0x69, 0xd9, 0x30, 0x91, 0x4c, 0xf8, 0x93, 0xdd, 0x9c, 0xb2, 0xc9, 0x09, + 0xc7, 0x59, 0x04, 0x71, 0x76, 0x3b, 0xf6, 0xaf, 0x59, 0x4c, 0x27, 0x20, 0xba, 0x45, 0xc2, 0x56, + 0x3d, 0x3a, 0x04, 0x63, 0xb2, 0x45, 0x43, 0xdf, 0x7e, 0xcf, 0xd6, 0x60, 0xff, 0xd2, 0x62, 0xd6, + 0x60, 0x87, 0xe8, 0xd7, 0xf6, 0x06, 0x0c, 0x44, 0xa2, 0x35, 0xd1, 0xf5, 0x3c, 0xcb, 0x15, 0xd9, + 0x29, 0x66, 0x11, 0xa7, 0x1e, 0x39, 0xb2, 0x14, 0x2b, 0x32, 0xf6, 0x3f, 0xe3, 0x33, 0x20, 0x21, + 0x87, 0xa0, 0xd6, 0x5c, 0x30, 0xd5, 0x9a, 0xa5, 0x0e, 0x5f, 0x90, 0xa3, 0xde, 0xfc, 0xa7, 0x66, + 0xbf, 0x99, 0x70, 0xef, 0xdd, 0x6e, 0x86, 0x68, 0x7f, 0xde, 0x02, 0x88, 0x13, 0x80, 0x74, 0x91, + 0x90, 0xf9, 0x22, 0x7d, 0xd6, 0xf8, 0x91, 0x5f, 0xf5, 0xeb, 0x42, 0xf5, 0x72, 0x32, 0xd6, 0xac, + 0xf2, 0xf2, 0x7d, 0xed, 0x37, 0x56, 0xd8, 0xa8, 0x24, 0x23, 0xf2, 0x16, 0x63, 0x5d, 0xbf, 0x11, + 0x8d, 0xf7, 0x4b, 0x16, 0x1c, 0xc9, 0x72, 0x92, 0xa0, 0x8f, 0x64, 0x2e, 0xe6, 0x54, 0x26, 0xa2, + 0x6a, 0x36, 0x6f, 0x88, 0x72, 0xac, 0x30, 0xba, 0xce, 0x74, 0x7d, 0xb0, 0xe4, 0x14, 0xd7, 0x60, + 0xa4, 0x1c, 0x10, 0x8d, 0xbf, 0x78, 0x35, 0xce, 0x9b, 0x33, 0x38, 0xf7, 0xf4, 0x81, 0x23, 0x0f, + 0xd9, 0x5f, 0x2e, 0xc0, 0x11, 0x6e, 0xe8, 0x34, 0xbb, 0xed, 0xbb, 0xb5, 0xb2, 0x5f, 0x13, 0xae, + 0xad, 0x6f, 0xc2, 0x70, 0x53, 0x93, 0x4d, 0xb7, 0x0b, 0xb4, 0xae, 0xcb, 0xb0, 0x63, 0x69, 0x9a, + 0x5e, 0x8a, 0x0d, 0x5a, 0xa8, 0x06, 0xc3, 0x64, 0xdb, 0xad, 0x2a, 0x6b, 0x99, 0xc2, 0x81, 0x2f, + 0x69, 0xd5, 0xca, 0xa2, 0x46, 0x07, 0x1b, 0x54, 0xbb, 0x36, 0x4f, 0xd6, 0x58, 0xb4, 0x9e, 0x0e, + 0x16, 0x32, 0x3f, 0x6e, 0xc1, 0xf1, 0x9c, 0xb0, 0xec, 0xb4, 0xb9, 0xdb, 0xcc, 0xa4, 0x4c, 0x2c, + 0x5b, 0xd5, 0x1c, 0x37, 0x34, 0xc3, 0x02, 0x8a, 0x3e, 0x0a, 0xd0, 0x8c, 0x53, 0x52, 0x76, 0x88, + 0x5f, 0x6d, 0x44, 0xb2, 0xd5, 0x82, 0x92, 0xaa, 0xcc, 0x95, 0x1a, 0x2d, 0xfb, 0x4b, 0x3d, 0xd0, + 0xcb, 0x0c, 0x93, 0x50, 0x19, 0xfa, 0x37, 0x79, 0xcc, 0xbc, 0xb6, 0xf3, 0x46, 0x71, 0x65, 0x10, + 0xbe, 0x78, 0xde, 0xb4, 0x52, 0x2c, 0xc9, 0xa0, 0x15, 0x98, 0xe4, 0xe9, 0x36, 0xeb, 0x0b, 0xa4, + 0xee, 0xec, 0x48, 0xb1, 0x6f, 0x81, 0x7d, 0xaa, 0x12, 0x7f, 0x2f, 0xa7, 0x51, 0x70, 0x56, 0x3d, + 0xf4, 0x2a, 0x8c, 0xd2, 0x67, 0xb8, 0xdf, 0x8a, 0x24, 0x25, 0x9e, 0xdf, 0x52, 0xbd, 0x4c, 0xd6, + 0x0c, 0x28, 0x4e, 0x60, 0xa3, 0x97, 0x61, 0xa4, 0x99, 0x12, 0x70, 0xf7, 0xc6, 0x92, 0x20, 0x53, + 0xa8, 0x6d, 0xe2, 0x32, 0x3f, 0x89, 0x16, 0xf3, 0x0a, 0x59, 0xdb, 0x0c, 0x48, 0xb8, 0xe9, 0xd7, + 0x6b, 0x8c, 0x03, 0xee, 0xd5, 0xfc, 0x24, 0x12, 0x70, 0x9c, 0xaa, 0x41, 0xa9, 0xac, 0x3b, 0x6e, + 0xbd, 0x15, 0x90, 0x98, 0x4a, 0x9f, 0x49, 0x65, 0x29, 0x01, 0xc7, 0xa9, 0x1a, 0x9d, 0x25, 0xf7, + 0xfd, 0xf7, 0x47, 0x72, 0x6f, 0xff, 0x4c, 0x01, 0x8c, 0xa9, 0x7d, 0x0f, 0xe7, 0xdd, 0x7c, 0x05, + 0x7a, 0x36, 0x82, 0x66, 0x55, 0x18, 0xe1, 0x65, 0x7e, 0x59, 0x9c, 0xfd, 0x9f, 0x7f, 0x19, 0xfd, + 0x8f, 0x59, 0x2d, 0xba, 0xc7, 0x8f, 0x96, 
0x03, 0x9f, 0x5e, 0x72, 0x32, 0xac, 0xa6, 0x72, 0x47, + 0xea, 0x97, 0x81, 0x35, 0xda, 0x04, 0xa0, 0x16, 0x3e, 0x15, 0x9c, 0x82, 0x61, 0xaf, 0x56, 0x11, + 0xe1, 0x73, 0x24, 0x15, 0x74, 0x01, 0x86, 0x44, 0x2a, 0x44, 0xe6, 0x35, 0xc3, 0x37, 0x13, 0xb3, + 0xaf, 0x5b, 0x88, 0x8b, 0xb1, 0x8e, 0x63, 0xff, 0x60, 0x01, 0x26, 0x33, 0xdc, 0x1e, 0xf9, 0x35, + 0xb2, 0xe1, 0x86, 0x51, 0xb0, 0x93, 0xbc, 0x9c, 0xb0, 0x28, 0xc7, 0x0a, 0x83, 0x9e, 0x55, 0xfc, + 0xa2, 0x4a, 0x5e, 0x4e, 0xc2, 0xad, 0x48, 0x40, 0x0f, 0x98, 0xaa, 0xff, 0x0c, 0xf4, 0xb4, 0x42, + 0x22, 0x63, 0xdd, 0xab, 0x6b, 0x9b, 0x29, 0xec, 0x19, 0x84, 0x3e, 0x01, 0x37, 0x94, 0x16, 0x5a, + 0x7b, 0x02, 0x72, 0x3d, 0x34, 0x87, 0xd1, 0xce, 0x45, 0xc4, 0x73, 0xbc, 0x48, 0x3c, 0x14, 0xe3, + 0x18, 0xc8, 0xac, 0x14, 0x0b, 0xa8, 0xfd, 0xc5, 0x22, 0x9c, 0xc8, 0x75, 0x84, 0xa6, 0x5d, 0x6f, + 0xf8, 0x9e, 0x1b, 0xf9, 0xca, 0x70, 0x91, 0xc7, 0x3d, 0x26, 0xcd, 0xcd, 0x15, 0x51, 0x8e, 0x15, + 0x06, 0x3a, 0x0b, 0xbd, 0x4c, 0x28, 0x9e, 0x4c, 0x83, 0x86, 0xe7, 0x16, 0x78, 0x44, 0x49, 0x0e, + 0xd6, 0x6e, 0xf5, 0x62, 0xdb, 0x5b, 0xfd, 0x11, 0xca, 0xc1, 0xf8, 0xf5, 0xe4, 0x85, 0x42, 0xbb, + 0xeb, 0xfb, 0x75, 0xcc, 0x80, 0xe8, 0x31, 0x31, 0x5e, 0x09, 0x4b, 0x3d, 0xec, 0xd4, 0xfc, 0x50, + 0x1b, 0xb4, 0x27, 0xa0, 0x7f, 0x8b, 0xec, 0x04, 0xae, 0xb7, 0x91, 0xb4, 0xe0, 0xbc, 0xc2, 0x8b, + 0xb1, 0x84, 0x9b, 0x59, 0xbf, 0xfb, 0xef, 0x47, 0xd6, 0x6f, 0x7d, 0x05, 0x0c, 0x74, 0x64, 0x4f, + 0x7e, 0xa8, 0x08, 0x63, 0x78, 0x6e, 0xe1, 0xfd, 0x89, 0xb8, 0x9e, 0x9e, 0x88, 0xfb, 0x91, 0x1c, + 0xfb, 0x60, 0xb3, 0xf1, 0xcb, 0x16, 0x8c, 0xb1, 0x84, 0x8c, 0x22, 0x8a, 0x89, 0xeb, 0x7b, 0x87, + 0xf0, 0x14, 0x78, 0x04, 0x7a, 0x03, 0xda, 0xa8, 0x98, 0x41, 0xb5, 0xc7, 0x59, 0x4f, 0x30, 0x87, + 0xa1, 0x93, 0xd0, 0xc3, 0xba, 0x40, 0x27, 0x6f, 0x98, 0x1f, 0xc1, 0x0b, 0x4e, 0xe4, 0x60, 0x56, + 0xca, 0xe2, 0x29, 0x62, 0xd2, 0xac, 0xbb, 0xbc, 0xd3, 0xb1, 0xc9, 0xc2, 0xbb, 0x23, 0x44, 0x4a, + 0x66, 0xd7, 0xde, 0x59, 0x3c, 0xc5, 0x6c, 0x92, 0xed, 0x9f, 0xd9, 0x7f, 0x5e, 0x80, 0xd3, 0x99, + 0xf5, 0xba, 0x8e, 0xa7, 0xd8, 0xbe, 0xf6, 0x83, 0x4c, 0xdf, 0x56, 0x3c, 0x44, 0xfb, 0xf8, 0x9e, + 0x6e, 0xb9, 0xff, 0xde, 0x2e, 0xc2, 0x1c, 0x66, 0x0e, 0xd9, 0xbb, 0x24, 0xcc, 0x61, 0x66, 0xdf, + 0x72, 0xc4, 0x04, 0x7f, 0x53, 0xc8, 0xf9, 0x16, 0x26, 0x30, 0x38, 0x47, 0xcf, 0x19, 0x06, 0x0c, + 0xe5, 0x23, 0x9c, 0x9f, 0x31, 0xbc, 0x0c, 0x2b, 0x28, 0x9a, 0x85, 0xb1, 0x86, 0xeb, 0xd1, 0xc3, + 0x67, 0xc7, 0x64, 0xc5, 0x95, 0x2e, 0x63, 0xc5, 0x04, 0xe3, 0x24, 0x3e, 0x72, 0xb5, 0x10, 0x88, + 0xfc, 0xeb, 0x5e, 0x3e, 0xd0, 0xae, 0x9b, 0x31, 0xcd, 0x39, 0xd4, 0x28, 0x66, 0x84, 0x43, 0x5c, + 0xd1, 0xe4, 0x44, 0xc5, 0xee, 0xe5, 0x44, 0xc3, 0xd9, 0x32, 0xa2, 0xe9, 0x97, 0x61, 0xe4, 0x9e, + 0x75, 0x23, 0xf6, 0x37, 0x8a, 0xf0, 0x50, 0x9b, 0x6d, 0xcf, 0xcf, 0x7a, 0x63, 0x0e, 0xb4, 0xb3, + 0x3e, 0x35, 0x0f, 0x65, 0x38, 0xb2, 0xde, 0xaa, 0xd7, 0x77, 0x98, 0x23, 0x18, 0xa9, 0x49, 0x0c, + 0xc1, 0x53, 0x4a, 0xe1, 0xc8, 0x91, 0xa5, 0x0c, 0x1c, 0x9c, 0x59, 0x93, 0x3e, 0xb1, 0xe8, 0x4d, + 0xb2, 0xa3, 0x48, 0x25, 0x9e, 0x58, 0x58, 0x07, 0x62, 0x13, 0x17, 0x5d, 0x82, 0x09, 0x67, 0xdb, + 0x71, 0x79, 0xfa, 0x0b, 0x49, 0x80, 0xbf, 0xb1, 0x94, 0x2c, 0x7a, 0x36, 0x89, 0x80, 0xd3, 0x75, + 0xd0, 0xeb, 0x80, 0xfc, 0x5b, 0xcc, 0xb9, 0xa4, 0x76, 0x89, 0x78, 0x42, 0xeb, 0xce, 0xe6, 0xae, + 0x18, 0x1f, 0x09, 0xd7, 0x52, 0x18, 0x38, 0xa3, 0x56, 0x22, 0x18, 0x5f, 0x5f, 0x7e, 0x30, 0xbe, + 0xf6, 0xe7, 0x62, 0xc7, 0xcc, 0x81, 0x6f, 0xc1, 0xc8, 0x41, 0x2d, 0xa6, 0x9f, 0x80, 0xfe, 0x40, + 0xe4, 0x64, 0x4f, 0x78, 0x5d, 0xcb, 0x8c, 0xd5, 0x12, 0x6e, 0xff, 
0x17, 0x0b, 0x94, 0x2c, 0xd9, + 0x8c, 0xbb, 0xfd, 0x32, 0x33, 0xff, 0xe6, 0x52, 0x70, 0x2d, 0xd4, 0xd6, 0x51, 0xcd, 0xfc, 0x3b, + 0x06, 0x62, 0x13, 0x97, 0x2f, 0xb7, 0x30, 0x8e, 0xf0, 0x60, 0x3c, 0x20, 0x44, 0x58, 0x50, 0x85, + 0x81, 0x3e, 0x06, 0xfd, 0x35, 0x77, 0xdb, 0x0d, 0x85, 0x1c, 0xed, 0xc0, 0x7a, 0xbb, 0xf8, 0xfb, + 0x16, 0x38, 0x19, 0x2c, 0xe9, 0xd9, 0x3f, 0x62, 0x81, 0xd2, 0x4b, 0x5e, 0x26, 0x4e, 0x3d, 0xda, + 0x44, 0xaf, 0x01, 0x48, 0x0a, 0x4a, 0xf6, 0x26, 0xad, 0xa5, 0x00, 0x2b, 0xc8, 0xbe, 0xf1, 0x0f, + 0x6b, 0x75, 0xd0, 0xab, 0xd0, 0xb7, 0xc9, 0x68, 0x89, 0x6f, 0x3b, 0xab, 0x54, 0x5d, 0xac, 0x74, + 0x7f, 0xb7, 0x74, 0xc4, 0x6c, 0x53, 0xde, 0x62, 0xbc, 0x96, 0xfd, 0x43, 0x85, 0x78, 0x4e, 0xdf, + 0x68, 0xf9, 0x91, 0x73, 0x08, 0x9c, 0xc8, 0x25, 0x83, 0x13, 0x79, 0x2c, 0x7b, 0xa1, 0x6a, 0x5d, + 0xca, 0xe5, 0x40, 0xae, 0x25, 0x38, 0x90, 0xc7, 0x3b, 0x93, 0x6a, 0xcf, 0x79, 0xfc, 0x73, 0x0b, + 0x26, 0x0c, 0xfc, 0x43, 0xb8, 0x00, 0x97, 0xcc, 0x0b, 0xf0, 0xe1, 0x8e, 0xdf, 0x90, 0x73, 0xf1, + 0x7d, 0x7f, 0x31, 0xd1, 0x77, 0x76, 0xe1, 0xbd, 0x0d, 0x3d, 0x9b, 0x4e, 0x50, 0x13, 0xef, 0xfa, + 0xf3, 0x5d, 0x8d, 0xf5, 0xcc, 0x65, 0x27, 0x10, 0x96, 0x16, 0x4f, 0xcb, 0x51, 0xa7, 0x45, 0x1d, + 0xad, 0x2c, 0x58, 0x53, 0xe8, 0x22, 0xf4, 0x85, 0x55, 0xbf, 0xa9, 0xfc, 0xe4, 0x58, 0x3a, 0xed, + 0x0a, 0x2b, 0xd9, 0xdf, 0x2d, 0x21, 0xb3, 0x39, 0x5a, 0x8c, 0x05, 0x3e, 0x7a, 0x13, 0x46, 0xd8, + 0x2f, 0x65, 0xf6, 0x58, 0xcc, 0x97, 0xc0, 0x54, 0x74, 0x44, 0x6e, 0x13, 0x6c, 0x14, 0x61, 0x93, + 0xd4, 0xf4, 0x06, 0x0c, 0xaa, 0xcf, 0x7a, 0xa0, 0xda, 0xfa, 0xff, 0x58, 0x84, 0xc9, 0x8c, 0x35, + 0x87, 0x42, 0x63, 0x26, 0x2e, 0x74, 0xb9, 0x54, 0xdf, 0xe1, 0x5c, 0x84, 0xec, 0x01, 0x58, 0x13, + 0x6b, 0xab, 0xeb, 0x46, 0xaf, 0x87, 0x24, 0xd9, 0x28, 0x2d, 0xea, 0xdc, 0x28, 0x6d, 0xec, 0xd0, + 0x86, 0x9a, 0x36, 0xa4, 0x7a, 0xfa, 0x40, 0xe7, 0xf4, 0x37, 0x7b, 0xe0, 0x48, 0x56, 0x0c, 0x69, + 0xf4, 0x19, 0x10, 0xe9, 0xff, 0xc5, 0xb4, 0x3e, 0xdf, 0x6e, 0x84, 0xf5, 0x9a, 0x33, 0xcc, 0x17, + 0x4c, 0x84, 0x6e, 0x9d, 0x91, 0xc7, 0x11, 0x2f, 0xec, 0x38, 0xcc, 0xa2, 0x4d, 0x16, 0x52, 0x49, + 0xdc, 0x9e, 0xf2, 0xf8, 0xf8, 0x50, 0xd7, 0x1d, 0x10, 0xf7, 0x6f, 0x98, 0x30, 0xa9, 0x92, 0xc5, + 0x9d, 0x4d, 0xaa, 0x64, 0xcb, 0x68, 0x19, 0xfa, 0xaa, 0xdc, 0x56, 0xa7, 0xd8, 0xf9, 0x08, 0xe3, + 0x86, 0x3a, 0xea, 0x00, 0x16, 0x06, 0x3a, 0x82, 0xc0, 0xb4, 0x0b, 0x43, 0xda, 0xc0, 0x3c, 0xd0, + 0xc5, 0xb3, 0x45, 0x2f, 0x3e, 0x6d, 0x08, 0x1e, 0xe8, 0x02, 0xfa, 0x31, 0xed, 0xee, 0x17, 0xe7, + 0xc1, 0x07, 0x0d, 0xde, 0xe9, 0x64, 0xc2, 0x05, 0x2f, 0xb1, 0xaf, 0x18, 0x2f, 0x55, 0x31, 0x63, + 0x9e, 0xe7, 0xa6, 0x4e, 0x32, 0x2f, 0xfc, 0xf6, 0x71, 0xce, 0xed, 0x1f, 0xb7, 0x20, 0xe1, 0x24, + 0xa5, 0xc4, 0x9d, 0x56, 0xae, 0xb8, 0xf3, 0x0c, 0xf4, 0x04, 0x7e, 0x9d, 0x24, 0x53, 0xd3, 0x63, + 0xbf, 0x4e, 0x30, 0x83, 0x50, 0x8c, 0x28, 0x16, 0x62, 0x0d, 0xeb, 0x0f, 0x74, 0xf1, 0xf4, 0x7e, + 0x04, 0x7a, 0xeb, 0x64, 0x9b, 0xd4, 0x93, 0x19, 0x44, 0xaf, 0xd2, 0x42, 0xcc, 0x61, 0xf6, 0x2f, + 0xf7, 0xc0, 0xa9, 0xb6, 0x91, 0xd7, 0x28, 0x83, 0xb9, 0xe1, 0x44, 0xe4, 0xb6, 0xb3, 0x93, 0xcc, + 0x9c, 0x77, 0x89, 0x17, 0x63, 0x09, 0x67, 0xce, 0xc8, 0x3c, 0x93, 0x4c, 0x42, 0x38, 0x2c, 0x12, + 0xc8, 0x08, 0xa8, 0x29, 0x6c, 0x2c, 0xde, 0x0f, 0x61, 0xe3, 0xb3, 0x00, 0x61, 0x58, 0xe7, 0x06, + 0x97, 0x35, 0xe1, 0xe5, 0x1c, 0x67, 0x1c, 0xaa, 0x5c, 0x15, 0x10, 0xac, 0x61, 0xa1, 0x05, 0x18, + 0x6f, 0x06, 0x7e, 0xc4, 0x65, 0xed, 0x0b, 0xdc, 0x26, 0xb9, 0xd7, 0x0c, 0x7a, 0x55, 0x4e, 0xc0, + 0x71, 0xaa, 0x06, 0x7a, 0x01, 0x86, 0x44, 0x20, 0xac, 0xb2, 0xef, 0xd7, 0x85, 0x78, 0x4f, 
0x99, + 0xe9, 0x56, 0x62, 0x10, 0xd6, 0xf1, 0xb4, 0x6a, 0x4c, 0x80, 0xdf, 0x9f, 0x59, 0x8d, 0x0b, 0xf1, + 0x35, 0xbc, 0x44, 0xd0, 0xfc, 0x81, 0xae, 0x82, 0xe6, 0xc7, 0x02, 0xcf, 0xc1, 0xae, 0xf5, 0xc9, + 0xd0, 0x51, 0x44, 0xf8, 0x95, 0x1e, 0x98, 0x14, 0x0b, 0xe7, 0x41, 0x2f, 0x97, 0xeb, 0xe9, 0xe5, + 0x72, 0x3f, 0x44, 0xa2, 0xef, 0xaf, 0x99, 0xc3, 0x5e, 0x33, 0x3f, 0x6c, 0x81, 0xc9, 0x43, 0xa2, + 0xff, 0x2f, 0x37, 0xf5, 0xe8, 0x0b, 0xb9, 0x3c, 0x69, 0x1c, 0x51, 0xfb, 0x9d, 0x25, 0x21, 0xb5, + 0xff, 0x93, 0x05, 0x0f, 0x77, 0xa4, 0x88, 0x16, 0x61, 0x90, 0x31, 0xba, 0xda, 0xbb, 0xf8, 0x71, + 0xe5, 0xb3, 0x20, 0x01, 0x39, 0x7c, 0x77, 0x5c, 0x13, 0x2d, 0xa6, 0x72, 0xbc, 0x3e, 0x91, 0x91, + 0xe3, 0xf5, 0xa8, 0x31, 0x3c, 0xf7, 0x98, 0xe4, 0xf5, 0x0b, 0xf4, 0xc6, 0x31, 0x7d, 0x12, 0x3f, + 0x64, 0x88, 0x73, 0xed, 0x84, 0x38, 0x17, 0x99, 0xd8, 0xda, 0x1d, 0xf2, 0x1a, 0x8c, 0xb3, 0x08, + 0x99, 0xcc, 0x83, 0x46, 0x38, 0x33, 0x16, 0x62, 0x2b, 0xf9, 0xab, 0x09, 0x18, 0x4e, 0x61, 0xdb, + 0x7f, 0x5a, 0x84, 0x3e, 0xbe, 0xfd, 0x0e, 0xe1, 0xe1, 0xfb, 0x14, 0x0c, 0xba, 0x8d, 0x46, 0x8b, + 0xa7, 0xed, 0xec, 0x8d, 0x6d, 0xae, 0x97, 0x65, 0x21, 0x8e, 0xe1, 0x68, 0x49, 0x68, 0x12, 0xda, + 0x04, 0xe1, 0xe6, 0x1d, 0x9f, 0x59, 0x70, 0x22, 0x87, 0x73, 0x71, 0xea, 0x9e, 0x8d, 0x75, 0x0e, + 0xe8, 0x93, 0x00, 0x61, 0x14, 0xb8, 0xde, 0x06, 0x2d, 0x13, 0x99, 0x1a, 0x9e, 0x6c, 0x43, 0xad, + 0xa2, 0x90, 0x39, 0xcd, 0xf8, 0xcc, 0x51, 0x00, 0xac, 0x51, 0x44, 0x33, 0xc6, 0x4d, 0x3f, 0x9d, + 0x98, 0x3b, 0xe0, 0x54, 0xe3, 0x39, 0x9b, 0x7e, 0x11, 0x06, 0x15, 0xf1, 0x4e, 0x72, 0xc5, 0x61, + 0x9d, 0x61, 0xfb, 0x08, 0x8c, 0x25, 0xfa, 0x76, 0x20, 0xb1, 0xe4, 0xaf, 0x58, 0x30, 0xc6, 0x3b, + 0xb3, 0xe8, 0x6d, 0x8b, 0xdb, 0xe0, 0x2e, 0x1c, 0xa9, 0x67, 0x9c, 0xca, 0x62, 0xfa, 0xbb, 0x3f, + 0xc5, 0x95, 0x18, 0x32, 0x0b, 0x8a, 0x33, 0xdb, 0x40, 0xe7, 0xe8, 0x8e, 0xa3, 0xa7, 0xae, 0x53, + 0x17, 0xd1, 0x36, 0x86, 0xf9, 0x6e, 0xe3, 0x65, 0x58, 0x41, 0xed, 0x3f, 0xb4, 0x60, 0x82, 0xf7, + 0xfc, 0x0a, 0xd9, 0x51, 0x67, 0xd3, 0xb7, 0xb2, 0xef, 0x22, 0x61, 0x74, 0x21, 0x27, 0x61, 0xb4, + 0xfe, 0x69, 0xc5, 0xb6, 0x9f, 0xf6, 0x65, 0x0b, 0xc4, 0x0a, 0x39, 0x04, 0x49, 0xcb, 0x77, 0x98, + 0x92, 0x96, 0xe9, 0xfc, 0x4d, 0x90, 0x23, 0x62, 0xf9, 0x6b, 0x0b, 0xc6, 0x39, 0x42, 0x6c, 0x05, + 0xf1, 0x2d, 0x9d, 0x87, 0x39, 0xf3, 0x8b, 0x32, 0xcd, 0x5a, 0xaf, 0x90, 0x9d, 0x35, 0xbf, 0xec, + 0x44, 0x9b, 0xd9, 0x1f, 0x65, 0x4c, 0x56, 0x4f, 0xdb, 0xc9, 0xaa, 0xc9, 0x0d, 0x64, 0x24, 0x26, + 0xec, 0x20, 0x00, 0x3e, 0x68, 0x62, 0x42, 0xfb, 0xcf, 0x2c, 0x40, 0xbc, 0x19, 0x83, 0x71, 0xa3, + 0xec, 0x10, 0x2b, 0xd5, 0x2e, 0xba, 0xf8, 0x68, 0x52, 0x10, 0xac, 0x61, 0xdd, 0x97, 0xe1, 0x49, + 0x98, 0xb2, 0x14, 0x3b, 0x9b, 0xb2, 0x1c, 0x60, 0x44, 0xbf, 0xdc, 0x0f, 0x49, 0x9f, 0x49, 0x74, + 0x03, 0x86, 0xab, 0x4e, 0xd3, 0xb9, 0xe5, 0xd6, 0xdd, 0xc8, 0x25, 0x61, 0x3b, 0x3b, 0xb7, 0x79, + 0x0d, 0x4f, 0x18, 0x1f, 0x68, 0x25, 0xd8, 0xa0, 0x83, 0x66, 0x00, 0x9a, 0x81, 0xbb, 0xed, 0xd6, + 0xc9, 0x06, 0x13, 0x08, 0xb1, 0xf8, 0x3e, 0xdc, 0xe8, 0x4e, 0x96, 0x62, 0x0d, 0x23, 0x23, 0xf4, + 0x46, 0xf1, 0x01, 0x87, 0xde, 0x80, 0x43, 0x0b, 0xbd, 0xd1, 0x73, 0xa0, 0xd0, 0x1b, 0x03, 0x07, + 0x0e, 0xbd, 0xd1, 0xdb, 0x55, 0xe8, 0x0d, 0x0c, 0xc7, 0x24, 0xef, 0x49, 0xff, 0x2f, 0xb9, 0x75, + 0x22, 0x1e, 0x1c, 0x3c, 0x28, 0xd1, 0xf4, 0xde, 0x6e, 0xe9, 0x18, 0xce, 0xc4, 0xc0, 0x39, 0x35, + 0xd1, 0x47, 0x61, 0xca, 0xa9, 0xd7, 0xfd, 0xdb, 0x6a, 0x52, 0x17, 0xc3, 0xaa, 0x53, 0xe7, 0xca, + 0xa5, 0x7e, 0x46, 0xf5, 0xe4, 0xde, 0x6e, 0x69, 0x6a, 0x36, 0x07, 0x07, 0xe7, 0xd6, 0x46, 0xaf, + 0xc0, 0x60, 0x33, 
0xf0, 0xab, 0x2b, 0x9a, 0x63, 0xf7, 0x69, 0x3a, 0x80, 0x65, 0x59, 0xb8, 0xbf, + 0x5b, 0x1a, 0x51, 0x7f, 0xd8, 0x85, 0x1f, 0x57, 0xc8, 0x88, 0x6a, 0x31, 0xf4, 0xa0, 0xa3, 0x5a, + 0x0c, 0xdf, 0xef, 0xa8, 0x16, 0x5b, 0x30, 0x59, 0x21, 0x81, 0xeb, 0xd4, 0xdd, 0xbb, 0x94, 0x27, + 0x97, 0x67, 0xe0, 0x1a, 0x0c, 0x06, 0x89, 0x53, 0xbf, 0xab, 0xe0, 0xdb, 0x9a, 0x5c, 0x46, 0x9e, + 0xf2, 0x31, 0x21, 0xfb, 0x7f, 0x5b, 0xd0, 0x2f, 0xfc, 0x30, 0x0f, 0x81, 0x33, 0x9d, 0x35, 0x54, + 0x32, 0xa5, 0xec, 0x49, 0x61, 0x9d, 0xc9, 0x55, 0xc6, 0x2c, 0x27, 0x94, 0x31, 0x0f, 0xb7, 0x23, + 0xd2, 0x5e, 0x0d, 0xf3, 0x77, 0x8b, 0xf4, 0x85, 0x60, 0x44, 0x04, 0x78, 0xf0, 0x43, 0xb0, 0x0a, + 0xfd, 0xa1, 0xf0, 0x48, 0x2f, 0xe4, 0xfb, 0xf2, 0x24, 0x27, 0x31, 0xb6, 0x81, 0x14, 0x3e, 0xe8, + 0x92, 0x48, 0xa6, 0xab, 0x7b, 0xf1, 0x01, 0xba, 0xba, 0x77, 0x8a, 0x99, 0xd0, 0x73, 0x3f, 0x62, + 0x26, 0xd8, 0x5f, 0x63, 0xb7, 0xb3, 0x5e, 0x7e, 0x08, 0x8c, 0xdb, 0x25, 0xf3, 0x1e, 0xb7, 0xdb, + 0xac, 0x2c, 0xd1, 0xa9, 0x1c, 0x06, 0xee, 0x97, 0x2c, 0x38, 0x95, 0xf1, 0x55, 0x1a, 0x37, 0xf7, + 0x34, 0x0c, 0x38, 0xad, 0x9a, 0xab, 0xf6, 0xb2, 0xa6, 0x2d, 0x9e, 0x15, 0xe5, 0x58, 0x61, 0xa0, + 0x79, 0x98, 0x20, 0x77, 0x9a, 0x2e, 0x57, 0xc3, 0xeb, 0xa6, 0xe3, 0x45, 0xee, 0xbc, 0xbb, 0x98, + 0x04, 0xe2, 0x34, 0xbe, 0x0a, 0x89, 0x56, 0xcc, 0x0d, 0x89, 0xf6, 0x0b, 0x16, 0x0c, 0x29, 0x9f, + 0xec, 0x07, 0x3e, 0xda, 0xaf, 0x99, 0xa3, 0xfd, 0x50, 0x9b, 0xd1, 0xce, 0x19, 0xe6, 0x3f, 0x28, + 0xa8, 0xfe, 0x96, 0xfd, 0x20, 0xea, 0x82, 0x4b, 0xbc, 0x77, 0xb7, 0x97, 0x0b, 0x30, 0xe4, 0x34, + 0x9b, 0x12, 0x20, 0xed, 0x17, 0x59, 0x2a, 0x85, 0xb8, 0x18, 0xeb, 0x38, 0xca, 0x0b, 0xa7, 0x98, + 0xeb, 0x85, 0x53, 0x03, 0x88, 0x9c, 0x60, 0x83, 0x44, 0xb4, 0x4c, 0x98, 0x5b, 0xe7, 0x9f, 0x37, + 0xad, 0xc8, 0xad, 0xcf, 0xb8, 0x5e, 0x14, 0x46, 0xc1, 0xcc, 0xb2, 0x17, 0x5d, 0x0b, 0xf8, 0x33, + 0x55, 0x0b, 0x2a, 0xa8, 0x68, 0x61, 0x8d, 0xae, 0x8c, 0x3f, 0xc2, 0xda, 0xe8, 0x35, 0x0d, 0x61, + 0x56, 0x45, 0x39, 0x56, 0x18, 0xf6, 0x8b, 0xec, 0xf6, 0x61, 0x63, 0x7a, 0xb0, 0x80, 0x7a, 0x7f, + 0x3e, 0xac, 0x66, 0x83, 0xa9, 0x84, 0x17, 0xf4, 0xb0, 0x7d, 0xed, 0x0f, 0x7b, 0xda, 0xb0, 0xee, + 0xcf, 0x1a, 0xc7, 0xf6, 0x43, 0x1f, 0x4f, 0x19, 0x37, 0x3d, 0xd3, 0xe1, 0xd6, 0x38, 0x80, 0x39, + 0x13, 0xcb, 0xab, 0xc6, 0xb2, 0x4e, 0x2d, 0x97, 0xc5, 0xbe, 0xd0, 0xf2, 0xaa, 0x09, 0x00, 0x8e, + 0x71, 0x28, 0xc3, 0xa6, 0xfe, 0x84, 0x53, 0x28, 0x0e, 0xbf, 0xad, 0xb0, 0x43, 0xac, 0x61, 0xa0, + 0xf3, 0x42, 0x68, 0xc1, 0x75, 0x0f, 0x0f, 0x25, 0x84, 0x16, 0x72, 0xb8, 0x34, 0x49, 0xd3, 0x05, + 0x18, 0x22, 0x77, 0x22, 0x12, 0x78, 0x4e, 0x9d, 0xb6, 0xd0, 0x1b, 0x47, 0x8c, 0x5d, 0x8c, 0x8b, + 0xb1, 0x8e, 0x83, 0xd6, 0x60, 0x2c, 0xe4, 0xb2, 0x3c, 0x95, 0xf4, 0x81, 0xcb, 0x44, 0x9f, 0x54, + 0xde, 0xf0, 0x26, 0x78, 0x9f, 0x15, 0xf1, 0xd3, 0x49, 0xc6, 0x08, 0x49, 0x92, 0x40, 0xaf, 0xc2, + 0x68, 0xdd, 0x77, 0x6a, 0x73, 0x4e, 0xdd, 0xf1, 0xaa, 0x6c, 0x7c, 0x06, 0xcc, 0xec, 0xfc, 0x57, + 0x0d, 0x28, 0x4e, 0x60, 0x53, 0x06, 0x51, 0x2f, 0x11, 0x89, 0x4a, 0x1c, 0x6f, 0x83, 0x84, 0x53, + 0x83, 0xec, 0xab, 0x18, 0x83, 0x78, 0x35, 0x07, 0x07, 0xe7, 0xd6, 0x46, 0x17, 0x61, 0x58, 0x7e, + 0xbe, 0x16, 0x52, 0x27, 0x76, 0x68, 0xd2, 0x60, 0xd8, 0xc0, 0x44, 0x21, 0x1c, 0x95, 0xff, 0xd7, + 0x02, 0x67, 0x7d, 0xdd, 0xad, 0x8a, 0x38, 0x13, 0xdc, 0xf9, 0xfb, 0x23, 0xd2, 0xd3, 0x74, 0x31, + 0x0b, 0x69, 0x7f, 0xb7, 0x74, 0x52, 0x8c, 0x5a, 0x26, 0x1c, 0x67, 0xd3, 0x46, 0x2b, 0x30, 0xc9, + 0x6d, 0x60, 0xe6, 0x37, 0x49, 0x75, 0x4b, 0x6e, 0x38, 0xc6, 0x35, 0x6a, 0x8e, 0x3f, 0x97, 0xd3, + 0x28, 0x38, 0xab, 0x1e, 0x7a, 0x0b, 0xa6, 
0x9a, 0xad, 0x5b, 0x75, 0x37, 0xdc, 0x5c, 0xf5, 0x23, + 0x66, 0x42, 0x36, 0x5b, 0xab, 0x05, 0x24, 0xe4, 0xbe, 0xc1, 0xec, 0xea, 0x95, 0x61, 0x90, 0xca, + 0x39, 0x78, 0x38, 0x97, 0x02, 0xba, 0x0b, 0x47, 0x13, 0x0b, 0x41, 0xc4, 0x33, 0x19, 0xcd, 0x4f, + 0xf9, 0x54, 0xc9, 0xaa, 0x20, 0x42, 0x03, 0x65, 0x81, 0x70, 0x76, 0x13, 0xe8, 0x25, 0x00, 0xb7, + 0xb9, 0xe4, 0x34, 0xdc, 0x3a, 0x7d, 0x8e, 0x4e, 0xb2, 0x35, 0x42, 0x9f, 0x26, 0xb0, 0x5c, 0x96, + 0xa5, 0xf4, 0x6c, 0x16, 0xff, 0x76, 0xb0, 0x86, 0x8d, 0xae, 0xc2, 0xa8, 0xf8, 0xb7, 0x23, 0xa6, + 0x74, 0x42, 0x65, 0x07, 0x1d, 0x95, 0x35, 0xd4, 0x3c, 0x26, 0x4a, 0x70, 0xa2, 0x2e, 0xda, 0x80, + 0x53, 0x32, 0x35, 0xa9, 0xbe, 0x3e, 0xe5, 0x1c, 0x84, 0x2c, 0xcf, 0xd2, 0x00, 0xf7, 0x29, 0x9a, + 0x6d, 0x87, 0x88, 0xdb, 0xd3, 0xa1, 0xf7, 0xba, 0xbe, 0xcc, 0xb9, 0xc7, 0xf8, 0xd1, 0x38, 0x12, + 0xe6, 0xd5, 0x24, 0x10, 0xa7, 0xf1, 0x91, 0x0f, 0x47, 0x5d, 0x2f, 0x6b, 0x55, 0x1f, 0x63, 0x84, + 0x3e, 0xcc, 0x9d, 0xe5, 0xdb, 0xaf, 0xe8, 0x4c, 0x38, 0xce, 0xa6, 0x8b, 0x96, 0x61, 0x32, 0xe2, + 0x05, 0x0b, 0x6e, 0xc8, 0xd3, 0xb8, 0xd0, 0x67, 0xdf, 0x71, 0xd6, 0xdc, 0x71, 0xba, 0x9a, 0xd7, + 0xd2, 0x60, 0x9c, 0x55, 0xe7, 0x9d, 0x19, 0x80, 0x7e, 0xdd, 0xa2, 0xb5, 0x35, 0x46, 0x1f, 0x7d, + 0x0a, 0x86, 0xf5, 0xf1, 0x11, 0x4c, 0xcb, 0xd9, 0x6c, 0x3e, 0x58, 0x3b, 0x5e, 0xf8, 0x33, 0x41, + 0x1d, 0x21, 0x3a, 0x0c, 0x1b, 0x14, 0x51, 0x35, 0x23, 0xc8, 0xc5, 0xf9, 0xee, 0x98, 0xa2, 0xee, + 0xed, 0x1f, 0x09, 0x64, 0xef, 0x1c, 0x74, 0x15, 0x06, 0xaa, 0x75, 0x97, 0x78, 0xd1, 0x72, 0xb9, + 0x5d, 0x80, 0xd2, 0x79, 0x81, 0x23, 0xb6, 0xa2, 0xc8, 0xbe, 0xc4, 0xcb, 0xb0, 0xa2, 0x60, 0x5f, + 0x84, 0xa1, 0x4a, 0x9d, 0x90, 0x26, 0xf7, 0xe3, 0x42, 0x4f, 0xb0, 0x87, 0x09, 0x63, 0x2d, 0x2d, + 0xc6, 0x5a, 0xea, 0x6f, 0x0e, 0xc6, 0x54, 0x4a, 0xb8, 0xfd, 0xdb, 0x05, 0x28, 0x75, 0x48, 0x02, + 0x96, 0xd0, 0xb7, 0x59, 0x5d, 0xe9, 0xdb, 0x66, 0x61, 0x2c, 0xfe, 0xa7, 0x8b, 0xf2, 0x94, 0x31, + 0xf4, 0x0d, 0x13, 0x8c, 0x93, 0xf8, 0x5d, 0xfb, 0xb5, 0xe8, 0x2a, 0xbb, 0x9e, 0x8e, 0x9e, 0x59, + 0x86, 0xaa, 0xbe, 0xb7, 0xfb, 0xb7, 0x77, 0xae, 0xda, 0xd5, 0xfe, 0x5a, 0x01, 0x8e, 0xaa, 0x21, + 0x7c, 0xef, 0x0e, 0xdc, 0xf5, 0xf4, 0xc0, 0xdd, 0x07, 0xa5, 0xb5, 0x7d, 0x0d, 0xfa, 0x78, 0xd4, + 0xd4, 0x2e, 0x78, 0xfe, 0x47, 0xcc, 0xe0, 0xf4, 0x8a, 0xcd, 0x34, 0x02, 0xd4, 0xff, 0x80, 0x05, + 0x63, 0x09, 0x07, 0x49, 0x84, 0x35, 0x2f, 0xfa, 0x7b, 0xe1, 0xcb, 0xb3, 0x38, 0xfe, 0x33, 0xd0, + 0xb3, 0xe9, 0x2b, 0x23, 0x65, 0x85, 0x71, 0xd9, 0x0f, 0x23, 0xcc, 0x20, 0xf6, 0x1f, 0x59, 0xd0, + 0xbb, 0xe6, 0xb8, 0x5e, 0x24, 0xb5, 0x1f, 0x56, 0x8e, 0xf6, 0xa3, 0x9b, 0xef, 0x42, 0x2f, 0x40, + 0x1f, 0x59, 0x5f, 0x27, 0xd5, 0x48, 0xcc, 0xaa, 0x8c, 0xa6, 0xd1, 0xb7, 0xc8, 0x4a, 0x29, 0x13, + 0xca, 0x1a, 0xe3, 0x7f, 0xb1, 0x40, 0x46, 0x37, 0x61, 0x30, 0x72, 0x1b, 0x64, 0xb6, 0x56, 0x13, + 0x36, 0x01, 0xf7, 0x10, 0x02, 0x66, 0x4d, 0x12, 0xc0, 0x31, 0x2d, 0xfb, 0x8b, 0x05, 0x80, 0x38, + 0x14, 0x5c, 0xa7, 0x4f, 0x9c, 0x4b, 0x69, 0x8b, 0xcf, 0x66, 0x68, 0x8b, 0x51, 0x4c, 0x30, 0x43, + 0x55, 0xac, 0x86, 0xa9, 0xd8, 0xd5, 0x30, 0xf5, 0x1c, 0x64, 0x98, 0xe6, 0x61, 0x22, 0x0e, 0x65, + 0x67, 0x46, 0xf2, 0x64, 0xf7, 0xf7, 0x5a, 0x12, 0x88, 0xd3, 0xf8, 0x36, 0x81, 0x33, 0x2a, 0xa2, + 0x97, 0xb8, 0x0b, 0x99, 0x2b, 0x81, 0xae, 0x7d, 0xef, 0x30, 0x4e, 0xb1, 0x3a, 0xbc, 0x90, 0xab, + 0x0e, 0xff, 0x29, 0x0b, 0x8e, 0x24, 0xdb, 0x61, 0x7e, 0xf7, 0x9f, 0xb7, 0xe0, 0x68, 0x9c, 0x03, + 0x27, 0x6d, 0x82, 0xf0, 0x7c, 0xdb, 0x28, 0x65, 0x39, 0x3d, 0x8e, 0xc3, 0xb6, 0xac, 0x64, 0x91, + 0xc6, 0xd9, 0x2d, 0xda, 0xff, 0xab, 0x07, 0xa6, 0xf2, 0xc2, 0x9b, 
0x31, 0x4f, 0x23, 0xe7, 0x4e, + 0x65, 0x8b, 0xdc, 0x16, 0xfe, 0x1c, 0xb1, 0xa7, 0x11, 0x2f, 0xc6, 0x12, 0x9e, 0x4c, 0x7b, 0x54, + 0xe8, 0x32, 0xed, 0xd1, 0x26, 0x4c, 0xdc, 0xde, 0x24, 0xde, 0x75, 0x2f, 0x74, 0x22, 0x37, 0x5c, + 0x77, 0x99, 0x02, 0x9d, 0xaf, 0x1b, 0x99, 0xba, 0x7f, 0xe2, 0x66, 0x12, 0x61, 0x7f, 0xb7, 0x74, + 0xca, 0x28, 0x88, 0xbb, 0xcc, 0x0f, 0x12, 0x9c, 0x26, 0x9a, 0xce, 0x1a, 0xd5, 0xf3, 0x80, 0xb3, + 0x46, 0x35, 0x5c, 0x61, 0x76, 0x23, 0xdd, 0x48, 0xd8, 0xb3, 0x75, 0x45, 0x95, 0x62, 0x0d, 0x03, + 0x7d, 0x02, 0x90, 0x9e, 0xf6, 0xcf, 0x88, 0x2e, 0xfb, 0xcc, 0xde, 0x6e, 0x09, 0xad, 0xa6, 0xa0, + 0xfb, 0xbb, 0xa5, 0x49, 0x5a, 0xba, 0xec, 0xd1, 0xe7, 0x6f, 0x1c, 0x92, 0x2f, 0x83, 0x10, 0xba, + 0x09, 0xe3, 0xb4, 0x94, 0xed, 0x28, 0x19, 0xba, 0x96, 0x3f, 0x59, 0x9f, 0xda, 0xdb, 0x2d, 0x8d, + 0xaf, 0x26, 0x60, 0x79, 0xa4, 0x53, 0x44, 0x32, 0x92, 0x47, 0x0d, 0x74, 0x9b, 0x3c, 0xca, 0xfe, + 0xbc, 0x05, 0x27, 0xe8, 0x05, 0x57, 0xbb, 0x9a, 0xa3, 0x45, 0x77, 0x9a, 0x2e, 0xd7, 0xd3, 0x88, + 0xab, 0x86, 0xc9, 0xea, 0xca, 0xcb, 0x5c, 0x4b, 0xa3, 0xa0, 0xf4, 0x84, 0xdf, 0x72, 0xbd, 0x5a, + 0xf2, 0x84, 0xbf, 0xe2, 0x7a, 0x35, 0xcc, 0x20, 0xea, 0xca, 0x2a, 0xe6, 0x46, 0xa9, 0xff, 0x0a, + 0xdd, 0xab, 0xb4, 0x2f, 0xdf, 0xd2, 0x6e, 0xa0, 0xa7, 0x74, 0x9d, 0xaa, 0x30, 0x9f, 0xcc, 0xd5, + 0xa7, 0x7e, 0xce, 0x02, 0xe1, 0xfd, 0xde, 0xc5, 0x9d, 0xfc, 0x26, 0x0c, 0x6f, 0xa7, 0x53, 0xa2, + 0x9e, 0xc9, 0x0f, 0x07, 0x20, 0x12, 0xa1, 0x2a, 0x16, 0xdd, 0x48, 0x7f, 0x6a, 0xd0, 0xb2, 0x6b, + 0x20, 0xa0, 0x0b, 0x84, 0x69, 0x35, 0x3a, 0xf7, 0xe6, 0x59, 0x80, 0x1a, 0xc3, 0x65, 0x79, 0xd2, + 0x0b, 0x26, 0xc7, 0xb5, 0xa0, 0x20, 0x58, 0xc3, 0xb2, 0x7f, 0xae, 0x08, 0x43, 0x32, 0x05, 0x67, + 0xcb, 0xeb, 0x46, 0xf6, 0x78, 0xa0, 0x9c, 0xfc, 0xe8, 0x2d, 0x98, 0x08, 0x48, 0xb5, 0x15, 0x84, + 0xee, 0x36, 0x91, 0x60, 0xb1, 0x49, 0x66, 0x78, 0x92, 0x84, 0x04, 0x70, 0x9f, 0x85, 0xc8, 0x4a, + 0x14, 0x32, 0xa5, 0x71, 0x9a, 0x10, 0x3a, 0x0f, 0x83, 0x4c, 0xf4, 0x5e, 0x8e, 0x05, 0xc2, 0x4a, + 0xf0, 0xb5, 0x22, 0x01, 0x38, 0xc6, 0x61, 0x8f, 0x83, 0xd6, 0x2d, 0x86, 0x9e, 0xf0, 0x04, 0xaf, + 0xf0, 0x62, 0x2c, 0xe1, 0xe8, 0xa3, 0x30, 0xce, 0xeb, 0x05, 0x7e, 0xd3, 0xd9, 0xe0, 0x2a, 0xc1, + 0x5e, 0x15, 0x5e, 0x67, 0x7c, 0x25, 0x01, 0xdb, 0xdf, 0x2d, 0x1d, 0x49, 0x96, 0xb1, 0x6e, 0xa7, + 0xa8, 0x30, 0xcb, 0x3f, 0xde, 0x08, 0xbd, 0x33, 0x52, 0x06, 0x83, 0x31, 0x08, 0xeb, 0x78, 0xf6, + 0x5f, 0x59, 0x30, 0xa1, 0x4d, 0x55, 0xd7, 0x79, 0x2a, 0x8c, 0x41, 0x2a, 0x74, 0x31, 0x48, 0x07, + 0x8b, 0xf6, 0x90, 0x39, 0xc3, 0x3d, 0xf7, 0x69, 0x86, 0xed, 0x4f, 0x01, 0x4a, 0xe7, 0x77, 0x45, + 0xaf, 0x73, 0x43, 0x7e, 0x37, 0x20, 0xb5, 0x76, 0x0a, 0x7f, 0x3d, 0x72, 0x8e, 0xf4, 0x5c, 0xe5, + 0xb5, 0xb0, 0xaa, 0x6f, 0xff, 0x60, 0x0f, 0x8c, 0x27, 0x63, 0x75, 0xa0, 0xcb, 0xd0, 0xc7, 0xb9, + 0x74, 0x41, 0xbe, 0x8d, 0x3d, 0x99, 0x16, 0xe1, 0x83, 0xf1, 0x2b, 0x82, 0xd1, 0x17, 0xf5, 0xd1, + 0x5b, 0x30, 0x54, 0xf3, 0x6f, 0x7b, 0xb7, 0x9d, 0xa0, 0x36, 0x5b, 0x5e, 0x16, 0x27, 0x44, 0xa6, + 0x00, 0x6a, 0x21, 0x46, 0xd3, 0xa3, 0x86, 0x30, 0xdb, 0x89, 0x18, 0x84, 0x75, 0x72, 0x68, 0x8d, + 0xa5, 0x2c, 0x5a, 0x77, 0x37, 0x56, 0x9c, 0x66, 0x3b, 0xaf, 0xae, 0x79, 0x89, 0xa4, 0x51, 0x1e, + 0x11, 0x79, 0x8d, 0x38, 0x00, 0xc7, 0x84, 0xd0, 0x67, 0x60, 0x32, 0xcc, 0x51, 0x89, 0xe5, 0xa5, + 0xfb, 0x6e, 0xa7, 0x25, 0xe2, 0xc2, 0x94, 0x2c, 0xe5, 0x59, 0x56, 0x33, 0xe8, 0x0e, 0x20, 0x21, + 0x7a, 0x5e, 0x0b, 0x5a, 0x61, 0x34, 0xd7, 0xf2, 0x6a, 0x75, 0x99, 0xd2, 0xe8, 0x83, 0xd9, 0x72, + 0x82, 0x24, 0xb6, 0xd6, 0x36, 0x8b, 0xdd, 0x9b, 0xc6, 0xc0, 0x19, 0x6d, 0xd8, 0x9f, 0xeb, 
0x81, + 0x69, 0x99, 0x50, 0x39, 0xc3, 0x7b, 0xe5, 0xb3, 0x56, 0xc2, 0x7d, 0xe5, 0xa5, 0xfc, 0x83, 0xfe, + 0x81, 0x39, 0xb1, 0x7c, 0x21, 0xed, 0xc4, 0xf2, 0xca, 0x01, 0xbb, 0x71, 0xdf, 0x5c, 0x59, 0xde, + 0xb3, 0xfe, 0x27, 0x7b, 0x47, 0xc0, 0xb8, 0x9a, 0x11, 0xe6, 0x81, 0xd1, 0xcb, 0x52, 0x75, 0x94, + 0xf3, 0xfc, 0xbf, 0x2c, 0x70, 0x8c, 0xcb, 0x7e, 0x58, 0x86, 0x4f, 0x67, 0xe7, 0xac, 0xa2, 0x43, + 0x69, 0x92, 0x46, 0x33, 0xda, 0x59, 0x70, 0x03, 0xd1, 0xe3, 0x4c, 0x9a, 0x8b, 0x02, 0x27, 0x4d, + 0x53, 0x42, 0xb0, 0xa2, 0x83, 0xb6, 0x61, 0x62, 0x83, 0x45, 0x7c, 0xd2, 0x72, 0x1b, 0x8b, 0x73, + 0x21, 0x73, 0xdf, 0x5e, 0x9a, 0x5f, 0xcc, 0x4f, 0x84, 0xcc, 0x1f, 0x7f, 0x29, 0x14, 0x9c, 0x6e, + 0x82, 0x6e, 0x8d, 0x23, 0xce, 0xed, 0x70, 0xb1, 0xee, 0x84, 0x91, 0x5b, 0x9d, 0xab, 0xfb, 0xd5, + 0xad, 0x4a, 0xe4, 0x07, 0x32, 0x01, 0x62, 0xe6, 0xdb, 0x6b, 0xf6, 0x66, 0x25, 0x85, 0x6f, 0x34, + 0x3f, 0xb5, 0xb7, 0x5b, 0x3a, 0x92, 0x85, 0x85, 0x33, 0xdb, 0x42, 0xab, 0xd0, 0xbf, 0xe1, 0x46, + 0x98, 0x34, 0x7d, 0x71, 0x5a, 0x64, 0x1e, 0x85, 0x97, 0x38, 0x8a, 0xd1, 0x12, 0x8b, 0x48, 0x25, + 0x00, 0x58, 0x12, 0x41, 0xaf, 0xab, 0x4b, 0xa0, 0x2f, 0x5f, 0x00, 0x9b, 0xb6, 0xbd, 0xcb, 0xbc, + 0x06, 0x5e, 0x85, 0xa2, 0xb7, 0x1e, 0xb6, 0x8b, 0xc5, 0xb3, 0xba, 0x64, 0xc8, 0xcf, 0xe6, 0xfa, + 0xe9, 0xd3, 0x78, 0x75, 0xa9, 0x82, 0x69, 0x45, 0xe6, 0xf6, 0x1a, 0x56, 0x43, 0x57, 0x24, 0x5c, + 0xca, 0xf4, 0x02, 0x5e, 0xae, 0xcc, 0x57, 0x96, 0x0d, 0x1a, 0x2c, 0xaa, 0x21, 0x2b, 0xc6, 0xbc, + 0x3a, 0xba, 0x01, 0x83, 0x1b, 0xfc, 0xe0, 0x5b, 0x0f, 0x45, 0x52, 0xf5, 0xcc, 0xcb, 0xe8, 0x92, + 0x44, 0x32, 0xe8, 0xb1, 0x2b, 0x43, 0x81, 0x70, 0x4c, 0x0a, 0x7d, 0xce, 0x82, 0xa3, 0xc9, 0xac, + 0xf4, 0xcc, 0x59, 0x4d, 0x98, 0xa9, 0x65, 0x3a, 0x00, 0x94, 0xb3, 0x2a, 0x18, 0x0d, 0x32, 0xf5, + 0x4b, 0x26, 0x1a, 0xce, 0x6e, 0x8e, 0x0e, 0x74, 0x70, 0xab, 0xd6, 0x2e, 0x47, 0x4f, 0x22, 0x30, + 0x11, 0x1f, 0x68, 0x3c, 0xb7, 0x80, 0x69, 0x45, 0xb4, 0x06, 0xb0, 0x5e, 0x27, 0x22, 0xe2, 0xa3, + 0x30, 0x8a, 0xca, 0xbc, 0xfd, 0x97, 0x14, 0x96, 0xa0, 0xc3, 0x5e, 0xa2, 0x71, 0x29, 0xd6, 0xe8, + 0xd0, 0xa5, 0x54, 0x75, 0xbd, 0x1a, 0x09, 0x98, 0x72, 0x2b, 0x67, 0x29, 0xcd, 0x33, 0x8c, 0xf4, + 0x52, 0xe2, 0xe5, 0x58, 0x50, 0x60, 0xb4, 0x48, 0x73, 0x73, 0x3d, 0x6c, 0x97, 0x72, 0x62, 0x9e, + 0x34, 0x37, 0x13, 0x0b, 0x8a, 0xd3, 0x62, 0xe5, 0x58, 0x50, 0xa0, 0x5b, 0x66, 0x9d, 0x6e, 0x20, + 0x12, 0x4c, 0x8d, 0xe5, 0x6f, 0x99, 0x25, 0x8e, 0x92, 0xde, 0x32, 0x02, 0x80, 0x25, 0x11, 0xf4, + 0x49, 0x93, 0xdb, 0x19, 0x67, 0x34, 0x9f, 0xea, 0xc0, 0xed, 0x18, 0x74, 0xdb, 0xf3, 0x3b, 0x2f, + 0x41, 0x61, 0xbd, 0xca, 0x94, 0x62, 0x39, 0x3a, 0x83, 0xa5, 0x79, 0x83, 0x1a, 0x0b, 0xe1, 0xbe, + 0x34, 0x8f, 0x0b, 0xeb, 0x55, 0xba, 0xf4, 0x9d, 0xbb, 0xad, 0x80, 0x2c, 0xb9, 0x75, 0x22, 0xd2, + 0x4f, 0x64, 0x2e, 0xfd, 0x59, 0x89, 0x94, 0x5e, 0xfa, 0x0a, 0x84, 0x63, 0x52, 0x94, 0x6e, 0xcc, + 0x83, 0x4d, 0xe6, 0xd3, 0x55, 0xac, 0x56, 0x9a, 0x6e, 0x26, 0x17, 0xb6, 0x05, 0x23, 0xdb, 0x61, + 0x73, 0x93, 0xc8, 0x53, 0x91, 0xa9, 0xeb, 0x72, 0x22, 0x55, 0xdc, 0x10, 0x88, 0x6e, 0x10, 0xb5, + 0x9c, 0x7a, 0xea, 0x20, 0x67, 0xa2, 0x95, 0x1b, 0x3a, 0x31, 0x6c, 0xd2, 0xa6, 0x0b, 0xe1, 0x6d, + 0x1e, 0x4e, 0x8e, 0x29, 0xee, 0x72, 0x16, 0x42, 0x46, 0xc4, 0x39, 0xbe, 0x10, 0x04, 0x00, 0x4b, + 0x22, 0x6a, 0xb0, 0xd9, 0x05, 0x74, 0xac, 0xc3, 0x60, 0xa7, 0xfa, 0x1b, 0x0f, 0x36, 0xbb, 0x70, + 0x62, 0x52, 0xec, 0xa2, 0x69, 0x66, 0x24, 0xf0, 0x67, 0x6a, 0xbb, 0x9c, 0x8b, 0xa6, 0x53, 0xc2, + 0x7f, 0x7e, 0xd1, 0x64, 0x61, 0xe1, 0xcc, 0xb6, 0xe8, 0xc7, 0x35, 0x65, 0x64, 0x40, 0x91, 0x22, + 0xe3, 0x89, 0x9c, 
0xc0, 0x9a, 0xe9, 0xf0, 0x81, 0xfc, 0xe3, 0x14, 0x08, 0xc7, 0xa4, 0x50, 0x0d, + 0x46, 0x9b, 0x46, 0xc4, 0x59, 0x96, 0xea, 0x23, 0x87, 0x2f, 0xc8, 0x8a, 0x4d, 0xcb, 0x25, 0x44, + 0x26, 0x04, 0x27, 0x68, 0x32, 0xcb, 0x3d, 0xee, 0xea, 0xc7, 0x32, 0x81, 0xe4, 0x4c, 0x75, 0x86, + 0x37, 0x20, 0x9f, 0x6a, 0x01, 0xc0, 0x92, 0x08, 0x1d, 0x0d, 0xe1, 0xa0, 0xe6, 0x87, 0x2c, 0xa1, + 0x4e, 0x9e, 0x82, 0x3d, 0x4b, 0x4d, 0x24, 0xc3, 0xac, 0x0b, 0x10, 0x8e, 0x49, 0xd1, 0x93, 0x9c, + 0x5e, 0x78, 0x27, 0xf3, 0x4f, 0xf2, 0xe4, 0x75, 0xc7, 0x4e, 0x72, 0x7a, 0xd9, 0x15, 0xc5, 0x55, + 0xa7, 0xa2, 0x82, 0xb3, 0x64, 0x20, 0x39, 0xfd, 0x52, 0x61, 0xc5, 0xd3, 0xfd, 0x52, 0x20, 0x1c, + 0x93, 0x62, 0x57, 0x31, 0x0b, 0x4d, 0x77, 0xba, 0xcd, 0x55, 0x4c, 0x11, 0x32, 0xae, 0x62, 0x2d, + 0x74, 0x9d, 0xfd, 0x83, 0x05, 0x38, 0xdd, 0x7e, 0xdf, 0xc6, 0x3a, 0xb4, 0x72, 0x6c, 0xb3, 0x94, + 0xd0, 0xa1, 0x71, 0x89, 0x4e, 0x8c, 0xd5, 0x75, 0xc0, 0xe1, 0x4b, 0x30, 0xa1, 0xdc, 0x11, 0xeb, + 0x6e, 0x75, 0x47, 0x4b, 0xce, 0xa9, 0x42, 0xf3, 0x54, 0x92, 0x08, 0x38, 0x5d, 0x07, 0xcd, 0xc2, + 0x98, 0x51, 0xb8, 0xbc, 0x20, 0x9e, 0xff, 0x71, 0x1a, 0x0b, 0x13, 0x8c, 0x93, 0xf8, 0xf6, 0xcf, + 0x5b, 0x70, 0x3c, 0x27, 0x0f, 0x7b, 0xd7, 0xf1, 0x74, 0xd7, 0x61, 0xac, 0x69, 0x56, 0xed, 0x10, + 0x02, 0xdc, 0xc8, 0xf6, 0xae, 0xfa, 0x9a, 0x00, 0xe0, 0x24, 0x51, 0xfb, 0x67, 0x0b, 0x70, 0xaa, + 0xad, 0x7d, 0x3d, 0xc2, 0x70, 0x6c, 0xa3, 0x11, 0x3a, 0xf3, 0x01, 0xa9, 0x11, 0x2f, 0x72, 0x9d, + 0x7a, 0xa5, 0x49, 0xaa, 0x9a, 0x16, 0x94, 0x19, 0xaa, 0x5f, 0x5a, 0xa9, 0xcc, 0xa6, 0x31, 0x70, + 0x4e, 0x4d, 0xb4, 0x04, 0x28, 0x0d, 0x11, 0x33, 0xcc, 0x9e, 0xb8, 0x69, 0x7a, 0x38, 0xa3, 0x06, + 0x7a, 0x11, 0x46, 0x94, 0xdd, 0xbe, 0x36, 0xe3, 0xec, 0x82, 0xc0, 0x3a, 0x00, 0x9b, 0x78, 0xe8, + 0x02, 0xcf, 0x6f, 0x24, 0x32, 0x61, 0x09, 0x95, 0xe9, 0x98, 0x4c, 0x5e, 0x24, 0x8a, 0xb1, 0x8e, + 0x33, 0x77, 0xf1, 0x77, 0xbe, 0x79, 0xfa, 0x03, 0xbf, 0xff, 0xcd, 0xd3, 0x1f, 0xf8, 0xc3, 0x6f, + 0x9e, 0xfe, 0xc0, 0xf7, 0xec, 0x9d, 0xb6, 0x7e, 0x67, 0xef, 0xb4, 0xf5, 0xfb, 0x7b, 0xa7, 0xad, + 0x3f, 0xdc, 0x3b, 0x6d, 0xfd, 0xd7, 0xbd, 0xd3, 0xd6, 0x17, 0xff, 0xe4, 0xf4, 0x07, 0xde, 0x44, + 0x71, 0x84, 0xea, 0xf3, 0x74, 0x76, 0xce, 0x6f, 0x5f, 0xf8, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x67, 0xd5, 0x38, 0x2d, 0xc3, 0x23, 0x01, 0x00, } func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { @@ -7600,6 +7868,41 @@ func (m *Affinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *AppArmorProfile) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AppArmorProfile) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AppArmorProfile) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LocalhostProfile != nil { + i -= len(*m.LocalhostProfile) + copy(dAtA[i:], *m.LocalhostProfile) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.LocalhostProfile))) + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *AttachedVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -8358,43 +8661,6 @@ func (m *CinderVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, 
nil } -func (m *ClaimSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClaimSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClaimSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ResourceClaimTemplateName != nil { - i -= len(*m.ResourceClaimTemplateName) - copy(dAtA[i:], *m.ResourceClaimTemplateName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimTemplateName))) - i-- - dAtA[i] = 0x12 - } - if m.ResourceClaimName != nil { - i -= len(*m.ResourceClaimName) - copy(dAtA[i:], *m.ResourceClaimName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimName))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *ClientIPConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9617,6 +9883,46 @@ func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.AllocatedResourcesStatus) > 0 { + for iNdEx := len(m.AllocatedResourcesStatus) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllocatedResourcesStatus[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + } + if m.User != nil { + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + if len(m.VolumeMounts) > 0 { + for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } if m.Resources != nil { { size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) @@ -9722,6 +10028,41 @@ func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ContainerUser) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerUser) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerUser) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Linux != nil { + { + size, err := m.Linux.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *DaemonEndpoint) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -11822,6 +12163,39 @@ func (m *ISCSIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ImageVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + 
+func (m *ImageVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.PullPolicy) + copy(dAtA[i:], m.PullPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PullPolicy))) + i-- + dAtA[i] = 0x12 + i -= len(m.Reference) + copy(dAtA[i:], m.Reference) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reference))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *KeyToPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -12278,6 +12652,42 @@ func (m *LimitRangeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *LinuxContainerUser) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LinuxContainerUser) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LinuxContainerUser) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.SupplementalGroups) > 0 { + for iNdEx := len(m.SupplementalGroups) - 1; iNdEx >= 0; iNdEx-- { + i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups[iNdEx])) + i-- + dAtA[i] = 0x18 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.GID)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.UID)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *List) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -13110,6 +13520,39 @@ func (m *NodeDaemonEndpoints) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *NodeFeatures) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeFeatures) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeFeatures) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SupplementalGroupsPolicy != nil { + i-- + if *m.SupplementalGroupsPolicy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *NodeList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -13185,7 +13628,7 @@ func (m *NodeProxyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *NodeResources) Marshal() (dAtA []byte, err error) { +func (m *NodeRuntimeHandler) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -13195,44 +13638,75 @@ func (m *NodeResources) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *NodeResources) MarshalTo(dAtA []byte) (int, error) { +func (m *NodeRuntimeHandler) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NodeResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *NodeRuntimeHandler) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Capacity) > 0 { - keysForCapacity := make([]string, 0, len(m.Capacity)) - for k := range m.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) - } - 
github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { - v := m.Capacity[ResourceName(keysForCapacity[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.Features != nil { + { + size, err := m.Features.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 - i -= len(keysForCapacity[iNdEx]) - copy(dAtA[i:], keysForCapacity[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NodeRuntimeHandlerFeatures) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeRuntimeHandlerFeatures) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeRuntimeHandlerFeatures) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.UserNamespaces != nil { + i-- + if *m.UserNamespaces { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.RecursiveReadOnlyMounts != nil { + i-- + if *m.RecursiveReadOnlyMounts { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 } return len(dAtA) - i, nil } @@ -13468,6 +13942,32 @@ func (m *NodeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Features != nil { + { + size, err := m.Features.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + if len(m.RuntimeHandlers) > 0 { + for iNdEx := len(m.RuntimeHandlers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RuntimeHandlers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } if m.Config != nil { { size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) @@ -15717,16 +16217,20 @@ func (m *PodResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - { - size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.ResourceClaimTemplateName != nil { + i -= len(*m.ResourceClaimTemplateName) + copy(dAtA[i:], *m.ResourceClaimTemplateName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimTemplateName))) + i-- + dAtA[i] = 0x22 + } + if m.ResourceClaimName != nil { + i -= len(*m.ResourceClaimName) + copy(dAtA[i:], *m.ResourceClaimName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimName))) + i-- + dAtA[i] = 0x1a } - i-- - dAtA[i] = 0x12 i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) @@ -15818,6 +16322,25 @@ func (m *PodSecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.SupplementalGroupsPolicy != nil { + i -= 
len(*m.SupplementalGroupsPolicy) + copy(dAtA[i:], *m.SupplementalGroupsPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SupplementalGroupsPolicy))) + i-- + dAtA[i] = 0x62 + } + if m.AppArmorProfile != nil { + { + size, err := m.AppArmorProfile.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } if m.SeccompProfile != nil { { size, err := m.SeccompProfile.MarshalToSizedBuffer(dAtA[:i]) @@ -17681,6 +18204,11 @@ func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.Request) + copy(dAtA[i:], m.Request) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) + i-- + dAtA[i] = 0x12 i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) @@ -17732,6 +18260,39 @@ func (m *ResourceFieldSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ResourceHealth) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceHealth) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceHealth) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Health) + copy(dAtA[i:], m.Health) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Health))) + i-- + dAtA[i] = 0x12 + i -= len(m.ResourceID) + copy(dAtA[i:], m.ResourceID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *ResourceQuota) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -18081,6 +18642,48 @@ func (m *ResourceRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ResourceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *SELinuxOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -18813,6 +19416,18 @@ func (m *SecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.AppArmorProfile != nil { + { + size, err := m.AppArmorProfile.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } if m.SeccompProfile != nil { { size, err := m.SeccompProfile.MarshalToSizedBuffer(dAtA[:i]) @@ -19314,6 +19929,15 @@ func (m *ServiceSpec) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.TrafficDistribution != nil { + i -= len(*m.TrafficDistribution) + copy(dAtA[i:], *m.TrafficDistribution) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.TrafficDistribution))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } if m.InternalTrafficPolicy != nil { i -= len(*m.InternalTrafficPolicy) copy(dAtA[i:], *m.InternalTrafficPolicy) @@ -20208,6 +20832,13 @@ func (m *VolumeMount) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.RecursiveReadOnly != nil { + i -= len(*m.RecursiveReadOnly) + copy(dAtA[i:], *m.RecursiveReadOnly) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RecursiveReadOnly))) + i-- + dAtA[i] = 0x3a + } i -= len(m.SubPathExpr) copy(dAtA[i:], m.SubPathExpr) i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPathExpr))) @@ -20246,6 +20877,54 @@ func (m *VolumeMount) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *VolumeMountStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeMountStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeMountStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RecursiveReadOnly != nil { + i -= len(*m.RecursiveReadOnly) + copy(dAtA[i:], *m.RecursiveReadOnly) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RecursiveReadOnly))) + i-- + dAtA[i] = 0x22 + } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i -= len(m.MountPath) + copy(dAtA[i:], m.MountPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountPath))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *VolumeNodeAffinity) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -20465,6 +21144,20 @@ func (m *VolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Image != nil { + { + size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf2 + } if m.Ephemeral != nil { { size, err := m.Ephemeral.MarshalToSizedBuffer(dAtA[:i]) @@ -21024,6 +21717,21 @@ func (m *Affinity) Size() (n int) { return n } +func (m *AppArmorProfile) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.LocalhostProfile != nil { + l = len(*m.LocalhostProfile) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *AttachedVolume) Size() (n int) { if m == nil { return 0 @@ -21308,23 +22016,6 @@ func (m *CinderVolumeSource) Size() (n int) { return n } -func (m *ClaimSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ResourceClaimName != nil { - l = len(*m.ResourceClaimName) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ResourceClaimTemplateName != nil { - l = len(*m.ResourceClaimTemplateName) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - func (m *ClientIPConfig) Size() (n int) { if m == nil { return 0 @@ -21803,6 +22494,35 @@ func (m 
*ContainerStatus) Size() (n int) { l = m.Resources.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.User != nil { + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AllocatedResourcesStatus) > 0 { + for _, e := range m.AllocatedResourcesStatus { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ContainerUser) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Linux != nil { + l = m.Linux.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -22587,6 +23307,19 @@ func (m *ISCSIVolumeSource) Size() (n int) { return n } +func (m *ImageVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Reference) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PullPolicy) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *KeyToPath) Size() (n int) { if m == nil { return 0 @@ -22746,6 +23479,22 @@ func (m *LimitRangeSpec) Size() (n int) { return n } +func (m *LinuxContainerUser) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.UID)) + n += 1 + sovGenerated(uint64(m.GID)) + if len(m.SupplementalGroups) > 0 { + for _, e := range m.SupplementalGroups { + n += 1 + sovGenerated(uint64(e)) + } + } + return n +} + func (m *List) Size() (n int) { if m == nil { return 0 @@ -23052,6 +23801,18 @@ func (m *NodeDaemonEndpoints) Size() (n int) { return n } +func (m *NodeFeatures) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SupplementalGroupsPolicy != nil { + n += 2 + } + return n +} + func (m *NodeList) Size() (n int) { if m == nil { return 0 @@ -23080,20 +23841,32 @@ func (m *NodeProxyOptions) Size() (n int) { return n } -func (m *NodeResources) Size() (n int) { +func (m *NodeRuntimeHandler) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Capacity) > 0 { - for k, v := range m.Capacity { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Features != nil { + l = m.Features.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NodeRuntimeHandlerFeatures) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RecursiveReadOnlyMounts != nil { + n += 2 + } + if m.UserNamespaces != nil { + n += 2 } return n } @@ -23249,6 +24022,16 @@ func (m *NodeStatus) Size() (n int) { l = m.Config.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.RuntimeHandlers) > 0 { + for _, e := range m.RuntimeHandlers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Features != nil { + l = m.Features.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -24012,8 +24795,14 @@ func (m *PodResourceClaim) Size() (n int) { _ = l l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - l = m.Source.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ResourceClaimName != nil { + l = len(*m.ResourceClaimName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResourceClaimTemplateName != nil { + l = len(*m.ResourceClaimTemplateName) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -24088,6 +24877,14 @@ func (m *PodSecurityContext) Size() (n int) { l = m.SeccompProfile.Size() n += 1 + l + 
sovGenerated(uint64(l)) } + if m.AppArmorProfile != nil { + l = m.AppArmorProfile.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SupplementalGroupsPolicy != nil { + l = len(*m.SupplementalGroupsPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -24727,6 +25524,8 @@ func (m *ResourceClaim) Size() (n int) { _ = l l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Request) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -24745,6 +25544,19 @@ func (m *ResourceFieldSelector) Size() (n int) { return n } +func (m *ResourceHealth) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ResourceID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Health) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *ResourceQuota) Size() (n int) { if m == nil { return 0 @@ -24865,6 +25677,23 @@ func (m *ResourceRequirements) Size() (n int) { return n } +func (m *ResourceStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *SELinuxOptions) Size() (n int) { if m == nil { return 0 @@ -25172,6 +26001,10 @@ func (m *SecurityContext) Size() (n int) { l = m.SeccompProfile.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.AppArmorProfile != nil { + l = m.AppArmorProfile.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -25386,6 +26219,10 @@ func (m *ServiceSpec) Size() (n int) { l = len(*m.InternalTrafficPolicy) n += 2 + l + sovGenerated(uint64(l)) } + if m.TrafficDistribution != nil { + l = len(*m.TrafficDistribution) + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -25684,6 +26521,28 @@ func (m *VolumeMount) Size() (n int) { } l = len(m.SubPathExpr) n += 1 + l + sovGenerated(uint64(l)) + if m.RecursiveReadOnly != nil { + l = len(*m.RecursiveReadOnly) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeMountStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MountPath) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.RecursiveReadOnly != nil { + l = len(*m.RecursiveReadOnly) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -25878,6 +26737,10 @@ func (m *VolumeSource) Size() (n int) { l = m.Ephemeral.Size() n += 2 + l + sovGenerated(uint64(l)) } + if m.Image != nil { + l = m.Image.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -25965,6 +26828,17 @@ func (this *Affinity) String() string { }, "") return s } +func (this *AppArmorProfile) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AppArmorProfile{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `LocalhostProfile:` + valueToStringGenerated(this.LocalhostProfile) + `,`, + `}`, + }, "") + return s +} func (this *AttachedVolume) String() string { if this == nil { return "nil" @@ -26162,17 +27036,6 @@ func (this *CinderVolumeSource) String() string { }, "") return s } -func (this *ClaimSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClaimSource{`, - `ResourceClaimName:` + valueToStringGenerated(this.ResourceClaimName) + `,`, - `ResourceClaimTemplateName:` + valueToStringGenerated(this.ResourceClaimTemplateName) + `,`, - `}`, - }, "") - return s -} func (this *ClientIPConfig) String() string { if this == nil { 
return "nil" @@ -26515,6 +27378,16 @@ func (this *ContainerStatus) String() string { if this == nil { return "nil" } + repeatedStringForVolumeMounts := "[]VolumeMountStatus{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += strings.Replace(strings.Replace(f.String(), "VolumeMountStatus", "VolumeMountStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumeMounts += "}" + repeatedStringForAllocatedResourcesStatus := "[]ResourceStatus{" + for _, f := range this.AllocatedResourcesStatus { + repeatedStringForAllocatedResourcesStatus += strings.Replace(strings.Replace(f.String(), "ResourceStatus", "ResourceStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForAllocatedResourcesStatus += "}" keysForAllocatedResources := make([]string, 0, len(this.AllocatedResources)) for k := range this.AllocatedResources { keysForAllocatedResources = append(keysForAllocatedResources, string(k)) @@ -26537,6 +27410,19 @@ func (this *ContainerStatus) String() string { `Started:` + valueToStringGenerated(this.Started) + `,`, `AllocatedResources:` + mapStringForAllocatedResources + `,`, `Resources:` + strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1) + `,`, + `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, + `User:` + strings.Replace(this.User.String(), "ContainerUser", "ContainerUser", 1) + `,`, + `AllocatedResourcesStatus:` + repeatedStringForAllocatedResourcesStatus + `,`, + `}`, + }, "") + return s +} +func (this *ContainerUser) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerUser{`, + `Linux:` + strings.Replace(this.Linux.String(), "LinuxContainerUser", "LinuxContainerUser", 1) + `,`, `}`, }, "") return s @@ -27120,6 +28006,17 @@ func (this *ISCSIVolumeSource) String() string { }, "") return s } +func (this *ImageVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageVolumeSource{`, + `Reference:` + fmt.Sprintf("%v", this.Reference) + `,`, + `PullPolicy:` + fmt.Sprintf("%v", this.PullPolicy) + `,`, + `}`, + }, "") + return s +} func (this *KeyToPath) String() string { if this == nil { return "nil" @@ -27263,6 +28160,18 @@ func (this *LimitRangeSpec) String() string { }, "") return s } +func (this *LinuxContainerUser) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LinuxContainerUser{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `GID:` + fmt.Sprintf("%v", this.GID) + `,`, + `SupplementalGroups:` + fmt.Sprintf("%v", this.SupplementalGroups) + `,`, + `}`, + }, "") + return s +} func (this *List) String() string { if this == nil { return "nil" @@ -27511,6 +28420,16 @@ func (this *NodeDaemonEndpoints) String() string { }, "") return s } +func (this *NodeFeatures) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeFeatures{`, + `SupplementalGroupsPolicy:` + valueToStringGenerated(this.SupplementalGroupsPolicy) + `,`, + `}`, + }, "") + return s +} func (this *NodeList) String() string { if this == nil { return "nil" @@ -27537,22 +28456,24 @@ func (this *NodeProxyOptions) String() string { }, "") return s } -func (this *NodeResources) String() string { +func (this *NodeRuntimeHandler) String() string { if this == nil { return "nil" } - keysForCapacity := make([]string, 0, len(this.Capacity)) - for k := range this.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - 
mapStringForCapacity := "ResourceList{" - for _, k := range keysForCapacity { - mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + s := strings.Join([]string{`&NodeRuntimeHandler{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Features:` + strings.Replace(this.Features.String(), "NodeRuntimeHandlerFeatures", "NodeRuntimeHandlerFeatures", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeRuntimeHandlerFeatures) String() string { + if this == nil { + return "nil" } - mapStringForCapacity += "}" - s := strings.Join([]string{`&NodeResources{`, - `Capacity:` + mapStringForCapacity + `,`, + s := strings.Join([]string{`&NodeRuntimeHandlerFeatures{`, + `RecursiveReadOnlyMounts:` + valueToStringGenerated(this.RecursiveReadOnlyMounts) + `,`, + `UserNamespaces:` + valueToStringGenerated(this.UserNamespaces) + `,`, `}`, }, "") return s @@ -27650,6 +28571,11 @@ func (this *NodeStatus) String() string { repeatedStringForVolumesAttached += strings.Replace(strings.Replace(f.String(), "AttachedVolume", "AttachedVolume", 1), `&`, ``, 1) + "," } repeatedStringForVolumesAttached += "}" + repeatedStringForRuntimeHandlers := "[]NodeRuntimeHandler{" + for _, f := range this.RuntimeHandlers { + repeatedStringForRuntimeHandlers += strings.Replace(strings.Replace(f.String(), "NodeRuntimeHandler", "NodeRuntimeHandler", 1), `&`, ``, 1) + "," + } + repeatedStringForRuntimeHandlers += "}" keysForCapacity := make([]string, 0, len(this.Capacity)) for k := range this.Capacity { keysForCapacity = append(keysForCapacity, string(k)) @@ -27682,6 +28608,8 @@ func (this *NodeStatus) String() string { `VolumesInUse:` + fmt.Sprintf("%v", this.VolumesInUse) + `,`, `VolumesAttached:` + repeatedStringForVolumesAttached + `,`, `Config:` + strings.Replace(this.Config.String(), "NodeConfigStatus", "NodeConfigStatus", 1) + `,`, + `RuntimeHandlers:` + repeatedStringForRuntimeHandlers + `,`, + `Features:` + strings.Replace(this.Features.String(), "NodeFeatures", "NodeFeatures", 1) + `,`, `}`, }, "") return s @@ -28210,7 +29138,8 @@ func (this *PodResourceClaim) String() string { } s := strings.Join([]string{`&PodResourceClaim{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Source:` + strings.Replace(strings.Replace(this.Source.String(), "ClaimSource", "ClaimSource", 1), `&`, ``, 1) + `,`, + `ResourceClaimName:` + valueToStringGenerated(this.ResourceClaimName) + `,`, + `ResourceClaimTemplateName:` + valueToStringGenerated(this.ResourceClaimTemplateName) + `,`, `}`, }, "") return s @@ -28256,6 +29185,8 @@ func (this *PodSecurityContext) String() string { `WindowsOptions:` + strings.Replace(this.WindowsOptions.String(), "WindowsSecurityContextOptions", "WindowsSecurityContextOptions", 1) + `,`, `FSGroupChangePolicy:` + valueToStringGenerated(this.FSGroupChangePolicy) + `,`, `SeccompProfile:` + strings.Replace(this.SeccompProfile.String(), "SeccompProfile", "SeccompProfile", 1) + `,`, + `AppArmorProfile:` + strings.Replace(this.AppArmorProfile.String(), "AppArmorProfile", "AppArmorProfile", 1) + `,`, + `SupplementalGroupsPolicy:` + valueToStringGenerated(this.SupplementalGroupsPolicy) + `,`, `}`, }, "") return s @@ -28757,6 +29688,7 @@ func (this *ResourceClaim) String() string { } s := strings.Join([]string{`&ResourceClaim{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Request:` + fmt.Sprintf("%v", this.Request) + `,`, `}`, }, "") return s @@ -28773,6 +29705,17 @@ func (this *ResourceFieldSelector) String() string { }, "") return s } +func (this *ResourceHealth) String() string { + if 
this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceHealth{`, + `ResourceID:` + fmt.Sprintf("%v", this.ResourceID) + `,`, + `Health:` + fmt.Sprintf("%v", this.Health) + `,`, + `}`, + }, "") + return s +} func (this *ResourceQuota) String() string { if this == nil { return "nil" @@ -28891,6 +29834,22 @@ func (this *ResourceRequirements) String() string { }, "") return s } +func (this *ResourceStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForResources := "[]ResourceHealth{" + for _, f := range this.Resources { + repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "ResourceHealth", "ResourceHealth", 1), `&`, ``, 1) + "," + } + repeatedStringForResources += "}" + s := strings.Join([]string{`&ResourceStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Resources:` + repeatedStringForResources + `,`, + `}`, + }, "") + return s +} func (this *SELinuxOptions) String() string { if this == nil { return "nil" @@ -29115,6 +30074,7 @@ func (this *SecurityContext) String() string { `ProcMount:` + valueToStringGenerated(this.ProcMount) + `,`, `WindowsOptions:` + strings.Replace(this.WindowsOptions.String(), "WindowsSecurityContextOptions", "WindowsSecurityContextOptions", 1) + `,`, `SeccompProfile:` + strings.Replace(this.SeccompProfile.String(), "SeccompProfile", "SeccompProfile", 1) + `,`, + `AppArmorProfile:` + strings.Replace(this.AppArmorProfile.String(), "AppArmorProfile", "AppArmorProfile", 1) + `,`, `}`, }, "") return s @@ -29272,6 +30232,7 @@ func (this *ServiceSpec) String() string { `AllocateLoadBalancerNodePorts:` + valueToStringGenerated(this.AllocateLoadBalancerNodePorts) + `,`, `LoadBalancerClass:` + valueToStringGenerated(this.LoadBalancerClass) + `,`, `InternalTrafficPolicy:` + valueToStringGenerated(this.InternalTrafficPolicy) + `,`, + `TrafficDistribution:` + valueToStringGenerated(this.TrafficDistribution) + `,`, `}`, }, "") return s @@ -29490,6 +30451,20 @@ func (this *VolumeMount) String() string { `SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`, `MountPropagation:` + valueToStringGenerated(this.MountPropagation) + `,`, `SubPathExpr:` + fmt.Sprintf("%v", this.SubPathExpr) + `,`, + `RecursiveReadOnly:` + valueToStringGenerated(this.RecursiveReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeMountStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeMountStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `RecursiveReadOnly:` + valueToStringGenerated(this.RecursiveReadOnly) + `,`, `}`, }, "") return s @@ -29583,6 +30558,7 @@ func (this *VolumeSource) String() string { `StorageOS:` + strings.Replace(this.StorageOS.String(), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`, `CSI:` + strings.Replace(this.CSI.String(), "CSIVolumeSource", "CSIVolumeSource", 1) + `,`, `Ephemeral:` + strings.Replace(this.Ephemeral.String(), "EphemeralVolumeSource", "EphemeralVolumeSource", 1) + `,`, + `Image:` + strings.Replace(this.Image.String(), "ImageVolumeSource", "ImageVolumeSource", 1) + `,`, `}`, }, "") return s @@ -29943,7 +30919,7 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { } return nil } -func (m *AttachedVolume) Unmarshal(dAtA []byte) error { +func (m *AppArmorProfile) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29966,15 +30942,15 @@ func (m *AttachedVolume) Unmarshal(dAtA 
[]byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AttachedVolume: wiretype end group for non-group") + return fmt.Errorf("proto: AppArmorProfile: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AttachedVolume: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AppArmorProfile: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -30002,11 +30978,11 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = UniqueVolumeName(dAtA[iNdEx:postIndex]) + m.Type = AppArmorProfileType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LocalhostProfile", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -30034,7 +31010,8 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DevicePath = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.LocalhostProfile = &s iNdEx = postIndex default: iNdEx = preIndex @@ -30057,7 +31034,7 @@ func (m *AttachedVolume) Unmarshal(dAtA []byte) error { } return nil } -func (m *AvoidPods) Unmarshal(dAtA []byte) error { +func (m *AttachedVolume) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30080,17 +31057,17 @@ func (m *AvoidPods) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AvoidPods: wiretype end group for non-group") + return fmt.Errorf("proto: AttachedVolume: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AvoidPods: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AttachedVolume: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreferAvoidPods", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30100,25 +31077,55 @@ func (m *AvoidPods) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.PreferAvoidPods = append(m.PreferAvoidPods, PreferAvoidPodsEntry{}) - if err := m.PreferAvoidPods[len(m.PreferAvoidPods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Name = UniqueVolumeName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DevicePath = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -30141,7 +31148,7 @@ func (m *AvoidPods) Unmarshal(dAtA []byte) error { } return nil } -func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { +func (m *AvoidPods) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30164,17 +31171,17 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AzureDiskVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: AvoidPods: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AzureDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AvoidPods: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DiskName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PreferAvoidPods", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30184,27 +31191,111 @@ func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.DiskName = string(dAtA[iNdEx:postIndex]) + m.PreferAvoidPods = append(m.PreferAvoidPods, PreferAvoidPodsEntry{}) + if err := m.PreferAvoidPods[len(m.PreferAvoidPods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataDiskURI", wireType) 
+ return fmt.Errorf("proto: wrong wireType = %d for field DiskName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DiskName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataDiskURI", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -32486,122 +33577,6 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClaimSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClaimSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClaimSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ResourceClaimName = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimTemplateName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ResourceClaimTemplateName = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -36779,61 +37754,11 @@ func (m *ContainerStatus) 
Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonEndpoint: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonEndpoint: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) } - m.Port = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -36843,64 +37768,304 @@ func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Port |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.VolumeMounts = append(m.VolumeMounts, VolumeMountStatus{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DownwardAPIProjection: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DownwardAPIProjection: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.User == nil { + m.User = &ContainerUser{} + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResourcesStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllocatedResourcesStatus = append(m.AllocatedResourcesStatus, ResourceStatus{}) + if err := m.AllocatedResourcesStatus[len(m.AllocatedResourcesStatus)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerUser) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerUser: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerUser: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Linux", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Linux == nil { + m.Linux = &LinuxContainerUser{} + } + if err := m.Linux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { 
+ break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonEndpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonEndpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DownwardAPIProjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DownwardAPIProjection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -43495,6 +44660,120 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } return nil } +func (m *ImageVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reference = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PullPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PullPolicy = PullPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *KeyToPath) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -44989,6 +46268,170 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { } return nil } +func (m *LinuxContainerUser) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LinuxContainerUser: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LinuxContainerUser: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + m.UID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UID |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GID", wireType) + } + m.GID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GID |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType == 0 { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SupplementalGroups = append(m.SupplementalGroups, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.SupplementalGroups) == 0 { + m.SupplementalGroups = 
make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SupplementalGroups = append(m.SupplementalGroups, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *List) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -47477,6 +48920,77 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { } return nil } +func (m *NodeFeatures) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeFeatures: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeFeatures: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroupsPolicy", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.SupplementalGroupsPolicy = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *NodeList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -47676,7 +49190,7 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *NodeResources) Unmarshal(dAtA []byte) error { +func (m *NodeRuntimeHandler) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -47699,15 +49213,47 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NodeResources: wiretype end group for non-group") + return fmt.Errorf("proto: NodeRuntimeHandler: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NodeResources: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NodeRuntimeHandler: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Features", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -47734,106 +49280,105 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Capacity == nil { - m.Capacity = make(ResourceList) + if m.Features == nil { + m.Features = &NodeRuntimeHandlerFeatures{} } - var mapkey ResourceName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.Features.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeRuntimeHandlerFeatures) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeRuntimeHandlerFeatures: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeRuntimeHandlerFeatures: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RecursiveReadOnlyMounts", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 
{ - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break } } - m.Capacity[ResourceName(mapkey)] = *mapvalue - iNdEx = postIndex + b := bool(v != 0) + m.RecursiveReadOnlyMounts = &b + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UserNamespaces", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.UserNamespaces = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -49060,6 +50605,76 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeHandlers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuntimeHandlers = append(m.RuntimeHandlers, NodeRuntimeHandler{}) + if err := m.RuntimeHandlers[len(m.RuntimeHandlers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Features", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Features == nil { + m.Features = &NodeFeatures{} + } + if err := m.Features.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -55793,11 +57408,11 @@ func (m *PodResourceClaim) Unmarshal(dAtA []byte) error { } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + 
return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -55807,24 +57422,57 @@ func (m *PodResourceClaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + s := string(dAtA[iNdEx:postIndex]) + m.ResourceClaimName = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimTemplateName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } + s := string(dAtA[iNdEx:postIndex]) + m.ResourceClaimTemplateName = &s iNdEx = postIndex default: iNdEx = preIndex @@ -56405,6 +58053,75 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppArmorProfile", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AppArmorProfile == nil { + m.AppArmorProfile = &AppArmorProfile{} + } + if err := m.AppArmorProfile.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroupsPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := SupplementalGroupsPolicy(dAtA[iNdEx:postIndex]) + m.SupplementalGroupsPolicy = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -62033,91 +63750,123 @@ func (m *ResourceClaim) Unmarshal(dAtA []byte) error { } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if 
err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceFieldSelector: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceFieldSelector: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Request = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceFieldSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceFieldSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -62201,6 +63950,120 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { } return nil } +func (m *ResourceHealth) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceHealth: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceHealth: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceID = ResourceID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Health", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Health = ResourceHealthStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ResourceQuota) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -63364,7 +65227,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } return 
nil } -func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { +func (m *ResourceStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -63387,15 +65250,15 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SELinuxOptions: wiretype end group for non-group") + return fmt.Errorf("proto: ResourceStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SELinuxOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -63423,13 +65286,13 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.User = string(dAtA[iNdEx:postIndex]) + m.Name = ResourceName(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -63439,27 +65302,143 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Role = string(dAtA[iNdEx:postIndex]) + m.Resources = append(m.Resources, ResourceHealth{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SELinuxOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SELinuxOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -66074,175 +68053,9 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SerializedReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SerializedReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SerializedReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Reference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Service) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx 
:= 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Service: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Service: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AppArmorProfile", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -66269,40 +68082,10 @@ func (m *Service) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.AppArmorProfile == nil { + m.AppArmorProfile = &AppArmorProfile{} } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.AppArmorProfile.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -66327,7 +68110,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { } return nil } -func (m *ServiceAccount) Unmarshal(dAtA []byte) error { +func (m *SerializedReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -66350,15 +68133,15 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ServiceAccount: wiretype end group for non-group") + return fmt.Errorf("proto: SerializedReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceAccount: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SerializedReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reference", 
wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -66385,13 +68168,63 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Reference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Service) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Service: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Service: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -66418,14 +68251,13 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Secrets = append(m.Secrets, ObjectReference{}) - if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -66452,16 +68284,15 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ImagePullSecrets = append(m.ImagePullSecrets, LocalObjectReference{}) - if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AutomountServiceAccountToken", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -66471,13 +68302,25 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.AutomountServiceAccountToken = &b + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } 
+ iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -66499,7 +68342,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } return nil } -func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { +func (m *ServiceAccount) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -66522,15 +68365,15 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ServiceAccountList: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceAccount: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceAccountList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceAccount: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -66557,13 +68400,13 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -66590,66 +68433,16 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ServiceAccount{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Secrets = append(m.Secrets, ObjectReference{}) + if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceAccountTokenProjection: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceAccountTokenProjection: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Audience", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -66659,49 +68452,31 @@ func (m 
*ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Audience = string(dAtA[iNdEx:postIndex]) + m.ImagePullSecrets = append(m.ImagePullSecrets, LocalObjectReference{}) + if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExpirationSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ExpirationSeconds = &v - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AutomountServiceAccountToken", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -66711,24 +68486,13 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + b := bool(v != 0) + m.AutomountServiceAccountToken = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -66750,7 +68514,7 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { } return nil } -func (m *ServiceList) Unmarshal(dAtA []byte) error { +func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -66773,10 +68537,10 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ServiceList: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceAccountList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceAccountList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -66841,7 +68605,7 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Service{}) + m.Items = append(m.Items, ServiceAccount{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -66867,7 +68631,7 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ServicePort) Unmarshal(dAtA []byte) error { +func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -66890,15 +68654,15 @@ func (m *ServicePort) Unmarshal(dAtA 
[]byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ServicePort: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceAccountTokenProjection: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ServicePort: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceAccountTokenProjection: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Audience", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -66926,11 +68690,262 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Audience = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpirationSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ExpirationSeconds = &v + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Service{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServicePort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServicePort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServicePort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -67872,94 +69887,11 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { s := ServiceInternalTrafficPolicy(dAtA[iNdEx:postIndex]) m.InternalTrafficPolicy = &s iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire 
uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: + case 23: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TrafficDistribution", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -67969,25 +69901,141 @@ func (m *ServiceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, v1.Condition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.TrafficDistribution = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -70049,40 +72097,418 @@ func (m *Volume) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeSource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.VolumeSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.VolumeSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeDevice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) 
<< shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeDevice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeDevice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DevicePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeMount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeMount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeMount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v 
int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MountPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountPropagation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MountPropagationMode(dAtA[iNdEx:postIndex]) + m.MountPropagation = &s + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubPathExpr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubPathExpr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RecursiveReadOnly", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := RecursiveReadOnlyMode(dAtA[iNdEx:postIndex]) + m.RecursiveReadOnly = &s iNdEx = postIndex default: iNdEx = preIndex @@ -70105,7 +72531,7 @@ func (m *Volume) Unmarshal(dAtA []byte) error { } return nil } -func (m *VolumeDevice) Unmarshal(dAtA []byte) error { +func (m *VolumeMountStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -70128,10 +72554,10 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VolumeDevice: wiretype end group for non-group") + return fmt.Errorf("proto: VolumeMountStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeDevice: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VolumeMountStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -70168,89 +72594,7 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DevicePath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeMount) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeMount: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeMount: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MountPath", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -70278,9 +72622,9 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.MountPath = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } @@ -70300,106 +72644,9 @@ func (m *VolumeMount) 
Unmarshal(dAtA []byte) error { } } m.ReadOnly = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MountPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MountPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SubPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SubPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MountPropagation", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := MountPropagationMode(dAtA[iNdEx:postIndex]) - m.MountPropagation = &s - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SubPathExpr", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RecursiveReadOnly", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -70427,7 +72674,8 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SubPathExpr = string(dAtA[iNdEx:postIndex]) + s := RecursiveReadOnlyMode(dAtA[iNdEx:postIndex]) + m.RecursiveReadOnly = &s iNdEx = postIndex default: iNdEx = preIndex @@ -72147,6 +74395,42 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 30: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Image == nil { + m.Image = &ImageVolumeSource{} + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err 
+ } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index d099238cdf..68ac80ed0b 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -77,6 +77,25 @@ message Affinity { optional PodAntiAffinity podAntiAffinity = 3; } +// AppArmorProfile defines a pod or container's AppArmor settings. +// +union +message AppArmorProfile { + // type indicates which kind of AppArmor profile will be applied. + // Valid options are: + // Localhost - a profile pre-loaded on the node. + // RuntimeDefault - the container runtime's default profile. + // Unconfined - no AppArmor enforcement. + // +unionDiscriminator + optional string type = 1; + + // localhostProfile indicates a profile loaded on the node that should be used. + // The profile must be preconfigured on the node to work. + // Must match the loaded name of the profile. + // Must be set if and only if type is "Localhost". + // +optional + optional string localhostProfile = 2; +} + // AttachedVolume describes a volume attached to a node message AttachedVolume { // Name of the attached volume @@ -93,6 +112,7 @@ message AvoidPods { // Bounded-sized list of signatures of pods that should avoid this node, sorted // in timestamp order from oldest to newest. Size of the slice is unspecified. // +optional + // +listType=atomic repeated PreferAvoidPodsEntry preferAvoidPods = 1; } @@ -106,20 +126,24 @@ message AzureDiskVolumeSource { // cachingMode is the Host Caching mode: None, Read Only, Read Write. // +optional + // +default=ref(AzureDataDiskCachingReadWrite) optional string cachingMode = 3; // fsType is Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // +optional + // +default="ext4" optional string fsType = 4; // readOnly Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional + // +default=false optional bool readOnly = 5; // kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared + // +default=ref(AzureSharedBlobDisk) optional string kind = 6; } @@ -162,7 +186,7 @@ message Binding { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The target object that you want to bind to the standard object. 
optional ObjectReference target = 2; @@ -269,10 +293,12 @@ message CSIVolumeSource { message Capabilities { // Added capabilities // +optional + // +listType=atomic repeated string add = 1; // Removed capabilities // +optional + // +listType=atomic repeated string drop = 2; } @@ -281,6 +307,7 @@ message Capabilities { message CephFSPersistentVolumeSource { // monitors is Required: Monitors is a collection of Ceph monitors // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // +listType=atomic repeated string monitors = 1; // path is Optional: Used as the mounted root, rather than the full Ceph tree, default is / @@ -314,6 +341,7 @@ message CephFSPersistentVolumeSource { message CephFSVolumeSource { // monitors is Required: Monitors is a collection of Ceph monitors // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // +listType=atomic repeated string monitors = 1; // path is Optional: Used as the mounted root, rather than the full Ceph tree, default is / @@ -398,30 +426,6 @@ message CinderVolumeSource { optional LocalObjectReference secretRef = 4; } -// ClaimSource describes a reference to a ResourceClaim. -// -// Exactly one of these fields should be set. Consumers of this type must -// treat an empty object as if it has an unknown value. -message ClaimSource { - // ResourceClaimName is the name of a ResourceClaim object in the same - // namespace as this pod. - optional string resourceClaimName = 1; - - // ResourceClaimTemplateName is the name of a ResourceClaimTemplate - // object in the same namespace as this pod. - // - // The template will be used to create a new ResourceClaim, which will - // be bound to this pod. When this pod is deleted, the ResourceClaim - // will also be deleted. The pod name and resource name, along with a - // generated component, will be used to form a unique name for the - // ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. - // - // This field is immutable and no changes will be made to the - // corresponding ResourceClaim by the control plane after creating the - // ResourceClaim. - optional string resourceClaimTemplateName = 2; -} - // ClientIPConfig represents the configurations of Client IP based session affinity. message ClientIPConfig { // timeoutSeconds specifies the seconds of ClientIP type session sticky time. @@ -451,7 +455,7 @@ message ClusterTrustBundleProjection { // interpreted as "match nothing". If set but empty, interpreted as "match // everything". // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 3; // If true, don't block pod startup if the referenced ClusterTrustBundle(s) // aren't available. If using name, then the named ClusterTrustBundle is @@ -492,12 +496,14 @@ message ComponentStatus { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // List of component conditions observed // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated ComponentCondition conditions = 2; } @@ -507,7 +513,7 @@ message ComponentStatusList { // Standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ComponentStatus objects. repeated ComponentStatus items = 2; @@ -518,7 +524,7 @@ message ConfigMap { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Immutable, if set to true, ensures that data stored in the ConfigMap cannot // be updated (only object metadata can be modified). @@ -578,7 +584,7 @@ message ConfigMapKeySelector { message ConfigMapList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of ConfigMaps. repeated ConfigMap items = 2; @@ -628,6 +634,7 @@ message ConfigMapProjection { // the volume setup will error unless it is marked optional. Paths must be // relative and may not contain the '..' path or start with '..'. // +optional + // +listType=atomic repeated KeyToPath items = 2; // optional specify whether the ConfigMap or its keys must be defined @@ -652,6 +659,7 @@ message ConfigMapVolumeSource { // the volume setup will error unless it is marked optional. Paths must be // relative and may not contain the '..' path or start with '..'. // +optional + // +listType=atomic repeated KeyToPath items = 2; // defaultMode is optional: mode bits used to set permissions on created files by default. @@ -692,6 +700,7 @@ message Container { // of whether the variable exists or not. Cannot be updated. // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional + // +listType=atomic repeated string command = 3; // Arguments to the entrypoint. @@ -703,6 +712,7 @@ message Container { // of whether the variable exists or not. Cannot be updated. // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional + // +listType=atomic repeated string args = 4; // Container's working directory. @@ -734,6 +744,7 @@ message Container { // Values defined by an Env with a duplicate key will take precedence. // Cannot be updated. // +optional + // +listType=atomic repeated EnvFromSource envFrom = 19; // List of environment variables to set in the container. @@ -741,6 +752,8 @@ message Container { // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated EnvVar env = 7; // Compute Resources required by this container. @@ -779,11 +792,15 @@ message Container { // +optional // +patchMergeKey=mountPath // +patchStrategy=merge + // +listType=map + // +listMapKey=mountPath repeated VolumeMount volumeMounts = 9; // volumeDevices is the list of block devices to be used by the container. // +patchMergeKey=devicePath // +patchStrategy=merge + // +listType=map + // +listMapKey=devicePath // +optional repeated VolumeDevice volumeDevices = 21; @@ -877,6 +894,7 @@ message ContainerImage { // Names by which this image is known. // e.g. 
["kubernetes.example/hyperkube:v1.0.7", "cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7"] // +optional + // +listType=atomic repeated string names = 1; // The size of the image in bytes. @@ -946,7 +964,7 @@ message ContainerState { message ContainerStateRunning { // Time at which the container was last (re-)started // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 1; } // ContainerStateTerminated is a terminated state of a container. @@ -968,11 +986,11 @@ message ContainerStateTerminated { // Time at which previous execution of the container started // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 5; // Time at which the container last terminated // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 6; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 6; // Container's ID in the format '://' // +optional @@ -1055,13 +1073,45 @@ message ContainerStatus { // and after successfully admitting desired pod resize. // +featureGate=InPlacePodVerticalScaling // +optional - map allocatedResources = 10; + map allocatedResources = 10; // Resources represents the compute resource requests and limits that have been successfully // enacted on the running container after it has been started or has been successfully resized. // +featureGate=InPlacePodVerticalScaling // +optional optional ResourceRequirements resources = 11; + + // Status of volume mounts. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + // +listType=map + // +listMapKey=mountPath + // +featureGate=RecursiveReadOnlyMounts + repeated VolumeMountStatus volumeMounts = 12; + + // User represents user identity information initially attached to the first process of the container + // +featureGate=SupplementalGroupsPolicy + // +optional + optional ContainerUser user = 13; + + // AllocatedResourcesStatus represents the status of various resources + // allocated for this Pod. + // +featureGate=ResourceHealthStatus + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + repeated ResourceStatus allocatedResourcesStatus = 14; +} + +// ContainerUser represents user identity information +message ContainerUser { + // Linux holds user identity information initially attached to the first process of the containers in Linux. + // Note that the actual running identity can be changed if the process has enough privilege to do so. + // +optional + optional LinuxContainerUser linux = 1; } // DaemonEndpoint contains information about a single Daemon endpoint. @@ -1076,6 +1126,7 @@ message DaemonEndpoint { message DownwardAPIProjection { // Items is a list of DownwardAPIVolume file // +optional + // +listType=atomic repeated DownwardAPIVolumeFile items = 1; } @@ -1084,7 +1135,7 @@ message DownwardAPIVolumeFile { // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' optional string path = 1; - // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. + // Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported. 
// +optional optional ObjectFieldSelector fieldRef = 2; @@ -1108,6 +1159,7 @@ message DownwardAPIVolumeFile { message DownwardAPIVolumeSource { // Items is a list of downward API volume file // +optional + // +listType=atomic repeated DownwardAPIVolumeFile items = 1; // Optional: mode bits to use on created files by default. Must be a @@ -1139,7 +1191,7 @@ message EmptyDirVolumeSource { // The default is nil which means that the limit is undefined. // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity sizeLimit = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity sizeLimit = 2; } // EndpointAddress is a tuple that describes single IP address. @@ -1218,16 +1270,19 @@ message EndpointSubset { // IP addresses which offer the related ports that are marked as ready. These endpoints // should be considered safe for load balancers and clients to utilize. // +optional + // +listType=atomic repeated EndpointAddress addresses = 1; // IP addresses which offer the related ports but are not currently marked as ready // because they have not yet finished starting, have recently failed a readiness check, // or have recently failed a liveness check. // +optional + // +listType=atomic repeated EndpointAddress notReadyAddresses = 2; // Port numbers available on the related IP addresses. // +optional + // +listType=atomic repeated EndpointPort ports = 3; } @@ -1248,7 +1303,7 @@ message Endpoints { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The set of all endpoints is the union of all subsets. Addresses are placed into // subsets according to the IPs they share. A single address with multiple ports, @@ -1258,6 +1313,7 @@ message Endpoints { // NotReadyAddresses in the same subset. // Sets of addresses and ports that comprise a service. // +optional + // +listType=atomic repeated EndpointSubset subsets = 2; } @@ -1266,7 +1322,7 @@ message EndpointsList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of endpoints. repeated Endpoints items = 2; @@ -1377,6 +1433,7 @@ message EphemeralContainerCommon { // of whether the variable exists or not. Cannot be updated. // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional + // +listType=atomic repeated string command = 3; // Arguments to the entrypoint. @@ -1388,6 +1445,7 @@ message EphemeralContainerCommon { // of whether the variable exists or not. Cannot be updated. // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional + // +listType=atomic repeated string args = 4; // Container's working directory. @@ -1413,6 +1471,7 @@ message EphemeralContainerCommon { // Values defined by an Env with a duplicate key will take precedence. // Cannot be updated. // +optional + // +listType=atomic repeated EnvFromSource envFrom = 19; // List of environment variables to set in the container. 
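Illustrative note (not part of the vendored diff): the AppArmorProfile message added in the hunk above surfaces as new fields on the pod- and container-level security contexts. A minimal Go sketch of setting it, assuming the generated names in k8s.io/api/core/v1 (AppArmorProfile, AppArmorProfileTypeRuntimeDefault, AppArmorProfileTypeLocalhost, and the AppArmorProfile fields on PodSecurityContext and SecurityContext) match this proto, as they do in upstream Kubernetes v1.30+:

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
)

func main() {
    // Pod-level default: use the container runtime's default AppArmor profile.
    podSec := &corev1.PodSecurityContext{
        AppArmorProfile: &corev1.AppArmorProfile{
            Type: corev1.AppArmorProfileTypeRuntimeDefault,
        },
    }

    // Container-level override: a profile pre-loaded on the node.
    // "my-preloaded-profile" is a hypothetical profile name; localhostProfile
    // must be set if and only if type is "Localhost".
    profile := "my-preloaded-profile"
    ctrSec := &corev1.SecurityContext{
        AppArmorProfile: &corev1.AppArmorProfile{
            Type:             corev1.AppArmorProfileTypeLocalhost,
            LocalhostProfile: &profile,
        },
    }

    fmt.Println(podSec.AppArmorProfile.Type, ctrSec.AppArmorProfile.Type)
}

As the proto comments above state, the container-level setting takes precedence over the pod-level one, and neither may be set when spec.os.name is windows.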
@@ -1420,6 +1479,8 @@ message EphemeralContainerCommon { // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated EnvVar env = 7; // Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources @@ -1446,11 +1507,15 @@ message EphemeralContainerCommon { // +optional // +patchMergeKey=mountPath // +patchStrategy=merge + // +listType=map + // +listMapKey=mountPath repeated VolumeMount volumeMounts = 9; // volumeDevices is the list of block devices to be used by the container. // +patchMergeKey=devicePath // +patchStrategy=merge + // +listType=map + // +listMapKey=devicePath // +optional repeated VolumeDevice volumeDevices = 21; @@ -1560,7 +1625,7 @@ message EphemeralVolumeSource { message Event { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The object that this event is about. optional ObjectReference involvedObject = 2; @@ -1582,11 +1647,11 @@ message Event { // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time firstTimestamp = 6; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time firstTimestamp = 6; // The time at which the most recent occurrence of this event was recorded. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTimestamp = 7; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTimestamp = 7; // The number of times this event has occurred. // +optional @@ -1598,7 +1663,7 @@ message Event { // Time when this Event was first observed. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 10; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 10; // Data about the Event series this event represents or nil if it's a singleton Event. // +optional @@ -1626,7 +1691,7 @@ message EventList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of events repeated Event items = 2; @@ -1639,7 +1704,7 @@ message EventSeries { optional int32 count = 1; // Time of the last occurrence observed - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; } // EventSource contains information for an event. @@ -1661,6 +1726,7 @@ message ExecAction { // a shell, you need to explicitly call out to that shell. // Exit status of 0 is treated as live/healthy and non-zero is unhealthy. // +optional + // +listType=atomic repeated string command = 1; } @@ -1670,6 +1736,7 @@ message ExecAction { message FCVolumeSource { // targetWWNs is Optional: FC target worldwide names (WWNs) // +optional + // +listType=atomic repeated string targetWWNs = 1; // lun is Optional: FC target lun number @@ -1691,6 +1758,7 @@ message FCVolumeSource { // wwids Optional: FC volume world wide identifiers (wwids) // Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. 
// +optional + // +listType=atomic repeated string wwids = 5; } @@ -1889,7 +1957,7 @@ message HTTPGetAction { // Name or number of the port to access on the container. // Number must be in the range 1 to 65535. // Name must be an IANA_SVC_NAME. - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; // Host name to connect to, defaults to the pod IP. You probably want to set // "Host" in httpHeaders instead. @@ -1903,6 +1971,7 @@ message HTTPGetAction { // Custom headers to set in the request. HTTP allows repeated headers. // +optional + // +listType=atomic repeated HTTPHeader httpHeaders = 5; } @@ -1920,15 +1989,18 @@ message HTTPHeader { // pod's hosts file. message HostAlias { // IP address of the host file entry. + // +required optional string ip = 1; // Hostnames for the above IP address. + // +listType=atomic repeated string hostnames = 2; } // HostIP represents a single IP address allocated to the host. message HostIP { // IP is the IP address assigned to the host + // +required optional string ip = 1; } @@ -1964,6 +2036,7 @@ message ISCSIPersistentVolumeSource { // iscsiInterface is the interface Name that uses an iSCSI transport. // Defaults to 'default' (tcp). // +optional + // +default="default" optional string iscsiInterface = 4; // fsType is the filesystem type of the volume that you want to mount. @@ -1982,6 +2055,7 @@ message ISCSIPersistentVolumeSource { // portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). // +optional + // +listType=atomic repeated string portals = 7; // chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication @@ -2020,6 +2094,7 @@ message ISCSIVolumeSource { // iscsiInterface is the interface Name that uses an iSCSI transport. // Defaults to 'default' (tcp). // +optional + // +default="default" optional string iscsiInterface = 4; // fsType is the filesystem type of the volume that you want to mount. @@ -2038,6 +2113,7 @@ message ISCSIVolumeSource { // portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). // +optional + // +listType=atomic repeated string portals = 7; // chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication @@ -2059,6 +2135,26 @@ message ISCSIVolumeSource { optional string initiatorName = 12; } +// ImageVolumeSource represents an image volume resource. +message ImageVolumeSource { + // Required: Image or artifact reference to be used. + // Behaves in the same way as pod.spec.containers[*].image. + // Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + // More info: https://kubernetes.io/docs/concepts/containers/images + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. + // +optional + optional string reference = 1; + + // Policy for pulling OCI objects. Possible values are: + // Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails. + // Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ // IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // +optional + optional string pullPolicy = 2; +} + // Maps a string key to a path within a volume. message KeyToPath { // key is the key to project. @@ -2132,7 +2228,7 @@ message LimitRange { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the limits enforced. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -2147,23 +2243,23 @@ message LimitRangeItem { // Max usage constraints on this kind by resource name. // +optional - map max = 2; + map max = 2; // Min usage constraints on this kind by resource name. // +optional - map min = 3; + map min = 3; // Default resource requirement limit value by resource name if resource limit is omitted. // +optional - map default = 4; + map default = 4; // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted. // +optional - map defaultRequest = 5; + map defaultRequest = 5; // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource. // +optional - map maxLimitRequestRatio = 6; + map maxLimitRequestRatio = 6; } // LimitRangeList is a list of LimitRange items. @@ -2171,7 +2267,7 @@ message LimitRangeList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of LimitRange objects. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ @@ -2181,18 +2277,33 @@ message LimitRangeList { // LimitRangeSpec defines a min/max usage limit for resources that match on kind. message LimitRangeSpec { // Limits is the list of LimitRangeItem objects that are enforced. + // +listType=atomic repeated LimitRangeItem limits = 1; } +// LinuxContainerUser represents user identity information in Linux containers +message LinuxContainerUser { + // UID is the primary uid initially attached to the first process in the container + optional int64 uid = 1; + + // GID is the primary gid initially attached to the first process in the container + optional int64 gid = 2; + + // SupplementalGroups are the supplemental groups initially attached to the first process in the container + // +optional + // +listType=atomic + repeated int64 supplementalGroups = 3; +} + // List holds a list of objects, which may not be known by the server. message List { // Standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of objects - repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; + repeated .k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; } // LoadBalancerIngress represents the status of a load-balancer ingress point: @@ -2229,6 +2340,7 @@ message LoadBalancerStatus { // Ingress is a list containing ingress points for the load-balancer. // Traffic intended for the service should be sent to these ingress points. // +optional + // +listType=atomic repeated LoadBalancerIngress ingress = 1; } @@ -2237,9 +2349,15 @@ message LoadBalancerStatus { // +structType=atomic message LocalObjectReference { // Name of the referent. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // This field is effectively required, but due to backwards compatibility is + // allowed to be empty. Instances of this type with an empty value here are + // almost certainly wrong. // TODO: Add other useful fields. apiVersion, kind, uid? + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names // +optional + // +default="" + // +kubebuilder:default="" + // TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. optional string name = 1; } @@ -2299,7 +2417,7 @@ message Namespace { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of the Namespace. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -2321,7 +2439,7 @@ message NamespaceCondition { optional string status = 2; // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // +optional optional string reason = 5; @@ -2335,7 +2453,7 @@ message NamespaceList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Namespace objects in the list. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ @@ -2347,6 +2465,7 @@ message NamespaceSpec { // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ // +optional + // +listType=atomic repeated string finalizers = 1; } @@ -2361,6 +2480,8 @@ message NamespaceStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated NamespaceCondition conditions = 2; } @@ -2370,7 +2491,7 @@ message Node { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of a node. // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -2414,6 +2535,7 @@ message NodeAffinity { // "weight" to the sum if the node matches the corresponding matchExpressions; the // node(s) with the highest sum are the most preferred. // +optional + // +listType=atomic repeated PreferredSchedulingTerm preferredDuringSchedulingIgnoredDuringExecution = 2; } @@ -2427,11 +2549,11 @@ message NodeCondition { // Last time we got an update on a given condition. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastHeartbeatTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastHeartbeatTime = 3; // Last time the condition transit from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // (brief) reason for the condition's last transition. // +optional @@ -2506,12 +2628,21 @@ message NodeDaemonEndpoints { optional DaemonEndpoint kubeletEndpoint = 1; } +// NodeFeatures describes the set of features implemented by the CRI implementation. +// The features contained in the NodeFeatures should depend only on the cri implementation +// independent of runtime handlers. +message NodeFeatures { + // SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser. + // +optional + optional bool supplementalGroupsPolicy = 1; +} + // NodeList is the whole list of all Nodes which have been registered with master. message NodeList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of nodes repeated Node items = 2; @@ -2524,11 +2655,29 @@ message NodeProxyOptions { optional string path = 1; } -// NodeResources is an object for conveying resource information about a node. -// see https://kubernetes.io/docs/concepts/architecture/nodes/#capacity for more details. -message NodeResources { - // Capacity represents the available resources of a node - map capacity = 1; +// NodeRuntimeHandler is a set of runtime handler information. +message NodeRuntimeHandler { + // Runtime handler name. + // Empty for the default runtime handler. + // +optional + optional string name = 1; + + // Supported features. + // +optional + optional NodeRuntimeHandlerFeatures features = 2; +} + +// NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler. +message NodeRuntimeHandlerFeatures { + // RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts. + // +featureGate=RecursiveReadOnlyMounts + // +optional + optional bool recursiveReadOnlyMounts = 1; + + // UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes. 
+ // +featureGate=UserNamespacesSupport + // +optional + optional bool userNamespaces = 2; } // A node selector represents the union of the results of one or more label queries @@ -2537,6 +2686,7 @@ message NodeResources { // +structType=atomic message NodeSelector { // Required. A list of node selector terms. The terms are ORed. + // +listType=atomic repeated NodeSelectorTerm nodeSelectorTerms = 1; } @@ -2556,6 +2706,7 @@ message NodeSelectorRequirement { // array must have a single element, which will be interpreted as an integer. // This array is replaced during a strategic merge patch. // +optional + // +listType=atomic repeated string values = 3; } @@ -2566,10 +2717,12 @@ message NodeSelectorRequirement { message NodeSelectorTerm { // A list of node selector requirements by node's labels. // +optional + // +listType=atomic repeated NodeSelectorRequirement matchExpressions = 1; // A list of node selector requirements by node's fields. // +optional + // +listType=atomic repeated NodeSelectorRequirement matchFields = 2; } @@ -2584,6 +2737,7 @@ message NodeSpec { // each of IPv4 and IPv6. // +optional // +patchStrategy=merge + // +listType=set repeated string podCIDRs = 7; // ID of the node assigned by the cloud provider in the format: :// @@ -2597,6 +2751,7 @@ message NodeSpec { // If specified, the node's taints. // +optional + // +listType=atomic repeated Taint taints = 5; // Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed. @@ -2612,14 +2767,14 @@ message NodeSpec { // NodeStatus is information about the current status of a node. message NodeStatus { // Capacity represents the total resources of a node. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity + // More info: https://kubernetes.io/docs/reference/node/node-status/#capacity // +optional - map capacity = 1; + map capacity = 1; // Allocatable represents the resources of a node that are available for scheduling. // Defaults to Capacity. // +optional - map allocatable = 2; + map allocatable = 2; // NodePhase is the recently observed lifecycle phase of the node. // More info: https://kubernetes.io/docs/concepts/nodes/node/#phase @@ -2632,6 +2787,8 @@ message NodeStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated NodeCondition conditions = 4; // List of addresses reachable to the node. @@ -2647,6 +2804,8 @@ message NodeStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated NodeAddress addresses = 5; // Endpoints of daemons running on the Node. @@ -2660,19 +2819,34 @@ message NodeStatus { // List of container images on this node // +optional + // +listType=atomic repeated ContainerImage images = 8; // List of attachable volumes in use (mounted) by the node. // +optional + // +listType=atomic repeated string volumesInUse = 9; // List of volumes that are attached to the node. // +optional + // +listType=atomic repeated AttachedVolume volumesAttached = 10; // Status of the config assigned to the node via the dynamic Kubelet config feature. // +optional optional NodeConfigStatus config = 11; + + // The available runtime handlers. + // +featureGate=RecursiveReadOnlyMounts + // +featureGate=UserNamespacesSupport + // +optional + // +listType=atomic + repeated NodeRuntimeHandler runtimeHandlers = 12; + + // Features describes the set of features implemented by the CRI implementation. 
+ // +featureGate=SupplementalGroupsPolicy + // +optional + optional NodeFeatures features = 13; } // NodeSystemInfo is a set of ids/uuids to uniquely identify the node. @@ -2702,7 +2876,7 @@ message NodeSystemInfo { // Kubelet Version reported by the node. optional string kubeletVersion = 7; - // KubeProxy Version reported by the node. + // Deprecated: KubeProxy Version reported by the node. optional string kubeProxyVersion = 8; // The Operating System reported by the node @@ -2790,7 +2964,7 @@ message PersistentVolume { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec defines a specification of a persistent volume owned by the cluster. // Provisioned by an administrator. @@ -2811,7 +2985,7 @@ message PersistentVolumeClaim { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec defines the desired characteristics of a volume requested by a pod author. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims @@ -2833,14 +3007,14 @@ message PersistentVolumeClaimCondition { // lastProbeTime is the time we probed the condition. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; // lastTransitionTime is the time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // reason is a unique, this should be a short, machine understandable string that gives the reason - // for condition's last transition. If it reports "ResizeStarted" that means the underlying + // for condition's last transition. If it reports "Resizing" that means the underlying // persistent volume is being resized. // +optional optional string reason = 5; @@ -2855,7 +3029,7 @@ message PersistentVolumeClaimList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of persistent volume claims. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims @@ -2868,11 +3042,12 @@ message PersistentVolumeClaimSpec { // accessModes contains the desired access modes the volume should have. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 // +optional + // +listType=atomic repeated string accessModes = 1; // selector is a label query over volumes to consider for binding. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // resources represents the minimum resources the volume should have. 
// If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements @@ -2943,8 +3118,8 @@ message PersistentVolumeClaimSpec { // If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be // set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource // exists. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass - // (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + // More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + // (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). // +featureGate=VolumeAttributesClass // +optional optional string volumeAttributesClassName = 9; @@ -2959,17 +3134,20 @@ message PersistentVolumeClaimStatus { // accessModes contains the actual access modes the volume backing the PVC has. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 // +optional + // +listType=atomic repeated string accessModes = 2; // capacity represents the actual resources of the underlying volume. // +optional - map capacity = 3; + map capacity = 3; // conditions is the current Condition of persistent volume claim. If underlying persistent volume is being - // resized then the Condition will be set to 'ResizeStarted'. + // resized then the Condition will be set to 'Resizing'. // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated PersistentVolumeClaimCondition conditions = 4; // allocatedResources tracks the resources allocated to a PVC including its capacity. @@ -2996,7 +3174,7 @@ message PersistentVolumeClaimStatus { // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. // +featureGate=RecoverVolumeExpansionFailure // +optional - map allocatedResources = 5; + map allocatedResources = 5; // allocatedResourceStatuses stores status of resource being resized for the given PVC. // Key names follow standard Kubernetes label syntax. Valid values are either: @@ -3040,14 +3218,14 @@ message PersistentVolumeClaimStatus { // currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. // When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim - // This is an alpha field and requires enabling VolumeAttributesClass feature. + // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional optional string currentVolumeAttributesClassName = 8; // ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. // When this is unset, there is no ModifyVolume operation being attempted. - // This is an alpha field and requires enabling VolumeAttributesClass feature. + // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional optional ModifyVolumeStatus modifyVolumeStatus = 9; @@ -3061,7 +3239,7 @@ message PersistentVolumeClaimTemplate { // validation. // // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The specification for the PersistentVolumeClaim. 
The entire content is // copied unchanged into the PVC that gets created from this @@ -3090,7 +3268,7 @@ message PersistentVolumeList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of persistent volumes. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes @@ -3209,7 +3387,7 @@ message PersistentVolumeSpec { // capacity is the description of the persistent volume's resources and capacity. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity // +optional - map capacity = 1; + map capacity = 1; // persistentVolumeSource is the actual volume backing the persistent volume. optional PersistentVolumeSource persistentVolumeSource = 2; @@ -3217,6 +3395,7 @@ message PersistentVolumeSpec { // accessModes contains all ways the volume can be mounted. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes // +optional + // +listType=atomic repeated string accessModes = 3; // claimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. @@ -3244,6 +3423,7 @@ message PersistentVolumeSpec { // simply fail if one is invalid. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options // +optional + // +listType=atomic repeated string mountOptions = 7; // volumeMode defines if a volume is intended to be used with a formatted filesystem @@ -3262,7 +3442,7 @@ message PersistentVolumeSpec { // after a volume has been updated successfully to a new class. // For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound // PersistentVolumeClaims during the binding process. - // This is an alpha field and requires enabling VolumeAttributesClass feature. + // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional optional string volumeAttributesClassName = 10; @@ -3286,10 +3466,8 @@ message PersistentVolumeStatus { // lastPhaseTransitionTime is the time the phase transitioned from one to another // and automatically resets to current time everytime a volume phase transitions. - // This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default). - // +featureGate=PersistentVolumeLastPhaseTransitionTime // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastPhaseTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastPhaseTransitionTime = 4; } // Represents a Photon Controller persistent disk resource. @@ -3309,7 +3487,7 @@ message Pod { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the pod. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -3335,6 +3513,7 @@ message PodAffinity { // When there are multiple elements, the lists of nodes corresponding to each // podAffinityTerm are intersected, i.e. all terms must be satisfied. 
// +optional + // +listType=atomic repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1; // The scheduler will prefer to schedule pods to nodes that satisfy @@ -3347,6 +3526,7 @@ message PodAffinity { // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the // node(s) with the highest sum are the most preferred. // +optional + // +listType=atomic repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2; } @@ -3360,13 +3540,14 @@ message PodAffinityTerm { // A label query over a set of resources, in this case pods. // If it's null, this PodAffinityTerm matches with no Pods. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1; // namespaces specifies a static list of namespace names that the term applies to. // The term is applied to the union of the namespaces listed in this field // and the ones selected by namespaceSelector. // null or empty namespaces list and null namespaceSelector means "this pod's namespace". // +optional + // +listType=atomic repeated string namespaces = 2; // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -3382,30 +3563,32 @@ message PodAffinityTerm { // null selector and null or empty namespaces list means "this pod's namespace". // An empty selector ({}) matches all namespaces. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 4; // MatchLabelKeys is a set of pod label keys to select which pods will // be taken into consideration. The keys are used to lookup values from the - // incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + // incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` // to select the group of existing pods which pods will be taken into consideration // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming // pod labels will be ignored. The default value is empty. - // The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - // Also, MatchLabelKeys cannot be set when LabelSelector isn't set. - // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + // The same key is forbidden to exist in both matchLabelKeys and labelSelector. + // Also, matchLabelKeys cannot be set when labelSelector isn't set. + // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + // // +listType=atomic // +optional repeated string matchLabelKeys = 5; // MismatchLabelKeys is a set of pod label keys to select which pods will // be taken into consideration. The keys are used to lookup values from the - // incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + // incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` // to select the group of existing pods which pods will be taken into consideration // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming // pod labels will be ignored. The default value is empty. - // The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. 
- // Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. - // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + // The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + // Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + // // +listType=atomic // +optional repeated string mismatchLabelKeys = 6; @@ -3421,6 +3604,7 @@ message PodAntiAffinity { // When there are multiple elements, the lists of nodes corresponding to each // podAffinityTerm are intersected, i.e. all terms must be satisfied. // +optional + // +listType=atomic repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1; // The scheduler will prefer to schedule pods to nodes that satisfy @@ -3433,6 +3617,7 @@ message PodAntiAffinity { // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the // node(s) with the highest sum are the most preferred. // +optional + // +listType=atomic repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2; } @@ -3482,11 +3667,11 @@ message PodCondition { // Last time we probed the condition. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // Unique, one-word, CamelCase reason for the condition's last transition. // +optional @@ -3504,12 +3689,14 @@ message PodDNSConfig { // This will be appended to the base nameservers generated from DNSPolicy. // Duplicated nameservers will be removed. // +optional + // +listType=atomic repeated string nameservers = 1; // A list of DNS search domains for host-name lookup. // This will be appended to the base search paths generated from DNSPolicy. // Duplicated search paths will be removed. // +optional + // +listType=atomic repeated string searches = 2; // A list of DNS resolver options. @@ -3517,6 +3704,7 @@ message PodDNSConfig { // Duplicated entries will be removed. Resolution options given in Options // will override those that appear in the base DNSPolicy. // +optional + // +listType=atomic repeated PodDNSConfigOption options = 3; } @@ -3558,12 +3746,14 @@ message PodExecOptions { optional string container = 5; // Command is the remote command to execute. argv array. Not executed within a shell. + // +listType=atomic repeated string command = 6; } // PodIP represents a single IP address allocated to the pod. message PodIP { // IP is the IP address assigned to the pod + // +required optional string ip = 1; } @@ -3572,7 +3762,7 @@ message PodList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of pods. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md @@ -3605,7 +3795,7 @@ message PodLogOptions { // If this value is in the future, no logs will be returned. // Only one of sinceSeconds or sinceTime may be specified. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5; // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line // of log output. Defaults to false. @@ -3652,6 +3842,7 @@ message PodPortForwardOptions { // List of ports to forward // Required when using WebSockets // +optional + // +listType=atomic repeated int32 ports = 1; } @@ -3668,7 +3859,10 @@ message PodReadinessGate { optional string conditionType = 1; } -// PodResourceClaim references exactly one ResourceClaim through a ClaimSource. +// PodResourceClaim references exactly one ResourceClaim, either directly +// or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim +// for the pod. +// // It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. // Containers that need access to the ResourceClaim reference it with this name. message PodResourceClaim { @@ -3676,8 +3870,29 @@ message PodResourceClaim { // This must be a DNS_LABEL. optional string name = 1; - // Source describes where to find the ResourceClaim. - optional ClaimSource source = 2; + // ResourceClaimName is the name of a ResourceClaim object in the same + // namespace as this pod. + // + // Exactly one of ResourceClaimName and ResourceClaimTemplateName must + // be set. + optional string resourceClaimName = 3; + + // ResourceClaimTemplateName is the name of a ResourceClaimTemplate + // object in the same namespace as this pod. + // + // The template will be used to create a new ResourceClaim, which will + // be bound to this pod. When this pod is deleted, the ResourceClaim + // will also be deleted. The pod name and resource name, along with a + // generated component, will be used to form a unique name for the + // ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + // + // This field is immutable and no changes will be made to the + // corresponding ResourceClaim by the control plane after creating the + // ResourceClaim. + // + // Exactly one of ResourceClaimName and ResourceClaimTemplateName must + // be set. + optional string resourceClaimTemplateName = 4; } // PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim @@ -3690,7 +3905,7 @@ message PodResourceClaimStatus { optional string name = 1; // ResourceClaimName is the name of the ResourceClaim that was - // generated for the Pod in the namespace of the Pod. It this is + // generated for the Pod in the namespace of the Pod. If this is // unset, then generating a ResourceClaim was not necessary. The // pod.spec.resourceClaims entry can be ignored in this case. // @@ -3752,16 +3967,29 @@ message PodSecurityContext { // +optional optional bool runAsNonRoot = 3; - // A list of groups applied to the first process run in each container, in addition - // to the container's primary GID, the fsGroup (if specified), and group memberships - // defined in the container image for the uid of the container process. If unspecified, - // no additional groups are added to any container. Note that group memberships - // defined in the container image for the uid of the container process are still effective, - // even if they are not included in this list. + // A list of groups applied to the first process run in each container, in + // addition to the container's primary GID and fsGroup (if specified). 
If + // the SupplementalGroupsPolicy feature is enabled, the + // supplementalGroupsPolicy field determines whether these are in addition + // to or instead of any group memberships defined in the container image. + // If unspecified, no additional groups are added, though group memberships + // defined in the container image may still be used, depending on the + // supplementalGroupsPolicy field. // Note that this field cannot be set when spec.os.name is windows. // +optional + // +listType=atomic repeated int64 supplementalGroups = 4; + // Defines how supplemental groups of the first container processes are calculated. + // Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + // (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + // and the container runtime must implement support for this feature. + // Note that this field cannot be set when spec.os.name is windows. + // TODO: update the default value to "Merge" when spec.os.name is not windows in v1.34 + // +featureGate=SupplementalGroupsPolicy + // +optional + optional string supplementalGroupsPolicy = 12; + // A special supplemental group that applies to all containers in a pod. // Some volume types allow the Kubelet to change the ownership of that volume // to be owned by the pod: @@ -3779,6 +4007,7 @@ message PodSecurityContext { // sysctls (by the container runtime) might fail to launch. // Note that this field cannot be set when spec.os.name is windows. // +optional + // +listType=atomic repeated Sysctl sysctls = 7; // fsGroupChangePolicy defines behavior of changing ownership and permission of the volume @@ -3795,6 +4024,11 @@ message PodSecurityContext { // Note that this field cannot be set when spec.os.name is windows. // +optional optional SeccompProfile seccompProfile = 10; + + // appArmorProfile is the AppArmor options to use by the containers in this pod. + // Note that this field cannot be set when spec.os.name is windows. + // +optional + optional AppArmorProfile appArmorProfile = 11; } // Describes the class of pods that should avoid this node. @@ -3802,7 +4036,7 @@ message PodSecurityContext { message PodSignature { // Reference to controller whose pods should avoid this node. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference podController = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference podController = 1; } // PodSpec is a description of a pod. @@ -3812,6 +4046,8 @@ message PodSpec { // +optional // +patchMergeKey=name // +patchStrategy=merge,retainKeys + // +listType=map + // +listMapKey=name repeated Volume volumes = 1; // List of initialization containers belonging to the pod. @@ -3829,6 +4065,8 @@ message PodSpec { // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated Container initContainers = 20; // List of containers belonging to the pod. @@ -3837,6 +4075,8 @@ message PodSpec { // Cannot be updated. // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated Container containers = 2; // List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing @@ -3846,6 +4086,8 @@ message PodSpec { // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated EphemeralContainer ephemeralContainers = 34; // Restart policy for all containers within the pod. 
@@ -3893,7 +4135,7 @@ message PodSpec { // +optional optional string serviceAccountName = 8; - // DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. + // DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. // Deprecated: Use serviceAccountName instead. // +k8s:conversion-gen=false // +optional @@ -3903,9 +4145,11 @@ message PodSpec { // +optional optional bool automountServiceAccountToken = 21; - // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - // the scheduler simply schedules this pod onto that node, assuming that it fits resource - // requirements. + // NodeName indicates in which node this pod is scheduled. + // If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + // Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + // This field should not be used to express a desire for the pod to be scheduled on a specific node. + // https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename // +optional optional string nodeName = 10; @@ -3948,6 +4192,8 @@ message PodSpec { // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated LocalObjectReference imagePullSecrets = 15; // Specifies the hostname of the Pod @@ -3971,13 +4217,16 @@ message PodSpec { // If specified, the pod's tolerations. // +optional + // +listType=atomic repeated Toleration tolerations = 22; // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts - // file if specified. This is only valid for non-hostNetwork pods. + // file if specified. // +optional // +patchMergeKey=ip // +patchStrategy=merge + // +listType=map + // +listMapKey=ip repeated HostAlias hostAliases = 23; // If specified, indicates the pod's priority. "system-node-critical" and @@ -4008,6 +4257,7 @@ message PodSpec { // all conditions specified in the readiness gates have status equal to "True" // More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates // +optional + // +listType=atomic repeated PodReadinessGate readinessGates = 28; // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used @@ -4038,7 +4288,7 @@ message PodSpec { // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. // More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md // +optional - map overhead = 32; + map overhead = 32; // TopologySpreadConstraints describes how a group of pods ought to spread across topology // domains. Scheduler will schedule pods in a way which abides by the constraints. 
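Illustrative note (not part of the vendored diff): the ImageVolumeSource message added earlier in this file lets a pod mount an OCI image or artifact as a volume. A sketch assuming the generated Go names (VolumeSource.Image, ImageVolumeSource.Reference, ImageVolumeSource.PullPolicy) mirror the proto, as in upstream Kubernetes v1.31 behind the ImageVolume feature gate; the registry.example references are placeholders:

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
)

func main() {
    // Mount an OCI image/artifact as a read-only volume.
    vol := corev1.Volume{
        Name: "artifact",
        VolumeSource: corev1.VolumeSource{
            Image: &corev1.ImageVolumeSource{
                Reference:  "registry.example/configs/app-config:v1", // placeholder reference
                PullPolicy: corev1.PullIfNotPresent,
            },
        },
    }

    pod := corev1.PodSpec{
        Volumes: []corev1.Volume{vol},
        Containers: []corev1.Container{{
            Name:         "app",
            Image:        "registry.example/app:v1", // placeholder image
            VolumeMounts: []corev1.VolumeMount{{Name: "artifact", MountPath: "/data", ReadOnly: true}},
        }},
    }

    fmt.Println(pod.Volumes[0].Image.Reference)
}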
@@ -4069,6 +4319,7 @@ message PodSpec { // - spec.hostPID // - spec.hostIPC // - spec.hostUsers + // - spec.securityContext.appArmorProfile // - spec.securityContext.seLinuxOptions // - spec.securityContext.seccompProfile // - spec.securityContext.fsGroup @@ -4078,6 +4329,8 @@ message PodSpec { // - spec.securityContext.runAsUser // - spec.securityContext.runAsGroup // - spec.securityContext.supplementalGroups + // - spec.securityContext.supplementalGroupsPolicy + // - spec.containers[*].securityContext.appArmorProfile // - spec.containers[*].securityContext.seLinuxOptions // - spec.containers[*].securityContext.seccompProfile // - spec.containers[*].securityContext.capabilities @@ -4109,13 +4362,10 @@ message PodSpec { // // SchedulingGates can only be set at pod creation time, and be removed only afterwards. // - // This is a beta feature enabled by the PodSchedulingReadiness feature gate. - // // +patchMergeKey=name // +patchStrategy=merge // +listType=map // +listMapKey=name - // +featureGate=PodSchedulingReadiness // +optional repeated PodSchedulingGate schedulingGates = 38; @@ -4168,6 +4418,8 @@ message PodStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated PodCondition conditions = 2; // A human readable message indicating details about why the pod is in this condition. @@ -4216,22 +4468,26 @@ message PodStatus { // +optional // +patchStrategy=merge // +patchMergeKey=ip + // +listType=map + // +listMapKey=ip repeated PodIP podIPs = 12; // RFC 3339 date and time at which the object was acknowledged by the Kubelet. // This is before the Kubelet pulled the container image(s) for the pod. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 7; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 7; // The list has one entry per init container in the manifest. The most recent successful // init container will have ready = true, the most recently started container will have // startTime set. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status + // +listType=atomic repeated ContainerStatus initContainerStatuses = 10; // The list has one entry per container in the manifest. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status // +optional + // +listType=atomic repeated ContainerStatus containerStatuses = 8; // The Quality of Service (QOS) classification assigned to the pod based on resource requirements @@ -4242,6 +4498,7 @@ message PodStatus { // Status for any ephemeral containers that have run in this pod. // +optional + // +listType=atomic repeated ContainerStatus ephemeralContainerStatuses = 13; // Status of resources resize desired for pod's containers. @@ -4266,7 +4523,7 @@ message PodStatusResult { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Most recently observed status of the pod. // This data may not be up to date. @@ -4282,7 +4539,7 @@ message PodTemplate { // Standard object's metadata. 
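With the PodSchedulingReadiness feature-gate reference removed above, scheduling gates can be set on any PodSpec at creation time and the pod stays unschedulable until every gate is cleared. A minimal sketch; the gate name and image are hypothetical.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	spec := corev1.PodSpec{
		Containers: []corev1.Container{
			{Name: "app", Image: "registry.example.com/app:1.0"},
		},
		// The pod reports a SchedulingGated condition until a controller
		// removes every entry from this list.
		SchedulingGates: []corev1.PodSchedulingGate{
			{Name: "example.com/quota-check"},
		},
	}
	fmt.Printf("%+v\n", spec)
}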
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Template defines the pods that will be created from this pod template. // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -4295,7 +4552,7 @@ message PodTemplateList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of pod templates repeated PodTemplate items = 2; @@ -4306,7 +4563,7 @@ message PodTemplateSpec { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the pod. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -4368,7 +4625,7 @@ message PreferAvoidPodsEntry { // Time at which this entry was added to the list. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time evictionTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time evictionTime = 2; // (brief) reason why this entry was added to the list. // +optional @@ -4457,8 +4714,10 @@ message ProbeHandler { // Represents a projected volume source message ProjectedVolumeSource { - // sources is the list of volume projections + // sources is the list of volume projections. Each entry in this list + // handles one source. // +optional + // +listType=atomic repeated VolumeProjection sources = 1; // defaultMode are the mode bits used to set permissions on created files by default. @@ -4508,6 +4767,7 @@ message QuobyteVolumeSource { message RBDPersistentVolumeSource { // monitors is a collection of Ceph monitors. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +listType=atomic repeated string monitors = 1; // image is the rados image name. @@ -4526,18 +4786,21 @@ message RBDPersistentVolumeSource { // Default is rbd. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="rbd" optional string pool = 4; // user is the rados user name. // Default is admin. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="admin" optional string user = 5; // keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="/etc/ceph/keyring" optional string keyring = 6; // secretRef is name of the authentication secret for RBDUser. If provided @@ -4559,6 +4822,7 @@ message RBDPersistentVolumeSource { message RBDVolumeSource { // monitors is a collection of Ceph monitors. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +listType=atomic repeated string monitors = 1; // image is the rados image name. @@ -4577,18 +4841,21 @@ message RBDVolumeSource { // Default is rbd. 
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="rbd" optional string pool = 4; // user is the rados user name. // Default is admin. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="admin" optional string user = 5; // keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="/etc/ceph/keyring" optional string keyring = 6; // secretRef is name of the authentication secret for RBDUser. If provided @@ -4610,7 +4877,7 @@ message RangeAllocation { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Range is string that identifies the range represented by 'data'. optional string range = 2; @@ -4625,7 +4892,7 @@ message ReplicationController { // be the same as the Pod(s) that the replication controller manages. // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the replication controller. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -4651,7 +4918,7 @@ message ReplicationControllerCondition { // The last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -4667,7 +4934,7 @@ message ReplicationControllerList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of replication controllers. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller @@ -4733,6 +5000,8 @@ message ReplicationControllerStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated ReplicationControllerCondition conditions = 6; } @@ -4742,6 +5011,13 @@ message ResourceClaim { // the Pod where this field is used. It makes that resource available // inside a container. optional string name = 1; + + // Request is the name chosen for a request in the referenced claim. + // If empty, everything from the claim is made available, otherwise + // only the result of this request. 
+ // + // +optional + optional string request = 2; } // ResourceFieldSelector represents container resources (cpu, memory) and their output format @@ -4756,7 +5032,26 @@ message ResourceFieldSelector { // Specifies the output format of the exposed resources, defaults to "1" // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity divisor = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity divisor = 3; +} + +// ResourceHealth represents the health of a resource. It has the latest device health information. +// This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP. +message ResourceHealth { + // ResourceID is the unique identifier of the resource. See the ResourceID type for more information. + optional string resourceID = 1; + + // Health of the resource. + // can be one of: + // - Healthy: operates as normal + // - Unhealthy: reported unhealthy. We consider this a temporary health issue + // since we do not have a mechanism today to distinguish + // temporary and permanent issues. + // - Unknown: The status cannot be determined. + // For example, Device Plugin got unregistered and hasn't been re-registered since. + // + // In future we may want to introduce the PermanentlyUnhealthy Status. + optional string health = 2; } // ResourceQuota sets aggregate quota restrictions enforced per namespace @@ -4764,7 +5059,7 @@ message ResourceQuota { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the desired quota. // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -4782,7 +5077,7 @@ message ResourceQuotaList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ResourceQuota objects. // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ @@ -4794,11 +5089,12 @@ message ResourceQuotaSpec { // hard is the set of desired hard limits for each named resource. // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ // +optional - map hard = 1; + map hard = 1; // A collection of filters that must match each object tracked by a quota. // If not specified, the quota matches all objects. // +optional + // +listType=atomic repeated string scopes = 2; // scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota @@ -4813,11 +5109,11 @@ message ResourceQuotaStatus { // Hard is the set of enforced hard limits for each named resource. // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ // +optional - map hard = 1; + map hard = 1; // Used is the current observed total usage of the resource in the namespace. // +optional - map used = 2; + map used = 2; } // ResourceRequirements describes the compute resource requirements. @@ -4825,14 +5121,14 @@ message ResourceRequirements { // Limits describes the maximum amount of compute resources allowed. 
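A short sketch of how a container could reference a single named request inside a pod-level resource claim via the new request field; the claim and request names are hypothetical.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	c := corev1.Container{
		Name: "inference",
		Resources: corev1.ResourceRequirements{
			Claims: []corev1.ResourceClaim{
				// Name must match an entry in the pod's spec.resourceClaims.
				// Request selects one named request within that claim;
				// leaving it empty exposes everything from the claim.
				{Name: "gpu-claim", Request: "single-gpu"},
			},
		},
	}
	fmt.Println(c.Resources.Claims[0].Name, c.Resources.Claims[0].Request)
}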
// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional - map limits = 1; + map limits = 1; // Requests describes the minimum amount of compute resources required. // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, // otherwise to an implementation-defined value. Requests cannot exceed Limits. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional - map requests = 2; + map requests = 2; // Claims lists the names of resources, defined in spec.resourceClaims, // that are used by this container. @@ -4849,6 +5145,20 @@ message ResourceRequirements { repeated ResourceClaim claims = 3; } +message ResourceStatus { + // Name of the resource. Must be unique within the pod and match one of the resources from the pod spec. + // +required + optional string name = 1; + + // List of unique Resources health. Each element in the list contains an unique resource ID and resource health. + // At a minimum, ResourceID must uniquely identify the Resource + // allocated to the Pod on the Node for the lifetime of a Pod. + // See ResourceID type for it's definition. + // +listType=map + // +listMapKey=resourceID + repeated ResourceHealth resources = 2; +} + // SELinuxOptions are the labels to be applied to the container message SELinuxOptions { // User is a SELinux user label that applies to the container. @@ -4895,6 +5205,7 @@ message ScaleIOPersistentVolumeSource { // storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. // Default is ThinProvisioned. // +optional + // +default="ThinProvisioned" optional string storageMode = 7; // volumeName is the name of a volume already created in the ScaleIO system @@ -4906,6 +5217,7 @@ message ScaleIOPersistentVolumeSource { // Ex. "ext4", "xfs", "ntfs". // Default is "xfs" // +optional + // +default="xfs" optional string fsType = 9; // readOnly defaults to false (read/write). ReadOnly here will force @@ -4941,6 +5253,7 @@ message ScaleIOVolumeSource { // storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. // Default is ThinProvisioned. // +optional + // +default="ThinProvisioned" optional string storageMode = 7; // volumeName is the name of a volume already created in the ScaleIO system @@ -4952,6 +5265,7 @@ message ScaleIOVolumeSource { // Ex. "ext4", "xfs", "ntfs". // Default is "xfs". // +optional + // +default="xfs" optional string fsType = 9; // readOnly Defaults to false (read/write). ReadOnly here will force @@ -4966,6 +5280,7 @@ message ScaleIOVolumeSource { message ScopeSelector { // A list of scope selector requirements by scope of the resources. // +optional + // +listType=atomic repeated ScopedResourceSelectorRequirement matchExpressions = 1; } @@ -4984,6 +5299,7 @@ message ScopedResourceSelectorRequirement { // the values array must be empty. // This array is replaced during a strategic merge patch. // +optional + // +listType=atomic repeated string values = 3; } @@ -5014,7 +5330,7 @@ message Secret { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Immutable, if set to true, ensures that data stored in the Secret cannot // be updated (only object metadata can be modified). @@ -5077,7 +5393,7 @@ message SecretList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of secret objects. // More info: https://kubernetes.io/docs/concepts/configuration/secret @@ -5101,6 +5417,7 @@ message SecretProjection { // the volume setup will error unless it is marked optional. Paths must be // relative and may not contain the '..' path or start with '..'. // +optional + // +listType=atomic repeated KeyToPath items = 2; // optional field specify whether the Secret or its key must be defined @@ -5140,6 +5457,7 @@ message SecretVolumeSource { // the volume setup will error unless it is marked optional. Paths must be // relative and may not contain the '..' path or start with '..'. // +optional + // +listType=atomic repeated KeyToPath items = 2; // defaultMode is Optional: mode bits used to set permissions on created files by default. @@ -5231,7 +5549,7 @@ message SecurityContext { optional bool allowPrivilegeEscalation = 7; // procMount denotes the type of proc mount to use for the containers. - // The default is DefaultProcMount which uses the container runtime defaults for + // The default value is Default which uses the container runtime defaults for // readonly paths and masked paths. // This requires the ProcMountType feature flag to be enabled. // Note that this field cannot be set when spec.os.name is windows. @@ -5244,6 +5562,12 @@ message SecurityContext { // Note that this field cannot be set when spec.os.name is windows. // +optional optional SeccompProfile seccompProfile = 11; + + // appArmorProfile is the AppArmor options to use by this container. If set, this profile + // overrides the pod's appArmorProfile. + // Note that this field cannot be set when spec.os.name is windows. + // +optional + optional AppArmorProfile appArmorProfile = 12; } // SerializedReference is a reference to serialized object. @@ -5260,7 +5584,7 @@ message Service { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of a service. // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -5283,7 +5607,7 @@ message ServiceAccount { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. 
// Pods are only limited to this list if this service account has a "kubernetes.io/enforce-mountable-secrets" annotation set to "true". @@ -5293,6 +5617,8 @@ message ServiceAccount { // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated ObjectReference secrets = 2; // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images @@ -5300,6 +5626,7 @@ message ServiceAccount { // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. // More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod // +optional + // +listType=atomic repeated LocalObjectReference imagePullSecrets = 3; // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. @@ -5313,7 +5640,7 @@ message ServiceAccountList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ServiceAccounts. // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ @@ -5351,7 +5678,7 @@ message ServiceList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of services repeated Service items = 2; @@ -5403,7 +5730,7 @@ message ServicePort { // omitted or set equal to the 'port' field. // More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 4; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 4; // The port on each node on which this service is exposed when type is // NodePort or LoadBalancer. Usually assigned by the system. If a value is @@ -5520,6 +5847,7 @@ message ServiceSpec { // at a node with this IP. A common example is external load-balancers // that are not part of the Kubernetes system. // +optional + // +listType=atomic repeated string externalIPs = 5; // Supports "ClientIP" and "None". Used to maintain session affinity. @@ -5545,6 +5873,7 @@ message ServiceSpec { // cloud-provider does not support the feature." // More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ // +optional + // +listType=atomic repeated string loadBalancerSourceRanges = 9; // externalName is the external reference that discovery mechanisms will @@ -5660,6 +5989,17 @@ message ServiceSpec { // (possibly modified by topology and other features). // +optional optional string internalTrafficPolicy = 22; + + // TrafficDistribution offers a way to express preferences for how traffic is + // distributed to Service endpoints. Implementations can use this field as a + // hint, but are not required to guarantee strict adherence. If the field is + // not set, the implementation will apply its default routing strategy. If set + // to "PreferClose", implementations should prioritize endpoints that are + // topologically close (e.g., same zone). 
+ // This is an alpha field and requires enabling ServiceTrafficDistribution feature. + // +featureGate=ServiceTrafficDistribution + // +optional + optional string trafficDistribution = 23; } // ServiceStatus represents the current status of a service. @@ -5675,7 +6015,7 @@ message ServiceStatus { // +patchStrategy=merge // +listType=map // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 2; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 2; } // SessionAffinityConfig represents the configurations of session affinity. @@ -5769,7 +6109,7 @@ message TCPSocketAction { // Number or name of the port to access on the container. // Number must be in the range 1 to 65535. // Name must be an IANA_SVC_NAME. - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 1; // Optional: Host name to connect to, defaults to the pod IP. // +optional @@ -5794,7 +6134,7 @@ message Taint { // TimeAdded represents the time at which the taint was added. // It is only written for NoExecute taints. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4; } // The pod this Toleration is attached to tolerates any taint that matches @@ -5838,6 +6178,7 @@ message TopologySelectorLabelRequirement { // An array of string values. One value must match the label to be selected. // Each entry in Values is ORed. + // +listType=atomic repeated string values = 2; } @@ -5850,6 +6191,7 @@ message TopologySelectorLabelRequirement { message TopologySelectorTerm { // A list of topology selector requirements by labels. // +optional + // +listType=atomic repeated TopologySelectorLabelRequirement matchLabelExpressions = 1; } @@ -5916,7 +6258,7 @@ message TopologySpreadConstraint { // Pods that match this label selector are counted to determine the number of pods // in their corresponding topology domain. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 4; // MinDomains indicates a minimum number of eligible domains. // When the number of eligible domains with matching topology keys is less than minDomains, @@ -5940,8 +6282,6 @@ message TopologySpreadConstraint { // In this situation, new pod with the same labelSelector cannot be scheduled, // because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, // it will violate MaxSkew. - // - // This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). // +optional optional int32 minDomains = 5; @@ -6051,6 +6391,27 @@ message VolumeMount { // +optional optional bool readOnly = 2; + // RecursiveReadOnly specifies whether read-only mounts should be handled + // recursively. + // + // If ReadOnly is false, this field has no meaning and must be unspecified. + // + // If ReadOnly is true, and this field is set to Disabled, the mount is not made + // recursively read-only. If this field is set to IfPossible, the mount is made + // recursively read-only, if it is supported by the container runtime. If this + // field is set to Enabled, the mount is made recursively read-only if it is + // supported by the container runtime, otherwise the pod will not be started and + // an error will be generated to indicate the reason. 
+ // + // If this field is set to IfPossible or Enabled, MountPropagation must be set to + // None (or be unspecified, which defaults to None). + // + // If this field is not specified, it is treated as an equivalent of Disabled. + // + // +featureGate=RecursiveReadOnlyMounts + // +optional + optional string recursiveReadOnly = 7; + // Path within the container at which the volume should be mounted. Must // not contain ':'. optional string mountPath = 3; @@ -6064,6 +6425,8 @@ message VolumeMount { // to container and the other way around. // When not set, MountPropagationNone is used. // This field is beta in 1.10. + // When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + // (which defaults to None). // +optional optional string mountPropagation = 5; @@ -6075,13 +6438,34 @@ message VolumeMount { optional string subPathExpr = 6; } +// VolumeMountStatus shows status of volume mounts. +message VolumeMountStatus { + // Name corresponds to the name of the original VolumeMount. + optional string name = 1; + + // MountPath corresponds to the original VolumeMount. + optional string mountPath = 2; + + // ReadOnly corresponds to the original VolumeMount. + // +optional + optional bool readOnly = 3; + + // RecursiveReadOnly must be set to Disabled, Enabled, or unspecified (for non-readonly mounts). + // An IfPossible value in the original VolumeMount must be translated to Disabled or Enabled, + // depending on the mount result. + // +featureGate=RecursiveReadOnlyMounts + // +optional + optional string recursiveReadOnly = 4; +} + // VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from. message VolumeNodeAffinity { // required specifies hard node constraints that must be met. optional NodeSelector required = 1; } -// Projection that may be projected along with other supported volume types +// Projection that may be projected along with other supported volume types. +// Exactly one of these fields must be set. message VolumeProjection { // secret information about the secret data to project // +optional @@ -6123,14 +6507,14 @@ message VolumeResourceRequirements { // Limits describes the maximum amount of compute resources allowed. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional - map limits = 1; + map limits = 1; // Requests describes the minimum amount of compute resources required. // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, // otherwise to an implementation-defined value. Requests cannot exceed Limits. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional - map requests = 2; + map requests = 2; } // Represents the source of a volume to mount. @@ -6298,6 +6682,24 @@ message VolumeSource { // // +optional optional EphemeralVolumeSource ephemeral = 29; + + // image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + // The volume is resolved at pod startup depending on which PullPolicy value is provided: + // + // - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + // - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + // - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. 
Container creation will fail if the reference isn't present and the pull fails. + // + // The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + // A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. + // The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + // The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + // The volume will be mounted read-only (ro) and non-executable files (noexec). + // Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + // The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + // +featureGate=ImageVolume + // +optional + optional ImageVolumeSource image = 30; } // Represents a vSphere volume resource. diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index 61ba21bcad..3a74138bae 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -181,6 +181,23 @@ type VolumeSource struct { // // +optional Ephemeral *EphemeralVolumeSource `json:"ephemeral,omitempty" protobuf:"bytes,29,opt,name=ephemeral"` + // image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + // The volume is resolved at pod startup depending on which PullPolicy value is provided: + // + // - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + // - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + // - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + // + // The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + // A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. + // The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + // The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + // The volume will be mounted read-only (ro) and non-executable files (noexec). + // Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + // The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + // +featureGate=ImageVolume + // +optional + Image *ImageVolumeSource `json:"image,omitempty" protobuf:"bytes,30,opt,name=image"` } // PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. 
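A rough sketch of the new image volume source described in the hunks above, assuming the ImageVolumeSource Go struct carries Reference and PullPolicy fields (they are not shown in this hunk); the registry reference is hypothetical.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	vol := corev1.Volume{
		Name: "models",
		VolumeSource: corev1.VolumeSource{
			// Requires the ImageVolume feature gate; the content is mounted
			// read-only and noexec, and subPath mounts are not supported.
			Image: &corev1.ImageVolumeSource{
				Reference:  "registry.example.com/models:v1",
				PullPolicy: corev1.PullIfNotPresent,
			},
		},
	}
	fmt.Printf("%+v\n", vol)
}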
@@ -295,6 +312,7 @@ const ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PersistentVolume (PV) is a storage resource provisioned by an administrator. // It is analogous to a node. @@ -331,6 +349,7 @@ type PersistentVolumeSpec struct { // accessModes contains all ways the volume can be mounted. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes // +optional + // +listType=atomic AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,3,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` // claimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. // Expected to be non-nil when bound. @@ -354,6 +373,7 @@ type PersistentVolumeSpec struct { // simply fail if one is invalid. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options // +optional + // +listType=atomic MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"` // volumeMode defines if a volume is intended to be used with a formatted filesystem // or to remain in raw block state. Value of Filesystem is implied when not included in spec. @@ -369,7 +389,7 @@ type PersistentVolumeSpec struct { // after a volume has been updated successfully to a new class. // For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound // PersistentVolumeClaims during the binding process. - // This is an alpha field and requires enabling VolumeAttributesClass feature. + // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,10,opt,name=volumeAttributesClassName"` @@ -423,13 +443,12 @@ type PersistentVolumeStatus struct { Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` // lastPhaseTransitionTime is the time the phase transitioned from one to another // and automatically resets to current time everytime a volume phase transitions. - // This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default). - // +featureGate=PersistentVolumeLastPhaseTransitionTime // +optional LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastPhaseTransitionTime"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PersistentVolumeList is a list of PersistentVolume items. type PersistentVolumeList struct { @@ -445,6 +464,7 @@ type PersistentVolumeList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PersistentVolumeClaim is a user's request for and claim to a persistent volume type PersistentVolumeClaim struct { @@ -467,6 +487,7 @@ type PersistentVolumeClaim struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PersistentVolumeClaimList is a list of PersistentVolumeClaim items. type PersistentVolumeClaimList struct { @@ -486,6 +507,7 @@ type PersistentVolumeClaimSpec struct { // accessModes contains the desired access modes the volume should have. 
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 // +optional + // +listType=atomic AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` // selector is a label query over volumes to consider for binding. // +optional @@ -553,8 +575,8 @@ type PersistentVolumeClaimSpec struct { // If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be // set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource // exists. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass - // (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + // More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + // (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). // +featureGate=VolumeAttributesClass // +optional VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,9,opt,name=volumeAttributesClassName"` @@ -578,15 +600,29 @@ type TypedObjectReference struct { Namespace *string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` } -// PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type +// PersistentVolumeClaimConditionType defines the condition of PV claim. +// Valid values are: +// - "Resizing", "FileSystemResizePending" +// +// If RecoverVolumeExpansionFailure feature gate is enabled, then following additional values can be expected: +// - "ControllerResizeError", "NodeResizeError" +// +// If VolumeAttributesClass feature gate is enabled, then following additional values can be expected: +// - "ModifyVolumeError", "ModifyingVolume" type PersistentVolumeClaimConditionType string +// These are valid conditions of PVC const ( // PersistentVolumeClaimResizing - a user trigger resize of pvc has been started PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing" // PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending" + // PersistentVolumeClaimControllerResizeError indicates an error while resizing volume for size in the controller + PersistentVolumeClaimControllerResizeError PersistentVolumeClaimConditionType = "ControllerResizeError" + // PersistentVolumeClaimNodeResizeError indicates an error while resizing volume for size in the node. + PersistentVolumeClaimNodeResizeError PersistentVolumeClaimConditionType = "NodeResizeError" + // Applying the target VolumeAttributesClass encountered an error PersistentVolumeClaimVolumeModifyVolumeError PersistentVolumeClaimConditionType = "ModifyVolumeError" // Volume is being modified @@ -603,18 +639,19 @@ const ( // State set when resize controller starts resizing the volume in control-plane. PersistentVolumeClaimControllerResizeInProgress ClaimResourceStatus = "ControllerResizeInProgress" - // State set when resize has failed in resize controller with a terminal error. + // State set when resize has failed in resize controller with a terminal unrecoverable error. 
// Transient errors such as timeout should not set this status and should leave allocatedResourceStatus // unmodified, so as resize controller can resume the volume expansion. - PersistentVolumeClaimControllerResizeFailed ClaimResourceStatus = "ControllerResizeFailed" + PersistentVolumeClaimControllerResizeInfeasible ClaimResourceStatus = "ControllerResizeInfeasible" // State set when resize controller has finished resizing the volume but further resizing of volume // is needed on the node. PersistentVolumeClaimNodeResizePending ClaimResourceStatus = "NodeResizePending" // State set when kubelet starts resizing the volume. PersistentVolumeClaimNodeResizeInProgress ClaimResourceStatus = "NodeResizeInProgress" - // State set when resizing has failed in kubelet with a terminal error. Transient errors don't set NodeResizeFailed - PersistentVolumeClaimNodeResizeFailed ClaimResourceStatus = "NodeResizeFailed" + // State set when resizing has failed in kubelet with a terminal unrecoverable error. Transient errors + // shouldn't set this status + PersistentVolumeClaimNodeResizeInfeasible ClaimResourceStatus = "NodeResizeInfeasible" ) // +enum @@ -660,7 +697,7 @@ type PersistentVolumeClaimCondition struct { // +optional LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` // reason is a unique, this should be a short, machine understandable string that gives the reason - // for condition's last transition. If it reports "ResizeStarted" that means the underlying + // for condition's last transition. If it reports "Resizing" that means the underlying // persistent volume is being resized. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` @@ -677,15 +714,18 @@ type PersistentVolumeClaimStatus struct { // accessModes contains the actual access modes the volume backing the PVC has. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 // +optional + // +listType=atomic AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,2,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` // capacity represents the actual resources of the underlying volume. // +optional Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,3,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` // conditions is the current Condition of persistent volume claim. If underlying persistent volume is being - // resized then the Condition will be set to 'ResizeStarted'. + // resized then the Condition will be set to 'Resizing'. // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []PersistentVolumeClaimCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` // allocatedResources tracks the resources allocated to a PVC including its capacity. // Key names follow standard Kubernetes label syntax. Valid values are either: @@ -757,13 +797,13 @@ type PersistentVolumeClaimStatus struct { AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty" protobuf:"bytes,7,rep,name=allocatedResourceStatuses"` // currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. // When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim - // This is an alpha field and requires enabling VolumeAttributesClass feature. 
+ // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty" protobuf:"bytes,8,opt,name=currentVolumeAttributesClassName"` // ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. // When this is unset, there is no ModifyVolume operation being attempted. - // This is an alpha field and requires enabling VolumeAttributesClass feature. + // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional ModifyVolumeStatus *ModifyVolumeStatus `json:"modifyVolumeStatus,omitempty" protobuf:"bytes,9,opt,name=modifyVolumeStatus"` @@ -921,6 +961,7 @@ type GlusterfsPersistentVolumeSource struct { type RBDVolumeSource struct { // monitors is a collection of Ceph monitors. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +listType=atomic CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // image is the rados image name. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it @@ -936,16 +977,19 @@ type RBDVolumeSource struct { // Default is rbd. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="rbd" RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` // user is the rados user name. // Default is admin. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="admin" RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` // keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="/etc/ceph/keyring" Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` // secretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. @@ -965,6 +1009,7 @@ type RBDVolumeSource struct { type RBDPersistentVolumeSource struct { // monitors is a collection of Ceph monitors. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +listType=atomic CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // image is the rados image name. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it @@ -980,16 +1025,19 @@ type RBDPersistentVolumeSource struct { // Default is rbd. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="rbd" RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` // user is the rados user name. // Default is admin. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="admin" RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` // keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="/etc/ceph/keyring" Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` // secretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. 
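A minimal sketch of a PersistentVolumeClaim opting into the now-beta volumeAttributesClassName field described above; the class name is hypothetical and the VolumeAttributesClass feature gate (off by default) must be enabled.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	class := "fast-iops" // hypothetical VolumeAttributesClass name
	pvc := corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "data"},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			Resources: corev1.VolumeResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceStorage: resource.MustParse("10Gi"),
				},
			},
			// Beta: if the referenced class does not exist, the claim stays Pending.
			VolumeAttributesClassName: &class,
		},
	}
	fmt.Printf("%+v\n", pvc.Spec)
}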
@@ -1059,6 +1107,7 @@ type CinderPersistentVolumeSource struct { type CephFSVolumeSource struct { // monitors is Required: Monitors is a collection of Ceph monitors // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // +listType=atomic Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // path is Optional: Used as the mounted root, rather than the full Ceph tree, default is / // +optional @@ -1099,6 +1148,7 @@ type SecretReference struct { type CephFSPersistentVolumeSource struct { // monitors is Required: Monitors is a collection of Ceph monitors // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // +listType=atomic Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // path is Optional: Used as the mounted root, rather than the full Ceph tree, default is / // +optional @@ -1341,6 +1391,7 @@ type SecretVolumeSource struct { // the volume setup will error unless it is marked optional. Paths must be // relative and may not contain the '..' path or start with '..'. // +optional + // +listType=atomic Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` // defaultMode is Optional: mode bits used to set permissions on created files by default. // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. @@ -1376,6 +1427,7 @@ type SecretProjection struct { // the volume setup will error unless it is marked optional. Paths must be // relative and may not contain the '..' path or start with '..'. // +optional + // +listType=atomic Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` // optional field specify whether the Secret or its key must be defined // +optional @@ -1414,6 +1466,7 @@ type ISCSIVolumeSource struct { // iscsiInterface is the interface Name that uses an iSCSI transport. // Defaults to 'default' (tcp). // +optional + // +default="default" ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"` // fsType is the filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. @@ -1429,6 +1482,7 @@ type ISCSIVolumeSource struct { // portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). // +optional + // +listType=atomic Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"` // chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication // +optional @@ -1460,6 +1514,7 @@ type ISCSIPersistentVolumeSource struct { // iscsiInterface is the interface Name that uses an iSCSI transport. // Defaults to 'default' (tcp). // +optional + // +default="default" ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"` // fsType is the filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. @@ -1475,6 +1530,7 @@ type ISCSIPersistentVolumeSource struct { // portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). 
// +optional + // +listType=atomic Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"` // chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication // +optional @@ -1498,6 +1554,7 @@ type ISCSIPersistentVolumeSource struct { type FCVolumeSource struct { // targetWWNs is Optional: FC target worldwide names (WWNs) // +optional + // +listType=atomic TargetWWNs []string `json:"targetWWNs,omitempty" protobuf:"bytes,1,rep,name=targetWWNs"` // lun is Optional: FC target lun number // +optional @@ -1515,6 +1572,7 @@ type FCVolumeSource struct { // wwids Optional: FC volume world wide identifiers (wwids) // Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. // +optional + // +listType=atomic WWIDs []string `json:"wwids,omitempty" protobuf:"bytes,5,rep,name=wwids"` } @@ -1597,17 +1655,21 @@ type AzureDiskVolumeSource struct { DataDiskURI string `json:"diskURI" protobuf:"bytes,2,opt,name=diskURI"` // cachingMode is the Host Caching mode: None, Read Only, Read Write. // +optional + // +default=ref(AzureDataDiskCachingReadWrite) CachingMode *AzureDataDiskCachingMode `json:"cachingMode,omitempty" protobuf:"bytes,3,opt,name=cachingMode,casttype=AzureDataDiskCachingMode"` // fsType is Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // +optional + // +default="ext4" FSType *string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"` // readOnly Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional + // +default=false ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"` // kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared + // +default=ref(AzureSharedBlobDisk) Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"` } @@ -1646,6 +1708,7 @@ type ScaleIOVolumeSource struct { // storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. // Default is ThinProvisioned. // +optional + // +default="ThinProvisioned" StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"` // volumeName is the name of a volume already created in the ScaleIO system // that is associated with this volume source. @@ -1655,6 +1718,7 @@ type ScaleIOVolumeSource struct { // Ex. "ext4", "xfs", "ntfs". // Default is "xfs". // +optional + // +default="xfs" FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"` // readOnly Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. @@ -1683,6 +1747,7 @@ type ScaleIOPersistentVolumeSource struct { // storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. // Default is ThinProvisioned. // +optional + // +default="ThinProvisioned" StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"` // volumeName is the name of a volume already created in the ScaleIO system // that is associated with this volume source. @@ -1692,6 +1757,7 @@ type ScaleIOPersistentVolumeSource struct { // Ex. "ext4", "xfs", "ntfs". 
// Default is "xfs" // +optional + // +default="xfs" FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"` // readOnly defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. @@ -1771,6 +1837,7 @@ type ConfigMapVolumeSource struct { // the volume setup will error unless it is marked optional. Paths must be // relative and may not contain the '..' path or start with '..'. // +optional + // +listType=atomic Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` // defaultMode is optional: mode bits used to set permissions on created files by default. // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. @@ -1807,6 +1874,7 @@ type ConfigMapProjection struct { // the volume setup will error unless it is marked optional. Paths must be // relative and may not contain the '..' path or start with '..'. // +optional + // +listType=atomic Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` // optional specify whether the ConfigMap or its keys must be defined // +optional @@ -1873,8 +1941,10 @@ type ClusterTrustBundleProjection struct { // Represents a projected volume source type ProjectedVolumeSource struct { - // sources is the list of volume projections + // sources is the list of volume projections. Each entry in this list + // handles one source. // +optional + // +listType=atomic Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"` // defaultMode are the mode bits used to set permissions on created files by default. // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. @@ -1886,10 +1956,9 @@ type ProjectedVolumeSource struct { DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"` } -// Projection that may be projected along with other supported volume types +// Projection that may be projected along with other supported volume types. +// Exactly one of these fields must be set. type VolumeProjection struct { - // all types below are the supported types for projection into the same volume - // secret information about the secret data to project // +optional Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"` @@ -2137,6 +2206,26 @@ type VolumeMount struct { // Defaults to false. // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` + // RecursiveReadOnly specifies whether read-only mounts should be handled + // recursively. + // + // If ReadOnly is false, this field has no meaning and must be unspecified. + // + // If ReadOnly is true, and this field is set to Disabled, the mount is not made + // recursively read-only. If this field is set to IfPossible, the mount is made + // recursively read-only, if it is supported by the container runtime. If this + // field is set to Enabled, the mount is made recursively read-only if it is + // supported by the container runtime, otherwise the pod will not be started and + // an error will be generated to indicate the reason. + // + // If this field is set to IfPossible or Enabled, MountPropagation must be set to + // None (or be unspecified, which defaults to None). + // + // If this field is not specified, it is treated as an equivalent of Disabled. 
+ // + // +featureGate=RecursiveReadOnlyMounts + // +optional + RecursiveReadOnly *RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty" protobuf:"bytes,7,opt,name=recursiveReadOnly,casttype=RecursiveReadOnlyMode"` // Path within the container at which the volume should be mounted. Must // not contain ':'. MountPath string `json:"mountPath" protobuf:"bytes,3,opt,name=mountPath"` @@ -2148,6 +2237,8 @@ type VolumeMount struct { // to container and the other way around. // When not set, MountPropagationNone is used. // This field is beta in 1.10. + // When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + // (which defaults to None). // +optional MountPropagation *MountPropagationMode `json:"mountPropagation,omitempty" protobuf:"bytes,5,opt,name=mountPropagation,casttype=MountPropagationMode"` // Expanded path within the volume from which the container's volume should be mounted. @@ -2184,6 +2275,18 @@ const ( MountPropagationBidirectional MountPropagationMode = "Bidirectional" ) +// RecursiveReadOnlyMode describes recursive-readonly mode. +type RecursiveReadOnlyMode string + +const ( + // RecursiveReadOnlyDisabled disables recursive-readonly mode. + RecursiveReadOnlyDisabled RecursiveReadOnlyMode = "Disabled" + // RecursiveReadOnlyIfPossible enables recursive-readonly mode if possible. + RecursiveReadOnlyIfPossible RecursiveReadOnlyMode = "IfPossible" + // RecursiveReadOnlyEnabled enables recursive-readonly mode, or raise an error. + RecursiveReadOnlyEnabled RecursiveReadOnlyMode = "Enabled" +) + // volumeDevice describes a mapping of a raw block device within a container. type VolumeDevice struct { // name must match the name of a persistentVolumeClaim in the pod @@ -2347,6 +2450,7 @@ type HTTPGetAction struct { Scheme URIScheme `json:"scheme,omitempty" protobuf:"bytes,4,opt,name=scheme,casttype=URIScheme"` // Custom headers to set in the request. HTTP allows repeated headers. // +optional + // +listType=atomic HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty" protobuf:"bytes,5,rep,name=httpHeaders"` } @@ -2393,6 +2497,7 @@ type ExecAction struct { // a shell, you need to explicitly call out to that shell. // Exit status of 0 is treated as live/healthy and non-zero is unhealthy. // +optional + // +listType=atomic Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"` } @@ -2514,9 +2619,11 @@ type Capability string type Capabilities struct { // Added capabilities // +optional + // +listType=atomic Add []Capability `json:"add,omitempty" protobuf:"bytes,1,rep,name=add,casttype=Capability"` // Removed capabilities // +optional + // +listType=atomic Drop []Capability `json:"drop,omitempty" protobuf:"bytes,2,rep,name=drop,casttype=Capability"` } @@ -2574,6 +2681,13 @@ type ResourceClaim struct { // the Pod where this field is used. It makes that resource available // inside a container. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Request is the name chosen for a request in the referenced claim. + // If empty, everything from the claim is made available, otherwise + // only the result of this request. + // + // +optional + Request string `json:"request,omitempty" protobuf:"bytes,2,opt,name=request"` } const ( @@ -2602,6 +2716,7 @@ type Container struct { // of whether the variable exists or not. Cannot be updated. 
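A minimal sketch (not part of the vendored patch) of how the RecursiveReadOnly field and RecursiveReadOnlyMode constants added above could be used when building a Pod spec against this vendored k8s.io/api module; the volume name and mount path are illustrative only.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	rro := corev1.RecursiveReadOnlyIfPossible

	mount := corev1.VolumeMount{
		Name:      "data",
		MountPath: "/data",
		// RecursiveReadOnly is only meaningful when ReadOnly is true.
		ReadOnly:          true,
		RecursiveReadOnly: &rro,
		// MountPropagation is left nil (defaults to None), as required when
		// RecursiveReadOnly is IfPossible or Enabled per the comments above.
	}

	fmt.Printf("mount %q: readOnly=%v recursiveReadOnly=%v\n",
		mount.Name, mount.ReadOnly, *mount.RecursiveReadOnly)
}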
// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional + // +listType=atomic Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` // Arguments to the entrypoint. // The container image's CMD is used if this is not provided. @@ -2612,6 +2727,7 @@ type Container struct { // of whether the variable exists or not. Cannot be updated. // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional + // +listType=atomic Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` // Container's working directory. // If not specified, the container runtime's default will be used, which @@ -2640,12 +2756,15 @@ type Container struct { // Values defined by an Env with a duplicate key will take precedence. // Cannot be updated. // +optional + // +listType=atomic EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` // List of environment variables to set in the container. // Cannot be updated. // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` // Compute Resources required by this container. // Cannot be updated. @@ -2680,10 +2799,14 @@ type Container struct { // +optional // +patchMergeKey=mountPath // +patchStrategy=merge + // +listType=map + // +listMapKey=mountPath VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` // volumeDevices is the list of block devices to be used by the container. // +patchMergeKey=devicePath // +patchStrategy=merge + // +listType=map + // +listMapKey=devicePath // +optional VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` // Periodic probe of container liveness. @@ -2956,6 +3079,101 @@ type ContainerStatus struct { // +featureGate=InPlacePodVerticalScaling // +optional Resources *ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,11,opt,name=resources"` + // Status of volume mounts. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + // +listType=map + // +listMapKey=mountPath + // +featureGate=RecursiveReadOnlyMounts + VolumeMounts []VolumeMountStatus `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,12,rep,name=volumeMounts"` + // User represents user identity information initially attached to the first process of the container + // +featureGate=SupplementalGroupsPolicy + // +optional + User *ContainerUser `json:"user,omitempty" protobuf:"bytes,13,opt,name=user,casttype=ContainerUser"` + // AllocatedResourcesStatus represents the status of various resources + // allocated for this Pod. + // +featureGate=ResourceHealthStatus + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + AllocatedResourcesStatus []ResourceStatus `json:"allocatedResourcesStatus,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,14,rep,name=allocatedResourcesStatus"` +} + +type ResourceStatus struct { + // Name of the resource. Must be unique within the pod and match one of the resources from the pod spec. 
+ // +required + Name ResourceName `json:"name" protobuf:"bytes,1,opt,name=name"` + // List of unique Resources health. Each element in the list contains an unique resource ID and resource health. + // At a minimum, ResourceID must uniquely identify the Resource + // allocated to the Pod on the Node for the lifetime of a Pod. + // See ResourceID type for it's definition. + // +listType=map + // +listMapKey=resourceID + Resources []ResourceHealth `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"` +} + +type ResourceHealthStatus string + +const ( + ResourceHealthStatusHealthy ResourceHealthStatus = "Healthy" + ResourceHealthStatusUnhealthy ResourceHealthStatus = "Unhealthy" + ResourceHealthStatusUnknown ResourceHealthStatus = "Unknown" +) + +// ResourceID is calculated based on the source of this resource health information. +// For DevicePlugin: +// +// deviceplugin:DeviceID, where DeviceID is from the Device structure of DevicePlugin's ListAndWatchResponse type: https://github.com/kubernetes/kubernetes/blob/eda1c780543a27c078450e2f17d674471e00f494/staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1alpha/api.proto#L61-L73 +// +// DevicePlugin ID is usually a constant for the lifetime of a Node and typically can be used to uniquely identify the device on the node. +// For DRA: +// +// dra://: such a device can be looked up in the information published by that DRA driver to learn more about it. It is designed to be globally unique in a cluster. +type ResourceID string + +// ResourceHealth represents the health of a resource. It has the latest device health information. +// This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP. +type ResourceHealth struct { + // ResourceID is the unique identifier of the resource. See the ResourceID type for more information. + ResourceID ResourceID `json:"resourceID" protobuf:"bytes,1,opt,name=resourceID"` + // Health of the resource. + // can be one of: + // - Healthy: operates as normal + // - Unhealthy: reported unhealthy. We consider this a temporary health issue + // since we do not have a mechanism today to distinguish + // temporary and permanent issues. + // - Unknown: The status cannot be determined. + // For example, Device Plugin got unregistered and hasn't been re-registered since. + // + // In future we may want to introduce the PermanentlyUnhealthy Status. + Health ResourceHealthStatus `json:"health,omitempty" protobuf:"bytes,2,name=health"` +} + +// ContainerUser represents user identity information +type ContainerUser struct { + // Linux holds user identity information initially attached to the first process of the containers in Linux. + // Note that the actual running identity can be changed if the process has enough privilege to do so. + // +optional + Linux *LinuxContainerUser `json:"linux,omitempty" protobuf:"bytes,1,opt,name=linux,casttype=LinuxContainerUser"` + + // Windows holds user identity information initially attached to the first process of the containers in Windows + // This is just reserved for future use. 
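A rough example (outside the patch) of reading the new ContainerStatus fields described above, AllocatedResourcesStatus and User, assuming the vendored k8s.io/api module; the container name, resource name, and device ID are made up for illustration.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// inspectContainerStatus prints the feature-gated status fields when the
// kubelet populates them: initial user identity and per-resource health.
func inspectContainerStatus(cs corev1.ContainerStatus) {
	if cs.User != nil && cs.User.Linux != nil {
		fmt.Printf("container %s started as uid=%d gid=%d\n",
			cs.Name, cs.User.Linux.UID, cs.User.Linux.GID)
	}
	for _, rs := range cs.AllocatedResourcesStatus {
		for _, rh := range rs.Resources {
			fmt.Printf("resource %s device %s health=%s\n", rs.Name, rh.ResourceID, rh.Health)
		}
	}
}

func main() {
	status := corev1.ContainerStatus{
		Name: "app",
		AllocatedResourcesStatus: []corev1.ResourceStatus{
			{
				Name: "vendor.example/gpu",
				Resources: []corev1.ResourceHealth{
					{ResourceID: "deviceplugin:GPU-0", Health: corev1.ResourceHealthStatusHealthy},
				},
			},
		},
	}
	inspectContainerStatus(status)
}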
+ // Windows *WindowsContainerUser +} + +// LinuxContainerUser represents user identity information in Linux containers +type LinuxContainerUser struct { + // UID is the primary uid initially attached to the first process in the container + UID int64 `json:"uid" protobuf:"varint,1,name=uid"` + // GID is the primary gid initially attached to the first process in the container + GID int64 `json:"gid" protobuf:"varint,2,name=gid"` + // SupplementalGroups are the supplemental groups initially attached to the first process in the container + // +optional + // +listType=atomic + SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,3,rep,name=supplementalGroups"` } // PodPhase is a label for the condition of a pod at the current time. @@ -3065,6 +3283,23 @@ const ( PodResizeStatusInfeasible PodResizeStatus = "Infeasible" ) +// VolumeMountStatus shows status of volume mounts. +type VolumeMountStatus struct { + // Name corresponds to the name of the original VolumeMount. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // MountPath corresponds to the original VolumeMount. + MountPath string `json:"mountPath" protobuf:"bytes,2,opt,name=mountPath"` + // ReadOnly corresponds to the original VolumeMount. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` + // RecursiveReadOnly must be set to Disabled, Enabled, or unspecified (for non-readonly mounts). + // An IfPossible value in the original VolumeMount must be translated to Disabled or Enabled, + // depending on the mount result. + // +featureGate=RecursiveReadOnlyMounts + // +optional + RecursiveReadOnly *RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty" protobuf:"bytes,4,opt,name=recursiveReadOnly,casttype=RecursiveReadOnlyMode"` +} + // RestartPolicy describes how the container should be restarted. // Only one of the following restart policies may be specified. // If none of the following policies is specified, the default one @@ -3123,6 +3358,7 @@ const ( // +structType=atomic type NodeSelector struct { // Required. A list of node selector terms. The terms are ORed. + // +listType=atomic NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms" protobuf:"bytes,1,rep,name=nodeSelectorTerms"` } @@ -3133,9 +3369,11 @@ type NodeSelector struct { type NodeSelectorTerm struct { // A list of node selector requirements by node's labels. // +optional + // +listType=atomic MatchExpressions []NodeSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,1,rep,name=matchExpressions"` // A list of node selector requirements by node's fields. // +optional + // +listType=atomic MatchFields []NodeSelectorRequirement `json:"matchFields,omitempty" protobuf:"bytes,2,rep,name=matchFields"` } @@ -3153,6 +3391,7 @@ type NodeSelectorRequirement struct { // array must have a single element, which will be interpreted as an integer. // This array is replaced during a strategic merge patch. // +optional + // +listType=atomic Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` } @@ -3181,6 +3420,7 @@ type TopologySelectorTerm struct { // A list of topology selector requirements by labels. // +optional + // +listType=atomic MatchLabelExpressions []TopologySelectorLabelRequirement `json:"matchLabelExpressions,omitempty" protobuf:"bytes,1,rep,name=matchLabelExpressions"` } @@ -3191,6 +3431,7 @@ type TopologySelectorLabelRequirement struct { Key string `json:"key" protobuf:"bytes,1,opt,name=key"` // An array of string values. 
One value must match the label to be selected. // Each entry in Values is ORed. + // +listType=atomic Values []string `json:"values" protobuf:"bytes,2,rep,name=values"` } @@ -3228,6 +3469,7 @@ type PodAffinity struct { // When there are multiple elements, the lists of nodes corresponding to each // podAffinityTerm are intersected, i.e. all terms must be satisfied. // +optional + // +listType=atomic RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"` // The scheduler will prefer to schedule pods to nodes that satisfy // the affinity expressions specified by this field, but it may choose @@ -3239,6 +3481,7 @@ type PodAffinity struct { // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the // node(s) with the highest sum are the most preferred. // +optional + // +listType=atomic PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` } @@ -3263,6 +3506,7 @@ type PodAntiAffinity struct { // When there are multiple elements, the lists of nodes corresponding to each // podAffinityTerm are intersected, i.e. all terms must be satisfied. // +optional + // +listType=atomic RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"` // The scheduler will prefer to schedule pods to nodes that satisfy // the anti-affinity expressions specified by this field, but it may choose @@ -3274,6 +3518,7 @@ type PodAntiAffinity struct { // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the // node(s) with the highest sum are the most preferred. // +optional + // +listType=atomic PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` } @@ -3302,6 +3547,7 @@ type PodAffinityTerm struct { // and the ones selected by namespaceSelector. // null or empty namespaces list and null namespaceSelector means "this pod's namespace". // +optional + // +listType=atomic Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,2,rep,name=namespaces"` // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching // the labelSelector in the specified namespaces, where co-located is defined as running on a node @@ -3318,25 +3564,27 @@ type PodAffinityTerm struct { NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,4,opt,name=namespaceSelector"` // MatchLabelKeys is a set of pod label keys to select which pods will // be taken into consideration. The keys are used to lookup values from the - // incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + // incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` // to select the group of existing pods which pods will be taken into consideration // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming // pod labels will be ignored. The default value is empty. 
- // The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - // Also, MatchLabelKeys cannot be set when LabelSelector isn't set. - // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + // The same key is forbidden to exist in both matchLabelKeys and labelSelector. + // Also, matchLabelKeys cannot be set when labelSelector isn't set. + // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + // // +listType=atomic // +optional MatchLabelKeys []string `json:"matchLabelKeys,omitempty" protobuf:"bytes,5,opt,name=matchLabelKeys"` // MismatchLabelKeys is a set of pod label keys to select which pods will // be taken into consideration. The keys are used to lookup values from the - // incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + // incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` // to select the group of existing pods which pods will be taken into consideration // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming // pod labels will be ignored. The default value is empty. - // The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - // Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. - // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + // The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + // Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + // // +listType=atomic // +optional MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty" protobuf:"bytes,6,opt,name=mismatchLabelKeys"` @@ -3370,6 +3618,7 @@ type NodeAffinity struct { // "weight" to the sum if the node matches the corresponding matchExpressions; the // node(s) with the highest sum are the most preferred. // +optional + // +listType=atomic PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` } @@ -3475,6 +3724,8 @@ type PodSpec struct { // +optional // +patchMergeKey=name // +patchStrategy=merge,retainKeys + // +listType=map + // +listMapKey=name Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"` // List of initialization containers belonging to the pod. // Init containers are executed in order prior to containers being started. If any @@ -3491,6 +3742,8 @@ type PodSpec struct { // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name InitContainers []Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"` // List of containers belonging to the pod. // Containers cannot currently be added or removed. @@ -3498,6 +3751,8 @@ type PodSpec struct { // Cannot be updated. 
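As a sketch of the matchLabelKeys behaviour documented above (not part of the patch), the snippet below builds a PodAffinityTerm whose selector is augmented with the incoming pod's pod-template-hash value; the label keys and topology key are illustrative assumptions.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	term := corev1.PodAffinityTerm{
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"app": "web"},
		},
		TopologyKey: "kubernetes.io/hostname",
		// matchLabelKeys merges the incoming pod's value for this key into
		// the selector as `key in (value)`, per the comments above.
		MatchLabelKeys: []string{"pod-template-hash"},
	}

	antiAffinity := corev1.PodAntiAffinity{
		RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{term},
	}
	fmt.Printf("%d required anti-affinity term(s)\n",
		len(antiAffinity.RequiredDuringSchedulingIgnoredDuringExecution))
}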
// +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"` // List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing // pod to perform user-initiated actions such as debugging. This list cannot be specified when @@ -3506,6 +3761,8 @@ type PodSpec struct { // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name EphemeralContainers []EphemeralContainer `json:"ephemeralContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,34,rep,name=ephemeralContainers"` // Restart policy for all containers within the pod. // One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. @@ -3547,7 +3804,7 @@ type PodSpec struct { // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ // +optional ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"` - // DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. + // DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. // Deprecated: Use serviceAccountName instead. // +k8s:conversion-gen=false // +optional @@ -3556,9 +3813,11 @@ type PodSpec struct { // +optional AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"` - // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - // the scheduler simply schedules this pod onto that node, assuming that it fits resource - // requirements. + // NodeName indicates in which node this pod is scheduled. + // If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + // Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + // This field should not be used to express a desire for the pod to be scheduled on a specific node. + // https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename // +optional NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"` // Host networking requested for this pod. Use the host's network namespace. @@ -3595,6 +3854,8 @@ type PodSpec struct { // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"` // Specifies the hostname of the Pod // If not specified, the pod's hostname will be set to a system-defined value. @@ -3613,12 +3874,15 @@ type PodSpec struct { SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,19,opt,name=schedulerName"` // If specified, the pod's tolerations. // +optional + // +listType=atomic Tolerations []Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"` // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts - // file if specified. This is only valid for non-hostNetwork pods. + // file if specified. 
// +optional // +patchMergeKey=ip // +patchStrategy=merge + // +listType=map + // +listMapKey=ip HostAliases []HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,23,rep,name=hostAliases"` // If specified, indicates the pod's priority. "system-node-critical" and // "system-cluster-critical" are two special keywords which indicate the @@ -3645,6 +3909,7 @@ type PodSpec struct { // all conditions specified in the readiness gates have status equal to "True" // More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates // +optional + // +listType=atomic ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" protobuf:"bytes,28,opt,name=readinessGates"` // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. @@ -3699,6 +3964,7 @@ type PodSpec struct { // - spec.hostPID // - spec.hostIPC // - spec.hostUsers + // - spec.securityContext.appArmorProfile // - spec.securityContext.seLinuxOptions // - spec.securityContext.seccompProfile // - spec.securityContext.fsGroup @@ -3708,6 +3974,8 @@ type PodSpec struct { // - spec.securityContext.runAsUser // - spec.securityContext.runAsGroup // - spec.securityContext.supplementalGroups + // - spec.securityContext.supplementalGroupsPolicy + // - spec.containers[*].securityContext.appArmorProfile // - spec.containers[*].securityContext.seLinuxOptions // - spec.containers[*].securityContext.seccompProfile // - spec.containers[*].securityContext.capabilities @@ -3739,13 +4007,10 @@ type PodSpec struct { // // SchedulingGates can only be set at pod creation time, and be removed only afterwards. // - // This is a beta feature enabled by the PodSchedulingReadiness feature gate. - // // +patchMergeKey=name // +patchStrategy=merge // +listType=map // +listMapKey=name - // +featureGate=PodSchedulingReadiness // +optional SchedulingGates []PodSchedulingGate `json:"schedulingGates,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,38,opt,name=schedulingGates"` // ResourceClaims defines which ResourceClaims must be allocated @@ -3767,7 +4032,10 @@ type PodSpec struct { ResourceClaims []PodResourceClaim `json:"resourceClaims,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,39,rep,name=resourceClaims"` } -// PodResourceClaim references exactly one ResourceClaim through a ClaimSource. +// PodResourceClaim references exactly one ResourceClaim, either directly +// or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim +// for the pod. +// // It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. // Containers that need access to the ResourceClaim reference it with this name. type PodResourceClaim struct { @@ -3775,18 +4043,17 @@ type PodResourceClaim struct { // This must be a DNS_LABEL. Name string `json:"name" protobuf:"bytes,1,name=name"` - // Source describes where to find the ResourceClaim. - Source ClaimSource `json:"source,omitempty" protobuf:"bytes,2,name=source"` -} + // Source is tombstoned since Kubernetes 1.31 where it got replaced with + // the inlined fields below. + // + // Source ClaimSource `json:"source,omitempty" protobuf:"bytes,2,name=source"` -// ClaimSource describes a reference to a ResourceClaim. -// -// Exactly one of these fields should be set. Consumers of this type must -// treat an empty object as if it has an unknown value. 
-type ClaimSource struct { // ResourceClaimName is the name of a ResourceClaim object in the same // namespace as this pod. - ResourceClaimName *string `json:"resourceClaimName,omitempty" protobuf:"bytes,1,opt,name=resourceClaimName"` + // + // Exactly one of ResourceClaimName and ResourceClaimTemplateName must + // be set. + ResourceClaimName *string `json:"resourceClaimName,omitempty" protobuf:"bytes,3,opt,name=resourceClaimName"` // ResourceClaimTemplateName is the name of a ResourceClaimTemplate // object in the same namespace as this pod. @@ -3800,7 +4067,10 @@ type ClaimSource struct { // This field is immutable and no changes will be made to the // corresponding ResourceClaim by the control plane after creating the // ResourceClaim. - ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty" protobuf:"bytes,2,opt,name=resourceClaimTemplateName"` + // + // Exactly one of ResourceClaimName and ResourceClaimTemplateName must + // be set. + ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty" protobuf:"bytes,4,opt,name=resourceClaimTemplateName"` } // PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim @@ -3813,7 +4083,7 @@ type PodResourceClaimStatus struct { Name string `json:"name" protobuf:"bytes,1,name=name"` // ResourceClaimName is the name of the ResourceClaim that was - // generated for the Pod in the namespace of the Pod. It this is + // generated for the Pod in the namespace of the Pod. If this is // unset, then generating a ResourceClaim was not necessary. The // pod.spec.resourceClaims entry can be ignored in this case. // @@ -3952,8 +4222,6 @@ type TopologySpreadConstraint struct { // In this situation, new pod with the same labelSelector cannot be scheduled, // because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, // it will violate MaxSkew. - // - // This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). // +optional MinDomains *int32 `json:"minDomains,omitempty" protobuf:"varint,5,opt,name=minDomains"` // NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector @@ -3999,8 +4267,10 @@ const ( // pod's hosts file. type HostAlias struct { // IP address of the host file entry. - IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` + // +required + IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` // Hostnames for the above IP address. + // +listType=atomic Hostnames []string `json:"hostnames,omitempty" protobuf:"bytes,2,rep,name=hostnames"` } @@ -4021,6 +4291,23 @@ const ( FSGroupChangeAlways PodFSGroupChangePolicy = "Always" ) +// SupplementalGroupsPolicy defines how supplemental groups +// of the first container processes are calculated. +// +enum +type SupplementalGroupsPolicy string + +const ( + // SupplementalGroupsPolicyMerge means that the container's provided + // SupplementalGroups and FsGroup (specified in SecurityContext) will be + // merged with the primary user's groups as defined in the container image + // (in /etc/group). + SupplementalGroupsPolicyMerge SupplementalGroupsPolicy = "Merge" + // SupplementalGroupsPolicyStrict means that the container's provided + // SupplementalGroups and FsGroup (specified in SecurityContext) will be + // used instead of any groups defined in the container image. + SupplementalGroupsPolicyStrict SupplementalGroupsPolicy = "Strict" +) + // PodSecurityContext holds pod-level security attributes and common container settings. 
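A hedged sketch (not part of the patch) of the inlined PodResourceClaim layout above, where ResourceClaimTemplateName replaces the removed ClaimSource wrapper, together with the new per-container ResourceClaim.Request field; the claim name, template name, image, and request name are hypothetical.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	templateName := "gpu-claim-template"

	spec := corev1.PodSpec{
		ResourceClaims: []corev1.PodResourceClaim{
			{
				Name: "gpu",
				// Exactly one of ResourceClaimName or ResourceClaimTemplateName
				// may be set; here a template generates the claim for the pod.
				ResourceClaimTemplateName: &templateName,
			},
		},
		Containers: []corev1.Container{
			{
				Name:  "app",
				Image: "registry.example/app:latest",
				Resources: corev1.ResourceRequirements{
					Claims: []corev1.ResourceClaim{
						// Request narrows the reference to one named request in
						// the claim; leaving it empty exposes the whole claim.
						{Name: "gpu", Request: "single-gpu"},
					},
				},
			},
		},
	}
	fmt.Printf("pod references %d resource claim(s)\n", len(spec.ResourceClaims))
}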
// Some fields are also present in container.securityContext. Field values of // container.securityContext take precedence over field values of PodSecurityContext. @@ -4063,15 +4350,27 @@ type PodSecurityContext struct { // PodSecurityContext, the value specified in SecurityContext takes precedence. // +optional RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"` - // A list of groups applied to the first process run in each container, in addition - // to the container's primary GID, the fsGroup (if specified), and group memberships - // defined in the container image for the uid of the container process. If unspecified, - // no additional groups are added to any container. Note that group memberships - // defined in the container image for the uid of the container process are still effective, - // even if they are not included in this list. + // A list of groups applied to the first process run in each container, in + // addition to the container's primary GID and fsGroup (if specified). If + // the SupplementalGroupsPolicy feature is enabled, the + // supplementalGroupsPolicy field determines whether these are in addition + // to or instead of any group memberships defined in the container image. + // If unspecified, no additional groups are added, though group memberships + // defined in the container image may still be used, depending on the + // supplementalGroupsPolicy field. // Note that this field cannot be set when spec.os.name is windows. // +optional + // +listType=atomic SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"` + // Defines how supplemental groups of the first container processes are calculated. + // Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + // (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + // and the container runtime must implement support for this feature. + // Note that this field cannot be set when spec.os.name is windows. + // TODO: update the default value to "Merge" when spec.os.name is not windows in v1.34 + // +featureGate=SupplementalGroupsPolicy + // +optional + SupplementalGroupsPolicy *SupplementalGroupsPolicy `json:"supplementalGroupsPolicy,omitempty" protobuf:"bytes,12,opt,name=supplementalGroupsPolicy"` // A special supplemental group that applies to all containers in a pod. // Some volume types allow the Kubelet to change the ownership of that volume // to be owned by the pod: @@ -4088,6 +4387,7 @@ type PodSecurityContext struct { // sysctls (by the container runtime) might fail to launch. // Note that this field cannot be set when spec.os.name is windows. // +optional + // +listType=atomic Sysctls []Sysctl `json:"sysctls,omitempty" protobuf:"bytes,7,rep,name=sysctls"` // fsGroupChangePolicy defines behavior of changing ownership and permission of the volume // before being exposed inside Pod. This field will only apply to @@ -4102,6 +4402,10 @@ type PodSecurityContext struct { // Note that this field cannot be set when spec.os.name is windows. // +optional SeccompProfile *SeccompProfile `json:"seccompProfile,omitempty" protobuf:"bytes,10,opt,name=seccompProfile"` + // appArmorProfile is the AppArmor options to use by the containers in this pod. + // Note that this field cannot be set when spec.os.name is windows. 
+ // +optional + AppArmorProfile *AppArmorProfile `json:"appArmorProfile,omitempty" protobuf:"bytes,11,opt,name=appArmorProfile"` } // SeccompProfile defines a pod/container's seccomp profile settings. @@ -4138,6 +4442,38 @@ const ( SeccompProfileTypeLocalhost SeccompProfileType = "Localhost" ) +// AppArmorProfile defines a pod or container's AppArmor settings. +// +union +type AppArmorProfile struct { + // type indicates which kind of AppArmor profile will be applied. + // Valid options are: + // Localhost - a profile pre-loaded on the node. + // RuntimeDefault - the container runtime's default profile. + // Unconfined - no AppArmor enforcement. + // +unionDiscriminator + Type AppArmorProfileType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=AppArmorProfileType"` + + // localhostProfile indicates a profile loaded on the node that should be used. + // The profile must be preconfigured on the node to work. + // Must match the loaded name of the profile. + // Must be set if and only if type is "Localhost". + // +optional + LocalhostProfile *string `json:"localhostProfile,omitempty" protobuf:"bytes,2,opt,name=localhostProfile"` +} + +// +enum +type AppArmorProfileType string + +const ( + // AppArmorProfileTypeUnconfined indicates that no AppArmor profile should be enforced. + AppArmorProfileTypeUnconfined AppArmorProfileType = "Unconfined" + // AppArmorProfileTypeRuntimeDefault indicates that the container runtime's default AppArmor + // profile should be used. + AppArmorProfileTypeRuntimeDefault AppArmorProfileType = "RuntimeDefault" + // AppArmorProfileTypeLocalhost indicates that a profile pre-loaded on the node should be used. + AppArmorProfileTypeLocalhost AppArmorProfileType = "Localhost" +) + // PodQOSClass defines the supported qos classes of Pods. // +enum type PodQOSClass string @@ -4158,17 +4494,20 @@ type PodDNSConfig struct { // This will be appended to the base nameservers generated from DNSPolicy. // Duplicated nameservers will be removed. // +optional + // +listType=atomic Nameservers []string `json:"nameservers,omitempty" protobuf:"bytes,1,rep,name=nameservers"` // A list of DNS search domains for host-name lookup. // This will be appended to the base search paths generated from DNSPolicy. // Duplicated search paths will be removed. // +optional + // +listType=atomic Searches []string `json:"searches,omitempty" protobuf:"bytes,2,rep,name=searches"` // A list of DNS resolver options. // This will be merged with the base options generated from DNSPolicy. // Duplicated entries will be removed. Resolution options given in Options // will override those that appear in the base DNSPolicy. // +optional + // +listType=atomic Options []PodDNSConfigOption `json:"options,omitempty" protobuf:"bytes,3,rep,name=options"` } @@ -4183,13 +4522,15 @@ type PodDNSConfigOption struct { // PodIP represents a single IP address allocated to the pod. type PodIP struct { // IP is the IP address assigned to the pod - IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` + // +required + IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` } // HostIP represents a single IP address allocated to the host. type HostIP struct { // IP is the IP address assigned to the host - IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` + // +required + IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` } // EphemeralContainerCommon is a copy of all fields in Container to be inlined in @@ -4212,6 +4553,7 @@ type EphemeralContainerCommon struct { // of whether the variable exists or not. 
Cannot be updated. // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional + // +listType=atomic Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` // Arguments to the entrypoint. // The image's CMD is used if this is not provided. @@ -4222,6 +4564,7 @@ type EphemeralContainerCommon struct { // of whether the variable exists or not. Cannot be updated. // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional + // +listType=atomic Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` // Container's working directory. // If not specified, the container runtime's default will be used, which @@ -4244,12 +4587,15 @@ type EphemeralContainerCommon struct { // Values defined by an Env with a duplicate key will take precedence. // Cannot be updated. // +optional + // +listType=atomic EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` // List of environment variables to set in the container. // Cannot be updated. // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` // Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources // already allocated to the pod. @@ -4272,10 +4618,14 @@ type EphemeralContainerCommon struct { // +optional // +patchMergeKey=mountPath // +patchStrategy=merge + // +listType=map + // +listMapKey=mountPath VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` // volumeDevices is the list of block devices to be used by the container. // +patchMergeKey=devicePath // +patchStrategy=merge + // +listType=map + // +listMapKey=devicePath // +optional VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` // Probes are not allowed for ephemeral containers. @@ -4401,6 +4751,8 @@ type PodStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` // A human readable message indicating details about why the pod is in this condition. // +optional @@ -4446,6 +4798,8 @@ type PodStatus struct { // +optional // +patchStrategy=merge // +patchMergeKey=ip + // +listType=map + // +listMapKey=ip PodIPs []PodIP `json:"podIPs,omitempty" protobuf:"bytes,12,rep,name=podIPs" patchStrategy:"merge" patchMergeKey:"ip"` // RFC 3339 date and time at which the object was acknowledged by the Kubelet. @@ -4457,11 +4811,13 @@ type PodStatus struct { // init container will have ready = true, the most recently started container will have // startTime set. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status + // +listType=atomic InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty" protobuf:"bytes,10,rep,name=initContainerStatuses"` // The list has one entry per container in the manifest. 
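For the pod-level security fields introduced earlier in this hunk, the following sketch (outside the patch, feature gates assumed enabled) sets supplementalGroupsPolicy and an AppArmor profile on a PodSecurityContext; the group IDs are arbitrary examples.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	policy := corev1.SupplementalGroupsPolicyStrict

	sc := corev1.PodSecurityContext{
		SupplementalGroups: []int64{1000, 2000},
		// Strict ignores group memberships defined in the image's /etc/group,
		// as described for SupplementalGroupsPolicy above.
		SupplementalGroupsPolicy: &policy,
		AppArmorProfile: &corev1.AppArmorProfile{
			Type: corev1.AppArmorProfileTypeRuntimeDefault,
		},
	}

	fmt.Printf("policy=%s apparmor=%s\n", *sc.SupplementalGroupsPolicy, sc.AppArmorProfile.Type)
}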
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status // +optional + // +listType=atomic ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"` // The Quality of Service (QOS) classification assigned to the pod based on resource requirements // See PodQOSClass type for available QOS classes @@ -4470,6 +4826,7 @@ type PodStatus struct { QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"` // Status for any ephemeral containers that have run in this pod. // +optional + // +listType=atomic EphemeralContainerStatuses []ContainerStatus `json:"ephemeralContainerStatuses,omitempty" protobuf:"bytes,13,rep,name=ephemeralContainerStatuses"` // Status of resources resize desired for pod's containers. @@ -4490,6 +4847,7 @@ type PodStatus struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded type PodStatusResult struct { @@ -4510,6 +4868,7 @@ type PodStatusResult struct { // +genclient // +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Pod is a collection of containers that can run on a host. This resource is created // by clients and scheduled onto hosts. @@ -4535,6 +4894,7 @@ type Pod struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodList is a list of Pods. type PodList struct { @@ -4564,6 +4924,7 @@ type PodTemplateSpec struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodTemplate describes a template for creating copies of a predefined pod. type PodTemplate struct { @@ -4580,6 +4941,7 @@ type PodTemplate struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodTemplateList is a list of PodTemplates. type PodTemplateList struct { @@ -4658,6 +5020,8 @@ type ReplicationControllerStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []ReplicationControllerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` } @@ -4692,6 +5056,7 @@ type ReplicationControllerCondition struct { // +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ReplicationController represents the configuration of a replication controller. type ReplicationController struct { @@ -4718,6 +5083,7 @@ type ReplicationController struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ReplicationControllerList is a collection of replication controllers. 
type ReplicationControllerList struct { @@ -4829,6 +5195,18 @@ const ( ServiceExternalTrafficPolicyTypeCluster = ServiceExternalTrafficPolicyCluster ) +// These are valid values for the TrafficDistribution field of a Service. +const ( + // Indicates a preference for routing traffic to endpoints that are + // topologically proximate to the client. The interpretation of "topologically + // proximate" may vary across implementations and could encompass endpoints + // within the same node, rack, zone, or even region. Setting this value gives + // implementations permission to make different tradeoffs, e.g. optimizing for + // proximity rather than equal distribution of load. Users should not set this + // value if such tradeoffs are not acceptable. + ServiceTrafficDistributionPreferClose = "PreferClose" +) + // These are the valid conditions of a service. const ( // LoadBalancerPortsError represents the condition of the requested ports @@ -4859,6 +5237,7 @@ type LoadBalancerStatus struct { // Ingress is a list containing ingress points for the load-balancer. // Traffic intended for the service should be sent to these ingress points. // +optional + // +listType=atomic Ingress []LoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` } @@ -5024,6 +5403,7 @@ type ServiceSpec struct { // at a node with this IP. A common example is external load-balancers // that are not part of the Kubernetes system. // +optional + // +listType=atomic ExternalIPs []string `json:"externalIPs,omitempty" protobuf:"bytes,5,rep,name=externalIPs"` // Supports "ClientIP" and "None". Used to maintain session affinity. @@ -5049,6 +5429,7 @@ type ServiceSpec struct { // cloud-provider does not support the feature." // More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ // +optional + // +listType=atomic LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"` // externalName is the external reference that discovery mechanisms will @@ -5170,6 +5551,17 @@ type ServiceSpec struct { // (possibly modified by topology and other features). // +optional InternalTrafficPolicy *ServiceInternalTrafficPolicy `json:"internalTrafficPolicy,omitempty" protobuf:"bytes,22,opt,name=internalTrafficPolicy"` + + // TrafficDistribution offers a way to express preferences for how traffic is + // distributed to Service endpoints. Implementations can use this field as a + // hint, but are not required to guarantee strict adherence. If the field is + // not set, the implementation will apply its default routing strategy. If set + // to "PreferClose", implementations should prioritize endpoints that are + // topologically close (e.g., same zone). + // This is an alpha field and requires enabling ServiceTrafficDistribution feature. + // +featureGate=ServiceTrafficDistribution + // +optional + TrafficDistribution *string `json:"trafficDistribution,omitempty" protobuf:"bytes,23,opt,name=trafficDistribution"` } // ServicePort contains information on service's port. 
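A small sketch (not part of the patch) of the new ServiceSpec.TrafficDistribution field and the ServiceTrafficDistributionPreferClose constant added above, assuming the vendored k8s.io/api module; the service name, selector, and port are illustrative.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	preferClose := corev1.ServiceTrafficDistributionPreferClose

	svc := corev1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "web"},
		Spec: corev1.ServiceSpec{
			Selector: map[string]string{"app": "web"},
			Ports:    []corev1.ServicePort{{Port: 80}},
			// A hint, not a guarantee: implementations should prefer
			// topologically close endpoints, per the field comment above.
			TrafficDistribution: &preferClose,
		},
	}
	fmt.Printf("service %s trafficDistribution=%s\n", svc.Name, *svc.Spec.TrafficDistribution)
}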
@@ -5236,6 +5628,7 @@ type ServicePort struct { // +genclient // +genclient:skipVerbs=deleteCollection // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Service is a named abstraction of software service (for example, mysql) consisting of local port // (for example 3306) that the proxy listens on, and the selector that determines which pods @@ -5267,6 +5660,7 @@ const ( ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ServiceList holds a list of services. type ServiceList struct { @@ -5283,6 +5677,7 @@ type ServiceList struct { // +genclient // +genclient:method=CreateToken,verb=create,subresource=token,input=k8s.io/api/authentication/v1.TokenRequest,result=k8s.io/api/authentication/v1.TokenRequest // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ServiceAccount binds together: // * a name, understood by users, and perhaps by peripheral systems, for an identity @@ -5303,6 +5698,8 @@ type ServiceAccount struct { // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name Secrets []ObjectReference `json:"secrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=secrets"` // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images @@ -5310,6 +5707,7 @@ type ServiceAccount struct { // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. // More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod // +optional + // +listType=atomic ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,3,rep,name=imagePullSecrets"` // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. @@ -5319,6 +5717,7 @@ type ServiceAccount struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ServiceAccountList is a list of ServiceAccount objects type ServiceAccountList struct { @@ -5335,6 +5734,7 @@ type ServiceAccountList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Endpoints is a collection of endpoints that implement the actual service. Example: // @@ -5364,6 +5764,7 @@ type Endpoints struct { // NotReadyAddresses in the same subset. // Sets of addresses and ports that comprise a service. // +optional + // +listType=atomic Subsets []EndpointSubset `json:"subsets,omitempty" protobuf:"bytes,2,rep,name=subsets"` } @@ -5384,14 +5785,17 @@ type EndpointSubset struct { // IP addresses which offer the related ports that are marked as ready. These endpoints // should be considered safe for load balancers and clients to utilize. // +optional + // +listType=atomic Addresses []EndpointAddress `json:"addresses,omitempty" protobuf:"bytes,1,rep,name=addresses"` // IP addresses which offer the related ports but are not currently marked as ready // because they have not yet finished starting, have recently failed a readiness check, // or have recently failed a liveness check. 
// +optional + // +listType=atomic NotReadyAddresses []EndpointAddress `json:"notReadyAddresses,omitempty" protobuf:"bytes,2,rep,name=notReadyAddresses"` // Port numbers available on the related IP addresses. // +optional + // +listType=atomic Ports []EndpointPort `json:"ports,omitempty" protobuf:"bytes,3,rep,name=ports"` } @@ -5452,6 +5856,7 @@ type EndpointPort struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // EndpointsList is a list of endpoints. type EndpointsList struct { @@ -5476,6 +5881,7 @@ type NodeSpec struct { // each of IPv4 and IPv6. // +optional // +patchStrategy=merge + // +listType=set PodCIDRs []string `json:"podCIDRs,omitempty" protobuf:"bytes,7,opt,name=podCIDRs" patchStrategy:"merge"` // ID of the node assigned by the cloud provider in the format: :// @@ -5487,6 +5893,7 @@ type NodeSpec struct { Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"` // If specified, the node's taints. // +optional + // +listType=atomic Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"` // Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed. @@ -5562,6 +5969,38 @@ type NodeDaemonEndpoints struct { KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"` } +// NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler. +type NodeRuntimeHandlerFeatures struct { + // RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts. + // +featureGate=RecursiveReadOnlyMounts + // +optional + RecursiveReadOnlyMounts *bool `json:"recursiveReadOnlyMounts,omitempty" protobuf:"varint,1,opt,name=recursiveReadOnlyMounts"` + // UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes. + // +featureGate=UserNamespacesSupport + // +optional + UserNamespaces *bool `json:"userNamespaces,omitempty" protobuf:"varint,2,opt,name=userNamespaces"` +} + +// NodeRuntimeHandler is a set of runtime handler information. +type NodeRuntimeHandler struct { + // Runtime handler name. + // Empty for the default runtime handler. + // +optional + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Supported features. + // +optional + Features *NodeRuntimeHandlerFeatures `json:"features,omitempty" protobuf:"bytes,2,opt,name=features"` +} + +// NodeFeatures describes the set of features implemented by the CRI implementation. +// The features contained in the NodeFeatures should depend only on the cri implementation +// independent of runtime handlers. +type NodeFeatures struct { + // SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser. + // +optional + SupplementalGroupsPolicy *bool `json:"supplementalGroupsPolicy,omitempty" protobuf:"varint,1,opt,name=supplementalGroupsPolicy"` +} + // NodeSystemInfo is a set of ids/uuids to uniquely identify the node. type NodeSystemInfo struct { // MachineID reported by the node. For unique machine identification @@ -5582,7 +6021,7 @@ type NodeSystemInfo struct { ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"` // Kubelet Version reported by the node. 
KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"` - // KubeProxy Version reported by the node. + // Deprecated: KubeProxy Version reported by the node. KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"` // The Operating System reported by the node OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"` @@ -5640,7 +6079,7 @@ type NodeConfigStatus struct { // NodeStatus is information about the current status of a node. type NodeStatus struct { // Capacity represents the total resources of a node. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity + // More info: https://kubernetes.io/docs/reference/node/node-status/#capacity // +optional Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` // Allocatable represents the resources of a node that are available for scheduling. @@ -5657,6 +6096,8 @@ type NodeStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` // List of addresses reachable to the node. // Queried from cloud provider, if available. @@ -5671,6 +6112,8 @@ type NodeStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=addresses"` // Endpoints of daemons running on the Node. // +optional @@ -5681,16 +6124,29 @@ type NodeStatus struct { NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"` // List of container images on this node // +optional + // +listType=atomic Images []ContainerImage `json:"images,omitempty" protobuf:"bytes,8,rep,name=images"` // List of attachable volumes in use (mounted) by the node. // +optional + // +listType=atomic VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty" protobuf:"bytes,9,rep,name=volumesInUse"` // List of volumes that are attached to the node. // +optional + // +listType=atomic VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty" protobuf:"bytes,10,rep,name=volumesAttached"` // Status of the config assigned to the node via the dynamic Kubelet config feature. // +optional Config *NodeConfigStatus `json:"config,omitempty" protobuf:"bytes,11,opt,name=config"` + // The available runtime handlers. + // +featureGate=RecursiveReadOnlyMounts + // +featureGate=UserNamespacesSupport + // +optional + // +listType=atomic + RuntimeHandlers []NodeRuntimeHandler `json:"runtimeHandlers,omitempty" protobuf:"bytes,12,rep,name=runtimeHandlers"` + // Features describes the set of features implemented by the CRI implementation. + // +featureGate=SupplementalGroupsPolicy + // +optional + Features *NodeFeatures `json:"features,omitempty" protobuf:"bytes,13,rep,name=features"` } type UniqueVolumeName string @@ -5711,6 +6167,7 @@ type AvoidPods struct { // Bounded-sized list of signatures of pods that should avoid this node, sorted // in timestamp order from oldest to newest. Size of the slice is unspecified. 
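To illustrate the new NodeStatus.RuntimeHandlers and NodeStatus.Features fields documented above, here is a hedged sketch (outside the patch) that checks whether a node's runtime advertises recursive read-only mount support; the handler layout is a constructed example.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// supportsRecursiveReadOnly reports whether any runtime handler on the node
// advertises RecursiveReadOnlyMounts support via the feature-gated fields.
func supportsRecursiveReadOnly(node corev1.Node) bool {
	for _, h := range node.Status.RuntimeHandlers {
		if f := h.Features; f != nil && f.RecursiveReadOnlyMounts != nil && *f.RecursiveReadOnlyMounts {
			return true
		}
	}
	return false
}

func main() {
	yes := true
	node := corev1.Node{
		Status: corev1.NodeStatus{
			RuntimeHandlers: []corev1.NodeRuntimeHandler{
				// An empty name denotes the default runtime handler.
				{Name: "", Features: &corev1.NodeRuntimeHandlerFeatures{RecursiveReadOnlyMounts: &yes}},
			},
			Features: &corev1.NodeFeatures{SupplementalGroupsPolicy: &yes},
		},
	}
	fmt.Println("recursive read-only mounts supported:", supportsRecursiveReadOnly(node))
}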
// +optional + // +listType=atomic PreferAvoidPods []PreferAvoidPodsEntry `json:"preferAvoidPods,omitempty" protobuf:"bytes,1,rep,name=preferAvoidPods"` } @@ -5742,6 +6199,7 @@ type ContainerImage struct { // Names by which this image is known. // e.g. ["kubernetes.example/hyperkube:v1.0.7", "cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7"] // +optional + // +listType=atomic Names []string `json:"names" protobuf:"bytes,1,rep,name=names"` // The size of the image in bytes. // +optional @@ -5764,8 +6222,7 @@ const ( type NodeConditionType string // These are valid but not exhaustive conditions of node. A cloud provider may set a condition not listed here. -// The built-in set of conditions are: -// NodeReachable, NodeLive, NodeReady, NodeSchedulable, NodeRunnable. +// Relevant events contain "NodeReady", "NodeNotReady", "NodeSchedulable", and "NodeNotSchedulable". const ( // NodeReady means kubelet is healthy and ready to accept pods. NodeReady NodeConditionType = "Ready" @@ -5867,7 +6324,6 @@ const ( // Volume size, in bytes (e,g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024) ResourceStorage ResourceName = "storage" // Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - // The resource name for ResourceEphemeralStorage is alpha and it can change across releases. ResourceEphemeralStorage ResourceName = "ephemeral-storage" ) @@ -5886,6 +6342,7 @@ type ResourceList map[ResourceName]resource.Quantity // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Node is a worker node in Kubernetes. // Each node will have a unique identifier in the cache (i.e. in etcd). @@ -5910,6 +6367,7 @@ type Node struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // NodeList is the whole list of all Nodes which have been registered with master. type NodeList struct { @@ -5937,6 +6395,7 @@ type NamespaceSpec struct { // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ // +optional + // +listType=atomic Finalizers []FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=FinalizerName"` } @@ -5951,6 +6410,8 @@ type NamespaceStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []NamespaceCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` } @@ -6005,6 +6466,7 @@ type NamespaceCondition struct { // +genclient:nonNamespaced // +genclient:skipVerbs=deleteCollection // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Namespace provides a scope for Names. // Use of multiple namespaces is optional. @@ -6027,6 +6489,7 @@ type Namespace struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // NamespaceList is a list of Namespaces. type NamespaceList struct { @@ -6042,6 +6505,7 @@ type NamespaceList struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Binding ties one object to another; for example, a pod is bound to a node by a scheduler. 
// Deprecated in 1.7, please use the bindings subresource of pods instead. @@ -6066,6 +6530,7 @@ type Preconditions struct { // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodLogOptions is the query options for a Pod's logs REST call. type PodLogOptions struct { @@ -6118,6 +6583,7 @@ type PodLogOptions struct { // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.1 // PodAttachOptions is the query options to a Pod's remote attach call. // --- @@ -6156,6 +6622,7 @@ type PodAttachOptions struct { // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodExecOptions is the query options to a Pod's remote exec call. // --- @@ -6188,11 +6655,13 @@ type PodExecOptions struct { Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"` // Command is the remote command to execute. argv array. Not executed within a shell. + // +listType=atomic Command []string `json:"command" protobuf:"bytes,6,rep,name=command"` } // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.6 // PodPortForwardOptions is the query options to a Pod's port forward call // when using WebSockets. @@ -6206,11 +6675,13 @@ type PodPortForwardOptions struct { // List of ports to forward // Required when using WebSockets // +optional + // +listType=atomic Ports []int32 `json:"ports,omitempty" protobuf:"varint,1,rep,name=ports"` } // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodProxyOptions is the query options to a Pod's proxy call. type PodProxyOptions struct { @@ -6223,6 +6694,7 @@ type PodProxyOptions struct { // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // NodeProxyOptions is the query options to a Node's proxy call. type NodeProxyOptions struct { @@ -6235,6 +6707,7 @@ type NodeProxyOptions struct { // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // ServiceProxyOptions is the query options to a Service's proxy call. type ServiceProxyOptions struct { @@ -6309,9 +6782,15 @@ type ObjectReference struct { // +structType=atomic type LocalObjectReference struct { // Name of the referent. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // This field is effectively required, but due to backwards compatibility is + // allowed to be empty. Instances of this type with an empty value here are + // almost certainly wrong. // TODO: Add other useful fields. apiVersion, kind, uid? + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names // +optional + // +default="" + // +kubebuilder:default="" + // TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. 
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` } @@ -6331,6 +6810,7 @@ type TypedLocalObjectReference struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // SerializedReference is a reference to serialized object. type SerializedReference struct { @@ -6360,6 +6840,7 @@ const ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Event is a report of an event somewhere in the cluster. Events // have a limited retention time and triggers and messages may evolve @@ -6444,6 +6925,7 @@ type EventSeries struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // EventList is a list of events. type EventList struct { @@ -6458,6 +6940,7 @@ type EventList struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // List holds a list of objects, which may not be known by the server. type List metav1.List @@ -6499,11 +6982,13 @@ type LimitRangeItem struct { // LimitRangeSpec defines a min/max usage limit for resources that match on kind. type LimitRangeSpec struct { // Limits is the list of LimitRangeItem objects that are enforced. + // +listType=atomic Limits []LimitRangeItem `json:"limits" protobuf:"bytes,1,rep,name=limits"` } // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // LimitRange sets resource usage limits for each kind of resource in a Namespace. type LimitRange struct { @@ -6520,6 +7005,7 @@ type LimitRange struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // LimitRangeList is a list of LimitRange items. type LimitRangeList struct { @@ -6568,6 +7054,8 @@ const ( ResourceLimitsMemory ResourceName = "limits.memory" // Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage" + // resource.k8s.io devices requested with a certain DeviceClass, number + ResourceClaimsPerClass string = ".deviceclass.resource.k8s.io/devices" ) // The following identify resource prefix for Kubernetes object types @@ -6607,6 +7095,7 @@ type ResourceQuotaSpec struct { // A collection of filters that must match each object tracked by a quota. // If not specified, the quota matches all objects. // +optional + // +listType=atomic Scopes []ResourceQuotaScope `json:"scopes,omitempty" protobuf:"bytes,2,rep,name=scopes,casttype=ResourceQuotaScope"` // scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota // but expressed using ScopeSelectorOperator in combination with possible values. @@ -6621,6 +7110,7 @@ type ResourceQuotaSpec struct { type ScopeSelector struct { // A list of scope selector requirements by scope of the resources. // +optional + // +listType=atomic MatchExpressions []ScopedResourceSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,1,rep,name=matchExpressions"` } @@ -6637,6 +7127,7 @@ type ScopedResourceSelectorRequirement struct { // the values array must be empty. // This array is replaced during a strategic merge patch. 
// +optional + // +listType=atomic Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` } @@ -6665,6 +7156,7 @@ type ResourceQuotaStatus struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ResourceQuota sets aggregate quota restrictions enforced per namespace type ResourceQuota struct { @@ -6686,6 +7178,7 @@ type ResourceQuota struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ResourceQuotaList is a list of ResourceQuota items. type ResourceQuotaList struct { @@ -6702,6 +7195,7 @@ type ResourceQuotaList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Secret holds secret data of a certain type. The total bytes of the values in // the Data field must be less than MaxSecretSize bytes. @@ -6828,6 +7322,7 @@ const ( ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // SecretList is a list of Secret. type SecretList struct { @@ -6844,6 +7339,7 @@ type SecretList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // ConfigMap holds configuration data for pods to consume. type ConfigMap struct { @@ -6880,6 +7376,7 @@ type ConfigMap struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // ConfigMapList is a resource containing a list of ConfigMap objects. type ConfigMapList struct { @@ -6922,6 +7419,7 @@ type ComponentCondition struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ComponentStatus (and ComponentStatusList) holds the cluster validation info. // Deprecated: This API is deprecated in v1.19+ @@ -6936,10 +7434,13 @@ type ComponentStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []ComponentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Status of all the conditions for the component as a list of ComponentStatus objects. // Deprecated: This API is deprecated in v1.19+ @@ -6959,6 +7460,7 @@ type ComponentStatusList struct { type DownwardAPIVolumeSource struct { // Items is a list of downward API volume file // +optional + // +listType=atomic Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"` // Optional: mode bits to use on created files by default. Must be a // Optional: mode bits used to set permissions on created files by default. @@ -6980,7 +7482,7 @@ const ( type DownwardAPIVolumeFile struct { // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' Path string `json:"path" protobuf:"bytes,1,opt,name=path"` - // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. 
+ // Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported. // +optional FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,2,opt,name=fieldRef"` // Selects a resource of the container: only resources limits and requests @@ -7003,6 +7505,7 @@ type DownwardAPIVolumeFile struct { type DownwardAPIProjection struct { // Items is a list of DownwardAPIVolume file // +optional + // +listType=atomic Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"` } @@ -7071,7 +7574,7 @@ type SecurityContext struct { // +optional AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,7,opt,name=allowPrivilegeEscalation"` // procMount denotes the type of proc mount to use for the containers. - // The default is DefaultProcMount which uses the container runtime defaults for + // The default value is Default which uses the container runtime defaults for // readonly paths and masked paths. // This requires the ProcMountType feature flag to be enabled. // Note that this field cannot be set when spec.os.name is windows. @@ -7083,6 +7586,11 @@ type SecurityContext struct { // Note that this field cannot be set when spec.os.name is windows. // +optional SeccompProfile *SeccompProfile `json:"seccompProfile,omitempty" protobuf:"bytes,11,opt,name=seccompProfile"` + // appArmorProfile is the AppArmor options to use by this container. If set, this profile + // overrides the pod's appArmorProfile. + // Note that this field cannot be set when spec.os.name is windows. + // +optional + AppArmorProfile *AppArmorProfile `json:"appArmorProfile,omitempty" protobuf:"bytes,12,opt,name=appArmorProfile"` } // +enum @@ -7144,6 +7652,7 @@ type WindowsSecurityContextOptions struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // RangeAllocation is not a public type. type RangeAllocation struct { @@ -7178,13 +7687,6 @@ type Sysctl struct { Value string `json:"value" protobuf:"bytes,2,opt,name=value"` } -// NodeResources is an object for conveying resource information about a node. -// see https://kubernetes.io/docs/concepts/architecture/nodes/#capacity for more details. -type NodeResources struct { - // Capacity represents the available resources of a node - Capacity ResourceList `protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` -} - const ( // Enable stdin for remote command execution ExecStdinParam = "input" @@ -7260,3 +7762,23 @@ const ( // the destination set to the node's IP and port or the pod's IP and port. LoadBalancerIPModeProxy LoadBalancerIPMode = "Proxy" ) + +// ImageVolumeSource represents a image volume resource. +type ImageVolumeSource struct { + // Required: Image or artifact reference to be used. + // Behaves in the same way as pod.spec.containers[*].image. + // Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + // More info: https://kubernetes.io/docs/concepts/containers/images + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. + // +optional + Reference string `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"` + + // Policy for pulling OCI objects. 
Possible values are: + // Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + // Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + // IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // +optional + PullPolicy PullPolicy `json:"pullPolicy,omitempty" protobuf:"bytes,2,opt,name=pullPolicy,casttype=PullPolicy"` +} diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index fd6f7dc61b..950806ef8e 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -50,6 +50,16 @@ func (Affinity) SwaggerDoc() map[string]string { return map_Affinity } +var map_AppArmorProfile = map[string]string{ + "": "AppArmorProfile defines a pod or container's AppArmor settings.", + "type": "type indicates which kind of AppArmor profile will be applied. Valid options are:\n Localhost - a profile pre-loaded on the node.\n RuntimeDefault - the container runtime's default profile.\n Unconfined - no AppArmor enforcement.", + "localhostProfile": "localhostProfile indicates a profile loaded on the node that should be used. The profile must be preconfigured on the node to work. Must match the loaded name of the profile. Must be set if and only if type is \"Localhost\".", +} + +func (AppArmorProfile) SwaggerDoc() map[string]string { + return map_AppArmorProfile +} + var map_AttachedVolume = map[string]string{ "": "AttachedVolume describes a volume attached to a node", "name": "Name of the attached volume", @@ -209,16 +219,6 @@ func (CinderVolumeSource) SwaggerDoc() map[string]string { return map_CinderVolumeSource } -var map_ClaimSource = map[string]string{ - "": "ClaimSource describes a reference to a ResourceClaim.\n\nExactly one of these fields should be set. Consumers of this type must treat an empty object as if it has an unknown value.", - "resourceClaimName": "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.", - "resourceClaimTemplateName": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.", -} - -func (ClaimSource) SwaggerDoc() map[string]string { - return map_ClaimSource -} - var map_ClientIPConfig = map[string]string{ "": "ClientIPConfig represents the configurations of Client IP based session affinity.", "timeoutSeconds": "timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity == \"ClientIP\". 
Default value is 10800(for 3 hours).", @@ -459,24 +459,36 @@ func (ContainerStateWaiting) SwaggerDoc() map[string]string { } var map_ContainerStatus = map[string]string{ - "": "ContainerStatus contains details for the current status of this container.", - "name": "Name is a DNS_LABEL representing the unique name of the container. Each container in a pod must have a unique name across all container types. Cannot be updated.", - "state": "State holds details about the container's current condition.", - "lastState": "LastTerminationState holds the last termination state of the container to help debug container crashes and restarts. This field is not populated if the container is still running and RestartCount is 0.", - "ready": "Ready specifies whether the container is currently passing its readiness check. The value will change as readiness probes keep executing. If no readiness probes are specified, this field defaults to true once the container is fully started (see Started field).\n\nThe value is typically used to determine whether a container is ready to accept traffic.", - "restartCount": "RestartCount holds the number of times the container has been restarted. Kubelet makes an effort to always increment the value, but there are cases when the state may be lost due to node restarts and then the value may be reset to 0. The value is never negative.", - "image": "Image is the name of container image that the container is running. The container image may not match the image used in the PodSpec, as it may have been resolved by the runtime. More info: https://kubernetes.io/docs/concepts/containers/images.", - "imageID": "ImageID is the image ID of the container's image. The image ID may not match the image ID of the image used in the PodSpec, as it may have been resolved by the runtime.", - "containerID": "ContainerID is the ID of the container in the format '://'. Where type is a container runtime identifier, returned from Version call of CRI API (for example \"containerd\").", - "started": "Started indicates whether the container has finished its postStart lifecycle hook and passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. In both cases, startup probes will run again. Is always true when no startupProbe is defined and container is running and has passed the postStart lifecycle hook. The null value must be treated the same as false.", - "allocatedResources": "AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize.", - "resources": "Resources represents the compute resource requests and limits that have been successfully enacted on the running container after it has been started or has been successfully resized.", + "": "ContainerStatus contains details for the current status of this container.", + "name": "Name is a DNS_LABEL representing the unique name of the container. Each container in a pod must have a unique name across all container types. Cannot be updated.", + "state": "State holds details about the container's current condition.", + "lastState": "LastTerminationState holds the last termination state of the container to help debug container crashes and restarts. 
This field is not populated if the container is still running and RestartCount is 0.", + "ready": "Ready specifies whether the container is currently passing its readiness check. The value will change as readiness probes keep executing. If no readiness probes are specified, this field defaults to true once the container is fully started (see Started field).\n\nThe value is typically used to determine whether a container is ready to accept traffic.", + "restartCount": "RestartCount holds the number of times the container has been restarted. Kubelet makes an effort to always increment the value, but there are cases when the state may be lost due to node restarts and then the value may be reset to 0. The value is never negative.", + "image": "Image is the name of container image that the container is running. The container image may not match the image used in the PodSpec, as it may have been resolved by the runtime. More info: https://kubernetes.io/docs/concepts/containers/images.", + "imageID": "ImageID is the image ID of the container's image. The image ID may not match the image ID of the image used in the PodSpec, as it may have been resolved by the runtime.", + "containerID": "ContainerID is the ID of the container in the format '://'. Where type is a container runtime identifier, returned from Version call of CRI API (for example \"containerd\").", + "started": "Started indicates whether the container has finished its postStart lifecycle hook and passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. In both cases, startup probes will run again. Is always true when no startupProbe is defined and container is running and has passed the postStart lifecycle hook. The null value must be treated the same as false.", + "allocatedResources": "AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize.", + "resources": "Resources represents the compute resource requests and limits that have been successfully enacted on the running container after it has been started or has been successfully resized.", + "volumeMounts": "Status of volume mounts.", + "user": "User represents user identity information initially attached to the first process of the container", + "allocatedResourcesStatus": "AllocatedResourcesStatus represents the status of various resources allocated for this Pod.", } func (ContainerStatus) SwaggerDoc() map[string]string { return map_ContainerStatus } +var map_ContainerUser = map[string]string{ + "": "ContainerUser represents user identity information", + "linux": "Linux holds user identity information initially attached to the first process of the containers in Linux. 
Note that the actual running identity can be changed if the process has enough privilege to do so.", +} + +func (ContainerUser) SwaggerDoc() map[string]string { + return map_ContainerUser +} + var map_DaemonEndpoint = map[string]string{ "": "DaemonEndpoint contains information about a single Daemon endpoint.", "Port": "Port number of the given endpoint.", @@ -498,7 +510,7 @@ func (DownwardAPIProjection) SwaggerDoc() map[string]string { var map_DownwardAPIVolumeFile = map[string]string{ "": "DownwardAPIVolumeFile represents information to create the file containing the pod field", "path": "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'", - "fieldRef": "Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.", + "fieldRef": "Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported.", "resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.", "mode": "Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", } @@ -922,6 +934,16 @@ func (ISCSIVolumeSource) SwaggerDoc() map[string]string { return map_ISCSIVolumeSource } +var map_ImageVolumeSource = map[string]string{ + "": "ImageVolumeSource represents a image volume resource.", + "reference": "Required: Image or artifact reference to be used. Behaves in the same way as pod.spec.containers[*].image. Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + "pullPolicy": "Policy for pulling OCI objects. Possible values are: Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. 
Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.", +} + +func (ImageVolumeSource) SwaggerDoc() map[string]string { + return map_ImageVolumeSource +} + var map_KeyToPath = map[string]string{ "": "Maps a string key to a path within a volume.", "key": "key is the key to project.", @@ -998,6 +1020,17 @@ func (LimitRangeSpec) SwaggerDoc() map[string]string { return map_LimitRangeSpec } +var map_LinuxContainerUser = map[string]string{ + "": "LinuxContainerUser represents user identity information in Linux containers", + "uid": "UID is the primary uid initially attached to the first process in the container", + "gid": "GID is the primary gid initially attached to the first process in the container", + "supplementalGroups": "SupplementalGroups are the supplemental groups initially attached to the first process in the container", +} + +func (LinuxContainerUser) SwaggerDoc() map[string]string { + return map_LinuxContainerUser +} + var map_LoadBalancerIngress = map[string]string{ "": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.", "ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)", @@ -1021,7 +1054,7 @@ func (LoadBalancerStatus) SwaggerDoc() map[string]string { var map_LocalObjectReference = map[string]string{ "": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", - "name": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "name": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", } func (LocalObjectReference) SwaggerDoc() map[string]string { @@ -1184,6 +1217,15 @@ func (NodeDaemonEndpoints) SwaggerDoc() map[string]string { return map_NodeDaemonEndpoints } +var map_NodeFeatures = map[string]string{ + "": "NodeFeatures describes the set of features implemented by the CRI implementation. The features contained in the NodeFeatures should depend only on the cri implementation independent of runtime handlers.", + "supplementalGroupsPolicy": "SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser.", +} + +func (NodeFeatures) SwaggerDoc() map[string]string { + return map_NodeFeatures +} + var map_NodeList = map[string]string{ "": "NodeList is the whole list of all Nodes which have been registered with master.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", @@ -1203,13 +1245,24 @@ func (NodeProxyOptions) SwaggerDoc() map[string]string { return map_NodeProxyOptions } -var map_NodeResources = map[string]string{ - "": "NodeResources is an object for conveying resource information about a node. see https://kubernetes.io/docs/concepts/architecture/nodes/#capacity for more details.", - "Capacity": "Capacity represents the available resources of a node", +var map_NodeRuntimeHandler = map[string]string{ + "": "NodeRuntimeHandler is a set of runtime handler information.", + "name": "Runtime handler name. 
Empty for the default runtime handler.", + "features": "Supported features.", +} + +func (NodeRuntimeHandler) SwaggerDoc() map[string]string { + return map_NodeRuntimeHandler +} + +var map_NodeRuntimeHandlerFeatures = map[string]string{ + "": "NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.", + "recursiveReadOnlyMounts": "RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.", + "userNamespaces": "UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes.", } -func (NodeResources) SwaggerDoc() map[string]string { - return map_NodeResources +func (NodeRuntimeHandlerFeatures) SwaggerDoc() map[string]string { + return map_NodeRuntimeHandlerFeatures } var map_NodeSelector = map[string]string{ @@ -1259,7 +1312,7 @@ func (NodeSpec) SwaggerDoc() map[string]string { var map_NodeStatus = map[string]string{ "": "NodeStatus is information about the current status of a node.", - "capacity": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity", + "capacity": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/reference/node/node-status/#capacity", "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.", "conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition", @@ -1270,6 +1323,8 @@ var map_NodeStatus = map[string]string{ "volumesInUse": "List of attachable volumes in use (mounted) by the node.", "volumesAttached": "List of volumes that are attached to the node.", "config": "Status of the config assigned to the node via the dynamic Kubelet config feature.", + "runtimeHandlers": "The available runtime handlers.", + "features": "Features describes the set of features implemented by the CRI implementation.", } func (NodeStatus) SwaggerDoc() map[string]string { @@ -1285,7 +1340,7 @@ var map_NodeSystemInfo = map[string]string{ "osImage": "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).", "containerRuntimeVersion": "ContainerRuntime Version reported by the node through runtime remote API (e.g. containerd://1.4.2).", "kubeletVersion": "Kubelet Version reported by the node.", - "kubeProxyVersion": "KubeProxy Version reported by the node.", + "kubeProxyVersion": "Deprecated: KubeProxy Version reported by the node.", "operatingSystem": "The Operating System reported by the node", "architecture": "The Architecture reported by the node", } @@ -1345,7 +1400,7 @@ var map_PersistentVolumeClaimCondition = map[string]string{ "": "PersistentVolumeClaimCondition contains details about state of pvc", "lastProbeTime": "lastProbeTime is the time we probed the condition.", "lastTransitionTime": "lastTransitionTime is the time the condition transitioned from one status to another.", - "reason": "reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. 
If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.", + "reason": "reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"Resizing\" that means the underlying persistent volume is being resized.", "message": "message is the human-readable message indicating details about last transition.", } @@ -1373,7 +1428,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{ "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.", "dataSource": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.", "dataSourceRef": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", - "volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. 
If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.", + "volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).", } func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { @@ -1385,11 +1440,11 @@ var map_PersistentVolumeClaimStatus = map[string]string{ "phase": "phase represents the current phase of PersistentVolumeClaim.", "accessModes": "accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", "capacity": "capacity represents the actual resources of the underlying volume.", - "conditions": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", + "conditions": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'Resizing'.", "allocatedResources": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. 
For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", "allocatedResourceStatuses": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", - "currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is an alpha field and requires enabling VolumeAttributesClass feature.", - "modifyVolumeStatus": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is an alpha field and requires enabling VolumeAttributesClass feature.", + "currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is a beta field and requires enabling VolumeAttributesClass feature (off by default).", + "modifyVolumeStatus": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. 
This is a beta field and requires enabling VolumeAttributesClass feature (off by default).", } func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string { @@ -1466,7 +1521,7 @@ var map_PersistentVolumeSpec = map[string]string{ "mountOptions": "mountOptions is the list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options", "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.", "nodeAffinity": "nodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.", - "volumeAttributesClassName": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is an alpha field and requires enabling VolumeAttributesClass feature.", + "volumeAttributesClassName": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).", } func (PersistentVolumeSpec) SwaggerDoc() map[string]string { @@ -1478,7 +1533,7 @@ var map_PersistentVolumeStatus = map[string]string{ "phase": "phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase", "message": "message is a human-readable message indicating details about why the volume is in this state.", "reason": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", - "lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions. This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default).", + "lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions.", } func (PersistentVolumeStatus) SwaggerDoc() map[string]string { @@ -1522,8 +1577,8 @@ var map_PodAffinityTerm = map[string]string{ "namespaces": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".", "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.", "namespaceSelector": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces.", - "matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", - "mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", + "matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).", + "mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. 
Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).", } func (PodAffinityTerm) SwaggerDoc() map[string]string { @@ -1674,9 +1729,10 @@ func (PodReadinessGate) SwaggerDoc() map[string]string { } var map_PodResourceClaim = map[string]string{ - "": "PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.", - "name": "Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL.", - "source": "Source describes where to find the ResourceClaim.", + "": "PodResourceClaim references exactly one ResourceClaim, either directly or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim for the pod.\n\nIt adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.", + "name": "Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL.", + "resourceClaimName": "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.\n\nExactly one of ResourceClaimName and ResourceClaimTemplateName must be set.", + "resourceClaimTemplateName": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.\n\nExactly one of ResourceClaimName and ResourceClaimTemplateName must be set.", } func (PodResourceClaim) SwaggerDoc() map[string]string { @@ -1686,7 +1742,7 @@ func (PodResourceClaim) SwaggerDoc() map[string]string { var map_PodResourceClaimStatus = map[string]string{ "": "PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim which references a ResourceClaimTemplate. It stores the generated name for the corresponding ResourceClaim.", "name": "Name uniquely identifies this resource claim inside the pod. This must match the name of an entry in pod.spec.resourceClaims, which implies that the string must be a DNS_LABEL.", - "resourceClaimName": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. It this is unset, then generating a ResourceClaim was not necessary. The pod.spec.resourceClaims entry can be ignored in this case.", + "resourceClaimName": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. If this is unset, then generating a ResourceClaim was not necessary. The pod.spec.resourceClaims entry can be ignored in this case.", } func (PodResourceClaimStatus) SwaggerDoc() map[string]string { @@ -1703,17 +1759,19 @@ func (PodSchedulingGate) SwaggerDoc() map[string]string { } var map_PodSecurityContext = map[string]string{ - "": "PodSecurityContext holds pod-level security attributes and common container settings. 
Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", - "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", - "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.", - "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", - "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", - "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.", - "fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ", - "sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.", - "fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". 
If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.", - "seccompProfile": "The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.", + "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", + "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", + "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.", + "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", + "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", + "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID and fsGroup (if specified). If the SupplementalGroupsPolicy feature is enabled, the supplementalGroupsPolicy field determines whether these are in addition to or instead of any group memberships defined in the container image. If unspecified, no additional groups are added, though group memberships defined in the container image may still be used, depending on the supplementalGroupsPolicy field. Note that this field cannot be set when spec.os.name is windows.", + "supplementalGroupsPolicy": "Defines how supplemental groups of the first container processes are calculated. Valid values are \"Merge\" and \"Strict\". If not specified, \"Merge\" is used. (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled and the container runtime must implement support for this feature. Note that this field cannot be set when spec.os.name is windows.", + "fsGroup": "A special supplemental group that applies to all containers in a pod. 
Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ", + "sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.", + "fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.", + "seccompProfile": "The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.", + "appArmorProfile": "appArmorProfile is the AppArmor options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.", } func (PodSecurityContext) SwaggerDoc() map[string]string { @@ -1741,9 +1799,9 @@ var map_PodSpec = map[string]string{ "dnsPolicy": "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", "nodeSelector": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", "serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", - "serviceAccount": "DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", + "serviceAccount": "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.", - "nodeName": "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.", + "nodeName": "NodeName indicates in which node this pod is scheduled. If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. This field should not be used to express a desire for the pod to be scheduled on a specific node. https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename", "hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", "hostPID": "Use the host's pid namespace. 
Optional: Default to false.", "hostIPC": "Use the host's ipc namespace. Optional: Default to false.", @@ -1755,7 +1813,7 @@ var map_PodSpec = map[string]string{ "affinity": "If specified, the pod's scheduling constraints", "schedulerName": "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", "tolerations": "If specified, the pod's tolerations.", - "hostAliases": "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.", + "hostAliases": "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified.", "priorityClassName": "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", "priority": "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", "dnsConfig": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", @@ -1766,9 +1824,9 @@ var map_PodSpec = map[string]string{ "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", "topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.", "setHostnameAsFQDN": "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.", - "os": "Specifies the OS of the containers in the pod. 
Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup", + "os": "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.securityContext.supplementalGroupsPolicy - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup", "hostUsers": "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.", - "schedulingGates": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\n\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate.", + "schedulingGates": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.", "resourceClaims": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.", } @@ -1920,7 +1978,7 @@ func (ProbeHandler) SwaggerDoc() map[string]string { var map_ProjectedVolumeSource = map[string]string{ "": "Represents a projected volume source", - "sources": "sources is the list of volume projections", + "sources": "sources is the list of volume projections. Each entry in this list handles one source.", "defaultMode": "defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", } @@ -2046,8 +2104,9 @@ func (ReplicationControllerStatus) SwaggerDoc() map[string]string { } var map_ResourceClaim = map[string]string{ - "": "ResourceClaim references one entry in PodSpec.ResourceClaims.", - "name": "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.", + "": "ResourceClaim references one entry in PodSpec.ResourceClaims.", + "name": "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.", + "request": "Request is the name chosen for a request in the referenced claim. If empty, everything from the claim is made available, otherwise only the result of this request.", } func (ResourceClaim) SwaggerDoc() map[string]string { @@ -2065,6 +2124,16 @@ func (ResourceFieldSelector) SwaggerDoc() map[string]string { return map_ResourceFieldSelector } +var map_ResourceHealth = map[string]string{ + "": "ResourceHealth represents the health of a resource. It has the latest device health information. This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP.", + "resourceID": "ResourceID is the unique identifier of the resource. See the ResourceID type for more information.", + "health": "Health of the resource. can be one of:\n - Healthy: operates as normal\n - Unhealthy: reported unhealthy. We consider this a temporary health issue\n since we do not have a mechanism today to distinguish\n temporary and permanent issues.\n - Unknown: The status cannot be determined.\n For example, Device Plugin got unregistered and hasn't been re-registered since.\n\nIn future we may want to introduce the PermanentlyUnhealthy Status.", +} + +func (ResourceHealth) SwaggerDoc() map[string]string { + return map_ResourceHealth +} + var map_ResourceQuota = map[string]string{ "": "ResourceQuota sets aggregate quota restrictions enforced per namespace", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -2118,6 +2187,15 @@ func (ResourceRequirements) SwaggerDoc() map[string]string { return map_ResourceRequirements } +var map_ResourceStatus = map[string]string{ + "name": "Name of the resource. Must be unique within the pod and match one of the resources from the pod spec.", + "resources": "List of unique Resources health. Each element in the list contains an unique resource ID and resource health. At a minimum, ResourceID must uniquely identify the Resource allocated to the Pod on the Node for the lifetime of a Pod. See ResourceID type for it's definition.", +} + +func (ResourceStatus) SwaggerDoc() map[string]string { + return map_ResourceStatus +} + var map_SELinuxOptions = map[string]string{ "": "SELinuxOptions are the labels to be applied to the container", "user": "User is a SELinux user label that applies to the container.", @@ -2281,8 +2359,9 @@ var map_SecurityContext = map[string]string{ "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "readOnlyRootFilesystem": "Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.", "allowPrivilegeEscalation": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.", - "procMount": "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.", + "procMount": "procMount denotes the type of proc mount to use for the containers. The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.", "seccompProfile": "The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.", + "appArmorProfile": "appArmorProfile is the AppArmor options to use by this container. If set, this profile overrides the pod's appArmorProfile. Note that this field cannot be set when spec.os.name is windows.", } func (SecurityContext) SwaggerDoc() map[string]string { @@ -2396,6 +2475,7 @@ var map_ServiceSpec = map[string]string{ "allocateLoadBalancerNodePorts": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts. 
If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type.", "loadBalancerClass": "loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. \"internal-vip\" or \"example.com/internal-vip\". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.", "internalTrafficPolicy": "InternalTrafficPolicy describes how nodes distribute service traffic they receive on the ClusterIP. If set to \"Local\", the proxy will assume that pods only want to talk to endpoints of the service on the same node as the pod, dropping the traffic if there are no local endpoints. The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features).", + "trafficDistribution": "TrafficDistribution offers a way to express preferences for how traffic is distributed to Service endpoints. Implementations can use this field as a hint, but are not required to guarantee strict adherence. If the field is not set, the implementation will apply its default routing strategy. If set to \"PreferClose\", implementations should prioritize endpoints that are topologically close (e.g., same zone). This is an alpha field and requires enabling ServiceTrafficDistribution feature.", } func (ServiceSpec) SwaggerDoc() map[string]string { @@ -2578,19 +2658,32 @@ func (VolumeDevice) SwaggerDoc() map[string]string { } var map_VolumeMount = map[string]string{ - "": "VolumeMount describes a mounting of a Volume within a container.", - "name": "This must match the Name of a Volume.", - "readOnly": "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.", - "mountPath": "Path within the container at which the volume should be mounted. Must not contain ':'.", - "subPath": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", - "mountPropagation": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.", - "subPathExpr": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). 
SubPathExpr and SubPath are mutually exclusive.", + "": "VolumeMount describes a mounting of a Volume within a container.", + "name": "This must match the Name of a Volume.", + "readOnly": "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.", + "recursiveReadOnly": "RecursiveReadOnly specifies whether read-only mounts should be handled recursively.\n\nIf ReadOnly is false, this field has no meaning and must be unspecified.\n\nIf ReadOnly is true, and this field is set to Disabled, the mount is not made recursively read-only. If this field is set to IfPossible, the mount is made recursively read-only, if it is supported by the container runtime. If this field is set to Enabled, the mount is made recursively read-only if it is supported by the container runtime, otherwise the pod will not be started and an error will be generated to indicate the reason.\n\nIf this field is set to IfPossible or Enabled, MountPropagation must be set to None (or be unspecified, which defaults to None).\n\nIf this field is not specified, it is treated as an equivalent of Disabled.", + "mountPath": "Path within the container at which the volume should be mounted. Must not contain ':'.", + "subPath": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", + "mountPropagation": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified (which defaults to None).", + "subPathExpr": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive.", } func (VolumeMount) SwaggerDoc() map[string]string { return map_VolumeMount } +var map_VolumeMountStatus = map[string]string{ + "": "VolumeMountStatus shows status of volume mounts.", + "name": "Name corresponds to the name of the original VolumeMount.", + "mountPath": "MountPath corresponds to the original VolumeMount.", + "readOnly": "ReadOnly corresponds to the original VolumeMount.", + "recursiveReadOnly": "RecursiveReadOnly must be set to Disabled, Enabled, or unspecified (for non-readonly mounts). An IfPossible value in the original VolumeMount must be translated to Disabled or Enabled, depending on the mount result.", +} + +func (VolumeMountStatus) SwaggerDoc() map[string]string { + return map_VolumeMountStatus +} + var map_VolumeNodeAffinity = map[string]string{ "": "VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.", "required": "required specifies hard node constraints that must be met.", @@ -2601,7 +2694,7 @@ func (VolumeNodeAffinity) SwaggerDoc() map[string]string { } var map_VolumeProjection = map[string]string{ - "": "Projection that may be projected along with other supported volume types", + "": "Projection that may be projected along with other supported volume types. 
Exactly one of these fields must be set.", "secret": "secret information about the secret data to project", "downwardAPI": "downwardAPI information about the downwardAPI data to project", "configMap": "configMap information about the configMap data to project", @@ -2654,6 +2747,7 @@ var map_VolumeSource = map[string]string{ "storageos": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", "csi": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", "ephemeral": "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", + "image": "image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. The volume is resolved at pod startup depending on which PullPolicy value is provided:\n\n- Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.\n\nThe volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). 
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.", } func (VolumeSource) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 45172e0e23..3d23f7f620 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -74,6 +74,27 @@ func (in *Affinity) DeepCopy() *Affinity { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppArmorProfile) DeepCopyInto(out *AppArmorProfile) { + *out = *in + if in.LocalhostProfile != nil { + in, out := &in.LocalhostProfile, &out.LocalhostProfile + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppArmorProfile. +func (in *AppArmorProfile) DeepCopy() *AppArmorProfile { + if in == nil { + return nil + } + out := new(AppArmorProfile) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AttachedVolume) DeepCopyInto(out *AttachedVolume) { *out = *in @@ -419,32 +440,6 @@ func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClaimSource) DeepCopyInto(out *ClaimSource) { - *out = *in - if in.ResourceClaimName != nil { - in, out := &in.ResourceClaimName, &out.ResourceClaimName - *out = new(string) - **out = **in - } - if in.ResourceClaimTemplateName != nil { - in, out := &in.ResourceClaimTemplateName, &out.ResourceClaimTemplateName - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimSource. -func (in *ClaimSource) DeepCopy() *ClaimSource { - if in == nil { - return nil - } - out := new(ClaimSource) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) { *out = *in @@ -1041,6 +1036,25 @@ func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { *out = new(ResourceRequirements) (*in).DeepCopyInto(*out) } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMountStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.User != nil { + in, out := &in.User, &out.User + *out = new(ContainerUser) + (*in).DeepCopyInto(*out) + } + if in.AllocatedResourcesStatus != nil { + in, out := &in.AllocatedResourcesStatus, &out.AllocatedResourcesStatus + *out = make([]ResourceStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -1054,6 +1068,27 @@ func (in *ContainerStatus) DeepCopy() *ContainerStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerUser) DeepCopyInto(out *ContainerUser) { + *out = *in + if in.Linux != nil { + in, out := &in.Linux, &out.Linux + *out = new(LinuxContainerUser) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerUser. 
+func (in *ContainerUser) DeepCopy() *ContainerUser { + if in == nil { + return nil + } + out := new(ContainerUser) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DaemonEndpoint) DeepCopyInto(out *DaemonEndpoint) { *out = *in @@ -2016,6 +2051,22 @@ func (in *ISCSIVolumeSource) DeepCopy() *ISCSIVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageVolumeSource) DeepCopyInto(out *ImageVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageVolumeSource. +func (in *ImageVolumeSource) DeepCopy() *ImageVolumeSource { + if in == nil { + return nil + } + out := new(ImageVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KeyToPath) DeepCopyInto(out *KeyToPath) { *out = *in @@ -2233,6 +2284,27 @@ func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxContainerUser) DeepCopyInto(out *LinuxContainerUser) { + *out = *in + if in.SupplementalGroups != nil { + in, out := &in.SupplementalGroups, &out.SupplementalGroups + *out = make([]int64, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxContainerUser. +func (in *LinuxContainerUser) DeepCopy() *LinuxContainerUser { + if in == nil { + return nil + } + out := new(LinuxContainerUser) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *List) DeepCopyInto(out *List) { *out = *in @@ -2667,6 +2739,27 @@ func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeFeatures) DeepCopyInto(out *NodeFeatures) { + *out = *in + if in.SupplementalGroupsPolicy != nil { + in, out := &in.SupplementalGroupsPolicy, &out.SupplementalGroupsPolicy + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeFeatures. +func (in *NodeFeatures) DeepCopy() *NodeFeatures { + if in == nil { + return nil + } + out := new(NodeFeatures) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodeList) DeepCopyInto(out *NodeList) { *out = *in @@ -2726,24 +2819,48 @@ func (in *NodeProxyOptions) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NodeResources) DeepCopyInto(out *NodeResources) { +func (in *NodeRuntimeHandler) DeepCopyInto(out *NodeRuntimeHandler) { *out = *in - if in.Capacity != nil { - in, out := &in.Capacity, &out.Capacity - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } + if in.Features != nil { + in, out := &in.Features, &out.Features + *out = new(NodeRuntimeHandlerFeatures) + (*in).DeepCopyInto(*out) } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResources. -func (in *NodeResources) DeepCopy() *NodeResources { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeRuntimeHandler. +func (in *NodeRuntimeHandler) DeepCopy() *NodeRuntimeHandler { if in == nil { return nil } - out := new(NodeResources) + out := new(NodeRuntimeHandler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeRuntimeHandlerFeatures) DeepCopyInto(out *NodeRuntimeHandlerFeatures) { + *out = *in + if in.RecursiveReadOnlyMounts != nil { + in, out := &in.RecursiveReadOnlyMounts, &out.RecursiveReadOnlyMounts + *out = new(bool) + **out = **in + } + if in.UserNamespaces != nil { + in, out := &in.UserNamespaces, &out.UserNamespaces + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeRuntimeHandlerFeatures. +func (in *NodeRuntimeHandlerFeatures) DeepCopy() *NodeRuntimeHandlerFeatures { + if in == nil { + return nil + } + out := new(NodeRuntimeHandlerFeatures) in.DeepCopyInto(out) return out } @@ -2908,6 +3025,18 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { *out = new(NodeConfigStatus) (*in).DeepCopyInto(*out) } + if in.RuntimeHandlers != nil { + in, out := &in.RuntimeHandlers, &out.RuntimeHandlers + *out = make([]NodeRuntimeHandler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Features != nil { + in, out := &in.Features, &out.Features + *out = new(NodeFeatures) + (*in).DeepCopyInto(*out) + } return } @@ -3917,7 +4046,16 @@ func (in *PodReadinessGate) DeepCopy() *PodReadinessGate { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PodResourceClaim) DeepCopyInto(out *PodResourceClaim) { *out = *in - in.Source.DeepCopyInto(&out.Source) + if in.ResourceClaimName != nil { + in, out := &in.ResourceClaimName, &out.ResourceClaimName + *out = new(string) + **out = **in + } + if in.ResourceClaimTemplateName != nil { + in, out := &in.ResourceClaimTemplateName, &out.ResourceClaimTemplateName + *out = new(string) + **out = **in + } return } @@ -4001,6 +4139,11 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { *out = make([]int64, len(*in)) copy(*out, *in) } + if in.SupplementalGroupsPolicy != nil { + in, out := &in.SupplementalGroupsPolicy, &out.SupplementalGroupsPolicy + *out = new(SupplementalGroupsPolicy) + **out = **in + } if in.FSGroup != nil { in, out := &in.FSGroup, &out.FSGroup *out = new(int64) @@ -4021,6 +4164,11 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { *out = new(SeccompProfile) (*in).DeepCopyInto(*out) } + if in.AppArmorProfile != nil { + in, out := &in.AppArmorProfile, &out.AppArmorProfile + *out = new(AppArmorProfile) + (*in).DeepCopyInto(*out) + } return } @@ -4841,6 +4989,22 @@ func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceHealth) DeepCopyInto(out *ResourceHealth) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceHealth. +func (in *ResourceHealth) DeepCopy() *ResourceHealth { + if in == nil { + return nil + } + out := new(ResourceHealth) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in ResourceList) DeepCopyInto(out *ResourceList) { { @@ -5022,6 +5186,27 @@ func (in *ResourceRequirements) DeepCopy() *ResourceRequirements { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceStatus) DeepCopyInto(out *ResourceStatus) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceHealth, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceStatus. +func (in *ResourceStatus) DeepCopy() *ResourceStatus { + if in == nil { + return nil + } + out := new(ResourceStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SELinuxOptions) DeepCopyInto(out *SELinuxOptions) { *out = *in @@ -5411,6 +5596,11 @@ func (in *SecurityContext) DeepCopyInto(out *SecurityContext) { *out = new(SeccompProfile) (*in).DeepCopyInto(*out) } + if in.AppArmorProfile != nil { + in, out := &in.AppArmorProfile, &out.AppArmorProfile + *out = new(AppArmorProfile) + (*in).DeepCopyInto(*out) + } return } @@ -5715,6 +5905,11 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { *out = new(ServiceInternalTrafficPolicy) **out = **in } + if in.TrafficDistribution != nil { + in, out := &in.TrafficDistribution, &out.TrafficDistribution + *out = new(string) + **out = **in + } return } @@ -6073,6 +6268,11 @@ func (in *VolumeDevice) DeepCopy() *VolumeDevice { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *VolumeMount) DeepCopyInto(out *VolumeMount) { *out = *in + if in.RecursiveReadOnly != nil { + in, out := &in.RecursiveReadOnly, &out.RecursiveReadOnly + *out = new(RecursiveReadOnlyMode) + **out = **in + } if in.MountPropagation != nil { in, out := &in.MountPropagation, &out.MountPropagation *out = new(MountPropagationMode) @@ -6091,6 +6291,27 @@ func (in *VolumeMount) DeepCopy() *VolumeMount { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeMountStatus) DeepCopyInto(out *VolumeMountStatus) { + *out = *in + if in.RecursiveReadOnly != nil { + in, out := &in.RecursiveReadOnly, &out.RecursiveReadOnly + *out = new(RecursiveReadOnlyMode) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMountStatus. +func (in *VolumeMountStatus) DeepCopy() *VolumeMountStatus { + if in == nil { + return nil + } + out := new(VolumeMountStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeNodeAffinity) DeepCopyInto(out *VolumeNodeAffinity) { *out = *in @@ -6331,6 +6552,11 @@ func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { *out = new(EphemeralVolumeSource) (*in).DeepCopyInto(*out) } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(ImageVolumeSource) + **out = **in + } return } diff --git a/vendor/k8s.io/api/core/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/core/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..6710a96d1c --- /dev/null +++ b/vendor/k8s.io/api/core/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,274 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Binding) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ConfigMap) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *ConfigMapList) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Endpoints) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *EndpointsList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Event) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *EventList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *LimitRange) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *LimitRangeList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *List) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Namespace) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *NamespaceList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *Node) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *NodeList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *NodeProxyOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PersistentVolume) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PersistentVolumeClaim) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PersistentVolumeClaimList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PersistentVolumeList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Pod) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodAttachOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 1 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodExecOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *PodList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodLogOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodPortForwardOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 6 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodProxyOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodStatusResult) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodTemplate) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodTemplateList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *RangeAllocation) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ReplicationController) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ReplicationControllerList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *ResourceQuota) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceQuotaList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Secret) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SecretList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SerializedReference) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Service) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceAccount) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceAccountList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceProxyOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} diff --git a/vendor/k8s.io/api/discovery/v1/doc.go b/vendor/k8s.io/api/discovery/v1/doc.go index 96ae531ce7..01913669ff 100644 --- a/vendor/k8s.io/api/discovery/v1/doc.go +++ b/vendor/k8s.io/api/discovery/v1/doc.go @@ -17,6 +17,7 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=discovery.k8s.io package v1 // import "k8s.io/api/discovery/v1" diff --git a/vendor/k8s.io/api/discovery/v1/generated.pb.go b/vendor/k8s.io/api/discovery/v1/generated.pb.go index 79f2cc09d8..5792481dc1 100644 --- a/vendor/k8s.io/api/discovery/v1/generated.pb.go +++ b/vendor/k8s.io/api/discovery/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/discovery/v1/generated.proto +// source: k8s.io/api/discovery/v1/generated.proto package v1 @@ -49,7 +49,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *Endpoint) Reset() { *m = Endpoint{} } func (*Endpoint) ProtoMessage() {} func (*Endpoint) Descriptor() ([]byte, []int) { - return fileDescriptor_3a5d310fb1396ddf, []int{0} + return fileDescriptor_2237b452324cf77e, []int{0} } func (m *Endpoint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -77,7 +77,7 @@ var xxx_messageInfo_Endpoint proto.InternalMessageInfo func (m *EndpointConditions) Reset() { *m = EndpointConditions{} } func (*EndpointConditions) ProtoMessage() {} func (*EndpointConditions) Descriptor() ([]byte, []int) { - return fileDescriptor_3a5d310fb1396ddf, []int{1} + return fileDescriptor_2237b452324cf77e, []int{1} } func (m *EndpointConditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -105,7 +105,7 @@ var xxx_messageInfo_EndpointConditions proto.InternalMessageInfo func (m *EndpointHints) Reset() { *m = EndpointHints{} } func (*EndpointHints) ProtoMessage() {} func (*EndpointHints) Descriptor() ([]byte, []int) { - return fileDescriptor_3a5d310fb1396ddf, []int{2} + return fileDescriptor_2237b452324cf77e, []int{2} } func (m *EndpointHints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -133,7 +133,7 @@ var xxx_messageInfo_EndpointHints proto.InternalMessageInfo func (m *EndpointPort) Reset() { *m = EndpointPort{} } func (*EndpointPort) ProtoMessage() {} func (*EndpointPort) Descriptor() ([]byte, []int) { - return fileDescriptor_3a5d310fb1396ddf, []int{3} + return fileDescriptor_2237b452324cf77e, []int{3} } func (m *EndpointPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -161,7 +161,7 @@ var xxx_messageInfo_EndpointPort proto.InternalMessageInfo func (m *EndpointSlice) Reset() { *m = EndpointSlice{} } func (*EndpointSlice) ProtoMessage() {} func (*EndpointSlice) Descriptor() ([]byte, []int) { - return fileDescriptor_3a5d310fb1396ddf, []int{4} + return fileDescriptor_2237b452324cf77e, []int{4} } func (m *EndpointSlice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -189,7 +189,7 @@ var xxx_messageInfo_EndpointSlice proto.InternalMessageInfo func (m *EndpointSliceList) Reset() { *m = EndpointSliceList{} } func (*EndpointSliceList) ProtoMessage() {} func (*EndpointSliceList) Descriptor() ([]byte, []int) { - return fileDescriptor_3a5d310fb1396ddf, []int{5} + return fileDescriptor_2237b452324cf77e, []int{5} } func (m *EndpointSliceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -217,7 +217,7 @@ var xxx_messageInfo_EndpointSliceList proto.InternalMessageInfo func (m *ForZone) Reset() { *m = ForZone{} } func (*ForZone) ProtoMessage() {} func (*ForZone) Descriptor() ([]byte, []int) { - return fileDescriptor_3a5d310fb1396ddf, []int{6} + return fileDescriptor_2237b452324cf77e, []int{6} } func (m *ForZone) XXX_Unmarshal(b []byte) 
error { return m.Unmarshal(b) @@ -254,67 +254,66 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/discovery/v1/generated.proto", fileDescriptor_3a5d310fb1396ddf) -} - -var fileDescriptor_3a5d310fb1396ddf = []byte{ - // 893 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x4d, 0x6f, 0xe3, 0x44, - 0x18, 0x8e, 0x9b, 0x86, 0xda, 0x93, 0x56, 0xec, 0x8e, 0x90, 0x1a, 0x05, 0x14, 0x87, 0xa0, 0x45, - 0x91, 0x2a, 0x6c, 0x5a, 0x21, 0xb4, 0x20, 0x21, 0x51, 0xb3, 0x65, 0x97, 0xaf, 0x52, 0xcd, 0xf6, - 0xb4, 0x42, 0x5a, 0x5c, 0xfb, 0xad, 0x63, 0xd2, 0xcc, 0x58, 0x33, 0x93, 0x48, 0xe1, 0xc4, 0x85, - 0x33, 0xfc, 0x22, 0x8e, 0xa8, 0xc7, 0xbd, 0xb1, 0x27, 0x8b, 0x9a, 0xbf, 0xc0, 0x69, 0x4f, 0x68, - 0xc6, 0x9f, 0x25, 0x8d, 0xb2, 0x37, 0xcf, 0x33, 0xcf, 0xf3, 0x7e, 0x3c, 0x33, 0xf3, 0x1a, 0x7d, - 0x3e, 0x7d, 0x28, 0x9c, 0x98, 0xb9, 0xd3, 0xf9, 0x05, 0x70, 0x0a, 0x12, 0x84, 0xbb, 0x00, 0x1a, - 0x32, 0xee, 0x16, 0x1b, 0x7e, 0x12, 0xbb, 0x61, 0x2c, 0x02, 0xb6, 0x00, 0xbe, 0x74, 0x17, 0x87, - 0x6e, 0x04, 0x14, 0xb8, 0x2f, 0x21, 0x74, 0x12, 0xce, 0x24, 0xc3, 0xfb, 0x39, 0xd1, 0xf1, 0x93, - 0xd8, 0xa9, 0x88, 0xce, 0xe2, 0xb0, 0xff, 0x41, 0x14, 0xcb, 0xc9, 0xfc, 0xc2, 0x09, 0xd8, 0xcc, - 0x8d, 0x58, 0xc4, 0x5c, 0xcd, 0xbf, 0x98, 0x5f, 0xea, 0x95, 0x5e, 0xe8, 0xaf, 0x3c, 0x4e, 0x7f, - 0xd4, 0x48, 0x18, 0x30, 0x0e, 0x77, 0xe4, 0xea, 0x7f, 0x54, 0x73, 0x66, 0x7e, 0x30, 0x89, 0xa9, - 0xaa, 0x29, 0x99, 0x46, 0x0a, 0x10, 0xee, 0x0c, 0xa4, 0x7f, 0x97, 0xca, 0x5d, 0xa7, 0xe2, 0x73, - 0x2a, 0xe3, 0x19, 0xac, 0x08, 0x3e, 0xde, 0x24, 0x10, 0xc1, 0x04, 0x66, 0xfe, 0xff, 0x75, 0xa3, - 0x7f, 0xb7, 0x91, 0x79, 0x42, 0xc3, 0x84, 0xc5, 0x54, 0xe2, 0x03, 0x64, 0xf9, 0x61, 0xc8, 0x41, - 0x08, 0x10, 0x3d, 0x63, 0xd8, 0x1e, 0x5b, 0xde, 0x5e, 0x96, 0xda, 0xd6, 0x71, 0x09, 0x92, 0x7a, - 0x1f, 0x3f, 0x47, 0x28, 0x60, 0x34, 0x8c, 0x65, 0xcc, 0xa8, 0xe8, 0x6d, 0x0d, 0x8d, 0x71, 0xf7, - 0xe8, 0xc0, 0x59, 0xe3, 0xac, 0x53, 0xe6, 0xf8, 0xa2, 0x92, 0x78, 0xf8, 0x3a, 0xb5, 0x5b, 0x59, - 0x6a, 0xa3, 0x1a, 0x23, 0x8d, 0x90, 0x78, 0x8c, 0xcc, 0x09, 0x13, 0x92, 0xfa, 0x33, 0xe8, 0xb5, - 0x87, 0xc6, 0xd8, 0xf2, 0x76, 0xb3, 0xd4, 0x36, 0x9f, 0x14, 0x18, 0xa9, 0x76, 0xf1, 0x19, 0xb2, - 0xa4, 0xcf, 0x23, 0x90, 0x04, 0x2e, 0x7b, 0xdb, 0xba, 0x92, 0xf7, 0x9a, 0x95, 0xa8, 0xb3, 0x51, - 0x45, 0x7c, 0x7f, 0xf1, 0x13, 0x04, 0x8a, 0x04, 0x1c, 0x68, 0x00, 0x79, 0x73, 0xe7, 0xa5, 0x92, - 0xd4, 0x41, 0xf0, 0xaf, 0x06, 0xc2, 0x21, 0x24, 0x1c, 0x02, 0xe5, 0xd5, 0x39, 0x4b, 0xd8, 0x15, - 0x8b, 0x96, 0xbd, 0xce, 0xb0, 0x3d, 0xee, 0x1e, 0x7d, 0xb2, 0xb1, 0x4b, 0xe7, 0xd1, 0x8a, 0xf6, - 0x84, 0x4a, 0xbe, 0xf4, 0xfa, 0x45, 0xcf, 0x78, 0x95, 0x40, 0xee, 0x48, 0xa8, 0x3c, 0xa0, 0x2c, - 0x84, 0x53, 0xe5, 0xc1, 0x1b, 0xb5, 0x07, 0xa7, 0x05, 0x46, 0xaa, 0x5d, 0xfc, 0x0e, 0xda, 0xfe, - 0x99, 0x51, 0xe8, 0xed, 0x68, 0x96, 0x99, 0xa5, 0xf6, 0xf6, 0x33, 0x46, 0x81, 0x68, 0x14, 0x3f, - 0x46, 0x9d, 0x49, 0x4c, 0xa5, 0xe8, 0x99, 0xda, 0x9d, 0xf7, 0x37, 0x76, 0xf0, 0x44, 0xb1, 0x3d, - 0x2b, 0x4b, 0xed, 0x8e, 0xfe, 0x24, 0xb9, 0xbe, 0x7f, 0x82, 0xf6, 0xd7, 0xf4, 0x86, 0xef, 0xa1, - 0xf6, 0x14, 0x96, 0x3d, 0x43, 0x15, 0x40, 0xd4, 0x27, 0x7e, 0x0b, 0x75, 0x16, 0xfe, 0xd5, 0x1c, - 0xf4, 0xed, 0xb0, 0x48, 0xbe, 0xf8, 0x74, 0xeb, 0xa1, 0x31, 0xfa, 0xcd, 0x40, 0x78, 0xf5, 0x4a, - 0x60, 0x1b, 0x75, 0x38, 0xf8, 0x61, 0x1e, 0xc4, 0xcc, 0xd3, 0x13, 0x05, 0x90, 0x1c, 0xc7, 0x0f, - 0xd0, 0x8e, 0x00, 0xbe, 0x88, 0x69, 0xa4, 0x63, 0x9a, 0x5e, 0x37, 0x4b, 0xed, 0x9d, 0xa7, 0x39, - 0x44, 0xca, 
0x3d, 0x7c, 0x88, 0xba, 0x12, 0xf8, 0x2c, 0xa6, 0xbe, 0x54, 0xd4, 0xb6, 0xa6, 0xbe, - 0x99, 0xa5, 0x76, 0xf7, 0xbc, 0x86, 0x49, 0x93, 0x33, 0x7a, 0x8e, 0xf6, 0x6e, 0xf5, 0x8e, 0x4f, - 0x91, 0x79, 0xc9, 0xb8, 0xf2, 0x30, 0x7f, 0x0b, 0xdd, 0xa3, 0xe1, 0x5a, 0xd7, 0xbe, 0xcc, 0x89, - 0xde, 0xbd, 0xe2, 0x78, 0xcd, 0x02, 0x10, 0xa4, 0x8a, 0x31, 0xfa, 0xd3, 0x40, 0xbb, 0x65, 0x86, - 0x33, 0xc6, 0xa5, 0x3a, 0x31, 0x7d, 0xb7, 0x8d, 0xfa, 0xc4, 0xf4, 0x99, 0x6a, 0x14, 0x3f, 0x46, - 0xa6, 0x7e, 0xa1, 0x01, 0xbb, 0xca, 0xed, 0xf3, 0x0e, 0x54, 0xe0, 0xb3, 0x02, 0x7b, 0x95, 0xda, - 0x6f, 0xaf, 0x4e, 0x1f, 0xa7, 0xdc, 0x26, 0x95, 0x58, 0xa5, 0x49, 0x18, 0x97, 0xda, 0x84, 0x4e, - 0x9e, 0x46, 0xa5, 0x27, 0x1a, 0x55, 0x4e, 0xf9, 0x49, 0x52, 0xca, 0xf4, 0xe3, 0xb1, 0x72, 0xa7, - 0x8e, 0x6b, 0x98, 0x34, 0x39, 0xa3, 0xbf, 0xb6, 0x6a, 0xab, 0x9e, 0x5e, 0xc5, 0x01, 0xe0, 0x1f, - 0x91, 0xa9, 0x06, 0x59, 0xe8, 0x4b, 0x5f, 0x77, 0xd3, 0x3d, 0xfa, 0xb0, 0x61, 0x55, 0x35, 0x8f, - 0x9c, 0x64, 0x1a, 0x29, 0x40, 0x38, 0x8a, 0x5d, 0x3f, 0xc8, 0xef, 0x40, 0xfa, 0xf5, 0x34, 0xa8, - 0x31, 0x52, 0x45, 0xc5, 0x8f, 0x50, 0xb7, 0x98, 0x3c, 0xe7, 0xcb, 0x04, 0x8a, 0x32, 0x47, 0x85, - 0xa4, 0x7b, 0x5c, 0x6f, 0xbd, 0xba, 0xbd, 0x24, 0x4d, 0x19, 0x26, 0xc8, 0x82, 0xa2, 0x70, 0x35, - 0xb1, 0xd4, 0x99, 0xbe, 0xbb, 0xf1, 0x25, 0x78, 0xf7, 0x8b, 0x34, 0x56, 0x89, 0x08, 0x52, 0x87, - 0xc1, 0x5f, 0xa3, 0x8e, 0x32, 0x52, 0xf4, 0xda, 0x3a, 0xde, 0x83, 0x8d, 0xf1, 0x94, 0xf9, 0xde, - 0x5e, 0x11, 0xb3, 0xa3, 0x56, 0x82, 0xe4, 0x21, 0x46, 0x7f, 0x18, 0xe8, 0xfe, 0x2d, 0x67, 0xbf, - 0x8d, 0x85, 0xc4, 0x3f, 0xac, 0xb8, 0xeb, 0xbc, 0x9e, 0xbb, 0x4a, 0xad, 0xbd, 0xad, 0xae, 0x65, - 0x89, 0x34, 0x9c, 0xfd, 0x06, 0x75, 0x62, 0x09, 0xb3, 0xd2, 0x8f, 0xcd, 0x93, 0x41, 0x17, 0x56, - 0x37, 0xf0, 0x95, 0x12, 0x93, 0x3c, 0xc6, 0xe8, 0x00, 0xed, 0x14, 0x37, 0x1f, 0x0f, 0x6f, 0xdd, - 0xee, 0xdd, 0x82, 0xde, 0xb8, 0xe1, 0xde, 0x67, 0xd7, 0x37, 0x83, 0xd6, 0x8b, 0x9b, 0x41, 0xeb, - 0xe5, 0xcd, 0xa0, 0xf5, 0x4b, 0x36, 0x30, 0xae, 0xb3, 0x81, 0xf1, 0x22, 0x1b, 0x18, 0x2f, 0xb3, - 0x81, 0xf1, 0x77, 0x36, 0x30, 0x7e, 0xff, 0x67, 0xd0, 0x7a, 0xb6, 0xbf, 0xe6, 0xa7, 0xfe, 0x5f, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x2e, 0xd0, 0xcc, 0x2e, 0x07, 0x08, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/discovery/v1/generated.proto", fileDescriptor_2237b452324cf77e) +} + +var fileDescriptor_2237b452324cf77e = []byte{ + // 877 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x4d, 0x6f, 0xdc, 0x44, + 0x18, 0x5e, 0x67, 0x63, 0x62, 0x8f, 0x13, 0xd1, 0x8e, 0x90, 0x62, 0x2d, 0xc8, 0x5e, 0x8c, 0x0a, + 0x2b, 0x45, 0x78, 0x49, 0x84, 0x50, 0x41, 0xe2, 0x10, 0xd3, 0xd0, 0xf2, 0x15, 0xa2, 0x69, 0x4e, + 0x15, 0x52, 0x71, 0xec, 0x37, 0x5e, 0x93, 0xd8, 0x63, 0x79, 0x26, 0x2b, 0x2d, 0x27, 0x2e, 0x9c, + 0xe1, 0x17, 0x71, 0x44, 0x39, 0xf6, 0x46, 0x4f, 0x16, 0x31, 0x7f, 0x81, 0x53, 0x4f, 0x68, 0xc6, + 0x9f, 0x61, 0xb3, 0xda, 0xde, 0x3c, 0xcf, 0x3c, 0xcf, 0xfb, 0xf1, 0xcc, 0xcc, 0x6b, 0xf4, 0xc1, + 0xc5, 0x43, 0xe6, 0xc6, 0x74, 0xea, 0x67, 0xf1, 0x34, 0x8c, 0x59, 0x40, 0xe7, 0x90, 0x2f, 0xa6, + 0xf3, 0xfd, 0x69, 0x04, 0x29, 0xe4, 0x3e, 0x87, 0xd0, 0xcd, 0x72, 0xca, 0x29, 0xde, 0xad, 0x88, + 0xae, 0x9f, 0xc5, 0x6e, 0x4b, 0x74, 0xe7, 0xfb, 0xa3, 0x0f, 0xa3, 0x98, 0xcf, 0xae, 0xce, 0xdc, + 0x80, 0x26, 0xd3, 0x88, 0x46, 0x74, 0x2a, 0xf9, 0x67, 0x57, 0xe7, 0x72, 0x25, 0x17, 0xf2, 0xab, + 0x8a, 0x33, 0x72, 0x7a, 0x09, 0x03, 0x9a, 0xc3, 0x1d, 0xb9, 0x46, 0x1f, 0x77, 0x9c, 0xc4, 0x0f, + 0x66, 0x71, 0x2a, 0x6a, 0xca, 0x2e, 0x22, 0x01, 0xb0, 
0x69, 0x02, 0xdc, 0xbf, 0x4b, 0x35, 0x5d, + 0xa5, 0xca, 0xaf, 0x52, 0x1e, 0x27, 0xb0, 0x24, 0xf8, 0x64, 0x9d, 0x80, 0x05, 0x33, 0x48, 0xfc, + 0xff, 0xeb, 0x9c, 0x7f, 0x37, 0x91, 0x76, 0x94, 0x86, 0x19, 0x8d, 0x53, 0x8e, 0xf7, 0x90, 0xee, + 0x87, 0x61, 0x0e, 0x8c, 0x01, 0x33, 0x95, 0xf1, 0x70, 0xa2, 0x7b, 0x3b, 0x65, 0x61, 0xeb, 0x87, + 0x0d, 0x48, 0xba, 0x7d, 0xfc, 0x1c, 0xa1, 0x80, 0xa6, 0x61, 0xcc, 0x63, 0x9a, 0x32, 0x73, 0x63, + 0xac, 0x4c, 0x8c, 0x83, 0x3d, 0x77, 0x85, 0xb3, 0x6e, 0x93, 0xe3, 0x8b, 0x56, 0xe2, 0xe1, 0xeb, + 0xc2, 0x1e, 0x94, 0x85, 0x8d, 0x3a, 0x8c, 0xf4, 0x42, 0xe2, 0x09, 0xd2, 0x66, 0x94, 0xf1, 0xd4, + 0x4f, 0xc0, 0x1c, 0x8e, 0x95, 0x89, 0xee, 0x6d, 0x97, 0x85, 0xad, 0x3d, 0xa9, 0x31, 0xd2, 0xee, + 0xe2, 0x13, 0xa4, 0x73, 0x3f, 0x8f, 0x80, 0x13, 0x38, 0x37, 0x37, 0x65, 0x25, 0xef, 0xf5, 0x2b, + 0x11, 0x67, 0x23, 0x8a, 0xf8, 0xfe, 0xec, 0x27, 0x08, 0x04, 0x09, 0x72, 0x48, 0x03, 0xa8, 0x9a, + 0x3b, 0x6d, 0x94, 0xa4, 0x0b, 0x82, 0x7f, 0x55, 0x10, 0x0e, 0x21, 0xcb, 0x21, 0x10, 0x5e, 0x9d, + 0xd2, 0x8c, 0x5e, 0xd2, 0x68, 0x61, 0xaa, 0xe3, 0xe1, 0xc4, 0x38, 0xf8, 0x74, 0x6d, 0x97, 0xee, + 0xa3, 0x25, 0xed, 0x51, 0xca, 0xf3, 0x85, 0x37, 0xaa, 0x7b, 0xc6, 0xcb, 0x04, 0x72, 0x47, 0x42, + 0xe1, 0x41, 0x4a, 0x43, 0x38, 0x16, 0x1e, 0xbc, 0xd1, 0x79, 0x70, 0x5c, 0x63, 0xa4, 0xdd, 0xc5, + 0xef, 0xa0, 0xcd, 0x9f, 0x69, 0x0a, 0xe6, 0x96, 0x64, 0x69, 0x65, 0x61, 0x6f, 0x3e, 0xa3, 0x29, + 0x10, 0x89, 0xe2, 0xc7, 0x48, 0x9d, 0xc5, 0x29, 0x67, 0xa6, 0x26, 0xdd, 0x79, 0x7f, 0x6d, 0x07, + 0x4f, 0x04, 0xdb, 0xd3, 0xcb, 0xc2, 0x56, 0xe5, 0x27, 0xa9, 0xf4, 0xa3, 0x23, 0xb4, 0xbb, 0xa2, + 0x37, 0x7c, 0x0f, 0x0d, 0x2f, 0x60, 0x61, 0x2a, 0xa2, 0x00, 0x22, 0x3e, 0xf1, 0x5b, 0x48, 0x9d, + 0xfb, 0x97, 0x57, 0x20, 0x6f, 0x87, 0x4e, 0xaa, 0xc5, 0x67, 0x1b, 0x0f, 0x15, 0xe7, 0x37, 0x05, + 0xe1, 0xe5, 0x2b, 0x81, 0x6d, 0xa4, 0xe6, 0xe0, 0x87, 0x55, 0x10, 0xad, 0x4a, 0x4f, 0x04, 0x40, + 0x2a, 0x1c, 0x3f, 0x40, 0x5b, 0x0c, 0xf2, 0x79, 0x9c, 0x46, 0x32, 0xa6, 0xe6, 0x19, 0x65, 0x61, + 0x6f, 0x3d, 0xad, 0x20, 0xd2, 0xec, 0xe1, 0x7d, 0x64, 0x70, 0xc8, 0x93, 0x38, 0xf5, 0xb9, 0xa0, + 0x0e, 0x25, 0xf5, 0xcd, 0xb2, 0xb0, 0x8d, 0xd3, 0x0e, 0x26, 0x7d, 0x8e, 0xf3, 0x1c, 0xed, 0xdc, + 0xea, 0x1d, 0x1f, 0x23, 0xed, 0x9c, 0xe6, 0xc2, 0xc3, 0xea, 0x2d, 0x18, 0x07, 0xe3, 0x95, 0xae, + 0x7d, 0x59, 0x11, 0xbd, 0x7b, 0xf5, 0xf1, 0x6a, 0x35, 0xc0, 0x48, 0x1b, 0xc3, 0xf9, 0x53, 0x41, + 0xdb, 0x4d, 0x86, 0x13, 0x9a, 0x73, 0x71, 0x62, 0xf2, 0x6e, 0x2b, 0xdd, 0x89, 0xc9, 0x33, 0x95, + 0x28, 0x7e, 0x8c, 0x34, 0xf9, 0x42, 0x03, 0x7a, 0x59, 0xd9, 0xe7, 0xed, 0x89, 0xc0, 0x27, 0x35, + 0xf6, 0xaa, 0xb0, 0xdf, 0x5e, 0x9e, 0x3e, 0x6e, 0xb3, 0x4d, 0x5a, 0xb1, 0x48, 0x93, 0xd1, 0x9c, + 0x4b, 0x13, 0xd4, 0x2a, 0x8d, 0x48, 0x4f, 0x24, 0x2a, 0x9c, 0xf2, 0xb3, 0xac, 0x91, 0xc9, 0xc7, + 0xa3, 0x57, 0x4e, 0x1d, 0x76, 0x30, 0xe9, 0x73, 0x9c, 0xbf, 0x36, 0x3a, 0xab, 0x9e, 0x5e, 0xc6, + 0x01, 0xe0, 0x1f, 0x91, 0x26, 0x06, 0x59, 0xe8, 0x73, 0x5f, 0x76, 0x63, 0x1c, 0x7c, 0xd4, 0xb3, + 0xaa, 0x9d, 0x47, 0x6e, 0x76, 0x11, 0x09, 0x80, 0xb9, 0x82, 0xdd, 0x3d, 0xc8, 0xef, 0x80, 0xfb, + 0xdd, 0x34, 0xe8, 0x30, 0xd2, 0x46, 0xc5, 0x8f, 0x90, 0x51, 0x4f, 0x9e, 0xd3, 0x45, 0x06, 0x75, + 0x99, 0x4e, 0x2d, 0x31, 0x0e, 0xbb, 0xad, 0x57, 0xb7, 0x97, 0xa4, 0x2f, 0xc3, 0x04, 0xe9, 0x50, + 0x17, 0x2e, 0x26, 0x96, 0x38, 0xd3, 0x77, 0xd7, 0xbe, 0x04, 0xef, 0x7e, 0x9d, 0x46, 0x6f, 0x10, + 0x46, 0xba, 0x30, 0xf8, 0x6b, 0xa4, 0x0a, 0x23, 0x99, 0x39, 0x94, 0xf1, 0x1e, 0xac, 0x8d, 0x27, + 0xcc, 0xf7, 0x76, 0xea, 0x98, 0xaa, 0x58, 0x31, 0x52, 0x85, 0x70, 0xfe, 0x50, 
0xd0, 0xfd, 0x5b, + 0xce, 0x7e, 0x1b, 0x33, 0x8e, 0x7f, 0x58, 0x72, 0xd7, 0x7d, 0x3d, 0x77, 0x85, 0x5a, 0x7a, 0xdb, + 0x5e, 0xcb, 0x06, 0xe9, 0x39, 0xfb, 0x0d, 0x52, 0x63, 0x0e, 0x49, 0xe3, 0xc7, 0xfa, 0xc9, 0x20, + 0x0b, 0xeb, 0x1a, 0xf8, 0x4a, 0x88, 0x49, 0x15, 0xc3, 0xd9, 0x43, 0x5b, 0xf5, 0xcd, 0xc7, 0xe3, + 0x5b, 0xb7, 0x7b, 0xbb, 0xa6, 0xf7, 0x6e, 0xb8, 0xf7, 0xf9, 0xf5, 0x8d, 0x35, 0x78, 0x71, 0x63, + 0x0d, 0x5e, 0xde, 0x58, 0x83, 0x5f, 0x4a, 0x4b, 0xb9, 0x2e, 0x2d, 0xe5, 0x45, 0x69, 0x29, 0x2f, + 0x4b, 0x4b, 0xf9, 0xbb, 0xb4, 0x94, 0xdf, 0xff, 0xb1, 0x06, 0xcf, 0x76, 0x57, 0xfc, 0xd4, 0xff, + 0x0b, 0x00, 0x00, 0xff, 0xff, 0x76, 0x4b, 0x26, 0xe3, 0xee, 0x07, 0x00, 0x00, } func (m *Endpoint) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/discovery/v1/generated.proto b/vendor/k8s.io/api/discovery/v1/generated.proto index 6d234017b7..8ddf0dc5d3 100644 --- a/vendor/k8s.io/api/discovery/v1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1/generated.proto @@ -54,7 +54,7 @@ message Endpoint { // targetRef is a reference to a Kubernetes object that represents this // endpoint. // +optional - optional k8s.io.api.core.v1.ObjectReference targetRef = 4; + optional .k8s.io.api.core.v1.ObjectReference targetRef = 4; // deprecatedTopology contains topology information part of the v1beta1 // API. This field is deprecated, and will be removed when the v1beta1 @@ -161,7 +161,7 @@ message EndpointPort { message EndpointSlice { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // addressType specifies the type of address carried by this EndpointSlice. // All addresses in this slice must be the same type. This field is @@ -191,7 +191,7 @@ message EndpointSlice { message EndpointSliceList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of endpoint slices repeated EndpointSlice items = 2; diff --git a/vendor/k8s.io/api/discovery/v1/types.go b/vendor/k8s.io/api/discovery/v1/types.go index 7ebb07ca35..d6a9d0fced 100644 --- a/vendor/k8s.io/api/discovery/v1/types.go +++ b/vendor/k8s.io/api/discovery/v1/types.go @@ -23,6 +23,7 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.21 // EndpointSlice represents a subset of the endpoints that implement a service. // For a given service there may be multiple EndpointSlice objects, selected by @@ -206,6 +207,7 @@ type EndpointPort struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.21 // EndpointSliceList represents a list of endpoint slices type EndpointSliceList struct { diff --git a/vendor/k8s.io/api/discovery/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/discovery/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..362867c5b9 --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *EndpointSlice) APILifecycleIntroduced() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *EndpointSliceList) APILifecycleIntroduced() (major, minor int) { + return 1, 21 +} diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go b/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go index fcb9136e74..46935574bf 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/discovery/v1beta1/generated.proto +// source: k8s.io/api/discovery/v1beta1/generated.proto package v1beta1 @@ -49,7 +49,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *Endpoint) Reset() { *m = Endpoint{} } func (*Endpoint) ProtoMessage() {} func (*Endpoint) Descriptor() ([]byte, []int) { - return fileDescriptor_ece80bbc872d519b, []int{0} + return fileDescriptor_6555bad15de200e0, []int{0} } func (m *Endpoint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -77,7 +77,7 @@ var xxx_messageInfo_Endpoint proto.InternalMessageInfo func (m *EndpointConditions) Reset() { *m = EndpointConditions{} } func (*EndpointConditions) ProtoMessage() {} func (*EndpointConditions) Descriptor() ([]byte, []int) { - return fileDescriptor_ece80bbc872d519b, []int{1} + return fileDescriptor_6555bad15de200e0, []int{1} } func (m *EndpointConditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -105,7 +105,7 @@ var xxx_messageInfo_EndpointConditions proto.InternalMessageInfo func (m *EndpointHints) Reset() { *m = EndpointHints{} } func (*EndpointHints) ProtoMessage() {} func (*EndpointHints) Descriptor() ([]byte, []int) { - return fileDescriptor_ece80bbc872d519b, []int{2} + return fileDescriptor_6555bad15de200e0, []int{2} } func (m *EndpointHints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -133,7 +133,7 @@ var xxx_messageInfo_EndpointHints proto.InternalMessageInfo func (m *EndpointPort) Reset() { *m = EndpointPort{} } func (*EndpointPort) ProtoMessage() {} func (*EndpointPort) Descriptor() ([]byte, []int) { - return fileDescriptor_ece80bbc872d519b, []int{3} + return fileDescriptor_6555bad15de200e0, []int{3} } func (m *EndpointPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -161,7 +161,7 @@ var xxx_messageInfo_EndpointPort proto.InternalMessageInfo func (m *EndpointSlice) Reset() { *m = EndpointSlice{} } func (*EndpointSlice) ProtoMessage() {} func 
(*EndpointSlice) Descriptor() ([]byte, []int) { - return fileDescriptor_ece80bbc872d519b, []int{4} + return fileDescriptor_6555bad15de200e0, []int{4} } func (m *EndpointSlice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -189,7 +189,7 @@ var xxx_messageInfo_EndpointSlice proto.InternalMessageInfo func (m *EndpointSliceList) Reset() { *m = EndpointSliceList{} } func (*EndpointSliceList) ProtoMessage() {} func (*EndpointSliceList) Descriptor() ([]byte, []int) { - return fileDescriptor_ece80bbc872d519b, []int{5} + return fileDescriptor_6555bad15de200e0, []int{5} } func (m *EndpointSliceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -217,7 +217,7 @@ var xxx_messageInfo_EndpointSliceList proto.InternalMessageInfo func (m *ForZone) Reset() { *m = ForZone{} } func (*ForZone) ProtoMessage() {} func (*ForZone) Descriptor() ([]byte, []int) { - return fileDescriptor_ece80bbc872d519b, []int{6} + return fileDescriptor_6555bad15de200e0, []int{6} } func (m *ForZone) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -254,66 +254,65 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/discovery/v1beta1/generated.proto", fileDescriptor_ece80bbc872d519b) -} - -var fileDescriptor_ece80bbc872d519b = []byte{ - // 871 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x41, 0x6f, 0xe3, 0x44, - 0x14, 0x8e, 0x9b, 0x86, 0xda, 0x93, 0x56, 0xec, 0x8e, 0x38, 0x44, 0xa5, 0xb2, 0x23, 0xa3, 0x45, - 0x11, 0x15, 0x36, 0xad, 0x56, 0x68, 0x05, 0xa7, 0x1a, 0x0a, 0x8b, 0xb4, 0xec, 0x56, 0xd3, 0x4a, - 0x48, 0x2b, 0x0e, 0x4c, 0xec, 0xa9, 0x63, 0xd2, 0xcc, 0x58, 0x33, 0x93, 0x48, 0xb9, 0xf1, 0x0f, - 0xe0, 0xb7, 0xf0, 0x17, 0x90, 0x50, 0x8f, 0x7b, 0xdc, 0x93, 0x45, 0xcd, 0xbf, 0xe8, 0x09, 0xcd, - 0x78, 0x6c, 0x27, 0x04, 0xba, 0xb9, 0x79, 0xbe, 0x79, 0xdf, 0xf7, 0xde, 0xfb, 0xde, 0xcc, 0x18, - 0x9c, 0x4f, 0x9f, 0x89, 0x20, 0x63, 0xe1, 0x74, 0x3e, 0x26, 0x9c, 0x12, 0x49, 0x44, 0xb8, 0x20, - 0x34, 0x61, 0x3c, 0x34, 0x1b, 0x38, 0xcf, 0xc2, 0x24, 0x13, 0x31, 0x5b, 0x10, 0xbe, 0x0c, 0x17, - 0x27, 0x63, 0x22, 0xf1, 0x49, 0x98, 0x12, 0x4a, 0x38, 0x96, 0x24, 0x09, 0x72, 0xce, 0x24, 0x83, - 0x47, 0x55, 0x74, 0x80, 0xf3, 0x2c, 0x68, 0xa2, 0x03, 0x13, 0x7d, 0xf8, 0x69, 0x9a, 0xc9, 0xc9, - 0x7c, 0x1c, 0xc4, 0x6c, 0x16, 0xa6, 0x2c, 0x65, 0xa1, 0x26, 0x8d, 0xe7, 0xd7, 0x7a, 0xa5, 0x17, - 0xfa, 0xab, 0x12, 0x3b, 0xf4, 0x57, 0x52, 0xc7, 0x8c, 0x93, 0x70, 0xb1, 0x91, 0xf0, 0xf0, 0x69, - 0x1b, 0x33, 0xc3, 0xf1, 0x24, 0xa3, 0xaa, 0xba, 0x7c, 0x9a, 0x2a, 0x40, 0x84, 0x33, 0x22, 0xf1, - 0x7f, 0xb1, 0xc2, 0xff, 0x63, 0xf1, 0x39, 0x95, 0xd9, 0x8c, 0x6c, 0x10, 0x3e, 0x7f, 0x17, 0x41, - 0xc4, 0x13, 0x32, 0xc3, 0xff, 0xe6, 0xf9, 0xbf, 0xef, 0x02, 0xfb, 0x9c, 0x26, 0x39, 0xcb, 0xa8, - 0x84, 0xc7, 0xc0, 0xc1, 0x49, 0xc2, 0x89, 0x10, 0x44, 0x0c, 0xac, 0x61, 0x77, 0xe4, 0x44, 0x07, - 0x65, 0xe1, 0x39, 0x67, 0x35, 0x88, 0xda, 0x7d, 0x98, 0x00, 0x10, 0x33, 0x9a, 0x64, 0x32, 0x63, - 0x54, 0x0c, 0x76, 0x86, 0xd6, 0xa8, 0x7f, 0xfa, 0x59, 0xf0, 0x90, 0xbd, 0x41, 0x9d, 0xe8, 0xab, - 0x86, 0x17, 0xc1, 0xdb, 0xc2, 0xeb, 0x94, 0x85, 0x07, 0x5a, 0x0c, 0xad, 0xe8, 0xc2, 0x11, 0xb0, - 0x27, 0x4c, 0x48, 0x8a, 0x67, 0x64, 0xd0, 0x1d, 0x5a, 0x23, 0x27, 0xda, 0x2f, 0x0b, 0xcf, 0x7e, - 0x6e, 0x30, 0xd4, 0xec, 0xc2, 0x0b, 0xe0, 0x48, 0xcc, 0x53, 0x22, 0x11, 0xb9, 0x1e, 0xec, 0xea, - 0x72, 0x3e, 0x5a, 0x2d, 0x47, 0x0d, 0x28, 0x58, 0x9c, 0x04, 0xaf, 0xc6, 0x3f, 0x93, 0x58, 0x05, - 0x11, 0x4e, 0x68, 0x4c, 0xaa, 0x0e, 0xaf, 0x6a, 0x26, 0x6a, 0x45, 
0xe0, 0x18, 0xd8, 0x92, 0xe5, - 0xec, 0x86, 0xa5, 0xcb, 0x41, 0x6f, 0xd8, 0x1d, 0xf5, 0x4f, 0x9f, 0x6e, 0xd7, 0x5f, 0x70, 0x65, - 0x68, 0xe7, 0x54, 0xf2, 0x65, 0xf4, 0xc8, 0xf4, 0x68, 0xd7, 0x30, 0x6a, 0x74, 0x55, 0x7f, 0x94, - 0x25, 0xe4, 0xa5, 0xea, 0xef, 0xbd, 0xb6, 0xbf, 0x97, 0x06, 0x43, 0xcd, 0x2e, 0x7c, 0x01, 0x7a, - 0x93, 0x8c, 0x4a, 0x31, 0xd8, 0xd3, 0xbd, 0x1d, 0x6f, 0x57, 0xca, 0x73, 0x45, 0x89, 0x9c, 0xb2, - 0xf0, 0x7a, 0xfa, 0x13, 0x55, 0x22, 0x87, 0x5f, 0x82, 0x83, 0xb5, 0x22, 0xe1, 0x23, 0xd0, 0x9d, - 0x92, 0xe5, 0xc0, 0x52, 0x35, 0x20, 0xf5, 0x09, 0x3f, 0x00, 0xbd, 0x05, 0xbe, 0x99, 0x13, 0x3d, - 0x5b, 0x07, 0x55, 0x8b, 0x2f, 0x76, 0x9e, 0x59, 0xfe, 0xaf, 0x16, 0x80, 0x9b, 0xb3, 0x84, 0x1e, - 0xe8, 0x71, 0x82, 0x93, 0x4a, 0xc4, 0xae, 0x92, 0x22, 0x05, 0xa0, 0x0a, 0x87, 0x4f, 0xc0, 0x9e, - 0x20, 0x7c, 0x91, 0xd1, 0x54, 0x6b, 0xda, 0x51, 0xbf, 0x2c, 0xbc, 0xbd, 0xcb, 0x0a, 0x42, 0xf5, - 0x1e, 0x3c, 0x01, 0x7d, 0x49, 0xf8, 0x2c, 0xa3, 0x58, 0xaa, 0xd0, 0xae, 0x0e, 0x7d, 0xbf, 0x2c, - 0xbc, 0xfe, 0x55, 0x0b, 0xa3, 0xd5, 0x18, 0x3f, 0x01, 0x07, 0x6b, 0x1d, 0xc3, 0x4b, 0x60, 0x5f, - 0x33, 0xfe, 0x9a, 0x51, 0x73, 0x92, 0xfb, 0xa7, 0x4f, 0x1e, 0x36, 0xec, 0x9b, 0x2a, 0xba, 0x1d, - 0x96, 0x01, 0x04, 0x6a, 0x84, 0xfc, 0x3f, 0x2d, 0xb0, 0x5f, 0xa7, 0xb9, 0x60, 0x5c, 0xc2, 0x23, - 0xb0, 0xab, 0x4f, 0xa6, 0x76, 0x2d, 0xb2, 0xcb, 0xc2, 0xdb, 0xd5, 0x53, 0xd3, 0x28, 0xfc, 0x16, - 0xd8, 0xfa, 0x92, 0xc5, 0xec, 0xa6, 0xf2, 0x30, 0x3a, 0x56, 0xc2, 0x17, 0x06, 0xbb, 0x2f, 0xbc, - 0x0f, 0x37, 0x1f, 0x90, 0xa0, 0xde, 0x46, 0x0d, 0x59, 0xa5, 0xc9, 0x19, 0x97, 0xda, 0x89, 0x5e, - 0x95, 0x46, 0xa5, 0x47, 0x1a, 0x55, 0x76, 0xe1, 0x3c, 0xaf, 0x69, 0xfa, 0xe8, 0x3b, 0x95, 0x5d, - 0x67, 0x2d, 0x8c, 0x56, 0x63, 0xfc, 0xbb, 0x9d, 0xd6, 0xaf, 0xcb, 0x9b, 0x2c, 0x26, 0xf0, 0x27, - 0x60, 0xab, 0xb7, 0x28, 0xc1, 0x12, 0xeb, 0x6e, 0xd6, 0xef, 0x72, 0xf3, 0xa4, 0x04, 0xf9, 0x34, - 0x55, 0x80, 0x08, 0x54, 0x74, 0x7b, 0x9d, 0xbe, 0x27, 0x12, 0xb7, 0x77, 0xb9, 0xc5, 0x50, 0xa3, - 0x0a, 0xbf, 0x06, 0x7d, 0xf3, 0x78, 0x5c, 0x2d, 0x73, 0x62, 0xca, 0xf4, 0x0d, 0xa5, 0x7f, 0xd6, - 0x6e, 0xdd, 0xaf, 0x2f, 0xd1, 0x2a, 0x0d, 0xfe, 0x00, 0x1c, 0x62, 0x0a, 0x57, 0x8f, 0x8e, 0x1a, - 0xec, 0xc7, 0xdb, 0xdd, 0x84, 0xe8, 0xb1, 0xc9, 0xe5, 0xd4, 0x88, 0x40, 0xad, 0x16, 0x7c, 0x05, - 0x7a, 0xca, 0x4d, 0x31, 0xe8, 0x6a, 0xd1, 0x4f, 0xb6, 0x13, 0x55, 0x63, 0x88, 0x0e, 0x8c, 0x70, - 0x4f, 0xad, 0x04, 0xaa, 0x74, 0xfc, 0x3f, 0x2c, 0xf0, 0x78, 0xcd, 0xe3, 0x17, 0x99, 0x90, 0xf0, - 0xc7, 0x0d, 0x9f, 0x83, 0xed, 0x7c, 0x56, 0x6c, 0xed, 0x72, 0x73, 0x40, 0x6b, 0x64, 0xc5, 0xe3, - 0x0b, 0xd0, 0xcb, 0x24, 0x99, 0xd5, 0xce, 0x6c, 0xf9, 0x46, 0xe8, 0xea, 0xda, 0x2e, 0xbe, 0x53, - 0x0a, 0xa8, 0x12, 0xf2, 0x8f, 0xc1, 0x9e, 0xb9, 0x08, 0x70, 0xb8, 0x76, 0xd8, 0xf7, 0x4d, 0xf8, - 0xca, 0x81, 0x8f, 0xa2, 0xdb, 0x3b, 0xb7, 0xf3, 0xe6, 0xce, 0xed, 0xbc, 0xbd, 0x73, 0x3b, 0xbf, - 0x94, 0xae, 0x75, 0x5b, 0xba, 0xd6, 0x9b, 0xd2, 0xb5, 0xde, 0x96, 0xae, 0xf5, 0x57, 0xe9, 0x5a, - 0xbf, 0xfd, 0xed, 0x76, 0x5e, 0x1f, 0x3d, 0xf4, 0xc3, 0xfe, 0x27, 0x00, 0x00, 0xff, 0xff, 0xd2, - 0xeb, 0x52, 0x19, 0xe8, 0x07, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/discovery/v1beta1/generated.proto", fileDescriptor_6555bad15de200e0) +} + +var fileDescriptor_6555bad15de200e0 = []byte{ + // 857 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x4f, 0x6f, 0xe4, 0x34, + 0x14, 0x9f, 0x74, 0x1a, 0x9a, 0x78, 0x5a, 0xb1, 0x6b, 0x71, 0x18, 0x95, 0x2a, 0x19, 0x05, 0x2d, + 0x1a, 0x51, 0x48, 0x68, 0xb5, 0x42, 0x2b, 
0x38, 0x35, 0xb0, 0xb0, 0x48, 0xcb, 0x6e, 0xe5, 0x56, + 0x42, 0x5a, 0x71, 0xc0, 0x93, 0xb8, 0x19, 0xd3, 0x26, 0x8e, 0x62, 0x77, 0xa4, 0xb9, 0xf1, 0x0d, + 0xe0, 0xb3, 0xf0, 0x15, 0x90, 0x50, 0x8f, 0x7b, 0xdc, 0x53, 0xc4, 0x84, 0x6f, 0xb1, 0x27, 0x64, + 0xc7, 0xf9, 0x33, 0x0c, 0x94, 0xb9, 0xc5, 0x3f, 0xbf, 0xdf, 0xef, 0xbd, 0xf7, 0x7b, 0xb6, 0x03, + 0x3e, 0xbe, 0x7e, 0xc2, 0x7d, 0xca, 0x02, 0x9c, 0xd3, 0x20, 0xa6, 0x3c, 0x62, 0x0b, 0x52, 0x2c, + 0x83, 0xc5, 0xc9, 0x8c, 0x08, 0x7c, 0x12, 0x24, 0x24, 0x23, 0x05, 0x16, 0x24, 0xf6, 0xf3, 0x82, + 0x09, 0x06, 0x8f, 0xea, 0x68, 0x1f, 0xe7, 0xd4, 0x6f, 0xa3, 0x7d, 0x1d, 0x7d, 0xf8, 0x49, 0x42, + 0xc5, 0xfc, 0x76, 0xe6, 0x47, 0x2c, 0x0d, 0x12, 0x96, 0xb0, 0x40, 0x91, 0x66, 0xb7, 0x57, 0x6a, + 0xa5, 0x16, 0xea, 0xab, 0x16, 0x3b, 0xf4, 0x7a, 0xa9, 0x23, 0x56, 0x90, 0x60, 0xb1, 0x91, 0xf0, + 0xf0, 0x71, 0x17, 0x93, 0xe2, 0x68, 0x4e, 0x33, 0x59, 0x5d, 0x7e, 0x9d, 0x48, 0x80, 0x07, 0x29, + 0x11, 0xf8, 0xdf, 0x58, 0xc1, 0x7f, 0xb1, 0x8a, 0xdb, 0x4c, 0xd0, 0x94, 0x6c, 0x10, 0x3e, 0xfb, + 0x3f, 0x02, 0x8f, 0xe6, 0x24, 0xc5, 0xff, 0xe4, 0x79, 0xbf, 0xed, 0x02, 0xeb, 0x69, 0x16, 0xe7, + 0x8c, 0x66, 0x02, 0x1e, 0x03, 0x1b, 0xc7, 0x71, 0x41, 0x38, 0x27, 0x7c, 0x6c, 0x4c, 0x86, 0x53, + 0x3b, 0x3c, 0xa8, 0x4a, 0xd7, 0x3e, 0x6b, 0x40, 0xd4, 0xed, 0xc3, 0x18, 0x80, 0x88, 0x65, 0x31, + 0x15, 0x94, 0x65, 0x7c, 0xbc, 0x33, 0x31, 0xa6, 0xa3, 0xd3, 0x4f, 0xfd, 0xfb, 0xec, 0xf5, 0x9b, + 0x44, 0x5f, 0xb6, 0xbc, 0x10, 0xde, 0x95, 0xee, 0xa0, 0x2a, 0x5d, 0xd0, 0x61, 0xa8, 0xa7, 0x0b, + 0xa7, 0xc0, 0x9a, 0x33, 0x2e, 0x32, 0x9c, 0x92, 0xf1, 0x70, 0x62, 0x4c, 0xed, 0x70, 0xbf, 0x2a, + 0x5d, 0xeb, 0x99, 0xc6, 0x50, 0xbb, 0x0b, 0xcf, 0x81, 0x2d, 0x70, 0x91, 0x10, 0x81, 0xc8, 0xd5, + 0x78, 0x57, 0x95, 0xf3, 0x41, 0xbf, 0x1c, 0x39, 0x20, 0x7f, 0x71, 0xe2, 0xbf, 0x9c, 0xfd, 0x44, + 0x22, 0x19, 0x44, 0x0a, 0x92, 0x45, 0xa4, 0xee, 0xf0, 0xb2, 0x61, 0xa2, 0x4e, 0x04, 0xce, 0x80, + 0x25, 0x58, 0xce, 0x6e, 0x58, 0xb2, 0x1c, 0x9b, 0x93, 0xe1, 0x74, 0x74, 0xfa, 0x78, 0xbb, 0xfe, + 0xfc, 0x4b, 0x4d, 0x7b, 0x9a, 0x89, 0x62, 0x19, 0x3e, 0xd0, 0x3d, 0x5a, 0x0d, 0x8c, 0x5a, 0x5d, + 0xd9, 0x5f, 0xc6, 0x62, 0xf2, 0x42, 0xf6, 0xf7, 0x4e, 0xd7, 0xdf, 0x0b, 0x8d, 0xa1, 0x76, 0x17, + 0x3e, 0x07, 0xe6, 0x9c, 0x66, 0x82, 0x8f, 0xf7, 0x54, 0x6f, 0xc7, 0xdb, 0x95, 0xf2, 0x4c, 0x52, + 0x42, 0xbb, 0x2a, 0x5d, 0x53, 0x7d, 0xa2, 0x5a, 0xe4, 0xf0, 0x0b, 0x70, 0xb0, 0x56, 0x24, 0x7c, + 0x00, 0x86, 0xd7, 0x64, 0x39, 0x36, 0x64, 0x0d, 0x48, 0x7e, 0xc2, 0xf7, 0x80, 0xb9, 0xc0, 0x37, + 0xb7, 0x44, 0xcd, 0xd6, 0x46, 0xf5, 0xe2, 0xf3, 0x9d, 0x27, 0x86, 0xf7, 0x8b, 0x01, 0xe0, 0xe6, + 0x2c, 0xa1, 0x0b, 0xcc, 0x82, 0xe0, 0xb8, 0x16, 0xb1, 0xea, 0xa4, 0x48, 0x02, 0xa8, 0xc6, 0xe1, + 0x23, 0xb0, 0xc7, 0x49, 0xb1, 0xa0, 0x59, 0xa2, 0x34, 0xad, 0x70, 0x54, 0x95, 0xee, 0xde, 0x45, + 0x0d, 0xa1, 0x66, 0x0f, 0x9e, 0x80, 0x91, 0x20, 0x45, 0x4a, 0x33, 0x2c, 0x64, 0xe8, 0x50, 0x85, + 0xbe, 0x5b, 0x95, 0xee, 0xe8, 0xb2, 0x83, 0x51, 0x3f, 0xc6, 0x8b, 0xc1, 0xc1, 0x5a, 0xc7, 0xf0, + 0x02, 0x58, 0x57, 0xac, 0x78, 0xc5, 0x32, 0x7d, 0x92, 0x47, 0xa7, 0x8f, 0xee, 0x37, 0xec, 0xeb, + 0x3a, 0xba, 0x1b, 0x96, 0x06, 0x38, 0x6a, 0x85, 0xbc, 0x3f, 0x0c, 0xb0, 0xdf, 0xa4, 0x39, 0x67, + 0x85, 0x80, 0x47, 0x60, 0x57, 0x9d, 0x4c, 0xe5, 0x5a, 0x68, 0x55, 0xa5, 0xbb, 0xab, 0xa6, 0xa6, + 0x50, 0xf8, 0x0d, 0xb0, 0xd4, 0x25, 0x8b, 0xd8, 0x4d, 0xed, 0x61, 0x78, 0x2c, 0x85, 0xcf, 0x35, + 0xf6, 0xb6, 0x74, 0xdf, 0xdf, 0x7c, 0x40, 0xfc, 0x66, 0x1b, 0xb5, 0x64, 0x99, 0x26, 0x67, 0x85, + 0x50, 0x4e, 0x98, 0x75, 0x1a, 0x99, 0x1e, 0x29, 0x54, 0xda, 0x85, 
0xf3, 0xbc, 0xa1, 0xa9, 0xa3, + 0x6f, 0xd7, 0x76, 0x9d, 0x75, 0x30, 0xea, 0xc7, 0x78, 0xab, 0x9d, 0xce, 0xaf, 0x8b, 0x1b, 0x1a, + 0x11, 0xf8, 0x23, 0xb0, 0xe4, 0x5b, 0x14, 0x63, 0x81, 0x55, 0x37, 0xeb, 0x77, 0xb9, 0x7d, 0x52, + 0xfc, 0xfc, 0x3a, 0x91, 0x00, 0xf7, 0x65, 0x74, 0x77, 0x9d, 0xbe, 0x23, 0x02, 0x77, 0x77, 0xb9, + 0xc3, 0x50, 0xab, 0x0a, 0xbf, 0x02, 0x23, 0xfd, 0x78, 0x5c, 0x2e, 0x73, 0xa2, 0xcb, 0xf4, 0x34, + 0x65, 0x74, 0xd6, 0x6d, 0xbd, 0x5d, 0x5f, 0xa2, 0x3e, 0x0d, 0x7e, 0x0f, 0x6c, 0xa2, 0x0b, 0x97, + 0x8f, 0x8e, 0x1c, 0xec, 0x87, 0xdb, 0xdd, 0x84, 0xf0, 0xa1, 0xce, 0x65, 0x37, 0x08, 0x47, 0x9d, + 0x16, 0x7c, 0x09, 0x4c, 0xe9, 0x26, 0x1f, 0x0f, 0x95, 0xe8, 0x47, 0xdb, 0x89, 0xca, 0x31, 0x84, + 0x07, 0x5a, 0xd8, 0x94, 0x2b, 0x8e, 0x6a, 0x1d, 0xef, 0x77, 0x03, 0x3c, 0x5c, 0xf3, 0xf8, 0x39, + 0xe5, 0x02, 0xfe, 0xb0, 0xe1, 0xb3, 0xbf, 0x9d, 0xcf, 0x92, 0xad, 0x5c, 0x6e, 0x0f, 0x68, 0x83, + 0xf4, 0x3c, 0x3e, 0x07, 0x26, 0x15, 0x24, 0x6d, 0x9c, 0xd9, 0xf2, 0x8d, 0x50, 0xd5, 0x75, 0x5d, + 0x7c, 0x2b, 0x15, 0x50, 0x2d, 0xe4, 0x1d, 0x83, 0x3d, 0x7d, 0x11, 0xe0, 0x64, 0xed, 0xb0, 0xef, + 0xeb, 0xf0, 0xde, 0x81, 0x0f, 0xc3, 0xbb, 0x95, 0x33, 0x78, 0xbd, 0x72, 0x06, 0x6f, 0x56, 0xce, + 0xe0, 0xe7, 0xca, 0x31, 0xee, 0x2a, 0xc7, 0x78, 0x5d, 0x39, 0xc6, 0x9b, 0xca, 0x31, 0xfe, 0xac, + 0x1c, 0xe3, 0xd7, 0xbf, 0x9c, 0xc1, 0xab, 0xa3, 0xfb, 0x7e, 0xd8, 0x7f, 0x07, 0x00, 0x00, 0xff, + 0xff, 0x1c, 0xe6, 0x20, 0x06, 0xcf, 0x07, 0x00, 0x00, } func (m *Endpoint) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.proto b/vendor/k8s.io/api/discovery/v1beta1/generated.proto index ec555a40b3..55828dd97d 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1beta1/generated.proto @@ -54,7 +54,7 @@ message Endpoint { // targetRef is a reference to a Kubernetes object that represents this // endpoint. // +optional - optional k8s.io.api.core.v1.ObjectReference targetRef = 4; + optional .k8s.io.api.core.v1.ObjectReference targetRef = 4; // topology contains arbitrary topology information associated with the // endpoint. These key/value pairs must conform with the label format. @@ -153,7 +153,7 @@ message EndpointPort { message EndpointSlice { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // addressType specifies the type of address carried by this EndpointSlice. // All addresses in this slice must be the same type. This field is @@ -183,7 +183,7 @@ message EndpointSlice { message EndpointSliceList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of endpoint slices repeated EndpointSlice items = 2; diff --git a/vendor/k8s.io/api/events/v1/doc.go b/vendor/k8s.io/api/events/v1/doc.go index 6e320e0634..5fe700ffcf 100644 --- a/vendor/k8s.io/api/events/v1/doc.go +++ b/vendor/k8s.io/api/events/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=events.k8s.io package v1 // import "k8s.io/api/events/v1" diff --git a/vendor/k8s.io/api/events/v1/generated.pb.go b/vendor/k8s.io/api/events/v1/generated.pb.go index 2ec919a95a..96a6047e86 100644 --- a/vendor/k8s.io/api/events/v1/generated.pb.go +++ b/vendor/k8s.io/api/events/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/events/v1/generated.proto +// source: k8s.io/api/events/v1/generated.proto package v1 @@ -47,7 +47,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_ee2600587b650fac, []int{0} + return fileDescriptor_d3a3e1495c224e47, []int{0} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +75,7 @@ var xxx_messageInfo_Event proto.InternalMessageInfo func (m *EventList) Reset() { *m = EventList{} } func (*EventList) ProtoMessage() {} func (*EventList) Descriptor() ([]byte, []int) { - return fileDescriptor_ee2600587b650fac, []int{1} + return fileDescriptor_d3a3e1495c224e47, []int{1} } func (m *EventList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +103,7 @@ var xxx_messageInfo_EventList proto.InternalMessageInfo func (m *EventSeries) Reset() { *m = EventSeries{} } func (*EventSeries) ProtoMessage() {} func (*EventSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_ee2600587b650fac, []int{2} + return fileDescriptor_d3a3e1495c224e47, []int{2} } func (m *EventSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,60 +135,59 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/events/v1/generated.proto", fileDescriptor_ee2600587b650fac) + proto.RegisterFile("k8s.io/api/events/v1/generated.proto", fileDescriptor_d3a3e1495c224e47) } -var fileDescriptor_ee2600587b650fac = []byte{ - // 775 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4f, 0x6f, 0xe3, 0x44, - 0x14, 0x8f, 0x77, 0x9b, 0xb4, 0x99, 0xec, 0x6e, 0xd3, 0xd9, 0x95, 0x3a, 0x74, 0x25, 0x27, 0x64, - 0x25, 0x14, 0x21, 0x61, 0xd3, 0x0a, 0x21, 0x84, 0x84, 0x44, 0xdd, 0x14, 0x54, 0xd4, 0x52, 0x69, - 0xda, 0x13, 0xe2, 0xd0, 0x89, 0xf3, 0xea, 0x9a, 0xc4, 0x33, 0xd6, 0xcc, 0x24, 0x52, 0x6f, 0x5c, - 0x90, 0x38, 0xf2, 0x05, 0xf8, 0x00, 0x88, 0x2f, 0xd2, 0x63, 0x8f, 0x3d, 0x45, 0xd4, 0x7c, 0x11, - 0xe4, 0xb1, 0x13, 0xa7, 0xf9, 0x03, 0x41, 0x7b, 0xf3, 0xbc, 0xf7, 0xfb, 0xf3, 0xde, 0xcc, 0xcb, - 0x0b, 0xfa, 0xaa, 0xff, 0x85, 0x72, 0x42, 0xe1, 0xf6, 0x87, 0x5d, 0x90, 0x1c, 0x34, 0x28, 0x77, - 0x04, 0xbc, 0x27, 0xa4, 0x9b, 0x27, 0x58, 0x1c, 0xba, 0x30, 0x02, 0xae, 0x95, 0x3b, 0xda, 0x77, - 0x03, 0xe0, 0x20, 0x99, 0x86, 0x9e, 0x13, 0x4b, 0xa1, 0x05, 0x7e, 0x93, 0xa1, 0x1c, 0x16, 0x87, - 0x4e, 0x86, 0x72, 0x46, 0xfb, 0x7b, 0x9f, 0x04, 0xa1, 0xbe, 0x19, 0x76, 0x1d, 0x5f, 0x44, 0x6e, - 0x20, 0x02, 0xe1, 0x1a, 0x70, 0x77, 0x78, 0x6d, 0x4e, 0xe6, 0x60, 0xbe, 0x32, 0x91, 0xbd, 0xd6, - 0x8c, 0x95, 0x2f, 0x24, 0x2c, 0x31, 0xda, 0xfb, 0xac, 0xc0, 0x44, 0xcc, 0xbf, 0x09, 0x39, 0xc8, - 0x5b, 0x37, 0xee, 0x07, 0x69, 0x40, 0xb9, 0x11, 0x68, 0xb6, 0x8c, 0xe5, 0xae, 0x62, 0xc9, 0x21, - 0xd7, 0x61, 0x04, 0x0b, 0x84, 0xcf, 0xff, 0x8b, 0xa0, 0xfc, 0x1b, 0x88, 
0xd8, 0x3c, 0xaf, 0xf5, - 0x7b, 0x15, 0x95, 0x8f, 0xd3, 0xfe, 0xf1, 0x15, 0xda, 0x4a, 0xab, 0xe9, 0x31, 0xcd, 0x88, 0xd5, - 0xb4, 0xda, 0xb5, 0x83, 0x4f, 0x9d, 0xe2, 0x92, 0xa6, 0xa2, 0x4e, 0xdc, 0x0f, 0xd2, 0x80, 0x72, - 0x52, 0xb4, 0x33, 0xda, 0x77, 0xce, 0xbb, 0x3f, 0x81, 0xaf, 0xcf, 0x40, 0x33, 0x0f, 0xdf, 0x8d, - 0x1b, 0xa5, 0x64, 0xdc, 0x40, 0x45, 0x8c, 0x4e, 0x55, 0xf1, 0x15, 0xaa, 0x9a, 0xab, 0xbe, 0x0c, - 0x23, 0x20, 0xcf, 0x8c, 0x85, 0xbb, 0x9e, 0xc5, 0x59, 0xe8, 0x4b, 0x91, 0xd2, 0xbc, 0x9d, 0xdc, - 0xa1, 0x7a, 0x3c, 0x51, 0xa2, 0x85, 0x28, 0x3e, 0x46, 0x15, 0x05, 0x32, 0x04, 0x45, 0x9e, 0x1b, - 0xf9, 0x0f, 0x9d, 0x65, 0xcf, 0xec, 0x18, 0xee, 0x85, 0x01, 0x7a, 0x28, 0x19, 0x37, 0x2a, 0xd9, - 0x37, 0xcd, 0xc9, 0xf8, 0x0c, 0xbd, 0x96, 0x10, 0x0b, 0xa9, 0x43, 0x1e, 0x1c, 0x09, 0xae, 0xa5, - 0x18, 0x0c, 0x40, 0x92, 0x8d, 0xa6, 0xd5, 0xae, 0x7a, 0x6f, 0xf3, 0x0a, 0x5e, 0xd3, 0x45, 0x08, - 0x5d, 0xc6, 0xc3, 0xdf, 0xa2, 0x9d, 0x69, 0xf8, 0x84, 0x2b, 0xcd, 0xb8, 0x0f, 0xa4, 0x6c, 0xc4, - 0x3e, 0xc8, 0xc5, 0x76, 0xe8, 0x3c, 0x80, 0x2e, 0x72, 0xf0, 0x47, 0xa8, 0xc2, 0x7c, 0x1d, 0x0a, - 0x4e, 0x2a, 0x86, 0xfd, 0x2a, 0x67, 0x57, 0x0e, 0x4d, 0x94, 0xe6, 0xd9, 0x14, 0x27, 0x81, 0x29, - 0xc1, 0xc9, 0xe6, 0x53, 0x1c, 0x35, 0x51, 0x9a, 0x67, 0xf1, 0x25, 0xaa, 0x4a, 0x08, 0x98, 0xec, - 0x85, 0x3c, 0x20, 0x5b, 0xe6, 0xc6, 0xde, 0xcd, 0xde, 0x58, 0x3a, 0xd3, 0xc5, 0x0b, 0x53, 0xb8, - 0x06, 0x09, 0xdc, 0x9f, 0x79, 0x04, 0x3a, 0x61, 0xd3, 0x42, 0x08, 0x7f, 0x87, 0x36, 0x25, 0x0c, - 0xd2, 0x19, 0x23, 0xd5, 0xf5, 0x35, 0x6b, 0xc9, 0xb8, 0xb1, 0x49, 0x33, 0x1e, 0x9d, 0x08, 0xe0, - 0x26, 0xda, 0xe0, 0x42, 0x03, 0x41, 0xa6, 0x8f, 0x17, 0xb9, 0xef, 0xc6, 0xf7, 0x42, 0x03, 0x35, - 0x99, 0x14, 0xa1, 0x6f, 0x63, 0x20, 0xb5, 0xa7, 0x88, 0xcb, 0xdb, 0x18, 0xa8, 0xc9, 0x60, 0x40, - 0xf5, 0x1e, 0xc4, 0x12, 0xfc, 0x54, 0xf1, 0x42, 0x0c, 0xa5, 0x0f, 0xe4, 0x85, 0x29, 0xac, 0xb1, - 0xac, 0xb0, 0x6c, 0x38, 0x0c, 0xcc, 0x23, 0xb9, 0x5c, 0xbd, 0x33, 0x27, 0x40, 0x17, 0x24, 0xf1, - 0xaf, 0x16, 0x22, 0x45, 0xf0, 0x9b, 0x50, 0x2a, 0x33, 0x93, 0x4a, 0xb3, 0x28, 0x26, 0x2f, 0x8d, - 0xdf, 0xc7, 0xeb, 0x4d, 0xbb, 0x19, 0xf4, 0x66, 0x6e, 0x4d, 0x3a, 0x2b, 0x34, 0xe9, 0x4a, 0x37, - 0xfc, 0x8b, 0x85, 0x76, 0x8b, 0xe4, 0x29, 0x9b, 0xad, 0xe4, 0xd5, 0xff, 0xae, 0xa4, 0x91, 0x57, - 0xb2, 0xdb, 0x59, 0x2e, 0x49, 0x57, 0x79, 0xe1, 0x43, 0xb4, 0x5d, 0xa4, 0x8e, 0xc4, 0x90, 0x6b, - 0xb2, 0xdd, 0xb4, 0xda, 0x65, 0x6f, 0x37, 0x97, 0xdc, 0xee, 0x3c, 0x4d, 0xd3, 0x79, 0x7c, 0xeb, - 0x4f, 0x0b, 0x65, 0x3f, 0xf5, 0xd3, 0x50, 0x69, 0xfc, 0xe3, 0xc2, 0x8e, 0x72, 0xd6, 0x6b, 0x24, - 0x65, 0x9b, 0x0d, 0x55, 0xcf, 0x9d, 0xb7, 0x26, 0x91, 0x99, 0xfd, 0xf4, 0x35, 0x2a, 0x87, 0x1a, - 0x22, 0x45, 0x9e, 0x35, 0x9f, 0xb7, 0x6b, 0x07, 0x6f, 0xff, 0x65, 0x79, 0x78, 0x2f, 0x73, 0x9d, - 0xf2, 0x49, 0xca, 0xa0, 0x19, 0xb1, 0xf5, 0x87, 0x85, 0x6a, 0x33, 0xcb, 0x05, 0xbf, 0x43, 0x65, - 0xdf, 0xb4, 0x6d, 0x99, 0xb6, 0xa7, 0xa4, 0xac, 0xd9, 0x2c, 0x87, 0x87, 0xa8, 0x3e, 0x60, 0x4a, - 0x9f, 0x77, 0x15, 0xc8, 0x11, 0xf4, 0xde, 0x67, 0x3b, 0x4e, 0xe7, 0xf5, 0x74, 0x4e, 0x90, 0x2e, - 0x58, 0x78, 0x5f, 0xde, 0x3d, 0xda, 0xa5, 0xfb, 0x47, 0xbb, 0xf4, 0xf0, 0x68, 0x97, 0x7e, 0x4e, - 0x6c, 0xeb, 0x2e, 0xb1, 0xad, 0xfb, 0xc4, 0xb6, 0x1e, 0x12, 0xdb, 0xfa, 0x2b, 0xb1, 0xad, 0xdf, - 0xfe, 0xb6, 0x4b, 0x3f, 0xbc, 0x59, 0xf6, 0x6f, 0xfa, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe0, - 0xc8, 0x73, 0x3d, 0x7d, 0x07, 0x00, 0x00, +var fileDescriptor_d3a3e1495c224e47 = []byte{ + // 759 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 
0xff, 0xa4, 0x55, 0x4f, 0x4f, 0xdb, 0x48, + 0x14, 0x8f, 0x81, 0x04, 0x32, 0xe1, 0x4f, 0x18, 0x90, 0x98, 0x05, 0xc9, 0xc9, 0x86, 0xd5, 0x2a, + 0x5a, 0x69, 0xed, 0x05, 0xad, 0x56, 0xab, 0x3d, 0x2d, 0x26, 0xec, 0x8a, 0x0a, 0x8a, 0x34, 0x70, + 0xaa, 0x7a, 0x60, 0xe2, 0x3c, 0x8c, 0x4b, 0xec, 0xb1, 0xc6, 0x93, 0x48, 0xdc, 0x7a, 0xa9, 0xd4, + 0x63, 0xbf, 0x40, 0x3f, 0x40, 0xd5, 0x2f, 0xc2, 0x91, 0x23, 0xa7, 0xa8, 0xb8, 0x5f, 0xa4, 0xf2, + 0xd8, 0x89, 0x43, 0xfe, 0xb4, 0xa9, 0x7a, 0xf3, 0xbc, 0xf7, 0xfb, 0xf3, 0xde, 0xcc, 0xcb, 0x0b, + 0xfa, 0xe5, 0xe6, 0xef, 0xd0, 0x70, 0xb9, 0xc9, 0x02, 0xd7, 0x84, 0x2e, 0xf8, 0x32, 0x34, 0xbb, + 0x7b, 0xa6, 0x03, 0x3e, 0x08, 0x26, 0xa1, 0x65, 0x04, 0x82, 0x4b, 0x8e, 0x37, 0x13, 0x94, 0xc1, + 0x02, 0xd7, 0x48, 0x50, 0x46, 0x77, 0x6f, 0xfb, 0x77, 0xc7, 0x95, 0xd7, 0x9d, 0xa6, 0x61, 0x73, + 0xcf, 0x74, 0xb8, 0xc3, 0x4d, 0x05, 0x6e, 0x76, 0xae, 0xd4, 0x49, 0x1d, 0xd4, 0x57, 0x22, 0xb2, + 0x5d, 0x1b, 0xb2, 0xb2, 0xb9, 0x80, 0x09, 0x46, 0xdb, 0x7f, 0x66, 0x18, 0x8f, 0xd9, 0xd7, 0xae, + 0x0f, 0xe2, 0xd6, 0x0c, 0x6e, 0x9c, 0x38, 0x10, 0x9a, 0x1e, 0x48, 0x36, 0x89, 0x65, 0x4e, 0x63, + 0x89, 0x8e, 0x2f, 0x5d, 0x0f, 0xc6, 0x08, 0x7f, 0x7d, 0x8b, 0x10, 0xda, 0xd7, 0xe0, 0xb1, 0x51, + 0x5e, 0xed, 0x7d, 0x11, 0xe5, 0x8f, 0xe2, 0xfe, 0xf1, 0x25, 0x5a, 0x8a, 0xab, 0x69, 0x31, 0xc9, + 0x88, 0x56, 0xd5, 0xea, 0xa5, 0xfd, 0x3f, 0x8c, 0xec, 0x92, 0x06, 0xa2, 0x46, 0x70, 0xe3, 0xc4, + 0x81, 0xd0, 0x88, 0xd1, 0x46, 0x77, 0xcf, 0x38, 0x6b, 0xbe, 0x02, 0x5b, 0x9e, 0x82, 0x64, 0x16, + 0xbe, 0xeb, 0x55, 0x72, 0x51, 0xaf, 0x82, 0xb2, 0x18, 0x1d, 0xa8, 0xe2, 0x4b, 0x54, 0x54, 0x57, + 0x7d, 0xe1, 0x7a, 0x40, 0xe6, 0x94, 0x85, 0x39, 0x9b, 0xc5, 0xa9, 0x6b, 0x0b, 0x1e, 0xd3, 0xac, + 0xf5, 0xd4, 0xa1, 0x78, 0xd4, 0x57, 0xa2, 0x99, 0x28, 0x3e, 0x42, 0x85, 0x10, 0x84, 0x0b, 0x21, + 0x99, 0x57, 0xf2, 0x3f, 0x1b, 0x93, 0x9e, 0xd9, 0x50, 0xdc, 0x73, 0x05, 0xb4, 0x50, 0xd4, 0xab, + 0x14, 0x92, 0x6f, 0x9a, 0x92, 0xf1, 0x29, 0xda, 0x10, 0x10, 0x70, 0x21, 0x5d, 0xdf, 0x39, 0xe4, + 0xbe, 0x14, 0xbc, 0xdd, 0x06, 0x41, 0x16, 0xaa, 0x5a, 0xbd, 0x68, 0xed, 0xa4, 0x15, 0x6c, 0xd0, + 0x71, 0x08, 0x9d, 0xc4, 0xc3, 0xff, 0xa3, 0xf5, 0x41, 0xf8, 0xd8, 0x0f, 0x25, 0xf3, 0x6d, 0x20, + 0x79, 0x25, 0xf6, 0x53, 0x2a, 0xb6, 0x4e, 0x47, 0x01, 0x74, 0x9c, 0x83, 0x7f, 0x45, 0x05, 0x66, + 0x4b, 0x97, 0xfb, 0xa4, 0xa0, 0xd8, 0xab, 0x29, 0xbb, 0x70, 0xa0, 0xa2, 0x34, 0xcd, 0xc6, 0x38, + 0x01, 0x2c, 0xe4, 0x3e, 0x59, 0x7c, 0x8a, 0xa3, 0x2a, 0x4a, 0xd3, 0x2c, 0xbe, 0x40, 0x45, 0x01, + 0x0e, 0x13, 0x2d, 0xd7, 0x77, 0xc8, 0x92, 0xba, 0xb1, 0xdd, 0xe1, 0x1b, 0x8b, 0x67, 0x3a, 0x7b, + 0x61, 0x0a, 0x57, 0x20, 0xc0, 0xb7, 0x87, 0x1e, 0x81, 0xf6, 0xd9, 0x34, 0x13, 0xc2, 0xcf, 0xd0, + 0xa2, 0x80, 0x76, 0x3c, 0x63, 0xa4, 0x38, 0xbb, 0x66, 0x29, 0xea, 0x55, 0x16, 0x69, 0xc2, 0xa3, + 0x7d, 0x01, 0x5c, 0x45, 0x0b, 0x3e, 0x97, 0x40, 0x90, 0xea, 0x63, 0x39, 0xf5, 0x5d, 0x78, 0xce, + 0x25, 0x50, 0x95, 0x89, 0x11, 0xf2, 0x36, 0x00, 0x52, 0x7a, 0x8a, 0xb8, 0xb8, 0x0d, 0x80, 0xaa, + 0x0c, 0x06, 0x54, 0x6e, 0x41, 0x20, 0xc0, 0x8e, 0x15, 0xcf, 0x79, 0x47, 0xd8, 0x40, 0x96, 0x55, + 0x61, 0x95, 0x49, 0x85, 0x25, 0xc3, 0xa1, 0x60, 0x16, 0x49, 0xe5, 0xca, 0x8d, 0x11, 0x01, 0x3a, + 0x26, 0x89, 0xdf, 0x6a, 0x88, 0x64, 0xc1, 0xff, 0x5c, 0x11, 0xaa, 0x99, 0x0c, 0x25, 0xf3, 0x02, + 0xb2, 0xa2, 0xfc, 0x7e, 0x9b, 0x6d, 0xda, 0xd5, 0xa0, 0x57, 0x53, 0x6b, 0xd2, 0x98, 0xa2, 0x49, + 0xa7, 0xba, 0xe1, 0x37, 0x1a, 0xda, 0xca, 0x92, 0x27, 0x6c, 0xb8, 0x92, 0xd5, 0xef, 0xae, 0xa4, + 0x92, 0x56, 0xb2, 0xd5, 0x98, 0x2c, 0x49, 0xa7, 0x79, 0xe1, 0x03, 0xb4, 0x96, 
0xa5, 0x0e, 0x79, + 0xc7, 0x97, 0x64, 0xad, 0xaa, 0xd5, 0xf3, 0xd6, 0x56, 0x2a, 0xb9, 0xd6, 0x78, 0x9a, 0xa6, 0xa3, + 0xf8, 0xda, 0x47, 0x0d, 0x25, 0x3f, 0xf5, 0x13, 0x37, 0x94, 0xf8, 0xe5, 0xd8, 0x8e, 0x32, 0x66, + 0x6b, 0x24, 0x66, 0xab, 0x0d, 0x55, 0x4e, 0x9d, 0x97, 0xfa, 0x91, 0xa1, 0xfd, 0xf4, 0x2f, 0xca, + 0xbb, 0x12, 0xbc, 0x90, 0xcc, 0x55, 0xe7, 0xeb, 0xa5, 0xfd, 0x9d, 0xaf, 0x2c, 0x0f, 0x6b, 0x25, + 0xd5, 0xc9, 0x1f, 0xc7, 0x0c, 0x9a, 0x10, 0x6b, 0x1f, 0x34, 0x54, 0x1a, 0x5a, 0x2e, 0x78, 0x17, + 0xe5, 0x6d, 0xd5, 0xb6, 0xa6, 0xda, 0x1e, 0x90, 0x92, 0x66, 0x93, 0x1c, 0xee, 0xa0, 0x72, 0x9b, + 0x85, 0xf2, 0xac, 0x19, 0x82, 0xe8, 0x42, 0xeb, 0x47, 0xb6, 0xe3, 0x60, 0x5e, 0x4f, 0x46, 0x04, + 0xe9, 0x98, 0x85, 0xf5, 0xcf, 0xdd, 0xa3, 0x9e, 0xbb, 0x7f, 0xd4, 0x73, 0x0f, 0x8f, 0x7a, 0xee, + 0x75, 0xa4, 0x6b, 0x77, 0x91, 0xae, 0xdd, 0x47, 0xba, 0xf6, 0x10, 0xe9, 0xda, 0xa7, 0x48, 0xd7, + 0xde, 0x7d, 0xd6, 0x73, 0x2f, 0x36, 0x27, 0xfd, 0x9b, 0x7e, 0x09, 0x00, 0x00, 0xff, 0xff, 0x6f, + 0x4f, 0x7a, 0xe4, 0x64, 0x07, 0x00, 0x00, } func (m *Event) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/events/v1/generated.proto b/vendor/k8s.io/api/events/v1/generated.proto index cfa16b021b..6c7e4cca19 100644 --- a/vendor/k8s.io/api/events/v1/generated.proto +++ b/vendor/k8s.io/api/events/v1/generated.proto @@ -39,10 +39,10 @@ message Event { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // eventTime is the time when this Event was first observed. It is required. - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2; // series is data about the Event series this event represents or nil if it's a singleton Event. // +optional @@ -68,12 +68,12 @@ message Event { // implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because // it acts on some changes in a ReplicaSet object. // +optional - optional k8s.io.api.core.v1.ObjectReference regarding = 8; + optional .k8s.io.api.core.v1.ObjectReference regarding = 8; // related is the optional secondary object for more complex actions. E.g. when regarding object triggers // a creation or deletion of related object. // +optional - optional k8s.io.api.core.v1.ObjectReference related = 9; + optional .k8s.io.api.core.v1.ObjectReference related = 9; // note is a human-readable description of the status of this operation. // Maximal length of the note is 1kB, but libraries should be prepared to @@ -88,15 +88,15 @@ message Event { // deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional - optional k8s.io.api.core.v1.EventSource deprecatedSource = 12; + optional .k8s.io.api.core.v1.EventSource deprecatedSource = 12; // deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13; // deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14; // deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional @@ -108,7 +108,7 @@ message EventList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated Event items = 2; @@ -123,6 +123,6 @@ message EventSeries { optional int32 count = 1; // lastObservedTime is the time when last Event from the series was seen before last heartbeat. - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; } diff --git a/vendor/k8s.io/api/events/v1/types.go b/vendor/k8s.io/api/events/v1/types.go index e01a2b21e7..86b12eee15 100644 --- a/vendor/k8s.io/api/events/v1/types.go +++ b/vendor/k8s.io/api/events/v1/types.go @@ -23,6 +23,7 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. // Events have a limited retention time and triggers and messages may evolve @@ -109,6 +110,7 @@ type EventSeries struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // EventList is a list of Event objects. type EventList struct { diff --git a/vendor/k8s.io/api/events/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/events/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..5217d1ac69 --- /dev/null +++ b/vendor/k8s.io/api/events/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Event) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *EventList) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} diff --git a/vendor/k8s.io/api/events/v1beta1/generated.pb.go b/vendor/k8s.io/api/events/v1beta1/generated.pb.go index 701127ff94..5d7881e8c0 100644 --- a/vendor/k8s.io/api/events/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/events/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto +// source: k8s.io/api/events/v1beta1/generated.proto package v1beta1 @@ -47,7 +47,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_4f97f691c32a5ac8, []int{0} + return fileDescriptor_99027a32dee7673b, []int{0} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +75,7 @@ var xxx_messageInfo_Event proto.InternalMessageInfo func (m *EventList) Reset() { *m = EventList{} } func (*EventList) ProtoMessage() {} func (*EventList) Descriptor() ([]byte, []int) { - return fileDescriptor_4f97f691c32a5ac8, []int{1} + return fileDescriptor_99027a32dee7673b, []int{1} } func (m *EventList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +103,7 @@ var xxx_messageInfo_EventList proto.InternalMessageInfo func (m *EventSeries) Reset() { *m = EventSeries{} } func (*EventSeries) ProtoMessage() {} func (*EventSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_4f97f691c32a5ac8, []int{2} + return fileDescriptor_99027a32dee7673b, []int{2} } func (m *EventSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,60 +135,59 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto", fileDescriptor_4f97f691c32a5ac8) + proto.RegisterFile("k8s.io/api/events/v1beta1/generated.proto", fileDescriptor_99027a32dee7673b) } -var fileDescriptor_4f97f691c32a5ac8 = []byte{ - // 779 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x95, 0xcf, 0x6e, 0xdb, 0x46, - 0x10, 0xc6, 0xc5, 0xc4, 0x92, 0xad, 0x55, 0x12, 0xcb, 0x9b, 0x83, 0xd7, 0x2e, 0x40, 0x09, 0x0a, - 0x10, 0x08, 0x05, 0x4a, 0xd6, 0x41, 0x51, 0xf4, 0x56, 0x84, 0x91, 0x5b, 0x24, 0xb0, 0x1b, 0x60, - 0xe3, 0x53, 0xd1, 0x43, 0x56, 0xd4, 0x84, 0x66, 0x25, 0xee, 0x12, 0xbb, 0x2b, 0x01, 0xbe, 0xf5, - 0x52, 0xa0, 0xc7, 0x3e, 0x43, 0x6f, 0xbd, 0xf5, 0x31, 0x7c, 0xf4, 0xd1, 0x27, 0xa1, 0x66, 0x5f, - 0xa4, 0xe0, 0x72, 0x25, 0xca, 0xfa, 0x03, 0xab, 0xe8, 0x8d, 0x9c, 0xf9, 0xbe, 0xdf, 0xcc, 0x2e, - 0x47, 0x23, 0x14, 0x0c, 0xbf, 0x51, 0x5e, 0x2c, 0xfc, 0xe1, 0xb8, 0x0f, 0x92, 0x83, 0x06, 0xe5, - 0x4f, 0x80, 0x0f, 0x84, 0xf4, 0x6d, 0x82, 0xa5, 0xb1, 0x0f, 0x13, 0xe0, 0x5a, 0xf9, 0x93, 0x93, - 0x3e, 0x68, 0x76, 0xe2, 0x47, 0xc0, 0x41, 0x32, 0x0d, 0x03, 0x2f, 0x95, 0x42, 0x0b, 0x7c, 0x54, - 0x48, 0x3d, 0x96, 0xc6, 0x5e, 0x21, 0xf5, 0xac, 0xf4, 0xf8, 0x8b, 0x28, 0xd6, 0x97, 0xe3, 0xbe, - 0x17, 0x8a, 0xc4, 0x8f, 0x44, 0x24, 0x7c, 0xe3, 0xe8, 0x8f, 0x3f, 0x99, 0x37, 0xf3, 0x62, 0x9e, - 0x0a, 0xd2, 0x71, 0x67, 0xa1, 0x68, 0x28, 0x24, 0xf8, 0x93, 0x95, 0x6a, 0xc7, 0x5f, 0x95, 0x9a, - 0x84, 0x85, 0x97, 0x31, 0x07, 0x79, 0xe5, 0xa7, 0xc3, 0x28, 0x0f, 0x28, 0x3f, 0x01, 0xcd, 0xd6, - 0xb9, 0xfc, 0x4d, 0x2e, 0x39, 0xe6, 0x3a, 0x4e, 0x60, 0xc5, 0xf0, 0xf5, 0x43, 0x06, 0x15, 0x5e, - 0x42, 0xc2, 0x96, 0x7d, 0x9d, 0x3f, 0xea, 
0xa8, 0x7a, 0x9a, 0x5f, 0x02, 0xfe, 0x88, 0xf6, 0xf2, - 0x6e, 0x06, 0x4c, 0x33, 0xe2, 0xb4, 0x9d, 0x6e, 0xe3, 0xd5, 0x97, 0x5e, 0x79, 0x53, 0x73, 0xa8, - 0x97, 0x0e, 0xa3, 0x3c, 0xa0, 0xbc, 0x5c, 0xed, 0x4d, 0x4e, 0xbc, 0xf7, 0xfd, 0x9f, 0x21, 0xd4, - 0xe7, 0xa0, 0x59, 0x80, 0xaf, 0xa7, 0xad, 0x4a, 0x36, 0x6d, 0xa1, 0x32, 0x46, 0xe7, 0x54, 0xfc, - 0x11, 0xd5, 0xcd, 0x7d, 0x5f, 0xc4, 0x09, 0x90, 0x47, 0xa6, 0x84, 0xbf, 0x5d, 0x89, 0xf3, 0x38, - 0x94, 0x22, 0xb7, 0x05, 0x07, 0xb6, 0x42, 0xfd, 0x74, 0x46, 0xa2, 0x25, 0x14, 0xbf, 0x43, 0x35, - 0x05, 0x32, 0x06, 0x45, 0x1e, 0x1b, 0xfc, 0x4b, 0x6f, 0xe3, 0xb7, 0xf6, 0x0c, 0xe0, 0x83, 0x51, - 0x07, 0x28, 0x9b, 0xb6, 0x6a, 0xc5, 0x33, 0xb5, 0x04, 0x7c, 0x8e, 0x9e, 0x4b, 0x48, 0x85, 0xd4, - 0x31, 0x8f, 0xde, 0x08, 0xae, 0xa5, 0x18, 0x8d, 0x40, 0x92, 0x9d, 0xb6, 0xd3, 0xad, 0x07, 0x9f, - 0xd9, 0x36, 0x9e, 0xd3, 0x55, 0x09, 0x5d, 0xe7, 0xc3, 0xdf, 0xa3, 0x83, 0x79, 0xf8, 0x2d, 0x57, - 0x9a, 0xf1, 0x10, 0x48, 0xd5, 0xc0, 0x8e, 0x2c, 0xec, 0x80, 0x2e, 0x0b, 0xe8, 0xaa, 0x07, 0xbf, - 0x44, 0x35, 0x16, 0xea, 0x58, 0x70, 0x52, 0x33, 0xee, 0x67, 0xd6, 0x5d, 0x7b, 0x6d, 0xa2, 0xd4, - 0x66, 0x73, 0x9d, 0x04, 0xa6, 0x04, 0x27, 0xbb, 0xf7, 0x75, 0xd4, 0x44, 0xa9, 0xcd, 0xe2, 0x0b, - 0x54, 0x97, 0x10, 0x31, 0x39, 0x88, 0x79, 0x44, 0xf6, 0xcc, 0xb5, 0xbd, 0x58, 0xbc, 0xb6, 0x7c, - 0xb0, 0xcb, 0xcf, 0x4c, 0xe1, 0x13, 0x48, 0xe0, 0xe1, 0xc2, 0x97, 0xa0, 0x33, 0x37, 0x2d, 0x41, - 0xf8, 0x1d, 0xda, 0x95, 0x30, 0xca, 0x07, 0x8d, 0xd4, 0xb7, 0x67, 0x36, 0xb2, 0x69, 0x6b, 0x97, - 0x16, 0x3e, 0x3a, 0x03, 0xe0, 0x36, 0xda, 0xe1, 0x42, 0x03, 0x41, 0xe6, 0x1c, 0x4f, 0x6c, 0xdd, - 0x9d, 0x1f, 0x84, 0x06, 0x6a, 0x32, 0xb9, 0x42, 0x5f, 0xa5, 0x40, 0x1a, 0xf7, 0x15, 0x17, 0x57, - 0x29, 0x50, 0x93, 0xc1, 0x80, 0x9a, 0x03, 0x48, 0x25, 0x84, 0x39, 0xf1, 0x83, 0x18, 0xcb, 0x10, - 0xc8, 0x13, 0xd3, 0x58, 0x6b, 0x5d, 0x63, 0xc5, 0x70, 0x18, 0x59, 0x40, 0x2c, 0xae, 0xd9, 0x5b, - 0x02, 0xd0, 0x15, 0x24, 0xfe, 0xcd, 0x41, 0xa4, 0x0c, 0x7e, 0x17, 0x4b, 0x65, 0x06, 0x53, 0x69, - 0x96, 0xa4, 0xe4, 0xa9, 0xa9, 0xf7, 0xf9, 0x76, 0x23, 0x6f, 0xa6, 0xbd, 0x6d, 0x4b, 0x93, 0xde, - 0x06, 0x26, 0xdd, 0x58, 0x0d, 0xff, 0xea, 0xa0, 0xc3, 0x32, 0x79, 0xc6, 0x16, 0x3b, 0x79, 0xf6, - 0x9f, 0x3b, 0x69, 0xd9, 0x4e, 0x0e, 0x7b, 0xeb, 0x91, 0x74, 0x53, 0x2d, 0xfc, 0x1a, 0xed, 0x97, - 0xa9, 0x37, 0x62, 0xcc, 0x35, 0xd9, 0x6f, 0x3b, 0xdd, 0x6a, 0x70, 0x68, 0x91, 0xfb, 0xbd, 0xfb, - 0x69, 0xba, 0xac, 0xef, 0xfc, 0xe5, 0xa0, 0xe2, 0xf7, 0x7e, 0x16, 0x2b, 0x8d, 0x7f, 0x5a, 0x59, - 0x54, 0xde, 0x76, 0x07, 0xc9, 0xdd, 0x66, 0x4d, 0x35, 0x6d, 0xe5, 0xbd, 0x59, 0x64, 0x61, 0x49, - 0x9d, 0xa2, 0x6a, 0xac, 0x21, 0x51, 0xe4, 0x51, 0xfb, 0x71, 0xb7, 0xf1, 0xaa, 0xfd, 0xd0, 0x06, - 0x09, 0x9e, 0x5a, 0x58, 0xf5, 0x6d, 0x6e, 0xa3, 0x85, 0xbb, 0xf3, 0xa7, 0x83, 0x1a, 0x0b, 0x1b, - 0x06, 0xbf, 0x40, 0xd5, 0xd0, 0x9c, 0xdd, 0x31, 0x67, 0x9f, 0x9b, 0x8a, 0x13, 0x17, 0x39, 0x3c, - 0x46, 0xcd, 0x11, 0x53, 0xfa, 0x7d, 0x5f, 0x81, 0x9c, 0xc0, 0xe0, 0xff, 0xec, 0xc9, 0xf9, 0xd0, - 0x9e, 0x2d, 0x01, 0xe9, 0x4a, 0x89, 0xe0, 0xdb, 0xeb, 0x3b, 0xb7, 0x72, 0x73, 0xe7, 0x56, 0x6e, - 0xef, 0xdc, 0xca, 0x2f, 0x99, 0xeb, 0x5c, 0x67, 0xae, 0x73, 0x93, 0xb9, 0xce, 0x6d, 0xe6, 0x3a, - 0x7f, 0x67, 0xae, 0xf3, 0xfb, 0x3f, 0x6e, 0xe5, 0xc7, 0xa3, 0x8d, 0xff, 0xb0, 0xff, 0x06, 0x00, - 0x00, 0xff, 0xff, 0xae, 0x19, 0x45, 0xf5, 0x96, 0x07, 0x00, 0x00, +var fileDescriptor_99027a32dee7673b = []byte{ + // 764 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x95, 0xcf, 0x4f, 0xdb, 0x48, + 
0x14, 0xc7, 0x63, 0x20, 0x81, 0x4c, 0xf8, 0x11, 0x86, 0x03, 0x03, 0x2b, 0x39, 0x51, 0x90, 0x50, + 0x76, 0xa5, 0xb5, 0x17, 0xb4, 0x5a, 0xed, 0x6d, 0x85, 0x09, 0x5b, 0x81, 0xa0, 0x48, 0x03, 0xa7, + 0xaa, 0x07, 0x26, 0xce, 0xc3, 0xb8, 0xc4, 0x1e, 0x6b, 0x3c, 0x89, 0xc4, 0xad, 0x97, 0x4a, 0x3d, + 0xf6, 0x6f, 0xe8, 0xad, 0xb7, 0xfe, 0x19, 0x1c, 0x39, 0x72, 0x8a, 0x8a, 0xfb, 0x8f, 0x54, 0x1e, + 0x3b, 0x71, 0xc8, 0x0f, 0x91, 0xaa, 0x37, 0xfb, 0xbd, 0xef, 0xf7, 0xf3, 0xde, 0x8c, 0x5f, 0x5e, + 0xd0, 0xef, 0xb7, 0xff, 0x86, 0x86, 0xcb, 0x4d, 0x16, 0xb8, 0x26, 0x74, 0xc1, 0x97, 0xa1, 0xd9, + 0xdd, 0x6b, 0x82, 0x64, 0x7b, 0xa6, 0x03, 0x3e, 0x08, 0x26, 0xa1, 0x65, 0x04, 0x82, 0x4b, 0x8e, + 0xb7, 0x12, 0xa9, 0xc1, 0x02, 0xd7, 0x48, 0xa4, 0x46, 0x2a, 0xdd, 0xfe, 0xd3, 0x71, 0xe5, 0x4d, + 0xa7, 0x69, 0xd8, 0xdc, 0x33, 0x1d, 0xee, 0x70, 0x53, 0x39, 0x9a, 0x9d, 0x6b, 0xf5, 0xa6, 0x5e, + 0xd4, 0x53, 0x42, 0xda, 0xae, 0x0d, 0x15, 0xb5, 0xb9, 0x00, 0xb3, 0x3b, 0x56, 0x6d, 0xfb, 0xef, + 0x4c, 0xe3, 0x31, 0xfb, 0xc6, 0xf5, 0x41, 0xdc, 0x99, 0xc1, 0xad, 0x13, 0x07, 0x42, 0xd3, 0x03, + 0xc9, 0x26, 0xb9, 0xcc, 0x69, 0x2e, 0xd1, 0xf1, 0xa5, 0xeb, 0xc1, 0x98, 0xe1, 0x9f, 0x97, 0x0c, + 0xa1, 0x7d, 0x03, 0x1e, 0x1b, 0xf5, 0xd5, 0x3e, 0x17, 0x51, 0xfe, 0x28, 0xbe, 0x04, 0x7c, 0x85, + 0x96, 0xe2, 0x6e, 0x5a, 0x4c, 0x32, 0xa2, 0x55, 0xb5, 0x7a, 0x69, 0xff, 0x2f, 0x23, 0xbb, 0xa9, + 0x01, 0xd4, 0x08, 0x6e, 0x9d, 0x38, 0x10, 0x1a, 0xb1, 0xda, 0xe8, 0xee, 0x19, 0xe7, 0xcd, 0x77, + 0x60, 0xcb, 0x33, 0x90, 0xcc, 0xc2, 0xf7, 0xbd, 0x4a, 0x2e, 0xea, 0x55, 0x50, 0x16, 0xa3, 0x03, + 0x2a, 0xbe, 0x42, 0x45, 0x75, 0xdf, 0x97, 0xae, 0x07, 0x64, 0x4e, 0x95, 0x30, 0x67, 0x2b, 0x71, + 0xe6, 0xda, 0x82, 0xc7, 0x36, 0x6b, 0x3d, 0xad, 0x50, 0x3c, 0xea, 0x93, 0x68, 0x06, 0xc5, 0x27, + 0xa8, 0x10, 0x82, 0x70, 0x21, 0x24, 0xf3, 0x0a, 0xbf, 0x6b, 0x4c, 0xfd, 0xd6, 0x86, 0x02, 0x5c, + 0x28, 0xb5, 0x85, 0xa2, 0x5e, 0xa5, 0x90, 0x3c, 0xd3, 0x94, 0x80, 0xcf, 0xd0, 0x86, 0x80, 0x80, + 0x0b, 0xe9, 0xfa, 0xce, 0x21, 0xf7, 0xa5, 0xe0, 0xed, 0x36, 0x08, 0xb2, 0x50, 0xd5, 0xea, 0x45, + 0xeb, 0xb7, 0xb4, 0x8d, 0x0d, 0x3a, 0x2e, 0xa1, 0x93, 0x7c, 0xf8, 0x15, 0x5a, 0x1f, 0x84, 0x8f, + 0xfd, 0x50, 0x32, 0xdf, 0x06, 0x92, 0x57, 0xb0, 0xad, 0x14, 0xb6, 0x4e, 0x47, 0x05, 0x74, 0xdc, + 0x83, 0x77, 0x51, 0x81, 0xd9, 0xd2, 0xe5, 0x3e, 0x29, 0x28, 0xf7, 0x6a, 0xea, 0x2e, 0x1c, 0xa8, + 0x28, 0x4d, 0xb3, 0xb1, 0x4e, 0x00, 0x0b, 0xb9, 0x4f, 0x16, 0x9f, 0xeb, 0xa8, 0x8a, 0xd2, 0x34, + 0x8b, 0x2f, 0x51, 0x51, 0x80, 0xc3, 0x44, 0xcb, 0xf5, 0x1d, 0xb2, 0xa4, 0xae, 0x6d, 0x67, 0xf8, + 0xda, 0xe2, 0xc1, 0xce, 0x3e, 0x33, 0x85, 0x6b, 0x10, 0xe0, 0xdb, 0x43, 0x5f, 0x82, 0xf6, 0xdd, + 0x34, 0x03, 0xe1, 0x13, 0xb4, 0x28, 0xa0, 0x1d, 0x0f, 0x1a, 0x29, 0xce, 0xce, 0x2c, 0x45, 0xbd, + 0xca, 0x22, 0x4d, 0x7c, 0xb4, 0x0f, 0xc0, 0x55, 0xb4, 0xe0, 0x73, 0x09, 0x04, 0xa9, 0x73, 0x2c, + 0xa7, 0x75, 0x17, 0x5e, 0x73, 0x09, 0x54, 0x65, 0x62, 0x85, 0xbc, 0x0b, 0x80, 0x94, 0x9e, 0x2b, + 0x2e, 0xef, 0x02, 0xa0, 0x2a, 0x83, 0x01, 0x95, 0x5b, 0x10, 0x08, 0xb0, 0x63, 0xe2, 0x05, 0xef, + 0x08, 0x1b, 0xc8, 0xb2, 0x6a, 0xac, 0x32, 0xa9, 0xb1, 0x64, 0x38, 0x94, 0xcc, 0x22, 0x29, 0xae, + 0xdc, 0x18, 0x01, 0xd0, 0x31, 0x24, 0xfe, 0xa8, 0x21, 0x92, 0x05, 0xff, 0x77, 0x45, 0xa8, 0x06, + 0x33, 0x94, 0xcc, 0x0b, 0xc8, 0x8a, 0xaa, 0xf7, 0xc7, 0x6c, 0x23, 0xaf, 0xa6, 0xbd, 0x9a, 0x96, + 0x26, 0x8d, 0x29, 0x4c, 0x3a, 0xb5, 0x1a, 0xfe, 0xa0, 0xa1, 0xcd, 0x2c, 0x79, 0xca, 0x86, 0x3b, + 0x59, 0xfd, 0xe9, 0x4e, 0x2a, 0x69, 0x27, 0x9b, 0x8d, 0xc9, 0x48, 0x3a, 0xad, 0x16, 0x3e, 0x40, + 0x6b, 0x59, 0xea, 0x90, 
0x77, 0x7c, 0x49, 0xd6, 0xaa, 0x5a, 0x3d, 0x6f, 0x6d, 0xa6, 0xc8, 0xb5, + 0xc6, 0xf3, 0x34, 0x1d, 0xd5, 0xd7, 0xbe, 0x6a, 0x28, 0xf9, 0xbd, 0x9f, 0xba, 0xa1, 0xc4, 0x6f, + 0xc7, 0x16, 0x95, 0x31, 0xdb, 0x41, 0x62, 0xb7, 0x5a, 0x53, 0xe5, 0xb4, 0xf2, 0x52, 0x3f, 0x32, + 0xb4, 0xa4, 0x8e, 0x50, 0xde, 0x95, 0xe0, 0x85, 0x64, 0xae, 0x3a, 0x5f, 0x2f, 0xed, 0x57, 0x5f, + 0xda, 0x20, 0xd6, 0x4a, 0x0a, 0xcb, 0x1f, 0xc7, 0x36, 0x9a, 0xb8, 0x6b, 0x5f, 0x34, 0x54, 0x1a, + 0xda, 0x30, 0x78, 0x07, 0xe5, 0x6d, 0x75, 0x76, 0x4d, 0x9d, 0x7d, 0x60, 0x4a, 0x4e, 0x9c, 0xe4, + 0x70, 0x07, 0x95, 0xdb, 0x2c, 0x94, 0xe7, 0xcd, 0x10, 0x44, 0x17, 0x5a, 0xbf, 0xb2, 0x27, 0x07, + 0x43, 0x7b, 0x3a, 0x02, 0xa4, 0x63, 0x25, 0xac, 0xff, 0xee, 0x9f, 0xf4, 0xdc, 0xc3, 0x93, 0x9e, + 0x7b, 0x7c, 0xd2, 0x73, 0xef, 0x23, 0x5d, 0xbb, 0x8f, 0x74, 0xed, 0x21, 0xd2, 0xb5, 0xc7, 0x48, + 0xd7, 0xbe, 0x45, 0xba, 0xf6, 0xe9, 0xbb, 0x9e, 0x7b, 0xb3, 0x35, 0xf5, 0x1f, 0xf6, 0x47, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x2b, 0xc1, 0x64, 0x36, 0x7d, 0x07, 0x00, 0x00, } func (m *Event) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/events/v1beta1/generated.proto b/vendor/k8s.io/api/events/v1beta1/generated.proto index de60bdc3e8..fbdb309701 100644 --- a/vendor/k8s.io/api/events/v1beta1/generated.proto +++ b/vendor/k8s.io/api/events/v1beta1/generated.proto @@ -39,10 +39,10 @@ message Event { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // eventTime is the time when this Event was first observed. It is required. - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2; // series is data about the Event series this event represents or nil if it's a singleton Event. // +optional @@ -72,12 +72,12 @@ message Event { // implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because // it acts on some changes in a ReplicaSet object. // +optional - optional k8s.io.api.core.v1.ObjectReference regarding = 8; + optional .k8s.io.api.core.v1.ObjectReference regarding = 8; // related is the optional secondary object for more complex actions. E.g. when regarding object triggers // a creation or deletion of related object. // +optional - optional k8s.io.api.core.v1.ObjectReference related = 9; + optional .k8s.io.api.core.v1.ObjectReference related = 9; // note is a human-readable description of the status of this operation. // Maximal length of the note is 1kB, but libraries should be prepared to @@ -92,15 +92,15 @@ message Event { // deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional - optional k8s.io.api.core.v1.EventSource deprecatedSource = 12; + optional .k8s.io.api.core.v1.EventSource deprecatedSource = 12; // deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13; // deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14; // deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional @@ -112,7 +112,7 @@ message EventList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated Event items = 2; @@ -125,6 +125,6 @@ message EventSeries { optional int32 count = 1; // lastObservedTime is the time when last Event from the series was seen before last heartbeat. - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; } diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go index d967e38106..818486f39d 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto +// source: k8s.io/api/extensions/v1beta1/generated.proto package v1beta1 @@ -52,7 +52,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (*DaemonSet) ProtoMessage() {} func (*DaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{0} + return fileDescriptor_90a532284de28347, []int{0} } func (m *DaemonSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -80,7 +80,7 @@ var xxx_messageInfo_DaemonSet proto.InternalMessageInfo func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } func (*DaemonSetCondition) ProtoMessage() {} func (*DaemonSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{1} + return fileDescriptor_90a532284de28347, []int{1} } func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -108,7 +108,7 @@ var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } func (*DaemonSetList) ProtoMessage() {} func (*DaemonSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{2} + return fileDescriptor_90a532284de28347, []int{2} } func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -136,7 +136,7 @@ var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (*DaemonSetSpec) ProtoMessage() {} func (*DaemonSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{3} + return fileDescriptor_90a532284de28347, []int{3} } func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -164,7 +164,7 @@ var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (*DaemonSetStatus) ProtoMessage() {} func (*DaemonSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{4} + return fileDescriptor_90a532284de28347, 
[]int{4} } func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -192,7 +192,7 @@ var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } func (*DaemonSetUpdateStrategy) ProtoMessage() {} func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{5} + return fileDescriptor_90a532284de28347, []int{5} } func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -220,7 +220,7 @@ var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo func (m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} func (*Deployment) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{6} + return fileDescriptor_90a532284de28347, []int{6} } func (m *Deployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -248,7 +248,7 @@ var xxx_messageInfo_Deployment proto.InternalMessageInfo func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (*DeploymentCondition) ProtoMessage() {} func (*DeploymentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{7} + return fileDescriptor_90a532284de28347, []int{7} } func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -276,7 +276,7 @@ var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (*DeploymentList) ProtoMessage() {} func (*DeploymentList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{8} + return fileDescriptor_90a532284de28347, []int{8} } func (m *DeploymentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -304,7 +304,7 @@ var xxx_messageInfo_DeploymentList proto.InternalMessageInfo func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } func (*DeploymentRollback) ProtoMessage() {} func (*DeploymentRollback) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{9} + return fileDescriptor_90a532284de28347, []int{9} } func (m *DeploymentRollback) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -332,7 +332,7 @@ var xxx_messageInfo_DeploymentRollback proto.InternalMessageInfo func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (*DeploymentSpec) ProtoMessage() {} func (*DeploymentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{10} + return fileDescriptor_90a532284de28347, []int{10} } func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -360,7 +360,7 @@ var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (*DeploymentStatus) ProtoMessage() {} func (*DeploymentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{11} + return fileDescriptor_90a532284de28347, []int{11} } func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -388,7 +388,7 @@ var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} func (*DeploymentStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{12} + return fileDescriptor_90a532284de28347, []int{12} } func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -416,7 +416,7 @@ var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } func (*HTTPIngressPath) ProtoMessage() {} func (*HTTPIngressPath) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{13} + return fileDescriptor_90a532284de28347, []int{13} } func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -444,7 +444,7 @@ var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } func (*HTTPIngressRuleValue) ProtoMessage() {} func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{14} + return fileDescriptor_90a532284de28347, []int{14} } func (m *HTTPIngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -472,7 +472,7 @@ var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo func (m *IPBlock) Reset() { *m = IPBlock{} } func (*IPBlock) ProtoMessage() {} func (*IPBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{15} + return fileDescriptor_90a532284de28347, []int{15} } func (m *IPBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -500,7 +500,7 @@ var xxx_messageInfo_IPBlock proto.InternalMessageInfo func (m *Ingress) Reset() { *m = Ingress{} } func (*Ingress) ProtoMessage() {} func (*Ingress) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{16} + return fileDescriptor_90a532284de28347, []int{16} } func (m *Ingress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -528,7 +528,7 @@ var xxx_messageInfo_Ingress proto.InternalMessageInfo func (m *IngressBackend) Reset() { *m = IngressBackend{} } func (*IngressBackend) ProtoMessage() {} func (*IngressBackend) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{17} + return fileDescriptor_90a532284de28347, []int{17} } func (m *IngressBackend) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -556,7 +556,7 @@ var xxx_messageInfo_IngressBackend proto.InternalMessageInfo func (m *IngressList) Reset() { *m = IngressList{} } func (*IngressList) ProtoMessage() {} func (*IngressList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{18} + return fileDescriptor_90a532284de28347, []int{18} } func (m *IngressList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -584,7 +584,7 @@ var xxx_messageInfo_IngressList proto.InternalMessageInfo func (m *IngressLoadBalancerIngress) Reset() { *m = IngressLoadBalancerIngress{} } func (*IngressLoadBalancerIngress) ProtoMessage() {} func (*IngressLoadBalancerIngress) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{19} + return fileDescriptor_90a532284de28347, []int{19} } func (m *IngressLoadBalancerIngress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -612,7 +612,7 @@ var xxx_messageInfo_IngressLoadBalancerIngress proto.InternalMessageInfo func (m *IngressLoadBalancerStatus) Reset() { *m = IngressLoadBalancerStatus{} } func (*IngressLoadBalancerStatus) ProtoMessage() {} func (*IngressLoadBalancerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{20} + return fileDescriptor_90a532284de28347, []int{20} } func (m *IngressLoadBalancerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -640,7 +640,7 @@ var xxx_messageInfo_IngressLoadBalancerStatus proto.InternalMessageInfo func (m 
*IngressPortStatus) Reset() { *m = IngressPortStatus{} } func (*IngressPortStatus) ProtoMessage() {} func (*IngressPortStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{21} + return fileDescriptor_90a532284de28347, []int{21} } func (m *IngressPortStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -668,7 +668,7 @@ var xxx_messageInfo_IngressPortStatus proto.InternalMessageInfo func (m *IngressRule) Reset() { *m = IngressRule{} } func (*IngressRule) ProtoMessage() {} func (*IngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{22} + return fileDescriptor_90a532284de28347, []int{22} } func (m *IngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -696,7 +696,7 @@ var xxx_messageInfo_IngressRule proto.InternalMessageInfo func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (*IngressRuleValue) ProtoMessage() {} func (*IngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{23} + return fileDescriptor_90a532284de28347, []int{23} } func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +724,7 @@ var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (*IngressSpec) ProtoMessage() {} func (*IngressSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{24} + return fileDescriptor_90a532284de28347, []int{24} } func (m *IngressSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -752,7 +752,7 @@ var xxx_messageInfo_IngressSpec proto.InternalMessageInfo func (m *IngressStatus) Reset() { *m = IngressStatus{} } func (*IngressStatus) ProtoMessage() {} func (*IngressStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{25} + return fileDescriptor_90a532284de28347, []int{25} } func (m *IngressStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -780,7 +780,7 @@ var xxx_messageInfo_IngressStatus proto.InternalMessageInfo func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (*IngressTLS) ProtoMessage() {} func (*IngressTLS) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{26} + return fileDescriptor_90a532284de28347, []int{26} } func (m *IngressTLS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -808,7 +808,7 @@ var xxx_messageInfo_IngressTLS proto.InternalMessageInfo func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } func (*NetworkPolicy) ProtoMessage() {} func (*NetworkPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{27} + return fileDescriptor_90a532284de28347, []int{27} } func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -836,7 +836,7 @@ var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } func (*NetworkPolicyEgressRule) ProtoMessage() {} func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{28} + return fileDescriptor_90a532284de28347, []int{28} } func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -864,7 +864,7 @@ var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_cdc93917efc28165, []int{29} + return fileDescriptor_90a532284de28347, []int{29} } func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -892,7 +892,7 @@ var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } func (*NetworkPolicyList) ProtoMessage() {} func (*NetworkPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{30} + return fileDescriptor_90a532284de28347, []int{30} } func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -920,7 +920,7 @@ var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } func (*NetworkPolicyPeer) ProtoMessage() {} func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{31} + return fileDescriptor_90a532284de28347, []int{31} } func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -948,7 +948,7 @@ var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } func (*NetworkPolicyPort) ProtoMessage() {} func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{32} + return fileDescriptor_90a532284de28347, []int{32} } func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -976,7 +976,7 @@ var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } func (*NetworkPolicySpec) ProtoMessage() {} func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{33} + return fileDescriptor_90a532284de28347, []int{33} } func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1004,7 +1004,7 @@ var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } func (*ReplicaSet) ProtoMessage() {} func (*ReplicaSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{34} + return fileDescriptor_90a532284de28347, []int{34} } func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1032,7 +1032,7 @@ var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } func (*ReplicaSetCondition) ProtoMessage() {} func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{35} + return fileDescriptor_90a532284de28347, []int{35} } func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1060,7 +1060,7 @@ var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } func (*ReplicaSetList) ProtoMessage() {} func (*ReplicaSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{36} + return fileDescriptor_90a532284de28347, []int{36} } func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1088,7 +1088,7 @@ var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } func (*ReplicaSetSpec) ProtoMessage() {} func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{37} + return 
fileDescriptor_90a532284de28347, []int{37} } func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1116,7 +1116,7 @@ var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } func (*ReplicaSetStatus) ProtoMessage() {} func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{38} + return fileDescriptor_90a532284de28347, []int{38} } func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1144,7 +1144,7 @@ var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } func (*RollbackConfig) ProtoMessage() {} func (*RollbackConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{39} + return fileDescriptor_90a532284de28347, []int{39} } func (m *RollbackConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1172,7 +1172,7 @@ var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (*RollingUpdateDaemonSet) ProtoMessage() {} func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{40} + return fileDescriptor_90a532284de28347, []int{40} } func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1200,7 +1200,7 @@ var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{41} + return fileDescriptor_90a532284de28347, []int{41} } func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1228,7 +1228,7 @@ var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{42} + return fileDescriptor_90a532284de28347, []int{42} } func (m *Scale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1256,7 +1256,7 @@ var xxx_messageInfo_Scale proto.InternalMessageInfo func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} func (*ScaleSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{43} + return fileDescriptor_90a532284de28347, []int{43} } func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1284,7 +1284,7 @@ var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} func (*ScaleStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{44} + return fileDescriptor_90a532284de28347, []int{44} } func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1360,190 +1360,189 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto", fileDescriptor_cdc93917efc28165) + proto.RegisterFile("k8s.io/api/extensions/v1beta1/generated.proto", fileDescriptor_90a532284de28347) } -var fileDescriptor_cdc93917efc28165 = []byte{ - // 2858 bytes of a gzipped FileDescriptorProto +var fileDescriptor_90a532284de28347 = []byte{ + // 2842 bytes of a gzipped 
FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcd, 0x6f, 0x24, 0x47, 0x15, 0xdf, 0x9e, 0xf1, 0xd8, 0xe3, 0xe7, 0xb5, 0xbd, 0x5b, 0xeb, 0xac, 0x1d, 0x2f, 0xb1, 0xa3, - 0x46, 0x84, 0x4d, 0xd8, 0xcc, 0xb0, 0x9b, 0x64, 0xc9, 0x87, 0x94, 0xb0, 0xe3, 0xdd, 0x64, 0x9d, - 0xd8, 0xe3, 0x49, 0xcd, 0x38, 0x41, 0x11, 0x01, 0xda, 0x3d, 0xe5, 0x71, 0xc7, 0x3d, 0xdd, 0xa3, - 0xee, 0x1a, 0xb3, 0xbe, 0x81, 0xe0, 0x92, 0x13, 0x5c, 0x02, 0x1c, 0x91, 0x90, 0xb8, 0x72, 0xe5, - 0x10, 0x22, 0x10, 0x41, 0x5a, 0x21, 0x0e, 0x91, 0x38, 0x90, 0x93, 0x45, 0x9c, 0x13, 0xe2, 0x1f, - 0x40, 0x7b, 0x42, 0xf5, 0xd1, 0xd5, 0xdf, 0x76, 0x8f, 0xf1, 0x5a, 0x04, 0x71, 0x5a, 0x4f, 0xbd, - 0xf7, 0x7e, 0xf5, 0xaa, 0xea, 0xd5, 0x7b, 0xbf, 0xaa, 0xea, 0x85, 0x57, 0x77, 0x9f, 0xf7, 0x6b, - 0x96, 0x5b, 0xdf, 0x1d, 0x6e, 0x11, 0xcf, 0x21, 0x94, 0xf8, 0xf5, 0x3d, 0xe2, 0x74, 0x5d, 0xaf, - 0x2e, 0x05, 0xc6, 0xc0, 0xaa, 0x93, 0x7b, 0x94, 0x38, 0xbe, 0xe5, 0x3a, 0x7e, 0x7d, 0xef, 0xfa, - 0x16, 0xa1, 0xc6, 0xf5, 0x7a, 0x8f, 0x38, 0xc4, 0x33, 0x28, 0xe9, 0xd6, 0x06, 0x9e, 0x4b, 0x5d, - 0xf4, 0x98, 0x50, 0xaf, 0x19, 0x03, 0xab, 0x16, 0xaa, 0xd7, 0xa4, 0xfa, 0xe2, 0xd3, 0x3d, 0x8b, - 0xee, 0x0c, 0xb7, 0x6a, 0xa6, 0xdb, 0xaf, 0xf7, 0xdc, 0x9e, 0x5b, 0xe7, 0x56, 0x5b, 0xc3, 0x6d, - 0xfe, 0x8b, 0xff, 0xe0, 0x7f, 0x09, 0xb4, 0x45, 0x3d, 0xd2, 0xb9, 0xe9, 0x7a, 0xa4, 0xbe, 0x97, - 0xea, 0x71, 0xf1, 0xd9, 0x50, 0xa7, 0x6f, 0x98, 0x3b, 0x96, 0x43, 0xbc, 0xfd, 0xfa, 0x60, 0xb7, - 0xc7, 0x1a, 0xfc, 0x7a, 0x9f, 0x50, 0x23, 0xcb, 0xaa, 0x9e, 0x67, 0xe5, 0x0d, 0x1d, 0x6a, 0xf5, - 0x49, 0xca, 0xe0, 0xe6, 0x71, 0x06, 0xbe, 0xb9, 0x43, 0xfa, 0x46, 0xca, 0xee, 0x99, 0x3c, 0xbb, - 0x21, 0xb5, 0xec, 0xba, 0xe5, 0x50, 0x9f, 0x7a, 0x49, 0x23, 0xfd, 0x83, 0x12, 0x4c, 0xde, 0x36, - 0x48, 0xdf, 0x75, 0xda, 0x84, 0xa2, 0xef, 0x41, 0x95, 0x0d, 0xa3, 0x6b, 0x50, 0x63, 0x41, 0x7b, - 0x5c, 0xbb, 0x3a, 0x75, 0xe3, 0xeb, 0xb5, 0x70, 0x9a, 0x15, 0x6a, 0x6d, 0xb0, 0xdb, 0x63, 0x0d, - 0x7e, 0x8d, 0x69, 0xd7, 0xf6, 0xae, 0xd7, 0x36, 0xb6, 0xde, 0x23, 0x26, 0x5d, 0x27, 0xd4, 0x68, - 0xa0, 0xfb, 0x07, 0xcb, 0xe7, 0x0e, 0x0f, 0x96, 0x21, 0x6c, 0xc3, 0x0a, 0x15, 0x35, 0x61, 0xcc, - 0x1f, 0x10, 0x73, 0xa1, 0xc4, 0xd1, 0xaf, 0xd5, 0x8e, 0x5c, 0xc4, 0x9a, 0xf2, 0xac, 0x3d, 0x20, - 0x66, 0xe3, 0xbc, 0x44, 0x1e, 0x63, 0xbf, 0x30, 0xc7, 0x41, 0x6f, 0xc1, 0xb8, 0x4f, 0x0d, 0x3a, - 0xf4, 0x17, 0xca, 0x1c, 0xb1, 0x56, 0x18, 0x91, 0x5b, 0x35, 0x66, 0x24, 0xe6, 0xb8, 0xf8, 0x8d, - 0x25, 0x9a, 0xfe, 0x8f, 0x12, 0x20, 0xa5, 0xbb, 0xe2, 0x3a, 0x5d, 0x8b, 0x5a, 0xae, 0x83, 0x5e, - 0x84, 0x31, 0xba, 0x3f, 0x20, 0x7c, 0x72, 0x26, 0x1b, 0x4f, 0x04, 0x0e, 0x75, 0xf6, 0x07, 0xe4, - 0xc1, 0xc1, 0xf2, 0xe5, 0xb4, 0x05, 0x93, 0x60, 0x6e, 0x83, 0xd6, 0x94, 0xab, 0x25, 0x6e, 0xfd, - 0x6c, 0xbc, 0xeb, 0x07, 0x07, 0xcb, 0x19, 0x41, 0x58, 0x53, 0x48, 0x71, 0x07, 0xd1, 0x1e, 0x20, - 0xdb, 0xf0, 0x69, 0xc7, 0x33, 0x1c, 0x5f, 0xf4, 0x64, 0xf5, 0x89, 0x9c, 0x84, 0xa7, 0x8a, 0x2d, - 0x1a, 0xb3, 0x68, 0x2c, 0x4a, 0x2f, 0xd0, 0x5a, 0x0a, 0x0d, 0x67, 0xf4, 0x80, 0x9e, 0x80, 0x71, - 0x8f, 0x18, 0xbe, 0xeb, 0x2c, 0x8c, 0xf1, 0x51, 0xa8, 0x09, 0xc4, 0xbc, 0x15, 0x4b, 0x29, 0x7a, - 0x12, 0x26, 0xfa, 0xc4, 0xf7, 0x8d, 0x1e, 0x59, 0xa8, 0x70, 0xc5, 0x59, 0xa9, 0x38, 0xb1, 0x2e, - 0x9a, 0x71, 0x20, 0xd7, 0x3f, 0xd4, 0x60, 0x5a, 0xcd, 0xdc, 0x9a, 0xe5, 0x53, 0xf4, 0xed, 0x54, - 0x1c, 0xd6, 0x8a, 0x0d, 0x89, 0x59, 0xf3, 0x28, 0xbc, 0x20, 0x7b, 0xab, 0x06, 0x2d, 0x91, 0x18, - 0x5c, 0x87, 0x8a, 0x45, 0x49, 0x9f, 0xad, 0x43, 0xf9, 0xea, 0xd4, 0x8d, 0xab, 0x45, 0x43, 0xa6, - 0x31, 
0x2d, 0x41, 0x2b, 0xab, 0xcc, 0x1c, 0x0b, 0x14, 0xfd, 0x67, 0x63, 0x11, 0xf7, 0x59, 0x68, - 0xa2, 0x77, 0xa1, 0xea, 0x13, 0x9b, 0x98, 0xd4, 0xf5, 0xa4, 0xfb, 0xcf, 0x14, 0x74, 0xdf, 0xd8, - 0x22, 0x76, 0x5b, 0x9a, 0x36, 0xce, 0x33, 0xff, 0x83, 0x5f, 0x58, 0x41, 0xa2, 0x37, 0xa1, 0x4a, - 0x49, 0x7f, 0x60, 0x1b, 0x94, 0xc8, 0x7d, 0xf4, 0xe5, 0xe8, 0x10, 0x58, 0xe4, 0x30, 0xb0, 0x96, - 0xdb, 0xed, 0x48, 0x35, 0xbe, 0x7d, 0xd4, 0x94, 0x04, 0xad, 0x58, 0xc1, 0xa0, 0x3d, 0x98, 0x19, - 0x0e, 0xba, 0x4c, 0x93, 0xb2, 0xec, 0xd0, 0xdb, 0x97, 0x91, 0x74, 0xb3, 0xe8, 0xdc, 0x6c, 0xc6, - 0xac, 0x1b, 0x97, 0x65, 0x5f, 0x33, 0xf1, 0x76, 0x9c, 0xe8, 0x05, 0xdd, 0x82, 0xd9, 0xbe, 0xe5, - 0x60, 0x62, 0x74, 0xf7, 0xdb, 0xc4, 0x74, 0x9d, 0xae, 0xcf, 0xc3, 0xaa, 0xd2, 0x98, 0x97, 0x00, - 0xb3, 0xeb, 0x71, 0x31, 0x4e, 0xea, 0xa3, 0xd7, 0x01, 0x05, 0xc3, 0x78, 0x4d, 0x24, 0x37, 0xcb, - 0x75, 0x78, 0xcc, 0x95, 0xc3, 0xe0, 0xee, 0xa4, 0x34, 0x70, 0x86, 0x15, 0x5a, 0x83, 0x39, 0x8f, - 0xec, 0x59, 0x6c, 0x8c, 0x77, 0x2d, 0x9f, 0xba, 0xde, 0xfe, 0x9a, 0xd5, 0xb7, 0xe8, 0xc2, 0x38, - 0xf7, 0x69, 0xe1, 0xf0, 0x60, 0x79, 0x0e, 0x67, 0xc8, 0x71, 0xa6, 0x95, 0xfe, 0xf3, 0x71, 0x98, - 0x4d, 0xe4, 0x1b, 0xf4, 0x16, 0x5c, 0x36, 0x87, 0x9e, 0x47, 0x1c, 0xda, 0x1c, 0xf6, 0xb7, 0x88, - 0xd7, 0x36, 0x77, 0x48, 0x77, 0x68, 0x93, 0x2e, 0x0f, 0x94, 0x4a, 0x63, 0x49, 0x7a, 0x7c, 0x79, - 0x25, 0x53, 0x0b, 0xe7, 0x58, 0xb3, 0x59, 0x70, 0x78, 0xd3, 0xba, 0xe5, 0xfb, 0x0a, 0xb3, 0xc4, - 0x31, 0xd5, 0x2c, 0x34, 0x53, 0x1a, 0x38, 0xc3, 0x8a, 0xf9, 0xd8, 0x25, 0xbe, 0xe5, 0x91, 0x6e, - 0xd2, 0xc7, 0x72, 0xdc, 0xc7, 0xdb, 0x99, 0x5a, 0x38, 0xc7, 0x1a, 0x3d, 0x07, 0x53, 0xa2, 0x37, - 0xbe, 0x7e, 0x72, 0xa1, 0x2f, 0x49, 0xb0, 0xa9, 0x66, 0x28, 0xc2, 0x51, 0x3d, 0x36, 0x34, 0x77, - 0xcb, 0x27, 0xde, 0x1e, 0xe9, 0xe6, 0x2f, 0xf0, 0x46, 0x4a, 0x03, 0x67, 0x58, 0xb1, 0xa1, 0x89, - 0x08, 0x4c, 0x0d, 0x6d, 0x3c, 0x3e, 0xb4, 0xcd, 0x4c, 0x2d, 0x9c, 0x63, 0xcd, 0xe2, 0x58, 0xb8, - 0x7c, 0x6b, 0xcf, 0xb0, 0x6c, 0x63, 0xcb, 0x26, 0x0b, 0x13, 0xf1, 0x38, 0x6e, 0xc6, 0xc5, 0x38, - 0xa9, 0x8f, 0x5e, 0x83, 0x8b, 0xa2, 0x69, 0xd3, 0x31, 0x14, 0x48, 0x95, 0x83, 0x3c, 0x2a, 0x41, - 0x2e, 0x36, 0x93, 0x0a, 0x38, 0x6d, 0x83, 0x5e, 0x84, 0x19, 0xd3, 0xb5, 0x6d, 0x1e, 0x8f, 0x2b, - 0xee, 0xd0, 0xa1, 0x0b, 0x93, 0x1c, 0x05, 0xb1, 0xfd, 0xb8, 0x12, 0x93, 0xe0, 0x84, 0x26, 0x22, - 0x00, 0x66, 0x50, 0x70, 0xfc, 0x05, 0xe0, 0xf9, 0xf1, 0x7a, 0xd1, 0x1c, 0xa0, 0x4a, 0x55, 0xc8, - 0x01, 0x54, 0x93, 0x8f, 0x23, 0xc0, 0xfa, 0x9f, 0x35, 0x98, 0xcf, 0x49, 0x1d, 0xe8, 0x95, 0x58, - 0x89, 0xfd, 0x5a, 0xa2, 0xc4, 0x5e, 0xc9, 0x31, 0x8b, 0xd4, 0x59, 0x07, 0xa6, 0x3d, 0x36, 0x2a, - 0xa7, 0x27, 0x54, 0x64, 0x8e, 0x7c, 0xee, 0x98, 0x61, 0xe0, 0xa8, 0x4d, 0x98, 0xf3, 0x2f, 0x1e, - 0x1e, 0x2c, 0x4f, 0xc7, 0x64, 0x38, 0x0e, 0xaf, 0xff, 0xa2, 0x04, 0x70, 0x9b, 0x0c, 0x6c, 0x77, - 0xbf, 0x4f, 0x9c, 0xb3, 0xe0, 0x50, 0x1b, 0x31, 0x0e, 0xf5, 0xf4, 0x71, 0xcb, 0xa3, 0x5c, 0xcb, - 0x25, 0x51, 0x6f, 0x27, 0x48, 0x54, 0xbd, 0x38, 0xe4, 0xd1, 0x2c, 0xea, 0x6f, 0x65, 0xb8, 0x14, - 0x2a, 0x87, 0x34, 0xea, 0xa5, 0xd8, 0x1a, 0x7f, 0x35, 0xb1, 0xc6, 0xf3, 0x19, 0x26, 0x0f, 0x8d, - 0x47, 0xbd, 0x07, 0x33, 0x8c, 0xe5, 0x88, 0xb5, 0xe4, 0x1c, 0x6a, 0x7c, 0x64, 0x0e, 0xa5, 0xaa, - 0xdd, 0x5a, 0x0c, 0x09, 0x27, 0x90, 0x73, 0x38, 0xdb, 0xc4, 0x17, 0x91, 0xb3, 0x7d, 0xa4, 0xc1, - 0x4c, 0xb8, 0x4c, 0x67, 0x40, 0xda, 0x9a, 0x71, 0xd2, 0xf6, 0x64, 0xe1, 0x10, 0xcd, 0x61, 0x6d, - 0xff, 0x62, 0x04, 0x5f, 0x29, 0xb1, 0x0d, 0xbe, 0x65, 0x98, 0xbb, 0xe8, 0x71, 0x18, 0x73, 0x8c, - 0x7e, 0x10, 0x99, 0x6a, 0xb3, 
0x34, 0x8d, 0x3e, 0xc1, 0x5c, 0x82, 0x3e, 0xd0, 0x00, 0xc9, 0x2a, - 0x70, 0xcb, 0x71, 0x5c, 0x6a, 0x88, 0x5c, 0x29, 0xdc, 0x5a, 0x2d, 0xec, 0x56, 0xd0, 0x63, 0x6d, - 0x33, 0x85, 0x75, 0xc7, 0xa1, 0xde, 0x7e, 0xb8, 0xc8, 0x69, 0x05, 0x9c, 0xe1, 0x00, 0x32, 0x00, - 0x3c, 0x89, 0xd9, 0x71, 0xe5, 0x46, 0x7e, 0xba, 0x40, 0xce, 0x63, 0x06, 0x2b, 0xae, 0xb3, 0x6d, - 0xf5, 0xc2, 0xb4, 0x83, 0x15, 0x10, 0x8e, 0x80, 0x2e, 0xde, 0x81, 0xf9, 0x1c, 0x6f, 0xd1, 0x05, + 0x46, 0x84, 0x4d, 0xd8, 0x9d, 0x61, 0x37, 0xc9, 0x92, 0x0f, 0x29, 0x61, 0xc7, 0xbb, 0xc9, 0x3a, + 0xb1, 0xc7, 0x93, 0x9a, 0x71, 0x82, 0x22, 0x02, 0xb4, 0x7b, 0xca, 0xe3, 0x8e, 0x7b, 0xba, 0x47, + 0xdd, 0x35, 0x66, 0x7d, 0x03, 0xc1, 0x25, 0x27, 0xb8, 0x04, 0x38, 0x22, 0x21, 0x71, 0xe5, 0xca, + 0x21, 0x44, 0x20, 0x82, 0xb4, 0x42, 0x1c, 0x22, 0x71, 0x20, 0x27, 0x8b, 0x38, 0x27, 0xc4, 0x3f, + 0x80, 0xf6, 0x84, 0xea, 0xa3, 0xab, 0xbf, 0xed, 0x1e, 0xe3, 0x58, 0x04, 0x71, 0x5a, 0x4f, 0xbd, + 0xf7, 0x7e, 0xf5, 0xaa, 0xea, 0xd5, 0x7b, 0xbf, 0xaa, 0xea, 0x85, 0xeb, 0xbb, 0xcf, 0xf9, 0x35, + 0xcb, 0xad, 0x1b, 0x03, 0xab, 0x4e, 0xee, 0x53, 0xe2, 0xf8, 0x96, 0xeb, 0xf8, 0xf5, 0xbd, 0x1b, + 0x5b, 0x84, 0x1a, 0x37, 0xea, 0x3d, 0xe2, 0x10, 0xcf, 0xa0, 0xa4, 0x5b, 0x1b, 0x78, 0x2e, 0x75, + 0xd1, 0x63, 0x42, 0xbd, 0x66, 0x0c, 0xac, 0x5a, 0xa8, 0x5e, 0x93, 0xea, 0x8b, 0xd7, 0x7b, 0x16, + 0xdd, 0x19, 0x6e, 0xd5, 0x4c, 0xb7, 0x5f, 0xef, 0xb9, 0x3d, 0xb7, 0xce, 0xad, 0xb6, 0x86, 0xdb, + 0xfc, 0x17, 0xff, 0xc1, 0xff, 0x12, 0x68, 0x8b, 0x7a, 0xa4, 0x73, 0xd3, 0xf5, 0x48, 0x7d, 0x2f, + 0xd5, 0xe3, 0xe2, 0x33, 0xa1, 0x4e, 0xdf, 0x30, 0x77, 0x2c, 0x87, 0x78, 0xfb, 0xf5, 0xc1, 0x6e, + 0x8f, 0x35, 0xf8, 0xf5, 0x3e, 0xa1, 0x46, 0x96, 0x55, 0x3d, 0xcf, 0xca, 0x1b, 0x3a, 0xd4, 0xea, + 0x93, 0x94, 0xc1, 0xad, 0xe3, 0x0c, 0x7c, 0x73, 0x87, 0xf4, 0x8d, 0x94, 0xdd, 0xd3, 0x79, 0x76, + 0x43, 0x6a, 0xd9, 0x75, 0xcb, 0xa1, 0x3e, 0xf5, 0x92, 0x46, 0xfa, 0xfb, 0x25, 0x98, 0xbc, 0x63, + 0x90, 0xbe, 0xeb, 0xb4, 0x09, 0x45, 0xdf, 0x83, 0x2a, 0x1b, 0x46, 0xd7, 0xa0, 0xc6, 0x82, 0xf6, + 0xb8, 0x76, 0x75, 0xea, 0xe6, 0xd7, 0x6b, 0xe1, 0x34, 0x2b, 0xd4, 0xda, 0x60, 0xb7, 0xc7, 0x1a, + 0xfc, 0x1a, 0xd3, 0xae, 0xed, 0xdd, 0xa8, 0x6d, 0x6c, 0xbd, 0x4b, 0x4c, 0xba, 0x4e, 0xa8, 0xd1, + 0x40, 0x0f, 0x0e, 0x96, 0xcf, 0x1d, 0x1e, 0x2c, 0x43, 0xd8, 0x86, 0x15, 0x2a, 0x6a, 0xc2, 0x98, + 0x3f, 0x20, 0xe6, 0x42, 0x89, 0xa3, 0x5f, 0xab, 0x1d, 0xb9, 0x88, 0x35, 0xe5, 0x59, 0x7b, 0x40, + 0xcc, 0xc6, 0x79, 0x89, 0x3c, 0xc6, 0x7e, 0x61, 0x8e, 0x83, 0xde, 0x84, 0x71, 0x9f, 0x1a, 0x74, + 0xe8, 0x2f, 0x94, 0x39, 0x62, 0xad, 0x30, 0x22, 0xb7, 0x6a, 0xcc, 0x48, 0xcc, 0x71, 0xf1, 0x1b, + 0x4b, 0x34, 0xfd, 0x1f, 0x25, 0x40, 0x4a, 0x77, 0xc5, 0x75, 0xba, 0x16, 0xb5, 0x5c, 0x07, 0xbd, + 0x00, 0x63, 0x74, 0x7f, 0x40, 0xf8, 0xe4, 0x4c, 0x36, 0x9e, 0x08, 0x1c, 0xea, 0xec, 0x0f, 0xc8, + 0xc3, 0x83, 0xe5, 0xcb, 0x69, 0x0b, 0x26, 0xc1, 0xdc, 0x06, 0xad, 0x29, 0x57, 0x4b, 0xdc, 0xfa, + 0x99, 0x78, 0xd7, 0x0f, 0x0f, 0x96, 0x33, 0x82, 0xb0, 0xa6, 0x90, 0xe2, 0x0e, 0xa2, 0x3d, 0x40, + 0xb6, 0xe1, 0xd3, 0x8e, 0x67, 0x38, 0xbe, 0xe8, 0xc9, 0xea, 0x13, 0x39, 0x09, 0x4f, 0x15, 0x5b, + 0x34, 0x66, 0xd1, 0x58, 0x94, 0x5e, 0xa0, 0xb5, 0x14, 0x1a, 0xce, 0xe8, 0x01, 0x3d, 0x01, 0xe3, + 0x1e, 0x31, 0x7c, 0xd7, 0x59, 0x18, 0xe3, 0xa3, 0x50, 0x13, 0x88, 0x79, 0x2b, 0x96, 0x52, 0xf4, + 0x24, 0x4c, 0xf4, 0x89, 0xef, 0x1b, 0x3d, 0xb2, 0x50, 0xe1, 0x8a, 0xb3, 0x52, 0x71, 0x62, 0x5d, + 0x34, 0xe3, 0x40, 0xae, 0x7f, 0xa0, 0xc1, 0xb4, 0x9a, 0xb9, 0x35, 0xcb, 0xa7, 0xe8, 0xdb, 0xa9, + 0x38, 0xac, 0x15, 0x1b, 0x12, 0xb3, 0xe6, 0x51, 0x78, 
0x41, 0xf6, 0x56, 0x0d, 0x5a, 0x22, 0x31, + 0xb8, 0x0e, 0x15, 0x8b, 0x92, 0x3e, 0x5b, 0x87, 0xf2, 0xd5, 0xa9, 0x9b, 0x57, 0x8b, 0x86, 0x4c, + 0x63, 0x5a, 0x82, 0x56, 0x56, 0x99, 0x39, 0x16, 0x28, 0xfa, 0xcf, 0xc6, 0x22, 0xee, 0xb3, 0xd0, + 0x44, 0xef, 0x40, 0xd5, 0x27, 0x36, 0x31, 0xa9, 0xeb, 0x49, 0xf7, 0x9f, 0x2e, 0xe8, 0xbe, 0xb1, + 0x45, 0xec, 0xb6, 0x34, 0x6d, 0x9c, 0x67, 0xfe, 0x07, 0xbf, 0xb0, 0x82, 0x44, 0x6f, 0x40, 0x95, + 0x92, 0xfe, 0xc0, 0x36, 0x28, 0x91, 0xfb, 0xe8, 0xcb, 0xd1, 0x21, 0xb0, 0xc8, 0x61, 0x60, 0x2d, + 0xb7, 0xdb, 0x91, 0x6a, 0x7c, 0xfb, 0xa8, 0x29, 0x09, 0x5a, 0xb1, 0x82, 0x41, 0x7b, 0x30, 0x33, + 0x1c, 0x74, 0x99, 0x26, 0x65, 0xd9, 0xa1, 0xb7, 0x2f, 0x23, 0xe9, 0x56, 0xd1, 0xb9, 0xd9, 0x8c, + 0x59, 0x37, 0x2e, 0xcb, 0xbe, 0x66, 0xe2, 0xed, 0x38, 0xd1, 0x0b, 0xba, 0x0d, 0xb3, 0x7d, 0xcb, + 0xc1, 0xc4, 0xe8, 0xee, 0xb7, 0x89, 0xe9, 0x3a, 0x5d, 0x9f, 0x87, 0x55, 0xa5, 0x31, 0x2f, 0x01, + 0x66, 0xd7, 0xe3, 0x62, 0x9c, 0xd4, 0x47, 0xaf, 0x01, 0x0a, 0x86, 0xf1, 0xaa, 0x48, 0x6e, 0x96, + 0xeb, 0xf0, 0x98, 0x2b, 0x87, 0xc1, 0xdd, 0x49, 0x69, 0xe0, 0x0c, 0x2b, 0xb4, 0x06, 0x73, 0x1e, + 0xd9, 0xb3, 0xd8, 0x18, 0xef, 0x59, 0x3e, 0x75, 0xbd, 0xfd, 0x35, 0xab, 0x6f, 0xd1, 0x85, 0x71, + 0xee, 0xd3, 0xc2, 0xe1, 0xc1, 0xf2, 0x1c, 0xce, 0x90, 0xe3, 0x4c, 0x2b, 0xfd, 0xe7, 0xe3, 0x30, + 0x9b, 0xc8, 0x37, 0xe8, 0x4d, 0xb8, 0x6c, 0x0e, 0x3d, 0x8f, 0x38, 0xb4, 0x39, 0xec, 0x6f, 0x11, + 0xaf, 0x6d, 0xee, 0x90, 0xee, 0xd0, 0x26, 0x5d, 0x1e, 0x28, 0x95, 0xc6, 0x92, 0xf4, 0xf8, 0xf2, + 0x4a, 0xa6, 0x16, 0xce, 0xb1, 0x66, 0xb3, 0xe0, 0xf0, 0xa6, 0x75, 0xcb, 0xf7, 0x15, 0x66, 0x89, + 0x63, 0xaa, 0x59, 0x68, 0xa6, 0x34, 0x70, 0x86, 0x15, 0xf3, 0xb1, 0x4b, 0x7c, 0xcb, 0x23, 0xdd, + 0xa4, 0x8f, 0xe5, 0xb8, 0x8f, 0x77, 0x32, 0xb5, 0x70, 0x8e, 0x35, 0x7a, 0x16, 0xa6, 0x44, 0x6f, + 0x7c, 0xfd, 0xe4, 0x42, 0x5f, 0x92, 0x60, 0x53, 0xcd, 0x50, 0x84, 0xa3, 0x7a, 0x6c, 0x68, 0xee, + 0x96, 0x4f, 0xbc, 0x3d, 0xd2, 0xcd, 0x5f, 0xe0, 0x8d, 0x94, 0x06, 0xce, 0xb0, 0x62, 0x43, 0x13, + 0x11, 0x98, 0x1a, 0xda, 0x78, 0x7c, 0x68, 0x9b, 0x99, 0x5a, 0x38, 0xc7, 0x9a, 0xc5, 0xb1, 0x70, + 0xf9, 0xf6, 0x9e, 0x61, 0xd9, 0xc6, 0x96, 0x4d, 0x16, 0x26, 0xe2, 0x71, 0xdc, 0x8c, 0x8b, 0x71, + 0x52, 0x1f, 0xbd, 0x0a, 0x17, 0x45, 0xd3, 0xa6, 0x63, 0x28, 0x90, 0x2a, 0x07, 0x79, 0x54, 0x82, + 0x5c, 0x6c, 0x26, 0x15, 0x70, 0xda, 0x06, 0xbd, 0x00, 0x33, 0xa6, 0x6b, 0xdb, 0x3c, 0x1e, 0x57, + 0xdc, 0xa1, 0x43, 0x17, 0x26, 0x39, 0x0a, 0x62, 0xfb, 0x71, 0x25, 0x26, 0xc1, 0x09, 0x4d, 0x44, + 0x00, 0xcc, 0xa0, 0xe0, 0xf8, 0x0b, 0xc0, 0xf3, 0xe3, 0x8d, 0xa2, 0x39, 0x40, 0x95, 0xaa, 0x90, + 0x03, 0xa8, 0x26, 0x1f, 0x47, 0x80, 0xf5, 0x3f, 0x6b, 0x30, 0x9f, 0x93, 0x3a, 0xd0, 0xcb, 0xb1, + 0x12, 0xfb, 0xb5, 0x44, 0x89, 0xbd, 0x92, 0x63, 0x16, 0xa9, 0xb3, 0x0e, 0x4c, 0x7b, 0x6c, 0x54, + 0x4e, 0x4f, 0xa8, 0xc8, 0x1c, 0xf9, 0xec, 0x31, 0xc3, 0xc0, 0x51, 0x9b, 0x30, 0xe7, 0x5f, 0x3c, + 0x3c, 0x58, 0x9e, 0x8e, 0xc9, 0x70, 0x1c, 0x5e, 0xff, 0x45, 0x09, 0xe0, 0x0e, 0x19, 0xd8, 0xee, + 0x7e, 0x9f, 0x38, 0x67, 0xc1, 0xa1, 0x36, 0x62, 0x1c, 0xea, 0xfa, 0x71, 0xcb, 0xa3, 0x5c, 0xcb, + 0x25, 0x51, 0x6f, 0x25, 0x48, 0x54, 0xbd, 0x38, 0xe4, 0xd1, 0x2c, 0xea, 0x6f, 0x65, 0xb8, 0x14, + 0x2a, 0x87, 0x34, 0xea, 0xc5, 0xd8, 0x1a, 0x7f, 0x35, 0xb1, 0xc6, 0xf3, 0x19, 0x26, 0x9f, 0x1b, + 0x8f, 0x7a, 0x17, 0x66, 0x18, 0xcb, 0x11, 0x6b, 0xc9, 0x39, 0xd4, 0xf8, 0xc8, 0x1c, 0x4a, 0x55, + 0xbb, 0xb5, 0x18, 0x12, 0x4e, 0x20, 0xe7, 0x70, 0xb6, 0x89, 0x2f, 0x22, 0x67, 0xfb, 0x50, 0x83, + 0x99, 0x70, 0x99, 0xce, 0x80, 0xb4, 0x35, 0xe3, 0xa4, 0xed, 0xc9, 0xc2, 0x21, 
0x9a, 0xc3, 0xda, + 0xfe, 0xc5, 0x08, 0xbe, 0x52, 0x62, 0x1b, 0x7c, 0xcb, 0x30, 0x77, 0xd1, 0xe3, 0x30, 0xe6, 0x18, + 0xfd, 0x20, 0x32, 0xd5, 0x66, 0x69, 0x1a, 0x7d, 0x82, 0xb9, 0x04, 0xbd, 0xaf, 0x01, 0x92, 0x55, + 0xe0, 0xb6, 0xe3, 0xb8, 0xd4, 0x10, 0xb9, 0x52, 0xb8, 0xb5, 0x5a, 0xd8, 0xad, 0xa0, 0xc7, 0xda, + 0x66, 0x0a, 0xeb, 0xae, 0x43, 0xbd, 0xfd, 0x70, 0x91, 0xd3, 0x0a, 0x38, 0xc3, 0x01, 0x64, 0x00, + 0x78, 0x12, 0xb3, 0xe3, 0xca, 0x8d, 0x7c, 0xbd, 0x40, 0xce, 0x63, 0x06, 0x2b, 0xae, 0xb3, 0x6d, + 0xf5, 0xc2, 0xb4, 0x83, 0x15, 0x10, 0x8e, 0x80, 0x2e, 0xde, 0x85, 0xf9, 0x1c, 0x6f, 0xd1, 0x05, 0x28, 0xef, 0x92, 0x7d, 0x31, 0x6d, 0x98, 0xfd, 0x89, 0xe6, 0xa0, 0xb2, 0x67, 0xd8, 0x43, 0x91, - 0x7e, 0x27, 0xb1, 0xf8, 0xf1, 0x62, 0xe9, 0x79, 0x4d, 0xff, 0xb0, 0x12, 0x8d, 0x1d, 0xce, 0x98, + 0x7e, 0x27, 0xb1, 0xf8, 0xf1, 0x42, 0xe9, 0x39, 0x4d, 0xff, 0xa0, 0x12, 0x8d, 0x1d, 0xce, 0x98, 0xaf, 0x42, 0xd5, 0x23, 0x03, 0xdb, 0x32, 0x0d, 0x5f, 0x12, 0x21, 0x4e, 0x7e, 0xb1, 0x6c, 0xc3, - 0x4a, 0x1a, 0xe3, 0xd6, 0xa5, 0x87, 0xcb, 0xad, 0xcb, 0xa7, 0xc3, 0xad, 0xbf, 0x0b, 0x55, 0x3f, - 0x60, 0xd5, 0x63, 0x1c, 0xf2, 0xfa, 0x08, 0xf9, 0x55, 0x12, 0x6a, 0xd5, 0x81, 0xa2, 0xd2, 0x0a, - 0x34, 0x8b, 0x44, 0x57, 0x46, 0x24, 0xd1, 0xa7, 0x4a, 0x7c, 0x59, 0xbe, 0x19, 0x18, 0x43, 0x9f, - 0x74, 0x79, 0x6e, 0xab, 0x86, 0xf9, 0xa6, 0xc5, 0x5b, 0xb1, 0x94, 0xa2, 0x77, 0x63, 0x21, 0x5b, - 0x3d, 0x49, 0xc8, 0xce, 0xe4, 0x87, 0x2b, 0xda, 0x84, 0xf9, 0x81, 0xe7, 0xf6, 0x3c, 0xe2, 0xfb, - 0xb7, 0x89, 0xd1, 0xb5, 0x2d, 0x87, 0x04, 0xf3, 0x23, 0x18, 0xd1, 0x95, 0xc3, 0x83, 0xe5, 0xf9, - 0x56, 0xb6, 0x0a, 0xce, 0xb3, 0xd5, 0xef, 0x8f, 0xc1, 0x85, 0x64, 0x05, 0xcc, 0x21, 0xa9, 0xda, - 0x89, 0x48, 0xea, 0xb5, 0xc8, 0x66, 0x10, 0x0c, 0x5e, 0xad, 0x7e, 0xc6, 0x86, 0xb8, 0x05, 0xb3, - 0x32, 0x1b, 0x04, 0x42, 0x49, 0xd3, 0xd5, 0xea, 0x6f, 0xc6, 0xc5, 0x38, 0xa9, 0x8f, 0x5e, 0x82, - 0x69, 0x8f, 0xf3, 0xee, 0x00, 0x40, 0x70, 0xd7, 0x47, 0x24, 0xc0, 0x34, 0x8e, 0x0a, 0x71, 0x5c, - 0x97, 0xf1, 0xd6, 0x90, 0x8e, 0x06, 0x00, 0x63, 0x71, 0xde, 0x7a, 0x2b, 0xa9, 0x80, 0xd3, 0x36, - 0x68, 0x1d, 0x2e, 0x0d, 0x9d, 0x34, 0x94, 0x08, 0xe5, 0x2b, 0x12, 0xea, 0xd2, 0x66, 0x5a, 0x05, - 0x67, 0xd9, 0xa1, 0xed, 0x18, 0x95, 0x1d, 0xe7, 0xe9, 0xf9, 0x46, 0xe1, 0x8d, 0x57, 0x98, 0xcb, - 0x66, 0xd0, 0xed, 0x6a, 0x51, 0xba, 0xad, 0xff, 0x41, 0x8b, 0x16, 0x21, 0x45, 0x81, 0x8f, 0xbb, - 0x65, 0x4a, 0x59, 0x44, 0xd8, 0x91, 0x9b, 0xcd, 0x7e, 0x6f, 0x8e, 0xc4, 0x7e, 0xc3, 0xe2, 0x79, - 0x3c, 0xfd, 0xfd, 0xa3, 0x06, 0xb3, 0x77, 0x3b, 0x9d, 0xd6, 0xaa, 0xc3, 0x77, 0x4b, 0xcb, 0xa0, - 0x3b, 0xac, 0x8a, 0x0e, 0x0c, 0xba, 0x93, 0xac, 0xa2, 0x4c, 0x86, 0xb9, 0x04, 0x3d, 0x0b, 0x55, - 0xf6, 0x2f, 0x73, 0x9c, 0x87, 0xeb, 0x24, 0x4f, 0x32, 0xd5, 0x96, 0x6c, 0x7b, 0x10, 0xf9, 0x1b, - 0x2b, 0x4d, 0xf4, 0x2d, 0x98, 0x60, 0x7b, 0x9b, 0x38, 0xdd, 0x82, 0xe4, 0x57, 0x3a, 0xd5, 0x10, - 0x46, 0x21, 0x9f, 0x91, 0x0d, 0x38, 0x80, 0xd3, 0x77, 0x61, 0x2e, 0x32, 0x08, 0x3c, 0xb4, 0xc9, - 0x5b, 0xac, 0x5e, 0xa1, 0x36, 0x54, 0x58, 0xef, 0xac, 0x2a, 0x95, 0x0b, 0x5c, 0x2f, 0x26, 0x26, - 0x22, 0xe4, 0x1e, 0xec, 0x97, 0x8f, 0x05, 0x96, 0xbe, 0x01, 0x13, 0xab, 0xad, 0x86, 0xed, 0x0a, - 0xbe, 0x61, 0x5a, 0x5d, 0x2f, 0x39, 0x53, 0x2b, 0xab, 0xb7, 0x31, 0xe6, 0x12, 0xa4, 0xc3, 0x38, - 0xb9, 0x67, 0x92, 0x01, 0xe5, 0x14, 0x63, 0xb2, 0x01, 0x2c, 0x91, 0xde, 0xe1, 0x2d, 0x58, 0x4a, - 0xf4, 0x9f, 0x94, 0x60, 0x42, 0x76, 0x7b, 0x06, 0xe7, 0x8f, 0xb5, 0xd8, 0xf9, 0xe3, 0xa9, 0x62, - 0x4b, 0x90, 0x7b, 0xf8, 0xe8, 0x24, 0x0e, 0x1f, 0xd7, 0x0a, 0xe2, 0x1d, 0x7d, 0xf2, 0x78, 0xbf, - 0x04, 
0x33, 0xf1, 0xc5, 0x47, 0xcf, 0xc1, 0x14, 0x4b, 0xb5, 0x96, 0x49, 0x9a, 0x21, 0xc3, 0x53, - 0xd7, 0x0f, 0xed, 0x50, 0x84, 0xa3, 0x7a, 0xa8, 0xa7, 0xcc, 0x5a, 0xae, 0x47, 0xe5, 0xa0, 0xf3, - 0xa7, 0x74, 0x48, 0x2d, 0xbb, 0x26, 0x2e, 0xdb, 0x6b, 0xab, 0x0e, 0xdd, 0xf0, 0xda, 0xd4, 0xb3, - 0x9c, 0x5e, 0xaa, 0x23, 0x06, 0x86, 0xa3, 0xc8, 0xe8, 0x6d, 0x96, 0xf6, 0x7d, 0x77, 0xe8, 0x99, - 0x24, 0x8b, 0xbe, 0x05, 0xd4, 0x83, 0x6d, 0x84, 0xee, 0x9a, 0x6b, 0x1a, 0xb6, 0x58, 0x1c, 0x4c, - 0xb6, 0x89, 0x47, 0x1c, 0x93, 0x04, 0x94, 0x49, 0x40, 0x60, 0x05, 0xa6, 0xff, 0x56, 0x83, 0x29, - 0x39, 0x17, 0x67, 0x40, 0xd4, 0xdf, 0x88, 0x13, 0xf5, 0x27, 0x0a, 0xee, 0xd0, 0x6c, 0x96, 0xfe, - 0x3b, 0x0d, 0x16, 0x03, 0xd7, 0x5d, 0xa3, 0xdb, 0x30, 0x6c, 0xc3, 0x31, 0x89, 0x17, 0xc4, 0xfa, - 0x22, 0x94, 0xac, 0x81, 0x5c, 0x49, 0x90, 0x00, 0xa5, 0xd5, 0x16, 0x2e, 0x59, 0x03, 0x56, 0x45, - 0x77, 0x5c, 0x9f, 0x72, 0x36, 0x2f, 0x0e, 0x8a, 0xca, 0xeb, 0xbb, 0xb2, 0x1d, 0x2b, 0x0d, 0xb4, - 0x09, 0x95, 0x81, 0xeb, 0x51, 0x56, 0xb9, 0xca, 0x89, 0xf5, 0x3d, 0xc2, 0x6b, 0xb6, 0x6e, 0x32, - 0x10, 0xc3, 0x9d, 0xce, 0x60, 0xb0, 0x40, 0xd3, 0x7f, 0xa8, 0xc1, 0xa3, 0x19, 0xfe, 0x4b, 0xd2, - 0xd0, 0x85, 0x09, 0x4b, 0x08, 0x65, 0x7a, 0x79, 0xa1, 0x58, 0xb7, 0x19, 0x53, 0x11, 0xa6, 0xb6, - 0x20, 0x85, 0x05, 0xd0, 0xfa, 0xaf, 0x34, 0xb8, 0x98, 0xf2, 0x97, 0xa7, 0x68, 0x16, 0xcf, 0x92, - 0x6d, 0xab, 0x14, 0xcd, 0xc2, 0x92, 0x4b, 0xd0, 0x1b, 0x50, 0xe5, 0x6f, 0x44, 0xa6, 0x6b, 0xcb, - 0x09, 0xac, 0x07, 0x13, 0xd8, 0x92, 0xed, 0x0f, 0x0e, 0x96, 0xaf, 0x64, 0x9c, 0xb5, 0x03, 0x31, - 0x56, 0x00, 0x68, 0x19, 0x2a, 0xc4, 0xf3, 0x5c, 0x4f, 0x26, 0xfb, 0x49, 0x36, 0x53, 0x77, 0x58, - 0x03, 0x16, 0xed, 0xfa, 0xaf, 0xc3, 0x20, 0x65, 0xd9, 0x97, 0xf9, 0xc7, 0x16, 0x27, 0x99, 0x18, - 0xd9, 0xd2, 0x61, 0x2e, 0x41, 0x43, 0xb8, 0x60, 0x25, 0xd2, 0xb5, 0xdc, 0x9d, 0xf5, 0x62, 0xd3, - 0xa8, 0xcc, 0x1a, 0x0b, 0x12, 0xfe, 0x42, 0x52, 0x82, 0x53, 0x5d, 0xe8, 0x04, 0x52, 0x5a, 0xe8, - 0x4d, 0x18, 0xdb, 0xa1, 0x74, 0x90, 0x71, 0xd9, 0x7f, 0x4c, 0x91, 0x08, 0x5d, 0xa8, 0xf2, 0xd1, - 0x75, 0x3a, 0x2d, 0xcc, 0xa1, 0xf4, 0xdf, 0x97, 0xd4, 0x7c, 0xf0, 0x13, 0xd2, 0x37, 0xd5, 0x68, - 0x57, 0x6c, 0xc3, 0xf7, 0x79, 0x0a, 0x13, 0xa7, 0xf9, 0xb9, 0x88, 0xe3, 0x4a, 0x86, 0x53, 0xda, - 0xa8, 0x13, 0x16, 0x4f, 0xed, 0x24, 0xc5, 0x73, 0x2a, 0xab, 0x70, 0xa2, 0xbb, 0x50, 0xa6, 0x76, - 0xd1, 0x53, 0xb9, 0x44, 0xec, 0xac, 0xb5, 0x1b, 0x53, 0x72, 0xca, 0xcb, 0x9d, 0xb5, 0x36, 0x66, - 0x10, 0x68, 0x03, 0x2a, 0xde, 0xd0, 0x26, 0xac, 0x0e, 0x94, 0x8b, 0xd7, 0x15, 0x36, 0x83, 0xe1, - 0xe6, 0x63, 0xbf, 0x7c, 0x2c, 0x70, 0xf4, 0x1f, 0x69, 0x30, 0x1d, 0xab, 0x16, 0xc8, 0x83, 0xf3, - 0x76, 0x64, 0xef, 0xc8, 0x79, 0x78, 0x7e, 0xf4, 0x5d, 0x27, 0x37, 0xfd, 0x9c, 0xec, 0xf7, 0x7c, - 0x54, 0x86, 0x63, 0x7d, 0xe8, 0x06, 0x40, 0x38, 0x6c, 0xb6, 0x0f, 0x58, 0xf0, 0x8a, 0x0d, 0x2f, - 0xf7, 0x01, 0x8b, 0x69, 0x1f, 0x8b, 0x76, 0x74, 0x03, 0xc0, 0x27, 0xa6, 0x47, 0x68, 0x33, 0x4c, - 0x5c, 0xaa, 0x1c, 0xb7, 0x95, 0x04, 0x47, 0xb4, 0xf4, 0x3f, 0x69, 0x30, 0xdd, 0x24, 0xf4, 0xfb, - 0xae, 0xb7, 0xdb, 0x72, 0x6d, 0xcb, 0xdc, 0x3f, 0x03, 0x12, 0x80, 0x63, 0x24, 0xe0, 0xb8, 0x7c, - 0x19, 0xf3, 0x2e, 0x8f, 0x0a, 0xe8, 0x1f, 0x69, 0x30, 0x1f, 0xd3, 0xbc, 0x13, 0xe6, 0x03, 0x95, - 0xa0, 0xb5, 0x42, 0x09, 0x3a, 0x06, 0xc3, 0x92, 0x5a, 0x76, 0x82, 0x46, 0x6b, 0x50, 0xa2, 0xae, - 0x8c, 0xde, 0xd1, 0x30, 0x09, 0xf1, 0xc2, 0x9a, 0xd3, 0x71, 0x71, 0x89, 0xba, 0x6c, 0x21, 0x16, - 0x62, 0x5a, 0xd1, 0x8c, 0xf6, 0x90, 0x46, 0x80, 0x61, 0x6c, 0xdb, 0x73, 0xfb, 0x27, 0x1e, 0x83, - 0x5a, 0x88, 0x57, 0x3d, 0xb7, 
0x8f, 0x39, 0x96, 0xfe, 0xb1, 0x06, 0x17, 0x63, 0x9a, 0x67, 0xc0, - 0x1b, 0xde, 0x8c, 0xf3, 0x86, 0x6b, 0xa3, 0x0c, 0x24, 0x87, 0x3d, 0x7c, 0x5c, 0x4a, 0x0c, 0x83, - 0x0d, 0x18, 0x6d, 0xc3, 0xd4, 0xc0, 0xed, 0xb6, 0x4f, 0xe1, 0x81, 0x76, 0x96, 0xf1, 0xb9, 0x56, - 0x88, 0x85, 0xa3, 0xc0, 0xe8, 0x1e, 0x5c, 0x64, 0xd4, 0xc2, 0x1f, 0x18, 0x26, 0x69, 0x9f, 0xc2, - 0x95, 0xd5, 0x23, 0xfc, 0x05, 0x28, 0x89, 0x88, 0xd3, 0x9d, 0xa0, 0x75, 0x98, 0xb0, 0x06, 0xfc, - 0x7c, 0x21, 0x89, 0xe4, 0xb1, 0x24, 0x4c, 0x9c, 0x46, 0x44, 0x8a, 0x97, 0x3f, 0x70, 0x80, 0xa1, - 0xff, 0x35, 0x19, 0x0d, 0x9c, 0xae, 0xbe, 0x16, 0xa1, 0x07, 0xf2, 0xad, 0xe6, 0x64, 0xd4, 0xa0, - 0x29, 0x99, 0xc8, 0x49, 0x99, 0x75, 0x35, 0xc1, 0x5b, 0xbe, 0x02, 0x13, 0xc4, 0xe9, 0x72, 0xb2, - 0x2e, 0x2e, 0x42, 0xf8, 0xa8, 0xee, 0x88, 0x26, 0x1c, 0xc8, 0xf4, 0x1f, 0x97, 0x13, 0xa3, 0xe2, - 0x65, 0xf6, 0xbd, 0x53, 0x0b, 0x0e, 0x45, 0xf8, 0x73, 0x03, 0x64, 0x2b, 0xa4, 0x7f, 0x22, 0xe6, - 0xbf, 0x31, 0x4a, 0xcc, 0x47, 0xeb, 0x5f, 0x2e, 0xf9, 0x43, 0xdf, 0x81, 0x71, 0x22, 0xba, 0x10, - 0x55, 0xf5, 0xe6, 0x28, 0x5d, 0x84, 0xe9, 0x37, 0x3c, 0x67, 0xc9, 0x36, 0x89, 0x8a, 0x5e, 0x61, - 0xf3, 0xc5, 0x74, 0xd9, 0xb1, 0x44, 0xb0, 0xe7, 0xc9, 0xc6, 0x63, 0x62, 0xd8, 0xaa, 0xf9, 0xc1, - 0xc1, 0x32, 0x84, 0x3f, 0x71, 0xd4, 0x82, 0xbf, 0x9e, 0xc9, 0x3b, 0x9b, 0xb3, 0xf9, 0x02, 0x69, - 0xb4, 0xd7, 0xb3, 0xd0, 0xb5, 0x53, 0x7b, 0x3d, 0x8b, 0x40, 0x1e, 0x7d, 0x86, 0xfd, 0x67, 0x09, - 0x2e, 0x85, 0xca, 0x85, 0x5f, 0xcf, 0x32, 0x4c, 0xfe, 0xff, 0x15, 0x52, 0xb1, 0x17, 0xad, 0x70, - 0xea, 0xfe, 0xfb, 0x5e, 0xb4, 0x42, 0xdf, 0x72, 0xaa, 0xdd, 0x6f, 0x4a, 0xd1, 0x01, 0x8c, 0xf8, - 0xac, 0x72, 0x0a, 0x1f, 0xe2, 0x7c, 0xe1, 0x5e, 0x66, 0xf4, 0xbf, 0x94, 0xe1, 0x42, 0x72, 0x37, - 0xc6, 0x6e, 0xdf, 0xb5, 0x63, 0x6f, 0xdf, 0x5b, 0x30, 0xb7, 0x3d, 0xb4, 0xed, 0x7d, 0x3e, 0x86, - 0xc8, 0x15, 0xbc, 0xb8, 0xb7, 0xff, 0x92, 0xb4, 0x9c, 0x7b, 0x35, 0x43, 0x07, 0x67, 0x5a, 0xa6, - 0x2f, 0xe3, 0xc7, 0xfe, 0xd3, 0xcb, 0xf8, 0xca, 0x09, 0x2e, 0xe3, 0xb3, 0xdf, 0x33, 0xca, 0x27, - 0x7a, 0xcf, 0x38, 0xc9, 0x4d, 0x7c, 0x46, 0x12, 0x3b, 0xf6, 0xab, 0x92, 0x97, 0x61, 0x26, 0xfe, - 0x3a, 0x24, 0xd6, 0x52, 0x3c, 0x50, 0xc9, 0xb7, 0x98, 0xc8, 0x5a, 0x8a, 0x76, 0xac, 0x34, 0xf4, - 0x43, 0x0d, 0x2e, 0x67, 0x7f, 0x05, 0x82, 0x6c, 0x98, 0xe9, 0x1b, 0xf7, 0xa2, 0x5f, 0xe6, 0x68, - 0x27, 0x64, 0x2b, 0xfc, 0x59, 0x60, 0x3d, 0x86, 0x85, 0x13, 0xd8, 0xe8, 0x1d, 0xa8, 0xf6, 0x8d, - 0x7b, 0xed, 0xa1, 0xd7, 0x23, 0x27, 0x66, 0x45, 0x7c, 0x1b, 0xad, 0x4b, 0x14, 0xac, 0xf0, 0xf4, - 0xcf, 0x35, 0x98, 0xcf, 0xb9, 0xec, 0xff, 0x1f, 0x1a, 0xe5, 0xfb, 0x25, 0xa8, 0xb4, 0x4d, 0xc3, - 0x26, 0x67, 0x40, 0x28, 0x5e, 0x8f, 0x11, 0x8a, 0xe3, 0xbe, 0x26, 0xe5, 0x5e, 0xe5, 0x72, 0x09, - 0x9c, 0xe0, 0x12, 0x4f, 0x15, 0x42, 0x3b, 0x9a, 0x46, 0xbc, 0x00, 0x93, 0xaa, 0xd3, 0xd1, 0xb2, - 0x9b, 0xfe, 0xcb, 0x12, 0x4c, 0x45, 0xba, 0x18, 0x31, 0x37, 0x6e, 0xc7, 0x0a, 0x42, 0xb9, 0xc0, - 0x4d, 0x4b, 0xa4, 0xaf, 0x5a, 0x50, 0x02, 0xc4, 0xd7, 0x10, 0xe1, 0xfb, 0x77, 0xba, 0x32, 0xbc, - 0x0c, 0x33, 0xd4, 0xf0, 0x7a, 0x84, 0x2a, 0xda, 0x2e, 0x2e, 0x19, 0xd5, 0x67, 0x39, 0x9d, 0x98, - 0x14, 0x27, 0xb4, 0x17, 0x5f, 0x82, 0xe9, 0x58, 0x67, 0xa3, 0x7c, 0xcc, 0xd0, 0x58, 0xb9, 0xff, - 0xd9, 0xd2, 0xb9, 0x4f, 0x3e, 0x5b, 0x3a, 0xf7, 0xe9, 0x67, 0x4b, 0xe7, 0x7e, 0x70, 0xb8, 0xa4, - 0xdd, 0x3f, 0x5c, 0xd2, 0x3e, 0x39, 0x5c, 0xd2, 0x3e, 0x3d, 0x5c, 0xd2, 0xfe, 0x7e, 0xb8, 0xa4, - 0xfd, 0xf4, 0xf3, 0xa5, 0x73, 0xef, 0x3c, 0x76, 0xe4, 0xff, 0x6d, 0xf8, 0x77, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xf3, 0x1c, 0xa0, 0x16, 0x14, 0x31, 0x00, 
0x00, + 0x4a, 0x1a, 0xe3, 0xd6, 0xa5, 0xcf, 0x97, 0x5b, 0x97, 0x4f, 0x87, 0x5b, 0x7f, 0x17, 0xaa, 0x7e, + 0xc0, 0xaa, 0xc7, 0x38, 0xe4, 0x8d, 0x11, 0xf2, 0xab, 0x24, 0xd4, 0xaa, 0x03, 0x45, 0xa5, 0x15, + 0x68, 0x16, 0x89, 0xae, 0x8c, 0x48, 0xa2, 0x4f, 0x95, 0xf8, 0xb2, 0x7c, 0x33, 0x30, 0x86, 0x3e, + 0xe9, 0xf2, 0xdc, 0x56, 0x0d, 0xf3, 0x4d, 0x8b, 0xb7, 0x62, 0x29, 0x45, 0xef, 0xc4, 0x42, 0xb6, + 0x7a, 0x92, 0x90, 0x9d, 0xc9, 0x0f, 0x57, 0xb4, 0x09, 0xf3, 0x03, 0xcf, 0xed, 0x79, 0xc4, 0xf7, + 0xef, 0x10, 0xa3, 0x6b, 0x5b, 0x0e, 0x09, 0xe6, 0x47, 0x30, 0xa2, 0x2b, 0x87, 0x07, 0xcb, 0xf3, + 0xad, 0x6c, 0x15, 0x9c, 0x67, 0xab, 0x3f, 0x18, 0x83, 0x0b, 0xc9, 0x0a, 0x98, 0x43, 0x52, 0xb5, + 0x13, 0x91, 0xd4, 0x6b, 0x91, 0xcd, 0x20, 0x18, 0xbc, 0x5a, 0xfd, 0x8c, 0x0d, 0x71, 0x1b, 0x66, + 0x65, 0x36, 0x08, 0x84, 0x92, 0xa6, 0xab, 0xd5, 0xdf, 0x8c, 0x8b, 0x71, 0x52, 0x1f, 0xbd, 0x08, + 0xd3, 0x1e, 0xe7, 0xdd, 0x01, 0x80, 0xe0, 0xae, 0x8f, 0x48, 0x80, 0x69, 0x1c, 0x15, 0xe2, 0xb8, + 0x2e, 0xe3, 0xad, 0x21, 0x1d, 0x0d, 0x00, 0xc6, 0xe2, 0xbc, 0xf5, 0x76, 0x52, 0x01, 0xa7, 0x6d, + 0xd0, 0x3a, 0x5c, 0x1a, 0x3a, 0x69, 0x28, 0x11, 0xca, 0x57, 0x24, 0xd4, 0xa5, 0xcd, 0xb4, 0x0a, + 0xce, 0xb2, 0x43, 0xdb, 0x31, 0x2a, 0x3b, 0xce, 0xd3, 0xf3, 0xcd, 0xc2, 0x1b, 0xaf, 0x30, 0x97, + 0xcd, 0xa0, 0xdb, 0xd5, 0xa2, 0x74, 0x5b, 0xff, 0x83, 0x16, 0x2d, 0x42, 0x8a, 0x02, 0x1f, 0x77, + 0xcb, 0x94, 0xb2, 0x88, 0xb0, 0x23, 0x37, 0x9b, 0xfd, 0xde, 0x1a, 0x89, 0xfd, 0x86, 0xc5, 0xf3, + 0x78, 0xfa, 0xfb, 0x47, 0x0d, 0x66, 0xef, 0x75, 0x3a, 0xad, 0x55, 0x87, 0xef, 0x96, 0x96, 0x41, + 0x77, 0x58, 0x15, 0x1d, 0x18, 0x74, 0x27, 0x59, 0x45, 0x99, 0x0c, 0x73, 0x09, 0x7a, 0x06, 0xaa, + 0xec, 0x5f, 0xe6, 0x38, 0x0f, 0xd7, 0x49, 0x9e, 0x64, 0xaa, 0x2d, 0xd9, 0xf6, 0x30, 0xf2, 0x37, + 0x56, 0x9a, 0xe8, 0x5b, 0x30, 0xc1, 0xf6, 0x36, 0x71, 0xba, 0x05, 0xc9, 0xaf, 0x74, 0xaa, 0x21, + 0x8c, 0x42, 0x3e, 0x23, 0x1b, 0x70, 0x00, 0xa7, 0xef, 0xc2, 0x5c, 0x64, 0x10, 0x78, 0x68, 0x93, + 0x37, 0x59, 0xbd, 0x42, 0x6d, 0xa8, 0xb0, 0xde, 0x59, 0x55, 0x2a, 0x17, 0xb8, 0x5e, 0x4c, 0x4c, + 0x44, 0xc8, 0x3d, 0xd8, 0x2f, 0x1f, 0x0b, 0x2c, 0x7d, 0x03, 0x26, 0x56, 0x5b, 0x0d, 0xdb, 0x15, + 0x7c, 0xc3, 0xb4, 0xba, 0x5e, 0x72, 0xa6, 0x56, 0x56, 0xef, 0x60, 0xcc, 0x25, 0x48, 0x87, 0x71, + 0x72, 0xdf, 0x24, 0x03, 0xca, 0x29, 0xc6, 0x64, 0x03, 0x58, 0x22, 0xbd, 0xcb, 0x5b, 0xb0, 0x94, + 0xe8, 0x3f, 0x29, 0xc1, 0x84, 0xec, 0xf6, 0x0c, 0xce, 0x1f, 0x6b, 0xb1, 0xf3, 0xc7, 0x53, 0xc5, + 0x96, 0x20, 0xf7, 0xf0, 0xd1, 0x49, 0x1c, 0x3e, 0xae, 0x15, 0xc4, 0x3b, 0xfa, 0xe4, 0xf1, 0x5e, + 0x09, 0x66, 0xe2, 0x8b, 0x8f, 0x9e, 0x85, 0x29, 0x96, 0x6a, 0x2d, 0x93, 0x34, 0x43, 0x86, 0xa7, + 0xae, 0x1f, 0xda, 0xa1, 0x08, 0x47, 0xf5, 0x50, 0x4f, 0x99, 0xb5, 0x5c, 0x8f, 0xca, 0x41, 0xe7, + 0x4f, 0xe9, 0x90, 0x5a, 0x76, 0x4d, 0x5c, 0xb6, 0xd7, 0x56, 0x1d, 0xba, 0xe1, 0xb5, 0xa9, 0x67, + 0x39, 0xbd, 0x54, 0x47, 0x0c, 0x0c, 0x47, 0x91, 0xd1, 0x5b, 0x2c, 0xed, 0xfb, 0xee, 0xd0, 0x33, + 0x49, 0x16, 0x7d, 0x0b, 0xa8, 0x07, 0xdb, 0x08, 0xdd, 0x35, 0xd7, 0x34, 0x6c, 0xb1, 0x38, 0x98, + 0x6c, 0x13, 0x8f, 0x38, 0x26, 0x09, 0x28, 0x93, 0x80, 0xc0, 0x0a, 0x4c, 0xff, 0xad, 0x06, 0x53, + 0x72, 0x2e, 0xce, 0x80, 0xa8, 0xbf, 0x1e, 0x27, 0xea, 0x4f, 0x14, 0xdc, 0xa1, 0xd9, 0x2c, 0xfd, + 0x77, 0x1a, 0x2c, 0x06, 0xae, 0xbb, 0x46, 0xb7, 0x61, 0xd8, 0x86, 0x63, 0x12, 0x2f, 0x88, 0xf5, + 0x45, 0x28, 0x59, 0x03, 0xb9, 0x92, 0x20, 0x01, 0x4a, 0xab, 0x2d, 0x5c, 0xb2, 0x06, 0xac, 0x8a, + 0xee, 0xb8, 0x3e, 0xe5, 0x6c, 0x5e, 0x1c, 0x14, 0x95, 0xd7, 0xf7, 0x64, 0x3b, 0x56, 0x1a, 0x68, + 0x13, 0x2a, 0x03, 
0xd7, 0xa3, 0xac, 0x72, 0x95, 0x13, 0xeb, 0x7b, 0x84, 0xd7, 0x6c, 0xdd, 0x64, + 0x20, 0x86, 0x3b, 0x9d, 0xc1, 0x60, 0x81, 0xa6, 0xff, 0x50, 0x83, 0x47, 0x33, 0xfc, 0x97, 0xa4, + 0xa1, 0x0b, 0x13, 0x96, 0x10, 0xca, 0xf4, 0xf2, 0x7c, 0xb1, 0x6e, 0x33, 0xa6, 0x22, 0x4c, 0x6d, + 0x41, 0x0a, 0x0b, 0xa0, 0xf5, 0x5f, 0x69, 0x70, 0x31, 0xe5, 0x2f, 0x4f, 0xd1, 0x2c, 0x9e, 0x25, + 0xdb, 0x56, 0x29, 0x9a, 0x85, 0x25, 0x97, 0xa0, 0xd7, 0xa1, 0xca, 0xdf, 0x88, 0x4c, 0xd7, 0x96, + 0x13, 0x58, 0x0f, 0x26, 0xb0, 0x25, 0xdb, 0x1f, 0x1e, 0x2c, 0x5f, 0xc9, 0x38, 0x6b, 0x07, 0x62, + 0xac, 0x00, 0xd0, 0x32, 0x54, 0x88, 0xe7, 0xb9, 0x9e, 0x4c, 0xf6, 0x93, 0x6c, 0xa6, 0xee, 0xb2, + 0x06, 0x2c, 0xda, 0xf5, 0x5f, 0x87, 0x41, 0xca, 0xb2, 0x2f, 0xf3, 0x8f, 0x2d, 0x4e, 0x32, 0x31, + 0xb2, 0xa5, 0xc3, 0x5c, 0x82, 0x86, 0x70, 0xc1, 0x4a, 0xa4, 0x6b, 0xb9, 0x3b, 0xeb, 0xc5, 0xa6, + 0x51, 0x99, 0x35, 0x16, 0x24, 0xfc, 0x85, 0xa4, 0x04, 0xa7, 0xba, 0xd0, 0x09, 0xa4, 0xb4, 0xd0, + 0x1b, 0x30, 0xb6, 0x43, 0xe9, 0x20, 0xe3, 0xb2, 0xff, 0x98, 0x22, 0x11, 0xba, 0x50, 0xe5, 0xa3, + 0xeb, 0x74, 0x5a, 0x98, 0x43, 0xe9, 0xbf, 0x2f, 0xa9, 0xf9, 0xe0, 0x27, 0xa4, 0x6f, 0xaa, 0xd1, + 0xae, 0xd8, 0x86, 0xef, 0xf3, 0x14, 0x26, 0x4e, 0xf3, 0x73, 0x11, 0xc7, 0x95, 0x0c, 0xa7, 0xb4, + 0x51, 0x27, 0x2c, 0x9e, 0xda, 0x49, 0x8a, 0xe7, 0x54, 0x56, 0xe1, 0x44, 0xf7, 0xa0, 0x4c, 0xed, + 0xa2, 0xa7, 0x72, 0x89, 0xd8, 0x59, 0x6b, 0x37, 0xa6, 0xe4, 0x94, 0x97, 0x3b, 0x6b, 0x6d, 0xcc, + 0x20, 0xd0, 0x06, 0x54, 0xbc, 0xa1, 0x4d, 0x58, 0x1d, 0x28, 0x17, 0xaf, 0x2b, 0x6c, 0x06, 0xc3, + 0xcd, 0xc7, 0x7e, 0xf9, 0x58, 0xe0, 0xe8, 0x3f, 0xd2, 0x60, 0x3a, 0x56, 0x2d, 0x90, 0x07, 0xe7, + 0xed, 0xc8, 0xde, 0x91, 0xf3, 0xf0, 0xdc, 0xe8, 0xbb, 0x4e, 0x6e, 0xfa, 0x39, 0xd9, 0xef, 0xf9, + 0xa8, 0x0c, 0xc7, 0xfa, 0xd0, 0x0d, 0x80, 0x70, 0xd8, 0x6c, 0x1f, 0xb0, 0xe0, 0x15, 0x1b, 0x5e, + 0xee, 0x03, 0x16, 0xd3, 0x3e, 0x16, 0xed, 0xe8, 0x26, 0x80, 0x4f, 0x4c, 0x8f, 0xd0, 0x66, 0x98, + 0xb8, 0x54, 0x39, 0x6e, 0x2b, 0x09, 0x8e, 0x68, 0xe9, 0x7f, 0xd2, 0x60, 0xba, 0x49, 0xe8, 0xf7, + 0x5d, 0x6f, 0xb7, 0xe5, 0xda, 0x96, 0xb9, 0x7f, 0x06, 0x24, 0x00, 0xc7, 0x48, 0xc0, 0x71, 0xf9, + 0x32, 0xe6, 0x5d, 0x1e, 0x15, 0xd0, 0x3f, 0xd4, 0x60, 0x3e, 0xa6, 0x79, 0x37, 0xcc, 0x07, 0x2a, + 0x41, 0x6b, 0x85, 0x12, 0x74, 0x0c, 0x86, 0x25, 0xb5, 0xec, 0x04, 0x8d, 0xd6, 0xa0, 0x44, 0x5d, + 0x19, 0xbd, 0xa3, 0x61, 0x12, 0xe2, 0x85, 0x35, 0xa7, 0xe3, 0xe2, 0x12, 0x75, 0xd9, 0x42, 0x2c, + 0xc4, 0xb4, 0xa2, 0x19, 0xed, 0x73, 0x1a, 0x01, 0x86, 0xb1, 0x6d, 0xcf, 0xed, 0x9f, 0x78, 0x0c, + 0x6a, 0x21, 0x5e, 0xf1, 0xdc, 0x3e, 0xe6, 0x58, 0xfa, 0x47, 0x1a, 0x5c, 0x8c, 0x69, 0x9e, 0x01, + 0x6f, 0x78, 0x23, 0xce, 0x1b, 0xae, 0x8d, 0x32, 0x90, 0x1c, 0xf6, 0xf0, 0x51, 0x29, 0x31, 0x0c, + 0x36, 0x60, 0xb4, 0x0d, 0x53, 0x03, 0xb7, 0xdb, 0x3e, 0x85, 0x07, 0xda, 0x59, 0xc6, 0xe7, 0x5a, + 0x21, 0x16, 0x8e, 0x02, 0xa3, 0xfb, 0x70, 0x91, 0x51, 0x0b, 0x7f, 0x60, 0x98, 0xa4, 0x7d, 0x0a, + 0x57, 0x56, 0x8f, 0xf0, 0x17, 0xa0, 0x24, 0x22, 0x4e, 0x77, 0x82, 0xd6, 0x61, 0xc2, 0x1a, 0xf0, + 0xf3, 0x85, 0x24, 0x92, 0xc7, 0x92, 0x30, 0x71, 0x1a, 0x11, 0x29, 0x5e, 0xfe, 0xc0, 0x01, 0x86, + 0xfe, 0xd7, 0x64, 0x34, 0x70, 0xba, 0xfa, 0x6a, 0x84, 0x1e, 0xc8, 0xb7, 0x9a, 0x93, 0x51, 0x83, + 0xa6, 0x64, 0x22, 0x27, 0x65, 0xd6, 0xd5, 0x04, 0x6f, 0xf9, 0x0a, 0x4c, 0x10, 0xa7, 0xcb, 0xc9, + 0xba, 0xb8, 0x08, 0xe1, 0xa3, 0xba, 0x2b, 0x9a, 0x70, 0x20, 0xd3, 0x7f, 0x5c, 0x4e, 0x8c, 0x8a, + 0x97, 0xd9, 0x77, 0x4f, 0x2d, 0x38, 0x14, 0xe1, 0xcf, 0x0d, 0x90, 0xad, 0x90, 0xfe, 0x89, 0x98, + 0xff, 0xc6, 0x28, 0x31, 0x1f, 0xad, 0x7f, 
0xb9, 0xe4, 0x0f, 0x7d, 0x07, 0xc6, 0x89, 0xe8, 0x42, + 0x54, 0xd5, 0x5b, 0xa3, 0x74, 0x11, 0xa6, 0xdf, 0xf0, 0x9c, 0x25, 0xdb, 0x24, 0x2a, 0x7a, 0x99, + 0xcd, 0x17, 0xd3, 0x65, 0xc7, 0x12, 0xc1, 0x9e, 0x27, 0x1b, 0x8f, 0x89, 0x61, 0xab, 0xe6, 0x87, + 0x07, 0xcb, 0x10, 0xfe, 0xc4, 0x51, 0x0b, 0xfe, 0x7a, 0x26, 0xef, 0x6c, 0xce, 0xe6, 0x0b, 0xa4, + 0xd1, 0x5e, 0xcf, 0x42, 0xd7, 0x4e, 0xed, 0xf5, 0x2c, 0x02, 0x79, 0xf4, 0x19, 0xf6, 0x9f, 0x25, + 0xb8, 0x14, 0x2a, 0x17, 0x7e, 0x3d, 0xcb, 0x30, 0xf9, 0xff, 0x57, 0x48, 0xc5, 0x5e, 0xb4, 0xc2, + 0xa9, 0xfb, 0xef, 0x7b, 0xd1, 0x0a, 0x7d, 0xcb, 0xa9, 0x76, 0xbf, 0x29, 0x45, 0x07, 0x30, 0xe2, + 0xb3, 0xca, 0x29, 0x7c, 0x88, 0xf3, 0x85, 0x7b, 0x99, 0xd1, 0xff, 0x52, 0x86, 0x0b, 0xc9, 0xdd, + 0x18, 0xbb, 0x7d, 0xd7, 0x8e, 0xbd, 0x7d, 0x6f, 0xc1, 0xdc, 0xf6, 0xd0, 0xb6, 0xf7, 0xf9, 0x18, + 0x22, 0x57, 0xf0, 0xe2, 0xde, 0xfe, 0x4b, 0xd2, 0x72, 0xee, 0x95, 0x0c, 0x1d, 0x9c, 0x69, 0x99, + 0xbe, 0x8c, 0x1f, 0xfb, 0x4f, 0x2f, 0xe3, 0x2b, 0x27, 0xb8, 0x8c, 0xcf, 0x7e, 0xcf, 0x28, 0x9f, + 0xe8, 0x3d, 0xe3, 0x24, 0x37, 0xf1, 0x19, 0x49, 0xec, 0xd8, 0xaf, 0x4a, 0x5e, 0x82, 0x99, 0xf8, + 0xeb, 0x90, 0x58, 0x4b, 0xf1, 0x40, 0x25, 0xdf, 0x62, 0x22, 0x6b, 0x29, 0xda, 0xb1, 0xd2, 0xd0, + 0x0f, 0x35, 0xb8, 0x9c, 0xfd, 0x15, 0x08, 0xb2, 0x61, 0xa6, 0x6f, 0xdc, 0x8f, 0x7e, 0x99, 0xa3, + 0x9d, 0x90, 0xad, 0xf0, 0x67, 0x81, 0xf5, 0x18, 0x16, 0x4e, 0x60, 0xa3, 0xb7, 0xa1, 0xda, 0x37, + 0xee, 0xb7, 0x87, 0x5e, 0x8f, 0x9c, 0x98, 0x15, 0xf1, 0x6d, 0xb4, 0x2e, 0x51, 0xb0, 0xc2, 0xd3, + 0x3f, 0xd3, 0x60, 0x3e, 0xe7, 0xb2, 0xff, 0x7f, 0x68, 0x94, 0xef, 0x95, 0xa0, 0xd2, 0x36, 0x0d, + 0x9b, 0x9c, 0x01, 0xa1, 0x78, 0x2d, 0x46, 0x28, 0x8e, 0xfb, 0x9a, 0x94, 0x7b, 0x95, 0xcb, 0x25, + 0x70, 0x82, 0x4b, 0x3c, 0x55, 0x08, 0xed, 0x68, 0x1a, 0xf1, 0x3c, 0x4c, 0xaa, 0x4e, 0x47, 0xcb, + 0x6e, 0xfa, 0x2f, 0x4b, 0x30, 0x15, 0xe9, 0x62, 0xc4, 0xdc, 0xb8, 0x1d, 0x2b, 0x08, 0xe5, 0x02, + 0x37, 0x2d, 0x91, 0xbe, 0x6a, 0x41, 0x09, 0x10, 0x5f, 0x43, 0x84, 0xef, 0xdf, 0xe9, 0xca, 0xf0, + 0x12, 0xcc, 0x50, 0xc3, 0xeb, 0x11, 0xaa, 0x68, 0xbb, 0xb8, 0x64, 0x54, 0x9f, 0xe5, 0x74, 0x62, + 0x52, 0x9c, 0xd0, 0x5e, 0x7c, 0x11, 0xa6, 0x63, 0x9d, 0x8d, 0xf2, 0x31, 0x43, 0x63, 0xe5, 0xc1, + 0xa7, 0x4b, 0xe7, 0x3e, 0xfe, 0x74, 0xe9, 0xdc, 0x27, 0x9f, 0x2e, 0x9d, 0xfb, 0xc1, 0xe1, 0x92, + 0xf6, 0xe0, 0x70, 0x49, 0xfb, 0xf8, 0x70, 0x49, 0xfb, 0xe4, 0x70, 0x49, 0xfb, 0xfb, 0xe1, 0x92, + 0xf6, 0xd3, 0xcf, 0x96, 0xce, 0xbd, 0xfd, 0xd8, 0x91, 0xff, 0xb7, 0xe1, 0xdf, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x5f, 0xd8, 0x14, 0x50, 0xfb, 0x30, 0x00, 0x00, } func (m *DaemonSet) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto index 3f2549681e..9bbcaa0e26 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -37,7 +37,7 @@ message DaemonSet { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The desired behavior of this daemon set. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -63,7 +63,7 @@ message DaemonSetCondition { // Last time the condition transitioned from one status to another. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -79,7 +79,7 @@ message DaemonSetList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // A list of daemon sets. repeated DaemonSet items = 2; @@ -92,14 +92,14 @@ message DaemonSetSpec { // If empty, defaulted to labels on Pod template. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; // An object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - optional k8s.io.api.core.v1.PodTemplateSpec template = 2; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 2; // An update strategy to replace existing DaemonSet pods with new pods. // +optional @@ -176,6 +176,8 @@ message DaemonSetStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated DaemonSetCondition conditions = 10; } @@ -203,7 +205,7 @@ message DaemonSetUpdateStrategy { message Deployment { // Standard object metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the Deployment. // +optional @@ -223,10 +225,10 @@ message DeploymentCondition { optional string status = 2; // The last time this condition was updated. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; // Last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; // The reason for the condition's last transition. optional string reason = 4; @@ -239,7 +241,7 @@ message DeploymentCondition { message DeploymentList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Deployments. repeated Deployment items = 2; @@ -269,10 +271,10 @@ message DeploymentSpec { // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. 
// +optional @@ -343,6 +345,8 @@ message DeploymentStatus { // Represents the latest available observations of a deployment's current state. // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated DeploymentCondition conditions = 6; // Count of hash collisions for the Deployment. The Deployment controller uses this @@ -406,6 +410,7 @@ message HTTPIngressPath { // or '#'. message HTTPIngressRuleValue { // A collection of paths that map requests to backends. + // +listType=atomic repeated HTTPIngressPath paths = 1; } @@ -422,6 +427,7 @@ message IPBlock { // Valid examples are "192.168.1.0/24" or "2001:db8::/64" // Except values will be rejected if they are outside the CIDR range // +optional + // +listType=atomic repeated string except = 2; } @@ -434,7 +440,7 @@ message Ingress { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -455,13 +461,13 @@ message IngressBackend { // Specifies the port of the referenced service. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; // Resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, serviceName and servicePort // must not be specified. // +optional - optional k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; + optional .k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; } // IngressList is a collection of Ingress. @@ -469,7 +475,7 @@ message IngressList { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Ingress. repeated Ingress items = 2; @@ -495,6 +501,7 @@ message IngressLoadBalancerIngress { message IngressLoadBalancerStatus { // Ingress is a list containing ingress points for the load-balancer. // +optional + // +listType=atomic repeated IngressLoadBalancerIngress ingress = 1; } @@ -602,11 +609,13 @@ message IngressSpec { // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +optional + // +listType=atomic repeated IngressTLS tls = 2; // A list of host rules used to configure the Ingress. If unspecified, or // no rule matches, all traffic is sent to the default backend. // +optional + // +listType=atomic repeated IngressRule rules = 3; } @@ -624,6 +633,7 @@ message IngressTLS { // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. // +optional + // +listType=atomic repeated string hosts = 1; // SecretName is the name of the secret used to terminate SSL traffic on 443. @@ -641,7 +651,7 @@ message NetworkPolicy { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior for this NetworkPolicy. // +optional @@ -659,6 +669,7 @@ message NetworkPolicyEgressRule { // If this field is present and contains at least one item, then this rule allows // traffic only if the traffic matches at least one port in the list. // +optional + // +listType=atomic repeated NetworkPolicyPort ports = 1; // List of destinations for outgoing traffic of pods selected for this rule. @@ -667,6 +678,7 @@ message NetworkPolicyEgressRule { // destination). If this field is present and contains at least one item, this rule // allows traffic only if the traffic matches at least one item in the to list. // +optional + // +listType=atomic repeated NetworkPolicyPeer to = 2; } @@ -679,6 +691,7 @@ message NetworkPolicyIngressRule { // If this field is present and contains at least one item, then this rule allows traffic // only if the traffic matches at least one port in the list. // +optional + // +listType=atomic repeated NetworkPolicyPort ports = 1; // List of sources which should be able to access the pods selected for this rule. @@ -687,6 +700,7 @@ message NetworkPolicyIngressRule { // If this field is present and contains at least one item, this rule allows traffic only if the // traffic matches at least one item in the from list. // +optional + // +listType=atomic repeated NetworkPolicyPeer from = 2; } @@ -696,7 +710,7 @@ message NetworkPolicyList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of schema objects. repeated NetworkPolicy items = 2; @@ -711,7 +725,7 @@ message NetworkPolicyPeer { // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; // Selects Namespaces using cluster-scoped labels. This field follows standard label // selector semantics; if present but empty, it selects all namespaces. @@ -720,7 +734,7 @@ message NetworkPolicyPeer { // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; // IPBlock defines policy on a particular IPBlock. If this field is set then // neither of the other fields can be. @@ -740,7 +754,7 @@ message NetworkPolicyPort { // numbers. // If present, only traffic on the specified protocol AND port will be matched. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; // If set, indicates that the range of ports from port to endPort, inclusive, // should be allowed by the policy. 
This field cannot be defined if the port field @@ -757,7 +771,7 @@ message NetworkPolicySpec { // same set of pods. In this case, the ingress rules for each are combined additively. // This field is NOT optional and follows standard label selector semantics. // An empty podSelector matches all pods in this namespace. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; // List of ingress rules to be applied to the selected pods. // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod @@ -767,6 +781,7 @@ message NetworkPolicySpec { // If this field is empty then this NetworkPolicy does not allow any traffic // (and serves solely to ensure that the pods it selects are isolated by default). // +optional + // +listType=atomic repeated NetworkPolicyIngressRule ingress = 2; // List of egress rules to be applied to the selected pods. Outgoing traffic is @@ -777,6 +792,7 @@ message NetworkPolicySpec { // solely to ensure that the pods it selects are isolated by default). // This field is beta-level in 1.8 // +optional + // +listType=atomic repeated NetworkPolicyEgressRule egress = 3; // List of rule types that the NetworkPolicy relates to. @@ -790,6 +806,7 @@ message NetworkPolicySpec { // an Egress section and would otherwise default to just [ "Ingress" ]). // This field is beta-level in 1.8 // +optional + // +listType=atomic repeated string policyTypes = 4; } @@ -801,7 +818,7 @@ message ReplicaSet { // be the same as the Pod(s) that the ReplicaSet manages. // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the ReplicaSet. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -827,7 +844,7 @@ message ReplicaSetCondition { // The last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -843,7 +860,7 @@ message ReplicaSetList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ReplicaSets. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller @@ -870,13 +887,13 @@ message ReplicaSetSpec { // Label keys and values that must match in order to be controlled by this replica set. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template is the object that describes the pod that will be created if // insufficient replicas are detected. 
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; } // ReplicaSetStatus represents the current status of a ReplicaSet. @@ -905,6 +922,8 @@ message ReplicaSetStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated ReplicaSetCondition conditions = 6; } @@ -932,7 +951,7 @@ message RollingUpdateDaemonSet { // 70% of original number of DaemonSet pods are available at all times during // the update. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of nodes with an existing available DaemonSet pod that // can have an updated DaemonSet pod during during an update. @@ -954,7 +973,7 @@ message RollingUpdateDaemonSet { // cause evictions during disruption. // This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // Spec to control the desired behavior of rolling update. @@ -970,7 +989,7 @@ message RollingUpdateDeployment { // that the total number of pods available at all times during the update is at // least 70% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of pods that can be scheduled above the desired number of // pods. @@ -984,14 +1003,14 @@ message RollingUpdateDeployment { // new RC can be scaled up further, ensuring that total number of pods running // at any time during the update is at most 130% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // represents a scaling request for a resource. message Scale { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go index 70b349f654..09f58692f4 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -270,6 +270,8 @@ type DeploymentStatus struct { // Represents the latest available observations of a deployment's current state. // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` // Count of hash collisions for the Deployment. 
The Deployment controller uses this @@ -490,6 +492,8 @@ type DaemonSetStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` } @@ -652,11 +656,13 @@ type IngressSpec struct { // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +optional + // +listType=atomic TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` // A list of host rules used to configure the Ingress. If unspecified, or // no rule matches, all traffic is sent to the default backend. // +optional + // +listType=atomic Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` // TODO: Add the ability to specify load-balancer IP through claims } @@ -668,6 +674,7 @@ type IngressTLS struct { // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. // +optional + // +listType=atomic Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` // SecretName is the name of the secret used to terminate SSL traffic on 443. // Field is left optional to allow SSL routing based on SNI hostname alone. @@ -690,6 +697,7 @@ type IngressStatus struct { type IngressLoadBalancerStatus struct { // Ingress is a list containing ingress points for the load-balancer. // +optional + // +listType=atomic Ingress []IngressLoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` } @@ -767,7 +775,7 @@ type IngressRule struct { // default backend, is left to the controller fulfilling the Ingress. Http is // currently the only supported IngressRuleValue. // +optional - IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"` + IngressRuleValue `json:",inline" protobuf:"bytes,2,opt,name=ingressRuleValue"` } // IngressRuleValue represents a rule to apply against incoming requests. If the @@ -797,6 +805,7 @@ type IngressRuleValue struct { // or '#'. type HTTPIngressRuleValue struct { // A collection of paths that map requests to backends. + // +listType=atomic Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` // TODO: Consider adding fields for ingress-type specific global // options usable by a loadbalancer, like http keep-alive. @@ -991,6 +1000,8 @@ type ReplicaSetStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` } @@ -1076,6 +1087,7 @@ type NetworkPolicySpec struct { // If this field is empty then this NetworkPolicy does not allow any traffic // (and serves solely to ensure that the pods it selects are isolated by default). // +optional + // +listType=atomic Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty" protobuf:"bytes,2,rep,name=ingress"` // List of egress rules to be applied to the selected pods. Outgoing traffic is @@ -1086,6 +1098,7 @@ type NetworkPolicySpec struct { // solely to ensure that the pods it selects are isolated by default). // This field is beta-level in 1.8 // +optional + // +listType=atomic Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"` // List of rule types that the NetworkPolicy relates to. 
@@ -1099,6 +1112,7 @@ type NetworkPolicySpec struct { // an Egress section and would otherwise default to just [ "Ingress" ]). // This field is beta-level in 1.8 // +optional + // +listType=atomic PolicyTypes []PolicyType `json:"policyTypes,omitempty" protobuf:"bytes,4,rep,name=policyTypes,casttype=PolicyType"` } @@ -1111,6 +1125,7 @@ type NetworkPolicyIngressRule struct { // If this field is present and contains at least one item, then this rule allows traffic // only if the traffic matches at least one port in the list. // +optional + // +listType=atomic Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` // List of sources which should be able to access the pods selected for this rule. @@ -1119,6 +1134,7 @@ type NetworkPolicyIngressRule struct { // If this field is present and contains at least one item, this rule allows traffic only if the // traffic matches at least one item in the from list. // +optional + // +listType=atomic From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` } @@ -1133,6 +1149,7 @@ type NetworkPolicyEgressRule struct { // If this field is present and contains at least one item, then this rule allows // traffic only if the traffic matches at least one port in the list. // +optional + // +listType=atomic Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` // List of destinations for outgoing traffic of pods selected for this rule. @@ -1141,6 +1158,7 @@ type NetworkPolicyEgressRule struct { // destination). If this field is present and contains at least one item, this rule // allows traffic only if the traffic matches at least one item in the to list. // +optional + // +listType=atomic To []NetworkPolicyPeer `json:"to,omitempty" protobuf:"bytes,2,rep,name=to"` } @@ -1178,6 +1196,7 @@ type IPBlock struct { // Valid examples are "192.168.1.0/24" or "2001:db8::/64" // Except values will be rejected if they are outside the CIDR range // +optional + // +listType=atomic Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"` } diff --git a/vendor/k8s.io/api/flowcontrol/v1/doc.go b/vendor/k8s.io/api/flowcontrol/v1/doc.go index 1bc51d4066..c9e7db1589 100644 --- a/vendor/k8s.io/api/flowcontrol/v1/doc.go +++ b/vendor/k8s.io/api/flowcontrol/v1/doc.go @@ -17,6 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=flowcontrol.apiserver.k8s.io diff --git a/vendor/k8s.io/api/flowcontrol/v1/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1/generated.pb.go index c235ba10de..b342445f71 100644 --- a/vendor/k8s.io/api/flowcontrol/v1/generated.pb.go +++ b/vendor/k8s.io/api/flowcontrol/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1/generated.proto +// source: k8s.io/api/flowcontrol/v1/generated.proto package v1 @@ -46,7 +46,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} } func (*ExemptPriorityLevelConfiguration) ProtoMessage() {} func (*ExemptPriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{0} + return fileDescriptor_5d08a1401821035d, []int{0} } func (m *ExemptPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -74,7 +74,7 @@ var xxx_messageInfo_ExemptPriorityLevelConfiguration proto.InternalMessageInfo func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} } func (*FlowDistinguisherMethod) ProtoMessage() {} func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{1} + return fileDescriptor_5d08a1401821035d, []int{1} } func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -102,7 +102,7 @@ var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo func (m *FlowSchema) Reset() { *m = FlowSchema{} } func (*FlowSchema) ProtoMessage() {} func (*FlowSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{2} + return fileDescriptor_5d08a1401821035d, []int{2} } func (m *FlowSchema) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -130,7 +130,7 @@ var xxx_messageInfo_FlowSchema proto.InternalMessageInfo func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} } func (*FlowSchemaCondition) ProtoMessage() {} func (*FlowSchemaCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{3} + return fileDescriptor_5d08a1401821035d, []int{3} } func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -158,7 +158,7 @@ var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} } func (*FlowSchemaList) ProtoMessage() {} func (*FlowSchemaList) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{4} + return fileDescriptor_5d08a1401821035d, []int{4} } func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -186,7 +186,7 @@ var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} } func (*FlowSchemaSpec) ProtoMessage() {} func (*FlowSchemaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{5} + return fileDescriptor_5d08a1401821035d, []int{5} } func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -214,7 +214,7 @@ var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} } func (*FlowSchemaStatus) ProtoMessage() {} func (*FlowSchemaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{6} + return fileDescriptor_5d08a1401821035d, []int{6} } func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -242,7 +242,7 @@ var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo func (m *GroupSubject) Reset() { *m = GroupSubject{} } func (*GroupSubject) ProtoMessage() {} func (*GroupSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{7} + return 
fileDescriptor_5d08a1401821035d, []int{7} } func (m *GroupSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -270,7 +270,7 @@ var xxx_messageInfo_GroupSubject proto.InternalMessageInfo func (m *LimitResponse) Reset() { *m = LimitResponse{} } func (*LimitResponse) ProtoMessage() {} func (*LimitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{8} + return fileDescriptor_5d08a1401821035d, []int{8} } func (m *LimitResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -298,7 +298,7 @@ var xxx_messageInfo_LimitResponse proto.InternalMessageInfo func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} } func (*LimitedPriorityLevelConfiguration) ProtoMessage() {} func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{9} + return fileDescriptor_5d08a1401821035d, []int{9} } func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -326,7 +326,7 @@ var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} } func (*NonResourcePolicyRule) ProtoMessage() {} func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{10} + return fileDescriptor_5d08a1401821035d, []int{10} } func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -354,7 +354,7 @@ var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} } func (*PolicyRulesWithSubjects) ProtoMessage() {} func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{11} + return fileDescriptor_5d08a1401821035d, []int{11} } func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -382,7 +382,7 @@ var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} } func (*PriorityLevelConfiguration) ProtoMessage() {} func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{12} + return fileDescriptor_5d08a1401821035d, []int{12} } func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -410,7 +410,7 @@ var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} } func (*PriorityLevelConfigurationCondition) ProtoMessage() {} func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{13} + return fileDescriptor_5d08a1401821035d, []int{13} } func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -438,7 +438,7 @@ var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInf func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} } func (*PriorityLevelConfigurationList) ProtoMessage() {} func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{14} + return fileDescriptor_5d08a1401821035d, []int{14} } func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -466,7 +466,7 
@@ var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} } func (*PriorityLevelConfigurationReference) ProtoMessage() {} func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{15} + return fileDescriptor_5d08a1401821035d, []int{15} } func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -494,7 +494,7 @@ var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInf func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} } func (*PriorityLevelConfigurationSpec) ProtoMessage() {} func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{16} + return fileDescriptor_5d08a1401821035d, []int{16} } func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -522,7 +522,7 @@ var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} } func (*PriorityLevelConfigurationStatus) ProtoMessage() {} func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{17} + return fileDescriptor_5d08a1401821035d, []int{17} } func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -550,7 +550,7 @@ var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} } func (*QueuingConfiguration) ProtoMessage() {} func (*QueuingConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{18} + return fileDescriptor_5d08a1401821035d, []int{18} } func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -578,7 +578,7 @@ var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} } func (*ResourcePolicyRule) ProtoMessage() {} func (*ResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{19} + return fileDescriptor_5d08a1401821035d, []int{19} } func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -606,7 +606,7 @@ var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} } func (*ServiceAccountSubject) ProtoMessage() {} func (*ServiceAccountSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{20} + return fileDescriptor_5d08a1401821035d, []int{20} } func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -634,7 +634,7 @@ var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} func (*Subject) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{21} + return fileDescriptor_5d08a1401821035d, []int{21} } func (m *Subject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -662,7 +662,7 @@ var xxx_messageInfo_Subject proto.InternalMessageInfo func (m *UserSubject) Reset() { *m = UserSubject{} } func (*UserSubject) ProtoMessage() {} func (*UserSubject) Descriptor() ([]byte, []int) { - 
return fileDescriptor_f8a25df358697d27, []int{22} + return fileDescriptor_5d08a1401821035d, []int{22} } func (m *UserSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -714,111 +714,110 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1/generated.proto", fileDescriptor_f8a25df358697d27) -} - -var fileDescriptor_f8a25df358697d27 = []byte{ - // 1588 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4b, 0x73, 0x1b, 0xc5, - 0x16, 0xb6, 0x64, 0xc9, 0xb6, 0x8e, 0x9f, 0x69, 0xc7, 0x65, 0xc5, 0xb9, 0x25, 0x39, 0x73, 0xeb, - 0xe6, 0x71, 0x43, 0xa4, 0xc4, 0x45, 0x20, 0xa9, 0x00, 0xa9, 0x4c, 0x12, 0xf2, 0xb2, 0x1d, 0xa7, - 0x95, 0x07, 0x15, 0xa8, 0x82, 0xd1, 0xa8, 0x2d, 0x4d, 0x2c, 0xcd, 0x0c, 0xdd, 0x33, 0x32, 0xa6, - 0x8a, 0x2a, 0x7e, 0x42, 0x56, 0x2c, 0x59, 0xc0, 0x3f, 0x60, 0x45, 0xc1, 0x86, 0x65, 0x76, 0x64, - 0x19, 0x58, 0xa8, 0x88, 0xf8, 0x0b, 0x2c, 0x20, 0x2b, 0xaa, 0x7b, 0x7a, 0x66, 0x34, 0x92, 0x66, - 0xac, 0xf2, 0x22, 0x6c, 0xd8, 0x79, 0xce, 0xf9, 0xce, 0x77, 0xba, 0x4f, 0x9f, 0x97, 0x0c, 0xea, - 0xce, 0x05, 0x56, 0x32, 0xac, 0xf2, 0x8e, 0x5b, 0x25, 0xd4, 0x24, 0x0e, 0x61, 0xe5, 0x36, 0x31, - 0x6b, 0x16, 0x2d, 0x4b, 0x85, 0x66, 0x1b, 0xe5, 0xed, 0xa6, 0xb5, 0xab, 0x5b, 0xa6, 0x43, 0xad, - 0x66, 0xb9, 0x7d, 0xae, 0x5c, 0x27, 0x26, 0xa1, 0x9a, 0x43, 0x6a, 0x25, 0x9b, 0x5a, 0x8e, 0x85, - 0x8e, 0x78, 0xd0, 0x92, 0x66, 0x1b, 0xa5, 0x1e, 0x68, 0xa9, 0x7d, 0x6e, 0xe5, 0x4c, 0xdd, 0x70, - 0x1a, 0x6e, 0xb5, 0xa4, 0x5b, 0xad, 0x72, 0xdd, 0xaa, 0x5b, 0x65, 0x61, 0x51, 0x75, 0xb7, 0xc5, - 0x97, 0xf8, 0x10, 0x7f, 0x79, 0x4c, 0x2b, 0x6f, 0x86, 0x4e, 0x5b, 0x9a, 0xde, 0x30, 0x4c, 0x42, - 0xf7, 0xca, 0xf6, 0x4e, 0x9d, 0x0b, 0x58, 0xb9, 0x45, 0x1c, 0x6d, 0x88, 0xff, 0x95, 0x72, 0x9c, - 0x15, 0x75, 0x4d, 0xc7, 0x68, 0x91, 0x01, 0x83, 0xb7, 0xf6, 0x33, 0x60, 0x7a, 0x83, 0xb4, 0xb4, - 0x7e, 0x3b, 0xe5, 0xc7, 0x14, 0xac, 0x5e, 0xff, 0x8c, 0xb4, 0x6c, 0x67, 0x8b, 0x1a, 0x16, 0x35, - 0x9c, 0xbd, 0x75, 0xd2, 0x26, 0xcd, 0xab, 0x96, 0xb9, 0x6d, 0xd4, 0x5d, 0xaa, 0x39, 0x86, 0x65, - 0xa2, 0x0f, 0x20, 0x6f, 0x5a, 0x2d, 0xc3, 0xd4, 0xb8, 0x5c, 0x77, 0x29, 0x25, 0xa6, 0xbe, 0x57, - 0x69, 0x68, 0x94, 0xb0, 0x7c, 0x6a, 0x35, 0x75, 0x32, 0xab, 0xfe, 0xa7, 0xdb, 0x29, 0xe6, 0x37, - 0x63, 0x30, 0x38, 0xd6, 0x1a, 0xbd, 0x0b, 0xf3, 0x4d, 0x62, 0xd6, 0xb4, 0x6a, 0x93, 0x6c, 0x11, - 0xaa, 0x13, 0xd3, 0xc9, 0xa7, 0x05, 0xe1, 0x62, 0xb7, 0x53, 0x9c, 0x5f, 0x8f, 0xaa, 0x70, 0x3f, - 0x56, 0x79, 0x0c, 0xcb, 0xef, 0x37, 0xad, 0xdd, 0x6b, 0x06, 0x73, 0x0c, 0xb3, 0xee, 0x1a, 0xac, - 0x41, 0xe8, 0x06, 0x71, 0x1a, 0x56, 0x0d, 0x5d, 0x86, 0x8c, 0xb3, 0x67, 0x13, 0x71, 0xbe, 0x9c, - 0x7a, 0xfa, 0x59, 0xa7, 0x38, 0xd6, 0xed, 0x14, 0x33, 0xf7, 0xf7, 0x6c, 0xf2, 0xaa, 0x53, 0x3c, - 0x1a, 0x63, 0xc6, 0xd5, 0x58, 0x18, 0x2a, 0x4f, 0xd3, 0x00, 0x1c, 0x55, 0x11, 0x81, 0x43, 0x9f, - 0xc0, 0x14, 0x7f, 0xac, 0x9a, 0xe6, 0x68, 0x82, 0x73, 0x7a, 0xed, 0x6c, 0x29, 0x4c, 0x92, 0x20, - 0xe6, 0x25, 0x7b, 0xa7, 0xce, 0x05, 0xac, 0xc4, 0xd1, 0xa5, 0xf6, 0xb9, 0xd2, 0xdd, 0xea, 0x13, - 0xa2, 0x3b, 0x1b, 0xc4, 0xd1, 0x54, 0x24, 0x4f, 0x01, 0xa1, 0x0c, 0x07, 0xac, 0xe8, 0x0e, 0x64, - 0x98, 0x4d, 0x74, 0x11, 0x80, 0xe9, 0xb5, 0x53, 0xa5, 0xd8, 0x14, 0x2c, 0x85, 0xc7, 0xaa, 0xd8, - 0x44, 0x57, 0x67, 0xfc, 0xcb, 0xf1, 0x2f, 0x2c, 0x48, 0x50, 0x05, 0x26, 0x98, 0xa3, 0x39, 0x2e, - 0xcb, 0x8f, 0x0b, 0xba, 0xd3, 0xa3, 0xd1, 0x09, 0x13, 0x75, 0x4e, 0x12, 0x4e, 0x78, 0xdf, 0x58, - 0x52, 0x29, 0x2f, 0xd2, 0xb0, 0x18, 0x82, 0xaf, 0x5a, 
0x66, 0xcd, 0x10, 0xf9, 0x71, 0x29, 0x12, - 0xeb, 0x13, 0x7d, 0xb1, 0x5e, 0x1e, 0x62, 0x12, 0xc6, 0x19, 0x5d, 0x0c, 0x4e, 0x9a, 0x16, 0xe6, - 0xc7, 0xa2, 0xce, 0x5f, 0x75, 0x8a, 0xf3, 0x81, 0x59, 0xf4, 0x3c, 0xa8, 0x0d, 0xa8, 0xa9, 0x31, - 0xe7, 0x3e, 0xd5, 0x4c, 0xe6, 0xd1, 0x1a, 0x2d, 0x22, 0x2f, 0xfc, 0xff, 0xd1, 0x5e, 0x87, 0x5b, - 0xa8, 0x2b, 0xd2, 0x25, 0x5a, 0x1f, 0x60, 0xc3, 0x43, 0x3c, 0xa0, 0xe3, 0x30, 0x41, 0x89, 0xc6, - 0x2c, 0x33, 0x9f, 0x11, 0x47, 0x0e, 0xe2, 0x85, 0x85, 0x14, 0x4b, 0x2d, 0x3a, 0x05, 0x93, 0x2d, - 0xc2, 0x98, 0x56, 0x27, 0xf9, 0xac, 0x00, 0xce, 0x4b, 0xe0, 0xe4, 0x86, 0x27, 0xc6, 0xbe, 0x5e, - 0xf9, 0x21, 0x05, 0x73, 0x61, 0x9c, 0xd6, 0x0d, 0xe6, 0xa0, 0x8f, 0x06, 0x32, 0xae, 0x34, 0xda, - 0x9d, 0xb8, 0xb5, 0xc8, 0xb7, 0x05, 0xe9, 0x6e, 0xca, 0x97, 0xf4, 0x64, 0xdb, 0x6d, 0xc8, 0x1a, - 0x0e, 0x69, 0xf1, 0xa8, 0x8f, 0x9f, 0x9c, 0x5e, 0xfb, 0xdf, 0x48, 0xf9, 0xa1, 0xce, 0x4a, 0xc6, - 0xec, 0x2d, 0x6e, 0x8b, 0x3d, 0x0a, 0xe5, 0x97, 0xf1, 0xde, 0xc3, 0xf3, 0x2c, 0x44, 0xdf, 0xa4, - 0x60, 0xc5, 0x8e, 0xed, 0x28, 0xf2, 0x3e, 0xef, 0x25, 0x38, 0x8d, 0x6f, 0x47, 0x98, 0x6c, 0x13, - 0xde, 0x43, 0x88, 0xaa, 0xc8, 0xd3, 0xac, 0x24, 0x80, 0x13, 0x4e, 0x81, 0x6e, 0x03, 0x6a, 0x69, - 0x0e, 0x8f, 0x63, 0x7d, 0x8b, 0x12, 0x9d, 0xd4, 0x38, 0xab, 0x6c, 0x40, 0x41, 0x4e, 0x6c, 0x0c, - 0x20, 0xf0, 0x10, 0x2b, 0xf4, 0x05, 0x2c, 0xd6, 0x06, 0xfb, 0x89, 0x4c, 0xc6, 0xb5, 0x7d, 0xa2, - 0x3b, 0xa4, 0x13, 0xa9, 0xcb, 0xdd, 0x4e, 0x71, 0x71, 0x88, 0x02, 0x0f, 0xf3, 0x83, 0x1e, 0x41, - 0x96, 0xba, 0x4d, 0xc2, 0xf2, 0x19, 0xf1, 0x9c, 0x49, 0x0e, 0xb7, 0xac, 0xa6, 0xa1, 0xef, 0x61, - 0x8e, 0x7e, 0x64, 0x38, 0x8d, 0x8a, 0x2b, 0x9a, 0x11, 0x0b, 0xdf, 0x56, 0xa8, 0xb0, 0xc7, 0xa7, - 0xb4, 0x61, 0xa1, 0xbf, 0x3f, 0xa0, 0x2a, 0x80, 0xee, 0x97, 0x24, 0x9f, 0x00, 0xe3, 0x7d, 0xb9, - 0x19, 0x9f, 0x40, 0x41, 0x25, 0x87, 0xbd, 0x30, 0x10, 0x31, 0xdc, 0xc3, 0xaa, 0x9c, 0x85, 0x99, - 0x1b, 0xd4, 0x72, 0x6d, 0x79, 0x3c, 0xb4, 0x0a, 0x19, 0x53, 0x6b, 0xf9, 0x3d, 0x26, 0x68, 0x79, - 0x9b, 0x5a, 0x8b, 0x60, 0xa1, 0x51, 0xbe, 0x4e, 0xc1, 0xec, 0xba, 0xd1, 0x32, 0x1c, 0x4c, 0x98, - 0x6d, 0x99, 0x8c, 0xa0, 0xf3, 0x91, 0xbe, 0x74, 0xac, 0xaf, 0x2f, 0x1d, 0x8a, 0x80, 0x7b, 0x3a, - 0xd2, 0x43, 0x98, 0xfc, 0xd4, 0x25, 0xae, 0x61, 0xd6, 0x65, 0x2f, 0x2e, 0x27, 0xdc, 0xed, 0x9e, - 0x87, 0x8c, 0x24, 0x96, 0x3a, 0xcd, 0x6b, 0x5c, 0x6a, 0xb0, 0x4f, 0xa6, 0xfc, 0x91, 0x86, 0x63, - 0xc2, 0x27, 0xa9, 0xfd, 0x23, 0xc3, 0x96, 0xc0, 0x6c, 0xb3, 0xf7, 0xca, 0xf2, 0x76, 0x27, 0x13, - 0x6e, 0x17, 0x09, 0x91, 0xba, 0x24, 0x23, 0x18, 0x0d, 0x33, 0x8e, 0xb2, 0x0e, 0x9b, 0xe9, 0xe3, - 0xa3, 0xcf, 0x74, 0x74, 0x17, 0x96, 0xaa, 0x16, 0xa5, 0xd6, 0xae, 0x61, 0xd6, 0x85, 0x1f, 0x9f, - 0x24, 0x23, 0x48, 0x8e, 0x74, 0x3b, 0xc5, 0x25, 0x75, 0x18, 0x00, 0x0f, 0xb7, 0x53, 0x76, 0x61, - 0x69, 0x93, 0x77, 0x0d, 0x66, 0xb9, 0x54, 0x27, 0x61, 0xf6, 0xa3, 0x22, 0x64, 0xdb, 0x84, 0x56, - 0xbd, 0x0c, 0xce, 0xa9, 0x39, 0x9e, 0xfb, 0x0f, 0xb9, 0x00, 0x7b, 0x72, 0x7e, 0x13, 0x33, 0xb4, - 0x7c, 0x80, 0xd7, 0x59, 0x7e, 0x42, 0x40, 0xc5, 0x4d, 0x36, 0xa3, 0x2a, 0xdc, 0x8f, 0x55, 0x7e, - 0x4e, 0xc3, 0x72, 0x4c, 0xb1, 0xa1, 0x2d, 0x98, 0x62, 0xf2, 0x6f, 0x59, 0x40, 0x4a, 0xc2, 0x33, - 0x48, 0xb3, 0xb0, 0xa1, 0xfb, 0x3c, 0x38, 0x60, 0x41, 0x4f, 0x60, 0x96, 0x4a, 0xef, 0xc2, 0x9d, - 0x6c, 0xec, 0x67, 0x12, 0x68, 0x07, 0x63, 0x12, 0x3e, 0x31, 0xee, 0xe5, 0xc2, 0x51, 0x6a, 0xd4, - 0x86, 0x85, 0x9e, 0xcb, 0x7a, 0xee, 0xc6, 0x85, 0xbb, 0xb3, 0x09, 0xee, 0x86, 0xbe, 0x82, 0x9a, - 0x97, 0x1e, 0x17, 0x36, 0xfb, 0x18, 0xf1, 0x80, 0x0f, 0xe5, 0xa7, 0x34, 0x24, 
0xf4, 0xfa, 0xd7, - 0xb0, 0xa3, 0x7d, 0x18, 0xd9, 0xd1, 0x2e, 0x1e, 0x68, 0x7e, 0xc5, 0xee, 0x6c, 0x7a, 0xdf, 0xce, - 0x76, 0xe9, 0x60, 0xf4, 0xc9, 0x3b, 0xdc, 0x9f, 0x69, 0xf8, 0x6f, 0xbc, 0x71, 0xb8, 0xd3, 0xdd, - 0x89, 0xf4, 0xce, 0xb7, 0xfb, 0x7a, 0xe7, 0x89, 0x11, 0x28, 0xfe, 0xdd, 0xf1, 0xfa, 0x76, 0xbc, - 0x5f, 0x53, 0x50, 0x88, 0x8f, 0xdb, 0x6b, 0xd8, 0xf9, 0x1e, 0x47, 0x77, 0xbe, 0xf3, 0x07, 0xca, - 0xaf, 0x98, 0x1d, 0xf0, 0x46, 0x52, 0x5a, 0x05, 0x2b, 0xdb, 0x08, 0x63, 0xfc, 0xdb, 0x74, 0x52, - 0x94, 0xc4, 0x72, 0xb9, 0xcf, 0xef, 0x8d, 0x88, 0xf5, 0x75, 0x93, 0x0f, 0x97, 0x16, 0x9f, 0x0f, - 0x5e, 0x2e, 0xea, 0x30, 0xd9, 0xf4, 0x86, 0xb0, 0xac, 0xe2, 0x77, 0xf6, 0x9b, 0x7f, 0x49, 0xe3, - 0xda, 0x1b, 0xf5, 0x12, 0x86, 0x7d, 0x66, 0xf4, 0x31, 0x4c, 0x10, 0xf1, 0xab, 0x7a, 0x84, 0x52, - 0xde, 0xef, 0xe7, 0xb7, 0x0a, 0x3c, 0xed, 0x3c, 0x14, 0x96, 0xb4, 0xca, 0x57, 0x29, 0x58, 0xdd, - 0xaf, 0x07, 0x20, 0x3a, 0x64, 0x4f, 0x3b, 0xd8, 0xce, 0x3d, 0xfa, 0xde, 0xf6, 0x5d, 0x0a, 0x0e, - 0x0f, 0xdb, 0x89, 0x78, 0x41, 0xf1, 0x45, 0x28, 0xd8, 0x62, 0x82, 0x82, 0xba, 0x27, 0xa4, 0x58, - 0x6a, 0xd1, 0x1b, 0x30, 0xd5, 0xd0, 0xcc, 0x5a, 0xc5, 0xf8, 0xdc, 0x5f, 0xc5, 0x83, 0x94, 0xbe, - 0x29, 0xe5, 0x38, 0x40, 0xa0, 0x6b, 0xb0, 0x20, 0xec, 0xd6, 0x89, 0x59, 0x77, 0x1a, 0xe2, 0x1d, - 0xe4, 0xb6, 0x11, 0xcc, 0x95, 0x7b, 0x7d, 0x7a, 0x3c, 0x60, 0xa1, 0xfc, 0x95, 0x02, 0x74, 0x90, - 0x05, 0xe1, 0x34, 0xe4, 0x34, 0xdb, 0x10, 0x7b, 0xaa, 0x57, 0x54, 0x39, 0x75, 0xb6, 0xdb, 0x29, - 0xe6, 0xae, 0x6c, 0xdd, 0xf2, 0x84, 0x38, 0xd4, 0x73, 0xb0, 0x3f, 0x45, 0xbd, 0x69, 0x29, 0xc1, - 0xbe, 0x63, 0x86, 0x43, 0x3d, 0xba, 0x00, 0x33, 0x7a, 0xd3, 0x65, 0x0e, 0xa1, 0x15, 0xdd, 0xb2, - 0x89, 0x68, 0x42, 0x53, 0xea, 0x61, 0x79, 0xa7, 0x99, 0xab, 0x3d, 0x3a, 0x1c, 0x41, 0xa2, 0x12, - 0x00, 0xaf, 0x23, 0x66, 0x6b, 0xdc, 0x4f, 0x56, 0xf8, 0x99, 0xe3, 0x0f, 0xb6, 0x19, 0x48, 0x71, - 0x0f, 0x42, 0x79, 0x02, 0x4b, 0x15, 0x42, 0xdb, 0x86, 0x4e, 0xae, 0xe8, 0xba, 0xe5, 0x9a, 0x8e, - 0xbf, 0x71, 0x97, 0x21, 0x17, 0xc0, 0x64, 0xa9, 0x1d, 0x92, 0xfe, 0x73, 0x01, 0x17, 0x0e, 0x31, - 0x41, 0x6d, 0xa7, 0x63, 0x6b, 0xfb, 0xfb, 0x34, 0x4c, 0x86, 0xf4, 0x99, 0x1d, 0xc3, 0xac, 0x49, - 0xe6, 0xa3, 0x3e, 0xfa, 0x8e, 0x61, 0xd6, 0x5e, 0x75, 0x8a, 0xd3, 0x12, 0xc6, 0x3f, 0xb1, 0x00, - 0xa2, 0x6b, 0x90, 0x71, 0x19, 0xa1, 0xb2, 0x6a, 0x8f, 0x27, 0xe4, 0xf1, 0x03, 0x46, 0xa8, 0xbf, - 0x32, 0x4d, 0x71, 0x52, 0x2e, 0xc0, 0xc2, 0x1a, 0xdd, 0x84, 0x6c, 0x9d, 0xbf, 0x87, 0x2c, 0xcc, - 0x13, 0x09, 0x34, 0xbd, 0xbf, 0x3f, 0xbc, 0xc7, 0x17, 0x12, 0xec, 0x11, 0xa0, 0x26, 0xcc, 0xb1, - 0x48, 0xe0, 0xc4, 0x23, 0x25, 0xaf, 0x40, 0x43, 0x23, 0xad, 0xa2, 0x6e, 0xa7, 0x38, 0x17, 0x55, - 0xe1, 0x3e, 0x6e, 0xa5, 0x0c, 0xd3, 0x3d, 0xd7, 0xda, 0xbf, 0x8f, 0xaa, 0x97, 0x9f, 0xbd, 0x2c, - 0x8c, 0x3d, 0x7f, 0x59, 0x18, 0x7b, 0xf1, 0xb2, 0x30, 0xf6, 0x65, 0xb7, 0x90, 0x7a, 0xd6, 0x2d, - 0xa4, 0x9e, 0x77, 0x0b, 0xa9, 0x17, 0xdd, 0x42, 0xea, 0xb7, 0x6e, 0x21, 0xf5, 0xf4, 0xf7, 0xc2, - 0xd8, 0xe3, 0x23, 0xb1, 0xff, 0x13, 0xfd, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xd1, 0x0a, 0x3e, 0x83, - 0x48, 0x15, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/flowcontrol/v1/generated.proto", fileDescriptor_5d08a1401821035d) +} + +var fileDescriptor_5d08a1401821035d = []byte{ + // 1575 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4b, 0x6f, 0xdb, 0x56, + 0x16, 0x36, 0x65, 0xc9, 0xb6, 0x8e, 0x9f, 0xb9, 0x8e, 0x61, 0xc5, 0x19, 0x48, 0x0e, 0x07, 0x93, + 0xc7, 0x64, 0x42, 0x25, 0xc6, 0x64, 0x26, 0x41, 0x66, 0x26, 0x08, 0x93, 
0x4c, 0x5e, 0xb6, 0xe3, + 0x5c, 0xe5, 0x51, 0xa4, 0x05, 0x5a, 0x9a, 0xba, 0x96, 0x18, 0x8b, 0x8f, 0xf2, 0x92, 0x72, 0x5d, + 0xa0, 0x40, 0x7f, 0x42, 0x56, 0x5d, 0x76, 0xd1, 0xfe, 0x83, 0xae, 0x8a, 0x76, 0xd3, 0x65, 0x76, + 0xcd, 0x32, 0xed, 0x42, 0x68, 0xd4, 0xbf, 0xd0, 0x45, 0x9b, 0x55, 0x71, 0x2f, 0x2f, 0x49, 0x51, + 0x12, 0x69, 0xc1, 0x8b, 0x74, 0xd3, 0x9d, 0x79, 0xce, 0x77, 0xbe, 0x73, 0xef, 0xb9, 0xe7, 0x25, + 0xc3, 0x99, 0xdd, 0x4b, 0x54, 0x31, 0xec, 0xaa, 0xe6, 0x18, 0xd5, 0x9d, 0x96, 0xbd, 0xa7, 0xdb, + 0x96, 0xe7, 0xda, 0xad, 0x6a, 0xfb, 0x42, 0xb5, 0x41, 0x2c, 0xe2, 0x6a, 0x1e, 0xa9, 0x2b, 0x8e, + 0x6b, 0x7b, 0x36, 0x3a, 0x16, 0x40, 0x15, 0xcd, 0x31, 0x94, 0x1e, 0xa8, 0xd2, 0xbe, 0xb0, 0x72, + 0xae, 0x61, 0x78, 0x4d, 0x7f, 0x5b, 0xd1, 0x6d, 0xb3, 0xda, 0xb0, 0x1b, 0x76, 0x95, 0x5b, 0x6c, + 0xfb, 0x3b, 0xfc, 0x8b, 0x7f, 0xf0, 0xbf, 0x02, 0xa6, 0x95, 0x7f, 0xc6, 0x4e, 0x4d, 0x4d, 0x6f, + 0x1a, 0x16, 0x71, 0xf7, 0xab, 0xce, 0x6e, 0x83, 0x09, 0x68, 0xd5, 0x24, 0x9e, 0x36, 0xc4, 0xff, + 0x4a, 0x35, 0xcd, 0xca, 0xf5, 0x2d, 0xcf, 0x30, 0xc9, 0x80, 0xc1, 0xbf, 0x0e, 0x32, 0xa0, 0x7a, + 0x93, 0x98, 0x5a, 0xbf, 0x9d, 0xfc, 0xad, 0x04, 0xab, 0x37, 0x3f, 0x22, 0xa6, 0xe3, 0x6d, 0xb9, + 0x86, 0xed, 0x1a, 0xde, 0xfe, 0x3a, 0x69, 0x93, 0xd6, 0x75, 0xdb, 0xda, 0x31, 0x1a, 0xbe, 0xab, + 0x79, 0x86, 0x6d, 0xa1, 0x77, 0xa0, 0x64, 0xd9, 0xa6, 0x61, 0x69, 0x4c, 0xae, 0xfb, 0xae, 0x4b, + 0x2c, 0x7d, 0xbf, 0xd6, 0xd4, 0x5c, 0x42, 0x4b, 0xd2, 0xaa, 0x74, 0xba, 0xa0, 0xfe, 0xa5, 0xdb, + 0xa9, 0x94, 0x36, 0x53, 0x30, 0x38, 0xd5, 0x1a, 0xfd, 0x17, 0xe6, 0x5b, 0xc4, 0xaa, 0x6b, 0xdb, + 0x2d, 0xb2, 0x45, 0x5c, 0x9d, 0x58, 0x5e, 0x29, 0xc7, 0x09, 0x17, 0xbb, 0x9d, 0xca, 0xfc, 0x7a, + 0x52, 0x85, 0xfb, 0xb1, 0xf2, 0x53, 0x58, 0xfe, 0x7f, 0xcb, 0xde, 0xbb, 0x61, 0x50, 0xcf, 0xb0, + 0x1a, 0xbe, 0x41, 0x9b, 0xc4, 0xdd, 0x20, 0x5e, 0xd3, 0xae, 0xa3, 0xab, 0x90, 0xf7, 0xf6, 0x1d, + 0xc2, 0xcf, 0x57, 0x54, 0xcf, 0xbe, 0xe8, 0x54, 0xc6, 0xba, 0x9d, 0x4a, 0xfe, 0xe1, 0xbe, 0x43, + 0xde, 0x74, 0x2a, 0xc7, 0x53, 0xcc, 0x98, 0x1a, 0x73, 0x43, 0xf9, 0x79, 0x0e, 0x80, 0xa1, 0x6a, + 0x3c, 0x70, 0xe8, 0x03, 0x98, 0x62, 0x8f, 0x55, 0xd7, 0x3c, 0x8d, 0x73, 0x4e, 0xaf, 0x9d, 0x57, + 0xe2, 0x24, 0x89, 0x62, 0xae, 0x38, 0xbb, 0x0d, 0x26, 0xa0, 0x0a, 0x43, 0x2b, 0xed, 0x0b, 0xca, + 0xfd, 0xed, 0x67, 0x44, 0xf7, 0x36, 0x88, 0xa7, 0xa9, 0x48, 0x9c, 0x02, 0x62, 0x19, 0x8e, 0x58, + 0xd1, 0x3d, 0xc8, 0x53, 0x87, 0xe8, 0x3c, 0x00, 0xd3, 0x6b, 0x67, 0x94, 0xd4, 0x14, 0x54, 0xe2, + 0x63, 0xd5, 0x1c, 0xa2, 0xab, 0x33, 0xe1, 0xe5, 0xd8, 0x17, 0xe6, 0x24, 0xa8, 0x06, 0x13, 0xd4, + 0xd3, 0x3c, 0x9f, 0x96, 0xc6, 0x39, 0xdd, 0xd9, 0xd1, 0xe8, 0xb8, 0x89, 0x3a, 0x27, 0x08, 0x27, + 0x82, 0x6f, 0x2c, 0xa8, 0xe4, 0x57, 0x39, 0x58, 0x8c, 0xc1, 0xd7, 0x6d, 0xab, 0x6e, 0xf0, 0xfc, + 0xb8, 0x92, 0x88, 0xf5, 0xa9, 0xbe, 0x58, 0x2f, 0x0f, 0x31, 0x89, 0xe3, 0x8c, 0x2e, 0x47, 0x27, + 0xcd, 0x71, 0xf3, 0x13, 0x49, 0xe7, 0x6f, 0x3a, 0x95, 0xf9, 0xc8, 0x2c, 0x79, 0x1e, 0xd4, 0x06, + 0xd4, 0xd2, 0xa8, 0xf7, 0xd0, 0xd5, 0x2c, 0x1a, 0xd0, 0x1a, 0x26, 0x11, 0x17, 0xfe, 0xfb, 0x68, + 0xaf, 0xc3, 0x2c, 0xd4, 0x15, 0xe1, 0x12, 0xad, 0x0f, 0xb0, 0xe1, 0x21, 0x1e, 0xd0, 0x49, 0x98, + 0x70, 0x89, 0x46, 0x6d, 0xab, 0x94, 0xe7, 0x47, 0x8e, 0xe2, 0x85, 0xb9, 0x14, 0x0b, 0x2d, 0x3a, + 0x03, 0x93, 0x26, 0xa1, 0x54, 0x6b, 0x90, 0x52, 0x81, 0x03, 0xe7, 0x05, 0x70, 0x72, 0x23, 0x10, + 0xe3, 0x50, 0x2f, 0x7f, 0x23, 0xc1, 0x5c, 0x1c, 0xa7, 0x75, 0x83, 0x7a, 0xe8, 0xbd, 0x81, 0x8c, + 0x53, 0x46, 0xbb, 0x13, 0xb3, 0xe6, 0xf9, 0xb6, 0x20, 0xdc, 0x4d, 0x85, 0x92, 0x9e, 0x6c, 0xbb, + 
0x0b, 0x05, 0xc3, 0x23, 0x26, 0x8b, 0xfa, 0xf8, 0xe9, 0xe9, 0xb5, 0xbf, 0x8d, 0x94, 0x1f, 0xea, + 0xac, 0x60, 0x2c, 0xdc, 0x61, 0xb6, 0x38, 0xa0, 0x90, 0x7f, 0x18, 0xef, 0x3d, 0x3c, 0xcb, 0x42, + 0xf4, 0x85, 0x04, 0x2b, 0x4e, 0x6a, 0x47, 0x11, 0xf7, 0xf9, 0x5f, 0x86, 0xd3, 0xf4, 0x76, 0x84, + 0xc9, 0x0e, 0x61, 0x3d, 0x84, 0xa8, 0xb2, 0x38, 0xcd, 0x4a, 0x06, 0x38, 0xe3, 0x14, 0xe8, 0x2e, + 0x20, 0x53, 0xf3, 0x58, 0x1c, 0x1b, 0x5b, 0x2e, 0xd1, 0x49, 0x9d, 0xb1, 0x8a, 0x06, 0x14, 0xe5, + 0xc4, 0xc6, 0x00, 0x02, 0x0f, 0xb1, 0x42, 0x9f, 0xc0, 0x62, 0x7d, 0xb0, 0x9f, 0x88, 0x64, 0x5c, + 0x3b, 0x20, 0xba, 0x43, 0x3a, 0x91, 0xba, 0xdc, 0xed, 0x54, 0x16, 0x87, 0x28, 0xf0, 0x30, 0x3f, + 0xe8, 0x09, 0x14, 0x5c, 0xbf, 0x45, 0x68, 0x29, 0xcf, 0x9f, 0x33, 0xcb, 0xe1, 0x96, 0xdd, 0x32, + 0xf4, 0x7d, 0xcc, 0xd0, 0x4f, 0x0c, 0xaf, 0x59, 0xf3, 0x79, 0x33, 0xa2, 0xf1, 0xdb, 0x72, 0x15, + 0x0e, 0xf8, 0xe4, 0x36, 0x2c, 0xf4, 0xf7, 0x07, 0xb4, 0x0d, 0xa0, 0x87, 0x25, 0xc9, 0x26, 0xc0, + 0x78, 0x5f, 0x6e, 0xa6, 0x27, 0x50, 0x54, 0xc9, 0x71, 0x2f, 0x8c, 0x44, 0x14, 0xf7, 0xb0, 0xca, + 0xe7, 0x61, 0xe6, 0x96, 0x6b, 0xfb, 0x8e, 0x38, 0x1e, 0x5a, 0x85, 0xbc, 0xa5, 0x99, 0x61, 0x8f, + 0x89, 0x5a, 0xde, 0xa6, 0x66, 0x12, 0xcc, 0x35, 0xf2, 0xe7, 0x12, 0xcc, 0xae, 0x1b, 0xa6, 0xe1, + 0x61, 0x42, 0x1d, 0xdb, 0xa2, 0x04, 0x5d, 0x4c, 0xf4, 0xa5, 0x13, 0x7d, 0x7d, 0xe9, 0x48, 0x02, + 0xdc, 0xd3, 0x91, 0x1e, 0xc3, 0xe4, 0x87, 0x3e, 0xf1, 0x0d, 0xab, 0x21, 0x7a, 0x71, 0x35, 0xe3, + 0x6e, 0x0f, 0x02, 0x64, 0x22, 0xb1, 0xd4, 0x69, 0x56, 0xe3, 0x42, 0x83, 0x43, 0x32, 0xf9, 0x97, + 0x1c, 0x9c, 0xe0, 0x3e, 0x49, 0xfd, 0x0f, 0x19, 0xb6, 0x04, 0x66, 0x5b, 0xbd, 0x57, 0x16, 0xb7, + 0x3b, 0x9d, 0x71, 0xbb, 0x44, 0x88, 0xd4, 0x25, 0x11, 0xc1, 0x64, 0x98, 0x71, 0x92, 0x75, 0xd8, + 0x4c, 0x1f, 0x1f, 0x7d, 0xa6, 0xa3, 0xfb, 0xb0, 0xb4, 0x6d, 0xbb, 0xae, 0xbd, 0x67, 0x58, 0x0d, + 0xee, 0x27, 0x24, 0xc9, 0x73, 0x92, 0x63, 0xdd, 0x4e, 0x65, 0x49, 0x1d, 0x06, 0xc0, 0xc3, 0xed, + 0xe4, 0x3d, 0x58, 0xda, 0x64, 0x5d, 0x83, 0xda, 0xbe, 0xab, 0x93, 0x38, 0xfb, 0x51, 0x05, 0x0a, + 0x6d, 0xe2, 0x6e, 0x07, 0x19, 0x5c, 0x54, 0x8b, 0x2c, 0xf7, 0x1f, 0x33, 0x01, 0x0e, 0xe4, 0xec, + 0x26, 0x56, 0x6c, 0xf9, 0x08, 0xaf, 0xd3, 0xd2, 0x04, 0x87, 0xf2, 0x9b, 0x6c, 0x26, 0x55, 0xb8, + 0x1f, 0x2b, 0x7f, 0x9f, 0x83, 0xe5, 0x94, 0x62, 0x43, 0x5b, 0x30, 0x45, 0xc5, 0xdf, 0xa2, 0x80, + 0xe4, 0x8c, 0x67, 0x10, 0x66, 0x71, 0x43, 0x0f, 0x79, 0x70, 0xc4, 0x82, 0x9e, 0xc1, 0xac, 0x2b, + 0xbc, 0x73, 0x77, 0xa2, 0xb1, 0x9f, 0xcb, 0xa0, 0x1d, 0x8c, 0x49, 0xfc, 0xc4, 0xb8, 0x97, 0x0b, + 0x27, 0xa9, 0x51, 0x1b, 0x16, 0x7a, 0x2e, 0x1b, 0xb8, 0x1b, 0xe7, 0xee, 0xce, 0x67, 0xb8, 0x1b, + 0xfa, 0x0a, 0x6a, 0x49, 0x78, 0x5c, 0xd8, 0xec, 0x63, 0xc4, 0x03, 0x3e, 0xe4, 0xef, 0x72, 0x90, + 0xd1, 0xeb, 0xdf, 0xc2, 0x8e, 0xf6, 0x6e, 0x62, 0x47, 0xbb, 0x7c, 0xa8, 0xf9, 0x95, 0xba, 0xb3, + 0xe9, 0x7d, 0x3b, 0xdb, 0x95, 0xc3, 0xd1, 0x67, 0xef, 0x70, 0xbf, 0xe6, 0xe0, 0xaf, 0xe9, 0xc6, + 0xf1, 0x4e, 0x77, 0x2f, 0xd1, 0x3b, 0xff, 0xdd, 0xd7, 0x3b, 0x4f, 0x8d, 0x40, 0xf1, 0xe7, 0x8e, + 0xd7, 0xb7, 0xe3, 0xfd, 0x28, 0x41, 0x39, 0x3d, 0x6e, 0x6f, 0x61, 0xe7, 0x7b, 0x9a, 0xdc, 0xf9, + 0x2e, 0x1e, 0x2a, 0xbf, 0x52, 0x76, 0xc0, 0x5b, 0x59, 0x69, 0x15, 0xad, 0x6c, 0x23, 0x8c, 0xf1, + 0x2f, 0x73, 0x59, 0x51, 0xe2, 0xcb, 0xe5, 0x01, 0xbf, 0x37, 0x12, 0xd6, 0x37, 0x2d, 0x36, 0x5c, + 0x4c, 0x36, 0x1f, 0x82, 0x5c, 0xd4, 0x61, 0xb2, 0x15, 0x0c, 0x61, 0x51, 0xc5, 0xff, 0x39, 0x68, + 0xfe, 0x65, 0x8d, 0xeb, 0x60, 0xd4, 0x0b, 0x18, 0x0e, 0x99, 0xd1, 0xfb, 0x30, 0x41, 0xf8, 0xaf, + 0xea, 0x11, 0x4a, 0xf9, 
0xa0, 0x9f, 0xdf, 0x2a, 0xb0, 0xb4, 0x0b, 0x50, 0x58, 0xd0, 0xca, 0x9f, + 0x49, 0xb0, 0x7a, 0x50, 0x0f, 0x40, 0xee, 0x90, 0x3d, 0xed, 0x70, 0x3b, 0xf7, 0xe8, 0x7b, 0xdb, + 0x57, 0x12, 0x1c, 0x1d, 0xb6, 0x13, 0xb1, 0x82, 0x62, 0x8b, 0x50, 0xb4, 0xc5, 0x44, 0x05, 0xf5, + 0x80, 0x4b, 0xb1, 0xd0, 0xa2, 0x7f, 0xc0, 0x54, 0x53, 0xb3, 0xea, 0x35, 0xe3, 0xe3, 0x70, 0x15, + 0x8f, 0x52, 0xfa, 0xb6, 0x90, 0xe3, 0x08, 0x81, 0x6e, 0xc0, 0x02, 0xb7, 0x5b, 0x27, 0x56, 0xc3, + 0x6b, 0xf2, 0x77, 0x10, 0xdb, 0x46, 0x34, 0x57, 0x1e, 0xf4, 0xe9, 0xf1, 0x80, 0x85, 0xfc, 0x9b, + 0x04, 0xe8, 0x30, 0x0b, 0xc2, 0x59, 0x28, 0x6a, 0x8e, 0xc1, 0xf7, 0xd4, 0xa0, 0xa8, 0x8a, 0xea, + 0x6c, 0xb7, 0x53, 0x29, 0x5e, 0xdb, 0xba, 0x13, 0x08, 0x71, 0xac, 0x67, 0xe0, 0x70, 0x8a, 0x06, + 0xd3, 0x52, 0x80, 0x43, 0xc7, 0x14, 0xc7, 0x7a, 0x74, 0x09, 0x66, 0xf4, 0x96, 0x4f, 0x3d, 0xe2, + 0xd6, 0x74, 0xdb, 0x21, 0xbc, 0x09, 0x4d, 0xa9, 0x47, 0xc5, 0x9d, 0x66, 0xae, 0xf7, 0xe8, 0x70, + 0x02, 0x89, 0x14, 0x00, 0x56, 0x47, 0xd4, 0xd1, 0x98, 0x9f, 0x02, 0xf7, 0x33, 0xc7, 0x1e, 0x6c, + 0x33, 0x92, 0xe2, 0x1e, 0x84, 0xfc, 0x0c, 0x96, 0x6a, 0xc4, 0x6d, 0x1b, 0x3a, 0xb9, 0xa6, 0xeb, + 0xb6, 0x6f, 0x79, 0xe1, 0xc6, 0x5d, 0x85, 0x62, 0x04, 0x13, 0xa5, 0x76, 0x44, 0xf8, 0x2f, 0x46, + 0x5c, 0x38, 0xc6, 0x44, 0xb5, 0x9d, 0x4b, 0xad, 0xed, 0xaf, 0x73, 0x30, 0x19, 0xd3, 0xe7, 0x77, + 0x0d, 0xab, 0x2e, 0x98, 0x8f, 0x87, 0xe8, 0x7b, 0x86, 0x55, 0x7f, 0xd3, 0xa9, 0x4c, 0x0b, 0x18, + 0xfb, 0xc4, 0x1c, 0x88, 0x6e, 0x40, 0xde, 0xa7, 0xc4, 0x15, 0x55, 0x7b, 0x32, 0x23, 0x8f, 0x1f, + 0x51, 0xe2, 0x86, 0x2b, 0xd3, 0x14, 0x23, 0x65, 0x02, 0xcc, 0xad, 0xd1, 0x6d, 0x28, 0x34, 0xd8, + 0x7b, 0x88, 0xc2, 0x3c, 0x95, 0x41, 0xd3, 0xfb, 0xfb, 0x23, 0x78, 0x7c, 0x2e, 0xc1, 0x01, 0x01, + 0x6a, 0xc1, 0x1c, 0x4d, 0x04, 0x8e, 0x3f, 0x52, 0xf6, 0x0a, 0x34, 0x34, 0xd2, 0x2a, 0xea, 0x76, + 0x2a, 0x73, 0x49, 0x15, 0xee, 0xe3, 0x96, 0xab, 0x30, 0xdd, 0x73, 0xad, 0x83, 0xfb, 0xa8, 0x7a, + 0xf5, 0xc5, 0xeb, 0xf2, 0xd8, 0xcb, 0xd7, 0xe5, 0xb1, 0x57, 0xaf, 0xcb, 0x63, 0x9f, 0x76, 0xcb, + 0xd2, 0x8b, 0x6e, 0x59, 0x7a, 0xd9, 0x2d, 0x4b, 0xaf, 0xba, 0x65, 0xe9, 0xa7, 0x6e, 0x59, 0x7a, + 0xfe, 0x73, 0x79, 0xec, 0xe9, 0xb1, 0xd4, 0xff, 0x89, 0xfe, 0x1e, 0x00, 0x00, 0xff, 0xff, 0x16, + 0x4e, 0x14, 0xcf, 0x2f, 0x15, 0x00, 0x00, } func (m *ExemptPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/flowcontrol/v1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1/generated.proto index a5c6f4fc4f..33a135889e 100644 --- a/vendor/k8s.io/api/flowcontrol/v1/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1/generated.proto @@ -76,7 +76,7 @@ message FlowSchema { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a FlowSchema. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -101,7 +101,7 @@ message FlowSchemaCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. 
- optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -115,7 +115,7 @@ message FlowSchemaList { // `metadata` is the standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of FlowSchemas. repeated FlowSchema items = 2; @@ -302,7 +302,7 @@ message PriorityLevelConfiguration { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a "request-priority". // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -327,7 +327,7 @@ message PriorityLevelConfigurationCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -341,7 +341,7 @@ message PriorityLevelConfigurationList { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of request-priorities. repeated PriorityLevelConfiguration items = 2; diff --git a/vendor/k8s.io/api/flowcontrol/v1/types.go b/vendor/k8s.io/api/flowcontrol/v1/types.go index e62d23280e..ad72bcee22 100644 --- a/vendor/k8s.io/api/flowcontrol/v1/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1/types.go @@ -106,6 +106,7 @@ const ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.29 // FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with // similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". @@ -126,6 +127,7 @@ type FlowSchema struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.29 // FlowSchemaList is a list of FlowSchema objects. type FlowSchemaList struct { @@ -381,6 +383,7 @@ type FlowSchemaConditionType string // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.29 // PriorityLevelConfiguration represents the configuration of a priority level. 
type PriorityLevelConfiguration struct { @@ -400,6 +403,7 @@ type PriorityLevelConfiguration struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.29 // PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. type PriorityLevelConfigurationList struct { diff --git a/vendor/k8s.io/api/flowcontrol/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..fbab9868c7 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,46 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) { + return 1, 29 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *FlowSchemaList) APILifecycleIntroduced() (major, minor int) { + return 1, 29 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfiguration) APILifecycleIntroduced() (major, minor int) { + return 1, 29 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfigurationList) APILifecycleIntroduced() (major, minor int) { + return 1, 29 +} diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go index 33f4b97e39..96e368f6fd 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto +// source: k8s.io/api/flowcontrol/v1beta1/generated.proto package v1beta1 @@ -46,7 +46,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} } func (*ExemptPriorityLevelConfiguration) ProtoMessage() {} func (*ExemptPriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{0} + return fileDescriptor_3a5cb22a034fcb2a, []int{0} } func (m *ExemptPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -74,7 +74,7 @@ var xxx_messageInfo_ExemptPriorityLevelConfiguration proto.InternalMessageInfo func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} } func (*FlowDistinguisherMethod) ProtoMessage() {} func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{1} + return fileDescriptor_3a5cb22a034fcb2a, []int{1} } func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -102,7 +102,7 @@ var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo func (m *FlowSchema) Reset() { *m = FlowSchema{} } func (*FlowSchema) ProtoMessage() {} func (*FlowSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{2} + return fileDescriptor_3a5cb22a034fcb2a, []int{2} } func (m *FlowSchema) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -130,7 +130,7 @@ var xxx_messageInfo_FlowSchema proto.InternalMessageInfo func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} } func (*FlowSchemaCondition) ProtoMessage() {} func (*FlowSchemaCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{3} + return fileDescriptor_3a5cb22a034fcb2a, []int{3} } func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -158,7 +158,7 @@ var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} } func (*FlowSchemaList) ProtoMessage() {} func (*FlowSchemaList) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{4} + return fileDescriptor_3a5cb22a034fcb2a, []int{4} } func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -186,7 +186,7 @@ var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} } func (*FlowSchemaSpec) ProtoMessage() {} func (*FlowSchemaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{5} + return fileDescriptor_3a5cb22a034fcb2a, []int{5} } func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -214,7 +214,7 @@ var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} } func (*FlowSchemaStatus) ProtoMessage() {} func (*FlowSchemaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{6} + return fileDescriptor_3a5cb22a034fcb2a, []int{6} } func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -242,7 +242,7 @@ var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo func (m *GroupSubject) Reset() { *m = GroupSubject{} } func (*GroupSubject) ProtoMessage() {} func (*GroupSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, 
[]int{7} + return fileDescriptor_3a5cb22a034fcb2a, []int{7} } func (m *GroupSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -270,7 +270,7 @@ var xxx_messageInfo_GroupSubject proto.InternalMessageInfo func (m *LimitResponse) Reset() { *m = LimitResponse{} } func (*LimitResponse) ProtoMessage() {} func (*LimitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{8} + return fileDescriptor_3a5cb22a034fcb2a, []int{8} } func (m *LimitResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -298,7 +298,7 @@ var xxx_messageInfo_LimitResponse proto.InternalMessageInfo func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} } func (*LimitedPriorityLevelConfiguration) ProtoMessage() {} func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{9} + return fileDescriptor_3a5cb22a034fcb2a, []int{9} } func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -326,7 +326,7 @@ var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} } func (*NonResourcePolicyRule) ProtoMessage() {} func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{10} + return fileDescriptor_3a5cb22a034fcb2a, []int{10} } func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -354,7 +354,7 @@ var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} } func (*PolicyRulesWithSubjects) ProtoMessage() {} func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{11} + return fileDescriptor_3a5cb22a034fcb2a, []int{11} } func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -382,7 +382,7 @@ var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} } func (*PriorityLevelConfiguration) ProtoMessage() {} func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{12} + return fileDescriptor_3a5cb22a034fcb2a, []int{12} } func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -410,7 +410,7 @@ var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} } func (*PriorityLevelConfigurationCondition) ProtoMessage() {} func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{13} + return fileDescriptor_3a5cb22a034fcb2a, []int{13} } func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -438,7 +438,7 @@ var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInf func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} } func (*PriorityLevelConfigurationList) ProtoMessage() {} func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{14} + return fileDescriptor_3a5cb22a034fcb2a, []int{14} } func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) 
@@ -466,7 +466,7 @@ var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} } func (*PriorityLevelConfigurationReference) ProtoMessage() {} func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{15} + return fileDescriptor_3a5cb22a034fcb2a, []int{15} } func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -494,7 +494,7 @@ var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInf func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} } func (*PriorityLevelConfigurationSpec) ProtoMessage() {} func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{16} + return fileDescriptor_3a5cb22a034fcb2a, []int{16} } func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -522,7 +522,7 @@ var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} } func (*PriorityLevelConfigurationStatus) ProtoMessage() {} func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{17} + return fileDescriptor_3a5cb22a034fcb2a, []int{17} } func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -550,7 +550,7 @@ var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} } func (*QueuingConfiguration) ProtoMessage() {} func (*QueuingConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{18} + return fileDescriptor_3a5cb22a034fcb2a, []int{18} } func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -578,7 +578,7 @@ var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} } func (*ResourcePolicyRule) ProtoMessage() {} func (*ResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{19} + return fileDescriptor_3a5cb22a034fcb2a, []int{19} } func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -606,7 +606,7 @@ var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} } func (*ServiceAccountSubject) ProtoMessage() {} func (*ServiceAccountSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{20} + return fileDescriptor_3a5cb22a034fcb2a, []int{20} } func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -634,7 +634,7 @@ var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} func (*Subject) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{21} + return fileDescriptor_3a5cb22a034fcb2a, []int{21} } func (m *Subject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -662,7 +662,7 @@ var xxx_messageInfo_Subject proto.InternalMessageInfo func (m *UserSubject) Reset() { *m = UserSubject{} } func (*UserSubject) ProtoMessage() {} func (*UserSubject) Descriptor() 
([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{22} + return fileDescriptor_3a5cb22a034fcb2a, []int{22} } func (m *UserSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -714,112 +714,111 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto", fileDescriptor_80171c2a4e3669de) + proto.RegisterFile("k8s.io/api/flowcontrol/v1beta1/generated.proto", fileDescriptor_3a5cb22a034fcb2a) } -var fileDescriptor_80171c2a4e3669de = []byte{ - // 1614 bytes of a gzipped FileDescriptorProto +var fileDescriptor_3a5cb22a034fcb2a = []byte{ + // 1599 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcf, 0x73, 0xdb, 0xc4, 0x17, 0x8f, 0x1c, 0x3b, 0x89, 0x5f, 0x7e, 0x76, 0xd3, 0x4c, 0xfc, 0x4d, 0xbf, 0x63, 0xa7, 0x62, 0x86, 0x02, 0x6d, 0xe5, 0xb6, 0xb4, 0xb4, 0xc0, 0xf0, 0x23, 0x4a, 0x4b, 0x29, 0x4d, 0xd2, 0x74, - 0xd3, 0x42, 0xa7, 0x74, 0x86, 0xca, 0xf2, 0xc6, 0x56, 0x63, 0x4b, 0xea, 0xae, 0xe4, 0x10, 0x7a, - 0x61, 0xf8, 0x0b, 0x38, 0xc3, 0x91, 0x03, 0x27, 0x2e, 0x5c, 0x39, 0x70, 0xa4, 0xc3, 0xa9, 0xc7, - 0x9e, 0x0c, 0x35, 0x27, 0xfe, 0x03, 0xe8, 0x0c, 0x33, 0xcc, 0xae, 0xd6, 0x92, 0xe5, 0x5f, 0xf2, - 0xb4, 0x33, 0x3d, 0x71, 0x8b, 0xde, 0xfb, 0xbc, 0xcf, 0xdb, 0x7d, 0xfb, 0x7e, 0x39, 0x70, 0x79, - 0xef, 0x02, 0xd3, 0x2c, 0xa7, 0xb8, 0xe7, 0x97, 0x08, 0xb5, 0x89, 0x47, 0x58, 0xb1, 0x41, 0xec, - 0xb2, 0x43, 0x8b, 0x52, 0x61, 0xb8, 0x56, 0x71, 0xb7, 0xe6, 0xec, 0x9b, 0x8e, 0xed, 0x51, 0xa7, - 0x56, 0x6c, 0x9c, 0x2e, 0x11, 0xcf, 0x38, 0x5d, 0xac, 0x10, 0x9b, 0x50, 0xc3, 0x23, 0x65, 0xcd, - 0xa5, 0x8e, 0xe7, 0xa0, 0x7c, 0x80, 0xd7, 0x0c, 0xd7, 0xd2, 0x3a, 0xf0, 0x9a, 0xc4, 0xaf, 0x9c, - 0xac, 0x58, 0x5e, 0xd5, 0x2f, 0x69, 0xa6, 0x53, 0x2f, 0x56, 0x9c, 0x8a, 0x53, 0x14, 0x66, 0x25, - 0x7f, 0x57, 0x7c, 0x89, 0x0f, 0xf1, 0x57, 0x40, 0xb7, 0x72, 0x36, 0x72, 0x5f, 0x37, 0xcc, 0xaa, - 0x65, 0x13, 0x7a, 0x50, 0x74, 0xf7, 0x2a, 0x5c, 0xc0, 0x8a, 0x75, 0xe2, 0x19, 0xc5, 0x46, 0xcf, - 0x21, 0x56, 0x8a, 0x83, 0xac, 0xa8, 0x6f, 0x7b, 0x56, 0x9d, 0xf4, 0x18, 0xbc, 0x91, 0x64, 0xc0, - 0xcc, 0x2a, 0xa9, 0x1b, 0xdd, 0x76, 0xea, 0x4f, 0x0a, 0xac, 0x5e, 0xfa, 0x9c, 0xd4, 0x5d, 0x6f, - 0x9b, 0x5a, 0x0e, 0xb5, 0xbc, 0x83, 0x0d, 0xd2, 0x20, 0xb5, 0x75, 0xc7, 0xde, 0xb5, 0x2a, 0x3e, - 0x35, 0x3c, 0xcb, 0xb1, 0xd1, 0x2d, 0xc8, 0xd9, 0x4e, 0xdd, 0xb2, 0x0d, 0x2e, 0x37, 0x7d, 0x4a, - 0x89, 0x6d, 0x1e, 0xec, 0x54, 0x0d, 0x4a, 0x58, 0x4e, 0x59, 0x55, 0x5e, 0xc9, 0xe8, 0xff, 0x6f, - 0x35, 0x0b, 0xb9, 0xad, 0x01, 0x18, 0x3c, 0xd0, 0x1a, 0xbd, 0x03, 0xf3, 0x35, 0x62, 0x97, 0x8d, - 0x52, 0x8d, 0x6c, 0x13, 0x6a, 0x12, 0xdb, 0xcb, 0xa5, 0x04, 0xe1, 0x62, 0xab, 0x59, 0x98, 0xdf, - 0x88, 0xab, 0x70, 0x37, 0x56, 0xbd, 0x0d, 0xcb, 0x1f, 0xd4, 0x9c, 0xfd, 0x8b, 0x16, 0xf3, 0x2c, - 0xbb, 0xe2, 0x5b, 0xac, 0x4a, 0xe8, 0x26, 0xf1, 0xaa, 0x4e, 0x19, 0xbd, 0x07, 0x69, 0xef, 0xc0, - 0x25, 0xe2, 0x7c, 0x59, 0xfd, 0xf8, 0xc3, 0x66, 0x61, 0xac, 0xd5, 0x2c, 0xa4, 0x6f, 0x1c, 0xb8, - 0xe4, 0x69, 0xb3, 0x70, 0x64, 0x80, 0x19, 0x57, 0x63, 0x61, 0xa8, 0x7e, 0x93, 0x02, 0xe0, 0xa8, - 0x1d, 0x11, 0x38, 0x74, 0x17, 0xa6, 0xf8, 0x63, 0x95, 0x0d, 0xcf, 0x10, 0x9c, 0xd3, 0x67, 0x4e, - 0x69, 0x51, 0xa6, 0x84, 0x31, 0xd7, 0xdc, 0xbd, 0x0a, 0x17, 0x30, 0x8d, 0xa3, 0xb5, 0xc6, 0x69, - 0xed, 0x5a, 0xe9, 0x1e, 0x31, 0xbd, 0x4d, 0xe2, 0x19, 0x3a, 0x92, 0xa7, 0x80, 0x48, 0x86, 0x43, - 0x56, 0xb4, 0x0d, 0x69, 0xe6, 0x12, 0x53, 0x04, 0x60, 0xfa, 0x8c, 0xa6, 0x0d, 0xcf, 0x43, 0x2d, - 0x3a, 0xdb, 0x8e, 0x4b, 0x4c, 0x7d, 
0xa6, 0x7d, 0x43, 0xfe, 0x85, 0x05, 0x13, 0xba, 0x05, 0x13, - 0xcc, 0x33, 0x3c, 0x9f, 0xe5, 0xc6, 0x7b, 0x4e, 0x9c, 0xc4, 0x29, 0xec, 0xf4, 0x39, 0xc9, 0x3a, - 0x11, 0x7c, 0x63, 0xc9, 0xa7, 0x3e, 0x4e, 0xc1, 0x62, 0x04, 0x5e, 0x77, 0xec, 0xb2, 0x25, 0x32, - 0xe5, 0xed, 0x58, 0xd4, 0x8f, 0x75, 0x45, 0x7d, 0xb9, 0x8f, 0x49, 0x14, 0x71, 0xf4, 0x66, 0x78, - 0xdc, 0x94, 0x30, 0x3f, 0x1a, 0x77, 0xfe, 0xb4, 0x59, 0x98, 0x0f, 0xcd, 0xe2, 0xe7, 0x41, 0x0d, - 0x40, 0x35, 0x83, 0x79, 0x37, 0xa8, 0x61, 0xb3, 0x80, 0xd6, 0xaa, 0x13, 0x79, 0xeb, 0xd7, 0x46, - 0x7b, 0x27, 0x6e, 0xa1, 0xaf, 0x48, 0x97, 0x68, 0xa3, 0x87, 0x0d, 0xf7, 0xf1, 0x80, 0x5e, 0x86, - 0x09, 0x4a, 0x0c, 0xe6, 0xd8, 0xb9, 0xb4, 0x38, 0x72, 0x18, 0x2f, 0x2c, 0xa4, 0x58, 0x6a, 0xd1, - 0xab, 0x30, 0x59, 0x27, 0x8c, 0x19, 0x15, 0x92, 0xcb, 0x08, 0xe0, 0xbc, 0x04, 0x4e, 0x6e, 0x06, - 0x62, 0xdc, 0xd6, 0xab, 0x3f, 0x2b, 0x30, 0x17, 0xc5, 0x69, 0xc3, 0x62, 0x1e, 0xba, 0xd3, 0x93, - 0x7b, 0xda, 0x68, 0x77, 0xe2, 0xd6, 0x22, 0xf3, 0x16, 0xa4, 0xbb, 0xa9, 0xb6, 0xa4, 0x23, 0xef, - 0xae, 0x41, 0xc6, 0xf2, 0x48, 0x9d, 0x47, 0x7d, 0xbc, 0x2b, 0x5c, 0x09, 0x49, 0xa2, 0xcf, 0x4a, - 0xda, 0xcc, 0x15, 0x4e, 0x80, 0x03, 0x1e, 0xf5, 0xcf, 0xf1, 0xce, 0x1b, 0xf0, 0x7c, 0x44, 0xdf, - 0x2b, 0xb0, 0xe2, 0x0e, 0x6c, 0x30, 0xf2, 0x52, 0xeb, 0x49, 0x9e, 0x07, 0xb7, 0x28, 0x4c, 0x76, - 0x09, 0xef, 0x2b, 0x44, 0x57, 0xe5, 0x91, 0x56, 0x86, 0x80, 0x87, 0x1c, 0x05, 0x7d, 0x04, 0xa8, - 0x6e, 0x78, 0x3c, 0xa2, 0x95, 0x6d, 0x4a, 0x4c, 0x52, 0xe6, 0xac, 0xb2, 0x29, 0x85, 0xd9, 0xb1, - 0xd9, 0x83, 0xc0, 0x7d, 0xac, 0xd0, 0x57, 0x0a, 0x2c, 0x96, 0x7b, 0x9b, 0x8c, 0xcc, 0xcb, 0xf3, - 0xa3, 0x04, 0xba, 0x4f, 0x8f, 0xd2, 0x97, 0x5b, 0xcd, 0xc2, 0x62, 0x1f, 0x05, 0xee, 0xe7, 0x0c, - 0xdd, 0x81, 0x0c, 0xf5, 0x6b, 0x84, 0xe5, 0xd2, 0xe2, 0x79, 0x13, 0xbd, 0x6e, 0x3b, 0x35, 0xcb, - 0x3c, 0xc0, 0xdc, 0xe4, 0x13, 0xcb, 0xab, 0xee, 0xf8, 0xa2, 0x57, 0xb1, 0xe8, 0xad, 0x85, 0x0a, - 0x07, 0xa4, 0xea, 0x03, 0x58, 0xe8, 0x6e, 0x1a, 0xa8, 0x02, 0x60, 0xb6, 0xeb, 0x94, 0x0f, 0x08, - 0xee, 0xf6, 0xf5, 0xd1, 0xb3, 0x2a, 0xac, 0xf1, 0xa8, 0x5f, 0x86, 0x22, 0x86, 0x3b, 0xa8, 0xd5, - 0x53, 0x30, 0x73, 0x99, 0x3a, 0xbe, 0x2b, 0xcf, 0x88, 0x56, 0x21, 0x6d, 0x1b, 0xf5, 0x76, 0xf7, - 0x09, 0x3b, 0xe2, 0x96, 0x51, 0x27, 0x58, 0x68, 0xd4, 0xef, 0x14, 0x98, 0xdd, 0xb0, 0xea, 0x96, - 0x87, 0x09, 0x73, 0x1d, 0x9b, 0x11, 0x74, 0x2e, 0xd6, 0xb1, 0x8e, 0x76, 0x75, 0xac, 0x43, 0x31, - 0x70, 0x47, 0xaf, 0xfa, 0x14, 0x26, 0xef, 0xfb, 0xc4, 0xb7, 0xec, 0x8a, 0xec, 0xd7, 0x67, 0x93, - 0x2e, 0x78, 0x3d, 0x80, 0xc7, 0xb2, 0x4d, 0x9f, 0xe6, 0x2d, 0x40, 0x6a, 0x70, 0x9b, 0x51, 0xfd, - 0x27, 0x05, 0x47, 0x85, 0x63, 0x52, 0x1e, 0x32, 0x95, 0xef, 0x40, 0xce, 0x60, 0xcc, 0xa7, 0xa4, - 0x3c, 0x68, 0x2a, 0xaf, 0xca, 0xdb, 0xe4, 0xd6, 0x06, 0xe0, 0xf0, 0x40, 0x06, 0x74, 0x0f, 0x66, - 0x6b, 0x9d, 0x77, 0x97, 0xd7, 0x3c, 0x99, 0x74, 0xcd, 0x58, 0xc0, 0xf4, 0x25, 0x79, 0x82, 0x78, - 0xd0, 0x71, 0x9c, 0xba, 0xdf, 0x16, 0x30, 0x3e, 0xfa, 0x16, 0x80, 0xae, 0xc1, 0x52, 0xc9, 0xa1, - 0xd4, 0xd9, 0xb7, 0xec, 0x8a, 0xf0, 0xd3, 0x26, 0x49, 0x0b, 0x92, 0xff, 0xb5, 0x9a, 0x85, 0x25, - 0xbd, 0x1f, 0x00, 0xf7, 0xb7, 0x53, 0xf7, 0x61, 0x69, 0x8b, 0xf7, 0x14, 0xe6, 0xf8, 0xd4, 0x24, - 0x51, 0x41, 0xa0, 0x02, 0x64, 0x1a, 0x84, 0x96, 0x82, 0xa4, 0xce, 0xea, 0x59, 0x5e, 0x0e, 0x1f, - 0x73, 0x01, 0x0e, 0xe4, 0xfc, 0x26, 0x76, 0x64, 0x79, 0x13, 0x6f, 0xb0, 0xdc, 0x84, 0x80, 0x8a, - 0x9b, 0x6c, 0xc5, 0x55, 0xb8, 0x1b, 0xab, 0x36, 0x53, 0xb0, 0x3c, 0xa0, 0xfe, 0xd0, 0x4d, 0x98, - 0x62, 0xf2, 0x6f, 0x59, 0x53, 0xc7, 0x92, 0xde, 0x42, 0xda, 
0x46, 0xdd, 0xbf, 0x4d, 0x86, 0x43, - 0x2a, 0xe4, 0xc0, 0x2c, 0x95, 0x47, 0x10, 0x3e, 0xe5, 0x14, 0x38, 0x93, 0xc4, 0xdd, 0x1b, 0x9d, - 0xe8, 0xb1, 0x71, 0x27, 0x21, 0x8e, 0xf3, 0xa3, 0x07, 0xb0, 0xd0, 0x71, 0xed, 0xc0, 0xe7, 0xb8, - 0xf0, 0x79, 0x2e, 0xc9, 0x67, 0xdf, 0x47, 0xd1, 0x73, 0xd2, 0xed, 0xc2, 0x56, 0x17, 0x2d, 0xee, - 0x71, 0xa4, 0xfe, 0x9a, 0x82, 0x21, 0x83, 0xe1, 0x05, 0x2c, 0x79, 0x77, 0x63, 0x4b, 0xde, 0xbb, - 0xcf, 0x3e, 0xf1, 0x06, 0x2e, 0x7d, 0xd5, 0xae, 0xa5, 0xef, 0xfd, 0xe7, 0xf0, 0x31, 0x7c, 0x09, - 0xfc, 0x2b, 0x05, 0x2f, 0x0d, 0x36, 0x8e, 0x96, 0xc2, 0xab, 0xb1, 0x16, 0x7b, 0xbe, 0xab, 0xc5, - 0x1e, 0x1b, 0x81, 0xe2, 0xbf, 0x25, 0xb1, 0x6b, 0x49, 0xfc, 0x4d, 0x81, 0xfc, 0xe0, 0xb8, 0xbd, - 0x80, 0xa5, 0xf1, 0xb3, 0xf8, 0xd2, 0xf8, 0xd6, 0xb3, 0x27, 0xd9, 0x80, 0x25, 0xf2, 0xf2, 0xb0, - 0xdc, 0x0a, 0xd7, 0xbd, 0x11, 0x46, 0xfe, 0x0f, 0xa9, 0x61, 0xa1, 0x12, 0xdb, 0x69, 0xc2, 0xaf, - 0x96, 0x98, 0xf5, 0x25, 0x9b, 0x8f, 0x9e, 0x3a, 0x9f, 0x1e, 0x41, 0x42, 0x56, 0x61, 0xb2, 0x16, - 0xcc, 0x6a, 0x59, 0xd4, 0x6b, 0x23, 0x8d, 0xc8, 0x61, 0xa3, 0x3d, 0x58, 0x0b, 0x24, 0x0c, 0xb7, - 0xe9, 0x51, 0x19, 0x26, 0x88, 0xf8, 0xa9, 0x3e, 0x6a, 0x65, 0x27, 0xfd, 0xb0, 0xd7, 0x81, 0x67, - 0x61, 0x80, 0xc2, 0x92, 0x5b, 0xfd, 0x56, 0x81, 0xd5, 0xa4, 0x96, 0x80, 0xf6, 0xfb, 0xac, 0x78, - 0xcf, 0xb1, 0xbe, 0x8f, 0xbe, 0xf2, 0xfd, 0xa8, 0xc0, 0xe1, 0x7e, 0x9b, 0x14, 0x2f, 0x32, 0xbe, - 0x3e, 0x85, 0xbb, 0x4f, 0x58, 0x64, 0xd7, 0x85, 0x14, 0x4b, 0x2d, 0x3a, 0x01, 0x53, 0x55, 0xc3, - 0x2e, 0xef, 0x58, 0x5f, 0xb4, 0xb7, 0xfa, 0x30, 0xcd, 0x3f, 0x94, 0x72, 0x1c, 0x22, 0xd0, 0x45, - 0x58, 0x10, 0x76, 0x1b, 0xc4, 0xae, 0x78, 0x55, 0xf1, 0x22, 0x72, 0x35, 0x09, 0xa7, 0xce, 0xf5, - 0x2e, 0x3d, 0xee, 0xb1, 0x50, 0xff, 0x56, 0x00, 0x3d, 0xcb, 0x36, 0x71, 0x1c, 0xb2, 0x86, 0x6b, - 0x89, 0x15, 0x37, 0x28, 0xb4, 0xac, 0x3e, 0xdb, 0x6a, 0x16, 0xb2, 0x6b, 0xdb, 0x57, 0x02, 0x21, - 0x8e, 0xf4, 0x1c, 0xdc, 0x1e, 0xb4, 0xc1, 0x40, 0x95, 0xe0, 0xb6, 0x63, 0x86, 0x23, 0x3d, 0xba, - 0x00, 0x33, 0x66, 0xcd, 0x67, 0x1e, 0xa1, 0x3b, 0xa6, 0xe3, 0x12, 0xd1, 0x98, 0xa6, 0xf4, 0xc3, - 0xf2, 0x4e, 0x33, 0xeb, 0x1d, 0x3a, 0x1c, 0x43, 0x22, 0x0d, 0x80, 0x97, 0x15, 0x73, 0x0d, 0xee, - 0x27, 0x23, 0xfc, 0xcc, 0xf1, 0x07, 0xdb, 0x0a, 0xa5, 0xb8, 0x03, 0xa1, 0xde, 0x83, 0xa5, 0x1d, - 0x42, 0x1b, 0x96, 0x49, 0xd6, 0x4c, 0xd3, 0xf1, 0x6d, 0xaf, 0xbd, 0xac, 0x17, 0x21, 0x1b, 0xc2, - 0x64, 0xe5, 0x1d, 0x92, 0xfe, 0xb3, 0x21, 0x17, 0x8e, 0x30, 0x61, 0xa9, 0xa7, 0x06, 0x96, 0xfa, - 0x2f, 0x29, 0x98, 0x8c, 0xe8, 0xd3, 0x7b, 0x96, 0x5d, 0x96, 0xcc, 0x47, 0xda, 0xe8, 0xab, 0x96, - 0x5d, 0x7e, 0xda, 0x2c, 0x4c, 0x4b, 0x18, 0xff, 0xc4, 0x02, 0x88, 0xae, 0x40, 0xda, 0x67, 0x84, - 0xca, 0x22, 0x3e, 0x9e, 0x94, 0xcc, 0x37, 0x19, 0xa1, 0xed, 0xfd, 0x6a, 0x8a, 0x33, 0x73, 0x01, - 0x16, 0x14, 0x68, 0x13, 0x32, 0x15, 0xfe, 0x28, 0xb2, 0x4e, 0x4f, 0x24, 0x71, 0x75, 0xfe, 0x88, - 0x09, 0xd2, 0x40, 0x48, 0x70, 0xc0, 0x82, 0xee, 0xc3, 0x1c, 0x8b, 0x85, 0x50, 0x3c, 0xd7, 0x08, - 0xfb, 0x52, 0xdf, 0xc0, 0xeb, 0xa8, 0xd5, 0x2c, 0xcc, 0xc5, 0x55, 0xb8, 0xcb, 0x81, 0x5a, 0x84, - 0xe9, 0x8e, 0x0b, 0x26, 0x77, 0x59, 0xfd, 0xe2, 0xc3, 0x27, 0xf9, 0xb1, 0x47, 0x4f, 0xf2, 0x63, - 0x8f, 0x9f, 0xe4, 0xc7, 0xbe, 0x6c, 0xe5, 0x95, 0x87, 0xad, 0xbc, 0xf2, 0xa8, 0x95, 0x57, 0x1e, - 0xb7, 0xf2, 0xca, 0xef, 0xad, 0xbc, 0xf2, 0xf5, 0x1f, 0xf9, 0xb1, 0xdb, 0xf9, 0xe1, 0xff, 0x8b, - 0xfd, 0x37, 0x00, 0x00, 0xff, 0xff, 0x3a, 0xda, 0x82, 0x48, 0xc5, 0x15, 0x00, 0x00, + 0xd3, 0x42, 0xa7, 0x74, 0x86, 0x2a, 0xf2, 0xc6, 0x56, 0x63, 0xfd, 0xa8, 0x56, 0x4a, 0x08, 0xbd, + 
0x30, 0xfc, 0x05, 0x9c, 0xe1, 0xc8, 0x81, 0x13, 0x17, 0xae, 0x1c, 0x38, 0xd2, 0xe1, 0xd4, 0x63, + 0x4f, 0x86, 0x9a, 0x13, 0xff, 0x01, 0x74, 0x86, 0x19, 0x66, 0x57, 0x2b, 0xc9, 0xb2, 0x2d, 0xcb, + 0xd3, 0xce, 0xf4, 0xc4, 0x2d, 0x7a, 0xfb, 0x79, 0x9f, 0xb7, 0xef, 0xed, 0xfb, 0xe5, 0x80, 0xb2, + 0x7b, 0x81, 0x2a, 0x86, 0x5d, 0xd5, 0x1c, 0xa3, 0xba, 0xd3, 0xb4, 0xf7, 0x75, 0xdb, 0xf2, 0x5c, + 0xbb, 0x59, 0xdd, 0x3b, 0xbd, 0x4d, 0x3c, 0xed, 0x74, 0xb5, 0x4e, 0x2c, 0xe2, 0x6a, 0x1e, 0xa9, + 0x29, 0x8e, 0x6b, 0x7b, 0x36, 0x2a, 0x07, 0x78, 0x45, 0x73, 0x0c, 0xa5, 0x03, 0xaf, 0x08, 0xfc, + 0xd2, 0xc9, 0xba, 0xe1, 0x35, 0xfc, 0x6d, 0x45, 0xb7, 0xcd, 0x6a, 0xdd, 0xae, 0xdb, 0x55, 0xae, + 0xb6, 0xed, 0xef, 0xf0, 0x2f, 0xfe, 0xc1, 0xff, 0x0a, 0xe8, 0x96, 0xce, 0xc6, 0xe6, 0x4d, 0x4d, + 0x6f, 0x18, 0x16, 0x71, 0x0f, 0xaa, 0xce, 0x6e, 0x9d, 0x09, 0x68, 0xd5, 0x24, 0x9e, 0x56, 0xdd, + 0xeb, 0xb9, 0xc4, 0x52, 0x35, 0x4d, 0xcb, 0xf5, 0x2d, 0xcf, 0x30, 0x49, 0x8f, 0xc2, 0x1b, 0x59, + 0x0a, 0x54, 0x6f, 0x10, 0x53, 0xeb, 0xd6, 0x93, 0x7f, 0x92, 0x60, 0xf9, 0xd2, 0xe7, 0xc4, 0x74, + 0xbc, 0x4d, 0xd7, 0xb0, 0x5d, 0xc3, 0x3b, 0x58, 0x23, 0x7b, 0xa4, 0xb9, 0x6a, 0x5b, 0x3b, 0x46, + 0xdd, 0x77, 0x35, 0xcf, 0xb0, 0x2d, 0x74, 0x0b, 0x4a, 0x96, 0x6d, 0x1a, 0x96, 0xc6, 0xe4, 0xba, + 0xef, 0xba, 0xc4, 0xd2, 0x0f, 0xb6, 0x1a, 0x9a, 0x4b, 0x68, 0x49, 0x5a, 0x96, 0x5e, 0x29, 0xa8, + 0xff, 0x6f, 0xb7, 0x2a, 0xa5, 0x8d, 0x14, 0x0c, 0x4e, 0xd5, 0x46, 0xef, 0xc0, 0x6c, 0x93, 0x58, + 0x35, 0x6d, 0xbb, 0x49, 0x36, 0x89, 0xab, 0x13, 0xcb, 0x2b, 0xe5, 0x38, 0xe1, 0x7c, 0xbb, 0x55, + 0x99, 0x5d, 0x4b, 0x1e, 0xe1, 0x6e, 0xac, 0x7c, 0x1b, 0x16, 0x3f, 0x68, 0xda, 0xfb, 0x17, 0x0d, + 0xea, 0x19, 0x56, 0xdd, 0x37, 0x68, 0x83, 0xb8, 0xeb, 0xc4, 0x6b, 0xd8, 0x35, 0xf4, 0x1e, 0xe4, + 0xbd, 0x03, 0x87, 0xf0, 0xfb, 0x15, 0xd5, 0xe3, 0x0f, 0x5b, 0x95, 0x91, 0x76, 0xab, 0x92, 0xbf, + 0x71, 0xe0, 0x90, 0xa7, 0xad, 0xca, 0x91, 0x14, 0x35, 0x76, 0x8c, 0xb9, 0xa2, 0xfc, 0x4d, 0x0e, + 0x80, 0xa1, 0xb6, 0x78, 0xe0, 0xd0, 0x5d, 0x98, 0x60, 0x8f, 0x55, 0xd3, 0x3c, 0x8d, 0x73, 0x4e, + 0x9e, 0x39, 0xa5, 0xc4, 0x99, 0x12, 0xc5, 0x5c, 0x71, 0x76, 0xeb, 0x4c, 0x40, 0x15, 0x86, 0x56, + 0xf6, 0x4e, 0x2b, 0xd7, 0xb6, 0xef, 0x11, 0xdd, 0x5b, 0x27, 0x9e, 0xa6, 0x22, 0x71, 0x0b, 0x88, + 0x65, 0x38, 0x62, 0x45, 0x9b, 0x90, 0xa7, 0x0e, 0xd1, 0x79, 0x00, 0x26, 0xcf, 0x28, 0xca, 0xe0, + 0x3c, 0x54, 0xe2, 0xbb, 0x6d, 0x39, 0x44, 0x57, 0xa7, 0x42, 0x0f, 0xd9, 0x17, 0xe6, 0x4c, 0xe8, + 0x16, 0x8c, 0x51, 0x4f, 0xf3, 0x7c, 0x5a, 0x1a, 0xed, 0xb9, 0x71, 0x16, 0x27, 0xd7, 0x53, 0x67, + 0x04, 0xeb, 0x58, 0xf0, 0x8d, 0x05, 0x9f, 0xfc, 0x38, 0x07, 0xf3, 0x31, 0x78, 0xd5, 0xb6, 0x6a, + 0x06, 0xcf, 0x94, 0xb7, 0x13, 0x51, 0x3f, 0xd6, 0x15, 0xf5, 0xc5, 0x3e, 0x2a, 0x71, 0xc4, 0xd1, + 0x9b, 0xd1, 0x75, 0x73, 0x5c, 0xfd, 0x68, 0xd2, 0xf8, 0xd3, 0x56, 0x65, 0x36, 0x52, 0x4b, 0xde, + 0x07, 0xed, 0x01, 0x6a, 0x6a, 0xd4, 0xbb, 0xe1, 0x6a, 0x16, 0x0d, 0x68, 0x0d, 0x93, 0x08, 0xaf, + 0x5f, 0x1b, 0xee, 0x9d, 0x98, 0x86, 0xba, 0x24, 0x4c, 0xa2, 0xb5, 0x1e, 0x36, 0xdc, 0xc7, 0x02, + 0x7a, 0x19, 0xc6, 0x5c, 0xa2, 0x51, 0xdb, 0x2a, 0xe5, 0xf9, 0x95, 0xa3, 0x78, 0x61, 0x2e, 0xc5, + 0xe2, 0x14, 0xbd, 0x0a, 0xe3, 0x26, 0xa1, 0x54, 0xab, 0x93, 0x52, 0x81, 0x03, 0x67, 0x05, 0x70, + 0x7c, 0x3d, 0x10, 0xe3, 0xf0, 0x5c, 0xfe, 0x59, 0x82, 0x99, 0x38, 0x4e, 0x6b, 0x06, 0xf5, 0xd0, + 0x9d, 0x9e, 0xdc, 0x53, 0x86, 0xf3, 0x89, 0x69, 0xf3, 0xcc, 0x9b, 0x13, 0xe6, 0x26, 0x42, 0x49, + 0x47, 0xde, 0x5d, 0x83, 0x82, 0xe1, 0x11, 0x93, 0x45, 0x7d, 0xb4, 0x2b, 0x5c, 0x19, 0x49, 0xa2, + 0x4e, 0x0b, 0xda, 0xc2, 
0x15, 0x46, 0x80, 0x03, 0x1e, 0xf9, 0xcf, 0xd1, 0x4e, 0x0f, 0x58, 0x3e, + 0xa2, 0xef, 0x25, 0x58, 0x72, 0x52, 0x1b, 0x8c, 0x70, 0x6a, 0x35, 0xcb, 0x72, 0x7a, 0x8b, 0xc2, + 0x64, 0x87, 0xb0, 0xbe, 0x42, 0x54, 0x59, 0x5c, 0x69, 0x69, 0x00, 0x78, 0xc0, 0x55, 0xd0, 0x47, + 0x80, 0x4c, 0xcd, 0x63, 0x11, 0xad, 0x6f, 0xba, 0x44, 0x27, 0x35, 0xc6, 0x2a, 0x9a, 0x52, 0x94, + 0x1d, 0xeb, 0x3d, 0x08, 0xdc, 0x47, 0x0b, 0x7d, 0x25, 0xc1, 0x7c, 0xad, 0xb7, 0xc9, 0x88, 0xbc, + 0x3c, 0x3f, 0x4c, 0xa0, 0xfb, 0xf4, 0x28, 0x75, 0xb1, 0xdd, 0xaa, 0xcc, 0xf7, 0x39, 0xc0, 0xfd, + 0x8c, 0xa1, 0x3b, 0x50, 0x70, 0xfd, 0x26, 0xa1, 0xa5, 0x3c, 0x7f, 0xde, 0x4c, 0xab, 0x9b, 0x76, + 0xd3, 0xd0, 0x0f, 0x30, 0x53, 0xf9, 0xc4, 0xf0, 0x1a, 0x5b, 0x3e, 0xef, 0x55, 0x34, 0x7e, 0x6b, + 0x7e, 0x84, 0x03, 0x52, 0xf9, 0x01, 0xcc, 0x75, 0x37, 0x0d, 0x54, 0x07, 0xd0, 0xc3, 0x3a, 0x65, + 0x03, 0x82, 0x99, 0x7d, 0x7d, 0xf8, 0xac, 0x8a, 0x6a, 0x3c, 0xee, 0x97, 0x91, 0x88, 0xe2, 0x0e, + 0x6a, 0xf9, 0x14, 0x4c, 0x5d, 0x76, 0x6d, 0xdf, 0x11, 0x77, 0x44, 0xcb, 0x90, 0xb7, 0x34, 0x33, + 0xec, 0x3e, 0x51, 0x47, 0xdc, 0xd0, 0x4c, 0x82, 0xf9, 0x89, 0xfc, 0x9d, 0x04, 0xd3, 0x6b, 0x86, + 0x69, 0x78, 0x98, 0x50, 0xc7, 0xb6, 0x28, 0x41, 0xe7, 0x12, 0x1d, 0xeb, 0x68, 0x57, 0xc7, 0x3a, + 0x94, 0x00, 0x77, 0xf4, 0xaa, 0x4f, 0x61, 0xfc, 0xbe, 0x4f, 0x7c, 0xc3, 0xaa, 0x8b, 0x7e, 0x7d, + 0x36, 0xcb, 0xc1, 0xeb, 0x01, 0x3c, 0x91, 0x6d, 0xea, 0x24, 0x6b, 0x01, 0xe2, 0x04, 0x87, 0x8c, + 0xf2, 0x3f, 0x39, 0x38, 0xca, 0x0d, 0x93, 0xda, 0x80, 0xa9, 0x7c, 0x07, 0x4a, 0x1a, 0xa5, 0xbe, + 0x4b, 0x6a, 0x69, 0x53, 0x79, 0x59, 0x78, 0x53, 0x5a, 0x49, 0xc1, 0xe1, 0x54, 0x06, 0x74, 0x0f, + 0xa6, 0x9b, 0x9d, 0xbe, 0x0b, 0x37, 0x4f, 0x66, 0xb9, 0x99, 0x08, 0x98, 0xba, 0x20, 0x6e, 0x90, + 0x0c, 0x3a, 0x4e, 0x52, 0xf7, 0xdb, 0x02, 0x46, 0x87, 0xdf, 0x02, 0xd0, 0x35, 0x58, 0xd8, 0xb6, + 0x5d, 0xd7, 0xde, 0x37, 0xac, 0x3a, 0xb7, 0x13, 0x92, 0xe4, 0x39, 0xc9, 0xff, 0xda, 0xad, 0xca, + 0x82, 0xda, 0x0f, 0x80, 0xfb, 0xeb, 0xc9, 0xfb, 0xb0, 0xb0, 0xc1, 0x7a, 0x0a, 0xb5, 0x7d, 0x57, + 0x27, 0x71, 0x41, 0xa0, 0x0a, 0x14, 0xf6, 0x88, 0xbb, 0x1d, 0x24, 0x75, 0x51, 0x2d, 0xb2, 0x72, + 0xf8, 0x98, 0x09, 0x70, 0x20, 0x67, 0x9e, 0x58, 0xb1, 0xe6, 0x4d, 0xbc, 0x46, 0x4b, 0x63, 0x1c, + 0xca, 0x3d, 0xd9, 0x48, 0x1e, 0xe1, 0x6e, 0xac, 0xdc, 0xca, 0xc1, 0x62, 0x4a, 0xfd, 0xa1, 0x9b, + 0x30, 0x41, 0xc5, 0xdf, 0xa2, 0xa6, 0x8e, 0x65, 0xbd, 0x85, 0xd0, 0x8d, 0xbb, 0x7f, 0x48, 0x86, + 0x23, 0x2a, 0x64, 0xc3, 0xb4, 0x2b, 0xae, 0xc0, 0x6d, 0x8a, 0x29, 0x70, 0x26, 0x8b, 0xbb, 0x37, + 0x3a, 0xf1, 0x63, 0xe3, 0x4e, 0x42, 0x9c, 0xe4, 0x47, 0x0f, 0x60, 0xae, 0xc3, 0xed, 0xc0, 0xe6, + 0x28, 0xb7, 0x79, 0x2e, 0xcb, 0x66, 0xdf, 0x47, 0x51, 0x4b, 0xc2, 0xec, 0xdc, 0x46, 0x17, 0x2d, + 0xee, 0x31, 0x24, 0xff, 0x9a, 0x83, 0x01, 0x83, 0xe1, 0x05, 0x2c, 0x79, 0x77, 0x13, 0x4b, 0xde, + 0xbb, 0xcf, 0x3e, 0xf1, 0x52, 0x97, 0xbe, 0x46, 0xd7, 0xd2, 0xf7, 0xfe, 0x73, 0xd8, 0x18, 0xbc, + 0x04, 0xfe, 0x95, 0x83, 0x97, 0xd2, 0x95, 0xe3, 0xa5, 0xf0, 0x6a, 0xa2, 0xc5, 0x9e, 0xef, 0x6a, + 0xb1, 0xc7, 0x86, 0xa0, 0xf8, 0x6f, 0x49, 0xec, 0x5a, 0x12, 0x7f, 0x93, 0xa0, 0x9c, 0x1e, 0xb7, + 0x17, 0xb0, 0x34, 0x7e, 0x96, 0x5c, 0x1a, 0xdf, 0x7a, 0xf6, 0x24, 0x4b, 0x59, 0x22, 0x2f, 0x0f, + 0xca, 0xad, 0x68, 0xdd, 0x1b, 0x62, 0xe4, 0xff, 0x90, 0x1b, 0x14, 0x2a, 0xbe, 0x9d, 0x66, 0xfc, + 0x6a, 0x49, 0x68, 0x5f, 0xb2, 0xd8, 0xe8, 0x31, 0xd9, 0xf4, 0x08, 0x12, 0xb2, 0x01, 0xe3, 0xcd, + 0x60, 0x56, 0x8b, 0xa2, 0x5e, 0x19, 0x6a, 0x44, 0x0e, 0x1a, 0xed, 0xc1, 0x5a, 0x20, 0x60, 0x38, + 0xa4, 0x47, 0x35, 0x18, 0x23, 0xfc, 0xa7, 0xfa, 
0xb0, 0x95, 0x9d, 0xf5, 0xc3, 0x5e, 0x05, 0x96, + 0x85, 0x01, 0x0a, 0x0b, 0x6e, 0xf9, 0x5b, 0x09, 0x96, 0xb3, 0x5a, 0x02, 0xda, 0xef, 0xb3, 0xe2, + 0x3d, 0xc7, 0xfa, 0x3e, 0xfc, 0xca, 0xf7, 0xa3, 0x04, 0x87, 0xfb, 0x6d, 0x52, 0xac, 0xc8, 0xd8, + 0xfa, 0x14, 0xed, 0x3e, 0x51, 0x91, 0x5d, 0xe7, 0x52, 0x2c, 0x4e, 0xd1, 0x09, 0x98, 0x68, 0x68, + 0x56, 0x6d, 0xcb, 0xf8, 0x22, 0xdc, 0xea, 0xa3, 0x34, 0xff, 0x50, 0xc8, 0x71, 0x84, 0x40, 0x17, + 0x61, 0x8e, 0xeb, 0xad, 0x11, 0xab, 0xee, 0x35, 0xf8, 0x8b, 0x88, 0xd5, 0x24, 0x9a, 0x3a, 0xd7, + 0xbb, 0xce, 0x71, 0x8f, 0x86, 0xfc, 0xb7, 0x04, 0xe8, 0x59, 0xb6, 0x89, 0xe3, 0x50, 0xd4, 0x1c, + 0x83, 0xaf, 0xb8, 0x41, 0xa1, 0x15, 0xd5, 0xe9, 0x76, 0xab, 0x52, 0x5c, 0xd9, 0xbc, 0x12, 0x08, + 0x71, 0x7c, 0xce, 0xc0, 0xe1, 0xa0, 0x0d, 0x06, 0xaa, 0x00, 0x87, 0x86, 0x29, 0x8e, 0xcf, 0xd1, + 0x05, 0x98, 0xd2, 0x9b, 0x3e, 0xf5, 0x88, 0xbb, 0xa5, 0xdb, 0x0e, 0xe1, 0x8d, 0x69, 0x42, 0x3d, + 0x2c, 0x7c, 0x9a, 0x5a, 0xed, 0x38, 0xc3, 0x09, 0x24, 0x52, 0x00, 0x58, 0x59, 0x51, 0x47, 0x63, + 0x76, 0x0a, 0xdc, 0xce, 0x0c, 0x7b, 0xb0, 0x8d, 0x48, 0x8a, 0x3b, 0x10, 0xf2, 0x3d, 0x58, 0xd8, + 0x22, 0xee, 0x9e, 0xa1, 0x93, 0x15, 0x5d, 0xb7, 0x7d, 0xcb, 0x0b, 0x97, 0xf5, 0x2a, 0x14, 0x23, + 0x98, 0xa8, 0xbc, 0x43, 0xc2, 0x7e, 0x31, 0xe2, 0xc2, 0x31, 0x26, 0x2a, 0xf5, 0x5c, 0x6a, 0xa9, + 0xff, 0x92, 0x83, 0xf1, 0x98, 0x3e, 0xbf, 0x6b, 0x58, 0x35, 0xc1, 0x7c, 0x24, 0x44, 0x5f, 0x35, + 0xac, 0xda, 0xd3, 0x56, 0x65, 0x52, 0xc0, 0xd8, 0x27, 0xe6, 0x40, 0x74, 0x05, 0xf2, 0x3e, 0x25, + 0xae, 0x28, 0xe2, 0xe3, 0x59, 0xc9, 0x7c, 0x93, 0x12, 0x37, 0xdc, 0xaf, 0x26, 0x18, 0x33, 0x13, + 0x60, 0x4e, 0x81, 0xd6, 0xa1, 0x50, 0x67, 0x8f, 0x22, 0xea, 0xf4, 0x44, 0x16, 0x57, 0xe7, 0x8f, + 0x98, 0x20, 0x0d, 0xb8, 0x04, 0x07, 0x2c, 0xe8, 0x3e, 0xcc, 0xd0, 0x44, 0x08, 0xf9, 0x73, 0x0d, + 0xb1, 0x2f, 0xf5, 0x0d, 0xbc, 0x8a, 0xda, 0xad, 0xca, 0x4c, 0xf2, 0x08, 0x77, 0x19, 0x90, 0xab, + 0x30, 0xd9, 0xe1, 0x60, 0x76, 0x97, 0x55, 0x2f, 0x3e, 0x7c, 0x52, 0x1e, 0x79, 0xf4, 0xa4, 0x3c, + 0xf2, 0xf8, 0x49, 0x79, 0xe4, 0xcb, 0x76, 0x59, 0x7a, 0xd8, 0x2e, 0x4b, 0x8f, 0xda, 0x65, 0xe9, + 0x71, 0xbb, 0x2c, 0xfd, 0xde, 0x2e, 0x4b, 0x5f, 0xff, 0x51, 0x1e, 0xb9, 0x5d, 0x1e, 0xfc, 0xbf, + 0xd8, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x24, 0x42, 0x4c, 0x0f, 0xac, 0x15, 0x00, 0x00, } func (m *ExemptPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto index 04b54820c7..61ed3833ae 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto @@ -76,7 +76,7 @@ message FlowSchema { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a FlowSchema. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -101,7 +101,7 @@ message FlowSchemaCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. 
- optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -115,7 +115,7 @@ message FlowSchemaList { // `metadata` is the standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of FlowSchemas. repeated FlowSchema items = 2; @@ -295,7 +295,7 @@ message PriorityLevelConfiguration { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a "request-priority". // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -320,7 +320,7 @@ message PriorityLevelConfigurationCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -334,7 +334,7 @@ message PriorityLevelConfigurationList { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of request-priorities. repeated PriorityLevelConfiguration items = 2; diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go index 7f8ee08506..f646446df9 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto +// source: k8s.io/api/flowcontrol/v1beta2/generated.proto package v1beta2 @@ -46,7 +46,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} } func (*ExemptPriorityLevelConfiguration) ProtoMessage() {} func (*ExemptPriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{0} + return fileDescriptor_2e620af2eea53237, []int{0} } func (m *ExemptPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -74,7 +74,7 @@ var xxx_messageInfo_ExemptPriorityLevelConfiguration proto.InternalMessageInfo func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} } func (*FlowDistinguisherMethod) ProtoMessage() {} func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{1} + return fileDescriptor_2e620af2eea53237, []int{1} } func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -102,7 +102,7 @@ var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo func (m *FlowSchema) Reset() { *m = FlowSchema{} } func (*FlowSchema) ProtoMessage() {} func (*FlowSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{2} + return fileDescriptor_2e620af2eea53237, []int{2} } func (m *FlowSchema) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -130,7 +130,7 @@ var xxx_messageInfo_FlowSchema proto.InternalMessageInfo func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} } func (*FlowSchemaCondition) ProtoMessage() {} func (*FlowSchemaCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{3} + return fileDescriptor_2e620af2eea53237, []int{3} } func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -158,7 +158,7 @@ var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} } func (*FlowSchemaList) ProtoMessage() {} func (*FlowSchemaList) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{4} + return fileDescriptor_2e620af2eea53237, []int{4} } func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -186,7 +186,7 @@ var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} } func (*FlowSchemaSpec) ProtoMessage() {} func (*FlowSchemaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{5} + return fileDescriptor_2e620af2eea53237, []int{5} } func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -214,7 +214,7 @@ var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} } func (*FlowSchemaStatus) ProtoMessage() {} func (*FlowSchemaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{6} + return fileDescriptor_2e620af2eea53237, []int{6} } func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -242,7 +242,7 @@ var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo func (m *GroupSubject) Reset() { *m = GroupSubject{} } func (*GroupSubject) ProtoMessage() {} func (*GroupSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, 
[]int{7} + return fileDescriptor_2e620af2eea53237, []int{7} } func (m *GroupSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -270,7 +270,7 @@ var xxx_messageInfo_GroupSubject proto.InternalMessageInfo func (m *LimitResponse) Reset() { *m = LimitResponse{} } func (*LimitResponse) ProtoMessage() {} func (*LimitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{8} + return fileDescriptor_2e620af2eea53237, []int{8} } func (m *LimitResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -298,7 +298,7 @@ var xxx_messageInfo_LimitResponse proto.InternalMessageInfo func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} } func (*LimitedPriorityLevelConfiguration) ProtoMessage() {} func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{9} + return fileDescriptor_2e620af2eea53237, []int{9} } func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -326,7 +326,7 @@ var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} } func (*NonResourcePolicyRule) ProtoMessage() {} func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{10} + return fileDescriptor_2e620af2eea53237, []int{10} } func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -354,7 +354,7 @@ var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} } func (*PolicyRulesWithSubjects) ProtoMessage() {} func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{11} + return fileDescriptor_2e620af2eea53237, []int{11} } func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -382,7 +382,7 @@ var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} } func (*PriorityLevelConfiguration) ProtoMessage() {} func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{12} + return fileDescriptor_2e620af2eea53237, []int{12} } func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -410,7 +410,7 @@ var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} } func (*PriorityLevelConfigurationCondition) ProtoMessage() {} func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{13} + return fileDescriptor_2e620af2eea53237, []int{13} } func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -438,7 +438,7 @@ var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInf func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} } func (*PriorityLevelConfigurationList) ProtoMessage() {} func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{14} + return fileDescriptor_2e620af2eea53237, []int{14} } func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) 
@@ -466,7 +466,7 @@ var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} } func (*PriorityLevelConfigurationReference) ProtoMessage() {} func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{15} + return fileDescriptor_2e620af2eea53237, []int{15} } func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -494,7 +494,7 @@ var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInf func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} } func (*PriorityLevelConfigurationSpec) ProtoMessage() {} func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{16} + return fileDescriptor_2e620af2eea53237, []int{16} } func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -522,7 +522,7 @@ var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} } func (*PriorityLevelConfigurationStatus) ProtoMessage() {} func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{17} + return fileDescriptor_2e620af2eea53237, []int{17} } func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -550,7 +550,7 @@ var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} } func (*QueuingConfiguration) ProtoMessage() {} func (*QueuingConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{18} + return fileDescriptor_2e620af2eea53237, []int{18} } func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -578,7 +578,7 @@ var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} } func (*ResourcePolicyRule) ProtoMessage() {} func (*ResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{19} + return fileDescriptor_2e620af2eea53237, []int{19} } func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -606,7 +606,7 @@ var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} } func (*ServiceAccountSubject) ProtoMessage() {} func (*ServiceAccountSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{20} + return fileDescriptor_2e620af2eea53237, []int{20} } func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -634,7 +634,7 @@ var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} func (*Subject) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{21} + return fileDescriptor_2e620af2eea53237, []int{21} } func (m *Subject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -662,7 +662,7 @@ var xxx_messageInfo_Subject proto.InternalMessageInfo func (m *UserSubject) Reset() { *m = UserSubject{} } func (*UserSubject) ProtoMessage() {} func (*UserSubject) Descriptor() 
([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{22} + return fileDescriptor_2e620af2eea53237, []int{22} } func (m *UserSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -714,113 +714,112 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto", fileDescriptor_ed300aa8e672704e) -} - -var fileDescriptor_ed300aa8e672704e = []byte{ - // 1617 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4b, 0x73, 0x1b, 0xc5, - 0x16, 0xf6, 0xc8, 0x92, 0x6d, 0x1d, 0x3f, 0xd3, 0x8e, 0xcb, 0xba, 0xce, 0x2d, 0xc9, 0x99, 0x5b, - 0x75, 0x73, 0x2f, 0x49, 0x46, 0x89, 0x49, 0x48, 0x80, 0xe2, 0xe1, 0x71, 0x42, 0x08, 0xb1, 0x1d, - 0xa7, 0x9d, 0x40, 0x2a, 0xa4, 0x8a, 0x8c, 0x46, 0x6d, 0x69, 0x62, 0x69, 0x66, 0xd2, 0x3d, 0x23, - 0x63, 0xb2, 0xa1, 0xf8, 0x05, 0xac, 0x61, 0xc9, 0x82, 0x15, 0x1b, 0xb6, 0x2c, 0x58, 0x92, 0x62, - 0x95, 0x65, 0x56, 0x82, 0x88, 0x15, 0xff, 0x00, 0x52, 0x45, 0x15, 0xd5, 0x3d, 0xad, 0x19, 0x8d, - 0x5e, 0xa3, 0x4a, 0xaa, 0xb2, 0x62, 0xe7, 0x39, 0xe7, 0x3b, 0xdf, 0xe9, 0x3e, 0x7d, 0x5e, 0x32, - 0x5c, 0xd9, 0xbf, 0xc8, 0x34, 0xcb, 0x29, 0xee, 0xfb, 0x25, 0x42, 0x6d, 0xe2, 0x11, 0x56, 0x6c, - 0x10, 0xbb, 0xec, 0xd0, 0xa2, 0x54, 0x18, 0xae, 0x55, 0xdc, 0xab, 0x39, 0x07, 0xa6, 0x63, 0x7b, - 0xd4, 0xa9, 0x15, 0x1b, 0x67, 0x4b, 0xc4, 0x33, 0xd6, 0x8a, 0x15, 0x62, 0x13, 0x6a, 0x78, 0xa4, - 0xac, 0xb9, 0xd4, 0xf1, 0x1c, 0x94, 0x0f, 0xf0, 0x9a, 0xe1, 0x5a, 0x5a, 0x07, 0x5e, 0x93, 0xf8, - 0x95, 0xd3, 0x15, 0xcb, 0xab, 0xfa, 0x25, 0xcd, 0x74, 0xea, 0xc5, 0x8a, 0x53, 0x71, 0x8a, 0xc2, - 0xac, 0xe4, 0xef, 0x89, 0x2f, 0xf1, 0x21, 0xfe, 0x0a, 0xe8, 0x56, 0xce, 0x45, 0xee, 0xeb, 0x86, - 0x59, 0xb5, 0x6c, 0x42, 0x0f, 0x8b, 0xee, 0x7e, 0x85, 0x0b, 0x58, 0xb1, 0x4e, 0x3c, 0xa3, 0xd8, - 0x38, 0xdb, 0x7d, 0x88, 0x95, 0xe2, 0x20, 0x2b, 0xea, 0xdb, 0x9e, 0x55, 0x27, 0x3d, 0x06, 0xaf, - 0x25, 0x19, 0x30, 0xb3, 0x4a, 0xea, 0x46, 0xb7, 0x9d, 0xfa, 0x83, 0x02, 0xab, 0x97, 0x3f, 0x25, - 0x75, 0xd7, 0xdb, 0xa1, 0x96, 0x43, 0x2d, 0xef, 0x70, 0x93, 0x34, 0x48, 0x6d, 0xc3, 0xb1, 0xf7, - 0xac, 0x8a, 0x4f, 0x0d, 0xcf, 0x72, 0x6c, 0x74, 0x1b, 0x72, 0xb6, 0x53, 0xb7, 0x6c, 0x83, 0xcb, - 0x4d, 0x9f, 0x52, 0x62, 0x9b, 0x87, 0xbb, 0x55, 0x83, 0x12, 0x96, 0x53, 0x56, 0x95, 0xff, 0x65, - 0xf4, 0x7f, 0xb7, 0x9a, 0x85, 0xdc, 0xf6, 0x00, 0x0c, 0x1e, 0x68, 0x8d, 0xde, 0x82, 0xf9, 0x1a, - 0xb1, 0xcb, 0x46, 0xa9, 0x46, 0x76, 0x08, 0x35, 0x89, 0xed, 0xe5, 0x52, 0x82, 0x70, 0xb1, 0xd5, - 0x2c, 0xcc, 0x6f, 0xc6, 0x55, 0xb8, 0x1b, 0xab, 0xde, 0x81, 0xe5, 0xf7, 0x6a, 0xce, 0xc1, 0x25, - 0x8b, 0x79, 0x96, 0x5d, 0xf1, 0x2d, 0x56, 0x25, 0x74, 0x8b, 0x78, 0x55, 0xa7, 0x8c, 0xde, 0x81, - 0xb4, 0x77, 0xe8, 0x12, 0x71, 0xbe, 0xac, 0x7e, 0xf2, 0x51, 0xb3, 0x30, 0xd6, 0x6a, 0x16, 0xd2, - 0x37, 0x0f, 0x5d, 0xf2, 0xac, 0x59, 0x38, 0x36, 0xc0, 0x8c, 0xab, 0xb1, 0x30, 0x54, 0xbf, 0x4a, - 0x01, 0x70, 0xd4, 0xae, 0x08, 0x1c, 0xba, 0x07, 0x53, 0xfc, 0xb1, 0xca, 0x86, 0x67, 0x08, 0xce, - 0xe9, 0xb5, 0x33, 0x5a, 0x94, 0x29, 0x61, 0xcc, 0x35, 0x77, 0xbf, 0xc2, 0x05, 0x4c, 0xe3, 0x68, - 0xad, 0x71, 0x56, 0xbb, 0x5e, 0xba, 0x4f, 0x4c, 0x6f, 0x8b, 0x78, 0x86, 0x8e, 0xe4, 0x29, 0x20, - 0x92, 0xe1, 0x90, 0x15, 0xed, 0x40, 0x9a, 0xb9, 0xc4, 0x14, 0x01, 0x98, 0x5e, 0xd3, 0xb4, 0xe1, - 0x79, 0xa8, 0x45, 0x67, 0xdb, 0x75, 0x89, 0xa9, 0xcf, 0xb4, 0x6f, 0xc8, 0xbf, 0xb0, 0x60, 0x42, - 0xb7, 0x61, 0x82, 0x79, 0x86, 0xe7, 0xb3, 0xdc, 0x78, 0xcf, 0x89, 0x93, 0x38, 0x85, 0x9d, 0x3e, - 0x27, 0x59, 0x27, 0x82, 0x6f, 
0x2c, 0xf9, 0xd4, 0x27, 0x29, 0x58, 0x8c, 0xc0, 0x1b, 0x8e, 0x5d, - 0xb6, 0x44, 0xa6, 0xbc, 0x19, 0x8b, 0xfa, 0x89, 0xae, 0xa8, 0x2f, 0xf7, 0x31, 0x89, 0x22, 0x8e, - 0x5e, 0x0f, 0x8f, 0x9b, 0x12, 0xe6, 0xc7, 0xe3, 0xce, 0x9f, 0x35, 0x0b, 0xf3, 0xa1, 0x59, 0xfc, - 0x3c, 0xa8, 0x01, 0xa8, 0x66, 0x30, 0xef, 0x26, 0x35, 0x6c, 0x16, 0xd0, 0x5a, 0x75, 0x22, 0x6f, - 0xfd, 0xca, 0x68, 0xef, 0xc4, 0x2d, 0xf4, 0x15, 0xe9, 0x12, 0x6d, 0xf6, 0xb0, 0xe1, 0x3e, 0x1e, - 0xd0, 0x7f, 0x61, 0x82, 0x12, 0x83, 0x39, 0x76, 0x2e, 0x2d, 0x8e, 0x1c, 0xc6, 0x0b, 0x0b, 0x29, - 0x96, 0x5a, 0xf4, 0x7f, 0x98, 0xac, 0x13, 0xc6, 0x8c, 0x0a, 0xc9, 0x65, 0x04, 0x70, 0x5e, 0x02, - 0x27, 0xb7, 0x02, 0x31, 0x6e, 0xeb, 0xd5, 0x1f, 0x15, 0x98, 0x8b, 0xe2, 0xb4, 0x69, 0x31, 0x0f, - 0xdd, 0xed, 0xc9, 0x3d, 0x6d, 0xb4, 0x3b, 0x71, 0x6b, 0x91, 0x79, 0x0b, 0xd2, 0xdd, 0x54, 0x5b, - 0xd2, 0x91, 0x77, 0xd7, 0x21, 0x63, 0x79, 0xa4, 0xce, 0xa3, 0x3e, 0xde, 0x15, 0xae, 0x84, 0x24, - 0xd1, 0x67, 0x25, 0x6d, 0xe6, 0x2a, 0x27, 0xc0, 0x01, 0x8f, 0xfa, 0xfb, 0x78, 0xe7, 0x0d, 0x78, - 0x3e, 0xa2, 0x6f, 0x15, 0x58, 0x71, 0x07, 0x36, 0x18, 0x79, 0xa9, 0x8d, 0x24, 0xcf, 0x83, 0x5b, - 0x14, 0x26, 0x7b, 0x84, 0xf7, 0x15, 0xa2, 0xab, 0xf2, 0x48, 0x2b, 0x43, 0xc0, 0x43, 0x8e, 0x82, - 0x3e, 0x00, 0x54, 0x37, 0x3c, 0x1e, 0xd1, 0xca, 0x0e, 0x25, 0x26, 0x29, 0x73, 0x56, 0xd9, 0x94, - 0xc2, 0xec, 0xd8, 0xea, 0x41, 0xe0, 0x3e, 0x56, 0xe8, 0x0b, 0x05, 0x16, 0xcb, 0xbd, 0x4d, 0x46, - 0xe6, 0xe5, 0x85, 0x51, 0x02, 0xdd, 0xa7, 0x47, 0xe9, 0xcb, 0xad, 0x66, 0x61, 0xb1, 0x8f, 0x02, - 0xf7, 0x73, 0x86, 0xee, 0x42, 0x86, 0xfa, 0x35, 0xc2, 0x72, 0x69, 0xf1, 0xbc, 0x89, 0x5e, 0x77, - 0x9c, 0x9a, 0x65, 0x1e, 0x62, 0x6e, 0xf2, 0x91, 0xe5, 0x55, 0x77, 0x7d, 0xd1, 0xab, 0x58, 0xf4, - 0xd6, 0x42, 0x85, 0x03, 0x52, 0xf5, 0x21, 0x2c, 0x74, 0x37, 0x0d, 0x54, 0x01, 0x30, 0xdb, 0x75, - 0xca, 0x07, 0x04, 0x77, 0xfb, 0xea, 0xe8, 0x59, 0x15, 0xd6, 0x78, 0xd4, 0x2f, 0x43, 0x11, 0xc3, - 0x1d, 0xd4, 0xea, 0x19, 0x98, 0xb9, 0x42, 0x1d, 0xdf, 0x95, 0x67, 0x44, 0xab, 0x90, 0xb6, 0x8d, - 0x7a, 0xbb, 0xfb, 0x84, 0x1d, 0x71, 0xdb, 0xa8, 0x13, 0x2c, 0x34, 0xea, 0x37, 0x0a, 0xcc, 0x6e, - 0x5a, 0x75, 0xcb, 0xc3, 0x84, 0xb9, 0x8e, 0xcd, 0x08, 0x3a, 0x1f, 0xeb, 0x58, 0xc7, 0xbb, 0x3a, - 0xd6, 0x91, 0x18, 0xb8, 0xa3, 0x57, 0x7d, 0x0c, 0x93, 0x0f, 0x7c, 0xe2, 0x5b, 0x76, 0x45, 0xf6, - 0xeb, 0x73, 0x49, 0x17, 0xbc, 0x11, 0xc0, 0x63, 0xd9, 0xa6, 0x4f, 0xf3, 0x16, 0x20, 0x35, 0xb8, - 0xcd, 0xa8, 0xfe, 0x95, 0x82, 0xe3, 0xc2, 0x31, 0x29, 0x0f, 0x99, 0xca, 0x77, 0x21, 0x67, 0x30, - 0xe6, 0x53, 0x52, 0x1e, 0x34, 0x95, 0x57, 0xe5, 0x6d, 0x72, 0xeb, 0x03, 0x70, 0x78, 0x20, 0x03, - 0xba, 0x0f, 0xb3, 0xb5, 0xce, 0xbb, 0xcb, 0x6b, 0x9e, 0x4e, 0xba, 0x66, 0x2c, 0x60, 0xfa, 0x92, - 0x3c, 0x41, 0x3c, 0xe8, 0x38, 0x4e, 0xdd, 0x6f, 0x0b, 0x18, 0x1f, 0x7d, 0x0b, 0x40, 0xd7, 0x61, - 0xa9, 0xe4, 0x50, 0xea, 0x1c, 0x58, 0x76, 0x45, 0xf8, 0x69, 0x93, 0xa4, 0x05, 0xc9, 0xbf, 0x5a, - 0xcd, 0xc2, 0x92, 0xde, 0x0f, 0x80, 0xfb, 0xdb, 0xa9, 0x07, 0xb0, 0xb4, 0xcd, 0x7b, 0x0a, 0x73, - 0x7c, 0x6a, 0x92, 0xa8, 0x20, 0x50, 0x01, 0x32, 0x0d, 0x42, 0x4b, 0x41, 0x52, 0x67, 0xf5, 0x2c, - 0x2f, 0x87, 0x0f, 0xb9, 0x00, 0x07, 0x72, 0x7e, 0x13, 0x3b, 0xb2, 0xbc, 0x85, 0x37, 0x59, 0x6e, - 0x42, 0x40, 0xc5, 0x4d, 0xb6, 0xe3, 0x2a, 0xdc, 0x8d, 0x55, 0x9b, 0x29, 0x58, 0x1e, 0x50, 0x7f, - 0xe8, 0x16, 0x4c, 0x31, 0xf9, 0xb7, 0xac, 0xa9, 0x13, 0x49, 0x6f, 0x21, 0x6d, 0xa3, 0xee, 0xdf, - 0x26, 0xc3, 0x21, 0x15, 0x72, 0x60, 0x96, 0xca, 0x23, 0x08, 0x9f, 0x72, 0x0a, 0xac, 0x25, 0x71, - 0xf7, 0x46, 0x27, 0x7a, 0x6c, 0xdc, 0x49, 0x88, 0xe3, 
0xfc, 0xe8, 0x21, 0x2c, 0x74, 0x5c, 0x3b, - 0xf0, 0x39, 0x2e, 0x7c, 0x9e, 0x4f, 0xf2, 0xd9, 0xf7, 0x51, 0xf4, 0x9c, 0x74, 0xbb, 0xb0, 0xdd, - 0x45, 0x8b, 0x7b, 0x1c, 0xa9, 0x3f, 0xa7, 0x60, 0xc8, 0x60, 0x78, 0x09, 0x4b, 0xde, 0xbd, 0xd8, - 0x92, 0xf7, 0xf6, 0xf3, 0x4f, 0xbc, 0x81, 0x4b, 0x5f, 0xb5, 0x6b, 0xe9, 0x7b, 0xf7, 0x05, 0x7c, - 0x0c, 0x5f, 0x02, 0xff, 0x48, 0xc1, 0x7f, 0x06, 0x1b, 0x47, 0x4b, 0xe1, 0xb5, 0x58, 0x8b, 0xbd, - 0xd0, 0xd5, 0x62, 0x4f, 0x8c, 0x40, 0xf1, 0xcf, 0x92, 0xd8, 0xb5, 0x24, 0xfe, 0xa2, 0x40, 0x7e, - 0x70, 0xdc, 0x5e, 0xc2, 0xd2, 0xf8, 0x49, 0x7c, 0x69, 0x7c, 0xe3, 0xf9, 0x93, 0x6c, 0xc0, 0x12, - 0x79, 0x65, 0x58, 0x6e, 0x85, 0xeb, 0xde, 0x08, 0x23, 0xff, 0xbb, 0xd4, 0xb0, 0x50, 0x89, 0xed, - 0x34, 0xe1, 0x57, 0x4b, 0xcc, 0xfa, 0xb2, 0xcd, 0x47, 0x4f, 0x9d, 0x4f, 0x8f, 0x20, 0x21, 0xab, - 0x30, 0x59, 0x0b, 0x66, 0xb5, 0x2c, 0xea, 0xf5, 0x91, 0x46, 0xe4, 0xb0, 0xd1, 0x1e, 0xac, 0x05, - 0x12, 0x86, 0xdb, 0xf4, 0xa8, 0x0c, 0x13, 0x44, 0xfc, 0x54, 0x1f, 0xb5, 0xb2, 0x93, 0x7e, 0xd8, - 0xeb, 0xc0, 0xb3, 0x30, 0x40, 0x61, 0xc9, 0xad, 0x7e, 0xad, 0xc0, 0x6a, 0x52, 0x4b, 0x40, 0x07, - 0x7d, 0x56, 0xbc, 0x17, 0x58, 0xdf, 0x47, 0x5f, 0xf9, 0xbe, 0x57, 0xe0, 0x68, 0xbf, 0x4d, 0x8a, - 0x17, 0x19, 0x5f, 0x9f, 0xc2, 0xdd, 0x27, 0x2c, 0xb2, 0x1b, 0x42, 0x8a, 0xa5, 0x16, 0x9d, 0x82, - 0xa9, 0xaa, 0x61, 0x97, 0x77, 0xad, 0xcf, 0xda, 0x5b, 0x7d, 0x98, 0xe6, 0xef, 0x4b, 0x39, 0x0e, - 0x11, 0xe8, 0x12, 0x2c, 0x08, 0xbb, 0x4d, 0x62, 0x57, 0xbc, 0xaa, 0x78, 0x11, 0xb9, 0x9a, 0x84, - 0x53, 0xe7, 0x46, 0x97, 0x1e, 0xf7, 0x58, 0xa8, 0x7f, 0x2a, 0x80, 0x9e, 0x67, 0x9b, 0x38, 0x09, - 0x59, 0xc3, 0xb5, 0xc4, 0x8a, 0x1b, 0x14, 0x5a, 0x56, 0x9f, 0x6d, 0x35, 0x0b, 0xd9, 0xf5, 0x9d, - 0xab, 0x81, 0x10, 0x47, 0x7a, 0x0e, 0x6e, 0x0f, 0xda, 0x60, 0xa0, 0x4a, 0x70, 0xdb, 0x31, 0xc3, - 0x91, 0x1e, 0x5d, 0x84, 0x19, 0xb3, 0xe6, 0x33, 0x8f, 0xd0, 0x5d, 0xd3, 0x71, 0x89, 0x68, 0x4c, - 0x53, 0xfa, 0x51, 0x79, 0xa7, 0x99, 0x8d, 0x0e, 0x1d, 0x8e, 0x21, 0x91, 0x06, 0xc0, 0xcb, 0x8a, - 0xb9, 0x06, 0xf7, 0x93, 0x11, 0x7e, 0xe6, 0xf8, 0x83, 0x6d, 0x87, 0x52, 0xdc, 0x81, 0x50, 0xef, - 0xc3, 0xd2, 0x2e, 0xa1, 0x0d, 0xcb, 0x24, 0xeb, 0xa6, 0xe9, 0xf8, 0xb6, 0xd7, 0x5e, 0xd6, 0x8b, - 0x90, 0x0d, 0x61, 0xb2, 0xf2, 0x8e, 0x48, 0xff, 0xd9, 0x90, 0x0b, 0x47, 0x98, 0xb0, 0xd4, 0x53, - 0x03, 0x4b, 0xfd, 0xa7, 0x14, 0x4c, 0x46, 0xf4, 0xe9, 0x7d, 0xcb, 0x2e, 0x4b, 0xe6, 0x63, 0x6d, - 0xf4, 0x35, 0xcb, 0x2e, 0x3f, 0x6b, 0x16, 0xa6, 0x25, 0x8c, 0x7f, 0x62, 0x01, 0x44, 0x57, 0x21, - 0xed, 0x33, 0x42, 0x65, 0x11, 0x9f, 0x4c, 0x4a, 0xe6, 0x5b, 0x8c, 0xd0, 0xf6, 0x7e, 0x35, 0xc5, - 0x99, 0xb9, 0x00, 0x0b, 0x0a, 0xb4, 0x05, 0x99, 0x0a, 0x7f, 0x14, 0x59, 0xa7, 0xa7, 0x92, 0xb8, - 0x3a, 0x7f, 0xc4, 0x04, 0x69, 0x20, 0x24, 0x38, 0x60, 0x41, 0x0f, 0x60, 0x8e, 0xc5, 0x42, 0x28, - 0x9e, 0x6b, 0x84, 0x7d, 0xa9, 0x6f, 0xe0, 0x75, 0xd4, 0x6a, 0x16, 0xe6, 0xe2, 0x2a, 0xdc, 0xe5, - 0x40, 0x2d, 0xc2, 0x74, 0xc7, 0x05, 0x93, 0xbb, 0xac, 0x7e, 0xe9, 0xd1, 0xd3, 0xfc, 0xd8, 0xe3, - 0xa7, 0xf9, 0xb1, 0x27, 0x4f, 0xf3, 0x63, 0x9f, 0xb7, 0xf2, 0xca, 0xa3, 0x56, 0x5e, 0x79, 0xdc, - 0xca, 0x2b, 0x4f, 0x5a, 0x79, 0xe5, 0xd7, 0x56, 0x5e, 0xf9, 0xf2, 0xb7, 0xfc, 0xd8, 0x9d, 0xfc, - 0xf0, 0xff, 0xc5, 0xfe, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xfd, 0x4d, 0x1e, 0x25, 0xc5, 0x15, 0x00, - 0x00, + proto.RegisterFile("k8s.io/api/flowcontrol/v1beta2/generated.proto", fileDescriptor_2e620af2eea53237) +} + +var fileDescriptor_2e620af2eea53237 = []byte{ + // 1602 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 
0xec, 0x58, 0xcd, 0x73, 0xdb, 0xd4, + 0x16, 0x8f, 0x1c, 0x3b, 0x89, 0x4f, 0x3e, 0x7b, 0xd3, 0x4c, 0xfc, 0xd2, 0x37, 0x76, 0xaa, 0x37, + 0xf3, 0xfa, 0x1e, 0x6d, 0xe5, 0x36, 0xb4, 0xb4, 0xc0, 0xf0, 0x11, 0xa5, 0xa5, 0x94, 0x26, 0x69, + 0x7a, 0xd3, 0x42, 0xa7, 0x74, 0x86, 0x2a, 0xf2, 0x8d, 0xad, 0xc6, 0xfa, 0xa8, 0xae, 0x94, 0x10, + 0xba, 0x61, 0xf8, 0x0b, 0x58, 0xc3, 0x92, 0x05, 0x2b, 0x36, 0x6c, 0x59, 0xb0, 0xa4, 0xc3, 0xaa, + 0xcb, 0xae, 0x0c, 0x35, 0x2b, 0xfe, 0x03, 0xe8, 0x0c, 0x33, 0xcc, 0xbd, 0xba, 0x92, 0x2c, 0xdb, + 0xb2, 0x3c, 0xed, 0x4c, 0x57, 0xec, 0xa2, 0x73, 0x7f, 0xe7, 0x77, 0xee, 0x39, 0xf7, 0x7c, 0x39, + 0xa0, 0xec, 0x5d, 0xa4, 0x8a, 0x61, 0x57, 0x35, 0xc7, 0xa8, 0xee, 0x36, 0xed, 0x03, 0xdd, 0xb6, + 0x3c, 0xd7, 0x6e, 0x56, 0xf7, 0xcf, 0xee, 0x10, 0x4f, 0x5b, 0xa9, 0xd6, 0x89, 0x45, 0x5c, 0xcd, + 0x23, 0x35, 0xc5, 0x71, 0x6d, 0xcf, 0x46, 0xe5, 0x00, 0xaf, 0x68, 0x8e, 0xa1, 0x74, 0xe0, 0x15, + 0x81, 0x5f, 0x3a, 0x5d, 0x37, 0xbc, 0x86, 0xbf, 0xa3, 0xe8, 0xb6, 0x59, 0xad, 0xdb, 0x75, 0xbb, + 0xca, 0xd5, 0x76, 0xfc, 0x5d, 0xfe, 0xc5, 0x3f, 0xf8, 0x5f, 0x01, 0xdd, 0xd2, 0xb9, 0xd8, 0xbc, + 0xa9, 0xe9, 0x0d, 0xc3, 0x22, 0xee, 0x61, 0xd5, 0xd9, 0xab, 0x33, 0x01, 0xad, 0x9a, 0xc4, 0xd3, + 0xaa, 0xfb, 0x67, 0xbb, 0x2f, 0xb1, 0x54, 0x4d, 0xd3, 0x72, 0x7d, 0xcb, 0x33, 0x4c, 0xd2, 0xa3, + 0xf0, 0x5a, 0x96, 0x02, 0xd5, 0x1b, 0xc4, 0xd4, 0xba, 0xf5, 0xe4, 0x1f, 0x24, 0x58, 0xbe, 0xfc, + 0x29, 0x31, 0x1d, 0x6f, 0xcb, 0x35, 0x6c, 0xd7, 0xf0, 0x0e, 0xd7, 0xc9, 0x3e, 0x69, 0xae, 0xd9, + 0xd6, 0xae, 0x51, 0xf7, 0x5d, 0xcd, 0x33, 0x6c, 0x0b, 0xdd, 0x86, 0x92, 0x65, 0x9b, 0x86, 0xa5, + 0x31, 0xb9, 0xee, 0xbb, 0x2e, 0xb1, 0xf4, 0xc3, 0xed, 0x86, 0xe6, 0x12, 0x5a, 0x92, 0x96, 0xa5, + 0xff, 0x15, 0xd4, 0x7f, 0xb7, 0x5b, 0x95, 0xd2, 0x66, 0x0a, 0x06, 0xa7, 0x6a, 0xa3, 0xb7, 0x60, + 0xb6, 0x49, 0xac, 0x9a, 0xb6, 0xd3, 0x24, 0x5b, 0xc4, 0xd5, 0x89, 0xe5, 0x95, 0x72, 0x9c, 0x70, + 0xbe, 0xdd, 0xaa, 0xcc, 0xae, 0x27, 0x8f, 0x70, 0x37, 0x56, 0xbe, 0x03, 0x8b, 0xef, 0x35, 0xed, + 0x83, 0x4b, 0x06, 0xf5, 0x0c, 0xab, 0xee, 0x1b, 0xb4, 0x41, 0xdc, 0x0d, 0xe2, 0x35, 0xec, 0x1a, + 0x7a, 0x07, 0xf2, 0xde, 0xa1, 0x43, 0xf8, 0xfd, 0x8a, 0xea, 0xc9, 0x47, 0xad, 0xca, 0x48, 0xbb, + 0x55, 0xc9, 0xdf, 0x3c, 0x74, 0xc8, 0xb3, 0x56, 0xe5, 0x58, 0x8a, 0x1a, 0x3b, 0xc6, 0x5c, 0x51, + 0xfe, 0x2a, 0x07, 0xc0, 0x50, 0xdb, 0x3c, 0x70, 0xe8, 0x1e, 0x4c, 0xb0, 0xc7, 0xaa, 0x69, 0x9e, + 0xc6, 0x39, 0x27, 0x57, 0xce, 0x28, 0x71, 0xa6, 0x44, 0x31, 0x57, 0x9c, 0xbd, 0x3a, 0x13, 0x50, + 0x85, 0xa1, 0x95, 0xfd, 0xb3, 0xca, 0xf5, 0x9d, 0xfb, 0x44, 0xf7, 0x36, 0x88, 0xa7, 0xa9, 0x48, + 0xdc, 0x02, 0x62, 0x19, 0x8e, 0x58, 0xd1, 0x16, 0xe4, 0xa9, 0x43, 0x74, 0x1e, 0x80, 0xc9, 0x15, + 0x45, 0x19, 0x9c, 0x87, 0x4a, 0x7c, 0xb7, 0x6d, 0x87, 0xe8, 0xea, 0x54, 0xe8, 0x21, 0xfb, 0xc2, + 0x9c, 0x09, 0xdd, 0x86, 0x31, 0xea, 0x69, 0x9e, 0x4f, 0x4b, 0xa3, 0x3d, 0x37, 0xce, 0xe2, 0xe4, + 0x7a, 0xea, 0x8c, 0x60, 0x1d, 0x0b, 0xbe, 0xb1, 0xe0, 0x93, 0x9f, 0xe4, 0x60, 0x3e, 0x06, 0xaf, + 0xd9, 0x56, 0xcd, 0xe0, 0x99, 0xf2, 0x66, 0x22, 0xea, 0x27, 0xba, 0xa2, 0xbe, 0xd8, 0x47, 0x25, + 0x8e, 0x38, 0x7a, 0x3d, 0xba, 0x6e, 0x8e, 0xab, 0x1f, 0x4f, 0x1a, 0x7f, 0xd6, 0xaa, 0xcc, 0x46, + 0x6a, 0xc9, 0xfb, 0xa0, 0x7d, 0x40, 0x4d, 0x8d, 0x7a, 0x37, 0x5d, 0xcd, 0xa2, 0x01, 0xad, 0x61, + 0x12, 0xe1, 0xf5, 0x2b, 0xc3, 0xbd, 0x13, 0xd3, 0x50, 0x97, 0x84, 0x49, 0xb4, 0xde, 0xc3, 0x86, + 0xfb, 0x58, 0x40, 0xff, 0x85, 0x31, 0x97, 0x68, 0xd4, 0xb6, 0x4a, 0x79, 0x7e, 0xe5, 0x28, 0x5e, + 0x98, 0x4b, 0xb1, 0x38, 0x45, 0xff, 0x87, 0x71, 0x93, 0x50, 0xaa, 0xd5, 0x49, 0xa9, 
0xc0, 0x81, + 0xb3, 0x02, 0x38, 0xbe, 0x11, 0x88, 0x71, 0x78, 0x2e, 0xff, 0x28, 0xc1, 0x4c, 0x1c, 0xa7, 0x75, + 0x83, 0x7a, 0xe8, 0x6e, 0x4f, 0xee, 0x29, 0xc3, 0xf9, 0xc4, 0xb4, 0x79, 0xe6, 0xcd, 0x09, 0x73, + 0x13, 0xa1, 0xa4, 0x23, 0xef, 0xae, 0x43, 0xc1, 0xf0, 0x88, 0xc9, 0xa2, 0x3e, 0xda, 0x15, 0xae, + 0x8c, 0x24, 0x51, 0xa7, 0x05, 0x6d, 0xe1, 0x2a, 0x23, 0xc0, 0x01, 0x8f, 0xfc, 0xfb, 0x68, 0xa7, + 0x07, 0x2c, 0x1f, 0xd1, 0xb7, 0x12, 0x2c, 0x39, 0xa9, 0x0d, 0x46, 0x38, 0xb5, 0x96, 0x65, 0x39, + 0xbd, 0x45, 0x61, 0xb2, 0x4b, 0x58, 0x5f, 0x21, 0xaa, 0x2c, 0xae, 0xb4, 0x34, 0x00, 0x3c, 0xe0, + 0x2a, 0xe8, 0x03, 0x40, 0xa6, 0xe6, 0xb1, 0x88, 0xd6, 0xb7, 0x5c, 0xa2, 0x93, 0x1a, 0x63, 0x15, + 0x4d, 0x29, 0xca, 0x8e, 0x8d, 0x1e, 0x04, 0xee, 0xa3, 0x85, 0xbe, 0x90, 0x60, 0xbe, 0xd6, 0xdb, + 0x64, 0x44, 0x5e, 0x5e, 0x18, 0x26, 0xd0, 0x7d, 0x7a, 0x94, 0xba, 0xd8, 0x6e, 0x55, 0xe6, 0xfb, + 0x1c, 0xe0, 0x7e, 0xc6, 0xd0, 0x5d, 0x28, 0xb8, 0x7e, 0x93, 0xd0, 0x52, 0x9e, 0x3f, 0x6f, 0xa6, + 0xd5, 0x2d, 0xbb, 0x69, 0xe8, 0x87, 0x98, 0xa9, 0x7c, 0x64, 0x78, 0x8d, 0x6d, 0x9f, 0xf7, 0x2a, + 0x1a, 0xbf, 0x35, 0x3f, 0xc2, 0x01, 0xa9, 0xfc, 0x10, 0xe6, 0xba, 0x9b, 0x06, 0xaa, 0x03, 0xe8, + 0x61, 0x9d, 0xb2, 0x01, 0xc1, 0xcc, 0xbe, 0x3a, 0x7c, 0x56, 0x45, 0x35, 0x1e, 0xf7, 0xcb, 0x48, + 0x44, 0x71, 0x07, 0xb5, 0x7c, 0x06, 0xa6, 0xae, 0xb8, 0xb6, 0xef, 0x88, 0x3b, 0xa2, 0x65, 0xc8, + 0x5b, 0x9a, 0x19, 0x76, 0x9f, 0xa8, 0x23, 0x6e, 0x6a, 0x26, 0xc1, 0xfc, 0x44, 0xfe, 0x46, 0x82, + 0xe9, 0x75, 0xc3, 0x34, 0x3c, 0x4c, 0xa8, 0x63, 0x5b, 0x94, 0xa0, 0xf3, 0x89, 0x8e, 0x75, 0xbc, + 0xab, 0x63, 0x1d, 0x49, 0x80, 0x3b, 0x7a, 0xd5, 0xc7, 0x30, 0xfe, 0xc0, 0x27, 0xbe, 0x61, 0xd5, + 0x45, 0xbf, 0x3e, 0x97, 0xe5, 0xe0, 0x8d, 0x00, 0x9e, 0xc8, 0x36, 0x75, 0x92, 0xb5, 0x00, 0x71, + 0x82, 0x43, 0x46, 0xf9, 0xaf, 0x1c, 0x1c, 0xe7, 0x86, 0x49, 0x6d, 0xc0, 0x54, 0xbe, 0x0b, 0x25, + 0x8d, 0x52, 0xdf, 0x25, 0xb5, 0xb4, 0xa9, 0xbc, 0x2c, 0xbc, 0x29, 0xad, 0xa6, 0xe0, 0x70, 0x2a, + 0x03, 0xba, 0x0f, 0xd3, 0xcd, 0x4e, 0xdf, 0x85, 0x9b, 0xa7, 0xb3, 0xdc, 0x4c, 0x04, 0x4c, 0x5d, + 0x10, 0x37, 0x48, 0x06, 0x1d, 0x27, 0xa9, 0xfb, 0x6d, 0x01, 0xa3, 0xc3, 0x6f, 0x01, 0xe8, 0x3a, + 0x2c, 0xec, 0xd8, 0xae, 0x6b, 0x1f, 0x18, 0x56, 0x9d, 0xdb, 0x09, 0x49, 0xf2, 0x9c, 0xe4, 0x5f, + 0xed, 0x56, 0x65, 0x41, 0xed, 0x07, 0xc0, 0xfd, 0xf5, 0xe4, 0x03, 0x58, 0xd8, 0x64, 0x3d, 0x85, + 0xda, 0xbe, 0xab, 0x93, 0xb8, 0x20, 0x50, 0x05, 0x0a, 0xfb, 0xc4, 0xdd, 0x09, 0x92, 0xba, 0xa8, + 0x16, 0x59, 0x39, 0x7c, 0xc8, 0x04, 0x38, 0x90, 0x33, 0x4f, 0xac, 0x58, 0xf3, 0x16, 0x5e, 0xa7, + 0xa5, 0x31, 0x0e, 0xe5, 0x9e, 0x6c, 0x26, 0x8f, 0x70, 0x37, 0x56, 0x6e, 0xe5, 0x60, 0x31, 0xa5, + 0xfe, 0xd0, 0x2d, 0x98, 0xa0, 0xe2, 0x6f, 0x51, 0x53, 0x27, 0xb2, 0xde, 0x42, 0xe8, 0xc6, 0xdd, + 0x3f, 0x24, 0xc3, 0x11, 0x15, 0xb2, 0x61, 0xda, 0x15, 0x57, 0xe0, 0x36, 0xc5, 0x14, 0x58, 0xc9, + 0xe2, 0xee, 0x8d, 0x4e, 0xfc, 0xd8, 0xb8, 0x93, 0x10, 0x27, 0xf9, 0xd1, 0x43, 0x98, 0xeb, 0x70, + 0x3b, 0xb0, 0x39, 0xca, 0x6d, 0x9e, 0xcf, 0xb2, 0xd9, 0xf7, 0x51, 0xd4, 0x92, 0x30, 0x3b, 0xb7, + 0xd9, 0x45, 0x8b, 0x7b, 0x0c, 0xc9, 0x3f, 0xe7, 0x60, 0xc0, 0x60, 0x78, 0x09, 0x4b, 0xde, 0xbd, + 0xc4, 0x92, 0xf7, 0xf6, 0xf3, 0x4f, 0xbc, 0xd4, 0xa5, 0xaf, 0xd1, 0xb5, 0xf4, 0xbd, 0xfb, 0x02, + 0x36, 0x06, 0x2f, 0x81, 0x7f, 0xe4, 0xe0, 0x3f, 0xe9, 0xca, 0xf1, 0x52, 0x78, 0x2d, 0xd1, 0x62, + 0x2f, 0x74, 0xb5, 0xd8, 0x13, 0x43, 0x50, 0xfc, 0xb3, 0x24, 0x76, 0x2d, 0x89, 0xbf, 0x48, 0x50, + 0x4e, 0x8f, 0xdb, 0x4b, 0x58, 0x1a, 0x3f, 0x49, 0x2e, 0x8d, 0x6f, 0x3c, 0x7f, 0x92, 0xa5, 0x2c, + 0x91, 0x57, 
0x06, 0xe5, 0x56, 0xb4, 0xee, 0x0d, 0x31, 0xf2, 0xbf, 0xcb, 0x0d, 0x0a, 0x15, 0xdf, + 0x4e, 0x33, 0x7e, 0xb5, 0x24, 0xb4, 0x2f, 0x5b, 0x6c, 0xf4, 0x98, 0x6c, 0x7a, 0x04, 0x09, 0xd9, + 0x80, 0xf1, 0x66, 0x30, 0xab, 0x45, 0x51, 0xaf, 0x0e, 0x35, 0x22, 0x07, 0x8d, 0xf6, 0x60, 0x2d, + 0x10, 0x30, 0x1c, 0xd2, 0xa3, 0x1a, 0x8c, 0x11, 0xfe, 0x53, 0x7d, 0xd8, 0xca, 0xce, 0xfa, 0x61, + 0xaf, 0x02, 0xcb, 0xc2, 0x00, 0x85, 0x05, 0xb7, 0xfc, 0xb5, 0x04, 0xcb, 0x59, 0x2d, 0x01, 0x1d, + 0xf4, 0x59, 0xf1, 0x5e, 0x60, 0x7d, 0x1f, 0x7e, 0xe5, 0xfb, 0x5e, 0x82, 0xa3, 0xfd, 0x36, 0x29, + 0x56, 0x64, 0x6c, 0x7d, 0x8a, 0x76, 0x9f, 0xa8, 0xc8, 0x6e, 0x70, 0x29, 0x16, 0xa7, 0xe8, 0x14, + 0x4c, 0x34, 0x34, 0xab, 0xb6, 0x6d, 0x7c, 0x16, 0x6e, 0xf5, 0x51, 0x9a, 0xbf, 0x2f, 0xe4, 0x38, + 0x42, 0xa0, 0x4b, 0x30, 0xc7, 0xf5, 0xd6, 0x89, 0x55, 0xf7, 0x1a, 0xfc, 0x45, 0xc4, 0x6a, 0x12, + 0x4d, 0x9d, 0x1b, 0x5d, 0xe7, 0xb8, 0x47, 0x43, 0xfe, 0x53, 0x02, 0xf4, 0x3c, 0xdb, 0xc4, 0x49, + 0x28, 0x6a, 0x8e, 0xc1, 0x57, 0xdc, 0xa0, 0xd0, 0x8a, 0xea, 0x74, 0xbb, 0x55, 0x29, 0xae, 0x6e, + 0x5d, 0x0d, 0x84, 0x38, 0x3e, 0x67, 0xe0, 0x70, 0xd0, 0x06, 0x03, 0x55, 0x80, 0x43, 0xc3, 0x14, + 0xc7, 0xe7, 0xe8, 0x22, 0x4c, 0xe9, 0x4d, 0x9f, 0x7a, 0xc4, 0xdd, 0xd6, 0x6d, 0x87, 0xf0, 0xc6, + 0x34, 0xa1, 0x1e, 0x15, 0x3e, 0x4d, 0xad, 0x75, 0x9c, 0xe1, 0x04, 0x12, 0x29, 0x00, 0xac, 0xac, + 0xa8, 0xa3, 0x31, 0x3b, 0x05, 0x6e, 0x67, 0x86, 0x3d, 0xd8, 0x66, 0x24, 0xc5, 0x1d, 0x08, 0xf9, + 0x3e, 0x2c, 0x6c, 0x13, 0x77, 0xdf, 0xd0, 0xc9, 0xaa, 0xae, 0xdb, 0xbe, 0xe5, 0x85, 0xcb, 0x7a, + 0x15, 0x8a, 0x11, 0x4c, 0x54, 0xde, 0x11, 0x61, 0xbf, 0x18, 0x71, 0xe1, 0x18, 0x13, 0x95, 0x7a, + 0x2e, 0xb5, 0xd4, 0x7f, 0xca, 0xc1, 0x78, 0x4c, 0x9f, 0xdf, 0x33, 0xac, 0x9a, 0x60, 0x3e, 0x16, + 0xa2, 0xaf, 0x19, 0x56, 0xed, 0x59, 0xab, 0x32, 0x29, 0x60, 0xec, 0x13, 0x73, 0x20, 0xba, 0x0a, + 0x79, 0x9f, 0x12, 0x57, 0x14, 0xf1, 0xc9, 0xac, 0x64, 0xbe, 0x45, 0x89, 0x1b, 0xee, 0x57, 0x13, + 0x8c, 0x99, 0x09, 0x30, 0xa7, 0x40, 0x1b, 0x50, 0xa8, 0xb3, 0x47, 0x11, 0x75, 0x7a, 0x2a, 0x8b, + 0xab, 0xf3, 0x47, 0x4c, 0x90, 0x06, 0x5c, 0x82, 0x03, 0x16, 0xf4, 0x00, 0x66, 0x68, 0x22, 0x84, + 0xfc, 0xb9, 0x86, 0xd8, 0x97, 0xfa, 0x06, 0x5e, 0x45, 0xed, 0x56, 0x65, 0x26, 0x79, 0x84, 0xbb, + 0x0c, 0xc8, 0x55, 0x98, 0xec, 0x70, 0x30, 0xbb, 0xcb, 0xaa, 0x97, 0x1e, 0x3d, 0x2d, 0x8f, 0x3c, + 0x7e, 0x5a, 0x1e, 0x79, 0xf2, 0xb4, 0x3c, 0xf2, 0x79, 0xbb, 0x2c, 0x3d, 0x6a, 0x97, 0xa5, 0xc7, + 0xed, 0xb2, 0xf4, 0xa4, 0x5d, 0x96, 0x7e, 0x6d, 0x97, 0xa5, 0x2f, 0x7f, 0x2b, 0x8f, 0xdc, 0x29, + 0x0f, 0xfe, 0x5f, 0xec, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xe3, 0xd5, 0xd0, 0x62, 0xac, 0x15, + 0x00, 0x00, } func (m *ExemptPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto index a832114afe..d6073fc925 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto @@ -76,7 +76,7 @@ message FlowSchema { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a FlowSchema. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -101,7 +101,7 @@ message FlowSchemaCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -115,7 +115,7 @@ message FlowSchemaList { // `metadata` is the standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of FlowSchemas. repeated FlowSchema items = 2; @@ -295,7 +295,7 @@ message PriorityLevelConfiguration { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a "request-priority". // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -320,7 +320,7 @@ message PriorityLevelConfigurationCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -334,7 +334,7 @@ message PriorityLevelConfigurationList { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of request-priorities. repeated PriorityLevelConfiguration items = 2; diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1beta3/generated.pb.go index c6598306d9..e0a3fc1e18 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta3/generated.pb.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto +// source: k8s.io/api/flowcontrol/v1beta3/generated.proto package v1beta3 @@ -46,7 +46,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} } func (*ExemptPriorityLevelConfiguration) ProtoMessage() {} func (*ExemptPriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_803504887082f044, []int{0} + return fileDescriptor_52ab6629c083d251, []int{0} } func (m *ExemptPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -74,7 +74,7 @@ var xxx_messageInfo_ExemptPriorityLevelConfiguration proto.InternalMessageInfo func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} } func (*FlowDistinguisherMethod) ProtoMessage() {} func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) { - return fileDescriptor_803504887082f044, []int{1} + return fileDescriptor_52ab6629c083d251, []int{1} } func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -102,7 +102,7 @@ var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo func (m *FlowSchema) Reset() { *m = FlowSchema{} } func (*FlowSchema) ProtoMessage() {} func (*FlowSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_803504887082f044, []int{2} + return fileDescriptor_52ab6629c083d251, []int{2} } func (m *FlowSchema) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -130,7 +130,7 @@ var xxx_messageInfo_FlowSchema proto.InternalMessageInfo func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} } func (*FlowSchemaCondition) ProtoMessage() {} func (*FlowSchemaCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_803504887082f044, []int{3} + return fileDescriptor_52ab6629c083d251, []int{3} } func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -158,7 +158,7 @@ var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} } func (*FlowSchemaList) ProtoMessage() {} func (*FlowSchemaList) Descriptor() ([]byte, []int) { - return fileDescriptor_803504887082f044, []int{4} + return fileDescriptor_52ab6629c083d251, []int{4} } func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -186,7 +186,7 @@ var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} } func (*FlowSchemaSpec) ProtoMessage() {} func (*FlowSchemaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_803504887082f044, []int{5} + return fileDescriptor_52ab6629c083d251, []int{5} } func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -214,7 +214,7 @@ var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} } func (*FlowSchemaStatus) ProtoMessage() {} func (*FlowSchemaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_803504887082f044, []int{6} + return fileDescriptor_52ab6629c083d251, []int{6} } func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -242,7 +242,7 @@ var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo func (m *GroupSubject) Reset() { *m = GroupSubject{} } func (*GroupSubject) ProtoMessage() {} func (*GroupSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_803504887082f044, 
[]int{7} + return fileDescriptor_52ab6629c083d251, []int{7} } func (m *GroupSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -270,7 +270,7 @@ var xxx_messageInfo_GroupSubject proto.InternalMessageInfo func (m *LimitResponse) Reset() { *m = LimitResponse{} } func (*LimitResponse) ProtoMessage() {} func (*LimitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_803504887082f044, []int{8} + return fileDescriptor_52ab6629c083d251, []int{8} } func (m *LimitResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -298,7 +298,7 @@ var xxx_messageInfo_LimitResponse proto.InternalMessageInfo func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} } func (*LimitedPriorityLevelConfiguration) ProtoMessage() {} func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_803504887082f044, []int{9} + return fileDescriptor_52ab6629c083d251, []int{9} } func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -326,7 +326,7 @@ var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} } func (*NonResourcePolicyRule) ProtoMessage() {} func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_803504887082f044, []int{10} + return fileDescriptor_52ab6629c083d251, []int{10} } func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -354,7 +354,7 @@ var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} } func (*PolicyRulesWithSubjects) ProtoMessage() {} func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) { - return fileDescriptor_803504887082f044, []int{11} + return fileDescriptor_52ab6629c083d251, []int{11} } func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -382,7 +382,7 @@ var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} } func (*PriorityLevelConfiguration) ProtoMessage() {} func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_803504887082f044, []int{12} + return fileDescriptor_52ab6629c083d251, []int{12} } func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -410,7 +410,7 @@ var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} } func (*PriorityLevelConfigurationCondition) ProtoMessage() {} func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_803504887082f044, []int{13} + return fileDescriptor_52ab6629c083d251, []int{13} } func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -438,7 +438,7 @@ var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInf func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} } func (*PriorityLevelConfigurationList) ProtoMessage() {} func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_803504887082f044, []int{14} + return fileDescriptor_52ab6629c083d251, []int{14} } func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) 
@@ -466,7 +466,7 @@ var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} } func (*PriorityLevelConfigurationReference) ProtoMessage() {} func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) { - return fileDescriptor_803504887082f044, []int{15} + return fileDescriptor_52ab6629c083d251, []int{15} } func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -494,7 +494,7 @@ var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInf func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} } func (*PriorityLevelConfigurationSpec) ProtoMessage() {} func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_803504887082f044, []int{16} + return fileDescriptor_52ab6629c083d251, []int{16} } func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -522,7 +522,7 @@ var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} } func (*PriorityLevelConfigurationStatus) ProtoMessage() {} func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_803504887082f044, []int{17} + return fileDescriptor_52ab6629c083d251, []int{17} } func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -550,7 +550,7 @@ var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} } func (*QueuingConfiguration) ProtoMessage() {} func (*QueuingConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_803504887082f044, []int{18} + return fileDescriptor_52ab6629c083d251, []int{18} } func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -578,7 +578,7 @@ var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} } func (*ResourcePolicyRule) ProtoMessage() {} func (*ResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_803504887082f044, []int{19} + return fileDescriptor_52ab6629c083d251, []int{19} } func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -606,7 +606,7 @@ var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} } func (*ServiceAccountSubject) ProtoMessage() {} func (*ServiceAccountSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_803504887082f044, []int{20} + return fileDescriptor_52ab6629c083d251, []int{20} } func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -634,7 +634,7 @@ var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} func (*Subject) Descriptor() ([]byte, []int) { - return fileDescriptor_803504887082f044, []int{21} + return fileDescriptor_52ab6629c083d251, []int{21} } func (m *Subject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -662,7 +662,7 @@ var xxx_messageInfo_Subject proto.InternalMessageInfo func (m *UserSubject) Reset() { *m = UserSubject{} } func (*UserSubject) ProtoMessage() {} func (*UserSubject) Descriptor() 
([]byte, []int) { - return fileDescriptor_803504887082f044, []int{22} + return fileDescriptor_52ab6629c083d251, []int{22} } func (m *UserSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -714,112 +714,111 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto", fileDescriptor_803504887082f044) -} - -var fileDescriptor_803504887082f044 = []byte{ - // 1604 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcb, 0x73, 0xdb, 0x54, - 0x17, 0x8f, 0x1c, 0x3b, 0x89, 0x4f, 0x9e, 0xbd, 0x69, 0x26, 0xfe, 0xd2, 0x6f, 0xec, 0x54, 0xdf, - 0xcc, 0x57, 0xa0, 0xad, 0xdc, 0x27, 0x2d, 0x30, 0x3c, 0xaa, 0xb4, 0x94, 0xd2, 0x24, 0x4d, 0x6f, - 0x5a, 0xe8, 0x94, 0xce, 0x50, 0x59, 0xbe, 0xb1, 0xd5, 0x58, 0x8f, 0xea, 0x4a, 0x0e, 0xa1, 0x1b, - 0x86, 0xbf, 0x80, 0x35, 0x2c, 0x59, 0xb0, 0x62, 0xc3, 0x96, 0x05, 0x4b, 0x3a, 0xac, 0xba, 0xec, - 0xca, 0x50, 0xb3, 0xe2, 0x3f, 0x80, 0xce, 0x30, 0xc3, 0xdc, 0xab, 0x2b, 0xc9, 0xf2, 0x4b, 0x9e, - 0x74, 0xa6, 0x2b, 0x76, 0xd1, 0x79, 0xfc, 0xce, 0xbd, 0xe7, 0x9e, 0xc7, 0xcf, 0x81, 0xab, 0xbb, - 0x17, 0xa9, 0x62, 0xd8, 0xe5, 0x5d, 0xbf, 0x42, 0x5c, 0x8b, 0x78, 0x84, 0x96, 0x9b, 0xc4, 0xaa, - 0xda, 0x6e, 0x59, 0x28, 0x34, 0xc7, 0x28, 0xef, 0x34, 0xec, 0x3d, 0xdd, 0xb6, 0x3c, 0xd7, 0x6e, - 0x94, 0x9b, 0xa7, 0x2b, 0xc4, 0xd3, 0xce, 0x96, 0x6b, 0xc4, 0x22, 0xae, 0xe6, 0x91, 0xaa, 0xe2, - 0xb8, 0xb6, 0x67, 0xa3, 0x62, 0x60, 0xaf, 0x68, 0x8e, 0xa1, 0x74, 0xd8, 0x2b, 0xc2, 0x7e, 0xe5, - 0x64, 0xcd, 0xf0, 0xea, 0x7e, 0x45, 0xd1, 0x6d, 0xb3, 0x5c, 0xb3, 0x6b, 0x76, 0x99, 0xbb, 0x55, - 0xfc, 0x1d, 0xfe, 0xc5, 0x3f, 0xf8, 0x5f, 0x01, 0xdc, 0xca, 0xb9, 0x38, 0xbc, 0xa9, 0xe9, 0x75, - 0xc3, 0x22, 0xee, 0x7e, 0xd9, 0xd9, 0xad, 0x31, 0x01, 0x2d, 0x9b, 0xc4, 0xd3, 0xca, 0xcd, 0xd3, - 0xdd, 0x87, 0x58, 0x29, 0x0f, 0xf2, 0x72, 0x7d, 0xcb, 0x33, 0x4c, 0xd2, 0xe3, 0xf0, 0x7a, 0x9a, - 0x03, 0xd5, 0xeb, 0xc4, 0xd4, 0xba, 0xfd, 0xe4, 0x1f, 0x25, 0x58, 0xbd, 0xf2, 0x19, 0x31, 0x1d, - 0x6f, 0xcb, 0x35, 0x6c, 0xd7, 0xf0, 0xf6, 0xd7, 0x49, 0x93, 0x34, 0xd6, 0x6c, 0x6b, 0xc7, 0xa8, - 0xf9, 0xae, 0xe6, 0x19, 0xb6, 0x85, 0xee, 0x40, 0xc1, 0xb2, 0x4d, 0xc3, 0xd2, 0x98, 0x5c, 0xf7, - 0x5d, 0x97, 0x58, 0xfa, 0xfe, 0x76, 0x5d, 0x73, 0x09, 0x2d, 0x48, 0xab, 0xd2, 0x2b, 0x39, 0xf5, - 0xbf, 0xed, 0x56, 0xa9, 0xb0, 0x39, 0xc0, 0x06, 0x0f, 0xf4, 0x46, 0x6f, 0xc3, 0x7c, 0x83, 0x58, - 0x55, 0xad, 0xd2, 0x20, 0x5b, 0xc4, 0xd5, 0x89, 0xe5, 0x15, 0x32, 0x1c, 0x70, 0xb1, 0xdd, 0x2a, - 0xcd, 0xaf, 0x27, 0x55, 0xb8, 0xdb, 0x56, 0xbe, 0x0b, 0xcb, 0xef, 0x37, 0xec, 0xbd, 0xcb, 0x06, - 0xf5, 0x0c, 0xab, 0xe6, 0x1b, 0xb4, 0x4e, 0xdc, 0x0d, 0xe2, 0xd5, 0xed, 0x2a, 0x7a, 0x17, 0xb2, - 0xde, 0xbe, 0x43, 0xf8, 0xf9, 0xf2, 0xea, 0xf1, 0xc7, 0xad, 0xd2, 0x58, 0xbb, 0x55, 0xca, 0xde, - 0xda, 0x77, 0xc8, 0xf3, 0x56, 0xe9, 0xc8, 0x00, 0x37, 0xa6, 0xc6, 0xdc, 0x51, 0xfe, 0x3a, 0x03, - 0xc0, 0xac, 0xb6, 0x79, 0xe2, 0xd0, 0x7d, 0x98, 0x62, 0x8f, 0x55, 0xd5, 0x3c, 0x8d, 0x63, 0x4e, - 0x9f, 0x39, 0xa5, 0xc4, 0x95, 0x12, 0xe5, 0x5c, 0x71, 0x76, 0x6b, 0x4c, 0x40, 0x15, 0x66, 0xad, - 0x34, 0x4f, 0x2b, 0x37, 0x2a, 0x0f, 0x88, 0xee, 0x6d, 0x10, 0x4f, 0x53, 0x91, 0x38, 0x05, 0xc4, - 0x32, 0x1c, 0xa1, 0xa2, 0x2d, 0xc8, 0x52, 0x87, 0xe8, 0x3c, 0x01, 0xd3, 0x67, 0x14, 0x65, 0x78, - 0x1d, 0x2a, 0xf1, 0xd9, 0xb6, 0x1d, 0xa2, 0xab, 0x33, 0xe1, 0x0d, 0xd9, 0x17, 0xe6, 0x48, 0xe8, - 0x0e, 0x4c, 0x50, 0x4f, 0xf3, 0x7c, 0x5a, 0x18, 0xef, 0x39, 0x71, 0x1a, 0x26, 0xf7, 0x53, 0xe7, - 0x04, 0xea, 0x44, 0xf0, 0x8d, 
0x05, 0x9e, 0xfc, 0x34, 0x03, 0x8b, 0xb1, 0xf1, 0x9a, 0x6d, 0x55, - 0x0d, 0x5e, 0x29, 0x6f, 0x25, 0xb2, 0x7e, 0xac, 0x2b, 0xeb, 0xcb, 0x7d, 0x5c, 0xe2, 0x8c, 0xa3, - 0x37, 0xa2, 0xe3, 0x66, 0xb8, 0xfb, 0xd1, 0x64, 0xf0, 0xe7, 0xad, 0xd2, 0x7c, 0xe4, 0x96, 0x3c, - 0x0f, 0x6a, 0x02, 0x6a, 0x68, 0xd4, 0xbb, 0xe5, 0x6a, 0x16, 0x0d, 0x60, 0x0d, 0x93, 0x88, 0x5b, - 0xbf, 0x36, 0xda, 0x3b, 0x31, 0x0f, 0x75, 0x45, 0x84, 0x44, 0xeb, 0x3d, 0x68, 0xb8, 0x4f, 0x04, - 0xf4, 0x7f, 0x98, 0x70, 0x89, 0x46, 0x6d, 0xab, 0x90, 0xe5, 0x47, 0x8e, 0xf2, 0x85, 0xb9, 0x14, - 0x0b, 0x2d, 0x7a, 0x15, 0x26, 0x4d, 0x42, 0xa9, 0x56, 0x23, 0x85, 0x1c, 0x37, 0x9c, 0x17, 0x86, - 0x93, 0x1b, 0x81, 0x18, 0x87, 0x7a, 0xf9, 0x27, 0x09, 0xe6, 0xe2, 0x3c, 0xad, 0x1b, 0xd4, 0x43, - 0xf7, 0x7a, 0x6a, 0x4f, 0x19, 0xed, 0x4e, 0xcc, 0x9b, 0x57, 0xde, 0x82, 0x08, 0x37, 0x15, 0x4a, - 0x3a, 0xea, 0xee, 0x06, 0xe4, 0x0c, 0x8f, 0x98, 0x2c, 0xeb, 0xe3, 0x5d, 0xe9, 0x4a, 0x29, 0x12, - 0x75, 0x56, 0xc0, 0xe6, 0xae, 0x31, 0x00, 0x1c, 0xe0, 0xc8, 0x7f, 0x8c, 0x77, 0xde, 0x80, 0xd5, - 0x23, 0xfa, 0x4e, 0x82, 0x15, 0x67, 0xe0, 0x80, 0x11, 0x97, 0x5a, 0x4b, 0x8b, 0x3c, 0x78, 0x44, - 0x61, 0xb2, 0x43, 0xd8, 0x5c, 0x21, 0xaa, 0x2c, 0x8e, 0xb4, 0x32, 0xc4, 0x78, 0xc8, 0x51, 0xd0, - 0x87, 0x80, 0x4c, 0xcd, 0x63, 0x19, 0xad, 0x6d, 0xb9, 0x44, 0x27, 0x55, 0x86, 0x2a, 0x86, 0x52, - 0x54, 0x1d, 0x1b, 0x3d, 0x16, 0xb8, 0x8f, 0x17, 0xfa, 0x52, 0x82, 0xc5, 0x6a, 0xef, 0x90, 0x11, - 0x75, 0x79, 0x61, 0x94, 0x44, 0xf7, 0x99, 0x51, 0xea, 0x72, 0xbb, 0x55, 0x5a, 0xec, 0xa3, 0xc0, - 0xfd, 0x82, 0xa1, 0x7b, 0x90, 0x73, 0xfd, 0x06, 0xa1, 0x85, 0x2c, 0x7f, 0xde, 0xd4, 0xa8, 0x5b, - 0x76, 0xc3, 0xd0, 0xf7, 0x31, 0x73, 0xf9, 0xd8, 0xf0, 0xea, 0xdb, 0x3e, 0x9f, 0x55, 0x34, 0x7e, - 0x6b, 0xae, 0xc2, 0x01, 0xa8, 0xfc, 0x08, 0x16, 0xba, 0x87, 0x06, 0xaa, 0x01, 0xe8, 0x61, 0x9f, - 0xb2, 0x05, 0xc1, 0xc2, 0x9e, 0x1d, 0xbd, 0xaa, 0xa2, 0x1e, 0x8f, 0xe7, 0x65, 0x24, 0xa2, 0xb8, - 0x03, 0x5a, 0x3e, 0x05, 0x33, 0x57, 0x5d, 0xdb, 0x77, 0xc4, 0x19, 0xd1, 0x2a, 0x64, 0x2d, 0xcd, - 0x0c, 0xa7, 0x4f, 0x34, 0x11, 0x37, 0x35, 0x93, 0x60, 0xae, 0x91, 0xbf, 0x95, 0x60, 0x76, 0xdd, - 0x30, 0x0d, 0x0f, 0x13, 0xea, 0xd8, 0x16, 0x25, 0xe8, 0x7c, 0x62, 0x62, 0x1d, 0xed, 0x9a, 0x58, - 0x87, 0x12, 0xc6, 0x1d, 0xb3, 0xea, 0x13, 0x98, 0x7c, 0xe8, 0x13, 0xdf, 0xb0, 0x6a, 0x62, 0x5e, - 0x9f, 0x4b, 0xbb, 0xe0, 0xcd, 0xc0, 0x3c, 0x51, 0x6d, 0xea, 0x34, 0x1b, 0x01, 0x42, 0x83, 0x43, - 0x44, 0xf9, 0xef, 0x0c, 0x1c, 0xe5, 0x81, 0x49, 0x75, 0xc8, 0x56, 0xbe, 0x97, 0xba, 0x95, 0x57, - 0xc5, 0x6d, 0x0e, 0xb2, 0x99, 0x1f, 0xc0, 0x6c, 0xa3, 0xf3, 0xee, 0xe2, 0x9a, 0x27, 0xd3, 0xae, - 0x99, 0x48, 0x98, 0xba, 0x24, 0x4e, 0x90, 0x4c, 0x3a, 0x4e, 0x42, 0xf7, 0x63, 0x01, 0xe3, 0xa3, - 0xb3, 0x00, 0x74, 0x03, 0x96, 0x2a, 0xb6, 0xeb, 0xda, 0x7b, 0x86, 0x55, 0xe3, 0x71, 0x42, 0x90, - 0x2c, 0x07, 0xf9, 0x4f, 0xbb, 0x55, 0x5a, 0x52, 0xfb, 0x19, 0xe0, 0xfe, 0x7e, 0xf2, 0x1e, 0x2c, - 0x6d, 0xb2, 0x99, 0x42, 0x6d, 0xdf, 0xd5, 0x49, 0xdc, 0x10, 0xa8, 0x04, 0xb9, 0x26, 0x71, 0x2b, - 0x41, 0x51, 0xe7, 0xd5, 0x3c, 0x6b, 0x87, 0x8f, 0x98, 0x00, 0x07, 0x72, 0x76, 0x13, 0x2b, 0xf6, - 0xbc, 0x8d, 0xd7, 0x69, 0x61, 0x82, 0x9b, 0xf2, 0x9b, 0x6c, 0x26, 0x55, 0xb8, 0xdb, 0x56, 0x6e, - 0x65, 0x60, 0x79, 0x40, 0xff, 0xa1, 0xdb, 0x30, 0x45, 0xc5, 0xdf, 0xa2, 0xa7, 0x8e, 0xa5, 0xbd, - 0x85, 0xf0, 0x8d, 0xa7, 0x7f, 0x08, 0x86, 0x23, 0x28, 0x64, 0xc3, 0xac, 0x2b, 0x8e, 0xc0, 0x63, - 0x8a, 0x2d, 0x70, 0x26, 0x0d, 0xbb, 0x37, 0x3b, 0xf1, 0x63, 0xe3, 0x4e, 0x40, 0x9c, 0xc4, 0x47, - 0x8f, 0x60, 0xa1, 0xe3, 0xda, 0x41, 0xcc, 0x71, 0x1e, 
0xf3, 0x7c, 0x5a, 0xcc, 0xbe, 0x8f, 0xa2, - 0x16, 0x44, 0xd8, 0x85, 0xcd, 0x2e, 0x58, 0xdc, 0x13, 0x48, 0xfe, 0x25, 0x03, 0x43, 0x16, 0xc3, - 0x4b, 0x20, 0x79, 0xf7, 0x13, 0x24, 0xef, 0x9d, 0x83, 0x6f, 0xbc, 0x81, 0xa4, 0xaf, 0xde, 0x45, - 0xfa, 0xde, 0x7b, 0x81, 0x18, 0xc3, 0x49, 0xe0, 0x9f, 0x19, 0xf8, 0xdf, 0x60, 0xe7, 0x98, 0x14, - 0x5e, 0x4f, 0x8c, 0xd8, 0x0b, 0x5d, 0x23, 0xf6, 0xd8, 0x08, 0x10, 0xff, 0x92, 0xc4, 0x2e, 0x92, - 0xf8, 0xab, 0x04, 0xc5, 0xc1, 0x79, 0x7b, 0x09, 0xa4, 0xf1, 0xd3, 0x24, 0x69, 0x7c, 0xf3, 0xe0, - 0x45, 0x36, 0x80, 0x44, 0x5e, 0x1d, 0x56, 0x5b, 0x11, 0xdd, 0x1b, 0x61, 0xe5, 0x7f, 0x9f, 0x19, - 0x96, 0x2a, 0xce, 0x4e, 0x53, 0x7e, 0xb5, 0x24, 0xbc, 0xaf, 0x58, 0x6c, 0xf5, 0x98, 0x6c, 0x7b, - 0x04, 0x05, 0x59, 0x87, 0xc9, 0x46, 0xb0, 0xab, 0x45, 0x53, 0x5f, 0x1a, 0x69, 0x45, 0x0e, 0x5b, - 0xed, 0x01, 0x2d, 0x10, 0x66, 0x38, 0x84, 0x47, 0x55, 0x98, 0x20, 0xfc, 0xa7, 0xfa, 0xa8, 0x9d, - 0x9d, 0xf6, 0xc3, 0x5e, 0x05, 0x56, 0x85, 0x81, 0x15, 0x16, 0xd8, 0xf2, 0x37, 0x12, 0xac, 0xa6, - 0x8d, 0x04, 0xb4, 0xd7, 0x87, 0xe2, 0xbd, 0x00, 0x7d, 0x1f, 0x9d, 0xf2, 0xfd, 0x20, 0xc1, 0xe1, - 0x7e, 0x4c, 0x8a, 0x35, 0x19, 0xa3, 0x4f, 0x11, 0xf7, 0x89, 0x9a, 0xec, 0x26, 0x97, 0x62, 0xa1, - 0x45, 0x27, 0x60, 0xaa, 0xae, 0x59, 0xd5, 0x6d, 0xe3, 0xf3, 0x90, 0xd5, 0x47, 0x65, 0xfe, 0x81, - 0x90, 0xe3, 0xc8, 0x02, 0x5d, 0x86, 0x05, 0xee, 0xb7, 0x4e, 0xac, 0x9a, 0x57, 0xe7, 0x2f, 0x22, - 0xa8, 0x49, 0xb4, 0x75, 0x6e, 0x76, 0xe9, 0x71, 0x8f, 0x87, 0xfc, 0x97, 0x04, 0xe8, 0x20, 0x6c, - 0xe2, 0x38, 0xe4, 0x35, 0xc7, 0xe0, 0x14, 0x37, 0x68, 0xb4, 0xbc, 0x3a, 0xdb, 0x6e, 0x95, 0xf2, - 0x97, 0xb6, 0xae, 0x05, 0x42, 0x1c, 0xeb, 0x99, 0x71, 0xb8, 0x68, 0x83, 0x85, 0x2a, 0x8c, 0xc3, - 0xc0, 0x14, 0xc7, 0x7a, 0x74, 0x11, 0x66, 0xf4, 0x86, 0x4f, 0x3d, 0xe2, 0x6e, 0xeb, 0xb6, 0x43, - 0xf8, 0x60, 0x9a, 0x52, 0x0f, 0x8b, 0x3b, 0xcd, 0xac, 0x75, 0xe8, 0x70, 0xc2, 0x12, 0x29, 0x00, - 0xac, 0xad, 0xa8, 0xa3, 0xb1, 0x38, 0x39, 0x1e, 0x67, 0x8e, 0x3d, 0xd8, 0x66, 0x24, 0xc5, 0x1d, - 0x16, 0xf2, 0x03, 0x58, 0xda, 0x26, 0x6e, 0xd3, 0xd0, 0xc9, 0x25, 0x5d, 0xb7, 0x7d, 0xcb, 0x0b, - 0xc9, 0x7a, 0x19, 0xf2, 0x91, 0x99, 0xe8, 0xbc, 0x43, 0x22, 0x7e, 0x3e, 0xc2, 0xc2, 0xb1, 0x4d, - 0xd4, 0xea, 0x99, 0x81, 0xad, 0xfe, 0x73, 0x06, 0x26, 0x63, 0xf8, 0xec, 0xae, 0x61, 0x55, 0x05, - 0xf2, 0x91, 0xd0, 0xfa, 0xba, 0x61, 0x55, 0x9f, 0xb7, 0x4a, 0xd3, 0xc2, 0x8c, 0x7d, 0x62, 0x6e, - 0x88, 0xae, 0x41, 0xd6, 0xa7, 0xc4, 0x15, 0x4d, 0x7c, 0x3c, 0xad, 0x98, 0x6f, 0x53, 0xe2, 0x86, - 0xfc, 0x6a, 0x8a, 0x21, 0x33, 0x01, 0xe6, 0x10, 0x68, 0x03, 0x72, 0x35, 0xf6, 0x28, 0xa2, 0x4f, - 0x4f, 0xa4, 0x61, 0x75, 0xfe, 0x88, 0x09, 0xca, 0x80, 0x4b, 0x70, 0x80, 0x82, 0x1e, 0xc2, 0x1c, - 0x4d, 0xa4, 0x90, 0x3f, 0xd7, 0x08, 0x7c, 0xa9, 0x6f, 0xe2, 0x55, 0xd4, 0x6e, 0x95, 0xe6, 0x92, - 0x2a, 0xdc, 0x15, 0x40, 0x2e, 0xc3, 0x74, 0xc7, 0x05, 0xd3, 0xa7, 0xac, 0x7a, 0xf9, 0xf1, 0xb3, - 0xe2, 0xd8, 0x93, 0x67, 0xc5, 0xb1, 0xa7, 0xcf, 0x8a, 0x63, 0x5f, 0xb4, 0x8b, 0xd2, 0xe3, 0x76, - 0x51, 0x7a, 0xd2, 0x2e, 0x4a, 0x4f, 0xdb, 0x45, 0xe9, 0xb7, 0x76, 0x51, 0xfa, 0xea, 0xf7, 0xe2, - 0xd8, 0xdd, 0xe2, 0xf0, 0xff, 0xc5, 0xfe, 0x13, 0x00, 0x00, 0xff, 0xff, 0x1d, 0xc5, 0x22, 0x46, - 0xc5, 0x15, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/flowcontrol/v1beta3/generated.proto", fileDescriptor_52ab6629c083d251) +} + +var fileDescriptor_52ab6629c083d251 = []byte{ + // 1589 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcb, 0x6f, 0xdc, 0x54, + 0x17, 0x8f, 0x27, 0x33, 0x49, 0xe6, 0xe4, 
0xd9, 0x9b, 0x46, 0x99, 0x2f, 0xfd, 0x34, 0x93, 0xfa, + 0x93, 0xbe, 0x02, 0x6d, 0x3d, 0x7d, 0xd2, 0x02, 0xe2, 0x51, 0xa7, 0xa5, 0x94, 0x26, 0x69, 0x7a, + 0xd3, 0x42, 0x55, 0x2a, 0x51, 0xc7, 0x73, 0xe3, 0x71, 0x33, 0x7e, 0xd4, 0xd7, 0x4e, 0x08, 0xdd, + 0x20, 0xfe, 0x02, 0xd6, 0xb0, 0x64, 0xc1, 0x8a, 0x0d, 0x5b, 0x16, 0x2c, 0xa9, 0x58, 0x75, 0xd9, + 0xd5, 0x40, 0x87, 0x15, 0xff, 0x01, 0x54, 0x42, 0x42, 0xf7, 0xfa, 0xda, 0x1e, 0xcf, 0xcb, 0xa3, + 0x54, 0xea, 0x8a, 0x5d, 0x7c, 0xee, 0x39, 0xbf, 0x73, 0xcf, 0xb9, 0xe7, 0xf1, 0x9b, 0x80, 0xb2, + 0x73, 0x91, 0x2a, 0xa6, 0x53, 0xd5, 0x5c, 0xb3, 0xba, 0xdd, 0x70, 0xf6, 0x74, 0xc7, 0xf6, 0x3d, + 0xa7, 0x51, 0xdd, 0x3d, 0xbd, 0x45, 0x7c, 0xed, 0x6c, 0xd5, 0x20, 0x36, 0xf1, 0x34, 0x9f, 0xd4, + 0x14, 0xd7, 0x73, 0x7c, 0x07, 0x95, 0x43, 0x7d, 0x45, 0x73, 0x4d, 0xa5, 0x4d, 0x5f, 0x11, 0xfa, + 0x4b, 0x27, 0x0d, 0xd3, 0xaf, 0x07, 0x5b, 0x8a, 0xee, 0x58, 0x55, 0xc3, 0x31, 0x9c, 0x2a, 0x37, + 0xdb, 0x0a, 0xb6, 0xf9, 0x17, 0xff, 0xe0, 0x7f, 0x85, 0x70, 0x4b, 0xe7, 0x12, 0xf7, 0x96, 0xa6, + 0xd7, 0x4d, 0x9b, 0x78, 0xfb, 0x55, 0x77, 0xc7, 0x60, 0x02, 0x5a, 0xb5, 0x88, 0xaf, 0x55, 0x77, + 0x4f, 0x77, 0x5e, 0x62, 0xa9, 0xda, 0xcf, 0xca, 0x0b, 0x6c, 0xdf, 0xb4, 0x48, 0x97, 0xc1, 0xeb, + 0x59, 0x06, 0x54, 0xaf, 0x13, 0x4b, 0xeb, 0xb4, 0x93, 0x7f, 0x94, 0x60, 0xf9, 0xca, 0x67, 0xc4, + 0x72, 0xfd, 0x0d, 0xcf, 0x74, 0x3c, 0xd3, 0xdf, 0x5f, 0x25, 0xbb, 0xa4, 0xb1, 0xe2, 0xd8, 0xdb, + 0xa6, 0x11, 0x78, 0x9a, 0x6f, 0x3a, 0x36, 0xba, 0x03, 0x25, 0xdb, 0xb1, 0x4c, 0x5b, 0x63, 0x72, + 0x3d, 0xf0, 0x3c, 0x62, 0xeb, 0xfb, 0x9b, 0x75, 0xcd, 0x23, 0xb4, 0x24, 0x2d, 0x4b, 0xaf, 0x14, + 0xd4, 0xff, 0xb6, 0x9a, 0x95, 0xd2, 0x7a, 0x1f, 0x1d, 0xdc, 0xd7, 0x1a, 0xbd, 0x0d, 0xb3, 0x0d, + 0x62, 0xd7, 0xb4, 0xad, 0x06, 0xd9, 0x20, 0x9e, 0x4e, 0x6c, 0xbf, 0x94, 0xe3, 0x80, 0xf3, 0xad, + 0x66, 0x65, 0x76, 0x35, 0x7d, 0x84, 0x3b, 0x75, 0xe5, 0xbb, 0xb0, 0xf8, 0x7e, 0xc3, 0xd9, 0xbb, + 0x6c, 0x52, 0xdf, 0xb4, 0x8d, 0xc0, 0xa4, 0x75, 0xe2, 0xad, 0x11, 0xbf, 0xee, 0xd4, 0xd0, 0xbb, + 0x90, 0xf7, 0xf7, 0x5d, 0xc2, 0xef, 0x57, 0x54, 0x8f, 0x3f, 0x6e, 0x56, 0x46, 0x5a, 0xcd, 0x4a, + 0xfe, 0xd6, 0xbe, 0x4b, 0x9e, 0x37, 0x2b, 0x47, 0xfa, 0x98, 0xb1, 0x63, 0xcc, 0x0d, 0xe5, 0xaf, + 0x73, 0x00, 0x4c, 0x6b, 0x93, 0x27, 0x0e, 0xdd, 0x87, 0x09, 0xf6, 0x58, 0x35, 0xcd, 0xd7, 0x38, + 0xe6, 0xe4, 0x99, 0x53, 0x4a, 0x52, 0x29, 0x71, 0xce, 0x15, 0x77, 0xc7, 0x60, 0x02, 0xaa, 0x30, + 0x6d, 0x65, 0xf7, 0xb4, 0x72, 0x63, 0xeb, 0x01, 0xd1, 0xfd, 0x35, 0xe2, 0x6b, 0x2a, 0x12, 0xb7, + 0x80, 0x44, 0x86, 0x63, 0x54, 0xb4, 0x01, 0x79, 0xea, 0x12, 0x9d, 0x27, 0x60, 0xf2, 0x8c, 0xa2, + 0x0c, 0xae, 0x43, 0x25, 0xb9, 0xdb, 0xa6, 0x4b, 0x74, 0x75, 0x2a, 0x8a, 0x90, 0x7d, 0x61, 0x8e, + 0x84, 0xee, 0xc0, 0x18, 0xf5, 0x35, 0x3f, 0xa0, 0xa5, 0xd1, 0xae, 0x1b, 0x67, 0x61, 0x72, 0x3b, + 0x75, 0x46, 0xa0, 0x8e, 0x85, 0xdf, 0x58, 0xe0, 0xc9, 0x4f, 0x73, 0x30, 0x9f, 0x28, 0xaf, 0x38, + 0x76, 0xcd, 0xe4, 0x95, 0xf2, 0x56, 0x2a, 0xeb, 0xc7, 0x3a, 0xb2, 0xbe, 0xd8, 0xc3, 0x24, 0xc9, + 0x38, 0x7a, 0x23, 0xbe, 0x6e, 0x8e, 0x9b, 0x1f, 0x4d, 0x3b, 0x7f, 0xde, 0xac, 0xcc, 0xc6, 0x66, + 0xe9, 0xfb, 0xa0, 0x5d, 0x40, 0x0d, 0x8d, 0xfa, 0xb7, 0x3c, 0xcd, 0xa6, 0x21, 0xac, 0x69, 0x11, + 0x11, 0xf5, 0x6b, 0xc3, 0xbd, 0x13, 0xb3, 0x50, 0x97, 0x84, 0x4b, 0xb4, 0xda, 0x85, 0x86, 0x7b, + 0x78, 0x40, 0xff, 0x87, 0x31, 0x8f, 0x68, 0xd4, 0xb1, 0x4b, 0x79, 0x7e, 0xe5, 0x38, 0x5f, 0x98, + 0x4b, 0xb1, 0x38, 0x45, 0xaf, 0xc2, 0xb8, 0x45, 0x28, 0xd5, 0x0c, 0x52, 0x2a, 0x70, 0xc5, 0x59, + 0xa1, 0x38, 0xbe, 0x16, 0x8a, 0x71, 0x74, 0x2e, 0xff, 0x24, 0xc1, 
0x4c, 0x92, 0xa7, 0x55, 0x93, + 0xfa, 0xe8, 0x5e, 0x57, 0xed, 0x29, 0xc3, 0xc5, 0xc4, 0xac, 0x79, 0xe5, 0xcd, 0x09, 0x77, 0x13, + 0x91, 0xa4, 0xad, 0xee, 0x6e, 0x40, 0xc1, 0xf4, 0x89, 0xc5, 0xb2, 0x3e, 0xda, 0x91, 0xae, 0x8c, + 0x22, 0x51, 0xa7, 0x05, 0x6c, 0xe1, 0x1a, 0x03, 0xc0, 0x21, 0x8e, 0xfc, 0xc7, 0x68, 0x7b, 0x04, + 0xac, 0x1e, 0xd1, 0x77, 0x12, 0x2c, 0xb9, 0x7d, 0x07, 0x8c, 0x08, 0x6a, 0x25, 0xcb, 0x73, 0xff, + 0x11, 0x85, 0xc9, 0x36, 0x61, 0x73, 0x85, 0xa8, 0xb2, 0xb8, 0xd2, 0xd2, 0x00, 0xe5, 0x01, 0x57, + 0x41, 0x1f, 0x02, 0xb2, 0x34, 0x9f, 0x65, 0xd4, 0xd8, 0xf0, 0x88, 0x4e, 0x6a, 0x0c, 0x55, 0x0c, + 0xa5, 0xb8, 0x3a, 0xd6, 0xba, 0x34, 0x70, 0x0f, 0x2b, 0xf4, 0xa5, 0x04, 0xf3, 0xb5, 0xee, 0x21, + 0x23, 0xea, 0xf2, 0xc2, 0x30, 0x89, 0xee, 0x31, 0xa3, 0xd4, 0xc5, 0x56, 0xb3, 0x32, 0xdf, 0xe3, + 0x00, 0xf7, 0x72, 0x86, 0xee, 0x41, 0xc1, 0x0b, 0x1a, 0x84, 0x96, 0xf2, 0xfc, 0x79, 0x33, 0xbd, + 0x6e, 0x38, 0x0d, 0x53, 0xdf, 0xc7, 0xcc, 0xe4, 0x63, 0xd3, 0xaf, 0x6f, 0x06, 0x7c, 0x56, 0xd1, + 0xe4, 0xad, 0xf9, 0x11, 0x0e, 0x41, 0xe5, 0x47, 0x30, 0xd7, 0x39, 0x34, 0x90, 0x01, 0xa0, 0x47, + 0x7d, 0xca, 0x16, 0x04, 0x73, 0x7b, 0x76, 0xf8, 0xaa, 0x8a, 0x7b, 0x3c, 0x99, 0x97, 0xb1, 0x88, + 0xe2, 0x36, 0x68, 0xf9, 0x14, 0x4c, 0x5d, 0xf5, 0x9c, 0xc0, 0x15, 0x77, 0x44, 0xcb, 0x90, 0xb7, + 0x35, 0x2b, 0x9a, 0x3e, 0xf1, 0x44, 0x5c, 0xd7, 0x2c, 0x82, 0xf9, 0x89, 0xfc, 0xad, 0x04, 0xd3, + 0xab, 0xa6, 0x65, 0xfa, 0x98, 0x50, 0xd7, 0xb1, 0x29, 0x41, 0xe7, 0x53, 0x13, 0xeb, 0x68, 0xc7, + 0xc4, 0x3a, 0x94, 0x52, 0x6e, 0x9b, 0x55, 0x9f, 0xc0, 0xf8, 0xc3, 0x80, 0x04, 0xa6, 0x6d, 0x88, + 0x79, 0x7d, 0x2e, 0x2b, 0xc0, 0x9b, 0xa1, 0x7a, 0xaa, 0xda, 0xd4, 0x49, 0x36, 0x02, 0xc4, 0x09, + 0x8e, 0x10, 0xe5, 0xbf, 0x73, 0x70, 0x94, 0x3b, 0x26, 0xb5, 0x01, 0x5b, 0xf9, 0x5e, 0xe6, 0x56, + 0x5e, 0x16, 0xd1, 0x1c, 0x64, 0x33, 0x3f, 0x80, 0xe9, 0x46, 0x7b, 0xec, 0x22, 0xcc, 0x93, 0x59, + 0x61, 0xa6, 0x12, 0xa6, 0x2e, 0x88, 0x1b, 0xa4, 0x93, 0x8e, 0xd3, 0xd0, 0xbd, 0x58, 0xc0, 0xe8, + 0xf0, 0x2c, 0x00, 0xdd, 0x80, 0x85, 0x2d, 0xc7, 0xf3, 0x9c, 0x3d, 0xd3, 0x36, 0xb8, 0x9f, 0x08, + 0x24, 0xcf, 0x41, 0xfe, 0xd3, 0x6a, 0x56, 0x16, 0xd4, 0x5e, 0x0a, 0xb8, 0xb7, 0x9d, 0xbc, 0x07, + 0x0b, 0xeb, 0x6c, 0xa6, 0x50, 0x27, 0xf0, 0x74, 0x92, 0x34, 0x04, 0xaa, 0x40, 0x61, 0x97, 0x78, + 0x5b, 0x61, 0x51, 0x17, 0xd5, 0x22, 0x6b, 0x87, 0x8f, 0x98, 0x00, 0x87, 0x72, 0x16, 0x89, 0x9d, + 0x58, 0xde, 0xc6, 0xab, 0xb4, 0x34, 0xc6, 0x55, 0x79, 0x24, 0xeb, 0xe9, 0x23, 0xdc, 0xa9, 0x2b, + 0x37, 0x73, 0xb0, 0xd8, 0xa7, 0xff, 0xd0, 0x6d, 0x98, 0xa0, 0xe2, 0x6f, 0xd1, 0x53, 0xc7, 0xb2, + 0xde, 0x42, 0xd8, 0x26, 0xd3, 0x3f, 0x02, 0xc3, 0x31, 0x14, 0x72, 0x60, 0xda, 0x13, 0x57, 0xe0, + 0x3e, 0xc5, 0x16, 0x38, 0x93, 0x85, 0xdd, 0x9d, 0x9d, 0xe4, 0xb1, 0x71, 0x3b, 0x20, 0x4e, 0xe3, + 0xa3, 0x47, 0x30, 0xd7, 0x16, 0x76, 0xe8, 0x73, 0x94, 0xfb, 0x3c, 0x9f, 0xe5, 0xb3, 0xe7, 0xa3, + 0xa8, 0x25, 0xe1, 0x76, 0x6e, 0xbd, 0x03, 0x16, 0x77, 0x39, 0x92, 0x7f, 0xc9, 0xc1, 0x80, 0xc5, + 0xf0, 0x12, 0x48, 0xde, 0xfd, 0x14, 0xc9, 0x7b, 0xe7, 0xe0, 0x1b, 0xaf, 0x2f, 0xe9, 0xab, 0x77, + 0x90, 0xbe, 0xf7, 0x5e, 0xc0, 0xc7, 0x60, 0x12, 0xf8, 0x67, 0x0e, 0xfe, 0xd7, 0xdf, 0x38, 0x21, + 0x85, 0xd7, 0x53, 0x23, 0xf6, 0x42, 0xc7, 0x88, 0x3d, 0x36, 0x04, 0xc4, 0xbf, 0x24, 0xb1, 0x83, + 0x24, 0xfe, 0x2a, 0x41, 0xb9, 0x7f, 0xde, 0x5e, 0x02, 0x69, 0xfc, 0x34, 0x4d, 0x1a, 0xdf, 0x3c, + 0x78, 0x91, 0xf5, 0x21, 0x91, 0x57, 0x07, 0xd5, 0x56, 0x4c, 0xf7, 0x86, 0x58, 0xf9, 0xdf, 0xe7, + 0x06, 0xa5, 0x8a, 0xb3, 0xd3, 0x8c, 0x5f, 0x2d, 0x29, 0xeb, 0x2b, 0x36, 0x5b, 0x3d, 0x16, 
0xdb, + 0x1e, 0x61, 0x41, 0xd6, 0x61, 0xbc, 0x11, 0xee, 0x6a, 0xd1, 0xd4, 0x97, 0x86, 0x5a, 0x91, 0x83, + 0x56, 0x7b, 0x48, 0x0b, 0x84, 0x1a, 0x8e, 0xe0, 0x51, 0x0d, 0xc6, 0x08, 0xff, 0xa9, 0x3e, 0x6c, + 0x67, 0x67, 0xfd, 0xb0, 0x57, 0x81, 0x55, 0x61, 0xa8, 0x85, 0x05, 0xb6, 0xfc, 0x8d, 0x04, 0xcb, + 0x59, 0x23, 0x01, 0xed, 0xf5, 0xa0, 0x78, 0x2f, 0x40, 0xdf, 0x87, 0xa7, 0x7c, 0x3f, 0x48, 0x70, + 0xb8, 0x17, 0x93, 0x62, 0x4d, 0xc6, 0xe8, 0x53, 0xcc, 0x7d, 0xe2, 0x26, 0xbb, 0xc9, 0xa5, 0x58, + 0x9c, 0xa2, 0x13, 0x30, 0x51, 0xd7, 0xec, 0xda, 0xa6, 0xf9, 0x79, 0xc4, 0xea, 0xe3, 0x32, 0xff, + 0x40, 0xc8, 0x71, 0xac, 0x81, 0x2e, 0xc3, 0x1c, 0xb7, 0x5b, 0x25, 0xb6, 0xe1, 0xd7, 0xf9, 0x8b, + 0x08, 0x6a, 0x12, 0x6f, 0x9d, 0x9b, 0x1d, 0xe7, 0xb8, 0xcb, 0x42, 0xfe, 0x4b, 0x02, 0x74, 0x10, + 0x36, 0x71, 0x1c, 0x8a, 0x9a, 0x6b, 0x72, 0x8a, 0x1b, 0x36, 0x5a, 0x51, 0x9d, 0x6e, 0x35, 0x2b, + 0xc5, 0x4b, 0x1b, 0xd7, 0x42, 0x21, 0x4e, 0xce, 0x99, 0x72, 0xb4, 0x68, 0xc3, 0x85, 0x2a, 0x94, + 0x23, 0xc7, 0x14, 0x27, 0xe7, 0xe8, 0x22, 0x4c, 0xe9, 0x8d, 0x80, 0xfa, 0xc4, 0xdb, 0xd4, 0x1d, + 0x97, 0xf0, 0xc1, 0x34, 0xa1, 0x1e, 0x16, 0x31, 0x4d, 0xad, 0xb4, 0x9d, 0xe1, 0x94, 0x26, 0x52, + 0x00, 0x58, 0x5b, 0x51, 0x57, 0x63, 0x7e, 0x0a, 0xdc, 0xcf, 0x0c, 0x7b, 0xb0, 0xf5, 0x58, 0x8a, + 0xdb, 0x34, 0xe4, 0x07, 0xb0, 0xb0, 0x49, 0xbc, 0x5d, 0x53, 0x27, 0x97, 0x74, 0xdd, 0x09, 0x6c, + 0x3f, 0x22, 0xeb, 0x55, 0x28, 0xc6, 0x6a, 0xa2, 0xf3, 0x0e, 0x09, 0xff, 0xc5, 0x18, 0x0b, 0x27, + 0x3a, 0x71, 0xab, 0xe7, 0xfa, 0xb6, 0xfa, 0xcf, 0x39, 0x18, 0x4f, 0xe0, 0xf3, 0x3b, 0xa6, 0x5d, + 0x13, 0xc8, 0x47, 0x22, 0xed, 0xeb, 0xa6, 0x5d, 0x7b, 0xde, 0xac, 0x4c, 0x0a, 0x35, 0xf6, 0x89, + 0xb9, 0x22, 0xba, 0x06, 0xf9, 0x80, 0x12, 0x4f, 0x34, 0xf1, 0xf1, 0xac, 0x62, 0xbe, 0x4d, 0x89, + 0x17, 0xf1, 0xab, 0x09, 0x86, 0xcc, 0x04, 0x98, 0x43, 0xa0, 0x35, 0x28, 0x18, 0xec, 0x51, 0x44, + 0x9f, 0x9e, 0xc8, 0xc2, 0x6a, 0xff, 0x11, 0x13, 0x96, 0x01, 0x97, 0xe0, 0x10, 0x05, 0x3d, 0x84, + 0x19, 0x9a, 0x4a, 0x21, 0x7f, 0xae, 0x21, 0xf8, 0x52, 0xcf, 0xc4, 0xab, 0xa8, 0xd5, 0xac, 0xcc, + 0xa4, 0x8f, 0x70, 0x87, 0x03, 0xb9, 0x0a, 0x93, 0x6d, 0x01, 0x66, 0x4f, 0x59, 0xf5, 0xf2, 0xe3, + 0x67, 0xe5, 0x91, 0x27, 0xcf, 0xca, 0x23, 0x4f, 0x9f, 0x95, 0x47, 0xbe, 0x68, 0x95, 0xa5, 0xc7, + 0xad, 0xb2, 0xf4, 0xa4, 0x55, 0x96, 0x9e, 0xb6, 0xca, 0xd2, 0x6f, 0xad, 0xb2, 0xf4, 0xd5, 0xef, + 0xe5, 0x91, 0xbb, 0xe5, 0xc1, 0xff, 0x8b, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0x03, 0x5d, 0xec, + 0x01, 0xac, 0x15, 0x00, 0x00, } func (m *ExemptPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto index eda0f7829e..c6504d4353 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto @@ -76,7 +76,7 @@ message FlowSchema { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a FlowSchema. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -101,7 +101,7 @@ message FlowSchemaCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -115,7 +115,7 @@ message FlowSchemaList { // `metadata` is the standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of FlowSchemas. repeated FlowSchema items = 2; @@ -297,7 +297,7 @@ message PriorityLevelConfiguration { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a "request-priority". // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -322,7 +322,7 @@ message PriorityLevelConfigurationCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -336,7 +336,7 @@ message PriorityLevelConfigurationList { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of request-priorities. repeated PriorityLevelConfiguration items = 2; diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go new file mode 100644 index 0000000000..5db6d52d47 --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=imagepolicy.k8s.io + +package v1alpha1 // import "k8s.io/api/imagepolicy/v1alpha1" diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go new file mode 100644 index 0000000000..57732a5164 --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go @@ -0,0 +1,1374 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/imagepolicy/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ImageReview) Reset() { *m = ImageReview{} } +func (*ImageReview) ProtoMessage() {} +func (*ImageReview) Descriptor() ([]byte, []int) { + return fileDescriptor_7620d1538838ac6f, []int{0} +} +func (m *ImageReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageReview.Merge(m, src) +} +func (m *ImageReview) XXX_Size() int { + return m.Size() +} +func (m *ImageReview) XXX_DiscardUnknown() { + xxx_messageInfo_ImageReview.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageReview proto.InternalMessageInfo + +func (m *ImageReviewContainerSpec) Reset() { *m = ImageReviewContainerSpec{} } +func (*ImageReviewContainerSpec) ProtoMessage() {} +func (*ImageReviewContainerSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_7620d1538838ac6f, []int{1} +} +func (m *ImageReviewContainerSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageReviewContainerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageReviewContainerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageReviewContainerSpec.Merge(m, src) +} +func (m *ImageReviewContainerSpec) XXX_Size() int { + return m.Size() +} +func (m *ImageReviewContainerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ImageReviewContainerSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageReviewContainerSpec proto.InternalMessageInfo + +func (m *ImageReviewSpec) Reset() { *m = ImageReviewSpec{} } +func (*ImageReviewSpec) ProtoMessage() {} +func (*ImageReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_7620d1538838ac6f, []int{2} +} +func (m *ImageReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageReviewSpec.Merge(m, src) +} +func (m *ImageReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *ImageReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ImageReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageReviewSpec proto.InternalMessageInfo + +func (m *ImageReviewStatus) Reset() { *m = ImageReviewStatus{} } +func (*ImageReviewStatus) ProtoMessage() {} +func (*ImageReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_7620d1538838ac6f, []int{3} +} +func (m *ImageReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageReviewStatus.Merge(m, src) +} +func (m *ImageReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *ImageReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ImageReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageReviewStatus 
proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ImageReview)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReview") + proto.RegisterType((*ImageReviewContainerSpec)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewContainerSpec") + proto.RegisterType((*ImageReviewSpec)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewSpec") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewSpec.AnnotationsEntry") + proto.RegisterType((*ImageReviewStatus)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewStatus.AuditAnnotationsEntry") +} + +func init() { + proto.RegisterFile("k8s.io/api/imagepolicy/v1alpha1/generated.proto", fileDescriptor_7620d1538838ac6f) +} + +var fileDescriptor_7620d1538838ac6f = []byte{ + // 593 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0x4f, 0x6f, 0xd3, 0x30, + 0x18, 0xc6, 0x9b, 0x74, 0xff, 0xea, 0x02, 0xeb, 0x0c, 0x48, 0x51, 0x0f, 0xe9, 0x54, 0x24, 0x34, + 0x0e, 0xd8, 0xb4, 0x42, 0x68, 0x70, 0x00, 0x35, 0xd3, 0x24, 0x38, 0x00, 0x92, 0xb9, 0xed, 0x84, + 0x9b, 0x9a, 0xd4, 0xb4, 0x89, 0xa3, 0xd8, 0xe9, 0xe8, 0x8d, 0x4f, 0x80, 0xf8, 0x06, 0x7c, 0x11, + 0x3e, 0x40, 0x8f, 0x3b, 0xee, 0x34, 0xd1, 0x70, 0xe4, 0x4b, 0xa0, 0x38, 0x69, 0x13, 0xda, 0xa1, + 0xa9, 0xb7, 0xbc, 0xef, 0xeb, 0xe7, 0xf7, 0x3e, 0x79, 0x62, 0x05, 0xe0, 0xd1, 0xb1, 0x44, 0x5c, + 0x60, 0x1a, 0x72, 0xcc, 0x7d, 0xea, 0xb1, 0x50, 0x8c, 0xb9, 0x3b, 0xc5, 0x93, 0x0e, 0x1d, 0x87, + 0x43, 0xda, 0xc1, 0x1e, 0x0b, 0x58, 0x44, 0x15, 0x1b, 0xa0, 0x30, 0x12, 0x4a, 0xc0, 0x56, 0x26, + 0x40, 0x34, 0xe4, 0xa8, 0x24, 0x40, 0x0b, 0x41, 0xf3, 0xb1, 0xc7, 0xd5, 0x30, 0xee, 0x23, 0x57, + 0xf8, 0xd8, 0x13, 0x9e, 0xc0, 0x5a, 0xd7, 0x8f, 0x3f, 0xe9, 0x4a, 0x17, 0xfa, 0x29, 0xe3, 0x35, + 0x9f, 0x16, 0x06, 0x7c, 0xea, 0x0e, 0x79, 0xc0, 0xa2, 0x29, 0x0e, 0x47, 0x5e, 0xda, 0x90, 0xd8, + 0x67, 0x8a, 0xe2, 0xc9, 0x9a, 0x8b, 0x26, 0xfe, 0x9f, 0x2a, 0x8a, 0x03, 0xc5, 0x7d, 0xb6, 0x26, + 0x78, 0x76, 0x93, 0x40, 0xba, 0x43, 0xe6, 0xd3, 0x55, 0x5d, 0xfb, 0x87, 0x09, 0xea, 0x6f, 0xd2, + 0xd7, 0x24, 0x6c, 0xc2, 0xd9, 0x39, 0xfc, 0x08, 0xf6, 0x52, 0x4f, 0x03, 0xaa, 0xa8, 0x65, 0x1c, + 0x1a, 0x47, 0xf5, 0xee, 0x13, 0x54, 0x24, 0xb2, 0x44, 0xa3, 0x70, 0xe4, 0xa5, 0x0d, 0x89, 0xd2, + 0xd3, 0x68, 0xd2, 0x41, 0xef, 0xfb, 0x9f, 0x99, 0xab, 0xde, 0x32, 0x45, 0x1d, 0x38, 0xbb, 0x6a, + 0x55, 0x92, 0xab, 0x16, 0x28, 0x7a, 0x64, 0x49, 0x85, 0x04, 0x6c, 0xc9, 0x90, 0xb9, 0x96, 0xb9, + 0x46, 0xbf, 0x36, 0x6f, 0x54, 0x72, 0xf7, 0x21, 0x64, 0xae, 0x73, 0x2b, 0xa7, 0x6f, 0xa5, 0x15, + 0xd1, 0x2c, 0x78, 0x06, 0x76, 0xa4, 0xa2, 0x2a, 0x96, 0x56, 0x55, 0x53, 0xbb, 0x1b, 0x51, 0xb5, + 0xd2, 0xb9, 0x93, 0x73, 0x77, 0xb2, 0x9a, 0xe4, 0xc4, 0xf6, 0x2b, 0x60, 0x95, 0x0e, 0x9f, 0x88, + 0x40, 0xd1, 0x34, 0x82, 0x74, 0x3b, 0x7c, 0x00, 0xb6, 0x35, 0x5d, 0x47, 0x55, 0x73, 0x6e, 0xe7, + 0x88, 0xed, 0x4c, 0x90, 0xcd, 0xda, 0x7f, 0x4c, 0xb0, 0xbf, 0xf2, 0x12, 0xd0, 0x07, 0xc0, 0x5d, + 0x90, 0xa4, 0x65, 0x1c, 0x56, 0x8f, 0xea, 0xdd, 0xe7, 0x9b, 0x98, 0xfe, 0xc7, 0x47, 0x91, 0xf8, + 0xb2, 0x2d, 0x49, 0x69, 0x01, 0xfc, 0x02, 0xea, 0x34, 0x08, 0x84, 0xa2, 0x8a, 0x8b, 0x40, 0x5a, + 0xa6, 0xde, 0xd7, 0xdb, 0x34, 0x7a, 0xd4, 0x2b, 0x18, 0xa7, 0x81, 0x8a, 0xa6, 0xce, 0xdd, 0x7c, + 0x6f, 0xbd, 0x34, 0x21, 0xe5, 0x55, 0x10, 0x83, 0x5a, 0x40, 0x7d, 0x26, 0x43, 0xea, 0x32, 0xfd, + 0x71, 0x6a, 0xce, 0x41, 0x2e, 0xaa, 0xbd, 0x5b, 0x0c, 0x48, 0x71, 0xa6, 0xf9, 0x12, 0x34, 
0x56, + 0xd7, 0xc0, 0x06, 0xa8, 0x8e, 0xd8, 0x34, 0x0b, 0x99, 0xa4, 0x8f, 0xf0, 0x1e, 0xd8, 0x9e, 0xd0, + 0x71, 0xcc, 0xf4, 0x2d, 0xaa, 0x91, 0xac, 0x78, 0x61, 0x1e, 0x1b, 0xed, 0x9f, 0x26, 0x38, 0x58, + 0xfb, 0xb8, 0xf0, 0x11, 0xd8, 0xa5, 0xe3, 0xb1, 0x38, 0x67, 0x03, 0x4d, 0xd9, 0x73, 0xf6, 0x73, + 0x13, 0xbb, 0xbd, 0xac, 0x4d, 0x16, 0x73, 0xf8, 0x10, 0xec, 0x44, 0x8c, 0x4a, 0x11, 0x64, 0xec, + 0xe2, 0x5e, 0x10, 0xdd, 0x25, 0xf9, 0x14, 0x7e, 0x33, 0x40, 0x83, 0xc6, 0x03, 0xae, 0x4a, 0x76, + 0xad, 0xaa, 0x4e, 0xf6, 0xf5, 0xe6, 0xd7, 0x0f, 0xf5, 0x56, 0x50, 0x59, 0xc0, 0x56, 0xbe, 0xbc, + 0xb1, 0x3a, 0x26, 0x6b, 0xbb, 0x9b, 0x27, 0xe0, 0xfe, 0xb5, 0x90, 0x4d, 0xe2, 0x73, 0x4e, 0x67, + 0x73, 0xbb, 0x72, 0x31, 0xb7, 0x2b, 0x97, 0x73, 0xbb, 0xf2, 0x35, 0xb1, 0x8d, 0x59, 0x62, 0x1b, + 0x17, 0x89, 0x6d, 0x5c, 0x26, 0xb6, 0xf1, 0x2b, 0xb1, 0x8d, 0xef, 0xbf, 0xed, 0xca, 0x59, 0xeb, + 0x86, 0xbf, 0xea, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x59, 0x86, 0x92, 0x15, 0x77, 0x05, 0x00, + 0x00, +} + +func (m *ImageReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageReviewContainerSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageReviewContainerSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageReviewContainerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageReviewSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.Annotations[string(keysForAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotations[iNdEx]) + copy(dAtA[i:], keysForAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Containers) > 0 { + for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ImageReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AuditAnnotations) > 0 { + keysForAuditAnnotations := make([]string, 0, len(m.AuditAnnotations)) + for k := range m.AuditAnnotations { + keysForAuditAnnotations = append(keysForAuditAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) + for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAuditAnnotations[iNdEx]) + copy(dAtA[i:], keysForAuditAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAuditAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + i-- + if m.Allowed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ImageReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageReviewContainerSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageReviewSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) 
+ sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AuditAnnotations) > 0 { + for k, v := range m.AuditAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ImageReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ImageReviewSpec", "ImageReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ImageReviewStatus", "ImageReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageReviewContainerSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageReviewContainerSpec{`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `}`, + }, "") + return s +} +func (this *ImageReviewSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForContainers := "[]ImageReviewContainerSpec{" + for _, f := range this.Containers { + repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "ImageReviewContainerSpec", "ImageReviewContainerSpec", 1), `&`, ``, 1) + "," + } + repeatedStringForContainers += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&ImageReviewSpec{`, + `Containers:` + repeatedStringForContainers + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func (this *ImageReviewStatus) String() string { + if this == nil { + return "nil" + } + keysForAuditAnnotations := make([]string, 0, len(this.AuditAnnotations)) + for k := range this.AuditAnnotations { + keysForAuditAnnotations = append(keysForAuditAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) + mapStringForAuditAnnotations := "map[string]string{" + for _, k := range keysForAuditAnnotations { + mapStringForAuditAnnotations += fmt.Sprintf("%v: %v,", k, this.AuditAnnotations[k]) + } + mapStringForAuditAnnotations += "}" + s := strings.Join([]string{`&ImageReviewStatus{`, + `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `AuditAnnotations:` + mapStringForAuditAnnotations + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + 
if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ImageReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageReviewContainerSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + 
} + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageReviewContainerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageReviewContainerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Containers = append(m.Containers, ImageReviewContainerSpec{}) + if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := 
iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if 
wireType == 4 { + return fmt.Errorf("proto: ImageReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Allowed = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuditAnnotations == nil { + m.AuditAnnotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if 
postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AuditAnnotations[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto new file mode 100644 index 0000000000..5ea5c0ec8e --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
+ +syntax = "proto2"; + +package k8s.io.api.imagepolicy.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/imagepolicy/v1alpha1"; + +// ImageReview checks if the set of images in a pod are allowed. +message ImageReview { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the pod being evaluated + optional ImageReviewSpec spec = 2; + + // Status is filled in by the backend and indicates whether the pod should be allowed. + // +optional + optional ImageReviewStatus status = 3; +} + +// ImageReviewContainerSpec is a description of a container within the pod creation request. +message ImageReviewContainerSpec { + // This can be in the form image:tag or image@SHA:012345679abcdef. + // +optional + optional string image = 1; +} + +// ImageReviewSpec is a description of the pod creation request. +message ImageReviewSpec { + // Containers is a list of a subset of the information in each container of the Pod being created. + // +optional + // +listType=atomic + repeated ImageReviewContainerSpec containers = 1; + + // Annotations is a list of key-value pairs extracted from the Pod's annotations. + // It only includes keys which match the pattern `*.image-policy.k8s.io/*`. + // It is up to each webhook backend to determine how to interpret these annotations, if at all. + // +optional + map annotations = 2; + + // Namespace is the namespace the pod is being created in. + // +optional + optional string namespace = 3; +} + +// ImageReviewStatus is the result of the review for the pod creation request. +message ImageReviewStatus { + // Allowed indicates that all images were allowed to be run. + optional bool allowed = 1; + + // Reason should be empty unless Allowed is false in which case it + // may contain a short description of what is wrong. Kubernetes + // may truncate excessively long errors when displaying to the user. + // +optional + optional string reason = 2; + + // AuditAnnotations will be added to the attributes object of the + // admission controller request using 'AddAnnotation'. The keys should + // be prefix-less (i.e., the admission controller will add an + // appropriate prefix). + // +optional + map auditAnnotations = 3; +} + diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/register.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/register.go new file mode 100644 index 0000000000..477571bbb2 --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/register.go @@ -0,0 +1,51 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. +const GroupName = "imagepolicy.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ImageReview{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/types.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/types.go new file mode 100644 index 0000000000..19ac2b536f --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/types.go @@ -0,0 +1,83 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +genclient:noVerbs +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageReview checks if the set of images in a pod are allowed. +type ImageReview struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the pod being evaluated + Spec ImageReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the backend and indicates whether the pod should be allowed. + // +optional + Status ImageReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ImageReviewSpec is a description of the pod creation request. +type ImageReviewSpec struct { + // Containers is a list of a subset of the information in each container of the Pod being created. + // +optional + // +listType=atomic + Containers []ImageReviewContainerSpec `json:"containers,omitempty" protobuf:"bytes,1,rep,name=containers"` + // Annotations is a list of key-value pairs extracted from the Pod's annotations. + // It only includes keys which match the pattern `*.image-policy.k8s.io/*`. + // It is up to each webhook backend to determine how to interpret these annotations, if at all. 
+ // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,2,rep,name=annotations"` + // Namespace is the namespace the pod is being created in. + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` +} + +// ImageReviewContainerSpec is a description of a container within the pod creation request. +type ImageReviewContainerSpec struct { + // This can be in the form image:tag or image@SHA:012345679abcdef. + // +optional + Image string `json:"image,omitempty" protobuf:"bytes,1,opt,name=image"` + // In future, we may add command line overrides, exec health check command lines, and so on. +} + +// ImageReviewStatus is the result of the review for the pod creation request. +type ImageReviewStatus struct { + // Allowed indicates that all images were allowed to be run. + Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"` + // Reason should be empty unless Allowed is false in which case it + // may contain a short description of what is wrong. Kubernetes + // may truncate excessively long errors when displaying to the user. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` + // AuditAnnotations will be added to the attributes object of the + // admission controller request using 'AddAnnotation'. The keys should + // be prefix-less (i.e., the admission controller will add an + // appropriate prefix). + // +optional + AuditAnnotations map[string]string `json:"auditAnnotations,omitempty" protobuf:"bytes,3,rep,name=auditAnnotations"` +} diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..dadf95e1d5 --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,72 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ImageReview = map[string]string{ + "": "ImageReview checks if the set of images in a pod are allowed.", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec holds information about the pod being evaluated", + "status": "Status is filled in by the backend and indicates whether the pod should be allowed.", +} + +func (ImageReview) SwaggerDoc() map[string]string { + return map_ImageReview +} + +var map_ImageReviewContainerSpec = map[string]string{ + "": "ImageReviewContainerSpec is a description of a container within the pod creation request.", + "image": "This can be in the form image:tag or image@SHA:012345679abcdef.", +} + +func (ImageReviewContainerSpec) SwaggerDoc() map[string]string { + return map_ImageReviewContainerSpec +} + +var map_ImageReviewSpec = map[string]string{ + "": "ImageReviewSpec is a description of the pod creation request.", + "containers": "Containers is a list of a subset of the information in each container of the Pod being created.", + "annotations": "Annotations is a list of key-value pairs extracted from the Pod's annotations. It only includes keys which match the pattern `*.image-policy.k8s.io/*`. It is up to each webhook backend to determine how to interpret these annotations, if at all.", + "namespace": "Namespace is the namespace the pod is being created in.", +} + +func (ImageReviewSpec) SwaggerDoc() map[string]string { + return map_ImageReviewSpec +} + +var map_ImageReviewStatus = map[string]string{ + "": "ImageReviewStatus is the result of the review for the pod creation request.", + "allowed": "Allowed indicates that all images were allowed to be run.", + "reason": "Reason should be empty unless Allowed is false in which case it may contain a short description of what is wrong. Kubernetes may truncate excessively long errors when displaying to the user.", + "auditAnnotations": "AuditAnnotations will be added to the attributes object of the admission controller request using 'AddAnnotation'. The keys should be prefix-less (i.e., the admission controller will add an appropriate prefix).", +} + +func (ImageReviewStatus) SwaggerDoc() map[string]string { + return map_ImageReviewStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..f230656f3f --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,121 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageReview) DeepCopyInto(out *ImageReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReview. +func (in *ImageReview) DeepCopy() *ImageReview { + if in == nil { + return nil + } + out := new(ImageReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageReviewContainerSpec) DeepCopyInto(out *ImageReviewContainerSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReviewContainerSpec. +func (in *ImageReviewContainerSpec) DeepCopy() *ImageReviewContainerSpec { + if in == nil { + return nil + } + out := new(ImageReviewContainerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageReviewSpec) DeepCopyInto(out *ImageReviewSpec) { + *out = *in + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]ImageReviewContainerSpec, len(*in)) + copy(*out, *in) + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReviewSpec. +func (in *ImageReviewSpec) DeepCopy() *ImageReviewSpec { + if in == nil { + return nil + } + out := new(ImageReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageReviewStatus) DeepCopyInto(out *ImageReviewStatus) { + *out = *in + if in.AuditAnnotations != nil { + in, out := &in.AuditAnnotations, &out.AuditAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReviewStatus. +func (in *ImageReviewStatus) DeepCopy() *ImageReviewStatus { + if in == nil { + return nil + } + out := new(ImageReviewStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/networking/v1/doc.go b/vendor/k8s.io/api/networking/v1/doc.go index d3ffd5ed17..1d13e7bab3 100644 --- a/vendor/k8s.io/api/networking/v1/doc.go +++ b/vendor/k8s.io/api/networking/v1/doc.go @@ -17,6 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=networking.k8s.io package v1 // import "k8s.io/api/networking/v1" diff --git a/vendor/k8s.io/api/networking/v1/generated.pb.go b/vendor/k8s.io/api/networking/v1/generated.pb.go index daeaea5dce..7c023e6903 100644 --- a/vendor/k8s.io/api/networking/v1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. 
DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto +// source: k8s.io/api/networking/v1/generated.proto package v1 @@ -51,7 +51,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } func (*HTTPIngressPath) ProtoMessage() {} func (*HTTPIngressPath) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{0} + return fileDescriptor_2c41434372fec1d7, []int{0} } func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -79,7 +79,7 @@ var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } func (*HTTPIngressRuleValue) ProtoMessage() {} func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{1} + return fileDescriptor_2c41434372fec1d7, []int{1} } func (m *HTTPIngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,7 +107,7 @@ var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo func (m *IPBlock) Reset() { *m = IPBlock{} } func (*IPBlock) ProtoMessage() {} func (*IPBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{2} + return fileDescriptor_2c41434372fec1d7, []int{2} } func (m *IPBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,7 +135,7 @@ var xxx_messageInfo_IPBlock proto.InternalMessageInfo func (m *Ingress) Reset() { *m = Ingress{} } func (*Ingress) ProtoMessage() {} func (*Ingress) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{3} + return fileDescriptor_2c41434372fec1d7, []int{3} } func (m *Ingress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -163,7 +163,7 @@ var xxx_messageInfo_Ingress proto.InternalMessageInfo func (m *IngressBackend) Reset() { *m = IngressBackend{} } func (*IngressBackend) ProtoMessage() {} func (*IngressBackend) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{4} + return fileDescriptor_2c41434372fec1d7, []int{4} } func (m *IngressBackend) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -191,7 +191,7 @@ var xxx_messageInfo_IngressBackend proto.InternalMessageInfo func (m *IngressClass) Reset() { *m = IngressClass{} } func (*IngressClass) ProtoMessage() {} func (*IngressClass) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{5} + return fileDescriptor_2c41434372fec1d7, []int{5} } func (m *IngressClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -219,7 +219,7 @@ var xxx_messageInfo_IngressClass proto.InternalMessageInfo func (m *IngressClassList) Reset() { *m = IngressClassList{} } func (*IngressClassList) ProtoMessage() {} func (*IngressClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{6} + return fileDescriptor_2c41434372fec1d7, []int{6} } func (m *IngressClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -247,7 +247,7 @@ var xxx_messageInfo_IngressClassList proto.InternalMessageInfo func (m *IngressClassParametersReference) Reset() { *m = IngressClassParametersReference{} } func (*IngressClassParametersReference) ProtoMessage() {} func (*IngressClassParametersReference) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{7} + return fileDescriptor_2c41434372fec1d7, []int{7} } func (m *IngressClassParametersReference) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -275,7 +275,7 @@ var xxx_messageInfo_IngressClassParametersReference proto.InternalMessageInfo func (m *IngressClassSpec) Reset() { *m = IngressClassSpec{} } func (*IngressClassSpec) ProtoMessage() {} func (*IngressClassSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{8} + return fileDescriptor_2c41434372fec1d7, []int{8} } func (m *IngressClassSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -303,7 +303,7 @@ var xxx_messageInfo_IngressClassSpec proto.InternalMessageInfo func (m *IngressList) Reset() { *m = IngressList{} } func (*IngressList) ProtoMessage() {} func (*IngressList) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{9} + return fileDescriptor_2c41434372fec1d7, []int{9} } func (m *IngressList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -331,7 +331,7 @@ var xxx_messageInfo_IngressList proto.InternalMessageInfo func (m *IngressLoadBalancerIngress) Reset() { *m = IngressLoadBalancerIngress{} } func (*IngressLoadBalancerIngress) ProtoMessage() {} func (*IngressLoadBalancerIngress) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{10} + return fileDescriptor_2c41434372fec1d7, []int{10} } func (m *IngressLoadBalancerIngress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -359,7 +359,7 @@ var xxx_messageInfo_IngressLoadBalancerIngress proto.InternalMessageInfo func (m *IngressLoadBalancerStatus) Reset() { *m = IngressLoadBalancerStatus{} } func (*IngressLoadBalancerStatus) ProtoMessage() {} func (*IngressLoadBalancerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{11} + return fileDescriptor_2c41434372fec1d7, []int{11} } func (m *IngressLoadBalancerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -387,7 +387,7 @@ var xxx_messageInfo_IngressLoadBalancerStatus proto.InternalMessageInfo func (m *IngressPortStatus) Reset() { *m = IngressPortStatus{} } func (*IngressPortStatus) ProtoMessage() {} func (*IngressPortStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{12} + return fileDescriptor_2c41434372fec1d7, []int{12} } func (m *IngressPortStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -415,7 +415,7 @@ var xxx_messageInfo_IngressPortStatus proto.InternalMessageInfo func (m *IngressRule) Reset() { *m = IngressRule{} } func (*IngressRule) ProtoMessage() {} func (*IngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{13} + return fileDescriptor_2c41434372fec1d7, []int{13} } func (m *IngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -443,7 +443,7 @@ var xxx_messageInfo_IngressRule proto.InternalMessageInfo func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (*IngressRuleValue) ProtoMessage() {} func (*IngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{14} + return fileDescriptor_2c41434372fec1d7, []int{14} } func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -471,7 +471,7 @@ var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo func (m *IngressServiceBackend) Reset() { *m = IngressServiceBackend{} } func (*IngressServiceBackend) ProtoMessage() {} func (*IngressServiceBackend) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{15} + return fileDescriptor_2c41434372fec1d7, []int{15} } func (m *IngressServiceBackend) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -499,7 +499,7 @@ var xxx_messageInfo_IngressServiceBackend proto.InternalMessageInfo func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (*IngressSpec) ProtoMessage() {} func (*IngressSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{16} + return fileDescriptor_2c41434372fec1d7, []int{16} } func (m *IngressSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -527,7 +527,7 @@ var xxx_messageInfo_IngressSpec proto.InternalMessageInfo func (m *IngressStatus) Reset() { *m = IngressStatus{} } func (*IngressStatus) ProtoMessage() {} func (*IngressStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{17} + return fileDescriptor_2c41434372fec1d7, []int{17} } func (m *IngressStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -555,7 +555,7 @@ var xxx_messageInfo_IngressStatus proto.InternalMessageInfo func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (*IngressTLS) ProtoMessage() {} func (*IngressTLS) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{18} + return fileDescriptor_2c41434372fec1d7, []int{18} } func (m *IngressTLS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -583,7 +583,7 @@ var xxx_messageInfo_IngressTLS proto.InternalMessageInfo func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } func (*NetworkPolicy) ProtoMessage() {} func (*NetworkPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{19} + return fileDescriptor_2c41434372fec1d7, []int{19} } func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -611,7 +611,7 @@ var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } func (*NetworkPolicyEgressRule) ProtoMessage() {} func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{20} + return fileDescriptor_2c41434372fec1d7, []int{20} } func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -639,7 +639,7 @@ var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{21} + return fileDescriptor_2c41434372fec1d7, []int{21} } func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -667,7 +667,7 @@ var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } func (*NetworkPolicyList) ProtoMessage() {} func (*NetworkPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{22} + return fileDescriptor_2c41434372fec1d7, []int{22} } func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -695,7 +695,7 @@ var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } func (*NetworkPolicyPeer) ProtoMessage() {} func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{23} + return fileDescriptor_2c41434372fec1d7, []int{23} } func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -723,7 +723,7 @@ var xxx_messageInfo_NetworkPolicyPeer 
proto.InternalMessageInfo func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } func (*NetworkPolicyPort) ProtoMessage() {} func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{24} + return fileDescriptor_2c41434372fec1d7, []int{24} } func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -751,7 +751,7 @@ var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } func (*NetworkPolicySpec) ProtoMessage() {} func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{25} + return fileDescriptor_2c41434372fec1d7, []int{25} } func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -779,7 +779,7 @@ var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo func (m *ServiceBackendPort) Reset() { *m = ServiceBackendPort{} } func (*ServiceBackendPort) ProtoMessage() {} func (*ServiceBackendPort) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{26} + return fileDescriptor_2c41434372fec1d7, []int{26} } func (m *ServiceBackendPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -835,116 +835,115 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto", fileDescriptor_1c72867a70a7cc90) -} - -var fileDescriptor_1c72867a70a7cc90 = []byte{ - // 1671 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcb, 0x6f, 0x1b, 0xd5, - 0x1a, 0xcf, 0x38, 0x71, 0xec, 0x1c, 0x27, 0x69, 0x72, 0x6e, 0xab, 0xeb, 0x9b, 0xab, 0x6b, 0xe7, - 0x8e, 0x68, 0x1b, 0x68, 0x6b, 0xd3, 0xb4, 0x42, 0xb0, 0x01, 0x3a, 0x69, 0x9a, 0x86, 0xa6, 0x8e, - 0x75, 0x6c, 0x15, 0x81, 0x78, 0x74, 0x32, 0x3e, 0xb1, 0xa7, 0x1e, 0xcf, 0x19, 0x9d, 0x39, 0x0e, - 0xad, 0x84, 0x10, 0x1b, 0x16, 0xec, 0xf8, 0x17, 0x10, 0x7f, 0x01, 0x82, 0x05, 0x12, 0x82, 0xc2, - 0x06, 0x75, 0x59, 0x89, 0x4d, 0x37, 0x58, 0xd4, 0xfc, 0x17, 0x59, 0xa1, 0xf3, 0x98, 0x97, 0x1f, - 0xb5, 0xa9, 0xaa, 0xac, 0x92, 0xf3, 0x7d, 0xdf, 0xf9, 0x7d, 0x8f, 0xf3, 0xbd, 0xc6, 0xe0, 0x5a, - 0xfb, 0x75, 0xbf, 0x64, 0x93, 0x72, 0xbb, 0x7b, 0x80, 0xa9, 0x8b, 0x19, 0xf6, 0xcb, 0x47, 0xd8, - 0x6d, 0x10, 0x5a, 0x56, 0x0c, 0xd3, 0xb3, 0xcb, 0x2e, 0x66, 0x9f, 0x10, 0xda, 0xb6, 0xdd, 0x66, - 0xf9, 0xe8, 0x72, 0xb9, 0x89, 0x5d, 0x4c, 0x4d, 0x86, 0x1b, 0x25, 0x8f, 0x12, 0x46, 0x60, 0x5e, - 0x4a, 0x96, 0x4c, 0xcf, 0x2e, 0x45, 0x92, 0xa5, 0xa3, 0xcb, 0x6b, 0x97, 0x9a, 0x36, 0x6b, 0x75, - 0x0f, 0x4a, 0x16, 0xe9, 0x94, 0x9b, 0xa4, 0x49, 0xca, 0xe2, 0xc2, 0x41, 0xf7, 0x50, 0x9c, 0xc4, - 0x41, 0xfc, 0x27, 0x81, 0xd6, 0xf4, 0x98, 0x4a, 0x8b, 0x50, 0x3c, 0x42, 0xd9, 0xda, 0xd5, 0x48, - 0xa6, 0x63, 0x5a, 0x2d, 0xdb, 0xc5, 0xf4, 0x41, 0xd9, 0x6b, 0x37, 0x39, 0xc1, 0x2f, 0x77, 0x30, - 0x33, 0x47, 0xdd, 0x2a, 0x8f, 0xbb, 0x45, 0xbb, 0x2e, 0xb3, 0x3b, 0x78, 0xe8, 0xc2, 0x6b, 0x93, - 0x2e, 0xf8, 0x56, 0x0b, 0x77, 0xcc, 0xa1, 0x7b, 0x57, 0xc6, 0xdd, 0xeb, 0x32, 0xdb, 0x29, 0xdb, - 0x2e, 0xf3, 0x19, 0x1d, 0xbc, 0xa4, 0xff, 0xac, 0x81, 0x53, 0x37, 0xeb, 0xf5, 0xea, 0xae, 0xdb, - 0xa4, 0xd8, 0xf7, 0xab, 0x26, 0x6b, 0xc1, 0x75, 0x30, 0xe7, 0x99, 0xac, 0x95, 0xd7, 0xd6, 0xb5, - 0x8d, 0x05, 0x63, 0xf1, 0x51, 0xaf, 0x38, 0xd3, 0xef, 0x15, 0xe7, 0x38, 0x0f, 0x09, 0x0e, 0xbc, - 0x0a, 0xb2, 0xfc, 0x6f, 0xfd, 0x81, 0x87, 0xf3, 0xb3, 0x42, 0x2a, 0xdf, 0xef, 0x15, 0xb3, 0x55, - 0x45, 0x3b, 0x8e, 0xfd, 0x8f, 0x42, 0x49, 0x58, 0x03, 
0x99, 0x03, 0xd3, 0x6a, 0x63, 0xb7, 0x91, - 0x4f, 0xad, 0x6b, 0x1b, 0xb9, 0xcd, 0x8d, 0xd2, 0xb8, 0xe7, 0x2b, 0x29, 0x7b, 0x0c, 0x29, 0x6f, - 0x9c, 0x52, 0x46, 0x64, 0x14, 0x01, 0x05, 0x48, 0xfa, 0x21, 0x38, 0x1d, 0xb3, 0x1f, 0x75, 0x1d, - 0x7c, 0xc7, 0x74, 0xba, 0x18, 0x56, 0x40, 0x9a, 0x2b, 0xf6, 0xf3, 0xda, 0xfa, 0xec, 0x46, 0x6e, - 0xf3, 0xe5, 0xf1, 0xaa, 0x06, 0xdc, 0x37, 0x96, 0x94, 0xae, 0x34, 0x3f, 0xf9, 0x48, 0xc2, 0xe8, - 0xfb, 0x20, 0xb3, 0x5b, 0x35, 0x1c, 0x62, 0xb5, 0x79, 0x7c, 0x2c, 0xbb, 0x41, 0x07, 0xe3, 0xb3, - 0xb5, 0x7b, 0x1d, 0x21, 0xc1, 0x81, 0x3a, 0x98, 0xc7, 0xf7, 0x2d, 0xec, 0xb1, 0x7c, 0x6a, 0x7d, - 0x76, 0x63, 0xc1, 0x00, 0xfd, 0x5e, 0x71, 0x7e, 0x5b, 0x50, 0x90, 0xe2, 0xe8, 0x5f, 0xa4, 0x40, - 0x46, 0xa9, 0x85, 0x77, 0x41, 0x96, 0xa7, 0x4f, 0xc3, 0x64, 0xa6, 0x40, 0xcd, 0x6d, 0xbe, 0x1a, - 0xb3, 0x37, 0x7c, 0xcd, 0x92, 0xd7, 0x6e, 0x72, 0x82, 0x5f, 0xe2, 0xd2, 0xdc, 0xf6, 0xfd, 0x83, - 0x7b, 0xd8, 0x62, 0xb7, 0x31, 0x33, 0x0d, 0xa8, 0xec, 0x00, 0x11, 0x0d, 0x85, 0xa8, 0x70, 0x07, - 0xcc, 0xf9, 0x1e, 0xb6, 0x54, 0xe0, 0xcf, 0x4e, 0x0c, 0x7c, 0xcd, 0xc3, 0x56, 0xe4, 0x1a, 0x3f, - 0x21, 0x01, 0x00, 0xf7, 0xc1, 0xbc, 0xcf, 0x4c, 0xd6, 0xf5, 0xc5, 0xc3, 0xe7, 0x36, 0xcf, 0x4f, - 0x86, 0x12, 0xe2, 0xc6, 0xb2, 0x02, 0x9b, 0x97, 0x67, 0xa4, 0x60, 0xf4, 0x5f, 0x35, 0xb0, 0x9c, - 0x7c, 0x6d, 0x78, 0x07, 0x64, 0x7c, 0x4c, 0x8f, 0x6c, 0x0b, 0xe7, 0xe7, 0x84, 0x92, 0xf2, 0x64, - 0x25, 0x52, 0x3e, 0xc8, 0x97, 0x1c, 0xcf, 0x15, 0x45, 0x43, 0x01, 0x18, 0x7c, 0x17, 0x64, 0x29, - 0xf6, 0x49, 0x97, 0x5a, 0x58, 0x59, 0x7f, 0x29, 0x0e, 0xcc, 0xeb, 0x9e, 0x43, 0xf2, 0x64, 0x6d, - 0xec, 0x11, 0xcb, 0x74, 0x64, 0x28, 0x11, 0x3e, 0xc4, 0x14, 0xbb, 0x16, 0x36, 0x16, 0x79, 0x96, - 0x23, 0x05, 0x81, 0x42, 0x30, 0x5e, 0x45, 0x8b, 0xca, 0x90, 0x2d, 0xc7, 0x3c, 0x91, 0x07, 0xdd, - 0x4b, 0x3c, 0xe8, 0x2b, 0x13, 0x03, 0x24, 0xec, 0x1a, 0xf7, 0xaa, 0xfa, 0x4f, 0x1a, 0x58, 0x89, - 0x0b, 0xee, 0xd9, 0x3e, 0x83, 0x1f, 0x0c, 0x39, 0x51, 0x9a, 0xce, 0x09, 0x7e, 0x5b, 0xb8, 0xb0, - 0xa2, 0x54, 0x65, 0x03, 0x4a, 0xcc, 0x81, 0x5b, 0x20, 0x6d, 0x33, 0xdc, 0xf1, 0x45, 0x89, 0xe4, - 0x36, 0xcf, 0x4d, 0xe7, 0x41, 0x54, 0x9d, 0xbb, 0xfc, 0x32, 0x92, 0x18, 0xfa, 0x1f, 0x1a, 0x28, - 0xc6, 0xc5, 0xaa, 0x26, 0x35, 0x3b, 0x98, 0x61, 0xea, 0x87, 0x8f, 0x07, 0x37, 0x40, 0xd6, 0xac, - 0xee, 0xee, 0x50, 0xd2, 0xf5, 0x82, 0xd2, 0xe5, 0xa6, 0x5d, 0x53, 0x34, 0x14, 0x72, 0x79, 0x81, - 0xb7, 0x6d, 0xd5, 0xa5, 0x62, 0x05, 0x7e, 0xcb, 0x76, 0x1b, 0x48, 0x70, 0xb8, 0x84, 0x6b, 0x76, - 0x82, 0xe6, 0x17, 0x4a, 0x54, 0xcc, 0x0e, 0x46, 0x82, 0x03, 0x8b, 0x20, 0xed, 0x5b, 0xc4, 0x93, - 0x19, 0xbc, 0x60, 0x2c, 0x70, 0x93, 0x6b, 0x9c, 0x80, 0x24, 0x1d, 0x5e, 0x00, 0x0b, 0x5c, 0xd0, - 0xf7, 0x4c, 0x0b, 0xe7, 0xd3, 0x42, 0x68, 0xa9, 0xdf, 0x2b, 0x2e, 0x54, 0x02, 0x22, 0x8a, 0xf8, - 0xfa, 0xb7, 0x03, 0xef, 0xc3, 0x9f, 0x0e, 0x6e, 0x02, 0x60, 0x11, 0x97, 0x51, 0xe2, 0x38, 0x38, - 0xe8, 0x46, 0x61, 0xd2, 0x6c, 0x85, 0x1c, 0x14, 0x93, 0x82, 0x36, 0x00, 0x5e, 0x18, 0x1b, 0x95, - 0x3c, 0x6f, 0x4c, 0x17, 0xfa, 0x11, 0x31, 0x35, 0x96, 0xb9, 0xaa, 0x18, 0x23, 0x06, 0xae, 0x7f, - 0xa7, 0x81, 0x9c, 0xba, 0x7f, 0x02, 0xe9, 0x74, 0x23, 0x99, 0x4e, 0xff, 0x9f, 0x3c, 0x5a, 0x46, - 0x67, 0xd2, 0x0f, 0x1a, 0x58, 0x0b, 0xac, 0x26, 0x66, 0xc3, 0x30, 0x1d, 0xd3, 0xb5, 0x30, 0x0d, - 0x3a, 0xf5, 0x1a, 0x48, 0xd9, 0x41, 0xfa, 0x00, 0x05, 0x90, 0xda, 0xad, 0xa2, 0x94, 0xed, 0xc1, - 0x8b, 0x20, 0xdb, 0x22, 0x3e, 0x13, 0x89, 0x21, 0x53, 0x27, 0x34, 0xf8, 0xa6, 0xa2, 0xa3, 0x50, - 0x02, 0x56, 0x41, 0xda, 0x23, 0x94, 0xf9, 0xf9, 0x39, 0x61, 0xf0, 0x85, 0x89, 
0x06, 0x57, 0x09, - 0x65, 0xaa, 0x97, 0x46, 0x23, 0x8a, 0x23, 0x20, 0x09, 0xa4, 0x7f, 0x0a, 0xfe, 0x33, 0xc2, 0x72, - 0x79, 0x05, 0x7e, 0x0c, 0x32, 0xb6, 0x64, 0xaa, 0x89, 0x78, 0x75, 0xa2, 0xc2, 0x11, 0xfe, 0x47, - 0x83, 0x38, 0x18, 0xb8, 0x01, 0xaa, 0xfe, 0x8d, 0x06, 0x56, 0x87, 0x2c, 0x15, 0xbb, 0x04, 0xa1, - 0x4c, 0x44, 0x2c, 0x1d, 0xdb, 0x25, 0x08, 0x65, 0x48, 0x70, 0xe0, 0x2d, 0x90, 0x15, 0xab, 0x88, - 0x45, 0x1c, 0x15, 0xb5, 0x72, 0x10, 0xb5, 0xaa, 0xa2, 0x1f, 0xf7, 0x8a, 0xff, 0x1d, 0xde, 0xcf, - 0x4a, 0x01, 0x1b, 0x85, 0x00, 0xbc, 0xea, 0x30, 0xa5, 0x84, 0xaa, 0xc2, 0x14, 0x55, 0xb7, 0xcd, - 0x09, 0x48, 0xd2, 0xf5, 0xaf, 0xa3, 0xa4, 0xe4, 0xbb, 0x02, 0xb7, 0x8f, 0xbf, 0xc8, 0xe0, 0x2c, - 0xe7, 0xef, 0x85, 0x04, 0x07, 0x7a, 0x60, 0xc5, 0x1e, 0x58, 0x2e, 0xa6, 0x6e, 0xba, 0xe1, 0x0d, - 0x23, 0xaf, 0x90, 0x57, 0x06, 0x39, 0x68, 0x08, 0x5d, 0xbf, 0x0b, 0x86, 0xa4, 0x78, 0xbb, 0x6f, - 0x31, 0xe6, 0x8d, 0x28, 0x9c, 0xf1, 0xdb, 0x4c, 0xa4, 0x3d, 0x2b, 0x7c, 0xaa, 0xd7, 0xab, 0x48, - 0xa0, 0xe8, 0x5f, 0x6a, 0xe0, 0xcc, 0xc8, 0xc1, 0x19, 0x36, 0x36, 0x6d, 0x6c, 0x63, 0xab, 0xa8, - 0x17, 0x95, 0x31, 0xb8, 0x38, 0xde, 0x92, 0x24, 0x32, 0x7f, 0xf1, 0x51, 0xef, 0xaf, 0xff, 0x96, - 0x0a, 0x5f, 0x44, 0x74, 0xb5, 0xb7, 0xc3, 0x78, 0x8b, 0xae, 0xc3, 0x35, 0xab, 0x1e, 0x7a, 0x3a, - 0x16, 0xbf, 0x90, 0x87, 0x86, 0xa4, 0x61, 0x03, 0x2c, 0x37, 0xf0, 0xa1, 0xd9, 0x75, 0x98, 0xd2, - 0xad, 0xa2, 0x36, 0xfd, 0xba, 0x09, 0xfb, 0xbd, 0xe2, 0xf2, 0xf5, 0x04, 0x06, 0x1a, 0xc0, 0x84, - 0x5b, 0x60, 0x96, 0x39, 0x41, 0xbb, 0x79, 0x69, 0x22, 0x74, 0x7d, 0xaf, 0x66, 0xe4, 0x94, 0xfb, - 0xb3, 0xf5, 0xbd, 0x1a, 0xe2, 0xb7, 0xe1, 0x3b, 0x20, 0x4d, 0xbb, 0x0e, 0xe6, 0xcb, 0xd4, 0xec, - 0x54, 0x7b, 0x19, 0x7f, 0xd3, 0xa8, 0xfc, 0xf9, 0xc9, 0x47, 0x12, 0x42, 0xff, 0x0c, 0x2c, 0x25, - 0x36, 0x2e, 0xd8, 0x01, 0x8b, 0x4e, 0xac, 0x84, 0x55, 0x14, 0xae, 0xfc, 0xa3, 0xba, 0x57, 0x0d, - 0xe7, 0xb4, 0xd2, 0xb8, 0x18, 0xe7, 0xa1, 0x04, 0xbc, 0x6e, 0x02, 0x10, 0xf9, 0xca, 0x2b, 0x91, - 0x97, 0x8f, 0xec, 0x36, 0xaa, 0x12, 0x79, 0x55, 0xf9, 0x48, 0xd2, 0xf9, 0xf4, 0xf2, 0xb1, 0x45, - 0x31, 0xab, 0x44, 0xfd, 0x32, 0x9c, 0x5e, 0xb5, 0x90, 0x83, 0x62, 0x52, 0xfa, 0x2f, 0x1a, 0x58, - 0xaa, 0x48, 0x93, 0xab, 0xc4, 0xb1, 0xad, 0x07, 0x27, 0xb0, 0x68, 0xdd, 0x4e, 0x2c, 0x5a, 0xcf, - 0x68, 0xd3, 0x09, 0xc3, 0xc6, 0x6e, 0x5a, 0xdf, 0x6b, 0xe0, 0xdf, 0x09, 0xc9, 0xed, 0xa8, 0x19, - 0x85, 0x23, 0x41, 0x9b, 0x34, 0x12, 0x12, 0x08, 0xa2, 0xb4, 0x46, 0x8e, 0x04, 0xb8, 0x03, 0x52, - 0x8c, 0xa8, 0x1c, 0x9d, 0x1a, 0x0e, 0x63, 0x1a, 0xcd, 0xb6, 0x3a, 0x41, 0x29, 0x46, 0xf4, 0x1f, - 0x35, 0x90, 0x4f, 0x48, 0xc5, 0x9b, 0xe8, 0x8b, 0xb7, 0xfb, 0x36, 0x98, 0x3b, 0xa4, 0xa4, 0xf3, - 0x3c, 0x96, 0x87, 0x41, 0xbf, 0x41, 0x49, 0x07, 0x09, 0x18, 0xfd, 0xa1, 0x06, 0x56, 0x13, 0x92, - 0x27, 0xb0, 0x90, 0xec, 0x25, 0x17, 0x92, 0xf3, 0x53, 0xfa, 0x30, 0x66, 0x2d, 0x79, 0x98, 0x1a, - 0xf0, 0x80, 0xfb, 0x0a, 0x0f, 0x41, 0xce, 0x23, 0x8d, 0x1a, 0x76, 0xb0, 0xc5, 0xc8, 0xa8, 0x02, - 0x7f, 0x96, 0x13, 0xe6, 0x01, 0x76, 0x82, 0xab, 0xc6, 0xa9, 0x7e, 0xaf, 0x98, 0xab, 0x46, 0x58, - 0x28, 0x0e, 0x0c, 0xef, 0x83, 0xd5, 0x70, 0x17, 0x0d, 0xb5, 0xa5, 0x9e, 0x5f, 0xdb, 0x99, 0x7e, - 0xaf, 0xb8, 0x5a, 0x19, 0x44, 0x44, 0xc3, 0x4a, 0xe0, 0x4d, 0x90, 0xb1, 0x3d, 0xf1, 0xd9, 0xad, - 0xbe, 0xd8, 0x9e, 0xb5, 0xd8, 0xc9, 0xef, 0x73, 0xf9, 0xf1, 0xa7, 0x0e, 0x28, 0xb8, 0xae, 0xff, - 0x3e, 0x98, 0x03, 0x3c, 0xe1, 0xe0, 0x4e, 0x6c, 0xfb, 0x90, 0x33, 0xef, 0xc2, 0xf3, 0x6d, 0x1e, - 0xc9, 0xb1, 0x38, 0xbe, 0x09, 0x75, 0x99, 0xed, 0x94, 0xe4, 0x8f, 0x31, 0xa5, 0x5d, 0x97, 0xed, - 0xd3, 
0x1a, 0xa3, 0xb6, 0xdb, 0x94, 0x23, 0x3a, 0xb6, 0x16, 0x9d, 0x05, 0x19, 0x35, 0x35, 0x85, - 0xe3, 0x69, 0xe9, 0xd5, 0xb6, 0x24, 0xa1, 0x80, 0xa7, 0x1f, 0x0f, 0xe6, 0x85, 0x98, 0xa1, 0xf7, - 0x5e, 0x58, 0x5e, 0xfc, 0x4b, 0x65, 0xe3, 0xf8, 0xdc, 0xf8, 0x30, 0x5a, 0x2c, 0x65, 0xa6, 0x6f, - 0x4e, 0x99, 0xe9, 0xf1, 0x89, 0x36, 0x76, 0xad, 0x84, 0xef, 0x81, 0x79, 0x2c, 0xd1, 0xe5, 0x88, - 0xbc, 0x3c, 0x25, 0x7a, 0xd4, 0x56, 0xa3, 0x5f, 0x1e, 0x14, 0x4d, 0x01, 0xc2, 0xb7, 0x78, 0x94, - 0xb8, 0x2c, 0xff, 0xe0, 0x97, 0x7b, 0xf8, 0x82, 0xf1, 0x3f, 0xe9, 0x6c, 0x48, 0x3e, 0xe6, 0x1f, - 0x38, 0xe1, 0x11, 0xc5, 0x6f, 0xe8, 0x1f, 0x01, 0x38, 0xbc, 0xe4, 0x4c, 0xb1, 0x42, 0x9d, 0x03, - 0xf3, 0x6e, 0xb7, 0x73, 0x80, 0x65, 0x0d, 0xa5, 0x23, 0x03, 0x2b, 0x82, 0x8a, 0x14, 0xd7, 0x78, - 0xf3, 0xd1, 0xd3, 0xc2, 0xcc, 0xe3, 0xa7, 0x85, 0x99, 0x27, 0x4f, 0x0b, 0x33, 0x9f, 0xf7, 0x0b, - 0xda, 0xa3, 0x7e, 0x41, 0x7b, 0xdc, 0x2f, 0x68, 0x4f, 0xfa, 0x05, 0xed, 0xcf, 0x7e, 0x41, 0xfb, - 0xea, 0xaf, 0xc2, 0xcc, 0xfb, 0xf9, 0x71, 0xbf, 0x96, 0xfe, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xd4, - 0x46, 0x40, 0xf2, 0x61, 0x15, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/networking/v1/generated.proto", fileDescriptor_2c41434372fec1d7) +} + +var fileDescriptor_2c41434372fec1d7 = []byte{ + // 1652 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x6f, 0x1b, 0x55, + 0x14, 0xce, 0x38, 0x71, 0xec, 0x1c, 0x27, 0x69, 0x72, 0x69, 0x85, 0x09, 0xc2, 0x0e, 0x23, 0xda, + 0x06, 0xda, 0xda, 0x34, 0xad, 0x10, 0x6c, 0x78, 0x4c, 0x9a, 0xa6, 0xa1, 0xa9, 0x63, 0x5d, 0x5b, + 0x45, 0x20, 0x1e, 0x9d, 0x8c, 0x6f, 0x9c, 0x69, 0xc6, 0x33, 0xa3, 0x3b, 0xd7, 0xa5, 0x95, 0x10, + 0x62, 0xc3, 0x82, 0x1d, 0x7f, 0x01, 0xf1, 0x0b, 0x10, 0x2c, 0x90, 0x10, 0x14, 0x36, 0xa8, 0xcb, + 0x4a, 0x6c, 0xba, 0xc1, 0xa2, 0xe6, 0x5f, 0x64, 0x85, 0xee, 0x63, 0x1e, 0x7e, 0xd5, 0xa6, 0xaa, + 0xb2, 0x4a, 0xee, 0x39, 0xe7, 0x7e, 0xe7, 0x71, 0xcf, 0x6b, 0x0c, 0x6b, 0x87, 0x6f, 0x06, 0x25, + 0xdb, 0x2b, 0x9b, 0xbe, 0x5d, 0x76, 0x09, 0xfb, 0xdc, 0xa3, 0x87, 0xb6, 0xdb, 0x2c, 0xdf, 0xb9, + 0x58, 0x6e, 0x12, 0x97, 0x50, 0x93, 0x91, 0x46, 0xc9, 0xa7, 0x1e, 0xf3, 0x50, 0x5e, 0x4a, 0x96, + 0x4c, 0xdf, 0x2e, 0xc5, 0x92, 0xa5, 0x3b, 0x17, 0x57, 0x2e, 0x34, 0x6d, 0x76, 0xd0, 0xde, 0x2b, + 0x59, 0x5e, 0xab, 0xdc, 0xf4, 0x9a, 0x5e, 0x59, 0x5c, 0xd8, 0x6b, 0xef, 0x8b, 0x93, 0x38, 0x88, + 0xff, 0x24, 0xd0, 0x8a, 0x9e, 0x50, 0x69, 0x79, 0x94, 0x0c, 0x51, 0xb6, 0x72, 0x39, 0x96, 0x69, + 0x99, 0xd6, 0x81, 0xed, 0x12, 0x7a, 0xaf, 0xec, 0x1f, 0x36, 0x39, 0x21, 0x28, 0xb7, 0x08, 0x33, + 0x87, 0xdd, 0x2a, 0x8f, 0xba, 0x45, 0xdb, 0x2e, 0xb3, 0x5b, 0x64, 0xe0, 0xc2, 0x1b, 0xe3, 0x2e, + 0x04, 0xd6, 0x01, 0x69, 0x99, 0x03, 0xf7, 0x2e, 0x8d, 0xba, 0xd7, 0x66, 0xb6, 0x53, 0xb6, 0x5d, + 0x16, 0x30, 0xda, 0x7f, 0x49, 0xff, 0x4d, 0x83, 0x13, 0xd7, 0xea, 0xf5, 0xea, 0xb6, 0xdb, 0xa4, + 0x24, 0x08, 0xaa, 0x26, 0x3b, 0x40, 0xab, 0x30, 0xe3, 0x9b, 0xec, 0x20, 0xaf, 0xad, 0x6a, 0x6b, + 0x73, 0xc6, 0xfc, 0x83, 0x4e, 0x71, 0xaa, 0xdb, 0x29, 0xce, 0x70, 0x1e, 0x16, 0x1c, 0x74, 0x19, + 0xb2, 0xfc, 0x6f, 0xfd, 0x9e, 0x4f, 0xf2, 0xd3, 0x42, 0x2a, 0xdf, 0xed, 0x14, 0xb3, 0x55, 0x45, + 0x3b, 0x4a, 0xfc, 0x8f, 0x23, 0x49, 0x54, 0x83, 0xcc, 0x9e, 0x69, 0x1d, 0x12, 0xb7, 0x91, 0x4f, + 0xad, 0x6a, 0x6b, 0xb9, 0xf5, 0xb5, 0xd2, 0xa8, 0xe7, 0x2b, 0x29, 0x7b, 0x0c, 0x29, 0x6f, 0x9c, + 0x50, 0x46, 0x64, 0x14, 0x01, 0x87, 0x48, 0xfa, 0x3e, 0x9c, 0x4c, 0xd8, 0x8f, 0xdb, 0x0e, 0xb9, + 0x69, 0x3a, 0x6d, 0x82, 0x2a, 0x90, 0xe6, 0x8a, 0x83, 0xbc, 0xb6, 0x3a, 0xbd, 0x96, 
0x5b, 0x7f, + 0x75, 0xb4, 0xaa, 0x3e, 0xf7, 0x8d, 0x05, 0xa5, 0x2b, 0xcd, 0x4f, 0x01, 0x96, 0x30, 0xfa, 0x2e, + 0x64, 0xb6, 0xab, 0x86, 0xe3, 0x59, 0x87, 0x3c, 0x3e, 0x96, 0xdd, 0xa0, 0xfd, 0xf1, 0xd9, 0xd8, + 0xbe, 0x82, 0xb1, 0xe0, 0x20, 0x1d, 0x66, 0xc9, 0x5d, 0x8b, 0xf8, 0x2c, 0x9f, 0x5a, 0x9d, 0x5e, + 0x9b, 0x33, 0xa0, 0xdb, 0x29, 0xce, 0x6e, 0x0a, 0x0a, 0x56, 0x1c, 0xfd, 0xeb, 0x14, 0x64, 0x94, + 0x5a, 0x74, 0x0b, 0xb2, 0x3c, 0x7d, 0x1a, 0x26, 0x33, 0x05, 0x6a, 0x6e, 0xfd, 0xf5, 0x84, 0xbd, + 0xd1, 0x6b, 0x96, 0xfc, 0xc3, 0x26, 0x27, 0x04, 0x25, 0x2e, 0xcd, 0x6d, 0xdf, 0xdd, 0xbb, 0x4d, + 0x2c, 0x76, 0x83, 0x30, 0xd3, 0x40, 0xca, 0x0e, 0x88, 0x69, 0x38, 0x42, 0x45, 0x5b, 0x30, 0x13, + 0xf8, 0xc4, 0x52, 0x81, 0x3f, 0x3d, 0x36, 0xf0, 0x35, 0x9f, 0x58, 0xb1, 0x6b, 0xfc, 0x84, 0x05, + 0x00, 0xda, 0x85, 0xd9, 0x80, 0x99, 0xac, 0x1d, 0x88, 0x87, 0xcf, 0xad, 0x9f, 0x1d, 0x0f, 0x25, + 0xc4, 0x8d, 0x45, 0x05, 0x36, 0x2b, 0xcf, 0x58, 0xc1, 0xe8, 0x7f, 0x68, 0xb0, 0xd8, 0xfb, 0xda, + 0xe8, 0x26, 0x64, 0x02, 0x42, 0xef, 0xd8, 0x16, 0xc9, 0xcf, 0x08, 0x25, 0xe5, 0xf1, 0x4a, 0xa4, + 0x7c, 0x98, 0x2f, 0x39, 0x9e, 0x2b, 0x8a, 0x86, 0x43, 0x30, 0xf4, 0x01, 0x64, 0x29, 0x09, 0xbc, + 0x36, 0xb5, 0x88, 0xb2, 0xfe, 0x42, 0x12, 0x98, 0xd7, 0x3d, 0x87, 0xe4, 0xc9, 0xda, 0xd8, 0xf1, + 0x2c, 0xd3, 0x91, 0xa1, 0xc4, 0x64, 0x9f, 0x50, 0xe2, 0x5a, 0xc4, 0x98, 0xe7, 0x59, 0x8e, 0x15, + 0x04, 0x8e, 0xc0, 0x78, 0x15, 0xcd, 0x2b, 0x43, 0x36, 0x1c, 0xf3, 0x58, 0x1e, 0x74, 0xa7, 0xe7, + 0x41, 0x5f, 0x1b, 0x1b, 0x20, 0x61, 0xd7, 0xa8, 0x57, 0xd5, 0x7f, 0xd5, 0x60, 0x29, 0x29, 0xb8, + 0x63, 0x07, 0x0c, 0x7d, 0x3c, 0xe0, 0x44, 0x69, 0x32, 0x27, 0xf8, 0x6d, 0xe1, 0xc2, 0x92, 0x52, + 0x95, 0x0d, 0x29, 0x09, 0x07, 0xae, 0x43, 0xda, 0x66, 0xa4, 0x15, 0x88, 0x12, 0xc9, 0xad, 0x9f, + 0x99, 0xcc, 0x83, 0xb8, 0x3a, 0xb7, 0xf9, 0x65, 0x2c, 0x31, 0xf4, 0xbf, 0x35, 0x28, 0x26, 0xc5, + 0xaa, 0x26, 0x35, 0x5b, 0x84, 0x11, 0x1a, 0x44, 0x8f, 0x87, 0xd6, 0x20, 0x6b, 0x56, 0xb7, 0xb7, + 0xa8, 0xd7, 0xf6, 0xc3, 0xd2, 0xe5, 0xa6, 0xbd, 0xa7, 0x68, 0x38, 0xe2, 0xf2, 0x02, 0x3f, 0xb4, + 0x55, 0x97, 0x4a, 0x14, 0xf8, 0x75, 0xdb, 0x6d, 0x60, 0xc1, 0xe1, 0x12, 0xae, 0xd9, 0x0a, 0x9b, + 0x5f, 0x24, 0x51, 0x31, 0x5b, 0x04, 0x0b, 0x0e, 0x2a, 0x42, 0x3a, 0xb0, 0x3c, 0x5f, 0x66, 0xf0, + 0x9c, 0x31, 0xc7, 0x4d, 0xae, 0x71, 0x02, 0x96, 0x74, 0x74, 0x0e, 0xe6, 0xb8, 0x60, 0xe0, 0x9b, + 0x16, 0xc9, 0xa7, 0x85, 0xd0, 0x42, 0xb7, 0x53, 0x9c, 0xab, 0x84, 0x44, 0x1c, 0xf3, 0xf5, 0x1f, + 0xfa, 0xde, 0x87, 0x3f, 0x1d, 0x5a, 0x07, 0xb0, 0x3c, 0x97, 0x51, 0xcf, 0x71, 0x48, 0xd8, 0x8d, + 0xa2, 0xa4, 0xd9, 0x88, 0x38, 0x38, 0x21, 0x85, 0x6c, 0x00, 0x3f, 0x8a, 0x8d, 0x4a, 0x9e, 0xb7, + 0x26, 0x0b, 0xfd, 0x90, 0x98, 0x1a, 0x8b, 0x5c, 0x55, 0x82, 0x91, 0x00, 0xd7, 0x7f, 0xd4, 0x20, + 0xa7, 0xee, 0x1f, 0x43, 0x3a, 0x5d, 0xed, 0x4d, 0xa7, 0x97, 0xc7, 0x8f, 0x96, 0xe1, 0x99, 0xf4, + 0xb3, 0x06, 0x2b, 0xa1, 0xd5, 0x9e, 0xd9, 0x30, 0x4c, 0xc7, 0x74, 0x2d, 0x42, 0xc3, 0x4e, 0xbd, + 0x02, 0x29, 0x3b, 0x4c, 0x1f, 0x50, 0x00, 0xa9, 0xed, 0x2a, 0x4e, 0xd9, 0x3e, 0x3a, 0x0f, 0xd9, + 0x03, 0x2f, 0x60, 0x22, 0x31, 0x64, 0xea, 0x44, 0x06, 0x5f, 0x53, 0x74, 0x1c, 0x49, 0xa0, 0x2a, + 0xa4, 0x7d, 0x8f, 0xb2, 0x20, 0x3f, 0x23, 0x0c, 0x3e, 0x37, 0xd6, 0xe0, 0xaa, 0x47, 0x99, 0xea, + 0xa5, 0xf1, 0x88, 0xe2, 0x08, 0x58, 0x02, 0xe9, 0x5f, 0xc0, 0x0b, 0x43, 0x2c, 0x97, 0x57, 0xd0, + 0x67, 0x90, 0xb1, 0x25, 0x53, 0x4d, 0xc4, 0xcb, 0x63, 0x15, 0x0e, 0xf1, 0x3f, 0x1e, 0xc4, 0xe1, + 0xc0, 0x0d, 0x51, 0xf5, 0xef, 0x35, 0x58, 0x1e, 0xb0, 0x54, 0xec, 0x12, 0x1e, 0x65, 0x22, 0x62, + 0xe9, 0xc4, 
0x2e, 0xe1, 0x51, 0x86, 0x05, 0x07, 0x5d, 0x87, 0xac, 0x58, 0x45, 0x2c, 0xcf, 0x51, + 0x51, 0x2b, 0x87, 0x51, 0xab, 0x2a, 0xfa, 0x51, 0xa7, 0xf8, 0xe2, 0xe0, 0x7e, 0x56, 0x0a, 0xd9, + 0x38, 0x02, 0xe0, 0x55, 0x47, 0x28, 0xf5, 0xa8, 0x2a, 0x4c, 0x51, 0x75, 0x9b, 0x9c, 0x80, 0x25, + 0x5d, 0xff, 0x2e, 0x4e, 0x4a, 0xbe, 0x2b, 0x70, 0xfb, 0xf8, 0x8b, 0xf4, 0xcf, 0x72, 0xfe, 0x5e, + 0x58, 0x70, 0x90, 0x0f, 0x4b, 0x76, 0xdf, 0x72, 0x31, 0x71, 0xd3, 0x8d, 0x6e, 0x18, 0x79, 0x85, + 0xbc, 0xd4, 0xcf, 0xc1, 0x03, 0xe8, 0xfa, 0x2d, 0x18, 0x90, 0xe2, 0xed, 0xfe, 0x80, 0x31, 0x7f, + 0x48, 0xe1, 0x8c, 0xde, 0x66, 0x62, 0xed, 0x59, 0xe1, 0x53, 0xbd, 0x5e, 0xc5, 0x02, 0x45, 0xff, + 0x46, 0x83, 0x53, 0x43, 0x07, 0x67, 0xd4, 0xd8, 0xb4, 0x91, 0x8d, 0xad, 0xa2, 0x5e, 0x54, 0xc6, + 0xe0, 0xfc, 0x68, 0x4b, 0x7a, 0x91, 0xf9, 0x8b, 0x0f, 0x7b, 0x7f, 0xfd, 0xcf, 0x54, 0xf4, 0x22, + 0xa2, 0xab, 0xbd, 0x1b, 0xc5, 0x5b, 0x74, 0x1d, 0xae, 0x59, 0xf5, 0xd0, 0x93, 0x89, 0xf8, 0x45, + 0x3c, 0x3c, 0x20, 0x8d, 0x1a, 0xb0, 0xd8, 0x20, 0xfb, 0x66, 0xdb, 0x61, 0x4a, 0xb7, 0x8a, 0xda, + 0xe4, 0xeb, 0x26, 0xea, 0x76, 0x8a, 0x8b, 0x57, 0x7a, 0x30, 0x70, 0x1f, 0x26, 0xda, 0x80, 0x69, + 0xe6, 0x84, 0xed, 0xe6, 0x95, 0xb1, 0xd0, 0xf5, 0x9d, 0x9a, 0x91, 0x53, 0xee, 0x4f, 0xd7, 0x77, + 0x6a, 0x98, 0xdf, 0x46, 0xef, 0x43, 0x9a, 0xb6, 0x1d, 0xc2, 0x97, 0xa9, 0xe9, 0x89, 0xf6, 0x32, + 0xfe, 0xa6, 0x71, 0xf9, 0xf3, 0x53, 0x80, 0x25, 0x84, 0xfe, 0x25, 0x2c, 0xf4, 0x6c, 0x5c, 0xa8, + 0x05, 0xf3, 0x4e, 0xa2, 0x84, 0x55, 0x14, 0x2e, 0xfd, 0xaf, 0xba, 0x57, 0x0d, 0xe7, 0xa4, 0xd2, + 0x38, 0x9f, 0xe4, 0xe1, 0x1e, 0x78, 0xdd, 0x04, 0x88, 0x7d, 0xe5, 0x95, 0xc8, 0xcb, 0x47, 0x76, + 0x1b, 0x55, 0x89, 0xbc, 0xaa, 0x02, 0x2c, 0xe9, 0x7c, 0x7a, 0x05, 0xc4, 0xa2, 0x84, 0x55, 0xe2, + 0x7e, 0x19, 0x4d, 0xaf, 0x5a, 0xc4, 0xc1, 0x09, 0x29, 0xfd, 0x77, 0x0d, 0x16, 0x2a, 0xd2, 0xe4, + 0xaa, 0xe7, 0xd8, 0xd6, 0xbd, 0x63, 0x58, 0xb4, 0x6e, 0xf4, 0x2c, 0x5a, 0x4f, 0x68, 0xd3, 0x3d, + 0x86, 0x8d, 0xdc, 0xb4, 0x7e, 0xd2, 0xe0, 0xf9, 0x1e, 0xc9, 0xcd, 0xb8, 0x19, 0x45, 0x23, 0x41, + 0x1b, 0x37, 0x12, 0x7a, 0x10, 0x44, 0x69, 0x0d, 0x1d, 0x09, 0x68, 0x0b, 0x52, 0xcc, 0x53, 0x39, + 0x3a, 0x31, 0x1c, 0x21, 0x34, 0x9e, 0x6d, 0x75, 0x0f, 0xa7, 0x98, 0xa7, 0xff, 0xa2, 0x41, 0xbe, + 0x47, 0x2a, 0xd9, 0x44, 0x9f, 0xbd, 0xdd, 0x37, 0x60, 0x66, 0x9f, 0x7a, 0xad, 0xa7, 0xb1, 0x3c, + 0x0a, 0xfa, 0x55, 0xea, 0xb5, 0xb0, 0x80, 0xd1, 0xef, 0x6b, 0xb0, 0xdc, 0x23, 0x79, 0x0c, 0x0b, + 0xc9, 0x4e, 0xef, 0x42, 0x72, 0x76, 0x42, 0x1f, 0x46, 0xac, 0x25, 0xf7, 0x53, 0x7d, 0x1e, 0x70, + 0x5f, 0xd1, 0x3e, 0xe4, 0x7c, 0xaf, 0x51, 0x23, 0x0e, 0xb1, 0x98, 0x37, 0xac, 0xc0, 0x9f, 0xe4, + 0x84, 0xb9, 0x47, 0x9c, 0xf0, 0xaa, 0x71, 0xa2, 0xdb, 0x29, 0xe6, 0xaa, 0x31, 0x16, 0x4e, 0x02, + 0xa3, 0xbb, 0xb0, 0x1c, 0xed, 0xa2, 0x91, 0xb6, 0xd4, 0xd3, 0x6b, 0x3b, 0xd5, 0xed, 0x14, 0x97, + 0x2b, 0xfd, 0x88, 0x78, 0x50, 0x09, 0xba, 0x06, 0x19, 0xdb, 0x17, 0x9f, 0xdd, 0xea, 0x8b, 0xed, + 0x49, 0x8b, 0x9d, 0xfc, 0x3e, 0x97, 0x1f, 0x7f, 0xea, 0x80, 0xc3, 0xeb, 0xfa, 0x5f, 0xfd, 0x39, + 0xc0, 0x13, 0x0e, 0x6d, 0x25, 0xb6, 0x0f, 0x39, 0xf3, 0xce, 0x3d, 0xdd, 0xe6, 0xd1, 0x3b, 0x16, + 0x47, 0x37, 0xa1, 0x36, 0xb3, 0x9d, 0x92, 0xfc, 0x31, 0xa6, 0xb4, 0xed, 0xb2, 0x5d, 0x5a, 0x63, + 0xd4, 0x76, 0x9b, 0x72, 0x44, 0x27, 0xd6, 0xa2, 0xd3, 0x90, 0x51, 0x53, 0x53, 0x38, 0x9e, 0x96, + 0x5e, 0x6d, 0x4a, 0x12, 0x0e, 0x79, 0xfa, 0x51, 0x7f, 0x5e, 0x88, 0x19, 0x7a, 0xfb, 0x99, 0xe5, + 0xc5, 0x73, 0x2a, 0x1b, 0x47, 0xe7, 0xc6, 0x27, 0xf1, 0x62, 0x29, 0x33, 0x7d, 0x7d, 0xc2, 0x4c, + 0x4f, 0x4e, 0xb4, 0x91, 0x6b, 0x25, 
0xfa, 0x10, 0x66, 0x89, 0x44, 0x97, 0x23, 0xf2, 0xe2, 0x84, + 0xe8, 0x71, 0x5b, 0x8d, 0x7f, 0x79, 0x50, 0x34, 0x05, 0x88, 0xde, 0xe1, 0x51, 0xe2, 0xb2, 0xfc, + 0x83, 0x5f, 0xee, 0xe1, 0x73, 0xc6, 0x4b, 0xd2, 0xd9, 0x88, 0x7c, 0xc4, 0x3f, 0x70, 0xa2, 0x23, + 0x4e, 0xde, 0xd0, 0x3f, 0x05, 0x34, 0xb8, 0xe4, 0x4c, 0xb0, 0x42, 0x9d, 0x81, 0x59, 0xb7, 0xdd, + 0xda, 0x23, 0xb2, 0x86, 0xd2, 0xb1, 0x81, 0x15, 0x41, 0xc5, 0x8a, 0x6b, 0xbc, 0xfd, 0xe0, 0x71, + 0x61, 0xea, 0xe1, 0xe3, 0xc2, 0xd4, 0xa3, 0xc7, 0x85, 0xa9, 0xaf, 0xba, 0x05, 0xed, 0x41, 0xb7, + 0xa0, 0x3d, 0xec, 0x16, 0xb4, 0x47, 0xdd, 0x82, 0xf6, 0x4f, 0xb7, 0xa0, 0x7d, 0xfb, 0x6f, 0x61, + 0xea, 0xa3, 0xfc, 0xa8, 0x5f, 0x4b, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x24, 0x03, 0xec, 0x04, + 0x48, 0x15, 0x00, 0x00, } func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto index b50dd491e0..c72fdc8f37 100644 --- a/vendor/k8s.io/api/networking/v1/generated.proto +++ b/vendor/k8s.io/api/networking/v1/generated.proto @@ -84,6 +84,7 @@ message IPBlock { // Valid examples are "192.168.1.0/24" or "2001:db8::/64" // Except values will be rejected if they are outside the cidr range // +optional + // +listType=atomic repeated string except = 2; } @@ -95,7 +96,7 @@ message Ingress { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -120,7 +121,7 @@ message IngressBackend { // service.Port must not be specified. // This is a mutually exclusive setting with "Service". // +optional - optional k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; + optional .k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; } // IngressClass represents the class of the Ingress, referenced by the Ingress @@ -132,7 +133,7 @@ message IngressClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -144,7 +145,7 @@ message IngressClass { message IngressClassList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of IngressClasses. repeated IngressClass items = 2; @@ -199,7 +200,7 @@ message IngressList { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of Ingress. 
repeated Ingress items = 2; @@ -225,6 +226,7 @@ message IngressLoadBalancerIngress { message IngressLoadBalancerStatus { // ingress is a list containing ingress points for the load-balancer. // +optional + // +listType=atomic repeated IngressLoadBalancerIngress ingress = 1; } @@ -379,7 +381,7 @@ message NetworkPolicy { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec represents the specification of the desired behavior for this NetworkPolicy. // +optional @@ -396,6 +398,7 @@ message NetworkPolicyEgressRule { // If this field is present and contains at least one item, then this rule allows // traffic only if the traffic matches at least one port in the list. // +optional + // +listType=atomic repeated NetworkPolicyPort ports = 1; // to is a list of destinations for outgoing traffic of pods selected for this rule. @@ -404,6 +407,7 @@ message NetworkPolicyEgressRule { // destination). If this field is present and contains at least one item, this rule // allows traffic only if the traffic matches at least one item in the to list. // +optional + // +listType=atomic repeated NetworkPolicyPeer to = 2; } @@ -416,6 +420,7 @@ message NetworkPolicyIngressRule { // If this field is present and contains at least one item, then this rule allows // traffic only if the traffic matches at least one port in the list. // +optional + // +listType=atomic repeated NetworkPolicyPort ports = 1; // from is a list of sources which should be able to access the pods selected for this rule. @@ -424,6 +429,7 @@ message NetworkPolicyIngressRule { // source). If this field is present and contains at least one item, this rule // allows traffic only if the traffic matches at least one item in the from list. // +optional + // +listType=atomic repeated NetworkPolicyPeer from = 2; } @@ -432,7 +438,7 @@ message NetworkPolicyList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated NetworkPolicy items = 2; @@ -448,7 +454,7 @@ message NetworkPolicyPeer { // the pods matching podSelector in the Namespaces selected by NamespaceSelector. // Otherwise it selects the pods matching podSelector in the policy's own namespace. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; // namespaceSelector selects namespaces using cluster-scoped labels. This field follows // standard label selector semantics; if present but empty, it selects all namespaces. @@ -457,7 +463,7 @@ message NetworkPolicyPeer { // the pods matching podSelector in the namespaces selected by namespaceSelector. // Otherwise it selects all pods in the namespaces selected by namespaceSelector. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; // ipBlock defines policy on a particular IPBlock. If this field is set then // neither of the other fields can be. 
@@ -477,7 +483,7 @@ message NetworkPolicyPort { // numbers. // If present, only traffic on the specified protocol AND port will be matched. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; // endPort indicates that the range of ports from port to endPort if set, inclusive, // should be allowed by the policy. This field cannot be defined if the port field @@ -495,7 +501,7 @@ message NetworkPolicySpec { // the ingress rules for each are combined additively. // This field is NOT optional and follows standard label selector semantics. // An empty podSelector matches all pods in this namespace. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; // ingress is a list of ingress rules to be applied to the selected pods. // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod @@ -505,6 +511,7 @@ message NetworkPolicySpec { // this field is empty then this NetworkPolicy does not allow any traffic (and serves // solely to ensure that the pods it selects are isolated by default) // +optional + // +listType=atomic repeated NetworkPolicyIngressRule ingress = 2; // egress is a list of egress rules to be applied to the selected pods. Outgoing traffic @@ -515,6 +522,7 @@ message NetworkPolicySpec { // solely to ensure that the pods it selects are isolated by default). // This field is beta-level in 1.8 // +optional + // +listType=atomic repeated NetworkPolicyEgressRule egress = 3; // policyTypes is a list of rule types that the NetworkPolicy relates to. @@ -528,10 +536,12 @@ message NetworkPolicySpec { // an egress section and would otherwise default to just [ "Ingress" ]). // This field is beta-level in 1.8 // +optional + // +listType=atomic repeated string policyTypes = 4; } // ServiceBackendPort is the service port being referenced. +// +structType=atomic message ServiceBackendPort { // name is the name of the port on the Service. // This is a mutually exclusive setting with "Number". diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go index a17e2cb5b3..d75e27558d 100644 --- a/vendor/k8s.io/api/networking/v1/types.go +++ b/vendor/k8s.io/api/networking/v1/types.go @@ -24,6 +24,7 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.7 // NetworkPolicy describes what network traffic is allowed for a set of Pods type NetworkPolicy struct { @@ -74,6 +75,7 @@ type NetworkPolicySpec struct { // this field is empty then this NetworkPolicy does not allow any traffic (and serves // solely to ensure that the pods it selects are isolated by default) // +optional + // +listType=atomic Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty" protobuf:"bytes,2,rep,name=ingress"` // egress is a list of egress rules to be applied to the selected pods. Outgoing traffic @@ -84,6 +86,7 @@ type NetworkPolicySpec struct { // solely to ensure that the pods it selects are isolated by default). // This field is beta-level in 1.8 // +optional + // +listType=atomic Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"` // policyTypes is a list of rule types that the NetworkPolicy relates to. @@ -97,6 +100,7 @@ type NetworkPolicySpec struct { // an egress section and would otherwise default to just [ "Ingress" ]). 
// This field is beta-level in 1.8 // +optional + // +listType=atomic PolicyTypes []PolicyType `json:"policyTypes,omitempty" protobuf:"bytes,4,rep,name=policyTypes,casttype=PolicyType"` } @@ -109,6 +113,7 @@ type NetworkPolicyIngressRule struct { // If this field is present and contains at least one item, then this rule allows // traffic only if the traffic matches at least one port in the list. // +optional + // +listType=atomic Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` // from is a list of sources which should be able to access the pods selected for this rule. @@ -117,6 +122,7 @@ type NetworkPolicyIngressRule struct { // source). If this field is present and contains at least one item, this rule // allows traffic only if the traffic matches at least one item in the from list. // +optional + // +listType=atomic From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` } @@ -130,6 +136,7 @@ type NetworkPolicyEgressRule struct { // If this field is present and contains at least one item, then this rule allows // traffic only if the traffic matches at least one port in the list. // +optional + // +listType=atomic Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` // to is a list of destinations for outgoing traffic of pods selected for this rule. @@ -138,6 +145,7 @@ type NetworkPolicyEgressRule struct { // destination). If this field is present and contains at least one item, this rule // allows traffic only if the traffic matches at least one item in the to list. // +optional + // +listType=atomic To []NetworkPolicyPeer `json:"to,omitempty" protobuf:"bytes,2,rep,name=to"` } @@ -175,6 +183,7 @@ type IPBlock struct { // Valid examples are "192.168.1.0/24" or "2001:db8::/64" // Except values will be rejected if they are outside the cidr range // +optional + // +listType=atomic Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"` } @@ -206,6 +215,7 @@ type NetworkPolicyPeer struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // NetworkPolicyList is a list of NetworkPolicy objects. type NetworkPolicyList struct { @@ -222,6 +232,7 @@ type NetworkPolicyList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // Ingress is a collection of rules that allow inbound connections to reach the // endpoints defined by a backend. An Ingress can be configured to give services @@ -247,6 +258,7 @@ type Ingress struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // IngressList is a collection of Ingress. type IngressList struct { @@ -329,6 +341,7 @@ type IngressStatus struct { type IngressLoadBalancerStatus struct { // ingress is a list containing ingress points for the load-balancer. // +optional + // +listType=atomic Ingress []IngressLoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` } @@ -406,7 +419,7 @@ type IngressRule struct { // default backend, is left to the controller fulfilling the Ingress. Http is // currently the only supported IngressRuleValue. 
// +optional - IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"` + IngressRuleValue `json:",inline" protobuf:"bytes,2,opt,name=ingressRuleValue"` } // IngressRuleValue represents a rule to apply against incoming requests. If the @@ -518,6 +531,7 @@ type IngressServiceBackend struct { } // ServiceBackendPort is the service port being referenced. +// +structType=atomic type ServiceBackendPort struct { // name is the name of the port on the Service. // This is a mutually exclusive setting with "Number". @@ -533,6 +547,7 @@ type ServiceBackendPort struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // IngressClass represents the class of the Ingress, referenced by the Ingress // Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be @@ -607,6 +622,7 @@ type IngressClassParametersReference struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // IngressClassList is a collection of IngressClasses. type IngressClassList struct { diff --git a/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..21e8c671a5 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,58 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Ingress) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IngressClass) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IngressClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *IngressList) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *NetworkPolicy) APILifecycleIntroduced() (major, minor int) { + return 1, 7 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *NetworkPolicyList) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go b/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go index 949ea513fe..0d42034837 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1alpha1/generated.proto +// source: k8s.io/api/networking/v1alpha1/generated.proto package v1alpha1 @@ -47,7 +47,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *IPAddress) Reset() { *m = IPAddress{} } func (*IPAddress) ProtoMessage() {} func (*IPAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_c1b7ac8d7d97acec, []int{0} + return fileDescriptor_c1cb39e7b48ce50d, []int{0} } func (m *IPAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +75,7 @@ var xxx_messageInfo_IPAddress proto.InternalMessageInfo func (m *IPAddressList) Reset() { *m = IPAddressList{} } func (*IPAddressList) ProtoMessage() {} func (*IPAddressList) Descriptor() ([]byte, []int) { - return fileDescriptor_c1b7ac8d7d97acec, []int{1} + return fileDescriptor_c1cb39e7b48ce50d, []int{1} } func (m *IPAddressList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +103,7 @@ var xxx_messageInfo_IPAddressList proto.InternalMessageInfo func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} } func (*IPAddressSpec) ProtoMessage() {} func (*IPAddressSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_c1b7ac8d7d97acec, []int{2} + return fileDescriptor_c1cb39e7b48ce50d, []int{2} } func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +131,7 @@ var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo func (m *ParentReference) Reset() { *m = ParentReference{} } func (*ParentReference) ProtoMessage() {} func (*ParentReference) Descriptor() ([]byte, []int) { - return fileDescriptor_c1b7ac8d7d97acec, []int{3} + return fileDescriptor_c1cb39e7b48ce50d, []int{3} } func (m *ParentReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +159,7 @@ var xxx_messageInfo_ParentReference proto.InternalMessageInfo func (m *ServiceCIDR) Reset() { *m = ServiceCIDR{} } func (*ServiceCIDR) ProtoMessage() {} func (*ServiceCIDR) Descriptor() ([]byte, []int) { - return fileDescriptor_c1b7ac8d7d97acec, []int{4} + return fileDescriptor_c1cb39e7b48ce50d, []int{4} } func (m *ServiceCIDR) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -187,7 +187,7 @@ var xxx_messageInfo_ServiceCIDR proto.InternalMessageInfo func (m *ServiceCIDRList) Reset() { *m = ServiceCIDRList{} } func (*ServiceCIDRList) ProtoMessage() 
{} func (*ServiceCIDRList) Descriptor() ([]byte, []int) { - return fileDescriptor_c1b7ac8d7d97acec, []int{5} + return fileDescriptor_c1cb39e7b48ce50d, []int{5} } func (m *ServiceCIDRList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -215,7 +215,7 @@ var xxx_messageInfo_ServiceCIDRList proto.InternalMessageInfo func (m *ServiceCIDRSpec) Reset() { *m = ServiceCIDRSpec{} } func (*ServiceCIDRSpec) ProtoMessage() {} func (*ServiceCIDRSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_c1b7ac8d7d97acec, []int{6} + return fileDescriptor_c1cb39e7b48ce50d, []int{6} } func (m *ServiceCIDRSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -243,7 +243,7 @@ var xxx_messageInfo_ServiceCIDRSpec proto.InternalMessageInfo func (m *ServiceCIDRStatus) Reset() { *m = ServiceCIDRStatus{} } func (*ServiceCIDRStatus) ProtoMessage() {} func (*ServiceCIDRStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_c1b7ac8d7d97acec, []int{7} + return fileDescriptor_c1cb39e7b48ce50d, []int{7} } func (m *ServiceCIDRStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -280,52 +280,51 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1alpha1/generated.proto", fileDescriptor_c1b7ac8d7d97acec) -} - -var fileDescriptor_c1b7ac8d7d97acec = []byte{ - // 648 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x6e, 0xd3, 0x4c, - 0x14, 0x8d, 0xdb, 0xa4, 0xaa, 0x27, 0x5f, 0xbf, 0x52, 0xaf, 0xa2, 0x2e, 0x9c, 0x28, 0x6c, 0x8a, - 0xa0, 0x33, 0x24, 0x42, 0x88, 0x2d, 0x6e, 0xa5, 0xaa, 0x12, 0xb4, 0x65, 0xba, 0x02, 0x75, 0xc1, - 0xc4, 0xbe, 0x75, 0x4c, 0xf0, 0x8f, 0x66, 0xc6, 0x01, 0x76, 0x3c, 0x02, 0x2f, 0xc0, 0x73, 0xb0, - 0x02, 0x89, 0x5d, 0x97, 0x5d, 0x76, 0x55, 0x51, 0xf3, 0x22, 0x68, 0xc6, 0x8e, 0x9d, 0x34, 0xea, - 0xdf, 0xa6, 0x3b, 0xcf, 0xb9, 0xe7, 0x9c, 0xb9, 0xe7, 0xce, 0x8c, 0x8c, 0x76, 0x46, 0x2f, 0x04, - 0x0e, 0x62, 0x32, 0x4a, 0x07, 0xc0, 0x23, 0x90, 0x20, 0xc8, 0x18, 0x22, 0x2f, 0xe6, 0xa4, 0x28, - 0xb0, 0x24, 0x20, 0x11, 0xc8, 0x4f, 0x31, 0x1f, 0x05, 0x91, 0x4f, 0xc6, 0x3d, 0xf6, 0x31, 0x19, - 0xb2, 0x1e, 0xf1, 0x21, 0x02, 0xce, 0x24, 0x78, 0x38, 0xe1, 0xb1, 0x8c, 0x2d, 0x3b, 0xe7, 0x63, - 0x96, 0x04, 0xb8, 0xe2, 0xe3, 0x09, 0x7f, 0x7d, 0xd3, 0x0f, 0xe4, 0x30, 0x1d, 0x60, 0x37, 0x0e, - 0x89, 0x1f, 0xfb, 0x31, 0xd1, 0xb2, 0x41, 0x7a, 0xac, 0x57, 0x7a, 0xa1, 0xbf, 0x72, 0xbb, 0xf5, - 0x67, 0xd5, 0xf6, 0x21, 0x73, 0x87, 0x41, 0x04, 0xfc, 0x0b, 0x49, 0x46, 0xbe, 0x02, 0x04, 0x09, - 0x41, 0x32, 0x32, 0x9e, 0x6b, 0x62, 0x9d, 0x5c, 0xa5, 0xe2, 0x69, 0x24, 0x83, 0x10, 0xe6, 0x04, - 0xcf, 0x6f, 0x12, 0x08, 0x77, 0x08, 0x21, 0xbb, 0xac, 0xeb, 0xfe, 0x32, 0x90, 0xb9, 0x7b, 0xf0, - 0xd2, 0xf3, 0x38, 0x08, 0x61, 0xbd, 0x47, 0xcb, 0xaa, 0x23, 0x8f, 0x49, 0xd6, 0x32, 0x3a, 0xc6, - 0x46, 0xb3, 0xff, 0x14, 0x57, 0xe3, 0x28, 0x8d, 0x71, 0x32, 0xf2, 0x15, 0x20, 0xb0, 0x62, 0xe3, - 0x71, 0x0f, 0xef, 0x0f, 0x3e, 0x80, 0x2b, 0x5f, 0x83, 0x64, 0x8e, 0x75, 0x72, 0xde, 0xae, 0x65, - 0xe7, 0x6d, 0x54, 0x61, 0xb4, 0x74, 0xb5, 0xf6, 0x51, 0x5d, 0x24, 0xe0, 0xb6, 0x16, 0xb4, 0xfb, - 0x26, 0xbe, 0x7e, 0xd8, 0xb8, 0x6c, 0xed, 0x30, 0x01, 0xd7, 0xf9, 0xaf, 0xb0, 0xae, 0xab, 0x15, - 0xd5, 0x46, 0xdd, 0x9f, 0x06, 0x5a, 0x29, 0x59, 0xaf, 0x02, 0x21, 0xad, 0xa3, 0xb9, 0x10, 0xf8, - 0x76, 0x21, 0x94, 0x5a, 0x47, 0x78, 0x50, 0xec, 0xb3, 0x3c, 0x41, 0xa6, 0x02, 0xec, 0xa1, 0x46, - 0x20, 0x21, 0x14, 0xad, 0x85, 0xce, 0xe2, 0x46, 0xb3, 0xff, 0xe8, 0xd6, 0x09, 0x9c, 0x95, 0xc2, - 0xb5, 0xb1, 
0xab, 0xf4, 0x34, 0xb7, 0xe9, 0x86, 0x53, 0xed, 0xab, 0x58, 0xd6, 0x11, 0x32, 0x13, - 0xc6, 0x21, 0x92, 0x14, 0x8e, 0x8b, 0xfe, 0xc9, 0x4d, 0x9b, 0x1c, 0x4c, 0x04, 0xc0, 0x21, 0x72, - 0xc1, 0x59, 0xc9, 0xce, 0xdb, 0x66, 0x09, 0xd2, 0xca, 0xb0, 0xfb, 0xc3, 0x40, 0xab, 0x97, 0xd8, - 0xd6, 0x43, 0xd4, 0xf0, 0x79, 0x9c, 0x26, 0x7a, 0x37, 0xb3, 0xea, 0x73, 0x47, 0x81, 0x34, 0xaf, - 0x59, 0x4f, 0xd0, 0x32, 0x07, 0x11, 0xa7, 0xdc, 0x05, 0x7d, 0x78, 0x66, 0x35, 0x25, 0x5a, 0xe0, - 0xb4, 0x64, 0x58, 0x04, 0x99, 0x11, 0x0b, 0x41, 0x24, 0xcc, 0x85, 0xd6, 0xa2, 0xa6, 0xaf, 0x15, - 0x74, 0x73, 0x6f, 0x52, 0xa0, 0x15, 0xc7, 0xea, 0xa0, 0xba, 0x5a, 0xb4, 0xea, 0x9a, 0x5b, 0x1e, - 0xb4, 0xe2, 0x52, 0x5d, 0xe9, 0x7e, 0x5f, 0x40, 0xcd, 0x43, 0xe0, 0xe3, 0xc0, 0x85, 0xad, 0xdd, - 0x6d, 0x7a, 0x0f, 0x77, 0xf5, 0xcd, 0xcc, 0x5d, 0xbd, 0xf1, 0x10, 0xa6, 0x9a, 0xbb, 0xea, 0xb6, - 0x5a, 0x6f, 0xd1, 0x92, 0x90, 0x4c, 0xa6, 0x42, 0x0f, 0xa5, 0xd9, 0xef, 0xdd, 0xc5, 0x54, 0x0b, - 0x9d, 0xff, 0x0b, 0xdb, 0xa5, 0x7c, 0x4d, 0x0b, 0xc3, 0xee, 0x6f, 0x03, 0xad, 0x4e, 0xb1, 0xef, - 0xe1, 0x29, 0x1c, 0xcc, 0x3e, 0x85, 0xc7, 0x77, 0xc8, 0x72, 0xc5, 0x63, 0xe8, 0xcf, 0x44, 0xd0, - 0xcf, 0xa1, 0x8d, 0x1a, 0x6e, 0xe0, 0x71, 0xd1, 0x32, 0x3a, 0x8b, 0x1b, 0xa6, 0x63, 0x2a, 0x8d, - 0x2a, 0x0a, 0x9a, 0xe3, 0xdd, 0xcf, 0x68, 0x6d, 0x6e, 0x48, 0x96, 0x8b, 0x90, 0x1b, 0x47, 0x5e, - 0x20, 0x83, 0x38, 0xca, 0xa5, 0xb3, 0x07, 0x78, 0x4d, 0xf4, 0xad, 0x89, 0xae, 0xba, 0x1d, 0x25, - 0x24, 0xe8, 0x94, 0xad, 0xb3, 0x7d, 0x72, 0x61, 0xd7, 0x4e, 0x2f, 0xec, 0xda, 0xd9, 0x85, 0x5d, - 0xfb, 0x9a, 0xd9, 0xc6, 0x49, 0x66, 0x1b, 0xa7, 0x99, 0x6d, 0x9c, 0x65, 0xb6, 0xf1, 0x27, 0xb3, - 0x8d, 0x6f, 0x7f, 0xed, 0xda, 0x3b, 0xfb, 0xfa, 0xff, 0xcf, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, - 0x29, 0x82, 0x11, 0x57, 0xb9, 0x06, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/networking/v1alpha1/generated.proto", fileDescriptor_c1cb39e7b48ce50d) +} + +var fileDescriptor_c1cb39e7b48ce50d = []byte{ + // 634 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x6e, 0xd3, 0x4a, + 0x18, 0x8d, 0xdb, 0xa4, 0xaa, 0x27, 0xb7, 0xb7, 0xb7, 0x5e, 0x45, 0x5d, 0x38, 0x91, 0xef, 0xa6, + 0x08, 0x3a, 0x26, 0x11, 0x42, 0x6c, 0x71, 0x2b, 0xa1, 0x4a, 0xd0, 0x96, 0xe9, 0x0a, 0xd4, 0x05, + 0xd3, 0xc9, 0x57, 0x67, 0x08, 0xfe, 0xd1, 0xcc, 0x24, 0xc0, 0x8e, 0x47, 0xe0, 0x05, 0x78, 0x0e, + 0x56, 0x20, 0xb1, 0xeb, 0xb2, 0xcb, 0xae, 0x2a, 0x6a, 0x5e, 0x04, 0xcd, 0xd8, 0xb1, 0x93, 0x46, + 0xfd, 0xdb, 0x74, 0xe7, 0xef, 0xcc, 0x39, 0x67, 0xbe, 0xf3, 0xcd, 0x8c, 0x8c, 0xf0, 0xf0, 0x99, + 0xc4, 0x3c, 0xf1, 0x69, 0xca, 0xfd, 0x18, 0xd4, 0xc7, 0x44, 0x0c, 0x79, 0x1c, 0xfa, 0xe3, 0x2e, + 0xfd, 0x90, 0x0e, 0x68, 0xd7, 0x0f, 0x21, 0x06, 0x41, 0x15, 0xf4, 0x71, 0x2a, 0x12, 0x95, 0x38, + 0x6e, 0xce, 0xc7, 0x34, 0xe5, 0xb8, 0xe2, 0xe3, 0x09, 0x7f, 0x7d, 0x33, 0xe4, 0x6a, 0x30, 0x3a, + 0xc2, 0x2c, 0x89, 0xfc, 0x30, 0x09, 0x13, 0xdf, 0xc8, 0x8e, 0x46, 0xc7, 0xa6, 0x32, 0x85, 0xf9, + 0xca, 0xed, 0xd6, 0x9f, 0x54, 0xdb, 0x47, 0x94, 0x0d, 0x78, 0x0c, 0xe2, 0xb3, 0x9f, 0x0e, 0x43, + 0x0d, 0x48, 0x3f, 0x02, 0x45, 0xfd, 0xf1, 0x5c, 0x13, 0xeb, 0xfe, 0x55, 0x2a, 0x31, 0x8a, 0x15, + 0x8f, 0x60, 0x4e, 0xf0, 0xf4, 0x26, 0x81, 0x64, 0x03, 0x88, 0xe8, 0x65, 0x9d, 0xf7, 0xd3, 0x42, + 0xf6, 0xce, 0xfe, 0xf3, 0x7e, 0x5f, 0x80, 0x94, 0xce, 0x3b, 0xb4, 0xac, 0x3b, 0xea, 0x53, 0x45, + 0x5b, 0x56, 0xc7, 0xda, 0x68, 0xf6, 0x1e, 0xe3, 0x6a, 0x1c, 0xa5, 0x31, 0x4e, 0x87, 0xa1, 0x06, + 0x24, 0xd6, 0x6c, 0x3c, 0xee, 0xe2, 0xbd, 0xa3, 0xf7, 0xc0, 0xd4, 0x2b, 0x50, 
0x34, 0x70, 0x4e, + 0xce, 0xdb, 0xb5, 0xec, 0xbc, 0x8d, 0x2a, 0x8c, 0x94, 0xae, 0xce, 0x1e, 0xaa, 0xcb, 0x14, 0x58, + 0x6b, 0xc1, 0xb8, 0x6f, 0xe2, 0xeb, 0x87, 0x8d, 0xcb, 0xd6, 0x0e, 0x52, 0x60, 0xc1, 0x3f, 0x85, + 0x75, 0x5d, 0x57, 0xc4, 0x18, 0x79, 0x3f, 0x2c, 0xb4, 0x52, 0xb2, 0x5e, 0x72, 0xa9, 0x9c, 0xc3, + 0xb9, 0x10, 0xf8, 0x76, 0x21, 0xb4, 0xda, 0x44, 0xf8, 0xaf, 0xd8, 0x67, 0x79, 0x82, 0x4c, 0x05, + 0xd8, 0x45, 0x0d, 0xae, 0x20, 0x92, 0xad, 0x85, 0xce, 0xe2, 0x46, 0xb3, 0xf7, 0xe0, 0xd6, 0x09, + 0x82, 0x95, 0xc2, 0xb5, 0xb1, 0xa3, 0xf5, 0x24, 0xb7, 0xf1, 0xa2, 0xa9, 0xf6, 0x75, 0x2c, 0xe7, + 0x10, 0xd9, 0x29, 0x15, 0x10, 0x2b, 0x02, 0xc7, 0x45, 0xff, 0xfe, 0x4d, 0x9b, 0xec, 0x4f, 0x04, + 0x20, 0x20, 0x66, 0x10, 0xac, 0x64, 0xe7, 0x6d, 0xbb, 0x04, 0x49, 0x65, 0xe8, 0x7d, 0xb7, 0xd0, + 0xea, 0x25, 0xb6, 0xf3, 0x3f, 0x6a, 0x84, 0x22, 0x19, 0xa5, 0x66, 0x37, 0xbb, 0xea, 0xf3, 0x85, + 0x06, 0x49, 0xbe, 0xe6, 0x3c, 0x42, 0xcb, 0x02, 0x64, 0x32, 0x12, 0x0c, 0xcc, 0xe1, 0xd9, 0xd5, + 0x94, 0x48, 0x81, 0x93, 0x92, 0xe1, 0xf8, 0xc8, 0x8e, 0x69, 0x04, 0x32, 0xa5, 0x0c, 0x5a, 0x8b, + 0x86, 0xbe, 0x56, 0xd0, 0xed, 0xdd, 0xc9, 0x02, 0xa9, 0x38, 0x4e, 0x07, 0xd5, 0x75, 0xd1, 0xaa, + 0x1b, 0x6e, 0x79, 0xd0, 0x9a, 0x4b, 0xcc, 0x8a, 0xf7, 0x6d, 0x01, 0x35, 0x0f, 0x40, 0x8c, 0x39, + 0x83, 0xad, 0x9d, 0x6d, 0x72, 0x0f, 0x77, 0xf5, 0xf5, 0xcc, 0x5d, 0xbd, 0xf1, 0x10, 0xa6, 0x9a, + 0xbb, 0xea, 0xb6, 0x3a, 0x6f, 0xd0, 0x92, 0x54, 0x54, 0x8d, 0xa4, 0x19, 0x4a, 0xb3, 0xd7, 0xbd, + 0x8b, 0xa9, 0x11, 0x06, 0xff, 0x16, 0xb6, 0x4b, 0x79, 0x4d, 0x0a, 0x43, 0xef, 0x97, 0x85, 0x56, + 0xa7, 0xd8, 0xf7, 0xf0, 0x14, 0xf6, 0x67, 0x9f, 0xc2, 0xc3, 0x3b, 0x64, 0xb9, 0xe2, 0x31, 0xf4, + 0x66, 0x22, 0x98, 0xe7, 0xd0, 0x46, 0x0d, 0xc6, 0xfb, 0x42, 0xb6, 0xac, 0xce, 0xe2, 0x86, 0x1d, + 0xd8, 0x5a, 0xa3, 0x17, 0x25, 0xc9, 0x71, 0xef, 0x13, 0x5a, 0x9b, 0x1b, 0x92, 0xc3, 0x10, 0x62, + 0x49, 0xdc, 0xe7, 0x8a, 0x27, 0x71, 0x2e, 0x9d, 0x3d, 0xc0, 0x6b, 0xa2, 0x6f, 0x4d, 0x74, 0xd5, + 0xed, 0x28, 0x21, 0x49, 0xa6, 0x6c, 0x83, 0xed, 0x93, 0x0b, 0xb7, 0x76, 0x7a, 0xe1, 0xd6, 0xce, + 0x2e, 0xdc, 0xda, 0x97, 0xcc, 0xb5, 0x4e, 0x32, 0xd7, 0x3a, 0xcd, 0x5c, 0xeb, 0x2c, 0x73, 0xad, + 0xdf, 0x99, 0x6b, 0x7d, 0xfd, 0xe3, 0xd6, 0xde, 0xba, 0xd7, 0xff, 0x7f, 0xfe, 0x06, 0x00, 0x00, + 0xff, 0xff, 0xb1, 0xd0, 0x33, 0x02, 0xa0, 0x06, 0x00, 0x00, } func (m *IPAddress) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.proto b/vendor/k8s.io/api/networking/v1alpha1/generated.proto index fb7971745d..80ec6af735 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/networking/v1alpha1/generated.proto @@ -39,7 +39,7 @@ message IPAddress { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the desired state of the IPAddress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -52,7 +52,7 @@ message IPAddressList { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of IPAddresses. 
repeated IPAddress items = 2; @@ -91,7 +91,7 @@ message ServiceCIDR { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the desired state of the ServiceCIDR. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -109,7 +109,7 @@ message ServiceCIDRList { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of ServiceCIDRs. repeated ServiceCIDR items = 2; @@ -119,8 +119,12 @@ message ServiceCIDRList { message ServiceCIDRSpec { // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. + // The network address of each CIDR, the address that identifies the subnet of a host, is reserved + // and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be + // allocated. // This field is immutable. // +optional + // +listType=atomic repeated string cidrs = 1; } @@ -133,6 +137,6 @@ message ServiceCIDRStatus { // +patchStrategy=merge // +listType=map // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; } diff --git a/vendor/k8s.io/api/networking/v1alpha1/types.go b/vendor/k8s.io/api/networking/v1alpha1/types.go index 9d56ca193e..0e454f0263 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/types.go +++ b/vendor/k8s.io/api/networking/v1alpha1/types.go @@ -109,8 +109,12 @@ type ServiceCIDR struct { type ServiceCIDRSpec struct { // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. + // The network address of each CIDR, the address that identifies the subnet of a host, is reserved + // and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be + // allocated. // This field is immutable. // +optional + // +listType=atomic CIDRs []string `json:"cidrs,omitempty" protobuf:"bytes,1,opt,name=cidrs"` } diff --git a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go index 481ec06030..4c8eb57a7a 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go @@ -91,7 +91,7 @@ func (ServiceCIDRList) SwaggerDoc() map[string]string { var map_ServiceCIDRSpec = map[string]string{ "": "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.", - "cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable.", + "cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. 
\"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. The network address of each CIDR, the address that identifies the subnet of a host, is reserved and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be allocated. This field is immutable.", } func (ServiceCIDRSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go index 6f298cd781..a924725f28 100644 --- a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto +// source: k8s.io/api/networking/v1beta1/generated.proto package v1beta1 @@ -27,6 +27,7 @@ import ( proto "github.com/gogo/protobuf/proto" k8s_io_api_core_v1 "k8s.io/api/core/v1" v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" math_bits "math/bits" @@ -48,7 +49,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } func (*HTTPIngressPath) ProtoMessage() {} func (*HTTPIngressPath) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{0} + return fileDescriptor_9497719c79c89d2d, []int{0} } func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -76,7 +77,7 @@ var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } func (*HTTPIngressRuleValue) ProtoMessage() {} func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{1} + return fileDescriptor_9497719c79c89d2d, []int{1} } func (m *HTTPIngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -101,10 +102,94 @@ func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() { var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo +func (m *IPAddress) Reset() { *m = IPAddress{} } +func (*IPAddress) ProtoMessage() {} +func (*IPAddress) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{2} +} +func (m *IPAddress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddress.Merge(m, src) +} +func (m *IPAddress) XXX_Size() int { + return m.Size() +} +func (m *IPAddress) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddress.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddress proto.InternalMessageInfo + +func (m *IPAddressList) Reset() { *m = IPAddressList{} } +func (*IPAddressList) ProtoMessage() {} +func (*IPAddressList) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{3} +} +func (m *IPAddressList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddressList) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_IPAddressList.Merge(m, src) +} +func (m *IPAddressList) XXX_Size() int { + return m.Size() +} +func (m *IPAddressList) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddressList.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddressList proto.InternalMessageInfo + +func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} } +func (*IPAddressSpec) ProtoMessage() {} +func (*IPAddressSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{4} +} +func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddressSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddressSpec.Merge(m, src) +} +func (m *IPAddressSpec) XXX_Size() int { + return m.Size() +} +func (m *IPAddressSpec) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddressSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo + func (m *Ingress) Reset() { *m = Ingress{} } func (*Ingress) ProtoMessage() {} func (*Ingress) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{2} + return fileDescriptor_9497719c79c89d2d, []int{5} } func (m *Ingress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -132,7 +217,7 @@ var xxx_messageInfo_Ingress proto.InternalMessageInfo func (m *IngressBackend) Reset() { *m = IngressBackend{} } func (*IngressBackend) ProtoMessage() {} func (*IngressBackend) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{3} + return fileDescriptor_9497719c79c89d2d, []int{6} } func (m *IngressBackend) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -160,7 +245,7 @@ var xxx_messageInfo_IngressBackend proto.InternalMessageInfo func (m *IngressClass) Reset() { *m = IngressClass{} } func (*IngressClass) ProtoMessage() {} func (*IngressClass) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{4} + return fileDescriptor_9497719c79c89d2d, []int{7} } func (m *IngressClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -188,7 +273,7 @@ var xxx_messageInfo_IngressClass proto.InternalMessageInfo func (m *IngressClassList) Reset() { *m = IngressClassList{} } func (*IngressClassList) ProtoMessage() {} func (*IngressClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{5} + return fileDescriptor_9497719c79c89d2d, []int{8} } func (m *IngressClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -216,7 +301,7 @@ var xxx_messageInfo_IngressClassList proto.InternalMessageInfo func (m *IngressClassParametersReference) Reset() { *m = IngressClassParametersReference{} } func (*IngressClassParametersReference) ProtoMessage() {} func (*IngressClassParametersReference) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{6} + return fileDescriptor_9497719c79c89d2d, []int{9} } func (m *IngressClassParametersReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -244,7 +329,7 @@ var xxx_messageInfo_IngressClassParametersReference proto.InternalMessageInfo func (m *IngressClassSpec) Reset() { *m = IngressClassSpec{} } func (*IngressClassSpec) ProtoMessage() {} func (*IngressClassSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{7} + return fileDescriptor_9497719c79c89d2d, []int{10} } func (m *IngressClassSpec) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -272,7 +357,7 @@ var xxx_messageInfo_IngressClassSpec proto.InternalMessageInfo func (m *IngressList) Reset() { *m = IngressList{} } func (*IngressList) ProtoMessage() {} func (*IngressList) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{8} + return fileDescriptor_9497719c79c89d2d, []int{11} } func (m *IngressList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -300,7 +385,7 @@ var xxx_messageInfo_IngressList proto.InternalMessageInfo func (m *IngressLoadBalancerIngress) Reset() { *m = IngressLoadBalancerIngress{} } func (*IngressLoadBalancerIngress) ProtoMessage() {} func (*IngressLoadBalancerIngress) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{9} + return fileDescriptor_9497719c79c89d2d, []int{12} } func (m *IngressLoadBalancerIngress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -328,7 +413,7 @@ var xxx_messageInfo_IngressLoadBalancerIngress proto.InternalMessageInfo func (m *IngressLoadBalancerStatus) Reset() { *m = IngressLoadBalancerStatus{} } func (*IngressLoadBalancerStatus) ProtoMessage() {} func (*IngressLoadBalancerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{10} + return fileDescriptor_9497719c79c89d2d, []int{13} } func (m *IngressLoadBalancerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -356,7 +441,7 @@ var xxx_messageInfo_IngressLoadBalancerStatus proto.InternalMessageInfo func (m *IngressPortStatus) Reset() { *m = IngressPortStatus{} } func (*IngressPortStatus) ProtoMessage() {} func (*IngressPortStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{11} + return fileDescriptor_9497719c79c89d2d, []int{14} } func (m *IngressPortStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -384,7 +469,7 @@ var xxx_messageInfo_IngressPortStatus proto.InternalMessageInfo func (m *IngressRule) Reset() { *m = IngressRule{} } func (*IngressRule) ProtoMessage() {} func (*IngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{12} + return fileDescriptor_9497719c79c89d2d, []int{15} } func (m *IngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -412,7 +497,7 @@ var xxx_messageInfo_IngressRule proto.InternalMessageInfo func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (*IngressRuleValue) ProtoMessage() {} func (*IngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{13} + return fileDescriptor_9497719c79c89d2d, []int{16} } func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -440,7 +525,7 @@ var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (*IngressSpec) ProtoMessage() {} func (*IngressSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{14} + return fileDescriptor_9497719c79c89d2d, []int{17} } func (m *IngressSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -468,7 +553,7 @@ var xxx_messageInfo_IngressSpec proto.InternalMessageInfo func (m *IngressStatus) Reset() { *m = IngressStatus{} } func (*IngressStatus) ProtoMessage() {} func (*IngressStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{15} + return fileDescriptor_9497719c79c89d2d, []int{18} } func (m *IngressStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -496,7 +581,7 @@ var 
xxx_messageInfo_IngressStatus proto.InternalMessageInfo func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (*IngressTLS) ProtoMessage() {} func (*IngressTLS) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{16} + return fileDescriptor_9497719c79c89d2d, []int{19} } func (m *IngressTLS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -521,9 +606,152 @@ func (m *IngressTLS) XXX_DiscardUnknown() { var xxx_messageInfo_IngressTLS proto.InternalMessageInfo +func (m *ParentReference) Reset() { *m = ParentReference{} } +func (*ParentReference) ProtoMessage() {} +func (*ParentReference) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{20} +} +func (m *ParentReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ParentReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParentReference.Merge(m, src) +} +func (m *ParentReference) XXX_Size() int { + return m.Size() +} +func (m *ParentReference) XXX_DiscardUnknown() { + xxx_messageInfo_ParentReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ParentReference proto.InternalMessageInfo + +func (m *ServiceCIDR) Reset() { *m = ServiceCIDR{} } +func (*ServiceCIDR) ProtoMessage() {} +func (*ServiceCIDR) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{21} +} +func (m *ServiceCIDR) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDR) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDR.Merge(m, src) +} +func (m *ServiceCIDR) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDR) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDR.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDR proto.InternalMessageInfo + +func (m *ServiceCIDRList) Reset() { *m = ServiceCIDRList{} } +func (*ServiceCIDRList) ProtoMessage() {} +func (*ServiceCIDRList) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{22} +} +func (m *ServiceCIDRList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDRList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDRList.Merge(m, src) +} +func (m *ServiceCIDRList) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDRList) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDRList.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDRList proto.InternalMessageInfo + +func (m *ServiceCIDRSpec) Reset() { *m = ServiceCIDRSpec{} } +func (*ServiceCIDRSpec) ProtoMessage() {} +func (*ServiceCIDRSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{23} +} +func (m *ServiceCIDRSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDRSpec) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDRSpec.Merge(m, src) +} +func (m *ServiceCIDRSpec) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDRSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDRSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDRSpec proto.InternalMessageInfo + +func (m *ServiceCIDRStatus) Reset() { *m = ServiceCIDRStatus{} } +func (*ServiceCIDRStatus) ProtoMessage() {} +func (*ServiceCIDRStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{24} +} +func (m *ServiceCIDRStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDRStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDRStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDRStatus.Merge(m, src) +} +func (m *ServiceCIDRStatus) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDRStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDRStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDRStatus proto.InternalMessageInfo + func init() { proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressPath") proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressRuleValue") + proto.RegisterType((*IPAddress)(nil), "k8s.io.api.networking.v1beta1.IPAddress") + proto.RegisterType((*IPAddressList)(nil), "k8s.io.api.networking.v1beta1.IPAddressList") + proto.RegisterType((*IPAddressSpec)(nil), "k8s.io.api.networking.v1beta1.IPAddressSpec") proto.RegisterType((*Ingress)(nil), "k8s.io.api.networking.v1beta1.Ingress") proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.networking.v1beta1.IngressBackend") proto.RegisterType((*IngressClass)(nil), "k8s.io.api.networking.v1beta1.IngressClass") @@ -539,92 +767,111 @@ func init() { proto.RegisterType((*IngressSpec)(nil), "k8s.io.api.networking.v1beta1.IngressSpec") proto.RegisterType((*IngressStatus)(nil), "k8s.io.api.networking.v1beta1.IngressStatus") proto.RegisterType((*IngressTLS)(nil), "k8s.io.api.networking.v1beta1.IngressTLS") + proto.RegisterType((*ParentReference)(nil), "k8s.io.api.networking.v1beta1.ParentReference") + proto.RegisterType((*ServiceCIDR)(nil), "k8s.io.api.networking.v1beta1.ServiceCIDR") + proto.RegisterType((*ServiceCIDRList)(nil), "k8s.io.api.networking.v1beta1.ServiceCIDRList") + proto.RegisterType((*ServiceCIDRSpec)(nil), "k8s.io.api.networking.v1beta1.ServiceCIDRSpec") + proto.RegisterType((*ServiceCIDRStatus)(nil), "k8s.io.api.networking.v1beta1.ServiceCIDRStatus") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto", fileDescriptor_5bea11de0ceb8f53) -} - -var fileDescriptor_5bea11de0ceb8f53 = []byte{ - // 1247 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xcb, 0x6f, 0x1b, 0x45, - 0x18, 0xcf, 0xda, 0x71, 0xe3, 0x8c, 0xd3, 0x36, 0x0c, 0x3d, 0x98, 0xa0, 0xda, 0xd1, 0x1e, 0x50, - 0xa0, 0xed, 0x6e, 0x93, 0x16, 0x54, 0x2e, 0x08, 0x36, 0x02, 0x12, 0x25, 0x24, 0x66, 0x6c, 0x1e, - 0x42, 0x1c, 0x18, 0xaf, 0xa7, 0xf6, 0xe2, 0xf5, 0xee, 0x6a, 0x66, 0x36, 0xa8, 0x37, 0x10, 0x27, - 0x4e, 0xf0, 0x3f, 0x20, 0xf1, 0x27, 0x20, 0x2e, 0x48, 0x08, 0x2e, 0x39, 0xf6, 0xd8, 0x0b, 0x11, - 0x31, 0xff, 0x45, 0x4f, 0xe8, 0x9b, 0x9d, 0x7d, 0xf8, 0x91, 0xd6, 0xe1, 0xd0, 0x53, 0xbc, 0xdf, - 0xe3, 0xf7, 0xbd, 0xbf, 
0xf9, 0x82, 0x3e, 0x18, 0x3e, 0x10, 0x96, 0x17, 0xda, 0xc3, 0xb8, 0xcb, - 0x78, 0xc0, 0x24, 0x13, 0xf6, 0x09, 0x0b, 0x7a, 0x21, 0xb7, 0x35, 0x83, 0x46, 0x9e, 0x1d, 0x30, - 0xf9, 0x4d, 0xc8, 0x87, 0x5e, 0xd0, 0xb7, 0x4f, 0xb6, 0xbb, 0x4c, 0xd2, 0x6d, 0xbb, 0xcf, 0x02, - 0xc6, 0xa9, 0x64, 0x3d, 0x2b, 0xe2, 0xa1, 0x0c, 0xf1, 0xcd, 0x44, 0xdc, 0xa2, 0x91, 0x67, 0xe5, - 0xe2, 0x96, 0x16, 0xdf, 0xb8, 0xd3, 0xf7, 0xe4, 0x20, 0xee, 0x5a, 0x6e, 0x38, 0xb2, 0xfb, 0x61, - 0x3f, 0xb4, 0x95, 0x56, 0x37, 0x7e, 0xa8, 0xbe, 0xd4, 0x87, 0xfa, 0x95, 0xa0, 0x6d, 0x98, 0x05, - 0xe3, 0x6e, 0xc8, 0x99, 0x7d, 0x32, 0x63, 0x71, 0xe3, 0x7e, 0x2e, 0x33, 0xa2, 0xee, 0xc0, 0x0b, - 0x18, 0x7f, 0x64, 0x47, 0xc3, 0x3e, 0x10, 0x84, 0x3d, 0x62, 0x92, 0xce, 0xd3, 0xb2, 0x2f, 0xd2, - 0xe2, 0x71, 0x20, 0xbd, 0x11, 0x9b, 0x51, 0x78, 0xeb, 0x79, 0x0a, 0xc2, 0x1d, 0xb0, 0x11, 0x9d, - 0xd1, 0xbb, 0x77, 0x91, 0x5e, 0x2c, 0x3d, 0xdf, 0xf6, 0x02, 0x29, 0x24, 0x9f, 0x56, 0x32, 0xff, - 0x32, 0xd0, 0xf5, 0xbd, 0x4e, 0xa7, 0xb5, 0x1f, 0xf4, 0x39, 0x13, 0xa2, 0x45, 0xe5, 0x00, 0x6f, - 0xa2, 0xe5, 0x88, 0xca, 0x41, 0xdd, 0xd8, 0x34, 0xb6, 0x56, 0x9d, 0xb5, 0xd3, 0xb3, 0xe6, 0xd2, - 0xf8, 0xac, 0xb9, 0x0c, 0x3c, 0xa2, 0x38, 0xf8, 0x3e, 0xaa, 0xc2, 0xdf, 0xce, 0xa3, 0x88, 0xd5, - 0xcb, 0x4a, 0xaa, 0x3e, 0x3e, 0x6b, 0x56, 0x5b, 0x9a, 0xf6, 0xb4, 0xf0, 0x9b, 0x64, 0x92, 0xf8, - 0x73, 0xb4, 0xd2, 0xa5, 0xee, 0x90, 0x05, 0xbd, 0x7a, 0x69, 0xd3, 0xd8, 0xaa, 0xed, 0xdc, 0xb1, - 0x9e, 0x59, 0x43, 0x4b, 0x3b, 0xe5, 0x24, 0x4a, 0xce, 0x75, 0xed, 0xc9, 0x8a, 0x26, 0x90, 0x14, - 0xce, 0x1c, 0xa2, 0x1b, 0x85, 0x20, 0x48, 0xec, 0xb3, 0x4f, 0xa9, 0x1f, 0x33, 0xdc, 0x46, 0x15, - 0xb0, 0x2e, 0xea, 0xc6, 0x66, 0x79, 0xab, 0xb6, 0x63, 0x3d, 0xc7, 0xde, 0x54, 0x22, 0x9c, 0xab, - 0xda, 0x60, 0x05, 0xbe, 0x04, 0x49, 0xb0, 0xcc, 0x1f, 0x4b, 0x68, 0x45, 0x4b, 0xe1, 0xaf, 0x50, - 0x15, 0xea, 0xde, 0xa3, 0x92, 0xaa, 0x74, 0xd5, 0x76, 0xee, 0x16, 0x6c, 0x64, 0x65, 0xb0, 0xa2, - 0x61, 0x1f, 0x08, 0xc2, 0x02, 0x69, 0xeb, 0x64, 0xdb, 0x3a, 0xee, 0x7e, 0xcd, 0x5c, 0xf9, 0x11, - 0x93, 0xd4, 0xc1, 0xda, 0x0a, 0xca, 0x69, 0x24, 0x43, 0xc5, 0x87, 0x68, 0x59, 0x44, 0xcc, 0xd5, - 0x19, 0x7b, 0x63, 0xb1, 0x8c, 0xb5, 0x23, 0xe6, 0xe6, 0x85, 0x83, 0x2f, 0xa2, 0x50, 0x70, 0x07, - 0x5d, 0x11, 0x92, 0xca, 0x58, 0xa8, 0xb2, 0xd5, 0x76, 0x6e, 0x2f, 0x88, 0xa7, 0x74, 0x9c, 0x6b, - 0x1a, 0xf1, 0x4a, 0xf2, 0x4d, 0x34, 0x96, 0xf9, 0x43, 0x09, 0x5d, 0x9b, 0xac, 0x15, 0x7e, 0x13, - 0xd5, 0x04, 0xe3, 0x27, 0x9e, 0xcb, 0x8e, 0xe8, 0x88, 0xe9, 0x56, 0x7a, 0x59, 0xeb, 0xd7, 0xda, - 0x39, 0x8b, 0x14, 0xe5, 0x70, 0x3f, 0x53, 0x6b, 0x85, 0x5c, 0xea, 0xa0, 0x2f, 0x4e, 0x29, 0x74, - 0xb6, 0x95, 0x74, 0xb6, 0xb5, 0x1f, 0xc8, 0x63, 0xde, 0x96, 0xdc, 0x0b, 0xfa, 0x33, 0x86, 0x00, - 0x8c, 0x14, 0x91, 0xf1, 0x67, 0xa8, 0xca, 0x99, 0x08, 0x63, 0xee, 0x32, 0x9d, 0x8a, 0x89, 0x66, - 0x84, 0x15, 0x00, 0x65, 0x82, 0xbe, 0xed, 0x1d, 0x86, 0x2e, 0xf5, 0x93, 0xe2, 0x10, 0xf6, 0x90, - 0x71, 0x16, 0xb8, 0xcc, 0x59, 0x83, 0x86, 0x27, 0x1a, 0x82, 0x64, 0x60, 0x30, 0x50, 0x6b, 0x3a, - 0x17, 0xbb, 0x3e, 0x7d, 0x21, 0x2d, 0xf2, 0xf1, 0x44, 0x8b, 0xd8, 0x8b, 0x95, 0x54, 0x39, 0x77, - 0x51, 0x9f, 0x98, 0x7f, 0x1a, 0x68, 0xbd, 0x28, 0x78, 0xe8, 0x09, 0x89, 0xbf, 0x9c, 0x89, 0xc4, - 0x5a, 0x2c, 0x12, 0xd0, 0x56, 0x71, 0xac, 0x6b, 0x53, 0xd5, 0x94, 0x52, 0x88, 0xa2, 0x85, 0x2a, - 0x9e, 0x64, 0x23, 0x51, 0x2f, 0xa9, 0x59, 0xbd, 0x75, 0x89, 0x30, 0xf2, 0x41, 0xdd, 0x07, 0x04, - 0x92, 0x00, 0x99, 0x7f, 0x1b, 0xa8, 0x59, 0x14, 0x6b, 0x51, 0x4e, 0x47, 0x4c, 0x32, 0x2e, 0xb2, - 0x32, 0xe2, 0x2d, 0x54, 0xa5, 0xad, 0xfd, 0x0f, 
0x79, 0x18, 0x47, 0xe9, 0xbe, 0x03, 0xff, 0xde, - 0xd3, 0x34, 0x92, 0x71, 0x61, 0x2b, 0x0e, 0x3d, 0xbd, 0xba, 0x0a, 0x5b, 0xf1, 0xc0, 0x0b, 0x7a, - 0x44, 0x71, 0x40, 0x22, 0x80, 0x66, 0x2f, 0x4f, 0x4a, 0xa8, 0x2e, 0x57, 0x1c, 0xdc, 0x44, 0x15, - 0xe1, 0x86, 0x11, 0xab, 0x2f, 0x2b, 0x91, 0x55, 0x70, 0xb9, 0x0d, 0x04, 0x92, 0xd0, 0xf1, 0x2d, - 0xb4, 0x0a, 0x82, 0x22, 0xa2, 0x2e, 0xab, 0x57, 0x94, 0xd0, 0xd5, 0xf1, 0x59, 0x73, 0xf5, 0x28, - 0x25, 0x92, 0x9c, 0x6f, 0xfe, 0x3a, 0x55, 0x24, 0xa8, 0x1f, 0xde, 0x41, 0xc8, 0x0d, 0x03, 0xc9, - 0x43, 0xdf, 0x67, 0x5c, 0x87, 0x94, 0xb5, 0xcf, 0x6e, 0xc6, 0x21, 0x05, 0x29, 0x1c, 0x20, 0x14, - 0x65, 0xb9, 0xd1, 0x6d, 0xf4, 0xce, 0x25, 0xf2, 0x3f, 0x27, 0xb1, 0xce, 0x35, 0xb0, 0x57, 0x60, - 0x14, 0x2c, 0x98, 0xbf, 0x19, 0xa8, 0xa6, 0xf5, 0x5f, 0x40, 0x63, 0x1d, 0x4c, 0x36, 0xd6, 0x6b, - 0x0b, 0x3e, 0x3a, 0xf3, 0x7b, 0xea, 0x77, 0x03, 0x6d, 0xa4, 0xae, 0x87, 0xb4, 0xe7, 0x50, 0x9f, - 0x06, 0x2e, 0xe3, 0xe9, 0x7b, 0xb0, 0x81, 0x4a, 0x5e, 0xda, 0x48, 0x48, 0x03, 0x94, 0xf6, 0x5b, - 0xa4, 0xe4, 0x45, 0xf8, 0x36, 0xaa, 0x0e, 0x42, 0x21, 0x55, 0x8b, 0x24, 0x4d, 0x94, 0x79, 0xbd, - 0xa7, 0xe9, 0x24, 0x93, 0xc0, 0x9f, 0xa0, 0x4a, 0x14, 0x72, 0x29, 0xea, 0xcb, 0xca, 0xeb, 0xbb, - 0x8b, 0x79, 0x0d, 0xbb, 0x4d, 0x2f, 0xeb, 0xfc, 0xf1, 0x02, 0x18, 0x92, 0xa0, 0x99, 0xdf, 0x19, - 0xe8, 0x95, 0x39, 0xfe, 0x27, 0x3a, 0xb8, 0x87, 0x56, 0xbc, 0x84, 0xa9, 0x5f, 0xcc, 0xb7, 0x17, - 0x33, 0x3b, 0x27, 0x15, 0xf9, 0x6b, 0x9d, 0xbe, 0xca, 0x29, 0xb4, 0xf9, 0xb3, 0x81, 0x5e, 0x9a, - 0xf1, 0x57, 0x5d, 0x1d, 0xb0, 0xf3, 0x21, 0x79, 0x95, 0xc2, 0xd5, 0x01, 0xab, 0x5b, 0x71, 0xf0, - 0x01, 0xaa, 0xaa, 0xa3, 0xc5, 0x0d, 0x7d, 0x9d, 0x40, 0x3b, 0x4d, 0x60, 0x4b, 0xd3, 0x9f, 0x9e, - 0x35, 0x5f, 0x9d, 0xbd, 0xe4, 0xac, 0x94, 0x4d, 0x32, 0x00, 0x18, 0x45, 0xc6, 0x79, 0xc8, 0xf5, - 0xb4, 0xaa, 0x51, 0x7c, 0x1f, 0x08, 0x24, 0xa1, 0x9b, 0xbf, 0xe4, 0x4d, 0x0a, 0x07, 0x05, 0xf8, - 0x07, 0xc5, 0x99, 0xbe, 0x8a, 0xa0, 0x74, 0x44, 0x71, 0x70, 0x8c, 0xd6, 0xbd, 0xa9, 0x0b, 0xe4, - 0x72, 0x3b, 0x39, 0x53, 0x73, 0xea, 0x1a, 0x7e, 0x7d, 0x9a, 0x43, 0x66, 0x4c, 0x98, 0x0c, 0xcd, - 0x48, 0xc1, 0x93, 0x30, 0x90, 0x32, 0xd2, 0xd3, 0x74, 0x6f, 0xf1, 0xbb, 0x27, 0x77, 0xa1, 0xaa, - 0xa2, 0xeb, 0x74, 0x5a, 0x44, 0x41, 0x99, 0x7f, 0x94, 0xb2, 0x7c, 0xa8, 0x45, 0xf3, 0x6e, 0x16, - 0xad, 0xda, 0x01, 0xea, 0x99, 0x4f, 0xd6, 0xda, 0x8d, 0x82, 0xe3, 0x19, 0x8f, 0xcc, 0x48, 0xe3, - 0x4e, 0x7e, 0x0f, 0x1a, 0xff, 0xe7, 0x1e, 0xac, 0xcd, 0xbb, 0x05, 0xf1, 0x1e, 0x2a, 0x4b, 0x3f, - 0x1d, 0xf6, 0xd7, 0x17, 0x43, 0xec, 0x1c, 0xb6, 0x9d, 0x9a, 0x4e, 0x79, 0xb9, 0x73, 0xd8, 0x26, - 0x00, 0x81, 0x8f, 0x51, 0x85, 0xc7, 0x3e, 0x83, 0x5b, 0xa9, 0xbc, 0xf8, 0xed, 0x05, 0x19, 0xcc, - 0x87, 0x0f, 0xbe, 0x04, 0x49, 0x70, 0xcc, 0xef, 0x0d, 0x74, 0x75, 0xe2, 0xa2, 0xc2, 0x1c, 0xad, - 0xf9, 0x85, 0xd9, 0xd1, 0x79, 0x78, 0x70, 0xf9, 0xa9, 0xd3, 0x43, 0x7f, 0x43, 0xdb, 0x5d, 0x2b, - 0xf2, 0xc8, 0x84, 0x0d, 0x93, 0x22, 0x94, 0x87, 0x0d, 0x73, 0x00, 0xcd, 0x9b, 0x0c, 0xbc, 0x9e, - 0x03, 0xe8, 0x69, 0x41, 0x12, 0x3a, 0x3c, 0x28, 0x82, 0xb9, 0x9c, 0xc9, 0xa3, 0x7c, 0x71, 0x65, - 0x0f, 0x4a, 0x3b, 0xe3, 0x90, 0x82, 0x94, 0xb3, 0x7b, 0x7a, 0xde, 0x58, 0x7a, 0x7c, 0xde, 0x58, - 0x7a, 0x72, 0xde, 0x58, 0xfa, 0x76, 0xdc, 0x30, 0x4e, 0xc7, 0x0d, 0xe3, 0xf1, 0xb8, 0x61, 0x3c, - 0x19, 0x37, 0x8c, 0x7f, 0xc6, 0x0d, 0xe3, 0xa7, 0x7f, 0x1b, 0x4b, 0x5f, 0xdc, 0x7c, 0xe6, 0x3f, - 0x7c, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x08, 0x04, 0x22, 0x31, 0x29, 0x0e, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/networking/v1beta1/generated.proto", 
fileDescriptor_9497719c79c89d2d) +} + +var fileDescriptor_9497719c79c89d2d = []byte{ + // 1457 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcb, 0x6f, 0x1b, 0xc5, + 0x1f, 0xcf, 0x3a, 0x71, 0xe3, 0x8c, 0xd3, 0x26, 0x9d, 0x5f, 0x0f, 0xfe, 0x05, 0xd5, 0x8e, 0x16, + 0x09, 0x85, 0x3e, 0x76, 0xdb, 0xb4, 0xa0, 0x72, 0x41, 0xd4, 0x01, 0x51, 0xab, 0x69, 0xb2, 0x8c, + 0x0d, 0x54, 0xc0, 0x81, 0xc9, 0x7a, 0x6a, 0x2f, 0x5e, 0xef, 0xae, 0x66, 0xc7, 0x81, 0xde, 0x40, + 0x9c, 0x38, 0xc1, 0x9d, 0x23, 0x12, 0x7f, 0x02, 0x70, 0xa0, 0x52, 0x05, 0x97, 0x1e, 0x7b, 0xec, + 0x85, 0x88, 0x9a, 0xff, 0xa2, 0x27, 0xf4, 0x9d, 0x9d, 0x7d, 0xf9, 0xd1, 0x6c, 0x38, 0xe4, 0x54, + 0xef, 0xf7, 0x3d, 0xdf, 0xe7, 0xa7, 0x41, 0x57, 0x07, 0xb7, 0x42, 0xc3, 0xf1, 0x4d, 0x1a, 0x38, + 0xa6, 0xc7, 0xc4, 0x97, 0x3e, 0x1f, 0x38, 0x5e, 0xcf, 0x3c, 0xbc, 0x7e, 0xc0, 0x04, 0xbd, 0x6e, + 0xf6, 0x98, 0xc7, 0x38, 0x15, 0xac, 0x6b, 0x04, 0xdc, 0x17, 0x3e, 0xbe, 0x18, 0x89, 0x1b, 0x34, + 0x70, 0x8c, 0x54, 0xdc, 0x50, 0xe2, 0x1b, 0x57, 0x7b, 0x8e, 0xe8, 0x8f, 0x0e, 0x0c, 0xdb, 0x1f, + 0x9a, 0x3d, 0xbf, 0xe7, 0x9b, 0x52, 0xeb, 0x60, 0xf4, 0x40, 0x7e, 0xc9, 0x0f, 0xf9, 0x2b, 0xb2, + 0xb6, 0xa1, 0x67, 0x9c, 0xdb, 0x3e, 0x67, 0xe6, 0xe1, 0x94, 0xc7, 0x8d, 0x9b, 0xa9, 0xcc, 0x90, + 0xda, 0x7d, 0xc7, 0x63, 0xfc, 0xa1, 0x19, 0x0c, 0x7a, 0x40, 0x08, 0xcd, 0x21, 0x13, 0x74, 0x96, + 0x96, 0x39, 0x4f, 0x8b, 0x8f, 0x3c, 0xe1, 0x0c, 0xd9, 0x94, 0xc2, 0x9b, 0xc7, 0x29, 0x84, 0x76, + 0x9f, 0x0d, 0xe9, 0x94, 0xde, 0x8d, 0x79, 0x7a, 0x23, 0xe1, 0xb8, 0xa6, 0xe3, 0x89, 0x50, 0xf0, + 0x49, 0x25, 0xfd, 0x4f, 0x0d, 0xad, 0xdd, 0xe9, 0x74, 0xac, 0x96, 0xd7, 0xe3, 0x2c, 0x0c, 0x2d, + 0x2a, 0xfa, 0x78, 0x13, 0x2d, 0x05, 0x54, 0xf4, 0x6b, 0xda, 0xa6, 0xb6, 0xb5, 0xd2, 0x5c, 0x7d, + 0x72, 0xd4, 0x58, 0x18, 0x1f, 0x35, 0x96, 0x80, 0x47, 0x24, 0x07, 0xdf, 0x44, 0x15, 0xf8, 0xb7, + 0xf3, 0x30, 0x60, 0xb5, 0x45, 0x29, 0x55, 0x1b, 0x1f, 0x35, 0x2a, 0x96, 0xa2, 0xbd, 0xc8, 0xfc, + 0x26, 0x89, 0x24, 0xbe, 0x8f, 0x96, 0x0f, 0xa8, 0x3d, 0x60, 0x5e, 0xb7, 0x56, 0xda, 0xd4, 0xb6, + 0xaa, 0xdb, 0x57, 0x8d, 0x97, 0xd6, 0xd0, 0x50, 0x41, 0x35, 0x23, 0xa5, 0xe6, 0x9a, 0x8a, 0x64, + 0x59, 0x11, 0x48, 0x6c, 0x4e, 0x1f, 0xa0, 0x0b, 0x99, 0x47, 0x90, 0x91, 0xcb, 0x3e, 0xa2, 0xee, + 0x88, 0xe1, 0x36, 0x2a, 0x83, 0xf7, 0xb0, 0xa6, 0x6d, 0x2e, 0x6e, 0x55, 0xb7, 0x8d, 0x63, 0xfc, + 0x4d, 0x24, 0xa2, 0x79, 0x56, 0x39, 0x2c, 0xc3, 0x57, 0x48, 0x22, 0x5b, 0xfa, 0x23, 0x0d, 0xad, + 0xb4, 0xac, 0xdb, 0xdd, 0x2e, 0xc8, 0xe1, 0xcf, 0x51, 0x05, 0x2a, 0xdf, 0xa5, 0x82, 0xca, 0x84, + 0x55, 0xb7, 0xaf, 0x65, 0xbc, 0x24, 0x85, 0x30, 0x82, 0x41, 0x0f, 0x08, 0xa1, 0x01, 0xd2, 0xc6, + 0xe1, 0x75, 0x63, 0xff, 0xe0, 0x0b, 0x66, 0x8b, 0x7b, 0x4c, 0xd0, 0x26, 0x56, 0x7e, 0x50, 0x4a, + 0x23, 0x89, 0x55, 0xbc, 0x87, 0x96, 0xc2, 0x80, 0xd9, 0x2a, 0x67, 0x57, 0x8e, 0xcb, 0x59, 0x1c, + 0x59, 0x3b, 0x60, 0x76, 0x5a, 0x3c, 0xf8, 0x22, 0xd2, 0x8e, 0xfe, 0xbb, 0x86, 0xce, 0x26, 0x52, + 0xbb, 0x4e, 0x28, 0xf0, 0x67, 0x53, 0x6f, 0x30, 0x8a, 0xbd, 0x01, 0xb4, 0xe5, 0x0b, 0xd6, 0x95, + 0x9f, 0x4a, 0x4c, 0xc9, 0xc4, 0x7f, 0x0f, 0x95, 0x1d, 0xc1, 0x86, 0x61, 0xad, 0x24, 0x8b, 0xb0, + 0x55, 0xf4, 0x01, 0x69, 0xfa, 0x5b, 0xa0, 0x4e, 0x22, 0x2b, 0xba, 0x9b, 0x89, 0x1e, 0x5e, 0x85, + 0x3f, 0x45, 0x2b, 0x01, 0xe5, 0xcc, 0x13, 0x84, 0x3d, 0x98, 0x11, 0xfe, 0x2c, 0x1f, 0x56, 0x2c, + 0xcf, 0x38, 0xf3, 0x6c, 0xd6, 0x3c, 0x3b, 0x3e, 0x6a, 0xac, 0x24, 0x44, 0x92, 0xda, 0xd3, 0xbf, + 0x2f, 0xa1, 0x65, 0xd5, 0x12, 0xa7, 0x50, 0xea, 0xdd, 0x5c, 0xa9, 0x2f, 0x15, 0x1b, 
0x8f, 0x79, + 0x85, 0xc6, 0x1d, 0x74, 0x26, 0x14, 0x54, 0x8c, 0x42, 0x39, 0xa3, 0x05, 0x5a, 0x47, 0xd9, 0x93, + 0x3a, 0xcd, 0x73, 0xca, 0xe2, 0x99, 0xe8, 0x9b, 0x28, 0x5b, 0xfa, 0x77, 0x25, 0x74, 0x2e, 0x3f, + 0x98, 0xf8, 0x0d, 0x54, 0x0d, 0x19, 0x3f, 0x74, 0x6c, 0xb6, 0x47, 0x87, 0x4c, 0xed, 0x8d, 0xff, + 0x29, 0xfd, 0x6a, 0x3b, 0x65, 0x91, 0xac, 0x1c, 0xee, 0x25, 0x6a, 0x96, 0xcf, 0x85, 0x7a, 0xf4, + 0xfc, 0x94, 0xc2, 0x1a, 0x33, 0xa2, 0x35, 0x66, 0xb4, 0x3c, 0xb1, 0xcf, 0xdb, 0x82, 0x3b, 0x5e, + 0x6f, 0xca, 0x11, 0x18, 0x23, 0x59, 0xcb, 0xf8, 0x63, 0x54, 0xe1, 0x2c, 0xf4, 0x47, 0xdc, 0x66, + 0x2a, 0x15, 0xb9, 0xcd, 0x03, 0xfb, 0x1e, 0xca, 0x04, 0x4b, 0xaa, 0xbb, 0xeb, 0xdb, 0xd4, 0x8d, + 0x8a, 0x93, 0xf6, 0xc7, 0x2a, 0xb4, 0x36, 0x51, 0x26, 0x48, 0x62, 0x0c, 0xb6, 0xe7, 0xaa, 0xca, + 0xc5, 0x8e, 0x4b, 0x4f, 0xa5, 0x45, 0x3e, 0xc8, 0xb5, 0x88, 0x59, 0xac, 0xa4, 0x32, 0xb8, 0xb9, + 0x0b, 0xe1, 0x0f, 0x0d, 0xad, 0x67, 0x05, 0x4f, 0x61, 0x27, 0x58, 0xf9, 0x9d, 0x70, 0xf9, 0x04, + 0xcf, 0x98, 0xb3, 0x16, 0xfe, 0xd2, 0x50, 0x23, 0x2b, 0x66, 0x51, 0x4e, 0x87, 0x4c, 0x30, 0x1e, + 0x26, 0x65, 0xc4, 0x5b, 0xa8, 0x42, 0xad, 0xd6, 0xfb, 0xdc, 0x1f, 0x05, 0xf1, 0x71, 0x83, 0xf8, + 0x6e, 0x2b, 0x1a, 0x49, 0xb8, 0x70, 0x02, 0x07, 0x8e, 0xba, 0x53, 0x99, 0x13, 0x78, 0xd7, 0xf1, + 0xba, 0x44, 0x72, 0x40, 0xc2, 0x83, 0x66, 0x5f, 0xcc, 0x4b, 0xc8, 0x2e, 0x97, 0x1c, 0xdc, 0x40, + 0xe5, 0xd0, 0xf6, 0x03, 0x56, 0x5b, 0x92, 0x22, 0x2b, 0x10, 0x72, 0x1b, 0x08, 0x24, 0xa2, 0xe3, + 0xcb, 0x68, 0x05, 0x04, 0xc3, 0x80, 0xda, 0xac, 0x56, 0x96, 0x42, 0x72, 0x11, 0xed, 0xc5, 0x44, + 0x92, 0xf2, 0xf5, 0x5f, 0x26, 0x8a, 0x24, 0x57, 0xdf, 0x36, 0x42, 0xb6, 0xef, 0x09, 0xee, 0xbb, + 0x2e, 0xe3, 0xea, 0x49, 0x49, 0xfb, 0xec, 0x24, 0x1c, 0x92, 0x91, 0xc2, 0x1e, 0x42, 0x41, 0x92, + 0x1b, 0xd5, 0x46, 0x6f, 0x9f, 0x20, 0xff, 0x33, 0x12, 0xdb, 0x3c, 0x07, 0xfe, 0x32, 0x8c, 0x8c, + 0x07, 0xfd, 0x37, 0x0d, 0x55, 0x95, 0xfe, 0x29, 0x34, 0xd6, 0xdd, 0x7c, 0x63, 0xbd, 0x56, 0x10, + 0x61, 0xcc, 0xee, 0xa9, 0x47, 0x1a, 0xda, 0x88, 0x43, 0xf7, 0x69, 0xb7, 0x49, 0x5d, 0xea, 0xd9, + 0x8c, 0xc7, 0xf7, 0x60, 0x03, 0x95, 0x9c, 0xb8, 0x91, 0x90, 0x32, 0x50, 0x6a, 0x59, 0xa4, 0xe4, + 0x04, 0xf8, 0x0a, 0xaa, 0xf4, 0xfd, 0x50, 0xc8, 0x16, 0x89, 0x9a, 0x28, 0x89, 0xfa, 0x8e, 0xa2, + 0x93, 0x44, 0x02, 0x7f, 0x88, 0xca, 0x81, 0xcf, 0x45, 0x58, 0x5b, 0x92, 0x51, 0x5f, 0x2b, 0x16, + 0x35, 0xec, 0x36, 0xb5, 0xac, 0x53, 0xa4, 0x02, 0x66, 0x48, 0x64, 0x4d, 0xff, 0x46, 0x43, 0xff, + 0x9f, 0x11, 0x7f, 0xa4, 0x83, 0xbb, 0x68, 0xd9, 0x89, 0x98, 0x0a, 0x1e, 0xbd, 0x55, 0xcc, 0xed, + 0x8c, 0x54, 0xa4, 0xd0, 0x2c, 0x86, 0x60, 0xb1, 0x69, 0xfd, 0x27, 0x0d, 0x9d, 0x9f, 0x8a, 0x57, + 0x42, 0x4c, 0xd8, 0xf9, 0x90, 0xbc, 0x72, 0x06, 0x62, 0xc2, 0xea, 0x96, 0x1c, 0x7c, 0x17, 0x55, + 0x24, 0x42, 0xb5, 0x7d, 0x57, 0x25, 0xd0, 0x8c, 0x13, 0x68, 0x29, 0xfa, 0x8b, 0xa3, 0xc6, 0x2b, + 0xd3, 0xb0, 0xdd, 0x88, 0xd9, 0x24, 0x31, 0x00, 0xa3, 0xc8, 0x38, 0xf7, 0xb9, 0x9a, 0x56, 0x39, + 0x8a, 0xef, 0x01, 0x81, 0x44, 0x74, 0xfd, 0xe7, 0xb4, 0x49, 0x01, 0x3d, 0x42, 0x7c, 0x50, 0x9c, + 0x49, 0x08, 0x0c, 0xa5, 0x23, 0x92, 0x83, 0x47, 0x68, 0xdd, 0x99, 0x80, 0x9b, 0x27, 0xdb, 0xc9, + 0x89, 0x5a, 0xb3, 0xa6, 0xcc, 0xaf, 0x4f, 0x72, 0xc8, 0x94, 0x0b, 0x9d, 0xa1, 0x29, 0x29, 0x38, + 0x09, 0x7d, 0x21, 0x02, 0x35, 0x4d, 0x37, 0x8a, 0x83, 0xdc, 0x34, 0x84, 0x8a, 0x7c, 0x5d, 0xa7, + 0x63, 0x11, 0x69, 0x4a, 0x7f, 0x5c, 0x4a, 0xf2, 0x21, 0x17, 0xcd, 0x3b, 0xc9, 0x6b, 0xe5, 0x0e, + 0x90, 0x67, 0x3e, 0x5a, 0x6b, 0x17, 0x32, 0x81, 0x27, 0x3c, 0x32, 0x25, 0x8d, 0x3b, 0x29, 0xf8, + 0xd7, 0xfe, 
0x0b, 0xf8, 0xaf, 0xce, 0x02, 0xfe, 0xf8, 0x0e, 0x5a, 0x14, 0x6e, 0x3c, 0xec, 0xaf, + 0x17, 0xb3, 0xd8, 0xd9, 0x6d, 0x37, 0xab, 0x2a, 0xe5, 0x8b, 0x9d, 0xdd, 0x36, 0x01, 0x13, 0x78, + 0x1f, 0x95, 0xf9, 0xc8, 0x65, 0x80, 0x95, 0x16, 0x8b, 0x63, 0x2f, 0xc8, 0x60, 0x3a, 0x7c, 0xf0, + 0x15, 0x92, 0xc8, 0x8e, 0xfe, 0x2d, 0xc0, 0xec, 0x2c, 0xa2, 0xc2, 0x1c, 0xad, 0xba, 0x99, 0xd9, + 0x51, 0x79, 0xb8, 0x75, 0xf2, 0xa9, 0x53, 0x43, 0x7f, 0x41, 0xf9, 0x5d, 0xcd, 0xf2, 0x48, 0xce, + 0x87, 0x4e, 0x11, 0x4a, 0x9f, 0x0d, 0x73, 0x00, 0xcd, 0x1b, 0x0d, 0xbc, 0x9a, 0x03, 0xe8, 0xe9, + 0x90, 0x44, 0x74, 0x38, 0x28, 0x21, 0xb3, 0x39, 0x13, 0x7b, 0xe9, 0xe2, 0x4a, 0x0e, 0x4a, 0x3b, + 0xe1, 0x90, 0x8c, 0x94, 0xfe, 0xab, 0x86, 0xd6, 0x26, 0x00, 0x35, 0x7e, 0x15, 0x95, 0x7b, 0x99, + 0x33, 0x9b, 0x64, 0x28, 0xba, 0xb3, 0x11, 0x0f, 0x76, 0x64, 0x02, 0xcb, 0x26, 0x76, 0xe4, 0x34, + 0xd6, 0xc2, 0x66, 0xf6, 0x5a, 0x46, 0x73, 0x7c, 0x5e, 0x89, 0xcf, 0xbc, 0x98, 0xc9, 0x85, 0x5e, + 0x9a, 0x77, 0xa1, 0xf5, 0x1f, 0x4b, 0x28, 0x06, 0x8d, 0x3b, 0xad, 0x77, 0xc9, 0x29, 0xa0, 0x37, + 0x2b, 0x87, 0xde, 0x8e, 0xfb, 0x6f, 0x4a, 0x26, 0xb6, 0xb9, 0x20, 0xff, 0xfe, 0x04, 0xc8, 0xbf, + 0x76, 0x02, 0x9b, 0x2f, 0x07, 0xfa, 0x8f, 0x35, 0xb4, 0x96, 0x91, 0x3e, 0x85, 0xe3, 0xbd, 0x9f, + 0x3f, 0xde, 0x97, 0x8a, 0x3f, 0x65, 0xce, 0x01, 0xdf, 0xce, 0xbd, 0x40, 0x6e, 0xb2, 0x06, 0x2a, + 0xdb, 0x4e, 0x97, 0xe7, 0x46, 0x00, 0x98, 0x21, 0x89, 0xe8, 0xfa, 0x57, 0xe8, 0xfc, 0x54, 0x8e, + 0xb0, 0x2d, 0x81, 0x56, 0xd7, 0x11, 0x8e, 0xef, 0xc5, 0xe7, 0xd2, 0x2c, 0xf6, 0xf2, 0x9d, 0x58, + 0x2f, 0x87, 0xcc, 0x94, 0x29, 0x92, 0x31, 0xdb, 0xdc, 0x79, 0xf2, 0xbc, 0xbe, 0xf0, 0xf4, 0x79, + 0x7d, 0xe1, 0xd9, 0xf3, 0xfa, 0xc2, 0xd7, 0xe3, 0xba, 0xf6, 0x64, 0x5c, 0xd7, 0x9e, 0x8e, 0xeb, + 0xda, 0xb3, 0x71, 0x5d, 0xfb, 0x7b, 0x5c, 0xd7, 0x7e, 0xf8, 0xa7, 0xbe, 0xf0, 0xc9, 0xc5, 0x97, + 0xfe, 0x99, 0xec, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb7, 0xc2, 0xa4, 0xff, 0x46, 0x13, 0x00, + 0x00, } func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { @@ -709,6 +956,131 @@ func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *IPAddress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAddress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IPAddressList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAddressList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; 
iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IPAddressSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAddressSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPAddressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ParentRef != nil { + { + size, err := m.ParentRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *Ingress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1378,16 +1750,228 @@ func (m *IngressTLS) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *ParentReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil +} + +func (m *ParentReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParentReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceCIDR) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceCIDR) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := 
m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceCIDRList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceCIDRList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceCIDRSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceCIDRSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.CIDRs) > 0 { + for iNdEx := len(m.CIDRs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.CIDRs[iNdEx]) + copy(dAtA[i:], m.CIDRs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDRs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ServiceCIDRStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceCIDRStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceCIDRStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base } func (m *HTTPIngressPath) Size() (n int) { if m == nil { @@ -1421,6 +2005,49 @@ func (m *HTTPIngressRuleValue) Size() (n int) { return n } +func (m *IPAddress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IPAddressList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := 
range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IPAddressSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ParentRef != nil { + l = m.ParentRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *Ingress) Size() (n int) { if m == nil { return 0 @@ -1673,31 +2300,110 @@ func (m *IngressTLS) Size() (n int) { return n } -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *HTTPIngressPath) String() string { - if this == nil { - return "nil" +func (m *ParentReference) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&HTTPIngressPath{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, - `PathType:` + valueToStringGenerated(this.PathType) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *HTTPIngressRuleValue) String() string { - if this == nil { - return "nil" + +func (m *ServiceCIDR) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForPaths := "[]HTTPIngressPath{" - for _, f := range this.Paths { - repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceCIDRList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceCIDRSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.CIDRs) > 0 { + for _, s := range m.CIDRs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceCIDRStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *HTTPIngressPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPIngressPath{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, + `PathType:` + valueToStringGenerated(this.PathType) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPIngressRuleValue) String() string { + if this == nil { + return "nil" + } + repeatedStringForPaths := "[]HTTPIngressPath{" + for _, f := range this.Paths { + repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), 
"HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," } repeatedStringForPaths += "}" s := strings.Join([]string{`&HTTPIngressRuleValue{`, @@ -1706,6 +2412,43 @@ func (this *HTTPIngressRuleValue) String() string { }, "") return s } +func (this *IPAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPAddress{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IPAddressSpec", "IPAddressSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IPAddressList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]IPAddress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IPAddress", "IPAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&IPAddressList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *IPAddressSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPAddressSpec{`, + `ParentRef:` + strings.Replace(this.ParentRef.String(), "ParentReference", "ParentReference", 1) + `,`, + `}`, + }, "") + return s +} func (this *Ingress) String() string { if this == nil { return "nil" @@ -1900,22 +2643,1172 @@ func (this *IngressTLS) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&IngressTLS{`, - `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + s := strings.Join([]string{`&IngressTLS{`, + `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `}`, + }, "") + return s +} +func (this *ParentReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParentReference{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceCIDR) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceCIDR{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceCIDRSpec", "ServiceCIDRSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceCIDRStatus", "ServiceCIDRStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceCIDRList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ServiceCIDR{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ServiceCIDR", "ServiceCIDR", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ServiceCIDRList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", 
"v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ServiceCIDRSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceCIDRSpec{`, + `CIDRs:` + fmt.Sprintf("%v", this.CIDRs) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceCIDRStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ServiceCIDRStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + 
} + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PathType(dAtA[iNdEx:postIndex]) + m.PathType = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, HTTPIngressPath{}) + if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAddress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAddress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAddressList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAddressList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAddressList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, IPAddress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} 
+func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAddressSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAddressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParentRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ParentRef == nil { + m.ParentRef = &ParentReference{} + } + if err := m.ParentRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Ingress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Ingress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressBackend) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &v11.TypedLocalObjectReference{} + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, IngressClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } -func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { +func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1938,15 +3831,15 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") + return fmt.Errorf("proto: IngressClassParametersReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressClassParametersReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1974,13 +3867,14 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.APIGroup = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field Backend", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1990,28 +3884,27 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2039,64 +3932,46 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := PathType(dAtA[iNdEx:postIndex]) - m.PathType = &s + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + s := string(dAtA[iNdEx:postIndex]) + m.Scope = &s + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2106,25 +3981,24 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift 
if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Paths = append(m.Paths, HTTPIngressPath{}) - if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Namespace = &s iNdEx = postIndex default: iNdEx = preIndex @@ -2147,7 +4021,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { } return nil } -func (m *Ingress) Unmarshal(dAtA []byte) error { +func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2170,17 +4044,17 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Ingress: wiretype end group for non-group") + return fmt.Errorf("proto: IngressClassSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2190,28 +4064,27 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Controller = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2238,40 +4111,10 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + if m.Parameters == nil { + m.Parameters = &IngressClassParametersReference{} } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2296,7 
+4139,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressBackend) Unmarshal(dAtA []byte) error { +func (m *IngressList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2319,47 +4162,15 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") + return fmt.Errorf("proto: IngressList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServiceName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2386,13 +4197,13 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2419,10 +4230,8 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Resource == nil { - m.Resource = &v11.TypedLocalObjectReference{} - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, Ingress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2447,7 +4256,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressClass) Unmarshal(dAtA []byte) error { +func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2470,17 +4279,17 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressClass: wiretype end group for non-group") + return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClass: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2490,28 +4299,59 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.IP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2538,7 +4378,8 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, IngressPortStatus{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2563,7 +4404,7 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressClassList) Unmarshal(dAtA []byte) error { +func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2586,48 +4427,15 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressClassList: wiretype end group for non-group") + return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClassList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2654,8 +4462,8 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, IngressClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2680,7 +4488,7 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { +func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2703,17 +4511,17 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressClassParametersReference: wiretype end group for non-group") + return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClassParametersReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } - var stringLen uint64 + m.Port = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2723,28 +4531,14 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Port |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.APIGroup = &s - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2772,11 +4566,11 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(dAtA[iNdEx:postIndex]) + m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2804,11 +4598,62 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.Error = &s iNdEx = postIndex - case 4: + default: + iNdEx = 
preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2836,14 +4681,13 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Scope = &s + m.Host = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2853,24 +4697,24 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Namespace = &s + if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -2893,7 +4737,7 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { +func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2912,51 +4756,19 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { if b < 0x80 { break } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressClassSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << 
shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Controller = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2983,10 +4795,10 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Parameters == nil { - m.Parameters = &IngressClassParametersReference{} + if m.HTTP == nil { + m.HTTP = &HTTPIngressRuleValue{} } - if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3011,7 +4823,7 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressList) Unmarshal(dAtA []byte) error { +func (m *IngressSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3034,15 +4846,15 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressList: wiretype end group for non-group") + return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3069,13 +4881,16 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Backend == nil { + m.Backend = &IngressBackend{} + } + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3102,66 +4917,16 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Ingress{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.TLS = append(m.TLS, IngressTLS{}) + if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - 
return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3171,27 +4936,29 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.IP = string(dAtA[iNdEx:postIndex]) + m.Rules = append(m.Rules, IngressRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3219,11 +4986,62 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hostname = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.IngressClassName = &s iNdEx = postIndex - case 4: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return 
fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3250,8 +5068,7 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ports = append(m.Ports, IngressPortStatus{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3276,7 +5093,7 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { +func (m *IngressTLS) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3299,17 +5116,17 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group") + return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3319,25 +5136,55 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{}) - if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.SecretName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -3360,7 +5207,7 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { +func (m *ParentReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3383,17 +5230,49 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: 
IngressPortStatus: wiretype end group for non-group") + return fmt.Errorf("proto: ParentReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ParentReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } - m.Port = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3403,14 +5282,27 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Port |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 2: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3438,11 +5330,11 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3470,8 +5362,7 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Error = &s + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -3494,7 +5385,7 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressRule) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDR) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3517,17 +5408,17 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDR: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRule: illegal 
tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDR: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3537,27 +5428,28 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Host = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3584,63 +5476,13 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3667,10 +5509,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.HTTP == nil { - m.HTTP = &HTTPIngressRuleValue{} - } - if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3695,7 +5534,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressSpec) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDRList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3718,15 +5557,15 @@ func (m 
*IngressSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDRList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDRList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3753,50 +5592,13 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Backend == nil { - m.Backend = &IngressBackend{} - } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TLS = append(m.TLS, IngressTLS{}) - if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3823,44 +5625,11 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Rules = append(m.Rules, IngressRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ServiceCIDR{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.IngressClassName = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3882,7 +5651,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressStatus) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDRSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3905,17 +5674,17 @@ func (m *IngressStatus) Unmarshal(dAtA 
[]byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDRSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CIDRs", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3925,24 +5694,23 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.CIDRs = append(m.CIDRs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -3965,7 +5733,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressTLS) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDRStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3988,17 +5756,17 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDRStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDRStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4008,55 +5776,25 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - 
return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.SecretName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.proto b/vendor/k8s.io/api/networking/v1beta1/generated.proto index 46bb7f66f2..3368dcaec3 100644 --- a/vendor/k8s.io/api/networking/v1beta1/generated.proto +++ b/vendor/k8s.io/api/networking/v1beta1/generated.proto @@ -69,9 +69,48 @@ message HTTPIngressPath { // or '#'. message HTTPIngressRuleValue { // paths is a collection of paths that map requests to backends. + // +listType=atomic repeated HTTPIngressPath paths = 1; } +// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs +// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. +// An IP address can be represented in different formats, to guarantee the uniqueness of the IP, +// the name of the object is the IP address in canonical format, four decimal digits separated +// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. +// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 +// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1 +message IPAddress { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the desired state of the IPAddress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional IPAddressSpec spec = 2; +} + +// IPAddressList contains a list of IPAddress. +message IPAddressList { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of IPAddresses. + repeated IPAddress items = 2; +} + +// IPAddressSpec describe the attributes in an IP Address. +message IPAddressSpec { + // ParentRef references the resource that an IPAddress is attached to. + // An IPAddress must reference a parent object. + // +required + optional ParentReference parentRef = 1; +} + // Ingress is a collection of rules that allow inbound connections to reach the // endpoints defined by a backend. An Ingress can be configured to give services // externally-reachable urls, load balance traffic, terminate SSL, offer name @@ -80,7 +119,7 @@ message Ingress { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -101,13 +140,13 @@ message IngressBackend { // servicePort Specifies the port of the referenced service. 
// +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, serviceName and servicePort // must not be specified. // +optional - optional k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; + optional .k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; } // IngressClass represents the class of the Ingress, referenced by the Ingress @@ -119,7 +158,7 @@ message IngressClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -131,7 +170,7 @@ message IngressClass { message IngressClassList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of IngressClasses. repeated IngressClass items = 2; @@ -185,7 +224,7 @@ message IngressList { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of Ingress. repeated Ingress items = 2; @@ -211,6 +250,7 @@ message IngressLoadBalancerIngress { message IngressLoadBalancerStatus { // ingress is a list containing ingress points for the load-balancer. // +optional + // +listType=atomic repeated IngressLoadBalancerIngress ingress = 1; } @@ -313,11 +353,13 @@ message IngressSpec { // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +optional + // +listType=atomic repeated IngressTLS tls = 2; // rules is a list of host rules used to configure the Ingress. If unspecified, or // no rule matches, all traffic is sent to the default backend. // +optional + // +listType=atomic repeated IngressRule rules = 3; } @@ -335,6 +377,7 @@ message IngressTLS { // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. // +optional + // +listType=atomic repeated string hosts = 1; // secretName is the name of the secret used to terminate TLS traffic on @@ -346,3 +389,74 @@ message IngressTLS { optional string secretName = 2; } +// ParentReference describes a reference to a parent object. +message ParentReference { + // Group is the group of the object being referenced. + // +optional + optional string group = 1; + + // Resource is the resource of the object being referenced. + // +required + optional string resource = 2; + + // Namespace is the namespace of the object being referenced. + // +optional + optional string namespace = 3; + + // Name is the name of the object being referenced. + // +required + optional string name = 4; +} + +// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). +// This range is used to allocate ClusterIPs to Service objects. 
+message ServiceCIDR { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the desired state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ServiceCIDRSpec spec = 2; + + // status represents the current state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ServiceCIDRStatus status = 3; +} + +// ServiceCIDRList contains a list of ServiceCIDR objects. +message ServiceCIDRList { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of ServiceCIDRs. + repeated ServiceCIDR items = 2; +} + +// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services. +message ServiceCIDRSpec { + // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") + // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. + // This field is immutable. + // +optional + // +listType=atomic + repeated string cidrs = 1; +} + +// ServiceCIDRStatus describes the current state of the ServiceCIDR. +message ServiceCIDRStatus { + // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. + // Current service state + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; +} + diff --git a/vendor/k8s.io/api/networking/v1beta1/register.go b/vendor/k8s.io/api/networking/v1beta1/register.go index 04234953e6..9d2a13cc68 100644 --- a/vendor/k8s.io/api/networking/v1beta1/register.go +++ b/vendor/k8s.io/api/networking/v1beta1/register.go @@ -51,6 +51,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { &IngressList{}, &IngressClass{}, &IngressClassList{}, + &IPAddress{}, + &IPAddressList{}, + &ServiceCIDR{}, + &ServiceCIDRList{}, ) // Add the watch version that applies metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/networking/v1beta1/types.go b/vendor/k8s.io/api/networking/v1beta1/types.go index 87cc91654b..cd7126a5a8 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types.go +++ b/vendor/k8s.io/api/networking/v1beta1/types.go @@ -97,11 +97,13 @@ type IngressSpec struct { // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +optional + // +listType=atomic TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` // rules is a list of host rules used to configure the Ingress. If unspecified, or // no rule matches, all traffic is sent to the default backend. // +optional + // +listType=atomic Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` // TODO: Add the ability to specify load-balancer IP through claims } @@ -113,6 +115,7 @@ type IngressTLS struct { // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. 
// +optional + // +listType=atomic Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` // secretName is the name of the secret used to terminate TLS traffic on @@ -136,6 +139,7 @@ type IngressStatus struct { type IngressLoadBalancerStatus struct { // ingress is a list containing ingress points for the load-balancer. // +optional + // +listType=atomic Ingress []IngressLoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` } @@ -214,7 +218,7 @@ type IngressRule struct { // default backend, is left to the controller fulfilling the Ingress. Http is // currently the only supported IngressRuleValue. // +optional - IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"` + IngressRuleValue `json:",inline" protobuf:"bytes,2,opt,name=ingressRuleValue"` } // IngressRuleValue represents a rule to apply against incoming requests. If the @@ -239,6 +243,7 @@ type IngressRuleValue struct { // or '#'. type HTTPIngressRuleValue struct { // paths is a collection of paths that map requests to backends. + // +listType=atomic Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` // TODO: Consider adding fields for ingress-type specific global // options usable by a loadbalancer, like http keep-alive. @@ -416,3 +421,133 @@ type IngressClassList struct { // items is the list of IngressClasses. Items []IngressClass `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs +// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. +// An IP address can be represented in different formats, to guarantee the uniqueness of the IP, +// the name of the object is the IP address in canonical format, four decimal digits separated +// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. +// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 +// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1 +type IPAddress struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // spec is the desired state of the IPAddress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec IPAddressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// IPAddressSpec describe the attributes in an IP Address. +type IPAddressSpec struct { + // ParentRef references the resource that an IPAddress is attached to. + // An IPAddress must reference a parent object. + // +required + ParentRef *ParentReference `json:"parentRef,omitempty" protobuf:"bytes,1,opt,name=parentRef"` +} + +// ParentReference describes a reference to a parent object. +type ParentReference struct { + // Group is the group of the object being referenced. + // +optional + Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"` + // Resource is the resource of the object being referenced. 
+ // +required + Resource string `json:"resource,omitempty" protobuf:"bytes,2,opt,name=resource"` + // Namespace is the namespace of the object being referenced. + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` + // Name is the name of the object being referenced. + // +required + Name string `json:"name,omitempty" protobuf:"bytes,4,opt,name=name"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// IPAddressList contains a list of IPAddress. +type IPAddressList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // items is the list of IPAddresses. + Items []IPAddress `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). +// This range is used to allocate ClusterIPs to Service objects. +type ServiceCIDR struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // spec is the desired state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec ServiceCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // status represents the current state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ServiceCIDRStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services. +type ServiceCIDRSpec struct { + // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") + // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. + // This field is immutable. + // +optional + // +listType=atomic + CIDRs []string `json:"cidrs,omitempty" protobuf:"bytes,1,opt,name=cidrs"` +} + +const ( + // ServiceCIDRConditionReady represents status of a ServiceCIDR that is ready to be used by the + // apiserver to allocate ClusterIPs for Services. + ServiceCIDRConditionReady = "Ready" + // ServiceCIDRReasonTerminating represents a reason where a ServiceCIDR is not ready because it is + // being deleted. + ServiceCIDRReasonTerminating = "Terminating" +) + +// ServiceCIDRStatus describes the current state of the ServiceCIDR. +type ServiceCIDRStatus struct { + // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. 
+ // Current service state + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// ServiceCIDRList contains a list of ServiceCIDR objects. +type ServiceCIDRList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // items is the list of ServiceCIDRs. + Items []ServiceCIDR `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go index b2373669fe..9d27517f3b 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go @@ -47,6 +47,35 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { return map_HTTPIngressRuleValue } +var map_IPAddress = map[string]string{ + "": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the desired state of the IPAddress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (IPAddress) SwaggerDoc() map[string]string { + return map_IPAddress +} + +var map_IPAddressList = map[string]string{ + "": "IPAddressList contains a list of IPAddress.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of IPAddresses.", +} + +func (IPAddressList) SwaggerDoc() map[string]string { + return map_IPAddressList +} + +var map_IPAddressSpec = map[string]string{ + "": "IPAddressSpec describe the attributes in an IP Address.", + "parentRef": "ParentRef references the resource that an IPAddress is attached to. An IPAddress must reference a parent object.", +} + +func (IPAddressSpec) SwaggerDoc() map[string]string { + return map_IPAddressSpec +} + var map_Ingress = map[string]string{ "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -201,4 +230,55 @@ func (IngressTLS) SwaggerDoc() map[string]string { return map_IngressTLS } +var map_ParentReference = map[string]string{ + "": "ParentReference describes a reference to a parent object.", + "group": "Group is the group of the object being referenced.", + "resource": "Resource is the resource of the object being referenced.", + "namespace": "Namespace is the namespace of the object being referenced.", + "name": "Name is the name of the object being referenced.", +} + +func (ParentReference) SwaggerDoc() map[string]string { + return map_ParentReference +} + +var map_ServiceCIDR = map[string]string{ + "": "ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the desired state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "status represents the current state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (ServiceCIDR) SwaggerDoc() map[string]string { + return map_ServiceCIDR +} + +var map_ServiceCIDRList = map[string]string{ + "": "ServiceCIDRList contains a list of ServiceCIDR objects.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of ServiceCIDRs.", +} + +func (ServiceCIDRList) SwaggerDoc() map[string]string { + return map_ServiceCIDRList +} + +var map_ServiceCIDRSpec = map[string]string{ + "": "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.", + "cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable.", +} + +func (ServiceCIDRSpec) SwaggerDoc() map[string]string { + return map_ServiceCIDRSpec +} + +var map_ServiceCIDRStatus = map[string]string{ + "": "ServiceCIDRStatus describes the current state of the ServiceCIDR.", + "conditions": "conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state", +} + +func (ServiceCIDRStatus) SwaggerDoc() map[string]string { + return map_ServiceCIDRStatus +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/networking/v1beta1/well_known_labels.go b/vendor/k8s.io/api/networking/v1beta1/well_known_labels.go new file mode 100644 index 0000000000..bc2207766f --- /dev/null +++ b/vendor/k8s.io/api/networking/v1beta1/well_known_labels.go @@ -0,0 +1,33 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +const ( + + // TODO: Use IPFamily as field with a field selector,And the value is set based on + // the name at create time and immutable. + // LabelIPAddressFamily is used to indicate the IP family of a Kubernetes IPAddress. + // This label simplify dual-stack client operations allowing to obtain the list of + // IP addresses filtered by family. + LabelIPAddressFamily = "ipaddress.kubernetes.io/ip-family" + // LabelManagedBy is used to indicate the controller or entity that manages + // an IPAddress. This label aims to enable different IPAddress + // objects to be managed by different controllers or entities within the + // same cluster. It is highly recommended to configure this label for all + // IPAddress objects. + LabelManagedBy = "ipaddress.kubernetes.io/managed-by" +) diff --git a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go index 005d64e7fd..1a6869cd6d 100644 --- a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go @@ -23,6 +23,7 @@ package v1beta1 import ( v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -71,6 +72,87 @@ func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddress) DeepCopyInto(out *IPAddress) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress. +func (in *IPAddress) DeepCopy() *IPAddress { + if in == nil { + return nil + } + out := new(IPAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IPAddress) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddressList) DeepCopyInto(out *IPAddressList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IPAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList. +func (in *IPAddressList) DeepCopy() *IPAddressList { + if in == nil { + return nil + } + out := new(IPAddressList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IPAddressList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) { + *out = *in + if in.ParentRef != nil { + in, out := &in.ParentRef, &out.ParentRef + *out = new(ParentReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec. +func (in *IPAddressSpec) DeepCopy() *IPAddressSpec { + if in == nil { + return nil + } + out := new(IPAddressSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Ingress) DeepCopyInto(out *Ingress) { *out = *in @@ -448,3 +530,124 @@ func (in *IngressTLS) DeepCopy() *IngressTLS { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParentReference) DeepCopyInto(out *ParentReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference. +func (in *ParentReference) DeepCopy() *ParentReference { + if in == nil { + return nil + } + out := new(ParentReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDR) DeepCopyInto(out *ServiceCIDR) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDR. +func (in *ServiceCIDR) DeepCopy() *ServiceCIDR { + if in == nil { + return nil + } + out := new(ServiceCIDR) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCIDR) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDRList) DeepCopyInto(out *ServiceCIDRList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceCIDR, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRList. +func (in *ServiceCIDRList) DeepCopy() *ServiceCIDRList { + if in == nil { + return nil + } + out := new(ServiceCIDRList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCIDRList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDRSpec) DeepCopyInto(out *ServiceCIDRSpec) { + *out = *in + if in.CIDRs != nil { + in, out := &in.CIDRs, &out.CIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRSpec. 
+func (in *ServiceCIDRSpec) DeepCopy() *ServiceCIDRSpec { + if in == nil { + return nil + } + out := new(ServiceCIDRSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDRStatus) DeepCopyInto(out *ServiceCIDRStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRStatus. +func (in *ServiceCIDRStatus) DeepCopy() *ServiceCIDRStatus { + if in == nil { + return nil + } + out := new(ServiceCIDRStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go index e8b4c7ec7f..a876fd5fe0 100644 --- a/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go @@ -25,6 +25,42 @@ import ( schema "k8s.io/apimachinery/pkg/runtime/schema" ) +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IPAddress) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *IPAddress) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *IPAddress) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IPAddressList) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *IPAddressList) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
+func (in *IPAddressList) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *Ingress) APILifecycleIntroduced() (major, minor int) { @@ -120,3 +156,39 @@ func (in *IngressList) APILifecycleReplacement() schema.GroupVersionKind { func (in *IngressList) APILifecycleRemoved() (major, minor int) { return 1, 22 } + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceCIDR) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ServiceCIDR) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ServiceCIDR) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceCIDRList) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ServiceCIDRList) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ServiceCIDRList) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} diff --git a/vendor/k8s.io/api/node/v1/doc.go b/vendor/k8s.io/api/node/v1/doc.go index 12cbcb8a0e..57ca52445b 100644 --- a/vendor/k8s.io/api/node/v1/doc.go +++ b/vendor/k8s.io/api/node/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=node.k8s.io package v1 // import "k8s.io/api/node/v1" diff --git a/vendor/k8s.io/api/node/v1/generated.pb.go b/vendor/k8s.io/api/node/v1/generated.pb.go index 5355cbae7d..4c304f55f9 100644 --- a/vendor/k8s.io/api/node/v1/generated.pb.go +++ b/vendor/k8s.io/api/node/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1/generated.proto +// source: k8s.io/api/node/v1/generated.proto package v1 @@ -51,7 +51,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *Overhead) Reset() { *m = Overhead{} } func (*Overhead) ProtoMessage() {} func (*Overhead) Descriptor() ([]byte, []int) { - return fileDescriptor_6ac9be560e26ae98, []int{0} + return fileDescriptor_9007436710e7565b, []int{0} } func (m *Overhead) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -79,7 +79,7 @@ var xxx_messageInfo_Overhead proto.InternalMessageInfo func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } func (*RuntimeClass) ProtoMessage() {} func (*RuntimeClass) Descriptor() ([]byte, []int) { - return fileDescriptor_6ac9be560e26ae98, []int{1} + return fileDescriptor_9007436710e7565b, []int{1} } func (m *RuntimeClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,7 +107,7 @@ var xxx_messageInfo_RuntimeClass proto.InternalMessageInfo func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } func (*RuntimeClassList) ProtoMessage() {} func (*RuntimeClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_6ac9be560e26ae98, []int{2} + return fileDescriptor_9007436710e7565b, []int{2} } func (m *RuntimeClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,7 +135,7 @@ var xxx_messageInfo_RuntimeClassList proto.InternalMessageInfo func (m *Scheduling) Reset() { *m = Scheduling{} } func (*Scheduling) ProtoMessage() {} func (*Scheduling) Descriptor() ([]byte, []int) { - return fileDescriptor_6ac9be560e26ae98, []int{3} + return fileDescriptor_9007436710e7565b, []int{3} } func (m *Scheduling) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -170,53 +170,52 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1/generated.proto", fileDescriptor_6ac9be560e26ae98) + proto.RegisterFile("k8s.io/api/node/v1/generated.proto", fileDescriptor_9007436710e7565b) } -var fileDescriptor_6ac9be560e26ae98 = []byte{ - // 660 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x41, 0x6f, 0xd3, 0x4a, - 0x10, 0xce, 0xa6, 0xaf, 0x6a, 0xba, 0x49, 0xdf, 0xeb, 0x5b, 0x7a, 0x88, 0x22, 0xe4, 0x44, 0x39, - 0x15, 0xa4, 0xae, 0xdb, 0x0a, 0xa1, 0x0a, 0x0e, 0x48, 0x86, 0x56, 0x20, 0x41, 0x01, 0x17, 0x2e, - 0x88, 0x03, 0x1b, 0x7b, 0x70, 0xdc, 0xc4, 0xde, 0x68, 0xbd, 0x8e, 0xc8, 0x0d, 0x71, 0x41, 0xe2, - 0xd4, 0xff, 0xc2, 0x81, 0xbf, 0x50, 0x71, 0xea, 0xb1, 0xa7, 0x96, 0x86, 0x7f, 0xc1, 0x09, 0xed, - 0xda, 0x4e, 0x5c, 0x1c, 0x42, 0xb9, 0x79, 0x67, 0xbf, 0xef, 0x9b, 0x99, 0x6f, 0x76, 0x8c, 0xef, - 0xf6, 0x76, 0x22, 0xea, 0x73, 0xb3, 0x17, 0x77, 0x40, 0x84, 0x20, 0x21, 0x32, 0x87, 0x10, 0xba, - 0x5c, 0x98, 0xe9, 0x05, 0x1b, 0xf8, 0x66, 0xc8, 0x5d, 0x30, 0x87, 0x5b, 0xa6, 0x07, 0x21, 0x08, - 0x26, 0xc1, 0xa5, 0x03, 0xc1, 0x25, 0x27, 0x24, 0xc1, 0x50, 0x36, 0xf0, 0xa9, 0xc2, 0xd0, 0xe1, - 0x56, 0x63, 0xc3, 0xf3, 
0x65, 0x37, 0xee, 0x50, 0x87, 0x07, 0xa6, 0xc7, 0x3d, 0x6e, 0x6a, 0x68, - 0x27, 0x7e, 0xab, 0x4f, 0xfa, 0xa0, 0xbf, 0x12, 0x89, 0x46, 0x3b, 0x97, 0xc6, 0xe1, 0x62, 0x56, - 0x9a, 0xc6, 0xad, 0x29, 0x26, 0x60, 0x4e, 0xd7, 0x0f, 0x41, 0x8c, 0xcc, 0x41, 0xcf, 0xd3, 0x24, - 0x01, 0x11, 0x8f, 0x85, 0x03, 0x7f, 0xc5, 0x8a, 0xcc, 0x00, 0x24, 0x9b, 0x95, 0xcb, 0xfc, 0x1d, - 0x4b, 0xc4, 0xa1, 0xf4, 0x83, 0x62, 0x9a, 0xdb, 0x7f, 0x22, 0x44, 0x4e, 0x17, 0x02, 0xf6, 0x2b, - 0xaf, 0xfd, 0xb5, 0x8c, 0x2b, 0x4f, 0x87, 0x20, 0xba, 0xc0, 0x5c, 0x72, 0x82, 0x70, 0x65, 0xc0, - 0xdd, 0x3d, 0xff, 0x1d, 0xb8, 0x75, 0xd4, 0x5a, 0x58, 0xaf, 0x6e, 0xdf, 0xa4, 0x45, 0x73, 0x69, - 0x46, 0xa0, 0xcf, 0x52, 0xf0, 0x6e, 0x28, 0xc5, 0xc8, 0xfa, 0x88, 0x8e, 0xcf, 0x9a, 0xa5, 0xf1, - 0x59, 0xb3, 0x92, 0xc5, 0x7f, 0x9c, 0x35, 0x9b, 0x45, 0x67, 0xa9, 0x9d, 0x9a, 0xf5, 0xd8, 0x8f, - 0xe4, 0x87, 0xf3, 0xb9, 0x90, 0x7d, 0x16, 0xc0, 0xa7, 0xf3, 0xe6, 0xc6, 0x55, 0xbc, 0xa7, 0xcf, - 0x63, 0x16, 0x4a, 0x5f, 0x8e, 0xec, 0x49, 0x17, 0x8d, 0x1e, 0x5e, 0xb9, 0x54, 0x24, 0x59, 0xc5, - 0x0b, 0x3d, 0x18, 0xd5, 0x51, 0x0b, 0xad, 0x2f, 0xdb, 0xea, 0x93, 0x3c, 0xc0, 0x8b, 0x43, 0xd6, - 0x8f, 0xa1, 0x5e, 0x6e, 0xa1, 0xf5, 0xea, 0x36, 0xcd, 0x75, 0x3c, 0xc9, 0x45, 0x07, 0x3d, 0x4f, - 0x5b, 0x50, 0xcc, 0x95, 0x90, 0xef, 0x94, 0x77, 0x50, 0xfb, 0x73, 0x19, 0xd7, 0xec, 0xc4, 0xef, - 0xfb, 0x7d, 0x16, 0x45, 0xe4, 0x0d, 0xae, 0xa8, 0x09, 0xbb, 0x4c, 0x32, 0x9d, 0xb1, 0xba, 0xbd, - 0x39, 0x4f, 0x3d, 0xa2, 0x0a, 0xad, 0x1d, 0xee, 0x1c, 0x82, 0x23, 0x9f, 0x80, 0x64, 0x16, 0x49, - 0x4d, 0xc5, 0xd3, 0x98, 0x3d, 0x51, 0x25, 0x37, 0xf0, 0x52, 0x97, 0x85, 0x6e, 0x1f, 0x84, 0x2e, - 0x7f, 0xd9, 0xfa, 0x2f, 0x85, 0x2f, 0x3d, 0x4c, 0xc2, 0x76, 0x76, 0x4f, 0xf6, 0x70, 0x85, 0xa7, - 0x83, 0xab, 0x2f, 0xe8, 0x62, 0xae, 0xcf, 0x1b, 0xae, 0x55, 0x53, 0x93, 0xcc, 0x4e, 0xf6, 0x84, - 0x4b, 0xf6, 0x31, 0x56, 0x8f, 0xc9, 0x8d, 0xfb, 0x7e, 0xe8, 0xd5, 0xff, 0xd1, 0x4a, 0xc6, 0x2c, - 0xa5, 0x83, 0x09, 0xca, 0xfa, 0x57, 0x35, 0x30, 0x3d, 0xdb, 0x39, 0x85, 0xf6, 0x17, 0x84, 0x57, - 0xf3, 0xae, 0xa9, 0x57, 0x41, 0x5e, 0x17, 0x9c, 0xa3, 0x57, 0x73, 0x4e, 0xb1, 0xb5, 0x6f, 0xab, - 0xd9, 0x63, 0xcc, 0x22, 0x39, 0xd7, 0x76, 0xf1, 0xa2, 0x2f, 0x21, 0x88, 0xea, 0x65, 0xfd, 0xc8, - 0x5b, 0xb3, 0xaa, 0xcf, 0x97, 0x64, 0xad, 0xa4, 0x62, 0x8b, 0x8f, 0x14, 0xcd, 0x4e, 0xd8, 0xed, - 0xa3, 0x32, 0xce, 0x35, 0x45, 0x0e, 0x71, 0x4d, 0x91, 0x0f, 0xa0, 0x0f, 0x8e, 0xe4, 0x22, 0xdd, - 0xa0, 0xcd, 0xf9, 0xd6, 0xd0, 0xfd, 0x1c, 0x25, 0xd9, 0xa3, 0xb5, 0x34, 0x59, 0x2d, 0x7f, 0x65, - 0x5f, 0xd2, 0x26, 0x2f, 0x71, 0x55, 0xf2, 0xbe, 0x5a, 0x65, 0x9f, 0x87, 0x59, 0x1f, 0x97, 0xa6, - 0xa0, 0x36, 0x49, 0xa5, 0x7a, 0x31, 0x81, 0x59, 0xd7, 0x52, 0xe1, 0xea, 0x34, 0x16, 0xd9, 0x79, - 0x9d, 0xc6, 0x3d, 0xfc, 0x7f, 0xa1, 0x9e, 0x19, 0x2b, 0xb3, 0x96, 0x5f, 0x99, 0xe5, 0xdc, 0x0a, - 0x58, 0x3b, 0xc7, 0x17, 0x46, 0xe9, 0xe4, 0xc2, 0x28, 0x9d, 0x5e, 0x18, 0xa5, 0xf7, 0x63, 0x03, - 0x1d, 0x8f, 0x0d, 0x74, 0x32, 0x36, 0xd0, 0xe9, 0xd8, 0x40, 0xdf, 0xc6, 0x06, 0x3a, 0xfa, 0x6e, - 0x94, 0x5e, 0x91, 0xe2, 0x5f, 0xfd, 0x67, 0x00, 0x00, 0x00, 0xff, 0xff, 0x46, 0x77, 0x65, 0x3b, - 0x03, 0x06, 0x00, 0x00, +var fileDescriptor_9007436710e7565b = []byte{ + // 643 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4f, 0x6f, 0xd3, 0x4e, + 0x10, 0xcd, 0xa6, 0xbf, 0xaa, 0xe9, 0x26, 0xfd, 0x51, 0x96, 0x1e, 0xa2, 0x08, 0x39, 0x51, 0x4e, + 0x05, 0xa9, 0xeb, 0xb6, 0x42, 0xa8, 0xe2, 0x82, 0x64, 0x68, 0x05, 0x12, 0x14, 0x70, 0xe1, 0x82, + 0x38, 0xb0, 0xb5, 0x17, 
0x67, 0x9b, 0xd8, 0x1b, 0xd9, 0xeb, 0x88, 0xdc, 0x10, 0x17, 0x24, 0x4e, + 0xfd, 0x2e, 0x1c, 0xf8, 0x0a, 0x15, 0xa7, 0x1e, 0x7b, 0x6a, 0xa9, 0xf9, 0x16, 0x9c, 0xd0, 0xae, + 0xff, 0x64, 0x83, 0x43, 0x28, 0x37, 0xef, 0xec, 0x7b, 0x6f, 0x66, 0xde, 0xec, 0x18, 0x76, 0xfb, + 0x3b, 0x11, 0x66, 0xdc, 0x24, 0x43, 0x66, 0x06, 0xdc, 0xa5, 0xe6, 0x68, 0xcb, 0xf4, 0x68, 0x40, + 0x43, 0x22, 0xa8, 0x8b, 0x87, 0x21, 0x17, 0x1c, 0xa1, 0x14, 0x83, 0xc9, 0x90, 0x61, 0x89, 0xc1, + 0xa3, 0xad, 0xd6, 0x86, 0xc7, 0x44, 0x2f, 0x3e, 0xc4, 0x0e, 0xf7, 0x4d, 0x8f, 0x7b, 0xdc, 0x54, + 0xd0, 0xc3, 0xf8, 0x9d, 0x3a, 0xa9, 0x83, 0xfa, 0x4a, 0x25, 0x5a, 0x7a, 0x1a, 0x87, 0x87, 0xb3, + 0xd2, 0xb4, 0xee, 0x4c, 0x30, 0x3e, 0x71, 0x7a, 0x2c, 0xa0, 0xe1, 0xd8, 0x1c, 0xf6, 0x3d, 0x45, + 0x0a, 0x69, 0xc4, 0xe3, 0xd0, 0xa1, 0xff, 0xc4, 0x8a, 0x4c, 0x9f, 0x0a, 0x32, 0x2b, 0x97, 0xf9, + 0x27, 0x56, 0x18, 0x07, 0x82, 0xf9, 0xe5, 0x34, 0x77, 0xff, 0x46, 0x88, 0x9c, 0x1e, 0xf5, 0xc9, + 0xef, 0xbc, 0xee, 0xb7, 0x2a, 0xac, 0x3d, 0x1b, 0xd1, 0xb0, 0x47, 0x89, 0x8b, 0x4e, 0x01, 0xac, + 0x0d, 0xb9, 0xbb, 0xc7, 0xde, 0x53, 0xb7, 0x09, 0x3a, 0x0b, 0xeb, 0xf5, 0xed, 0xdb, 0xb8, 0x6c, + 0x2e, 0xce, 0x09, 0xf8, 0x79, 0x06, 0xde, 0x0d, 0x44, 0x38, 0xb6, 0x3e, 0x81, 0x93, 0xf3, 0x76, + 0x25, 0x39, 0x6f, 0xd7, 0xf2, 0xf8, 0xcf, 0xf3, 0x76, 0xbb, 0xec, 0x2c, 0xb6, 0x33, 0xb3, 0x9e, + 0xb0, 0x48, 0x7c, 0xbc, 0x98, 0x0b, 0xd9, 0x27, 0x3e, 0xfd, 0x7c, 0xd1, 0xde, 0xb8, 0x8a, 0xf7, + 0xf8, 0x45, 0x4c, 0x02, 0xc1, 0xc4, 0xd8, 0x2e, 0xba, 0x68, 0xf5, 0xe1, 0xca, 0x54, 0x91, 0x68, + 0x15, 0x2e, 0xf4, 0xe9, 0xb8, 0x09, 0x3a, 0x60, 0x7d, 0xd9, 0x96, 0x9f, 0xe8, 0x21, 0x5c, 0x1c, + 0x91, 0x41, 0x4c, 0x9b, 0xd5, 0x0e, 0x58, 0xaf, 0x6f, 0x63, 0xad, 0xe3, 0x22, 0x17, 0x1e, 0xf6, + 0x3d, 0x65, 0x41, 0x39, 0x57, 0x4a, 0xbe, 0x57, 0xdd, 0x01, 0xdd, 0x2f, 0x55, 0xd8, 0xb0, 0x53, + 0xbf, 0x1f, 0x0c, 0x48, 0x14, 0xa1, 0xb7, 0xb0, 0x26, 0x27, 0xec, 0x12, 0x41, 0x54, 0xc6, 0xfa, + 0xf6, 0xe6, 0x3c, 0xf5, 0x08, 0x4b, 0xb4, 0x72, 0xf8, 0xf0, 0x88, 0x3a, 0xe2, 0x29, 0x15, 0xc4, + 0x42, 0x99, 0xa9, 0x70, 0x12, 0xb3, 0x0b, 0x55, 0x74, 0x0b, 0x2e, 0xf5, 0x48, 0xe0, 0x0e, 0x68, + 0xa8, 0xca, 0x5f, 0xb6, 0xae, 0x65, 0xf0, 0xa5, 0x47, 0x69, 0xd8, 0xce, 0xef, 0xd1, 0x1e, 0xac, + 0xf1, 0x6c, 0x70, 0xcd, 0x05, 0x55, 0xcc, 0xcd, 0x79, 0xc3, 0xb5, 0x1a, 0x72, 0x92, 0xf9, 0xc9, + 0x2e, 0xb8, 0x68, 0x1f, 0x42, 0xf9, 0x98, 0xdc, 0x78, 0xc0, 0x02, 0xaf, 0xf9, 0x9f, 0x52, 0x32, + 0x66, 0x29, 0x1d, 0x14, 0x28, 0xeb, 0x7f, 0xd9, 0xc0, 0xe4, 0x6c, 0x6b, 0x0a, 0xdd, 0xaf, 0x00, + 0xae, 0xea, 0xae, 0xc9, 0x57, 0x81, 0xde, 0x94, 0x9c, 0xc3, 0x57, 0x73, 0x4e, 0xb2, 0x95, 0x6f, + 0xab, 0xf9, 0x63, 0xcc, 0x23, 0x9a, 0x6b, 0xbb, 0x70, 0x91, 0x09, 0xea, 0x47, 0xcd, 0xaa, 0x7a, + 0xe4, 0x9d, 0x59, 0xd5, 0xeb, 0x25, 0x59, 0x2b, 0x99, 0xd8, 0xe2, 0x63, 0x49, 0xb3, 0x53, 0x76, + 0xf7, 0xb8, 0x0a, 0xb5, 0xa6, 0xd0, 0x11, 0x6c, 0x48, 0xf2, 0x01, 0x1d, 0x50, 0x47, 0xf0, 0x30, + 0xdb, 0xa0, 0xcd, 0xf9, 0xd6, 0xe0, 0x7d, 0x8d, 0x92, 0xee, 0xd1, 0x5a, 0x96, 0xac, 0xa1, 0x5f, + 0xd9, 0x53, 0xda, 0xe8, 0x15, 0xac, 0x0b, 0x3e, 0x90, 0xab, 0xcc, 0x78, 0x90, 0xf7, 0x31, 0x35, + 0x05, 0xb9, 0x49, 0x32, 0xd5, 0xcb, 0x02, 0x66, 0xdd, 0xc8, 0x84, 0xeb, 0x93, 0x58, 0x64, 0xeb, + 0x3a, 0xad, 0xfb, 0xf0, 0x7a, 0xa9, 0x9e, 0x19, 0x2b, 0xb3, 0xa6, 0xaf, 0xcc, 0xb2, 0xb6, 0x02, + 0xd6, 0xce, 0xc9, 0xa5, 0x51, 0x39, 0xbd, 0x34, 0x2a, 0x67, 0x97, 0x46, 0xe5, 0x43, 0x62, 0x80, + 0x93, 0xc4, 0x00, 0xa7, 0x89, 0x01, 0xce, 0x12, 0x03, 0x7c, 0x4f, 0x0c, 0x70, 0xfc, 0xc3, 0xa8, + 0xbc, 0x46, 0xe5, 0xbf, 0xfa, 0xaf, 0x00, 0x00, 
0x00, 0xff, 0xff, 0xd3, 0x3f, 0x9c, 0xd0, 0xea, + 0x05, 0x00, 0x00, } func (m *Overhead) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/node/v1/generated.proto b/vendor/k8s.io/api/node/v1/generated.proto index 0152d5e3ab..e6b8852ec1 100644 --- a/vendor/k8s.io/api/node/v1/generated.proto +++ b/vendor/k8s.io/api/node/v1/generated.proto @@ -34,7 +34,7 @@ option go_package = "k8s.io/api/node/v1"; message Overhead { // podFixed represents the fixed resource overhead associated with running a pod. // +optional - map podFixed = 1; + map podFixed = 1; } // RuntimeClass defines a class of container runtime supported in the cluster. @@ -47,7 +47,7 @@ message Overhead { message RuntimeClass { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values @@ -80,7 +80,7 @@ message RuntimeClassList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated RuntimeClass items = 2; @@ -103,6 +103,6 @@ message Scheduling { // tolerated by the pod and the RuntimeClass. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.Toleration tolerations = 2; + repeated .k8s.io.api.core.v1.Toleration tolerations = 2; } diff --git a/vendor/k8s.io/api/node/v1/types.go b/vendor/k8s.io/api/node/v1/types.go index b00f58772c..169862ea94 100644 --- a/vendor/k8s.io/api/node/v1/types.go +++ b/vendor/k8s.io/api/node/v1/types.go @@ -24,6 +24,7 @@ import ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.20 // RuntimeClass defines a class of container runtime supported in the cluster. // The RuntimeClass is used to determine which container runtime is used to run @@ -93,6 +94,7 @@ type Scheduling struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.20 // RuntimeClassList is a list of RuntimeClass objects. type RuntimeClassList struct { diff --git a/vendor/k8s.io/api/node/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/node/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..7497955688 --- /dev/null +++ b/vendor/k8s.io/api/node/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *RuntimeClass) APILifecycleIntroduced() (major, minor int) { + return 1, 20 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *RuntimeClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 20 +} diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go index 9f876d4b44..16ac696433 100644 --- a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto +// source: k8s.io/api/node/v1alpha1/generated.proto package v1alpha1 @@ -51,7 +51,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *Overhead) Reset() { *m = Overhead{} } func (*Overhead) ProtoMessage() {} func (*Overhead) Descriptor() ([]byte, []int) { - return fileDescriptor_82a78945ab308218, []int{0} + return fileDescriptor_a8fee97bf5273e47, []int{0} } func (m *Overhead) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -79,7 +79,7 @@ var xxx_messageInfo_Overhead proto.InternalMessageInfo func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } func (*RuntimeClass) ProtoMessage() {} func (*RuntimeClass) Descriptor() ([]byte, []int) { - return fileDescriptor_82a78945ab308218, []int{1} + return fileDescriptor_a8fee97bf5273e47, []int{1} } func (m *RuntimeClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,7 +107,7 @@ var xxx_messageInfo_RuntimeClass proto.InternalMessageInfo func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } func (*RuntimeClassList) ProtoMessage() {} func (*RuntimeClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_82a78945ab308218, []int{2} + return fileDescriptor_a8fee97bf5273e47, []int{2} } func (m *RuntimeClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,7 +135,7 @@ var xxx_messageInfo_RuntimeClassList proto.InternalMessageInfo func (m *RuntimeClassSpec) Reset() { *m = RuntimeClassSpec{} } func (*RuntimeClassSpec) ProtoMessage() {} func (*RuntimeClassSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_82a78945ab308218, []int{3} + return fileDescriptor_a8fee97bf5273e47, []int{3} } func (m *RuntimeClassSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -163,7 +163,7 @@ var xxx_messageInfo_RuntimeClassSpec proto.InternalMessageInfo func (m *Scheduling) Reset() { *m = Scheduling{} } func (*Scheduling) ProtoMessage() {} func (*Scheduling) Descriptor() ([]byte, []int) { - return fileDescriptor_82a78945ab308218, []int{4} + return fileDescriptor_a8fee97bf5273e47, []int{4} } func (m *Scheduling) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -199,55 +199,54 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto", fileDescriptor_82a78945ab308218) + 
proto.RegisterFile("k8s.io/api/node/v1alpha1/generated.proto", fileDescriptor_a8fee97bf5273e47) } -var fileDescriptor_82a78945ab308218 = []byte{ - // 699 bytes of a gzipped FileDescriptorProto +var fileDescriptor_a8fee97bf5273e47 = []byte{ + // 683 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0x3d, 0x6f, 0xd3, 0x4c, - 0x1c, 0xcf, 0xa5, 0xad, 0x94, 0x5e, 0xd2, 0xaa, 0x8f, 0x9f, 0xea, 0x51, 0x94, 0xc1, 0xa9, 0xac, - 0x47, 0xa8, 0x42, 0xea, 0x99, 0x56, 0xa8, 0xaa, 0x18, 0x8a, 0x30, 0x2f, 0x02, 0x51, 0x5a, 0x70, - 0xcb, 0x82, 0x18, 0xb8, 0xd8, 0x7f, 0x1c, 0x13, 0xdb, 0x67, 0xd9, 0xe7, 0x88, 0x6c, 0x88, 0x05, - 0x89, 0x89, 0x89, 0x6f, 0x03, 0x73, 0xc7, 0x4e, 0xa8, 0x53, 0x4b, 0xc3, 0x77, 0x60, 0x60, 0x42, - 0x67, 0x9f, 0x13, 0x27, 0x69, 0x68, 0xd8, 0x7c, 0x77, 0xbf, 0x97, 0xff, 0x6b, 0x82, 0xef, 0x74, - 0x76, 0x62, 0xe2, 0x32, 0xbd, 0x93, 0xb4, 0x20, 0x0a, 0x80, 0x43, 0xac, 0x77, 0x21, 0xb0, 0x59, - 0xa4, 0xcb, 0x07, 0x1a, 0xba, 0x7a, 0xc0, 0x6c, 0xd0, 0xbb, 0x9b, 0xd4, 0x0b, 0xdb, 0x74, 0x53, - 0x77, 0x20, 0x80, 0x88, 0x72, 0xb0, 0x49, 0x18, 0x31, 0xce, 0x94, 0x7a, 0x86, 0x24, 0x34, 0x74, - 0x89, 0x40, 0x92, 0x1c, 0xd9, 0xd8, 0x70, 0x5c, 0xde, 0x4e, 0x5a, 0xc4, 0x62, 0xbe, 0xee, 0x30, - 0x87, 0xe9, 0x29, 0xa1, 0x95, 0xbc, 0x4e, 0x4f, 0xe9, 0x21, 0xfd, 0xca, 0x84, 0x1a, 0x5a, 0xc1, - 0xd2, 0x62, 0x91, 0xb0, 0x1c, 0x37, 0x6b, 0xdc, 0x1c, 0x62, 0x7c, 0x6a, 0xb5, 0xdd, 0x00, 0xa2, - 0x9e, 0x1e, 0x76, 0x9c, 0x94, 0x14, 0x41, 0xcc, 0x92, 0xc8, 0x82, 0xbf, 0x62, 0xc5, 0xba, 0x0f, - 0x9c, 0x5e, 0xe6, 0xa5, 0x4f, 0x63, 0x45, 0x49, 0xc0, 0x5d, 0x7f, 0xd2, 0x66, 0xfb, 0x2a, 0x42, - 0x6c, 0xb5, 0xc1, 0xa7, 0xe3, 0x3c, 0xed, 0xa4, 0x8c, 0x2b, 0x07, 0x5d, 0x88, 0xda, 0x40, 0x6d, - 0xe5, 0x1b, 0xc2, 0x95, 0x90, 0xd9, 0x0f, 0xdc, 0xb7, 0x60, 0xd7, 0xd1, 0xda, 0xdc, 0x7a, 0x75, - 0xeb, 0x06, 0x99, 0x56, 0x62, 0x92, 0xd3, 0xc8, 0x53, 0x49, 0xb9, 0x1f, 0xf0, 0xa8, 0x67, 0x7c, - 0x40, 0xc7, 0x67, 0xcd, 0x52, 0xff, 0xac, 0x59, 0xc9, 0xef, 0x7f, 0x9d, 0x35, 0x9b, 0x93, 0xf5, - 0x25, 0xa6, 0x2c, 0xd9, 0x9e, 0x1b, 0xf3, 0xf7, 0xe7, 0x7f, 0x84, 0xec, 0x53, 0x1f, 0x3e, 0x9e, - 0x37, 0x37, 0x66, 0xe9, 0x00, 0x79, 0x96, 0xd0, 0x80, 0xbb, 0xbc, 0x67, 0x0e, 0x72, 0x69, 0x74, - 0xf0, 0xd2, 0x48, 0x90, 0xca, 0x0a, 0x9e, 0xeb, 0x40, 0xaf, 0x8e, 0xd6, 0xd0, 0xfa, 0xa2, 0x29, - 0x3e, 0x95, 0x7b, 0x78, 0xa1, 0x4b, 0xbd, 0x04, 0xea, 0xe5, 0x35, 0xb4, 0x5e, 0xdd, 0x22, 0x85, - 0xbc, 0x07, 0x5e, 0x24, 0xec, 0x38, 0x69, 0x21, 0x26, 0xbd, 0x32, 0xf2, 0xad, 0xf2, 0x0e, 0xd2, - 0xbe, 0x22, 0x5c, 0x33, 0xb3, 0xaa, 0xdf, 0xf5, 0x68, 0x1c, 0x2b, 0xaf, 0x70, 0x45, 0xf4, 0xd9, - 0xa6, 0x9c, 0xa6, 0x8e, 0xa3, 0x55, 0x9d, 0x50, 0x8f, 0x89, 0x40, 0x93, 0xee, 0x26, 0x39, 0x68, - 0xbd, 0x01, 0x8b, 0x3f, 0x01, 0x4e, 0x0d, 0x45, 0x16, 0x15, 0x0f, 0xef, 0xcc, 0x81, 0xaa, 0xb2, - 0x87, 0xe7, 0xe3, 0x10, 0x2c, 0x19, 0xfb, 0xf5, 0xe9, 0x3d, 0x2b, 0xc6, 0x75, 0x18, 0x82, 0x65, - 0xd4, 0xa4, 0xee, 0xbc, 0x38, 0x99, 0xa9, 0x8a, 0xf6, 0x05, 0xe1, 0x95, 0x22, 0x50, 0x34, 0x48, - 0x79, 0x39, 0x91, 0x04, 0x99, 0x2d, 0x09, 0xc1, 0x4e, 0x53, 0x58, 0xc9, 0xe7, 0x22, 0xbf, 0x29, - 0x24, 0xf0, 0x18, 0x2f, 0xb8, 0x1c, 0xfc, 0xb8, 0x5e, 0x4e, 0xa7, 0xee, 0xda, 0x6c, 0x19, 0x18, - 0x4b, 0x52, 0x72, 0xe1, 0x91, 0x20, 0x9b, 0x99, 0x86, 0xf6, 0x73, 0x2c, 0x7e, 0x91, 0x9a, 0xb2, - 0x8b, 0x97, 0xe5, 0x2a, 0x3c, 0xa4, 0x81, 0xed, 0x41, 0x94, 0x35, 0xdf, 0xf8, 0x4f, 0x4a, 0x2c, - 0x9b, 0x23, 0xaf, 0xe6, 0x18, 0x5a, 0xd9, 0xc3, 0x15, 0x26, 0x07, 0x5e, 0x96, 0x59, 0xbb, 0x7a, - 0x35, 0x8c, 0x9a, 0xc8, 0x37, 
0x3f, 0x99, 0x03, 0x05, 0xe5, 0x08, 0x63, 0xb1, 0x90, 0x76, 0xe2, - 0xb9, 0x81, 0x53, 0x9f, 0x4b, 0xf5, 0xfe, 0x9f, 0xae, 0x77, 0x38, 0xc0, 0x1a, 0xcb, 0x62, 0x08, - 0x86, 0x67, 0xb3, 0xa0, 0xa3, 0x7d, 0x2e, 0xe3, 0xc2, 0x93, 0x12, 0xe2, 0x9a, 0x90, 0x39, 0x04, - 0x0f, 0x2c, 0xce, 0x22, 0xb9, 0xd1, 0xdb, 0xb3, 0xd8, 0x90, 0xfd, 0x02, 0x31, 0xdb, 0xeb, 0x55, - 0x59, 0xa8, 0x5a, 0xf1, 0xc9, 0x1c, 0x71, 0x50, 0x9e, 0xe3, 0x2a, 0x67, 0x9e, 0xf8, 0x81, 0x71, - 0x59, 0x90, 0x37, 0x53, 0x2d, 0x1a, 0x8a, 0xcd, 0x16, 0x53, 0x71, 0x34, 0x80, 0x19, 0xff, 0x4a, - 0xe1, 0xea, 0xf0, 0x2e, 0x36, 0x8b, 0x3a, 0x8d, 0xdb, 0xf8, 0x9f, 0x89, 0x78, 0x2e, 0x59, 0xe1, - 0xd5, 0xe2, 0x0a, 0x2f, 0x16, 0x56, 0xd2, 0xd8, 0x3d, 0xbe, 0x50, 0x4b, 0x27, 0x17, 0x6a, 0xe9, - 0xf4, 0x42, 0x2d, 0xbd, 0xeb, 0xab, 0xe8, 0xb8, 0xaf, 0xa2, 0x93, 0xbe, 0x8a, 0x4e, 0xfb, 0x2a, - 0xfa, 0xde, 0x57, 0xd1, 0xa7, 0x1f, 0x6a, 0xe9, 0x45, 0x7d, 0xda, 0xff, 0xce, 0xef, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x92, 0x0d, 0xef, 0xbe, 0xab, 0x06, 0x00, 0x00, + 0x1c, 0xcf, 0xa5, 0xad, 0x94, 0x5e, 0xd2, 0xaa, 0x8f, 0x9f, 0x0a, 0x45, 0x19, 0x9c, 0xca, 0x42, + 0x28, 0x42, 0xea, 0x99, 0x56, 0xa8, 0xaa, 0x18, 0x8a, 0x64, 0x5e, 0x04, 0xa2, 0xb4, 0x70, 0x2d, + 0x0b, 0x62, 0xe0, 0x6a, 0x1f, 0x8e, 0x89, 0xed, 0xb3, 0xec, 0x73, 0x44, 0x36, 0xc4, 0x82, 0xc4, + 0xc4, 0xc4, 0xb7, 0x81, 0xb9, 0x63, 0x27, 0xd4, 0xa9, 0xa5, 0xe1, 0x3b, 0x30, 0x30, 0xa1, 0xb3, + 0xcf, 0xc9, 0x25, 0x69, 0x68, 0xd8, 0x7c, 0x77, 0xbf, 0x97, 0xff, 0x6b, 0x02, 0x5b, 0x9d, 0xed, + 0x04, 0x79, 0xcc, 0x24, 0x91, 0x67, 0x86, 0xcc, 0xa1, 0x66, 0x77, 0x83, 0xf8, 0x51, 0x9b, 0x6c, + 0x98, 0x2e, 0x0d, 0x69, 0x4c, 0x38, 0x75, 0x50, 0x14, 0x33, 0xce, 0xb4, 0x7a, 0x8e, 0x44, 0x24, + 0xf2, 0x90, 0x40, 0xa2, 0x02, 0xd9, 0x58, 0x77, 0x3d, 0xde, 0x4e, 0x8f, 0x90, 0xcd, 0x02, 0xd3, + 0x65, 0x2e, 0x33, 0x33, 0xc2, 0x51, 0xfa, 0x26, 0x3b, 0x65, 0x87, 0xec, 0x2b, 0x17, 0x6a, 0x18, + 0x8a, 0xa5, 0xcd, 0x62, 0x61, 0x39, 0x6e, 0xd6, 0xb8, 0x3d, 0xc4, 0x04, 0xc4, 0x6e, 0x7b, 0x21, + 0x8d, 0x7b, 0x66, 0xd4, 0x71, 0x33, 0x52, 0x4c, 0x13, 0x96, 0xc6, 0x36, 0xfd, 0x27, 0x56, 0x62, + 0x06, 0x94, 0x93, 0xcb, 0xbc, 0xcc, 0x69, 0xac, 0x38, 0x0d, 0xb9, 0x17, 0x4c, 0xda, 0x6c, 0x5d, + 0x45, 0x48, 0xec, 0x36, 0x0d, 0xc8, 0x38, 0xcf, 0x38, 0x29, 0xc3, 0xca, 0x7e, 0x97, 0xc6, 0x6d, + 0x4a, 0x1c, 0xed, 0x3b, 0x80, 0x95, 0x88, 0x39, 0x0f, 0xbd, 0x77, 0xd4, 0xa9, 0x83, 0xb5, 0xb9, + 0x56, 0x75, 0xf3, 0x16, 0x9a, 0x56, 0x62, 0x54, 0xd0, 0xd0, 0x33, 0x49, 0x79, 0x10, 0xf2, 0xb8, + 0x67, 0x7d, 0x04, 0xc7, 0x67, 0xcd, 0x52, 0xff, 0xac, 0x59, 0x29, 0xee, 0x7f, 0x9f, 0x35, 0x9b, + 0x93, 0xf5, 0x45, 0x58, 0x96, 0x6c, 0xd7, 0x4b, 0xf8, 0x87, 0xf3, 0xbf, 0x42, 0xf6, 0x48, 0x40, + 0x3f, 0x9d, 0x37, 0xd7, 0x67, 0xe9, 0x00, 0x7a, 0x9e, 0x92, 0x90, 0x7b, 0xbc, 0x87, 0x07, 0xb9, + 0x34, 0x3a, 0x70, 0x69, 0x24, 0x48, 0x6d, 0x05, 0xce, 0x75, 0x68, 0xaf, 0x0e, 0xd6, 0x40, 0x6b, + 0x11, 0x8b, 0x4f, 0xed, 0x3e, 0x5c, 0xe8, 0x12, 0x3f, 0xa5, 0xf5, 0xf2, 0x1a, 0x68, 0x55, 0x37, + 0x91, 0x92, 0xf7, 0xc0, 0x0b, 0x45, 0x1d, 0x37, 0x2b, 0xc4, 0xa4, 0x57, 0x4e, 0xbe, 0x53, 0xde, + 0x06, 0xc6, 0x37, 0x00, 0x6b, 0x38, 0xaf, 0xfa, 0x3d, 0x9f, 0x24, 0x89, 0xf6, 0x1a, 0x56, 0x44, + 0x9f, 0x1d, 0xc2, 0x49, 0xe6, 0x38, 0x5a, 0xd5, 0x09, 0xf5, 0x04, 0x09, 0x34, 0xea, 0x6e, 0xa0, + 0xfd, 0xa3, 0xb7, 0xd4, 0xe6, 0x4f, 0x29, 0x27, 0x96, 0x26, 0x8b, 0x0a, 0x87, 0x77, 0x78, 0xa0, + 0xaa, 0xed, 0xc2, 0xf9, 0x24, 0xa2, 0xb6, 0x8c, 0xfd, 0xe6, 0xf4, 0x9e, 0xa9, 0x71, 0x1d, 0x44, + 0xd4, 0xb6, 0x6a, 0x52, 0x77, 0x5e, 0x9c, 0x70, 0xa6, 0x62, 0x7c, 0x05, 0x70, 0x45, 
0x05, 0x8a, + 0x06, 0x69, 0xaf, 0x26, 0x92, 0x40, 0xb3, 0x25, 0x21, 0xd8, 0x59, 0x0a, 0x2b, 0xc5, 0x5c, 0x14, + 0x37, 0x4a, 0x02, 0x4f, 0xe0, 0x82, 0xc7, 0x69, 0x90, 0xd4, 0xcb, 0xd9, 0xd4, 0xdd, 0x98, 0x2d, + 0x03, 0x6b, 0x49, 0x4a, 0x2e, 0x3c, 0x16, 0x64, 0x9c, 0x6b, 0x18, 0xbf, 0xc6, 0xe2, 0x17, 0xa9, + 0x69, 0x3b, 0x70, 0x59, 0xae, 0xc2, 0x23, 0x12, 0x3a, 0x3e, 0x8d, 0xf3, 0xe6, 0x5b, 0xd7, 0xa4, + 0xc4, 0x32, 0x1e, 0x79, 0xc5, 0x63, 0x68, 0x6d, 0x17, 0x56, 0x98, 0x1c, 0x78, 0x59, 0x66, 0xe3, + 0xea, 0xd5, 0xb0, 0x6a, 0x22, 0xdf, 0xe2, 0x84, 0x07, 0x0a, 0xda, 0x21, 0x84, 0x62, 0x21, 0x9d, + 0xd4, 0xf7, 0x42, 0xb7, 0x3e, 0x97, 0xe9, 0x5d, 0x9f, 0xae, 0x77, 0x30, 0xc0, 0x5a, 0xcb, 0x62, + 0x08, 0x86, 0x67, 0xac, 0xe8, 0x18, 0x5f, 0xca, 0x50, 0x79, 0xd2, 0x22, 0x58, 0x13, 0x32, 0x07, + 0xd4, 0xa7, 0x36, 0x67, 0xb1, 0xdc, 0xe8, 0xad, 0x59, 0x6c, 0xd0, 0x9e, 0x42, 0xcc, 0xf7, 0x7a, + 0x55, 0x16, 0xaa, 0xa6, 0x3e, 0xe1, 0x11, 0x07, 0xed, 0x05, 0xac, 0x72, 0xe6, 0x8b, 0x1f, 0x18, + 0x8f, 0x85, 0x45, 0x33, 0x75, 0xd5, 0x50, 0x6c, 0xb6, 0x98, 0x8a, 0xc3, 0x01, 0xcc, 0xfa, 0x5f, + 0x0a, 0x57, 0x87, 0x77, 0x09, 0x56, 0x75, 0x1a, 0x77, 0xe1, 0x7f, 0x13, 0xf1, 0x5c, 0xb2, 0xc2, + 0xab, 0xea, 0x0a, 0x2f, 0x2a, 0x2b, 0x69, 0xed, 0x1c, 0x5f, 0xe8, 0xa5, 0x93, 0x0b, 0xbd, 0x74, + 0x7a, 0xa1, 0x97, 0xde, 0xf7, 0x75, 0x70, 0xdc, 0xd7, 0xc1, 0x49, 0x5f, 0x07, 0xa7, 0x7d, 0x1d, + 0xfc, 0xe8, 0xeb, 0xe0, 0xf3, 0x4f, 0xbd, 0xf4, 0xb2, 0x3e, 0xed, 0x7f, 0xe7, 0x4f, 0x00, 0x00, + 0x00, 0xff, 0xff, 0xa7, 0x9b, 0x7f, 0x45, 0x92, 0x06, 0x00, 0x00, } func (m *Overhead) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.proto b/vendor/k8s.io/api/node/v1alpha1/generated.proto index 4673e9261d..bc68718d90 100644 --- a/vendor/k8s.io/api/node/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/node/v1alpha1/generated.proto @@ -34,7 +34,7 @@ option go_package = "k8s.io/api/node/v1alpha1"; message Overhead { // podFixed represents the fixed resource overhead associated with running a pod. // +optional - map podFixed = 1; + map podFixed = 1; } // RuntimeClass defines a class of container runtime supported in the cluster. @@ -47,7 +47,7 @@ message Overhead { message RuntimeClass { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec represents specification of the RuntimeClass // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -59,7 +59,7 @@ message RuntimeClassList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated RuntimeClass items = 2; @@ -113,6 +113,6 @@ message Scheduling { // tolerated by the pod and the RuntimeClass. 
// +optional // +listType=atomic - repeated k8s.io.api.core.v1.Toleration tolerations = 2; + repeated .k8s.io.api.core.v1.Toleration tolerations = 2; } diff --git a/vendor/k8s.io/api/node/v1beta1/generated.pb.go b/vendor/k8s.io/api/node/v1beta1/generated.pb.go index 8cd5a4cc35..537961c259 100644 --- a/vendor/k8s.io/api/node/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/node/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto +// source: k8s.io/api/node/v1beta1/generated.proto package v1beta1 @@ -51,7 +51,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *Overhead) Reset() { *m = Overhead{} } func (*Overhead) ProtoMessage() {} func (*Overhead) Descriptor() ([]byte, []int) { - return fileDescriptor_f977b0dddc93b4ec, []int{0} + return fileDescriptor_73bb62abe8438af4, []int{0} } func (m *Overhead) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -79,7 +79,7 @@ var xxx_messageInfo_Overhead proto.InternalMessageInfo func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } func (*RuntimeClass) ProtoMessage() {} func (*RuntimeClass) Descriptor() ([]byte, []int) { - return fileDescriptor_f977b0dddc93b4ec, []int{1} + return fileDescriptor_73bb62abe8438af4, []int{1} } func (m *RuntimeClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,7 +107,7 @@ var xxx_messageInfo_RuntimeClass proto.InternalMessageInfo func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } func (*RuntimeClassList) ProtoMessage() {} func (*RuntimeClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_f977b0dddc93b4ec, []int{2} + return fileDescriptor_73bb62abe8438af4, []int{2} } func (m *RuntimeClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,7 +135,7 @@ var xxx_messageInfo_RuntimeClassList proto.InternalMessageInfo func (m *Scheduling) Reset() { *m = Scheduling{} } func (*Scheduling) ProtoMessage() {} func (*Scheduling) Descriptor() ([]byte, []int) { - return fileDescriptor_f977b0dddc93b4ec, []int{3} + return fileDescriptor_73bb62abe8438af4, []int{3} } func (m *Scheduling) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -170,53 +170,52 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto", fileDescriptor_f977b0dddc93b4ec) + proto.RegisterFile("k8s.io/api/node/v1beta1/generated.proto", fileDescriptor_73bb62abe8438af4) } -var fileDescriptor_f977b0dddc93b4ec = []byte{ - // 668 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xbb, 0x6f, 0xd3, 0x40, - 0x18, 0xcf, 0xa5, 0x54, 0x4d, 0x2f, 0x29, 0x14, 0x53, 0xa9, 0x51, 0x06, 0xa7, 0x04, 0x21, 0x95, - 0xa1, 0x67, 0x5a, 0x01, 0xaa, 0x90, 0x10, 0xc8, 0x3c, 0xc4, 0xb3, 0x05, 0x17, 0x16, 0xc4, 0xc0, - 0xc5, 0xfe, 0x70, 0x4c, 0x62, 0x5f, 0x74, 0x3e, 0x47, 0x64, 0x43, 0x2c, 0x48, 0x4c, 0x2c, 0xfc, - 0x37, 0xb0, 0x77, 0xa3, 0x0b, 0x52, 0xa7, 0x96, 0x86, 0xff, 0x82, 0x09, 0x9d, 0x5f, 0xb9, 0x36, - 0x4d, 0x1b, 0x36, 0xdf, 0xdd, 0xef, 0x71, 0xdf, 0xef, 0xbb, 0xcf, 0xf8, 0x4e, 0x7b, 0x3d, 0x24, - 0x1e, 0x33, 0xda, 0x51, 0x13, 0x78, 0x00, 0x02, 0x42, 0xa3, 0x07, 0x81, 0xc3, 0xb8, 0x91, 0x1e, - 0xd0, 0xae, 0x67, 0x04, 0xcc, 0x01, 0xa3, 0xb7, 0xda, 0x04, 0x41, 0x57, 0x0d, 0x17, 0x02, 0xe0, - 0x54, 0x80, 0x43, 0xba, 0x9c, 0x09, 0xa6, 0x2d, 0x26, 0x40, 0x42, 0xbb, 0x1e, 0x91, 0x40, 0x92, - 0x02, 0x6b, 0x2b, 
0xae, 0x27, 0x5a, 0x51, 0x93, 0xd8, 0xcc, 0x37, 0x5c, 0xe6, 0x32, 0x23, 0xc6, - 0x37, 0xa3, 0x77, 0xf1, 0x2a, 0x5e, 0xc4, 0x5f, 0x89, 0x4e, 0xad, 0xa1, 0x18, 0xda, 0x8c, 0x4b, - 0xc3, 0xa3, 0x5e, 0xb5, 0x6b, 0x43, 0x8c, 0x4f, 0xed, 0x96, 0x17, 0x00, 0xef, 0x1b, 0xdd, 0xb6, - 0x1b, 0x93, 0x38, 0x84, 0x2c, 0xe2, 0x36, 0xfc, 0x17, 0x2b, 0x34, 0x7c, 0x10, 0xf4, 0x38, 0x2f, - 0x63, 0x1c, 0x8b, 0x47, 0x81, 0xf0, 0xfc, 0x51, 0x9b, 0x1b, 0xa7, 0x11, 0x42, 0xbb, 0x05, 0x3e, - 0x3d, 0xca, 0x6b, 0xfc, 0x2c, 0xe2, 0xd2, 0x66, 0x0f, 0x78, 0x0b, 0xa8, 0xa3, 0xfd, 0x42, 0xb8, - 0xd4, 0x65, 0xce, 0x03, 0xef, 0x03, 0x38, 0x55, 0xb4, 0x34, 0xb5, 0x5c, 0x5e, 0x33, 0xc8, 0x98, - 0x84, 0x49, 0xc6, 0x22, 0xcf, 0x53, 0xc6, 0xfd, 0x40, 0xf0, 0xbe, 0xf9, 0x19, 0x6d, 0xef, 0xd5, - 0x0b, 0x83, 0xbd, 0x7a, 0x29, 0xdb, 0xff, 0xbb, 0x57, 0xaf, 0x8f, 0xc6, 0x4b, 0xac, 0x34, 0xb1, - 0xa7, 0x5e, 0x28, 0x3e, 0xed, 0x9f, 0x08, 0xd9, 0xa0, 0x3e, 0x7c, 0xd9, 0xaf, 0xaf, 0x4c, 0xd2, - 0x00, 0xf2, 0x22, 0xa2, 0x81, 0xf0, 0x44, 0xdf, 0xca, 0x4b, 0xa9, 0xb5, 0xf1, 0xdc, 0xa1, 0x4b, - 0x6a, 0xf3, 0x78, 0xaa, 0x0d, 0xfd, 0x2a, 0x5a, 0x42, 0xcb, 0xb3, 0x96, 0xfc, 0xd4, 0xee, 0xe1, - 0xe9, 0x1e, 0xed, 0x44, 0x50, 0x2d, 0x2e, 0xa1, 0xe5, 0xf2, 0x1a, 0x51, 0xca, 0xce, 0xbd, 0x48, - 0xb7, 0xed, 0xc6, 0x39, 0x8c, 0x7a, 0x25, 0xe4, 0x9b, 0xc5, 0x75, 0xd4, 0xf8, 0x51, 0xc4, 0x15, - 0x2b, 0x09, 0xfd, 0x6e, 0x87, 0x86, 0xa1, 0xf6, 0x16, 0x97, 0x64, 0x9b, 0x1d, 0x2a, 0x68, 0xec, - 0x58, 0x5e, 0xbb, 0x7a, 0x92, 0x7a, 0x48, 0x24, 0x9a, 0xf4, 0x56, 0xc9, 0x66, 0xf3, 0x3d, 0xd8, - 0xe2, 0x19, 0x08, 0x6a, 0x6a, 0x69, 0xa8, 0x78, 0xb8, 0x67, 0xe5, 0xaa, 0xda, 0x15, 0x3c, 0xd3, - 0xa2, 0x81, 0xd3, 0x01, 0x1e, 0x5f, 0x7f, 0xd6, 0x3c, 0x97, 0xc2, 0x67, 0x1e, 0x26, 0xdb, 0x56, - 0x76, 0xae, 0x3d, 0xc1, 0x25, 0x96, 0x36, 0xae, 0x3a, 0x15, 0x5f, 0xe6, 0xe2, 0xa9, 0x1d, 0x36, - 0x2b, 0xb2, 0x9d, 0xd9, 0xca, 0xca, 0x05, 0xb4, 0x2d, 0x8c, 0xe5, 0xb3, 0x72, 0xa2, 0x8e, 0x17, - 0xb8, 0xd5, 0x33, 0xb1, 0xdc, 0xa5, 0xb1, 0x72, 0x5b, 0x39, 0xd4, 0x3c, 0x2b, 0x4b, 0x19, 0xae, - 0x2d, 0x45, 0xa6, 0xf1, 0x1d, 0xe1, 0x79, 0x35, 0x3f, 0xf9, 0x3e, 0xb4, 0x37, 0x23, 0x19, 0x92, - 0xc9, 0x32, 0x94, 0xec, 0x38, 0xc1, 0xf9, 0xec, 0x59, 0x66, 0x3b, 0x4a, 0x7e, 0x8f, 0xf1, 0xb4, - 0x27, 0xc0, 0x0f, 0xab, 0xc5, 0xf8, 0xcd, 0x5f, 0x1e, 0x5b, 0x82, 0x7a, 0x2f, 0x73, 0x2e, 0x55, - 0x9c, 0x7e, 0x24, 0xb9, 0x56, 0x22, 0xd1, 0xf8, 0x56, 0xc4, 0x4a, 0x65, 0x1a, 0xc3, 0x15, 0xa9, - 0xb0, 0x05, 0x1d, 0xb0, 0x05, 0xe3, 0xe9, 0x54, 0x5d, 0x9f, 0x20, 0x24, 0xb2, 0xa1, 0xf0, 0x92, - 0xd9, 0x5a, 0x48, 0x1d, 0x2b, 0xea, 0x91, 0x75, 0xc8, 0x40, 0x7b, 0x85, 0xcb, 0x82, 0x75, 0xe4, - 0x8c, 0x7b, 0x2c, 0xc8, 0x2a, 0xd2, 0x55, 0x3f, 0x39, 0x5d, 0x32, 0x9a, 0x97, 0x39, 0xcc, 0xbc, - 0x90, 0x0a, 0x97, 0x87, 0x7b, 0xa1, 0xa5, 0xea, 0xd4, 0x6e, 0xe3, 0xf3, 0x23, 0xf7, 0x39, 0x66, - 0x8c, 0x16, 0xd4, 0x31, 0x9a, 0x55, 0xc6, 0xc2, 0xbc, 0xb5, 0x7d, 0xa0, 0x17, 0x76, 0x0e, 0xf4, - 0xc2, 0xee, 0x81, 0x5e, 0xf8, 0x38, 0xd0, 0xd1, 0xf6, 0x40, 0x47, 0x3b, 0x03, 0x1d, 0xed, 0x0e, - 0x74, 0xf4, 0x7b, 0xa0, 0xa3, 0xaf, 0x7f, 0xf4, 0xc2, 0xeb, 0xc5, 0x31, 0x3f, 0xfe, 0x7f, 0x01, - 0x00, 0x00, 0xff, 0xff, 0x7a, 0xca, 0xe1, 0x7d, 0x2b, 0x06, 0x00, 0x00, +var fileDescriptor_73bb62abe8438af4 = []byte{ + // 654 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xbb, 0x6f, 0x13, 0x31, + 0x18, 0x8f, 0x53, 0xaa, 0xa6, 0x4e, 0x0a, 0xc5, 0x54, 0x6a, 0x94, 0xe1, 0x52, 0x82, 0x10, 0x65, + 0xa8, 0x8f, 0x56, 0x80, 0x2a, 0x24, 0x84, 0x74, 0x3c, 0xc4, 0xb3, 0x85, 
0x2b, 0x2c, 0x88, 0x01, + 0xe7, 0xce, 0x5c, 0x4c, 0x72, 0xe7, 0xe8, 0xce, 0x17, 0x91, 0x0d, 0xb1, 0x20, 0x31, 0xb1, 0xf0, + 0xdf, 0xc0, 0xde, 0x8d, 0x2e, 0x48, 0x9d, 0x5a, 0x1a, 0xfe, 0x0b, 0x26, 0x64, 0xdf, 0x23, 0x6e, + 0xd3, 0xb4, 0x61, 0x8b, 0x7d, 0xbf, 0xc7, 0xf7, 0xfd, 0x3e, 0x7f, 0x81, 0x57, 0xda, 0xeb, 0x11, + 0x66, 0xdc, 0x24, 0x5d, 0x66, 0x06, 0xdc, 0xa5, 0x66, 0x6f, 0xb5, 0x49, 0x05, 0x59, 0x35, 0x3d, + 0x1a, 0xd0, 0x90, 0x08, 0xea, 0xe2, 0x6e, 0xc8, 0x05, 0x47, 0x8b, 0x09, 0x10, 0x93, 0x2e, 0xc3, + 0x12, 0x88, 0x53, 0x60, 0x6d, 0xc5, 0x63, 0xa2, 0x15, 0x37, 0xb1, 0xc3, 0x7d, 0xd3, 0xe3, 0x1e, + 0x37, 0x15, 0xbe, 0x19, 0xbf, 0x53, 0x27, 0x75, 0x50, 0xbf, 0x12, 0x9d, 0x5a, 0x43, 0x33, 0x74, + 0x78, 0x28, 0x0d, 0x8f, 0x7a, 0xd5, 0xae, 0x0f, 0x31, 0x3e, 0x71, 0x5a, 0x2c, 0xa0, 0x61, 0xdf, + 0xec, 0xb6, 0x3d, 0x45, 0x0a, 0x69, 0xc4, 0xe3, 0xd0, 0xa1, 0xff, 0xc5, 0x8a, 0x4c, 0x9f, 0x0a, + 0x72, 0x9c, 0x97, 0x39, 0x8e, 0x15, 0xc6, 0x81, 0x60, 0xfe, 0xa8, 0xcd, 0xcd, 0xd3, 0x08, 0x91, + 0xd3, 0xa2, 0x3e, 0x39, 0xca, 0x6b, 0xfc, 0x2c, 0xc2, 0xd2, 0x66, 0x8f, 0x86, 0x2d, 0x4a, 0x5c, + 0xf4, 0x0b, 0xc0, 0x52, 0x97, 0xbb, 0x0f, 0xd8, 0x07, 0xea, 0x56, 0xc1, 0xd2, 0xd4, 0x72, 0x79, + 0xcd, 0xc4, 0x63, 0x12, 0xc6, 0x19, 0x0b, 0x3f, 0x4f, 0x19, 0xf7, 0x03, 0x11, 0xf6, 0xad, 0xcf, + 0x60, 0x7b, 0xaf, 0x5e, 0x18, 0xec, 0xd5, 0x4b, 0xd9, 0xfd, 0xdf, 0xbd, 0x7a, 0x7d, 0x34, 0x5e, + 0x6c, 0xa7, 0x89, 0x3d, 0x65, 0x91, 0xf8, 0xb4, 0x7f, 0x22, 0x64, 0x83, 0xf8, 0xf4, 0xcb, 0x7e, + 0x7d, 0x65, 0x92, 0x01, 0xe0, 0x17, 0x31, 0x09, 0x04, 0x13, 0x7d, 0x3b, 0x6f, 0xa5, 0xd6, 0x86, + 0x73, 0x87, 0x8a, 0x44, 0xf3, 0x70, 0xaa, 0x4d, 0xfb, 0x55, 0xb0, 0x04, 0x96, 0x67, 0x6d, 0xf9, + 0x13, 0xdd, 0x83, 0xd3, 0x3d, 0xd2, 0x89, 0x69, 0xb5, 0xb8, 0x04, 0x96, 0xcb, 0x6b, 0x58, 0x6b, + 0x3b, 0xf7, 0xc2, 0xdd, 0xb6, 0xa7, 0x72, 0x18, 0xf5, 0x4a, 0xc8, 0xb7, 0x8a, 0xeb, 0xa0, 0xf1, + 0xa3, 0x08, 0x2b, 0x76, 0x12, 0xfa, 0xdd, 0x0e, 0x89, 0x22, 0xf4, 0x16, 0x96, 0xe4, 0x98, 0x5d, + 0x22, 0x88, 0x72, 0x2c, 0xaf, 0x5d, 0x3b, 0x49, 0x3d, 0xc2, 0x12, 0x8d, 0x7b, 0xab, 0x78, 0xb3, + 0xf9, 0x9e, 0x3a, 0xe2, 0x19, 0x15, 0xc4, 0x42, 0x69, 0xa8, 0x70, 0x78, 0x67, 0xe7, 0xaa, 0xe8, + 0x2a, 0x9c, 0x69, 0x91, 0xc0, 0xed, 0xd0, 0x50, 0x95, 0x3f, 0x6b, 0x9d, 0x4b, 0xe1, 0x33, 0x0f, + 0x93, 0x6b, 0x3b, 0xfb, 0x8e, 0x9e, 0xc0, 0x12, 0x4f, 0x07, 0x57, 0x9d, 0x52, 0xc5, 0x5c, 0x3c, + 0x75, 0xc2, 0x56, 0x45, 0x8e, 0x33, 0x3b, 0xd9, 0xb9, 0x00, 0xda, 0x82, 0x50, 0x3e, 0x2b, 0x37, + 0xee, 0xb0, 0xc0, 0xab, 0x9e, 0x51, 0x72, 0x97, 0xc6, 0xca, 0x6d, 0xe5, 0x50, 0xeb, 0xac, 0x6c, + 0x65, 0x78, 0xb6, 0x35, 0x99, 0xc6, 0x77, 0x00, 0xe7, 0xf5, 0xfc, 0xe4, 0xfb, 0x40, 0x6f, 0x46, + 0x32, 0xc4, 0x93, 0x65, 0x28, 0xd9, 0x2a, 0xc1, 0xf9, 0xec, 0x59, 0x66, 0x37, 0x5a, 0x7e, 0x8f, + 0xe1, 0x34, 0x13, 0xd4, 0x8f, 0xaa, 0x45, 0xf5, 0xe6, 0x2f, 0x8f, 0x6d, 0x41, 0xaf, 0xcb, 0x9a, + 0x4b, 0x15, 0xa7, 0x1f, 0x49, 0xae, 0x9d, 0x48, 0x34, 0xbe, 0x15, 0xa1, 0xd6, 0x19, 0xe2, 0xb0, + 0x22, 0x15, 0xb6, 0x68, 0x87, 0x3a, 0x82, 0x87, 0xe9, 0x56, 0xdd, 0x98, 0x20, 0x24, 0xbc, 0xa1, + 0xf1, 0x92, 0xdd, 0x5a, 0x48, 0x1d, 0x2b, 0xfa, 0x27, 0xfb, 0x90, 0x01, 0x7a, 0x05, 0xcb, 0x82, + 0x77, 0xe4, 0x8e, 0x33, 0x1e, 0x64, 0x1d, 0x19, 0xba, 0x9f, 0xdc, 0x2e, 0x19, 0xcd, 0xcb, 0x1c, + 0x66, 0x5d, 0x48, 0x85, 0xcb, 0xc3, 0xbb, 0xc8, 0xd6, 0x75, 0x6a, 0x77, 0xe0, 0xf9, 0x91, 0x7a, + 0x8e, 0x59, 0xa3, 0x05, 0x7d, 0x8d, 0x66, 0xb5, 0xb5, 0xb0, 0x6e, 0x6f, 0x1f, 0x18, 0x85, 0x9d, + 0x03, 0xa3, 0xb0, 0x7b, 0x60, 0x14, 0x3e, 0x0e, 0x0c, 0xb0, 0x3d, 0x30, 0xc0, 0xce, 0xc0, 0x00, + 
0xbb, 0x03, 0x03, 0xfc, 0x1e, 0x18, 0xe0, 0xeb, 0x1f, 0xa3, 0xf0, 0x7a, 0x71, 0xcc, 0x1f, 0xff, + 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7c, 0x67, 0x22, 0x03, 0x12, 0x06, 0x00, 0x00, } func (m *Overhead) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/node/v1beta1/generated.proto b/vendor/k8s.io/api/node/v1beta1/generated.proto index 54dbc0995a..497027e033 100644 --- a/vendor/k8s.io/api/node/v1beta1/generated.proto +++ b/vendor/k8s.io/api/node/v1beta1/generated.proto @@ -34,7 +34,7 @@ option go_package = "k8s.io/api/node/v1beta1"; message Overhead { // podFixed represents the fixed resource overhead associated with running a pod. // +optional - map podFixed = 1; + map podFixed = 1; } // RuntimeClass defines a class of container runtime supported in the cluster. @@ -47,7 +47,7 @@ message Overhead { message RuntimeClass { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values @@ -80,7 +80,7 @@ message RuntimeClassList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated RuntimeClass items = 2; @@ -103,6 +103,6 @@ message Scheduling { // tolerated by the pod and the RuntimeClass. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.Toleration tolerations = 2; + repeated .k8s.io.api.core.v1.Toleration tolerations = 2; } diff --git a/vendor/k8s.io/api/policy/v1/doc.go b/vendor/k8s.io/api/policy/v1/doc.go index 177cdf5236..c51e02685a 100644 --- a/vendor/k8s.io/api/policy/v1/doc.go +++ b/vendor/k8s.io/api/policy/v1/doc.go @@ -17,6 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // Package policy is for any kind of policy object. Suitable examples, even if // they aren't all here, are PodDisruptionBudget, diff --git a/vendor/k8s.io/api/policy/v1/generated.pb.go b/vendor/k8s.io/api/policy/v1/generated.pb.go index d7e467a921..dd61b7266c 100644 --- a/vendor/k8s.io/api/policy/v1/generated.pb.go +++ b/vendor/k8s.io/api/policy/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/policy/v1/generated.proto +// source: k8s.io/api/policy/v1/generated.proto package v1 @@ -50,7 +50,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *Eviction) Reset() { *m = Eviction{} } func (*Eviction) ProtoMessage() {} func (*Eviction) Descriptor() ([]byte, []int) { - return fileDescriptor_2d50488813b2d18e, []int{0} + return fileDescriptor_204bc6fa48ff56f7, []int{0} } func (m *Eviction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -78,7 +78,7 @@ var xxx_messageInfo_Eviction proto.InternalMessageInfo func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } func (*PodDisruptionBudget) ProtoMessage() {} func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { - return fileDescriptor_2d50488813b2d18e, []int{1} + return fileDescriptor_204bc6fa48ff56f7, []int{1} } func (m *PodDisruptionBudget) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,7 +106,7 @@ var xxx_messageInfo_PodDisruptionBudget proto.InternalMessageInfo func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } func (*PodDisruptionBudgetList) ProtoMessage() {} func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { - return fileDescriptor_2d50488813b2d18e, []int{2} + return fileDescriptor_204bc6fa48ff56f7, []int{2} } func (m *PodDisruptionBudgetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,7 +134,7 @@ var xxx_messageInfo_PodDisruptionBudgetList proto.InternalMessageInfo func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } func (*PodDisruptionBudgetSpec) ProtoMessage() {} func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2d50488813b2d18e, []int{3} + return fileDescriptor_204bc6fa48ff56f7, []int{3} } func (m *PodDisruptionBudgetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -162,7 +162,7 @@ var xxx_messageInfo_PodDisruptionBudgetSpec proto.InternalMessageInfo func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} } func (*PodDisruptionBudgetStatus) ProtoMessage() {} func (*PodDisruptionBudgetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2d50488813b2d18e, []int{4} + return fileDescriptor_204bc6fa48ff56f7, []int{4} } func (m *PodDisruptionBudgetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -197,65 +197,64 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/policy/v1/generated.proto", fileDescriptor_2d50488813b2d18e) + proto.RegisterFile("k8s.io/api/policy/v1/generated.proto", fileDescriptor_204bc6fa48ff56f7) } -var fileDescriptor_2d50488813b2d18e = []byte{ - // 854 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0xcf, 0x8f, 0xdb, 0x44, - 0x14, 0xc7, 0xe3, 0xcd, 0x66, 0xd9, 0x4e, 0x93, 0x68, 0x19, 0x16, 0x58, 0x72, 0x70, 0xaa, 0x9c, - 0x16, 0xa4, 0x8e, 0xd9, 0x16, 0xa1, 0x55, 0x25, 0x50, 0xeb, 0x66, 0x05, 0x45, 0x5d, 0xb2, 0x9a, - 0x6d, 0x85, 0x84, 0x40, 0x62, 0x62, 0xbf, 0x26, 0x43, 0x6c, 0x8f, 0xe5, 0x19, 0x87, 0xe6, 0x44, - 0xff, 0x04, 0xfe, 0x05, 0xfe, 0x14, 0x4e, 0xec, 0xb1, 0xdc, 0x2a, 0x0e, 0x11, 0x6b, 0xfe, 0x0b, - 0x4e, 0xc8, 0x63, 0xe7, 0x87, 0x37, 0x0e, 0xcd, 0x72, 0xe8, 0xcd, 0xf3, 0xde, 0xfb, 0x7e, 0x9e, - 0xdf, 0x8f, 0x71, 0x82, 0x3e, 0x1b, 0x1d, 0x4b, 0xc2, 0x85, 0x35, 0x8a, 0xfb, 0x10, 0x05, 0xa0, - 0x40, 0x5a, 0x63, 0x08, 0x5c, 0x11, 0x59, 0xb9, 0x83, 0x85, 0xdc, 0x0a, 0x85, 0xc7, 0x9d, 0x89, - 0x35, 0x3e, 
0xb2, 0x06, 0x10, 0x40, 0xc4, 0x14, 0xb8, 0x24, 0x8c, 0x84, 0x12, 0x78, 0x3f, 0x8b, - 0x22, 0x2c, 0xe4, 0x24, 0x8b, 0x22, 0xe3, 0xa3, 0xd6, 0xed, 0x01, 0x57, 0xc3, 0xb8, 0x4f, 0x1c, - 0xe1, 0x5b, 0x03, 0x31, 0x10, 0x96, 0x0e, 0xee, 0xc7, 0xcf, 0xf4, 0x49, 0x1f, 0xf4, 0x53, 0x06, - 0x69, 0x7d, 0xb2, 0x48, 0xe5, 0x33, 0x67, 0xc8, 0x03, 0x88, 0x26, 0x56, 0x38, 0x1a, 0xa4, 0x06, - 0x69, 0xf9, 0xa0, 0x58, 0x49, 0xea, 0x96, 0xb5, 0x4e, 0x15, 0xc5, 0x81, 0xe2, 0x3e, 0xac, 0x08, - 0x3e, 0x7d, 0x9d, 0x40, 0x3a, 0x43, 0xf0, 0xd9, 0x8a, 0xee, 0xee, 0x3a, 0x5d, 0xac, 0xb8, 0x67, - 0xf1, 0x40, 0x49, 0x15, 0x5d, 0x15, 0x75, 0xfe, 0x34, 0xd0, 0xee, 0xc9, 0x98, 0x3b, 0x8a, 0x8b, - 0x00, 0xff, 0x80, 0x76, 0xd3, 0x2a, 0x5c, 0xa6, 0xd8, 0x81, 0x71, 0xcb, 0x38, 0xbc, 0x79, 0xe7, - 0x63, 0xb2, 0x68, 0xdc, 0x1c, 0x4a, 0xc2, 0xd1, 0x20, 0x35, 0x48, 0x92, 0x46, 0x93, 0xf1, 0x11, - 0xe9, 0xf5, 0x7f, 0x04, 0x47, 0x9d, 0x82, 0x62, 0x36, 0xbe, 0x98, 0xb6, 0x2b, 0xc9, 0xb4, 0x8d, - 0x16, 0x36, 0x3a, 0xa7, 0x62, 0x0f, 0x35, 0x5c, 0xf0, 0x40, 0x41, 0x2f, 0x4c, 0x33, 0xca, 0x83, - 0x2d, 0x9d, 0xe6, 0xee, 0x66, 0x69, 0xba, 0xcb, 0x52, 0xfb, 0xed, 0x64, 0xda, 0x6e, 0x14, 0x4c, - 0xb4, 0x08, 0xef, 0xfc, 0xba, 0x85, 0xde, 0x39, 0x13, 0x6e, 0x97, 0xcb, 0x28, 0xd6, 0x26, 0x3b, - 0x76, 0x07, 0xa0, 0xde, 0x40, 0x9d, 0x3d, 0xb4, 0x2d, 0x43, 0x70, 0xf2, 0xf2, 0x6e, 0x93, 0xb2, - 0xf5, 0x23, 0x25, 0xaf, 0x76, 0x1e, 0x82, 0x63, 0xd7, 0x73, 0xf4, 0x76, 0x7a, 0xa2, 0x1a, 0x84, - 0xbf, 0x41, 0x3b, 0x52, 0x31, 0x15, 0xcb, 0x83, 0xaa, 0x46, 0x5a, 0x9b, 0x23, 0xb5, 0xcc, 0x6e, - 0xe6, 0xd0, 0x9d, 0xec, 0x4c, 0x73, 0x5c, 0xe7, 0x77, 0x03, 0xbd, 0x5f, 0xa2, 0x7a, 0xcc, 0xa5, - 0xc2, 0xdf, 0xad, 0xf4, 0x89, 0x6c, 0xd6, 0xa7, 0x54, 0xad, 0xbb, 0xb4, 0x97, 0x67, 0xdd, 0x9d, - 0x59, 0x96, 0x7a, 0xf4, 0x35, 0xaa, 0x71, 0x05, 0x7e, 0xba, 0x03, 0xd5, 0xc3, 0x9b, 0x77, 0x3e, - 0xdc, 0xb8, 0x22, 0xbb, 0x91, 0x53, 0x6b, 0x8f, 0x52, 0x3d, 0xcd, 0x30, 0x9d, 0x3f, 0xaa, 0xa5, - 0x95, 0xa4, 0x4d, 0xc4, 0xcf, 0x50, 0xdd, 0xe7, 0xc1, 0x83, 0x31, 0xe3, 0x1e, 0xeb, 0x7b, 0xf0, - 0xda, 0xa9, 0xa7, 0x57, 0x86, 0x64, 0x57, 0x86, 0x3c, 0x0a, 0x54, 0x2f, 0x3a, 0x57, 0x11, 0x0f, - 0x06, 0xf6, 0x5e, 0x32, 0x6d, 0xd7, 0x4f, 0x97, 0x48, 0xb4, 0xc0, 0xc5, 0xdf, 0xa3, 0x5d, 0x09, - 0x1e, 0x38, 0x4a, 0x44, 0xd7, 0x5b, 0xed, 0xc7, 0xac, 0x0f, 0xde, 0x79, 0x2e, 0xb5, 0xeb, 0x69, - 0xcb, 0x66, 0x27, 0x3a, 0x47, 0x62, 0x0f, 0x35, 0x7d, 0xf6, 0xfc, 0x69, 0xc0, 0xe6, 0x85, 0x54, - 0xff, 0x67, 0x21, 0x38, 0x99, 0xb6, 0x9b, 0xa7, 0x05, 0x16, 0xbd, 0xc2, 0xc6, 0x2f, 0x0c, 0xd4, - 0x8a, 0x83, 0x21, 0x30, 0x4f, 0x0d, 0x27, 0x67, 0xc2, 0x9d, 0x7d, 0x27, 0xce, 0xf4, 0x70, 0x0e, - 0xb6, 0x6f, 0x19, 0x87, 0x37, 0xec, 0xfb, 0xc9, 0xb4, 0xdd, 0x7a, 0xba, 0x36, 0xea, 0x9f, 0x69, - 0xdb, 0x5c, 0xef, 0x7d, 0x32, 0x09, 0x81, 0xfe, 0x47, 0x8e, 0xce, 0x6f, 0x35, 0xf4, 0xc1, 0xda, - 0x9d, 0xc6, 0x5f, 0x21, 0x2c, 0xfa, 0x12, 0xa2, 0x31, 0xb8, 0x5f, 0x64, 0xdf, 0x35, 0x2e, 0x02, - 0x3d, 0xdb, 0xaa, 0xdd, 0xca, 0x77, 0x04, 0xf7, 0x56, 0x22, 0x68, 0x89, 0x0a, 0xff, 0x8c, 0x1a, - 0x6e, 0x96, 0x05, 0xdc, 0x33, 0xe1, 0xce, 0xb6, 0xd2, 0xbe, 0xe6, 0x3d, 0x23, 0xdd, 0x65, 0xc8, - 0x49, 0xa0, 0xa2, 0x89, 0xfd, 0x6e, 0xfe, 0x2a, 0x8d, 0x82, 0x8f, 0x16, 0xf3, 0xa5, 0xc5, 0xb8, - 0x73, 0xa4, 0x7c, 0xe0, 0x79, 0xe2, 0x27, 0x70, 0xf5, 0x7c, 0x6b, 0x8b, 0x62, 0xba, 0x2b, 0x11, - 0xb4, 0x44, 0x85, 0x3f, 0x47, 0x4d, 0x27, 0x8e, 0x22, 0x08, 0xd4, 0x97, 0x59, 0x67, 0xf5, 0xb0, - 0x6a, 0xf6, 0x7b, 0x39, 0xa7, 0xf9, 0xb0, 0xe0, 0xa5, 0x57, 0xa2, 0x53, 0xbd, 0x0b, 0x92, 0x47, - 0xe0, 0xce, 0xf4, 0xb5, 0xa2, 0xbe, 
0x5b, 0xf0, 0xd2, 0x2b, 0xd1, 0xf8, 0x18, 0xd5, 0xe1, 0x79, - 0x08, 0xce, 0xac, 0x97, 0x3b, 0x5a, 0xbd, 0x9f, 0xab, 0xeb, 0x27, 0x4b, 0x3e, 0x5a, 0x88, 0xc4, - 0x0e, 0x42, 0x8e, 0x08, 0x5c, 0x9e, 0xfd, 0x3a, 0xbc, 0xa5, 0x67, 0x60, 0x6d, 0x76, 0x85, 0x1e, - 0xce, 0x74, 0x8b, 0x6f, 0xf3, 0xdc, 0x24, 0xe9, 0x12, 0xb6, 0xe5, 0x21, 0xbc, 0x3a, 0x26, 0xbc, - 0x87, 0xaa, 0x23, 0x98, 0xe8, 0xf5, 0xb9, 0x41, 0xd3, 0x47, 0x7c, 0x1f, 0xd5, 0xc6, 0xcc, 0x8b, - 0x21, 0xbf, 0xca, 0x1f, 0x6d, 0xf6, 0x1e, 0x4f, 0xb8, 0x0f, 0x34, 0x13, 0xde, 0xdb, 0x3a, 0x36, - 0xec, 0x7b, 0x17, 0x97, 0x66, 0xe5, 0xe5, 0xa5, 0x59, 0x79, 0x75, 0x69, 0x56, 0x5e, 0x24, 0xa6, - 0x71, 0x91, 0x98, 0xc6, 0xcb, 0xc4, 0x34, 0x5e, 0x25, 0xa6, 0xf1, 0x57, 0x62, 0x1a, 0xbf, 0xfc, - 0x6d, 0x56, 0xbe, 0xdd, 0x2f, 0xfb, 0x1f, 0xf3, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x79, 0xd7, - 0x99, 0xdb, 0xf7, 0x08, 0x00, 0x00, +var fileDescriptor_204bc6fa48ff56f7 = []byte{ + // 840 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0x4d, 0x8f, 0xdb, 0x44, + 0x18, 0xc7, 0xe3, 0xcd, 0x66, 0xd9, 0x4e, 0x93, 0x68, 0x19, 0x16, 0x58, 0x72, 0x70, 0xaa, 0x88, + 0xc3, 0x82, 0xd4, 0x31, 0xdb, 0x22, 0xb4, 0xea, 0x01, 0xb5, 0x6e, 0x56, 0x50, 0xd4, 0x25, 0xab, + 0xd9, 0x56, 0x48, 0x08, 0x24, 0x26, 0xf6, 0xd3, 0x64, 0x58, 0xdb, 0x63, 0x79, 0xc6, 0xa1, 0x39, + 0xd1, 0x8f, 0xc0, 0x57, 0xe0, 0xa3, 0x70, 0x62, 0x8f, 0xe5, 0x56, 0x71, 0x88, 0x58, 0xf3, 0x2d, + 0x38, 0x21, 0x8f, 0x9d, 0x17, 0x27, 0x0e, 0xcd, 0x72, 0xe8, 0xcd, 0xf3, 0xcc, 0xf3, 0xff, 0x3d, + 0xf3, 0xbc, 0xcc, 0x24, 0xe8, 0xc3, 0x8b, 0x63, 0x49, 0xb8, 0xb0, 0x58, 0xc8, 0xad, 0x50, 0x78, + 0xdc, 0x19, 0x5b, 0xa3, 0x23, 0x6b, 0x00, 0x01, 0x44, 0x4c, 0x81, 0x4b, 0xc2, 0x48, 0x28, 0x81, + 0xf7, 0x33, 0x2f, 0xc2, 0x42, 0x4e, 0x32, 0x2f, 0x32, 0x3a, 0x6a, 0xdd, 0x1e, 0x70, 0x35, 0x8c, + 0xfb, 0xc4, 0x11, 0xbe, 0x35, 0x10, 0x03, 0x61, 0x69, 0xe7, 0x7e, 0xfc, 0x4c, 0xaf, 0xf4, 0x42, + 0x7f, 0x65, 0x90, 0xd6, 0xa7, 0xf3, 0x50, 0x3e, 0x73, 0x86, 0x3c, 0x80, 0x68, 0x6c, 0x85, 0x17, + 0x83, 0xd4, 0x20, 0x2d, 0x1f, 0x14, 0x2b, 0x09, 0xdd, 0xb2, 0xd6, 0xa9, 0xa2, 0x38, 0x50, 0xdc, + 0x87, 0x15, 0xc1, 0x67, 0xaf, 0x13, 0x48, 0x67, 0x08, 0x3e, 0x5b, 0xd1, 0xdd, 0x5d, 0xa7, 0x8b, + 0x15, 0xf7, 0x2c, 0x1e, 0x28, 0xa9, 0xa2, 0x65, 0x51, 0xe7, 0x4f, 0x03, 0xed, 0x9e, 0x8c, 0xb8, + 0xa3, 0xb8, 0x08, 0xf0, 0x0f, 0x68, 0x37, 0xcd, 0xc2, 0x65, 0x8a, 0x1d, 0x18, 0xb7, 0x8c, 0xc3, + 0x9b, 0x77, 0x3e, 0x21, 0xf3, 0xc2, 0xcd, 0xa0, 0x24, 0xbc, 0x18, 0xa4, 0x06, 0x49, 0x52, 0x6f, + 0x32, 0x3a, 0x22, 0xbd, 0xfe, 0x8f, 0xe0, 0xa8, 0x53, 0x50, 0xcc, 0xc6, 0x97, 0x93, 0x76, 0x25, + 0x99, 0xb4, 0xd1, 0xdc, 0x46, 0x67, 0x54, 0xec, 0xa1, 0x86, 0x0b, 0x1e, 0x28, 0xe8, 0x85, 0x69, + 0x44, 0x79, 0xb0, 0xa5, 0xc3, 0xdc, 0xdd, 0x2c, 0x4c, 0x77, 0x51, 0x6a, 0xbf, 0x9d, 0x4c, 0xda, + 0x8d, 0x82, 0x89, 0x16, 0xe1, 0x9d, 0x5f, 0xb7, 0xd0, 0x3b, 0x67, 0xc2, 0xed, 0x72, 0x19, 0xc5, + 0xda, 0x64, 0xc7, 0xee, 0x00, 0xd4, 0x1b, 0xc8, 0xb3, 0x87, 0xb6, 0x65, 0x08, 0x4e, 0x9e, 0xde, + 0x6d, 0x52, 0x36, 0x7e, 0xa4, 0xe4, 0x68, 0xe7, 0x21, 0x38, 0x76, 0x3d, 0x47, 0x6f, 0xa7, 0x2b, + 0xaa, 0x41, 0xf8, 0x1b, 0xb4, 0x23, 0x15, 0x53, 0xb1, 0x3c, 0xa8, 0x6a, 0xa4, 0xb5, 0x39, 0x52, + 0xcb, 0xec, 0x66, 0x0e, 0xdd, 0xc9, 0xd6, 0x34, 0xc7, 0x75, 0x7e, 0x37, 0xd0, 0xfb, 0x25, 0xaa, + 0xc7, 0x5c, 0x2a, 0xfc, 0xdd, 0x4a, 0x9d, 0xc8, 0x66, 0x75, 0x4a, 0xd5, 0xba, 0x4a, 0x7b, 0x79, + 0xd4, 0xdd, 0xa9, 0x65, 0xa1, 0x46, 0x5f, 0xa3, 0x1a, 0x57, 0xe0, 0xa7, 0x33, 0x50, 0x3d, 0xbc, + 0x79, 0xe7, 0xa3, 0x8d, 
0x33, 0xb2, 0x1b, 0x39, 0xb5, 0xf6, 0x28, 0xd5, 0xd3, 0x0c, 0xd3, 0xf9, + 0xa3, 0x5a, 0x9a, 0x49, 0x5a, 0x44, 0xfc, 0x0c, 0xd5, 0x7d, 0x1e, 0x3c, 0x18, 0x31, 0xee, 0xb1, + 0xbe, 0x07, 0xaf, 0xed, 0x7a, 0x7a, 0x65, 0x48, 0x76, 0x65, 0xc8, 0xa3, 0x40, 0xf5, 0xa2, 0x73, + 0x15, 0xf1, 0x60, 0x60, 0xef, 0x25, 0x93, 0x76, 0xfd, 0x74, 0x81, 0x44, 0x0b, 0x5c, 0xfc, 0x3d, + 0xda, 0x95, 0xe0, 0x81, 0xa3, 0x44, 0x74, 0xbd, 0xd1, 0x7e, 0xcc, 0xfa, 0xe0, 0x9d, 0xe7, 0x52, + 0xbb, 0x9e, 0x96, 0x6c, 0xba, 0xa2, 0x33, 0x24, 0xf6, 0x50, 0xd3, 0x67, 0xcf, 0x9f, 0x06, 0x6c, + 0x96, 0x48, 0xf5, 0x7f, 0x26, 0x82, 0x93, 0x49, 0xbb, 0x79, 0x5a, 0x60, 0xd1, 0x25, 0x36, 0x7e, + 0x61, 0xa0, 0x56, 0x1c, 0x0c, 0x81, 0x79, 0x6a, 0x38, 0x3e, 0x13, 0xee, 0xf4, 0x9d, 0x38, 0xd3, + 0xcd, 0x39, 0xd8, 0xbe, 0x65, 0x1c, 0xde, 0xb0, 0xef, 0x27, 0x93, 0x76, 0xeb, 0xe9, 0x5a, 0xaf, + 0x7f, 0x26, 0x6d, 0x73, 0xfd, 0xee, 0x93, 0x71, 0x08, 0xf4, 0x3f, 0x62, 0x74, 0x7e, 0xab, 0xa1, + 0x0f, 0xd6, 0xce, 0x34, 0xfe, 0x0a, 0x61, 0xd1, 0x97, 0x10, 0x8d, 0xc0, 0xfd, 0x22, 0x7b, 0xd7, + 0xb8, 0x08, 0x74, 0x6f, 0xab, 0x76, 0x2b, 0x9f, 0x11, 0xdc, 0x5b, 0xf1, 0xa0, 0x25, 0x2a, 0xfc, + 0x33, 0x6a, 0xb8, 0x59, 0x14, 0x70, 0xcf, 0x84, 0x3b, 0x9d, 0x4a, 0xfb, 0x9a, 0xf7, 0x8c, 0x74, + 0x17, 0x21, 0x27, 0x81, 0x8a, 0xc6, 0xf6, 0xbb, 0xf9, 0x51, 0x1a, 0x85, 0x3d, 0x5a, 0x8c, 0x97, + 0x26, 0xe3, 0xce, 0x90, 0xf2, 0x81, 0xe7, 0x89, 0x9f, 0xc0, 0xd5, 0xfd, 0xad, 0xcd, 0x93, 0xe9, + 0xae, 0x78, 0xd0, 0x12, 0x15, 0xfe, 0x1c, 0x35, 0x9d, 0x38, 0x8a, 0x20, 0x50, 0x5f, 0x66, 0x95, + 0xd5, 0xcd, 0xaa, 0xd9, 0xef, 0xe5, 0x9c, 0xe6, 0xc3, 0xc2, 0x2e, 0x5d, 0xf2, 0x4e, 0xf5, 0x2e, + 0x48, 0x1e, 0x81, 0x3b, 0xd5, 0xd7, 0x8a, 0xfa, 0x6e, 0x61, 0x97, 0x2e, 0x79, 0xe3, 0x63, 0x54, + 0x87, 0xe7, 0x21, 0x38, 0xd3, 0x5a, 0xee, 0x68, 0xf5, 0x7e, 0xae, 0xae, 0x9f, 0x2c, 0xec, 0xd1, + 0x82, 0x27, 0x76, 0x10, 0x72, 0x44, 0xe0, 0xf2, 0xec, 0xd7, 0xe1, 0x2d, 0xdd, 0x03, 0x6b, 0xb3, + 0x2b, 0xf4, 0x70, 0xaa, 0x9b, 0xbf, 0xcd, 0x33, 0x93, 0xa4, 0x0b, 0xd8, 0x96, 0x87, 0xf0, 0x6a, + 0x9b, 0xf0, 0x1e, 0xaa, 0x5e, 0xc0, 0x58, 0x8f, 0xcf, 0x0d, 0x9a, 0x7e, 0xe2, 0xfb, 0xa8, 0x36, + 0x62, 0x5e, 0x0c, 0xf9, 0x55, 0xfe, 0x78, 0xb3, 0x73, 0x3c, 0xe1, 0x3e, 0xd0, 0x4c, 0x78, 0x6f, + 0xeb, 0xd8, 0xb0, 0xef, 0x5d, 0x5e, 0x99, 0x95, 0x97, 0x57, 0x66, 0xe5, 0xd5, 0x95, 0x59, 0x79, + 0x91, 0x98, 0xc6, 0x65, 0x62, 0x1a, 0x2f, 0x13, 0xd3, 0x78, 0x95, 0x98, 0xc6, 0x5f, 0x89, 0x69, + 0xfc, 0xf2, 0xb7, 0x59, 0xf9, 0x76, 0xbf, 0xec, 0x7f, 0xcc, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x0f, 0x42, 0xd2, 0x33, 0xde, 0x08, 0x00, 0x00, } func (m *Eviction) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/policy/v1/generated.proto b/vendor/k8s.io/api/policy/v1/generated.proto index a79e710284..57128e8112 100644 --- a/vendor/k8s.io/api/policy/v1/generated.proto +++ b/vendor/k8s.io/api/policy/v1/generated.proto @@ -35,11 +35,11 @@ option go_package = "k8s.io/api/policy/v1"; message Eviction { // ObjectMeta describes the pod that is being evicted. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // DeleteOptions may be provided // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; } // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods @@ -47,7 +47,7 @@ message PodDisruptionBudget { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the PodDisruptionBudget. // +optional @@ -63,7 +63,7 @@ message PodDisruptionBudgetList { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of PodDisruptionBudgets repeated PodDisruptionBudget items = 2; @@ -76,7 +76,7 @@ message PodDisruptionBudgetSpec { // absence of the evicted pod. So for example you can prevent all voluntary // evictions by specifying "100%". // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; // Label query over pods whose evictions are managed by the disruption // budget. @@ -84,14 +84,14 @@ message PodDisruptionBudgetSpec { // all pods within the namespace. // +patchStrategy=replace // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // An eviction is allowed if at most "maxUnavailable" pods selected by // "selector" are unavailable after the eviction, i.e. even in absence of // the evicted pod. For example, one can prevent all voluntary evictions // by specifying 0. This is a mutually exclusive setting with "minAvailable". // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3; // UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods // should be considered for eviction. Current implementation considers healthy pods, @@ -142,7 +142,7 @@ message PodDisruptionBudgetStatus { // If everything goes smooth this map should be empty for the most of the time. // Large number of entries in the map may indicate problems with pod deletions. // +optional - map disruptedPods = 2; + map disruptedPods = 2; // Number of pod disruptions that are currently allowed. 
optional int32 disruptionsAllowed = 3; @@ -174,6 +174,6 @@ message PodDisruptionBudgetStatus { // +patchStrategy=merge // +listType=map // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 7; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 7; } diff --git a/vendor/k8s.io/api/policy/v1/types.go b/vendor/k8s.io/api/policy/v1/types.go index 45b9550f4a..f05367ebe4 100644 --- a/vendor/k8s.io/api/policy/v1/types.go +++ b/vendor/k8s.io/api/policy/v1/types.go @@ -170,6 +170,7 @@ const ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.21 // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods type PodDisruptionBudget struct { @@ -188,6 +189,7 @@ type PodDisruptionBudget struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.21 // PodDisruptionBudgetList is a collection of PodDisruptionBudgets. type PodDisruptionBudgetList struct { @@ -203,6 +205,7 @@ type PodDisruptionBudgetList struct { // +genclient // +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.22 // Eviction evicts a pod from its node subject to certain policies and safety constraints. // This is a subresource of Pod. A request to cause such an eviction is diff --git a/vendor/k8s.io/api/policy/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/policy/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..d6663b9234 --- /dev/null +++ b/vendor/k8s.io/api/policy/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,40 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Eviction) APILifecycleIntroduced() (major, minor int) { + return 1, 22 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodDisruptionBudget) APILifecycleIntroduced() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *PodDisruptionBudgetList) APILifecycleIntroduced() (major, minor int) { + return 1, 21 +} diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go index efba41b3fd..c3845e994e 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto +// source: k8s.io/api/policy/v1beta1/generated.proto package v1beta1 @@ -50,7 +50,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *Eviction) Reset() { *m = Eviction{} } func (*Eviction) ProtoMessage() {} func (*Eviction) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{0} + return fileDescriptor_68b366237812cc96, []int{0} } func (m *Eviction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -78,7 +78,7 @@ var xxx_messageInfo_Eviction proto.InternalMessageInfo func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } func (*PodDisruptionBudget) ProtoMessage() {} func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{1} + return fileDescriptor_68b366237812cc96, []int{1} } func (m *PodDisruptionBudget) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,7 +106,7 @@ var xxx_messageInfo_PodDisruptionBudget proto.InternalMessageInfo func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } func (*PodDisruptionBudgetList) ProtoMessage() {} func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{2} + return fileDescriptor_68b366237812cc96, []int{2} } func (m *PodDisruptionBudgetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,7 +134,7 @@ var xxx_messageInfo_PodDisruptionBudgetList proto.InternalMessageInfo func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } func (*PodDisruptionBudgetSpec) ProtoMessage() {} func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{3} + return fileDescriptor_68b366237812cc96, []int{3} } func (m *PodDisruptionBudgetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -162,7 +162,7 @@ var xxx_messageInfo_PodDisruptionBudgetSpec proto.InternalMessageInfo func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} } func (*PodDisruptionBudgetStatus) ProtoMessage() {} func (*PodDisruptionBudgetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{4} + return fileDescriptor_68b366237812cc96, []int{4} } func (m *PodDisruptionBudgetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -197,65 +197,64 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto", fileDescriptor_014060e454a820dc) + proto.RegisterFile("k8s.io/api/policy/v1beta1/generated.proto", fileDescriptor_68b366237812cc96) } -var fileDescriptor_014060e454a820dc = []byte{ - // 857 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0xcd, 0x6e, 0xdb, 0x46, - 0x10, 0xc7, 0x45, 0xcb, 0x72, 0x9d, 0xad, 0x24, 0xb8, 0xdb, 0x2f, 0x5b, 0x07, 0x2a, 0xd0, 0x29, - 0x28, 0xd0, 0x65, 0x9d, 0x04, 0x85, 0xd1, 0x43, 0x9b, 0x30, 0x32, 0xd2, 0x14, 0x31, 0x6c, 0xac, - 0x9c, 0x4b, 
0x91, 0x02, 0x5d, 0x91, 0x13, 0x69, 0x2b, 0x92, 0x4b, 0x70, 0x97, 0x6c, 0x74, 0xcb, - 0xa1, 0x0f, 0xd0, 0xf7, 0xe8, 0x83, 0xd4, 0x87, 0x1e, 0xd2, 0x5b, 0xd0, 0x83, 0x50, 0xb3, 0x6f, - 0xd1, 0x53, 0xc1, 0x25, 0xf5, 0x41, 0x7d, 0x34, 0x4a, 0x0e, 0xb9, 0x71, 0x67, 0xe6, 0xff, 0x1b, - 0xce, 0xc7, 0x52, 0x42, 0xf6, 0xe8, 0x44, 0x12, 0x2e, 0xac, 0x51, 0xdc, 0x87, 0x28, 0x00, 0x05, - 0xd2, 0x4a, 0x20, 0x70, 0x45, 0x64, 0x15, 0x0e, 0x16, 0x72, 0x2b, 0x14, 0x1e, 0x77, 0xc6, 0x56, - 0x72, 0xdc, 0x07, 0xc5, 0x8e, 0xad, 0x01, 0x04, 0x10, 0x31, 0x05, 0x2e, 0x09, 0x23, 0xa1, 0x04, - 0x3e, 0xca, 0x43, 0x09, 0x0b, 0x39, 0xc9, 0x43, 0x49, 0x11, 0xda, 0xfa, 0x7c, 0xc0, 0xd5, 0x30, - 0xee, 0x13, 0x47, 0xf8, 0xd6, 0x40, 0x0c, 0x84, 0xa5, 0x15, 0xfd, 0xf8, 0x99, 0x3e, 0xe9, 0x83, - 0x7e, 0xca, 0x49, 0xad, 0xbb, 0xf3, 0xa4, 0x3e, 0x73, 0x86, 0x3c, 0x80, 0x68, 0x6c, 0x85, 0xa3, - 0x41, 0x66, 0x90, 0x96, 0x0f, 0x8a, 0x59, 0xc9, 0x4a, 0xfe, 0x96, 0xb5, 0x49, 0x15, 0xc5, 0x81, - 0xe2, 0x3e, 0xac, 0x08, 0xbe, 0x7c, 0x9d, 0x40, 0x3a, 0x43, 0xf0, 0xd9, 0x8a, 0xee, 0xce, 0x26, - 0x5d, 0xac, 0xb8, 0x67, 0xf1, 0x40, 0x49, 0x15, 0x2d, 0x8b, 0x3a, 0x7f, 0x19, 0x68, 0xff, 0x34, - 0xe1, 0x8e, 0xe2, 0x22, 0xc0, 0x3f, 0xa2, 0xfd, 0xac, 0x0a, 0x97, 0x29, 0x76, 0x68, 0xdc, 0x34, - 0x6e, 0xbd, 0x7f, 0xfb, 0x0b, 0x32, 0xef, 0xde, 0x0c, 0x4a, 0xc2, 0xd1, 0x20, 0x33, 0x48, 0x92, - 0x45, 0x93, 0xe4, 0x98, 0x9c, 0xf7, 0x7f, 0x02, 0x47, 0x9d, 0x81, 0x62, 0x36, 0xbe, 0x9a, 0xb4, - 0x2b, 0xe9, 0xa4, 0x8d, 0xe6, 0x36, 0x3a, 0xa3, 0x62, 0x0f, 0x35, 0x5c, 0xf0, 0x40, 0xc1, 0x79, - 0x98, 0x65, 0x94, 0x87, 0x3b, 0x3a, 0xcd, 0x9d, 0xed, 0xd2, 0x74, 0x17, 0xa5, 0xf6, 0x07, 0xe9, - 0xa4, 0xdd, 0x28, 0x99, 0x68, 0x19, 0xde, 0xf9, 0x6d, 0x07, 0x7d, 0x78, 0x21, 0xdc, 0x2e, 0x97, - 0x51, 0xac, 0x4d, 0x76, 0xec, 0x0e, 0x40, 0xbd, 0x83, 0x3a, 0x2f, 0xd1, 0xae, 0x0c, 0xc1, 0x29, - 0xca, 0xbb, 0x4d, 0x36, 0xee, 0x20, 0x59, 0xf3, 0x7e, 0xbd, 0x10, 0x1c, 0xbb, 0x5e, 0xf0, 0x77, - 0xb3, 0x13, 0xd5, 0x34, 0xfc, 0x14, 0xed, 0x49, 0xc5, 0x54, 0x2c, 0x0f, 0xab, 0x9a, 0x7b, 0xf7, - 0x0d, 0xb9, 0x5a, 0x6b, 0x37, 0x0b, 0xf2, 0x5e, 0x7e, 0xa6, 0x05, 0xb3, 0xf3, 0x87, 0x81, 0x3e, - 0x5d, 0xa3, 0x7a, 0xcc, 0xa5, 0xc2, 0x4f, 0x57, 0x3a, 0x46, 0xb6, 0xeb, 0x58, 0xa6, 0xd6, 0xfd, - 0x3a, 0x28, 0xb2, 0xee, 0x4f, 0x2d, 0x0b, 0xdd, 0xea, 0xa1, 0x1a, 0x57, 0xe0, 0x67, 0xdb, 0x50, - 0x5d, 0x42, 0x6f, 0x51, 0x96, 0xdd, 0x28, 0xd0, 0xb5, 0x47, 0x19, 0x84, 0xe6, 0xac, 0xce, 0x9f, - 0xd5, 0xb5, 0xe5, 0x64, 0xed, 0xc4, 0xcf, 0x50, 0xdd, 0xe7, 0xc1, 0xfd, 0x84, 0x71, 0x8f, 0xf5, - 0x3d, 0x78, 0xed, 0x12, 0x64, 0x37, 0x88, 0xe4, 0x37, 0x88, 0x3c, 0x0a, 0xd4, 0x79, 0xd4, 0x53, - 0x11, 0x0f, 0x06, 0xf6, 0x41, 0x3a, 0x69, 0xd7, 0xcf, 0x16, 0x48, 0xb4, 0xc4, 0xc5, 0x3f, 0xa0, - 0x7d, 0x09, 0x1e, 0x38, 0x4a, 0x44, 0x6f, 0xb6, 0xe9, 0x8f, 0x59, 0x1f, 0xbc, 0x5e, 0x21, 0xb5, - 0xeb, 0x59, 0xdf, 0xa6, 0x27, 0x3a, 0x43, 0x62, 0x0f, 0x35, 0x7d, 0xf6, 0xfc, 0x49, 0xc0, 0x66, - 0x85, 0x54, 0xdf, 0xb2, 0x10, 0x9c, 0x4e, 0xda, 0xcd, 0xb3, 0x12, 0x8b, 0x2e, 0xb1, 0xf1, 0x0b, - 0x03, 0xb5, 0xe2, 0x60, 0x08, 0xcc, 0x53, 0xc3, 0xf1, 0x85, 0x70, 0xa7, 0x9f, 0x8d, 0x0b, 0x3d, - 0xa1, 0xc3, 0xdd, 0x9b, 0xc6, 0xad, 0x1b, 0xf6, 0xbd, 0x74, 0xd2, 0x6e, 0x3d, 0xd9, 0x18, 0xf5, - 0xef, 0xa4, 0x6d, 0x6e, 0xf6, 0x5e, 0x8e, 0x43, 0xa0, 0xff, 0x93, 0xa3, 0xf3, 0x7b, 0x0d, 0x1d, - 0x6d, 0x5c, 0x6c, 0xfc, 0x1d, 0xc2, 0xa2, 0x2f, 0x21, 0x4a, 0xc0, 0x7d, 0x98, 0x7f, 0xe6, 0xb8, - 0x08, 0xf4, 0x6c, 0xab, 0x76, 0xab, 0xd8, 0x11, 0x7c, 0xbe, 0x12, 0x41, 0xd7, 0xa8, 0xf0, 0x2f, - 0x06, 0x6a, 0xb8, 0x79, 0x1a, 0x70, 
0x2f, 0x84, 0x3b, 0xdd, 0xcd, 0x87, 0x6f, 0x73, 0xe5, 0x48, - 0x77, 0x91, 0x74, 0x1a, 0xa8, 0x68, 0x6c, 0x7f, 0x5c, 0xbc, 0x50, 0xa3, 0xe4, 0xa3, 0xe5, 0xa4, - 0x59, 0x49, 0xee, 0x0c, 0x29, 0xef, 0x7b, 0x9e, 0xf8, 0x19, 0x5c, 0x3d, 0xe5, 0xda, 0xbc, 0xa4, - 0xee, 0x4a, 0x04, 0x5d, 0xa3, 0xc2, 0x5f, 0xa3, 0xa6, 0x13, 0x47, 0x11, 0x04, 0xea, 0xdb, 0xbc, - 0xbf, 0x7a, 0x64, 0x35, 0xfb, 0x93, 0x82, 0xd3, 0x7c, 0x50, 0xf2, 0xd2, 0xa5, 0xe8, 0x4c, 0xef, - 0x82, 0xe4, 0x11, 0xb8, 0x53, 0x7d, 0xad, 0xac, 0xef, 0x96, 0xbc, 0x74, 0x29, 0x1a, 0x9f, 0xa0, - 0x3a, 0x3c, 0x0f, 0xc1, 0x99, 0x36, 0x74, 0x4f, 0xab, 0x3f, 0x2a, 0xd4, 0xf5, 0xd3, 0x05, 0x1f, - 0x2d, 0x45, 0x62, 0x07, 0x21, 0x47, 0x04, 0x2e, 0xcf, 0x7f, 0x32, 0xde, 0xd3, 0x83, 0xb0, 0xb6, - 0xbb, 0x48, 0x0f, 0xa6, 0xba, 0xf9, 0x07, 0x7b, 0x66, 0x92, 0x74, 0x01, 0xdb, 0xf2, 0x10, 0x5e, - 0x1d, 0x13, 0x3e, 0x40, 0xd5, 0x11, 0x8c, 0xf5, 0x12, 0xdd, 0xa0, 0xd9, 0x23, 0xbe, 0x87, 0x6a, - 0x09, 0xf3, 0x62, 0x28, 0x2e, 0xf4, 0x67, 0xdb, 0xbd, 0xc7, 0x25, 0xf7, 0x81, 0xe6, 0xc2, 0xaf, - 0x76, 0x4e, 0x0c, 0xfb, 0x9b, 0xab, 0x6b, 0xb3, 0xf2, 0xf2, 0xda, 0xac, 0xbc, 0xba, 0x36, 0x2b, - 0x2f, 0x52, 0xd3, 0xb8, 0x4a, 0x4d, 0xe3, 0x65, 0x6a, 0x1a, 0xaf, 0x52, 0xd3, 0xf8, 0x3b, 0x35, - 0x8d, 0x5f, 0xff, 0x31, 0x2b, 0xdf, 0x1f, 0x6d, 0xfc, 0x9b, 0xf3, 0x5f, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x96, 0x9a, 0x3a, 0xb5, 0x1b, 0x09, 0x00, 0x00, +var fileDescriptor_68b366237812cc96 = []byte{ + // 843 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0x4d, 0x8f, 0xdb, 0x44, + 0x18, 0xc7, 0xe3, 0xcd, 0x66, 0xd9, 0x0e, 0x49, 0xb4, 0x0c, 0x6f, 0xbb, 0x39, 0x38, 0x55, 0x4e, + 0x05, 0x89, 0x31, 0xdb, 0x56, 0x68, 0xc5, 0x01, 0x5a, 0x37, 0xab, 0x52, 0xd4, 0xd5, 0xae, 0x26, + 0xdb, 0x0b, 0x2a, 0x12, 0x13, 0xfb, 0xa9, 0x33, 0xc4, 0xf6, 0x58, 0x9e, 0x71, 0x68, 0x6e, 0x3d, + 0xf0, 0x01, 0xf8, 0x1e, 0x7c, 0x10, 0xf6, 0xc0, 0xa1, 0xdc, 0x2a, 0x0e, 0x11, 0x6b, 0xbe, 0x05, + 0x27, 0xe4, 0xb1, 0xf3, 0xe2, 0xbc, 0xd0, 0xb4, 0x07, 0x6e, 0x9e, 0x67, 0x9e, 0xff, 0xef, 0x99, + 0xe7, 0x65, 0x26, 0x41, 0x9f, 0x0c, 0x4f, 0x24, 0xe1, 0xc2, 0x62, 0x11, 0xb7, 0x22, 0xe1, 0x73, + 0x67, 0x6c, 0x8d, 0x8e, 0xfb, 0xa0, 0xd8, 0xb1, 0xe5, 0x41, 0x08, 0x31, 0x53, 0xe0, 0x92, 0x28, + 0x16, 0x4a, 0xe0, 0xa3, 0xdc, 0x95, 0xb0, 0x88, 0x93, 0xdc, 0x95, 0x14, 0xae, 0xad, 0xcf, 0x3c, + 0xae, 0x06, 0x49, 0x9f, 0x38, 0x22, 0xb0, 0x3c, 0xe1, 0x09, 0x4b, 0x2b, 0xfa, 0xc9, 0x33, 0xbd, + 0xd2, 0x0b, 0xfd, 0x95, 0x93, 0x5a, 0x77, 0xe7, 0x41, 0x03, 0xe6, 0x0c, 0x78, 0x08, 0xf1, 0xd8, + 0x8a, 0x86, 0x5e, 0x66, 0x90, 0x56, 0x00, 0x8a, 0x59, 0xa3, 0x95, 0xf8, 0x2d, 0x6b, 0x93, 0x2a, + 0x4e, 0x42, 0xc5, 0x03, 0x58, 0x11, 0x7c, 0xf1, 0x3a, 0x81, 0x74, 0x06, 0x10, 0xb0, 0x15, 0xdd, + 0x9d, 0x4d, 0xba, 0x44, 0x71, 0xdf, 0xe2, 0xa1, 0x92, 0x2a, 0x5e, 0x16, 0x75, 0xfe, 0x34, 0xd0, + 0xfe, 0xe9, 0x88, 0x3b, 0x8a, 0x8b, 0x10, 0xff, 0x80, 0xf6, 0xb3, 0x2c, 0x5c, 0xa6, 0xd8, 0xa1, + 0x71, 0xd3, 0xb8, 0xf5, 0xee, 0xed, 0xcf, 0xc9, 0xbc, 0x7a, 0x33, 0x28, 0x89, 0x86, 0x5e, 0x66, + 0x90, 0x24, 0xf3, 0x26, 0xa3, 0x63, 0x72, 0xde, 0xff, 0x11, 0x1c, 0x75, 0x06, 0x8a, 0xd9, 0xf8, + 0x6a, 0xd2, 0xae, 0xa4, 0x93, 0x36, 0x9a, 0xdb, 0xe8, 0x8c, 0x8a, 0x7d, 0xd4, 0x70, 0xc1, 0x07, + 0x05, 0xe7, 0x51, 0x16, 0x51, 0x1e, 0xee, 0xe8, 0x30, 0x77, 0xb6, 0x0b, 0xd3, 0x5d, 0x94, 0xda, + 0xef, 0xa5, 0x93, 0x76, 0xa3, 0x64, 0xa2, 0x65, 0x78, 0xe7, 0xd7, 0x1d, 0xf4, 0xfe, 0x85, 0x70, + 0xbb, 0x5c, 0xc6, 0x89, 0x36, 0xd9, 0x89, 0xeb, 0x81, 0xfa, 0x1f, 0xf2, 0xbc, 0x44, 0xbb, 0x32, + 0x02, 
0xa7, 0x48, 0xef, 0x36, 0xd9, 0x38, 0x83, 0x64, 0xcd, 0xf9, 0x7a, 0x11, 0x38, 0x76, 0xbd, + 0xe0, 0xef, 0x66, 0x2b, 0xaa, 0x69, 0xf8, 0x29, 0xda, 0x93, 0x8a, 0xa9, 0x44, 0x1e, 0x56, 0x35, + 0xf7, 0xee, 0x1b, 0x72, 0xb5, 0xd6, 0x6e, 0x16, 0xe4, 0xbd, 0x7c, 0x4d, 0x0b, 0x66, 0xe7, 0x77, + 0x03, 0x7d, 0xbc, 0x46, 0xf5, 0x98, 0x4b, 0x85, 0x9f, 0xae, 0x54, 0x8c, 0x6c, 0x57, 0xb1, 0x4c, + 0xad, 0xeb, 0x75, 0x50, 0x44, 0xdd, 0x9f, 0x5a, 0x16, 0xaa, 0xd5, 0x43, 0x35, 0xae, 0x20, 0xc8, + 0xa6, 0xa1, 0xba, 0x84, 0xde, 0x22, 0x2d, 0xbb, 0x51, 0xa0, 0x6b, 0x8f, 0x32, 0x08, 0xcd, 0x59, + 0x9d, 0x3f, 0xaa, 0x6b, 0xd3, 0xc9, 0xca, 0x89, 0x9f, 0xa1, 0x7a, 0xc0, 0xc3, 0xfb, 0x23, 0xc6, + 0x7d, 0xd6, 0xf7, 0xe1, 0xb5, 0x43, 0x90, 0xdd, 0x20, 0x92, 0xdf, 0x20, 0xf2, 0x28, 0x54, 0xe7, + 0x71, 0x4f, 0xc5, 0x3c, 0xf4, 0xec, 0x83, 0x74, 0xd2, 0xae, 0x9f, 0x2d, 0x90, 0x68, 0x89, 0x8b, + 0xbf, 0x47, 0xfb, 0x12, 0x7c, 0x70, 0x94, 0x88, 0xdf, 0x6c, 0xd2, 0x1f, 0xb3, 0x3e, 0xf8, 0xbd, + 0x42, 0x6a, 0xd7, 0xb3, 0xba, 0x4d, 0x57, 0x74, 0x86, 0xc4, 0x3e, 0x6a, 0x06, 0xec, 0xf9, 0x93, + 0x90, 0xcd, 0x12, 0xa9, 0xbe, 0x65, 0x22, 0x38, 0x9d, 0xb4, 0x9b, 0x67, 0x25, 0x16, 0x5d, 0x62, + 0xe3, 0x17, 0x06, 0x6a, 0x25, 0xe1, 0x00, 0x98, 0xaf, 0x06, 0xe3, 0x0b, 0xe1, 0x4e, 0x9f, 0x8d, + 0x0b, 0xdd, 0xa1, 0xc3, 0xdd, 0x9b, 0xc6, 0xad, 0x1b, 0xf6, 0xbd, 0x74, 0xd2, 0x6e, 0x3d, 0xd9, + 0xe8, 0xf5, 0xcf, 0xa4, 0x6d, 0x6e, 0xde, 0xbd, 0x1c, 0x47, 0x40, 0xff, 0x23, 0x46, 0xe7, 0xb7, + 0x1a, 0x3a, 0xda, 0x38, 0xd8, 0xf8, 0x5b, 0x84, 0x45, 0x5f, 0x42, 0x3c, 0x02, 0xf7, 0x61, 0xfe, + 0xcc, 0x71, 0x11, 0xea, 0xde, 0x56, 0xed, 0x56, 0x31, 0x23, 0xf8, 0x7c, 0xc5, 0x83, 0xae, 0x51, + 0xe1, 0x9f, 0x0d, 0xd4, 0x70, 0xf3, 0x30, 0xe0, 0x5e, 0x08, 0x77, 0x3a, 0x9b, 0x0f, 0xdf, 0xe6, + 0xca, 0x91, 0xee, 0x22, 0xe9, 0x34, 0x54, 0xf1, 0xd8, 0xfe, 0xb0, 0x38, 0x50, 0xa3, 0xb4, 0x47, + 0xcb, 0x41, 0xb3, 0x94, 0xdc, 0x19, 0x52, 0xde, 0xf7, 0x7d, 0xf1, 0x13, 0xb8, 0xba, 0xcb, 0xb5, + 0x79, 0x4a, 0xdd, 0x15, 0x0f, 0xba, 0x46, 0x85, 0xbf, 0x42, 0x4d, 0x27, 0x89, 0x63, 0x08, 0xd5, + 0x37, 0x79, 0x7d, 0x75, 0xcb, 0x6a, 0xf6, 0x47, 0x05, 0xa7, 0xf9, 0xa0, 0xb4, 0x4b, 0x97, 0xbc, + 0x33, 0xbd, 0x0b, 0x92, 0xc7, 0xe0, 0x4e, 0xf5, 0xb5, 0xb2, 0xbe, 0x5b, 0xda, 0xa5, 0x4b, 0xde, + 0xf8, 0x04, 0xd5, 0xe1, 0x79, 0x04, 0xce, 0xb4, 0xa0, 0x7b, 0x5a, 0xfd, 0x41, 0xa1, 0xae, 0x9f, + 0x2e, 0xec, 0xd1, 0x92, 0x27, 0x76, 0x10, 0x72, 0x44, 0xe8, 0xf2, 0xfc, 0x27, 0xe3, 0x1d, 0xdd, + 0x08, 0x6b, 0xbb, 0x8b, 0xf4, 0x60, 0xaa, 0x9b, 0x3f, 0xd8, 0x33, 0x93, 0xa4, 0x0b, 0xd8, 0x96, + 0x8f, 0xf0, 0x6a, 0x9b, 0xf0, 0x01, 0xaa, 0x0e, 0x61, 0xac, 0x87, 0xe8, 0x06, 0xcd, 0x3e, 0xf1, + 0x3d, 0x54, 0x1b, 0x31, 0x3f, 0x81, 0xe2, 0x42, 0x7f, 0xba, 0xdd, 0x39, 0x2e, 0x79, 0x00, 0x34, + 0x17, 0x7e, 0xb9, 0x73, 0x62, 0xd8, 0x5f, 0x5f, 0x5d, 0x9b, 0x95, 0x97, 0xd7, 0x66, 0xe5, 0xd5, + 0xb5, 0x59, 0x79, 0x91, 0x9a, 0xc6, 0x55, 0x6a, 0x1a, 0x2f, 0x53, 0xd3, 0x78, 0x95, 0x9a, 0xc6, + 0x5f, 0xa9, 0x69, 0xfc, 0xf2, 0xb7, 0x59, 0xf9, 0xee, 0x68, 0xe3, 0xdf, 0x9c, 0x7f, 0x03, 0x00, + 0x00, 0xff, 0xff, 0x3c, 0xbe, 0x15, 0xfb, 0x02, 0x09, 0x00, 0x00, } func (m *Eviction) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index d1409913f1..91e33f2332 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -35,11 +35,11 @@ option go_package = "k8s.io/api/policy/v1beta1"; message Eviction { // ObjectMeta describes the pod that is being 
evicted. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // DeleteOptions may be provided // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; } // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods @@ -47,7 +47,7 @@ message PodDisruptionBudget { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the PodDisruptionBudget. // +optional @@ -63,7 +63,7 @@ message PodDisruptionBudgetList { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items list individual PodDisruptionBudget objects repeated PodDisruptionBudget items = 2; @@ -76,7 +76,7 @@ message PodDisruptionBudgetSpec { // absence of the evicted pod. So for example you can prevent all voluntary // evictions by specifying "100%". // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; // Label query over pods whose evictions are managed by the disruption // budget. @@ -84,14 +84,14 @@ message PodDisruptionBudgetSpec { // An empty selector ({}) also selects no pods, which differs from standard behavior of selecting all pods. // In policy/v1, an empty selector will select all pods in the namespace. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // An eviction is allowed if at most "maxUnavailable" pods selected by // "selector" are unavailable after the eviction, i.e. even in absence of // the evicted pod. For example, one can prevent all voluntary evictions // by specifying 0. This is a mutually exclusive setting with "minAvailable". // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3; // UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods // should be considered for eviction. Current implementation considers healthy pods, @@ -142,7 +142,7 @@ message PodDisruptionBudgetStatus { // If everything goes smooth this map should be empty for the most of the time. // Large number of entries in the map may indicate problems with pod deletions. // +optional - map disruptedPods = 2; + map disruptedPods = 2; // Number of pod disruptions that are currently allowed. 
optional int32 disruptionsAllowed = 3; @@ -174,6 +174,6 @@ message PodDisruptionBudgetStatus { // +patchStrategy=merge // +listType=map // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 7; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 7; } diff --git a/vendor/k8s.io/api/rbac/v1/doc.go b/vendor/k8s.io/api/rbac/v1/doc.go index 80f43ce922..b0e4e5b5b5 100644 --- a/vendor/k8s.io/api/rbac/v1/doc.go +++ b/vendor/k8s.io/api/rbac/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=rbac.authorization.k8s.io package v1 // import "k8s.io/api/rbac/v1" diff --git a/vendor/k8s.io/api/rbac/v1/generated.pb.go b/vendor/k8s.io/api/rbac/v1/generated.pb.go index 4e466eb285..112d18fb06 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto +// source: k8s.io/api/rbac/v1/generated.proto package v1 @@ -47,7 +47,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *AggregationRule) Reset() { *m = AggregationRule{} } func (*AggregationRule) ProtoMessage() {} func (*AggregationRule) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{0} + return fileDescriptor_c8ba2e7dd472de66, []int{0} } func (m *AggregationRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +75,7 @@ var xxx_messageInfo_AggregationRule proto.InternalMessageInfo func (m *ClusterRole) Reset() { *m = ClusterRole{} } func (*ClusterRole) ProtoMessage() {} func (*ClusterRole) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{1} + return fileDescriptor_c8ba2e7dd472de66, []int{1} } func (m *ClusterRole) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +103,7 @@ var xxx_messageInfo_ClusterRole proto.InternalMessageInfo func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } func (*ClusterRoleBinding) ProtoMessage() {} func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{2} + return fileDescriptor_c8ba2e7dd472de66, []int{2} } func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +131,7 @@ var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } func (*ClusterRoleBindingList) ProtoMessage() {} func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{3} + return fileDescriptor_c8ba2e7dd472de66, []int{3} } func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +159,7 @@ var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } func (*ClusterRoleList) ProtoMessage() {} func (*ClusterRoleList) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{4} + return fileDescriptor_c8ba2e7dd472de66, []int{4} } func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -187,7 +187,7 @@ var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo func (m *PolicyRule) Reset() { *m = PolicyRule{} } func (*PolicyRule) 
ProtoMessage() {} func (*PolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{5} + return fileDescriptor_c8ba2e7dd472de66, []int{5} } func (m *PolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -215,7 +215,7 @@ var xxx_messageInfo_PolicyRule proto.InternalMessageInfo func (m *Role) Reset() { *m = Role{} } func (*Role) ProtoMessage() {} func (*Role) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{6} + return fileDescriptor_c8ba2e7dd472de66, []int{6} } func (m *Role) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -243,7 +243,7 @@ var xxx_messageInfo_Role proto.InternalMessageInfo func (m *RoleBinding) Reset() { *m = RoleBinding{} } func (*RoleBinding) ProtoMessage() {} func (*RoleBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{7} + return fileDescriptor_c8ba2e7dd472de66, []int{7} } func (m *RoleBinding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -271,7 +271,7 @@ var xxx_messageInfo_RoleBinding proto.InternalMessageInfo func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } func (*RoleBindingList) ProtoMessage() {} func (*RoleBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{8} + return fileDescriptor_c8ba2e7dd472de66, []int{8} } func (m *RoleBindingList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -299,7 +299,7 @@ var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo func (m *RoleList) Reset() { *m = RoleList{} } func (*RoleList) ProtoMessage() {} func (*RoleList) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{9} + return fileDescriptor_c8ba2e7dd472de66, []int{9} } func (m *RoleList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -327,7 +327,7 @@ var xxx_messageInfo_RoleList proto.InternalMessageInfo func (m *RoleRef) Reset() { *m = RoleRef{} } func (*RoleRef) ProtoMessage() {} func (*RoleRef) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{10} + return fileDescriptor_c8ba2e7dd472de66, []int{10} } func (m *RoleRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -355,7 +355,7 @@ var xxx_messageInfo_RoleRef proto.InternalMessageInfo func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} func (*Subject) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{11} + return fileDescriptor_c8ba2e7dd472de66, []int{11} } func (m *Subject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -396,62 +396,61 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto", fileDescriptor_979ffd7b30c07419) -} - -var fileDescriptor_979ffd7b30c07419 = []byte{ - // 809 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x56, 0xcf, 0x6b, 0xe3, 0x46, - 0x14, 0xf6, 0x38, 0x36, 0xb1, 0xc6, 0x35, 0x6e, 0xa6, 0xa1, 0x88, 0xb4, 0xc8, 0x41, 0x85, 0x12, - 0x68, 0x2b, 0x35, 0x69, 0x69, 0x03, 0x25, 0x87, 0x28, 0xa5, 0x25, 0x24, 0x4d, 0xc3, 0x84, 0xf6, - 0x50, 0x7a, 0xe8, 0x48, 0x9e, 0x28, 0x53, 0xeb, 0x17, 0x33, 0x92, 0x21, 0xf4, 0x52, 0x0a, 0x3d, - 0xec, 0x6d, 0x8f, 0xbb, 0x7f, 0xc1, 0x5e, 0x76, 0x8f, 0xfb, 0x17, 0xec, 0x25, 0xc7, 0x1c, 0x73, - 0x32, 0x1b, 0xed, 0x1f, 0xb2, 0x8b, 0x7e, 0x59, 0xfe, 0xa1, 0x6c, 0x7c, 0x32, 0x2c, 0x7b, 0xb2, - 0xe7, 0xbd, 0xef, 0x7d, 0xef, 0x9b, 0x4f, 0x7a, 0xcf, 0x86, 0x3f, 0x0c, 0x76, 0x85, 0xc6, 0x7c, - 0x7d, 0x10, 0x99, 0x94, 
0x7b, 0x34, 0xa4, 0x42, 0x1f, 0x52, 0xaf, 0xef, 0x73, 0x3d, 0x4f, 0x90, - 0x80, 0xe9, 0xdc, 0x24, 0x96, 0x3e, 0xdc, 0xd6, 0x6d, 0xea, 0x51, 0x4e, 0x42, 0xda, 0xd7, 0x02, - 0xee, 0x87, 0x3e, 0x42, 0x19, 0x46, 0x23, 0x01, 0xd3, 0x12, 0x8c, 0x36, 0xdc, 0xde, 0xf8, 0xca, - 0x66, 0xe1, 0x45, 0x64, 0x6a, 0x96, 0xef, 0xea, 0xb6, 0x6f, 0xfb, 0x7a, 0x0a, 0x35, 0xa3, 0xf3, - 0xf4, 0x94, 0x1e, 0xd2, 0x6f, 0x19, 0xc5, 0xc6, 0xb7, 0x65, 0x1b, 0x97, 0x58, 0x17, 0xcc, 0xa3, - 0xfc, 0x52, 0x0f, 0x06, 0x76, 0x12, 0x10, 0xba, 0x4b, 0x43, 0x52, 0xd1, 0x78, 0x43, 0xbf, 0xab, - 0x8a, 0x47, 0x5e, 0xc8, 0x5c, 0x3a, 0x57, 0xf0, 0xdd, 0x7d, 0x05, 0xc2, 0xba, 0xa0, 0x2e, 0x99, - 0xad, 0x53, 0x1f, 0x03, 0xd8, 0xdd, 0xb7, 0x6d, 0x4e, 0x6d, 0x12, 0x32, 0xdf, 0xc3, 0x91, 0x43, - 0xd1, 0xff, 0x00, 0xae, 0x5b, 0x4e, 0x24, 0x42, 0xca, 0xb1, 0xef, 0xd0, 0x33, 0xea, 0x50, 0x2b, - 0xf4, 0xb9, 0x90, 0xc1, 0xe6, 0xca, 0x56, 0x7b, 0xe7, 0x1b, 0xad, 0x74, 0x65, 0xdc, 0x4b, 0x0b, - 0x06, 0x76, 0x12, 0x10, 0x5a, 0x72, 0x25, 0x6d, 0xb8, 0xad, 0x1d, 0x13, 0x93, 0x3a, 0x45, 0xad, - 0xf1, 0xe9, 0xd5, 0xa8, 0x57, 0x8b, 0x47, 0xbd, 0xf5, 0x83, 0x0a, 0x62, 0x5c, 0xd9, 0x4e, 0x7d, - 0x54, 0x87, 0xed, 0x09, 0x38, 0xfa, 0x0b, 0xb6, 0x12, 0xf2, 0x3e, 0x09, 0x89, 0x0c, 0x36, 0xc1, - 0x56, 0x7b, 0xe7, 0xeb, 0xc5, 0xa4, 0xfc, 0x6a, 0xfe, 0x4d, 0xad, 0xf0, 0x17, 0x1a, 0x12, 0x03, - 0xe5, 0x3a, 0x60, 0x19, 0xc3, 0x63, 0x56, 0x74, 0x00, 0x9b, 0x3c, 0x72, 0xa8, 0x90, 0xeb, 0xe9, - 0x4d, 0x15, 0x6d, 0xfe, 0xf9, 0x6b, 0xa7, 0xbe, 0xc3, 0xac, 0xcb, 0xc4, 0x28, 0xa3, 0x93, 0x93, - 0x35, 0x93, 0x93, 0xc0, 0x59, 0x2d, 0x32, 0x61, 0x97, 0x4c, 0x3b, 0x2a, 0xaf, 0xa4, 0x6a, 0x3f, - 0xab, 0xa2, 0x9b, 0x31, 0xdf, 0xf8, 0x28, 0x1e, 0xf5, 0x66, 0x9f, 0x08, 0x9e, 0x25, 0x54, 0x1f, - 0xd4, 0x21, 0x9a, 0xb0, 0xc6, 0x60, 0x5e, 0x9f, 0x79, 0xf6, 0x12, 0x1c, 0x3a, 0x84, 0x2d, 0x11, - 0xa5, 0x89, 0xc2, 0xa4, 0x4f, 0xaa, 0x6e, 0x75, 0x96, 0x61, 0x8c, 0x0f, 0x73, 0xb2, 0x56, 0x1e, - 0x10, 0x78, 0x5c, 0x8e, 0x7e, 0x82, 0xab, 0xdc, 0x77, 0x28, 0xa6, 0xe7, 0xb9, 0x3f, 0x95, 0x4c, - 0x38, 0x83, 0x18, 0xdd, 0x9c, 0x69, 0x35, 0x0f, 0xe0, 0xa2, 0x58, 0x7d, 0x01, 0xe0, 0xc7, 0xf3, - 0x5e, 0x1c, 0x33, 0x11, 0xa2, 0x3f, 0xe7, 0xfc, 0xd0, 0x16, 0x7c, 0x79, 0x99, 0xc8, 0xdc, 0x18, - 0x5f, 0xa0, 0x88, 0x4c, 0x78, 0x71, 0x04, 0x9b, 0x2c, 0xa4, 0x6e, 0x61, 0xc4, 0xe7, 0x55, 0xf2, - 0xe7, 0x85, 0x95, 0x6f, 0xcd, 0x61, 0x52, 0x8c, 0x33, 0x0e, 0xf5, 0x39, 0x80, 0xdd, 0x09, 0xf0, - 0x12, 0xe4, 0xff, 0x38, 0x2d, 0xbf, 0x77, 0x9f, 0xfc, 0x6a, 0xdd, 0xaf, 0x01, 0x84, 0xe5, 0x48, - 0xa0, 0x1e, 0x6c, 0x0e, 0x29, 0x37, 0xb3, 0x5d, 0x21, 0x19, 0x52, 0x82, 0xff, 0x3d, 0x09, 0xe0, - 0x2c, 0x8e, 0xbe, 0x80, 0x12, 0x09, 0xd8, 0xcf, 0xdc, 0x8f, 0x82, 0xac, 0xb3, 0x64, 0x74, 0xe2, - 0x51, 0x4f, 0xda, 0x3f, 0x3d, 0xcc, 0x82, 0xb8, 0xcc, 0x27, 0x60, 0x4e, 0x85, 0x1f, 0x71, 0x8b, - 0x0a, 0x79, 0xa5, 0x04, 0xe3, 0x22, 0x88, 0xcb, 0x3c, 0xfa, 0x1e, 0x76, 0x8a, 0xc3, 0x09, 0x71, - 0xa9, 0x90, 0x1b, 0x69, 0xc1, 0x5a, 0x3c, 0xea, 0x75, 0xf0, 0x64, 0x02, 0x4f, 0xe3, 0xd0, 0x1e, - 0xec, 0x7a, 0xbe, 0x57, 0x40, 0x7e, 0xc3, 0xc7, 0x42, 0x6e, 0xa6, 0xa5, 0xe9, 0x2c, 0x9e, 0x4c, - 0xa7, 0xf0, 0x2c, 0x56, 0x7d, 0x06, 0x60, 0xe3, 0x1d, 0xda, 0x4f, 0xea, 0x7f, 0x75, 0xd8, 0x7e, - 0xef, 0x97, 0x46, 0x32, 0x6e, 0xcb, 0xdd, 0x16, 0x8b, 0x8c, 0xdb, 0xfd, 0x6b, 0xe2, 0x09, 0x80, - 0xad, 0x25, 0xed, 0x87, 0xbd, 0x69, 0xc1, 0xf2, 0x9d, 0x82, 0xab, 0x95, 0xfe, 0x03, 0x0b, 0xd7, - 0xd1, 0x97, 0xb0, 0x55, 0xcc, 0x74, 0xaa, 0x53, 0x2a, 0xfb, 0x16, 0x63, 0x8f, 0xc7, 0x08, 0xb4, - 0x09, 0x1b, 0x03, 0xe6, 0xf5, 0xe5, 0x7a, 0x8a, 
0xfc, 0x20, 0x47, 0x36, 0x8e, 0x98, 0xd7, 0xc7, - 0x69, 0x26, 0x41, 0x78, 0xc4, 0xcd, 0x7e, 0x56, 0x27, 0x10, 0xc9, 0x34, 0xe3, 0x34, 0xa3, 0x3e, - 0x05, 0x70, 0x35, 0x7f, 0x7b, 0xc6, 0x7c, 0xe0, 0x4e, 0xbe, 0x49, 0x7d, 0xf5, 0x45, 0xf4, 0xbd, - 0xbd, 0x3b, 0xd2, 0xa1, 0x94, 0x7c, 0x8a, 0x80, 0x58, 0x54, 0x6e, 0xa4, 0xb0, 0xb5, 0x1c, 0x26, - 0x9d, 0x14, 0x09, 0x5c, 0x62, 0x8c, 0xdd, 0xab, 0x5b, 0xa5, 0x76, 0x7d, 0xab, 0xd4, 0x6e, 0x6e, - 0x95, 0xda, 0xbf, 0xb1, 0x02, 0xae, 0x62, 0x05, 0x5c, 0xc7, 0x0a, 0xb8, 0x89, 0x15, 0xf0, 0x32, - 0x56, 0xc0, 0xc3, 0x57, 0x4a, 0xed, 0x0f, 0x34, 0xff, 0x8f, 0xf5, 0x4d, 0x00, 0x00, 0x00, 0xff, - 0xff, 0xec, 0x4f, 0xa6, 0x29, 0xdf, 0x0a, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/rbac/v1/generated.proto", fileDescriptor_c8ba2e7dd472de66) +} + +var fileDescriptor_c8ba2e7dd472de66 = []byte{ + // 790 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x56, 0x4d, 0x6f, 0xd3, 0x4a, + 0x14, 0xcd, 0xa4, 0x89, 0x1a, 0x4f, 0x5e, 0x94, 0xd7, 0x79, 0xd5, 0x93, 0xd5, 0xf7, 0xe4, 0x54, + 0x46, 0x42, 0x95, 0x00, 0x9b, 0x16, 0x04, 0xdd, 0x74, 0x51, 0x17, 0x81, 0xaa, 0x96, 0x52, 0x4d, + 0x05, 0x0b, 0xc4, 0x82, 0x89, 0x33, 0x75, 0x87, 0xf8, 0x4b, 0x1e, 0x3b, 0x52, 0xc5, 0x06, 0x21, + 0xb1, 0x60, 0xc7, 0x12, 0x7e, 0x01, 0x1b, 0x58, 0xf2, 0x0b, 0xd8, 0x74, 0xd9, 0x65, 0x57, 0x11, + 0x35, 0x3f, 0x04, 0xe4, 0xaf, 0x38, 0x1f, 0x2e, 0xcd, 0x2a, 0x12, 0x62, 0x95, 0xcc, 0xbd, 0xe7, + 0x9e, 0x7b, 0xe6, 0xd8, 0xf7, 0x26, 0x50, 0xee, 0xae, 0x73, 0x85, 0x39, 0x2a, 0x71, 0x99, 0xea, + 0xb5, 0x89, 0xae, 0xf6, 0x56, 0x55, 0x83, 0xda, 0xd4, 0x23, 0x3e, 0xed, 0x28, 0xae, 0xe7, 0xf8, + 0x0e, 0x42, 0x09, 0x46, 0x21, 0x2e, 0x53, 0x22, 0x8c, 0xd2, 0x5b, 0x5d, 0xba, 0x61, 0x30, 0xff, + 0x28, 0x68, 0x2b, 0xba, 0x63, 0xa9, 0x86, 0x63, 0x38, 0x6a, 0x0c, 0x6d, 0x07, 0x87, 0xf1, 0x29, + 0x3e, 0xc4, 0xdf, 0x12, 0x8a, 0xa5, 0xdb, 0x79, 0x1b, 0x8b, 0xe8, 0x47, 0xcc, 0xa6, 0xde, 0xb1, + 0xea, 0x76, 0x8d, 0x28, 0xc0, 0x55, 0x8b, 0xfa, 0xa4, 0xa0, 0xf1, 0x92, 0x7a, 0x51, 0x95, 0x17, + 0xd8, 0x3e, 0xb3, 0xe8, 0x44, 0xc1, 0x9d, 0xcb, 0x0a, 0xb8, 0x7e, 0x44, 0x2d, 0x32, 0x5e, 0x27, + 0x7f, 0x00, 0xb0, 0xb9, 0x69, 0x18, 0x1e, 0x35, 0x88, 0xcf, 0x1c, 0x1b, 0x07, 0x26, 0x45, 0x6f, + 0x00, 0x5c, 0xd4, 0xcd, 0x80, 0xfb, 0xd4, 0xc3, 0x8e, 0x49, 0x0f, 0xa8, 0x49, 0x75, 0xdf, 0xf1, + 0xb8, 0x08, 0x96, 0xe7, 0x56, 0xea, 0x6b, 0xb7, 0x94, 0xdc, 0x95, 0x41, 0x2f, 0xc5, 0xed, 0x1a, + 0x51, 0x80, 0x2b, 0xd1, 0x95, 0x94, 0xde, 0xaa, 0xb2, 0x4b, 0xda, 0xd4, 0xcc, 0x6a, 0xb5, 0xff, + 0x4f, 0xfa, 0xad, 0x52, 0xd8, 0x6f, 0x2d, 0x6e, 0x15, 0x10, 0xe3, 0xc2, 0x76, 0xf2, 0xfb, 0x32, + 0xac, 0x0f, 0xc1, 0xd1, 0x73, 0x58, 0x8b, 0xc8, 0x3b, 0xc4, 0x27, 0x22, 0x58, 0x06, 0x2b, 0xf5, + 0xb5, 0x9b, 0xd3, 0x49, 0x79, 0xd4, 0x7e, 0x41, 0x75, 0xff, 0x21, 0xf5, 0x89, 0x86, 0x52, 0x1d, + 0x30, 0x8f, 0xe1, 0x01, 0x2b, 0xda, 0x82, 0x55, 0x2f, 0x30, 0x29, 0x17, 0xcb, 0xf1, 0x4d, 0x25, + 0x65, 0xf2, 0xf9, 0x2b, 0xfb, 0x8e, 0xc9, 0xf4, 0xe3, 0xc8, 0x28, 0xad, 0x91, 0x92, 0x55, 0xa3, + 0x13, 0xc7, 0x49, 0x2d, 0x6a, 0xc3, 0x26, 0x19, 0x75, 0x54, 0x9c, 0x8b, 0xd5, 0x5e, 0x29, 0xa2, + 0x1b, 0x33, 0x5f, 0xfb, 0x27, 0xec, 0xb7, 0xc6, 0x9f, 0x08, 0x1e, 0x27, 0x94, 0xdf, 0x96, 0x21, + 0x1a, 0xb2, 0x46, 0x63, 0x76, 0x87, 0xd9, 0xc6, 0x0c, 0x1c, 0xda, 0x86, 0x35, 0x1e, 0xc4, 0x89, + 0xcc, 0xa4, 0xff, 0x8a, 0x6e, 0x75, 0x90, 0x60, 0xb4, 0xbf, 0x53, 0xb2, 0x5a, 0x1a, 0xe0, 0x78, + 0x50, 0x8e, 0xee, 0xc3, 0x79, 0xcf, 0x31, 0x29, 0xa6, 0x87, 0xa9, 0x3f, 0x85, 0x4c, 0x38, 0x81, + 0x68, 0xcd, 0x94, 
0x69, 0x3e, 0x0d, 0xe0, 0xac, 0x58, 0xfe, 0x0a, 0xe0, 0xbf, 0x93, 0x5e, 0xec, + 0x32, 0xee, 0xa3, 0x67, 0x13, 0x7e, 0x28, 0x53, 0xbe, 0xbc, 0x8c, 0x27, 0x6e, 0x0c, 0x2e, 0x90, + 0x45, 0x86, 0xbc, 0xd8, 0x81, 0x55, 0xe6, 0x53, 0x2b, 0x33, 0xe2, 0x6a, 0x91, 0xfc, 0x49, 0x61, + 0xf9, 0x5b, 0xb3, 0x1d, 0x15, 0xe3, 0x84, 0x43, 0xfe, 0x02, 0x60, 0x73, 0x08, 0x3c, 0x03, 0xf9, + 0xf7, 0x46, 0xe5, 0xb7, 0x2e, 0x93, 0x5f, 0xac, 0xfb, 0x07, 0x80, 0x30, 0x1f, 0x09, 0xd4, 0x82, + 0xd5, 0x1e, 0xf5, 0xda, 0xc9, 0xae, 0x10, 0x34, 0x21, 0xc2, 0x3f, 0x89, 0x02, 0x38, 0x89, 0xa3, + 0x6b, 0x50, 0x20, 0x2e, 0x7b, 0xe0, 0x39, 0x81, 0x9b, 0x74, 0x16, 0xb4, 0x46, 0xd8, 0x6f, 0x09, + 0x9b, 0xfb, 0xdb, 0x49, 0x10, 0xe7, 0xf9, 0x08, 0xec, 0x51, 0xee, 0x04, 0x9e, 0x4e, 0xb9, 0x38, + 0x97, 0x83, 0x71, 0x16, 0xc4, 0x79, 0x1e, 0xdd, 0x85, 0x8d, 0xec, 0xb0, 0x47, 0x2c, 0xca, 0xc5, + 0x4a, 0x5c, 0xb0, 0x10, 0xf6, 0x5b, 0x0d, 0x3c, 0x9c, 0xc0, 0xa3, 0x38, 0xb4, 0x01, 0x9b, 0xb6, + 0x63, 0x67, 0x90, 0xc7, 0x78, 0x97, 0x8b, 0xd5, 0xb8, 0x34, 0x9e, 0xc5, 0xbd, 0xd1, 0x14, 0x1e, + 0xc7, 0xca, 0x9f, 0x01, 0xac, 0xfc, 0x46, 0xfb, 0x49, 0x7e, 0x5d, 0x86, 0xf5, 0x3f, 0x7e, 0x69, + 0x44, 0xe3, 0x36, 0xdb, 0x6d, 0x31, 0xcd, 0xb8, 0x5d, 0xbe, 0x26, 0x3e, 0x02, 0x58, 0x9b, 0xd1, + 0x7e, 0xd8, 0x18, 0x15, 0x2c, 0x5e, 0x28, 0xb8, 0x58, 0xe9, 0x4b, 0x98, 0xb9, 0x8e, 0xae, 0xc3, + 0x5a, 0x36, 0xd3, 0xb1, 0x4e, 0x21, 0xef, 0x9b, 0x8d, 0x3d, 0x1e, 0x20, 0xd0, 0x32, 0xac, 0x74, + 0x99, 0xdd, 0x11, 0xcb, 0x31, 0xf2, 0xaf, 0x14, 0x59, 0xd9, 0x61, 0x76, 0x07, 0xc7, 0x99, 0x08, + 0x61, 0x13, 0x2b, 0xf9, 0x59, 0x1d, 0x42, 0x44, 0xd3, 0x8c, 0xe3, 0x8c, 0xfc, 0x09, 0xc0, 0xf9, + 0xf4, 0xed, 0x19, 0xf0, 0x81, 0x0b, 0xf9, 0x86, 0xf5, 0x95, 0xa7, 0xd1, 0xf7, 0xeb, 0xee, 0x48, + 0x85, 0x42, 0xf4, 0xc9, 0x5d, 0xa2, 0x53, 0xb1, 0x12, 0xc3, 0x16, 0x52, 0x98, 0xb0, 0x97, 0x25, + 0x70, 0x8e, 0xd1, 0xd6, 0x4f, 0xce, 0xa5, 0xd2, 0xe9, 0xb9, 0x54, 0x3a, 0x3b, 0x97, 0x4a, 0xaf, + 0x42, 0x09, 0x9c, 0x84, 0x12, 0x38, 0x0d, 0x25, 0x70, 0x16, 0x4a, 0xe0, 0x5b, 0x28, 0x81, 0x77, + 0xdf, 0xa5, 0xd2, 0x53, 0x34, 0xf9, 0x8f, 0xf5, 0x67, 0x00, 0x00, 0x00, 0xff, 0xff, 0x67, 0xff, + 0x5a, 0x4f, 0xc6, 0x0a, 0x00, 0x00, } func (m *AggregationRule) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/rbac/v1/generated.proto b/vendor/k8s.io/api/rbac/v1/generated.proto index 13ff60ea71..87b8f832d3 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1/generated.proto @@ -33,17 +33,19 @@ message AggregationRule { // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. // If any of the selectors match, then the ClusterRole's permissions will be added // +optional - repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; + // +listType=atomic + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; } // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. message ClusterRole { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this ClusterRole // +optional + // +listType=atomic repeated PolicyRule rules = 2; // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. 
@@ -58,10 +60,11 @@ message ClusterRole { message ClusterRoleBinding { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. // +optional + // +listType=atomic repeated Subject subjects = 2; // RoleRef can only reference a ClusterRole in the global namespace. @@ -74,7 +77,7 @@ message ClusterRoleBinding { message ClusterRoleBindingList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoleBindings repeated ClusterRoleBinding items = 2; @@ -84,7 +87,7 @@ message ClusterRoleBindingList { message ClusterRoleList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoles repeated ClusterRole items = 2; @@ -94,25 +97,30 @@ message ClusterRoleList { // about who the rule applies to or which namespace the rule applies to. message PolicyRule { // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. + // +listType=atomic repeated string verbs = 1; // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of // the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups. // +optional + // +listType=atomic repeated string apiGroups = 2; // Resources is a list of resources this rule applies to. '*' represents all resources. // +optional + // +listType=atomic repeated string resources = 3; // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. // +optional + // +listType=atomic repeated string resourceNames = 4; // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. // +optional + // +listType=atomic repeated string nonResourceURLs = 5; } @@ -120,10 +128,11 @@ message PolicyRule { message Role { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this Role // +optional + // +listType=atomic repeated PolicyRule rules = 2; } @@ -133,10 +142,11 @@ message Role { message RoleBinding { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. // +optional + // +listType=atomic repeated Subject subjects = 2; // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. @@ -149,7 +159,7 @@ message RoleBinding { message RoleBindingList { // Standard object's metadata. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of RoleBindings repeated RoleBinding items = 2; @@ -159,7 +169,7 @@ message RoleBindingList { message RoleList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of Roles repeated Role items = 2; diff --git a/vendor/k8s.io/api/rbac/v1/types.go b/vendor/k8s.io/api/rbac/v1/types.go index ce845d69b4..f9628b8536 100644 --- a/vendor/k8s.io/api/rbac/v1/types.go +++ b/vendor/k8s.io/api/rbac/v1/types.go @@ -48,23 +48,28 @@ const ( // about who the rule applies to or which namespace the rule applies to. type PolicyRule struct { // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. + // +listType=atomic Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of // the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups. // +optional + // +listType=atomic APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` // Resources is a list of resources this rule applies to. '*' represents all resources. // +optional + // +listType=atomic Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. // +optional + // +listType=atomic ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"` // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. // +optional + // +listType=atomic NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,5,rep,name=nonResourceURLs"` } @@ -79,7 +84,7 @@ type Subject struct { // Defaults to "" for ServiceAccount subjects. // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. // +optional - APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt.name=apiGroup"` + APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt,name=apiGroup"` // Name of the object being referenced. Name string `json:"name" protobuf:"bytes,3,opt,name=name"` // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty @@ -101,6 +106,7 @@ type RoleRef struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. 
type Role struct { @@ -111,11 +117,13 @@ type Role struct { // Rules holds all the PolicyRules for this Role // +optional + // +listType=atomic Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` } // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. // It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given @@ -128,6 +136,7 @@ type RoleBinding struct { // Subjects holds references to the objects the role applies to. // +optional + // +listType=atomic Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. @@ -137,6 +146,7 @@ type RoleBinding struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // RoleBindingList is a collection of RoleBindings type RoleBindingList struct { @@ -150,6 +160,7 @@ type RoleBindingList struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // RoleList is a collection of Roles type RoleList struct { @@ -165,6 +176,7 @@ type RoleList struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. type ClusterRole struct { @@ -175,6 +187,7 @@ type ClusterRole struct { // Rules holds all the PolicyRules for this ClusterRole // +optional + // +listType=atomic Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. @@ -189,12 +202,14 @@ type AggregationRule struct { // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. // If any of the selectors match, then the ClusterRole's permissions will be added // +optional + // +listType=atomic ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"` } // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, // and adds who information via Subject. @@ -206,6 +221,7 @@ type ClusterRoleBinding struct { // Subjects holds references to the objects the role applies to. // +optional + // +listType=atomic Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` // RoleRef can only reference a ClusterRole in the global namespace. 
@@ -215,6 +231,7 @@ type ClusterRoleBinding struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // ClusterRoleBindingList is a collection of ClusterRoleBindings type ClusterRoleBindingList struct { @@ -228,6 +245,7 @@ type ClusterRoleBindingList struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // ClusterRoleList is a collection of ClusterRoles type ClusterRoleList struct { diff --git a/vendor/k8s.io/api/rbac/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/rbac/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..f6f74413b8 --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,70 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterRole) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterRoleBinding) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterRoleBindingList) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterRoleList) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Role) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *RoleBinding) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *RoleBindingList) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *RoleList) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go index 5cce23ea12..ee3c7bfcc0 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto +// source: k8s.io/api/rbac/v1alpha1/generated.proto package v1alpha1 @@ -47,7 +47,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *AggregationRule) Reset() { *m = AggregationRule{} } func (*AggregationRule) ProtoMessage() {} func (*AggregationRule) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{0} + return fileDescriptor_758889dfd9a88fa6, []int{0} } func (m *AggregationRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +75,7 @@ var xxx_messageInfo_AggregationRule proto.InternalMessageInfo func (m *ClusterRole) Reset() { *m = ClusterRole{} } func (*ClusterRole) ProtoMessage() {} func (*ClusterRole) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{1} + return fileDescriptor_758889dfd9a88fa6, []int{1} } func (m *ClusterRole) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +103,7 @@ var xxx_messageInfo_ClusterRole proto.InternalMessageInfo func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } func (*ClusterRoleBinding) ProtoMessage() {} func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{2} + return fileDescriptor_758889dfd9a88fa6, []int{2} } func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +131,7 @@ var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } func (*ClusterRoleBindingList) ProtoMessage() {} func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{3} + return fileDescriptor_758889dfd9a88fa6, []int{3} } func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +159,7 @@ var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } func (*ClusterRoleList) ProtoMessage() {} func (*ClusterRoleList) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{4} + return fileDescriptor_758889dfd9a88fa6, []int{4} } func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -187,7 +187,7 @@ var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo func (m *PolicyRule) 
Reset() { *m = PolicyRule{} } func (*PolicyRule) ProtoMessage() {} func (*PolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{5} + return fileDescriptor_758889dfd9a88fa6, []int{5} } func (m *PolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -215,7 +215,7 @@ var xxx_messageInfo_PolicyRule proto.InternalMessageInfo func (m *Role) Reset() { *m = Role{} } func (*Role) ProtoMessage() {} func (*Role) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{6} + return fileDescriptor_758889dfd9a88fa6, []int{6} } func (m *Role) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -243,7 +243,7 @@ var xxx_messageInfo_Role proto.InternalMessageInfo func (m *RoleBinding) Reset() { *m = RoleBinding{} } func (*RoleBinding) ProtoMessage() {} func (*RoleBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{7} + return fileDescriptor_758889dfd9a88fa6, []int{7} } func (m *RoleBinding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -271,7 +271,7 @@ var xxx_messageInfo_RoleBinding proto.InternalMessageInfo func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } func (*RoleBindingList) ProtoMessage() {} func (*RoleBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{8} + return fileDescriptor_758889dfd9a88fa6, []int{8} } func (m *RoleBindingList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -299,7 +299,7 @@ var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo func (m *RoleList) Reset() { *m = RoleList{} } func (*RoleList) ProtoMessage() {} func (*RoleList) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{9} + return fileDescriptor_758889dfd9a88fa6, []int{9} } func (m *RoleList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -327,7 +327,7 @@ var xxx_messageInfo_RoleList proto.InternalMessageInfo func (m *RoleRef) Reset() { *m = RoleRef{} } func (*RoleRef) ProtoMessage() {} func (*RoleRef) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{10} + return fileDescriptor_758889dfd9a88fa6, []int{10} } func (m *RoleRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -355,7 +355,7 @@ var xxx_messageInfo_RoleRef proto.InternalMessageInfo func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} func (*Subject) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{11} + return fileDescriptor_758889dfd9a88fa6, []int{11} } func (m *Subject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -396,64 +396,63 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto", fileDescriptor_b59b0bd5e7cb9590) -} - -var fileDescriptor_b59b0bd5e7cb9590 = []byte{ - // 833 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xbf, 0x8f, 0xe3, 0x44, - 0x14, 0xce, 0x64, 0x13, 0x36, 0x99, 0x25, 0x0a, 0x37, 0x9c, 0x90, 0xb5, 0x42, 0xce, 0x62, 0x81, - 0x74, 0x88, 0xc3, 0x66, 0x17, 0x04, 0x34, 0x20, 0xc5, 0x57, 0xa0, 0x40, 0xd8, 0x5b, 0xe6, 0xc4, - 0x15, 0x88, 0x82, 0x89, 0x33, 0xe7, 0x0c, 0xb1, 0x3d, 0xd6, 0x8c, 0x1d, 0xe9, 0x44, 0x43, 0x43, - 0x8b, 0x68, 0x28, 0xe8, 0x69, 0x69, 0xa0, 0xe4, 0x1f, 0x58, 0xba, 0x2b, 0xb7, 0x8a, 0x58, 0xf3, - 0x87, 0x80, 0x3c, 0xb6, 0x63, 0xe7, 0x17, 0x49, 0x15, 0x09, 0x89, 0x2a, 0x99, 0xf7, 0xbe, 0xf7, - 0xbd, 0xf7, 0xbe, 0x99, 0xf7, 0x12, 0xd8, 0x9f, 0xbe, 0x2f, 0x4d, 
0xc6, 0xad, 0x69, 0x3c, 0xa2, - 0x22, 0xa0, 0x11, 0x95, 0xd6, 0x8c, 0x06, 0x63, 0x2e, 0xac, 0xdc, 0x41, 0x42, 0x66, 0x89, 0x11, - 0x71, 0xac, 0xd9, 0x39, 0xf1, 0xc2, 0x09, 0x39, 0xb7, 0x5c, 0x1a, 0x50, 0x41, 0x22, 0x3a, 0x36, - 0x43, 0xc1, 0x23, 0x8e, 0xb4, 0x0c, 0x69, 0x92, 0x90, 0x99, 0x29, 0xd2, 0x2c, 0x90, 0xa7, 0x6f, - 0xba, 0x2c, 0x9a, 0xc4, 0x23, 0xd3, 0xe1, 0xbe, 0xe5, 0x72, 0x97, 0x5b, 0x2a, 0x60, 0x14, 0x3f, - 0x51, 0x27, 0x75, 0x50, 0xdf, 0x32, 0xa2, 0xd3, 0x77, 0xca, 0x94, 0x3e, 0x71, 0x26, 0x2c, 0xa0, - 0xe2, 0xa9, 0x15, 0x4e, 0xdd, 0xd4, 0x20, 0x2d, 0x9f, 0x46, 0xc4, 0x9a, 0xad, 0xa5, 0x3f, 0xb5, - 0xb6, 0x45, 0x89, 0x38, 0x88, 0x98, 0x4f, 0xd7, 0x02, 0xde, 0xdd, 0x15, 0x20, 0x9d, 0x09, 0xf5, - 0xc9, 0x6a, 0x9c, 0xf1, 0x13, 0x80, 0xdd, 0xbe, 0xeb, 0x0a, 0xea, 0x92, 0x88, 0xf1, 0x00, 0xc7, - 0x1e, 0x45, 0xdf, 0x01, 0x78, 0xd7, 0xf1, 0x62, 0x19, 0x51, 0x81, 0xb9, 0x47, 0x1f, 0x51, 0x8f, - 0x3a, 0x11, 0x17, 0x52, 0x03, 0x67, 0x47, 0xf7, 0x4e, 0x2e, 0xde, 0x36, 0x4b, 0x6d, 0x16, 0xb9, - 0xcc, 0x70, 0xea, 0xa6, 0x06, 0x69, 0xa6, 0x2d, 0x99, 0xb3, 0x73, 0x73, 0x48, 0x46, 0xd4, 0x2b, - 0x62, 0xed, 0x97, 0xaf, 0xe7, 0xbd, 0x5a, 0x32, 0xef, 0xdd, 0x7d, 0xb0, 0x81, 0x18, 0x6f, 0x4c, - 0x67, 0xfc, 0x5c, 0x87, 0x27, 0x15, 0x38, 0xfa, 0x0a, 0xb6, 0x52, 0xf2, 0x31, 0x89, 0x88, 0x06, - 0xce, 0xc0, 0xbd, 0x93, 0x8b, 0xb7, 0xf6, 0x2b, 0xe5, 0xe1, 0xe8, 0x6b, 0xea, 0x44, 0x9f, 0xd2, - 0x88, 0xd8, 0x28, 0xaf, 0x03, 0x96, 0x36, 0xbc, 0x60, 0x45, 0x03, 0xd8, 0x14, 0xb1, 0x47, 0xa5, - 0x56, 0x57, 0x9d, 0xbe, 0x6a, 0x6e, 0x7b, 0x05, 0xe6, 0x15, 0xf7, 0x98, 0xf3, 0x34, 0x95, 0xcb, - 0xee, 0xe4, 0x94, 0xcd, 0xf4, 0x24, 0x71, 0xc6, 0x80, 0x26, 0xb0, 0x4b, 0x96, 0x75, 0xd5, 0x8e, - 0x54, 0xcd, 0xaf, 0x6f, 0x27, 0x5d, 0xb9, 0x08, 0xfb, 0xc5, 0x64, 0xde, 0x5b, 0xbd, 0x1d, 0xbc, - 0x4a, 0x6b, 0xfc, 0x58, 0x87, 0xa8, 0x22, 0x93, 0xcd, 0x82, 0x31, 0x0b, 0xdc, 0x03, 0xa8, 0xf5, - 0x10, 0xb6, 0x64, 0xac, 0x1c, 0x85, 0x60, 0xaf, 0x6c, 0xef, 0xed, 0x51, 0x86, 0xb4, 0x5f, 0xc8, - 0x29, 0x5b, 0xb9, 0x41, 0xe2, 0x05, 0x09, 0x1a, 0xc2, 0x63, 0xc1, 0x3d, 0x8a, 0xe9, 0x93, 0x5c, - 0xab, 0x7f, 0xe1, 0xc3, 0x19, 0xd0, 0xee, 0xe6, 0x7c, 0xc7, 0xb9, 0x01, 0x17, 0x14, 0xc6, 0x1f, - 0x00, 0xbe, 0xb4, 0xae, 0xcb, 0x90, 0xc9, 0x08, 0x7d, 0xb9, 0xa6, 0x8d, 0xb9, 0xe7, 0xa3, 0x66, - 0x32, 0x53, 0x66, 0xd1, 0x46, 0x61, 0xa9, 0xe8, 0xf2, 0x19, 0x6c, 0xb2, 0x88, 0xfa, 0x85, 0x28, - 0xf7, 0xb7, 0x37, 0xb1, 0x5e, 0x5e, 0xf9, 0x9a, 0x06, 0x29, 0x05, 0xce, 0x98, 0x8c, 0xdf, 0x01, - 0xec, 0x56, 0xc0, 0x07, 0x68, 0xe2, 0xe3, 0xe5, 0x26, 0x5e, 0xdb, 0xaf, 0x89, 0xcd, 0xd5, 0xff, - 0x0d, 0x20, 0x2c, 0x07, 0x06, 0xf5, 0x60, 0x73, 0x46, 0xc5, 0x28, 0xdb, 0x27, 0x6d, 0xbb, 0x9d, - 0xe2, 0x1f, 0xa7, 0x06, 0x9c, 0xd9, 0xd1, 0x1b, 0xb0, 0x4d, 0x42, 0xf6, 0x91, 0xe0, 0x71, 0x28, - 0xb5, 0x23, 0x05, 0xea, 0x24, 0xf3, 0x5e, 0xbb, 0x7f, 0x35, 0xc8, 0x8c, 0xb8, 0xf4, 0xa7, 0x60, - 0x41, 0x25, 0x8f, 0x85, 0x43, 0xa5, 0xd6, 0x28, 0xc1, 0xb8, 0x30, 0xe2, 0xd2, 0x8f, 0xde, 0x83, - 0x9d, 0xe2, 0x70, 0x49, 0x7c, 0x2a, 0xb5, 0xa6, 0x0a, 0xb8, 0x93, 0xcc, 0x7b, 0x1d, 0x5c, 0x75, - 0xe0, 0x65, 0x1c, 0xfa, 0x00, 0x76, 0x03, 0x1e, 0x14, 0x90, 0xcf, 0xf1, 0x50, 0x6a, 0xcf, 0xa9, - 0x50, 0x35, 0xa3, 0x97, 0xcb, 0x2e, 0xbc, 0x8a, 0x35, 0x7e, 0x03, 0xb0, 0xf1, 0x9f, 0xdb, 0x61, - 0xc6, 0xf7, 0x75, 0x78, 0xf2, 0xff, 0x4a, 0xa9, 0xac, 0x94, 0x74, 0x0c, 0x0f, 0xbb, 0x4b, 0xf6, - 0x1f, 0xc3, 0xdd, 0x4b, 0xe4, 0x17, 0x00, 0x5b, 0x07, 0xda, 0x1e, 0x0f, 0x96, 0xcb, 0xd6, 0x77, - 0x94, 0xbd, 0xb9, 0xde, 0x6f, 0x60, 0x71, 0x03, 0xe8, 0x3e, 0x6c, 0x15, 0x13, 0xaf, 0xaa, 
0x6d, - 0x97, 0xd9, 0x8b, 0xa5, 0x80, 0x17, 0x08, 0x74, 0x06, 0x1b, 0x53, 0x16, 0x8c, 0xb5, 0xba, 0x42, - 0x3e, 0x9f, 0x23, 0x1b, 0x9f, 0xb0, 0x60, 0x8c, 0x95, 0x27, 0x45, 0x04, 0xc4, 0xcf, 0x7e, 0x92, - 0x2b, 0x88, 0x74, 0xd6, 0xb1, 0xf2, 0x18, 0xbf, 0x02, 0x78, 0x9c, 0xbf, 0xa7, 0x05, 0x1f, 0xd8, - 0xca, 0x77, 0x01, 0x21, 0x09, 0xd9, 0x63, 0x2a, 0x24, 0xe3, 0x41, 0x9e, 0x77, 0xf1, 0xd2, 0xfb, - 0x57, 0x83, 0xdc, 0x83, 0x2b, 0xa8, 0xdd, 0x35, 0x20, 0x0b, 0xb6, 0xd3, 0x4f, 0x19, 0x12, 0x87, - 0x6a, 0x0d, 0x05, 0xbb, 0x93, 0xc3, 0xda, 0x97, 0x85, 0x03, 0x97, 0x18, 0xfb, 0xc3, 0xeb, 0x5b, - 0xbd, 0xf6, 0xec, 0x56, 0xaf, 0xdd, 0xdc, 0xea, 0xb5, 0x6f, 0x13, 0x1d, 0x5c, 0x27, 0x3a, 0x78, - 0x96, 0xe8, 0xe0, 0x26, 0xd1, 0xc1, 0x9f, 0x89, 0x0e, 0x7e, 0xf8, 0x4b, 0xaf, 0x7d, 0xa1, 0x6d, - 0xfb, 0x17, 0xfc, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1b, 0x0e, 0xba, 0xc2, 0x39, 0x0b, 0x00, - 0x00, + proto.RegisterFile("k8s.io/api/rbac/v1alpha1/generated.proto", fileDescriptor_758889dfd9a88fa6) +} + +var fileDescriptor_758889dfd9a88fa6 = []byte{ + // 819 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0xce, 0xa4, 0x09, 0x4d, 0x26, 0x44, 0xa1, 0x43, 0x85, 0xac, 0x0a, 0x39, 0xc5, 0x02, 0xa9, + 0x88, 0x62, 0xd3, 0x82, 0x80, 0x0b, 0x48, 0x75, 0x0f, 0x28, 0x10, 0xda, 0x32, 0x15, 0x3d, 0x20, + 0x0e, 0x4c, 0x9c, 0xa9, 0x33, 0xc4, 0xbf, 0xe4, 0xb1, 0x23, 0x55, 0x5c, 0xb8, 0x70, 0x45, 0x5c, + 0x38, 0x70, 0xe7, 0xca, 0x85, 0x3d, 0xee, 0x3f, 0xd0, 0xbd, 0xf5, 0xd8, 0x53, 0xb4, 0xf5, 0xfe, + 0x21, 0xbb, 0xf2, 0xd8, 0x8e, 0x9d, 0x5f, 0x9b, 0x9c, 0x22, 0xad, 0xb4, 0xa7, 0x64, 0xde, 0xfb, + 0xde, 0xf7, 0xde, 0xfb, 0x66, 0xde, 0x4b, 0xe0, 0xc1, 0xf0, 0x4b, 0xae, 0x32, 0x57, 0x23, 0x1e, + 0xd3, 0xfc, 0x1e, 0x31, 0xb4, 0xd1, 0x11, 0xb1, 0xbc, 0x01, 0x39, 0xd2, 0x4c, 0xea, 0x50, 0x9f, + 0x04, 0xb4, 0xaf, 0x7a, 0xbe, 0x1b, 0xb8, 0x48, 0x4a, 0x90, 0x2a, 0xf1, 0x98, 0x1a, 0x23, 0xd5, + 0x0c, 0xb9, 0xf7, 0xb1, 0xc9, 0x82, 0x41, 0xd8, 0x53, 0x0d, 0xd7, 0xd6, 0x4c, 0xd7, 0x74, 0x35, + 0x11, 0xd0, 0x0b, 0xaf, 0xc5, 0x49, 0x1c, 0xc4, 0xb7, 0x84, 0x68, 0xef, 0xb3, 0x3c, 0xa5, 0x4d, + 0x8c, 0x01, 0x73, 0xa8, 0x7f, 0xa3, 0x79, 0x43, 0x33, 0x36, 0x70, 0xcd, 0xa6, 0x01, 0xd1, 0x46, + 0x73, 0xe9, 0xf7, 0xb4, 0x65, 0x51, 0x7e, 0xe8, 0x04, 0xcc, 0xa6, 0x73, 0x01, 0x9f, 0xaf, 0x0a, + 0xe0, 0xc6, 0x80, 0xda, 0x64, 0x36, 0x4e, 0xf9, 0x07, 0xc0, 0xd6, 0x89, 0x69, 0xfa, 0xd4, 0x24, + 0x01, 0x73, 0x1d, 0x1c, 0x5a, 0x14, 0xfd, 0x01, 0xe0, 0xae, 0x61, 0x85, 0x3c, 0xa0, 0x3e, 0x76, + 0x2d, 0x7a, 0x49, 0x2d, 0x6a, 0x04, 0xae, 0xcf, 0x25, 0xb0, 0xbf, 0x75, 0xd0, 0x38, 0xfe, 0x54, + 0xcd, 0xb5, 0x99, 0xe4, 0x52, 0xbd, 0xa1, 0x19, 0x1b, 0xb8, 0x1a, 0xb7, 0xa4, 0x8e, 0x8e, 0xd4, + 0x2e, 0xe9, 0x51, 0x2b, 0x8b, 0xd5, 0xdf, 0xbd, 0x1d, 0xb7, 0x4b, 0xd1, 0xb8, 0xbd, 0x7b, 0xba, + 0x80, 0x18, 0x2f, 0x4c, 0xa7, 0xfc, 0x5b, 0x86, 0x8d, 0x02, 0x1c, 0xfd, 0x02, 0x6b, 0x31, 0x79, + 0x9f, 0x04, 0x44, 0x02, 0xfb, 0xe0, 0xa0, 0x71, 0xfc, 0xc9, 0x7a, 0xa5, 0x9c, 0xf7, 0x7e, 0xa5, + 0x46, 0xf0, 0x3d, 0x0d, 0x88, 0x8e, 0xd2, 0x3a, 0x60, 0x6e, 0xc3, 0x13, 0x56, 0xd4, 0x81, 0x55, + 0x3f, 0xb4, 0x28, 0x97, 0xca, 0xa2, 0xd3, 0xf7, 0xd5, 0x65, 0xaf, 0x40, 0xbd, 0x70, 0x2d, 0x66, + 0xdc, 0xc4, 0x72, 0xe9, 0xcd, 0x94, 0xb2, 0x1a, 0x9f, 0x38, 0x4e, 0x18, 0xd0, 0x00, 0xb6, 0xc8, + 0xb4, 0xae, 0xd2, 0x96, 0xa8, 0xf9, 0xc3, 0xe5, 0xa4, 0x33, 0x17, 0xa1, 0xbf, 0x1d, 0x8d, 0xdb, + 0xb3, 0xb7, 0x83, 0x67, 0x69, 0x95, 0xbf, 0xcb, 0x10, 0x15, 0x64, 0xd2, 0x99, 0xd3, 0x67, 0x8e, + 0xb9, 
0x01, 0xb5, 0xce, 0x61, 0x8d, 0x87, 0xc2, 0x91, 0x09, 0xf6, 0xde, 0xf2, 0xde, 0x2e, 0x13, + 0xa4, 0xfe, 0x56, 0x4a, 0x59, 0x4b, 0x0d, 0x1c, 0x4f, 0x48, 0x50, 0x17, 0x6e, 0xfb, 0xae, 0x45, + 0x31, 0xbd, 0x4e, 0xb5, 0x7a, 0x09, 0x1f, 0x4e, 0x80, 0x7a, 0x2b, 0xe5, 0xdb, 0x4e, 0x0d, 0x38, + 0xa3, 0x50, 0x9e, 0x00, 0xf8, 0xce, 0xbc, 0x2e, 0x5d, 0xc6, 0x03, 0xf4, 0xf3, 0x9c, 0x36, 0xea, + 0x9a, 0x8f, 0x9a, 0xf1, 0x44, 0x99, 0x49, 0x1b, 0x99, 0xa5, 0xa0, 0xcb, 0x0f, 0xb0, 0xca, 0x02, + 0x6a, 0x67, 0xa2, 0x1c, 0x2e, 0x6f, 0x62, 0xbe, 0xbc, 0xfc, 0x35, 0x75, 0x62, 0x0a, 0x9c, 0x30, + 0x29, 0x8f, 0x01, 0x6c, 0x15, 0xc0, 0x1b, 0x68, 0xe2, 0xdb, 0xe9, 0x26, 0x3e, 0x58, 0xaf, 0x89, + 0xc5, 0xd5, 0x3f, 0x07, 0x10, 0xe6, 0x03, 0x83, 0xda, 0xb0, 0x3a, 0xa2, 0x7e, 0x2f, 0xd9, 0x27, + 0x75, 0xbd, 0x1e, 0xe3, 0xaf, 0x62, 0x03, 0x4e, 0xec, 0xe8, 0x23, 0x58, 0x27, 0x1e, 0xfb, 0xc6, + 0x77, 0x43, 0x8f, 0x4b, 0x5b, 0x02, 0xd4, 0x8c, 0xc6, 0xed, 0xfa, 0xc9, 0x45, 0x27, 0x31, 0xe2, + 0xdc, 0x1f, 0x83, 0x7d, 0xca, 0xdd, 0xd0, 0x37, 0x28, 0x97, 0x2a, 0x39, 0x18, 0x67, 0x46, 0x9c, + 0xfb, 0xd1, 0x17, 0xb0, 0x99, 0x1d, 0xce, 0x88, 0x4d, 0xb9, 0x54, 0x15, 0x01, 0x3b, 0xd1, 0xb8, + 0xdd, 0xc4, 0x45, 0x07, 0x9e, 0xc6, 0xa1, 0xaf, 0x60, 0xcb, 0x71, 0x9d, 0x0c, 0xf2, 0x23, 0xee, + 0x72, 0xe9, 0x0d, 0x11, 0x2a, 0x66, 0xf4, 0x6c, 0xda, 0x85, 0x67, 0xb1, 0xca, 0x23, 0x00, 0x2b, + 0xaf, 0xdc, 0x0e, 0x53, 0xfe, 0x2c, 0xc3, 0xc6, 0xeb, 0x95, 0x52, 0x58, 0x29, 0xf1, 0x18, 0x6e, + 0x76, 0x97, 0xac, 0x3f, 0x86, 0xab, 0x97, 0xc8, 0x7f, 0x00, 0xd6, 0x36, 0xb4, 0x3d, 0x4e, 0xa7, + 0xcb, 0x96, 0x57, 0x94, 0xbd, 0xb8, 0xde, 0xdf, 0x60, 0x76, 0x03, 0xe8, 0x10, 0xd6, 0xb2, 0x89, + 0x17, 0xd5, 0xd6, 0xf3, 0xec, 0xd9, 0x52, 0xc0, 0x13, 0x04, 0xda, 0x87, 0x95, 0x21, 0x73, 0xfa, + 0x52, 0x59, 0x20, 0xdf, 0x4c, 0x91, 0x95, 0xef, 0x98, 0xd3, 0xc7, 0xc2, 0x13, 0x23, 0x1c, 0x62, + 0x27, 0x3f, 0xc9, 0x05, 0x44, 0x3c, 0xeb, 0x58, 0x78, 0x94, 0xff, 0x01, 0xdc, 0x4e, 0xdf, 0xd3, + 0x84, 0x0f, 0x2c, 0xe5, 0x3b, 0x86, 0x90, 0x78, 0xec, 0x8a, 0xfa, 0x9c, 0xb9, 0x4e, 0x9a, 0x77, + 0xf2, 0xd2, 0x4f, 0x2e, 0x3a, 0xa9, 0x07, 0x17, 0x50, 0xab, 0x6b, 0x40, 0x1a, 0xac, 0xc7, 0x9f, + 0xdc, 0x23, 0x06, 0x95, 0x2a, 0x02, 0xb6, 0x93, 0xc2, 0xea, 0x67, 0x99, 0x03, 0xe7, 0x18, 0xfd, + 0xeb, 0xdb, 0x07, 0xb9, 0x74, 0xf7, 0x20, 0x97, 0xee, 0x1f, 0xe4, 0xd2, 0xef, 0x91, 0x0c, 0x6e, + 0x23, 0x19, 0xdc, 0x45, 0x32, 0xb8, 0x8f, 0x64, 0xf0, 0x34, 0x92, 0xc1, 0x5f, 0xcf, 0xe4, 0xd2, + 0x4f, 0xd2, 0xb2, 0x7f, 0xc1, 0x2f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xf1, 0x02, 0x55, 0xe5, 0x20, + 0x0b, 0x00, 0x00, } func (m *AggregationRule) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto index d5ceaa0e82..19d43cdee5 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto @@ -33,7 +33,8 @@ message AggregationRule { // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. // If any of the selectors match, then the ClusterRole's permissions will be added // +optional - repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; + // +listType=atomic + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; } // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. 
@@ -41,10 +42,11 @@ message AggregationRule { message ClusterRole { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this ClusterRole // +optional + // +listType=atomic repeated PolicyRule rules = 2; // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. @@ -60,10 +62,11 @@ message ClusterRole { message ClusterRoleBinding { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. // +optional + // +listType=atomic repeated Subject subjects = 2; // RoleRef can only reference a ClusterRole in the global namespace. @@ -76,7 +79,7 @@ message ClusterRoleBinding { message ClusterRoleBindingList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoleBindings repeated ClusterRoleBinding items = 2; @@ -87,7 +90,7 @@ message ClusterRoleBindingList { message ClusterRoleList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoles repeated ClusterRole items = 2; @@ -97,25 +100,30 @@ message ClusterRoleList { // about who the rule applies to or which namespace the rule applies to. message PolicyRule { // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. + // +listType=atomic repeated string verbs = 1; // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of // the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups. // +optional + // +listType=atomic repeated string apiGroups = 3; // Resources is a list of resources this rule applies to. '*' represents all resources. // +optional + // +listType=atomic repeated string resources = 4; // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. // +optional + // +listType=atomic repeated string resourceNames = 5; // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. // +optional + // +listType=atomic repeated string nonResourceURLs = 6; } @@ -124,10 +132,11 @@ message PolicyRule { message Role { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this Role // +optional + // +listType=atomic repeated PolicyRule rules = 2; } @@ -138,10 +147,11 @@ message Role { message RoleBinding { // Standard object's metadata. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. // +optional + // +listType=atomic repeated Subject subjects = 2; // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. @@ -154,7 +164,7 @@ message RoleBinding { message RoleBindingList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of RoleBindings repeated RoleBinding items = 2; @@ -165,7 +175,7 @@ message RoleBindingList { message RoleList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of Roles repeated Role items = 2; diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types.go b/vendor/k8s.io/api/rbac/v1alpha1/types.go index e0e75b1503..2146b4ce39 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types.go @@ -48,23 +48,28 @@ const ( // about who the rule applies to or which namespace the rule applies to. type PolicyRule struct { // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. + // +listType=atomic Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of // the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups. // +optional + // +listType=atomic APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,3,rep,name=apiGroups"` // Resources is a list of resources this rule applies to. '*' represents all resources. // +optional + // +listType=atomic Resources []string `json:"resources,omitempty" protobuf:"bytes,4,rep,name=resources"` // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. // +optional + // +listType=atomic ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,5,rep,name=resourceNames"` // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. // +optional + // +listType=atomic NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,6,rep,name=nonResourceURLs"` } @@ -79,7 +84,7 @@ type Subject struct { // Defaults to "rbac.authorization.k8s.io/v1alpha1" for User and Group subjects. // +k8s:conversion-gen=false // +optional - APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt.name=apiVersion"` + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"` // Name of the object being referenced. Name string `json:"name" protobuf:"bytes,3,opt,name=name"` // Namespace of the referenced object. 
If the object kind is non-namespace, such as "User" or "Group", and this value is not empty @@ -111,6 +116,7 @@ type Role struct { // Rules holds all the PolicyRules for this Role // +optional + // +listType=atomic Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` } @@ -129,6 +135,7 @@ type RoleBinding struct { // Subjects holds references to the objects the role applies to. // +optional + // +listType=atomic Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. @@ -178,6 +185,7 @@ type ClusterRole struct { // Rules holds all the PolicyRules for this ClusterRole // +optional + // +listType=atomic Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. @@ -192,6 +200,7 @@ type AggregationRule struct { // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. // If any of the selectors match, then the ClusterRole's permissions will be added // +optional + // +listType=atomic ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"` } @@ -210,6 +219,7 @@ type ClusterRoleBinding struct { // Subjects holds references to the objects the role applies to. // +optional + // +listType=atomic Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` // RoleRef can only reference a ClusterRole in the global namespace. diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go index ad6685591e..9052d7e8db 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto +// source: k8s.io/api/rbac/v1beta1/generated.proto package v1beta1 @@ -47,7 +47,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *AggregationRule) Reset() { *m = AggregationRule{} } func (*AggregationRule) ProtoMessage() {} func (*AggregationRule) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{0} + return fileDescriptor_c5bc2d145acd4e45, []int{0} } func (m *AggregationRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +75,7 @@ var xxx_messageInfo_AggregationRule proto.InternalMessageInfo func (m *ClusterRole) Reset() { *m = ClusterRole{} } func (*ClusterRole) ProtoMessage() {} func (*ClusterRole) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{1} + return fileDescriptor_c5bc2d145acd4e45, []int{1} } func (m *ClusterRole) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +103,7 @@ var xxx_messageInfo_ClusterRole proto.InternalMessageInfo func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } func (*ClusterRoleBinding) ProtoMessage() {} func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{2} + return fileDescriptor_c5bc2d145acd4e45, []int{2} } func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +131,7 @@ var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } func (*ClusterRoleBindingList) ProtoMessage() {} func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{3} + return fileDescriptor_c5bc2d145acd4e45, []int{3} } func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +159,7 @@ var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } func (*ClusterRoleList) ProtoMessage() {} func (*ClusterRoleList) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{4} + return fileDescriptor_c5bc2d145acd4e45, []int{4} } func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -187,7 +187,7 @@ var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo func (m *PolicyRule) Reset() { *m = PolicyRule{} } func (*PolicyRule) ProtoMessage() {} func (*PolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{5} + return fileDescriptor_c5bc2d145acd4e45, []int{5} } func (m *PolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -215,7 +215,7 @@ var xxx_messageInfo_PolicyRule proto.InternalMessageInfo func (m *Role) Reset() { *m = Role{} } func (*Role) ProtoMessage() {} func (*Role) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{6} + return fileDescriptor_c5bc2d145acd4e45, []int{6} } func (m *Role) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -243,7 +243,7 @@ var xxx_messageInfo_Role proto.InternalMessageInfo func (m *RoleBinding) Reset() { *m = RoleBinding{} } func (*RoleBinding) ProtoMessage() {} func (*RoleBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{7} + return fileDescriptor_c5bc2d145acd4e45, []int{7} } func (m *RoleBinding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -271,7 +271,7 @@ var xxx_messageInfo_RoleBinding proto.InternalMessageInfo func 
(m *RoleBindingList) Reset() { *m = RoleBindingList{} } func (*RoleBindingList) ProtoMessage() {} func (*RoleBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{8} + return fileDescriptor_c5bc2d145acd4e45, []int{8} } func (m *RoleBindingList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -299,7 +299,7 @@ var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo func (m *RoleList) Reset() { *m = RoleList{} } func (*RoleList) ProtoMessage() {} func (*RoleList) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{9} + return fileDescriptor_c5bc2d145acd4e45, []int{9} } func (m *RoleList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -327,7 +327,7 @@ var xxx_messageInfo_RoleList proto.InternalMessageInfo func (m *RoleRef) Reset() { *m = RoleRef{} } func (*RoleRef) ProtoMessage() {} func (*RoleRef) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{10} + return fileDescriptor_c5bc2d145acd4e45, []int{10} } func (m *RoleRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -355,7 +355,7 @@ var xxx_messageInfo_RoleRef proto.InternalMessageInfo func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} func (*Subject) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{11} + return fileDescriptor_c5bc2d145acd4e45, []int{11} } func (m *Subject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -396,62 +396,61 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto", fileDescriptor_99f6bec96facc83d) -} - -var fileDescriptor_99f6bec96facc83d = []byte{ - // 812 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xbd, 0x6f, 0x2b, 0x45, - 0x10, 0xf7, 0x3a, 0xb6, 0xe2, 0x5b, 0x63, 0x99, 0xb7, 0x3c, 0xf1, 0x4e, 0x11, 0x9c, 0x2d, 0x43, - 0x11, 0xe9, 0xc1, 0x1d, 0x79, 0x20, 0xa0, 0x89, 0x44, 0x8e, 0x02, 0xa2, 0x04, 0x13, 0x6d, 0x04, - 0x05, 0xa2, 0x60, 0xef, 0xbc, 0xb9, 0x2c, 0xbe, 0x2f, 0xed, 0xde, 0x59, 0x8a, 0x68, 0x68, 0xe8, - 0x28, 0x90, 0xa8, 0x68, 0xa9, 0xa9, 0x28, 0xf9, 0x0b, 0x5c, 0xa6, 0x4c, 0x65, 0x91, 0xe3, 0x0f, - 0x01, 0xed, 0x7d, 0xf8, 0xfc, 0x75, 0x89, 0x2b, 0x4b, 0x48, 0xaf, 0xb2, 0x77, 0xe6, 0x37, 0xbf, - 0x99, 0xf9, 0xed, 0xce, 0xd8, 0xf0, 0x93, 0xf1, 0xc7, 0x42, 0x67, 0x81, 0x31, 0x8e, 0x2d, 0xca, - 0x7d, 0x1a, 0x51, 0x61, 0x4c, 0xa8, 0x3f, 0x0a, 0xb8, 0x91, 0x3b, 0x48, 0xc8, 0x0c, 0x6e, 0x11, - 0xdb, 0x98, 0x1c, 0x59, 0x34, 0x22, 0x47, 0x86, 0x43, 0x7d, 0xca, 0x49, 0x44, 0x47, 0x7a, 0xc8, - 0x83, 0x28, 0x40, 0xcf, 0x32, 0xa0, 0x4e, 0x42, 0xa6, 0x4b, 0xa0, 0x9e, 0x03, 0x0f, 0xde, 0x75, - 0x58, 0x74, 0x1d, 0x5b, 0xba, 0x1d, 0x78, 0x86, 0x13, 0x38, 0x81, 0x91, 0xe2, 0xad, 0xf8, 0x2a, - 0x3d, 0xa5, 0x87, 0xf4, 0x5b, 0xc6, 0x73, 0xf0, 0x41, 0x99, 0xd0, 0x23, 0xf6, 0x35, 0xf3, 0x29, - 0xbf, 0x31, 0xc2, 0xb1, 0x23, 0x0d, 0xc2, 0xf0, 0x68, 0x44, 0x8c, 0xc9, 0x5a, 0xf6, 0x03, 0xa3, - 0x2a, 0x8a, 0xc7, 0x7e, 0xc4, 0x3c, 0xba, 0x16, 0xf0, 0xe1, 0x63, 0x01, 0xc2, 0xbe, 0xa6, 0x1e, - 0x59, 0x8d, 0x1b, 0xfc, 0x06, 0x60, 0xf7, 0xc4, 0x71, 0x38, 0x75, 0x48, 0xc4, 0x02, 0x1f, 0xc7, - 0x2e, 0x45, 0x3f, 0x01, 0xf8, 0xd4, 0x76, 0x63, 0x11, 0x51, 0x8e, 0x03, 0x97, 0x5e, 0x52, 0x97, - 0xda, 0x51, 0xc0, 0x85, 0x0a, 0xfa, 0x7b, 0x87, 0xed, 0x17, 0xef, 0xeb, 0xa5, 0x34, 0xf3, 0x5c, - 0x7a, 0x38, 0x76, 0xa4, 0x41, 0xe8, 0xb2, 0x25, 0x7d, 0x72, 0xa4, 0x9f, 0x13, 0x8b, 0xba, 0x45, - 0xac, 0xf9, 0xc6, 0x74, 0xd6, 
0xab, 0x25, 0xb3, 0xde, 0xd3, 0x4f, 0x37, 0x10, 0xe3, 0x8d, 0xe9, - 0x06, 0xbf, 0xd7, 0x61, 0x7b, 0x01, 0x8e, 0xbe, 0x83, 0x2d, 0x49, 0x3e, 0x22, 0x11, 0x51, 0x41, - 0x1f, 0x1c, 0xb6, 0x5f, 0xbc, 0xb7, 0x5d, 0x29, 0x5f, 0x5a, 0xdf, 0x53, 0x3b, 0xfa, 0x82, 0x46, - 0xc4, 0x44, 0x79, 0x1d, 0xb0, 0xb4, 0xe1, 0x39, 0x2b, 0xfa, 0x1c, 0x36, 0x79, 0xec, 0x52, 0xa1, - 0xd6, 0xd3, 0x4e, 0xdf, 0xd2, 0x2b, 0x1e, 0x81, 0x7e, 0x11, 0xb8, 0xcc, 0xbe, 0x91, 0x6a, 0x99, - 0x9d, 0x9c, 0xb1, 0x29, 0x4f, 0x02, 0x67, 0x04, 0xc8, 0x81, 0x5d, 0xb2, 0x2c, 0xab, 0xba, 0x97, - 0x96, 0x7c, 0x58, 0xc9, 0xb9, 0x72, 0x0d, 0xe6, 0x6b, 0xc9, 0xac, 0xb7, 0x7a, 0x37, 0x78, 0x95, - 0x75, 0xf0, 0x6b, 0x1d, 0xa2, 0x05, 0x91, 0x4c, 0xe6, 0x8f, 0x98, 0xef, 0xec, 0x40, 0xab, 0x21, - 0x6c, 0x89, 0x38, 0x75, 0x14, 0x72, 0xf5, 0x2b, 0x5b, 0xbb, 0xcc, 0x80, 0xe6, 0xab, 0x39, 0x63, - 0x2b, 0x37, 0x08, 0x3c, 0xe7, 0x40, 0x67, 0x70, 0x9f, 0x07, 0x2e, 0xc5, 0xf4, 0x2a, 0x57, 0xaa, - 0x9a, 0x0e, 0x67, 0x38, 0xb3, 0x9b, 0xd3, 0xed, 0xe7, 0x06, 0x5c, 0x30, 0x0c, 0xa6, 0x00, 0xbe, - 0xbe, 0xae, 0xca, 0x39, 0x13, 0x11, 0xfa, 0x76, 0x4d, 0x19, 0x7d, 0xcb, 0x07, 0xcd, 0x44, 0xa6, - 0xcb, 0xbc, 0x8b, 0xc2, 0xb2, 0xa0, 0xca, 0x05, 0x6c, 0xb2, 0x88, 0x7a, 0x85, 0x24, 0xcf, 0x2b, - 0x7b, 0x58, 0xaf, 0xae, 0x7c, 0x49, 0xa7, 0x92, 0x01, 0x67, 0x44, 0x83, 0xbf, 0x00, 0xec, 0x2e, - 0x80, 0x77, 0xd0, 0xc3, 0xe9, 0x72, 0x0f, 0x6f, 0x6f, 0xd5, 0xc3, 0xe6, 0xe2, 0xff, 0x05, 0x10, - 0x96, 0xb3, 0x82, 0x7a, 0xb0, 0x39, 0xa1, 0xdc, 0xca, 0x36, 0x89, 0x62, 0x2a, 0x12, 0xff, 0xb5, - 0x34, 0xe0, 0xcc, 0x8e, 0x9e, 0x43, 0x85, 0x84, 0xec, 0x33, 0x1e, 0xc4, 0x61, 0x96, 0x5e, 0x31, - 0x3b, 0xc9, 0xac, 0xa7, 0x9c, 0x5c, 0x9c, 0x66, 0x46, 0x5c, 0xfa, 0x25, 0x98, 0x53, 0x11, 0xc4, - 0xdc, 0xa6, 0x42, 0xdd, 0x2b, 0xc1, 0xb8, 0x30, 0xe2, 0xd2, 0x8f, 0x3e, 0x82, 0x9d, 0xe2, 0x30, - 0x24, 0x1e, 0x15, 0x6a, 0x23, 0x0d, 0x78, 0x92, 0xcc, 0x7a, 0x1d, 0xbc, 0xe8, 0xc0, 0xcb, 0x38, - 0x74, 0x0c, 0xbb, 0x7e, 0xe0, 0x17, 0x90, 0xaf, 0xf0, 0xb9, 0x50, 0x9b, 0x69, 0x68, 0x3a, 0x9f, - 0xc3, 0x65, 0x17, 0x5e, 0xc5, 0x0e, 0xfe, 0x04, 0xb0, 0xf1, 0x7f, 0xdb, 0x5e, 0x83, 0x9f, 0xeb, - 0xb0, 0xfd, 0x72, 0x9b, 0xcc, 0xb7, 0x89, 0x1c, 0xc1, 0xdd, 0xae, 0x91, 0xad, 0x47, 0xf0, 0xf1, - 0xfd, 0xf1, 0x07, 0x80, 0xad, 0x1d, 0x2d, 0x0e, 0x73, 0xb9, 0xea, 0x37, 0x1f, 0xae, 0x7a, 0x73, - 0xb9, 0x3f, 0xc0, 0x42, 0x7f, 0xf4, 0x0e, 0x6c, 0x15, 0xc3, 0x9e, 0x16, 0xab, 0x94, 0xc9, 0x8b, - 0x7d, 0x80, 0xe7, 0x08, 0xd4, 0x87, 0x8d, 0x31, 0xf3, 0x47, 0x6a, 0x3d, 0x45, 0xbe, 0x92, 0x23, - 0x1b, 0x67, 0xcc, 0x1f, 0xe1, 0xd4, 0x23, 0x11, 0x3e, 0xf1, 0xb2, 0x1f, 0xe2, 0x05, 0x84, 0x1c, - 0x73, 0x9c, 0x7a, 0xa4, 0x56, 0xfb, 0xf9, 0x63, 0x9a, 0xf3, 0x81, 0x4a, 0xbe, 0xc5, 0xfa, 0xea, - 0xdb, 0xd4, 0xf7, 0x70, 0x76, 0x64, 0x40, 0x45, 0x7e, 0x8a, 0x90, 0xd8, 0x54, 0x6d, 0xa4, 0xb0, - 0x27, 0x39, 0x4c, 0x19, 0x16, 0x0e, 0x5c, 0x62, 0xcc, 0xe3, 0xe9, 0xbd, 0x56, 0xbb, 0xbd, 0xd7, - 0x6a, 0x77, 0xf7, 0x5a, 0xed, 0xc7, 0x44, 0x03, 0xd3, 0x44, 0x03, 0xb7, 0x89, 0x06, 0xee, 0x12, - 0x0d, 0xfc, 0x9d, 0x68, 0xe0, 0x97, 0x7f, 0xb4, 0xda, 0x37, 0xcf, 0x2a, 0xfe, 0xf2, 0xfe, 0x17, - 0x00, 0x00, 0xff, 0xff, 0xf7, 0xdd, 0xcc, 0x2b, 0x25, 0x0b, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/rbac/v1beta1/generated.proto", fileDescriptor_c5bc2d145acd4e45) +} + +var fileDescriptor_c5bc2d145acd4e45 = []byte{ + // 800 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x3b, 0x6f, 0xe3, 0x46, + 0x10, 0xd6, 0xca, 0x12, 0x2c, 0xae, 0x22, 0x28, 0xde, 0x18, 0x31, 0x61, 0x24, 
0x94, 0xa0, 0x04, + 0x88, 0x01, 0x27, 0x64, 0xec, 0x04, 0x49, 0x1a, 0x17, 0x66, 0x8a, 0xc4, 0xb0, 0xa3, 0x18, 0x6b, + 0x24, 0x45, 0x90, 0x22, 0x2b, 0x6a, 0x4d, 0x6f, 0xc4, 0x17, 0xb8, 0xa4, 0x00, 0x23, 0x4d, 0x9a, + 0xeb, 0xae, 0x38, 0xe0, 0xaa, 0x6b, 0xaf, 0xbe, 0xea, 0xca, 0xfb, 0x05, 0x2a, 0x5d, 0xba, 0x12, + 0xce, 0xbc, 0x1f, 0x72, 0x87, 0xe5, 0x43, 0xd4, 0x8b, 0xb6, 0x2a, 0x01, 0x07, 0x5c, 0x25, 0xed, + 0xcc, 0x37, 0xdf, 0xcc, 0x7c, 0xbb, 0x33, 0x12, 0xfc, 0x6a, 0xf0, 0x13, 0x57, 0x99, 0xab, 0x11, + 0x8f, 0x69, 0x7e, 0x8f, 0x18, 0xda, 0xf0, 0xa0, 0x47, 0x03, 0x72, 0xa0, 0x99, 0xd4, 0xa1, 0x3e, + 0x09, 0x68, 0x5f, 0xf5, 0x7c, 0x37, 0x70, 0xd1, 0x4e, 0x02, 0x54, 0x89, 0xc7, 0x54, 0x01, 0x54, + 0x53, 0xe0, 0xee, 0x37, 0x26, 0x0b, 0xae, 0xc2, 0x9e, 0x6a, 0xb8, 0xb6, 0x66, 0xba, 0xa6, 0xab, + 0xc5, 0xf8, 0x5e, 0x78, 0x19, 0x9f, 0xe2, 0x43, 0xfc, 0x2d, 0xe1, 0xd9, 0xfd, 0x3e, 0x4f, 0x68, + 0x13, 0xe3, 0x8a, 0x39, 0xd4, 0xbf, 0xd6, 0xbc, 0x81, 0x29, 0x0c, 0x5c, 0xb3, 0x69, 0x40, 0xb4, + 0xe1, 0x42, 0xf6, 0x5d, 0xad, 0x28, 0xca, 0x0f, 0x9d, 0x80, 0xd9, 0x74, 0x21, 0xe0, 0x87, 0x87, + 0x02, 0xb8, 0x71, 0x45, 0x6d, 0x32, 0x1f, 0xd7, 0x79, 0x06, 0x60, 0xf3, 0xd8, 0x34, 0x7d, 0x6a, + 0x92, 0x80, 0xb9, 0x0e, 0x0e, 0x2d, 0x8a, 0x1e, 0x01, 0xb8, 0x6d, 0x58, 0x21, 0x0f, 0xa8, 0x8f, + 0x5d, 0x8b, 0x5e, 0x50, 0x8b, 0x1a, 0x81, 0xeb, 0x73, 0x19, 0xb4, 0x37, 0xf6, 0xea, 0x87, 0xdf, + 0xa9, 0xb9, 0x34, 0x93, 0x5c, 0xaa, 0x37, 0x30, 0x85, 0x81, 0xab, 0xa2, 0x25, 0x75, 0x78, 0xa0, + 0x9e, 0x91, 0x1e, 0xb5, 0xb2, 0x58, 0xfd, 0xb3, 0xd1, 0xb8, 0x55, 0x8a, 0xc6, 0xad, 0xed, 0x9f, + 0x97, 0x10, 0xe3, 0xa5, 0xe9, 0x3a, 0xcf, 0xcb, 0xb0, 0x3e, 0x05, 0x47, 0xff, 0xc0, 0x9a, 0x20, + 0xef, 0x93, 0x80, 0xc8, 0xa0, 0x0d, 0xf6, 0xea, 0x87, 0xdf, 0xae, 0x56, 0xca, 0xef, 0xbd, 0x7f, + 0xa9, 0x11, 0xfc, 0x46, 0x03, 0xa2, 0xa3, 0xb4, 0x0e, 0x98, 0xdb, 0xf0, 0x84, 0x15, 0xfd, 0x0a, + 0xab, 0x7e, 0x68, 0x51, 0x2e, 0x97, 0xe3, 0x4e, 0xbf, 0x50, 0x0b, 0x1e, 0x81, 0x7a, 0xee, 0x5a, + 0xcc, 0xb8, 0x16, 0x6a, 0xe9, 0x8d, 0x94, 0xb1, 0x2a, 0x4e, 0x1c, 0x27, 0x04, 0xc8, 0x84, 0x4d, + 0x32, 0x2b, 0xab, 0xbc, 0x11, 0x97, 0xbc, 0x57, 0xc8, 0x39, 0x77, 0x0d, 0xfa, 0x27, 0xd1, 0xb8, + 0x35, 0x7f, 0x37, 0x78, 0x9e, 0xb5, 0xf3, 0xb4, 0x0c, 0xd1, 0x94, 0x48, 0x3a, 0x73, 0xfa, 0xcc, + 0x31, 0xd7, 0xa0, 0x55, 0x17, 0xd6, 0x78, 0x18, 0x3b, 0x32, 0xb9, 0xda, 0x85, 0xad, 0x5d, 0x24, + 0x40, 0xfd, 0xe3, 0x94, 0xb1, 0x96, 0x1a, 0x38, 0x9e, 0x70, 0xa0, 0x53, 0xb8, 0xe9, 0xbb, 0x16, + 0xc5, 0xf4, 0x32, 0x55, 0xaa, 0x98, 0x0e, 0x27, 0x38, 0xbd, 0x99, 0xd2, 0x6d, 0xa6, 0x06, 0x9c, + 0x31, 0x74, 0x46, 0x00, 0x7e, 0xba, 0xa8, 0xca, 0x19, 0xe3, 0x01, 0xfa, 0x7b, 0x41, 0x19, 0x75, + 0xc5, 0x07, 0xcd, 0x78, 0xa2, 0xcb, 0xa4, 0x8b, 0xcc, 0x32, 0xa5, 0xca, 0x39, 0xac, 0xb2, 0x80, + 0xda, 0x99, 0x24, 0xfb, 0x85, 0x3d, 0x2c, 0x56, 0x97, 0xbf, 0xa4, 0x13, 0xc1, 0x80, 0x13, 0xa2, + 0xce, 0x2b, 0x00, 0x9b, 0x53, 0xe0, 0x35, 0xf4, 0x70, 0x32, 0xdb, 0xc3, 0x97, 0x2b, 0xf5, 0xb0, + 0xbc, 0xf8, 0xb7, 0x00, 0xc2, 0x7c, 0x56, 0x50, 0x0b, 0x56, 0x87, 0xd4, 0xef, 0x25, 0x9b, 0x44, + 0xd2, 0x25, 0x81, 0xff, 0x53, 0x18, 0x70, 0x62, 0x47, 0xfb, 0x50, 0x22, 0x1e, 0xfb, 0xc5, 0x77, + 0x43, 0x2f, 0x49, 0x2f, 0xe9, 0x8d, 0x68, 0xdc, 0x92, 0x8e, 0xcf, 0x4f, 0x12, 0x23, 0xce, 0xfd, + 0x02, 0xec, 0x53, 0xee, 0x86, 0xbe, 0x41, 0xb9, 0xbc, 0x91, 0x83, 0x71, 0x66, 0xc4, 0xb9, 0x1f, + 0xfd, 0x08, 0x1b, 0xd9, 0xa1, 0x4b, 0x6c, 0xca, 0xe5, 0x4a, 0x1c, 0xb0, 0x15, 0x8d, 0x5b, 0x0d, + 0x3c, 0xed, 0xc0, 0xb3, 0x38, 0x74, 0x04, 0x9b, 0x8e, 0xeb, 0x64, 0x90, 0x3f, 0xf0, 0x19, 0x97, + 0xab, 
0x71, 0x68, 0x3c, 0x9f, 0xdd, 0x59, 0x17, 0x9e, 0xc7, 0x76, 0x5e, 0x02, 0x58, 0x79, 0xdf, + 0xb6, 0x57, 0xe7, 0x71, 0x19, 0xd6, 0x3f, 0x6c, 0x93, 0xc9, 0x36, 0x11, 0x23, 0xb8, 0xde, 0x35, + 0xb2, 0xf2, 0x08, 0x3e, 0xbc, 0x3f, 0x5e, 0x00, 0x58, 0x5b, 0xd3, 0xe2, 0xd0, 0x67, 0xab, 0xfe, + 0xfc, 0xfe, 0xaa, 0x97, 0x97, 0xfb, 0x1f, 0xcc, 0xf4, 0x47, 0x5f, 0xc3, 0x5a, 0x36, 0xec, 0x71, + 0xb1, 0x52, 0x9e, 0x3c, 0xdb, 0x07, 0x78, 0x82, 0x40, 0x6d, 0x58, 0x19, 0x30, 0xa7, 0x2f, 0x97, + 0x63, 0xe4, 0x47, 0x29, 0xb2, 0x72, 0xca, 0x9c, 0x3e, 0x8e, 0x3d, 0x02, 0xe1, 0x10, 0x3b, 0xf9, + 0x21, 0x9e, 0x42, 0x88, 0x31, 0xc7, 0xb1, 0x47, 0x68, 0xb5, 0x99, 0x3e, 0xa6, 0x09, 0x1f, 0x28, + 0xe4, 0x9b, 0xae, 0xaf, 0xbc, 0x4a, 0x7d, 0xf7, 0x67, 0x47, 0x1a, 0x94, 0xc4, 0x27, 0xf7, 0x88, + 0x41, 0xe5, 0x4a, 0x0c, 0xdb, 0x4a, 0x61, 0x52, 0x37, 0x73, 0xe0, 0x1c, 0xa3, 0x1f, 0x8d, 0xee, + 0x94, 0xd2, 0xcd, 0x9d, 0x52, 0xba, 0xbd, 0x53, 0x4a, 0xff, 0x47, 0x0a, 0x18, 0x45, 0x0a, 0xb8, + 0x89, 0x14, 0x70, 0x1b, 0x29, 0xe0, 0x75, 0xa4, 0x80, 0x27, 0x6f, 0x94, 0xd2, 0x5f, 0x3b, 0x05, + 0x7f, 0x79, 0xdf, 0x05, 0x00, 0x00, 0xff, 0xff, 0x75, 0xfb, 0x5a, 0x79, 0x0c, 0x0b, 0x00, 0x00, } func (m *AggregationRule) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.proto b/vendor/k8s.io/api/rbac/v1beta1/generated.proto index f6b2f0dde1..8bfbd0c8ac 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.proto @@ -33,7 +33,8 @@ message AggregationRule { // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. // If any of the selectors match, then the ClusterRole's permissions will be added // +optional - repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; + // +listType=atomic + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; } // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. @@ -41,10 +42,11 @@ message AggregationRule { message ClusterRole { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this ClusterRole // +optional + // +listType=atomic repeated PolicyRule rules = 2; // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. @@ -60,10 +62,11 @@ message ClusterRole { message ClusterRoleBinding { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. // +optional + // +listType=atomic repeated Subject subjects = 2; // RoleRef can only reference a ClusterRole in the global namespace. @@ -76,7 +79,7 @@ message ClusterRoleBinding { message ClusterRoleBindingList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoleBindings repeated ClusterRoleBinding items = 2; @@ -87,7 +90,7 @@ message ClusterRoleBindingList { message ClusterRoleList { // Standard object's metadata. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoles repeated ClusterRole items = 2; @@ -97,26 +100,31 @@ message ClusterRoleList { // about who the rule applies to or which namespace the rule applies to. message PolicyRule { // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. + // +listType=atomic repeated string verbs = 1; // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of // the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups. // +optional + // +listType=atomic repeated string apiGroups = 2; // Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. // '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups. // +optional + // +listType=atomic repeated string resources = 3; // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. // +optional + // +listType=atomic repeated string resourceNames = 4; // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. // +optional + // +listType=atomic repeated string nonResourceURLs = 5; } @@ -125,10 +133,11 @@ message PolicyRule { message Role { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this Role // +optional + // +listType=atomic repeated PolicyRule rules = 2; } @@ -139,10 +148,11 @@ message Role { message RoleBinding { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. // +optional + // +listType=atomic repeated Subject subjects = 2; // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. @@ -155,7 +165,7 @@ message RoleBinding { message RoleBindingList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of RoleBindings repeated RoleBinding items = 2; @@ -166,7 +176,7 @@ message RoleBindingList { message RoleList { // Standard object's metadata. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of Roles repeated Role items = 2; diff --git a/vendor/k8s.io/api/rbac/v1beta1/types.go b/vendor/k8s.io/api/rbac/v1beta1/types.go index 4941cd2abc..9cfaaceb92 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types.go @@ -48,24 +48,29 @@ const ( // about who the rule applies to or which namespace the rule applies to. type PolicyRule struct { // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. + // +listType=atomic Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of // the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups. // +optional + // +listType=atomic APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` // Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. // '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups. // +optional + // +listType=atomic Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. // +optional + // +listType=atomic ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"` // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. // +optional + // +listType=atomic NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,5,rep,name=nonResourceURLs"` } @@ -79,7 +84,7 @@ type Subject struct { // Defaults to "" for ServiceAccount subjects. // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. // +optional - APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt.name=apiGroup"` + APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt,name=apiGroup"` // Name of the object being referenced. Name string `json:"name" protobuf:"bytes,3,opt,name=name"` // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty @@ -115,6 +120,7 @@ type Role struct { // Rules holds all the PolicyRules for this Role // +optional + // +listType=atomic Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` } @@ -137,6 +143,7 @@ type RoleBinding struct { // Subjects holds references to the objects the role applies to. // +optional + // +listType=atomic Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. 
@@ -198,6 +205,7 @@ type ClusterRole struct { // Rules holds all the PolicyRules for this ClusterRole // +optional + // +listType=atomic Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be @@ -211,6 +219,7 @@ type AggregationRule struct { // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. // If any of the selectors match, then the ClusterRole's permissions will be added // +optional + // +listType=atomic ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"` } @@ -233,6 +242,7 @@ type ClusterRoleBinding struct { // Subjects holds references to the objects the role applies to. // +optional + // +listType=atomic Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` // RoleRef can only reference a ClusterRole in the global namespace. diff --git a/vendor/k8s.io/api/resource/v1alpha2/generated.pb.go b/vendor/k8s.io/api/resource/v1alpha2/generated.pb.go deleted file mode 100644 index 2e8f9c724a..0000000000 --- a/vendor/k8s.io/api/resource/v1alpha2/generated.pb.go +++ /dev/null @@ -1,4817 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/resource/v1alpha2/generated.proto - -package v1alpha2 - -import ( - fmt "fmt" - - io "io" - - proto "github.com/gogo/protobuf/proto" - v1 "k8s.io/api/core/v1" - - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" - - k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *AllocationResult) Reset() { *m = AllocationResult{} } -func (*AllocationResult) ProtoMessage() {} -func (*AllocationResult) Descriptor() ([]byte, []int) { - return fileDescriptor_3add37bbd52889e0, []int{0} -} -func (m *AllocationResult) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllocationResult) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllocationResult.Merge(m, src) -} -func (m *AllocationResult) XXX_Size() int { - return m.Size() -} -func (m *AllocationResult) XXX_DiscardUnknown() { - xxx_messageInfo_AllocationResult.DiscardUnknown(m) -} - -var xxx_messageInfo_AllocationResult proto.InternalMessageInfo - -func (m *PodSchedulingContext) Reset() { *m = PodSchedulingContext{} } -func (*PodSchedulingContext) ProtoMessage() {} -func (*PodSchedulingContext) Descriptor() ([]byte, []int) { - return fileDescriptor_3add37bbd52889e0, []int{1} -} -func (m *PodSchedulingContext) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSchedulingContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSchedulingContext) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSchedulingContext.Merge(m, src) -} -func (m *PodSchedulingContext) XXX_Size() int { - return m.Size() -} -func (m *PodSchedulingContext) XXX_DiscardUnknown() { - xxx_messageInfo_PodSchedulingContext.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSchedulingContext proto.InternalMessageInfo - -func (m *PodSchedulingContextList) Reset() { *m = PodSchedulingContextList{} } -func (*PodSchedulingContextList) ProtoMessage() {} -func (*PodSchedulingContextList) Descriptor() ([]byte, []int) { - return fileDescriptor_3add37bbd52889e0, []int{2} -} -func (m *PodSchedulingContextList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSchedulingContextList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSchedulingContextList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSchedulingContextList.Merge(m, src) -} -func (m *PodSchedulingContextList) XXX_Size() int { - return m.Size() -} -func (m *PodSchedulingContextList) XXX_DiscardUnknown() { - xxx_messageInfo_PodSchedulingContextList.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSchedulingContextList proto.InternalMessageInfo - -func (m *PodSchedulingContextSpec) Reset() { *m = PodSchedulingContextSpec{} } -func (*PodSchedulingContextSpec) ProtoMessage() {} -func (*PodSchedulingContextSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3add37bbd52889e0, []int{3} -} -func (m *PodSchedulingContextSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSchedulingContextSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSchedulingContextSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSchedulingContextSpec.Merge(m, src) -} -func (m *PodSchedulingContextSpec) XXX_Size() 
int { - return m.Size() -} -func (m *PodSchedulingContextSpec) XXX_DiscardUnknown() { - xxx_messageInfo_PodSchedulingContextSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSchedulingContextSpec proto.InternalMessageInfo - -func (m *PodSchedulingContextStatus) Reset() { *m = PodSchedulingContextStatus{} } -func (*PodSchedulingContextStatus) ProtoMessage() {} -func (*PodSchedulingContextStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_3add37bbd52889e0, []int{4} -} -func (m *PodSchedulingContextStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSchedulingContextStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSchedulingContextStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSchedulingContextStatus.Merge(m, src) -} -func (m *PodSchedulingContextStatus) XXX_Size() int { - return m.Size() -} -func (m *PodSchedulingContextStatus) XXX_DiscardUnknown() { - xxx_messageInfo_PodSchedulingContextStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSchedulingContextStatus proto.InternalMessageInfo - -func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } -func (*ResourceClaim) ProtoMessage() {} -func (*ResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_3add37bbd52889e0, []int{5} -} -func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaim) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaim.Merge(m, src) -} -func (m *ResourceClaim) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaim) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaim.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo - -func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} } -func (*ResourceClaimConsumerReference) ProtoMessage() {} -func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) { - return fileDescriptor_3add37bbd52889e0, []int{6} -} -func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaimConsumerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaimConsumerReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaimConsumerReference.Merge(m, src) -} -func (m *ResourceClaimConsumerReference) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaimConsumerReference) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaimConsumerReference.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo - -func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} } -func (*ResourceClaimList) ProtoMessage() {} -func (*ResourceClaimList) Descriptor() ([]byte, []int) { - return fileDescriptor_3add37bbd52889e0, []int{7} -} -func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return 
nil, err - } - return b[:n], nil -} -func (m *ResourceClaimList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaimList.Merge(m, src) -} -func (m *ResourceClaimList) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaimList) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaimList.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo - -func (m *ResourceClaimParametersReference) Reset() { *m = ResourceClaimParametersReference{} } -func (*ResourceClaimParametersReference) ProtoMessage() {} -func (*ResourceClaimParametersReference) Descriptor() ([]byte, []int) { - return fileDescriptor_3add37bbd52889e0, []int{8} -} -func (m *ResourceClaimParametersReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaimParametersReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaimParametersReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaimParametersReference.Merge(m, src) -} -func (m *ResourceClaimParametersReference) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaimParametersReference) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaimParametersReference.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaimParametersReference proto.InternalMessageInfo - -func (m *ResourceClaimSchedulingStatus) Reset() { *m = ResourceClaimSchedulingStatus{} } -func (*ResourceClaimSchedulingStatus) ProtoMessage() {} -func (*ResourceClaimSchedulingStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_3add37bbd52889e0, []int{9} -} -func (m *ResourceClaimSchedulingStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaimSchedulingStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaimSchedulingStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaimSchedulingStatus.Merge(m, src) -} -func (m *ResourceClaimSchedulingStatus) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaimSchedulingStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaimSchedulingStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaimSchedulingStatus proto.InternalMessageInfo - -func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} } -func (*ResourceClaimSpec) ProtoMessage() {} -func (*ResourceClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3add37bbd52889e0, []int{10} -} -func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaimSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaimSpec.Merge(m, src) -} -func (m *ResourceClaimSpec) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaimSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaimSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo - -func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} } -func (*ResourceClaimStatus) ProtoMessage() {} -func (*ResourceClaimStatus) Descriptor() ([]byte, []int) { - return 
fileDescriptor_3add37bbd52889e0, []int{11} -} -func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaimStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaimStatus.Merge(m, src) -} -func (m *ResourceClaimStatus) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaimStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaimStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo - -func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} } -func (*ResourceClaimTemplate) ProtoMessage() {} -func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_3add37bbd52889e0, []int{12} -} -func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaimTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaimTemplate) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaimTemplate.Merge(m, src) -} -func (m *ResourceClaimTemplate) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaimTemplate) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaimTemplate.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo - -func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} } -func (*ResourceClaimTemplateList) ProtoMessage() {} -func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_3add37bbd52889e0, []int{13} -} -func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaimTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaimTemplateList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaimTemplateList.Merge(m, src) -} -func (m *ResourceClaimTemplateList) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaimTemplateList) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaimTemplateList.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo - -func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} } -func (*ResourceClaimTemplateSpec) ProtoMessage() {} -func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3add37bbd52889e0, []int{14} -} -func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaimTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaimTemplateSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaimTemplateSpec.Merge(m, src) -} -func (m *ResourceClaimTemplateSpec) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaimTemplateSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaimTemplateSpec.DiscardUnknown(m) -} - -var 
xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo - -func (m *ResourceClass) Reset() { *m = ResourceClass{} } -func (*ResourceClass) ProtoMessage() {} -func (*ResourceClass) Descriptor() ([]byte, []int) { - return fileDescriptor_3add37bbd52889e0, []int{15} -} -func (m *ResourceClass) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClass) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClass.Merge(m, src) -} -func (m *ResourceClass) XXX_Size() int { - return m.Size() -} -func (m *ResourceClass) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClass.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClass proto.InternalMessageInfo - -func (m *ResourceClassList) Reset() { *m = ResourceClassList{} } -func (*ResourceClassList) ProtoMessage() {} -func (*ResourceClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_3add37bbd52889e0, []int{16} -} -func (m *ResourceClassList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClassList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClassList.Merge(m, src) -} -func (m *ResourceClassList) XXX_Size() int { - return m.Size() -} -func (m *ResourceClassList) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClassList.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClassList proto.InternalMessageInfo - -func (m *ResourceClassParametersReference) Reset() { *m = ResourceClassParametersReference{} } -func (*ResourceClassParametersReference) ProtoMessage() {} -func (*ResourceClassParametersReference) Descriptor() ([]byte, []int) { - return fileDescriptor_3add37bbd52889e0, []int{17} -} -func (m *ResourceClassParametersReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClassParametersReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClassParametersReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClassParametersReference.Merge(m, src) -} -func (m *ResourceClassParametersReference) XXX_Size() int { - return m.Size() -} -func (m *ResourceClassParametersReference) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClassParametersReference.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClassParametersReference proto.InternalMessageInfo - -func (m *ResourceHandle) Reset() { *m = ResourceHandle{} } -func (*ResourceHandle) ProtoMessage() {} -func (*ResourceHandle) Descriptor() ([]byte, []int) { - return fileDescriptor_3add37bbd52889e0, []int{18} -} -func (m *ResourceHandle) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceHandle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceHandle) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceHandle.Merge(m, src) -} -func (m *ResourceHandle) XXX_Size() int { - return m.Size() -} -func (m *ResourceHandle) XXX_DiscardUnknown() { 
- xxx_messageInfo_ResourceHandle.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceHandle proto.InternalMessageInfo - -func init() { - proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1alpha2.AllocationResult") - proto.RegisterType((*PodSchedulingContext)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContext") - proto.RegisterType((*PodSchedulingContextList)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextList") - proto.RegisterType((*PodSchedulingContextSpec)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextSpec") - proto.RegisterType((*PodSchedulingContextStatus)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextStatus") - proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaim") - proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimConsumerReference") - proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimList") - proto.RegisterType((*ResourceClaimParametersReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimParametersReference") - proto.RegisterType((*ResourceClaimSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimSchedulingStatus") - proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimSpec") - proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimStatus") - proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplate") - proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplateList") - proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplateSpec") - proto.RegisterType((*ResourceClass)(nil), "k8s.io.api.resource.v1alpha2.ResourceClass") - proto.RegisterType((*ResourceClassList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClassList") - proto.RegisterType((*ResourceClassParametersReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClassParametersReference") - proto.RegisterType((*ResourceHandle)(nil), "k8s.io.api.resource.v1alpha2.ResourceHandle") -} - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/resource/v1alpha2/generated.proto", fileDescriptor_3add37bbd52889e0) -} - -var fileDescriptor_3add37bbd52889e0 = []byte{ - // 1233 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xcf, 0xda, 0x6e, 0x95, 0x4c, 0x1a, 0x37, 0xd9, 0xb6, 0xe0, 0x46, 0xad, 0x63, 0xf6, 0x14, - 0x89, 0xb2, 0xdb, 0x06, 0x54, 0x2a, 0xfe, 0x49, 0xd9, 0x06, 0x4a, 0x04, 0x4d, 0xc3, 0x98, 0x8a, - 0x16, 0x21, 0xd4, 0xc9, 0xee, 0xab, 0xbd, 0x64, 0xff, 0xb1, 0x33, 0x6b, 0xa8, 0xb8, 0xf4, 0x23, - 0xf4, 0xc0, 0x01, 0x4e, 0x1c, 0xf9, 0x02, 0x7c, 0x03, 0x84, 0xd4, 0x63, 0x11, 0x1c, 0x7a, 0xb2, - 0xa8, 0xf9, 0x08, 0x9c, 0xe8, 0x09, 0xcd, 0x78, 0x77, 0xbd, 0xb3, 0xf6, 0x9a, 0x38, 0x07, 0x0b, - 0x4e, 0xc9, 0xcc, 0xfb, 0xbd, 0xdf, 0xfb, 0x37, 0xef, 0xcd, 0xac, 0xd1, 0xbb, 0x87, 0xd7, 0xa8, - 0xee, 0x04, 0xc6, 0x61, 0x7c, 0x00, 0x91, 0x0f, 0x0c, 0xa8, 0xd1, 0x03, 0xdf, 0x0e, 0x22, 0x23, - 0x11, 0x90, 0xd0, 0x31, 0x22, 0xa0, 0x41, 0x1c, 0x59, 0x60, 0xf4, 0xae, 0x10, 0x37, 0xec, 0x92, - 0x2d, 0xa3, 0x03, 0x3e, 0x44, 0x84, 0x81, 0xad, 0x87, 0x51, 0xc0, 0x02, 0xf5, 0xc2, 0x10, 0xad, - 0x93, 0xd0, 0xd1, 0x53, 0xb4, 0x9e, 0xa2, 0xd7, 0x5f, 0xe9, 0x38, 0xac, 0x1b, 0x1f, 0xe8, 0x56, - 0xe0, 0x19, 0x9d, 0xa0, 0x13, 0x18, 0x42, 0xe9, 
0x20, 0xbe, 0x2f, 0x56, 0x62, 0x21, 0xfe, 0x1b, - 0x92, 0xad, 0x6b, 0x39, 0xd3, 0x56, 0x10, 0x71, 0xb3, 0x45, 0x83, 0xeb, 0xaf, 0x8d, 0x30, 0x1e, - 0xb1, 0xba, 0x8e, 0x0f, 0xd1, 0x03, 0x23, 0x3c, 0xec, 0xf0, 0x0d, 0x6a, 0x78, 0xc0, 0xc8, 0x24, - 0x2d, 0xa3, 0x4c, 0x2b, 0x8a, 0x7d, 0xe6, 0x78, 0x30, 0xa6, 0x70, 0xf5, 0xdf, 0x14, 0xa8, 0xd5, - 0x05, 0x8f, 0x14, 0xf5, 0xb4, 0xef, 0x2a, 0x68, 0x75, 0xdb, 0x75, 0x03, 0x8b, 0x30, 0x27, 0xf0, - 0x31, 0xd0, 0xd8, 0x65, 0x6a, 0x80, 0x4e, 0xa7, 0xb9, 0x79, 0x9f, 0xf8, 0xb6, 0x0b, 0xb4, 0xa1, - 0xb4, 0xaa, 0x9b, 0xcb, 0x5b, 0x97, 0xf4, 0x69, 0xe9, 0xd3, 0xb1, 0xa4, 0x64, 0xbe, 0xf8, 0xb8, - 0xbf, 0xb1, 0x30, 0xe8, 0x6f, 0x9c, 0x96, 0xf7, 0x29, 0x2e, 0xb2, 0xab, 0x07, 0x68, 0x95, 0xf4, - 0x88, 0xe3, 0x92, 0x03, 0x17, 0x6e, 0xf9, 0x7b, 0x81, 0x0d, 0xb4, 0x51, 0x69, 0x29, 0x9b, 0xcb, - 0x5b, 0xad, 0xbc, 0x45, 0x9e, 0x63, 0xbd, 0x77, 0x45, 0xe7, 0x80, 0x36, 0xb8, 0x60, 0xb1, 0x20, - 0x32, 0xcf, 0x0e, 0xfa, 0x1b, 0xab, 0xdb, 0x05, 0x6d, 0x3c, 0xc6, 0xa7, 0x1a, 0x68, 0x89, 0x76, - 0x49, 0x04, 0x7c, 0xaf, 0x51, 0x6d, 0x29, 0x9b, 0x8b, 0xe6, 0x5a, 0xe2, 0xe0, 0x52, 0x3b, 0x15, - 0xe0, 0x11, 0x46, 0xfb, 0xa9, 0x82, 0xce, 0xee, 0x07, 0x76, 0xdb, 0xea, 0x82, 0x1d, 0xbb, 0x8e, - 0xdf, 0xb9, 0x1e, 0xf8, 0x0c, 0xbe, 0x66, 0xea, 0x3d, 0xb4, 0xc8, 0xeb, 0x66, 0x13, 0x46, 0x1a, - 0x8a, 0xf0, 0xf2, 0x72, 0xce, 0xcb, 0x2c, 0xfd, 0x7a, 0x78, 0xd8, 0xe1, 0x1b, 0x54, 0xe7, 0x68, - 0xee, 0xf7, 0xad, 0x83, 0x2f, 0xc0, 0x62, 0x37, 0x81, 0x11, 0x53, 0x4d, 0x4c, 0xa3, 0xd1, 0x1e, - 0xce, 0x58, 0xd5, 0x3b, 0xa8, 0x46, 0x43, 0xb0, 0x92, 0x1c, 0x5c, 0x9d, 0x9e, 0xf5, 0x49, 0x3e, - 0xb6, 0x43, 0xb0, 0xcc, 0x53, 0x89, 0x8d, 0x1a, 0x5f, 0x61, 0xc1, 0xa8, 0xde, 0x43, 0x27, 0x29, - 0x23, 0x2c, 0xa6, 0x22, 0x05, 0xcb, 0x5b, 0xd7, 0x8e, 0xc1, 0x2d, 0xf4, 0xcd, 0x7a, 0xc2, 0x7e, - 0x72, 0xb8, 0xc6, 0x09, 0xaf, 0xf6, 0xab, 0x82, 0x1a, 0x93, 0xd4, 0x3e, 0x74, 0x28, 0x53, 0x3f, - 0x1b, 0x4b, 0x9d, 0x7e, 0xb4, 0xd4, 0x71, 0x6d, 0x91, 0xb8, 0xd5, 0xc4, 0xec, 0x62, 0xba, 0x93, - 0x4b, 0xdb, 0x27, 0xe8, 0x84, 0xc3, 0xc0, 0xe3, 0x67, 0x87, 0x9f, 0xd6, 0xad, 0xd9, 0x63, 0x33, - 0x57, 0x12, 0xfa, 0x13, 0xbb, 0x9c, 0x08, 0x0f, 0xf9, 0xb4, 0x47, 0x25, 0x31, 0xf1, 0xc4, 0xaa, - 0xd7, 0xd0, 0x29, 0x2a, 0x0e, 0x23, 0xd8, 0xfc, 0xa4, 0x89, 0xb8, 0x96, 0xcc, 0xb3, 0x09, 0xd1, - 0xa9, 0x76, 0x4e, 0x86, 0x25, 0xa4, 0xfa, 0x06, 0xaa, 0x87, 0x01, 0x03, 0x9f, 0x39, 0xc4, 0x4d, - 0x0f, 0x7d, 0x75, 0x73, 0xc9, 0x54, 0x07, 0xfd, 0x8d, 0xfa, 0xbe, 0x24, 0xc1, 0x05, 0xa4, 0xf6, - 0xbd, 0x82, 0xd6, 0xcb, 0xab, 0xa3, 0x7e, 0x83, 0xea, 0x69, 0xc4, 0xd7, 0x5d, 0xe2, 0x78, 0x69, - 0x07, 0xbf, 0x79, 0xb4, 0x0e, 0x16, 0x3a, 0x23, 0xee, 0xa4, 0xe4, 0x2f, 0x24, 0x31, 0xd5, 0x25, - 0x18, 0xc5, 0x05, 0x53, 0xda, 0x0f, 0x15, 0xb4, 0x22, 0x41, 0xe6, 0xd0, 0x32, 0x1f, 0x49, 0x2d, - 0x63, 0xcc, 0x12, 0x66, 0x59, 0xaf, 0xdc, 0x2d, 0xf4, 0xca, 0x95, 0x59, 0x48, 0xa7, 0x37, 0xc9, - 0x40, 0x41, 0x4d, 0x09, 0x7f, 0x3d, 0xf0, 0x69, 0xec, 0x41, 0x84, 0xe1, 0x3e, 0x44, 0xe0, 0x5b, - 0xa0, 0x5e, 0x42, 0x8b, 0x24, 0x74, 0x6e, 0x44, 0x41, 0x1c, 0x26, 0x47, 0x2a, 0x3b, 0xfa, 0xdb, - 0xfb, 0xbb, 0x62, 0x1f, 0x67, 0x08, 0x8e, 0x4e, 0x3d, 0x12, 0xde, 0xe6, 0xd0, 0xa9, 0x1d, 0x9c, - 0x21, 0xd4, 0x16, 0xaa, 0xf9, 0xc4, 0x83, 0x46, 0x4d, 0x20, 0xb3, 0xd8, 0xf7, 0x88, 0x07, 0x58, - 0x48, 0x54, 0x13, 0x55, 0x63, 0xc7, 0x6e, 0x9c, 0x10, 0x80, 0xcb, 0x09, 0xa0, 0x7a, 0x7b, 0x77, - 0xe7, 0x79, 0x7f, 0xe3, 0xa5, 0xb2, 0xbb, 0x86, 0x3d, 0x08, 0x81, 0xea, 0xb7, 0x77, 0x77, 0x30, - 0x57, 0xd6, 0x7e, 0x56, 0xd0, 0x9a, 0x14, 0xe4, 0x1c, 0x46, 0xc0, 0xbe, 
0x3c, 0x02, 0x5e, 0x9e, - 0xa1, 0x64, 0x25, 0xbd, 0xff, 0xad, 0x82, 0x5a, 0x12, 0x6e, 0x9f, 0x44, 0xc4, 0x03, 0x06, 0x11, - 0x3d, 0x6e, 0xb1, 0x5a, 0xa8, 0x76, 0xe8, 0xf8, 0xb6, 0x38, 0xab, 0xb9, 0xf4, 0x7f, 0xe0, 0xf8, - 0x36, 0x16, 0x92, 0xac, 0x40, 0xd5, 0xb2, 0x02, 0x69, 0x0f, 0x15, 0x74, 0x71, 0x6a, 0xb7, 0x66, - 0x1c, 0x4a, 0x69, 0x91, 0xdf, 0x46, 0xa7, 0x63, 0x9f, 0xc6, 0x0e, 0xe3, 0xf7, 0x5d, 0x7e, 0x00, - 0x9d, 0xe1, 0xb7, 0xf6, 0x6d, 0x59, 0x84, 0x8b, 0x58, 0xed, 0xc7, 0x4a, 0xa1, 0xbe, 0x62, 0x1c, - 0xde, 0x40, 0x6b, 0xb9, 0x71, 0x40, 0xe9, 0xde, 0xc8, 0x87, 0xf3, 0x89, 0x0f, 0x79, 0xad, 0x21, - 0x00, 0x8f, 0xeb, 0xa8, 0x5f, 0xa1, 0x95, 0x30, 0x9f, 0xea, 0xa4, 0xb5, 0xdf, 0x99, 0xa1, 0xa4, - 0x13, 0x4a, 0x65, 0xae, 0x0d, 0xfa, 0x1b, 0x2b, 0x92, 0x00, 0xcb, 0x76, 0xd4, 0x7d, 0x54, 0x27, - 0xd9, 0x93, 0xe8, 0x26, 0x1f, 0xe9, 0xc3, 0x32, 0x6c, 0xa6, 0xe3, 0x6f, 0x5b, 0x92, 0x3e, 0x1f, - 0xdb, 0xc1, 0x05, 0x7d, 0xed, 0xaf, 0x0a, 0x3a, 0x33, 0x61, 0x3c, 0xa8, 0x5b, 0x08, 0xd9, 0x91, - 0xd3, 0x83, 0x28, 0x97, 0xa4, 0x6c, 0xcc, 0xed, 0x64, 0x12, 0x9c, 0x43, 0xa9, 0x9f, 0x23, 0x34, - 0x62, 0x4f, 0x72, 0xa2, 0x4f, 0xcf, 0x49, 0xf1, 0x81, 0x67, 0xd6, 0x39, 0x7f, 0x6e, 0x37, 0xc7, - 0xa8, 0x52, 0xb4, 0x1c, 0x01, 0x85, 0xa8, 0x07, 0xf6, 0x7b, 0x41, 0xd4, 0xa8, 0x8a, 0x3e, 0x7a, - 0x6b, 0x86, 0xa4, 0x8f, 0x8d, 0x32, 0xf3, 0x4c, 0x12, 0xd2, 0x32, 0x1e, 0x11, 0xe3, 0xbc, 0x15, - 0xb5, 0x8d, 0xce, 0xd9, 0x40, 0x72, 0x6e, 0x7e, 0x19, 0x03, 0x65, 0x60, 0x8b, 0x09, 0xb5, 0x68, - 0x5e, 0x4c, 0x08, 0xce, 0xed, 0x4c, 0x02, 0xe1, 0xc9, 0xba, 0xda, 0xef, 0x0a, 0x3a, 0x27, 0x79, - 0xf6, 0x31, 0x78, 0xa1, 0x4b, 0x18, 0xcc, 0xe1, 0x3a, 0xba, 0x2b, 0x5d, 0x47, 0xaf, 0xcf, 0x90, - 0xbe, 0xd4, 0xc9, 0xb2, 0x6b, 0x49, 0xfb, 0x4d, 0x41, 0xe7, 0x27, 0x6a, 0xcc, 0x61, 0xbc, 0xde, - 0x91, 0xc7, 0xeb, 0xab, 0xc7, 0x88, 0xab, 0x64, 0xcc, 0x3e, 0x29, 0x8b, 0xaa, 0x3d, 0x7c, 0xb6, - 0xfe, 0xff, 0xde, 0x0f, 0xda, 0xdf, 0xf2, 0x33, 0x88, 0xd2, 0x39, 0x84, 0x21, 0x4f, 0x94, 0xca, - 0x91, 0x26, 0xca, 0xd8, 0xa0, 0xad, 0xce, 0x38, 0x68, 0x29, 0x3d, 0xde, 0xa0, 0xbd, 0x8b, 0x56, - 0xe4, 0xdb, 0xa7, 0x76, 0xc4, 0x6f, 0x3e, 0x41, 0xdd, 0x96, 0x6e, 0x27, 0x99, 0xa9, 0xf8, 0xf6, - 0xa0, 0xf4, 0xbf, 0xfc, 0xf6, 0xa0, 0xb4, 0xa4, 0x29, 0x7e, 0x91, 0xdf, 0x1e, 0x13, 0xf3, 0x3c, - 0xff, 0xb7, 0x07, 0xff, 0x94, 0xe6, 0x7f, 0x69, 0x48, 0xac, 0xf4, 0x0d, 0x99, 0x7d, 0x4a, 0xef, - 0xa5, 0x02, 0x3c, 0xc2, 0x68, 0xf7, 0x51, 0x5d, 0xfe, 0x0d, 0xe0, 0x58, 0x37, 0x5f, 0x0b, 0xd5, - 0x44, 0xe5, 0x0a, 0xae, 0xef, 0x10, 0x46, 0xb0, 0x90, 0x98, 0xe6, 0xe3, 0x67, 0xcd, 0x85, 0x27, - 0xcf, 0x9a, 0x0b, 0x4f, 0x9f, 0x35, 0x17, 0x1e, 0x0e, 0x9a, 0xca, 0xe3, 0x41, 0x53, 0x79, 0x32, - 0x68, 0x2a, 0x4f, 0x07, 0x4d, 0xe5, 0x8f, 0x41, 0x53, 0x79, 0xf4, 0x67, 0x73, 0xe1, 0xd3, 0x0b, - 0xd3, 0x7e, 0x31, 0xfa, 0x27, 0x00, 0x00, 0xff, 0xff, 0x67, 0xe4, 0xf6, 0x18, 0x69, 0x12, 0x00, - 0x00, -} - -func (m *AllocationResult) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllocationResult) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.Shareable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - if m.AvailableOnNodes != nil { - { - size, err := 
m.AvailableOnNodes.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.ResourceHandles) > 0 { - for iNdEx := len(m.ResourceHandles) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ResourceHandles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *PodSchedulingContext) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodSchedulingContext) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodSchedulingContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *PodSchedulingContextList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodSchedulingContextList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodSchedulingContextList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *PodSchedulingContextSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodSchedulingContextSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodSchedulingContextSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.PotentialNodes) > 0 { - for iNdEx := len(m.PotentialNodes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.PotentialNodes[iNdEx]) - copy(dAtA[i:], m.PotentialNodes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PotentialNodes[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.SelectedNode) - copy(dAtA[i:], m.SelectedNode) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelectedNode))) - i-- - 
dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *PodSchedulingContextStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodSchedulingContextStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodSchedulingContextStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ResourceClaims) > 0 { - for iNdEx := len(m.ResourceClaims) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ResourceClaims[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ResourceClaim) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClaim) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClaimConsumerReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClaimConsumerReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClaimConsumerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0x2a - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x22 - i -= len(m.Resource) - copy(dAtA[i:], m.Resource) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i-- - dAtA[i] = 0x1a - i -= len(m.APIGroup) - copy(dAtA[i:], m.APIGroup) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClaimList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClaimList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if 
len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClaimParametersReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClaimParametersReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClaimParametersReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x1a - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- - dAtA[i] = 0x12 - i -= len(m.APIGroup) - copy(dAtA[i:], m.APIGroup) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClaimSchedulingStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClaimSchedulingStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClaimSchedulingStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.UnsuitableNodes) > 0 { - for iNdEx := len(m.UnsuitableNodes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.UnsuitableNodes[iNdEx]) - copy(dAtA[i:], m.UnsuitableNodes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UnsuitableNodes[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClaimSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClaimSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.AllocationMode) - copy(dAtA[i:], m.AllocationMode) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllocationMode))) - i-- - dAtA[i] = 0x1a - if m.ParametersRef != nil { - { - size, err := m.ParametersRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.ResourceClassName) - copy(dAtA[i:], m.ResourceClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceClassName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClaimStatus) Marshal() (dAtA []byte, err error) { - 
size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.DeallocationRequested { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - if len(m.ReservedFor) > 0 { - for iNdEx := len(m.ReservedFor) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ReservedFor[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.Allocation != nil { - { - size, err := m.Allocation.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.DriverName) - copy(dAtA[i:], m.DriverName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClaimTemplate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClaimTemplate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClaimTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClaimTemplateList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClaimTemplateList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClaimTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClaimTemplateSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClaimTemplateSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m 
*ResourceClaimTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClass) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClass) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.SuitableNodes != nil { - { - size, err := m.SuitableNodes.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.ParametersRef != nil { - { - size, err := m.ParametersRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - i -= len(m.DriverName) - copy(dAtA[i:], m.DriverName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClassList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClassList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClassParametersReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClassParametersReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClassParametersReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x22 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x1a - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- - dAtA[i] = 0x12 - i -= len(m.APIGroup) - copy(dAtA[i:], m.APIGroup) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceHandle) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceHandle) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceHandle) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x12 - i -= len(m.DriverName) - copy(dAtA[i:], m.DriverName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *AllocationResult) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ResourceHandles) > 0 { - for _, e := range m.ResourceHandles { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.AvailableOnNodes != nil { - l = m.AvailableOnNodes.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - return n -} - -func (m *PodSchedulingContext) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodSchedulingContextList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodSchedulingContextSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.SelectedNode) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.PotentialNodes) > 0 { - for _, s := range m.PotentialNodes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodSchedulingContextStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ResourceClaims) > 0 { - for _, e := range m.ResourceClaims { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceClaim) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceClaimConsumerReference) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.APIGroup) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Resource) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + 
sovGenerated(uint64(l)) - return n -} - -func (m *ResourceClaimList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceClaimParametersReference) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.APIGroup) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceClaimSchedulingStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.UnsuitableNodes) > 0 { - for _, s := range m.UnsuitableNodes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceClaimSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ResourceClassName) - n += 1 + l + sovGenerated(uint64(l)) - if m.ParametersRef != nil { - l = m.ParametersRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.AllocationMode) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceClaimStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.DriverName) - n += 1 + l + sovGenerated(uint64(l)) - if m.Allocation != nil { - l = m.Allocation.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.ReservedFor) > 0 { - for _, e := range m.ReservedFor { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - n += 2 - return n -} - -func (m *ResourceClaimTemplate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceClaimTemplateList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceClaimTemplateSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceClass) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DriverName) - n += 1 + l + sovGenerated(uint64(l)) - if m.ParametersRef != nil { - l = m.ParametersRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SuitableNodes != nil { - l = m.SuitableNodes.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ResourceClassList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceClassParametersReference) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.APIGroup) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + 
l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceHandle) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.DriverName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Data) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AllocationResult) String() string { - if this == nil { - return "nil" - } - repeatedStringForResourceHandles := "[]ResourceHandle{" - for _, f := range this.ResourceHandles { - repeatedStringForResourceHandles += strings.Replace(strings.Replace(f.String(), "ResourceHandle", "ResourceHandle", 1), `&`, ``, 1) + "," - } - repeatedStringForResourceHandles += "}" - s := strings.Join([]string{`&AllocationResult{`, - `ResourceHandles:` + repeatedStringForResourceHandles + `,`, - `AvailableOnNodes:` + strings.Replace(fmt.Sprintf("%v", this.AvailableOnNodes), "NodeSelector", "v1.NodeSelector", 1) + `,`, - `Shareable:` + fmt.Sprintf("%v", this.Shareable) + `,`, - `}`, - }, "") - return s -} -func (this *PodSchedulingContext) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodSchedulingContext{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSchedulingContextSpec", "PodSchedulingContextSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodSchedulingContextStatus", "PodSchedulingContextStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodSchedulingContextList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]PodSchedulingContext{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSchedulingContext", "PodSchedulingContext", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&PodSchedulingContextList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *PodSchedulingContextSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodSchedulingContextSpec{`, - `SelectedNode:` + fmt.Sprintf("%v", this.SelectedNode) + `,`, - `PotentialNodes:` + fmt.Sprintf("%v", this.PotentialNodes) + `,`, - `}`, - }, "") - return s -} -func (this *PodSchedulingContextStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForResourceClaims := "[]ResourceClaimSchedulingStatus{" - for _, f := range this.ResourceClaims { - repeatedStringForResourceClaims += strings.Replace(strings.Replace(f.String(), "ResourceClaimSchedulingStatus", "ResourceClaimSchedulingStatus", 1), `&`, ``, 1) + "," - } - repeatedStringForResourceClaims += "}" - s := strings.Join([]string{`&PodSchedulingContextStatus{`, - `ResourceClaims:` + repeatedStringForResourceClaims + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClaim) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceClaim{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), 
`&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceClaimStatus", "ResourceClaimStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClaimConsumerReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceClaimConsumerReference{`, - `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, - `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClaimList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ResourceClaim{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaim", "ResourceClaim", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ResourceClaimList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClaimParametersReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceClaimParametersReference{`, - `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClaimSchedulingStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceClaimSchedulingStatus{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UnsuitableNodes:` + fmt.Sprintf("%v", this.UnsuitableNodes) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClaimSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceClaimSpec{`, - `ResourceClassName:` + fmt.Sprintf("%v", this.ResourceClassName) + `,`, - `ParametersRef:` + strings.Replace(this.ParametersRef.String(), "ResourceClaimParametersReference", "ResourceClaimParametersReference", 1) + `,`, - `AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClaimStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForReservedFor := "[]ResourceClaimConsumerReference{" - for _, f := range this.ReservedFor { - repeatedStringForReservedFor += strings.Replace(strings.Replace(f.String(), "ResourceClaimConsumerReference", "ResourceClaimConsumerReference", 1), `&`, ``, 1) + "," - } - repeatedStringForReservedFor += "}" - s := strings.Join([]string{`&ResourceClaimStatus{`, - `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, - `Allocation:` + strings.Replace(this.Allocation.String(), "AllocationResult", "AllocationResult", 1) + `,`, - `ReservedFor:` + repeatedStringForReservedFor + `,`, - `DeallocationRequested:` + fmt.Sprintf("%v", this.DeallocationRequested) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClaimTemplate) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceClaimTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + 
strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimTemplateSpec", "ResourceClaimTemplateSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClaimTemplateList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ResourceClaimTemplate{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaimTemplate", "ResourceClaimTemplate", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ResourceClaimTemplateList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClaimTemplateSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceClaimTemplateSpec{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClass) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, - `ParametersRef:` + strings.Replace(this.ParametersRef.String(), "ResourceClassParametersReference", "ResourceClassParametersReference", 1) + `,`, - `SuitableNodes:` + strings.Replace(fmt.Sprintf("%v", this.SuitableNodes), "NodeSelector", "v1.NodeSelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClassList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ResourceClass{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClass", "ResourceClass", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ResourceClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClassParametersReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceClassParametersReference{`, - `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceHandle) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceHandle{`, - `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, - `Data:` + fmt.Sprintf("%v", this.Data) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AllocationResult) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllocationResult: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceHandles", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceHandles = append(m.ResourceHandles, ResourceHandle{}) - if err := m.ResourceHandles[len(m.ResourceHandles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableOnNodes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AvailableOnNodes == nil { - m.AvailableOnNodes = &v1.NodeSelector{} - } - if err := m.AvailableOnNodes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Shareable", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Shareable = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingContext: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingContext: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingContextList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingContextList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PodSchedulingContext{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingContextSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingContextSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SelectedNode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SelectedNode = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PotentialNodes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PotentialNodes = append(m.PotentialNodes, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return 
ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSchedulingContextStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingContextStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingContextStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaims", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceClaims = append(m.ResourceClaims, ResourceClaimSchedulingStatus{}) - if err := m.ResourceClaims[len(m.ResourceClaims)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClaim) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift 
:= uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClaimConsumerReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimConsumerReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimConsumerReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIGroup = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - 
} - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resource = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClaimList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= 
int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ResourceClaim{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClaimParametersReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimParametersReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimParametersReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIGroup = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - 
iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClaimSchedulingStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimSchedulingStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimSchedulingStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UnsuitableNodes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UnsuitableNodes = append(m.UnsuitableNodes, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum 
{ - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceClassName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceClassName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ParametersRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ParametersRef == nil { - m.ParametersRef = &ResourceClaimParametersReference{} - } - if err := m.ParametersRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllocationMode = AllocationMode(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DriverName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Allocation", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Allocation == nil { - m.Allocation = &AllocationResult{} - } - if err := m.Allocation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReservedFor", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ReservedFor = append(m.ReservedFor, ResourceClaimConsumerReference{}) - if err := m.ReservedFor[len(m.ReservedFor)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DeallocationRequested", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DeallocationRequested = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClaimTemplate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimTemplate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimTemplate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClaimTemplateList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimTemplateList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, 
ResourceClaimTemplate{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClaimTemplateSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimTemplateSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClass) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClass: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClass: illegal tag %d (wire type %d)", 
fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DriverName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ParametersRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ParametersRef == nil { - m.ParametersRef = &ResourceClassParametersReference{} - } - if err := m.ParametersRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SuitableNodes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SuitableNodes == nil { - m.SuitableNodes = &v1.NodeSelector{} - } - if err := m.SuitableNodes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClassList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClassList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClassList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ResourceClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClassParametersReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClassParametersReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClassParametersReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.APIGroup = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceHandle) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceHandle: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceHandle: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 
0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DriverName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/k8s.io/api/resource/v1alpha2/generated.proto b/vendor/k8s.io/api/resource/v1alpha2/generated.proto deleted file mode 100644 index f7748f9a1a..0000000000 --- a/vendor/k8s.io/api/resource/v1alpha2/generated.proto +++ /dev/null @@ -1,400 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = "proto2"; - -package k8s.io.api.resource.v1alpha2; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "k8s.io/api/resource/v1alpha2"; - -// AllocationResult contains attributes of an allocated resource. -message AllocationResult { - // ResourceHandles contain the state associated with an allocation that - // should be maintained throughout the lifetime of a claim. Each - // ResourceHandle contains data that should be passed to a specific kubelet - // plugin once it lands on a node. This data is returned by the driver - // after a successful allocation and is opaque to Kubernetes. Driver - // documentation may explain to users how to interpret this data if needed. - // - // Setting this field is optional. It has a maximum size of 32 entries. - // If null (or empty), it is assumed this allocation will be processed by a - // single kubelet plugin with no ResourceHandle data attached. The name of - // the kubelet plugin invoked will match the DriverName set in the - // ResourceClaimStatus this AllocationResult is embedded in. - // - // +listType=atomic - // +optional - repeated ResourceHandle resourceHandles = 1; - - // This field will get set by the resource driver after it has allocated - // the resource to inform the scheduler where it can schedule Pods using - // the ResourceClaim. - // - // Setting this field is optional. If null, the resource is available - // everywhere. - // +optional - optional k8s.io.api.core.v1.NodeSelector availableOnNodes = 2; - - // Shareable determines whether the resource supports more - // than one consumer at a time. - // +optional - optional bool shareable = 3; -} - -// PodSchedulingContext objects hold information that is needed to schedule -// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation -// mode. -// -// This is an alpha type and requires enabling the DynamicResourceAllocation -// feature gate. -message PodSchedulingContext { - // Standard object metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec describes where resources for the Pod are needed. - optional PodSchedulingContextSpec spec = 2; - - // Status describes where resources for the Pod can be allocated. - // +optional - optional PodSchedulingContextStatus status = 3; -} - -// PodSchedulingContextList is a collection of Pod scheduling objects. -message PodSchedulingContextList { - // Standard list metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of PodSchedulingContext objects. - repeated PodSchedulingContext items = 2; -} - -// PodSchedulingContextSpec describes where resources for the Pod are needed. 
-message PodSchedulingContextSpec { - // SelectedNode is the node for which allocation of ResourceClaims that - // are referenced by the Pod and that use "WaitForFirstConsumer" - // allocation is to be attempted. - // +optional - optional string selectedNode = 1; - - // PotentialNodes lists nodes where the Pod might be able to run. - // - // The size of this field is limited to 128. This is large enough for - // many clusters. Larger clusters may need more attempts to find a node - // that suits all pending resources. This may get increased in the - // future, but not reduced. - // - // +listType=atomic - // +optional - repeated string potentialNodes = 2; -} - -// PodSchedulingContextStatus describes where resources for the Pod can be allocated. -message PodSchedulingContextStatus { - // ResourceClaims describes resource availability for each - // pod.spec.resourceClaim entry where the corresponding ResourceClaim - // uses "WaitForFirstConsumer" allocation mode. - // - // +listType=map - // +listMapKey=name - // +optional - repeated ResourceClaimSchedulingStatus resourceClaims = 1; -} - -// ResourceClaim describes which resources are needed by a resource consumer. -// Its status tracks whether the resource has been allocated and what the -// resulting attributes are. -// -// This is an alpha type and requires enabling the DynamicResourceAllocation -// feature gate. -message ResourceClaim { - // Standard object metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec describes the desired attributes of a resource that then needs - // to be allocated. It can only be set once when creating the - // ResourceClaim. - optional ResourceClaimSpec spec = 2; - - // Status describes whether the resource is available and with which - // attributes. - // +optional - optional ResourceClaimStatus status = 3; -} - -// ResourceClaimConsumerReference contains enough information to let you -// locate the consumer of a ResourceClaim. The user must be a resource in the same -// namespace as the ResourceClaim. -message ResourceClaimConsumerReference { - // APIGroup is the group for the resource being referenced. It is - // empty for the core API. This matches the group in the APIVersion - // that is used when creating the resources. - // +optional - optional string apiGroup = 1; - - // Resource is the type of resource being referenced, for example "pods". - optional string resource = 3; - - // Name is the name of resource being referenced. - optional string name = 4; - - // UID identifies exactly one incarnation of the resource. - optional string uid = 5; -} - -// ResourceClaimList is a collection of claims. -message ResourceClaimList { - // Standard list metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of resource claims. - repeated ResourceClaim items = 2; -} - -// ResourceClaimParametersReference contains enough information to let you -// locate the parameters for a ResourceClaim. The object must be in the same -// namespace as the ResourceClaim. -message ResourceClaimParametersReference { - // APIGroup is the group for the resource being referenced. It is - // empty for the core API. This matches the group in the APIVersion - // that is used when creating the resources. - // +optional - optional string apiGroup = 1; - - // Kind is the type of resource being referenced. This is the same - // value as in the parameter object's metadata, for example "ConfigMap". 
- optional string kind = 2; - - // Name is the name of resource being referenced. - optional string name = 3; -} - -// ResourceClaimSchedulingStatus contains information about one particular -// ResourceClaim with "WaitForFirstConsumer" allocation mode. -message ResourceClaimSchedulingStatus { - // Name matches the pod.spec.resourceClaims[*].Name field. - // +optional - optional string name = 1; - - // UnsuitableNodes lists nodes that the ResourceClaim cannot be - // allocated for. - // - // The size of this field is limited to 128, the same as for - // PodSchedulingSpec.PotentialNodes. This may get increased in the - // future, but not reduced. - // - // +listType=atomic - // +optional - repeated string unsuitableNodes = 2; -} - -// ResourceClaimSpec defines how a resource is to be allocated. -message ResourceClaimSpec { - // ResourceClassName references the driver and additional parameters - // via the name of a ResourceClass that was created as part of the - // driver deployment. - optional string resourceClassName = 1; - - // ParametersRef references a separate object with arbitrary parameters - // that will be used by the driver when allocating a resource for the - // claim. - // - // The object must be in the same namespace as the ResourceClaim. - // +optional - optional ResourceClaimParametersReference parametersRef = 2; - - // Allocation can start immediately or when a Pod wants to use the - // resource. "WaitForFirstConsumer" is the default. - // +optional - optional string allocationMode = 3; -} - -// ResourceClaimStatus tracks whether the resource has been allocated and what -// the resulting attributes are. -message ResourceClaimStatus { - // DriverName is a copy of the driver name from the ResourceClass at - // the time when allocation started. - // +optional - optional string driverName = 1; - - // Allocation is set by the resource driver once a resource or set of - // resources has been allocated successfully. If this is not specified, the - // resources have not been allocated yet. - // +optional - optional AllocationResult allocation = 2; - - // ReservedFor indicates which entities are currently allowed to use - // the claim. A Pod which references a ResourceClaim which is not - // reserved for that Pod will not be started. - // - // There can be at most 32 such reservations. This may get increased in - // the future, but not reduced. - // - // +listType=map - // +listMapKey=uid - // +optional - repeated ResourceClaimConsumerReference reservedFor = 3; - - // DeallocationRequested indicates that a ResourceClaim is to be - // deallocated. - // - // The driver then must deallocate this claim and reset the field - // together with clearing the Allocation field. - // - // While DeallocationRequested is set, no new consumers may be added to - // ReservedFor. - // +optional - optional bool deallocationRequested = 4; -} - -// ResourceClaimTemplate is used to produce ResourceClaim objects. -message ResourceClaimTemplate { - // Standard object metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Describes the ResourceClaim that is to be generated. - // - // This field is immutable. A ResourceClaim will get created by the - // control plane for a Pod when needed and then not get updated - // anymore. - optional ResourceClaimTemplateSpec spec = 2; -} - -// ResourceClaimTemplateList is a collection of claim templates. 
-message ResourceClaimTemplateList { - // Standard list metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of resource claim templates. - repeated ResourceClaimTemplate items = 2; -} - -// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim. -message ResourceClaimTemplateSpec { - // ObjectMeta may contain labels and annotations that will be copied into the PVC - // when creating it. No other fields are allowed and will be rejected during - // validation. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec for the ResourceClaim. The entire content is copied unchanged - // into the ResourceClaim that gets created from this template. The - // same fields as in a ResourceClaim are also valid here. - optional ResourceClaimSpec spec = 2; -} - -// ResourceClass is used by administrators to influence how resources -// are allocated. -// -// This is an alpha type and requires enabling the DynamicResourceAllocation -// feature gate. -message ResourceClass { - // Standard object metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // DriverName defines the name of the dynamic resource driver that is - // used for allocation of a ResourceClaim that uses this class. - // - // Resource drivers have a unique name in forward domain order - // (acme.example.com). - optional string driverName = 2; - - // ParametersRef references an arbitrary separate object that may hold - // parameters that will be used by the driver when allocating a - // resource that uses this class. A dynamic resource driver can - // distinguish between parameters stored here and and those stored in - // ResourceClaimSpec. - // +optional - optional ResourceClassParametersReference parametersRef = 3; - - // Only nodes matching the selector will be considered by the scheduler - // when trying to find a Node that fits a Pod when that Pod uses - // a ResourceClaim that has not been allocated yet. - // - // Setting this field is optional. If null, all nodes are candidates. - // +optional - optional k8s.io.api.core.v1.NodeSelector suitableNodes = 4; -} - -// ResourceClassList is a collection of classes. -message ResourceClassList { - // Standard list metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of resource classes. - repeated ResourceClass items = 2; -} - -// ResourceClassParametersReference contains enough information to let you -// locate the parameters for a ResourceClass. -message ResourceClassParametersReference { - // APIGroup is the group for the resource being referenced. It is - // empty for the core API. This matches the group in the APIVersion - // that is used when creating the resources. - // +optional - optional string apiGroup = 1; - - // Kind is the type of resource being referenced. This is the same - // value as in the parameter object's metadata. - optional string kind = 2; - - // Name is the name of resource being referenced. - optional string name = 3; - - // Namespace that contains the referenced resource. Must be empty - // for cluster-scoped resources and non-empty for namespaced - // resources. - // +optional - optional string namespace = 4; -} - -// ResourceHandle holds opaque resource data for processing by a specific kubelet plugin. 
-message ResourceHandle { - // DriverName specifies the name of the resource driver whose kubelet - // plugin should be invoked to process this ResourceHandle's data once it - // lands on a node. This may differ from the DriverName set in - // ResourceClaimStatus this ResourceHandle is embedded in. - optional string driverName = 1; - - // Data contains the opaque data associated with this ResourceHandle. It is - // set by the controller component of the resource driver whose name - // matches the DriverName set in the ResourceClaimStatus this - // ResourceHandle is embedded in. It is set at allocation time and is - // intended for processing by the kubelet plugin whose name matches - // the DriverName set in this ResourceHandle. - // - // The maximum size of this field is 16KiB. This may get increased in the - // future, but not reduced. - // +optional - optional string data = 2; -} - diff --git a/vendor/k8s.io/api/resource/v1alpha2/types.go b/vendor/k8s.io/api/resource/v1alpha2/types.go deleted file mode 100644 index a614ff9dc1..0000000000 --- a/vendor/k8s.io/api/resource/v1alpha2/types.go +++ /dev/null @@ -1,462 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha2 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// ResourceClaim describes which resources are needed by a resource consumer. -// Its status tracks whether the resource has been allocated and what the -// resulting attributes are. -// -// This is an alpha type and requires enabling the DynamicResourceAllocation -// feature gate. -type ResourceClaim struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec describes the desired attributes of a resource that then needs - // to be allocated. It can only be set once when creating the - // ResourceClaim. - Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"` - - // Status describes whether the resource is available and with which - // attributes. - // +optional - Status ResourceClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// ResourceClaimSpec defines how a resource is to be allocated. -type ResourceClaimSpec struct { - // ResourceClassName references the driver and additional parameters - // via the name of a ResourceClass that was created as part of the - // driver deployment. - ResourceClassName string `json:"resourceClassName" protobuf:"bytes,1,name=resourceClassName"` - - // ParametersRef references a separate object with arbitrary parameters - // that will be used by the driver when allocating a resource for the - // claim. - // - // The object must be in the same namespace as the ResourceClaim. 
- // +optional - ParametersRef *ResourceClaimParametersReference `json:"parametersRef,omitempty" protobuf:"bytes,2,opt,name=parametersRef"` - - // Allocation can start immediately or when a Pod wants to use the - // resource. "WaitForFirstConsumer" is the default. - // +optional - AllocationMode AllocationMode `json:"allocationMode,omitempty" protobuf:"bytes,3,opt,name=allocationMode"` -} - -// AllocationMode describes whether a ResourceClaim gets allocated immediately -// when it gets created (AllocationModeImmediate) or whether allocation is -// delayed until it is needed for a Pod -// (AllocationModeWaitForFirstConsumer). Other modes might get added in the -// future. -type AllocationMode string - -const ( - // When a ResourceClaim has AllocationModeWaitForFirstConsumer, allocation is - // delayed until a Pod gets scheduled that needs the ResourceClaim. The - // scheduler will consider all resource requirements of that Pod and - // trigger allocation for a node that fits the Pod. - AllocationModeWaitForFirstConsumer AllocationMode = "WaitForFirstConsumer" - - // When a ResourceClaim has AllocationModeImmediate, allocation starts - // as soon as the ResourceClaim gets created. This is done without - // considering the needs of Pods that will use the ResourceClaim - // because those Pods are not known yet. - AllocationModeImmediate AllocationMode = "Immediate" -) - -// ResourceClaimStatus tracks whether the resource has been allocated and what -// the resulting attributes are. -type ResourceClaimStatus struct { - // DriverName is a copy of the driver name from the ResourceClass at - // the time when allocation started. - // +optional - DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"` - - // Allocation is set by the resource driver once a resource or set of - // resources has been allocated successfully. If this is not specified, the - // resources have not been allocated yet. - // +optional - Allocation *AllocationResult `json:"allocation,omitempty" protobuf:"bytes,2,opt,name=allocation"` - - // ReservedFor indicates which entities are currently allowed to use - // the claim. A Pod which references a ResourceClaim which is not - // reserved for that Pod will not be started. - // - // There can be at most 32 such reservations. This may get increased in - // the future, but not reduced. - // - // +listType=map - // +listMapKey=uid - // +optional - ReservedFor []ResourceClaimConsumerReference `json:"reservedFor,omitempty" protobuf:"bytes,3,opt,name=reservedFor"` - - // DeallocationRequested indicates that a ResourceClaim is to be - // deallocated. - // - // The driver then must deallocate this claim and reset the field - // together with clearing the Allocation field. - // - // While DeallocationRequested is set, no new consumers may be added to - // ReservedFor. - // +optional - DeallocationRequested bool `json:"deallocationRequested,omitempty" protobuf:"varint,4,opt,name=deallocationRequested"` -} - -// ReservedForMaxSize is the maximum number of entries in -// claim.status.reservedFor. -const ResourceClaimReservedForMaxSize = 32 - -// AllocationResult contains attributes of an allocated resource. -type AllocationResult struct { - // ResourceHandles contain the state associated with an allocation that - // should be maintained throughout the lifetime of a claim. Each - // ResourceHandle contains data that should be passed to a specific kubelet - // plugin once it lands on a node. 
This data is returned by the driver - // after a successful allocation and is opaque to Kubernetes. Driver - // documentation may explain to users how to interpret this data if needed. - // - // Setting this field is optional. It has a maximum size of 32 entries. - // If null (or empty), it is assumed this allocation will be processed by a - // single kubelet plugin with no ResourceHandle data attached. The name of - // the kubelet plugin invoked will match the DriverName set in the - // ResourceClaimStatus this AllocationResult is embedded in. - // - // +listType=atomic - // +optional - ResourceHandles []ResourceHandle `json:"resourceHandles,omitempty" protobuf:"bytes,1,opt,name=resourceHandles"` - - // This field will get set by the resource driver after it has allocated - // the resource to inform the scheduler where it can schedule Pods using - // the ResourceClaim. - // - // Setting this field is optional. If null, the resource is available - // everywhere. - // +optional - AvailableOnNodes *v1.NodeSelector `json:"availableOnNodes,omitempty" protobuf:"bytes,2,opt,name=availableOnNodes"` - - // Shareable determines whether the resource supports more - // than one consumer at a time. - // +optional - Shareable bool `json:"shareable,omitempty" protobuf:"varint,3,opt,name=shareable"` -} - -// AllocationResultResourceHandlesMaxSize represents the maximum number of -// entries in allocation.resourceHandles. -const AllocationResultResourceHandlesMaxSize = 32 - -// ResourceHandle holds opaque resource data for processing by a specific kubelet plugin. -type ResourceHandle struct { - // DriverName specifies the name of the resource driver whose kubelet - // plugin should be invoked to process this ResourceHandle's data once it - // lands on a node. This may differ from the DriverName set in - // ResourceClaimStatus this ResourceHandle is embedded in. - DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"` - - // Data contains the opaque data associated with this ResourceHandle. It is - // set by the controller component of the resource driver whose name - // matches the DriverName set in the ResourceClaimStatus this - // ResourceHandle is embedded in. It is set at allocation time and is - // intended for processing by the kubelet plugin whose name matches - // the DriverName set in this ResourceHandle. - // - // The maximum size of this field is 16KiB. This may get increased in the - // future, but not reduced. - // +optional - Data string `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` -} - -// ResourceHandleDataMaxSize represents the maximum size of resourceHandle.data. -const ResourceHandleDataMaxSize = 16 * 1024 - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// ResourceClaimList is a collection of claims. -type ResourceClaimList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of resource claims. - Items []ResourceClaim `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// PodSchedulingContext objects hold information that is needed to schedule -// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation -// mode. 
-// -// This is an alpha type and requires enabling the DynamicResourceAllocation -// feature gate. -type PodSchedulingContext struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec describes where resources for the Pod are needed. - Spec PodSchedulingContextSpec `json:"spec" protobuf:"bytes,2,name=spec"` - - // Status describes where resources for the Pod can be allocated. - // +optional - Status PodSchedulingContextStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// PodSchedulingContextSpec describes where resources for the Pod are needed. -type PodSchedulingContextSpec struct { - // SelectedNode is the node for which allocation of ResourceClaims that - // are referenced by the Pod and that use "WaitForFirstConsumer" - // allocation is to be attempted. - // +optional - SelectedNode string `json:"selectedNode,omitempty" protobuf:"bytes,1,opt,name=selectedNode"` - - // PotentialNodes lists nodes where the Pod might be able to run. - // - // The size of this field is limited to 128. This is large enough for - // many clusters. Larger clusters may need more attempts to find a node - // that suits all pending resources. This may get increased in the - // future, but not reduced. - // - // +listType=atomic - // +optional - PotentialNodes []string `json:"potentialNodes,omitempty" protobuf:"bytes,2,opt,name=potentialNodes"` -} - -// PodSchedulingContextStatus describes where resources for the Pod can be allocated. -type PodSchedulingContextStatus struct { - // ResourceClaims describes resource availability for each - // pod.spec.resourceClaim entry where the corresponding ResourceClaim - // uses "WaitForFirstConsumer" allocation mode. - // - // +listType=map - // +listMapKey=name - // +optional - ResourceClaims []ResourceClaimSchedulingStatus `json:"resourceClaims,omitempty" protobuf:"bytes,1,opt,name=resourceClaims"` - - // If there ever is a need to support other kinds of resources - // than ResourceClaim, then new fields could get added here - // for those other resources. -} - -// ResourceClaimSchedulingStatus contains information about one particular -// ResourceClaim with "WaitForFirstConsumer" allocation mode. -type ResourceClaimSchedulingStatus struct { - // Name matches the pod.spec.resourceClaims[*].Name field. - // +optional - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - - // UnsuitableNodes lists nodes that the ResourceClaim cannot be - // allocated for. - // - // The size of this field is limited to 128, the same as for - // PodSchedulingSpec.PotentialNodes. This may get increased in the - // future, but not reduced. - // - // +listType=atomic - // +optional - UnsuitableNodes []string `json:"unsuitableNodes,omitempty" protobuf:"bytes,2,opt,name=unsuitableNodes"` -} - -// PodSchedulingNodeListMaxSize defines the maximum number of entries in the -// node lists that are stored in PodSchedulingContext objects. This limit is part -// of the API. -const PodSchedulingNodeListMaxSize = 128 - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// PodSchedulingContextList is a collection of Pod scheduling objects. 
-type PodSchedulingContextList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of PodSchedulingContext objects. - Items []PodSchedulingContext `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// ResourceClass is used by administrators to influence how resources -// are allocated. -// -// This is an alpha type and requires enabling the DynamicResourceAllocation -// feature gate. -type ResourceClass struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // DriverName defines the name of the dynamic resource driver that is - // used for allocation of a ResourceClaim that uses this class. - // - // Resource drivers have a unique name in forward domain order - // (acme.example.com). - DriverName string `json:"driverName" protobuf:"bytes,2,name=driverName"` - - // ParametersRef references an arbitrary separate object that may hold - // parameters that will be used by the driver when allocating a - // resource that uses this class. A dynamic resource driver can - // distinguish between parameters stored here and and those stored in - // ResourceClaimSpec. - // +optional - ParametersRef *ResourceClassParametersReference `json:"parametersRef,omitempty" protobuf:"bytes,3,opt,name=parametersRef"` - - // Only nodes matching the selector will be considered by the scheduler - // when trying to find a Node that fits a Pod when that Pod uses - // a ResourceClaim that has not been allocated yet. - // - // Setting this field is optional. If null, all nodes are candidates. - // +optional - SuitableNodes *v1.NodeSelector `json:"suitableNodes,omitempty" protobuf:"bytes,4,opt,name=suitableNodes"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// ResourceClassList is a collection of classes. -type ResourceClassList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of resource classes. - Items []ResourceClass `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// ResourceClassParametersReference contains enough information to let you -// locate the parameters for a ResourceClass. -type ResourceClassParametersReference struct { - // APIGroup is the group for the resource being referenced. It is - // empty for the core API. This matches the group in the APIVersion - // that is used when creating the resources. - // +optional - APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"` - // Kind is the type of resource being referenced. This is the same - // value as in the parameter object's metadata. - Kind string `json:"kind" protobuf:"bytes,2,name=kind"` - // Name is the name of resource being referenced. - Name string `json:"name" protobuf:"bytes,3,name=name"` - // Namespace that contains the referenced resource. Must be empty - // for cluster-scoped resources and non-empty for namespaced - // resources. 
- // +optional - Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` -} - -// ResourceClaimParametersReference contains enough information to let you -// locate the parameters for a ResourceClaim. The object must be in the same -// namespace as the ResourceClaim. -type ResourceClaimParametersReference struct { - // APIGroup is the group for the resource being referenced. It is - // empty for the core API. This matches the group in the APIVersion - // that is used when creating the resources. - // +optional - APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"` - // Kind is the type of resource being referenced. This is the same - // value as in the parameter object's metadata, for example "ConfigMap". - Kind string `json:"kind" protobuf:"bytes,2,name=kind"` - // Name is the name of resource being referenced. - Name string `json:"name" protobuf:"bytes,3,name=name"` -} - -// ResourceClaimConsumerReference contains enough information to let you -// locate the consumer of a ResourceClaim. The user must be a resource in the same -// namespace as the ResourceClaim. -type ResourceClaimConsumerReference struct { - // APIGroup is the group for the resource being referenced. It is - // empty for the core API. This matches the group in the APIVersion - // that is used when creating the resources. - // +optional - APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"` - // Resource is the type of resource being referenced, for example "pods". - Resource string `json:"resource" protobuf:"bytes,3,name=resource"` - // Name is the name of resource being referenced. - Name string `json:"name" protobuf:"bytes,4,name=name"` - // UID identifies exactly one incarnation of the resource. - UID types.UID `json:"uid" protobuf:"bytes,5,name=uid"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// ResourceClaimTemplate is used to produce ResourceClaim objects. -type ResourceClaimTemplate struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Describes the ResourceClaim that is to be generated. - // - // This field is immutable. A ResourceClaim will get created by the - // control plane for a Pod when needed and then not get updated - // anymore. - Spec ResourceClaimTemplateSpec `json:"spec" protobuf:"bytes,2,name=spec"` -} - -// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim. -type ResourceClaimTemplateSpec struct { - // ObjectMeta may contain labels and annotations that will be copied into the PVC - // when creating it. No other fields are allowed and will be rejected during - // validation. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec for the ResourceClaim. The entire content is copied unchanged - // into the ResourceClaim that gets created from this template. The - // same fields as in a ResourceClaim are also valid here. - Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// ResourceClaimTemplateList is a collection of claim templates. 
-type ResourceClaimTemplateList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of resource claim templates. - Items []ResourceClaimTemplate `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go deleted file mode 100644 index 474be8c85c..0000000000 --- a/vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go +++ /dev/null @@ -1,232 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha2 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-codegen.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_AllocationResult = map[string]string{ - "": "AllocationResult contains attributes of an allocated resource.", - "resourceHandles": "ResourceHandles contain the state associated with an allocation that should be maintained throughout the lifetime of a claim. Each ResourceHandle contains data that should be passed to a specific kubelet plugin once it lands on a node. This data is returned by the driver after a successful allocation and is opaque to Kubernetes. Driver documentation may explain to users how to interpret this data if needed.\n\nSetting this field is optional. It has a maximum size of 32 entries. If null (or empty), it is assumed this allocation will be processed by a single kubelet plugin with no ResourceHandle data attached. The name of the kubelet plugin invoked will match the DriverName set in the ResourceClaimStatus this AllocationResult is embedded in.", - "availableOnNodes": "This field will get set by the resource driver after it has allocated the resource to inform the scheduler where it can schedule Pods using the ResourceClaim.\n\nSetting this field is optional. 
If null, the resource is available everywhere.", - "shareable": "Shareable determines whether the resource supports more than one consumer at a time.", -} - -func (AllocationResult) SwaggerDoc() map[string]string { - return map_AllocationResult -} - -var map_PodSchedulingContext = map[string]string{ - "": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", - "metadata": "Standard object metadata", - "spec": "Spec describes where resources for the Pod are needed.", - "status": "Status describes where resources for the Pod can be allocated.", -} - -func (PodSchedulingContext) SwaggerDoc() map[string]string { - return map_PodSchedulingContext -} - -var map_PodSchedulingContextList = map[string]string{ - "": "PodSchedulingContextList is a collection of Pod scheduling objects.", - "metadata": "Standard list metadata", - "items": "Items is the list of PodSchedulingContext objects.", -} - -func (PodSchedulingContextList) SwaggerDoc() map[string]string { - return map_PodSchedulingContextList -} - -var map_PodSchedulingContextSpec = map[string]string{ - "": "PodSchedulingContextSpec describes where resources for the Pod are needed.", - "selectedNode": "SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \"WaitForFirstConsumer\" allocation is to be attempted.", - "potentialNodes": "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced.", -} - -func (PodSchedulingContextSpec) SwaggerDoc() map[string]string { - return map_PodSchedulingContextSpec -} - -var map_PodSchedulingContextStatus = map[string]string{ - "": "PodSchedulingContextStatus describes where resources for the Pod can be allocated.", - "resourceClaims": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.", -} - -func (PodSchedulingContextStatus) SwaggerDoc() map[string]string { - return map_PodSchedulingContextStatus -} - -var map_ResourceClaim = map[string]string{ - "": "ResourceClaim describes which resources are needed by a resource consumer. Its status tracks whether the resource has been allocated and what the resulting attributes are.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", - "metadata": "Standard object metadata", - "spec": "Spec describes the desired attributes of a resource that then needs to be allocated. It can only be set once when creating the ResourceClaim.", - "status": "Status describes whether the resource is available and with which attributes.", -} - -func (ResourceClaim) SwaggerDoc() map[string]string { - return map_ResourceClaim -} - -var map_ResourceClaimConsumerReference = map[string]string{ - "": "ResourceClaimConsumerReference contains enough information to let you locate the consumer of a ResourceClaim. The user must be a resource in the same namespace as the ResourceClaim.", - "apiGroup": "APIGroup is the group for the resource being referenced. It is empty for the core API. 
This matches the group in the APIVersion that is used when creating the resources.", - "resource": "Resource is the type of resource being referenced, for example \"pods\".", - "name": "Name is the name of resource being referenced.", - "uid": "UID identifies exactly one incarnation of the resource.", -} - -func (ResourceClaimConsumerReference) SwaggerDoc() map[string]string { - return map_ResourceClaimConsumerReference -} - -var map_ResourceClaimList = map[string]string{ - "": "ResourceClaimList is a collection of claims.", - "metadata": "Standard list metadata", - "items": "Items is the list of resource claims.", -} - -func (ResourceClaimList) SwaggerDoc() map[string]string { - return map_ResourceClaimList -} - -var map_ResourceClaimParametersReference = map[string]string{ - "": "ResourceClaimParametersReference contains enough information to let you locate the parameters for a ResourceClaim. The object must be in the same namespace as the ResourceClaim.", - "apiGroup": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.", - "kind": "Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata, for example \"ConfigMap\".", - "name": "Name is the name of resource being referenced.", -} - -func (ResourceClaimParametersReference) SwaggerDoc() map[string]string { - return map_ResourceClaimParametersReference -} - -var map_ResourceClaimSchedulingStatus = map[string]string{ - "": "ResourceClaimSchedulingStatus contains information about one particular ResourceClaim with \"WaitForFirstConsumer\" allocation mode.", - "name": "Name matches the pod.spec.resourceClaims[*].Name field.", - "unsuitableNodes": "UnsuitableNodes lists nodes that the ResourceClaim cannot be allocated for.\n\nThe size of this field is limited to 128, the same as for PodSchedulingSpec.PotentialNodes. This may get increased in the future, but not reduced.", -} - -func (ResourceClaimSchedulingStatus) SwaggerDoc() map[string]string { - return map_ResourceClaimSchedulingStatus -} - -var map_ResourceClaimSpec = map[string]string{ - "": "ResourceClaimSpec defines how a resource is to be allocated.", - "resourceClassName": "ResourceClassName references the driver and additional parameters via the name of a ResourceClass that was created as part of the driver deployment.", - "parametersRef": "ParametersRef references a separate object with arbitrary parameters that will be used by the driver when allocating a resource for the claim.\n\nThe object must be in the same namespace as the ResourceClaim.", - "allocationMode": "Allocation can start immediately or when a Pod wants to use the resource. \"WaitForFirstConsumer\" is the default.", -} - -func (ResourceClaimSpec) SwaggerDoc() map[string]string { - return map_ResourceClaimSpec -} - -var map_ResourceClaimStatus = map[string]string{ - "": "ResourceClaimStatus tracks whether the resource has been allocated and what the resulting attributes are.", - "driverName": "DriverName is a copy of the driver name from the ResourceClass at the time when allocation started.", - "allocation": "Allocation is set by the resource driver once a resource or set of resources has been allocated successfully. If this is not specified, the resources have not been allocated yet.", - "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. 
A Pod which references a ResourceClaim which is not reserved for that Pod will not be started.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", - "deallocationRequested": "DeallocationRequested indicates that a ResourceClaim is to be deallocated.\n\nThe driver then must deallocate this claim and reset the field together with clearing the Allocation field.\n\nWhile DeallocationRequested is set, no new consumers may be added to ReservedFor.", -} - -func (ResourceClaimStatus) SwaggerDoc() map[string]string { - return map_ResourceClaimStatus -} - -var map_ResourceClaimTemplate = map[string]string{ - "": "ResourceClaimTemplate is used to produce ResourceClaim objects.", - "metadata": "Standard object metadata", - "spec": "Describes the ResourceClaim that is to be generated.\n\nThis field is immutable. A ResourceClaim will get created by the control plane for a Pod when needed and then not get updated anymore.", -} - -func (ResourceClaimTemplate) SwaggerDoc() map[string]string { - return map_ResourceClaimTemplate -} - -var map_ResourceClaimTemplateList = map[string]string{ - "": "ResourceClaimTemplateList is a collection of claim templates.", - "metadata": "Standard list metadata", - "items": "Items is the list of resource claim templates.", -} - -func (ResourceClaimTemplateList) SwaggerDoc() map[string]string { - return map_ResourceClaimTemplateList -} - -var map_ResourceClaimTemplateSpec = map[string]string{ - "": "ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.", - "metadata": "ObjectMeta may contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.", - "spec": "Spec for the ResourceClaim. The entire content is copied unchanged into the ResourceClaim that gets created from this template. The same fields as in a ResourceClaim are also valid here.", -} - -func (ResourceClaimTemplateSpec) SwaggerDoc() map[string]string { - return map_ResourceClaimTemplateSpec -} - -var map_ResourceClass = map[string]string{ - "": "ResourceClass is used by administrators to influence how resources are allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", - "metadata": "Standard object metadata", - "driverName": "DriverName defines the name of the dynamic resource driver that is used for allocation of a ResourceClaim that uses this class.\n\nResource drivers have a unique name in forward domain order (acme.example.com).", - "parametersRef": "ParametersRef references an arbitrary separate object that may hold parameters that will be used by the driver when allocating a resource that uses this class. A dynamic resource driver can distinguish between parameters stored here and and those stored in ResourceClaimSpec.", - "suitableNodes": "Only nodes matching the selector will be considered by the scheduler when trying to find a Node that fits a Pod when that Pod uses a ResourceClaim that has not been allocated yet.\n\nSetting this field is optional. 
If null, all nodes are candidates.", -} - -func (ResourceClass) SwaggerDoc() map[string]string { - return map_ResourceClass -} - -var map_ResourceClassList = map[string]string{ - "": "ResourceClassList is a collection of classes.", - "metadata": "Standard list metadata", - "items": "Items is the list of resource classes.", -} - -func (ResourceClassList) SwaggerDoc() map[string]string { - return map_ResourceClassList -} - -var map_ResourceClassParametersReference = map[string]string{ - "": "ResourceClassParametersReference contains enough information to let you locate the parameters for a ResourceClass.", - "apiGroup": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.", - "kind": "Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata.", - "name": "Name is the name of resource being referenced.", - "namespace": "Namespace that contains the referenced resource. Must be empty for cluster-scoped resources and non-empty for namespaced resources.", -} - -func (ResourceClassParametersReference) SwaggerDoc() map[string]string { - return map_ResourceClassParametersReference -} - -var map_ResourceHandle = map[string]string{ - "": "ResourceHandle holds opaque resource data for processing by a specific kubelet plugin.", - "driverName": "DriverName specifies the name of the resource driver whose kubelet plugin should be invoked to process this ResourceHandle's data once it lands on a node. This may differ from the DriverName set in ResourceClaimStatus this ResourceHandle is embedded in.", - "data": "Data contains the opaque data associated with this ResourceHandle. It is set by the controller component of the resource driver whose name matches the DriverName set in the ResourceClaimStatus this ResourceHandle is embedded in. It is set at allocation time and is intended for processing by the kubelet plugin whose name matches the DriverName set in this ResourceHandle.\n\nThe maximum size of this field is 16KiB. This may get increased in the future, but not reduced.", -} - -func (ResourceHandle) SwaggerDoc() map[string]string { - return map_ResourceHandle -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go b/vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go deleted file mode 100644 index 89d521bf05..0000000000 --- a/vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go +++ /dev/null @@ -1,498 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AllocationResult) DeepCopyInto(out *AllocationResult) { - *out = *in - if in.ResourceHandles != nil { - in, out := &in.ResourceHandles, &out.ResourceHandles - *out = make([]ResourceHandle, len(*in)) - copy(*out, *in) - } - if in.AvailableOnNodes != nil { - in, out := &in.AvailableOnNodes, &out.AvailableOnNodes - *out = new(v1.NodeSelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationResult. -func (in *AllocationResult) DeepCopy() *AllocationResult { - if in == nil { - return nil - } - out := new(AllocationResult) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSchedulingContext) DeepCopyInto(out *PodSchedulingContext) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContext. -func (in *PodSchedulingContext) DeepCopy() *PodSchedulingContext { - if in == nil { - return nil - } - out := new(PodSchedulingContext) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSchedulingContext) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSchedulingContextList) DeepCopyInto(out *PodSchedulingContextList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodSchedulingContext, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextList. -func (in *PodSchedulingContextList) DeepCopy() *PodSchedulingContextList { - if in == nil { - return nil - } - out := new(PodSchedulingContextList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSchedulingContextList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSchedulingContextSpec) DeepCopyInto(out *PodSchedulingContextSpec) { - *out = *in - if in.PotentialNodes != nil { - in, out := &in.PotentialNodes, &out.PotentialNodes - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextSpec. -func (in *PodSchedulingContextSpec) DeepCopy() *PodSchedulingContextSpec { - if in == nil { - return nil - } - out := new(PodSchedulingContextSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodSchedulingContextStatus) DeepCopyInto(out *PodSchedulingContextStatus) { - *out = *in - if in.ResourceClaims != nil { - in, out := &in.ResourceClaims, &out.ResourceClaims - *out = make([]ResourceClaimSchedulingStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextStatus. -func (in *PodSchedulingContextStatus) DeepCopy() *PodSchedulingContextStatus { - if in == nil { - return nil - } - out := new(PodSchedulingContextStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaim) DeepCopyInto(out *ResourceClaim) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaim. -func (in *ResourceClaim) DeepCopy() *ResourceClaim { - if in == nil { - return nil - } - out := new(ResourceClaim) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceClaim) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaimConsumerReference) DeepCopyInto(out *ResourceClaimConsumerReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimConsumerReference. -func (in *ResourceClaimConsumerReference) DeepCopy() *ResourceClaimConsumerReference { - if in == nil { - return nil - } - out := new(ResourceClaimConsumerReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaimList) DeepCopyInto(out *ResourceClaimList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ResourceClaim, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimList. -func (in *ResourceClaimList) DeepCopy() *ResourceClaimList { - if in == nil { - return nil - } - out := new(ResourceClaimList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceClaimList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaimParametersReference) DeepCopyInto(out *ResourceClaimParametersReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimParametersReference. 
-func (in *ResourceClaimParametersReference) DeepCopy() *ResourceClaimParametersReference { - if in == nil { - return nil - } - out := new(ResourceClaimParametersReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaimSchedulingStatus) DeepCopyInto(out *ResourceClaimSchedulingStatus) { - *out = *in - if in.UnsuitableNodes != nil { - in, out := &in.UnsuitableNodes, &out.UnsuitableNodes - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimSchedulingStatus. -func (in *ResourceClaimSchedulingStatus) DeepCopy() *ResourceClaimSchedulingStatus { - if in == nil { - return nil - } - out := new(ResourceClaimSchedulingStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaimSpec) DeepCopyInto(out *ResourceClaimSpec) { - *out = *in - if in.ParametersRef != nil { - in, out := &in.ParametersRef, &out.ParametersRef - *out = new(ResourceClaimParametersReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimSpec. -func (in *ResourceClaimSpec) DeepCopy() *ResourceClaimSpec { - if in == nil { - return nil - } - out := new(ResourceClaimSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaimStatus) DeepCopyInto(out *ResourceClaimStatus) { - *out = *in - if in.Allocation != nil { - in, out := &in.Allocation, &out.Allocation - *out = new(AllocationResult) - (*in).DeepCopyInto(*out) - } - if in.ReservedFor != nil { - in, out := &in.ReservedFor, &out.ReservedFor - *out = make([]ResourceClaimConsumerReference, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimStatus. -func (in *ResourceClaimStatus) DeepCopy() *ResourceClaimStatus { - if in == nil { - return nil - } - out := new(ResourceClaimStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaimTemplate) DeepCopyInto(out *ResourceClaimTemplate) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplate. -func (in *ResourceClaimTemplate) DeepCopy() *ResourceClaimTemplate { - if in == nil { - return nil - } - out := new(ResourceClaimTemplate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceClaimTemplate) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ResourceClaimTemplateList) DeepCopyInto(out *ResourceClaimTemplateList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ResourceClaimTemplate, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateList. -func (in *ResourceClaimTemplateList) DeepCopy() *ResourceClaimTemplateList { - if in == nil { - return nil - } - out := new(ResourceClaimTemplateList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceClaimTemplateList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaimTemplateSpec) DeepCopyInto(out *ResourceClaimTemplateSpec) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateSpec. -func (in *ResourceClaimTemplateSpec) DeepCopy() *ResourceClaimTemplateSpec { - if in == nil { - return nil - } - out := new(ResourceClaimTemplateSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClass) DeepCopyInto(out *ResourceClass) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.ParametersRef != nil { - in, out := &in.ParametersRef, &out.ParametersRef - *out = new(ResourceClassParametersReference) - **out = **in - } - if in.SuitableNodes != nil { - in, out := &in.SuitableNodes, &out.SuitableNodes - *out = new(v1.NodeSelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClass. -func (in *ResourceClass) DeepCopy() *ResourceClass { - if in == nil { - return nil - } - out := new(ResourceClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClassList) DeepCopyInto(out *ResourceClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ResourceClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClassList. -func (in *ResourceClassList) DeepCopy() *ResourceClassList { - if in == nil { - return nil - } - out := new(ResourceClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *ResourceClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClassParametersReference) DeepCopyInto(out *ResourceClassParametersReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClassParametersReference. -func (in *ResourceClassParametersReference) DeepCopy() *ResourceClassParametersReference { - if in == nil { - return nil - } - out := new(ResourceClassParametersReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceHandle) DeepCopyInto(out *ResourceHandle) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceHandle. -func (in *ResourceHandle) DeepCopy() *ResourceHandle { - if in == nil { - return nil - } - out := new(ResourceHandle) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/resource/v1alpha2/doc.go b/vendor/k8s.io/api/resource/v1alpha3/doc.go similarity index 84% rename from vendor/k8s.io/api/resource/v1alpha2/doc.go rename to vendor/k8s.io/api/resource/v1alpha3/doc.go index d9c20e089d..aeb66561fb 100644 --- a/vendor/k8s.io/api/resource/v1alpha2/doc.go +++ b/vendor/k8s.io/api/resource/v1alpha3/doc.go @@ -20,5 +20,5 @@ limitations under the License. // +groupName=resource.k8s.io -// Package v1alpha2 is the v1alpha2 version of the resource API. -package v1alpha2 // import "k8s.io/api/resource/v1alpha2" +// Package v1alpha3 is the v1alpha3 version of the resource API. +package v1alpha3 // import "k8s.io/api/resource/v1alpha3" diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go b/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go new file mode 100644 index 0000000000..4ac01cc6f3 --- /dev/null +++ b/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go @@ -0,0 +1,8987 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/resource/v1alpha3/generated.proto + +package v1alpha3 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AllocationResult) Reset() { *m = AllocationResult{} } +func (*AllocationResult) ProtoMessage() {} +func (*AllocationResult) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{0} +} +func (m *AllocationResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllocationResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocationResult.Merge(m, src) +} +func (m *AllocationResult) XXX_Size() int { + return m.Size() +} +func (m *AllocationResult) XXX_DiscardUnknown() { + xxx_messageInfo_AllocationResult.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocationResult proto.InternalMessageInfo + +func (m *BasicDevice) Reset() { *m = BasicDevice{} } +func (*BasicDevice) ProtoMessage() {} +func (*BasicDevice) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{1} +} +func (m *BasicDevice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BasicDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BasicDevice) XXX_Merge(src proto.Message) { + xxx_messageInfo_BasicDevice.Merge(m, src) +} +func (m *BasicDevice) XXX_Size() int { + return m.Size() +} +func (m *BasicDevice) XXX_DiscardUnknown() { + xxx_messageInfo_BasicDevice.DiscardUnknown(m) +} + +var xxx_messageInfo_BasicDevice proto.InternalMessageInfo + +func (m *CELDeviceSelector) Reset() { *m = CELDeviceSelector{} } +func (*CELDeviceSelector) ProtoMessage() {} +func (*CELDeviceSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{2} +} +func (m *CELDeviceSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CELDeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CELDeviceSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_CELDeviceSelector.Merge(m, src) +} +func (m *CELDeviceSelector) XXX_Size() int { + return m.Size() +} +func (m *CELDeviceSelector) XXX_DiscardUnknown() { + xxx_messageInfo_CELDeviceSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_CELDeviceSelector proto.InternalMessageInfo + +func (m *Device) Reset() { *m = Device{} } +func (*Device) ProtoMessage() {} +func (*Device) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{3} +} +func (m *Device) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Device) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Device) XXX_Merge(src proto.Message) { + xxx_messageInfo_Device.Merge(m, src) +} +func (m *Device) XXX_Size() int { + return m.Size() +} +func (m *Device) XXX_DiscardUnknown() { + xxx_messageInfo_Device.DiscardUnknown(m) +} + +var xxx_messageInfo_Device proto.InternalMessageInfo + +func (m *DeviceAllocationConfiguration) Reset() { *m = 
DeviceAllocationConfiguration{} } +func (*DeviceAllocationConfiguration) ProtoMessage() {} +func (*DeviceAllocationConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{4} +} +func (m *DeviceAllocationConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceAllocationConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceAllocationConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceAllocationConfiguration.Merge(m, src) +} +func (m *DeviceAllocationConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceAllocationConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceAllocationConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceAllocationConfiguration proto.InternalMessageInfo + +func (m *DeviceAllocationResult) Reset() { *m = DeviceAllocationResult{} } +func (*DeviceAllocationResult) ProtoMessage() {} +func (*DeviceAllocationResult) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{5} +} +func (m *DeviceAllocationResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceAllocationResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceAllocationResult.Merge(m, src) +} +func (m *DeviceAllocationResult) XXX_Size() int { + return m.Size() +} +func (m *DeviceAllocationResult) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceAllocationResult.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceAllocationResult proto.InternalMessageInfo + +func (m *DeviceAttribute) Reset() { *m = DeviceAttribute{} } +func (*DeviceAttribute) ProtoMessage() {} +func (*DeviceAttribute) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{6} +} +func (m *DeviceAttribute) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceAttribute) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceAttribute.Merge(m, src) +} +func (m *DeviceAttribute) XXX_Size() int { + return m.Size() +} +func (m *DeviceAttribute) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceAttribute.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceAttribute proto.InternalMessageInfo + +func (m *DeviceClaim) Reset() { *m = DeviceClaim{} } +func (*DeviceClaim) ProtoMessage() {} +func (*DeviceClaim) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{7} +} +func (m *DeviceClaim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClaim) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClaim.Merge(m, src) +} +func (m *DeviceClaim) XXX_Size() int { + return m.Size() +} +func (m *DeviceClaim) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClaim.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClaim proto.InternalMessageInfo 
+ +func (m *DeviceClaimConfiguration) Reset() { *m = DeviceClaimConfiguration{} } +func (*DeviceClaimConfiguration) ProtoMessage() {} +func (*DeviceClaimConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{8} +} +func (m *DeviceClaimConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClaimConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClaimConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClaimConfiguration.Merge(m, src) +} +func (m *DeviceClaimConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceClaimConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClaimConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClaimConfiguration proto.InternalMessageInfo + +func (m *DeviceClass) Reset() { *m = DeviceClass{} } +func (*DeviceClass) ProtoMessage() {} +func (*DeviceClass) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{9} +} +func (m *DeviceClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClass.Merge(m, src) +} +func (m *DeviceClass) XXX_Size() int { + return m.Size() +} +func (m *DeviceClass) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClass.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClass proto.InternalMessageInfo + +func (m *DeviceClassConfiguration) Reset() { *m = DeviceClassConfiguration{} } +func (*DeviceClassConfiguration) ProtoMessage() {} +func (*DeviceClassConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{10} +} +func (m *DeviceClassConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClassConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClassConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClassConfiguration.Merge(m, src) +} +func (m *DeviceClassConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceClassConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClassConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClassConfiguration proto.InternalMessageInfo + +func (m *DeviceClassList) Reset() { *m = DeviceClassList{} } +func (*DeviceClassList) ProtoMessage() {} +func (*DeviceClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{11} +} +func (m *DeviceClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClassList.Merge(m, src) +} +func (m *DeviceClassList) XXX_Size() int { + return m.Size() +} +func (m *DeviceClassList) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClassList 
proto.InternalMessageInfo + +func (m *DeviceClassSpec) Reset() { *m = DeviceClassSpec{} } +func (*DeviceClassSpec) ProtoMessage() {} +func (*DeviceClassSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{12} +} +func (m *DeviceClassSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClassSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClassSpec.Merge(m, src) +} +func (m *DeviceClassSpec) XXX_Size() int { + return m.Size() +} +func (m *DeviceClassSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClassSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClassSpec proto.InternalMessageInfo + +func (m *DeviceConfiguration) Reset() { *m = DeviceConfiguration{} } +func (*DeviceConfiguration) ProtoMessage() {} +func (*DeviceConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{13} +} +func (m *DeviceConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceConfiguration.Merge(m, src) +} +func (m *DeviceConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceConfiguration proto.InternalMessageInfo + +func (m *DeviceConstraint) Reset() { *m = DeviceConstraint{} } +func (*DeviceConstraint) ProtoMessage() {} +func (*DeviceConstraint) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{14} +} +func (m *DeviceConstraint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceConstraint) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceConstraint.Merge(m, src) +} +func (m *DeviceConstraint) XXX_Size() int { + return m.Size() +} +func (m *DeviceConstraint) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceConstraint.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceConstraint proto.InternalMessageInfo + +func (m *DeviceRequest) Reset() { *m = DeviceRequest{} } +func (*DeviceRequest) ProtoMessage() {} +func (*DeviceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{15} +} +func (m *DeviceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceRequest.Merge(m, src) +} +func (m *DeviceRequest) XXX_Size() int { + return m.Size() +} +func (m *DeviceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceRequest proto.InternalMessageInfo + +func (m *DeviceRequestAllocationResult) Reset() { *m = 
DeviceRequestAllocationResult{} } +func (*DeviceRequestAllocationResult) ProtoMessage() {} +func (*DeviceRequestAllocationResult) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{16} +} +func (m *DeviceRequestAllocationResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceRequestAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceRequestAllocationResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceRequestAllocationResult.Merge(m, src) +} +func (m *DeviceRequestAllocationResult) XXX_Size() int { + return m.Size() +} +func (m *DeviceRequestAllocationResult) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceRequestAllocationResult.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceRequestAllocationResult proto.InternalMessageInfo + +func (m *DeviceSelector) Reset() { *m = DeviceSelector{} } +func (*DeviceSelector) ProtoMessage() {} +func (*DeviceSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{17} +} +func (m *DeviceSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceSelector.Merge(m, src) +} +func (m *DeviceSelector) XXX_Size() int { + return m.Size() +} +func (m *DeviceSelector) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceSelector proto.InternalMessageInfo + +func (m *OpaqueDeviceConfiguration) Reset() { *m = OpaqueDeviceConfiguration{} } +func (*OpaqueDeviceConfiguration) ProtoMessage() {} +func (*OpaqueDeviceConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{18} +} +func (m *OpaqueDeviceConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OpaqueDeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OpaqueDeviceConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_OpaqueDeviceConfiguration.Merge(m, src) +} +func (m *OpaqueDeviceConfiguration) XXX_Size() int { + return m.Size() +} +func (m *OpaqueDeviceConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_OpaqueDeviceConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_OpaqueDeviceConfiguration proto.InternalMessageInfo + +func (m *PodSchedulingContext) Reset() { *m = PodSchedulingContext{} } +func (*PodSchedulingContext) ProtoMessage() {} +func (*PodSchedulingContext) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{19} +} +func (m *PodSchedulingContext) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSchedulingContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSchedulingContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSchedulingContext.Merge(m, src) +} +func (m *PodSchedulingContext) XXX_Size() int { + return m.Size() +} +func (m *PodSchedulingContext) 
XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingContext.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSchedulingContext proto.InternalMessageInfo + +func (m *PodSchedulingContextList) Reset() { *m = PodSchedulingContextList{} } +func (*PodSchedulingContextList) ProtoMessage() {} +func (*PodSchedulingContextList) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{20} +} +func (m *PodSchedulingContextList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSchedulingContextList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSchedulingContextList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSchedulingContextList.Merge(m, src) +} +func (m *PodSchedulingContextList) XXX_Size() int { + return m.Size() +} +func (m *PodSchedulingContextList) XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingContextList.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSchedulingContextList proto.InternalMessageInfo + +func (m *PodSchedulingContextSpec) Reset() { *m = PodSchedulingContextSpec{} } +func (*PodSchedulingContextSpec) ProtoMessage() {} +func (*PodSchedulingContextSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{21} +} +func (m *PodSchedulingContextSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSchedulingContextSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSchedulingContextSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSchedulingContextSpec.Merge(m, src) +} +func (m *PodSchedulingContextSpec) XXX_Size() int { + return m.Size() +} +func (m *PodSchedulingContextSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingContextSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSchedulingContextSpec proto.InternalMessageInfo + +func (m *PodSchedulingContextStatus) Reset() { *m = PodSchedulingContextStatus{} } +func (*PodSchedulingContextStatus) ProtoMessage() {} +func (*PodSchedulingContextStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{22} +} +func (m *PodSchedulingContextStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSchedulingContextStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSchedulingContextStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSchedulingContextStatus.Merge(m, src) +} +func (m *PodSchedulingContextStatus) XXX_Size() int { + return m.Size() +} +func (m *PodSchedulingContextStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingContextStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSchedulingContextStatus proto.InternalMessageInfo + +func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } +func (*ResourceClaim) ProtoMessage() {} +func (*ResourceClaim) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{23} +} +func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil 
+} +func (m *ResourceClaim) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaim.Merge(m, src) +} +func (m *ResourceClaim) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaim) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaim.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo + +func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} } +func (*ResourceClaimConsumerReference) ProtoMessage() {} +func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{24} +} +func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimConsumerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimConsumerReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimConsumerReference.Merge(m, src) +} +func (m *ResourceClaimConsumerReference) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimConsumerReference) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimConsumerReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo + +func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} } +func (*ResourceClaimList) ProtoMessage() {} +func (*ResourceClaimList) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{25} +} +func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimList.Merge(m, src) +} +func (m *ResourceClaimList) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimList.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo + +func (m *ResourceClaimSchedulingStatus) Reset() { *m = ResourceClaimSchedulingStatus{} } +func (*ResourceClaimSchedulingStatus) ProtoMessage() {} +func (*ResourceClaimSchedulingStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{26} +} +func (m *ResourceClaimSchedulingStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimSchedulingStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimSchedulingStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimSchedulingStatus.Merge(m, src) +} +func (m *ResourceClaimSchedulingStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimSchedulingStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimSchedulingStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimSchedulingStatus proto.InternalMessageInfo + +func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} } +func (*ResourceClaimSpec) ProtoMessage() {} +func (*ResourceClaimSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{27} +} +func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error { + 
return m.Unmarshal(b) +} +func (m *ResourceClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimSpec.Merge(m, src) +} +func (m *ResourceClaimSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo + +func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} } +func (*ResourceClaimStatus) ProtoMessage() {} +func (*ResourceClaimStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{28} +} +func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimStatus.Merge(m, src) +} +func (m *ResourceClaimStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo + +func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} } +func (*ResourceClaimTemplate) ProtoMessage() {} +func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{29} +} +func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimTemplate.Merge(m, src) +} +func (m *ResourceClaimTemplate) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo + +func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} } +func (*ResourceClaimTemplateList) ProtoMessage() {} +func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{30} +} +func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimTemplateList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimTemplateList.Merge(m, src) +} +func (m *ResourceClaimTemplateList) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimTemplateList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimTemplateList.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo + +func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} } +func (*ResourceClaimTemplateSpec) ProtoMessage() {} +func 
(*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{31} +} +func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimTemplateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimTemplateSpec.Merge(m, src) +} +func (m *ResourceClaimTemplateSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimTemplateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimTemplateSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo + +func (m *ResourcePool) Reset() { *m = ResourcePool{} } +func (*ResourcePool) ProtoMessage() {} +func (*ResourcePool) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{32} +} +func (m *ResourcePool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourcePool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourcePool) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourcePool.Merge(m, src) +} +func (m *ResourcePool) XXX_Size() int { + return m.Size() +} +func (m *ResourcePool) XXX_DiscardUnknown() { + xxx_messageInfo_ResourcePool.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourcePool proto.InternalMessageInfo + +func (m *ResourceSlice) Reset() { *m = ResourceSlice{} } +func (*ResourceSlice) ProtoMessage() {} +func (*ResourceSlice) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{33} +} +func (m *ResourceSlice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceSlice) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSlice.Merge(m, src) +} +func (m *ResourceSlice) XXX_Size() int { + return m.Size() +} +func (m *ResourceSlice) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSlice.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceSlice proto.InternalMessageInfo + +func (m *ResourceSliceList) Reset() { *m = ResourceSliceList{} } +func (*ResourceSliceList) ProtoMessage() {} +func (*ResourceSliceList) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{34} +} +func (m *ResourceSliceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceSliceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSliceList.Merge(m, src) +} +func (m *ResourceSliceList) XXX_Size() int { + return m.Size() +} +func (m *ResourceSliceList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSliceList.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceSliceList proto.InternalMessageInfo + +func (m *ResourceSliceSpec) Reset() { *m = ResourceSliceSpec{} } +func (*ResourceSliceSpec) ProtoMessage() {} +func (*ResourceSliceSpec) Descriptor() ([]byte, []int) { + return 
fileDescriptor_66649ee9bbcd89d2, []int{35} +} +func (m *ResourceSliceSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceSliceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceSliceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSliceSpec.Merge(m, src) +} +func (m *ResourceSliceSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceSliceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSliceSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceSliceSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1alpha3.AllocationResult") + proto.RegisterType((*BasicDevice)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice") + proto.RegisterMapType((map[QualifiedName]DeviceAttribute)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice.AttributesEntry") + proto.RegisterMapType((map[QualifiedName]resource.Quantity)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice.CapacityEntry") + proto.RegisterType((*CELDeviceSelector)(nil), "k8s.io.api.resource.v1alpha3.CELDeviceSelector") + proto.RegisterType((*Device)(nil), "k8s.io.api.resource.v1alpha3.Device") + proto.RegisterType((*DeviceAllocationConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceAllocationConfiguration") + proto.RegisterType((*DeviceAllocationResult)(nil), "k8s.io.api.resource.v1alpha3.DeviceAllocationResult") + proto.RegisterType((*DeviceAttribute)(nil), "k8s.io.api.resource.v1alpha3.DeviceAttribute") + proto.RegisterType((*DeviceClaim)(nil), "k8s.io.api.resource.v1alpha3.DeviceClaim") + proto.RegisterType((*DeviceClaimConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceClaimConfiguration") + proto.RegisterType((*DeviceClass)(nil), "k8s.io.api.resource.v1alpha3.DeviceClass") + proto.RegisterType((*DeviceClassConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceClassConfiguration") + proto.RegisterType((*DeviceClassList)(nil), "k8s.io.api.resource.v1alpha3.DeviceClassList") + proto.RegisterType((*DeviceClassSpec)(nil), "k8s.io.api.resource.v1alpha3.DeviceClassSpec") + proto.RegisterType((*DeviceConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceConfiguration") + proto.RegisterType((*DeviceConstraint)(nil), "k8s.io.api.resource.v1alpha3.DeviceConstraint") + proto.RegisterType((*DeviceRequest)(nil), "k8s.io.api.resource.v1alpha3.DeviceRequest") + proto.RegisterType((*DeviceRequestAllocationResult)(nil), "k8s.io.api.resource.v1alpha3.DeviceRequestAllocationResult") + proto.RegisterType((*DeviceSelector)(nil), "k8s.io.api.resource.v1alpha3.DeviceSelector") + proto.RegisterType((*OpaqueDeviceConfiguration)(nil), "k8s.io.api.resource.v1alpha3.OpaqueDeviceConfiguration") + proto.RegisterType((*PodSchedulingContext)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContext") + proto.RegisterType((*PodSchedulingContextList)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContextList") + proto.RegisterType((*PodSchedulingContextSpec)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContextSpec") + proto.RegisterType((*PodSchedulingContextStatus)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContextStatus") + proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaim") + proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimConsumerReference") + proto.RegisterType((*ResourceClaimList)(nil), 
"k8s.io.api.resource.v1alpha3.ResourceClaimList") + proto.RegisterType((*ResourceClaimSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimSchedulingStatus") + proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimSpec") + proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimStatus") + proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimTemplate") + proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimTemplateList") + proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimTemplateSpec") + proto.RegisterType((*ResourcePool)(nil), "k8s.io.api.resource.v1alpha3.ResourcePool") + proto.RegisterType((*ResourceSlice)(nil), "k8s.io.api.resource.v1alpha3.ResourceSlice") + proto.RegisterType((*ResourceSliceList)(nil), "k8s.io.api.resource.v1alpha3.ResourceSliceList") + proto.RegisterType((*ResourceSliceSpec)(nil), "k8s.io.api.resource.v1alpha3.ResourceSliceSpec") +} + +func init() { + proto.RegisterFile("k8s.io/api/resource/v1alpha3/generated.proto", fileDescriptor_66649ee9bbcd89d2) +} + +var fileDescriptor_66649ee9bbcd89d2 = []byte{ + // 2085 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x1a, 0xcd, 0x6f, 0x1c, 0x57, + 0x3d, 0xb3, 0xeb, 0xcf, 0x9f, 0xbf, 0x9f, 0x93, 0xe0, 0x9a, 0x66, 0x37, 0x99, 0x22, 0x70, 0xda, + 0x74, 0xb6, 0x71, 0x4b, 0x1b, 0x5a, 0x90, 0xf0, 0xd8, 0x6e, 0xe4, 0x28, 0x1f, 0xce, 0xdb, 0x26, + 0x34, 0x50, 0x4a, 0x9f, 0x67, 0x9f, 0xed, 0xc1, 0xb3, 0x33, 0xd3, 0x99, 0x37, 0x26, 0x16, 0x12, + 0x8a, 0xb8, 0x70, 0x8b, 0x7a, 0xe5, 0x80, 0xb8, 0x21, 0x21, 0x0e, 0x70, 0xe0, 0x88, 0x54, 0x24, + 0x90, 0xc8, 0x31, 0x08, 0x0e, 0x3d, 0x2d, 0xcd, 0x22, 0xfe, 0x89, 0x5c, 0x40, 0xef, 0xcd, 0x9b, + 0x4f, 0xcf, 0x6c, 0x66, 0xa3, 0xca, 0x2a, 0x37, 0xcf, 0xef, 0xfb, 0xfd, 0xbe, 0xdf, 0x5b, 0xc3, + 0xa5, 0x83, 0x2b, 0xbe, 0x66, 0x3a, 0x2d, 0xe2, 0x9a, 0x2d, 0x8f, 0xfa, 0x4e, 0xe0, 0x19, 0xb4, + 0x75, 0x78, 0x99, 0x58, 0xee, 0x3e, 0x79, 0xbd, 0xb5, 0x47, 0x6d, 0xea, 0x11, 0x46, 0x3b, 0x9a, + 0xeb, 0x39, 0xcc, 0x41, 0x2f, 0x86, 0xd4, 0x1a, 0x71, 0x4d, 0x2d, 0xa2, 0xd6, 0x22, 0xea, 0xe5, + 0x57, 0xf7, 0x4c, 0xb6, 0x1f, 0xec, 0x68, 0x86, 0xd3, 0x6d, 0xed, 0x39, 0x7b, 0x4e, 0x4b, 0x30, + 0xed, 0x04, 0xbb, 0xe2, 0x4b, 0x7c, 0x88, 0xbf, 0x42, 0x61, 0xcb, 0x6a, 0x4a, 0xb5, 0xe1, 0x78, + 0x5c, 0x6d, 0x5e, 0xe1, 0xf2, 0x1b, 0x09, 0x4d, 0x97, 0x18, 0xfb, 0xa6, 0x4d, 0xbd, 0xa3, 0x96, + 0x7b, 0xb0, 0x97, 0xb5, 0x77, 0x18, 0x2e, 0xbf, 0xd5, 0xa5, 0x8c, 0x14, 0xe9, 0x6a, 0x95, 0x71, + 0x79, 0x81, 0xcd, 0xcc, 0xee, 0x71, 0x35, 0x6f, 0x3e, 0x8b, 0xc1, 0x37, 0xf6, 0x69, 0x97, 0xe4, + 0xf9, 0xd4, 0xff, 0x2a, 0x30, 0xbf, 0x66, 0x59, 0x8e, 0x41, 0x98, 0xe9, 0xd8, 0x98, 0xfa, 0x81, + 0xc5, 0xd0, 0x8f, 0x60, 0xbc, 0x43, 0x0f, 0x4d, 0x83, 0xfa, 0x4b, 0xca, 0x79, 0x65, 0x65, 0x6a, + 0xf5, 0x0d, 0x6d, 0x90, 0xb3, 0xb5, 0x0d, 0x41, 0x9c, 0x17, 0xa3, 0xcf, 0x3d, 0xea, 0x35, 0x4f, + 0xf5, 0x7b, 0xcd, 0xf1, 0x10, 0xef, 0xe3, 0x48, 0x2a, 0xba, 0x0b, 0xd3, 0xb6, 0xd3, 0xa1, 0x6d, + 0x6a, 0x51, 0x83, 0x39, 0xde, 0x52, 0x5d, 0x68, 0x39, 0x9f, 0xd6, 0xc2, 0xa3, 0xa0, 0x1d, 0x5e, + 0xd6, 0x6e, 0xa6, 0xe8, 0xf4, 0xf9, 0x7e, 0xaf, 0x39, 0x9d, 0x86, 0xe0, 0x8c, 0x1c, 0xb4, 0x0a, + 0x60, 0x38, 0x36, 0xf3, 0x1c, 0xcb, 0xa2, 0xde, 0xd2, 0xc8, 0x79, 0x65, 0x65, 0x52, 0x47, 0xd2, + 0x0a, 0x58, 0x8f, 0x31, 0x38, 0x45, 0xa5, 0x7e, 0x5e, 0x87, 0x29, 0x9d, 0xf8, 0xa6, 0x11, 
0x5a, + 0x89, 0x7e, 0x06, 0x40, 0x18, 0xf3, 0xcc, 0x9d, 0x80, 0x89, 0xf3, 0xd7, 0x57, 0xa6, 0x56, 0xbf, + 0x35, 0xf8, 0xfc, 0x29, 0x76, 0x6d, 0x2d, 0xe6, 0xdd, 0xb4, 0x99, 0x77, 0xa4, 0xbf, 0x14, 0xa9, + 0x4f, 0x10, 0x3f, 0xff, 0x57, 0x73, 0xe6, 0x76, 0x40, 0x2c, 0x73, 0xd7, 0xa4, 0x9d, 0x9b, 0xa4, + 0x4b, 0x71, 0x4a, 0x23, 0x3a, 0x84, 0x09, 0x83, 0xb8, 0xc4, 0x30, 0xd9, 0xd1, 0x52, 0x4d, 0x68, + 0x7f, 0xab, 0xba, 0xf6, 0x75, 0xc9, 0x19, 0xea, 0xbe, 0x20, 0x75, 0x4f, 0x44, 0xe0, 0xe3, 0x9a, + 0x63, 0x5d, 0xcb, 0x16, 0xcc, 0xe5, 0x6c, 0x47, 0xf3, 0x50, 0x3f, 0xa0, 0x47, 0x22, 0x07, 0x26, + 0x31, 0xff, 0x13, 0xad, 0xc3, 0xe8, 0x21, 0xb1, 0x02, 0xba, 0x54, 0x13, 0x11, 0x7b, 0xb5, 0x52, + 0x5e, 0x44, 0x52, 0x71, 0xc8, 0xfb, 0x76, 0xed, 0x8a, 0xb2, 0x7c, 0x00, 0x33, 0x19, 0x5b, 0x0b, + 0x74, 0x6d, 0x64, 0x75, 0x69, 0x29, 0x5d, 0x71, 0x8a, 0x6b, 0xee, 0xc1, 0x5e, 0x56, 0xf9, 0xed, + 0x80, 0xd8, 0xcc, 0x64, 0x47, 0x29, 0x65, 0xea, 0x55, 0x58, 0x58, 0xdf, 0xbc, 0x1e, 0x5a, 0x93, + 0xce, 0x15, 0x7a, 0xdf, 0xf5, 0xa8, 0xef, 0x9b, 0x8e, 0x1d, 0xea, 0x4d, 0x72, 0x65, 0x33, 0xc6, + 0xe0, 0x14, 0x95, 0x7a, 0x08, 0x63, 0x32, 0x4b, 0xce, 0xc3, 0x88, 0x4d, 0xba, 0x54, 0xf2, 0x4d, + 0x4b, 0xbe, 0x11, 0xe1, 0x53, 0x81, 0x41, 0xd7, 0x60, 0x74, 0x87, 0x47, 0x46, 0x9a, 0x7f, 0xb1, + 0x72, 0x10, 0xf5, 0xc9, 0x7e, 0xaf, 0x39, 0x2a, 0x00, 0x38, 0x14, 0xa1, 0x3e, 0xac, 0xc1, 0xb9, + 0x7c, 0x91, 0xad, 0x3b, 0xf6, 0xae, 0xb9, 0x17, 0x78, 0xe2, 0x03, 0x7d, 0x17, 0xc6, 0x42, 0x91, + 0xd2, 0xa2, 0x15, 0x69, 0xd1, 0x58, 0x5b, 0x40, 0x9f, 0xf6, 0x9a, 0x67, 0xf3, 0xac, 0x21, 0x06, + 0x4b, 0x3e, 0xb4, 0x02, 0x13, 0x1e, 0xfd, 0x38, 0xa0, 0x3e, 0xf3, 0x45, 0xde, 0x4d, 0xea, 0xd3, + 0x3c, 0x75, 0xb0, 0x84, 0xe1, 0x18, 0x8b, 0x1e, 0x28, 0xb0, 0x18, 0x56, 0x72, 0xc6, 0x06, 0x59, + 0xc5, 0x97, 0xab, 0xe4, 0x44, 0x86, 0x51, 0xff, 0xaa, 0x34, 0x76, 0xb1, 0x00, 0x89, 0x8b, 0x54, + 0xa9, 0xff, 0x51, 0xe0, 0x6c, 0x71, 0xd7, 0x41, 0xbb, 0x30, 0xee, 0x89, 0xbf, 0xa2, 0xe2, 0x7d, + 0xa7, 0x8a, 0x41, 0xf2, 0x98, 0xe5, 0x3d, 0x2c, 0xfc, 0xf6, 0x71, 0x24, 0x1c, 0x19, 0x30, 0x66, + 0x08, 0x9b, 0x64, 0x95, 0xbe, 0x33, 0x5c, 0x8f, 0xcc, 0x7a, 0x60, 0x36, 0x0a, 0x57, 0x08, 0xc6, + 0x52, 0xb4, 0xfa, 0x5b, 0x05, 0xe6, 0x72, 0x55, 0x84, 0x1a, 0x50, 0x37, 0x6d, 0x26, 0xd2, 0xaa, + 0x1e, 0xc6, 0x68, 0xcb, 0x66, 0x77, 0x79, 0xb2, 0x63, 0x8e, 0x40, 0x17, 0x60, 0x64, 0xc7, 0x71, + 0x2c, 0x11, 0x8e, 0x09, 0x7d, 0xa6, 0xdf, 0x6b, 0x4e, 0xea, 0x8e, 0x63, 0x85, 0x14, 0x02, 0x85, + 0xbe, 0x01, 0x63, 0x3e, 0xf3, 0x4c, 0x7b, 0x4f, 0xf6, 0xc8, 0xb9, 0x7e, 0xaf, 0x39, 0xd5, 0x16, + 0x90, 0x90, 0x4c, 0xa2, 0xd1, 0xcb, 0x30, 0x7e, 0x48, 0x3d, 0x51, 0x21, 0xa3, 0x82, 0x52, 0x74, + 0xe0, 0xbb, 0x21, 0x28, 0x24, 0x8d, 0x08, 0xd4, 0xdf, 0xd7, 0x60, 0x4a, 0x06, 0xd0, 0x22, 0x66, + 0x17, 0xdd, 0x4b, 0x25, 0x54, 0x18, 0x89, 0x57, 0x86, 0x88, 0x84, 0x3e, 0x1f, 0x35, 0xaf, 0x82, + 0x0c, 0xa4, 0x30, 0x65, 0x38, 0xb6, 0xcf, 0x3c, 0x62, 0xda, 0x32, 0x5d, 0xb3, 0x0d, 0x62, 0x50, + 0xe2, 0x49, 0x36, 0x7d, 0x51, 0x2a, 0x98, 0x4a, 0x60, 0x3e, 0x4e, 0xcb, 0x45, 0x1f, 0xc6, 0x21, + 0xae, 0x0b, 0x0d, 0x6f, 0x56, 0xd2, 0xc0, 0x0f, 0x5f, 0x2d, 0xba, 0x7f, 0x53, 0x60, 0xa9, 0x8c, + 0x29, 0x53, 0x8f, 0xca, 0x73, 0xd5, 0x63, 0xed, 0xe4, 0xea, 0xf1, 0xcf, 0x4a, 0x2a, 0xf6, 0xbe, + 0x8f, 0x3e, 0x82, 0x09, 0xbe, 0xda, 0x74, 0x08, 0x23, 0x72, 0x85, 0x78, 0x6d, 0x50, 0xfb, 0xf6, + 0x35, 0x4e, 0xcd, 0xc7, 0xfd, 0xad, 0x9d, 0x1f, 0x53, 0x83, 0xdd, 0xa0, 0x8c, 0x24, 0xcd, 0x38, + 0x81, 0xe1, 0x58, 0x2a, 0xba, 0x05, 0x23, 0xbe, 0x4b, 0x8d, 0x61, 0x06, 0x91, 0x30, 0xad, 0xed, + 0x52, 0x23, 0xe9, 
0xd7, 0xfc, 0x0b, 0x0b, 0x41, 0xea, 0xaf, 0xd2, 0xc1, 0xf0, 0xfd, 0x6c, 0x30, + 0xca, 0x5c, 0xac, 0x9c, 0x9c, 0x8b, 0x3f, 0x8d, 0x5b, 0x81, 0xb0, 0xef, 0xba, 0xe9, 0x33, 0xf4, + 0xc1, 0x31, 0x37, 0x6b, 0xd5, 0xdc, 0xcc, 0xb9, 0x85, 0x93, 0xe3, 0x2a, 0x8b, 0x20, 0x29, 0x17, + 0xdf, 0x84, 0x51, 0x93, 0xd1, 0x6e, 0x54, 0x5f, 0x17, 0x2b, 0xfb, 0x58, 0x9f, 0x91, 0x52, 0x47, + 0xb7, 0x38, 0x3f, 0x0e, 0xc5, 0xa8, 0xbf, 0xab, 0x65, 0x4e, 0xc0, 0x7d, 0x8f, 0x7e, 0x08, 0x93, + 0xbe, 0x9c, 0xc8, 0x51, 0x97, 0xb8, 0x54, 0x45, 0x4f, 0xbc, 0x12, 0x2e, 0x48, 0x55, 0x93, 0x11, + 0xc4, 0xc7, 0x89, 0xc4, 0x54, 0x05, 0xd7, 0x86, 0xaa, 0xe0, 0x5c, 0xfc, 0xcb, 0x2a, 0x18, 0xdd, + 0x83, 0x19, 0x3f, 0x30, 0x19, 0xd9, 0xb1, 0x28, 0x5f, 0x4b, 0xfd, 0xca, 0x9b, 0xec, 0x42, 0xbf, + 0xd7, 0x9c, 0x69, 0xa7, 0x59, 0x71, 0x56, 0x92, 0xea, 0x41, 0x51, 0x6e, 0xa0, 0x1f, 0xc0, 0x98, + 0xe3, 0x92, 0x8f, 0x03, 0x2a, 0x03, 0xfe, 0x8c, 0xe5, 0xf0, 0x96, 0xa0, 0x2d, 0xca, 0x40, 0xe0, + 0xc7, 0x09, 0xd1, 0x58, 0x8a, 0x54, 0x1f, 0x2a, 0x30, 0x9f, 0xef, 0x93, 0x43, 0x34, 0xa2, 0x6d, + 0x98, 0xed, 0x12, 0x66, 0xec, 0xc7, 0xb3, 0x4a, 0x54, 0xe7, 0xa4, 0xbe, 0xd2, 0xef, 0x35, 0x67, + 0x6f, 0x64, 0x30, 0x4f, 0x7b, 0x4d, 0xf4, 0x6e, 0x60, 0x59, 0x47, 0xd9, 0x75, 0x34, 0xc7, 0xaf, + 0xfe, 0xa2, 0x0e, 0x33, 0x99, 0xb1, 0x50, 0x61, 0xf1, 0x5a, 0x83, 0xb9, 0x4e, 0x12, 0x47, 0x8e, + 0x90, 0x66, 0x7c, 0x45, 0x12, 0xa7, 0x93, 0x50, 0xf0, 0xe5, 0xe9, 0xb3, 0x59, 0x59, 0xff, 0xc2, + 0xb3, 0xf2, 0x2e, 0xcc, 0x92, 0x78, 0x11, 0xb8, 0xe1, 0x74, 0xa8, 0x1c, 0xc3, 0x9a, 0xe4, 0x9a, + 0x5d, 0xcb, 0x60, 0x9f, 0xf6, 0x9a, 0xa7, 0xf3, 0xeb, 0x03, 0x87, 0xe3, 0x9c, 0x14, 0xf4, 0x12, + 0x8c, 0x1a, 0x4e, 0x60, 0x33, 0x31, 0xab, 0xeb, 0x49, 0x15, 0xae, 0x73, 0x20, 0x0e, 0x71, 0xe8, + 0x9b, 0x30, 0x45, 0x3a, 0x5d, 0xd3, 0x5e, 0x33, 0x0c, 0xea, 0xfb, 0x4b, 0x63, 0x62, 0x4b, 0x88, + 0x67, 0xe1, 0x5a, 0x82, 0xc2, 0x69, 0x3a, 0xf5, 0x4f, 0x4a, 0xb4, 0x82, 0x96, 0xac, 0x4a, 0xe8, + 0x22, 0x5f, 0xbc, 0x04, 0x4a, 0x06, 0x27, 0xb5, 0x3b, 0x09, 0x30, 0x8e, 0xf0, 0xe8, 0xeb, 0x30, + 0xd6, 0xf1, 0xcc, 0x43, 0xea, 0xc9, 0xc8, 0xc4, 0xe5, 0xb5, 0x21, 0xa0, 0x58, 0x62, 0x79, 0xb0, + 0xdd, 0x68, 0x95, 0x49, 0x05, 0x7b, 0xdb, 0x71, 0x2c, 0x2c, 0x30, 0x42, 0x92, 0xb0, 0x4a, 0xba, + 0x30, 0x91, 0x14, 0xda, 0x2a, 0xb1, 0xea, 0x07, 0x30, 0x9b, 0xdb, 0xff, 0xaf, 0x41, 0xdd, 0xa0, + 0x96, 0xac, 0xa2, 0xd6, 0xe0, 0xe8, 0x1e, 0xbb, 0x3d, 0xe8, 0xe3, 0xfd, 0x5e, 0xb3, 0xbe, 0xbe, + 0x79, 0x1d, 0x73, 0x21, 0xea, 0x6f, 0x14, 0x78, 0xa1, 0xb4, 0xd2, 0x52, 0xa7, 0x55, 0x06, 0x9e, + 0x96, 0x00, 0xb8, 0xc4, 0x23, 0x5d, 0xca, 0xa8, 0xe7, 0x17, 0x0c, 0xb6, 0x6c, 0x3f, 0x97, 0x17, + 0x7b, 0x0d, 0x93, 0x9f, 0x6c, 0xde, 0x67, 0xd4, 0xe6, 0x3b, 0x58, 0x32, 0x33, 0xb7, 0x63, 0x41, + 0x38, 0x25, 0x54, 0xfd, 0x63, 0x0d, 0x4e, 0x6f, 0x3b, 0x9d, 0xb6, 0xb1, 0x4f, 0x3b, 0x81, 0x65, + 0xda, 0x7b, 0xfc, 0x52, 0x4c, 0xef, 0xb3, 0x13, 0x18, 0xd8, 0xef, 0x67, 0x06, 0xf6, 0x33, 0x1a, + 0x71, 0x91, 0x8d, 0x65, 0x93, 0x1b, 0x7d, 0xc4, 0xb7, 0x59, 0xc2, 0x82, 0xa8, 0xfb, 0x5e, 0x79, + 0x0e, 0xd9, 0x82, 0x3f, 0x89, 0x4c, 0xf8, 0x8d, 0xa5, 0x5c, 0xf5, 0xef, 0x0a, 0x2c, 0x15, 0xb1, + 0x9d, 0xc0, 0x10, 0xfe, 0x5e, 0x76, 0x08, 0xaf, 0x0e, 0x7f, 0xb6, 0x92, 0x69, 0xfc, 0x49, 0xc9, + 0x99, 0xc4, 0x58, 0xbe, 0x02, 0xd3, 0x61, 0xbb, 0xa2, 0x1d, 0x3e, 0x8d, 0x64, 0xe2, 0x9e, 0x96, + 0x82, 0xa6, 0xdb, 0x29, 0x1c, 0xce, 0x50, 0xa2, 0xb7, 0x61, 0xd6, 0x75, 0x18, 0xb5, 0x99, 0x49, + 0xac, 0x70, 0x24, 0x86, 0x97, 0x49, 0xc4, 0xfb, 0xda, 0x76, 0x06, 0x83, 0x73, 0x94, 0xea, 0x2f, + 0x15, 0x58, 0x2e, 0x8f, 0x0e, 0xfa, 0x29, 
0xcc, 0x46, 0x27, 0x16, 0xfb, 0x72, 0xc5, 0x0b, 0x1e, + 0x4e, 0xf3, 0x24, 0xb2, 0x65, 0xc8, 0xcf, 0x46, 0x3d, 0x37, 0x43, 0xe6, 0xe3, 0x9c, 0x2a, 0xf5, + 0xd7, 0x35, 0x98, 0xc9, 0x90, 0x9c, 0x40, 0xc9, 0xdc, 0xce, 0x94, 0x4c, 0x6b, 0x98, 0x63, 0x96, + 0xd5, 0xca, 0xbd, 0x5c, 0xad, 0x5c, 0x1e, 0x46, 0xe8, 0xe0, 0x22, 0xe9, 0x2b, 0xd0, 0xc8, 0xd0, + 0xf3, 0x1d, 0x22, 0xe8, 0x52, 0x0f, 0xd3, 0x5d, 0xea, 0x51, 0xdb, 0xa0, 0xe8, 0x12, 0x4c, 0x10, + 0xd7, 0xbc, 0xea, 0x39, 0x81, 0x2b, 0x53, 0x2a, 0x4e, 0xfd, 0xb5, 0xed, 0x2d, 0x01, 0xc7, 0x31, + 0x05, 0xa7, 0x8e, 0x2c, 0x92, 0x13, 0x20, 0x75, 0x27, 0x0c, 0xe1, 0x38, 0xa6, 0x88, 0x17, 0x83, + 0x91, 0xd2, 0xc5, 0x40, 0x87, 0x7a, 0x60, 0x76, 0xe4, 0x45, 0xf6, 0x35, 0x49, 0x50, 0xbf, 0xb3, + 0xb5, 0xf1, 0xb4, 0xd7, 0xbc, 0x50, 0xf6, 0x7e, 0xca, 0x8e, 0x5c, 0xea, 0x6b, 0x77, 0xb6, 0x36, + 0x30, 0x67, 0x56, 0xff, 0xa2, 0xc0, 0x42, 0xe6, 0x90, 0x27, 0xd0, 0x02, 0xb6, 0xb3, 0x2d, 0xe0, + 0x95, 0x21, 0x42, 0x56, 0x52, 0xfb, 0x0f, 0x14, 0x38, 0x37, 0xb0, 0x2c, 0x2a, 0xac, 0x59, 0xdf, + 0x81, 0xb9, 0xc0, 0xce, 0x2e, 0xbf, 0x61, 0xa5, 0x2f, 0xf2, 0x15, 0xeb, 0x4e, 0x16, 0x85, 0xf3, + 0xb4, 0xfc, 0xba, 0xb5, 0x70, 0x2c, 0x65, 0xd1, 0x7b, 0xf9, 0x97, 0xe7, 0x8b, 0x95, 0xaf, 0xdc, + 0x03, 0x9e, 0x9b, 0xb3, 0xcf, 0xc2, 0xb5, 0x4a, 0xcf, 0xc2, 0x9f, 0xd6, 0x60, 0xb1, 0x20, 0xfb, + 0xd1, 0x87, 0x00, 0xc9, 0xd6, 0x55, 0x10, 0xec, 0x02, 0x23, 0x8f, 0x3d, 0x2a, 0xcd, 0x8a, 0xf7, + 0xe0, 0x04, 0x9a, 0x92, 0x88, 0x7c, 0x98, 0xf2, 0xa8, 0x4f, 0xbd, 0x43, 0xda, 0x79, 0xd7, 0xf1, + 0x64, 0xc8, 0xbf, 0x3d, 0x44, 0xc8, 0x8f, 0x55, 0x5d, 0xb2, 0xdc, 0xe1, 0x44, 0x30, 0x4e, 0x6b, + 0x41, 0x6d, 0x38, 0xd3, 0xa1, 0x24, 0x65, 0xa6, 0x58, 0xd3, 0x68, 0x47, 0xbe, 0x21, 0x9d, 0x93, + 0x02, 0xce, 0x6c, 0x14, 0x11, 0xe1, 0x62, 0x5e, 0xf5, 0x9f, 0x0a, 0x9c, 0xc9, 0x58, 0xf6, 0x1e, + 0xed, 0xba, 0x16, 0x61, 0xf4, 0x04, 0x3a, 0xe7, 0xbd, 0x4c, 0xe7, 0x7c, 0x6b, 0x08, 0xf7, 0x45, + 0x46, 0x96, 0xbe, 0x13, 0xfc, 0x43, 0x81, 0x17, 0x0a, 0x39, 0x4e, 0xa0, 0x13, 0xbc, 0x9f, 0xed, + 0x04, 0xaf, 0x3f, 0xc7, 0xb9, 0x4a, 0x3a, 0xc2, 0xe3, 0xb2, 0x53, 0xb5, 0xc3, 0x0d, 0xeb, 0xff, + 0x6f, 0xd4, 0xa9, 0x7f, 0x50, 0x60, 0x3a, 0xa2, 0xe4, 0x37, 0x86, 0x0a, 0x3d, 0x6d, 0x15, 0x40, + 0xfe, 0x40, 0x16, 0xbd, 0x9f, 0xd5, 0x13, 0xbb, 0xaf, 0xc6, 0x18, 0x9c, 0xa2, 0x42, 0xd7, 0x00, + 0x45, 0x16, 0xb6, 0x2d, 0xb1, 0xfb, 0xf3, 0x1b, 0x58, 0x5d, 0xf0, 0x2e, 0x4b, 0x5e, 0x84, 0x8f, + 0x51, 0xe0, 0x02, 0x2e, 0xf5, 0xaf, 0x4a, 0xb2, 0x64, 0x08, 0xf0, 0x97, 0xd5, 0xf3, 0xc2, 0xb8, + 0x52, 0xcf, 0xa7, 0x87, 0xa4, 0xa0, 0xfc, 0xd2, 0x0e, 0x49, 0x61, 0x5d, 0x49, 0x49, 0x3c, 0xac, + 0xe7, 0x4e, 0x21, 0x4a, 0xa1, 0xea, 0x65, 0xee, 0xba, 0xbc, 0xba, 0x86, 0x6e, 0x7d, 0xb9, 0x9a, + 0x39, 0x3c, 0x4d, 0x0b, 0xaf, 0xb9, 0x97, 0x60, 0xc2, 0x76, 0x3a, 0x54, 0x3c, 0x66, 0xe4, 0x56, + 0xa1, 0x9b, 0x12, 0x8e, 0x63, 0x8a, 0x63, 0x3f, 0xaf, 0x8e, 0x7c, 0x41, 0x3f, 0xaf, 0xf2, 0xf5, + 0xcd, 0x92, 0x5b, 0xfd, 0xa8, 0x98, 0x0c, 0xc9, 0xfa, 0x26, 0xe1, 0x38, 0xa6, 0x40, 0xb7, 0x92, + 0x59, 0x3e, 0x26, 0x62, 0xf2, 0xb5, 0x2a, 0xb3, 0xbc, 0x7c, 0x8c, 0xeb, 0xfa, 0xa3, 0x27, 0x8d, + 0x53, 0x8f, 0x9f, 0x34, 0x4e, 0x7d, 0xf6, 0xa4, 0x71, 0xea, 0x41, 0xbf, 0xa1, 0x3c, 0xea, 0x37, + 0x94, 0xc7, 0xfd, 0x86, 0xf2, 0x59, 0xbf, 0xa1, 0x7c, 0xde, 0x6f, 0x28, 0x9f, 0xfc, 0xbb, 0x71, + 0xea, 0xfb, 0x2f, 0x0e, 0xfa, 0x2f, 0x82, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x23, 0x3d, 0xa6, + 0x20, 0x64, 0x20, 0x00, 0x00, +} + +func (m *AllocationResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllocationResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Controller) + copy(dAtA[i:], m.Controller) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Controller))) + i-- + dAtA[i] = 0x22 + if m.NodeSelector != nil { + { + size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BasicDevice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BasicDevice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BasicDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[QualifiedName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) + copy(dAtA[i:], keysForCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Attributes) > 0 { + keysForAttributes := make([]string, 0, len(m.Attributes)) + for k := range m.Attributes { + keysForAttributes = append(keysForAttributes, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes) + for iNdEx := len(keysForAttributes) - 1; iNdEx >= 0; iNdEx-- { + v := m.Attributes[QualifiedName(keysForAttributes[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForAttributes[iNdEx]) + copy(dAtA[i:], keysForAttributes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttributes[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CELDeviceSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CELDeviceSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CELDeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ 
= i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Device) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Device) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Device) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Basic != nil { + { + size, err := m.Basic.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceAllocationConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceAllocationConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceAllocationConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Requests[iNdEx]) + copy(dAtA[i:], m.Requests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Source) + copy(dAtA[i:], m.Source) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Source))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceAllocationResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceAllocationResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceAttribute) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceAttribute) MarshalTo(dAtA []byte) (int, error) 
{ + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceAttribute) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.VersionValue != nil { + i -= len(*m.VersionValue) + copy(dAtA[i:], *m.VersionValue) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VersionValue))) + i-- + dAtA[i] = 0x2a + } + if m.StringValue != nil { + i -= len(*m.StringValue) + copy(dAtA[i:], *m.StringValue) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StringValue))) + i-- + dAtA[i] = 0x22 + } + if m.BoolValue != nil { + i-- + if *m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.IntValue != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.IntValue)) + i-- + dAtA[i] = 0x10 + } + return len(dAtA) - i, nil +} + +func (m *DeviceClaim) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClaim) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Constraints) > 0 { + for iNdEx := len(m.Constraints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Constraints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceClaimConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClaimConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClaimConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Requests[iNdEx]) + copy(dAtA[i:], m.Requests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*DeviceClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceClassConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClassConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClassConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceClassSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClassSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SuitableNodes != nil { + { + size, err := m.SuitableNodes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Selectors) > 0 { + for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} 
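+
+// The Marshal helpers in this file all follow the same generated pattern:
+// Marshal allocates a buffer of Size() bytes, MarshalToSizedBuffer fills it
+// from the end (i starts at len(dAtA) and decreases), and each field is
+// emitted value first, then its varint-encoded length via
+// encodeVarintGenerated, then a one-byte tag (0xa = field 1, wire type 2;
+// 0x12 = field 2, wire type 2; and so on). The return value is the number of
+// bytes written, which Marshal uses to slice dAtA[:n].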
+ +func (m *DeviceConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Opaque != nil { + { + size, err := m.Opaque.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeviceConstraint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceConstraint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MatchAttribute != nil { + i -= len(*m.MatchAttribute) + copy(dAtA[i:], *m.MatchAttribute) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchAttribute))) + i-- + dAtA[i] = 0x12 + } + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Requests[iNdEx]) + copy(dAtA[i:], m.Requests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.AdminAccess { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x28 + i -= len(m.AllocationMode) + copy(dAtA[i:], m.AllocationMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllocationMode))) + i-- + dAtA[i] = 0x22 + if len(m.Selectors) > 0 { + for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.DeviceClassName) + copy(dAtA[i:], m.DeviceClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeviceClassName))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceRequestAllocationResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceRequestAllocationResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceRequestAllocationResult) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Device) + copy(dAtA[i:], m.Device) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device))) + i-- + dAtA[i] = 0x22 + i -= len(m.Pool) + copy(dAtA[i:], m.Pool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pool))) + i-- + dAtA[i] = 0x1a + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0x12 + i -= len(m.Request) + copy(dAtA[i:], m.Request) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CEL != nil { + { + size, err := m.CEL.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OpaqueDeviceConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OpaqueDeviceConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OpaqueDeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSchedulingContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSchedulingContext) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSchedulingContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSchedulingContextList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil 
+} + +func (m *PodSchedulingContextList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSchedulingContextList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSchedulingContextSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSchedulingContextSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSchedulingContextSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PotentialNodes) > 0 { + for iNdEx := len(m.PotentialNodes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PotentialNodes[iNdEx]) + copy(dAtA[i:], m.PotentialNodes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PotentialNodes[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.SelectedNode) + copy(dAtA[i:], m.SelectedNode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelectedNode))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSchedulingContextStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSchedulingContextStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSchedulingContextStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourceClaims) > 0 { + for iNdEx := len(m.ResourceClaims) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceClaims[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResourceClaim) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaim) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + 
i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimConsumerReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimConsumerReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimConsumerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x2a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimSchedulingStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimSchedulingStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimSchedulingStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.UnsuitableNodes) > 0 { + for iNdEx := len(m.UnsuitableNodes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.UnsuitableNodes[iNdEx]) + copy(dAtA[i:], m.UnsuitableNodes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UnsuitableNodes[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimSpec) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Controller) + copy(dAtA[i:], m.Controller) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Controller))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.DeallocationRequested { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + if len(m.ReservedFor) > 0 { + for iNdEx := len(m.ReservedFor) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ReservedFor[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Allocation != nil { + { + size, err := m.Allocation.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResourceClaimTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimTemplate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimTemplateList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimTemplateList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m 
*ResourceClaimTemplateSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimTemplateSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourcePool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourcePool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourcePool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceSliceCount)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceSlice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceSlice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceSliceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceSliceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + 
return len(dAtA) - i, nil +} + +func (m *ResourceSliceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceSliceSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceSliceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Devices) > 0 { + for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i-- + if m.AllNodes { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + if m.NodeSelector != nil { + { + size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Pool.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AllocationResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Devices.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Controller) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BasicDevice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Attributes) > 0 { + for k, v := range m.Attributes { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *CELDeviceSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Device) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Basic != nil { + l = m.Basic.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceAllocationConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Source) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Requests) > 0 { + for _, s := range m.Requests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.DeviceConfiguration.Size() + n += 1 + 
l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceAllocationResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, e := range m.Results { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeviceAttribute) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IntValue != nil { + n += 1 + sovGenerated(uint64(*m.IntValue)) + } + if m.BoolValue != nil { + n += 2 + } + if m.StringValue != nil { + l = len(*m.StringValue) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.VersionValue != nil { + l = len(*m.VersionValue) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceClaim) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, e := range m.Requests { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Constraints) > 0 { + for _, e := range m.Constraints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeviceClaimConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, s := range m.Requests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.DeviceConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceClassConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.DeviceConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeviceClassSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Selectors) > 0 { + for _, e := range m.Selectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.SuitableNodes != nil { + l = m.SuitableNodes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Opaque != nil { + l = m.Opaque.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceConstraint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, s := range m.Requests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchAttribute != nil { + l = len(*m.MatchAttribute) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DeviceClassName) + n += 1 + l + sovGenerated(uint64(l)) + if 
len(m.Selectors) > 0 { + for _, e := range m.Selectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.AllocationMode) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Count)) + n += 2 + return n +} + +func (m *DeviceRequestAllocationResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Request) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Pool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Device) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CEL != nil { + l = m.CEL.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *OpaqueDeviceConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Parameters.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSchedulingContext) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSchedulingContextList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodSchedulingContextSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SelectedNode) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.PotentialNodes) > 0 { + for _, s := range m.PotentialNodes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodSchedulingContextStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceClaims) > 0 { + for _, e := range m.ResourceClaims { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaim) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimConsumerReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaimSchedulingStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.UnsuitableNodes) > 0 { + for _, s := range m.UnsuitableNodes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaimSpec) Size() (n int) { + if m == nil { + 
return 0 + } + var l int + _ = l + l = m.Devices.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Controller) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Allocation != nil { + l = m.Allocation.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ReservedFor) > 0 { + for _, e := range m.ReservedFor { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + return n +} + +func (m *ResourceClaimTemplate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimTemplateList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaimTemplateSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourcePool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Generation)) + n += 1 + sovGenerated(uint64(m.ResourceSliceCount)) + return n +} + +func (m *ResourceSlice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceSliceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceSliceSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Pool.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if len(m.Devices) > 0 { + for _, e := range m.Devices { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AllocationResult) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AllocationResult{`, + `Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceAllocationResult", "DeviceAllocationResult", 1), `&`, ``, 1) + `,`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v1.NodeSelector", 1) + `,`, + `Controller:` + fmt.Sprintf("%v", this.Controller) + `,`, + `}`, + }, "") + return s +} +func (this *BasicDevice) String() string { + if this == nil { + return "nil" + } + keysForAttributes := make([]string, 0, len(this.Attributes)) + for k := range this.Attributes { + keysForAttributes = append(keysForAttributes, string(k)) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes) + mapStringForAttributes := "map[QualifiedName]DeviceAttribute{" + for _, k := range keysForAttributes { + mapStringForAttributes += fmt.Sprintf("%v: %v,", k, this.Attributes[QualifiedName(k)]) + } + mapStringForAttributes += "}" + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "map[QualifiedName]resource.Quantity{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[QualifiedName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&BasicDevice{`, + `Attributes:` + mapStringForAttributes + `,`, + `Capacity:` + mapStringForCapacity + `,`, + `}`, + }, "") + return s +} +func (this *CELDeviceSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CELDeviceSelector{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *Device) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Device{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Basic:` + strings.Replace(this.Basic.String(), "BasicDevice", "BasicDevice", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceAllocationConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceAllocationConfiguration{`, + `Source:` + fmt.Sprintf("%v", this.Source) + `,`, + `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`, + `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceAllocationResult) String() string { + if this == nil { + return "nil" + } + repeatedStringForResults := "[]DeviceRequestAllocationResult{" + for _, f := range this.Results { + repeatedStringForResults += strings.Replace(strings.Replace(f.String(), "DeviceRequestAllocationResult", "DeviceRequestAllocationResult", 1), `&`, ``, 1) + "," + } + repeatedStringForResults += "}" + repeatedStringForConfig := "[]DeviceAllocationConfiguration{" + for _, f := range this.Config { + repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceAllocationConfiguration", "DeviceAllocationConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForConfig += "}" + s := strings.Join([]string{`&DeviceAllocationResult{`, + `Results:` + repeatedStringForResults + `,`, + `Config:` + repeatedStringForConfig + `,`, + `}`, + }, "") + return s +} +func (this *DeviceAttribute) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceAttribute{`, + `IntValue:` + valueToStringGenerated(this.IntValue) + `,`, + `BoolValue:` + valueToStringGenerated(this.BoolValue) + `,`, + `StringValue:` + valueToStringGenerated(this.StringValue) + `,`, + `VersionValue:` + valueToStringGenerated(this.VersionValue) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClaim) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequests := "[]DeviceRequest{" + for _, f := range this.Requests { + repeatedStringForRequests += strings.Replace(strings.Replace(f.String(), "DeviceRequest", "DeviceRequest", 1), `&`, ``, 1) + "," + } + repeatedStringForRequests += "}" + repeatedStringForConstraints := 
"[]DeviceConstraint{" + for _, f := range this.Constraints { + repeatedStringForConstraints += strings.Replace(strings.Replace(f.String(), "DeviceConstraint", "DeviceConstraint", 1), `&`, ``, 1) + "," + } + repeatedStringForConstraints += "}" + repeatedStringForConfig := "[]DeviceClaimConfiguration{" + for _, f := range this.Config { + repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceClaimConfiguration", "DeviceClaimConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForConfig += "}" + s := strings.Join([]string{`&DeviceClaim{`, + `Requests:` + repeatedStringForRequests + `,`, + `Constraints:` + repeatedStringForConstraints + `,`, + `Config:` + repeatedStringForConfig + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClaimConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceClaimConfiguration{`, + `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`, + `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeviceClassSpec", "DeviceClassSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClassConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceClassConfiguration{`, + `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]DeviceClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DeviceClass", "DeviceClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&DeviceClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClassSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForSelectors := "[]DeviceSelector{" + for _, f := range this.Selectors { + repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + "," + } + repeatedStringForSelectors += "}" + repeatedStringForConfig := "[]DeviceClassConfiguration{" + for _, f := range this.Config { + repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceClassConfiguration", "DeviceClassConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForConfig += "}" + s := strings.Join([]string{`&DeviceClassSpec{`, + `Selectors:` + repeatedStringForSelectors + `,`, + `Config:` + repeatedStringForConfig + `,`, + `SuitableNodes:` + strings.Replace(fmt.Sprintf("%v", this.SuitableNodes), "NodeSelector", "v1.NodeSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceConfiguration) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&DeviceConfiguration{`, + `Opaque:` + strings.Replace(this.Opaque.String(), "OpaqueDeviceConfiguration", "OpaqueDeviceConfiguration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceConstraint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceConstraint{`, + `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`, + `MatchAttribute:` + valueToStringGenerated(this.MatchAttribute) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForSelectors := "[]DeviceSelector{" + for _, f := range this.Selectors { + repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + "," + } + repeatedStringForSelectors += "}" + s := strings.Join([]string{`&DeviceRequest{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DeviceClassName:` + fmt.Sprintf("%v", this.DeviceClassName) + `,`, + `Selectors:` + repeatedStringForSelectors + `,`, + `AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `AdminAccess:` + fmt.Sprintf("%v", this.AdminAccess) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceRequestAllocationResult) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceRequestAllocationResult{`, + `Request:` + fmt.Sprintf("%v", this.Request) + `,`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `Pool:` + fmt.Sprintf("%v", this.Pool) + `,`, + `Device:` + fmt.Sprintf("%v", this.Device) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceSelector{`, + `CEL:` + strings.Replace(this.CEL.String(), "CELDeviceSelector", "CELDeviceSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *OpaqueDeviceConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OpaqueDeviceConfiguration{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `Parameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Parameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSchedulingContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSchedulingContext{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSchedulingContextSpec", "PodSchedulingContextSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodSchedulingContextStatus", "PodSchedulingContextStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSchedulingContextList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PodSchedulingContext{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSchedulingContext", "PodSchedulingContext", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodSchedulingContextList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} 
+func (this *PodSchedulingContextSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSchedulingContextSpec{`, + `SelectedNode:` + fmt.Sprintf("%v", this.SelectedNode) + `,`, + `PotentialNodes:` + fmt.Sprintf("%v", this.PotentialNodes) + `,`, + `}`, + }, "") + return s +} +func (this *PodSchedulingContextStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForResourceClaims := "[]ResourceClaimSchedulingStatus{" + for _, f := range this.ResourceClaims { + repeatedStringForResourceClaims += strings.Replace(strings.Replace(f.String(), "ResourceClaimSchedulingStatus", "ResourceClaimSchedulingStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceClaims += "}" + s := strings.Join([]string{`&PodSchedulingContextStatus{`, + `ResourceClaims:` + repeatedStringForResourceClaims + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaim) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaim{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceClaimStatus", "ResourceClaimStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimConsumerReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimConsumerReference{`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ResourceClaim{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaim", "ResourceClaim", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceClaimList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimSchedulingStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimSchedulingStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UnsuitableNodes:` + fmt.Sprintf("%v", this.UnsuitableNodes) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimSpec{`, + `Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceClaim", "DeviceClaim", 1), `&`, ``, 1) + `,`, + `Controller:` + fmt.Sprintf("%v", this.Controller) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForReservedFor := "[]ResourceClaimConsumerReference{" + for _, f := range this.ReservedFor { + repeatedStringForReservedFor += strings.Replace(strings.Replace(f.String(), "ResourceClaimConsumerReference", "ResourceClaimConsumerReference", 1), `&`, ``, 1) + "," + } + repeatedStringForReservedFor += "}" + s := 
strings.Join([]string{`&ResourceClaimStatus{`, + `Allocation:` + strings.Replace(this.Allocation.String(), "AllocationResult", "AllocationResult", 1) + `,`, + `ReservedFor:` + repeatedStringForReservedFor + `,`, + `DeallocationRequested:` + fmt.Sprintf("%v", this.DeallocationRequested) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimTemplate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimTemplateSpec", "ResourceClaimTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimTemplateList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ResourceClaimTemplate{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaimTemplate", "ResourceClaimTemplate", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceClaimTemplateList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimTemplateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimTemplateSpec{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourcePool) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourcePool{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `ResourceSliceCount:` + fmt.Sprintf("%v", this.ResourceSliceCount) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceSlice) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceSlice{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceSliceSpec", "ResourceSliceSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceSliceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ResourceSlice{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceSlice", "ResourceSlice", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceSliceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceSliceSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForDevices := "[]Device{" + for _, f := range this.Devices { + repeatedStringForDevices += strings.Replace(strings.Replace(f.String(), "Device", "Device", 1), `&`, ``, 1) + "," + } + repeatedStringForDevices += 
"}" + s := strings.Join([]string{`&ResourceSliceSpec{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `Pool:` + strings.Replace(strings.Replace(this.Pool.String(), "ResourcePool", "ResourcePool", 1), `&`, ``, 1) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v1.NodeSelector", 1) + `,`, + `AllNodes:` + fmt.Sprintf("%v", this.AllNodes) + `,`, + `Devices:` + repeatedStringForDevices + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AllocationResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllocationResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = &v1.NodeSelector{} + } + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Controller = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + 
default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BasicDevice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BasicDevice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BasicDevice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attributes == nil { + m.Attributes = make(map[QualifiedName]DeviceAttribute) + } + var mapkey QualifiedName + mapvalue := &DeviceAttribute{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &DeviceAttribute{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if 
(skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attributes[QualifiedName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Capacity == nil { + m.Capacity = make(map[QualifiedName]resource.Quantity) + } + var mapkey QualifiedName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Capacity[QualifiedName(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CELDeviceSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CELDeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Device) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Device: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Device: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Basic", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Basic == nil { + m.Basic = &BasicDevice{} + } + if err := 
m.Basic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceAllocationConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceAllocationConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = AllocationConfigSource(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 
skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceAllocationResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Results = append(m.Results, DeviceRequestAllocationResult{}) + if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = append(m.Config, DeviceAllocationConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceAttribute) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceAttribute: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceAttribute: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IntValue = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.BoolValue = &b + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.StringValue = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VersionValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.VersionValue = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + 
if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, DeviceRequest{}) + if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Constraints = append(m.Constraints, DeviceConstraint{}) + if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = append(m.Config, DeviceClaimConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClaimConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClaimConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClassConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClassConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = 
append(m.Items, DeviceClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClassSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selectors = append(m.Selectors, DeviceSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = append(m.Config, DeviceClassConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SuitableNodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SuitableNodes == nil { + m.SuitableNodes = &v1.NodeSelector{} + } + if err := m.SuitableNodes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return 
err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Opaque", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Opaque == nil { + m.Opaque = &OpaqueDeviceConfiguration{} + } + if err := m.Opaque.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceConstraint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceConstraint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceConstraint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchAttribute", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FullyQualifiedName(dAtA[iNdEx:postIndex]) + m.MatchAttribute = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeviceClassName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selectors = append(m.Selectors, DeviceSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AdminAccess = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceRequestAllocationResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceRequestAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Request = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pool = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Device = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CEL", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CEL == nil { + m.CEL = &CELDeviceSelector{} + } + if err := m.CEL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OpaqueDeviceConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OpaqueDeviceConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OpaqueDeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSchedulingContext: 
wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSchedulingContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSchedulingContextList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSchedulingContextList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodSchedulingContext{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSchedulingContextSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSchedulingContextSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelectedNode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SelectedNode = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PotentialNodes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.PotentialNodes = append(m.PotentialNodes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSchedulingContextStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSchedulingContextStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSchedulingContextStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaims", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceClaims = append(m.ResourceClaims, ResourceClaimSchedulingStatus{}) + if err := m.ResourceClaims[len(m.ResourceClaims)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimConsumerReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimConsumerReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimConsumerReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ResourceClaim{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimSchedulingStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimSchedulingStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimSchedulingStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UnsuitableNodes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UnsuitableNodes = append(m.UnsuitableNodes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*ResourceClaimSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Controller = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Allocation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Allocation == nil { + m.Allocation = &AllocationResult{} + } + if err := m.Allocation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReservedFor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReservedFor = append(m.ReservedFor, ResourceClaimConsumerReference{}) + if err := m.ReservedFor[len(m.ReservedFor)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeallocationRequested", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DeallocationRequested = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimTemplateList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimTemplateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ResourceClaimTemplate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimTemplateSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimTemplateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourcePool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourcePool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourcePool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Generation |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceSliceCount", wireType) + } + m.ResourceSliceCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ResourceSliceCount |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceSlice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceSlice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceSlice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceSliceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceSliceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceSliceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ResourceSlice{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceSliceSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceSliceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceSliceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + 
if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Pool.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = &v1.NodeSelector{} + } + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllNodes", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllNodes = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Devices = append(m.Devices, Device{}) + if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } 
+ if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.proto b/vendor/k8s.io/api/resource/v1alpha3/generated.proto new file mode 100644 index 0000000000..b4428ad452 --- /dev/null +++ b/vendor/k8s.io/api/resource/v1alpha3/generated.proto @@ -0,0 +1,912 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.resource.v1alpha3; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/resource/v1alpha3"; + +// AllocationResult contains attributes of an allocated resource. +message AllocationResult { + // Devices is the result of allocating devices. + // + // +optional + optional DeviceAllocationResult devices = 1; + + // NodeSelector defines where the allocated resources are available. If + // unset, they are available everywhere. 
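The generated Unmarshal and skipGenerated functions above all repeat the same hand-rolled protobuf wire-format loop: read a base-128 varint tag, split it into field number and wire type, then decode a varint or length-delimited payload. A minimal standalone sketch of that decoding pattern follows; the helper name readVarint and the sample bytes are illustrative only and are not part of the vendored file.

```go
package main

import (
	"errors"
	"fmt"
)

var (
	errOverflow  = errors.New("proto: integer overflow")
	errTruncated = errors.New("proto: unexpected EOF")
)

// readVarint decodes a base-128 varint starting at dAtA[i] and returns the
// value together with the index just past it, mirroring the inline loops in
// the generated Unmarshal methods above.
func readVarint(dAtA []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errOverflow
		}
		if i >= len(dAtA) {
			return 0, 0, errTruncated
		}
		b := dAtA[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return v, i, nil
}

func main() {
	// Tag 0x12 encodes field 2 with wire type 2 (length-delimited),
	// followed by a 3-byte payload "abc".
	msg := []byte{0x12, 0x03, 'a', 'b', 'c'}
	tag, i, _ := readVarint(msg, 0)
	fmt.Println("field:", tag>>3, "wire type:", tag&0x7) // field: 2 wire type: 2
	length, i, _ := readVarint(msg, i)
	fmt.Printf("payload: %s\n", msg[i:i+int(length)]) // payload: abc
}
```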
+ // + // +optional + optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 3; + + // Controller is the name of the DRA driver which handled the + // allocation. That driver is also responsible for deallocating the + // claim. It is empty when the claim can be deallocated without + // involving a driver. + // + // A driver may allocate devices provided by other drivers, so this + // driver name here can be different from the driver names listed for + // the results. + // + // This is an alpha field and requires enabling the DRAControlPlaneController + // feature gate. + // + // +optional + // +featureGate=DRAControlPlaneController + optional string controller = 4; +} + +// BasicDevice defines one device instance. +message BasicDevice { + // Attributes defines the set of attributes for this device. + // The name of each attribute must be unique in that set. + // + // The maximum number of attributes and capacities combined is 32. + // + // +optional + map attributes = 1; + + // Capacity defines the set of capacities for this device. + // The name of each capacity must be unique in that set. + // + // The maximum number of attributes and capacities combined is 32. + // + // +optional + map capacity = 2; +} + +// CELDeviceSelector contains a CEL expression for selecting a device. +message CELDeviceSelector { + // Expression is a CEL expression which evaluates a single device. It + // must evaluate to true when the device under consideration satisfies + // the desired criteria, and false when it does not. Any other result + // is an error and causes allocation of devices to abort. + // + // The expression's input is an object named "device", which carries + // the following properties: + // - driver (string): the name of the driver which defines this device. + // - attributes (map[string]object): the device's attributes, grouped by prefix + // (e.g. device.attributes["dra.example.com"] evaluates to an object with all + // of the attributes which were prefixed by "dra.example.com". + // - capacity (map[string]object): the device's capacities, grouped by prefix. + // + // Example: Consider a device with driver="dra.example.com", which exposes + // two attributes named "model" and "ext.example.com/family" and which + // exposes one capacity named "modules". This input to this expression + // would have the following fields: + // + // device.driver + // device.attributes["dra.example.com"].model + // device.attributes["ext.example.com"].family + // device.capacity["dra.example.com"].modules + // + // The device.driver field can be used to check for a specific driver, + // either as a high-level precondition (i.e. you only want to consider + // devices from this driver) or as part of a multi-clause expression + // that is meant to consider devices from different drivers. + // + // The value type of each attribute is defined by the device + // definition, and users who write these expressions must consult the + // documentation for their specific drivers. The value type of each + // capacity is Quantity. + // + // If an unknown prefix is used as a lookup in either device.attributes + // or device.capacity, an empty map will be returned. Any reference to + // an unknown field will cause an evaluation error and allocation to + // abort. + // + // A robust expression should check for the existence of attributes + // before referencing them. 
+ // + // For ease of use, the cel.bind() function is enabled, and can be used + // to simplify expressions that access multiple attributes with the + // same domain. For example: + // + // cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool) + // + // +required + optional string expression = 1; +} + +// Device represents one individual hardware instance that can be selected based +// on its attributes. Besides the name, exactly one field must be set. +message Device { + // Name is unique identifier among all devices managed by + // the driver in the pool. It must be a DNS label. + // + // +required + optional string name = 1; + + // Basic defines one device instance. + // + // +optional + // +oneOf=deviceType + optional BasicDevice basic = 2; +} + +// DeviceAllocationConfiguration gets embedded in an AllocationResult. +message DeviceAllocationConfiguration { + // Source records whether the configuration comes from a class and thus + // is not something that a normal user would have been able to set + // or from a claim. + // + // +required + optional string source = 1; + + // Requests lists the names of requests where the configuration applies. + // If empty, its applies to all requests. + // + // +optional + // +listType=atomic + repeated string requests = 2; + + optional DeviceConfiguration deviceConfiguration = 3; +} + +// DeviceAllocationResult is the result of allocating devices. +message DeviceAllocationResult { + // Results lists all allocated devices. + // + // +optional + // +listType=atomic + repeated DeviceRequestAllocationResult results = 1; + + // This field is a combination of all the claim and class configuration parameters. + // Drivers can distinguish between those based on a flag. + // + // This includes configuration parameters for drivers which have no allocated + // devices in the result because it is up to the drivers which configuration + // parameters they support. They can silently ignore unknown configuration + // parameters. + // + // +optional + // +listType=atomic + repeated DeviceAllocationConfiguration config = 2; +} + +// DeviceAttribute must have exactly one field set. +message DeviceAttribute { + // IntValue is a number. + // + // +optional + // +oneOf=ValueType + optional int64 int = 2; + + // BoolValue is a true/false value. + // + // +optional + // +oneOf=ValueType + optional bool bool = 3; + + // StringValue is a string. Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + optional string string = 4; + + // VersionValue is a semantic version according to semver.org spec 2.0.0. + // Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + optional string version = 5; +} + +// DeviceClaim defines how to request devices with a ResourceClaim. +message DeviceClaim { + // Requests represent individual requests for distinct devices which + // must all be satisfied. If empty, nothing needs to be allocated. + // + // +optional + // +listType=atomic + repeated DeviceRequest requests = 1; + + // These constraints must be satisfied by the set of devices that get + // allocated for the claim. + // + // +optional + // +listType=atomic + repeated DeviceConstraint constraints = 2; + + // This field holds configuration for multiple potential drivers which + // could satisfy requests in this claim. It is ignored while allocating + // the claim. 
+ // + // +optional + // +listType=atomic + repeated DeviceClaimConfiguration config = 3; +} + +// DeviceClaimConfiguration is used for configuration parameters in DeviceClaim. +message DeviceClaimConfiguration { + // Requests lists the names of requests where the configuration applies. + // If empty, it applies to all requests. + // + // +optional + // +listType=atomic + repeated string requests = 1; + + optional DeviceConfiguration deviceConfiguration = 2; +} + +// DeviceClass is a vendor- or admin-provided resource that contains +// device configuration and selectors. It can be referenced in +// the device requests of a claim to apply these presets. +// Cluster scoped. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message DeviceClass { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines what can be allocated and how to configure it. + // + // This is mutable. Consumers have to be prepared for classes changing + // at any time, either because they get updated or replaced. Claim + // allocations are done once based on whatever was set in classes at + // the time of allocation. + // + // Changing the spec automatically increments the metadata.generation number. + optional DeviceClassSpec spec = 2; +} + +// DeviceClassConfiguration is used in DeviceClass. +message DeviceClassConfiguration { + optional DeviceConfiguration deviceConfiguration = 1; +} + +// DeviceClassList is a collection of classes. +message DeviceClassList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource classes. + repeated DeviceClass items = 2; +} + +// DeviceClassSpec is used in a [DeviceClass] to define what can be allocated +// and how to configure it. +message DeviceClassSpec { + // Each selector must be satisfied by a device which is claimed via this class. + // + // +optional + // +listType=atomic + repeated DeviceSelector selectors = 1; + + // Config defines configuration parameters that apply to each device that is claimed via this class. + // Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor + // configuration applies to exactly one driver. + // + // They are passed to the driver, but are not considered while allocating the claim. + // + // +optional + // +listType=atomic + repeated DeviceClassConfiguration config = 2; + + // Only nodes matching the selector will be considered by the scheduler + // when trying to find a Node that fits a Pod when that Pod uses + // a claim that has not been allocated yet *and* that claim + // gets allocated through a control plane controller. It is ignored + // when the claim does not use a control plane controller + // for allocation. + // + // Setting this field is optional. If unset, all Nodes are candidates. + // + // This is an alpha field and requires enabling the DRAControlPlaneController + // feature gate. + // + // +optional + // +featureGate=DRAControlPlaneController + optional .k8s.io.api.core.v1.NodeSelector suitableNodes = 3; +} + +// DeviceConfiguration must have exactly one field set. It gets embedded +// inline in some other structs which have other fields, so field names must +// not conflict with those. +message DeviceConfiguration { + // Opaque provides driver-specific configuration parameters. 
+ // + // +optional + // +oneOf=ConfigurationType + optional OpaqueDeviceConfiguration opaque = 1; +} + +// DeviceConstraint must have exactly one field set besides Requests. +message DeviceConstraint { + // Requests is a list of the one or more requests in this claim which + // must co-satisfy this constraint. If a request is fulfilled by + // multiple devices, then all of the devices must satisfy the + // constraint. If this is not specified, this constraint applies to all + // requests in this claim. + // + // +optional + // +listType=atomic + repeated string requests = 1; + + // MatchAttribute requires that all devices in question have this + // attribute and that its type and value are the same across those + // devices. + // + // For example, if you specified "dra.example.com/numa" (a hypothetical example!), + // then only devices in the same NUMA node will be chosen. A device which + // does not have that attribute will not be chosen. All devices should + // use a value of the same type for this attribute because that is part of + // its specification, but if one device doesn't, then it also will not be + // chosen. + // + // Must include the domain qualifier. + // + // +optional + // +oneOf=ConstraintType + optional string matchAttribute = 2; +} + +// DeviceRequest is a request for devices required for a claim. +// This is typically a request for a single resource like a device, but can +// also ask for several identical devices. +// +// A DeviceClassName is currently required. Clients must check that it is +// indeed set. It's absence indicates that something changed in a way that +// is not supported by the client yet, in which case it must refuse to +// handle the request. +message DeviceRequest { + // Name can be used to reference this request in a pod.spec.containers[].resources.claims + // entry and in a constraint of the claim. + // + // Must be a DNS label. + // + // +required + optional string name = 1; + + // DeviceClassName references a specific DeviceClass, which can define + // additional configuration and selectors to be inherited by this + // request. + // + // A class is required. Which classes are available depends on the cluster. + // + // Administrators may use this to restrict which devices may get + // requested by only installing classes with selectors for permitted + // devices. If users are free to request anything without restrictions, + // then administrators can create an empty DeviceClass for users + // to reference. + // + // +required + optional string deviceClassName = 2; + + // Selectors define criteria which must be satisfied by a specific + // device in order for that device to be considered for this + // request. All selectors must be satisfied for a device to be + // considered. + // + // +optional + // +listType=atomic + repeated DeviceSelector selectors = 3; + + // AllocationMode and its related fields define how devices are allocated + // to satisfy this request. Supported values are: + // + // - ExactCount: This request is for a specific number of devices. + // This is the default. The exact number is provided in the + // count field. + // + // - All: This request is for all of the matching devices in a pool. + // Allocation will fail if some devices are already allocated, + // unless adminAccess is requested. + // + // If AlloctionMode is not specified, the default mode is ExactCount. If + // the mode is ExactCount and count is not specified, the default count is + // one. Any other requests must specify this field. 
+ // + // More modes may get added in the future. Clients must refuse to handle + // requests with unknown modes. + // + // +optional + optional string allocationMode = 4; + + // Count is used only when the count mode is "ExactCount". Must be greater than zero. + // If AllocationMode is ExactCount and this field is not specified, the default is one. + // + // +optional + // +oneOf=AllocationMode + optional int64 count = 5; + + // AdminAccess indicates that this is a claim for administrative access + // to the device(s). Claims with AdminAccess are expected to be used for + // monitoring or other management services for a device. They ignore + // all ordinary claims to the device with respect to access modes and + // any resource allocations. + // + // +optional + // +default=false + optional bool adminAccess = 6; +} + +// DeviceRequestAllocationResult contains the allocation result for one request. +message DeviceRequestAllocationResult { + // Request is the name of the request in the claim which caused this + // device to be allocated. Multiple devices may have been allocated + // per request. + // + // +required + optional string request = 1; + + // Driver specifies the name of the DRA driver whose kubelet + // plugin should be invoked to process the allocation once the claim is + // needed on a node. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + optional string driver = 2; + + // This name together with the driver name and the device name field + // identify which device was allocated (`//`). + // + // Must not be longer than 253 characters and may contain one or more + // DNS sub-domains separated by slashes. + // + // +required + optional string pool = 3; + + // Device references one device instance via its name in the driver's + // resource pool. It must be a DNS label. + // + // +required + optional string device = 4; +} + +// DeviceSelector must have exactly one field set. +message DeviceSelector { + // CEL contains a CEL expression for selecting a device. + // + // +optional + // +oneOf=SelectorType + optional CELDeviceSelector cel = 1; +} + +// OpaqueDeviceConfiguration contains configuration parameters for a driver +// in a format defined by the driver vendor. +message OpaqueDeviceConfiguration { + // Driver is used to determine which kubelet plugin needs + // to be passed these configuration parameters. + // + // An admission policy provided by the driver developer could use this + // to decide whether it needs to validate them. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + optional string driver = 1; + + // Parameters can contain arbitrary data. It is the responsibility of + // the driver developer to handle validation and versioning. Typically this + // includes self-identification and a version ("kind" + "apiVersion" for + // Kubernetes types), with conversion between different versions. + // + // +required + optional .k8s.io.apimachinery.pkg.runtime.RawExtension parameters = 2; +} + +// PodSchedulingContext objects hold information that is needed to schedule +// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation +// mode. +// +// This is an alpha type and requires enabling the DRAControlPlaneController +// feature gate. 
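The DeviceClaim, DeviceRequest and CELDeviceSelector messages documented above correspond to Go structs in this package. A short sketch of how a client might populate a request with a CEL selector, assuming the Go field names mirror the proto fields shown here (Requests, DeviceClassName, Selectors, CEL, Expression); the class name "gpu.example.com" and the expression are made up for illustration and this is not an excerpt from the vendored code.

```go
package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
)

func main() {
	// One request for a device of the hypothetical class "gpu.example.com",
	// narrowed by a CEL expression over the device attributes described in
	// the CELDeviceSelector documentation above.
	claim := resourcev1alpha3.DeviceClaim{
		Requests: []resourcev1alpha3.DeviceRequest{{
			Name:            "gpu",
			DeviceClassName: "gpu.example.com",
			Selectors: []resourcev1alpha3.DeviceSelector{{
				CEL: &resourcev1alpha3.CELDeviceSelector{
					Expression: `device.driver == "dra.example.com" && device.attributes["dra.example.com"].model == "ultra"`,
				},
			}},
			// AllocationMode is left unset, so the default ExactCount mode
			// with a count of one applies, per the comments above.
		}},
	}
	fmt.Println(claim.Requests[0].Selectors[0].CEL.Expression)
}
```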
+message PodSchedulingContext { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec describes where resources for the Pod are needed. + optional PodSchedulingContextSpec spec = 2; + + // Status describes where resources for the Pod can be allocated. + // + // +optional + optional PodSchedulingContextStatus status = 3; +} + +// PodSchedulingContextList is a collection of Pod scheduling objects. +message PodSchedulingContextList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of PodSchedulingContext objects. + repeated PodSchedulingContext items = 2; +} + +// PodSchedulingContextSpec describes where resources for the Pod are needed. +message PodSchedulingContextSpec { + // SelectedNode is the node for which allocation of ResourceClaims that + // are referenced by the Pod and that use "WaitForFirstConsumer" + // allocation is to be attempted. + // + // +optional + optional string selectedNode = 1; + + // PotentialNodes lists nodes where the Pod might be able to run. + // + // The size of this field is limited to 128. This is large enough for + // many clusters. Larger clusters may need more attempts to find a node + // that suits all pending resources. This may get increased in the + // future, but not reduced. + // + // +optional + // +listType=atomic + repeated string potentialNodes = 2; +} + +// PodSchedulingContextStatus describes where resources for the Pod can be allocated. +message PodSchedulingContextStatus { + // ResourceClaims describes resource availability for each + // pod.spec.resourceClaim entry where the corresponding ResourceClaim + // uses "WaitForFirstConsumer" allocation mode. + // + // +listType=map + // +listMapKey=name + // +optional + repeated ResourceClaimSchedulingStatus resourceClaims = 1; +} + +// ResourceClaim describes a request for access to resources in the cluster, +// for use by workloads. For example, if a workload needs an accelerator device +// with specific properties, this is how that request is expressed. The status +// stanza tracks whether this claim has been satisfied and what specific +// resources have been allocated. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message ResourceClaim { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec describes what is being requested and how to configure it. + // The spec is immutable. + optional ResourceClaimSpec spec = 2; + + // Status describes whether the claim is ready to use and what has been allocated. + // +optional + optional ResourceClaimStatus status = 3; +} + +// ResourceClaimConsumerReference contains enough information to let you +// locate the consumer of a ResourceClaim. The user must be a resource in the same +// namespace as the ResourceClaim. +message ResourceClaimConsumerReference { + // APIGroup is the group for the resource being referenced. It is + // empty for the core API. This matches the group in the APIVersion + // that is used when creating the resources. + // +optional + optional string apiGroup = 1; + + // Resource is the type of resource being referenced, for example "pods". + // +required + optional string resource = 3; + + // Name is the name of resource being referenced. + // +required + optional string name = 4; + + // UID identifies exactly one incarnation of the resource. 
+ // +required + optional string uid = 5; +} + +// ResourceClaimList is a collection of claims. +message ResourceClaimList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource claims. + repeated ResourceClaim items = 2; +} + +// ResourceClaimSchedulingStatus contains information about one particular +// ResourceClaim with "WaitForFirstConsumer" allocation mode. +message ResourceClaimSchedulingStatus { + // Name matches the pod.spec.resourceClaims[*].Name field. + // + // +required + optional string name = 1; + + // UnsuitableNodes lists nodes that the ResourceClaim cannot be + // allocated for. + // + // The size of this field is limited to 128, the same as for + // PodSchedulingSpec.PotentialNodes. This may get increased in the + // future, but not reduced. + // + // +optional + // +listType=atomic + repeated string unsuitableNodes = 2; +} + +// ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it. +message ResourceClaimSpec { + // Devices defines how to request devices. + // + // +optional + optional DeviceClaim devices = 1; + + // Controller is the name of the DRA driver that is meant + // to handle allocation of this claim. If empty, allocation is handled + // by the scheduler while scheduling a pod. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // This is an alpha field and requires enabling the DRAControlPlaneController + // feature gate. + // + // +optional + // +featureGate=DRAControlPlaneController + optional string controller = 2; +} + +// ResourceClaimStatus tracks whether the resource has been allocated and what +// the result of that was. +message ResourceClaimStatus { + // Allocation is set once the claim has been allocated successfully. + // + // +optional + optional AllocationResult allocation = 1; + + // ReservedFor indicates which entities are currently allowed to use + // the claim. A Pod which references a ResourceClaim which is not + // reserved for that Pod will not be started. A claim that is in + // use or might be in use because it has been reserved must not get + // deallocated. + // + // In a cluster with multiple scheduler instances, two pods might get + // scheduled concurrently by different schedulers. When they reference + // the same ResourceClaim which already has reached its maximum number + // of consumers, only one pod can be scheduled. + // + // Both schedulers try to add their pod to the claim.status.reservedFor + // field, but only the update that reaches the API server first gets + // stored. The other one fails with an error and the scheduler + // which issued it knows that it must put the pod back into the queue, + // waiting for the ResourceClaim to become usable again. + // + // There can be at most 32 such reservations. This may get increased in + // the future, but not reduced. + // + // +optional + // +listType=map + // +listMapKey=uid + // +patchStrategy=merge + // +patchMergeKey=uid + repeated ResourceClaimConsumerReference reservedFor = 2; + + // Indicates that a claim is to be deallocated. While this is set, + // no new consumers may be added to ReservedFor. + // + // This is only used if the claim needs to be deallocated by a DRA driver. + // That driver then must deallocate this claim and reset the field + // together with clearing the Allocation field. 
+ // + // This is an alpha field and requires enabling the DRAControlPlaneController + // feature gate. + // + // +optional + // +featureGate=DRAControlPlaneController + optional bool deallocationRequested = 3; +} + +// ResourceClaimTemplate is used to produce ResourceClaim objects. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message ResourceClaimTemplate { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Describes the ResourceClaim that is to be generated. + // + // This field is immutable. A ResourceClaim will get created by the + // control plane for a Pod when needed and then not get updated + // anymore. + optional ResourceClaimTemplateSpec spec = 2; +} + +// ResourceClaimTemplateList is a collection of claim templates. +message ResourceClaimTemplateList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource claim templates. + repeated ResourceClaimTemplate items = 2; +} + +// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim. +message ResourceClaimTemplateSpec { + // ObjectMeta may contain labels and annotations that will be copied into the PVC + // when creating it. No other fields are allowed and will be rejected during + // validation. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec for the ResourceClaim. The entire content is copied unchanged + // into the ResourceClaim that gets created from this template. The + // same fields as in a ResourceClaim are also valid here. + optional ResourceClaimSpec spec = 2; +} + +// ResourcePool describes the pool that ResourceSlices belong to. +message ResourcePool { + // Name is used to identify the pool. For node-local devices, this + // is often the node name, but this is not required. + // + // It must not be longer than 253 characters and must consist of one or more DNS sub-domains + // separated by slashes. This field is immutable. + // + // +required + optional string name = 1; + + // Generation tracks the change in a pool over time. Whenever a driver + // changes something about one or more of the resources in a pool, it + // must change the generation in all ResourceSlices which are part of + // that pool. Consumers of ResourceSlices should only consider + // resources from the pool with the highest generation number. The + // generation may be reset by drivers, which should be fine for + // consumers, assuming that all ResourceSlices in a pool are updated to + // match or deleted. + // + // Combined with ResourceSliceCount, this mechanism enables consumers to + // detect pools which are comprised of multiple ResourceSlices and are + // in an incomplete state. + // + // +required + optional int64 generation = 2; + + // ResourceSliceCount is the total number of ResourceSlices in the pool at this + // generation number. Must be greater than zero. + // + // Consumers can use this to check whether they have seen all ResourceSlices + // belonging to the same pool. + // + // +required + optional int64 resourceSliceCount = 3; +} + +// ResourceSlice represents one or more resources in a pool of similar resources, +// managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many +// ResourceSlices comprise a pool is determined by the driver. 
+// +// At the moment, the only supported resources are devices with attributes and capacities. +// Each device in a given pool, regardless of how many ResourceSlices, must have a unique name. +// The ResourceSlice in which a device gets published may change over time. The unique identifier +// for a device is the tuple , , . +// +// Whenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number +// and updates all ResourceSlices with that new number and new resource definitions. A consumer +// must only use ResourceSlices with the highest generation number and ignore all others. +// +// When allocating all resources in a pool matching certain criteria or when +// looking for the best solution among several different alternatives, a +// consumer should check the number of ResourceSlices in a pool (included in +// each ResourceSlice) to determine whether its view of a pool is complete and +// if not, should wait until the driver has completed updating the pool. +// +// For resources that are not local to a node, the node name is not set. Instead, +// the driver may use a node selector to specify where the devices are available. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message ResourceSlice { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Contains the information published by the driver. + // + // Changing the spec automatically increments the metadata.generation number. + optional ResourceSliceSpec spec = 2; +} + +// ResourceSliceList is a collection of ResourceSlices. +message ResourceSliceList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource ResourceSlices. + repeated ResourceSlice items = 2; +} + +// ResourceSliceSpec contains the information published by the driver in one ResourceSlice. +message ResourceSliceSpec { + // Driver identifies the DRA driver providing the capacity information. + // A field selector can be used to list only ResourceSlice + // objects with a certain driver name. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. This field is immutable. + // + // +required + optional string driver = 1; + + // Pool describes the pool that this ResourceSlice belongs to. + // + // +required + optional ResourcePool pool = 2; + + // NodeName identifies the node which provides the resources in this pool. + // A field selector can be used to list only ResourceSlice + // objects belonging to a certain node. + // + // This field can be used to limit access from nodes to ResourceSlices with + // the same node name. It also indicates to autoscalers that adding + // new nodes of the same type as some old node might also make new + // resources available. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // This field is immutable. + // + // +optional + // +oneOf=NodeSelection + optional string nodeName = 3; + + // NodeSelector defines which nodes have access to the resources in the pool, + // when that pool is not limited to a single node. + // + // Must use exactly one term. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // + // +optional + // +oneOf=NodeSelection + optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 4; + + // AllNodes indicates that all nodes have access to the resources in the pool. 
+ // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // + // +optional + // +oneOf=NodeSelection + optional bool allNodes = 5; + + // Devices lists some or all of the devices in this pool. + // + // Must not have more than 128 entries. + // + // +optional + // +listType=atomic + repeated Device devices = 6; +} + diff --git a/vendor/k8s.io/api/resource/v1alpha2/register.go b/vendor/k8s.io/api/resource/v1alpha3/register.go similarity index 92% rename from vendor/k8s.io/api/resource/v1alpha2/register.go rename to vendor/k8s.io/api/resource/v1alpha3/register.go index 6e0d7ceb98..74044e8cf0 100644 --- a/vendor/k8s.io/api/resource/v1alpha2/register.go +++ b/vendor/k8s.io/api/resource/v1alpha3/register.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha2 +package v1alpha3 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,7 +26,7 @@ import ( const GroupName = "resource.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha3"} // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { @@ -44,19 +44,18 @@ var ( // Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &ResourceClass{}, - &ResourceClassList{}, + &DeviceClass{}, + &DeviceClassList{}, &ResourceClaim{}, &ResourceClaimList{}, &ResourceClaimTemplate{}, &ResourceClaimTemplateList{}, &PodSchedulingContext{}, &PodSchedulingContextList{}, + &ResourceSlice{}, + &ResourceSliceList{}, ) - // Add common types - scheme.AddKnownTypes(SchemeGroupVersion, &metav1.Status{}) - // Add the watch version that applies metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/api/resource/v1alpha3/types.go b/vendor/k8s.io/api/resource/v1alpha3/types.go new file mode 100644 index 0000000000..4efd2491de --- /dev/null +++ b/vendor/k8s.io/api/resource/v1alpha3/types.go @@ -0,0 +1,1048 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha3 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/validation" +) + +const ( + // Finalizer is the finalizer that gets set for claims + // which were allocated through a builtin controller. + // Reserved for use by Kubernetes, DRA driver controllers must + // use their own finalizer. 
+ Finalizer = "resource.kubernetes.io/delete-protection" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// ResourceSlice represents one or more resources in a pool of similar resources, +// managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many +// ResourceSlices comprise a pool is determined by the driver. +// +// At the moment, the only supported resources are devices with attributes and capacities. +// Each device in a given pool, regardless of how many ResourceSlices, must have a unique name. +// The ResourceSlice in which a device gets published may change over time. The unique identifier +// for a device is the tuple , , . +// +// Whenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number +// and updates all ResourceSlices with that new number and new resource definitions. A consumer +// must only use ResourceSlices with the highest generation number and ignore all others. +// +// When allocating all resources in a pool matching certain criteria or when +// looking for the best solution among several different alternatives, a +// consumer should check the number of ResourceSlices in a pool (included in +// each ResourceSlice) to determine whether its view of a pool is complete and +// if not, should wait until the driver has completed updating the pool. +// +// For resources that are not local to a node, the node name is not set. Instead, +// the driver may use a node selector to specify where the devices are available. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +type ResourceSlice struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Contains the information published by the driver. + // + // Changing the spec automatically increments the metadata.generation number. + Spec ResourceSliceSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +const ( + // ResourceSliceSelectorNodeName can be used in a [metav1.ListOptions] + // field selector to filter based on [ResourceSliceSpec.NodeName]. + ResourceSliceSelectorNodeName = "spec.nodeName" + // ResourceSliceSelectorDriver can be used in a [metav1.ListOptions] + // field selector to filter based on [ResourceSliceSpec.Driver]. + ResourceSliceSelectorDriver = "spec.driver" +) + +// ResourceSliceSpec contains the information published by the driver in one ResourceSlice. +type ResourceSliceSpec struct { + // Driver identifies the DRA driver providing the capacity information. + // A field selector can be used to list only ResourceSlice + // objects with a certain driver name. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. This field is immutable. + // + // +required + Driver string `json:"driver" protobuf:"bytes,1,name=driver"` + + // Pool describes the pool that this ResourceSlice belongs to. + // + // +required + Pool ResourcePool `json:"pool" protobuf:"bytes,2,name=pool"` + + // NodeName identifies the node which provides the resources in this pool. + // A field selector can be used to list only ResourceSlice + // objects belonging to a certain node. + // + // This field can be used to limit access from nodes to ResourceSlices with + // the same node name. 
It also indicates to autoscalers that adding + // new nodes of the same type as some old node might also make new + // resources available. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // This field is immutable. + // + // +optional + // +oneOf=NodeSelection + NodeName string `json:"nodeName,omitempty" protobuf:"bytes,3,opt,name=nodeName"` + + // NodeSelector defines which nodes have access to the resources in the pool, + // when that pool is not limited to a single node. + // + // Must use exactly one term. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // + // +optional + // +oneOf=NodeSelection + NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,4,opt,name=nodeSelector"` + + // AllNodes indicates that all nodes have access to the resources in the pool. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // + // +optional + // +oneOf=NodeSelection + AllNodes bool `json:"allNodes,omitempty" protobuf:"bytes,5,opt,name=allNodes"` + + // Devices lists some or all of the devices in this pool. + // + // Must not have more than 128 entries. + // + // +optional + // +listType=atomic + Devices []Device `json:"devices" protobuf:"bytes,6,name=devices"` +} + +// ResourcePool describes the pool that ResourceSlices belong to. +type ResourcePool struct { + // Name is used to identify the pool. For node-local devices, this + // is often the node name, but this is not required. + // + // It must not be longer than 253 characters and must consist of one or more DNS sub-domains + // separated by slashes. This field is immutable. + // + // +required + Name string `json:"name" protobuf:"bytes,1,name=name"` + + // Generation tracks the change in a pool over time. Whenever a driver + // changes something about one or more of the resources in a pool, it + // must change the generation in all ResourceSlices which are part of + // that pool. Consumers of ResourceSlices should only consider + // resources from the pool with the highest generation number. The + // generation may be reset by drivers, which should be fine for + // consumers, assuming that all ResourceSlices in a pool are updated to + // match or deleted. + // + // Combined with ResourceSliceCount, this mechanism enables consumers to + // detect pools which are comprised of multiple ResourceSlices and are + // in an incomplete state. + // + // +required + Generation int64 `json:"generation" protobuf:"bytes,2,name=generation"` + + // ResourceSliceCount is the total number of ResourceSlices in the pool at this + // generation number. Must be greater than zero. + // + // Consumers can use this to check whether they have seen all ResourceSlices + // belonging to the same pool. + // + // +required + ResourceSliceCount int64 `json:"resourceSliceCount" protobuf:"bytes,3,name=resourceSliceCount"` +} + +const ResourceSliceMaxSharedCapacity = 128 +const ResourceSliceMaxDevices = 128 +const PoolNameMaxLength = validation.DNS1123SubdomainMaxLength // Same as for a single node name. + +// Device represents one individual hardware instance that can be selected based +// on its attributes. Besides the name, exactly one field must be set. +type Device struct { + // Name is unique identifier among all devices managed by + // the driver in the pool. It must be a DNS label. + // + // +required + Name string `json:"name" protobuf:"bytes,1,name=name"` + + // Basic defines one device instance. 
+ // + // +optional + // +oneOf=deviceType + Basic *BasicDevice `json:"basic,omitempty" protobuf:"bytes,2,opt,name=basic"` +} + +// BasicDevice defines one device instance. +type BasicDevice struct { + // Attributes defines the set of attributes for this device. + // The name of each attribute must be unique in that set. + // + // The maximum number of attributes and capacities combined is 32. + // + // +optional + Attributes map[QualifiedName]DeviceAttribute `json:"attributes,omitempty" protobuf:"bytes,1,rep,name=attributes"` + + // Capacity defines the set of capacities for this device. + // The name of each capacity must be unique in that set. + // + // The maximum number of attributes and capacities combined is 32. + // + // +optional + Capacity map[QualifiedName]resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,2,rep,name=capacity"` +} + +// Limit for the sum of the number of entries in both ResourceSlices. +const ResourceSliceMaxAttributesAndCapacitiesPerDevice = 32 + +// QualifiedName is the name of a device attribute or capacity. +// +// Attributes and capacities are defined either by the owner of the specific +// driver (usually the vendor) or by some 3rd party (e.g. the Kubernetes +// project). Because they are sometimes compared across devices, a given name +// is expected to mean the same thing and have the same type on all devices. +// +// Names must be either a C identifier (e.g. "theName") or a DNS subdomain +// followed by a slash ("/") followed by a C identifier +// (e.g. "dra.example.com/theName"). Names which do not include the +// domain prefix are assumed to be part of the driver's domain. Attributes +// or capacities defined by 3rd parties must include the domain prefix. +// +// The maximum length for the DNS subdomain is 63 characters (same as +// for driver names) and the maximum length of the C identifier +// is 32. +type QualifiedName string + +// FullyQualifiedName is a QualifiedName where the domain is set. +type FullyQualifiedName string + +// DeviceMaxIDLength is the maximum length of the identifier in a device attribute or capacity name (`/`). +const DeviceMaxIDLength = 32 + +// DeviceAttribute must have exactly one field set. +type DeviceAttribute struct { + // The Go field names below have a Value suffix to avoid a conflict between the + // field "String" and the corresponding method. That method is required. + // The Kubernetes API is defined without that suffix to keep it more natural. + + // IntValue is a number. + // + // +optional + // +oneOf=ValueType + IntValue *int64 `json:"int,omitempty" protobuf:"varint,2,opt,name=int"` + + // BoolValue is a true/false value. + // + // +optional + // +oneOf=ValueType + BoolValue *bool `json:"bool,omitempty" protobuf:"varint,3,opt,name=bool"` + + // StringValue is a string. Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + StringValue *string `json:"string,omitempty" protobuf:"bytes,4,opt,name=string"` + + // VersionValue is a semantic version according to semver.org spec 2.0.0. + // Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + VersionValue *string `json:"version,omitempty" protobuf:"bytes,5,opt,name=version"` +} + +// DeviceAttributeMaxValueLength is the maximum length of a string or version attribute value. +const DeviceAttributeMaxValueLength = 64 + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// ResourceSliceList is a collection of ResourceSlices. 
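The ResourcePool comments above describe the consistency mechanism: consumers should only trust ResourceSlices that carry the highest Generation of a pool and compare how many of them they have seen against ResourceSliceCount. The following is a minimal sketch of that check, not part of the vendored file; it assumes all slices passed in belong to the same pool and uses only the v1alpha3 types introduced by this patch.

package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
)

// poolView returns the ResourceSlices carrying the newest generation of a
// single pool and whether that view is complete, i.e. whether the number of
// slices seen matches ResourceSliceCount at that generation.
func poolView(slices []resourcev1alpha3.ResourceSlice) ([]resourcev1alpha3.ResourceSlice, bool) {
	var newest []resourcev1alpha3.ResourceSlice
	generation := int64(-1)
	for _, s := range slices {
		switch {
		case s.Spec.Pool.Generation > generation:
			generation = s.Spec.Pool.Generation
			newest = []resourcev1alpha3.ResourceSlice{s}
		case s.Spec.Pool.Generation == generation:
			newest = append(newest, s)
		}
	}
	if len(newest) == 0 {
		return nil, false
	}
	return newest, int64(len(newest)) == newest[0].Spec.Pool.ResourceSliceCount
}

func main() {
	slices, complete := poolView(nil)
	fmt.Println(len(slices), complete)
}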
+type ResourceSliceList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of resource ResourceSlices. + Items []ResourceSlice `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// ResourceClaim describes a request for access to resources in the cluster, +// for use by workloads. For example, if a workload needs an accelerator device +// with specific properties, this is how that request is expressed. The status +// stanza tracks whether this claim has been satisfied and what specific +// resources have been allocated. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +type ResourceClaim struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec describes what is being requested and how to configure it. + // The spec is immutable. + Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"` + + // Status describes whether the claim is ready to use and what has been allocated. + // +optional + Status ResourceClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it. +type ResourceClaimSpec struct { + // Devices defines how to request devices. + // + // +optional + Devices DeviceClaim `json:"devices" protobuf:"bytes,1,name=devices"` + + // Controller is the name of the DRA driver that is meant + // to handle allocation of this claim. If empty, allocation is handled + // by the scheduler while scheduling a pod. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // This is an alpha field and requires enabling the DRAControlPlaneController + // feature gate. + // + // +optional + // +featureGate=DRAControlPlaneController + Controller string `json:"controller,omitempty" protobuf:"bytes,2,opt,name=controller"` +} + +// DeviceClaim defines how to request devices with a ResourceClaim. +type DeviceClaim struct { + // Requests represent individual requests for distinct devices which + // must all be satisfied. If empty, nothing needs to be allocated. + // + // +optional + // +listType=atomic + Requests []DeviceRequest `json:"requests" protobuf:"bytes,1,name=requests"` + + // These constraints must be satisfied by the set of devices that get + // allocated for the claim. + // + // +optional + // +listType=atomic + Constraints []DeviceConstraint `json:"constraints,omitempty" protobuf:"bytes,2,opt,name=constraints"` + + // This field holds configuration for multiple potential drivers which + // could satisfy requests in this claim. It is ignored while allocating + // the claim. + // + // +optional + // +listType=atomic + Config []DeviceClaimConfiguration `json:"config,omitempty" protobuf:"bytes,3,opt,name=config"` +} + +const ( + DeviceRequestsMaxSize = AllocationResultsMaxSize + DeviceConstraintsMaxSize = 32 + DeviceConfigMaxSize = 32 +) + +// DeviceRequest is a request for devices required for a claim. +// This is typically a request for a single resource like a device, but can +// also ask for several identical devices. 
+// +// A DeviceClassName is currently required. Clients must check that it is +// indeed set. It's absence indicates that something changed in a way that +// is not supported by the client yet, in which case it must refuse to +// handle the request. +type DeviceRequest struct { + // Name can be used to reference this request in a pod.spec.containers[].resources.claims + // entry and in a constraint of the claim. + // + // Must be a DNS label. + // + // +required + Name string `json:"name" protobuf:"bytes,1,name=name"` + + // DeviceClassName references a specific DeviceClass, which can define + // additional configuration and selectors to be inherited by this + // request. + // + // A class is required. Which classes are available depends on the cluster. + // + // Administrators may use this to restrict which devices may get + // requested by only installing classes with selectors for permitted + // devices. If users are free to request anything without restrictions, + // then administrators can create an empty DeviceClass for users + // to reference. + // + // +required + DeviceClassName string `json:"deviceClassName" protobuf:"bytes,2,name=deviceClassName"` + + // Selectors define criteria which must be satisfied by a specific + // device in order for that device to be considered for this + // request. All selectors must be satisfied for a device to be + // considered. + // + // +optional + // +listType=atomic + Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,3,name=selectors"` + + // AllocationMode and its related fields define how devices are allocated + // to satisfy this request. Supported values are: + // + // - ExactCount: This request is for a specific number of devices. + // This is the default. The exact number is provided in the + // count field. + // + // - All: This request is for all of the matching devices in a pool. + // Allocation will fail if some devices are already allocated, + // unless adminAccess is requested. + // + // If AlloctionMode is not specified, the default mode is ExactCount. If + // the mode is ExactCount and count is not specified, the default count is + // one. Any other requests must specify this field. + // + // More modes may get added in the future. Clients must refuse to handle + // requests with unknown modes. + // + // +optional + AllocationMode DeviceAllocationMode `json:"allocationMode,omitempty" protobuf:"bytes,4,opt,name=allocationMode"` + + // Count is used only when the count mode is "ExactCount". Must be greater than zero. + // If AllocationMode is ExactCount and this field is not specified, the default is one. + // + // +optional + // +oneOf=AllocationMode + Count int64 `json:"count,omitempty" protobuf:"bytes,5,opt,name=count"` + + // AdminAccess indicates that this is a claim for administrative access + // to the device(s). Claims with AdminAccess are expected to be used for + // monitoring or other management services for a device. They ignore + // all ordinary claims to the device with respect to access modes and + // any resource allocations. + // + // +optional + // +default=false + AdminAccess bool `json:"adminAccess,omitempty" protobuf:"bytes,6,opt,name=adminAccess"` +} + +const ( + DeviceSelectorsMaxSize = 32 +) + +type DeviceAllocationMode string + +// Valid [DeviceRequest.CountMode] values. +const ( + DeviceAllocationModeExactCount = DeviceAllocationMode("ExactCount") + DeviceAllocationModeAll = DeviceAllocationMode("All") +) + +// DeviceSelector must have exactly one field set. 
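The DeviceRequest documentation above covers Name, DeviceClassName, AllocationMode and Count. Below is a minimal sketch of how a client might populate a DeviceClaim with one such request, again not part of the vendored file; the class name, driver name and attribute used in the CEL expression are hypothetical.

package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
)

func main() {
	// One request for exactly one device from a hypothetical "example-gpu"
	// DeviceClass, narrowed down by a CEL selector on a vendor attribute.
	claim := resourcev1alpha3.DeviceClaim{
		Requests: []resourcev1alpha3.DeviceRequest{{
			Name:            "gpu",
			DeviceClassName: "example-gpu",
			AllocationMode:  resourcev1alpha3.DeviceAllocationModeExactCount,
			Count:           1,
			Selectors: []resourcev1alpha3.DeviceSelector{{
				CEL: &resourcev1alpha3.CELDeviceSelector{
					// A robust expression would additionally check that the
					// attribute exists before comparing it, as recommended in
					// the CELDeviceSelector documentation below.
					Expression: `device.driver == "dra.example.com" && device.attributes["dra.example.com"].model == "example-model"`,
				},
			}},
		}},
	}
	fmt.Println(claim.Requests[0].DeviceClassName)
}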
+type DeviceSelector struct { + // CEL contains a CEL expression for selecting a device. + // + // +optional + // +oneOf=SelectorType + CEL *CELDeviceSelector `json:"cel,omitempty" protobuf:"bytes,1,opt,name=cel"` +} + +// CELDeviceSelector contains a CEL expression for selecting a device. +type CELDeviceSelector struct { + // Expression is a CEL expression which evaluates a single device. It + // must evaluate to true when the device under consideration satisfies + // the desired criteria, and false when it does not. Any other result + // is an error and causes allocation of devices to abort. + // + // The expression's input is an object named "device", which carries + // the following properties: + // - driver (string): the name of the driver which defines this device. + // - attributes (map[string]object): the device's attributes, grouped by prefix + // (e.g. device.attributes["dra.example.com"] evaluates to an object with all + // of the attributes which were prefixed by "dra.example.com". + // - capacity (map[string]object): the device's capacities, grouped by prefix. + // + // Example: Consider a device with driver="dra.example.com", which exposes + // two attributes named "model" and "ext.example.com/family" and which + // exposes one capacity named "modules". This input to this expression + // would have the following fields: + // + // device.driver + // device.attributes["dra.example.com"].model + // device.attributes["ext.example.com"].family + // device.capacity["dra.example.com"].modules + // + // The device.driver field can be used to check for a specific driver, + // either as a high-level precondition (i.e. you only want to consider + // devices from this driver) or as part of a multi-clause expression + // that is meant to consider devices from different drivers. + // + // The value type of each attribute is defined by the device + // definition, and users who write these expressions must consult the + // documentation for their specific drivers. The value type of each + // capacity is Quantity. + // + // If an unknown prefix is used as a lookup in either device.attributes + // or device.capacity, an empty map will be returned. Any reference to + // an unknown field will cause an evaluation error and allocation to + // abort. + // + // A robust expression should check for the existence of attributes + // before referencing them. + // + // For ease of use, the cel.bind() function is enabled, and can be used + // to simplify expressions that access multiple attributes with the + // same domain. For example: + // + // cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool) + // + // +required + Expression string `json:"expression" protobuf:"bytes,1,name=expression"` +} + +// DeviceConstraint must have exactly one field set besides Requests. +type DeviceConstraint struct { + // Requests is a list of the one or more requests in this claim which + // must co-satisfy this constraint. If a request is fulfilled by + // multiple devices, then all of the devices must satisfy the + // constraint. If this is not specified, this constraint applies to all + // requests in this claim. + // + // +optional + // +listType=atomic + Requests []string `json:"requests,omitempty" protobuf:"bytes,1,opt,name=requests"` + + // MatchAttribute requires that all devices in question have this + // attribute and that its type and value are the same across those + // devices. 
+ // + // For example, if you specified "dra.example.com/numa" (a hypothetical example!), + // then only devices in the same NUMA node will be chosen. A device which + // does not have that attribute will not be chosen. All devices should + // use a value of the same type for this attribute because that is part of + // its specification, but if one device doesn't, then it also will not be + // chosen. + // + // Must include the domain qualifier. + // + // +optional + // +oneOf=ConstraintType + MatchAttribute *FullyQualifiedName `json:"matchAttribute,omitempty" protobuf:"bytes,2,opt,name=matchAttribute"` +} + +// DeviceClaimConfiguration is used for configuration parameters in DeviceClaim. +type DeviceClaimConfiguration struct { + // Requests lists the names of requests where the configuration applies. + // If empty, it applies to all requests. + // + // +optional + // +listType=atomic + Requests []string `json:"requests,omitempty" protobuf:"bytes,1,opt,name=requests"` + + DeviceConfiguration `json:",inline" protobuf:"bytes,2,name=deviceConfiguration"` +} + +// DeviceConfiguration must have exactly one field set. It gets embedded +// inline in some other structs which have other fields, so field names must +// not conflict with those. +type DeviceConfiguration struct { + // Opaque provides driver-specific configuration parameters. + // + // +optional + // +oneOf=ConfigurationType + Opaque *OpaqueDeviceConfiguration `json:"opaque,omitempty" protobuf:"bytes,1,opt,name=opaque"` +} + +// OpaqueDeviceConfiguration contains configuration parameters for a driver +// in a format defined by the driver vendor. +type OpaqueDeviceConfiguration struct { + // Driver is used to determine which kubelet plugin needs + // to be passed these configuration parameters. + // + // An admission policy provided by the driver developer could use this + // to decide whether it needs to validate them. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + Driver string `json:"driver" protobuf:"bytes,1,name=driver"` + + // Parameters can contain arbitrary data. It is the responsibility of + // the driver developer to handle validation and versioning. Typically this + // includes self-identification and a version ("kind" + "apiVersion" for + // Kubernetes types), with conversion between different versions. + // + // +required + Parameters runtime.RawExtension `json:"parameters" protobuf:"bytes,2,name=parameters"` +} + +// ResourceClaimStatus tracks whether the resource has been allocated and what +// the result of that was. +type ResourceClaimStatus struct { + // Allocation is set once the claim has been allocated successfully. + // + // +optional + Allocation *AllocationResult `json:"allocation,omitempty" protobuf:"bytes,1,opt,name=allocation"` + + // ReservedFor indicates which entities are currently allowed to use + // the claim. A Pod which references a ResourceClaim which is not + // reserved for that Pod will not be started. A claim that is in + // use or might be in use because it has been reserved must not get + // deallocated. + // + // In a cluster with multiple scheduler instances, two pods might get + // scheduled concurrently by different schedulers. When they reference + // the same ResourceClaim which already has reached its maximum number + // of consumers, only one pod can be scheduled. 
+ // + // Both schedulers try to add their pod to the claim.status.reservedFor + // field, but only the update that reaches the API server first gets + // stored. The other one fails with an error and the scheduler + // which issued it knows that it must put the pod back into the queue, + // waiting for the ResourceClaim to become usable again. + // + // There can be at most 32 such reservations. This may get increased in + // the future, but not reduced. + // + // +optional + // +listType=map + // +listMapKey=uid + // +patchStrategy=merge + // +patchMergeKey=uid + ReservedFor []ResourceClaimConsumerReference `json:"reservedFor,omitempty" protobuf:"bytes,2,opt,name=reservedFor" patchStrategy:"merge" patchMergeKey:"uid"` + + // Indicates that a claim is to be deallocated. While this is set, + // no new consumers may be added to ReservedFor. + // + // This is only used if the claim needs to be deallocated by a DRA driver. + // That driver then must deallocate this claim and reset the field + // together with clearing the Allocation field. + // + // This is an alpha field and requires enabling the DRAControlPlaneController + // feature gate. + // + // +optional + // +featureGate=DRAControlPlaneController + DeallocationRequested bool `json:"deallocationRequested,omitempty" protobuf:"bytes,3,opt,name=deallocationRequested"` +} + +// ReservedForMaxSize is the maximum number of entries in +// claim.status.reservedFor. +const ResourceClaimReservedForMaxSize = 32 + +// ResourceClaimConsumerReference contains enough information to let you +// locate the consumer of a ResourceClaim. The user must be a resource in the same +// namespace as the ResourceClaim. +type ResourceClaimConsumerReference struct { + // APIGroup is the group for the resource being referenced. It is + // empty for the core API. This matches the group in the APIVersion + // that is used when creating the resources. + // +optional + APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"` + // Resource is the type of resource being referenced, for example "pods". + // +required + Resource string `json:"resource" protobuf:"bytes,3,name=resource"` + // Name is the name of resource being referenced. + // +required + Name string `json:"name" protobuf:"bytes,4,name=name"` + // UID identifies exactly one incarnation of the resource. + // +required + UID types.UID `json:"uid" protobuf:"bytes,5,name=uid"` +} + +// AllocationResult contains attributes of an allocated resource. +type AllocationResult struct { + // Devices is the result of allocating devices. + // + // +optional + Devices DeviceAllocationResult `json:"devices,omitempty" protobuf:"bytes,1,opt,name=devices"` + + // NodeSelector defines where the allocated resources are available. If + // unset, they are available everywhere. + // + // +optional + NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,3,opt,name=nodeSelector"` + + // Controller is the name of the DRA driver which handled the + // allocation. That driver is also responsible for deallocating the + // claim. It is empty when the claim can be deallocated without + // involving a driver. + // + // A driver may allocate devices provided by other drivers, so this + // driver name here can be different from the driver names listed for + // the results. + // + // This is an alpha field and requires enabling the DRAControlPlaneController + // feature gate. 
+	//
+	// +optional
+	// +featureGate=DRAControlPlaneController
+	Controller string `json:"controller,omitempty" protobuf:"bytes,4,opt,name=controller"`
+}
+
+// DeviceAllocationResult is the result of allocating devices.
+type DeviceAllocationResult struct {
+	// Results lists all allocated devices.
+	//
+	// +optional
+	// +listType=atomic
+	Results []DeviceRequestAllocationResult `json:"results,omitempty" protobuf:"bytes,1,opt,name=results"`
+
+	// This field is a combination of all the claim and class configuration parameters.
+	// Drivers can distinguish between those based on a flag.
+	//
+	// This includes configuration parameters for drivers which have no allocated
+	// devices in the result because it is up to the drivers which configuration
+	// parameters they support. They can silently ignore unknown configuration
+	// parameters.
+	//
+	// +optional
+	// +listType=atomic
+	Config []DeviceAllocationConfiguration `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"`
+}
+
+// AllocationResultsMaxSize represents the maximum number of
+// entries in allocation.devices.results.
+const AllocationResultsMaxSize = 32
+
+// DeviceRequestAllocationResult contains the allocation result for one request.
+type DeviceRequestAllocationResult struct {
+	// Request is the name of the request in the claim which caused this
+	// device to be allocated. Multiple devices may have been allocated
+	// per request.
+	//
+	// +required
+	Request string `json:"request" protobuf:"bytes,1,name=request"`
+
+	// Driver specifies the name of the DRA driver whose kubelet
+	// plugin should be invoked to process the allocation once the claim is
+	// needed on a node.
+	//
+	// Must be a DNS subdomain and should end with a DNS domain owned by the
+	// vendor of the driver.
+	//
+	// +required
+	Driver string `json:"driver" protobuf:"bytes,2,name=driver"`
+
+	// This name together with the driver name and the device name field
+	// identify which device was allocated (`<driver name>/<pool name>/<device name>`).
+	//
+	// Must not be longer than 253 characters and may contain one or more
+	// DNS sub-domains separated by slashes.
+	//
+	// +required
+	Pool string `json:"pool" protobuf:"bytes,3,name=pool"`
+
+	// Device references one device instance via its name in the driver's
+	// resource pool. It must be a DNS label.
+	//
+	// +required
+	Device string `json:"device" protobuf:"bytes,4,name=device"`
+}
+
+// DeviceAllocationConfiguration gets embedded in an AllocationResult.
+type DeviceAllocationConfiguration struct {
+	// Source records whether the configuration comes from a class and thus
+	// is not something that a normal user would have been able to set
+	// or from a claim.
+	//
+	// +required
+	Source AllocationConfigSource `json:"source" protobuf:"bytes,1,name=source"`
+
+	// Requests lists the names of requests where the configuration applies.
+	// If empty, it applies to all requests.
+	//
+	// +optional
+	// +listType=atomic
+	Requests []string `json:"requests,omitempty" protobuf:"bytes,2,opt,name=requests"`
+
+	DeviceConfiguration `json:",inline" protobuf:"bytes,3,name=deviceConfiguration"`
+}
+
+type AllocationConfigSource string
+
+// Valid [DeviceAllocationConfiguration.Source] values.
+const (
+	AllocationConfigSourceClass = "FromClass"
+	AllocationConfigSourceClaim = "FromClaim"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.26
+
+// ResourceClaimList is a collection of claims.
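The DeviceRequestAllocationResult fields above identify an allocated device by its driver, pool and device name. A minimal sketch, not part of the vendored file, that assembles those identifiers from a claim's status using only the types defined in this file:

package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
)

// allocatedDeviceIDs lists the <driver>/<pool>/<device> identifiers of all
// devices recorded in a claim's allocation result.
func allocatedDeviceIDs(status resourcev1alpha3.ResourceClaimStatus) []string {
	var ids []string
	if status.Allocation == nil {
		return ids // nothing allocated yet
	}
	for _, r := range status.Allocation.Devices.Results {
		ids = append(ids, fmt.Sprintf("%s/%s/%s", r.Driver, r.Pool, r.Device))
	}
	return ids
}

func main() {
	fmt.Println(allocatedDeviceIDs(resourcev1alpha3.ResourceClaimStatus{}))
}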
+type ResourceClaimList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of resource claims. + Items []ResourceClaim `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// PodSchedulingContext objects hold information that is needed to schedule +// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation +// mode. +// +// This is an alpha type and requires enabling the DRAControlPlaneController +// feature gate. +type PodSchedulingContext struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec describes where resources for the Pod are needed. + Spec PodSchedulingContextSpec `json:"spec" protobuf:"bytes,2,name=spec"` + + // Status describes where resources for the Pod can be allocated. + // + // +optional + Status PodSchedulingContextStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PodSchedulingContextSpec describes where resources for the Pod are needed. +type PodSchedulingContextSpec struct { + // SelectedNode is the node for which allocation of ResourceClaims that + // are referenced by the Pod and that use "WaitForFirstConsumer" + // allocation is to be attempted. + // + // +optional + SelectedNode string `json:"selectedNode,omitempty" protobuf:"bytes,1,opt,name=selectedNode"` + + // PotentialNodes lists nodes where the Pod might be able to run. + // + // The size of this field is limited to 128. This is large enough for + // many clusters. Larger clusters may need more attempts to find a node + // that suits all pending resources. This may get increased in the + // future, but not reduced. + // + // +optional + // +listType=atomic + PotentialNodes []string `json:"potentialNodes,omitempty" protobuf:"bytes,2,opt,name=potentialNodes"` +} + +// PodSchedulingContextStatus describes where resources for the Pod can be allocated. +type PodSchedulingContextStatus struct { + // ResourceClaims describes resource availability for each + // pod.spec.resourceClaim entry where the corresponding ResourceClaim + // uses "WaitForFirstConsumer" allocation mode. + // + // +listType=map + // +listMapKey=name + // +optional + ResourceClaims []ResourceClaimSchedulingStatus `json:"resourceClaims,omitempty" protobuf:"bytes,1,opt,name=resourceClaims"` + + // If there ever is a need to support other kinds of resources + // than ResourceClaim, then new fields could get added here + // for those other resources. +} + +// ResourceClaimSchedulingStatus contains information about one particular +// ResourceClaim with "WaitForFirstConsumer" allocation mode. +type ResourceClaimSchedulingStatus struct { + // Name matches the pod.spec.resourceClaims[*].Name field. + // + // +required + Name string `json:"name" protobuf:"bytes,1,name=name"` + + // UnsuitableNodes lists nodes that the ResourceClaim cannot be + // allocated for. + // + // The size of this field is limited to 128, the same as for + // PodSchedulingSpec.PotentialNodes. This may get increased in the + // future, but not reduced. 
+ // + // +optional + // +listType=atomic + UnsuitableNodes []string `json:"unsuitableNodes,omitempty" protobuf:"bytes,2,opt,name=unsuitableNodes"` +} + +// PodSchedulingNodeListMaxSize defines the maximum number of entries in the +// node lists that are stored in PodSchedulingContext objects. This limit is part +// of the API. +const PodSchedulingNodeListMaxSize = 128 + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// PodSchedulingContextList is a collection of Pod scheduling objects. +type PodSchedulingContextList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of PodSchedulingContext objects. + Items []PodSchedulingContext `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// DeviceClass is a vendor- or admin-provided resource that contains +// device configuration and selectors. It can be referenced in +// the device requests of a claim to apply these presets. +// Cluster scoped. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +type DeviceClass struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines what can be allocated and how to configure it. + // + // This is mutable. Consumers have to be prepared for classes changing + // at any time, either because they get updated or replaced. Claim + // allocations are done once based on whatever was set in classes at + // the time of allocation. + // + // Changing the spec automatically increments the metadata.generation number. + Spec DeviceClassSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +// DeviceClassSpec is used in a [DeviceClass] to define what can be allocated +// and how to configure it. +type DeviceClassSpec struct { + // Each selector must be satisfied by a device which is claimed via this class. + // + // +optional + // +listType=atomic + Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,1,opt,name=selectors"` + + // Config defines configuration parameters that apply to each device that is claimed via this class. + // Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor + // configuration applies to exactly one driver. + // + // They are passed to the driver, but are not considered while allocating the claim. + // + // +optional + // +listType=atomic + Config []DeviceClassConfiguration `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"` + + // Only nodes matching the selector will be considered by the scheduler + // when trying to find a Node that fits a Pod when that Pod uses + // a claim that has not been allocated yet *and* that claim + // gets allocated through a control plane controller. It is ignored + // when the claim does not use a control plane controller + // for allocation. + // + // Setting this field is optional. If unset, all Nodes are candidates. + // + // This is an alpha field and requires enabling the DRAControlPlaneController + // feature gate. 
+ // + // +optional + // +featureGate=DRAControlPlaneController + SuitableNodes *v1.NodeSelector `json:"suitableNodes,omitempty" protobuf:"bytes,3,opt,name=suitableNodes"` +} + +// DeviceClassConfiguration is used in DeviceClass. +type DeviceClassConfiguration struct { + DeviceConfiguration `json:",inline" protobuf:"bytes,1,opt,name=deviceConfiguration"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// DeviceClassList is a collection of classes. +type DeviceClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of resource classes. + Items []DeviceClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// ResourceClaimTemplate is used to produce ResourceClaim objects. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +type ResourceClaimTemplate struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Describes the ResourceClaim that is to be generated. + // + // This field is immutable. A ResourceClaim will get created by the + // control plane for a Pod when needed and then not get updated + // anymore. + Spec ResourceClaimTemplateSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim. +type ResourceClaimTemplateSpec struct { + // ObjectMeta may contain labels and annotations that will be copied into the PVC + // when creating it. No other fields are allowed and will be rejected during + // validation. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec for the ResourceClaim. The entire content is copied unchanged + // into the ResourceClaim that gets created from this template. The + // same fields as in a ResourceClaim are also valid here. + Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// ResourceClaimTemplateList is a collection of claim templates. +type ResourceClaimTemplateList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of resource claim templates. + Items []ResourceClaimTemplate `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go new file mode 100644 index 0000000000..1a44a971db --- /dev/null +++ b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go @@ -0,0 +1,404 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha3 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AllocationResult = map[string]string{ + "": "AllocationResult contains attributes of an allocated resource.", + "devices": "Devices is the result of allocating devices.", + "nodeSelector": "NodeSelector defines where the allocated resources are available. If unset, they are available everywhere.", + "controller": "Controller is the name of the DRA driver which handled the allocation. That driver is also responsible for deallocating the claim. It is empty when the claim can be deallocated without involving a driver.\n\nA driver may allocate devices provided by other drivers, so this driver name here can be different from the driver names listed for the results.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", +} + +func (AllocationResult) SwaggerDoc() map[string]string { + return map_AllocationResult +} + +var map_BasicDevice = map[string]string{ + "": "BasicDevice defines one device instance.", + "attributes": "Attributes defines the set of attributes for this device. The name of each attribute must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.", + "capacity": "Capacity defines the set of capacities for this device. The name of each capacity must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.", +} + +func (BasicDevice) SwaggerDoc() map[string]string { + return map_BasicDevice +} + +var map_CELDeviceSelector = map[string]string{ + "": "CELDeviceSelector contains a CEL expression for selecting a device.", + "expression": "Expression is a CEL expression which evaluates a single device. It must evaluate to true when the device under consideration satisfies the desired criteria, and false when it does not. Any other result is an error and causes allocation of devices to abort.\n\nThe expression's input is an object named \"device\", which carries the following properties:\n - driver (string): the name of the driver which defines this device.\n - attributes (map[string]object): the device's attributes, grouped by prefix\n (e.g. device.attributes[\"dra.example.com\"] evaluates to an object with all\n of the attributes which were prefixed by \"dra.example.com\".\n - capacity (map[string]object): the device's capacities, grouped by prefix.\n\nExample: Consider a device with driver=\"dra.example.com\", which exposes two attributes named \"model\" and \"ext.example.com/family\" and which exposes one capacity named \"modules\". 
This input to this expression would have the following fields:\n\n device.driver\n device.attributes[\"dra.example.com\"].model\n device.attributes[\"ext.example.com\"].family\n device.capacity[\"dra.example.com\"].modules\n\nThe device.driver field can be used to check for a specific driver, either as a high-level precondition (i.e. you only want to consider devices from this driver) or as part of a multi-clause expression that is meant to consider devices from different drivers.\n\nThe value type of each attribute is defined by the device definition, and users who write these expressions must consult the documentation for their specific drivers. The value type of each capacity is Quantity.\n\nIf an unknown prefix is used as a lookup in either device.attributes or device.capacity, an empty map will be returned. Any reference to an unknown field will cause an evaluation error and allocation to abort.\n\nA robust expression should check for the existence of attributes before referencing them.\n\nFor ease of use, the cel.bind() function is enabled, and can be used to simplify expressions that access multiple attributes with the same domain. For example:\n\n cel.bind(dra, device.attributes[\"dra.example.com\"], dra.someBool && dra.anotherBool)", +} + +func (CELDeviceSelector) SwaggerDoc() map[string]string { + return map_CELDeviceSelector +} + +var map_Device = map[string]string{ + "": "Device represents one individual hardware instance that can be selected based on its attributes. Besides the name, exactly one field must be set.", + "name": "Name is unique identifier among all devices managed by the driver in the pool. It must be a DNS label.", + "basic": "Basic defines one device instance.", +} + +func (Device) SwaggerDoc() map[string]string { + return map_Device +} + +var map_DeviceAllocationConfiguration = map[string]string{ + "": "DeviceAllocationConfiguration gets embedded in an AllocationResult.", + "source": "Source records whether the configuration comes from a class and thus is not something that a normal user would have been able to set or from a claim.", + "requests": "Requests lists the names of requests where the configuration applies. If empty, its applies to all requests.", +} + +func (DeviceAllocationConfiguration) SwaggerDoc() map[string]string { + return map_DeviceAllocationConfiguration +} + +var map_DeviceAllocationResult = map[string]string{ + "": "DeviceAllocationResult is the result of allocating devices.", + "results": "Results lists all allocated devices.", + "config": "This field is a combination of all the claim and class configuration parameters. Drivers can distinguish between those based on a flag.\n\nThis includes configuration parameters for drivers which have no allocated devices in the result because it is up to the drivers which configuration parameters they support. They can silently ignore unknown configuration parameters.", +} + +func (DeviceAllocationResult) SwaggerDoc() map[string]string { + return map_DeviceAllocationResult +} + +var map_DeviceAttribute = map[string]string{ + "": "DeviceAttribute must have exactly one field set.", + "int": "IntValue is a number.", + "bool": "BoolValue is a true/false value.", + "string": "StringValue is a string. Must not be longer than 64 characters.", + "version": "VersionValue is a semantic version according to semver.org spec 2.0.0. 
Must not be longer than 64 characters.", +} + +func (DeviceAttribute) SwaggerDoc() map[string]string { + return map_DeviceAttribute +} + +var map_DeviceClaim = map[string]string{ + "": "DeviceClaim defines how to request devices with a ResourceClaim.", + "requests": "Requests represent individual requests for distinct devices which must all be satisfied. If empty, nothing needs to be allocated.", + "constraints": "These constraints must be satisfied by the set of devices that get allocated for the claim.", + "config": "This field holds configuration for multiple potential drivers which could satisfy requests in this claim. It is ignored while allocating the claim.", +} + +func (DeviceClaim) SwaggerDoc() map[string]string { + return map_DeviceClaim +} + +var map_DeviceClaimConfiguration = map[string]string{ + "": "DeviceClaimConfiguration is used for configuration parameters in DeviceClaim.", + "requests": "Requests lists the names of requests where the configuration applies. If empty, it applies to all requests.", +} + +func (DeviceClaimConfiguration) SwaggerDoc() map[string]string { + return map_DeviceClaimConfiguration +} + +var map_DeviceClass = map[string]string{ + "": "DeviceClass is a vendor- or admin-provided resource that contains device configuration and selectors. It can be referenced in the device requests of a claim to apply these presets. Cluster scoped.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Spec defines what can be allocated and how to configure it.\n\nThis is mutable. Consumers have to be prepared for classes changing at any time, either because they get updated or replaced. Claim allocations are done once based on whatever was set in classes at the time of allocation.\n\nChanging the spec automatically increments the metadata.generation number.", +} + +func (DeviceClass) SwaggerDoc() map[string]string { + return map_DeviceClass +} + +var map_DeviceClassConfiguration = map[string]string{ + "": "DeviceClassConfiguration is used in DeviceClass.", +} + +func (DeviceClassConfiguration) SwaggerDoc() map[string]string { + return map_DeviceClassConfiguration +} + +var map_DeviceClassList = map[string]string{ + "": "DeviceClassList is a collection of classes.", + "metadata": "Standard list metadata", + "items": "Items is the list of resource classes.", +} + +func (DeviceClassList) SwaggerDoc() map[string]string { + return map_DeviceClassList +} + +var map_DeviceClassSpec = map[string]string{ + "": "DeviceClassSpec is used in a [DeviceClass] to define what can be allocated and how to configure it.", + "selectors": "Each selector must be satisfied by a device which is claimed via this class.", + "config": "Config defines configuration parameters that apply to each device that is claimed via this class. Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor configuration applies to exactly one driver.\n\nThey are passed to the driver, but are not considered while allocating the claim.", + "suitableNodes": "Only nodes matching the selector will be considered by the scheduler when trying to find a Node that fits a Pod when that Pod uses a claim that has not been allocated yet *and* that claim gets allocated through a control plane controller. It is ignored when the claim does not use a control plane controller for allocation.\n\nSetting this field is optional. 
If unset, all Nodes are candidates.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", +} + +func (DeviceClassSpec) SwaggerDoc() map[string]string { + return map_DeviceClassSpec +} + +var map_DeviceConfiguration = map[string]string{ + "": "DeviceConfiguration must have exactly one field set. It gets embedded inline in some other structs which have other fields, so field names must not conflict with those.", + "opaque": "Opaque provides driver-specific configuration parameters.", +} + +func (DeviceConfiguration) SwaggerDoc() map[string]string { + return map_DeviceConfiguration +} + +var map_DeviceConstraint = map[string]string{ + "": "DeviceConstraint must have exactly one field set besides Requests.", + "requests": "Requests is a list of the one or more requests in this claim which must co-satisfy this constraint. If a request is fulfilled by multiple devices, then all of the devices must satisfy the constraint. If this is not specified, this constraint applies to all requests in this claim.", + "matchAttribute": "MatchAttribute requires that all devices in question have this attribute and that its type and value are the same across those devices.\n\nFor example, if you specified \"dra.example.com/numa\" (a hypothetical example!), then only devices in the same NUMA node will be chosen. A device which does not have that attribute will not be chosen. All devices should use a value of the same type for this attribute because that is part of its specification, but if one device doesn't, then it also will not be chosen.\n\nMust include the domain qualifier.", +} + +func (DeviceConstraint) SwaggerDoc() map[string]string { + return map_DeviceConstraint +} + +var map_DeviceRequest = map[string]string{ + "": "DeviceRequest is a request for devices required for a claim. This is typically a request for a single resource like a device, but can also ask for several identical devices.\n\nA DeviceClassName is currently required. Clients must check that it is indeed set. It's absence indicates that something changed in a way that is not supported by the client yet, in which case it must refuse to handle the request.", + "name": "Name can be used to reference this request in a pod.spec.containers[].resources.claims entry and in a constraint of the claim.\n\nMust be a DNS label.", + "deviceClassName": "DeviceClassName references a specific DeviceClass, which can define additional configuration and selectors to be inherited by this request.\n\nA class is required. Which classes are available depends on the cluster.\n\nAdministrators may use this to restrict which devices may get requested by only installing classes with selectors for permitted devices. If users are free to request anything without restrictions, then administrators can create an empty DeviceClass for users to reference.", + "selectors": "Selectors define criteria which must be satisfied by a specific device in order for that device to be considered for this request. All selectors must be satisfied for a device to be considered.", + "allocationMode": "AllocationMode and its related fields define how devices are allocated to satisfy this request. Supported values are:\n\n- ExactCount: This request is for a specific number of devices.\n This is the default. 
The exact number is provided in the\n count field.\n\n- All: This request is for all of the matching devices in a pool.\n Allocation will fail if some devices are already allocated,\n unless adminAccess is requested.\n\nIf AlloctionMode is not specified, the default mode is ExactCount. If the mode is ExactCount and count is not specified, the default count is one. Any other requests must specify this field.\n\nMore modes may get added in the future. Clients must refuse to handle requests with unknown modes.", + "count": "Count is used only when the count mode is \"ExactCount\". Must be greater than zero. If AllocationMode is ExactCount and this field is not specified, the default is one.", + "adminAccess": "AdminAccess indicates that this is a claim for administrative access to the device(s). Claims with AdminAccess are expected to be used for monitoring or other management services for a device. They ignore all ordinary claims to the device with respect to access modes and any resource allocations.", +} + +func (DeviceRequest) SwaggerDoc() map[string]string { + return map_DeviceRequest +} + +var map_DeviceRequestAllocationResult = map[string]string{ + "": "DeviceRequestAllocationResult contains the allocation result for one request.", + "request": "Request is the name of the request in the claim which caused this device to be allocated. Multiple devices may have been allocated per request.", + "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.", + "pool": "This name together with the driver name and the device name field identify which device was allocated (`//`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.", + "device": "Device references one device instance via its name in the driver's resource pool. It must be a DNS label.", +} + +func (DeviceRequestAllocationResult) SwaggerDoc() map[string]string { + return map_DeviceRequestAllocationResult +} + +var map_DeviceSelector = map[string]string{ + "": "DeviceSelector must have exactly one field set.", + "cel": "CEL contains a CEL expression for selecting a device.", +} + +func (DeviceSelector) SwaggerDoc() map[string]string { + return map_DeviceSelector +} + +var map_OpaqueDeviceConfiguration = map[string]string{ + "": "OpaqueDeviceConfiguration contains configuration parameters for a driver in a format defined by the driver vendor.", + "driver": "Driver is used to determine which kubelet plugin needs to be passed these configuration parameters.\n\nAn admission policy provided by the driver developer could use this to decide whether it needs to validate them.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.", + "parameters": "Parameters can contain arbitrary data. It is the responsibility of the driver developer to handle validation and versioning. 
Typically this includes self-identification and a version (\"kind\" + \"apiVersion\" for Kubernetes types), with conversion between different versions.", +} + +func (OpaqueDeviceConfiguration) SwaggerDoc() map[string]string { + return map_OpaqueDeviceConfiguration +} + +var map_PodSchedulingContext = map[string]string{ + "": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DRAControlPlaneController feature gate.", + "metadata": "Standard object metadata", + "spec": "Spec describes where resources for the Pod are needed.", + "status": "Status describes where resources for the Pod can be allocated.", +} + +func (PodSchedulingContext) SwaggerDoc() map[string]string { + return map_PodSchedulingContext +} + +var map_PodSchedulingContextList = map[string]string{ + "": "PodSchedulingContextList is a collection of Pod scheduling objects.", + "metadata": "Standard list metadata", + "items": "Items is the list of PodSchedulingContext objects.", +} + +func (PodSchedulingContextList) SwaggerDoc() map[string]string { + return map_PodSchedulingContextList +} + +var map_PodSchedulingContextSpec = map[string]string{ + "": "PodSchedulingContextSpec describes where resources for the Pod are needed.", + "selectedNode": "SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \"WaitForFirstConsumer\" allocation is to be attempted.", + "potentialNodes": "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced.", +} + +func (PodSchedulingContextSpec) SwaggerDoc() map[string]string { + return map_PodSchedulingContextSpec +} + +var map_PodSchedulingContextStatus = map[string]string{ + "": "PodSchedulingContextStatus describes where resources for the Pod can be allocated.", + "resourceClaims": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.", +} + +func (PodSchedulingContextStatus) SwaggerDoc() map[string]string { + return map_PodSchedulingContextStatus +} + +var map_ResourceClaim = map[string]string{ + "": "ResourceClaim describes a request for access to resources in the cluster, for use by workloads. For example, if a workload needs an accelerator device with specific properties, this is how that request is expressed. The status stanza tracks whether this claim has been satisfied and what specific resources have been allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Spec describes what is being requested and how to configure it. The spec is immutable.", + "status": "Status describes whether the claim is ready to use and what has been allocated.", +} + +func (ResourceClaim) SwaggerDoc() map[string]string { + return map_ResourceClaim +} + +var map_ResourceClaimConsumerReference = map[string]string{ + "": "ResourceClaimConsumerReference contains enough information to let you locate the consumer of a ResourceClaim. The user must be a resource in the same namespace as the ResourceClaim.", + "apiGroup": "APIGroup is the group for the resource being referenced. 
It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.", + "resource": "Resource is the type of resource being referenced, for example \"pods\".", + "name": "Name is the name of resource being referenced.", + "uid": "UID identifies exactly one incarnation of the resource.", +} + +func (ResourceClaimConsumerReference) SwaggerDoc() map[string]string { + return map_ResourceClaimConsumerReference +} + +var map_ResourceClaimList = map[string]string{ + "": "ResourceClaimList is a collection of claims.", + "metadata": "Standard list metadata", + "items": "Items is the list of resource claims.", +} + +func (ResourceClaimList) SwaggerDoc() map[string]string { + return map_ResourceClaimList +} + +var map_ResourceClaimSchedulingStatus = map[string]string{ + "": "ResourceClaimSchedulingStatus contains information about one particular ResourceClaim with \"WaitForFirstConsumer\" allocation mode.", + "name": "Name matches the pod.spec.resourceClaims[*].Name field.", + "unsuitableNodes": "UnsuitableNodes lists nodes that the ResourceClaim cannot be allocated for.\n\nThe size of this field is limited to 128, the same as for PodSchedulingSpec.PotentialNodes. This may get increased in the future, but not reduced.", +} + +func (ResourceClaimSchedulingStatus) SwaggerDoc() map[string]string { + return map_ResourceClaimSchedulingStatus +} + +var map_ResourceClaimSpec = map[string]string{ + "": "ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.", + "devices": "Devices defines how to request devices.", + "controller": "Controller is the name of the DRA driver that is meant to handle allocation of this claim. If empty, allocation is handled by the scheduler while scheduling a pod.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", +} + +func (ResourceClaimSpec) SwaggerDoc() map[string]string { + return map_ResourceClaimSpec +} + +var map_ResourceClaimStatus = map[string]string{ + "": "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.", + "allocation": "Allocation is set once the claim has been allocated successfully.", + "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", + "deallocationRequested": "Indicates that a claim is to be deallocated. While this is set, no new consumers may be added to ReservedFor.\n\nThis is only used if the claim needs to be deallocated by a DRA driver. 
That driver then must deallocate this claim and reset the field together with clearing the Allocation field.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", +} + +func (ResourceClaimStatus) SwaggerDoc() map[string]string { + return map_ResourceClaimStatus +} + +var map_ResourceClaimTemplate = map[string]string{ + "": "ResourceClaimTemplate is used to produce ResourceClaim objects.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Describes the ResourceClaim that is to be generated.\n\nThis field is immutable. A ResourceClaim will get created by the control plane for a Pod when needed and then not get updated anymore.", +} + +func (ResourceClaimTemplate) SwaggerDoc() map[string]string { + return map_ResourceClaimTemplate +} + +var map_ResourceClaimTemplateList = map[string]string{ + "": "ResourceClaimTemplateList is a collection of claim templates.", + "metadata": "Standard list metadata", + "items": "Items is the list of resource claim templates.", +} + +func (ResourceClaimTemplateList) SwaggerDoc() map[string]string { + return map_ResourceClaimTemplateList +} + +var map_ResourceClaimTemplateSpec = map[string]string{ + "": "ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.", + "metadata": "ObjectMeta may contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.", + "spec": "Spec for the ResourceClaim. The entire content is copied unchanged into the ResourceClaim that gets created from this template. The same fields as in a ResourceClaim are also valid here.", +} + +func (ResourceClaimTemplateSpec) SwaggerDoc() map[string]string { + return map_ResourceClaimTemplateSpec +} + +var map_ResourcePool = map[string]string{ + "": "ResourcePool describes the pool that ResourceSlices belong to.", + "name": "Name is used to identify the pool. For node-local devices, this is often the node name, but this is not required.\n\nIt must not be longer than 253 characters and must consist of one or more DNS sub-domains separated by slashes. This field is immutable.", + "generation": "Generation tracks the change in a pool over time. Whenever a driver changes something about one or more of the resources in a pool, it must change the generation in all ResourceSlices which are part of that pool. Consumers of ResourceSlices should only consider resources from the pool with the highest generation number. The generation may be reset by drivers, which should be fine for consumers, assuming that all ResourceSlices in a pool are updated to match or deleted.\n\nCombined with ResourceSliceCount, this mechanism enables consumers to detect pools which are comprised of multiple ResourceSlices and are in an incomplete state.", + "resourceSliceCount": "ResourceSliceCount is the total number of ResourceSlices in the pool at this generation number. Must be greater than zero.\n\nConsumers can use this to check whether they have seen all ResourceSlices belonging to the same pool.", +} + +func (ResourcePool) SwaggerDoc() map[string]string { + return map_ResourcePool +} + +var map_ResourceSlice = map[string]string{ + "": "ResourceSlice represents one or more resources in a pool of similar resources, managed by a common driver. 
A pool may span more than one ResourceSlice, and exactly how many ResourceSlices comprise a pool is determined by the driver.\n\nAt the moment, the only supported resources are devices with attributes and capacities. Each device in a given pool, regardless of how many ResourceSlices, must have a unique name. The ResourceSlice in which a device gets published may change over time. The unique identifier for a device is the tuple <driver name>, <pool name>, <device name>.\n\nWhenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number and updates all ResourceSlices with that new number and new resource definitions. A consumer must only use ResourceSlices with the highest generation number and ignore all others.\n\nWhen allocating all resources in a pool matching certain criteria or when looking for the best solution among several different alternatives, a consumer should check the number of ResourceSlices in a pool (included in each ResourceSlice) to determine whether its view of a pool is complete and if not, should wait until the driver has completed updating the pool.\n\nFor resources that are not local to a node, the node name is not set. Instead, the driver may use a node selector to specify where the devices are available.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Contains the information published by the driver.\n\nChanging the spec automatically increments the metadata.generation number.", +} + +func (ResourceSlice) SwaggerDoc() map[string]string { + return map_ResourceSlice +} + +var map_ResourceSliceList = map[string]string{ + "": "ResourceSliceList is a collection of ResourceSlices.", + "metadata": "Standard list metadata", + "items": "Items is the list of resource ResourceSlices.", +} + +func (ResourceSliceList) SwaggerDoc() map[string]string { + return map_ResourceSliceList +} + +var map_ResourceSliceSpec = map[string]string{ + "": "ResourceSliceSpec contains the information published by the driver in one ResourceSlice.", + "driver": "Driver identifies the DRA driver providing the capacity information. A field selector can be used to list only ResourceSlice objects with a certain driver name.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. This field is immutable.", + "pool": "Pool describes the pool that this ResourceSlice belongs to.", + "nodeName": "NodeName identifies the node which provides the resources in this pool. A field selector can be used to list only ResourceSlice objects belonging to a certain node.\n\nThis field can be used to limit access from nodes to ResourceSlices with the same node name. It also indicates to autoscalers that adding new nodes of the same type as some old node might also make new resources available.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set.
This field is immutable.", + "nodeSelector": "NodeSelector defines which nodes have access to the resources in the pool, when that pool is not limited to a single node.\n\nMust use exactly one term.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set.", + "allNodes": "AllNodes indicates that all nodes have access to the resources in the pool.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set.", + "devices": "Devices lists some or all of the devices in this pool.\n\nMust not have more than 128 entries.", +} + +func (ResourceSliceSpec) SwaggerDoc() map[string]string { + return map_ResourceSliceSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go b/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go new file mode 100644 index 0000000000..58171df1f2 --- /dev/null +++ b/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go @@ -0,0 +1,927 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllocationResult) DeepCopyInto(out *AllocationResult) { + *out = *in + in.Devices.DeepCopyInto(&out.Devices) + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(v1.NodeSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationResult. +func (in *AllocationResult) DeepCopy() *AllocationResult { + if in == nil { + return nil + } + out := new(AllocationResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicDevice) DeepCopyInto(out *BasicDevice) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(map[QualifiedName]DeviceAttribute, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(map[QualifiedName]resource.Quantity, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicDevice. +func (in *BasicDevice) DeepCopy() *BasicDevice { + if in == nil { + return nil + } + out := new(BasicDevice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CELDeviceSelector) DeepCopyInto(out *CELDeviceSelector) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CELDeviceSelector. +func (in *CELDeviceSelector) DeepCopy() *CELDeviceSelector { + if in == nil { + return nil + } + out := new(CELDeviceSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Device) DeepCopyInto(out *Device) { + *out = *in + if in.Basic != nil { + in, out := &in.Basic, &out.Basic + *out = new(BasicDevice) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Device. +func (in *Device) DeepCopy() *Device { + if in == nil { + return nil + } + out := new(Device) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceAllocationConfiguration) DeepCopyInto(out *DeviceAllocationConfiguration) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAllocationConfiguration. +func (in *DeviceAllocationConfiguration) DeepCopy() *DeviceAllocationConfiguration { + if in == nil { + return nil + } + out := new(DeviceAllocationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceAllocationResult) DeepCopyInto(out *DeviceAllocationResult) { + *out = *in + if in.Results != nil { + in, out := &in.Results, &out.Results + *out = make([]DeviceRequestAllocationResult, len(*in)) + copy(*out, *in) + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]DeviceAllocationConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAllocationResult. +func (in *DeviceAllocationResult) DeepCopy() *DeviceAllocationResult { + if in == nil { + return nil + } + out := new(DeviceAllocationResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceAttribute) DeepCopyInto(out *DeviceAttribute) { + *out = *in + if in.IntValue != nil { + in, out := &in.IntValue, &out.IntValue + *out = new(int64) + **out = **in + } + if in.BoolValue != nil { + in, out := &in.BoolValue, &out.BoolValue + *out = new(bool) + **out = **in + } + if in.StringValue != nil { + in, out := &in.StringValue, &out.StringValue + *out = new(string) + **out = **in + } + if in.VersionValue != nil { + in, out := &in.VersionValue, &out.VersionValue + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAttribute. +func (in *DeviceAttribute) DeepCopy() *DeviceAttribute { + if in == nil { + return nil + } + out := new(DeviceAttribute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeviceClaim) DeepCopyInto(out *DeviceClaim) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make([]DeviceRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Constraints != nil { + in, out := &in.Constraints, &out.Constraints + *out = make([]DeviceConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]DeviceClaimConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClaim. +func (in *DeviceClaim) DeepCopy() *DeviceClaim { + if in == nil { + return nil + } + out := new(DeviceClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClaimConfiguration) DeepCopyInto(out *DeviceClaimConfiguration) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClaimConfiguration. +func (in *DeviceClaimConfiguration) DeepCopy() *DeviceClaimConfiguration { + if in == nil { + return nil + } + out := new(DeviceClaimConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClass) DeepCopyInto(out *DeviceClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClass. +func (in *DeviceClass) DeepCopy() *DeviceClass { + if in == nil { + return nil + } + out := new(DeviceClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeviceClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClassConfiguration) DeepCopyInto(out *DeviceClassConfiguration) { + *out = *in + in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassConfiguration. +func (in *DeviceClassConfiguration) DeepCopy() *DeviceClassConfiguration { + if in == nil { + return nil + } + out := new(DeviceClassConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClassList) DeepCopyInto(out *DeviceClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DeviceClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassList. 
+func (in *DeviceClassList) DeepCopy() *DeviceClassList { + if in == nil { + return nil + } + out := new(DeviceClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeviceClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClassSpec) DeepCopyInto(out *DeviceClassSpec) { + *out = *in + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]DeviceSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]DeviceClassConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SuitableNodes != nil { + in, out := &in.SuitableNodes, &out.SuitableNodes + *out = new(v1.NodeSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassSpec. +func (in *DeviceClassSpec) DeepCopy() *DeviceClassSpec { + if in == nil { + return nil + } + out := new(DeviceClassSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceConfiguration) DeepCopyInto(out *DeviceConfiguration) { + *out = *in + if in.Opaque != nil { + in, out := &in.Opaque, &out.Opaque + *out = new(OpaqueDeviceConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceConfiguration. +func (in *DeviceConfiguration) DeepCopy() *DeviceConfiguration { + if in == nil { + return nil + } + out := new(DeviceConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceConstraint) DeepCopyInto(out *DeviceConstraint) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.MatchAttribute != nil { + in, out := &in.MatchAttribute, &out.MatchAttribute + *out = new(FullyQualifiedName) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceConstraint. +func (in *DeviceConstraint) DeepCopy() *DeviceConstraint { + if in == nil { + return nil + } + out := new(DeviceConstraint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceRequest) DeepCopyInto(out *DeviceRequest) { + *out = *in + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]DeviceSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceRequest. +func (in *DeviceRequest) DeepCopy() *DeviceRequest { + if in == nil { + return nil + } + out := new(DeviceRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeviceRequestAllocationResult) DeepCopyInto(out *DeviceRequestAllocationResult) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceRequestAllocationResult. +func (in *DeviceRequestAllocationResult) DeepCopy() *DeviceRequestAllocationResult { + if in == nil { + return nil + } + out := new(DeviceRequestAllocationResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceSelector) DeepCopyInto(out *DeviceSelector) { + *out = *in + if in.CEL != nil { + in, out := &in.CEL, &out.CEL + *out = new(CELDeviceSelector) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceSelector. +func (in *DeviceSelector) DeepCopy() *DeviceSelector { + if in == nil { + return nil + } + out := new(DeviceSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpaqueDeviceConfiguration) DeepCopyInto(out *OpaqueDeviceConfiguration) { + *out = *in + in.Parameters.DeepCopyInto(&out.Parameters) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpaqueDeviceConfiguration. +func (in *OpaqueDeviceConfiguration) DeepCopy() *OpaqueDeviceConfiguration { + if in == nil { + return nil + } + out := new(OpaqueDeviceConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSchedulingContext) DeepCopyInto(out *PodSchedulingContext) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContext. +func (in *PodSchedulingContext) DeepCopy() *PodSchedulingContext { + if in == nil { + return nil + } + out := new(PodSchedulingContext) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodSchedulingContext) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSchedulingContextList) DeepCopyInto(out *PodSchedulingContextList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodSchedulingContext, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextList. +func (in *PodSchedulingContextList) DeepCopy() *PodSchedulingContextList { + if in == nil { + return nil + } + out := new(PodSchedulingContextList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PodSchedulingContextList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSchedulingContextSpec) DeepCopyInto(out *PodSchedulingContextSpec) { + *out = *in + if in.PotentialNodes != nil { + in, out := &in.PotentialNodes, &out.PotentialNodes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextSpec. +func (in *PodSchedulingContextSpec) DeepCopy() *PodSchedulingContextSpec { + if in == nil { + return nil + } + out := new(PodSchedulingContextSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSchedulingContextStatus) DeepCopyInto(out *PodSchedulingContextStatus) { + *out = *in + if in.ResourceClaims != nil { + in, out := &in.ResourceClaims, &out.ResourceClaims + *out = make([]ResourceClaimSchedulingStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextStatus. +func (in *PodSchedulingContextStatus) DeepCopy() *PodSchedulingContextStatus { + if in == nil { + return nil + } + out := new(PodSchedulingContextStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaim) DeepCopyInto(out *ResourceClaim) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaim. +func (in *ResourceClaim) DeepCopy() *ResourceClaim { + if in == nil { + return nil + } + out := new(ResourceClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceClaim) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimConsumerReference) DeepCopyInto(out *ResourceClaimConsumerReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimConsumerReference. +func (in *ResourceClaimConsumerReference) DeepCopy() *ResourceClaimConsumerReference { + if in == nil { + return nil + } + out := new(ResourceClaimConsumerReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimList) DeepCopyInto(out *ResourceClaimList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimList. 
+func (in *ResourceClaimList) DeepCopy() *ResourceClaimList { + if in == nil { + return nil + } + out := new(ResourceClaimList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceClaimList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimSchedulingStatus) DeepCopyInto(out *ResourceClaimSchedulingStatus) { + *out = *in + if in.UnsuitableNodes != nil { + in, out := &in.UnsuitableNodes, &out.UnsuitableNodes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimSchedulingStatus. +func (in *ResourceClaimSchedulingStatus) DeepCopy() *ResourceClaimSchedulingStatus { + if in == nil { + return nil + } + out := new(ResourceClaimSchedulingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimSpec) DeepCopyInto(out *ResourceClaimSpec) { + *out = *in + in.Devices.DeepCopyInto(&out.Devices) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimSpec. +func (in *ResourceClaimSpec) DeepCopy() *ResourceClaimSpec { + if in == nil { + return nil + } + out := new(ResourceClaimSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimStatus) DeepCopyInto(out *ResourceClaimStatus) { + *out = *in + if in.Allocation != nil { + in, out := &in.Allocation, &out.Allocation + *out = new(AllocationResult) + (*in).DeepCopyInto(*out) + } + if in.ReservedFor != nil { + in, out := &in.ReservedFor, &out.ReservedFor + *out = make([]ResourceClaimConsumerReference, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimStatus. +func (in *ResourceClaimStatus) DeepCopy() *ResourceClaimStatus { + if in == nil { + return nil + } + out := new(ResourceClaimStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimTemplate) DeepCopyInto(out *ResourceClaimTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplate. +func (in *ResourceClaimTemplate) DeepCopy() *ResourceClaimTemplate { + if in == nil { + return nil + } + out := new(ResourceClaimTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceClaimTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceClaimTemplateList) DeepCopyInto(out *ResourceClaimTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceClaimTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateList. +func (in *ResourceClaimTemplateList) DeepCopy() *ResourceClaimTemplateList { + if in == nil { + return nil + } + out := new(ResourceClaimTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceClaimTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimTemplateSpec) DeepCopyInto(out *ResourceClaimTemplateSpec) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateSpec. +func (in *ResourceClaimTemplateSpec) DeepCopy() *ResourceClaimTemplateSpec { + if in == nil { + return nil + } + out := new(ResourceClaimTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePool) DeepCopyInto(out *ResourcePool) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePool. +func (in *ResourcePool) DeepCopy() *ResourcePool { + if in == nil { + return nil + } + out := new(ResourcePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSlice) DeepCopyInto(out *ResourceSlice) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSlice. +func (in *ResourceSlice) DeepCopy() *ResourceSlice { + if in == nil { + return nil + } + out := new(ResourceSlice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceSlice) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSliceList) DeepCopyInto(out *ResourceSliceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceSlice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSliceList. 
+func (in *ResourceSliceList) DeepCopy() *ResourceSliceList { + if in == nil { + return nil + } + out := new(ResourceSliceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceSliceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSliceSpec) DeepCopyInto(out *ResourceSliceSpec) { + *out = *in + out.Pool = in.Pool + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(v1.NodeSelector) + (*in).DeepCopyInto(*out) + } + if in.Devices != nil { + in, out := &in.Devices, &out.Devices + *out = make([]Device, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSliceSpec. +func (in *ResourceSliceSpec) DeepCopy() *ResourceSliceSpec { + if in == nil { + return nil + } + out := new(ResourceSliceSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/scheduling/v1/doc.go b/vendor/k8s.io/api/scheduling/v1/doc.go index 76c4da002e..ee3c668471 100644 --- a/vendor/k8s.io/api/scheduling/v1/doc.go +++ b/vendor/k8s.io/api/scheduling/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=scheduling.k8s.io package v1 // import "k8s.io/api/scheduling/v1" diff --git a/vendor/k8s.io/api/scheduling/v1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1/generated.pb.go index 373c901e6b..6fef1a9379 100644 --- a/vendor/k8s.io/api/scheduling/v1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto +// source: k8s.io/api/scheduling/v1/generated.proto package v1 @@ -48,7 +48,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *PriorityClass) Reset() { *m = PriorityClass{} } func (*PriorityClass) ProtoMessage() {} func (*PriorityClass) Descriptor() ([]byte, []int) { - return fileDescriptor_277b2f43b72fffd5, []int{0} + return fileDescriptor_3f12bd05064e996e, []int{0} } func (m *PriorityClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -76,7 +76,7 @@ var xxx_messageInfo_PriorityClass proto.InternalMessageInfo func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } func (*PriorityClassList) ProtoMessage() {} func (*PriorityClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_277b2f43b72fffd5, []int{1} + return fileDescriptor_3f12bd05064e996e, []int{1} } func (m *PriorityClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,42 +107,41 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto", fileDescriptor_277b2f43b72fffd5) + proto.RegisterFile("k8s.io/api/scheduling/v1/generated.proto", fileDescriptor_3f12bd05064e996e) } -var fileDescriptor_277b2f43b72fffd5 = []byte{ - // 492 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x4f, 0x8b, 0xd3, 0x4e, - 0x18, 0xc7, 0x3b, 0xdd, 0x5f, 0xa1, 0xbf, 0x29, 0x85, 0x1a, 0x11, 0x42, 0x0f, 0x69, 0xe9, 0x1e, - 0xec, 0xc5, 0x19, 0xbb, 0xa8, 0x08, 0x0b, 0x82, 0x71, 0x41, 0x84, 0x15, 0x4b, 0x0e, 0x1e, 0xc4, - 0x83, 0x93, 0xe4, 0xd9, 0x74, 0x6c, 0x92, 0x09, 0x33, 0x93, 0x40, 0x6f, 0xbe, 0x04, 0xdf, 0x91, - 0xd7, 0x1e, 0xf7, 0xb8, 0xa7, 0x62, 0xe3, 0x4b, 0xf0, 0xe6, 0x49, 0x92, 0xc6, 0x4d, 0xff, 0x6c, - 0xd1, 0x5b, 0x9e, 0xe7, 0xf9, 0x7e, 0xbe, 0x33, 0xf3, 0xcd, 0x0c, 0x7e, 0x39, 0x7f, 0xae, 0x08, - 0x17, 0x74, 0x9e, 0xba, 0x20, 0x63, 0xd0, 0xa0, 0x68, 0x06, 0xb1, 0x2f, 0x24, 0xad, 0x06, 0x2c, - 0xe1, 0x54, 0x79, 0x33, 0xf0, 0xd3, 0x90, 0xc7, 0x01, 0xcd, 0x26, 0x34, 0x80, 0x18, 0x24, 0xd3, - 0xe0, 0x93, 0x44, 0x0a, 0x2d, 0x0c, 0x73, 0xa3, 0x24, 0x2c, 0xe1, 0xa4, 0x56, 0x92, 0x6c, 0xd2, - 0x7f, 0x14, 0x70, 0x3d, 0x4b, 0x5d, 0xe2, 0x89, 0x88, 0x06, 0x22, 0x10, 0xb4, 0x04, 0xdc, 0xf4, - 0xaa, 0xac, 0xca, 0xa2, 0xfc, 0xda, 0x18, 0xf5, 0x47, 0x5b, 0x4b, 0x7a, 0x42, 0xc2, 0x1d, 0x8b, - 0xf5, 0x9f, 0xd4, 0x9a, 0x88, 0x79, 0x33, 0x1e, 0x83, 0x5c, 0xd0, 0x64, 0x1e, 0x14, 0x0d, 0x45, - 0x23, 0xd0, 0xec, 0x2e, 0x8a, 0x1e, 0xa3, 0x64, 0x1a, 0x6b, 0x1e, 0xc1, 0x01, 0xf0, 0xec, 0x6f, - 0x40, 0x71, 0xd0, 0x88, 0xed, 0x73, 0xa3, 0x9f, 0x4d, 0xdc, 0x9d, 0x4a, 0x2e, 0x24, 0xd7, 0x8b, - 0x57, 0x21, 0x53, 0xca, 0xf8, 0x84, 0xdb, 0xc5, 0xae, 0x7c, 0xa6, 0x99, 0x89, 0x86, 0x68, 0xdc, - 0x39, 0x7b, 0x4c, 0xea, 0xc0, 0x6e, 0xcd, 0x49, 0x32, 0x0f, 0x8a, 0x86, 0x22, 0x85, 0x9a, 0x64, - 0x13, 0xf2, 0xce, 0xfd, 0x0c, 0x9e, 0x7e, 0x0b, 0x9a, 0xd9, 0xc6, 0x72, 0x35, 0x68, 0xe4, 0xab, - 0x01, 0xae, 0x7b, 0xce, 0xad, 0xab, 0x71, 0x8a, 0x5b, 0x19, 0x0b, 0x53, 0x30, 0x9b, 0x43, 0x34, - 0x6e, 0xd9, 0xdd, 0x4a, 0xdc, 0x7a, 0x5f, 0x34, 0x9d, 0xcd, 0xcc, 0x38, 0xc7, 0xdd, 0x20, 0x14, - 0x2e, 0x0b, 0x2f, 0xe0, 0x8a, 0xa5, 0xa1, 0x36, 0x4f, 0x86, 0x68, 0xdc, 0xb6, 0x1f, 0x54, 0xe2, - 0xee, 0xeb, 0xed, 0xa1, 0xb3, 0xab, 0x35, 0x9e, 0xe2, 0x8e, 0x0f, 0xca, 0x93, 0x3c, 0xd1, 0x5c, - 0xc4, 0xe6, 0x7f, 0x43, 0x34, 0xfe, 0xdf, 0xbe, 0x5f, 0xa1, 0x9d, 0x8b, 0x7a, 0xe4, 0x6c, 0xeb, - 0x8c, 0x00, 0xf7, 0x12, 
0x09, 0x10, 0x95, 0xd5, 0x54, 0x84, 0xdc, 0x5b, 0x98, 0xad, 0x92, 0x3d, - 0xcf, 0x57, 0x83, 0xde, 0x74, 0x6f, 0xf6, 0x6b, 0x35, 0x38, 0x3d, 0xbc, 0x01, 0x64, 0x5f, 0xe6, - 0x1c, 0x98, 0x8e, 0xbe, 0x21, 0x7c, 0x6f, 0x27, 0xf5, 0x4b, 0xae, 0xb4, 0xf1, 0xf1, 0x20, 0x79, - 0xf2, 0x6f, 0xc9, 0x17, 0x74, 0x99, 0x7b, 0xaf, 0x3a, 0x62, 0xfb, 0x4f, 0x67, 0x2b, 0xf5, 0x4b, - 0xdc, 0xe2, 0x1a, 0x22, 0x65, 0x36, 0x87, 0x27, 0xe3, 0xce, 0xd9, 0x43, 0x72, 0xec, 0x15, 0x90, - 0x9d, 0x9d, 0xd5, 0xbf, 0xe7, 0x4d, 0x41, 0x3b, 0x1b, 0x13, 0xfb, 0xc5, 0x72, 0x6d, 0x35, 0xae, - 0xd7, 0x56, 0xe3, 0x66, 0x6d, 0x35, 0xbe, 0xe4, 0x16, 0x5a, 0xe6, 0x16, 0xba, 0xce, 0x2d, 0x74, - 0x93, 0x5b, 0xe8, 0x7b, 0x6e, 0xa1, 0xaf, 0x3f, 0xac, 0xc6, 0x07, 0xf3, 0xd8, 0x9b, 0xfc, 0x1d, - 0x00, 0x00, 0xff, 0xff, 0xa9, 0x88, 0x2b, 0xa0, 0xc7, 0x03, 0x00, 0x00, +var fileDescriptor_3f12bd05064e996e = []byte{ + // 476 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x3f, 0x8f, 0xd3, 0x30, + 0x18, 0xc6, 0xe3, 0x1e, 0x91, 0x8a, 0xab, 0x4a, 0x25, 0x08, 0x29, 0xea, 0xe0, 0x46, 0xbd, 0x81, + 0x2c, 0xd8, 0xf4, 0x04, 0x08, 0xe9, 0x24, 0x86, 0x70, 0x12, 0x42, 0x3a, 0x44, 0x95, 0x81, 0x01, + 0x31, 0xe0, 0xa6, 0x3e, 0xd7, 0x34, 0x89, 0x23, 0xdb, 0xa9, 0xd4, 0x8d, 0x8f, 0xc0, 0x37, 0x62, + 0xed, 0x78, 0xe3, 0x4d, 0x15, 0x0d, 0x1f, 0x81, 0x8d, 0x09, 0x25, 0x2d, 0x97, 0xfe, 0xb9, 0x0a, + 0xb6, 0xbc, 0xef, 0xfb, 0xfc, 0x1e, 0xdb, 0x4f, 0x6c, 0xe8, 0x4f, 0x5f, 0x6a, 0x2c, 0x24, 0xa1, + 0x99, 0x20, 0x3a, 0x9a, 0xb0, 0x71, 0x1e, 0x8b, 0x94, 0x93, 0xd9, 0x80, 0x70, 0x96, 0x32, 0x45, + 0x0d, 0x1b, 0xe3, 0x4c, 0x49, 0x23, 0x1d, 0x77, 0xad, 0xc4, 0x34, 0x13, 0xb8, 0x56, 0xe2, 0xd9, + 0xa0, 0xfb, 0x84, 0x0b, 0x33, 0xc9, 0x47, 0x38, 0x92, 0x09, 0xe1, 0x92, 0x4b, 0x52, 0x01, 0xa3, + 0xfc, 0xaa, 0xaa, 0xaa, 0xa2, 0xfa, 0x5a, 0x1b, 0x75, 0xfb, 0x5b, 0x4b, 0x46, 0x52, 0xb1, 0x3b, + 0x16, 0xeb, 0x3e, 0xab, 0x35, 0x09, 0x8d, 0x26, 0x22, 0x65, 0x6a, 0x4e, 0xb2, 0x29, 0x2f, 0x1b, + 0x9a, 0x24, 0xcc, 0xd0, 0xbb, 0x28, 0x72, 0x8c, 0x52, 0x79, 0x6a, 0x44, 0xc2, 0x0e, 0x80, 0x17, + 0xff, 0x02, 0xca, 0x83, 0x26, 0x74, 0x9f, 0xeb, 0xff, 0x6a, 0xc0, 0xf6, 0x50, 0x09, 0xa9, 0x84, + 0x99, 0xbf, 0x8e, 0xa9, 0xd6, 0xce, 0x67, 0xd8, 0x2c, 0x77, 0x35, 0xa6, 0x86, 0xba, 0xc0, 0x03, + 0x7e, 0xeb, 0xec, 0x29, 0xae, 0x03, 0xbb, 0x35, 0xc7, 0xd9, 0x94, 0x97, 0x0d, 0x8d, 0x4b, 0x35, + 0x9e, 0x0d, 0xf0, 0xfb, 0xd1, 0x17, 0x16, 0x99, 0x77, 0xcc, 0xd0, 0xc0, 0x59, 0x2c, 0x7b, 0x56, + 0xb1, 0xec, 0xc1, 0xba, 0x17, 0xde, 0xba, 0x3a, 0xa7, 0xd0, 0x9e, 0xd1, 0x38, 0x67, 0x6e, 0xc3, + 0x03, 0xbe, 0x1d, 0xb4, 0x37, 0x62, 0xfb, 0x43, 0xd9, 0x0c, 0xd7, 0x33, 0xe7, 0x1c, 0xb6, 0x79, + 0x2c, 0x47, 0x34, 0xbe, 0x60, 0x57, 0x34, 0x8f, 0x8d, 0x7b, 0xe2, 0x01, 0xbf, 0x19, 0x3c, 0xda, + 0x88, 0xdb, 0x6f, 0xb6, 0x87, 0xe1, 0xae, 0xd6, 0x79, 0x0e, 0x5b, 0x63, 0xa6, 0x23, 0x25, 0x32, + 0x23, 0x64, 0xea, 0xde, 0xf3, 0x80, 0x7f, 0x3f, 0x78, 0xb8, 0x41, 0x5b, 0x17, 0xf5, 0x28, 0xdc, + 0xd6, 0x39, 0x1c, 0x76, 0x32, 0xc5, 0x58, 0x52, 0x55, 0x43, 0x19, 0x8b, 0x68, 0xee, 0xda, 0x15, + 0x7b, 0x5e, 0x2c, 0x7b, 0x9d, 0xe1, 0xde, 0xec, 0xf7, 0xb2, 0x77, 0x7a, 0x78, 0x03, 0xf0, 0xbe, + 0x2c, 0x3c, 0x30, 0xed, 0x7f, 0x07, 0xf0, 0xc1, 0x4e, 0xea, 0x97, 0x42, 0x1b, 0xe7, 0xd3, 0x41, + 0xf2, 0xf8, 0xff, 0x92, 0x2f, 0xe9, 0x2a, 0xf7, 0xce, 0xe6, 0x88, 0xcd, 0xbf, 0x9d, 0xad, 0xd4, + 0x2f, 0xa1, 0x2d, 0x0c, 0x4b, 0xb4, 0xdb, 0xf0, 0x4e, 0xfc, 0xd6, 0xd9, 0x63, 0x7c, 0xec, 0x15, + 0xe0, 0x9d, 0x9d, 0xd5, 0xbf, 0xe7, 0x6d, 0x49, 0x87, 0x6b, 0x93, 0xe0, 0xd5, 
0x62, 0x85, 0xac, + 0xeb, 0x15, 0xb2, 0x6e, 0x56, 0xc8, 0xfa, 0x5a, 0x20, 0xb0, 0x28, 0x10, 0xb8, 0x2e, 0x10, 0xb8, + 0x29, 0x10, 0xf8, 0x51, 0x20, 0xf0, 0xed, 0x27, 0xb2, 0x3e, 0xba, 0xc7, 0xde, 0xe4, 0x9f, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x9a, 0x3d, 0x5f, 0x2e, 0xae, 0x03, 0x00, 0x00, } func (m *PriorityClass) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/scheduling/v1/generated.proto b/vendor/k8s.io/api/scheduling/v1/generated.proto index c1a27e8baa..374e68238b 100644 --- a/vendor/k8s.io/api/scheduling/v1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1/generated.proto @@ -35,7 +35,7 @@ message PriorityClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. @@ -66,7 +66,7 @@ message PriorityClassList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of PriorityClasses repeated PriorityClass items = 2; diff --git a/vendor/k8s.io/api/scheduling/v1/types.go b/vendor/k8s.io/api/scheduling/v1/types.go index 146bae40d3..019dbcd00e 100644 --- a/vendor/k8s.io/api/scheduling/v1/types.go +++ b/vendor/k8s.io/api/scheduling/v1/types.go @@ -24,6 +24,7 @@ import ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.14 // PriorityClass defines mapping from a priority class name to the priority // integer value. The value can be any valid integer. @@ -59,6 +60,7 @@ type PriorityClass struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.14 // PriorityClassList is a collection of priority classes. type PriorityClassList struct { diff --git a/vendor/k8s.io/api/scheduling/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/scheduling/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..a4a432a64f --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityClass) APILifecycleIntroduced() (major, minor int) { + return 1, 14 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 14 +} diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go index d2c5d2f33f..83e504b5a3 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +// source: k8s.io/api/scheduling/v1alpha1/generated.proto package v1alpha1 @@ -48,7 +48,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *PriorityClass) Reset() { *m = PriorityClass{} } func (*PriorityClass) ProtoMessage() {} func (*PriorityClass) Descriptor() ([]byte, []int) { - return fileDescriptor_f033641dd0b95dce, []int{0} + return fileDescriptor_260442fbb28d876a, []int{0} } func (m *PriorityClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -76,7 +76,7 @@ var xxx_messageInfo_PriorityClass proto.InternalMessageInfo func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } func (*PriorityClassList) ProtoMessage() {} func (*PriorityClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_f033641dd0b95dce, []int{1} + return fileDescriptor_260442fbb28d876a, []int{1} } func (m *PriorityClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,42 +107,41 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto", fileDescriptor_f033641dd0b95dce) + proto.RegisterFile("k8s.io/api/scheduling/v1alpha1/generated.proto", fileDescriptor_260442fbb28d876a) } -var fileDescriptor_f033641dd0b95dce = []byte{ - // 495 bytes of a gzipped FileDescriptorProto +var fileDescriptor_260442fbb28d876a = []byte{ + // 480 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x31, 0x8f, 0xd3, 0x30, - 0x14, 0xc7, 0xeb, 0x1e, 0x95, 0x8a, 0xab, 0x4a, 0x25, 0x08, 0x29, 0xea, 0xe0, 0x56, 0xbd, 0xa5, - 0xcb, 0xd9, 0xf4, 0x04, 0x08, 0xe9, 0xb6, 0x52, 0xe9, 0x84, 0x04, 0xa2, 0xca, 0xc0, 0x80, 0x18, - 0x70, 0xd3, 0x77, 0xa9, 0x69, 0x12, 0x47, 0xb6, 0x13, 0xa9, 0x1b, 0x1f, 0x81, 0x2f, 0x85, 0xd4, - 0xf1, 0xc6, 0x9b, 0x2a, 0x1a, 0x3e, 0x02, 0x1b, 0x13, 0x4a, 0x9a, 0xbb, 0xb4, 0x0d, 0x1c, 0x6c, - 0x79, 0xef, 0xfd, 0xfe, 0x7f, 0xdb, 0xff, 0xd8, 0xf8, 0x72, 0xf9, 0x52, 0x53, 0x21, 0xd9, 0x32, - 0x9e, 0x81, 0x0a, 0xc1, 0x80, 0x66, 0x09, 0x84, 0x73, 0xa9, 0x58, 0x31, 0xe0, 0x91, 0x60, 0xda, - 0x5d, 0xc0, 0x3c, 0xf6, 0x45, 0xe8, 0xb1, 0x64, 0xc4, 0xfd, 0x68, 0xc1, 0x47, 0xcc, 0x83, 0x10, - 0x14, 0x37, 0x30, 0xa7, 0x91, 0x92, 0x46, 0x5a, 0x64, 0xc7, 0x53, 0x1e, 0x09, 0x5a, 0xf2, 0xf4, - 0x96, 0xef, 0x9e, 0x79, 0xc2, 0x2c, 0xe2, 0x19, 0x75, 0x65, 0xc0, 0x3c, 0xe9, 0x49, 0x96, 0xcb, - 0x66, 0xf1, 0x55, 0x5e, 0xe5, 0x45, 0xfe, 0xb5, 0xb3, 0xeb, 0x0e, 0xf6, 0x96, 0x77, 0xa5, 0x02, - 0x96, 0x54, 0x96, 0xec, 0x3e, 0x2b, 0x99, 0x80, 0xbb, 0x0b, 0x11, 0x82, 
0x5a, 0xb1, 0x68, 0xe9, - 0x65, 0x0d, 0xcd, 0x02, 0x30, 0xfc, 0x4f, 0x2a, 0xf6, 0x37, 0x95, 0x8a, 0x43, 0x23, 0x02, 0xa8, - 0x08, 0x5e, 0xfc, 0x4b, 0x90, 0x1d, 0x37, 0xe0, 0xc7, 0xba, 0xc1, 0xcf, 0x3a, 0x6e, 0x4f, 0x95, - 0x90, 0x4a, 0x98, 0xd5, 0x2b, 0x9f, 0x6b, 0x6d, 0x7d, 0xc2, 0xcd, 0x6c, 0x57, 0x73, 0x6e, 0xb8, - 0x8d, 0xfa, 0x68, 0xd8, 0x3a, 0x7f, 0x4a, 0xcb, 0xd8, 0xee, 0xcc, 0x69, 0xb4, 0xf4, 0xb2, 0x86, - 0xa6, 0x19, 0x4d, 0x93, 0x11, 0x7d, 0x37, 0xfb, 0x0c, 0xae, 0x79, 0x0b, 0x86, 0x8f, 0xad, 0xf5, - 0xa6, 0x57, 0x4b, 0x37, 0x3d, 0x5c, 0xf6, 0x9c, 0x3b, 0x57, 0xeb, 0x14, 0x37, 0x12, 0xee, 0xc7, - 0x60, 0xd7, 0xfb, 0x68, 0xd8, 0x18, 0xb7, 0x0b, 0xb8, 0xf1, 0x3e, 0x6b, 0x3a, 0xbb, 0x99, 0x75, - 0x81, 0xdb, 0x9e, 0x2f, 0x67, 0xdc, 0x9f, 0xc0, 0x15, 0x8f, 0x7d, 0x63, 0x9f, 0xf4, 0xd1, 0xb0, - 0x39, 0x7e, 0x52, 0xc0, 0xed, 0xcb, 0xfd, 0xa1, 0x73, 0xc8, 0x5a, 0xcf, 0x71, 0x6b, 0x0e, 0xda, - 0x55, 0x22, 0x32, 0x42, 0x86, 0xf6, 0x83, 0x3e, 0x1a, 0x3e, 0x1c, 0x3f, 0x2e, 0xa4, 0xad, 0x49, - 0x39, 0x72, 0xf6, 0x39, 0xcb, 0xc3, 0x9d, 0x48, 0x01, 0x04, 0x79, 0x35, 0x95, 0xbe, 0x70, 0x57, - 0x76, 0x23, 0xd7, 0x5e, 0xa4, 0x9b, 0x5e, 0x67, 0x7a, 0x34, 0xfb, 0xb5, 0xe9, 0x9d, 0x56, 0x6f, - 0x00, 0x3d, 0xc6, 0x9c, 0x8a, 0xe9, 0xe0, 0x1b, 0xc2, 0x8f, 0x0e, 0x52, 0x7f, 0x23, 0xb4, 0xb1, - 0x3e, 0x56, 0x92, 0xa7, 0xff, 0x97, 0x7c, 0xa6, 0xce, 0x73, 0xef, 0x14, 0x47, 0x6c, 0xde, 0x76, - 0xf6, 0x52, 0x77, 0x70, 0x43, 0x18, 0x08, 0xb4, 0x5d, 0xef, 0x9f, 0x0c, 0x5b, 0xe7, 0x67, 0xf4, - 0xfe, 0xb7, 0x40, 0x0f, 0xf6, 0x57, 0xfe, 0xa4, 0xd7, 0x99, 0x87, 0xb3, 0xb3, 0x1a, 0x4f, 0xd6, - 0x5b, 0x52, 0xbb, 0xde, 0x92, 0xda, 0xcd, 0x96, 0xd4, 0xbe, 0xa4, 0x04, 0xad, 0x53, 0x82, 0xae, - 0x53, 0x82, 0x6e, 0x52, 0x82, 0xbe, 0xa7, 0x04, 0x7d, 0xfd, 0x41, 0x6a, 0x1f, 0xc8, 0xfd, 0xaf, - 0xf4, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbd, 0xf8, 0x5a, 0x80, 0xdf, 0x03, 0x00, 0x00, + 0x18, 0x86, 0xeb, 0x1e, 0x91, 0x8a, 0xab, 0x4a, 0x25, 0x08, 0x29, 0xea, 0xe0, 0x46, 0xbd, 0x25, + 0xcb, 0xd9, 0xf4, 0x04, 0x08, 0xe9, 0xb6, 0x50, 0x09, 0x21, 0x81, 0xa8, 0x32, 0x30, 0x20, 0x06, + 0xdc, 0xd4, 0xe7, 0x9a, 0x26, 0x71, 0x64, 0x3b, 0x95, 0xba, 0xf1, 0x13, 0xf8, 0x53, 0x48, 0x1d, + 0x6f, 0xbc, 0xa9, 0xa2, 0xe1, 0x27, 0xb0, 0x31, 0xa1, 0xa4, 0xbd, 0x4b, 0xdb, 0xc0, 0x71, 0x5b, + 0xbe, 0xef, 0x7b, 0xde, 0xd7, 0xf6, 0x1b, 0x1b, 0xe2, 0xf9, 0x4b, 0x8d, 0x85, 0x24, 0x34, 0x15, + 0x44, 0x87, 0x33, 0x36, 0xcd, 0x22, 0x91, 0x70, 0xb2, 0x18, 0xd2, 0x28, 0x9d, 0xd1, 0x21, 0xe1, + 0x2c, 0x61, 0x8a, 0x1a, 0x36, 0xc5, 0xa9, 0x92, 0x46, 0xda, 0x68, 0xcb, 0x63, 0x9a, 0x0a, 0x5c, + 0xf1, 0xf8, 0x86, 0xef, 0x9d, 0x71, 0x61, 0x66, 0xd9, 0x04, 0x87, 0x32, 0x26, 0x5c, 0x72, 0x49, + 0x4a, 0xd9, 0x24, 0xbb, 0x2c, 0xab, 0xb2, 0x28, 0xbf, 0xb6, 0x76, 0xbd, 0xc1, 0xde, 0xf2, 0xa1, + 0x54, 0x8c, 0x2c, 0x6a, 0x4b, 0xf6, 0x9e, 0x55, 0x4c, 0x4c, 0xc3, 0x99, 0x48, 0x98, 0x5a, 0x92, + 0x74, 0xce, 0x8b, 0x86, 0x26, 0x31, 0x33, 0xf4, 0x6f, 0x2a, 0xf2, 0x2f, 0x95, 0xca, 0x12, 0x23, + 0x62, 0x56, 0x13, 0xbc, 0xf8, 0x9f, 0xa0, 0x38, 0x6e, 0x4c, 0x8f, 0x75, 0x83, 0x5f, 0x4d, 0xd8, + 0x19, 0x2b, 0x21, 0x95, 0x30, 0xcb, 0x57, 0x11, 0xd5, 0xda, 0xfe, 0x0c, 0x5b, 0xc5, 0xae, 0xa6, + 0xd4, 0x50, 0x07, 0xb8, 0xc0, 0x6b, 0x9f, 0x3f, 0xc5, 0x55, 0x6c, 0xb7, 0xe6, 0x38, 0x9d, 0xf3, + 0xa2, 0xa1, 0x71, 0x41, 0xe3, 0xc5, 0x10, 0xbf, 0x9f, 0x7c, 0x61, 0xa1, 0x79, 0xc7, 0x0c, 0xf5, + 0xed, 0xd5, 0xba, 0xdf, 0xc8, 0xd7, 0x7d, 0x58, 0xf5, 0x82, 0x5b, 0x57, 0xfb, 0x14, 0x5a, 0x0b, + 0x1a, 0x65, 0xcc, 0x69, 0xba, 0xc0, 0xb3, 0xfc, 0xce, 0x0e, 0xb6, 0x3e, 0x14, 0xcd, 0x60, 0x3b, + 0xb3, 
0x2f, 0x60, 0x87, 0x47, 0x72, 0x42, 0xa3, 0x11, 0xbb, 0xa4, 0x59, 0x64, 0x9c, 0x13, 0x17, + 0x78, 0x2d, 0xff, 0xc9, 0x0e, 0xee, 0xbc, 0xde, 0x1f, 0x06, 0x87, 0xac, 0xfd, 0x1c, 0xb6, 0xa7, + 0x4c, 0x87, 0x4a, 0xa4, 0x46, 0xc8, 0xc4, 0x79, 0xe0, 0x02, 0xef, 0xa1, 0xff, 0x78, 0x27, 0x6d, + 0x8f, 0xaa, 0x51, 0xb0, 0xcf, 0xd9, 0x1c, 0x76, 0x53, 0xc5, 0x58, 0x5c, 0x56, 0x63, 0x19, 0x89, + 0x70, 0xe9, 0x58, 0xa5, 0xf6, 0x22, 0x5f, 0xf7, 0xbb, 0xe3, 0xa3, 0xd9, 0xef, 0x75, 0xff, 0xb4, + 0x7e, 0x03, 0xf0, 0x31, 0x16, 0xd4, 0x4c, 0x07, 0xdf, 0x01, 0x7c, 0x74, 0x90, 0xfa, 0x5b, 0xa1, + 0x8d, 0xfd, 0xa9, 0x96, 0x3c, 0xbe, 0x5f, 0xf2, 0x85, 0xba, 0xcc, 0xbd, 0xbb, 0x3b, 0x62, 0xeb, + 0xa6, 0xb3, 0x97, 0x7a, 0x00, 0x2d, 0x61, 0x58, 0xac, 0x9d, 0xa6, 0x7b, 0xe2, 0xb5, 0xcf, 0xcf, + 0xf0, 0xdd, 0x6f, 0x01, 0x1f, 0xec, 0xaf, 0xfa, 0x49, 0x6f, 0x0a, 0x8f, 0x60, 0x6b, 0xe5, 0x8f, + 0x56, 0x1b, 0xd4, 0xb8, 0xda, 0xa0, 0xc6, 0xf5, 0x06, 0x35, 0xbe, 0xe6, 0x08, 0xac, 0x72, 0x04, + 0xae, 0x72, 0x04, 0xae, 0x73, 0x04, 0x7e, 0xe4, 0x08, 0x7c, 0xfb, 0x89, 0x1a, 0x1f, 0xd1, 0xdd, + 0xaf, 0xf4, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8e, 0xfe, 0x45, 0x7e, 0xc6, 0x03, 0x00, 0x00, } func (m *PriorityClass) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto index f0878fb16e..e42dccc688 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto @@ -36,7 +36,7 @@ message PriorityClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. @@ -67,7 +67,7 @@ message PriorityClassList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of PriorityClasses repeated PriorityClass items = 2; diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go index 262caf7f1d..68e8e90d1d 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto +// source: k8s.io/api/scheduling/v1beta1/generated.proto package v1beta1 @@ -48,7 +48,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *PriorityClass) Reset() { *m = PriorityClass{} } func (*PriorityClass) ProtoMessage() {} func (*PriorityClass) Descriptor() ([]byte, []int) { - return fileDescriptor_6cd406dede2d3f42, []int{0} + return fileDescriptor_9edc3acf997efcf2, []int{0} } func (m *PriorityClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -76,7 +76,7 @@ var xxx_messageInfo_PriorityClass proto.InternalMessageInfo func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } func (*PriorityClassList) ProtoMessage() {} func (*PriorityClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_6cd406dede2d3f42, []int{1} + return fileDescriptor_9edc3acf997efcf2, []int{1} } func (m *PriorityClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,42 +107,41 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto", fileDescriptor_6cd406dede2d3f42) + proto.RegisterFile("k8s.io/api/scheduling/v1beta1/generated.proto", fileDescriptor_9edc3acf997efcf2) } -var fileDescriptor_6cd406dede2d3f42 = []byte{ - // 497 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x31, 0x8f, 0xd3, 0x3e, - 0x18, 0xc6, 0xeb, 0xde, 0xbf, 0x52, 0xff, 0xae, 0x2a, 0x95, 0x20, 0xa4, 0xa8, 0xd2, 0xa5, 0x55, - 0x6f, 0xe9, 0x00, 0x36, 0x3d, 0x01, 0x42, 0xba, 0xad, 0x77, 0x02, 0x21, 0x81, 0x28, 0x19, 0x18, - 0x10, 0x03, 0x4e, 0xf2, 0x5e, 0x6a, 0x9a, 0xc4, 0x91, 0xed, 0x44, 0xea, 0xc6, 0x47, 0xe0, 0x43, - 0x31, 0x74, 0xbc, 0xf1, 0xa6, 0x8a, 0x86, 0x8f, 0xc0, 0xc6, 0x84, 0x92, 0x86, 0x4b, 0xdb, 0xc0, - 0xc1, 0x96, 0xf7, 0x7d, 0x7f, 0xcf, 0x63, 0xfb, 0x89, 0x8d, 0x9f, 0x2d, 0x9e, 0x2a, 0xc2, 0x05, - 0x5d, 0x24, 0x0e, 0xc8, 0x08, 0x34, 0x28, 0x9a, 0x42, 0xe4, 0x09, 0x49, 0xcb, 0x01, 0x8b, 0x39, - 0x55, 0xee, 0x1c, 0xbc, 0x24, 0xe0, 0x91, 0x4f, 0xd3, 0x89, 0x03, 0x9a, 0x4d, 0xa8, 0x0f, 0x11, - 0x48, 0xa6, 0xc1, 0x23, 0xb1, 0x14, 0x5a, 0x18, 0xc7, 0x5b, 0x9c, 0xb0, 0x98, 0x93, 0x0a, 0x27, - 0x25, 0xde, 0x7f, 0xe0, 0x73, 0x3d, 0x4f, 0x1c, 0xe2, 0x8a, 0x90, 0xfa, 0xc2, 0x17, 0xb4, 0x50, - 0x39, 0xc9, 0x65, 0x51, 0x15, 0x45, 0xf1, 0xb5, 0x75, 0xeb, 0x8f, 0x76, 0x16, 0x77, 0x85, 0x04, - 0x9a, 0xd6, 0x56, 0xec, 0x3f, 0xaa, 0x98, 0x90, 0xb9, 0x73, 0x1e, 0x81, 0x5c, 0xd2, 0x78, 0xe1, - 0xe7, 0x0d, 0x45, 0x43, 0xd0, 0xec, 0x77, 0x2a, 0xfa, 0x27, 0x95, 0x4c, 0x22, 0xcd, 0x43, 0xa8, - 0x09, 0x9e, 0xfc, 0x4d, 0x90, 0x9f, 0x36, 0x64, 0x87, 0xba, 0xd1, 0xf7, 0x26, 0xee, 0xce, 0x24, - 0x17, 0x92, 0xeb, 0xe5, 0x79, 0xc0, 0x94, 0x32, 0x3e, 0xe0, 0x76, 0xbe, 0x2b, 0x8f, 0x69, 0x66, - 0xa2, 0x21, 0x1a, 0x77, 0x4e, 0x1f, 0x92, 0x2a, 0xb5, 0x1b, 0x73, 0x12, 0x2f, 0xfc, 0xbc, 0xa1, - 0x48, 0x4e, 0x93, 0x74, 0x42, 0x5e, 0x3b, 0x1f, 0xc1, 0xd5, 0xaf, 0x40, 0xb3, 0xa9, 0xb1, 0x5a, - 0x0f, 0x1a, 0xd9, 0x7a, 0x80, 0xab, 0x9e, 0x7d, 0xe3, 0x6a, 0x9c, 0xe0, 0x56, 0xca, 0x82, 0x04, - 0xcc, 0xe6, 0x10, 0x8d, 0x5b, 0xd3, 0x6e, 0x09, 0xb7, 0xde, 0xe6, 0x4d, 0x7b, 0x3b, 0x33, 0xce, - 0x70, 0xd7, 0x0f, 0x84, 0xc3, 0x82, 0x0b, 0xb8, 0x64, 0x49, 0xa0, 0xcd, 0xa3, 0x21, 0x1a, 0xb7, - 0xa7, 0xf7, 0x4a, 0xb8, 0xfb, 0x7c, 0x77, 0x68, 0xef, 0xb3, 0xc6, 0x63, 0xdc, 0xf1, 0x40, 0xb9, - 0x92, 0xc7, 0x9a, 0x8b, 0xc8, 0xfc, 0x6f, 0x88, 0xc6, 0xff, 0x4f, 0xef, 0x96, 0xd2, 0xce, 0x45, - 
0x35, 0xb2, 0x77, 0x39, 0xc3, 0xc7, 0xbd, 0x58, 0x02, 0x84, 0x45, 0x35, 0x13, 0x01, 0x77, 0x97, - 0x66, 0xab, 0xd0, 0x9e, 0x65, 0xeb, 0x41, 0x6f, 0x76, 0x30, 0xfb, 0xb1, 0x1e, 0x9c, 0xd4, 0x6f, - 0x00, 0x39, 0xc4, 0xec, 0x9a, 0xe9, 0xe8, 0x0b, 0xc2, 0x77, 0xf6, 0x52, 0x7f, 0xc9, 0x95, 0x36, - 0xde, 0xd7, 0x92, 0x27, 0xff, 0x96, 0x7c, 0xae, 0x2e, 0x72, 0xef, 0x95, 0x47, 0x6c, 0xff, 0xea, - 0xec, 0xa4, 0xfe, 0x06, 0xb7, 0xb8, 0x86, 0x50, 0x99, 0xcd, 0xe1, 0xd1, 0xb8, 0x73, 0x7a, 0x9f, - 0xdc, 0xfa, 0x14, 0xc8, 0xde, 0xf6, 0xaa, 0x7f, 0xf4, 0x22, 0xb7, 0xb0, 0xb7, 0x4e, 0xd3, 0xf3, - 0xd5, 0xc6, 0x6a, 0x5c, 0x6d, 0xac, 0xc6, 0xf5, 0xc6, 0x6a, 0x7c, 0xca, 0x2c, 0xb4, 0xca, 0x2c, - 0x74, 0x95, 0x59, 0xe8, 0x3a, 0xb3, 0xd0, 0xd7, 0xcc, 0x42, 0x9f, 0xbf, 0x59, 0x8d, 0x77, 0xc7, - 0xb7, 0x3e, 0xd1, 0x9f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xc5, 0x6c, 0x56, 0x80, 0xdb, 0x03, 0x00, +var fileDescriptor_9edc3acf997efcf2 = []byte{ + // 481 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x31, 0x8f, 0xd3, 0x30, + 0x18, 0x86, 0xe3, 0x1e, 0x91, 0x8a, 0xab, 0x4a, 0x25, 0x08, 0x29, 0xaa, 0x74, 0x69, 0xd4, 0x5b, + 0x32, 0x70, 0x36, 0x3d, 0x01, 0x42, 0xba, 0x2d, 0x77, 0x12, 0x42, 0x02, 0x51, 0x32, 0x30, 0x20, + 0x06, 0x9c, 0xd4, 0x97, 0x9a, 0x26, 0x71, 0x64, 0x3b, 0x95, 0xba, 0xf1, 0x13, 0xf8, 0x51, 0x0c, + 0x1d, 0x6f, 0xbc, 0xa9, 0xa2, 0xe1, 0x27, 0xb0, 0x31, 0xa1, 0xa4, 0xe1, 0xd2, 0x36, 0x50, 0x6e, + 0xcb, 0xf7, 0x7d, 0xcf, 0xfb, 0xda, 0x7e, 0x63, 0xc3, 0xd3, 0xd9, 0x0b, 0x89, 0x18, 0xc7, 0x24, + 0x65, 0x58, 0x06, 0x53, 0x3a, 0xc9, 0x22, 0x96, 0x84, 0x78, 0x3e, 0xf2, 0xa9, 0x22, 0x23, 0x1c, + 0xd2, 0x84, 0x0a, 0xa2, 0xe8, 0x04, 0xa5, 0x82, 0x2b, 0x6e, 0x1c, 0x6f, 0x70, 0x44, 0x52, 0x86, + 0x6a, 0x1c, 0x55, 0x78, 0xff, 0x34, 0x64, 0x6a, 0x9a, 0xf9, 0x28, 0xe0, 0x31, 0x0e, 0x79, 0xc8, + 0x71, 0xa9, 0xf2, 0xb3, 0xab, 0xb2, 0x2a, 0x8b, 0xf2, 0x6b, 0xe3, 0xd6, 0x1f, 0x6e, 0x2d, 0x1e, + 0x70, 0x41, 0xf1, 0xbc, 0xb1, 0x62, 0xff, 0x69, 0xcd, 0xc4, 0x24, 0x98, 0xb2, 0x84, 0x8a, 0x05, + 0x4e, 0x67, 0x61, 0xd1, 0x90, 0x38, 0xa6, 0x8a, 0xfc, 0x4d, 0x85, 0xff, 0xa5, 0x12, 0x59, 0xa2, + 0x58, 0x4c, 0x1b, 0x82, 0xe7, 0xff, 0x13, 0x14, 0xa7, 0x8d, 0xc9, 0xbe, 0x6e, 0xf8, 0xb3, 0x05, + 0xbb, 0x63, 0xc1, 0xb8, 0x60, 0x6a, 0x71, 0x11, 0x11, 0x29, 0x8d, 0x4f, 0xb0, 0x5d, 0xec, 0x6a, + 0x42, 0x14, 0x31, 0x81, 0x0d, 0x9c, 0xce, 0xd9, 0x13, 0x54, 0xa7, 0x76, 0x6b, 0x8e, 0xd2, 0x59, + 0x58, 0x34, 0x24, 0x2a, 0x68, 0x34, 0x1f, 0xa1, 0xb7, 0xfe, 0x67, 0x1a, 0xa8, 0x37, 0x54, 0x11, + 0xd7, 0x58, 0xae, 0x06, 0x5a, 0xbe, 0x1a, 0xc0, 0xba, 0xe7, 0xdd, 0xba, 0x1a, 0x27, 0x50, 0x9f, + 0x93, 0x28, 0xa3, 0x66, 0xcb, 0x06, 0x8e, 0xee, 0x76, 0x2b, 0x58, 0x7f, 0x5f, 0x34, 0xbd, 0xcd, + 0xcc, 0x38, 0x87, 0xdd, 0x30, 0xe2, 0x3e, 0x89, 0x2e, 0xe9, 0x15, 0xc9, 0x22, 0x65, 0x1e, 0xd9, + 0xc0, 0x69, 0xbb, 0x8f, 0x2a, 0xb8, 0xfb, 0x72, 0x7b, 0xe8, 0xed, 0xb2, 0xc6, 0x33, 0xd8, 0x99, + 0x50, 0x19, 0x08, 0x96, 0x2a, 0xc6, 0x13, 0xf3, 0x9e, 0x0d, 0x9c, 0xfb, 0xee, 0xc3, 0x4a, 0xda, + 0xb9, 0xac, 0x47, 0xde, 0x36, 0x67, 0x84, 0xb0, 0x97, 0x0a, 0x4a, 0xe3, 0xb2, 0x1a, 0xf3, 0x88, + 0x05, 0x0b, 0x53, 0x2f, 0xb5, 0xe7, 0xf9, 0x6a, 0xd0, 0x1b, 0xef, 0xcd, 0x7e, 0xad, 0x06, 0x27, + 0xcd, 0x1b, 0x80, 0xf6, 0x31, 0xaf, 0x61, 0x3a, 0xfc, 0x06, 0xe0, 0x83, 0x9d, 0xd4, 0x5f, 0x33, + 0xa9, 0x8c, 0x8f, 0x8d, 0xe4, 0xd1, 0xdd, 0x92, 0x2f, 0xd4, 0x65, 0xee, 0xbd, 0xea, 0x88, 0xed, + 0x3f, 0x9d, 0xad, 0xd4, 0xdf, 0x41, 0x9d, 0x29, 0x1a, 0x4b, 0xb3, 0x65, 0x1f, 0x39, 0x9d, 0xb3, + 0xc7, 0xe8, 0xe0, 0x53, 0x40, 
0x3b, 0xdb, 0xab, 0xff, 0xd1, 0xab, 0xc2, 0xc2, 0xdb, 0x38, 0xb9, + 0x17, 0xcb, 0xb5, 0xa5, 0x5d, 0xaf, 0x2d, 0xed, 0x66, 0x6d, 0x69, 0x5f, 0x72, 0x0b, 0x2c, 0x73, + 0x0b, 0x5c, 0xe7, 0x16, 0xb8, 0xc9, 0x2d, 0xf0, 0x3d, 0xb7, 0xc0, 0xd7, 0x1f, 0x96, 0xf6, 0xe1, + 0xf8, 0xe0, 0x13, 0xfd, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x04, 0x2e, 0xb0, 0xce, 0xc2, 0x03, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto index 43878184d6..7f77b01753 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto @@ -36,7 +36,7 @@ message PriorityClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. @@ -67,7 +67,7 @@ message PriorityClassList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of PriorityClasses repeated PriorityClass items = 2; diff --git a/vendor/k8s.io/api/storage/v1/doc.go b/vendor/k8s.io/api/storage/v1/doc.go index 75a6489da2..e2310dac23 100644 --- a/vendor/k8s.io/api/storage/v1/doc.go +++ b/vendor/k8s.io/api/storage/v1/doc.go @@ -18,5 +18,6 @@ limitations under the License. // +k8s:protobuf-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true package v1 // import "k8s.io/api/storage/v1" diff --git a/vendor/k8s.io/api/storage/v1/generated.pb.go b/vendor/k8s.io/api/storage/v1/generated.pb.go index d36497432d..11c8c97c24 100644 --- a/vendor/k8s.io/api/storage/v1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto +// source: k8s.io/api/storage/v1/generated.proto package v1 @@ -51,7 +51,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *CSIDriver) Reset() { *m = CSIDriver{} } func (*CSIDriver) ProtoMessage() {} func (*CSIDriver) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{0} + return fileDescriptor_662262cc70094b41, []int{0} } func (m *CSIDriver) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -79,7 +79,7 @@ var xxx_messageInfo_CSIDriver proto.InternalMessageInfo func (m *CSIDriverList) Reset() { *m = CSIDriverList{} } func (*CSIDriverList) ProtoMessage() {} func (*CSIDriverList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{1} + return fileDescriptor_662262cc70094b41, []int{1} } func (m *CSIDriverList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,7 +107,7 @@ var xxx_messageInfo_CSIDriverList proto.InternalMessageInfo func (m *CSIDriverSpec) Reset() { *m = CSIDriverSpec{} } func (*CSIDriverSpec) ProtoMessage() {} func (*CSIDriverSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{2} + return fileDescriptor_662262cc70094b41, []int{2} } func (m *CSIDriverSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,7 +135,7 @@ var xxx_messageInfo_CSIDriverSpec proto.InternalMessageInfo func (m *CSINode) Reset() { *m = CSINode{} } func (*CSINode) ProtoMessage() {} func (*CSINode) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{3} + return fileDescriptor_662262cc70094b41, []int{3} } func (m *CSINode) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -163,7 +163,7 @@ var xxx_messageInfo_CSINode proto.InternalMessageInfo func (m *CSINodeDriver) Reset() { *m = CSINodeDriver{} } func (*CSINodeDriver) ProtoMessage() {} func (*CSINodeDriver) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{4} + return fileDescriptor_662262cc70094b41, []int{4} } func (m *CSINodeDriver) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -191,7 +191,7 @@ var xxx_messageInfo_CSINodeDriver proto.InternalMessageInfo func (m *CSINodeList) Reset() { *m = CSINodeList{} } func (*CSINodeList) ProtoMessage() {} func (*CSINodeList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{5} + return fileDescriptor_662262cc70094b41, []int{5} } func (m *CSINodeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -219,7 +219,7 @@ var xxx_messageInfo_CSINodeList proto.InternalMessageInfo func (m *CSINodeSpec) Reset() { *m = CSINodeSpec{} } func (*CSINodeSpec) ProtoMessage() {} func (*CSINodeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{6} + return fileDescriptor_662262cc70094b41, []int{6} } func (m *CSINodeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -247,7 +247,7 @@ var xxx_messageInfo_CSINodeSpec proto.InternalMessageInfo func (m *CSIStorageCapacity) Reset() { *m = CSIStorageCapacity{} } func (*CSIStorageCapacity) ProtoMessage() {} func (*CSIStorageCapacity) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{7} + return fileDescriptor_662262cc70094b41, []int{7} } func (m *CSIStorageCapacity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -275,7 +275,7 @@ var xxx_messageInfo_CSIStorageCapacity proto.InternalMessageInfo func (m *CSIStorageCapacityList) Reset() { *m = CSIStorageCapacityList{} } func 
(*CSIStorageCapacityList) ProtoMessage() {} func (*CSIStorageCapacityList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{8} + return fileDescriptor_662262cc70094b41, []int{8} } func (m *CSIStorageCapacityList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -303,7 +303,7 @@ var xxx_messageInfo_CSIStorageCapacityList proto.InternalMessageInfo func (m *StorageClass) Reset() { *m = StorageClass{} } func (*StorageClass) ProtoMessage() {} func (*StorageClass) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{9} + return fileDescriptor_662262cc70094b41, []int{9} } func (m *StorageClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -331,7 +331,7 @@ var xxx_messageInfo_StorageClass proto.InternalMessageInfo func (m *StorageClassList) Reset() { *m = StorageClassList{} } func (*StorageClassList) ProtoMessage() {} func (*StorageClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{10} + return fileDescriptor_662262cc70094b41, []int{10} } func (m *StorageClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -359,7 +359,7 @@ var xxx_messageInfo_StorageClassList proto.InternalMessageInfo func (m *TokenRequest) Reset() { *m = TokenRequest{} } func (*TokenRequest) ProtoMessage() {} func (*TokenRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{11} + return fileDescriptor_662262cc70094b41, []int{11} } func (m *TokenRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -387,7 +387,7 @@ var xxx_messageInfo_TokenRequest proto.InternalMessageInfo func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } func (*VolumeAttachment) ProtoMessage() {} func (*VolumeAttachment) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{12} + return fileDescriptor_662262cc70094b41, []int{12} } func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -415,7 +415,7 @@ var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } func (*VolumeAttachmentList) ProtoMessage() {} func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{13} + return fileDescriptor_662262cc70094b41, []int{13} } func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -443,7 +443,7 @@ var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } func (*VolumeAttachmentSource) ProtoMessage() {} func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{14} + return fileDescriptor_662262cc70094b41, []int{14} } func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -471,7 +471,7 @@ var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } func (*VolumeAttachmentSpec) ProtoMessage() {} func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{15} + return fileDescriptor_662262cc70094b41, []int{15} } func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -499,7 +499,7 @@ var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } func (*VolumeAttachmentStatus) 
ProtoMessage() {} func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{16} + return fileDescriptor_662262cc70094b41, []int{16} } func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -527,7 +527,7 @@ var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo func (m *VolumeError) Reset() { *m = VolumeError{} } func (*VolumeError) ProtoMessage() {} func (*VolumeError) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{17} + return fileDescriptor_662262cc70094b41, []int{17} } func (m *VolumeError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -555,7 +555,7 @@ var xxx_messageInfo_VolumeError proto.InternalMessageInfo func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} } func (*VolumeNodeResources) ProtoMessage() {} func (*VolumeNodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{18} + return fileDescriptor_662262cc70094b41, []int{18} } func (m *VolumeNodeResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -605,116 +605,115 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto", fileDescriptor_3b530c1983504d8d) -} - -var fileDescriptor_3b530c1983504d8d = []byte{ - // 1670 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcd, 0x73, 0x1b, 0x4b, - 0x11, 0xf7, 0x5a, 0xf2, 0xd7, 0xc8, 0x8e, 0xed, 0xb1, 0xfd, 0x10, 0x3e, 0x48, 0xae, 0x7d, 0xaf, - 0xc0, 0xef, 0xc1, 0x5b, 0xbd, 0x38, 0x21, 0x95, 0x0a, 0x15, 0xaa, 0xbc, 0xb6, 0x42, 0x5c, 0x58, - 0xb6, 0x19, 0x99, 0x54, 0x8a, 0x02, 0x2a, 0xe3, 0xdd, 0xb1, 0x3c, 0xb1, 0xf6, 0x23, 0x3b, 0xb3, - 0xc2, 0xe2, 0x04, 0x17, 0x6e, 0x54, 0xc1, 0x95, 0xbf, 0x02, 0xaa, 0xe0, 0xc2, 0x91, 0x03, 0x15, - 0x6e, 0x29, 0x4e, 0x39, 0xa9, 0x88, 0x38, 0xc3, 0x91, 0x83, 0x4f, 0xaf, 0x66, 0x76, 0xa4, 0xfd, - 0xd0, 0xca, 0xb1, 0x2f, 0xba, 0x69, 0xa6, 0xbb, 0x7f, 0xdd, 0x33, 0xdd, 0xfd, 0x9b, 0x5e, 0x81, - 0x1f, 0x5c, 0x3e, 0x66, 0x06, 0xf5, 0x6a, 0x97, 0xe1, 0x19, 0x09, 0x5c, 0xc2, 0x09, 0xab, 0x75, - 0x88, 0x6b, 0x7b, 0x41, 0x4d, 0x09, 0xb0, 0x4f, 0x6b, 0x8c, 0x7b, 0x01, 0x6e, 0x91, 0x5a, 0xe7, - 0x7e, 0xad, 0x45, 0x5c, 0x12, 0x60, 0x4e, 0x6c, 0xc3, 0x0f, 0x3c, 0xee, 0xc1, 0x8d, 0x48, 0xcd, - 0xc0, 0x3e, 0x35, 0x94, 0x9a, 0xd1, 0xb9, 0xbf, 0xf9, 0x65, 0x8b, 0xf2, 0x8b, 0xf0, 0xcc, 0xb0, - 0x3c, 0xa7, 0xd6, 0xf2, 0x5a, 0x5e, 0x4d, 0x6a, 0x9f, 0x85, 0xe7, 0x72, 0x25, 0x17, 0xf2, 0x57, - 0x84, 0xb2, 0xa9, 0x27, 0x9c, 0x59, 0x5e, 0x90, 0xe7, 0x69, 0xf3, 0x61, 0xac, 0xe3, 0x60, 0xeb, - 0x82, 0xba, 0x24, 0xe8, 0xd6, 0xfc, 0xcb, 0x96, 0x34, 0x0a, 0x08, 0xf3, 0xc2, 0xc0, 0x22, 0x77, - 0xb2, 0x62, 0x35, 0x87, 0x70, 0x9c, 0xe7, 0xab, 0x36, 0xce, 0x2a, 0x08, 0x5d, 0x4e, 0x9d, 0x51, - 0x37, 0x8f, 0x3e, 0x66, 0xc0, 0xac, 0x0b, 0xe2, 0xe0, 0xac, 0x9d, 0xfe, 0x57, 0x0d, 0x2c, 0xec, - 0x35, 0x0f, 0xf6, 0x03, 0xda, 0x21, 0x01, 0x7c, 0x05, 0xe6, 0x45, 0x44, 0x36, 0xe6, 0xb8, 0xac, - 0x6d, 0x69, 0xdb, 0xa5, 0x9d, 0xaf, 0x8c, 0xf8, 0x7e, 0x87, 0xc0, 0x86, 0x7f, 0xd9, 0x12, 0x1b, - 0xcc, 0x10, 0xda, 0x46, 0xe7, 0xbe, 0x71, 0x7c, 0xf6, 0x9a, 0x58, 0xbc, 0x41, 0x38, 0x36, 0xe1, - 0xdb, 0x5e, 0x75, 0xaa, 0xdf, 0xab, 0x82, 0x78, 0x0f, 0x0d, 0x51, 0xe1, 0x33, 0x50, 0x64, 0x3e, - 0xb1, 0xca, 0xd3, 0x12, 0xfd, 0x33, 0x23, 0x37, 0x7b, 0xc6, 0x30, 0xa2, 0xa6, 0x4f, 0x2c, 0x73, - 0x51, 0x21, 0x16, 0xc5, 0x0a, 0x49, 0x7b, 0xfd, 0x2f, 0x1a, 0x58, 0x1a, 0x6a, 0x1d, 0x52, 
0xc6, - 0xe1, 0xcf, 0x46, 0x62, 0x37, 0x6e, 0x17, 0xbb, 0xb0, 0x96, 0x91, 0xaf, 0x28, 0x3f, 0xf3, 0x83, - 0x9d, 0x44, 0xdc, 0x75, 0x30, 0x43, 0x39, 0x71, 0x58, 0x79, 0x7a, 0xab, 0xb0, 0x5d, 0xda, 0xd9, - 0xfa, 0x58, 0xe0, 0xe6, 0x92, 0x02, 0x9b, 0x39, 0x10, 0x66, 0x28, 0xb2, 0xd6, 0xff, 0x55, 0x4c, - 0x84, 0x2d, 0x8e, 0x03, 0x9f, 0x80, 0x7b, 0x98, 0x73, 0x6c, 0x5d, 0x20, 0xf2, 0x26, 0xa4, 0x01, - 0xb1, 0x65, 0xf0, 0xf3, 0x26, 0xec, 0xf7, 0xaa, 0xf7, 0x76, 0x53, 0x12, 0x94, 0xd1, 0x14, 0xb6, - 0xbe, 0x67, 0x1f, 0xb8, 0xe7, 0xde, 0xb1, 0xdb, 0xf0, 0x42, 0x97, 0xcb, 0x6b, 0x55, 0xb6, 0x27, - 0x29, 0x09, 0xca, 0x68, 0x42, 0x0b, 0xac, 0x77, 0xbc, 0x76, 0xe8, 0x90, 0x43, 0x7a, 0x4e, 0xac, - 0xae, 0xd5, 0x26, 0x0d, 0xcf, 0x26, 0xac, 0x5c, 0xd8, 0x2a, 0x6c, 0x2f, 0x98, 0xb5, 0x7e, 0xaf, - 0xba, 0xfe, 0x22, 0x47, 0x7e, 0xdd, 0xab, 0xae, 0xe5, 0xec, 0xa3, 0x5c, 0x30, 0xf8, 0x14, 0x2c, - 0xab, 0xcb, 0xd9, 0xc3, 0x3e, 0xb6, 0x28, 0xef, 0x96, 0x8b, 0x32, 0xc2, 0xb5, 0x7e, 0xaf, 0xba, - 0xdc, 0x4c, 0x8b, 0x50, 0x56, 0x17, 0x3e, 0x07, 0x4b, 0xe7, 0xec, 0x87, 0x81, 0x17, 0xfa, 0x27, - 0x5e, 0x9b, 0x5a, 0xdd, 0xf2, 0xcc, 0x96, 0xb6, 0xbd, 0x60, 0xea, 0xfd, 0x5e, 0x75, 0xe9, 0x59, - 0x33, 0x21, 0xb8, 0xce, 0x6e, 0xa0, 0xb4, 0x21, 0x7c, 0x05, 0x96, 0xb8, 0x77, 0x49, 0x5c, 0x71, - 0x75, 0x84, 0x71, 0x56, 0x9e, 0x95, 0x69, 0xfc, 0x74, 0x4c, 0x1a, 0x4f, 0x13, 0xba, 0xe6, 0x86, - 0xca, 0xe4, 0x52, 0x72, 0x97, 0xa1, 0x34, 0x20, 0xdc, 0x03, 0xab, 0x41, 0x94, 0x17, 0x86, 0x88, - 0x1f, 0x9e, 0xb5, 0x29, 0xbb, 0x28, 0xcf, 0xc9, 0xc3, 0x6e, 0xf4, 0x7b, 0xd5, 0x55, 0x94, 0x15, - 0xa2, 0x51, 0x7d, 0xf8, 0x10, 0x2c, 0x32, 0x72, 0x48, 0xdd, 0xf0, 0x2a, 0x4a, 0xe7, 0xbc, 0xb4, - 0x5f, 0xe9, 0xf7, 0xaa, 0x8b, 0xcd, 0x7a, 0xbc, 0x8f, 0x52, 0x5a, 0xfa, 0x9f, 0x35, 0x30, 0xb7, - 0xd7, 0x3c, 0x38, 0xf2, 0x6c, 0x32, 0x81, 0x0e, 0xde, 0x4f, 0x75, 0xb0, 0x3e, 0xbe, 0x11, 0x44, - 0x3c, 0x63, 0xfb, 0xf7, 0x7f, 0x51, 0xff, 0x0a, 0x1d, 0xc5, 0x3d, 0x5b, 0xa0, 0xe8, 0x62, 0x87, - 0xc8, 0xa8, 0x17, 0x62, 0x9b, 0x23, 0xec, 0x10, 0x24, 0x25, 0xf0, 0x5b, 0x60, 0xd6, 0xf5, 0x6c, - 0x72, 0xb0, 0x2f, 0x7d, 0x2f, 0x98, 0xf7, 0x94, 0xce, 0xec, 0x91, 0xdc, 0x45, 0x4a, 0x2a, 0x6e, - 0x91, 0x7b, 0xbe, 0xd7, 0xf6, 0x5a, 0xdd, 0x1f, 0x91, 0xee, 0xa0, 0xa4, 0xe5, 0x2d, 0x9e, 0x26, - 0xf6, 0x51, 0x4a, 0x0b, 0xfe, 0x1c, 0x94, 0x70, 0xbb, 0xed, 0x59, 0x98, 0xe3, 0xb3, 0x36, 0x91, - 0x75, 0x5a, 0xda, 0xf9, 0x62, 0xcc, 0xf1, 0xa2, 0x16, 0x10, 0x7e, 0x91, 0x22, 0x7e, 0x66, 0x2e, - 0xf7, 0x7b, 0xd5, 0xd2, 0x6e, 0x0c, 0x81, 0x92, 0x78, 0xfa, 0x9f, 0x34, 0x50, 0x52, 0x07, 0x9e, - 0x00, 0x5d, 0xed, 0xa5, 0xe9, 0xaa, 0x72, 0x73, 0x96, 0xc6, 0x90, 0xd5, 0x2f, 0x86, 0x11, 0x4b, - 0xa6, 0x3a, 0x06, 0x73, 0xb6, 0x4c, 0x15, 0x2b, 0x6b, 0x12, 0xf5, 0xb3, 0x9b, 0x51, 0x15, 0x11, - 0x2e, 0x2b, 0xec, 0xb9, 0x68, 0xcd, 0xd0, 0x00, 0x45, 0xff, 0x7f, 0x01, 0xc0, 0xbd, 0xe6, 0x41, - 0x86, 0x06, 0x26, 0x50, 0xc2, 0x14, 0x2c, 0x8a, 0x52, 0x19, 0x14, 0x83, 0x2a, 0xe5, 0x07, 0xb7, - 0xbc, 0x7f, 0x7c, 0x46, 0xda, 0x4d, 0xd2, 0x26, 0x16, 0xf7, 0x82, 0xa8, 0xaa, 0x8e, 0x12, 0x60, - 0x28, 0x05, 0x0d, 0xf7, 0xc1, 0xca, 0x80, 0xd5, 0xda, 0x98, 0x31, 0x51, 0xcd, 0xe5, 0x82, 0xac, - 0xde, 0xb2, 0x0a, 0x71, 0xa5, 0x99, 0x91, 0xa3, 0x11, 0x0b, 0xf8, 0x12, 0xcc, 0x5b, 0x49, 0x02, - 0xfd, 0x48, 0xb1, 0x18, 0x83, 0x69, 0xc4, 0xf8, 0x71, 0x88, 0x5d, 0x4e, 0x79, 0xd7, 0x5c, 0x14, - 0x85, 0x32, 0x64, 0xda, 0x21, 0x1a, 0x64, 0x60, 0xd5, 0xc1, 0x57, 0xd4, 0x09, 0x9d, 0xa8, 0xa4, - 0x9b, 0xf4, 0x57, 0x44, 0xd2, 0xec, 0xdd, 0x5d, 0x48, 0x9a, 0x6b, 0x64, 0xc1, 0xd0, 0x28, 0xbe, - 0xfe, 0x0f, 0x0d, 
0x7c, 0x32, 0x9a, 0xf8, 0x09, 0xb4, 0xc5, 0x51, 0xba, 0x2d, 0x3e, 0x1f, 0x5f, - 0xc0, 0x99, 0xd8, 0xc6, 0x74, 0xc8, 0xef, 0x66, 0xc1, 0x62, 0x32, 0x7d, 0x13, 0xa8, 0xdd, 0xef, - 0x81, 0x92, 0x1f, 0x78, 0x1d, 0xca, 0xa8, 0xe7, 0x92, 0x40, 0x31, 0xe1, 0x9a, 0x32, 0x29, 0x9d, - 0xc4, 0x22, 0x94, 0xd4, 0x83, 0x2d, 0x00, 0x7c, 0x1c, 0x60, 0x87, 0x70, 0xd1, 0xbf, 0x05, 0x79, - 0xfc, 0x07, 0x63, 0x8e, 0x9f, 0x3c, 0x91, 0x71, 0x32, 0xb4, 0xaa, 0xbb, 0x3c, 0xe8, 0xc6, 0xd1, - 0xc5, 0x02, 0x94, 0x80, 0x86, 0x97, 0x60, 0x29, 0x20, 0x56, 0x1b, 0x53, 0x47, 0xbd, 0xd9, 0x45, - 0x19, 0x61, 0x5d, 0x3c, 0xa0, 0x28, 0x29, 0xb8, 0xee, 0x55, 0xbf, 0x1a, 0x9d, 0xba, 0x8d, 0x13, - 0x12, 0x30, 0xca, 0x38, 0x71, 0x79, 0x54, 0x30, 0x29, 0x1b, 0x94, 0xc6, 0x16, 0x4c, 0xef, 0x88, - 0x27, 0xf0, 0xd8, 0xe7, 0xd4, 0x73, 0x59, 0x79, 0x26, 0x66, 0xfa, 0x46, 0x62, 0x1f, 0xa5, 0xb4, - 0xe0, 0x21, 0x58, 0x17, 0xcc, 0xfc, 0xcb, 0xc8, 0x41, 0xfd, 0xca, 0xc7, 0xae, 0xb8, 0xa5, 0xf2, - 0xac, 0x7c, 0x6d, 0xcb, 0x62, 0xf4, 0xd9, 0xcd, 0x91, 0xa3, 0x5c, 0x2b, 0xf8, 0x12, 0xac, 0x46, - 0xb3, 0x8f, 0x49, 0x5d, 0x9b, 0xba, 0x2d, 0x31, 0xf9, 0xc8, 0x87, 0x7f, 0xc1, 0xfc, 0x42, 0x74, - 0xc4, 0x8b, 0xac, 0xf0, 0x3a, 0x6f, 0x13, 0x8d, 0x82, 0xc0, 0x37, 0x60, 0x55, 0x7a, 0x24, 0xb6, - 0xa2, 0x13, 0x4a, 0x58, 0x79, 0x5e, 0xa6, 0x6e, 0x3b, 0x99, 0x3a, 0x71, 0x75, 0xd1, 0xd4, 0x12, - 0x91, 0xce, 0x80, 0x9c, 0x4e, 0x49, 0xe0, 0x98, 0xdf, 0x54, 0xf9, 0x5a, 0xdd, 0xcd, 0x42, 0xa1, - 0x51, 0xf4, 0xcd, 0xa7, 0x60, 0x39, 0x93, 0x70, 0xb8, 0x02, 0x0a, 0x97, 0xa4, 0x1b, 0x3d, 0xcb, - 0x48, 0xfc, 0x84, 0xeb, 0x60, 0xa6, 0x83, 0xdb, 0x21, 0x89, 0x8a, 0x0f, 0x45, 0x8b, 0x27, 0xd3, - 0x8f, 0x35, 0xfd, 0x6f, 0x1a, 0x48, 0xd1, 0xd9, 0x04, 0x5a, 0xfa, 0x79, 0xba, 0xa5, 0x3f, 0xbd, - 0x45, 0x4d, 0x8f, 0x69, 0xe6, 0xdf, 0x68, 0x60, 0x31, 0x39, 0xe2, 0xc1, 0xef, 0x82, 0x79, 0x1c, - 0xda, 0x94, 0xb8, 0xd6, 0x60, 0x2a, 0x19, 0x06, 0xb2, 0xab, 0xf6, 0xd1, 0x50, 0x43, 0x0c, 0x80, - 0xe4, 0xca, 0xa7, 0x01, 0x16, 0x45, 0xd6, 0x24, 0x96, 0xe7, 0xda, 0x4c, 0xde, 0x50, 0x21, 0x62, - 0xc6, 0x7a, 0x56, 0x88, 0x46, 0xf5, 0xf5, 0x3f, 0x4e, 0x83, 0x95, 0xa8, 0x36, 0xa2, 0xd1, 0xdf, - 0x21, 0x2e, 0x9f, 0x00, 0xa9, 0x34, 0x52, 0x33, 0xdd, 0x77, 0x6e, 0x1c, 0x7a, 0xe2, 0xc0, 0xc6, - 0x0d, 0x77, 0xf0, 0x27, 0x60, 0x96, 0x71, 0xcc, 0x43, 0x26, 0x9f, 0xba, 0xd2, 0xce, 0x97, 0xb7, - 0x05, 0x94, 0x46, 0xf1, 0x5c, 0x17, 0xad, 0x91, 0x02, 0xd3, 0xff, 0xae, 0x81, 0xf5, 0xac, 0xc9, - 0x04, 0x2a, 0xec, 0x30, 0x5d, 0x61, 0xdf, 0xbe, 0xe5, 0x61, 0xc6, 0x7d, 0x01, 0x6a, 0xe0, 0x93, - 0x91, 0x73, 0xcb, 0x97, 0x54, 0xf0, 0x92, 0x9f, 0x61, 0xbf, 0xa3, 0x78, 0x22, 0x96, 0xbc, 0x74, - 0x92, 0x23, 0x47, 0xb9, 0x56, 0xf0, 0x35, 0x58, 0xa1, 0x6e, 0x9b, 0xba, 0x44, 0x3d, 0xbc, 0x71, - 0x7e, 0x73, 0xc9, 0x23, 0x8b, 0x2c, 0x93, 0xbb, 0x2e, 0xe6, 0x93, 0x83, 0x0c, 0x0a, 0x1a, 0xc1, - 0xd5, 0xff, 0x99, 0x93, 0x19, 0x39, 0x33, 0x8a, 0x16, 0x92, 0x3b, 0x24, 0x18, 0x69, 0x21, 0xb5, - 0x8f, 0x86, 0x1a, 0xb2, 0x6e, 0xe4, 0x55, 0xa8, 0x40, 0x6f, 0x5d, 0x37, 0xd2, 0x28, 0x51, 0x37, - 0x72, 0x8d, 0x14, 0x98, 0x08, 0x42, 0xcc, 0x64, 0x89, 0xd9, 0x6b, 0x18, 0xc4, 0x91, 0xda, 0x47, - 0x43, 0x0d, 0xfd, 0xbf, 0x85, 0x9c, 0x04, 0xc9, 0x02, 0x4c, 0x9c, 0x66, 0xf0, 0x95, 0x9e, 0x3d, - 0x8d, 0x3d, 0x3c, 0x8d, 0x0d, 0xff, 0xa0, 0x01, 0x88, 0x87, 0x10, 0x8d, 0x41, 0x81, 0x46, 0x55, - 0x54, 0xbf, 0x53, 0x4b, 0x18, 0xbb, 0x23, 0x38, 0xd1, 0x6b, 0xbc, 0xa9, 0xfc, 0xc3, 0x51, 0x05, - 0x94, 0xe3, 0x1c, 0xda, 0xa0, 0x14, 0xed, 0xd6, 0x83, 0xc0, 0x0b, 0x54, 0x7b, 0xea, 0x37, 0xc6, - 0x22, 0x35, 0xcd, 0x8a, 0xfc, 0xb8, 0x89, 
0x4d, 0xaf, 0x7b, 0xd5, 0x52, 0x42, 0x8e, 0x92, 0xb0, - 0xc2, 0x8b, 0x4d, 0x62, 0x2f, 0xc5, 0xbb, 0x79, 0xd9, 0x27, 0xe3, 0xbd, 0x24, 0x60, 0x37, 0xeb, - 0xe0, 0x1b, 0x63, 0xae, 0xe5, 0x4e, 0x6f, 0xd6, 0x6f, 0x35, 0x90, 0xf4, 0x01, 0x0f, 0x41, 0x91, - 0x53, 0xd5, 0x75, 0xe9, 0x0f, 0xc0, 0x1b, 0x88, 0xe4, 0x94, 0x3a, 0x24, 0xa6, 0x42, 0xb1, 0x42, - 0x12, 0x05, 0x7e, 0x0e, 0xe6, 0x1c, 0xc2, 0x18, 0x6e, 0x29, 0xcf, 0xf1, 0xe7, 0x50, 0x23, 0xda, - 0x46, 0x03, 0xb9, 0xfe, 0x08, 0xac, 0xe5, 0x7c, 0x56, 0xc2, 0x2a, 0x98, 0xb1, 0xe4, 0x9f, 0x01, - 0x22, 0xa0, 0x19, 0x73, 0x41, 0x30, 0xca, 0x9e, 0xfc, 0x17, 0x20, 0xda, 0x37, 0xbf, 0xff, 0xf6, - 0x43, 0x65, 0xea, 0xdd, 0x87, 0xca, 0xd4, 0xfb, 0x0f, 0x95, 0xa9, 0x5f, 0xf7, 0x2b, 0xda, 0xdb, - 0x7e, 0x45, 0x7b, 0xd7, 0xaf, 0x68, 0xef, 0xfb, 0x15, 0xed, 0xdf, 0xfd, 0x8a, 0xf6, 0xfb, 0xff, - 0x54, 0xa6, 0x7e, 0xba, 0x91, 0xfb, 0x77, 0xea, 0xd7, 0x01, 0x00, 0x00, 0xff, 0xff, 0x0d, 0xf9, - 0xe3, 0xd5, 0x7f, 0x15, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/storage/v1/generated.proto", fileDescriptor_662262cc70094b41) +} + +var fileDescriptor_662262cc70094b41 = []byte{ + // 1655 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xbd, 0x6f, 0x1b, 0xc9, + 0x15, 0xd7, 0x8a, 0xd4, 0xd7, 0x50, 0xb2, 0xa4, 0x91, 0xe4, 0x30, 0x2a, 0x48, 0x61, 0xed, 0x24, + 0xb2, 0x13, 0x2f, 0x6d, 0xd9, 0x31, 0x0c, 0x07, 0x2e, 0xb4, 0x12, 0x1d, 0x0b, 0x11, 0x25, 0x65, + 0xa8, 0x18, 0x46, 0x90, 0x04, 0x1e, 0xed, 0x8e, 0xa8, 0xb1, 0xb8, 0x1f, 0xde, 0x19, 0x2a, 0x62, + 0xaa, 0xa4, 0x49, 0x17, 0x20, 0x69, 0xf3, 0x57, 0x24, 0x40, 0xd2, 0x5c, 0x79, 0xc5, 0xc1, 0xd7, + 0x19, 0x57, 0xb9, 0x22, 0xce, 0xbc, 0xfa, 0xae, 0xbc, 0x42, 0xd5, 0x61, 0x66, 0x87, 0xdc, 0x0f, + 0x2e, 0x65, 0xa9, 0x61, 0xc7, 0x99, 0xf7, 0xde, 0xef, 0xbd, 0x99, 0xf7, 0xde, 0x6f, 0xde, 0x12, + 0xfc, 0xe4, 0xf4, 0x09, 0x33, 0xa8, 0x57, 0xc1, 0x3e, 0xad, 0x30, 0xee, 0x05, 0xb8, 0x41, 0x2a, + 0x67, 0x0f, 0x2a, 0x0d, 0xe2, 0x92, 0x00, 0x73, 0x62, 0x1b, 0x7e, 0xe0, 0x71, 0x0f, 0xae, 0x84, + 0x6a, 0x06, 0xf6, 0xa9, 0xa1, 0xd4, 0x8c, 0xb3, 0x07, 0xab, 0xf7, 0x1a, 0x94, 0x9f, 0xb4, 0x8e, + 0x0c, 0xcb, 0x73, 0x2a, 0x0d, 0xaf, 0xe1, 0x55, 0xa4, 0xf6, 0x51, 0xeb, 0x58, 0xae, 0xe4, 0x42, + 0xfe, 0x0a, 0x51, 0x56, 0xf5, 0x98, 0x33, 0xcb, 0x0b, 0xb2, 0x3c, 0xad, 0x3e, 0x8a, 0x74, 0x1c, + 0x6c, 0x9d, 0x50, 0x97, 0x04, 0xed, 0x8a, 0x7f, 0xda, 0x90, 0x46, 0x01, 0x61, 0x5e, 0x2b, 0xb0, + 0xc8, 0xb5, 0xac, 0x58, 0xc5, 0x21, 0x1c, 0x67, 0xf9, 0xaa, 0x0c, 0xb3, 0x0a, 0x5a, 0x2e, 0xa7, + 0xce, 0xa0, 0x9b, 0xc7, 0x9f, 0x32, 0x60, 0xd6, 0x09, 0x71, 0x70, 0xda, 0x4e, 0xff, 0xbf, 0x06, + 0x66, 0xb6, 0xea, 0x3b, 0xdb, 0x01, 0x3d, 0x23, 0x01, 0x7c, 0x0d, 0xa6, 0x45, 0x44, 0x36, 0xe6, + 0xb8, 0xa8, 0xad, 0x69, 0xeb, 0x85, 0x8d, 0xfb, 0x46, 0x74, 0xbf, 0x7d, 0x60, 0xc3, 0x3f, 0x6d, + 0x88, 0x0d, 0x66, 0x08, 0x6d, 0xe3, 0xec, 0x81, 0xb1, 0x7f, 0xf4, 0x86, 0x58, 0xbc, 0x46, 0x38, + 0x36, 0xe1, 0xbb, 0x4e, 0x79, 0xac, 0xdb, 0x29, 0x83, 0x68, 0x0f, 0xf5, 0x51, 0xe1, 0x73, 0x90, + 0x67, 0x3e, 0xb1, 0x8a, 0xe3, 0x12, 0xfd, 0xb6, 0x91, 0x99, 0x3d, 0xa3, 0x1f, 0x51, 0xdd, 0x27, + 0x96, 0x39, 0xab, 0x10, 0xf3, 0x62, 0x85, 0xa4, 0xbd, 0xfe, 0x3f, 0x0d, 0xcc, 0xf5, 0xb5, 0x76, + 0x29, 0xe3, 0xf0, 0x0f, 0x03, 0xb1, 0x1b, 0x57, 0x8b, 0x5d, 0x58, 0xcb, 0xc8, 0x17, 0x94, 0x9f, + 0xe9, 0xde, 0x4e, 0x2c, 0xee, 0x2a, 0x98, 0xa0, 0x9c, 0x38, 0xac, 0x38, 0xbe, 0x96, 0x5b, 0x2f, + 0x6c, 0xac, 0x7d, 0x2a, 0x70, 0x73, 0x4e, 0x81, 0x4d, 0xec, 0x08, 0x33, 0x14, 0x5a, 0xeb, 0x5f, + 0xe5, 0x63, 0x61, 0x8b, 0xe3, 
0xc0, 0xa7, 0xe0, 0x06, 0xe6, 0x1c, 0x5b, 0x27, 0x88, 0xbc, 0x6d, + 0xd1, 0x80, 0xd8, 0x32, 0xf8, 0x69, 0x13, 0x76, 0x3b, 0xe5, 0x1b, 0x9b, 0x09, 0x09, 0x4a, 0x69, + 0x0a, 0x5b, 0xdf, 0xb3, 0x77, 0xdc, 0x63, 0x6f, 0xdf, 0xad, 0x79, 0x2d, 0x97, 0xcb, 0x6b, 0x55, + 0xb6, 0x07, 0x09, 0x09, 0x4a, 0x69, 0x42, 0x0b, 0x2c, 0x9f, 0x79, 0xcd, 0x96, 0x43, 0x76, 0xe9, + 0x31, 0xb1, 0xda, 0x56, 0x93, 0xd4, 0x3c, 0x9b, 0xb0, 0x62, 0x6e, 0x2d, 0xb7, 0x3e, 0x63, 0x56, + 0xba, 0x9d, 0xf2, 0xf2, 0xcb, 0x0c, 0xf9, 0x45, 0xa7, 0xbc, 0x94, 0xb1, 0x8f, 0x32, 0xc1, 0xe0, + 0x33, 0x30, 0xaf, 0x2e, 0x67, 0x0b, 0xfb, 0xd8, 0xa2, 0xbc, 0x5d, 0xcc, 0xcb, 0x08, 0x97, 0xba, + 0x9d, 0xf2, 0x7c, 0x3d, 0x29, 0x42, 0x69, 0x5d, 0xf8, 0x02, 0xcc, 0x1d, 0xb3, 0x5f, 0x07, 0x5e, + 0xcb, 0x3f, 0xf0, 0x9a, 0xd4, 0x6a, 0x17, 0x27, 0xd6, 0xb4, 0xf5, 0x19, 0x53, 0xef, 0x76, 0xca, + 0x73, 0xcf, 0xeb, 0x31, 0xc1, 0x45, 0x7a, 0x03, 0x25, 0x0d, 0xe1, 0x6b, 0x30, 0xc7, 0xbd, 0x53, + 0xe2, 0x8a, 0xab, 0x23, 0x8c, 0xb3, 0xe2, 0xa4, 0x4c, 0xe3, 0xad, 0x21, 0x69, 0x3c, 0x8c, 0xe9, + 0x9a, 0x2b, 0x2a, 0x93, 0x73, 0xf1, 0x5d, 0x86, 0x92, 0x80, 0x70, 0x0b, 0x2c, 0x06, 0x61, 0x5e, + 0x18, 0x22, 0x7e, 0xeb, 0xa8, 0x49, 0xd9, 0x49, 0x71, 0x4a, 0x1e, 0x76, 0xa5, 0xdb, 0x29, 0x2f, + 0xa2, 0xb4, 0x10, 0x0d, 0xea, 0xc3, 0x47, 0x60, 0x96, 0x91, 0x5d, 0xea, 0xb6, 0xce, 0xc3, 0x74, + 0x4e, 0x4b, 0xfb, 0x85, 0x6e, 0xa7, 0x3c, 0x5b, 0xaf, 0x46, 0xfb, 0x28, 0xa1, 0xa5, 0xff, 0x57, + 0x03, 0x53, 0x5b, 0xf5, 0x9d, 0x3d, 0xcf, 0x26, 0x23, 0xe8, 0xe0, 0xed, 0x44, 0x07, 0xeb, 0xc3, + 0x1b, 0x41, 0xc4, 0x33, 0xb4, 0x7f, 0xbf, 0x0b, 0xfb, 0x57, 0xe8, 0x28, 0xee, 0x59, 0x03, 0x79, + 0x17, 0x3b, 0x44, 0x46, 0x3d, 0x13, 0xd9, 0xec, 0x61, 0x87, 0x20, 0x29, 0x81, 0x3f, 0x05, 0x93, + 0xae, 0x67, 0x93, 0x9d, 0x6d, 0xe9, 0x7b, 0xc6, 0xbc, 0xa1, 0x74, 0x26, 0xf7, 0xe4, 0x2e, 0x52, + 0x52, 0x71, 0x8b, 0xdc, 0xf3, 0xbd, 0xa6, 0xd7, 0x68, 0xff, 0x86, 0xb4, 0x7b, 0x25, 0x2d, 0x6f, + 0xf1, 0x30, 0xb6, 0x8f, 0x12, 0x5a, 0xf0, 0x8f, 0xa0, 0x80, 0x9b, 0x4d, 0xcf, 0xc2, 0x1c, 0x1f, + 0x35, 0x89, 0xac, 0xd3, 0xc2, 0xc6, 0xdd, 0x21, 0xc7, 0x0b, 0x5b, 0x40, 0xf8, 0x45, 0x8a, 0xf8, + 0x99, 0x39, 0xdf, 0xed, 0x94, 0x0b, 0x9b, 0x11, 0x04, 0x8a, 0xe3, 0xe9, 0xff, 0xd1, 0x40, 0x41, + 0x1d, 0x78, 0x04, 0x74, 0xb5, 0x95, 0xa4, 0xab, 0xd2, 0xe5, 0x59, 0x1a, 0x42, 0x56, 0x7f, 0xea, + 0x47, 0x2c, 0x99, 0x6a, 0x1f, 0x4c, 0xd9, 0x32, 0x55, 0xac, 0xa8, 0x49, 0xd4, 0xdb, 0x97, 0xa3, + 0x2a, 0x22, 0x9c, 0x57, 0xd8, 0x53, 0xe1, 0x9a, 0xa1, 0x1e, 0x8a, 0xfe, 0x7d, 0x0e, 0xc0, 0xad, + 0xfa, 0x4e, 0x8a, 0x06, 0x46, 0x50, 0xc2, 0x14, 0xcc, 0x8a, 0x52, 0xe9, 0x15, 0x83, 0x2a, 0xe5, + 0x87, 0x57, 0xbc, 0x7f, 0x7c, 0x44, 0x9a, 0x75, 0xd2, 0x24, 0x16, 0xf7, 0x82, 0xb0, 0xaa, 0xf6, + 0x62, 0x60, 0x28, 0x01, 0x0d, 0xb7, 0xc1, 0x42, 0x8f, 0xd5, 0x9a, 0x98, 0x31, 0x51, 0xcd, 0xc5, + 0x9c, 0xac, 0xde, 0xa2, 0x0a, 0x71, 0xa1, 0x9e, 0x92, 0xa3, 0x01, 0x0b, 0xf8, 0x0a, 0x4c, 0x5b, + 0x71, 0x02, 0xfd, 0x44, 0xb1, 0x18, 0xbd, 0x69, 0xc4, 0xf8, 0x6d, 0x0b, 0xbb, 0x9c, 0xf2, 0xb6, + 0x39, 0x2b, 0x0a, 0xa5, 0xcf, 0xb4, 0x7d, 0x34, 0xc8, 0xc0, 0xa2, 0x83, 0xcf, 0xa9, 0xd3, 0x72, + 0xc2, 0x92, 0xae, 0xd3, 0xbf, 0x10, 0x49, 0xb3, 0xd7, 0x77, 0x21, 0x69, 0xae, 0x96, 0x06, 0x43, + 0x83, 0xf8, 0xfa, 0x17, 0x1a, 0xb8, 0x39, 0x98, 0xf8, 0x11, 0xb4, 0xc5, 0x5e, 0xb2, 0x2d, 0xee, + 0x0c, 0x2f, 0xe0, 0x54, 0x6c, 0x43, 0x3a, 0xe4, 0x1f, 0x93, 0x60, 0x36, 0x9e, 0xbe, 0x11, 0xd4, + 0xee, 0x2f, 0x41, 0xc1, 0x0f, 0xbc, 0x33, 0xca, 0xa8, 0xe7, 0x92, 0x40, 0x31, 0xe1, 0x92, 0x32, + 0x29, 0x1c, 0x44, 0x22, 0x14, 0xd7, 0x83, 0x0d, 0x00, 
0x7c, 0x1c, 0x60, 0x87, 0x70, 0xd1, 0xbf, + 0x39, 0x79, 0xfc, 0x87, 0x43, 0x8e, 0x1f, 0x3f, 0x91, 0x71, 0xd0, 0xb7, 0xaa, 0xba, 0x3c, 0x68, + 0x47, 0xd1, 0x45, 0x02, 0x14, 0x83, 0x86, 0xa7, 0x60, 0x2e, 0x20, 0x56, 0x13, 0x53, 0x47, 0xbd, + 0xd9, 0x79, 0x19, 0x61, 0x55, 0x3c, 0xa0, 0x28, 0x2e, 0xb8, 0xe8, 0x94, 0xef, 0x0f, 0x4e, 0xdd, + 0xc6, 0x01, 0x09, 0x18, 0x65, 0x9c, 0xb8, 0x3c, 0x2c, 0x98, 0x84, 0x0d, 0x4a, 0x62, 0x0b, 0xa6, + 0x77, 0xc4, 0x13, 0xb8, 0xef, 0x73, 0xea, 0xb9, 0xac, 0x38, 0x11, 0x31, 0x7d, 0x2d, 0xb6, 0x8f, + 0x12, 0x5a, 0x70, 0x17, 0x2c, 0x0b, 0x66, 0xfe, 0x73, 0xe8, 0xa0, 0x7a, 0xee, 0x63, 0x57, 0xdc, + 0x52, 0x71, 0x52, 0xbe, 0xb6, 0x45, 0x31, 0xfa, 0x6c, 0x66, 0xc8, 0x51, 0xa6, 0x15, 0x7c, 0x05, + 0x16, 0xc3, 0xd9, 0xc7, 0xa4, 0xae, 0x4d, 0xdd, 0x86, 0x98, 0x7c, 0xe4, 0xc3, 0x3f, 0x63, 0xde, + 0x15, 0x1d, 0xf1, 0x32, 0x2d, 0xbc, 0xc8, 0xda, 0x44, 0x83, 0x20, 0xf0, 0x2d, 0x58, 0x94, 0x1e, + 0x89, 0xad, 0xe8, 0x84, 0x12, 0x56, 0x9c, 0x96, 0xa9, 0x5b, 0x8f, 0xa7, 0x4e, 0x5c, 0x5d, 0x38, + 0xb5, 0x84, 0xa4, 0xd3, 0x23, 0xa7, 0x43, 0x12, 0x38, 0xe6, 0x8f, 0x55, 0xbe, 0x16, 0x37, 0xd3, + 0x50, 0x68, 0x10, 0x7d, 0xf5, 0x19, 0x98, 0x4f, 0x25, 0x1c, 0x2e, 0x80, 0xdc, 0x29, 0x69, 0x87, + 0xcf, 0x32, 0x12, 0x3f, 0xe1, 0x32, 0x98, 0x38, 0xc3, 0xcd, 0x16, 0x09, 0x8b, 0x0f, 0x85, 0x8b, + 0xa7, 0xe3, 0x4f, 0x34, 0xfd, 0x33, 0x0d, 0x24, 0xe8, 0x6c, 0x04, 0x2d, 0xfd, 0x22, 0xd9, 0xd2, + 0xb7, 0xae, 0x50, 0xd3, 0x43, 0x9a, 0xf9, 0x6f, 0x1a, 0x98, 0x8d, 0x8f, 0x78, 0xf0, 0x17, 0x60, + 0x1a, 0xb7, 0x6c, 0x4a, 0x5c, 0xab, 0x37, 0x95, 0xf4, 0x03, 0xd9, 0x54, 0xfb, 0xa8, 0xaf, 0x21, + 0x06, 0x40, 0x72, 0xee, 0xd3, 0x00, 0x8b, 0x22, 0xab, 0x13, 0xcb, 0x73, 0x6d, 0x26, 0x6f, 0x28, + 0x17, 0x32, 0x63, 0x35, 0x2d, 0x44, 0x83, 0xfa, 0xfa, 0xbf, 0xc7, 0xc1, 0x42, 0x58, 0x1b, 0xe1, + 0xe8, 0xef, 0x10, 0x97, 0x8f, 0x80, 0x54, 0x6a, 0x89, 0x99, 0xee, 0xe7, 0x97, 0x0e, 0x3d, 0x51, + 0x60, 0xc3, 0x86, 0x3b, 0xf8, 0x3b, 0x30, 0xc9, 0x38, 0xe6, 0x2d, 0x26, 0x9f, 0xba, 0xc2, 0xc6, + 0xbd, 0xab, 0x02, 0x4a, 0xa3, 0x68, 0xae, 0x0b, 0xd7, 0x48, 0x81, 0xe9, 0x9f, 0x6b, 0x60, 0x39, + 0x6d, 0x32, 0x82, 0x0a, 0xdb, 0x4d, 0x56, 0xd8, 0xcf, 0xae, 0x78, 0x98, 0x61, 0x5f, 0x80, 0x1a, + 0xb8, 0x39, 0x70, 0x6e, 0xf9, 0x92, 0x0a, 0x5e, 0xf2, 0x53, 0xec, 0xb7, 0x17, 0x4d, 0xc4, 0x92, + 0x97, 0x0e, 0x32, 0xe4, 0x28, 0xd3, 0x0a, 0xbe, 0x01, 0x0b, 0xd4, 0x6d, 0x52, 0x97, 0xa8, 0x87, + 0x37, 0xca, 0x6f, 0x26, 0x79, 0xa4, 0x91, 0x65, 0x72, 0x97, 0xc5, 0x7c, 0xb2, 0x93, 0x42, 0x41, + 0x03, 0xb8, 0xfa, 0x97, 0x19, 0x99, 0x91, 0x33, 0xa3, 0x68, 0x21, 0xb9, 0x43, 0x82, 0x81, 0x16, + 0x52, 0xfb, 0xa8, 0xaf, 0x21, 0xeb, 0x46, 0x5e, 0x85, 0x0a, 0xf4, 0xca, 0x75, 0x23, 0x8d, 0x62, + 0x75, 0x23, 0xd7, 0x48, 0x81, 0x89, 0x20, 0xc4, 0x4c, 0x16, 0x9b, 0xbd, 0xfa, 0x41, 0xec, 0xa9, + 0x7d, 0xd4, 0xd7, 0xd0, 0xbf, 0xcd, 0x65, 0x24, 0x48, 0x16, 0x60, 0xec, 0x34, 0xbd, 0xaf, 0xf4, + 0xf4, 0x69, 0xec, 0xfe, 0x69, 0x6c, 0xf8, 0x2f, 0x0d, 0x40, 0xdc, 0x87, 0xa8, 0xf5, 0x0a, 0x34, + 0xac, 0xa2, 0xea, 0xb5, 0x5a, 0xc2, 0xd8, 0x1c, 0xc0, 0x09, 0x5f, 0xe3, 0x55, 0xe5, 0x1f, 0x0e, + 0x2a, 0xa0, 0x0c, 0xe7, 0xd0, 0x06, 0x85, 0x70, 0xb7, 0x1a, 0x04, 0x5e, 0xa0, 0xda, 0x53, 0xbf, + 0x34, 0x16, 0xa9, 0x69, 0x96, 0xe4, 0xc7, 0x4d, 0x64, 0x7a, 0xd1, 0x29, 0x17, 0x62, 0x72, 0x14, + 0x87, 0x15, 0x5e, 0x6c, 0x12, 0x79, 0xc9, 0x5f, 0xcf, 0xcb, 0x36, 0x19, 0xee, 0x25, 0x06, 0xbb, + 0x5a, 0x05, 0x3f, 0x1a, 0x72, 0x2d, 0xd7, 0x7a, 0xb3, 0xfe, 0xae, 0x81, 0xb8, 0x0f, 0xb8, 0x0b, + 0xf2, 0x9c, 0xaa, 0xae, 0x4b, 0x7e, 0x00, 0x5e, 0x42, 0x24, 0x87, 0xd4, 0x21, 
0x11, 0x15, 0x8a, + 0x15, 0x92, 0x28, 0xf0, 0x0e, 0x98, 0x72, 0x08, 0x63, 0xb8, 0xa1, 0x3c, 0x47, 0x9f, 0x43, 0xb5, + 0x70, 0x1b, 0xf5, 0xe4, 0xfa, 0x63, 0xb0, 0x94, 0xf1, 0x59, 0x09, 0xcb, 0x60, 0xc2, 0x92, 0x7f, + 0x06, 0x88, 0x80, 0x26, 0xcc, 0x19, 0xc1, 0x28, 0x5b, 0xf2, 0x5f, 0x80, 0x70, 0xdf, 0xfc, 0xd5, + 0xbb, 0x8f, 0xa5, 0xb1, 0xf7, 0x1f, 0x4b, 0x63, 0x1f, 0x3e, 0x96, 0xc6, 0xfe, 0xda, 0x2d, 0x69, + 0xef, 0xba, 0x25, 0xed, 0x7d, 0xb7, 0xa4, 0x7d, 0xe8, 0x96, 0xb4, 0xaf, 0xbb, 0x25, 0xed, 0x9f, + 0xdf, 0x94, 0xc6, 0x7e, 0xbf, 0x92, 0xf9, 0x77, 0xea, 0x0f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7a, + 0x55, 0x95, 0x9f, 0x66, 0x15, 0x00, 0x00, } func (m *CSIDriver) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index b35f708c66..ec2beac468 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -44,7 +44,7 @@ message CSIDriver { // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and // alphanumerics between. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec represents the specification of the CSI Driver. optional CSIDriverSpec spec = 2; @@ -55,7 +55,7 @@ message CSIDriverList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CSIDriver repeated CSIDriver items = 2; @@ -103,7 +103,7 @@ message CSIDriverSpec { // deployed on such a cluster and the deployment determines which mode that is, for example // via a command line parameter of the driver. // - // This field is immutable. + // This field was immutable in Kubernetes < 1.29 and now is mutable. // // +optional optional bool podInfoOnMount = 2; @@ -150,7 +150,7 @@ message CSIDriverSpec { // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. // - // This field is immutable. + // This field was immutable in Kubernetes < 1.29 and now is mutable. // // Defaults to ReadWriteOnceWithFSType, which will examine each volume // to determine if Kubernetes should modify ownership and permissions of the volume. @@ -226,7 +226,7 @@ message CSIDriverSpec { message CSINode { // Standard object's metadata. // metadata.name must be the Kubernetes node name. - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification of CSINode optional CSINodeSpec spec = 2; @@ -261,6 +261,7 @@ message CSINodeDriver { // It is possible for different nodes to use different topology keys. // This can be empty if driver does not support topology. // +optional + // +listType=atomic repeated string topologyKeys = 3; // allocatable represents the volume resources of a node that are available for scheduling. 
@@ -274,7 +275,7 @@ message CSINodeList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CSINode repeated CSINode items = 2; @@ -286,6 +287,8 @@ message CSINodeSpec { // If all drivers in the list are uninstalled, this can become empty. // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated CSINodeDriver drivers = 1; } @@ -324,7 +327,7 @@ message CSIStorageCapacity { // // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is @@ -333,7 +336,7 @@ message CSIStorageCapacity { // immutable. // // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass @@ -353,7 +356,7 @@ message CSIStorageCapacity { // unavailable. // // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the @@ -367,7 +370,7 @@ message CSIStorageCapacity { // API is ResourceRequirements.Requests in a volume claim. // // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5; } // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. @@ -375,11 +378,9 @@ message CSIStorageCapacityList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CSIStorageCapacity objects. - // +listType=map - // +listMapKey=name repeated CSIStorageCapacity items = 2; } @@ -392,7 +393,7 @@ message StorageClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // provisioner indicates the type of the provisioner. optional string provisioner = 2; @@ -411,6 +412,7 @@ message StorageClass { // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional + // +listType=atomic repeated string mountOptions = 5; // allowVolumeExpansion shows whether the storage class allow volume expand. @@ -429,7 +431,7 @@ message StorageClass { // This field is only honored by servers that enable the VolumeScheduling feature. 
// +optional // +listType=atomic - repeated k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8; + repeated .k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8; } // StorageClassList is a collection of storage classes. @@ -437,7 +439,7 @@ message StorageClassList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of StorageClasses repeated StorageClass items = 2; @@ -464,7 +466,7 @@ message VolumeAttachment { // Standard object metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. @@ -482,7 +484,7 @@ message VolumeAttachmentList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; @@ -504,7 +506,7 @@ message VolumeAttachmentSource { // PersistentVolumeSpec. This field is beta-level and is only // honored by servers that enabled the CSIMigration feature. // +optional - optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; + optional .k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. @@ -552,7 +554,7 @@ message VolumeAttachmentStatus { message VolumeError { // time represents the time the error was encountered. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index 7d7b7664b8..de2bbc2e06 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -25,6 +25,7 @@ import ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.6 // StorageClass describes the parameters for a class of storage for // which PersistentVolumes can be dynamically provisioned. @@ -56,6 +57,7 @@ type StorageClass struct { // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional + // +listType=atomic MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,5,opt,name=mountOptions"` // allowVolumeExpansion shows whether the storage class allow volume expand. @@ -78,6 +80,7 @@ type StorageClass struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.6 // StorageClassList is a collection of storage classes. 
type StorageClassList struct { @@ -111,6 +114,7 @@ const ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.13 // VolumeAttachment captures the intent to attach or detach the specified volume // to/from the specified node. @@ -136,6 +140,7 @@ type VolumeAttachment struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.13 // VolumeAttachmentList is a collection of VolumeAttachment objects. type VolumeAttachmentList struct { @@ -226,6 +231,7 @@ type VolumeError struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.18 // CSIDriver captures information about a Container Storage Interface (CSI) // volume driver deployed on the cluster. @@ -250,6 +256,7 @@ type CSIDriver struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.18 // CSIDriverList is a collection of CSIDriver objects. type CSIDriverList struct { @@ -306,7 +313,7 @@ type CSIDriverSpec struct { // deployed on such a cluster and the deployment determines which mode that is, for example // via a command line parameter of the driver. // - // This field is immutable. + // This field was immutable in Kubernetes < 1.29 and now is mutable. // // +optional PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"` @@ -353,7 +360,7 @@ type CSIDriverSpec struct { // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. // - // This field is immutable. + // This field was immutable in Kubernetes < 1.29 and now is mutable. // // Defaults to ReadWriteOnceWithFSType, which will examine each volume // to determine if Kubernetes should modify ownership and permissions of the volume. @@ -490,6 +497,7 @@ const ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.17 // CSINode holds information about all CSI drivers installed on a node. // CSI drivers do not need to create the CSINode object directly. As long as @@ -517,6 +525,8 @@ type CSINodeSpec struct { // If all drivers in the list are uninstalled, this can become empty. // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name Drivers []CSINodeDriver `json:"drivers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=drivers"` } @@ -549,6 +559,7 @@ type CSINodeDriver struct { // It is possible for different nodes to use different topology keys. // This can be empty if driver does not support topology. // +optional + // +listType=atomic TopologyKeys []string `json:"topologyKeys" protobuf:"bytes,3,rep,name=topologyKeys"` // allocatable represents the volume resources of a node that are available for scheduling. @@ -568,6 +579,7 @@ type VolumeNodeResources struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.17 // CSINodeList is a collection of CSINode objects. 
type CSINodeList struct { @@ -584,6 +596,7 @@ type CSINodeList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.24 // CSIStorageCapacity stores the result of one CSI GetCapacity call. // For a given StorageClass, this describes the available capacity in a @@ -669,6 +682,7 @@ type CSIStorageCapacity struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.24 // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. type CSIStorageCapacityList struct { @@ -680,7 +694,5 @@ type CSIStorageCapacityList struct { metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // items is the list of CSIStorageCapacity objects. - // +listType=map - // +listMapKey=name Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index 69ee683610..89b1cbb201 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -50,10 +50,10 @@ func (CSIDriverList) SwaggerDoc() map[string]string { var map_CSIDriverSpec = map[string]string{ "": "CSIDriverSpec is the specification of a CSIDriver.", "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", - "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. 
As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field was immutable in Kubernetes < 1.29 and now is mutable.", "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is beta. This field is immutable.", "storageCapacity": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", - "fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. 
Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", + "fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field was immutable in Kubernetes < 1.29 and now is mutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", "tokenRequests": "tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", "requiresRepublish": "requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.", "seLinuxMount": "seLinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".", diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/storage/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..a44c1181ad --- /dev/null +++ b/vendor/k8s.io/api/storage/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,82 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CSIDriver) APILifecycleIntroduced() (major, minor int) { + return 1, 18 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CSIDriverList) APILifecycleIntroduced() (major, minor int) { + return 1, 18 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CSINode) APILifecycleIntroduced() (major, minor int) { + return 1, 17 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CSINodeList) APILifecycleIntroduced() (major, minor int) { + return 1, 17 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CSIStorageCapacity) APILifecycleIntroduced() (major, minor int) { + return 1, 24 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CSIStorageCapacityList) APILifecycleIntroduced() (major, minor int) { + return 1, 24 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *StorageClass) APILifecycleIntroduced() (major, minor int) { + return 1, 6 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *StorageClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 6 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *VolumeAttachment) APILifecycleIntroduced() (major, minor int) { + return 1, 13 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *VolumeAttachmentList) APILifecycleIntroduced() (major, minor int) { + return 1, 13 +} diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go index 2b15ec3feb..86343b170a 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto +// source: k8s.io/api/storage/v1alpha1/generated.proto package v1alpha1 @@ -50,7 +50,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *CSIStorageCapacity) Reset() { *m = CSIStorageCapacity{} } func (*CSIStorageCapacity) ProtoMessage() {} func (*CSIStorageCapacity) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{0} + return fileDescriptor_02e7952e43280c27, []int{0} } func (m *CSIStorageCapacity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -78,7 +78,7 @@ var xxx_messageInfo_CSIStorageCapacity proto.InternalMessageInfo func (m *CSIStorageCapacityList) Reset() { *m = CSIStorageCapacityList{} } func (*CSIStorageCapacityList) ProtoMessage() {} func (*CSIStorageCapacityList) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{1} + return fileDescriptor_02e7952e43280c27, []int{1} } func (m *CSIStorageCapacityList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,7 +106,7 @@ var xxx_messageInfo_CSIStorageCapacityList proto.InternalMessageInfo func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } func (*VolumeAttachment) ProtoMessage() {} func (*VolumeAttachment) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{2} + return fileDescriptor_02e7952e43280c27, []int{2} } func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,7 +134,7 @@ var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } func (*VolumeAttachmentList) ProtoMessage() {} func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{3} + return fileDescriptor_02e7952e43280c27, []int{3} } func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -162,7 +162,7 @@ var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } func (*VolumeAttachmentSource) ProtoMessage() {} func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{4} + return fileDescriptor_02e7952e43280c27, []int{4} } func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -190,7 +190,7 @@ var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } func (*VolumeAttachmentSpec) ProtoMessage() {} func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{5} + 
return fileDescriptor_02e7952e43280c27, []int{5} } func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -218,7 +218,7 @@ var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } func (*VolumeAttachmentStatus) ProtoMessage() {} func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{6} + return fileDescriptor_02e7952e43280c27, []int{6} } func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -246,7 +246,7 @@ var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo func (m *VolumeAttributesClass) Reset() { *m = VolumeAttributesClass{} } func (*VolumeAttributesClass) ProtoMessage() {} func (*VolumeAttributesClass) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{7} + return fileDescriptor_02e7952e43280c27, []int{7} } func (m *VolumeAttributesClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -274,7 +274,7 @@ var xxx_messageInfo_VolumeAttributesClass proto.InternalMessageInfo func (m *VolumeAttributesClassList) Reset() { *m = VolumeAttributesClassList{} } func (*VolumeAttributesClassList) ProtoMessage() {} func (*VolumeAttributesClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{8} + return fileDescriptor_02e7952e43280c27, []int{8} } func (m *VolumeAttributesClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -302,7 +302,7 @@ var xxx_messageInfo_VolumeAttributesClassList proto.InternalMessageInfo func (m *VolumeError) Reset() { *m = VolumeError{} } func (*VolumeError) ProtoMessage() {} func (*VolumeError) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{9} + return fileDescriptor_02e7952e43280c27, []int{9} } func (m *VolumeError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -343,75 +343,75 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto", fileDescriptor_10f856db1e670dc4) -} - -var fileDescriptor_10f856db1e670dc4 = []byte{ - // 1023 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xcb, 0x6e, 0x23, 0x45, - 0x17, 0x4e, 0xe7, 0x32, 0xe3, 0xa9, 0xe4, 0xff, 0xc7, 0x53, 0xca, 0x0c, 0xc6, 0x23, 0xb5, 0x23, - 0xaf, 0x0c, 0x62, 0xba, 0x49, 0x40, 0x68, 0x84, 0xc4, 0xc2, 0x9d, 0x64, 0x11, 0x91, 0x84, 0xa1, - 0x1c, 0x01, 0x02, 0x16, 0x94, 0xdb, 0x07, 0xbb, 0x62, 0xf7, 0x45, 0x55, 0xd5, 0x16, 0x66, 0xc5, - 0x8a, 0x35, 0x3b, 0xde, 0x80, 0x67, 0xc9, 0x02, 0x89, 0xd1, 0xac, 0x66, 0x65, 0x91, 0x86, 0x67, - 0x60, 0xc1, 0x06, 0xd4, 0xd5, 0xe5, 0x76, 0xc7, 0x6d, 0x07, 0x27, 0x8b, 0xec, 0x5c, 0xe7, 0xf2, - 0x9d, 0xdb, 0x77, 0x4e, 0x27, 0xe8, 0xa0, 0xff, 0x5c, 0x58, 0x2c, 0xb0, 0xfb, 0x51, 0x1b, 0xb8, - 0x0f, 0x12, 0x84, 0x3d, 0x04, 0xbf, 0x13, 0x70, 0x5b, 0x2b, 0x68, 0xc8, 0x6c, 0x21, 0x03, 0x4e, - 0xbb, 0x60, 0x0f, 0x77, 0xe9, 0x20, 0xec, 0xd1, 0x5d, 0xbb, 0x0b, 0x3e, 0x70, 0x2a, 0xa1, 0x63, - 0x85, 0x3c, 0x90, 0x01, 0x7e, 0x9a, 0x1a, 0x5b, 0x34, 0x64, 0x96, 0x36, 0xb6, 0x26, 0xc6, 0xd5, - 0x67, 0x5d, 0x26, 0x7b, 0x51, 0xdb, 0x72, 0x03, 0xcf, 0xee, 0x06, 0xdd, 0xc0, 0x56, 0x3e, 0xed, - 0xe8, 0x5b, 0xf5, 0x52, 0x0f, 0xf5, 0x2b, 0xc5, 0xaa, 0xd6, 0x73, 0x81, 0xdd, 0x80, 0x27, 0x51, - 0x67, 0xe3, 0x55, 0xdf, 0x9f, 0xda, 0x78, 0xd4, 0xed, 0x31, 0x1f, 0xf8, 0xc8, 0x0e, 0xfb, 0x5d, - 0xe5, 0xc4, 0x41, 0x04, 0x11, 0x77, 0xe1, 
0x46, 0x5e, 0xc2, 0xf6, 0x40, 0xd2, 0x79, 0xb1, 0xec, - 0x45, 0x5e, 0x3c, 0xf2, 0x25, 0xf3, 0x8a, 0x61, 0x3e, 0xf8, 0x2f, 0x07, 0xe1, 0xf6, 0xc0, 0xa3, - 0xb3, 0x7e, 0xf5, 0xbf, 0xd6, 0x10, 0xde, 0x6f, 0x1d, 0xb5, 0xd2, 0xfe, 0xed, 0xd3, 0x90, 0xba, - 0x4c, 0x8e, 0xf0, 0x37, 0xa8, 0x94, 0xa4, 0xd6, 0xa1, 0x92, 0x56, 0x8c, 0x1d, 0xa3, 0xb1, 0xb9, - 0xf7, 0xae, 0x35, 0x6d, 0x77, 0x16, 0xc1, 0x0a, 0xfb, 0xdd, 0x44, 0x20, 0xac, 0xc4, 0xda, 0x1a, - 0xee, 0x5a, 0x9f, 0xb4, 0xcf, 0xc1, 0x95, 0x27, 0x20, 0xa9, 0x83, 0x2f, 0xc6, 0xb5, 0x95, 0x78, - 0x5c, 0x43, 0x53, 0x19, 0xc9, 0x50, 0x31, 0x43, 0x5b, 0x7e, 0xd0, 0x81, 0xb3, 0x20, 0x0c, 0x06, - 0x41, 0x77, 0x54, 0x59, 0x55, 0x51, 0xde, 0x5b, 0x2e, 0xca, 0x31, 0x6d, 0xc3, 0xa0, 0x05, 0x03, - 0x70, 0x65, 0xc0, 0x9d, 0x72, 0x3c, 0xae, 0x6d, 0x9d, 0xe6, 0xc0, 0xc8, 0x15, 0x68, 0x7c, 0x80, - 0xca, 0x9a, 0x1f, 0xfb, 0x03, 0x2a, 0xc4, 0x29, 0xf5, 0xa0, 0xb2, 0xb6, 0x63, 0x34, 0x1e, 0x38, - 0x15, 0x9d, 0x62, 0xb9, 0x35, 0xa3, 0x27, 0x05, 0x0f, 0xfc, 0x05, 0x2a, 0xb9, 0xba, 0x3d, 0x95, - 0x75, 0x95, 0xac, 0x75, 0x5d, 0xb2, 0xd6, 0x84, 0x11, 0xd6, 0xa7, 0x11, 0xf5, 0x25, 0x93, 0x23, - 0x67, 0x2b, 0x1e, 0xd7, 0x4a, 0x93, 0x16, 0x93, 0x0c, 0x0d, 0x0b, 0xf4, 0xc8, 0xa3, 0xdf, 0x31, - 0x2f, 0xf2, 0x3e, 0x0b, 0x06, 0x91, 0x07, 0x2d, 0xf6, 0x3d, 0x54, 0x36, 0x6e, 0x15, 0xe2, 0x71, - 0x3c, 0xae, 0x3d, 0x3a, 0x99, 0x05, 0x23, 0x45, 0xfc, 0xfa, 0xaf, 0x06, 0x7a, 0x52, 0x1c, 0xfc, - 0x31, 0x13, 0x12, 0x7f, 0x5d, 0x18, 0xbe, 0xb5, 0xe4, 0x58, 0x98, 0x48, 0x47, 0x5f, 0xd6, 0x7d, - 0x2d, 0x4d, 0x24, 0xb9, 0xc1, 0x9f, 0xa1, 0x0d, 0x26, 0xc1, 0x13, 0x95, 0xd5, 0x9d, 0xb5, 0xc6, - 0xe6, 0x9e, 0x6d, 0x5d, 0xb3, 0xc6, 0x56, 0x31, 0x43, 0xe7, 0x7f, 0x1a, 0x7b, 0xe3, 0x28, 0x41, - 0x21, 0x29, 0x58, 0xfd, 0x97, 0x55, 0x54, 0x4e, 0xab, 0x6b, 0x4a, 0x49, 0xdd, 0x9e, 0x07, 0xbe, - 0xbc, 0x03, 0x16, 0xb7, 0xd0, 0xba, 0x08, 0xc1, 0xd5, 0xec, 0xdd, 0xbd, 0xb6, 0x96, 0xd9, 0xf4, - 0x5a, 0x21, 0xb8, 0xce, 0x96, 0x86, 0x5f, 0x4f, 0x5e, 0x44, 0x81, 0xe1, 0xaf, 0xd0, 0x3d, 0x21, - 0xa9, 0x8c, 0x84, 0x62, 0xe9, 0xd5, 0xa5, 0x58, 0x02, 0x56, 0xb9, 0x3a, 0xff, 0xd7, 0xc0, 0xf7, - 0xd2, 0x37, 0xd1, 0x90, 0xf5, 0x0b, 0x03, 0x6d, 0xcf, 0xba, 0xdc, 0xc1, 0xd4, 0xc9, 0xd5, 0xa9, - 0x3f, 0xbb, 0x51, 0x49, 0x0b, 0x66, 0xfe, 0xca, 0x40, 0x4f, 0x0a, 0xd5, 0xab, 0x85, 0xc0, 0xc7, - 0x68, 0x3b, 0x04, 0x2e, 0x98, 0x90, 0xe0, 0xcb, 0xd4, 0x46, 0xad, 0xbd, 0x91, 0xae, 0x7d, 0x3c, - 0xae, 0x6d, 0xbf, 0x98, 0xa3, 0x27, 0x73, 0xbd, 0xf0, 0x39, 0x2a, 0x33, 0x7f, 0xc0, 0x7c, 0xd0, - 0xfb, 0x33, 0x9d, 0x78, 0x23, 0x5f, 0x47, 0xf2, 0xe1, 0x48, 0x1a, 0x32, 0x8b, 0xac, 0x06, 0xbd, - 0x9d, 0x9c, 0x99, 0xa3, 0x19, 0x14, 0x52, 0xc0, 0xad, 0xff, 0x36, 0x67, 0x3e, 0x89, 0x02, 0xbf, - 0x83, 0x4a, 0x54, 0x49, 0x80, 0xeb, 0x32, 0xb2, 0x7e, 0x37, 0xb5, 0x9c, 0x64, 0x16, 0x8a, 0x43, - 0xaa, 0x15, 0x73, 0x0e, 0xeb, 0x12, 0x1c, 0x52, 0xae, 0x39, 0x0e, 0xa9, 0x37, 0xd1, 0x90, 0x49, - 0x2a, 0xc9, 0x81, 0xcd, 0x1d, 0xd2, 0x2c, 0x95, 0x53, 0x2d, 0x27, 0x99, 0x45, 0xfd, 0x9f, 0xb5, - 0x39, 0x63, 0x52, 0x64, 0xcc, 0xd5, 0xd4, 0x51, 0x35, 0x95, 0x0a, 0x35, 0x75, 0xb2, 0x9a, 0x3a, - 0xf8, 0x67, 0x03, 0x61, 0x9a, 0x41, 0x9c, 0x4c, 0xc8, 0x9a, 0x32, 0xea, 0xe3, 0x5b, 0x2c, 0x89, - 0xd5, 0x2c, 0xa0, 0x1d, 0xfa, 0x92, 0x8f, 0x9c, 0xaa, 0xce, 0x02, 0x17, 0x0d, 0xc8, 0x9c, 0x14, - 0xf0, 0x39, 0xda, 0x4c, 0xa5, 0x87, 0x9c, 0x07, 0x5c, 0xaf, 0x6d, 0x63, 0x89, 0x8c, 0x94, 0xbd, - 0x63, 0xc6, 0xe3, 0xda, 0x66, 0x73, 0x0a, 0xf0, 0xf7, 0xb8, 0xb6, 0x99, 0xd3, 0x93, 0x3c, 0x78, - 0x12, 0xab, 0x03, 0xd3, 0x58, 0xeb, 0xb7, 0x89, 0x75, 0x00, 0x8b, 
0x63, 0xe5, 0xc0, 0xab, 0x87, - 0xe8, 0x8d, 0x05, 0x2d, 0xc2, 0x65, 0xb4, 0xd6, 0x87, 0x51, 0xca, 0x44, 0x92, 0xfc, 0xc4, 0xdb, - 0x68, 0x63, 0x48, 0x07, 0x51, 0xca, 0xb8, 0x07, 0x24, 0x7d, 0x7c, 0xb8, 0xfa, 0xdc, 0xa8, 0xff, - 0xb9, 0x8a, 0x1e, 0x67, 0x13, 0xe0, 0xac, 0x1d, 0x49, 0x10, 0xea, 0xc3, 0x7a, 0x07, 0x17, 0x7a, - 0x0f, 0xa1, 0x0e, 0x67, 0x43, 0xe0, 0x8a, 0xad, 0x2a, 0xb5, 0xa9, 0xc7, 0x41, 0xa6, 0x21, 0x39, - 0x2b, 0x3c, 0x44, 0x28, 0xa4, 0x9c, 0x7a, 0x20, 0x81, 0x27, 0x47, 0x38, 0xe1, 0x97, 0xb3, 0x1c, - 0xbf, 0xf2, 0xd5, 0x59, 0x2f, 0x32, 0x90, 0x94, 0x56, 0x59, 0xdc, 0xa9, 0x82, 0xe4, 0x22, 0x55, - 0x3f, 0x42, 0x0f, 0x67, 0x5c, 0x6e, 0xd4, 0xe6, 0x57, 0x06, 0x7a, 0x73, 0x6e, 0x22, 0x77, 0x70, - 0xdf, 0x3f, 0xbf, 0x7a, 0xdf, 0xf7, 0x6e, 0xde, 0xad, 0x05, 0x47, 0xfe, 0x47, 0x03, 0xe5, 0xf9, - 0x89, 0x8f, 0xd1, 0x7a, 0xf2, 0xf7, 0xac, 0x2e, 0xe1, 0xed, 0xe5, 0x4a, 0x38, 0x63, 0x1e, 0x4c, - 0x3f, 0xb5, 0xc9, 0x8b, 0x28, 0x14, 0xfc, 0x16, 0xba, 0xef, 0x81, 0x10, 0xb4, 0x3b, 0xa1, 0xc6, - 0x43, 0x6d, 0x74, 0xff, 0x24, 0x15, 0x93, 0x89, 0xde, 0x69, 0x5e, 0x5c, 0x9a, 0x2b, 0x2f, 0x2f, - 0xcd, 0x95, 0xd7, 0x97, 0xe6, 0xca, 0x0f, 0xb1, 0x69, 0x5c, 0xc4, 0xa6, 0xf1, 0x32, 0x36, 0x8d, - 0xd7, 0xb1, 0x69, 0xfc, 0x1e, 0x9b, 0xc6, 0x4f, 0x7f, 0x98, 0x2b, 0x5f, 0x3e, 0xbd, 0xe6, 0x3f, - 0x98, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xb9, 0x2f, 0x75, 0xee, 0xf8, 0x0c, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/storage/v1alpha1/generated.proto", fileDescriptor_02e7952e43280c27) +} + +var fileDescriptor_02e7952e43280c27 = []byte{ + // 1009 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x3d, 0x6f, 0x23, 0x45, + 0x18, 0xce, 0xda, 0xce, 0x9d, 0x6f, 0x1c, 0x38, 0xdf, 0xc8, 0x77, 0x18, 0x9f, 0xb4, 0x3e, 0xb9, + 0x32, 0x1f, 0x37, 0x4b, 0x02, 0x42, 0x27, 0x24, 0x0a, 0x6f, 0x92, 0x22, 0x22, 0x09, 0xc7, 0x38, + 0x02, 0x04, 0x14, 0x8c, 0xd7, 0x83, 0x3d, 0x89, 0xf7, 0x43, 0x33, 0xb3, 0x16, 0xa6, 0xa2, 0xa2, + 0xa6, 0xe3, 0x1f, 0xf0, 0x5b, 0x52, 0x20, 0x71, 0xba, 0xea, 0x2a, 0x8b, 0x2c, 0xfc, 0x06, 0x0a, + 0x1a, 0xd0, 0xce, 0x8e, 0xd7, 0x1b, 0xaf, 0x1d, 0x9c, 0x14, 0xe9, 0x3c, 0x33, 0xef, 0xfb, 0x3c, + 0xef, 0xc7, 0xf3, 0xbe, 0x9b, 0x80, 0x77, 0xce, 0x9e, 0x09, 0xc4, 0x7c, 0x8b, 0x04, 0xcc, 0x12, + 0xd2, 0xe7, 0x64, 0x40, 0xad, 0xf1, 0x36, 0x19, 0x05, 0x43, 0xb2, 0x6d, 0x0d, 0xa8, 0x47, 0x39, + 0x91, 0xb4, 0x8f, 0x02, 0xee, 0x4b, 0x1f, 0x3e, 0x4e, 0x8c, 0x11, 0x09, 0x18, 0xd2, 0xc6, 0x68, + 0x66, 0xdc, 0x78, 0x3a, 0x60, 0x72, 0x18, 0xf6, 0x90, 0xe3, 0xbb, 0xd6, 0xc0, 0x1f, 0xf8, 0x96, + 0xf2, 0xe9, 0x85, 0xdf, 0xa9, 0x93, 0x3a, 0xa8, 0x5f, 0x09, 0x56, 0xa3, 0x95, 0x21, 0x76, 0x7c, + 0x1e, 0xb3, 0x2e, 0xf2, 0x35, 0x3e, 0x98, 0xdb, 0xb8, 0xc4, 0x19, 0x32, 0x8f, 0xf2, 0x89, 0x15, + 0x9c, 0x0d, 0x94, 0x13, 0xa7, 0xc2, 0x0f, 0xb9, 0x43, 0xaf, 0xe5, 0x25, 0x2c, 0x97, 0x4a, 0xb2, + 0x8c, 0xcb, 0x5a, 0xe5, 0xc5, 0x43, 0x4f, 0x32, 0x37, 0x4f, 0xf3, 0xe1, 0xff, 0x39, 0x08, 0x67, + 0x48, 0x5d, 0xb2, 0xe8, 0xd7, 0xfa, 0xbb, 0x08, 0xe0, 0x6e, 0xf7, 0xa0, 0x9b, 0xd4, 0x6f, 0x97, + 0x04, 0xc4, 0x61, 0x72, 0x02, 0xbf, 0x05, 0xe5, 0x38, 0xb4, 0x3e, 0x91, 0xa4, 0x6e, 0x3c, 0x31, + 0xda, 0x95, 0x9d, 0xf7, 0xd0, 0xbc, 0xdc, 0x29, 0x03, 0x0a, 0xce, 0x06, 0xf1, 0x85, 0x40, 0xb1, + 0x35, 0x1a, 0x6f, 0xa3, 0x4f, 0x7b, 0xa7, 0xd4, 0x91, 0x47, 0x54, 0x12, 0x1b, 0x9e, 0x4f, 0x9b, + 0x1b, 0xd1, 0xb4, 0x09, 0xe6, 0x77, 0x38, 0x45, 0x85, 0x0c, 0x6c, 0x79, 0x7e, 0x9f, 0x9e, 0xf8, + 0x81, 0x3f, 0xf2, 0x07, 0x93, 0x7a, 0x41, 0xb1, 0xbc, 0xbf, 0x1e, 0xcb, 0x21, 0xe9, 0xd1, 
0x51, + 0x97, 0x8e, 0xa8, 0x23, 0x7d, 0x6e, 0x57, 0xa3, 0x69, 0x73, 0xeb, 0x38, 0x03, 0x86, 0x2f, 0x41, + 0xc3, 0x3d, 0x50, 0xd5, 0xfa, 0xd8, 0x1d, 0x11, 0x21, 0x8e, 0x89, 0x4b, 0xeb, 0xc5, 0x27, 0x46, + 0xfb, 0x9e, 0x5d, 0xd7, 0x21, 0x56, 0xbb, 0x0b, 0xef, 0x38, 0xe7, 0x01, 0xbf, 0x04, 0x65, 0x47, + 0x97, 0xa7, 0x5e, 0x52, 0xc1, 0xa2, 0xab, 0x82, 0x45, 0x33, 0x45, 0xa0, 0xcf, 0x42, 0xe2, 0x49, + 0x26, 0x27, 0xf6, 0x56, 0x34, 0x6d, 0x96, 0x67, 0x25, 0xc6, 0x29, 0x1a, 0x14, 0xe0, 0x81, 0x4b, + 0xbe, 0x67, 0x6e, 0xe8, 0x7e, 0xee, 0x8f, 0x42, 0x97, 0x76, 0xd9, 0x0f, 0xb4, 0xbe, 0x79, 0x23, + 0x8a, 0x87, 0xd1, 0xb4, 0xf9, 0xe0, 0x68, 0x11, 0x0c, 0xe7, 0xf1, 0x5b, 0xbf, 0x19, 0xe0, 0x51, + 0xbe, 0xf1, 0x87, 0x4c, 0x48, 0xf8, 0x4d, 0xae, 0xf9, 0x68, 0xcd, 0xb6, 0x30, 0x91, 0xb4, 0xbe, + 0xaa, 0xeb, 0x5a, 0x9e, 0xdd, 0x64, 0x1a, 0x7f, 0x02, 0x36, 0x99, 0xa4, 0xae, 0xa8, 0x17, 0x9e, + 0x14, 0xdb, 0x95, 0x1d, 0x0b, 0x5d, 0x31, 0xc6, 0x28, 0x1f, 0xa1, 0xfd, 0x9a, 0xc6, 0xde, 0x3c, + 0x88, 0x51, 0x70, 0x02, 0xd6, 0xfa, 0xb5, 0x00, 0xaa, 0x49, 0x76, 0x1d, 0x29, 0x89, 0x33, 0x74, + 0xa9, 0x27, 0x6f, 0x41, 0xc5, 0x5d, 0x50, 0x12, 0x01, 0x75, 0xb4, 0x7a, 0xb7, 0xaf, 0xcc, 0x65, + 0x31, 0xbc, 0x6e, 0x40, 0x1d, 0x7b, 0x4b, 0xc3, 0x97, 0xe2, 0x13, 0x56, 0x60, 0xf0, 0x6b, 0x70, + 0x47, 0x48, 0x22, 0x43, 0xa1, 0x54, 0x7a, 0x79, 0x28, 0xd6, 0x80, 0x55, 0xae, 0xf6, 0xeb, 0x1a, + 0xf8, 0x4e, 0x72, 0xc6, 0x1a, 0xb2, 0x75, 0x6e, 0x80, 0xda, 0xa2, 0xcb, 0x2d, 0x74, 0x1d, 0x5f, + 0xee, 0xfa, 0xd3, 0x6b, 0xa5, 0xb4, 0xa2, 0xe7, 0x2f, 0x0d, 0xf0, 0x28, 0x97, 0xbd, 0x1a, 0x08, + 0x78, 0x08, 0x6a, 0x01, 0xe5, 0x82, 0x09, 0x49, 0x3d, 0x99, 0xd8, 0xa8, 0xb1, 0x37, 0x92, 0xb1, + 0x8f, 0xa6, 0xcd, 0xda, 0xf3, 0x25, 0xef, 0x78, 0xa9, 0x17, 0x3c, 0x05, 0x55, 0xe6, 0x8d, 0x98, + 0x47, 0xf5, 0xfc, 0xcc, 0x3b, 0xde, 0xce, 0xe6, 0x11, 0x7f, 0x38, 0xe2, 0x82, 0x2c, 0x22, 0xab, + 0x46, 0xd7, 0xe2, 0x35, 0x73, 0xb0, 0x80, 0x82, 0x73, 0xb8, 0xad, 0xdf, 0x97, 0xf4, 0x27, 0x7e, + 0x80, 0xef, 0x82, 0x32, 0x51, 0x37, 0x94, 0xeb, 0x34, 0xd2, 0x7a, 0x77, 0xf4, 0x3d, 0x4e, 0x2d, + 0x94, 0x86, 0x54, 0x29, 0x96, 0x2c, 0xd6, 0x35, 0x34, 0xa4, 0x5c, 0x33, 0x1a, 0x52, 0x67, 0xac, + 0x21, 0xe3, 0x50, 0xe2, 0x05, 0x9b, 0x59, 0xa4, 0x69, 0x28, 0xc7, 0xfa, 0x1e, 0xa7, 0x16, 0xad, + 0x7f, 0x8b, 0x4b, 0xda, 0xa4, 0xc4, 0x98, 0xc9, 0xa9, 0xaf, 0x72, 0x2a, 0xe7, 0x72, 0xea, 0xa7, + 0x39, 0xf5, 0xe1, 0x2f, 0x06, 0x80, 0x24, 0x85, 0x38, 0x9a, 0x89, 0x35, 0x51, 0xd4, 0x27, 0x37, + 0x18, 0x12, 0xd4, 0xc9, 0xa1, 0xed, 0x7b, 0x92, 0x4f, 0xec, 0x86, 0x8e, 0x02, 0xe6, 0x0d, 0xf0, + 0x92, 0x10, 0xe0, 0x29, 0xa8, 0x24, 0xb7, 0xfb, 0x9c, 0xfb, 0x5c, 0x8f, 0x6d, 0x7b, 0x8d, 0x88, + 0x94, 0xbd, 0x6d, 0x46, 0xd3, 0x66, 0xa5, 0x33, 0x07, 0xf8, 0x67, 0xda, 0xac, 0x64, 0xde, 0x71, + 0x16, 0x3c, 0xe6, 0xea, 0xd3, 0x39, 0x57, 0xe9, 0x26, 0x5c, 0x7b, 0x74, 0x35, 0x57, 0x06, 0xbc, + 0xb1, 0x0f, 0xde, 0x58, 0x51, 0x22, 0x58, 0x05, 0xc5, 0x33, 0x3a, 0x49, 0x94, 0x88, 0xe3, 0x9f, + 0xb0, 0x06, 0x36, 0xc7, 0x64, 0x14, 0x26, 0x8a, 0xbb, 0x87, 0x93, 0xc3, 0x47, 0x85, 0x67, 0x46, + 0xeb, 0xaf, 0x02, 0x78, 0x98, 0x76, 0x80, 0xb3, 0x5e, 0x28, 0xa9, 0x50, 0x1f, 0xd6, 0x5b, 0xd8, + 0xd0, 0x3b, 0x00, 0xf4, 0x39, 0x1b, 0x53, 0xae, 0xd4, 0xaa, 0x42, 0x9b, 0x7b, 0xec, 0xa5, 0x2f, + 0x38, 0x63, 0x05, 0xc7, 0x00, 0x04, 0x84, 0x13, 0x97, 0x4a, 0xca, 0xe3, 0x25, 0x1c, 0xeb, 0xcb, + 0x5e, 0x4f, 0x5f, 0xd9, 0xec, 0xd0, 0xf3, 0x14, 0x24, 0x91, 0x55, 0xca, 0x3b, 0x7f, 0xc0, 0x19, + 0xa6, 0xc6, 0xc7, 0xe0, 0xfe, 0x82, 0xcb, 0xb5, 0xca, 0xfc, 0xd2, 0x00, 0x6f, 0x2e, 0x0d, 0xe4, + 0x16, 0xf6, 0xfb, 
0x17, 0x97, 0xf7, 0xfb, 0xce, 0xf5, 0xab, 0xb5, 0x62, 0xc9, 0xff, 0x64, 0x80, + 0xac, 0x3e, 0xe1, 0x21, 0x28, 0xc5, 0x7f, 0xcf, 0xea, 0x14, 0xde, 0x5e, 0x2f, 0x85, 0x13, 0xe6, + 0xd2, 0xf9, 0xa7, 0x36, 0x3e, 0x61, 0x85, 0x02, 0xdf, 0x02, 0x77, 0x5d, 0x2a, 0x04, 0x19, 0xcc, + 0xa4, 0x71, 0x5f, 0x1b, 0xdd, 0x3d, 0x4a, 0xae, 0xf1, 0xec, 0xdd, 0xee, 0x9c, 0x5f, 0x98, 0x1b, + 0x2f, 0x2e, 0xcc, 0x8d, 0x57, 0x17, 0xe6, 0xc6, 0x8f, 0x91, 0x69, 0x9c, 0x47, 0xa6, 0xf1, 0x22, + 0x32, 0x8d, 0x57, 0x91, 0x69, 0xfc, 0x11, 0x99, 0xc6, 0xcf, 0x7f, 0x9a, 0x1b, 0x5f, 0x3d, 0xbe, + 0xe2, 0x3f, 0x98, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xd7, 0x19, 0x2c, 0xaa, 0xdf, 0x0c, 0x00, + 0x00, } func (m *CSIStorageCapacity) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto index 49e522be53..380adbf66e 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -65,7 +65,7 @@ message CSIStorageCapacity { // // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is @@ -74,7 +74,7 @@ message CSIStorageCapacity { // immutable. // // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass @@ -94,7 +94,7 @@ message CSIStorageCapacity { // unavailable. // // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the @@ -108,7 +108,7 @@ message CSIStorageCapacity { // API is ResourceRequirements.Requests in a volume claim. // // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5; } // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. @@ -116,11 +116,9 @@ message CSIStorageCapacityList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CSIStorageCapacity objects. - // +listType=map - // +listMapKey=name repeated CSIStorageCapacity items = 2; } @@ -132,7 +130,7 @@ message VolumeAttachment { // Standard object metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. 
@@ -150,7 +148,7 @@ message VolumeAttachmentList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; @@ -172,7 +170,7 @@ message VolumeAttachmentSource { // PersistentVolumeSpec. This field is alpha-level and is only // honored by servers that enabled the CSIMigration feature. // +optional - optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; + optional .k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. @@ -223,7 +221,7 @@ message VolumeAttributesClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Name of the CSI driver // This field is immutable. @@ -250,7 +248,7 @@ message VolumeAttributesClassList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of VolumeAttributesClass objects. repeated VolumeAttributesClass items = 2; @@ -260,7 +258,7 @@ message VolumeAttributesClassList { message VolumeError { // time represents the time the error was encountered. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; // message represents the error encountered during Attach or Detach operation. // This string maybe logged, so it should not contain sensitive diff --git a/vendor/k8s.io/api/storage/v1alpha1/types.go b/vendor/k8s.io/api/storage/v1alpha1/types.go index 5957e48074..1fbf65f819 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types.go @@ -247,8 +247,6 @@ type CSIStorageCapacityList struct { metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // items is the list of CSIStorageCapacity objects. - // +listType=map - // +listMapKey=name Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go index 42ef65ca0f..446a40c483 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto +// source: k8s.io/api/storage/v1beta1/generated.proto package v1beta1 @@ -51,7 +51,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *CSIDriver) Reset() { *m = CSIDriver{} } func (*CSIDriver) ProtoMessage() {} func (*CSIDriver) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{0} + return fileDescriptor_73e4f72503e71065, []int{0} } func (m *CSIDriver) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -79,7 +79,7 @@ var xxx_messageInfo_CSIDriver proto.InternalMessageInfo func (m *CSIDriverList) Reset() { *m = CSIDriverList{} } func (*CSIDriverList) ProtoMessage() {} func (*CSIDriverList) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{1} + return fileDescriptor_73e4f72503e71065, []int{1} } func (m *CSIDriverList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,7 +107,7 @@ var xxx_messageInfo_CSIDriverList proto.InternalMessageInfo func (m *CSIDriverSpec) Reset() { *m = CSIDriverSpec{} } func (*CSIDriverSpec) ProtoMessage() {} func (*CSIDriverSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{2} + return fileDescriptor_73e4f72503e71065, []int{2} } func (m *CSIDriverSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,7 +135,7 @@ var xxx_messageInfo_CSIDriverSpec proto.InternalMessageInfo func (m *CSINode) Reset() { *m = CSINode{} } func (*CSINode) ProtoMessage() {} func (*CSINode) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{3} + return fileDescriptor_73e4f72503e71065, []int{3} } func (m *CSINode) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -163,7 +163,7 @@ var xxx_messageInfo_CSINode proto.InternalMessageInfo func (m *CSINodeDriver) Reset() { *m = CSINodeDriver{} } func (*CSINodeDriver) ProtoMessage() {} func (*CSINodeDriver) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{4} + return fileDescriptor_73e4f72503e71065, []int{4} } func (m *CSINodeDriver) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -191,7 +191,7 @@ var xxx_messageInfo_CSINodeDriver proto.InternalMessageInfo func (m *CSINodeList) Reset() { *m = CSINodeList{} } func (*CSINodeList) ProtoMessage() {} func (*CSINodeList) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{5} + return fileDescriptor_73e4f72503e71065, []int{5} } func (m *CSINodeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -219,7 +219,7 @@ var xxx_messageInfo_CSINodeList proto.InternalMessageInfo func (m *CSINodeSpec) Reset() { *m = CSINodeSpec{} } func (*CSINodeSpec) ProtoMessage() {} func (*CSINodeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{6} + return fileDescriptor_73e4f72503e71065, []int{6} } func (m *CSINodeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -247,7 +247,7 @@ var xxx_messageInfo_CSINodeSpec proto.InternalMessageInfo func (m *CSIStorageCapacity) Reset() { *m = CSIStorageCapacity{} } func (*CSIStorageCapacity) ProtoMessage() {} func (*CSIStorageCapacity) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{7} + return fileDescriptor_73e4f72503e71065, []int{7} } func (m *CSIStorageCapacity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -275,7 +275,7 @@ var xxx_messageInfo_CSIStorageCapacity proto.InternalMessageInfo func (m *CSIStorageCapacityList) Reset() { *m = 
CSIStorageCapacityList{} } func (*CSIStorageCapacityList) ProtoMessage() {} func (*CSIStorageCapacityList) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{8} + return fileDescriptor_73e4f72503e71065, []int{8} } func (m *CSIStorageCapacityList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -303,7 +303,7 @@ var xxx_messageInfo_CSIStorageCapacityList proto.InternalMessageInfo func (m *StorageClass) Reset() { *m = StorageClass{} } func (*StorageClass) ProtoMessage() {} func (*StorageClass) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{9} + return fileDescriptor_73e4f72503e71065, []int{9} } func (m *StorageClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -331,7 +331,7 @@ var xxx_messageInfo_StorageClass proto.InternalMessageInfo func (m *StorageClassList) Reset() { *m = StorageClassList{} } func (*StorageClassList) ProtoMessage() {} func (*StorageClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{10} + return fileDescriptor_73e4f72503e71065, []int{10} } func (m *StorageClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -359,7 +359,7 @@ var xxx_messageInfo_StorageClassList proto.InternalMessageInfo func (m *TokenRequest) Reset() { *m = TokenRequest{} } func (*TokenRequest) ProtoMessage() {} func (*TokenRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{11} + return fileDescriptor_73e4f72503e71065, []int{11} } func (m *TokenRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -387,7 +387,7 @@ var xxx_messageInfo_TokenRequest proto.InternalMessageInfo func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } func (*VolumeAttachment) ProtoMessage() {} func (*VolumeAttachment) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{12} + return fileDescriptor_73e4f72503e71065, []int{12} } func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -415,7 +415,7 @@ var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } func (*VolumeAttachmentList) ProtoMessage() {} func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{13} + return fileDescriptor_73e4f72503e71065, []int{13} } func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -443,7 +443,7 @@ var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } func (*VolumeAttachmentSource) ProtoMessage() {} func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{14} + return fileDescriptor_73e4f72503e71065, []int{14} } func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -471,7 +471,7 @@ var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } func (*VolumeAttachmentSpec) ProtoMessage() {} func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{15} + return fileDescriptor_73e4f72503e71065, []int{15} } func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -499,7 +499,7 @@ var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } 
func (*VolumeAttachmentStatus) ProtoMessage() {} func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{16} + return fileDescriptor_73e4f72503e71065, []int{16} } func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -524,10 +524,66 @@ func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo +func (m *VolumeAttributesClass) Reset() { *m = VolumeAttributesClass{} } +func (*VolumeAttributesClass) ProtoMessage() {} +func (*VolumeAttributesClass) Descriptor() ([]byte, []int) { + return fileDescriptor_73e4f72503e71065, []int{17} +} +func (m *VolumeAttributesClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttributesClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttributesClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttributesClass.Merge(m, src) +} +func (m *VolumeAttributesClass) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttributesClass) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttributesClass.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttributesClass proto.InternalMessageInfo + +func (m *VolumeAttributesClassList) Reset() { *m = VolumeAttributesClassList{} } +func (*VolumeAttributesClassList) ProtoMessage() {} +func (*VolumeAttributesClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_73e4f72503e71065, []int{18} +} +func (m *VolumeAttributesClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttributesClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttributesClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttributesClassList.Merge(m, src) +} +func (m *VolumeAttributesClassList) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttributesClassList) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttributesClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttributesClassList proto.InternalMessageInfo + func (m *VolumeError) Reset() { *m = VolumeError{} } func (*VolumeError) ProtoMessage() {} func (*VolumeError) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{17} + return fileDescriptor_73e4f72503e71065, []int{19} } func (m *VolumeError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -555,7 +611,7 @@ var xxx_messageInfo_VolumeError proto.InternalMessageInfo func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} } func (*VolumeNodeResources) ProtoMessage() {} func (*VolumeNodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{18} + return fileDescriptor_73e4f72503e71065, []int{20} } func (m *VolumeNodeResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -600,121 +656,127 @@ func init() { proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSpec") proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus.AttachmentMetadataEntry") + proto.RegisterType((*VolumeAttributesClass)(nil), 
"k8s.io.api.storage.v1beta1.VolumeAttributesClass") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.VolumeAttributesClass.ParametersEntry") + proto.RegisterType((*VolumeAttributesClassList)(nil), "k8s.io.api.storage.v1beta1.VolumeAttributesClassList") proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1beta1.VolumeError") proto.RegisterType((*VolumeNodeResources)(nil), "k8s.io.api.storage.v1beta1.VolumeNodeResources") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto", fileDescriptor_7d2980599fd0de80) -} - -var fileDescriptor_7d2980599fd0de80 = []byte{ - // 1672 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcd, 0x6f, 0x1b, 0x4d, - 0x19, 0xcf, 0xc6, 0xce, 0xd7, 0x38, 0x69, 0x92, 0x49, 0x5a, 0x8c, 0x0f, 0x76, 0x64, 0x04, 0x4d, - 0xab, 0xb2, 0x6e, 0x43, 0xa9, 0xaa, 0x4a, 0x95, 0xc8, 0x26, 0x81, 0xba, 0x8d, 0xd3, 0x74, 0x1c, - 0x55, 0x55, 0xc5, 0x81, 0xf1, 0xee, 0xc4, 0x99, 0xc6, 0xfb, 0xd1, 0x9d, 0xd9, 0x10, 0x73, 0x82, - 0x0b, 0x67, 0xc4, 0x81, 0xbf, 0x80, 0x7f, 0x01, 0x24, 0xb8, 0x70, 0xa4, 0x12, 0x12, 0xaa, 0xb8, - 0xd0, 0x93, 0x45, 0xcd, 0x9f, 0xf0, 0x4a, 0xef, 0x21, 0x7a, 0x0f, 0xaf, 0x66, 0x76, 0xec, 0xfd, - 0xb2, 0x9b, 0xe4, 0x3d, 0xf8, 0xe6, 0x79, 0x3e, 0x7e, 0xcf, 0x33, 0xf3, 0x7c, 0xae, 0xc1, 0xce, - 0xe9, 0x63, 0xa6, 0x53, 0xb7, 0x76, 0x1a, 0xb4, 0x88, 0xef, 0x10, 0x4e, 0x58, 0xed, 0x8c, 0x38, - 0x96, 0xeb, 0xd7, 0x14, 0x03, 0x7b, 0xb4, 0xc6, 0xb8, 0xeb, 0xe3, 0x36, 0xa9, 0x9d, 0x3d, 0x68, - 0x11, 0x8e, 0x1f, 0xd4, 0xda, 0xc4, 0x21, 0x3e, 0xe6, 0xc4, 0xd2, 0x3d, 0xdf, 0xe5, 0x2e, 0x2c, - 0x85, 0xb2, 0x3a, 0xf6, 0xa8, 0xae, 0x64, 0x75, 0x25, 0x5b, 0xfa, 0x71, 0x9b, 0xf2, 0x93, 0xa0, - 0xa5, 0x9b, 0xae, 0x5d, 0x6b, 0xbb, 0x6d, 0xb7, 0x26, 0x55, 0x5a, 0xc1, 0xb1, 0x3c, 0xc9, 0x83, - 0xfc, 0x15, 0x42, 0x95, 0xaa, 0x31, 0xb3, 0xa6, 0xeb, 0x0b, 0x9b, 0x69, 0x73, 0xa5, 0x87, 0x91, - 0x8c, 0x8d, 0xcd, 0x13, 0xea, 0x10, 0xbf, 0x5b, 0xf3, 0x4e, 0xdb, 0x52, 0xc9, 0x27, 0xcc, 0x0d, - 0x7c, 0x93, 0x5c, 0x4b, 0x8b, 0xd5, 0x6c, 0xc2, 0xf1, 0x28, 0x5b, 0xb5, 0x71, 0x5a, 0x7e, 0xe0, - 0x70, 0x6a, 0x67, 0xcd, 0x3c, 0xba, 0x4c, 0x81, 0x99, 0x27, 0xc4, 0xc6, 0x69, 0xbd, 0xea, 0xdf, - 0x35, 0xb0, 0xb0, 0xd3, 0xac, 0xef, 0xfa, 0xf4, 0x8c, 0xf8, 0xf0, 0x57, 0x60, 0x5e, 0x78, 0x64, - 0x61, 0x8e, 0x8b, 0xda, 0x86, 0xb6, 0x59, 0xd8, 0xba, 0xaf, 0x47, 0x8f, 0x3c, 0x04, 0xd6, 0xbd, - 0xd3, 0xb6, 0x20, 0x30, 0x5d, 0x48, 0xeb, 0x67, 0x0f, 0xf4, 0x97, 0xad, 0x77, 0xc4, 0xe4, 0x0d, - 0xc2, 0xb1, 0x01, 0x3f, 0xf4, 0x2a, 0x53, 0xfd, 0x5e, 0x05, 0x44, 0x34, 0x34, 0x44, 0x85, 0x2f, - 0x40, 0x9e, 0x79, 0xc4, 0x2c, 0x4e, 0x4b, 0xf4, 0x3b, 0xfa, 0xf8, 0x10, 0xea, 0x43, 0xb7, 0x9a, - 0x1e, 0x31, 0x8d, 0x45, 0x05, 0x9b, 0x17, 0x27, 0x24, 0x41, 0xaa, 0x7f, 0xd3, 0xc0, 0xd2, 0x50, - 0x6a, 0x9f, 0x32, 0x0e, 0x7f, 0x99, 0xb9, 0x80, 0x7e, 0xb5, 0x0b, 0x08, 0x6d, 0xe9, 0xfe, 0x8a, - 0xb2, 0x33, 0x3f, 0xa0, 0xc4, 0x9c, 0x7f, 0x0e, 0x66, 0x28, 0x27, 0x36, 0x2b, 0x4e, 0x6f, 0xe4, - 0x36, 0x0b, 0x5b, 0x3f, 0xbc, 0x92, 0xf7, 0xc6, 0x92, 0x42, 0x9c, 0xa9, 0x0b, 0x5d, 0x14, 0x42, - 0x54, 0xff, 0x9b, 0x8f, 0xf9, 0x2e, 0xee, 0x04, 0x9f, 0x80, 0x1b, 0x98, 0x73, 0x6c, 0x9e, 0x20, - 0xf2, 0x3e, 0xa0, 0x3e, 0xb1, 0xe4, 0x0d, 0xe6, 0x0d, 0xd8, 0xef, 0x55, 0x6e, 0x6c, 0x27, 0x38, - 0x28, 0x25, 0x29, 0x74, 0x3d, 0xd7, 0xaa, 0x3b, 0xc7, 0xee, 0x4b, 0xa7, 0xe1, 0x06, 0x0e, 0x97, - 0x0f, 0xac, 0x74, 0x0f, 0x13, 0x1c, 0x94, 0x92, 0x84, 0x26, 0x58, 0x3f, 0x73, 0x3b, 0x81, 0x4d, - 0xf6, 
0xe9, 0x31, 0x31, 0xbb, 0x66, 0x87, 0x34, 0x5c, 0x8b, 0xb0, 0x62, 0x6e, 0x23, 0xb7, 0xb9, - 0x60, 0xd4, 0xfa, 0xbd, 0xca, 0xfa, 0xeb, 0x11, 0xfc, 0x8b, 0x5e, 0x65, 0x6d, 0x04, 0x1d, 0x8d, - 0x04, 0x83, 0x4f, 0xc1, 0xb2, 0x7a, 0xa1, 0x1d, 0xec, 0x61, 0x93, 0xf2, 0x6e, 0x31, 0x2f, 0x3d, - 0x5c, 0xeb, 0xf7, 0x2a, 0xcb, 0xcd, 0x24, 0x0b, 0xa5, 0x65, 0xe1, 0x33, 0xb0, 0x74, 0xcc, 0x7e, - 0xe1, 0xbb, 0x81, 0x77, 0xe8, 0x76, 0xa8, 0xd9, 0x2d, 0xce, 0x6c, 0x68, 0x9b, 0x0b, 0x46, 0xb5, - 0xdf, 0xab, 0x2c, 0xfd, 0xbc, 0x19, 0x63, 0x5c, 0xa4, 0x09, 0x28, 0xa9, 0x08, 0x09, 0x58, 0xe2, - 0xee, 0x29, 0x71, 0xc4, 0xd3, 0x11, 0xc6, 0x59, 0x71, 0x56, 0xc6, 0x72, 0xf3, 0x4b, 0xb1, 0x3c, - 0x8a, 0x29, 0x18, 0x37, 0x55, 0x38, 0x97, 0xe2, 0x54, 0x86, 0x92, 0xa8, 0x70, 0x07, 0xac, 0xfa, - 0x61, 0x70, 0x18, 0x22, 0x5e, 0xd0, 0xea, 0x50, 0x76, 0x52, 0x9c, 0x93, 0x37, 0xbe, 0xd9, 0xef, - 0x55, 0x56, 0x51, 0x9a, 0x89, 0xb2, 0xf2, 0xf0, 0x21, 0x58, 0x64, 0x64, 0x9f, 0x3a, 0xc1, 0x79, - 0x18, 0xd3, 0x79, 0xa9, 0xbf, 0xd2, 0xef, 0x55, 0x16, 0x9b, 0x7b, 0x11, 0x1d, 0x25, 0xa4, 0xaa, - 0x7f, 0xd5, 0xc0, 0xdc, 0x4e, 0xb3, 0x7e, 0xe0, 0x5a, 0x64, 0x02, 0x05, 0x5d, 0x4f, 0x14, 0xf4, - 0xed, 0x4b, 0x4a, 0x42, 0x38, 0x35, 0xb6, 0x9c, 0xbf, 0x0a, 0xcb, 0x59, 0xc8, 0xa8, 0x7e, 0xb4, - 0x01, 0xf2, 0x0e, 0xb6, 0x89, 0x74, 0x7d, 0x21, 0xd2, 0x39, 0xc0, 0x36, 0x41, 0x92, 0x03, 0x7f, - 0x04, 0x66, 0x1d, 0xd7, 0x22, 0xf5, 0x5d, 0xe9, 0xc0, 0x82, 0x71, 0x43, 0xc9, 0xcc, 0x1e, 0x48, - 0x2a, 0x52, 0x5c, 0xf1, 0x94, 0xdc, 0xf5, 0xdc, 0x8e, 0xdb, 0xee, 0xbe, 0x20, 0xdd, 0x41, 0x72, - 0xcb, 0xa7, 0x3c, 0x8a, 0xd1, 0x51, 0x42, 0x0a, 0xb6, 0x40, 0x01, 0x77, 0x3a, 0xae, 0x89, 0x39, - 0x6e, 0x75, 0x88, 0xcc, 0xd8, 0xc2, 0x56, 0xed, 0x4b, 0x77, 0x0c, 0x2b, 0x42, 0x18, 0x47, 0x6a, - 0x22, 0x30, 0x63, 0xb9, 0xdf, 0xab, 0x14, 0xb6, 0x23, 0x1c, 0x14, 0x07, 0xad, 0xfe, 0x45, 0x03, - 0x05, 0x75, 0xeb, 0x09, 0xb4, 0xb0, 0x67, 0xc9, 0x16, 0xf6, 0x83, 0x2b, 0xc4, 0x6b, 0x4c, 0x03, - 0x33, 0x87, 0x6e, 0xcb, 0xee, 0x75, 0x04, 0xe6, 0x2c, 0x19, 0x34, 0x56, 0xd4, 0x24, 0xf4, 0x9d, - 0x2b, 0x40, 0xab, 0x0e, 0xb9, 0xac, 0x0c, 0xcc, 0x85, 0x67, 0x86, 0x06, 0x50, 0xd5, 0xaf, 0x73, - 0x00, 0xee, 0x34, 0xeb, 0xa9, 0xfe, 0x30, 0x81, 0xb4, 0xa6, 0x60, 0x51, 0x64, 0xce, 0x20, 0x37, - 0x54, 0x7a, 0xff, 0xe4, 0x8a, 0x91, 0xc0, 0x2d, 0xd2, 0x69, 0x92, 0x0e, 0x31, 0xb9, 0xeb, 0x87, - 0x49, 0x76, 0x10, 0x03, 0x43, 0x09, 0x68, 0xb8, 0x0b, 0x56, 0x06, 0xed, 0xae, 0x83, 0x19, 0x13, - 0xc9, 0x5d, 0xcc, 0xc9, 0x64, 0x2e, 0x2a, 0x17, 0x57, 0x9a, 0x29, 0x3e, 0xca, 0x68, 0xc0, 0x37, - 0x60, 0xde, 0x8c, 0x77, 0xd6, 0x4b, 0xd2, 0x46, 0x1f, 0x2c, 0x2c, 0xfa, 0xab, 0x00, 0x3b, 0x9c, - 0xf2, 0xae, 0xb1, 0x28, 0x52, 0x66, 0xd8, 0x82, 0x87, 0x68, 0x90, 0x81, 0x55, 0x1b, 0x9f, 0x53, - 0x3b, 0xb0, 0xc3, 0xe4, 0x6e, 0xd2, 0xdf, 0x10, 0xd9, 0x7f, 0xaf, 0x6f, 0x42, 0xb6, 0xbe, 0x46, - 0x1a, 0x0c, 0x65, 0xf1, 0xab, 0xff, 0xd2, 0xc0, 0xad, 0x6c, 0xe0, 0x27, 0x50, 0x20, 0xcd, 0x64, - 0x81, 0xe8, 0x97, 0x64, 0x71, 0xca, 0xc1, 0x31, 0xb5, 0xf2, 0xc7, 0x59, 0xb0, 0x18, 0x8f, 0xe1, - 0x04, 0x12, 0xf8, 0xa7, 0xa0, 0xe0, 0xf9, 0xee, 0x19, 0x65, 0xd4, 0x75, 0x88, 0xaf, 0xba, 0xe3, - 0x9a, 0x52, 0x29, 0x1c, 0x46, 0x2c, 0x14, 0x97, 0x83, 0x1d, 0x00, 0x3c, 0xec, 0x63, 0x9b, 0x70, - 0x51, 0xc9, 0x39, 0xf9, 0x06, 0x8f, 0xbf, 0xf4, 0x06, 0xf1, 0x6b, 0xe9, 0x87, 0x43, 0xd5, 0x3d, - 0x87, 0xfb, 0xdd, 0xc8, 0xc5, 0x88, 0x81, 0x62, 0xf8, 0xf0, 0x14, 0x2c, 0xf9, 0xc4, 0xec, 0x60, - 0x6a, 0xab, 0xb1, 0x9e, 0x97, 0x6e, 0xee, 0x89, 0xf1, 0x8a, 0xe2, 0x8c, 0x8b, 0x5e, 0xe5, 0x7e, - 0x76, 0x45, 0xd7, 0x0f, 0x89, 
0xcf, 0x28, 0xe3, 0xc4, 0xe1, 0x61, 0xea, 0x24, 0x74, 0x50, 0x12, - 0x5b, 0x8c, 0x00, 0x5b, 0x0c, 0xc8, 0x97, 0x1e, 0xa7, 0xae, 0xc3, 0x8a, 0x33, 0xd1, 0x08, 0x68, - 0xc4, 0xe8, 0x28, 0x21, 0x05, 0xf7, 0xc1, 0xba, 0xe8, 0xd6, 0xbf, 0x0e, 0x0d, 0xec, 0x9d, 0x7b, - 0xd8, 0x11, 0x4f, 0x55, 0x9c, 0x95, 0xb3, 0xb8, 0x28, 0xb6, 0xa3, 0xed, 0x11, 0x7c, 0x34, 0x52, - 0x0b, 0xbe, 0x01, 0xab, 0xe1, 0x7a, 0x64, 0x50, 0xc7, 0xa2, 0x4e, 0x5b, 0x2c, 0x47, 0x72, 0x2d, - 0x58, 0x30, 0xee, 0x8a, 0xda, 0x78, 0x9d, 0x66, 0x5e, 0x8c, 0x22, 0xa2, 0x2c, 0x08, 0x7c, 0x0f, - 0x56, 0xa5, 0x45, 0x62, 0xa9, 0xc6, 0x42, 0x09, 0x2b, 0xce, 0x67, 0x77, 0x1b, 0xf1, 0x74, 0x22, - 0x91, 0x06, 0xed, 0x67, 0xd0, 0xa6, 0x8e, 0x88, 0x6f, 0x1b, 0xdf, 0x57, 0xf1, 0x5a, 0xdd, 0x4e, - 0x43, 0xa1, 0x2c, 0x7a, 0xe9, 0x29, 0x58, 0x4e, 0x05, 0x1c, 0xae, 0x80, 0xdc, 0x29, 0xe9, 0x86, - 0xf3, 0x1a, 0x89, 0x9f, 0x70, 0x1d, 0xcc, 0x9c, 0xe1, 0x4e, 0x40, 0xc2, 0x0c, 0x44, 0xe1, 0xe1, - 0xc9, 0xf4, 0x63, 0xad, 0xfa, 0x0f, 0x0d, 0x24, 0x1a, 0xdb, 0x04, 0x8a, 0xbb, 0x91, 0x2c, 0xee, - 0xcd, 0xab, 0x26, 0xf6, 0x98, 0xb2, 0xfe, 0x9d, 0x06, 0x16, 0xe3, 0x5b, 0x20, 0xbc, 0x07, 0xe6, - 0x71, 0x60, 0x51, 0xe2, 0x98, 0x83, 0x9d, 0x65, 0xe8, 0xcd, 0xb6, 0xa2, 0xa3, 0xa1, 0x84, 0xd8, - 0x11, 0xc9, 0xb9, 0x47, 0x7d, 0x2c, 0x32, 0xad, 0x49, 0x4c, 0xd7, 0xb1, 0x98, 0x7c, 0xa6, 0x5c, - 0xd8, 0x28, 0xf7, 0xd2, 0x4c, 0x94, 0x95, 0xaf, 0xfe, 0x79, 0x1a, 0xac, 0x84, 0x09, 0x12, 0x7e, - 0x22, 0xd8, 0xc4, 0xe1, 0x13, 0x68, 0x2f, 0x28, 0xb1, 0xf6, 0xdd, 0xbf, 0x7c, 0x25, 0x8a, 0xbc, - 0x1b, 0xb7, 0xff, 0xc1, 0xb7, 0x60, 0x96, 0x71, 0xcc, 0x03, 0x26, 0xc7, 0x5f, 0x61, 0x6b, 0xeb, - 0x5a, 0xa8, 0x52, 0x33, 0xda, 0xff, 0xc2, 0x33, 0x52, 0x88, 0xd5, 0x7f, 0x6a, 0x60, 0x3d, 0xad, - 0x32, 0x81, 0x84, 0x7b, 0x95, 0x4c, 0xb8, 0x7b, 0xd7, 0xb9, 0xd1, 0x98, 0xa4, 0xfb, 0x8f, 0x06, - 0x6e, 0x65, 0x2e, 0x2f, 0xe7, 0xac, 0xe8, 0x55, 0x5e, 0xaa, 0x23, 0x1e, 0x44, 0xeb, 0xb3, 0xec, - 0x55, 0x87, 0x23, 0xf8, 0x68, 0xa4, 0x16, 0x7c, 0x07, 0x56, 0xa8, 0xd3, 0xa1, 0x0e, 0x51, 0x63, - 0x39, 0x0a, 0xf7, 0xc8, 0x86, 0x92, 0x46, 0x96, 0x61, 0x5e, 0x17, 0xdb, 0x4b, 0x3d, 0x85, 0x82, - 0x32, 0xb8, 0xd5, 0x7f, 0x8f, 0x08, 0x8f, 0x5c, 0x2b, 0x45, 0x45, 0x49, 0x0a, 0xf1, 0x33, 0x15, - 0xa5, 0xe8, 0x68, 0x28, 0x21, 0x33, 0x48, 0x3e, 0x85, 0x72, 0xf4, 0x7a, 0x19, 0x24, 0x35, 0x63, - 0x19, 0x24, 0xcf, 0x48, 0x21, 0x0a, 0x4f, 0xc4, 0xda, 0x16, 0x5b, 0xcf, 0x86, 0x9e, 0x1c, 0x28, - 0x3a, 0x1a, 0x4a, 0x54, 0xbf, 0xc9, 0x8d, 0x88, 0x92, 0x4c, 0xc5, 0xd8, 0x95, 0x06, 0x5f, 0xf8, - 0xe9, 0x2b, 0x59, 0xc3, 0x2b, 0x59, 0xf0, 0x4f, 0x1a, 0x80, 0x78, 0x08, 0xd1, 0x18, 0xa4, 0x6a, - 0x98, 0x4f, 0xcf, 0xaf, 0x5f, 0x21, 0xfa, 0x76, 0x06, 0x2c, 0x9c, 0xd5, 0x25, 0xe5, 0x04, 0xcc, - 0x0a, 0xa0, 0x11, 0x1e, 0x40, 0x0a, 0x0a, 0x21, 0x75, 0xcf, 0xf7, 0x5d, 0x5f, 0x95, 0xec, 0xed, - 0xcb, 0x1d, 0x92, 0xe2, 0x46, 0x59, 0x7e, 0x13, 0x45, 0xfa, 0x17, 0xbd, 0x4a, 0x21, 0xc6, 0x47, - 0x71, 0x6c, 0x61, 0xca, 0x22, 0x91, 0xa9, 0xfc, 0x77, 0x30, 0xb5, 0x4b, 0xc6, 0x9b, 0x8a, 0x61, - 0x97, 0xf6, 0xc0, 0xf7, 0xc6, 0x3c, 0xd0, 0xb5, 0x66, 0xdb, 0xef, 0x35, 0x10, 0xb7, 0x01, 0xf7, - 0x41, 0x9e, 0x53, 0x55, 0x89, 0x85, 0xad, 0xbb, 0x57, 0xeb, 0x30, 0x47, 0xd4, 0x26, 0x51, 0xa3, - 0x14, 0x27, 0x24, 0x51, 0xe0, 0x1d, 0x30, 0x67, 0x13, 0xc6, 0x70, 0x5b, 0x59, 0x8e, 0x3e, 0xa0, - 0x1a, 0x21, 0x19, 0x0d, 0xf8, 0xd5, 0x47, 0x60, 0x6d, 0xc4, 0x27, 0x29, 0xac, 0x80, 0x19, 0x53, - 0xfe, 0xa5, 0x20, 0x1c, 0x9a, 0x31, 0x16, 0x44, 0x97, 0xd9, 0x91, 0xff, 0x25, 0x84, 0x74, 0xe3, - 0x67, 0x1f, 0x3e, 0x97, 0xa7, 0x3e, 0x7e, 0x2e, 0x4f, 
0x7d, 0xfa, 0x5c, 0x9e, 0xfa, 0x6d, 0xbf, - 0xac, 0x7d, 0xe8, 0x97, 0xb5, 0x8f, 0xfd, 0xb2, 0xf6, 0xa9, 0x5f, 0xd6, 0xfe, 0xd7, 0x2f, 0x6b, - 0x7f, 0xf8, 0x7f, 0x79, 0xea, 0x6d, 0x69, 0xfc, 0xbf, 0xb5, 0xdf, 0x06, 0x00, 0x00, 0xff, 0xff, - 0xee, 0x44, 0x0b, 0xed, 0xe3, 0x15, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/storage/v1beta1/generated.proto", fileDescriptor_73e4f72503e71065) +} + +var fileDescriptor_73e4f72503e71065 = []byte{ + // 1728 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcd, 0x6f, 0x23, 0x49, + 0x15, 0x4f, 0xc7, 0xce, 0x57, 0x39, 0x99, 0x24, 0x35, 0x99, 0xc5, 0xeb, 0x83, 0x1d, 0x19, 0xc1, + 0x66, 0x46, 0x4b, 0x7b, 0x12, 0x96, 0xd5, 0x68, 0xa5, 0x95, 0x48, 0x27, 0x81, 0xf5, 0x6e, 0x9c, + 0xc9, 0x96, 0xa3, 0xd1, 0x6a, 0xc5, 0x81, 0x72, 0xbb, 0xe2, 0xd4, 0xc6, 0xfd, 0xb1, 0x5d, 0xd5, + 0x21, 0xe6, 0x04, 0x17, 0xce, 0x88, 0x03, 0x7f, 0x01, 0xff, 0x02, 0x48, 0x70, 0xe1, 0xc8, 0x48, + 0x48, 0x68, 0xe1, 0xc2, 0x9e, 0x2c, 0xc6, 0xf3, 0x27, 0x20, 0x71, 0x88, 0x38, 0xa0, 0xaa, 0x2e, + 0xf7, 0xb7, 0x27, 0x36, 0x2b, 0xf9, 0xe6, 0x7a, 0x1f, 0xbf, 0x7a, 0x55, 0xef, 0xf7, 0x5e, 0xbd, + 0x36, 0x78, 0x72, 0xfd, 0x8c, 0xe9, 0xd4, 0x69, 0x60, 0x97, 0x36, 0x18, 0x77, 0x3c, 0xdc, 0x23, + 0x8d, 0x9b, 0xfd, 0x0e, 0xe1, 0x78, 0xbf, 0xd1, 0x23, 0x36, 0xf1, 0x30, 0x27, 0x5d, 0xdd, 0xf5, + 0x1c, 0xee, 0xc0, 0x4a, 0x60, 0xab, 0x63, 0x97, 0xea, 0xca, 0x56, 0x57, 0xb6, 0x95, 0xef, 0xf5, + 0x28, 0xbf, 0xf2, 0x3b, 0xba, 0xe9, 0x58, 0x8d, 0x9e, 0xd3, 0x73, 0x1a, 0xd2, 0xa5, 0xe3, 0x5f, + 0xca, 0x95, 0x5c, 0xc8, 0x5f, 0x01, 0x54, 0xa5, 0x1e, 0xdb, 0xd6, 0x74, 0x3c, 0xb1, 0x67, 0x7a, + 0xbb, 0xca, 0x7b, 0x91, 0x8d, 0x85, 0xcd, 0x2b, 0x6a, 0x13, 0x6f, 0xd0, 0x70, 0xaf, 0x7b, 0xd2, + 0xc9, 0x23, 0xcc, 0xf1, 0x3d, 0x93, 0xcc, 0xe4, 0xc5, 0x1a, 0x16, 0xe1, 0x38, 0x6f, 0xaf, 0xc6, + 0x24, 0x2f, 0xcf, 0xb7, 0x39, 0xb5, 0xb2, 0xdb, 0xbc, 0x7f, 0x9f, 0x03, 0x33, 0xaf, 0x88, 0x85, + 0xd3, 0x7e, 0xf5, 0x3f, 0x69, 0x60, 0xed, 0xa8, 0xdd, 0x3c, 0xf6, 0xe8, 0x0d, 0xf1, 0xe0, 0x4f, + 0xc1, 0xaa, 0x88, 0xa8, 0x8b, 0x39, 0x2e, 0x6b, 0xbb, 0xda, 0x5e, 0xe9, 0xe0, 0xa9, 0x1e, 0x5d, + 0x72, 0x08, 0xac, 0xbb, 0xd7, 0x3d, 0x21, 0x60, 0xba, 0xb0, 0xd6, 0x6f, 0xf6, 0xf5, 0xe7, 0x9d, + 0x2f, 0x88, 0xc9, 0x5b, 0x84, 0x63, 0x03, 0xbe, 0x1c, 0xd6, 0x16, 0x46, 0xc3, 0x1a, 0x88, 0x64, + 0x28, 0x44, 0x85, 0x9f, 0x80, 0x22, 0x73, 0x89, 0x59, 0x5e, 0x94, 0xe8, 0x8f, 0xf5, 0xc9, 0x29, + 0xd4, 0xc3, 0xb0, 0xda, 0x2e, 0x31, 0x8d, 0x75, 0x05, 0x5b, 0x14, 0x2b, 0x24, 0x41, 0xea, 0x7f, + 0xd4, 0xc0, 0x46, 0x68, 0x75, 0x4a, 0x19, 0x87, 0x3f, 0xc9, 0x1c, 0x40, 0x9f, 0xee, 0x00, 0xc2, + 0x5b, 0x86, 0xbf, 0xa5, 0xf6, 0x59, 0x1d, 0x4b, 0x62, 0xc1, 0x7f, 0x0c, 0x96, 0x28, 0x27, 0x16, + 0x2b, 0x2f, 0xee, 0x16, 0xf6, 0x4a, 0x07, 0xdf, 0x99, 0x2a, 0x7a, 0x63, 0x43, 0x21, 0x2e, 0x35, + 0x85, 0x2f, 0x0a, 0x20, 0xea, 0xff, 0x2c, 0xc6, 0x62, 0x17, 0x67, 0x82, 0x1f, 0x80, 0x07, 0x98, + 0x73, 0x6c, 0x5e, 0x21, 0xf2, 0xa5, 0x4f, 0x3d, 0xd2, 0x95, 0x27, 0x58, 0x35, 0xe0, 0x68, 0x58, + 0x7b, 0x70, 0x98, 0xd0, 0xa0, 0x94, 0xa5, 0xf0, 0x75, 0x9d, 0x6e, 0xd3, 0xbe, 0x74, 0x9e, 0xdb, + 0x2d, 0xc7, 0xb7, 0xb9, 0xbc, 0x60, 0xe5, 0x7b, 0x9e, 0xd0, 0xa0, 0x94, 0x25, 0x34, 0xc1, 0xce, + 0x8d, 0xd3, 0xf7, 0x2d, 0x72, 0x4a, 0x2f, 0x89, 0x39, 0x30, 0xfb, 0xa4, 0xe5, 0x74, 0x09, 0x2b, + 0x17, 0x76, 0x0b, 0x7b, 0x6b, 0x46, 0x63, 0x34, 0xac, 0xed, 0xbc, 0xc8, 0xd1, 0xdf, 0x0d, 0x6b, + 0x0f, 0x73, 0xe4, 0x28, 0x17, 0x0c, 0x7e, 0x08, 0x36, 0xd5, 0x0d, 0x1d, 0x61, 0x17, 0x9b, 0x94, + 0x0f, 0xca, 0x45, 0x19, 
0xe1, 0xc3, 0xd1, 0xb0, 0xb6, 0xd9, 0x4e, 0xaa, 0x50, 0xda, 0x16, 0x7e, + 0x04, 0x36, 0x2e, 0xd9, 0x8f, 0x3d, 0xc7, 0x77, 0xcf, 0x9d, 0x3e, 0x35, 0x07, 0xe5, 0xa5, 0x5d, + 0x6d, 0x6f, 0xcd, 0xa8, 0x8f, 0x86, 0xb5, 0x8d, 0x1f, 0xb5, 0x63, 0x8a, 0xbb, 0xb4, 0x00, 0x25, + 0x1d, 0x21, 0x01, 0x1b, 0xdc, 0xb9, 0x26, 0xb6, 0xb8, 0x3a, 0xc2, 0x38, 0x2b, 0x2f, 0xcb, 0x5c, + 0xee, 0xbd, 0x29, 0x97, 0x17, 0x31, 0x07, 0xe3, 0x91, 0x4a, 0xe7, 0x46, 0x5c, 0xca, 0x50, 0x12, + 0x15, 0x1e, 0x81, 0x6d, 0x2f, 0x48, 0x0e, 0x43, 0xc4, 0xf5, 0x3b, 0x7d, 0xca, 0xae, 0xca, 0x2b, + 0xf2, 0xc4, 0x8f, 0x46, 0xc3, 0xda, 0x36, 0x4a, 0x2b, 0x51, 0xd6, 0x1e, 0xbe, 0x07, 0xd6, 0x19, + 0x39, 0xa5, 0xb6, 0x7f, 0x1b, 0xe4, 0x74, 0x55, 0xfa, 0x6f, 0x8d, 0x86, 0xb5, 0xf5, 0xf6, 0x49, + 0x24, 0x47, 0x09, 0xab, 0xfa, 0x1f, 0x34, 0xb0, 0x72, 0xd4, 0x6e, 0x9e, 0x39, 0x5d, 0x32, 0x87, + 0x82, 0x6e, 0x26, 0x0a, 0xfa, 0x9d, 0x7b, 0x4a, 0x42, 0x04, 0x35, 0xb1, 0x9c, 0xff, 0x1d, 0x94, + 0xb3, 0xb0, 0x51, 0xfd, 0x68, 0x17, 0x14, 0x6d, 0x6c, 0x11, 0x19, 0xfa, 0x5a, 0xe4, 0x73, 0x86, + 0x2d, 0x82, 0xa4, 0x06, 0x7e, 0x17, 0x2c, 0xdb, 0x4e, 0x97, 0x34, 0x8f, 0x65, 0x00, 0x6b, 0xc6, + 0x03, 0x65, 0xb3, 0x7c, 0x26, 0xa5, 0x48, 0x69, 0xc5, 0x55, 0x72, 0xc7, 0x75, 0xfa, 0x4e, 0x6f, + 0xf0, 0x09, 0x19, 0x8c, 0xc9, 0x2d, 0xaf, 0xf2, 0x22, 0x26, 0x47, 0x09, 0x2b, 0xd8, 0x01, 0x25, + 0xdc, 0xef, 0x3b, 0x26, 0xe6, 0xb8, 0xd3, 0x27, 0x92, 0xb1, 0xa5, 0x83, 0xc6, 0x9b, 0xce, 0x18, + 0x54, 0x84, 0xd8, 0x1c, 0xa9, 0x17, 0x81, 0x19, 0x9b, 0xa3, 0x61, 0xad, 0x74, 0x18, 0xe1, 0xa0, + 0x38, 0x68, 0xfd, 0xf7, 0x1a, 0x28, 0xa9, 0x53, 0xcf, 0xa1, 0x85, 0x7d, 0x94, 0x6c, 0x61, 0xdf, + 0x9e, 0x22, 0x5f, 0x13, 0x1a, 0x98, 0x19, 0x86, 0x2d, 0xbb, 0xd7, 0x05, 0x58, 0xe9, 0xca, 0xa4, + 0xb1, 0xb2, 0x26, 0xa1, 0x1f, 0x4f, 0x01, 0xad, 0x3a, 0xe4, 0xa6, 0xda, 0x60, 0x25, 0x58, 0x33, + 0x34, 0x86, 0xaa, 0xff, 0xa7, 0x00, 0xe0, 0x51, 0xbb, 0x99, 0xea, 0x0f, 0x73, 0xa0, 0x35, 0x05, + 0xeb, 0x82, 0x39, 0x63, 0x6e, 0x28, 0x7a, 0x7f, 0x7f, 0xca, 0x4c, 0xe0, 0x0e, 0xe9, 0xb7, 0x49, + 0x9f, 0x98, 0xdc, 0xf1, 0x02, 0x92, 0x9d, 0xc5, 0xc0, 0x50, 0x02, 0x1a, 0x1e, 0x83, 0xad, 0x71, + 0xbb, 0xeb, 0x63, 0xc6, 0x04, 0xb9, 0xcb, 0x05, 0x49, 0xe6, 0xb2, 0x0a, 0x71, 0xab, 0x9d, 0xd2, + 0xa3, 0x8c, 0x07, 0xfc, 0x0c, 0xac, 0x9a, 0xf1, 0xce, 0x7a, 0x0f, 0x6d, 0xf4, 0xf1, 0xc0, 0xa2, + 0x7f, 0xea, 0x63, 0x9b, 0x53, 0x3e, 0x30, 0xd6, 0x05, 0x65, 0xc2, 0x16, 0x1c, 0xa2, 0x41, 0x06, + 0xb6, 0x2d, 0x7c, 0x4b, 0x2d, 0xdf, 0x0a, 0xc8, 0xdd, 0xa6, 0x3f, 0x27, 0xb2, 0xff, 0xce, 0xbe, + 0x85, 0x6c, 0x7d, 0xad, 0x34, 0x18, 0xca, 0xe2, 0xd7, 0xff, 0xaa, 0x81, 0xb7, 0xb2, 0x89, 0x9f, + 0x43, 0x81, 0xb4, 0x93, 0x05, 0xa2, 0xdf, 0xc3, 0xe2, 0x54, 0x80, 0x13, 0x6a, 0xe5, 0x37, 0xcb, + 0x60, 0x3d, 0x9e, 0xc3, 0x39, 0x10, 0xf8, 0x07, 0xa0, 0xe4, 0x7a, 0xce, 0x0d, 0x65, 0xd4, 0xb1, + 0x89, 0xa7, 0xba, 0xe3, 0x43, 0xe5, 0x52, 0x3a, 0x8f, 0x54, 0x28, 0x6e, 0x07, 0xfb, 0x00, 0xb8, + 0xd8, 0xc3, 0x16, 0xe1, 0xa2, 0x92, 0x0b, 0xf2, 0x0e, 0x9e, 0xbd, 0xe9, 0x0e, 0xe2, 0xc7, 0xd2, + 0xcf, 0x43, 0xd7, 0x13, 0x9b, 0x7b, 0x83, 0x28, 0xc4, 0x48, 0x81, 0x62, 0xf8, 0xf0, 0x1a, 0x6c, + 0x78, 0xc4, 0xec, 0x63, 0x6a, 0xa9, 0x67, 0xbd, 0x28, 0xc3, 0x3c, 0x11, 0xcf, 0x2b, 0x8a, 0x2b, + 0xee, 0x86, 0xb5, 0xa7, 0xd9, 0x11, 0x5d, 0x3f, 0x27, 0x1e, 0xa3, 0x8c, 0x13, 0x9b, 0x07, 0xd4, + 0x49, 0xf8, 0xa0, 0x24, 0xb6, 0x78, 0x02, 0x2c, 0xf1, 0x40, 0x3e, 0x77, 0x39, 0x75, 0x6c, 0x56, + 0x5e, 0x8a, 0x9e, 0x80, 0x56, 0x4c, 0x8e, 0x12, 0x56, 0xf0, 0x14, 0xec, 0x88, 0x6e, 0xfd, 0xb3, + 0x60, 0x83, 0x93, 0x5b, 0x17, 0xdb, 0xe2, 0xaa, 
0xca, 0xcb, 0xf2, 0x2d, 0x2e, 0x8b, 0xe9, 0xe8, + 0x30, 0x47, 0x8f, 0x72, 0xbd, 0xe0, 0x67, 0x60, 0x3b, 0x18, 0x8f, 0x0c, 0x6a, 0x77, 0xa9, 0xdd, + 0x13, 0xc3, 0x91, 0x1c, 0x0b, 0xd6, 0x8c, 0x27, 0xa2, 0x36, 0x5e, 0xa4, 0x95, 0x77, 0x79, 0x42, + 0x94, 0x05, 0x81, 0x5f, 0x82, 0x6d, 0xb9, 0x23, 0xe9, 0xaa, 0xc6, 0x42, 0x09, 0x2b, 0xaf, 0x66, + 0x67, 0x1b, 0x71, 0x75, 0x82, 0x48, 0xe3, 0xf6, 0x33, 0x6e, 0x53, 0x17, 0xc4, 0xb3, 0x8c, 0xb7, + 0x55, 0xbe, 0xb6, 0x0f, 0xd3, 0x50, 0x28, 0x8b, 0x5e, 0xf9, 0x10, 0x6c, 0xa6, 0x12, 0x0e, 0xb7, + 0x40, 0xe1, 0x9a, 0x0c, 0x82, 0xf7, 0x1a, 0x89, 0x9f, 0x70, 0x07, 0x2c, 0xdd, 0xe0, 0xbe, 0x4f, + 0x02, 0x06, 0xa2, 0x60, 0xf1, 0xc1, 0xe2, 0x33, 0xad, 0xfe, 0x67, 0x0d, 0x24, 0x1a, 0xdb, 0x1c, + 0x8a, 0xbb, 0x95, 0x2c, 0xee, 0xbd, 0x69, 0x89, 0x3d, 0xa1, 0xac, 0x7f, 0xa9, 0x81, 0xf5, 0xf8, + 0x14, 0x08, 0xdf, 0x05, 0xab, 0xd8, 0xef, 0x52, 0x62, 0x9b, 0xe3, 0x99, 0x25, 0x8c, 0xe6, 0x50, + 0xc9, 0x51, 0x68, 0x21, 0x66, 0x44, 0x72, 0xeb, 0x52, 0x0f, 0x0b, 0xa6, 0xb5, 0x89, 0xe9, 0xd8, + 0x5d, 0x26, 0xaf, 0xa9, 0x10, 0x34, 0xca, 0x93, 0xb4, 0x12, 0x65, 0xed, 0xeb, 0xbf, 0x5b, 0x04, + 0x5b, 0x01, 0x41, 0x82, 0x4f, 0x04, 0x8b, 0xd8, 0x7c, 0x0e, 0xed, 0x05, 0x25, 0xc6, 0xbe, 0xa7, + 0xf7, 0x8f, 0x44, 0x51, 0x74, 0x93, 0xe6, 0x3f, 0xf8, 0x39, 0x58, 0x66, 0x1c, 0x73, 0x9f, 0xc9, + 0xe7, 0xaf, 0x74, 0x70, 0x30, 0x13, 0xaa, 0xf4, 0x8c, 0xe6, 0xbf, 0x60, 0x8d, 0x14, 0x62, 0xfd, + 0x2f, 0x1a, 0xd8, 0x49, 0xbb, 0xcc, 0x81, 0x70, 0x9f, 0x26, 0x09, 0xf7, 0xee, 0x2c, 0x27, 0x9a, + 0x40, 0xba, 0x7f, 0x68, 0xe0, 0xad, 0xcc, 0xe1, 0xe5, 0x3b, 0x2b, 0x7a, 0x95, 0x9b, 0xea, 0x88, + 0x67, 0xd1, 0xf8, 0x2c, 0x7b, 0xd5, 0x79, 0x8e, 0x1e, 0xe5, 0x7a, 0xc1, 0x2f, 0xc0, 0x16, 0xb5, + 0xfb, 0xd4, 0x26, 0xea, 0x59, 0x8e, 0xd2, 0x9d, 0xdb, 0x50, 0xd2, 0xc8, 0x32, 0xcd, 0x3b, 0x62, + 0x7a, 0x69, 0xa6, 0x50, 0x50, 0x06, 0xb7, 0xfe, 0xb7, 0x9c, 0xf4, 0xc8, 0xb1, 0x52, 0x54, 0x94, + 0x94, 0x10, 0x2f, 0x53, 0x51, 0x4a, 0x8e, 0x42, 0x0b, 0xc9, 0x20, 0x79, 0x15, 0x2a, 0xd0, 0xd9, + 0x18, 0x24, 0x3d, 0x63, 0x0c, 0x92, 0x6b, 0xa4, 0x10, 0x45, 0x24, 0x62, 0x6c, 0x8b, 0x8d, 0x67, + 0x61, 0x24, 0x67, 0x4a, 0x8e, 0x42, 0x8b, 0xfa, 0x7f, 0x0b, 0x39, 0x59, 0x92, 0x54, 0x8c, 0x1d, + 0x69, 0xfc, 0x85, 0x9f, 0x3e, 0x52, 0x37, 0x3c, 0x52, 0x17, 0xfe, 0x56, 0x03, 0x10, 0x87, 0x10, + 0xad, 0x31, 0x55, 0x03, 0x3e, 0x7d, 0x3c, 0x7b, 0x85, 0xe8, 0x87, 0x19, 0xb0, 0xe0, 0xad, 0xae, + 0xa8, 0x20, 0x60, 0xd6, 0x00, 0xe5, 0x44, 0x00, 0x29, 0x28, 0x05, 0xd2, 0x13, 0xcf, 0x73, 0x3c, + 0x55, 0xb2, 0xef, 0xdc, 0x1f, 0x90, 0x34, 0x37, 0xaa, 0xf2, 0x9b, 0x28, 0xf2, 0xbf, 0x1b, 0xd6, + 0x4a, 0x31, 0x3d, 0x8a, 0x63, 0x8b, 0xad, 0xba, 0x24, 0xda, 0xaa, 0xf8, 0x7f, 0x6c, 0x75, 0x4c, + 0x26, 0x6f, 0x15, 0xc3, 0xae, 0x9c, 0x80, 0x6f, 0x4d, 0xb8, 0xa0, 0x99, 0xde, 0xb6, 0xd7, 0x8b, + 0xe0, 0x51, 0x78, 0xff, 0x1e, 0xed, 0xf8, 0x9c, 0xb0, 0x79, 0x4d, 0x7e, 0x07, 0x00, 0x04, 0x9f, + 0x4f, 0x92, 0xaa, 0xc1, 0xe0, 0x17, 0x7a, 0x1c, 0x87, 0x1a, 0x14, 0xb3, 0x82, 0x7e, 0xce, 0xd8, + 0x77, 0x38, 0x15, 0xb9, 0xe2, 0x87, 0x9b, 0x75, 0xfe, 0xfb, 0xa6, 0x13, 0xc4, 0xdf, 0x35, 0xf0, + 0x76, 0x6e, 0x20, 0x73, 0xe8, 0xec, 0x2f, 0x92, 0x9d, 0x7d, 0x7f, 0xe6, 0xcb, 0x9a, 0xd0, 0xde, + 0x7f, 0xa5, 0x81, 0x38, 0x3b, 0xe1, 0x29, 0x28, 0x72, 0xaa, 0x7a, 0x78, 0xe9, 0xe0, 0xc9, 0x74, + 0x27, 0xb8, 0xa0, 0x16, 0x89, 0x9e, 0x58, 0xb1, 0x42, 0x12, 0x05, 0x3e, 0x06, 0x2b, 0x16, 0x61, + 0x0c, 0xf7, 0xc6, 0xc4, 0x08, 0x3f, 0xbd, 0x5b, 0x81, 0x18, 0x8d, 0xf5, 0xf5, 0xf7, 0xc1, 0xc3, + 0x9c, 0x3f, 0x33, 0x60, 0x0d, 0x2c, 0x99, 0xf2, 0xcf, 0x28, 0x11, 0xd0, 
0x92, 0xb1, 0x26, 0x0e, + 0x70, 0x24, 0xff, 0x85, 0x0a, 0xe4, 0xc6, 0x0f, 0x5f, 0xbe, 0xaa, 0x2e, 0x7c, 0xf5, 0xaa, 0xba, + 0xf0, 0xf5, 0xab, 0xea, 0xc2, 0x2f, 0x46, 0x55, 0xed, 0xe5, 0xa8, 0xaa, 0x7d, 0x35, 0xaa, 0x6a, + 0x5f, 0x8f, 0xaa, 0xda, 0xbf, 0x46, 0x55, 0xed, 0xd7, 0xaf, 0xab, 0x0b, 0x9f, 0x57, 0x26, 0xff, + 0xcf, 0xff, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3a, 0x30, 0xdb, 0x24, 0x04, 0x18, 0x00, 0x00, } func (m *CSIDriver) Marshal() (dAtA []byte, err error) { @@ -1665,6 +1727,115 @@ func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } +func (m *VolumeAttributesClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttributesClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttributesClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Parameters) > 0 { + keysForParameters := make([]string, 0, len(m.Parameters)) + for k := range m.Parameters { + keysForParameters = append(keysForParameters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- { + v := m.Parameters[string(keysForParameters[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForParameters[iNdEx]) + copy(dAtA[i:], keysForParameters[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForParameters[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.DriverName) + copy(dAtA[i:], m.DriverName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeAttributesClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttributesClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttributesClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *VolumeError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2087,6 +2258,44 @@ func (m *VolumeAttachmentStatus) Size() (n int) { return n } +func (m *VolumeAttributesClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n 
+= 1 + l + sovGenerated(uint64(l)) + l = len(m.DriverName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Parameters) > 0 { + for k, v := range m.Parameters { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *VolumeAttributesClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *VolumeError) Size() (n int) { if m == nil { return 0 @@ -2385,6 +2594,44 @@ func (this *VolumeAttachmentStatus) String() string { }, "") return s } +func (this *VolumeAttributesClass) String() string { + if this == nil { + return "nil" + } + keysForParameters := make([]string, 0, len(this.Parameters)) + for k := range this.Parameters { + keysForParameters = append(keysForParameters, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + mapStringForParameters := "map[string]string{" + for _, k := range keysForParameters { + mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) + } + mapStringForParameters += "}" + s := strings.Join([]string{`&VolumeAttributesClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, + `Parameters:` + mapStringForParameters + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttributesClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]VolumeAttributesClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttributesClass", "VolumeAttributesClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&VolumeAttributesClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} func (this *VolumeError) String() string { if this == nil { return "nil" @@ -5155,6 +5402,365 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } return nil } +func (m *VolumeAttributesClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttributesClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttributesClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DriverName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Parameters == nil { + m.Parameters = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + 
return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Parameters[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttributesClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttributesClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttributesClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, VolumeAttributesClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *VolumeError) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index b99fd39e48..dfef3f6cc5 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -47,7 +47,7 @@ message CSIDriver { // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and // alphanumerics 
between. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec represents the specification of the CSI Driver. optional CSIDriverSpec spec = 2; @@ -58,7 +58,7 @@ message CSIDriverList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CSIDriver repeated CSIDriver items = 2; @@ -127,6 +127,7 @@ message CSIDriverSpec { // This field is immutable. // // +optional + // +listType=atomic repeated string volumeLifecycleModes = 3; // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage @@ -228,7 +229,7 @@ message CSIDriverSpec { // CSINode has an OwnerReference that points to the corresponding node object. message CSINode { // metadata.name must be the Kubernetes node name. - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification of CSINode optional CSINodeSpec spec = 2; @@ -263,6 +264,7 @@ message CSINodeDriver { // It is possible for different nodes to use different topology keys. // This can be empty if driver does not support topology. // +optional + // +listType=atomic repeated string topologyKeys = 3; // allocatable represents the volume resources of a node that are available for scheduling. @@ -275,7 +277,7 @@ message CSINodeList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CSINode repeated CSINode items = 2; @@ -287,6 +289,8 @@ message CSINodeSpec { // If all drivers in the list are uninstalled, this can become empty. // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated CSINodeDriver drivers = 1; } @@ -325,7 +329,7 @@ message CSIStorageCapacity { // // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is @@ -334,7 +338,7 @@ message CSIStorageCapacity { // immutable. // // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass @@ -354,7 +358,7 @@ message CSIStorageCapacity { // unavailable. 
// // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the @@ -368,7 +372,7 @@ message CSIStorageCapacity { // API is ResourceRequirements.Requests in a volume claim. // // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5; } // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. @@ -376,11 +380,9 @@ message CSIStorageCapacityList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CSIStorageCapacity objects. - // +listType=map - // +listMapKey=name repeated CSIStorageCapacity items = 2; } @@ -393,7 +395,7 @@ message StorageClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // provisioner indicates the type of the provisioner. optional string provisioner = 2; @@ -412,6 +414,7 @@ message StorageClass { // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional + // +listType=atomic repeated string mountOptions = 5; // allowVolumeExpansion shows whether the storage class allow volume expand @@ -430,7 +433,7 @@ message StorageClass { // This field is only honored by servers that enable the VolumeScheduling feature. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8; + repeated .k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8; } // StorageClassList is a collection of storage classes. @@ -438,7 +441,7 @@ message StorageClassList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of StorageClasses repeated StorageClass items = 2; @@ -465,7 +468,7 @@ message VolumeAttachment { // Standard object metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. 
@@ -483,7 +486,7 @@ message VolumeAttachmentList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; @@ -505,7 +508,7 @@ message VolumeAttachmentSource { // PersistentVolumeSpec. This field is beta-level and is only // honored by servers that enabled the CSIMigration feature. // +optional - optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; + optional .k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. @@ -549,11 +552,51 @@ message VolumeAttachmentStatus { optional VolumeError detachError = 4; } +// VolumeAttributesClass represents a specification of mutable volume attributes +// defined by the CSI driver. The class can be specified during dynamic provisioning +// of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning. +message VolumeAttributesClass { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Name of the CSI driver + // This field is immutable. + optional string driverName = 2; + + // parameters hold volume attributes defined by the CSI driver. These values + // are opaque to the Kubernetes and are passed directly to the CSI driver. + // The underlying storage provider supports changing these attributes on an + // existing volume, however the parameters field itself is immutable. To + // invoke a volume update, a new VolumeAttributesClass should be created with + // new parameters, and the PersistentVolumeClaim should be updated to reference + // the new VolumeAttributesClass. + // + // This field is required and must contain at least one key/value pair. + // The keys cannot be empty, and the maximum number of parameters is 512, with + // a cumulative max size of 256K. If the CSI driver rejects invalid parameters, + // the target PersistentVolumeClaim will be set to an "Infeasible" state in the + // modifyVolumeStatus field. + map parameters = 3; +} + +// VolumeAttributesClassList is a collection of VolumeAttributesClass objects. +message VolumeAttributesClassList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of VolumeAttributesClass objects. + repeated VolumeAttributesClass items = 2; +} + // VolumeError captures an error encountered during a volume operation. message VolumeError { // time represents the time the error was encountered. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; // message represents the error encountered during Attach or Detach operation. 
// This string may be logged, so it should not contain sensitive diff --git a/vendor/k8s.io/api/storage/v1beta1/register.go b/vendor/k8s.io/api/storage/v1beta1/register.go index a281d0f26e..e2214ef2f5 100644 --- a/vendor/k8s.io/api/storage/v1beta1/register.go +++ b/vendor/k8s.io/api/storage/v1beta1/register.go @@ -58,6 +58,9 @@ func addKnownTypes(scheme *runtime.Scheme) error { &CSIStorageCapacity{}, &CSIStorageCapacityList{}, + + &VolumeAttributesClass{}, + &VolumeAttributesClassList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index 0f5ade3c13..ce294e3dba 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -59,6 +59,7 @@ type StorageClass struct { // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional + // +listType=atomic MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,5,opt,name=mountOptions"` // allowVolumeExpansion shows whether the storage class allow volume expand @@ -347,6 +348,7 @@ type CSIDriverSpec struct { // This field is immutable. // // +optional + // +listType=atomic VolumeLifecycleModes []VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty" protobuf:"bytes,3,opt,name=volumeLifecycleModes"` // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage @@ -535,6 +537,8 @@ type CSINodeSpec struct { // If all drivers in the list are uninstalled, this can become empty. // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name Drivers []CSINodeDriver `json:"drivers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=drivers"` } @@ -567,6 +571,7 @@ type CSINodeDriver struct { // It is possible for different nodes to use different topology keys. // This can be empty if driver does not support topology. // +optional + // +listType=atomic TopologyKeys []string `json:"topologyKeys" protobuf:"bytes,3,rep,name=topologyKeys"` // allocatable represents the volume resources of a node that are available for scheduling. @@ -707,7 +712,57 @@ type CSIStorageCapacityList struct { metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // items is the list of CSIStorageCapacity objects. - // +listType=map - // +listMapKey=name Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +genclient +// +genclient:nonNamespaced +// +k8s:prerelease-lifecycle-gen:introduced=1.31 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttributesClass represents a specification of mutable volume attributes +// defined by the CSI driver. The class can be specified during dynamic provisioning +// of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning. +type VolumeAttributesClass struct { + metav1.TypeMeta `json:",inline"` + + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Name of the CSI driver + // This field is immutable. + DriverName string `json:"driverName" protobuf:"bytes,2,opt,name=driverName"` + + // parameters hold volume attributes defined by the CSI driver. These values + // are opaque to the Kubernetes and are passed directly to the CSI driver. 
+ // The underlying storage provider supports changing these attributes on an + // existing volume, however the parameters field itself is immutable. To + // invoke a volume update, a new VolumeAttributesClass should be created with + // new parameters, and the PersistentVolumeClaim should be updated to reference + // the new VolumeAttributesClass. + // + // This field is required and must contain at least one key/value pair. + // The keys cannot be empty, and the maximum number of parameters is 512, with + // a cumulative max size of 256K. If the CSI driver rejects invalid parameters, + // the target PersistentVolumeClaim will be set to an "Infeasible" state in the + // modifyVolumeStatus field. + Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` +} + +// +k8s:prerelease-lifecycle-gen:introduced=1.31 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttributesClassList is a collection of VolumeAttributesClass objects. +type VolumeAttributesClassList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of VolumeAttributesClass objects. + Items []VolumeAttributesClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index 6d9d233066..8c1a663507 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -216,6 +216,27 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { return map_VolumeAttachmentStatus } +var map_VolumeAttributesClass = map[string]string{ + "": "VolumeAttributesClass represents a specification of mutable volume attributes defined by the CSI driver. The class can be specified during dynamic provisioning of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "driverName": "Name of the CSI driver This field is immutable.", + "parameters": "parameters hold volume attributes defined by the CSI driver. These values are opaque to the Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume, however the parameters field itself is immutable. To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass.\n\nThis field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. 
If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \"Infeasible\" state in the modifyVolumeStatus field.", +} + +func (VolumeAttributesClass) SwaggerDoc() map[string]string { + return map_VolumeAttributesClass +} + +var map_VolumeAttributesClassList = map[string]string{ + "": "VolumeAttributesClassList is a collection of VolumeAttributesClass objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of VolumeAttributesClass objects.", +} + +func (VolumeAttributesClassList) SwaggerDoc() map[string]string { + return map_VolumeAttributesClassList +} + var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", "time": "time represents the time the error was encountered.", diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go index f0450182b2..d87aa6b90b 100644 --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go @@ -579,6 +579,72 @@ func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttributesClass) DeepCopyInto(out *VolumeAttributesClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttributesClass. +func (in *VolumeAttributesClass) DeepCopy() *VolumeAttributesClass { + if in == nil { + return nil + } + out := new(VolumeAttributesClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttributesClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttributesClassList) DeepCopyInto(out *VolumeAttributesClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeAttributesClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttributesClassList. +func (in *VolumeAttributesClassList) DeepCopy() *VolumeAttributesClassList { + if in == nil { + return nil + } + out := new(VolumeAttributesClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttributesClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *VolumeError) DeepCopyInto(out *VolumeError) { *out = *in diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go index c5d23e7d45..4be57dc0d4 100644 --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go @@ -264,3 +264,39 @@ func (in *VolumeAttachmentList) APILifecycleReplacement() schema.GroupVersionKin func (in *VolumeAttachmentList) APILifecycleRemoved() (major, minor int) { return 1, 22 } + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *VolumeAttributesClass) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *VolumeAttributesClass) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *VolumeAttributesClass) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *VolumeAttributesClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *VolumeAttributesClassList) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *VolumeAttributesClassList) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} diff --git a/vendor/k8s.io/api/storagemigration/v1alpha1/doc.go b/vendor/k8s.io/api/storagemigration/v1alpha1/doc.go new file mode 100644 index 0000000000..192f9ff3c3 --- /dev/null +++ b/vendor/k8s.io/api/storagemigration/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true +// +groupName=storagemigration.k8s.io + +package v1alpha1 // import "k8s.io/api/storagemigration/v1alpha1" diff --git a/vendor/k8s.io/api/storagemigration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/storagemigration/v1alpha1/generated.pb.go new file mode 100644 index 0000000000..ed57f34b59 --- /dev/null +++ b/vendor/k8s.io/api/storagemigration/v1alpha1/generated.pb.go @@ -0,0 +1,1688 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/storagemigration/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + k8s_io_api_core_v1 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } +func (*GroupVersionResource) ProtoMessage() {} +func (*GroupVersionResource) Descriptor() ([]byte, []int) { + return fileDescriptor_0117377a57b172b9, []int{0} +} +func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupVersionResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersionResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersionResource.Merge(m, src) +} +func (m *GroupVersionResource) XXX_Size() int { + return m.Size() +} +func (m *GroupVersionResource) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersionResource.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupVersionResource proto.InternalMessageInfo + +func (m *MigrationCondition) Reset() { *m = MigrationCondition{} } +func (*MigrationCondition) ProtoMessage() {} +func (*MigrationCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_0117377a57b172b9, []int{1} +} +func (m *MigrationCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MigrationCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MigrationCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_MigrationCondition.Merge(m, src) +} +func (m *MigrationCondition) XXX_Size() int { + return m.Size() +} +func (m *MigrationCondition) XXX_DiscardUnknown() { + xxx_messageInfo_MigrationCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_MigrationCondition proto.InternalMessageInfo + +func (m *StorageVersionMigration) Reset() { *m = StorageVersionMigration{} } +func (*StorageVersionMigration) ProtoMessage() {} +func (*StorageVersionMigration) Descriptor() ([]byte, []int) { + return fileDescriptor_0117377a57b172b9, []int{2} +} +func (m *StorageVersionMigration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageVersionMigration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageVersionMigration) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageVersionMigration.Merge(m, src) +} +func (m *StorageVersionMigration) XXX_Size() int { + return m.Size() +} +func (m *StorageVersionMigration) XXX_DiscardUnknown() { + xxx_messageInfo_StorageVersionMigration.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageVersionMigration proto.InternalMessageInfo + +func (m *StorageVersionMigrationList) Reset() { *m = StorageVersionMigrationList{} } +func (*StorageVersionMigrationList) ProtoMessage() {} +func (*StorageVersionMigrationList) Descriptor() ([]byte, []int) { + return fileDescriptor_0117377a57b172b9, []int{3} +} +func (m *StorageVersionMigrationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageVersionMigrationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageVersionMigrationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageVersionMigrationList.Merge(m, src) +} +func (m 
*StorageVersionMigrationList) XXX_Size() int { + return m.Size() +} +func (m *StorageVersionMigrationList) XXX_DiscardUnknown() { + xxx_messageInfo_StorageVersionMigrationList.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageVersionMigrationList proto.InternalMessageInfo + +func (m *StorageVersionMigrationSpec) Reset() { *m = StorageVersionMigrationSpec{} } +func (*StorageVersionMigrationSpec) ProtoMessage() {} +func (*StorageVersionMigrationSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_0117377a57b172b9, []int{4} +} +func (m *StorageVersionMigrationSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageVersionMigrationSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageVersionMigrationSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageVersionMigrationSpec.Merge(m, src) +} +func (m *StorageVersionMigrationSpec) XXX_Size() int { + return m.Size() +} +func (m *StorageVersionMigrationSpec) XXX_DiscardUnknown() { + xxx_messageInfo_StorageVersionMigrationSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageVersionMigrationSpec proto.InternalMessageInfo + +func (m *StorageVersionMigrationStatus) Reset() { *m = StorageVersionMigrationStatus{} } +func (*StorageVersionMigrationStatus) ProtoMessage() {} +func (*StorageVersionMigrationStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_0117377a57b172b9, []int{5} +} +func (m *StorageVersionMigrationStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageVersionMigrationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageVersionMigrationStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageVersionMigrationStatus.Merge(m, src) +} +func (m *StorageVersionMigrationStatus) XXX_Size() int { + return m.Size() +} +func (m *StorageVersionMigrationStatus) XXX_DiscardUnknown() { + xxx_messageInfo_StorageVersionMigrationStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageVersionMigrationStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*GroupVersionResource)(nil), "k8s.io.api.storagemigration.v1alpha1.GroupVersionResource") + proto.RegisterType((*MigrationCondition)(nil), "k8s.io.api.storagemigration.v1alpha1.MigrationCondition") + proto.RegisterType((*StorageVersionMigration)(nil), "k8s.io.api.storagemigration.v1alpha1.StorageVersionMigration") + proto.RegisterType((*StorageVersionMigrationList)(nil), "k8s.io.api.storagemigration.v1alpha1.StorageVersionMigrationList") + proto.RegisterType((*StorageVersionMigrationSpec)(nil), "k8s.io.api.storagemigration.v1alpha1.StorageVersionMigrationSpec") + proto.RegisterType((*StorageVersionMigrationStatus)(nil), "k8s.io.api.storagemigration.v1alpha1.StorageVersionMigrationStatus") +} + +func init() { + proto.RegisterFile("k8s.io/api/storagemigration/v1alpha1/generated.proto", fileDescriptor_0117377a57b172b9) +} + +var fileDescriptor_0117377a57b172b9 = []byte{ + // 719 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcf, 0x4f, 0x13, 0x4f, + 0x14, 0xef, 0x42, 0x0b, 0x7c, 0xa7, 0x5f, 0xc0, 0x4c, 0x14, 0x1a, 0x8c, 0x5b, 0x53, 0x09, 0x41, + 0xa3, 0xb3, 0xd2, 0x10, 0x43, 0x30, 0x1e, 0x28, 0x07, 0xa3, 0x81, 0x98, 0x0c, 0xc8, 0xc1, 0x78, + 
0x70, 0xba, 0x1d, 0xb7, 0x43, 0xd9, 0x9d, 0xcd, 0xce, 0x6c, 0x13, 0x6e, 0xfe, 0x09, 0x1e, 0xfc, + 0x93, 0x3c, 0x70, 0x31, 0xe1, 0xc8, 0xc5, 0x2a, 0xf5, 0xbf, 0xe0, 0x64, 0x66, 0x76, 0x76, 0xfb, + 0x8b, 0x62, 0x13, 0x6e, 0x3b, 0xef, 0xbd, 0xcf, 0x67, 0xde, 0x7b, 0x9f, 0x79, 0x6f, 0xc1, 0x66, + 0x6b, 0x4b, 0x20, 0xc6, 0x1d, 0x12, 0x32, 0x47, 0x48, 0x1e, 0x11, 0x8f, 0xfa, 0xcc, 0x8b, 0x88, + 0x64, 0x3c, 0x70, 0xda, 0x1b, 0xe4, 0x24, 0x6c, 0x92, 0x0d, 0xc7, 0xa3, 0x01, 0x8d, 0x88, 0xa4, + 0x0d, 0x14, 0x46, 0x5c, 0x72, 0xb8, 0x9a, 0xa0, 0x10, 0x09, 0x19, 0x1a, 0x46, 0xa1, 0x14, 0xb5, + 0xf2, 0xcc, 0x63, 0xb2, 0x19, 0xd7, 0x91, 0xcb, 0x7d, 0xc7, 0xe3, 0x1e, 0x77, 0x34, 0xb8, 0x1e, + 0x7f, 0xd6, 0x27, 0x7d, 0xd0, 0x5f, 0x09, 0xe9, 0x4a, 0xa5, 0x2f, 0x15, 0x97, 0x47, 0xd4, 0x69, + 0x8f, 0x5c, 0xbc, 0xd2, 0x97, 0xae, 0x4f, 0xdc, 0x26, 0x0b, 0x68, 0x74, 0xea, 0x84, 0x2d, 0x4f, + 0x19, 0x84, 0xe3, 0x53, 0x49, 0xae, 0x43, 0x39, 0xe3, 0x50, 0x51, 0x1c, 0x48, 0xe6, 0xd3, 0x11, + 0xc0, 0x8b, 0x7f, 0x01, 0x84, 0xdb, 0xa4, 0x3e, 0x19, 0xc6, 0x55, 0xbe, 0x59, 0xe0, 0xee, 0xeb, + 0x88, 0xc7, 0xe1, 0x11, 0x8d, 0x04, 0xe3, 0x01, 0xa6, 0x82, 0xc7, 0x91, 0x4b, 0xe1, 0x23, 0x50, + 0xf0, 0x94, 0xbd, 0x64, 0x3d, 0xb4, 0xd6, 0xff, 0xab, 0xcd, 0x9f, 0x75, 0xca, 0xb9, 0x6e, 0xa7, + 0x5c, 0xd0, 0xc1, 0x38, 0xf1, 0xc1, 0xc7, 0x60, 0xb6, 0x9d, 0xe0, 0x4a, 0x53, 0x3a, 0x6c, 0xd1, + 0x84, 0xcd, 0xa6, 0x74, 0xa9, 0x1f, 0x3e, 0x05, 0x73, 0x91, 0xe1, 0x2e, 0x4d, 0xeb, 0xd8, 0x3b, + 0x26, 0x76, 0x2e, 0xbd, 0x13, 0x67, 0x11, 0x95, 0x9f, 0x53, 0x00, 0xee, 0xa7, 0xfa, 0xec, 0xf2, + 0xa0, 0xc1, 0xd4, 0x07, 0xdc, 0x06, 0x79, 0x79, 0x1a, 0x52, 0x93, 0xd3, 0x9a, 0x21, 0xc8, 0x1f, + 0x9e, 0x86, 0xf4, 0xaa, 0x53, 0x5e, 0x1a, 0x45, 0x28, 0x0f, 0xd6, 0x18, 0xb8, 0x07, 0x66, 0x84, + 0x24, 0x32, 0x16, 0x26, 0xd5, 0x4d, 0x83, 0x9e, 0x39, 0xd0, 0xd6, 0xab, 0x4e, 0xf9, 0x1a, 0x39, + 0x51, 0xc6, 0x94, 0x44, 0x61, 0xc3, 0x01, 0x8f, 0xc1, 0xc2, 0x09, 0x11, 0xf2, 0x7d, 0xd8, 0x20, + 0x92, 0x1e, 0x32, 0x3f, 0x29, 0xaa, 0x58, 0x7d, 0x82, 0x7a, 0x0f, 0x2d, 0x13, 0x02, 0x85, 0x2d, + 0x4f, 0x19, 0x04, 0x52, 0x7a, 0xa3, 0xf6, 0x06, 0x52, 0x88, 0xda, 0x92, 0xc9, 0x60, 0x61, 0x6f, + 0x80, 0x09, 0x0f, 0x31, 0xc3, 0x35, 0x30, 0x13, 0x51, 0x22, 0x78, 0x50, 0xca, 0xeb, 0xcc, 0x17, + 0xd2, 0xcc, 0xb1, 0xb6, 0x62, 0xe3, 0x55, 0x6a, 0xf8, 0x54, 0x08, 0xe2, 0xd1, 0x52, 0x61, 0x50, + 0x8d, 0xfd, 0xc4, 0x8c, 0x53, 0x7f, 0xe5, 0xc7, 0x14, 0x58, 0x3e, 0x48, 0xc6, 0xc0, 0x28, 0x95, + 0xf5, 0x0e, 0x7e, 0x02, 0x73, 0x2a, 0xcd, 0x06, 0x91, 0x44, 0x37, 0xba, 0x58, 0x7d, 0x3e, 0x59, + 0x51, 0xef, 0xea, 0xc7, 0xd4, 0x95, 0xfb, 0x54, 0x92, 0x1a, 0x34, 0x37, 0x83, 0x9e, 0x0d, 0x67, + 0xac, 0xd0, 0x05, 0x79, 0x11, 0x52, 0x57, 0x0b, 0x51, 0xac, 0xee, 0xa0, 0x49, 0x66, 0x13, 0x8d, + 0x49, 0xf7, 0x20, 0xa4, 0x6e, 0xed, 0xff, 0xf4, 0x25, 0xa8, 0x13, 0xd6, 0xe4, 0xb0, 0x95, 0xe9, + 0x9d, 0x28, 0xb3, 0x7b, 0xbb, 0x6b, 0x34, 0x55, 0xaf, 0xf5, 0x83, 0xcf, 0xa1, 0xf2, 0xcb, 0x02, + 0xf7, 0xc7, 0x20, 0xf7, 0x98, 0x90, 0xf0, 0xe3, 0x48, 0x4f, 0xd1, 0x64, 0x3d, 0x55, 0x68, 0xdd, + 0xd1, 0x6c, 0x5a, 0x52, 0x4b, 0x5f, 0x3f, 0xeb, 0xa0, 0xc0, 0x24, 0xf5, 0xd5, 0xcb, 0x9e, 0x5e, + 0x2f, 0x56, 0x5f, 0xdd, 0xaa, 0xd2, 0xde, 0xa8, 0xbf, 0x51, 0x9c, 0x38, 0xa1, 0xae, 0x7c, 0x1f, + 0x5f, 0xa1, 0x6a, 0x3a, 0x6c, 0xf6, 0xcd, 0x77, 0x52, 0xe1, 0xf6, 0x64, 0x69, 0x5c, 0xb7, 0x7d, + 0x6e, 0xda, 0x0d, 0xf0, 0x25, 0x98, 0x77, 0x79, 0x20, 0x59, 0x10, 0xd3, 0x43, 0xde, 0xa2, 0xe9, + 0xea, 0xb9, 0x67, 0x20, 0xf3, 0xbb, 0xfd, 0x4e, 0x3c, 0x18, 0x5b, 0x39, 0xb7, 0xc0, 0x83, 0x1b, + 0x25, 0x86, 0x27, 0x00, 
0xb8, 0xe9, 0xd0, 0x8b, 0x92, 0xa5, 0x3b, 0xba, 0x35, 0x59, 0x29, 0xa3, + 0xfb, 0xa7, 0x37, 0x08, 0x99, 0x49, 0xe0, 0x3e, 0x7e, 0xb8, 0x03, 0x16, 0xd3, 0xc2, 0x8e, 0x06, + 0x36, 0xe9, 0xb2, 0x01, 0x2e, 0xe2, 0x41, 0x37, 0x1e, 0x8e, 0xaf, 0xbd, 0x3d, 0xbb, 0xb4, 0x73, + 0xe7, 0x97, 0x76, 0xee, 0xe2, 0xd2, 0xce, 0x7d, 0xe9, 0xda, 0xd6, 0x59, 0xd7, 0xb6, 0xce, 0xbb, + 0xb6, 0x75, 0xd1, 0xb5, 0xad, 0xdf, 0x5d, 0xdb, 0xfa, 0xfa, 0xc7, 0xce, 0x7d, 0x58, 0x9d, 0xe4, + 0xb7, 0xf9, 0x37, 0x00, 0x00, 0xff, 0xff, 0x01, 0xc1, 0xb1, 0xd8, 0x5d, 0x07, 0x00, 0x00, +} + +func (m *GroupVersionResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupVersionResource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupVersionResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x1a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MigrationCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MigrationCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MigrationCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageVersionMigration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageVersionMigration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageVersionMigration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageVersionMigrationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageVersionMigrationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageVersionMigrationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageVersionMigrationSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageVersionMigrationSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageVersionMigrationSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ContinueToken) + copy(dAtA[i:], m.ContinueToken) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContinueToken))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageVersionMigrationStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageVersionMigrationStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageVersionMigrationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x12 + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GroupVersionResource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + 
l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *MigrationCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StorageVersionMigration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StorageVersionMigrationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StorageVersionMigrationSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContinueToken) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StorageVersionMigrationStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *GroupVersionResource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GroupVersionResource{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `}`, + }, "") + return s +} +func (this *MigrationCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MigrationCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *StorageVersionMigration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageVersionMigration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StorageVersionMigrationSpec", "StorageVersionMigrationSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StorageVersionMigrationStatus", "StorageVersionMigrationStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StorageVersionMigrationList) String() string { + if this == nil { + return "nil" + } 
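
For orientation, the varint helpers above follow the usual gogo/protobuf pattern: sovGenerated(x) = (bits.Len64(x|1)+6)/7 counts the 7-bit groups a varint needs, sozGenerated zigzag-encodes a signed value before measuring it, and encodeVarintGenerated writes the varint so that it ends at a given offset, because the MarshalToSizedBuffer methods fill their buffer back to front. A minimal standalone sketch of the same arithmetic (hypothetical names, not part of the generated file):

package main

import (
	"fmt"
	"math/bits"
)

// varintLen mirrors sovGenerated: bytes needed to encode x as a protobuf
// base-128 varint (always at least one byte, hence the |1).
func varintLen(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// zigzagLen mirrors sozGenerated: varint length of the zigzag encoding of a
// signed value, so small negative numbers stay short.
func zigzagLen(x int64) int { return varintLen(uint64(x)<<1 ^ uint64(x>>63)) }

// putVarintBackwards mirrors encodeVarintGenerated: write x so that it ends
// just before offset and return the index where it starts.
func putVarintBackwards(buf []byte, offset int, x uint64) int {
	offset -= varintLen(x)
	base := offset
	for x >= 1<<7 {
		buf[offset] = byte(x&0x7f | 0x80)
		x >>= 7
		offset++
	}
	buf[offset] = byte(x)
	return base
}

func main() {
	buf := make([]byte, 8)
	start := putVarintBackwards(buf, len(buf), 300)
	fmt.Println(varintLen(300), buf[start:]) // 2 [172 2]
	fmt.Println(zigzagLen(-1))               // 1, since zigzag(-1) == 1
}
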
+ repeatedStringForItems := "[]StorageVersionMigration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StorageVersionMigration", "StorageVersionMigration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&StorageVersionMigrationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *StorageVersionMigrationSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageVersionMigrationSpec{`, + `Resource:` + strings.Replace(strings.Replace(this.Resource.String(), "GroupVersionResource", "GroupVersionResource", 1), `&`, ``, 1) + `,`, + `ContinueToken:` + fmt.Sprintf("%v", this.ContinueToken) + `,`, + `}`, + }, "") + return s +} +func (this *StorageVersionMigrationStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]MigrationCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "MigrationCondition", "MigrationCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&StorageVersionMigrationStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersionResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersionResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen 
< 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MigrationCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MigrationCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MigrationCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MigrationConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int 
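
Every field the marshalers above emit is prefixed by a varint key, key = fieldNumber<<3 | wireType, which is why the generated code writes literal tag bytes such as 0x0a, 0x12 and 0x1a, and why each Unmarshal loop recovers fieldNum = wire >> 3 and wireType = wire & 0x7 before switching on the field. A small sketch of that key arithmetic (illustrative only, not part of the generated file):

package main

import "fmt"

// Wire type 2 marks length-delimited payloads: strings and embedded messages.
const wireBytes = 2

// key packs a field number and wire type into the tag value the generated
// marshalers emit before each field (itself varint-encoded for large numbers).
func key(fieldNum, wireType uint64) uint64 { return fieldNum<<3 | wireType }

func main() {
	fmt.Printf("%#x\n", key(3, wireBytes)) // 0x1a, the tag before field 3 (e.g. Resource, Status)
	wire := uint64(0x1a)
	fmt.Println(wire>>3, wire&0x7) // 3 2, what the Unmarshal loop recovers
}
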
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageVersionMigration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageVersionMigration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageVersionMigration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageVersionMigrationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageVersionMigrationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageVersionMigrationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StorageVersionMigration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageVersionMigrationSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageVersionMigrationSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageVersionMigrationSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContinueToken", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContinueToken = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageVersionMigrationStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageVersionMigrationStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageVersionMigrationStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, MigrationCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, 
nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/storagemigration/v1alpha1/generated.proto b/vendor/k8s.io/api/storagemigration/v1alpha1/generated.proto new file mode 100644 index 0000000000..341e0bc5cf --- /dev/null +++ b/vendor/k8s.io/api/storagemigration/v1alpha1/generated.proto @@ -0,0 +1,127 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.storagemigration.v1alpha1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/storagemigration/v1alpha1"; + +// The names of the group, the version, and the resource. +message GroupVersionResource { + // The name of the group. + optional string group = 1; + + // The name of the version. + optional string version = 2; + + // The name of the resource. + optional string resource = 3; +} + +// Describes the state of a migration at a certain point. +message MigrationCondition { + // Type of the condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time this condition was updated. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// StorageVersionMigration represents a migration of stored data to the latest +// storage version. +message StorageVersionMigration { + // Standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the migration. + // +optional + optional StorageVersionMigrationSpec spec = 2; + + // Status of the migration. + // +optional + optional StorageVersionMigrationStatus status = 3; +} + +// StorageVersionMigrationList is a collection of storage version migrations. 
+message StorageVersionMigrationList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of StorageVersionMigration + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + repeated StorageVersionMigration items = 2; +} + +// Spec of the storage version migration. +message StorageVersionMigrationSpec { + // The resource that is being migrated. The migrator sends requests to + // the endpoint serving the resource. + // Immutable. + optional GroupVersionResource resource = 1; + + // The token used in the list options to get the next chunk of objects + // to migrate. When the .status.conditions indicates the migration is + // "Running", users can use this token to check the progress of the + // migration. + // +optional + optional string continueToken = 2; +} + +// Status of the storage version migration. +message StorageVersionMigrationStatus { + // The latest available observations of the migration's current state. + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // +optional + repeated MigrationCondition conditions = 1; + + // ResourceVersion to compare with the GC cache for performing the migration. + // This is the current resource version of given group, version and resource when + // kube-controller-manager first observes this StorageVersionMigration resource. + optional string resourceVersion = 2; +} + diff --git a/vendor/k8s.io/api/storagemigration/v1alpha1/register.go b/vendor/k8s.io/api/storagemigration/v1alpha1/register.go new file mode 100644 index 0000000000..c9706050f1 --- /dev/null +++ b/vendor/k8s.io/api/storagemigration/v1alpha1/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// GroupName is the group name use in this package +const GroupName = "storagemigration.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &StorageVersionMigration{}, + &StorageVersionMigrationList{}, + ) + + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/storagemigration/v1alpha1/types.go b/vendor/k8s.io/api/storagemigration/v1alpha1/types.go new file mode 100644 index 0000000000..0f343d1e95 --- /dev/null +++ b/vendor/k8s.io/api/storagemigration/v1alpha1/types.go @@ -0,0 +1,131 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.30 + +// StorageVersionMigration represents a migration of stored data to the latest +// storage version. +type StorageVersionMigration struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Specification of the migration. + // +optional + Spec StorageVersionMigrationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // Status of the migration. + // +optional + Status StorageVersionMigrationStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// Spec of the storage version migration. +type StorageVersionMigrationSpec struct { + // The resource that is being migrated. The migrator sends requests to + // the endpoint serving the resource. + // Immutable. + Resource GroupVersionResource `json:"resource" protobuf:"bytes,1,opt,name=resource"` + // The token used in the list options to get the next chunk of objects + // to migrate. When the .status.conditions indicates the migration is + // "Running", users can use this token to check the progress of the + // migration. + // +optional + ContinueToken string `json:"continueToken,omitempty" protobuf:"bytes,2,opt,name=continueToken"` + // TODO: consider recording the storage version hash when the migration + // is created. It can avoid races. +} + +// The names of the group, the version, and the resource. +type GroupVersionResource struct { + // The name of the group. + Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"` + // The name of the version. + Version string `json:"version,omitempty" protobuf:"bytes,2,opt,name=version"` + // The name of the resource. + Resource string `json:"resource,omitempty" protobuf:"bytes,3,opt,name=resource"` +} + +type MigrationConditionType string + +const ( + // Indicates that the migration is running. + MigrationRunning MigrationConditionType = "Running" + // Indicates that the migration has completed successfully. 
+ MigrationSucceeded MigrationConditionType = "Succeeded" + // Indicates that the migration has failed. + MigrationFailed MigrationConditionType = "Failed" +) + +// Describes the state of a migration at a certain point. +type MigrationCondition struct { + // Type of the condition. + Type MigrationConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=MigrationConditionType"` + // Status of the condition, one of True, False, Unknown. + Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // The last time this condition was updated. + // +optional + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,3,opt,name=lastUpdateTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// Status of the storage version migration. +type StorageVersionMigrationStatus struct { + // The latest available observations of the migration's current state. + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // +optional + Conditions []MigrationCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + // ResourceVersion to compare with the GC cache for performing the migration. + // This is the current resource version of given group, version and resource when + // kube-controller-manager first observes this StorageVersionMigration resource. + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.30 + +// StorageVersionMigrationList is a collection of storage version migrations. +type StorageVersionMigrationList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Items is the list of StorageVersionMigration + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Items []StorageVersionMigration `json:"items" listType:"map" listMapKey:"type" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/storagemigration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storagemigration/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..257d72a236 --- /dev/null +++ b/vendor/k8s.io/api/storagemigration/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,95 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
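
Taken together, register.go and types.go give callers everything needed to work with the new group: AddToScheme wires the kinds into a runtime.Scheme, and the structs above describe the desired migration. A hedged sketch of typical client-side use follows; the group, version and resource names are placeholders, not real API names:

package main

import (
	"fmt"

	svmv1alpha1 "k8s.io/api/storagemigration/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Register the new kinds so codecs and clients can round-trip them.
	scheme := runtime.NewScheme()
	if err := svmv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// Describe a migration of one resource's stored objects; the GVR values
	// here are made up for illustration.
	migration := svmv1alpha1.StorageVersionMigration{
		ObjectMeta: metav1.ObjectMeta{Name: "widgets-storage-migration"},
		Spec: svmv1alpha1.StorageVersionMigrationSpec{
			Resource: svmv1alpha1.GroupVersionResource{
				Group:    "example.k8s.io",
				Version:  "v1",
				Resource: "widgets",
			},
		},
	}
	fmt.Println(svmv1alpha1.SchemeGroupVersion.WithKind("StorageVersionMigration"), migration.Name)
}
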
+*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_GroupVersionResource = map[string]string{ + "": "The names of the group, the version, and the resource.", + "group": "The name of the group.", + "version": "The name of the version.", + "resource": "The name of the resource.", +} + +func (GroupVersionResource) SwaggerDoc() map[string]string { + return map_GroupVersionResource +} + +var map_MigrationCondition = map[string]string{ + "": "Describes the state of a migration at a certain point.", + "type": "Type of the condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastUpdateTime": "The last time this condition was updated.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (MigrationCondition) SwaggerDoc() map[string]string { + return map_MigrationCondition +} + +var map_StorageVersionMigration = map[string]string{ + "": "StorageVersionMigration represents a migration of stored data to the latest storage version.", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the migration.", + "status": "Status of the migration.", +} + +func (StorageVersionMigration) SwaggerDoc() map[string]string { + return map_StorageVersionMigration +} + +var map_StorageVersionMigrationList = map[string]string{ + "": "StorageVersionMigrationList is a collection of storage version migrations.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of StorageVersionMigration", +} + +func (StorageVersionMigrationList) SwaggerDoc() map[string]string { + return map_StorageVersionMigrationList +} + +var map_StorageVersionMigrationSpec = map[string]string{ + "": "Spec of the storage version migration.", + "resource": "The resource that is being migrated. The migrator sends requests to the endpoint serving the resource. Immutable.", + "continueToken": "The token used in the list options to get the next chunk of objects to migrate. When the .status.conditions indicates the migration is \"Running\", users can use this token to check the progress of the migration.", +} + +func (StorageVersionMigrationSpec) SwaggerDoc() map[string]string { + return map_StorageVersionMigrationSpec +} + +var map_StorageVersionMigrationStatus = map[string]string{ + "": "Status of the storage version migration.", + "conditions": "The latest available observations of the migration's current state.", + "resourceVersion": "ResourceVersion to compare with the GC cache for performing the migration. 
This is the current resource version of given group, version and resource when kube-controller-manager first observes this StorageVersionMigration resource.", +} + +func (StorageVersionMigrationStatus) SwaggerDoc() map[string]string { + return map_StorageVersionMigrationStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storagemigration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storagemigration/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..9d35011d59 --- /dev/null +++ b/vendor/k8s.io/api/storagemigration/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,160 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupVersionResource) DeepCopyInto(out *GroupVersionResource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionResource. +func (in *GroupVersionResource) DeepCopy() *GroupVersionResource { + if in == nil { + return nil + } + out := new(GroupVersionResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MigrationCondition) DeepCopyInto(out *MigrationCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MigrationCondition. +func (in *MigrationCondition) DeepCopy() *MigrationCondition { + if in == nil { + return nil + } + out := new(MigrationCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersionMigration) DeepCopyInto(out *StorageVersionMigration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionMigration. +func (in *StorageVersionMigration) DeepCopy() *StorageVersionMigration { + if in == nil { + return nil + } + out := new(StorageVersionMigration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageVersionMigration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageVersionMigrationList) DeepCopyInto(out *StorageVersionMigrationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageVersionMigration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionMigrationList. +func (in *StorageVersionMigrationList) DeepCopy() *StorageVersionMigrationList { + if in == nil { + return nil + } + out := new(StorageVersionMigrationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageVersionMigrationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersionMigrationSpec) DeepCopyInto(out *StorageVersionMigrationSpec) { + *out = *in + out.Resource = in.Resource + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionMigrationSpec. +func (in *StorageVersionMigrationSpec) DeepCopy() *StorageVersionMigrationSpec { + if in == nil { + return nil + } + out := new(StorageVersionMigrationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersionMigrationStatus) DeepCopyInto(out *StorageVersionMigrationStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]MigrationCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionMigrationStatus. +func (in *StorageVersionMigrationStatus) DeepCopy() *StorageVersionMigrationStatus { + if in == nil { + return nil + } + out := new(StorageVersionMigrationStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/storagemigration/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/storagemigration/v1alpha1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..acdb574351 --- /dev/null +++ b/vendor/k8s.io/api/storagemigration/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,58 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1alpha1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *StorageVersionMigration) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *StorageVersionMigration) APILifecycleDeprecated() (major, minor int) { + return 1, 33 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *StorageVersionMigration) APILifecycleRemoved() (major, minor int) { + return 1, 36 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *StorageVersionMigrationList) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *StorageVersionMigrationList) APILifecycleDeprecated() (major, minor int) { + return 1, 33 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *StorageVersionMigrationList) APILifecycleRemoved() (major, minor int) { + return 1, 36 +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper/test_restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper/test_restmapper.go new file mode 100644 index 0000000000..72c6438cb6 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper/test_restmapper.go @@ -0,0 +1,165 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testrestmapper + +import ( + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" +) + +// TestOnlyStaticRESTMapper returns a union RESTMapper of all known types with priorities chosen in the following order: +// 1. 
legacy kube group preferred version, extensions preferred version, metrics preferred version, legacy +// kube any version, extensions any version, metrics any version, all other groups alphabetical preferred version, +// all other groups alphabetical. +// +// TODO callers of this method should be updated to build their own specific restmapper based on their scheme for their tests +// TODO the things being tested are related to whether various cases are handled, not tied to the particular types being checked. +func TestOnlyStaticRESTMapper(scheme *runtime.Scheme, versionPatterns ...schema.GroupVersion) meta.RESTMapper { + unionMapper := meta.MultiRESTMapper{} + unionedGroups := sets.NewString() + for _, enabledVersion := range scheme.PrioritizedVersionsAllGroups() { + if !unionedGroups.Has(enabledVersion.Group) { + unionedGroups.Insert(enabledVersion.Group) + unionMapper = append(unionMapper, newRESTMapper(enabledVersion.Group, scheme)) + } + } + + if len(versionPatterns) != 0 { + resourcePriority := []schema.GroupVersionResource{} + kindPriority := []schema.GroupVersionKind{} + for _, versionPriority := range versionPatterns { + resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource)) + kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind)) + } + + return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} + } + + prioritizedGroups := []string{"", "extensions", "metrics"} + resourcePriority, kindPriority := prioritiesForGroups(scheme, prioritizedGroups...) + + prioritizedGroupsSet := sets.NewString(prioritizedGroups...) + remainingGroups := sets.String{} + for _, enabledVersion := range scheme.PrioritizedVersionsAllGroups() { + if !prioritizedGroupsSet.Has(enabledVersion.Group) { + remainingGroups.Insert(enabledVersion.Group) + } + } + + remainingResourcePriority, remainingKindPriority := prioritiesForGroups(scheme, remainingGroups.List()...) + resourcePriority = append(resourcePriority, remainingResourcePriority...) + kindPriority = append(kindPriority, remainingKindPriority...) + + return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} +} + +// prioritiesForGroups returns the resource and kind priorities for a PriorityRESTMapper, preferring the preferred version of each group first, +// then any non-preferred version of the group second. 
+func prioritiesForGroups(scheme *runtime.Scheme, groups ...string) ([]schema.GroupVersionResource, []schema.GroupVersionKind) { + resourcePriority := []schema.GroupVersionResource{} + kindPriority := []schema.GroupVersionKind{} + + for _, group := range groups { + availableVersions := scheme.PrioritizedVersionsForGroup(group) + if len(availableVersions) > 0 { + resourcePriority = append(resourcePriority, availableVersions[0].WithResource(meta.AnyResource)) + kindPriority = append(kindPriority, availableVersions[0].WithKind(meta.AnyKind)) + } + } + for _, group := range groups { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{Group: group, Version: meta.AnyVersion, Resource: meta.AnyResource}) + kindPriority = append(kindPriority, schema.GroupVersionKind{Group: group, Version: meta.AnyVersion, Kind: meta.AnyKind}) + } + + return resourcePriority, kindPriority +} + +func newRESTMapper(group string, scheme *runtime.Scheme) meta.RESTMapper { + mapper := meta.NewDefaultRESTMapper(scheme.PrioritizedVersionsForGroup(group)) + for _, gv := range scheme.PrioritizedVersionsForGroup(group) { + for kind := range scheme.KnownTypes(gv) { + if ignoredKinds.Has(kind) { + continue + } + scope := meta.RESTScopeNamespace + if rootScopedKinds[gv.WithKind(kind).GroupKind()] { + scope = meta.RESTScopeRoot + } + mapper.Add(gv.WithKind(kind), scope) + } + } + + return mapper +} + +// hardcoded is good enough for the test we're running +var rootScopedKinds = map[schema.GroupKind]bool{ + {Group: "admission.k8s.io", Kind: "AdmissionReview"}: true, + + {Group: "admissionregistration.k8s.io", Kind: "ValidatingWebhookConfiguration"}: true, + {Group: "admissionregistration.k8s.io", Kind: "MutatingWebhookConfiguration"}: true, + + {Group: "authentication.k8s.io", Kind: "TokenReview"}: true, + + {Group: "authorization.k8s.io", Kind: "SubjectAccessReview"}: true, + {Group: "authorization.k8s.io", Kind: "SelfSubjectAccessReview"}: true, + {Group: "authorization.k8s.io", Kind: "SelfSubjectRulesReview"}: true, + + {Group: "certificates.k8s.io", Kind: "CertificateSigningRequest"}: true, + + {Group: "", Kind: "Node"}: true, + {Group: "", Kind: "Namespace"}: true, + {Group: "", Kind: "PersistentVolume"}: true, + {Group: "", Kind: "ComponentStatus"}: true, + + {Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"}: true, + {Group: "rbac.authorization.k8s.io", Kind: "ClusterRoleBinding"}: true, + + {Group: "scheduling.k8s.io", Kind: "PriorityClass"}: true, + + {Group: "storage.k8s.io", Kind: "StorageClass"}: true, + {Group: "storage.k8s.io", Kind: "VolumeAttachment"}: true, + + {Group: "apiextensions.k8s.io", Kind: "CustomResourceDefinition"}: true, + + {Group: "apiserver.k8s.io", Kind: "AdmissionConfiguration"}: true, + + {Group: "audit.k8s.io", Kind: "Event"}: true, + {Group: "audit.k8s.io", Kind: "Policy"}: true, + + {Group: "apiregistration.k8s.io", Kind: "APIService"}: true, + + {Group: "metrics.k8s.io", Kind: "NodeMetrics"}: true, + + {Group: "wardle.example.com", Kind: "Fischer"}: true, +} + +// hardcoded is good enough for the test we're running +var ignoredKinds = sets.NewString( + "ListOptions", + "DeleteOptions", + "Status", + "PodLogOptions", + "PodExecOptions", + "PodAttachOptions", + "PodPortForwardOptions", + "PodProxyOptions", + "NodeProxyOptions", + "ServiceProxyOptions", +) diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go index 53a25d3449..c3a272168e 100644 --- 
a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +// source: k8s.io/apimachinery/pkg/api/resource/generated.proto package resource @@ -41,7 +41,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *Quantity) Reset() { *m = Quantity{} } func (*Quantity) ProtoMessage() {} func (*Quantity) Descriptor() ([]byte, []int) { - return fileDescriptor_612bba87bd70906c, []int{0} + return fileDescriptor_7288c78ff45111e9, []int{0} } func (m *Quantity) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Quantity.Unmarshal(m, b) @@ -64,7 +64,7 @@ var xxx_messageInfo_Quantity proto.InternalMessageInfo func (m *QuantityValue) Reset() { *m = QuantityValue{} } func (*QuantityValue) ProtoMessage() {} func (*QuantityValue) Descriptor() ([]byte, []int) { - return fileDescriptor_612bba87bd70906c, []int{1} + return fileDescriptor_7288c78ff45111e9, []int{1} } func (m *QuantityValue) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_QuantityValue.Unmarshal(m, b) @@ -90,25 +90,24 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptor_612bba87bd70906c) + proto.RegisterFile("k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptor_7288c78ff45111e9) } -var fileDescriptor_612bba87bd70906c = []byte{ - // 254 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xf2, 0xcd, 0xb6, 0x28, 0xd6, - 0xcb, 0xcc, 0xd7, 0xcf, 0x2e, 0x4d, 0x4a, 0x2d, 0xca, 0x4b, 0x2d, 0x49, 0x2d, 0xd6, 0x2f, 0x4b, - 0xcd, 0x4b, 0xc9, 0x2f, 0xd2, 0x87, 0x4a, 0x24, 0x16, 0x64, 0xe6, 0x26, 0x26, 0x67, 0x64, 0xe6, - 0xa5, 0x16, 0x55, 0xea, 0x17, 0x64, 0xa7, 0x83, 0x04, 0xf4, 0x8b, 0x52, 0x8b, 0xf3, 0x4b, 0x8b, - 0x92, 0x53, 0xf5, 0xd3, 0x53, 0xf3, 0x52, 0x8b, 0x12, 0x4b, 0x52, 0x53, 0xf4, 0x0a, 0x8a, 0xf2, - 0x4b, 0xf2, 0x85, 0x54, 0x20, 0xba, 0xf4, 0x90, 0x75, 0xe9, 0x15, 0x64, 0xa7, 0x83, 0x04, 0xf4, - 0x60, 0xba, 0xa4, 0x74, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xd3, - 0xf3, 0xd3, 0xf3, 0xf5, 0xc1, 0x9a, 0x93, 0x4a, 0xd3, 0xc0, 0x3c, 0x30, 0x07, 0xcc, 0x82, 0x18, - 0xaa, 0x64, 0xc1, 0xc5, 0x11, 0x58, 0x9a, 0x98, 0x57, 0x92, 0x59, 0x52, 0x29, 0x24, 0xc6, 0xc5, - 0x56, 0x5c, 0x52, 0x94, 0x99, 0x97, 0x2e, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0xe5, 0x59, - 0x89, 0xcc, 0x58, 0x20, 0xcf, 0xd0, 0xb1, 0x50, 0x9e, 0x61, 0xc2, 0x42, 0x79, 0x86, 0x05, 0x0b, - 0xe5, 0x19, 0x1a, 0xee, 0x28, 0x30, 0x28, 0xd9, 0x72, 0xf1, 0xc2, 0x74, 0x86, 0x25, 0xe6, 0x94, - 0xa6, 0x92, 0xa6, 0xdd, 0xc9, 0xeb, 0xc4, 0x43, 0x39, 0x86, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, - 0x94, 0x63, 0x68, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x37, - 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0x43, 0x94, 0x0a, 0x31, 0x21, - 0x05, 0x08, 0x00, 0x00, 0xff, 0xff, 0x8e, 0x70, 0x98, 0xa3, 0x69, 0x01, 0x00, 0x00, +var fileDescriptor_7288c78ff45111e9 = []byte{ + // 234 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0xc9, 0xb6, 0x28, 0xd6, + 0xcb, 0xcc, 0xd7, 0x4f, 0x2c, 0xc8, 0xcc, 0x4d, 0x4c, 0xce, 0xc8, 0xcc, 0x4b, 0x2d, 0xaa, 0xd4, + 0x2f, 0xc8, 0x4e, 0x07, 0x09, 0xe8, 0x17, 
0xa5, 0x16, 0xe7, 0x97, 0x16, 0x25, 0xa7, 0xea, 0xa7, + 0xa7, 0xe6, 0xa5, 0x16, 0x25, 0x96, 0xa4, 0xa6, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xa9, + 0x40, 0x74, 0xe9, 0x21, 0xeb, 0xd2, 0x2b, 0xc8, 0x4e, 0x07, 0x09, 0xe8, 0xc1, 0x74, 0x49, 0xe9, + 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xa7, 0xe7, 0xeb, + 0x83, 0x35, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0x31, 0x54, 0xc9, 0x82, 0x8b, + 0x23, 0xb0, 0x34, 0x31, 0xaf, 0x24, 0xb3, 0xa4, 0x52, 0x48, 0x8c, 0x8b, 0xad, 0xb8, 0xa4, 0x28, + 0x33, 0x2f, 0x5d, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0xca, 0xb3, 0x12, 0x99, 0xb1, 0x40, + 0x9e, 0xa1, 0x63, 0xa1, 0x3c, 0xc3, 0x84, 0x85, 0xf2, 0x0c, 0x0b, 0x16, 0xca, 0x33, 0x34, 0xdc, + 0x51, 0x60, 0x50, 0xb2, 0xe5, 0xe2, 0x85, 0xe9, 0x0c, 0x4b, 0xcc, 0x29, 0x4d, 0x25, 0x4d, 0xbb, + 0x93, 0xd7, 0x89, 0x87, 0x72, 0x0c, 0x17, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xd0, 0xf0, + 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x6f, 0x3c, 0x92, 0x63, 0x7c, + 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x28, 0x15, 0x62, 0x42, 0x0a, 0x10, 0x00, 0x00, + 0xff, 0xff, 0x50, 0x91, 0xd0, 0x9c, 0x50, 0x01, 0x00, 0x00, } diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index 69f1bc336d..50af8334f0 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -25,6 +25,8 @@ import ( "strconv" "strings" + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" + inf "gopkg.in/inf.v0" ) @@ -683,6 +685,12 @@ func (q Quantity) MarshalJSON() ([]byte, error) { return result, nil } +func (q Quantity) MarshalCBOR() ([]byte, error) { + // The call to String() should never return the string "" because the receiver's + // address will never be nil. + return cbor.Marshal(q.String()) +} + // ToUnstructured implements the value.UnstructuredConverter interface. func (q Quantity) ToUnstructured() interface{} { return q.String() @@ -711,6 +719,27 @@ func (q *Quantity) UnmarshalJSON(value []byte) error { return nil } +func (q *Quantity) UnmarshalCBOR(value []byte) error { + var s *string + if err := cbor.Unmarshal(value, &s); err != nil { + return err + } + + if s == nil { + q.d.Dec = nil + q.i = int64Amount{} + return nil + } + + parsed, err := ParseQuantity(strings.TrimSpace(*s)) + if err != nil { + return err + } + + *q = parsed + return nil +} + // NewDecimalQuantity returns a new Quantity representing the given // value in the given format. func NewDecimalQuantity(b inf.Dec, format Format) *Quantity { diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/validation/OWNERS new file mode 100644 index 0000000000..4023732476 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/validation/OWNERS @@ -0,0 +1,11 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +# Disable inheritance as this is an api owners file +options: + no_parent_owners: true +approvers: + - api-approvers +reviewers: + - api-reviewers +labels: + - kind/api-change diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go new file mode 100644 index 0000000000..2734a8f3ba --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go @@ -0,0 +1,76 @@ +/* +Copyright 2020 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// ValidateListOptions returns all validation errors found while validating the ListOptions. +func ValidateListOptions(options *internalversion.ListOptions, isWatchListFeatureEnabled bool) field.ErrorList { + if options.Watch { + return validateWatchOptions(options, isWatchListFeatureEnabled) + } + allErrs := field.ErrorList{} + if match := options.ResourceVersionMatch; len(match) > 0 { + if len(options.ResourceVersion) == 0 { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden unless resourceVersion is provided")) + } + if len(options.Continue) > 0 { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden when continue is provided")) + } + if match != metav1.ResourceVersionMatchExact && match != metav1.ResourceVersionMatchNotOlderThan { + allErrs = append(allErrs, field.NotSupported(field.NewPath("resourceVersionMatch"), match, []string{string(metav1.ResourceVersionMatchExact), string(metav1.ResourceVersionMatchNotOlderThan), ""})) + } + if match == metav1.ResourceVersionMatchExact && options.ResourceVersion == "0" { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch \"exact\" is forbidden for resourceVersion \"0\"")) + } + } + if options.SendInitialEvents != nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("sendInitialEvents"), "sendInitialEvents is forbidden for list")) + } + return allErrs +} + +func validateWatchOptions(options *internalversion.ListOptions, isWatchListFeatureEnabled bool) field.ErrorList { + allErrs := field.ErrorList{} + match := options.ResourceVersionMatch + if options.SendInitialEvents != nil { + if match != metav1.ResourceVersionMatchNotOlderThan { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), fmt.Sprintf("sendInitialEvents requires setting resourceVersionMatch to %s", metav1.ResourceVersionMatchNotOlderThan))) + } + if !isWatchListFeatureEnabled { + allErrs = append(allErrs, field.Forbidden(field.NewPath("sendInitialEvents"), "sendInitialEvents is forbidden for watch unless the WatchList feature gate is enabled")) + } + } + if len(match) > 0 { + if options.SendInitialEvents == nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden for watch unless sendInitialEvents is provided")) + } + if match != metav1.ResourceVersionMatchNotOlderThan { + allErrs = append(allErrs, field.NotSupported(field.NewPath("resourceVersionMatch"), match, []string{string(metav1.ResourceVersionMatchNotOlderThan)})) + } + if len(options.Continue) > 0 { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is 
forbidden when continue is provided")) + } + } + return allErrs +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go index 15b45ffa84..5005beb12d 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go @@ -18,6 +18,7 @@ package v1 import ( "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/ptr" ) // IsControlledBy checks if the object has a controllerRef set to the given owner @@ -36,10 +37,14 @@ func GetControllerOf(controllee Object) *OwnerReference { return nil } cp := *ref + cp.Controller = ptr.To(*ref.Controller) + if ref.BlockOwnerDeletion != nil { + cp.BlockOwnerDeletion = ptr.To(*ref.BlockOwnerDeletion) + } return &cp } -// GetControllerOf returns a pointer to the controllerRef if controllee has a controller +// GetControllerOfNoCopy returns a pointer to the controllerRef if controllee has a controller func GetControllerOfNoCopy(controllee Object) *OwnerReference { refs := controllee.GetOwnerReferences() for i := range refs { @@ -52,14 +57,12 @@ func GetControllerOfNoCopy(controllee Object) *OwnerReference { // NewControllerRef creates an OwnerReference pointing to the given owner. func NewControllerRef(owner Object, gvk schema.GroupVersionKind) *OwnerReference { - blockOwnerDeletion := true - isController := true return &OwnerReference{ APIVersion: gvk.GroupVersion().String(), Kind: gvk.Kind, Name: owner.GetName(), UID: owner.GetUID(), - BlockOwnerDeletion: &blockOwnerDeletion, - Controller: &isController, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), } } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index 1a641e7c12..229ea2c2c2 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +// source: k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto package v1 @@ -52,7 +52,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *APIGroup) Reset() { *m = APIGroup{} } func (*APIGroup) ProtoMessage() {} func (*APIGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{0} + return fileDescriptor_a8431b6e0aeeb761, []int{0} } func (m *APIGroup) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -80,7 +80,7 @@ var xxx_messageInfo_APIGroup proto.InternalMessageInfo func (m *APIGroupList) Reset() { *m = APIGroupList{} } func (*APIGroupList) ProtoMessage() {} func (*APIGroupList) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{1} + return fileDescriptor_a8431b6e0aeeb761, []int{1} } func (m *APIGroupList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -108,7 +108,7 @@ var xxx_messageInfo_APIGroupList proto.InternalMessageInfo func (m *APIResource) Reset() { *m = APIResource{} } func (*APIResource) ProtoMessage() {} func (*APIResource) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{2} + return fileDescriptor_a8431b6e0aeeb761, []int{2} } func (m *APIResource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -136,7 +136,7 @@ var xxx_messageInfo_APIResource proto.InternalMessageInfo func (m *APIResourceList) Reset() { *m = APIResourceList{} } func (*APIResourceList) ProtoMessage() {} func (*APIResourceList) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{3} + return fileDescriptor_a8431b6e0aeeb761, []int{3} } func (m *APIResourceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -164,7 +164,7 @@ var xxx_messageInfo_APIResourceList proto.InternalMessageInfo func (m *APIVersions) Reset() { *m = APIVersions{} } func (*APIVersions) ProtoMessage() {} func (*APIVersions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{4} + return fileDescriptor_a8431b6e0aeeb761, []int{4} } func (m *APIVersions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -192,7 +192,7 @@ var xxx_messageInfo_APIVersions proto.InternalMessageInfo func (m *ApplyOptions) Reset() { *m = ApplyOptions{} } func (*ApplyOptions) ProtoMessage() {} func (*ApplyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{5} + return fileDescriptor_a8431b6e0aeeb761, []int{5} } func (m *ApplyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -220,7 +220,7 @@ var xxx_messageInfo_ApplyOptions proto.InternalMessageInfo func (m *Condition) Reset() { *m = Condition{} } func (*Condition) ProtoMessage() {} func (*Condition) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{6} + return fileDescriptor_a8431b6e0aeeb761, []int{6} } func (m *Condition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -248,7 +248,7 @@ var xxx_messageInfo_Condition proto.InternalMessageInfo func (m *CreateOptions) Reset() { *m = CreateOptions{} } func (*CreateOptions) ProtoMessage() {} func (*CreateOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{7} + return fileDescriptor_a8431b6e0aeeb761, []int{7} } func (m *CreateOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -276,7 +276,7 @@ var xxx_messageInfo_CreateOptions proto.InternalMessageInfo func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } func 
(*DeleteOptions) ProtoMessage() {} func (*DeleteOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{8} + return fileDescriptor_a8431b6e0aeeb761, []int{8} } func (m *DeleteOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -304,7 +304,7 @@ var xxx_messageInfo_DeleteOptions proto.InternalMessageInfo func (m *Duration) Reset() { *m = Duration{} } func (*Duration) ProtoMessage() {} func (*Duration) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{9} + return fileDescriptor_a8431b6e0aeeb761, []int{9} } func (m *Duration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -329,10 +329,38 @@ func (m *Duration) XXX_DiscardUnknown() { var xxx_messageInfo_Duration proto.InternalMessageInfo +func (m *FieldSelectorRequirement) Reset() { *m = FieldSelectorRequirement{} } +func (*FieldSelectorRequirement) ProtoMessage() {} +func (*FieldSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_a8431b6e0aeeb761, []int{10} +} +func (m *FieldSelectorRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FieldSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FieldSelectorRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldSelectorRequirement.Merge(m, src) +} +func (m *FieldSelectorRequirement) XXX_Size() int { + return m.Size() +} +func (m *FieldSelectorRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_FieldSelectorRequirement.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldSelectorRequirement proto.InternalMessageInfo + func (m *FieldsV1) Reset() { *m = FieldsV1{} } func (*FieldsV1) ProtoMessage() {} func (*FieldsV1) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{10} + return fileDescriptor_a8431b6e0aeeb761, []int{11} } func (m *FieldsV1) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -360,7 +388,7 @@ var xxx_messageInfo_FieldsV1 proto.InternalMessageInfo func (m *GetOptions) Reset() { *m = GetOptions{} } func (*GetOptions) ProtoMessage() {} func (*GetOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{11} + return fileDescriptor_a8431b6e0aeeb761, []int{12} } func (m *GetOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -388,7 +416,7 @@ var xxx_messageInfo_GetOptions proto.InternalMessageInfo func (m *GroupKind) Reset() { *m = GroupKind{} } func (*GroupKind) ProtoMessage() {} func (*GroupKind) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{12} + return fileDescriptor_a8431b6e0aeeb761, []int{13} } func (m *GroupKind) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -416,7 +444,7 @@ var xxx_messageInfo_GroupKind proto.InternalMessageInfo func (m *GroupResource) Reset() { *m = GroupResource{} } func (*GroupResource) ProtoMessage() {} func (*GroupResource) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{13} + return fileDescriptor_a8431b6e0aeeb761, []int{14} } func (m *GroupResource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -444,7 +472,7 @@ var xxx_messageInfo_GroupResource proto.InternalMessageInfo func (m *GroupVersion) Reset() { *m = GroupVersion{} } func (*GroupVersion) ProtoMessage() {} func (*GroupVersion) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{14} + return 
fileDescriptor_a8431b6e0aeeb761, []int{15} } func (m *GroupVersion) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -472,7 +500,7 @@ var xxx_messageInfo_GroupVersion proto.InternalMessageInfo func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} } func (*GroupVersionForDiscovery) ProtoMessage() {} func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{15} + return fileDescriptor_a8431b6e0aeeb761, []int{16} } func (m *GroupVersionForDiscovery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -500,7 +528,7 @@ var xxx_messageInfo_GroupVersionForDiscovery proto.InternalMessageInfo func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } func (*GroupVersionKind) ProtoMessage() {} func (*GroupVersionKind) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{16} + return fileDescriptor_a8431b6e0aeeb761, []int{17} } func (m *GroupVersionKind) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -528,7 +556,7 @@ var xxx_messageInfo_GroupVersionKind proto.InternalMessageInfo func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } func (*GroupVersionResource) ProtoMessage() {} func (*GroupVersionResource) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{17} + return fileDescriptor_a8431b6e0aeeb761, []int{18} } func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -556,7 +584,7 @@ var xxx_messageInfo_GroupVersionResource proto.InternalMessageInfo func (m *LabelSelector) Reset() { *m = LabelSelector{} } func (*LabelSelector) ProtoMessage() {} func (*LabelSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{18} + return fileDescriptor_a8431b6e0aeeb761, []int{19} } func (m *LabelSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -584,7 +612,7 @@ var xxx_messageInfo_LabelSelector proto.InternalMessageInfo func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } func (*LabelSelectorRequirement) ProtoMessage() {} func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{19} + return fileDescriptor_a8431b6e0aeeb761, []int{20} } func (m *LabelSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -612,7 +640,7 @@ var xxx_messageInfo_LabelSelectorRequirement proto.InternalMessageInfo func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} func (*List) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{20} + return fileDescriptor_a8431b6e0aeeb761, []int{21} } func (m *List) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -640,7 +668,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo func (m *ListMeta) Reset() { *m = ListMeta{} } func (*ListMeta) ProtoMessage() {} func (*ListMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{21} + return fileDescriptor_a8431b6e0aeeb761, []int{22} } func (m *ListMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -668,7 +696,7 @@ var xxx_messageInfo_ListMeta proto.InternalMessageInfo func (m *ListOptions) Reset() { *m = ListOptions{} } func (*ListOptions) ProtoMessage() {} func (*ListOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{22} + return fileDescriptor_a8431b6e0aeeb761, []int{23} } func (m *ListOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -696,7 +724,7 @@ var 
xxx_messageInfo_ListOptions proto.InternalMessageInfo func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} } func (*ManagedFieldsEntry) ProtoMessage() {} func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{23} + return fileDescriptor_a8431b6e0aeeb761, []int{24} } func (m *ManagedFieldsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +752,7 @@ var xxx_messageInfo_ManagedFieldsEntry proto.InternalMessageInfo func (m *MicroTime) Reset() { *m = MicroTime{} } func (*MicroTime) ProtoMessage() {} func (*MicroTime) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{24} + return fileDescriptor_a8431b6e0aeeb761, []int{25} } func (m *MicroTime) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MicroTime.Unmarshal(m, b) @@ -747,7 +775,7 @@ var xxx_messageInfo_MicroTime proto.InternalMessageInfo func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } func (*ObjectMeta) ProtoMessage() {} func (*ObjectMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{25} + return fileDescriptor_a8431b6e0aeeb761, []int{26} } func (m *ObjectMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -775,7 +803,7 @@ var xxx_messageInfo_ObjectMeta proto.InternalMessageInfo func (m *OwnerReference) Reset() { *m = OwnerReference{} } func (*OwnerReference) ProtoMessage() {} func (*OwnerReference) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{26} + return fileDescriptor_a8431b6e0aeeb761, []int{27} } func (m *OwnerReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -803,7 +831,7 @@ var xxx_messageInfo_OwnerReference proto.InternalMessageInfo func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} } func (*PartialObjectMetadata) ProtoMessage() {} func (*PartialObjectMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{27} + return fileDescriptor_a8431b6e0aeeb761, []int{28} } func (m *PartialObjectMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -831,7 +859,7 @@ var xxx_messageInfo_PartialObjectMetadata proto.InternalMessageInfo func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } func (*PartialObjectMetadataList) ProtoMessage() {} func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{28} + return fileDescriptor_a8431b6e0aeeb761, []int{29} } func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -859,7 +887,7 @@ var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo func (m *Patch) Reset() { *m = Patch{} } func (*Patch) ProtoMessage() {} func (*Patch) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{29} + return fileDescriptor_a8431b6e0aeeb761, []int{30} } func (m *Patch) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -887,7 +915,7 @@ var xxx_messageInfo_Patch proto.InternalMessageInfo func (m *PatchOptions) Reset() { *m = PatchOptions{} } func (*PatchOptions) ProtoMessage() {} func (*PatchOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{30} + return fileDescriptor_a8431b6e0aeeb761, []int{31} } func (m *PatchOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -915,7 +943,7 @@ var xxx_messageInfo_PatchOptions proto.InternalMessageInfo func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} func 
(*Preconditions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{31} + return fileDescriptor_a8431b6e0aeeb761, []int{32} } func (m *Preconditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -943,7 +971,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo func (m *RootPaths) Reset() { *m = RootPaths{} } func (*RootPaths) ProtoMessage() {} func (*RootPaths) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{32} + return fileDescriptor_a8431b6e0aeeb761, []int{33} } func (m *RootPaths) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -971,7 +999,7 @@ var xxx_messageInfo_RootPaths proto.InternalMessageInfo func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } func (*ServerAddressByClientCIDR) ProtoMessage() {} func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{33} + return fileDescriptor_a8431b6e0aeeb761, []int{34} } func (m *ServerAddressByClientCIDR) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -999,7 +1027,7 @@ var xxx_messageInfo_ServerAddressByClientCIDR proto.InternalMessageInfo func (m *Status) Reset() { *m = Status{} } func (*Status) ProtoMessage() {} func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{34} + return fileDescriptor_a8431b6e0aeeb761, []int{35} } func (m *Status) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1027,7 +1055,7 @@ var xxx_messageInfo_Status proto.InternalMessageInfo func (m *StatusCause) Reset() { *m = StatusCause{} } func (*StatusCause) ProtoMessage() {} func (*StatusCause) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{35} + return fileDescriptor_a8431b6e0aeeb761, []int{36} } func (m *StatusCause) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1055,7 +1083,7 @@ var xxx_messageInfo_StatusCause proto.InternalMessageInfo func (m *StatusDetails) Reset() { *m = StatusDetails{} } func (*StatusDetails) ProtoMessage() {} func (*StatusDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{36} + return fileDescriptor_a8431b6e0aeeb761, []int{37} } func (m *StatusDetails) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1083,7 +1111,7 @@ var xxx_messageInfo_StatusDetails proto.InternalMessageInfo func (m *TableOptions) Reset() { *m = TableOptions{} } func (*TableOptions) ProtoMessage() {} func (*TableOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{37} + return fileDescriptor_a8431b6e0aeeb761, []int{38} } func (m *TableOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1111,7 +1139,7 @@ var xxx_messageInfo_TableOptions proto.InternalMessageInfo func (m *Time) Reset() { *m = Time{} } func (*Time) ProtoMessage() {} func (*Time) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{38} + return fileDescriptor_a8431b6e0aeeb761, []int{39} } func (m *Time) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Time.Unmarshal(m, b) @@ -1134,7 +1162,7 @@ var xxx_messageInfo_Time proto.InternalMessageInfo func (m *Timestamp) Reset() { *m = Timestamp{} } func (*Timestamp) ProtoMessage() {} func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{39} + return fileDescriptor_a8431b6e0aeeb761, []int{40} } func (m *Timestamp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1162,7 +1190,7 @@ var xxx_messageInfo_Timestamp 
proto.InternalMessageInfo func (m *TypeMeta) Reset() { *m = TypeMeta{} } func (*TypeMeta) ProtoMessage() {} func (*TypeMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{40} + return fileDescriptor_a8431b6e0aeeb761, []int{41} } func (m *TypeMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1190,7 +1218,7 @@ var xxx_messageInfo_TypeMeta proto.InternalMessageInfo func (m *UpdateOptions) Reset() { *m = UpdateOptions{} } func (*UpdateOptions) ProtoMessage() {} func (*UpdateOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{41} + return fileDescriptor_a8431b6e0aeeb761, []int{42} } func (m *UpdateOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1218,7 +1246,7 @@ var xxx_messageInfo_UpdateOptions proto.InternalMessageInfo func (m *Verbs) Reset() { *m = Verbs{} } func (*Verbs) ProtoMessage() {} func (*Verbs) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{42} + return fileDescriptor_a8431b6e0aeeb761, []int{43} } func (m *Verbs) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1246,7 +1274,7 @@ var xxx_messageInfo_Verbs proto.InternalMessageInfo func (m *WatchEvent) Reset() { *m = WatchEvent{} } func (*WatchEvent) ProtoMessage() {} func (*WatchEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{43} + return fileDescriptor_a8431b6e0aeeb761, []int{44} } func (m *WatchEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1282,6 +1310,7 @@ func init() { proto.RegisterType((*CreateOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions") proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions") proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration") + proto.RegisterType((*FieldSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldSelectorRequirement") proto.RegisterType((*FieldsV1)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldsV1") proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions") proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind") @@ -1322,191 +1351,191 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptor_cf52fa777ced5367) -} - -var fileDescriptor_cf52fa777ced5367 = []byte{ - // 2867 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x4b, 0x6f, 0x24, 0x47, - 0xd9, 0x3d, 0x0f, 0x7b, 0xe6, 0x9b, 0x19, 0x3f, 0x6a, 0xbd, 0x30, 0x6b, 0x84, 0xc7, 0xe9, 0x44, - 0xd1, 0x06, 0x92, 0x71, 0x76, 0x09, 0xd1, 0x66, 0x43, 0x02, 0x1e, 0xcf, 0x7a, 0xe3, 0x64, 0x1d, - 0x5b, 0xe5, 0xdd, 0x05, 0x42, 0x84, 0xd2, 0x9e, 0x2e, 0x8f, 0x1b, 0xf7, 0x74, 0x4f, 0xaa, 0x7a, - 0xbc, 0x19, 0x38, 0x90, 0x03, 0x08, 0x90, 0x50, 0x14, 0x6e, 0x9c, 0x50, 0x22, 0xf8, 0x01, 0x88, - 0x13, 0x77, 0x90, 0xc8, 0x31, 0x88, 0x4b, 0x24, 0xd0, 0x28, 0x31, 0x07, 0x8e, 0x88, 0xab, 0x85, - 0x04, 0xaa, 0x47, 0x77, 0x57, 0xcf, 0x63, 0xdd, 0x93, 0x5d, 0x22, 0x6e, 0xd3, 0xdf, 0xbb, 0xaa, - 0xbe, 0xfa, 0xea, 0x7b, 0x0c, 0xec, 0x1c, 0x5f, 0x63, 0x75, 0xc7, 0x5f, 0x3f, 0xee, 0x1d, 0x10, - 0xea, 0x91, 0x80, 0xb0, 0xf5, 0x13, 0xe2, 0xd9, 0x3e, 0x5d, 0x57, 0x08, 0xab, 0xeb, 0x74, 0xac, - 0xd6, 0x91, 0xe3, 0x11, 0xda, 0x5f, 0xef, 0x1e, 0xb7, 0x39, 0x80, 0xad, 0x77, 0x48, 0x60, 0xad, - 0x9f, 0x5c, 0x59, 0x6f, 0x13, 0x8f, 0x50, 0x2b, 0x20, 0x76, 0xbd, 
0x4b, 0xfd, 0xc0, 0x47, 0x8f, - 0x49, 0xae, 0xba, 0xce, 0x55, 0xef, 0x1e, 0xb7, 0x39, 0x80, 0xd5, 0x39, 0x57, 0xfd, 0xe4, 0xca, - 0xca, 0x53, 0x6d, 0x27, 0x38, 0xea, 0x1d, 0xd4, 0x5b, 0x7e, 0x67, 0xbd, 0xed, 0xb7, 0xfd, 0x75, - 0xc1, 0x7c, 0xd0, 0x3b, 0x14, 0x5f, 0xe2, 0x43, 0xfc, 0x92, 0x42, 0x57, 0x26, 0x9a, 0x42, 0x7b, - 0x5e, 0xe0, 0x74, 0xc8, 0xb0, 0x15, 0x2b, 0xcf, 0x9e, 0xc7, 0xc0, 0x5a, 0x47, 0xa4, 0x63, 0x0d, - 0xf3, 0x99, 0x7f, 0xca, 0x42, 0x61, 0x63, 0x6f, 0xfb, 0x26, 0xf5, 0x7b, 0x5d, 0xb4, 0x06, 0x39, - 0xcf, 0xea, 0x90, 0xaa, 0xb1, 0x66, 0x5c, 0x2e, 0x36, 0xca, 0x1f, 0x0c, 0x6a, 0x33, 0xa7, 0x83, - 0x5a, 0xee, 0x55, 0xab, 0x43, 0xb0, 0xc0, 0x20, 0x17, 0x0a, 0x27, 0x84, 0x32, 0xc7, 0xf7, 0x58, - 0x35, 0xb3, 0x96, 0xbd, 0x5c, 0xba, 0xfa, 0x62, 0x3d, 0xcd, 0xfa, 0xeb, 0x42, 0xc1, 0x5d, 0xc9, - 0xba, 0xe5, 0xd3, 0xa6, 0xc3, 0x5a, 0xfe, 0x09, 0xa1, 0xfd, 0xc6, 0xa2, 0xd2, 0x52, 0x50, 0x48, - 0x86, 0x23, 0x0d, 0xe8, 0x47, 0x06, 0x2c, 0x76, 0x29, 0x39, 0x24, 0x94, 0x12, 0x5b, 0xe1, 0xab, - 0xd9, 0x35, 0xe3, 0x21, 0xa8, 0xad, 0x2a, 0xb5, 0x8b, 0x7b, 0x43, 0xf2, 0xf1, 0x88, 0x46, 0xf4, - 0x6b, 0x03, 0x56, 0x18, 0xa1, 0x27, 0x84, 0x6e, 0xd8, 0x36, 0x25, 0x8c, 0x35, 0xfa, 0x9b, 0xae, - 0x43, 0xbc, 0x60, 0x73, 0xbb, 0x89, 0x59, 0x35, 0x27, 0xf6, 0xe1, 0xeb, 0xe9, 0x0c, 0xda, 0x9f, - 0x24, 0xa7, 0x61, 0x2a, 0x8b, 0x56, 0x26, 0x92, 0x30, 0x7c, 0x1f, 0x33, 0xcc, 0x43, 0x28, 0x87, - 0x07, 0x79, 0xcb, 0x61, 0x01, 0xba, 0x0b, 0xb3, 0x6d, 0xfe, 0xc1, 0xaa, 0x86, 0x30, 0xb0, 0x9e, - 0xce, 0xc0, 0x50, 0x46, 0x63, 0x5e, 0xd9, 0x33, 0x2b, 0x3e, 0x19, 0x56, 0xd2, 0xcc, 0x9f, 0xe5, - 0xa0, 0xb4, 0xb1, 0xb7, 0x8d, 0x09, 0xf3, 0x7b, 0xb4, 0x45, 0x52, 0x38, 0xcd, 0x35, 0x28, 0x33, - 0xc7, 0x6b, 0xf7, 0x5c, 0x8b, 0x72, 0x68, 0x75, 0x56, 0x50, 0x2e, 0x2b, 0xca, 0xf2, 0xbe, 0x86, - 0xc3, 0x09, 0x4a, 0x74, 0x15, 0x80, 0x4b, 0x60, 0x5d, 0xab, 0x45, 0xec, 0x6a, 0x66, 0xcd, 0xb8, - 0x5c, 0x68, 0x20, 0xc5, 0x07, 0xaf, 0x46, 0x18, 0xac, 0x51, 0xa1, 0x47, 0x21, 0x2f, 0x2c, 0xad, - 0x16, 0x84, 0x9a, 0x8a, 0x22, 0xcf, 0x8b, 0x65, 0x60, 0x89, 0x43, 0x4f, 0xc0, 0x9c, 0xf2, 0xb2, - 0x6a, 0x51, 0x90, 0x2d, 0x28, 0xb2, 0xb9, 0xd0, 0x0d, 0x42, 0x3c, 0x5f, 0xdf, 0xb1, 0xe3, 0xd9, - 0xc2, 0xef, 0xb4, 0xf5, 0xbd, 0xe2, 0x78, 0x36, 0x16, 0x18, 0x74, 0x0b, 0xf2, 0x27, 0x84, 0x1e, - 0x70, 0x4f, 0xe0, 0xae, 0xf9, 0xe5, 0x74, 0x1b, 0x7d, 0x97, 0xb3, 0x34, 0x8a, 0xdc, 0x34, 0xf1, - 0x13, 0x4b, 0x21, 0xa8, 0x0e, 0xc0, 0x8e, 0x7c, 0x1a, 0x88, 0xe5, 0x55, 0xf3, 0x6b, 0xd9, 0xcb, - 0xc5, 0xc6, 0x3c, 0x5f, 0xef, 0x7e, 0x04, 0xc5, 0x1a, 0x05, 0xa7, 0x6f, 0x59, 0x01, 0x69, 0xfb, - 0xd4, 0x21, 0xac, 0x3a, 0x17, 0xd3, 0x6f, 0x46, 0x50, 0xac, 0x51, 0xa0, 0x97, 0x01, 0xb1, 0xc0, - 0xa7, 0x56, 0x9b, 0xa8, 0xa5, 0xbe, 0x64, 0xb1, 0xa3, 0x2a, 0x88, 0xd5, 0xad, 0xa8, 0xd5, 0xa1, - 0xfd, 0x11, 0x0a, 0x3c, 0x86, 0xcb, 0xfc, 0x9d, 0x01, 0x0b, 0x9a, 0x2f, 0x08, 0xbf, 0xbb, 0x06, - 0xe5, 0xb6, 0x76, 0xeb, 0x94, 0x5f, 0x44, 0xa7, 0xad, 0xdf, 0x48, 0x9c, 0xa0, 0x44, 0x04, 0x8a, - 0x54, 0x49, 0x0a, 0xa3, 0xcb, 0x95, 0xd4, 0x4e, 0x1b, 0xda, 0x10, 0x6b, 0xd2, 0x80, 0x0c, 0xc7, - 0x92, 0xcd, 0x7f, 0x18, 0xc2, 0x81, 0xc3, 0x78, 0x83, 0x2e, 0x6b, 0x31, 0xcd, 0x10, 0xdb, 0x57, - 0x9e, 0x10, 0x8f, 0xce, 0x09, 0x04, 0x99, 0xff, 0x8b, 0x40, 0x70, 0xbd, 0xf0, 0xcb, 0xf7, 0x6a, - 0x33, 0x6f, 0xff, 0x6d, 0x6d, 0xc6, 0xfc, 0x85, 0x01, 0xe5, 0x8d, 0x6e, 0xd7, 0xed, 0xef, 0x76, - 0x03, 0xb1, 0x00, 0x13, 0x66, 0x6d, 0xda, 0xc7, 0x3d, 0x4f, 0x2d, 0x14, 0xf8, 0xfd, 0x6e, 0x0a, - 0x08, 0x56, 0x18, 0x7e, 0x7f, 0x0e, 0x7d, 0xda, 0x22, 0xea, 0xba, 0x45, 0xf7, 0x67, 0x8b, 
0x03, - 0xb1, 0xc4, 0xf1, 0x43, 0x3e, 0x74, 0x88, 0x6b, 0xef, 0x58, 0x9e, 0xd5, 0x26, 0x54, 0x5d, 0x8e, - 0x68, 0xeb, 0xb7, 0x34, 0x1c, 0x4e, 0x50, 0x9a, 0xff, 0xc9, 0x40, 0x71, 0xd3, 0xf7, 0x6c, 0x27, - 0x50, 0x97, 0x2b, 0xe8, 0x77, 0x47, 0x82, 0xc7, 0xed, 0x7e, 0x97, 0x60, 0x81, 0x41, 0xcf, 0xc1, - 0x2c, 0x0b, 0xac, 0xa0, 0xc7, 0x84, 0x3d, 0xc5, 0xc6, 0x23, 0x61, 0x58, 0xda, 0x17, 0xd0, 0xb3, - 0x41, 0x6d, 0x21, 0x12, 0x27, 0x41, 0x58, 0x31, 0x70, 0x4f, 0xf7, 0x0f, 0xc4, 0x46, 0xd9, 0x37, - 0xe5, 0xb3, 0x17, 0xbe, 0x1f, 0xd9, 0xd8, 0xd3, 0x77, 0x47, 0x28, 0xf0, 0x18, 0x2e, 0x74, 0x02, - 0xc8, 0xb5, 0x58, 0x70, 0x9b, 0x5a, 0x1e, 0x13, 0xba, 0x6e, 0x3b, 0x1d, 0xa2, 0x2e, 0xfc, 0x97, - 0xd2, 0x9d, 0x38, 0xe7, 0x88, 0xf5, 0xde, 0x1a, 0x91, 0x86, 0xc7, 0x68, 0x40, 0x8f, 0xc3, 0x2c, - 0x25, 0x16, 0xf3, 0xbd, 0x6a, 0x5e, 0x2c, 0x3f, 0x8a, 0xca, 0x58, 0x40, 0xb1, 0xc2, 0xf2, 0x80, - 0xd6, 0x21, 0x8c, 0x59, 0xed, 0x30, 0xbc, 0x46, 0x01, 0x6d, 0x47, 0x82, 0x71, 0x88, 0x37, 0x7f, - 0x6b, 0x40, 0x65, 0x93, 0x12, 0x2b, 0x20, 0xd3, 0xb8, 0xc5, 0xa7, 0x3e, 0x71, 0xb4, 0x01, 0x0b, - 0xe2, 0xfb, 0xae, 0xe5, 0x3a, 0xb6, 0x3c, 0x83, 0x9c, 0x60, 0xfe, 0xbc, 0x62, 0x5e, 0xd8, 0x4a, - 0xa2, 0xf1, 0x30, 0xbd, 0xf9, 0x93, 0x2c, 0x54, 0x9a, 0xc4, 0x25, 0xb1, 0xc9, 0x5b, 0x80, 0xda, - 0xd4, 0x6a, 0x91, 0x3d, 0x42, 0x1d, 0xdf, 0xde, 0x27, 0x2d, 0xdf, 0xb3, 0x99, 0x70, 0xa3, 0x6c, - 0xe3, 0x73, 0x7c, 0x7f, 0x6f, 0x8e, 0x60, 0xf1, 0x18, 0x0e, 0xe4, 0x42, 0xa5, 0x4b, 0xc5, 0x6f, - 0xb1, 0xe7, 0xd2, 0xcb, 0x4a, 0x57, 0xbf, 0x92, 0xee, 0x48, 0xf7, 0x74, 0xd6, 0xc6, 0xd2, 0xe9, - 0xa0, 0x56, 0x49, 0x80, 0x70, 0x52, 0x38, 0xfa, 0x06, 0x2c, 0xfa, 0xb4, 0x7b, 0x64, 0x79, 0x4d, - 0xd2, 0x25, 0x9e, 0x4d, 0xbc, 0x80, 0x89, 0x8d, 0x2c, 0x34, 0x96, 0x79, 0x2e, 0xb2, 0x3b, 0x84, - 0xc3, 0x23, 0xd4, 0xe8, 0x35, 0x58, 0xea, 0x52, 0xbf, 0x6b, 0xb5, 0xc5, 0xc6, 0xec, 0xf9, 0xae, - 0xd3, 0xea, 0xab, 0xed, 0x7c, 0xf2, 0x74, 0x50, 0x5b, 0xda, 0x1b, 0x46, 0x9e, 0x0d, 0x6a, 0x17, - 0xc4, 0xd6, 0x71, 0x48, 0x8c, 0xc4, 0xa3, 0x62, 0x34, 0x37, 0xc8, 0x4f, 0x72, 0x03, 0x73, 0x1b, - 0x0a, 0xcd, 0x9e, 0xba, 0x13, 0x2f, 0x40, 0xc1, 0x56, 0xbf, 0xd5, 0xce, 0x87, 0x97, 0x33, 0xa2, - 0x39, 0x1b, 0xd4, 0x2a, 0x3c, 0xfd, 0xac, 0x87, 0x00, 0x1c, 0xb1, 0x98, 0x8f, 0x43, 0x41, 0x1c, - 0x3c, 0xbb, 0x7b, 0x05, 0x2d, 0x42, 0x16, 0x5b, 0xf7, 0x84, 0x94, 0x32, 0xe6, 0x3f, 0xb5, 0x28, - 0xb6, 0x0b, 0x70, 0x93, 0x04, 0xe1, 0xc1, 0x6f, 0xc0, 0x42, 0x18, 0xca, 0x93, 0x2f, 0x4c, 0xe4, - 0x4d, 0x38, 0x89, 0xc6, 0xc3, 0xf4, 0xe6, 0xeb, 0x50, 0x14, 0xaf, 0x10, 0x7f, 0xc2, 0xe3, 0x74, - 0xc1, 0xb8, 0x4f, 0xba, 0x10, 0xe6, 0x00, 0x99, 0x49, 0x39, 0x80, 0x66, 0xae, 0x0b, 0x15, 0xc9, - 0x1b, 0x26, 0x48, 0xa9, 0x34, 0x3c, 0x09, 0x85, 0xd0, 0x4c, 0xa5, 0x25, 0x4a, 0x8c, 0x43, 0x41, - 0x38, 0xa2, 0xd0, 0xb4, 0x1d, 0x41, 0xe2, 0x45, 0x4d, 0xa7, 0x4c, 0xcb, 0x7e, 0x32, 0xf7, 0xcf, - 0x7e, 0x34, 0x4d, 0x3f, 0x84, 0xea, 0xa4, 0x6c, 0xfa, 0x01, 0xde, 0xfc, 0xf4, 0xa6, 0x98, 0xef, - 0x18, 0xb0, 0xa8, 0x4b, 0x4a, 0x7f, 0x7c, 0xe9, 0x95, 0x9c, 0x9f, 0xed, 0x69, 0x3b, 0xf2, 0x2b, - 0x03, 0x96, 0x13, 0x4b, 0x9b, 0xea, 0xc4, 0xa7, 0x30, 0x4a, 0x77, 0x8e, 0xec, 0x14, 0xce, 0xf1, - 0x97, 0x0c, 0x54, 0x6e, 0x59, 0x07, 0xc4, 0xdd, 0x27, 0x2e, 0x69, 0x05, 0x3e, 0x45, 0x3f, 0x80, - 0x52, 0xc7, 0x0a, 0x5a, 0x47, 0x02, 0x1a, 0x56, 0x06, 0xcd, 0x74, 0xc1, 0x2e, 0x21, 0xa9, 0xbe, - 0x13, 0x8b, 0xb9, 0xe1, 0x05, 0xb4, 0xdf, 0xb8, 0xa0, 0x4c, 0x2a, 0x69, 0x18, 0xac, 0x6b, 0x13, - 0xe5, 0x9c, 0xf8, 0xbe, 0xf1, 0x56, 0x97, 0xa7, 0x2d, 0xd3, 0x57, 0x91, 0x09, 0x13, 0x30, 0x79, - 0xb3, 0xe7, 0x50, 
0xd2, 0x21, 0x5e, 0x10, 0x97, 0x73, 0x3b, 0x43, 0xf2, 0xf1, 0x88, 0xc6, 0x95, - 0x17, 0x61, 0x71, 0xd8, 0x78, 0x1e, 0x7f, 0x8e, 0x49, 0x5f, 0x9e, 0x17, 0xe6, 0x3f, 0xd1, 0x32, - 0xe4, 0x4f, 0x2c, 0xb7, 0xa7, 0x6e, 0x23, 0x96, 0x1f, 0xd7, 0x33, 0xd7, 0x0c, 0xf3, 0x37, 0x06, - 0x54, 0x27, 0x19, 0x82, 0xbe, 0xa8, 0x09, 0x6a, 0x94, 0x94, 0x55, 0xd9, 0x57, 0x48, 0x5f, 0x4a, - 0xbd, 0x01, 0x05, 0xbf, 0xcb, 0x73, 0x0a, 0x9f, 0xaa, 0x53, 0x7f, 0x22, 0x3c, 0xc9, 0x5d, 0x05, - 0x3f, 0x1b, 0xd4, 0x2e, 0x26, 0xc4, 0x87, 0x08, 0x1c, 0xb1, 0xf2, 0x48, 0x2d, 0xec, 0xe1, 0xaf, - 0x47, 0x14, 0xa9, 0xef, 0x0a, 0x08, 0x56, 0x18, 0xf3, 0xf7, 0x06, 0xe4, 0x44, 0x42, 0xfe, 0x3a, - 0x14, 0xf8, 0xfe, 0xd9, 0x56, 0x60, 0x09, 0xbb, 0x52, 0x97, 0x82, 0x9c, 0x7b, 0x87, 0x04, 0x56, - 0xec, 0x6d, 0x21, 0x04, 0x47, 0x12, 0x11, 0x86, 0xbc, 0x13, 0x90, 0x4e, 0x78, 0x90, 0x4f, 0x4d, - 0x14, 0xad, 0x1a, 0x11, 0x75, 0x6c, 0xdd, 0xbb, 0xf1, 0x56, 0x40, 0x3c, 0x7e, 0x18, 0xf1, 0xd5, - 0xd8, 0xe6, 0x32, 0xb0, 0x14, 0x65, 0xfe, 0xcb, 0x80, 0x48, 0x15, 0x77, 0x7e, 0x46, 0xdc, 0xc3, - 0x5b, 0x8e, 0x77, 0xac, 0xb6, 0x35, 0x32, 0x67, 0x5f, 0xc1, 0x71, 0x44, 0x31, 0xee, 0x79, 0xc8, - 0x4c, 0xf7, 0x3c, 0x70, 0x85, 0x2d, 0xdf, 0x0b, 0x1c, 0xaf, 0x37, 0x72, 0xdb, 0x36, 0x15, 0x1c, - 0x47, 0x14, 0x3c, 0x11, 0xa1, 0xa4, 0x63, 0x39, 0x9e, 0xe3, 0xb5, 0xf9, 0x22, 0x36, 0xfd, 0x9e, - 0x17, 0x88, 0x17, 0x59, 0x25, 0x22, 0x78, 0x04, 0x8b, 0xc7, 0x70, 0x98, 0xff, 0xce, 0x41, 0x89, - 0xaf, 0x39, 0x7c, 0xe7, 0x9e, 0x87, 0x8a, 0xab, 0x7b, 0x81, 0x5a, 0xfb, 0x45, 0x65, 0x4a, 0xf2, - 0x5e, 0xe3, 0x24, 0x2d, 0x67, 0x16, 0x29, 0x54, 0xc4, 0x9c, 0x49, 0x32, 0x6f, 0xe9, 0x48, 0x9c, - 0xa4, 0xe5, 0xd1, 0xeb, 0x1e, 0xbf, 0x1f, 0x2a, 0x33, 0x89, 0x8e, 0xe8, 0x9b, 0x1c, 0x88, 0x25, - 0x0e, 0xed, 0xc0, 0x05, 0xcb, 0x75, 0xfd, 0x7b, 0x02, 0xd8, 0xf0, 0xfd, 0xe3, 0x8e, 0x45, 0x8f, - 0x99, 0x28, 0xa6, 0x0b, 0x8d, 0x2f, 0x28, 0x96, 0x0b, 0x1b, 0xa3, 0x24, 0x78, 0x1c, 0xdf, 0xb8, - 0x63, 0xcb, 0x4d, 0x79, 0x6c, 0x47, 0xb0, 0x3c, 0x04, 0x12, 0xb7, 0x5c, 0x55, 0xb6, 0xcf, 0x28, - 0x39, 0xcb, 0x78, 0x0c, 0xcd, 0xd9, 0x04, 0x38, 0x1e, 0x2b, 0x11, 0x5d, 0x87, 0x79, 0xee, 0xc9, - 0x7e, 0x2f, 0x08, 0xf3, 0xce, 0xbc, 0x38, 0x6e, 0x74, 0x3a, 0xa8, 0xcd, 0xdf, 0x4e, 0x60, 0xf0, - 0x10, 0x25, 0xdf, 0x5c, 0xd7, 0xe9, 0x38, 0x41, 0x75, 0x4e, 0xb0, 0x44, 0x9b, 0x7b, 0x8b, 0x03, - 0xb1, 0xc4, 0x25, 0x3c, 0xb0, 0x70, 0xae, 0x07, 0x6e, 0xc2, 0x12, 0x23, 0x9e, 0xbd, 0xed, 0x39, - 0x81, 0x63, 0xb9, 0x37, 0x4e, 0x44, 0x56, 0x59, 0x12, 0x07, 0x71, 0x91, 0xa7, 0x84, 0xfb, 0xc3, - 0x48, 0x3c, 0x4a, 0x6f, 0xfe, 0x39, 0x0b, 0x48, 0x26, 0xec, 0xb6, 0x4c, 0xca, 0x64, 0x5c, 0xe4, - 0x65, 0x85, 0x4a, 0xf8, 0x8d, 0xa1, 0xb2, 0x42, 0xe5, 0xfa, 0x21, 0x1e, 0xed, 0x40, 0x51, 0xc6, - 0xa7, 0xf8, 0xce, 0xad, 0x2b, 0xe2, 0xe2, 0x6e, 0x88, 0x38, 0x1b, 0xd4, 0x56, 0x12, 0x6a, 0x22, - 0x8c, 0x28, 0xf9, 0x62, 0x09, 0xe8, 0x2a, 0x80, 0xd5, 0x75, 0xf4, 0xa6, 0x5f, 0x31, 0x6e, 0xfd, - 0xc4, 0xe5, 0x3b, 0xd6, 0xa8, 0xd0, 0x4b, 0x90, 0x0b, 0x3e, 0x5d, 0x59, 0x56, 0x10, 0x55, 0x27, - 0x2f, 0xc2, 0x84, 0x04, 0xae, 0x5d, 0x5c, 0x0a, 0xc6, 0xcd, 0x52, 0x15, 0x55, 0xa4, 0x7d, 0x2b, - 0xc2, 0x60, 0x8d, 0x0a, 0x7d, 0x0b, 0x0a, 0x87, 0x2a, 0x9f, 0x15, 0xa7, 0x9b, 0x3a, 0xce, 0x86, - 0x59, 0xb0, 0xec, 0x3b, 0x84, 0x5f, 0x38, 0x92, 0x86, 0xbe, 0x0a, 0x25, 0xd6, 0x3b, 0x88, 0x52, - 0x00, 0xe9, 0x12, 0xd1, 0x7b, 0xbb, 0x1f, 0xa3, 0xb0, 0x4e, 0x67, 0xbe, 0x09, 0xc5, 0x1d, 0xa7, - 0x45, 0x7d, 0x51, 0x48, 0x3e, 0x01, 0x73, 0x2c, 0x51, 0x25, 0x45, 0x27, 0x19, 0xba, 0x6a, 0x88, - 0xe7, 0x3e, 0xea, 0x59, 0x9e, 0x2f, 0x6b, 
0xa1, 0x7c, 0xec, 0xa3, 0xaf, 0x72, 0x20, 0x96, 0xb8, - 0xeb, 0xcb, 0x3c, 0xcb, 0xf8, 0xe9, 0xfb, 0xb5, 0x99, 0x77, 0xdf, 0xaf, 0xcd, 0xbc, 0xf7, 0xbe, - 0xca, 0x38, 0xfe, 0x00, 0x00, 0xbb, 0x07, 0xdf, 0x23, 0x2d, 0x19, 0xbb, 0x53, 0xf5, 0x06, 0xc3, - 0x96, 0xb4, 0xe8, 0x0d, 0x66, 0x86, 0x32, 0x47, 0x0d, 0x87, 0x13, 0x94, 0x68, 0x1d, 0x8a, 0x51, - 0xd7, 0x4f, 0xf9, 0xc7, 0x52, 0xe8, 0x6f, 0x51, 0x6b, 0x10, 0xc7, 0x34, 0x89, 0x87, 0x24, 0x77, - 0xee, 0x43, 0xd2, 0x80, 0x6c, 0xcf, 0xb1, 0x55, 0xd5, 0xfd, 0x74, 0xf8, 0x90, 0xdf, 0xd9, 0x6e, - 0x9e, 0x0d, 0x6a, 0x8f, 0x4c, 0x6a, 0xb6, 0x07, 0xfd, 0x2e, 0x61, 0xf5, 0x3b, 0xdb, 0x4d, 0xcc, - 0x99, 0xc7, 0x45, 0xb5, 0xd9, 0x29, 0xa3, 0xda, 0x55, 0x80, 0x76, 0xdc, 0xbb, 0x90, 0x41, 0x23, - 0x72, 0x44, 0xad, 0x67, 0xa1, 0x51, 0x21, 0x06, 0x4b, 0x2d, 0x5e, 0xdf, 0xab, 0x1e, 0x02, 0x0b, - 0xac, 0x8e, 0xec, 0x86, 0x4e, 0x77, 0x27, 0x2e, 0x29, 0x35, 0x4b, 0x9b, 0xc3, 0xc2, 0xf0, 0xa8, - 0x7c, 0xe4, 0xc3, 0x92, 0xad, 0xca, 0xcc, 0x58, 0x69, 0x71, 0x6a, 0xa5, 0x22, 0x62, 0x35, 0x87, - 0x05, 0xe1, 0x51, 0xd9, 0xe8, 0xbb, 0xb0, 0x12, 0x02, 0x47, 0x6b, 0x7d, 0x11, 0xf5, 0xb3, 0x8d, - 0xd5, 0xd3, 0x41, 0x6d, 0xa5, 0x39, 0x91, 0x0a, 0xdf, 0x47, 0x02, 0xb2, 0x61, 0xd6, 0x95, 0x59, - 0x72, 0x49, 0x64, 0x36, 0x5f, 0x4b, 0xb7, 0x8a, 0xd8, 0xfb, 0xeb, 0x7a, 0x76, 0x1c, 0xf5, 0x6d, - 0x54, 0x62, 0xac, 0x64, 0xa3, 0xb7, 0xa0, 0x64, 0x79, 0x9e, 0x1f, 0x58, 0xb2, 0xfb, 0x50, 0x16, - 0xaa, 0x36, 0xa6, 0x56, 0xb5, 0x11, 0xcb, 0x18, 0xca, 0xc6, 0x35, 0x0c, 0xd6, 0x55, 0xa1, 0x7b, - 0xb0, 0xe0, 0xdf, 0xf3, 0x08, 0xc5, 0xe4, 0x90, 0x50, 0xe2, 0xb5, 0x08, 0xab, 0x56, 0x84, 0xf6, - 0x67, 0x52, 0x6a, 0x4f, 0x30, 0xc7, 0x2e, 0x9d, 0x84, 0x33, 0x3c, 0xac, 0x05, 0xd5, 0x79, 0x6c, - 0xf5, 0x2c, 0xd7, 0xf9, 0x3e, 0xa1, 0xac, 0x3a, 0x1f, 0x37, 0xac, 0xb7, 0x22, 0x28, 0xd6, 0x28, - 0x50, 0x0f, 0x2a, 0x1d, 0xfd, 0xc9, 0xa8, 0x2e, 0x09, 0x33, 0xaf, 0xa5, 0x33, 0x73, 0xf4, 0x51, - 0x8b, 0xd3, 0xa0, 0x04, 0x0e, 0x27, 0xb5, 0xac, 0x3c, 0x07, 0xa5, 0x4f, 0x59, 0x21, 0xf0, 0x0a, - 0x63, 0xf8, 0x40, 0xa6, 0xaa, 0x30, 0xfe, 0x98, 0x81, 0xf9, 0xe4, 0x36, 0x0e, 0x3d, 0x87, 0xf9, - 0x54, 0xcf, 0x61, 0x58, 0xcb, 0x1a, 0x13, 0x27, 0x17, 0x61, 0x7c, 0xce, 0x4e, 0x8c, 0xcf, 0x2a, - 0x0c, 0xe6, 0x1e, 0x24, 0x0c, 0xd6, 0x01, 0x78, 0xb2, 0x42, 0x7d, 0xd7, 0x25, 0x54, 0x44, 0xc0, - 0x82, 0x9a, 0x50, 0x44, 0x50, 0xac, 0x51, 0xf0, 0x94, 0xfa, 0xc0, 0xf5, 0x5b, 0xc7, 0x62, 0x0b, - 0xc2, 0xdb, 0x2b, 0x62, 0x5f, 0x41, 0xa6, 0xd4, 0x8d, 0x11, 0x2c, 0x1e, 0xc3, 0x61, 0xf6, 0xe1, - 0xe2, 0x9e, 0x45, 0x79, 0x92, 0x13, 0xdf, 0x14, 0x51, 0xb3, 0xbc, 0x31, 0x52, 0x11, 0x3d, 0x3d, - 0xed, 0x8d, 0x8b, 0x37, 0x3f, 0x86, 0xc5, 0x55, 0x91, 0xf9, 0x57, 0x03, 0x2e, 0x8d, 0xd5, 0xfd, - 0x19, 0x54, 0x64, 0x6f, 0x24, 0x2b, 0xb2, 0xe7, 0x53, 0xb6, 0x32, 0xc7, 0x59, 0x3b, 0xa1, 0x3e, - 0x9b, 0x83, 0xfc, 0x1e, 0xcf, 0x84, 0xcd, 0x0f, 0x0d, 0x28, 0x8b, 0x5f, 0xd3, 0x74, 0x92, 0x6b, - 0xc9, 0x01, 0x43, 0xf1, 0xe1, 0x0d, 0x17, 0x1e, 0x46, 0xab, 0xf9, 0x1d, 0x03, 0x92, 0x3d, 0x5c, - 0xf4, 0xa2, 0xbc, 0x02, 0x46, 0xd4, 0x64, 0x9d, 0xd2, 0xfd, 0x5f, 0x98, 0x54, 0x92, 0x5e, 0x48, - 0xd5, 0xad, 0x7c, 0x12, 0x8a, 0xd8, 0xf7, 0x83, 0x3d, 0x2b, 0x38, 0x62, 0x7c, 0xef, 0xba, 0xfc, - 0x87, 0xda, 0x5e, 0xb1, 0x77, 0x02, 0x83, 0x25, 0xdc, 0xfc, 0xb9, 0x01, 0x97, 0x26, 0xce, 0x8d, - 0x78, 0x14, 0x69, 0x45, 0x5f, 0x6a, 0x45, 0x91, 0x23, 0xc7, 0x74, 0x58, 0xa3, 0xe2, 0xb5, 0x64, - 0x62, 0xd8, 0x34, 0x5c, 0x4b, 0x26, 0xb4, 0xe1, 0x24, 0xad, 0xf9, 0xcf, 0x0c, 0xa8, 0x41, 0xcd, - 0xff, 0xd8, 0xe9, 0x1f, 0x1f, 0x1a, 0x13, 0xcd, 0x27, 0xc7, 0x44, 
0xd1, 0x4c, 0x48, 0x9b, 0x93, - 0x64, 0xef, 0x3f, 0x27, 0x41, 0xcf, 0x46, 0xa3, 0x17, 0xe9, 0x43, 0xab, 0xc9, 0xd1, 0xcb, 0xd9, - 0xa0, 0x56, 0x56, 0xc2, 0x93, 0xa3, 0x98, 0xd7, 0x60, 0xce, 0x26, 0x81, 0xe5, 0xb8, 0xb2, 0x2e, - 0x4c, 0x3d, 0x4c, 0x90, 0xc2, 0x9a, 0x92, 0xb5, 0x51, 0xe2, 0x36, 0xa9, 0x0f, 0x1c, 0x0a, 0xe4, - 0x01, 0xbb, 0xe5, 0xdb, 0xb2, 0x22, 0xc9, 0xc7, 0x01, 0x7b, 0xd3, 0xb7, 0x09, 0x16, 0x18, 0xf3, - 0x5d, 0x03, 0x4a, 0x52, 0xd2, 0xa6, 0xd5, 0x63, 0x04, 0x5d, 0x89, 0x56, 0x21, 0x8f, 0xfb, 0x92, - 0x3e, 0x63, 0x3b, 0x1b, 0xd4, 0x8a, 0x82, 0x4c, 0x14, 0x33, 0x63, 0x66, 0x49, 0x99, 0x73, 0xf6, - 0xe8, 0x51, 0xc8, 0x8b, 0x0b, 0xa4, 0x36, 0x33, 0x1e, 0x16, 0x72, 0x20, 0x96, 0x38, 0xf3, 0xe3, - 0x0c, 0x54, 0x12, 0x8b, 0x4b, 0x51, 0x17, 0x44, 0x2d, 0xd4, 0x4c, 0x8a, 0xb6, 0xfc, 0xe4, 0xd1, - 0xbc, 0x7a, 0xbe, 0x66, 0x1f, 0xe4, 0xf9, 0xfa, 0x36, 0xcc, 0xb6, 0xf8, 0x1e, 0x85, 0xff, 0xf4, - 0xb8, 0x32, 0xcd, 0x71, 0x8a, 0xdd, 0x8d, 0xbd, 0x51, 0x7c, 0x32, 0xac, 0x04, 0xa2, 0x9b, 0xb0, - 0x44, 0x49, 0x40, 0xfb, 0x1b, 0x87, 0x01, 0xa1, 0x7a, 0x33, 0x21, 0x1f, 0x67, 0xdf, 0x78, 0x98, - 0x00, 0x8f, 0xf2, 0x98, 0x07, 0x50, 0xbe, 0x6d, 0x1d, 0xb8, 0xd1, 0x78, 0x0c, 0x43, 0xc5, 0xf1, - 0x5a, 0x6e, 0xcf, 0x26, 0x32, 0xa0, 0x87, 0xd1, 0x2b, 0xbc, 0xb4, 0xdb, 0x3a, 0xf2, 0x6c, 0x50, - 0xbb, 0x90, 0x00, 0xc8, 0x79, 0x10, 0x4e, 0x8a, 0x30, 0x5d, 0xc8, 0x7d, 0x86, 0x95, 0xe4, 0x77, - 0xa0, 0x18, 0xe7, 0xfa, 0x0f, 0x59, 0xa5, 0xf9, 0x06, 0x14, 0xb8, 0xc7, 0x87, 0x35, 0xea, 0x39, - 0x59, 0x52, 0x32, 0xf7, 0xca, 0xa4, 0xc9, 0xbd, 0xc4, 0x90, 0xf5, 0x4e, 0xd7, 0x7e, 0xc0, 0x21, - 0x6b, 0xe6, 0x41, 0x5e, 0xbe, 0xec, 0x94, 0x2f, 0xdf, 0x55, 0x90, 0x7f, 0x44, 0xe1, 0x8f, 0x8c, - 0x4c, 0x20, 0xb4, 0x47, 0x46, 0x7f, 0xff, 0xb5, 0x09, 0xc3, 0x8f, 0x0d, 0x00, 0xd1, 0xca, 0x13, - 0x6d, 0xa4, 0x14, 0xe3, 0xfc, 0x3b, 0x30, 0xeb, 0x4b, 0x8f, 0x94, 0x83, 0xd6, 0x29, 0xfb, 0xc5, - 0xd1, 0x45, 0x92, 0x3e, 0x89, 0x95, 0xb0, 0xc6, 0xcb, 0x1f, 0x7c, 0xb2, 0x3a, 0xf3, 0xe1, 0x27, - 0xab, 0x33, 0x1f, 0x7d, 0xb2, 0x3a, 0xf3, 0xf6, 0xe9, 0xaa, 0xf1, 0xc1, 0xe9, 0xaa, 0xf1, 0xe1, - 0xe9, 0xaa, 0xf1, 0xd1, 0xe9, 0xaa, 0xf1, 0xf1, 0xe9, 0xaa, 0xf1, 0xee, 0xdf, 0x57, 0x67, 0x5e, - 0x7b, 0x2c, 0xcd, 0x1f, 0xfc, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x28, 0x27, 0x65, 0xab, 0x20, - 0x28, 0x00, 0x00, + proto.RegisterFile("k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptor_a8431b6e0aeeb761) +} + +var fileDescriptor_a8431b6e0aeeb761 = []byte{ + // 2873 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x5d, 0x6f, 0x23, 0x57, + 0x35, 0x63, 0xc7, 0x89, 0x7d, 0x6c, 0xe7, 0xe3, 0x6e, 0x16, 0xbc, 0x41, 0xc4, 0xe9, 0xb4, 0xaa, + 0xb6, 0xd0, 0x3a, 0xdd, 0xa5, 0x54, 0xdb, 0x2d, 0x2d, 0xc4, 0xf1, 0x66, 0x9b, 0x76, 0xd3, 0x44, + 0x37, 0xbb, 0x0b, 0x94, 0x0a, 0x75, 0xe2, 0xb9, 0x71, 0x86, 0x8c, 0x67, 0xdc, 0x7b, 0xc7, 0x49, + 0x0d, 0x0f, 0xf4, 0x01, 0x04, 0x48, 0xa8, 0x2a, 0x6f, 0x3c, 0xa1, 0x56, 0xf0, 0x03, 0x10, 0x4f, + 0xbc, 0x83, 0x44, 0x1f, 0x8b, 0x78, 0xa9, 0x04, 0xb2, 0xba, 0xe1, 0x81, 0x47, 0xc4, 0x6b, 0x84, + 0x04, 0xba, 0x1f, 0x33, 0x73, 0xc7, 0x1f, 0x9b, 0xf1, 0xee, 0x52, 0xf1, 0xe6, 0x39, 0xdf, 0xf7, + 0xde, 0x73, 0xce, 0x3d, 0xe7, 0x5c, 0xc3, 0x73, 0x47, 0xd7, 0x58, 0xcd, 0xf1, 0xd7, 0xac, 0x8e, + 0xd3, 0xb6, 0x9a, 0x87, 0x8e, 0x47, 0x68, 0x6f, 0xad, 0x73, 0xd4, 0xe2, 0x00, 0xb6, 0xd6, 0x26, + 0x81, 0xb5, 0x76, 0x7c, 0x65, 0xad, 0x45, 0x3c, 0x42, 0xad, 0x80, 0xd8, 0xb5, 0x0e, 0xf5, 0x03, + 0x1f, 0x3d, 0x21, 0xb9, 0x6a, 0x3a, 0x57, 0xad, 0x73, 
0xd4, 0xe2, 0x00, 0x56, 0xe3, 0x5c, 0xb5, + 0xe3, 0x2b, 0xcb, 0xcf, 0xb4, 0x9c, 0xe0, 0xb0, 0xbb, 0x5f, 0x6b, 0xfa, 0xed, 0xb5, 0x96, 0xdf, + 0xf2, 0xd7, 0x04, 0xf3, 0x7e, 0xf7, 0x40, 0x7c, 0x89, 0x0f, 0xf1, 0x4b, 0x0a, 0x5d, 0x5e, 0x1b, + 0x67, 0x0a, 0xed, 0x7a, 0x81, 0xd3, 0x26, 0x83, 0x56, 0x2c, 0x3f, 0x7f, 0x1e, 0x03, 0x6b, 0x1e, + 0x92, 0xb6, 0x35, 0xc8, 0x67, 0xfe, 0x29, 0x0b, 0xf9, 0xf5, 0xdd, 0xad, 0x9b, 0xd4, 0xef, 0x76, + 0xd0, 0x2a, 0x4c, 0x7b, 0x56, 0x9b, 0x54, 0x8c, 0x55, 0xe3, 0x72, 0xa1, 0x5e, 0xfa, 0xa8, 0x5f, + 0x9d, 0x3a, 0xed, 0x57, 0xa7, 0x5f, 0xb7, 0xda, 0x04, 0x0b, 0x0c, 0x72, 0x21, 0x7f, 0x4c, 0x28, + 0x73, 0x7c, 0x8f, 0x55, 0x32, 0xab, 0xd9, 0xcb, 0xc5, 0xab, 0x2f, 0xd7, 0xd2, 0xac, 0xbf, 0x26, + 0x14, 0xdc, 0x95, 0xac, 0x9b, 0x3e, 0x6d, 0x38, 0xac, 0xe9, 0x1f, 0x13, 0xda, 0xab, 0x2f, 0x28, + 0x2d, 0x79, 0x85, 0x64, 0x38, 0xd2, 0x80, 0x7e, 0x64, 0xc0, 0x42, 0x87, 0x92, 0x03, 0x42, 0x29, + 0xb1, 0x15, 0xbe, 0x92, 0x5d, 0x35, 0x1e, 0x81, 0xda, 0x8a, 0x52, 0xbb, 0xb0, 0x3b, 0x20, 0x1f, + 0x0f, 0x69, 0x44, 0xbf, 0x36, 0x60, 0x99, 0x11, 0x7a, 0x4c, 0xe8, 0xba, 0x6d, 0x53, 0xc2, 0x58, + 0xbd, 0xb7, 0xe1, 0x3a, 0xc4, 0x0b, 0x36, 0xb6, 0x1a, 0x98, 0x55, 0xa6, 0xc5, 0x3e, 0x7c, 0x3d, + 0x9d, 0x41, 0x7b, 0xe3, 0xe4, 0xd4, 0x4d, 0x65, 0xd1, 0xf2, 0x58, 0x12, 0x86, 0xef, 0x63, 0x86, + 0x79, 0x00, 0xa5, 0xf0, 0x20, 0x6f, 0x39, 0x2c, 0x40, 0x77, 0x61, 0xa6, 0xc5, 0x3f, 0x58, 0xc5, + 0x10, 0x06, 0xd6, 0xd2, 0x19, 0x18, 0xca, 0xa8, 0xcf, 0x29, 0x7b, 0x66, 0xc4, 0x27, 0xc3, 0x4a, + 0x9a, 0xf9, 0xb3, 0x69, 0x28, 0xae, 0xef, 0x6e, 0x61, 0xc2, 0xfc, 0x2e, 0x6d, 0x92, 0x14, 0x4e, + 0x73, 0x0d, 0x4a, 0xcc, 0xf1, 0x5a, 0x5d, 0xd7, 0xa2, 0x1c, 0x5a, 0x99, 0x11, 0x94, 0x4b, 0x8a, + 0xb2, 0xb4, 0xa7, 0xe1, 0x70, 0x82, 0x12, 0x5d, 0x05, 0xe0, 0x12, 0x58, 0xc7, 0x6a, 0x12, 0xbb, + 0x92, 0x59, 0x35, 0x2e, 0xe7, 0xeb, 0x48, 0xf1, 0xc1, 0xeb, 0x11, 0x06, 0x6b, 0x54, 0xe8, 0x71, + 0xc8, 0x09, 0x4b, 0x2b, 0x79, 0xa1, 0xa6, 0xac, 0xc8, 0x73, 0x62, 0x19, 0x58, 0xe2, 0xd0, 0x53, + 0x30, 0xab, 0xbc, 0xac, 0x52, 0x10, 0x64, 0xf3, 0x8a, 0x6c, 0x36, 0x74, 0x83, 0x10, 0xcf, 0xd7, + 0x77, 0xe4, 0x78, 0xb6, 0xf0, 0x3b, 0x6d, 0x7d, 0xaf, 0x39, 0x9e, 0x8d, 0x05, 0x06, 0xdd, 0x82, + 0xdc, 0x31, 0xa1, 0xfb, 0xdc, 0x13, 0xb8, 0x6b, 0x7e, 0x39, 0xdd, 0x46, 0xdf, 0xe5, 0x2c, 0xf5, + 0x02, 0x37, 0x4d, 0xfc, 0xc4, 0x52, 0x08, 0xaa, 0x01, 0xb0, 0x43, 0x9f, 0x06, 0x62, 0x79, 0x95, + 0xdc, 0x6a, 0xf6, 0x72, 0xa1, 0x3e, 0xc7, 0xd7, 0xbb, 0x17, 0x41, 0xb1, 0x46, 0xc1, 0xe9, 0x9b, + 0x56, 0x40, 0x5a, 0x3e, 0x75, 0x08, 0xab, 0xcc, 0xc6, 0xf4, 0x1b, 0x11, 0x14, 0x6b, 0x14, 0xe8, + 0x55, 0x40, 0x2c, 0xf0, 0xa9, 0xd5, 0x22, 0x6a, 0xa9, 0xaf, 0x58, 0xec, 0xb0, 0x02, 0x62, 0x75, + 0xcb, 0x6a, 0x75, 0x68, 0x6f, 0x88, 0x02, 0x8f, 0xe0, 0x32, 0x7f, 0x67, 0xc0, 0xbc, 0xe6, 0x0b, + 0xc2, 0xef, 0xae, 0x41, 0xa9, 0xa5, 0x45, 0x9d, 0xf2, 0x8b, 0xe8, 0xb4, 0xf5, 0x88, 0xc4, 0x09, + 0x4a, 0x44, 0xa0, 0x40, 0x95, 0xa4, 0x30, 0xbb, 0x5c, 0x49, 0xed, 0xb4, 0xa1, 0x0d, 0xb1, 0x26, + 0x0d, 0xc8, 0x70, 0x2c, 0xd9, 0xfc, 0x87, 0x21, 0x1c, 0x38, 0xcc, 0x37, 0xe8, 0xb2, 0x96, 0xd3, + 0x0c, 0xb1, 0x7d, 0xa5, 0x31, 0xf9, 0xe8, 0x9c, 0x44, 0x90, 0xf9, 0xbf, 0x48, 0x04, 0xd7, 0xf3, + 0xbf, 0xfc, 0xa0, 0x3a, 0xf5, 0xee, 0xdf, 0x56, 0xa7, 0xcc, 0x5f, 0x18, 0x50, 0x5a, 0xef, 0x74, + 0xdc, 0xde, 0x4e, 0x27, 0x10, 0x0b, 0x30, 0x61, 0xc6, 0xa6, 0x3d, 0xdc, 0xf5, 0xd4, 0x42, 0x81, + 0xc7, 0x77, 0x43, 0x40, 0xb0, 0xc2, 0xf0, 0xf8, 0x39, 0xf0, 0x69, 0x93, 0xa8, 0x70, 0x8b, 0xe2, + 0x67, 0x93, 0x03, 0xb1, 0xc4, 0xf1, 0x43, 0x3e, 0x70, 0x88, 0x6b, 0x6f, 0x5b, 
0x9e, 0xd5, 0x22, + 0x54, 0x05, 0x47, 0xb4, 0xf5, 0x9b, 0x1a, 0x0e, 0x27, 0x28, 0xcd, 0xff, 0x64, 0xa0, 0xb0, 0xe1, + 0x7b, 0xb6, 0x13, 0xa8, 0xe0, 0x0a, 0x7a, 0x9d, 0xa1, 0xe4, 0x71, 0xbb, 0xd7, 0x21, 0x58, 0x60, + 0xd0, 0x0b, 0x30, 0xc3, 0x02, 0x2b, 0xe8, 0x32, 0x61, 0x4f, 0xa1, 0xfe, 0x58, 0x98, 0x96, 0xf6, + 0x04, 0xf4, 0xac, 0x5f, 0x9d, 0x8f, 0xc4, 0x49, 0x10, 0x56, 0x0c, 0xdc, 0xd3, 0xfd, 0x7d, 0xb1, + 0x51, 0xf6, 0x4d, 0x79, 0xed, 0x85, 0xf7, 0x47, 0x36, 0xf6, 0xf4, 0x9d, 0x21, 0x0a, 0x3c, 0x82, + 0x0b, 0x1d, 0x03, 0x72, 0x2d, 0x16, 0xdc, 0xa6, 0x96, 0xc7, 0x84, 0xae, 0xdb, 0x4e, 0x9b, 0xa8, + 0x80, 0xff, 0x52, 0xba, 0x13, 0xe7, 0x1c, 0xb1, 0xde, 0x5b, 0x43, 0xd2, 0xf0, 0x08, 0x0d, 0xe8, + 0x49, 0x98, 0xa1, 0xc4, 0x62, 0xbe, 0x57, 0xc9, 0x89, 0xe5, 0x47, 0x59, 0x19, 0x0b, 0x28, 0x56, + 0x58, 0x9e, 0xd0, 0xda, 0x84, 0x31, 0xab, 0x15, 0xa6, 0xd7, 0x28, 0xa1, 0x6d, 0x4b, 0x30, 0x0e, + 0xf1, 0xe6, 0x6f, 0x0d, 0x28, 0x6f, 0x50, 0x62, 0x05, 0x64, 0x12, 0xb7, 0x78, 0xe0, 0x13, 0x47, + 0xeb, 0x30, 0x2f, 0xbe, 0xef, 0x5a, 0xae, 0x63, 0xcb, 0x33, 0x98, 0x16, 0xcc, 0x9f, 0x57, 0xcc, + 0xf3, 0x9b, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0x7f, 0x92, 0x85, 0x72, 0x83, 0xb8, 0x24, 0x36, 0x79, + 0x13, 0x50, 0x8b, 0x5a, 0x4d, 0xb2, 0x4b, 0xa8, 0xe3, 0xdb, 0x7b, 0xa4, 0xe9, 0x7b, 0x36, 0x13, + 0x6e, 0x94, 0xad, 0x7f, 0x8e, 0xef, 0xef, 0xcd, 0x21, 0x2c, 0x1e, 0xc1, 0x81, 0x5c, 0x28, 0x77, + 0xa8, 0xf8, 0x2d, 0xf6, 0x5c, 0x7a, 0x59, 0xf1, 0xea, 0x57, 0xd2, 0x1d, 0xe9, 0xae, 0xce, 0x5a, + 0x5f, 0x3c, 0xed, 0x57, 0xcb, 0x09, 0x10, 0x4e, 0x0a, 0x47, 0xdf, 0x80, 0x05, 0x9f, 0x76, 0x0e, + 0x2d, 0xaf, 0x41, 0x3a, 0xc4, 0xb3, 0x89, 0x17, 0x30, 0xb1, 0x91, 0xf9, 0xfa, 0x12, 0xaf, 0x45, + 0x76, 0x06, 0x70, 0x78, 0x88, 0x1a, 0xbd, 0x01, 0x8b, 0x1d, 0xea, 0x77, 0xac, 0x96, 0xd8, 0x98, + 0x5d, 0xdf, 0x75, 0x9a, 0x3d, 0xb5, 0x9d, 0x4f, 0x9f, 0xf6, 0xab, 0x8b, 0xbb, 0x83, 0xc8, 0xb3, + 0x7e, 0xf5, 0x82, 0xd8, 0x3a, 0x0e, 0x89, 0x91, 0x78, 0x58, 0x8c, 0xe6, 0x06, 0xb9, 0x71, 0x6e, + 0x60, 0x6e, 0x41, 0xbe, 0xd1, 0x55, 0x31, 0xf1, 0x12, 0xe4, 0x6d, 0xf5, 0x5b, 0xed, 0x7c, 0x18, + 0x9c, 0x11, 0xcd, 0x59, 0xbf, 0x5a, 0xe6, 0xe5, 0x67, 0x2d, 0x04, 0xe0, 0x88, 0xc5, 0xfc, 0x8d, + 0x01, 0x15, 0x71, 0xf2, 0x7b, 0xc4, 0x25, 0xcd, 0xc0, 0xa7, 0x98, 0xbc, 0xdd, 0x75, 0x28, 0x69, + 0x13, 0x2f, 0x40, 0x5f, 0x84, 0xec, 0x11, 0xe9, 0xa9, 0xbc, 0x50, 0x54, 0x62, 0xb3, 0xaf, 0x91, + 0x1e, 0xe6, 0x70, 0x74, 0x03, 0xf2, 0x7e, 0x87, 0xc7, 0xa6, 0x4f, 0x55, 0x5e, 0x78, 0x2a, 0x54, + 0xbd, 0xa3, 0xe0, 0x67, 0xfd, 0xea, 0xc5, 0x84, 0xf8, 0x10, 0x81, 0x23, 0x56, 0xbe, 0xe2, 0x63, + 0xcb, 0xed, 0x12, 0x7e, 0x0a, 0xd1, 0x8a, 0xef, 0x0a, 0x08, 0x56, 0x18, 0xf3, 0x49, 0xc8, 0x0b, + 0x31, 0xec, 0xee, 0x15, 0xb4, 0x00, 0x59, 0x6c, 0x9d, 0x08, 0xab, 0x4a, 0x98, 0xff, 0xd4, 0x92, + 0xed, 0x0e, 0xc0, 0x4d, 0x12, 0x84, 0xfe, 0xb9, 0x0e, 0xf3, 0xe1, 0x8d, 0x93, 0xbc, 0x08, 0x23, + 0xa7, 0xc7, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0xdf, 0x84, 0x82, 0xb8, 0x2c, 0x79, 0xa5, 0x11, 0x57, + 0x35, 0xc6, 0x7d, 0xaa, 0x9a, 0xb0, 0x54, 0xc9, 0x8c, 0x2b, 0x55, 0x34, 0x73, 0x5d, 0x28, 0x4b, + 0xde, 0xb0, 0x8e, 0x4b, 0xa5, 0xe1, 0x69, 0xc8, 0x87, 0x66, 0x2a, 0x2d, 0x51, 0xfd, 0x1e, 0x0a, + 0xc2, 0x11, 0x85, 0xa6, 0xed, 0x10, 0x12, 0x17, 0x7f, 0x3a, 0x65, 0x5a, 0x91, 0x96, 0xb9, 0x7f, + 0x91, 0xa6, 0x69, 0xfa, 0x21, 0x54, 0xc6, 0x15, 0xfd, 0x0f, 0x51, 0x9a, 0xa4, 0x37, 0xc5, 0x7c, + 0xcf, 0x80, 0x05, 0x5d, 0x52, 0xfa, 0xe3, 0x4b, 0xaf, 0xe4, 0xfc, 0xa2, 0x54, 0xdb, 0x91, 0x5f, + 0x19, 0xb0, 0x94, 0x58, 0xda, 0x44, 0x27, 0x3e, 0x81, 0x51, 0xba, 0x73, 0x64, 0x27, 0x70, 0x8e, + 0xbf, 
0x64, 0xa0, 0x7c, 0xcb, 0xda, 0x27, 0x6e, 0x18, 0xa9, 0xe8, 0x07, 0x50, 0x6c, 0x5b, 0x41, + 0xf3, 0x50, 0x40, 0xc3, 0x06, 0xa6, 0x91, 0x2e, 0x27, 0x27, 0x24, 0xd5, 0xb6, 0x63, 0x31, 0x37, + 0xbc, 0x80, 0xf6, 0xea, 0x17, 0x94, 0x49, 0x45, 0x0d, 0x83, 0x75, 0x6d, 0xa2, 0xeb, 0x14, 0xdf, + 0x37, 0xde, 0xe9, 0xf0, 0xea, 0x6a, 0xf2, 0x66, 0x37, 0x61, 0x82, 0x96, 0xd5, 0xe2, 0xae, 0x73, + 0x7b, 0x40, 0x3e, 0x1e, 0xd2, 0xb8, 0xfc, 0x32, 0x2c, 0x0c, 0x1a, 0xcf, 0xf3, 0x4f, 0x94, 0x15, + 0x65, 0x22, 0x5c, 0x82, 0x9c, 0xc8, 0x53, 0xf2, 0x70, 0xb0, 0xfc, 0xb8, 0x9e, 0xb9, 0x66, 0x88, + 0xf4, 0x3a, 0xce, 0x90, 0x47, 0x94, 0x5e, 0x13, 0xe2, 0x1f, 0x30, 0xbd, 0xfe, 0xde, 0x80, 0x69, + 0xd1, 0x37, 0xbc, 0x09, 0x79, 0xbe, 0x7f, 0xb6, 0x15, 0x58, 0xc2, 0xae, 0xd4, 0x1d, 0x2b, 0xe7, + 0xde, 0x26, 0x81, 0x15, 0x7b, 0x5b, 0x08, 0xc1, 0x91, 0x44, 0x84, 0x21, 0xe7, 0x04, 0xa4, 0x1d, + 0x1e, 0xe4, 0x33, 0x63, 0x45, 0xab, 0x79, 0x49, 0x0d, 0x5b, 0x27, 0x37, 0xde, 0x09, 0x88, 0xc7, + 0x0f, 0x23, 0x0e, 0x8d, 0x2d, 0x2e, 0x03, 0x4b, 0x51, 0xe6, 0xbf, 0x0c, 0x88, 0x54, 0x71, 0xe7, + 0x67, 0xc4, 0x3d, 0xb8, 0xe5, 0x78, 0x47, 0x6a, 0x5b, 0x23, 0x73, 0xf6, 0x14, 0x1c, 0x47, 0x14, + 0xa3, 0xae, 0x87, 0xcc, 0x64, 0xd7, 0x03, 0x57, 0xd8, 0xf4, 0xbd, 0xc0, 0xf1, 0xba, 0x43, 0xd1, + 0xb6, 0xa1, 0xe0, 0x38, 0xa2, 0xe0, 0xf5, 0x12, 0x25, 0x6d, 0xcb, 0xf1, 0x1c, 0xaf, 0xc5, 0x17, + 0xb1, 0xe1, 0x77, 0xbd, 0x40, 0x14, 0x0e, 0xaa, 0x5e, 0xc2, 0x43, 0x58, 0x3c, 0x82, 0xc3, 0xfc, + 0xf7, 0x34, 0x14, 0xf9, 0x9a, 0xc3, 0x7b, 0xee, 0x45, 0x28, 0xbb, 0xba, 0x17, 0xa8, 0xb5, 0x5f, + 0x54, 0xa6, 0x24, 0xe3, 0x1a, 0x27, 0x69, 0x39, 0xf3, 0x81, 0x7e, 0x43, 0xab, 0x3d, 0x88, 0x98, + 0x93, 0xd5, 0x41, 0x92, 0x96, 0x67, 0xaf, 0x13, 0x1e, 0x1f, 0xaa, 0x80, 0x8a, 0x8e, 0xe8, 0x9b, + 0x1c, 0x88, 0x25, 0x0e, 0x6d, 0xc3, 0x05, 0xcb, 0x75, 0xfd, 0x13, 0x01, 0xac, 0xfb, 0xfe, 0x51, + 0xdb, 0xa2, 0x47, 0x4c, 0xf4, 0xfc, 0xf9, 0xfa, 0x17, 0x14, 0xcb, 0x85, 0xf5, 0x61, 0x12, 0x3c, + 0x8a, 0x6f, 0xd4, 0xb1, 0x4d, 0x4f, 0x78, 0x6c, 0x87, 0xb0, 0x34, 0x00, 0x12, 0x51, 0xae, 0x1a, + 0xf0, 0xe7, 0x94, 0x9c, 0x25, 0x3c, 0x82, 0xe6, 0x6c, 0x0c, 0x1c, 0x8f, 0x94, 0x88, 0xae, 0xc3, + 0x1c, 0xf7, 0x64, 0xbf, 0x1b, 0x84, 0xe5, 0x71, 0x4e, 0x1c, 0x37, 0x3a, 0xed, 0x57, 0xe7, 0x6e, + 0x27, 0x30, 0x78, 0x80, 0x92, 0x6f, 0xae, 0xeb, 0xb4, 0x9d, 0xa0, 0x32, 0x2b, 0x58, 0xa2, 0xcd, + 0xbd, 0xc5, 0x81, 0x58, 0xe2, 0x12, 0x1e, 0x98, 0x3f, 0xd7, 0x03, 0x37, 0x60, 0x91, 0x11, 0xcf, + 0xde, 0xf2, 0x9c, 0xc0, 0xb1, 0xdc, 0x1b, 0xc7, 0xa2, 0xf8, 0x2d, 0x8a, 0x83, 0xb8, 0xc8, 0x2b, + 0xd7, 0xbd, 0x41, 0x24, 0x1e, 0xa6, 0x37, 0xff, 0x9c, 0x05, 0x24, 0xfb, 0x0a, 0x5b, 0x16, 0x65, + 0x32, 0x2f, 0xf2, 0xee, 0x47, 0xf5, 0x25, 0xc6, 0x40, 0xf7, 0xa3, 0x5a, 0x92, 0x10, 0x8f, 0xb6, + 0xa1, 0x20, 0xf3, 0x53, 0x1c, 0x73, 0x6b, 0x8a, 0xb8, 0xb0, 0x13, 0x22, 0xce, 0xfa, 0xd5, 0xe5, + 0x84, 0x9a, 0x08, 0x23, 0x3a, 0xd3, 0x58, 0x02, 0xba, 0x0a, 0x60, 0x75, 0x1c, 0x7d, 0x36, 0x59, + 0x88, 0x27, 0x54, 0xf1, 0x94, 0x01, 0x6b, 0x54, 0xe8, 0x15, 0x98, 0x0e, 0x1e, 0xac, 0x7b, 0xcc, + 0x8b, 0xe6, 0x98, 0xf7, 0x8a, 0x42, 0x02, 0xd7, 0x2e, 0x82, 0x82, 0x71, 0xb3, 0x54, 0xe3, 0x17, + 0x69, 0xdf, 0x8c, 0x30, 0x58, 0xa3, 0x42, 0xdf, 0x82, 0xfc, 0x81, 0xaa, 0x67, 0xc5, 0xe9, 0xa6, + 0xce, 0xb3, 0x61, 0x15, 0x2c, 0xc7, 0x23, 0xe1, 0x17, 0x8e, 0xa4, 0xa1, 0xaf, 0x42, 0x91, 0x75, + 0xf7, 0xa3, 0x12, 0x40, 0xba, 0x44, 0x74, 0xdf, 0xee, 0xc5, 0x28, 0xac, 0xd3, 0x99, 0x6f, 0x43, + 0x61, 0xdb, 0x69, 0x52, 0x5f, 0xf4, 0xbb, 0x4f, 0xc1, 0x2c, 0x4b, 0x34, 0x73, 0xd1, 0x49, 0x86, + 0xae, 0x1a, 0xe2, 0xb9, 0x8f, 
0x7a, 0x96, 0xe7, 0xcb, 0x96, 0x2d, 0x17, 0xfb, 0xe8, 0xeb, 0x1c, + 0x88, 0x25, 0xee, 0xfa, 0x12, 0xaf, 0x32, 0x7e, 0xfa, 0x61, 0x75, 0xea, 0xfd, 0x0f, 0xab, 0x53, + 0x1f, 0x7c, 0xa8, 0x2a, 0x8e, 0x3f, 0x00, 0xc0, 0xce, 0xfe, 0xf7, 0x48, 0x53, 0xe6, 0xee, 0x54, + 0x23, 0xcc, 0x70, 0x72, 0x2e, 0x46, 0x98, 0x99, 0x81, 0xca, 0x51, 0xc3, 0xe1, 0x04, 0x25, 0x5a, + 0x83, 0x42, 0x34, 0x9c, 0x54, 0xfe, 0xb1, 0x18, 0xfa, 0x5b, 0x34, 0xc1, 0xc4, 0x31, 0x4d, 0xe2, + 0x22, 0x99, 0x3e, 0xf7, 0x22, 0xa9, 0x43, 0xb6, 0xeb, 0xd8, 0x6a, 0x38, 0xf0, 0x6c, 0x78, 0x91, + 0xdf, 0xd9, 0x6a, 0x9c, 0xf5, 0xab, 0x8f, 0x8d, 0x7b, 0x13, 0x08, 0x7a, 0x1d, 0xc2, 0x6a, 0x77, + 0xb6, 0x1a, 0x98, 0x33, 0x8f, 0xca, 0x6a, 0x33, 0x13, 0x66, 0xb5, 0xab, 0x00, 0xad, 0x78, 0xc4, + 0x22, 0x93, 0x46, 0xe4, 0x88, 0xda, 0x68, 0x45, 0xa3, 0x42, 0x0c, 0x16, 0x9b, 0x94, 0x58, 0xe1, + 0xa8, 0x83, 0x05, 0x56, 0x5b, 0x0e, 0x6d, 0x27, 0x8b, 0x89, 0x4b, 0x4a, 0xcd, 0xe2, 0xc6, 0xa0, + 0x30, 0x3c, 0x2c, 0x1f, 0xf9, 0xb0, 0x68, 0xab, 0x6e, 0x38, 0x56, 0x5a, 0x98, 0x58, 0xa9, 0xc8, + 0x58, 0x8d, 0x41, 0x41, 0x78, 0x58, 0x36, 0xfa, 0x2e, 0x2c, 0x87, 0xc0, 0xe1, 0x91, 0x84, 0xc8, + 0xfa, 0xd9, 0xfa, 0xca, 0x69, 0xbf, 0xba, 0xdc, 0x18, 0x4b, 0x85, 0xef, 0x23, 0x01, 0xd9, 0x30, + 0xe3, 0xca, 0x2a, 0xb9, 0x28, 0x2a, 0x9b, 0xaf, 0xa5, 0x5b, 0x45, 0xec, 0xfd, 0x35, 0xbd, 0x3a, + 0x8e, 0xc6, 0x4b, 0xaa, 0x30, 0x56, 0xb2, 0xd1, 0x3b, 0x50, 0xb4, 0x3c, 0xcf, 0x0f, 0x2c, 0x39, + 0x24, 0x29, 0x09, 0x55, 0xeb, 0x13, 0xab, 0x5a, 0x8f, 0x65, 0x0c, 0x54, 0xe3, 0x1a, 0x06, 0xeb, + 0xaa, 0xd0, 0x09, 0xcc, 0xfb, 0x27, 0x1e, 0xa1, 0x98, 0x1c, 0x10, 0x4a, 0xbc, 0x26, 0x61, 0x95, + 0xb2, 0xd0, 0xfe, 0x5c, 0x4a, 0xed, 0x09, 0xe6, 0xd8, 0xa5, 0x93, 0x70, 0x86, 0x07, 0xb5, 0xa0, + 0x1a, 0xcf, 0xad, 0x9e, 0xe5, 0x3a, 0xdf, 0x27, 0x94, 0x55, 0xe6, 0xe2, 0xb9, 0xfa, 0x66, 0x04, + 0xc5, 0x1a, 0x05, 0xea, 0x42, 0xb9, 0xad, 0x5f, 0x19, 0x95, 0x45, 0x61, 0xe6, 0xb5, 0x74, 0x66, + 0x0e, 0x5f, 0x6a, 0x71, 0x19, 0x94, 0xc0, 0xe1, 0xa4, 0x96, 0xe5, 0x17, 0xa0, 0xf8, 0x80, 0x1d, + 0x02, 0xef, 0x30, 0x06, 0x0f, 0x64, 0xa2, 0x0e, 0xe3, 0x8f, 0x19, 0x98, 0x4b, 0x6e, 0xe3, 0xc0, + 0x75, 0x98, 0x4b, 0x75, 0x1d, 0x86, 0xbd, 0xac, 0x31, 0xf6, 0x81, 0x25, 0xcc, 0xcf, 0xd9, 0xb1, + 0xf9, 0x59, 0xa5, 0xc1, 0xe9, 0x87, 0x49, 0x83, 0x35, 0x00, 0x5e, 0xac, 0x50, 0xdf, 0x75, 0x09, + 0x15, 0x19, 0x30, 0xaf, 0x1e, 0x52, 0x22, 0x28, 0xd6, 0x28, 0x78, 0x49, 0xbd, 0xef, 0xfa, 0xcd, + 0x23, 0xb1, 0x05, 0x61, 0xf4, 0x8a, 0xdc, 0x97, 0x97, 0x25, 0x75, 0x7d, 0x08, 0x8b, 0x47, 0x70, + 0x98, 0x3d, 0xb8, 0xb8, 0x6b, 0x51, 0x5e, 0xe4, 0xc4, 0x91, 0x22, 0x7a, 0x96, 0xb7, 0x86, 0x3a, + 0xa2, 0x67, 0x27, 0x8d, 0xb8, 0x78, 0xf3, 0x63, 0x58, 0xdc, 0x15, 0x99, 0x7f, 0x35, 0xe0, 0xd2, + 0x48, 0xdd, 0x9f, 0x41, 0x47, 0xf6, 0x56, 0xb2, 0x23, 0x7b, 0x31, 0xe5, 0xc4, 0x75, 0x94, 0xb5, + 0x63, 0xfa, 0xb3, 0x59, 0xc8, 0xed, 0xf2, 0x4a, 0xd8, 0xfc, 0xd8, 0x80, 0x92, 0xf8, 0x35, 0xc9, + 0xc0, 0xbb, 0x9a, 0x7c, 0x07, 0x29, 0x3c, 0xba, 0x37, 0x90, 0x47, 0x31, 0x11, 0x7f, 0xcf, 0x80, + 0xe4, 0xa8, 0x19, 0xbd, 0x2c, 0x43, 0xc0, 0x88, 0x66, 0xc1, 0x13, 0xba, 0xff, 0x4b, 0xe3, 0x5a, + 0xd2, 0x0b, 0xa9, 0xa6, 0x95, 0x4f, 0x43, 0x01, 0xfb, 0x7e, 0xb0, 0x6b, 0x05, 0x87, 0x8c, 0xef, + 0x5d, 0x87, 0xff, 0x50, 0xdb, 0x2b, 0xf6, 0x4e, 0x60, 0xb0, 0x84, 0x9b, 0x3f, 0x37, 0xe0, 0xd2, + 0xd8, 0xe7, 0x2d, 0x9e, 0x45, 0x9a, 0xd1, 0x97, 0x5a, 0x51, 0xe4, 0xc8, 0x31, 0x1d, 0xd6, 0xa8, + 0x78, 0x2f, 0x99, 0x78, 0x13, 0x1b, 0xec, 0x25, 0x13, 0xda, 0x70, 0x92, 0xd6, 0xfc, 0x67, 0x06, + 0xd4, 0x7b, 0xd2, 0xff, 0xd8, 0xe9, 0x9f, 0x1c, 0x78, 
0xcd, 0x9a, 0x4b, 0xbe, 0x66, 0x45, 0x4f, + 0x57, 0xda, 0x73, 0x4e, 0xf6, 0xfe, 0xcf, 0x39, 0xe8, 0xf9, 0xe8, 0x85, 0x48, 0xfa, 0xd0, 0x4a, + 0xf2, 0x85, 0xe8, 0xac, 0x5f, 0x2d, 0x29, 0xe1, 0xc9, 0x17, 0xa3, 0x37, 0x60, 0xd6, 0x26, 0x81, + 0xe5, 0xb8, 0xb2, 0x2f, 0x4c, 0xfd, 0xe6, 0x21, 0x85, 0x35, 0x24, 0x6b, 0xbd, 0xc8, 0x6d, 0x52, + 0x1f, 0x38, 0x14, 0xc8, 0x13, 0x76, 0xd3, 0xb7, 0x65, 0x47, 0x92, 0x8b, 0x13, 0xf6, 0x86, 0x6f, + 0x13, 0x2c, 0x30, 0xe6, 0xfb, 0x06, 0x14, 0xa5, 0xa4, 0x0d, 0xab, 0xcb, 0x08, 0xba, 0x12, 0xad, + 0x42, 0x1e, 0xf7, 0x25, 0xfd, 0x29, 0xf0, 0xac, 0x5f, 0x2d, 0x08, 0x32, 0xd1, 0xcc, 0x8c, 0x78, + 0xf2, 0xca, 0x9c, 0xb3, 0x47, 0x8f, 0x43, 0x4e, 0x04, 0x90, 0xda, 0xcc, 0xf8, 0x4d, 0x93, 0x03, + 0xb1, 0xc4, 0x99, 0x9f, 0x66, 0xa0, 0x9c, 0x58, 0x5c, 0x8a, 0xbe, 0x20, 0x1a, 0xa1, 0x66, 0x52, + 0x8c, 0xe5, 0xc7, 0xff, 0x83, 0x40, 0x5d, 0x5f, 0x33, 0x0f, 0x73, 0x7d, 0x7d, 0x1b, 0x66, 0x9a, + 0x7c, 0x8f, 0xc2, 0x3f, 0xa4, 0x5c, 0x99, 0xe4, 0x38, 0xc5, 0xee, 0xc6, 0xde, 0x28, 0x3e, 0x19, + 0x56, 0x02, 0xd1, 0x4d, 0x58, 0xa4, 0x24, 0xa0, 0xbd, 0xf5, 0x83, 0x80, 0x50, 0x7d, 0x98, 0x90, + 0x8b, 0xab, 0x6f, 0x3c, 0x48, 0x80, 0x87, 0x79, 0xcc, 0x7d, 0x28, 0xdd, 0xb6, 0xf6, 0xdd, 0xe8, + 0x15, 0x0f, 0x43, 0xd9, 0xf1, 0x9a, 0x6e, 0xd7, 0x26, 0x32, 0xa1, 0x87, 0xd9, 0x2b, 0x0c, 0xda, + 0x2d, 0x1d, 0x79, 0xd6, 0xaf, 0x5e, 0x48, 0x00, 0xe4, 0xb3, 0x15, 0x4e, 0x8a, 0x30, 0x5d, 0x98, + 0xfe, 0x0c, 0x3b, 0xc9, 0xef, 0x40, 0x21, 0xae, 0xf5, 0x1f, 0xb1, 0x4a, 0xf3, 0x2d, 0xc8, 0x73, + 0x8f, 0x0f, 0x7b, 0xd4, 0x73, 0xaa, 0xa4, 0x64, 0xed, 0x95, 0x49, 0x53, 0x7b, 0x89, 0xb7, 0xe0, + 0x3b, 0x1d, 0xfb, 0x21, 0xdf, 0x82, 0x33, 0x0f, 0x73, 0xf3, 0x65, 0x27, 0xbc, 0xf9, 0xae, 0x82, + 0xfc, 0xbf, 0x0c, 0xbf, 0x64, 0x64, 0x01, 0xa1, 0x5d, 0x32, 0xfa, 0xfd, 0xaf, 0xbd, 0x30, 0xfc, + 0xd8, 0x00, 0x10, 0xa3, 0x3c, 0x31, 0x46, 0x4a, 0xf1, 0xaf, 0x83, 0x3b, 0x30, 0xe3, 0x4b, 0x8f, + 0x94, 0xef, 0xc1, 0x13, 0xce, 0x8b, 0xa3, 0x40, 0x92, 0x3e, 0x89, 0x95, 0xb0, 0xfa, 0xab, 0x1f, + 0xdd, 0x5b, 0x99, 0xfa, 0xf8, 0xde, 0xca, 0xd4, 0x27, 0xf7, 0x56, 0xa6, 0xde, 0x3d, 0x5d, 0x31, + 0x3e, 0x3a, 0x5d, 0x31, 0x3e, 0x3e, 0x5d, 0x31, 0x3e, 0x39, 0x5d, 0x31, 0x3e, 0x3d, 0x5d, 0x31, + 0xde, 0xff, 0xfb, 0xca, 0xd4, 0x1b, 0x4f, 0xa4, 0xf9, 0x1f, 0xe2, 0x7f, 0x03, 0x00, 0x00, 0xff, + 0xff, 0xd3, 0xee, 0xe4, 0x1c, 0xae, 0x28, 0x00, 0x00, } func (m *APIGroup) Marshal() (dAtA []byte, err error) { @@ -2026,6 +2055,48 @@ func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *FieldSelectorRequirement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FieldSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FieldSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return 
len(dAtA) - i, nil +} + func (m *FieldsV1) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3715,6 +3786,25 @@ func (m *Duration) Size() (n int) { return n } +func (m *FieldSelectorRequirement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *FieldsV1) Size() (n int) { if m == nil { return 0 @@ -4430,6 +4520,18 @@ func (this *Duration) String() string { }, "") return s } +func (this *FieldSelectorRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FieldSelectorRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} func (this *GetOptions) String() string { if this == nil { return "nil" @@ -6444,6 +6546,152 @@ func (m *Duration) Unmarshal(dAtA []byte) error { } return nil } +func (m *FieldSelectorRequirement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FieldSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FieldSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = FieldSelectorOperator(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *FieldsV1) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index a2cd8015fb..18dd0b067c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -34,6 +34,7 @@ message APIGroup { optional string name = 1; // versions are the versions supported in this group. + // +listType=atomic repeated GroupVersionForDiscovery versions = 2; // preferredVersion is the version preferred by the API server, which @@ -49,6 +50,7 @@ message APIGroup { // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. // +optional + // +listType=atomic repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 4; } @@ -56,6 +58,7 @@ message APIGroup { // /apis. message APIGroupList { // groups is a list of APIGroup. + // +listType=atomic repeated APIGroup groups = 1; } @@ -88,9 +91,11 @@ message APIResource { optional Verbs verbs = 4; // shortNames is a list of suggested short names of the resource. + // +listType=atomic repeated string shortNames = 5; // categories is a list of the grouped resources this resource belongs to (e.g. 'all') + // +listType=atomic repeated string categories = 7; // The hash value of the storage version, the version this resource is @@ -112,6 +117,7 @@ message APIResourceList { optional string groupVersion = 1; // resources contains the name of the resources and if they are namespaced. + // +listType=atomic repeated APIResource resources = 2; } @@ -122,6 +128,7 @@ message APIResourceList { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message APIVersions { // versions are the api versions that are available. + // +listType=atomic repeated string versions = 1; // a map of client CIDR to server address that is serving this group. @@ -131,6 +138,7 @@ message APIVersions { // The server returns only those CIDRs that it thinks that the client can match. // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + // +listType=atomic repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 2; } @@ -145,6 +153,7 @@ message ApplyOptions { // request. 
Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic repeated string dryRun = 1; // Force is going to "force" Apply requests. It means user will @@ -235,6 +244,7 @@ message CreateOptions { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic repeated string dryRun = 1; // fieldManager is a name associated with the actor or entity @@ -303,6 +313,7 @@ message DeleteOptions { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic repeated string dryRun = 5; } @@ -313,6 +324,25 @@ message Duration { optional int64 duration = 1; } +// FieldSelectorRequirement is a selector that contains values, a key, and an operator that +// relates the key and values. +message FieldSelectorRequirement { + // key is the field selector key that the requirement applies to. + optional string key = 1; + + // operator represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist. + // The list of operators may grow in the future. + optional string operator = 2; + + // values is an array of string values. + // If the operator is In or NotIn, the values array must be non-empty. + // If the operator is Exists or DoesNotExist, the values array must be empty. + // +optional + // +listType=atomic + repeated string values = 3; +} + // FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. // // Each key is either a '.' representing the field itself, and will always map to an empty set, @@ -418,6 +448,7 @@ message LabelSelector { // matchExpressions is a list of label selector requirements. The requirements are ANDed. // +optional + // +listType=atomic repeated LabelSelectorRequirement matchExpressions = 2; } @@ -436,6 +467,7 @@ message LabelSelectorRequirement { // the values array must be empty. This array is replaced during a strategic // merge patch. // +optional + // +listType=atomic repeated string values = 3; } @@ -447,7 +479,7 @@ message List { optional ListMeta metadata = 1; // List of objects - repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; + repeated .k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; } // ListMeta describes metadata that synthetic resources must have, including lists and @@ -788,6 +820,8 @@ message ObjectMeta { // +optional // +patchMergeKey=uid // +patchStrategy=merge + // +listType=map + // +listMapKey=uid repeated OwnerReference ownerReferences = 13; // Must be empty before the object is deleted from the registry. Each entry @@ -805,6 +839,7 @@ message ObjectMeta { // are not vulnerable to ordering changes in the list. // +optional // +patchStrategy=merge + // +listType=set repeated string finalizers = 14; // ManagedFields maps workflow-id and version to the set of fields @@ -816,6 +851,7 @@ message ObjectMeta { // workflow used when modifying the object. // // +optional + // +listType=atomic repeated ManagedFieldsEntry managedFields = 17; } @@ -890,6 +926,7 @@ message PatchOptions { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic repeated string dryRun = 1; // Force is going to "force" Apply requests. It means user will @@ -943,6 +980,7 @@ message Preconditions { // For example: "/healthz", "/apis". message RootPaths { // paths are the paths available at root. 
+ // +listType=atomic repeated string paths = 1; } @@ -985,6 +1023,7 @@ message Status { // is not guaranteed to conform to any schema except that defined by // the reason type. // +optional + // +listType=atomic optional StatusDetails details = 5; // Suggested HTTP return code for this status, 0 if not set. @@ -1049,6 +1088,7 @@ message StatusDetails { // The Causes array includes more details associated with the StatusReason // failure. Not all StatusReasons may provide detailed causes. // +optional + // +listType=atomic repeated StatusCause causes = 4; // If specified, the time in seconds before the operation should be retried. Some errors may indicate @@ -1135,6 +1175,7 @@ message UpdateOptions { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic repeated string dryRun = 1; // fieldManager is a name associated with the actor or entity @@ -1187,6 +1228,6 @@ message WatchEvent { // * If Type is Deleted: the state of the object immediately before deletion. // * If Type is Error: *Status is recommended; other types may make sense // depending on context. - optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 2; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension object = 2; } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go index 592dcb8a74..c748071ed7 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go @@ -24,8 +24,10 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/types" + utiljson "k8s.io/apimachinery/pkg/util/json" ) // LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements @@ -280,13 +282,20 @@ func (f FieldsV1) MarshalJSON() ([]byte, error) { if f.Raw == nil { return []byte("null"), nil } + if f.getContentType() == fieldsV1InvalidOrValidCBORObject { + var u map[string]interface{} + if err := cbor.Unmarshal(f.Raw, &u); err != nil { + return nil, fmt.Errorf("metav1.FieldsV1 cbor invalid: %w", err) + } + return utiljson.Marshal(u) + } return f.Raw, nil } // UnmarshalJSON implements json.Unmarshaler func (f *FieldsV1) UnmarshalJSON(b []byte) error { if f == nil { - return errors.New("metav1.Fields: UnmarshalJSON on nil pointer") + return errors.New("metav1.FieldsV1: UnmarshalJSON on nil pointer") } if !bytes.Equal(b, []byte("null")) { f.Raw = append(f.Raw[0:0], b...) @@ -296,3 +305,75 @@ func (f *FieldsV1) UnmarshalJSON(b []byte) error { var _ json.Marshaler = FieldsV1{} var _ json.Unmarshaler = &FieldsV1{} + +func (f FieldsV1) MarshalCBOR() ([]byte, error) { + if f.Raw == nil { + return cbor.Marshal(nil) + } + if f.getContentType() == fieldsV1InvalidOrValidJSONObject { + var u map[string]interface{} + if err := utiljson.Unmarshal(f.Raw, &u); err != nil { + return nil, fmt.Errorf("metav1.FieldsV1 json invalid: %w", err) + } + return cbor.Marshal(u) + } + return f.Raw, nil +} + +var cborNull = []byte{0xf6} + +func (f *FieldsV1) UnmarshalCBOR(b []byte) error { + if f == nil { + return errors.New("metav1.FieldsV1: UnmarshalCBOR on nil pointer") + } + if !bytes.Equal(b, cborNull) { + f.Raw = append(f.Raw[0:0], b...) 
+ } + return nil +} + +const ( + // fieldsV1InvalidOrEmpty indicates that a FieldsV1 either contains no raw bytes or its raw + // bytes don't represent an allowable value in any supported encoding. + fieldsV1InvalidOrEmpty = iota + + // fieldsV1InvalidOrValidJSONObject indicates that a FieldV1 either contains raw bytes that + // are a valid JSON encoding of an allowable value or don't represent an allowable value in + // any supported encoding. + fieldsV1InvalidOrValidJSONObject + + // fieldsV1InvalidOrValidCBORObject indicates that a FieldV1 either contains raw bytes that + // are a valid CBOR encoding of an allowable value or don't represent an allowable value in + // any supported encoding. + fieldsV1InvalidOrValidCBORObject +) + +// getContentType returns one of fieldsV1InvalidOrEmpty, fieldsV1InvalidOrValidJSONObject, +// fieldsV1InvalidOrValidCBORObject based on the value of Raw. +// +// Raw can be encoded in JSON or CBOR and is only valid if it is empty, null, or an object (map) +// value. It is invalid if it contains a JSON string, number, boolean, or array. If Raw is nonempty +// and represents an allowable value, then the initial byte unambiguously distinguishes a +// JSON-encoded value from a CBOR-encoded value. +// +// A valid JSON-encoded value can begin with any of the four JSON whitespace characters, the first +// character 'n' of null, or '{' (0x09, 0x0a, 0x0d, 0x20, 0x6e, or 0x7b, respectively). A valid +// CBOR-encoded value can begin with the null simple value, an initial byte with major type "map", +// or, if a tag-enclosed map, an initial byte with major type "tag" (0xf6, 0xa0...0xbf, or +// 0xc6...0xdb). The two sets of valid initial bytes don't intersect. +func (f FieldsV1) getContentType() int { + if len(f.Raw) > 0 { + p := f.Raw[0] + switch p { + case 'n', '{', '\t', '\r', '\n', ' ': + return fieldsV1InvalidOrValidJSONObject + case 0xf6: // null + return fieldsV1InvalidOrValidCBORObject + default: + if p >= 0xa0 && p <= 0xbf /* map */ || p >= 0xc6 && p <= 0xdb /* tag */ { + return fieldsV1InvalidOrValidCBORObject + } + } + } + return fieldsV1InvalidOrEmpty +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go index 8eb37f4367..9f302b3f36 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go @@ -19,6 +19,8 @@ package v1 import ( "encoding/json" "time" + + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" ) const RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00" @@ -129,6 +131,25 @@ func (t *MicroTime) UnmarshalJSON(b []byte) error { return nil } +func (t *MicroTime) UnmarshalCBOR(b []byte) error { + var s *string + if err := cbor.Unmarshal(b, &s); err != nil { + return err + } + if s == nil { + t.Time = time.Time{} + return nil + } + + parsed, err := time.Parse(RFC3339Micro, *s) + if err != nil { + return err + } + + t.Time = parsed.Local() + return nil +} + // UnmarshalQueryParameter converts from a URL query parameter value to an object func (t *MicroTime) UnmarshalQueryParameter(str string) error { if len(str) == 0 { @@ -160,6 +181,13 @@ func (t MicroTime) MarshalJSON() ([]byte, error) { return json.Marshal(t.UTC().Format(RFC3339Micro)) } +func (t MicroTime) MarshalCBOR() ([]byte, error) { + if t.IsZero() { + return cbor.Marshal(nil) + } + return cbor.Marshal(t.UTC().Format(RFC3339Micro)) +} + // OpenAPISchemaType is used by the kube-openapi generator when constructing // the OpenAPI spec of 
this type. // diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go index 421770d432..0333cfdb33 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -19,6 +19,8 @@ package v1 import ( "encoding/json" "time" + + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" ) // Time is a wrapper around time.Time which supports correct @@ -116,6 +118,25 @@ func (t *Time) UnmarshalJSON(b []byte) error { return nil } +func (t *Time) UnmarshalCBOR(b []byte) error { + var s *string + if err := cbor.Unmarshal(b, &s); err != nil { + return err + } + if s == nil { + t.Time = time.Time{} + return nil + } + + parsed, err := time.Parse(time.RFC3339, *s) + if err != nil { + return err + } + + t.Time = parsed.Local() + return nil +} + // UnmarshalQueryParameter converts from a URL query parameter value to an object func (t *Time) UnmarshalQueryParameter(str string) error { if len(str) == 0 { @@ -151,6 +172,14 @@ func (t Time) MarshalJSON() ([]byte, error) { return buf, nil } +func (t Time) MarshalCBOR() ([]byte, error) { + if t.IsZero() { + return cbor.Marshal(nil) + } + + return cbor.Marshal(t.UTC().Format(time.RFC3339)) +} + // ToUnstructured implements the value.UnstructuredConverter interface. func (t Time) ToUnstructured() interface{} { if t.IsZero() { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index 8a8ff70189..473adb9ef5 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -236,6 +236,8 @@ type ObjectMeta struct { // +optional // +patchMergeKey=uid // +patchStrategy=merge + // +listType=map + // +listMapKey=uid OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` // Must be empty before the object is deleted from the registry. Each entry @@ -253,6 +255,7 @@ type ObjectMeta struct { // are not vulnerable to ordering changes in the list. // +optional // +patchStrategy=merge + // +listType=set Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"` // Tombstone: ClusterName was a legacy field that was always cleared by @@ -268,6 +271,7 @@ type ObjectMeta struct { // workflow used when modifying the object. // // +optional + // +listType=atomic ManagedFields []ManagedFieldsEntry `json:"managedFields,omitempty" protobuf:"bytes,17,rep,name=managedFields"` } @@ -428,6 +432,15 @@ type ListOptions struct { SendInitialEvents *bool `json:"sendInitialEvents,omitempty" protobuf:"varint,11,opt,name=sendInitialEvents"` } +const ( + // InitialEventsAnnotationKey the name of the key + // under which an annotation marking the end of + // a watchlist stream is stored. + // + // The annotation is added to a "Bookmark" event. + InitialEventsAnnotationKey = "k8s.io/initial-events-end" +) + // resourceVersionMatch specifies how the resourceVersion parameter is applied. resourceVersionMatch // may only be set if resourceVersion is also set. // @@ -531,6 +544,7 @@ type DeleteOptions struct { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,5,rep,name=dryRun"` } @@ -556,6 +570,7 @@ type CreateOptions struct { // request. 
Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` // +k8s:deprecated=includeUninitialized,protobuf=2 @@ -600,6 +615,7 @@ type PatchOptions struct { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` // Force is going to "force" Apply requests. It means user will @@ -651,6 +667,7 @@ type ApplyOptions struct { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` // Force is going to "force" Apply requests. It means user will @@ -683,6 +700,7 @@ type UpdateOptions struct { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` // fieldManager is a name associated with the actor or entity @@ -751,6 +769,7 @@ type Status struct { // is not guaranteed to conform to any schema except that defined by // the reason type. // +optional + // +listType=atomic Details *StatusDetails `json:"details,omitempty" protobuf:"bytes,5,opt,name=details"` // Suggested HTTP return code for this status, 0 if not set. // +optional @@ -784,6 +803,7 @@ type StatusDetails struct { // The Causes array includes more details associated with the StatusReason // failure. Not all StatusReasons may provide detailed causes. // +optional + // +listType=atomic Causes []StatusCause `json:"causes,omitempty" protobuf:"bytes,4,rep,name=causes"` // If specified, the time in seconds before the operation should be retried. Some errors may indicate // the client must take an alternate action - for those errors this field may indicate how long to wait @@ -1047,6 +1067,7 @@ type List struct { type APIVersions struct { TypeMeta `json:",inline"` // versions are the api versions that are available. + // +listType=atomic Versions []string `json:"versions" protobuf:"bytes,1,rep,name=versions"` // a map of client CIDR to server address that is serving this group. // This is to help clients reach servers in the most network-efficient way possible. @@ -1055,6 +1076,7 @@ type APIVersions struct { // The server returns only those CIDRs that it thinks that the client can match. // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + // +listType=atomic ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,2,rep,name=serverAddressByClientCIDRs"` } @@ -1065,6 +1087,7 @@ type APIVersions struct { type APIGroupList struct { TypeMeta `json:",inline"` // groups is a list of APIGroup. + // +listType=atomic Groups []APIGroup `json:"groups" protobuf:"bytes,1,rep,name=groups"` } @@ -1077,6 +1100,7 @@ type APIGroup struct { // name is the name of the group. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // versions are the versions supported in this group. + // +listType=atomic Versions []GroupVersionForDiscovery `json:"versions" protobuf:"bytes,2,rep,name=versions"` // preferredVersion is the version preferred by the API server, which // probably is the storage version. 
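The dryRun plumbing itself is unchanged by these +listType=atomic annotations; only the list-merge semantics are being declared. As an illustrative sketch only (not part of the patch), assuming the vendored metav1 package as patched here, a caller populates the field like this; "All" is the only value the option comments above allow:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Ask the API server to run all admission and validation stages for the
	// delete request without persisting the change. DryRun is the []string
	// list annotated as an atomic list in the hunks above.
	opts := metav1.DeleteOptions{DryRun: []string{"All"}}
	fmt.Println(opts.DryRun) // [All]
}
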
@@ -1090,6 +1114,7 @@ type APIGroup struct { // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. // +optional + // +listType=atomic ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs,omitempty" protobuf:"bytes,4,rep,name=serverAddressByClientCIDRs"` } @@ -1134,8 +1159,10 @@ type APIResource struct { // update, patch, delete, deletecollection, and proxy) Verbs Verbs `json:"verbs" protobuf:"bytes,4,opt,name=verbs"` // shortNames is a list of suggested short names of the resource. + // +listType=atomic ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,5,rep,name=shortNames"` // categories is a list of the grouped resources this resource belongs to (e.g. 'all') + // +listType=atomic Categories []string `json:"categories,omitempty" protobuf:"bytes,7,rep,name=categories"` // The hash value of the storage version, the version this resource is // converted to when written to the data store. Value must be treated @@ -1168,6 +1195,7 @@ type APIResourceList struct { // groupVersion is the group and version this APIResourceList is for. GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"` // resources contains the name of the resources and if they are namespaced. + // +listType=atomic APIResources []APIResource `json:"resources" protobuf:"bytes,2,rep,name=resources"` } @@ -1175,6 +1203,7 @@ type APIResourceList struct { // For example: "/healthz", "/apis". type RootPaths struct { // paths are the paths available at root. + // +listType=atomic Paths []string `json:"paths" protobuf:"bytes,1,rep,name=paths"` } @@ -1218,6 +1247,7 @@ type LabelSelector struct { MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"` // matchExpressions is a list of label selector requirements. The requirements are ANDed. // +optional + // +listType=atomic MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"` } @@ -1234,6 +1264,7 @@ type LabelSelectorRequirement struct { // the values array must be empty. This array is replaced during a strategic // merge patch. // +optional + // +listType=atomic Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` } @@ -1247,6 +1278,33 @@ const ( LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist" ) +// FieldSelectorRequirement is a selector that contains values, a key, and an operator that +// relates the key and values. +type FieldSelectorRequirement struct { + // key is the field selector key that the requirement applies to. + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + // operator represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist. + // The list of operators may grow in the future. + Operator FieldSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=FieldSelectorOperator"` + // values is an array of string values. + // If the operator is In or NotIn, the values array must be non-empty. + // If the operator is Exists or DoesNotExist, the values array must be empty. 
+ // +optional + // +listType=atomic + Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` +} + +// A field selector operator is the set of operators that can be used in a selector requirement. +type FieldSelectorOperator string + +const ( + FieldSelectorOpIn FieldSelectorOperator = "In" + FieldSelectorOpNotIn FieldSelectorOperator = "NotIn" + FieldSelectorOpExists FieldSelectorOperator = "Exists" + FieldSelectorOpDoesNotExist FieldSelectorOperator = "DoesNotExist" +) + // ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource // that the fieldset applies to. type ManagedFieldsEntry struct { @@ -1335,8 +1393,10 @@ type Table struct { // columnDefinitions describes each column in the returned items array. The number of cells per row // will always match the number of column definitions. + // +listType=atomic ColumnDefinitions []TableColumnDefinition `json:"columnDefinitions"` // rows is the list of items in the table. + // +listType=atomic Rows []TableRow `json:"rows"` } @@ -1369,12 +1429,14 @@ type TableRow struct { // cells will be as wide as the column definitions array and may contain strings, numbers (float64 or // int64), booleans, simple maps, lists, or null. See the type field of the column definition for a // more detailed description. + // +listType=atomic Cells []interface{} `json:"cells"` // conditions describe additional status of a row that are relevant for a human user. These conditions // apply to the row, not to the object, and will be specific to table output. The only defined // condition type is 'Completed', for a row that indicates a resource that has run to completion and // can be given less visual priority. // +optional + // +listType=atomic Conditions []TableRowCondition `json:"conditions,omitempty"` // This field contains the requested additional information about each object based on the includeObject // policy when requesting the Table. If "None", this field is empty, if "Object" this will be the diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index b736e83712..1fa37215cd 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -135,6 +135,17 @@ func (DeleteOptions) SwaggerDoc() map[string]string { return map_DeleteOptions } +var map_FieldSelectorRequirement = map[string]string{ + "": "FieldSelectorRequirement is a selector that contains values, a key, and an operator that relates the key and values.", + "key": "key is the field selector key that the requirement applies to.", + "operator": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. The list of operators may grow in the future.", + "values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty.", +} + +func (FieldSelectorRequirement) SwaggerDoc() map[string]string { + return map_FieldSelectorRequirement +} + var map_FieldsV1 = map[string]string{ "": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. 
The string will follow one of these four formats: 'f:', where is the name of a field in a struct, or key in a map 'v:', where is the exact json formatted value of a list item 'i:', where is position of a item in a list 'k:', where is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff", } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go index a0f709ad86..3eba5ba541 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go @@ -32,6 +32,10 @@ import ( type LabelSelectorValidationOptions struct { // Allow invalid label value in selector AllowInvalidLabelValueInSelector bool + + // Allows an operator that is not interpretable to pass validation. This is useful for cases where a broader check + // can be performed, as in a *SubjectAccessReview + AllowUnknownOperatorInRequirement bool } // LabelSelectorHasInvalidLabelValue returns true if the given selector contains an invalid label value in a match expression. @@ -79,7 +83,9 @@ func ValidateLabelSelectorRequirement(sr metav1.LabelSelectorRequirement, opts L allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) } default: - allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator")) + if !opts.AllowUnknownOperatorInRequirement { + allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator")) + } } allErrs = append(allErrs, ValidateLabelName(sr.Key, fldPath.Child("key"))...) if !opts.AllowInvalidLabelValueInSelector { @@ -113,6 +119,39 @@ func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorLi return allErrs } +// FieldSelectorValidationOptions is a struct that can be passed to ValidateFieldSelectorRequirement to record the validate options +type FieldSelectorValidationOptions struct { + // Allows an operator that is not interpretable to pass validation. This is useful for cases where a broader check + // can be performed, as in a *SubjectAccessReview + AllowUnknownOperatorInRequirement bool +} + +// ValidateLabelSelectorRequirement validates the requirement according to the opts and returns any validation errors. 
+func ValidateFieldSelectorRequirement(requirement metav1.FieldSelectorRequirement, opts FieldSelectorValidationOptions, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(requirement.Key) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("key"), "must be specified")) + } + + switch requirement.Operator { + case metav1.FieldSelectorOpIn, metav1.FieldSelectorOpNotIn: + if len(requirement.Values) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'")) + } + case metav1.FieldSelectorOpExists, metav1.FieldSelectorOpDoesNotExist: + if len(requirement.Values) > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) + } + default: + if !opts.AllowUnknownOperatorInRequirement { + allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), requirement.Operator, "not a valid selector operator")) + } + } + + return allErrs +} + func ValidateDeleteOptions(options *metav1.DeleteOptions) field.ErrorList { allErrs := field.ErrorList{} //lint:file-ignore SA1019 Keep validation for deprecated OrphanDependents option until it's being removed diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index 7d29c504ab..90cc54a7e7 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -327,6 +327,27 @@ func (in *Duration) DeepCopy() *Duration { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldSelectorRequirement) DeepCopyInto(out *FieldSelectorRequirement) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldSelectorRequirement. +func (in *FieldSelectorRequirement) DeepCopy() *FieldSelectorRequirement { + if in == nil { + return nil + } + out := new(FieldSelectorRequirement) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FieldsV1) DeepCopyInto(out *FieldsV1) { *out = *in diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go index a2abc67c15..819d936fe5 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto +// source: k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto package v1beta1 @@ -47,7 +47,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } func (*PartialObjectMetadataList) ProtoMessage() {} func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { - return fileDescriptor_90ec10f86b91f9a8, []int{0} + return fileDescriptor_39237a8d8061b52f, []int{0} } func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -77,31 +77,30 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto", fileDescriptor_90ec10f86b91f9a8) + proto.RegisterFile("k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto", fileDescriptor_39237a8d8061b52f) } -var fileDescriptor_90ec10f86b91f9a8 = []byte{ - // 317 bytes of a gzipped FileDescriptorProto +var fileDescriptor_39237a8d8061b52f = []byte{ + // 303 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x41, 0x4b, 0xf3, 0x30, 0x1c, 0xc6, 0x9b, 0xf7, 0x65, 0x30, 0x3a, 0x04, 0xd9, 0x69, 0xee, 0x90, 0x0d, 0x4f, 0xdb, 0xc1, - 0x84, 0x0d, 0x11, 0xc1, 0xdb, 0x6e, 0x82, 0x32, 0xd9, 0x51, 0x3c, 0x98, 0x76, 0x7f, 0xbb, 0x58, - 0xd3, 0x94, 0xe4, 0xdf, 0x81, 0x37, 0x3f, 0x82, 0x1f, 0x6b, 0xc7, 0x1d, 0x07, 0xc2, 0x70, 0xf5, - 0x8b, 0x48, 0xda, 0x2a, 0x32, 0x14, 0x7a, 0xeb, 0xf3, 0x94, 0xdf, 0x2f, 0x4f, 0x20, 0xfe, 0x2c, - 0x3e, 0xb7, 0x4c, 0x6a, 0x1e, 0x67, 0x01, 0x98, 0x04, 0x10, 0x2c, 0x5f, 0x42, 0x32, 0xd7, 0x86, - 0x57, 0x3f, 0x44, 0x2a, 0x95, 0x08, 0x17, 0x32, 0x01, 0xf3, 0xcc, 0xd3, 0x38, 0x72, 0x85, 0xe5, - 0x0a, 0x50, 0xf0, 0xe5, 0x28, 0x00, 0x14, 0x23, 0x1e, 0x41, 0x02, 0x46, 0x20, 0xcc, 0x59, 0x6a, - 0x34, 0xea, 0xf6, 0xb0, 0x44, 0xd9, 0x4f, 0x94, 0xa5, 0x71, 0xe4, 0x0a, 0xcb, 0x1c, 0xca, 0x2a, - 0xb4, 0x7b, 0x12, 0x49, 0x5c, 0x64, 0x01, 0x0b, 0xb5, 0xe2, 0x91, 0x8e, 0x34, 0x2f, 0x0c, 0x41, - 0xf6, 0x50, 0xa4, 0x22, 0x14, 0x5f, 0xa5, 0xb9, 0x7b, 0x5a, 0x67, 0xd4, 0xfe, 0x9e, 0xee, 0xd9, - 0x5f, 0x94, 0xc9, 0x12, 0x94, 0x0a, 0xb8, 0x0d, 0x17, 0xa0, 0xc4, 0x3e, 0x77, 0xfc, 0x46, 0xfc, - 0xa3, 0x1b, 0x61, 0x50, 0x8a, 0xa7, 0x69, 0xf0, 0x08, 0x21, 0x5e, 0x03, 0x8a, 0xb9, 0x40, 0x71, - 0x25, 0x2d, 0xb6, 0xef, 0xfc, 0xa6, 0xaa, 0x72, 0xe7, 0x5f, 0x9f, 0x0c, 0x5a, 0x63, 0xc6, 0xea, - 0x5c, 0x9c, 0x39, 0xda, 0x99, 0x26, 0x87, 0xab, 0x6d, 0xcf, 0xcb, 0xb7, 0xbd, 0xe6, 0x57, 0x33, - 0xfb, 0x36, 0xb6, 0xef, 0xfd, 0x86, 0x44, 0x50, 0xb6, 0x43, 0xfa, 0xff, 0x07, 0xad, 0xf1, 0x45, - 0x3d, 0xf5, 0xaf, 0x6b, 0x27, 0x07, 0xd5, 0x39, 0x8d, 0x4b, 0x67, 0x9c, 0x95, 0xe2, 0xc9, 0x74, - 0xb5, 0xa3, 0xde, 0x7a, 0x47, 0xbd, 0xcd, 0x8e, 0x7a, 0x2f, 0x39, 0x25, 0xab, 0x9c, 0x92, 0x75, - 0x4e, 0xc9, 0x26, 0xa7, 0xe4, 0x3d, 0xa7, 0xe4, 0xf5, 0x83, 0x7a, 0xb7, 0xc3, 0xda, 0xcf, 0xe0, - 0x33, 0x00, 0x00, 0xff, 0xff, 0x30, 0x97, 0x8b, 0x11, 0x4b, 0x02, 0x00, 0x00, + 0x84, 0x0d, 0x11, 0xc5, 0xdb, 0x6e, 0x82, 0x32, 0xd9, 0x51, 0x3c, 0x98, 0x76, 0x31, 0x8b, 0x35, + 0x4d, 0x69, 0xfe, 0x15, 0xbc, 0xf9, 0x11, 0xfc, 0x58, 0x3d, 0xee, 0x38, 0x10, 0x86, 0x8d, 0x5f, + 0x44, 0xd2, 0x56, 0x91, 0xa1, 0xd0, 0x5b, 0x9e, 0x07, 0x7e, 0xbf, 0x3c, 0x81, 0xf8, 0x67, 0xd1, + 0xa9, 0x21, 0x52, 0x53, 0x96, 0x48, 0xc5, 0xc2, 0x95, 0x8c, 0x79, 0xfa, 0x4c, 0x93, 0x48, 0xb8, + 0xc2, 0x50, 0xc5, 0x81, 0xd1, 0xa7, 0x49, 0xc0, 0x81, 
0x4d, 0xa8, 0xe0, 0x31, 0x4f, 0x19, 0xf0, + 0x25, 0x49, 0x52, 0x0d, 0xba, 0x3b, 0xae, 0x50, 0xf2, 0x13, 0x25, 0x49, 0x24, 0x5c, 0x61, 0x88, + 0x43, 0x49, 0x8d, 0xf6, 0x8f, 0x84, 0x84, 0x55, 0x16, 0x90, 0x50, 0x2b, 0x2a, 0xb4, 0xd0, 0xb4, + 0x34, 0x04, 0xd9, 0x7d, 0x99, 0xca, 0x50, 0x9e, 0x2a, 0x73, 0xff, 0xb8, 0xc9, 0xa8, 0xdd, 0x3d, + 0xfd, 0x93, 0xbf, 0xa8, 0x34, 0x8b, 0x41, 0x2a, 0x4e, 0x4d, 0xb8, 0xe2, 0x8a, 0xed, 0x72, 0x87, + 0x6f, 0xc8, 0x3f, 0xb8, 0x66, 0x29, 0x48, 0xf6, 0x38, 0x0f, 0x1e, 0x78, 0x08, 0x57, 0x1c, 0xd8, + 0x92, 0x01, 0xbb, 0x94, 0x06, 0xba, 0xb7, 0x7e, 0x5b, 0xd5, 0xb9, 0xf7, 0x6f, 0x88, 0x46, 0x9d, + 0x29, 0x21, 0x4d, 0x1e, 0x4e, 0x1c, 0xed, 0x4c, 0xb3, 0xfd, 0x7c, 0x3b, 0xf0, 0xec, 0x76, 0xd0, + 0xfe, 0x6a, 0x16, 0xdf, 0xc6, 0xee, 0x9d, 0xdf, 0x92, 0xc0, 0x95, 0xe9, 0xa1, 0xe1, 0xff, 0x51, + 0x67, 0x7a, 0xde, 0x4c, 0xfd, 0xeb, 0xda, 0xd9, 0x5e, 0x7d, 0x4f, 0xeb, 0xc2, 0x19, 0x17, 0x95, + 0x78, 0x36, 0xcf, 0x0b, 0xec, 0xad, 0x0b, 0xec, 0x6d, 0x0a, 0xec, 0xbd, 0x58, 0x8c, 0x72, 0x8b, + 0xd1, 0xda, 0x62, 0xb4, 0xb1, 0x18, 0xbd, 0x5b, 0x8c, 0x5e, 0x3f, 0xb0, 0x77, 0x33, 0x6e, 0xfc, + 0x0d, 0x3e, 0x03, 0x00, 0x00, 0xff, 0xff, 0xfe, 0x0f, 0xd7, 0x36, 0x32, 0x02, 0x00, 0x00, } func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto index d14d42591b..fcec553542 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto @@ -33,9 +33,9 @@ message PartialObjectMetadataList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 2; // items contains each of the included items. - repeated k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata items = 1; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata items = 1; } diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go index 5e60142405..9e22a00564 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -45,6 +45,19 @@ var ( // Requirements is AND of all requirements. type Requirements []Requirement +func (r Requirements) String() string { + var sb strings.Builder + + for i, requirement := range r { + if i > 0 { + sb.WriteString(", ") + } + sb.WriteString(requirement.String()) + } + + return sb.String() +} + // Selector represents a label selector. type Selector interface { // Matches returns true if this selector matches the given set of labels. @@ -285,6 +298,13 @@ func (r *Requirement) Values() sets.String { return ret } +// ValuesUnsorted returns a copy of requirement values as passed to NewRequirement without sorting. +func (r *Requirement) ValuesUnsorted() []string { + ret := make([]string, 0, len(r.strValues)) + ret = append(ret, r.strValues...) + return ret +} + // Equal checks the equality of requirement. 
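To illustrate the selector additions above (Requirements.String and Requirement.ValuesUnsorted), a small hedged example follows; the harness and the exact printed output in the comments are assumptions, only the two new methods come from the diff.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

func main() {
	// ValuesUnsorted returns the values in the order passed to NewRequirement,
	// whereas Values() and String() present them in sorted form.
	req, err := labels.NewRequirement("env", selection.In, []string{"prod", "dev"})
	if err != nil {
		panic(err)
	}
	reqs := labels.Requirements{*req}
	fmt.Println(reqs.String())        // e.g. "env in (dev,prod)"
	fmt.Println(req.ValuesUnsorted()) // [prod dev]
}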
func (r Requirement) Equal(x Requirement) bool { if r.key != x.key { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go index 9056397fa5..60c000bcb7 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go @@ -18,16 +18,77 @@ package runtime import ( "bytes" - "encoding/json" "errors" + "fmt" + + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" + "k8s.io/apimachinery/pkg/util/json" ) +// RawExtension intentionally avoids implementing value.UnstructuredConverter for now because the +// signature of ToUnstructured does not allow returning an error value in cases where the conversion +// is not possible (content type is unrecognized or bytes don't match content type). +func rawToUnstructured(raw []byte, contentType string) (interface{}, error) { + switch contentType { + case ContentTypeJSON: + var u interface{} + if err := json.Unmarshal(raw, &u); err != nil { + return nil, fmt.Errorf("failed to parse RawExtension bytes as JSON: %w", err) + } + return u, nil + case ContentTypeCBOR: + var u interface{} + if err := cbor.Unmarshal(raw, &u); err != nil { + return nil, fmt.Errorf("failed to parse RawExtension bytes as CBOR: %w", err) + } + return u, nil + default: + return nil, fmt.Errorf("cannot convert RawExtension with unrecognized content type to unstructured") + } +} + +func (re RawExtension) guessContentType() string { + switch { + case bytes.HasPrefix(re.Raw, cborSelfDescribed): + return ContentTypeCBOR + case len(re.Raw) > 0: + switch re.Raw[0] { + case '\t', '\r', '\n', ' ', '{', '[', 'n', 't', 'f', '"', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + // Prefixes for the four whitespace characters, objects, arrays, strings, numbers, true, false, and null. + return ContentTypeJSON + } + } + return "" +} + func (re *RawExtension) UnmarshalJSON(in []byte) error { if re == nil { return errors.New("runtime.RawExtension: UnmarshalJSON on nil pointer") } - if !bytes.Equal(in, []byte("null")) { - re.Raw = append(re.Raw[0:0], in...) + if bytes.Equal(in, []byte("null")) { + return nil + } + re.Raw = append(re.Raw[0:0], in...) + return nil +} + +var ( + cborNull = []byte{0xf6} + cborSelfDescribed = []byte{0xd9, 0xd9, 0xf7} +) + +func (re *RawExtension) UnmarshalCBOR(in []byte) error { + if re == nil { + return errors.New("runtime.RawExtension: UnmarshalCBOR on nil pointer") + } + if !bytes.Equal(in, cborNull) { + if !bytes.HasPrefix(in, cborSelfDescribed) { + // The self-described CBOR tag doesn't change the interpretation of the data + // item it encloses, but it is useful as a magic number. Its encoding is + // also what is used to implement the CBOR RecognizingDecoder. + re.Raw = append(re.Raw[:0], cborSelfDescribed...) + } + re.Raw = append(re.Raw, in...) } return nil } @@ -46,6 +107,35 @@ func (re RawExtension) MarshalJSON() ([]byte, error) { } return []byte("null"), nil } - // TODO: Check whether ContentType is actually JSON before returning it. 
- return re.Raw, nil + + contentType := re.guessContentType() + if contentType == ContentTypeJSON { + return re.Raw, nil + } + + u, err := rawToUnstructured(re.Raw, contentType) + if err != nil { + return nil, err + } + return json.Marshal(u) +} + +func (re RawExtension) MarshalCBOR() ([]byte, error) { + if re.Raw == nil { + if re.Object != nil { + return cbor.Marshal(re.Object) + } + return cbor.Marshal(nil) + } + + contentType := re.guessContentType() + if contentType == ContentTypeCBOR { + return re.Raw, nil + } + + u, err := rawToUnstructured(re.Raw, contentType) + if err != nil { + return nil, err + } + return cbor.Marshal(u) } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go index ec677a7d96..2e40e140ae 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto +// source: k8s.io/apimachinery/pkg/runtime/generated.proto package runtime @@ -45,7 +45,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *RawExtension) Reset() { *m = RawExtension{} } func (*RawExtension) ProtoMessage() {} func (*RawExtension) Descriptor() ([]byte, []int) { - return fileDescriptor_9d3c45d7f546725c, []int{0} + return fileDescriptor_2e0e4b920403a48c, []int{0} } func (m *RawExtension) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -73,7 +73,7 @@ var xxx_messageInfo_RawExtension proto.InternalMessageInfo func (m *TypeMeta) Reset() { *m = TypeMeta{} } func (*TypeMeta) ProtoMessage() {} func (*TypeMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_9d3c45d7f546725c, []int{1} + return fileDescriptor_2e0e4b920403a48c, []int{1} } func (m *TypeMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -101,7 +101,7 @@ var xxx_messageInfo_TypeMeta proto.InternalMessageInfo func (m *Unknown) Reset() { *m = Unknown{} } func (*Unknown) ProtoMessage() {} func (*Unknown) Descriptor() ([]byte, []int) { - return fileDescriptor_9d3c45d7f546725c, []int{2} + return fileDescriptor_2e0e4b920403a48c, []int{2} } func (m *Unknown) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -133,35 +133,34 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptor_9d3c45d7f546725c) -} - -var fileDescriptor_9d3c45d7f546725c = []byte{ - // 380 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xcf, 0xaa, 0x13, 0x31, - 0x14, 0xc6, 0x27, 0xb7, 0x85, 0x7b, 0x4d, 0x0b, 0x57, 0xe2, 0xc2, 0xd1, 0x45, 0xe6, 0xd2, 0x95, - 0x77, 0x61, 0x02, 0x17, 0x04, 0xb7, 0x9d, 0x52, 0x50, 0x44, 0x90, 0xe0, 0x1f, 0x70, 0x65, 0x3a, - 0x13, 0xa7, 0x61, 0xe8, 0xc9, 0x90, 0x66, 0x1c, 0xbb, 0xf3, 0x11, 0x7c, 0xac, 0x2e, 0xbb, 0xec, - 0xaa, 0xd8, 0xf1, 0x21, 0xdc, 0x4a, 0xd3, 0xb4, 0x56, 0x5d, 0x74, 0x97, 0x73, 0xbe, 0xef, 0xf7, - 0x9d, 0x73, 0x20, 0xf8, 0x45, 0xf9, 0x7c, 0xce, 0xb4, 0xe1, 0x65, 0x3d, 0x51, 0x16, 0x94, 0x53, - 0x73, 0xfe, 0x45, 0x41, 0x6e, 0x2c, 0x0f, 0x82, 0xac, 0xf4, 0x4c, 0x66, 0x53, 0x0d, 0xca, 0x2e, - 0x78, 0x55, 0x16, 0xdc, 0xd6, 0xe0, 0xf4, 0x4c, 0xf1, 0x42, 0x81, 0xb2, 0xd2, 0xa9, 0x9c, 0x55, - 0xd6, 0x38, 0x43, 0x92, 0x3d, 0xc0, 0x4e, 0x01, 0x56, 0x95, 0x05, 0x0b, 0xc0, 0xe3, 0xa7, 0x85, - 0x76, 0xd3, 
0x7a, 0xc2, 0x32, 0x33, 0xe3, 0x85, 0x29, 0x0c, 0xf7, 0xdc, 0xa4, 0xfe, 0xec, 0x2b, - 0x5f, 0xf8, 0xd7, 0x3e, 0x6f, 0x70, 0x8b, 0xfb, 0x42, 0x36, 0xe3, 0xaf, 0x4e, 0xc1, 0x5c, 0x1b, - 0x20, 0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0x37, 0xe8, 0x49, 0x3f, 0xbd, 0x6c, 0x37, 0x49, 0x47, - 0xc8, 0x46, 0xec, 0x7a, 0x83, 0x4f, 0xf8, 0xea, 0xed, 0xa2, 0x52, 0xaf, 0x95, 0x93, 0xe4, 0x0e, - 0x63, 0x59, 0xe9, 0xf7, 0xca, 0xee, 0x20, 0xef, 0xbe, 0x97, 0x92, 0xe5, 0x26, 0x89, 0xda, 0x4d, - 0x82, 0x87, 0x6f, 0x5e, 0x06, 0x45, 0x9c, 0xb8, 0xc8, 0x0d, 0xee, 0x96, 0x1a, 0xf2, 0xf8, 0xc2, - 0xbb, 0xfb, 0xc1, 0xdd, 0x7d, 0xa5, 0x21, 0x17, 0x5e, 0x19, 0xfc, 0x42, 0xf8, 0xf2, 0x1d, 0x94, - 0x60, 0x1a, 0x20, 0x1f, 0xf0, 0x95, 0x0b, 0xd3, 0x7c, 0x7e, 0xef, 0xee, 0x96, 0x9d, 0xb9, 0x9d, - 0x1d, 0xd6, 0x4b, 0xef, 0x87, 0xf0, 0xe3, 0xc2, 0xe2, 0x18, 0x76, 0xb8, 0xf0, 0xe2, 0xff, 0x0b, - 0xc9, 0x10, 0x5f, 0x67, 0x06, 0x9c, 0x02, 0x37, 0x86, 0xcc, 0xe4, 0x1a, 0x8a, 0xb8, 0xe3, 0x97, - 0x7d, 0x18, 0xf2, 0xae, 0x47, 0x7f, 0xcb, 0xe2, 0x5f, 0x3f, 0x79, 0x86, 0x7b, 0xa1, 0xb5, 0x1b, - 0x1d, 0x77, 0x3d, 0xfe, 0x20, 0xe0, 0xbd, 0xd1, 0x1f, 0x49, 0x9c, 0xfa, 0xd2, 0xf1, 0x72, 0x4b, - 0xa3, 0xd5, 0x96, 0x46, 0xeb, 0x2d, 0x8d, 0xbe, 0xb5, 0x14, 0x2d, 0x5b, 0x8a, 0x56, 0x2d, 0x45, - 0xeb, 0x96, 0xa2, 0x1f, 0x2d, 0x45, 0xdf, 0x7f, 0xd2, 0xe8, 0x63, 0x72, 0xe6, 0xb7, 0xfc, 0x0e, - 0x00, 0x00, 0xff, 0xff, 0x1f, 0x32, 0xd5, 0x68, 0x68, 0x02, 0x00, 0x00, + proto.RegisterFile("k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptor_2e0e4b920403a48c) +} + +var fileDescriptor_2e0e4b920403a48c = []byte{ + // 365 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x4f, 0x6b, 0x22, 0x31, + 0x18, 0xc6, 0x27, 0x2a, 0xe8, 0x46, 0xc1, 0x25, 0x7b, 0xd8, 0xd9, 0x3d, 0x64, 0xc4, 0xd3, 0x7a, + 0xd8, 0x0c, 0x08, 0x85, 0x5e, 0x1d, 0xf1, 0x50, 0x4a, 0xa1, 0x84, 0xfe, 0x81, 0x9e, 0x1a, 0x67, + 0xd2, 0x31, 0x0c, 0x26, 0xc3, 0x18, 0x99, 0x7a, 0xeb, 0x47, 0xe8, 0xc7, 0xf2, 0xe8, 0xd1, 0x93, + 0xd4, 0xe9, 0x87, 0xe8, 0xb5, 0x18, 0xa3, 0xb5, 0xed, 0xc1, 0x5b, 0xde, 0xf7, 0x79, 0x7e, 0xcf, + 0xfb, 0xbe, 0x10, 0xe8, 0x27, 0xa7, 0x13, 0x22, 0x94, 0xcf, 0x52, 0x31, 0x66, 0xe1, 0x48, 0x48, + 0x9e, 0xcd, 0xfc, 0x34, 0x89, 0xfd, 0x6c, 0x2a, 0xb5, 0x18, 0x73, 0x3f, 0xe6, 0x92, 0x67, 0x4c, + 0xf3, 0x88, 0xa4, 0x99, 0xd2, 0x0a, 0x79, 0x5b, 0x80, 0x1c, 0x02, 0x24, 0x4d, 0x62, 0x62, 0x81, + 0xbf, 0xff, 0x63, 0xa1, 0x47, 0xd3, 0x21, 0x09, 0xd5, 0xd8, 0x8f, 0x55, 0xac, 0x7c, 0xc3, 0x0d, + 0xa7, 0x0f, 0xa6, 0x32, 0x85, 0x79, 0x6d, 0xf3, 0xda, 0x1d, 0xd8, 0xa0, 0x2c, 0x1f, 0x3c, 0x6a, + 0x2e, 0x27, 0x42, 0x49, 0xf4, 0x07, 0x96, 0x33, 0x96, 0xbb, 0xa0, 0x05, 0xfe, 0x35, 0x82, 0x6a, + 0xb1, 0xf2, 0xca, 0x94, 0xe5, 0x74, 0xd3, 0x6b, 0xdf, 0xc3, 0xda, 0xd5, 0x2c, 0xe5, 0x17, 0x5c, + 0x33, 0xd4, 0x85, 0x90, 0xa5, 0xe2, 0x86, 0x67, 0x1b, 0xc8, 0xb8, 0x7f, 0x04, 0x68, 0xbe, 0xf2, + 0x9c, 0x62, 0xe5, 0xc1, 0xde, 0xe5, 0x99, 0x55, 0xe8, 0x81, 0x0b, 0xb5, 0x60, 0x25, 0x11, 0x32, + 0x72, 0x4b, 0xc6, 0xdd, 0xb0, 0xee, 0xca, 0xb9, 0x90, 0x11, 0x35, 0x4a, 0xfb, 0x0d, 0xc0, 0xea, + 0xb5, 0x4c, 0xa4, 0xca, 0x25, 0xba, 0x85, 0x35, 0x6d, 0xa7, 0x99, 0xfc, 0x7a, 0xb7, 0x43, 0x8e, + 0xdc, 0x4e, 0x76, 0xeb, 0x05, 0x3f, 0x6d, 0xf8, 0x7e, 0x61, 0xba, 0x0f, 0xdb, 0x5d, 0x58, 0xfa, + 0x7e, 0x21, 0xea, 0xc1, 0x66, 0xa8, 0xa4, 0xe6, 0x52, 0x0f, 0x64, 0xa8, 0x22, 0x21, 0x63, 0xb7, + 0x6c, 0x96, 0xfd, 0x6d, 0xf3, 0x9a, 0xfd, 0xcf, 0x32, 0xfd, 0xea, 0x47, 0x27, 0xb0, 0x6e, 0x5b, + 0x9b, 0xd1, 0x6e, 0xc5, 0xe0, 0xbf, 0x2c, 0x5e, 0xef, 
0x7f, 0x48, 0xf4, 0xd0, 0x17, 0x0c, 0xe6, + 0x6b, 0xec, 0x2c, 0xd6, 0xd8, 0x59, 0xae, 0xb1, 0xf3, 0x54, 0x60, 0x30, 0x2f, 0x30, 0x58, 0x14, + 0x18, 0x2c, 0x0b, 0x0c, 0x5e, 0x0a, 0x0c, 0x9e, 0x5f, 0xb1, 0x73, 0xe7, 0x1d, 0xf9, 0x2d, 0xef, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x2a, 0x9b, 0x09, 0xb3, 0x4f, 0x02, 0x00, 0x00, } func (m *RawExtension) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go index f46a24cc6c..cc0a77bba6 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go @@ -236,10 +236,14 @@ func (e WithVersionEncoder) Encode(obj Object, stream io.Writer) error { gvk = preferredGVK } } - kind.SetGroupVersionKind(gvk) - err = e.Encoder.Encode(obj, stream) - kind.SetGroupVersionKind(oldGVK) - return err + + // The gvk only needs to be set if not already as desired. + if gvk != oldGVK { + kind.SetGroupVersionKind(gvk) + defer kind.SetGroupVersionKind(oldGVK) + } + + return e.Encoder.Encode(obj, stream) } // WithoutVersionDecoder clears the group version kind of a deserialized object. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go index 46b1e787bd..7a26d2798e 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto +// source: k8s.io/apimachinery/pkg/runtime/schema/generated.proto package schema @@ -39,21 +39,20 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptor_0462724132518e0d) + proto.RegisterFile("k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptor_25f8f0eed21c6089) } -var fileDescriptor_0462724132518e0d = []byte{ - // 186 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0xce, 0xad, 0x8e, 0xc3, 0x30, - 0x0c, 0xc0, 0xf1, 0x84, 0x1e, 0x3c, 0x78, 0xc0, 0xb0, 0xec, 0x62, 0x7a, 0xf8, 0xf0, 0xa4, 0xf1, - 0xb1, 0xb4, 0xf5, 0xd2, 0x28, 0xca, 0x87, 0xd2, 0x64, 0xd2, 0xd8, 0x1e, 0x61, 0x8f, 0x55, 0x58, - 0x58, 0xb8, 0x66, 0x2f, 0x32, 0x29, 0x2d, 0x18, 0x1c, 0xf3, 0x5f, 0xd6, 0xcf, 0xf2, 0xd7, 0xd1, - 0xfc, 0x8d, 0x42, 0x7b, 0x34, 0xb9, 0xa5, 0xe8, 0x28, 0xd1, 0x88, 0x17, 0x72, 0xbd, 0x8f, 0xb8, - 0x2f, 0x64, 0xd0, 0x56, 0x76, 0x83, 0x76, 0x14, 0xaf, 0x18, 0x8c, 0xc2, 0x98, 0x5d, 0xd2, 0x96, - 0x70, 0xec, 0x06, 0xb2, 0x12, 0x15, 0x39, 0x8a, 0x32, 0x51, 0x2f, 0x42, 0xf4, 0xc9, 0x7f, 0x37, - 0x9b, 0x13, 0xef, 0x4e, 0x04, 0xa3, 0xc4, 0xee, 0xc4, 0xe6, 0x7e, 0x7e, 0x95, 0x4e, 0x43, 0x6e, - 0x45, 0xe7, 0x2d, 0x2a, 0xaf, 0x3c, 0x56, 0xde, 0xe6, 0x73, 0xad, 0x1a, 0x75, 0xda, 0xce, 0xfe, - 0x1f, 0xa6, 0x15, 0xd8, 0xbc, 0x02, 0x5b, 0x56, 0x60, 0xb7, 0x02, 0x7c, 0x2a, 0xc0, 0xe7, 0x02, - 0x7c, 0x29, 0xc0, 0x1f, 0x05, 0xf8, 0xfd, 0x09, 0xec, 0xd4, 0x7c, 0xf6, 0xf4, 0x2b, 0x00, 0x00, - 0xff, 0xff, 0x12, 0xb4, 0xae, 0x48, 0xf6, 0x00, 0x00, 0x00, +var fileDescriptor_25f8f0eed21c6089 = []byte{ + // 170 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0xce, 0xa1, 0x0e, 0xc2, 
0x30, + 0x10, 0xc6, 0xf1, 0xd6, 0x22, 0x91, 0x88, 0x93, 0x73, 0xdc, 0x39, 0x82, 0x46, 0xf3, 0x04, 0xb8, + 0x6e, 0x94, 0xae, 0x59, 0xba, 0x6b, 0xba, 0x4e, 0xe0, 0x78, 0x04, 0x1e, 0x6b, 0x72, 0x72, 0x92, + 0x95, 0x17, 0x21, 0x69, 0x11, 0x48, 0xdc, 0xfd, 0xc5, 0xef, 0xf2, 0x6d, 0x0e, 0xdd, 0x71, 0x40, + 0xcb, 0xa4, 0xbc, 0x75, 0xaa, 0x69, 0x6d, 0xaf, 0xc3, 0x9d, 0x7c, 0x67, 0x28, 0x8c, 0x7d, 0xb4, + 0x4e, 0xd3, 0xd0, 0xb4, 0xda, 0x29, 0x32, 0xba, 0xd7, 0x41, 0x45, 0x7d, 0x45, 0x1f, 0x38, 0xf2, + 0xb6, 0x2a, 0x0e, 0x7f, 0x1d, 0xfa, 0xce, 0xe0, 0xd7, 0x61, 0x71, 0xbb, 0xbd, 0xb1, 0xb1, 0x1d, + 0x6b, 0x6c, 0xd8, 0x91, 0x61, 0xc3, 0x94, 0x79, 0x3d, 0xde, 0x72, 0xe5, 0xc8, 0x57, 0x79, 0x7b, + 0x3a, 0x4f, 0x2b, 0x88, 0x79, 0x05, 0xb1, 0xac, 0x20, 0x1e, 0x09, 0xe4, 0x94, 0x40, 0xce, 0x09, + 0xe4, 0x92, 0x40, 0xbe, 0x12, 0xc8, 0xe7, 0x1b, 0xc4, 0xa5, 0xfa, 0x6f, 0xf4, 0x27, 0x00, 0x00, + 0xff, 0xff, 0x97, 0xb8, 0x4d, 0x1f, 0xdd, 0x00, 0x00, 0x00, } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go new file mode 100644 index 0000000000..cd78b1df26 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go @@ -0,0 +1,36 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package direct provides functions for marshaling and unmarshaling between arbitrary Go values and +// CBOR data, with behavior that is compatible with that of the CBOR serializer. In particular, +// types that implement cbor.Marshaler and cbor.Unmarshaler should use these functions. +package direct + +import ( + "k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes" +) + +func Marshal(src interface{}) ([]byte, error) { + return modes.Encode.Marshal(src) +} + +func Unmarshal(src []byte, dst interface{}) error { + return modes.Decode.Unmarshal(src, dst) +} + +func Diagnose(src []byte) (string, error) { + return modes.Diagnostic.Diagnose(src) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go new file mode 100644 index 0000000000..f14cbd6b58 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go @@ -0,0 +1,65 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package modes + +import ( + "bytes" + "sync" +) + +var buffers = BufferProvider{p: new(sync.Pool)} + +type buffer struct { + bytes.Buffer +} + +type pool interface { + Get() interface{} + Put(interface{}) +} + +type BufferProvider struct { + p pool +} + +func (b *BufferProvider) Get() *buffer { + if buf, ok := b.p.Get().(*buffer); ok { + return buf + } + return &buffer{} +} + +func (b *BufferProvider) Put(buf *buffer) { + if buf.Cap() > 3*1024*1024 /* Default MaxRequestBodyBytes */ { + // Objects in a sync.Pool are assumed to be fungible. This is not a good assumption + // for pools of *bytes.Buffer because a *bytes.Buffer's underlying array grows as + // needed to accommodate writes. In Kubernetes, apiservers tend to encode "small" + // objects very frequently and much larger objects (especially large lists) only + // occasionally. Under steady load, pooled buffers tend to be borrowed frequently + // enough to prevent them from being released. Over time, each buffer is used to + // encode a large object and its capacity increases accordingly. The result is that + // practically all buffers in the pool retain much more capacity than needed to + // encode most objects. + + // As a basic mitigation for the worst case, buffers with more capacity than the + // default max request body size are never returned to the pool. + // TODO: Optimize for higher buffer utilization. + return + } + buf.Reset() + b.p.Put(buf) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go new file mode 100644 index 0000000000..858529e958 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go @@ -0,0 +1,422 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modes + +import ( + "encoding" + "encoding/json" + "errors" + "fmt" + "reflect" + "sync" + + "github.com/fxamacker/cbor/v2" +) + +// Returns a non-nil error if and only if the argument's type (or one of its component types, for +// composite types) implements json.Marshaler or encoding.TextMarshaler without also implementing +// cbor.Marshaler and likewise for the respective Unmarshaler interfaces. +// +// This is a temporary, graduation-blocking restriction and will be removed in favor of automatic +// transcoding between CBOR and JSON/text for these types. This restriction allows CBOR to be +// exercised for in-tree and unstructured types while mitigating the risk of mangling out-of-tree +// types in client programs. 
+func RejectCustomMarshalers(v interface{}) error { + if v == nil { + return nil + } + rv := reflect.ValueOf(v) + if err := marshalerCache.getChecker(rv.Type()).check(rv, maxDepth); err != nil { + return fmt.Errorf("unable to serialize %T: %w", v, err) + } + if err := unmarshalerCache.getChecker(rv.Type()).check(rv, maxDepth); err != nil { + return fmt.Errorf("unable to serialize %T: %w", v, err) + } + return nil +} + +// Recursion depth is limited as a basic mitigation against cyclic objects. Objects created by the +// decoder shouldn't be able to contain cycles, but practically any object can be passed to the +// encoder. +var errMaxDepthExceeded = errors.New("object depth exceeds limit (possible cycle?)") + +// The JSON encoder begins detecting cycles after depth 1000. Use a generous limit here, knowing +// that it can might deeply nested acyclic objects. The limit will be removed along with the rest of +// this mechanism. +const maxDepth = 2048 + +var marshalerCache = checkers{ + cborInterface: reflect.TypeFor[cbor.Marshaler](), + nonCBORInterfaces: []reflect.Type{ + reflect.TypeFor[json.Marshaler](), + reflect.TypeFor[encoding.TextMarshaler](), + }, +} + +var unmarshalerCache = checkers{ + cborInterface: reflect.TypeFor[cbor.Unmarshaler](), + nonCBORInterfaces: []reflect.Type{ + reflect.TypeFor[json.Unmarshaler](), + reflect.TypeFor[encoding.TextUnmarshaler](), + }, + assumeAddressableValues: true, +} + +// checker wraps a function for dynamically checking a value of a specific type for custom JSON +// behaviors not matched by a custom CBOR behavior. +type checker struct { + // check returns a non-nil error if the given value might be marshalled to or from CBOR + // using the default behavior for its kind, but marshalled to or from JSON using custom + // behavior. + check func(rv reflect.Value, depth int) error + + // safe returns true if all values of this type are safe from mismatched custom marshalers. + safe func() bool +} + +// TODO: stale +// Having a single addressable checker for comparisons lets us prune and collapse parts of the +// object traversal that are statically known to be safe. Depending on the type, it may be +// unnecessary to inspect each value of that type. For example, no value of the built-in type bool +// can implement json.Marshaler (a named type whose underlying type is bool could, but it is a +// distinct type from bool). +var noop = checker{ + safe: func() bool { + return true + }, + check: func(rv reflect.Value, depth int) error { + return nil + }, +} + +type checkers struct { + m sync.Map // reflect.Type => *checker + + cborInterface reflect.Type + nonCBORInterfaces []reflect.Type + + assumeAddressableValues bool +} + +func (cache *checkers) getChecker(rt reflect.Type) checker { + if ptr, ok := cache.m.Load(rt); ok { + return *ptr.(*checker) + } + + return cache.getCheckerInternal(rt, nil) +} + +// linked list node representing the path from a composite type to an element type +type path struct { + Type reflect.Type + Parent *path +} + +func (p path) cyclic(rt reflect.Type) bool { + for ancestor := &p; ancestor != nil; ancestor = ancestor.Parent { + if ancestor.Type == rt { + return true + } + } + return false +} + +func (cache *checkers) getCheckerInternal(rt reflect.Type, parent *path) (c checker) { + // Store a placeholder cache entry first to handle cyclic types. 
+ var wg sync.WaitGroup + wg.Add(1) + defer wg.Done() + c = checker{ + safe: func() bool { + wg.Wait() + return c.safe() + }, + check: func(rv reflect.Value, depth int) error { + wg.Wait() + return c.check(rv, depth) + }, + } + if actual, loaded := cache.m.LoadOrStore(rt, &c); loaded { + // Someone else stored an entry for this type, use it. + return *actual.(*checker) + } + + // Take a nonreflective path for the unstructured container types. They're common and + // usually nested inside one another. + switch rt { + case reflect.TypeFor[map[string]interface{}](), reflect.TypeFor[[]interface{}](): + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + return checkUnstructuredValue(cache, rv.Interface(), depth) + }, + } + } + + // It's possible that one of the relevant interfaces is implemented on a type with a pointer + // receiver, but that a particular value of that type is not addressable. For example: + // + // func (Foo) MarshalText() ([]byte, error) { ... } + // func (*Foo) MarshalCBOR() ([]byte, error) { ... } + // + // Both methods are in the method set of *Foo, but the method set of Foo contains only + // MarshalText. + // + // Both the unmarshaler and marshaler checks assume that methods implementing a JSON or text + // interface with a pointer receiver are always accessible. Only the unmarshaler check + // assumes that CBOR methods with pointer receivers are accessible. + + if rt.Implements(cache.cborInterface) { + return noop + } + for _, unsafe := range cache.nonCBORInterfaces { + if rt.Implements(unsafe) { + err := fmt.Errorf("%v implements %v without corresponding cbor interface", rt, unsafe) + return checker{ + safe: func() bool { + return false + }, + check: func(reflect.Value, int) error { + return err + }, + } + } + } + + if cache.assumeAddressableValues && reflect.PointerTo(rt).Implements(cache.cborInterface) { + return noop + } + for _, unsafe := range cache.nonCBORInterfaces { + if reflect.PointerTo(rt).Implements(unsafe) { + err := fmt.Errorf("%v implements %v without corresponding cbor interface", reflect.PointerTo(rt), unsafe) + return checker{ + safe: func() bool { + return false + }, + check: func(reflect.Value, int) error { + return err + }, + } + } + } + + self := &path{Type: rt, Parent: parent} + + switch rt.Kind() { + case reflect.Array: + ce := cache.getCheckerInternal(rt.Elem(), self) + rtlen := rt.Len() + if rtlen == 0 || (!self.cyclic(rt.Elem()) && ce.safe()) { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if depth <= 0 { + return errMaxDepthExceeded + } + for i := 0; i < rtlen; i++ { + if err := ce.check(rv.Index(i), depth-1); err != nil { + return err + } + } + return nil + }, + } + + case reflect.Interface: + // All interface values have to be checked because their dynamic type might + // implement one of the interesting interfaces or be composed of another type that + // does. 
+ return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if rv.IsNil() { + return nil + } + // Unpacking interfaces must count against recursion depth, + // consider this cycle: + // > var i interface{} + // > var p *interface{} = &i + // > i = p + // > rv := reflect.ValueOf(i) + // > for { + // > rv = rv.Elem() + // > } + if depth <= 0 { + return errMaxDepthExceeded + } + rv = rv.Elem() + return cache.getChecker(rv.Type()).check(rv, depth-1) + }, + } + + case reflect.Map: + rtk := rt.Key() + ck := cache.getCheckerInternal(rtk, self) + rte := rt.Elem() + ce := cache.getCheckerInternal(rte, self) + if !self.cyclic(rtk) && !self.cyclic(rte) && ck.safe() && ce.safe() { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if depth <= 0 { + return errMaxDepthExceeded + } + iter := rv.MapRange() + rvk := reflect.New(rtk).Elem() + rve := reflect.New(rte).Elem() + for iter.Next() { + rvk.SetIterKey(iter) + if err := ck.check(rvk, depth-1); err != nil { + return err + } + rve.SetIterValue(iter) + if err := ce.check(rve, depth-1); err != nil { + return err + } + } + return nil + }, + } + + case reflect.Pointer: + ce := cache.getCheckerInternal(rt.Elem(), self) + if !self.cyclic(rt.Elem()) && ce.safe() { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if rv.IsNil() { + return nil + } + if depth <= 0 { + return errMaxDepthExceeded + } + return ce.check(rv.Elem(), depth-1) + }, + } + + case reflect.Slice: + ce := cache.getCheckerInternal(rt.Elem(), self) + if !self.cyclic(rt.Elem()) && ce.safe() { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if depth <= 0 { + return errMaxDepthExceeded + } + for i := 0; i < rv.Len(); i++ { + if err := ce.check(rv.Index(i), depth-1); err != nil { + return err + } + } + return nil + }, + } + + case reflect.Struct: + type field struct { + Index int + Checker checker + } + var fields []field + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + cf := cache.getCheckerInternal(f.Type, self) + if !self.cyclic(f.Type) && cf.safe() { + continue + } + fields = append(fields, field{Index: i, Checker: cf}) + } + if len(fields) == 0 { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if depth <= 0 { + return errMaxDepthExceeded + } + for _, fi := range fields { + if err := fi.Checker.check(rv.Field(fi.Index), depth-1); err != nil { + return err + } + } + return nil + }, + } + + default: + // Not a serializable composite type (funcs and channels are composite types but are + // rejected by JSON and CBOR serialization). 
+ return noop + + } +} + +func checkUnstructuredValue(cache *checkers, v interface{}, depth int) error { + switch v := v.(type) { + case nil, bool, int64, float64, string: + return nil + case []interface{}: + if depth <= 0 { + return errMaxDepthExceeded + } + for _, element := range v { + if err := checkUnstructuredValue(cache, element, depth-1); err != nil { + return err + } + } + return nil + case map[string]interface{}: + if depth <= 0 { + return errMaxDepthExceeded + } + for _, element := range v { + if err := checkUnstructuredValue(cache, element, depth-1); err != nil { + return err + } + } + return nil + default: + // Unmarshaling an unstructured doesn't use other dynamic types, but nothing + // prevents inserting values with arbitrary dynamic types into unstructured content, + // as long as they can be marshalled. + rv := reflect.ValueOf(v) + return cache.getChecker(rv.Type()).check(rv, depth) + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go new file mode 100644 index 0000000000..895b0deff9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go @@ -0,0 +1,158 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modes + +import ( + "reflect" + + "github.com/fxamacker/cbor/v2" +) + +var simpleValues *cbor.SimpleValueRegistry = func() *cbor.SimpleValueRegistry { + var opts []func(*cbor.SimpleValueRegistry) error + for sv := 0; sv <= 255; sv++ { + // Reject simple values 0-19, 23, and 32-255. The simple values 24-31 are reserved + // and considered ill-formed by the CBOR specification. We only accept false (20), + // true (21), and null (22). + switch sv { + case 20: // false + case 21: // true + case 22: // null + case 24, 25, 26, 27, 28, 29, 30, 31: // reserved + default: + opts = append(opts, cbor.WithRejectedSimpleValue(cbor.SimpleValue(sv))) + } + } + simpleValues, err := cbor.NewSimpleValueRegistryFromDefaults(opts...) + if err != nil { + panic(err) + } + return simpleValues +}() + +var Decode cbor.DecMode = func() cbor.DecMode { + decode, err := cbor.DecOptions{ + // Maps with duplicate keys are well-formed but invalid according to the CBOR spec + // and never acceptable. Unlike the JSON serializer, inputs containing duplicate map + // keys are rejected outright and not surfaced as a strict decoding error. + DupMapKey: cbor.DupMapKeyEnforcedAPF, + + // For JSON parity, decoding an RFC3339 string into time.Time needs to be accepted + // with or without tagging. If a tag number is present, it must be valid. + TimeTag: cbor.DecTagOptional, + + // Observed depth up to 16 in fuzzed batch/v1 CronJobList. JSON implementation limit + // is 10000. + MaxNestedLevels: 64, + + MaxArrayElements: 1024, + MaxMapPairs: 1024, + + // Indefinite-length sequences aren't produced by this serializer, but other + // implementations can. 
+ IndefLength: cbor.IndefLengthAllowed, + + // Accept inputs that contain CBOR tags. + TagsMd: cbor.TagsAllowed, + + // Decode type 0 (unsigned integer) as int64. + // TODO: IntDecConvertSignedOrFail errors on overflow, JSON will try to fall back to float64. + IntDec: cbor.IntDecConvertSignedOrFail, + + // Disable producing map[cbor.ByteString]interface{}, which is not acceptable for + // decodes into interface{}. + MapKeyByteString: cbor.MapKeyByteStringForbidden, + + // Error on map keys that don't map to a field in the destination struct. + ExtraReturnErrors: cbor.ExtraDecErrorUnknownField, + + // Decode maps into concrete type map[string]interface{} when the destination is an + // interface{}. + DefaultMapType: reflect.TypeOf(map[string]interface{}(nil)), + + // A CBOR text string whose content is not a valid UTF-8 sequence is well-formed but + // invalid according to the CBOR spec. Reject invalid inputs. Encoders are + // responsible for ensuring that all text strings they produce contain valid UTF-8 + // sequences and may use the byte string major type to encode strings that have not + // been validated. + UTF8: cbor.UTF8RejectInvalid, + + // Never make a case-insensitive match between a map key and a struct field. + FieldNameMatching: cbor.FieldNameMatchingCaseSensitive, + + // Produce string concrete values when decoding a CBOR byte string into interface{}. + DefaultByteStringType: reflect.TypeOf(""), + + // Allow CBOR byte strings to be decoded into string destination values. If a byte + // string is enclosed in an "expected later encoding" tag + // (https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.5.2), then the text + // encoding indicated by that tag (e.g. base64) will be applied to the contents of + // the byte string. + ByteStringToString: cbor.ByteStringToStringAllowedWithExpectedLaterEncoding, + + // Allow CBOR byte strings to match struct fields when appearing as a map key. + FieldNameByteString: cbor.FieldNameByteStringAllowed, + + // When decoding an unrecognized tag to interface{}, return the decoded tag content + // instead of the default, a cbor.Tag representing a (number, content) pair. + UnrecognizedTagToAny: cbor.UnrecognizedTagContentToAny, + + // Decode time tags to interface{} as strings containing RFC 3339 timestamps. + TimeTagToAny: cbor.TimeTagToRFC3339Nano, + + // For parity with JSON, strings can be decoded into time.Time if they are RFC 3339 + // timestamps. + ByteStringToTime: cbor.ByteStringToTimeAllowed, + + // Reject NaN and infinite floating-point values since they don't have a JSON + // representation (RFC 8259 Section 6). + NaN: cbor.NaNDecodeForbidden, + Inf: cbor.InfDecodeForbidden, + + // When unmarshaling a byte string into a []byte, assume that the byte string + // contains base64-encoded bytes, unless explicitly counterindicated by an "expected + // later encoding" tag. This is consistent with the because of unmarshaling a JSON + // text into a []byte. + ByteStringExpectedFormat: cbor.ByteStringExpectedBase64, + + // Reject the arbitrary-precision integer tags because they can't be faithfully + // roundtripped through the allowable Unstructured types. + BignumTag: cbor.BignumTagForbidden, + + // Reject anything other than the simple values true, false, and null. + SimpleValues: simpleValues, + + // Disable default recognition of types implementing encoding.BinaryUnmarshaler, + // which is not recognized for JSON decoding. 
+ BinaryUnmarshaler: cbor.BinaryUnmarshalerNone, + }.DecMode() + if err != nil { + panic(err) + } + return decode +}() + +// DecodeLax is derived from Decode, but does not complain about unknown fields in the input. +var DecodeLax cbor.DecMode = func() cbor.DecMode { + opts := Decode.DecOptions() + opts.ExtraReturnErrors &^= cbor.ExtraDecErrorUnknownField // clear bit + dm, err := opts.DecMode() + if err != nil { + panic(err) + } + return dm +}() diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go new file mode 100644 index 0000000000..61f3f145f5 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go @@ -0,0 +1,36 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modes + +import ( + "github.com/fxamacker/cbor/v2" +) + +var Diagnostic cbor.DiagMode = func() cbor.DiagMode { + opts := Decode.DecOptions() + diagnostic, err := cbor.DiagOptions{ + ByteStringText: true, + + MaxNestedLevels: opts.MaxNestedLevels, + MaxArrayElements: opts.MaxArrayElements, + MaxMapPairs: opts.MaxMapPairs, + }.DiagMode() + if err != nil { + panic(err) + } + return diagnostic +}() diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go new file mode 100644 index 0000000000..c669313844 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go @@ -0,0 +1,155 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modes + +import ( + "io" + + "github.com/fxamacker/cbor/v2" +) + +var Encode = EncMode{ + delegate: func() cbor.UserBufferEncMode { + encode, err := cbor.EncOptions{ + // Map keys need to be sorted to have deterministic output, and this is the order + // defined in RFC 8949 4.2.1 "Core Deterministic Encoding Requirements". + Sort: cbor.SortBytewiseLexical, + + // CBOR supports distinct types for IEEE-754 float16, float32, and float64. Store + // floats in the smallest width that preserves value so that equivalent float32 and + // float64 values encode to identical bytes, as they do in a JSON + // encoding. Satisfies one of the "Core Deterministic Encoding Requirements". + ShortestFloat: cbor.ShortestFloat16, + + // Error on attempt to encode NaN and infinite values. This is what the JSON + // serializer does. 
+ NaNConvert: cbor.NaNConvertReject, + InfConvert: cbor.InfConvertReject, + + // Error on attempt to encode math/big.Int values, which can't be faithfully + // roundtripped through Unstructured in general (the dynamic numeric types allowed + // in Unstructured are limited to float64 and int64). + BigIntConvert: cbor.BigIntConvertReject, + + // MarshalJSON for time.Time writes RFC3339 with nanos. + Time: cbor.TimeRFC3339Nano, + + // The decoder must be able to accept RFC3339 strings with or without tag 0 (e.g. by + // the end of time.Time -> JSON -> Unstructured -> CBOR, the CBOR encoder has no + // reliable way of knowing that a particular string originated from serializing a + // time.Time), so producing tag 0 has little use. + TimeTag: cbor.EncTagNone, + + // Indefinite-length items have multiple encodings and aren't being used anyway, so + // disable to avoid an opportunity for nondeterminism. + IndefLength: cbor.IndefLengthForbidden, + + // Preserve distinction between nil and empty for slices and maps. + NilContainers: cbor.NilContainerAsNull, + + // OK to produce tags. + TagsMd: cbor.TagsAllowed, + + // Use the same definition of "empty" as encoding/json. + OmitEmpty: cbor.OmitEmptyGoValue, + + // The CBOR types text string and byte string are structurally equivalent, with the + // semantic difference that a text string whose content is an invalid UTF-8 sequence + // is itself invalid. We reject all invalid text strings at decode time and do not + // validate or sanitize all Go strings at encode time. Encoding Go strings to the + // byte string type is comparable to the existing Protobuf behavior and cheaply + // ensures that the output is valid CBOR. + String: cbor.StringToByteString, + + // Encode struct field names to the byte string type rather than the text string + // type. + FieldName: cbor.FieldNameToByteString, + + // Marshal Go byte arrays to CBOR arrays of integers (as in JSON) instead of byte + // strings. + ByteArray: cbor.ByteArrayToArray, + + // Marshal []byte to CBOR byte string enclosed in tag 22 (expected later base64 + // encoding, https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.5.2), to + // interoperate with the existing JSON behavior. This indicates to the decoder that, + // when decoding into a string (or unstructured), the resulting value should be the + // base64 encoding of the original bytes. No base64 encoding or decoding needs to be + // performed for []byte-to-CBOR-to-[]byte roundtrips. + ByteSliceLaterFormat: cbor.ByteSliceLaterFormatBase64, + + // Disable default recognition of types implementing encoding.BinaryMarshaler, which + // is not recognized for JSON encoding. + BinaryMarshaler: cbor.BinaryMarshalerNone, + }.UserBufferEncMode() + if err != nil { + panic(err) + } + return encode + }(), +} + +var EncodeNondeterministic = EncMode{ + delegate: func() cbor.UserBufferEncMode { + opts := Encode.options() + opts.Sort = cbor.SortNone // TODO: Use cbor.SortFastShuffle after bump to v2.7.0. 
+ em, err := opts.UserBufferEncMode() + if err != nil { + panic(err) + } + return em + }(), +} + +type EncMode struct { + delegate cbor.UserBufferEncMode +} + +func (em EncMode) options() cbor.EncOptions { + return em.delegate.EncOptions() +} + +func (em EncMode) MarshalTo(v interface{}, w io.Writer) error { + if buf, ok := w.(*buffer); ok { + return em.delegate.MarshalToBuffer(v, &buf.Buffer) + } + + buf := buffers.Get() + defer buffers.Put(buf) + if err := em.delegate.MarshalToBuffer(v, &buf.Buffer); err != nil { + return err + } + + if _, err := io.Copy(w, buf); err != nil { + return err + } + + return nil +} + +func (em EncMode) Marshal(v interface{}) ([]byte, error) { + buf := buffers.Get() + defer buffers.Put(buf) + + if err := em.MarshalTo(v, &buf.Buffer); err != nil { + return nil, err + } + + clone := make([]byte, buf.Len()) + copy(clone, buf.Bytes()) + + return clone, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go index ce77c7910a..1680c149f9 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go @@ -46,6 +46,7 @@ const ( ContentTypeJSON string = "application/json" ContentTypeYAML string = "application/yaml" ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf" + ContentTypeCBOR string = "application/cbor" ) // RawExtension is used to hold extensions in external versions. diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go index 9b3c9c8d5a..1ab8fd396e 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go +++ b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go @@ -147,7 +147,6 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) { // RawMessage#Unmarshal appends to data - we reset the slice down to 0 and will either see // data written to data, or be larger than data and a different array. - n := len(data) m := json.RawMessage(data[:0]) if err := r.decoder.Decode(&m); err != nil { return 0, err @@ -156,12 +155,19 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) { // If capacity of data is less than length of the message, decoder will allocate a new slice // and set m to it, which means we need to copy the partial result back into data and preserve // the remaining result for subsequent reads. - if len(m) > n { - //nolint:staticcheck // SA4006,SA4010 underlying array of data is modified here. - data = append(data[0:0], m[:n]...) - r.remaining = m[n:] - return n, io.ErrShortBuffer + if len(m) > cap(data) { + copy(data, m) + r.remaining = m[len(data):] + return len(data), io.ErrShortBuffer } + + if len(m) > len(data) { + // The bytes beyond len(data) were stored in data's underlying array, which we do + // not own after this function returns. + r.remaining = append([]byte(nil), m[len(data):]...) + return len(data), io.ErrShortBuffer + } + return len(m), nil } diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go index 8f9ced93fb..1f2877399f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
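The new direct package and the encode/decode modes above can be exercised end to end roughly as follows. This is a sketch, not part of the patch: it assumes the vendored k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct package is importable as shown, and uses only the Marshal, Unmarshal, and Diagnose functions introduced in the diff.

package main

import (
	"fmt"

	cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
)

func main() {
	// Encode an unstructured-style map with the deterministic encode mode,
	// decode it back, and print a diagnostic rendering of the bytes.
	in := map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"data":       map[string]interface{}{"key": "value"},
	}

	b, err := cbor.Marshal(in)
	if err != nil {
		panic(err)
	}

	var out map[string]interface{}
	if err := cbor.Unmarshal(b, &out); err != nil {
		panic(err)
	}

	diag, err := cbor.Diagnose(b)
	if err != nil {
		panic(err)
	}
	fmt.Println(out["kind"], diag)
}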
-// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto +// source: k8s.io/apimachinery/pkg/util/intstr/generated.proto package intstr @@ -43,7 +43,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *IntOrString) Reset() { *m = IntOrString{} } func (*IntOrString) ProtoMessage() {} func (*IntOrString) Descriptor() ([]byte, []int) { - return fileDescriptor_94e046ae3ce6121c, []int{0} + return fileDescriptor_771bacc35a5ec189, []int{0} } func (m *IntOrString) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -73,30 +73,29 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptor_94e046ae3ce6121c) + proto.RegisterFile("k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptor_771bacc35a5ec189) } -var fileDescriptor_94e046ae3ce6121c = []byte{ - // 292 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x90, 0xb1, 0x4a, 0x03, 0x31, - 0x1c, 0xc6, 0x13, 0x5b, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x58, 0x90, 0x5b, 0x4c, - 0x56, 0x71, 0xec, 0x56, 0x10, 0x84, 0x56, 0x1c, 0xdc, 0xee, 0xda, 0x98, 0x86, 0x6b, 0x93, 0x90, - 0xfb, 0x9f, 0x70, 0x5b, 0x1f, 0x41, 0x37, 0x47, 0x1f, 0xe7, 0xc6, 0x8e, 0x1d, 0xa4, 0x78, 0xf1, - 0x2d, 0x9c, 0xe4, 0x72, 0x07, 0x3a, 0x3a, 0x25, 0xdf, 0xf7, 0xfd, 0x7e, 0x19, 0x12, 0xdc, 0xa6, - 0xd7, 0x19, 0x95, 0x9a, 0xa5, 0x79, 0xc2, 0xad, 0xe2, 0xc0, 0x33, 0xf6, 0xcc, 0xd5, 0x42, 0x5b, - 0xd6, 0x0e, 0xb1, 0x91, 0xeb, 0x78, 0xbe, 0x94, 0x8a, 0xdb, 0x82, 0x99, 0x54, 0xb0, 0x1c, 0xe4, - 0x8a, 0x49, 0x05, 0x19, 0x58, 0x26, 0xb8, 0xe2, 0x36, 0x06, 0xbe, 0xa0, 0xc6, 0x6a, 0xd0, 0xfd, - 0x51, 0x23, 0xd1, 0xbf, 0x12, 0x35, 0xa9, 0xa0, 0xb5, 0x44, 0x1b, 0xe9, 0xfc, 0x4a, 0x48, 0x58, - 0xe6, 0x09, 0x9d, 0xeb, 0x35, 0x13, 0x5a, 0x68, 0xe6, 0xdd, 0x24, 0x7f, 0xf2, 0xc9, 0x07, 0x7f, - 0x6b, 0xde, 0xbc, 0x78, 0xc5, 0xc1, 0xc9, 0x44, 0xc1, 0x9d, 0x9d, 0x81, 0x95, 0x4a, 0xf4, 0xa3, - 0xa0, 0x0b, 0x85, 0xe1, 0x03, 0x1c, 0xe2, 0xa8, 0x33, 0x3e, 0x2b, 0xf7, 0x43, 0xe4, 0xf6, 0xc3, - 0xee, 0x7d, 0x61, 0xf8, 0x77, 0x7b, 0x4e, 0x3d, 0xd1, 0xbf, 0x0c, 0x7a, 0x52, 0xc1, 0x43, 0xbc, - 0x1a, 0x1c, 0x84, 0x38, 0x3a, 0x1c, 0x9f, 0xb6, 0x6c, 0x6f, 0xe2, 0xdb, 0x69, 0xbb, 0xd6, 0x5c, - 0x06, 0xb6, 0xe6, 0x3a, 0x21, 0x8e, 0x8e, 0x7f, 0xb9, 0x99, 0x6f, 0xa7, 0xed, 0x7a, 0x73, 0xf4, - 0xf6, 0x3e, 0x44, 0x9b, 0x8f, 0x10, 0x8d, 0x27, 0x65, 0x45, 0xd0, 0xb6, 0x22, 0x68, 0x57, 0x11, - 0xb4, 0x71, 0x04, 0x97, 0x8e, 0xe0, 0xad, 0x23, 0x78, 0xe7, 0x08, 0xfe, 0x74, 0x04, 0xbf, 0x7c, - 0x11, 0xf4, 0x38, 0xfa, 0xc7, 0x17, 0xfe, 0x04, 0x00, 0x00, 0xff, 0xff, 0xdc, 0xc4, 0xf0, 0xa0, - 0x81, 0x01, 0x00, 0x00, +var fileDescriptor_771bacc35a5ec189 = []byte{ + // 277 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0xce, 0xb6, 0x28, 0xd6, + 0xcb, 0xcc, 0xd7, 0x4f, 0x2c, 0xc8, 0xcc, 0x4d, 0x4c, 0xce, 0xc8, 0xcc, 0x4b, 0x2d, 0xaa, 0xd4, + 0x2f, 0xc8, 0x4e, 0xd7, 0x2f, 0x2d, 0xc9, 0xcc, 0xd1, 0xcf, 0xcc, 0x2b, 0x29, 0x2e, 0x29, 0xd2, + 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0x4a, 0x2c, 0x49, 0x4d, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, + 0x52, 0x86, 0x68, 0xd2, 0x43, 0xd6, 0xa4, 0x57, 0x90, 0x9d, 0xae, 0x07, 0xd2, 0xa4, 0x07, 0xd1, + 0x24, 0xa5, 0x9b, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f, 0x9e, 0x9f, + 0x9e, 0xaf, 0x0f, 0xd6, 0x9b, 0x54, 0x9a, 0x06, 0xe6, 0x81, 0x39, 0x60, 0x16, 0xc4, 0x4c, 0xa5, + 
0x89, 0x8c, 0x5c, 0xdc, 0x9e, 0x79, 0x25, 0xfe, 0x45, 0xc1, 0x25, 0x45, 0x99, 0x79, 0xe9, 0x42, + 0x1a, 0x5c, 0x2c, 0x25, 0x95, 0x05, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x4e, 0x22, 0x27, + 0xee, 0xc9, 0x33, 0x3c, 0xba, 0x27, 0xcf, 0x12, 0x52, 0x59, 0x90, 0xfa, 0x0b, 0x4a, 0x07, 0x81, + 0x55, 0x08, 0xa9, 0x71, 0xb1, 0x65, 0xe6, 0x95, 0x84, 0x25, 0xe6, 0x48, 0x30, 0x29, 0x30, 0x6a, + 0xb0, 0x3a, 0xf1, 0x41, 0xd5, 0xb2, 0x79, 0x82, 0x45, 0x83, 0xa0, 0xb2, 0x20, 0x75, 0xc5, 0x25, + 0x45, 0x20, 0x75, 0xcc, 0x0a, 0x8c, 0x1a, 0x9c, 0x08, 0x75, 0xc1, 0x60, 0xd1, 0x20, 0xa8, 0xac, + 0x15, 0xc7, 0x8c, 0x05, 0xf2, 0x0c, 0x0d, 0x77, 0x14, 0x18, 0x9c, 0x3c, 0x4f, 0x3c, 0x94, 0x63, + 0xb8, 0xf0, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, + 0x31, 0x5e, 0x78, 0x24, 0xc7, 0x78, 0xe3, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, 0x1e, + 0xcb, 0x31, 0x44, 0x29, 0x13, 0x11, 0x84, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x63, 0xa1, 0x0b, + 0x1e, 0x68, 0x01, 0x00, 0x00, } func (m *IntOrString) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index f358c794d1..5fd2e16c84 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -25,6 +25,7 @@ import ( "strconv" "strings" + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" "k8s.io/klog/v2" ) @@ -92,6 +93,20 @@ func (intstr *IntOrString) UnmarshalJSON(value []byte) error { return json.Unmarshal(value, &intstr.IntVal) } +func (intstr *IntOrString) UnmarshalCBOR(value []byte) error { + if err := cbor.Unmarshal(value, &intstr.StrVal); err == nil { + intstr.Type = String + return nil + } + + if err := cbor.Unmarshal(value, &intstr.IntVal); err != nil { + return err + } + + intstr.Type = Int + return nil +} + // String returns the string value, or the Itoa of the int value. func (intstr *IntOrString) String() string { if intstr == nil { @@ -126,6 +141,17 @@ func (intstr IntOrString) MarshalJSON() ([]byte, error) { } } +func (intstr IntOrString) MarshalCBOR() ([]byte, error) { + switch intstr.Type { + case Int: + return cbor.Marshal(intstr.IntVal) + case String: + return cbor.Marshal(intstr.StrVal) + default: + return nil, fmt.Errorf("impossible IntOrString.Type") + } +} + // OpenAPISchemaType is used by the kube-openapi generator when constructing // the OpenAPI spec of this type. // diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml b/vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml index 66e849f23f..a7f2d54fdf 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml @@ -120,7 +120,7 @@ status: type: PIDPressure - lastHeartbeatTime: "2019-09-20T19:32:50Z" lastTransitionTime: "2019-07-09T16:17:49Z" - message: kubelet is posting ready status. AppArmor enabled + message: kubelet is posting ready status reason: KubeletReady status: "True" type: Ready diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index 3674914f70..4fe0c5eb25 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -17,6 +17,7 @@ limitations under the License. 
package runtime import ( + "context" "fmt" "net/http" "runtime" @@ -35,7 +36,7 @@ var ( ) // PanicHandlers is a list of functions which will be invoked when a panic happens. -var PanicHandlers = []func(interface{}){logPanic} +var PanicHandlers = []func(context.Context, interface{}){logPanic} // HandleCrash simply catches a crash and logs an error. Meant to be called via // defer. Additional context-specific handlers can be provided, and will be @@ -43,23 +44,54 @@ var PanicHandlers = []func(interface{}){logPanic} // handlers and logging the panic message. // // E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully. +// +// TODO(pohly): logcheck:context // HandleCrashWithContext should be used instead of HandleCrash in code which supports contextual logging. func HandleCrash(additionalHandlers ...func(interface{})) { if r := recover(); r != nil { - for _, fn := range PanicHandlers { - fn(r) - } - for _, fn := range additionalHandlers { - fn(r) - } - if ReallyCrash { - // Actually proceed to panic. - panic(r) + additionalHandlersWithContext := make([]func(context.Context, interface{}), len(additionalHandlers)) + for i, handler := range additionalHandlers { + handler := handler // capture loop variable + additionalHandlersWithContext[i] = func(_ context.Context, r interface{}) { + handler(r) + } } + + handleCrash(context.Background(), r, additionalHandlersWithContext...) + } +} + +// HandleCrashWithContext simply catches a crash and logs an error. Meant to be called via +// defer. Additional context-specific handlers can be provided, and will be +// called in case of panic. HandleCrash actually crashes, after calling the +// handlers and logging the panic message. +// +// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully. +// +// The context is used to determine how to log. +func HandleCrashWithContext(ctx context.Context, additionalHandlers ...func(context.Context, interface{})) { + if r := recover(); r != nil { + handleCrash(ctx, r, additionalHandlers...) + } +} + +// handleCrash is the common implementation of HandleCrash and HandleCrash. +// Having those call a common implementation ensures that the stack depth +// is the same regardless through which path the handlers get invoked. +func handleCrash(ctx context.Context, r any, additionalHandlers ...func(context.Context, interface{})) { + for _, fn := range PanicHandlers { + fn(ctx, r) + } + for _, fn := range additionalHandlers { + fn(ctx, r) + } + if ReallyCrash { + // Actually proceed to panic. + panic(r) } } // logPanic logs the caller tree when a panic occurs (except in the special case of http.ErrAbortHandler). -func logPanic(r interface{}) { +func logPanic(ctx context.Context, r interface{}) { if r == http.ErrAbortHandler { // honor the http.ErrAbortHandler sentinel panic value: // ErrAbortHandler is a sentinel panic value to abort a handler. @@ -73,10 +105,20 @@ func logPanic(r interface{}) { const size = 64 << 10 stacktrace := make([]byte, size) stacktrace = stacktrace[:runtime.Stack(stacktrace, false)] + + // We don't really know how many call frames to skip because the Go + // panic handler is between us and the code where the panic occurred. + // If it's one function (as in Go 1.21), then skipping four levels + // gets us to the function which called the `defer HandleCrashWithontext(...)`. 
+ logger := klog.FromContext(ctx).WithCallDepth(4) + + // For backwards compatibility, conversion to string + // is handled here instead of deferring to the logging + // backend. if _, ok := r.(string); ok { - klog.Errorf("Observed a panic: %s\n%s", r, stacktrace) + logger.Error(nil, "Observed a panic", "panic", r, "stacktrace", string(stacktrace)) } else { - klog.Errorf("Observed a panic: %#v (%v)\n%s", r, r, stacktrace) + logger.Error(nil, "Observed a panic", "panic", fmt.Sprintf("%v", r), "panicGoValue", fmt.Sprintf("%#v", r), "stacktrace", string(stacktrace)) } } @@ -84,35 +126,76 @@ func logPanic(r interface{}) { // error occurs. // TODO(lavalamp): for testability, this and the below HandleError function // should be packaged up into a testable and reusable object. -var ErrorHandlers = []func(error){ +var ErrorHandlers = []ErrorHandler{ logError, - (&rudimentaryErrorBackoff{ - lastErrorTime: time.Now(), - // 1ms was the number folks were able to stomach as a global rate limit. - // If you need to log errors more than 1000 times a second you - // should probably consider fixing your code instead. :) - minPeriod: time.Millisecond, - }).OnError, + func(_ context.Context, _ error, _ string, _ ...interface{}) { + (&rudimentaryErrorBackoff{ + lastErrorTime: time.Now(), + // 1ms was the number folks were able to stomach as a global rate limit. + // If you need to log errors more than 1000 times a second you + // should probably consider fixing your code instead. :) + minPeriod: time.Millisecond, + }).OnError() + }, } +type ErrorHandler func(ctx context.Context, err error, msg string, keysAndValues ...interface{}) + // HandlerError is a method to invoke when a non-user facing piece of code cannot // return an error and needs to indicate it has been ignored. Invoking this method // is preferable to logging the error - the default behavior is to log but the // errors may be sent to a remote server for analysis. +// +// TODO(pohly): logcheck:context // HandleErrorWithContext should be used instead of HandleError in code which supports contextual logging. func HandleError(err error) { // this is sometimes called with a nil error. We probably shouldn't fail and should do nothing instead if err == nil { return } + handleError(context.Background(), err, "Unhandled Error") +} + +// HandleErrorWithContext is a method to invoke when a non-user facing piece of code cannot +// return an error and needs to indicate it has been ignored. Invoking this method +// is preferable to logging the error - the default behavior is to log but the +// errors may be sent to a remote server for analysis. The context is used to +// determine how to log the error. +// +// If contextual logging is enabled, the default log output is equivalent to +// +// logr.FromContext(ctx).WithName("UnhandledError").Error(err, msg, keysAndValues...) +// +// Without contextual logging, it is equivalent to: +// +// klog.ErrorS(err, msg, keysAndValues...) +// +// In contrast to HandleError, passing nil for the error is still going to +// trigger a log entry. Don't construct a new error or wrap an error +// with fmt.Errorf. Instead, add additional information via the message +// and key/value pairs. +// +// This variant should be used instead of HandleError because it supports +// structured, contextual logging. +func HandleErrorWithContext(ctx context.Context, err error, msg string, keysAndValues ...interface{}) { + handleError(ctx, err, msg, keysAndValues...)
+} + +// handleError is the common implementation of HandleError and HandleErrorWithContext. +// Using this common implementation ensures that the stack depth +// is the same regardless through which path the handlers get invoked. +func handleError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) { for _, fn := range ErrorHandlers { - fn(err) + fn(ctx, err, msg, keysAndValues...) } } -// logError prints an error with the call stack of the location it was reported -func logError(err error) { - klog.ErrorDepth(2, err) +// logError prints an error with the call stack of the location it was reported. +// It expects to be called as -> HandleError[WithContext] -> handleError -> logError. +func logError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) { + logger := klog.FromContext(ctx).WithCallDepth(3) + logger = klog.LoggerWithName(logger, "UnhandledError") + logger.Error(err, msg, keysAndValues...) //nolint:logcheck // logcheck complains about unknown key/value pairs. } type rudimentaryErrorBackoff struct { @@ -125,7 +208,7 @@ type rudimentaryErrorBackoff struct { // OnError will block if it is called more often than the embedded period time. // This will prevent overly tight hot error loops. -func (r *rudimentaryErrorBackoff) OnError(error) { +func (r *rudimentaryErrorBackoff) OnError() { now := time.Now() // start the timer before acquiring the lock r.lastErrorTimeLock.Lock() d := now.Sub(r.lastErrorTime) diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go index 194883390c..fd281bdb88 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package sets has generic set and specified sets. Generic set will // replace specified ones over time. And specific ones are deprecated. -package sets +package sets // import "k8s.io/apimachinery/pkg/util/sets" diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/ordered.go b/vendor/k8s.io/apimachinery/pkg/util/sets/ordered.go deleted file mode 100644 index 443dac62eb..0000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/ordered.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sets - -// ordered is a constraint that permits any ordered type: any type -// that supports the operators < <= >= >. -// If future releases of Go add new ordered types, -// this constraint will be modified to include them. -type ordered interface { - integer | float | ~string -} - -// integer is a constraint that permits any integer type. -// If future releases of Go add new predeclared integer types, -// this constraint will be modified to include them. -type integer interface { - signed | unsigned -} - -// float is a constraint that permits any floating-point type. -// If future releases of Go add new predeclared floating-point types, -// this constraint will be modified to include them. 
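The crash and error handling rework above threads a context.Context through PanicHandlers and ErrorHandlers so the panic and unhandled-error log entries come out of the contextual logger with the right call depth. A hedged sketch of how calling code might adopt the new entry points; the doWork and refreshCache functions, the error text, and the key/value pairs are invented for illustration:

package main

import (
	"context"
	"errors"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

// refreshCache stands in for work that can fail without being fatal.
func refreshCache(ctx context.Context) error {
	return errors.New("cache sync failed")
}

func doWork(ctx context.Context) {
	// Recovers a panic, logs it via the logger carried in ctx, and then
	// re-panics unless ReallyCrash has been disabled, matching the new
	// context-aware PanicHandlers signature.
	defer utilruntime.HandleCrashWithContext(ctx)

	if err := refreshCache(ctx); err != nil {
		// An ignored-but-logged error, now with structured key/value context
		// instead of a preformatted message.
		utilruntime.HandleErrorWithContext(ctx, err, "Failed to refresh cache", "attempt", 1)
	}
}

func main() {
	doWork(context.Background())
}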
-type float interface { - ~float32 | ~float64 -} - -// signed is a constraint that permits any signed integer type. -// If future releases of Go add new predeclared signed integer types, -// this constraint will be modified to include them. -type signed interface { - ~int | ~int8 | ~int16 | ~int32 | ~int64 -} - -// unsigned is a constraint that permits any unsigned integer type. -// If future releases of Go add new predeclared unsigned integer types, -// this constraint will be modified to include them. -type unsigned interface { - ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/set.go b/vendor/k8s.io/apimachinery/pkg/util/sets/set.go index d50526f426..cd961c8c59 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/set.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/set.go @@ -17,6 +17,7 @@ limitations under the License. package sets import ( + "cmp" "sort" ) @@ -37,7 +38,7 @@ func New[T comparable](items ...T) Set[T] { // KeySet creates a Set from a keys of a map[comparable](? extends interface{}). // If the value passed in is not actually a map, this will panic. func KeySet[T comparable, V any](theMap map[T]V) Set[T] { - ret := Set[T]{} + ret := make(Set[T], len(theMap)) for keyValue := range theMap { ret.Insert(keyValue) } @@ -67,14 +68,8 @@ func (s Set[T]) Delete(items ...T) Set[T] { // Clear empties the set. // It is preferable to replace the set with a newly constructed set, // but not all callers can do that (when there are other references to the map). -// In some cases the set *won't* be fully cleared, e.g. a Set[float32] containing NaN -// can't be cleared because NaN can't be removed. -// For sets containing items of a type that is reflexive for ==, -// this is optimized to a single call to runtime.mapclear(). func (s Set[T]) Clear() Set[T] { - for key := range s { - delete(s, key) - } + clear(s) return s } @@ -193,7 +188,7 @@ func (s1 Set[T]) Equal(s2 Set[T]) bool { return len(s1) == len(s2) && s1.IsSuperset(s2) } -type sortableSliceOfGeneric[T ordered] []T +type sortableSliceOfGeneric[T cmp.Ordered] []T func (g sortableSliceOfGeneric[T]) Len() int { return len(g) } func (g sortableSliceOfGeneric[T]) Less(i, j int) bool { return less[T](g[i], g[j]) } @@ -203,7 +198,7 @@ func (g sortableSliceOfGeneric[T]) Swap(i, j int) { g[i], g[j] = g[j], g[i] // // This is a separate function and not a method because not all types supported // by Generic are ordered and only those can be sorted. -func List[T ordered](s Set[T]) []T { +func List[T cmp.Ordered](s Set[T]) []T { res := make(sortableSliceOfGeneric[T], 0, len(s)) for key := range s { res = append(res, key) @@ -236,6 +231,6 @@ func (s Set[T]) Len() int { return len(s) } -func less[T ordered](lhs, rhs T) bool { +func less[T cmp.Ordered](lhs, rhs T) bool { return lhs < rhs } diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go index 920c113bbd..6825a808e6 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -1361,6 +1361,10 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me // original. Otherwise, check if we want to preserve it or skip it. // Preserving the null value is useful when we want to send an explicit // delete to the API server. + // In some cases, this may lead to inconsistent behavior with create. 
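Beyond the doc.go import comment, the sets changes above swap the package-local ordered constraint for cmp.Ordered, preallocate in KeySet, and let Clear use the clear builtin; callers see the same API. A small usage sketch (the map contents are arbitrary):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// KeySet now preallocates the set from the map's size.
	replicas := map[string]int{"a": 1, "b": 2}
	names := sets.KeySet(replicas)

	names.Insert("c")
	names.Delete("a")

	// List sorts using the cmp.Ordered-constrained comparison.
	fmt.Println(sets.List(names)) // [b c]

	// Clear keeps the same underlying map but empties it via clear().
	names.Clear()
	fmt.Println(names.Len()) // 0
}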
+ // ref: https://github.com/kubernetes/kubernetes/issues/123304 + // To avoid breaking compatibility, + // we made corresponding changes on the client side to ensure that the create and patch behaviors are idempotent. if patchV == nil { delete(original, k) if mergeOptions.IgnoreUnmatchedNulls { diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/validation/OWNERS new file mode 100644 index 0000000000..4023732476 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/OWNERS @@ -0,0 +1,11 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +# Disable inheritance as this is an api owners file +options: + no_parent_owners: true +approvers: + - api-approvers +reviewers: + - api-reviewers +labels: + - kind/api-change diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go index 0b8a6cb354..b32644902b 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -19,10 +19,9 @@ package validation import ( "fmt" "math" - "net" "regexp" - "strconv" "strings" + "unicode" "k8s.io/apimachinery/pkg/util/validation/field" netutils "k8s.io/utils/net" @@ -352,11 +351,12 @@ func IsValidPortName(port string) []string { } // IsValidIP tests that the argument is a valid IP address. -func IsValidIP(value string) []string { +func IsValidIP(fldPath *field.Path, value string) field.ErrorList { + var allErrors field.ErrorList if netutils.ParseIPSloppy(value) == nil { - return []string{"must be a valid IP address, (e.g. 10.9.8.7 or 2001:db8::ffff)"} + allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IP address, (e.g. 10.9.8.7 or 2001:db8::ffff)")) } - return nil + return allErrors } // IsValidIPv4Address tests that the argument is a valid IPv4 address. @@ -379,6 +379,16 @@ func IsValidIPv6Address(fldPath *field.Path, value string) field.ErrorList { return allErrors } +// IsValidCIDR tests that the argument is a valid CIDR value. +func IsValidCIDR(fldPath *field.Path, value string) field.ErrorList { + var allErrors field.ErrorList + _, _, err := netutils.ParseCIDRSloppy(value) + if err != nil { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid CIDR value, (e.g. 10.9.8.0/24 or 2001:db8::/64)")) + } + return allErrors +} + const percentFmt string = "[0-9]+%" const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'" @@ -409,6 +419,9 @@ func IsHTTPHeaderName(value string) []string { const envVarNameFmt = "[-._a-zA-Z][-._a-zA-Z0-9]*" const envVarNameFmtErrMsg string = "a valid environment variable name must consist of alphabetic characters, digits, '_', '-', or '.', and must not start with a digit" +// TODO(hirazawaui): Rename this when the RelaxedEnvironmentVariableValidation gate is removed. +const relaxedEnvVarNameFmtErrMsg string = "a valid environment variable name must consist only of printable ASCII characters other than '='" + var envVarNameRegexp = regexp.MustCompile("^" + envVarNameFmt + "$") // IsEnvVarName tests if a string is a valid environment variable name. @@ -422,6 +435,24 @@ func IsEnvVarName(value string) []string { return errs } +// IsRelaxedEnvVarName tests if a string is a valid environment variable name. 
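IsValidIP now takes a *field.Path and returns a field.ErrorList, matching IsValidIPv4Address and IsValidIPv6Address, and IsValidCIDR is new. A sketch of the updated call pattern; the field paths and sample values are illustrative only:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	ipPath := field.NewPath("spec", "clusterIP")
	cidrPath := field.NewPath("spec", "podCIDR")

	var errs field.ErrorList
	errs = append(errs, validation.IsValidIP(ipPath, "10.0.0.300")...)      // invalid, yields one error
	errs = append(errs, validation.IsValidCIDR(cidrPath, "10.9.8.0/24")...) // valid, yields none

	for _, e := range errs {
		fmt.Println(e.Error())
	}
}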
+func IsRelaxedEnvVarName(value string) []string { + var errs []string + + if len(value) == 0 { + errs = append(errs, "environment variable name "+EmptyError()) + } + + for _, r := range value { + if r > unicode.MaxASCII || !unicode.IsPrint(r) || r == '=' { + errs = append(errs, relaxedEnvVarNameFmtErrMsg) + break + } + } + + return errs +} + const configMapKeyFmt = `[-._a-zA-Z0-9]+` const configMapKeyErrMsg string = "a valid config key must consist of alphanumeric characters, '-', '_' or '.'" @@ -493,18 +524,3 @@ func hasChDirPrefix(value string) []string { } return errs } - -// IsValidSocketAddr checks that string represents a valid socket address -// as defined in RFC 789. (e.g 0.0.0.0:10254 or [::]:10254)) -func IsValidSocketAddr(value string) []string { - var errs []string - ip, port, err := net.SplitHostPort(value) - if err != nil { - errs = append(errs, "must be a valid socket address format, (e.g. 0.0.0.0:10254 or [::]:10254)") - return errs - } - portInt, _ := strconv.Atoi(port) - errs = append(errs, IsValidPortNum(portInt)...) - errs = append(errs, IsValidIP(ip)...) - return errs -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/version/version.go b/vendor/k8s.io/apimachinery/pkg/util/version/version.go index 2292ba1376..b7812ff2d1 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/version/version.go +++ b/vendor/k8s.io/apimachinery/pkg/util/version/version.go @@ -23,6 +23,8 @@ import ( "regexp" "strconv" "strings" + + apimachineryversion "k8s.io/apimachinery/pkg/version" ) // Version is an opaque representation of a version number @@ -31,6 +33,7 @@ type Version struct { semver bool preRelease string buildMetadata string + info apimachineryversion.Info } var ( @@ -145,6 +148,43 @@ func MustParseGeneric(str string) *Version { return v } +// Parse tries to do ParseSemantic first to keep more information. +// If ParseSemantic fails, it would just do ParseGeneric. +func Parse(str string) (*Version, error) { + v, err := parse(str, true) + if err != nil { + return parse(str, false) + } + return v, err +} + +// MustParse is like Parse except that it panics on error +func MustParse(str string) *Version { + v, err := Parse(str) + if err != nil { + panic(err) + } + return v +} + +// ParseMajorMinor parses a "generic" version string and returns a version with the major and minor version. +func ParseMajorMinor(str string) (*Version, error) { + v, err := ParseGeneric(str) + if err != nil { + return nil, err + } + return MajorMinor(v.Major(), v.Minor()), nil +} + +// MustParseMajorMinor is like ParseMajorMinor except that it panics on error +func MustParseMajorMinor(str string) *Version { + v, err := ParseMajorMinor(str) + if err != nil { + panic(err) + } + return v +} + // ParseSemantic parses a version string that exactly obeys the syntax and semantics of // the "Semantic Versioning" specification (http://semver.org/) (although it ignores // leading and trailing whitespace, and allows the version to be preceded by "v"). For @@ -215,6 +255,32 @@ func (v *Version) WithMinor(minor uint) *Version { return &result } +// SubtractMinor returns the version with offset from the original minor, with the same major and no patch. +// If -offset >= current minor, the minor would be 0. 
+func (v *Version) OffsetMinor(offset int) *Version { + var minor uint + if offset >= 0 { + minor = v.Minor() + uint(offset) + } else { + diff := uint(-offset) + if diff < v.Minor() { + minor = v.Minor() - diff + } + } + return MajorMinor(v.Major(), minor) +} + +// SubtractMinor returns the version diff minor versions back, with the same major and no patch. +// If diff >= current minor, the minor would be 0. +func (v *Version) SubtractMinor(diff uint) *Version { + return v.OffsetMinor(-int(diff)) +} + +// AddMinor returns the version diff minor versions forward, with the same major and no patch. +func (v *Version) AddMinor(diff uint) *Version { + return v.OffsetMinor(int(diff)) +} + // WithPatch returns copy of the version object with requested patch number func (v *Version) WithPatch(patch uint) *Version { result := *v @@ -224,6 +290,9 @@ func (v *Version) WithPatch(patch uint) *Version { // WithPreRelease returns copy of the version object with requested prerelease func (v *Version) WithPreRelease(preRelease string) *Version { + if len(preRelease) == 0 { + return v + } result := *v result.components = []uint{v.Major(), v.Minor(), v.Patch()} result.preRelease = preRelease @@ -345,6 +414,17 @@ func onlyZeros(array []uint) bool { return true } +// EqualTo tests if a version is equal to a given version. +func (v *Version) EqualTo(other *Version) bool { + if v == nil { + return other == nil + } + if other == nil { + return false + } + return v.compareInternal(other) == 0 +} + // AtLeast tests if a version is at least equal to a given minimum version. If both // Versions are Semantic Versions, this will use the Semantic Version comparison // algorithm. Otherwise, it will compare only the numeric components, with non-present @@ -360,6 +440,11 @@ func (v *Version) LessThan(other *Version) bool { return v.compareInternal(other) == -1 } +// GreaterThan tests if a version is greater than a given version. +func (v *Version) GreaterThan(other *Version) bool { + return v.compareInternal(other) == 1 +} + // Compare compares v against a version string (which will be parsed as either Semantic // or non-Semantic depending on v). On success it returns -1 if v is less than other, 1 if // it is greater than other, or 0 if they are equal. @@ -370,3 +455,30 @@ func (v *Version) Compare(other string) (int, error) { } return v.compareInternal(ov), nil } + +// WithInfo returns copy of the version object with requested info +func (v *Version) WithInfo(info apimachineryversion.Info) *Version { + result := *v + result.info = info + return &result +} + +func (v *Version) Info() *apimachineryversion.Info { + if v == nil { + return nil + } + // in case info is empty, or the major and minor in info is different from the actual major and minor + v.info.Major = itoa(v.Major()) + v.info.Minor = itoa(v.Minor()) + if v.info.GitVersion == "" { + v.info.GitVersion = v.String() + } + return &v.info +} + +func itoa(i uint) string { + if i == 0 { + return "" + } + return strconv.Itoa(int(i)) +} diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go index b6c7bbfa8f..ce37fd8c18 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/watch.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -27,13 +27,25 @@ import ( // Interface can be implemented by anything that knows how to watch and report changes. type Interface interface { - // Stop stops watching. Will close the channel returned by ResultChan(). Releases - // any resources used by the watch. 
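The version package above gains Parse and MustParse (semantic parsing with a generic fallback), ParseMajorMinor, the minor-offset helpers, and EqualTo/GreaterThan comparisons. A small sketch; the version strings are arbitrary and the expected results are noted in comments:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/version"
)

func main() {
	v := version.MustParse("v1.31.2")

	// SubtractMinor and AddMinor return major.minor only (the patch is dropped),
	// clamping the minor at zero.
	fmt.Println(v.SubtractMinor(3)) // 1.28
	fmt.Println(v.AddMinor(1))      // 1.32

	floor := version.MustParse("1.29.0")
	fmt.Println(v.GreaterThan(floor))                    // true
	fmt.Println(v.EqualTo(version.MustParse("v1.31.2"))) // true
}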
+ // Stop tells the producer that the consumer is done watching, so the + // producer should stop sending events and close the result channel. The + // consumer should keep watching for events until the result channel is + // closed. + // + // Because some implementations may create channels when constructed, Stop + // must always be called, even if the consumer has not yet called + // ResultChan(). + // + // Only the consumer should call Stop(), not the producer. If the producer + // errors and needs to stop the watch prematurely, it should instead send + // an error event and close the result channel. Stop() - // ResultChan returns a chan which will receive all the events. If an error occurs - // or Stop() is called, the implementation will close this channel and - // release any resources used by the watch. + // ResultChan returns a channel which will receive events from the event + // producer. If an error occurs or Stop() is called, the producer must + // close this channel and release any resources used by the watch. + // Closing the result channel tells the consumer that no more events will be + // sent. ResultChan() <-chan Event } @@ -322,3 +334,21 @@ func (pw *ProxyWatcher) ResultChan() <-chan Event { func (pw *ProxyWatcher) StopChan() <-chan struct{} { return pw.stopCh } + +// MockWatcher implements watch.Interface with mockable functions. +type MockWatcher struct { + StopFunc func() + ResultChanFunc func() <-chan Event +} + +var _ Interface = &MockWatcher{} + +// Stop calls StopFunc +func (mw MockWatcher) Stop() { + mw.StopFunc() +} + +// ResultChan calls ResultChanFunc +func (mw MockWatcher) ResultChan() <-chan Event { + return mw.ResultChanFunc() +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/OWNERS b/vendor/k8s.io/client-go/applyconfigurations/OWNERS new file mode 100644 index 0000000000..ea0928429d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - apelisse + - jpbetz diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/auditannotation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/auditannotation.go new file mode 100644 index 0000000000..0d50d44ac2 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/auditannotation.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// AuditAnnotationApplyConfiguration represents a declarative configuration of the AuditAnnotation type for use +// with apply. +type AuditAnnotationApplyConfiguration struct { + Key *string `json:"key,omitempty"` + ValueExpression *string `json:"valueExpression,omitempty"` +} + +// AuditAnnotationApplyConfiguration constructs a declarative configuration of the AuditAnnotation type for use with +// apply. 
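The reworded watch.Interface contract above (the consumer always calls Stop, the producer closes the result channel) and the new MockWatcher make it straightforward to fake a watch in a test. A hedged sketch; the buffered channel and the single Added event are chosen purely for illustration:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	events := make(chan watch.Event, 1)
	events <- watch.Event{Type: watch.Added}
	close(events) // the producer is responsible for closing the result channel

	// MockWatcher lets a test supply Stop and ResultChan behavior directly.
	w := watch.MockWatcher{
		StopFunc:       func() { /* nothing to release in this fake */ },
		ResultChanFunc: func() <-chan watch.Event { return events },
	}
	defer w.Stop() // consumers must always call Stop, even before ResultChan

	for ev := range w.ResultChan() { // the loop ends when the channel is closed
		fmt.Println("got event:", ev.Type)
	}
}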
+func AuditAnnotation() *AuditAnnotationApplyConfiguration { + return &AuditAnnotationApplyConfiguration{} +} + +// WithKey sets the Key field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Key field is set to the value of the last call. +func (b *AuditAnnotationApplyConfiguration) WithKey(value string) *AuditAnnotationApplyConfiguration { + b.Key = &value + return b +} + +// WithValueExpression sets the ValueExpression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ValueExpression field is set to the value of the last call. +func (b *AuditAnnotationApplyConfiguration) WithValueExpression(value string) *AuditAnnotationApplyConfiguration { + b.ValueExpression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/expressionwarning.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/expressionwarning.go new file mode 100644 index 0000000000..1f890bcfcb --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/expressionwarning.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ExpressionWarningApplyConfiguration represents a declarative configuration of the ExpressionWarning type for use +// with apply. +type ExpressionWarningApplyConfiguration struct { + FieldRef *string `json:"fieldRef,omitempty"` + Warning *string `json:"warning,omitempty"` +} + +// ExpressionWarningApplyConfiguration constructs a declarative configuration of the ExpressionWarning type for use with +// apply. +func ExpressionWarning() *ExpressionWarningApplyConfiguration { + return &ExpressionWarningApplyConfiguration{} +} + +// WithFieldRef sets the FieldRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FieldRef field is set to the value of the last call. +func (b *ExpressionWarningApplyConfiguration) WithFieldRef(value string) *ExpressionWarningApplyConfiguration { + b.FieldRef = &value + return b +} + +// WithWarning sets the Warning field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Warning field is set to the value of the last call. 
+func (b *ExpressionWarningApplyConfiguration) WithWarning(value string) *ExpressionWarningApplyConfiguration { + b.Warning = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go index ea1dc377b9..d8a816f1e2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// MatchConditionApplyConfiguration represents an declarative configuration of the MatchCondition type for use +// MatchConditionApplyConfiguration represents a declarative configuration of the MatchCondition type for use // with apply. type MatchConditionApplyConfiguration struct { Name *string `json:"name,omitempty"` Expression *string `json:"expression,omitempty"` } -// MatchConditionApplyConfiguration constructs an declarative configuration of the MatchCondition type for use with +// MatchConditionApplyConfiguration constructs a declarative configuration of the MatchCondition type for use with // apply. func MatchCondition() *MatchConditionApplyConfiguration { return &MatchConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go new file mode 100644 index 0000000000..e8e371d7dd --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// MatchResourcesApplyConfiguration represents a declarative configuration of the MatchResources type for use +// with apply. +type MatchResourcesApplyConfiguration struct { + NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + ObjectSelector *v1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"` + ResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"resourceRules,omitempty"` + ExcludeResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"excludeResourceRules,omitempty"` + MatchPolicy *apiadmissionregistrationv1.MatchPolicyType `json:"matchPolicy,omitempty"` +} + +// MatchResourcesApplyConfiguration constructs a declarative configuration of the MatchResources type for use with +// apply. 
+func MatchResources() *MatchResourcesApplyConfiguration { + return &MatchResourcesApplyConfiguration{} +} + +// WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NamespaceSelector field is set to the value of the last call. +func (b *MatchResourcesApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration { + b.NamespaceSelector = value + return b +} + +// WithObjectSelector sets the ObjectSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObjectSelector field is set to the value of the last call. +func (b *MatchResourcesApplyConfiguration) WithObjectSelector(value *v1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration { + b.ObjectSelector = value + return b +} + +// WithResourceRules adds the given value to the ResourceRules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceRules field. +func (b *MatchResourcesApplyConfiguration) WithResourceRules(values ...*NamedRuleWithOperationsApplyConfiguration) *MatchResourcesApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResourceRules") + } + b.ResourceRules = append(b.ResourceRules, *values[i]) + } + return b +} + +// WithExcludeResourceRules adds the given value to the ExcludeResourceRules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ExcludeResourceRules field. +func (b *MatchResourcesApplyConfiguration) WithExcludeResourceRules(values ...*NamedRuleWithOperationsApplyConfiguration) *MatchResourcesApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithExcludeResourceRules") + } + b.ExcludeResourceRules = append(b.ExcludeResourceRules, *values[i]) + } + return b +} + +// WithMatchPolicy sets the MatchPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchPolicy field is set to the value of the last call. 
+func (b *MatchResourcesApplyConfiguration) WithMatchPolicy(value apiadmissionregistrationv1.MatchPolicyType) *MatchResourcesApplyConfiguration { + b.MatchPolicy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go index faff51a041..cd8096f902 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MutatingWebhookApplyConfiguration represents an declarative configuration of the MutatingWebhook type for use +// MutatingWebhookApplyConfiguration represents a declarative configuration of the MutatingWebhook type for use // with apply. type MutatingWebhookApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -40,7 +40,7 @@ type MutatingWebhookApplyConfiguration struct { MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } -// MutatingWebhookApplyConfiguration constructs an declarative configuration of the MutatingWebhook type for use with +// MutatingWebhookApplyConfiguration constructs a declarative configuration of the MutatingWebhook type for use with // apply. func MutatingWebhook() *MutatingWebhookApplyConfiguration { return &MutatingWebhookApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go index 61c8f667d2..58b71d6d58 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MutatingWebhookConfigurationApplyConfiguration represents an declarative configuration of the MutatingWebhookConfiguration type for use +// MutatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the MutatingWebhookConfiguration type for use // with apply. type MutatingWebhookConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type MutatingWebhookConfigurationApplyConfiguration struct { Webhooks []MutatingWebhookApplyConfiguration `json:"webhooks,omitempty"` } -// MutatingWebhookConfiguration constructs an declarative configuration of the MutatingWebhookConfiguration type for use with +// MutatingWebhookConfiguration constructs a declarative configuration of the MutatingWebhookConfiguration type for use with // apply. func MutatingWebhookConfiguration(name string) *MutatingWebhookConfigurationApplyConfiguration { b := &MutatingWebhookConfigurationApplyConfiguration{} @@ -250,3 +250,9 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithWebhooks(values ... } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *MutatingWebhookConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go new file mode 100644 index 0000000000..eda3bf635a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" +) + +// NamedRuleWithOperationsApplyConfiguration represents a declarative configuration of the NamedRuleWithOperations type for use +// with apply. +type NamedRuleWithOperationsApplyConfiguration struct { + ResourceNames []string `json:"resourceNames,omitempty"` + RuleWithOperationsApplyConfiguration `json:",inline"` +} + +// NamedRuleWithOperationsApplyConfiguration constructs a declarative configuration of the NamedRuleWithOperations type for use with +// apply. +func NamedRuleWithOperations() *NamedRuleWithOperationsApplyConfiguration { + return &NamedRuleWithOperationsApplyConfiguration{} +} + +// WithResourceNames adds the given value to the ResourceNames field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceNames field. +func (b *NamedRuleWithOperationsApplyConfiguration) WithResourceNames(values ...string) *NamedRuleWithOperationsApplyConfiguration { + for i := range values { + b.ResourceNames = append(b.ResourceNames, values[i]) + } + return b +} + +// WithOperations adds the given value to the Operations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Operations field. +func (b *NamedRuleWithOperationsApplyConfiguration) WithOperations(values ...admissionregistrationv1.OperationType) *NamedRuleWithOperationsApplyConfiguration { + for i := range values { + b.Operations = append(b.Operations, values[i]) + } + return b +} + +// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the APIGroups field. 
+func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *NamedRuleWithOperationsApplyConfiguration { + for i := range values { + b.APIGroups = append(b.APIGroups, values[i]) + } + return b +} + +// WithAPIVersions adds the given value to the APIVersions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the APIVersions field. +func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *NamedRuleWithOperationsApplyConfiguration { + for i := range values { + b.APIVersions = append(b.APIVersions, values[i]) + } + return b +} + +// WithResources adds the given value to the Resources field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Resources field. +func (b *NamedRuleWithOperationsApplyConfiguration) WithResources(values ...string) *NamedRuleWithOperationsApplyConfiguration { + for i := range values { + b.Resources = append(b.Resources, values[i]) + } + return b +} + +// WithScope sets the Scope field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Scope field is set to the value of the last call. +func (b *NamedRuleWithOperationsApplyConfiguration) WithScope(value admissionregistrationv1.ScopeType) *NamedRuleWithOperationsApplyConfiguration { + b.Scope = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramkind.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramkind.go new file mode 100644 index 0000000000..07577929ab --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramkind.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ParamKindApplyConfiguration represents a declarative configuration of the ParamKind type for use +// with apply. +type ParamKindApplyConfiguration struct { + APIVersion *string `json:"apiVersion,omitempty"` + Kind *string `json:"kind,omitempty"` +} + +// ParamKindApplyConfiguration constructs a declarative configuration of the ParamKind type for use with +// apply. +func ParamKind() *ParamKindApplyConfiguration { + return &ParamKindApplyConfiguration{} +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. 
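All of the generated apply-configuration builders above share the same pointer-field, chained-With pattern. As one illustration, a MatchResources block selecting apps/v1 deployments for CREATE and UPDATE could be assembled like this (the group, version, resource, and import aliases are example choices):

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	applyadmissionv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
)

func main() {
	rule := applyadmissionv1.NamedRuleWithOperations().
		WithOperations(admissionregistrationv1.Create, admissionregistrationv1.Update).
		WithAPIGroups("apps").
		WithAPIVersions("v1").
		WithResources("deployments")

	match := applyadmissionv1.MatchResources().
		WithResourceRules(rule).
		WithMatchPolicy(admissionregistrationv1.Equivalent)

	fmt.Println(match.ResourceRules[0].Resources[0], *match.MatchPolicy) // deployments Equivalent
}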
+func (b *ParamKindApplyConfiguration) WithAPIVersion(value string) *ParamKindApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ParamKindApplyConfiguration) WithKind(value string) *ParamKindApplyConfiguration { + b.Kind = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go new file mode 100644 index 0000000000..73cda9b04d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ParamRefApplyConfiguration represents a declarative configuration of the ParamRef type for use +// with apply. +type ParamRefApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + ParameterNotFoundAction *admissionregistrationv1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"` +} + +// ParamRefApplyConfiguration constructs a declarative configuration of the ParamRef type for use with +// apply. +func ParamRef() *ParamRefApplyConfiguration { + return &ParamRefApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ParamRefApplyConfiguration) WithName(value string) *ParamRefApplyConfiguration { + b.Name = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ParamRefApplyConfiguration) WithNamespace(value string) *ParamRefApplyConfiguration { + b.Namespace = &value + return b +} + +// WithSelector sets the Selector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Selector field is set to the value of the last call. 
+func (b *ParamRefApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *ParamRefApplyConfiguration { + b.Selector = value + return b +} + +// WithParameterNotFoundAction sets the ParameterNotFoundAction field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParameterNotFoundAction field is set to the value of the last call. +func (b *ParamRefApplyConfiguration) WithParameterNotFoundAction(value admissionregistrationv1.ParameterNotFoundActionType) *ParamRefApplyConfiguration { + b.ParameterNotFoundAction = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go index 41d4179df4..36a93643c1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/admissionregistration/v1" ) -// RuleApplyConfiguration represents an declarative configuration of the Rule type for use +// RuleApplyConfiguration represents a declarative configuration of the Rule type for use // with apply. type RuleApplyConfiguration struct { APIGroups []string `json:"apiGroups,omitempty"` @@ -31,7 +31,7 @@ type RuleApplyConfiguration struct { Scope *v1.ScopeType `json:"scope,omitempty"` } -// RuleApplyConfiguration constructs an declarative configuration of the Rule type for use with +// RuleApplyConfiguration constructs a declarative configuration of the Rule type for use with // apply. func Rule() *RuleApplyConfiguration { return &RuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go index 59bbb8fe3d..92bddd502a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/admissionregistration/v1" ) -// RuleWithOperationsApplyConfiguration represents an declarative configuration of the RuleWithOperations type for use +// RuleWithOperationsApplyConfiguration represents a declarative configuration of the RuleWithOperations type for use // with apply. type RuleWithOperationsApplyConfiguration struct { Operations []v1.OperationType `json:"operations,omitempty"` RuleApplyConfiguration `json:",inline"` } -// RuleWithOperationsApplyConfiguration constructs an declarative configuration of the RuleWithOperations type for use with +// RuleWithOperationsApplyConfiguration constructs a declarative configuration of the RuleWithOperations type for use with // apply. func RuleWithOperations() *RuleWithOperationsApplyConfiguration { return &RuleWithOperationsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/servicereference.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/servicereference.go index 2cd55d9ea2..239780664d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/servicereference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/servicereference.go @@ -18,7 +18,7 @@ limitations under the License. 
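ParamKind and ParamRef are the pieces that tie a ValidatingAdmissionPolicy and its binding to parameter objects; with the builders above that wiring looks roughly like the following (a ConfigMap-backed parameter named limits in policy-system is assumed purely for illustration):

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	applyadmissionv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
)

func main() {
	// The policy declares which kind carries its parameters...
	kind := applyadmissionv1.ParamKind().
		WithAPIVersion("v1").
		WithKind("ConfigMap")

	// ...and a binding points at a concrete parameter object, including what
	// to do when that object is missing.
	ref := applyadmissionv1.ParamRef().
		WithName("limits").
		WithNamespace("policy-system").
		WithParameterNotFoundAction(admissionregistrationv1.DenyAction)

	fmt.Println(*kind.Kind, *ref.Name, *ref.ParameterNotFoundAction)
}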
package v1 -// ServiceReferenceApplyConfiguration represents an declarative configuration of the ServiceReference type for use +// ServiceReferenceApplyConfiguration represents a declarative configuration of the ServiceReference type for use // with apply. type ServiceReferenceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` @@ -27,7 +27,7 @@ type ServiceReferenceApplyConfiguration struct { Port *int32 `json:"port,omitempty"` } -// ServiceReferenceApplyConfiguration constructs an declarative configuration of the ServiceReference type for use with +// ServiceReferenceApplyConfiguration constructs a declarative configuration of the ServiceReference type for use with // apply. func ServiceReference() *ServiceReferenceApplyConfiguration { return &ServiceReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/typechecking.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/typechecking.go new file mode 100644 index 0000000000..723d10ecf5 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/typechecking.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// TypeCheckingApplyConfiguration represents a declarative configuration of the TypeChecking type for use +// with apply. +type TypeCheckingApplyConfiguration struct { + ExpressionWarnings []ExpressionWarningApplyConfiguration `json:"expressionWarnings,omitempty"` +} + +// TypeCheckingApplyConfiguration constructs a declarative configuration of the TypeChecking type for use with +// apply. +func TypeChecking() *TypeCheckingApplyConfiguration { + return &TypeCheckingApplyConfiguration{} +} + +// WithExpressionWarnings adds the given value to the ExpressionWarnings field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ExpressionWarnings field. +func (b *TypeCheckingApplyConfiguration) WithExpressionWarnings(values ...*ExpressionWarningApplyConfiguration) *TypeCheckingApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithExpressionWarnings") + } + b.ExpressionWarnings = append(b.ExpressionWarnings, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go new file mode 100644 index 0000000000..841209cae1 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go @@ -0,0 +1,262 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ValidatingAdmissionPolicyApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicy type for use +// with apply. +type ValidatingAdmissionPolicyApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ValidatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"` + Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"` +} + +// ValidatingAdmissionPolicy constructs a declarative configuration of the ValidatingAdmissionPolicy type for use with +// apply. +func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfiguration { + b := &ValidatingAdmissionPolicyApplyConfiguration{} + b.WithName(name) + b.WithKind("ValidatingAdmissionPolicy") + b.WithAPIVersion("admissionregistration.k8s.io/v1") + return b +} + +// ExtractValidatingAdmissionPolicy extracts the applied configuration owned by fieldManager from +// validatingAdmissionPolicy. If no managedFields are found in validatingAdmissionPolicy for fieldManager, a +// ValidatingAdmissionPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// validatingAdmissionPolicy must be a unmodified ValidatingAdmissionPolicy API object that was retrieved from the Kubernetes API. +// ExtractValidatingAdmissionPolicy provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractValidatingAdmissionPolicy(validatingAdmissionPolicy *apiadmissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { + return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "") +} + +// ExtractValidatingAdmissionPolicyStatus is the same as ExtractValidatingAdmissionPolicy except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractValidatingAdmissionPolicyStatus(validatingAdmissionPolicy *apiadmissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { + return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "status") +} + +func extractValidatingAdmissionPolicy(validatingAdmissionPolicy *apiadmissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string, subresource string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { + b := &ValidatingAdmissionPolicyApplyConfiguration{} + err := managedfields.ExtractInto(validatingAdmissionPolicy, internal.Parser().Type("io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicy"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(validatingAdmissionPolicy.Name) + + b.WithKind("ValidatingAdmissionPolicy") + b.WithAPIVersion("admissionregistration.k8s.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the UID field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ValidatingAdmissionPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithSpec(value *ValidatingAdmissionPolicySpecApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *ValidatingAdmissionPolicyStatusApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingAdmissionPolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go new file mode 100644 index 0000000000..1acad056f3 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go @@ -0,0 +1,253 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ValidatingAdmissionPolicyBindingApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBinding type for use +// with apply. +type ValidatingAdmissionPolicyBindingApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"` +} + +// ValidatingAdmissionPolicyBinding constructs a declarative configuration of the ValidatingAdmissionPolicyBinding type for use with +// apply. +func ValidatingAdmissionPolicyBinding(name string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b := &ValidatingAdmissionPolicyBindingApplyConfiguration{} + b.WithName(name) + b.WithKind("ValidatingAdmissionPolicyBinding") + b.WithAPIVersion("admissionregistration.k8s.io/v1") + return b +} + +// ExtractValidatingAdmissionPolicyBinding extracts the applied configuration owned by fieldManager from +// validatingAdmissionPolicyBinding. If no managedFields are found in validatingAdmissionPolicyBinding for fieldManager, a +// ValidatingAdmissionPolicyBindingApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// validatingAdmissionPolicyBinding must be a unmodified ValidatingAdmissionPolicyBinding API object that was retrieved from the Kubernetes API. 
+// ExtractValidatingAdmissionPolicyBinding provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *apiadmissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { + return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "") +} + +// ExtractValidatingAdmissionPolicyBindingStatus is the same as ExtractValidatingAdmissionPolicyBinding except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractValidatingAdmissionPolicyBindingStatus(validatingAdmissionPolicyBinding *apiadmissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { + return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "status") +} + +func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *apiadmissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string, subresource string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { + b := &ValidatingAdmissionPolicyBindingApplyConfiguration{} + err := managedfields.ExtractInto(validatingAdmissionPolicyBinding, internal.Parser().Type("io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyBinding"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(validatingAdmissionPolicyBinding.Name) + + b.WithKind("ValidatingAdmissionPolicyBinding") + b.WithAPIVersion("admissionregistration.k8s.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybindingspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybindingspec.go new file mode 100644 index 0000000000..eb426af42a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybindingspec.go @@ -0,0 +1,72 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" +) + +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use +// with apply. +type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct { + PolicyName *string `json:"policyName,omitempty"` + ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"` + MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"` + ValidationActions []admissionregistrationv1.ValidationAction `json:"validationActions,omitempty"` +} + +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with +// apply. +func ValidatingAdmissionPolicyBindingSpec() *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + return &ValidatingAdmissionPolicyBindingSpecApplyConfiguration{} +} + +// WithPolicyName sets the PolicyName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the PolicyName field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithPolicyName(value string) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + b.PolicyName = &value + return b +} + +// WithParamRef sets the ParamRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParamRef field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithParamRef(value *ParamRefApplyConfiguration) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + b.ParamRef = value + return b +} + +// WithMatchResources sets the MatchResources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchResources field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithMatchResources(value *MatchResourcesApplyConfiguration) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + b.MatchResources = value + return b +} + +// WithValidationActions adds the given value to the ValidationActions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ValidationActions field. +func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithValidationActions(values ...admissionregistrationv1.ValidationAction) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + for i := range values { + b.ValidationActions = append(b.ValidationActions, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicyspec.go new file mode 100644 index 0000000000..1635b30a61 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicyspec.go @@ -0,0 +1,117 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" +) + +// ValidatingAdmissionPolicySpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicySpec type for use +// with apply. 
+type ValidatingAdmissionPolicySpecApplyConfiguration struct { + ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"` + MatchConstraints *MatchResourcesApplyConfiguration `json:"matchConstraints,omitempty"` + Validations []ValidationApplyConfiguration `json:"validations,omitempty"` + FailurePolicy *admissionregistrationv1.FailurePolicyType `json:"failurePolicy,omitempty"` + AuditAnnotations []AuditAnnotationApplyConfiguration `json:"auditAnnotations,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` + Variables []VariableApplyConfiguration `json:"variables,omitempty"` +} + +// ValidatingAdmissionPolicySpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicySpec type for use with +// apply. +func ValidatingAdmissionPolicySpec() *ValidatingAdmissionPolicySpecApplyConfiguration { + return &ValidatingAdmissionPolicySpecApplyConfiguration{} +} + +// WithParamKind sets the ParamKind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParamKind field is set to the value of the last call. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithParamKind(value *ParamKindApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + b.ParamKind = value + return b +} + +// WithMatchConstraints sets the MatchConstraints field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchConstraints field is set to the value of the last call. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithMatchConstraints(value *MatchResourcesApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + b.MatchConstraints = value + return b +} + +// WithValidations adds the given value to the Validations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Validations field. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithValidations(values ...*ValidationApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithValidations") + } + b.Validations = append(b.Validations, *values[i]) + } + return b +} + +// WithFailurePolicy sets the FailurePolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FailurePolicy field is set to the value of the last call. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithFailurePolicy(value admissionregistrationv1.FailurePolicyType) *ValidatingAdmissionPolicySpecApplyConfiguration { + b.FailurePolicy = &value + return b +} + +// WithAuditAnnotations adds the given value to the AuditAnnotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AuditAnnotations field. 
+func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithAuditAnnotations(values ...*AuditAnnotationApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAuditAnnotations") + } + b.AuditAnnotations = append(b.AuditAnnotations, *values[i]) + } + return b +} + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} + +// WithVariables adds the given value to the Variables field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Variables field. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithVariables(values ...*VariableApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithVariables") + } + b.Variables = append(b.Variables, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicystatus.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicystatus.go new file mode 100644 index 0000000000..e6f4e84591 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicystatus.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ValidatingAdmissionPolicyStatusApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyStatus type for use +// with apply. +type ValidatingAdmissionPolicyStatusApplyConfiguration struct { + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + TypeChecking *TypeCheckingApplyConfiguration `json:"typeChecking,omitempty"` + Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// ValidatingAdmissionPolicyStatusApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyStatus type for use with +// apply. 
+func ValidatingAdmissionPolicyStatus() *ValidatingAdmissionPolicyStatusApplyConfiguration { + return &ValidatingAdmissionPolicyStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithObservedGeneration(value int64) *ValidatingAdmissionPolicyStatusApplyConfiguration { + b.ObservedGeneration = &value + return b +} + +// WithTypeChecking sets the TypeChecking field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TypeChecking field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithTypeChecking(value *TypeCheckingApplyConfiguration) *ValidatingAdmissionPolicyStatusApplyConfiguration { + b.TypeChecking = value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *ValidatingAdmissionPolicyStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go index 613856bac7..a2c705eb5c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingWebhookApplyConfiguration represents an declarative configuration of the ValidatingWebhook type for use +// ValidatingWebhookApplyConfiguration represents a declarative configuration of the ValidatingWebhook type for use // with apply. type ValidatingWebhookApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -39,7 +39,7 @@ type ValidatingWebhookApplyConfiguration struct { MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } -// ValidatingWebhookApplyConfiguration constructs an declarative configuration of the ValidatingWebhook type for use with +// ValidatingWebhookApplyConfiguration constructs a declarative configuration of the ValidatingWebhook type for use with // apply. 
func ValidatingWebhook() *ValidatingWebhookApplyConfiguration { return &ValidatingWebhookApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go index 811bfdf0b6..0d1a6c81ae 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingWebhookConfigurationApplyConfiguration represents an declarative configuration of the ValidatingWebhookConfiguration type for use +// ValidatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the ValidatingWebhookConfiguration type for use // with apply. type ValidatingWebhookConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type ValidatingWebhookConfigurationApplyConfiguration struct { Webhooks []ValidatingWebhookApplyConfiguration `json:"webhooks,omitempty"` } -// ValidatingWebhookConfiguration constructs an declarative configuration of the ValidatingWebhookConfiguration type for use with +// ValidatingWebhookConfiguration constructs a declarative configuration of the ValidatingWebhookConfiguration type for use with // apply. func ValidatingWebhookConfiguration(name string) *ValidatingWebhookConfigurationApplyConfiguration { b := &ValidatingWebhookConfigurationApplyConfiguration{} @@ -250,3 +250,9 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithWebhooks(values . } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingWebhookConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go new file mode 100644 index 0000000000..2a828b6b4f --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ValidationApplyConfiguration represents a declarative configuration of the Validation type for use +// with apply. 
+type ValidationApplyConfiguration struct { + Expression *string `json:"expression,omitempty"` + Message *string `json:"message,omitempty"` + Reason *v1.StatusReason `json:"reason,omitempty"` + MessageExpression *string `json:"messageExpression,omitempty"` +} + +// ValidationApplyConfiguration constructs a declarative configuration of the Validation type for use with +// apply. +func Validation() *ValidationApplyConfiguration { + return &ValidationApplyConfiguration{} +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithExpression(value string) *ValidationApplyConfiguration { + b.Expression = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithMessage(value string) *ValidationApplyConfiguration { + b.Message = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithReason(value v1.StatusReason) *ValidationApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessageExpression sets the MessageExpression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MessageExpression field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithMessageExpression(value string) *ValidationApplyConfiguration { + b.MessageExpression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/variable.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/variable.go new file mode 100644 index 0000000000..9dd20afa72 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/variable.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// VariableApplyConfiguration represents a declarative configuration of the Variable type for use +// with apply. 
+type VariableApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Expression *string `json:"expression,omitempty"` +} + +// VariableApplyConfiguration constructs a declarative configuration of the Variable type for use with +// apply. +func Variable() *VariableApplyConfiguration { + return &VariableApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *VariableApplyConfiguration) WithName(value string) *VariableApplyConfiguration { + b.Name = &value + return b +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *VariableApplyConfiguration) WithExpression(value string) *VariableApplyConfiguration { + b.Expression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/webhookclientconfig.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/webhookclientconfig.go index aa358ae205..77f2227b95 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/webhookclientconfig.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/webhookclientconfig.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// WebhookClientConfigApplyConfiguration represents an declarative configuration of the WebhookClientConfig type for use +// WebhookClientConfigApplyConfiguration represents a declarative configuration of the WebhookClientConfig type for use // with apply. type WebhookClientConfigApplyConfiguration struct { URL *string `json:"url,omitempty"` @@ -26,7 +26,7 @@ type WebhookClientConfigApplyConfiguration struct { CABundle []byte `json:"caBundle,omitempty"` } -// WebhookClientConfigApplyConfiguration constructs an declarative configuration of the WebhookClientConfig type for use with +// WebhookClientConfigApplyConfiguration constructs a declarative configuration of the WebhookClientConfig type for use with // apply. func WebhookClientConfig() *WebhookClientConfigApplyConfiguration { return &WebhookClientConfigApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go index 023695139d..958a537406 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go @@ -18,14 +18,14 @@ limitations under the License. package v1alpha1 -// AuditAnnotationApplyConfiguration represents an declarative configuration of the AuditAnnotation type for use +// AuditAnnotationApplyConfiguration represents a declarative configuration of the AuditAnnotation type for use // with apply. 
type AuditAnnotationApplyConfiguration struct { Key *string `json:"key,omitempty"` ValueExpression *string `json:"valueExpression,omitempty"` } -// AuditAnnotationApplyConfiguration constructs an declarative configuration of the AuditAnnotation type for use with +// AuditAnnotationApplyConfiguration constructs a declarative configuration of the AuditAnnotation type for use with // apply. func AuditAnnotation() *AuditAnnotationApplyConfiguration { return &AuditAnnotationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go index f8b511f512..f36c2f0f5c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go @@ -18,14 +18,14 @@ limitations under the License. package v1alpha1 -// ExpressionWarningApplyConfiguration represents an declarative configuration of the ExpressionWarning type for use +// ExpressionWarningApplyConfiguration represents a declarative configuration of the ExpressionWarning type for use // with apply. type ExpressionWarningApplyConfiguration struct { FieldRef *string `json:"fieldRef,omitempty"` Warning *string `json:"warning,omitempty"` } -// ExpressionWarningApplyConfiguration constructs an declarative configuration of the ExpressionWarning type for use with +// ExpressionWarningApplyConfiguration constructs a declarative configuration of the ExpressionWarning type for use with // apply. func ExpressionWarning() *ExpressionWarningApplyConfiguration { return &ExpressionWarningApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go index 186c750f96..7f983dcb22 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go @@ -18,14 +18,14 @@ limitations under the License. package v1alpha1 -// MatchConditionApplyConfiguration represents an declarative configuration of the MatchCondition type for use +// MatchConditionApplyConfiguration represents a declarative configuration of the MatchCondition type for use // with apply. type MatchConditionApplyConfiguration struct { Name *string `json:"name,omitempty"` Expression *string `json:"expression,omitempty"` } -// MatchConditionApplyConfiguration constructs an declarative configuration of the MatchCondition type for use with +// MatchConditionApplyConfiguration constructs a declarative configuration of the MatchCondition type for use with // apply. 
func MatchCondition() *MatchConditionApplyConfiguration { return &MatchConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go index a6710ac7ed..e443535b6a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MatchResourcesApplyConfiguration represents an declarative configuration of the MatchResources type for use +// MatchResourcesApplyConfiguration represents a declarative configuration of the MatchResources type for use // with apply. type MatchResourcesApplyConfiguration struct { NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` @@ -33,7 +33,7 @@ type MatchResourcesApplyConfiguration struct { MatchPolicy *admissionregistrationv1alpha1.MatchPolicyType `json:"matchPolicy,omitempty"` } -// MatchResourcesApplyConfiguration constructs an declarative configuration of the MatchResources type for use with +// MatchResourcesApplyConfiguration constructs a declarative configuration of the MatchResources type for use with // apply. func MatchResources() *MatchResourcesApplyConfiguration { return &MatchResourcesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go index bb2a7ba890..5e6744fd74 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go @@ -23,14 +23,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" ) -// NamedRuleWithOperationsApplyConfiguration represents an declarative configuration of the NamedRuleWithOperations type for use +// NamedRuleWithOperationsApplyConfiguration represents a declarative configuration of the NamedRuleWithOperations type for use // with apply. type NamedRuleWithOperationsApplyConfiguration struct { ResourceNames []string `json:"resourceNames,omitempty"` v1.RuleWithOperationsApplyConfiguration `json:",inline"` } -// NamedRuleWithOperationsApplyConfiguration constructs an declarative configuration of the NamedRuleWithOperations type for use with +// NamedRuleWithOperationsApplyConfiguration constructs a declarative configuration of the NamedRuleWithOperations type for use with // apply. func NamedRuleWithOperations() *NamedRuleWithOperationsApplyConfiguration { return &NamedRuleWithOperationsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go index 350993cea0..daf17fb249 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go @@ -18,14 +18,14 @@ limitations under the License. 
package v1alpha1 -// ParamKindApplyConfiguration represents an declarative configuration of the ParamKind type for use +// ParamKindApplyConfiguration represents a declarative configuration of the ParamKind type for use // with apply. type ParamKindApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` Kind *string `json:"kind,omitempty"` } -// ParamKindApplyConfiguration constructs an declarative configuration of the ParamKind type for use with +// ParamKindApplyConfiguration constructs a declarative configuration of the ParamKind type for use with // apply. func ParamKind() *ParamKindApplyConfiguration { return &ParamKindApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go index 0951cae8a9..c4fff1d475 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ParamRefApplyConfiguration represents an declarative configuration of the ParamRef type for use +// ParamRefApplyConfiguration represents a declarative configuration of the ParamRef type for use // with apply. type ParamRefApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -32,7 +32,7 @@ type ParamRefApplyConfiguration struct { ParameterNotFoundAction *v1alpha1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"` } -// ParamRefApplyConfiguration constructs an declarative configuration of the ParamRef type for use with +// ParamRefApplyConfiguration constructs a declarative configuration of the ParamRef type for use with // apply. func ParamRef() *ParamRefApplyConfiguration { return &ParamRefApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go index 42a9170710..d1a7fff50e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go @@ -18,13 +18,13 @@ limitations under the License. package v1alpha1 -// TypeCheckingApplyConfiguration represents an declarative configuration of the TypeChecking type for use +// TypeCheckingApplyConfiguration represents a declarative configuration of the TypeChecking type for use // with apply. type TypeCheckingApplyConfiguration struct { ExpressionWarnings []ExpressionWarningApplyConfiguration `json:"expressionWarnings,omitempty"` } -// TypeCheckingApplyConfiguration constructs an declarative configuration of the TypeChecking type for use with +// TypeCheckingApplyConfiguration constructs a declarative configuration of the TypeChecking type for use with // apply. 
func TypeChecking() *TypeCheckingApplyConfiguration { return &TypeCheckingApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go index c860b85cf7..fe60eb5f25 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingAdmissionPolicyApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicy type for use +// ValidatingAdmissionPolicyApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicy type for use // with apply. type ValidatingAdmissionPolicyApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ValidatingAdmissionPolicyApplyConfiguration struct { Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"` } -// ValidatingAdmissionPolicy constructs an declarative configuration of the ValidatingAdmissionPolicy type for use with +// ValidatingAdmissionPolicy constructs a declarative configuration of the ValidatingAdmissionPolicy type for use with // apply. func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfiguration { b := &ValidatingAdmissionPolicyApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *Validati b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingAdmissionPolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go index dc08226404..0c11ee5945 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingAdmissionPolicyBindingApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBinding type for use +// ValidatingAdmissionPolicyBindingApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBinding type for use // with apply. type ValidatingAdmissionPolicyBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type ValidatingAdmissionPolicyBindingApplyConfiguration struct { Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"` } -// ValidatingAdmissionPolicyBinding constructs an declarative configuration of the ValidatingAdmissionPolicyBinding type for use with +// ValidatingAdmissionPolicyBinding constructs a declarative configuration of the ValidatingAdmissionPolicyBinding type for use with // apply. 
func ValidatingAdmissionPolicyBinding(name string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b := &ValidatingAdmissionPolicyBindingApplyConfiguration{} @@ -245,3 +245,9 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *Val b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go index c9a4ff7ab4..0f8e4e4357 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go @@ -22,7 +22,7 @@ import ( admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" ) -// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use // with apply. type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct { PolicyName *string `json:"policyName,omitempty"` @@ -31,7 +31,7 @@ type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct { ValidationActions []admissionregistrationv1alpha1.ValidationAction `json:"validationActions,omitempty"` } -// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with // apply. func ValidatingAdmissionPolicyBindingSpec() *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { return &ValidatingAdmissionPolicyBindingSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go index 7ee320e428..d5d3529949 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go @@ -22,7 +22,7 @@ import ( admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" ) -// ValidatingAdmissionPolicySpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicySpec type for use +// ValidatingAdmissionPolicySpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicySpec type for use // with apply. 
type ValidatingAdmissionPolicySpecApplyConfiguration struct { ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"` @@ -34,7 +34,7 @@ type ValidatingAdmissionPolicySpecApplyConfiguration struct { Variables []VariableApplyConfiguration `json:"variables,omitempty"` } -// ValidatingAdmissionPolicySpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicySpec type for use with +// ValidatingAdmissionPolicySpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicySpec type for use with // apply. func ValidatingAdmissionPolicySpec() *ValidatingAdmissionPolicySpecApplyConfiguration { return &ValidatingAdmissionPolicySpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go index 821184c8a8..2fec5ba477 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingAdmissionPolicyStatusApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyStatus type for use +// ValidatingAdmissionPolicyStatusApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyStatus type for use // with apply. type ValidatingAdmissionPolicyStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -30,7 +30,7 @@ type ValidatingAdmissionPolicyStatusApplyConfiguration struct { Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// ValidatingAdmissionPolicyStatusApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyStatus type for use with +// ValidatingAdmissionPolicyStatusApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyStatus type for use with // apply. func ValidatingAdmissionPolicyStatus() *ValidatingAdmissionPolicyStatusApplyConfiguration { return &ValidatingAdmissionPolicyStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go index 9a5fc8475a..5f73043734 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ValidationApplyConfiguration represents an declarative configuration of the Validation type for use +// ValidationApplyConfiguration represents a declarative configuration of the Validation type for use // with apply. type ValidationApplyConfiguration struct { Expression *string `json:"expression,omitempty"` @@ -31,7 +31,7 @@ type ValidationApplyConfiguration struct { MessageExpression *string `json:"messageExpression,omitempty"` } -// ValidationApplyConfiguration constructs an declarative configuration of the Validation type for use with +// ValidationApplyConfiguration constructs a declarative configuration of the Validation type for use with // apply. 
func Validation() *ValidationApplyConfiguration { return &ValidationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go index 2c70a8cfb5..0459dae655 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go @@ -18,14 +18,14 @@ limitations under the License. package v1alpha1 -// VariableApplyConfiguration represents an declarative configuration of the Variable type for use +// VariableApplyConfiguration represents a declarative configuration of the Variable type for use // with apply. type VariableApplyConfiguration struct { Name *string `json:"name,omitempty"` Expression *string `json:"expression,omitempty"` } -// VariableApplyConfiguration constructs an declarative configuration of the Variable type for use with +// VariableApplyConfiguration constructs a declarative configuration of the Variable type for use with // apply. func Variable() *VariableApplyConfiguration { return &VariableApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go index e92fba0ddb..8718db9447 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// AuditAnnotationApplyConfiguration represents an declarative configuration of the AuditAnnotation type for use +// AuditAnnotationApplyConfiguration represents a declarative configuration of the AuditAnnotation type for use // with apply. type AuditAnnotationApplyConfiguration struct { Key *string `json:"key,omitempty"` ValueExpression *string `json:"valueExpression,omitempty"` } -// AuditAnnotationApplyConfiguration constructs an declarative configuration of the AuditAnnotation type for use with +// AuditAnnotationApplyConfiguration constructs a declarative configuration of the AuditAnnotation type for use with // apply. func AuditAnnotation() *AuditAnnotationApplyConfiguration { return &AuditAnnotationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go index 059c1b94ba..66cfc8cdc7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// ExpressionWarningApplyConfiguration represents an declarative configuration of the ExpressionWarning type for use +// ExpressionWarningApplyConfiguration represents a declarative configuration of the ExpressionWarning type for use // with apply. 
type ExpressionWarningApplyConfiguration struct { FieldRef *string `json:"fieldRef,omitempty"` Warning *string `json:"warning,omitempty"` } -// ExpressionWarningApplyConfiguration constructs an declarative configuration of the ExpressionWarning type for use with +// ExpressionWarningApplyConfiguration constructs a declarative configuration of the ExpressionWarning type for use with // apply. func ExpressionWarning() *ExpressionWarningApplyConfiguration { return &ExpressionWarningApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go index d099b6b6ea..63db7fc801 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// MatchConditionApplyConfiguration represents an declarative configuration of the MatchCondition type for use +// MatchConditionApplyConfiguration represents a declarative configuration of the MatchCondition type for use // with apply. type MatchConditionApplyConfiguration struct { Name *string `json:"name,omitempty"` Expression *string `json:"expression,omitempty"` } -// MatchConditionApplyConfiguration constructs an declarative configuration of the MatchCondition type for use with +// MatchConditionApplyConfiguration constructs a declarative configuration of the MatchCondition type for use with // apply. func MatchCondition() *MatchConditionApplyConfiguration { return &MatchConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go index 25d4139db6..4005e55a33 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MatchResourcesApplyConfiguration represents an declarative configuration of the MatchResources type for use +// MatchResourcesApplyConfiguration represents a declarative configuration of the MatchResources type for use // with apply. type MatchResourcesApplyConfiguration struct { NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` @@ -33,7 +33,7 @@ type MatchResourcesApplyConfiguration struct { MatchPolicy *admissionregistrationv1beta1.MatchPolicyType `json:"matchPolicy,omitempty"` } -// MatchResourcesApplyConfiguration constructs an declarative configuration of the MatchResources type for use with +// MatchResourcesApplyConfiguration constructs a declarative configuration of the MatchResources type for use with // apply. 
func MatchResources() *MatchResourcesApplyConfiguration { return &MatchResourcesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go index 54845341f4..b2ab76aefd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MutatingWebhookApplyConfiguration represents an declarative configuration of the MutatingWebhook type for use +// MutatingWebhookApplyConfiguration represents a declarative configuration of the MutatingWebhook type for use // with apply. type MutatingWebhookApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -41,7 +41,7 @@ type MutatingWebhookApplyConfiguration struct { MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } -// MutatingWebhookApplyConfiguration constructs an declarative configuration of the MutatingWebhook type for use with +// MutatingWebhookApplyConfiguration constructs a declarative configuration of the MutatingWebhook type for use with // apply. func MutatingWebhook() *MutatingWebhookApplyConfiguration { return &MutatingWebhookApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index 10dd034e25..51bb823896 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MutatingWebhookConfigurationApplyConfiguration represents an declarative configuration of the MutatingWebhookConfiguration type for use +// MutatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the MutatingWebhookConfiguration type for use // with apply. type MutatingWebhookConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type MutatingWebhookConfigurationApplyConfiguration struct { Webhooks []MutatingWebhookApplyConfiguration `json:"webhooks,omitempty"` } -// MutatingWebhookConfiguration constructs an declarative configuration of the MutatingWebhookConfiguration type for use with +// MutatingWebhookConfiguration constructs a declarative configuration of the MutatingWebhookConfiguration type for use with // apply. func MutatingWebhookConfiguration(name string) *MutatingWebhookConfigurationApplyConfiguration { b := &MutatingWebhookConfigurationApplyConfiguration{} @@ -250,3 +250,9 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithWebhooks(values ... } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *MutatingWebhookConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go index fa346c4a57..5de70c7ad3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go @@ -23,14 +23,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" ) -// NamedRuleWithOperationsApplyConfiguration represents an declarative configuration of the NamedRuleWithOperations type for use +// NamedRuleWithOperationsApplyConfiguration represents a declarative configuration of the NamedRuleWithOperations type for use // with apply. type NamedRuleWithOperationsApplyConfiguration struct { ResourceNames []string `json:"resourceNames,omitempty"` v1.RuleWithOperationsApplyConfiguration `json:",inline"` } -// NamedRuleWithOperationsApplyConfiguration constructs an declarative configuration of the NamedRuleWithOperations type for use with +// NamedRuleWithOperationsApplyConfiguration constructs a declarative configuration of the NamedRuleWithOperations type for use with // apply. func NamedRuleWithOperations() *NamedRuleWithOperationsApplyConfiguration { return &NamedRuleWithOperationsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go index 6050e60251..3983125281 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// ParamKindApplyConfiguration represents an declarative configuration of the ParamKind type for use +// ParamKindApplyConfiguration represents a declarative configuration of the ParamKind type for use // with apply. type ParamKindApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` Kind *string `json:"kind,omitempty"` } -// ParamKindApplyConfiguration constructs an declarative configuration of the ParamKind type for use with +// ParamKindApplyConfiguration constructs a declarative configuration of the ParamKind type for use with // apply. func ParamKind() *ParamKindApplyConfiguration { return &ParamKindApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go index 2be98dbc52..0a94ae0673 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ParamRefApplyConfiguration represents an declarative configuration of the ParamRef type for use +// ParamRefApplyConfiguration represents a declarative configuration of the ParamRef type for use // with apply. 
type ParamRefApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -32,7 +32,7 @@ type ParamRefApplyConfiguration struct { ParameterNotFoundAction *v1beta1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"` } -// ParamRefApplyConfiguration constructs an declarative configuration of the ParamRef type for use with +// ParamRefApplyConfiguration constructs a declarative configuration of the ParamRef type for use with // apply. func ParamRef() *ParamRefApplyConfiguration { return &ParamRefApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/servicereference.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/servicereference.go index c21b574908..70cc6b5b27 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/servicereference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/servicereference.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// ServiceReferenceApplyConfiguration represents an declarative configuration of the ServiceReference type for use +// ServiceReferenceApplyConfiguration represents a declarative configuration of the ServiceReference type for use // with apply. type ServiceReferenceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` @@ -27,7 +27,7 @@ type ServiceReferenceApplyConfiguration struct { Port *int32 `json:"port,omitempty"` } -// ServiceReferenceApplyConfiguration constructs an declarative configuration of the ServiceReference type for use with +// ServiceReferenceApplyConfiguration constructs a declarative configuration of the ServiceReference type for use with // apply. func ServiceReference() *ServiceReferenceApplyConfiguration { return &ServiceReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go index 07baf334cd..cea6e11dee 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// TypeCheckingApplyConfiguration represents an declarative configuration of the TypeChecking type for use +// TypeCheckingApplyConfiguration represents a declarative configuration of the TypeChecking type for use // with apply. type TypeCheckingApplyConfiguration struct { ExpressionWarnings []ExpressionWarningApplyConfiguration `json:"expressionWarnings,omitempty"` } -// TypeCheckingApplyConfiguration constructs an declarative configuration of the TypeChecking type for use with +// TypeCheckingApplyConfiguration constructs a declarative configuration of the TypeChecking type for use with // apply. 
func TypeChecking() *TypeCheckingApplyConfiguration { return &TypeCheckingApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go index e144bc9f70..c29ee56cbe 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingAdmissionPolicyApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicy type for use +// ValidatingAdmissionPolicyApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicy type for use // with apply. type ValidatingAdmissionPolicyApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ValidatingAdmissionPolicyApplyConfiguration struct { Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"` } -// ValidatingAdmissionPolicy constructs an declarative configuration of the ValidatingAdmissionPolicy type for use with +// ValidatingAdmissionPolicy constructs a declarative configuration of the ValidatingAdmissionPolicy type for use with // apply. func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfiguration { b := &ValidatingAdmissionPolicyApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *Validati b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingAdmissionPolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go index 0dc06aedec..4347c4810c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingAdmissionPolicyBindingApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBinding type for use +// ValidatingAdmissionPolicyBindingApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBinding type for use // with apply. type ValidatingAdmissionPolicyBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type ValidatingAdmissionPolicyBindingApplyConfiguration struct { Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"` } -// ValidatingAdmissionPolicyBinding constructs an declarative configuration of the ValidatingAdmissionPolicyBinding type for use with +// ValidatingAdmissionPolicyBinding constructs a declarative configuration of the ValidatingAdmissionPolicyBinding type for use with // apply. 
func ValidatingAdmissionPolicyBinding(name string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b := &ValidatingAdmissionPolicyBindingApplyConfiguration{} @@ -245,3 +245,9 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *Val b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go index d20a78efff..bddc3a40c7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go @@ -22,7 +22,7 @@ import ( admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" ) -// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use // with apply. type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct { PolicyName *string `json:"policyName,omitempty"` @@ -31,7 +31,7 @@ type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct { ValidationActions []admissionregistrationv1beta1.ValidationAction `json:"validationActions,omitempty"` } -// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with // apply. func ValidatingAdmissionPolicyBindingSpec() *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { return &ValidatingAdmissionPolicyBindingSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go index c6e9389103..8b235337d7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go @@ -22,7 +22,7 @@ import ( admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" ) -// ValidatingAdmissionPolicySpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicySpec type for use +// ValidatingAdmissionPolicySpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicySpec type for use // with apply. 
type ValidatingAdmissionPolicySpecApplyConfiguration struct { ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"` @@ -34,7 +34,7 @@ type ValidatingAdmissionPolicySpecApplyConfiguration struct { Variables []VariableApplyConfiguration `json:"variables,omitempty"` } -// ValidatingAdmissionPolicySpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicySpec type for use with +// ValidatingAdmissionPolicySpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicySpec type for use with // apply. func ValidatingAdmissionPolicySpec() *ValidatingAdmissionPolicySpecApplyConfiguration { return &ValidatingAdmissionPolicySpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go index e3e6d417ed..4612af0cff 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingAdmissionPolicyStatusApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyStatus type for use +// ValidatingAdmissionPolicyStatusApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyStatus type for use // with apply. type ValidatingAdmissionPolicyStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -30,7 +30,7 @@ type ValidatingAdmissionPolicyStatusApplyConfiguration struct { Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// ValidatingAdmissionPolicyStatusApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyStatus type for use with +// ValidatingAdmissionPolicyStatusApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyStatus type for use with // apply. func ValidatingAdmissionPolicyStatus() *ValidatingAdmissionPolicyStatusApplyConfiguration { return &ValidatingAdmissionPolicyStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go index 8c5c341bad..1e107d68f7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingWebhookApplyConfiguration represents an declarative configuration of the ValidatingWebhook type for use +// ValidatingWebhookApplyConfiguration represents a declarative configuration of the ValidatingWebhook type for use // with apply. 
type ValidatingWebhookApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -40,7 +40,7 @@ type ValidatingWebhookApplyConfiguration struct { MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } -// ValidatingWebhookApplyConfiguration constructs an declarative configuration of the ValidatingWebhook type for use with +// ValidatingWebhookApplyConfiguration constructs a declarative configuration of the ValidatingWebhook type for use with // apply. func ValidatingWebhook() *ValidatingWebhookApplyConfiguration { return &ValidatingWebhookApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go index 75f1b9d716..c3535c180c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingWebhookConfigurationApplyConfiguration represents an declarative configuration of the ValidatingWebhookConfiguration type for use +// ValidatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the ValidatingWebhookConfiguration type for use // with apply. type ValidatingWebhookConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type ValidatingWebhookConfigurationApplyConfiguration struct { Webhooks []ValidatingWebhookApplyConfiguration `json:"webhooks,omitempty"` } -// ValidatingWebhookConfiguration constructs an declarative configuration of the ValidatingWebhookConfiguration type for use with +// ValidatingWebhookConfiguration constructs a declarative configuration of the ValidatingWebhookConfiguration type for use with // apply. func ValidatingWebhookConfiguration(name string) *ValidatingWebhookConfigurationApplyConfiguration { b := &ValidatingWebhookConfigurationApplyConfiguration{} @@ -250,3 +250,9 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithWebhooks(values . } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingWebhookConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go index ed9ff1ac0c..019e8e7aa9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ValidationApplyConfiguration represents an declarative configuration of the Validation type for use +// ValidationApplyConfiguration represents a declarative configuration of the Validation type for use // with apply. 
type ValidationApplyConfiguration struct { Expression *string `json:"expression,omitempty"` @@ -31,7 +31,7 @@ type ValidationApplyConfiguration struct { MessageExpression *string `json:"messageExpression,omitempty"` } -// ValidationApplyConfiguration constructs an declarative configuration of the Validation type for use with +// ValidationApplyConfiguration constructs a declarative configuration of the Validation type for use with // apply. func Validation() *ValidationApplyConfiguration { return &ValidationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go index 0fc294c65d..0ece197db2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// VariableApplyConfiguration represents an declarative configuration of the Variable type for use +// VariableApplyConfiguration represents a declarative configuration of the Variable type for use // with apply. type VariableApplyConfiguration struct { Name *string `json:"name,omitempty"` Expression *string `json:"expression,omitempty"` } -// VariableApplyConfiguration constructs an declarative configuration of the Variable type for use with +// VariableApplyConfiguration constructs a declarative configuration of the Variable type for use with // apply. func Variable() *VariableApplyConfiguration { return &VariableApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/webhookclientconfig.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/webhookclientconfig.go index 490f9d5f3f..76ff71b4ae 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/webhookclientconfig.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/webhookclientconfig.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// WebhookClientConfigApplyConfiguration represents an declarative configuration of the WebhookClientConfig type for use +// WebhookClientConfigApplyConfiguration represents a declarative configuration of the WebhookClientConfig type for use // with apply. type WebhookClientConfigApplyConfiguration struct { URL *string `json:"url,omitempty"` @@ -26,7 +26,7 @@ type WebhookClientConfigApplyConfiguration struct { CABundle []byte `json:"caBundle,omitempty"` } -// WebhookClientConfigApplyConfiguration constructs an declarative configuration of the WebhookClientConfig type for use with +// WebhookClientConfigApplyConfiguration constructs a declarative configuration of the WebhookClientConfig type for use with // apply. func WebhookClientConfig() *WebhookClientConfigApplyConfiguration { return &WebhookClientConfigApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go index 81c56330bb..8394298b93 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1alpha1 -// ServerStorageVersionApplyConfiguration represents an declarative configuration of the ServerStorageVersion type for use +// ServerStorageVersionApplyConfiguration represents a declarative configuration of the ServerStorageVersion type for use // with apply. type ServerStorageVersionApplyConfiguration struct { APIServerID *string `json:"apiServerID,omitempty"` @@ -27,7 +27,7 @@ type ServerStorageVersionApplyConfiguration struct { ServedVersions []string `json:"servedVersions,omitempty"` } -// ServerStorageVersionApplyConfiguration constructs an declarative configuration of the ServerStorageVersion type for use with +// ServerStorageVersionApplyConfiguration constructs a declarative configuration of the ServerStorageVersion type for use with // apply. func ServerStorageVersion() *ServerStorageVersionApplyConfiguration { return &ServerStorageVersionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go index 6b9f178390..d734328b06 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StorageVersionApplyConfiguration represents an declarative configuration of the StorageVersion type for use +// StorageVersionApplyConfiguration represents a declarative configuration of the StorageVersion type for use // with apply. type StorageVersionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type StorageVersionApplyConfiguration struct { Status *StorageVersionStatusApplyConfiguration `json:"status,omitempty"` } -// StorageVersion constructs an declarative configuration of the StorageVersion type for use with +// StorageVersion constructs a declarative configuration of the StorageVersion type for use with // apply. func StorageVersion(name string) *StorageVersionApplyConfiguration { b := &StorageVersionApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *StorageVersionApplyConfiguration) WithStatus(value *StorageVersionStatu b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *StorageVersionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go index 75b6256478..68d894d0c7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// StorageVersionConditionApplyConfiguration represents an declarative configuration of the StorageVersionCondition type for use +// StorageVersionConditionApplyConfiguration represents a declarative configuration of the StorageVersionCondition type for use // with apply. 
type StorageVersionConditionApplyConfiguration struct { Type *v1alpha1.StorageVersionConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type StorageVersionConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// StorageVersionConditionApplyConfiguration constructs an declarative configuration of the StorageVersionCondition type for use with +// StorageVersionConditionApplyConfiguration constructs a declarative configuration of the StorageVersionCondition type for use with // apply. func StorageVersionCondition() *StorageVersionConditionApplyConfiguration { return &StorageVersionConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversionstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversionstatus.go index 43b0bf71b1..2e25d67524 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversionstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversionstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// StorageVersionStatusApplyConfiguration represents an declarative configuration of the StorageVersionStatus type for use +// StorageVersionStatusApplyConfiguration represents a declarative configuration of the StorageVersionStatus type for use // with apply. type StorageVersionStatusApplyConfiguration struct { StorageVersions []ServerStorageVersionApplyConfiguration `json:"storageVersions,omitempty"` @@ -26,7 +26,7 @@ type StorageVersionStatusApplyConfiguration struct { Conditions []StorageVersionConditionApplyConfiguration `json:"conditions,omitempty"` } -// StorageVersionStatusApplyConfiguration constructs an declarative configuration of the StorageVersionStatus type for use with +// StorageVersionStatusApplyConfiguration constructs a declarative configuration of the StorageVersionStatus type for use with // apply. func StorageVersionStatus() *StorageVersionStatusApplyConfiguration { return &StorageVersionStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go index c4e2085078..25b6450591 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ControllerRevisionApplyConfiguration represents an declarative configuration of the ControllerRevision type for use +// ControllerRevisionApplyConfiguration represents a declarative configuration of the ControllerRevision type for use // with apply. type ControllerRevisionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type ControllerRevisionApplyConfiguration struct { Revision *int64 `json:"revision,omitempty"` } -// ControllerRevision constructs an declarative configuration of the ControllerRevision type for use with +// ControllerRevision constructs a declarative configuration of the ControllerRevision type for use with // apply. 
func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfiguration { b := &ControllerRevisionApplyConfiguration{} @@ -257,3 +257,9 @@ func (b *ControllerRevisionApplyConfiguration) WithRevision(value int64) *Contro b.Revision = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ControllerRevisionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go index cc9fdcd5dd..a157856514 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DaemonSetApplyConfiguration represents an declarative configuration of the DaemonSet type for use +// DaemonSetApplyConfiguration represents a declarative configuration of the DaemonSet type for use // with apply. type DaemonSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DaemonSetApplyConfiguration struct { Status *DaemonSetStatusApplyConfiguration `json:"status,omitempty"` } -// DaemonSet constructs an declarative configuration of the DaemonSet type for use with +// DaemonSet constructs a declarative configuration of the DaemonSet type for use with // apply. func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration { b := &DaemonSetApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *DaemonSetApplyConfiguration) WithStatus(value *DaemonSetStatusApplyConf b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DaemonSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go index 283ae10a29..de91745b83 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DaemonSetConditionApplyConfiguration represents an declarative configuration of the DaemonSetCondition type for use +// DaemonSetConditionApplyConfiguration represents a declarative configuration of the DaemonSetCondition type for use // with apply. type DaemonSetConditionApplyConfiguration struct { Type *v1.DaemonSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type DaemonSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// DaemonSetConditionApplyConfiguration constructs an declarative configuration of the DaemonSetCondition type for use with +// DaemonSetConditionApplyConfiguration constructs a declarative configuration of the DaemonSetCondition type for use with // apply. 
func DaemonSetCondition() *DaemonSetConditionApplyConfiguration { return &DaemonSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go index 5e808874b7..99dc5abae8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DaemonSetSpecApplyConfiguration represents an declarative configuration of the DaemonSetSpec type for use +// DaemonSetSpecApplyConfiguration represents a declarative configuration of the DaemonSetSpec type for use // with apply. type DaemonSetSpecApplyConfiguration struct { Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` @@ -33,7 +33,7 @@ type DaemonSetSpecApplyConfiguration struct { RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` } -// DaemonSetSpecApplyConfiguration constructs an declarative configuration of the DaemonSetSpec type for use with +// DaemonSetSpecApplyConfiguration constructs a declarative configuration of the DaemonSetSpec type for use with // apply. func DaemonSetSpec() *DaemonSetSpecApplyConfiguration { return &DaemonSetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetstatus.go index d1c4462aa9..a40dc16512 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// DaemonSetStatusApplyConfiguration represents an declarative configuration of the DaemonSetStatus type for use +// DaemonSetStatusApplyConfiguration represents a declarative configuration of the DaemonSetStatus type for use // with apply. type DaemonSetStatusApplyConfiguration struct { CurrentNumberScheduled *int32 `json:"currentNumberScheduled,omitempty"` @@ -33,7 +33,7 @@ type DaemonSetStatusApplyConfiguration struct { Conditions []DaemonSetConditionApplyConfiguration `json:"conditions,omitempty"` } -// DaemonSetStatusApplyConfiguration constructs an declarative configuration of the DaemonSetStatus type for use with +// DaemonSetStatusApplyConfiguration constructs a declarative configuration of the DaemonSetStatus type for use with // apply. func DaemonSetStatus() *DaemonSetStatusApplyConfiguration { return &DaemonSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go index f1ba18226f..15af4e66be 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/apps/v1" ) -// DaemonSetUpdateStrategyApplyConfiguration represents an declarative configuration of the DaemonSetUpdateStrategy type for use +// DaemonSetUpdateStrategyApplyConfiguration represents a declarative configuration of the DaemonSetUpdateStrategy type for use // with apply. 
type DaemonSetUpdateStrategyApplyConfiguration struct { Type *v1.DaemonSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DaemonSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the DaemonSetUpdateStrategy type for use with +// DaemonSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the DaemonSetUpdateStrategy type for use with // apply. func DaemonSetUpdateStrategy() *DaemonSetUpdateStrategyApplyConfiguration { return &DaemonSetUpdateStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go index 13edda7727..52b7a21b71 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentApplyConfiguration represents an declarative configuration of the Deployment type for use +// DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use // with apply. type DeploymentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DeploymentApplyConfiguration struct { Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"` } -// Deployment constructs an declarative configuration of the Deployment type for use with +// Deployment constructs a declarative configuration of the Deployment type for use with // apply. func Deployment(name, namespace string) *DeploymentApplyConfiguration { b := &DeploymentApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DeploymentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go index 7747044136..84df752bc1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DeploymentConditionApplyConfiguration represents an declarative configuration of the DeploymentCondition type for use +// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use // with apply. type DeploymentConditionApplyConfiguration struct { Type *v1.DeploymentConditionType `json:"type,omitempty"` @@ -35,7 +35,7 @@ type DeploymentConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// DeploymentConditionApplyConfiguration constructs an declarative configuration of the DeploymentCondition type for use with +// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with // apply. 
func DeploymentCondition() *DeploymentConditionApplyConfiguration { return &DeploymentConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go index 812253dae8..063f1c2765 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentSpecApplyConfiguration represents an declarative configuration of the DeploymentSpec type for use +// DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use // with apply. type DeploymentSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -36,7 +36,7 @@ type DeploymentSpecApplyConfiguration struct { ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"` } -// DeploymentSpecApplyConfiguration constructs an declarative configuration of the DeploymentSpec type for use with +// DeploymentSpecApplyConfiguration constructs a declarative configuration of the DeploymentSpec type for use with // apply. func DeploymentSpec() *DeploymentSpecApplyConfiguration { return &DeploymentSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstatus.go index 7b48b42557..747813ade8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// DeploymentStatusApplyConfiguration represents an declarative configuration of the DeploymentStatus type for use +// DeploymentStatusApplyConfiguration represents a declarative configuration of the DeploymentStatus type for use // with apply. type DeploymentStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -31,7 +31,7 @@ type DeploymentStatusApplyConfiguration struct { CollisionCount *int32 `json:"collisionCount,omitempty"` } -// DeploymentStatusApplyConfiguration constructs an declarative configuration of the DeploymentStatus type for use with +// DeploymentStatusApplyConfiguration constructs a declarative configuration of the DeploymentStatus type for use with // apply. func DeploymentStatus() *DeploymentStatusApplyConfiguration { return &DeploymentStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go index e9571edab1..dc4b97c55a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/apps/v1" ) -// DeploymentStrategyApplyConfiguration represents an declarative configuration of the DeploymentStrategy type for use +// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use // with apply. 
type DeploymentStrategyApplyConfiguration struct { Type *v1.DeploymentStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DeploymentStrategyApplyConfiguration constructs an declarative configuration of the DeploymentStrategy type for use with +// DeploymentStrategyApplyConfiguration constructs a declarative configuration of the DeploymentStrategy type for use with // apply. func DeploymentStrategy() *DeploymentStrategyApplyConfiguration { return &DeploymentStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go index 4e7818e535..35ca4e4dff 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicaSetApplyConfiguration represents an declarative configuration of the ReplicaSet type for use +// ReplicaSetApplyConfiguration represents a declarative configuration of the ReplicaSet type for use // with apply. type ReplicaSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ReplicaSetApplyConfiguration struct { Status *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"` } -// ReplicaSet constructs an declarative configuration of the ReplicaSet type for use with +// ReplicaSet constructs a declarative configuration of the ReplicaSet type for use with // apply. func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration { b := &ReplicaSetApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *ReplicaSetApplyConfiguration) WithStatus(value *ReplicaSetStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ReplicaSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go index 19b0355d15..32da80842c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ReplicaSetConditionApplyConfiguration represents an declarative configuration of the ReplicaSetCondition type for use +// ReplicaSetConditionApplyConfiguration represents a declarative configuration of the ReplicaSetCondition type for use // with apply. type ReplicaSetConditionApplyConfiguration struct { Type *v1.ReplicaSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type ReplicaSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// ReplicaSetConditionApplyConfiguration constructs an declarative configuration of the ReplicaSetCondition type for use with +// ReplicaSetConditionApplyConfiguration constructs a declarative configuration of the ReplicaSetCondition type for use with // apply. 
func ReplicaSetCondition() *ReplicaSetConditionApplyConfiguration { return &ReplicaSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go index ca32865835..0390584867 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicaSetSpecApplyConfiguration represents an declarative configuration of the ReplicaSetSpec type for use +// ReplicaSetSpecApplyConfiguration represents a declarative configuration of the ReplicaSetSpec type for use // with apply. type ReplicaSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -32,7 +32,7 @@ type ReplicaSetSpecApplyConfiguration struct { Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` } -// ReplicaSetSpecApplyConfiguration constructs an declarative configuration of the ReplicaSetSpec type for use with +// ReplicaSetSpecApplyConfiguration constructs a declarative configuration of the ReplicaSetSpec type for use with // apply. func ReplicaSetSpec() *ReplicaSetSpecApplyConfiguration { return &ReplicaSetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetstatus.go index 12f41490f9..a1408ae25e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ReplicaSetStatusApplyConfiguration represents an declarative configuration of the ReplicaSetStatus type for use +// ReplicaSetStatusApplyConfiguration represents a declarative configuration of the ReplicaSetStatus type for use // with apply. type ReplicaSetStatusApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -29,7 +29,7 @@ type ReplicaSetStatusApplyConfiguration struct { Conditions []ReplicaSetConditionApplyConfiguration `json:"conditions,omitempty"` } -// ReplicaSetStatusApplyConfiguration constructs an declarative configuration of the ReplicaSetStatus type for use with +// ReplicaSetStatusApplyConfiguration constructs a declarative configuration of the ReplicaSetStatus type for use with // apply. func ReplicaSetStatus() *ReplicaSetStatusApplyConfiguration { return &ReplicaSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedaemonset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedaemonset.go index ebe8e86d1f..e898f5081c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedaemonset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedaemonset.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDaemonSetApplyConfiguration represents an declarative configuration of the RollingUpdateDaemonSet type for use +// RollingUpdateDaemonSetApplyConfiguration represents a declarative configuration of the RollingUpdateDaemonSet type for use // with apply. 
type RollingUpdateDaemonSetApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDaemonSetApplyConfiguration constructs an declarative configuration of the RollingUpdateDaemonSet type for use with +// RollingUpdateDaemonSetApplyConfiguration constructs a declarative configuration of the RollingUpdateDaemonSet type for use with // apply. func RollingUpdateDaemonSet() *RollingUpdateDaemonSetApplyConfiguration { return &RollingUpdateDaemonSetApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedeployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedeployment.go index ca9daaf249..2bc2937241 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedeployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedeployment.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDeploymentApplyConfiguration represents an declarative configuration of the RollingUpdateDeployment type for use +// RollingUpdateDeploymentApplyConfiguration represents a declarative configuration of the RollingUpdateDeployment type for use // with apply. type RollingUpdateDeploymentApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDeploymentApplyConfiguration constructs an declarative configuration of the RollingUpdateDeployment type for use with +// RollingUpdateDeploymentApplyConfiguration constructs a declarative configuration of the RollingUpdateDeployment type for use with // apply. func RollingUpdateDeployment() *RollingUpdateDeploymentApplyConfiguration { return &RollingUpdateDeploymentApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatestatefulsetstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatestatefulsetstrategy.go index c1b5dea855..dd0de81a6c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatestatefulsetstrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatestatefulsetstrategy.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateStatefulSetStrategyApplyConfiguration represents an declarative configuration of the RollingUpdateStatefulSetStrategy type for use +// RollingUpdateStatefulSetStrategyApplyConfiguration represents a declarative configuration of the RollingUpdateStatefulSetStrategy type for use // with apply. type RollingUpdateStatefulSetStrategyApplyConfiguration struct { Partition *int32 `json:"partition,omitempty"` MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` } -// RollingUpdateStatefulSetStrategyApplyConfiguration constructs an declarative configuration of the RollingUpdateStatefulSetStrategy type for use with +// RollingUpdateStatefulSetStrategyApplyConfiguration constructs a declarative configuration of the RollingUpdateStatefulSetStrategy type for use with // apply. 
func RollingUpdateStatefulSetStrategy() *RollingUpdateStatefulSetStrategyApplyConfiguration { return &RollingUpdateStatefulSetStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go index 24041d99f8..6f2b340dab 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StatefulSetApplyConfiguration represents an declarative configuration of the StatefulSet type for use +// StatefulSetApplyConfiguration represents a declarative configuration of the StatefulSet type for use // with apply. type StatefulSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type StatefulSetApplyConfiguration struct { Status *StatefulSetStatusApplyConfiguration `json:"status,omitempty"` } -// StatefulSet constructs an declarative configuration of the StatefulSet type for use with +// StatefulSet constructs a declarative configuration of the StatefulSet type for use with // apply. func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration { b := &StatefulSetApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *StatefulSetApplyConfiguration) WithStatus(value *StatefulSetStatusApply b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *StatefulSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go index f9d47850d6..c62a5e854c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// StatefulSetConditionApplyConfiguration represents an declarative configuration of the StatefulSetCondition type for use +// StatefulSetConditionApplyConfiguration represents a declarative configuration of the StatefulSetCondition type for use // with apply. type StatefulSetConditionApplyConfiguration struct { Type *v1.StatefulSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type StatefulSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// StatefulSetConditionApplyConfiguration constructs an declarative configuration of the StatefulSetCondition type for use with +// StatefulSetConditionApplyConfiguration constructs a declarative configuration of the StatefulSetCondition type for use with // apply. func StatefulSetCondition() *StatefulSetConditionApplyConfiguration { return &StatefulSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go index 9778f1c4a0..86f39e16c1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go @@ -18,13 +18,13 @@ limitations under the License. 
package v1 -// StatefulSetOrdinalsApplyConfiguration represents an declarative configuration of the StatefulSetOrdinals type for use +// StatefulSetOrdinalsApplyConfiguration represents a declarative configuration of the StatefulSetOrdinals type for use // with apply. type StatefulSetOrdinalsApplyConfiguration struct { Start *int32 `json:"start,omitempty"` } -// StatefulSetOrdinalsApplyConfiguration constructs an declarative configuration of the StatefulSetOrdinals type for use with +// StatefulSetOrdinalsApplyConfiguration constructs a declarative configuration of the StatefulSetOrdinals type for use with // apply. func StatefulSetOrdinals() *StatefulSetOrdinalsApplyConfiguration { return &StatefulSetOrdinalsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go index ba01d5d3c1..cd65fd4364 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/apps/v1" ) -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use // with apply. type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct { WhenDeleted *v1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` WhenScaled *v1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` } -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with // apply. func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { return &StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go index 81afdca596..1848a963cc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go @@ -24,7 +24,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StatefulSetSpecApplyConfiguration represents an declarative configuration of the StatefulSetSpec type for use +// StatefulSetSpecApplyConfiguration represents a declarative configuration of the StatefulSetSpec type for use // with apply. 
type StatefulSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -40,7 +40,7 @@ type StatefulSetSpecApplyConfiguration struct { Ordinals *StatefulSetOrdinalsApplyConfiguration `json:"ordinals,omitempty"` } -// StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with +// StatefulSetSpecApplyConfiguration constructs a declarative configuration of the StatefulSetSpec type for use with // apply. func StatefulSetSpec() *StatefulSetSpecApplyConfiguration { return &StatefulSetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go index d88881b656..637a1c649d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// StatefulSetStatusApplyConfiguration represents an declarative configuration of the StatefulSetStatus type for use +// StatefulSetStatusApplyConfiguration represents a declarative configuration of the StatefulSetStatus type for use // with apply. type StatefulSetStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -33,7 +33,7 @@ type StatefulSetStatusApplyConfiguration struct { AvailableReplicas *int32 `json:"availableReplicas,omitempty"` } -// StatefulSetStatusApplyConfiguration constructs an declarative configuration of the StatefulSetStatus type for use with +// StatefulSetStatusApplyConfiguration constructs a declarative configuration of the StatefulSetStatus type for use with // apply. func StatefulSetStatus() *StatefulSetStatusApplyConfiguration { return &StatefulSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go index 5268a1e065..b59e107355 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/apps/v1" ) -// StatefulSetUpdateStrategyApplyConfiguration represents an declarative configuration of the StatefulSetUpdateStrategy type for use +// StatefulSetUpdateStrategyApplyConfiguration represents a declarative configuration of the StatefulSetUpdateStrategy type for use // with apply. type StatefulSetUpdateStrategyApplyConfiguration struct { Type *v1.StatefulSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateStatefulSetStrategyApplyConfiguration `json:"rollingUpdate,omitempty"` } -// StatefulSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the StatefulSetUpdateStrategy type for use with +// StatefulSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the StatefulSetUpdateStrategy type for use with // apply. 
func StatefulSetUpdateStrategy() *StatefulSetUpdateStrategyApplyConfiguration { return &StatefulSetUpdateStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go index 827c063598..606de58a1e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ControllerRevisionApplyConfiguration represents an declarative configuration of the ControllerRevision type for use +// ControllerRevisionApplyConfiguration represents a declarative configuration of the ControllerRevision type for use // with apply. type ControllerRevisionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type ControllerRevisionApplyConfiguration struct { Revision *int64 `json:"revision,omitempty"` } -// ControllerRevision constructs an declarative configuration of the ControllerRevision type for use with +// ControllerRevision constructs a declarative configuration of the ControllerRevision type for use with // apply. func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfiguration { b := &ControllerRevisionApplyConfiguration{} @@ -257,3 +257,9 @@ func (b *ControllerRevisionApplyConfiguration) WithRevision(value int64) *Contro b.Revision = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ControllerRevisionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go index e22f76b665..145aaed70d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentApplyConfiguration represents an declarative configuration of the Deployment type for use +// DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use // with apply. type DeploymentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DeploymentApplyConfiguration struct { Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"` } -// Deployment constructs an declarative configuration of the Deployment type for use with +// Deployment constructs a declarative configuration of the Deployment type for use with // apply. func Deployment(name, namespace string) *DeploymentApplyConfiguration { b := &DeploymentApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *DeploymentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go index 9da8ce0899..504dddd94e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DeploymentConditionApplyConfiguration represents an declarative configuration of the DeploymentCondition type for use +// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use // with apply. type DeploymentConditionApplyConfiguration struct { Type *v1beta1.DeploymentConditionType `json:"type,omitempty"` @@ -35,7 +35,7 @@ type DeploymentConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// DeploymentConditionApplyConfiguration constructs an declarative configuration of the DeploymentCondition type for use with +// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with // apply. func DeploymentCondition() *DeploymentConditionApplyConfiguration { return &DeploymentConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentspec.go index 5e18476bdc..5531c756f9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentSpecApplyConfiguration represents an declarative configuration of the DeploymentSpec type for use +// DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use // with apply. type DeploymentSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -37,7 +37,7 @@ type DeploymentSpecApplyConfiguration struct { ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"` } -// DeploymentSpecApplyConfiguration constructs an declarative configuration of the DeploymentSpec type for use with +// DeploymentSpecApplyConfiguration constructs a declarative configuration of the DeploymentSpec type for use with // apply. func DeploymentSpec() *DeploymentSpecApplyConfiguration { return &DeploymentSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstatus.go index f8d1cf5d25..adc023a34d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// DeploymentStatusApplyConfiguration represents an declarative configuration of the DeploymentStatus type for use +// DeploymentStatusApplyConfiguration represents a declarative configuration of the DeploymentStatus type for use // with apply. 
type DeploymentStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -31,7 +31,7 @@ type DeploymentStatusApplyConfiguration struct { CollisionCount *int32 `json:"collisionCount,omitempty"` } -// DeploymentStatusApplyConfiguration constructs an declarative configuration of the DeploymentStatus type for use with +// DeploymentStatusApplyConfiguration constructs a declarative configuration of the DeploymentStatus type for use with // apply. func DeploymentStatus() *DeploymentStatusApplyConfiguration { return &DeploymentStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go index 7279318a88..2c322b4ace 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go @@ -22,14 +22,14 @@ import ( v1beta1 "k8s.io/api/apps/v1beta1" ) -// DeploymentStrategyApplyConfiguration represents an declarative configuration of the DeploymentStrategy type for use +// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use // with apply. type DeploymentStrategyApplyConfiguration struct { Type *v1beta1.DeploymentStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DeploymentStrategyApplyConfiguration constructs an declarative configuration of the DeploymentStrategy type for use with +// DeploymentStrategyApplyConfiguration constructs a declarative configuration of the DeploymentStrategy type for use with // apply. func DeploymentStrategy() *DeploymentStrategyApplyConfiguration { return &DeploymentStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollbackconfig.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollbackconfig.go index 131e57a39d..775f82eef8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollbackconfig.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollbackconfig.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// RollbackConfigApplyConfiguration represents an declarative configuration of the RollbackConfig type for use +// RollbackConfigApplyConfiguration represents a declarative configuration of the RollbackConfig type for use // with apply. type RollbackConfigApplyConfiguration struct { Revision *int64 `json:"revision,omitempty"` } -// RollbackConfigApplyConfiguration constructs an declarative configuration of the RollbackConfig type for use with +// RollbackConfigApplyConfiguration constructs a declarative configuration of the RollbackConfig type for use with // apply. 
func RollbackConfig() *RollbackConfigApplyConfiguration { return &RollbackConfigApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatedeployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatedeployment.go index dde5f064b0..244701a5e0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatedeployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatedeployment.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDeploymentApplyConfiguration represents an declarative configuration of the RollingUpdateDeployment type for use +// RollingUpdateDeploymentApplyConfiguration represents a declarative configuration of the RollingUpdateDeployment type for use // with apply. type RollingUpdateDeploymentApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDeploymentApplyConfiguration constructs an declarative configuration of the RollingUpdateDeployment type for use with +// RollingUpdateDeploymentApplyConfiguration constructs a declarative configuration of the RollingUpdateDeployment type for use with // apply. func RollingUpdateDeployment() *RollingUpdateDeploymentApplyConfiguration { return &RollingUpdateDeploymentApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatestatefulsetstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatestatefulsetstrategy.go index 8989a08d2c..94c2971343 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatestatefulsetstrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatestatefulsetstrategy.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateStatefulSetStrategyApplyConfiguration represents an declarative configuration of the RollingUpdateStatefulSetStrategy type for use +// RollingUpdateStatefulSetStrategyApplyConfiguration represents a declarative configuration of the RollingUpdateStatefulSetStrategy type for use // with apply. type RollingUpdateStatefulSetStrategyApplyConfiguration struct { Partition *int32 `json:"partition,omitempty"` MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` } -// RollingUpdateStatefulSetStrategyApplyConfiguration constructs an declarative configuration of the RollingUpdateStatefulSetStrategy type for use with +// RollingUpdateStatefulSetStrategyApplyConfiguration constructs a declarative configuration of the RollingUpdateStatefulSetStrategy type for use with // apply. 
func RollingUpdateStatefulSetStrategy() *RollingUpdateStatefulSetStrategyApplyConfiguration { return &RollingUpdateStatefulSetStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go index ed5cfab41c..2705938862 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StatefulSetApplyConfiguration represents an declarative configuration of the StatefulSet type for use +// StatefulSetApplyConfiguration represents a declarative configuration of the StatefulSet type for use // with apply. type StatefulSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type StatefulSetApplyConfiguration struct { Status *StatefulSetStatusApplyConfiguration `json:"status,omitempty"` } -// StatefulSet constructs an declarative configuration of the StatefulSet type for use with +// StatefulSet constructs a declarative configuration of the StatefulSet type for use with // apply. func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration { b := &StatefulSetApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *StatefulSetApplyConfiguration) WithStatus(value *StatefulSetStatusApply b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *StatefulSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go index 97e994ab71..8a17391cd2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// StatefulSetConditionApplyConfiguration represents an declarative configuration of the StatefulSetCondition type for use +// StatefulSetConditionApplyConfiguration represents a declarative configuration of the StatefulSetCondition type for use // with apply. type StatefulSetConditionApplyConfiguration struct { Type *v1beta1.StatefulSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type StatefulSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// StatefulSetConditionApplyConfiguration constructs an declarative configuration of the StatefulSetCondition type for use with +// StatefulSetConditionApplyConfiguration constructs a declarative configuration of the StatefulSetCondition type for use with // apply. func StatefulSetCondition() *StatefulSetConditionApplyConfiguration { return &StatefulSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go index 8f349a2d27..2e3049e5e2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go @@ -18,13 +18,13 @@ limitations under the License. 
package v1beta1 -// StatefulSetOrdinalsApplyConfiguration represents an declarative configuration of the StatefulSetOrdinals type for use +// StatefulSetOrdinalsApplyConfiguration represents a declarative configuration of the StatefulSetOrdinals type for use // with apply. type StatefulSetOrdinalsApplyConfiguration struct { Start *int32 `json:"start,omitempty"` } -// StatefulSetOrdinalsApplyConfiguration constructs an declarative configuration of the StatefulSetOrdinals type for use with +// StatefulSetOrdinalsApplyConfiguration constructs a declarative configuration of the StatefulSetOrdinals type for use with // apply. func StatefulSetOrdinals() *StatefulSetOrdinalsApplyConfiguration { return &StatefulSetOrdinalsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go index 0048724c04..69a8ee0f0b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go @@ -22,14 +22,14 @@ import ( v1beta1 "k8s.io/api/apps/v1beta1" ) -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use // with apply. type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct { WhenDeleted *v1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` WhenScaled *v1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` } -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with // apply. func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { return &StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go index 1eb1ba7b03..ac325d717e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go @@ -24,7 +24,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StatefulSetSpecApplyConfiguration represents an declarative configuration of the StatefulSetSpec type for use +// StatefulSetSpecApplyConfiguration represents a declarative configuration of the StatefulSetSpec type for use // with apply. 
type StatefulSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -40,7 +40,7 @@ type StatefulSetSpecApplyConfiguration struct { Ordinals *StatefulSetOrdinalsApplyConfiguration `json:"ordinals,omitempty"` } -// StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with +// StatefulSetSpecApplyConfiguration constructs a declarative configuration of the StatefulSetSpec type for use with // apply. func StatefulSetSpec() *StatefulSetSpecApplyConfiguration { return &StatefulSetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go index f31066b6ff..27ae7540fd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// StatefulSetStatusApplyConfiguration represents an declarative configuration of the StatefulSetStatus type for use +// StatefulSetStatusApplyConfiguration represents a declarative configuration of the StatefulSetStatus type for use // with apply. type StatefulSetStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -33,7 +33,7 @@ type StatefulSetStatusApplyConfiguration struct { AvailableReplicas *int32 `json:"availableReplicas,omitempty"` } -// StatefulSetStatusApplyConfiguration constructs an declarative configuration of the StatefulSetStatus type for use with +// StatefulSetStatusApplyConfiguration constructs a declarative configuration of the StatefulSetStatus type for use with // apply. func StatefulSetStatus() *StatefulSetStatusApplyConfiguration { return &StatefulSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go index 895c1e7f8a..7714ebbb72 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go @@ -22,14 +22,14 @@ import ( v1beta1 "k8s.io/api/apps/v1beta1" ) -// StatefulSetUpdateStrategyApplyConfiguration represents an declarative configuration of the StatefulSetUpdateStrategy type for use +// StatefulSetUpdateStrategyApplyConfiguration represents a declarative configuration of the StatefulSetUpdateStrategy type for use // with apply. type StatefulSetUpdateStrategyApplyConfiguration struct { Type *v1beta1.StatefulSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateStatefulSetStrategyApplyConfiguration `json:"rollingUpdate,omitempty"` } -// StatefulSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the StatefulSetUpdateStrategy type for use with +// StatefulSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the StatefulSetUpdateStrategy type for use with // apply. 
func StatefulSetUpdateStrategy() *StatefulSetUpdateStrategyApplyConfiguration { return &StatefulSetUpdateStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go index 4abab6851c..5f75a45510 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ControllerRevisionApplyConfiguration represents an declarative configuration of the ControllerRevision type for use +// ControllerRevisionApplyConfiguration represents a declarative configuration of the ControllerRevision type for use // with apply. type ControllerRevisionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type ControllerRevisionApplyConfiguration struct { Revision *int64 `json:"revision,omitempty"` } -// ControllerRevision constructs an declarative configuration of the ControllerRevision type for use with +// ControllerRevision constructs a declarative configuration of the ControllerRevision type for use with // apply. func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfiguration { b := &ControllerRevisionApplyConfiguration{} @@ -257,3 +257,9 @@ func (b *ControllerRevisionApplyConfiguration) WithRevision(value int64) *Contro b.Revision = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ControllerRevisionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go index 906a8ca46e..9ffda6182e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DaemonSetApplyConfiguration represents an declarative configuration of the DaemonSet type for use +// DaemonSetApplyConfiguration represents a declarative configuration of the DaemonSet type for use // with apply. type DaemonSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DaemonSetApplyConfiguration struct { Status *DaemonSetStatusApplyConfiguration `json:"status,omitempty"` } -// DaemonSet constructs an declarative configuration of the DaemonSet type for use with +// DaemonSet constructs a declarative configuration of the DaemonSet type for use with // apply. func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration { b := &DaemonSetApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *DaemonSetApplyConfiguration) WithStatus(value *DaemonSetStatusApplyConf b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *DaemonSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go index 55dc1f4877..8315050f0f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DaemonSetConditionApplyConfiguration represents an declarative configuration of the DaemonSetCondition type for use +// DaemonSetConditionApplyConfiguration represents a declarative configuration of the DaemonSetCondition type for use // with apply. type DaemonSetConditionApplyConfiguration struct { Type *v1beta2.DaemonSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type DaemonSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// DaemonSetConditionApplyConfiguration constructs an declarative configuration of the DaemonSetCondition type for use with +// DaemonSetConditionApplyConfiguration constructs a declarative configuration of the DaemonSetCondition type for use with // apply. func DaemonSetCondition() *DaemonSetConditionApplyConfiguration { return &DaemonSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetspec.go index 48137819af..74d8bf51c6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DaemonSetSpecApplyConfiguration represents an declarative configuration of the DaemonSetSpec type for use +// DaemonSetSpecApplyConfiguration represents a declarative configuration of the DaemonSetSpec type for use // with apply. type DaemonSetSpecApplyConfiguration struct { Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` @@ -33,7 +33,7 @@ type DaemonSetSpecApplyConfiguration struct { RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` } -// DaemonSetSpecApplyConfiguration constructs an declarative configuration of the DaemonSetSpec type for use with +// DaemonSetSpecApplyConfiguration constructs a declarative configuration of the DaemonSetSpec type for use with // apply. func DaemonSetSpec() *DaemonSetSpecApplyConfiguration { return &DaemonSetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetstatus.go index 29cda7a90e..6b0fda8953 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// DaemonSetStatusApplyConfiguration represents an declarative configuration of the DaemonSetStatus type for use +// DaemonSetStatusApplyConfiguration represents a declarative configuration of the DaemonSetStatus type for use // with apply. 
type DaemonSetStatusApplyConfiguration struct { CurrentNumberScheduled *int32 `json:"currentNumberScheduled,omitempty"` @@ -33,7 +33,7 @@ type DaemonSetStatusApplyConfiguration struct { Conditions []DaemonSetConditionApplyConfiguration `json:"conditions,omitempty"` } -// DaemonSetStatusApplyConfiguration constructs an declarative configuration of the DaemonSetStatus type for use with +// DaemonSetStatusApplyConfiguration constructs a declarative configuration of the DaemonSetStatus type for use with // apply. func DaemonSetStatus() *DaemonSetStatusApplyConfiguration { return &DaemonSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go index 07fc07fc6a..7d66f1da43 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go @@ -22,14 +22,14 @@ import ( v1beta2 "k8s.io/api/apps/v1beta2" ) -// DaemonSetUpdateStrategyApplyConfiguration represents an declarative configuration of the DaemonSetUpdateStrategy type for use +// DaemonSetUpdateStrategyApplyConfiguration represents a declarative configuration of the DaemonSetUpdateStrategy type for use // with apply. type DaemonSetUpdateStrategyApplyConfiguration struct { Type *v1beta2.DaemonSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DaemonSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the DaemonSetUpdateStrategy type for use with +// DaemonSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the DaemonSetUpdateStrategy type for use with // apply. func DaemonSetUpdateStrategy() *DaemonSetUpdateStrategyApplyConfiguration { return &DaemonSetUpdateStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go index 7e39e67510..485da788af 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentApplyConfiguration represents an declarative configuration of the Deployment type for use +// DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use // with apply. type DeploymentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DeploymentApplyConfiguration struct { Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"` } -// Deployment constructs an declarative configuration of the Deployment type for use with +// Deployment constructs a declarative configuration of the Deployment type for use with // apply. func Deployment(name, namespace string) *DeploymentApplyConfiguration { b := &DeploymentApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *DeploymentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go index 852a2c6832..1924278741 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DeploymentConditionApplyConfiguration represents an declarative configuration of the DeploymentCondition type for use +// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use // with apply. type DeploymentConditionApplyConfiguration struct { Type *v1beta2.DeploymentConditionType `json:"type,omitempty"` @@ -35,7 +35,7 @@ type DeploymentConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// DeploymentConditionApplyConfiguration constructs an declarative configuration of the DeploymentCondition type for use with +// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with // apply. func DeploymentCondition() *DeploymentConditionApplyConfiguration { return &DeploymentConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentspec.go index 6898941ace..1b55130c6b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentSpecApplyConfiguration represents an declarative configuration of the DeploymentSpec type for use +// DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use // with apply. type DeploymentSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -36,7 +36,7 @@ type DeploymentSpecApplyConfiguration struct { ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"` } -// DeploymentSpecApplyConfiguration constructs an declarative configuration of the DeploymentSpec type for use with +// DeploymentSpecApplyConfiguration constructs a declarative configuration of the DeploymentSpec type for use with // apply. func DeploymentSpec() *DeploymentSpecApplyConfiguration { return &DeploymentSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstatus.go index fe99ca9917..5fa9122332 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// DeploymentStatusApplyConfiguration represents an declarative configuration of the DeploymentStatus type for use +// DeploymentStatusApplyConfiguration represents a declarative configuration of the DeploymentStatus type for use // with apply. 
type DeploymentStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -31,7 +31,7 @@ type DeploymentStatusApplyConfiguration struct { CollisionCount *int32 `json:"collisionCount,omitempty"` } -// DeploymentStatusApplyConfiguration constructs an declarative configuration of the DeploymentStatus type for use with +// DeploymentStatusApplyConfiguration constructs a declarative configuration of the DeploymentStatus type for use with // apply. func DeploymentStatus() *DeploymentStatusApplyConfiguration { return &DeploymentStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go index 8714e153e4..c769436ee0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go @@ -22,14 +22,14 @@ import ( v1beta2 "k8s.io/api/apps/v1beta2" ) -// DeploymentStrategyApplyConfiguration represents an declarative configuration of the DeploymentStrategy type for use +// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use // with apply. type DeploymentStrategyApplyConfiguration struct { Type *v1beta2.DeploymentStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DeploymentStrategyApplyConfiguration constructs an declarative configuration of the DeploymentStrategy type for use with +// DeploymentStrategyApplyConfiguration constructs a declarative configuration of the DeploymentStrategy type for use with // apply. func DeploymentStrategy() *DeploymentStrategyApplyConfiguration { return &DeploymentStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go index d9303e1b22..d8608aa51c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicaSetApplyConfiguration represents an declarative configuration of the ReplicaSet type for use +// ReplicaSetApplyConfiguration represents a declarative configuration of the ReplicaSet type for use // with apply. type ReplicaSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ReplicaSetApplyConfiguration struct { Status *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"` } -// ReplicaSet constructs an declarative configuration of the ReplicaSet type for use with +// ReplicaSet constructs a declarative configuration of the ReplicaSet type for use with // apply. func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration { b := &ReplicaSetApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *ReplicaSetApplyConfiguration) WithStatus(value *ReplicaSetStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ReplicaSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go index 47776bfa2e..beec546f7c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ReplicaSetConditionApplyConfiguration represents an declarative configuration of the ReplicaSetCondition type for use +// ReplicaSetConditionApplyConfiguration represents a declarative configuration of the ReplicaSetCondition type for use // with apply. type ReplicaSetConditionApplyConfiguration struct { Type *v1beta2.ReplicaSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type ReplicaSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// ReplicaSetConditionApplyConfiguration constructs an declarative configuration of the ReplicaSetCondition type for use with +// ReplicaSetConditionApplyConfiguration constructs a declarative configuration of the ReplicaSetCondition type for use with // apply. func ReplicaSetCondition() *ReplicaSetConditionApplyConfiguration { return &ReplicaSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetspec.go index 14d548169e..1d77b9e0fd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicaSetSpecApplyConfiguration represents an declarative configuration of the ReplicaSetSpec type for use +// ReplicaSetSpecApplyConfiguration represents a declarative configuration of the ReplicaSetSpec type for use // with apply. type ReplicaSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -32,7 +32,7 @@ type ReplicaSetSpecApplyConfiguration struct { Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` } -// ReplicaSetSpecApplyConfiguration constructs an declarative configuration of the ReplicaSetSpec type for use with +// ReplicaSetSpecApplyConfiguration constructs a declarative configuration of the ReplicaSetSpec type for use with // apply. func ReplicaSetSpec() *ReplicaSetSpecApplyConfiguration { return &ReplicaSetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetstatus.go index 7c1b8fb29d..d3c92e274d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// ReplicaSetStatusApplyConfiguration represents an declarative configuration of the ReplicaSetStatus type for use +// ReplicaSetStatusApplyConfiguration represents a declarative configuration of the ReplicaSetStatus type for use // with apply. 
type ReplicaSetStatusApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -29,7 +29,7 @@ type ReplicaSetStatusApplyConfiguration struct { Conditions []ReplicaSetConditionApplyConfiguration `json:"conditions,omitempty"` } -// ReplicaSetStatusApplyConfiguration constructs an declarative configuration of the ReplicaSetStatus type for use with +// ReplicaSetStatusApplyConfiguration constructs a declarative configuration of the ReplicaSetStatus type for use with // apply. func ReplicaSetStatus() *ReplicaSetStatusApplyConfiguration { return &ReplicaSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedaemonset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedaemonset.go index b586b678d4..ad6021d37a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedaemonset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedaemonset.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDaemonSetApplyConfiguration represents an declarative configuration of the RollingUpdateDaemonSet type for use +// RollingUpdateDaemonSetApplyConfiguration represents a declarative configuration of the RollingUpdateDaemonSet type for use // with apply. type RollingUpdateDaemonSetApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDaemonSetApplyConfiguration constructs an declarative configuration of the RollingUpdateDaemonSet type for use with +// RollingUpdateDaemonSetApplyConfiguration constructs a declarative configuration of the RollingUpdateDaemonSet type for use with // apply. func RollingUpdateDaemonSet() *RollingUpdateDaemonSetApplyConfiguration { return &RollingUpdateDaemonSetApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedeployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedeployment.go index 78ef210081..b0cc3a4ee4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedeployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedeployment.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDeploymentApplyConfiguration represents an declarative configuration of the RollingUpdateDeployment type for use +// RollingUpdateDeploymentApplyConfiguration represents a declarative configuration of the RollingUpdateDeployment type for use // with apply. type RollingUpdateDeploymentApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDeploymentApplyConfiguration constructs an declarative configuration of the RollingUpdateDeployment type for use with +// RollingUpdateDeploymentApplyConfiguration constructs a declarative configuration of the RollingUpdateDeployment type for use with // apply. 
func RollingUpdateDeployment() *RollingUpdateDeploymentApplyConfiguration { return &RollingUpdateDeploymentApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatestatefulsetstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatestatefulsetstrategy.go index 4a12e51c0a..0046c264bb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatestatefulsetstrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatestatefulsetstrategy.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateStatefulSetStrategyApplyConfiguration represents an declarative configuration of the RollingUpdateStatefulSetStrategy type for use +// RollingUpdateStatefulSetStrategyApplyConfiguration represents a declarative configuration of the RollingUpdateStatefulSetStrategy type for use // with apply. type RollingUpdateStatefulSetStrategyApplyConfiguration struct { Partition *int32 `json:"partition,omitempty"` MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` } -// RollingUpdateStatefulSetStrategyApplyConfiguration constructs an declarative configuration of the RollingUpdateStatefulSetStrategy type for use with +// RollingUpdateStatefulSetStrategyApplyConfiguration constructs a declarative configuration of the RollingUpdateStatefulSetStrategy type for use with // apply. func RollingUpdateStatefulSetStrategy() *RollingUpdateStatefulSetStrategyApplyConfiguration { return &RollingUpdateStatefulSetStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go index 0e89668cb3..126ab2d8bd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go @@ -25,7 +25,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ScaleApplyConfiguration represents an declarative configuration of the Scale type for use +// ScaleApplyConfiguration represents a declarative configuration of the Scale type for use // with apply. type ScaleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -34,7 +34,7 @@ type ScaleApplyConfiguration struct { Status *v1beta2.ScaleStatus `json:"status,omitempty"` } -// ScaleApplyConfiguration constructs an declarative configuration of the Scale type for use with +// ScaleApplyConfiguration constructs a declarative configuration of the Scale type for use with // apply. func Scale() *ScaleApplyConfiguration { b := &ScaleApplyConfiguration{} @@ -216,3 +216,9 @@ func (b *ScaleApplyConfiguration) WithStatus(value v1beta2.ScaleStatus) *ScaleAp b.Status = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ScaleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go index 03d5428b4b..3d2b5d1917 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StatefulSetApplyConfiguration represents an declarative configuration of the StatefulSet type for use +// StatefulSetApplyConfiguration represents a declarative configuration of the StatefulSet type for use // with apply. type StatefulSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type StatefulSetApplyConfiguration struct { Status *StatefulSetStatusApplyConfiguration `json:"status,omitempty"` } -// StatefulSet constructs an declarative configuration of the StatefulSet type for use with +// StatefulSet constructs a declarative configuration of the StatefulSet type for use with // apply. func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration { b := &StatefulSetApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *StatefulSetApplyConfiguration) WithStatus(value *StatefulSetStatusApply b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *StatefulSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go index c33e68b5e2..aa45db686e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// StatefulSetConditionApplyConfiguration represents an declarative configuration of the StatefulSetCondition type for use +// StatefulSetConditionApplyConfiguration represents a declarative configuration of the StatefulSetCondition type for use // with apply. type StatefulSetConditionApplyConfiguration struct { Type *v1beta2.StatefulSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type StatefulSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// StatefulSetConditionApplyConfiguration constructs an declarative configuration of the StatefulSetCondition type for use with +// StatefulSetConditionApplyConfiguration constructs a declarative configuration of the StatefulSetCondition type for use with // apply. func StatefulSetCondition() *StatefulSetConditionApplyConfiguration { return &StatefulSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go index c586da775c..a899243a5a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go @@ -18,13 +18,13 @@ limitations under the License. 
package v1beta2 -// StatefulSetOrdinalsApplyConfiguration represents an declarative configuration of the StatefulSetOrdinals type for use +// StatefulSetOrdinalsApplyConfiguration represents a declarative configuration of the StatefulSetOrdinals type for use // with apply. type StatefulSetOrdinalsApplyConfiguration struct { Start *int32 `json:"start,omitempty"` } -// StatefulSetOrdinalsApplyConfiguration constructs an declarative configuration of the StatefulSetOrdinals type for use with +// StatefulSetOrdinalsApplyConfiguration constructs a declarative configuration of the StatefulSetOrdinals type for use with // apply. func StatefulSetOrdinals() *StatefulSetOrdinalsApplyConfiguration { return &StatefulSetOrdinalsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go index aee27803d3..318e5f4642 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go @@ -22,14 +22,14 @@ import ( v1beta2 "k8s.io/api/apps/v1beta2" ) -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use // with apply. type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct { WhenDeleted *v1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` WhenScaled *v1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` } -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with // apply. func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { return &StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go index b6165fbd9a..bebf80c896 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go @@ -24,7 +24,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StatefulSetSpecApplyConfiguration represents an declarative configuration of the StatefulSetSpec type for use +// StatefulSetSpecApplyConfiguration represents a declarative configuration of the StatefulSetSpec type for use // with apply. 
type StatefulSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -40,7 +40,7 @@ type StatefulSetSpecApplyConfiguration struct { Ordinals *StatefulSetOrdinalsApplyConfiguration `json:"ordinals,omitempty"` } -// StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with +// StatefulSetSpecApplyConfiguration constructs a declarative configuration of the StatefulSetSpec type for use with // apply. func StatefulSetSpec() *StatefulSetSpecApplyConfiguration { return &StatefulSetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go index 63835904c1..a647cd7d26 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// StatefulSetStatusApplyConfiguration represents an declarative configuration of the StatefulSetStatus type for use +// StatefulSetStatusApplyConfiguration represents a declarative configuration of the StatefulSetStatus type for use // with apply. type StatefulSetStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -33,7 +33,7 @@ type StatefulSetStatusApplyConfiguration struct { AvailableReplicas *int32 `json:"availableReplicas,omitempty"` } -// StatefulSetStatusApplyConfiguration constructs an declarative configuration of the StatefulSetStatus type for use with +// StatefulSetStatusApplyConfiguration constructs a declarative configuration of the StatefulSetStatus type for use with // apply. func StatefulSetStatus() *StatefulSetStatusApplyConfiguration { return &StatefulSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go index 03c2914917..81d4ba1df3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go @@ -22,14 +22,14 @@ import ( v1beta2 "k8s.io/api/apps/v1beta2" ) -// StatefulSetUpdateStrategyApplyConfiguration represents an declarative configuration of the StatefulSetUpdateStrategy type for use +// StatefulSetUpdateStrategyApplyConfiguration represents a declarative configuration of the StatefulSetUpdateStrategy type for use // with apply. type StatefulSetUpdateStrategyApplyConfiguration struct { Type *v1beta2.StatefulSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateStatefulSetStrategyApplyConfiguration `json:"rollingUpdate,omitempty"` } -// StatefulSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the StatefulSetUpdateStrategy type for use with +// StatefulSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the StatefulSetUpdateStrategy type for use with // apply. 
func StatefulSetUpdateStrategy() *StatefulSetUpdateStrategyApplyConfiguration { return &StatefulSetUpdateStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/crossversionobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/crossversionobjectreference.go index 0eac22692c..51ec665012 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/crossversionobjectreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/crossversionobjectreference.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CrossVersionObjectReferenceApplyConfiguration represents an declarative configuration of the CrossVersionObjectReference type for use +// CrossVersionObjectReferenceApplyConfiguration represents a declarative configuration of the CrossVersionObjectReference type for use // with apply. type CrossVersionObjectReferenceApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -26,7 +26,7 @@ type CrossVersionObjectReferenceApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` } -// CrossVersionObjectReferenceApplyConfiguration constructs an declarative configuration of the CrossVersionObjectReference type for use with +// CrossVersionObjectReferenceApplyConfiguration constructs a declarative configuration of the CrossVersionObjectReference type for use with // apply. func CrossVersionObjectReference() *CrossVersionObjectReferenceApplyConfiguration { return &CrossVersionObjectReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go index 38fa205841..8150635ee6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// HorizontalPodAutoscalerApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscaler type for use +// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use // with apply. type HorizontalPodAutoscalerApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type HorizontalPodAutoscalerApplyConfiguration struct { Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"` } -// HorizontalPodAutoscaler constructs an declarative configuration of the HorizontalPodAutoscaler type for use with +// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with // apply. func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApplyConfiguration { b := &HorizontalPodAutoscalerApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerspec.go index 561ac60d35..0ca2f84ea9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// HorizontalPodAutoscalerSpecApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerSpec type for use +// HorizontalPodAutoscalerSpecApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerSpec type for use // with apply. type HorizontalPodAutoscalerSpecApplyConfiguration struct { ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"` @@ -27,7 +27,7 @@ type HorizontalPodAutoscalerSpecApplyConfiguration struct { TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty"` } -// HorizontalPodAutoscalerSpecApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerSpec type for use with +// HorizontalPodAutoscalerSpecApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerSpec type for use with // apply. func HorizontalPodAutoscalerSpec() *HorizontalPodAutoscalerSpecApplyConfiguration { return &HorizontalPodAutoscalerSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go index abc2e05aa7..fcb231c3be 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerStatusApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerStatus type for use +// HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use // with apply. type HorizontalPodAutoscalerStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -32,7 +32,7 @@ type HorizontalPodAutoscalerStatusApplyConfiguration struct { CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty"` } -// HorizontalPodAutoscalerStatusApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerStatus type for use with +// HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with // apply. 
func HorizontalPodAutoscalerStatus() *HorizontalPodAutoscalerStatusApplyConfiguration { return &HorizontalPodAutoscalerStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go index f770922803..40f3db8c5a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go @@ -24,7 +24,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ScaleApplyConfiguration represents an declarative configuration of the Scale type for use +// ScaleApplyConfiguration represents a declarative configuration of the Scale type for use // with apply. type ScaleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -33,7 +33,7 @@ type ScaleApplyConfiguration struct { Status *ScaleStatusApplyConfiguration `json:"status,omitempty"` } -// ScaleApplyConfiguration constructs an declarative configuration of the Scale type for use with +// ScaleApplyConfiguration constructs a declarative configuration of the Scale type for use with // apply. func Scale() *ScaleApplyConfiguration { b := &ScaleApplyConfiguration{} @@ -215,3 +215,9 @@ func (b *ScaleApplyConfiguration) WithStatus(value *ScaleStatusApplyConfiguratio b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ScaleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go index 2339a8fef2..025004ba5f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// ScaleSpecApplyConfiguration represents an declarative configuration of the ScaleSpec type for use +// ScaleSpecApplyConfiguration represents a declarative configuration of the ScaleSpec type for use // with apply. type ScaleSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` } -// ScaleSpecApplyConfiguration constructs an declarative configuration of the ScaleSpec type for use with +// ScaleSpecApplyConfiguration constructs a declarative configuration of the ScaleSpec type for use with // apply. func ScaleSpec() *ScaleSpecApplyConfiguration { return &ScaleSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go index 81c8d1b30a..51f96d2357 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ScaleStatusApplyConfiguration represents an declarative configuration of the ScaleStatus type for use +// ScaleStatusApplyConfiguration represents a declarative configuration of the ScaleStatus type for use // with apply. 
type ScaleStatusApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` Selector *string `json:"selector,omitempty"` } -// ScaleStatusApplyConfiguration constructs an declarative configuration of the ScaleStatus type for use with +// ScaleStatusApplyConfiguration constructs a declarative configuration of the ScaleStatus type for use with // apply. func ScaleStatus() *ScaleStatusApplyConfiguration { return &ScaleStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go index 15ef216d1b..b6e071e848 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ContainerResourceMetricSourceApplyConfiguration represents an declarative configuration of the ContainerResourceMetricSource type for use +// ContainerResourceMetricSourceApplyConfiguration represents a declarative configuration of the ContainerResourceMetricSource type for use // with apply. type ContainerResourceMetricSourceApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -30,7 +30,7 @@ type ContainerResourceMetricSourceApplyConfiguration struct { Container *string `json:"container,omitempty"` } -// ContainerResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricSource type for use with +// ContainerResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricSource type for use with // apply. func ContainerResourceMetricSource() *ContainerResourceMetricSourceApplyConfiguration { return &ContainerResourceMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go index 34213bca3f..46bd2bac20 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ContainerResourceMetricStatusApplyConfiguration represents an declarative configuration of the ContainerResourceMetricStatus type for use +// ContainerResourceMetricStatusApplyConfiguration represents a declarative configuration of the ContainerResourceMetricStatus type for use // with apply. type ContainerResourceMetricStatusApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -30,7 +30,7 @@ type ContainerResourceMetricStatusApplyConfiguration struct { Container *string `json:"container,omitempty"` } -// ContainerResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricStatus type for use with +// ContainerResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricStatus type for use with // apply. 
func ContainerResourceMetricStatus() *ContainerResourceMetricStatusApplyConfiguration { return &ContainerResourceMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go index 19045706dc..645f098577 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go @@ -18,7 +18,7 @@ limitations under the License. package v2 -// CrossVersionObjectReferenceApplyConfiguration represents an declarative configuration of the CrossVersionObjectReference type for use +// CrossVersionObjectReferenceApplyConfiguration represents a declarative configuration of the CrossVersionObjectReference type for use // with apply. type CrossVersionObjectReferenceApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -26,7 +26,7 @@ type CrossVersionObjectReferenceApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` } -// CrossVersionObjectReferenceApplyConfiguration constructs an declarative configuration of the CrossVersionObjectReference type for use with +// CrossVersionObjectReferenceApplyConfiguration constructs a declarative configuration of the CrossVersionObjectReference type for use with // apply. func CrossVersionObjectReference() *CrossVersionObjectReferenceApplyConfiguration { return &CrossVersionObjectReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go index 11a8eff263..a9c45b31a0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go @@ -18,14 +18,14 @@ limitations under the License. package v2 -// ExternalMetricSourceApplyConfiguration represents an declarative configuration of the ExternalMetricSource type for use +// ExternalMetricSourceApplyConfiguration represents a declarative configuration of the ExternalMetricSource type for use // with apply. type ExternalMetricSourceApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Target *MetricTargetApplyConfiguration `json:"target,omitempty"` } -// ExternalMetricSourceApplyConfiguration constructs an declarative configuration of the ExternalMetricSource type for use with +// ExternalMetricSourceApplyConfiguration constructs a declarative configuration of the ExternalMetricSource type for use with // apply. func ExternalMetricSource() *ExternalMetricSourceApplyConfiguration { return &ExternalMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go index 3b1a0329b8..4280086f5e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go @@ -18,14 +18,14 @@ limitations under the License. 
package v2 -// ExternalMetricStatusApplyConfiguration represents an declarative configuration of the ExternalMetricStatus type for use +// ExternalMetricStatusApplyConfiguration represents a declarative configuration of the ExternalMetricStatus type for use // with apply. type ExternalMetricStatusApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` } -// ExternalMetricStatusApplyConfiguration constructs an declarative configuration of the ExternalMetricStatus type for use with +// ExternalMetricStatusApplyConfiguration constructs a declarative configuration of the ExternalMetricStatus type for use with // apply. func ExternalMetricStatus() *ExternalMetricStatusApplyConfiguration { return &ExternalMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go index 31061de85e..e26b530c18 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// HorizontalPodAutoscalerApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscaler type for use +// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use // with apply. type HorizontalPodAutoscalerApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type HorizontalPodAutoscalerApplyConfiguration struct { Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"` } -// HorizontalPodAutoscaler constructs an declarative configuration of the HorizontalPodAutoscaler type for use with +// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with // apply. func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApplyConfiguration { b := &HorizontalPodAutoscalerApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go index e6fdabd7c8..05750cc21d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go @@ -18,14 +18,14 @@ limitations under the License. package v2 -// HorizontalPodAutoscalerBehaviorApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerBehavior type for use +// HorizontalPodAutoscalerBehaviorApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerBehavior type for use // with apply. 
type HorizontalPodAutoscalerBehaviorApplyConfiguration struct { ScaleUp *HPAScalingRulesApplyConfiguration `json:"scaleUp,omitempty"` ScaleDown *HPAScalingRulesApplyConfiguration `json:"scaleDown,omitempty"` } -// HorizontalPodAutoscalerBehaviorApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerBehavior type for use with +// HorizontalPodAutoscalerBehaviorApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerBehavior type for use with // apply. func HorizontalPodAutoscalerBehavior() *HorizontalPodAutoscalerBehaviorApplyConfiguration { return &HorizontalPodAutoscalerBehaviorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go index c020eccd3d..844c6dc862 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerConditionApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerCondition type for use +// HorizontalPodAutoscalerConditionApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerCondition type for use // with apply. type HorizontalPodAutoscalerConditionApplyConfiguration struct { Type *v2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type HorizontalPodAutoscalerConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// HorizontalPodAutoscalerConditionApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerCondition type for use with +// HorizontalPodAutoscalerConditionApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerCondition type for use with // apply. func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyConfiguration { return &HorizontalPodAutoscalerConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go index c36bc3f225..e34ababc58 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go @@ -18,7 +18,7 @@ limitations under the License. package v2 -// HorizontalPodAutoscalerSpecApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerSpec type for use +// HorizontalPodAutoscalerSpecApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerSpec type for use // with apply. 
type HorizontalPodAutoscalerSpecApplyConfiguration struct { ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"` @@ -28,7 +28,7 @@ type HorizontalPodAutoscalerSpecApplyConfiguration struct { Behavior *HorizontalPodAutoscalerBehaviorApplyConfiguration `json:"behavior,omitempty"` } -// HorizontalPodAutoscalerSpecApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerSpec type for use with +// HorizontalPodAutoscalerSpecApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerSpec type for use with // apply. func HorizontalPodAutoscalerSpec() *HorizontalPodAutoscalerSpecApplyConfiguration { return &HorizontalPodAutoscalerSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go index d4d551df85..f1a2c3f4e9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerStatusApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerStatus type for use +// HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use // with apply. type HorizontalPodAutoscalerStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -33,7 +33,7 @@ type HorizontalPodAutoscalerStatusApplyConfiguration struct { Conditions []HorizontalPodAutoscalerConditionApplyConfiguration `json:"conditions,omitempty"` } -// HorizontalPodAutoscalerStatusApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerStatus type for use with +// HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with // apply. func HorizontalPodAutoscalerStatus() *HorizontalPodAutoscalerStatusApplyConfiguration { return &HorizontalPodAutoscalerStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go index 139f0fb5c7..b8b735747b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go @@ -22,7 +22,7 @@ import ( v2 "k8s.io/api/autoscaling/v2" ) -// HPAScalingPolicyApplyConfiguration represents an declarative configuration of the HPAScalingPolicy type for use +// HPAScalingPolicyApplyConfiguration represents a declarative configuration of the HPAScalingPolicy type for use // with apply. type HPAScalingPolicyApplyConfiguration struct { Type *v2.HPAScalingPolicyType `json:"type,omitempty"` @@ -30,7 +30,7 @@ type HPAScalingPolicyApplyConfiguration struct { PeriodSeconds *int32 `json:"periodSeconds,omitempty"` } -// HPAScalingPolicyApplyConfiguration constructs an declarative configuration of the HPAScalingPolicy type for use with +// HPAScalingPolicyApplyConfiguration constructs a declarative configuration of the HPAScalingPolicy type for use with // apply. 
func HPAScalingPolicy() *HPAScalingPolicyApplyConfiguration { return &HPAScalingPolicyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go index e768076aa4..c7020f77bd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go @@ -22,7 +22,7 @@ import ( v2 "k8s.io/api/autoscaling/v2" ) -// HPAScalingRulesApplyConfiguration represents an declarative configuration of the HPAScalingRules type for use +// HPAScalingRulesApplyConfiguration represents a declarative configuration of the HPAScalingRules type for use // with apply. type HPAScalingRulesApplyConfiguration struct { StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty"` @@ -30,7 +30,7 @@ type HPAScalingRulesApplyConfiguration struct { Policies []HPAScalingPolicyApplyConfiguration `json:"policies,omitempty"` } -// HPAScalingRulesApplyConfiguration constructs an declarative configuration of the HPAScalingRules type for use with +// HPAScalingRulesApplyConfiguration constructs a declarative configuration of the HPAScalingRules type for use with // apply. func HPAScalingRules() *HPAScalingRulesApplyConfiguration { return &HPAScalingRulesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go index 312ad3ddd6..2f99f7d0b4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MetricIdentifierApplyConfiguration represents an declarative configuration of the MetricIdentifier type for use +// MetricIdentifierApplyConfiguration represents a declarative configuration of the MetricIdentifier type for use // with apply. type MetricIdentifierApplyConfiguration struct { Name *string `json:"name,omitempty"` Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` } -// MetricIdentifierApplyConfiguration constructs an declarative configuration of the MetricIdentifier type for use with +// MetricIdentifierApplyConfiguration constructs a declarative configuration of the MetricIdentifier type for use with // apply. func MetricIdentifier() *MetricIdentifierApplyConfiguration { return &MetricIdentifierApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go index 094ead6c16..89e6b5c68b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go @@ -22,7 +22,7 @@ import ( v2 "k8s.io/api/autoscaling/v2" ) -// MetricSpecApplyConfiguration represents an declarative configuration of the MetricSpec type for use +// MetricSpecApplyConfiguration represents a declarative configuration of the MetricSpec type for use // with apply. 
type MetricSpecApplyConfiguration struct { Type *v2.MetricSourceType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type MetricSpecApplyConfiguration struct { External *ExternalMetricSourceApplyConfiguration `json:"external,omitempty"` } -// MetricSpecApplyConfiguration constructs an declarative configuration of the MetricSpec type for use with +// MetricSpecApplyConfiguration constructs a declarative configuration of the MetricSpec type for use with // apply. func MetricSpec() *MetricSpecApplyConfiguration { return &MetricSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go index c65ad446f0..86ae3348b6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go @@ -22,7 +22,7 @@ import ( v2 "k8s.io/api/autoscaling/v2" ) -// MetricStatusApplyConfiguration represents an declarative configuration of the MetricStatus type for use +// MetricStatusApplyConfiguration represents a declarative configuration of the MetricStatus type for use // with apply. type MetricStatusApplyConfiguration struct { Type *v2.MetricSourceType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type MetricStatusApplyConfiguration struct { External *ExternalMetricStatusApplyConfiguration `json:"external,omitempty"` } -// MetricStatusApplyConfiguration constructs an declarative configuration of the MetricStatus type for use with +// MetricStatusApplyConfiguration constructs a declarative configuration of the MetricStatus type for use with // apply. func MetricStatus() *MetricStatusApplyConfiguration { return &MetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go index f301e4d2be..bf68a1c346 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go @@ -23,7 +23,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// MetricTargetApplyConfiguration represents an declarative configuration of the MetricTarget type for use +// MetricTargetApplyConfiguration represents a declarative configuration of the MetricTarget type for use // with apply. type MetricTargetApplyConfiguration struct { Type *v2.MetricTargetType `json:"type,omitempty"` @@ -32,7 +32,7 @@ type MetricTargetApplyConfiguration struct { AverageUtilization *int32 `json:"averageUtilization,omitempty"` } -// MetricTargetApplyConfiguration constructs an declarative configuration of the MetricTarget type for use with +// MetricTargetApplyConfiguration constructs a declarative configuration of the MetricTarget type for use with // apply. 
func MetricTarget() *MetricTargetApplyConfiguration { return &MetricTargetApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go index e8474b1890..59732548b8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go @@ -22,7 +22,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// MetricValueStatusApplyConfiguration represents an declarative configuration of the MetricValueStatus type for use +// MetricValueStatusApplyConfiguration represents a declarative configuration of the MetricValueStatus type for use // with apply. type MetricValueStatusApplyConfiguration struct { Value *resource.Quantity `json:"value,omitempty"` @@ -30,7 +30,7 @@ type MetricValueStatusApplyConfiguration struct { AverageUtilization *int32 `json:"averageUtilization,omitempty"` } -// MetricValueStatusApplyConfiguration constructs an declarative configuration of the MetricValueStatus type for use with +// MetricValueStatusApplyConfiguration constructs a declarative configuration of the MetricValueStatus type for use with // apply. func MetricValueStatus() *MetricValueStatusApplyConfiguration { return &MetricValueStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go index a9482565e0..2391fa5c22 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go @@ -18,7 +18,7 @@ limitations under the License. package v2 -// ObjectMetricSourceApplyConfiguration represents an declarative configuration of the ObjectMetricSource type for use +// ObjectMetricSourceApplyConfiguration represents a declarative configuration of the ObjectMetricSource type for use // with apply. type ObjectMetricSourceApplyConfiguration struct { DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"` @@ -26,7 +26,7 @@ type ObjectMetricSourceApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` } -// ObjectMetricSourceApplyConfiguration constructs an declarative configuration of the ObjectMetricSource type for use with +// ObjectMetricSourceApplyConfiguration constructs a declarative configuration of the ObjectMetricSource type for use with // apply. func ObjectMetricSource() *ObjectMetricSourceApplyConfiguration { return &ObjectMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go index 70ba43bedd..9ffd0c180d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v2 -// ObjectMetricStatusApplyConfiguration represents an declarative configuration of the ObjectMetricStatus type for use +// ObjectMetricStatusApplyConfiguration represents a declarative configuration of the ObjectMetricStatus type for use // with apply. 
type ObjectMetricStatusApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` @@ -26,7 +26,7 @@ type ObjectMetricStatusApplyConfiguration struct { DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"` } -// ObjectMetricStatusApplyConfiguration constructs an declarative configuration of the ObjectMetricStatus type for use with +// ObjectMetricStatusApplyConfiguration constructs a declarative configuration of the ObjectMetricStatus type for use with // apply. func ObjectMetricStatus() *ObjectMetricStatusApplyConfiguration { return &ObjectMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go index 0a7a5c2595..28a35a2ae1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go @@ -18,14 +18,14 @@ limitations under the License. package v2 -// PodsMetricSourceApplyConfiguration represents an declarative configuration of the PodsMetricSource type for use +// PodsMetricSourceApplyConfiguration represents a declarative configuration of the PodsMetricSource type for use // with apply. type PodsMetricSourceApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Target *MetricTargetApplyConfiguration `json:"target,omitempty"` } -// PodsMetricSourceApplyConfiguration constructs an declarative configuration of the PodsMetricSource type for use with +// PodsMetricSourceApplyConfiguration constructs a declarative configuration of the PodsMetricSource type for use with // apply. func PodsMetricSource() *PodsMetricSourceApplyConfiguration { return &PodsMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go index 865fcc33e3..4614282ce1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go @@ -18,14 +18,14 @@ limitations under the License. package v2 -// PodsMetricStatusApplyConfiguration represents an declarative configuration of the PodsMetricStatus type for use +// PodsMetricStatusApplyConfiguration represents a declarative configuration of the PodsMetricStatus type for use // with apply. type PodsMetricStatusApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` } -// PodsMetricStatusApplyConfiguration constructs an declarative configuration of the PodsMetricStatus type for use with +// PodsMetricStatusApplyConfiguration constructs a declarative configuration of the PodsMetricStatus type for use with // apply. 
func PodsMetricStatus() *PodsMetricStatusApplyConfiguration { return &PodsMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go index 25a065fef6..ffc9042b9f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// ResourceMetricSourceApplyConfiguration represents an declarative configuration of the ResourceMetricSource type for use +// ResourceMetricSourceApplyConfiguration represents a declarative configuration of the ResourceMetricSource type for use // with apply. type ResourceMetricSourceApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` Target *MetricTargetApplyConfiguration `json:"target,omitempty"` } -// ResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ResourceMetricSource type for use with +// ResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ResourceMetricSource type for use with // apply. func ResourceMetricSource() *ResourceMetricSourceApplyConfiguration { return &ResourceMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go index fb5625afab..0fdbfcb555 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// ResourceMetricStatusApplyConfiguration represents an declarative configuration of the ResourceMetricStatus type for use +// ResourceMetricStatusApplyConfiguration represents a declarative configuration of the ResourceMetricStatus type for use // with apply. type ResourceMetricStatusApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` } -// ResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ResourceMetricStatus type for use with +// ResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ResourceMetricStatus type for use with // apply. func ResourceMetricStatus() *ResourceMetricStatusApplyConfiguration { return &ResourceMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricsource.go index 2594e8e072..f41c5af10f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricsource.go @@ -23,7 +23,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// ContainerResourceMetricSourceApplyConfiguration represents an declarative configuration of the ContainerResourceMetricSource type for use +// ContainerResourceMetricSourceApplyConfiguration represents a declarative configuration of the ContainerResourceMetricSource type for use // with apply. 
type ContainerResourceMetricSourceApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -32,7 +32,7 @@ type ContainerResourceMetricSourceApplyConfiguration struct { Container *string `json:"container,omitempty"` } -// ContainerResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricSource type for use with +// ContainerResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricSource type for use with // apply. func ContainerResourceMetricSource() *ContainerResourceMetricSourceApplyConfiguration { return &ContainerResourceMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricstatus.go index ae897237c4..4cd56eea37 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricstatus.go @@ -23,7 +23,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// ContainerResourceMetricStatusApplyConfiguration represents an declarative configuration of the ContainerResourceMetricStatus type for use +// ContainerResourceMetricStatusApplyConfiguration represents a declarative configuration of the ContainerResourceMetricStatus type for use // with apply. type ContainerResourceMetricStatusApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -32,7 +32,7 @@ type ContainerResourceMetricStatusApplyConfiguration struct { Container *string `json:"container,omitempty"` } -// ContainerResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricStatus type for use with +// ContainerResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricStatus type for use with // apply. func ContainerResourceMetricStatus() *ContainerResourceMetricStatusApplyConfiguration { return &ContainerResourceMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/crossversionobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/crossversionobjectreference.go index fe3d15e866..f03261612e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/crossversionobjectreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/crossversionobjectreference.go @@ -18,7 +18,7 @@ limitations under the License. package v2beta1 -// CrossVersionObjectReferenceApplyConfiguration represents an declarative configuration of the CrossVersionObjectReference type for use +// CrossVersionObjectReferenceApplyConfiguration represents a declarative configuration of the CrossVersionObjectReference type for use // with apply. type CrossVersionObjectReferenceApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -26,7 +26,7 @@ type CrossVersionObjectReferenceApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` } -// CrossVersionObjectReferenceApplyConfiguration constructs an declarative configuration of the CrossVersionObjectReference type for use with +// CrossVersionObjectReferenceApplyConfiguration constructs a declarative configuration of the CrossVersionObjectReference type for use with // apply. 
func CrossVersionObjectReference() *CrossVersionObjectReferenceApplyConfiguration { return &CrossVersionObjectReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricsource.go index c118e6ca1e..8dce4529dd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricsource.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ExternalMetricSourceApplyConfiguration represents an declarative configuration of the ExternalMetricSource type for use +// ExternalMetricSourceApplyConfiguration represents a declarative configuration of the ExternalMetricSource type for use // with apply. type ExternalMetricSourceApplyConfiguration struct { MetricName *string `json:"metricName,omitempty"` @@ -32,7 +32,7 @@ type ExternalMetricSourceApplyConfiguration struct { TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty"` } -// ExternalMetricSourceApplyConfiguration constructs an declarative configuration of the ExternalMetricSource type for use with +// ExternalMetricSourceApplyConfiguration constructs a declarative configuration of the ExternalMetricSource type for use with // apply. func ExternalMetricSource() *ExternalMetricSourceApplyConfiguration { return &ExternalMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricstatus.go index ab771214e2..4034d7e55c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricstatus.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ExternalMetricStatusApplyConfiguration represents an declarative configuration of the ExternalMetricStatus type for use +// ExternalMetricStatusApplyConfiguration represents a declarative configuration of the ExternalMetricStatus type for use // with apply. type ExternalMetricStatusApplyConfiguration struct { MetricName *string `json:"metricName,omitempty"` @@ -32,7 +32,7 @@ type ExternalMetricStatusApplyConfiguration struct { CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty"` } -// ExternalMetricStatusApplyConfiguration constructs an declarative configuration of the ExternalMetricStatus type for use with +// ExternalMetricStatusApplyConfiguration constructs a declarative configuration of the ExternalMetricStatus type for use with // apply. 
func ExternalMetricStatus() *ExternalMetricStatusApplyConfiguration { return &ExternalMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go index 66b8d5f738..93e37eaffa 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// HorizontalPodAutoscalerApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscaler type for use +// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use // with apply. type HorizontalPodAutoscalerApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type HorizontalPodAutoscalerApplyConfiguration struct { Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"` } -// HorizontalPodAutoscaler constructs an declarative configuration of the HorizontalPodAutoscaler type for use with +// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with // apply. func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApplyConfiguration { b := &HorizontalPodAutoscalerApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go index de3e6ea5cd..8bb82298d1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerConditionApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerCondition type for use +// HorizontalPodAutoscalerConditionApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerCondition type for use // with apply. type HorizontalPodAutoscalerConditionApplyConfiguration struct { Type *v2beta1.HorizontalPodAutoscalerConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type HorizontalPodAutoscalerConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// HorizontalPodAutoscalerConditionApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerCondition type for use with +// HorizontalPodAutoscalerConditionApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerCondition type for use with // apply. 
func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyConfiguration { return &HorizontalPodAutoscalerConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerspec.go index 761d94a850..6f111ceafd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerspec.go @@ -18,7 +18,7 @@ limitations under the License. package v2beta1 -// HorizontalPodAutoscalerSpecApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerSpec type for use +// HorizontalPodAutoscalerSpecApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerSpec type for use // with apply. type HorizontalPodAutoscalerSpecApplyConfiguration struct { ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"` @@ -27,7 +27,7 @@ type HorizontalPodAutoscalerSpecApplyConfiguration struct { Metrics []MetricSpecApplyConfiguration `json:"metrics,omitempty"` } -// HorizontalPodAutoscalerSpecApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerSpec type for use with +// HorizontalPodAutoscalerSpecApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerSpec type for use with // apply. func HorizontalPodAutoscalerSpec() *HorizontalPodAutoscalerSpecApplyConfiguration { return &HorizontalPodAutoscalerSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerstatus.go index 95ec5be43b..391b577258 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerStatusApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerStatus type for use +// HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use // with apply. type HorizontalPodAutoscalerStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -33,7 +33,7 @@ type HorizontalPodAutoscalerStatusApplyConfiguration struct { Conditions []HorizontalPodAutoscalerConditionApplyConfiguration `json:"conditions,omitempty"` } -// HorizontalPodAutoscalerStatusApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerStatus type for use with +// HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with // apply. 
func HorizontalPodAutoscalerStatus() *HorizontalPodAutoscalerStatusApplyConfiguration { return &HorizontalPodAutoscalerStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go index 70beec84e0..961e2c5b48 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go @@ -22,7 +22,7 @@ import ( v2beta1 "k8s.io/api/autoscaling/v2beta1" ) -// MetricSpecApplyConfiguration represents an declarative configuration of the MetricSpec type for use +// MetricSpecApplyConfiguration represents a declarative configuration of the MetricSpec type for use // with apply. type MetricSpecApplyConfiguration struct { Type *v2beta1.MetricSourceType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type MetricSpecApplyConfiguration struct { External *ExternalMetricSourceApplyConfiguration `json:"external,omitempty"` } -// MetricSpecApplyConfiguration constructs an declarative configuration of the MetricSpec type for use with +// MetricSpecApplyConfiguration constructs a declarative configuration of the MetricSpec type for use with // apply. func MetricSpec() *MetricSpecApplyConfiguration { return &MetricSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go index b03ea2f9e4..587b5a1f88 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go @@ -22,7 +22,7 @@ import ( v2beta1 "k8s.io/api/autoscaling/v2beta1" ) -// MetricStatusApplyConfiguration represents an declarative configuration of the MetricStatus type for use +// MetricStatusApplyConfiguration represents a declarative configuration of the MetricStatus type for use // with apply. type MetricStatusApplyConfiguration struct { Type *v2beta1.MetricSourceType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type MetricStatusApplyConfiguration struct { External *ExternalMetricStatusApplyConfiguration `json:"external,omitempty"` } -// MetricStatusApplyConfiguration constructs an declarative configuration of the MetricStatus type for use with +// MetricStatusApplyConfiguration constructs a declarative configuration of the MetricStatus type for use with // apply. func MetricStatus() *MetricStatusApplyConfiguration { return &MetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricsource.go index 07d467972e..a9e2eead4d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricsource.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ObjectMetricSourceApplyConfiguration represents an declarative configuration of the ObjectMetricSource type for use +// ObjectMetricSourceApplyConfiguration represents a declarative configuration of the ObjectMetricSource type for use // with apply. 
type ObjectMetricSourceApplyConfiguration struct { Target *CrossVersionObjectReferenceApplyConfiguration `json:"target,omitempty"` @@ -33,7 +33,7 @@ type ObjectMetricSourceApplyConfiguration struct { AverageValue *resource.Quantity `json:"averageValue,omitempty"` } -// ObjectMetricSourceApplyConfiguration constructs an declarative configuration of the ObjectMetricSource type for use with +// ObjectMetricSourceApplyConfiguration constructs a declarative configuration of the ObjectMetricSource type for use with // apply. func ObjectMetricSource() *ObjectMetricSourceApplyConfiguration { return &ObjectMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricstatus.go index b5e0d3e3d2..4d3be8df6c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricstatus.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ObjectMetricStatusApplyConfiguration represents an declarative configuration of the ObjectMetricStatus type for use +// ObjectMetricStatusApplyConfiguration represents a declarative configuration of the ObjectMetricStatus type for use // with apply. type ObjectMetricStatusApplyConfiguration struct { Target *CrossVersionObjectReferenceApplyConfiguration `json:"target,omitempty"` @@ -33,7 +33,7 @@ type ObjectMetricStatusApplyConfiguration struct { AverageValue *resource.Quantity `json:"averageValue,omitempty"` } -// ObjectMetricStatusApplyConfiguration constructs an declarative configuration of the ObjectMetricStatus type for use with +// ObjectMetricStatusApplyConfiguration constructs a declarative configuration of the ObjectMetricStatus type for use with // apply. func ObjectMetricStatus() *ObjectMetricStatusApplyConfiguration { return &ObjectMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricsource.go index a4122b8989..cfcd752e24 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricsource.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodsMetricSourceApplyConfiguration represents an declarative configuration of the PodsMetricSource type for use +// PodsMetricSourceApplyConfiguration represents a declarative configuration of the PodsMetricSource type for use // with apply. type PodsMetricSourceApplyConfiguration struct { MetricName *string `json:"metricName,omitempty"` @@ -31,7 +31,7 @@ type PodsMetricSourceApplyConfiguration struct { Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` } -// PodsMetricSourceApplyConfiguration constructs an declarative configuration of the PodsMetricSource type for use with +// PodsMetricSourceApplyConfiguration constructs a declarative configuration of the PodsMetricSource type for use with // apply. 
func PodsMetricSource() *PodsMetricSourceApplyConfiguration { return &PodsMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricstatus.go index d6172011b7..f7a7777fd4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricstatus.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodsMetricStatusApplyConfiguration represents an declarative configuration of the PodsMetricStatus type for use +// PodsMetricStatusApplyConfiguration represents a declarative configuration of the PodsMetricStatus type for use // with apply. type PodsMetricStatusApplyConfiguration struct { MetricName *string `json:"metricName,omitempty"` @@ -31,7 +31,7 @@ type PodsMetricStatusApplyConfiguration struct { Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` } -// PodsMetricStatusApplyConfiguration constructs an declarative configuration of the PodsMetricStatus type for use with +// PodsMetricStatusApplyConfiguration constructs a declarative configuration of the PodsMetricStatus type for use with // apply. func PodsMetricStatus() *PodsMetricStatusApplyConfiguration { return &PodsMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricsource.go index 804f3f4926..ad97d83c3c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricsource.go @@ -23,7 +23,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// ResourceMetricSourceApplyConfiguration represents an declarative configuration of the ResourceMetricSource type for use +// ResourceMetricSourceApplyConfiguration represents a declarative configuration of the ResourceMetricSource type for use // with apply. type ResourceMetricSourceApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -31,7 +31,7 @@ type ResourceMetricSourceApplyConfiguration struct { TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty"` } -// ResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ResourceMetricSource type for use with +// ResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ResourceMetricSource type for use with // apply. 
func ResourceMetricSource() *ResourceMetricSourceApplyConfiguration { return &ResourceMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricstatus.go index 5fdc29c132..78fbeaad06 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricstatus.go @@ -23,7 +23,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// ResourceMetricStatusApplyConfiguration represents an declarative configuration of the ResourceMetricStatus type for use +// ResourceMetricStatusApplyConfiguration represents a declarative configuration of the ResourceMetricStatus type for use // with apply. type ResourceMetricStatusApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -31,7 +31,7 @@ type ResourceMetricStatusApplyConfiguration struct { CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty"` } -// ResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ResourceMetricStatus type for use with +// ResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ResourceMetricStatus type for use with // apply. func ResourceMetricStatus() *ResourceMetricStatusApplyConfiguration { return &ResourceMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricsource.go index aa334744ea..1050165ea3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricsource.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ContainerResourceMetricSourceApplyConfiguration represents an declarative configuration of the ContainerResourceMetricSource type for use +// ContainerResourceMetricSourceApplyConfiguration represents a declarative configuration of the ContainerResourceMetricSource type for use // with apply. type ContainerResourceMetricSourceApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -30,7 +30,7 @@ type ContainerResourceMetricSourceApplyConfiguration struct { Container *string `json:"container,omitempty"` } -// ContainerResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricSource type for use with +// ContainerResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricSource type for use with // apply. 
func ContainerResourceMetricSource() *ContainerResourceMetricSourceApplyConfiguration { return &ContainerResourceMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricstatus.go index bf0822a066..708f68bc6b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ContainerResourceMetricStatusApplyConfiguration represents an declarative configuration of the ContainerResourceMetricStatus type for use +// ContainerResourceMetricStatusApplyConfiguration represents a declarative configuration of the ContainerResourceMetricStatus type for use // with apply. type ContainerResourceMetricStatusApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -30,7 +30,7 @@ type ContainerResourceMetricStatusApplyConfiguration struct { Container *string `json:"container,omitempty"` } -// ContainerResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricStatus type for use with +// ContainerResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricStatus type for use with // apply. func ContainerResourceMetricStatus() *ContainerResourceMetricStatusApplyConfiguration { return &ContainerResourceMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/crossversionobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/crossversionobjectreference.go index 2903629bc8..c281084b16 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/crossversionobjectreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/crossversionobjectreference.go @@ -18,7 +18,7 @@ limitations under the License. package v2beta2 -// CrossVersionObjectReferenceApplyConfiguration represents an declarative configuration of the CrossVersionObjectReference type for use +// CrossVersionObjectReferenceApplyConfiguration represents a declarative configuration of the CrossVersionObjectReference type for use // with apply. type CrossVersionObjectReferenceApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -26,7 +26,7 @@ type CrossVersionObjectReferenceApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` } -// CrossVersionObjectReferenceApplyConfiguration constructs an declarative configuration of the CrossVersionObjectReference type for use with +// CrossVersionObjectReferenceApplyConfiguration constructs a declarative configuration of the CrossVersionObjectReference type for use with // apply. 
func CrossVersionObjectReference() *CrossVersionObjectReferenceApplyConfiguration { return &CrossVersionObjectReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricsource.go index 80053a6b33..d34ca11494 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricsource.go @@ -18,14 +18,14 @@ limitations under the License. package v2beta2 -// ExternalMetricSourceApplyConfiguration represents an declarative configuration of the ExternalMetricSource type for use +// ExternalMetricSourceApplyConfiguration represents a declarative configuration of the ExternalMetricSource type for use // with apply. type ExternalMetricSourceApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Target *MetricTargetApplyConfiguration `json:"target,omitempty"` } -// ExternalMetricSourceApplyConfiguration constructs an declarative configuration of the ExternalMetricSource type for use with +// ExternalMetricSourceApplyConfiguration constructs a declarative configuration of the ExternalMetricSource type for use with // apply. func ExternalMetricSource() *ExternalMetricSourceApplyConfiguration { return &ExternalMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricstatus.go index 71ac35adbc..be29e607fa 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricstatus.go @@ -18,14 +18,14 @@ limitations under the License. package v2beta2 -// ExternalMetricStatusApplyConfiguration represents an declarative configuration of the ExternalMetricStatus type for use +// ExternalMetricStatusApplyConfiguration represents a declarative configuration of the ExternalMetricStatus type for use // with apply. type ExternalMetricStatusApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` } -// ExternalMetricStatusApplyConfiguration constructs an declarative configuration of the ExternalMetricStatus type for use with +// ExternalMetricStatusApplyConfiguration constructs a declarative configuration of the ExternalMetricStatus type for use with // apply. 
func ExternalMetricStatus() *ExternalMetricStatusApplyConfiguration { return &ExternalMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go index 1c750cb164..ce666f0f3e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// HorizontalPodAutoscalerApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscaler type for use +// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use // with apply. type HorizontalPodAutoscalerApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type HorizontalPodAutoscalerApplyConfiguration struct { Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"` } -// HorizontalPodAutoscaler constructs an declarative configuration of the HorizontalPodAutoscaler type for use with +// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with // apply. func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApplyConfiguration { b := &HorizontalPodAutoscalerApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerbehavior.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerbehavior.go index ec41bfadea..e9b1a9fb9e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerbehavior.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerbehavior.go @@ -18,14 +18,14 @@ limitations under the License. package v2beta2 -// HorizontalPodAutoscalerBehaviorApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerBehavior type for use +// HorizontalPodAutoscalerBehaviorApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerBehavior type for use // with apply. type HorizontalPodAutoscalerBehaviorApplyConfiguration struct { ScaleUp *HPAScalingRulesApplyConfiguration `json:"scaleUp,omitempty"` ScaleDown *HPAScalingRulesApplyConfiguration `json:"scaleDown,omitempty"` } -// HorizontalPodAutoscalerBehaviorApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerBehavior type for use with +// HorizontalPodAutoscalerBehaviorApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerBehavior type for use with // apply. 
func HorizontalPodAutoscalerBehavior() *HorizontalPodAutoscalerBehaviorApplyConfiguration { return &HorizontalPodAutoscalerBehaviorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go index 0f0cae75d3..a73e7ebaa8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerConditionApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerCondition type for use +// HorizontalPodAutoscalerConditionApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerCondition type for use // with apply. type HorizontalPodAutoscalerConditionApplyConfiguration struct { Type *v2beta2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type HorizontalPodAutoscalerConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// HorizontalPodAutoscalerConditionApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerCondition type for use with +// HorizontalPodAutoscalerConditionApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerCondition type for use with // apply. func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyConfiguration { return &HorizontalPodAutoscalerConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerspec.go index c60adee581..9629e4bd59 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerspec.go @@ -18,7 +18,7 @@ limitations under the License. package v2beta2 -// HorizontalPodAutoscalerSpecApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerSpec type for use +// HorizontalPodAutoscalerSpecApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerSpec type for use // with apply. type HorizontalPodAutoscalerSpecApplyConfiguration struct { ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"` @@ -28,7 +28,7 @@ type HorizontalPodAutoscalerSpecApplyConfiguration struct { Behavior *HorizontalPodAutoscalerBehaviorApplyConfiguration `json:"behavior,omitempty"` } -// HorizontalPodAutoscalerSpecApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerSpec type for use with +// HorizontalPodAutoscalerSpecApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerSpec type for use with // apply. 
func HorizontalPodAutoscalerSpec() *HorizontalPodAutoscalerSpecApplyConfiguration { return &HorizontalPodAutoscalerSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerstatus.go index 881a874e51..1eee645050 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerStatusApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerStatus type for use +// HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use // with apply. type HorizontalPodAutoscalerStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -33,7 +33,7 @@ type HorizontalPodAutoscalerStatusApplyConfiguration struct { Conditions []HorizontalPodAutoscalerConditionApplyConfiguration `json:"conditions,omitempty"` } -// HorizontalPodAutoscalerStatusApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerStatus type for use with +// HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with // apply. func HorizontalPodAutoscalerStatus() *HorizontalPodAutoscalerStatusApplyConfiguration { return &HorizontalPodAutoscalerStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go index 2a535891af..b799f99e0d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go @@ -22,7 +22,7 @@ import ( v2beta2 "k8s.io/api/autoscaling/v2beta2" ) -// HPAScalingPolicyApplyConfiguration represents an declarative configuration of the HPAScalingPolicy type for use +// HPAScalingPolicyApplyConfiguration represents a declarative configuration of the HPAScalingPolicy type for use // with apply. type HPAScalingPolicyApplyConfiguration struct { Type *v2beta2.HPAScalingPolicyType `json:"type,omitempty"` @@ -30,7 +30,7 @@ type HPAScalingPolicyApplyConfiguration struct { PeriodSeconds *int32 `json:"periodSeconds,omitempty"` } -// HPAScalingPolicyApplyConfiguration constructs an declarative configuration of the HPAScalingPolicy type for use with +// HPAScalingPolicyApplyConfiguration constructs a declarative configuration of the HPAScalingPolicy type for use with // apply. 
func HPAScalingPolicy() *HPAScalingPolicyApplyConfiguration { return &HPAScalingPolicyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go index 57c917b894..f7e8d9ae3e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go @@ -22,7 +22,7 @@ import ( v2beta2 "k8s.io/api/autoscaling/v2beta2" ) -// HPAScalingRulesApplyConfiguration represents an declarative configuration of the HPAScalingRules type for use +// HPAScalingRulesApplyConfiguration represents a declarative configuration of the HPAScalingRules type for use // with apply. type HPAScalingRulesApplyConfiguration struct { StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty"` @@ -30,7 +30,7 @@ type HPAScalingRulesApplyConfiguration struct { Policies []HPAScalingPolicyApplyConfiguration `json:"policies,omitempty"` } -// HPAScalingRulesApplyConfiguration constructs an declarative configuration of the HPAScalingRules type for use with +// HPAScalingRulesApplyConfiguration constructs a declarative configuration of the HPAScalingRules type for use with // apply. func HPAScalingRules() *HPAScalingRulesApplyConfiguration { return &HPAScalingRulesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricidentifier.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricidentifier.go index 70cbd4e815..e8b2abb0e6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricidentifier.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricidentifier.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MetricIdentifierApplyConfiguration represents an declarative configuration of the MetricIdentifier type for use +// MetricIdentifierApplyConfiguration represents a declarative configuration of the MetricIdentifier type for use // with apply. type MetricIdentifierApplyConfiguration struct { Name *string `json:"name,omitempty"` Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` } -// MetricIdentifierApplyConfiguration constructs an declarative configuration of the MetricIdentifier type for use with +// MetricIdentifierApplyConfiguration constructs a declarative configuration of the MetricIdentifier type for use with // apply. func MetricIdentifier() *MetricIdentifierApplyConfiguration { return &MetricIdentifierApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go index 1e7ee1419d..3ec7108618 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go @@ -22,7 +22,7 @@ import ( v2beta2 "k8s.io/api/autoscaling/v2beta2" ) -// MetricSpecApplyConfiguration represents an declarative configuration of the MetricSpec type for use +// MetricSpecApplyConfiguration represents a declarative configuration of the MetricSpec type for use // with apply. 
type MetricSpecApplyConfiguration struct { Type *v2beta2.MetricSourceType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type MetricSpecApplyConfiguration struct { External *ExternalMetricSourceApplyConfiguration `json:"external,omitempty"` } -// MetricSpecApplyConfiguration constructs an declarative configuration of the MetricSpec type for use with +// MetricSpecApplyConfiguration constructs a declarative configuration of the MetricSpec type for use with // apply. func MetricSpec() *MetricSpecApplyConfiguration { return &MetricSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go index 353ec6d943..40d32795bc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go @@ -22,7 +22,7 @@ import ( v2beta2 "k8s.io/api/autoscaling/v2beta2" ) -// MetricStatusApplyConfiguration represents an declarative configuration of the MetricStatus type for use +// MetricStatusApplyConfiguration represents a declarative configuration of the MetricStatus type for use // with apply. type MetricStatusApplyConfiguration struct { Type *v2beta2.MetricSourceType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type MetricStatusApplyConfiguration struct { External *ExternalMetricStatusApplyConfiguration `json:"external,omitempty"` } -// MetricStatusApplyConfiguration constructs an declarative configuration of the MetricStatus type for use with +// MetricStatusApplyConfiguration constructs a declarative configuration of the MetricStatus type for use with // apply. func MetricStatus() *MetricStatusApplyConfiguration { return &MetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go index fbf006a5a6..aeec3102ee 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go @@ -23,7 +23,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// MetricTargetApplyConfiguration represents an declarative configuration of the MetricTarget type for use +// MetricTargetApplyConfiguration represents a declarative configuration of the MetricTarget type for use // with apply. type MetricTargetApplyConfiguration struct { Type *v2beta2.MetricTargetType `json:"type,omitempty"` @@ -32,7 +32,7 @@ type MetricTargetApplyConfiguration struct { AverageUtilization *int32 `json:"averageUtilization,omitempty"` } -// MetricTargetApplyConfiguration constructs an declarative configuration of the MetricTarget type for use with +// MetricTargetApplyConfiguration constructs a declarative configuration of the MetricTarget type for use with // apply. 
func MetricTarget() *MetricTargetApplyConfiguration { return &MetricTargetApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricvaluestatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricvaluestatus.go index 5796a0b4c1..cc409fc283 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricvaluestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricvaluestatus.go @@ -22,7 +22,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// MetricValueStatusApplyConfiguration represents an declarative configuration of the MetricValueStatus type for use +// MetricValueStatusApplyConfiguration represents a declarative configuration of the MetricValueStatus type for use // with apply. type MetricValueStatusApplyConfiguration struct { Value *resource.Quantity `json:"value,omitempty"` @@ -30,7 +30,7 @@ type MetricValueStatusApplyConfiguration struct { AverageUtilization *int32 `json:"averageUtilization,omitempty"` } -// MetricValueStatusApplyConfiguration constructs an declarative configuration of the MetricValueStatus type for use with +// MetricValueStatusApplyConfiguration constructs a declarative configuration of the MetricValueStatus type for use with // apply. func MetricValueStatus() *MetricValueStatusApplyConfiguration { return &MetricValueStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricsource.go index eed31dab61..17b492fa06 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricsource.go @@ -18,7 +18,7 @@ limitations under the License. package v2beta2 -// ObjectMetricSourceApplyConfiguration represents an declarative configuration of the ObjectMetricSource type for use +// ObjectMetricSourceApplyConfiguration represents a declarative configuration of the ObjectMetricSource type for use // with apply. type ObjectMetricSourceApplyConfiguration struct { DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"` @@ -26,7 +26,7 @@ type ObjectMetricSourceApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` } -// ObjectMetricSourceApplyConfiguration constructs an declarative configuration of the ObjectMetricSource type for use with +// ObjectMetricSourceApplyConfiguration constructs a declarative configuration of the ObjectMetricSource type for use with // apply. func ObjectMetricSource() *ObjectMetricSourceApplyConfiguration { return &ObjectMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricstatus.go index 175e2120d6..e87417f2e7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v2beta2 -// ObjectMetricStatusApplyConfiguration represents an declarative configuration of the ObjectMetricStatus type for use +// ObjectMetricStatusApplyConfiguration represents a declarative configuration of the ObjectMetricStatus type for use // with apply. 
type ObjectMetricStatusApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` @@ -26,7 +26,7 @@ type ObjectMetricStatusApplyConfiguration struct { DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"` } -// ObjectMetricStatusApplyConfiguration constructs an declarative configuration of the ObjectMetricStatus type for use with +// ObjectMetricStatusApplyConfiguration constructs a declarative configuration of the ObjectMetricStatus type for use with // apply. func ObjectMetricStatus() *ObjectMetricStatusApplyConfiguration { return &ObjectMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricsource.go index 0365880950..6ecbb18071 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricsource.go @@ -18,14 +18,14 @@ limitations under the License. package v2beta2 -// PodsMetricSourceApplyConfiguration represents an declarative configuration of the PodsMetricSource type for use +// PodsMetricSourceApplyConfiguration represents a declarative configuration of the PodsMetricSource type for use // with apply. type PodsMetricSourceApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Target *MetricTargetApplyConfiguration `json:"target,omitempty"` } -// PodsMetricSourceApplyConfiguration constructs an declarative configuration of the PodsMetricSource type for use with +// PodsMetricSourceApplyConfiguration constructs a declarative configuration of the PodsMetricSource type for use with // apply. func PodsMetricSource() *PodsMetricSourceApplyConfiguration { return &PodsMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricstatus.go index e6f98be8c4..cd10297261 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricstatus.go @@ -18,14 +18,14 @@ limitations under the License. package v2beta2 -// PodsMetricStatusApplyConfiguration represents an declarative configuration of the PodsMetricStatus type for use +// PodsMetricStatusApplyConfiguration represents a declarative configuration of the PodsMetricStatus type for use // with apply. type PodsMetricStatusApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` } -// PodsMetricStatusApplyConfiguration constructs an declarative configuration of the PodsMetricStatus type for use with +// PodsMetricStatusApplyConfiguration constructs a declarative configuration of the PodsMetricStatus type for use with // apply. 
func PodsMetricStatus() *PodsMetricStatusApplyConfiguration { return &PodsMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricsource.go index cc8118d5e3..c482d75f4b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricsource.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// ResourceMetricSourceApplyConfiguration represents an declarative configuration of the ResourceMetricSource type for use +// ResourceMetricSourceApplyConfiguration represents a declarative configuration of the ResourceMetricSource type for use // with apply. type ResourceMetricSourceApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` Target *MetricTargetApplyConfiguration `json:"target,omitempty"` } -// ResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ResourceMetricSource type for use with +// ResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ResourceMetricSource type for use with // apply. func ResourceMetricSource() *ResourceMetricSourceApplyConfiguration { return &ResourceMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricstatus.go index 0ab56be0f7..eb13e90b7d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricstatus.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// ResourceMetricStatusApplyConfiguration represents an declarative configuration of the ResourceMetricStatus type for use +// ResourceMetricStatusApplyConfiguration represents a declarative configuration of the ResourceMetricStatus type for use // with apply. type ResourceMetricStatusApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` } -// ResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ResourceMetricStatus type for use with +// ResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ResourceMetricStatus type for use with // apply. func ResourceMetricStatus() *ResourceMetricStatusApplyConfiguration { return &ResourceMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go index 5225a5a079..8b26816e58 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CronJobApplyConfiguration represents an declarative configuration of the CronJob type for use +// CronJobApplyConfiguration represents a declarative configuration of the CronJob type for use // with apply. 
type CronJobApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type CronJobApplyConfiguration struct { Status *CronJobStatusApplyConfiguration `json:"status,omitempty"` } -// CronJob constructs an declarative configuration of the CronJob type for use with +// CronJob constructs a declarative configuration of the CronJob type for use with // apply. func CronJob(name, namespace string) *CronJobApplyConfiguration { b := &CronJobApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *CronJobApplyConfiguration) WithStatus(value *CronJobStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CronJobApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go index 22a34dcb61..62f9b5298b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/batch/v1" ) -// CronJobSpecApplyConfiguration represents an declarative configuration of the CronJobSpec type for use +// CronJobSpecApplyConfiguration represents a declarative configuration of the CronJobSpec type for use // with apply. type CronJobSpecApplyConfiguration struct { Schedule *string `json:"schedule,omitempty"` @@ -35,7 +35,7 @@ type CronJobSpecApplyConfiguration struct { FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty"` } -// CronJobSpecApplyConfiguration constructs an declarative configuration of the CronJobSpec type for use with +// CronJobSpecApplyConfiguration constructs a declarative configuration of the CronJobSpec type for use with // apply. func CronJobSpec() *CronJobSpecApplyConfiguration { return &CronJobSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go index b7cc2bdfb5..095dfe017f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// CronJobStatusApplyConfiguration represents an declarative configuration of the CronJobStatus type for use +// CronJobStatusApplyConfiguration represents a declarative configuration of the CronJobStatus type for use // with apply. type CronJobStatusApplyConfiguration struct { Active []v1.ObjectReferenceApplyConfiguration `json:"active,omitempty"` @@ -31,7 +31,7 @@ type CronJobStatusApplyConfiguration struct { LastSuccessfulTime *metav1.Time `json:"lastSuccessfulTime,omitempty"` } -// CronJobStatusApplyConfiguration constructs an declarative configuration of the CronJobStatus type for use with +// CronJobStatusApplyConfiguration constructs a declarative configuration of the CronJobStatus type for use with // apply. 
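Aside on the GetName accessors this bump adds to the batch, certificates, and coordination apply configurations above and below: they lazily ensure the embedded ObjectMeta exists and return a pointer to its Name field, so they are safe to call on a freshly constructed builder. A minimal sketch follows; the CronJob name and namespace are illustrative placeholders, not values taken from this change.

package main

import (
	"fmt"

	batchv1ac "k8s.io/client-go/applyconfigurations/batch/v1"
)

func main() {
	// CronJob(name, namespace) pre-populates Name via WithName, so GetName
	// returns a non-nil pointer here.
	cj := batchv1ac.CronJob("example-cronjob", "default")

	if name := cj.GetName(); name != nil {
		fmt.Println(*name) // prints "example-cronjob"
	}
}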
func CronJobStatus() *CronJobStatusApplyConfiguration { return &CronJobStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go index fb10ba3968..1333e91844 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// JobApplyConfiguration represents an declarative configuration of the Job type for use +// JobApplyConfiguration represents a declarative configuration of the Job type for use // with apply. type JobApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type JobApplyConfiguration struct { Status *JobStatusApplyConfiguration `json:"status,omitempty"` } -// Job constructs an declarative configuration of the Job type for use with +// Job constructs a declarative configuration of the Job type for use with // apply. func Job(name, namespace string) *JobApplyConfiguration { b := &JobApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *JobApplyConfiguration) WithStatus(value *JobStatusApplyConfiguration) * b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *JobApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go index 388ca7a1c0..4f15bc6045 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// JobConditionApplyConfiguration represents an declarative configuration of the JobCondition type for use +// JobConditionApplyConfiguration represents a declarative configuration of the JobCondition type for use // with apply. type JobConditionApplyConfiguration struct { Type *v1.JobConditionType `json:"type,omitempty"` @@ -35,7 +35,7 @@ type JobConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// JobConditionApplyConfiguration constructs an declarative configuration of the JobCondition type for use with +// JobConditionApplyConfiguration constructs a declarative configuration of the JobCondition type for use with // apply. func JobCondition() *JobConditionApplyConfiguration { return &JobConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go index 3d46a3ecf9..2104fe113d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go @@ -24,13 +24,14 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// JobSpecApplyConfiguration represents an declarative configuration of the JobSpec type for use +// JobSpecApplyConfiguration represents a declarative configuration of the JobSpec type for use // with apply. 
type JobSpecApplyConfiguration struct { Parallelism *int32 `json:"parallelism,omitempty"` Completions *int32 `json:"completions,omitempty"` ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"` PodFailurePolicy *PodFailurePolicyApplyConfiguration `json:"podFailurePolicy,omitempty"` + SuccessPolicy *SuccessPolicyApplyConfiguration `json:"successPolicy,omitempty"` BackoffLimit *int32 `json:"backoffLimit,omitempty"` BackoffLimitPerIndex *int32 `json:"backoffLimitPerIndex,omitempty"` MaxFailedIndexes *int32 `json:"maxFailedIndexes,omitempty"` @@ -41,9 +42,10 @@ type JobSpecApplyConfiguration struct { CompletionMode *batchv1.CompletionMode `json:"completionMode,omitempty"` Suspend *bool `json:"suspend,omitempty"` PodReplacementPolicy *batchv1.PodReplacementPolicy `json:"podReplacementPolicy,omitempty"` + ManagedBy *string `json:"managedBy,omitempty"` } -// JobSpecApplyConfiguration constructs an declarative configuration of the JobSpec type for use with +// JobSpecApplyConfiguration constructs a declarative configuration of the JobSpec type for use with // apply. func JobSpec() *JobSpecApplyConfiguration { return &JobSpecApplyConfiguration{} @@ -81,6 +83,14 @@ func (b *JobSpecApplyConfiguration) WithPodFailurePolicy(value *PodFailurePolicy return b } +// WithSuccessPolicy sets the SuccessPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SuccessPolicy field is set to the value of the last call. +func (b *JobSpecApplyConfiguration) WithSuccessPolicy(value *SuccessPolicyApplyConfiguration) *JobSpecApplyConfiguration { + b.SuccessPolicy = value + return b +} + // WithBackoffLimit sets the BackoffLimit field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the BackoffLimit field is set to the value of the last call. @@ -160,3 +170,11 @@ func (b *JobSpecApplyConfiguration) WithPodReplacementPolicy(value batchv1.PodRe b.PodReplacementPolicy = &value return b } + +// WithManagedBy sets the ManagedBy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagedBy field is set to the value of the last call. +func (b *JobSpecApplyConfiguration) WithManagedBy(value string) *JobSpecApplyConfiguration { + b.ManagedBy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go index e8e472f8f7..071a0153f5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go @@ -22,7 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// JobStatusApplyConfiguration represents an declarative configuration of the JobStatus type for use +// JobStatusApplyConfiguration represents a declarative configuration of the JobStatus type for use // with apply. 
type JobStatusApplyConfiguration struct { Conditions []JobConditionApplyConfiguration `json:"conditions,omitempty"` @@ -38,7 +38,7 @@ type JobStatusApplyConfiguration struct { Ready *int32 `json:"ready,omitempty"` } -// JobStatusApplyConfiguration constructs an declarative configuration of the JobStatus type for use with +// JobStatusApplyConfiguration constructs a declarative configuration of the JobStatus type for use with // apply. func JobStatus() *JobStatusApplyConfiguration { return &JobStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go index b37a815680..901c4228e0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go @@ -24,14 +24,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// JobTemplateSpecApplyConfiguration represents an declarative configuration of the JobTemplateSpec type for use +// JobTemplateSpecApplyConfiguration represents a declarative configuration of the JobTemplateSpec type for use // with apply. type JobTemplateSpecApplyConfiguration struct { *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` Spec *JobSpecApplyConfiguration `json:"spec,omitempty"` } -// JobTemplateSpecApplyConfiguration constructs an declarative configuration of the JobTemplateSpec type for use with +// JobTemplateSpecApplyConfiguration constructs a declarative configuration of the JobTemplateSpec type for use with // apply. func JobTemplateSpec() *JobTemplateSpecApplyConfiguration { return &JobTemplateSpecApplyConfiguration{} @@ -186,3 +186,9 @@ func (b *JobTemplateSpecApplyConfiguration) WithSpec(value *JobSpecApplyConfigur b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *JobTemplateSpecApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go index 6da98386c9..05a68b3c94 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// PodFailurePolicyApplyConfiguration represents an declarative configuration of the PodFailurePolicy type for use +// PodFailurePolicyApplyConfiguration represents a declarative configuration of the PodFailurePolicy type for use // with apply. type PodFailurePolicyApplyConfiguration struct { Rules []PodFailurePolicyRuleApplyConfiguration `json:"rules,omitempty"` } -// PodFailurePolicyApplyConfiguration constructs an declarative configuration of the PodFailurePolicy type for use with +// PodFailurePolicyApplyConfiguration constructs a declarative configuration of the PodFailurePolicy type for use with // apply. 
func PodFailurePolicy() *PodFailurePolicyApplyConfiguration { return &PodFailurePolicyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go index 65f6251810..cd32296ca0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/batch/v1" ) -// PodFailurePolicyOnExitCodesRequirementApplyConfiguration represents an declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use +// PodFailurePolicyOnExitCodesRequirementApplyConfiguration represents a declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use // with apply. type PodFailurePolicyOnExitCodesRequirementApplyConfiguration struct { ContainerName *string `json:"containerName,omitempty"` @@ -30,7 +30,7 @@ type PodFailurePolicyOnExitCodesRequirementApplyConfiguration struct { Values []int32 `json:"values,omitempty"` } -// PodFailurePolicyOnExitCodesRequirementApplyConfiguration constructs an declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use with +// PodFailurePolicyOnExitCodesRequirementApplyConfiguration constructs a declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use with // apply. func PodFailurePolicyOnExitCodesRequirement() *PodFailurePolicyOnExitCodesRequirementApplyConfiguration { return &PodFailurePolicyOnExitCodesRequirementApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go index da1556ff8b..07af4fb0e7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// PodFailurePolicyOnPodConditionsPatternApplyConfiguration represents an declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use +// PodFailurePolicyOnPodConditionsPatternApplyConfiguration represents a declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use // with apply. type PodFailurePolicyOnPodConditionsPatternApplyConfiguration struct { Type *v1.PodConditionType `json:"type,omitempty"` Status *v1.ConditionStatus `json:"status,omitempty"` } -// PodFailurePolicyOnPodConditionsPatternApplyConfiguration constructs an declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use with +// PodFailurePolicyOnPodConditionsPatternApplyConfiguration constructs a declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use with // apply. 
func PodFailurePolicyOnPodConditionsPattern() *PodFailurePolicyOnPodConditionsPatternApplyConfiguration { return &PodFailurePolicyOnPodConditionsPatternApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go index d435243531..b004921d38 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/batch/v1" ) -// PodFailurePolicyRuleApplyConfiguration represents an declarative configuration of the PodFailurePolicyRule type for use +// PodFailurePolicyRuleApplyConfiguration represents a declarative configuration of the PodFailurePolicyRule type for use // with apply. type PodFailurePolicyRuleApplyConfiguration struct { Action *v1.PodFailurePolicyAction `json:"action,omitempty"` @@ -30,7 +30,7 @@ type PodFailurePolicyRuleApplyConfiguration struct { OnPodConditions []PodFailurePolicyOnPodConditionsPatternApplyConfiguration `json:"onPodConditions,omitempty"` } -// PodFailurePolicyRuleApplyConfiguration constructs an declarative configuration of the PodFailurePolicyRule type for use with +// PodFailurePolicyRuleApplyConfiguration constructs a declarative configuration of the PodFailurePolicyRule type for use with // apply. func PodFailurePolicyRule() *PodFailurePolicyRuleApplyConfiguration { return &PodFailurePolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicy.go new file mode 100644 index 0000000000..a3f4f39e2e --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicy.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// SuccessPolicyApplyConfiguration represents a declarative configuration of the SuccessPolicy type for use +// with apply. +type SuccessPolicyApplyConfiguration struct { + Rules []SuccessPolicyRuleApplyConfiguration `json:"rules,omitempty"` +} + +// SuccessPolicyApplyConfiguration constructs a declarative configuration of the SuccessPolicy type for use with +// apply. +func SuccessPolicy() *SuccessPolicyApplyConfiguration { + return &SuccessPolicyApplyConfiguration{} +} + +// WithRules adds the given value to the Rules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Rules field. 
+func (b *SuccessPolicyApplyConfiguration) WithRules(values ...*SuccessPolicyRuleApplyConfiguration) *SuccessPolicyApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRules") + } + b.Rules = append(b.Rules, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicyrule.go new file mode 100644 index 0000000000..2b5e3d91fe --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicyrule.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// SuccessPolicyRuleApplyConfiguration represents a declarative configuration of the SuccessPolicyRule type for use +// with apply. +type SuccessPolicyRuleApplyConfiguration struct { + SucceededIndexes *string `json:"succeededIndexes,omitempty"` + SucceededCount *int32 `json:"succeededCount,omitempty"` +} + +// SuccessPolicyRuleApplyConfiguration constructs a declarative configuration of the SuccessPolicyRule type for use with +// apply. +func SuccessPolicyRule() *SuccessPolicyRuleApplyConfiguration { + return &SuccessPolicyRuleApplyConfiguration{} +} + +// WithSucceededIndexes sets the SucceededIndexes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SucceededIndexes field is set to the value of the last call. +func (b *SuccessPolicyRuleApplyConfiguration) WithSucceededIndexes(value string) *SuccessPolicyRuleApplyConfiguration { + b.SucceededIndexes = &value + return b +} + +// WithSucceededCount sets the SucceededCount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SucceededCount field is set to the value of the last call. +func (b *SuccessPolicyRuleApplyConfiguration) WithSucceededCount(value int32) *SuccessPolicyRuleApplyConfiguration { + b.SucceededCount = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go index 1409303fff..ff6b57b86c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go @@ -22,14 +22,14 @@ import ( types "k8s.io/apimachinery/pkg/types" ) -// UncountedTerminatedPodsApplyConfiguration represents an declarative configuration of the UncountedTerminatedPods type for use +// UncountedTerminatedPodsApplyConfiguration represents a declarative configuration of the UncountedTerminatedPods type for use // with apply. 
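For context, the new SuccessPolicy and SuccessPolicyRule apply configurations vendored above, together with JobSpec's new WithSuccessPolicy and WithManagedBy builders, can be chained roughly as in the sketch below. The job name, namespace, index range, and controller name are illustrative assumptions, not values from this change.

package main

import (
	"fmt"

	batchv1ac "k8s.io/client-go/applyconfigurations/batch/v1"
)

func main() {
	// Build a Job apply configuration for an indexed job that is considered
	// successful once two of the pods in indexes 0-5 have succeeded, and that
	// is reconciled by a (hypothetical) external controller.
	job := batchv1ac.Job("example-job", "default").
		WithSpec(batchv1ac.JobSpec().
			WithParallelism(3).
			WithCompletions(6).
			WithSuccessPolicy(batchv1ac.SuccessPolicy().
				WithRules(batchv1ac.SuccessPolicyRule().
					WithSucceededIndexes("0-5").
					WithSucceededCount(2))).
			WithManagedBy("example.com/custom-job-controller"))

	fmt.Println(*job.GetName()) // prints "example-job"
}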
type UncountedTerminatedPodsApplyConfiguration struct { Succeeded []types.UID `json:"succeeded,omitempty"` Failed []types.UID `json:"failed,omitempty"` } -// UncountedTerminatedPodsApplyConfiguration constructs an declarative configuration of the UncountedTerminatedPods type for use with +// UncountedTerminatedPodsApplyConfiguration constructs a declarative configuration of the UncountedTerminatedPods type for use with // apply. func UncountedTerminatedPods() *UncountedTerminatedPodsApplyConfiguration { return &UncountedTerminatedPodsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go index 1d735a8407..765ed5e651 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CronJobApplyConfiguration represents an declarative configuration of the CronJob type for use +// CronJobApplyConfiguration represents a declarative configuration of the CronJob type for use // with apply. type CronJobApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type CronJobApplyConfiguration struct { Status *CronJobStatusApplyConfiguration `json:"status,omitempty"` } -// CronJob constructs an declarative configuration of the CronJob type for use with +// CronJob constructs a declarative configuration of the CronJob type for use with // apply. func CronJob(name, namespace string) *CronJobApplyConfiguration { b := &CronJobApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *CronJobApplyConfiguration) WithStatus(value *CronJobStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CronJobApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go index 68c0777de0..21043690da 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go @@ -22,7 +22,7 @@ import ( v1beta1 "k8s.io/api/batch/v1beta1" ) -// CronJobSpecApplyConfiguration represents an declarative configuration of the CronJobSpec type for use +// CronJobSpecApplyConfiguration represents a declarative configuration of the CronJobSpec type for use // with apply. type CronJobSpecApplyConfiguration struct { Schedule *string `json:"schedule,omitempty"` @@ -35,7 +35,7 @@ type CronJobSpecApplyConfiguration struct { FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty"` } -// CronJobSpecApplyConfiguration constructs an declarative configuration of the CronJobSpec type for use with +// CronJobSpecApplyConfiguration constructs a declarative configuration of the CronJobSpec type for use with // apply. 
func CronJobSpec() *CronJobSpecApplyConfiguration { return &CronJobSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobstatus.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobstatus.go index 8dca14f663..335f9e0dce 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobstatus.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// CronJobStatusApplyConfiguration represents an declarative configuration of the CronJobStatus type for use +// CronJobStatusApplyConfiguration represents a declarative configuration of the CronJobStatus type for use // with apply. type CronJobStatusApplyConfiguration struct { Active []v1.ObjectReferenceApplyConfiguration `json:"active,omitempty"` @@ -31,7 +31,7 @@ type CronJobStatusApplyConfiguration struct { LastSuccessfulTime *metav1.Time `json:"lastSuccessfulTime,omitempty"` } -// CronJobStatusApplyConfiguration constructs an declarative configuration of the CronJobStatus type for use with +// CronJobStatusApplyConfiguration constructs a declarative configuration of the CronJobStatus type for use with // apply. func CronJobStatus() *CronJobStatusApplyConfiguration { return &CronJobStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go index f925d65a7e..5fd2485c69 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go @@ -25,14 +25,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// JobTemplateSpecApplyConfiguration represents an declarative configuration of the JobTemplateSpec type for use +// JobTemplateSpecApplyConfiguration represents a declarative configuration of the JobTemplateSpec type for use // with apply. type JobTemplateSpecApplyConfiguration struct { *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` Spec *batchv1.JobSpecApplyConfiguration `json:"spec,omitempty"` } -// JobTemplateSpecApplyConfiguration constructs an declarative configuration of the JobTemplateSpec type for use with +// JobTemplateSpecApplyConfiguration constructs a declarative configuration of the JobTemplateSpec type for use with // apply. func JobTemplateSpec() *JobTemplateSpecApplyConfiguration { return &JobTemplateSpecApplyConfiguration{} @@ -187,3 +187,9 @@ func (b *JobTemplateSpecApplyConfiguration) WithSpec(value *batchv1.JobSpecApply b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *JobTemplateSpecApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go index 3d02c0be80..e30bb62427 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CertificateSigningRequestApplyConfiguration represents an declarative configuration of the CertificateSigningRequest type for use +// CertificateSigningRequestApplyConfiguration represents a declarative configuration of the CertificateSigningRequest type for use // with apply. type CertificateSigningRequestApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type CertificateSigningRequestApplyConfiguration struct { Status *CertificateSigningRequestStatusApplyConfiguration `json:"status,omitempty"` } -// CertificateSigningRequest constructs an declarative configuration of the CertificateSigningRequest type for use with +// CertificateSigningRequest constructs a declarative configuration of the CertificateSigningRequest type for use with // apply. func CertificateSigningRequest(name string) *CertificateSigningRequestApplyConfiguration { b := &CertificateSigningRequestApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *CertificateSigningRequestApplyConfiguration) WithStatus(value *Certific b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CertificateSigningRequestApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go index 13d69cfcef..7a4bfce011 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// CertificateSigningRequestConditionApplyConfiguration represents an declarative configuration of the CertificateSigningRequestCondition type for use +// CertificateSigningRequestConditionApplyConfiguration represents a declarative configuration of the CertificateSigningRequestCondition type for use // with apply. type CertificateSigningRequestConditionApplyConfiguration struct { Type *v1.RequestConditionType `json:"type,omitempty"` @@ -35,7 +35,7 @@ type CertificateSigningRequestConditionApplyConfiguration struct { LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` } -// CertificateSigningRequestConditionApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestCondition type for use with +// CertificateSigningRequestConditionApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestCondition type for use with // apply. 
func CertificateSigningRequestCondition() *CertificateSigningRequestConditionApplyConfiguration { return &CertificateSigningRequestConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go index 81ca214a9d..9c4a85693a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/certificates/v1" ) -// CertificateSigningRequestSpecApplyConfiguration represents an declarative configuration of the CertificateSigningRequestSpec type for use +// CertificateSigningRequestSpecApplyConfiguration represents a declarative configuration of the CertificateSigningRequestSpec type for use // with apply. type CertificateSigningRequestSpecApplyConfiguration struct { Request []byte `json:"request,omitempty"` @@ -35,7 +35,7 @@ type CertificateSigningRequestSpecApplyConfiguration struct { Extra map[string]v1.ExtraValue `json:"extra,omitempty"` } -// CertificateSigningRequestSpecApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestSpec type for use with +// CertificateSigningRequestSpecApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestSpec type for use with // apply. func CertificateSigningRequestSpec() *CertificateSigningRequestSpecApplyConfiguration { return &CertificateSigningRequestSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequeststatus.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequeststatus.go index 59d5930331..897f6d1e98 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequeststatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequeststatus.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// CertificateSigningRequestStatusApplyConfiguration represents an declarative configuration of the CertificateSigningRequestStatus type for use +// CertificateSigningRequestStatusApplyConfiguration represents a declarative configuration of the CertificateSigningRequestStatus type for use // with apply. type CertificateSigningRequestStatusApplyConfiguration struct { Conditions []CertificateSigningRequestConditionApplyConfiguration `json:"conditions,omitempty"` Certificate []byte `json:"certificate,omitempty"` } -// CertificateSigningRequestStatusApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestStatus type for use with +// CertificateSigningRequestStatusApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestStatus type for use with // apply. 
func CertificateSigningRequestStatus() *CertificateSigningRequestStatusApplyConfiguration { return &CertificateSigningRequestStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go index 788d2a07dc..9cd10bc56a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterTrustBundleApplyConfiguration represents an declarative configuration of the ClusterTrustBundle type for use +// ClusterTrustBundleApplyConfiguration represents a declarative configuration of the ClusterTrustBundle type for use // with apply. type ClusterTrustBundleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type ClusterTrustBundleApplyConfiguration struct { Spec *ClusterTrustBundleSpecApplyConfiguration `json:"spec,omitempty"` } -// ClusterTrustBundle constructs an declarative configuration of the ClusterTrustBundle type for use with +// ClusterTrustBundle constructs a declarative configuration of the ClusterTrustBundle type for use with // apply. func ClusterTrustBundle(name string) *ClusterTrustBundleApplyConfiguration { b := &ClusterTrustBundleApplyConfiguration{} @@ -245,3 +245,9 @@ func (b *ClusterTrustBundleApplyConfiguration) WithSpec(value *ClusterTrustBundl b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterTrustBundleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go index d1aea1d6dc..7bb36f7084 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go @@ -18,14 +18,14 @@ limitations under the License. package v1alpha1 -// ClusterTrustBundleSpecApplyConfiguration represents an declarative configuration of the ClusterTrustBundleSpec type for use +// ClusterTrustBundleSpecApplyConfiguration represents a declarative configuration of the ClusterTrustBundleSpec type for use // with apply. type ClusterTrustBundleSpecApplyConfiguration struct { SignerName *string `json:"signerName,omitempty"` TrustBundle *string `json:"trustBundle,omitempty"` } -// ClusterTrustBundleSpecApplyConfiguration constructs an declarative configuration of the ClusterTrustBundleSpec type for use with +// ClusterTrustBundleSpecApplyConfiguration constructs a declarative configuration of the ClusterTrustBundleSpec type for use with // apply. 
func ClusterTrustBundleSpec() *ClusterTrustBundleSpecApplyConfiguration { return &ClusterTrustBundleSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go index 83a0edc18f..d6e08824a4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CertificateSigningRequestApplyConfiguration represents an declarative configuration of the CertificateSigningRequest type for use +// CertificateSigningRequestApplyConfiguration represents a declarative configuration of the CertificateSigningRequest type for use // with apply. type CertificateSigningRequestApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type CertificateSigningRequestApplyConfiguration struct { Status *CertificateSigningRequestStatusApplyConfiguration `json:"status,omitempty"` } -// CertificateSigningRequest constructs an declarative configuration of the CertificateSigningRequest type for use with +// CertificateSigningRequest constructs a declarative configuration of the CertificateSigningRequest type for use with // apply. func CertificateSigningRequest(name string) *CertificateSigningRequestApplyConfiguration { b := &CertificateSigningRequestApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *CertificateSigningRequestApplyConfiguration) WithStatus(value *Certific b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CertificateSigningRequestApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go index 2c32a3272c..6e3692d1c2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// CertificateSigningRequestConditionApplyConfiguration represents an declarative configuration of the CertificateSigningRequestCondition type for use +// CertificateSigningRequestConditionApplyConfiguration represents a declarative configuration of the CertificateSigningRequestCondition type for use // with apply. type CertificateSigningRequestConditionApplyConfiguration struct { Type *v1beta1.RequestConditionType `json:"type,omitempty"` @@ -35,7 +35,7 @@ type CertificateSigningRequestConditionApplyConfiguration struct { LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` } -// CertificateSigningRequestConditionApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestCondition type for use with +// CertificateSigningRequestConditionApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestCondition type for use with // apply. 
func CertificateSigningRequestCondition() *CertificateSigningRequestConditionApplyConfiguration { return &CertificateSigningRequestConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go index 9554b1f400..9284eca3a4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go @@ -22,7 +22,7 @@ import ( v1beta1 "k8s.io/api/certificates/v1beta1" ) -// CertificateSigningRequestSpecApplyConfiguration represents an declarative configuration of the CertificateSigningRequestSpec type for use +// CertificateSigningRequestSpecApplyConfiguration represents a declarative configuration of the CertificateSigningRequestSpec type for use // with apply. type CertificateSigningRequestSpecApplyConfiguration struct { Request []byte `json:"request,omitempty"` @@ -35,7 +35,7 @@ type CertificateSigningRequestSpecApplyConfiguration struct { Extra map[string]v1beta1.ExtraValue `json:"extra,omitempty"` } -// CertificateSigningRequestSpecApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestSpec type for use with +// CertificateSigningRequestSpecApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestSpec type for use with // apply. func CertificateSigningRequestSpec() *CertificateSigningRequestSpecApplyConfiguration { return &CertificateSigningRequestSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequeststatus.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequeststatus.go index 9d8c5d4585..f82e8aed3b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequeststatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequeststatus.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// CertificateSigningRequestStatusApplyConfiguration represents an declarative configuration of the CertificateSigningRequestStatus type for use +// CertificateSigningRequestStatusApplyConfiguration represents a declarative configuration of the CertificateSigningRequestStatus type for use // with apply. type CertificateSigningRequestStatusApplyConfiguration struct { Conditions []CertificateSigningRequestConditionApplyConfiguration `json:"conditions,omitempty"` Certificate []byte `json:"certificate,omitempty"` } -// CertificateSigningRequestStatusApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestStatus type for use with +// CertificateSigningRequestStatusApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestStatus type for use with // apply. 
func CertificateSigningRequestStatus() *CertificateSigningRequestStatusApplyConfiguration { return &CertificateSigningRequestStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go index 618f12fb21..ffd84583f4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// LeaseApplyConfiguration represents an declarative configuration of the Lease type for use +// LeaseApplyConfiguration represents a declarative configuration of the Lease type for use // with apply. type LeaseApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type LeaseApplyConfiguration struct { Spec *LeaseSpecApplyConfiguration `json:"spec,omitempty"` } -// Lease constructs an declarative configuration of the Lease type for use with +// Lease constructs a declarative configuration of the Lease type for use with // apply. func Lease(name, namespace string) *LeaseApplyConfiguration { b := &LeaseApplyConfiguration{} @@ -247,3 +247,9 @@ func (b *LeaseApplyConfiguration) WithSpec(value *LeaseSpecApplyConfiguration) * b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *LeaseApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go index a5f6a6ebba..01d0df1380 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go @@ -19,20 +19,23 @@ limitations under the License. package v1 import ( + coordinationv1 "k8s.io/api/coordination/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// LeaseSpecApplyConfiguration represents an declarative configuration of the LeaseSpec type for use +// LeaseSpecApplyConfiguration represents a declarative configuration of the LeaseSpec type for use // with apply. type LeaseSpecApplyConfiguration struct { - HolderIdentity *string `json:"holderIdentity,omitempty"` - LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty"` - AcquireTime *v1.MicroTime `json:"acquireTime,omitempty"` - RenewTime *v1.MicroTime `json:"renewTime,omitempty"` - LeaseTransitions *int32 `json:"leaseTransitions,omitempty"` + HolderIdentity *string `json:"holderIdentity,omitempty"` + LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty"` + AcquireTime *v1.MicroTime `json:"acquireTime,omitempty"` + RenewTime *v1.MicroTime `json:"renewTime,omitempty"` + LeaseTransitions *int32 `json:"leaseTransitions,omitempty"` + Strategy *coordinationv1.CoordinatedLeaseStrategy `json:"strategy,omitempty"` + PreferredHolder *string `json:"preferredHolder,omitempty"` } -// LeaseSpecApplyConfiguration constructs an declarative configuration of the LeaseSpec type for use with +// LeaseSpecApplyConfiguration constructs a declarative configuration of the LeaseSpec type for use with // apply. 
func LeaseSpec() *LeaseSpecApplyConfiguration { return &LeaseSpecApplyConfiguration{} @@ -77,3 +80,19 @@ func (b *LeaseSpecApplyConfiguration) WithLeaseTransitions(value int32) *LeaseSp b.LeaseTransitions = &value return b } + +// WithStrategy sets the Strategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Strategy field is set to the value of the last call. +func (b *LeaseSpecApplyConfiguration) WithStrategy(value coordinationv1.CoordinatedLeaseStrategy) *LeaseSpecApplyConfiguration { + b.Strategy = &value + return b +} + +// WithPreferredHolder sets the PreferredHolder field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PreferredHolder field is set to the value of the last call. +func (b *LeaseSpecApplyConfiguration) WithPreferredHolder(value string) *LeaseSpecApplyConfiguration { + b.PreferredHolder = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidate.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidate.go new file mode 100644 index 0000000000..ef76847791 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidate.go @@ -0,0 +1,255 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// LeaseCandidateApplyConfiguration represents a declarative configuration of the LeaseCandidate type for use +// with apply. +type LeaseCandidateApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *LeaseCandidateSpecApplyConfiguration `json:"spec,omitempty"` +} + +// LeaseCandidate constructs a declarative configuration of the LeaseCandidate type for use with +// apply. +func LeaseCandidate(name, namespace string) *LeaseCandidateApplyConfiguration { + b := &LeaseCandidateApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("LeaseCandidate") + b.WithAPIVersion("coordination.k8s.io/v1alpha1") + return b +} + +// ExtractLeaseCandidate extracts the applied configuration owned by fieldManager from +// leaseCandidate. If no managedFields are found in leaseCandidate for fieldManager, a +// LeaseCandidateApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. 
It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// leaseCandidate must be a unmodified LeaseCandidate API object that was retrieved from the Kubernetes API. +// ExtractLeaseCandidate provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractLeaseCandidate(leaseCandidate *coordinationv1alpha1.LeaseCandidate, fieldManager string) (*LeaseCandidateApplyConfiguration, error) { + return extractLeaseCandidate(leaseCandidate, fieldManager, "") +} + +// ExtractLeaseCandidateStatus is the same as ExtractLeaseCandidate except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractLeaseCandidateStatus(leaseCandidate *coordinationv1alpha1.LeaseCandidate, fieldManager string) (*LeaseCandidateApplyConfiguration, error) { + return extractLeaseCandidate(leaseCandidate, fieldManager, "status") +} + +func extractLeaseCandidate(leaseCandidate *coordinationv1alpha1.LeaseCandidate, fieldManager string, subresource string) (*LeaseCandidateApplyConfiguration, error) { + b := &LeaseCandidateApplyConfiguration{} + err := managedfields.ExtractInto(leaseCandidate, internal.Parser().Type("io.k8s.api.coordination.v1alpha1.LeaseCandidate"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(leaseCandidate.Name) + b.WithNamespace(leaseCandidate.Namespace) + + b.WithKind("LeaseCandidate") + b.WithAPIVersion("coordination.k8s.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithKind(value string) *LeaseCandidateApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithAPIVersion(value string) *LeaseCandidateApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithName(value string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. 
+func (b *LeaseCandidateApplyConfiguration) WithGenerateName(value string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithNamespace(value string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithUID(value types.UID) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithResourceVersion(value string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithGeneration(value int64) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
+func (b *LeaseCandidateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *LeaseCandidateApplyConfiguration) WithLabels(entries map[string]string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *LeaseCandidateApplyConfiguration) WithAnnotations(entries map[string]string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *LeaseCandidateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *LeaseCandidateApplyConfiguration) WithFinalizers(values ...string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *LeaseCandidateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. 
+func (b *LeaseCandidateApplyConfiguration) WithSpec(value *LeaseCandidateSpecApplyConfiguration) *LeaseCandidateApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *LeaseCandidateApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidatespec.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidatespec.go new file mode 100644 index 0000000000..61d3dca10b --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidatespec.go @@ -0,0 +1,91 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + coordinationv1 "k8s.io/api/coordination/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// LeaseCandidateSpecApplyConfiguration represents a declarative configuration of the LeaseCandidateSpec type for use +// with apply. +type LeaseCandidateSpecApplyConfiguration struct { + LeaseName *string `json:"leaseName,omitempty"` + PingTime *v1.MicroTime `json:"pingTime,omitempty"` + RenewTime *v1.MicroTime `json:"renewTime,omitempty"` + BinaryVersion *string `json:"binaryVersion,omitempty"` + EmulationVersion *string `json:"emulationVersion,omitempty"` + PreferredStrategies []coordinationv1.CoordinatedLeaseStrategy `json:"preferredStrategies,omitempty"` +} + +// LeaseCandidateSpecApplyConfiguration constructs a declarative configuration of the LeaseCandidateSpec type for use with +// apply. +func LeaseCandidateSpec() *LeaseCandidateSpecApplyConfiguration { + return &LeaseCandidateSpecApplyConfiguration{} +} + +// WithLeaseName sets the LeaseName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LeaseName field is set to the value of the last call. +func (b *LeaseCandidateSpecApplyConfiguration) WithLeaseName(value string) *LeaseCandidateSpecApplyConfiguration { + b.LeaseName = &value + return b +} + +// WithPingTime sets the PingTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PingTime field is set to the value of the last call. +func (b *LeaseCandidateSpecApplyConfiguration) WithPingTime(value v1.MicroTime) *LeaseCandidateSpecApplyConfiguration { + b.PingTime = &value + return b +} + +// WithRenewTime sets the RenewTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RenewTime field is set to the value of the last call. 
+func (b *LeaseCandidateSpecApplyConfiguration) WithRenewTime(value v1.MicroTime) *LeaseCandidateSpecApplyConfiguration { + b.RenewTime = &value + return b +} + +// WithBinaryVersion sets the BinaryVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BinaryVersion field is set to the value of the last call. +func (b *LeaseCandidateSpecApplyConfiguration) WithBinaryVersion(value string) *LeaseCandidateSpecApplyConfiguration { + b.BinaryVersion = &value + return b +} + +// WithEmulationVersion sets the EmulationVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EmulationVersion field is set to the value of the last call. +func (b *LeaseCandidateSpecApplyConfiguration) WithEmulationVersion(value string) *LeaseCandidateSpecApplyConfiguration { + b.EmulationVersion = &value + return b +} + +// WithPreferredStrategies adds the given value to the PreferredStrategies field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the PreferredStrategies field. +func (b *LeaseCandidateSpecApplyConfiguration) WithPreferredStrategies(values ...coordinationv1.CoordinatedLeaseStrategy) *LeaseCandidateSpecApplyConfiguration { + for i := range values { + b.PreferredStrategies = append(b.PreferredStrategies, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go index 867e0f58ba..9aa0703e8e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// LeaseApplyConfiguration represents an declarative configuration of the Lease type for use +// LeaseApplyConfiguration represents a declarative configuration of the Lease type for use // with apply. type LeaseApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type LeaseApplyConfiguration struct { Spec *LeaseSpecApplyConfiguration `json:"spec,omitempty"` } -// Lease constructs an declarative configuration of the Lease type for use with +// Lease constructs a declarative configuration of the Lease type for use with // apply. func Lease(name, namespace string) *LeaseApplyConfiguration { b := &LeaseApplyConfiguration{} @@ -247,3 +247,9 @@ func (b *LeaseApplyConfiguration) WithSpec(value *LeaseSpecApplyConfiguration) * b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *LeaseApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasespec.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasespec.go index 865eb76455..8c7fddfc61 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasespec.go @@ -19,20 +19,23 @@ limitations under the License. package v1beta1 import ( + coordinationv1 "k8s.io/api/coordination/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// LeaseSpecApplyConfiguration represents an declarative configuration of the LeaseSpec type for use +// LeaseSpecApplyConfiguration represents a declarative configuration of the LeaseSpec type for use // with apply. type LeaseSpecApplyConfiguration struct { - HolderIdentity *string `json:"holderIdentity,omitempty"` - LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty"` - AcquireTime *v1.MicroTime `json:"acquireTime,omitempty"` - RenewTime *v1.MicroTime `json:"renewTime,omitempty"` - LeaseTransitions *int32 `json:"leaseTransitions,omitempty"` + HolderIdentity *string `json:"holderIdentity,omitempty"` + LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty"` + AcquireTime *v1.MicroTime `json:"acquireTime,omitempty"` + RenewTime *v1.MicroTime `json:"renewTime,omitempty"` + LeaseTransitions *int32 `json:"leaseTransitions,omitempty"` + Strategy *coordinationv1.CoordinatedLeaseStrategy `json:"strategy,omitempty"` + PreferredHolder *string `json:"preferredHolder,omitempty"` } -// LeaseSpecApplyConfiguration constructs an declarative configuration of the LeaseSpec type for use with +// LeaseSpecApplyConfiguration constructs a declarative configuration of the LeaseSpec type for use with // apply. func LeaseSpec() *LeaseSpecApplyConfiguration { return &LeaseSpecApplyConfiguration{} @@ -77,3 +80,19 @@ func (b *LeaseSpecApplyConfiguration) WithLeaseTransitions(value int32) *LeaseSp b.LeaseTransitions = &value return b } + +// WithStrategy sets the Strategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Strategy field is set to the value of the last call. +func (b *LeaseSpecApplyConfiguration) WithStrategy(value coordinationv1.CoordinatedLeaseStrategy) *LeaseSpecApplyConfiguration { + b.Strategy = &value + return b +} + +// WithPreferredHolder sets the PreferredHolder field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PreferredHolder field is set to the value of the last call. +func (b *LeaseSpecApplyConfiguration) WithPreferredHolder(value string) *LeaseSpecApplyConfiguration { + b.PreferredHolder = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/affinity.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/affinity.go index df6d1c64e5..45484f140d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/affinity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/affinity.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// AffinityApplyConfiguration represents an declarative configuration of the Affinity type for use +// AffinityApplyConfiguration represents a declarative configuration of the Affinity type for use // with apply. type AffinityApplyConfiguration struct { NodeAffinity *NodeAffinityApplyConfiguration `json:"nodeAffinity,omitempty"` @@ -26,7 +26,7 @@ type AffinityApplyConfiguration struct { PodAntiAffinity *PodAntiAffinityApplyConfiguration `json:"podAntiAffinity,omitempty"` } -// AffinityApplyConfiguration constructs an declarative configuration of the Affinity type for use with +// AffinityApplyConfiguration constructs a declarative configuration of the Affinity type for use with // apply. func Affinity() *AffinityApplyConfiguration { return &AffinityApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go new file mode 100644 index 0000000000..1d698fd610 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// AppArmorProfileApplyConfiguration represents a declarative configuration of the AppArmorProfile type for use +// with apply. +type AppArmorProfileApplyConfiguration struct { + Type *v1.AppArmorProfileType `json:"type,omitempty"` + LocalhostProfile *string `json:"localhostProfile,omitempty"` +} + +// AppArmorProfileApplyConfiguration constructs a declarative configuration of the AppArmorProfile type for use with +// apply. +func AppArmorProfile() *AppArmorProfileApplyConfiguration { + return &AppArmorProfileApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *AppArmorProfileApplyConfiguration) WithType(value v1.AppArmorProfileType) *AppArmorProfileApplyConfiguration { + b.Type = &value + return b +} + +// WithLocalhostProfile sets the LocalhostProfile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LocalhostProfile field is set to the value of the last call. 
+func (b *AppArmorProfileApplyConfiguration) WithLocalhostProfile(value string) *AppArmorProfileApplyConfiguration { + b.LocalhostProfile = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go index 970bf24c45..e4c2fff3f6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// AttachedVolumeApplyConfiguration represents an declarative configuration of the AttachedVolume type for use +// AttachedVolumeApplyConfiguration represents a declarative configuration of the AttachedVolume type for use // with apply. type AttachedVolumeApplyConfiguration struct { Name *v1.UniqueVolumeName `json:"name,omitempty"` DevicePath *string `json:"devicePath,omitempty"` } -// AttachedVolumeApplyConfiguration constructs an declarative configuration of the AttachedVolume type for use with +// AttachedVolumeApplyConfiguration constructs a declarative configuration of the AttachedVolume type for use with // apply. func AttachedVolume() *AttachedVolumeApplyConfiguration { return &AttachedVolumeApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/awselasticblockstorevolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/awselasticblockstorevolumesource.go index 6ff335e9d6..d08786965e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/awselasticblockstorevolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/awselasticblockstorevolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// AWSElasticBlockStoreVolumeSourceApplyConfiguration represents an declarative configuration of the AWSElasticBlockStoreVolumeSource type for use +// AWSElasticBlockStoreVolumeSourceApplyConfiguration represents a declarative configuration of the AWSElasticBlockStoreVolumeSource type for use // with apply. type AWSElasticBlockStoreVolumeSourceApplyConfiguration struct { VolumeID *string `json:"volumeID,omitempty"` @@ -27,7 +27,7 @@ type AWSElasticBlockStoreVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// AWSElasticBlockStoreVolumeSourceApplyConfiguration constructs an declarative configuration of the AWSElasticBlockStoreVolumeSource type for use with +// AWSElasticBlockStoreVolumeSourceApplyConfiguration constructs a declarative configuration of the AWSElasticBlockStoreVolumeSource type for use with // apply. func AWSElasticBlockStoreVolumeSource() *AWSElasticBlockStoreVolumeSourceApplyConfiguration { return &AWSElasticBlockStoreVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go index b2774735ae..40ad5ac78f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// AzureDiskVolumeSourceApplyConfiguration represents an declarative configuration of the AzureDiskVolumeSource type for use +// AzureDiskVolumeSourceApplyConfiguration represents a declarative configuration of the AzureDiskVolumeSource type for use // with apply. 
type AzureDiskVolumeSourceApplyConfiguration struct { DiskName *string `json:"diskName,omitempty"` @@ -33,7 +33,7 @@ type AzureDiskVolumeSourceApplyConfiguration struct { Kind *v1.AzureDataDiskKind `json:"kind,omitempty"` } -// AzureDiskVolumeSourceApplyConfiguration constructs an declarative configuration of the AzureDiskVolumeSource type for use with +// AzureDiskVolumeSourceApplyConfiguration constructs a declarative configuration of the AzureDiskVolumeSource type for use with // apply. func AzureDiskVolumeSource() *AzureDiskVolumeSourceApplyConfiguration { return &AzureDiskVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilepersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilepersistentvolumesource.go index f173938334..70a6b17be8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilepersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilepersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// AzureFilePersistentVolumeSourceApplyConfiguration represents an declarative configuration of the AzureFilePersistentVolumeSource type for use +// AzureFilePersistentVolumeSourceApplyConfiguration represents a declarative configuration of the AzureFilePersistentVolumeSource type for use // with apply. type AzureFilePersistentVolumeSourceApplyConfiguration struct { SecretName *string `json:"secretName,omitempty"` @@ -27,7 +27,7 @@ type AzureFilePersistentVolumeSourceApplyConfiguration struct { SecretNamespace *string `json:"secretNamespace,omitempty"` } -// AzureFilePersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the AzureFilePersistentVolumeSource type for use with +// AzureFilePersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the AzureFilePersistentVolumeSource type for use with // apply. func AzureFilePersistentVolumeSource() *AzureFilePersistentVolumeSourceApplyConfiguration { return &AzureFilePersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilevolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilevolumesource.go index a7f7f33d88..ff0c867919 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilevolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilevolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// AzureFileVolumeSourceApplyConfiguration represents an declarative configuration of the AzureFileVolumeSource type for use +// AzureFileVolumeSourceApplyConfiguration represents a declarative configuration of the AzureFileVolumeSource type for use // with apply. type AzureFileVolumeSourceApplyConfiguration struct { SecretName *string `json:"secretName,omitempty"` @@ -26,7 +26,7 @@ type AzureFileVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// AzureFileVolumeSourceApplyConfiguration constructs an declarative configuration of the AzureFileVolumeSource type for use with +// AzureFileVolumeSourceApplyConfiguration constructs a declarative configuration of the AzureFileVolumeSource type for use with // apply. 
func AzureFileVolumeSource() *AzureFileVolumeSourceApplyConfiguration { return &AzureFileVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go index c3d176c4d8..1c463aef50 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// CapabilitiesApplyConfiguration represents an declarative configuration of the Capabilities type for use +// CapabilitiesApplyConfiguration represents a declarative configuration of the Capabilities type for use // with apply. type CapabilitiesApplyConfiguration struct { Add []v1.Capability `json:"add,omitempty"` Drop []v1.Capability `json:"drop,omitempty"` } -// CapabilitiesApplyConfiguration constructs an declarative configuration of the Capabilities type for use with +// CapabilitiesApplyConfiguration constructs a declarative configuration of the Capabilities type for use with // apply. func Capabilities() *CapabilitiesApplyConfiguration { return &CapabilitiesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfspersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfspersistentvolumesource.go index a41936fe3d..f3ee2d03e9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfspersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfspersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CephFSPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the CephFSPersistentVolumeSource type for use +// CephFSPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the CephFSPersistentVolumeSource type for use // with apply. type CephFSPersistentVolumeSourceApplyConfiguration struct { Monitors []string `json:"monitors,omitempty"` @@ -29,7 +29,7 @@ type CephFSPersistentVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// CephFSPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the CephFSPersistentVolumeSource type for use with +// CephFSPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the CephFSPersistentVolumeSource type for use with // apply. func CephFSPersistentVolumeSource() *CephFSPersistentVolumeSourceApplyConfiguration { return &CephFSPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfsvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfsvolumesource.go index 0ea070ba5d..77d53d6eb0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfsvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfsvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CephFSVolumeSourceApplyConfiguration represents an declarative configuration of the CephFSVolumeSource type for use +// CephFSVolumeSourceApplyConfiguration represents a declarative configuration of the CephFSVolumeSource type for use // with apply. 
type CephFSVolumeSourceApplyConfiguration struct { Monitors []string `json:"monitors,omitempty"` @@ -29,7 +29,7 @@ type CephFSVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// CephFSVolumeSourceApplyConfiguration constructs an declarative configuration of the CephFSVolumeSource type for use with +// CephFSVolumeSourceApplyConfiguration constructs a declarative configuration of the CephFSVolumeSource type for use with // apply. func CephFSVolumeSource() *CephFSVolumeSourceApplyConfiguration { return &CephFSVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cinderpersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cinderpersistentvolumesource.go index 7754cf92f7..b265734882 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cinderpersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cinderpersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CinderPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the CinderPersistentVolumeSource type for use +// CinderPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the CinderPersistentVolumeSource type for use // with apply. type CinderPersistentVolumeSourceApplyConfiguration struct { VolumeID *string `json:"volumeID,omitempty"` @@ -27,7 +27,7 @@ type CinderPersistentVolumeSourceApplyConfiguration struct { SecretRef *SecretReferenceApplyConfiguration `json:"secretRef,omitempty"` } -// CinderPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the CinderPersistentVolumeSource type for use with +// CinderPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the CinderPersistentVolumeSource type for use with // apply. func CinderPersistentVolumeSource() *CinderPersistentVolumeSourceApplyConfiguration { return &CinderPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cindervolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cindervolumesource.go index 51271e279d..131cbf219c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cindervolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cindervolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CinderVolumeSourceApplyConfiguration represents an declarative configuration of the CinderVolumeSource type for use +// CinderVolumeSourceApplyConfiguration represents a declarative configuration of the CinderVolumeSource type for use // with apply. type CinderVolumeSourceApplyConfiguration struct { VolumeID *string `json:"volumeID,omitempty"` @@ -27,7 +27,7 @@ type CinderVolumeSourceApplyConfiguration struct { SecretRef *LocalObjectReferenceApplyConfiguration `json:"secretRef,omitempty"` } -// CinderVolumeSourceApplyConfiguration constructs an declarative configuration of the CinderVolumeSource type for use with +// CinderVolumeSourceApplyConfiguration constructs a declarative configuration of the CinderVolumeSource type for use with // apply. 
func CinderVolumeSource() *CinderVolumeSourceApplyConfiguration { return &CinderVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/claimsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/claimsource.go deleted file mode 100644 index 2153570fc0..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/claimsource.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -// ClaimSourceApplyConfiguration represents an declarative configuration of the ClaimSource type for use -// with apply. -type ClaimSourceApplyConfiguration struct { - ResourceClaimName *string `json:"resourceClaimName,omitempty"` - ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty"` -} - -// ClaimSourceApplyConfiguration constructs an declarative configuration of the ClaimSource type for use with -// apply. -func ClaimSource() *ClaimSourceApplyConfiguration { - return &ClaimSourceApplyConfiguration{} -} - -// WithResourceClaimName sets the ResourceClaimName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceClaimName field is set to the value of the last call. -func (b *ClaimSourceApplyConfiguration) WithResourceClaimName(value string) *ClaimSourceApplyConfiguration { - b.ResourceClaimName = &value - return b -} - -// WithResourceClaimTemplateName sets the ResourceClaimTemplateName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceClaimTemplateName field is set to the value of the last call. -func (b *ClaimSourceApplyConfiguration) WithResourceClaimTemplateName(value string) *ClaimSourceApplyConfiguration { - b.ResourceClaimTemplateName = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/clientipconfig.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/clientipconfig.go index a666e8faae..02c4e55e13 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/clientipconfig.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/clientipconfig.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// ClientIPConfigApplyConfiguration represents an declarative configuration of the ClientIPConfig type for use +// ClientIPConfigApplyConfiguration represents a declarative configuration of the ClientIPConfig type for use // with apply. 
type ClientIPConfigApplyConfiguration struct { TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` } -// ClientIPConfigApplyConfiguration constructs an declarative configuration of the ClientIPConfig type for use with +// ClientIPConfigApplyConfiguration constructs a declarative configuration of the ClientIPConfig type for use with // apply. func ClientIPConfig() *ClientIPConfigApplyConfiguration { return &ClientIPConfigApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go index 5aa686782b..bcfbac63e7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterTrustBundleProjectionApplyConfiguration represents an declarative configuration of the ClusterTrustBundleProjection type for use +// ClusterTrustBundleProjectionApplyConfiguration represents a declarative configuration of the ClusterTrustBundleProjection type for use // with apply. type ClusterTrustBundleProjectionApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -32,7 +32,7 @@ type ClusterTrustBundleProjectionApplyConfiguration struct { Path *string `json:"path,omitempty"` } -// ClusterTrustBundleProjectionApplyConfiguration constructs an declarative configuration of the ClusterTrustBundleProjection type for use with +// ClusterTrustBundleProjectionApplyConfiguration constructs a declarative configuration of the ClusterTrustBundleProjection type for use with // apply. func ClusterTrustBundleProjection() *ClusterTrustBundleProjectionApplyConfiguration { return &ClusterTrustBundleProjectionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go index 1ef65f5a0c..0044c7c0bb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ComponentConditionApplyConfiguration represents an declarative configuration of the ComponentCondition type for use +// ComponentConditionApplyConfiguration represents a declarative configuration of the ComponentCondition type for use // with apply. type ComponentConditionApplyConfiguration struct { Type *v1.ComponentConditionType `json:"type,omitempty"` @@ -31,7 +31,7 @@ type ComponentConditionApplyConfiguration struct { Error *string `json:"error,omitempty"` } -// ComponentConditionApplyConfiguration constructs an declarative configuration of the ComponentCondition type for use with +// ComponentConditionApplyConfiguration constructs a declarative configuration of the ComponentCondition type for use with // apply. 
func ComponentCondition() *ComponentConditionApplyConfiguration { return &ComponentConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go index 300e526942..195bde7219 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ComponentStatusApplyConfiguration represents an declarative configuration of the ComponentStatus type for use +// ComponentStatusApplyConfiguration represents a declarative configuration of the ComponentStatus type for use // with apply. type ComponentStatusApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type ComponentStatusApplyConfiguration struct { Conditions []ComponentConditionApplyConfiguration `json:"conditions,omitempty"` } -// ComponentStatus constructs an declarative configuration of the ComponentStatus type for use with +// ComponentStatus constructs a declarative configuration of the ComponentStatus type for use with // apply. func ComponentStatus(name string) *ComponentStatusApplyConfiguration { b := &ComponentStatusApplyConfiguration{} @@ -250,3 +250,9 @@ func (b *ComponentStatusApplyConfiguration) WithConditions(values ...*ComponentC } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ComponentStatusApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go index f4cc7024d2..576b7a3d68 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ConfigMapApplyConfiguration represents an declarative configuration of the ConfigMap type for use +// ConfigMapApplyConfiguration represents a declarative configuration of the ConfigMap type for use // with apply. type ConfigMapApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type ConfigMapApplyConfiguration struct { BinaryData map[string][]byte `json:"binaryData,omitempty"` } -// ConfigMap constructs an declarative configuration of the ConfigMap type for use with +// ConfigMap constructs a declarative configuration of the ConfigMap type for use with // apply. func ConfigMap(name, namespace string) *ConfigMapApplyConfiguration { b := &ConfigMapApplyConfiguration{} @@ -277,3 +277,9 @@ func (b *ConfigMapApplyConfiguration) WithBinaryData(entries map[string][]byte) } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ConfigMapApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go index 8802fff48f..b1fccd7000 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go @@ -18,14 +18,14 @@ limitations under the License. 
package v1 -// ConfigMapEnvSourceApplyConfiguration represents an declarative configuration of the ConfigMapEnvSource type for use +// ConfigMapEnvSourceApplyConfiguration represents a declarative configuration of the ConfigMapEnvSource type for use // with apply. type ConfigMapEnvSourceApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` Optional *bool `json:"optional,omitempty"` } -// ConfigMapEnvSourceApplyConfiguration constructs an declarative configuration of the ConfigMapEnvSource type for use with +// ConfigMapEnvSourceApplyConfiguration constructs a declarative configuration of the ConfigMapEnvSource type for use with // apply. func ConfigMapEnvSource() *ConfigMapEnvSourceApplyConfiguration { return &ConfigMapEnvSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go index 2a8c800afc..26c2a75b5a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ConfigMapKeySelectorApplyConfiguration represents an declarative configuration of the ConfigMapKeySelector type for use +// ConfigMapKeySelectorApplyConfiguration represents a declarative configuration of the ConfigMapKeySelector type for use // with apply. type ConfigMapKeySelectorApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` @@ -26,7 +26,7 @@ type ConfigMapKeySelectorApplyConfiguration struct { Optional *bool `json:"optional,omitempty"` } -// ConfigMapKeySelectorApplyConfiguration constructs an declarative configuration of the ConfigMapKeySelector type for use with +// ConfigMapKeySelectorApplyConfiguration constructs a declarative configuration of the ConfigMapKeySelector type for use with // apply. func ConfigMapKeySelector() *ConfigMapKeySelectorApplyConfiguration { return &ConfigMapKeySelectorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapnodeconfigsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapnodeconfigsource.go index da9655a544..135bb7d427 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapnodeconfigsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapnodeconfigsource.go @@ -22,7 +22,7 @@ import ( types "k8s.io/apimachinery/pkg/types" ) -// ConfigMapNodeConfigSourceApplyConfiguration represents an declarative configuration of the ConfigMapNodeConfigSource type for use +// ConfigMapNodeConfigSourceApplyConfiguration represents a declarative configuration of the ConfigMapNodeConfigSource type for use // with apply. type ConfigMapNodeConfigSourceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` @@ -32,7 +32,7 @@ type ConfigMapNodeConfigSourceApplyConfiguration struct { KubeletConfigKey *string `json:"kubeletConfigKey,omitempty"` } -// ConfigMapNodeConfigSourceApplyConfiguration constructs an declarative configuration of the ConfigMapNodeConfigSource type for use with +// ConfigMapNodeConfigSourceApplyConfiguration constructs a declarative configuration of the ConfigMapNodeConfigSource type for use with // apply. 
func ConfigMapNodeConfigSource() *ConfigMapNodeConfigSourceApplyConfiguration { return &ConfigMapNodeConfigSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go index 7297d3a437..308b28f57d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ConfigMapProjectionApplyConfiguration represents an declarative configuration of the ConfigMapProjection type for use +// ConfigMapProjectionApplyConfiguration represents a declarative configuration of the ConfigMapProjection type for use // with apply. type ConfigMapProjectionApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` @@ -26,7 +26,7 @@ type ConfigMapProjectionApplyConfiguration struct { Optional *bool `json:"optional,omitempty"` } -// ConfigMapProjectionApplyConfiguration constructs an declarative configuration of the ConfigMapProjection type for use with +// ConfigMapProjectionApplyConfiguration constructs a declarative configuration of the ConfigMapProjection type for use with // apply. func ConfigMapProjection() *ConfigMapProjectionApplyConfiguration { return &ConfigMapProjectionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go index deaebde319..8e0e8dc0f4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ConfigMapVolumeSourceApplyConfiguration represents an declarative configuration of the ConfigMapVolumeSource type for use +// ConfigMapVolumeSourceApplyConfiguration represents a declarative configuration of the ConfigMapVolumeSource type for use // with apply. type ConfigMapVolumeSourceApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` @@ -27,7 +27,7 @@ type ConfigMapVolumeSourceApplyConfiguration struct { Optional *bool `json:"optional,omitempty"` } -// ConfigMapVolumeSourceApplyConfiguration constructs an declarative configuration of the ConfigMapVolumeSource type for use with +// ConfigMapVolumeSourceApplyConfiguration constructs a declarative configuration of the ConfigMapVolumeSource type for use with // apply. func ConfigMapVolumeSource() *ConfigMapVolumeSourceApplyConfiguration { return &ConfigMapVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go index 32d7156063..eed5f7d027 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" ) -// ContainerApplyConfiguration represents an declarative configuration of the Container type for use +// ContainerApplyConfiguration represents a declarative configuration of the Container type for use // with apply. 
type ContainerApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -51,7 +51,7 @@ type ContainerApplyConfiguration struct { TTY *bool `json:"tty,omitempty"` } -// ContainerApplyConfiguration constructs an declarative configuration of the Container type for use with +// ContainerApplyConfiguration constructs a declarative configuration of the Container type for use with // apply. func Container() *ContainerApplyConfiguration { return &ContainerApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerimage.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerimage.go index d5c874a7ce..bc9428fd10 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerimage.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerimage.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ContainerImageApplyConfiguration represents an declarative configuration of the ContainerImage type for use +// ContainerImageApplyConfiguration represents a declarative configuration of the ContainerImage type for use // with apply. type ContainerImageApplyConfiguration struct { Names []string `json:"names,omitempty"` SizeBytes *int64 `json:"sizeBytes,omitempty"` } -// ContainerImageApplyConfiguration constructs an declarative configuration of the ContainerImage type for use with +// ContainerImageApplyConfiguration constructs a declarative configuration of the ContainerImage type for use with // apply. func ContainerImage() *ContainerImageApplyConfiguration { return &ContainerImageApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go index a23ad9268a..7acc0638f2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ContainerPortApplyConfiguration represents an declarative configuration of the ContainerPort type for use +// ContainerPortApplyConfiguration represents a declarative configuration of the ContainerPort type for use // with apply. type ContainerPortApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -32,7 +32,7 @@ type ContainerPortApplyConfiguration struct { HostIP *string `json:"hostIP,omitempty"` } -// ContainerPortApplyConfiguration constructs an declarative configuration of the ContainerPort type for use with +// ContainerPortApplyConfiguration constructs a declarative configuration of the ContainerPort type for use with // apply. func ContainerPort() *ContainerPortApplyConfiguration { return &ContainerPortApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go index bbbcbc9f13..ea60e3d987 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// ContainerResizePolicyApplyConfiguration represents an declarative configuration of the ContainerResizePolicy type for use +// ContainerResizePolicyApplyConfiguration represents a declarative configuration of the ContainerResizePolicy type for use // with apply. 
type ContainerResizePolicyApplyConfiguration struct { ResourceName *v1.ResourceName `json:"resourceName,omitempty"` RestartPolicy *v1.ResourceResizeRestartPolicy `json:"restartPolicy,omitempty"` } -// ContainerResizePolicyApplyConfiguration constructs an declarative configuration of the ContainerResizePolicy type for use with +// ContainerResizePolicyApplyConfiguration constructs a declarative configuration of the ContainerResizePolicy type for use with // apply. func ContainerResizePolicy() *ContainerResizePolicyApplyConfiguration { return &ContainerResizePolicyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstate.go index 6cbfc7fd9b..b958e01774 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstate.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstate.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ContainerStateApplyConfiguration represents an declarative configuration of the ContainerState type for use +// ContainerStateApplyConfiguration represents a declarative configuration of the ContainerState type for use // with apply. type ContainerStateApplyConfiguration struct { Waiting *ContainerStateWaitingApplyConfiguration `json:"waiting,omitempty"` @@ -26,7 +26,7 @@ type ContainerStateApplyConfiguration struct { Terminated *ContainerStateTerminatedApplyConfiguration `json:"terminated,omitempty"` } -// ContainerStateApplyConfiguration constructs an declarative configuration of the ContainerState type for use with +// ContainerStateApplyConfiguration constructs a declarative configuration of the ContainerState type for use with // apply. func ContainerState() *ContainerStateApplyConfiguration { return &ContainerStateApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go index 6c1d7311e7..6eec9f7f2c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ContainerStateRunningApplyConfiguration represents an declarative configuration of the ContainerStateRunning type for use +// ContainerStateRunningApplyConfiguration represents a declarative configuration of the ContainerStateRunning type for use // with apply. type ContainerStateRunningApplyConfiguration struct { StartedAt *v1.Time `json:"startedAt,omitempty"` } -// ContainerStateRunningApplyConfiguration constructs an declarative configuration of the ContainerStateRunning type for use with +// ContainerStateRunningApplyConfiguration constructs a declarative configuration of the ContainerStateRunning type for use with // apply. 
func ContainerStateRunning() *ContainerStateRunningApplyConfiguration { return &ContainerStateRunningApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go index 0383c9dd9d..b067aa211e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ContainerStateTerminatedApplyConfiguration represents an declarative configuration of the ContainerStateTerminated type for use +// ContainerStateTerminatedApplyConfiguration represents a declarative configuration of the ContainerStateTerminated type for use // with apply. type ContainerStateTerminatedApplyConfiguration struct { ExitCode *int32 `json:"exitCode,omitempty"` @@ -34,7 +34,7 @@ type ContainerStateTerminatedApplyConfiguration struct { ContainerID *string `json:"containerID,omitempty"` } -// ContainerStateTerminatedApplyConfiguration constructs an declarative configuration of the ContainerStateTerminated type for use with +// ContainerStateTerminatedApplyConfiguration constructs a declarative configuration of the ContainerStateTerminated type for use with // apply. func ContainerStateTerminated() *ContainerStateTerminatedApplyConfiguration { return &ContainerStateTerminatedApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatewaiting.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatewaiting.go index e51b778c0d..7756c7da03 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatewaiting.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatewaiting.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ContainerStateWaitingApplyConfiguration represents an declarative configuration of the ContainerStateWaiting type for use +// ContainerStateWaitingApplyConfiguration represents a declarative configuration of the ContainerStateWaiting type for use // with apply. type ContainerStateWaitingApplyConfiguration struct { Reason *string `json:"reason,omitempty"` Message *string `json:"message,omitempty"` } -// ContainerStateWaitingApplyConfiguration constructs an declarative configuration of the ContainerStateWaiting type for use with +// ContainerStateWaitingApplyConfiguration constructs a declarative configuration of the ContainerStateWaiting type for use with // apply. func ContainerStateWaiting() *ContainerStateWaitingApplyConfiguration { return &ContainerStateWaitingApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go index 2b98c4658f..6a28939c2f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go @@ -22,23 +22,26 @@ import ( corev1 "k8s.io/api/core/v1" ) -// ContainerStatusApplyConfiguration represents an declarative configuration of the ContainerStatus type for use +// ContainerStatusApplyConfiguration represents a declarative configuration of the ContainerStatus type for use // with apply. 
type ContainerStatusApplyConfiguration struct { - Name *string `json:"name,omitempty"` - State *ContainerStateApplyConfiguration `json:"state,omitempty"` - LastTerminationState *ContainerStateApplyConfiguration `json:"lastState,omitempty"` - Ready *bool `json:"ready,omitempty"` - RestartCount *int32 `json:"restartCount,omitempty"` - Image *string `json:"image,omitempty"` - ImageID *string `json:"imageID,omitempty"` - ContainerID *string `json:"containerID,omitempty"` - Started *bool `json:"started,omitempty"` - AllocatedResources *corev1.ResourceList `json:"allocatedResources,omitempty"` - Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` -} - -// ContainerStatusApplyConfiguration constructs an declarative configuration of the ContainerStatus type for use with + Name *string `json:"name,omitempty"` + State *ContainerStateApplyConfiguration `json:"state,omitempty"` + LastTerminationState *ContainerStateApplyConfiguration `json:"lastState,omitempty"` + Ready *bool `json:"ready,omitempty"` + RestartCount *int32 `json:"restartCount,omitempty"` + Image *string `json:"image,omitempty"` + ImageID *string `json:"imageID,omitempty"` + ContainerID *string `json:"containerID,omitempty"` + Started *bool `json:"started,omitempty"` + AllocatedResources *corev1.ResourceList `json:"allocatedResources,omitempty"` + Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` + VolumeMounts []VolumeMountStatusApplyConfiguration `json:"volumeMounts,omitempty"` + User *ContainerUserApplyConfiguration `json:"user,omitempty"` + AllocatedResourcesStatus []ResourceStatusApplyConfiguration `json:"allocatedResourcesStatus,omitempty"` +} + +// ContainerStatusApplyConfiguration constructs a declarative configuration of the ContainerStatus type for use with // apply. func ContainerStatus() *ContainerStatusApplyConfiguration { return &ContainerStatusApplyConfiguration{} @@ -131,3 +134,37 @@ func (b *ContainerStatusApplyConfiguration) WithResources(value *ResourceRequire b.Resources = value return b } + +// WithVolumeMounts adds the given value to the VolumeMounts field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the VolumeMounts field. +func (b *ContainerStatusApplyConfiguration) WithVolumeMounts(values ...*VolumeMountStatusApplyConfiguration) *ContainerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithVolumeMounts") + } + b.VolumeMounts = append(b.VolumeMounts, *values[i]) + } + return b +} + +// WithUser sets the User field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the User field is set to the value of the last call. +func (b *ContainerStatusApplyConfiguration) WithUser(value *ContainerUserApplyConfiguration) *ContainerStatusApplyConfiguration { + b.User = value + return b +} + +// WithAllocatedResourcesStatus adds the given value to the AllocatedResourcesStatus field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AllocatedResourcesStatus field. 
+func (b *ContainerStatusApplyConfiguration) WithAllocatedResourcesStatus(values ...*ResourceStatusApplyConfiguration) *ContainerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAllocatedResourcesStatus") + } + b.AllocatedResourcesStatus = append(b.AllocatedResourcesStatus, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containeruser.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containeruser.go new file mode 100644 index 0000000000..34ec8e4146 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containeruser.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ContainerUserApplyConfiguration represents a declarative configuration of the ContainerUser type for use +// with apply. +type ContainerUserApplyConfiguration struct { + Linux *LinuxContainerUserApplyConfiguration `json:"linux,omitempty"` +} + +// ContainerUserApplyConfiguration constructs a declarative configuration of the ContainerUser type for use with +// apply. +func ContainerUser() *ContainerUserApplyConfiguration { + return &ContainerUserApplyConfiguration{} +} + +// WithLinux sets the Linux field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Linux field is set to the value of the last call. +func (b *ContainerUserApplyConfiguration) WithLinux(value *LinuxContainerUserApplyConfiguration) *ContainerUserApplyConfiguration { + b.Linux = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go index 2fc681604e..a614d10805 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CSIPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the CSIPersistentVolumeSource type for use +// CSIPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the CSIPersistentVolumeSource type for use // with apply. type CSIPersistentVolumeSourceApplyConfiguration struct { Driver *string `json:"driver,omitempty"` @@ -33,7 +33,7 @@ type CSIPersistentVolumeSourceApplyConfiguration struct { NodeExpandSecretRef *SecretReferenceApplyConfiguration `json:"nodeExpandSecretRef,omitempty"` } -// CSIPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the CSIPersistentVolumeSource type for use with +// CSIPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the CSIPersistentVolumeSource type for use with // apply. 
func CSIPersistentVolumeSource() *CSIPersistentVolumeSourceApplyConfiguration { return &CSIPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/csivolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/csivolumesource.go index c2a32df8d0..b58d9bbb4b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/csivolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/csivolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CSIVolumeSourceApplyConfiguration represents an declarative configuration of the CSIVolumeSource type for use +// CSIVolumeSourceApplyConfiguration represents a declarative configuration of the CSIVolumeSource type for use // with apply. type CSIVolumeSourceApplyConfiguration struct { Driver *string `json:"driver,omitempty"` @@ -28,7 +28,7 @@ type CSIVolumeSourceApplyConfiguration struct { NodePublishSecretRef *LocalObjectReferenceApplyConfiguration `json:"nodePublishSecretRef,omitempty"` } -// CSIVolumeSourceApplyConfiguration constructs an declarative configuration of the CSIVolumeSource type for use with +// CSIVolumeSourceApplyConfiguration constructs a declarative configuration of the CSIVolumeSource type for use with // apply. func CSIVolumeSource() *CSIVolumeSourceApplyConfiguration { return &CSIVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/daemonendpoint.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/daemonendpoint.go index 13a2e948f1..5be27ec0c5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/daemonendpoint.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/daemonendpoint.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// DaemonEndpointApplyConfiguration represents an declarative configuration of the DaemonEndpoint type for use +// DaemonEndpointApplyConfiguration represents a declarative configuration of the DaemonEndpoint type for use // with apply. type DaemonEndpointApplyConfiguration struct { Port *int32 `json:"Port,omitempty"` } -// DaemonEndpointApplyConfiguration constructs an declarative configuration of the DaemonEndpoint type for use with +// DaemonEndpointApplyConfiguration constructs a declarative configuration of the DaemonEndpoint type for use with // apply. func DaemonEndpoint() *DaemonEndpointApplyConfiguration { return &DaemonEndpointApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapiprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapiprojection.go index f88a87c0b5..ed6b8b1bbe 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapiprojection.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapiprojection.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// DownwardAPIProjectionApplyConfiguration represents an declarative configuration of the DownwardAPIProjection type for use +// DownwardAPIProjectionApplyConfiguration represents a declarative configuration of the DownwardAPIProjection type for use // with apply. 
type DownwardAPIProjectionApplyConfiguration struct { Items []DownwardAPIVolumeFileApplyConfiguration `json:"items,omitempty"` } -// DownwardAPIProjectionApplyConfiguration constructs an declarative configuration of the DownwardAPIProjection type for use with +// DownwardAPIProjectionApplyConfiguration constructs a declarative configuration of the DownwardAPIProjection type for use with // apply. func DownwardAPIProjection() *DownwardAPIProjectionApplyConfiguration { return &DownwardAPIProjectionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumefile.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumefile.go index b25ff25fa9..ec9d013dd9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumefile.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumefile.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// DownwardAPIVolumeFileApplyConfiguration represents an declarative configuration of the DownwardAPIVolumeFile type for use +// DownwardAPIVolumeFileApplyConfiguration represents a declarative configuration of the DownwardAPIVolumeFile type for use // with apply. type DownwardAPIVolumeFileApplyConfiguration struct { Path *string `json:"path,omitempty"` @@ -27,7 +27,7 @@ type DownwardAPIVolumeFileApplyConfiguration struct { Mode *int32 `json:"mode,omitempty"` } -// DownwardAPIVolumeFileApplyConfiguration constructs an declarative configuration of the DownwardAPIVolumeFile type for use with +// DownwardAPIVolumeFileApplyConfiguration constructs a declarative configuration of the DownwardAPIVolumeFile type for use with // apply. func DownwardAPIVolumeFile() *DownwardAPIVolumeFileApplyConfiguration { return &DownwardAPIVolumeFileApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumesource.go index 6913bb5218..eef9d7ef8d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumesource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// DownwardAPIVolumeSourceApplyConfiguration represents an declarative configuration of the DownwardAPIVolumeSource type for use +// DownwardAPIVolumeSourceApplyConfiguration represents a declarative configuration of the DownwardAPIVolumeSource type for use // with apply. type DownwardAPIVolumeSourceApplyConfiguration struct { Items []DownwardAPIVolumeFileApplyConfiguration `json:"items,omitempty"` DefaultMode *int32 `json:"defaultMode,omitempty"` } -// DownwardAPIVolumeSourceApplyConfiguration constructs an declarative configuration of the DownwardAPIVolumeSource type for use with +// DownwardAPIVolumeSourceApplyConfiguration constructs a declarative configuration of the DownwardAPIVolumeSource type for use with // apply. 
func DownwardAPIVolumeSource() *DownwardAPIVolumeSourceApplyConfiguration { return &DownwardAPIVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go index 021280daf6..a619fdb074 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go @@ -23,14 +23,14 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// EmptyDirVolumeSourceApplyConfiguration represents an declarative configuration of the EmptyDirVolumeSource type for use +// EmptyDirVolumeSourceApplyConfiguration represents a declarative configuration of the EmptyDirVolumeSource type for use // with apply. type EmptyDirVolumeSourceApplyConfiguration struct { Medium *v1.StorageMedium `json:"medium,omitempty"` SizeLimit *resource.Quantity `json:"sizeLimit,omitempty"` } -// EmptyDirVolumeSourceApplyConfiguration constructs an declarative configuration of the EmptyDirVolumeSource type for use with +// EmptyDirVolumeSourceApplyConfiguration constructs a declarative configuration of the EmptyDirVolumeSource type for use with // apply. func EmptyDirVolumeSource() *EmptyDirVolumeSourceApplyConfiguration { return &EmptyDirVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointaddress.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointaddress.go index 52a54b6008..536e697a9a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointaddress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointaddress.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// EndpointAddressApplyConfiguration represents an declarative configuration of the EndpointAddress type for use +// EndpointAddressApplyConfiguration represents a declarative configuration of the EndpointAddress type for use // with apply. type EndpointAddressApplyConfiguration struct { IP *string `json:"ip,omitempty"` @@ -27,7 +27,7 @@ type EndpointAddressApplyConfiguration struct { TargetRef *ObjectReferenceApplyConfiguration `json:"targetRef,omitempty"` } -// EndpointAddressApplyConfiguration constructs an declarative configuration of the EndpointAddress type for use with +// EndpointAddressApplyConfiguration constructs a declarative configuration of the EndpointAddress type for use with // apply. func EndpointAddress() *EndpointAddressApplyConfiguration { return &EndpointAddressApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go index cc00d0e491..d0d96230ce 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// EndpointPortApplyConfiguration represents an declarative configuration of the EndpointPort type for use +// EndpointPortApplyConfiguration represents a declarative configuration of the EndpointPort type for use // with apply. 
type EndpointPortApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -31,7 +31,7 @@ type EndpointPortApplyConfiguration struct { AppProtocol *string `json:"appProtocol,omitempty"` } -// EndpointPortApplyConfiguration constructs an declarative configuration of the EndpointPort type for use with +// EndpointPortApplyConfiguration constructs a declarative configuration of the EndpointPort type for use with // apply. func EndpointPort() *EndpointPortApplyConfiguration { return &EndpointPortApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go index b98fed0858..98dc69aaab 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EndpointsApplyConfiguration represents an declarative configuration of the Endpoints type for use +// EndpointsApplyConfiguration represents a declarative configuration of the Endpoints type for use // with apply. type EndpointsApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type EndpointsApplyConfiguration struct { Subsets []EndpointSubsetApplyConfiguration `json:"subsets,omitempty"` } -// Endpoints constructs an declarative configuration of the Endpoints type for use with +// Endpoints constructs a declarative configuration of the Endpoints type for use with // apply. func Endpoints(name, namespace string) *EndpointsApplyConfiguration { b := &EndpointsApplyConfiguration{} @@ -252,3 +252,9 @@ func (b *EndpointsApplyConfiguration) WithSubsets(values ...*EndpointSubsetApply } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EndpointsApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointsubset.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointsubset.go index cd0657a80c..33cd8496a7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointsubset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointsubset.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// EndpointSubsetApplyConfiguration represents an declarative configuration of the EndpointSubset type for use +// EndpointSubsetApplyConfiguration represents a declarative configuration of the EndpointSubset type for use // with apply. type EndpointSubsetApplyConfiguration struct { Addresses []EndpointAddressApplyConfiguration `json:"addresses,omitempty"` @@ -26,7 +26,7 @@ type EndpointSubsetApplyConfiguration struct { Ports []EndpointPortApplyConfiguration `json:"ports,omitempty"` } -// EndpointSubsetApplyConfiguration constructs an declarative configuration of the EndpointSubset type for use with +// EndpointSubsetApplyConfiguration constructs a declarative configuration of the EndpointSubset type for use with // apply. 
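Editorial aside, not part of the vendored patch: the GetName accessor added to EndpointsApplyConfiguration above (and to Event and LimitRange further below) simply reads back the name that the constructor stored in the embedded ObjectMeta. A minimal sketch of how it behaves, assuming this bumped client-go version; the "my-service"/"default" values are made up for illustration.

package main

import (
	"fmt"

	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Endpoints(name, namespace) records both values on the embedded
	// ObjectMetaApplyConfiguration; GetName returns a pointer to that
	// name, or nil if no name was ever set on the builder.
	ep := corev1ac.Endpoints("my-service", "default")
	if name := ep.GetName(); name != nil {
		fmt.Println(*name) // prints "my-service"
	}
}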
func EndpointSubset() *EndpointSubsetApplyConfiguration { return &EndpointSubsetApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/envfromsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/envfromsource.go index 9e46d25ded..7aa181cf1a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/envfromsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/envfromsource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// EnvFromSourceApplyConfiguration represents an declarative configuration of the EnvFromSource type for use +// EnvFromSourceApplyConfiguration represents a declarative configuration of the EnvFromSource type for use // with apply. type EnvFromSourceApplyConfiguration struct { Prefix *string `json:"prefix,omitempty"` @@ -26,7 +26,7 @@ type EnvFromSourceApplyConfiguration struct { SecretRef *SecretEnvSourceApplyConfiguration `json:"secretRef,omitempty"` } -// EnvFromSourceApplyConfiguration constructs an declarative configuration of the EnvFromSource type for use with +// EnvFromSourceApplyConfiguration constructs a declarative configuration of the EnvFromSource type for use with // apply. func EnvFromSource() *EnvFromSourceApplyConfiguration { return &EnvFromSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvar.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvar.go index a83528a28e..5894166ca4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvar.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvar.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// EnvVarApplyConfiguration represents an declarative configuration of the EnvVar type for use +// EnvVarApplyConfiguration represents a declarative configuration of the EnvVar type for use // with apply. type EnvVarApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -26,7 +26,7 @@ type EnvVarApplyConfiguration struct { ValueFrom *EnvVarSourceApplyConfiguration `json:"valueFrom,omitempty"` } -// EnvVarApplyConfiguration constructs an declarative configuration of the EnvVar type for use with +// EnvVarApplyConfiguration constructs a declarative configuration of the EnvVar type for use with // apply. func EnvVar() *EnvVarApplyConfiguration { return &EnvVarApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvarsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvarsource.go index 70c695bd5b..a3a55ea7af 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvarsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvarsource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// EnvVarSourceApplyConfiguration represents an declarative configuration of the EnvVarSource type for use +// EnvVarSourceApplyConfiguration represents a declarative configuration of the EnvVarSource type for use // with apply. type EnvVarSourceApplyConfiguration struct { FieldRef *ObjectFieldSelectorApplyConfiguration `json:"fieldRef,omitempty"` @@ -27,7 +27,7 @@ type EnvVarSourceApplyConfiguration struct { SecretKeyRef *SecretKeySelectorApplyConfiguration `json:"secretKeyRef,omitempty"` } -// EnvVarSourceApplyConfiguration constructs an declarative configuration of the EnvVarSource type for use with +// EnvVarSourceApplyConfiguration constructs a declarative configuration of the EnvVarSource type for use with // apply. 
func EnvVarSource() *EnvVarSourceApplyConfiguration { return &EnvVarSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go index 5fa79a246e..a15ac6ec34 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go @@ -22,14 +22,14 @@ import ( corev1 "k8s.io/api/core/v1" ) -// EphemeralContainerApplyConfiguration represents an declarative configuration of the EphemeralContainer type for use +// EphemeralContainerApplyConfiguration represents a declarative configuration of the EphemeralContainer type for use // with apply. type EphemeralContainerApplyConfiguration struct { EphemeralContainerCommonApplyConfiguration `json:",inline"` TargetContainerName *string `json:"targetContainerName,omitempty"` } -// EphemeralContainerApplyConfiguration constructs an declarative configuration of the EphemeralContainer type for use with +// EphemeralContainerApplyConfiguration constructs a declarative configuration of the EphemeralContainer type for use with // apply. func EphemeralContainer() *EphemeralContainerApplyConfiguration { return &EphemeralContainerApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go index 8cded29a9e..d5d13d27a0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" ) -// EphemeralContainerCommonApplyConfiguration represents an declarative configuration of the EphemeralContainerCommon type for use +// EphemeralContainerCommonApplyConfiguration represents a declarative configuration of the EphemeralContainerCommon type for use // with apply. type EphemeralContainerCommonApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -51,7 +51,7 @@ type EphemeralContainerCommonApplyConfiguration struct { TTY *bool `json:"tty,omitempty"` } -// EphemeralContainerCommonApplyConfiguration constructs an declarative configuration of the EphemeralContainerCommon type for use with +// EphemeralContainerCommonApplyConfiguration constructs a declarative configuration of the EphemeralContainerCommon type for use with // apply. func EphemeralContainerCommon() *EphemeralContainerCommonApplyConfiguration { return &EphemeralContainerCommonApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralvolumesource.go index 31859404cc..d2c8c6722e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralvolumesource.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// EphemeralVolumeSourceApplyConfiguration represents an declarative configuration of the EphemeralVolumeSource type for use +// EphemeralVolumeSourceApplyConfiguration represents a declarative configuration of the EphemeralVolumeSource type for use // with apply. 
type EphemeralVolumeSourceApplyConfiguration struct { VolumeClaimTemplate *PersistentVolumeClaimTemplateApplyConfiguration `json:"volumeClaimTemplate,omitempty"` } -// EphemeralVolumeSourceApplyConfiguration constructs an declarative configuration of the EphemeralVolumeSource type for use with +// EphemeralVolumeSourceApplyConfiguration constructs a declarative configuration of the EphemeralVolumeSource type for use with // apply. func EphemeralVolumeSource() *EphemeralVolumeSourceApplyConfiguration { return &EphemeralVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go index 60aff6b5b2..65d6577ab6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EventApplyConfiguration represents an declarative configuration of the Event type for use +// EventApplyConfiguration represents a declarative configuration of the Event type for use // with apply. type EventApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -48,7 +48,7 @@ type EventApplyConfiguration struct { ReportingInstance *string `json:"reportingInstance,omitempty"` } -// Event constructs an declarative configuration of the Event type for use with +// Event constructs a declarative configuration of the Event type for use with // apply. func Event(name, namespace string) *EventApplyConfiguration { b := &EventApplyConfiguration{} @@ -364,3 +364,9 @@ func (b *EventApplyConfiguration) WithReportingInstance(value string) *EventAppl b.ReportingInstance = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EventApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go index e66fb41271..18069c0d1b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EventSeriesApplyConfiguration represents an declarative configuration of the EventSeries type for use +// EventSeriesApplyConfiguration represents a declarative configuration of the EventSeries type for use // with apply. type EventSeriesApplyConfiguration struct { Count *int32 `json:"count,omitempty"` LastObservedTime *v1.MicroTime `json:"lastObservedTime,omitempty"` } -// EventSeriesApplyConfiguration constructs an declarative configuration of the EventSeries type for use with +// EventSeriesApplyConfiguration constructs a declarative configuration of the EventSeries type for use with // apply. func EventSeries() *EventSeriesApplyConfiguration { return &EventSeriesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventsource.go index 2eb4aa8e44..97edb04931 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventsource.go @@ -18,14 +18,14 @@ limitations under the License. 
package v1 -// EventSourceApplyConfiguration represents an declarative configuration of the EventSource type for use +// EventSourceApplyConfiguration represents a declarative configuration of the EventSource type for use // with apply. type EventSourceApplyConfiguration struct { Component *string `json:"component,omitempty"` Host *string `json:"host,omitempty"` } -// EventSourceApplyConfiguration constructs an declarative configuration of the EventSource type for use with +// EventSourceApplyConfiguration constructs a declarative configuration of the EventSource type for use with // apply. func EventSource() *EventSourceApplyConfiguration { return &EventSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/execaction.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/execaction.go index 1df52144d7..b7208a91cf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/execaction.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/execaction.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// ExecActionApplyConfiguration represents an declarative configuration of the ExecAction type for use +// ExecActionApplyConfiguration represents a declarative configuration of the ExecAction type for use // with apply. type ExecActionApplyConfiguration struct { Command []string `json:"command,omitempty"` } -// ExecActionApplyConfiguration constructs an declarative configuration of the ExecAction type for use with +// ExecActionApplyConfiguration constructs a declarative configuration of the ExecAction type for use with // apply. func ExecAction() *ExecActionApplyConfiguration { return &ExecActionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/fcvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/fcvolumesource.go index 43069de9a6..000ff2cc62 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/fcvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/fcvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// FCVolumeSourceApplyConfiguration represents an declarative configuration of the FCVolumeSource type for use +// FCVolumeSourceApplyConfiguration represents a declarative configuration of the FCVolumeSource type for use // with apply. type FCVolumeSourceApplyConfiguration struct { TargetWWNs []string `json:"targetWWNs,omitempty"` @@ -28,7 +28,7 @@ type FCVolumeSourceApplyConfiguration struct { WWIDs []string `json:"wwids,omitempty"` } -// FCVolumeSourceApplyConfiguration constructs an declarative configuration of the FCVolumeSource type for use with +// FCVolumeSourceApplyConfiguration constructs a declarative configuration of the FCVolumeSource type for use with // apply. func FCVolumeSource() *FCVolumeSourceApplyConfiguration { return &FCVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexpersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexpersistentvolumesource.go index 47e7c746ee..355c2c82d0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexpersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexpersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// FlexPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the FlexPersistentVolumeSource type for use +// FlexPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the FlexPersistentVolumeSource type for use // with apply. type FlexPersistentVolumeSourceApplyConfiguration struct { Driver *string `json:"driver,omitempty"` @@ -28,7 +28,7 @@ type FlexPersistentVolumeSourceApplyConfiguration struct { Options map[string]string `json:"options,omitempty"` } -// FlexPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the FlexPersistentVolumeSource type for use with +// FlexPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the FlexPersistentVolumeSource type for use with // apply. func FlexPersistentVolumeSource() *FlexPersistentVolumeSourceApplyConfiguration { return &FlexPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexvolumesource.go index 7c09516a98..08ae9e1bea 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// FlexVolumeSourceApplyConfiguration represents an declarative configuration of the FlexVolumeSource type for use +// FlexVolumeSourceApplyConfiguration represents a declarative configuration of the FlexVolumeSource type for use // with apply. type FlexVolumeSourceApplyConfiguration struct { Driver *string `json:"driver,omitempty"` @@ -28,7 +28,7 @@ type FlexVolumeSourceApplyConfiguration struct { Options map[string]string `json:"options,omitempty"` } -// FlexVolumeSourceApplyConfiguration constructs an declarative configuration of the FlexVolumeSource type for use with +// FlexVolumeSourceApplyConfiguration constructs a declarative configuration of the FlexVolumeSource type for use with // apply. func FlexVolumeSource() *FlexVolumeSourceApplyConfiguration { return &FlexVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/flockervolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/flockervolumesource.go index 74896d55ac..e4ecbba0e4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/flockervolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/flockervolumesource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// FlockerVolumeSourceApplyConfiguration represents an declarative configuration of the FlockerVolumeSource type for use +// FlockerVolumeSourceApplyConfiguration represents a declarative configuration of the FlockerVolumeSource type for use // with apply. type FlockerVolumeSourceApplyConfiguration struct { DatasetName *string `json:"datasetName,omitempty"` DatasetUUID *string `json:"datasetUUID,omitempty"` } -// FlockerVolumeSourceApplyConfiguration constructs an declarative configuration of the FlockerVolumeSource type for use with +// FlockerVolumeSourceApplyConfiguration constructs a declarative configuration of the FlockerVolumeSource type for use with // apply. 
func FlockerVolumeSource() *FlockerVolumeSourceApplyConfiguration { return &FlockerVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/gcepersistentdiskvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/gcepersistentdiskvolumesource.go index 0869d3eaa6..56c4d03fa2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/gcepersistentdiskvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/gcepersistentdiskvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// GCEPersistentDiskVolumeSourceApplyConfiguration represents an declarative configuration of the GCEPersistentDiskVolumeSource type for use +// GCEPersistentDiskVolumeSourceApplyConfiguration represents a declarative configuration of the GCEPersistentDiskVolumeSource type for use // with apply. type GCEPersistentDiskVolumeSourceApplyConfiguration struct { PDName *string `json:"pdName,omitempty"` @@ -27,7 +27,7 @@ type GCEPersistentDiskVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// GCEPersistentDiskVolumeSourceApplyConfiguration constructs an declarative configuration of the GCEPersistentDiskVolumeSource type for use with +// GCEPersistentDiskVolumeSourceApplyConfiguration constructs a declarative configuration of the GCEPersistentDiskVolumeSource type for use with // apply. func GCEPersistentDiskVolumeSource() *GCEPersistentDiskVolumeSourceApplyConfiguration { return &GCEPersistentDiskVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/gitrepovolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/gitrepovolumesource.go index 825e02e4e4..4ed92317c8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/gitrepovolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/gitrepovolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// GitRepoVolumeSourceApplyConfiguration represents an declarative configuration of the GitRepoVolumeSource type for use +// GitRepoVolumeSourceApplyConfiguration represents a declarative configuration of the GitRepoVolumeSource type for use // with apply. type GitRepoVolumeSourceApplyConfiguration struct { Repository *string `json:"repository,omitempty"` @@ -26,7 +26,7 @@ type GitRepoVolumeSourceApplyConfiguration struct { Directory *string `json:"directory,omitempty"` } -// GitRepoVolumeSourceApplyConfiguration constructs an declarative configuration of the GitRepoVolumeSource type for use with +// GitRepoVolumeSourceApplyConfiguration constructs a declarative configuration of the GitRepoVolumeSource type for use with // apply. func GitRepoVolumeSource() *GitRepoVolumeSourceApplyConfiguration { return &GitRepoVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfspersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfspersistentvolumesource.go index 21a3925e52..c9a23ca5d7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfspersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfspersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// GlusterfsPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the GlusterfsPersistentVolumeSource type for use +// GlusterfsPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the GlusterfsPersistentVolumeSource type for use // with apply. type GlusterfsPersistentVolumeSourceApplyConfiguration struct { EndpointsName *string `json:"endpoints,omitempty"` @@ -27,7 +27,7 @@ type GlusterfsPersistentVolumeSourceApplyConfiguration struct { EndpointsNamespace *string `json:"endpointsNamespace,omitempty"` } -// GlusterfsPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the GlusterfsPersistentVolumeSource type for use with +// GlusterfsPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the GlusterfsPersistentVolumeSource type for use with // apply. func GlusterfsPersistentVolumeSource() *GlusterfsPersistentVolumeSourceApplyConfiguration { return &GlusterfsPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfsvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfsvolumesource.go index 7ce6f0b399..8c27f8c70d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfsvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfsvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// GlusterfsVolumeSourceApplyConfiguration represents an declarative configuration of the GlusterfsVolumeSource type for use +// GlusterfsVolumeSourceApplyConfiguration represents a declarative configuration of the GlusterfsVolumeSource type for use // with apply. type GlusterfsVolumeSourceApplyConfiguration struct { EndpointsName *string `json:"endpoints,omitempty"` @@ -26,7 +26,7 @@ type GlusterfsVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// GlusterfsVolumeSourceApplyConfiguration constructs an declarative configuration of the GlusterfsVolumeSource type for use with +// GlusterfsVolumeSourceApplyConfiguration constructs a declarative configuration of the GlusterfsVolumeSource type for use with // apply. func GlusterfsVolumeSource() *GlusterfsVolumeSourceApplyConfiguration { return &GlusterfsVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go index f94e55937a..0f3a886714 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// GRPCActionApplyConfiguration represents an declarative configuration of the GRPCAction type for use +// GRPCActionApplyConfiguration represents a declarative configuration of the GRPCAction type for use // with apply. type GRPCActionApplyConfiguration struct { Port *int32 `json:"port,omitempty"` Service *string `json:"service,omitempty"` } -// GRPCActionApplyConfiguration constructs an declarative configuration of the GRPCAction type for use with +// GRPCActionApplyConfiguration constructs a declarative configuration of the GRPCAction type for use with // apply. 
func GRPCAction() *GRPCActionApplyConfiguration { return &GRPCActionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostalias.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostalias.go index 861508ef53..ec9ea17413 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostalias.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostalias.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// HostAliasApplyConfiguration represents an declarative configuration of the HostAlias type for use +// HostAliasApplyConfiguration represents a declarative configuration of the HostAlias type for use // with apply. type HostAliasApplyConfiguration struct { IP *string `json:"ip,omitempty"` Hostnames []string `json:"hostnames,omitempty"` } -// HostAliasApplyConfiguration constructs an declarative configuration of the HostAlias type for use with +// HostAliasApplyConfiguration constructs a declarative configuration of the HostAlias type for use with // apply. func HostAlias() *HostAliasApplyConfiguration { return &HostAliasApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go index c2a42cf747..439b5ce2d6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// HostIPApplyConfiguration represents an declarative configuration of the HostIP type for use +// HostIPApplyConfiguration represents a declarative configuration of the HostIP type for use // with apply. type HostIPApplyConfiguration struct { IP *string `json:"ip,omitempty"` } -// HostIPApplyConfiguration constructs an declarative configuration of the HostIP type for use with +// HostIPApplyConfiguration constructs a declarative configuration of the HostIP type for use with // apply. func HostIP() *HostIPApplyConfiguration { return &HostIPApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go index 8b15689eef..10dfedfdef 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// HostPathVolumeSourceApplyConfiguration represents an declarative configuration of the HostPathVolumeSource type for use +// HostPathVolumeSourceApplyConfiguration represents a declarative configuration of the HostPathVolumeSource type for use // with apply. type HostPathVolumeSourceApplyConfiguration struct { Path *string `json:"path,omitempty"` Type *v1.HostPathType `json:"type,omitempty"` } -// HostPathVolumeSourceApplyConfiguration constructs an declarative configuration of the HostPathVolumeSource type for use with +// HostPathVolumeSourceApplyConfiguration constructs a declarative configuration of the HostPathVolumeSource type for use with // apply. 
func HostPathVolumeSource() *HostPathVolumeSourceApplyConfiguration { return &HostPathVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go index e4ecdd4303..5ecbc27fea 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go @@ -23,7 +23,7 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// HTTPGetActionApplyConfiguration represents an declarative configuration of the HTTPGetAction type for use +// HTTPGetActionApplyConfiguration represents a declarative configuration of the HTTPGetAction type for use // with apply. type HTTPGetActionApplyConfiguration struct { Path *string `json:"path,omitempty"` @@ -33,7 +33,7 @@ type HTTPGetActionApplyConfiguration struct { HTTPHeaders []HTTPHeaderApplyConfiguration `json:"httpHeaders,omitempty"` } -// HTTPGetActionApplyConfiguration constructs an declarative configuration of the HTTPGetAction type for use with +// HTTPGetActionApplyConfiguration constructs a declarative configuration of the HTTPGetAction type for use with // apply. func HTTPGetAction() *HTTPGetActionApplyConfiguration { return &HTTPGetActionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpheader.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpheader.go index d55f36bfd2..2526371669 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpheader.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpheader.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// HTTPHeaderApplyConfiguration represents an declarative configuration of the HTTPHeader type for use +// HTTPHeaderApplyConfiguration represents a declarative configuration of the HTTPHeader type for use // with apply. type HTTPHeaderApplyConfiguration struct { Name *string `json:"name,omitempty"` Value *string `json:"value,omitempty"` } -// HTTPHeaderApplyConfiguration constructs an declarative configuration of the HTTPHeader type for use with +// HTTPHeaderApplyConfiguration constructs a declarative configuration of the HTTPHeader type for use with // apply. func HTTPHeader() *HTTPHeaderApplyConfiguration { return &HTTPHeaderApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go new file mode 100644 index 0000000000..340f150400 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ImageVolumeSourceApplyConfiguration represents a declarative configuration of the ImageVolumeSource type for use +// with apply. 
+type ImageVolumeSourceApplyConfiguration struct { + Reference *string `json:"reference,omitempty"` + PullPolicy *v1.PullPolicy `json:"pullPolicy,omitempty"` +} + +// ImageVolumeSourceApplyConfiguration constructs a declarative configuration of the ImageVolumeSource type for use with +// apply. +func ImageVolumeSource() *ImageVolumeSourceApplyConfiguration { + return &ImageVolumeSourceApplyConfiguration{} +} + +// WithReference sets the Reference field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reference field is set to the value of the last call. +func (b *ImageVolumeSourceApplyConfiguration) WithReference(value string) *ImageVolumeSourceApplyConfiguration { + b.Reference = &value + return b +} + +// WithPullPolicy sets the PullPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PullPolicy field is set to the value of the last call. +func (b *ImageVolumeSourceApplyConfiguration) WithPullPolicy(value v1.PullPolicy) *ImageVolumeSourceApplyConfiguration { + b.PullPolicy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsipersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsipersistentvolumesource.go index c7b248181a..42f420c568 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsipersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsipersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ISCSIPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the ISCSIPersistentVolumeSource type for use +// ISCSIPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the ISCSIPersistentVolumeSource type for use // with apply. type ISCSIPersistentVolumeSourceApplyConfiguration struct { TargetPortal *string `json:"targetPortal,omitempty"` @@ -34,7 +34,7 @@ type ISCSIPersistentVolumeSourceApplyConfiguration struct { InitiatorName *string `json:"initiatorName,omitempty"` } -// ISCSIPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the ISCSIPersistentVolumeSource type for use with +// ISCSIPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the ISCSIPersistentVolumeSource type for use with // apply. func ISCSIPersistentVolumeSource() *ISCSIPersistentVolumeSourceApplyConfiguration { return &ISCSIPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsivolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsivolumesource.go index c95941a9c7..61055434bc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsivolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsivolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ISCSIVolumeSourceApplyConfiguration represents an declarative configuration of the ISCSIVolumeSource type for use +// ISCSIVolumeSourceApplyConfiguration represents a declarative configuration of the ISCSIVolumeSource type for use // with apply. 
type ISCSIVolumeSourceApplyConfiguration struct { TargetPortal *string `json:"targetPortal,omitempty"` @@ -34,7 +34,7 @@ type ISCSIVolumeSourceApplyConfiguration struct { InitiatorName *string `json:"initiatorName,omitempty"` } -// ISCSIVolumeSourceApplyConfiguration constructs an declarative configuration of the ISCSIVolumeSource type for use with +// ISCSIVolumeSourceApplyConfiguration constructs a declarative configuration of the ISCSIVolumeSource type for use with // apply. func ISCSIVolumeSource() *ISCSIVolumeSourceApplyConfiguration { return &ISCSIVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/keytopath.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/keytopath.go index d58676d34c..c961b07955 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/keytopath.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/keytopath.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// KeyToPathApplyConfiguration represents an declarative configuration of the KeyToPath type for use +// KeyToPathApplyConfiguration represents a declarative configuration of the KeyToPath type for use // with apply. type KeyToPathApplyConfiguration struct { Key *string `json:"key,omitempty"` @@ -26,7 +26,7 @@ type KeyToPathApplyConfiguration struct { Mode *int32 `json:"mode,omitempty"` } -// KeyToPathApplyConfiguration constructs an declarative configuration of the KeyToPath type for use with +// KeyToPathApplyConfiguration constructs a declarative configuration of the KeyToPath type for use with // apply. func KeyToPath() *KeyToPathApplyConfiguration { return &KeyToPathApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go index db9abf8af7..e37a30f597 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// LifecycleApplyConfiguration represents an declarative configuration of the Lifecycle type for use +// LifecycleApplyConfiguration represents a declarative configuration of the Lifecycle type for use // with apply. type LifecycleApplyConfiguration struct { PostStart *LifecycleHandlerApplyConfiguration `json:"postStart,omitempty"` PreStop *LifecycleHandlerApplyConfiguration `json:"preStop,omitempty"` } -// LifecycleApplyConfiguration constructs an declarative configuration of the Lifecycle type for use with +// LifecycleApplyConfiguration constructs a declarative configuration of the Lifecycle type for use with // apply. func Lifecycle() *LifecycleApplyConfiguration { return &LifecycleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go index e4ae9c49f7..b7c706d58d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// LifecycleHandlerApplyConfiguration represents an declarative configuration of the LifecycleHandler type for use +// LifecycleHandlerApplyConfiguration represents a declarative configuration of the LifecycleHandler type for use // with apply. 
type LifecycleHandlerApplyConfiguration struct { Exec *ExecActionApplyConfiguration `json:"exec,omitempty"` @@ -27,7 +27,7 @@ type LifecycleHandlerApplyConfiguration struct { Sleep *SleepActionApplyConfiguration `json:"sleep,omitempty"` } -// LifecycleHandlerApplyConfiguration constructs an declarative configuration of the LifecycleHandler type for use with +// LifecycleHandlerApplyConfiguration constructs a declarative configuration of the LifecycleHandler type for use with // apply. func LifecycleHandler() *LifecycleHandlerApplyConfiguration { return &LifecycleHandlerApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go index eaf635c76a..7770200a0a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// LimitRangeApplyConfiguration represents an declarative configuration of the LimitRange type for use +// LimitRangeApplyConfiguration represents a declarative configuration of the LimitRange type for use // with apply. type LimitRangeApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type LimitRangeApplyConfiguration struct { Spec *LimitRangeSpecApplyConfiguration `json:"spec,omitempty"` } -// LimitRange constructs an declarative configuration of the LimitRange type for use with +// LimitRange constructs a declarative configuration of the LimitRange type for use with // apply. func LimitRange(name, namespace string) *LimitRangeApplyConfiguration { b := &LimitRangeApplyConfiguration{} @@ -247,3 +247,9 @@ func (b *LimitRangeApplyConfiguration) WithSpec(value *LimitRangeSpecApplyConfig b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *LimitRangeApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go index 084650fdaa..61d8344e80 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// LimitRangeItemApplyConfiguration represents an declarative configuration of the LimitRangeItem type for use +// LimitRangeItemApplyConfiguration represents a declarative configuration of the LimitRangeItem type for use // with apply. type LimitRangeItemApplyConfiguration struct { Type *v1.LimitType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type LimitRangeItemApplyConfiguration struct { MaxLimitRequestRatio *v1.ResourceList `json:"maxLimitRequestRatio,omitempty"` } -// LimitRangeItemApplyConfiguration constructs an declarative configuration of the LimitRangeItem type for use with +// LimitRangeItemApplyConfiguration constructs a declarative configuration of the LimitRangeItem type for use with // apply. 
func LimitRangeItem() *LimitRangeItemApplyConfiguration { return &LimitRangeItemApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangespec.go index 5eee5c498e..8d69c1c0cd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangespec.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// LimitRangeSpecApplyConfiguration represents an declarative configuration of the LimitRangeSpec type for use +// LimitRangeSpecApplyConfiguration represents a declarative configuration of the LimitRangeSpec type for use // with apply. type LimitRangeSpecApplyConfiguration struct { Limits []LimitRangeItemApplyConfiguration `json:"limits,omitempty"` } -// LimitRangeSpecApplyConfiguration constructs an declarative configuration of the LimitRangeSpec type for use with +// LimitRangeSpecApplyConfiguration constructs a declarative configuration of the LimitRangeSpec type for use with // apply. func LimitRangeSpec() *LimitRangeSpecApplyConfiguration { return &LimitRangeSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/linuxcontaineruser.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/linuxcontaineruser.go new file mode 100644 index 0000000000..fbab4815ab --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/linuxcontaineruser.go @@ -0,0 +1,59 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// LinuxContainerUserApplyConfiguration represents a declarative configuration of the LinuxContainerUser type for use +// with apply. +type LinuxContainerUserApplyConfiguration struct { + UID *int64 `json:"uid,omitempty"` + GID *int64 `json:"gid,omitempty"` + SupplementalGroups []int64 `json:"supplementalGroups,omitempty"` +} + +// LinuxContainerUserApplyConfiguration constructs a declarative configuration of the LinuxContainerUser type for use with +// apply. +func LinuxContainerUser() *LinuxContainerUserApplyConfiguration { + return &LinuxContainerUserApplyConfiguration{} +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *LinuxContainerUserApplyConfiguration) WithUID(value int64) *LinuxContainerUserApplyConfiguration { + b.UID = &value + return b +} + +// WithGID sets the GID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GID field is set to the value of the last call. 
+func (b *LinuxContainerUserApplyConfiguration) WithGID(value int64) *LinuxContainerUserApplyConfiguration { + b.GID = &value + return b +} + +// WithSupplementalGroups adds the given value to the SupplementalGroups field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the SupplementalGroups field. +func (b *LinuxContainerUserApplyConfiguration) WithSupplementalGroups(values ...int64) *LinuxContainerUserApplyConfiguration { + for i := range values { + b.SupplementalGroups = append(b.SupplementalGroups, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go index a48dac6810..1a7d998152 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// LoadBalancerIngressApplyConfiguration represents an declarative configuration of the LoadBalancerIngress type for use +// LoadBalancerIngressApplyConfiguration represents a declarative configuration of the LoadBalancerIngress type for use // with apply. type LoadBalancerIngressApplyConfiguration struct { IP *string `json:"ip,omitempty"` @@ -31,7 +31,7 @@ type LoadBalancerIngressApplyConfiguration struct { Ports []PortStatusApplyConfiguration `json:"ports,omitempty"` } -// LoadBalancerIngressApplyConfiguration constructs an declarative configuration of the LoadBalancerIngress type for use with +// LoadBalancerIngressApplyConfiguration constructs a declarative configuration of the LoadBalancerIngress type for use with // apply. func LoadBalancerIngress() *LoadBalancerIngressApplyConfiguration { return &LoadBalancerIngressApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalancerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalancerstatus.go index 2fcc0cad18..bb3d616c15 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalancerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalancerstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// LoadBalancerStatusApplyConfiguration represents an declarative configuration of the LoadBalancerStatus type for use +// LoadBalancerStatusApplyConfiguration represents a declarative configuration of the LoadBalancerStatus type for use // with apply. type LoadBalancerStatusApplyConfiguration struct { Ingress []LoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"` } -// LoadBalancerStatusApplyConfiguration constructs an declarative configuration of the LoadBalancerStatus type for use with +// LoadBalancerStatusApplyConfiguration constructs a declarative configuration of the LoadBalancerStatus type for use with // apply. 
func LoadBalancerStatus() *LoadBalancerStatusApplyConfiguration { return &LoadBalancerStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/localobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/localobjectreference.go index 7662e32b31..c55d6803dc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/localobjectreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/localobjectreference.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// LocalObjectReferenceApplyConfiguration represents an declarative configuration of the LocalObjectReference type for use +// LocalObjectReferenceApplyConfiguration represents a declarative configuration of the LocalObjectReference type for use // with apply. type LocalObjectReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// LocalObjectReferenceApplyConfiguration constructs an declarative configuration of the LocalObjectReference type for use with +// LocalObjectReferenceApplyConfiguration constructs a declarative configuration of the LocalObjectReference type for use with // apply. func LocalObjectReference() *LocalObjectReferenceApplyConfiguration { return &LocalObjectReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/localvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/localvolumesource.go index 5d289bd12d..db711d9934 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/localvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/localvolumesource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// LocalVolumeSourceApplyConfiguration represents an declarative configuration of the LocalVolumeSource type for use +// LocalVolumeSourceApplyConfiguration represents a declarative configuration of the LocalVolumeSource type for use // with apply. type LocalVolumeSourceApplyConfiguration struct { Path *string `json:"path,omitempty"` FSType *string `json:"fsType,omitempty"` } -// LocalVolumeSourceApplyConfiguration constructs an declarative configuration of the LocalVolumeSource type for use with +// LocalVolumeSourceApplyConfiguration constructs a declarative configuration of the LocalVolumeSource type for use with // apply. func LocalVolumeSource() *LocalVolumeSourceApplyConfiguration { return &LocalVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go index 4ff1d040cf..704c321652 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// ModifyVolumeStatusApplyConfiguration represents an declarative configuration of the ModifyVolumeStatus type for use +// ModifyVolumeStatusApplyConfiguration represents a declarative configuration of the ModifyVolumeStatus type for use // with apply. 
type ModifyVolumeStatusApplyConfiguration struct { TargetVolumeAttributesClassName *string `json:"targetVolumeAttributesClassName,omitempty"` Status *v1.PersistentVolumeClaimModifyVolumeStatus `json:"status,omitempty"` } -// ModifyVolumeStatusApplyConfiguration constructs an declarative configuration of the ModifyVolumeStatus type for use with +// ModifyVolumeStatusApplyConfiguration constructs a declarative configuration of the ModifyVolumeStatus type for use with // apply. func ModifyVolumeStatus() *ModifyVolumeStatusApplyConfiguration { return &ModifyVolumeStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go index bdc9ef167c..0b77af183d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NamespaceApplyConfiguration represents an declarative configuration of the Namespace type for use +// NamespaceApplyConfiguration represents a declarative configuration of the Namespace type for use // with apply. type NamespaceApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type NamespaceApplyConfiguration struct { Status *NamespaceStatusApplyConfiguration `json:"status,omitempty"` } -// Namespace constructs an declarative configuration of the Namespace type for use with +// Namespace constructs a declarative configuration of the Namespace type for use with // apply. func Namespace(name string) *NamespaceApplyConfiguration { b := &NamespaceApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *NamespaceApplyConfiguration) WithStatus(value *NamespaceStatusApplyConf b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *NamespaceApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go index 8651978b0f..9784c3e6f4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// NamespaceConditionApplyConfiguration represents an declarative configuration of the NamespaceCondition type for use +// NamespaceConditionApplyConfiguration represents a declarative configuration of the NamespaceCondition type for use // with apply. type NamespaceConditionApplyConfiguration struct { Type *v1.NamespaceConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type NamespaceConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// NamespaceConditionApplyConfiguration constructs an declarative configuration of the NamespaceCondition type for use with +// NamespaceConditionApplyConfiguration constructs a declarative configuration of the NamespaceCondition type for use with // apply. 
func NamespaceCondition() *NamespaceConditionApplyConfiguration { return &NamespaceConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go index 9bc02d1fa2..6d7b7f1f95 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/api/core/v1" ) -// NamespaceSpecApplyConfiguration represents an declarative configuration of the NamespaceSpec type for use +// NamespaceSpecApplyConfiguration represents a declarative configuration of the NamespaceSpec type for use // with apply. type NamespaceSpecApplyConfiguration struct { Finalizers []v1.FinalizerName `json:"finalizers,omitempty"` } -// NamespaceSpecApplyConfiguration constructs an declarative configuration of the NamespaceSpec type for use with +// NamespaceSpecApplyConfiguration constructs a declarative configuration of the NamespaceSpec type for use with // apply. func NamespaceSpec() *NamespaceSpecApplyConfiguration { return &NamespaceSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go index d950fd3161..314908109f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// NamespaceStatusApplyConfiguration represents an declarative configuration of the NamespaceStatus type for use +// NamespaceStatusApplyConfiguration represents a declarative configuration of the NamespaceStatus type for use // with apply. type NamespaceStatusApplyConfiguration struct { Phase *v1.NamespacePhase `json:"phase,omitempty"` Conditions []NamespaceConditionApplyConfiguration `json:"conditions,omitempty"` } -// NamespaceStatusApplyConfiguration constructs an declarative configuration of the NamespaceStatus type for use with +// NamespaceStatusApplyConfiguration constructs a declarative configuration of the NamespaceStatus type for use with // apply. func NamespaceStatus() *NamespaceStatusApplyConfiguration { return &NamespaceStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nfsvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nfsvolumesource.go index cb300ee81e..ed49a87a9e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nfsvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nfsvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// NFSVolumeSourceApplyConfiguration represents an declarative configuration of the NFSVolumeSource type for use +// NFSVolumeSourceApplyConfiguration represents a declarative configuration of the NFSVolumeSource type for use // with apply. type NFSVolumeSourceApplyConfiguration struct { Server *string `json:"server,omitempty"` @@ -26,7 +26,7 @@ type NFSVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// NFSVolumeSourceApplyConfiguration constructs an declarative configuration of the NFSVolumeSource type for use with +// NFSVolumeSourceApplyConfiguration constructs a declarative configuration of the NFSVolumeSource type for use with // apply. 
func NFSVolumeSource() *NFSVolumeSourceApplyConfiguration { return &NFSVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go index 047f4ac1cb..ef13392591 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NodeApplyConfiguration represents an declarative configuration of the Node type for use +// NodeApplyConfiguration represents a declarative configuration of the Node type for use // with apply. type NodeApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type NodeApplyConfiguration struct { Status *NodeStatusApplyConfiguration `json:"status,omitempty"` } -// Node constructs an declarative configuration of the Node type for use with +// Node constructs a declarative configuration of the Node type for use with // apply. func Node(name string) *NodeApplyConfiguration { b := &NodeApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *NodeApplyConfiguration) WithStatus(value *NodeStatusApplyConfiguration) b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *NodeApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go index a1d4fbe04e..a9cb036c54 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// NodeAddressApplyConfiguration represents an declarative configuration of the NodeAddress type for use +// NodeAddressApplyConfiguration represents a declarative configuration of the NodeAddress type for use // with apply. type NodeAddressApplyConfiguration struct { Type *v1.NodeAddressType `json:"type,omitempty"` Address *string `json:"address,omitempty"` } -// NodeAddressApplyConfiguration constructs an declarative configuration of the NodeAddress type for use with +// NodeAddressApplyConfiguration constructs a declarative configuration of the NodeAddress type for use with // apply. func NodeAddress() *NodeAddressApplyConfiguration { return &NodeAddressApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaffinity.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaffinity.go index e28ced6e46..5d11d746dc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaffinity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaffinity.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// NodeAffinityApplyConfiguration represents an declarative configuration of the NodeAffinity type for use +// NodeAffinityApplyConfiguration represents a declarative configuration of the NodeAffinity type for use // with apply. 
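Several of the hunks above add a GetName accessor to the generated apply configurations (LimitRange, Namespace, Node, with more of the same further down). A minimal usage sketch, relying only on the constructors and the new accessors shown in this diff; the object names are illustrative:

package main

import (
	"fmt"

	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// The constructors record the name in the embedded ObjectMeta; the new
	// GetName accessor reads it back without reaching into the embedded
	// ObjectMetaApplyConfiguration directly.
	ns := corev1ac.Namespace("team-a")
	node := corev1ac.Node("worker-1")

	fmt.Println(*ns.GetName(), *node.GetName())
}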
type NodeAffinityApplyConfiguration struct { RequiredDuringSchedulingIgnoredDuringExecution *NodeSelectorApplyConfiguration `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTermApplyConfiguration `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"` } -// NodeAffinityApplyConfiguration constructs an declarative configuration of the NodeAffinity type for use with +// NodeAffinityApplyConfiguration constructs a declarative configuration of the NodeAffinity type for use with // apply. func NodeAffinity() *NodeAffinityApplyConfiguration { return &NodeAffinityApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go index eb81ca543f..a1b8ed0f38 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// NodeConditionApplyConfiguration represents an declarative configuration of the NodeCondition type for use +// NodeConditionApplyConfiguration represents a declarative configuration of the NodeCondition type for use // with apply. type NodeConditionApplyConfiguration struct { Type *v1.NodeConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type NodeConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// NodeConditionApplyConfiguration constructs an declarative configuration of the NodeCondition type for use with +// NodeConditionApplyConfiguration constructs a declarative configuration of the NodeCondition type for use with // apply. func NodeCondition() *NodeConditionApplyConfiguration { return &NodeConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigsource.go index 60567aa431..00a671fc0c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigsource.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// NodeConfigSourceApplyConfiguration represents an declarative configuration of the NodeConfigSource type for use +// NodeConfigSourceApplyConfiguration represents a declarative configuration of the NodeConfigSource type for use // with apply. type NodeConfigSourceApplyConfiguration struct { ConfigMap *ConfigMapNodeConfigSourceApplyConfiguration `json:"configMap,omitempty"` } -// NodeConfigSourceApplyConfiguration constructs an declarative configuration of the NodeConfigSource type for use with +// NodeConfigSourceApplyConfiguration constructs a declarative configuration of the NodeConfigSource type for use with // apply. func NodeConfigSource() *NodeConfigSourceApplyConfiguration { return &NodeConfigSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigstatus.go index 71447fe9c0..d5ccc45c6a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigstatus.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// NodeConfigStatusApplyConfiguration represents an declarative configuration of the NodeConfigStatus type for use +// NodeConfigStatusApplyConfiguration represents a declarative configuration of the NodeConfigStatus type for use // with apply. type NodeConfigStatusApplyConfiguration struct { Assigned *NodeConfigSourceApplyConfiguration `json:"assigned,omitempty"` @@ -27,7 +27,7 @@ type NodeConfigStatusApplyConfiguration struct { Error *string `json:"error,omitempty"` } -// NodeConfigStatusApplyConfiguration constructs an declarative configuration of the NodeConfigStatus type for use with +// NodeConfigStatusApplyConfiguration constructs a declarative configuration of the NodeConfigStatus type for use with // apply. func NodeConfigStatus() *NodeConfigStatusApplyConfiguration { return &NodeConfigStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodedaemonendpoints.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodedaemonendpoints.go index 4cabc7f526..11228b3691 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodedaemonendpoints.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodedaemonendpoints.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// NodeDaemonEndpointsApplyConfiguration represents an declarative configuration of the NodeDaemonEndpoints type for use +// NodeDaemonEndpointsApplyConfiguration represents a declarative configuration of the NodeDaemonEndpoints type for use // with apply. type NodeDaemonEndpointsApplyConfiguration struct { KubeletEndpoint *DaemonEndpointApplyConfiguration `json:"kubeletEndpoint,omitempty"` } -// NodeDaemonEndpointsApplyConfiguration constructs an declarative configuration of the NodeDaemonEndpoints type for use with +// NodeDaemonEndpointsApplyConfiguration constructs a declarative configuration of the NodeDaemonEndpoints type for use with // apply. func NodeDaemonEndpoints() *NodeDaemonEndpointsApplyConfiguration { return &NodeDaemonEndpointsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodefeatures.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodefeatures.go new file mode 100644 index 0000000000..678b0e36d6 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodefeatures.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// NodeFeaturesApplyConfiguration represents a declarative configuration of the NodeFeatures type for use +// with apply. +type NodeFeaturesApplyConfiguration struct { + SupplementalGroupsPolicy *bool `json:"supplementalGroupsPolicy,omitempty"` +} + +// NodeFeaturesApplyConfiguration constructs a declarative configuration of the NodeFeatures type for use with +// apply. 
+func NodeFeatures() *NodeFeaturesApplyConfiguration { + return &NodeFeaturesApplyConfiguration{} +} + +// WithSupplementalGroupsPolicy sets the SupplementalGroupsPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SupplementalGroupsPolicy field is set to the value of the last call. +func (b *NodeFeaturesApplyConfiguration) WithSupplementalGroupsPolicy(value bool) *NodeFeaturesApplyConfiguration { + b.SupplementalGroupsPolicy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandler.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandler.go new file mode 100644 index 0000000000..c7c664974e --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandler.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// NodeRuntimeHandlerApplyConfiguration represents a declarative configuration of the NodeRuntimeHandler type for use +// with apply. +type NodeRuntimeHandlerApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Features *NodeRuntimeHandlerFeaturesApplyConfiguration `json:"features,omitempty"` +} + +// NodeRuntimeHandlerApplyConfiguration constructs a declarative configuration of the NodeRuntimeHandler type for use with +// apply. +func NodeRuntimeHandler() *NodeRuntimeHandlerApplyConfiguration { + return &NodeRuntimeHandlerApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *NodeRuntimeHandlerApplyConfiguration) WithName(value string) *NodeRuntimeHandlerApplyConfiguration { + b.Name = &value + return b +} + +// WithFeatures sets the Features field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Features field is set to the value of the last call. +func (b *NodeRuntimeHandlerApplyConfiguration) WithFeatures(value *NodeRuntimeHandlerFeaturesApplyConfiguration) *NodeRuntimeHandlerApplyConfiguration { + b.Features = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandlerfeatures.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandlerfeatures.go new file mode 100644 index 0000000000..a295b60969 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandlerfeatures.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// NodeRuntimeHandlerFeaturesApplyConfiguration represents a declarative configuration of the NodeRuntimeHandlerFeatures type for use +// with apply. +type NodeRuntimeHandlerFeaturesApplyConfiguration struct { + RecursiveReadOnlyMounts *bool `json:"recursiveReadOnlyMounts,omitempty"` + UserNamespaces *bool `json:"userNamespaces,omitempty"` +} + +// NodeRuntimeHandlerFeaturesApplyConfiguration constructs a declarative configuration of the NodeRuntimeHandlerFeatures type for use with +// apply. +func NodeRuntimeHandlerFeatures() *NodeRuntimeHandlerFeaturesApplyConfiguration { + return &NodeRuntimeHandlerFeaturesApplyConfiguration{} +} + +// WithRecursiveReadOnlyMounts sets the RecursiveReadOnlyMounts field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RecursiveReadOnlyMounts field is set to the value of the last call. +func (b *NodeRuntimeHandlerFeaturesApplyConfiguration) WithRecursiveReadOnlyMounts(value bool) *NodeRuntimeHandlerFeaturesApplyConfiguration { + b.RecursiveReadOnlyMounts = &value + return b +} + +// WithUserNamespaces sets the UserNamespaces field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UserNamespaces field is set to the value of the last call. +func (b *NodeRuntimeHandlerFeaturesApplyConfiguration) WithUserNamespaces(value bool) *NodeRuntimeHandlerFeaturesApplyConfiguration { + b.UserNamespaces = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselector.go index 5489097f5a..6eab109795 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselector.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselector.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// NodeSelectorApplyConfiguration represents an declarative configuration of the NodeSelector type for use +// NodeSelectorApplyConfiguration represents a declarative configuration of the NodeSelector type for use // with apply. type NodeSelectorApplyConfiguration struct { NodeSelectorTerms []NodeSelectorTermApplyConfiguration `json:"nodeSelectorTerms,omitempty"` } -// NodeSelectorApplyConfiguration constructs an declarative configuration of the NodeSelector type for use with +// NodeSelectorApplyConfiguration constructs a declarative configuration of the NodeSelector type for use with // apply. 
func NodeSelector() *NodeSelectorApplyConfiguration { return &NodeSelectorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go index a6e43e607e..7c383e06c2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// NodeSelectorRequirementApplyConfiguration represents an declarative configuration of the NodeSelectorRequirement type for use +// NodeSelectorRequirementApplyConfiguration represents a declarative configuration of the NodeSelectorRequirement type for use // with apply. type NodeSelectorRequirementApplyConfiguration struct { Key *string `json:"key,omitempty"` @@ -30,7 +30,7 @@ type NodeSelectorRequirementApplyConfiguration struct { Values []string `json:"values,omitempty"` } -// NodeSelectorRequirementApplyConfiguration constructs an declarative configuration of the NodeSelectorRequirement type for use with +// NodeSelectorRequirementApplyConfiguration constructs a declarative configuration of the NodeSelectorRequirement type for use with // apply. func NodeSelectorRequirement() *NodeSelectorRequirementApplyConfiguration { return &NodeSelectorRequirementApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorterm.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorterm.go index 13b3ddbc1b..9d0d780f3e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorterm.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorterm.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// NodeSelectorTermApplyConfiguration represents an declarative configuration of the NodeSelectorTerm type for use +// NodeSelectorTermApplyConfiguration represents a declarative configuration of the NodeSelectorTerm type for use // with apply. type NodeSelectorTermApplyConfiguration struct { MatchExpressions []NodeSelectorRequirementApplyConfiguration `json:"matchExpressions,omitempty"` MatchFields []NodeSelectorRequirementApplyConfiguration `json:"matchFields,omitempty"` } -// NodeSelectorTermApplyConfiguration constructs an declarative configuration of the NodeSelectorTerm type for use with +// NodeSelectorTermApplyConfiguration constructs a declarative configuration of the NodeSelectorTerm type for use with // apply. func NodeSelectorTerm() *NodeSelectorTermApplyConfiguration { return &NodeSelectorTermApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodespec.go index 63b61078d0..8ac3497127 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodespec.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// NodeSpecApplyConfiguration represents an declarative configuration of the NodeSpec type for use +// NodeSpecApplyConfiguration represents a declarative configuration of the NodeSpec type for use // with apply. 
type NodeSpecApplyConfiguration struct { PodCIDR *string `json:"podCIDR,omitempty"` @@ -30,7 +30,7 @@ type NodeSpecApplyConfiguration struct { DoNotUseExternalID *string `json:"externalID,omitempty"` } -// NodeSpecApplyConfiguration constructs an declarative configuration of the NodeSpec type for use with +// NodeSpecApplyConfiguration constructs a declarative configuration of the NodeSpec type for use with // apply. func NodeSpec() *NodeSpecApplyConfiguration { return &NodeSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go index aa3603f4fc..8411c57ac0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// NodeStatusApplyConfiguration represents an declarative configuration of the NodeStatus type for use +// NodeStatusApplyConfiguration represents a declarative configuration of the NodeStatus type for use // with apply. type NodeStatusApplyConfiguration struct { Capacity *v1.ResourceList `json:"capacity,omitempty"` @@ -36,9 +36,11 @@ type NodeStatusApplyConfiguration struct { VolumesInUse []v1.UniqueVolumeName `json:"volumesInUse,omitempty"` VolumesAttached []AttachedVolumeApplyConfiguration `json:"volumesAttached,omitempty"` Config *NodeConfigStatusApplyConfiguration `json:"config,omitempty"` + RuntimeHandlers []NodeRuntimeHandlerApplyConfiguration `json:"runtimeHandlers,omitempty"` + Features *NodeFeaturesApplyConfiguration `json:"features,omitempty"` } -// NodeStatusApplyConfiguration constructs an declarative configuration of the NodeStatus type for use with +// NodeStatusApplyConfiguration constructs a declarative configuration of the NodeStatus type for use with // apply. func NodeStatus() *NodeStatusApplyConfiguration { return &NodeStatusApplyConfiguration{} @@ -153,3 +155,24 @@ func (b *NodeStatusApplyConfiguration) WithConfig(value *NodeConfigStatusApplyCo b.Config = value return b } + +// WithRuntimeHandlers adds the given value to the RuntimeHandlers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the RuntimeHandlers field. +func (b *NodeStatusApplyConfiguration) WithRuntimeHandlers(values ...*NodeRuntimeHandlerApplyConfiguration) *NodeStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRuntimeHandlers") + } + b.RuntimeHandlers = append(b.RuntimeHandlers, *values[i]) + } + return b +} + +// WithFeatures sets the Features field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Features field is set to the value of the last call. 
+func (b *NodeStatusApplyConfiguration) WithFeatures(value *NodeFeaturesApplyConfiguration) *NodeStatusApplyConfiguration { + b.Features = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodesysteminfo.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodesysteminfo.go index 2634ea9842..11ac50713c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodesysteminfo.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodesysteminfo.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// NodeSystemInfoApplyConfiguration represents an declarative configuration of the NodeSystemInfo type for use +// NodeSystemInfoApplyConfiguration represents a declarative configuration of the NodeSystemInfo type for use // with apply. type NodeSystemInfoApplyConfiguration struct { MachineID *string `json:"machineID,omitempty"` @@ -33,7 +33,7 @@ type NodeSystemInfoApplyConfiguration struct { Architecture *string `json:"architecture,omitempty"` } -// NodeSystemInfoApplyConfiguration constructs an declarative configuration of the NodeSystemInfo type for use with +// NodeSystemInfoApplyConfiguration constructs a declarative configuration of the NodeSystemInfo type for use with // apply. func NodeSystemInfo() *NodeSystemInfoApplyConfiguration { return &NodeSystemInfoApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectfieldselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectfieldselector.go index 0c2402b3c7..c129c998b1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectfieldselector.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectfieldselector.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ObjectFieldSelectorApplyConfiguration represents an declarative configuration of the ObjectFieldSelector type for use +// ObjectFieldSelectorApplyConfiguration represents a declarative configuration of the ObjectFieldSelector type for use // with apply. type ObjectFieldSelectorApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` FieldPath *string `json:"fieldPath,omitempty"` } -// ObjectFieldSelectorApplyConfiguration constructs an declarative configuration of the ObjectFieldSelector type for use with +// ObjectFieldSelectorApplyConfiguration constructs a declarative configuration of the ObjectFieldSelector type for use with // apply. func ObjectFieldSelector() *ObjectFieldSelectorApplyConfiguration { return &ObjectFieldSelectorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectreference.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectreference.go index 667fa84a81..4cd3f226ef 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectreference.go @@ -22,7 +22,7 @@ import ( types "k8s.io/apimachinery/pkg/types" ) -// ObjectReferenceApplyConfiguration represents an declarative configuration of the ObjectReference type for use +// ObjectReferenceApplyConfiguration represents a declarative configuration of the ObjectReference type for use // with apply. 
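The new nodefeatures.go, noderuntimehandler.go, and noderuntimehandlerfeatures.go files, together with the RuntimeHandlers and Features fields added to NodeStatusApplyConfiguration above, chain like the existing builders. A minimal sketch that uses only the constructors and With* methods introduced in this diff; the handler name is an illustrative value:

package main

import (
	"fmt"

	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	status := corev1ac.NodeStatus().
		WithRuntimeHandlers(
			corev1ac.NodeRuntimeHandler().
				WithName("runc"). // illustrative handler name
				WithFeatures(corev1ac.NodeRuntimeHandlerFeatures().
					WithRecursiveReadOnlyMounts(true).
					WithUserNamespaces(true)),
		).
		WithFeatures(corev1ac.NodeFeatures().WithSupplementalGroupsPolicy(true))

	fmt.Printf("runtime handlers declared: %d\n", len(status.RuntimeHandlers))
}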
type ObjectReferenceApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -34,7 +34,7 @@ type ObjectReferenceApplyConfiguration struct { FieldPath *string `json:"fieldPath,omitempty"` } -// ObjectReferenceApplyConfiguration constructs an declarative configuration of the ObjectReference type for use with +// ObjectReferenceApplyConfiguration constructs a declarative configuration of the ObjectReference type for use with // apply. func ObjectReference() *ObjectReferenceApplyConfiguration { return &ObjectReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go index 2599c197e8..020f87411e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PersistentVolumeApplyConfiguration represents an declarative configuration of the PersistentVolume type for use +// PersistentVolumeApplyConfiguration represents a declarative configuration of the PersistentVolume type for use // with apply. type PersistentVolumeApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PersistentVolumeApplyConfiguration struct { Status *PersistentVolumeStatusApplyConfiguration `json:"status,omitempty"` } -// PersistentVolume constructs an declarative configuration of the PersistentVolume type for use with +// PersistentVolume constructs a declarative configuration of the PersistentVolume type for use with // apply. func PersistentVolume(name string) *PersistentVolumeApplyConfiguration { b := &PersistentVolumeApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *PersistentVolumeApplyConfiguration) WithStatus(value *PersistentVolumeS b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PersistentVolumeApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go index a0a0017018..81cf791443 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PersistentVolumeClaimApplyConfiguration represents an declarative configuration of the PersistentVolumeClaim type for use +// PersistentVolumeClaimApplyConfiguration represents a declarative configuration of the PersistentVolumeClaim type for use // with apply. type PersistentVolumeClaimApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PersistentVolumeClaimApplyConfiguration struct { Status *PersistentVolumeClaimStatusApplyConfiguration `json:"status,omitempty"` } -// PersistentVolumeClaim constructs an declarative configuration of the PersistentVolumeClaim type for use with +// PersistentVolumeClaim constructs a declarative configuration of the PersistentVolumeClaim type for use with // apply. 
func PersistentVolumeClaim(name, namespace string) *PersistentVolumeClaimApplyConfiguration { b := &PersistentVolumeClaimApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithStatus(value *PersistentVo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PersistentVolumeClaimApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go index 65449e92eb..80038c0677 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PersistentVolumeClaimConditionApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimCondition type for use +// PersistentVolumeClaimConditionApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimCondition type for use // with apply. type PersistentVolumeClaimConditionApplyConfiguration struct { Type *v1.PersistentVolumeClaimConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type PersistentVolumeClaimConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// PersistentVolumeClaimConditionApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimCondition type for use with +// PersistentVolumeClaimConditionApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimCondition type for use with // apply. func PersistentVolumeClaimCondition() *PersistentVolumeClaimConditionApplyConfiguration { return &PersistentVolumeClaimConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go index 4db12fadb3..5ce671cd99 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PersistentVolumeClaimSpecApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimSpec type for use +// PersistentVolumeClaimSpecApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimSpec type for use // with apply. type PersistentVolumeClaimSpecApplyConfiguration struct { AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` @@ -37,7 +37,7 @@ type PersistentVolumeClaimSpecApplyConfiguration struct { VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty"` } -// PersistentVolumeClaimSpecApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimSpec type for use with +// PersistentVolumeClaimSpecApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimSpec type for use with // apply. 
func PersistentVolumeClaimSpec() *PersistentVolumeClaimSpecApplyConfiguration { return &PersistentVolumeClaimSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go index 1f6d5ae323..3eebf95ad5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// PersistentVolumeClaimStatusApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimStatus type for use +// PersistentVolumeClaimStatusApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimStatus type for use // with apply. type PersistentVolumeClaimStatusApplyConfiguration struct { Phase *v1.PersistentVolumeClaimPhase `json:"phase,omitempty"` @@ -35,7 +35,7 @@ type PersistentVolumeClaimStatusApplyConfiguration struct { ModifyVolumeStatus *ModifyVolumeStatusApplyConfiguration `json:"modifyVolumeStatus,omitempty"` } -// PersistentVolumeClaimStatusApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimStatus type for use with +// PersistentVolumeClaimStatusApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimStatus type for use with // apply. func PersistentVolumeClaimStatus() *PersistentVolumeClaimStatusApplyConfiguration { return &PersistentVolumeClaimStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go index 894d04f0b4..ed49702913 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go @@ -24,14 +24,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PersistentVolumeClaimTemplateApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimTemplate type for use +// PersistentVolumeClaimTemplateApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimTemplate type for use // with apply. type PersistentVolumeClaimTemplateApplyConfiguration struct { *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` Spec *PersistentVolumeClaimSpecApplyConfiguration `json:"spec,omitempty"` } -// PersistentVolumeClaimTemplateApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimTemplate type for use with +// PersistentVolumeClaimTemplateApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimTemplate type for use with // apply. func PersistentVolumeClaimTemplate() *PersistentVolumeClaimTemplateApplyConfiguration { return &PersistentVolumeClaimTemplateApplyConfiguration{} @@ -186,3 +186,9 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithSpec(value *Persis b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *PersistentVolumeClaimTemplateApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimvolumesource.go index a498fa6a5e..ccccdfb493 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimvolumesource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// PersistentVolumeClaimVolumeSourceApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimVolumeSource type for use +// PersistentVolumeClaimVolumeSourceApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimVolumeSource type for use // with apply. type PersistentVolumeClaimVolumeSourceApplyConfiguration struct { ClaimName *string `json:"claimName,omitempty"` ReadOnly *bool `json:"readOnly,omitempty"` } -// PersistentVolumeClaimVolumeSourceApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimVolumeSource type for use with +// PersistentVolumeClaimVolumeSourceApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimVolumeSource type for use with // apply. func PersistentVolumeClaimVolumeSource() *PersistentVolumeClaimVolumeSourceApplyConfiguration { return &PersistentVolumeClaimVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumesource.go index 0576e7dd31..aba0124622 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// PersistentVolumeSourceApplyConfiguration represents an declarative configuration of the PersistentVolumeSource type for use +// PersistentVolumeSourceApplyConfiguration represents a declarative configuration of the PersistentVolumeSource type for use // with apply. type PersistentVolumeSourceApplyConfiguration struct { GCEPersistentDisk *GCEPersistentDiskVolumeSourceApplyConfiguration `json:"gcePersistentDisk,omitempty"` @@ -45,7 +45,7 @@ type PersistentVolumeSourceApplyConfiguration struct { CSI *CSIPersistentVolumeSourceApplyConfiguration `json:"csi,omitempty"` } -// PersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the PersistentVolumeSource type for use with +// PersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the PersistentVolumeSource type for use with // apply. 
func PersistentVolumeSource() *PersistentVolumeSourceApplyConfiguration { return &PersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go index 8a30dab649..074fa55d1f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// PersistentVolumeSpecApplyConfiguration represents an declarative configuration of the PersistentVolumeSpec type for use +// PersistentVolumeSpecApplyConfiguration represents a declarative configuration of the PersistentVolumeSpec type for use // with apply. type PersistentVolumeSpecApplyConfiguration struct { Capacity *v1.ResourceList `json:"capacity,omitempty"` @@ -37,7 +37,7 @@ type PersistentVolumeSpecApplyConfiguration struct { VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty"` } -// PersistentVolumeSpecApplyConfiguration constructs an declarative configuration of the PersistentVolumeSpec type for use with +// PersistentVolumeSpecApplyConfiguration constructs a declarative configuration of the PersistentVolumeSpec type for use with // apply. func PersistentVolumeSpec() *PersistentVolumeSpecApplyConfiguration { return &PersistentVolumeSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go index a473c0e927..95ba90f48b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PersistentVolumeStatusApplyConfiguration represents an declarative configuration of the PersistentVolumeStatus type for use +// PersistentVolumeStatusApplyConfiguration represents a declarative configuration of the PersistentVolumeStatus type for use // with apply. type PersistentVolumeStatusApplyConfiguration struct { Phase *v1.PersistentVolumePhase `json:"phase,omitempty"` @@ -32,7 +32,7 @@ type PersistentVolumeStatusApplyConfiguration struct { LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty"` } -// PersistentVolumeStatusApplyConfiguration constructs an declarative configuration of the PersistentVolumeStatus type for use with +// PersistentVolumeStatusApplyConfiguration constructs a declarative configuration of the PersistentVolumeStatus type for use with // apply. func PersistentVolumeStatus() *PersistentVolumeStatusApplyConfiguration { return &PersistentVolumeStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/photonpersistentdiskvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/photonpersistentdiskvolumesource.go index 43587d6768..d8dc103e2a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/photonpersistentdiskvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/photonpersistentdiskvolumesource.go @@ -18,14 +18,14 @@ limitations under the License. 
package v1 -// PhotonPersistentDiskVolumeSourceApplyConfiguration represents an declarative configuration of the PhotonPersistentDiskVolumeSource type for use +// PhotonPersistentDiskVolumeSourceApplyConfiguration represents a declarative configuration of the PhotonPersistentDiskVolumeSource type for use // with apply. type PhotonPersistentDiskVolumeSourceApplyConfiguration struct { PdID *string `json:"pdID,omitempty"` FSType *string `json:"fsType,omitempty"` } -// PhotonPersistentDiskVolumeSourceApplyConfiguration constructs an declarative configuration of the PhotonPersistentDiskVolumeSource type for use with +// PhotonPersistentDiskVolumeSourceApplyConfiguration constructs a declarative configuration of the PhotonPersistentDiskVolumeSource type for use with // apply. func PhotonPersistentDiskVolumeSource() *PhotonPersistentDiskVolumeSourceApplyConfiguration { return &PhotonPersistentDiskVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go index 7210bd9836..507d57d6f3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodApplyConfiguration represents an declarative configuration of the Pod type for use +// PodApplyConfiguration represents a declarative configuration of the Pod type for use // with apply. type PodApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PodApplyConfiguration struct { Status *PodStatusApplyConfiguration `json:"status,omitempty"` } -// Pod constructs an declarative configuration of the Pod type for use with +// Pod constructs a declarative configuration of the Pod type for use with // apply. func Pod(name, namespace string) *PodApplyConfiguration { b := &PodApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *PodApplyConfiguration) WithStatus(value *PodStatusApplyConfiguration) * b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PodApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinity.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinity.go index 7049c62121..23fed95464 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinity.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// PodAffinityApplyConfiguration represents an declarative configuration of the PodAffinity type for use +// PodAffinityApplyConfiguration represents a declarative configuration of the PodAffinity type for use // with apply. type PodAffinityApplyConfiguration struct { RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTermApplyConfiguration `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTermApplyConfiguration `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"` } -// PodAffinityApplyConfiguration constructs an declarative configuration of the PodAffinity type for use with +// PodAffinityApplyConfiguration constructs a declarative configuration of the PodAffinity type for use with // apply. 
func PodAffinity() *PodAffinityApplyConfiguration { return &PodAffinityApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go index ac1eab3d8c..3afce026d4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodAffinityTermApplyConfiguration represents an declarative configuration of the PodAffinityTerm type for use +// PodAffinityTermApplyConfiguration represents a declarative configuration of the PodAffinityTerm type for use // with apply. type PodAffinityTermApplyConfiguration struct { LabelSelector *v1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"` @@ -33,7 +33,7 @@ type PodAffinityTermApplyConfiguration struct { MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty"` } -// PodAffinityTermApplyConfiguration constructs an declarative configuration of the PodAffinityTerm type for use with +// PodAffinityTermApplyConfiguration constructs a declarative configuration of the PodAffinityTerm type for use with // apply. func PodAffinityTerm() *PodAffinityTermApplyConfiguration { return &PodAffinityTermApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podantiaffinity.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podantiaffinity.go index 42681c54c4..ae9848963d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podantiaffinity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podantiaffinity.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// PodAntiAffinityApplyConfiguration represents an declarative configuration of the PodAntiAffinity type for use +// PodAntiAffinityApplyConfiguration represents a declarative configuration of the PodAntiAffinity type for use // with apply. type PodAntiAffinityApplyConfiguration struct { RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTermApplyConfiguration `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTermApplyConfiguration `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"` } -// PodAntiAffinityApplyConfiguration constructs an declarative configuration of the PodAntiAffinity type for use with +// PodAntiAffinityApplyConfiguration constructs a declarative configuration of the PodAntiAffinity type for use with // apply. func PodAntiAffinity() *PodAntiAffinityApplyConfiguration { return &PodAntiAffinityApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go index 610209f3c4..98968d26d0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PodConditionApplyConfiguration represents an declarative configuration of the PodCondition type for use +// PodConditionApplyConfiguration represents a declarative configuration of the PodCondition type for use // with apply. 
type PodConditionApplyConfiguration struct { Type *v1.PodConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type PodConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// PodConditionApplyConfiguration constructs an declarative configuration of the PodCondition type for use with +// PodConditionApplyConfiguration constructs a declarative configuration of the PodCondition type for use with // apply. func PodCondition() *PodConditionApplyConfiguration { return &PodConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfig.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfig.go index 0fe6a08349..2e0ce9a91e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfig.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfig.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// PodDNSConfigApplyConfiguration represents an declarative configuration of the PodDNSConfig type for use +// PodDNSConfigApplyConfiguration represents a declarative configuration of the PodDNSConfig type for use // with apply. type PodDNSConfigApplyConfiguration struct { Nameservers []string `json:"nameservers,omitempty"` @@ -26,7 +26,7 @@ type PodDNSConfigApplyConfiguration struct { Options []PodDNSConfigOptionApplyConfiguration `json:"options,omitempty"` } -// PodDNSConfigApplyConfiguration constructs an declarative configuration of the PodDNSConfig type for use with +// PodDNSConfigApplyConfiguration constructs a declarative configuration of the PodDNSConfig type for use with // apply. func PodDNSConfig() *PodDNSConfigApplyConfiguration { return &PodDNSConfigApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfigoption.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfigoption.go index 327bf803b3..458b333bf2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfigoption.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfigoption.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// PodDNSConfigOptionApplyConfiguration represents an declarative configuration of the PodDNSConfigOption type for use +// PodDNSConfigOptionApplyConfiguration represents a declarative configuration of the PodDNSConfigOption type for use // with apply. type PodDNSConfigOptionApplyConfiguration struct { Name *string `json:"name,omitempty"` Value *string `json:"value,omitempty"` } -// PodDNSConfigOptionApplyConfiguration constructs an declarative configuration of the PodDNSConfigOption type for use with +// PodDNSConfigOptionApplyConfiguration constructs a declarative configuration of the PodDNSConfigOption type for use with // apply. func PodDNSConfigOption() *PodDNSConfigOptionApplyConfiguration { return &PodDNSConfigOptionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podip.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podip.go index 3c6e6b87ac..73f089856f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podip.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podip.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// PodIPApplyConfiguration represents an declarative configuration of the PodIP type for use +// PodIPApplyConfiguration represents a declarative configuration of the PodIP type for use // with apply. 
type PodIPApplyConfiguration struct { IP *string `json:"ip,omitempty"` } -// PodIPApplyConfiguration constructs an declarative configuration of the PodIP type for use with +// PodIPApplyConfiguration constructs a declarative configuration of the PodIP type for use with // apply. func PodIP() *PodIPApplyConfiguration { return &PodIPApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go index a5315d636b..7f156f817d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/api/core/v1" ) -// PodOSApplyConfiguration represents an declarative configuration of the PodOS type for use +// PodOSApplyConfiguration represents a declarative configuration of the PodOS type for use // with apply. type PodOSApplyConfiguration struct { Name *v1.OSName `json:"name,omitempty"` } -// PodOSApplyConfiguration constructs an declarative configuration of the PodOS type for use with +// PodOSApplyConfiguration constructs a declarative configuration of the PodOS type for use with // apply. func PodOS() *PodOSApplyConfiguration { return &PodOSApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go index 9d3ad458ac..09746df1bd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/api/core/v1" ) -// PodReadinessGateApplyConfiguration represents an declarative configuration of the PodReadinessGate type for use +// PodReadinessGateApplyConfiguration represents a declarative configuration of the PodReadinessGate type for use // with apply. type PodReadinessGateApplyConfiguration struct { ConditionType *v1.PodConditionType `json:"conditionType,omitempty"` } -// PodReadinessGateApplyConfiguration constructs an declarative configuration of the PodReadinessGate type for use with +// PodReadinessGateApplyConfiguration constructs a declarative configuration of the PodReadinessGate type for use with // apply. func PodReadinessGate() *PodReadinessGateApplyConfiguration { return &PodReadinessGateApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go index 69b250d474..b0bd67fa11 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go @@ -18,14 +18,15 @@ limitations under the License. package v1 -// PodResourceClaimApplyConfiguration represents an declarative configuration of the PodResourceClaim type for use +// PodResourceClaimApplyConfiguration represents a declarative configuration of the PodResourceClaim type for use // with apply. 
type PodResourceClaimApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Source *ClaimSourceApplyConfiguration `json:"source,omitempty"` + Name *string `json:"name,omitempty"` + ResourceClaimName *string `json:"resourceClaimName,omitempty"` + ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty"` } -// PodResourceClaimApplyConfiguration constructs an declarative configuration of the PodResourceClaim type for use with +// PodResourceClaimApplyConfiguration constructs a declarative configuration of the PodResourceClaim type for use with // apply. func PodResourceClaim() *PodResourceClaimApplyConfiguration { return &PodResourceClaimApplyConfiguration{} @@ -39,10 +40,18 @@ func (b *PodResourceClaimApplyConfiguration) WithName(value string) *PodResource return b } -// WithSource sets the Source field in the declarative configuration to the given value +// WithResourceClaimName sets the ResourceClaimName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Source field is set to the value of the last call. -func (b *PodResourceClaimApplyConfiguration) WithSource(value *ClaimSourceApplyConfiguration) *PodResourceClaimApplyConfiguration { - b.Source = value +// If called multiple times, the ResourceClaimName field is set to the value of the last call. +func (b *PodResourceClaimApplyConfiguration) WithResourceClaimName(value string) *PodResourceClaimApplyConfiguration { + b.ResourceClaimName = &value + return b +} + +// WithResourceClaimTemplateName sets the ResourceClaimTemplateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceClaimTemplateName field is set to the value of the last call. +func (b *PodResourceClaimApplyConfiguration) WithResourceClaimTemplateName(value string) *PodResourceClaimApplyConfiguration { + b.ResourceClaimTemplateName = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go index ae79ca01b7..f60ad4b052 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// PodResourceClaimStatusApplyConfiguration represents an declarative configuration of the PodResourceClaimStatus type for use +// PodResourceClaimStatusApplyConfiguration represents a declarative configuration of the PodResourceClaimStatus type for use // with apply. type PodResourceClaimStatusApplyConfiguration struct { Name *string `json:"name,omitempty"` ResourceClaimName *string `json:"resourceClaimName,omitempty"` } -// PodResourceClaimStatusApplyConfiguration constructs an declarative configuration of the PodResourceClaimStatus type for use with +// PodResourceClaimStatusApplyConfiguration constructs a declarative configuration of the PodResourceClaimStatus type for use with // apply. 
func PodResourceClaimStatus() *PodResourceClaimStatusApplyConfiguration { return &PodResourceClaimStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go index f7649c2e92..3d91092776 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// PodSchedulingGateApplyConfiguration represents an declarative configuration of the PodSchedulingGate type for use +// PodSchedulingGateApplyConfiguration represents a declarative configuration of the PodSchedulingGate type for use // with apply. type PodSchedulingGateApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// PodSchedulingGateApplyConfiguration constructs an declarative configuration of the PodSchedulingGate type for use with +// PodSchedulingGateApplyConfiguration constructs a declarative configuration of the PodSchedulingGate type for use with // apply. func PodSchedulingGate() *PodSchedulingGateApplyConfiguration { return &PodSchedulingGateApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go index 6db09aa32f..55085e6307 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go @@ -22,22 +22,24 @@ import ( corev1 "k8s.io/api/core/v1" ) -// PodSecurityContextApplyConfiguration represents an declarative configuration of the PodSecurityContext type for use +// PodSecurityContextApplyConfiguration represents a declarative configuration of the PodSecurityContext type for use // with apply. 
type PodSecurityContextApplyConfiguration struct { - SELinuxOptions *SELinuxOptionsApplyConfiguration `json:"seLinuxOptions,omitempty"` - WindowsOptions *WindowsSecurityContextOptionsApplyConfiguration `json:"windowsOptions,omitempty"` - RunAsUser *int64 `json:"runAsUser,omitempty"` - RunAsGroup *int64 `json:"runAsGroup,omitempty"` - RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"` - SupplementalGroups []int64 `json:"supplementalGroups,omitempty"` - FSGroup *int64 `json:"fsGroup,omitempty"` - Sysctls []SysctlApplyConfiguration `json:"sysctls,omitempty"` - FSGroupChangePolicy *corev1.PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty"` - SeccompProfile *SeccompProfileApplyConfiguration `json:"seccompProfile,omitempty"` + SELinuxOptions *SELinuxOptionsApplyConfiguration `json:"seLinuxOptions,omitempty"` + WindowsOptions *WindowsSecurityContextOptionsApplyConfiguration `json:"windowsOptions,omitempty"` + RunAsUser *int64 `json:"runAsUser,omitempty"` + RunAsGroup *int64 `json:"runAsGroup,omitempty"` + RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"` + SupplementalGroups []int64 `json:"supplementalGroups,omitempty"` + SupplementalGroupsPolicy *corev1.SupplementalGroupsPolicy `json:"supplementalGroupsPolicy,omitempty"` + FSGroup *int64 `json:"fsGroup,omitempty"` + Sysctls []SysctlApplyConfiguration `json:"sysctls,omitempty"` + FSGroupChangePolicy *corev1.PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty"` + SeccompProfile *SeccompProfileApplyConfiguration `json:"seccompProfile,omitempty"` + AppArmorProfile *AppArmorProfileApplyConfiguration `json:"appArmorProfile,omitempty"` } -// PodSecurityContextApplyConfiguration constructs an declarative configuration of the PodSecurityContext type for use with +// PodSecurityContextApplyConfiguration constructs a declarative configuration of the PodSecurityContext type for use with // apply. func PodSecurityContext() *PodSecurityContextApplyConfiguration { return &PodSecurityContextApplyConfiguration{} @@ -93,6 +95,14 @@ func (b *PodSecurityContextApplyConfiguration) WithSupplementalGroups(values ... return b } +// WithSupplementalGroupsPolicy sets the SupplementalGroupsPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SupplementalGroupsPolicy field is set to the value of the last call. +func (b *PodSecurityContextApplyConfiguration) WithSupplementalGroupsPolicy(value corev1.SupplementalGroupsPolicy) *PodSecurityContextApplyConfiguration { + b.SupplementalGroupsPolicy = &value + return b +} + // WithFSGroup sets the FSGroup field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FSGroup field is set to the value of the last call. @@ -129,3 +139,11 @@ func (b *PodSecurityContextApplyConfiguration) WithSeccompProfile(value *Seccomp b.SeccompProfile = value return b } + +// WithAppArmorProfile sets the AppArmorProfile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AppArmorProfile field is set to the value of the last call. 
+func (b *PodSecurityContextApplyConfiguration) WithAppArmorProfile(value *AppArmorProfileApplyConfiguration) *PodSecurityContextApplyConfiguration { + b.AppArmorProfile = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go index a9acd36fc7..8134e044f1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" ) -// PodSpecApplyConfiguration represents an declarative configuration of the PodSpec type for use +// PodSpecApplyConfiguration represents a declarative configuration of the PodSpec type for use // with apply. type PodSpecApplyConfiguration struct { Volumes []VolumeApplyConfiguration `json:"volumes,omitempty"` @@ -66,7 +66,7 @@ type PodSpecApplyConfiguration struct { ResourceClaims []PodResourceClaimApplyConfiguration `json:"resourceClaims,omitempty"` } -// PodSpecApplyConfiguration constructs an declarative configuration of the PodSpec type for use with +// PodSpecApplyConfiguration constructs a declarative configuration of the PodSpec type for use with // apply. func PodSpec() *PodSpecApplyConfiguration { return &PodSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go index 1a58ab6be2..0b68996cd1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PodStatusApplyConfiguration represents an declarative configuration of the PodStatus type for use +// PodStatusApplyConfiguration represents a declarative configuration of the PodStatus type for use // with apply. type PodStatusApplyConfiguration struct { Phase *v1.PodPhase `json:"phase,omitempty"` @@ -44,7 +44,7 @@ type PodStatusApplyConfiguration struct { ResourceClaimStatuses []PodResourceClaimStatusApplyConfiguration `json:"resourceClaimStatuses,omitempty"` } -// PodStatusApplyConfiguration constructs an declarative configuration of the PodStatus type for use with +// PodStatusApplyConfiguration constructs a declarative configuration of the PodStatus type for use with // apply. func PodStatus() *PodStatusApplyConfiguration { return &PodStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go index 7fe51d9e1b..b4c8a658a4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodTemplateApplyConfiguration represents an declarative configuration of the PodTemplate type for use +// PodTemplateApplyConfiguration represents a declarative configuration of the PodTemplate type for use // with apply. type PodTemplateApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type PodTemplateApplyConfiguration struct { Template *PodTemplateSpecApplyConfiguration `json:"template,omitempty"` } -// PodTemplate constructs an declarative configuration of the PodTemplate type for use with +// PodTemplate constructs a declarative configuration of the PodTemplate type for use with // apply. 
func PodTemplate(name, namespace string) *PodTemplateApplyConfiguration { b := &PodTemplateApplyConfiguration{} @@ -247,3 +247,9 @@ func (b *PodTemplateApplyConfiguration) WithTemplate(value *PodTemplateSpecApply b.Template = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PodTemplateApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go index 82878a9ace..6146c01c7c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go @@ -24,14 +24,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodTemplateSpecApplyConfiguration represents an declarative configuration of the PodTemplateSpec type for use +// PodTemplateSpecApplyConfiguration represents a declarative configuration of the PodTemplateSpec type for use // with apply. type PodTemplateSpecApplyConfiguration struct { *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` Spec *PodSpecApplyConfiguration `json:"spec,omitempty"` } -// PodTemplateSpecApplyConfiguration constructs an declarative configuration of the PodTemplateSpec type for use with +// PodTemplateSpecApplyConfiguration constructs a declarative configuration of the PodTemplateSpec type for use with // apply. func PodTemplateSpec() *PodTemplateSpecApplyConfiguration { return &PodTemplateSpecApplyConfiguration{} @@ -186,3 +186,9 @@ func (b *PodTemplateSpecApplyConfiguration) WithSpec(value *PodSpecApplyConfigur b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PodTemplateSpecApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go index 8c70c8f6cf..5e738cabdb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// PortStatusApplyConfiguration represents an declarative configuration of the PortStatus type for use +// PortStatusApplyConfiguration represents a declarative configuration of the PortStatus type for use // with apply. type PortStatusApplyConfiguration struct { Port *int32 `json:"port,omitempty"` @@ -30,7 +30,7 @@ type PortStatusApplyConfiguration struct { Error *string `json:"error,omitempty"` } -// PortStatusApplyConfiguration constructs an declarative configuration of the PortStatus type for use with +// PortStatusApplyConfiguration constructs a declarative configuration of the PortStatus type for use with // apply. func PortStatus() *PortStatusApplyConfiguration { return &PortStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/portworxvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/portworxvolumesource.go index 19cbb82edb..29715e0219 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/portworxvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/portworxvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// PortworxVolumeSourceApplyConfiguration represents an declarative configuration of the PortworxVolumeSource type for use +// PortworxVolumeSourceApplyConfiguration represents a declarative configuration of the PortworxVolumeSource type for use // with apply. type PortworxVolumeSourceApplyConfiguration struct { VolumeID *string `json:"volumeID,omitempty"` @@ -26,7 +26,7 @@ type PortworxVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// PortworxVolumeSourceApplyConfiguration constructs an declarative configuration of the PortworxVolumeSource type for use with +// PortworxVolumeSourceApplyConfiguration constructs a declarative configuration of the PortworxVolumeSource type for use with // apply. func PortworxVolumeSource() *PortworxVolumeSourceApplyConfiguration { return &PortworxVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/preferredschedulingterm.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/preferredschedulingterm.go index a373e4afe0..b88a3646fc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/preferredschedulingterm.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/preferredschedulingterm.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// PreferredSchedulingTermApplyConfiguration represents an declarative configuration of the PreferredSchedulingTerm type for use +// PreferredSchedulingTermApplyConfiguration represents a declarative configuration of the PreferredSchedulingTerm type for use // with apply. type PreferredSchedulingTermApplyConfiguration struct { Weight *int32 `json:"weight,omitempty"` Preference *NodeSelectorTermApplyConfiguration `json:"preference,omitempty"` } -// PreferredSchedulingTermApplyConfiguration constructs an declarative configuration of the PreferredSchedulingTerm type for use with +// PreferredSchedulingTermApplyConfiguration constructs a declarative configuration of the PreferredSchedulingTerm type for use with // apply. func PreferredSchedulingTerm() *PreferredSchedulingTermApplyConfiguration { return &PreferredSchedulingTermApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go index 10730557a0..3be1c9650b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ProbeApplyConfiguration represents an declarative configuration of the Probe type for use +// ProbeApplyConfiguration represents a declarative configuration of the Probe type for use // with apply. type ProbeApplyConfiguration struct { ProbeHandlerApplyConfiguration `json:",inline"` @@ -30,7 +30,7 @@ type ProbeApplyConfiguration struct { TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` } -// ProbeApplyConfiguration constructs an declarative configuration of the Probe type for use with +// ProbeApplyConfiguration constructs a declarative configuration of the Probe type for use with // apply. 
func Probe() *ProbeApplyConfiguration { return &ProbeApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go index 54f3344ac7..1f88745eab 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ProbeHandlerApplyConfiguration represents an declarative configuration of the ProbeHandler type for use +// ProbeHandlerApplyConfiguration represents a declarative configuration of the ProbeHandler type for use // with apply. type ProbeHandlerApplyConfiguration struct { Exec *ExecActionApplyConfiguration `json:"exec,omitempty"` @@ -27,7 +27,7 @@ type ProbeHandlerApplyConfiguration struct { GRPC *GRPCActionApplyConfiguration `json:"grpc,omitempty"` } -// ProbeHandlerApplyConfiguration constructs an declarative configuration of the ProbeHandler type for use with +// ProbeHandlerApplyConfiguration constructs a declarative configuration of the ProbeHandler type for use with // apply. func ProbeHandler() *ProbeHandlerApplyConfiguration { return &ProbeHandlerApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/projectedvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/projectedvolumesource.go index 0a9d1d88e6..c922ec8cc2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/projectedvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/projectedvolumesource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ProjectedVolumeSourceApplyConfiguration represents an declarative configuration of the ProjectedVolumeSource type for use +// ProjectedVolumeSourceApplyConfiguration represents a declarative configuration of the ProjectedVolumeSource type for use // with apply. type ProjectedVolumeSourceApplyConfiguration struct { Sources []VolumeProjectionApplyConfiguration `json:"sources,omitempty"` DefaultMode *int32 `json:"defaultMode,omitempty"` } -// ProjectedVolumeSourceApplyConfiguration constructs an declarative configuration of the ProjectedVolumeSource type for use with +// ProjectedVolumeSourceApplyConfiguration constructs a declarative configuration of the ProjectedVolumeSource type for use with // apply. func ProjectedVolumeSource() *ProjectedVolumeSourceApplyConfiguration { return &ProjectedVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/quobytevolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/quobytevolumesource.go index 646052ea4a..9a042a0a12 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/quobytevolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/quobytevolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// QuobyteVolumeSourceApplyConfiguration represents an declarative configuration of the QuobyteVolumeSource type for use +// QuobyteVolumeSourceApplyConfiguration represents a declarative configuration of the QuobyteVolumeSource type for use // with apply. 
type QuobyteVolumeSourceApplyConfiguration struct { Registry *string `json:"registry,omitempty"` @@ -29,7 +29,7 @@ type QuobyteVolumeSourceApplyConfiguration struct { Tenant *string `json:"tenant,omitempty"` } -// QuobyteVolumeSourceApplyConfiguration constructs an declarative configuration of the QuobyteVolumeSource type for use with +// QuobyteVolumeSourceApplyConfiguration constructs a declarative configuration of the QuobyteVolumeSource type for use with // apply. func QuobyteVolumeSource() *QuobyteVolumeSourceApplyConfiguration { return &QuobyteVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdpersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdpersistentvolumesource.go index ffcb836eb0..64f25724a3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdpersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdpersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// RBDPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the RBDPersistentVolumeSource type for use +// RBDPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the RBDPersistentVolumeSource type for use // with apply. type RBDPersistentVolumeSourceApplyConfiguration struct { CephMonitors []string `json:"monitors,omitempty"` @@ -31,7 +31,7 @@ type RBDPersistentVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// RBDPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the RBDPersistentVolumeSource type for use with +// RBDPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the RBDPersistentVolumeSource type for use with // apply. func RBDPersistentVolumeSource() *RBDPersistentVolumeSourceApplyConfiguration { return &RBDPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdvolumesource.go index 8e7c81732c..8dae198c09 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// RBDVolumeSourceApplyConfiguration represents an declarative configuration of the RBDVolumeSource type for use +// RBDVolumeSourceApplyConfiguration represents a declarative configuration of the RBDVolumeSource type for use // with apply. type RBDVolumeSourceApplyConfiguration struct { CephMonitors []string `json:"monitors,omitempty"` @@ -31,7 +31,7 @@ type RBDVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// RBDVolumeSourceApplyConfiguration constructs an declarative configuration of the RBDVolumeSource type for use with +// RBDVolumeSourceApplyConfiguration constructs a declarative configuration of the RBDVolumeSource type for use with // apply. 
func RBDVolumeSource() *RBDVolumeSourceApplyConfiguration { return &RBDVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go index 7cd71460a9..b28f422dc7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicationControllerApplyConfiguration represents an declarative configuration of the ReplicationController type for use +// ReplicationControllerApplyConfiguration represents a declarative configuration of the ReplicationController type for use // with apply. type ReplicationControllerApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ReplicationControllerApplyConfiguration struct { Status *ReplicationControllerStatusApplyConfiguration `json:"status,omitempty"` } -// ReplicationController constructs an declarative configuration of the ReplicationController type for use with +// ReplicationController constructs a declarative configuration of the ReplicationController type for use with // apply. func ReplicationController(name, namespace string) *ReplicationControllerApplyConfiguration { b := &ReplicationControllerApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *ReplicationControllerApplyConfiguration) WithStatus(value *ReplicationC b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ReplicationControllerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go index c3d56cc697..0d74c1db9a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ReplicationControllerConditionApplyConfiguration represents an declarative configuration of the ReplicationControllerCondition type for use +// ReplicationControllerConditionApplyConfiguration represents a declarative configuration of the ReplicationControllerCondition type for use // with apply. type ReplicationControllerConditionApplyConfiguration struct { Type *v1.ReplicationControllerConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type ReplicationControllerConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// ReplicationControllerConditionApplyConfiguration constructs an declarative configuration of the ReplicationControllerCondition type for use with +// ReplicationControllerConditionApplyConfiguration constructs a declarative configuration of the ReplicationControllerCondition type for use with // apply. 
func ReplicationControllerCondition() *ReplicationControllerConditionApplyConfiguration { return &ReplicationControllerConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerspec.go index dd4e081d9f..07bac9f4c9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ReplicationControllerSpecApplyConfiguration represents an declarative configuration of the ReplicationControllerSpec type for use +// ReplicationControllerSpecApplyConfiguration represents a declarative configuration of the ReplicationControllerSpec type for use // with apply. type ReplicationControllerSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -27,7 +27,7 @@ type ReplicationControllerSpecApplyConfiguration struct { Template *PodTemplateSpecApplyConfiguration `json:"template,omitempty"` } -// ReplicationControllerSpecApplyConfiguration constructs an declarative configuration of the ReplicationControllerSpec type for use with +// ReplicationControllerSpecApplyConfiguration constructs a declarative configuration of the ReplicationControllerSpec type for use with // apply. func ReplicationControllerSpec() *ReplicationControllerSpecApplyConfiguration { return &ReplicationControllerSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerstatus.go index 1b994cfb8c..c8046aa5a4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ReplicationControllerStatusApplyConfiguration represents an declarative configuration of the ReplicationControllerStatus type for use +// ReplicationControllerStatusApplyConfiguration represents a declarative configuration of the ReplicationControllerStatus type for use // with apply. type ReplicationControllerStatusApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -29,7 +29,7 @@ type ReplicationControllerStatusApplyConfiguration struct { Conditions []ReplicationControllerConditionApplyConfiguration `json:"conditions,omitempty"` } -// ReplicationControllerStatusApplyConfiguration constructs an declarative configuration of the ReplicationControllerStatus type for use with +// ReplicationControllerStatusApplyConfiguration constructs a declarative configuration of the ReplicationControllerStatus type for use with // apply. func ReplicationControllerStatus() *ReplicationControllerStatusApplyConfiguration { return &ReplicationControllerStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go index 064dd4e2e4..b00c692485 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go @@ -18,13 +18,14 @@ limitations under the License. 
package v1 -// ResourceClaimApplyConfiguration represents an declarative configuration of the ResourceClaim type for use +// ResourceClaimApplyConfiguration represents a declarative configuration of the ResourceClaim type for use // with apply. type ResourceClaimApplyConfiguration struct { - Name *string `json:"name,omitempty"` + Name *string `json:"name,omitempty"` + Request *string `json:"request,omitempty"` } -// ResourceClaimApplyConfiguration constructs an declarative configuration of the ResourceClaim type for use with +// ResourceClaimApplyConfiguration constructs a declarative configuration of the ResourceClaim type for use with // apply. func ResourceClaim() *ResourceClaimApplyConfiguration { return &ResourceClaimApplyConfiguration{} @@ -37,3 +38,11 @@ func (b *ResourceClaimApplyConfiguration) WithName(value string) *ResourceClaimA b.Name = &value return b } + +// WithRequest sets the Request field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Request field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithRequest(value string) *ResourceClaimApplyConfiguration { + b.Request = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcefieldselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcefieldselector.go index 2741227dd7..1b4918a633 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcefieldselector.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcefieldselector.go @@ -22,7 +22,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// ResourceFieldSelectorApplyConfiguration represents an declarative configuration of the ResourceFieldSelector type for use +// ResourceFieldSelectorApplyConfiguration represents a declarative configuration of the ResourceFieldSelector type for use // with apply. type ResourceFieldSelectorApplyConfiguration struct { ContainerName *string `json:"containerName,omitempty"` @@ -30,7 +30,7 @@ type ResourceFieldSelectorApplyConfiguration struct { Divisor *resource.Quantity `json:"divisor,omitempty"` } -// ResourceFieldSelectorApplyConfiguration constructs an declarative configuration of the ResourceFieldSelector type for use with +// ResourceFieldSelectorApplyConfiguration constructs a declarative configuration of the ResourceFieldSelector type for use with // apply. func ResourceFieldSelector() *ResourceFieldSelectorApplyConfiguration { return &ResourceFieldSelectorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go new file mode 100644 index 0000000000..5169cb4bc3 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ResourceHealthApplyConfiguration represents a declarative configuration of the ResourceHealth type for use +// with apply. +type ResourceHealthApplyConfiguration struct { + ResourceID *v1.ResourceID `json:"resourceID,omitempty"` + Health *v1.ResourceHealthStatus `json:"health,omitempty"` +} + +// ResourceHealthApplyConfiguration constructs a declarative configuration of the ResourceHealth type for use with +// apply. +func ResourceHealth() *ResourceHealthApplyConfiguration { + return &ResourceHealthApplyConfiguration{} +} + +// WithResourceID sets the ResourceID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceID field is set to the value of the last call. +func (b *ResourceHealthApplyConfiguration) WithResourceID(value v1.ResourceID) *ResourceHealthApplyConfiguration { + b.ResourceID = &value + return b +} + +// WithHealth sets the Health field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Health field is set to the value of the last call. +func (b *ResourceHealthApplyConfiguration) WithHealth(value v1.ResourceHealthStatus) *ResourceHealthApplyConfiguration { + b.Health = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go index 6b22ebdc59..2b78ba7038 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ResourceQuotaApplyConfiguration represents an declarative configuration of the ResourceQuota type for use +// ResourceQuotaApplyConfiguration represents a declarative configuration of the ResourceQuota type for use // with apply. type ResourceQuotaApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ResourceQuotaApplyConfiguration struct { Status *ResourceQuotaStatusApplyConfiguration `json:"status,omitempty"` } -// ResourceQuota constructs an declarative configuration of the ResourceQuota type for use with +// ResourceQuota constructs a declarative configuration of the ResourceQuota type for use with // apply. func ResourceQuota(name, namespace string) *ResourceQuotaApplyConfiguration { b := &ResourceQuotaApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *ResourceQuotaApplyConfiguration) WithStatus(value *ResourceQuotaStatusA b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ResourceQuotaApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go index feb454bc4b..0012ace25f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ResourceQuotaSpecApplyConfiguration represents an declarative configuration of the ResourceQuotaSpec type for use +// ResourceQuotaSpecApplyConfiguration represents a declarative configuration of the ResourceQuotaSpec type for use // with apply. type ResourceQuotaSpecApplyConfiguration struct { Hard *v1.ResourceList `json:"hard,omitempty"` @@ -30,7 +30,7 @@ type ResourceQuotaSpecApplyConfiguration struct { ScopeSelector *ScopeSelectorApplyConfiguration `json:"scopeSelector,omitempty"` } -// ResourceQuotaSpecApplyConfiguration constructs an declarative configuration of the ResourceQuotaSpec type for use with +// ResourceQuotaSpecApplyConfiguration constructs a declarative configuration of the ResourceQuotaSpec type for use with // apply. func ResourceQuotaSpec() *ResourceQuotaSpecApplyConfiguration { return &ResourceQuotaSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go index 4dced90f7a..364b96eecf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// ResourceQuotaStatusApplyConfiguration represents an declarative configuration of the ResourceQuotaStatus type for use +// ResourceQuotaStatusApplyConfiguration represents a declarative configuration of the ResourceQuotaStatus type for use // with apply. type ResourceQuotaStatusApplyConfiguration struct { Hard *v1.ResourceList `json:"hard,omitempty"` Used *v1.ResourceList `json:"used,omitempty"` } -// ResourceQuotaStatusApplyConfiguration constructs an declarative configuration of the ResourceQuotaStatus type for use with +// ResourceQuotaStatusApplyConfiguration constructs a declarative configuration of the ResourceQuotaStatus type for use with // apply. func ResourceQuotaStatus() *ResourceQuotaStatusApplyConfiguration { return &ResourceQuotaStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go index 9482b8d713..51197862c9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ResourceRequirementsApplyConfiguration represents an declarative configuration of the ResourceRequirements type for use +// ResourceRequirementsApplyConfiguration represents a declarative configuration of the ResourceRequirements type for use // with apply. 
type ResourceRequirementsApplyConfiguration struct { Limits *v1.ResourceList `json:"limits,omitempty"` @@ -30,7 +30,7 @@ type ResourceRequirementsApplyConfiguration struct { Claims []ResourceClaimApplyConfiguration `json:"claims,omitempty"` } -// ResourceRequirementsApplyConfiguration constructs an declarative configuration of the ResourceRequirements type for use with +// ResourceRequirementsApplyConfiguration constructs a declarative configuration of the ResourceRequirements type for use with // apply. func ResourceRequirements() *ResourceRequirementsApplyConfiguration { return &ResourceRequirementsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go new file mode 100644 index 0000000000..1e63c87f8c --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ResourceStatusApplyConfiguration represents a declarative configuration of the ResourceStatus type for use +// with apply. +type ResourceStatusApplyConfiguration struct { + Name *v1.ResourceName `json:"name,omitempty"` + Resources []ResourceHealthApplyConfiguration `json:"resources,omitempty"` +} + +// ResourceStatusApplyConfiguration constructs a declarative configuration of the ResourceStatus type for use with +// apply. +func ResourceStatus() *ResourceStatusApplyConfiguration { + return &ResourceStatusApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceStatusApplyConfiguration) WithName(value v1.ResourceName) *ResourceStatusApplyConfiguration { + b.Name = &value + return b +} + +// WithResources adds the given value to the Resources field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Resources field. 
+func (b *ResourceStatusApplyConfiguration) WithResources(values ...*ResourceHealthApplyConfiguration) *ResourceStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResources") + } + b.Resources = append(b.Resources, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiopersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiopersistentvolumesource.go index fffb5b186d..b07f46de91 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiopersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiopersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ScaleIOPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the ScaleIOPersistentVolumeSource type for use +// ScaleIOPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the ScaleIOPersistentVolumeSource type for use // with apply. type ScaleIOPersistentVolumeSourceApplyConfiguration struct { Gateway *string `json:"gateway,omitempty"` @@ -33,7 +33,7 @@ type ScaleIOPersistentVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// ScaleIOPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the ScaleIOPersistentVolumeSource type for use with +// ScaleIOPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the ScaleIOPersistentVolumeSource type for use with // apply. func ScaleIOPersistentVolumeSource() *ScaleIOPersistentVolumeSourceApplyConfiguration { return &ScaleIOPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiovolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiovolumesource.go index b54e1161eb..740c05ebb7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiovolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiovolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ScaleIOVolumeSourceApplyConfiguration represents an declarative configuration of the ScaleIOVolumeSource type for use +// ScaleIOVolumeSourceApplyConfiguration represents a declarative configuration of the ScaleIOVolumeSource type for use // with apply. type ScaleIOVolumeSourceApplyConfiguration struct { Gateway *string `json:"gateway,omitempty"` @@ -33,7 +33,7 @@ type ScaleIOVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// ScaleIOVolumeSourceApplyConfiguration constructs an declarative configuration of the ScaleIOVolumeSource type for use with +// ScaleIOVolumeSourceApplyConfiguration constructs a declarative configuration of the ScaleIOVolumeSource type for use with // apply. 
func ScaleIOVolumeSource() *ScaleIOVolumeSourceApplyConfiguration { return &ScaleIOVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go index c901a2ae6d..c6ec87827f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ScopedResourceSelectorRequirementApplyConfiguration represents an declarative configuration of the ScopedResourceSelectorRequirement type for use +// ScopedResourceSelectorRequirementApplyConfiguration represents a declarative configuration of the ScopedResourceSelectorRequirement type for use // with apply. type ScopedResourceSelectorRequirementApplyConfiguration struct { ScopeName *v1.ResourceQuotaScope `json:"scopeName,omitempty"` @@ -30,7 +30,7 @@ type ScopedResourceSelectorRequirementApplyConfiguration struct { Values []string `json:"values,omitempty"` } -// ScopedResourceSelectorRequirementApplyConfiguration constructs an declarative configuration of the ScopedResourceSelectorRequirement type for use with +// ScopedResourceSelectorRequirementApplyConfiguration constructs a declarative configuration of the ScopedResourceSelectorRequirement type for use with // apply. func ScopedResourceSelectorRequirement() *ScopedResourceSelectorRequirementApplyConfiguration { return &ScopedResourceSelectorRequirementApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopeselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopeselector.go index 3251e9dc18..a9fb9a1b19 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopeselector.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopeselector.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// ScopeSelectorApplyConfiguration represents an declarative configuration of the ScopeSelector type for use +// ScopeSelectorApplyConfiguration represents a declarative configuration of the ScopeSelector type for use // with apply. type ScopeSelectorApplyConfiguration struct { MatchExpressions []ScopedResourceSelectorRequirementApplyConfiguration `json:"matchExpressions,omitempty"` } -// ScopeSelectorApplyConfiguration constructs an declarative configuration of the ScopeSelector type for use with +// ScopeSelectorApplyConfiguration constructs a declarative configuration of the ScopeSelector type for use with // apply. func ScopeSelector() *ScopeSelectorApplyConfiguration { return &ScopeSelectorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go index 9818a00e7a..eb3077a051 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// SeccompProfileApplyConfiguration represents an declarative configuration of the SeccompProfile type for use +// SeccompProfileApplyConfiguration represents a declarative configuration of the SeccompProfile type for use // with apply. 
type SeccompProfileApplyConfiguration struct { Type *v1.SeccompProfileType `json:"type,omitempty"` LocalhostProfile *string `json:"localhostProfile,omitempty"` } -// SeccompProfileApplyConfiguration constructs an declarative configuration of the SeccompProfile type for use with +// SeccompProfileApplyConfiguration constructs a declarative configuration of the SeccompProfile type for use with // apply. func SeccompProfile() *SeccompProfileApplyConfiguration { return &SeccompProfileApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go index 3f7e1eb039..1d850b00bb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// SecretApplyConfiguration represents an declarative configuration of the Secret type for use +// SecretApplyConfiguration represents a declarative configuration of the Secret type for use // with apply. type SecretApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -38,7 +38,7 @@ type SecretApplyConfiguration struct { Type *corev1.SecretType `json:"type,omitempty"` } -// Secret constructs an declarative configuration of the Secret type for use with +// Secret constructs a declarative configuration of the Secret type for use with // apply. func Secret(name, namespace string) *SecretApplyConfiguration { b := &SecretApplyConfiguration{} @@ -286,3 +286,9 @@ func (b *SecretApplyConfiguration) WithType(value corev1.SecretType) *SecretAppl b.Type = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *SecretApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go index 7b22a8d0b2..ba99b7f5fd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// SecretEnvSourceApplyConfiguration represents an declarative configuration of the SecretEnvSource type for use +// SecretEnvSourceApplyConfiguration represents a declarative configuration of the SecretEnvSource type for use // with apply. type SecretEnvSourceApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` Optional *bool `json:"optional,omitempty"` } -// SecretEnvSourceApplyConfiguration constructs an declarative configuration of the SecretEnvSource type for use with +// SecretEnvSourceApplyConfiguration constructs a declarative configuration of the SecretEnvSource type for use with // apply. func SecretEnvSource() *SecretEnvSourceApplyConfiguration { return &SecretEnvSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go index b8464a348a..2d490b8108 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// SecretKeySelectorApplyConfiguration represents an declarative configuration of the SecretKeySelector type for use +// SecretKeySelectorApplyConfiguration represents a declarative configuration of the SecretKeySelector type for use // with apply. type SecretKeySelectorApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` @@ -26,7 +26,7 @@ type SecretKeySelectorApplyConfiguration struct { Optional *bool `json:"optional,omitempty"` } -// SecretKeySelectorApplyConfiguration constructs an declarative configuration of the SecretKeySelector type for use with +// SecretKeySelectorApplyConfiguration constructs a declarative configuration of the SecretKeySelector type for use with // apply. func SecretKeySelector() *SecretKeySelectorApplyConfiguration { return &SecretKeySelectorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go index e8edc61273..65ce3c66da 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// SecretProjectionApplyConfiguration represents an declarative configuration of the SecretProjection type for use +// SecretProjectionApplyConfiguration represents a declarative configuration of the SecretProjection type for use // with apply. type SecretProjectionApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` @@ -26,7 +26,7 @@ type SecretProjectionApplyConfiguration struct { Optional *bool `json:"optional,omitempty"` } -// SecretProjectionApplyConfiguration constructs an declarative configuration of the SecretProjection type for use with +// SecretProjectionApplyConfiguration constructs a declarative configuration of the SecretProjection type for use with // apply. func SecretProjection() *SecretProjectionApplyConfiguration { return &SecretProjectionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretreference.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretreference.go index 95579d003e..f5e0de23aa 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretreference.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// SecretReferenceApplyConfiguration represents an declarative configuration of the SecretReference type for use +// SecretReferenceApplyConfiguration represents a declarative configuration of the SecretReference type for use // with apply. type SecretReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` Namespace *string `json:"namespace,omitempty"` } -// SecretReferenceApplyConfiguration constructs an declarative configuration of the SecretReference type for use with +// SecretReferenceApplyConfiguration constructs a declarative configuration of the SecretReference type for use with // apply. 
func SecretReference() *SecretReferenceApplyConfiguration { return &SecretReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretvolumesource.go index bcb441e9f3..9f765d354d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// SecretVolumeSourceApplyConfiguration represents an declarative configuration of the SecretVolumeSource type for use +// SecretVolumeSourceApplyConfiguration represents a declarative configuration of the SecretVolumeSource type for use // with apply. type SecretVolumeSourceApplyConfiguration struct { SecretName *string `json:"secretName,omitempty"` @@ -27,7 +27,7 @@ type SecretVolumeSourceApplyConfiguration struct { Optional *bool `json:"optional,omitempty"` } -// SecretVolumeSourceApplyConfiguration constructs an declarative configuration of the SecretVolumeSource type for use with +// SecretVolumeSourceApplyConfiguration constructs a declarative configuration of the SecretVolumeSource type for use with // apply. func SecretVolumeSource() *SecretVolumeSourceApplyConfiguration { return &SecretVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/securitycontext.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/securitycontext.go index 8f01537eb3..99faab72da 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/securitycontext.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/securitycontext.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" ) -// SecurityContextApplyConfiguration represents an declarative configuration of the SecurityContext type for use +// SecurityContextApplyConfiguration represents a declarative configuration of the SecurityContext type for use // with apply. type SecurityContextApplyConfiguration struct { Capabilities *CapabilitiesApplyConfiguration `json:"capabilities,omitempty"` @@ -36,9 +36,10 @@ type SecurityContextApplyConfiguration struct { AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty"` ProcMount *corev1.ProcMountType `json:"procMount,omitempty"` SeccompProfile *SeccompProfileApplyConfiguration `json:"seccompProfile,omitempty"` + AppArmorProfile *AppArmorProfileApplyConfiguration `json:"appArmorProfile,omitempty"` } -// SecurityContextApplyConfiguration constructs an declarative configuration of the SecurityContext type for use with +// SecurityContextApplyConfiguration constructs a declarative configuration of the SecurityContext type for use with // apply. func SecurityContext() *SecurityContextApplyConfiguration { return &SecurityContextApplyConfiguration{} @@ -131,3 +132,11 @@ func (b *SecurityContextApplyConfiguration) WithSeccompProfile(value *SeccompPro b.SeccompProfile = value return b } + +// WithAppArmorProfile sets the AppArmorProfile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AppArmorProfile field is set to the value of the last call. 
+func (b *SecurityContextApplyConfiguration) WithAppArmorProfile(value *AppArmorProfileApplyConfiguration) *SecurityContextApplyConfiguration { + b.AppArmorProfile = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go index 2938faa18e..bad01300f0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// SELinuxOptionsApplyConfiguration represents an declarative configuration of the SELinuxOptions type for use +// SELinuxOptionsApplyConfiguration represents a declarative configuration of the SELinuxOptions type for use // with apply. type SELinuxOptionsApplyConfiguration struct { User *string `json:"user,omitempty"` @@ -27,7 +27,7 @@ type SELinuxOptionsApplyConfiguration struct { Level *string `json:"level,omitempty"` } -// SELinuxOptionsApplyConfiguration constructs an declarative configuration of the SELinuxOptions type for use with +// SELinuxOptionsApplyConfiguration constructs a declarative configuration of the SELinuxOptions type for use with // apply. func SELinuxOptions() *SELinuxOptionsApplyConfiguration { return &SELinuxOptionsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go index 3fa1195237..2dac0589d2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ServiceApplyConfiguration represents an declarative configuration of the Service type for use +// ServiceApplyConfiguration represents a declarative configuration of the Service type for use // with apply. type ServiceApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ServiceApplyConfiguration struct { Status *ServiceStatusApplyConfiguration `json:"status,omitempty"` } -// Service constructs an declarative configuration of the Service type for use with +// Service constructs a declarative configuration of the Service type for use with // apply. func Service(name, namespace string) *ServiceApplyConfiguration { b := &ServiceApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *ServiceApplyConfiguration) WithStatus(value *ServiceStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ServiceApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go index 53a8193750..26d33deb95 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ServiceAccountApplyConfiguration represents an declarative configuration of the ServiceAccount type for use +// ServiceAccountApplyConfiguration represents a declarative configuration of the ServiceAccount type for use // with apply. 
type ServiceAccountApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type ServiceAccountApplyConfiguration struct { AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"` } -// ServiceAccount constructs an declarative configuration of the ServiceAccount type for use with +// ServiceAccount constructs a declarative configuration of the ServiceAccount type for use with // apply. func ServiceAccount(name, namespace string) *ServiceAccountApplyConfiguration { b := &ServiceAccountApplyConfiguration{} @@ -275,3 +275,9 @@ func (b *ServiceAccountApplyConfiguration) WithAutomountServiceAccountToken(valu b.AutomountServiceAccountToken = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ServiceAccountApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccounttokenprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccounttokenprojection.go index a52fad7d8d..fab81bf8a2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccounttokenprojection.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccounttokenprojection.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ServiceAccountTokenProjectionApplyConfiguration represents an declarative configuration of the ServiceAccountTokenProjection type for use +// ServiceAccountTokenProjectionApplyConfiguration represents a declarative configuration of the ServiceAccountTokenProjection type for use // with apply. type ServiceAccountTokenProjectionApplyConfiguration struct { Audience *string `json:"audience,omitempty"` @@ -26,7 +26,7 @@ type ServiceAccountTokenProjectionApplyConfiguration struct { Path *string `json:"path,omitempty"` } -// ServiceAccountTokenProjectionApplyConfiguration constructs an declarative configuration of the ServiceAccountTokenProjection type for use with +// ServiceAccountTokenProjectionApplyConfiguration constructs a declarative configuration of the ServiceAccountTokenProjection type for use with // apply. func ServiceAccountTokenProjection() *ServiceAccountTokenProjectionApplyConfiguration { return &ServiceAccountTokenProjectionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go index 8bc63bd950..e889f21345 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go @@ -23,7 +23,7 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// ServicePortApplyConfiguration represents an declarative configuration of the ServicePort type for use +// ServicePortApplyConfiguration represents a declarative configuration of the ServicePort type for use // with apply. type ServicePortApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -34,7 +34,7 @@ type ServicePortApplyConfiguration struct { NodePort *int32 `json:"nodePort,omitempty"` } -// ServicePortApplyConfiguration constructs an declarative configuration of the ServicePort type for use with +// ServicePortApplyConfiguration constructs a declarative configuration of the ServicePort type for use with // apply. 
func ServicePort() *ServicePortApplyConfiguration { return &ServicePortApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go index 493af6fb3c..41367dce4f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" ) -// ServiceSpecApplyConfiguration represents an declarative configuration of the ServiceSpec type for use +// ServiceSpecApplyConfiguration represents a declarative configuration of the ServiceSpec type for use // with apply. type ServiceSpecApplyConfiguration struct { Ports []ServicePortApplyConfiguration `json:"ports,omitempty"` @@ -44,9 +44,10 @@ type ServiceSpecApplyConfiguration struct { AllocateLoadBalancerNodePorts *bool `json:"allocateLoadBalancerNodePorts,omitempty"` LoadBalancerClass *string `json:"loadBalancerClass,omitempty"` InternalTrafficPolicy *corev1.ServiceInternalTrafficPolicy `json:"internalTrafficPolicy,omitempty"` + TrafficDistribution *string `json:"trafficDistribution,omitempty"` } -// ServiceSpecApplyConfiguration constructs an declarative configuration of the ServiceSpec type for use with +// ServiceSpecApplyConfiguration constructs a declarative configuration of the ServiceSpec type for use with // apply. func ServiceSpec() *ServiceSpecApplyConfiguration { return &ServiceSpecApplyConfiguration{} @@ -222,3 +223,11 @@ func (b *ServiceSpecApplyConfiguration) WithInternalTrafficPolicy(value corev1.S b.InternalTrafficPolicy = &value return b } + +// WithTrafficDistribution sets the TrafficDistribution field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TrafficDistribution field is set to the value of the last call. +func (b *ServiceSpecApplyConfiguration) WithTrafficDistribution(value string) *ServiceSpecApplyConfiguration { + b.TrafficDistribution = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicestatus.go index 2347cec678..11c3f8a80a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicestatus.go @@ -22,14 +22,14 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ServiceStatusApplyConfiguration represents an declarative configuration of the ServiceStatus type for use +// ServiceStatusApplyConfiguration represents a declarative configuration of the ServiceStatus type for use // with apply. type ServiceStatusApplyConfiguration struct { LoadBalancer *LoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"` Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// ServiceStatusApplyConfiguration constructs an declarative configuration of the ServiceStatus type for use with +// ServiceStatusApplyConfiguration constructs a declarative configuration of the ServiceStatus type for use with // apply. 
func ServiceStatus() *ServiceStatusApplyConfiguration { return &ServiceStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/sessionaffinityconfig.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sessionaffinityconfig.go index 7016f836a1..13b045fffc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/sessionaffinityconfig.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sessionaffinityconfig.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// SessionAffinityConfigApplyConfiguration represents an declarative configuration of the SessionAffinityConfig type for use +// SessionAffinityConfigApplyConfiguration represents a declarative configuration of the SessionAffinityConfig type for use // with apply. type SessionAffinityConfigApplyConfiguration struct { ClientIP *ClientIPConfigApplyConfiguration `json:"clientIP,omitempty"` } -// SessionAffinityConfigApplyConfiguration constructs an declarative configuration of the SessionAffinityConfig type for use with +// SessionAffinityConfigApplyConfiguration constructs a declarative configuration of the SessionAffinityConfig type for use with // apply. func SessionAffinityConfig() *SessionAffinityConfigApplyConfiguration { return &SessionAffinityConfigApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go index 8b3284536a..b4115609b1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// SleepActionApplyConfiguration represents an declarative configuration of the SleepAction type for use +// SleepActionApplyConfiguration represents a declarative configuration of the SleepAction type for use // with apply. type SleepActionApplyConfiguration struct { Seconds *int64 `json:"seconds,omitempty"` } -// SleepActionApplyConfiguration constructs an declarative configuration of the SleepAction type for use with +// SleepActionApplyConfiguration constructs a declarative configuration of the SleepAction type for use with // apply. func SleepAction() *SleepActionApplyConfiguration { return &SleepActionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageospersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageospersistentvolumesource.go index 00ed39ccb0..7381a498e1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageospersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageospersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// StorageOSPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the StorageOSPersistentVolumeSource type for use +// StorageOSPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the StorageOSPersistentVolumeSource type for use // with apply. 
type StorageOSPersistentVolumeSourceApplyConfiguration struct { VolumeName *string `json:"volumeName,omitempty"` @@ -28,7 +28,7 @@ type StorageOSPersistentVolumeSourceApplyConfiguration struct { SecretRef *ObjectReferenceApplyConfiguration `json:"secretRef,omitempty"` } -// StorageOSPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the StorageOSPersistentVolumeSource type for use with +// StorageOSPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the StorageOSPersistentVolumeSource type for use with // apply. func StorageOSPersistentVolumeSource() *StorageOSPersistentVolumeSourceApplyConfiguration { return &StorageOSPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageosvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageosvolumesource.go index 7f3b810cf6..81d9373c19 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageosvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageosvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// StorageOSVolumeSourceApplyConfiguration represents an declarative configuration of the StorageOSVolumeSource type for use +// StorageOSVolumeSourceApplyConfiguration represents a declarative configuration of the StorageOSVolumeSource type for use // with apply. type StorageOSVolumeSourceApplyConfiguration struct { VolumeName *string `json:"volumeName,omitempty"` @@ -28,7 +28,7 @@ type StorageOSVolumeSourceApplyConfiguration struct { SecretRef *LocalObjectReferenceApplyConfiguration `json:"secretRef,omitempty"` } -// StorageOSVolumeSourceApplyConfiguration constructs an declarative configuration of the StorageOSVolumeSource type for use with +// StorageOSVolumeSourceApplyConfiguration constructs a declarative configuration of the StorageOSVolumeSource type for use with // apply. func StorageOSVolumeSource() *StorageOSVolumeSourceApplyConfiguration { return &StorageOSVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/sysctl.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sysctl.go index deab9e0b38..7719eb7d60 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/sysctl.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sysctl.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// SysctlApplyConfiguration represents an declarative configuration of the Sysctl type for use +// SysctlApplyConfiguration represents a declarative configuration of the Sysctl type for use // with apply. type SysctlApplyConfiguration struct { Name *string `json:"name,omitempty"` Value *string `json:"value,omitempty"` } -// SysctlApplyConfiguration constructs an declarative configuration of the Sysctl type for use with +// SysctlApplyConfiguration constructs a declarative configuration of the Sysctl type for use with // apply. 
func Sysctl() *SysctlApplyConfiguration { return &SysctlApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go index 4672b87427..a34fb05526 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// TaintApplyConfiguration represents an declarative configuration of the Taint type for use +// TaintApplyConfiguration represents a declarative configuration of the Taint type for use // with apply. type TaintApplyConfiguration struct { Key *string `json:"key,omitempty"` @@ -32,7 +32,7 @@ type TaintApplyConfiguration struct { TimeAdded *metav1.Time `json:"timeAdded,omitempty"` } -// TaintApplyConfiguration constructs an declarative configuration of the Taint type for use with +// TaintApplyConfiguration constructs a declarative configuration of the Taint type for use with // apply. func Taint() *TaintApplyConfiguration { return &TaintApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/tcpsocketaction.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/tcpsocketaction.go index bd038fc3ae..cba1a7d081 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/tcpsocketaction.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/tcpsocketaction.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// TCPSocketActionApplyConfiguration represents an declarative configuration of the TCPSocketAction type for use +// TCPSocketActionApplyConfiguration represents a declarative configuration of the TCPSocketAction type for use // with apply. type TCPSocketActionApplyConfiguration struct { Port *intstr.IntOrString `json:"port,omitempty"` Host *string `json:"host,omitempty"` } -// TCPSocketActionApplyConfiguration constructs an declarative configuration of the TCPSocketAction type for use with +// TCPSocketActionApplyConfiguration constructs a declarative configuration of the TCPSocketAction type for use with // apply. func TCPSocketAction() *TCPSocketActionApplyConfiguration { return &TCPSocketActionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go index 1a92a8c668..1bcc85b65f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// TolerationApplyConfiguration represents an declarative configuration of the Toleration type for use +// TolerationApplyConfiguration represents a declarative configuration of the Toleration type for use // with apply. type TolerationApplyConfiguration struct { Key *string `json:"key,omitempty"` @@ -32,7 +32,7 @@ type TolerationApplyConfiguration struct { TolerationSeconds *int64 `json:"tolerationSeconds,omitempty"` } -// TolerationApplyConfiguration constructs an declarative configuration of the Toleration type for use with +// TolerationApplyConfiguration constructs a declarative configuration of the Toleration type for use with // apply. 
func Toleration() *TolerationApplyConfiguration { return &TolerationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorlabelrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorlabelrequirement.go index 9581490de2..674ddec93c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorlabelrequirement.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorlabelrequirement.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// TopologySelectorLabelRequirementApplyConfiguration represents an declarative configuration of the TopologySelectorLabelRequirement type for use +// TopologySelectorLabelRequirementApplyConfiguration represents a declarative configuration of the TopologySelectorLabelRequirement type for use // with apply. type TopologySelectorLabelRequirementApplyConfiguration struct { Key *string `json:"key,omitempty"` Values []string `json:"values,omitempty"` } -// TopologySelectorLabelRequirementApplyConfiguration constructs an declarative configuration of the TopologySelectorLabelRequirement type for use with +// TopologySelectorLabelRequirementApplyConfiguration constructs a declarative configuration of the TopologySelectorLabelRequirement type for use with // apply. func TopologySelectorLabelRequirement() *TopologySelectorLabelRequirementApplyConfiguration { return &TopologySelectorLabelRequirementApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go index a025b8a2a8..7812ae5204 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// TopologySelectorTermApplyConfiguration represents an declarative configuration of the TopologySelectorTerm type for use +// TopologySelectorTermApplyConfiguration represents a declarative configuration of the TopologySelectorTerm type for use // with apply. type TopologySelectorTermApplyConfiguration struct { MatchLabelExpressions []TopologySelectorLabelRequirementApplyConfiguration `json:"matchLabelExpressions,omitempty"` } -// TopologySelectorTermApplyConfiguration constructs an declarative configuration of the TopologySelectorTerm type for use with +// TopologySelectorTermApplyConfiguration constructs a declarative configuration of the TopologySelectorTerm type for use with // apply. func TopologySelectorTerm() *TopologySelectorTermApplyConfiguration { return &TopologySelectorTermApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go index fbfa8fa886..b21d233513 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// TopologySpreadConstraintApplyConfiguration represents an declarative configuration of the TopologySpreadConstraint type for use +// TopologySpreadConstraintApplyConfiguration represents a declarative configuration of the TopologySpreadConstraint type for use // with apply. 
type TopologySpreadConstraintApplyConfiguration struct { MaxSkew *int32 `json:"maxSkew,omitempty"` @@ -36,7 +36,7 @@ type TopologySpreadConstraintApplyConfiguration struct { MatchLabelKeys []string `json:"matchLabelKeys,omitempty"` } -// TopologySpreadConstraintApplyConfiguration constructs an declarative configuration of the TopologySpreadConstraint type for use with +// TopologySpreadConstraintApplyConfiguration constructs a declarative configuration of the TopologySpreadConstraint type for use with // apply. func TopologySpreadConstraint() *TopologySpreadConstraintApplyConfiguration { return &TopologySpreadConstraintApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedlocalobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedlocalobjectreference.go index cdc2eb7d34..1e63b79889 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedlocalobjectreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedlocalobjectreference.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// TypedLocalObjectReferenceApplyConfiguration represents an declarative configuration of the TypedLocalObjectReference type for use +// TypedLocalObjectReferenceApplyConfiguration represents a declarative configuration of the TypedLocalObjectReference type for use // with apply. type TypedLocalObjectReferenceApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -26,7 +26,7 @@ type TypedLocalObjectReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// TypedLocalObjectReferenceApplyConfiguration constructs an declarative configuration of the TypedLocalObjectReference type for use with +// TypedLocalObjectReferenceApplyConfiguration constructs a declarative configuration of the TypedLocalObjectReference type for use with // apply. func TypedLocalObjectReference() *TypedLocalObjectReferenceApplyConfiguration { return &TypedLocalObjectReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go index d9a01c9c3a..f07de8902e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// TypedObjectReferenceApplyConfiguration represents an declarative configuration of the TypedObjectReference type for use +// TypedObjectReferenceApplyConfiguration represents a declarative configuration of the TypedObjectReference type for use // with apply. type TypedObjectReferenceApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -27,7 +27,7 @@ type TypedObjectReferenceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` } -// TypedObjectReferenceApplyConfiguration constructs an declarative configuration of the TypedObjectReference type for use with +// TypedObjectReferenceApplyConfiguration constructs a declarative configuration of the TypedObjectReference type for use with // apply. 
func TypedObjectReference() *TypedObjectReferenceApplyConfiguration { return &TypedObjectReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go index db0686bce7..9a48f83497 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// VolumeApplyConfiguration represents an declarative configuration of the Volume type for use +// VolumeApplyConfiguration represents a declarative configuration of the Volume type for use // with apply. type VolumeApplyConfiguration struct { Name *string `json:"name,omitempty"` VolumeSourceApplyConfiguration `json:",inline"` } -// VolumeApplyConfiguration constructs an declarative configuration of the Volume type for use with +// VolumeApplyConfiguration constructs a declarative configuration of the Volume type for use with // apply. func Volume() *VolumeApplyConfiguration { return &VolumeApplyConfiguration{} @@ -270,3 +270,11 @@ func (b *VolumeApplyConfiguration) WithEphemeral(value *EphemeralVolumeSourceApp b.Ephemeral = value return b } + +// WithImage sets the Image field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Image field is set to the value of the last call. +func (b *VolumeApplyConfiguration) WithImage(value *ImageVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { + b.Image = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumedevice.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumedevice.go index ea18ca8d9e..0bc52aad2a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumedevice.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumedevice.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// VolumeDeviceApplyConfiguration represents an declarative configuration of the VolumeDevice type for use +// VolumeDeviceApplyConfiguration represents a declarative configuration of the VolumeDevice type for use // with apply. type VolumeDeviceApplyConfiguration struct { Name *string `json:"name,omitempty"` DevicePath *string `json:"devicePath,omitempty"` } -// VolumeDeviceApplyConfiguration constructs an declarative configuration of the VolumeDevice type for use with +// VolumeDeviceApplyConfiguration constructs a declarative configuration of the VolumeDevice type for use with // apply. func VolumeDevice() *VolumeDeviceApplyConfiguration { return &VolumeDeviceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go index b0bec9ffed..49f22cc4e7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go @@ -22,18 +22,19 @@ import ( v1 "k8s.io/api/core/v1" ) -// VolumeMountApplyConfiguration represents an declarative configuration of the VolumeMount type for use +// VolumeMountApplyConfiguration represents a declarative configuration of the VolumeMount type for use // with apply. 
type VolumeMountApplyConfiguration struct { - Name *string `json:"name,omitempty"` - ReadOnly *bool `json:"readOnly,omitempty"` - MountPath *string `json:"mountPath,omitempty"` - SubPath *string `json:"subPath,omitempty"` - MountPropagation *v1.MountPropagationMode `json:"mountPropagation,omitempty"` - SubPathExpr *string `json:"subPathExpr,omitempty"` + Name *string `json:"name,omitempty"` + ReadOnly *bool `json:"readOnly,omitempty"` + RecursiveReadOnly *v1.RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty"` + MountPath *string `json:"mountPath,omitempty"` + SubPath *string `json:"subPath,omitempty"` + MountPropagation *v1.MountPropagationMode `json:"mountPropagation,omitempty"` + SubPathExpr *string `json:"subPathExpr,omitempty"` } -// VolumeMountApplyConfiguration constructs an declarative configuration of the VolumeMount type for use with +// VolumeMountApplyConfiguration constructs a declarative configuration of the VolumeMount type for use with // apply. func VolumeMount() *VolumeMountApplyConfiguration { return &VolumeMountApplyConfiguration{} @@ -55,6 +56,14 @@ func (b *VolumeMountApplyConfiguration) WithReadOnly(value bool) *VolumeMountApp return b } +// WithRecursiveReadOnly sets the RecursiveReadOnly field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RecursiveReadOnly field is set to the value of the last call. +func (b *VolumeMountApplyConfiguration) WithRecursiveReadOnly(value v1.RecursiveReadOnlyMode) *VolumeMountApplyConfiguration { + b.RecursiveReadOnly = &value + return b +} + // WithMountPath sets the MountPath field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the MountPath field is set to the value of the last call. diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go new file mode 100644 index 0000000000..a0a9b5401c --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// VolumeMountStatusApplyConfiguration represents a declarative configuration of the VolumeMountStatus type for use +// with apply. +type VolumeMountStatusApplyConfiguration struct { + Name *string `json:"name,omitempty"` + MountPath *string `json:"mountPath,omitempty"` + ReadOnly *bool `json:"readOnly,omitempty"` + RecursiveReadOnly *v1.RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty"` +} + +// VolumeMountStatusApplyConfiguration constructs a declarative configuration of the VolumeMountStatus type for use with +// apply. 
+func VolumeMountStatus() *VolumeMountStatusApplyConfiguration { + return &VolumeMountStatusApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *VolumeMountStatusApplyConfiguration) WithName(value string) *VolumeMountStatusApplyConfiguration { + b.Name = &value + return b +} + +// WithMountPath sets the MountPath field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MountPath field is set to the value of the last call. +func (b *VolumeMountStatusApplyConfiguration) WithMountPath(value string) *VolumeMountStatusApplyConfiguration { + b.MountPath = &value + return b +} + +// WithReadOnly sets the ReadOnly field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadOnly field is set to the value of the last call. +func (b *VolumeMountStatusApplyConfiguration) WithReadOnly(value bool) *VolumeMountStatusApplyConfiguration { + b.ReadOnly = &value + return b +} + +// WithRecursiveReadOnly sets the RecursiveReadOnly field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RecursiveReadOnly field is set to the value of the last call. +func (b *VolumeMountStatusApplyConfiguration) WithRecursiveReadOnly(value v1.RecursiveReadOnlyMode) *VolumeMountStatusApplyConfiguration { + b.RecursiveReadOnly = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumenodeaffinity.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumenodeaffinity.go index 32bfd82928..9198c25dc8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumenodeaffinity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumenodeaffinity.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// VolumeNodeAffinityApplyConfiguration represents an declarative configuration of the VolumeNodeAffinity type for use +// VolumeNodeAffinityApplyConfiguration represents a declarative configuration of the VolumeNodeAffinity type for use // with apply. type VolumeNodeAffinityApplyConfiguration struct { Required *NodeSelectorApplyConfiguration `json:"required,omitempty"` } -// VolumeNodeAffinityApplyConfiguration constructs an declarative configuration of the VolumeNodeAffinity type for use with +// VolumeNodeAffinityApplyConfiguration constructs a declarative configuration of the VolumeNodeAffinity type for use with // apply. func VolumeNodeAffinity() *VolumeNodeAffinityApplyConfiguration { return &VolumeNodeAffinityApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go index a2ef0a9943..c14e9fe697 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// VolumeProjectionApplyConfiguration represents an declarative configuration of the VolumeProjection type for use +// VolumeProjectionApplyConfiguration represents a declarative configuration of the VolumeProjection type for use // with apply. type VolumeProjectionApplyConfiguration struct { Secret *SecretProjectionApplyConfiguration `json:"secret,omitempty"` @@ -28,7 +28,7 @@ type VolumeProjectionApplyConfiguration struct { ClusterTrustBundle *ClusterTrustBundleProjectionApplyConfiguration `json:"clusterTrustBundle,omitempty"` } -// VolumeProjectionApplyConfiguration constructs an declarative configuration of the VolumeProjection type for use with +// VolumeProjectionApplyConfiguration constructs a declarative configuration of the VolumeProjection type for use with // apply. func VolumeProjection() *VolumeProjectionApplyConfiguration { return &VolumeProjectionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go index 89ad1da8b3..ae849f7741 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// VolumeResourceRequirementsApplyConfiguration represents an declarative configuration of the VolumeResourceRequirements type for use +// VolumeResourceRequirementsApplyConfiguration represents a declarative configuration of the VolumeResourceRequirements type for use // with apply. type VolumeResourceRequirementsApplyConfiguration struct { Limits *v1.ResourceList `json:"limits,omitempty"` Requests *v1.ResourceList `json:"requests,omitempty"` } -// VolumeResourceRequirementsApplyConfiguration constructs an declarative configuration of the VolumeResourceRequirements type for use with +// VolumeResourceRequirementsApplyConfiguration constructs a declarative configuration of the VolumeResourceRequirements type for use with // apply. func VolumeResourceRequirements() *VolumeResourceRequirementsApplyConfiguration { return &VolumeResourceRequirementsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumesource.go index 4a8d316dd5..aeead953cf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// VolumeSourceApplyConfiguration represents an declarative configuration of the VolumeSource type for use +// VolumeSourceApplyConfiguration represents a declarative configuration of the VolumeSource type for use // with apply. 
type VolumeSourceApplyConfiguration struct { HostPath *HostPathVolumeSourceApplyConfiguration `json:"hostPath,omitempty"` @@ -50,9 +50,10 @@ type VolumeSourceApplyConfiguration struct { StorageOS *StorageOSVolumeSourceApplyConfiguration `json:"storageos,omitempty"` CSI *CSIVolumeSourceApplyConfiguration `json:"csi,omitempty"` Ephemeral *EphemeralVolumeSourceApplyConfiguration `json:"ephemeral,omitempty"` + Image *ImageVolumeSourceApplyConfiguration `json:"image,omitempty"` } -// VolumeSourceApplyConfiguration constructs an declarative configuration of the VolumeSource type for use with +// VolumeSourceApplyConfiguration constructs a declarative configuration of the VolumeSource type for use with // apply. func VolumeSource() *VolumeSourceApplyConfiguration { return &VolumeSourceApplyConfiguration{} @@ -289,3 +290,11 @@ func (b *VolumeSourceApplyConfiguration) WithEphemeral(value *EphemeralVolumeSou b.Ephemeral = value return b } + +// WithImage sets the Image field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Image field is set to the value of the last call. +func (b *VolumeSourceApplyConfiguration) WithImage(value *ImageVolumeSourceApplyConfiguration) *VolumeSourceApplyConfiguration { + b.Image = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/vspherevirtualdiskvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/vspherevirtualdiskvolumesource.go index ff3e3e27d9..ea8fd8d62e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/vspherevirtualdiskvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/vspherevirtualdiskvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// VsphereVirtualDiskVolumeSourceApplyConfiguration represents an declarative configuration of the VsphereVirtualDiskVolumeSource type for use +// VsphereVirtualDiskVolumeSourceApplyConfiguration represents a declarative configuration of the VsphereVirtualDiskVolumeSource type for use // with apply. type VsphereVirtualDiskVolumeSourceApplyConfiguration struct { VolumePath *string `json:"volumePath,omitempty"` @@ -27,7 +27,7 @@ type VsphereVirtualDiskVolumeSourceApplyConfiguration struct { StoragePolicyID *string `json:"storagePolicyID,omitempty"` } -// VsphereVirtualDiskVolumeSourceApplyConfiguration constructs an declarative configuration of the VsphereVirtualDiskVolumeSource type for use with +// VsphereVirtualDiskVolumeSourceApplyConfiguration constructs a declarative configuration of the VsphereVirtualDiskVolumeSource type for use with // apply. func VsphereVirtualDiskVolumeSource() *VsphereVirtualDiskVolumeSourceApplyConfiguration { return &VsphereVirtualDiskVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/weightedpodaffinityterm.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/weightedpodaffinityterm.go index eb99d06ffa..c49ef93eb4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/weightedpodaffinityterm.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/weightedpodaffinityterm.go @@ -18,14 +18,14 @@ limitations under the License. 
package v1 -// WeightedPodAffinityTermApplyConfiguration represents an declarative configuration of the WeightedPodAffinityTerm type for use +// WeightedPodAffinityTermApplyConfiguration represents a declarative configuration of the WeightedPodAffinityTerm type for use // with apply. type WeightedPodAffinityTermApplyConfiguration struct { Weight *int32 `json:"weight,omitempty"` PodAffinityTerm *PodAffinityTermApplyConfiguration `json:"podAffinityTerm,omitempty"` } -// WeightedPodAffinityTermApplyConfiguration constructs an declarative configuration of the WeightedPodAffinityTerm type for use with +// WeightedPodAffinityTermApplyConfiguration constructs a declarative configuration of the WeightedPodAffinityTerm type for use with // apply. func WeightedPodAffinityTerm() *WeightedPodAffinityTermApplyConfiguration { return &WeightedPodAffinityTermApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go index 20692e0146..bb37a500b4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// WindowsSecurityContextOptionsApplyConfiguration represents an declarative configuration of the WindowsSecurityContextOptions type for use +// WindowsSecurityContextOptionsApplyConfiguration represents a declarative configuration of the WindowsSecurityContextOptions type for use // with apply. type WindowsSecurityContextOptionsApplyConfiguration struct { GMSACredentialSpecName *string `json:"gmsaCredentialSpecName,omitempty"` @@ -27,7 +27,7 @@ type WindowsSecurityContextOptionsApplyConfiguration struct { HostProcess *bool `json:"hostProcess,omitempty"` } -// WindowsSecurityContextOptionsApplyConfiguration constructs an declarative configuration of the WindowsSecurityContextOptions type for use with +// WindowsSecurityContextOptionsApplyConfiguration constructs a declarative configuration of the WindowsSecurityContextOptions type for use with // apply. func WindowsSecurityContextOptions() *WindowsSecurityContextOptionsApplyConfiguration { return &WindowsSecurityContextOptionsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpoint.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpoint.go index d8c2359a3b..df45a6fb8a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpoint.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpoint.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// EndpointApplyConfiguration represents an declarative configuration of the Endpoint type for use +// EndpointApplyConfiguration represents a declarative configuration of the Endpoint type for use // with apply. type EndpointApplyConfiguration struct { Addresses []string `json:"addresses,omitempty"` @@ -35,7 +35,7 @@ type EndpointApplyConfiguration struct { Hints *EndpointHintsApplyConfiguration `json:"hints,omitempty"` } -// EndpointApplyConfiguration constructs an declarative configuration of the Endpoint type for use with +// EndpointApplyConfiguration constructs a declarative configuration of the Endpoint type for use with // apply. 
func Endpoint() *EndpointApplyConfiguration { return &EndpointApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointconditions.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointconditions.go index 68c25dd57c..20f0b97124 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointconditions.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointconditions.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// EndpointConditionsApplyConfiguration represents an declarative configuration of the EndpointConditions type for use +// EndpointConditionsApplyConfiguration represents a declarative configuration of the EndpointConditions type for use // with apply. type EndpointConditionsApplyConfiguration struct { Ready *bool `json:"ready,omitempty"` @@ -26,7 +26,7 @@ type EndpointConditionsApplyConfiguration struct { Terminating *bool `json:"terminating,omitempty"` } -// EndpointConditionsApplyConfiguration constructs an declarative configuration of the EndpointConditions type for use with +// EndpointConditionsApplyConfiguration constructs a declarative configuration of the EndpointConditions type for use with // apply. func EndpointConditions() *EndpointConditionsApplyConfiguration { return &EndpointConditionsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointhints.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointhints.go index 6eb9f21a51..d2d0f67769 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointhints.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointhints.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// EndpointHintsApplyConfiguration represents an declarative configuration of the EndpointHints type for use +// EndpointHintsApplyConfiguration represents a declarative configuration of the EndpointHints type for use // with apply. type EndpointHintsApplyConfiguration struct { ForZones []ForZoneApplyConfiguration `json:"forZones,omitempty"` } -// EndpointHintsApplyConfiguration constructs an declarative configuration of the EndpointHints type for use with +// EndpointHintsApplyConfiguration constructs a declarative configuration of the EndpointHints type for use with // apply. func EndpointHints() *EndpointHintsApplyConfiguration { return &EndpointHintsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go index c712956009..12908deb61 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// EndpointPortApplyConfiguration represents an declarative configuration of the EndpointPort type for use +// EndpointPortApplyConfiguration represents a declarative configuration of the EndpointPort type for use // with apply. type EndpointPortApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -31,7 +31,7 @@ type EndpointPortApplyConfiguration struct { AppProtocol *string `json:"appProtocol,omitempty"` } -// EndpointPortApplyConfiguration constructs an declarative configuration of the EndpointPort type for use with +// EndpointPortApplyConfiguration constructs a declarative configuration of the EndpointPort type for use with // apply. 
func EndpointPort() *EndpointPortApplyConfiguration { return &EndpointPortApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go index 640613753d..97002d2bbb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EndpointSliceApplyConfiguration represents an declarative configuration of the EndpointSlice type for use +// EndpointSliceApplyConfiguration represents a declarative configuration of the EndpointSlice type for use // with apply. type EndpointSliceApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type EndpointSliceApplyConfiguration struct { Ports []EndpointPortApplyConfiguration `json:"ports,omitempty"` } -// EndpointSlice constructs an declarative configuration of the EndpointSlice type for use with +// EndpointSlice constructs a declarative configuration of the EndpointSlice type for use with // apply. func EndpointSlice(name, namespace string) *EndpointSliceApplyConfiguration { b := &EndpointSliceApplyConfiguration{} @@ -275,3 +275,9 @@ func (b *EndpointSliceApplyConfiguration) WithPorts(values ...*EndpointPortApply } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EndpointSliceApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/forzone.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/forzone.go index 192a5ad2e8..505d11ae2f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/forzone.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/forzone.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// ForZoneApplyConfiguration represents an declarative configuration of the ForZone type for use +// ForZoneApplyConfiguration represents a declarative configuration of the ForZone type for use // with apply. type ForZoneApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// ForZoneApplyConfiguration constructs an declarative configuration of the ForZone type for use with +// ForZoneApplyConfiguration constructs a declarative configuration of the ForZone type for use with // apply. func ForZone() *ForZoneApplyConfiguration { return &ForZoneApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpoint.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpoint.go index 724c2d007c..5d87dae72e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpoint.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpoint.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// EndpointApplyConfiguration represents an declarative configuration of the Endpoint type for use +// EndpointApplyConfiguration represents a declarative configuration of the Endpoint type for use // with apply. 
type EndpointApplyConfiguration struct { Addresses []string `json:"addresses,omitempty"` @@ -34,7 +34,7 @@ type EndpointApplyConfiguration struct { Hints *EndpointHintsApplyConfiguration `json:"hints,omitempty"` } -// EndpointApplyConfiguration constructs an declarative configuration of the Endpoint type for use with +// EndpointApplyConfiguration constructs a declarative configuration of the Endpoint type for use with // apply. func Endpoint() *EndpointApplyConfiguration { return &EndpointApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointconditions.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointconditions.go index bc0438f90b..13f5fa5575 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointconditions.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointconditions.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// EndpointConditionsApplyConfiguration represents an declarative configuration of the EndpointConditions type for use +// EndpointConditionsApplyConfiguration represents a declarative configuration of the EndpointConditions type for use // with apply. type EndpointConditionsApplyConfiguration struct { Ready *bool `json:"ready,omitempty"` @@ -26,7 +26,7 @@ type EndpointConditionsApplyConfiguration struct { Terminating *bool `json:"terminating,omitempty"` } -// EndpointConditionsApplyConfiguration constructs an declarative configuration of the EndpointConditions type for use with +// EndpointConditionsApplyConfiguration constructs a declarative configuration of the EndpointConditions type for use with // apply. func EndpointConditions() *EndpointConditionsApplyConfiguration { return &EndpointConditionsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointhints.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointhints.go index 41d80206b3..99f69027a8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointhints.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointhints.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// EndpointHintsApplyConfiguration represents an declarative configuration of the EndpointHints type for use +// EndpointHintsApplyConfiguration represents a declarative configuration of the EndpointHints type for use // with apply. type EndpointHintsApplyConfiguration struct { ForZones []ForZoneApplyConfiguration `json:"forZones,omitempty"` } -// EndpointHintsApplyConfiguration constructs an declarative configuration of the EndpointHints type for use with +// EndpointHintsApplyConfiguration constructs a declarative configuration of the EndpointHints type for use with // apply. 
func EndpointHints() *EndpointHintsApplyConfiguration { return &EndpointHintsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointport.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointport.go index 9a3a31b965..07cfc684b2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointport.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// EndpointPortApplyConfiguration represents an declarative configuration of the EndpointPort type for use +// EndpointPortApplyConfiguration represents a declarative configuration of the EndpointPort type for use // with apply. type EndpointPortApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -31,7 +31,7 @@ type EndpointPortApplyConfiguration struct { AppProtocol *string `json:"appProtocol,omitempty"` } -// EndpointPortApplyConfiguration constructs an declarative configuration of the EndpointPort type for use with +// EndpointPortApplyConfiguration constructs a declarative configuration of the EndpointPort type for use with // apply. func EndpointPort() *EndpointPortApplyConfiguration { return &EndpointPortApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go index 74a24773cc..888319bc0f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EndpointSliceApplyConfiguration represents an declarative configuration of the EndpointSlice type for use +// EndpointSliceApplyConfiguration represents a declarative configuration of the EndpointSlice type for use // with apply. type EndpointSliceApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type EndpointSliceApplyConfiguration struct { Ports []EndpointPortApplyConfiguration `json:"ports,omitempty"` } -// EndpointSlice constructs an declarative configuration of the EndpointSlice type for use with +// EndpointSlice constructs a declarative configuration of the EndpointSlice type for use with // apply. func EndpointSlice(name, namespace string) *EndpointSliceApplyConfiguration { b := &EndpointSliceApplyConfiguration{} @@ -275,3 +275,9 @@ func (b *EndpointSliceApplyConfiguration) WithPorts(values ...*EndpointPortApply } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EndpointSliceApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/forzone.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/forzone.go index 4d1455ed38..4af09cc49b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/forzone.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/forzone.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// ForZoneApplyConfiguration represents an declarative configuration of the ForZone type for use +// ForZoneApplyConfiguration represents a declarative configuration of the ForZone type for use // with apply. 
type ForZoneApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// ForZoneApplyConfiguration constructs an declarative configuration of the ForZone type for use with +// ForZoneApplyConfiguration constructs a declarative configuration of the ForZone type for use with // apply. func ForZone() *ForZoneApplyConfiguration { return &ForZoneApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/doc.go b/vendor/k8s.io/client-go/applyconfigurations/doc.go new file mode 100644 index 0000000000..ac426c6075 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/doc.go @@ -0,0 +1,151 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package applyconfigurations provides typesafe go representations of the apply +configurations that are used to constructs Server-side Apply requests. + +# Basics + +The Apply functions in the typed client (see the k8s.io/client-go/kubernetes/typed packages) offer +a direct and typesafe way of calling Server-side Apply. Each Apply function takes an "apply +configuration" type as an argument, which is a structured representation of an Apply request. For +example: + + import ( + ... + v1ac "k8s.io/client-go/applyconfigurations/autoscaling/v1" + ) + hpaApplyConfig := v1ac.HorizontalPodAutoscaler(autoscalerName, ns). + WithSpec(v1ac.HorizontalPodAutoscalerSpec(). + WithMinReplicas(0) + ) + return hpav1client.Apply(ctx, hpaApplyConfig, metav1.ApplyOptions{FieldManager: "mycontroller", Force: true}) + +Note in this example that HorizontalPodAutoscaler is imported from an "applyconfigurations" +package. Each "apply configuration" type represents the same Kubernetes object kind as the +corresponding go struct, but where all fields are pointers to make them optional, allowing apply +requests to be accurately represented. For example, this when the apply configuration in the above +example is marshalled to YAML, it produces: + + apiVersion: autoscaling/v1 + kind: HorizontalPodAutoscaler + metadata: + name: myHPA + namespace: myNamespace + spec: + minReplicas: 0 + +To understand why this is needed, the above YAML cannot be produced by the +v1.HorizontalPodAutoscaler go struct. Take for example: + + hpa := v1.HorizontalPodAutoscaler{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "autoscaling/v1", + Kind: "HorizontalPodAutoscaler", + }, + ObjectMeta: ObjectMeta{ + Namespace: ns, + Name: autoscalerName, + }, + Spec: v1.HorizontalPodAutoscalerSpec{ + MinReplicas: pointer.Int32Ptr(0), + }, + } + +The above code attempts to declare the same apply configuration as shown in the previous examples, +but when marshalled to YAML, produces: + + kind: HorizontalPodAutoscaler + apiVersion: autoscaling/v1 + metadata: + name: myHPA + namespace: myNamespace + creationTimestamp: null + spec: + scaleTargetRef: + kind: "" + name: "" + minReplicas: 0 + maxReplicas: 0 + +Which, among other things, contains spec.maxReplicas set to 0. 
This is almost certainly not what +the caller intended (the intended apply configuration says nothing about the maxReplicas field), +and could have serious consequences on a production system: it directs the autoscaler to downscale +to zero pods. The problem here originates from the fact that the go structs contain required fields +that are zero valued if not set explicitly. The go structs work as intended for create and update +operations, but are fundamentally incompatible with apply, which is why we have introduced the +generated "apply configuration" types. + +The "apply configurations" also have convenience With functions that make it easier to +build apply requests. This allows developers to set fields without having to deal with the fact that +all the fields in the "apply configuration" types are pointers, and are inconvenient to set using +go. For example "MinReplicas: &0" is not legal go code, so without the With functions, developers +would work around this problem by using a library, .e.g. "MinReplicas: pointer.Int32Ptr(0)", but +string enumerations like corev1.Protocol are still a problem since they cannot be supported by a +general purpose library. In addition to the convenience, the With functions also isolate +developers from the underlying representation, which makes it safer for the underlying +representation to be changed to support additional features in the future. + +# Controller Support + +The new client-go support makes it much easier to use Server-side Apply in controllers, by either of +two mechanisms. + +Mechanism 1: + +When authoring new controllers to use Server-side Apply, a good approach is to have the controller +recreate the apply configuration for an object each time it reconciles that object. This ensures +that the controller fully reconciles all the fields that it is responsible for. Controllers +typically should unconditionally set all the fields they own by setting "Force: true" in the +ApplyOptions. Controllers must also provide a FieldManager name that is unique to the +reconciliation loop that apply is called from. + +When upgrading existing controllers to use Server-side Apply the same approach often works +well--migrate the controllers to recreate the apply configuration each time it reconciles any +object. For cases where this does not work well, see Mechanism 2. + +Mechanism 2: + +When upgrading existing controllers to use Server-side Apply, the controller might have multiple +code paths that update different parts of an object depending on various conditions. Migrating a +controller like this to Server-side Apply can be risky because if the controller forgets to include +any fields in an apply configuration that is included in a previous apply request, a field can be +accidentally deleted. For such cases, an alternative to mechanism 1 is to replace any controller +reconciliation code that performs a "read/modify-in-place/update" (or patch) workflow with a +"extract/modify-in-place/apply" workflow. Here's an example of the new workflow: + + fieldMgr := "my-field-manager" + deploymentClient := clientset.AppsV1().Deployments("default") + // read, could also be read from a shared informer + deployment, err := deploymentClient.Get(ctx, "example-deployment", metav1.GetOptions{}) + if err != nil { + // handle error + } + // extract + deploymentApplyConfig, err := appsv1ac.ExtractDeployment(deployment, fieldMgr) + if err != nil { + // handle error + } + // modify-in-place + deploymentApplyConfig.Spec.Template.Spec.WithContainers(corev1ac.Container(). 
+ WithName("modify-slice"). + WithImage("nginx:1.14.2"), + ) + // apply + applied, err := deploymentClient.Apply(ctx, extractedDeployment, metav1.ApplyOptions{FieldManager: fieldMgr}) +*/ +package applyconfigurations // import "k8s.io/client-go/applyconfigurations" diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go index 767e3dfc73..a6e98d1c82 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go +++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EventApplyConfiguration represents an declarative configuration of the Event type for use +// EventApplyConfiguration represents a declarative configuration of the Event type for use // with apply. type EventApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -49,7 +49,7 @@ type EventApplyConfiguration struct { DeprecatedCount *int32 `json:"deprecatedCount,omitempty"` } -// Event constructs an declarative configuration of the Event type for use with +// Event constructs a declarative configuration of the Event type for use with // apply. func Event(name, namespace string) *EventApplyConfiguration { b := &EventApplyConfiguration{} @@ -365,3 +365,9 @@ func (b *EventApplyConfiguration) WithDeprecatedCount(value int32) *EventApplyCo b.DeprecatedCount = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EventApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go index e66fb41271..18069c0d1b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go +++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EventSeriesApplyConfiguration represents an declarative configuration of the EventSeries type for use +// EventSeriesApplyConfiguration represents a declarative configuration of the EventSeries type for use // with apply. type EventSeriesApplyConfiguration struct { Count *int32 `json:"count,omitempty"` LastObservedTime *v1.MicroTime `json:"lastObservedTime,omitempty"` } -// EventSeriesApplyConfiguration constructs an declarative configuration of the EventSeries type for use with +// EventSeriesApplyConfiguration constructs a declarative configuration of the EventSeries type for use with // apply. func EventSeries() *EventSeriesApplyConfiguration { return &EventSeriesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go index cfc4a851f3..890d95748b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go +++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EventApplyConfiguration represents an declarative configuration of the Event type for use +// EventApplyConfiguration represents a declarative configuration of the Event type for use // with apply. 
type EventApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -49,7 +49,7 @@ type EventApplyConfiguration struct { DeprecatedCount *int32 `json:"deprecatedCount,omitempty"` } -// Event constructs an declarative configuration of the Event type for use with +// Event constructs a declarative configuration of the Event type for use with // apply. func Event(name, namespace string) *EventApplyConfiguration { b := &EventApplyConfiguration{} @@ -365,3 +365,9 @@ func (b *EventApplyConfiguration) WithDeprecatedCount(value int32) *EventApplyCo b.DeprecatedCount = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EventApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/eventseries.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/eventseries.go index 640a265172..75d936e8be 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/eventseries.go +++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/eventseries.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EventSeriesApplyConfiguration represents an declarative configuration of the EventSeries type for use +// EventSeriesApplyConfiguration represents a declarative configuration of the EventSeries type for use // with apply. type EventSeriesApplyConfiguration struct { Count *int32 `json:"count,omitempty"` LastObservedTime *v1.MicroTime `json:"lastObservedTime,omitempty"` } -// EventSeriesApplyConfiguration constructs an declarative configuration of the EventSeries type for use with +// EventSeriesApplyConfiguration constructs a declarative configuration of the EventSeries type for use with // apply. func EventSeries() *EventSeriesApplyConfiguration { return &EventSeriesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go index eae399d323..ff778529c9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DaemonSetApplyConfiguration represents an declarative configuration of the DaemonSet type for use +// DaemonSetApplyConfiguration represents a declarative configuration of the DaemonSet type for use // with apply. type DaemonSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DaemonSetApplyConfiguration struct { Status *DaemonSetStatusApplyConfiguration `json:"status,omitempty"` } -// DaemonSet constructs an declarative configuration of the DaemonSet type for use with +// DaemonSet constructs a declarative configuration of the DaemonSet type for use with // apply. func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration { b := &DaemonSetApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *DaemonSetApplyConfiguration) WithStatus(value *DaemonSetStatusApplyConf b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *DaemonSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go index bbf718f0f2..9b8057e69d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DaemonSetConditionApplyConfiguration represents an declarative configuration of the DaemonSetCondition type for use +// DaemonSetConditionApplyConfiguration represents a declarative configuration of the DaemonSetCondition type for use // with apply. type DaemonSetConditionApplyConfiguration struct { Type *v1beta1.DaemonSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type DaemonSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// DaemonSetConditionApplyConfiguration constructs an declarative configuration of the DaemonSetCondition type for use with +// DaemonSetConditionApplyConfiguration constructs a declarative configuration of the DaemonSetCondition type for use with // apply. func DaemonSetCondition() *DaemonSetConditionApplyConfiguration { return &DaemonSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetspec.go index b5d7a0c161..d628969187 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DaemonSetSpecApplyConfiguration represents an declarative configuration of the DaemonSetSpec type for use +// DaemonSetSpecApplyConfiguration represents a declarative configuration of the DaemonSetSpec type for use // with apply. type DaemonSetSpecApplyConfiguration struct { Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` @@ -34,7 +34,7 @@ type DaemonSetSpecApplyConfiguration struct { RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` } -// DaemonSetSpecApplyConfiguration constructs an declarative configuration of the DaemonSetSpec type for use with +// DaemonSetSpecApplyConfiguration constructs a declarative configuration of the DaemonSetSpec type for use with // apply. func DaemonSetSpec() *DaemonSetSpecApplyConfiguration { return &DaemonSetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetstatus.go index be6b3b2853..373f9ef97a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// DaemonSetStatusApplyConfiguration represents an declarative configuration of the DaemonSetStatus type for use +// DaemonSetStatusApplyConfiguration represents a declarative configuration of the DaemonSetStatus type for use // with apply. 
type DaemonSetStatusApplyConfiguration struct { CurrentNumberScheduled *int32 `json:"currentNumberScheduled,omitempty"` @@ -33,7 +33,7 @@ type DaemonSetStatusApplyConfiguration struct { Conditions []DaemonSetConditionApplyConfiguration `json:"conditions,omitempty"` } -// DaemonSetStatusApplyConfiguration constructs an declarative configuration of the DaemonSetStatus type for use with +// DaemonSetStatusApplyConfiguration constructs a declarative configuration of the DaemonSetStatus type for use with // apply. func DaemonSetStatus() *DaemonSetStatusApplyConfiguration { return &DaemonSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go index 2c827e62d4..e597b15a6a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go @@ -22,14 +22,14 @@ import ( v1beta1 "k8s.io/api/extensions/v1beta1" ) -// DaemonSetUpdateStrategyApplyConfiguration represents an declarative configuration of the DaemonSetUpdateStrategy type for use +// DaemonSetUpdateStrategyApplyConfiguration represents a declarative configuration of the DaemonSetUpdateStrategy type for use // with apply. type DaemonSetUpdateStrategyApplyConfiguration struct { Type *v1beta1.DaemonSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DaemonSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the DaemonSetUpdateStrategy type for use with +// DaemonSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the DaemonSetUpdateStrategy type for use with // apply. func DaemonSetUpdateStrategy() *DaemonSetUpdateStrategyApplyConfiguration { return &DaemonSetUpdateStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go index 878083f821..6badc64d82 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentApplyConfiguration represents an declarative configuration of the Deployment type for use +// DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use // with apply. type DeploymentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DeploymentApplyConfiguration struct { Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"` } -// Deployment constructs an declarative configuration of the Deployment type for use with +// Deployment constructs a declarative configuration of the Deployment type for use with // apply. func Deployment(name, namespace string) *DeploymentApplyConfiguration { b := &DeploymentApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *DeploymentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go index d8a214b7fc..79e109a779 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DeploymentConditionApplyConfiguration represents an declarative configuration of the DeploymentCondition type for use +// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use // with apply. type DeploymentConditionApplyConfiguration struct { Type *v1beta1.DeploymentConditionType `json:"type,omitempty"` @@ -35,7 +35,7 @@ type DeploymentConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// DeploymentConditionApplyConfiguration constructs an declarative configuration of the DeploymentCondition type for use with +// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with // apply. func DeploymentCondition() *DeploymentConditionApplyConfiguration { return &DeploymentConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentspec.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentspec.go index 5e18476bdc..5531c756f9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentSpecApplyConfiguration represents an declarative configuration of the DeploymentSpec type for use +// DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use // with apply. type DeploymentSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -37,7 +37,7 @@ type DeploymentSpecApplyConfiguration struct { ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"` } -// DeploymentSpecApplyConfiguration constructs an declarative configuration of the DeploymentSpec type for use with +// DeploymentSpecApplyConfiguration constructs a declarative configuration of the DeploymentSpec type for use with // apply. func DeploymentSpec() *DeploymentSpecApplyConfiguration { return &DeploymentSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstatus.go index f8d1cf5d25..adc023a34d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// DeploymentStatusApplyConfiguration represents an declarative configuration of the DeploymentStatus type for use +// DeploymentStatusApplyConfiguration represents a declarative configuration of the DeploymentStatus type for use // with apply. 
type DeploymentStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -31,7 +31,7 @@ type DeploymentStatusApplyConfiguration struct { CollisionCount *int32 `json:"collisionCount,omitempty"` } -// DeploymentStatusApplyConfiguration constructs an declarative configuration of the DeploymentStatus type for use with +// DeploymentStatusApplyConfiguration constructs a declarative configuration of the DeploymentStatus type for use with // apply. func DeploymentStatus() *DeploymentStatusApplyConfiguration { return &DeploymentStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go index 7c17b40722..2d88406eb9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go @@ -22,14 +22,14 @@ import ( v1beta1 "k8s.io/api/extensions/v1beta1" ) -// DeploymentStrategyApplyConfiguration represents an declarative configuration of the DeploymentStrategy type for use +// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use // with apply. type DeploymentStrategyApplyConfiguration struct { Type *v1beta1.DeploymentStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DeploymentStrategyApplyConfiguration constructs an declarative configuration of the DeploymentStrategy type for use with +// DeploymentStrategyApplyConfiguration constructs a declarative configuration of the DeploymentStrategy type for use with // apply. func DeploymentStrategy() *DeploymentStrategyApplyConfiguration { return &DeploymentStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go index 361605d8cd..3826e0dddc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go @@ -22,7 +22,7 @@ import ( v1beta1 "k8s.io/api/extensions/v1beta1" ) -// HTTPIngressPathApplyConfiguration represents an declarative configuration of the HTTPIngressPath type for use +// HTTPIngressPathApplyConfiguration represents a declarative configuration of the HTTPIngressPath type for use // with apply. type HTTPIngressPathApplyConfiguration struct { Path *string `json:"path,omitempty"` @@ -30,7 +30,7 @@ type HTTPIngressPathApplyConfiguration struct { Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"` } -// HTTPIngressPathApplyConfiguration constructs an declarative configuration of the HTTPIngressPath type for use with +// HTTPIngressPathApplyConfiguration constructs a declarative configuration of the HTTPIngressPath type for use with // apply. 
func HTTPIngressPath() *HTTPIngressPathApplyConfiguration { return &HTTPIngressPathApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingressrulevalue.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingressrulevalue.go index 3137bc5eb0..1245452237 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingressrulevalue.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingressrulevalue.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// HTTPIngressRuleValueApplyConfiguration represents an declarative configuration of the HTTPIngressRuleValue type for use +// HTTPIngressRuleValueApplyConfiguration represents a declarative configuration of the HTTPIngressRuleValue type for use // with apply. type HTTPIngressRuleValueApplyConfiguration struct { Paths []HTTPIngressPathApplyConfiguration `json:"paths,omitempty"` } -// HTTPIngressRuleValueApplyConfiguration constructs an declarative configuration of the HTTPIngressRuleValue type for use with +// HTTPIngressRuleValueApplyConfiguration constructs a declarative configuration of the HTTPIngressRuleValue type for use with // apply. func HTTPIngressRuleValue() *HTTPIngressRuleValueApplyConfiguration { return &HTTPIngressRuleValueApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go index 46c541048d..6738bf07bf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// IngressApplyConfiguration represents an declarative configuration of the Ingress type for use +// IngressApplyConfiguration represents a declarative configuration of the Ingress type for use // with apply. type IngressApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type IngressApplyConfiguration struct { Status *IngressStatusApplyConfiguration `json:"status,omitempty"` } -// Ingress constructs an declarative configuration of the Ingress type for use with +// Ingress constructs a declarative configuration of the Ingress type for use with // apply. func Ingress(name, namespace string) *IngressApplyConfiguration { b := &IngressApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *IngressApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressbackend.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressbackend.go index f19c2f2ee2..9d386f1608 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressbackend.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressbackend.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// IngressBackendApplyConfiguration represents an declarative configuration of the IngressBackend type for use +// IngressBackendApplyConfiguration represents a declarative configuration of the IngressBackend type for use // with apply. 
type IngressBackendApplyConfiguration struct { ServiceName *string `json:"serviceName,omitempty"` @@ -31,7 +31,7 @@ type IngressBackendApplyConfiguration struct { Resource *v1.TypedLocalObjectReferenceApplyConfiguration `json:"resource,omitempty"` } -// IngressBackendApplyConfiguration constructs an declarative configuration of the IngressBackend type for use with +// IngressBackendApplyConfiguration constructs a declarative configuration of the IngressBackend type for use with // apply. func IngressBackend() *IngressBackendApplyConfiguration { return &IngressBackendApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go index 20bf637805..12dbc35969 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// IngressLoadBalancerIngressApplyConfiguration represents an declarative configuration of the IngressLoadBalancerIngress type for use +// IngressLoadBalancerIngressApplyConfiguration represents a declarative configuration of the IngressLoadBalancerIngress type for use // with apply. type IngressLoadBalancerIngressApplyConfiguration struct { IP *string `json:"ip,omitempty"` @@ -26,7 +26,7 @@ type IngressLoadBalancerIngressApplyConfiguration struct { Ports []IngressPortStatusApplyConfiguration `json:"ports,omitempty"` } -// IngressLoadBalancerIngressApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerIngress type for use with +// IngressLoadBalancerIngressApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerIngress type for use with // apply. func IngressLoadBalancerIngress() *IngressLoadBalancerIngressApplyConfiguration { return &IngressLoadBalancerIngressApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go index e16dd23633..e896ab3415 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// IngressLoadBalancerStatusApplyConfiguration represents an declarative configuration of the IngressLoadBalancerStatus type for use +// IngressLoadBalancerStatusApplyConfiguration represents a declarative configuration of the IngressLoadBalancerStatus type for use // with apply. type IngressLoadBalancerStatusApplyConfiguration struct { Ingress []IngressLoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"` } -// IngressLoadBalancerStatusApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerStatus type for use with +// IngressLoadBalancerStatusApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerStatus type for use with // apply. 
func IngressLoadBalancerStatus() *IngressLoadBalancerStatusApplyConfiguration { return &IngressLoadBalancerStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go index 0836537979..4ee3f01617 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// IngressPortStatusApplyConfiguration represents an declarative configuration of the IngressPortStatus type for use +// IngressPortStatusApplyConfiguration represents a declarative configuration of the IngressPortStatus type for use // with apply. type IngressPortStatusApplyConfiguration struct { Port *int32 `json:"port,omitempty"` @@ -30,7 +30,7 @@ type IngressPortStatusApplyConfiguration struct { Error *string `json:"error,omitempty"` } -// IngressPortStatusApplyConfiguration constructs an declarative configuration of the IngressPortStatus type for use with +// IngressPortStatusApplyConfiguration constructs a declarative configuration of the IngressPortStatus type for use with // apply. func IngressPortStatus() *IngressPortStatusApplyConfiguration { return &IngressPortStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go index 015541eeb9..dc676f7b60 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// IngressRuleApplyConfiguration represents an declarative configuration of the IngressRule type for use +// IngressRuleApplyConfiguration represents a declarative configuration of the IngressRule type for use // with apply. type IngressRuleApplyConfiguration struct { Host *string `json:"host,omitempty"` - IngressRuleValueApplyConfiguration `json:",omitempty,inline"` + IngressRuleValueApplyConfiguration `json:",inline"` } -// IngressRuleApplyConfiguration constructs an declarative configuration of the IngressRule type for use with +// IngressRuleApplyConfiguration constructs a declarative configuration of the IngressRule type for use with // apply. func IngressRule() *IngressRuleApplyConfiguration { return &IngressRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrulevalue.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrulevalue.go index 2d03c7b132..4a64124755 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrulevalue.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrulevalue.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// IngressRuleValueApplyConfiguration represents an declarative configuration of the IngressRuleValue type for use +// IngressRuleValueApplyConfiguration represents a declarative configuration of the IngressRuleValue type for use // with apply. 
type IngressRuleValueApplyConfiguration struct { HTTP *HTTPIngressRuleValueApplyConfiguration `json:"http,omitempty"` } -// IngressRuleValueApplyConfiguration constructs an declarative configuration of the IngressRuleValue type for use with +// IngressRuleValueApplyConfiguration constructs a declarative configuration of the IngressRuleValue type for use with // apply. func IngressRuleValue() *IngressRuleValueApplyConfiguration { return &IngressRuleValueApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressspec.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressspec.go index 1ab4d8bb73..58fbde8b35 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// IngressSpecApplyConfiguration represents an declarative configuration of the IngressSpec type for use +// IngressSpecApplyConfiguration represents a declarative configuration of the IngressSpec type for use // with apply. type IngressSpecApplyConfiguration struct { IngressClassName *string `json:"ingressClassName,omitempty"` @@ -27,7 +27,7 @@ type IngressSpecApplyConfiguration struct { Rules []IngressRuleApplyConfiguration `json:"rules,omitempty"` } -// IngressSpecApplyConfiguration constructs an declarative configuration of the IngressSpec type for use with +// IngressSpecApplyConfiguration constructs a declarative configuration of the IngressSpec type for use with // apply. func IngressSpec() *IngressSpecApplyConfiguration { return &IngressSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go index faa7e2446f..3aed616889 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// IngressStatusApplyConfiguration represents an declarative configuration of the IngressStatus type for use +// IngressStatusApplyConfiguration represents a declarative configuration of the IngressStatus type for use // with apply. type IngressStatusApplyConfiguration struct { LoadBalancer *IngressLoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"` } -// IngressStatusApplyConfiguration constructs an declarative configuration of the IngressStatus type for use with +// IngressStatusApplyConfiguration constructs a declarative configuration of the IngressStatus type for use with // apply. func IngressStatus() *IngressStatusApplyConfiguration { return &IngressStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingresstls.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingresstls.go index 8ca93a0bc2..63648cd464 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingresstls.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingresstls.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// IngressTLSApplyConfiguration represents an declarative configuration of the IngressTLS type for use +// IngressTLSApplyConfiguration represents a declarative configuration of the IngressTLS type for use // with apply. 
type IngressTLSApplyConfiguration struct { Hosts []string `json:"hosts,omitempty"` SecretName *string `json:"secretName,omitempty"` } -// IngressTLSApplyConfiguration constructs an declarative configuration of the IngressTLS type for use with +// IngressTLSApplyConfiguration constructs a declarative configuration of the IngressTLS type for use with // apply. func IngressTLS() *IngressTLSApplyConfiguration { return &IngressTLSApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ipblock.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ipblock.go index a90d3b2207..4a671130b8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ipblock.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ipblock.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// IPBlockApplyConfiguration represents an declarative configuration of the IPBlock type for use +// IPBlockApplyConfiguration represents a declarative configuration of the IPBlock type for use // with apply. type IPBlockApplyConfiguration struct { CIDR *string `json:"cidr,omitempty"` Except []string `json:"except,omitempty"` } -// IPBlockApplyConfiguration constructs an declarative configuration of the IPBlock type for use with +// IPBlockApplyConfiguration constructs a declarative configuration of the IPBlock type for use with // apply. func IPBlock() *IPBlockApplyConfiguration { return &IPBlockApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go index 27ea5d9dde..fb1f95a6d6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkPolicyApplyConfiguration represents an declarative configuration of the NetworkPolicy type for use +// NetworkPolicyApplyConfiguration represents a declarative configuration of the NetworkPolicy type for use // with apply. type NetworkPolicyApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type NetworkPolicyApplyConfiguration struct { Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"` } -// NetworkPolicy constructs an declarative configuration of the NetworkPolicy type for use with +// NetworkPolicy constructs a declarative configuration of the NetworkPolicy type for use with // apply. func NetworkPolicy(name, namespace string) *NetworkPolicyApplyConfiguration { b := &NetworkPolicyApplyConfiguration{} @@ -247,3 +247,9 @@ func (b *NetworkPolicyApplyConfiguration) WithSpec(value *NetworkPolicySpecApply b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *NetworkPolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyegressrule.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyegressrule.go index 6335ec375d..ca3e174f93 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyegressrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyegressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// NetworkPolicyEgressRuleApplyConfiguration represents an declarative configuration of the NetworkPolicyEgressRule type for use +// NetworkPolicyEgressRuleApplyConfiguration represents a declarative configuration of the NetworkPolicyEgressRule type for use // with apply. type NetworkPolicyEgressRuleApplyConfiguration struct { Ports []NetworkPolicyPortApplyConfiguration `json:"ports,omitempty"` To []NetworkPolicyPeerApplyConfiguration `json:"to,omitempty"` } -// NetworkPolicyEgressRuleApplyConfiguration constructs an declarative configuration of the NetworkPolicyEgressRule type for use with +// NetworkPolicyEgressRuleApplyConfiguration constructs a declarative configuration of the NetworkPolicyEgressRule type for use with // apply. func NetworkPolicyEgressRule() *NetworkPolicyEgressRuleApplyConfiguration { return &NetworkPolicyEgressRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyingressrule.go index 2ecc4c8c65..1607137204 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyingressrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyingressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// NetworkPolicyIngressRuleApplyConfiguration represents an declarative configuration of the NetworkPolicyIngressRule type for use +// NetworkPolicyIngressRuleApplyConfiguration represents a declarative configuration of the NetworkPolicyIngressRule type for use // with apply. type NetworkPolicyIngressRuleApplyConfiguration struct { Ports []NetworkPolicyPortApplyConfiguration `json:"ports,omitempty"` From []NetworkPolicyPeerApplyConfiguration `json:"from,omitempty"` } -// NetworkPolicyIngressRuleApplyConfiguration constructs an declarative configuration of the NetworkPolicyIngressRule type for use with +// NetworkPolicyIngressRuleApplyConfiguration constructs a declarative configuration of the NetworkPolicyIngressRule type for use with // apply. 
func NetworkPolicyIngressRule() *NetworkPolicyIngressRuleApplyConfiguration { return &NetworkPolicyIngressRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicypeer.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicypeer.go index c69b281225..8a0fa57415 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicypeer.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicypeer.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkPolicyPeerApplyConfiguration represents an declarative configuration of the NetworkPolicyPeer type for use +// NetworkPolicyPeerApplyConfiguration represents a declarative configuration of the NetworkPolicyPeer type for use // with apply. type NetworkPolicyPeerApplyConfiguration struct { PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` @@ -30,7 +30,7 @@ type NetworkPolicyPeerApplyConfiguration struct { IPBlock *IPBlockApplyConfiguration `json:"ipBlock,omitempty"` } -// NetworkPolicyPeerApplyConfiguration constructs an declarative configuration of the NetworkPolicyPeer type for use with +// NetworkPolicyPeerApplyConfiguration constructs a declarative configuration of the NetworkPolicyPeer type for use with // apply. func NetworkPolicyPeer() *NetworkPolicyPeerApplyConfiguration { return &NetworkPolicyPeerApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyport.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyport.go index 0140d771bf..6bc1c1977b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyport.go @@ -23,7 +23,7 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// NetworkPolicyPortApplyConfiguration represents an declarative configuration of the NetworkPolicyPort type for use +// NetworkPolicyPortApplyConfiguration represents a declarative configuration of the NetworkPolicyPort type for use // with apply. type NetworkPolicyPortApplyConfiguration struct { Protocol *v1.Protocol `json:"protocol,omitempty"` @@ -31,7 +31,7 @@ type NetworkPolicyPortApplyConfiguration struct { EndPort *int32 `json:"endPort,omitempty"` } -// NetworkPolicyPortApplyConfiguration constructs an declarative configuration of the NetworkPolicyPort type for use with +// NetworkPolicyPortApplyConfiguration constructs a declarative configuration of the NetworkPolicyPort type for use with // apply. func NetworkPolicyPort() *NetworkPolicyPortApplyConfiguration { return &NetworkPolicyPortApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyspec.go index 179e4bd024..4454329c5b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkPolicySpecApplyConfiguration represents an declarative configuration of the NetworkPolicySpec type for use +// NetworkPolicySpecApplyConfiguration represents a declarative configuration of the NetworkPolicySpec type for use // with apply. 
type NetworkPolicySpecApplyConfiguration struct { PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` @@ -32,7 +32,7 @@ type NetworkPolicySpecApplyConfiguration struct { PolicyTypes []extensionsv1beta1.PolicyType `json:"policyTypes,omitempty"` } -// NetworkPolicySpecApplyConfiguration constructs an declarative configuration of the NetworkPolicySpec type for use with +// NetworkPolicySpecApplyConfiguration constructs a declarative configuration of the NetworkPolicySpec type for use with // apply. func NetworkPolicySpec() *NetworkPolicySpecApplyConfiguration { return &NetworkPolicySpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go index b2afc835d8..24c6b6ad1a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicaSetApplyConfiguration represents an declarative configuration of the ReplicaSet type for use +// ReplicaSetApplyConfiguration represents a declarative configuration of the ReplicaSet type for use // with apply. type ReplicaSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ReplicaSetApplyConfiguration struct { Status *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"` } -// ReplicaSet constructs an declarative configuration of the ReplicaSet type for use with +// ReplicaSet constructs a declarative configuration of the ReplicaSet type for use with // apply. func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration { b := &ReplicaSetApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *ReplicaSetApplyConfiguration) WithStatus(value *ReplicaSetStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ReplicaSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go index b717365175..21a25ae81f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ReplicaSetConditionApplyConfiguration represents an declarative configuration of the ReplicaSetCondition type for use +// ReplicaSetConditionApplyConfiguration represents a declarative configuration of the ReplicaSetCondition type for use // with apply. type ReplicaSetConditionApplyConfiguration struct { Type *v1beta1.ReplicaSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type ReplicaSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// ReplicaSetConditionApplyConfiguration constructs an declarative configuration of the ReplicaSetCondition type for use with +// ReplicaSetConditionApplyConfiguration constructs a declarative configuration of the ReplicaSetCondition type for use with // apply. 
func ReplicaSetCondition() *ReplicaSetConditionApplyConfiguration { return &ReplicaSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetspec.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetspec.go index 5d0c570149..27653dd1af 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicaSetSpecApplyConfiguration represents an declarative configuration of the ReplicaSetSpec type for use +// ReplicaSetSpecApplyConfiguration represents a declarative configuration of the ReplicaSetSpec type for use // with apply. type ReplicaSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -32,7 +32,7 @@ type ReplicaSetSpecApplyConfiguration struct { Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` } -// ReplicaSetSpecApplyConfiguration constructs an declarative configuration of the ReplicaSetSpec type for use with +// ReplicaSetSpecApplyConfiguration constructs a declarative configuration of the ReplicaSetSpec type for use with // apply. func ReplicaSetSpec() *ReplicaSetSpecApplyConfiguration { return &ReplicaSetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetstatus.go index 45dc4bf319..9a5b468a3f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// ReplicaSetStatusApplyConfiguration represents an declarative configuration of the ReplicaSetStatus type for use +// ReplicaSetStatusApplyConfiguration represents a declarative configuration of the ReplicaSetStatus type for use // with apply. type ReplicaSetStatusApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -29,7 +29,7 @@ type ReplicaSetStatusApplyConfiguration struct { Conditions []ReplicaSetConditionApplyConfiguration `json:"conditions,omitempty"` } -// ReplicaSetStatusApplyConfiguration constructs an declarative configuration of the ReplicaSetStatus type for use with +// ReplicaSetStatusApplyConfiguration constructs a declarative configuration of the ReplicaSetStatus type for use with // apply. func ReplicaSetStatus() *ReplicaSetStatusApplyConfiguration { return &ReplicaSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollbackconfig.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollbackconfig.go index 131e57a39d..775f82eef8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollbackconfig.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollbackconfig.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// RollbackConfigApplyConfiguration represents an declarative configuration of the RollbackConfig type for use +// RollbackConfigApplyConfiguration represents a declarative configuration of the RollbackConfig type for use // with apply. 
type RollbackConfigApplyConfiguration struct { Revision *int64 `json:"revision,omitempty"` } -// RollbackConfigApplyConfiguration constructs an declarative configuration of the RollbackConfig type for use with +// RollbackConfigApplyConfiguration constructs a declarative configuration of the RollbackConfig type for use with // apply. func RollbackConfig() *RollbackConfigApplyConfiguration { return &RollbackConfigApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedaemonset.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedaemonset.go index 3aa5e2f891..4352f7fac7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedaemonset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedaemonset.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDaemonSetApplyConfiguration represents an declarative configuration of the RollingUpdateDaemonSet type for use +// RollingUpdateDaemonSetApplyConfiguration represents a declarative configuration of the RollingUpdateDaemonSet type for use // with apply. type RollingUpdateDaemonSetApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDaemonSetApplyConfiguration constructs an declarative configuration of the RollingUpdateDaemonSet type for use with +// RollingUpdateDaemonSetApplyConfiguration constructs a declarative configuration of the RollingUpdateDaemonSet type for use with // apply. func RollingUpdateDaemonSet() *RollingUpdateDaemonSetApplyConfiguration { return &RollingUpdateDaemonSetApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedeployment.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedeployment.go index dde5f064b0..244701a5e0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedeployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedeployment.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDeploymentApplyConfiguration represents an declarative configuration of the RollingUpdateDeployment type for use +// RollingUpdateDeploymentApplyConfiguration represents a declarative configuration of the RollingUpdateDeployment type for use // with apply. type RollingUpdateDeploymentApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDeploymentApplyConfiguration constructs an declarative configuration of the RollingUpdateDeployment type for use with +// RollingUpdateDeploymentApplyConfiguration constructs a declarative configuration of the RollingUpdateDeployment type for use with // apply. 
func RollingUpdateDeployment() *RollingUpdateDeploymentApplyConfiguration { return &RollingUpdateDeploymentApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go index 60a1a8430c..101aa055b0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go @@ -25,7 +25,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ScaleApplyConfiguration represents an declarative configuration of the Scale type for use +// ScaleApplyConfiguration represents a declarative configuration of the Scale type for use // with apply. type ScaleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -34,7 +34,7 @@ type ScaleApplyConfiguration struct { Status *v1beta1.ScaleStatus `json:"status,omitempty"` } -// ScaleApplyConfiguration constructs an declarative configuration of the Scale type for use with +// ScaleApplyConfiguration constructs a declarative configuration of the Scale type for use with // apply. func Scale() *ScaleApplyConfiguration { b := &ScaleApplyConfiguration{} @@ -216,3 +216,9 @@ func (b *ScaleApplyConfiguration) WithStatus(value v1beta1.ScaleStatus) *ScaleAp b.Status = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ScaleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go index cd21214f5a..4e5805f394 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use +// ExemptPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the ExemptPriorityLevelConfiguration type for use // with apply. type ExemptPriorityLevelConfigurationApplyConfiguration struct { NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` LendablePercent *int32 `json:"lendablePercent,omitempty"` } -// ExemptPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the ExemptPriorityLevelConfiguration type for use with +// ExemptPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the ExemptPriorityLevelConfiguration type for use with // apply. 
func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration { return &ExemptPriorityLevelConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go index d9c8a79cc8..0f3b61af97 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/api/flowcontrol/v1" ) -// FlowDistinguisherMethodApplyConfiguration represents an declarative configuration of the FlowDistinguisherMethod type for use +// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use // with apply. type FlowDistinguisherMethodApplyConfiguration struct { Type *v1.FlowDistinguisherMethodType `json:"type,omitempty"` } -// FlowDistinguisherMethodApplyConfiguration constructs an declarative configuration of the FlowDistinguisherMethod type for use with +// FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with // apply. func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { return &FlowDistinguisherMethodApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go index 8809fafbae..9e3978af5b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// FlowSchemaApplyConfiguration represents an declarative configuration of the FlowSchema type for use +// FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use // with apply. type FlowSchemaApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type FlowSchemaApplyConfiguration struct { Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"` } -// FlowSchema constructs an declarative configuration of the FlowSchema type for use with +// FlowSchema constructs a declarative configuration of the FlowSchema type for use with // apply. func FlowSchema(name string) *FlowSchemaApplyConfiguration { b := &FlowSchemaApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *FlowSchemaApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go index 808ab09a55..5f26a66d2f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// FlowSchemaConditionApplyConfiguration represents an declarative configuration of the FlowSchemaCondition type for use +// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use // with apply. type FlowSchemaConditionApplyConfiguration struct { Type *v1.FlowSchemaConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type FlowSchemaConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// FlowSchemaConditionApplyConfiguration constructs an declarative configuration of the FlowSchemaCondition type for use with +// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with // apply. func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { return &FlowSchemaConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go index 2785f5baf3..4efd5d2875 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// FlowSchemaSpecApplyConfiguration represents an declarative configuration of the FlowSchemaSpec type for use +// FlowSchemaSpecApplyConfiguration represents a declarative configuration of the FlowSchemaSpec type for use // with apply. type FlowSchemaSpecApplyConfiguration struct { PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"` @@ -27,7 +27,7 @@ type FlowSchemaSpecApplyConfiguration struct { Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"` } -// FlowSchemaSpecApplyConfiguration constructs an declarative configuration of the FlowSchemaSpec type for use with +// FlowSchemaSpecApplyConfiguration constructs a declarative configuration of the FlowSchemaSpec type for use with // apply. func FlowSchemaSpec() *FlowSchemaSpecApplyConfiguration { return &FlowSchemaSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go index 7c61360a53..6f951967e8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// FlowSchemaStatusApplyConfiguration represents an declarative configuration of the FlowSchemaStatus type for use +// FlowSchemaStatusApplyConfiguration represents a declarative configuration of the FlowSchemaStatus type for use // with apply. 
type FlowSchemaStatusApplyConfiguration struct { Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"` } -// FlowSchemaStatusApplyConfiguration constructs an declarative configuration of the FlowSchemaStatus type for use with +// FlowSchemaStatusApplyConfiguration constructs a declarative configuration of the FlowSchemaStatus type for use with // apply. func FlowSchemaStatus() *FlowSchemaStatusApplyConfiguration { return &FlowSchemaStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go index 92a03d8628..0be9eddfd6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// GroupSubjectApplyConfiguration represents an declarative configuration of the GroupSubject type for use +// GroupSubjectApplyConfiguration represents a declarative configuration of the GroupSubject type for use // with apply. type GroupSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// GroupSubjectApplyConfiguration constructs an declarative configuration of the GroupSubject type for use with +// GroupSubjectApplyConfiguration constructs a declarative configuration of the GroupSubject type for use with // apply. func GroupSubject() *GroupSubjectApplyConfiguration { return &GroupSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go index c19f097035..8e27642985 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// LimitedPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the LimitedPriorityLevelConfiguration type for use +// LimitedPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the LimitedPriorityLevelConfiguration type for use // with apply. type LimitedPriorityLevelConfigurationApplyConfiguration struct { NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` @@ -27,7 +27,7 @@ type LimitedPriorityLevelConfigurationApplyConfiguration struct { BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"` } -// LimitedPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the LimitedPriorityLevelConfiguration type for use with +// LimitedPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the LimitedPriorityLevelConfiguration type for use with // apply. 
func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApplyConfiguration { return &LimitedPriorityLevelConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go index 03ff6d9103..454ed8beb4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/flowcontrol/v1" ) -// LimitResponseApplyConfiguration represents an declarative configuration of the LimitResponse type for use +// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use // with apply. type LimitResponseApplyConfiguration struct { Type *v1.LimitResponseType `json:"type,omitempty"` Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` } -// LimitResponseApplyConfiguration constructs an declarative configuration of the LimitResponse type for use with +// LimitResponseApplyConfiguration constructs a declarative configuration of the LimitResponse type for use with // apply. func LimitResponse() *LimitResponseApplyConfiguration { return &LimitResponseApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go index d9f8c2eccf..29c26b3406 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// NonResourcePolicyRuleApplyConfiguration represents an declarative configuration of the NonResourcePolicyRule type for use +// NonResourcePolicyRuleApplyConfiguration represents a declarative configuration of the NonResourcePolicyRule type for use // with apply. type NonResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// NonResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the NonResourcePolicyRule type for use with +// NonResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the NonResourcePolicyRule type for use with // apply. func NonResourcePolicyRule() *NonResourcePolicyRuleApplyConfiguration { return &NonResourcePolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go index b193efa8bf..088afdc584 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// PolicyRulesWithSubjectsApplyConfiguration represents an declarative configuration of the PolicyRulesWithSubjects type for use +// PolicyRulesWithSubjectsApplyConfiguration represents a declarative configuration of the PolicyRulesWithSubjects type for use // with apply. 
type PolicyRulesWithSubjectsApplyConfiguration struct { Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` @@ -26,7 +26,7 @@ type PolicyRulesWithSubjectsApplyConfiguration struct { NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"` } -// PolicyRulesWithSubjectsApplyConfiguration constructs an declarative configuration of the PolicyRulesWithSubjects type for use with +// PolicyRulesWithSubjectsApplyConfiguration constructs a declarative configuration of the PolicyRulesWithSubjects type for use with // apply. func PolicyRulesWithSubjects() *PolicyRulesWithSubjectsApplyConfiguration { return &PolicyRulesWithSubjectsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go index e8a1b97c9f..bcce2679c6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the PriorityLevelConfiguration type for use +// PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use // with apply. type PriorityLevelConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PriorityLevelConfigurationApplyConfiguration struct { Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"` } -// PriorityLevelConfiguration constructs an declarative configuration of the PriorityLevelConfiguration type for use with +// PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with // apply. func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyConfiguration { b := &PriorityLevelConfigurationApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go index 6ce588c8d9..42ccbfbf9d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PriorityLevelConfigurationConditionApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationCondition type for use +// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use // with apply. 
type PriorityLevelConfigurationConditionApplyConfiguration struct { Type *v1.PriorityLevelConfigurationConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type PriorityLevelConfigurationConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// PriorityLevelConfigurationConditionApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationCondition type for use with +// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with // apply. func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionApplyConfiguration { return &PriorityLevelConfigurationConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go index 0638aee8b8..f445713f0c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// PriorityLevelConfigurationReferenceApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationReference type for use +// PriorityLevelConfigurationReferenceApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationReference type for use // with apply. type PriorityLevelConfigurationReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// PriorityLevelConfigurationReferenceApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationReference type for use with +// PriorityLevelConfigurationReferenceApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationReference type for use with // apply. func PriorityLevelConfigurationReference() *PriorityLevelConfigurationReferenceApplyConfiguration { return &PriorityLevelConfigurationReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go index 5d88749593..2262dedca9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/flowcontrol/v1" ) -// PriorityLevelConfigurationSpecApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationSpec type for use +// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use // with apply. 
type PriorityLevelConfigurationSpecApplyConfiguration struct { Type *v1.PriorityLevelEnablement `json:"type,omitempty"` @@ -30,7 +30,7 @@ type PriorityLevelConfigurationSpecApplyConfiguration struct { Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` } -// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with +// PriorityLevelConfigurationSpecApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationSpec type for use with // apply. func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfiguration { return &PriorityLevelConfigurationSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go index 322871edc6..ff650bc3d5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// PriorityLevelConfigurationStatusApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationStatus type for use +// PriorityLevelConfigurationStatusApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationStatus type for use // with apply. type PriorityLevelConfigurationStatusApplyConfiguration struct { Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"` } -// PriorityLevelConfigurationStatusApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationStatus type for use with +// PriorityLevelConfigurationStatusApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationStatus type for use with // apply. func PriorityLevelConfigurationStatus() *PriorityLevelConfigurationStatusApplyConfiguration { return &PriorityLevelConfigurationStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go index 69fd2c23cc..7488f9bbe2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// QueuingConfigurationApplyConfiguration represents an declarative configuration of the QueuingConfiguration type for use +// QueuingConfigurationApplyConfiguration represents a declarative configuration of the QueuingConfiguration type for use // with apply. type QueuingConfigurationApplyConfiguration struct { Queues *int32 `json:"queues,omitempty"` @@ -26,7 +26,7 @@ type QueuingConfigurationApplyConfiguration struct { QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"` } -// QueuingConfigurationApplyConfiguration constructs an declarative configuration of the QueuingConfiguration type for use with +// QueuingConfigurationApplyConfiguration constructs a declarative configuration of the QueuingConfiguration type for use with // apply. 
func QueuingConfiguration() *QueuingConfigurationApplyConfiguration { return &QueuingConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go index 0991ce9445..7428582a82 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ResourcePolicyRuleApplyConfiguration represents an declarative configuration of the ResourcePolicyRule type for use +// ResourcePolicyRuleApplyConfiguration represents a declarative configuration of the ResourcePolicyRule type for use // with apply. type ResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type ResourcePolicyRuleApplyConfiguration struct { Namespaces []string `json:"namespaces,omitempty"` } -// ResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the ResourcePolicyRule type for use with +// ResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the ResourcePolicyRule type for use with // apply. func ResourcePolicyRule() *ResourcePolicyRuleApplyConfiguration { return &ResourcePolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go index 55787ca767..58ad10764b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ServiceAccountSubjectApplyConfiguration represents an declarative configuration of the ServiceAccountSubject type for use +// ServiceAccountSubjectApplyConfiguration represents a declarative configuration of the ServiceAccountSubject type for use // with apply. type ServiceAccountSubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` Name *string `json:"name,omitempty"` } -// ServiceAccountSubjectApplyConfiguration constructs an declarative configuration of the ServiceAccountSubject type for use with +// ServiceAccountSubjectApplyConfiguration constructs a declarative configuration of the ServiceAccountSubject type for use with // apply. func ServiceAccountSubject() *ServiceAccountSubjectApplyConfiguration { return &ServiceAccountSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go index f02b03bdc7..1ec77ae89b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/flowcontrol/v1" ) -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. 
type SubjectApplyConfiguration struct { Kind *v1.SubjectKind `json:"kind,omitempty"` @@ -31,7 +31,7 @@ type SubjectApplyConfiguration struct { ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go index 2d17c111c6..fd90067d4d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// UserSubjectApplyConfiguration represents an declarative configuration of the UserSubject type for use +// UserSubjectApplyConfiguration represents a declarative configuration of the UserSubject type for use // with apply. type UserSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// UserSubjectApplyConfiguration constructs an declarative configuration of the UserSubject type for use with +// UserSubjectApplyConfiguration constructs a declarative configuration of the UserSubject type for use with // apply. func UserSubject() *UserSubjectApplyConfiguration { return &UserSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go index 0710480900..45ccc5cb75 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use +// ExemptPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the ExemptPriorityLevelConfiguration type for use // with apply. type ExemptPriorityLevelConfigurationApplyConfiguration struct { NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` LendablePercent *int32 `json:"lendablePercent,omitempty"` } -// ExemptPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the ExemptPriorityLevelConfiguration type for use with +// ExemptPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the ExemptPriorityLevelConfiguration type for use with // apply. 
func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration { return &ExemptPriorityLevelConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go index 6dc1bb4d68..29a8999b8d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go @@ -22,13 +22,13 @@ import ( v1beta1 "k8s.io/api/flowcontrol/v1beta1" ) -// FlowDistinguisherMethodApplyConfiguration represents an declarative configuration of the FlowDistinguisherMethod type for use +// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use // with apply. type FlowDistinguisherMethodApplyConfiguration struct { Type *v1beta1.FlowDistinguisherMethodType `json:"type,omitempty"` } -// FlowDistinguisherMethodApplyConfiguration constructs an declarative configuration of the FlowDistinguisherMethod type for use with +// FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with // apply. func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { return &FlowDistinguisherMethodApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go index f44313f54e..09bd258905 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// FlowSchemaApplyConfiguration represents an declarative configuration of the FlowSchema type for use +// FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use // with apply. type FlowSchemaApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type FlowSchemaApplyConfiguration struct { Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"` } -// FlowSchema constructs an declarative configuration of the FlowSchema type for use with +// FlowSchema constructs a declarative configuration of the FlowSchema type for use with // apply. func FlowSchema(name string) *FlowSchemaApplyConfiguration { b := &FlowSchemaApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *FlowSchemaApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go index b62e9a22ff..d1c3dbec6f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// FlowSchemaConditionApplyConfiguration represents an declarative configuration of the FlowSchemaCondition type for use +// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use // with apply. type FlowSchemaConditionApplyConfiguration struct { Type *v1beta1.FlowSchemaConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type FlowSchemaConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// FlowSchemaConditionApplyConfiguration constructs an declarative configuration of the FlowSchemaCondition type for use with +// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with // apply. func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { return &FlowSchemaConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemaspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemaspec.go index 8d72c2d0d7..1d6e8fc58e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemaspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemaspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// FlowSchemaSpecApplyConfiguration represents an declarative configuration of the FlowSchemaSpec type for use +// FlowSchemaSpecApplyConfiguration represents a declarative configuration of the FlowSchemaSpec type for use // with apply. type FlowSchemaSpecApplyConfiguration struct { PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"` @@ -27,7 +27,7 @@ type FlowSchemaSpecApplyConfiguration struct { Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"` } -// FlowSchemaSpecApplyConfiguration constructs an declarative configuration of the FlowSchemaSpec type for use with +// FlowSchemaSpecApplyConfiguration constructs a declarative configuration of the FlowSchemaSpec type for use with // apply. func FlowSchemaSpec() *FlowSchemaSpecApplyConfiguration { return &FlowSchemaSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemastatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemastatus.go index 6bc6d0543a..5ad8a432b2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemastatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemastatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// FlowSchemaStatusApplyConfiguration represents an declarative configuration of the FlowSchemaStatus type for use +// FlowSchemaStatusApplyConfiguration represents a declarative configuration of the FlowSchemaStatus type for use // with apply. 
type FlowSchemaStatusApplyConfiguration struct { Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"` } -// FlowSchemaStatusApplyConfiguration constructs an declarative configuration of the FlowSchemaStatus type for use with +// FlowSchemaStatusApplyConfiguration constructs a declarative configuration of the FlowSchemaStatus type for use with // apply. func FlowSchemaStatus() *FlowSchemaStatusApplyConfiguration { return &FlowSchemaStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/groupsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/groupsubject.go index 95b416e426..cc274fe2f3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/groupsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/groupsubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// GroupSubjectApplyConfiguration represents an declarative configuration of the GroupSubject type for use +// GroupSubjectApplyConfiguration represents a declarative configuration of the GroupSubject type for use // with apply. type GroupSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// GroupSubjectApplyConfiguration constructs an declarative configuration of the GroupSubject type for use with +// GroupSubjectApplyConfiguration constructs a declarative configuration of the GroupSubject type for use with // apply. func GroupSubject() *GroupSubjectApplyConfiguration { return &GroupSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go index 6f57169e1f..0fe5feca12 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// LimitedPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the LimitedPriorityLevelConfiguration type for use +// LimitedPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the LimitedPriorityLevelConfiguration type for use // with apply. type LimitedPriorityLevelConfigurationApplyConfiguration struct { AssuredConcurrencyShares *int32 `json:"assuredConcurrencyShares,omitempty"` @@ -27,7 +27,7 @@ type LimitedPriorityLevelConfigurationApplyConfiguration struct { BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"` } -// LimitedPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the LimitedPriorityLevelConfiguration type for use with +// LimitedPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the LimitedPriorityLevelConfiguration type for use with // apply. 
func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApplyConfiguration { return &LimitedPriorityLevelConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go index 86e1bef6b9..66f3276010 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go @@ -22,14 +22,14 @@ import ( v1beta1 "k8s.io/api/flowcontrol/v1beta1" ) -// LimitResponseApplyConfiguration represents an declarative configuration of the LimitResponse type for use +// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use // with apply. type LimitResponseApplyConfiguration struct { Type *v1beta1.LimitResponseType `json:"type,omitempty"` Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` } -// LimitResponseApplyConfiguration constructs an declarative configuration of the LimitResponse type for use with +// LimitResponseApplyConfiguration constructs a declarative configuration of the LimitResponse type for use with // apply. func LimitResponse() *LimitResponseApplyConfiguration { return &LimitResponseApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/nonresourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/nonresourcepolicyrule.go index 594ebc9912..3c571ccb06 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/nonresourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/nonresourcepolicyrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// NonResourcePolicyRuleApplyConfiguration represents an declarative configuration of the NonResourcePolicyRule type for use +// NonResourcePolicyRuleApplyConfiguration represents a declarative configuration of the NonResourcePolicyRule type for use // with apply. type NonResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// NonResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the NonResourcePolicyRule type for use with +// NonResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the NonResourcePolicyRule type for use with // apply. func NonResourcePolicyRule() *NonResourcePolicyRuleApplyConfiguration { return &NonResourcePolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/policyruleswithsubjects.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/policyruleswithsubjects.go index ea5b266b4c..32a082dc76 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/policyruleswithsubjects.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/policyruleswithsubjects.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// PolicyRulesWithSubjectsApplyConfiguration represents an declarative configuration of the PolicyRulesWithSubjects type for use +// PolicyRulesWithSubjectsApplyConfiguration represents a declarative configuration of the PolicyRulesWithSubjects type for use // with apply. 
type PolicyRulesWithSubjectsApplyConfiguration struct { Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` @@ -26,7 +26,7 @@ type PolicyRulesWithSubjectsApplyConfiguration struct { NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"` } -// PolicyRulesWithSubjectsApplyConfiguration constructs an declarative configuration of the PolicyRulesWithSubjects type for use with +// PolicyRulesWithSubjectsApplyConfiguration constructs a declarative configuration of the PolicyRulesWithSubjects type for use with // apply. func PolicyRulesWithSubjects() *PolicyRulesWithSubjectsApplyConfiguration { return &PolicyRulesWithSubjectsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go index 84324dbfdc..c4243f874c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the PriorityLevelConfiguration type for use +// PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use // with apply. type PriorityLevelConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PriorityLevelConfigurationApplyConfiguration struct { Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"` } -// PriorityLevelConfiguration constructs an declarative configuration of the PriorityLevelConfiguration type for use with +// PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with // apply. func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyConfiguration { b := &PriorityLevelConfigurationApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go index 59bc610510..1ad4a554b7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PriorityLevelConfigurationConditionApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationCondition type for use +// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use // with apply. 
type PriorityLevelConfigurationConditionApplyConfiguration struct { Type *v1beta1.PriorityLevelConfigurationConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type PriorityLevelConfigurationConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// PriorityLevelConfigurationConditionApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationCondition type for use with +// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with // apply. func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionApplyConfiguration { return &PriorityLevelConfigurationConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationreference.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationreference.go index c44bcc08b4..b5e773e82a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationreference.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// PriorityLevelConfigurationReferenceApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationReference type for use +// PriorityLevelConfigurationReferenceApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationReference type for use // with apply. type PriorityLevelConfigurationReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// PriorityLevelConfigurationReferenceApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationReference type for use with +// PriorityLevelConfigurationReferenceApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationReference type for use with // apply. func PriorityLevelConfigurationReference() *PriorityLevelConfigurationReferenceApplyConfiguration { return &PriorityLevelConfigurationReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go index 19146d9f66..b013845f43 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go @@ -22,7 +22,7 @@ import ( v1beta1 "k8s.io/api/flowcontrol/v1beta1" ) -// PriorityLevelConfigurationSpecApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationSpec type for use +// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use // with apply. 
type PriorityLevelConfigurationSpecApplyConfiguration struct { Type *v1beta1.PriorityLevelEnablement `json:"type,omitempty"` @@ -30,7 +30,7 @@ type PriorityLevelConfigurationSpecApplyConfiguration struct { Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` } -// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with +// PriorityLevelConfigurationSpecApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationSpec type for use with // apply. func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfiguration { return &PriorityLevelConfigurationSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationstatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationstatus.go index 3c27e6aa62..875b01efec 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// PriorityLevelConfigurationStatusApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationStatus type for use +// PriorityLevelConfigurationStatusApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationStatus type for use // with apply. type PriorityLevelConfigurationStatusApplyConfiguration struct { Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"` } -// PriorityLevelConfigurationStatusApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationStatus type for use with +// PriorityLevelConfigurationStatusApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationStatus type for use with // apply. func PriorityLevelConfigurationStatus() *PriorityLevelConfigurationStatusApplyConfiguration { return &PriorityLevelConfigurationStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/queuingconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/queuingconfiguration.go index 5e6e6e7b01..85a8b88630 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/queuingconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/queuingconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// QueuingConfigurationApplyConfiguration represents an declarative configuration of the QueuingConfiguration type for use +// QueuingConfigurationApplyConfiguration represents a declarative configuration of the QueuingConfiguration type for use // with apply. type QueuingConfigurationApplyConfiguration struct { Queues *int32 `json:"queues,omitempty"` @@ -26,7 +26,7 @@ type QueuingConfigurationApplyConfiguration struct { QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"` } -// QueuingConfigurationApplyConfiguration constructs an declarative configuration of the QueuingConfiguration type for use with +// QueuingConfigurationApplyConfiguration constructs a declarative configuration of the QueuingConfiguration type for use with // apply. 
func QueuingConfiguration() *QueuingConfigurationApplyConfiguration { return &QueuingConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/resourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/resourcepolicyrule.go index 2e12ee1cc0..5c67dad759 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/resourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/resourcepolicyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// ResourcePolicyRuleApplyConfiguration represents an declarative configuration of the ResourcePolicyRule type for use +// ResourcePolicyRuleApplyConfiguration represents a declarative configuration of the ResourcePolicyRule type for use // with apply. type ResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type ResourcePolicyRuleApplyConfiguration struct { Namespaces []string `json:"namespaces,omitempty"` } -// ResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the ResourcePolicyRule type for use with +// ResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the ResourcePolicyRule type for use with // apply. func ResourcePolicyRule() *ResourcePolicyRuleApplyConfiguration { return &ResourcePolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/serviceaccountsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/serviceaccountsubject.go index f5a146a9b1..439e5ff753 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/serviceaccountsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/serviceaccountsubject.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// ServiceAccountSubjectApplyConfiguration represents an declarative configuration of the ServiceAccountSubject type for use +// ServiceAccountSubjectApplyConfiguration represents a declarative configuration of the ServiceAccountSubject type for use // with apply. type ServiceAccountSubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` Name *string `json:"name,omitempty"` } -// ServiceAccountSubjectApplyConfiguration constructs an declarative configuration of the ServiceAccountSubject type for use with +// ServiceAccountSubjectApplyConfiguration constructs a declarative configuration of the ServiceAccountSubject type for use with // apply. func ServiceAccountSubject() *ServiceAccountSubjectApplyConfiguration { return &ServiceAccountSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go index af571029fc..b5c231f6d2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go @@ -22,7 +22,7 @@ import ( v1beta1 "k8s.io/api/flowcontrol/v1beta1" ) -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. 
type SubjectApplyConfiguration struct { Kind *v1beta1.SubjectKind `json:"kind,omitempty"` @@ -31,7 +31,7 @@ type SubjectApplyConfiguration struct { ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/usersubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/usersubject.go index 35bf27a593..bc2deae4c2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/usersubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/usersubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// UserSubjectApplyConfiguration represents an declarative configuration of the UserSubject type for use +// UserSubjectApplyConfiguration represents a declarative configuration of the UserSubject type for use // with apply. type UserSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// UserSubjectApplyConfiguration constructs an declarative configuration of the UserSubject type for use with +// UserSubjectApplyConfiguration constructs a declarative configuration of the UserSubject type for use with // apply. func UserSubject() *UserSubjectApplyConfiguration { return &UserSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go index d6bc330fe7..0c02d9b389 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta2 -// ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use +// ExemptPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the ExemptPriorityLevelConfiguration type for use // with apply. type ExemptPriorityLevelConfigurationApplyConfiguration struct { NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` LendablePercent *int32 `json:"lendablePercent,omitempty"` } -// ExemptPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the ExemptPriorityLevelConfiguration type for use with +// ExemptPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the ExemptPriorityLevelConfiguration type for use with // apply. 
func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration { return &ExemptPriorityLevelConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go index 924f966d48..e3c4b97a7b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go @@ -22,13 +22,13 @@ import ( v1beta2 "k8s.io/api/flowcontrol/v1beta2" ) -// FlowDistinguisherMethodApplyConfiguration represents an declarative configuration of the FlowDistinguisherMethod type for use +// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use // with apply. type FlowDistinguisherMethodApplyConfiguration struct { Type *v1beta2.FlowDistinguisherMethodType `json:"type,omitempty"` } -// FlowDistinguisherMethodApplyConfiguration constructs an declarative configuration of the FlowDistinguisherMethod type for use with +// FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with // apply. func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { return &FlowDistinguisherMethodApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go index 63a5f0aa30..ffc3af950a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// FlowSchemaApplyConfiguration represents an declarative configuration of the FlowSchema type for use +// FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use // with apply. type FlowSchemaApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type FlowSchemaApplyConfiguration struct { Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"` } -// FlowSchema constructs an declarative configuration of the FlowSchema type for use with +// FlowSchema constructs a declarative configuration of the FlowSchema type for use with // apply. func FlowSchema(name string) *FlowSchemaApplyConfiguration { b := &FlowSchemaApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *FlowSchemaApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go index 04dfcbf11a..44571d263d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// FlowSchemaConditionApplyConfiguration represents an declarative configuration of the FlowSchemaCondition type for use +// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use // with apply. type FlowSchemaConditionApplyConfiguration struct { Type *v1beta2.FlowSchemaConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type FlowSchemaConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// FlowSchemaConditionApplyConfiguration constructs an declarative configuration of the FlowSchemaCondition type for use with +// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with // apply. func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { return &FlowSchemaConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go index a5477e2768..6eab63bfa9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// FlowSchemaSpecApplyConfiguration represents an declarative configuration of the FlowSchemaSpec type for use +// FlowSchemaSpecApplyConfiguration represents a declarative configuration of the FlowSchemaSpec type for use // with apply. type FlowSchemaSpecApplyConfiguration struct { PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"` @@ -27,7 +27,7 @@ type FlowSchemaSpecApplyConfiguration struct { Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"` } -// FlowSchemaSpecApplyConfiguration constructs an declarative configuration of the FlowSchemaSpec type for use with +// FlowSchemaSpecApplyConfiguration constructs a declarative configuration of the FlowSchemaSpec type for use with // apply. func FlowSchemaSpec() *FlowSchemaSpecApplyConfiguration { return &FlowSchemaSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go index 67c5be2cbe..70ac997e45 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta2 -// FlowSchemaStatusApplyConfiguration represents an declarative configuration of the FlowSchemaStatus type for use +// FlowSchemaStatusApplyConfiguration represents a declarative configuration of the FlowSchemaStatus type for use // with apply. 
type FlowSchemaStatusApplyConfiguration struct { Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"` } -// FlowSchemaStatusApplyConfiguration constructs an declarative configuration of the FlowSchemaStatus type for use with +// FlowSchemaStatusApplyConfiguration constructs a declarative configuration of the FlowSchemaStatus type for use with // apply. func FlowSchemaStatus() *FlowSchemaStatusApplyConfiguration { return &FlowSchemaStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go index b670f2cfd9..25207d7c1a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta2 -// GroupSubjectApplyConfiguration represents an declarative configuration of the GroupSubject type for use +// GroupSubjectApplyConfiguration represents a declarative configuration of the GroupSubject type for use // with apply. type GroupSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// GroupSubjectApplyConfiguration constructs an declarative configuration of the GroupSubject type for use with +// GroupSubjectApplyConfiguration constructs a declarative configuration of the GroupSubject type for use with // apply. func GroupSubject() *GroupSubjectApplyConfiguration { return &GroupSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go index 563b185c74..298dd46370 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// LimitedPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the LimitedPriorityLevelConfiguration type for use +// LimitedPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the LimitedPriorityLevelConfiguration type for use // with apply. type LimitedPriorityLevelConfigurationApplyConfiguration struct { AssuredConcurrencyShares *int32 `json:"assuredConcurrencyShares,omitempty"` @@ -27,7 +27,7 @@ type LimitedPriorityLevelConfigurationApplyConfiguration struct { BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"` } -// LimitedPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the LimitedPriorityLevelConfiguration type for use with +// LimitedPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the LimitedPriorityLevelConfiguration type for use with // apply. 
func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApplyConfiguration { return &LimitedPriorityLevelConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go index a9b7661fb2..38a513d306 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go @@ -22,14 +22,14 @@ import ( v1beta2 "k8s.io/api/flowcontrol/v1beta2" ) -// LimitResponseApplyConfiguration represents an declarative configuration of the LimitResponse type for use +// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use // with apply. type LimitResponseApplyConfiguration struct { Type *v1beta2.LimitResponseType `json:"type,omitempty"` Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` } -// LimitResponseApplyConfiguration constructs an declarative configuration of the LimitResponse type for use with +// LimitResponseApplyConfiguration constructs a declarative configuration of the LimitResponse type for use with // apply. func LimitResponse() *LimitResponseApplyConfiguration { return &LimitResponseApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go index cb8ba0afd6..5032ee4898 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta2 -// NonResourcePolicyRuleApplyConfiguration represents an declarative configuration of the NonResourcePolicyRule type for use +// NonResourcePolicyRuleApplyConfiguration represents a declarative configuration of the NonResourcePolicyRule type for use // with apply. type NonResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// NonResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the NonResourcePolicyRule type for use with +// NonResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the NonResourcePolicyRule type for use with // apply. func NonResourcePolicyRule() *NonResourcePolicyRuleApplyConfiguration { return &NonResourcePolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go index 179c3979db..2bb8c87182 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// PolicyRulesWithSubjectsApplyConfiguration represents an declarative configuration of the PolicyRulesWithSubjects type for use +// PolicyRulesWithSubjectsApplyConfiguration represents a declarative configuration of the PolicyRulesWithSubjects type for use // with apply. 
type PolicyRulesWithSubjectsApplyConfiguration struct { Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` @@ -26,7 +26,7 @@ type PolicyRulesWithSubjectsApplyConfiguration struct { NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"` } -// PolicyRulesWithSubjectsApplyConfiguration constructs an declarative configuration of the PolicyRulesWithSubjects type for use with +// PolicyRulesWithSubjectsApplyConfiguration constructs a declarative configuration of the PolicyRulesWithSubjects type for use with // apply. func PolicyRulesWithSubjects() *PolicyRulesWithSubjectsApplyConfiguration { return &PolicyRulesWithSubjectsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go index 3256b36300..7d52ca2c2a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the PriorityLevelConfiguration type for use +// PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use // with apply. type PriorityLevelConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PriorityLevelConfigurationApplyConfiguration struct { Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"` } -// PriorityLevelConfiguration constructs an declarative configuration of the PriorityLevelConfiguration type for use with +// PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with // apply. func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyConfiguration { b := &PriorityLevelConfigurationApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go index f742adeff0..ddb17e9843 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PriorityLevelConfigurationConditionApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationCondition type for use +// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use // with apply. 
type PriorityLevelConfigurationConditionApplyConfiguration struct { Type *v1beta2.PriorityLevelConfigurationConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type PriorityLevelConfigurationConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// PriorityLevelConfigurationConditionApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationCondition type for use with +// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with // apply. func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionApplyConfiguration { return &PriorityLevelConfigurationConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go index 581b451ffd..bbf718b60f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta2 -// PriorityLevelConfigurationReferenceApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationReference type for use +// PriorityLevelConfigurationReferenceApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationReference type for use // with apply. type PriorityLevelConfigurationReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// PriorityLevelConfigurationReferenceApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationReference type for use with +// PriorityLevelConfigurationReferenceApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationReference type for use with // apply. func PriorityLevelConfigurationReference() *PriorityLevelConfigurationReferenceApplyConfiguration { return &PriorityLevelConfigurationReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go index 994a8a16a2..c083ad0ba6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go @@ -22,7 +22,7 @@ import ( v1beta2 "k8s.io/api/flowcontrol/v1beta2" ) -// PriorityLevelConfigurationSpecApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationSpec type for use +// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use // with apply. 
type PriorityLevelConfigurationSpecApplyConfiguration struct { Type *v1beta2.PriorityLevelEnablement `json:"type,omitempty"` @@ -30,7 +30,7 @@ type PriorityLevelConfigurationSpecApplyConfiguration struct { Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` } -// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with +// PriorityLevelConfigurationSpecApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationSpec type for use with // apply. func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfiguration { return &PriorityLevelConfigurationSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go index b55e32be00..7a1f8790b9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta2 -// PriorityLevelConfigurationStatusApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationStatus type for use +// PriorityLevelConfigurationStatusApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationStatus type for use // with apply. type PriorityLevelConfigurationStatusApplyConfiguration struct { Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"` } -// PriorityLevelConfigurationStatusApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationStatus type for use with +// PriorityLevelConfigurationStatusApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationStatus type for use with // apply. func PriorityLevelConfigurationStatus() *PriorityLevelConfigurationStatusApplyConfiguration { return &PriorityLevelConfigurationStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go index 06246fb27e..19c34c5f83 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// QueuingConfigurationApplyConfiguration represents an declarative configuration of the QueuingConfiguration type for use +// QueuingConfigurationApplyConfiguration represents a declarative configuration of the QueuingConfiguration type for use // with apply. type QueuingConfigurationApplyConfiguration struct { Queues *int32 `json:"queues,omitempty"` @@ -26,7 +26,7 @@ type QueuingConfigurationApplyConfiguration struct { QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"` } -// QueuingConfigurationApplyConfiguration constructs an declarative configuration of the QueuingConfiguration type for use with +// QueuingConfigurationApplyConfiguration constructs a declarative configuration of the QueuingConfiguration type for use with // apply. 
func QueuingConfiguration() *QueuingConfigurationApplyConfiguration { return &QueuingConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go index b67ea1c7f9..070d2ed465 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// ResourcePolicyRuleApplyConfiguration represents an declarative configuration of the ResourcePolicyRule type for use +// ResourcePolicyRuleApplyConfiguration represents a declarative configuration of the ResourcePolicyRule type for use // with apply. type ResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type ResourcePolicyRuleApplyConfiguration struct { Namespaces []string `json:"namespaces,omitempty"` } -// ResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the ResourcePolicyRule type for use with +// ResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the ResourcePolicyRule type for use with // apply. func ResourcePolicyRule() *ResourcePolicyRuleApplyConfiguration { return &ResourcePolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go index b6cfdcad3b..c0d44721cc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta2 -// ServiceAccountSubjectApplyConfiguration represents an declarative configuration of the ServiceAccountSubject type for use +// ServiceAccountSubjectApplyConfiguration represents a declarative configuration of the ServiceAccountSubject type for use // with apply. type ServiceAccountSubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` Name *string `json:"name,omitempty"` } -// ServiceAccountSubjectApplyConfiguration constructs an declarative configuration of the ServiceAccountSubject type for use with +// ServiceAccountSubjectApplyConfiguration constructs a declarative configuration of the ServiceAccountSubject type for use with // apply. func ServiceAccountSubject() *ServiceAccountSubjectApplyConfiguration { return &ServiceAccountSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go index 7030785b8c..2cfaab43d8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go @@ -22,7 +22,7 @@ import ( v1beta2 "k8s.io/api/flowcontrol/v1beta2" ) -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. 
type SubjectApplyConfiguration struct { Kind *v1beta2.SubjectKind `json:"kind,omitempty"` @@ -31,7 +31,7 @@ type SubjectApplyConfiguration struct { ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go index 8c77b3e8a2..c249f042da 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta2 -// UserSubjectApplyConfiguration represents an declarative configuration of the UserSubject type for use +// UserSubjectApplyConfiguration represents a declarative configuration of the UserSubject type for use // with apply. type UserSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// UserSubjectApplyConfiguration constructs an declarative configuration of the UserSubject type for use with +// UserSubjectApplyConfiguration constructs a declarative configuration of the UserSubject type for use with // apply. func UserSubject() *UserSubjectApplyConfiguration { return &UserSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go index b03c11d0d9..b9bf6993af 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta3 -// ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use +// ExemptPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the ExemptPriorityLevelConfiguration type for use // with apply. type ExemptPriorityLevelConfigurationApplyConfiguration struct { NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` LendablePercent *int32 `json:"lendablePercent,omitempty"` } -// ExemptPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the ExemptPriorityLevelConfiguration type for use with +// ExemptPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the ExemptPriorityLevelConfiguration type for use with // apply. 
func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration { return &ExemptPriorityLevelConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go index cd45725932..49d84bd866 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go @@ -22,13 +22,13 @@ import ( v1beta3 "k8s.io/api/flowcontrol/v1beta3" ) -// FlowDistinguisherMethodApplyConfiguration represents an declarative configuration of the FlowDistinguisherMethod type for use +// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use // with apply. type FlowDistinguisherMethodApplyConfiguration struct { Type *v1beta3.FlowDistinguisherMethodType `json:"type,omitempty"` } -// FlowDistinguisherMethodApplyConfiguration constructs an declarative configuration of the FlowDistinguisherMethod type for use with +// FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with // apply. func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { return &FlowDistinguisherMethodApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go index c95635360e..1f69c43b23 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// FlowSchemaApplyConfiguration represents an declarative configuration of the FlowSchema type for use +// FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use // with apply. type FlowSchemaApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type FlowSchemaApplyConfiguration struct { Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"` } -// FlowSchema constructs an declarative configuration of the FlowSchema type for use with +// FlowSchema constructs a declarative configuration of the FlowSchema type for use with // apply. func FlowSchema(name string) *FlowSchemaApplyConfiguration { b := &FlowSchemaApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *FlowSchemaApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go index 0ef3a2c921..41d623aeb8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// FlowSchemaConditionApplyConfiguration represents an declarative configuration of the FlowSchemaCondition type for use +// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use // with apply. type FlowSchemaConditionApplyConfiguration struct { Type *v1beta3.FlowSchemaConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type FlowSchemaConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// FlowSchemaConditionApplyConfiguration constructs an declarative configuration of the FlowSchemaCondition type for use with +// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with // apply. func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { return &FlowSchemaConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go index e077ed3fde..7141f6a6a1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta3 -// FlowSchemaSpecApplyConfiguration represents an declarative configuration of the FlowSchemaSpec type for use +// FlowSchemaSpecApplyConfiguration represents a declarative configuration of the FlowSchemaSpec type for use // with apply. type FlowSchemaSpecApplyConfiguration struct { PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"` @@ -27,7 +27,7 @@ type FlowSchemaSpecApplyConfiguration struct { Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"` } -// FlowSchemaSpecApplyConfiguration constructs an declarative configuration of the FlowSchemaSpec type for use with +// FlowSchemaSpecApplyConfiguration constructs a declarative configuration of the FlowSchemaSpec type for use with // apply. func FlowSchemaSpec() *FlowSchemaSpecApplyConfiguration { return &FlowSchemaSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go index 18db1c9325..294ddc9098 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta3 -// FlowSchemaStatusApplyConfiguration represents an declarative configuration of the FlowSchemaStatus type for use +// FlowSchemaStatusApplyConfiguration represents a declarative configuration of the FlowSchemaStatus type for use // with apply. 
type FlowSchemaStatusApplyConfiguration struct { Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"` } -// FlowSchemaStatusApplyConfiguration constructs an declarative configuration of the FlowSchemaStatus type for use with +// FlowSchemaStatusApplyConfiguration constructs a declarative configuration of the FlowSchemaStatus type for use with // apply. func FlowSchemaStatus() *FlowSchemaStatusApplyConfiguration { return &FlowSchemaStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go index b919b711b3..6576e716ef 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta3 -// GroupSubjectApplyConfiguration represents an declarative configuration of the GroupSubject type for use +// GroupSubjectApplyConfiguration represents a declarative configuration of the GroupSubject type for use // with apply. type GroupSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// GroupSubjectApplyConfiguration constructs an declarative configuration of the GroupSubject type for use with +// GroupSubjectApplyConfiguration constructs a declarative configuration of the GroupSubject type for use with // apply. func GroupSubject() *GroupSubjectApplyConfiguration { return &GroupSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go index 269a48721c..bd98dd683c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta3 -// LimitedPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the LimitedPriorityLevelConfiguration type for use +// LimitedPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the LimitedPriorityLevelConfiguration type for use // with apply. type LimitedPriorityLevelConfigurationApplyConfiguration struct { NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` @@ -27,7 +27,7 @@ type LimitedPriorityLevelConfigurationApplyConfiguration struct { BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"` } -// LimitedPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the LimitedPriorityLevelConfiguration type for use with +// LimitedPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the LimitedPriorityLevelConfiguration type for use with // apply. 
func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApplyConfiguration { return &LimitedPriorityLevelConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go index b7a64ebfee..8deaabdebd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go @@ -22,14 +22,14 @@ import ( v1beta3 "k8s.io/api/flowcontrol/v1beta3" ) -// LimitResponseApplyConfiguration represents an declarative configuration of the LimitResponse type for use +// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use // with apply. type LimitResponseApplyConfiguration struct { Type *v1beta3.LimitResponseType `json:"type,omitempty"` Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` } -// LimitResponseApplyConfiguration constructs an declarative configuration of the LimitResponse type for use with +// LimitResponseApplyConfiguration constructs a declarative configuration of the LimitResponse type for use with // apply. func LimitResponse() *LimitResponseApplyConfiguration { return &LimitResponseApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go index ecb47f52cf..2dd0d2b068 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta3 -// NonResourcePolicyRuleApplyConfiguration represents an declarative configuration of the NonResourcePolicyRule type for use +// NonResourcePolicyRuleApplyConfiguration represents a declarative configuration of the NonResourcePolicyRule type for use // with apply. type NonResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// NonResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the NonResourcePolicyRule type for use with +// NonResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the NonResourcePolicyRule type for use with // apply. func NonResourcePolicyRule() *NonResourcePolicyRuleApplyConfiguration { return &NonResourcePolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go index e30aace194..cc64dc585b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta3 -// PolicyRulesWithSubjectsApplyConfiguration represents an declarative configuration of the PolicyRulesWithSubjects type for use +// PolicyRulesWithSubjectsApplyConfiguration represents a declarative configuration of the PolicyRulesWithSubjects type for use // with apply. 
type PolicyRulesWithSubjectsApplyConfiguration struct { Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` @@ -26,7 +26,7 @@ type PolicyRulesWithSubjectsApplyConfiguration struct { NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"` } -// PolicyRulesWithSubjectsApplyConfiguration constructs an declarative configuration of the PolicyRulesWithSubjects type for use with +// PolicyRulesWithSubjectsApplyConfiguration constructs a declarative configuration of the PolicyRulesWithSubjects type for use with // apply. func PolicyRulesWithSubjects() *PolicyRulesWithSubjectsApplyConfiguration { return &PolicyRulesWithSubjectsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go index 6fbbbea8fe..e7d1a3a5f8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the PriorityLevelConfiguration type for use +// PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use // with apply. type PriorityLevelConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PriorityLevelConfigurationApplyConfiguration struct { Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"` } -// PriorityLevelConfiguration constructs an declarative configuration of the PriorityLevelConfiguration type for use with +// PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with // apply. func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyConfiguration { b := &PriorityLevelConfigurationApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go index 6e36b6a079..8e9687bb90 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PriorityLevelConfigurationConditionApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationCondition type for use +// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use // with apply. 
type PriorityLevelConfigurationConditionApplyConfiguration struct { Type *v1beta3.PriorityLevelConfigurationConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type PriorityLevelConfigurationConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// PriorityLevelConfigurationConditionApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationCondition type for use with +// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with // apply. func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionApplyConfiguration { return &PriorityLevelConfigurationConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go index cb827b1e62..566aaa916b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta3 -// PriorityLevelConfigurationReferenceApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationReference type for use +// PriorityLevelConfigurationReferenceApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationReference type for use // with apply. type PriorityLevelConfigurationReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// PriorityLevelConfigurationReferenceApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationReference type for use with +// PriorityLevelConfigurationReferenceApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationReference type for use with // apply. func PriorityLevelConfigurationReference() *PriorityLevelConfigurationReferenceApplyConfiguration { return &PriorityLevelConfigurationReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go index 5b0680d912..9fa1112ce6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go @@ -22,7 +22,7 @@ import ( v1beta3 "k8s.io/api/flowcontrol/v1beta3" ) -// PriorityLevelConfigurationSpecApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationSpec type for use +// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use // with apply. 
type PriorityLevelConfigurationSpecApplyConfiguration struct { Type *v1beta3.PriorityLevelEnablement `json:"type,omitempty"` @@ -30,7 +30,7 @@ type PriorityLevelConfigurationSpecApplyConfiguration struct { Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` } -// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with +// PriorityLevelConfigurationSpecApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationSpec type for use with // apply. func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfiguration { return &PriorityLevelConfigurationSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go index 0ee9e306eb..be2436457e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta3 -// PriorityLevelConfigurationStatusApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationStatus type for use +// PriorityLevelConfigurationStatusApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationStatus type for use // with apply. type PriorityLevelConfigurationStatusApplyConfiguration struct { Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"` } -// PriorityLevelConfigurationStatusApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationStatus type for use with +// PriorityLevelConfigurationStatusApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationStatus type for use with // apply. func PriorityLevelConfigurationStatus() *PriorityLevelConfigurationStatusApplyConfiguration { return &PriorityLevelConfigurationStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go index fc86c44431..f9a3c6d1a6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta3 -// QueuingConfigurationApplyConfiguration represents an declarative configuration of the QueuingConfiguration type for use +// QueuingConfigurationApplyConfiguration represents a declarative configuration of the QueuingConfiguration type for use // with apply. type QueuingConfigurationApplyConfiguration struct { Queues *int32 `json:"queues,omitempty"` @@ -26,7 +26,7 @@ type QueuingConfigurationApplyConfiguration struct { QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"` } -// QueuingConfigurationApplyConfiguration constructs an declarative configuration of the QueuingConfiguration type for use with +// QueuingConfigurationApplyConfiguration constructs a declarative configuration of the QueuingConfiguration type for use with // apply. 
func QueuingConfiguration() *QueuingConfigurationApplyConfiguration { return &QueuingConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go index 72623ffe49..e38f711db0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta3 -// ResourcePolicyRuleApplyConfiguration represents an declarative configuration of the ResourcePolicyRule type for use +// ResourcePolicyRuleApplyConfiguration represents a declarative configuration of the ResourcePolicyRule type for use // with apply. type ResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type ResourcePolicyRuleApplyConfiguration struct { Namespaces []string `json:"namespaces,omitempty"` } -// ResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the ResourcePolicyRule type for use with +// ResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the ResourcePolicyRule type for use with // apply. func ResourcePolicyRule() *ResourcePolicyRuleApplyConfiguration { return &ResourcePolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go index e2d6b1b213..a5ed40c2ae 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta3 -// ServiceAccountSubjectApplyConfiguration represents an declarative configuration of the ServiceAccountSubject type for use +// ServiceAccountSubjectApplyConfiguration represents a declarative configuration of the ServiceAccountSubject type for use // with apply. type ServiceAccountSubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` Name *string `json:"name,omitempty"` } -// ServiceAccountSubjectApplyConfiguration constructs an declarative configuration of the ServiceAccountSubject type for use with +// ServiceAccountSubjectApplyConfiguration constructs a declarative configuration of the ServiceAccountSubject type for use with // apply. func ServiceAccountSubject() *ServiceAccountSubjectApplyConfiguration { return &ServiceAccountSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go index f13b8f3ec5..c412b2a7a2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go @@ -22,7 +22,7 @@ import ( v1beta3 "k8s.io/api/flowcontrol/v1beta3" ) -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. 
type SubjectApplyConfiguration struct { Kind *v1beta3.SubjectKind `json:"kind,omitempty"` @@ -31,7 +31,7 @@ type SubjectApplyConfiguration struct { ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go index 3db3abbc1a..7b3ec2ba82 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta3 -// UserSubjectApplyConfiguration represents an declarative configuration of the UserSubject type for use +// UserSubjectApplyConfiguration represents a declarative configuration of the UserSubject type for use // with apply. type UserSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// UserSubjectApplyConfiguration constructs an declarative configuration of the UserSubject type for use with +// UserSubjectApplyConfiguration constructs a declarative configuration of the UserSubject type for use with // apply. func UserSubject() *UserSubjectApplyConfiguration { return &UserSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereview.go b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereview.go new file mode 100644 index 0000000000..91944002d4 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereview.go @@ -0,0 +1,262 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + imagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ImageReviewApplyConfiguration represents a declarative configuration of the ImageReview type for use +// with apply. +type ImageReviewApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ImageReviewSpecApplyConfiguration `json:"spec,omitempty"` + Status *ImageReviewStatusApplyConfiguration `json:"status,omitempty"` +} + +// ImageReview constructs a declarative configuration of the ImageReview type for use with +// apply. 
+func ImageReview(name string) *ImageReviewApplyConfiguration { + b := &ImageReviewApplyConfiguration{} + b.WithName(name) + b.WithKind("ImageReview") + b.WithAPIVersion("imagepolicy.k8s.io/v1alpha1") + return b +} + +// ExtractImageReview extracts the applied configuration owned by fieldManager from +// imageReview. If no managedFields are found in imageReview for fieldManager, a +// ImageReviewApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// imageReview must be a unmodified ImageReview API object that was retrieved from the Kubernetes API. +// ExtractImageReview provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractImageReview(imageReview *imagepolicyv1alpha1.ImageReview, fieldManager string) (*ImageReviewApplyConfiguration, error) { + return extractImageReview(imageReview, fieldManager, "") +} + +// ExtractImageReviewStatus is the same as ExtractImageReview except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractImageReviewStatus(imageReview *imagepolicyv1alpha1.ImageReview, fieldManager string) (*ImageReviewApplyConfiguration, error) { + return extractImageReview(imageReview, fieldManager, "status") +} + +func extractImageReview(imageReview *imagepolicyv1alpha1.ImageReview, fieldManager string, subresource string) (*ImageReviewApplyConfiguration, error) { + b := &ImageReviewApplyConfiguration{} + err := managedfields.ExtractInto(imageReview, internal.Parser().Type("io.k8s.api.imagepolicy.v1alpha1.ImageReview"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(imageReview.Name) + + b.WithKind("ImageReview") + b.WithAPIVersion("imagepolicy.k8s.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ImageReviewApplyConfiguration) WithKind(value string) *ImageReviewApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ImageReviewApplyConfiguration) WithAPIVersion(value string) *ImageReviewApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *ImageReviewApplyConfiguration) WithName(value string) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ImageReviewApplyConfiguration) WithGenerateName(value string) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ImageReviewApplyConfiguration) WithNamespace(value string) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ImageReviewApplyConfiguration) WithUID(value types.UID) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ImageReviewApplyConfiguration) WithResourceVersion(value string) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ImageReviewApplyConfiguration) WithGeneration(value int64) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ImageReviewApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *ImageReviewApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ImageReviewApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ImageReviewApplyConfiguration) WithLabels(entries map[string]string) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ImageReviewApplyConfiguration) WithAnnotations(entries map[string]string) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ImageReviewApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *ImageReviewApplyConfiguration) WithFinalizers(values ...string) *ImageReviewApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ImageReviewApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ImageReviewApplyConfiguration) WithSpec(value *ImageReviewSpecApplyConfiguration) *ImageReviewApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ImageReviewApplyConfiguration) WithStatus(value *ImageReviewStatusApplyConfiguration) *ImageReviewApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ImageReviewApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewcontainerspec.go b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewcontainerspec.go new file mode 100644 index 0000000000..adfdb32584 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewcontainerspec.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ImageReviewContainerSpecApplyConfiguration represents a declarative configuration of the ImageReviewContainerSpec type for use +// with apply. +type ImageReviewContainerSpecApplyConfiguration struct { + Image *string `json:"image,omitempty"` +} + +// ImageReviewContainerSpecApplyConfiguration constructs a declarative configuration of the ImageReviewContainerSpec type for use with +// apply. +func ImageReviewContainerSpec() *ImageReviewContainerSpecApplyConfiguration { + return &ImageReviewContainerSpecApplyConfiguration{} +} + +// WithImage sets the Image field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Image field is set to the value of the last call. 
+func (b *ImageReviewContainerSpecApplyConfiguration) WithImage(value string) *ImageReviewContainerSpecApplyConfiguration { + b.Image = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewspec.go b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewspec.go new file mode 100644 index 0000000000..7efc36a321 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewspec.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ImageReviewSpecApplyConfiguration represents a declarative configuration of the ImageReviewSpec type for use +// with apply. +type ImageReviewSpecApplyConfiguration struct { + Containers []ImageReviewContainerSpecApplyConfiguration `json:"containers,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` + Namespace *string `json:"namespace,omitempty"` +} + +// ImageReviewSpecApplyConfiguration constructs a declarative configuration of the ImageReviewSpec type for use with +// apply. +func ImageReviewSpec() *ImageReviewSpecApplyConfiguration { + return &ImageReviewSpecApplyConfiguration{} +} + +// WithContainers adds the given value to the Containers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Containers field. +func (b *ImageReviewSpecApplyConfiguration) WithContainers(values ...*ImageReviewContainerSpecApplyConfiguration) *ImageReviewSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithContainers") + } + b.Containers = append(b.Containers, *values[i]) + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ImageReviewSpecApplyConfiguration) WithAnnotations(entries map[string]string) *ImageReviewSpecApplyConfiguration { + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *ImageReviewSpecApplyConfiguration) WithNamespace(value string) *ImageReviewSpecApplyConfiguration { + b.Namespace = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewstatus.go b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewstatus.go new file mode 100644 index 0000000000..e26a427e69 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewstatus.go @@ -0,0 +1,63 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ImageReviewStatusApplyConfiguration represents a declarative configuration of the ImageReviewStatus type for use +// with apply. +type ImageReviewStatusApplyConfiguration struct { + Allowed *bool `json:"allowed,omitempty"` + Reason *string `json:"reason,omitempty"` + AuditAnnotations map[string]string `json:"auditAnnotations,omitempty"` +} + +// ImageReviewStatusApplyConfiguration constructs a declarative configuration of the ImageReviewStatus type for use with +// apply. +func ImageReviewStatus() *ImageReviewStatusApplyConfiguration { + return &ImageReviewStatusApplyConfiguration{} +} + +// WithAllowed sets the Allowed field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Allowed field is set to the value of the last call. +func (b *ImageReviewStatusApplyConfiguration) WithAllowed(value bool) *ImageReviewStatusApplyConfiguration { + b.Allowed = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *ImageReviewStatusApplyConfiguration) WithReason(value string) *ImageReviewStatusApplyConfiguration { + b.Reason = &value + return b +} + +// WithAuditAnnotations puts the entries into the AuditAnnotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the AuditAnnotations field, +// overwriting an existing map entries in AuditAnnotations field with the same key. 
+func (b *ImageReviewStatusApplyConfiguration) WithAuditAnnotations(entries map[string]string) *ImageReviewStatusApplyConfiguration { + if b.AuditAnnotations == nil && len(entries) > 0 { + b.AuditAnnotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.AuditAnnotations[k] = v + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go index 2ceb262217..43c9ae05a1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go +++ b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go @@ -39,6 +39,28 @@ func Parser() *typed.Parser { var parserOnce sync.Once var parser *typed.Parser var schemaYAML = typed.YAMLObject(`types: +- name: io.k8s.api.admissionregistration.v1.AuditAnnotation + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: valueExpression + type: + scalar: string + default: "" +- name: io.k8s.api.admissionregistration.v1.ExpressionWarning + map: + fields: + - name: fieldRef + type: + scalar: string + default: "" + - name: warning + type: + scalar: string + default: "" - name: io.k8s.api.admissionregistration.v1.MatchCondition map: fields: @@ -50,6 +72,31 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: io.k8s.api.admissionregistration.v1.MatchResources + map: + fields: + - name: excludeResourceRules + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.NamedRuleWithOperations + elementRelationship: atomic + - name: matchPolicy + type: + scalar: string + - name: namespaceSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: objectSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: resourceRules + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.NamedRuleWithOperations + elementRelationship: atomic + elementRelationship: atomic - name: io.k8s.api.admissionregistration.v1.MutatingWebhook map: fields: @@ -123,6 +170,69 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - name +- name: io.k8s.api.admissionregistration.v1.NamedRuleWithOperations + map: + fields: + - name: apiGroups + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: apiVersions + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: operations + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: resourceNames + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: resources + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: scope + type: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1.ParamKind + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1.ParamRef + map: + fields: + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: parameterNotFoundAction + type: + scalar: string + - name: selector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + elementRelationship: atomic - name: io.k8s.api.admissionregistration.v1.RuleWithOperations map: fields: @@ -170,6 +280,128 @@ var schemaYAML = typed.YAMLObject(`types: - name: port type: scalar: 
numeric +- name: io.k8s.api.admissionregistration.v1.TypeChecking + map: + fields: + - name: expressionWarnings + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.ExpressionWarning + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicy + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicySpec + default: {} + - name: status + type: + namedType: io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyStatus + default: {} +- name: io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyBinding + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyBindingSpec + default: {} +- name: io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyBindingSpec + map: + fields: + - name: matchResources + type: + namedType: io.k8s.api.admissionregistration.v1.MatchResources + - name: paramRef + type: + namedType: io.k8s.api.admissionregistration.v1.ParamRef + - name: policyName + type: + scalar: string + - name: validationActions + type: + list: + elementType: + scalar: string + elementRelationship: associative +- name: io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicySpec + map: + fields: + - name: auditAnnotations + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.AuditAnnotation + elementRelationship: atomic + - name: failurePolicy + type: + scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.MatchCondition + elementRelationship: associative + keys: + - name + - name: matchConstraints + type: + namedType: io.k8s.api.admissionregistration.v1.MatchResources + - name: paramKind + type: + namedType: io.k8s.api.admissionregistration.v1.ParamKind + - name: validations + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.Validation + elementRelationship: atomic + - name: variables + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.Variable + elementRelationship: associative + keys: + - name +- name: io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type + - name: observedGeneration + type: + scalar: numeric + - name: typeChecking + type: + namedType: io.k8s.api.admissionregistration.v1.TypeChecking - name: io.k8s.api.admissionregistration.v1.ValidatingWebhook map: fields: @@ -240,6 +472,34 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - name +- name: io.k8s.api.admissionregistration.v1.Validation + map: + fields: + - name: expression + type: + scalar: string + default: "" + - name: message + type: + scalar: string + - name: messageExpression + type: + scalar: string + - name: reason + type: + scalar: string +- name: io.k8s.api.admissionregistration.v1.Variable + map: + fields: + - name: expression + type: + scalar: string + default: "" + - name: 
name + type: + scalar: string + default: "" + elementRelationship: atomic - name: io.k8s.api.admissionregistration.v1.WebhookClientConfig map: fields: @@ -3599,6 +3859,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: completions type: scalar: numeric + - name: managedBy + type: + scalar: string - name: manualSelector type: scalar: boolean @@ -3617,6 +3880,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: selector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: successPolicy + type: + namedType: io.k8s.api.batch.v1.SuccessPolicy - name: suspend type: scalar: boolean @@ -3729,6 +3995,24 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.batch.v1.PodFailurePolicyOnPodConditionsPattern elementRelationship: atomic +- name: io.k8s.api.batch.v1.SuccessPolicy + map: + fields: + - name: rules + type: + list: + elementType: + namedType: io.k8s.api.batch.v1.SuccessPolicyRule + elementRelationship: atomic +- name: io.k8s.api.batch.v1.SuccessPolicyRule + map: + fields: + - name: succeededCount + type: + scalar: numeric + - name: succeededIndexes + type: + scalar: string - name: io.k8s.api.batch.v1.UncountedTerminatedPods map: fields: @@ -4072,10 +4356,16 @@ var schemaYAML = typed.YAMLObject(`types: - name: leaseTransitions type: scalar: numeric + - name: preferredHolder + type: + scalar: string - name: renewTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime -- name: io.k8s.api.coordination.v1beta1.Lease + - name: strategy + type: + scalar: string +- name: io.k8s.api.coordination.v1alpha1.LeaseCandidate map: fields: - name: apiVersion @@ -4090,9 +4380,51 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.coordination.v1beta1.LeaseSpec + namedType: io.k8s.api.coordination.v1alpha1.LeaseCandidateSpec default: {} -- name: io.k8s.api.coordination.v1beta1.LeaseSpec +- name: io.k8s.api.coordination.v1alpha1.LeaseCandidateSpec + map: + fields: + - name: binaryVersion + type: + scalar: string + - name: emulationVersion + type: + scalar: string + - name: leaseName + type: + scalar: string + default: "" + - name: pingTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime + - name: preferredStrategies + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: renewTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime +- name: io.k8s.api.coordination.v1beta1.Lease + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.coordination.v1beta1.LeaseSpec + default: {} +- name: io.k8s.api.coordination.v1beta1.LeaseSpec map: fields: - name: acquireTime @@ -4107,9 +4439,15 @@ var schemaYAML = typed.YAMLObject(`types: - name: leaseTransitions type: scalar: numeric + - name: preferredHolder + type: + scalar: string - name: renewTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime + - name: strategy + type: + scalar: string - name: io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource map: fields: @@ -4138,6 +4476,21 @@ var schemaYAML = typed.YAMLObject(`types: - name: podAntiAffinity type: namedType: io.k8s.api.core.v1.PodAntiAffinity +- name: io.k8s.api.core.v1.AppArmorProfile + map: + fields: + - name: localhostProfile + type: + scalar: string + - name: type + type: + scalar: string + default: "" + unions: + - 
discriminator: type + fields: + - fieldName: localhostProfile + discriminatorValue: LocalhostProfile - name: io.k8s.api.core.v1.AttachedVolume map: fields: @@ -4155,6 +4508,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: cachingMode type: scalar: string + default: ReadWrite - name: diskName type: scalar: string @@ -4166,12 +4520,15 @@ var schemaYAML = typed.YAMLObject(`types: - name: fsType type: scalar: string + default: ext4 - name: kind type: scalar: string + default: Shared - name: readOnly type: scalar: boolean + default: false - name: io.k8s.api.core.v1.AzureFilePersistentVolumeSource map: fields: @@ -4356,15 +4713,6 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.core.v1.ClaimSource - map: - fields: - - name: resourceClaimName - type: - scalar: string - - name: resourceClaimTemplateName - type: - scalar: string - name: io.k8s.api.core.v1.ClientIPConfig map: fields: @@ -4460,6 +4808,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: name type: scalar: string + default: "" - name: optional type: scalar: boolean @@ -4473,6 +4822,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: name type: scalar: string + default: "" - name: optional type: scalar: boolean @@ -4510,6 +4860,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: name type: scalar: string + default: "" - name: optional type: scalar: boolean @@ -4528,6 +4879,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: name type: scalar: string + default: "" - name: optional type: scalar: boolean @@ -4744,6 +5096,14 @@ var schemaYAML = typed.YAMLObject(`types: map: elementType: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: allocatedResourcesStatus + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ResourceStatus + elementRelationship: associative + keys: + - name - name: containerID type: scalar: string @@ -4781,6 +5141,23 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.api.core.v1.ContainerState default: {} + - name: user + type: + namedType: io.k8s.api.core.v1.ContainerUser + - name: volumeMounts + type: + list: + elementType: + namedType: io.k8s.api.core.v1.VolumeMountStatus + elementRelationship: associative + keys: + - mountPath +- name: io.k8s.api.core.v1.ContainerUser + map: + fields: + - name: linux + type: + namedType: io.k8s.api.core.v1.LinuxContainerUser - name: io.k8s.api.core.v1.DaemonEndpoint map: fields: @@ -5343,12 +5720,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: ip type: scalar: string + default: "" - name: io.k8s.api.core.v1.HostIP map: fields: - name: ip type: scalar: string + default: "" - name: io.k8s.api.core.v1.HostPathVolumeSource map: fields: @@ -5381,6 +5760,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: iscsiInterface type: scalar: string + default: default - name: lun type: scalar: numeric @@ -5423,6 +5803,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: iscsiInterface type: scalar: string + default: default - name: lun type: scalar: numeric @@ -5443,6 +5824,15 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: io.k8s.api.core.v1.ImageVolumeSource + map: + fields: + - name: pullPolicy + type: + scalar: string + - name: reference + type: + scalar: string - name: io.k8s.api.core.v1.KeyToPath map: fields: @@ -5539,6 +5929,23 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.core.v1.LimitRangeItem elementRelationship: atomic +- name: io.k8s.api.core.v1.LinuxContainerUser + map: + fields: + - name: 
gid + type: + scalar: numeric + default: 0 + - name: supplementalGroups + type: + list: + elementType: + scalar: numeric + elementRelationship: atomic + - name: uid + type: + scalar: numeric + default: 0 - name: io.k8s.api.core.v1.LoadBalancerIngress map: fields: @@ -5572,6 +5979,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: name type: scalar: string + default: "" elementRelationship: atomic - name: io.k8s.api.core.v1.LocalVolumeSource map: @@ -5766,6 +6174,31 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.api.core.v1.DaemonEndpoint default: {} +- name: io.k8s.api.core.v1.NodeFeatures + map: + fields: + - name: supplementalGroupsPolicy + type: + scalar: boolean +- name: io.k8s.api.core.v1.NodeRuntimeHandler + map: + fields: + - name: features + type: + namedType: io.k8s.api.core.v1.NodeRuntimeHandlerFeatures + - name: name + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.NodeRuntimeHandlerFeatures + map: + fields: + - name: recursiveReadOnlyMounts + type: + scalar: boolean + - name: userNamespaces + type: + scalar: boolean - name: io.k8s.api.core.v1.NodeSelector map: fields: @@ -5875,6 +6308,9 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.api.core.v1.NodeDaemonEndpoints default: {} + - name: features + type: + namedType: io.k8s.api.core.v1.NodeFeatures - name: images type: list: @@ -5888,6 +6324,12 @@ var schemaYAML = typed.YAMLObject(`types: - name: phase type: scalar: string + - name: runtimeHandlers + type: + list: + elementType: + namedType: io.k8s.api.core.v1.NodeRuntimeHandler + elementRelationship: atomic - name: volumesAttached type: list: @@ -6412,6 +6854,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: ip type: scalar: string + default: "" - name: io.k8s.api.core.v1.PodOS map: fields: @@ -6433,10 +6876,12 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" - - name: source + - name: resourceClaimName type: - namedType: io.k8s.api.core.v1.ClaimSource - default: {} + scalar: string + - name: resourceClaimTemplateName + type: + scalar: string - name: io.k8s.api.core.v1.PodResourceClaimStatus map: fields: @@ -6457,6 +6902,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: io.k8s.api.core.v1.PodSecurityContext map: fields: + - name: appArmorProfile + type: + namedType: io.k8s.api.core.v1.AppArmorProfile - name: fsGroup type: scalar: numeric @@ -6484,6 +6932,9 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: numeric elementRelationship: atomic + - name: supplementalGroupsPolicy + type: + scalar: string - name: sysctls type: list: @@ -6895,6 +7346,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: keyring type: scalar: string + default: /etc/ceph/keyring - name: monitors type: list: @@ -6904,6 +7356,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: pool type: scalar: string + default: rbd - name: readOnly type: scalar: boolean @@ -6913,6 +7366,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: user type: scalar: string + default: admin - name: io.k8s.api.core.v1.RBDVolumeSource map: fields: @@ -6926,6 +7380,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: keyring type: scalar: string + default: /etc/ceph/keyring - name: monitors type: list: @@ -6935,6 +7390,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: pool type: scalar: string + default: rbd - name: readOnly type: scalar: boolean @@ -6944,6 +7400,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: user type: scalar: string + default: admin - name: 
io.k8s.api.core.v1.ReplicationController map: fields: @@ -7037,6 +7494,9 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" + - name: request + type: + scalar: string - name: io.k8s.api.core.v1.ResourceFieldSelector map: fields: @@ -7051,6 +7511,16 @@ var schemaYAML = typed.YAMLObject(`types: scalar: string default: "" elementRelationship: atomic +- name: io.k8s.api.core.v1.ResourceHealth + map: + fields: + - name: health + type: + scalar: string + - name: resourceID + type: + scalar: string + default: "" - name: io.k8s.api.core.v1.ResourceQuota map: fields: @@ -7123,6 +7593,21 @@ var schemaYAML = typed.YAMLObject(`types: map: elementType: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.core.v1.ResourceStatus + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: resources + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ResourceHealth + elementRelationship: associative + keys: + - resourceID - name: io.k8s.api.core.v1.SELinuxOptions map: fields: @@ -7144,6 +7629,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: fsType type: scalar: string + default: xfs - name: gateway type: scalar: string @@ -7163,6 +7649,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: storageMode type: scalar: string + default: ThinProvisioned - name: storagePool type: scalar: string @@ -7179,6 +7666,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: fsType type: scalar: string + default: xfs - name: gateway type: scalar: string @@ -7198,6 +7686,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: storageMode type: scalar: string + default: ThinProvisioned - name: storagePool type: scalar: string @@ -7285,6 +7774,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: name type: scalar: string + default: "" - name: optional type: scalar: boolean @@ -7298,6 +7788,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: name type: scalar: string + default: "" - name: optional type: scalar: boolean @@ -7314,6 +7805,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: name type: scalar: string + default: "" - name: optional type: scalar: boolean @@ -7351,6 +7843,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: allowPrivilegeEscalation type: scalar: boolean + - name: appArmorProfile + type: + namedType: io.k8s.api.core.v1.AppArmorProfile - name: capabilities type: namedType: io.k8s.api.core.v1.Capabilities @@ -7546,6 +8041,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: sessionAffinityConfig type: namedType: io.k8s.api.core.v1.SessionAffinityConfig + - name: trafficDistribution + type: + scalar: string - name: type type: scalar: string @@ -7810,6 +8308,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: hostPath type: namedType: io.k8s.api.core.v1.HostPathVolumeSource + - name: image + type: + namedType: io.k8s.api.core.v1.ImageVolumeSource - name: iscsi type: namedType: io.k8s.api.core.v1.ISCSIVolumeSource @@ -7878,12 +8379,32 @@ var schemaYAML = typed.YAMLObject(`types: - name: readOnly type: scalar: boolean + - name: recursiveReadOnly + type: + scalar: string - name: subPath type: scalar: string - name: subPathExpr type: scalar: string +- name: io.k8s.api.core.v1.VolumeMountStatus + map: + fields: + - name: mountPath + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: readOnly + type: + scalar: boolean + - name: recursiveReadOnly + type: + scalar: string - name: io.k8s.api.core.v1.VolumeNodeAffinity map: fields: @@ -10572,6 +11093,7 @@ var 
schemaYAML = typed.YAMLObject(`types: - name: number type: scalar: numeric + elementRelationship: atomic - name: io.k8s.api.networking.v1alpha1.IPAddress map: fields: @@ -10673,6 +11195,29 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.networking.v1beta1.HTTPIngressPath elementRelationship: atomic +- name: io.k8s.api.networking.v1beta1.IPAddress + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.networking.v1beta1.IPAddressSpec + default: {} +- name: io.k8s.api.networking.v1beta1.IPAddressSpec + map: + fields: + - name: parentRef + type: + namedType: io.k8s.api.networking.v1beta1.ParentReference - name: io.k8s.api.networking.v1beta1.Ingress map: fields: @@ -10839,24 +11384,27 @@ var schemaYAML = typed.YAMLObject(`types: - name: secretName type: scalar: string -- name: io.k8s.api.node.v1.Overhead +- name: io.k8s.api.networking.v1beta1.ParentReference map: fields: - - name: podFixed + - name: group type: - map: - elementType: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity -- name: io.k8s.api.node.v1.RuntimeClass + scalar: string + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: resource + type: + scalar: string +- name: io.k8s.api.networking.v1beta1.ServiceCIDR map: fields: - name: apiVersion type: scalar: string - - name: handler - type: - scalar: string - default: "" - name: kind type: scalar: string @@ -10864,33 +11412,86 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta default: {} - - name: overhead + - name: spec type: - namedType: io.k8s.api.node.v1.Overhead - - name: scheduling + namedType: io.k8s.api.networking.v1beta1.ServiceCIDRSpec + default: {} + - name: status type: - namedType: io.k8s.api.node.v1.Scheduling -- name: io.k8s.api.node.v1.Scheduling + namedType: io.k8s.api.networking.v1beta1.ServiceCIDRStatus + default: {} +- name: io.k8s.api.networking.v1beta1.ServiceCIDRSpec map: fields: - - name: nodeSelector - type: - map: - elementType: - scalar: string - elementRelationship: atomic - - name: tolerations + - name: cidrs type: list: elementType: - namedType: io.k8s.api.core.v1.Toleration + scalar: string elementRelationship: atomic -- name: io.k8s.api.node.v1alpha1.Overhead +- name: io.k8s.api.networking.v1beta1.ServiceCIDRStatus map: fields: - - name: podFixed + - name: conditions type: - map: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type +- name: io.k8s.api.node.v1.Overhead + map: + fields: + - name: podFixed + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.node.v1.RuntimeClass + map: + fields: + - name: apiVersion + type: + scalar: string + - name: handler + type: + scalar: string + default: "" + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: overhead + type: + namedType: io.k8s.api.node.v1.Overhead + - name: scheduling + type: + namedType: io.k8s.api.node.v1.Scheduling +- name: io.k8s.api.node.v1.Scheduling + map: + fields: + - name: nodeSelector + type: + map: + elementType: + scalar: string + elementRelationship: atomic + - name: tolerations + type: + list: + elementType: + 
namedType: io.k8s.api.core.v1.Toleration + elementRelationship: atomic +- name: io.k8s.api.node.v1alpha1.Overhead + map: + fields: + - name: podFixed + type: + map: elementType: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - name: io.k8s.api.node.v1alpha1.RuntimeClass @@ -11643,22 +12244,129 @@ var schemaYAML = typed.YAMLObject(`types: - name: namespace type: scalar: string -- name: io.k8s.api.resource.v1alpha2.AllocationResult +- name: io.k8s.api.resource.v1alpha3.AllocationResult map: fields: - - name: availableOnNodes + - name: controller + type: + scalar: string + - name: devices + type: + namedType: io.k8s.api.resource.v1alpha3.DeviceAllocationResult + default: {} + - name: nodeSelector type: namedType: io.k8s.api.core.v1.NodeSelector - - name: resourceHandles +- name: io.k8s.api.resource.v1alpha3.BasicDevice + map: + fields: + - name: attributes + type: + map: + elementType: + namedType: io.k8s.api.resource.v1alpha3.DeviceAttribute + - name: capacity + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.resource.v1alpha3.CELDeviceSelector + map: + fields: + - name: expression + type: + scalar: string + default: "" +- name: io.k8s.api.resource.v1alpha3.Device + map: + fields: + - name: basic + type: + namedType: io.k8s.api.resource.v1alpha3.BasicDevice + - name: name + type: + scalar: string + default: "" +- name: io.k8s.api.resource.v1alpha3.DeviceAllocationConfiguration + map: + fields: + - name: opaque + type: + namedType: io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration + - name: requests + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: source + type: + scalar: string + default: "" +- name: io.k8s.api.resource.v1alpha3.DeviceAllocationResult + map: + fields: + - name: config type: list: elementType: - namedType: io.k8s.api.resource.v1alpha2.ResourceHandle + namedType: io.k8s.api.resource.v1alpha3.DeviceAllocationConfiguration elementRelationship: atomic - - name: shareable + - name: results + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.DeviceRequestAllocationResult + elementRelationship: atomic +- name: io.k8s.api.resource.v1alpha3.DeviceAttribute + map: + fields: + - name: bool type: scalar: boolean -- name: io.k8s.api.resource.v1alpha2.PodSchedulingContext + - name: int + type: + scalar: numeric + - name: string + type: + scalar: string + - name: version + type: + scalar: string +- name: io.k8s.api.resource.v1alpha3.DeviceClaim + map: + fields: + - name: config + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.DeviceClaimConfiguration + elementRelationship: atomic + - name: constraints + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.DeviceConstraint + elementRelationship: atomic + - name: requests + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.DeviceRequest + elementRelationship: atomic +- name: io.k8s.api.resource.v1alpha3.DeviceClaimConfiguration + map: + fields: + - name: opaque + type: + namedType: io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration + - name: requests + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.resource.v1alpha3.DeviceClass map: fields: - name: apiVersion @@ -11673,13 +12381,128 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec + namedType: io.k8s.api.resource.v1alpha3.DeviceClassSpec + default: {} +- 
name: io.k8s.api.resource.v1alpha3.DeviceClassConfiguration + map: + fields: + - name: opaque + type: + namedType: io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration +- name: io.k8s.api.resource.v1alpha3.DeviceClassSpec + map: + fields: + - name: config + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.DeviceClassConfiguration + elementRelationship: atomic + - name: selectors + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.DeviceSelector + elementRelationship: atomic + - name: suitableNodes + type: + namedType: io.k8s.api.core.v1.NodeSelector +- name: io.k8s.api.resource.v1alpha3.DeviceConstraint + map: + fields: + - name: matchAttribute + type: + scalar: string + - name: requests + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.resource.v1alpha3.DeviceRequest + map: + fields: + - name: adminAccess + type: + scalar: boolean + default: false + - name: allocationMode + type: + scalar: string + - name: count + type: + scalar: numeric + - name: deviceClassName + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: selectors + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.DeviceSelector + elementRelationship: atomic +- name: io.k8s.api.resource.v1alpha3.DeviceRequestAllocationResult + map: + fields: + - name: device + type: + scalar: string + default: "" + - name: driver + type: + scalar: string + default: "" + - name: pool + type: + scalar: string + default: "" + - name: request + type: + scalar: string + default: "" +- name: io.k8s.api.resource.v1alpha3.DeviceSelector + map: + fields: + - name: cel + type: + namedType: io.k8s.api.resource.v1alpha3.CELDeviceSelector +- name: io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration + map: + fields: + - name: driver + type: + scalar: string + default: "" + - name: parameters + type: + namedType: __untyped_atomic_ +- name: io.k8s.api.resource.v1alpha3.PodSchedulingContext + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.resource.v1alpha3.PodSchedulingContextSpec default: {} - name: status type: - namedType: io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus + namedType: io.k8s.api.resource.v1alpha3.PodSchedulingContextStatus default: {} -- name: io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec +- name: io.k8s.api.resource.v1alpha3.PodSchedulingContextSpec map: fields: - name: potentialNodes @@ -11691,18 +12514,18 @@ var schemaYAML = typed.YAMLObject(`types: - name: selectedNode type: scalar: string -- name: io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus +- name: io.k8s.api.resource.v1alpha3.PodSchedulingContextStatus map: fields: - name: resourceClaims type: list: elementType: - namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus + namedType: io.k8s.api.resource.v1alpha3.ResourceClaimSchedulingStatus elementRelationship: associative keys: - name -- name: io.k8s.api.resource.v1alpha2.ResourceClaim +- name: io.k8s.api.resource.v1alpha3.ResourceClaim map: fields: - name: apiVersion @@ -11717,13 +12540,13 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSpec + namedType: io.k8s.api.resource.v1alpha3.ResourceClaimSpec default: {} - name: status type: - namedType: 
io.k8s.api.resource.v1alpha2.ResourceClaimStatus + namedType: io.k8s.api.resource.v1alpha3.ResourceClaimStatus default: {} -- name: io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference +- name: io.k8s.api.resource.v1alpha3.ResourceClaimConsumerReference map: fields: - name: apiGroup @@ -11741,66 +12564,47 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference +- name: io.k8s.api.resource.v1alpha3.ResourceClaimSchedulingStatus map: fields: - - name: apiGroup - type: - scalar: string - - name: kind - type: - scalar: string - default: "" - name: name type: scalar: string default: "" -- name: io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus - map: - fields: - - name: name - type: - scalar: string - name: unsuitableNodes type: list: elementType: scalar: string elementRelationship: atomic -- name: io.k8s.api.resource.v1alpha2.ResourceClaimSpec +- name: io.k8s.api.resource.v1alpha3.ResourceClaimSpec map: fields: - - name: allocationMode + - name: controller type: scalar: string - - name: parametersRef + - name: devices type: - namedType: io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference - - name: resourceClassName - type: - scalar: string - default: "" -- name: io.k8s.api.resource.v1alpha2.ResourceClaimStatus + namedType: io.k8s.api.resource.v1alpha3.DeviceClaim + default: {} +- name: io.k8s.api.resource.v1alpha3.ResourceClaimStatus map: fields: - name: allocation type: - namedType: io.k8s.api.resource.v1alpha2.AllocationResult + namedType: io.k8s.api.resource.v1alpha3.AllocationResult - name: deallocationRequested type: scalar: boolean - - name: driverName - type: - scalar: string - name: reservedFor type: list: elementType: - namedType: io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference + namedType: io.k8s.api.resource.v1alpha3.ResourceClaimConsumerReference elementRelationship: associative keys: - uid -- name: io.k8s.api.resource.v1alpha2.ResourceClaimTemplate +- name: io.k8s.api.resource.v1alpha3.ResourceClaimTemplate map: fields: - name: apiVersion @@ -11815,9 +12619,9 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec + namedType: io.k8s.api.resource.v1alpha3.ResourceClaimTemplateSpec default: {} -- name: io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec +- name: io.k8s.api.resource.v1alpha3.ResourceClaimTemplateSpec map: fields: - name: metadata @@ -11826,18 +12630,29 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSpec + namedType: io.k8s.api.resource.v1alpha3.ResourceClaimSpec default: {} -- name: io.k8s.api.resource.v1alpha2.ResourceClass +- name: io.k8s.api.resource.v1alpha3.ResourcePool map: fields: - - name: apiVersion + - name: generation type: - scalar: string - - name: driverName + scalar: numeric + default: 0 + - name: name type: scalar: string default: "" + - name: resourceSliceCount + type: + scalar: numeric + default: 0 +- name: io.k8s.api.resource.v1alpha3.ResourceSlice + map: + fields: + - name: apiVersion + type: + scalar: string - name: kind type: scalar: string @@ -11845,38 +12660,36 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta default: {} - - name: parametersRef - type: - namedType: io.k8s.api.resource.v1alpha2.ResourceClassParametersReference - - name: suitableNodes + - name: spec type: - namedType: 
io.k8s.api.core.v1.NodeSelector -- name: io.k8s.api.resource.v1alpha2.ResourceClassParametersReference + namedType: io.k8s.api.resource.v1alpha3.ResourceSliceSpec + default: {} +- name: io.k8s.api.resource.v1alpha3.ResourceSliceSpec map: fields: - - name: apiGroup + - name: allNodes type: - scalar: string - - name: kind + scalar: boolean + - name: devices type: - scalar: string - default: "" - - name: name + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.Device + elementRelationship: atomic + - name: driver type: scalar: string default: "" - - name: namespace + - name: nodeName type: scalar: string -- name: io.k8s.api.resource.v1alpha2.ResourceHandle - map: - fields: - - name: data + - name: nodeSelector type: - scalar: string - - name: driverName + namedType: io.k8s.api.core.v1.NodeSelector + - name: pool type: - scalar: string + namedType: io.k8s.api.resource.v1alpha3.ResourcePool + default: {} - name: io.k8s.api.scheduling.v1.PriorityClass map: fields: @@ -12570,6 +13383,28 @@ var schemaYAML = typed.YAMLObject(`types: - name: detachError type: namedType: io.k8s.api.storage.v1beta1.VolumeError +- name: io.k8s.api.storage.v1beta1.VolumeAttributesClass + map: + fields: + - name: apiVersion + type: + scalar: string + - name: driverName + type: + scalar: string + default: "" + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: parameters + type: + map: + elementType: + scalar: string - name: io.k8s.api.storage.v1beta1.VolumeError map: fields: @@ -12585,6 +13420,83 @@ var schemaYAML = typed.YAMLObject(`types: - name: count type: scalar: numeric +- name: io.k8s.api.storagemigration.v1alpha1.GroupVersionResource + map: + fields: + - name: group + type: + scalar: string + - name: resource + type: + scalar: string + - name: version + type: + scalar: string +- name: io.k8s.api.storagemigration.v1alpha1.MigrationCondition + map: + fields: + - name: lastUpdateTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: message + type: + scalar: string + - name: reason + type: + scalar: string + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: io.k8s.api.storagemigration.v1alpha1.StorageVersionMigration + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.storagemigration.v1alpha1.StorageVersionMigrationSpec + default: {} + - name: status + type: + namedType: io.k8s.api.storagemigration.v1alpha1.StorageVersionMigrationStatus + default: {} +- name: io.k8s.api.storagemigration.v1alpha1.StorageVersionMigrationSpec + map: + fields: + - name: continueToken + type: + scalar: string + - name: resource + type: + namedType: io.k8s.api.storagemigration.v1alpha1.GroupVersionResource + default: {} +- name: io.k8s.api.storagemigration.v1alpha1.StorageVersionMigrationStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.api.storagemigration.v1alpha1.MigrationCondition + elementRelationship: associative + keys: + - type + - name: resourceVersion + type: + scalar: string - name: io.k8s.apimachinery.pkg.api.resource.Quantity scalar: untyped - name: io.k8s.apimachinery.pkg.apis.meta.v1.Condition diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go 
b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go index c84102cdde..466aaebb61 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ConditionApplyConfiguration represents an declarative configuration of the Condition type for use +// ConditionApplyConfiguration represents a declarative configuration of the Condition type for use // with apply. type ConditionApplyConfiguration struct { Type *string `json:"type,omitempty"` @@ -33,7 +33,7 @@ type ConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// ConditionApplyConfiguration constructs an declarative configuration of the Condition type for use with +// ConditionApplyConfiguration constructs a declarative configuration of the Condition type for use with // apply. func Condition() *ConditionApplyConfiguration { return &ConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go index 7a1d23114d..313bb9784d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go @@ -22,7 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DeleteOptionsApplyConfiguration represents an declarative configuration of the DeleteOptions type for use +// DeleteOptionsApplyConfiguration represents a declarative configuration of the DeleteOptions type for use // with apply. type DeleteOptionsApplyConfiguration struct { TypeMetaApplyConfiguration `json:",inline"` @@ -33,7 +33,7 @@ type DeleteOptionsApplyConfiguration struct { DryRun []string `json:"dryRun,omitempty"` } -// DeleteOptionsApplyConfiguration constructs an declarative configuration of the DeleteOptions type for use with +// DeleteOptionsApplyConfiguration constructs a declarative configuration of the DeleteOptions type for use with // apply. func DeleteOptions() *DeleteOptionsApplyConfiguration { b := &DeleteOptionsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go index 6d24bc363b..1f33c94e0c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// LabelSelectorApplyConfiguration represents an declarative configuration of the LabelSelector type for use +// LabelSelectorApplyConfiguration represents a declarative configuration of the LabelSelector type for use // with apply. type LabelSelectorApplyConfiguration struct { MatchLabels map[string]string `json:"matchLabels,omitempty"` MatchExpressions []LabelSelectorRequirementApplyConfiguration `json:"matchExpressions,omitempty"` } -// LabelSelectorApplyConfiguration constructs an declarative configuration of the LabelSelector type for use with +// LabelSelectorApplyConfiguration constructs a declarative configuration of the LabelSelector type for use with // apply. 
func LabelSelector() *LabelSelectorApplyConfiguration { return &LabelSelectorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go index ff70f365e6..bd9db9659b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// LabelSelectorRequirementApplyConfiguration represents an declarative configuration of the LabelSelectorRequirement type for use +// LabelSelectorRequirementApplyConfiguration represents a declarative configuration of the LabelSelectorRequirement type for use // with apply. type LabelSelectorRequirementApplyConfiguration struct { Key *string `json:"key,omitempty"` @@ -30,7 +30,7 @@ type LabelSelectorRequirementApplyConfiguration struct { Values []string `json:"values,omitempty"` } -// LabelSelectorRequirementApplyConfiguration constructs an declarative configuration of the LabelSelectorRequirement type for use with +// LabelSelectorRequirementApplyConfiguration constructs a declarative configuration of the LabelSelectorRequirement type for use with // apply. func LabelSelectorRequirement() *LabelSelectorRequirementApplyConfiguration { return &LabelSelectorRequirementApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go index f4d7e26812..6913df8226 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ManagedFieldsEntryApplyConfiguration represents an declarative configuration of the ManagedFieldsEntry type for use +// ManagedFieldsEntryApplyConfiguration represents a declarative configuration of the ManagedFieldsEntry type for use // with apply. type ManagedFieldsEntryApplyConfiguration struct { Manager *string `json:"manager,omitempty"` @@ -34,7 +34,7 @@ type ManagedFieldsEntryApplyConfiguration struct { Subresource *string `json:"subresource,omitempty"` } -// ManagedFieldsEntryApplyConfiguration constructs an declarative configuration of the ManagedFieldsEntry type for use with +// ManagedFieldsEntryApplyConfiguration constructs a declarative configuration of the ManagedFieldsEntry type for use with // apply. func ManagedFieldsEntry() *ManagedFieldsEntryApplyConfiguration { return &ManagedFieldsEntryApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go index 9b290e9680..a9419975ef 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go @@ -23,7 +23,7 @@ import ( types "k8s.io/apimachinery/pkg/types" ) -// ObjectMetaApplyConfiguration represents an declarative configuration of the ObjectMeta type for use +// ObjectMetaApplyConfiguration represents a declarative configuration of the ObjectMeta type for use // with apply. 
type ObjectMetaApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -41,7 +41,7 @@ type ObjectMetaApplyConfiguration struct { Finalizers []string `json:"finalizers,omitempty"` } -// ObjectMetaApplyConfiguration constructs an declarative configuration of the ObjectMeta type for use with +// ObjectMetaApplyConfiguration constructs a declarative configuration of the ObjectMeta type for use with // apply. func ObjectMeta() *ObjectMetaApplyConfiguration { return &ObjectMetaApplyConfiguration{} @@ -169,3 +169,8 @@ func (b *ObjectMetaApplyConfiguration) WithFinalizers(values ...string) *ObjectM } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ObjectMetaApplyConfiguration) GetName() *string { + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/ownerreference.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/ownerreference.go index b3117d6a4b..2776152322 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/ownerreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/ownerreference.go @@ -22,7 +22,7 @@ import ( types "k8s.io/apimachinery/pkg/types" ) -// OwnerReferenceApplyConfiguration represents an declarative configuration of the OwnerReference type for use +// OwnerReferenceApplyConfiguration represents a declarative configuration of the OwnerReference type for use // with apply. type OwnerReferenceApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` @@ -33,7 +33,7 @@ type OwnerReferenceApplyConfiguration struct { BlockOwnerDeletion *bool `json:"blockOwnerDeletion,omitempty"` } -// OwnerReferenceApplyConfiguration constructs an declarative configuration of the OwnerReference type for use with +// OwnerReferenceApplyConfiguration constructs a declarative configuration of the OwnerReference type for use with // apply. func OwnerReference() *OwnerReferenceApplyConfiguration { return &OwnerReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/preconditions.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/preconditions.go index f627733f1e..8f8b6c6b3a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/preconditions.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/preconditions.go @@ -22,14 +22,14 @@ import ( types "k8s.io/apimachinery/pkg/types" ) -// PreconditionsApplyConfiguration represents an declarative configuration of the Preconditions type for use +// PreconditionsApplyConfiguration represents a declarative configuration of the Preconditions type for use // with apply. type PreconditionsApplyConfiguration struct { UID *types.UID `json:"uid,omitempty"` ResourceVersion *string `json:"resourceVersion,omitempty"` } -// PreconditionsApplyConfiguration constructs an declarative configuration of the Preconditions type for use with +// PreconditionsApplyConfiguration constructs a declarative configuration of the Preconditions type for use with // apply. func Preconditions() *PreconditionsApplyConfiguration { return &PreconditionsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/typemeta.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/typemeta.go index 877b0890e8..979044384c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/typemeta.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/typemeta.go @@ -18,14 +18,14 @@ limitations under the License. 
package v1 -// TypeMetaApplyConfiguration represents an declarative configuration of the TypeMeta type for use +// TypeMetaApplyConfiguration represents a declarative configuration of the TypeMeta type for use // with apply. type TypeMetaApplyConfiguration struct { Kind *string `json:"kind,omitempty"` APIVersion *string `json:"apiVersion,omitempty"` } -// TypeMetaApplyConfiguration constructs an declarative configuration of the TypeMeta type for use with +// TypeMetaApplyConfiguration constructs a declarative configuration of the TypeMeta type for use with // apply. func TypeMeta() *TypeMetaApplyConfiguration { return &TypeMetaApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go index 07b6a67f6a..e39670f295 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/networking/v1" ) -// HTTPIngressPathApplyConfiguration represents an declarative configuration of the HTTPIngressPath type for use +// HTTPIngressPathApplyConfiguration represents a declarative configuration of the HTTPIngressPath type for use // with apply. type HTTPIngressPathApplyConfiguration struct { Path *string `json:"path,omitempty"` @@ -30,7 +30,7 @@ type HTTPIngressPathApplyConfiguration struct { Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"` } -// HTTPIngressPathApplyConfiguration constructs an declarative configuration of the HTTPIngressPath type for use with +// HTTPIngressPathApplyConfiguration constructs a declarative configuration of the HTTPIngressPath type for use with // apply. func HTTPIngressPath() *HTTPIngressPathApplyConfiguration { return &HTTPIngressPathApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingressrulevalue.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingressrulevalue.go index fef529d696..ad9a7a6771 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingressrulevalue.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingressrulevalue.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// HTTPIngressRuleValueApplyConfiguration represents an declarative configuration of the HTTPIngressRuleValue type for use +// HTTPIngressRuleValueApplyConfiguration represents a declarative configuration of the HTTPIngressRuleValue type for use // with apply. type HTTPIngressRuleValueApplyConfiguration struct { Paths []HTTPIngressPathApplyConfiguration `json:"paths,omitempty"` } -// HTTPIngressRuleValueApplyConfiguration constructs an declarative configuration of the HTTPIngressRuleValue type for use with +// HTTPIngressRuleValueApplyConfiguration constructs a declarative configuration of the HTTPIngressRuleValue type for use with // apply. 
func HTTPIngressRuleValue() *HTTPIngressRuleValueApplyConfiguration { return &HTTPIngressRuleValueApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go index b5146902d4..607c26e943 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// IngressApplyConfiguration represents an declarative configuration of the Ingress type for use +// IngressApplyConfiguration represents a declarative configuration of the Ingress type for use // with apply. type IngressApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type IngressApplyConfiguration struct { Status *IngressStatusApplyConfiguration `json:"status,omitempty"` } -// Ingress constructs an declarative configuration of the Ingress type for use with +// Ingress constructs a declarative configuration of the Ingress type for use with // apply. func Ingress(name, namespace string) *IngressApplyConfiguration { b := &IngressApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *IngressApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressbackend.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressbackend.go index 5757135991..b014b7beef 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressbackend.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressbackend.go @@ -22,14 +22,14 @@ import ( corev1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// IngressBackendApplyConfiguration represents an declarative configuration of the IngressBackend type for use +// IngressBackendApplyConfiguration represents a declarative configuration of the IngressBackend type for use // with apply. type IngressBackendApplyConfiguration struct { Service *IngressServiceBackendApplyConfiguration `json:"service,omitempty"` Resource *corev1.TypedLocalObjectReferenceApplyConfiguration `json:"resource,omitempty"` } -// IngressBackendApplyConfiguration constructs an declarative configuration of the IngressBackend type for use with +// IngressBackendApplyConfiguration constructs a declarative configuration of the IngressBackend type for use with // apply. func IngressBackend() *IngressBackendApplyConfiguration { return &IngressBackendApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go index e33d0b2d9f..14acc7dbd8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// IngressClassApplyConfiguration represents an declarative configuration of the IngressClass type for use +// IngressClassApplyConfiguration represents a declarative configuration of the IngressClass type for use // with apply. 
type IngressClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type IngressClassApplyConfiguration struct { Spec *IngressClassSpecApplyConfiguration `json:"spec,omitempty"` } -// IngressClass constructs an declarative configuration of the IngressClass type for use with +// IngressClass constructs a declarative configuration of the IngressClass type for use with // apply. func IngressClass(name string) *IngressClassApplyConfiguration { b := &IngressClassApplyConfiguration{} @@ -245,3 +245,9 @@ func (b *IngressClassApplyConfiguration) WithSpec(value *IngressClassSpecApplyCo b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *IngressClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassparametersreference.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassparametersreference.go index a020d3a8df..0dba1ebc5d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassparametersreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassparametersreference.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// IngressClassParametersReferenceApplyConfiguration represents an declarative configuration of the IngressClassParametersReference type for use +// IngressClassParametersReferenceApplyConfiguration represents a declarative configuration of the IngressClassParametersReference type for use // with apply. type IngressClassParametersReferenceApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -28,7 +28,7 @@ type IngressClassParametersReferenceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` } -// IngressClassParametersReferenceApplyConfiguration constructs an declarative configuration of the IngressClassParametersReference type for use with +// IngressClassParametersReferenceApplyConfiguration constructs a declarative configuration of the IngressClassParametersReference type for use with // apply. func IngressClassParametersReference() *IngressClassParametersReferenceApplyConfiguration { return &IngressClassParametersReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassspec.go index ec0423e708..23e8484344 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassspec.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// IngressClassSpecApplyConfiguration represents an declarative configuration of the IngressClassSpec type for use +// IngressClassSpecApplyConfiguration represents a declarative configuration of the IngressClassSpec type for use // with apply. type IngressClassSpecApplyConfiguration struct { Controller *string `json:"controller,omitempty"` Parameters *IngressClassParametersReferenceApplyConfiguration `json:"parameters,omitempty"` } -// IngressClassSpecApplyConfiguration constructs an declarative configuration of the IngressClassSpec type for use with +// IngressClassSpecApplyConfiguration constructs a declarative configuration of the IngressClassSpec type for use with // apply. 
func IngressClassSpec() *IngressClassSpecApplyConfiguration { return &IngressClassSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go index 444275a127..d0feb44da4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// IngressLoadBalancerIngressApplyConfiguration represents an declarative configuration of the IngressLoadBalancerIngress type for use +// IngressLoadBalancerIngressApplyConfiguration represents a declarative configuration of the IngressLoadBalancerIngress type for use // with apply. type IngressLoadBalancerIngressApplyConfiguration struct { IP *string `json:"ip,omitempty"` @@ -26,7 +26,7 @@ type IngressLoadBalancerIngressApplyConfiguration struct { Ports []IngressPortStatusApplyConfiguration `json:"ports,omitempty"` } -// IngressLoadBalancerIngressApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerIngress type for use with +// IngressLoadBalancerIngressApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerIngress type for use with // apply. func IngressLoadBalancerIngress() *IngressLoadBalancerIngressApplyConfiguration { return &IngressLoadBalancerIngressApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go index 8e01a301ac..08c841f06b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// IngressLoadBalancerStatusApplyConfiguration represents an declarative configuration of the IngressLoadBalancerStatus type for use +// IngressLoadBalancerStatusApplyConfiguration represents a declarative configuration of the IngressLoadBalancerStatus type for use // with apply. type IngressLoadBalancerStatusApplyConfiguration struct { Ingress []IngressLoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"` } -// IngressLoadBalancerStatusApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerStatus type for use with +// IngressLoadBalancerStatusApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerStatus type for use with // apply. func IngressLoadBalancerStatus() *IngressLoadBalancerStatusApplyConfiguration { return &IngressLoadBalancerStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go index 82b5babd9c..b6411199fc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// IngressPortStatusApplyConfiguration represents an declarative configuration of the IngressPortStatus type for use +// IngressPortStatusApplyConfiguration represents a declarative configuration of the IngressPortStatus type for use // with apply. 
type IngressPortStatusApplyConfiguration struct { Port *int32 `json:"port,omitempty"` @@ -30,7 +30,7 @@ type IngressPortStatusApplyConfiguration struct { Error *string `json:"error,omitempty"` } -// IngressPortStatusApplyConfiguration constructs an declarative configuration of the IngressPortStatus type for use with +// IngressPortStatusApplyConfiguration constructs a declarative configuration of the IngressPortStatus type for use with // apply. func IngressPortStatus() *IngressPortStatusApplyConfiguration { return &IngressPortStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go index 8153e88fe2..4ef871f077 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// IngressRuleApplyConfiguration represents an declarative configuration of the IngressRule type for use +// IngressRuleApplyConfiguration represents a declarative configuration of the IngressRule type for use // with apply. type IngressRuleApplyConfiguration struct { Host *string `json:"host,omitempty"` - IngressRuleValueApplyConfiguration `json:",omitempty,inline"` + IngressRuleValueApplyConfiguration `json:",inline"` } -// IngressRuleApplyConfiguration constructs an declarative configuration of the IngressRule type for use with +// IngressRuleApplyConfiguration constructs a declarative configuration of the IngressRule type for use with // apply. func IngressRule() *IngressRuleApplyConfiguration { return &IngressRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrulevalue.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrulevalue.go index d0e094387c..1e13e378be 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrulevalue.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrulevalue.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// IngressRuleValueApplyConfiguration represents an declarative configuration of the IngressRuleValue type for use +// IngressRuleValueApplyConfiguration represents a declarative configuration of the IngressRuleValue type for use // with apply. type IngressRuleValueApplyConfiguration struct { HTTP *HTTPIngressRuleValueApplyConfiguration `json:"http,omitempty"` } -// IngressRuleValueApplyConfiguration constructs an declarative configuration of the IngressRuleValue type for use with +// IngressRuleValueApplyConfiguration constructs a declarative configuration of the IngressRuleValue type for use with // apply. func IngressRuleValue() *IngressRuleValueApplyConfiguration { return &IngressRuleValueApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressservicebackend.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressservicebackend.go index 399739631b..07876afd17 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressservicebackend.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressservicebackend.go @@ -18,14 +18,14 @@ limitations under the License. 
package v1 -// IngressServiceBackendApplyConfiguration represents an declarative configuration of the IngressServiceBackend type for use +// IngressServiceBackendApplyConfiguration represents a declarative configuration of the IngressServiceBackend type for use // with apply. type IngressServiceBackendApplyConfiguration struct { Name *string `json:"name,omitempty"` Port *ServiceBackendPortApplyConfiguration `json:"port,omitempty"` } -// IngressServiceBackendApplyConfiguration constructs an declarative configuration of the IngressServiceBackend type for use with +// IngressServiceBackendApplyConfiguration constructs a declarative configuration of the IngressServiceBackend type for use with // apply. func IngressServiceBackend() *IngressServiceBackendApplyConfiguration { return &IngressServiceBackendApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressspec.go index 635514ecf7..0572153aa1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// IngressSpecApplyConfiguration represents an declarative configuration of the IngressSpec type for use +// IngressSpecApplyConfiguration represents a declarative configuration of the IngressSpec type for use // with apply. type IngressSpecApplyConfiguration struct { IngressClassName *string `json:"ingressClassName,omitempty"` @@ -27,7 +27,7 @@ type IngressSpecApplyConfiguration struct { Rules []IngressRuleApplyConfiguration `json:"rules,omitempty"` } -// IngressSpecApplyConfiguration constructs an declarative configuration of the IngressSpec type for use with +// IngressSpecApplyConfiguration constructs a declarative configuration of the IngressSpec type for use with // apply. func IngressSpec() *IngressSpecApplyConfiguration { return &IngressSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go index 7131bf8d0d..bd1327c93f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// IngressStatusApplyConfiguration represents an declarative configuration of the IngressStatus type for use +// IngressStatusApplyConfiguration represents a declarative configuration of the IngressStatus type for use // with apply. type IngressStatusApplyConfiguration struct { LoadBalancer *IngressLoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"` } -// IngressStatusApplyConfiguration constructs an declarative configuration of the IngressStatus type for use with +// IngressStatusApplyConfiguration constructs a declarative configuration of the IngressStatus type for use with // apply. 
func IngressStatus() *IngressStatusApplyConfiguration { return &IngressStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingresstls.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingresstls.go index 4d8d369f7c..44092503f9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingresstls.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingresstls.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// IngressTLSApplyConfiguration represents an declarative configuration of the IngressTLS type for use +// IngressTLSApplyConfiguration represents a declarative configuration of the IngressTLS type for use // with apply. type IngressTLSApplyConfiguration struct { Hosts []string `json:"hosts,omitempty"` SecretName *string `json:"secretName,omitempty"` } -// IngressTLSApplyConfiguration constructs an declarative configuration of the IngressTLS type for use with +// IngressTLSApplyConfiguration constructs a declarative configuration of the IngressTLS type for use with // apply. func IngressTLS() *IngressTLSApplyConfiguration { return &IngressTLSApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipblock.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipblock.go index 1efd6edfdc..f3447a8f10 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipblock.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipblock.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// IPBlockApplyConfiguration represents an declarative configuration of the IPBlock type for use +// IPBlockApplyConfiguration represents a declarative configuration of the IPBlock type for use // with apply. type IPBlockApplyConfiguration struct { CIDR *string `json:"cidr,omitempty"` Except []string `json:"except,omitempty"` } -// IPBlockApplyConfiguration constructs an declarative configuration of the IPBlock type for use with +// IPBlockApplyConfiguration constructs a declarative configuration of the IPBlock type for use with // apply. func IPBlock() *IPBlockApplyConfiguration { return &IPBlockApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go index 409507310b..3f8c8a5351 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkPolicyApplyConfiguration represents an declarative configuration of the NetworkPolicy type for use +// NetworkPolicyApplyConfiguration represents a declarative configuration of the NetworkPolicy type for use // with apply. type NetworkPolicyApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type NetworkPolicyApplyConfiguration struct { Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"` } -// NetworkPolicy constructs an declarative configuration of the NetworkPolicy type for use with +// NetworkPolicy constructs a declarative configuration of the NetworkPolicy type for use with // apply. 
func NetworkPolicy(name, namespace string) *NetworkPolicyApplyConfiguration { b := &NetworkPolicyApplyConfiguration{} @@ -247,3 +247,9 @@ func (b *NetworkPolicyApplyConfiguration) WithSpec(value *NetworkPolicySpecApply b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *NetworkPolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyegressrule.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyegressrule.go index e5751c4413..46e2706ece 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyegressrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyegressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// NetworkPolicyEgressRuleApplyConfiguration represents an declarative configuration of the NetworkPolicyEgressRule type for use +// NetworkPolicyEgressRuleApplyConfiguration represents a declarative configuration of the NetworkPolicyEgressRule type for use // with apply. type NetworkPolicyEgressRuleApplyConfiguration struct { Ports []NetworkPolicyPortApplyConfiguration `json:"ports,omitempty"` To []NetworkPolicyPeerApplyConfiguration `json:"to,omitempty"` } -// NetworkPolicyEgressRuleApplyConfiguration constructs an declarative configuration of the NetworkPolicyEgressRule type for use with +// NetworkPolicyEgressRuleApplyConfiguration constructs a declarative configuration of the NetworkPolicyEgressRule type for use with // apply. func NetworkPolicyEgressRule() *NetworkPolicyEgressRuleApplyConfiguration { return &NetworkPolicyEgressRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyingressrule.go index 630fe1fabe..6e98759786 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyingressrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyingressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// NetworkPolicyIngressRuleApplyConfiguration represents an declarative configuration of the NetworkPolicyIngressRule type for use +// NetworkPolicyIngressRuleApplyConfiguration represents a declarative configuration of the NetworkPolicyIngressRule type for use // with apply. type NetworkPolicyIngressRuleApplyConfiguration struct { Ports []NetworkPolicyPortApplyConfiguration `json:"ports,omitempty"` From []NetworkPolicyPeerApplyConfiguration `json:"from,omitempty"` } -// NetworkPolicyIngressRuleApplyConfiguration constructs an declarative configuration of the NetworkPolicyIngressRule type for use with +// NetworkPolicyIngressRuleApplyConfiguration constructs a declarative configuration of the NetworkPolicyIngressRule type for use with // apply. 
func NetworkPolicyIngressRule() *NetworkPolicyIngressRuleApplyConfiguration { return &NetworkPolicyIngressRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go index 909b651c04..046de3e237 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkPolicyPeerApplyConfiguration represents an declarative configuration of the NetworkPolicyPeer type for use +// NetworkPolicyPeerApplyConfiguration represents a declarative configuration of the NetworkPolicyPeer type for use // with apply. type NetworkPolicyPeerApplyConfiguration struct { PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` @@ -30,7 +30,7 @@ type NetworkPolicyPeerApplyConfiguration struct { IPBlock *IPBlockApplyConfiguration `json:"ipBlock,omitempty"` } -// NetworkPolicyPeerApplyConfiguration constructs an declarative configuration of the NetworkPolicyPeer type for use with +// NetworkPolicyPeerApplyConfiguration constructs a declarative configuration of the NetworkPolicyPeer type for use with // apply. func NetworkPolicyPeer() *NetworkPolicyPeerApplyConfiguration { return &NetworkPolicyPeerApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go index 73dbed1d89..581ef1c348 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go @@ -23,7 +23,7 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// NetworkPolicyPortApplyConfiguration represents an declarative configuration of the NetworkPolicyPort type for use +// NetworkPolicyPortApplyConfiguration represents a declarative configuration of the NetworkPolicyPort type for use // with apply. type NetworkPolicyPortApplyConfiguration struct { Protocol *v1.Protocol `json:"protocol,omitempty"` @@ -31,7 +31,7 @@ type NetworkPolicyPortApplyConfiguration struct { EndPort *int32 `json:"endPort,omitempty"` } -// NetworkPolicyPortApplyConfiguration constructs an declarative configuration of the NetworkPolicyPort type for use with +// NetworkPolicyPortApplyConfiguration constructs a declarative configuration of the NetworkPolicyPort type for use with // apply. func NetworkPolicyPort() *NetworkPolicyPortApplyConfiguration { return &NetworkPolicyPortApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go index 882d8233a9..da5ed5d358 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkPolicySpecApplyConfiguration represents an declarative configuration of the NetworkPolicySpec type for use +// NetworkPolicySpecApplyConfiguration represents a declarative configuration of the NetworkPolicySpec type for use // with apply. 
type NetworkPolicySpecApplyConfiguration struct { PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` @@ -32,7 +32,7 @@ type NetworkPolicySpecApplyConfiguration struct { PolicyTypes []apinetworkingv1.PolicyType `json:"policyTypes,omitempty"` } -// NetworkPolicySpecApplyConfiguration constructs an declarative configuration of the NetworkPolicySpec type for use with +// NetworkPolicySpecApplyConfiguration constructs a declarative configuration of the NetworkPolicySpec type for use with // apply. func NetworkPolicySpec() *NetworkPolicySpecApplyConfiguration { return &NetworkPolicySpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicebackendport.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicebackendport.go index ec278960ca..517f974838 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicebackendport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicebackendport.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ServiceBackendPortApplyConfiguration represents an declarative configuration of the ServiceBackendPort type for use +// ServiceBackendPortApplyConfiguration represents a declarative configuration of the ServiceBackendPort type for use // with apply. type ServiceBackendPortApplyConfiguration struct { Name *string `json:"name,omitempty"` Number *int32 `json:"number,omitempty"` } -// ServiceBackendPortApplyConfiguration constructs an declarative configuration of the ServiceBackendPort type for use with +// ServiceBackendPortApplyConfiguration constructs a declarative configuration of the ServiceBackendPort type for use with // apply. func ServiceBackendPort() *ServiceBackendPortApplyConfiguration { return &ServiceBackendPortApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go index da6822111d..999c23fa14 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// IPAddressApplyConfiguration represents an declarative configuration of the IPAddress type for use +// IPAddressApplyConfiguration represents a declarative configuration of the IPAddress type for use // with apply. type IPAddressApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type IPAddressApplyConfiguration struct { Spec *IPAddressSpecApplyConfiguration `json:"spec,omitempty"` } -// IPAddress constructs an declarative configuration of the IPAddress type for use with +// IPAddress constructs a declarative configuration of the IPAddress type for use with // apply. func IPAddress(name string) *IPAddressApplyConfiguration { b := &IPAddressApplyConfiguration{} @@ -245,3 +245,9 @@ func (b *IPAddressApplyConfiguration) WithSpec(value *IPAddressSpecApplyConfigur b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *IPAddressApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go index 064963d691..bf025a8c1a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go @@ -18,13 +18,13 @@ limitations under the License. package v1alpha1 -// IPAddressSpecApplyConfiguration represents an declarative configuration of the IPAddressSpec type for use +// IPAddressSpecApplyConfiguration represents a declarative configuration of the IPAddressSpec type for use // with apply. type IPAddressSpecApplyConfiguration struct { ParentRef *ParentReferenceApplyConfiguration `json:"parentRef,omitempty"` } -// IPAddressSpecApplyConfiguration constructs an declarative configuration of the IPAddressSpec type for use with +// IPAddressSpecApplyConfiguration constructs a declarative configuration of the IPAddressSpec type for use with // apply. func IPAddressSpec() *IPAddressSpecApplyConfiguration { return &IPAddressSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go index ce1049709a..d5a52d503d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// ParentReferenceApplyConfiguration represents an declarative configuration of the ParentReference type for use +// ParentReferenceApplyConfiguration represents a declarative configuration of the ParentReference type for use // with apply. type ParentReferenceApplyConfiguration struct { Group *string `json:"group,omitempty"` @@ -27,7 +27,7 @@ type ParentReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// ParentReferenceApplyConfiguration constructs an declarative configuration of the ParentReference type for use with +// ParentReferenceApplyConfiguration constructs a declarative configuration of the ParentReference type for use with // apply. func ParentReference() *ParentReferenceApplyConfiguration { return &ParentReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go index f6d0a91e00..984e049f28 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ServiceCIDRApplyConfiguration represents an declarative configuration of the ServiceCIDR type for use +// ServiceCIDRApplyConfiguration represents a declarative configuration of the ServiceCIDR type for use // with apply. 
type ServiceCIDRApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ServiceCIDRApplyConfiguration struct { Status *ServiceCIDRStatusApplyConfiguration `json:"status,omitempty"` } -// ServiceCIDR constructs an declarative configuration of the ServiceCIDR type for use with +// ServiceCIDR constructs a declarative configuration of the ServiceCIDR type for use with // apply. func ServiceCIDR(name string) *ServiceCIDRApplyConfiguration { b := &ServiceCIDRApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *ServiceCIDRApplyConfiguration) WithStatus(value *ServiceCIDRStatusApply b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ServiceCIDRApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go index 302d69194c..7875ff403b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go @@ -18,13 +18,13 @@ limitations under the License. package v1alpha1 -// ServiceCIDRSpecApplyConfiguration represents an declarative configuration of the ServiceCIDRSpec type for use +// ServiceCIDRSpecApplyConfiguration represents a declarative configuration of the ServiceCIDRSpec type for use // with apply. type ServiceCIDRSpecApplyConfiguration struct { CIDRs []string `json:"cidrs,omitempty"` } -// ServiceCIDRSpecApplyConfiguration constructs an declarative configuration of the ServiceCIDRSpec type for use with +// ServiceCIDRSpecApplyConfiguration constructs a declarative configuration of the ServiceCIDRSpec type for use with // apply. func ServiceCIDRSpec() *ServiceCIDRSpecApplyConfiguration { return &ServiceCIDRSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go index 5afc549a65..34715e3a49 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ServiceCIDRStatusApplyConfiguration represents an declarative configuration of the ServiceCIDRStatus type for use +// ServiceCIDRStatusApplyConfiguration represents a declarative configuration of the ServiceCIDRStatus type for use // with apply. type ServiceCIDRStatusApplyConfiguration struct { Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// ServiceCIDRStatusApplyConfiguration constructs an declarative configuration of the ServiceCIDRStatus type for use with +// ServiceCIDRStatusApplyConfiguration constructs a declarative configuration of the ServiceCIDRStatus type for use with // apply. 
func ServiceCIDRStatus() *ServiceCIDRStatusApplyConfiguration { return &ServiceCIDRStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go index b12907e81c..61b458f7ee 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go @@ -22,7 +22,7 @@ import ( v1beta1 "k8s.io/api/networking/v1beta1" ) -// HTTPIngressPathApplyConfiguration represents an declarative configuration of the HTTPIngressPath type for use +// HTTPIngressPathApplyConfiguration represents a declarative configuration of the HTTPIngressPath type for use // with apply. type HTTPIngressPathApplyConfiguration struct { Path *string `json:"path,omitempty"` @@ -30,7 +30,7 @@ type HTTPIngressPathApplyConfiguration struct { Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"` } -// HTTPIngressPathApplyConfiguration constructs an declarative configuration of the HTTPIngressPath type for use with +// HTTPIngressPathApplyConfiguration constructs a declarative configuration of the HTTPIngressPath type for use with // apply. func HTTPIngressPath() *HTTPIngressPathApplyConfiguration { return &HTTPIngressPathApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingressrulevalue.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingressrulevalue.go index 3137bc5eb0..1245452237 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingressrulevalue.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingressrulevalue.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// HTTPIngressRuleValueApplyConfiguration represents an declarative configuration of the HTTPIngressRuleValue type for use +// HTTPIngressRuleValueApplyConfiguration represents a declarative configuration of the HTTPIngressRuleValue type for use // with apply. type HTTPIngressRuleValueApplyConfiguration struct { Paths []HTTPIngressPathApplyConfiguration `json:"paths,omitempty"` } -// HTTPIngressRuleValueApplyConfiguration constructs an declarative configuration of the HTTPIngressRuleValue type for use with +// HTTPIngressRuleValueApplyConfiguration constructs a declarative configuration of the HTTPIngressRuleValue type for use with // apply. func HTTPIngressRuleValue() *HTTPIngressRuleValueApplyConfiguration { return &HTTPIngressRuleValueApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go index 56f65c30a9..0df53ea652 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// IngressApplyConfiguration represents an declarative configuration of the Ingress type for use +// IngressApplyConfiguration represents a declarative configuration of the Ingress type for use // with apply. 
type IngressApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type IngressApplyConfiguration struct { Status *IngressStatusApplyConfiguration `json:"status,omitempty"` } -// Ingress constructs an declarative configuration of the Ingress type for use with +// Ingress constructs a declarative configuration of the Ingress type for use with // apply. func Ingress(name, namespace string) *IngressApplyConfiguration { b := &IngressApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *IngressApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressbackend.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressbackend.go index f19c2f2ee2..9d386f1608 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressbackend.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressbackend.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// IngressBackendApplyConfiguration represents an declarative configuration of the IngressBackend type for use +// IngressBackendApplyConfiguration represents a declarative configuration of the IngressBackend type for use // with apply. type IngressBackendApplyConfiguration struct { ServiceName *string `json:"serviceName,omitempty"` @@ -31,7 +31,7 @@ type IngressBackendApplyConfiguration struct { Resource *v1.TypedLocalObjectReferenceApplyConfiguration `json:"resource,omitempty"` } -// IngressBackendApplyConfiguration constructs an declarative configuration of the IngressBackend type for use with +// IngressBackendApplyConfiguration constructs a declarative configuration of the IngressBackend type for use with // apply. func IngressBackend() *IngressBackendApplyConfiguration { return &IngressBackendApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go index b65d4b3073..b0e877b57a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// IngressClassApplyConfiguration represents an declarative configuration of the IngressClass type for use +// IngressClassApplyConfiguration represents a declarative configuration of the IngressClass type for use // with apply. type IngressClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type IngressClassApplyConfiguration struct { Spec *IngressClassSpecApplyConfiguration `json:"spec,omitempty"` } -// IngressClass constructs an declarative configuration of the IngressClass type for use with +// IngressClass constructs a declarative configuration of the IngressClass type for use with // apply. func IngressClass(name string) *IngressClassApplyConfiguration { b := &IngressClassApplyConfiguration{} @@ -245,3 +245,9 @@ func (b *IngressClassApplyConfiguration) WithSpec(value *IngressClassSpecApplyCo b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *IngressClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassparametersreference.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassparametersreference.go index e6ca805e47..2a307a6760 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassparametersreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassparametersreference.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// IngressClassParametersReferenceApplyConfiguration represents an declarative configuration of the IngressClassParametersReference type for use +// IngressClassParametersReferenceApplyConfiguration represents a declarative configuration of the IngressClassParametersReference type for use // with apply. type IngressClassParametersReferenceApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -28,7 +28,7 @@ type IngressClassParametersReferenceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` } -// IngressClassParametersReferenceApplyConfiguration constructs an declarative configuration of the IngressClassParametersReference type for use with +// IngressClassParametersReferenceApplyConfiguration constructs a declarative configuration of the IngressClassParametersReference type for use with // apply. func IngressClassParametersReference() *IngressClassParametersReferenceApplyConfiguration { return &IngressClassParametersReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassspec.go index 51040462ca..eefbf62b87 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassspec.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// IngressClassSpecApplyConfiguration represents an declarative configuration of the IngressClassSpec type for use +// IngressClassSpecApplyConfiguration represents a declarative configuration of the IngressClassSpec type for use // with apply. type IngressClassSpecApplyConfiguration struct { Controller *string `json:"controller,omitempty"` Parameters *IngressClassParametersReferenceApplyConfiguration `json:"parameters,omitempty"` } -// IngressClassSpecApplyConfiguration constructs an declarative configuration of the IngressClassSpec type for use with +// IngressClassSpecApplyConfiguration constructs a declarative configuration of the IngressClassSpec type for use with // apply. func IngressClassSpec() *IngressClassSpecApplyConfiguration { return &IngressClassSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go index 20bf637805..12dbc35969 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1beta1 -// IngressLoadBalancerIngressApplyConfiguration represents an declarative configuration of the IngressLoadBalancerIngress type for use +// IngressLoadBalancerIngressApplyConfiguration represents a declarative configuration of the IngressLoadBalancerIngress type for use // with apply. type IngressLoadBalancerIngressApplyConfiguration struct { IP *string `json:"ip,omitempty"` @@ -26,7 +26,7 @@ type IngressLoadBalancerIngressApplyConfiguration struct { Ports []IngressPortStatusApplyConfiguration `json:"ports,omitempty"` } -// IngressLoadBalancerIngressApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerIngress type for use with +// IngressLoadBalancerIngressApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerIngress type for use with // apply. func IngressLoadBalancerIngress() *IngressLoadBalancerIngressApplyConfiguration { return &IngressLoadBalancerIngressApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go index e16dd23633..e896ab3415 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// IngressLoadBalancerStatusApplyConfiguration represents an declarative configuration of the IngressLoadBalancerStatus type for use +// IngressLoadBalancerStatusApplyConfiguration represents a declarative configuration of the IngressLoadBalancerStatus type for use // with apply. type IngressLoadBalancerStatusApplyConfiguration struct { Ingress []IngressLoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"` } -// IngressLoadBalancerStatusApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerStatus type for use with +// IngressLoadBalancerStatusApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerStatus type for use with // apply. func IngressLoadBalancerStatus() *IngressLoadBalancerStatusApplyConfiguration { return &IngressLoadBalancerStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go index 0836537979..4ee3f01617 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// IngressPortStatusApplyConfiguration represents an declarative configuration of the IngressPortStatus type for use +// IngressPortStatusApplyConfiguration represents a declarative configuration of the IngressPortStatus type for use // with apply. type IngressPortStatusApplyConfiguration struct { Port *int32 `json:"port,omitempty"` @@ -30,7 +30,7 @@ type IngressPortStatusApplyConfiguration struct { Error *string `json:"error,omitempty"` } -// IngressPortStatusApplyConfiguration constructs an declarative configuration of the IngressPortStatus type for use with +// IngressPortStatusApplyConfiguration constructs a declarative configuration of the IngressPortStatus type for use with // apply. 
func IngressPortStatus() *IngressPortStatusApplyConfiguration { return &IngressPortStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go index 015541eeb9..dc676f7b60 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// IngressRuleApplyConfiguration represents an declarative configuration of the IngressRule type for use +// IngressRuleApplyConfiguration represents a declarative configuration of the IngressRule type for use // with apply. type IngressRuleApplyConfiguration struct { Host *string `json:"host,omitempty"` - IngressRuleValueApplyConfiguration `json:",omitempty,inline"` + IngressRuleValueApplyConfiguration `json:",inline"` } -// IngressRuleApplyConfiguration constructs an declarative configuration of the IngressRule type for use with +// IngressRuleApplyConfiguration constructs a declarative configuration of the IngressRule type for use with // apply. func IngressRule() *IngressRuleApplyConfiguration { return &IngressRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrulevalue.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrulevalue.go index 2d03c7b132..4a64124755 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrulevalue.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrulevalue.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// IngressRuleValueApplyConfiguration represents an declarative configuration of the IngressRuleValue type for use +// IngressRuleValueApplyConfiguration represents a declarative configuration of the IngressRuleValue type for use // with apply. type IngressRuleValueApplyConfiguration struct { HTTP *HTTPIngressRuleValueApplyConfiguration `json:"http,omitempty"` } -// IngressRuleValueApplyConfiguration constructs an declarative configuration of the IngressRuleValue type for use with +// IngressRuleValueApplyConfiguration constructs a declarative configuration of the IngressRuleValue type for use with // apply. func IngressRuleValue() *IngressRuleValueApplyConfiguration { return &IngressRuleValueApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressspec.go index 1ab4d8bb73..58fbde8b35 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// IngressSpecApplyConfiguration represents an declarative configuration of the IngressSpec type for use +// IngressSpecApplyConfiguration represents a declarative configuration of the IngressSpec type for use // with apply. 
type IngressSpecApplyConfiguration struct { IngressClassName *string `json:"ingressClassName,omitempty"` @@ -27,7 +27,7 @@ type IngressSpecApplyConfiguration struct { Rules []IngressRuleApplyConfiguration `json:"rules,omitempty"` } -// IngressSpecApplyConfiguration constructs an declarative configuration of the IngressSpec type for use with +// IngressSpecApplyConfiguration constructs a declarative configuration of the IngressSpec type for use with // apply. func IngressSpec() *IngressSpecApplyConfiguration { return &IngressSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go index faa7e2446f..3aed616889 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// IngressStatusApplyConfiguration represents an declarative configuration of the IngressStatus type for use +// IngressStatusApplyConfiguration represents a declarative configuration of the IngressStatus type for use // with apply. type IngressStatusApplyConfiguration struct { LoadBalancer *IngressLoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"` } -// IngressStatusApplyConfiguration constructs an declarative configuration of the IngressStatus type for use with +// IngressStatusApplyConfiguration constructs a declarative configuration of the IngressStatus type for use with // apply. func IngressStatus() *IngressStatusApplyConfiguration { return &IngressStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingresstls.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingresstls.go index 8ca93a0bc2..63648cd464 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingresstls.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingresstls.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// IngressTLSApplyConfiguration represents an declarative configuration of the IngressTLS type for use +// IngressTLSApplyConfiguration represents a declarative configuration of the IngressTLS type for use // with apply. type IngressTLSApplyConfiguration struct { Hosts []string `json:"hosts,omitempty"` SecretName *string `json:"secretName,omitempty"` } -// IngressTLSApplyConfiguration constructs an declarative configuration of the IngressTLS type for use with +// IngressTLSApplyConfiguration constructs a declarative configuration of the IngressTLS type for use with // apply. func IngressTLS() *IngressTLSApplyConfiguration { return &IngressTLSApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go new file mode 100644 index 0000000000..3047d79b95 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go @@ -0,0 +1,253 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + networkingv1beta1 "k8s.io/api/networking/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// IPAddressApplyConfiguration represents a declarative configuration of the IPAddress type for use +// with apply. +type IPAddressApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *IPAddressSpecApplyConfiguration `json:"spec,omitempty"` +} + +// IPAddress constructs a declarative configuration of the IPAddress type for use with +// apply. +func IPAddress(name string) *IPAddressApplyConfiguration { + b := &IPAddressApplyConfiguration{} + b.WithName(name) + b.WithKind("IPAddress") + b.WithAPIVersion("networking.k8s.io/v1beta1") + return b +} + +// ExtractIPAddress extracts the applied configuration owned by fieldManager from +// iPAddress. If no managedFields are found in iPAddress for fieldManager, a +// IPAddressApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// iPAddress must be a unmodified IPAddress API object that was retrieved from the Kubernetes API. +// ExtractIPAddress provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractIPAddress(iPAddress *networkingv1beta1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) { + return extractIPAddress(iPAddress, fieldManager, "") +} + +// ExtractIPAddressStatus is the same as ExtractIPAddress except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractIPAddressStatus(iPAddress *networkingv1beta1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) { + return extractIPAddress(iPAddress, fieldManager, "status") +} + +func extractIPAddress(iPAddress *networkingv1beta1.IPAddress, fieldManager string, subresource string) (*IPAddressApplyConfiguration, error) { + b := &IPAddressApplyConfiguration{} + err := managedfields.ExtractInto(iPAddress, internal.Parser().Type("io.k8s.api.networking.v1beta1.IPAddress"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(iPAddress.Name) + + b.WithKind("IPAddress") + b.WithAPIVersion("networking.k8s.io/v1beta1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithKind(value string) *IPAddressApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithAPIVersion(value string) *IPAddressApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithName(value string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithGenerateName(value string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithNamespace(value string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithUID(value types.UID) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithResourceVersion(value string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithGeneration(value int64) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *IPAddressApplyConfiguration) WithLabels(entries map[string]string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. 
+func (b *IPAddressApplyConfiguration) WithAnnotations(entries map[string]string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *IPAddressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *IPAddressApplyConfiguration) WithFinalizers(values ...string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *IPAddressApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithSpec(value *IPAddressSpecApplyConfiguration) *IPAddressApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *IPAddressApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddressspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddressspec.go new file mode 100644 index 0000000000..76b02137d2 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddressspec.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// IPAddressSpecApplyConfiguration represents a declarative configuration of the IPAddressSpec type for use +// with apply. 
+type IPAddressSpecApplyConfiguration struct { + ParentRef *ParentReferenceApplyConfiguration `json:"parentRef,omitempty"` +} + +// IPAddressSpecApplyConfiguration constructs a declarative configuration of the IPAddressSpec type for use with +// apply. +func IPAddressSpec() *IPAddressSpecApplyConfiguration { + return &IPAddressSpecApplyConfiguration{} +} + +// WithParentRef sets the ParentRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParentRef field is set to the value of the last call. +func (b *IPAddressSpecApplyConfiguration) WithParentRef(value *ParentReferenceApplyConfiguration) *IPAddressSpecApplyConfiguration { + b.ParentRef = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/parentreference.go similarity index 51% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go rename to vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/parentreference.go index d67e4d3977..1863938f16 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/parentreference.go @@ -16,51 +16,51 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha2 +package v1beta1 -// ResourceClassParametersReferenceApplyConfiguration represents an declarative configuration of the ResourceClassParametersReference type for use +// ParentReferenceApplyConfiguration represents a declarative configuration of the ParentReference type for use // with apply. -type ResourceClassParametersReferenceApplyConfiguration struct { - APIGroup *string `json:"apiGroup,omitempty"` - Kind *string `json:"kind,omitempty"` - Name *string `json:"name,omitempty"` +type ParentReferenceApplyConfiguration struct { + Group *string `json:"group,omitempty"` + Resource *string `json:"resource,omitempty"` Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` } -// ResourceClassParametersReferenceApplyConfiguration constructs an declarative configuration of the ResourceClassParametersReference type for use with +// ParentReferenceApplyConfiguration constructs a declarative configuration of the ParentReference type for use with // apply. -func ResourceClassParametersReference() *ResourceClassParametersReferenceApplyConfiguration { - return &ResourceClassParametersReferenceApplyConfiguration{} +func ParentReference() *ParentReferenceApplyConfiguration { + return &ParentReferenceApplyConfiguration{} } -// WithAPIGroup sets the APIGroup field in the declarative configuration to the given value +// WithGroup sets the Group field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIGroup field is set to the value of the last call. -func (b *ResourceClassParametersReferenceApplyConfiguration) WithAPIGroup(value string) *ResourceClassParametersReferenceApplyConfiguration { - b.APIGroup = &value +// If called multiple times, the Group field is set to the value of the last call. 
+func (b *ParentReferenceApplyConfiguration) WithGroup(value string) *ParentReferenceApplyConfiguration { + b.Group = &value return b } -// WithKind sets the Kind field in the declarative configuration to the given value +// WithResource sets the Resource field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *ResourceClassParametersReferenceApplyConfiguration) WithKind(value string) *ResourceClassParametersReferenceApplyConfiguration { - b.Kind = &value +// If called multiple times, the Resource field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithResource(value string) *ParentReferenceApplyConfiguration { + b.Resource = &value return b } -// WithName sets the Name field in the declarative configuration to the given value +// WithNamespace sets the Namespace field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *ResourceClassParametersReferenceApplyConfiguration) WithName(value string) *ResourceClassParametersReferenceApplyConfiguration { - b.Name = &value +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithNamespace(value string) *ParentReferenceApplyConfiguration { + b.Namespace = &value return b } -// WithNamespace sets the Namespace field in the declarative configuration to the given value +// WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. -func (b *ResourceClassParametersReferenceApplyConfiguration) WithNamespace(value string) *ResourceClassParametersReferenceApplyConfiguration { - b.Namespace = &value +// If called multiple times, the Name field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithName(value string) *ParentReferenceApplyConfiguration { + b.Name = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go new file mode 100644 index 0000000000..4ef8e9ecac --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go @@ -0,0 +1,262 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + networkingv1beta1 "k8s.io/api/networking/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ServiceCIDRApplyConfiguration represents a declarative configuration of the ServiceCIDR type for use +// with apply. +type ServiceCIDRApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ServiceCIDRSpecApplyConfiguration `json:"spec,omitempty"` + Status *ServiceCIDRStatusApplyConfiguration `json:"status,omitempty"` +} + +// ServiceCIDR constructs a declarative configuration of the ServiceCIDR type for use with +// apply. +func ServiceCIDR(name string) *ServiceCIDRApplyConfiguration { + b := &ServiceCIDRApplyConfiguration{} + b.WithName(name) + b.WithKind("ServiceCIDR") + b.WithAPIVersion("networking.k8s.io/v1beta1") + return b +} + +// ExtractServiceCIDR extracts the applied configuration owned by fieldManager from +// serviceCIDR. If no managedFields are found in serviceCIDR for fieldManager, a +// ServiceCIDRApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// serviceCIDR must be a unmodified ServiceCIDR API object that was retrieved from the Kubernetes API. +// ExtractServiceCIDR provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractServiceCIDR(serviceCIDR *networkingv1beta1.ServiceCIDR, fieldManager string) (*ServiceCIDRApplyConfiguration, error) { + return extractServiceCIDR(serviceCIDR, fieldManager, "") +} + +// ExtractServiceCIDRStatus is the same as ExtractServiceCIDR except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractServiceCIDRStatus(serviceCIDR *networkingv1beta1.ServiceCIDR, fieldManager string) (*ServiceCIDRApplyConfiguration, error) { + return extractServiceCIDR(serviceCIDR, fieldManager, "status") +} + +func extractServiceCIDR(serviceCIDR *networkingv1beta1.ServiceCIDR, fieldManager string, subresource string) (*ServiceCIDRApplyConfiguration, error) { + b := &ServiceCIDRApplyConfiguration{} + err := managedfields.ExtractInto(serviceCIDR, internal.Parser().Type("io.k8s.api.networking.v1beta1.ServiceCIDR"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(serviceCIDR.Name) + + b.WithKind("ServiceCIDR") + b.WithAPIVersion("networking.k8s.io/v1beta1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. 
+func (b *ServiceCIDRApplyConfiguration) WithKind(value string) *ServiceCIDRApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithAPIVersion(value string) *ServiceCIDRApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithName(value string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithGenerateName(value string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithNamespace(value string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithUID(value types.UID) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithResourceVersion(value string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *ServiceCIDRApplyConfiguration) WithGeneration(value int64) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ServiceCIDRApplyConfiguration) WithLabels(entries map[string]string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ServiceCIDRApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
+func (b *ServiceCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ServiceCIDRApplyConfiguration) WithFinalizers(values ...string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ServiceCIDRApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithSpec(value *ServiceCIDRSpecApplyConfiguration) *ServiceCIDRApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithStatus(value *ServiceCIDRStatusApplyConfiguration) *ServiceCIDRApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ServiceCIDRApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrspec.go new file mode 100644 index 0000000000..1f283532d3 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrspec.go @@ -0,0 +1,41 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// ServiceCIDRSpecApplyConfiguration represents a declarative configuration of the ServiceCIDRSpec type for use +// with apply. 
+type ServiceCIDRSpecApplyConfiguration struct { + CIDRs []string `json:"cidrs,omitempty"` +} + +// ServiceCIDRSpecApplyConfiguration constructs a declarative configuration of the ServiceCIDRSpec type for use with +// apply. +func ServiceCIDRSpec() *ServiceCIDRSpecApplyConfiguration { + return &ServiceCIDRSpecApplyConfiguration{} +} + +// WithCIDRs adds the given value to the CIDRs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the CIDRs field. +func (b *ServiceCIDRSpecApplyConfiguration) WithCIDRs(values ...string) *ServiceCIDRSpecApplyConfiguration { + for i := range values { + b.CIDRs = append(b.CIDRs, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrstatus.go new file mode 100644 index 0000000000..f2dd92404d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrstatus.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ServiceCIDRStatusApplyConfiguration represents a declarative configuration of the ServiceCIDRStatus type for use +// with apply. +type ServiceCIDRStatusApplyConfiguration struct { + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// ServiceCIDRStatusApplyConfiguration constructs a declarative configuration of the ServiceCIDRStatus type for use with +// apply. +func ServiceCIDRStatus() *ServiceCIDRStatusApplyConfiguration { + return &ServiceCIDRStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. 
+func (b *ServiceCIDRStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *ServiceCIDRStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go index 9eec002671..6694538fc3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/api/core/v1" ) -// OverheadApplyConfiguration represents an declarative configuration of the Overhead type for use +// OverheadApplyConfiguration represents a declarative configuration of the Overhead type for use // with apply. type OverheadApplyConfiguration struct { PodFixed *v1.ResourceList `json:"podFixed,omitempty"` } -// OverheadApplyConfiguration constructs an declarative configuration of the Overhead type for use with +// OverheadApplyConfiguration constructs a declarative configuration of the Overhead type for use with // apply. func Overhead() *OverheadApplyConfiguration { return &OverheadApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go index 3c9d1fc467..6ce01a319c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RuntimeClassApplyConfiguration represents an declarative configuration of the RuntimeClass type for use +// RuntimeClassApplyConfiguration represents a declarative configuration of the RuntimeClass type for use // with apply. type RuntimeClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type RuntimeClassApplyConfiguration struct { Scheduling *SchedulingApplyConfiguration `json:"scheduling,omitempty"` } -// RuntimeClass constructs an declarative configuration of the RuntimeClass type for use with +// RuntimeClass constructs a declarative configuration of the RuntimeClass type for use with // apply. func RuntimeClass(name string) *RuntimeClassApplyConfiguration { b := &RuntimeClassApplyConfiguration{} @@ -263,3 +263,9 @@ func (b *RuntimeClassApplyConfiguration) WithScheduling(value *SchedulingApplyCo b.Scheduling = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RuntimeClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go index e01db85d7b..2d084e0f59 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// SchedulingApplyConfiguration represents an declarative configuration of the Scheduling type for use +// SchedulingApplyConfiguration represents a declarative configuration of the Scheduling type for use // with apply. 
type SchedulingApplyConfiguration struct { NodeSelector map[string]string `json:"nodeSelector,omitempty"` Tolerations []v1.TolerationApplyConfiguration `json:"tolerations,omitempty"` } -// SchedulingApplyConfiguration constructs an declarative configuration of the Scheduling type for use with +// SchedulingApplyConfiguration constructs a declarative configuration of the Scheduling type for use with // apply. func Scheduling() *SchedulingApplyConfiguration { return &SchedulingApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/overhead.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/overhead.go index 1ddaa64acc..84770a0920 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/overhead.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/overhead.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/api/core/v1" ) -// OverheadApplyConfiguration represents an declarative configuration of the Overhead type for use +// OverheadApplyConfiguration represents a declarative configuration of the Overhead type for use // with apply. type OverheadApplyConfiguration struct { PodFixed *v1.ResourceList `json:"podFixed,omitempty"` } -// OverheadApplyConfiguration constructs an declarative configuration of the Overhead type for use with +// OverheadApplyConfiguration constructs a declarative configuration of the Overhead type for use with // apply. func Overhead() *OverheadApplyConfiguration { return &OverheadApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go index e680e12deb..9f139ee1b6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RuntimeClassApplyConfiguration represents an declarative configuration of the RuntimeClass type for use +// RuntimeClassApplyConfiguration represents a declarative configuration of the RuntimeClass type for use // with apply. type RuntimeClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type RuntimeClassApplyConfiguration struct { Spec *RuntimeClassSpecApplyConfiguration `json:"spec,omitempty"` } -// RuntimeClass constructs an declarative configuration of the RuntimeClass type for use with +// RuntimeClass constructs a declarative configuration of the RuntimeClass type for use with // apply. func RuntimeClass(name string) *RuntimeClassApplyConfiguration { b := &RuntimeClassApplyConfiguration{} @@ -245,3 +245,9 @@ func (b *RuntimeClassApplyConfiguration) WithSpec(value *RuntimeClassSpecApplyCo b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RuntimeClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclassspec.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclassspec.go index 86e8585ad3..1aa43eb132 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclassspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclassspec.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1alpha1 -// RuntimeClassSpecApplyConfiguration represents an declarative configuration of the RuntimeClassSpec type for use +// RuntimeClassSpecApplyConfiguration represents a declarative configuration of the RuntimeClassSpec type for use // with apply. type RuntimeClassSpecApplyConfiguration struct { RuntimeHandler *string `json:"runtimeHandler,omitempty"` @@ -26,7 +26,7 @@ type RuntimeClassSpecApplyConfiguration struct { Scheduling *SchedulingApplyConfiguration `json:"scheduling,omitempty"` } -// RuntimeClassSpecApplyConfiguration constructs an declarative configuration of the RuntimeClassSpec type for use with +// RuntimeClassSpecApplyConfiguration constructs a declarative configuration of the RuntimeClassSpec type for use with // apply. func RuntimeClassSpec() *RuntimeClassSpecApplyConfiguration { return &RuntimeClassSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/scheduling.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/scheduling.go index d4117d6bc7..6ce49ad866 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/scheduling.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/scheduling.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// SchedulingApplyConfiguration represents an declarative configuration of the Scheduling type for use +// SchedulingApplyConfiguration represents a declarative configuration of the Scheduling type for use // with apply. type SchedulingApplyConfiguration struct { NodeSelector map[string]string `json:"nodeSelector,omitempty"` Tolerations []v1.TolerationApplyConfiguration `json:"tolerations,omitempty"` } -// SchedulingApplyConfiguration constructs an declarative configuration of the Scheduling type for use with +// SchedulingApplyConfiguration constructs a declarative configuration of the Scheduling type for use with // apply. func Scheduling() *SchedulingApplyConfiguration { return &SchedulingApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/overhead.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/overhead.go index e8c4895505..cf767e702e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/overhead.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/overhead.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/api/core/v1" ) -// OverheadApplyConfiguration represents an declarative configuration of the Overhead type for use +// OverheadApplyConfiguration represents a declarative configuration of the Overhead type for use // with apply. type OverheadApplyConfiguration struct { PodFixed *v1.ResourceList `json:"podFixed,omitempty"` } -// OverheadApplyConfiguration constructs an declarative configuration of the Overhead type for use with +// OverheadApplyConfiguration constructs a declarative configuration of the Overhead type for use with // apply. 
func Overhead() *OverheadApplyConfiguration { return &OverheadApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go index f5487665c3..fa6c9f45bf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RuntimeClassApplyConfiguration represents an declarative configuration of the RuntimeClass type for use +// RuntimeClassApplyConfiguration represents a declarative configuration of the RuntimeClass type for use // with apply. type RuntimeClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type RuntimeClassApplyConfiguration struct { Scheduling *SchedulingApplyConfiguration `json:"scheduling,omitempty"` } -// RuntimeClass constructs an declarative configuration of the RuntimeClass type for use with +// RuntimeClass constructs a declarative configuration of the RuntimeClass type for use with // apply. func RuntimeClass(name string) *RuntimeClassApplyConfiguration { b := &RuntimeClassApplyConfiguration{} @@ -263,3 +263,9 @@ func (b *RuntimeClassApplyConfiguration) WithScheduling(value *SchedulingApplyCo b.Scheduling = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RuntimeClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/scheduling.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/scheduling.go index 10831d0ff5..23d0b97527 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/scheduling.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/scheduling.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// SchedulingApplyConfiguration represents an declarative configuration of the Scheduling type for use +// SchedulingApplyConfiguration represents a declarative configuration of the Scheduling type for use // with apply. type SchedulingApplyConfiguration struct { NodeSelector map[string]string `json:"nodeSelector,omitempty"` Tolerations []v1.TolerationApplyConfiguration `json:"tolerations,omitempty"` } -// SchedulingApplyConfiguration constructs an declarative configuration of the Scheduling type for use with +// SchedulingApplyConfiguration constructs a declarative configuration of the Scheduling type for use with // apply. func Scheduling() *SchedulingApplyConfiguration { return &SchedulingApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go index 76a9533a6f..3a051619f8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EvictionApplyConfiguration represents an declarative configuration of the Eviction type for use +// EvictionApplyConfiguration represents a declarative configuration of the Eviction type for use // with apply. 
type EvictionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type EvictionApplyConfiguration struct { DeleteOptions *v1.DeleteOptionsApplyConfiguration `json:"deleteOptions,omitempty"` } -// Eviction constructs an declarative configuration of the Eviction type for use with +// Eviction constructs a declarative configuration of the Eviction type for use with // apply. func Eviction(name, namespace string) *EvictionApplyConfiguration { b := &EvictionApplyConfiguration{} @@ -247,3 +247,9 @@ func (b *EvictionApplyConfiguration) WithDeleteOptions(value *v1.DeleteOptionsAp b.DeleteOptions = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EvictionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go index 6b547c2695..a765a7b623 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodDisruptionBudgetApplyConfiguration represents an declarative configuration of the PodDisruptionBudget type for use +// PodDisruptionBudgetApplyConfiguration represents a declarative configuration of the PodDisruptionBudget type for use // with apply. type PodDisruptionBudgetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PodDisruptionBudgetApplyConfiguration struct { Status *PodDisruptionBudgetStatusApplyConfiguration `json:"status,omitempty"` } -// PodDisruptionBudget constructs an declarative configuration of the PodDisruptionBudget type for use with +// PodDisruptionBudget constructs a declarative configuration of the PodDisruptionBudget type for use with // apply. func PodDisruptionBudget(name, namespace string) *PodDisruptionBudgetApplyConfiguration { b := &PodDisruptionBudgetApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithStatus(value *PodDisruptionB b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PodDisruptionBudgetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go index 67d9ba6bba..2917145451 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go @@ -24,7 +24,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodDisruptionBudgetSpecApplyConfiguration represents an declarative configuration of the PodDisruptionBudgetSpec type for use +// PodDisruptionBudgetSpecApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetSpec type for use // with apply. 
type PodDisruptionBudgetSpecApplyConfiguration struct { MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"` @@ -33,7 +33,7 @@ type PodDisruptionBudgetSpecApplyConfiguration struct { UnhealthyPodEvictionPolicy *policyv1.UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty"` } -// PodDisruptionBudgetSpecApplyConfiguration constructs an declarative configuration of the PodDisruptionBudgetSpec type for use with +// PodDisruptionBudgetSpecApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetSpec type for use with // apply. func PodDisruptionBudgetSpec() *PodDisruptionBudgetSpecApplyConfiguration { return &PodDisruptionBudgetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go index 2dd427b9e1..d0f9baf41c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodDisruptionBudgetStatusApplyConfiguration represents an declarative configuration of the PodDisruptionBudgetStatus type for use +// PodDisruptionBudgetStatusApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetStatus type for use // with apply. type PodDisruptionBudgetStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -35,7 +35,7 @@ type PodDisruptionBudgetStatusApplyConfiguration struct { Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// PodDisruptionBudgetStatusApplyConfiguration constructs an declarative configuration of the PodDisruptionBudgetStatus type for use with +// PodDisruptionBudgetStatusApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetStatus type for use with // apply. func PodDisruptionBudgetStatus() *PodDisruptionBudgetStatusApplyConfiguration { return &PodDisruptionBudgetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go index d2a361d1b5..d4121af206 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EvictionApplyConfiguration represents an declarative configuration of the Eviction type for use +// EvictionApplyConfiguration represents a declarative configuration of the Eviction type for use // with apply. type EvictionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type EvictionApplyConfiguration struct { DeleteOptions *v1.DeleteOptionsApplyConfiguration `json:"deleteOptions,omitempty"` } -// Eviction constructs an declarative configuration of the Eviction type for use with +// Eviction constructs a declarative configuration of the Eviction type for use with // apply. 
func Eviction(name, namespace string) *EvictionApplyConfiguration { b := &EvictionApplyConfiguration{} @@ -247,3 +247,9 @@ func (b *EvictionApplyConfiguration) WithDeleteOptions(value *v1.DeleteOptionsAp b.DeleteOptions = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EvictionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go index cef51a279c..813b57bae7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodDisruptionBudgetApplyConfiguration represents an declarative configuration of the PodDisruptionBudget type for use +// PodDisruptionBudgetApplyConfiguration represents a declarative configuration of the PodDisruptionBudget type for use // with apply. type PodDisruptionBudgetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PodDisruptionBudgetApplyConfiguration struct { Status *PodDisruptionBudgetStatusApplyConfiguration `json:"status,omitempty"` } -// PodDisruptionBudget constructs an declarative configuration of the PodDisruptionBudget type for use with +// PodDisruptionBudget constructs a declarative configuration of the PodDisruptionBudget type for use with // apply. func PodDisruptionBudget(name, namespace string) *PodDisruptionBudgetApplyConfiguration { b := &PodDisruptionBudgetApplyConfiguration{} @@ -256,3 +256,9 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithStatus(value *PodDisruptionB b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PodDisruptionBudgetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go index 0ba3ea1c2e..405f1148b0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go @@ -24,7 +24,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodDisruptionBudgetSpecApplyConfiguration represents an declarative configuration of the PodDisruptionBudgetSpec type for use +// PodDisruptionBudgetSpecApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetSpec type for use // with apply. type PodDisruptionBudgetSpecApplyConfiguration struct { MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"` @@ -33,7 +33,7 @@ type PodDisruptionBudgetSpecApplyConfiguration struct { UnhealthyPodEvictionPolicy *v1beta1.UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty"` } -// PodDisruptionBudgetSpecApplyConfiguration constructs an declarative configuration of the PodDisruptionBudgetSpec type for use with +// PodDisruptionBudgetSpecApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetSpec type for use with // apply. 
func PodDisruptionBudgetSpec() *PodDisruptionBudgetSpecApplyConfiguration { return &PodDisruptionBudgetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetstatus.go index d0813590e1..e66a7fb386 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetstatus.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodDisruptionBudgetStatusApplyConfiguration represents an declarative configuration of the PodDisruptionBudgetStatus type for use +// PodDisruptionBudgetStatusApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetStatus type for use // with apply. type PodDisruptionBudgetStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -35,7 +35,7 @@ type PodDisruptionBudgetStatusApplyConfiguration struct { Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// PodDisruptionBudgetStatusApplyConfiguration constructs an declarative configuration of the PodDisruptionBudgetStatus type for use with +// PodDisruptionBudgetStatusApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetStatus type for use with // apply. func PodDisruptionBudgetStatus() *PodDisruptionBudgetStatusApplyConfiguration { return &PodDisruptionBudgetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go index fda9205c21..5ae4dc37ff 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// AggregationRuleApplyConfiguration represents an declarative configuration of the AggregationRule type for use +// AggregationRuleApplyConfiguration represents a declarative configuration of the AggregationRule type for use // with apply. type AggregationRuleApplyConfiguration struct { ClusterRoleSelectors []v1.LabelSelectorApplyConfiguration `json:"clusterRoleSelectors,omitempty"` } -// AggregationRuleApplyConfiguration constructs an declarative configuration of the AggregationRule type for use with +// AggregationRuleApplyConfiguration constructs a declarative configuration of the AggregationRule type for use with // apply. func AggregationRule() *AggregationRuleApplyConfiguration { return &AggregationRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go index 3a5660fe19..c5b0075ec0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterRoleApplyConfiguration represents an declarative configuration of the ClusterRole type for use +// ClusterRoleApplyConfiguration represents a declarative configuration of the ClusterRole type for use // with apply. 
type ClusterRoleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ClusterRoleApplyConfiguration struct { AggregationRule *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"` } -// ClusterRole constructs an declarative configuration of the ClusterRole type for use with +// ClusterRole constructs a declarative configuration of the ClusterRole type for use with // apply. func ClusterRole(name string) *ClusterRoleApplyConfiguration { b := &ClusterRoleApplyConfiguration{} @@ -259,3 +259,9 @@ func (b *ClusterRoleApplyConfiguration) WithAggregationRule(value *AggregationRu b.AggregationRule = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterRoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go index 625ad72c44..91a9d5df31 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterRoleBindingApplyConfiguration represents an declarative configuration of the ClusterRoleBinding type for use +// ClusterRoleBindingApplyConfiguration represents a declarative configuration of the ClusterRoleBinding type for use // with apply. type ClusterRoleBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ClusterRoleBindingApplyConfiguration struct { RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } -// ClusterRoleBinding constructs an declarative configuration of the ClusterRoleBinding type for use with +// ClusterRoleBinding constructs a declarative configuration of the ClusterRoleBinding type for use with // apply. func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration { b := &ClusterRoleBindingApplyConfiguration{} @@ -259,3 +259,9 @@ func (b *ClusterRoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyCo b.RoleRef = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterRoleBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/policyrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/policyrule.go index 65ee1d4fe5..a2e66d1096 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/policyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/policyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// PolicyRuleApplyConfiguration represents an declarative configuration of the PolicyRule type for use +// PolicyRuleApplyConfiguration represents a declarative configuration of the PolicyRule type for use // with apply. type PolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type PolicyRuleApplyConfiguration struct { NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// PolicyRuleApplyConfiguration constructs an declarative configuration of the PolicyRule type for use with +// PolicyRuleApplyConfiguration constructs a declarative configuration of the PolicyRule type for use with // apply. 
func PolicyRule() *PolicyRuleApplyConfiguration { return &PolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go index 97df25fb65..b51f904267 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RoleApplyConfiguration represents an declarative configuration of the Role type for use +// RoleApplyConfiguration represents a declarative configuration of the Role type for use // with apply. type RoleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type RoleApplyConfiguration struct { Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"` } -// Role constructs an declarative configuration of the Role type for use with +// Role constructs a declarative configuration of the Role type for use with // apply. func Role(name, namespace string) *RoleApplyConfiguration { b := &RoleApplyConfiguration{} @@ -252,3 +252,9 @@ func (b *RoleApplyConfiguration) WithRules(values ...*PolicyRuleApplyConfigurati } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go index 7270f07e49..e59c8e6d30 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RoleBindingApplyConfiguration represents an declarative configuration of the RoleBinding type for use +// RoleBindingApplyConfiguration represents a declarative configuration of the RoleBinding type for use // with apply. type RoleBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type RoleBindingApplyConfiguration struct { RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } -// RoleBinding constructs an declarative configuration of the RoleBinding type for use with +// RoleBinding constructs a declarative configuration of the RoleBinding type for use with // apply. func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration { b := &RoleBindingApplyConfiguration{} @@ -261,3 +261,9 @@ func (b *RoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyConfigura b.RoleRef = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RoleBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/roleref.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/roleref.go index ef03a48827..646a3bb194 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/roleref.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/roleref.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// RoleRefApplyConfiguration represents an declarative configuration of the RoleRef type for use +// RoleRefApplyConfiguration represents a declarative configuration of the RoleRef type for use // with apply. 
type RoleRefApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -26,7 +26,7 @@ type RoleRefApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// RoleRefApplyConfiguration constructs an declarative configuration of the RoleRef type for use with +// RoleRefApplyConfiguration constructs a declarative configuration of the RoleRef type for use with // apply. func RoleRef() *RoleRefApplyConfiguration { return &RoleRefApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/subject.go index ebc87fdc45..e1d9c5cfb8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/subject.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. type SubjectApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -27,7 +27,7 @@ type SubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/aggregationrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/aggregationrule.go index 63cdc3fcca..ff4aeb59e5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/aggregationrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/aggregationrule.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// AggregationRuleApplyConfiguration represents an declarative configuration of the AggregationRule type for use +// AggregationRuleApplyConfiguration represents a declarative configuration of the AggregationRule type for use // with apply. type AggregationRuleApplyConfiguration struct { ClusterRoleSelectors []v1.LabelSelectorApplyConfiguration `json:"clusterRoleSelectors,omitempty"` } -// AggregationRuleApplyConfiguration constructs an declarative configuration of the AggregationRule type for use with +// AggregationRuleApplyConfiguration constructs a declarative configuration of the AggregationRule type for use with // apply. func AggregationRule() *AggregationRuleApplyConfiguration { return &AggregationRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go index 19b1180fad..dc0e34e53b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterRoleApplyConfiguration represents an declarative configuration of the ClusterRole type for use +// ClusterRoleApplyConfiguration represents a declarative configuration of the ClusterRole type for use // with apply. 
type ClusterRoleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ClusterRoleApplyConfiguration struct { AggregationRule *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"` } -// ClusterRole constructs an declarative configuration of the ClusterRole type for use with +// ClusterRole constructs a declarative configuration of the ClusterRole type for use with // apply. func ClusterRole(name string) *ClusterRoleApplyConfiguration { b := &ClusterRoleApplyConfiguration{} @@ -259,3 +259,9 @@ func (b *ClusterRoleApplyConfiguration) WithAggregationRule(value *AggregationRu b.AggregationRule = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterRoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go index a1723efc35..d3c12ec508 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterRoleBindingApplyConfiguration represents an declarative configuration of the ClusterRoleBinding type for use +// ClusterRoleBindingApplyConfiguration represents a declarative configuration of the ClusterRoleBinding type for use // with apply. type ClusterRoleBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ClusterRoleBindingApplyConfiguration struct { RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } -// ClusterRoleBinding constructs an declarative configuration of the ClusterRoleBinding type for use with +// ClusterRoleBinding constructs a declarative configuration of the ClusterRoleBinding type for use with // apply. func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration { b := &ClusterRoleBindingApplyConfiguration{} @@ -259,3 +259,9 @@ func (b *ClusterRoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyCo b.RoleRef = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterRoleBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/policyrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/policyrule.go index 12143af130..89d7a2914f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/policyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/policyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// PolicyRuleApplyConfiguration represents an declarative configuration of the PolicyRule type for use +// PolicyRuleApplyConfiguration represents a declarative configuration of the PolicyRule type for use // with apply. 
type PolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type PolicyRuleApplyConfiguration struct { NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// PolicyRuleApplyConfiguration constructs an declarative configuration of the PolicyRule type for use with +// PolicyRuleApplyConfiguration constructs a declarative configuration of the PolicyRule type for use with // apply. func PolicyRule() *PolicyRuleApplyConfiguration { return &PolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go index cd256397a2..db0a4f7169 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RoleApplyConfiguration represents an declarative configuration of the Role type for use +// RoleApplyConfiguration represents a declarative configuration of the Role type for use // with apply. type RoleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type RoleApplyConfiguration struct { Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"` } -// Role constructs an declarative configuration of the Role type for use with +// Role constructs a declarative configuration of the Role type for use with // apply. func Role(name, namespace string) *RoleApplyConfiguration { b := &RoleApplyConfiguration{} @@ -252,3 +252,9 @@ func (b *RoleApplyConfiguration) WithRules(values ...*PolicyRuleApplyConfigurati } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go index a0ec20d0b1..8efcddd69d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RoleBindingApplyConfiguration represents an declarative configuration of the RoleBinding type for use +// RoleBindingApplyConfiguration represents a declarative configuration of the RoleBinding type for use // with apply. type RoleBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type RoleBindingApplyConfiguration struct { RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } -// RoleBinding constructs an declarative configuration of the RoleBinding type for use with +// RoleBinding constructs a declarative configuration of the RoleBinding type for use with // apply. func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration { b := &RoleBindingApplyConfiguration{} @@ -261,3 +261,9 @@ func (b *RoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyConfigura b.RoleRef = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *RoleBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/roleref.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/roleref.go index 40dbc33073..4b2553117d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/roleref.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/roleref.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// RoleRefApplyConfiguration represents an declarative configuration of the RoleRef type for use +// RoleRefApplyConfiguration represents a declarative configuration of the RoleRef type for use // with apply. type RoleRefApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -26,7 +26,7 @@ type RoleRefApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// RoleRefApplyConfiguration constructs an declarative configuration of the RoleRef type for use with +// RoleRefApplyConfiguration constructs a declarative configuration of the RoleRef type for use with // apply. func RoleRef() *RoleRefApplyConfiguration { return &RoleRefApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/subject.go index 46640dbbe9..665b42af50 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/subject.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. type SubjectApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -27,7 +27,7 @@ type SubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/aggregationrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/aggregationrule.go index d52ac3db9b..e9bb68dcb6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/aggregationrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/aggregationrule.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// AggregationRuleApplyConfiguration represents an declarative configuration of the AggregationRule type for use +// AggregationRuleApplyConfiguration represents a declarative configuration of the AggregationRule type for use // with apply. type AggregationRuleApplyConfiguration struct { ClusterRoleSelectors []v1.LabelSelectorApplyConfiguration `json:"clusterRoleSelectors,omitempty"` } -// AggregationRuleApplyConfiguration constructs an declarative configuration of the AggregationRule type for use with +// AggregationRuleApplyConfiguration constructs a declarative configuration of the AggregationRule type for use with // apply. 
func AggregationRule() *AggregationRuleApplyConfiguration { return &AggregationRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go index cf714ecc27..5e9c238540 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterRoleApplyConfiguration represents an declarative configuration of the ClusterRole type for use +// ClusterRoleApplyConfiguration represents a declarative configuration of the ClusterRole type for use // with apply. type ClusterRoleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ClusterRoleApplyConfiguration struct { AggregationRule *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"` } -// ClusterRole constructs an declarative configuration of the ClusterRole type for use with +// ClusterRole constructs a declarative configuration of the ClusterRole type for use with // apply. func ClusterRole(name string) *ClusterRoleApplyConfiguration { b := &ClusterRoleApplyConfiguration{} @@ -259,3 +259,9 @@ func (b *ClusterRoleApplyConfiguration) WithAggregationRule(value *AggregationRu b.AggregationRule = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterRoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go index b97cbcba2f..2f088b93e5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterRoleBindingApplyConfiguration represents an declarative configuration of the ClusterRoleBinding type for use +// ClusterRoleBindingApplyConfiguration represents a declarative configuration of the ClusterRoleBinding type for use // with apply. type ClusterRoleBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ClusterRoleBindingApplyConfiguration struct { RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } -// ClusterRoleBinding constructs an declarative configuration of the ClusterRoleBinding type for use with +// ClusterRoleBinding constructs a declarative configuration of the ClusterRoleBinding type for use with // apply. func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration { b := &ClusterRoleBindingApplyConfiguration{} @@ -259,3 +259,9 @@ func (b *ClusterRoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyCo b.RoleRef = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ClusterRoleBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/policyrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/policyrule.go index c63dc68c6b..dc630df206 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/policyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/policyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// PolicyRuleApplyConfiguration represents an declarative configuration of the PolicyRule type for use +// PolicyRuleApplyConfiguration represents a declarative configuration of the PolicyRule type for use // with apply. type PolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type PolicyRuleApplyConfiguration struct { NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// PolicyRuleApplyConfiguration constructs an declarative configuration of the PolicyRule type for use with +// PolicyRuleApplyConfiguration constructs a declarative configuration of the PolicyRule type for use with // apply. func PolicyRule() *PolicyRuleApplyConfiguration { return &PolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go index 53a751eb34..4b1b6112bd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RoleApplyConfiguration represents an declarative configuration of the Role type for use +// RoleApplyConfiguration represents a declarative configuration of the Role type for use // with apply. type RoleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type RoleApplyConfiguration struct { Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"` } -// Role constructs an declarative configuration of the Role type for use with +// Role constructs a declarative configuration of the Role type for use with // apply. func Role(name, namespace string) *RoleApplyConfiguration { b := &RoleApplyConfiguration{} @@ -252,3 +252,9 @@ func (b *RoleApplyConfiguration) WithRules(values ...*PolicyRuleApplyConfigurati } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go index ecccdf91b1..246928553f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RoleBindingApplyConfiguration represents an declarative configuration of the RoleBinding type for use +// RoleBindingApplyConfiguration represents a declarative configuration of the RoleBinding type for use // with apply. 
type RoleBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type RoleBindingApplyConfiguration struct { RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } -// RoleBinding constructs an declarative configuration of the RoleBinding type for use with +// RoleBinding constructs a declarative configuration of the RoleBinding type for use with // apply. func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration { b := &RoleBindingApplyConfiguration{} @@ -261,3 +261,9 @@ func (b *RoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyConfigura b.RoleRef = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RoleBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/roleref.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/roleref.go index e6a02dc602..19d0420a81 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/roleref.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/roleref.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// RoleRefApplyConfiguration represents an declarative configuration of the RoleRef type for use +// RoleRefApplyConfiguration represents a declarative configuration of the RoleRef type for use // with apply. type RoleRefApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -26,7 +26,7 @@ type RoleRefApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// RoleRefApplyConfiguration constructs an declarative configuration of the RoleRef type for use with +// RoleRefApplyConfiguration constructs a declarative configuration of the RoleRef type for use with // apply. func RoleRef() *RoleRefApplyConfiguration { return &RoleRefApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/subject.go index b616da8b13..f7c1a21a9c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/subject.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. type SubjectApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -27,7 +27,7 @@ type SubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go deleted file mode 100644 index bc6078aa94..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - v1 "k8s.io/client-go/applyconfigurations/core/v1" -) - -// AllocationResultApplyConfiguration represents an declarative configuration of the AllocationResult type for use -// with apply. -type AllocationResultApplyConfiguration struct { - ResourceHandles []ResourceHandleApplyConfiguration `json:"resourceHandles,omitempty"` - AvailableOnNodes *v1.NodeSelectorApplyConfiguration `json:"availableOnNodes,omitempty"` - Shareable *bool `json:"shareable,omitempty"` -} - -// AllocationResultApplyConfiguration constructs an declarative configuration of the AllocationResult type for use with -// apply. -func AllocationResult() *AllocationResultApplyConfiguration { - return &AllocationResultApplyConfiguration{} -} - -// WithResourceHandles adds the given value to the ResourceHandles field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the ResourceHandles field. -func (b *AllocationResultApplyConfiguration) WithResourceHandles(values ...*ResourceHandleApplyConfiguration) *AllocationResultApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithResourceHandles") - } - b.ResourceHandles = append(b.ResourceHandles, *values[i]) - } - return b -} - -// WithAvailableOnNodes sets the AvailableOnNodes field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AvailableOnNodes field is set to the value of the last call. -func (b *AllocationResultApplyConfiguration) WithAvailableOnNodes(value *v1.NodeSelectorApplyConfiguration) *AllocationResultApplyConfiguration { - b.AvailableOnNodes = value - return b -} - -// WithShareable sets the Shareable field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Shareable field is set to the value of the last call. -func (b *AllocationResultApplyConfiguration) WithShareable(value bool) *AllocationResultApplyConfiguration { - b.Shareable = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go deleted file mode 100644 index 27820ede60..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// ResourceClaimParametersReferenceApplyConfiguration represents an declarative configuration of the ResourceClaimParametersReference type for use -// with apply. -type ResourceClaimParametersReferenceApplyConfiguration struct { - APIGroup *string `json:"apiGroup,omitempty"` - Kind *string `json:"kind,omitempty"` - Name *string `json:"name,omitempty"` -} - -// ResourceClaimParametersReferenceApplyConfiguration constructs an declarative configuration of the ResourceClaimParametersReference type for use with -// apply. -func ResourceClaimParametersReference() *ResourceClaimParametersReferenceApplyConfiguration { - return &ResourceClaimParametersReferenceApplyConfiguration{} -} - -// WithAPIGroup sets the APIGroup field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIGroup field is set to the value of the last call. -func (b *ResourceClaimParametersReferenceApplyConfiguration) WithAPIGroup(value string) *ResourceClaimParametersReferenceApplyConfiguration { - b.APIGroup = &value - return b -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *ResourceClaimParametersReferenceApplyConfiguration) WithKind(value string) *ResourceClaimParametersReferenceApplyConfiguration { - b.Kind = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *ResourceClaimParametersReferenceApplyConfiguration) WithName(value string) *ResourceClaimParametersReferenceApplyConfiguration { - b.Name = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go deleted file mode 100644 index 0c73e64e9e..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. 
- -package v1alpha2 - -import ( - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" -) - -// ResourceClaimSpecApplyConfiguration represents an declarative configuration of the ResourceClaimSpec type for use -// with apply. -type ResourceClaimSpecApplyConfiguration struct { - ResourceClassName *string `json:"resourceClassName,omitempty"` - ParametersRef *ResourceClaimParametersReferenceApplyConfiguration `json:"parametersRef,omitempty"` - AllocationMode *resourcev1alpha2.AllocationMode `json:"allocationMode,omitempty"` -} - -// ResourceClaimSpecApplyConfiguration constructs an declarative configuration of the ResourceClaimSpec type for use with -// apply. -func ResourceClaimSpec() *ResourceClaimSpecApplyConfiguration { - return &ResourceClaimSpecApplyConfiguration{} -} - -// WithResourceClassName sets the ResourceClassName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceClassName field is set to the value of the last call. -func (b *ResourceClaimSpecApplyConfiguration) WithResourceClassName(value string) *ResourceClaimSpecApplyConfiguration { - b.ResourceClassName = &value - return b -} - -// WithParametersRef sets the ParametersRef field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ParametersRef field is set to the value of the last call. -func (b *ResourceClaimSpecApplyConfiguration) WithParametersRef(value *ResourceClaimParametersReferenceApplyConfiguration) *ResourceClaimSpecApplyConfiguration { - b.ParametersRef = value - return b -} - -// WithAllocationMode sets the AllocationMode field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AllocationMode field is set to the value of the last call. -func (b *ResourceClaimSpecApplyConfiguration) WithAllocationMode(value resourcev1alpha2.AllocationMode) *ResourceClaimSpecApplyConfiguration { - b.AllocationMode = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go deleted file mode 100644 index 028cbaa1a7..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// ResourceHandleApplyConfiguration represents an declarative configuration of the ResourceHandle type for use -// with apply. 
-type ResourceHandleApplyConfiguration struct { - DriverName *string `json:"driverName,omitempty"` - Data *string `json:"data,omitempty"` -} - -// ResourceHandleApplyConfiguration constructs an declarative configuration of the ResourceHandle type for use with -// apply. -func ResourceHandle() *ResourceHandleApplyConfiguration { - return &ResourceHandleApplyConfiguration{} -} - -// WithDriverName sets the DriverName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DriverName field is set to the value of the last call. -func (b *ResourceHandleApplyConfiguration) WithDriverName(value string) *ResourceHandleApplyConfiguration { - b.DriverName = &value - return b -} - -// WithData sets the Data field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Data field is set to the value of the last call. -func (b *ResourceHandleApplyConfiguration) WithData(value string) *ResourceHandleApplyConfiguration { - b.Data = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go new file mode 100644 index 0000000000..3090b2f9d3 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go @@ -0,0 +1,61 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1 "k8s.io/client-go/applyconfigurations/core/v1" +) + +// AllocationResultApplyConfiguration represents a declarative configuration of the AllocationResult type for use +// with apply. +type AllocationResultApplyConfiguration struct { + Devices *DeviceAllocationResultApplyConfiguration `json:"devices,omitempty"` + NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"` + Controller *string `json:"controller,omitempty"` +} + +// AllocationResultApplyConfiguration constructs a declarative configuration of the AllocationResult type for use with +// apply. +func AllocationResult() *AllocationResultApplyConfiguration { + return &AllocationResultApplyConfiguration{} +} + +// WithDevices sets the Devices field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Devices field is set to the value of the last call. 
+func (b *AllocationResultApplyConfiguration) WithDevices(value *DeviceAllocationResultApplyConfiguration) *AllocationResultApplyConfiguration { + b.Devices = value + return b +} + +// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeSelector field is set to the value of the last call. +func (b *AllocationResultApplyConfiguration) WithNodeSelector(value *v1.NodeSelectorApplyConfiguration) *AllocationResultApplyConfiguration { + b.NodeSelector = value + return b +} + +// WithController sets the Controller field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Controller field is set to the value of the last call. +func (b *AllocationResultApplyConfiguration) WithController(value string) *AllocationResultApplyConfiguration { + b.Controller = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go new file mode 100644 index 0000000000..e6b7745082 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "k8s.io/api/resource/v1alpha3" + resource "k8s.io/apimachinery/pkg/api/resource" +) + +// BasicDeviceApplyConfiguration represents a declarative configuration of the BasicDevice type for use +// with apply. +type BasicDeviceApplyConfiguration struct { + Attributes map[v1alpha3.QualifiedName]DeviceAttributeApplyConfiguration `json:"attributes,omitempty"` + Capacity map[v1alpha3.QualifiedName]resource.Quantity `json:"capacity,omitempty"` +} + +// BasicDeviceApplyConfiguration constructs a declarative configuration of the BasicDevice type for use with +// apply. +func BasicDevice() *BasicDeviceApplyConfiguration { + return &BasicDeviceApplyConfiguration{} +} + +// WithAttributes puts the entries into the Attributes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Attributes field, +// overwriting an existing map entries in Attributes field with the same key. 
+func (b *BasicDeviceApplyConfiguration) WithAttributes(entries map[v1alpha3.QualifiedName]DeviceAttributeApplyConfiguration) *BasicDeviceApplyConfiguration { + if b.Attributes == nil && len(entries) > 0 { + b.Attributes = make(map[v1alpha3.QualifiedName]DeviceAttributeApplyConfiguration, len(entries)) + } + for k, v := range entries { + b.Attributes[k] = v + } + return b +} + +// WithCapacity puts the entries into the Capacity field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Capacity field, +// overwriting an existing map entries in Capacity field with the same key. +func (b *BasicDeviceApplyConfiguration) WithCapacity(entries map[v1alpha3.QualifiedName]resource.Quantity) *BasicDeviceApplyConfiguration { + if b.Capacity == nil && len(entries) > 0 { + b.Capacity = make(map[v1alpha3.QualifiedName]resource.Quantity, len(entries)) + } + for k, v := range entries { + b.Capacity[k] = v + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/celdeviceselector.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/celdeviceselector.go new file mode 100644 index 0000000000..c59b6a2e37 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/celdeviceselector.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// CELDeviceSelectorApplyConfiguration represents a declarative configuration of the CELDeviceSelector type for use +// with apply. +type CELDeviceSelectorApplyConfiguration struct { + Expression *string `json:"expression,omitempty"` +} + +// CELDeviceSelectorApplyConfiguration constructs a declarative configuration of the CELDeviceSelector type for use with +// apply. +func CELDeviceSelector() *CELDeviceSelectorApplyConfiguration { + return &CELDeviceSelectorApplyConfiguration{} +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *CELDeviceSelectorApplyConfiguration) WithExpression(value string) *CELDeviceSelectorApplyConfiguration { + b.Expression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/device.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/device.go new file mode 100644 index 0000000000..efdb5f37a9 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/device.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceApplyConfiguration represents a declarative configuration of the Device type for use +// with apply. +type DeviceApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Basic *BasicDeviceApplyConfiguration `json:"basic,omitempty"` +} + +// DeviceApplyConfiguration constructs a declarative configuration of the Device type for use with +// apply. +func Device() *DeviceApplyConfiguration { + return &DeviceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *DeviceApplyConfiguration) WithName(value string) *DeviceApplyConfiguration { + b.Name = &value + return b +} + +// WithBasic sets the Basic field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Basic field is set to the value of the last call. +func (b *DeviceApplyConfiguration) WithBasic(value *BasicDeviceApplyConfiguration) *DeviceApplyConfiguration { + b.Basic = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationconfiguration.go new file mode 100644 index 0000000000..342e724ef0 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationconfiguration.go @@ -0,0 +1,63 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "k8s.io/api/resource/v1alpha3" +) + +// DeviceAllocationConfigurationApplyConfiguration represents a declarative configuration of the DeviceAllocationConfiguration type for use +// with apply. +type DeviceAllocationConfigurationApplyConfiguration struct { + Source *v1alpha3.AllocationConfigSource `json:"source,omitempty"` + Requests []string `json:"requests,omitempty"` + DeviceConfigurationApplyConfiguration `json:",inline"` +} + +// DeviceAllocationConfigurationApplyConfiguration constructs a declarative configuration of the DeviceAllocationConfiguration type for use with +// apply. 
+func DeviceAllocationConfiguration() *DeviceAllocationConfigurationApplyConfiguration { + return &DeviceAllocationConfigurationApplyConfiguration{} +} + +// WithSource sets the Source field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Source field is set to the value of the last call. +func (b *DeviceAllocationConfigurationApplyConfiguration) WithSource(value v1alpha3.AllocationConfigSource) *DeviceAllocationConfigurationApplyConfiguration { + b.Source = &value + return b +} + +// WithRequests adds the given value to the Requests field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Requests field. +func (b *DeviceAllocationConfigurationApplyConfiguration) WithRequests(values ...string) *DeviceAllocationConfigurationApplyConfiguration { + for i := range values { + b.Requests = append(b.Requests, values[i]) + } + return b +} + +// WithOpaque sets the Opaque field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Opaque field is set to the value of the last call. +func (b *DeviceAllocationConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceAllocationConfigurationApplyConfiguration { + b.Opaque = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationresult.go new file mode 100644 index 0000000000..0cfb264b4e --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationresult.go @@ -0,0 +1,58 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceAllocationResultApplyConfiguration represents a declarative configuration of the DeviceAllocationResult type for use +// with apply. +type DeviceAllocationResultApplyConfiguration struct { + Results []DeviceRequestAllocationResultApplyConfiguration `json:"results,omitempty"` + Config []DeviceAllocationConfigurationApplyConfiguration `json:"config,omitempty"` +} + +// DeviceAllocationResultApplyConfiguration constructs a declarative configuration of the DeviceAllocationResult type for use with +// apply. +func DeviceAllocationResult() *DeviceAllocationResultApplyConfiguration { + return &DeviceAllocationResultApplyConfiguration{} +} + +// WithResults adds the given value to the Results field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the Results field. +func (b *DeviceAllocationResultApplyConfiguration) WithResults(values ...*DeviceRequestAllocationResultApplyConfiguration) *DeviceAllocationResultApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResults") + } + b.Results = append(b.Results, *values[i]) + } + return b +} + +// WithConfig adds the given value to the Config field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Config field. +func (b *DeviceAllocationResultApplyConfiguration) WithConfig(values ...*DeviceAllocationConfigurationApplyConfiguration) *DeviceAllocationResultApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConfig") + } + b.Config = append(b.Config, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceattribute.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceattribute.go new file mode 100644 index 0000000000..6b0b7a40ac --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceattribute.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceAttributeApplyConfiguration represents a declarative configuration of the DeviceAttribute type for use +// with apply. +type DeviceAttributeApplyConfiguration struct { + IntValue *int64 `json:"int,omitempty"` + BoolValue *bool `json:"bool,omitempty"` + StringValue *string `json:"string,omitempty"` + VersionValue *string `json:"version,omitempty"` +} + +// DeviceAttributeApplyConfiguration constructs a declarative configuration of the DeviceAttribute type for use with +// apply. +func DeviceAttribute() *DeviceAttributeApplyConfiguration { + return &DeviceAttributeApplyConfiguration{} +} + +// WithIntValue sets the IntValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IntValue field is set to the value of the last call. +func (b *DeviceAttributeApplyConfiguration) WithIntValue(value int64) *DeviceAttributeApplyConfiguration { + b.IntValue = &value + return b +} + +// WithBoolValue sets the BoolValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BoolValue field is set to the value of the last call. 
+func (b *DeviceAttributeApplyConfiguration) WithBoolValue(value bool) *DeviceAttributeApplyConfiguration { + b.BoolValue = &value + return b +} + +// WithStringValue sets the StringValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the StringValue field is set to the value of the last call. +func (b *DeviceAttributeApplyConfiguration) WithStringValue(value string) *DeviceAttributeApplyConfiguration { + b.StringValue = &value + return b +} + +// WithVersionValue sets the VersionValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VersionValue field is set to the value of the last call. +func (b *DeviceAttributeApplyConfiguration) WithVersionValue(value string) *DeviceAttributeApplyConfiguration { + b.VersionValue = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaim.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaim.go new file mode 100644 index 0000000000..ce3ab56d8b --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaim.go @@ -0,0 +1,72 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceClaimApplyConfiguration represents a declarative configuration of the DeviceClaim type for use +// with apply. +type DeviceClaimApplyConfiguration struct { + Requests []DeviceRequestApplyConfiguration `json:"requests,omitempty"` + Constraints []DeviceConstraintApplyConfiguration `json:"constraints,omitempty"` + Config []DeviceClaimConfigurationApplyConfiguration `json:"config,omitempty"` +} + +// DeviceClaimApplyConfiguration constructs a declarative configuration of the DeviceClaim type for use with +// apply. +func DeviceClaim() *DeviceClaimApplyConfiguration { + return &DeviceClaimApplyConfiguration{} +} + +// WithRequests adds the given value to the Requests field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Requests field. +func (b *DeviceClaimApplyConfiguration) WithRequests(values ...*DeviceRequestApplyConfiguration) *DeviceClaimApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRequests") + } + b.Requests = append(b.Requests, *values[i]) + } + return b +} + +// WithConstraints adds the given value to the Constraints field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Constraints field. 
+func (b *DeviceClaimApplyConfiguration) WithConstraints(values ...*DeviceConstraintApplyConfiguration) *DeviceClaimApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConstraints") + } + b.Constraints = append(b.Constraints, *values[i]) + } + return b +} + +// WithConfig adds the given value to the Config field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Config field. +func (b *DeviceClaimApplyConfiguration) WithConfig(values ...*DeviceClaimConfigurationApplyConfiguration) *DeviceClaimApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConfig") + } + b.Config = append(b.Config, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaimconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaimconfiguration.go new file mode 100644 index 0000000000..4cabe98599 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaimconfiguration.go @@ -0,0 +1,50 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceClaimConfigurationApplyConfiguration represents a declarative configuration of the DeviceClaimConfiguration type for use +// with apply. +type DeviceClaimConfigurationApplyConfiguration struct { + Requests []string `json:"requests,omitempty"` + DeviceConfigurationApplyConfiguration `json:",inline"` +} + +// DeviceClaimConfigurationApplyConfiguration constructs a declarative configuration of the DeviceClaimConfiguration type for use with +// apply. +func DeviceClaimConfiguration() *DeviceClaimConfigurationApplyConfiguration { + return &DeviceClaimConfigurationApplyConfiguration{} +} + +// WithRequests adds the given value to the Requests field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Requests field. +func (b *DeviceClaimConfigurationApplyConfiguration) WithRequests(values ...string) *DeviceClaimConfigurationApplyConfiguration { + for i := range values { + b.Requests = append(b.Requests, values[i]) + } + return b +} + +// WithOpaque sets the Opaque field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Opaque field is set to the value of the last call. 
+func (b *DeviceClaimConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceClaimConfigurationApplyConfiguration { + b.Opaque = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclass.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclass.go new file mode 100644 index 0000000000..abaadbb366 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclass.go @@ -0,0 +1,253 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// DeviceClassApplyConfiguration represents a declarative configuration of the DeviceClass type for use +// with apply. +type DeviceClassApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *DeviceClassSpecApplyConfiguration `json:"spec,omitempty"` +} + +// DeviceClass constructs a declarative configuration of the DeviceClass type for use with +// apply. +func DeviceClass(name string) *DeviceClassApplyConfiguration { + b := &DeviceClassApplyConfiguration{} + b.WithName(name) + b.WithKind("DeviceClass") + b.WithAPIVersion("resource.k8s.io/v1alpha3") + return b +} + +// ExtractDeviceClass extracts the applied configuration owned by fieldManager from +// deviceClass. If no managedFields are found in deviceClass for fieldManager, a +// DeviceClassApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// deviceClass must be a unmodified DeviceClass API object that was retrieved from the Kubernetes API. +// ExtractDeviceClass provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractDeviceClass(deviceClass *resourcev1alpha3.DeviceClass, fieldManager string) (*DeviceClassApplyConfiguration, error) { + return extractDeviceClass(deviceClass, fieldManager, "") +} + +// ExtractDeviceClassStatus is the same as ExtractDeviceClass except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractDeviceClassStatus(deviceClass *resourcev1alpha3.DeviceClass, fieldManager string) (*DeviceClassApplyConfiguration, error) { + return extractDeviceClass(deviceClass, fieldManager, "status") +} + +func extractDeviceClass(deviceClass *resourcev1alpha3.DeviceClass, fieldManager string, subresource string) (*DeviceClassApplyConfiguration, error) { + b := &DeviceClassApplyConfiguration{} + err := managedfields.ExtractInto(deviceClass, internal.Parser().Type("io.k8s.api.resource.v1alpha3.DeviceClass"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(deviceClass.Name) + + b.WithKind("DeviceClass") + b.WithAPIVersion("resource.k8s.io/v1alpha3") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithKind(value string) *DeviceClassApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithAPIVersion(value string) *DeviceClassApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithName(value string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithGenerateName(value string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithNamespace(value string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *DeviceClassApplyConfiguration) WithUID(value types.UID) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithResourceVersion(value string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithGeneration(value int64) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *DeviceClassApplyConfiguration) WithLabels(entries map[string]string) *DeviceClassApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *DeviceClassApplyConfiguration) WithAnnotations(entries map[string]string) *DeviceClassApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *DeviceClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *DeviceClassApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *DeviceClassApplyConfiguration) WithFinalizers(values ...string) *DeviceClassApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *DeviceClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *DeviceClassApplyConfiguration) WithSpec(value *DeviceClassSpecApplyConfiguration) *DeviceClassApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *DeviceClassApplyConfiguration) GetName() *string {
+ b.ensureObjectMetaApplyConfigurationExists()
+ return b.Name
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassconfiguration.go
new file mode 100644
index 0000000000..cb3758a3e3
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassconfiguration.go
@@ -0,0 +1,39 @@
+/*
+Copyright The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceClassConfigurationApplyConfiguration represents a declarative configuration of the DeviceClassConfiguration type for use +// with apply. +type DeviceClassConfigurationApplyConfiguration struct { + DeviceConfigurationApplyConfiguration `json:",inline"` +} + +// DeviceClassConfigurationApplyConfiguration constructs a declarative configuration of the DeviceClassConfiguration type for use with +// apply. +func DeviceClassConfiguration() *DeviceClassConfigurationApplyConfiguration { + return &DeviceClassConfigurationApplyConfiguration{} +} + +// WithOpaque sets the Opaque field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Opaque field is set to the value of the last call. +func (b *DeviceClassConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceClassConfigurationApplyConfiguration { + b.Opaque = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go new file mode 100644 index 0000000000..d40a43de66 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1 "k8s.io/client-go/applyconfigurations/core/v1" +) + +// DeviceClassSpecApplyConfiguration represents a declarative configuration of the DeviceClassSpec type for use +// with apply. +type DeviceClassSpecApplyConfiguration struct { + Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"` + Config []DeviceClassConfigurationApplyConfiguration `json:"config,omitempty"` + SuitableNodes *v1.NodeSelectorApplyConfiguration `json:"suitableNodes,omitempty"` +} + +// DeviceClassSpecApplyConfiguration constructs a declarative configuration of the DeviceClassSpec type for use with +// apply. 
+func DeviceClassSpec() *DeviceClassSpecApplyConfiguration {
+ return &DeviceClassSpecApplyConfiguration{}
+}
+
+// WithSelectors adds the given value to the Selectors field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Selectors field.
+func (b *DeviceClassSpecApplyConfiguration) WithSelectors(values ...*DeviceSelectorApplyConfiguration) *DeviceClassSpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithSelectors")
+ }
+ b.Selectors = append(b.Selectors, *values[i])
+ }
+ return b
+}
+
+// WithConfig adds the given value to the Config field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Config field.
+func (b *DeviceClassSpecApplyConfiguration) WithConfig(values ...*DeviceClassConfigurationApplyConfiguration) *DeviceClassSpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithConfig")
+ }
+ b.Config = append(b.Config, *values[i])
+ }
+ return b
+}
+
+// WithSuitableNodes sets the SuitableNodes field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the SuitableNodes field is set to the value of the last call.
+func (b *DeviceClassSpecApplyConfiguration) WithSuitableNodes(value *v1.NodeSelectorApplyConfiguration) *DeviceClassSpecApplyConfiguration {
+ b.SuitableNodes = value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconfiguration.go
new file mode 100644
index 0000000000..62c0d997de
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconfiguration.go
@@ -0,0 +1,39 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha3
+
+// DeviceConfigurationApplyConfiguration represents a declarative configuration of the DeviceConfiguration type for use
+// with apply.
+type DeviceConfigurationApplyConfiguration struct {
+ Opaque *OpaqueDeviceConfigurationApplyConfiguration `json:"opaque,omitempty"`
+}
+
+// DeviceConfigurationApplyConfiguration constructs a declarative configuration of the DeviceConfiguration type for use with
+// apply.
+func DeviceConfiguration() *DeviceConfigurationApplyConfiguration {
+ return &DeviceConfigurationApplyConfiguration{}
+}
+
+// WithOpaque sets the Opaque field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Opaque field is set to the value of the last call.
+func (b *DeviceConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceConfigurationApplyConfiguration {
+ b.Opaque = value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconstraint.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconstraint.go
new file mode 100644
index 0000000000..479acd57c2
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconstraint.go
@@ -0,0 +1,54 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha3
+
+import (
+ v1alpha3 "k8s.io/api/resource/v1alpha3"
+)
+
+// DeviceConstraintApplyConfiguration represents a declarative configuration of the DeviceConstraint type for use
+// with apply.
+type DeviceConstraintApplyConfiguration struct {
+ Requests []string `json:"requests,omitempty"`
+ MatchAttribute *v1alpha3.FullyQualifiedName `json:"matchAttribute,omitempty"`
+}
+
+// DeviceConstraintApplyConfiguration constructs a declarative configuration of the DeviceConstraint type for use with
+// apply.
+func DeviceConstraint() *DeviceConstraintApplyConfiguration {
+ return &DeviceConstraintApplyConfiguration{}
+}
+
+// WithRequests adds the given value to the Requests field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Requests field.
+func (b *DeviceConstraintApplyConfiguration) WithRequests(values ...string) *DeviceConstraintApplyConfiguration {
+ for i := range values {
+ b.Requests = append(b.Requests, values[i])
+ }
+ return b
+}
+
+// WithMatchAttribute sets the MatchAttribute field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MatchAttribute field is set to the value of the last call.
+func (b *DeviceConstraintApplyConfiguration) WithMatchAttribute(value v1alpha3.FullyQualifiedName) *DeviceConstraintApplyConfiguration {
+ b.MatchAttribute = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequest.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequest.go
new file mode 100644
index 0000000000..e5c87efe47
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequest.go
@@ -0,0 +1,93 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha3
+
+import (
+ resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
+)
+
+// DeviceRequestApplyConfiguration represents a declarative configuration of the DeviceRequest type for use
+// with apply.
+type DeviceRequestApplyConfiguration struct {
+ Name *string `json:"name,omitempty"`
+ DeviceClassName *string `json:"deviceClassName,omitempty"`
+ Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"`
+ AllocationMode *resourcev1alpha3.DeviceAllocationMode `json:"allocationMode,omitempty"`
+ Count *int64 `json:"count,omitempty"`
+ AdminAccess *bool `json:"adminAccess,omitempty"`
+}
+
+// DeviceRequestApplyConfiguration constructs a declarative configuration of the DeviceRequest type for use with
+// apply.
+func DeviceRequest() *DeviceRequestApplyConfiguration {
+ return &DeviceRequestApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *DeviceRequestApplyConfiguration) WithName(value string) *DeviceRequestApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithDeviceClassName sets the DeviceClassName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeviceClassName field is set to the value of the last call.
+func (b *DeviceRequestApplyConfiguration) WithDeviceClassName(value string) *DeviceRequestApplyConfiguration {
+ b.DeviceClassName = &value
+ return b
+}
+
+// WithSelectors adds the given value to the Selectors field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Selectors field.
+func (b *DeviceRequestApplyConfiguration) WithSelectors(values ...*DeviceSelectorApplyConfiguration) *DeviceRequestApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithSelectors")
+ }
+ b.Selectors = append(b.Selectors, *values[i])
+ }
+ return b
+}
+
+// WithAllocationMode sets the AllocationMode field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AllocationMode field is set to the value of the last call.
+func (b *DeviceRequestApplyConfiguration) WithAllocationMode(value resourcev1alpha3.DeviceAllocationMode) *DeviceRequestApplyConfiguration {
+ b.AllocationMode = &value
+ return b
+}
+
+// WithCount sets the Count field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Count field is set to the value of the last call.
+func (b *DeviceRequestApplyConfiguration) WithCount(value int64) *DeviceRequestApplyConfiguration { + b.Count = &value + return b +} + +// WithAdminAccess sets the AdminAccess field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AdminAccess field is set to the value of the last call. +func (b *DeviceRequestApplyConfiguration) WithAdminAccess(value bool) *DeviceRequestApplyConfiguration { + b.AdminAccess = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequestallocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequestallocationresult.go new file mode 100644 index 0000000000..712b9bf9b1 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequestallocationresult.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceRequestAllocationResultApplyConfiguration represents a declarative configuration of the DeviceRequestAllocationResult type for use +// with apply. +type DeviceRequestAllocationResultApplyConfiguration struct { + Request *string `json:"request,omitempty"` + Driver *string `json:"driver,omitempty"` + Pool *string `json:"pool,omitempty"` + Device *string `json:"device,omitempty"` +} + +// DeviceRequestAllocationResultApplyConfiguration constructs a declarative configuration of the DeviceRequestAllocationResult type for use with +// apply. +func DeviceRequestAllocationResult() *DeviceRequestAllocationResultApplyConfiguration { + return &DeviceRequestAllocationResultApplyConfiguration{} +} + +// WithRequest sets the Request field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Request field is set to the value of the last call. +func (b *DeviceRequestAllocationResultApplyConfiguration) WithRequest(value string) *DeviceRequestAllocationResultApplyConfiguration { + b.Request = &value + return b +} + +// WithDriver sets the Driver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Driver field is set to the value of the last call. +func (b *DeviceRequestAllocationResultApplyConfiguration) WithDriver(value string) *DeviceRequestAllocationResultApplyConfiguration { + b.Driver = &value + return b +} + +// WithPool sets the Pool field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Pool field is set to the value of the last call. 
+func (b *DeviceRequestAllocationResultApplyConfiguration) WithPool(value string) *DeviceRequestAllocationResultApplyConfiguration { + b.Pool = &value + return b +} + +// WithDevice sets the Device field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Device field is set to the value of the last call. +func (b *DeviceRequestAllocationResultApplyConfiguration) WithDevice(value string) *DeviceRequestAllocationResultApplyConfiguration { + b.Device = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceselector.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceselector.go new file mode 100644 index 0000000000..574299d15e --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceselector.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceSelectorApplyConfiguration represents a declarative configuration of the DeviceSelector type for use +// with apply. +type DeviceSelectorApplyConfiguration struct { + CEL *CELDeviceSelectorApplyConfiguration `json:"cel,omitempty"` +} + +// DeviceSelectorApplyConfiguration constructs a declarative configuration of the DeviceSelector type for use with +// apply. +func DeviceSelector() *DeviceSelectorApplyConfiguration { + return &DeviceSelectorApplyConfiguration{} +} + +// WithCEL sets the CEL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CEL field is set to the value of the last call. +func (b *DeviceSelectorApplyConfiguration) WithCEL(value *CELDeviceSelectorApplyConfiguration) *DeviceSelectorApplyConfiguration { + b.CEL = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/opaquedeviceconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/opaquedeviceconfiguration.go new file mode 100644 index 0000000000..caf9d059c3 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/opaquedeviceconfiguration.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1alpha3 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// OpaqueDeviceConfigurationApplyConfiguration represents a declarative configuration of the OpaqueDeviceConfiguration type for use +// with apply. +type OpaqueDeviceConfigurationApplyConfiguration struct { + Driver *string `json:"driver,omitempty"` + Parameters *runtime.RawExtension `json:"parameters,omitempty"` +} + +// OpaqueDeviceConfigurationApplyConfiguration constructs a declarative configuration of the OpaqueDeviceConfiguration type for use with +// apply. +func OpaqueDeviceConfiguration() *OpaqueDeviceConfigurationApplyConfiguration { + return &OpaqueDeviceConfigurationApplyConfiguration{} +} + +// WithDriver sets the Driver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Driver field is set to the value of the last call. +func (b *OpaqueDeviceConfigurationApplyConfiguration) WithDriver(value string) *OpaqueDeviceConfigurationApplyConfiguration { + b.Driver = &value + return b +} + +// WithParameters sets the Parameters field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Parameters field is set to the value of the last call. +func (b *OpaqueDeviceConfigurationApplyConfiguration) WithParameters(value runtime.RawExtension) *OpaqueDeviceConfigurationApplyConfiguration { + b.Parameters = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontext.go similarity index 93% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontext.go index 1dfb6ff97b..ee8e73ebe2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontext.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodSchedulingContextApplyConfiguration represents an declarative configuration of the PodSchedulingContext type for use +// PodSchedulingContextApplyConfiguration represents a declarative configuration of the PodSchedulingContext type for use // with apply. type PodSchedulingContextApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,14 +36,14 @@ type PodSchedulingContextApplyConfiguration struct { Status *PodSchedulingContextStatusApplyConfiguration `json:"status,omitempty"` } -// PodSchedulingContext constructs an declarative configuration of the PodSchedulingContext type for use with +// PodSchedulingContext constructs a declarative configuration of the PodSchedulingContext type for use with // apply. 
func PodSchedulingContext(name, namespace string) *PodSchedulingContextApplyConfiguration { b := &PodSchedulingContextApplyConfiguration{} b.WithName(name) b.WithNamespace(namespace) b.WithKind("PodSchedulingContext") - b.WithAPIVersion("resource.k8s.io/v1alpha2") + b.WithAPIVersion("resource.k8s.io/v1alpha3") return b } @@ -58,20 +58,20 @@ func PodSchedulingContext(name, namespace string) *PodSchedulingContextApplyConf // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPodSchedulingContext(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) { +func ExtractPodSchedulingContext(podSchedulingContext *resourcev1alpha3.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) { return extractPodSchedulingContext(podSchedulingContext, fieldManager, "") } // ExtractPodSchedulingContextStatus is the same as ExtractPodSchedulingContext except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractPodSchedulingContextStatus(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) { +func ExtractPodSchedulingContextStatus(podSchedulingContext *resourcev1alpha3.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) { return extractPodSchedulingContext(podSchedulingContext, fieldManager, "status") } -func extractPodSchedulingContext(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string, subresource string) (*PodSchedulingContextApplyConfiguration, error) { +func extractPodSchedulingContext(podSchedulingContext *resourcev1alpha3.PodSchedulingContext, fieldManager string, subresource string) (*PodSchedulingContextApplyConfiguration, error) { b := &PodSchedulingContextApplyConfiguration{} - err := managedfields.ExtractInto(podSchedulingContext, internal.Parser().Type("io.k8s.api.resource.v1alpha2.PodSchedulingContext"), fieldManager, b, subresource) + err := managedfields.ExtractInto(podSchedulingContext, internal.Parser().Type("io.k8s.api.resource.v1alpha3.PodSchedulingContext"), fieldManager, b, subresource) if err != nil { return nil, err } @@ -79,7 +79,7 @@ func extractPodSchedulingContext(podSchedulingContext *resourcev1alpha2.PodSched b.WithNamespace(podSchedulingContext.Namespace) b.WithKind("PodSchedulingContext") - b.WithAPIVersion("resource.k8s.io/v1alpha2") + b.WithAPIVersion("resource.k8s.io/v1alpha3") return b, nil } @@ -256,3 +256,9 @@ func (b *PodSchedulingContextApplyConfiguration) WithStatus(value *PodScheduling b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *PodSchedulingContextApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextspec.go similarity index 87% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextspec.go index c95d3295e8..fd25df7a53 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextspec.go @@ -16,16 +16,16 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 -// PodSchedulingContextSpecApplyConfiguration represents an declarative configuration of the PodSchedulingContextSpec type for use +// PodSchedulingContextSpecApplyConfiguration represents a declarative configuration of the PodSchedulingContextSpec type for use // with apply. type PodSchedulingContextSpecApplyConfiguration struct { SelectedNode *string `json:"selectedNode,omitempty"` PotentialNodes []string `json:"potentialNodes,omitempty"` } -// PodSchedulingContextSpecApplyConfiguration constructs an declarative configuration of the PodSchedulingContextSpec type for use with +// PodSchedulingContextSpecApplyConfiguration constructs a declarative configuration of the PodSchedulingContextSpec type for use with // apply. func PodSchedulingContextSpec() *PodSchedulingContextSpecApplyConfiguration { return &PodSchedulingContextSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextstatus.go similarity index 84% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextstatus.go index a8b10b9a0e..a06e370cc3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextstatus.go @@ -16,15 +16,15 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 -// PodSchedulingContextStatusApplyConfiguration represents an declarative configuration of the PodSchedulingContextStatus type for use +// PodSchedulingContextStatusApplyConfiguration represents a declarative configuration of the PodSchedulingContextStatus type for use // with apply. type PodSchedulingContextStatusApplyConfiguration struct { ResourceClaims []ResourceClaimSchedulingStatusApplyConfiguration `json:"resourceClaims,omitempty"` } -// PodSchedulingContextStatusApplyConfiguration constructs an declarative configuration of the PodSchedulingContextStatus type for use with +// PodSchedulingContextStatusApplyConfiguration constructs a declarative configuration of the PodSchedulingContextStatus type for use with // apply. 
func PodSchedulingContextStatus() *PodSchedulingContextStatusApplyConfiguration { return &PodSchedulingContextStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaim.go similarity index 93% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaim.go index 6c219f837b..6161595588 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaim.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ResourceClaimApplyConfiguration represents an declarative configuration of the ResourceClaim type for use +// ResourceClaimApplyConfiguration represents a declarative configuration of the ResourceClaim type for use // with apply. type ResourceClaimApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,14 +36,14 @@ type ResourceClaimApplyConfiguration struct { Status *ResourceClaimStatusApplyConfiguration `json:"status,omitempty"` } -// ResourceClaim constructs an declarative configuration of the ResourceClaim type for use with +// ResourceClaim constructs a declarative configuration of the ResourceClaim type for use with // apply. func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration { b := &ResourceClaimApplyConfiguration{} b.WithName(name) b.WithNamespace(namespace) b.WithKind("ResourceClaim") - b.WithAPIVersion("resource.k8s.io/v1alpha2") + b.WithAPIVersion("resource.k8s.io/v1alpha3") return b } @@ -58,20 +58,20 @@ func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractResourceClaim(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { +func ExtractResourceClaim(resourceClaim *resourcev1alpha3.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { return extractResourceClaim(resourceClaim, fieldManager, "") } // ExtractResourceClaimStatus is the same as ExtractResourceClaim except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractResourceClaimStatus(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { +func ExtractResourceClaimStatus(resourceClaim *resourcev1alpha3.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { return extractResourceClaim(resourceClaim, fieldManager, "status") } -func extractResourceClaim(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) { +func extractResourceClaim(resourceClaim *resourcev1alpha3.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) { b := &ResourceClaimApplyConfiguration{} - err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClaim"), fieldManager, b, subresource) + err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1alpha3.ResourceClaim"), fieldManager, b, subresource) if err != nil { return nil, err } @@ -79,7 +79,7 @@ func extractResourceClaim(resourceClaim *resourcev1alpha2.ResourceClaim, fieldMa b.WithNamespace(resourceClaim.Namespace) b.WithKind("ResourceClaim") - b.WithAPIVersion("resource.k8s.io/v1alpha2") + b.WithAPIVersion("resource.k8s.io/v1alpha3") return b, nil } @@ -256,3 +256,9 @@ func (b *ResourceClaimApplyConfiguration) WithStatus(value *ResourceClaimStatusA b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ResourceClaimApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimconsumerreference.go similarity index 94% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimconsumerreference.go index 41bb9e9a14..96196d7c95 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimconsumerreference.go @@ -16,13 +16,13 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( types "k8s.io/apimachinery/pkg/types" ) -// ResourceClaimConsumerReferenceApplyConfiguration represents an declarative configuration of the ResourceClaimConsumerReference type for use +// ResourceClaimConsumerReferenceApplyConfiguration represents a declarative configuration of the ResourceClaimConsumerReference type for use // with apply. type ResourceClaimConsumerReferenceApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -31,7 +31,7 @@ type ResourceClaimConsumerReferenceApplyConfiguration struct { UID *types.UID `json:"uid,omitempty"` } -// ResourceClaimConsumerReferenceApplyConfiguration constructs an declarative configuration of the ResourceClaimConsumerReference type for use with +// ResourceClaimConsumerReferenceApplyConfiguration constructs a declarative configuration of the ResourceClaimConsumerReference type for use with // apply. 
func ResourceClaimConsumerReference() *ResourceClaimConsumerReferenceApplyConfiguration { return &ResourceClaimConsumerReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimschedulingstatus.go similarity index 86% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimschedulingstatus.go index e74679aed3..caab89acdb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimschedulingstatus.go @@ -16,16 +16,16 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 -// ResourceClaimSchedulingStatusApplyConfiguration represents an declarative configuration of the ResourceClaimSchedulingStatus type for use +// ResourceClaimSchedulingStatusApplyConfiguration represents a declarative configuration of the ResourceClaimSchedulingStatus type for use // with apply. type ResourceClaimSchedulingStatusApplyConfiguration struct { Name *string `json:"name,omitempty"` UnsuitableNodes []string `json:"unsuitableNodes,omitempty"` } -// ResourceClaimSchedulingStatusApplyConfiguration constructs an declarative configuration of the ResourceClaimSchedulingStatus type for use with +// ResourceClaimSchedulingStatusApplyConfiguration constructs a declarative configuration of the ResourceClaimSchedulingStatus type for use with // apply. func ResourceClaimSchedulingStatus() *ResourceClaimSchedulingStatusApplyConfiguration { return &ResourceClaimSchedulingStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go new file mode 100644 index 0000000000..7c5b65681d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// ResourceClaimSpecApplyConfiguration represents a declarative configuration of the ResourceClaimSpec type for use +// with apply. +type ResourceClaimSpecApplyConfiguration struct { + Devices *DeviceClaimApplyConfiguration `json:"devices,omitempty"` + Controller *string `json:"controller,omitempty"` +} + +// ResourceClaimSpecApplyConfiguration constructs a declarative configuration of the ResourceClaimSpec type for use with +// apply. 
+func ResourceClaimSpec() *ResourceClaimSpecApplyConfiguration { + return &ResourceClaimSpecApplyConfiguration{} +} + +// WithDevices sets the Devices field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Devices field is set to the value of the last call. +func (b *ResourceClaimSpecApplyConfiguration) WithDevices(value *DeviceClaimApplyConfiguration) *ResourceClaimSpecApplyConfiguration { + b.Devices = value + return b +} + +// WithController sets the Controller field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Controller field is set to the value of the last call. +func (b *ResourceClaimSpecApplyConfiguration) WithController(value string) *ResourceClaimSpecApplyConfiguration { + b.Controller = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go similarity index 77% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go index c6fa610906..a52af3ec36 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go @@ -16,31 +16,22 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 -// ResourceClaimStatusApplyConfiguration represents an declarative configuration of the ResourceClaimStatus type for use +// ResourceClaimStatusApplyConfiguration represents a declarative configuration of the ResourceClaimStatus type for use // with apply. type ResourceClaimStatusApplyConfiguration struct { - DriverName *string `json:"driverName,omitempty"` Allocation *AllocationResultApplyConfiguration `json:"allocation,omitempty"` ReservedFor []ResourceClaimConsumerReferenceApplyConfiguration `json:"reservedFor,omitempty"` DeallocationRequested *bool `json:"deallocationRequested,omitempty"` } -// ResourceClaimStatusApplyConfiguration constructs an declarative configuration of the ResourceClaimStatus type for use with +// ResourceClaimStatusApplyConfiguration constructs a declarative configuration of the ResourceClaimStatus type for use with // apply. func ResourceClaimStatus() *ResourceClaimStatusApplyConfiguration { return &ResourceClaimStatusApplyConfiguration{} } -// WithDriverName sets the DriverName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DriverName field is set to the value of the last call. -func (b *ResourceClaimStatusApplyConfiguration) WithDriverName(value string) *ResourceClaimStatusApplyConfiguration { - b.DriverName = &value - return b -} - // WithAllocation sets the Allocation field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Allocation field is set to the value of the last call. 
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplate.go similarity index 93% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplate.go index fc2209b8f0..6f371d0c05 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplate.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ResourceClaimTemplateApplyConfiguration represents an declarative configuration of the ResourceClaimTemplate type for use +// ResourceClaimTemplateApplyConfiguration represents a declarative configuration of the ResourceClaimTemplate type for use // with apply. type ResourceClaimTemplateApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,14 +35,14 @@ type ResourceClaimTemplateApplyConfiguration struct { Spec *ResourceClaimTemplateSpecApplyConfiguration `json:"spec,omitempty"` } -// ResourceClaimTemplate constructs an declarative configuration of the ResourceClaimTemplate type for use with +// ResourceClaimTemplate constructs a declarative configuration of the ResourceClaimTemplate type for use with // apply. func ResourceClaimTemplate(name, namespace string) *ResourceClaimTemplateApplyConfiguration { b := &ResourceClaimTemplateApplyConfiguration{} b.WithName(name) b.WithNamespace(namespace) b.WithKind("ResourceClaimTemplate") - b.WithAPIVersion("resource.k8s.io/v1alpha2") + b.WithAPIVersion("resource.k8s.io/v1alpha3") return b } @@ -57,20 +57,20 @@ func ResourceClaimTemplate(name, namespace string) *ResourceClaimTemplateApplyCo // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { +func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "") } // ExtractResourceClaimTemplateStatus is the same as ExtractResourceClaimTemplate except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractResourceClaimTemplateStatus(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { +func ExtractResourceClaimTemplateStatus(resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "status") } -func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) { +func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) { b := &ResourceClaimTemplateApplyConfiguration{} - err := managedfields.ExtractInto(resourceClaimTemplate, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClaimTemplate"), fieldManager, b, subresource) + err := managedfields.ExtractInto(resourceClaimTemplate, internal.Parser().Type("io.k8s.api.resource.v1alpha3.ResourceClaimTemplate"), fieldManager, b, subresource) if err != nil { return nil, err } @@ -78,7 +78,7 @@ func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha2.Resour b.WithNamespace(resourceClaimTemplate.Namespace) b.WithKind("ResourceClaimTemplate") - b.WithAPIVersion("resource.k8s.io/v1alpha2") + b.WithAPIVersion("resource.k8s.io/v1alpha3") return b, nil } @@ -247,3 +247,9 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithSpec(value *ResourceClaimT b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ResourceClaimTemplateApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplatespec.go similarity index 94% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplatespec.go index 2f38ea0366..5b03ab7553 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplatespec.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -24,14 +24,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ResourceClaimTemplateSpecApplyConfiguration represents an declarative configuration of the ResourceClaimTemplateSpec type for use +// ResourceClaimTemplateSpecApplyConfiguration represents a declarative configuration of the ResourceClaimTemplateSpec type for use // with apply. 
type ResourceClaimTemplateSpecApplyConfiguration struct { *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"` } -// ResourceClaimTemplateSpecApplyConfiguration constructs an declarative configuration of the ResourceClaimTemplateSpec type for use with +// ResourceClaimTemplateSpecApplyConfiguration constructs a declarative configuration of the ResourceClaimTemplateSpec type for use with // apply. func ResourceClaimTemplateSpec() *ResourceClaimTemplateSpecApplyConfiguration { return &ResourceClaimTemplateSpecApplyConfiguration{} @@ -186,3 +186,9 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithSpec(value *ResourceCl b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ResourceClaimTemplateSpecApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourcepool.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourcepool.go new file mode 100644 index 0000000000..23825d137f --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourcepool.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// ResourcePoolApplyConfiguration represents a declarative configuration of the ResourcePool type for use +// with apply. +type ResourcePoolApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Generation *int64 `json:"generation,omitempty"` + ResourceSliceCount *int64 `json:"resourceSliceCount,omitempty"` +} + +// ResourcePoolApplyConfiguration constructs a declarative configuration of the ResourcePool type for use with +// apply. +func ResourcePool() *ResourcePoolApplyConfiguration { + return &ResourcePoolApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourcePoolApplyConfiguration) WithName(value string) *ResourcePoolApplyConfiguration { + b.Name = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *ResourcePoolApplyConfiguration) WithGeneration(value int64) *ResourcePoolApplyConfiguration { + b.Generation = &value + return b +} + +// WithResourceSliceCount sets the ResourceSliceCount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceSliceCount field is set to the value of the last call. +func (b *ResourcePoolApplyConfiguration) WithResourceSliceCount(value int64) *ResourcePoolApplyConfiguration { + b.ResourceSliceCount = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslice.go similarity index 61% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslice.go index 724c9e88e0..aaad68612e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslice.go @@ -16,77 +16,74 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" internal "k8s.io/client-go/applyconfigurations/internal" v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ResourceClassApplyConfiguration represents an declarative configuration of the ResourceClass type for use +// ResourceSliceApplyConfiguration represents a declarative configuration of the ResourceSlice type for use // with apply. -type ResourceClassApplyConfiguration struct { +type ResourceSliceApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - DriverName *string `json:"driverName,omitempty"` - ParametersRef *ResourceClassParametersReferenceApplyConfiguration `json:"parametersRef,omitempty"` - SuitableNodes *corev1.NodeSelectorApplyConfiguration `json:"suitableNodes,omitempty"` + Spec *ResourceSliceSpecApplyConfiguration `json:"spec,omitempty"` } -// ResourceClass constructs an declarative configuration of the ResourceClass type for use with +// ResourceSlice constructs a declarative configuration of the ResourceSlice type for use with // apply. -func ResourceClass(name string) *ResourceClassApplyConfiguration { - b := &ResourceClassApplyConfiguration{} +func ResourceSlice(name string) *ResourceSliceApplyConfiguration { + b := &ResourceSliceApplyConfiguration{} b.WithName(name) - b.WithKind("ResourceClass") - b.WithAPIVersion("resource.k8s.io/v1alpha2") + b.WithKind("ResourceSlice") + b.WithAPIVersion("resource.k8s.io/v1alpha3") return b } -// ExtractResourceClass extracts the applied configuration owned by fieldManager from -// resourceClass. If no managedFields are found in resourceClass for fieldManager, a -// ResourceClassApplyConfiguration is returned with only the Name, Namespace (if applicable), +// ExtractResourceSlice extracts the applied configuration owned by fieldManager from +// resourceSlice. 
If no managedFields are found in resourceSlice for fieldManager, a +// ResourceSliceApplyConfiguration is returned with only the Name, Namespace (if applicable), // APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. -// resourceClass must be a unmodified ResourceClass API object that was retrieved from the Kubernetes API. -// ExtractResourceClass provides a way to perform a extract/modify-in-place/apply workflow. +// resourceSlice must be a unmodified ResourceSlice API object that was retrieved from the Kubernetes API. +// ExtractResourceSlice provides a way to perform a extract/modify-in-place/apply workflow. // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractResourceClass(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) { - return extractResourceClass(resourceClass, fieldManager, "") +func ExtractResourceSlice(resourceSlice *resourcev1alpha3.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) { + return extractResourceSlice(resourceSlice, fieldManager, "") } -// ExtractResourceClassStatus is the same as ExtractResourceClass except +// ExtractResourceSliceStatus is the same as ExtractResourceSlice except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractResourceClassStatus(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) { - return extractResourceClass(resourceClass, fieldManager, "status") +func ExtractResourceSliceStatus(resourceSlice *resourcev1alpha3.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) { + return extractResourceSlice(resourceSlice, fieldManager, "status") } -func extractResourceClass(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string, subresource string) (*ResourceClassApplyConfiguration, error) { - b := &ResourceClassApplyConfiguration{} - err := managedfields.ExtractInto(resourceClass, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClass"), fieldManager, b, subresource) +func extractResourceSlice(resourceSlice *resourcev1alpha3.ResourceSlice, fieldManager string, subresource string) (*ResourceSliceApplyConfiguration, error) { + b := &ResourceSliceApplyConfiguration{} + err := managedfields.ExtractInto(resourceSlice, internal.Parser().Type("io.k8s.api.resource.v1alpha3.ResourceSlice"), fieldManager, b, subresource) if err != nil { return nil, err } - b.WithName(resourceClass.Name) + b.WithName(resourceSlice.Name) - b.WithKind("ResourceClass") - b.WithAPIVersion("resource.k8s.io/v1alpha2") + b.WithKind("ResourceSlice") + b.WithAPIVersion("resource.k8s.io/v1alpha3") return b, nil } // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
-func (b *ResourceClassApplyConfiguration) WithKind(value string) *ResourceClassApplyConfiguration { +func (b *ResourceSliceApplyConfiguration) WithKind(value string) *ResourceSliceApplyConfiguration { b.Kind = &value return b } @@ -94,7 +91,7 @@ func (b *ResourceClassApplyConfiguration) WithKind(value string) *ResourceClassA // WithAPIVersion sets the APIVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithAPIVersion(value string) *ResourceClassApplyConfiguration { +func (b *ResourceSliceApplyConfiguration) WithAPIVersion(value string) *ResourceSliceApplyConfiguration { b.APIVersion = &value return b } @@ -102,7 +99,7 @@ func (b *ResourceClassApplyConfiguration) WithAPIVersion(value string) *Resource // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithName(value string) *ResourceClassApplyConfiguration { +func (b *ResourceSliceApplyConfiguration) WithName(value string) *ResourceSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Name = &value return b @@ -111,7 +108,7 @@ func (b *ResourceClassApplyConfiguration) WithName(value string) *ResourceClassA // WithGenerateName sets the GenerateName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GenerateName field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithGenerateName(value string) *ResourceClassApplyConfiguration { +func (b *ResourceSliceApplyConfiguration) WithGenerateName(value string) *ResourceSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.GenerateName = &value return b @@ -120,7 +117,7 @@ func (b *ResourceClassApplyConfiguration) WithGenerateName(value string) *Resour // WithNamespace sets the Namespace field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Namespace field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithNamespace(value string) *ResourceClassApplyConfiguration { +func (b *ResourceSliceApplyConfiguration) WithNamespace(value string) *ResourceSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Namespace = &value return b @@ -129,7 +126,7 @@ func (b *ResourceClassApplyConfiguration) WithNamespace(value string) *ResourceC // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. 
-func (b *ResourceClassApplyConfiguration) WithUID(value types.UID) *ResourceClassApplyConfiguration { +func (b *ResourceSliceApplyConfiguration) WithUID(value types.UID) *ResourceSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.UID = &value return b @@ -138,7 +135,7 @@ func (b *ResourceClassApplyConfiguration) WithUID(value types.UID) *ResourceClas // WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithResourceVersion(value string) *ResourceClassApplyConfiguration { +func (b *ResourceSliceApplyConfiguration) WithResourceVersion(value string) *ResourceSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.ResourceVersion = &value return b @@ -147,7 +144,7 @@ func (b *ResourceClassApplyConfiguration) WithResourceVersion(value string) *Res // WithGeneration sets the Generation field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Generation field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithGeneration(value int64) *ResourceClassApplyConfiguration { +func (b *ResourceSliceApplyConfiguration) WithGeneration(value int64) *ResourceSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Generation = &value return b @@ -156,7 +153,7 @@ func (b *ResourceClassApplyConfiguration) WithGeneration(value int64) *ResourceC // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClassApplyConfiguration { +func (b *ResourceSliceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.CreationTimestamp = &value return b @@ -165,7 +162,7 @@ func (b *ResourceClassApplyConfiguration) WithCreationTimestamp(value metav1.Tim // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClassApplyConfiguration { +func (b *ResourceSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionTimestamp = &value return b @@ -174,7 +171,7 @@ func (b *ResourceClassApplyConfiguration) WithDeletionTimestamp(value metav1.Tim // WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
-func (b *ResourceClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClassApplyConfiguration { +func (b *ResourceSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionGracePeriodSeconds = &value return b @@ -184,7 +181,7 @@ func (b *ResourceClassApplyConfiguration) WithDeletionGracePeriodSeconds(value i // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Labels field, // overwriting an existing map entries in Labels field with the same key. -func (b *ResourceClassApplyConfiguration) WithLabels(entries map[string]string) *ResourceClassApplyConfiguration { +func (b *ResourceSliceApplyConfiguration) WithLabels(entries map[string]string) *ResourceSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Labels == nil && len(entries) > 0 { b.Labels = make(map[string]string, len(entries)) @@ -199,7 +196,7 @@ func (b *ResourceClassApplyConfiguration) WithLabels(entries map[string]string) // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Annotations field, // overwriting an existing map entries in Annotations field with the same key. -func (b *ResourceClassApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClassApplyConfiguration { +func (b *ResourceSliceApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Annotations == nil && len(entries) > 0 { b.Annotations = make(map[string]string, len(entries)) @@ -213,7 +210,7 @@ func (b *ResourceClassApplyConfiguration) WithAnnotations(entries map[string]str // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ResourceClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClassApplyConfiguration { +func (b *ResourceSliceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { @@ -227,7 +224,7 @@ func (b *ResourceClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owne // WithFinalizers adds the given value to the Finalizers field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Finalizers field. 
-func (b *ResourceClassApplyConfiguration) WithFinalizers(values ...string) *ResourceClassApplyConfiguration { +func (b *ResourceSliceApplyConfiguration) WithFinalizers(values ...string) *ResourceSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { b.Finalizers = append(b.Finalizers, values[i]) @@ -235,32 +232,22 @@ func (b *ResourceClassApplyConfiguration) WithFinalizers(values ...string) *Reso return b } -func (b *ResourceClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { +func (b *ResourceSliceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} } } -// WithDriverName sets the DriverName field in the declarative configuration to the given value +// WithSpec sets the Spec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DriverName field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithDriverName(value string) *ResourceClassApplyConfiguration { - b.DriverName = &value +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithSpec(value *ResourceSliceSpecApplyConfiguration) *ResourceSliceApplyConfiguration { + b.Spec = value return b } -// WithParametersRef sets the ParametersRef field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ParametersRef field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithParametersRef(value *ResourceClassParametersReferenceApplyConfiguration) *ResourceClassApplyConfiguration { - b.ParametersRef = value - return b -} - -// WithSuitableNodes sets the SuitableNodes field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SuitableNodes field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithSuitableNodes(value *corev1.NodeSelectorApplyConfiguration) *ResourceClassApplyConfiguration { - b.SuitableNodes = value - return b +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ResourceSliceApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslicespec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslicespec.go new file mode 100644 index 0000000000..2ded759073 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslicespec.go @@ -0,0 +1,93 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1 "k8s.io/client-go/applyconfigurations/core/v1" +) + +// ResourceSliceSpecApplyConfiguration represents a declarative configuration of the ResourceSliceSpec type for use +// with apply. +type ResourceSliceSpecApplyConfiguration struct { + Driver *string `json:"driver,omitempty"` + Pool *ResourcePoolApplyConfiguration `json:"pool,omitempty"` + NodeName *string `json:"nodeName,omitempty"` + NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"` + AllNodes *bool `json:"allNodes,omitempty"` + Devices []DeviceApplyConfiguration `json:"devices,omitempty"` +} + +// ResourceSliceSpecApplyConfiguration constructs a declarative configuration of the ResourceSliceSpec type for use with +// apply. +func ResourceSliceSpec() *ResourceSliceSpecApplyConfiguration { + return &ResourceSliceSpecApplyConfiguration{} +} + +// WithDriver sets the Driver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Driver field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithDriver(value string) *ResourceSliceSpecApplyConfiguration { + b.Driver = &value + return b +} + +// WithPool sets the Pool field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Pool field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithPool(value *ResourcePoolApplyConfiguration) *ResourceSliceSpecApplyConfiguration { + b.Pool = value + return b +} + +// WithNodeName sets the NodeName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeName field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithNodeName(value string) *ResourceSliceSpecApplyConfiguration { + b.NodeName = &value + return b +} + +// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeSelector field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithNodeSelector(value *v1.NodeSelectorApplyConfiguration) *ResourceSliceSpecApplyConfiguration { + b.NodeSelector = value + return b +} + +// WithAllNodes sets the AllNodes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllNodes field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithAllNodes(value bool) *ResourceSliceSpecApplyConfiguration { + b.AllNodes = &value + return b +} + +// WithDevices adds the given value to the Devices field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Devices field. 
+func (b *ResourceSliceSpecApplyConfiguration) WithDevices(values ...*DeviceApplyConfiguration) *ResourceSliceSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithDevices") + } + b.Devices = append(b.Devices, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go index b57e8ba57d..f2f135abc6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityClassApplyConfiguration represents an declarative configuration of the PriorityClass type for use +// PriorityClassApplyConfiguration represents a declarative configuration of the PriorityClass type for use // with apply. type PriorityClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -39,7 +39,7 @@ type PriorityClassApplyConfiguration struct { PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"` } -// PriorityClass constructs an declarative configuration of the PriorityClass type for use with +// PriorityClass constructs a declarative configuration of the PriorityClass type for use with // apply. func PriorityClass(name string) *PriorityClassApplyConfiguration { b := &PriorityClassApplyConfiguration{} @@ -273,3 +273,9 @@ func (b *PriorityClassApplyConfiguration) WithPreemptionPolicy(value corev1.Pree b.PreemptionPolicy = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PriorityClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go index 0cd09d5d1c..098517675e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityClassApplyConfiguration represents an declarative configuration of the PriorityClass type for use +// PriorityClassApplyConfiguration represents a declarative configuration of the PriorityClass type for use // with apply. type PriorityClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -39,7 +39,7 @@ type PriorityClassApplyConfiguration struct { PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"` } -// PriorityClass constructs an declarative configuration of the PriorityClass type for use with +// PriorityClass constructs a declarative configuration of the PriorityClass type for use with // apply. func PriorityClass(name string) *PriorityClassApplyConfiguration { b := &PriorityClassApplyConfiguration{} @@ -273,3 +273,9 @@ func (b *PriorityClassApplyConfiguration) WithPreemptionPolicy(value corev1.Pree b.PreemptionPolicy = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *PriorityClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go index 98cfb14c70..075862fe3e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityClassApplyConfiguration represents an declarative configuration of the PriorityClass type for use +// PriorityClassApplyConfiguration represents a declarative configuration of the PriorityClass type for use // with apply. type PriorityClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -39,7 +39,7 @@ type PriorityClassApplyConfiguration struct { PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"` } -// PriorityClass constructs an declarative configuration of the PriorityClass type for use with +// PriorityClass constructs a declarative configuration of the PriorityClass type for use with // apply. func PriorityClass(name string) *PriorityClassApplyConfiguration { b := &PriorityClassApplyConfiguration{} @@ -273,3 +273,9 @@ func (b *PriorityClassApplyConfiguration) WithPreemptionPolicy(value corev1.Pree b.PreemptionPolicy = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PriorityClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go index aeead0861c..39d8357029 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSIDriverApplyConfiguration represents an declarative configuration of the CSIDriver type for use +// CSIDriverApplyConfiguration represents a declarative configuration of the CSIDriver type for use // with apply. type CSIDriverApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type CSIDriverApplyConfiguration struct { Spec *CSIDriverSpecApplyConfiguration `json:"spec,omitempty"` } -// CSIDriver constructs an declarative configuration of the CSIDriver type for use with +// CSIDriver constructs a declarative configuration of the CSIDriver type for use with // apply. func CSIDriver(name string) *CSIDriverApplyConfiguration { b := &CSIDriverApplyConfiguration{} @@ -245,3 +245,9 @@ func (b *CSIDriverApplyConfiguration) WithSpec(value *CSIDriverSpecApplyConfigur b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *CSIDriverApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go index a1ef00656b..b2dcb0feea 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/storage/v1" ) -// CSIDriverSpecApplyConfiguration represents an declarative configuration of the CSIDriverSpec type for use +// CSIDriverSpecApplyConfiguration represents a declarative configuration of the CSIDriverSpec type for use // with apply. type CSIDriverSpecApplyConfiguration struct { AttachRequired *bool `json:"attachRequired,omitempty"` @@ -35,7 +35,7 @@ type CSIDriverSpecApplyConfiguration struct { SELinuxMount *bool `json:"seLinuxMount,omitempty"` } -// CSIDriverSpecApplyConfiguration constructs an declarative configuration of the CSIDriverSpec type for use with +// CSIDriverSpecApplyConfiguration constructs a declarative configuration of the CSIDriverSpec type for use with // apply. func CSIDriverSpec() *CSIDriverSpecApplyConfiguration { return &CSIDriverSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go index d8296e4856..8a53e7984e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSINodeApplyConfiguration represents an declarative configuration of the CSINode type for use +// CSINodeApplyConfiguration represents a declarative configuration of the CSINode type for use // with apply. type CSINodeApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type CSINodeApplyConfiguration struct { Spec *CSINodeSpecApplyConfiguration `json:"spec,omitempty"` } -// CSINode constructs an declarative configuration of the CSINode type for use with +// CSINode constructs a declarative configuration of the CSINode type for use with // apply. func CSINode(name string) *CSINodeApplyConfiguration { b := &CSINodeApplyConfiguration{} @@ -245,3 +245,9 @@ func (b *CSINodeApplyConfiguration) WithSpec(value *CSINodeSpecApplyConfiguratio b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CSINodeApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodedriver.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodedriver.go index 6219ef1151..8c69e435e7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodedriver.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodedriver.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CSINodeDriverApplyConfiguration represents an declarative configuration of the CSINodeDriver type for use +// CSINodeDriverApplyConfiguration represents a declarative configuration of the CSINodeDriver type for use // with apply. 
type CSINodeDriverApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -27,7 +27,7 @@ type CSINodeDriverApplyConfiguration struct { Allocatable *VolumeNodeResourcesApplyConfiguration `json:"allocatable,omitempty"` } -// CSINodeDriverApplyConfiguration constructs an declarative configuration of the CSINodeDriver type for use with +// CSINodeDriverApplyConfiguration constructs a declarative configuration of the CSINodeDriver type for use with // apply. func CSINodeDriver() *CSINodeDriverApplyConfiguration { return &CSINodeDriverApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodespec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodespec.go index 30d1d4546b..21d3ba7ccc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodespec.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// CSINodeSpecApplyConfiguration represents an declarative configuration of the CSINodeSpec type for use +// CSINodeSpecApplyConfiguration represents a declarative configuration of the CSINodeSpec type for use // with apply. type CSINodeSpecApplyConfiguration struct { Drivers []CSINodeDriverApplyConfiguration `json:"drivers,omitempty"` } -// CSINodeSpecApplyConfiguration constructs an declarative configuration of the CSINodeSpec type for use with +// CSINodeSpecApplyConfiguration constructs a declarative configuration of the CSINodeSpec type for use with // apply. func CSINodeSpec() *CSINodeSpecApplyConfiguration { return &CSINodeSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go index c47c6b8215..0e293248d9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSIStorageCapacityApplyConfiguration represents an declarative configuration of the CSIStorageCapacity type for use +// CSIStorageCapacityApplyConfiguration represents a declarative configuration of the CSIStorageCapacity type for use // with apply. type CSIStorageCapacityApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -39,7 +39,7 @@ type CSIStorageCapacityApplyConfiguration struct { MaximumVolumeSize *resource.Quantity `json:"maximumVolumeSize,omitempty"` } -// CSIStorageCapacity constructs an declarative configuration of the CSIStorageCapacity type for use with +// CSIStorageCapacity constructs a declarative configuration of the CSIStorageCapacity type for use with // apply. func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfiguration { b := &CSIStorageCapacityApplyConfiguration{} @@ -275,3 +275,9 @@ func (b *CSIStorageCapacityApplyConfiguration) WithMaximumVolumeSize(value resou b.MaximumVolumeSize = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *CSIStorageCapacityApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go index 98c4c22336..26d70bc8b0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go @@ -29,7 +29,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StorageClassApplyConfiguration represents an declarative configuration of the StorageClass type for use +// StorageClassApplyConfiguration represents a declarative configuration of the StorageClass type for use // with apply. type StorageClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -43,7 +43,7 @@ type StorageClassApplyConfiguration struct { AllowedTopologies []applyconfigurationscorev1.TopologySelectorTermApplyConfiguration `json:"allowedTopologies,omitempty"` } -// StorageClass constructs an declarative configuration of the StorageClass type for use with +// StorageClass constructs a declarative configuration of the StorageClass type for use with // apply. func StorageClass(name string) *StorageClassApplyConfiguration { b := &StorageClassApplyConfiguration{} @@ -314,3 +314,9 @@ func (b *StorageClassApplyConfiguration) WithAllowedTopologies(values ...*applyc } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *StorageClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/tokenrequest.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/tokenrequest.go index 6665a1ff2e..77b96db2f0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/tokenrequest.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/tokenrequest.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// TokenRequestApplyConfiguration represents an declarative configuration of the TokenRequest type for use +// TokenRequestApplyConfiguration represents a declarative configuration of the TokenRequest type for use // with apply. type TokenRequestApplyConfiguration struct { Audience *string `json:"audience,omitempty"` ExpirationSeconds *int64 `json:"expirationSeconds,omitempty"` } -// TokenRequestApplyConfiguration constructs an declarative configuration of the TokenRequest type for use with +// TokenRequestApplyConfiguration constructs a declarative configuration of the TokenRequest type for use with // apply. func TokenRequest() *TokenRequestApplyConfiguration { return &TokenRequestApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go index 4c74f09aa2..72c351208c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// VolumeAttachmentApplyConfiguration represents an declarative configuration of the VolumeAttachment type for use +// VolumeAttachmentApplyConfiguration represents a declarative configuration of the VolumeAttachment type for use // with apply. 
type VolumeAttachmentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type VolumeAttachmentApplyConfiguration struct { Status *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"` } -// VolumeAttachment constructs an declarative configuration of the VolumeAttachment type for use with +// VolumeAttachment constructs a declarative configuration of the VolumeAttachment type for use with // apply. func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration { b := &VolumeAttachmentApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *VolumeAttachmentApplyConfiguration) WithStatus(value *VolumeAttachmentS b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *VolumeAttachmentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go index 2bf3f7720d..4778553986 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// VolumeAttachmentSourceApplyConfiguration represents an declarative configuration of the VolumeAttachmentSource type for use +// VolumeAttachmentSourceApplyConfiguration represents a declarative configuration of the VolumeAttachmentSource type for use // with apply. type VolumeAttachmentSourceApplyConfiguration struct { PersistentVolumeName *string `json:"persistentVolumeName,omitempty"` InlineVolumeSpec *v1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"` } -// VolumeAttachmentSourceApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSource type for use with +// VolumeAttachmentSourceApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSource type for use with // apply. func VolumeAttachmentSource() *VolumeAttachmentSourceApplyConfiguration { return &VolumeAttachmentSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentspec.go index a55f7c8ea1..8965392352 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// VolumeAttachmentSpecApplyConfiguration represents an declarative configuration of the VolumeAttachmentSpec type for use +// VolumeAttachmentSpecApplyConfiguration represents a declarative configuration of the VolumeAttachmentSpec type for use // with apply. type VolumeAttachmentSpecApplyConfiguration struct { Attacher *string `json:"attacher,omitempty"` @@ -26,7 +26,7 @@ type VolumeAttachmentSpecApplyConfiguration struct { NodeName *string `json:"nodeName,omitempty"` } -// VolumeAttachmentSpecApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSpec type for use with +// VolumeAttachmentSpecApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSpec type for use with // apply. 
func VolumeAttachmentSpec() *VolumeAttachmentSpecApplyConfiguration { return &VolumeAttachmentSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentstatus.go index 015b08e6eb..14293376d0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// VolumeAttachmentStatusApplyConfiguration represents an declarative configuration of the VolumeAttachmentStatus type for use +// VolumeAttachmentStatusApplyConfiguration represents a declarative configuration of the VolumeAttachmentStatus type for use // with apply. type VolumeAttachmentStatusApplyConfiguration struct { Attached *bool `json:"attached,omitempty"` @@ -27,7 +27,7 @@ type VolumeAttachmentStatusApplyConfiguration struct { DetachError *VolumeErrorApplyConfiguration `json:"detachError,omitempty"` } -// VolumeAttachmentStatusApplyConfiguration constructs an declarative configuration of the VolumeAttachmentStatus type for use with +// VolumeAttachmentStatusApplyConfiguration constructs a declarative configuration of the VolumeAttachmentStatus type for use with // apply. func VolumeAttachmentStatus() *VolumeAttachmentStatusApplyConfiguration { return &VolumeAttachmentStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go index 4bf829f8a9..039e5f32bf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// VolumeErrorApplyConfiguration represents an declarative configuration of the VolumeError type for use +// VolumeErrorApplyConfiguration represents a declarative configuration of the VolumeError type for use // with apply. type VolumeErrorApplyConfiguration struct { Time *v1.Time `json:"time,omitempty"` Message *string `json:"message,omitempty"` } -// VolumeErrorApplyConfiguration constructs an declarative configuration of the VolumeError type for use with +// VolumeErrorApplyConfiguration constructs a declarative configuration of the VolumeError type for use with // apply. func VolumeError() *VolumeErrorApplyConfiguration { return &VolumeErrorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumenoderesources.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumenoderesources.go index 3c5fd3dc29..735853c48b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumenoderesources.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumenoderesources.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// VolumeNodeResourcesApplyConfiguration represents an declarative configuration of the VolumeNodeResources type for use +// VolumeNodeResourcesApplyConfiguration represents a declarative configuration of the VolumeNodeResources type for use // with apply. 
type VolumeNodeResourcesApplyConfiguration struct { Count *int32 `json:"count,omitempty"` } -// VolumeNodeResourcesApplyConfiguration constructs an declarative configuration of the VolumeNodeResources type for use with +// VolumeNodeResourcesApplyConfiguration constructs a declarative configuration of the VolumeNodeResources type for use with // apply. func VolumeNodeResources() *VolumeNodeResourcesApplyConfiguration { return &VolumeNodeResourcesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go index 8b810fed10..aa949e28c7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSIStorageCapacityApplyConfiguration represents an declarative configuration of the CSIStorageCapacity type for use +// CSIStorageCapacityApplyConfiguration represents a declarative configuration of the CSIStorageCapacity type for use // with apply. type CSIStorageCapacityApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -39,7 +39,7 @@ type CSIStorageCapacityApplyConfiguration struct { MaximumVolumeSize *resource.Quantity `json:"maximumVolumeSize,omitempty"` } -// CSIStorageCapacity constructs an declarative configuration of the CSIStorageCapacity type for use with +// CSIStorageCapacity constructs a declarative configuration of the CSIStorageCapacity type for use with // apply. func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfiguration { b := &CSIStorageCapacityApplyConfiguration{} @@ -275,3 +275,9 @@ func (b *CSIStorageCapacityApplyConfiguration) WithMaximumVolumeSize(value resou b.MaximumVolumeSize = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CSIStorageCapacityApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go index bcefb5778a..9648621ac3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// VolumeAttachmentApplyConfiguration represents an declarative configuration of the VolumeAttachment type for use +// VolumeAttachmentApplyConfiguration represents a declarative configuration of the VolumeAttachment type for use // with apply. type VolumeAttachmentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type VolumeAttachmentApplyConfiguration struct { Status *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"` } -// VolumeAttachment constructs an declarative configuration of the VolumeAttachment type for use with +// VolumeAttachment constructs a declarative configuration of the VolumeAttachment type for use with // apply. 
func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration { b := &VolumeAttachmentApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *VolumeAttachmentApplyConfiguration) WithStatus(value *VolumeAttachmentS b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *VolumeAttachmentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentsource.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentsource.go index 82872cc355..be7da5dd15 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentsource.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// VolumeAttachmentSourceApplyConfiguration represents an declarative configuration of the VolumeAttachmentSource type for use +// VolumeAttachmentSourceApplyConfiguration represents a declarative configuration of the VolumeAttachmentSource type for use // with apply. type VolumeAttachmentSourceApplyConfiguration struct { PersistentVolumeName *string `json:"persistentVolumeName,omitempty"` InlineVolumeSpec *v1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"` } -// VolumeAttachmentSourceApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSource type for use with +// VolumeAttachmentSourceApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSource type for use with // apply. func VolumeAttachmentSource() *VolumeAttachmentSourceApplyConfiguration { return &VolumeAttachmentSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentspec.go index 2710ff8864..e97487a645 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// VolumeAttachmentSpecApplyConfiguration represents an declarative configuration of the VolumeAttachmentSpec type for use +// VolumeAttachmentSpecApplyConfiguration represents a declarative configuration of the VolumeAttachmentSpec type for use // with apply. type VolumeAttachmentSpecApplyConfiguration struct { Attacher *string `json:"attacher,omitempty"` @@ -26,7 +26,7 @@ type VolumeAttachmentSpecApplyConfiguration struct { NodeName *string `json:"nodeName,omitempty"` } -// VolumeAttachmentSpecApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSpec type for use with +// VolumeAttachmentSpecApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSpec type for use with // apply. 
func VolumeAttachmentSpec() *VolumeAttachmentSpecApplyConfiguration { return &VolumeAttachmentSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentstatus.go index 43803496e8..a287fc6b28 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// VolumeAttachmentStatusApplyConfiguration represents an declarative configuration of the VolumeAttachmentStatus type for use +// VolumeAttachmentStatusApplyConfiguration represents a declarative configuration of the VolumeAttachmentStatus type for use // with apply. type VolumeAttachmentStatusApplyConfiguration struct { Attached *bool `json:"attached,omitempty"` @@ -27,7 +27,7 @@ type VolumeAttachmentStatusApplyConfiguration struct { DetachError *VolumeErrorApplyConfiguration `json:"detachError,omitempty"` } -// VolumeAttachmentStatusApplyConfiguration constructs an declarative configuration of the VolumeAttachmentStatus type for use with +// VolumeAttachmentStatusApplyConfiguration constructs a declarative configuration of the VolumeAttachmentStatus type for use with // apply. func VolumeAttachmentStatus() *VolumeAttachmentStatusApplyConfiguration { return &VolumeAttachmentStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go index 9d4c476259..f95bc55477 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// VolumeAttributesClassApplyConfiguration represents an declarative configuration of the VolumeAttributesClass type for use +// VolumeAttributesClassApplyConfiguration represents a declarative configuration of the VolumeAttributesClass type for use // with apply. type VolumeAttributesClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type VolumeAttributesClassApplyConfiguration struct { Parameters map[string]string `json:"parameters,omitempty"` } -// VolumeAttributesClass constructs an declarative configuration of the VolumeAttributesClass type for use with +// VolumeAttributesClass constructs a declarative configuration of the VolumeAttributesClass type for use with // apply. func VolumeAttributesClass(name string) *VolumeAttributesClassApplyConfiguration { b := &VolumeAttributesClassApplyConfiguration{} @@ -260,3 +260,9 @@ func (b *VolumeAttributesClassApplyConfiguration) WithParameters(entries map[str } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *VolumeAttributesClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeerror.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeerror.go index cbff16fd0c..ef8f6bbe64 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeerror.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeerror.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// VolumeErrorApplyConfiguration represents an declarative configuration of the VolumeError type for use +// VolumeErrorApplyConfiguration represents a declarative configuration of the VolumeError type for use // with apply. type VolumeErrorApplyConfiguration struct { Time *v1.Time `json:"time,omitempty"` Message *string `json:"message,omitempty"` } -// VolumeErrorApplyConfiguration constructs an declarative configuration of the VolumeError type for use with +// VolumeErrorApplyConfiguration constructs a declarative configuration of the VolumeError type for use with // apply. func VolumeError() *VolumeErrorApplyConfiguration { return &VolumeErrorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go index 4266f0b6e4..b9a807bd8a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSIDriverApplyConfiguration represents an declarative configuration of the CSIDriver type for use +// CSIDriverApplyConfiguration represents a declarative configuration of the CSIDriver type for use // with apply. type CSIDriverApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type CSIDriverApplyConfiguration struct { Spec *CSIDriverSpecApplyConfiguration `json:"spec,omitempty"` } -// CSIDriver constructs an declarative configuration of the CSIDriver type for use with +// CSIDriver constructs a declarative configuration of the CSIDriver type for use with // apply. func CSIDriver(name string) *CSIDriverApplyConfiguration { b := &CSIDriverApplyConfiguration{} @@ -245,3 +245,9 @@ func (b *CSIDriverApplyConfiguration) WithSpec(value *CSIDriverSpecApplyConfigur b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CSIDriverApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go index 6097a615be..5f4e068f0c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go @@ -22,7 +22,7 @@ import ( v1beta1 "k8s.io/api/storage/v1beta1" ) -// CSIDriverSpecApplyConfiguration represents an declarative configuration of the CSIDriverSpec type for use +// CSIDriverSpecApplyConfiguration represents a declarative configuration of the CSIDriverSpec type for use // with apply. 
type CSIDriverSpecApplyConfiguration struct { AttachRequired *bool `json:"attachRequired,omitempty"` @@ -35,7 +35,7 @@ type CSIDriverSpecApplyConfiguration struct { SELinuxMount *bool `json:"seLinuxMount,omitempty"` } -// CSIDriverSpecApplyConfiguration constructs an declarative configuration of the CSIDriverSpec type for use with +// CSIDriverSpecApplyConfiguration constructs a declarative configuration of the CSIDriverSpec type for use with // apply. func CSIDriverSpec() *CSIDriverSpecApplyConfiguration { return &CSIDriverSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go index 91588fd9fb..af0f41cf0a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSINodeApplyConfiguration represents an declarative configuration of the CSINode type for use +// CSINodeApplyConfiguration represents a declarative configuration of the CSINode type for use // with apply. type CSINodeApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type CSINodeApplyConfiguration struct { Spec *CSINodeSpecApplyConfiguration `json:"spec,omitempty"` } -// CSINode constructs an declarative configuration of the CSINode type for use with +// CSINode constructs a declarative configuration of the CSINode type for use with // apply. func CSINode(name string) *CSINodeApplyConfiguration { b := &CSINodeApplyConfiguration{} @@ -245,3 +245,9 @@ func (b *CSINodeApplyConfiguration) WithSpec(value *CSINodeSpecApplyConfiguratio b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CSINodeApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodedriver.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodedriver.go index 2c7de497b2..65ad771bb2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodedriver.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodedriver.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// CSINodeDriverApplyConfiguration represents an declarative configuration of the CSINodeDriver type for use +// CSINodeDriverApplyConfiguration represents a declarative configuration of the CSINodeDriver type for use // with apply. type CSINodeDriverApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -27,7 +27,7 @@ type CSINodeDriverApplyConfiguration struct { Allocatable *VolumeNodeResourcesApplyConfiguration `json:"allocatable,omitempty"` } -// CSINodeDriverApplyConfiguration constructs an declarative configuration of the CSINodeDriver type for use with +// CSINodeDriverApplyConfiguration constructs a declarative configuration of the CSINodeDriver type for use with // apply. 
func CSINodeDriver() *CSINodeDriverApplyConfiguration { return &CSINodeDriverApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodespec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodespec.go index 94ff1b4611..c9cbea1d9c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodespec.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// CSINodeSpecApplyConfiguration represents an declarative configuration of the CSINodeSpec type for use +// CSINodeSpecApplyConfiguration represents a declarative configuration of the CSINodeSpec type for use // with apply. type CSINodeSpecApplyConfiguration struct { Drivers []CSINodeDriverApplyConfiguration `json:"drivers,omitempty"` } -// CSINodeSpecApplyConfiguration constructs an declarative configuration of the CSINodeSpec type for use with +// CSINodeSpecApplyConfiguration constructs a declarative configuration of the CSINodeSpec type for use with // apply. func CSINodeSpec() *CSINodeSpecApplyConfiguration { return &CSINodeSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go index 2854a15da7..19350e5a6f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSIStorageCapacityApplyConfiguration represents an declarative configuration of the CSIStorageCapacity type for use +// CSIStorageCapacityApplyConfiguration represents a declarative configuration of the CSIStorageCapacity type for use // with apply. type CSIStorageCapacityApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -39,7 +39,7 @@ type CSIStorageCapacityApplyConfiguration struct { MaximumVolumeSize *resource.Quantity `json:"maximumVolumeSize,omitempty"` } -// CSIStorageCapacity constructs an declarative configuration of the CSIStorageCapacity type for use with +// CSIStorageCapacity constructs a declarative configuration of the CSIStorageCapacity type for use with // apply. func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfiguration { b := &CSIStorageCapacityApplyConfiguration{} @@ -275,3 +275,9 @@ func (b *CSIStorageCapacityApplyConfiguration) WithMaximumVolumeSize(value resou b.MaximumVolumeSize = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CSIStorageCapacityApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go index 02194f1080..fa504a44ec 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go @@ -29,7 +29,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StorageClassApplyConfiguration represents an declarative configuration of the StorageClass type for use +// StorageClassApplyConfiguration represents a declarative configuration of the StorageClass type for use // with apply. 
type StorageClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -43,7 +43,7 @@ type StorageClassApplyConfiguration struct { AllowedTopologies []applyconfigurationscorev1.TopologySelectorTermApplyConfiguration `json:"allowedTopologies,omitempty"` } -// StorageClass constructs an declarative configuration of the StorageClass type for use with +// StorageClass constructs a declarative configuration of the StorageClass type for use with // apply. func StorageClass(name string) *StorageClassApplyConfiguration { b := &StorageClassApplyConfiguration{} @@ -314,3 +314,9 @@ func (b *StorageClassApplyConfiguration) WithAllowedTopologies(values ...*applyc } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *StorageClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/tokenrequest.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/tokenrequest.go index 89c99d5602..e0f2df28e0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/tokenrequest.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/tokenrequest.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// TokenRequestApplyConfiguration represents an declarative configuration of the TokenRequest type for use +// TokenRequestApplyConfiguration represents a declarative configuration of the TokenRequest type for use // with apply. type TokenRequestApplyConfiguration struct { Audience *string `json:"audience,omitempty"` ExpirationSeconds *int64 `json:"expirationSeconds,omitempty"` } -// TokenRequestApplyConfiguration constructs an declarative configuration of the TokenRequest type for use with +// TokenRequestApplyConfiguration constructs a declarative configuration of the TokenRequest type for use with // apply. func TokenRequest() *TokenRequestApplyConfiguration { return &TokenRequestApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go index 9fccaf5cf9..b0711d7314 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// VolumeAttachmentApplyConfiguration represents an declarative configuration of the VolumeAttachment type for use +// VolumeAttachmentApplyConfiguration represents a declarative configuration of the VolumeAttachment type for use // with apply. type VolumeAttachmentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type VolumeAttachmentApplyConfiguration struct { Status *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"` } -// VolumeAttachment constructs an declarative configuration of the VolumeAttachment type for use with +// VolumeAttachment constructs a declarative configuration of the VolumeAttachment type for use with // apply. func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration { b := &VolumeAttachmentApplyConfiguration{} @@ -254,3 +254,9 @@ func (b *VolumeAttachmentApplyConfiguration) WithStatus(value *VolumeAttachmentS b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *VolumeAttachmentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentsource.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentsource.go index 9700b38ee2..b08dd3148b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentsource.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// VolumeAttachmentSourceApplyConfiguration represents an declarative configuration of the VolumeAttachmentSource type for use +// VolumeAttachmentSourceApplyConfiguration represents a declarative configuration of the VolumeAttachmentSource type for use // with apply. type VolumeAttachmentSourceApplyConfiguration struct { PersistentVolumeName *string `json:"persistentVolumeName,omitempty"` InlineVolumeSpec *v1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"` } -// VolumeAttachmentSourceApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSource type for use with +// VolumeAttachmentSourceApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSource type for use with // apply. func VolumeAttachmentSource() *VolumeAttachmentSourceApplyConfiguration { return &VolumeAttachmentSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentspec.go index 1d5e304bb5..3bdaeb45d7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// VolumeAttachmentSpecApplyConfiguration represents an declarative configuration of the VolumeAttachmentSpec type for use +// VolumeAttachmentSpecApplyConfiguration represents a declarative configuration of the VolumeAttachmentSpec type for use // with apply. type VolumeAttachmentSpecApplyConfiguration struct { Attacher *string `json:"attacher,omitempty"` @@ -26,7 +26,7 @@ type VolumeAttachmentSpecApplyConfiguration struct { NodeName *string `json:"nodeName,omitempty"` } -// VolumeAttachmentSpecApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSpec type for use with +// VolumeAttachmentSpecApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSpec type for use with // apply. func VolumeAttachmentSpec() *VolumeAttachmentSpecApplyConfiguration { return &VolumeAttachmentSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentstatus.go index fa1855a241..f7046cdb35 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentstatus.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1beta1 -// VolumeAttachmentStatusApplyConfiguration represents an declarative configuration of the VolumeAttachmentStatus type for use +// VolumeAttachmentStatusApplyConfiguration represents a declarative configuration of the VolumeAttachmentStatus type for use // with apply. type VolumeAttachmentStatusApplyConfiguration struct { Attached *bool `json:"attached,omitempty"` @@ -27,7 +27,7 @@ type VolumeAttachmentStatusApplyConfiguration struct { DetachError *VolumeErrorApplyConfiguration `json:"detachError,omitempty"` } -// VolumeAttachmentStatusApplyConfiguration constructs an declarative configuration of the VolumeAttachmentStatus type for use with +// VolumeAttachmentStatusApplyConfiguration constructs a declarative configuration of the VolumeAttachmentStatus type for use with // apply. func VolumeAttachmentStatus() *VolumeAttachmentStatusApplyConfiguration { return &VolumeAttachmentStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go new file mode 100644 index 0000000000..7b221d2775 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go @@ -0,0 +1,268 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/storage/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// VolumeAttributesClassApplyConfiguration represents a declarative configuration of the VolumeAttributesClass type for use +// with apply. +type VolumeAttributesClassApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + DriverName *string `json:"driverName,omitempty"` + Parameters map[string]string `json:"parameters,omitempty"` +} + +// VolumeAttributesClass constructs a declarative configuration of the VolumeAttributesClass type for use with +// apply. +func VolumeAttributesClass(name string) *VolumeAttributesClassApplyConfiguration { + b := &VolumeAttributesClassApplyConfiguration{} + b.WithName(name) + b.WithKind("VolumeAttributesClass") + b.WithAPIVersion("storage.k8s.io/v1beta1") + return b +} + +// ExtractVolumeAttributesClass extracts the applied configuration owned by fieldManager from +// volumeAttributesClass. If no managedFields are found in volumeAttributesClass for fieldManager, a +// VolumeAttributesClassApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. 
It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// volumeAttributesClass must be a unmodified VolumeAttributesClass API object that was retrieved from the Kubernetes API. +// ExtractVolumeAttributesClass provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractVolumeAttributesClass(volumeAttributesClass *v1beta1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { + return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "") +} + +// ExtractVolumeAttributesClassStatus is the same as ExtractVolumeAttributesClass except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractVolumeAttributesClassStatus(volumeAttributesClass *v1beta1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { + return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "status") +} + +func extractVolumeAttributesClass(volumeAttributesClass *v1beta1.VolumeAttributesClass, fieldManager string, subresource string) (*VolumeAttributesClassApplyConfiguration, error) { + b := &VolumeAttributesClassApplyConfiguration{} + err := managedfields.ExtractInto(volumeAttributesClass, internal.Parser().Type("io.k8s.api.storage.v1beta1.VolumeAttributesClass"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(volumeAttributesClass.Name) + + b.WithKind("VolumeAttributesClass") + b.WithAPIVersion("storage.k8s.io/v1beta1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithKind(value string) *VolumeAttributesClassApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithAPIVersion(value string) *VolumeAttributesClassApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithName(value string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithGenerateName(value string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithNamespace(value string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithUID(value types.UID) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithResourceVersion(value string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithGeneration(value int64) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *VolumeAttributesClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *VolumeAttributesClassApplyConfiguration) WithLabels(entries map[string]string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *VolumeAttributesClassApplyConfiguration) WithAnnotations(entries map[string]string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *VolumeAttributesClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *VolumeAttributesClassApplyConfiguration) WithFinalizers(values ...string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *VolumeAttributesClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithDriverName sets the DriverName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DriverName field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithDriverName(value string) *VolumeAttributesClassApplyConfiguration { + b.DriverName = &value + return b +} + +// WithParameters puts the entries into the Parameters field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Parameters field, +// overwriting an existing map entries in Parameters field with the same key. +func (b *VolumeAttributesClassApplyConfiguration) WithParameters(entries map[string]string) *VolumeAttributesClassApplyConfiguration { + if b.Parameters == nil && len(entries) > 0 { + b.Parameters = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Parameters[k] = v + } + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *VolumeAttributesClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeerror.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeerror.go index 3f255fce75..fec1c9ade3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeerror.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeerror.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// VolumeErrorApplyConfiguration represents an declarative configuration of the VolumeError type for use +// VolumeErrorApplyConfiguration represents a declarative configuration of the VolumeError type for use // with apply. type VolumeErrorApplyConfiguration struct { Time *v1.Time `json:"time,omitempty"` Message *string `json:"message,omitempty"` } -// VolumeErrorApplyConfiguration constructs an declarative configuration of the VolumeError type for use with +// VolumeErrorApplyConfiguration constructs a declarative configuration of the VolumeError type for use with // apply. func VolumeError() *VolumeErrorApplyConfiguration { return &VolumeErrorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumenoderesources.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumenoderesources.go index 4b69b64c9b..b42c9decc0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumenoderesources.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumenoderesources.go @@ -18,13 +18,13 @@ limitations under the License. 
package v1beta1 -// VolumeNodeResourcesApplyConfiguration represents an declarative configuration of the VolumeNodeResources type for use +// VolumeNodeResourcesApplyConfiguration represents a declarative configuration of the VolumeNodeResources type for use // with apply. type VolumeNodeResourcesApplyConfiguration struct { Count *int32 `json:"count,omitempty"` } -// VolumeNodeResourcesApplyConfiguration constructs an declarative configuration of the VolumeNodeResources type for use with +// VolumeNodeResourcesApplyConfiguration constructs a declarative configuration of the VolumeNodeResources type for use with // apply. func VolumeNodeResources() *VolumeNodeResourcesApplyConfiguration { return &VolumeNodeResourcesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/groupversionresource.go b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/groupversionresource.go new file mode 100644 index 0000000000..c8f9f009a5 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/groupversionresource.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// GroupVersionResourceApplyConfiguration represents a declarative configuration of the GroupVersionResource type for use +// with apply. +type GroupVersionResourceApplyConfiguration struct { + Group *string `json:"group,omitempty"` + Version *string `json:"version,omitempty"` + Resource *string `json:"resource,omitempty"` +} + +// GroupVersionResourceApplyConfiguration constructs a declarative configuration of the GroupVersionResource type for use with +// apply. +func GroupVersionResource() *GroupVersionResourceApplyConfiguration { + return &GroupVersionResourceApplyConfiguration{} +} + +// WithGroup sets the Group field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Group field is set to the value of the last call. +func (b *GroupVersionResourceApplyConfiguration) WithGroup(value string) *GroupVersionResourceApplyConfiguration { + b.Group = &value + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *GroupVersionResourceApplyConfiguration) WithVersion(value string) *GroupVersionResourceApplyConfiguration { + b.Version = &value + return b +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. 
+func (b *GroupVersionResourceApplyConfiguration) WithResource(value string) *GroupVersionResourceApplyConfiguration { + b.Resource = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go new file mode 100644 index 0000000000..dcdbc60c7c --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go @@ -0,0 +1,81 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + v1alpha1 "k8s.io/api/storagemigration/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// MigrationConditionApplyConfiguration represents a declarative configuration of the MigrationCondition type for use +// with apply. +type MigrationConditionApplyConfiguration struct { + Type *v1alpha1.MigrationConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// MigrationConditionApplyConfiguration constructs a declarative configuration of the MigrationCondition type for use with +// apply. +func MigrationCondition() *MigrationConditionApplyConfiguration { + return &MigrationConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *MigrationConditionApplyConfiguration) WithType(value v1alpha1.MigrationConditionType) *MigrationConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *MigrationConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *MigrationConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithLastUpdateTime sets the LastUpdateTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastUpdateTime field is set to the value of the last call. 
+func (b *MigrationConditionApplyConfiguration) WithLastUpdateTime(value metav1.Time) *MigrationConditionApplyConfiguration { + b.LastUpdateTime = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *MigrationConditionApplyConfiguration) WithReason(value string) *MigrationConditionApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *MigrationConditionApplyConfiguration) WithMessage(value string) *MigrationConditionApplyConfiguration { + b.Message = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go new file mode 100644 index 0000000000..7e6452a777 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go @@ -0,0 +1,262 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// StorageVersionMigrationApplyConfiguration represents a declarative configuration of the StorageVersionMigration type for use +// with apply. +type StorageVersionMigrationApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *StorageVersionMigrationSpecApplyConfiguration `json:"spec,omitempty"` + Status *StorageVersionMigrationStatusApplyConfiguration `json:"status,omitempty"` +} + +// StorageVersionMigration constructs a declarative configuration of the StorageVersionMigration type for use with +// apply. +func StorageVersionMigration(name string) *StorageVersionMigrationApplyConfiguration { + b := &StorageVersionMigrationApplyConfiguration{} + b.WithName(name) + b.WithKind("StorageVersionMigration") + b.WithAPIVersion("storagemigration.k8s.io/v1alpha1") + return b +} + +// ExtractStorageVersionMigration extracts the applied configuration owned by fieldManager from +// storageVersionMigration. 
If no managedFields are found in storageVersionMigration for fieldManager, a +// StorageVersionMigrationApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// storageVersionMigration must be a unmodified StorageVersionMigration API object that was retrieved from the Kubernetes API. +// ExtractStorageVersionMigration provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractStorageVersionMigration(storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigration, fieldManager string) (*StorageVersionMigrationApplyConfiguration, error) { + return extractStorageVersionMigration(storageVersionMigration, fieldManager, "") +} + +// ExtractStorageVersionMigrationStatus is the same as ExtractStorageVersionMigration except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractStorageVersionMigrationStatus(storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigration, fieldManager string) (*StorageVersionMigrationApplyConfiguration, error) { + return extractStorageVersionMigration(storageVersionMigration, fieldManager, "status") +} + +func extractStorageVersionMigration(storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigration, fieldManager string, subresource string) (*StorageVersionMigrationApplyConfiguration, error) { + b := &StorageVersionMigrationApplyConfiguration{} + err := managedfields.ExtractInto(storageVersionMigration, internal.Parser().Type("io.k8s.api.storagemigration.v1alpha1.StorageVersionMigration"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(storageVersionMigration.Name) + + b.WithKind("StorageVersionMigration") + b.WithAPIVersion("storagemigration.k8s.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *StorageVersionMigrationApplyConfiguration) WithKind(value string) *StorageVersionMigrationApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *StorageVersionMigrationApplyConfiguration) WithAPIVersion(value string) *StorageVersionMigrationApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *StorageVersionMigrationApplyConfiguration) WithName(value string) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *StorageVersionMigrationApplyConfiguration) WithGenerateName(value string) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *StorageVersionMigrationApplyConfiguration) WithNamespace(value string) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *StorageVersionMigrationApplyConfiguration) WithUID(value types.UID) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *StorageVersionMigrationApplyConfiguration) WithResourceVersion(value string) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *StorageVersionMigrationApplyConfiguration) WithGeneration(value int64) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *StorageVersionMigrationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *StorageVersionMigrationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *StorageVersionMigrationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *StorageVersionMigrationApplyConfiguration) WithLabels(entries map[string]string) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *StorageVersionMigrationApplyConfiguration) WithAnnotations(entries map[string]string) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *StorageVersionMigrationApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *StorageVersionMigrationApplyConfiguration) WithFinalizers(values ...string) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *StorageVersionMigrationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *StorageVersionMigrationApplyConfiguration) WithSpec(value *StorageVersionMigrationSpecApplyConfiguration) *StorageVersionMigrationApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *StorageVersionMigrationApplyConfiguration) WithStatus(value *StorageVersionMigrationStatusApplyConfiguration) *StorageVersionMigrationApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *StorageVersionMigrationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationspec.go b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationspec.go new file mode 100644 index 0000000000..02ddb540f8 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationspec.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// StorageVersionMigrationSpecApplyConfiguration represents a declarative configuration of the StorageVersionMigrationSpec type for use +// with apply. +type StorageVersionMigrationSpecApplyConfiguration struct { + Resource *GroupVersionResourceApplyConfiguration `json:"resource,omitempty"` + ContinueToken *string `json:"continueToken,omitempty"` +} + +// StorageVersionMigrationSpecApplyConfiguration constructs a declarative configuration of the StorageVersionMigrationSpec type for use with +// apply. 
+func StorageVersionMigrationSpec() *StorageVersionMigrationSpecApplyConfiguration { + return &StorageVersionMigrationSpecApplyConfiguration{} +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. +func (b *StorageVersionMigrationSpecApplyConfiguration) WithResource(value *GroupVersionResourceApplyConfiguration) *StorageVersionMigrationSpecApplyConfiguration { + b.Resource = value + return b +} + +// WithContinueToken sets the ContinueToken field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ContinueToken field is set to the value of the last call. +func (b *StorageVersionMigrationSpecApplyConfiguration) WithContinueToken(value string) *StorageVersionMigrationSpecApplyConfiguration { + b.ContinueToken = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationstatus.go b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationstatus.go new file mode 100644 index 0000000000..fc957cb15c --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationstatus.go @@ -0,0 +1,53 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// StorageVersionMigrationStatusApplyConfiguration represents a declarative configuration of the StorageVersionMigrationStatus type for use +// with apply. +type StorageVersionMigrationStatusApplyConfiguration struct { + Conditions []MigrationConditionApplyConfiguration `json:"conditions,omitempty"` + ResourceVersion *string `json:"resourceVersion,omitempty"` +} + +// StorageVersionMigrationStatusApplyConfiguration constructs a declarative configuration of the StorageVersionMigrationStatus type for use with +// apply. +func StorageVersionMigrationStatus() *StorageVersionMigrationStatusApplyConfiguration { + return &StorageVersionMigrationStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. 
+func (b *StorageVersionMigrationStatusApplyConfiguration) WithConditions(values ...*MigrationConditionApplyConfiguration) *StorageVersionMigrationStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *StorageVersionMigrationStatusApplyConfiguration) WithResourceVersion(value string) *StorageVersionMigrationStatusApplyConfiguration { + b.ResourceVersion = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/utils.go b/vendor/k8s.io/client-go/applyconfigurations/utils.go new file mode 100644 index 0000000000..0955b8f44f --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/utils.go @@ -0,0 +1,1740 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package applyconfigurations + +import ( + v1 "k8s.io/api/admissionregistration/v1" + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + appsv1beta1 "k8s.io/api/apps/v1beta1" + v1beta2 "k8s.io/api/apps/v1beta2" + autoscalingv1 "k8s.io/api/autoscaling/v1" + v2 "k8s.io/api/autoscaling/v2" + v2beta1 "k8s.io/api/autoscaling/v2beta1" + v2beta2 "k8s.io/api/autoscaling/v2beta2" + batchv1 "k8s.io/api/batch/v1" + batchv1beta1 "k8s.io/api/batch/v1beta1" + certificatesv1 "k8s.io/api/certificates/v1" + certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" + certificatesv1beta1 "k8s.io/api/certificates/v1beta1" + coordinationv1 "k8s.io/api/coordination/v1" + coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1" + coordinationv1beta1 "k8s.io/api/coordination/v1beta1" + corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" + discoveryv1beta1 "k8s.io/api/discovery/v1beta1" + eventsv1 "k8s.io/api/events/v1" + eventsv1beta1 "k8s.io/api/events/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" + v1beta3 "k8s.io/api/flowcontrol/v1beta3" + imagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1" + networkingv1 "k8s.io/api/networking/v1" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + nodev1 "k8s.io/api/node/v1" + nodev1alpha1 "k8s.io/api/node/v1alpha1" + nodev1beta1 "k8s.io/api/node/v1beta1" + policyv1 "k8s.io/api/policy/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" + rbacv1 "k8s.io/api/rbac/v1" + rbacv1alpha1 
"k8s.io/api/rbac/v1alpha1" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" + v1alpha3 "k8s.io/api/resource/v1alpha3" + schedulingv1 "k8s.io/api/scheduling/v1" + schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" + schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" + storagev1 "k8s.io/api/storage/v1" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" + storagev1beta1 "k8s.io/api/storage/v1beta1" + storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" + admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" + applyconfigurationsapiserverinternalv1alpha1 "k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1" + applyconfigurationsappsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + applyconfigurationsappsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" + appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" + autoscalingv2 "k8s.io/client-go/applyconfigurations/autoscaling/v2" + autoscalingv2beta1 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta1" + autoscalingv2beta2 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2" + applyconfigurationsbatchv1 "k8s.io/client-go/applyconfigurations/batch/v1" + applyconfigurationsbatchv1beta1 "k8s.io/client-go/applyconfigurations/batch/v1beta1" + applyconfigurationscertificatesv1 "k8s.io/client-go/applyconfigurations/certificates/v1" + applyconfigurationscertificatesv1alpha1 "k8s.io/client-go/applyconfigurations/certificates/v1alpha1" + applyconfigurationscertificatesv1beta1 "k8s.io/client-go/applyconfigurations/certificates/v1beta1" + applyconfigurationscoordinationv1 "k8s.io/client-go/applyconfigurations/coordination/v1" + applyconfigurationscoordinationv1alpha1 "k8s.io/client-go/applyconfigurations/coordination/v1alpha1" + applyconfigurationscoordinationv1beta1 "k8s.io/client-go/applyconfigurations/coordination/v1beta1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationsdiscoveryv1 "k8s.io/client-go/applyconfigurations/discovery/v1" + applyconfigurationsdiscoveryv1beta1 "k8s.io/client-go/applyconfigurations/discovery/v1beta1" + applyconfigurationseventsv1 "k8s.io/client-go/applyconfigurations/events/v1" + applyconfigurationseventsv1beta1 "k8s.io/client-go/applyconfigurations/events/v1beta1" + applyconfigurationsextensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + applyconfigurationsflowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" + applyconfigurationsflowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1" + applyconfigurationsflowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" + flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" + applyconfigurationsimagepolicyv1alpha1 "k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1" + internal "k8s.io/client-go/applyconfigurations/internal" + applyconfigurationsmetav1 "k8s.io/client-go/applyconfigurations/meta/v1" + applyconfigurationsnetworkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" + applyconfigurationsnetworkingv1alpha1 
"k8s.io/client-go/applyconfigurations/networking/v1alpha1" + applyconfigurationsnetworkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + applyconfigurationsnodev1 "k8s.io/client-go/applyconfigurations/node/v1" + applyconfigurationsnodev1alpha1 "k8s.io/client-go/applyconfigurations/node/v1alpha1" + applyconfigurationsnodev1beta1 "k8s.io/client-go/applyconfigurations/node/v1beta1" + applyconfigurationspolicyv1 "k8s.io/client-go/applyconfigurations/policy/v1" + applyconfigurationspolicyv1beta1 "k8s.io/client-go/applyconfigurations/policy/v1beta1" + applyconfigurationsrbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + applyconfigurationsrbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + applyconfigurationsrbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + applyconfigurationsschedulingv1 "k8s.io/client-go/applyconfigurations/scheduling/v1" + applyconfigurationsschedulingv1alpha1 "k8s.io/client-go/applyconfigurations/scheduling/v1alpha1" + applyconfigurationsschedulingv1beta1 "k8s.io/client-go/applyconfigurations/scheduling/v1beta1" + applyconfigurationsstoragev1 "k8s.io/client-go/applyconfigurations/storage/v1" + applyconfigurationsstoragev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" + applyconfigurationsstoragev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + applyconfigurationsstoragemigrationv1alpha1 "k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1" + testing "k8s.io/client-go/testing" +) + +// ForKind returns an apply configuration type for the given GroupVersionKind, or nil if no +// apply configuration type exists for the given GroupVersionKind. +func ForKind(kind schema.GroupVersionKind) interface{} { + switch kind { + // Group=admissionregistration.k8s.io, Version=v1 + case v1.SchemeGroupVersion.WithKind("AuditAnnotation"): + return &admissionregistrationv1.AuditAnnotationApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ExpressionWarning"): + return &admissionregistrationv1.ExpressionWarningApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("MatchCondition"): + return &admissionregistrationv1.MatchConditionApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("MatchResources"): + return &admissionregistrationv1.MatchResourcesApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("MutatingWebhook"): + return &admissionregistrationv1.MutatingWebhookApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("MutatingWebhookConfiguration"): + return &admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("NamedRuleWithOperations"): + return &admissionregistrationv1.NamedRuleWithOperationsApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ParamKind"): + return &admissionregistrationv1.ParamKindApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ParamRef"): + return &admissionregistrationv1.ParamRefApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Rule"): + return &admissionregistrationv1.RuleApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("RuleWithOperations"): + return &admissionregistrationv1.RuleWithOperationsApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ServiceReference"): + return &admissionregistrationv1.ServiceReferenceApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("TypeChecking"): + return &admissionregistrationv1.TypeCheckingApplyConfiguration{} + case 
v1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicy"): + return &admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBinding"): + return &admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBindingSpec"): + return &admissionregistrationv1.ValidatingAdmissionPolicyBindingSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicySpec"): + return &admissionregistrationv1.ValidatingAdmissionPolicySpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyStatus"): + return &admissionregistrationv1.ValidatingAdmissionPolicyStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ValidatingWebhook"): + return &admissionregistrationv1.ValidatingWebhookApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ValidatingWebhookConfiguration"): + return &admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Validation"): + return &admissionregistrationv1.ValidationApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Variable"): + return &admissionregistrationv1.VariableApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("WebhookClientConfig"): + return &admissionregistrationv1.WebhookClientConfigApplyConfiguration{} + + // Group=admissionregistration.k8s.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithKind("AuditAnnotation"): + return &admissionregistrationv1alpha1.AuditAnnotationApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ExpressionWarning"): + return &admissionregistrationv1alpha1.ExpressionWarningApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("MatchCondition"): + return &admissionregistrationv1alpha1.MatchConditionApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("MatchResources"): + return &admissionregistrationv1alpha1.MatchResourcesApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("NamedRuleWithOperations"): + return &admissionregistrationv1alpha1.NamedRuleWithOperationsApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ParamKind"): + return &admissionregistrationv1alpha1.ParamKindApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ParamRef"): + return &admissionregistrationv1alpha1.ParamRefApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("TypeChecking"): + return &admissionregistrationv1alpha1.TypeCheckingApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicy"): + return &admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBinding"): + return &admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBindingSpec"): + return &admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingSpecApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicySpec"): + return &admissionregistrationv1alpha1.ValidatingAdmissionPolicySpecApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyStatus"): + return &admissionregistrationv1alpha1.ValidatingAdmissionPolicyStatusApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("Validation"): + return 
&admissionregistrationv1alpha1.ValidationApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("Variable"): + return &admissionregistrationv1alpha1.VariableApplyConfiguration{} + + // Group=admissionregistration.k8s.io, Version=v1beta1 + case v1beta1.SchemeGroupVersion.WithKind("AuditAnnotation"): + return &admissionregistrationv1beta1.AuditAnnotationApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("ExpressionWarning"): + return &admissionregistrationv1beta1.ExpressionWarningApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("MatchCondition"): + return &admissionregistrationv1beta1.MatchConditionApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("MatchResources"): + return &admissionregistrationv1beta1.MatchResourcesApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("MutatingWebhook"): + return &admissionregistrationv1beta1.MutatingWebhookApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("MutatingWebhookConfiguration"): + return &admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("NamedRuleWithOperations"): + return &admissionregistrationv1beta1.NamedRuleWithOperationsApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("ParamKind"): + return &admissionregistrationv1beta1.ParamKindApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("ParamRef"): + return &admissionregistrationv1beta1.ParamRefApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("ServiceReference"): + return &admissionregistrationv1beta1.ServiceReferenceApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("TypeChecking"): + return &admissionregistrationv1beta1.TypeCheckingApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicy"): + return &admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBinding"): + return &admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBindingSpec"): + return &admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingSpecApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicySpec"): + return &admissionregistrationv1beta1.ValidatingAdmissionPolicySpecApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyStatus"): + return &admissionregistrationv1beta1.ValidatingAdmissionPolicyStatusApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("ValidatingWebhook"): + return &admissionregistrationv1beta1.ValidatingWebhookApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("ValidatingWebhookConfiguration"): + return &admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("Validation"): + return &admissionregistrationv1beta1.ValidationApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("Variable"): + return &admissionregistrationv1beta1.VariableApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("WebhookClientConfig"): + return &admissionregistrationv1beta1.WebhookClientConfigApplyConfiguration{} + + // Group=apps, Version=v1 + case appsv1.SchemeGroupVersion.WithKind("ControllerRevision"): + return &applyconfigurationsappsv1.ControllerRevisionApplyConfiguration{} + case 
appsv1.SchemeGroupVersion.WithKind("DaemonSet"): + return &applyconfigurationsappsv1.DaemonSetApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("DaemonSetCondition"): + return &applyconfigurationsappsv1.DaemonSetConditionApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("DaemonSetSpec"): + return &applyconfigurationsappsv1.DaemonSetSpecApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("DaemonSetStatus"): + return &applyconfigurationsappsv1.DaemonSetStatusApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("DaemonSetUpdateStrategy"): + return &applyconfigurationsappsv1.DaemonSetUpdateStrategyApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("Deployment"): + return &applyconfigurationsappsv1.DeploymentApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("DeploymentCondition"): + return &applyconfigurationsappsv1.DeploymentConditionApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("DeploymentSpec"): + return &applyconfigurationsappsv1.DeploymentSpecApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("DeploymentStatus"): + return &applyconfigurationsappsv1.DeploymentStatusApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("DeploymentStrategy"): + return &applyconfigurationsappsv1.DeploymentStrategyApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("ReplicaSet"): + return &applyconfigurationsappsv1.ReplicaSetApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("ReplicaSetCondition"): + return &applyconfigurationsappsv1.ReplicaSetConditionApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("ReplicaSetSpec"): + return &applyconfigurationsappsv1.ReplicaSetSpecApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("ReplicaSetStatus"): + return &applyconfigurationsappsv1.ReplicaSetStatusApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("RollingUpdateDaemonSet"): + return &applyconfigurationsappsv1.RollingUpdateDaemonSetApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("RollingUpdateDeployment"): + return &applyconfigurationsappsv1.RollingUpdateDeploymentApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("RollingUpdateStatefulSetStrategy"): + return &applyconfigurationsappsv1.RollingUpdateStatefulSetStrategyApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("StatefulSet"): + return &applyconfigurationsappsv1.StatefulSetApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("StatefulSetCondition"): + return &applyconfigurationsappsv1.StatefulSetConditionApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("StatefulSetOrdinals"): + return &applyconfigurationsappsv1.StatefulSetOrdinalsApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("StatefulSetPersistentVolumeClaimRetentionPolicy"): + return &applyconfigurationsappsv1.StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("StatefulSetSpec"): + return &applyconfigurationsappsv1.StatefulSetSpecApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("StatefulSetStatus"): + return &applyconfigurationsappsv1.StatefulSetStatusApplyConfiguration{} + case appsv1.SchemeGroupVersion.WithKind("StatefulSetUpdateStrategy"): + return &applyconfigurationsappsv1.StatefulSetUpdateStrategyApplyConfiguration{} + + // Group=apps, Version=v1beta1 + case appsv1beta1.SchemeGroupVersion.WithKind("ControllerRevision"): + return 
&applyconfigurationsappsv1beta1.ControllerRevisionApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("Deployment"): + return &applyconfigurationsappsv1beta1.DeploymentApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("DeploymentCondition"): + return &applyconfigurationsappsv1beta1.DeploymentConditionApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("DeploymentSpec"): + return &applyconfigurationsappsv1beta1.DeploymentSpecApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("DeploymentStatus"): + return &applyconfigurationsappsv1beta1.DeploymentStatusApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("DeploymentStrategy"): + return &applyconfigurationsappsv1beta1.DeploymentStrategyApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("RollbackConfig"): + return &applyconfigurationsappsv1beta1.RollbackConfigApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("RollingUpdateDeployment"): + return &applyconfigurationsappsv1beta1.RollingUpdateDeploymentApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("RollingUpdateStatefulSetStrategy"): + return &applyconfigurationsappsv1beta1.RollingUpdateStatefulSetStrategyApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("StatefulSet"): + return &applyconfigurationsappsv1beta1.StatefulSetApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("StatefulSetCondition"): + return &applyconfigurationsappsv1beta1.StatefulSetConditionApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("StatefulSetOrdinals"): + return &applyconfigurationsappsv1beta1.StatefulSetOrdinalsApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("StatefulSetPersistentVolumeClaimRetentionPolicy"): + return &applyconfigurationsappsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("StatefulSetSpec"): + return &applyconfigurationsappsv1beta1.StatefulSetSpecApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("StatefulSetStatus"): + return &applyconfigurationsappsv1beta1.StatefulSetStatusApplyConfiguration{} + case appsv1beta1.SchemeGroupVersion.WithKind("StatefulSetUpdateStrategy"): + return &applyconfigurationsappsv1beta1.StatefulSetUpdateStrategyApplyConfiguration{} + + // Group=apps, Version=v1beta2 + case v1beta2.SchemeGroupVersion.WithKind("ControllerRevision"): + return &appsv1beta2.ControllerRevisionApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("DaemonSet"): + return &appsv1beta2.DaemonSetApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("DaemonSetCondition"): + return &appsv1beta2.DaemonSetConditionApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("DaemonSetSpec"): + return &appsv1beta2.DaemonSetSpecApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("DaemonSetStatus"): + return &appsv1beta2.DaemonSetStatusApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("DaemonSetUpdateStrategy"): + return &appsv1beta2.DaemonSetUpdateStrategyApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("Deployment"): + return &appsv1beta2.DeploymentApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("DeploymentCondition"): + return &appsv1beta2.DeploymentConditionApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("DeploymentSpec"): + return &appsv1beta2.DeploymentSpecApplyConfiguration{} + case 
v1beta2.SchemeGroupVersion.WithKind("DeploymentStatus"): + return &appsv1beta2.DeploymentStatusApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("DeploymentStrategy"): + return &appsv1beta2.DeploymentStrategyApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("ReplicaSet"): + return &appsv1beta2.ReplicaSetApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("ReplicaSetCondition"): + return &appsv1beta2.ReplicaSetConditionApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("ReplicaSetSpec"): + return &appsv1beta2.ReplicaSetSpecApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("ReplicaSetStatus"): + return &appsv1beta2.ReplicaSetStatusApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("RollingUpdateDaemonSet"): + return &appsv1beta2.RollingUpdateDaemonSetApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("RollingUpdateDeployment"): + return &appsv1beta2.RollingUpdateDeploymentApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("RollingUpdateStatefulSetStrategy"): + return &appsv1beta2.RollingUpdateStatefulSetStrategyApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("Scale"): + return &appsv1beta2.ScaleApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("StatefulSet"): + return &appsv1beta2.StatefulSetApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("StatefulSetCondition"): + return &appsv1beta2.StatefulSetConditionApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("StatefulSetOrdinals"): + return &appsv1beta2.StatefulSetOrdinalsApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("StatefulSetPersistentVolumeClaimRetentionPolicy"): + return &appsv1beta2.StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("StatefulSetSpec"): + return &appsv1beta2.StatefulSetSpecApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("StatefulSetStatus"): + return &appsv1beta2.StatefulSetStatusApplyConfiguration{} + case v1beta2.SchemeGroupVersion.WithKind("StatefulSetUpdateStrategy"): + return &appsv1beta2.StatefulSetUpdateStrategyApplyConfiguration{} + + // Group=autoscaling, Version=v1 + case autoscalingv1.SchemeGroupVersion.WithKind("CrossVersionObjectReference"): + return &applyconfigurationsautoscalingv1.CrossVersionObjectReferenceApplyConfiguration{} + case autoscalingv1.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"): + return &applyconfigurationsautoscalingv1.HorizontalPodAutoscalerApplyConfiguration{} + case autoscalingv1.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerSpec"): + return &applyconfigurationsautoscalingv1.HorizontalPodAutoscalerSpecApplyConfiguration{} + case autoscalingv1.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerStatus"): + return &applyconfigurationsautoscalingv1.HorizontalPodAutoscalerStatusApplyConfiguration{} + case autoscalingv1.SchemeGroupVersion.WithKind("Scale"): + return &applyconfigurationsautoscalingv1.ScaleApplyConfiguration{} + case autoscalingv1.SchemeGroupVersion.WithKind("ScaleSpec"): + return &applyconfigurationsautoscalingv1.ScaleSpecApplyConfiguration{} + case autoscalingv1.SchemeGroupVersion.WithKind("ScaleStatus"): + return &applyconfigurationsautoscalingv1.ScaleStatusApplyConfiguration{} + + // Group=autoscaling, Version=v2 + case v2.SchemeGroupVersion.WithKind("ContainerResourceMetricSource"): + return &autoscalingv2.ContainerResourceMetricSourceApplyConfiguration{} + case 
v2.SchemeGroupVersion.WithKind("ContainerResourceMetricStatus"): + return &autoscalingv2.ContainerResourceMetricStatusApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("CrossVersionObjectReference"): + return &autoscalingv2.CrossVersionObjectReferenceApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("ExternalMetricSource"): + return &autoscalingv2.ExternalMetricSourceApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("ExternalMetricStatus"): + return &autoscalingv2.ExternalMetricStatusApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"): + return &autoscalingv2.HorizontalPodAutoscalerApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerBehavior"): + return &autoscalingv2.HorizontalPodAutoscalerBehaviorApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerCondition"): + return &autoscalingv2.HorizontalPodAutoscalerConditionApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerSpec"): + return &autoscalingv2.HorizontalPodAutoscalerSpecApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerStatus"): + return &autoscalingv2.HorizontalPodAutoscalerStatusApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("HPAScalingPolicy"): + return &autoscalingv2.HPAScalingPolicyApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("HPAScalingRules"): + return &autoscalingv2.HPAScalingRulesApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("MetricIdentifier"): + return &autoscalingv2.MetricIdentifierApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("MetricSpec"): + return &autoscalingv2.MetricSpecApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("MetricStatus"): + return &autoscalingv2.MetricStatusApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("MetricTarget"): + return &autoscalingv2.MetricTargetApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("MetricValueStatus"): + return &autoscalingv2.MetricValueStatusApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("ObjectMetricSource"): + return &autoscalingv2.ObjectMetricSourceApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("ObjectMetricStatus"): + return &autoscalingv2.ObjectMetricStatusApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("PodsMetricSource"): + return &autoscalingv2.PodsMetricSourceApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("PodsMetricStatus"): + return &autoscalingv2.PodsMetricStatusApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("ResourceMetricSource"): + return &autoscalingv2.ResourceMetricSourceApplyConfiguration{} + case v2.SchemeGroupVersion.WithKind("ResourceMetricStatus"): + return &autoscalingv2.ResourceMetricStatusApplyConfiguration{} + + // Group=autoscaling, Version=v2beta1 + case v2beta1.SchemeGroupVersion.WithKind("ContainerResourceMetricSource"): + return &autoscalingv2beta1.ContainerResourceMetricSourceApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("ContainerResourceMetricStatus"): + return &autoscalingv2beta1.ContainerResourceMetricStatusApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("CrossVersionObjectReference"): + return &autoscalingv2beta1.CrossVersionObjectReferenceApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("ExternalMetricSource"): + return &autoscalingv2beta1.ExternalMetricSourceApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("ExternalMetricStatus"): + return 
&autoscalingv2beta1.ExternalMetricStatusApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"): + return &autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerCondition"): + return &autoscalingv2beta1.HorizontalPodAutoscalerConditionApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerSpec"): + return &autoscalingv2beta1.HorizontalPodAutoscalerSpecApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerStatus"): + return &autoscalingv2beta1.HorizontalPodAutoscalerStatusApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("MetricSpec"): + return &autoscalingv2beta1.MetricSpecApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("MetricStatus"): + return &autoscalingv2beta1.MetricStatusApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("ObjectMetricSource"): + return &autoscalingv2beta1.ObjectMetricSourceApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("ObjectMetricStatus"): + return &autoscalingv2beta1.ObjectMetricStatusApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("PodsMetricSource"): + return &autoscalingv2beta1.PodsMetricSourceApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("PodsMetricStatus"): + return &autoscalingv2beta1.PodsMetricStatusApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("ResourceMetricSource"): + return &autoscalingv2beta1.ResourceMetricSourceApplyConfiguration{} + case v2beta1.SchemeGroupVersion.WithKind("ResourceMetricStatus"): + return &autoscalingv2beta1.ResourceMetricStatusApplyConfiguration{} + + // Group=autoscaling, Version=v2beta2 + case v2beta2.SchemeGroupVersion.WithKind("ContainerResourceMetricSource"): + return &autoscalingv2beta2.ContainerResourceMetricSourceApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("ContainerResourceMetricStatus"): + return &autoscalingv2beta2.ContainerResourceMetricStatusApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("CrossVersionObjectReference"): + return &autoscalingv2beta2.CrossVersionObjectReferenceApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("ExternalMetricSource"): + return &autoscalingv2beta2.ExternalMetricSourceApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("ExternalMetricStatus"): + return &autoscalingv2beta2.ExternalMetricStatusApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler"): + return &autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerBehavior"): + return &autoscalingv2beta2.HorizontalPodAutoscalerBehaviorApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerCondition"): + return &autoscalingv2beta2.HorizontalPodAutoscalerConditionApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerSpec"): + return &autoscalingv2beta2.HorizontalPodAutoscalerSpecApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("HorizontalPodAutoscalerStatus"): + return &autoscalingv2beta2.HorizontalPodAutoscalerStatusApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("HPAScalingPolicy"): + return &autoscalingv2beta2.HPAScalingPolicyApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("HPAScalingRules"): + return &autoscalingv2beta2.HPAScalingRulesApplyConfiguration{} + case 
v2beta2.SchemeGroupVersion.WithKind("MetricIdentifier"): + return &autoscalingv2beta2.MetricIdentifierApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("MetricSpec"): + return &autoscalingv2beta2.MetricSpecApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("MetricStatus"): + return &autoscalingv2beta2.MetricStatusApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("MetricTarget"): + return &autoscalingv2beta2.MetricTargetApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("MetricValueStatus"): + return &autoscalingv2beta2.MetricValueStatusApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("ObjectMetricSource"): + return &autoscalingv2beta2.ObjectMetricSourceApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("ObjectMetricStatus"): + return &autoscalingv2beta2.ObjectMetricStatusApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("PodsMetricSource"): + return &autoscalingv2beta2.PodsMetricSourceApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("PodsMetricStatus"): + return &autoscalingv2beta2.PodsMetricStatusApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("ResourceMetricSource"): + return &autoscalingv2beta2.ResourceMetricSourceApplyConfiguration{} + case v2beta2.SchemeGroupVersion.WithKind("ResourceMetricStatus"): + return &autoscalingv2beta2.ResourceMetricStatusApplyConfiguration{} + + // Group=batch, Version=v1 + case batchv1.SchemeGroupVersion.WithKind("CronJob"): + return &applyconfigurationsbatchv1.CronJobApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("CronJobSpec"): + return &applyconfigurationsbatchv1.CronJobSpecApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("CronJobStatus"): + return &applyconfigurationsbatchv1.CronJobStatusApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("Job"): + return &applyconfigurationsbatchv1.JobApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("JobCondition"): + return &applyconfigurationsbatchv1.JobConditionApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("JobSpec"): + return &applyconfigurationsbatchv1.JobSpecApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("JobStatus"): + return &applyconfigurationsbatchv1.JobStatusApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("JobTemplateSpec"): + return &applyconfigurationsbatchv1.JobTemplateSpecApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("PodFailurePolicy"): + return &applyconfigurationsbatchv1.PodFailurePolicyApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("PodFailurePolicyOnExitCodesRequirement"): + return &applyconfigurationsbatchv1.PodFailurePolicyOnExitCodesRequirementApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("PodFailurePolicyOnPodConditionsPattern"): + return &applyconfigurationsbatchv1.PodFailurePolicyOnPodConditionsPatternApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("PodFailurePolicyRule"): + return &applyconfigurationsbatchv1.PodFailurePolicyRuleApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("SuccessPolicy"): + return &applyconfigurationsbatchv1.SuccessPolicyApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("SuccessPolicyRule"): + return &applyconfigurationsbatchv1.SuccessPolicyRuleApplyConfiguration{} + case batchv1.SchemeGroupVersion.WithKind("UncountedTerminatedPods"): + return &applyconfigurationsbatchv1.UncountedTerminatedPodsApplyConfiguration{} + + // Group=batch, 
Version=v1beta1 + case batchv1beta1.SchemeGroupVersion.WithKind("CronJob"): + return &applyconfigurationsbatchv1beta1.CronJobApplyConfiguration{} + case batchv1beta1.SchemeGroupVersion.WithKind("CronJobSpec"): + return &applyconfigurationsbatchv1beta1.CronJobSpecApplyConfiguration{} + case batchv1beta1.SchemeGroupVersion.WithKind("CronJobStatus"): + return &applyconfigurationsbatchv1beta1.CronJobStatusApplyConfiguration{} + case batchv1beta1.SchemeGroupVersion.WithKind("JobTemplateSpec"): + return &applyconfigurationsbatchv1beta1.JobTemplateSpecApplyConfiguration{} + + // Group=certificates.k8s.io, Version=v1 + case certificatesv1.SchemeGroupVersion.WithKind("CertificateSigningRequest"): + return &applyconfigurationscertificatesv1.CertificateSigningRequestApplyConfiguration{} + case certificatesv1.SchemeGroupVersion.WithKind("CertificateSigningRequestCondition"): + return &applyconfigurationscertificatesv1.CertificateSigningRequestConditionApplyConfiguration{} + case certificatesv1.SchemeGroupVersion.WithKind("CertificateSigningRequestSpec"): + return &applyconfigurationscertificatesv1.CertificateSigningRequestSpecApplyConfiguration{} + case certificatesv1.SchemeGroupVersion.WithKind("CertificateSigningRequestStatus"): + return &applyconfigurationscertificatesv1.CertificateSigningRequestStatusApplyConfiguration{} + + // Group=certificates.k8s.io, Version=v1alpha1 + case certificatesv1alpha1.SchemeGroupVersion.WithKind("ClusterTrustBundle"): + return &applyconfigurationscertificatesv1alpha1.ClusterTrustBundleApplyConfiguration{} + case certificatesv1alpha1.SchemeGroupVersion.WithKind("ClusterTrustBundleSpec"): + return &applyconfigurationscertificatesv1alpha1.ClusterTrustBundleSpecApplyConfiguration{} + + // Group=certificates.k8s.io, Version=v1beta1 + case certificatesv1beta1.SchemeGroupVersion.WithKind("CertificateSigningRequest"): + return &applyconfigurationscertificatesv1beta1.CertificateSigningRequestApplyConfiguration{} + case certificatesv1beta1.SchemeGroupVersion.WithKind("CertificateSigningRequestCondition"): + return &applyconfigurationscertificatesv1beta1.CertificateSigningRequestConditionApplyConfiguration{} + case certificatesv1beta1.SchemeGroupVersion.WithKind("CertificateSigningRequestSpec"): + return &applyconfigurationscertificatesv1beta1.CertificateSigningRequestSpecApplyConfiguration{} + case certificatesv1beta1.SchemeGroupVersion.WithKind("CertificateSigningRequestStatus"): + return &applyconfigurationscertificatesv1beta1.CertificateSigningRequestStatusApplyConfiguration{} + + // Group=coordination.k8s.io, Version=v1 + case coordinationv1.SchemeGroupVersion.WithKind("Lease"): + return &applyconfigurationscoordinationv1.LeaseApplyConfiguration{} + case coordinationv1.SchemeGroupVersion.WithKind("LeaseSpec"): + return &applyconfigurationscoordinationv1.LeaseSpecApplyConfiguration{} + + // Group=coordination.k8s.io, Version=v1alpha1 + case coordinationv1alpha1.SchemeGroupVersion.WithKind("LeaseCandidate"): + return &applyconfigurationscoordinationv1alpha1.LeaseCandidateApplyConfiguration{} + case coordinationv1alpha1.SchemeGroupVersion.WithKind("LeaseCandidateSpec"): + return &applyconfigurationscoordinationv1alpha1.LeaseCandidateSpecApplyConfiguration{} + + // Group=coordination.k8s.io, Version=v1beta1 + case coordinationv1beta1.SchemeGroupVersion.WithKind("Lease"): + return &applyconfigurationscoordinationv1beta1.LeaseApplyConfiguration{} + case coordinationv1beta1.SchemeGroupVersion.WithKind("LeaseSpec"): + return 
&applyconfigurationscoordinationv1beta1.LeaseSpecApplyConfiguration{} + + // Group=core, Version=v1 + case corev1.SchemeGroupVersion.WithKind("Affinity"): + return &applyconfigurationscorev1.AffinityApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("AppArmorProfile"): + return &applyconfigurationscorev1.AppArmorProfileApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("AttachedVolume"): + return &applyconfigurationscorev1.AttachedVolumeApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("AWSElasticBlockStoreVolumeSource"): + return &applyconfigurationscorev1.AWSElasticBlockStoreVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("AzureDiskVolumeSource"): + return &applyconfigurationscorev1.AzureDiskVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("AzureFilePersistentVolumeSource"): + return &applyconfigurationscorev1.AzureFilePersistentVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("AzureFileVolumeSource"): + return &applyconfigurationscorev1.AzureFileVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Capabilities"): + return &applyconfigurationscorev1.CapabilitiesApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("CephFSPersistentVolumeSource"): + return &applyconfigurationscorev1.CephFSPersistentVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("CephFSVolumeSource"): + return &applyconfigurationscorev1.CephFSVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("CinderPersistentVolumeSource"): + return &applyconfigurationscorev1.CinderPersistentVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("CinderVolumeSource"): + return &applyconfigurationscorev1.CinderVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ClientIPConfig"): + return &applyconfigurationscorev1.ClientIPConfigApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ClusterTrustBundleProjection"): + return &applyconfigurationscorev1.ClusterTrustBundleProjectionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ComponentCondition"): + return &applyconfigurationscorev1.ComponentConditionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ComponentStatus"): + return &applyconfigurationscorev1.ComponentStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ConfigMap"): + return &applyconfigurationscorev1.ConfigMapApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ConfigMapEnvSource"): + return &applyconfigurationscorev1.ConfigMapEnvSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ConfigMapKeySelector"): + return &applyconfigurationscorev1.ConfigMapKeySelectorApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ConfigMapNodeConfigSource"): + return &applyconfigurationscorev1.ConfigMapNodeConfigSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ConfigMapProjection"): + return &applyconfigurationscorev1.ConfigMapProjectionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ConfigMapVolumeSource"): + return &applyconfigurationscorev1.ConfigMapVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Container"): + return &applyconfigurationscorev1.ContainerApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ContainerImage"): + return &applyconfigurationscorev1.ContainerImageApplyConfiguration{} + case 
corev1.SchemeGroupVersion.WithKind("ContainerPort"): + return &applyconfigurationscorev1.ContainerPortApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ContainerResizePolicy"): + return &applyconfigurationscorev1.ContainerResizePolicyApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ContainerState"): + return &applyconfigurationscorev1.ContainerStateApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ContainerStateRunning"): + return &applyconfigurationscorev1.ContainerStateRunningApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ContainerStateTerminated"): + return &applyconfigurationscorev1.ContainerStateTerminatedApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ContainerStateWaiting"): + return &applyconfigurationscorev1.ContainerStateWaitingApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ContainerStatus"): + return &applyconfigurationscorev1.ContainerStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ContainerUser"): + return &applyconfigurationscorev1.ContainerUserApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("CSIPersistentVolumeSource"): + return &applyconfigurationscorev1.CSIPersistentVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("CSIVolumeSource"): + return &applyconfigurationscorev1.CSIVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("DaemonEndpoint"): + return &applyconfigurationscorev1.DaemonEndpointApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("DownwardAPIProjection"): + return &applyconfigurationscorev1.DownwardAPIProjectionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("DownwardAPIVolumeFile"): + return &applyconfigurationscorev1.DownwardAPIVolumeFileApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("DownwardAPIVolumeSource"): + return &applyconfigurationscorev1.DownwardAPIVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("EmptyDirVolumeSource"): + return &applyconfigurationscorev1.EmptyDirVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("EndpointAddress"): + return &applyconfigurationscorev1.EndpointAddressApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("EndpointPort"): + return &applyconfigurationscorev1.EndpointPortApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Endpoints"): + return &applyconfigurationscorev1.EndpointsApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("EndpointSubset"): + return &applyconfigurationscorev1.EndpointSubsetApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("EnvFromSource"): + return &applyconfigurationscorev1.EnvFromSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("EnvVar"): + return &applyconfigurationscorev1.EnvVarApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("EnvVarSource"): + return &applyconfigurationscorev1.EnvVarSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("EphemeralContainer"): + return &applyconfigurationscorev1.EphemeralContainerApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("EphemeralContainerCommon"): + return &applyconfigurationscorev1.EphemeralContainerCommonApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("EphemeralVolumeSource"): + return &applyconfigurationscorev1.EphemeralVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Event"): + return 
&applyconfigurationscorev1.EventApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("EventSeries"): + return &applyconfigurationscorev1.EventSeriesApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("EventSource"): + return &applyconfigurationscorev1.EventSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ExecAction"): + return &applyconfigurationscorev1.ExecActionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("FCVolumeSource"): + return &applyconfigurationscorev1.FCVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("FlexPersistentVolumeSource"): + return &applyconfigurationscorev1.FlexPersistentVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("FlexVolumeSource"): + return &applyconfigurationscorev1.FlexVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("FlockerVolumeSource"): + return &applyconfigurationscorev1.FlockerVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("GCEPersistentDiskVolumeSource"): + return &applyconfigurationscorev1.GCEPersistentDiskVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("GitRepoVolumeSource"): + return &applyconfigurationscorev1.GitRepoVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("GlusterfsPersistentVolumeSource"): + return &applyconfigurationscorev1.GlusterfsPersistentVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("GlusterfsVolumeSource"): + return &applyconfigurationscorev1.GlusterfsVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("GRPCAction"): + return &applyconfigurationscorev1.GRPCActionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("HostAlias"): + return &applyconfigurationscorev1.HostAliasApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("HostIP"): + return &applyconfigurationscorev1.HostIPApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("HostPathVolumeSource"): + return &applyconfigurationscorev1.HostPathVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("HTTPGetAction"): + return &applyconfigurationscorev1.HTTPGetActionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("HTTPHeader"): + return &applyconfigurationscorev1.HTTPHeaderApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ImageVolumeSource"): + return &applyconfigurationscorev1.ImageVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ISCSIPersistentVolumeSource"): + return &applyconfigurationscorev1.ISCSIPersistentVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ISCSIVolumeSource"): + return &applyconfigurationscorev1.ISCSIVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("KeyToPath"): + return &applyconfigurationscorev1.KeyToPathApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Lifecycle"): + return &applyconfigurationscorev1.LifecycleApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("LifecycleHandler"): + return &applyconfigurationscorev1.LifecycleHandlerApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("LimitRange"): + return &applyconfigurationscorev1.LimitRangeApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("LimitRangeItem"): + return &applyconfigurationscorev1.LimitRangeItemApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("LimitRangeSpec"): + return 
&applyconfigurationscorev1.LimitRangeSpecApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("LinuxContainerUser"): + return &applyconfigurationscorev1.LinuxContainerUserApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("LoadBalancerIngress"): + return &applyconfigurationscorev1.LoadBalancerIngressApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("LoadBalancerStatus"): + return &applyconfigurationscorev1.LoadBalancerStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("LocalObjectReference"): + return &applyconfigurationscorev1.LocalObjectReferenceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("LocalVolumeSource"): + return &applyconfigurationscorev1.LocalVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ModifyVolumeStatus"): + return &applyconfigurationscorev1.ModifyVolumeStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Namespace"): + return &applyconfigurationscorev1.NamespaceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NamespaceCondition"): + return &applyconfigurationscorev1.NamespaceConditionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NamespaceSpec"): + return &applyconfigurationscorev1.NamespaceSpecApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NamespaceStatus"): + return &applyconfigurationscorev1.NamespaceStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NFSVolumeSource"): + return &applyconfigurationscorev1.NFSVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Node"): + return &applyconfigurationscorev1.NodeApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeAddress"): + return &applyconfigurationscorev1.NodeAddressApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeAffinity"): + return &applyconfigurationscorev1.NodeAffinityApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeCondition"): + return &applyconfigurationscorev1.NodeConditionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeConfigSource"): + return &applyconfigurationscorev1.NodeConfigSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeConfigStatus"): + return &applyconfigurationscorev1.NodeConfigStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeDaemonEndpoints"): + return &applyconfigurationscorev1.NodeDaemonEndpointsApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeFeatures"): + return &applyconfigurationscorev1.NodeFeaturesApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeRuntimeHandler"): + return &applyconfigurationscorev1.NodeRuntimeHandlerApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeRuntimeHandlerFeatures"): + return &applyconfigurationscorev1.NodeRuntimeHandlerFeaturesApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeSelector"): + return &applyconfigurationscorev1.NodeSelectorApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeSelectorRequirement"): + return &applyconfigurationscorev1.NodeSelectorRequirementApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeSelectorTerm"): + return &applyconfigurationscorev1.NodeSelectorTermApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeSpec"): + return &applyconfigurationscorev1.NodeSpecApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeStatus"): + return 
&applyconfigurationscorev1.NodeStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("NodeSystemInfo"): + return &applyconfigurationscorev1.NodeSystemInfoApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ObjectFieldSelector"): + return &applyconfigurationscorev1.ObjectFieldSelectorApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ObjectReference"): + return &applyconfigurationscorev1.ObjectReferenceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PersistentVolume"): + return &applyconfigurationscorev1.PersistentVolumeApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim"): + return &applyconfigurationscorev1.PersistentVolumeClaimApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaimCondition"): + return &applyconfigurationscorev1.PersistentVolumeClaimConditionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaimSpec"): + return &applyconfigurationscorev1.PersistentVolumeClaimSpecApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaimStatus"): + return &applyconfigurationscorev1.PersistentVolumeClaimStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaimTemplate"): + return &applyconfigurationscorev1.PersistentVolumeClaimTemplateApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaimVolumeSource"): + return &applyconfigurationscorev1.PersistentVolumeClaimVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PersistentVolumeSource"): + return &applyconfigurationscorev1.PersistentVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PersistentVolumeSpec"): + return &applyconfigurationscorev1.PersistentVolumeSpecApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PersistentVolumeStatus"): + return &applyconfigurationscorev1.PersistentVolumeStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PhotonPersistentDiskVolumeSource"): + return &applyconfigurationscorev1.PhotonPersistentDiskVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Pod"): + return &applyconfigurationscorev1.PodApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodAffinity"): + return &applyconfigurationscorev1.PodAffinityApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodAffinityTerm"): + return &applyconfigurationscorev1.PodAffinityTermApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodAntiAffinity"): + return &applyconfigurationscorev1.PodAntiAffinityApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodCondition"): + return &applyconfigurationscorev1.PodConditionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodDNSConfig"): + return &applyconfigurationscorev1.PodDNSConfigApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodDNSConfigOption"): + return &applyconfigurationscorev1.PodDNSConfigOptionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodIP"): + return &applyconfigurationscorev1.PodIPApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodOS"): + return &applyconfigurationscorev1.PodOSApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodReadinessGate"): + return &applyconfigurationscorev1.PodReadinessGateApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodResourceClaim"): + return 
&applyconfigurationscorev1.PodResourceClaimApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodResourceClaimStatus"): + return &applyconfigurationscorev1.PodResourceClaimStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodSchedulingGate"): + return &applyconfigurationscorev1.PodSchedulingGateApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodSecurityContext"): + return &applyconfigurationscorev1.PodSecurityContextApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodSpec"): + return &applyconfigurationscorev1.PodSpecApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodStatus"): + return &applyconfigurationscorev1.PodStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodTemplate"): + return &applyconfigurationscorev1.PodTemplateApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PodTemplateSpec"): + return &applyconfigurationscorev1.PodTemplateSpecApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PortStatus"): + return &applyconfigurationscorev1.PortStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PortworxVolumeSource"): + return &applyconfigurationscorev1.PortworxVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("PreferredSchedulingTerm"): + return &applyconfigurationscorev1.PreferredSchedulingTermApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Probe"): + return &applyconfigurationscorev1.ProbeApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ProbeHandler"): + return &applyconfigurationscorev1.ProbeHandlerApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ProjectedVolumeSource"): + return &applyconfigurationscorev1.ProjectedVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("QuobyteVolumeSource"): + return &applyconfigurationscorev1.QuobyteVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("RBDPersistentVolumeSource"): + return &applyconfigurationscorev1.RBDPersistentVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("RBDVolumeSource"): + return &applyconfigurationscorev1.RBDVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ReplicationController"): + return &applyconfigurationscorev1.ReplicationControllerApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ReplicationControllerCondition"): + return &applyconfigurationscorev1.ReplicationControllerConditionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ReplicationControllerSpec"): + return &applyconfigurationscorev1.ReplicationControllerSpecApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ReplicationControllerStatus"): + return &applyconfigurationscorev1.ReplicationControllerStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ResourceClaim"): + return &applyconfigurationscorev1.ResourceClaimApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ResourceFieldSelector"): + return &applyconfigurationscorev1.ResourceFieldSelectorApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ResourceHealth"): + return &applyconfigurationscorev1.ResourceHealthApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ResourceQuota"): + return &applyconfigurationscorev1.ResourceQuotaApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ResourceQuotaSpec"): + return &applyconfigurationscorev1.ResourceQuotaSpecApplyConfiguration{} + case 
corev1.SchemeGroupVersion.WithKind("ResourceQuotaStatus"): + return &applyconfigurationscorev1.ResourceQuotaStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ResourceRequirements"): + return &applyconfigurationscorev1.ResourceRequirementsApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ResourceStatus"): + return &applyconfigurationscorev1.ResourceStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ScaleIOPersistentVolumeSource"): + return &applyconfigurationscorev1.ScaleIOPersistentVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ScaleIOVolumeSource"): + return &applyconfigurationscorev1.ScaleIOVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ScopedResourceSelectorRequirement"): + return &applyconfigurationscorev1.ScopedResourceSelectorRequirementApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ScopeSelector"): + return &applyconfigurationscorev1.ScopeSelectorApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("SeccompProfile"): + return &applyconfigurationscorev1.SeccompProfileApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Secret"): + return &applyconfigurationscorev1.SecretApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("SecretEnvSource"): + return &applyconfigurationscorev1.SecretEnvSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("SecretKeySelector"): + return &applyconfigurationscorev1.SecretKeySelectorApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("SecretProjection"): + return &applyconfigurationscorev1.SecretProjectionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("SecretReference"): + return &applyconfigurationscorev1.SecretReferenceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("SecretVolumeSource"): + return &applyconfigurationscorev1.SecretVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("SecurityContext"): + return &applyconfigurationscorev1.SecurityContextApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("SELinuxOptions"): + return &applyconfigurationscorev1.SELinuxOptionsApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Service"): + return &applyconfigurationscorev1.ServiceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ServiceAccount"): + return &applyconfigurationscorev1.ServiceAccountApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ServiceAccountTokenProjection"): + return &applyconfigurationscorev1.ServiceAccountTokenProjectionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ServicePort"): + return &applyconfigurationscorev1.ServicePortApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ServiceSpec"): + return &applyconfigurationscorev1.ServiceSpecApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("ServiceStatus"): + return &applyconfigurationscorev1.ServiceStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("SessionAffinityConfig"): + return &applyconfigurationscorev1.SessionAffinityConfigApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("SleepAction"): + return &applyconfigurationscorev1.SleepActionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("StorageOSPersistentVolumeSource"): + return &applyconfigurationscorev1.StorageOSPersistentVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("StorageOSVolumeSource"): + return 
&applyconfigurationscorev1.StorageOSVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Sysctl"): + return &applyconfigurationscorev1.SysctlApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Taint"): + return &applyconfigurationscorev1.TaintApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("TCPSocketAction"): + return &applyconfigurationscorev1.TCPSocketActionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Toleration"): + return &applyconfigurationscorev1.TolerationApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("TopologySelectorLabelRequirement"): + return &applyconfigurationscorev1.TopologySelectorLabelRequirementApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("TopologySelectorTerm"): + return &applyconfigurationscorev1.TopologySelectorTermApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("TopologySpreadConstraint"): + return &applyconfigurationscorev1.TopologySpreadConstraintApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("TypedLocalObjectReference"): + return &applyconfigurationscorev1.TypedLocalObjectReferenceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("TypedObjectReference"): + return &applyconfigurationscorev1.TypedObjectReferenceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("Volume"): + return &applyconfigurationscorev1.VolumeApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("VolumeDevice"): + return &applyconfigurationscorev1.VolumeDeviceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("VolumeMount"): + return &applyconfigurationscorev1.VolumeMountApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("VolumeMountStatus"): + return &applyconfigurationscorev1.VolumeMountStatusApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("VolumeNodeAffinity"): + return &applyconfigurationscorev1.VolumeNodeAffinityApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("VolumeProjection"): + return &applyconfigurationscorev1.VolumeProjectionApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("VolumeResourceRequirements"): + return &applyconfigurationscorev1.VolumeResourceRequirementsApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("VolumeSource"): + return &applyconfigurationscorev1.VolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("VsphereVirtualDiskVolumeSource"): + return &applyconfigurationscorev1.VsphereVirtualDiskVolumeSourceApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("WeightedPodAffinityTerm"): + return &applyconfigurationscorev1.WeightedPodAffinityTermApplyConfiguration{} + case corev1.SchemeGroupVersion.WithKind("WindowsSecurityContextOptions"): + return &applyconfigurationscorev1.WindowsSecurityContextOptionsApplyConfiguration{} + + // Group=discovery.k8s.io, Version=v1 + case discoveryv1.SchemeGroupVersion.WithKind("Endpoint"): + return &applyconfigurationsdiscoveryv1.EndpointApplyConfiguration{} + case discoveryv1.SchemeGroupVersion.WithKind("EndpointConditions"): + return &applyconfigurationsdiscoveryv1.EndpointConditionsApplyConfiguration{} + case discoveryv1.SchemeGroupVersion.WithKind("EndpointHints"): + return &applyconfigurationsdiscoveryv1.EndpointHintsApplyConfiguration{} + case discoveryv1.SchemeGroupVersion.WithKind("EndpointPort"): + return &applyconfigurationsdiscoveryv1.EndpointPortApplyConfiguration{} + case discoveryv1.SchemeGroupVersion.WithKind("EndpointSlice"): + return 
&applyconfigurationsdiscoveryv1.EndpointSliceApplyConfiguration{} + case discoveryv1.SchemeGroupVersion.WithKind("ForZone"): + return &applyconfigurationsdiscoveryv1.ForZoneApplyConfiguration{} + + // Group=discovery.k8s.io, Version=v1beta1 + case discoveryv1beta1.SchemeGroupVersion.WithKind("Endpoint"): + return &applyconfigurationsdiscoveryv1beta1.EndpointApplyConfiguration{} + case discoveryv1beta1.SchemeGroupVersion.WithKind("EndpointConditions"): + return &applyconfigurationsdiscoveryv1beta1.EndpointConditionsApplyConfiguration{} + case discoveryv1beta1.SchemeGroupVersion.WithKind("EndpointHints"): + return &applyconfigurationsdiscoveryv1beta1.EndpointHintsApplyConfiguration{} + case discoveryv1beta1.SchemeGroupVersion.WithKind("EndpointPort"): + return &applyconfigurationsdiscoveryv1beta1.EndpointPortApplyConfiguration{} + case discoveryv1beta1.SchemeGroupVersion.WithKind("EndpointSlice"): + return &applyconfigurationsdiscoveryv1beta1.EndpointSliceApplyConfiguration{} + case discoveryv1beta1.SchemeGroupVersion.WithKind("ForZone"): + return &applyconfigurationsdiscoveryv1beta1.ForZoneApplyConfiguration{} + + // Group=events.k8s.io, Version=v1 + case eventsv1.SchemeGroupVersion.WithKind("Event"): + return &applyconfigurationseventsv1.EventApplyConfiguration{} + case eventsv1.SchemeGroupVersion.WithKind("EventSeries"): + return &applyconfigurationseventsv1.EventSeriesApplyConfiguration{} + + // Group=events.k8s.io, Version=v1beta1 + case eventsv1beta1.SchemeGroupVersion.WithKind("Event"): + return &applyconfigurationseventsv1beta1.EventApplyConfiguration{} + case eventsv1beta1.SchemeGroupVersion.WithKind("EventSeries"): + return &applyconfigurationseventsv1beta1.EventSeriesApplyConfiguration{} + + // Group=extensions, Version=v1beta1 + case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSet"): + return &applyconfigurationsextensionsv1beta1.DaemonSetApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSetCondition"): + return &applyconfigurationsextensionsv1beta1.DaemonSetConditionApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSetSpec"): + return &applyconfigurationsextensionsv1beta1.DaemonSetSpecApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSetStatus"): + return &applyconfigurationsextensionsv1beta1.DaemonSetStatusApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSetUpdateStrategy"): + return &applyconfigurationsextensionsv1beta1.DaemonSetUpdateStrategyApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment"): + return &applyconfigurationsextensionsv1beta1.DeploymentApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("DeploymentCondition"): + return &applyconfigurationsextensionsv1beta1.DeploymentConditionApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("DeploymentSpec"): + return &applyconfigurationsextensionsv1beta1.DeploymentSpecApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("DeploymentStatus"): + return &applyconfigurationsextensionsv1beta1.DeploymentStatusApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("DeploymentStrategy"): + return &applyconfigurationsextensionsv1beta1.DeploymentStrategyApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("HTTPIngressPath"): + return &applyconfigurationsextensionsv1beta1.HTTPIngressPathApplyConfiguration{} + case 
extensionsv1beta1.SchemeGroupVersion.WithKind("HTTPIngressRuleValue"): + return &applyconfigurationsextensionsv1beta1.HTTPIngressRuleValueApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("Ingress"): + return &applyconfigurationsextensionsv1beta1.IngressApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressBackend"): + return &applyconfigurationsextensionsv1beta1.IngressBackendApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressLoadBalancerIngress"): + return &applyconfigurationsextensionsv1beta1.IngressLoadBalancerIngressApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressLoadBalancerStatus"): + return &applyconfigurationsextensionsv1beta1.IngressLoadBalancerStatusApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressPortStatus"): + return &applyconfigurationsextensionsv1beta1.IngressPortStatusApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressRule"): + return &applyconfigurationsextensionsv1beta1.IngressRuleApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressRuleValue"): + return &applyconfigurationsextensionsv1beta1.IngressRuleValueApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressSpec"): + return &applyconfigurationsextensionsv1beta1.IngressSpecApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressStatus"): + return &applyconfigurationsextensionsv1beta1.IngressStatusApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("IngressTLS"): + return &applyconfigurationsextensionsv1beta1.IngressTLSApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("IPBlock"): + return &applyconfigurationsextensionsv1beta1.IPBlockApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("NetworkPolicy"): + return &applyconfigurationsextensionsv1beta1.NetworkPolicyApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("NetworkPolicyEgressRule"): + return &applyconfigurationsextensionsv1beta1.NetworkPolicyEgressRuleApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("NetworkPolicyIngressRule"): + return &applyconfigurationsextensionsv1beta1.NetworkPolicyIngressRuleApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("NetworkPolicyPeer"): + return &applyconfigurationsextensionsv1beta1.NetworkPolicyPeerApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("NetworkPolicyPort"): + return &applyconfigurationsextensionsv1beta1.NetworkPolicyPortApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("NetworkPolicySpec"): + return &applyconfigurationsextensionsv1beta1.NetworkPolicySpecApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("ReplicaSet"): + return &applyconfigurationsextensionsv1beta1.ReplicaSetApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("ReplicaSetCondition"): + return &applyconfigurationsextensionsv1beta1.ReplicaSetConditionApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("ReplicaSetSpec"): + return &applyconfigurationsextensionsv1beta1.ReplicaSetSpecApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("ReplicaSetStatus"): + return &applyconfigurationsextensionsv1beta1.ReplicaSetStatusApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("RollbackConfig"): + return 
&applyconfigurationsextensionsv1beta1.RollbackConfigApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("RollingUpdateDaemonSet"): + return &applyconfigurationsextensionsv1beta1.RollingUpdateDaemonSetApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("RollingUpdateDeployment"): + return &applyconfigurationsextensionsv1beta1.RollingUpdateDeploymentApplyConfiguration{} + case extensionsv1beta1.SchemeGroupVersion.WithKind("Scale"): + return &applyconfigurationsextensionsv1beta1.ScaleApplyConfiguration{} + + // Group=flowcontrol.apiserver.k8s.io, Version=v1 + case flowcontrolv1.SchemeGroupVersion.WithKind("ExemptPriorityLevelConfiguration"): + return &applyconfigurationsflowcontrolv1.ExemptPriorityLevelConfigurationApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("FlowDistinguisherMethod"): + return &applyconfigurationsflowcontrolv1.FlowDistinguisherMethodApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("FlowSchema"): + return &applyconfigurationsflowcontrolv1.FlowSchemaApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("FlowSchemaCondition"): + return &applyconfigurationsflowcontrolv1.FlowSchemaConditionApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("FlowSchemaSpec"): + return &applyconfigurationsflowcontrolv1.FlowSchemaSpecApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("FlowSchemaStatus"): + return &applyconfigurationsflowcontrolv1.FlowSchemaStatusApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("GroupSubject"): + return &applyconfigurationsflowcontrolv1.GroupSubjectApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("LimitedPriorityLevelConfiguration"): + return &applyconfigurationsflowcontrolv1.LimitedPriorityLevelConfigurationApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("LimitResponse"): + return &applyconfigurationsflowcontrolv1.LimitResponseApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("NonResourcePolicyRule"): + return &applyconfigurationsflowcontrolv1.NonResourcePolicyRuleApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("PolicyRulesWithSubjects"): + return &applyconfigurationsflowcontrolv1.PolicyRulesWithSubjectsApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("PriorityLevelConfiguration"): + return &applyconfigurationsflowcontrolv1.PriorityLevelConfigurationApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("PriorityLevelConfigurationCondition"): + return &applyconfigurationsflowcontrolv1.PriorityLevelConfigurationConditionApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("PriorityLevelConfigurationReference"): + return &applyconfigurationsflowcontrolv1.PriorityLevelConfigurationReferenceApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("PriorityLevelConfigurationSpec"): + return &applyconfigurationsflowcontrolv1.PriorityLevelConfigurationSpecApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("PriorityLevelConfigurationStatus"): + return &applyconfigurationsflowcontrolv1.PriorityLevelConfigurationStatusApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("QueuingConfiguration"): + return &applyconfigurationsflowcontrolv1.QueuingConfigurationApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("ResourcePolicyRule"): + return &applyconfigurationsflowcontrolv1.ResourcePolicyRuleApplyConfiguration{} + 
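// Illustrative aside (not taken from this hunk): every case in the surrounding
// switch maps a GroupVersionKind to an empty, strongly typed apply
// configuration, with the final `return nil` signalling an unknown kind. A
// minimal usage sketch, assuming the enclosing helper follows client-go's
// ForKind(gvk schema.GroupVersionKind) interface{} shape (that function name is
// an assumption; it is not visible in this hunk):
//
//	func emptyPodApplyConfiguration() (*applyconfigurationscorev1.PodApplyConfiguration, bool) {
//		obj := ForKind(corev1.SchemeGroupVersion.WithKind("Pod")) // nil for kinds without a case
//		cfg, ok := obj.(*applyconfigurationscorev1.PodApplyConfiguration)
//		return cfg, ok
//	}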
case flowcontrolv1.SchemeGroupVersion.WithKind("ServiceAccountSubject"): + return &applyconfigurationsflowcontrolv1.ServiceAccountSubjectApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("Subject"): + return &applyconfigurationsflowcontrolv1.SubjectApplyConfiguration{} + case flowcontrolv1.SchemeGroupVersion.WithKind("UserSubject"): + return &applyconfigurationsflowcontrolv1.UserSubjectApplyConfiguration{} + + // Group=flowcontrol.apiserver.k8s.io, Version=v1beta1 + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("ExemptPriorityLevelConfiguration"): + return &applyconfigurationsflowcontrolv1beta1.ExemptPriorityLevelConfigurationApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("FlowDistinguisherMethod"): + return &applyconfigurationsflowcontrolv1beta1.FlowDistinguisherMethodApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("FlowSchema"): + return &applyconfigurationsflowcontrolv1beta1.FlowSchemaApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("FlowSchemaCondition"): + return &applyconfigurationsflowcontrolv1beta1.FlowSchemaConditionApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("FlowSchemaSpec"): + return &applyconfigurationsflowcontrolv1beta1.FlowSchemaSpecApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("FlowSchemaStatus"): + return &applyconfigurationsflowcontrolv1beta1.FlowSchemaStatusApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("GroupSubject"): + return &applyconfigurationsflowcontrolv1beta1.GroupSubjectApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("LimitedPriorityLevelConfiguration"): + return &applyconfigurationsflowcontrolv1beta1.LimitedPriorityLevelConfigurationApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("LimitResponse"): + return &applyconfigurationsflowcontrolv1beta1.LimitResponseApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("NonResourcePolicyRule"): + return &applyconfigurationsflowcontrolv1beta1.NonResourcePolicyRuleApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("PolicyRulesWithSubjects"): + return &applyconfigurationsflowcontrolv1beta1.PolicyRulesWithSubjectsApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("PriorityLevelConfiguration"): + return &applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("PriorityLevelConfigurationCondition"): + return &applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationConditionApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("PriorityLevelConfigurationReference"): + return &applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationReferenceApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("PriorityLevelConfigurationSpec"): + return &applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationSpecApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("PriorityLevelConfigurationStatus"): + return &applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationStatusApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("QueuingConfiguration"): + return &applyconfigurationsflowcontrolv1beta1.QueuingConfigurationApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("ResourcePolicyRule"): + return 
&applyconfigurationsflowcontrolv1beta1.ResourcePolicyRuleApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("ServiceAccountSubject"): + return &applyconfigurationsflowcontrolv1beta1.ServiceAccountSubjectApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("Subject"): + return &applyconfigurationsflowcontrolv1beta1.SubjectApplyConfiguration{} + case flowcontrolv1beta1.SchemeGroupVersion.WithKind("UserSubject"): + return &applyconfigurationsflowcontrolv1beta1.UserSubjectApplyConfiguration{} + + // Group=flowcontrol.apiserver.k8s.io, Version=v1beta2 + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("ExemptPriorityLevelConfiguration"): + return &applyconfigurationsflowcontrolv1beta2.ExemptPriorityLevelConfigurationApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("FlowDistinguisherMethod"): + return &applyconfigurationsflowcontrolv1beta2.FlowDistinguisherMethodApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("FlowSchema"): + return &applyconfigurationsflowcontrolv1beta2.FlowSchemaApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("FlowSchemaCondition"): + return &applyconfigurationsflowcontrolv1beta2.FlowSchemaConditionApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("FlowSchemaSpec"): + return &applyconfigurationsflowcontrolv1beta2.FlowSchemaSpecApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("FlowSchemaStatus"): + return &applyconfigurationsflowcontrolv1beta2.FlowSchemaStatusApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("GroupSubject"): + return &applyconfigurationsflowcontrolv1beta2.GroupSubjectApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("LimitedPriorityLevelConfiguration"): + return &applyconfigurationsflowcontrolv1beta2.LimitedPriorityLevelConfigurationApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("LimitResponse"): + return &applyconfigurationsflowcontrolv1beta2.LimitResponseApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("NonResourcePolicyRule"): + return &applyconfigurationsflowcontrolv1beta2.NonResourcePolicyRuleApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("PolicyRulesWithSubjects"): + return &applyconfigurationsflowcontrolv1beta2.PolicyRulesWithSubjectsApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("PriorityLevelConfiguration"): + return &applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("PriorityLevelConfigurationCondition"): + return &applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationConditionApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("PriorityLevelConfigurationReference"): + return &applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationReferenceApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("PriorityLevelConfigurationSpec"): + return &applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationSpecApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("PriorityLevelConfigurationStatus"): + return &applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationStatusApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("QueuingConfiguration"): + return 
&applyconfigurationsflowcontrolv1beta2.QueuingConfigurationApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("ResourcePolicyRule"): + return &applyconfigurationsflowcontrolv1beta2.ResourcePolicyRuleApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("ServiceAccountSubject"): + return &applyconfigurationsflowcontrolv1beta2.ServiceAccountSubjectApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("Subject"): + return &applyconfigurationsflowcontrolv1beta2.SubjectApplyConfiguration{} + case flowcontrolv1beta2.SchemeGroupVersion.WithKind("UserSubject"): + return &applyconfigurationsflowcontrolv1beta2.UserSubjectApplyConfiguration{} + + // Group=flowcontrol.apiserver.k8s.io, Version=v1beta3 + case v1beta3.SchemeGroupVersion.WithKind("ExemptPriorityLevelConfiguration"): + return &flowcontrolv1beta3.ExemptPriorityLevelConfigurationApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("FlowDistinguisherMethod"): + return &flowcontrolv1beta3.FlowDistinguisherMethodApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("FlowSchema"): + return &flowcontrolv1beta3.FlowSchemaApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("FlowSchemaCondition"): + return &flowcontrolv1beta3.FlowSchemaConditionApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("FlowSchemaSpec"): + return &flowcontrolv1beta3.FlowSchemaSpecApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("FlowSchemaStatus"): + return &flowcontrolv1beta3.FlowSchemaStatusApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("GroupSubject"): + return &flowcontrolv1beta3.GroupSubjectApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("LimitedPriorityLevelConfiguration"): + return &flowcontrolv1beta3.LimitedPriorityLevelConfigurationApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("LimitResponse"): + return &flowcontrolv1beta3.LimitResponseApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("NonResourcePolicyRule"): + return &flowcontrolv1beta3.NonResourcePolicyRuleApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("PolicyRulesWithSubjects"): + return &flowcontrolv1beta3.PolicyRulesWithSubjectsApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("PriorityLevelConfiguration"): + return &flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("PriorityLevelConfigurationCondition"): + return &flowcontrolv1beta3.PriorityLevelConfigurationConditionApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("PriorityLevelConfigurationReference"): + return &flowcontrolv1beta3.PriorityLevelConfigurationReferenceApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("PriorityLevelConfigurationSpec"): + return &flowcontrolv1beta3.PriorityLevelConfigurationSpecApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("PriorityLevelConfigurationStatus"): + return &flowcontrolv1beta3.PriorityLevelConfigurationStatusApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("QueuingConfiguration"): + return &flowcontrolv1beta3.QueuingConfigurationApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("ResourcePolicyRule"): + return &flowcontrolv1beta3.ResourcePolicyRuleApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("ServiceAccountSubject"): + return &flowcontrolv1beta3.ServiceAccountSubjectApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("Subject"): + 
return &flowcontrolv1beta3.SubjectApplyConfiguration{} + case v1beta3.SchemeGroupVersion.WithKind("UserSubject"): + return &flowcontrolv1beta3.UserSubjectApplyConfiguration{} + + // Group=imagepolicy.k8s.io, Version=v1alpha1 + case imagepolicyv1alpha1.SchemeGroupVersion.WithKind("ImageReview"): + return &applyconfigurationsimagepolicyv1alpha1.ImageReviewApplyConfiguration{} + case imagepolicyv1alpha1.SchemeGroupVersion.WithKind("ImageReviewContainerSpec"): + return &applyconfigurationsimagepolicyv1alpha1.ImageReviewContainerSpecApplyConfiguration{} + case imagepolicyv1alpha1.SchemeGroupVersion.WithKind("ImageReviewSpec"): + return &applyconfigurationsimagepolicyv1alpha1.ImageReviewSpecApplyConfiguration{} + case imagepolicyv1alpha1.SchemeGroupVersion.WithKind("ImageReviewStatus"): + return &applyconfigurationsimagepolicyv1alpha1.ImageReviewStatusApplyConfiguration{} + + // Group=internal.apiserver.k8s.io, Version=v1alpha1 + case apiserverinternalv1alpha1.SchemeGroupVersion.WithKind("ServerStorageVersion"): + return &applyconfigurationsapiserverinternalv1alpha1.ServerStorageVersionApplyConfiguration{} + case apiserverinternalv1alpha1.SchemeGroupVersion.WithKind("StorageVersion"): + return &applyconfigurationsapiserverinternalv1alpha1.StorageVersionApplyConfiguration{} + case apiserverinternalv1alpha1.SchemeGroupVersion.WithKind("StorageVersionCondition"): + return &applyconfigurationsapiserverinternalv1alpha1.StorageVersionConditionApplyConfiguration{} + case apiserverinternalv1alpha1.SchemeGroupVersion.WithKind("StorageVersionStatus"): + return &applyconfigurationsapiserverinternalv1alpha1.StorageVersionStatusApplyConfiguration{} + + // Group=meta.k8s.io, Version=v1 + case metav1.SchemeGroupVersion.WithKind("Condition"): + return &applyconfigurationsmetav1.ConditionApplyConfiguration{} + case metav1.SchemeGroupVersion.WithKind("DeleteOptions"): + return &applyconfigurationsmetav1.DeleteOptionsApplyConfiguration{} + case metav1.SchemeGroupVersion.WithKind("LabelSelector"): + return &applyconfigurationsmetav1.LabelSelectorApplyConfiguration{} + case metav1.SchemeGroupVersion.WithKind("LabelSelectorRequirement"): + return &applyconfigurationsmetav1.LabelSelectorRequirementApplyConfiguration{} + case metav1.SchemeGroupVersion.WithKind("ManagedFieldsEntry"): + return &applyconfigurationsmetav1.ManagedFieldsEntryApplyConfiguration{} + case metav1.SchemeGroupVersion.WithKind("ObjectMeta"): + return &applyconfigurationsmetav1.ObjectMetaApplyConfiguration{} + case metav1.SchemeGroupVersion.WithKind("OwnerReference"): + return &applyconfigurationsmetav1.OwnerReferenceApplyConfiguration{} + case metav1.SchemeGroupVersion.WithKind("Preconditions"): + return &applyconfigurationsmetav1.PreconditionsApplyConfiguration{} + case metav1.SchemeGroupVersion.WithKind("TypeMeta"): + return &applyconfigurationsmetav1.TypeMetaApplyConfiguration{} + + // Group=networking.k8s.io, Version=v1 + case networkingv1.SchemeGroupVersion.WithKind("HTTPIngressPath"): + return &applyconfigurationsnetworkingv1.HTTPIngressPathApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("HTTPIngressRuleValue"): + return &applyconfigurationsnetworkingv1.HTTPIngressRuleValueApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("Ingress"): + return &applyconfigurationsnetworkingv1.IngressApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IngressBackend"): + return &applyconfigurationsnetworkingv1.IngressBackendApplyConfiguration{} + case 
networkingv1.SchemeGroupVersion.WithKind("IngressClass"): + return &applyconfigurationsnetworkingv1.IngressClassApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IngressClassParametersReference"): + return &applyconfigurationsnetworkingv1.IngressClassParametersReferenceApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IngressClassSpec"): + return &applyconfigurationsnetworkingv1.IngressClassSpecApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IngressLoadBalancerIngress"): + return &applyconfigurationsnetworkingv1.IngressLoadBalancerIngressApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IngressLoadBalancerStatus"): + return &applyconfigurationsnetworkingv1.IngressLoadBalancerStatusApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IngressPortStatus"): + return &applyconfigurationsnetworkingv1.IngressPortStatusApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IngressRule"): + return &applyconfigurationsnetworkingv1.IngressRuleApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IngressRuleValue"): + return &applyconfigurationsnetworkingv1.IngressRuleValueApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IngressServiceBackend"): + return &applyconfigurationsnetworkingv1.IngressServiceBackendApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IngressSpec"): + return &applyconfigurationsnetworkingv1.IngressSpecApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IngressStatus"): + return &applyconfigurationsnetworkingv1.IngressStatusApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IngressTLS"): + return &applyconfigurationsnetworkingv1.IngressTLSApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("IPBlock"): + return &applyconfigurationsnetworkingv1.IPBlockApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("NetworkPolicy"): + return &applyconfigurationsnetworkingv1.NetworkPolicyApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("NetworkPolicyEgressRule"): + return &applyconfigurationsnetworkingv1.NetworkPolicyEgressRuleApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("NetworkPolicyIngressRule"): + return &applyconfigurationsnetworkingv1.NetworkPolicyIngressRuleApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("NetworkPolicyPeer"): + return &applyconfigurationsnetworkingv1.NetworkPolicyPeerApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("NetworkPolicyPort"): + return &applyconfigurationsnetworkingv1.NetworkPolicyPortApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("NetworkPolicySpec"): + return &applyconfigurationsnetworkingv1.NetworkPolicySpecApplyConfiguration{} + case networkingv1.SchemeGroupVersion.WithKind("ServiceBackendPort"): + return &applyconfigurationsnetworkingv1.ServiceBackendPortApplyConfiguration{} + + // Group=networking.k8s.io, Version=v1alpha1 + case networkingv1alpha1.SchemeGroupVersion.WithKind("IPAddress"): + return &applyconfigurationsnetworkingv1alpha1.IPAddressApplyConfiguration{} + case networkingv1alpha1.SchemeGroupVersion.WithKind("IPAddressSpec"): + return &applyconfigurationsnetworkingv1alpha1.IPAddressSpecApplyConfiguration{} + case networkingv1alpha1.SchemeGroupVersion.WithKind("ParentReference"): + return &applyconfigurationsnetworkingv1alpha1.ParentReferenceApplyConfiguration{} + case 
networkingv1alpha1.SchemeGroupVersion.WithKind("ServiceCIDR"): + return &applyconfigurationsnetworkingv1alpha1.ServiceCIDRApplyConfiguration{} + case networkingv1alpha1.SchemeGroupVersion.WithKind("ServiceCIDRSpec"): + return &applyconfigurationsnetworkingv1alpha1.ServiceCIDRSpecApplyConfiguration{} + case networkingv1alpha1.SchemeGroupVersion.WithKind("ServiceCIDRStatus"): + return &applyconfigurationsnetworkingv1alpha1.ServiceCIDRStatusApplyConfiguration{} + + // Group=networking.k8s.io, Version=v1beta1 + case networkingv1beta1.SchemeGroupVersion.WithKind("HTTPIngressPath"): + return &applyconfigurationsnetworkingv1beta1.HTTPIngressPathApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("HTTPIngressRuleValue"): + return &applyconfigurationsnetworkingv1beta1.HTTPIngressRuleValueApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("Ingress"): + return &applyconfigurationsnetworkingv1beta1.IngressApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IngressBackend"): + return &applyconfigurationsnetworkingv1beta1.IngressBackendApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IngressClass"): + return &applyconfigurationsnetworkingv1beta1.IngressClassApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IngressClassParametersReference"): + return &applyconfigurationsnetworkingv1beta1.IngressClassParametersReferenceApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IngressClassSpec"): + return &applyconfigurationsnetworkingv1beta1.IngressClassSpecApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IngressLoadBalancerIngress"): + return &applyconfigurationsnetworkingv1beta1.IngressLoadBalancerIngressApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IngressLoadBalancerStatus"): + return &applyconfigurationsnetworkingv1beta1.IngressLoadBalancerStatusApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IngressPortStatus"): + return &applyconfigurationsnetworkingv1beta1.IngressPortStatusApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IngressRule"): + return &applyconfigurationsnetworkingv1beta1.IngressRuleApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IngressRuleValue"): + return &applyconfigurationsnetworkingv1beta1.IngressRuleValueApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IngressSpec"): + return &applyconfigurationsnetworkingv1beta1.IngressSpecApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IngressStatus"): + return &applyconfigurationsnetworkingv1beta1.IngressStatusApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IngressTLS"): + return &applyconfigurationsnetworkingv1beta1.IngressTLSApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IPAddress"): + return &applyconfigurationsnetworkingv1beta1.IPAddressApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("IPAddressSpec"): + return &applyconfigurationsnetworkingv1beta1.IPAddressSpecApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("ParentReference"): + return &applyconfigurationsnetworkingv1beta1.ParentReferenceApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("ServiceCIDR"): + return &applyconfigurationsnetworkingv1beta1.ServiceCIDRApplyConfiguration{} + case 
networkingv1beta1.SchemeGroupVersion.WithKind("ServiceCIDRSpec"): + return &applyconfigurationsnetworkingv1beta1.ServiceCIDRSpecApplyConfiguration{} + case networkingv1beta1.SchemeGroupVersion.WithKind("ServiceCIDRStatus"): + return &applyconfigurationsnetworkingv1beta1.ServiceCIDRStatusApplyConfiguration{} + + // Group=node.k8s.io, Version=v1 + case nodev1.SchemeGroupVersion.WithKind("Overhead"): + return &applyconfigurationsnodev1.OverheadApplyConfiguration{} + case nodev1.SchemeGroupVersion.WithKind("RuntimeClass"): + return &applyconfigurationsnodev1.RuntimeClassApplyConfiguration{} + case nodev1.SchemeGroupVersion.WithKind("Scheduling"): + return &applyconfigurationsnodev1.SchedulingApplyConfiguration{} + + // Group=node.k8s.io, Version=v1alpha1 + case nodev1alpha1.SchemeGroupVersion.WithKind("Overhead"): + return &applyconfigurationsnodev1alpha1.OverheadApplyConfiguration{} + case nodev1alpha1.SchemeGroupVersion.WithKind("RuntimeClass"): + return &applyconfigurationsnodev1alpha1.RuntimeClassApplyConfiguration{} + case nodev1alpha1.SchemeGroupVersion.WithKind("RuntimeClassSpec"): + return &applyconfigurationsnodev1alpha1.RuntimeClassSpecApplyConfiguration{} + case nodev1alpha1.SchemeGroupVersion.WithKind("Scheduling"): + return &applyconfigurationsnodev1alpha1.SchedulingApplyConfiguration{} + + // Group=node.k8s.io, Version=v1beta1 + case nodev1beta1.SchemeGroupVersion.WithKind("Overhead"): + return &applyconfigurationsnodev1beta1.OverheadApplyConfiguration{} + case nodev1beta1.SchemeGroupVersion.WithKind("RuntimeClass"): + return &applyconfigurationsnodev1beta1.RuntimeClassApplyConfiguration{} + case nodev1beta1.SchemeGroupVersion.WithKind("Scheduling"): + return &applyconfigurationsnodev1beta1.SchedulingApplyConfiguration{} + + // Group=policy, Version=v1 + case policyv1.SchemeGroupVersion.WithKind("Eviction"): + return &applyconfigurationspolicyv1.EvictionApplyConfiguration{} + case policyv1.SchemeGroupVersion.WithKind("PodDisruptionBudget"): + return &applyconfigurationspolicyv1.PodDisruptionBudgetApplyConfiguration{} + case policyv1.SchemeGroupVersion.WithKind("PodDisruptionBudgetSpec"): + return &applyconfigurationspolicyv1.PodDisruptionBudgetSpecApplyConfiguration{} + case policyv1.SchemeGroupVersion.WithKind("PodDisruptionBudgetStatus"): + return &applyconfigurationspolicyv1.PodDisruptionBudgetStatusApplyConfiguration{} + + // Group=policy, Version=v1beta1 + case policyv1beta1.SchemeGroupVersion.WithKind("Eviction"): + return &applyconfigurationspolicyv1beta1.EvictionApplyConfiguration{} + case policyv1beta1.SchemeGroupVersion.WithKind("PodDisruptionBudget"): + return &applyconfigurationspolicyv1beta1.PodDisruptionBudgetApplyConfiguration{} + case policyv1beta1.SchemeGroupVersion.WithKind("PodDisruptionBudgetSpec"): + return &applyconfigurationspolicyv1beta1.PodDisruptionBudgetSpecApplyConfiguration{} + case policyv1beta1.SchemeGroupVersion.WithKind("PodDisruptionBudgetStatus"): + return &applyconfigurationspolicyv1beta1.PodDisruptionBudgetStatusApplyConfiguration{} + + // Group=rbac.authorization.k8s.io, Version=v1 + case rbacv1.SchemeGroupVersion.WithKind("AggregationRule"): + return &applyconfigurationsrbacv1.AggregationRuleApplyConfiguration{} + case rbacv1.SchemeGroupVersion.WithKind("ClusterRole"): + return &applyconfigurationsrbacv1.ClusterRoleApplyConfiguration{} + case rbacv1.SchemeGroupVersion.WithKind("ClusterRoleBinding"): + return &applyconfigurationsrbacv1.ClusterRoleBindingApplyConfiguration{} + case rbacv1.SchemeGroupVersion.WithKind("PolicyRule"): + return 
&applyconfigurationsrbacv1.PolicyRuleApplyConfiguration{} + case rbacv1.SchemeGroupVersion.WithKind("Role"): + return &applyconfigurationsrbacv1.RoleApplyConfiguration{} + case rbacv1.SchemeGroupVersion.WithKind("RoleBinding"): + return &applyconfigurationsrbacv1.RoleBindingApplyConfiguration{} + case rbacv1.SchemeGroupVersion.WithKind("RoleRef"): + return &applyconfigurationsrbacv1.RoleRefApplyConfiguration{} + case rbacv1.SchemeGroupVersion.WithKind("Subject"): + return &applyconfigurationsrbacv1.SubjectApplyConfiguration{} + + // Group=rbac.authorization.k8s.io, Version=v1alpha1 + case rbacv1alpha1.SchemeGroupVersion.WithKind("AggregationRule"): + return &applyconfigurationsrbacv1alpha1.AggregationRuleApplyConfiguration{} + case rbacv1alpha1.SchemeGroupVersion.WithKind("ClusterRole"): + return &applyconfigurationsrbacv1alpha1.ClusterRoleApplyConfiguration{} + case rbacv1alpha1.SchemeGroupVersion.WithKind("ClusterRoleBinding"): + return &applyconfigurationsrbacv1alpha1.ClusterRoleBindingApplyConfiguration{} + case rbacv1alpha1.SchemeGroupVersion.WithKind("PolicyRule"): + return &applyconfigurationsrbacv1alpha1.PolicyRuleApplyConfiguration{} + case rbacv1alpha1.SchemeGroupVersion.WithKind("Role"): + return &applyconfigurationsrbacv1alpha1.RoleApplyConfiguration{} + case rbacv1alpha1.SchemeGroupVersion.WithKind("RoleBinding"): + return &applyconfigurationsrbacv1alpha1.RoleBindingApplyConfiguration{} + case rbacv1alpha1.SchemeGroupVersion.WithKind("RoleRef"): + return &applyconfigurationsrbacv1alpha1.RoleRefApplyConfiguration{} + case rbacv1alpha1.SchemeGroupVersion.WithKind("Subject"): + return &applyconfigurationsrbacv1alpha1.SubjectApplyConfiguration{} + + // Group=rbac.authorization.k8s.io, Version=v1beta1 + case rbacv1beta1.SchemeGroupVersion.WithKind("AggregationRule"): + return &applyconfigurationsrbacv1beta1.AggregationRuleApplyConfiguration{} + case rbacv1beta1.SchemeGroupVersion.WithKind("ClusterRole"): + return &applyconfigurationsrbacv1beta1.ClusterRoleApplyConfiguration{} + case rbacv1beta1.SchemeGroupVersion.WithKind("ClusterRoleBinding"): + return &applyconfigurationsrbacv1beta1.ClusterRoleBindingApplyConfiguration{} + case rbacv1beta1.SchemeGroupVersion.WithKind("PolicyRule"): + return &applyconfigurationsrbacv1beta1.PolicyRuleApplyConfiguration{} + case rbacv1beta1.SchemeGroupVersion.WithKind("Role"): + return &applyconfigurationsrbacv1beta1.RoleApplyConfiguration{} + case rbacv1beta1.SchemeGroupVersion.WithKind("RoleBinding"): + return &applyconfigurationsrbacv1beta1.RoleBindingApplyConfiguration{} + case rbacv1beta1.SchemeGroupVersion.WithKind("RoleRef"): + return &applyconfigurationsrbacv1beta1.RoleRefApplyConfiguration{} + case rbacv1beta1.SchemeGroupVersion.WithKind("Subject"): + return &applyconfigurationsrbacv1beta1.SubjectApplyConfiguration{} + + // Group=resource.k8s.io, Version=v1alpha3 + case v1alpha3.SchemeGroupVersion.WithKind("AllocationResult"): + return &resourcev1alpha3.AllocationResultApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("BasicDevice"): + return &resourcev1alpha3.BasicDeviceApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("CELDeviceSelector"): + return &resourcev1alpha3.CELDeviceSelectorApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("Device"): + return &resourcev1alpha3.DeviceApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("DeviceAllocationConfiguration"): + return &resourcev1alpha3.DeviceAllocationConfigurationApplyConfiguration{} + case 
v1alpha3.SchemeGroupVersion.WithKind("DeviceAllocationResult"): + return &resourcev1alpha3.DeviceAllocationResultApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("DeviceAttribute"): + return &resourcev1alpha3.DeviceAttributeApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("DeviceClaim"): + return &resourcev1alpha3.DeviceClaimApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("DeviceClaimConfiguration"): + return &resourcev1alpha3.DeviceClaimConfigurationApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("DeviceClass"): + return &resourcev1alpha3.DeviceClassApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("DeviceClassConfiguration"): + return &resourcev1alpha3.DeviceClassConfigurationApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("DeviceClassSpec"): + return &resourcev1alpha3.DeviceClassSpecApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("DeviceConfiguration"): + return &resourcev1alpha3.DeviceConfigurationApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("DeviceConstraint"): + return &resourcev1alpha3.DeviceConstraintApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("DeviceRequest"): + return &resourcev1alpha3.DeviceRequestApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("DeviceRequestAllocationResult"): + return &resourcev1alpha3.DeviceRequestAllocationResultApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("DeviceSelector"): + return &resourcev1alpha3.DeviceSelectorApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("OpaqueDeviceConfiguration"): + return &resourcev1alpha3.OpaqueDeviceConfigurationApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("PodSchedulingContext"): + return &resourcev1alpha3.PodSchedulingContextApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("PodSchedulingContextSpec"): + return &resourcev1alpha3.PodSchedulingContextSpecApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("PodSchedulingContextStatus"): + return &resourcev1alpha3.PodSchedulingContextStatusApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaim"): + return &resourcev1alpha3.ResourceClaimApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimConsumerReference"): + return &resourcev1alpha3.ResourceClaimConsumerReferenceApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimSchedulingStatus"): + return &resourcev1alpha3.ResourceClaimSchedulingStatusApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimSpec"): + return &resourcev1alpha3.ResourceClaimSpecApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimStatus"): + return &resourcev1alpha3.ResourceClaimStatusApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimTemplate"): + return &resourcev1alpha3.ResourceClaimTemplateApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimTemplateSpec"): + return &resourcev1alpha3.ResourceClaimTemplateSpecApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("ResourcePool"): + return &resourcev1alpha3.ResourcePoolApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("ResourceSlice"): + return &resourcev1alpha3.ResourceSliceApplyConfiguration{} + case v1alpha3.SchemeGroupVersion.WithKind("ResourceSliceSpec"): + return &resourcev1alpha3.ResourceSliceSpecApplyConfiguration{} + + // Group=scheduling.k8s.io, 
Version=v1 + case schedulingv1.SchemeGroupVersion.WithKind("PriorityClass"): + return &applyconfigurationsschedulingv1.PriorityClassApplyConfiguration{} + + // Group=scheduling.k8s.io, Version=v1alpha1 + case schedulingv1alpha1.SchemeGroupVersion.WithKind("PriorityClass"): + return &applyconfigurationsschedulingv1alpha1.PriorityClassApplyConfiguration{} + + // Group=scheduling.k8s.io, Version=v1beta1 + case schedulingv1beta1.SchemeGroupVersion.WithKind("PriorityClass"): + return &applyconfigurationsschedulingv1beta1.PriorityClassApplyConfiguration{} + + // Group=storage.k8s.io, Version=v1 + case storagev1.SchemeGroupVersion.WithKind("CSIDriver"): + return &applyconfigurationsstoragev1.CSIDriverApplyConfiguration{} + case storagev1.SchemeGroupVersion.WithKind("CSIDriverSpec"): + return &applyconfigurationsstoragev1.CSIDriverSpecApplyConfiguration{} + case storagev1.SchemeGroupVersion.WithKind("CSINode"): + return &applyconfigurationsstoragev1.CSINodeApplyConfiguration{} + case storagev1.SchemeGroupVersion.WithKind("CSINodeDriver"): + return &applyconfigurationsstoragev1.CSINodeDriverApplyConfiguration{} + case storagev1.SchemeGroupVersion.WithKind("CSINodeSpec"): + return &applyconfigurationsstoragev1.CSINodeSpecApplyConfiguration{} + case storagev1.SchemeGroupVersion.WithKind("CSIStorageCapacity"): + return &applyconfigurationsstoragev1.CSIStorageCapacityApplyConfiguration{} + case storagev1.SchemeGroupVersion.WithKind("StorageClass"): + return &applyconfigurationsstoragev1.StorageClassApplyConfiguration{} + case storagev1.SchemeGroupVersion.WithKind("TokenRequest"): + return &applyconfigurationsstoragev1.TokenRequestApplyConfiguration{} + case storagev1.SchemeGroupVersion.WithKind("VolumeAttachment"): + return &applyconfigurationsstoragev1.VolumeAttachmentApplyConfiguration{} + case storagev1.SchemeGroupVersion.WithKind("VolumeAttachmentSource"): + return &applyconfigurationsstoragev1.VolumeAttachmentSourceApplyConfiguration{} + case storagev1.SchemeGroupVersion.WithKind("VolumeAttachmentSpec"): + return &applyconfigurationsstoragev1.VolumeAttachmentSpecApplyConfiguration{} + case storagev1.SchemeGroupVersion.WithKind("VolumeAttachmentStatus"): + return &applyconfigurationsstoragev1.VolumeAttachmentStatusApplyConfiguration{} + case storagev1.SchemeGroupVersion.WithKind("VolumeError"): + return &applyconfigurationsstoragev1.VolumeErrorApplyConfiguration{} + case storagev1.SchemeGroupVersion.WithKind("VolumeNodeResources"): + return &applyconfigurationsstoragev1.VolumeNodeResourcesApplyConfiguration{} + + // Group=storage.k8s.io, Version=v1alpha1 + case storagev1alpha1.SchemeGroupVersion.WithKind("CSIStorageCapacity"): + return &applyconfigurationsstoragev1alpha1.CSIStorageCapacityApplyConfiguration{} + case storagev1alpha1.SchemeGroupVersion.WithKind("VolumeAttachment"): + return &applyconfigurationsstoragev1alpha1.VolumeAttachmentApplyConfiguration{} + case storagev1alpha1.SchemeGroupVersion.WithKind("VolumeAttachmentSource"): + return &applyconfigurationsstoragev1alpha1.VolumeAttachmentSourceApplyConfiguration{} + case storagev1alpha1.SchemeGroupVersion.WithKind("VolumeAttachmentSpec"): + return &applyconfigurationsstoragev1alpha1.VolumeAttachmentSpecApplyConfiguration{} + case storagev1alpha1.SchemeGroupVersion.WithKind("VolumeAttachmentStatus"): + return &applyconfigurationsstoragev1alpha1.VolumeAttachmentStatusApplyConfiguration{} + case storagev1alpha1.SchemeGroupVersion.WithKind("VolumeAttributesClass"): + return 
&applyconfigurationsstoragev1alpha1.VolumeAttributesClassApplyConfiguration{} + case storagev1alpha1.SchemeGroupVersion.WithKind("VolumeError"): + return &applyconfigurationsstoragev1alpha1.VolumeErrorApplyConfiguration{} + + // Group=storage.k8s.io, Version=v1beta1 + case storagev1beta1.SchemeGroupVersion.WithKind("CSIDriver"): + return &applyconfigurationsstoragev1beta1.CSIDriverApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("CSIDriverSpec"): + return &applyconfigurationsstoragev1beta1.CSIDriverSpecApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("CSINode"): + return &applyconfigurationsstoragev1beta1.CSINodeApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("CSINodeDriver"): + return &applyconfigurationsstoragev1beta1.CSINodeDriverApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("CSINodeSpec"): + return &applyconfigurationsstoragev1beta1.CSINodeSpecApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("CSIStorageCapacity"): + return &applyconfigurationsstoragev1beta1.CSIStorageCapacityApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("StorageClass"): + return &applyconfigurationsstoragev1beta1.StorageClassApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("TokenRequest"): + return &applyconfigurationsstoragev1beta1.TokenRequestApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("VolumeAttachment"): + return &applyconfigurationsstoragev1beta1.VolumeAttachmentApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("VolumeAttachmentSource"): + return &applyconfigurationsstoragev1beta1.VolumeAttachmentSourceApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("VolumeAttachmentSpec"): + return &applyconfigurationsstoragev1beta1.VolumeAttachmentSpecApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("VolumeAttachmentStatus"): + return &applyconfigurationsstoragev1beta1.VolumeAttachmentStatusApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("VolumeAttributesClass"): + return &applyconfigurationsstoragev1beta1.VolumeAttributesClassApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("VolumeError"): + return &applyconfigurationsstoragev1beta1.VolumeErrorApplyConfiguration{} + case storagev1beta1.SchemeGroupVersion.WithKind("VolumeNodeResources"): + return &applyconfigurationsstoragev1beta1.VolumeNodeResourcesApplyConfiguration{} + + // Group=storagemigration.k8s.io, Version=v1alpha1 + case storagemigrationv1alpha1.SchemeGroupVersion.WithKind("GroupVersionResource"): + return &applyconfigurationsstoragemigrationv1alpha1.GroupVersionResourceApplyConfiguration{} + case storagemigrationv1alpha1.SchemeGroupVersion.WithKind("MigrationCondition"): + return &applyconfigurationsstoragemigrationv1alpha1.MigrationConditionApplyConfiguration{} + case storagemigrationv1alpha1.SchemeGroupVersion.WithKind("StorageVersionMigration"): + return &applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationApplyConfiguration{} + case storagemigrationv1alpha1.SchemeGroupVersion.WithKind("StorageVersionMigrationSpec"): + return &applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationSpecApplyConfiguration{} + case storagemigrationv1alpha1.SchemeGroupVersion.WithKind("StorageVersionMigrationStatus"): + return &applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationStatusApplyConfiguration{} + + } + return nil +} + +func 
NewTypeConverter(scheme *runtime.Scheme) *testing.TypeConverter { + return &testing.TypeConverter{Scheme: scheme, TypeResolver: internal.Parser()} +} diff --git a/vendor/k8s.io/client-go/discovery/aggregated_discovery.go b/vendor/k8s.io/client-go/discovery/aggregated_discovery.go index f72c42051b..f5eaaedab3 100644 --- a/vendor/k8s.io/client-go/discovery/aggregated_discovery.go +++ b/vendor/k8s.io/client-go/discovery/aggregated_discovery.go @@ -19,7 +19,8 @@ package discovery import ( "fmt" - apidiscovery "k8s.io/api/apidiscovery/v2beta1" + apidiscovery "k8s.io/api/apidiscovery/v2" + apidiscoveryv2beta1 "k8s.io/api/apidiscovery/v2beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" ) @@ -154,3 +155,124 @@ func convertAPISubresource(parent metav1.APIResource, in apidiscovery.APISubreso result.Verbs = in.Verbs return result, nil } + +// Please note the functions below will be removed in v1.33. They facilitate conversion +// between the deprecated type apidiscoveryv2beta1.APIGroupDiscoveryList. + +// SplitGroupsAndResourcesV2Beta1 transforms "aggregated" discovery top-level structure into +// the previous "unaggregated" discovery groups and resources. +// Deprecated: Please use SplitGroupsAndResources +func SplitGroupsAndResourcesV2Beta1(aggregatedGroups apidiscoveryv2beta1.APIGroupDiscoveryList) ( + *metav1.APIGroupList, + map[schema.GroupVersion]*metav1.APIResourceList, + map[schema.GroupVersion]error) { + // Aggregated group list will contain the entirety of discovery, including + // groups, versions, and resources. GroupVersions marked "stale" are failed. + groups := []*metav1.APIGroup{} + failedGVs := map[schema.GroupVersion]error{} + resourcesByGV := map[schema.GroupVersion]*metav1.APIResourceList{} + for _, aggGroup := range aggregatedGroups.Items { + group, resources, failed := convertAPIGroupv2beta1(aggGroup) + groups = append(groups, group) + for gv, resourceList := range resources { + resourcesByGV[gv] = resourceList + } + for gv, err := range failed { + failedGVs[gv] = err + } + } + // Transform slice of groups to group list before returning. + groupList := &metav1.APIGroupList{} + groupList.Groups = make([]metav1.APIGroup, 0, len(groups)) + for _, group := range groups { + groupList.Groups = append(groupList.Groups, *group) + } + return groupList, resourcesByGV, failedGVs +} + +// convertAPIGroupv2beta1 tranforms an "aggregated" APIGroupDiscovery to an "legacy" APIGroup, +// also returning the map of APIResourceList for resources within GroupVersions. +func convertAPIGroupv2beta1(g apidiscoveryv2beta1.APIGroupDiscovery) ( + *metav1.APIGroup, + map[schema.GroupVersion]*metav1.APIResourceList, + map[schema.GroupVersion]error) { + // Iterate through versions to convert to group and resources. 
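// Descriptive note on the conversion that follows: stale GroupVersions are
// recorded in failedGVs and skipped; fresh versions are appended to the
// APIGroup (the first non-stale one becoming PreferredVersion), and each
// resource plus its subresources is flattened into a metav1.APIResourceList
// keyed by GroupVersion via convertAPIResourcev2beta1 and
// convertAPISubresourcev2beta1 below. A minimal sketch of consuming the
// exported entry point (variable names are hypothetical):
//
//	groups, resourcesByGV, failed := SplitGroupsAndResourcesV2Beta1(aggregated)
//	for gv, err := range failed {
//		fmt.Printf("skipping stale %v: %v\n", gv, err)
//	}
//	_, _ = groups, resourcesByGV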
+ group := &metav1.APIGroup{} + gvResources := map[schema.GroupVersion]*metav1.APIResourceList{} + failedGVs := map[schema.GroupVersion]error{} + group.Name = g.ObjectMeta.Name + for _, v := range g.Versions { + gv := schema.GroupVersion{Group: g.Name, Version: v.Version} + if v.Freshness == apidiscoveryv2beta1.DiscoveryFreshnessStale { + failedGVs[gv] = StaleGroupVersionError{gv: gv} + continue + } + version := metav1.GroupVersionForDiscovery{} + version.GroupVersion = gv.String() + version.Version = v.Version + group.Versions = append(group.Versions, version) + // PreferredVersion is first non-stale Version + if group.PreferredVersion == (metav1.GroupVersionForDiscovery{}) { + group.PreferredVersion = version + } + resourceList := &metav1.APIResourceList{} + resourceList.GroupVersion = gv.String() + for _, r := range v.Resources { + resource, err := convertAPIResourcev2beta1(r) + if err == nil { + resourceList.APIResources = append(resourceList.APIResources, resource) + } + // Subresources field in new format get transformed into full APIResources. + // It is possible a partial result with an error was returned to be used + // as the parent resource for the subresource. + for _, subresource := range r.Subresources { + sr, err := convertAPISubresourcev2beta1(resource, subresource) + if err == nil { + resourceList.APIResources = append(resourceList.APIResources, sr) + } + } + } + gvResources[gv] = resourceList + } + return group, gvResources, failedGVs +} + +// convertAPIResource transforms an APIResourceDiscovery to an APIResource. We are +// resilient to missing GVK, since this resource might be the parent resource +// for a subresource. If the parent is missing a GVK, it is not returned in +// discovery, and the subresource MUST have the GVK. +func convertAPIResourcev2beta1(in apidiscoveryv2beta1.APIResourceDiscovery) (metav1.APIResource, error) { + result := metav1.APIResource{ + Name: in.Resource, + SingularName: in.SingularResource, + Namespaced: in.Scope == apidiscoveryv2beta1.ScopeNamespace, + Verbs: in.Verbs, + ShortNames: in.ShortNames, + Categories: in.Categories, + } + // Can return partial result with error, which can be the parent for a + // subresource. Do not add this result to the returned discovery resources. + if in.ResponseKind == nil || (*in.ResponseKind) == emptyKind { + return result, fmt.Errorf("discovery resource %s missing GVK", in.Resource) + } + result.Group = in.ResponseKind.Group + result.Version = in.ResponseKind.Version + result.Kind = in.ResponseKind.Kind + return result, nil +} + +// convertAPISubresource transforms an APISubresourceDiscovery to an APIResource.
+func convertAPISubresourcev2beta1(parent metav1.APIResource, in apidiscoveryv2beta1.APISubresourceDiscovery) (metav1.APIResource, error) { + result := metav1.APIResource{} + if in.ResponseKind == nil || (*in.ResponseKind) == emptyKind { + return result, fmt.Errorf("subresource %s/%s missing GVK", parent.Name, in.Subresource) + } + result.Name = fmt.Sprintf("%s/%s", parent.Name, in.Subresource) + result.SingularName = parent.SingularName + result.Namespaced = parent.Namespaced + result.Group = in.ResponseKind.Group + result.Version = in.ResponseKind.Version + result.Kind = in.ResponseKind.Kind + result.Verbs = in.Verbs + return result, nil +} diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go index df0e0f9974..ef14fee5f0 100644 --- a/vendor/k8s.io/client-go/discovery/discovery_client.go +++ b/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -33,7 +33,8 @@ import ( "github.com/golang/protobuf/proto" openapi_v2 "github.com/google/gnostic-models/openapiv2" - apidiscovery "k8s.io/api/apidiscovery/v2beta1" + apidiscoveryv2 "k8s.io/api/apidiscovery/v2" + apidiscoveryv2beta1 "k8s.io/api/apidiscovery/v2beta1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -64,12 +65,14 @@ const ( // MUST be ordered (g, v, as) for server in "Accept" header (BUT we are resilient // to ordering when comparing returned values in "Content-Type" header). AcceptV2Beta1 = runtime.ContentTypeJSON + ";" + "g=apidiscovery.k8s.io;v=v2beta1;as=APIGroupDiscoveryList" + AcceptV2 = runtime.ContentTypeJSON + ";" + "g=apidiscovery.k8s.io;v=v2;as=APIGroupDiscoveryList" // Prioritize aggregated discovery by placing first in the order of discovery accept types. - acceptDiscoveryFormats = AcceptV2Beta1 + "," + AcceptV1 + acceptDiscoveryFormats = AcceptV2 + "," + AcceptV2Beta1 + "," + AcceptV1 ) // Aggregated discovery content-type GVK. var v2Beta1GVK = schema.GroupVersionKind{Group: "apidiscovery.k8s.io", Version: "v2beta1", Kind: "APIGroupDiscoveryList"} +var v2GVK = schema.GroupVersionKind{Group: "apidiscovery.k8s.io", Version: "v2", Kind: "APIGroupDiscoveryList"} // DiscoveryInterface holds the methods that discover server-supported API groups, // versions and resources. @@ -265,13 +268,20 @@ func (d *DiscoveryClient) downloadLegacy() ( var resourcesByGV map[schema.GroupVersion]*metav1.APIResourceList // Based on the content-type server responded with: aggregated or unaggregated. - if isGVK, _ := ContentTypeIsGVK(responseContentType, v2Beta1GVK); isGVK { - var aggregatedDiscovery apidiscovery.APIGroupDiscoveryList + if isGVK, _ := ContentTypeIsGVK(responseContentType, v2GVK); isGVK { + var aggregatedDiscovery apidiscoveryv2.APIGroupDiscoveryList err = json.Unmarshal(body, &aggregatedDiscovery) if err != nil { return nil, nil, nil, err } apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResources(aggregatedDiscovery) + } else if isGVK, _ := ContentTypeIsGVK(responseContentType, v2Beta1GVK); isGVK { + var aggregatedDiscovery apidiscoveryv2beta1.APIGroupDiscoveryList + err = json.Unmarshal(body, &aggregatedDiscovery) + if err != nil { + return nil, nil, nil, err + } + apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResourcesV2Beta1(aggregatedDiscovery) } else { // Default is unaggregated discovery v1. 
var v metav1.APIVersions @@ -317,13 +327,20 @@ func (d *DiscoveryClient) downloadAPIs() ( failedGVs := map[schema.GroupVersion]error{} var resourcesByGV map[schema.GroupVersion]*metav1.APIResourceList // Based on the content-type server responded with: aggregated or unaggregated. - if isGVK, _ := ContentTypeIsGVK(responseContentType, v2Beta1GVK); isGVK { - var aggregatedDiscovery apidiscovery.APIGroupDiscoveryList + if isGVK, _ := ContentTypeIsGVK(responseContentType, v2GVK); isGVK { + var aggregatedDiscovery apidiscoveryv2.APIGroupDiscoveryList err = json.Unmarshal(body, &aggregatedDiscovery) if err != nil { return nil, nil, nil, err } apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResources(aggregatedDiscovery) + } else if isGVK, _ := ContentTypeIsGVK(responseContentType, v2Beta1GVK); isGVK { + var aggregatedDiscovery apidiscoveryv2beta1.APIGroupDiscoveryList + err = json.Unmarshal(body, &aggregatedDiscovery) + if err != nil { + return nil, nil, nil, err + } + apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResourcesV2Beta1(aggregatedDiscovery) } else { // Default is unaggregated discovery v1. err = json.Unmarshal(body, apiGroupList) diff --git a/vendor/k8s.io/client-go/discovery/fake/discovery.go b/vendor/k8s.io/client-go/discovery/fake/discovery.go index f8a78e1ef4..e5d9e7f800 100644 --- a/vendor/k8s.io/client-go/discovery/fake/discovery.go +++ b/vendor/k8s.io/client-go/discovery/fake/discovery.go @@ -47,7 +47,9 @@ func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*me Verb: "get", Resource: schema.GroupVersionResource{Resource: "resource"}, } - c.Invokes(action, nil) + if _, err := c.Invokes(action, nil); err != nil { + return nil, err + } for _, resourceList := range c.Resources { if resourceList.GroupVersion == groupVersion { return resourceList, nil @@ -77,7 +79,9 @@ func (c *FakeDiscovery) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav Verb: "get", Resource: schema.GroupVersionResource{Resource: "resource"}, } - c.Invokes(action, nil) + if _, err = c.Invokes(action, nil); err != nil { + return resultGroups, c.Resources, err + } return resultGroups, c.Resources, nil } @@ -100,7 +104,9 @@ func (c *FakeDiscovery) ServerGroups() (*metav1.APIGroupList, error) { Verb: "get", Resource: schema.GroupVersionResource{Resource: "group"}, } - c.Invokes(action, nil) + if _, err := c.Invokes(action, nil); err != nil { + return nil, err + } groups := map[string]*metav1.APIGroup{} diff --git a/vendor/k8s.io/client-go/dynamic/simple.go b/vendor/k8s.io/client-go/dynamic/simple.go index 4b54859530..326da7cbdf 100644 --- a/vendor/k8s.io/client-go/dynamic/simple.go +++ b/vendor/k8s.io/client-go/dynamic/simple.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "net/http" + "time" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -29,6 +30,9 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/rest" + "k8s.io/client-go/util/consistencydetector" + "k8s.io/client-go/util/watchlist" + "k8s.io/klog/v2" ) type DynamicClient struct { @@ -293,6 +297,24 @@ func (c *dynamicResourceClient) Get(ctx context.Context, name string, opts metav } func (c *dynamicResourceClient) List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) { + if watchListOptions, hasWatchListOptionsPrepared, watchListOptionsErr := watchlist.PrepareWatchListOptionsFromListOptions(opts); watchListOptionsErr != nil { + klog.Warningf("Failed preparing watchlist options for %v, falling back to 
the standard LIST semantics, err = %v", c.resource, watchListOptionsErr) + } else if hasWatchListOptionsPrepared { + result, err := c.watchList(ctx, watchListOptions) + if err == nil { + consistencydetector.CheckWatchListFromCacheDataConsistencyIfRequested(ctx, fmt.Sprintf("watchlist request for %v", c.resource), c.list, opts, result) + return result, nil + } + klog.Warningf("The watchlist request for %v ended with an error, falling back to the standard LIST semantics, err = %v", c.resource, err) + } + result, err := c.list(ctx, opts) + if err == nil { + consistencydetector.CheckListFromCacheDataConsistencyIfRequested(ctx, fmt.Sprintf("list request for %v", c.resource), c.list, opts, result) + } + return result, err +} + +func (c *dynamicResourceClient) list(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) { if err := validateNamespaceWithOptionalName(c.namespace); err != nil { return nil, err } @@ -319,6 +341,27 @@ func (c *dynamicResourceClient) List(ctx context.Context, opts metav1.ListOption return list, nil } +// watchList establishes a watch stream with the server and returns an unstructured list. +func (c *dynamicResourceClient) watchList(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) { + if err := validateNamespaceWithOptionalName(c.namespace); err != nil { + return nil, err + } + + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + + result := &unstructured.UnstructuredList{} + err := c.client.client.Get().AbsPath(c.makeURLSegments("")...). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). + Timeout(timeout). + WatchList(ctx). + Into(result) + + return result, err +} + func (c *dynamicResourceClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { opts.Watch = true if err := validateNamespaceWithOptionalName(c.namespace); err != nil { diff --git a/vendor/k8s.io/client-go/features/envvar.go b/vendor/k8s.io/client-go/features/envvar.go new file mode 100644 index 0000000000..8c3f887dc4 --- /dev/null +++ b/vendor/k8s.io/client-go/features/envvar.go @@ -0,0 +1,188 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package features + +import ( + "fmt" + "os" + "strconv" + "sync" + "sync/atomic" + + "k8s.io/apimachinery/pkg/util/naming" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/klog/v2" +) + +// internalPackages are packages that ignored when creating a name for featureGates. These packages are in the common +// call chains, so they'd be unhelpful as names. +var internalPackages = []string{"k8s.io/client-go/features/envvar.go"} + +var _ Gates = &envVarFeatureGates{} + +// newEnvVarFeatureGates creates a feature gate that allows for registration +// of features and checking if the features are enabled. +// +// On the first call to Enabled, the effective state of all known features is loaded from +// environment variables. 
The environment variable read for a given feature is formed by +// concatenating the prefix "KUBE_FEATURE_" with the feature's name. +// +// For example, if you have a feature named "MyFeature" +// setting an environmental variable "KUBE_FEATURE_MyFeature" +// will allow you to configure the state of that feature. +// +// Please note that environmental variables can only be set to the boolean value. +// Incorrect values will be ignored and logged. +// +// Features can also be set directly via the Set method. +// In that case, these features take precedence over +// features set via environmental variables. +func newEnvVarFeatureGates(features map[Feature]FeatureSpec) *envVarFeatureGates { + known := map[Feature]FeatureSpec{} + for name, spec := range features { + known[name] = spec + } + + fg := &envVarFeatureGates{ + callSiteName: naming.GetNameFromCallsite(internalPackages...), + known: known, + } + fg.enabledViaEnvVar.Store(map[Feature]bool{}) + fg.enabledViaSetMethod = map[Feature]bool{} + + return fg +} + +// envVarFeatureGates implements Gates and allows for feature registration. +type envVarFeatureGates struct { + // callSiteName holds the name of the file + // that created this instance + callSiteName string + + // readEnvVarsOnce guards reading environmental variables + readEnvVarsOnce sync.Once + + // known holds known feature gates + known map[Feature]FeatureSpec + + // enabledViaEnvVar holds a map[Feature]bool + // with values explicitly set via env var + enabledViaEnvVar atomic.Value + + // lockEnabledViaSetMethod protects enabledViaSetMethod + lockEnabledViaSetMethod sync.RWMutex + + // enabledViaSetMethod holds values explicitly set + // via Set method, features stored in this map take + // precedence over features stored in enabledViaEnvVar + enabledViaSetMethod map[Feature]bool + + // readEnvVars holds the boolean value which + // indicates whether readEnvVarsOnce has been called. + readEnvVars atomic.Bool +} + +// Enabled returns true if the key is enabled. If the key is not known, this call will panic. +func (f *envVarFeatureGates) Enabled(key Feature) bool { + if v, ok := f.wasFeatureEnabledViaSetMethod(key); ok { + // ensue that the state of all known features + // is loaded from environment variables + // on the first call to Enabled method. + if !f.hasAlreadyReadEnvVar() { + _ = f.getEnabledMapFromEnvVar() + } + return v + } + if v, ok := f.getEnabledMapFromEnvVar()[key]; ok { + return v + } + if v, ok := f.known[key]; ok { + return v.Default + } + panic(fmt.Errorf("feature %q is not registered in FeatureGates %q", key, f.callSiteName)) +} + +// Set sets the given feature to the given value. +// +// Features set via this method take precedence over +// the features set via environment variables. +func (f *envVarFeatureGates) Set(featureName Feature, featureValue bool) error { + feature, ok := f.known[featureName] + if !ok { + return fmt.Errorf("feature %q is not registered in FeatureGates %q", featureName, f.callSiteName) + } + if feature.LockToDefault && feature.Default != featureValue { + return fmt.Errorf("cannot set feature gate %q to %v, feature is locked to %v", featureName, featureValue, feature.Default) + } + + f.lockEnabledViaSetMethod.Lock() + defer f.lockEnabledViaSetMethod.Unlock() + f.enabledViaSetMethod[featureName] = featureValue + + return nil +} + +// getEnabledMapFromEnvVar will fill the enabled map on the first call. +// This is the only time a known feature can be set to a value +// read from the corresponding environmental variable. 
+func (f *envVarFeatureGates) getEnabledMapFromEnvVar() map[Feature]bool { + f.readEnvVarsOnce.Do(func() { + featureGatesState := map[Feature]bool{} + for feature, featureSpec := range f.known { + featureState, featureStateSet := os.LookupEnv(fmt.Sprintf("KUBE_FEATURE_%s", feature)) + if !featureStateSet { + continue + } + boolVal, boolErr := strconv.ParseBool(featureState) + switch { + case boolErr != nil: + utilruntime.HandleError(fmt.Errorf("cannot set feature gate %q to %q, due to %v", feature, featureState, boolErr)) + case featureSpec.LockToDefault: + if boolVal != featureSpec.Default { + utilruntime.HandleError(fmt.Errorf("cannot set feature gate %q to %q, feature is locked to %v", feature, featureState, featureSpec.Default)) + break + } + featureGatesState[feature] = featureSpec.Default + default: + featureGatesState[feature] = boolVal + } + } + f.enabledViaEnvVar.Store(featureGatesState) + f.readEnvVars.Store(true) + + for feature, featureSpec := range f.known { + if featureState, ok := featureGatesState[feature]; ok { + klog.V(1).InfoS("Feature gate updated state", "feature", feature, "enabled", featureState) + continue + } + klog.V(1).InfoS("Feature gate default state", "feature", feature, "enabled", featureSpec.Default) + } + }) + return f.enabledViaEnvVar.Load().(map[Feature]bool) +} + +func (f *envVarFeatureGates) wasFeatureEnabledViaSetMethod(key Feature) (bool, bool) { + f.lockEnabledViaSetMethod.RLock() + defer f.lockEnabledViaSetMethod.RUnlock() + + value, found := f.enabledViaSetMethod[key] + return value, found +} + +func (f *envVarFeatureGates) hasAlreadyReadEnvVar() bool { + return f.readEnvVars.Load() +} diff --git a/vendor/k8s.io/client-go/features/features.go b/vendor/k8s.io/client-go/features/features.go new file mode 100644 index 0000000000..afb67f509e --- /dev/null +++ b/vendor/k8s.io/client-go/features/features.go @@ -0,0 +1,143 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package features + +import ( + "errors" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "sync/atomic" +) + +// NOTE: types Feature, FeatureSpec, prerelease (and its values) +// were duplicated from the component-base repository +// +// for more information please refer to https://docs.google.com/document/d/1g9BGCRw-7ucUxO6OtCWbb3lfzUGA_uU9178wLdXAIfs + +const ( + // Values for PreRelease. + Alpha = prerelease("ALPHA") + Beta = prerelease("BETA") + GA = prerelease("") + + // Deprecated + Deprecated = prerelease("DEPRECATED") +) + +type prerelease string + +type Feature string + +type FeatureSpec struct { + // Default is the default enablement state for the feature + Default bool + // LockToDefault indicates that the feature is locked to its default and cannot be changed + LockToDefault bool + // PreRelease indicates the maturity level of the feature + PreRelease prerelease +} + +// Gates indicates whether a given feature is enabled or not. +type Gates interface { + // Enabled returns true if the key is enabled. 
+ Enabled(key Feature) bool +} + +// Registry represents an external feature gates registry. +type Registry interface { + // Add adds existing feature gates to the provided registry. + // + // As of today, this method is used by AddFeaturesToExistingFeatureGates and + // ReplaceFeatureGates to take control of the features exposed by this library. + Add(map[Feature]FeatureSpec) error +} + +// FeatureGates returns the feature gates exposed by this library. +// +// By default, only the default features gates will be returned. +// The default implementation allows controlling the features +// via environmental variables. +// For example, if you have a feature named "MyFeature" +// setting an environmental variable "KUBE_FEATURE_MyFeature" +// will allow you to configure the state of that feature. +// +// Please note that the actual set of the feature gates +// might be overwritten by calling ReplaceFeatureGates method. +func FeatureGates() Gates { + return featureGates.Load().(*featureGatesWrapper).Gates +} + +// AddFeaturesToExistingFeatureGates adds the default feature gates to the provided registry. +// Usually this function is combined with ReplaceFeatureGates to take control of the +// features exposed by this library. +func AddFeaturesToExistingFeatureGates(registry Registry) error { + return registry.Add(defaultKubernetesFeatureGates) +} + +// ReplaceFeatureGates overwrites the default implementation of the feature gates +// used by this library. +// +// Useful for binaries that would like to have full control of the features +// exposed by this library, such as allowing consumers of a binary +// to interact with the features via a command line flag. +// +// For example: +// +// // first, register client-go's features to your registry. +// clientgofeaturegate.AddFeaturesToExistingFeatureGates(utilfeature.DefaultMutableFeatureGate) +// // then replace client-go's feature gates implementation with your implementation +// clientgofeaturegate.ReplaceFeatureGates(utilfeature.DefaultMutableFeatureGate) +func ReplaceFeatureGates(newFeatureGates Gates) { + if replaceFeatureGatesWithWarningIndicator(newFeatureGates) { + utilruntime.HandleError(errors.New("the default feature gates implementation has already been used and now it's being overwritten. This might lead to unexpected behaviour. Check your initialization order")) + } +} + +func replaceFeatureGatesWithWarningIndicator(newFeatureGates Gates) bool { + shouldProduceWarning := false + + if defaultFeatureGates, ok := FeatureGates().(*envVarFeatureGates); ok { + if defaultFeatureGates.hasAlreadyReadEnvVar() { + shouldProduceWarning = true + } + } + wrappedFeatureGates := &featureGatesWrapper{newFeatureGates} + featureGates.Store(wrappedFeatureGates) + + return shouldProduceWarning +} + +func init() { + envVarGates := newEnvVarFeatureGates(defaultKubernetesFeatureGates) + + wrappedFeatureGates := &featureGatesWrapper{envVarGates} + featureGates.Store(wrappedFeatureGates) +} + +// featureGatesWrapper a thin wrapper to satisfy featureGates variable (atomic.Value). +// That is, all calls to Store for a given Value must use values of the same concrete type. +type featureGatesWrapper struct { + Gates +} + +var ( + // featureGates is a shared global FeatureGates. + // + // Top-level commands/options setup that needs to modify this feature gates + // should use AddFeaturesToExistingFeatureGates followed by ReplaceFeatureGates. 
+ featureGates = &atomic.Value{} +) diff --git a/vendor/k8s.io/client-go/features/known_features.go b/vendor/k8s.io/client-go/features/known_features.go new file mode 100644 index 0000000000..0c972a46fd --- /dev/null +++ b/vendor/k8s.io/client-go/features/known_features.go @@ -0,0 +1,54 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package features + +const ( + // Every feature gate should add method here following this template: + // + // // owner: @username + // // alpha: v1.4 + // MyFeature featuregate.Feature = "MyFeature" + // + // Feature gates should be listed in alphabetical, case-sensitive + // (upper before any lower case character) order. This reduces the risk + // of code conflicts because changes are more likely to be scattered + // across the file. + + // owner: @p0lyn0mial + // beta: v1.30 + // + // Allow the client to get a stream of individual items instead of chunking from the server. + // + // NOTE: + // The feature is disabled in Beta by default because + // it will only be turned on for selected control plane component(s). + WatchListClient Feature = "WatchListClient" + + // owner: @nilekhc + // alpha: v1.30 + InformerResourceVersion Feature = "InformerResourceVersion" +) + +// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys. +// +// To add a new feature, define a key for it above and add it here. +// After registering with the binary, the features are, by default, controllable using environment variables. +// For more details, please see envVarFeatureGates implementation. +var defaultKubernetesFeatureGates = map[Feature]FeatureSpec{ + WatchListClient: {Default: false, PreRelease: Beta}, + InformerResourceVersion: {Default: false, PreRelease: Alpha}, +} diff --git a/vendor/k8s.io/client-go/gentype/type.go b/vendor/k8s.io/client-go/gentype/type.go new file mode 100644 index 0000000000..b5be84318d --- /dev/null +++ b/vendor/k8s.io/client-go/gentype/type.go @@ -0,0 +1,360 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package gentype + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + "k8s.io/client-go/util/consistencydetector" + "k8s.io/client-go/util/watchlist" + "k8s.io/klog/v2" +) + +// objectWithMeta matches objects implementing both runtime.Object and metav1.Object. +type objectWithMeta interface { + runtime.Object + metav1.Object +} + +// namedObject matches comparable objects implementing GetName(); it is intended for use with apply declarative configurations. +type namedObject interface { + comparable + GetName() *string +} + +// Client represents a client, optionally namespaced, with no support for lists or apply declarative configurations. +type Client[T objectWithMeta] struct { + resource string + client rest.Interface + namespace string // "" for non-namespaced clients + newObject func() T + parameterCodec runtime.ParameterCodec +} + +// ClientWithList represents a client with support for lists. +type ClientWithList[T objectWithMeta, L runtime.Object] struct { + *Client[T] + alsoLister[T, L] +} + +// ClientWithApply represents a client with support for apply declarative configurations. +type ClientWithApply[T objectWithMeta, C namedObject] struct { + *Client[T] + alsoApplier[T, C] +} + +// ClientWithListAndApply represents a client with support for lists and apply declarative configurations. +type ClientWithListAndApply[T objectWithMeta, L runtime.Object, C namedObject] struct { + *Client[T] + alsoLister[T, L] + alsoApplier[T, C] +} + +// Helper types for composition +type alsoLister[T objectWithMeta, L runtime.Object] struct { + client *Client[T] + newList func() L +} + +type alsoApplier[T objectWithMeta, C namedObject] struct { + client *Client[T] +} + +// NewClient constructs a client, namespaced or not, with no support for lists or apply. +// Non-namespaced clients are constructed by passing an empty namespace (""). +func NewClient[T objectWithMeta]( + resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T, +) *Client[T] { + return &Client[T]{ + resource: resource, + client: client, + parameterCodec: parameterCodec, + namespace: namespace, + newObject: emptyObjectCreator, + } +} + +// NewClientWithList constructs a namespaced client with support for lists. +func NewClientWithList[T objectWithMeta, L runtime.Object]( + resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T, + emptyListCreator func() L, +) *ClientWithList[T, L] { + typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator) + return &ClientWithList[T, L]{ + typeClient, + alsoLister[T, L]{typeClient, emptyListCreator}, + } +} + +// NewClientWithApply constructs a namespaced client with support for apply declarative configurations. +func NewClientWithApply[T objectWithMeta, C namedObject]( + resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T, +) *ClientWithApply[T, C] { + typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator) + return &ClientWithApply[T, C]{ + typeClient, + alsoApplier[T, C]{typeClient}, + } +} + +// NewClientWithListAndApply constructs a client with support for lists and applying declarative configurations. 
+func NewClientWithListAndApply[T objectWithMeta, L runtime.Object, C namedObject]( + resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T, + emptyListCreator func() L, +) *ClientWithListAndApply[T, L, C] { + typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator) + return &ClientWithListAndApply[T, L, C]{ + typeClient, + alsoLister[T, L]{typeClient, emptyListCreator}, + alsoApplier[T, C]{typeClient}, + } +} + +// GetClient returns the REST interface. +func (c *Client[T]) GetClient() rest.Interface { + return c.client +} + +// GetNamespace returns the client's namespace, if any. +func (c *Client[T]) GetNamespace() string { + return c.namespace +} + +// Get takes name of the resource, and returns the corresponding object, and an error if there is any. +func (c *Client[T]) Get(ctx context.Context, name string, options metav1.GetOptions) (T, error) { + result := c.newObject() + err := c.client.Get(). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + Name(name). + VersionedParams(&options, c.parameterCodec). + Do(ctx). + Into(result) + return result, err +} + +// List takes label and field selectors, and returns the list of resources that match those selectors. +func (l *alsoLister[T, L]) List(ctx context.Context, opts metav1.ListOptions) (L, error) { + if watchListOptions, hasWatchListOptionsPrepared, watchListOptionsErr := watchlist.PrepareWatchListOptionsFromListOptions(opts); watchListOptionsErr != nil { + klog.Warningf("Failed preparing watchlist options for $.type|resource$, falling back to the standard LIST semantics, err = %v", watchListOptionsErr) + } else if hasWatchListOptionsPrepared { + result, err := l.watchList(ctx, watchListOptions) + if err == nil { + consistencydetector.CheckWatchListFromCacheDataConsistencyIfRequested(ctx, "watchlist request for "+l.client.resource, l.list, opts, result) + return result, nil + } + klog.Warningf("The watchlist request for %s ended with an error, falling back to the standard LIST semantics, err = %v", l.client.resource, err) + } + result, err := l.list(ctx, opts) + if err == nil { + consistencydetector.CheckListFromCacheDataConsistencyIfRequested(ctx, "list request for "+l.client.resource, l.list, opts, result) + } + return result, err +} + +func (l *alsoLister[T, L]) list(ctx context.Context, opts metav1.ListOptions) (L, error) { + list := l.newList() + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + err := l.client.client.Get(). + NamespaceIfScoped(l.client.namespace, l.client.namespace != ""). + Resource(l.client.resource). + VersionedParams(&opts, l.client.parameterCodec). + Timeout(timeout). + Do(ctx). + Into(list) + return list, err +} + +// watchList establishes a watch stream with the server and returns the list of resources. +func (l *alsoLister[T, L]) watchList(ctx context.Context, opts metav1.ListOptions) (result L, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = l.newList() + err = l.client.client.Get(). + NamespaceIfScoped(l.client.namespace, l.client.namespace != ""). + Resource(l.client.resource). + VersionedParams(&opts, l.client.parameterCodec). + Timeout(timeout). + WatchList(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resources. 
+func (c *Client[T]) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + VersionedParams(&opts, c.parameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a resource and creates it. Returns the server's representation of the resource, and an error, if there is any. +func (c *Client[T]) Create(ctx context.Context, obj T, opts metav1.CreateOptions) (T, error) { + result := c.newObject() + err := c.client.Post(). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + VersionedParams(&opts, c.parameterCodec). + Body(obj). + Do(ctx). + Into(result) + return result, err +} + +// Update takes the representation of a resource and updates it. Returns the server's representation of the resource, and an error, if there is any. +func (c *Client[T]) Update(ctx context.Context, obj T, opts metav1.UpdateOptions) (T, error) { + result := c.newObject() + err := c.client.Put(). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + Name(obj.GetName()). + VersionedParams(&opts, c.parameterCodec). + Body(obj). + Do(ctx). + Into(result) + return result, err +} + +// UpdateStatus updates the status subresource of a resource. Returns the server's representation of the resource, and an error, if there is any. +func (c *Client[T]) UpdateStatus(ctx context.Context, obj T, opts metav1.UpdateOptions) (T, error) { + result := c.newObject() + err := c.client.Put(). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + Name(obj.GetName()). + SubResource("status"). + VersionedParams(&opts, c.parameterCodec). + Body(obj). + Do(ctx). + Into(result) + return result, err +} + +// Delete takes name of the resource and deletes it. Returns an error if one occurs. +func (c *Client[T]) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (l *alsoLister[T, L]) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return l.client.client.Delete(). + NamespaceIfScoped(l.client.namespace, l.client.namespace != ""). + Resource(l.client.resource). + VersionedParams(&listOpts, l.client.parameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched resource. +func (c *Client[T]) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (T, error) { + result := c.newObject() + err := c.client.Patch(pt). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, c.parameterCodec). + Body(data). + Do(ctx). + Into(result) + return result, err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied resource. 
+func (a *alsoApplier[T, C]) Apply(ctx context.Context, obj C, opts metav1.ApplyOptions) (T, error) { + result := a.client.newObject() + if obj == *new(C) { + return *new(T), fmt.Errorf("object provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(obj) + if err != nil { + return *new(T), err + } + if obj.GetName() == nil { + return *new(T), fmt.Errorf("obj.Name must be provided to Apply") + } + err = a.client.client.Patch(types.ApplyPatchType). + NamespaceIfScoped(a.client.namespace, a.client.namespace != ""). + Resource(a.client.resource). + Name(*obj.GetName()). + VersionedParams(&patchOpts, a.client.parameterCodec). + Body(data). + Do(ctx). + Into(result) + return result, err +} + +// Apply takes the given apply declarative configuration, applies it to the status subresource and returns the applied resource. +func (a *alsoApplier[T, C]) ApplyStatus(ctx context.Context, obj C, opts metav1.ApplyOptions) (T, error) { + if obj == *new(C) { + return *new(T), fmt.Errorf("object provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(obj) + if err != nil { + return *new(T), err + } + + if obj.GetName() == nil { + return *new(T), fmt.Errorf("obj.Name must be provided to Apply") + } + + result := a.client.newObject() + err = a.client.client.Patch(types.ApplyPatchType). + NamespaceIfScoped(a.client.namespace, a.client.namespace != ""). + Resource(a.client.resource). + Name(*obj.GetName()). + SubResource("status"). + VersionedParams(&patchOpts, a.client.parameterCodec). + Body(data). + Do(ctx). + Into(result) + return result, err +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1/interface.go index 1ecae9ecf7..08769d3cca 100644 --- a/vendor/k8s.io/client-go/informers/admissionregistration/v1/interface.go +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1/interface.go @@ -26,6 +26,10 @@ import ( type Interface interface { // MutatingWebhookConfigurations returns a MutatingWebhookConfigurationInformer. MutatingWebhookConfigurations() MutatingWebhookConfigurationInformer + // ValidatingAdmissionPolicies returns a ValidatingAdmissionPolicyInformer. + ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInformer + // ValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindingInformer. + ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInformer // ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer. ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer } @@ -46,6 +50,16 @@ func (v *version) MutatingWebhookConfigurations() MutatingWebhookConfigurationIn return &mutatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } +// ValidatingAdmissionPolicies returns a ValidatingAdmissionPolicyInformer. +func (v *version) ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInformer { + return &validatingAdmissionPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindingInformer. +func (v *version) ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInformer { + return &validatingAdmissionPolicyBindingInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer. 
func (v *version) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer { return &validatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicy.go new file mode 100644 index 0000000000..eaf9414e26 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicy.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/admissionregistration/v1" + cache "k8s.io/client-go/tools/cache" +) + +// ValidatingAdmissionPolicyInformer provides access to a shared informer and lister for +// ValidatingAdmissionPolicies. +type ValidatingAdmissionPolicyInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ValidatingAdmissionPolicyLister +} + +type validatingAdmissionPolicyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewValidatingAdmissionPolicyInformer constructs a new informer for ValidatingAdmissionPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewValidatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredValidatingAdmissionPolicyInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredValidatingAdmissionPolicyInformer constructs a new informer for ValidatingAdmissionPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredValidatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1().ValidatingAdmissionPolicies().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1().ValidatingAdmissionPolicies().Watch(context.TODO(), options) + }, + }, + &admissionregistrationv1.ValidatingAdmissionPolicy{}, + resyncPeriod, + indexers, + ) +} + +func (f *validatingAdmissionPolicyInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredValidatingAdmissionPolicyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *validatingAdmissionPolicyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&admissionregistrationv1.ValidatingAdmissionPolicy{}, f.defaultInformer) +} + +func (f *validatingAdmissionPolicyInformer) Lister() v1.ValidatingAdmissionPolicyLister { + return v1.NewValidatingAdmissionPolicyLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicybinding.go new file mode 100644 index 0000000000..8cd61bf28a --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicybinding.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/admissionregistration/v1" + cache "k8s.io/client-go/tools/cache" +) + +// ValidatingAdmissionPolicyBindingInformer provides access to a shared informer and lister for +// ValidatingAdmissionPolicyBindings. 
+type ValidatingAdmissionPolicyBindingInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ValidatingAdmissionPolicyBindingLister +} + +type validatingAdmissionPolicyBindingInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewValidatingAdmissionPolicyBindingInformer constructs a new informer for ValidatingAdmissionPolicyBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewValidatingAdmissionPolicyBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredValidatingAdmissionPolicyBindingInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredValidatingAdmissionPolicyBindingInformer constructs a new informer for ValidatingAdmissionPolicyBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredValidatingAdmissionPolicyBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1().ValidatingAdmissionPolicyBindings().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1().ValidatingAdmissionPolicyBindings().Watch(context.TODO(), options) + }, + }, + &admissionregistrationv1.ValidatingAdmissionPolicyBinding{}, + resyncPeriod, + indexers, + ) +} + +func (f *validatingAdmissionPolicyBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredValidatingAdmissionPolicyBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *validatingAdmissionPolicyBindingInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&admissionregistrationv1.ValidatingAdmissionPolicyBinding{}, f.defaultInformer) +} + +func (f *validatingAdmissionPolicyBindingInformer) Lister() v1.ValidatingAdmissionPolicyBindingLister { + return v1.NewValidatingAdmissionPolicyBindingLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/coordination/interface.go b/vendor/k8s.io/client-go/informers/coordination/interface.go index 54cfd7b9f2..026b4d9476 100644 --- a/vendor/k8s.io/client-go/informers/coordination/interface.go +++ b/vendor/k8s.io/client-go/informers/coordination/interface.go @@ -20,6 +20,7 @@ package coordination import ( v1 "k8s.io/client-go/informers/coordination/v1" + v1alpha1 "k8s.io/client-go/informers/coordination/v1alpha1" v1beta1 "k8s.io/client-go/informers/coordination/v1beta1" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" ) @@ -28,6 +29,8 @@ import ( type Interface interface { // V1 provides access to shared informers for resources in V1. 
V1() v1.Interface + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. V1beta1() v1beta1.Interface } @@ -48,6 +51,11 @@ func (g *group) V1() v1.Interface { return v1.New(g.factory, g.namespace, g.tweakListOptions) } +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} + // V1beta1 returns a new v1beta1.Interface. func (g *group) V1beta1() v1beta1.Interface { return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) diff --git a/vendor/k8s.io/client-go/informers/coordination/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/coordination/v1alpha1/interface.go new file mode 100644 index 0000000000..4058af2806 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/coordination/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // LeaseCandidates returns a LeaseCandidateInformer. + LeaseCandidates() LeaseCandidateInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// LeaseCandidates returns a LeaseCandidateInformer. +func (v *version) LeaseCandidates() LeaseCandidateInformer { + return &leaseCandidateInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/coordination/v1alpha1/leasecandidate.go b/vendor/k8s.io/client-go/informers/coordination/v1alpha1/leasecandidate.go new file mode 100644 index 0000000000..21bc47a8e6 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/coordination/v1alpha1/leasecandidate.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "context" + time "time" + + coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/coordination/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// LeaseCandidateInformer provides access to a shared informer and lister for +// LeaseCandidates. +type LeaseCandidateInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.LeaseCandidateLister +} + +type leaseCandidateInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewLeaseCandidateInformer constructs a new informer for LeaseCandidate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewLeaseCandidateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredLeaseCandidateInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredLeaseCandidateInformer constructs a new informer for LeaseCandidate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredLeaseCandidateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoordinationV1alpha1().LeaseCandidates(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoordinationV1alpha1().LeaseCandidates(namespace).Watch(context.TODO(), options) + }, + }, + &coordinationv1alpha1.LeaseCandidate{}, + resyncPeriod, + indexers, + ) +} + +func (f *leaseCandidateInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredLeaseCandidateInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *leaseCandidateInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&coordinationv1alpha1.LeaseCandidate{}, f.defaultInformer) +} + +func (f *leaseCandidateInformer) Lister() v1alpha1.LeaseCandidateLister { + return v1alpha1.NewLeaseCandidateLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/doc.go b/vendor/k8s.io/client-go/informers/doc.go index 231bffb69b..f37c3e4d01 100644 --- a/vendor/k8s.io/client-go/informers/doc.go +++ b/vendor/k8s.io/client-go/informers/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Package informers provides generated informers for Kubernetes APIs. 
-package informers +package informers // import "k8s.io/client-go/informers" diff --git a/vendor/k8s.io/client-go/informers/factory.go b/vendor/k8s.io/client-go/informers/factory.go index 9fc86441a1..86c24551ef 100644 --- a/vendor/k8s.io/client-go/informers/factory.go +++ b/vendor/k8s.io/client-go/informers/factory.go @@ -46,6 +46,7 @@ import ( resource "k8s.io/client-go/informers/resource" scheduling "k8s.io/client-go/informers/scheduling" storage "k8s.io/client-go/informers/storage" + storagemigration "k8s.io/client-go/informers/storagemigration" kubernetes "k8s.io/client-go/kubernetes" cache "k8s.io/client-go/tools/cache" ) @@ -246,6 +247,7 @@ type SharedInformerFactory interface { // Start initializes all requested informers. They are handled in goroutines // which run until the stop channel gets closed. + // Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync. Start(stopCh <-chan struct{}) // Shutdown marks a factory as shutting down. At that point no new @@ -290,6 +292,7 @@ type SharedInformerFactory interface { Resource() resource.Interface Scheduling() scheduling.Interface Storage() storage.Interface + Storagemigration() storagemigration.Interface } func (f *sharedInformerFactory) Admissionregistration() admissionregistration.Interface { @@ -367,3 +370,7 @@ func (f *sharedInformerFactory) Scheduling() scheduling.Interface { func (f *sharedInformerFactory) Storage() storage.Interface { return storage.New(f, f.namespace, f.tweakListOptions) } + +func (f *sharedInformerFactory) Storagemigration() storagemigration.Interface { + return storagemigration.New(f, f.namespace, f.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/generic.go b/vendor/k8s.io/client-go/informers/generic.go index 680768815b..39a9d3bf4f 100644 --- a/vendor/k8s.io/client-go/informers/generic.go +++ b/vendor/k8s.io/client-go/informers/generic.go @@ -38,6 +38,7 @@ import ( certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" coordinationv1 "k8s.io/api/coordination/v1" + coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" @@ -60,13 +61,14 @@ import ( rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" - v1alpha2 "k8s.io/api/resource/v1alpha2" + v1alpha3 "k8s.io/api/resource/v1alpha3" schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" storagev1 "k8s.io/api/storage/v1" storagev1alpha1 "k8s.io/api/storage/v1alpha1" storagev1beta1 "k8s.io/api/storage/v1beta1" + storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" ) @@ -100,6 +102,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource // Group=admissionregistration.k8s.io, Version=v1 case v1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations"): return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1().MutatingWebhookConfigurations().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("validatingadmissionpolicies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1().ValidatingAdmissionPolicies().Informer()}, nil + case 
v1.SchemeGroupVersion.WithResource("validatingadmissionpolicybindings"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1().ValidatingAdmissionPolicyBindings().Informer()}, nil case v1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations"): return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1().ValidatingWebhookConfigurations().Informer()}, nil @@ -193,6 +199,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case coordinationv1.SchemeGroupVersion.WithResource("leases"): return &genericInformer{resource: resource.GroupResource(), informer: f.Coordination().V1().Leases().Informer()}, nil + // Group=coordination.k8s.io, Version=v1alpha1 + case coordinationv1alpha1.SchemeGroupVersion.WithResource("leasecandidates"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Coordination().V1alpha1().LeaseCandidates().Informer()}, nil + // Group=coordination.k8s.io, Version=v1beta1 case coordinationv1beta1.SchemeGroupVersion.WithResource("leases"): return &genericInformer{resource: resource.GroupResource(), informer: f.Coordination().V1beta1().Leases().Informer()}, nil @@ -302,10 +312,14 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().ServiceCIDRs().Informer()}, nil // Group=networking.k8s.io, Version=v1beta1 + case networkingv1beta1.SchemeGroupVersion.WithResource("ipaddresses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().IPAddresses().Informer()}, nil case networkingv1beta1.SchemeGroupVersion.WithResource("ingresses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().Ingresses().Informer()}, nil case networkingv1beta1.SchemeGroupVersion.WithResource("ingressclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().IngressClasses().Informer()}, nil + case networkingv1beta1.SchemeGroupVersion.WithResource("servicecidrs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().ServiceCIDRs().Informer()}, nil // Group=node.k8s.io, Version=v1 case nodev1.SchemeGroupVersion.WithResource("runtimeclasses"): @@ -357,15 +371,17 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case rbacv1beta1.SchemeGroupVersion.WithResource("rolebindings"): return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1beta1().RoleBindings().Informer()}, nil - // Group=resource.k8s.io, Version=v1alpha2 - case v1alpha2.SchemeGroupVersion.WithResource("podschedulingcontexts"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().PodSchedulingContexts().Informer()}, nil - case v1alpha2.SchemeGroupVersion.WithResource("resourceclaims"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceClaims().Informer()}, nil - case v1alpha2.SchemeGroupVersion.WithResource("resourceclaimtemplates"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceClaimTemplates().Informer()}, nil - case v1alpha2.SchemeGroupVersion.WithResource("resourceclasses"): - return &genericInformer{resource: resource.GroupResource(), informer: 
f.Resource().V1alpha2().ResourceClasses().Informer()}, nil + // Group=resource.k8s.io, Version=v1alpha3 + case v1alpha3.SchemeGroupVersion.WithResource("deviceclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha3().DeviceClasses().Informer()}, nil + case v1alpha3.SchemeGroupVersion.WithResource("podschedulingcontexts"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha3().PodSchedulingContexts().Informer()}, nil + case v1alpha3.SchemeGroupVersion.WithResource("resourceclaims"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha3().ResourceClaims().Informer()}, nil + case v1alpha3.SchemeGroupVersion.WithResource("resourceclaimtemplates"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha3().ResourceClaimTemplates().Informer()}, nil + case v1alpha3.SchemeGroupVersion.WithResource("resourceslices"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha3().ResourceSlices().Informer()}, nil // Group=scheduling.k8s.io, Version=v1 case schedulingv1.SchemeGroupVersion.WithResource("priorityclasses"): @@ -410,6 +426,12 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1beta1().StorageClasses().Informer()}, nil case storagev1beta1.SchemeGroupVersion.WithResource("volumeattachments"): return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1beta1().VolumeAttachments().Informer()}, nil + case storagev1beta1.SchemeGroupVersion.WithResource("volumeattributesclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1beta1().VolumeAttributesClasses().Informer()}, nil + + // Group=storagemigration.k8s.io, Version=v1alpha1 + case storagemigrationv1alpha1.SchemeGroupVersion.WithResource("storageversionmigrations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Storagemigration().V1alpha1().StorageVersionMigrations().Informer()}, nil } diff --git a/vendor/k8s.io/client-go/informers/networking/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/networking/v1beta1/interface.go index 2dcc3129a5..974a8fd5bf 100644 --- a/vendor/k8s.io/client-go/informers/networking/v1beta1/interface.go +++ b/vendor/k8s.io/client-go/informers/networking/v1beta1/interface.go @@ -24,10 +24,14 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { + // IPAddresses returns a IPAddressInformer. + IPAddresses() IPAddressInformer // Ingresses returns a IngressInformer. Ingresses() IngressInformer // IngressClasses returns a IngressClassInformer. IngressClasses() IngressClassInformer + // ServiceCIDRs returns a ServiceCIDRInformer. + ServiceCIDRs() ServiceCIDRInformer } type version struct { @@ -41,6 +45,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// IPAddresses returns a IPAddressInformer. +func (v *version) IPAddresses() IPAddressInformer { + return &iPAddressInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // Ingresses returns a IngressInformer. 
func (v *version) Ingresses() IngressInformer { return &ingressInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} @@ -50,3 +59,8 @@ func (v *version) Ingresses() IngressInformer { func (v *version) IngressClasses() IngressClassInformer { return &ingressClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } + +// ServiceCIDRs returns a ServiceCIDRInformer. +func (v *version) ServiceCIDRs() ServiceCIDRInformer { + return &serviceCIDRInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/networking/v1beta1/ipaddress.go b/vendor/k8s.io/client-go/informers/networking/v1beta1/ipaddress.go new file mode 100644 index 0000000000..2a2dfa2907 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/networking/v1beta1/ipaddress.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + time "time" + + networkingv1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/networking/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// IPAddressInformer provides access to a shared informer and lister for +// IPAddresses. +type IPAddressInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.IPAddressLister +} + +type iPAddressInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewIPAddressInformer constructs a new informer for IPAddress type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewIPAddressInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredIPAddressInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredIPAddressInformer constructs a new informer for IPAddress type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredIPAddressInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1beta1().IPAddresses().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1beta1().IPAddresses().Watch(context.TODO(), options) + }, + }, + &networkingv1beta1.IPAddress{}, + resyncPeriod, + indexers, + ) +} + +func (f *iPAddressInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredIPAddressInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *iPAddressInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&networkingv1beta1.IPAddress{}, f.defaultInformer) +} + +func (f *iPAddressInformer) Lister() v1beta1.IPAddressLister { + return v1beta1.NewIPAddressLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/networking/v1beta1/servicecidr.go b/vendor/k8s.io/client-go/informers/networking/v1beta1/servicecidr.go new file mode 100644 index 0000000000..d5a9ce0146 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/networking/v1beta1/servicecidr.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + time "time" + + networkingv1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/networking/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// ServiceCIDRInformer provides access to a shared informer and lister for +// ServiceCIDRs. +type ServiceCIDRInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.ServiceCIDRLister +} + +type serviceCIDRInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewServiceCIDRInformer constructs a new informer for ServiceCIDR type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewServiceCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredServiceCIDRInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredServiceCIDRInformer constructs a new informer for ServiceCIDR type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredServiceCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1beta1().ServiceCIDRs().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1beta1().ServiceCIDRs().Watch(context.TODO(), options) + }, + }, + &networkingv1beta1.ServiceCIDR{}, + resyncPeriod, + indexers, + ) +} + +func (f *serviceCIDRInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredServiceCIDRInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *serviceCIDRInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&networkingv1beta1.ServiceCIDR{}, f.defaultInformer) +} + +func (f *serviceCIDRInformer) Lister() v1beta1.ServiceCIDRLister { + return v1beta1.NewServiceCIDRLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/resource/interface.go b/vendor/k8s.io/client-go/informers/resource/interface.go index 3fcce8ae9d..170d29d808 100644 --- a/vendor/k8s.io/client-go/informers/resource/interface.go +++ b/vendor/k8s.io/client-go/informers/resource/interface.go @@ -20,13 +20,13 @@ package resource import ( internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - v1alpha2 "k8s.io/client-go/informers/resource/v1alpha2" + v1alpha3 "k8s.io/client-go/informers/resource/v1alpha3" ) // Interface provides access to each of this group's versions. type Interface interface { - // V1alpha2 provides access to shared informers for resources in V1alpha2. - V1alpha2() v1alpha2.Interface + // V1alpha3 provides access to shared informers for resources in V1alpha3. + V1alpha3() v1alpha3.Interface } type group struct { @@ -40,7 +40,7 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } -// V1alpha2 returns a new v1alpha2.Interface. -func (g *group) V1alpha2() v1alpha2.Interface { - return v1alpha2.New(g.factory, g.namespace, g.tweakListOptions) +// V1alpha3 returns a new v1alpha3.Interface. +func (g *group) V1alpha3() v1alpha3.Interface { + return v1alpha3.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha3/deviceclass.go b/vendor/k8s.io/client-go/informers/resource/v1alpha3/deviceclass.go new file mode 100644 index 0000000000..c0bcbd1905 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha3/deviceclass.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + "context" + time "time" + + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha3 "k8s.io/client-go/listers/resource/v1alpha3" + cache "k8s.io/client-go/tools/cache" +) + +// DeviceClassInformer provides access to a shared informer and lister for +// DeviceClasses. +type DeviceClassInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha3.DeviceClassLister +} + +type deviceClassInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewDeviceClassInformer constructs a new informer for DeviceClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDeviceClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDeviceClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredDeviceClassInformer constructs a new informer for DeviceClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredDeviceClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1alpha3().DeviceClasses().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1alpha3().DeviceClasses().Watch(context.TODO(), options) + }, + }, + &resourcev1alpha3.DeviceClass{}, + resyncPeriod, + indexers, + ) +} + +func (f *deviceClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDeviceClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *deviceClassInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&resourcev1alpha3.DeviceClass{}, f.defaultInformer) +} + +func (f *deviceClassInformer) Lister() v1alpha3.DeviceClassLister { + return v1alpha3.NewDeviceClassLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha2/interface.go b/vendor/k8s.io/client-go/informers/resource/v1alpha3/interface.go similarity index 80% rename from vendor/k8s.io/client-go/informers/resource/v1alpha2/interface.go rename to vendor/k8s.io/client-go/informers/resource/v1alpha3/interface.go index 23f817c62e..481a7de451 100644 --- a/vendor/k8s.io/client-go/informers/resource/v1alpha2/interface.go +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha3/interface.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( internalinterfaces "k8s.io/client-go/informers/internalinterfaces" @@ -24,14 +24,16 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { + // DeviceClasses returns a DeviceClassInformer. + DeviceClasses() DeviceClassInformer // PodSchedulingContexts returns a PodSchedulingContextInformer. PodSchedulingContexts() PodSchedulingContextInformer // ResourceClaims returns a ResourceClaimInformer. ResourceClaims() ResourceClaimInformer // ResourceClaimTemplates returns a ResourceClaimTemplateInformer. ResourceClaimTemplates() ResourceClaimTemplateInformer - // ResourceClasses returns a ResourceClassInformer. - ResourceClasses() ResourceClassInformer + // ResourceSlices returns a ResourceSliceInformer. + ResourceSlices() ResourceSliceInformer } type version struct { @@ -45,6 +47,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// DeviceClasses returns a DeviceClassInformer. +func (v *version) DeviceClasses() DeviceClassInformer { + return &deviceClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // PodSchedulingContexts returns a PodSchedulingContextInformer. 
func (v *version) PodSchedulingContexts() PodSchedulingContextInformer { return &podSchedulingContextInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} @@ -60,7 +67,7 @@ func (v *version) ResourceClaimTemplates() ResourceClaimTemplateInformer { return &resourceClaimTemplateInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } -// ResourceClasses returns a ResourceClassInformer. -func (v *version) ResourceClasses() ResourceClassInformer { - return &resourceClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +// ResourceSlices returns a ResourceSliceInformer. +func (v *version) ResourceSlices() ResourceSliceInformer { + return &resourceSliceInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha2/podschedulingcontext.go b/vendor/k8s.io/client-go/informers/resource/v1alpha3/podschedulingcontext.go similarity index 86% rename from vendor/k8s.io/client-go/informers/resource/v1alpha2/podschedulingcontext.go rename to vendor/k8s.io/client-go/informers/resource/v1alpha3/podschedulingcontext.go index b4aabb3761..62fb3614fc 100644 --- a/vendor/k8s.io/client-go/informers/resource/v1alpha2/podschedulingcontext.go +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha3/podschedulingcontext.go @@ -16,19 +16,19 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( "context" time "time" - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" + v1alpha3 "k8s.io/client-go/listers/resource/v1alpha3" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PodSchedulingContexts. 
type PodSchedulingContextInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha2.PodSchedulingContextLister + Lister() v1alpha3.PodSchedulingContextLister } type podSchedulingContextInformer struct { @@ -62,16 +62,16 @@ func NewFilteredPodSchedulingContextInformer(client kubernetes.Interface, namesp if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha2().PodSchedulingContexts(namespace).List(context.TODO(), options) + return client.ResourceV1alpha3().PodSchedulingContexts(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha2().PodSchedulingContexts(namespace).Watch(context.TODO(), options) + return client.ResourceV1alpha3().PodSchedulingContexts(namespace).Watch(context.TODO(), options) }, }, - &resourcev1alpha2.PodSchedulingContext{}, + &resourcev1alpha3.PodSchedulingContext{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *podSchedulingContextInformer) defaultInformer(client kubernetes.Interfa } func (f *podSchedulingContextInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha2.PodSchedulingContext{}, f.defaultInformer) + return f.factory.InformerFor(&resourcev1alpha3.PodSchedulingContext{}, f.defaultInformer) } -func (f *podSchedulingContextInformer) Lister() v1alpha2.PodSchedulingContextLister { - return v1alpha2.NewPodSchedulingContextLister(f.Informer().GetIndexer()) +func (f *podSchedulingContextInformer) Lister() v1alpha3.PodSchedulingContextLister { + return v1alpha3.NewPodSchedulingContextLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaim.go b/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaim.go similarity index 85% rename from vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaim.go rename to vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaim.go index 3af9368919..fa644579b1 100644 --- a/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaim.go +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaim.go @@ -16,19 +16,19 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( "context" time "time" - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" + v1alpha3 "k8s.io/client-go/listers/resource/v1alpha3" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ResourceClaims. 
type ResourceClaimInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha2.ResourceClaimLister + Lister() v1alpha3.ResourceClaimLister } type resourceClaimInformer struct { @@ -62,16 +62,16 @@ func NewFilteredResourceClaimInformer(client kubernetes.Interface, namespace str if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha2().ResourceClaims(namespace).List(context.TODO(), options) + return client.ResourceV1alpha3().ResourceClaims(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha2().ResourceClaims(namespace).Watch(context.TODO(), options) + return client.ResourceV1alpha3().ResourceClaims(namespace).Watch(context.TODO(), options) }, }, - &resourcev1alpha2.ResourceClaim{}, + &resourcev1alpha3.ResourceClaim{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *resourceClaimInformer) defaultInformer(client kubernetes.Interface, res } func (f *resourceClaimInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha2.ResourceClaim{}, f.defaultInformer) + return f.factory.InformerFor(&resourcev1alpha3.ResourceClaim{}, f.defaultInformer) } -func (f *resourceClaimInformer) Lister() v1alpha2.ResourceClaimLister { - return v1alpha2.NewResourceClaimLister(f.Informer().GetIndexer()) +func (f *resourceClaimInformer) Lister() v1alpha3.ResourceClaimLister { + return v1alpha3.NewResourceClaimLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaimtemplate.go b/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaimtemplate.go similarity index 86% rename from vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaimtemplate.go rename to vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaimtemplate.go index 13f4ad835c..294755661c 100644 --- a/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaimtemplate.go +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaimtemplate.go @@ -16,19 +16,19 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( "context" time "time" - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" + v1alpha3 "k8s.io/client-go/listers/resource/v1alpha3" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ResourceClaimTemplates. 
type ResourceClaimTemplateInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha2.ResourceClaimTemplateLister + Lister() v1alpha3.ResourceClaimTemplateLister } type resourceClaimTemplateInformer struct { @@ -62,16 +62,16 @@ func NewFilteredResourceClaimTemplateInformer(client kubernetes.Interface, names if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha2().ResourceClaimTemplates(namespace).List(context.TODO(), options) + return client.ResourceV1alpha3().ResourceClaimTemplates(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha2().ResourceClaimTemplates(namespace).Watch(context.TODO(), options) + return client.ResourceV1alpha3().ResourceClaimTemplates(namespace).Watch(context.TODO(), options) }, }, - &resourcev1alpha2.ResourceClaimTemplate{}, + &resourcev1alpha3.ResourceClaimTemplate{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *resourceClaimTemplateInformer) defaultInformer(client kubernetes.Interf } func (f *resourceClaimTemplateInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha2.ResourceClaimTemplate{}, f.defaultInformer) + return f.factory.InformerFor(&resourcev1alpha3.ResourceClaimTemplate{}, f.defaultInformer) } -func (f *resourceClaimTemplateInformer) Lister() v1alpha2.ResourceClaimTemplateLister { - return v1alpha2.NewResourceClaimTemplateLister(f.Informer().GetIndexer()) +func (f *resourceClaimTemplateInformer) Lister() v1alpha3.ResourceClaimTemplateLister { + return v1alpha3.NewResourceClaimTemplateLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclass.go b/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceslice.go similarity index 64% rename from vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclass.go rename to vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceslice.go index cb76d78fe4..108083530c 100644 --- a/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclass.go +++ b/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceslice.go @@ -16,74 +16,74 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( "context" time "time" - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" + v1alpha3 "k8s.io/client-go/listers/resource/v1alpha3" cache "k8s.io/client-go/tools/cache" ) -// ResourceClassInformer provides access to a shared informer and lister for -// ResourceClasses. -type ResourceClassInformer interface { +// ResourceSliceInformer provides access to a shared informer and lister for +// ResourceSlices. 
+type ResourceSliceInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha2.ResourceClassLister + Lister() v1alpha3.ResourceSliceLister } -type resourceClassInformer struct { +type resourceSliceInformer struct { factory internalinterfaces.SharedInformerFactory tweakListOptions internalinterfaces.TweakListOptionsFunc } -// NewResourceClassInformer constructs a new informer for ResourceClass type. +// NewResourceSliceInformer constructs a new informer for ResourceSlice type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. -func NewResourceClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredResourceClassInformer(client, resyncPeriod, indexers, nil) +func NewResourceSliceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredResourceSliceInformer(client, resyncPeriod, indexers, nil) } -// NewFilteredResourceClassInformer constructs a new informer for ResourceClass type. +// NewFilteredResourceSliceInformer constructs a new informer for ResourceSlice type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. -func NewFilteredResourceClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { +func NewFilteredResourceSliceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha2().ResourceClasses().List(context.TODO(), options) + return client.ResourceV1alpha3().ResourceSlices().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ResourceV1alpha2().ResourceClasses().Watch(context.TODO(), options) + return client.ResourceV1alpha3().ResourceSlices().Watch(context.TODO(), options) }, }, - &resourcev1alpha2.ResourceClass{}, + &resourcev1alpha3.ResourceSlice{}, resyncPeriod, indexers, ) } -func (f *resourceClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredResourceClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +func (f *resourceSliceInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredResourceSliceInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } -func (f *resourceClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha2.ResourceClass{}, f.defaultInformer) +func (f *resourceSliceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&resourcev1alpha3.ResourceSlice{}, f.defaultInformer) } -func (f 
*resourceClassInformer) Lister() v1alpha2.ResourceClassLister { - return v1alpha2.NewResourceClassLister(f.Informer().GetIndexer()) +func (f *resourceSliceInformer) Lister() v1alpha3.ResourceSliceLister { + return v1alpha3.NewResourceSliceLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go index 77b77c08ee..7433951855 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go @@ -34,6 +34,8 @@ type Interface interface { StorageClasses() StorageClassInformer // VolumeAttachments returns a VolumeAttachmentInformer. VolumeAttachments() VolumeAttachmentInformer + // VolumeAttributesClasses returns a VolumeAttributesClassInformer. + VolumeAttributesClasses() VolumeAttributesClassInformer } type version struct { @@ -71,3 +73,8 @@ func (v *version) StorageClasses() StorageClassInformer { func (v *version) VolumeAttachments() VolumeAttachmentInformer { return &volumeAttachmentInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } + +// VolumeAttributesClasses returns a VolumeAttributesClassInformer. +func (v *version) VolumeAttributesClasses() VolumeAttributesClassInformer { + return &volumeAttributesClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattributesclass.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattributesclass.go new file mode 100644 index 0000000000..ede90ce43c --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattributesclass.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + time "time" + + storagev1beta1 "k8s.io/api/storage/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/storage/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// VolumeAttributesClassInformer provides access to a shared informer and lister for +// VolumeAttributesClasses. +type VolumeAttributesClassInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.VolumeAttributesClassLister +} + +type volumeAttributesClassInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewVolumeAttributesClassInformer constructs a new informer for VolumeAttributesClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewVolumeAttributesClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredVolumeAttributesClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredVolumeAttributesClassInformer constructs a new informer for VolumeAttributesClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredVolumeAttributesClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1beta1().VolumeAttributesClasses().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1beta1().VolumeAttributesClasses().Watch(context.TODO(), options) + }, + }, + &storagev1beta1.VolumeAttributesClass{}, + resyncPeriod, + indexers, + ) +} + +func (f *volumeAttributesClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredVolumeAttributesClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *volumeAttributesClassInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&storagev1beta1.VolumeAttributesClass{}, f.defaultInformer) +} + +func (f *volumeAttributesClassInformer) Lister() v1beta1.VolumeAttributesClassLister { + return v1beta1.NewVolumeAttributesClassLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/storagemigration/interface.go b/vendor/k8s.io/client-go/informers/storagemigration/interface.go new file mode 100644 index 0000000000..1f7030fea8 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storagemigration/interface.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package storagemigration + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + v1alpha1 "k8s.io/client-go/informers/storagemigration/v1alpha1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/interface.go new file mode 100644 index 0000000000..60724e7a28 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // StorageVersionMigrations returns a StorageVersionMigrationInformer. + StorageVersionMigrations() StorageVersionMigrationInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// StorageVersionMigrations returns a StorageVersionMigrationInformer. +func (v *version) StorageVersionMigrations() StorageVersionMigrationInformer { + return &storageVersionMigrationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/storageversionmigration.go b/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/storageversionmigration.go new file mode 100644 index 0000000000..70e7c72797 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/storageversionmigration.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "context" + time "time" + + storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/storagemigration/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// StorageVersionMigrationInformer provides access to a shared informer and lister for +// StorageVersionMigrations. +type StorageVersionMigrationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.StorageVersionMigrationLister +} + +type storageVersionMigrationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewStorageVersionMigrationInformer constructs a new informer for StorageVersionMigration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewStorageVersionMigrationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredStorageVersionMigrationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredStorageVersionMigrationInformer constructs a new informer for StorageVersionMigration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredStorageVersionMigrationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StoragemigrationV1alpha1().StorageVersionMigrations().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StoragemigrationV1alpha1().StorageVersionMigrations().Watch(context.TODO(), options) + }, + }, + &storagemigrationv1alpha1.StorageVersionMigration{}, + resyncPeriod, + indexers, + ) +} + +func (f *storageVersionMigrationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredStorageVersionMigrationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *storageVersionMigrationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&storagemigrationv1alpha1.StorageVersionMigration{}, f.defaultInformer) +} + +func (f *storageVersionMigrationInformer) Lister() v1alpha1.StorageVersionMigrationLister { + return v1alpha1.NewStorageVersionMigrationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index a0095d086f..9cddb0bbed 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -45,6 +45,7 @@ import ( certificatesv1alpha1 
"k8s.io/client-go/kubernetes/typed/certificates/v1alpha1" certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" + coordinationv1alpha1 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1" coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" discoveryv1 "k8s.io/client-go/kubernetes/typed/discovery/v1" @@ -67,13 +68,14 @@ import ( rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" - resourcev1alpha2 "k8s.io/client-go/kubernetes/typed/resource/v1alpha2" + resourcev1alpha3 "k8s.io/client-go/kubernetes/typed/resource/v1alpha3" schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1" schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1" storagev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1" storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" + storagemigrationv1alpha1 "k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1" rest "k8s.io/client-go/rest" flowcontrol "k8s.io/client-go/util/flowcontrol" ) @@ -101,6 +103,7 @@ type Interface interface { CertificatesV1() certificatesv1.CertificatesV1Interface CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1alpha1Interface + CoordinationV1alpha1() coordinationv1alpha1.CoordinationV1alpha1Interface CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface CoordinationV1() coordinationv1.CoordinationV1Interface CoreV1() corev1.CoreV1Interface @@ -124,13 +127,14 @@ type Interface interface { RbacV1() rbacv1.RbacV1Interface RbacV1beta1() rbacv1beta1.RbacV1beta1Interface RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface - ResourceV1alpha2() resourcev1alpha2.ResourceV1alpha2Interface + ResourceV1alpha3() resourcev1alpha3.ResourceV1alpha3Interface SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface SchedulingV1() schedulingv1.SchedulingV1Interface StorageV1beta1() storagev1beta1.StorageV1beta1Interface StorageV1() storagev1.StorageV1Interface StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface + StoragemigrationV1alpha1() storagemigrationv1alpha1.StoragemigrationV1alpha1Interface } // Clientset contains the clients for groups. 
@@ -157,6 +161,7 @@ type Clientset struct { certificatesV1 *certificatesv1.CertificatesV1Client certificatesV1beta1 *certificatesv1beta1.CertificatesV1beta1Client certificatesV1alpha1 *certificatesv1alpha1.CertificatesV1alpha1Client + coordinationV1alpha1 *coordinationv1alpha1.CoordinationV1alpha1Client coordinationV1beta1 *coordinationv1beta1.CoordinationV1beta1Client coordinationV1 *coordinationv1.CoordinationV1Client coreV1 *corev1.CoreV1Client @@ -180,13 +185,14 @@ type Clientset struct { rbacV1 *rbacv1.RbacV1Client rbacV1beta1 *rbacv1beta1.RbacV1beta1Client rbacV1alpha1 *rbacv1alpha1.RbacV1alpha1Client - resourceV1alpha2 *resourcev1alpha2.ResourceV1alpha2Client + resourceV1alpha3 *resourcev1alpha3.ResourceV1alpha3Client schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client schedulingV1beta1 *schedulingv1beta1.SchedulingV1beta1Client schedulingV1 *schedulingv1.SchedulingV1Client storageV1beta1 *storagev1beta1.StorageV1beta1Client storageV1 *storagev1.StorageV1Client storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client + storagemigrationV1alpha1 *storagemigrationv1alpha1.StoragemigrationV1alpha1Client } // AdmissionregistrationV1 retrieves the AdmissionregistrationV1Client @@ -294,6 +300,11 @@ func (c *Clientset) CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1al return c.certificatesV1alpha1 } +// CoordinationV1alpha1 retrieves the CoordinationV1alpha1Client +func (c *Clientset) CoordinationV1alpha1() coordinationv1alpha1.CoordinationV1alpha1Interface { + return c.coordinationV1alpha1 +} + // CoordinationV1beta1 retrieves the CoordinationV1beta1Client func (c *Clientset) CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface { return c.coordinationV1beta1 @@ -409,9 +420,9 @@ func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface { return c.rbacV1alpha1 } -// ResourceV1alpha2 retrieves the ResourceV1alpha2Client -func (c *Clientset) ResourceV1alpha2() resourcev1alpha2.ResourceV1alpha2Interface { - return c.resourceV1alpha2 +// ResourceV1alpha3 retrieves the ResourceV1alpha3Client +func (c *Clientset) ResourceV1alpha3() resourcev1alpha3.ResourceV1alpha3Interface { + return c.resourceV1alpha3 } // SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client @@ -444,6 +455,11 @@ func (c *Clientset) StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface { return c.storageV1alpha1 } +// StoragemigrationV1alpha1 retrieves the StoragemigrationV1alpha1Client +func (c *Clientset) StoragemigrationV1alpha1() storagemigrationv1alpha1.StoragemigrationV1alpha1Interface { + return c.storagemigrationV1alpha1 +} + // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { @@ -572,6 +588,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.coordinationV1alpha1, err = coordinationv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.coordinationV1beta1, err = coordinationv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -664,7 +684,7 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } - cs.resourceV1alpha2, err = resourcev1alpha2.NewForConfigAndClient(&configShallowCopy, httpClient) + cs.resourceV1alpha3, err = resourcev1alpha3.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } @@ -692,6 +712,10 @@ func 
NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.storagemigrationV1alpha1, err = storagemigrationv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) if err != nil { @@ -734,6 +758,7 @@ func New(c rest.Interface) *Clientset { cs.certificatesV1 = certificatesv1.New(c) cs.certificatesV1beta1 = certificatesv1beta1.New(c) cs.certificatesV1alpha1 = certificatesv1alpha1.New(c) + cs.coordinationV1alpha1 = coordinationv1alpha1.New(c) cs.coordinationV1beta1 = coordinationv1beta1.New(c) cs.coordinationV1 = coordinationv1.New(c) cs.coreV1 = corev1.New(c) @@ -757,13 +782,14 @@ func New(c rest.Interface) *Clientset { cs.rbacV1 = rbacv1.New(c) cs.rbacV1beta1 = rbacv1beta1.New(c) cs.rbacV1alpha1 = rbacv1alpha1.New(c) - cs.resourceV1alpha2 = resourcev1alpha2.New(c) + cs.resourceV1alpha3 = resourcev1alpha3.New(c) cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) cs.schedulingV1beta1 = schedulingv1beta1.New(c) cs.schedulingV1 = schedulingv1.New(c) cs.storageV1beta1 = storagev1beta1.New(c) cs.storageV1 = storagev1.New(c) cs.storageV1alpha1 = storagev1alpha1.New(c) + cs.storagemigrationV1alpha1 = storagemigrationv1alpha1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/vendor/k8s.io/client-go/kubernetes/doc.go b/vendor/k8s.io/client-go/kubernetes/doc.go index 9cef4242f2..e052f81b85 100644 --- a/vendor/k8s.io/client-go/kubernetes/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package kubernetes holds packages which implement a clientset for Kubernetes // APIs. 
-package kubernetes +package kubernetes // import "k8s.io/client-go/kubernetes" diff --git a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go index fc529873f1..132f917abe 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go +++ b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go @@ -21,6 +21,7 @@ package fake import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" + applyconfigurations "k8s.io/client-go/applyconfigurations" "k8s.io/client-go/discovery" fakediscovery "k8s.io/client-go/discovery/fake" clientset "k8s.io/client-go/kubernetes" @@ -68,6 +69,8 @@ import ( fakecertificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake" coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" fakecoordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1/fake" + coordinationv1alpha1 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1" + fakecoordinationv1alpha1 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake" coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" fakecoordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" @@ -112,8 +115,8 @@ import ( fakerbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake" rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" fakerbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake" - resourcev1alpha2 "k8s.io/client-go/kubernetes/typed/resource/v1alpha2" - fakeresourcev1alpha2 "k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake" + resourcev1alpha3 "k8s.io/client-go/kubernetes/typed/resource/v1alpha3" + fakeresourcev1alpha3 "k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake" schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1" fakeschedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1/fake" schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" @@ -126,13 +129,19 @@ import ( fakestoragev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake" storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" fakestoragev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake" + storagemigrationv1alpha1 "k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1" + fakestoragemigrationv1alpha1 "k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake" "k8s.io/client-go/testing" ) // NewSimpleClientset returns a clientset that will respond with the provided objects. // It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any validations and/or defaults. It shouldn't be considered a replacement +// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement // for a real clientset and is mostly useful in simple unit tests. +// +// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves +// server side apply testing. NewClientset is only available when apply configurations are generated (e.g. +// via --with-applyconfig). 
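Editorial note on the deprecation comment above: it points tests at the new fake.NewClientset constructor, whose field-managed object tracker lets server-side apply work in unit tests, unlike NewSimpleClientset. A hedged sketch of how a test might use it (the test name, namespace, and field manager string are illustrative; the Apply call follows the typed client signatures in this patch):

// Sketch under the assumption that the vendored client-go after this bump is in use.
package example

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestServerSideApplyWithFieldManagedFake(t *testing.T) {
	// NewClientset wires in the field-managed object tracker; the older
	// NewSimpleClientset tracker does not support apply patches.
	cs := fake.NewClientset()

	cm := applycorev1.ConfigMap("settings", "default").
		WithData(map[string]string{"key": "value"})

	got, err := cs.CoreV1().ConfigMaps("default").
		Apply(context.Background(), cm, metav1.ApplyOptions{FieldManager: "example-test"})
	if err != nil {
		t.Fatal(err)
	}
	if got.Data["key"] != "value" {
		t.Fatalf("unexpected data: %v", got.Data)
	}
}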
func NewSimpleClientset(objects ...runtime.Object) *Clientset { o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) for _, obj := range objects { @@ -174,6 +183,38 @@ func (c *Clientset) Tracker() testing.ObjectTracker { return c.tracker } +// NewClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewClientset(objects ...runtime.Object) *Clientset { + o := testing.NewFieldManagedObjectTracker( + scheme, + codecs.UniversalDecoder(), + applyconfigurations.NewTypeConverter(scheme), + ) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + var ( _ clientset.Interface = &Clientset{} _ testing.FakeClient = &Clientset{} @@ -284,6 +325,11 @@ func (c *Clientset) CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1al return &fakecertificatesv1alpha1.FakeCertificatesV1alpha1{Fake: &c.Fake} } +// CoordinationV1alpha1 retrieves the CoordinationV1alpha1Client +func (c *Clientset) CoordinationV1alpha1() coordinationv1alpha1.CoordinationV1alpha1Interface { + return &fakecoordinationv1alpha1.FakeCoordinationV1alpha1{Fake: &c.Fake} +} + // CoordinationV1beta1 retrieves the CoordinationV1beta1Client func (c *Clientset) CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface { return &fakecoordinationv1beta1.FakeCoordinationV1beta1{Fake: &c.Fake} @@ -399,9 +445,9 @@ func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface { return &fakerbacv1alpha1.FakeRbacV1alpha1{Fake: &c.Fake} } -// ResourceV1alpha2 retrieves the ResourceV1alpha2Client -func (c *Clientset) ResourceV1alpha2() resourcev1alpha2.ResourceV1alpha2Interface { - return &fakeresourcev1alpha2.FakeResourceV1alpha2{Fake: &c.Fake} +// ResourceV1alpha3 retrieves the ResourceV1alpha3Client +func (c *Clientset) ResourceV1alpha3() resourcev1alpha3.ResourceV1alpha3Interface { + return &fakeresourcev1alpha3.FakeResourceV1alpha3{Fake: &c.Fake} } // SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client @@ -433,3 +479,8 @@ func (c *Clientset) StorageV1() storagev1.StorageV1Interface { func (c *Clientset) StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface { return &fakestoragev1alpha1.FakeStorageV1alpha1{Fake: &c.Fake} } + +// StoragemigrationV1alpha1 retrieves the StoragemigrationV1alpha1Client +func (c *Clientset) StoragemigrationV1alpha1() storagemigrationv1alpha1.StoragemigrationV1alpha1Interface { + return &fakestoragemigrationv1alpha1.FakeStoragemigrationV1alpha1{Fake: &c.Fake} +} diff --git a/vendor/k8s.io/client-go/kubernetes/fake/register.go b/vendor/k8s.io/client-go/kubernetes/fake/register.go index 6b80d68339..157abae5ff 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/register.go +++ b/vendor/k8s.io/client-go/kubernetes/fake/register.go @@ -41,6 +41,7 @@ import ( certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" 
certificatesv1beta1 "k8s.io/api/certificates/v1beta1" coordinationv1 "k8s.io/api/coordination/v1" + coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" @@ -63,13 +64,14 @@ import ( rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" storagev1 "k8s.io/api/storage/v1" storagev1alpha1 "k8s.io/api/storage/v1alpha1" storagev1beta1 "k8s.io/api/storage/v1beta1" + storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -102,6 +104,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ certificatesv1.AddToScheme, certificatesv1beta1.AddToScheme, certificatesv1alpha1.AddToScheme, + coordinationv1alpha1.AddToScheme, coordinationv1beta1.AddToScheme, coordinationv1.AddToScheme, corev1.AddToScheme, @@ -125,13 +128,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ rbacv1.AddToScheme, rbacv1beta1.AddToScheme, rbacv1alpha1.AddToScheme, - resourcev1alpha2.AddToScheme, + resourcev1alpha3.AddToScheme, schedulingv1alpha1.AddToScheme, schedulingv1beta1.AddToScheme, schedulingv1.AddToScheme, storagev1beta1.AddToScheme, storagev1.AddToScheme, storagev1alpha1.AddToScheme, + storagemigrationv1alpha1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index f44055fbfc..5262b0f046 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -41,6 +41,7 @@ import ( certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" coordinationv1 "k8s.io/api/coordination/v1" + coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" @@ -63,13 +64,14 @@ import ( rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" storagev1 "k8s.io/api/storage/v1" storagev1alpha1 "k8s.io/api/storage/v1alpha1" storagev1beta1 "k8s.io/api/storage/v1beta1" + storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -102,6 +104,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ certificatesv1.AddToScheme, certificatesv1beta1.AddToScheme, certificatesv1alpha1.AddToScheme, + coordinationv1alpha1.AddToScheme, coordinationv1beta1.AddToScheme, coordinationv1.AddToScheme, corev1.AddToScheme, @@ -125,13 +128,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ rbacv1.AddToScheme, rbacv1beta1.AddToScheme, rbacv1alpha1.AddToScheme, - 
resourcev1alpha2.AddToScheme, + resourcev1alpha3.AddToScheme, schedulingv1alpha1.AddToScheme, schedulingv1beta1.AddToScheme, schedulingv1.AddToScheme, storagev1beta1.AddToScheme, storagev1.AddToScheme, storagev1alpha1.AddToScheme, + storagemigrationv1alpha1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go index 10848bed17..a81b2b6829 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go @@ -29,6 +29,8 @@ import ( type AdmissionregistrationV1Interface interface { RESTClient() rest.Interface MutatingWebhookConfigurationsGetter + ValidatingAdmissionPoliciesGetter + ValidatingAdmissionPolicyBindingsGetter ValidatingWebhookConfigurationsGetter } @@ -41,6 +43,14 @@ func (c *AdmissionregistrationV1Client) MutatingWebhookConfigurations() Mutating return newMutatingWebhookConfigurations(c) } +func (c *AdmissionregistrationV1Client) ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInterface { + return newValidatingAdmissionPolicies(c) +} + +func (c *AdmissionregistrationV1Client) ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInterface { + return newValidatingAdmissionPolicyBindings(c) +} + func (c *AdmissionregistrationV1Client) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface { return newValidatingWebhookConfigurations(c) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_admissionregistration_client.go index a5a570ad15..b7487c2fbd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_admissionregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_admissionregistration_client.go @@ -32,6 +32,14 @@ func (c *FakeAdmissionregistrationV1) MutatingWebhookConfigurations() v1.Mutatin return &FakeMutatingWebhookConfigurations{c} } +func (c *FakeAdmissionregistrationV1) ValidatingAdmissionPolicies() v1.ValidatingAdmissionPolicyInterface { + return &FakeValidatingAdmissionPolicies{c} +} + +func (c *FakeAdmissionregistrationV1) ValidatingAdmissionPolicyBindings() v1.ValidatingAdmissionPolicyBindingInterface { + return &FakeValidatingAdmissionPolicyBindings{c} +} + func (c *FakeAdmissionregistrationV1) ValidatingWebhookConfigurations() v1.ValidatingWebhookConfigurationInterface { return &FakeValidatingWebhookConfigurations{c} } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go index b88598b715..2d371e6fc7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go @@ -43,20 +43,22 @@ var mutatingwebhookconfigurationsKind = v1.SchemeGroupVersion.WithKind("Mutating // Get takes name of the mutatingWebhookConfiguration, and returns the corresponding 
mutatingWebhookConfiguration object, and an error if there is any. func (c *FakeMutatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.MutatingWebhookConfiguration, err error) { + emptyResult := &v1.MutatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(mutatingwebhookconfigurationsResource, name), &v1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootGetActionWithOptions(mutatingwebhookconfigurationsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.MutatingWebhookConfiguration), err } // List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. func (c *FakeMutatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.MutatingWebhookConfigurationList, err error) { + emptyResult := &v1.MutatingWebhookConfigurationList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(mutatingwebhookconfigurationsResource, mutatingwebhookconfigurationsKind, opts), &v1.MutatingWebhookConfigurationList{}) + Invokes(testing.NewRootListActionWithOptions(mutatingwebhookconfigurationsResource, mutatingwebhookconfigurationsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeMutatingWebhookConfigurations) List(ctx context.Context, opts metav // Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. func (c *FakeMutatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(mutatingwebhookconfigurationsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(mutatingwebhookconfigurationsResource, opts)) } // Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. func (c *FakeMutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.MutatingWebhookConfiguration, err error) { + emptyResult := &v1.MutatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootCreateActionWithOptions(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.MutatingWebhookConfiguration), err } // Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. func (c *FakeMutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.MutatingWebhookConfiguration, err error) { + emptyResult := &v1.MutatingWebhookConfiguration{} obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootUpdateActionWithOptions(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.MutatingWebhookConfiguration), err } @@ -107,7 +111,7 @@ func (c *FakeMutatingWebhookConfigurations) Delete(ctx context.Context, name str // DeleteCollection deletes a collection of objects. func (c *FakeMutatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(mutatingwebhookconfigurationsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(mutatingwebhookconfigurationsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.MutatingWebhookConfigurationList{}) return err @@ -115,10 +119,11 @@ func (c *FakeMutatingWebhookConfigurations) DeleteCollection(ctx context.Context // Patch applies the patch and returns the patched mutatingWebhookConfiguration. func (c *FakeMutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) { + emptyResult := &v1.MutatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(mutatingwebhookconfigurationsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.MutatingWebhookConfiguration), err } @@ -136,10 +141,11 @@ func (c *FakeMutatingWebhookConfigurations) Apply(ctx context.Context, mutatingW if name == nil { return nil, fmt.Errorf("mutatingWebhookConfiguration.Name must be provided to Apply") } + emptyResult := &v1.MutatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data), &v1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(mutatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.MutatingWebhookConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicy.go new file mode 100644 index 0000000000..d6c7bec898 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicy.go @@ -0,0 +1,186 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + testing "k8s.io/client-go/testing" +) + +// FakeValidatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface +type FakeValidatingAdmissionPolicies struct { + Fake *FakeAdmissionregistrationV1 +} + +var validatingadmissionpoliciesResource = v1.SchemeGroupVersion.WithResource("validatingadmissionpolicies") + +var validatingadmissionpoliciesKind = v1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicy") + +// Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any. +func (c *FakeValidatingAdmissionPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1.ValidatingAdmissionPolicy{} + obj, err := c.Fake. + Invokes(testing.NewRootGetActionWithOptions(validatingadmissionpoliciesResource, name, options), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ValidatingAdmissionPolicy), err +} + +// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors. +func (c *FakeValidatingAdmissionPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingAdmissionPolicyList, err error) { + emptyResult := &v1.ValidatingAdmissionPolicyList{} + obj, err := c.Fake. + Invokes(testing.NewRootListActionWithOptions(validatingadmissionpoliciesResource, validatingadmissionpoliciesKind, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.ValidatingAdmissionPolicyList{ListMeta: obj.(*v1.ValidatingAdmissionPolicyList).ListMeta} + for _, item := range obj.(*v1.ValidatingAdmissionPolicyList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies. +func (c *FakeValidatingAdmissionPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchActionWithOptions(validatingadmissionpoliciesResource, opts)) +} + +// Create takes the representation of a validatingAdmissionPolicy and creates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. +func (c *FakeValidatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.CreateOptions) (result *v1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1.ValidatingAdmissionPolicy{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootCreateActionWithOptions(validatingadmissionpoliciesResource, validatingAdmissionPolicy, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ValidatingAdmissionPolicy), err +} + +// Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. +func (c *FakeValidatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (result *v1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1.ValidatingAdmissionPolicy{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateActionWithOptions(validatingadmissionpoliciesResource, validatingAdmissionPolicy, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ValidatingAdmissionPolicy), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeValidatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (result *v1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1.ValidatingAdmissionPolicy{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(validatingadmissionpoliciesResource, "status", validatingAdmissionPolicy, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ValidatingAdmissionPolicy), err +} + +// Delete takes name of the validatingAdmissionPolicy and deletes it. Returns an error if one occurs. +func (c *FakeValidatingAdmissionPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(validatingadmissionpoliciesResource, name, opts), &v1.ValidatingAdmissionPolicy{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeValidatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewRootDeleteCollectionActionWithOptions(validatingadmissionpoliciesResource, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1.ValidatingAdmissionPolicyList{}) + return err +} + +// Patch applies the patch and returns the patched validatingAdmissionPolicy. +func (c *FakeValidatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1.ValidatingAdmissionPolicy{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, name, pt, data, opts, subresources...), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ValidatingAdmissionPolicy), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicy. 
+func (c *FakeValidatingAdmissionPolicies) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error) { + if validatingAdmissionPolicy == nil { + return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") + } + data, err := json.Marshal(validatingAdmissionPolicy) + if err != nil { + return nil, err + } + name := validatingAdmissionPolicy.Name + if name == nil { + return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") + } + emptyResult := &v1.ValidatingAdmissionPolicy{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ValidatingAdmissionPolicy), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeValidatingAdmissionPolicies) ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error) { + if validatingAdmissionPolicy == nil { + return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") + } + data, err := json.Marshal(validatingAdmissionPolicy) + if err != nil { + return nil, err + } + name := validatingAdmissionPolicy.Name + if name == nil { + return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") + } + emptyResult := &v1.ValidatingAdmissionPolicy{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ValidatingAdmissionPolicy), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicybinding.go new file mode 100644 index 0000000000..5b6719be0a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicybinding.go @@ -0,0 +1,151 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + testing "k8s.io/client-go/testing" +) + +// FakeValidatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface +type FakeValidatingAdmissionPolicyBindings struct { + Fake *FakeAdmissionregistrationV1 +} + +var validatingadmissionpolicybindingsResource = v1.SchemeGroupVersion.WithResource("validatingadmissionpolicybindings") + +var validatingadmissionpolicybindingsKind = v1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBinding") + +// Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any. +func (c *FakeValidatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) { + emptyResult := &v1.ValidatingAdmissionPolicyBinding{} + obj, err := c.Fake. + Invokes(testing.NewRootGetActionWithOptions(validatingadmissionpolicybindingsResource, name, options), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ValidatingAdmissionPolicyBinding), err +} + +// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors. +func (c *FakeValidatingAdmissionPolicyBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingAdmissionPolicyBindingList, err error) { + emptyResult := &v1.ValidatingAdmissionPolicyBindingList{} + obj, err := c.Fake. + Invokes(testing.NewRootListActionWithOptions(validatingadmissionpolicybindingsResource, validatingadmissionpolicybindingsKind, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.ValidatingAdmissionPolicyBindingList{ListMeta: obj.(*v1.ValidatingAdmissionPolicyBindingList).ListMeta} + for _, item := range obj.(*v1.ValidatingAdmissionPolicyBindingList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings. +func (c *FakeValidatingAdmissionPolicyBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchActionWithOptions(validatingadmissionpolicybindingsResource, opts)) +} + +// Create takes the representation of a validatingAdmissionPolicyBinding and creates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. +func (c *FakeValidatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.CreateOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) { + emptyResult := &v1.ValidatingAdmissionPolicyBinding{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootCreateActionWithOptions(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ValidatingAdmissionPolicyBinding), err +} + +// Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. +func (c *FakeValidatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.UpdateOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) { + emptyResult := &v1.ValidatingAdmissionPolicyBinding{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateActionWithOptions(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ValidatingAdmissionPolicyBinding), err +} + +// Delete takes name of the validatingAdmissionPolicyBinding and deletes it. Returns an error if one occurs. +func (c *FakeValidatingAdmissionPolicyBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(validatingadmissionpolicybindingsResource, name, opts), &v1.ValidatingAdmissionPolicyBinding{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeValidatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewRootDeleteCollectionActionWithOptions(validatingadmissionpolicybindingsResource, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1.ValidatingAdmissionPolicyBindingList{}) + return err +} + +// Patch applies the patch and returns the patched validatingAdmissionPolicyBinding. +func (c *FakeValidatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicyBinding, err error) { + emptyResult := &v1.ValidatingAdmissionPolicyBinding{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpolicybindingsResource, name, pt, data, opts, subresources...), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ValidatingAdmissionPolicyBinding), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicyBinding. +func (c *FakeValidatingAdmissionPolicyBindings) Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) { + if validatingAdmissionPolicyBinding == nil { + return nil, fmt.Errorf("validatingAdmissionPolicyBinding provided to Apply must not be nil") + } + data, err := json.Marshal(validatingAdmissionPolicyBinding) + if err != nil { + return nil, err + } + name := validatingAdmissionPolicyBinding.Name + if name == nil { + return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply") + } + emptyResult := &v1.ValidatingAdmissionPolicyBinding{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpolicybindingsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ValidatingAdmissionPolicyBinding), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go index a6951c736e..ff7fc43013 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go @@ -43,20 +43,22 @@ var validatingwebhookconfigurationsKind = v1.SchemeGroupVersion.WithKind("Valida // Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. func (c *FakeValidatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingWebhookConfiguration, err error) { + emptyResult := &v1.ValidatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(validatingwebhookconfigurationsResource, name), &v1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootGetActionWithOptions(validatingwebhookconfigurationsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ValidatingWebhookConfiguration), err } // List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. func (c *FakeValidatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingWebhookConfigurationList, err error) { + emptyResult := &v1.ValidatingWebhookConfigurationList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(validatingwebhookconfigurationsResource, validatingwebhookconfigurationsKind, opts), &v1.ValidatingWebhookConfigurationList{}) + Invokes(testing.NewRootListActionWithOptions(validatingwebhookconfigurationsResource, validatingwebhookconfigurationsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeValidatingWebhookConfigurations) List(ctx context.Context, opts met // Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. func (c *FakeValidatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(validatingwebhookconfigurationsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(validatingwebhookconfigurationsResource, opts)) } // Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. func (c *FakeValidatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.ValidatingWebhookConfiguration, err error) { + emptyResult := &v1.ValidatingWebhookConfiguration{} obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootCreateActionWithOptions(validatingwebhookconfigurationsResource, validatingWebhookConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ValidatingWebhookConfiguration), err } // Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. func (c *FakeValidatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.ValidatingWebhookConfiguration, err error) { + emptyResult := &v1.ValidatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootUpdateActionWithOptions(validatingwebhookconfigurationsResource, validatingWebhookConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ValidatingWebhookConfiguration), err } @@ -107,7 +111,7 @@ func (c *FakeValidatingWebhookConfigurations) Delete(ctx context.Context, name s // DeleteCollection deletes a collection of objects. func (c *FakeValidatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(validatingwebhookconfigurationsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(validatingwebhookconfigurationsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.ValidatingWebhookConfigurationList{}) return err @@ -115,10 +119,11 @@ func (c *FakeValidatingWebhookConfigurations) DeleteCollection(ctx context.Conte // Patch applies the patch and returns the patched validatingWebhookConfiguration. func (c *FakeValidatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) { + emptyResult := &v1.ValidatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingwebhookconfigurationsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ValidatingWebhookConfiguration), err } @@ -136,10 +141,11 @@ func (c *FakeValidatingWebhookConfigurations) Apply(ctx context.Context, validat if name == nil { return nil, fmt.Errorf("validatingWebhookConfiguration.Name must be provided to Apply") } + emptyResult := &v1.ValidatingWebhookConfiguration{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data), &v1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ValidatingWebhookConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/generated_expansion.go index a5b062e37f..d81e1c87fc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/generated_expansion.go @@ -20,4 +20,8 @@ package v1 type MutatingWebhookConfigurationExpansion interface{} +type ValidatingAdmissionPolicyExpansion interface{} + +type ValidatingAdmissionPolicyBindingExpansion interface{} + type ValidatingWebhookConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go index edbc826d19..e863766c60 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // MutatingWebhookConfigurationsGetter has a method to return a MutatingWebhookConfigurationInterface. @@ -55,143 +52,18 @@ type MutatingWebhookConfigurationInterface interface { // mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface type mutatingWebhookConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.MutatingWebhookConfiguration, *v1.MutatingWebhookConfigurationList, *admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration] } // newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations func newMutatingWebhookConfigurations(c *AdmissionregistrationV1Client) *mutatingWebhookConfigurations { return &mutatingWebhookConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.MutatingWebhookConfiguration, *v1.MutatingWebhookConfigurationList, *admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration]( + "mutatingwebhookconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.MutatingWebhookConfiguration { return &v1.MutatingWebhookConfiguration{} }, + func() *v1.MutatingWebhookConfigurationList { return &v1.MutatingWebhookConfigurationList{} }), } } - -// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. 
-func (c *mutatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.MutatingWebhookConfiguration, err error) { - result = &v1.MutatingWebhookConfiguration{} - err = c.client.Get(). - Resource("mutatingwebhookconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. -func (c *mutatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.MutatingWebhookConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.MutatingWebhookConfigurationList{} - err = c.client.Get(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. -func (c *mutatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *mutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.MutatingWebhookConfiguration, err error) { - result = &v1.MutatingWebhookConfiguration{} - err = c.client.Post(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(mutatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *mutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.MutatingWebhookConfiguration, err error) { - result = &v1.MutatingWebhookConfiguration{} - err = c.client.Put(). - Resource("mutatingwebhookconfigurations"). - Name(mutatingWebhookConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(mutatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *mutatingWebhookConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("mutatingwebhookconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *mutatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched mutatingWebhookConfiguration. -func (c *mutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) { - result = &v1.MutatingWebhookConfiguration{} - err = c.client.Patch(pt). - Resource("mutatingwebhookconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied mutatingWebhookConfiguration. -func (c *mutatingWebhookConfigurations) Apply(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.MutatingWebhookConfiguration, err error) { - if mutatingWebhookConfiguration == nil { - return nil, fmt.Errorf("mutatingWebhookConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(mutatingWebhookConfiguration) - if err != nil { - return nil, err - } - name := mutatingWebhookConfiguration.Name - if name == nil { - return nil, fmt.Errorf("mutatingWebhookConfiguration.Name must be provided to Apply") - } - result = &v1.MutatingWebhookConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("mutatingwebhookconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go new file mode 100644 index 0000000000..1b20e69606 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go @@ -0,0 +1,73 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + + v1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ValidatingAdmissionPoliciesGetter has a method to return a ValidatingAdmissionPolicyInterface. +// A group's client should implement this interface. +type ValidatingAdmissionPoliciesGetter interface { + ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInterface +} + +// ValidatingAdmissionPolicyInterface has methods to work with ValidatingAdmissionPolicy resources. +type ValidatingAdmissionPolicyInterface interface { + Create(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.CreateOptions) (*v1.ValidatingAdmissionPolicy, error) + Update(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (*v1.ValidatingAdmissionPolicy, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (*v1.ValidatingAdmissionPolicy, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ValidatingAdmissionPolicy, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ValidatingAdmissionPolicyList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicy, err error) + Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error) + ValidatingAdmissionPolicyExpansion +} + +// validatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface +type validatingAdmissionPolicies struct { + *gentype.ClientWithListAndApply[*v1.ValidatingAdmissionPolicy, *v1.ValidatingAdmissionPolicyList, *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration] +} + +// newValidatingAdmissionPolicies returns a ValidatingAdmissionPolicies +func newValidatingAdmissionPolicies(c *AdmissionregistrationV1Client) *validatingAdmissionPolicies { + return &validatingAdmissionPolicies{ + gentype.NewClientWithListAndApply[*v1.ValidatingAdmissionPolicy, *v1.ValidatingAdmissionPolicyList, *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration]( + "validatingadmissionpolicies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.ValidatingAdmissionPolicy { return &v1.ValidatingAdmissionPolicy{} }, + func() *v1.ValidatingAdmissionPolicyList { return &v1.ValidatingAdmissionPolicyList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go new file mode 100644 index 0000000000..44694b2329 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go @@ -0,0 +1,69 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + + v1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ValidatingAdmissionPolicyBindingsGetter has a method to return a ValidatingAdmissionPolicyBindingInterface. +// A group's client should implement this interface. +type ValidatingAdmissionPolicyBindingsGetter interface { + ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInterface +} + +// ValidatingAdmissionPolicyBindingInterface has methods to work with ValidatingAdmissionPolicyBinding resources. 
+type ValidatingAdmissionPolicyBindingInterface interface { + Create(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.CreateOptions) (*v1.ValidatingAdmissionPolicyBinding, error) + Update(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.UpdateOptions) (*v1.ValidatingAdmissionPolicyBinding, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ValidatingAdmissionPolicyBinding, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ValidatingAdmissionPolicyBindingList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicyBinding, err error) + Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) + ValidatingAdmissionPolicyBindingExpansion +} + +// validatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface +type validatingAdmissionPolicyBindings struct { + *gentype.ClientWithListAndApply[*v1.ValidatingAdmissionPolicyBinding, *v1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration] +} + +// newValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindings +func newValidatingAdmissionPolicyBindings(c *AdmissionregistrationV1Client) *validatingAdmissionPolicyBindings { + return &validatingAdmissionPolicyBindings{ + gentype.NewClientWithListAndApply[*v1.ValidatingAdmissionPolicyBinding, *v1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration]( + "validatingadmissionpolicybindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.ValidatingAdmissionPolicyBinding { return &v1.ValidatingAdmissionPolicyBinding{} }, + func() *v1.ValidatingAdmissionPolicyBindingList { return &v1.ValidatingAdmissionPolicyBindingList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go index 065e3c8341..11b4ac0591 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ValidatingWebhookConfigurationsGetter has a method to return a ValidatingWebhookConfigurationInterface. 
@@ -55,143 +52,18 @@ type ValidatingWebhookConfigurationInterface interface { // validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface type validatingWebhookConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.ValidatingWebhookConfiguration, *v1.ValidatingWebhookConfigurationList, *admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration] } // newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations func newValidatingWebhookConfigurations(c *AdmissionregistrationV1Client) *validatingWebhookConfigurations { return &validatingWebhookConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.ValidatingWebhookConfiguration, *v1.ValidatingWebhookConfigurationList, *admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration]( + "validatingwebhookconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.ValidatingWebhookConfiguration { return &v1.ValidatingWebhookConfiguration{} }, + func() *v1.ValidatingWebhookConfigurationList { return &v1.ValidatingWebhookConfigurationList{} }), } } - -// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. -func (c *validatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingWebhookConfiguration, err error) { - result = &v1.ValidatingWebhookConfiguration{} - err = c.client.Get(). - Resource("validatingwebhookconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. -func (c *validatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingWebhookConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ValidatingWebhookConfigurationList{} - err = c.client.Get(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. -func (c *validatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.ValidatingWebhookConfiguration, err error) { - result = &v1.ValidatingWebhookConfiguration{} - err = c.client.Post(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingWebhookConfiguration). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.ValidatingWebhookConfiguration, err error) { - result = &v1.ValidatingWebhookConfiguration{} - err = c.client.Put(). - Resource("validatingwebhookconfigurations"). - Name(validatingWebhookConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *validatingWebhookConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingwebhookconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *validatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched validatingWebhookConfiguration. -func (c *validatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) { - result = &v1.ValidatingWebhookConfiguration{} - err = c.client.Patch(pt). - Resource("validatingwebhookconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingWebhookConfiguration. -func (c *validatingWebhookConfigurations) Apply(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingWebhookConfiguration, err error) { - if validatingWebhookConfiguration == nil { - return nil, fmt.Errorf("validatingWebhookConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingWebhookConfiguration) - if err != nil { - return nil, err - } - name := validatingWebhookConfiguration.Name - if name == nil { - return nil, fmt.Errorf("validatingWebhookConfiguration.Name must be provided to Apply") - } - result = &v1.ValidatingWebhookConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingwebhookconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicy.go index f4358ce46c..ef4d843e00 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicy.go @@ -43,20 +43,22 @@ var validatingadmissionpoliciesKind = v1alpha1.SchemeGroupVersion.WithKind("Vali // Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any. func (c *FakeValidatingAdmissionPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1alpha1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(validatingadmissionpoliciesResource, name), &v1alpha1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootGetActionWithOptions(validatingadmissionpoliciesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ValidatingAdmissionPolicy), err } // List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors. func (c *FakeValidatingAdmissionPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ValidatingAdmissionPolicyList, err error) { + emptyResult := &v1alpha1.ValidatingAdmissionPolicyList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(validatingadmissionpoliciesResource, validatingadmissionpoliciesKind, opts), &v1alpha1.ValidatingAdmissionPolicyList{}) + Invokes(testing.NewRootListActionWithOptions(validatingadmissionpoliciesResource, validatingadmissionpoliciesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeValidatingAdmissionPolicies) List(ctx context.Context, opts v1.List // Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies. func (c *FakeValidatingAdmissionPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(validatingadmissionpoliciesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(validatingadmissionpoliciesResource, opts)) } // Create takes the representation of a validatingAdmissionPolicy and creates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. func (c *FakeValidatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1alpha1.ValidatingAdmissionPolicy{} obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(validatingadmissionpoliciesResource, validatingAdmissionPolicy), &v1alpha1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootCreateActionWithOptions(validatingadmissionpoliciesResource, validatingAdmissionPolicy, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ValidatingAdmissionPolicy), err } // Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. func (c *FakeValidatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1alpha1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(validatingadmissionpoliciesResource, validatingAdmissionPolicy), &v1alpha1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootUpdateActionWithOptions(validatingadmissionpoliciesResource, validatingAdmissionPolicy, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ValidatingAdmissionPolicy), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeValidatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) { +func (c *FakeValidatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1alpha1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(validatingadmissionpoliciesResource, "status", validatingAdmissionPolicy), &v1alpha1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(validatingadmissionpoliciesResource, "status", validatingAdmissionPolicy, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ValidatingAdmissionPolicy), err } @@ -118,7 +123,7 @@ func (c *FakeValidatingAdmissionPolicies) Delete(ctx context.Context, name strin // DeleteCollection deletes a collection of objects. func (c *FakeValidatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(validatingadmissionpoliciesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(validatingadmissionpoliciesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.ValidatingAdmissionPolicyList{}) return err @@ -126,10 +131,11 @@ func (c *FakeValidatingAdmissionPolicies) DeleteCollection(ctx context.Context, // Patch applies the patch and returns the patched validatingAdmissionPolicy. func (c *FakeValidatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1alpha1.ValidatingAdmissionPolicy{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, name, pt, data, subresources...), &v1alpha1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ValidatingAdmissionPolicy), err } @@ -147,10 +153,11 @@ func (c *FakeValidatingAdmissionPolicies) Apply(ctx context.Context, validatingA if name == nil { return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") } + emptyResult := &v1alpha1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data), &v1alpha1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ValidatingAdmissionPolicy), err } @@ -169,10 +176,11 @@ func (c *FakeValidatingAdmissionPolicies) ApplyStatus(ctx context.Context, valid if name == nil { return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") } + emptyResult := &v1alpha1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ValidatingAdmissionPolicy), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicybinding.go index c520655f9d..f7cc966fb1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicybinding.go @@ -43,20 +43,22 @@ var validatingadmissionpolicybindingsKind = v1alpha1.SchemeGroupVersion.WithKind // Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any. func (c *FakeValidatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { + emptyResult := &v1alpha1.ValidatingAdmissionPolicyBinding{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(validatingadmissionpolicybindingsResource, name), &v1alpha1.ValidatingAdmissionPolicyBinding{}) + Invokes(testing.NewRootGetActionWithOptions(validatingadmissionpolicybindingsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), err } // List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors. 
func (c *FakeValidatingAdmissionPolicyBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ValidatingAdmissionPolicyBindingList, err error) { + emptyResult := &v1alpha1.ValidatingAdmissionPolicyBindingList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(validatingadmissionpolicybindingsResource, validatingadmissionpolicybindingsKind, opts), &v1alpha1.ValidatingAdmissionPolicyBindingList{}) + Invokes(testing.NewRootListActionWithOptions(validatingadmissionpolicybindingsResource, validatingadmissionpolicybindingsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeValidatingAdmissionPolicyBindings) List(ctx context.Context, opts v // Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings. func (c *FakeValidatingAdmissionPolicyBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(validatingadmissionpolicybindingsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(validatingadmissionpolicybindingsResource, opts)) } // Create takes the representation of a validatingAdmissionPolicyBinding and creates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. func (c *FakeValidatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { + emptyResult := &v1alpha1.ValidatingAdmissionPolicyBinding{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding), &v1alpha1.ValidatingAdmissionPolicyBinding{}) + Invokes(testing.NewRootCreateActionWithOptions(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), err } // Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. func (c *FakeValidatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { + emptyResult := &v1alpha1.ValidatingAdmissionPolicyBinding{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding), &v1alpha1.ValidatingAdmissionPolicyBinding{}) + Invokes(testing.NewRootUpdateActionWithOptions(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), err } @@ -107,7 +111,7 @@ func (c *FakeValidatingAdmissionPolicyBindings) Delete(ctx context.Context, name // DeleteCollection deletes a collection of objects. 
func (c *FakeValidatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(validatingadmissionpolicybindingsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(validatingadmissionpolicybindingsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.ValidatingAdmissionPolicyBindingList{}) return err @@ -115,10 +119,11 @@ func (c *FakeValidatingAdmissionPolicyBindings) DeleteCollection(ctx context.Con // Patch applies the patch and returns the patched validatingAdmissionPolicyBinding. func (c *FakeValidatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { + emptyResult := &v1alpha1.ValidatingAdmissionPolicyBinding{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpolicybindingsResource, name, pt, data, subresources...), &v1alpha1.ValidatingAdmissionPolicyBinding{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpolicybindingsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), err } @@ -136,10 +141,11 @@ func (c *FakeValidatingAdmissionPolicyBindings) Apply(ctx context.Context, valid if name == nil { return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply") } + emptyResult := &v1alpha1.ValidatingAdmissionPolicyBinding{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpolicybindingsResource, *name, types.ApplyPatchType, data), &v1alpha1.ValidatingAdmissionPolicyBinding{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpolicybindingsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go index 1d994b5abf..c2b7c825cb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ValidatingAdmissionPoliciesGetter has a method to return a ValidatingAdmissionPolicyInterface. 
@@ -43,6 +40,7 @@ type ValidatingAdmissionPoliciesGetter interface { type ValidatingAdmissionPolicyInterface interface { Create(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) Update(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type ValidatingAdmissionPolicyInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicy, err error) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) ValidatingAdmissionPolicyExpansion } // validatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface type validatingAdmissionPolicies struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicy, *v1alpha1.ValidatingAdmissionPolicyList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration] } // newValidatingAdmissionPolicies returns a ValidatingAdmissionPolicies func newValidatingAdmissionPolicies(c *AdmissionregistrationV1alpha1Client) *validatingAdmissionPolicies { return &validatingAdmissionPolicies{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicy, *v1alpha1.ValidatingAdmissionPolicyList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration]( + "validatingadmissionpolicies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.ValidatingAdmissionPolicy { return &v1alpha1.ValidatingAdmissionPolicy{} }, + func() *v1alpha1.ValidatingAdmissionPolicyList { return &v1alpha1.ValidatingAdmissionPolicyList{} }), } } - -// Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any. -func (c *validatingAdmissionPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - result = &v1alpha1.ValidatingAdmissionPolicy{} - err = c.client.Get(). - Resource("validatingadmissionpolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors. 
-func (c *validatingAdmissionPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ValidatingAdmissionPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ValidatingAdmissionPolicyList{} - err = c.client.Get(). - Resource("validatingadmissionpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies. -func (c *validatingAdmissionPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("validatingadmissionpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a validatingAdmissionPolicy and creates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. -func (c *validatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - result = &v1alpha1.ValidatingAdmissionPolicy{} - err = c.client.Post(). - Resource("validatingadmissionpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. -func (c *validatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - result = &v1alpha1.ValidatingAdmissionPolicy{} - err = c.client.Put(). - Resource("validatingadmissionpolicies"). - Name(validatingAdmissionPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicy). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *validatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - result = &v1alpha1.ValidatingAdmissionPolicy{} - err = c.client.Put(). - Resource("validatingadmissionpolicies"). - Name(validatingAdmissionPolicy.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the validatingAdmissionPolicy and deletes it. Returns an error if one occurs. -func (c *validatingAdmissionPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingadmissionpolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *validatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("validatingadmissionpolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicy. -func (c *validatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - result = &v1alpha1.ValidatingAdmissionPolicy{} - err = c.client.Patch(pt). - Resource("validatingadmissionpolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicy. -func (c *validatingAdmissionPolicies) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - result = &v1alpha1.ValidatingAdmissionPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingadmissionpolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *validatingAdmissionPolicies) ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - - result = &v1alpha1.ValidatingAdmissionPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingadmissionpolicies"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go index 39823ca82b..d8d0796ead 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ValidatingAdmissionPolicyBindingsGetter has a method to return a ValidatingAdmissionPolicyBindingInterface. @@ -55,143 +52,20 @@ type ValidatingAdmissionPolicyBindingInterface interface { // validatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface type validatingAdmissionPolicyBindings struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicyBinding, *v1alpha1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration] } // newValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindings func newValidatingAdmissionPolicyBindings(c *AdmissionregistrationV1alpha1Client) *validatingAdmissionPolicyBindings { return &validatingAdmissionPolicyBindings{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicyBinding, *v1alpha1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration]( + "validatingadmissionpolicybindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.ValidatingAdmissionPolicyBinding { return &v1alpha1.ValidatingAdmissionPolicyBinding{} }, + func() *v1alpha1.ValidatingAdmissionPolicyBindingList { + return &v1alpha1.ValidatingAdmissionPolicyBindingList{} + }), } } - -// Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any. -func (c *validatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1alpha1.ValidatingAdmissionPolicyBinding{} - err = c.client.Get(). - Resource("validatingadmissionpolicybindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors. -func (c *validatingAdmissionPolicyBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ValidatingAdmissionPolicyBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ValidatingAdmissionPolicyBindingList{} - err = c.client.Get(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&opts, scheme.ParameterCodec). 
- Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings. -func (c *validatingAdmissionPolicyBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a validatingAdmissionPolicyBinding and creates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *validatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1alpha1.ValidatingAdmissionPolicyBinding{} - err = c.client.Post(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicyBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *validatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1alpha1.ValidatingAdmissionPolicyBinding{} - err = c.client.Put(). - Resource("validatingadmissionpolicybindings"). - Name(validatingAdmissionPolicyBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicyBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the validatingAdmissionPolicyBinding and deletes it. Returns an error if one occurs. -func (c *validatingAdmissionPolicyBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingadmissionpolicybindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *validatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicyBinding. -func (c *validatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1alpha1.ValidatingAdmissionPolicyBinding{} - err = c.client.Patch(pt). - Resource("validatingadmissionpolicybindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicyBinding. -func (c *validatingAdmissionPolicyBindings) Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - if validatingAdmissionPolicyBinding == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingAdmissionPolicyBinding) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicyBinding.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply") - } - result = &v1alpha1.ValidatingAdmissionPolicyBinding{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingadmissionpolicybindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go index 9d85aff37f..7671549323 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go @@ -43,20 +43,22 @@ var mutatingwebhookconfigurationsKind = v1beta1.SchemeGroupVersion.WithKind("Mut // Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. func (c *FakeMutatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { + emptyResult := &v1beta1.MutatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(mutatingwebhookconfigurationsResource, name), &v1beta1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootGetActionWithOptions(mutatingwebhookconfigurationsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.MutatingWebhookConfiguration), err } // List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. func (c *FakeMutatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) { + emptyResult := &v1beta1.MutatingWebhookConfigurationList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(mutatingwebhookconfigurationsResource, mutatingwebhookconfigurationsKind, opts), &v1beta1.MutatingWebhookConfigurationList{}) + Invokes(testing.NewRootListActionWithOptions(mutatingwebhookconfigurationsResource, mutatingwebhookconfigurationsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeMutatingWebhookConfigurations) List(ctx context.Context, opts v1.Li // Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. 
func (c *FakeMutatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(mutatingwebhookconfigurationsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(mutatingwebhookconfigurationsResource, opts)) } // Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. func (c *FakeMutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { + emptyResult := &v1beta1.MutatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1beta1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootCreateActionWithOptions(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.MutatingWebhookConfiguration), err } // Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. func (c *FakeMutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { + emptyResult := &v1beta1.MutatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1beta1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootUpdateActionWithOptions(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.MutatingWebhookConfiguration), err } @@ -107,7 +111,7 @@ func (c *FakeMutatingWebhookConfigurations) Delete(ctx context.Context, name str // DeleteCollection deletes a collection of objects. func (c *FakeMutatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(mutatingwebhookconfigurationsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(mutatingwebhookconfigurationsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.MutatingWebhookConfigurationList{}) return err @@ -115,10 +119,11 @@ func (c *FakeMutatingWebhookConfigurations) DeleteCollection(ctx context.Context // Patch applies the patch and returns the patched mutatingWebhookConfiguration. func (c *FakeMutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) { + emptyResult := &v1beta1.MutatingWebhookConfiguration{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1beta1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(mutatingwebhookconfigurationsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.MutatingWebhookConfiguration), err } @@ -136,10 +141,11 @@ func (c *FakeMutatingWebhookConfigurations) Apply(ctx context.Context, mutatingW if name == nil { return nil, fmt.Errorf("mutatingWebhookConfiguration.Name must be provided to Apply") } + emptyResult := &v1beta1.MutatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data), &v1beta1.MutatingWebhookConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(mutatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.MutatingWebhookConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicy.go index 90cb4ff6ca..e30891c779 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicy.go @@ -43,20 +43,22 @@ var validatingadmissionpoliciesKind = v1beta1.SchemeGroupVersion.WithKind("Valid // Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any. func (c *FakeValidatingAdmissionPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1beta1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(validatingadmissionpoliciesResource, name), &v1beta1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootGetActionWithOptions(validatingadmissionpoliciesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingAdmissionPolicy), err } // List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors. func (c *FakeValidatingAdmissionPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingAdmissionPolicyList, err error) { + emptyResult := &v1beta1.ValidatingAdmissionPolicyList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(validatingadmissionpoliciesResource, validatingadmissionpoliciesKind, opts), &v1beta1.ValidatingAdmissionPolicyList{}) + Invokes(testing.NewRootListActionWithOptions(validatingadmissionpoliciesResource, validatingadmissionpoliciesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeValidatingAdmissionPolicies) List(ctx context.Context, opts v1.List // Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies. 
func (c *FakeValidatingAdmissionPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(validatingadmissionpoliciesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(validatingadmissionpoliciesResource, opts)) } // Create takes the representation of a validatingAdmissionPolicy and creates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. func (c *FakeValidatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1beta1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(validatingadmissionpoliciesResource, validatingAdmissionPolicy), &v1beta1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootCreateActionWithOptions(validatingadmissionpoliciesResource, validatingAdmissionPolicy, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingAdmissionPolicy), err } // Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. func (c *FakeValidatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1beta1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(validatingadmissionpoliciesResource, validatingAdmissionPolicy), &v1beta1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootUpdateActionWithOptions(validatingadmissionpoliciesResource, validatingAdmissionPolicy, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingAdmissionPolicy), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeValidatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicy, error) { +func (c *FakeValidatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1beta1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(validatingadmissionpoliciesResource, "status", validatingAdmissionPolicy), &v1beta1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(validatingadmissionpoliciesResource, "status", validatingAdmissionPolicy, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingAdmissionPolicy), err } @@ -118,7 +123,7 @@ func (c *FakeValidatingAdmissionPolicies) Delete(ctx context.Context, name strin // DeleteCollection deletes a collection of objects. 
func (c *FakeValidatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(validatingadmissionpoliciesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(validatingadmissionpoliciesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.ValidatingAdmissionPolicyList{}) return err @@ -126,10 +131,11 @@ func (c *FakeValidatingAdmissionPolicies) DeleteCollection(ctx context.Context, // Patch applies the patch and returns the patched validatingAdmissionPolicy. func (c *FakeValidatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicy, err error) { + emptyResult := &v1beta1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, name, pt, data, subresources...), &v1beta1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingAdmissionPolicy), err } @@ -147,10 +153,11 @@ func (c *FakeValidatingAdmissionPolicies) Apply(ctx context.Context, validatingA if name == nil { return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") } + emptyResult := &v1beta1.ValidatingAdmissionPolicy{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data), &v1beta1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingAdmissionPolicy), err } @@ -169,10 +176,11 @@ func (c *FakeValidatingAdmissionPolicies) ApplyStatus(ctx context.Context, valid if name == nil { return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") } + emptyResult := &v1beta1.ValidatingAdmissionPolicy{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, "status"), &v1beta1.ValidatingAdmissionPolicy{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingAdmissionPolicy), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicybinding.go index f771f81f30..207db37529 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicybinding.go @@ -43,20 +43,22 @@ var validatingadmissionpolicybindingsKind = v1beta1.SchemeGroupVersion.WithKind( // Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any. func (c *FakeValidatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { + emptyResult := &v1beta1.ValidatingAdmissionPolicyBinding{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(validatingadmissionpolicybindingsResource, name), &v1beta1.ValidatingAdmissionPolicyBinding{}) + Invokes(testing.NewRootGetActionWithOptions(validatingadmissionpolicybindingsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), err } // List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors. func (c *FakeValidatingAdmissionPolicyBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingAdmissionPolicyBindingList, err error) { + emptyResult := &v1beta1.ValidatingAdmissionPolicyBindingList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(validatingadmissionpolicybindingsResource, validatingadmissionpolicybindingsKind, opts), &v1beta1.ValidatingAdmissionPolicyBindingList{}) + Invokes(testing.NewRootListActionWithOptions(validatingadmissionpolicybindingsResource, validatingadmissionpolicybindingsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeValidatingAdmissionPolicyBindings) List(ctx context.Context, opts v // Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings. func (c *FakeValidatingAdmissionPolicyBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(validatingadmissionpolicybindingsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(validatingadmissionpolicybindingsResource, opts)) } // Create takes the representation of a validatingAdmissionPolicyBinding and creates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. 
func (c *FakeValidatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { + emptyResult := &v1beta1.ValidatingAdmissionPolicyBinding{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding), &v1beta1.ValidatingAdmissionPolicyBinding{}) + Invokes(testing.NewRootCreateActionWithOptions(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), err } // Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. func (c *FakeValidatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { + emptyResult := &v1beta1.ValidatingAdmissionPolicyBinding{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding), &v1beta1.ValidatingAdmissionPolicyBinding{}) + Invokes(testing.NewRootUpdateActionWithOptions(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), err } @@ -107,7 +111,7 @@ func (c *FakeValidatingAdmissionPolicyBindings) Delete(ctx context.Context, name // DeleteCollection deletes a collection of objects. func (c *FakeValidatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(validatingadmissionpolicybindingsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(validatingadmissionpolicybindingsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.ValidatingAdmissionPolicyBindingList{}) return err @@ -115,10 +119,11 @@ func (c *FakeValidatingAdmissionPolicyBindings) DeleteCollection(ctx context.Con // Patch applies the patch and returns the patched validatingAdmissionPolicyBinding. func (c *FakeValidatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { + emptyResult := &v1beta1.ValidatingAdmissionPolicyBinding{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpolicybindingsResource, name, pt, data, subresources...), &v1beta1.ValidatingAdmissionPolicyBinding{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpolicybindingsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), err } @@ -136,10 +141,11 @@ func (c *FakeValidatingAdmissionPolicyBindings) Apply(ctx context.Context, valid if name == nil { return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply") } + emptyResult := &v1beta1.ValidatingAdmissionPolicyBinding{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpolicybindingsResource, *name, types.ApplyPatchType, data), &v1beta1.ValidatingAdmissionPolicyBinding{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingadmissionpolicybindingsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go index 41e3a7c1ee..f78a31ee0e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go @@ -43,20 +43,22 @@ var validatingwebhookconfigurationsKind = v1beta1.SchemeGroupVersion.WithKind("V // Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. func (c *FakeValidatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + emptyResult := &v1beta1.ValidatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(validatingwebhookconfigurationsResource, name), &v1beta1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootGetActionWithOptions(validatingwebhookconfigurationsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingWebhookConfiguration), err } // List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. func (c *FakeValidatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) { + emptyResult := &v1beta1.ValidatingWebhookConfigurationList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(validatingwebhookconfigurationsResource, validatingwebhookconfigurationsKind, opts), &v1beta1.ValidatingWebhookConfigurationList{}) + Invokes(testing.NewRootListActionWithOptions(validatingwebhookconfigurationsResource, validatingwebhookconfigurationsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeValidatingWebhookConfigurations) List(ctx context.Context, opts v1. // Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. func (c *FakeValidatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(validatingwebhookconfigurationsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(validatingwebhookconfigurationsResource, opts)) } // Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. 
func (c *FakeValidatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + emptyResult := &v1beta1.ValidatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1beta1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootCreateActionWithOptions(validatingwebhookconfigurationsResource, validatingWebhookConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingWebhookConfiguration), err } // Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. func (c *FakeValidatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + emptyResult := &v1beta1.ValidatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1beta1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootUpdateActionWithOptions(validatingwebhookconfigurationsResource, validatingWebhookConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingWebhookConfiguration), err } @@ -107,7 +111,7 @@ func (c *FakeValidatingWebhookConfigurations) Delete(ctx context.Context, name s // DeleteCollection deletes a collection of objects. func (c *FakeValidatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(validatingwebhookconfigurationsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(validatingwebhookconfigurationsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.ValidatingWebhookConfigurationList{}) return err @@ -115,10 +119,11 @@ func (c *FakeValidatingWebhookConfigurations) DeleteCollection(ctx context.Conte // Patch applies the patch and returns the patched validatingWebhookConfiguration. func (c *FakeValidatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + emptyResult := &v1beta1.ValidatingWebhookConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1beta1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingwebhookconfigurationsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingWebhookConfiguration), err } @@ -136,10 +141,11 @@ func (c *FakeValidatingWebhookConfigurations) Apply(ctx context.Context, validat if name == nil { return nil, fmt.Errorf("validatingWebhookConfiguration.Name must be provided to Apply") } + emptyResult := &v1beta1.ValidatingWebhookConfiguration{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data), &v1beta1.ValidatingWebhookConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(validatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ValidatingWebhookConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index ca6bb8bd50..7a5bc8b9b3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // MutatingWebhookConfigurationsGetter has a method to return a MutatingWebhookConfigurationInterface. @@ -55,143 +52,18 @@ type MutatingWebhookConfigurationInterface interface { // mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface type mutatingWebhookConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.MutatingWebhookConfiguration, *v1beta1.MutatingWebhookConfigurationList, *admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration] } // newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations func newMutatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *mutatingWebhookConfigurations { return &mutatingWebhookConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.MutatingWebhookConfiguration, *v1beta1.MutatingWebhookConfigurationList, *admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration]( + "mutatingwebhookconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.MutatingWebhookConfiguration { return &v1beta1.MutatingWebhookConfiguration{} }, + func() *v1beta1.MutatingWebhookConfigurationList { return &v1beta1.MutatingWebhookConfigurationList{} }), } } - -// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. -func (c *mutatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { - result = &v1beta1.MutatingWebhookConfiguration{} - err = c.client.Get(). - Resource("mutatingwebhookconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. 
-func (c *mutatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.MutatingWebhookConfigurationList{} - err = c.client.Get(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. -func (c *mutatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *mutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { - result = &v1beta1.MutatingWebhookConfiguration{} - err = c.client.Post(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(mutatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *mutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { - result = &v1beta1.MutatingWebhookConfiguration{} - err = c.client.Put(). - Resource("mutatingwebhookconfigurations"). - Name(mutatingWebhookConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(mutatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *mutatingWebhookConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("mutatingwebhookconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *mutatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched mutatingWebhookConfiguration. 
-func (c *mutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) { - result = &v1beta1.MutatingWebhookConfiguration{} - err = c.client.Patch(pt). - Resource("mutatingwebhookconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied mutatingWebhookConfiguration. -func (c *mutatingWebhookConfigurations) Apply(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { - if mutatingWebhookConfiguration == nil { - return nil, fmt.Errorf("mutatingWebhookConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(mutatingWebhookConfiguration) - if err != nil { - return nil, err - } - name := mutatingWebhookConfiguration.Name - if name == nil { - return nil, fmt.Errorf("mutatingWebhookConfiguration.Name must be provided to Apply") - } - result = &v1beta1.MutatingWebhookConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("mutatingwebhookconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go index bea51b587f..0023d8837c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ValidatingAdmissionPoliciesGetter has a method to return a ValidatingAdmissionPolicyInterface. @@ -43,6 +40,7 @@ type ValidatingAdmissionPoliciesGetter interface { type ValidatingAdmissionPolicyInterface interface { Create(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*v1beta1.ValidatingAdmissionPolicy, error) Update(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicy, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicy, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type ValidatingAdmissionPolicyInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicy, err error) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) ValidatingAdmissionPolicyExpansion } // validatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface type validatingAdmissionPolicies struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicy, *v1beta1.ValidatingAdmissionPolicyList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration] } // newValidatingAdmissionPolicies returns a ValidatingAdmissionPolicies func newValidatingAdmissionPolicies(c *AdmissionregistrationV1beta1Client) *validatingAdmissionPolicies { return &validatingAdmissionPolicies{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicy, *v1beta1.ValidatingAdmissionPolicyList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration]( + "validatingadmissionpolicies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.ValidatingAdmissionPolicy { return &v1beta1.ValidatingAdmissionPolicy{} }, + func() *v1beta1.ValidatingAdmissionPolicyList { return &v1beta1.ValidatingAdmissionPolicyList{} }), } } - -// Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any. -func (c *validatingAdmissionPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - result = &v1beta1.ValidatingAdmissionPolicy{} - err = c.client.Get(). - Resource("validatingadmissionpolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors. -func (c *validatingAdmissionPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingAdmissionPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ValidatingAdmissionPolicyList{} - err = c.client.Get(). - Resource("validatingadmissionpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies. 
-func (c *validatingAdmissionPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("validatingadmissionpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a validatingAdmissionPolicy and creates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. -func (c *validatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - result = &v1beta1.ValidatingAdmissionPolicy{} - err = c.client.Post(). - Resource("validatingadmissionpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. -func (c *validatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - result = &v1beta1.ValidatingAdmissionPolicy{} - err = c.client.Put(). - Resource("validatingadmissionpolicies"). - Name(validatingAdmissionPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicy). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *validatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - result = &v1beta1.ValidatingAdmissionPolicy{} - err = c.client.Put(). - Resource("validatingadmissionpolicies"). - Name(validatingAdmissionPolicy.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the validatingAdmissionPolicy and deletes it. Returns an error if one occurs. -func (c *validatingAdmissionPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingadmissionpolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *validatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("validatingadmissionpolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicy. 
-func (c *validatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - result = &v1beta1.ValidatingAdmissionPolicy{} - err = c.client.Patch(pt). - Resource("validatingadmissionpolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicy. -func (c *validatingAdmissionPolicies) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - result = &v1beta1.ValidatingAdmissionPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingadmissionpolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *validatingAdmissionPolicies) ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - - result = &v1beta1.ValidatingAdmissionPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingadmissionpolicies"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go index bba37bb047..8168d8cbcd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ValidatingAdmissionPolicyBindingsGetter has a method to return a ValidatingAdmissionPolicyBindingInterface. @@ -55,143 +52,20 @@ type ValidatingAdmissionPolicyBindingInterface interface { // validatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface type validatingAdmissionPolicyBindings struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicyBinding, *v1beta1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration] } // newValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindings func newValidatingAdmissionPolicyBindings(c *AdmissionregistrationV1beta1Client) *validatingAdmissionPolicyBindings { return &validatingAdmissionPolicyBindings{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicyBinding, *v1beta1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration]( + "validatingadmissionpolicybindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.ValidatingAdmissionPolicyBinding { return &v1beta1.ValidatingAdmissionPolicyBinding{} }, + func() *v1beta1.ValidatingAdmissionPolicyBindingList { + return &v1beta1.ValidatingAdmissionPolicyBindingList{} + }), } } - -// Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any. -func (c *validatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1beta1.ValidatingAdmissionPolicyBinding{} - err = c.client.Get(). - Resource("validatingadmissionpolicybindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors. -func (c *validatingAdmissionPolicyBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingAdmissionPolicyBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ValidatingAdmissionPolicyBindingList{} - err = c.client.Get(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings. -func (c *validatingAdmissionPolicyBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a validatingAdmissionPolicyBinding and creates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *validatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1beta1.ValidatingAdmissionPolicyBinding{} - err = c.client.Post(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicyBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *validatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1beta1.ValidatingAdmissionPolicyBinding{} - err = c.client.Put(). - Resource("validatingadmissionpolicybindings"). - Name(validatingAdmissionPolicyBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicyBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the validatingAdmissionPolicyBinding and deletes it. Returns an error if one occurs. -func (c *validatingAdmissionPolicyBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingadmissionpolicybindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *validatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicyBinding. -func (c *validatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1beta1.ValidatingAdmissionPolicyBinding{} - err = c.client.Patch(pt). - Resource("validatingadmissionpolicybindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicyBinding. -func (c *validatingAdmissionPolicyBindings) Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { - if validatingAdmissionPolicyBinding == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingAdmissionPolicyBinding) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicyBinding.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply") - } - result = &v1beta1.ValidatingAdmissionPolicyBinding{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingadmissionpolicybindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go index 5ba5974d7a..5abd96823d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ValidatingWebhookConfigurationsGetter has a method to return a ValidatingWebhookConfigurationInterface. 
@@ -55,143 +52,20 @@ type ValidatingWebhookConfigurationInterface interface { // validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface type validatingWebhookConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.ValidatingWebhookConfiguration, *v1beta1.ValidatingWebhookConfigurationList, *admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration] } // newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations func newValidatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *validatingWebhookConfigurations { return &validatingWebhookConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.ValidatingWebhookConfiguration, *v1beta1.ValidatingWebhookConfigurationList, *admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration]( + "validatingwebhookconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.ValidatingWebhookConfiguration { return &v1beta1.ValidatingWebhookConfiguration{} }, + func() *v1beta1.ValidatingWebhookConfigurationList { + return &v1beta1.ValidatingWebhookConfigurationList{} + }), } } - -// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. -func (c *validatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - result = &v1beta1.ValidatingWebhookConfiguration{} - err = c.client.Get(). - Resource("validatingwebhookconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. -func (c *validatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ValidatingWebhookConfigurationList{} - err = c.client.Get(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. -func (c *validatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - result = &v1beta1.ValidatingWebhookConfiguration{} - err = c.client.Post(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). 
- Body(validatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - result = &v1beta1.ValidatingWebhookConfiguration{} - err = c.client.Put(). - Resource("validatingwebhookconfigurations"). - Name(validatingWebhookConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *validatingWebhookConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingwebhookconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *validatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched validatingWebhookConfiguration. -func (c *validatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - result = &v1beta1.ValidatingWebhookConfiguration{} - err = c.client.Patch(pt). - Resource("validatingwebhookconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingWebhookConfiguration. -func (c *validatingWebhookConfigurations) Apply(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - if validatingWebhookConfiguration == nil { - return nil, fmt.Errorf("validatingWebhookConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingWebhookConfiguration) - if err != nil { - return nil, err - } - name := validatingWebhookConfiguration.Name - if name == nil { - return nil, fmt.Errorf("validatingWebhookConfiguration.Name must be provided to Apply") - } - result = &v1beta1.ValidatingWebhookConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingwebhookconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go index 738c68038b..e9f0b78d46 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go @@ -43,20 +43,22 @@ var storageversionsKind = v1alpha1.SchemeGroupVersion.WithKind("StorageVersion") // Get takes name of the storageVersion, and returns the corresponding storageVersion object, and an error if there is any. func (c *FakeStorageVersions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersion, err error) { + emptyResult := &v1alpha1.StorageVersion{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(storageversionsResource, name), &v1alpha1.StorageVersion{}) + Invokes(testing.NewRootGetActionWithOptions(storageversionsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.StorageVersion), err } // List takes label and field selectors, and returns the list of StorageVersions that match those selectors. func (c *FakeStorageVersions) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionList, err error) { + emptyResult := &v1alpha1.StorageVersionList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(storageversionsResource, storageversionsKind, opts), &v1alpha1.StorageVersionList{}) + Invokes(testing.NewRootListActionWithOptions(storageversionsResource, storageversionsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeStorageVersions) List(ctx context.Context, opts v1.ListOptions) (re // Watch returns a watch.Interface that watches the requested storageVersions. func (c *FakeStorageVersions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(storageversionsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(storageversionsResource, opts)) } // Create takes the representation of a storageVersion and creates it. Returns the server's representation of the storageVersion, and an error, if there is any. func (c *FakeStorageVersions) Create(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.CreateOptions) (result *v1alpha1.StorageVersion, err error) { + emptyResult := &v1alpha1.StorageVersion{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(storageversionsResource, storageVersion), &v1alpha1.StorageVersion{}) + Invokes(testing.NewRootCreateActionWithOptions(storageversionsResource, storageVersion, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.StorageVersion), err } // Update takes the representation of a storageVersion and updates it. Returns the server's representation of the storageVersion, and an error, if there is any. func (c *FakeStorageVersions) Update(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) { + emptyResult := &v1alpha1.StorageVersion{} obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateAction(storageversionsResource, storageVersion), &v1alpha1.StorageVersion{}) + Invokes(testing.NewRootUpdateActionWithOptions(storageversionsResource, storageVersion, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.StorageVersion), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeStorageVersions) UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error) { +func (c *FakeStorageVersions) UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) { + emptyResult := &v1alpha1.StorageVersion{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(storageversionsResource, "status", storageVersion), &v1alpha1.StorageVersion{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(storageversionsResource, "status", storageVersion, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.StorageVersion), err } @@ -118,7 +123,7 @@ func (c *FakeStorageVersions) Delete(ctx context.Context, name string, opts v1.D // DeleteCollection deletes a collection of objects. func (c *FakeStorageVersions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(storageversionsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(storageversionsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.StorageVersionList{}) return err @@ -126,10 +131,11 @@ func (c *FakeStorageVersions) DeleteCollection(ctx context.Context, opts v1.Dele // Patch applies the patch and returns the patched storageVersion. func (c *FakeStorageVersions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersion, err error) { + emptyResult := &v1alpha1.StorageVersion{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(storageversionsResource, name, pt, data, subresources...), &v1alpha1.StorageVersion{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageversionsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.StorageVersion), err } @@ -147,10 +153,11 @@ func (c *FakeStorageVersions) Apply(ctx context.Context, storageVersion *apiserv if name == nil { return nil, fmt.Errorf("storageVersion.Name must be provided to Apply") } + emptyResult := &v1alpha1.StorageVersion{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(storageversionsResource, *name, types.ApplyPatchType, data), &v1alpha1.StorageVersion{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageversionsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.StorageVersion), err } @@ -169,10 +176,11 @@ func (c *FakeStorageVersions) ApplyStatus(ctx context.Context, storageVersion *a if name == nil { return nil, fmt.Errorf("storageVersion.Name must be provided to Apply") } + emptyResult := &v1alpha1.StorageVersion{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(storageversionsResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.StorageVersion{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageversionsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.StorageVersion), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go index 18789c7f82..436593f7fa 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" apiserverinternalv1alpha1 "k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // StorageVersionsGetter has a method to return a StorageVersionInterface. @@ -43,6 +40,7 @@ type StorageVersionsGetter interface { type StorageVersionInterface interface { Create(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.CreateOptions) (*v1alpha1.StorageVersion, error) Update(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type StorageVersionInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersion, err error) Apply(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error) StorageVersionExpansion } // storageVersions implements StorageVersionInterface type storageVersions struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.StorageVersion, *v1alpha1.StorageVersionList, *apiserverinternalv1alpha1.StorageVersionApplyConfiguration] } // newStorageVersions returns a StorageVersions func newStorageVersions(c *InternalV1alpha1Client) *storageVersions { return &storageVersions{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.StorageVersion, *v1alpha1.StorageVersionList, *apiserverinternalv1alpha1.StorageVersionApplyConfiguration]( + "storageversions", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.StorageVersion { return &v1alpha1.StorageVersion{} }, + func() *v1alpha1.StorageVersionList { return &v1alpha1.StorageVersionList{} }), } } - -// Get takes name of the storageVersion, and returns the corresponding storageVersion object, and an error if there is any. -func (c *storageVersions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersion, err error) { - result = &v1alpha1.StorageVersion{} - err = c.client.Get(). - Resource("storageversions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StorageVersions that match those selectors. -func (c *storageVersions) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.StorageVersionList{} - err = c.client.Get(). - Resource("storageversions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested storageVersions. -func (c *storageVersions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("storageversions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a storageVersion and creates it. Returns the server's representation of the storageVersion, and an error, if there is any. -func (c *storageVersions) Create(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.CreateOptions) (result *v1alpha1.StorageVersion, err error) { - result = &v1alpha1.StorageVersion{} - err = c.client.Post(). - Resource("storageversions"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageVersion). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a storageVersion and updates it. Returns the server's representation of the storageVersion, and an error, if there is any. -func (c *storageVersions) Update(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) { - result = &v1alpha1.StorageVersion{} - err = c.client.Put(). - Resource("storageversions"). - Name(storageVersion.Name). - VersionedParams(&opts, scheme.ParameterCodec). 
- Body(storageVersion). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *storageVersions) UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) { - result = &v1alpha1.StorageVersion{} - err = c.client.Put(). - Resource("storageversions"). - Name(storageVersion.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageVersion). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the storageVersion and deletes it. Returns an error if one occurs. -func (c *storageVersions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("storageversions"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *storageVersions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("storageversions"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched storageVersion. -func (c *storageVersions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersion, err error) { - result = &v1alpha1.StorageVersion{} - err = c.client.Patch(pt). - Resource("storageversions"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied storageVersion. -func (c *storageVersions) Apply(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error) { - if storageVersion == nil { - return nil, fmt.Errorf("storageVersion provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(storageVersion) - if err != nil { - return nil, err - } - name := storageVersion.Name - if name == nil { - return nil, fmt.Errorf("storageVersion.Name must be provided to Apply") - } - result = &v1alpha1.StorageVersion{} - err = c.client.Patch(types.ApplyPatchType). - Resource("storageversions"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *storageVersions) ApplyStatus(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error) { - if storageVersion == nil { - return nil, fmt.Errorf("storageVersion provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(storageVersion) - if err != nil { - return nil, err - } - - name := storageVersion.Name - if name == nil { - return nil, fmt.Errorf("storageVersion.Name must be provided to Apply") - } - - result = &v1alpha1.StorageVersion{} - err = c.client.Patch(types.ApplyPatchType). - Resource("storageversions"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go index f4b198265d..252f47ba29 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ControllerRevisionsGetter has a method to return a ControllerRevisionInterface. @@ -55,154 +52,18 @@ type ControllerRevisionInterface interface { // controllerRevisions implements ControllerRevisionInterface type controllerRevisions struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.ControllerRevision, *v1.ControllerRevisionList, *appsv1.ControllerRevisionApplyConfiguration] } // newControllerRevisions returns a ControllerRevisions func newControllerRevisions(c *AppsV1Client, namespace string) *controllerRevisions { return &controllerRevisions{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.ControllerRevision, *v1.ControllerRevisionList, *appsv1.ControllerRevisionApplyConfiguration]( + "controllerrevisions", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.ControllerRevision { return &v1.ControllerRevision{} }, + func() *v1.ControllerRevisionList { return &v1.ControllerRevisionList{} }), } } - -// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *controllerRevisions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ControllerRevision, err error) { - result = &v1.ControllerRevision{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *controllerRevisions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ControllerRevisionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ControllerRevisionList{} - err = c.client.Get(). - Namespace(c.ns). 
- Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested controllerRevisions. -func (c *controllerRevisions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.CreateOptions) (result *v1.ControllerRevision, err error) { - result = &v1.ControllerRevision{} - err = c.client.Post(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(controllerRevision). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.UpdateOptions) (result *v1.ControllerRevision, err error) { - result = &v1.ControllerRevision{} - err = c.client.Put(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(controllerRevision.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(controllerRevision). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *controllerRevisions) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *controllerRevisions) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched controllerRevision. -func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ControllerRevision, err error) { - result = &v1.ControllerRevision{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied controllerRevision. 
-func (c *controllerRevisions) Apply(ctx context.Context, controllerRevision *appsv1.ControllerRevisionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ControllerRevision, err error) { - if controllerRevision == nil { - return nil, fmt.Errorf("controllerRevision provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(controllerRevision) - if err != nil { - return nil, err - } - name := controllerRevision.Name - if name == nil { - return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply") - } - result = &v1.ControllerRevision{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go index 53e5392879..28917a7960 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DaemonSetsGetter has a method to return a DaemonSetInterface. @@ -43,6 +40,7 @@ type DaemonSetsGetter interface { type DaemonSetInterface interface { Create(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.CreateOptions) (*v1.DaemonSet, error) Update(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,206 +49,25 @@ type DaemonSetInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DaemonSet, err error) Apply(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error) DaemonSetExpansion } // daemonSets implements DaemonSetInterface type daemonSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.DaemonSet, *v1.DaemonSetList, *appsv1.DaemonSetApplyConfiguration] } // newDaemonSets returns a DaemonSets func newDaemonSets(c *AppsV1Client, namespace string) *daemonSets { return &daemonSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.DaemonSet, *v1.DaemonSetList, *appsv1.DaemonSetApplyConfiguration]( + "daemonsets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.DaemonSet { return &v1.DaemonSet{} }, + func() *v1.DaemonSetList { return &v1.DaemonSetList{} }), } } - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *daemonSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.DaemonSet, err error) { - result = &v1.DaemonSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *daemonSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DaemonSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.DaemonSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *daemonSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.CreateOptions) (result *v1.DaemonSet, err error) { - result = &v1.DaemonSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) { - result = &v1.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) { - result = &v1.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *daemonSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *daemonSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched daemonSet. -func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DaemonSet, err error) { - result = &v1.DaemonSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied daemonSet. -func (c *daemonSets) Apply(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - result = &v1.DaemonSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("daemonsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *daemonSets) ApplyStatus(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - - result = &v1.DaemonSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("daemonsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go index ccc2049ff7..871d51cfe2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go @@ -22,7 +22,6 @@ import ( "context" json "encoding/json" "fmt" - "time" v1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" @@ -31,8 +30,8 @@ import ( watch "k8s.io/apimachinery/pkg/watch" appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DeploymentsGetter has a method to return a DeploymentInterface. @@ -45,6 +44,7 @@ type DeploymentsGetter interface { type DeploymentInterface interface { Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (*v1.Deployment, error) Update(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -53,6 +53,7 @@ type DeploymentInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Deployment, err error) Apply(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) @@ -63,209 +64,27 @@ type DeploymentInterface interface { // deployments implements DeploymentInterface type deployments struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Deployment, *v1.DeploymentList, *appsv1.DeploymentApplyConfiguration] } // newDeployments returns a Deployments func newDeployments(c *AppsV1Client, namespace string) *deployments { return &deployments{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Deployment, *v1.DeploymentList, *appsv1.DeploymentApplyConfiguration]( + "deployments", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Deployment { return &v1.Deployment{} }, + func() *v1.DeploymentList { return &v1.DeploymentList{} }), } } -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Deployment, err error) { - result = &v1.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). 
- Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DeploymentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (result *v1.Deployment, err error) { - result = &v1.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) { - result = &v1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) { - result = &v1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). 
- Error() -} - -// Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Deployment, err error) { - result = &v1.Deployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("deployments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment. -func (c *deployments) Apply(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - result = &v1.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *deployments) ApplyStatus(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - - result = &v1.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // GetScale takes name of the deployment, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. func (c *deployments) GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). @@ -278,8 +97,8 @@ func (c *deployments) GetScale(ctx context.Context, deploymentName string, optio // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). 
@@ -303,8 +122,8 @@ func (c *deployments) ApplyScale(ctx context.Context, deploymentName string, sca } result = &autoscalingv1.Scale{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). + err = c.GetClient().Patch(types.ApplyPatchType). + Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go index f691ba9acd..c609ef534a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go @@ -44,22 +44,24 @@ var controllerrevisionsKind = v1.SchemeGroupVersion.WithKind("ControllerRevision // Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. func (c *FakeControllerRevisions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ControllerRevision, err error) { + emptyResult := &v1.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewGetAction(controllerrevisionsResource, c.ns, name), &v1.ControllerRevision{}) + Invokes(testing.NewGetActionWithOptions(controllerrevisionsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ControllerRevision), err } // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. func (c *FakeControllerRevisions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ControllerRevisionList, err error) { + emptyResult := &v1.ControllerRevisionList{} obj, err := c.Fake. - Invokes(testing.NewListAction(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), &v1.ControllerRevisionList{}) + Invokes(testing.NewListActionWithOptions(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeControllerRevisions) List(ctx context.Context, opts metav1.ListOpti // Watch returns a watch.Interface that watches the requested controllerRevisions. func (c *FakeControllerRevisions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(controllerrevisionsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(controllerrevisionsResource, c.ns, opts)) } // Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.CreateOptions) (result *v1.ControllerRevision, err error) { + emptyResult := &v1.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1.ControllerRevision{}) + Invokes(testing.NewCreateActionWithOptions(controllerrevisionsResource, c.ns, controllerRevision, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ControllerRevision), err } // Update takes the representation of a controllerRevision and updates it. 
Returns the server's representation of the controllerRevision, and an error, if there is any. func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.UpdateOptions) (result *v1.ControllerRevision, err error) { + emptyResult := &v1.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1.ControllerRevision{}) + Invokes(testing.NewUpdateActionWithOptions(controllerrevisionsResource, c.ns, controllerRevision, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ControllerRevision), err } @@ -114,7 +118,7 @@ func (c *FakeControllerRevisions) Delete(ctx context.Context, name string, opts // DeleteCollection deletes a collection of objects. func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(controllerrevisionsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(controllerrevisionsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.ControllerRevisionList{}) return err @@ -122,11 +126,12 @@ func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts met // Patch applies the patch and returns the patched controllerRevision. func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ControllerRevision, err error) { + emptyResult := &v1.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &v1.ControllerRevision{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(controllerrevisionsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ControllerRevision), err } @@ -144,11 +149,12 @@ func (c *FakeControllerRevisions) Apply(ctx context.Context, controllerRevision if name == nil { return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply") } + emptyResult := &v1.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data), &v1.ControllerRevision{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ControllerRevision), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go index 3e0df72352..bac3fc1225 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go @@ -44,22 +44,24 @@ var daemonsetsKind = v1.SchemeGroupVersion.WithKind("DaemonSet") // Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. func (c *FakeDaemonSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.DaemonSet, err error) { + emptyResult := &v1.DaemonSet{} obj, err := c.Fake. 
- Invokes(testing.NewGetAction(daemonsetsResource, c.ns, name), &v1.DaemonSet{}) + Invokes(testing.NewGetActionWithOptions(daemonsetsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.DaemonSet), err } // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. func (c *FakeDaemonSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DaemonSetList, err error) { + emptyResult := &v1.DaemonSetList{} obj, err := c.Fake. - Invokes(testing.NewListAction(daemonsetsResource, daemonsetsKind, c.ns, opts), &v1.DaemonSetList{}) + Invokes(testing.NewListActionWithOptions(daemonsetsResource, daemonsetsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeDaemonSets) List(ctx context.Context, opts metav1.ListOptions) (res // Watch returns a watch.Interface that watches the requested daemonSets. func (c *FakeDaemonSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(daemonsetsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(daemonsetsResource, c.ns, opts)) } // Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.CreateOptions) (result *v1.DaemonSet, err error) { + emptyResult := &v1.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1.DaemonSet{}) + Invokes(testing.NewCreateActionWithOptions(daemonsetsResource, c.ns, daemonSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.DaemonSet), err } // Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) { + emptyResult := &v1.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1.DaemonSet{}) + Invokes(testing.NewUpdateActionWithOptions(daemonsetsResource, c.ns, daemonSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.DaemonSet), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error) { +func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) { + emptyResult := &v1.DaemonSet{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1.DaemonSet{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(daemonsetsResource, "status", c.ns, daemonSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.DaemonSet), err } @@ -126,7 +131,7 @@ func (c *FakeDaemonSets) Delete(ctx context.Context, name string, opts metav1.De // DeleteCollection deletes a collection of objects. func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(daemonsetsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.DaemonSetList{}) return err @@ -134,11 +139,12 @@ func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts metav1.Delet // Patch applies the patch and returns the patched daemonSet. func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DaemonSet, err error) { + emptyResult := &v1.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &v1.DaemonSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.DaemonSet), err } @@ -156,11 +162,12 @@ func (c *FakeDaemonSets) Apply(ctx context.Context, daemonSet *appsv1.DaemonSetA if name == nil { return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") } + emptyResult := &v1.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1.DaemonSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.DaemonSet), err } @@ -179,11 +186,12 @@ func (c *FakeDaemonSets) ApplyStatus(ctx context.Context, daemonSet *appsv1.Daem if name == nil { return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") } + emptyResult := &v1.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.DaemonSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.DaemonSet), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go index da1896fe60..8ed8432883 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go @@ -46,22 +46,24 @@ var deploymentsKind = v1.SchemeGroupVersion.WithKind("Deployment") // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. 
func (c *FakeDeployments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Deployment, err error) { + emptyResult := &v1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1.Deployment{}) + Invokes(testing.NewGetActionWithOptions(deploymentsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Deployment), err } // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *FakeDeployments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DeploymentList, err error) { + emptyResult := &v1.DeploymentList{} obj, err := c.Fake. - Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1.DeploymentList{}) + Invokes(testing.NewListActionWithOptions(deploymentsResource, deploymentsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -80,40 +82,43 @@ func (c *FakeDeployments) List(ctx context.Context, opts metav1.ListOptions) (re // Watch returns a watch.Interface that watches the requested deployments. func (c *FakeDeployments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(deploymentsResource, c.ns, opts)) } // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. func (c *FakeDeployments) Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (result *v1.Deployment, err error) { + emptyResult := &v1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1.Deployment{}) + Invokes(testing.NewCreateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Deployment), err } // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. func (c *FakeDeployments) Update(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) { + emptyResult := &v1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1.Deployment{}) + Invokes(testing.NewUpdateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Deployment), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error) { +func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) { + emptyResult := &v1.Deployment{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1.Deployment{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(deploymentsResource, "status", c.ns, deployment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Deployment), err } @@ -128,7 +133,7 @@ func (c *FakeDeployments) Delete(ctx context.Context, name string, opts metav1.D // DeleteCollection deletes a collection of objects. func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(deploymentsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.DeploymentList{}) return err @@ -136,11 +141,12 @@ func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts metav1.Dele // Patch applies the patch and returns the patched deployment. func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Deployment, err error) { + emptyResult := &v1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1.Deployment{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Deployment), err } @@ -158,11 +164,12 @@ func (c *FakeDeployments) Apply(ctx context.Context, deployment *appsv1.Deployme if name == nil { return nil, fmt.Errorf("deployment.Name must be provided to Apply") } + emptyResult := &v1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Deployment{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Deployment), err } @@ -181,33 +188,36 @@ func (c *FakeDeployments) ApplyStatus(ctx context.Context, deployment *appsv1.De if name == nil { return nil, fmt.Errorf("deployment.Name must be provided to Apply") } + emptyResult := &v1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.Deployment{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Deployment), err } // GetScale takes name of the deployment, and returns the corresponding scale object, and an error if there is any. func (c *FakeDeployments) GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. 
- Invokes(testing.NewGetSubresourceAction(deploymentsResource, c.ns, "scale", deploymentName), &autoscalingv1.Scale{}) + Invokes(testing.NewGetSubresourceActionWithOptions(deploymentsResource, c.ns, "scale", deploymentName, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*autoscalingv1.Scale), err } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *FakeDeployments) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { + emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(deploymentsResource, "scale", c.ns, scale, opts), &autoscalingv1.Scale{}) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*autoscalingv1.Scale), err } @@ -222,11 +232,12 @@ func (c *FakeDeployments) ApplyScale(ctx context.Context, deploymentName string, if err != nil { return nil, err } + emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, deploymentName, types.ApplyPatchType, data, "status"), &autoscalingv1.Scale{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, deploymentName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*autoscalingv1.Scale), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go index dedf19b42f..942a4e64a3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go @@ -46,22 +46,24 @@ var replicasetsKind = v1.SchemeGroupVersion.WithKind("ReplicaSet") // Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. func (c *FakeReplicaSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicaSet, err error) { + emptyResult := &v1.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewGetAction(replicasetsResource, c.ns, name), &v1.ReplicaSet{}) + Invokes(testing.NewGetActionWithOptions(replicasetsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicaSet), err } // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. func (c *FakeReplicaSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicaSetList, err error) { + emptyResult := &v1.ReplicaSetList{} obj, err := c.Fake. - Invokes(testing.NewListAction(replicasetsResource, replicasetsKind, c.ns, opts), &v1.ReplicaSetList{}) + Invokes(testing.NewListActionWithOptions(replicasetsResource, replicasetsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -80,40 +82,43 @@ func (c *FakeReplicaSets) List(ctx context.Context, opts metav1.ListOptions) (re // Watch returns a watch.Interface that watches the requested replicaSets. 
func (c *FakeReplicaSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(replicasetsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(replicasetsResource, c.ns, opts)) } // Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.CreateOptions) (result *v1.ReplicaSet, err error) { + emptyResult := &v1.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &v1.ReplicaSet{}) + Invokes(testing.NewCreateActionWithOptions(replicasetsResource, c.ns, replicaSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicaSet), err } // Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) { + emptyResult := &v1.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &v1.ReplicaSet{}) + Invokes(testing.NewUpdateActionWithOptions(replicasetsResource, c.ns, replicaSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicaSet), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (*v1.ReplicaSet, error) { +func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) { + emptyResult := &v1.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &v1.ReplicaSet{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(replicasetsResource, "status", c.ns, replicaSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicaSet), err } @@ -128,7 +133,7 @@ func (c *FakeReplicaSets) Delete(ctx context.Context, name string, opts metav1.D // DeleteCollection deletes a collection of objects. func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(replicasetsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(replicasetsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.ReplicaSetList{}) return err @@ -136,11 +141,12 @@ func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts metav1.Dele // Patch applies the patch and returns the patched replicaSet. func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicaSet, err error) { + emptyResult := &v1.ReplicaSet{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &v1.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicaSet), err } @@ -158,11 +164,12 @@ func (c *FakeReplicaSets) Apply(ctx context.Context, replicaSet *appsv1.ReplicaS if name == nil { return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") } + emptyResult := &v1.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data), &v1.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicaSet), err } @@ -181,33 +188,36 @@ func (c *FakeReplicaSets) ApplyStatus(ctx context.Context, replicaSet *appsv1.Re if name == nil { return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") } + emptyResult := &v1.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicaSet), err } // GetScale takes name of the replicaSet, and returns the corresponding scale object, and an error if there is any. func (c *FakeReplicaSets) GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. - Invokes(testing.NewGetSubresourceAction(replicasetsResource, c.ns, "scale", replicaSetName), &autoscalingv1.Scale{}) + Invokes(testing.NewGetSubresourceActionWithOptions(replicasetsResource, c.ns, "scale", replicaSetName, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*autoscalingv1.Scale), err } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *FakeReplicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { + emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(replicasetsResource, "scale", c.ns, scale, opts), &autoscalingv1.Scale{}) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*autoscalingv1.Scale), err } @@ -222,11 +232,12 @@ func (c *FakeReplicaSets) ApplyScale(ctx context.Context, replicaSetName string, if err != nil { return nil, err } + emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, replicaSetName, types.ApplyPatchType, data, "status"), &autoscalingv1.Scale{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, replicaSetName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*autoscalingv1.Scale), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go index f1d7d96e8d..ae4e811fb7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go @@ -46,22 +46,24 @@ var statefulsetsKind = v1.SchemeGroupVersion.WithKind("StatefulSet") // Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. func (c *FakeStatefulSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StatefulSet, err error) { + emptyResult := &v1.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewGetAction(statefulsetsResource, c.ns, name), &v1.StatefulSet{}) + Invokes(testing.NewGetActionWithOptions(statefulsetsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.StatefulSet), err } // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. func (c *FakeStatefulSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StatefulSetList, err error) { + emptyResult := &v1.StatefulSetList{} obj, err := c.Fake. - Invokes(testing.NewListAction(statefulsetsResource, statefulsetsKind, c.ns, opts), &v1.StatefulSetList{}) + Invokes(testing.NewListActionWithOptions(statefulsetsResource, statefulsetsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -80,40 +82,43 @@ func (c *FakeStatefulSets) List(ctx context.Context, opts metav1.ListOptions) (r // Watch returns a watch.Interface that watches the requested statefulSets. func (c *FakeStatefulSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(statefulsetsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(statefulsetsResource, c.ns, opts)) } // Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.CreateOptions) (result *v1.StatefulSet, err error) { + emptyResult := &v1.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &v1.StatefulSet{}) + Invokes(testing.NewCreateActionWithOptions(statefulsetsResource, c.ns, statefulSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.StatefulSet), err } // Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) { + emptyResult := &v1.StatefulSet{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &v1.StatefulSet{}) + Invokes(testing.NewUpdateActionWithOptions(statefulsetsResource, c.ns, statefulSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.StatefulSet), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error) { +func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) { + emptyResult := &v1.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &v1.StatefulSet{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(statefulsetsResource, "status", c.ns, statefulSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.StatefulSet), err } @@ -128,7 +133,7 @@ func (c *FakeStatefulSets) Delete(ctx context.Context, name string, opts metav1. // DeleteCollection deletes a collection of objects. func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(statefulsetsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(statefulsetsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.StatefulSetList{}) return err @@ -136,11 +141,12 @@ func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts metav1.Del // Patch applies the patch and returns the patched statefulSet. func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StatefulSet, err error) { + emptyResult := &v1.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &v1.StatefulSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.StatefulSet), err } @@ -158,11 +164,12 @@ func (c *FakeStatefulSets) Apply(ctx context.Context, statefulSet *appsv1.Statef if name == nil { return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") } + emptyResult := &v1.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1.StatefulSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.StatefulSet), err } @@ -181,33 +188,36 @@ func (c *FakeStatefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1. if name == nil { return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") } + emptyResult := &v1.StatefulSet{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.StatefulSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.StatefulSet), err } // GetScale takes name of the statefulSet, and returns the corresponding scale object, and an error if there is any. func (c *FakeStatefulSets) GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. - Invokes(testing.NewGetSubresourceAction(statefulsetsResource, c.ns, "scale", statefulSetName), &autoscalingv1.Scale{}) + Invokes(testing.NewGetSubresourceActionWithOptions(statefulsetsResource, c.ns, "scale", statefulSetName, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*autoscalingv1.Scale), err } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *FakeStatefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { + emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(statefulsetsResource, "scale", c.ns, scale, opts), &autoscalingv1.Scale{}) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*autoscalingv1.Scale), err } @@ -222,11 +232,12 @@ func (c *FakeStatefulSets) ApplyScale(ctx context.Context, statefulSetName strin if err != nil { return nil, err } + emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, statefulSetName, types.ApplyPatchType, data, "status"), &autoscalingv1.Scale{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, statefulSetName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*autoscalingv1.Scale), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go index 917ed521f4..d6dec016bb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go @@ -22,7 +22,6 @@ import ( "context" json "encoding/json" "fmt" - "time" v1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" @@ -31,8 +30,8 @@ import ( watch "k8s.io/apimachinery/pkg/watch" appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ReplicaSetsGetter has a method to return a ReplicaSetInterface. 
@@ -45,6 +44,7 @@ type ReplicaSetsGetter interface { type ReplicaSetInterface interface { Create(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.CreateOptions) (*v1.ReplicaSet, error) Update(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (*v1.ReplicaSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (*v1.ReplicaSet, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -53,6 +53,7 @@ type ReplicaSetInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicaSet, err error) Apply(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) @@ -63,209 +64,27 @@ type ReplicaSetInterface interface { // replicaSets implements ReplicaSetInterface type replicaSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.ReplicaSet, *v1.ReplicaSetList, *appsv1.ReplicaSetApplyConfiguration] } // newReplicaSets returns a ReplicaSets func newReplicaSets(c *AppsV1Client, namespace string) *replicaSets { return &replicaSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.ReplicaSet, *v1.ReplicaSetList, *appsv1.ReplicaSetApplyConfiguration]( + "replicasets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.ReplicaSet { return &v1.ReplicaSet{} }, + func() *v1.ReplicaSetList { return &v1.ReplicaSetList{} }), } } -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *replicaSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicaSet, err error) { - result = &v1.ReplicaSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *replicaSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicaSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ReplicaSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicaSets. 
-func (c *replicaSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Create(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.CreateOptions) (result *v1.ReplicaSet, err error) { - result = &v1.ReplicaSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) { - result = &v1.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) { - result = &v1.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *replicaSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicaSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicaSet, err error) { - result = &v1.ReplicaSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied replicaSet. 
-func (c *replicaSets) Apply(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - result = &v1.ReplicaSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicasets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *replicaSets) ApplyStatus(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - - result = &v1.ReplicaSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicasets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // GetScale takes name of the replicaSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). @@ -278,8 +97,8 @@ func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, optio // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). @@ -303,8 +122,8 @@ func (c *replicaSets) ApplyScale(ctx context.Context, replicaSetName string, sca } result = &autoscalingv1.Scale{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). + err = c.GetClient().Patch(types.ApplyPatchType). + Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). 
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go index d1fbb915d8..b25ed07238 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go @@ -22,7 +22,6 @@ import ( "context" json "encoding/json" "fmt" - "time" v1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" @@ -31,8 +30,8 @@ import ( watch "k8s.io/apimachinery/pkg/watch" appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // StatefulSetsGetter has a method to return a StatefulSetInterface. @@ -45,6 +44,7 @@ type StatefulSetsGetter interface { type StatefulSetInterface interface { Create(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.CreateOptions) (*v1.StatefulSet, error) Update(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -53,6 +53,7 @@ type StatefulSetInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StatefulSet, err error) Apply(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) @@ -63,209 +64,27 @@ type StatefulSetInterface interface { // statefulSets implements StatefulSetInterface type statefulSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.StatefulSet, *v1.StatefulSetList, *appsv1.StatefulSetApplyConfiguration] } // newStatefulSets returns a StatefulSets func newStatefulSets(c *AppsV1Client, namespace string) *statefulSets { return &statefulSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.StatefulSet, *v1.StatefulSetList, *appsv1.StatefulSetApplyConfiguration]( + "statefulsets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.StatefulSet { return &v1.StatefulSet{} }, + func() *v1.StatefulSetList { return &v1.StatefulSetList{} }), } } -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. -func (c *statefulSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StatefulSet, err error) { - result = &v1.StatefulSet{} - err = c.client.Get(). - Namespace(c.ns). 
- Resource("statefulsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *statefulSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StatefulSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.StatefulSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested statefulSets. -func (c *statefulSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Create(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.CreateOptions) (result *v1.StatefulSet, err error) { - result = &v1.StatefulSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Update(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) { - result = &v1.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) { - result = &v1.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *statefulSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *statefulSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). 
- VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched statefulSet. -func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StatefulSet, err error) { - result = &v1.StatefulSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied statefulSet. -func (c *statefulSets) Apply(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - result = &v1.StatefulSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("statefulsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *statefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - - result = &v1.StatefulSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("statefulsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // GetScale takes name of the statefulSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). @@ -278,8 +97,8 @@ func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, opt // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). 
SubResource("scale"). @@ -303,8 +122,8 @@ func (c *statefulSets) ApplyScale(ctx context.Context, statefulSetName string, s } result = &autoscalingv1.Scale{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). + err = c.GetClient().Patch(types.ApplyPatchType). + Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go index 0c3f49ba14..185f7cc4e0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ControllerRevisionsGetter has a method to return a ControllerRevisionInterface. @@ -55,154 +52,18 @@ type ControllerRevisionInterface interface { // controllerRevisions implements ControllerRevisionInterface type controllerRevisions struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.ControllerRevision, *v1beta1.ControllerRevisionList, *appsv1beta1.ControllerRevisionApplyConfiguration] } // newControllerRevisions returns a ControllerRevisions func newControllerRevisions(c *AppsV1beta1Client, namespace string) *controllerRevisions { return &controllerRevisions{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.ControllerRevision, *v1beta1.ControllerRevisionList, *appsv1beta1.ControllerRevisionApplyConfiguration]( + "controllerrevisions", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.ControllerRevision { return &v1beta1.ControllerRevision{} }, + func() *v1beta1.ControllerRevisionList { return &v1beta1.ControllerRevisionList{} }), } } - -// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *controllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ControllerRevision, err error) { - result = &v1beta1.ControllerRevision{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *controllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ControllerRevisionList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested controllerRevisions. 
-func (c *controllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.CreateOptions) (result *v1beta1.ControllerRevision, err error) { - result = &v1beta1.ControllerRevision{} - err = c.client.Post(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(controllerRevision). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.UpdateOptions) (result *v1beta1.ControllerRevision, err error) { - result = &v1beta1.ControllerRevision{} - err = c.client.Put(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(controllerRevision.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(controllerRevision). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *controllerRevisions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *controllerRevisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched controllerRevision. -func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerRevision, err error) { - result = &v1beta1.ControllerRevision{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied controllerRevision. 
-func (c *controllerRevisions) Apply(ctx context.Context, controllerRevision *appsv1beta1.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ControllerRevision, err error) { - if controllerRevision == nil { - return nil, fmt.Errorf("controllerRevision provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(controllerRevision) - if err != nil { - return nil, err - } - name := controllerRevision.Name - if name == nil { - return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply") - } - result = &v1beta1.ControllerRevision{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go index 281758c435..06e4b7bf93 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DeploymentsGetter has a method to return a DeploymentInterface. @@ -43,6 +40,7 @@ type DeploymentsGetter interface { type DeploymentInterface interface { Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (*v1beta1.Deployment, error) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type DeploymentInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) Apply(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) DeploymentExpansion } // deployments implements DeploymentInterface type deployments struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *appsv1beta1.DeploymentApplyConfiguration] } // newDeployments returns a Deployments func newDeployments(c *AppsV1beta1Client, namespace string) *deployments { return &deployments{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *appsv1beta1.DeploymentApplyConfiguration]( + "deployments", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.Deployment { return &v1beta1.Deployment{} }, + func() *v1beta1.DeploymentList { return &v1beta1.DeploymentList{} }), } } - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("deployments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment. -func (c *deployments) Apply(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - result = &v1beta1.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *deployments) ApplyStatus(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - - result = &v1beta1.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - SubResource("status"). 
- VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go index 1954c94703..7ea2b2e11b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go @@ -44,22 +44,24 @@ var controllerrevisionsKind = v1beta1.SchemeGroupVersion.WithKind("ControllerRev // Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. func (c *FakeControllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ControllerRevision, err error) { + emptyResult := &v1beta1.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewGetAction(controllerrevisionsResource, c.ns, name), &v1beta1.ControllerRevision{}) + Invokes(testing.NewGetActionWithOptions(controllerrevisionsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ControllerRevision), err } // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. func (c *FakeControllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) { + emptyResult := &v1beta1.ControllerRevisionList{} obj, err := c.Fake. - Invokes(testing.NewListAction(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), &v1beta1.ControllerRevisionList{}) + Invokes(testing.NewListActionWithOptions(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeControllerRevisions) List(ctx context.Context, opts v1.ListOptions) // Watch returns a watch.Interface that watches the requested controllerRevisions. func (c *FakeControllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(controllerrevisionsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(controllerrevisionsResource, c.ns, opts)) } // Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.CreateOptions) (result *v1beta1.ControllerRevision, err error) { + emptyResult := &v1beta1.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta1.ControllerRevision{}) + Invokes(testing.NewCreateActionWithOptions(controllerrevisionsResource, c.ns, controllerRevision, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ControllerRevision), err } // Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. 
func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.UpdateOptions) (result *v1beta1.ControllerRevision, err error) { + emptyResult := &v1beta1.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta1.ControllerRevision{}) + Invokes(testing.NewUpdateActionWithOptions(controllerrevisionsResource, c.ns, controllerRevision, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ControllerRevision), err } @@ -114,7 +118,7 @@ func (c *FakeControllerRevisions) Delete(ctx context.Context, name string, opts // DeleteCollection deletes a collection of objects. func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(controllerrevisionsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(controllerrevisionsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.ControllerRevisionList{}) return err @@ -122,11 +126,12 @@ func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts v1. // Patch applies the patch and returns the patched controllerRevision. func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerRevision, err error) { + emptyResult := &v1beta1.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &v1beta1.ControllerRevision{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(controllerrevisionsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ControllerRevision), err } @@ -144,11 +149,12 @@ func (c *FakeControllerRevisions) Apply(ctx context.Context, controllerRevision if name == nil { return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply") } + emptyResult := &v1beta1.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.ControllerRevision{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ControllerRevision), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go index 9614852f74..05c557ecb3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go @@ -44,22 +44,24 @@ var deploymentsKind = v1beta1.SchemeGroupVersion.WithKind("Deployment") // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. func (c *FakeDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. 
- Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) + Invokes(testing.NewGetActionWithOptions(deploymentsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { + emptyResult := &v1beta1.DeploymentList{} obj, err := c.Fake. - Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1beta1.DeploymentList{}) + Invokes(testing.NewListActionWithOptions(deploymentsResource, deploymentsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result // Watch returns a watch.Interface that watches the requested deployments. func (c *FakeDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(deploymentsResource, c.ns, opts)) } // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) { + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) + Invokes(testing.NewCreateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) + Invokes(testing.NewUpdateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) { +func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta1.Deployment{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(deploymentsResource, "status", c.ns, deployment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } @@ -126,7 +131,7 @@ func (c *FakeDeployments) Delete(ctx context.Context, name string, opts v1.Delet // DeleteCollection deletes a collection of objects. func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(deploymentsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.DeploymentList{}) return err @@ -134,11 +139,12 @@ func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOp // Patch applies the patch and returns the patched deployment. func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) { + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1beta1.Deployment{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } @@ -156,11 +162,12 @@ func (c *FakeDeployments) Apply(ctx context.Context, deployment *appsv1beta1.Dep if name == nil { return nil, fmt.Errorf("deployment.Name must be provided to Apply") } + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Deployment{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } @@ -179,11 +186,12 @@ func (c *FakeDeployments) ApplyStatus(ctx context.Context, deployment *appsv1bet if name == nil { return nil, fmt.Errorf("deployment.Name must be provided to Apply") } + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.Deployment{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go index 2124515cfe..c38690554a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go @@ -44,22 +44,24 @@ var statefulsetsKind = v1beta1.SchemeGroupVersion.WithKind("StatefulSet") // Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. 
func (c *FakeStatefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) { + emptyResult := &v1beta1.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewGetAction(statefulsetsResource, c.ns, name), &v1beta1.StatefulSet{}) + Invokes(testing.NewGetActionWithOptions(statefulsetsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.StatefulSet), err } // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. func (c *FakeStatefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { + emptyResult := &v1beta1.StatefulSetList{} obj, err := c.Fake. - Invokes(testing.NewListAction(statefulsetsResource, statefulsetsKind, c.ns, opts), &v1beta1.StatefulSetList{}) + Invokes(testing.NewListActionWithOptions(statefulsetsResource, statefulsetsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeStatefulSets) List(ctx context.Context, opts v1.ListOptions) (resul // Watch returns a watch.Interface that watches the requested statefulSets. func (c *FakeStatefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(statefulsetsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(statefulsetsResource, c.ns, opts)) } // Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.CreateOptions) (result *v1beta1.StatefulSet, err error) { + emptyResult := &v1beta1.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &v1beta1.StatefulSet{}) + Invokes(testing.NewCreateActionWithOptions(statefulsetsResource, c.ns, statefulSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.StatefulSet), err } // Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) { + emptyResult := &v1beta1.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &v1beta1.StatefulSet{}) + Invokes(testing.NewUpdateActionWithOptions(statefulsetsResource, c.ns, statefulSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.StatefulSet), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error) { +func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) { + emptyResult := &v1beta1.StatefulSet{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &v1beta1.StatefulSet{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(statefulsetsResource, "status", c.ns, statefulSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.StatefulSet), err } @@ -126,7 +131,7 @@ func (c *FakeStatefulSets) Delete(ctx context.Context, name string, opts v1.Dele // DeleteCollection deletes a collection of objects. func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(statefulsetsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(statefulsetsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.StatefulSetList{}) return err @@ -134,11 +139,12 @@ func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteO // Patch applies the patch and returns the patched statefulSet. func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StatefulSet, err error) { + emptyResult := &v1beta1.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &v1beta1.StatefulSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.StatefulSet), err } @@ -156,11 +162,12 @@ func (c *FakeStatefulSets) Apply(ctx context.Context, statefulSet *appsv1beta1.S if name == nil { return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") } + emptyResult := &v1beta1.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.StatefulSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.StatefulSet), err } @@ -179,11 +186,12 @@ func (c *FakeStatefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1b if name == nil { return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") } + emptyResult := &v1beta1.StatefulSet{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.StatefulSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.StatefulSet), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go index 3f1aebcffb..1ff69eb993 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // StatefulSetsGetter has a method to return a StatefulSetInterface. @@ -43,6 +40,7 @@ type StatefulSetsGetter interface { type StatefulSetInterface interface { Create(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.CreateOptions) (*v1beta1.StatefulSet, error) Update(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type StatefulSetInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StatefulSet, err error) Apply(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error) StatefulSetExpansion } // statefulSets implements StatefulSetInterface type statefulSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.StatefulSet, *v1beta1.StatefulSetList, *appsv1beta1.StatefulSetApplyConfiguration] } // newStatefulSets returns a StatefulSets func newStatefulSets(c *AppsV1beta1Client, namespace string) *statefulSets { return &statefulSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.StatefulSet, *v1beta1.StatefulSetList, *appsv1beta1.StatefulSetApplyConfiguration]( + "statefulsets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.StatefulSet { return &v1beta1.StatefulSet{} }, + func() *v1beta1.StatefulSetList { return &v1beta1.StatefulSetList{} }), } } - -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. 
-func (c *statefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *statefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.StatefulSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested statefulSets. -func (c *statefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Create(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.CreateOptions) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Update(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *statefulSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *statefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched statefulSet. -func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied statefulSet. -func (c *statefulSets) Apply(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - result = &v1beta1.StatefulSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("statefulsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *statefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - - result = &v1beta1.StatefulSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("statefulsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go index e1643277a6..6caee6a725 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go @@ -20,17 +20,14 @@ package v1beta2 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ControllerRevisionsGetter has a method to return a ControllerRevisionInterface. @@ -55,154 +52,18 @@ type ControllerRevisionInterface interface { // controllerRevisions implements ControllerRevisionInterface type controllerRevisions struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta2.ControllerRevision, *v1beta2.ControllerRevisionList, *appsv1beta2.ControllerRevisionApplyConfiguration] } // newControllerRevisions returns a ControllerRevisions func newControllerRevisions(c *AppsV1beta2Client, namespace string) *controllerRevisions { return &controllerRevisions{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta2.ControllerRevision, *v1beta2.ControllerRevisionList, *appsv1beta2.ControllerRevisionApplyConfiguration]( + "controllerrevisions", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta2.ControllerRevision { return &v1beta2.ControllerRevision{} }, + func() *v1beta2.ControllerRevisionList { return &v1beta2.ControllerRevisionList{} }), } } - -// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *controllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ControllerRevision, err error) { - result = &v1beta2.ControllerRevision{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *controllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.ControllerRevisionList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested controllerRevisions. -func (c *controllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
- Watch(ctx) -} - -// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.CreateOptions) (result *v1beta2.ControllerRevision, err error) { - result = &v1beta2.ControllerRevision{} - err = c.client.Post(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(controllerRevision). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.UpdateOptions) (result *v1beta2.ControllerRevision, err error) { - result = &v1beta2.ControllerRevision{} - err = c.client.Put(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(controllerRevision.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(controllerRevision). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *controllerRevisions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *controllerRevisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched controllerRevision. -func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ControllerRevision, err error) { - result = &v1beta2.ControllerRevision{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied controllerRevision. -func (c *controllerRevisions) Apply(ctx context.Context, controllerRevision *appsv1beta2.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ControllerRevision, err error) { - if controllerRevision == nil { - return nil, fmt.Errorf("controllerRevision provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(controllerRevision) - if err != nil { - return nil, err - } - name := controllerRevision.Name - if name == nil { - return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply") - } - result = &v1beta2.ControllerRevision{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go index 1391df87d2..766dc6d433 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go @@ -20,17 +20,14 @@ package v1beta2 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DaemonSetsGetter has a method to return a DaemonSetInterface. @@ -43,6 +40,7 @@ type DaemonSetsGetter interface { type DaemonSetInterface interface { Create(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.CreateOptions) (*v1beta2.DaemonSet, error) Update(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type DaemonSetInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.DaemonSet, err error) Apply(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error) DaemonSetExpansion } // daemonSets implements DaemonSetInterface type daemonSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta2.DaemonSet, *v1beta2.DaemonSetList, *appsv1beta2.DaemonSetApplyConfiguration] } // newDaemonSets returns a DaemonSets func newDaemonSets(c *AppsV1beta2Client, namespace string) *daemonSets { return &daemonSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta2.DaemonSet, *v1beta2.DaemonSetList, *appsv1beta2.DaemonSetApplyConfiguration]( + "daemonsets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta2.DaemonSet { return &v1beta2.DaemonSet{} }, + func() *v1beta2.DaemonSetList { return &v1beta2.DaemonSetList{} }), } } - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *daemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.DaemonSet, err error) { - result = &v1beta2.DaemonSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. 
-func (c *daemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.DaemonSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *daemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.CreateOptions) (result *v1beta2.DaemonSet, err error) { - result = &v1beta2.DaemonSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) { - result = &v1beta2.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) { - result = &v1beta2.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *daemonSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *daemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched daemonSet. 
-func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.DaemonSet, err error) { - result = &v1beta2.DaemonSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied daemonSet. -func (c *daemonSets) Apply(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - result = &v1beta2.DaemonSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("daemonsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *daemonSets) ApplyStatus(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - - result = &v1beta2.DaemonSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("daemonsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go index 5bda0d92c1..6592ee8cd9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go @@ -20,17 +20,14 @@ package v1beta2 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DeploymentsGetter has a method to return a DeploymentInterface. @@ -43,6 +40,7 @@ type DeploymentsGetter interface { type DeploymentInterface interface { Create(ctx context.Context, deployment *v1beta2.Deployment, opts v1.CreateOptions) (*v1beta2.Deployment, error) Update(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type DeploymentInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.Deployment, err error) Apply(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error) DeploymentExpansion } // deployments implements DeploymentInterface type deployments struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta2.Deployment, *v1beta2.DeploymentList, *appsv1beta2.DeploymentApplyConfiguration] } // newDeployments returns a Deployments func newDeployments(c *AppsV1beta2Client, namespace string) *deployments { return &deployments{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta2.Deployment, *v1beta2.DeploymentList, *appsv1beta2.DeploymentApplyConfiguration]( + "deployments", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta2.Deployment { return &v1beta2.Deployment{} }, + func() *v1beta2.DeploymentList { return &v1beta2.DeploymentList{} }), } } - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.Deployment, err error) { - result = &v1beta2.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. 
-func (c *deployments) Create(ctx context.Context, deployment *v1beta2.Deployment, opts v1.CreateOptions) (result *v1beta2.Deployment, err error) { - result = &v1beta2.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) { - result = &v1beta2.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) { - result = &v1beta2.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.Deployment, err error) { - result = &v1beta2.Deployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("deployments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment. -func (c *deployments) Apply(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - result = &v1beta2.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). 
- VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *deployments) ApplyStatus(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - - result = &v1beta2.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go index 1bf7fb3314..45b2050706 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go @@ -44,22 +44,24 @@ var controllerrevisionsKind = v1beta2.SchemeGroupVersion.WithKind("ControllerRev // Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. func (c *FakeControllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ControllerRevision, err error) { + emptyResult := &v1beta2.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewGetAction(controllerrevisionsResource, c.ns, name), &v1beta2.ControllerRevision{}) + Invokes(testing.NewGetActionWithOptions(controllerrevisionsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.ControllerRevision), err } // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. func (c *FakeControllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) { + emptyResult := &v1beta2.ControllerRevisionList{} obj, err := c.Fake. - Invokes(testing.NewListAction(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), &v1beta2.ControllerRevisionList{}) + Invokes(testing.NewListActionWithOptions(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeControllerRevisions) List(ctx context.Context, opts v1.ListOptions) // Watch returns a watch.Interface that watches the requested controllerRevisions. func (c *FakeControllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(controllerrevisionsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(controllerrevisionsResource, c.ns, opts)) } // Create takes the representation of a controllerRevision and creates it. 
Returns the server's representation of the controllerRevision, and an error, if there is any. func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.CreateOptions) (result *v1beta2.ControllerRevision, err error) { + emptyResult := &v1beta2.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta2.ControllerRevision{}) + Invokes(testing.NewCreateActionWithOptions(controllerrevisionsResource, c.ns, controllerRevision, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.ControllerRevision), err } // Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.UpdateOptions) (result *v1beta2.ControllerRevision, err error) { + emptyResult := &v1beta2.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta2.ControllerRevision{}) + Invokes(testing.NewUpdateActionWithOptions(controllerrevisionsResource, c.ns, controllerRevision, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.ControllerRevision), err } @@ -114,7 +118,7 @@ func (c *FakeControllerRevisions) Delete(ctx context.Context, name string, opts // DeleteCollection deletes a collection of objects. func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(controllerrevisionsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(controllerrevisionsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta2.ControllerRevisionList{}) return err @@ -122,11 +126,12 @@ func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts v1. // Patch applies the patch and returns the patched controllerRevision. func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ControllerRevision, err error) { + emptyResult := &v1beta2.ControllerRevision{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &v1beta2.ControllerRevision{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(controllerrevisionsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.ControllerRevision), err } @@ -144,11 +149,12 @@ func (c *FakeControllerRevisions) Apply(ctx context.Context, controllerRevision if name == nil { return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply") } + emptyResult := &v1beta2.ControllerRevision{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta2.ControllerRevision{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.ControllerRevision), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go index 8f5cfa5a8a..61ceeb1411 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go @@ -44,22 +44,24 @@ var daemonsetsKind = v1beta2.SchemeGroupVersion.WithKind("DaemonSet") // Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. func (c *FakeDaemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.DaemonSet, err error) { + emptyResult := &v1beta2.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewGetAction(daemonsetsResource, c.ns, name), &v1beta2.DaemonSet{}) + Invokes(testing.NewGetActionWithOptions(daemonsetsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.DaemonSet), err } // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. func (c *FakeDaemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) { + emptyResult := &v1beta2.DaemonSetList{} obj, err := c.Fake. - Invokes(testing.NewListAction(daemonsetsResource, daemonsetsKind, c.ns, opts), &v1beta2.DaemonSetList{}) + Invokes(testing.NewListActionWithOptions(daemonsetsResource, daemonsetsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeDaemonSets) List(ctx context.Context, opts v1.ListOptions) (result // Watch returns a watch.Interface that watches the requested daemonSets. func (c *FakeDaemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(daemonsetsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(daemonsetsResource, c.ns, opts)) } // Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.CreateOptions) (result *v1beta2.DaemonSet, err error) { + emptyResult := &v1beta2.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1beta2.DaemonSet{}) + Invokes(testing.NewCreateActionWithOptions(daemonsetsResource, c.ns, daemonSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.DaemonSet), err } // Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) { + emptyResult := &v1beta2.DaemonSet{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1beta2.DaemonSet{}) + Invokes(testing.NewUpdateActionWithOptions(daemonsetsResource, c.ns, daemonSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.DaemonSet), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error) { +func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) { + emptyResult := &v1beta2.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1beta2.DaemonSet{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(daemonsetsResource, "status", c.ns, daemonSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.DaemonSet), err } @@ -126,7 +131,7 @@ func (c *FakeDaemonSets) Delete(ctx context.Context, name string, opts v1.Delete // DeleteCollection deletes a collection of objects. func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(daemonsetsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta2.DaemonSetList{}) return err @@ -134,11 +139,12 @@ func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOpt // Patch applies the patch and returns the patched daemonSet. func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.DaemonSet, err error) { + emptyResult := &v1beta2.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &v1beta2.DaemonSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.DaemonSet), err } @@ -156,11 +162,12 @@ func (c *FakeDaemonSets) Apply(ctx context.Context, daemonSet *appsv1beta2.Daemo if name == nil { return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") } + emptyResult := &v1beta2.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta2.DaemonSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.DaemonSet), err } @@ -179,11 +186,12 @@ func (c *FakeDaemonSets) ApplyStatus(ctx context.Context, daemonSet *appsv1beta2 if name == nil { return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") } + emptyResult := &v1beta2.DaemonSet{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta2.DaemonSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.DaemonSet), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go index c9e8ab48bb..d849856a40 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go @@ -44,22 +44,24 @@ var deploymentsKind = v1beta2.SchemeGroupVersion.WithKind("Deployment") // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. func (c *FakeDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.Deployment, err error) { + emptyResult := &v1beta2.Deployment{} obj, err := c.Fake. - Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1beta2.Deployment{}) + Invokes(testing.NewGetActionWithOptions(deploymentsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.Deployment), err } // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) { + emptyResult := &v1beta2.DeploymentList{} obj, err := c.Fake. - Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1beta2.DeploymentList{}) + Invokes(testing.NewListActionWithOptions(deploymentsResource, deploymentsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result // Watch returns a watch.Interface that watches the requested deployments. func (c *FakeDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(deploymentsResource, c.ns, opts)) } // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta2.Deployment, opts v1.CreateOptions) (result *v1beta2.Deployment, err error) { + emptyResult := &v1beta2.Deployment{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta2.Deployment{}) + Invokes(testing.NewCreateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.Deployment), err } // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. 
func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) { + emptyResult := &v1beta2.Deployment{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta2.Deployment{}) + Invokes(testing.NewUpdateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.Deployment), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error) { +func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) { + emptyResult := &v1beta2.Deployment{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta2.Deployment{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(deploymentsResource, "status", c.ns, deployment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.Deployment), err } @@ -126,7 +131,7 @@ func (c *FakeDeployments) Delete(ctx context.Context, name string, opts v1.Delet // DeleteCollection deletes a collection of objects. func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(deploymentsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta2.DeploymentList{}) return err @@ -134,11 +139,12 @@ func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOp // Patch applies the patch and returns the patched deployment. func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.Deployment, err error) { + emptyResult := &v1beta2.Deployment{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1beta2.Deployment{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.Deployment), err } @@ -156,11 +162,12 @@ func (c *FakeDeployments) Apply(ctx context.Context, deployment *appsv1beta2.Dep if name == nil { return nil, fmt.Errorf("deployment.Name must be provided to Apply") } + emptyResult := &v1beta2.Deployment{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta2.Deployment{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.Deployment), err } @@ -179,11 +186,12 @@ func (c *FakeDeployments) ApplyStatus(ctx context.Context, deployment *appsv1bet if name == nil { return nil, fmt.Errorf("deployment.Name must be provided to Apply") } + emptyResult := &v1beta2.Deployment{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta2.Deployment{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.Deployment), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go index 46e1a78a7a..1f957f0843 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go @@ -44,22 +44,24 @@ var replicasetsKind = v1beta2.SchemeGroupVersion.WithKind("ReplicaSet") // Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. func (c *FakeReplicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ReplicaSet, err error) { + emptyResult := &v1beta2.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewGetAction(replicasetsResource, c.ns, name), &v1beta2.ReplicaSet{}) + Invokes(testing.NewGetActionWithOptions(replicasetsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.ReplicaSet), err } // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. func (c *FakeReplicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) { + emptyResult := &v1beta2.ReplicaSetList{} obj, err := c.Fake. - Invokes(testing.NewListAction(replicasetsResource, replicasetsKind, c.ns, opts), &v1beta2.ReplicaSetList{}) + Invokes(testing.NewListActionWithOptions(replicasetsResource, replicasetsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeReplicaSets) List(ctx context.Context, opts v1.ListOptions) (result // Watch returns a watch.Interface that watches the requested replicaSets. func (c *FakeReplicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(replicasetsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(replicasetsResource, c.ns, opts)) } // Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.CreateOptions) (result *v1beta2.ReplicaSet, err error) { + emptyResult := &v1beta2.ReplicaSet{} obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &v1beta2.ReplicaSet{}) + Invokes(testing.NewCreateActionWithOptions(replicasetsResource, c.ns, replicaSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.ReplicaSet), err } // Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) { + emptyResult := &v1beta2.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &v1beta2.ReplicaSet{}) + Invokes(testing.NewUpdateActionWithOptions(replicasetsResource, c.ns, replicaSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.ReplicaSet), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error) { +func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) { + emptyResult := &v1beta2.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &v1beta2.ReplicaSet{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(replicasetsResource, "status", c.ns, replicaSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.ReplicaSet), err } @@ -126,7 +131,7 @@ func (c *FakeReplicaSets) Delete(ctx context.Context, name string, opts v1.Delet // DeleteCollection deletes a collection of objects. func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(replicasetsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(replicasetsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta2.ReplicaSetList{}) return err @@ -134,11 +139,12 @@ func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOp // Patch applies the patch and returns the patched replicaSet. func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ReplicaSet, err error) { + emptyResult := &v1beta2.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &v1beta2.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.ReplicaSet), err } @@ -156,11 +162,12 @@ func (c *FakeReplicaSets) Apply(ctx context.Context, replicaSet *appsv1beta2.Rep if name == nil { return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") } + emptyResult := &v1beta2.ReplicaSet{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta2.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.ReplicaSet), err } @@ -179,11 +186,12 @@ func (c *FakeReplicaSets) ApplyStatus(ctx context.Context, replicaSet *appsv1bet if name == nil { return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") } + emptyResult := &v1beta2.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta2.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.ReplicaSet), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go index 684f799256..ac8945aa77 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go @@ -44,22 +44,24 @@ var statefulsetsKind = v1beta2.SchemeGroupVersion.WithKind("StatefulSet") // Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. func (c *FakeStatefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.StatefulSet, err error) { + emptyResult := &v1beta2.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewGetAction(statefulsetsResource, c.ns, name), &v1beta2.StatefulSet{}) + Invokes(testing.NewGetActionWithOptions(statefulsetsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.StatefulSet), err } // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. func (c *FakeStatefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) { + emptyResult := &v1beta2.StatefulSetList{} obj, err := c.Fake. - Invokes(testing.NewListAction(statefulsetsResource, statefulsetsKind, c.ns, opts), &v1beta2.StatefulSetList{}) + Invokes(testing.NewListActionWithOptions(statefulsetsResource, statefulsetsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeStatefulSets) List(ctx context.Context, opts v1.ListOptions) (resul // Watch returns a watch.Interface that watches the requested statefulSets. func (c *FakeStatefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(statefulsetsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(statefulsetsResource, c.ns, opts)) } // Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. 
func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.CreateOptions) (result *v1beta2.StatefulSet, err error) { + emptyResult := &v1beta2.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &v1beta2.StatefulSet{}) + Invokes(testing.NewCreateActionWithOptions(statefulsetsResource, c.ns, statefulSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.StatefulSet), err } // Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) { + emptyResult := &v1beta2.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &v1beta2.StatefulSet{}) + Invokes(testing.NewUpdateActionWithOptions(statefulsetsResource, c.ns, statefulSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.StatefulSet), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error) { +func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) { + emptyResult := &v1beta2.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &v1beta2.StatefulSet{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(statefulsetsResource, "status", c.ns, statefulSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.StatefulSet), err } @@ -126,7 +131,7 @@ func (c *FakeStatefulSets) Delete(ctx context.Context, name string, opts v1.Dele // DeleteCollection deletes a collection of objects. func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(statefulsetsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(statefulsetsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta2.StatefulSetList{}) return err @@ -134,11 +139,12 @@ func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteO // Patch applies the patch and returns the patched statefulSet. func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.StatefulSet, err error) { + emptyResult := &v1beta2.StatefulSet{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &v1beta2.StatefulSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.StatefulSet), err } @@ -156,11 +162,12 @@ func (c *FakeStatefulSets) Apply(ctx context.Context, statefulSet *appsv1beta2.S if name == nil { return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") } + emptyResult := &v1beta2.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta2.StatefulSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.StatefulSet), err } @@ -179,33 +186,36 @@ func (c *FakeStatefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1b if name == nil { return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") } + emptyResult := &v1beta2.StatefulSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta2.StatefulSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.StatefulSet), err } // GetScale takes name of the statefulSet, and returns the corresponding scale object, and an error if there is any. func (c *FakeStatefulSets) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (result *v1beta2.Scale, err error) { + emptyResult := &v1beta2.Scale{} obj, err := c.Fake. - Invokes(testing.NewGetSubresourceAction(statefulsetsResource, c.ns, "scale", statefulSetName), &v1beta2.Scale{}) + Invokes(testing.NewGetSubresourceActionWithOptions(statefulsetsResource, c.ns, "scale", statefulSetName, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.Scale), err } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *FakeStatefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (result *v1beta2.Scale, err error) { + emptyResult := &v1beta2.Scale{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "scale", c.ns, scale), &v1beta2.Scale{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(statefulsetsResource, "scale", c.ns, scale, opts), &v1beta2.Scale{}) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.Scale), err } @@ -220,11 +230,12 @@ func (c *FakeStatefulSets) ApplyScale(ctx context.Context, statefulSetName strin if err != nil { return nil, err } + emptyResult := &v1beta2.Scale{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, statefulSetName, types.ApplyPatchType, data, "status"), &v1beta2.Scale{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(statefulsetsResource, c.ns, statefulSetName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.Scale), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go index 988d898f79..90380ca980 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go @@ -20,17 +20,14 @@ package v1beta2 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ReplicaSetsGetter has a method to return a ReplicaSetInterface. @@ -43,6 +40,7 @@ type ReplicaSetsGetter interface { type ReplicaSetInterface interface { Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.CreateOptions) (*v1beta2.ReplicaSet, error) Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type ReplicaSetInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ReplicaSet, err error) Apply(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error) ReplicaSetExpansion } // replicaSets implements ReplicaSetInterface type replicaSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta2.ReplicaSet, *v1beta2.ReplicaSetList, *appsv1beta2.ReplicaSetApplyConfiguration] } // newReplicaSets returns a ReplicaSets func newReplicaSets(c *AppsV1beta2Client, namespace string) *replicaSets { return &replicaSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta2.ReplicaSet, *v1beta2.ReplicaSetList, *appsv1beta2.ReplicaSetApplyConfiguration]( + "replicasets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta2.ReplicaSet { return &v1beta2.ReplicaSet{} }, + func() *v1beta2.ReplicaSetList { return &v1beta2.ReplicaSetList{} }), } } - -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. 
-func (c *replicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ReplicaSet, err error) { - result = &v1beta2.ReplicaSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *replicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.ReplicaSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicaSets. -func (c *replicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.CreateOptions) (result *v1beta2.ReplicaSet, err error) { - result = &v1beta2.ReplicaSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) { - result = &v1beta2.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) { - result = &v1beta2.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *replicaSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *replicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ReplicaSet, err error) { - result = &v1beta2.ReplicaSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied replicaSet. -func (c *replicaSets) Apply(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - result = &v1beta2.ReplicaSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicasets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *replicaSets) ApplyStatus(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - - result = &v1beta2.ReplicaSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicasets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go index 0416675d6d..f2d673abb9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go @@ -22,15 +22,14 @@ import ( "context" json "encoding/json" "fmt" - "time" v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // StatefulSetsGetter has a method to return a StatefulSetInterface. @@ -43,6 +42,7 @@ type StatefulSetsGetter interface { type StatefulSetInterface interface { Create(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.CreateOptions) (*v1beta2.StatefulSet, error) Update(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,6 +51,7 @@ type StatefulSetInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.StatefulSet, err error) Apply(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (*v1beta2.Scale, error) UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (*v1beta2.Scale, error) @@ -61,209 +62,27 @@ type StatefulSetInterface interface { // statefulSets implements StatefulSetInterface type statefulSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta2.StatefulSet, *v1beta2.StatefulSetList, *appsv1beta2.StatefulSetApplyConfiguration] } // newStatefulSets returns a StatefulSets func newStatefulSets(c *AppsV1beta2Client, namespace string) *statefulSets { return &statefulSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta2.StatefulSet, *v1beta2.StatefulSetList, *appsv1beta2.StatefulSetApplyConfiguration]( + "statefulsets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta2.StatefulSet { return &v1beta2.StatefulSet{} }, + func() *v1beta2.StatefulSetList { return &v1beta2.StatefulSetList{} }), } } -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. 
-func (c *statefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.StatefulSet, err error) { - result = &v1beta2.StatefulSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *statefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.StatefulSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested statefulSets. -func (c *statefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Create(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.CreateOptions) (result *v1beta2.StatefulSet, err error) { - result = &v1beta2.StatefulSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Update(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) { - result = &v1beta2.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) { - result = &v1beta2.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *statefulSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *statefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched statefulSet. -func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.StatefulSet, err error) { - result = &v1beta2.StatefulSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied statefulSet. -func (c *statefulSets) Apply(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - result = &v1beta2.StatefulSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("statefulsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *statefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - - result = &v1beta2.StatefulSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("statefulsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // GetScale takes name of the statefulSet, and returns the corresponding v1beta2.Scale object, and an error if there is any. func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (result *v1beta2.Scale, err error) { result = &v1beta2.Scale{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). @@ -276,8 +95,8 @@ func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, opt // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. 
func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (result *v1beta2.Scale, err error) { result = &v1beta2.Scale{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). @@ -301,8 +120,8 @@ func (c *statefulSets) ApplyScale(ctx context.Context, statefulSetName string, s } result = &v1beta2.Scale{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). + err = c.GetClient().Patch(types.ApplyPatchType). + Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_selfsubjectreview.go index e683b3eaaa..7e7c3138a5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_selfsubjectreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_selfsubjectreview.go @@ -37,10 +37,11 @@ var selfsubjectreviewsKind = v1.SchemeGroupVersion.WithKind("SelfSubjectReview") // Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. func (c *FakeSelfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1.SelfSubjectReview, opts metav1.CreateOptions) (result *v1.SelfSubjectReview, err error) { + emptyResult := &v1.SelfSubjectReview{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(selfsubjectreviewsResource, selfSubjectReview), &v1.SelfSubjectReview{}) + Invokes(testing.NewRootCreateActionWithOptions(selfsubjectreviewsResource, selfSubjectReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.SelfSubjectReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go index 500e87d065..a22f335429 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go @@ -37,10 +37,11 @@ var tokenreviewsKind = v1.SchemeGroupVersion.WithKind("TokenReview") // Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. func (c *FakeTokenReviews) Create(ctx context.Context, tokenReview *v1.TokenReview, opts metav1.CreateOptions) (result *v1.TokenReview, err error) { + emptyResult := &v1.TokenReview{} obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(tokenreviewsResource, tokenReview), &v1.TokenReview{}) + Invokes(testing.NewRootCreateActionWithOptions(tokenreviewsResource, tokenReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.TokenReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go index bfb9603d67..720dd9e7e9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go @@ -23,8 +23,8 @@ import ( v1 "k8s.io/api/authentication/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SelfSubjectReviewsGetter has a method to return a SelfSubjectReviewInterface. @@ -41,24 +41,17 @@ type SelfSubjectReviewInterface interface { // selfSubjectReviews implements SelfSubjectReviewInterface type selfSubjectReviews struct { - client rest.Interface + *gentype.Client[*v1.SelfSubjectReview] } // newSelfSubjectReviews returns a SelfSubjectReviews func newSelfSubjectReviews(c *AuthenticationV1Client) *selfSubjectReviews { return &selfSubjectReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1.SelfSubjectReview]( + "selfsubjectreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.SelfSubjectReview { return &v1.SelfSubjectReview{} }), } } - -// Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. -func (c *selfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1.SelfSubjectReview, opts metav1.CreateOptions) (result *v1.SelfSubjectReview, err error) { - result = &v1.SelfSubjectReview{} - err = c.client.Post(). - Resource("selfsubjectreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(selfSubjectReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go index ca7cd47d26..52c55fab08 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go @@ -23,8 +23,8 @@ import ( v1 "k8s.io/api/authentication/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // TokenReviewsGetter has a method to return a TokenReviewInterface. @@ -41,24 +41,17 @@ type TokenReviewInterface interface { // tokenReviews implements TokenReviewInterface type tokenReviews struct { - client rest.Interface + *gentype.Client[*v1.TokenReview] } // newTokenReviews returns a TokenReviews func newTokenReviews(c *AuthenticationV1Client) *tokenReviews { return &tokenReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1.TokenReview]( + "tokenreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.TokenReview { return &v1.TokenReview{} }), } } - -// Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. 
-func (c *tokenReviews) Create(ctx context.Context, tokenReview *v1.TokenReview, opts metav1.CreateOptions) (result *v1.TokenReview, err error) { - result = &v1.TokenReview{} - err = c.client.Post(). - Resource("tokenreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(tokenReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_selfsubjectreview.go index a20b3dd764..680460f459 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_selfsubjectreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_selfsubjectreview.go @@ -37,10 +37,11 @@ var selfsubjectreviewsKind = v1alpha1.SchemeGroupVersion.WithKind("SelfSubjectRe // Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. func (c *FakeSelfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1alpha1.SelfSubjectReview, opts v1.CreateOptions) (result *v1alpha1.SelfSubjectReview, err error) { + emptyResult := &v1alpha1.SelfSubjectReview{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(selfsubjectreviewsResource, selfSubjectReview), &v1alpha1.SelfSubjectReview{}) + Invokes(testing.NewRootCreateActionWithOptions(selfsubjectreviewsResource, selfSubjectReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.SelfSubjectReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go index 7f8b12a46f..f034bcdbe3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go @@ -23,8 +23,8 @@ import ( v1alpha1 "k8s.io/api/authentication/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SelfSubjectReviewsGetter has a method to return a SelfSubjectReviewInterface. @@ -41,24 +41,17 @@ type SelfSubjectReviewInterface interface { // selfSubjectReviews implements SelfSubjectReviewInterface type selfSubjectReviews struct { - client rest.Interface + *gentype.Client[*v1alpha1.SelfSubjectReview] } // newSelfSubjectReviews returns a SelfSubjectReviews func newSelfSubjectReviews(c *AuthenticationV1alpha1Client) *selfSubjectReviews { return &selfSubjectReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1alpha1.SelfSubjectReview]( + "selfsubjectreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.SelfSubjectReview { return &v1alpha1.SelfSubjectReview{} }), } } - -// Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. -func (c *selfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1alpha1.SelfSubjectReview, opts v1.CreateOptions) (result *v1alpha1.SelfSubjectReview, err error) { - result = &v1alpha1.SelfSubjectReview{} - err = c.client.Post(). - Resource("selfsubjectreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(selfSubjectReview). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_selfsubjectreview.go index 4a9db85cf5..33e130e9cc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_selfsubjectreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_selfsubjectreview.go @@ -37,10 +37,11 @@ var selfsubjectreviewsKind = v1beta1.SchemeGroupVersion.WithKind("SelfSubjectRev // Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. func (c *FakeSelfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1beta1.SelfSubjectReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectReview, err error) { + emptyResult := &v1beta1.SelfSubjectReview{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(selfsubjectreviewsResource, selfSubjectReview), &v1beta1.SelfSubjectReview{}) + Invokes(testing.NewRootCreateActionWithOptions(selfsubjectreviewsResource, selfSubjectReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.SelfSubjectReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go index b1988a67a3..b512f5c146 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go @@ -37,10 +37,11 @@ var tokenreviewsKind = v1beta1.SchemeGroupVersion.WithKind("TokenReview") // Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. func (c *FakeTokenReviews) Create(ctx context.Context, tokenReview *v1beta1.TokenReview, opts v1.CreateOptions) (result *v1beta1.TokenReview, err error) { + emptyResult := &v1beta1.TokenReview{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(tokenreviewsResource, tokenReview), &v1beta1.TokenReview{}) + Invokes(testing.NewRootCreateActionWithOptions(tokenreviewsResource, tokenReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.TokenReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go index 9d54826a31..d083ba8fa9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go @@ -23,8 +23,8 @@ import ( v1beta1 "k8s.io/api/authentication/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SelfSubjectReviewsGetter has a method to return a SelfSubjectReviewInterface. 
@@ -41,24 +41,17 @@ type SelfSubjectReviewInterface interface { // selfSubjectReviews implements SelfSubjectReviewInterface type selfSubjectReviews struct { - client rest.Interface + *gentype.Client[*v1beta1.SelfSubjectReview] } // newSelfSubjectReviews returns a SelfSubjectReviews func newSelfSubjectReviews(c *AuthenticationV1beta1Client) *selfSubjectReviews { return &selfSubjectReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1beta1.SelfSubjectReview]( + "selfsubjectreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.SelfSubjectReview { return &v1beta1.SelfSubjectReview{} }), } } - -// Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. -func (c *selfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1beta1.SelfSubjectReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectReview, err error) { - result = &v1beta1.SelfSubjectReview{} - err = c.client.Post(). - Resource("selfsubjectreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(selfSubjectReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go index 5da1224337..982534935e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go @@ -23,8 +23,8 @@ import ( v1beta1 "k8s.io/api/authentication/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // TokenReviewsGetter has a method to return a TokenReviewInterface. @@ -41,24 +41,17 @@ type TokenReviewInterface interface { // tokenReviews implements TokenReviewInterface type tokenReviews struct { - client rest.Interface + *gentype.Client[*v1beta1.TokenReview] } // newTokenReviews returns a TokenReviews func newTokenReviews(c *AuthenticationV1beta1Client) *tokenReviews { return &tokenReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1beta1.TokenReview]( + "tokenreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.TokenReview { return &v1beta1.TokenReview{} }), } } - -// Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. -func (c *tokenReviews) Create(ctx context.Context, tokenReview *v1beta1.TokenReview, opts v1.CreateOptions) (result *v1beta1.TokenReview, err error) { - result = &v1beta1.TokenReview{} - err = c.client.Post(). - Resource("tokenreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(tokenReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go index 43ea05328c..dd23481d39 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go @@ -38,11 +38,12 @@ var localsubjectaccessreviewsKind = v1.SchemeGroupVersion.WithKind("LocalSubject // Create takes the representation of a localSubjectAccessReview and creates it. 
Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. func (c *FakeLocalSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1.LocalSubjectAccessReview, opts metav1.CreateOptions) (result *v1.LocalSubjectAccessReview, err error) { + emptyResult := &v1.LocalSubjectAccessReview{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(localsubjectaccessreviewsResource, c.ns, localSubjectAccessReview), &v1.LocalSubjectAccessReview{}) + Invokes(testing.NewCreateActionWithOptions(localsubjectaccessreviewsResource, c.ns, localSubjectAccessReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.LocalSubjectAccessReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go index 27642266d6..d04b8502f3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go @@ -37,10 +37,11 @@ var selfsubjectaccessreviewsKind = v1.SchemeGroupVersion.WithKind("SelfSubjectAc // Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. func (c *FakeSelfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1.SelfSubjectAccessReview, opts metav1.CreateOptions) (result *v1.SelfSubjectAccessReview, err error) { + emptyResult := &v1.SelfSubjectAccessReview{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(selfsubjectaccessreviewsResource, selfSubjectAccessReview), &v1.SelfSubjectAccessReview{}) + Invokes(testing.NewRootCreateActionWithOptions(selfsubjectaccessreviewsResource, selfSubjectAccessReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.SelfSubjectAccessReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go index cd6c682d16..71ed326f8b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go @@ -37,10 +37,11 @@ var selfsubjectrulesreviewsKind = v1.SchemeGroupVersion.WithKind("SelfSubjectRul // Create takes the representation of a selfSubjectRulesReview and creates it. Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. func (c *FakeSelfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1.SelfSubjectRulesReview, opts metav1.CreateOptions) (result *v1.SelfSubjectRulesReview, err error) { + emptyResult := &v1.SelfSubjectRulesReview{} obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(selfsubjectrulesreviewsResource, selfSubjectRulesReview), &v1.SelfSubjectRulesReview{}) + Invokes(testing.NewRootCreateActionWithOptions(selfsubjectrulesreviewsResource, selfSubjectRulesReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.SelfSubjectRulesReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go index 09dab64807..358ba9aa77 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go @@ -37,10 +37,11 @@ var subjectaccessreviewsKind = v1.SchemeGroupVersion.WithKind("SubjectAccessRevi // Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReview, and an error, if there is any. func (c *FakeSubjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1.SubjectAccessReview, opts metav1.CreateOptions) (result *v1.SubjectAccessReview, err error) { + emptyResult := &v1.SubjectAccessReview{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(subjectaccessreviewsResource, subjectAccessReview), &v1.SubjectAccessReview{}) + Invokes(testing.NewRootCreateActionWithOptions(subjectaccessreviewsResource, subjectAccessReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.SubjectAccessReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go index 84b2efe166..3d058941a2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go @@ -23,8 +23,8 @@ import ( v1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface. @@ -41,27 +41,17 @@ type LocalSubjectAccessReviewInterface interface { // localSubjectAccessReviews implements LocalSubjectAccessReviewInterface type localSubjectAccessReviews struct { - client rest.Interface - ns string + *gentype.Client[*v1.LocalSubjectAccessReview] } // newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews func newLocalSubjectAccessReviews(c *AuthorizationV1Client, namespace string) *localSubjectAccessReviews { return &localSubjectAccessReviews{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClient[*v1.LocalSubjectAccessReview]( + "localsubjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.LocalSubjectAccessReview { return &v1.LocalSubjectAccessReview{} }), } } - -// Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. 
-func (c *localSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1.LocalSubjectAccessReview, opts metav1.CreateOptions) (result *v1.LocalSubjectAccessReview, err error) { - result = &v1.LocalSubjectAccessReview{} - err = c.client.Post(). - Namespace(c.ns). - Resource("localsubjectaccessreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(localSubjectAccessReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go index 2006196c11..9e874bee5a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go @@ -23,8 +23,8 @@ import ( v1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface. @@ -41,24 +41,17 @@ type SelfSubjectAccessReviewInterface interface { // selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface type selfSubjectAccessReviews struct { - client rest.Interface + *gentype.Client[*v1.SelfSubjectAccessReview] } // newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews func newSelfSubjectAccessReviews(c *AuthorizationV1Client) *selfSubjectAccessReviews { return &selfSubjectAccessReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1.SelfSubjectAccessReview]( + "selfsubjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.SelfSubjectAccessReview { return &v1.SelfSubjectAccessReview{} }), } } - -// Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. -func (c *selfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1.SelfSubjectAccessReview, opts metav1.CreateOptions) (result *v1.SelfSubjectAccessReview, err error) { - result = &v1.SelfSubjectAccessReview{} - err = c.client.Post(). - Resource("selfsubjectaccessreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(selfSubjectAccessReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go index 25d99f7b52..567b63ec4c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go @@ -23,8 +23,8 @@ import ( v1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SelfSubjectRulesReviewsGetter has a method to return a SelfSubjectRulesReviewInterface. 
@@ -41,24 +41,17 @@ type SelfSubjectRulesReviewInterface interface { // selfSubjectRulesReviews implements SelfSubjectRulesReviewInterface type selfSubjectRulesReviews struct { - client rest.Interface + *gentype.Client[*v1.SelfSubjectRulesReview] } // newSelfSubjectRulesReviews returns a SelfSubjectRulesReviews func newSelfSubjectRulesReviews(c *AuthorizationV1Client) *selfSubjectRulesReviews { return &selfSubjectRulesReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1.SelfSubjectRulesReview]( + "selfsubjectrulesreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.SelfSubjectRulesReview { return &v1.SelfSubjectRulesReview{} }), } } - -// Create takes the representation of a selfSubjectRulesReview and creates it. Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. -func (c *selfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1.SelfSubjectRulesReview, opts metav1.CreateOptions) (result *v1.SelfSubjectRulesReview, err error) { - result = &v1.SelfSubjectRulesReview{} - err = c.client.Post(). - Resource("selfsubjectrulesreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(selfSubjectRulesReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go index 8ac0566a2e..52e8d74e57 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go @@ -23,8 +23,8 @@ import ( v1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SubjectAccessReviewsGetter has a method to return a SubjectAccessReviewInterface. @@ -41,24 +41,17 @@ type SubjectAccessReviewInterface interface { // subjectAccessReviews implements SubjectAccessReviewInterface type subjectAccessReviews struct { - client rest.Interface + *gentype.Client[*v1.SubjectAccessReview] } // newSubjectAccessReviews returns a SubjectAccessReviews func newSubjectAccessReviews(c *AuthorizationV1Client) *subjectAccessReviews { return &subjectAccessReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1.SubjectAccessReview]( + "subjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.SubjectAccessReview { return &v1.SubjectAccessReview{} }), } } - -// Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReview, and an error, if there is any. -func (c *subjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1.SubjectAccessReview, opts metav1.CreateOptions) (result *v1.SubjectAccessReview, err error) { - result = &v1.SubjectAccessReview{} - err = c.client.Post(). - Resource("subjectaccessreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(subjectAccessReview). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go index 104e979d19..e2bf627736 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go @@ -38,11 +38,12 @@ var localsubjectaccessreviewsKind = v1beta1.SchemeGroupVersion.WithKind("LocalSu // Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. func (c *FakeLocalSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1beta1.LocalSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.LocalSubjectAccessReview, err error) { + emptyResult := &v1beta1.LocalSubjectAccessReview{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(localsubjectaccessreviewsResource, c.ns, localSubjectAccessReview), &v1beta1.LocalSubjectAccessReview{}) + Invokes(testing.NewCreateActionWithOptions(localsubjectaccessreviewsResource, c.ns, localSubjectAccessReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.LocalSubjectAccessReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go index 517e48b760..996e4d4108 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go @@ -37,10 +37,11 @@ var selfsubjectaccessreviewsKind = v1beta1.SchemeGroupVersion.WithKind("SelfSubj // Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. func (c *FakeSelfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectAccessReview, err error) { + emptyResult := &v1beta1.SelfSubjectAccessReview{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(selfsubjectaccessreviewsResource, selfSubjectAccessReview), &v1beta1.SelfSubjectAccessReview{}) + Invokes(testing.NewRootCreateActionWithOptions(selfsubjectaccessreviewsResource, selfSubjectAccessReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.SelfSubjectAccessReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go index 3aed050fcf..6e4c758909 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go @@ -37,10 +37,11 @@ var selfsubjectrulesreviewsKind = v1beta1.SchemeGroupVersion.WithKind("SelfSubje // Create takes the representation of a selfSubjectRulesReview and creates it. 
Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. func (c *FakeSelfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1beta1.SelfSubjectRulesReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectRulesReview, err error) { + emptyResult := &v1beta1.SelfSubjectRulesReview{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(selfsubjectrulesreviewsResource, selfSubjectRulesReview), &v1beta1.SelfSubjectRulesReview{}) + Invokes(testing.NewRootCreateActionWithOptions(selfsubjectrulesreviewsResource, selfSubjectRulesReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.SelfSubjectRulesReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go index e9bfa521a2..aab6e08dc2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go @@ -37,10 +37,11 @@ var subjectaccessreviewsKind = v1beta1.SchemeGroupVersion.WithKind("SubjectAcces // Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReview, and an error, if there is any. func (c *FakeSubjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1beta1.SubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SubjectAccessReview, err error) { + emptyResult := &v1beta1.SubjectAccessReview{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(subjectaccessreviewsResource, subjectAccessReview), &v1beta1.SubjectAccessReview{}) + Invokes(testing.NewRootCreateActionWithOptions(subjectaccessreviewsResource, subjectAccessReview, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.SubjectAccessReview), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go index 78584ba945..302c094b39 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go @@ -23,8 +23,8 @@ import ( v1beta1 "k8s.io/api/authorization/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface. 
@@ -41,27 +41,17 @@ type LocalSubjectAccessReviewInterface interface { // localSubjectAccessReviews implements LocalSubjectAccessReviewInterface type localSubjectAccessReviews struct { - client rest.Interface - ns string + *gentype.Client[*v1beta1.LocalSubjectAccessReview] } // newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews func newLocalSubjectAccessReviews(c *AuthorizationV1beta1Client, namespace string) *localSubjectAccessReviews { return &localSubjectAccessReviews{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClient[*v1beta1.LocalSubjectAccessReview]( + "localsubjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.LocalSubjectAccessReview { return &v1beta1.LocalSubjectAccessReview{} }), } } - -// Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. -func (c *localSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1beta1.LocalSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.LocalSubjectAccessReview, err error) { - result = &v1beta1.LocalSubjectAccessReview{} - err = c.client.Post(). - Namespace(c.ns). - Resource("localsubjectaccessreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(localSubjectAccessReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go index 0286c93fe6..4b413dc4f0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go @@ -23,8 +23,8 @@ import ( v1beta1 "k8s.io/api/authorization/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface. @@ -41,24 +41,17 @@ type SelfSubjectAccessReviewInterface interface { // selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface type selfSubjectAccessReviews struct { - client rest.Interface + *gentype.Client[*v1beta1.SelfSubjectAccessReview] } // newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews func newSelfSubjectAccessReviews(c *AuthorizationV1beta1Client) *selfSubjectAccessReviews { return &selfSubjectAccessReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1beta1.SelfSubjectAccessReview]( + "selfsubjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.SelfSubjectAccessReview { return &v1beta1.SelfSubjectAccessReview{} }), } } - -// Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. -func (c *selfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectAccessReview, err error) { - result = &v1beta1.SelfSubjectAccessReview{} - err = c.client.Post(). - Resource("selfsubjectaccessreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(selfSubjectAccessReview). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go index d772973ec6..b64cec3015 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go @@ -23,8 +23,8 @@ import ( v1beta1 "k8s.io/api/authorization/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SelfSubjectRulesReviewsGetter has a method to return a SelfSubjectRulesReviewInterface. @@ -41,24 +41,17 @@ type SelfSubjectRulesReviewInterface interface { // selfSubjectRulesReviews implements SelfSubjectRulesReviewInterface type selfSubjectRulesReviews struct { - client rest.Interface + *gentype.Client[*v1beta1.SelfSubjectRulesReview] } // newSelfSubjectRulesReviews returns a SelfSubjectRulesReviews func newSelfSubjectRulesReviews(c *AuthorizationV1beta1Client) *selfSubjectRulesReviews { return &selfSubjectRulesReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1beta1.SelfSubjectRulesReview]( + "selfsubjectrulesreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.SelfSubjectRulesReview { return &v1beta1.SelfSubjectRulesReview{} }), } } - -// Create takes the representation of a selfSubjectRulesReview and creates it. Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. -func (c *selfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1beta1.SelfSubjectRulesReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectRulesReview, err error) { - result = &v1beta1.SelfSubjectRulesReview{} - err = c.client.Post(). - Resource("selfsubjectrulesreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(selfSubjectRulesReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go index aebe8398c0..3fca833a1b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go @@ -23,8 +23,8 @@ import ( v1beta1 "k8s.io/api/authorization/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SubjectAccessReviewsGetter has a method to return a SubjectAccessReviewInterface. @@ -41,24 +41,17 @@ type SubjectAccessReviewInterface interface { // subjectAccessReviews implements SubjectAccessReviewInterface type subjectAccessReviews struct { - client rest.Interface + *gentype.Client[*v1beta1.SubjectAccessReview] } // newSubjectAccessReviews returns a SubjectAccessReviews func newSubjectAccessReviews(c *AuthorizationV1beta1Client) *subjectAccessReviews { return &subjectAccessReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1beta1.SubjectAccessReview]( + "subjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.SubjectAccessReview { return &v1beta1.SubjectAccessReview{} }), } } - -// Create takes the representation of a subjectAccessReview and creates it. 
Returns the server's representation of the subjectAccessReview, and an error, if there is any. -func (c *subjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1beta1.SubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SubjectAccessReview, err error) { - result = &v1beta1.SubjectAccessReview{} - err = c.client.Post(). - Resource("subjectaccessreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(subjectAccessReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go index a2c95b7539..23e2c391dd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go @@ -44,22 +44,24 @@ var horizontalpodautoscalersKind = v1.SchemeGroupVersion.WithKind("HorizontalPod // Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) { + emptyResult := &v1.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v1.HorizontalPodAutoscaler{}) + Invokes(testing.NewGetActionWithOptions(horizontalpodautoscalersResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.HorizontalPodAutoscaler), err } // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { + emptyResult := &v1.HorizontalPodAutoscalerList{} obj, err := c.Fake. - Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &v1.HorizontalPodAutoscalerList{}) + Invokes(testing.NewListActionWithOptions(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts metav1.Lis // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(horizontalpodautoscalersResource, c.ns, opts)) } // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (result *v1.HorizontalPodAutoscaler, err error) { + emptyResult := &v1.HorizontalPodAutoscaler{} obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v1.HorizontalPodAutoscaler{}) + Invokes(testing.NewCreateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.HorizontalPodAutoscaler), err } // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) { + emptyResult := &v1.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v1.HorizontalPodAutoscaler{}) + Invokes(testing.NewUpdateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.HorizontalPodAutoscaler), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v1.HorizontalPodAutoscaler, error) { +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) { + emptyResult := &v1.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v1.HorizontalPodAutoscaler{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.HorizontalPodAutoscaler), err } @@ -126,7 +131,7 @@ func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string, // DeleteCollection deletes a collection of objects. func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(horizontalpodautoscalersResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.HorizontalPodAutoscalerList{}) return err @@ -134,11 +139,12 @@ func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opt // Patch applies the patch and returns the patched horizontalPodAutoscaler. func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) { + emptyResult := &v1.HorizontalPodAutoscaler{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v1.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.HorizontalPodAutoscaler), err } @@ -156,11 +162,12 @@ func (c *FakeHorizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodA if name == nil { return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") } + emptyResult := &v1.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data), &v1.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.HorizontalPodAutoscaler), err } @@ -179,11 +186,12 @@ func (c *FakeHorizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizont if name == nil { return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") } + emptyResult := &v1.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.HorizontalPodAutoscaler), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go index 19afde66db..4d29ac5227 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" autoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. @@ -43,6 +40,7 @@ type HorizontalPodAutoscalersGetter interface { type HorizontalPodAutoscalerInterface interface { Create(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (*v1.HorizontalPodAutoscaler, error) Update(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v1.HorizontalPodAutoscaler, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v1.HorizontalPodAutoscaler, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,206 +49,25 @@ type HorizontalPodAutoscalerInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } // horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface type horizontalPodAutoscalers struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.HorizontalPodAutoscaler, *v1.HorizontalPodAutoscalerList, *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration] } // newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers func newHorizontalPodAutoscalers(c *AutoscalingV1Client, namespace string) *horizontalPodAutoscalers { return &horizontalPodAutoscalers{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.HorizontalPodAutoscaler, *v1.HorizontalPodAutoscalerList, *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration]( + "horizontalpodautoscalers", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.HorizontalPodAutoscaler { return &v1.HorizontalPodAutoscaler{} }, + func() *v1.HorizontalPodAutoscalerList { return &v1.HorizontalPodAutoscalerList{} }), } } - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.HorizontalPodAutoscalerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. 
-func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Post(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. 
-func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *horizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go index cfcc208232..2ca3d27c94 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go @@ -44,22 +44,24 @@ var horizontalpodautoscalersKind = v2.SchemeGroupVersion.WithKind("HorizontalPod // Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.HorizontalPodAutoscaler, err error) { + emptyResult := &v2.HorizontalPodAutoscaler{} obj, err := c.Fake. 
- Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v2.HorizontalPodAutoscaler{}) + Invokes(testing.NewGetActionWithOptions(horizontalpodautoscalersResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2.HorizontalPodAutoscaler), err } // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2.HorizontalPodAutoscalerList, err error) { + emptyResult := &v2.HorizontalPodAutoscalerList{} obj, err := c.Fake. - Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &v2.HorizontalPodAutoscalerList{}) + Invokes(testing.NewListActionWithOptions(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOpt // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(horizontalpodautoscalersResource, c.ns, opts)) } // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2.HorizontalPodAutoscaler, err error) { + emptyResult := &v2.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2.HorizontalPodAutoscaler{}) + Invokes(testing.NewCreateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2.HorizontalPodAutoscaler), err } // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2.HorizontalPodAutoscaler, err error) { + emptyResult := &v2.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2.HorizontalPodAutoscaler{}) + Invokes(testing.NewUpdateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2.HorizontalPodAutoscaler), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2.HorizontalPodAutoscaler, error) { +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2.HorizontalPodAutoscaler, err error) { + emptyResult := &v2.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v2.HorizontalPodAutoscaler{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2.HorizontalPodAutoscaler), err } @@ -126,7 +131,7 @@ func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string, // DeleteCollection deletes a collection of objects. func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(horizontalpodautoscalersResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v2.HorizontalPodAutoscalerList{}) return err @@ -134,11 +139,12 @@ func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opt // Patch applies the patch and returns the patched horizontalPodAutoscaler. func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.HorizontalPodAutoscaler, err error) { + emptyResult := &v2.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v2.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2.HorizontalPodAutoscaler), err } @@ -156,11 +162,12 @@ func (c *FakeHorizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodA if name == nil { return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") } + emptyResult := &v2.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data), &v2.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2.HorizontalPodAutoscaler), err } @@ -179,11 +186,12 @@ func (c *FakeHorizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizont if name == nil { return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") } + emptyResult := &v2.HorizontalPodAutoscaler{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v2.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2.HorizontalPodAutoscaler), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go index 3a077d71da..dbce8d1020 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go @@ -20,17 +20,14 @@ package v2 import ( "context" - json "encoding/json" - "fmt" - "time" v2 "k8s.io/api/autoscaling/v2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" autoscalingv2 "k8s.io/client-go/applyconfigurations/autoscaling/v2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. @@ -43,6 +40,7 @@ type HorizontalPodAutoscalersGetter interface { type HorizontalPodAutoscalerInterface interface { Create(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2.HorizontalPodAutoscaler, error) Update(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2.HorizontalPodAutoscaler, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2.HorizontalPodAutoscaler, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type HorizontalPodAutoscalerInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.HorizontalPodAutoscaler, err error) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } // horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface type horizontalPodAutoscalers struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v2.HorizontalPodAutoscaler, *v2.HorizontalPodAutoscalerList, *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration] } // newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers func newHorizontalPodAutoscalers(c *AutoscalingV2Client, namespace string) *horizontalPodAutoscalers { return &horizontalPodAutoscalers{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v2.HorizontalPodAutoscaler, *v2.HorizontalPodAutoscalerList, *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration]( + "horizontalpodautoscalers", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v2.HorizontalPodAutoscaler { return &v2.HorizontalPodAutoscaler{} }, + func() *v2.HorizontalPodAutoscalerList { return &v2.HorizontalPodAutoscalerList{} }), } } - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.HorizontalPodAutoscaler, err error) { - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2.HorizontalPodAutoscalerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2.HorizontalPodAutoscalerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2.HorizontalPodAutoscaler, err error) { - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Post(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. 
Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2.HorizontalPodAutoscaler, err error) { - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2.HorizontalPodAutoscaler, err error) { - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.HorizontalPodAutoscaler, err error) { - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). 
- Resource("horizontalpodautoscalers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *horizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go index 0b2658e642..7f99b5e8fc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go @@ -44,22 +44,24 @@ var horizontalpodautoscalersKind = v2beta1.SchemeGroupVersion.WithKind("Horizont // Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { + emptyResult := &v2beta1.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v2beta1.HorizontalPodAutoscaler{}) + Invokes(testing.NewGetActionWithOptions(horizontalpodautoscalersResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta1.HorizontalPodAutoscaler), err } // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) { + emptyResult := &v2beta1.HorizontalPodAutoscalerList{} obj, err := c.Fake. - Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &v2beta1.HorizontalPodAutoscalerList{}) + Invokes(testing.NewListActionWithOptions(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOpt // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. 
func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(horizontalpodautoscalersResource, c.ns, opts)) } // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { + emptyResult := &v2beta1.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta1.HorizontalPodAutoscaler{}) + Invokes(testing.NewCreateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta1.HorizontalPodAutoscaler), err } // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { + emptyResult := &v2beta1.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta1.HorizontalPodAutoscaler{}) + Invokes(testing.NewUpdateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta1.HorizontalPodAutoscaler), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error) { +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { + emptyResult := &v2beta1.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v2beta1.HorizontalPodAutoscaler{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta1.HorizontalPodAutoscaler), err } @@ -126,7 +131,7 @@ func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string, // DeleteCollection deletes a collection of objects. 
func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(horizontalpodautoscalersResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v2beta1.HorizontalPodAutoscalerList{}) return err @@ -134,11 +139,12 @@ func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opt // Patch applies the patch and returns the patched horizontalPodAutoscaler. func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) { + emptyResult := &v2beta1.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v2beta1.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta1.HorizontalPodAutoscaler), err } @@ -156,11 +162,12 @@ func (c *FakeHorizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodA if name == nil { return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") } + emptyResult := &v2beta1.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data), &v2beta1.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta1.HorizontalPodAutoscaler), err } @@ -179,11 +186,12 @@ func (c *FakeHorizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizont if name == nil { return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") } + emptyResult := &v2beta1.HorizontalPodAutoscaler{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v2beta1.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta1.HorizontalPodAutoscaler), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go index 5080912a12..6bc1b77766 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -20,17 +20,14 @@ package v2beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v2beta1 "k8s.io/api/autoscaling/v2beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" autoscalingv2beta1 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. @@ -43,6 +40,7 @@ type HorizontalPodAutoscalersGetter interface { type HorizontalPodAutoscalerInterface interface { Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2beta1.HorizontalPodAutoscaler, error) Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type HorizontalPodAutoscalerInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } // horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface type horizontalPodAutoscalers struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v2beta1.HorizontalPodAutoscaler, *v2beta1.HorizontalPodAutoscalerList, *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration] } // newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers func newHorizontalPodAutoscalers(c *AutoscalingV2beta1Client, namespace string) *horizontalPodAutoscalers { return &horizontalPodAutoscalers{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v2beta1.HorizontalPodAutoscaler, *v2beta1.HorizontalPodAutoscalerList, *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration]( + "horizontalpodautoscalers", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v2beta1.HorizontalPodAutoscaler { return &v2beta1.HorizontalPodAutoscaler{} }, + func() *v2beta1.HorizontalPodAutoscalerList { return &v2beta1.HorizontalPodAutoscalerList{} }), } } - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2beta1.HorizontalPodAutoscalerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Post(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) { - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. 
-func (c *horizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *horizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go index 0a7c93c3d3..e037e8ac48 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go @@ -44,22 +44,24 @@ var horizontalpodautoscalersKind = v2beta2.SchemeGroupVersion.WithKind("Horizont // Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { + emptyResult := &v2beta2.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v2beta2.HorizontalPodAutoscaler{}) + Invokes(testing.NewGetActionWithOptions(horizontalpodautoscalersResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta2.HorizontalPodAutoscaler), err } // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. 
func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta2.HorizontalPodAutoscalerList, err error) { + emptyResult := &v2beta2.HorizontalPodAutoscalerList{} obj, err := c.Fake. - Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &v2beta2.HorizontalPodAutoscalerList{}) + Invokes(testing.NewListActionWithOptions(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOpt // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(horizontalpodautoscalersResource, c.ns, opts)) } // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { + emptyResult := &v2beta2.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta2.HorizontalPodAutoscaler{}) + Invokes(testing.NewCreateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta2.HorizontalPodAutoscaler), err } // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { + emptyResult := &v2beta2.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta2.HorizontalPodAutoscaler{}) + Invokes(testing.NewUpdateActionWithOptions(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta2.HorizontalPodAutoscaler), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error) { +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { + emptyResult := &v2beta2.HorizontalPodAutoscaler{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v2beta2.HorizontalPodAutoscaler{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta2.HorizontalPodAutoscaler), err } @@ -126,7 +131,7 @@ func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string, // DeleteCollection deletes a collection of objects. func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(horizontalpodautoscalersResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v2beta2.HorizontalPodAutoscalerList{}) return err @@ -134,11 +139,12 @@ func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opt // Patch applies the patch and returns the patched horizontalPodAutoscaler. func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) { + emptyResult := &v2beta2.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v2beta2.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta2.HorizontalPodAutoscaler), err } @@ -156,11 +162,12 @@ func (c *FakeHorizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodA if name == nil { return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") } + emptyResult := &v2beta2.HorizontalPodAutoscaler{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data), &v2beta2.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta2.HorizontalPodAutoscaler), err } @@ -179,11 +186,12 @@ func (c *FakeHorizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizont if name == nil { return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") } + emptyResult := &v2beta2.HorizontalPodAutoscaler{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v2beta2.HorizontalPodAutoscaler{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v2beta2.HorizontalPodAutoscaler), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go index 0ddb9108b3..6f464661a9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -20,17 +20,14 @@ package v2beta2 import ( "context" - json "encoding/json" - "fmt" - "time" v2beta2 "k8s.io/api/autoscaling/v2beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" autoscalingv2beta2 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. @@ -43,6 +40,7 @@ type HorizontalPodAutoscalersGetter interface { type HorizontalPodAutoscalerInterface interface { Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2beta2.HorizontalPodAutoscaler, error) Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type HorizontalPodAutoscalerInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } // horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface type horizontalPodAutoscalers struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v2beta2.HorizontalPodAutoscaler, *v2beta2.HorizontalPodAutoscalerList, *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration] } // newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers func newHorizontalPodAutoscalers(c *AutoscalingV2beta2Client, namespace string) *horizontalPodAutoscalers { return &horizontalPodAutoscalers{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v2beta2.HorizontalPodAutoscaler, *v2beta2.HorizontalPodAutoscalerList, *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration]( + "horizontalpodautoscalers", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v2beta2.HorizontalPodAutoscaler { return &v2beta2.HorizontalPodAutoscaler{} }, + func() *v2beta2.HorizontalPodAutoscalerList { return &v2beta2.HorizontalPodAutoscalerList{} }), } } - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta2.HorizontalPodAutoscalerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2beta2.HorizontalPodAutoscalerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Post(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) { - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. 
-func (c *horizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *horizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go index 9250263215..7907a5bf56 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/batch/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" batchv1 "k8s.io/client-go/applyconfigurations/batch/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CronJobsGetter has a method to return a CronJobInterface. @@ -43,6 +40,7 @@ type CronJobsGetter interface { type CronJobInterface interface { Create(ctx context.Context, cronJob *v1.CronJob, opts metav1.CreateOptions) (*v1.CronJob, error) Update(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (*v1.CronJob, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (*v1.CronJob, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,206 +49,25 @@ type CronJobInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CronJob, err error) Apply(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) CronJobExpansion } // cronJobs implements CronJobInterface type cronJobs struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.CronJob, *v1.CronJobList, *batchv1.CronJobApplyConfiguration] } // newCronJobs returns a CronJobs func newCronJobs(c *BatchV1Client, namespace string) *cronJobs { return &cronJobs{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.CronJob, *v1.CronJobList, *batchv1.CronJobApplyConfiguration]( + "cronjobs", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.CronJob { return &v1.CronJob{} }, + func() *v1.CronJobList { return &v1.CronJobList{} }), } } - -// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. -func (c *cronJobs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CronJob, err error) { - result = &v1.CronJob{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CronJobs that match those selectors. -func (c *cronJobs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CronJobList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CronJobList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cronJobs. -func (c *cronJobs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Create(ctx context.Context, cronJob *v1.CronJob, opts metav1.CreateOptions) (result *v1.CronJob, err error) { - result = &v1.CronJob{} - err = c.client.Post(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cronJob). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Update(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (result *v1.CronJob, err error) { - result = &v1.CronJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cronjobs"). - Name(cronJob.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cronJob). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *cronJobs) UpdateStatus(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (result *v1.CronJob, err error) { - result = &v1.CronJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cronjobs"). - Name(cronJob.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cronJob). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cronJob and deletes it. Returns an error if one occurs. -func (c *cronJobs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cronJobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cronJob. -func (c *cronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CronJob, err error) { - result = &v1.CronJob{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cronJob. -func (c *cronJobs) Apply(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) { - if cronJob == nil { - return nil, fmt.Errorf("cronJob provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cronJob) - if err != nil { - return nil, err - } - name := cronJob.Name - if name == nil { - return nil, fmt.Errorf("cronJob.Name must be provided to Apply") - } - result = &v1.CronJob{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("cronjobs"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *cronJobs) ApplyStatus(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) { - if cronJob == nil { - return nil, fmt.Errorf("cronJob provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cronJob) - if err != nil { - return nil, err - } - - name := cronJob.Name - if name == nil { - return nil, fmt.Errorf("cronJob.Name must be provided to Apply") - } - - result = &v1.CronJob{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("cronjobs"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go index 0cbcce6d81..171bb82329 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go @@ -44,22 +44,24 @@ var cronjobsKind = v1.SchemeGroupVersion.WithKind("CronJob") // Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. func (c *FakeCronJobs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CronJob, err error) { + emptyResult := &v1.CronJob{} obj, err := c.Fake. - Invokes(testing.NewGetAction(cronjobsResource, c.ns, name), &v1.CronJob{}) + Invokes(testing.NewGetActionWithOptions(cronjobsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CronJob), err } // List takes label and field selectors, and returns the list of CronJobs that match those selectors. func (c *FakeCronJobs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CronJobList, err error) { + emptyResult := &v1.CronJobList{} obj, err := c.Fake. - Invokes(testing.NewListAction(cronjobsResource, cronjobsKind, c.ns, opts), &v1.CronJobList{}) + Invokes(testing.NewListActionWithOptions(cronjobsResource, cronjobsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeCronJobs) List(ctx context.Context, opts metav1.ListOptions) (resul // Watch returns a watch.Interface that watches the requested cronJobs. func (c *FakeCronJobs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(cronjobsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(cronjobsResource, c.ns, opts)) } // Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. func (c *FakeCronJobs) Create(ctx context.Context, cronJob *v1.CronJob, opts metav1.CreateOptions) (result *v1.CronJob, err error) { + emptyResult := &v1.CronJob{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(cronjobsResource, c.ns, cronJob), &v1.CronJob{}) + Invokes(testing.NewCreateActionWithOptions(cronjobsResource, c.ns, cronJob, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CronJob), err } // Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. 
func (c *FakeCronJobs) Update(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (result *v1.CronJob, err error) { + emptyResult := &v1.CronJob{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(cronjobsResource, c.ns, cronJob), &v1.CronJob{}) + Invokes(testing.NewUpdateActionWithOptions(cronjobsResource, c.ns, cronJob, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CronJob), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (*v1.CronJob, error) { +func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (result *v1.CronJob, err error) { + emptyResult := &v1.CronJob{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(cronjobsResource, "status", c.ns, cronJob), &v1.CronJob{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(cronjobsResource, "status", c.ns, cronJob, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CronJob), err } @@ -126,7 +131,7 @@ func (c *FakeCronJobs) Delete(ctx context.Context, name string, opts metav1.Dele // DeleteCollection deletes a collection of objects. func (c *FakeCronJobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(cronjobsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(cronjobsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.CronJobList{}) return err @@ -134,11 +139,12 @@ func (c *FakeCronJobs) DeleteCollection(ctx context.Context, opts metav1.DeleteO // Patch applies the patch and returns the patched cronJob. func (c *FakeCronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CronJob, err error) { + emptyResult := &v1.CronJob{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, pt, data, subresources...), &v1.CronJob{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(cronjobsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CronJob), err } @@ -156,11 +162,12 @@ func (c *FakeCronJobs) Apply(ctx context.Context, cronJob *batchv1.CronJobApplyC if name == nil { return nil, fmt.Errorf("cronJob.Name must be provided to Apply") } + emptyResult := &v1.CronJob{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, *name, types.ApplyPatchType, data), &v1.CronJob{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CronJob), err } @@ -179,11 +186,12 @@ func (c *FakeCronJobs) ApplyStatus(ctx context.Context, cronJob *batchv1.CronJob if name == nil { return nil, fmt.Errorf("cronJob.Name must be provided to Apply") } + emptyResult := &v1.CronJob{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.CronJob{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CronJob), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go index cf1a913bdf..23e66953cb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go @@ -44,22 +44,24 @@ var jobsKind = v1.SchemeGroupVersion.WithKind("Job") // Get takes name of the job, and returns the corresponding job object, and an error if there is any. func (c *FakeJobs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Job, err error) { + emptyResult := &v1.Job{} obj, err := c.Fake. - Invokes(testing.NewGetAction(jobsResource, c.ns, name), &v1.Job{}) + Invokes(testing.NewGetActionWithOptions(jobsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Job), err } // List takes label and field selectors, and returns the list of Jobs that match those selectors. func (c *FakeJobs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.JobList, err error) { + emptyResult := &v1.JobList{} obj, err := c.Fake. - Invokes(testing.NewListAction(jobsResource, jobsKind, c.ns, opts), &v1.JobList{}) + Invokes(testing.NewListActionWithOptions(jobsResource, jobsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeJobs) List(ctx context.Context, opts metav1.ListOptions) (result *v // Watch returns a watch.Interface that watches the requested jobs. func (c *FakeJobs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(jobsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(jobsResource, c.ns, opts)) } // Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. func (c *FakeJobs) Create(ctx context.Context, job *v1.Job, opts metav1.CreateOptions) (result *v1.Job, err error) { + emptyResult := &v1.Job{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(jobsResource, c.ns, job), &v1.Job{}) + Invokes(testing.NewCreateActionWithOptions(jobsResource, c.ns, job, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Job), err } // Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. func (c *FakeJobs) Update(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) { + emptyResult := &v1.Job{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(jobsResource, c.ns, job), &v1.Job{}) + Invokes(testing.NewUpdateActionWithOptions(jobsResource, c.ns, job, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Job), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakeJobs) UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error) { +func (c *FakeJobs) UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) { + emptyResult := &v1.Job{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(jobsResource, "status", c.ns, job), &v1.Job{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(jobsResource, "status", c.ns, job, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Job), err } @@ -126,7 +131,7 @@ func (c *FakeJobs) Delete(ctx context.Context, name string, opts metav1.DeleteOp // DeleteCollection deletes a collection of objects. func (c *FakeJobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(jobsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(jobsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.JobList{}) return err @@ -134,11 +139,12 @@ func (c *FakeJobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptio // Patch applies the patch and returns the patched job. func (c *FakeJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Job, err error) { + emptyResult := &v1.Job{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, name, pt, data, subresources...), &v1.Job{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(jobsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Job), err } @@ -156,11 +162,12 @@ func (c *FakeJobs) Apply(ctx context.Context, job *batchv1.JobApplyConfiguration if name == nil { return nil, fmt.Errorf("job.Name must be provided to Apply") } + emptyResult := &v1.Job{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Job{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(jobsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Job), err } @@ -179,11 +186,12 @@ func (c *FakeJobs) ApplyStatus(ctx context.Context, job *batchv1.JobApplyConfigu if name == nil { return nil, fmt.Errorf("job.Name must be provided to Apply") } + emptyResult := &v1.Job{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.Job{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(jobsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Job), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go index c076c80af2..83dbe6fa4f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/batch/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" batchv1 "k8s.io/client-go/applyconfigurations/batch/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // JobsGetter has a method to return a JobInterface. @@ -43,6 +40,7 @@ type JobsGetter interface { type JobInterface interface { Create(ctx context.Context, job *v1.Job, opts metav1.CreateOptions) (*v1.Job, error) Update(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,206 +49,25 @@ type JobInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Job, err error) Apply(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) JobExpansion } // jobs implements JobInterface type jobs struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Job, *v1.JobList, *batchv1.JobApplyConfiguration] } // newJobs returns a Jobs func newJobs(c *BatchV1Client, namespace string) *jobs { return &jobs{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Job, *v1.JobList, *batchv1.JobApplyConfiguration]( + "jobs", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Job { return &v1.Job{} }, + func() *v1.JobList { return &v1.JobList{} }), } } - -// Get takes name of the job, and returns the corresponding job object, and an error if there is any. -func (c *jobs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Jobs that match those selectors. 
-func (c *jobs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.JobList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.JobList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested jobs. -func (c *jobs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. -func (c *jobs) Create(ctx context.Context, job *v1.Job, opts metav1.CreateOptions) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Post(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(job). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. -func (c *jobs) Update(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Put(). - Namespace(c.ns). - Resource("jobs"). - Name(job.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(job). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *jobs) UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Put(). - Namespace(c.ns). - Resource("jobs"). - Name(job.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(job). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the job and deletes it. Returns an error if one occurs. -func (c *jobs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("jobs"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *jobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched job. -func (c *jobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("jobs"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied job. -func (c *jobs) Apply(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) { - if job == nil { - return nil, fmt.Errorf("job provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(job) - if err != nil { - return nil, err - } - name := job.Name - if name == nil { - return nil, fmt.Errorf("job.Name must be provided to Apply") - } - result = &v1.Job{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("jobs"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *jobs) ApplyStatus(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) { - if job == nil { - return nil, fmt.Errorf("job provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(job) - if err != nil { - return nil, err - } - - name := job.Name - if name == nil { - return nil, fmt.Errorf("job.Name must be provided to Apply") - } - - result = &v1.Job{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("jobs"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go index d687339ae9..a6f7399d84 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/batch/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" batchv1beta1 "k8s.io/client-go/applyconfigurations/batch/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CronJobsGetter has a method to return a CronJobInterface. @@ -43,6 +40,7 @@ type CronJobsGetter interface { type CronJobInterface interface { Create(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.CreateOptions) (*v1beta1.CronJob, error) Update(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type CronJobInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CronJob, err error) Apply(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error) CronJobExpansion } // cronJobs implements CronJobInterface type cronJobs struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.CronJob, *v1beta1.CronJobList, *batchv1beta1.CronJobApplyConfiguration] } // newCronJobs returns a CronJobs func newCronJobs(c *BatchV1beta1Client, namespace string) *cronJobs { return &cronJobs{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.CronJob, *v1beta1.CronJobList, *batchv1beta1.CronJobApplyConfiguration]( + "cronjobs", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.CronJob { return &v1beta1.CronJob{} }, + func() *v1beta1.CronJobList { return &v1beta1.CronJobList{} }), } } - -// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. -func (c *cronJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CronJob, err error) { - result = &v1beta1.CronJob{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CronJobs that match those selectors. -func (c *cronJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CronJobList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CronJobList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cronJobs. -func (c *cronJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Create(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.CreateOptions) (result *v1beta1.CronJob, err error) { - result = &v1beta1.CronJob{} - err = c.client.Post(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cronJob). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Update(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) { - result = &v1beta1.CronJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cronjobs"). - Name(cronJob.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cronJob). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *cronJobs) UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) { - result = &v1beta1.CronJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cronjobs"). - Name(cronJob.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cronJob). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cronJob and deletes it. Returns an error if one occurs. -func (c *cronJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cronJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cronJob. -func (c *cronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CronJob, err error) { - result = &v1beta1.CronJob{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cronJob. -func (c *cronJobs) Apply(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error) { - if cronJob == nil { - return nil, fmt.Errorf("cronJob provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cronJob) - if err != nil { - return nil, err - } - name := cronJob.Name - if name == nil { - return nil, fmt.Errorf("cronJob.Name must be provided to Apply") - } - result = &v1beta1.CronJob{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("cronjobs"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *cronJobs) ApplyStatus(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error) { - if cronJob == nil { - return nil, fmt.Errorf("cronJob provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cronJob) - if err != nil { - return nil, err - } - - name := cronJob.Name - if name == nil { - return nil, fmt.Errorf("cronJob.Name must be provided to Apply") - } - - result = &v1beta1.CronJob{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("cronjobs"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go index 9d078f55a9..71cd4f1653 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go @@ -44,22 +44,24 @@ var cronjobsKind = v1beta1.SchemeGroupVersion.WithKind("CronJob") // Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. func (c *FakeCronJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CronJob, err error) { + emptyResult := &v1beta1.CronJob{} obj, err := c.Fake. - Invokes(testing.NewGetAction(cronjobsResource, c.ns, name), &v1beta1.CronJob{}) + Invokes(testing.NewGetActionWithOptions(cronjobsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CronJob), err } // List takes label and field selectors, and returns the list of CronJobs that match those selectors. func (c *FakeCronJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CronJobList, err error) { + emptyResult := &v1beta1.CronJobList{} obj, err := c.Fake. - Invokes(testing.NewListAction(cronjobsResource, cronjobsKind, c.ns, opts), &v1beta1.CronJobList{}) + Invokes(testing.NewListActionWithOptions(cronjobsResource, cronjobsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeCronJobs) List(ctx context.Context, opts v1.ListOptions) (result *v // Watch returns a watch.Interface that watches the requested cronJobs. func (c *FakeCronJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(cronjobsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(cronjobsResource, c.ns, opts)) } // Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. func (c *FakeCronJobs) Create(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.CreateOptions) (result *v1beta1.CronJob, err error) { + emptyResult := &v1beta1.CronJob{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(cronjobsResource, c.ns, cronJob), &v1beta1.CronJob{}) + Invokes(testing.NewCreateActionWithOptions(cronjobsResource, c.ns, cronJob, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CronJob), err } // Update takes the representation of a cronJob and updates it. 
Returns the server's representation of the cronJob, and an error, if there is any. func (c *FakeCronJobs) Update(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) { + emptyResult := &v1beta1.CronJob{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(cronjobsResource, c.ns, cronJob), &v1beta1.CronJob{}) + Invokes(testing.NewUpdateActionWithOptions(cronjobsResource, c.ns, cronJob, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CronJob), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error) { +func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) { + emptyResult := &v1beta1.CronJob{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(cronjobsResource, "status", c.ns, cronJob), &v1beta1.CronJob{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(cronjobsResource, "status", c.ns, cronJob, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CronJob), err } @@ -126,7 +131,7 @@ func (c *FakeCronJobs) Delete(ctx context.Context, name string, opts v1.DeleteOp // DeleteCollection deletes a collection of objects. func (c *FakeCronJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(cronjobsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(cronjobsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.CronJobList{}) return err @@ -134,11 +139,12 @@ func (c *FakeCronJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptio // Patch applies the patch and returns the patched cronJob. func (c *FakeCronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CronJob, err error) { + emptyResult := &v1beta1.CronJob{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, pt, data, subresources...), &v1beta1.CronJob{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(cronjobsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CronJob), err } @@ -156,11 +162,12 @@ func (c *FakeCronJobs) Apply(ctx context.Context, cronJob *batchv1beta1.CronJobA if name == nil { return nil, fmt.Errorf("cronJob.Name must be provided to Apply") } + emptyResult := &v1beta1.CronJob{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.CronJob{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CronJob), err } @@ -179,11 +186,12 @@ func (c *FakeCronJobs) ApplyStatus(ctx context.Context, cronJob *batchv1beta1.Cr if name == nil { return nil, fmt.Errorf("cronJob.Name must be provided to Apply") } + emptyResult := &v1beta1.CronJob{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.CronJob{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CronJob), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go index 0d6b68b296..9fa3300e6c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/certificates/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" certificatesv1 "k8s.io/client-go/applyconfigurations/certificates/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CertificateSigningRequestsGetter has a method to return a CertificateSigningRequestInterface. @@ -43,6 +40,7 @@ type CertificateSigningRequestsGetter interface { type CertificateSigningRequestInterface interface { Create(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.CreateOptions) (*v1.CertificateSigningRequest, error) Update(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,6 +49,7 @@ type CertificateSigningRequestInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CertificateSigningRequest, err error) Apply(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error) @@ -59,195 +58,26 @@ type CertificateSigningRequestInterface interface { // certificateSigningRequests implements CertificateSigningRequestInterface type certificateSigningRequests struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.CertificateSigningRequest, *v1.CertificateSigningRequestList, *certificatesv1.CertificateSigningRequestApplyConfiguration] } // newCertificateSigningRequests returns a CertificateSigningRequests func newCertificateSigningRequests(c *CertificatesV1Client) *certificateSigningRequests { return &certificateSigningRequests{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.CertificateSigningRequest, *v1.CertificateSigningRequestList, *certificatesv1.CertificateSigningRequestApplyConfiguration]( + "certificatesigningrequests", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.CertificateSigningRequest { return &v1.CertificateSigningRequest{} }, + func() *v1.CertificateSigningRequestList { return &v1.CertificateSigningRequestList{} }), } } -// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. -func (c *certificateSigningRequests) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CertificateSigningRequest, err error) { - result = &v1.CertificateSigningRequest{} - err = c.client.Get(). - Resource("certificatesigningrequests"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. -func (c *certificateSigningRequests) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CertificateSigningRequestList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CertificateSigningRequestList{} - err = c.client.Get(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested certificateSigningRequests. -func (c *certificateSigningRequests) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *certificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.CreateOptions) (result *v1.CertificateSigningRequest, err error) { - result = &v1.CertificateSigningRequest{} - err = c.client.Post(). - Resource("certificatesigningrequests"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Body(certificateSigningRequest). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *certificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { - result = &v1.CertificateSigningRequest{} - err = c.client.Put(). - Resource("certificatesigningrequests"). - Name(certificateSigningRequest.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(certificateSigningRequest). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *certificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { - result = &v1.CertificateSigningRequest{} - err = c.client.Put(). - Resource("certificatesigningrequests"). - Name(certificateSigningRequest.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(certificateSigningRequest). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. -func (c *certificateSigningRequests) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("certificatesigningrequests"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *certificateSigningRequests) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("certificatesigningrequests"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched certificateSigningRequest. -func (c *certificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CertificateSigningRequest, err error) { - result = &v1.CertificateSigningRequest{} - err = c.client.Patch(pt). - Resource("certificatesigningrequests"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied certificateSigningRequest. 
-func (c *certificateSigningRequests) Apply(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) { - if certificateSigningRequest == nil { - return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(certificateSigningRequest) - if err != nil { - return nil, err - } - name := certificateSigningRequest.Name - if name == nil { - return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") - } - result = &v1.CertificateSigningRequest{} - err = c.client.Patch(types.ApplyPatchType). - Resource("certificatesigningrequests"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *certificateSigningRequests) ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) { - if certificateSigningRequest == nil { - return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(certificateSigningRequest) - if err != nil { - return nil, err - } - - name := certificateSigningRequest.Name - if name == nil { - return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") - } - - result = &v1.CertificateSigningRequest{} - err = c.client.Patch(types.ApplyPatchType). - Resource("certificatesigningrequests"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // UpdateApproval takes the top resource name and the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. func (c *certificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { result = &v1.CertificateSigningRequest{} - err = c.client.Put(). + err = c.GetClient().Put(). Resource("certificatesigningrequests"). Name(certificateSigningRequestName). SubResource("approval"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go index adb7db0bf6..f3fc99f839 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go @@ -43,20 +43,22 @@ var certificatesigningrequestsKind = v1.SchemeGroupVersion.WithKind("Certificate // Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. 
func (c *FakeCertificateSigningRequests) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CertificateSigningRequest, err error) { + emptyResult := &v1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(certificatesigningrequestsResource, name), &v1.CertificateSigningRequest{}) + Invokes(testing.NewRootGetActionWithOptions(certificatesigningrequestsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CertificateSigningRequest), err } // List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. func (c *FakeCertificateSigningRequests) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CertificateSigningRequestList, err error) { + emptyResult := &v1.CertificateSigningRequestList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(certificatesigningrequestsResource, certificatesigningrequestsKind, opts), &v1.CertificateSigningRequestList{}) + Invokes(testing.NewRootListActionWithOptions(certificatesigningrequestsResource, certificatesigningrequestsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeCertificateSigningRequests) List(ctx context.Context, opts metav1.L // Watch returns a watch.Interface that watches the requested certificateSigningRequests. func (c *FakeCertificateSigningRequests) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(certificatesigningrequestsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(certificatesigningrequestsResource, opts)) } // Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. func (c *FakeCertificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.CreateOptions) (result *v1.CertificateSigningRequest, err error) { + emptyResult := &v1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1.CertificateSigningRequest{}) + Invokes(testing.NewRootCreateActionWithOptions(certificatesigningrequestsResource, certificateSigningRequest, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CertificateSigningRequest), err } // Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. func (c *FakeCertificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { + emptyResult := &v1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1.CertificateSigningRequest{}) + Invokes(testing.NewRootUpdateActionWithOptions(certificatesigningrequestsResource, certificateSigningRequest, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CertificateSigningRequest), err } // UpdateStatus was generated because the type contains a Status member. 
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCertificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error) { +func (c *FakeCertificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { + emptyResult := &v1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "status", certificateSigningRequest), &v1.CertificateSigningRequest{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(certificatesigningrequestsResource, "status", certificateSigningRequest, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CertificateSigningRequest), err } @@ -118,7 +123,7 @@ func (c *FakeCertificateSigningRequests) Delete(ctx context.Context, name string // DeleteCollection deletes a collection of objects. func (c *FakeCertificateSigningRequests) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(certificatesigningrequestsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(certificatesigningrequestsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.CertificateSigningRequestList{}) return err @@ -126,10 +131,11 @@ func (c *FakeCertificateSigningRequests) DeleteCollection(ctx context.Context, o // Patch applies the patch and returns the patched certificateSigningRequest. func (c *FakeCertificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CertificateSigningRequest, err error) { + emptyResult := &v1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, name, pt, data, subresources...), &v1.CertificateSigningRequest{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(certificatesigningrequestsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CertificateSigningRequest), err } @@ -147,10 +153,11 @@ func (c *FakeCertificateSigningRequests) Apply(ctx context.Context, certificateS if name == nil { return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") } + emptyResult := &v1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, *name, types.ApplyPatchType, data), &v1.CertificateSigningRequest{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CertificateSigningRequest), err } @@ -169,20 +176,22 @@ func (c *FakeCertificateSigningRequests) ApplyStatus(ctx context.Context, certif if name == nil { return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") } + emptyResult := &v1.CertificateSigningRequest{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, "status"), &v1.CertificateSigningRequest{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CertificateSigningRequest), err } // UpdateApproval takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. func (c *FakeCertificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { + emptyResult := &v1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "approval", certificateSigningRequest), &v1.CertificateSigningRequest{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(certificatesigningrequestsResource, "approval", certificateSigningRequest, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CertificateSigningRequest), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go index 970fb15e6e..74fe9fa14c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/certificates/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" certificatesv1alpha1 "k8s.io/client-go/applyconfigurations/certificates/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterTrustBundlesGetter has a method to return a ClusterTrustBundleInterface. @@ -55,143 +52,18 @@ type ClusterTrustBundleInterface interface { // clusterTrustBundles implements ClusterTrustBundleInterface type clusterTrustBundles struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.ClusterTrustBundle, *v1alpha1.ClusterTrustBundleList, *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration] } // newClusterTrustBundles returns a ClusterTrustBundles func newClusterTrustBundles(c *CertificatesV1alpha1Client) *clusterTrustBundles { return &clusterTrustBundles{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.ClusterTrustBundle, *v1alpha1.ClusterTrustBundleList, *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration]( + "clustertrustbundles", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.ClusterTrustBundle { return &v1alpha1.ClusterTrustBundle{} }, + func() *v1alpha1.ClusterTrustBundleList { return &v1alpha1.ClusterTrustBundleList{} }), } } - -// Get takes name of the clusterTrustBundle, and returns the corresponding clusterTrustBundle object, and an error if there is any. 
-func (c *clusterTrustBundles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterTrustBundle, err error) { - result = &v1alpha1.ClusterTrustBundle{} - err = c.client.Get(). - Resource("clustertrustbundles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterTrustBundles that match those selectors. -func (c *clusterTrustBundles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterTrustBundleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ClusterTrustBundleList{} - err = c.client.Get(). - Resource("clustertrustbundles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterTrustBundles. -func (c *clusterTrustBundles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clustertrustbundles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterTrustBundle and creates it. Returns the server's representation of the clusterTrustBundle, and an error, if there is any. -func (c *clusterTrustBundles) Create(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.CreateOptions) (result *v1alpha1.ClusterTrustBundle, err error) { - result = &v1alpha1.ClusterTrustBundle{} - err = c.client.Post(). - Resource("clustertrustbundles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterTrustBundle). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterTrustBundle and updates it. Returns the server's representation of the clusterTrustBundle, and an error, if there is any. -func (c *clusterTrustBundles) Update(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.UpdateOptions) (result *v1alpha1.ClusterTrustBundle, err error) { - result = &v1alpha1.ClusterTrustBundle{} - err = c.client.Put(). - Resource("clustertrustbundles"). - Name(clusterTrustBundle.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterTrustBundle). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterTrustBundle and deletes it. Returns an error if one occurs. -func (c *clusterTrustBundles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clustertrustbundles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterTrustBundles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clustertrustbundles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterTrustBundle. 
-func (c *clusterTrustBundles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTrustBundle, err error) { - result = &v1alpha1.ClusterTrustBundle{} - err = c.client.Patch(pt). - Resource("clustertrustbundles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterTrustBundle. -func (c *clusterTrustBundles) Apply(ctx context.Context, clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterTrustBundle, err error) { - if clusterTrustBundle == nil { - return nil, fmt.Errorf("clusterTrustBundle provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterTrustBundle) - if err != nil { - return nil, err - } - name := clusterTrustBundle.Name - if name == nil { - return nil, fmt.Errorf("clusterTrustBundle.Name must be provided to Apply") - } - result = &v1alpha1.ClusterTrustBundle{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clustertrustbundles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_clustertrustbundle.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_clustertrustbundle.go index 2f849cbd7d..1c4e97bd40 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_clustertrustbundle.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_clustertrustbundle.go @@ -43,20 +43,22 @@ var clustertrustbundlesKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterTrust // Get takes name of the clusterTrustBundle, and returns the corresponding clusterTrustBundle object, and an error if there is any. func (c *FakeClusterTrustBundles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterTrustBundle, err error) { + emptyResult := &v1alpha1.ClusterTrustBundle{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clustertrustbundlesResource, name), &v1alpha1.ClusterTrustBundle{}) + Invokes(testing.NewRootGetActionWithOptions(clustertrustbundlesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterTrustBundle), err } // List takes label and field selectors, and returns the list of ClusterTrustBundles that match those selectors. func (c *FakeClusterTrustBundles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterTrustBundleList, err error) { + emptyResult := &v1alpha1.ClusterTrustBundleList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(clustertrustbundlesResource, clustertrustbundlesKind, opts), &v1alpha1.ClusterTrustBundleList{}) + Invokes(testing.NewRootListActionWithOptions(clustertrustbundlesResource, clustertrustbundlesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeClusterTrustBundles) List(ctx context.Context, opts v1.ListOptions) // Watch returns a watch.Interface that watches the requested clusterTrustBundles. 
func (c *FakeClusterTrustBundles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(clustertrustbundlesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(clustertrustbundlesResource, opts)) } // Create takes the representation of a clusterTrustBundle and creates it. Returns the server's representation of the clusterTrustBundle, and an error, if there is any. func (c *FakeClusterTrustBundles) Create(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.CreateOptions) (result *v1alpha1.ClusterTrustBundle, err error) { + emptyResult := &v1alpha1.ClusterTrustBundle{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clustertrustbundlesResource, clusterTrustBundle), &v1alpha1.ClusterTrustBundle{}) + Invokes(testing.NewRootCreateActionWithOptions(clustertrustbundlesResource, clusterTrustBundle, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterTrustBundle), err } // Update takes the representation of a clusterTrustBundle and updates it. Returns the server's representation of the clusterTrustBundle, and an error, if there is any. func (c *FakeClusterTrustBundles) Update(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.UpdateOptions) (result *v1alpha1.ClusterTrustBundle, err error) { + emptyResult := &v1alpha1.ClusterTrustBundle{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(clustertrustbundlesResource, clusterTrustBundle), &v1alpha1.ClusterTrustBundle{}) + Invokes(testing.NewRootUpdateActionWithOptions(clustertrustbundlesResource, clusterTrustBundle, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterTrustBundle), err } @@ -107,7 +111,7 @@ func (c *FakeClusterTrustBundles) Delete(ctx context.Context, name string, opts // DeleteCollection deletes a collection of objects. func (c *FakeClusterTrustBundles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clustertrustbundlesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(clustertrustbundlesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.ClusterTrustBundleList{}) return err @@ -115,10 +119,11 @@ func (c *FakeClusterTrustBundles) DeleteCollection(ctx context.Context, opts v1. // Patch applies the patch and returns the patched clusterTrustBundle. func (c *FakeClusterTrustBundles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTrustBundle, err error) { + emptyResult := &v1alpha1.ClusterTrustBundle{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clustertrustbundlesResource, name, pt, data, subresources...), &v1alpha1.ClusterTrustBundle{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clustertrustbundlesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterTrustBundle), err } @@ -136,10 +141,11 @@ func (c *FakeClusterTrustBundles) Apply(ctx context.Context, clusterTrustBundle if name == nil { return nil, fmt.Errorf("clusterTrustBundle.Name must be provided to Apply") } + emptyResult := &v1alpha1.ClusterTrustBundle{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(clustertrustbundlesResource, *name, types.ApplyPatchType, data), &v1alpha1.ClusterTrustBundle{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clustertrustbundlesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterTrustBundle), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go index ec0b9d266f..de9915c5d6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/certificates/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" certificatesv1beta1 "k8s.io/client-go/applyconfigurations/certificates/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CertificateSigningRequestsGetter has a method to return a CertificateSigningRequestInterface. @@ -43,6 +40,7 @@ type CertificateSigningRequestsGetter interface { type CertificateSigningRequestInterface interface { Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.CreateOptions) (*v1beta1.CertificateSigningRequest, error) Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type CertificateSigningRequestInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) Apply(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error) CertificateSigningRequestExpansion } // certificateSigningRequests implements CertificateSigningRequestInterface type certificateSigningRequests struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.CertificateSigningRequest, *v1beta1.CertificateSigningRequestList, *certificatesv1beta1.CertificateSigningRequestApplyConfiguration] } // newCertificateSigningRequests returns a CertificateSigningRequests func newCertificateSigningRequests(c *CertificatesV1beta1Client) *certificateSigningRequests { return &certificateSigningRequests{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.CertificateSigningRequest, *v1beta1.CertificateSigningRequestList, *certificatesv1beta1.CertificateSigningRequestApplyConfiguration]( + "certificatesigningrequests", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.CertificateSigningRequest { return &v1beta1.CertificateSigningRequest{} }, + func() *v1beta1.CertificateSigningRequestList { return &v1beta1.CertificateSigningRequestList{} }), } } - -// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. -func (c *certificateSigningRequests) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Get(). - Resource("certificatesigningrequests"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. -func (c *certificateSigningRequests) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CertificateSigningRequestList{} - err = c.client.Get(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested certificateSigningRequests. -func (c *certificateSigningRequests) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *certificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.CreateOptions) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Post(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(certificateSigningRequest). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *certificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Put(). - Resource("certificatesigningrequests"). - Name(certificateSigningRequest.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(certificateSigningRequest). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *certificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Put(). - Resource("certificatesigningrequests"). - Name(certificateSigningRequest.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(certificateSigningRequest). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. -func (c *certificateSigningRequests) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("certificatesigningrequests"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *certificateSigningRequests) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("certificatesigningrequests"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched certificateSigningRequest. -func (c *certificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Patch(pt). - Resource("certificatesigningrequests"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied certificateSigningRequest. 
-func (c *certificateSigningRequests) Apply(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error) { - if certificateSigningRequest == nil { - return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(certificateSigningRequest) - if err != nil { - return nil, err - } - name := certificateSigningRequest.Name - if name == nil { - return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") - } - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Patch(types.ApplyPatchType). - Resource("certificatesigningrequests"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *certificateSigningRequests) ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error) { - if certificateSigningRequest == nil { - return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(certificateSigningRequest) - if err != nil { - return nil, err - } - - name := certificateSigningRequest.Name - if name == nil { - return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") - } - - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Patch(types.ApplyPatchType). - Resource("certificatesigningrequests"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go index 4737891411..4e631b0a40 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go @@ -30,7 +30,7 @@ type CertificateSigningRequestExpansion interface { func (c *certificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequest *certificates.CertificateSigningRequest, opts metav1.UpdateOptions) (result *certificates.CertificateSigningRequest, err error) { result = &certificates.CertificateSigningRequest{} - err = c.client.Put(). + err = c.GetClient().Put(). Resource("certificatesigningrequests"). Name(certificateSigningRequest.Name). VersionedParams(&opts, scheme.ParameterCodec). 
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go index 76bb38e7bf..ff5a9bd4c7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go @@ -43,20 +43,22 @@ var certificatesigningrequestsKind = v1beta1.SchemeGroupVersion.WithKind("Certif // Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. func (c *FakeCertificateSigningRequests) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) { + emptyResult := &v1beta1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(certificatesigningrequestsResource, name), &v1beta1.CertificateSigningRequest{}) + Invokes(testing.NewRootGetActionWithOptions(certificatesigningrequestsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CertificateSigningRequest), err } // List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. func (c *FakeCertificateSigningRequests) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { + emptyResult := &v1beta1.CertificateSigningRequestList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(certificatesigningrequestsResource, certificatesigningrequestsKind, opts), &v1beta1.CertificateSigningRequestList{}) + Invokes(testing.NewRootListActionWithOptions(certificatesigningrequestsResource, certificatesigningrequestsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeCertificateSigningRequests) List(ctx context.Context, opts v1.ListO // Watch returns a watch.Interface that watches the requested certificateSigningRequests. func (c *FakeCertificateSigningRequests) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(certificatesigningrequestsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(certificatesigningrequestsResource, opts)) } // Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. func (c *FakeCertificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.CreateOptions) (result *v1beta1.CertificateSigningRequest, err error) { + emptyResult := &v1beta1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1beta1.CertificateSigningRequest{}) + Invokes(testing.NewRootCreateActionWithOptions(certificatesigningrequestsResource, certificateSigningRequest, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CertificateSigningRequest), err } // Update takes the representation of a certificateSigningRequest and updates it. 
Returns the server's representation of the certificateSigningRequest, and an error, if there is any. func (c *FakeCertificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) { + emptyResult := &v1beta1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1beta1.CertificateSigningRequest{}) + Invokes(testing.NewRootUpdateActionWithOptions(certificatesigningrequestsResource, certificateSigningRequest, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CertificateSigningRequest), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCertificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error) { +func (c *FakeCertificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) { + emptyResult := &v1beta1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "status", certificateSigningRequest), &v1beta1.CertificateSigningRequest{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(certificatesigningrequestsResource, "status", certificateSigningRequest, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CertificateSigningRequest), err } @@ -118,7 +123,7 @@ func (c *FakeCertificateSigningRequests) Delete(ctx context.Context, name string // DeleteCollection deletes a collection of objects. func (c *FakeCertificateSigningRequests) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(certificatesigningrequestsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(certificatesigningrequestsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.CertificateSigningRequestList{}) return err @@ -126,10 +131,11 @@ func (c *FakeCertificateSigningRequests) DeleteCollection(ctx context.Context, o // Patch applies the patch and returns the patched certificateSigningRequest. func (c *FakeCertificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { + emptyResult := &v1beta1.CertificateSigningRequest{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, name, pt, data, subresources...), &v1beta1.CertificateSigningRequest{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(certificatesigningrequestsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CertificateSigningRequest), err } @@ -147,10 +153,11 @@ func (c *FakeCertificateSigningRequests) Apply(ctx context.Context, certificateS if name == nil { return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") } + emptyResult := &v1beta1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, *name, types.ApplyPatchType, data), &v1beta1.CertificateSigningRequest{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CertificateSigningRequest), err } @@ -169,10 +176,11 @@ func (c *FakeCertificateSigningRequests) ApplyStatus(ctx context.Context, certif if name == nil { return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") } + emptyResult := &v1beta1.CertificateSigningRequest{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, "status"), &v1beta1.CertificateSigningRequest{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CertificateSigningRequest), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go index 6dc7c4c17f..03f833f370 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go @@ -44,22 +44,24 @@ var leasesKind = v1.SchemeGroupVersion.WithKind("Lease") // Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. func (c *FakeLeases) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Lease, err error) { + emptyResult := &v1.Lease{} obj, err := c.Fake. - Invokes(testing.NewGetAction(leasesResource, c.ns, name), &v1.Lease{}) + Invokes(testing.NewGetActionWithOptions(leasesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Lease), err } // List takes label and field selectors, and returns the list of Leases that match those selectors. func (c *FakeLeases) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LeaseList, err error) { + emptyResult := &v1.LeaseList{} obj, err := c.Fake. 
- Invokes(testing.NewListAction(leasesResource, leasesKind, c.ns, opts), &v1.LeaseList{}) + Invokes(testing.NewListActionWithOptions(leasesResource, leasesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeLeases) List(ctx context.Context, opts metav1.ListOptions) (result // Watch returns a watch.Interface that watches the requested leases. func (c *FakeLeases) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(leasesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(leasesResource, c.ns, opts)) } // Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. func (c *FakeLeases) Create(ctx context.Context, lease *v1.Lease, opts metav1.CreateOptions) (result *v1.Lease, err error) { + emptyResult := &v1.Lease{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(leasesResource, c.ns, lease), &v1.Lease{}) + Invokes(testing.NewCreateActionWithOptions(leasesResource, c.ns, lease, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Lease), err } // Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. func (c *FakeLeases) Update(ctx context.Context, lease *v1.Lease, opts metav1.UpdateOptions) (result *v1.Lease, err error) { + emptyResult := &v1.Lease{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(leasesResource, c.ns, lease), &v1.Lease{}) + Invokes(testing.NewUpdateActionWithOptions(leasesResource, c.ns, lease, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Lease), err } @@ -114,7 +118,7 @@ func (c *FakeLeases) Delete(ctx context.Context, name string, opts metav1.Delete // DeleteCollection deletes a collection of objects. func (c *FakeLeases) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(leasesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(leasesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.LeaseList{}) return err @@ -122,11 +126,12 @@ func (c *FakeLeases) DeleteCollection(ctx context.Context, opts metav1.DeleteOpt // Patch applies the patch and returns the patched lease. func (c *FakeLeases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Lease, err error) { + emptyResult := &v1.Lease{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, name, pt, data, subresources...), &v1.Lease{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(leasesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Lease), err } @@ -144,11 +149,12 @@ func (c *FakeLeases) Apply(ctx context.Context, lease *coordinationv1.LeaseApply if name == nil { return nil, fmt.Errorf("lease.Name must be provided to Apply") } + emptyResult := &v1.Lease{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, *name, types.ApplyPatchType, data), &v1.Lease{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(leasesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Lease), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go index 9e6b169a81..97834d6ac0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/coordination/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" coordinationv1 "k8s.io/client-go/applyconfigurations/coordination/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // LeasesGetter has a method to return a LeaseInterface. @@ -55,154 +52,18 @@ type LeaseInterface interface { // leases implements LeaseInterface type leases struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Lease, *v1.LeaseList, *coordinationv1.LeaseApplyConfiguration] } // newLeases returns a Leases func newLeases(c *CoordinationV1Client, namespace string) *leases { return &leases{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Lease, *v1.LeaseList, *coordinationv1.LeaseApplyConfiguration]( + "leases", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Lease { return &v1.Lease{} }, + func() *v1.LeaseList { return &v1.LeaseList{} }), } } - -// Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. -func (c *leases) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Lease, err error) { - result = &v1.Lease{} - err = c.client.Get(). - Namespace(c.ns). - Resource("leases"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Leases that match those selectors. -func (c *leases) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LeaseList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.LeaseList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested leases. -func (c *leases) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. 
-func (c *leases) Create(ctx context.Context, lease *v1.Lease, opts metav1.CreateOptions) (result *v1.Lease, err error) { - result = &v1.Lease{} - err = c.client.Post(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(lease). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Update(ctx context.Context, lease *v1.Lease, opts metav1.UpdateOptions) (result *v1.Lease, err error) { - result = &v1.Lease{} - err = c.client.Put(). - Namespace(c.ns). - Resource("leases"). - Name(lease.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(lease). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the lease and deletes it. Returns an error if one occurs. -func (c *leases) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("leases"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *leases) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched lease. -func (c *leases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Lease, err error) { - result = &v1.Lease{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("leases"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied lease. -func (c *leases) Apply(ctx context.Context, lease *coordinationv1.LeaseApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Lease, err error) { - if lease == nil { - return nil, fmt.Errorf("lease provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(lease) - if err != nil { - return nil, err - } - name := lease.Name - if name == nil { - return nil, fmt.Errorf("lease.Name must be provided to Apply") - } - result = &v1.Lease{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("leases"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/coordination_client.go new file mode 100644 index 0000000000..dd75e5d014 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/coordination_client.go @@ -0,0 +1,107 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "net/http" + + v1alpha1 "k8s.io/api/coordination/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type CoordinationV1alpha1Interface interface { + RESTClient() rest.Interface + LeaseCandidatesGetter +} + +// CoordinationV1alpha1Client is used to interact with features provided by the coordination.k8s.io group. +type CoordinationV1alpha1Client struct { + restClient rest.Interface +} + +func (c *CoordinationV1alpha1Client) LeaseCandidates(namespace string) LeaseCandidateInterface { + return newLeaseCandidates(c, namespace) +} + +// NewForConfig creates a new CoordinationV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*CoordinationV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new CoordinationV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CoordinationV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &CoordinationV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new CoordinationV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CoordinationV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoordinationV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *CoordinationV1alpha1Client { + return &CoordinationV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *CoordinationV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/doc.go similarity index 97% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/doc.go index baaf2d9853..df51baa4d4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/doc.go @@ -17,4 +17,4 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. -package v1alpha2 +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/doc.go similarity index 100% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/doc.go diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_coordination_client.go new file mode 100644 index 0000000000..2e7d4be268 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_coordination_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeCoordinationV1alpha1 struct { + *testing.Fake +} + +func (c *FakeCoordinationV1alpha1) LeaseCandidates(namespace string) v1alpha1.LeaseCandidateInterface { + return &FakeLeaseCandidates{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeCoordinationV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_leasecandidate.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_leasecandidate.go new file mode 100644 index 0000000000..c3de2303ca --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake/fake_leasecandidate.go @@ -0,0 +1,160 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1alpha1 "k8s.io/api/coordination/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + coordinationv1alpha1 "k8s.io/client-go/applyconfigurations/coordination/v1alpha1" + testing "k8s.io/client-go/testing" +) + +// FakeLeaseCandidates implements LeaseCandidateInterface +type FakeLeaseCandidates struct { + Fake *FakeCoordinationV1alpha1 + ns string +} + +var leasecandidatesResource = v1alpha1.SchemeGroupVersion.WithResource("leasecandidates") + +var leasecandidatesKind = v1alpha1.SchemeGroupVersion.WithKind("LeaseCandidate") + +// Get takes name of the leaseCandidate, and returns the corresponding leaseCandidate object, and an error if there is any. +func (c *FakeLeaseCandidates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.LeaseCandidate, err error) { + emptyResult := &v1alpha1.LeaseCandidate{} + obj, err := c.Fake. + Invokes(testing.NewGetActionWithOptions(leasecandidatesResource, c.ns, name, options), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.LeaseCandidate), err +} + +// List takes label and field selectors, and returns the list of LeaseCandidates that match those selectors. +func (c *FakeLeaseCandidates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.LeaseCandidateList, err error) { + emptyResult := &v1alpha1.LeaseCandidateList{} + obj, err := c.Fake. + Invokes(testing.NewListActionWithOptions(leasecandidatesResource, leasecandidatesKind, c.ns, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.LeaseCandidateList{ListMeta: obj.(*v1alpha1.LeaseCandidateList).ListMeta} + for _, item := range obj.(*v1alpha1.LeaseCandidateList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested leaseCandidates. +func (c *FakeLeaseCandidates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchActionWithOptions(leasecandidatesResource, c.ns, opts)) + +} + +// Create takes the representation of a leaseCandidate and creates it. Returns the server's representation of the leaseCandidate, and an error, if there is any. +func (c *FakeLeaseCandidates) Create(ctx context.Context, leaseCandidate *v1alpha1.LeaseCandidate, opts v1.CreateOptions) (result *v1alpha1.LeaseCandidate, err error) { + emptyResult := &v1alpha1.LeaseCandidate{} + obj, err := c.Fake. 
+ Invokes(testing.NewCreateActionWithOptions(leasecandidatesResource, c.ns, leaseCandidate, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.LeaseCandidate), err +} + +// Update takes the representation of a leaseCandidate and updates it. Returns the server's representation of the leaseCandidate, and an error, if there is any. +func (c *FakeLeaseCandidates) Update(ctx context.Context, leaseCandidate *v1alpha1.LeaseCandidate, opts v1.UpdateOptions) (result *v1alpha1.LeaseCandidate, err error) { + emptyResult := &v1alpha1.LeaseCandidate{} + obj, err := c.Fake. + Invokes(testing.NewUpdateActionWithOptions(leasecandidatesResource, c.ns, leaseCandidate, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.LeaseCandidate), err +} + +// Delete takes name of the leaseCandidate and deletes it. Returns an error if one occurs. +func (c *FakeLeaseCandidates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(leasecandidatesResource, c.ns, name, opts), &v1alpha1.LeaseCandidate{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeLeaseCandidates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionActionWithOptions(leasecandidatesResource, c.ns, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.LeaseCandidateList{}) + return err +} + +// Patch applies the patch and returns the patched leaseCandidate. +func (c *FakeLeaseCandidates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.LeaseCandidate, err error) { + emptyResult := &v1alpha1.LeaseCandidate{} + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceActionWithOptions(leasecandidatesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.LeaseCandidate), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied leaseCandidate. +func (c *FakeLeaseCandidates) Apply(ctx context.Context, leaseCandidate *coordinationv1alpha1.LeaseCandidateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.LeaseCandidate, err error) { + if leaseCandidate == nil { + return nil, fmt.Errorf("leaseCandidate provided to Apply must not be nil") + } + data, err := json.Marshal(leaseCandidate) + if err != nil { + return nil, err + } + name := leaseCandidate.Name + if name == nil { + return nil, fmt.Errorf("leaseCandidate.Name must be provided to Apply") + } + emptyResult := &v1alpha1.LeaseCandidate{} + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceActionWithOptions(leasecandidatesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.LeaseCandidate), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/generated_expansion.go new file mode 100644 index 0000000000..2dc2f30cfc --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type LeaseCandidateExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/leasecandidate.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/leasecandidate.go new file mode 100644 index 0000000000..868185135b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/leasecandidate.go @@ -0,0 +1,69 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + + v1alpha1 "k8s.io/api/coordination/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + coordinationv1alpha1 "k8s.io/client-go/applyconfigurations/coordination/v1alpha1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// LeaseCandidatesGetter has a method to return a LeaseCandidateInterface. +// A group's client should implement this interface. +type LeaseCandidatesGetter interface { + LeaseCandidates(namespace string) LeaseCandidateInterface +} + +// LeaseCandidateInterface has methods to work with LeaseCandidate resources. 
+type LeaseCandidateInterface interface { + Create(ctx context.Context, leaseCandidate *v1alpha1.LeaseCandidate, opts v1.CreateOptions) (*v1alpha1.LeaseCandidate, error) + Update(ctx context.Context, leaseCandidate *v1alpha1.LeaseCandidate, opts v1.UpdateOptions) (*v1alpha1.LeaseCandidate, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.LeaseCandidate, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.LeaseCandidateList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.LeaseCandidate, err error) + Apply(ctx context.Context, leaseCandidate *coordinationv1alpha1.LeaseCandidateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.LeaseCandidate, err error) + LeaseCandidateExpansion +} + +// leaseCandidates implements LeaseCandidateInterface +type leaseCandidates struct { + *gentype.ClientWithListAndApply[*v1alpha1.LeaseCandidate, *v1alpha1.LeaseCandidateList, *coordinationv1alpha1.LeaseCandidateApplyConfiguration] +} + +// newLeaseCandidates returns a LeaseCandidates +func newLeaseCandidates(c *CoordinationV1alpha1Client, namespace string) *leaseCandidates { + return &leaseCandidates{ + gentype.NewClientWithListAndApply[*v1alpha1.LeaseCandidate, *v1alpha1.LeaseCandidateList, *coordinationv1alpha1.LeaseCandidateApplyConfiguration]( + "leasecandidates", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha1.LeaseCandidate { return &v1alpha1.LeaseCandidate{} }, + func() *v1alpha1.LeaseCandidateList { return &v1alpha1.LeaseCandidateList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go index 9a4a0d7eb9..112784af94 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go @@ -44,22 +44,24 @@ var leasesKind = v1beta1.SchemeGroupVersion.WithKind("Lease") // Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. func (c *FakeLeases) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Lease, err error) { + emptyResult := &v1beta1.Lease{} obj, err := c.Fake. - Invokes(testing.NewGetAction(leasesResource, c.ns, name), &v1beta1.Lease{}) + Invokes(testing.NewGetActionWithOptions(leasesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Lease), err } // List takes label and field selectors, and returns the list of Leases that match those selectors. func (c *FakeLeases) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.LeaseList, err error) { + emptyResult := &v1beta1.LeaseList{} obj, err := c.Fake. 
- Invokes(testing.NewListAction(leasesResource, leasesKind, c.ns, opts), &v1beta1.LeaseList{}) + Invokes(testing.NewListActionWithOptions(leasesResource, leasesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeLeases) List(ctx context.Context, opts v1.ListOptions) (result *v1b // Watch returns a watch.Interface that watches the requested leases. func (c *FakeLeases) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(leasesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(leasesResource, c.ns, opts)) } // Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. func (c *FakeLeases) Create(ctx context.Context, lease *v1beta1.Lease, opts v1.CreateOptions) (result *v1beta1.Lease, err error) { + emptyResult := &v1beta1.Lease{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(leasesResource, c.ns, lease), &v1beta1.Lease{}) + Invokes(testing.NewCreateActionWithOptions(leasesResource, c.ns, lease, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Lease), err } // Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. func (c *FakeLeases) Update(ctx context.Context, lease *v1beta1.Lease, opts v1.UpdateOptions) (result *v1beta1.Lease, err error) { + emptyResult := &v1beta1.Lease{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(leasesResource, c.ns, lease), &v1beta1.Lease{}) + Invokes(testing.NewUpdateActionWithOptions(leasesResource, c.ns, lease, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Lease), err } @@ -114,7 +118,7 @@ func (c *FakeLeases) Delete(ctx context.Context, name string, opts v1.DeleteOpti // DeleteCollection deletes a collection of objects. func (c *FakeLeases) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(leasesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(leasesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.LeaseList{}) return err @@ -122,11 +126,12 @@ func (c *FakeLeases) DeleteCollection(ctx context.Context, opts v1.DeleteOptions // Patch applies the patch and returns the patched lease. func (c *FakeLeases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Lease, err error) { + emptyResult := &v1beta1.Lease{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, name, pt, data, subresources...), &v1beta1.Lease{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(leasesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Lease), err } @@ -144,11 +149,12 @@ func (c *FakeLeases) Apply(ctx context.Context, lease *coordinationv1beta1.Lease if name == nil { return nil, fmt.Errorf("lease.Name must be provided to Apply") } + emptyResult := &v1beta1.Lease{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Lease{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(leasesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Lease), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go index 1bbd57bdd1..62341e53b6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/coordination/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" coordinationv1beta1 "k8s.io/client-go/applyconfigurations/coordination/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // LeasesGetter has a method to return a LeaseInterface. @@ -55,154 +52,18 @@ type LeaseInterface interface { // leases implements LeaseInterface type leases struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.Lease, *v1beta1.LeaseList, *coordinationv1beta1.LeaseApplyConfiguration] } // newLeases returns a Leases func newLeases(c *CoordinationV1beta1Client, namespace string) *leases { return &leases{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.Lease, *v1beta1.LeaseList, *coordinationv1beta1.LeaseApplyConfiguration]( + "leases", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.Lease { return &v1beta1.Lease{} }, + func() *v1beta1.LeaseList { return &v1beta1.LeaseList{} }), } } - -// Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. -func (c *leases) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Lease, err error) { - result = &v1beta1.Lease{} - err = c.client.Get(). - Namespace(c.ns). - Resource("leases"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Leases that match those selectors. -func (c *leases) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.LeaseList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.LeaseList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested leases. -func (c *leases) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. 
-func (c *leases) Create(ctx context.Context, lease *v1beta1.Lease, opts v1.CreateOptions) (result *v1beta1.Lease, err error) { - result = &v1beta1.Lease{} - err = c.client.Post(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(lease). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Update(ctx context.Context, lease *v1beta1.Lease, opts v1.UpdateOptions) (result *v1beta1.Lease, err error) { - result = &v1beta1.Lease{} - err = c.client.Put(). - Namespace(c.ns). - Resource("leases"). - Name(lease.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(lease). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the lease and deletes it. Returns an error if one occurs. -func (c *leases) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("leases"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *leases) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched lease. -func (c *leases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Lease, err error) { - result = &v1beta1.Lease{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("leases"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied lease. -func (c *leases) Apply(ctx context.Context, lease *coordinationv1beta1.LeaseApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Lease, err error) { - if lease == nil { - return nil, fmt.Errorf("lease provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(lease) - if err != nil { - return nil, err - } - name := lease.Name - if name == nil { - return nil, fmt.Errorf("lease.Name must be provided to Apply") - } - result = &v1beta1.Lease{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("leases"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go index 0fef56429d..ab9458a5c9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ComponentStatusesGetter has a method to return a ComponentStatusInterface. @@ -55,143 +52,18 @@ type ComponentStatusInterface interface { // componentStatuses implements ComponentStatusInterface type componentStatuses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.ComponentStatus, *v1.ComponentStatusList, *corev1.ComponentStatusApplyConfiguration] } // newComponentStatuses returns a ComponentStatuses func newComponentStatuses(c *CoreV1Client) *componentStatuses { return &componentStatuses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.ComponentStatus, *v1.ComponentStatusList, *corev1.ComponentStatusApplyConfiguration]( + "componentstatuses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.ComponentStatus { return &v1.ComponentStatus{} }, + func() *v1.ComponentStatusList { return &v1.ComponentStatusList{} }), } } - -// Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. -func (c *componentStatuses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ComponentStatus, err error) { - result = &v1.ComponentStatus{} - err = c.client.Get(). - Resource("componentstatuses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. -func (c *componentStatuses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ComponentStatusList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ComponentStatusList{} - err = c.client.Get(). - Resource("componentstatuses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested componentStatuses. -func (c *componentStatuses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("componentstatuses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *componentStatuses) Create(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.CreateOptions) (result *v1.ComponentStatus, err error) { - result = &v1.ComponentStatus{} - err = c.client.Post(). 
- Resource("componentstatuses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(componentStatus). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *componentStatuses) Update(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.UpdateOptions) (result *v1.ComponentStatus, err error) { - result = &v1.ComponentStatus{} - err = c.client.Put(). - Resource("componentstatuses"). - Name(componentStatus.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(componentStatus). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the componentStatus and deletes it. Returns an error if one occurs. -func (c *componentStatuses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("componentstatuses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *componentStatuses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("componentstatuses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched componentStatus. -func (c *componentStatuses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ComponentStatus, err error) { - result = &v1.ComponentStatus{} - err = c.client.Patch(pt). - Resource("componentstatuses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied componentStatus. -func (c *componentStatuses) Apply(ctx context.Context, componentStatus *corev1.ComponentStatusApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ComponentStatus, err error) { - if componentStatus == nil { - return nil, fmt.Errorf("componentStatus provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(componentStatus) - if err != nil { - return nil, err - } - name := componentStatus.Name - if name == nil { - return nil, fmt.Errorf("componentStatus.Name must be provided to Apply") - } - result = &v1.ComponentStatus{} - err = c.client.Patch(types.ApplyPatchType). - Resource("componentstatuses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go index b68177720b..72aa2361f0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ConfigMapsGetter has a method to return a ConfigMapInterface. @@ -55,154 +52,18 @@ type ConfigMapInterface interface { // configMaps implements ConfigMapInterface type configMaps struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.ConfigMap, *v1.ConfigMapList, *corev1.ConfigMapApplyConfiguration] } // newConfigMaps returns a ConfigMaps func newConfigMaps(c *CoreV1Client, namespace string) *configMaps { return &configMaps{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.ConfigMap, *v1.ConfigMapList, *corev1.ConfigMapApplyConfiguration]( + "configmaps", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.ConfigMap { return &v1.ConfigMap{} }, + func() *v1.ConfigMapList { return &v1.ConfigMapList{} }), } } - -// Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. -func (c *configMaps) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ConfigMap, err error) { - result = &v1.ConfigMap{} - err = c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. -func (c *configMaps) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ConfigMapList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ConfigMapList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested configMaps. -func (c *configMaps) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (result *v1.ConfigMap, err error) { - result = &v1.ConfigMap{} - err = c.client.Post(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(configMap). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (result *v1.ConfigMap, err error) { - result = &v1.ConfigMap{} - err = c.client.Put(). - Namespace(c.ns). - Resource("configmaps"). - Name(configMap.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(configMap). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the configMap and deletes it. Returns an error if one occurs. -func (c *configMaps) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("configmaps"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *configMaps) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched configMap. -func (c *configMaps) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ConfigMap, err error) { - result = &v1.ConfigMap{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("configmaps"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied configMap. -func (c *configMaps) Apply(ctx context.Context, configMap *corev1.ConfigMapApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ConfigMap, err error) { - if configMap == nil { - return nil, fmt.Errorf("configMap provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(configMap) - if err != nil { - return nil, err - } - name := configMap.Name - if name == nil { - return nil, fmt.Errorf("configMap.Name must be provided to Apply") - } - result = &v1.ConfigMap{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("configmaps"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go index cdf464b069..9b9fc5fc1e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // EndpointsGetter has a method to return a EndpointsInterface. 
@@ -55,154 +52,18 @@ type EndpointsInterface interface { // endpoints implements EndpointsInterface type endpoints struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Endpoints, *v1.EndpointsList, *corev1.EndpointsApplyConfiguration] } // newEndpoints returns a Endpoints func newEndpoints(c *CoreV1Client, namespace string) *endpoints { return &endpoints{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Endpoints, *v1.EndpointsList, *corev1.EndpointsApplyConfiguration]( + "endpoints", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Endpoints { return &v1.Endpoints{} }, + func() *v1.EndpointsList { return &v1.EndpointsList{} }), } } - -// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. -func (c *endpoints) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Endpoints, err error) { - result = &v1.Endpoints{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpoints"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Endpoints that match those selectors. -func (c *endpoints) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointsList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.EndpointsList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested endpoints. -func (c *endpoints) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *endpoints) Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (result *v1.Endpoints, err error) { - result = &v1.Endpoints{} - err = c.client.Post(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(endpoints). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *endpoints) Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (result *v1.Endpoints, err error) { - result = &v1.Endpoints{} - err = c.client.Put(). - Namespace(c.ns). - Resource("endpoints"). - Name(endpoints.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(endpoints). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the endpoints and deletes it. Returns an error if one occurs. -func (c *endpoints) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("endpoints"). - Name(name). - Body(&opts). - Do(ctx). 
- Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *endpoints) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched endpoints. -func (c *endpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error) { - result = &v1.Endpoints{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("endpoints"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied endpoints. -func (c *endpoints) Apply(ctx context.Context, endpoints *corev1.EndpointsApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Endpoints, err error) { - if endpoints == nil { - return nil, fmt.Errorf("endpoints provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(endpoints) - if err != nil { - return nil, err - } - name := endpoints.Name - if name == nil { - return nil, fmt.Errorf("endpoints.Name must be provided to Apply") - } - result = &v1.Endpoints{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("endpoints"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go index 8274d85ffe..5ff0f06906 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // EventsGetter has a method to return a EventInterface. @@ -55,154 +52,18 @@ type EventInterface interface { // events implements EventInterface type events struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Event, *v1.EventList, *corev1.EventApplyConfiguration] } // newEvents returns a Events func newEvents(c *CoreV1Client, namespace string) *events { return &events{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Event, *v1.EventList, *corev1.EventApplyConfiguration]( + "events", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Event { return &v1.Event{} }, + func() *v1.EventList { return &v1.EventList{} }), } } - -// Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *events) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Get(). - Namespace(c.ns). 
- Resource("events"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *events) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EventList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.EventList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested events. -func (c *events) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Post(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(event). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Put(). - Namespace(c.ns). - Resource("events"). - Name(event.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(event). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *events) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *events) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched event. -func (c *events) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("events"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied event. 
-func (c *events) Apply(ctx context.Context, event *corev1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Event, err error) { - if event == nil { - return nil, fmt.Errorf("event provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(event) - if err != nil { - return nil, err - } - name := event.Name - if name == nil { - return nil, fmt.Errorf("event.Name must be provided to Apply") - } - result = &v1.Event{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("events"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go index a3fdf57a98..4243572328 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go @@ -48,11 +48,11 @@ type EventExpansion interface { // event; it must either match this event client's namespace, or this event // client must have been created with the "" namespace. func (e *events) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + if e.GetNamespace() != "" && event.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.GetNamespace()) } result := &v1.Event{} - err := e.client.Post(). + err := e.GetClient().Post(). NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). Resource("events"). Body(event). @@ -67,11 +67,11 @@ func (e *events) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) { // created with the "" namespace. Update also requires the ResourceVersion to be set in the event // object. func (e *events) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + if e.GetNamespace() != "" && event.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.GetNamespace()) } result := &v1.Event{} - err := e.client.Put(). + err := e.GetClient().Put(). NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). Resource("events"). Name(event.Name). @@ -87,11 +87,11 @@ func (e *events) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) { // match this event client's namespace, or this event client must have been // created with the "" namespace. func (e *events) PatchWithEventNamespace(incompleteEvent *v1.Event, data []byte) (*v1.Event, error) { - if e.ns != "" && incompleteEvent.Namespace != e.ns { - return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", incompleteEvent.Namespace, e.ns) + if e.GetNamespace() != "" && incompleteEvent.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", incompleteEvent.Namespace, e.GetNamespace()) } result := &v1.Event{} - err := e.client.Patch(types.StrategicMergePatchType). + err := e.GetClient().Patch(types.StrategicMergePatchType). NamespaceIfScoped(incompleteEvent.Namespace, len(incompleteEvent.Namespace) > 0). 
Resource("events"). Name(incompleteEvent.Name). @@ -109,8 +109,8 @@ func (e *events) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.Ev if err != nil { return nil, err } - if len(e.ns) > 0 && ref.Namespace != e.ns { - return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.ns) + if len(e.GetNamespace()) > 0 && ref.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.GetNamespace()) } stringRefKind := string(ref.Kind) var refKind *string diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go index 39d4c3282e..dbd305280b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go @@ -43,20 +43,22 @@ var componentstatusesKind = v1.SchemeGroupVersion.WithKind("ComponentStatus") // Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. func (c *FakeComponentStatuses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ComponentStatus, err error) { + emptyResult := &v1.ComponentStatus{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(componentstatusesResource, name), &v1.ComponentStatus{}) + Invokes(testing.NewRootGetActionWithOptions(componentstatusesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ComponentStatus), err } // List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. func (c *FakeComponentStatuses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ComponentStatusList, err error) { + emptyResult := &v1.ComponentStatusList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(componentstatusesResource, componentstatusesKind, opts), &v1.ComponentStatusList{}) + Invokes(testing.NewRootListActionWithOptions(componentstatusesResource, componentstatusesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeComponentStatuses) List(ctx context.Context, opts metav1.ListOption // Watch returns a watch.Interface that watches the requested componentStatuses. func (c *FakeComponentStatuses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(componentstatusesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(componentstatusesResource, opts)) } // Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. func (c *FakeComponentStatuses) Create(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.CreateOptions) (result *v1.ComponentStatus, err error) { + emptyResult := &v1.ComponentStatus{} obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(componentstatusesResource, componentStatus), &v1.ComponentStatus{}) + Invokes(testing.NewRootCreateActionWithOptions(componentstatusesResource, componentStatus, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ComponentStatus), err } // Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. func (c *FakeComponentStatuses) Update(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.UpdateOptions) (result *v1.ComponentStatus, err error) { + emptyResult := &v1.ComponentStatus{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(componentstatusesResource, componentStatus), &v1.ComponentStatus{}) + Invokes(testing.NewRootUpdateActionWithOptions(componentstatusesResource, componentStatus, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ComponentStatus), err } @@ -107,7 +111,7 @@ func (c *FakeComponentStatuses) Delete(ctx context.Context, name string, opts me // DeleteCollection deletes a collection of objects. func (c *FakeComponentStatuses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(componentstatusesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(componentstatusesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.ComponentStatusList{}) return err @@ -115,10 +119,11 @@ func (c *FakeComponentStatuses) DeleteCollection(ctx context.Context, opts metav // Patch applies the patch and returns the patched componentStatus. func (c *FakeComponentStatuses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ComponentStatus, err error) { + emptyResult := &v1.ComponentStatus{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(componentstatusesResource, name, pt, data, subresources...), &v1.ComponentStatus{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(componentstatusesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ComponentStatus), err } @@ -136,10 +141,11 @@ func (c *FakeComponentStatuses) Apply(ctx context.Context, componentStatus *core if name == nil { return nil, fmt.Errorf("componentStatus.Name must be provided to Apply") } + emptyResult := &v1.ComponentStatus{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(componentstatusesResource, *name, types.ApplyPatchType, data), &v1.ComponentStatus{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(componentstatusesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ComponentStatus), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go index 6e8a38bd8f..ae760add7f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go @@ -44,22 +44,24 @@ var configmapsKind = v1.SchemeGroupVersion.WithKind("ConfigMap") // Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. func (c *FakeConfigMaps) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ConfigMap, err error) { + emptyResult := &v1.ConfigMap{} obj, err := c.Fake. - Invokes(testing.NewGetAction(configmapsResource, c.ns, name), &v1.ConfigMap{}) + Invokes(testing.NewGetActionWithOptions(configmapsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ConfigMap), err } // List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. func (c *FakeConfigMaps) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ConfigMapList, err error) { + emptyResult := &v1.ConfigMapList{} obj, err := c.Fake. - Invokes(testing.NewListAction(configmapsResource, configmapsKind, c.ns, opts), &v1.ConfigMapList{}) + Invokes(testing.NewListActionWithOptions(configmapsResource, configmapsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeConfigMaps) List(ctx context.Context, opts metav1.ListOptions) (res // Watch returns a watch.Interface that watches the requested configMaps. func (c *FakeConfigMaps) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(configmapsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(configmapsResource, c.ns, opts)) } // Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. func (c *FakeConfigMaps) Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (result *v1.ConfigMap, err error) { + emptyResult := &v1.ConfigMap{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(configmapsResource, c.ns, configMap), &v1.ConfigMap{}) + Invokes(testing.NewCreateActionWithOptions(configmapsResource, c.ns, configMap, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ConfigMap), err } // Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. func (c *FakeConfigMaps) Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (result *v1.ConfigMap, err error) { + emptyResult := &v1.ConfigMap{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(configmapsResource, c.ns, configMap), &v1.ConfigMap{}) + Invokes(testing.NewUpdateActionWithOptions(configmapsResource, c.ns, configMap, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ConfigMap), err } @@ -114,7 +118,7 @@ func (c *FakeConfigMaps) Delete(ctx context.Context, name string, opts metav1.De // DeleteCollection deletes a collection of objects. func (c *FakeConfigMaps) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(configmapsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(configmapsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.ConfigMapList{}) return err @@ -122,11 +126,12 @@ func (c *FakeConfigMaps) DeleteCollection(ctx context.Context, opts metav1.Delet // Patch applies the patch and returns the patched configMap. func (c *FakeConfigMaps) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ConfigMap, err error) { + emptyResult := &v1.ConfigMap{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(configmapsResource, c.ns, name, pt, data, subresources...), &v1.ConfigMap{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(configmapsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ConfigMap), err } @@ -144,11 +149,12 @@ func (c *FakeConfigMaps) Apply(ctx context.Context, configMap *corev1.ConfigMapA if name == nil { return nil, fmt.Errorf("configMap.Name must be provided to Apply") } + emptyResult := &v1.ConfigMap{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(configmapsResource, c.ns, *name, types.ApplyPatchType, data), &v1.ConfigMap{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(configmapsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ConfigMap), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go index 6b2f6c249e..7e2e91cfa6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go @@ -44,22 +44,24 @@ var endpointsKind = v1.SchemeGroupVersion.WithKind("Endpoints") // Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. func (c *FakeEndpoints) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Endpoints, err error) { + emptyResult := &v1.Endpoints{} obj, err := c.Fake. - Invokes(testing.NewGetAction(endpointsResource, c.ns, name), &v1.Endpoints{}) + Invokes(testing.NewGetActionWithOptions(endpointsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Endpoints), err } // List takes label and field selectors, and returns the list of Endpoints that match those selectors. func (c *FakeEndpoints) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointsList, err error) { + emptyResult := &v1.EndpointsList{} obj, err := c.Fake. 
- Invokes(testing.NewListAction(endpointsResource, endpointsKind, c.ns, opts), &v1.EndpointsList{}) + Invokes(testing.NewListActionWithOptions(endpointsResource, endpointsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeEndpoints) List(ctx context.Context, opts metav1.ListOptions) (resu // Watch returns a watch.Interface that watches the requested endpoints. func (c *FakeEndpoints) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(endpointsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(endpointsResource, c.ns, opts)) } // Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. func (c *FakeEndpoints) Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (result *v1.Endpoints, err error) { + emptyResult := &v1.Endpoints{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(endpointsResource, c.ns, endpoints), &v1.Endpoints{}) + Invokes(testing.NewCreateActionWithOptions(endpointsResource, c.ns, endpoints, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Endpoints), err } // Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. func (c *FakeEndpoints) Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (result *v1.Endpoints, err error) { + emptyResult := &v1.Endpoints{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(endpointsResource, c.ns, endpoints), &v1.Endpoints{}) + Invokes(testing.NewUpdateActionWithOptions(endpointsResource, c.ns, endpoints, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Endpoints), err } @@ -114,7 +118,7 @@ func (c *FakeEndpoints) Delete(ctx context.Context, name string, opts metav1.Del // DeleteCollection deletes a collection of objects. func (c *FakeEndpoints) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(endpointsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(endpointsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.EndpointsList{}) return err @@ -122,11 +126,12 @@ func (c *FakeEndpoints) DeleteCollection(ctx context.Context, opts metav1.Delete // Patch applies the patch and returns the patched endpoints. func (c *FakeEndpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error) { + emptyResult := &v1.Endpoints{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(endpointsResource, c.ns, name, pt, data, subresources...), &v1.Endpoints{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(endpointsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Endpoints), err } @@ -144,11 +149,12 @@ func (c *FakeEndpoints) Apply(ctx context.Context, endpoints *corev1.EndpointsAp if name == nil { return nil, fmt.Errorf("endpoints.Name must be provided to Apply") } + emptyResult := &v1.Endpoints{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(endpointsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Endpoints{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(endpointsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Endpoints), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go index 9ad879b394..a438ba4737 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go @@ -44,22 +44,24 @@ var eventsKind = v1.SchemeGroupVersion.WithKind("Event") // Get takes name of the event, and returns the corresponding event object, and an error if there is any. func (c *FakeEvents) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Event, err error) { + emptyResult := &v1.Event{} obj, err := c.Fake. - Invokes(testing.NewGetAction(eventsResource, c.ns, name), &v1.Event{}) + Invokes(testing.NewGetActionWithOptions(eventsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Event), err } // List takes label and field selectors, and returns the list of Events that match those selectors. func (c *FakeEvents) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EventList, err error) { + emptyResult := &v1.EventList{} obj, err := c.Fake. - Invokes(testing.NewListAction(eventsResource, eventsKind, c.ns, opts), &v1.EventList{}) + Invokes(testing.NewListActionWithOptions(eventsResource, eventsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeEvents) List(ctx context.Context, opts metav1.ListOptions) (result // Watch returns a watch.Interface that watches the requested events. func (c *FakeEvents) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(eventsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(eventsResource, c.ns, opts)) } // Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. func (c *FakeEvents) Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (result *v1.Event, err error) { + emptyResult := &v1.Event{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &v1.Event{}) + Invokes(testing.NewCreateActionWithOptions(eventsResource, c.ns, event, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Event), err } // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. func (c *FakeEvents) Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (result *v1.Event, err error) { + emptyResult := &v1.Event{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &v1.Event{}) + Invokes(testing.NewUpdateActionWithOptions(eventsResource, c.ns, event, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Event), err } @@ -114,7 +118,7 @@ func (c *FakeEvents) Delete(ctx context.Context, name string, opts metav1.Delete // DeleteCollection deletes a collection of objects. func (c *FakeEvents) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(eventsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(eventsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.EventList{}) return err @@ -122,11 +126,12 @@ func (c *FakeEvents) DeleteCollection(ctx context.Context, opts metav1.DeleteOpt // Patch applies the patch and returns the patched event. func (c *FakeEvents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) { + emptyResult := &v1.Event{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, pt, data, subresources...), &v1.Event{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(eventsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Event), err } @@ -144,11 +149,12 @@ func (c *FakeEvents) Apply(ctx context.Context, event *corev1.EventApplyConfigur if name == nil { return nil, fmt.Errorf("event.Name must be provided to Apply") } + emptyResult := &v1.Event{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Event{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(eventsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Event), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go index f18b5741c3..4cc36131ae 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go @@ -44,22 +44,24 @@ var limitrangesKind = v1.SchemeGroupVersion.WithKind("LimitRange") // Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. func (c *FakeLimitRanges) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.LimitRange, err error) { + emptyResult := &v1.LimitRange{} obj, err := c.Fake. - Invokes(testing.NewGetAction(limitrangesResource, c.ns, name), &v1.LimitRange{}) + Invokes(testing.NewGetActionWithOptions(limitrangesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.LimitRange), err } // List takes label and field selectors, and returns the list of LimitRanges that match those selectors. func (c *FakeLimitRanges) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LimitRangeList, err error) { + emptyResult := &v1.LimitRangeList{} obj, err := c.Fake. 
- Invokes(testing.NewListAction(limitrangesResource, limitrangesKind, c.ns, opts), &v1.LimitRangeList{}) + Invokes(testing.NewListActionWithOptions(limitrangesResource, limitrangesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeLimitRanges) List(ctx context.Context, opts metav1.ListOptions) (re // Watch returns a watch.Interface that watches the requested limitRanges. func (c *FakeLimitRanges) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(limitrangesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(limitrangesResource, c.ns, opts)) } // Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. func (c *FakeLimitRanges) Create(ctx context.Context, limitRange *v1.LimitRange, opts metav1.CreateOptions) (result *v1.LimitRange, err error) { + emptyResult := &v1.LimitRange{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(limitrangesResource, c.ns, limitRange), &v1.LimitRange{}) + Invokes(testing.NewCreateActionWithOptions(limitrangesResource, c.ns, limitRange, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.LimitRange), err } // Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any. func (c *FakeLimitRanges) Update(ctx context.Context, limitRange *v1.LimitRange, opts metav1.UpdateOptions) (result *v1.LimitRange, err error) { + emptyResult := &v1.LimitRange{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(limitrangesResource, c.ns, limitRange), &v1.LimitRange{}) + Invokes(testing.NewUpdateActionWithOptions(limitrangesResource, c.ns, limitRange, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.LimitRange), err } @@ -114,7 +118,7 @@ func (c *FakeLimitRanges) Delete(ctx context.Context, name string, opts metav1.D // DeleteCollection deletes a collection of objects. func (c *FakeLimitRanges) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(limitrangesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(limitrangesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.LimitRangeList{}) return err @@ -122,11 +126,12 @@ func (c *FakeLimitRanges) DeleteCollection(ctx context.Context, opts metav1.Dele // Patch applies the patch and returns the patched limitRange. func (c *FakeLimitRanges) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.LimitRange, err error) { + emptyResult := &v1.LimitRange{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(limitrangesResource, c.ns, name, pt, data, subresources...), &v1.LimitRange{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(limitrangesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.LimitRange), err } @@ -144,11 +149,12 @@ func (c *FakeLimitRanges) Apply(ctx context.Context, limitRange *corev1.LimitRan if name == nil { return nil, fmt.Errorf("limitRange.Name must be provided to Apply") } + emptyResult := &v1.LimitRange{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(limitrangesResource, c.ns, *name, types.ApplyPatchType, data), &v1.LimitRange{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(limitrangesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.LimitRange), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go index 52fcff591e..093990571f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go @@ -43,20 +43,22 @@ var namespacesKind = v1.SchemeGroupVersion.WithKind("Namespace") // Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. func (c *FakeNamespaces) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Namespace, err error) { + emptyResult := &v1.Namespace{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(namespacesResource, name), &v1.Namespace{}) + Invokes(testing.NewRootGetActionWithOptions(namespacesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Namespace), err } // List takes label and field selectors, and returns the list of Namespaces that match those selectors. func (c *FakeNamespaces) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NamespaceList, err error) { + emptyResult := &v1.NamespaceList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(namespacesResource, namespacesKind, opts), &v1.NamespaceList{}) + Invokes(testing.NewRootListActionWithOptions(namespacesResource, namespacesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeNamespaces) List(ctx context.Context, opts metav1.ListOptions) (res // Watch returns a watch.Interface that watches the requested namespaces. func (c *FakeNamespaces) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(namespacesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(namespacesResource, opts)) } // Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. func (c *FakeNamespaces) Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (result *v1.Namespace, err error) { + emptyResult := &v1.Namespace{} obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(namespacesResource, namespace), &v1.Namespace{}) + Invokes(testing.NewRootCreateActionWithOptions(namespacesResource, namespace, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Namespace), err } // Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. func (c *FakeNamespaces) Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { + emptyResult := &v1.Namespace{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(namespacesResource, namespace), &v1.Namespace{}) + Invokes(testing.NewRootUpdateActionWithOptions(namespacesResource, namespace, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Namespace), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeNamespaces) UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error) { +func (c *FakeNamespaces) UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { + emptyResult := &v1.Namespace{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(namespacesResource, "status", namespace), &v1.Namespace{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(namespacesResource, "status", namespace, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Namespace), err } @@ -118,10 +123,11 @@ func (c *FakeNamespaces) Delete(ctx context.Context, name string, opts metav1.De // Patch applies the patch and returns the patched namespace. func (c *FakeNamespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) { + emptyResult := &v1.Namespace{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, name, pt, data, subresources...), &v1.Namespace{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(namespacesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Namespace), err } @@ -139,10 +145,11 @@ func (c *FakeNamespaces) Apply(ctx context.Context, namespace *corev1.NamespaceA if name == nil { return nil, fmt.Errorf("namespace.Name must be provided to Apply") } + emptyResult := &v1.Namespace{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, *name, types.ApplyPatchType, data), &v1.Namespace{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(namespacesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Namespace), err } @@ -161,10 +168,11 @@ func (c *FakeNamespaces) ApplyStatus(ctx context.Context, namespace *corev1.Name if name == nil { return nil, fmt.Errorf("namespace.Name must be provided to Apply") } + emptyResult := &v1.Namespace{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, *name, types.ApplyPatchType, data, "status"), &v1.Namespace{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(namespacesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Namespace), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go index 5df40f8d11..451f992da1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go @@ -43,20 +43,22 @@ var nodesKind = v1.SchemeGroupVersion.WithKind("Node") // Get takes name of the node, and returns the corresponding node object, and an error if there is any. func (c *FakeNodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Node, err error) { + emptyResult := &v1.Node{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(nodesResource, name), &v1.Node{}) + Invokes(testing.NewRootGetActionWithOptions(nodesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Node), err } // List takes label and field selectors, and returns the list of Nodes that match those selectors. func (c *FakeNodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NodeList, err error) { + emptyResult := &v1.NodeList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(nodesResource, nodesKind, opts), &v1.NodeList{}) + Invokes(testing.NewRootListActionWithOptions(nodesResource, nodesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeNodes) List(ctx context.Context, opts metav1.ListOptions) (result * // Watch returns a watch.Interface that watches the requested nodes. func (c *FakeNodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(nodesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(nodesResource, opts)) } // Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. func (c *FakeNodes) Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (result *v1.Node, err error) { + emptyResult := &v1.Node{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(nodesResource, node), &v1.Node{}) + Invokes(testing.NewRootCreateActionWithOptions(nodesResource, node, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Node), err } // Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. func (c *FakeNodes) Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) { + emptyResult := &v1.Node{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(nodesResource, node), &v1.Node{}) + Invokes(testing.NewRootUpdateActionWithOptions(nodesResource, node, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Node), err } // UpdateStatus was generated because the type contains a Status member. 
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeNodes) UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) { +func (c *FakeNodes) UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) { + emptyResult := &v1.Node{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(nodesResource, "status", node), &v1.Node{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(nodesResource, "status", node, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Node), err } @@ -118,7 +123,7 @@ func (c *FakeNodes) Delete(ctx context.Context, name string, opts metav1.DeleteO // DeleteCollection deletes a collection of objects. func (c *FakeNodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(nodesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(nodesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.NodeList{}) return err @@ -126,10 +131,11 @@ func (c *FakeNodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOpti // Patch applies the patch and returns the patched node. func (c *FakeNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) { + emptyResult := &v1.Node{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(nodesResource, name, pt, data, subresources...), &v1.Node{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(nodesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Node), err } @@ -147,10 +153,11 @@ func (c *FakeNodes) Apply(ctx context.Context, node *corev1.NodeApplyConfigurati if name == nil { return nil, fmt.Errorf("node.Name must be provided to Apply") } + emptyResult := &v1.Node{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(nodesResource, *name, types.ApplyPatchType, data), &v1.Node{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(nodesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Node), err } @@ -169,10 +176,11 @@ func (c *FakeNodes) ApplyStatus(ctx context.Context, node *corev1.NodeApplyConfi if name == nil { return nil, fmt.Errorf("node.Name must be provided to Apply") } + emptyResult := &v1.Node{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(nodesResource, *name, types.ApplyPatchType, data, "status"), &v1.Node{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(nodesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Node), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go index 5b06d0b192..16a1f2201a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go @@ -43,20 +43,22 @@ var persistentvolumesKind = v1.SchemeGroupVersion.WithKind("PersistentVolume") // Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. func (c *FakePersistentVolumes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolume, err error) { + emptyResult := &v1.PersistentVolume{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(persistentvolumesResource, name), &v1.PersistentVolume{}) + Invokes(testing.NewRootGetActionWithOptions(persistentvolumesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolume), err } // List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. func (c *FakePersistentVolumes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeList, err error) { + emptyResult := &v1.PersistentVolumeList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(persistentvolumesResource, persistentvolumesKind, opts), &v1.PersistentVolumeList{}) + Invokes(testing.NewRootListActionWithOptions(persistentvolumesResource, persistentvolumesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakePersistentVolumes) List(ctx context.Context, opts metav1.ListOption // Watch returns a watch.Interface that watches the requested persistentVolumes. func (c *FakePersistentVolumes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(persistentvolumesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(persistentvolumesResource, opts)) } // Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. func (c *FakePersistentVolumes) Create(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.CreateOptions) (result *v1.PersistentVolume, err error) { + emptyResult := &v1.PersistentVolume{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(persistentvolumesResource, persistentVolume), &v1.PersistentVolume{}) + Invokes(testing.NewRootCreateActionWithOptions(persistentvolumesResource, persistentVolume, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolume), err } // Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. 
func (c *FakePersistentVolumes) Update(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) { + emptyResult := &v1.PersistentVolume{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(persistentvolumesResource, persistentVolume), &v1.PersistentVolume{}) + Invokes(testing.NewRootUpdateActionWithOptions(persistentvolumesResource, persistentVolume, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolume), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePersistentVolumes) UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error) { +func (c *FakePersistentVolumes) UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) { + emptyResult := &v1.PersistentVolume{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(persistentvolumesResource, "status", persistentVolume), &v1.PersistentVolume{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(persistentvolumesResource, "status", persistentVolume, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolume), err } @@ -118,7 +123,7 @@ func (c *FakePersistentVolumes) Delete(ctx context.Context, name string, opts me // DeleteCollection deletes a collection of objects. func (c *FakePersistentVolumes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(persistentvolumesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(persistentvolumesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.PersistentVolumeList{}) return err @@ -126,10 +131,11 @@ func (c *FakePersistentVolumes) DeleteCollection(ctx context.Context, opts metav // Patch applies the patch and returns the patched persistentVolume. func (c *FakePersistentVolumes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolume, err error) { + emptyResult := &v1.PersistentVolume{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, name, pt, data, subresources...), &v1.PersistentVolume{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(persistentvolumesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolume), err } @@ -147,10 +153,11 @@ func (c *FakePersistentVolumes) Apply(ctx context.Context, persistentVolume *cor if name == nil { return nil, fmt.Errorf("persistentVolume.Name must be provided to Apply") } + emptyResult := &v1.PersistentVolume{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, *name, types.ApplyPatchType, data), &v1.PersistentVolume{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(persistentvolumesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolume), err } @@ -169,10 +176,11 @@ func (c *FakePersistentVolumes) ApplyStatus(ctx context.Context, persistentVolum if name == nil { return nil, fmt.Errorf("persistentVolume.Name must be provided to Apply") } + emptyResult := &v1.PersistentVolume{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, *name, types.ApplyPatchType, data, "status"), &v1.PersistentVolume{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(persistentvolumesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolume), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go index b860e53674..12617c2432 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go @@ -44,22 +44,24 @@ var persistentvolumeclaimsKind = v1.SchemeGroupVersion.WithKind("PersistentVolum // Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any. func (c *FakePersistentVolumeClaims) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolumeClaim, err error) { + emptyResult := &v1.PersistentVolumeClaim{} obj, err := c.Fake. - Invokes(testing.NewGetAction(persistentvolumeclaimsResource, c.ns, name), &v1.PersistentVolumeClaim{}) + Invokes(testing.NewGetActionWithOptions(persistentvolumeclaimsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolumeClaim), err } // List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. func (c *FakePersistentVolumeClaims) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) { + emptyResult := &v1.PersistentVolumeClaimList{} obj, err := c.Fake. - Invokes(testing.NewListAction(persistentvolumeclaimsResource, persistentvolumeclaimsKind, c.ns, opts), &v1.PersistentVolumeClaimList{}) + Invokes(testing.NewListActionWithOptions(persistentvolumeclaimsResource, persistentvolumeclaimsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakePersistentVolumeClaims) List(ctx context.Context, opts metav1.ListO // Watch returns a watch.Interface that watches the requested persistentVolumeClaims. func (c *FakePersistentVolumeClaims) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(persistentvolumeclaimsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(persistentvolumeclaimsResource, c.ns, opts)) } // Create takes the representation of a persistentVolumeClaim and creates it. 
Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. func (c *FakePersistentVolumeClaims) Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.CreateOptions) (result *v1.PersistentVolumeClaim, err error) { + emptyResult := &v1.PersistentVolumeClaim{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &v1.PersistentVolumeClaim{}) + Invokes(testing.NewCreateActionWithOptions(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolumeClaim), err } // Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. func (c *FakePersistentVolumeClaims) Update(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) { + emptyResult := &v1.PersistentVolumeClaim{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &v1.PersistentVolumeClaim{}) + Invokes(testing.NewUpdateActionWithOptions(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolumeClaim), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePersistentVolumeClaims) UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*v1.PersistentVolumeClaim, error) { +func (c *FakePersistentVolumeClaims) UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) { + emptyResult := &v1.PersistentVolumeClaim{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(persistentvolumeclaimsResource, "status", c.ns, persistentVolumeClaim), &v1.PersistentVolumeClaim{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(persistentvolumeclaimsResource, "status", c.ns, persistentVolumeClaim, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolumeClaim), err } @@ -126,7 +131,7 @@ func (c *FakePersistentVolumeClaims) Delete(ctx context.Context, name string, op // DeleteCollection deletes a collection of objects. func (c *FakePersistentVolumeClaims) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(persistentvolumeclaimsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(persistentvolumeclaimsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.PersistentVolumeClaimList{}) return err @@ -134,11 +139,12 @@ func (c *FakePersistentVolumeClaims) DeleteCollection(ctx context.Context, opts // Patch applies the patch and returns the patched persistentVolumeClaim. func (c *FakePersistentVolumeClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolumeClaim, err error) { + emptyResult := &v1.PersistentVolumeClaim{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, name, pt, data, subresources...), &v1.PersistentVolumeClaim{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(persistentvolumeclaimsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolumeClaim), err } @@ -156,11 +162,12 @@ func (c *FakePersistentVolumeClaims) Apply(ctx context.Context, persistentVolume if name == nil { return nil, fmt.Errorf("persistentVolumeClaim.Name must be provided to Apply") } + emptyResult := &v1.PersistentVolumeClaim{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, *name, types.ApplyPatchType, data), &v1.PersistentVolumeClaim{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(persistentvolumeclaimsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolumeClaim), err } @@ -179,11 +186,12 @@ func (c *FakePersistentVolumeClaims) ApplyStatus(ctx context.Context, persistent if name == nil { return nil, fmt.Errorf("persistentVolumeClaim.Name must be provided to Apply") } + emptyResult := &v1.PersistentVolumeClaim{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.PersistentVolumeClaim{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(persistentvolumeclaimsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PersistentVolumeClaim), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go index 23634c7d07..d2b46e8e3a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go @@ -44,22 +44,24 @@ var podsKind = v1.SchemeGroupVersion.WithKind("Pod") // Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. func (c *FakePods) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Pod, err error) { + emptyResult := &v1.Pod{} obj, err := c.Fake. - Invokes(testing.NewGetAction(podsResource, c.ns, name), &v1.Pod{}) + Invokes(testing.NewGetActionWithOptions(podsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Pod), err } // List takes label and field selectors, and returns the list of Pods that match those selectors. func (c *FakePods) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodList, err error) { + emptyResult := &v1.PodList{} obj, err := c.Fake. - Invokes(testing.NewListAction(podsResource, podsKind, c.ns, opts), &v1.PodList{}) + Invokes(testing.NewListActionWithOptions(podsResource, podsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakePods) List(ctx context.Context, opts metav1.ListOptions) (result *v // Watch returns a watch.Interface that watches the requested pods. func (c *FakePods) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. 
- InvokesWatch(testing.NewWatchAction(podsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(podsResource, c.ns, opts)) } // Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. func (c *FakePods) Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (result *v1.Pod, err error) { + emptyResult := &v1.Pod{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(podsResource, c.ns, pod), &v1.Pod{}) + Invokes(testing.NewCreateActionWithOptions(podsResource, c.ns, pod, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Pod), err } // Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. func (c *FakePods) Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { + emptyResult := &v1.Pod{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(podsResource, c.ns, pod), &v1.Pod{}) + Invokes(testing.NewUpdateActionWithOptions(podsResource, c.ns, pod, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Pod), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePods) UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) { +func (c *FakePods) UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { + emptyResult := &v1.Pod{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(podsResource, "status", c.ns, pod), &v1.Pod{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(podsResource, "status", c.ns, pod, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Pod), err } @@ -126,7 +131,7 @@ func (c *FakePods) Delete(ctx context.Context, name string, opts metav1.DeleteOp // DeleteCollection deletes a collection of objects. func (c *FakePods) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(podsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(podsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.PodList{}) return err @@ -134,11 +139,12 @@ func (c *FakePods) DeleteCollection(ctx context.Context, opts metav1.DeleteOptio // Patch applies the patch and returns the patched pod. func (c *FakePods) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) { + emptyResult := &v1.Pod{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, name, pt, data, subresources...), &v1.Pod{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(podsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Pod), err } @@ -156,11 +162,12 @@ func (c *FakePods) Apply(ctx context.Context, pod *corev1.PodApplyConfiguration, if name == nil { return nil, fmt.Errorf("pod.Name must be provided to Apply") } + emptyResult := &v1.Pod{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Pod{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(podsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Pod), err } @@ -179,22 +186,24 @@ func (c *FakePods) ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfigur if name == nil { return nil, fmt.Errorf("pod.Name must be provided to Apply") } + emptyResult := &v1.Pod{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.Pod{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(podsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Pod), err } // UpdateEphemeralContainers takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. func (c *FakePods) UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { + emptyResult := &v1.Pod{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(podsResource, "ephemeralcontainers", c.ns, pod), &v1.Pod{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(podsResource, "ephemeralcontainers", c.ns, pod, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Pod), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go index 9fa97ab402..dc9affdd06 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go @@ -44,22 +44,24 @@ var podtemplatesKind = v1.SchemeGroupVersion.WithKind("PodTemplate") // Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. func (c *FakePodTemplates) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodTemplate, err error) { + emptyResult := &v1.PodTemplate{} obj, err := c.Fake. - Invokes(testing.NewGetAction(podtemplatesResource, c.ns, name), &v1.PodTemplate{}) + Invokes(testing.NewGetActionWithOptions(podtemplatesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PodTemplate), err } // List takes label and field selectors, and returns the list of PodTemplates that match those selectors. func (c *FakePodTemplates) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodTemplateList, err error) { + emptyResult := &v1.PodTemplateList{} obj, err := c.Fake. - Invokes(testing.NewListAction(podtemplatesResource, podtemplatesKind, c.ns, opts), &v1.PodTemplateList{}) + Invokes(testing.NewListActionWithOptions(podtemplatesResource, podtemplatesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakePodTemplates) List(ctx context.Context, opts metav1.ListOptions) (r // Watch returns a watch.Interface that watches the requested podTemplates. 
func (c *FakePodTemplates) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(podtemplatesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(podtemplatesResource, c.ns, opts)) } // Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. func (c *FakePodTemplates) Create(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.CreateOptions) (result *v1.PodTemplate, err error) { + emptyResult := &v1.PodTemplate{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(podtemplatesResource, c.ns, podTemplate), &v1.PodTemplate{}) + Invokes(testing.NewCreateActionWithOptions(podtemplatesResource, c.ns, podTemplate, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PodTemplate), err } // Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. func (c *FakePodTemplates) Update(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.UpdateOptions) (result *v1.PodTemplate, err error) { + emptyResult := &v1.PodTemplate{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(podtemplatesResource, c.ns, podTemplate), &v1.PodTemplate{}) + Invokes(testing.NewUpdateActionWithOptions(podtemplatesResource, c.ns, podTemplate, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PodTemplate), err } @@ -114,7 +118,7 @@ func (c *FakePodTemplates) Delete(ctx context.Context, name string, opts metav1. // DeleteCollection deletes a collection of objects. func (c *FakePodTemplates) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(podtemplatesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(podtemplatesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.PodTemplateList{}) return err @@ -122,11 +126,12 @@ func (c *FakePodTemplates) DeleteCollection(ctx context.Context, opts metav1.Del // Patch applies the patch and returns the patched podTemplate. func (c *FakePodTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodTemplate, err error) { + emptyResult := &v1.PodTemplate{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podtemplatesResource, c.ns, name, pt, data, subresources...), &v1.PodTemplate{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(podtemplatesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PodTemplate), err } @@ -144,11 +149,12 @@ func (c *FakePodTemplates) Apply(ctx context.Context, podTemplate *corev1.PodTem if name == nil { return nil, fmt.Errorf("podTemplate.Name must be provided to Apply") } + emptyResult := &v1.PodTemplate{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(podtemplatesResource, c.ns, *name, types.ApplyPatchType, data), &v1.PodTemplate{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(podtemplatesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PodTemplate), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go index 1e469c9b1a..6b3497f089 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go @@ -45,22 +45,24 @@ var replicationcontrollersKind = v1.SchemeGroupVersion.WithKind("ReplicationCont // Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any. func (c *FakeReplicationControllers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicationController, err error) { + emptyResult := &v1.ReplicationController{} obj, err := c.Fake. - Invokes(testing.NewGetAction(replicationcontrollersResource, c.ns, name), &v1.ReplicationController{}) + Invokes(testing.NewGetActionWithOptions(replicationcontrollersResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicationController), err } // List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. func (c *FakeReplicationControllers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicationControllerList, err error) { + emptyResult := &v1.ReplicationControllerList{} obj, err := c.Fake. - Invokes(testing.NewListAction(replicationcontrollersResource, replicationcontrollersKind, c.ns, opts), &v1.ReplicationControllerList{}) + Invokes(testing.NewListActionWithOptions(replicationcontrollersResource, replicationcontrollersKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -79,40 +81,43 @@ func (c *FakeReplicationControllers) List(ctx context.Context, opts metav1.ListO // Watch returns a watch.Interface that watches the requested replicationControllers. func (c *FakeReplicationControllers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(replicationcontrollersResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(replicationcontrollersResource, c.ns, opts)) } // Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. func (c *FakeReplicationControllers) Create(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.CreateOptions) (result *v1.ReplicationController, err error) { + emptyResult := &v1.ReplicationController{} obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(replicationcontrollersResource, c.ns, replicationController), &v1.ReplicationController{}) + Invokes(testing.NewCreateActionWithOptions(replicationcontrollersResource, c.ns, replicationController, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicationController), err } // Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. func (c *FakeReplicationControllers) Update(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) { + emptyResult := &v1.ReplicationController{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(replicationcontrollersResource, c.ns, replicationController), &v1.ReplicationController{}) + Invokes(testing.NewUpdateActionWithOptions(replicationcontrollersResource, c.ns, replicationController, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicationController), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicationControllers) UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (*v1.ReplicationController, error) { +func (c *FakeReplicationControllers) UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) { + emptyResult := &v1.ReplicationController{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(replicationcontrollersResource, "status", c.ns, replicationController), &v1.ReplicationController{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(replicationcontrollersResource, "status", c.ns, replicationController, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicationController), err } @@ -127,7 +132,7 @@ func (c *FakeReplicationControllers) Delete(ctx context.Context, name string, op // DeleteCollection deletes a collection of objects. func (c *FakeReplicationControllers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(replicationcontrollersResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(replicationcontrollersResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.ReplicationControllerList{}) return err @@ -135,11 +140,12 @@ func (c *FakeReplicationControllers) DeleteCollection(ctx context.Context, opts // Patch applies the patch and returns the patched replicationController. func (c *FakeReplicationControllers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicationController, err error) { + emptyResult := &v1.ReplicationController{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, name, pt, data, subresources...), &v1.ReplicationController{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicationcontrollersResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicationController), err } @@ -157,11 +163,12 @@ func (c *FakeReplicationControllers) Apply(ctx context.Context, replicationContr if name == nil { return nil, fmt.Errorf("replicationController.Name must be provided to Apply") } + emptyResult := &v1.ReplicationController{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, *name, types.ApplyPatchType, data), &v1.ReplicationController{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicationcontrollersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicationController), err } @@ -180,33 +187,36 @@ func (c *FakeReplicationControllers) ApplyStatus(ctx context.Context, replicatio if name == nil { return nil, fmt.Errorf("replicationController.Name must be provided to Apply") } + emptyResult := &v1.ReplicationController{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.ReplicationController{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicationcontrollersResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ReplicationController), err } // GetScale takes name of the replicationController, and returns the corresponding scale object, and an error if there is any. func (c *FakeReplicationControllers) GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. - Invokes(testing.NewGetSubresourceAction(replicationcontrollersResource, c.ns, "scale", replicationControllerName), &autoscalingv1.Scale{}) + Invokes(testing.NewGetSubresourceActionWithOptions(replicationcontrollersResource, c.ns, "scale", replicationControllerName, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*autoscalingv1.Scale), err } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *FakeReplicationControllers) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { + emptyResult := &autoscalingv1.Scale{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(replicationcontrollersResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(replicationcontrollersResource, "scale", c.ns, scale, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*autoscalingv1.Scale), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go index 87664985ce..5e2e02afc1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go @@ -44,22 +44,24 @@ var resourcequotasKind = v1.SchemeGroupVersion.WithKind("ResourceQuota") // Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. func (c *FakeResourceQuotas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ResourceQuota, err error) { + emptyResult := &v1.ResourceQuota{} obj, err := c.Fake. - Invokes(testing.NewGetAction(resourcequotasResource, c.ns, name), &v1.ResourceQuota{}) + Invokes(testing.NewGetActionWithOptions(resourcequotasResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ResourceQuota), err } // List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. func (c *FakeResourceQuotas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ResourceQuotaList, err error) { + emptyResult := &v1.ResourceQuotaList{} obj, err := c.Fake. - Invokes(testing.NewListAction(resourcequotasResource, resourcequotasKind, c.ns, opts), &v1.ResourceQuotaList{}) + Invokes(testing.NewListActionWithOptions(resourcequotasResource, resourcequotasKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeResourceQuotas) List(ctx context.Context, opts metav1.ListOptions) // Watch returns a watch.Interface that watches the requested resourceQuotas. func (c *FakeResourceQuotas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(resourcequotasResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(resourcequotasResource, c.ns, opts)) } // Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. func (c *FakeResourceQuotas) Create(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.CreateOptions) (result *v1.ResourceQuota, err error) { + emptyResult := &v1.ResourceQuota{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(resourcequotasResource, c.ns, resourceQuota), &v1.ResourceQuota{}) + Invokes(testing.NewCreateActionWithOptions(resourcequotasResource, c.ns, resourceQuota, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ResourceQuota), err } // Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. 
func (c *FakeResourceQuotas) Update(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) { + emptyResult := &v1.ResourceQuota{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(resourcequotasResource, c.ns, resourceQuota), &v1.ResourceQuota{}) + Invokes(testing.NewUpdateActionWithOptions(resourcequotasResource, c.ns, resourceQuota, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ResourceQuota), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeResourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (*v1.ResourceQuota, error) { +func (c *FakeResourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) { + emptyResult := &v1.ResourceQuota{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(resourcequotasResource, "status", c.ns, resourceQuota), &v1.ResourceQuota{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(resourcequotasResource, "status", c.ns, resourceQuota, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ResourceQuota), err } @@ -126,7 +131,7 @@ func (c *FakeResourceQuotas) Delete(ctx context.Context, name string, opts metav // DeleteCollection deletes a collection of objects. func (c *FakeResourceQuotas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(resourcequotasResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(resourcequotasResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.ResourceQuotaList{}) return err @@ -134,11 +139,12 @@ func (c *FakeResourceQuotas) DeleteCollection(ctx context.Context, opts metav1.D // Patch applies the patch and returns the patched resourceQuota. func (c *FakeResourceQuotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResourceQuota, err error) { + emptyResult := &v1.ResourceQuota{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, name, pt, data, subresources...), &v1.ResourceQuota{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(resourcequotasResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ResourceQuota), err } @@ -156,11 +162,12 @@ func (c *FakeResourceQuotas) Apply(ctx context.Context, resourceQuota *corev1.Re if name == nil { return nil, fmt.Errorf("resourceQuota.Name must be provided to Apply") } + emptyResult := &v1.ResourceQuota{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, *name, types.ApplyPatchType, data), &v1.ResourceQuota{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(resourcequotasResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ResourceQuota), err } @@ -179,11 +186,12 @@ func (c *FakeResourceQuotas) ApplyStatus(ctx context.Context, resourceQuota *cor if name == nil { return nil, fmt.Errorf("resourceQuota.Name must be provided to Apply") } + emptyResult := &v1.ResourceQuota{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.ResourceQuota{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(resourcequotasResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ResourceQuota), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go index 90035a7037..ec0fc65b5b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go @@ -44,22 +44,24 @@ var secretsKind = v1.SchemeGroupVersion.WithKind("Secret") // Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. func (c *FakeSecrets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Secret, err error) { + emptyResult := &v1.Secret{} obj, err := c.Fake. - Invokes(testing.NewGetAction(secretsResource, c.ns, name), &v1.Secret{}) + Invokes(testing.NewGetActionWithOptions(secretsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Secret), err } // List takes label and field selectors, and returns the list of Secrets that match those selectors. func (c *FakeSecrets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.SecretList, err error) { + emptyResult := &v1.SecretList{} obj, err := c.Fake. - Invokes(testing.NewListAction(secretsResource, secretsKind, c.ns, opts), &v1.SecretList{}) + Invokes(testing.NewListActionWithOptions(secretsResource, secretsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeSecrets) List(ctx context.Context, opts metav1.ListOptions) (result // Watch returns a watch.Interface that watches the requested secrets. func (c *FakeSecrets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(secretsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(secretsResource, c.ns, opts)) } // Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. func (c *FakeSecrets) Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (result *v1.Secret, err error) { + emptyResult := &v1.Secret{} obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(secretsResource, c.ns, secret), &v1.Secret{}) + Invokes(testing.NewCreateActionWithOptions(secretsResource, c.ns, secret, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Secret), err } // Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. func (c *FakeSecrets) Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (result *v1.Secret, err error) { + emptyResult := &v1.Secret{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(secretsResource, c.ns, secret), &v1.Secret{}) + Invokes(testing.NewUpdateActionWithOptions(secretsResource, c.ns, secret, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Secret), err } @@ -114,7 +118,7 @@ func (c *FakeSecrets) Delete(ctx context.Context, name string, opts metav1.Delet // DeleteCollection deletes a collection of objects. func (c *FakeSecrets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(secretsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(secretsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.SecretList{}) return err @@ -122,11 +126,12 @@ func (c *FakeSecrets) DeleteCollection(ctx context.Context, opts metav1.DeleteOp // Patch applies the patch and returns the patched secret. func (c *FakeSecrets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error) { + emptyResult := &v1.Secret{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(secretsResource, c.ns, name, pt, data, subresources...), &v1.Secret{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(secretsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Secret), err } @@ -144,11 +149,12 @@ func (c *FakeSecrets) Apply(ctx context.Context, secret *corev1.SecretApplyConfi if name == nil { return nil, fmt.Errorf("secret.Name must be provided to Apply") } + emptyResult := &v1.Secret{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(secretsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Secret{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(secretsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Secret), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go index 514ab19e39..2a3cf45fbc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go @@ -44,22 +44,24 @@ var servicesKind = v1.SchemeGroupVersion.WithKind("Service") // Get takes name of the service, and returns the corresponding service object, and an error if there is any. func (c *FakeServices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Service, err error) { + emptyResult := &v1.Service{} obj, err := c.Fake. 
- Invokes(testing.NewGetAction(servicesResource, c.ns, name), &v1.Service{}) + Invokes(testing.NewGetActionWithOptions(servicesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Service), err } // List takes label and field selectors, and returns the list of Services that match those selectors. func (c *FakeServices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceList, err error) { + emptyResult := &v1.ServiceList{} obj, err := c.Fake. - Invokes(testing.NewListAction(servicesResource, servicesKind, c.ns, opts), &v1.ServiceList{}) + Invokes(testing.NewListActionWithOptions(servicesResource, servicesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeServices) List(ctx context.Context, opts metav1.ListOptions) (resul // Watch returns a watch.Interface that watches the requested services. func (c *FakeServices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(servicesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(servicesResource, c.ns, opts)) } // Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. func (c *FakeServices) Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (result *v1.Service, err error) { + emptyResult := &v1.Service{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(servicesResource, c.ns, service), &v1.Service{}) + Invokes(testing.NewCreateActionWithOptions(servicesResource, c.ns, service, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Service), err } // Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. func (c *FakeServices) Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) { + emptyResult := &v1.Service{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(servicesResource, c.ns, service), &v1.Service{}) + Invokes(testing.NewUpdateActionWithOptions(servicesResource, c.ns, service, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Service), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeServices) UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error) { +func (c *FakeServices) UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) { + emptyResult := &v1.Service{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(servicesResource, "status", c.ns, service), &v1.Service{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(servicesResource, "status", c.ns, service, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Service), err } @@ -126,11 +131,12 @@ func (c *FakeServices) Delete(ctx context.Context, name string, opts metav1.Dele // Patch applies the patch and returns the patched service. 
func (c *FakeServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) { + emptyResult := &v1.Service{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, name, pt, data, subresources...), &v1.Service{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(servicesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Service), err } @@ -148,11 +154,12 @@ func (c *FakeServices) Apply(ctx context.Context, service *corev1.ServiceApplyCo if name == nil { return nil, fmt.Errorf("service.Name must be provided to Apply") } + emptyResult := &v1.Service{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, *name, types.ApplyPatchType, data), &v1.Service{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(servicesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Service), err } @@ -171,11 +178,12 @@ func (c *FakeServices) ApplyStatus(ctx context.Context, service *corev1.ServiceA if name == nil { return nil, fmt.Errorf("service.Name must be provided to Apply") } + emptyResult := &v1.Service{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.Service{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(servicesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Service), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go index 115ff07123..f3ad8d40f9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go @@ -45,22 +45,24 @@ var serviceaccountsKind = v1.SchemeGroupVersion.WithKind("ServiceAccount") // Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. func (c *FakeServiceAccounts) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ServiceAccount, err error) { + emptyResult := &v1.ServiceAccount{} obj, err := c.Fake. - Invokes(testing.NewGetAction(serviceaccountsResource, c.ns, name), &v1.ServiceAccount{}) + Invokes(testing.NewGetActionWithOptions(serviceaccountsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ServiceAccount), err } // List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. func (c *FakeServiceAccounts) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceAccountList, err error) { + emptyResult := &v1.ServiceAccountList{} obj, err := c.Fake. 
- Invokes(testing.NewListAction(serviceaccountsResource, serviceaccountsKind, c.ns, opts), &v1.ServiceAccountList{}) + Invokes(testing.NewListActionWithOptions(serviceaccountsResource, serviceaccountsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -79,28 +81,30 @@ func (c *FakeServiceAccounts) List(ctx context.Context, opts metav1.ListOptions) // Watch returns a watch.Interface that watches the requested serviceAccounts. func (c *FakeServiceAccounts) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(serviceaccountsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(serviceaccountsResource, c.ns, opts)) } // Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. func (c *FakeServiceAccounts) Create(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.CreateOptions) (result *v1.ServiceAccount, err error) { + emptyResult := &v1.ServiceAccount{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(serviceaccountsResource, c.ns, serviceAccount), &v1.ServiceAccount{}) + Invokes(testing.NewCreateActionWithOptions(serviceaccountsResource, c.ns, serviceAccount, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ServiceAccount), err } // Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. func (c *FakeServiceAccounts) Update(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.UpdateOptions) (result *v1.ServiceAccount, err error) { + emptyResult := &v1.ServiceAccount{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(serviceaccountsResource, c.ns, serviceAccount), &v1.ServiceAccount{}) + Invokes(testing.NewUpdateActionWithOptions(serviceaccountsResource, c.ns, serviceAccount, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ServiceAccount), err } @@ -115,7 +119,7 @@ func (c *FakeServiceAccounts) Delete(ctx context.Context, name string, opts meta // DeleteCollection deletes a collection of objects. func (c *FakeServiceAccounts) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(serviceaccountsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(serviceaccountsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.ServiceAccountList{}) return err @@ -123,11 +127,12 @@ func (c *FakeServiceAccounts) DeleteCollection(ctx context.Context, opts metav1. // Patch applies the patch and returns the patched serviceAccount. func (c *FakeServiceAccounts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceAccount, err error) { + emptyResult := &v1.ServiceAccount{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(serviceaccountsResource, c.ns, name, pt, data, subresources...), &v1.ServiceAccount{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(serviceaccountsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ServiceAccount), err } @@ -145,22 +150,24 @@ func (c *FakeServiceAccounts) Apply(ctx context.Context, serviceAccount *corev1. if name == nil { return nil, fmt.Errorf("serviceAccount.Name must be provided to Apply") } + emptyResult := &v1.ServiceAccount{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(serviceaccountsResource, c.ns, *name, types.ApplyPatchType, data), &v1.ServiceAccount{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(serviceaccountsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ServiceAccount), err } // CreateToken takes the representation of a tokenRequest and creates it. Returns the server's representation of the tokenRequest, and an error, if there is any. func (c *FakeServiceAccounts) CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts metav1.CreateOptions) (result *authenticationv1.TokenRequest, err error) { + emptyResult := &authenticationv1.TokenRequest{} obj, err := c.Fake. - Invokes(testing.NewCreateSubresourceAction(serviceaccountsResource, serviceAccountName, "token", c.ns, tokenRequest), &authenticationv1.TokenRequest{}) + Invokes(testing.NewCreateSubresourceActionWithOptions(serviceaccountsResource, serviceAccountName, "token", c.ns, tokenRequest, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*authenticationv1.TokenRequest), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go index e6883b607c..f8e4048f98 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // LimitRangesGetter has a method to return a LimitRangeInterface. @@ -55,154 +52,18 @@ type LimitRangeInterface interface { // limitRanges implements LimitRangeInterface type limitRanges struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.LimitRange, *v1.LimitRangeList, *corev1.LimitRangeApplyConfiguration] } // newLimitRanges returns a LimitRanges func newLimitRanges(c *CoreV1Client, namespace string) *limitRanges { return &limitRanges{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.LimitRange, *v1.LimitRangeList, *corev1.LimitRangeApplyConfiguration]( + "limitranges", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.LimitRange { return &v1.LimitRange{} }, + func() *v1.LimitRangeList { return &v1.LimitRangeList{} }), } } - -// Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. 
-func (c *limitRanges) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.LimitRange, err error) { - result = &v1.LimitRange{} - err = c.client.Get(). - Namespace(c.ns). - Resource("limitranges"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of LimitRanges that match those selectors. -func (c *limitRanges) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LimitRangeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.LimitRangeList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested limitRanges. -func (c *limitRanges) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *limitRanges) Create(ctx context.Context, limitRange *v1.LimitRange, opts metav1.CreateOptions) (result *v1.LimitRange, err error) { - result = &v1.LimitRange{} - err = c.client.Post(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(limitRange). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *limitRanges) Update(ctx context.Context, limitRange *v1.LimitRange, opts metav1.UpdateOptions) (result *v1.LimitRange, err error) { - result = &v1.LimitRange{} - err = c.client.Put(). - Namespace(c.ns). - Resource("limitranges"). - Name(limitRange.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(limitRange). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the limitRange and deletes it. Returns an error if one occurs. -func (c *limitRanges) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("limitranges"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *limitRanges) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched limitRange. -func (c *limitRanges) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.LimitRange, err error) { - result = &v1.LimitRange{} - err = c.client.Patch(pt). - Namespace(c.ns). 
- Resource("limitranges"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied limitRange. -func (c *limitRanges) Apply(ctx context.Context, limitRange *corev1.LimitRangeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.LimitRange, err error) { - if limitRange == nil { - return nil, fmt.Errorf("limitRange provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(limitRange) - if err != nil { - return nil, err - } - name := limitRange.Name - if name == nil { - return nil, fmt.Errorf("limitRange.Name must be provided to Apply") - } - result = &v1.LimitRange{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("limitranges"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go index 06c77b4c45..75d20648f5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // NamespacesGetter has a method to return a NamespaceInterface. @@ -43,6 +40,7 @@ type NamespacesGetter interface { type NamespaceInterface interface { Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (*v1.Namespace, error) Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Namespace, error) @@ -50,178 +48,25 @@ type NamespaceInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) Apply(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) NamespaceExpansion } // namespaces implements NamespaceInterface type namespaces struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.Namespace, *v1.NamespaceList, *corev1.NamespaceApplyConfiguration] } // newNamespaces returns a Namespaces func newNamespaces(c *CoreV1Client) *namespaces { return &namespaces{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.Namespace, *v1.NamespaceList, *corev1.NamespaceApplyConfiguration]( + "namespaces", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.Namespace { return &v1.Namespace{} }, + func() *v1.NamespaceList { return &v1.NamespaceList{} }), } } - -// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. -func (c *namespaces) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Get(). - Resource("namespaces"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Namespaces that match those selectors. -func (c *namespaces) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NamespaceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.NamespaceList{} - err = c.client.Get(). - Resource("namespaces"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested namespaces. -func (c *namespaces) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("namespaces"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Post(). - Resource("namespaces"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(namespace). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Put(). - Resource("namespaces"). - Name(namespace.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(namespace). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *namespaces) UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Put(). 
- Resource("namespaces"). - Name(namespace.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(namespace). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the namespace and deletes it. Returns an error if one occurs. -func (c *namespaces) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("namespaces"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched namespace. -func (c *namespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Patch(pt). - Resource("namespaces"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied namespace. -func (c *namespaces) Apply(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) { - if namespace == nil { - return nil, fmt.Errorf("namespace provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(namespace) - if err != nil { - return nil, err - } - name := namespace.Name - if name == nil { - return nil, fmt.Errorf("namespace.Name must be provided to Apply") - } - result = &v1.Namespace{} - err = c.client.Patch(types.ApplyPatchType). - Resource("namespaces"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *namespaces) ApplyStatus(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) { - if namespace == nil { - return nil, fmt.Errorf("namespace provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(namespace) - if err != nil { - return nil, err - } - - name := namespace.Name - if name == nil { - return nil, fmt.Errorf("namespace.Name must be provided to Apply") - } - - result = &v1.Namespace{} - err = c.client.Patch(types.ApplyPatchType). - Resource("namespaces"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go index be1116db15..4f720fb92e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go @@ -32,6 +32,6 @@ type NamespaceExpansion interface { // Finalize takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs. 
func (c *namespaces) Finalize(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { result = &v1.Namespace{} - err = c.client.Put().Resource("namespaces").Name(namespace.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("finalize").Body(namespace).Do(ctx).Into(result) + err = c.GetClient().Put().Resource("namespaces").Name(namespace.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("finalize").Body(namespace).Do(ctx).Into(result) return } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go index d9725b2f95..df1a7817f9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // NodesGetter has a method to return a NodeInterface. @@ -43,6 +40,7 @@ type NodesGetter interface { type NodeInterface interface { Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (*v1.Node, error) Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,193 +49,25 @@ type NodeInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) Apply(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) NodeExpansion } // nodes implements NodeInterface type nodes struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.Node, *v1.NodeList, *corev1.NodeApplyConfiguration] } // newNodes returns a Nodes func newNodes(c *CoreV1Client) *nodes { return &nodes{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.Node, *v1.NodeList, *corev1.NodeApplyConfiguration]( + "nodes", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.Node { return &v1.Node{} }, + func() *v1.NodeList { return &v1.NodeList{} }), } } - -// Get takes name of the node, and returns the corresponding node object, and an error if there is any. -func (c *nodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Get(). - Resource("nodes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Nodes that match those selectors. 
-func (c *nodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NodeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.NodeList{} - err = c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested nodes. -func (c *nodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. -func (c *nodes) Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Post(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(node). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. -func (c *nodes) Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Put(). - Resource("nodes"). - Name(node.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(node). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *nodes) UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Put(). - Resource("nodes"). - Name(node.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(node). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the node and deletes it. Returns an error if one occurs. -func (c *nodes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("nodes"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *nodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("nodes"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched node. -func (c *nodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Patch(pt). - Resource("nodes"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied node. 
-func (c *nodes) Apply(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) { - if node == nil { - return nil, fmt.Errorf("node provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(node) - if err != nil { - return nil, err - } - name := node.Name - if name == nil { - return nil, fmt.Errorf("node.Name must be provided to Apply") - } - result = &v1.Node{} - err = c.client.Patch(types.ApplyPatchType). - Resource("nodes"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *nodes) ApplyStatus(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) { - if node == nil { - return nil, fmt.Errorf("node provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(node) - if err != nil { - return nil, err - } - - name := node.Name - if name == nil { - return nil, fmt.Errorf("node.Name must be provided to Apply") - } - - result = &v1.Node{} - err = c.client.Patch(types.ApplyPatchType). - Resource("nodes"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go index bdf7bfed8d..df86253b0e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go @@ -34,7 +34,7 @@ type NodeExpansion interface { // the node that the server returns, or an error. func (c *nodes) PatchStatus(ctx context.Context, nodeName string, data []byte) (*v1.Node, error) { result := &v1.Node{} - err := c.client.Patch(types.StrategicMergePatchType). + err := c.GetClient().Patch(types.StrategicMergePatchType). Resource("nodes"). Name(nodeName). SubResource("status"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go index a8e2295977..8be40f8665 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PersistentVolumesGetter has a method to return a PersistentVolumeInterface. @@ -43,6 +40,7 @@ type PersistentVolumesGetter interface { type PersistentVolumeInterface interface { Create(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.CreateOptions) (*v1.PersistentVolume, error) Update(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,193 +49,25 @@ type PersistentVolumeInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolume, err error) Apply(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) PersistentVolumeExpansion } // persistentVolumes implements PersistentVolumeInterface type persistentVolumes struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.PersistentVolume, *v1.PersistentVolumeList, *corev1.PersistentVolumeApplyConfiguration] } // newPersistentVolumes returns a PersistentVolumes func newPersistentVolumes(c *CoreV1Client) *persistentVolumes { return &persistentVolumes{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.PersistentVolume, *v1.PersistentVolumeList, *corev1.PersistentVolumeApplyConfiguration]( + "persistentvolumes", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.PersistentVolume { return &v1.PersistentVolume{} }, + func() *v1.PersistentVolumeList { return &v1.PersistentVolumeList{} }), } } - -// Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. -func (c *persistentVolumes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Get(). - Resource("persistentvolumes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. -func (c *persistentVolumes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PersistentVolumeList{} - err = c.client.Get(). - Resource("persistentvolumes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested persistentVolumes. -func (c *persistentVolumes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("persistentvolumes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. 
-func (c *persistentVolumes) Create(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.CreateOptions) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Post(). - Resource("persistentvolumes"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(persistentVolume). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *persistentVolumes) Update(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Put(). - Resource("persistentvolumes"). - Name(persistentVolume.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(persistentVolume). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *persistentVolumes) UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Put(). - Resource("persistentvolumes"). - Name(persistentVolume.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(persistentVolume). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the persistentVolume and deletes it. Returns an error if one occurs. -func (c *persistentVolumes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("persistentvolumes"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *persistentVolumes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("persistentvolumes"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched persistentVolume. -func (c *persistentVolumes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Patch(pt). - Resource("persistentvolumes"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied persistentVolume. 
-func (c *persistentVolumes) Apply(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) { - if persistentVolume == nil { - return nil, fmt.Errorf("persistentVolume provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(persistentVolume) - if err != nil { - return nil, err - } - name := persistentVolume.Name - if name == nil { - return nil, fmt.Errorf("persistentVolume.Name must be provided to Apply") - } - result = &v1.PersistentVolume{} - err = c.client.Patch(types.ApplyPatchType). - Resource("persistentvolumes"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *persistentVolumes) ApplyStatus(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) { - if persistentVolume == nil { - return nil, fmt.Errorf("persistentVolume provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(persistentVolume) - if err != nil { - return nil, err - } - - name := persistentVolume.Name - if name == nil { - return nil, fmt.Errorf("persistentVolume.Name must be provided to Apply") - } - - result = &v1.PersistentVolume{} - err = c.client.Patch(types.ApplyPatchType). - Resource("persistentvolumes"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go index 2e7f4fb44f..7721b00923 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PersistentVolumeClaimsGetter has a method to return a PersistentVolumeClaimInterface. @@ -43,6 +40,7 @@ type PersistentVolumeClaimsGetter interface { type PersistentVolumeClaimInterface interface { Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.CreateOptions) (*v1.PersistentVolumeClaim, error) Update(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*v1.PersistentVolumeClaim, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*v1.PersistentVolumeClaim, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,206 +49,25 @@ type PersistentVolumeClaimInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolumeClaim, err error) Apply(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) PersistentVolumeClaimExpansion } // persistentVolumeClaims implements PersistentVolumeClaimInterface type persistentVolumeClaims struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.PersistentVolumeClaim, *v1.PersistentVolumeClaimList, *corev1.PersistentVolumeClaimApplyConfiguration] } // newPersistentVolumeClaims returns a PersistentVolumeClaims func newPersistentVolumeClaims(c *CoreV1Client, namespace string) *persistentVolumeClaims { return &persistentVolumeClaims{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.PersistentVolumeClaim, *v1.PersistentVolumeClaimList, *corev1.PersistentVolumeClaimApplyConfiguration]( + "persistentvolumeclaims", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.PersistentVolumeClaim { return &v1.PersistentVolumeClaim{} }, + func() *v1.PersistentVolumeClaimList { return &v1.PersistentVolumeClaimList{} }), } } - -// Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any. -func (c *persistentVolumeClaims) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Get(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. -func (c *persistentVolumeClaims) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PersistentVolumeClaimList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested persistentVolumeClaims. -func (c *persistentVolumeClaims) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). 
- Resource("persistentvolumeclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a persistentVolumeClaim and creates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *persistentVolumeClaims) Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.CreateOptions) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Post(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(persistentVolumeClaim). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *persistentVolumeClaims) Update(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(persistentVolumeClaim.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(persistentVolumeClaim). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *persistentVolumeClaims) UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(persistentVolumeClaim.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(persistentVolumeClaim). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the persistentVolumeClaim and deletes it. Returns an error if one occurs. -func (c *persistentVolumeClaims) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *persistentVolumeClaims) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched persistentVolumeClaim. -func (c *persistentVolumeClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied persistentVolumeClaim. -func (c *persistentVolumeClaims) Apply(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) { - if persistentVolumeClaim == nil { - return nil, fmt.Errorf("persistentVolumeClaim provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(persistentVolumeClaim) - if err != nil { - return nil, err - } - name := persistentVolumeClaim.Name - if name == nil { - return nil, fmt.Errorf("persistentVolumeClaim.Name must be provided to Apply") - } - result = &v1.PersistentVolumeClaim{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *persistentVolumeClaims) ApplyStatus(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) { - if persistentVolumeClaim == nil { - return nil, fmt.Errorf("persistentVolumeClaim provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(persistentVolumeClaim) - if err != nil { - return nil, err - } - - name := persistentVolumeClaim.Name - if name == nil { - return nil, fmt.Errorf("persistentVolumeClaim.Name must be provided to Apply") - } - - result = &v1.PersistentVolumeClaim{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go index 63122cf3fb..470b7de7bc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PodsGetter has a method to return a PodInterface. @@ -43,6 +40,7 @@ type PodsGetter interface { type PodInterface interface { Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (*v1.Pod, error) Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,6 +49,7 @@ type PodInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) Apply(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) @@ -59,209 +58,27 @@ type PodInterface interface { // pods implements PodInterface type pods struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Pod, *v1.PodList, *corev1.PodApplyConfiguration] } // newPods returns a Pods func newPods(c *CoreV1Client, namespace string) *pods { return &pods{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Pod, *v1.PodList, *corev1.PodApplyConfiguration]( + "pods", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Pod { return &v1.Pod{} }, + func() *v1.PodList { return &v1.PodList{} }), } } -// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. -func (c *pods) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Pods that match those selectors. -func (c *pods) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PodList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested pods. -func (c *pods) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *pods) Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Post(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(pod). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a pod and updates it. 
Returns the server's representation of the pod, and an error, if there is any. -func (c *pods) Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Put(). - Namespace(c.ns). - Resource("pods"). - Name(pod.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(pod). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *pods) UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Put(). - Namespace(c.ns). - Resource("pods"). - Name(pod.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(pod). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the pod and deletes it. Returns an error if one occurs. -func (c *pods) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("pods"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *pods) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched pod. -func (c *pods) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("pods"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied pod. -func (c *pods) Apply(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) { - if pod == nil { - return nil, fmt.Errorf("pod provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(pod) - if err != nil { - return nil, err - } - name := pod.Name - if name == nil { - return nil, fmt.Errorf("pod.Name must be provided to Apply") - } - result = &v1.Pod{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("pods"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *pods) ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) { - if pod == nil { - return nil, fmt.Errorf("pod provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(pod) - if err != nil { - return nil, err - } - - name := pod.Name - if name == nil { - return nil, fmt.Errorf("pod.Name must be provided to Apply") - } - - result = &v1.Pod{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("pods"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // UpdateEphemeralContainers takes the top resource name and the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. func (c *pods) UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { result = &v1.Pod{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). Resource("pods"). Name(podName). SubResource("ephemeralcontainers"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go index 8b6e0e932f..a2d4d70d46 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go @@ -47,33 +47,33 @@ type PodExpansion interface { // Bind applies the provided binding to the named pod in the current namespace (binding.Namespace is ignored). func (c *pods) Bind(ctx context.Context, binding *v1.Binding, opts metav1.CreateOptions) error { - return c.client.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("binding").Body(binding).Do(ctx).Error() + return c.GetClient().Post().Namespace(c.GetNamespace()).Resource("pods").Name(binding.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("binding").Body(binding).Do(ctx).Error() } // Evict submits a policy/v1beta1 Eviction request to the pod's eviction subresource. // Equivalent to calling EvictV1beta1. // Deprecated: Use EvictV1() (supported in 1.22+) or EvictV1beta1(). 
func (c *pods) Evict(ctx context.Context, eviction *policyv1beta1.Eviction) error { - return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error() + return c.GetClient().Post().Namespace(c.GetNamespace()).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error() } func (c *pods) EvictV1beta1(ctx context.Context, eviction *policyv1beta1.Eviction) error { - return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error() + return c.GetClient().Post().Namespace(c.GetNamespace()).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error() } func (c *pods) EvictV1(ctx context.Context, eviction *policyv1.Eviction) error { - return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error() + return c.GetClient().Post().Namespace(c.GetNamespace()).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error() } // Get constructs a request for getting the logs for a pod func (c *pods) GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request { - return c.client.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, scheme.ParameterCodec) + return c.GetClient().Get().Namespace(c.GetNamespace()).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, scheme.ParameterCodec) } // ProxyGet returns a response of the pod by calling it through the proxy. func (c *pods) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { - request := c.client.Get(). - Namespace(c.ns). + request := c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("pods"). SubResource("proxy"). Name(net.JoinSchemeNamePort(scheme, name, port)). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go index ff90fc0e62..060a059093 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PodTemplatesGetter has a method to return a PodTemplateInterface. 
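Illustrative sketch, not part of the vendored patch: the gentype rewrite in these files changes only the implementation behind the typed interfaces, so existing caller code keeps compiling and behaving the same. A minimal example (helper name, namespace, and claim name are hypothetical):

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // getClaim fetches a PersistentVolumeClaim through the regenerated client;
    // the call site is identical before and after the gentype migration.
    func getClaim(ctx context.Context, cs kubernetes.Interface) error {
        pvc, err := cs.CoreV1().PersistentVolumeClaims("default").Get(ctx, "data", metav1.GetOptions{})
        if err != nil {
            return err
        }
        _ = pvc
        return nil
    }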
@@ -55,154 +52,18 @@ type PodTemplateInterface interface { // podTemplates implements PodTemplateInterface type podTemplates struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.PodTemplate, *v1.PodTemplateList, *corev1.PodTemplateApplyConfiguration] } // newPodTemplates returns a PodTemplates func newPodTemplates(c *CoreV1Client, namespace string) *podTemplates { return &podTemplates{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.PodTemplate, *v1.PodTemplateList, *corev1.PodTemplateApplyConfiguration]( + "podtemplates", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.PodTemplate { return &v1.PodTemplate{} }, + func() *v1.PodTemplateList { return &v1.PodTemplateList{} }), } } - -// Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. -func (c *podTemplates) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodTemplate, err error) { - result = &v1.PodTemplate{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podtemplates"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodTemplates that match those selectors. -func (c *podTemplates) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodTemplateList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PodTemplateList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podTemplates. -func (c *podTemplates) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *podTemplates) Create(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.CreateOptions) (result *v1.PodTemplate, err error) { - result = &v1.PodTemplate{} - err = c.client.Post(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podTemplate). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *podTemplates) Update(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.UpdateOptions) (result *v1.PodTemplate, err error) { - result = &v1.PodTemplate{} - err = c.client.Put(). - Namespace(c.ns). - Resource("podtemplates"). - Name(podTemplate.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podTemplate). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podTemplate and deletes it. Returns an error if one occurs. -func (c *podTemplates) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). 
- Namespace(c.ns). - Resource("podtemplates"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podTemplates) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podTemplate. -func (c *podTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodTemplate, err error) { - result = &v1.PodTemplate{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("podtemplates"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podTemplate. -func (c *podTemplates) Apply(ctx context.Context, podTemplate *corev1.PodTemplateApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodTemplate, err error) { - if podTemplate == nil { - return nil, fmt.Errorf("podTemplate provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podTemplate) - if err != nil { - return nil, err - } - name := podTemplate.Name - if name == nil { - return nil, fmt.Errorf("podTemplate.Name must be provided to Apply") - } - result = &v1.PodTemplate{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("podtemplates"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go index 49c75d967b..9b275ed1ba 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go @@ -20,9 +20,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" autoscalingv1 "k8s.io/api/autoscaling/v1" v1 "k8s.io/api/core/v1" @@ -30,8 +27,8 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ReplicationControllersGetter has a method to return a ReplicationControllerInterface. @@ -44,6 +41,7 @@ type ReplicationControllersGetter interface { type ReplicationControllerInterface interface { Create(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.CreateOptions) (*v1.ReplicationController, error) Update(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (*v1.ReplicationController, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (*v1.ReplicationController, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -52,6 +50,7 @@ type ReplicationControllerInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicationController, err error) Apply(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) @@ -61,209 +60,27 @@ type ReplicationControllerInterface interface { // replicationControllers implements ReplicationControllerInterface type replicationControllers struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.ReplicationController, *v1.ReplicationControllerList, *corev1.ReplicationControllerApplyConfiguration] } // newReplicationControllers returns a ReplicationControllers func newReplicationControllers(c *CoreV1Client, namespace string) *replicationControllers { return &replicationControllers{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.ReplicationController, *v1.ReplicationControllerList, *corev1.ReplicationControllerApplyConfiguration]( + "replicationcontrollers", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.ReplicationController { return &v1.ReplicationController{} }, + func() *v1.ReplicationControllerList { return &v1.ReplicationControllerList{} }), } } -// Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any. -func (c *replicationControllers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. -func (c *replicationControllers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicationControllerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ReplicationControllerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicationControllers. 
-func (c *replicationControllers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *replicationControllers) Create(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.CreateOptions) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicationController). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *replicationControllers) Update(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(replicationController.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicationController). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *replicationControllers) UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(replicationController.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicationController). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the replicationController and deletes it. Returns an error if one occurs. -func (c *replicationControllers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicationControllers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched replicationController. -func (c *replicationControllers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Patch(pt). 
- Namespace(c.ns). - Resource("replicationcontrollers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied replicationController. -func (c *replicationControllers) Apply(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) { - if replicationController == nil { - return nil, fmt.Errorf("replicationController provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicationController) - if err != nil { - return nil, err - } - name := replicationController.Name - if name == nil { - return nil, fmt.Errorf("replicationController.Name must be provided to Apply") - } - result = &v1.ReplicationController{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *replicationControllers) ApplyStatus(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) { - if replicationController == nil { - return nil, fmt.Errorf("replicationController provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicationController) - if err != nil { - return nil, err - } - - name := replicationController.Name - if name == nil { - return nil, fmt.Errorf("replicationController.Name must be provided to Apply") - } - - result = &v1.ReplicationController{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // GetScale takes name of the replicationController, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. func (c *replicationControllers) GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("replicationcontrollers"). Name(replicationControllerName). SubResource("scale"). @@ -276,8 +93,8 @@ func (c *replicationControllers) GetScale(ctx context.Context, replicationContro // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *replicationControllers) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). Resource("replicationcontrollers"). Name(replicationControllerName). SubResource("scale"). 
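Illustrative sketch, not part of the vendored patch: hand-written subresource helpers such as GetScale and UpdateScale survive the migration (they now reach the REST machinery through c.GetClient() and c.GetNamespace(), as the hunk above shows), so scaling a ReplicationController from caller code still looks like this (helper name hypothetical; imports as in the earlier sketch):

    // scaleRC reads the scale subresource, sets the replica count, and writes it back.
    func scaleRC(ctx context.Context, cs kubernetes.Interface, ns, name string, replicas int32) error {
        scale, err := cs.CoreV1().ReplicationControllers(ns).GetScale(ctx, name, metav1.GetOptions{})
        if err != nil {
            return err
        }
        scale.Spec.Replicas = replicas
        _, err = cs.CoreV1().ReplicationControllers(ns).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
        return err
    }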
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go index 8444d164ed..4b2dcd3b59 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ResourceQuotasGetter has a method to return a ResourceQuotaInterface. @@ -43,6 +40,7 @@ type ResourceQuotasGetter interface { type ResourceQuotaInterface interface { Create(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.CreateOptions) (*v1.ResourceQuota, error) Update(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (*v1.ResourceQuota, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (*v1.ResourceQuota, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,206 +49,25 @@ type ResourceQuotaInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResourceQuota, err error) Apply(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) ResourceQuotaExpansion } // resourceQuotas implements ResourceQuotaInterface type resourceQuotas struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.ResourceQuota, *v1.ResourceQuotaList, *corev1.ResourceQuotaApplyConfiguration] } // newResourceQuotas returns a ResourceQuotas func newResourceQuotas(c *CoreV1Client, namespace string) *resourceQuotas { return &resourceQuotas{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.ResourceQuota, *v1.ResourceQuotaList, *corev1.ResourceQuotaApplyConfiguration]( + "resourcequotas", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.ResourceQuota { return &v1.ResourceQuota{} }, + func() *v1.ResourceQuotaList { return &v1.ResourceQuotaList{} }), } } - -// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. -func (c *resourceQuotas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. 
-func (c *resourceQuotas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ResourceQuotaList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ResourceQuotaList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resourceQuotas. -func (c *resourceQuotas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *resourceQuotas) Create(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.CreateOptions) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Post(). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceQuota). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *resourceQuotas) Update(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(resourceQuota.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceQuota). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *resourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(resourceQuota.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceQuota). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs. -func (c *resourceQuotas) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *resourceQuotas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched resourceQuota. 
-func (c *resourceQuotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("resourcequotas"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceQuota. -func (c *resourceQuotas) Apply(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) { - if resourceQuota == nil { - return nil, fmt.Errorf("resourceQuota provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceQuota) - if err != nil { - return nil, err - } - name := resourceQuota.Name - if name == nil { - return nil, fmt.Errorf("resourceQuota.Name must be provided to Apply") - } - result = &v1.ResourceQuota{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("resourcequotas"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *resourceQuotas) ApplyStatus(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) { - if resourceQuota == nil { - return nil, fmt.Errorf("resourceQuota provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceQuota) - if err != nil { - return nil, err - } - - name := resourceQuota.Name - if name == nil { - return nil, fmt.Errorf("resourceQuota.Name must be provided to Apply") - } - - result = &v1.ResourceQuota{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("resourcequotas"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go index 4aba330381..12a8d1178f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SecretsGetter has a method to return a SecretInterface. 
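Illustrative sketch, not part of the vendored patch: List and Watch are likewise served by the embedded generic client after this change, with unchanged semantics at the call site, e.g. watching Secrets by label (selector value and helper name hypothetical; imports as in the earlier sketch):

    // watchSecrets streams add/update/delete events for matching Secrets until the watch ends.
    func watchSecrets(ctx context.Context, cs kubernetes.Interface, ns string) error {
        w, err := cs.CoreV1().Secrets(ns).Watch(ctx, metav1.ListOptions{LabelSelector: "app=demo"})
        if err != nil {
            return err
        }
        defer w.Stop()
        for ev := range w.ResultChan() {
            _ = ev // ev.Type and ev.Object carry the change notification
        }
        return nil
    }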
@@ -55,154 +52,18 @@ type SecretInterface interface { // secrets implements SecretInterface type secrets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Secret, *v1.SecretList, *corev1.SecretApplyConfiguration] } // newSecrets returns a Secrets func newSecrets(c *CoreV1Client, namespace string) *secrets { return &secrets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Secret, *v1.SecretList, *corev1.SecretApplyConfiguration]( + "secrets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Secret { return &v1.Secret{} }, + func() *v1.SecretList { return &v1.SecretList{} }), } } - -// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. -func (c *secrets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Secret, err error) { - result = &v1.Secret{} - err = c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Secrets that match those selectors. -func (c *secrets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.SecretList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.SecretList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested secrets. -func (c *secrets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *secrets) Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (result *v1.Secret, err error) { - result = &v1.Secret{} - err = c.client.Post(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(secret). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *secrets) Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (result *v1.Secret, err error) { - result = &v1.Secret{} - err = c.client.Put(). - Namespace(c.ns). - Resource("secrets"). - Name(secret.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(secret). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the secret and deletes it. Returns an error if one occurs. -func (c *secrets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("secrets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *secrets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched secret. -func (c *secrets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error) { - result = &v1.Secret{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("secrets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied secret. -func (c *secrets) Apply(ctx context.Context, secret *corev1.SecretApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Secret, err error) { - if secret == nil { - return nil, fmt.Errorf("secret provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(secret) - if err != nil { - return nil, err - } - name := secret.Name - if name == nil { - return nil, fmt.Errorf("secret.Name must be provided to Apply") - } - result = &v1.Secret{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("secrets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go index 3fe22ba444..ec935a3247 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ServicesGetter has a method to return a ServiceInterface. @@ -43,6 +40,7 @@ type ServicesGetter interface { type ServiceInterface interface { Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (*v1.Service, error) Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Service, error) @@ -50,190 +48,25 @@ type ServiceInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) Apply(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) ServiceExpansion } // services implements ServiceInterface type services struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Service, *v1.ServiceList, *corev1.ServiceApplyConfiguration] } // newServices returns a Services func newServices(c *CoreV1Client, namespace string) *services { return &services{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Service, *v1.ServiceList, *corev1.ServiceApplyConfiguration]( + "services", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Service { return &v1.Service{} }, + func() *v1.ServiceList { return &v1.ServiceList{} }), } } - -// Get takes name of the service, and returns the corresponding service object, and an error if there is any. -func (c *services) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Get(). - Namespace(c.ns). - Resource("services"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Services that match those selectors. -func (c *services) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ServiceList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested services. -func (c *services) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Post(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(service). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a service and updates it. 
Returns the server's representation of the service, and an error, if there is any. -func (c *services) Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Put(). - Namespace(c.ns). - Resource("services"). - Name(service.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(service). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *services) UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Put(). - Namespace(c.ns). - Resource("services"). - Name(service.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(service). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the service and deletes it. Returns an error if one occurs. -func (c *services) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("services"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched service. -func (c *services) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("services"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied service. -func (c *services) Apply(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) { - if service == nil { - return nil, fmt.Errorf("service provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(service) - if err != nil { - return nil, err - } - name := service.Name - if name == nil { - return nil, fmt.Errorf("service.Name must be provided to Apply") - } - result = &v1.Service{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("services"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *services) ApplyStatus(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) { - if service == nil { - return nil, fmt.Errorf("service provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(service) - if err != nil { - return nil, err - } - - name := service.Name - if name == nil { - return nil, fmt.Errorf("service.Name must be provided to Apply") - } - - result = &v1.Service{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("services"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go index 4937fd1a39..9a6f781387 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go @@ -28,8 +28,8 @@ type ServiceExpansion interface { // ProxyGet returns a response of the service by calling it through the proxy. func (c *services) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { - request := c.client.Get(). - Namespace(c.ns). + request := c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("services"). SubResource("proxy"). Name(net.JoinSchemeNamePort(scheme, name, port)). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go index bdf589b960..eb995d4548 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go @@ -20,9 +20,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" authenticationv1 "k8s.io/api/authentication/v1" v1 "k8s.io/api/core/v1" @@ -30,8 +27,8 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ServiceAccountsGetter has a method to return a ServiceAccountInterface. @@ -58,163 +55,27 @@ type ServiceAccountInterface interface { // serviceAccounts implements ServiceAccountInterface type serviceAccounts struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.ServiceAccount, *v1.ServiceAccountList, *corev1.ServiceAccountApplyConfiguration] } // newServiceAccounts returns a ServiceAccounts func newServiceAccounts(c *CoreV1Client, namespace string) *serviceAccounts { return &serviceAccounts{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.ServiceAccount, *v1.ServiceAccountList, *corev1.ServiceAccountApplyConfiguration]( + "serviceaccounts", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.ServiceAccount { return &v1.ServiceAccount{} }, + func() *v1.ServiceAccountList { return &v1.ServiceAccountList{} }), } } -// Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. -func (c *serviceAccounts) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ServiceAccount, err error) { - result = &v1.ServiceAccount{} - err = c.client.Get(). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. -func (c *serviceAccounts) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceAccountList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ServiceAccountList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
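Taken together, these hunks are one mechanical refactor repeated per resource: each generated typed client drops its hand-written REST plumbing (and the json, fmt and time imports that only served it) and instead embeds the generic *gentype.ClientWithListAndApply, parameterized by the resource type, its list type, and its apply configuration. Get, List, Watch, Create, Update, Delete, DeleteCollection, Patch, Apply and the status variants are inherited from that embedded client, while expansion methods reach the underlying REST client through GetClient() and GetNamespace(). Call sites built on the public interfaces do not change; the following minimal sketch (assuming an already-constructed clientset, with illustrative names) drives the refactored core/v1 Services client exactly as before:

    // Sketch only: caller-side usage that is unaffected by the gentype refactor.
    // The package, function and object names are illustrative.
    package example

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    func printServiceClusterIP(ctx context.Context, clientset kubernetes.Interface, namespace, name string) error {
        // ServiceInterface.Get is now served by the embedded
        // gentype.ClientWithListAndApply, but the call site is unchanged.
        svc, err := clientset.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            return err
        }
        fmt.Println(svc.Spec.ClusterIP)
        return nil
    }
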
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested serviceAccounts. -func (c *serviceAccounts) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *serviceAccounts) Create(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.CreateOptions) (result *v1.ServiceAccount, err error) { - result = &v1.ServiceAccount{} - err = c.client.Post(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(serviceAccount). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *serviceAccounts) Update(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.UpdateOptions) (result *v1.ServiceAccount, err error) { - result = &v1.ServiceAccount{} - err = c.client.Put(). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(serviceAccount.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(serviceAccount). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs. -func (c *serviceAccounts) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *serviceAccounts) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched serviceAccount. -func (c *serviceAccounts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceAccount, err error) { - result = &v1.ServiceAccount{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied serviceAccount. 
-func (c *serviceAccounts) Apply(ctx context.Context, serviceAccount *corev1.ServiceAccountApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ServiceAccount, err error) { - if serviceAccount == nil { - return nil, fmt.Errorf("serviceAccount provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(serviceAccount) - if err != nil { - return nil, err - } - name := serviceAccount.Name - if name == nil { - return nil, fmt.Errorf("serviceAccount.Name must be provided to Apply") - } - result = &v1.ServiceAccount{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // CreateToken takes the representation of a tokenRequest and creates it. Returns the server's representation of the tokenRequest, and an error, if there is any. func (c *serviceAccounts) CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts metav1.CreateOptions) (result *authenticationv1.TokenRequest, err error) { result = &authenticationv1.TokenRequest{} - err = c.client.Post(). - Namespace(c.ns). + err = c.GetClient().Post(). + Namespace(c.GetNamespace()). Resource("serviceaccounts"). Name(serviceAccountName). SubResource("token"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go index 63e616b033..1f927055cc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/discovery/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" discoveryv1 "k8s.io/client-go/applyconfigurations/discovery/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // EndpointSlicesGetter has a method to return a EndpointSliceInterface. @@ -55,154 +52,18 @@ type EndpointSliceInterface interface { // endpointSlices implements EndpointSliceInterface type endpointSlices struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.EndpointSlice, *v1.EndpointSliceList, *discoveryv1.EndpointSliceApplyConfiguration] } // newEndpointSlices returns a EndpointSlices func newEndpointSlices(c *DiscoveryV1Client, namespace string) *endpointSlices { return &endpointSlices{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.EndpointSlice, *v1.EndpointSliceList, *discoveryv1.EndpointSliceApplyConfiguration]( + "endpointslices", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.EndpointSlice { return &v1.EndpointSlice{} }, + func() *v1.EndpointSliceList { return &v1.EndpointSliceList{} }), } } - -// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. -func (c *endpointSlices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.EndpointSlice, err error) { - result = &v1.EndpointSlice{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpointslices"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
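The ServiceAccounts client keeps its hand-written CreateToken expansion for the "token" subresource; only the plumbing changes, from the removed client and ns fields to GetClient() and GetNamespace(). A minimal caller-side sketch, assuming an already-constructed clientset and with illustrative audience and lifetime values:

    // Sketch only: requesting a short-lived token through the CreateToken
    // subresource call retained above. Audience and lifetime are illustrative.
    package example

    import (
        "context"

        authenticationv1 "k8s.io/api/authentication/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    func serviceAccountToken(ctx context.Context, clientset kubernetes.Interface, namespace, name string) (string, error) {
        expiry := int64(3600) // one hour, illustrative
        req := &authenticationv1.TokenRequest{
            Spec: authenticationv1.TokenRequestSpec{
                Audiences:         []string{"https://kubernetes.default.svc"},
                ExpirationSeconds: &expiry,
            },
        }
        resp, err := clientset.CoreV1().ServiceAccounts(namespace).CreateToken(ctx, name, req, metav1.CreateOptions{})
        if err != nil {
            return "", err
        }
        return resp.Status.Token, nil
    }
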
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. -func (c *endpointSlices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointSliceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.EndpointSliceList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested endpointSlices. -func (c *endpointSlices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *endpointSlices) Create(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.CreateOptions) (result *v1.EndpointSlice, err error) { - result = &v1.EndpointSlice{} - err = c.client.Post(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(endpointSlice). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *endpointSlices) Update(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.UpdateOptions) (result *v1.EndpointSlice, err error) { - result = &v1.EndpointSlice{} - err = c.client.Put(). - Namespace(c.ns). - Resource("endpointslices"). - Name(endpointSlice.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(endpointSlice). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. -func (c *endpointSlices) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("endpointslices"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *endpointSlices) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched endpointSlice. -func (c *endpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EndpointSlice, err error) { - result = &v1.EndpointSlice{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("endpointslices"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied endpointSlice. -func (c *endpointSlices) Apply(ctx context.Context, endpointSlice *discoveryv1.EndpointSliceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.EndpointSlice, err error) { - if endpointSlice == nil { - return nil, fmt.Errorf("endpointSlice provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(endpointSlice) - if err != nil { - return nil, err - } - name := endpointSlice.Name - if name == nil { - return nil, fmt.Errorf("endpointSlice.Name must be provided to Apply") - } - result = &v1.EndpointSlice{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("endpointslices"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go index d159b5ea9e..6bbbde82ec 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go @@ -44,22 +44,24 @@ var endpointslicesKind = v1.SchemeGroupVersion.WithKind("EndpointSlice") // Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. func (c *FakeEndpointSlices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.EndpointSlice, err error) { + emptyResult := &v1.EndpointSlice{} obj, err := c.Fake. - Invokes(testing.NewGetAction(endpointslicesResource, c.ns, name), &v1.EndpointSlice{}) + Invokes(testing.NewGetActionWithOptions(endpointslicesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EndpointSlice), err } // List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. func (c *FakeEndpointSlices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointSliceList, err error) { + emptyResult := &v1.EndpointSliceList{} obj, err := c.Fake. - Invokes(testing.NewListAction(endpointslicesResource, endpointslicesKind, c.ns, opts), &v1.EndpointSliceList{}) + Invokes(testing.NewListActionWithOptions(endpointslicesResource, endpointslicesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeEndpointSlices) List(ctx context.Context, opts metav1.ListOptions) // Watch returns a watch.Interface that watches the requested endpointSlices. func (c *FakeEndpointSlices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(endpointslicesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(endpointslicesResource, c.ns, opts)) } // Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. func (c *FakeEndpointSlices) Create(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.CreateOptions) (result *v1.EndpointSlice, err error) { + emptyResult := &v1.EndpointSlice{} obj, err := c.Fake. 
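Each removed Apply body did the same work: require a non-nil apply configuration and a Name, marshal it to JSON, and send it as an ApplyPatchType patch. That logic now lives once in the generic client, so server-side apply from the caller's side is unchanged. A minimal sketch against the core/v1 Services client shown earlier in this patch, with an illustrative field manager and label:

    // Sketch only: caller-side server-side apply; the field manager name and
    // label are illustrative, and the clientset is assumed to exist already.
    package example

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
        "k8s.io/client-go/kubernetes"
    )

    func labelService(ctx context.Context, clientset kubernetes.Interface, namespace, name string) error {
        // Declarative intent: only the fields set here are owned by this manager.
        svc := applycorev1.Service(name, namespace).
            WithLabels(map[string]string{"team": "platform"})
        _, err := clientset.CoreV1().Services(namespace).Apply(ctx, svc, metav1.ApplyOptions{
            FieldManager: "example-controller",
            Force:        true,
        })
        return err
    }
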
- Invokes(testing.NewCreateAction(endpointslicesResource, c.ns, endpointSlice), &v1.EndpointSlice{}) + Invokes(testing.NewCreateActionWithOptions(endpointslicesResource, c.ns, endpointSlice, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EndpointSlice), err } // Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. func (c *FakeEndpointSlices) Update(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.UpdateOptions) (result *v1.EndpointSlice, err error) { + emptyResult := &v1.EndpointSlice{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(endpointslicesResource, c.ns, endpointSlice), &v1.EndpointSlice{}) + Invokes(testing.NewUpdateActionWithOptions(endpointslicesResource, c.ns, endpointSlice, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EndpointSlice), err } @@ -114,7 +118,7 @@ func (c *FakeEndpointSlices) Delete(ctx context.Context, name string, opts metav // DeleteCollection deletes a collection of objects. func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(endpointslicesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(endpointslicesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.EndpointSliceList{}) return err @@ -122,11 +126,12 @@ func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, opts metav1.D // Patch applies the patch and returns the patched endpointSlice. func (c *FakeEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EndpointSlice, err error) { + emptyResult := &v1.EndpointSlice{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, name, pt, data, subresources...), &v1.EndpointSlice{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(endpointslicesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EndpointSlice), err } @@ -144,11 +149,12 @@ func (c *FakeEndpointSlices) Apply(ctx context.Context, endpointSlice *discovery if name == nil { return nil, fmt.Errorf("endpointSlice.Name must be provided to Apply") } + emptyResult := &v1.EndpointSlice{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, *name, types.ApplyPatchType, data), &v1.EndpointSlice{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(endpointslicesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.EndpointSlice), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go index 2ade833029..298cfbc879 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/discovery/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" discoveryv1beta1 "k8s.io/client-go/applyconfigurations/discovery/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // EndpointSlicesGetter has a method to return a EndpointSliceInterface. @@ -55,154 +52,18 @@ type EndpointSliceInterface interface { // endpointSlices implements EndpointSliceInterface type endpointSlices struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.EndpointSlice, *v1beta1.EndpointSliceList, *discoveryv1beta1.EndpointSliceApplyConfiguration] } // newEndpointSlices returns a EndpointSlices func newEndpointSlices(c *DiscoveryV1beta1Client, namespace string) *endpointSlices { return &endpointSlices{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.EndpointSlice, *v1beta1.EndpointSliceList, *discoveryv1beta1.EndpointSliceApplyConfiguration]( + "endpointslices", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.EndpointSlice { return &v1beta1.EndpointSlice{} }, + func() *v1beta1.EndpointSliceList { return &v1beta1.EndpointSliceList{} }), } } - -// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. -func (c *endpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) { - result = &v1beta1.EndpointSlice{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpointslices"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. -func (c *endpointSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EndpointSliceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.EndpointSliceList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested endpointSlices. 
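The fake clients change in two ways: the fabricated actions now record the caller's options (NewGetActionWithOptions, NewListActionWithOptions, and so on), and a call that yields no object returns a non-nil empty result alongside the error instead of nil. Tests that only inspect the error keep working; a minimal sketch against the fake clientset, with illustrative object names:

    // Sketch only: a test using the fake clientset whose EndpointSlice fake is
    // rewritten above. Object and namespace names are illustrative.
    package example

    import (
        "context"
        "testing"

        discoveryv1 "k8s.io/api/discovery/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes/fake"
    )

    func TestFakeEndpointSlices(t *testing.T) {
        clientset := fake.NewSimpleClientset(&discoveryv1.EndpointSlice{
            ObjectMeta: metav1.ObjectMeta{Name: "web-abc", Namespace: "default"},
        })

        // Served from the object tracker; recorded as a get action carrying options.
        if _, err := clientset.DiscoveryV1().EndpointSlices("default").Get(context.TODO(), "web-abc", metav1.GetOptions{}); err != nil {
            t.Fatalf("unexpected error: %v", err)
        }

        // A miss still reports NotFound; the returned object is now empty but non-nil.
        if _, err := clientset.DiscoveryV1().EndpointSlices("default").Get(context.TODO(), "missing", metav1.GetOptions{}); err == nil {
            t.Fatal("expected a NotFound error")
        }
    }
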
-func (c *endpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *endpointSlices) Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (result *v1beta1.EndpointSlice, err error) { - result = &v1beta1.EndpointSlice{} - err = c.client.Post(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(endpointSlice). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *endpointSlices) Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (result *v1beta1.EndpointSlice, err error) { - result = &v1beta1.EndpointSlice{} - err = c.client.Put(). - Namespace(c.ns). - Resource("endpointslices"). - Name(endpointSlice.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(endpointSlice). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. -func (c *endpointSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("endpointslices"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *endpointSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched endpointSlice. -func (c *endpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error) { - result = &v1beta1.EndpointSlice{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("endpointslices"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied endpointSlice. 
-func (c *endpointSlices) Apply(ctx context.Context, endpointSlice *discoveryv1beta1.EndpointSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.EndpointSlice, err error) { - if endpointSlice == nil { - return nil, fmt.Errorf("endpointSlice provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(endpointSlice) - if err != nil { - return nil, err - } - name := endpointSlice.Name - if name == nil { - return nil, fmt.Errorf("endpointSlice.Name must be provided to Apply") - } - result = &v1beta1.EndpointSlice{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("endpointslices"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go index 2683718113..65cf69b9dc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go @@ -44,22 +44,24 @@ var endpointslicesKind = v1beta1.SchemeGroupVersion.WithKind("EndpointSlice") // Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. func (c *FakeEndpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) { + emptyResult := &v1beta1.EndpointSlice{} obj, err := c.Fake. - Invokes(testing.NewGetAction(endpointslicesResource, c.ns, name), &v1beta1.EndpointSlice{}) + Invokes(testing.NewGetActionWithOptions(endpointslicesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.EndpointSlice), err } // List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. func (c *FakeEndpointSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EndpointSliceList, err error) { + emptyResult := &v1beta1.EndpointSliceList{} obj, err := c.Fake. - Invokes(testing.NewListAction(endpointslicesResource, endpointslicesKind, c.ns, opts), &v1beta1.EndpointSliceList{}) + Invokes(testing.NewListActionWithOptions(endpointslicesResource, endpointslicesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeEndpointSlices) List(ctx context.Context, opts v1.ListOptions) (res // Watch returns a watch.Interface that watches the requested endpointSlices. func (c *FakeEndpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(endpointslicesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(endpointslicesResource, c.ns, opts)) } // Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. func (c *FakeEndpointSlices) Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (result *v1beta1.EndpointSlice, err error) { + emptyResult := &v1beta1.EndpointSlice{} obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(endpointslicesResource, c.ns, endpointSlice), &v1beta1.EndpointSlice{}) + Invokes(testing.NewCreateActionWithOptions(endpointslicesResource, c.ns, endpointSlice, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.EndpointSlice), err } // Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. func (c *FakeEndpointSlices) Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (result *v1beta1.EndpointSlice, err error) { + emptyResult := &v1beta1.EndpointSlice{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(endpointslicesResource, c.ns, endpointSlice), &v1beta1.EndpointSlice{}) + Invokes(testing.NewUpdateActionWithOptions(endpointslicesResource, c.ns, endpointSlice, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.EndpointSlice), err } @@ -114,7 +118,7 @@ func (c *FakeEndpointSlices) Delete(ctx context.Context, name string, opts v1.De // DeleteCollection deletes a collection of objects. func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(endpointslicesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(endpointslicesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.EndpointSliceList{}) return err @@ -122,11 +126,12 @@ func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, opts v1.Delet // Patch applies the patch and returns the patched endpointSlice. func (c *FakeEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error) { + emptyResult := &v1beta1.EndpointSlice{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, name, pt, data, subresources...), &v1beta1.EndpointSlice{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(endpointslicesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.EndpointSlice), err } @@ -144,11 +149,12 @@ func (c *FakeEndpointSlices) Apply(ctx context.Context, endpointSlice *discovery if name == nil { return nil, fmt.Errorf("endpointSlice.Name must be provided to Apply") } + emptyResult := &v1beta1.EndpointSlice{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.EndpointSlice{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(endpointslicesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.EndpointSlice), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go index c9f2bbed50..d021a76c4e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/events/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" eventsv1 "k8s.io/client-go/applyconfigurations/events/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // EventsGetter has a method to return a EventInterface. @@ -55,154 +52,18 @@ type EventInterface interface { // events implements EventInterface type events struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Event, *v1.EventList, *eventsv1.EventApplyConfiguration] } // newEvents returns a Events func newEvents(c *EventsV1Client, namespace string) *events { return &events{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Event, *v1.EventList, *eventsv1.EventApplyConfiguration]( + "events", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Event { return &v1.Event{} }, + func() *v1.EventList { return &v1.EventList{} }), } } - -// Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *events) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *events) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EventList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.EventList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested events. -func (c *events) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. 
-func (c *events) Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Post(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(event). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Put(). - Namespace(c.ns). - Resource("events"). - Name(event.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(event). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *events) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *events) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched event. -func (c *events) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("events"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied event. -func (c *events) Apply(ctx context.Context, event *eventsv1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Event, err error) { - if event == nil { - return nil, fmt.Errorf("event provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(event) - if err != nil { - return nil, err - } - name := event.Name - if name == nil { - return nil, fmt.Errorf("event.Name must be provided to Apply") - } - result = &v1.Event{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("events"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go index 0928781f1e..1e79eb9845 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go @@ -44,22 +44,24 @@ var eventsKind = v1.SchemeGroupVersion.WithKind("Event") // Get takes name of the event, and returns the corresponding event object, and an error if there is any. func (c *FakeEvents) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Event, err error) { + emptyResult := &v1.Event{} obj, err := c.Fake. 
- Invokes(testing.NewGetAction(eventsResource, c.ns, name), &v1.Event{}) + Invokes(testing.NewGetActionWithOptions(eventsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Event), err } // List takes label and field selectors, and returns the list of Events that match those selectors. func (c *FakeEvents) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EventList, err error) { + emptyResult := &v1.EventList{} obj, err := c.Fake. - Invokes(testing.NewListAction(eventsResource, eventsKind, c.ns, opts), &v1.EventList{}) + Invokes(testing.NewListActionWithOptions(eventsResource, eventsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeEvents) List(ctx context.Context, opts metav1.ListOptions) (result // Watch returns a watch.Interface that watches the requested events. func (c *FakeEvents) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(eventsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(eventsResource, c.ns, opts)) } // Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. func (c *FakeEvents) Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (result *v1.Event, err error) { + emptyResult := &v1.Event{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &v1.Event{}) + Invokes(testing.NewCreateActionWithOptions(eventsResource, c.ns, event, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Event), err } // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. func (c *FakeEvents) Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (result *v1.Event, err error) { + emptyResult := &v1.Event{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &v1.Event{}) + Invokes(testing.NewUpdateActionWithOptions(eventsResource, c.ns, event, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Event), err } @@ -114,7 +118,7 @@ func (c *FakeEvents) Delete(ctx context.Context, name string, opts metav1.Delete // DeleteCollection deletes a collection of objects. func (c *FakeEvents) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(eventsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(eventsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.EventList{}) return err @@ -122,11 +126,12 @@ func (c *FakeEvents) DeleteCollection(ctx context.Context, opts metav1.DeleteOpt // Patch applies the patch and returns the patched event. func (c *FakeEvents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) { + emptyResult := &v1.Event{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, pt, data, subresources...), &v1.Event{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(eventsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Event), err } @@ -144,11 +149,12 @@ func (c *FakeEvents) Apply(ctx context.Context, event *eventsv1.EventApplyConfig if name == nil { return nil, fmt.Errorf("event.Name must be provided to Apply") } + emptyResult := &v1.Event{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Event{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(eventsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Event), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go index dfdf8b8979..77ca2e7756 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/events/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" eventsv1beta1 "k8s.io/client-go/applyconfigurations/events/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // EventsGetter has a method to return a EventInterface. @@ -55,154 +52,18 @@ type EventInterface interface { // events implements EventInterface type events struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.Event, *v1beta1.EventList, *eventsv1beta1.EventApplyConfiguration] } // newEvents returns a Events func newEvents(c *EventsV1beta1Client, namespace string) *events { return &events{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.Event, *v1beta1.EventList, *eventsv1beta1.EventApplyConfiguration]( + "events", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.Event { return &v1beta1.Event{} }, + func() *v1beta1.EventList { return &v1beta1.EventList{} }), } } - -// Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *events) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Event, err error) { - result = &v1beta1.Event{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *events) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EventList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.EventList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested events. 
-func (c *events) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Create(ctx context.Context, event *v1beta1.Event, opts v1.CreateOptions) (result *v1beta1.Event, err error) { - result = &v1beta1.Event{} - err = c.client.Post(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(event). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Update(ctx context.Context, event *v1beta1.Event, opts v1.UpdateOptions) (result *v1beta1.Event, err error) { - result = &v1beta1.Event{} - err = c.client.Put(). - Namespace(c.ns). - Resource("events"). - Name(event.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(event). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *events) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *events) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched event. -func (c *events) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Event, err error) { - result = &v1beta1.Event{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("events"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied event. -func (c *events) Apply(ctx context.Context, event *eventsv1beta1.EventApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Event, err error) { - if event == nil { - return nil, fmt.Errorf("event provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(event) - if err != nil { - return nil, err - } - name := event.Name - if name == nil { - return nil, fmt.Errorf("event.Name must be provided to Apply") - } - result = &v1beta1.Event{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("events"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go index 562f8d5e45..4ddbaa31af 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go @@ -44,11 +44,11 @@ type EventExpansion interface { // it must either match this event client's namespace, or this event client must // have been created with the "" namespace. func (e *events) CreateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + if e.GetNamespace() != "" && event.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.GetNamespace()) } result := &v1beta1.Event{} - err := e.client.Post(). + err := e.GetClient().Post(). NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). Resource("events"). Body(event). @@ -64,11 +64,11 @@ func (e *events) CreateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, // created with the "" namespace. // Update also requires the ResourceVersion to be set in the event object. func (e *events) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + if e.GetNamespace() != "" && event.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.GetNamespace()) } result := &v1beta1.Event{} - err := e.client.Put(). + err := e.GetClient().Put(). NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). Resource("events"). Name(event.Name). @@ -84,11 +84,11 @@ func (e *events) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, // The namespace must either match this event client's namespace, or this event client must // have been created with the "" namespace. func (e *events) PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + if e.GetNamespace() != "" && event.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", event.Namespace, e.GetNamespace()) } result := &v1beta1.Event{} - err := e.client.Patch(types.StrategicMergePatchType). + err := e.GetClient().Patch(types.StrategicMergePatchType). NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). Resource("events"). Name(event.Name). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go index 522b4dc063..b00f2126a5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go @@ -44,22 +44,24 @@ var eventsKind = v1beta1.SchemeGroupVersion.WithKind("Event") // Get takes name of the event, and returns the corresponding event object, and an error if there is any. 
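The events expansion methods keep their namespace guard: the event is written into the namespace carried on the object itself, and the client's own namespace, now read through GetNamespace(), must either be empty or match it. A minimal sketch of emitting an event through the CreateWithEventNamespace expansion shown above, with all field values illustrative:

    // Sketch only: emitting an events.k8s.io/v1beta1 Event through the
    // CreateWithEventNamespace expansion. Every field value is illustrative.
    package example

    import (
        "time"

        corev1 "k8s.io/api/core/v1"
        eventsv1beta1 "k8s.io/api/events/v1beta1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    func emitEvent(clientset kubernetes.Interface) error {
        evt := &eventsv1beta1.Event{
            ObjectMeta:          metav1.ObjectMeta{GenerateName: "demo-", Namespace: "default"},
            EventTime:           metav1.MicroTime{Time: time.Now()},
            ReportingController: "example.invalid/controller",
            ReportingInstance:   "example-0",
            Action:              "Illustrate",
            Reason:              "Illustration",
            Regarding:           corev1.ObjectReference{Kind: "Pod", Name: "demo", Namespace: "default"},
            Note:                "written through the CreateWithEventNamespace expansion",
            Type:                corev1.EventTypeNormal,
        }
        // A client created with the "" namespace may write into any namespace;
        // a namespaced client must match evt.Namespace or the call is rejected.
        _, err := clientset.EventsV1beta1().Events("").CreateWithEventNamespace(evt)
        return err
    }
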
func (c *FakeEvents) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Event, err error) { + emptyResult := &v1beta1.Event{} obj, err := c.Fake. - Invokes(testing.NewGetAction(eventsResource, c.ns, name), &v1beta1.Event{}) + Invokes(testing.NewGetActionWithOptions(eventsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Event), err } // List takes label and field selectors, and returns the list of Events that match those selectors. func (c *FakeEvents) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EventList, err error) { + emptyResult := &v1beta1.EventList{} obj, err := c.Fake. - Invokes(testing.NewListAction(eventsResource, eventsKind, c.ns, opts), &v1beta1.EventList{}) + Invokes(testing.NewListActionWithOptions(eventsResource, eventsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeEvents) List(ctx context.Context, opts v1.ListOptions) (result *v1b // Watch returns a watch.Interface that watches the requested events. func (c *FakeEvents) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(eventsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(eventsResource, c.ns, opts)) } // Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. func (c *FakeEvents) Create(ctx context.Context, event *v1beta1.Event, opts v1.CreateOptions) (result *v1beta1.Event, err error) { + emptyResult := &v1beta1.Event{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &v1beta1.Event{}) + Invokes(testing.NewCreateActionWithOptions(eventsResource, c.ns, event, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Event), err } // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. func (c *FakeEvents) Update(ctx context.Context, event *v1beta1.Event, opts v1.UpdateOptions) (result *v1beta1.Event, err error) { + emptyResult := &v1beta1.Event{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &v1beta1.Event{}) + Invokes(testing.NewUpdateActionWithOptions(eventsResource, c.ns, event, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Event), err } @@ -114,7 +118,7 @@ func (c *FakeEvents) Delete(ctx context.Context, name string, opts v1.DeleteOpti // DeleteCollection deletes a collection of objects. func (c *FakeEvents) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(eventsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(eventsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.EventList{}) return err @@ -122,11 +126,12 @@ func (c *FakeEvents) DeleteCollection(ctx context.Context, opts v1.DeleteOptions // Patch applies the patch and returns the patched event. func (c *FakeEvents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Event, err error) { + emptyResult := &v1beta1.Event{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, pt, data, subresources...), &v1beta1.Event{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(eventsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Event), err } @@ -144,11 +149,12 @@ func (c *FakeEvents) Apply(ctx context.Context, event *eventsv1beta1.EventApplyC if name == nil { return nil, fmt.Errorf("event.Name must be provided to Apply") } + emptyResult := &v1beta1.Event{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Event{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(eventsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Event), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go index ffe219fdaa..f86194bf05 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DaemonSetsGetter has a method to return a DaemonSetInterface. @@ -43,6 +40,7 @@ type DaemonSetsGetter interface { type DaemonSetInterface interface { Create(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.CreateOptions) (*v1beta1.DaemonSet, error) Update(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type DaemonSetInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.DaemonSet, err error) Apply(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error) DaemonSetExpansion } // daemonSets implements DaemonSetInterface type daemonSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.DaemonSet, *v1beta1.DaemonSetList, *extensionsv1beta1.DaemonSetApplyConfiguration] } // newDaemonSets returns a DaemonSets func newDaemonSets(c *ExtensionsV1beta1Client, namespace string) *daemonSets { return &daemonSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.DaemonSet, *v1beta1.DaemonSetList, *extensionsv1beta1.DaemonSetApplyConfiguration]( + "daemonsets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.DaemonSet { return &v1beta1.DaemonSet{} }, + func() *v1beta1.DaemonSetList { return &v1beta1.DaemonSetList{} }), } } - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *daemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *daemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.DaemonSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *daemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.CreateOptions) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *daemonSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *daemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched daemonSet. -func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied daemonSet. -func (c *daemonSets) Apply(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - result = &v1beta1.DaemonSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("daemonsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *daemonSets) ApplyStatus(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - - result = &v1beta1.DaemonSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("daemonsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). 
- Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go index c41d8dbc26..021fbb3b3b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go @@ -22,15 +22,14 @@ import ( "context" json "encoding/json" "fmt" - "time" v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DeploymentsGetter has a method to return a DeploymentInterface. @@ -43,6 +42,7 @@ type DeploymentsGetter interface { type DeploymentInterface interface { Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (*v1beta1.Deployment, error) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,6 +51,7 @@ type DeploymentInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) Apply(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (*v1beta1.Scale, error) UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (*v1beta1.Scale, error) @@ -61,209 +62,27 @@ type DeploymentInterface interface { // deployments implements DeploymentInterface type deployments struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *extensionsv1beta1.DeploymentApplyConfiguration] } // newDeployments returns a Deployments func newDeployments(c *ExtensionsV1beta1Client, namespace string) *deployments { return &deployments{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *extensionsv1beta1.DeploymentApplyConfiguration]( + "deployments", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.Deployment { return &v1beta1.Deployment{} }, + func() *v1beta1.DeploymentList { return &v1beta1.DeploymentList{} }), } } -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. 
-func (c *deployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *deployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("deployments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment. -func (c *deployments) Apply(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - result = &v1beta1.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *deployments) ApplyStatus(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - - result = &v1beta1.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // GetScale takes name of the deployment, and returns the corresponding v1beta1.Scale object, and an error if there is any. func (c *deployments) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { result = &v1beta1.Scale{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). @@ -276,8 +95,8 @@ func (c *deployments) GetScale(ctx context.Context, deploymentName string, optio // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. 
func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { result = &v1beta1.Scale{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). @@ -301,8 +120,8 @@ func (c *deployments) ApplyScale(ctx context.Context, deploymentName string, sca } result = &v1beta1.Scale{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). + err = c.GetClient().Patch(types.ApplyPatchType). + Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go index 5c409ac996..bd75b8a38e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go @@ -31,5 +31,5 @@ type DeploymentExpansion interface { // Rollback applied the provided DeploymentRollback to the named deployment in the current namespace. func (c *deployments) Rollback(ctx context.Context, deploymentRollback *v1beta1.DeploymentRollback, opts metav1.CreateOptions) error { - return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("rollback").Body(deploymentRollback).Do(ctx).Error() + return c.GetClient().Post().Namespace(c.GetNamespace()).Resource("deployments").Name(deploymentRollback.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("rollback").Body(deploymentRollback).Do(ctx).Error() } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go index abe3d2da1f..f14943082d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go @@ -44,22 +44,24 @@ var daemonsetsKind = v1beta1.SchemeGroupVersion.WithKind("DaemonSet") // Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. func (c *FakeDaemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) { + emptyResult := &v1beta1.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewGetAction(daemonsetsResource, c.ns, name), &v1beta1.DaemonSet{}) + Invokes(testing.NewGetActionWithOptions(daemonsetsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.DaemonSet), err } // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. func (c *FakeDaemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { + emptyResult := &v1beta1.DaemonSetList{} obj, err := c.Fake. 
- Invokes(testing.NewListAction(daemonsetsResource, daemonsetsKind, c.ns, opts), &v1beta1.DaemonSetList{}) + Invokes(testing.NewListActionWithOptions(daemonsetsResource, daemonsetsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeDaemonSets) List(ctx context.Context, opts v1.ListOptions) (result // Watch returns a watch.Interface that watches the requested daemonSets. func (c *FakeDaemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(daemonsetsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(daemonsetsResource, c.ns, opts)) } // Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.CreateOptions) (result *v1beta1.DaemonSet, err error) { + emptyResult := &v1beta1.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1beta1.DaemonSet{}) + Invokes(testing.NewCreateActionWithOptions(daemonsetsResource, c.ns, daemonSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.DaemonSet), err } // Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) { + emptyResult := &v1beta1.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1beta1.DaemonSet{}) + Invokes(testing.NewUpdateActionWithOptions(daemonsetsResource, c.ns, daemonSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.DaemonSet), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error) { +func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) { + emptyResult := &v1beta1.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1beta1.DaemonSet{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(daemonsetsResource, "status", c.ns, daemonSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.DaemonSet), err } @@ -126,7 +131,7 @@ func (c *FakeDaemonSets) Delete(ctx context.Context, name string, opts v1.Delete // DeleteCollection deletes a collection of objects. 
func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(daemonsetsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.DaemonSetList{}) return err @@ -134,11 +139,12 @@ func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOpt // Patch applies the patch and returns the patched daemonSet. func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.DaemonSet, err error) { + emptyResult := &v1beta1.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &v1beta1.DaemonSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.DaemonSet), err } @@ -156,11 +162,12 @@ func (c *FakeDaemonSets) Apply(ctx context.Context, daemonSet *extensionsv1beta1 if name == nil { return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") } + emptyResult := &v1beta1.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.DaemonSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.DaemonSet), err } @@ -179,11 +186,12 @@ func (c *FakeDaemonSets) ApplyStatus(ctx context.Context, daemonSet *extensionsv if name == nil { return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") } + emptyResult := &v1beta1.DaemonSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.DaemonSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.DaemonSet), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go index e399361a92..b81d4a96c4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go @@ -44,22 +44,24 @@ var deploymentsKind = v1beta1.SchemeGroupVersion.WithKind("Deployment") // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. func (c *FakeDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. 
- Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) + Invokes(testing.NewGetActionWithOptions(deploymentsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { + emptyResult := &v1beta1.DeploymentList{} obj, err := c.Fake. - Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1beta1.DeploymentList{}) + Invokes(testing.NewListActionWithOptions(deploymentsResource, deploymentsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result // Watch returns a watch.Interface that watches the requested deployments. func (c *FakeDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(deploymentsResource, c.ns, opts)) } // Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) { + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) + Invokes(testing.NewCreateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) + Invokes(testing.NewUpdateActionWithOptions(deploymentsResource, c.ns, deployment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) { +func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta1.Deployment{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(deploymentsResource, "status", c.ns, deployment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } @@ -126,7 +131,7 @@ func (c *FakeDeployments) Delete(ctx context.Context, name string, opts v1.Delet // DeleteCollection deletes a collection of objects. func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(deploymentsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.DeploymentList{}) return err @@ -134,11 +139,12 @@ func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOp // Patch applies the patch and returns the patched deployment. func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) { + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1beta1.Deployment{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } @@ -156,11 +162,12 @@ func (c *FakeDeployments) Apply(ctx context.Context, deployment *extensionsv1bet if name == nil { return nil, fmt.Errorf("deployment.Name must be provided to Apply") } + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Deployment{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } @@ -179,33 +186,36 @@ func (c *FakeDeployments) ApplyStatus(ctx context.Context, deployment *extension if name == nil { return nil, fmt.Errorf("deployment.Name must be provided to Apply") } + emptyResult := &v1beta1.Deployment{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.Deployment{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Deployment), err } // GetScale takes name of the deployment, and returns the corresponding scale object, and an error if there is any. func (c *FakeDeployments) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { + emptyResult := &v1beta1.Scale{} obj, err := c.Fake. 
- Invokes(testing.NewGetSubresourceAction(deploymentsResource, c.ns, "scale", deploymentName), &v1beta1.Scale{}) + Invokes(testing.NewGetSubresourceActionWithOptions(deploymentsResource, c.ns, "scale", deploymentName, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Scale), err } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *FakeDeployments) UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { + emptyResult := &v1beta1.Scale{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "scale", c.ns, scale), &v1beta1.Scale{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(deploymentsResource, "scale", c.ns, scale, opts), &v1beta1.Scale{}) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Scale), err } @@ -220,11 +230,12 @@ func (c *FakeDeployments) ApplyScale(ctx context.Context, deploymentName string, if err != nil { return nil, err } + emptyResult := &v1beta1.Scale{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, deploymentName, types.ApplyPatchType, data, "status"), &v1beta1.Scale{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(deploymentsResource, c.ns, deploymentName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Scale), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go index 48ae51e80d..ae95682fc1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go @@ -44,22 +44,24 @@ var ingressesKind = v1beta1.SchemeGroupVersion.WithKind("Ingress") // Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. func (c *FakeIngresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewGetAction(ingressesResource, c.ns, name), &v1beta1.Ingress{}) + Invokes(testing.NewGetActionWithOptions(ingressesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } // List takes label and field selectors, and returns the list of Ingresses that match those selectors. func (c *FakeIngresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) { + emptyResult := &v1beta1.IngressList{} obj, err := c.Fake. - Invokes(testing.NewListAction(ingressesResource, ingressesKind, c.ns, opts), &v1beta1.IngressList{}) + Invokes(testing.NewListActionWithOptions(ingressesResource, ingressesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeIngresses) List(ctx context.Context, opts v1.ListOptions) (result * // Watch returns a watch.Interface that watches the requested ingresses. 
func (c *FakeIngresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(ingressesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(ingressesResource, c.ns, opts)) } // Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. func (c *FakeIngresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) { + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) + Invokes(testing.NewCreateActionWithOptions(ingressesResource, c.ns, ingress, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } // Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. func (c *FakeIngresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) + Invokes(testing.NewUpdateActionWithOptions(ingressesResource, c.ns, ingress, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) { +func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &v1beta1.Ingress{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(ingressesResource, "status", c.ns, ingress, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } @@ -126,7 +131,7 @@ func (c *FakeIngresses) Delete(ctx context.Context, name string, opts v1.DeleteO // DeleteCollection deletes a collection of objects. func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(ingressesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(ingressesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.IngressList{}) return err @@ -134,11 +139,12 @@ func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts v1.DeleteOpti // Patch applies the patch and returns the patched ingress. func (c *FakeIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) { + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, pt, data, subresources...), &v1beta1.Ingress{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } @@ -156,11 +162,12 @@ func (c *FakeIngresses) Apply(ctx context.Context, ingress *extensionsv1beta1.In if name == nil { return nil, fmt.Errorf("ingress.Name must be provided to Apply") } + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Ingress{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } @@ -179,11 +186,12 @@ func (c *FakeIngresses) ApplyStatus(ctx context.Context, ingress *extensionsv1be if name == nil { return nil, fmt.Errorf("ingress.Name must be provided to Apply") } + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.Ingress{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go index a32022140a..d829a0c638 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go @@ -44,22 +44,24 @@ var networkpoliciesKind = v1beta1.SchemeGroupVersion.WithKind("NetworkPolicy") // Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. func (c *FakeNetworkPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.NetworkPolicy, err error) { + emptyResult := &v1beta1.NetworkPolicy{} obj, err := c.Fake. - Invokes(testing.NewGetAction(networkpoliciesResource, c.ns, name), &v1beta1.NetworkPolicy{}) + Invokes(testing.NewGetActionWithOptions(networkpoliciesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.NetworkPolicy), err } // List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. func (c *FakeNetworkPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.NetworkPolicyList, err error) { + emptyResult := &v1beta1.NetworkPolicyList{} obj, err := c.Fake. 
- Invokes(testing.NewListAction(networkpoliciesResource, networkpoliciesKind, c.ns, opts), &v1beta1.NetworkPolicyList{}) + Invokes(testing.NewListActionWithOptions(networkpoliciesResource, networkpoliciesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeNetworkPolicies) List(ctx context.Context, opts v1.ListOptions) (re // Watch returns a watch.Interface that watches the requested networkPolicies. func (c *FakeNetworkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(networkpoliciesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(networkpoliciesResource, c.ns, opts)) } // Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. func (c *FakeNetworkPolicies) Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.CreateOptions) (result *v1beta1.NetworkPolicy, err error) { + emptyResult := &v1beta1.NetworkPolicy{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(networkpoliciesResource, c.ns, networkPolicy), &v1beta1.NetworkPolicy{}) + Invokes(testing.NewCreateActionWithOptions(networkpoliciesResource, c.ns, networkPolicy, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.NetworkPolicy), err } // Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. func (c *FakeNetworkPolicies) Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (result *v1beta1.NetworkPolicy, err error) { + emptyResult := &v1beta1.NetworkPolicy{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(networkpoliciesResource, c.ns, networkPolicy), &v1beta1.NetworkPolicy{}) + Invokes(testing.NewUpdateActionWithOptions(networkpoliciesResource, c.ns, networkPolicy, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.NetworkPolicy), err } @@ -114,7 +118,7 @@ func (c *FakeNetworkPolicies) Delete(ctx context.Context, name string, opts v1.D // DeleteCollection deletes a collection of objects. func (c *FakeNetworkPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(networkpoliciesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(networkpoliciesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.NetworkPolicyList{}) return err @@ -122,11 +126,12 @@ func (c *FakeNetworkPolicies) DeleteCollection(ctx context.Context, opts v1.Dele // Patch applies the patch and returns the patched networkPolicy. func (c *FakeNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.NetworkPolicy, err error) { + emptyResult := &v1beta1.NetworkPolicy{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, name, pt, data, subresources...), &v1beta1.NetworkPolicy{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(networkpoliciesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.NetworkPolicy), err } @@ -144,11 +149,12 @@ func (c *FakeNetworkPolicies) Apply(ctx context.Context, networkPolicy *extensio if name == nil { return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply") } + emptyResult := &v1beta1.NetworkPolicy{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.NetworkPolicy{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(networkpoliciesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.NetworkPolicy), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go index 42da6fa8b6..5d94ba73b0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go @@ -44,22 +44,24 @@ var replicasetsKind = v1beta1.SchemeGroupVersion.WithKind("ReplicaSet") // Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. func (c *FakeReplicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) { + emptyResult := &v1beta1.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewGetAction(replicasetsResource, c.ns, name), &v1beta1.ReplicaSet{}) + Invokes(testing.NewGetActionWithOptions(replicasetsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ReplicaSet), err } // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. func (c *FakeReplicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) { + emptyResult := &v1beta1.ReplicaSetList{} obj, err := c.Fake. - Invokes(testing.NewListAction(replicasetsResource, replicasetsKind, c.ns, opts), &v1beta1.ReplicaSetList{}) + Invokes(testing.NewListActionWithOptions(replicasetsResource, replicasetsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeReplicaSets) List(ctx context.Context, opts v1.ListOptions) (result // Watch returns a watch.Interface that watches the requested replicaSets. func (c *FakeReplicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(replicasetsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(replicasetsResource, c.ns, opts)) } // Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.CreateOptions) (result *v1beta1.ReplicaSet, err error) { + emptyResult := &v1beta1.ReplicaSet{} obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &v1beta1.ReplicaSet{}) + Invokes(testing.NewCreateActionWithOptions(replicasetsResource, c.ns, replicaSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ReplicaSet), err } // Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) { + emptyResult := &v1beta1.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &v1beta1.ReplicaSet{}) + Invokes(testing.NewUpdateActionWithOptions(replicasetsResource, c.ns, replicaSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ReplicaSet), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error) { +func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) { + emptyResult := &v1beta1.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &v1beta1.ReplicaSet{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(replicasetsResource, "status", c.ns, replicaSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ReplicaSet), err } @@ -126,7 +131,7 @@ func (c *FakeReplicaSets) Delete(ctx context.Context, name string, opts v1.Delet // DeleteCollection deletes a collection of objects. func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(replicasetsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(replicasetsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.ReplicaSetList{}) return err @@ -134,11 +139,12 @@ func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOp // Patch applies the patch and returns the patched replicaSet. func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ReplicaSet, err error) { + emptyResult := &v1beta1.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &v1beta1.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ReplicaSet), err } @@ -156,11 +162,12 @@ func (c *FakeReplicaSets) Apply(ctx context.Context, replicaSet *extensionsv1bet if name == nil { return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") } + emptyResult := &v1beta1.ReplicaSet{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ReplicaSet), err } @@ -179,33 +186,36 @@ func (c *FakeReplicaSets) ApplyStatus(ctx context.Context, replicaSet *extension if name == nil { return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") } + emptyResult := &v1beta1.ReplicaSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.ReplicaSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ReplicaSet), err } // GetScale takes name of the replicaSet, and returns the corresponding scale object, and an error if there is any. func (c *FakeReplicaSets) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { + emptyResult := &v1beta1.Scale{} obj, err := c.Fake. - Invokes(testing.NewGetSubresourceAction(replicasetsResource, c.ns, "scale", replicaSetName), &v1beta1.Scale{}) + Invokes(testing.NewGetSubresourceActionWithOptions(replicasetsResource, c.ns, "scale", replicaSetName, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Scale), err } // UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *FakeReplicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { + emptyResult := &v1beta1.Scale{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "scale", c.ns, scale), &v1beta1.Scale{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(replicasetsResource, "scale", c.ns, scale, opts), &v1beta1.Scale{}) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Scale), err } @@ -220,11 +230,12 @@ func (c *FakeReplicaSets) ApplyScale(ctx context.Context, replicaSetName string, if err != nil { return nil, err } + emptyResult := &v1beta1.Scale{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, replicaSetName, types.ApplyPatchType, data, "status"), &v1beta1.Scale{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(replicasetsResource, c.ns, replicaSetName, types.ApplyPatchType, data, opts.ToPatchOptions(), "scale"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Scale), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go index dd4012cc23..4511c93fc2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // IngressesGetter has a method to return a IngressInterface. @@ -43,6 +40,7 @@ type IngressesGetter interface { type IngressInterface interface { Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (*v1beta1.Ingress, error) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type IngressInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) Apply(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) IngressExpansion } // ingresses implements IngressInterface type ingresses struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *extensionsv1beta1.IngressApplyConfiguration] } // newIngresses returns a Ingresses func newIngresses(c *ExtensionsV1beta1Client, namespace string) *ingresses { return &ingresses{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *extensionsv1beta1.IngressApplyConfiguration]( + "ingresses", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.Ingress { return &v1beta1.Ingress{} }, + func() *v1beta1.IngressList { return &v1beta1.IngressList{} }), } } - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. 
-func (c *ingresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *ingresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.IngressList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *ingresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *ingresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). 
- Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ingress. -func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingress. -func (c *ingresses) Apply(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - result = &v1beta1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("ingresses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *ingresses) ApplyStatus(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - - result = &v1beta1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("ingresses"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go index 978b26db03..afa8203c3d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // NetworkPoliciesGetter has a method to return a NetworkPolicyInterface. 
@@ -55,154 +52,18 @@ type NetworkPolicyInterface interface { // networkPolicies implements NetworkPolicyInterface type networkPolicies struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.NetworkPolicy, *v1beta1.NetworkPolicyList, *extensionsv1beta1.NetworkPolicyApplyConfiguration] } // newNetworkPolicies returns a NetworkPolicies func newNetworkPolicies(c *ExtensionsV1beta1Client, namespace string) *networkPolicies { return &networkPolicies{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.NetworkPolicy, *v1beta1.NetworkPolicyList, *extensionsv1beta1.NetworkPolicyApplyConfiguration]( + "networkpolicies", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.NetworkPolicy { return &v1beta1.NetworkPolicy{} }, + func() *v1beta1.NetworkPolicyList { return &v1beta1.NetworkPolicyList{} }), } } - -// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. -func (c *networkPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.NetworkPolicy, err error) { - result = &v1beta1.NetworkPolicy{} - err = c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. -func (c *networkPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.NetworkPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.NetworkPolicyList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested networkPolicies. -func (c *networkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.CreateOptions) (result *v1beta1.NetworkPolicy, err error) { - result = &v1beta1.NetworkPolicy{} - err = c.client.Post(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(networkPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (result *v1beta1.NetworkPolicy, err error) { - result = &v1beta1.NetworkPolicy{} - err = c.client.Put(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(networkPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(networkPolicy). - Do(ctx). 
- Into(result) - return -} - -// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. -func (c *networkPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *networkPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched networkPolicy. -func (c *networkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.NetworkPolicy, err error) { - result = &v1beta1.NetworkPolicy{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied networkPolicy. -func (c *networkPolicies) Apply(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.NetworkPolicy, err error) { - if networkPolicy == nil { - return nil, fmt.Errorf("networkPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(networkPolicy) - if err != nil { - return nil, err - } - name := networkPolicy.Name - if name == nil { - return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply") - } - result = &v1beta1.NetworkPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("networkpolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go index 3c907a3a04..8973948f39 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go @@ -22,15 +22,14 @@ import ( "context" json "encoding/json" "fmt" - "time" v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ReplicaSetsGetter has a method to return a ReplicaSetInterface. @@ -43,6 +42,7 @@ type ReplicaSetsGetter interface { type ReplicaSetInterface interface { Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.CreateOptions) (*v1beta1.ReplicaSet, error) Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,6 +51,7 @@ type ReplicaSetInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ReplicaSet, err error) Apply(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (*v1beta1.Scale, error) UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (*v1beta1.Scale, error) @@ -61,209 +62,27 @@ type ReplicaSetInterface interface { // replicaSets implements ReplicaSetInterface type replicaSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.ReplicaSet, *v1beta1.ReplicaSetList, *extensionsv1beta1.ReplicaSetApplyConfiguration] } // newReplicaSets returns a ReplicaSets func newReplicaSets(c *ExtensionsV1beta1Client, namespace string) *replicaSets { return &replicaSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.ReplicaSet, *v1beta1.ReplicaSetList, *extensionsv1beta1.ReplicaSetApplyConfiguration]( + "replicasets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.ReplicaSet { return &v1beta1.ReplicaSet{} }, + func() *v1beta1.ReplicaSetList { return &v1beta1.ReplicaSetList{} }), } } -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *replicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *replicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ReplicaSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicaSets. -func (c *replicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
- Watch(ctx) -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.CreateOptions) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *replicaSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied replicaSet. 
-func (c *replicaSets) Apply(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - result = &v1beta1.ReplicaSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicasets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *replicaSets) ApplyStatus(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - - result = &v1beta1.ReplicaSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicasets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // GetScale takes name of the replicaSet, and returns the corresponding v1beta1.Scale object, and an error if there is any. func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { result = &v1beta1.Scale{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). @@ -276,8 +95,8 @@ func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, optio // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { result = &v1beta1.Scale{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). @@ -301,8 +120,8 @@ func (c *replicaSets) ApplyScale(ctx context.Context, replicaSetName string, sca } result = &v1beta1.Scale{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). + err = c.GetClient().Patch(types.ApplyPatchType). + Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). 
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go index 922a60d89b..bf2b63fb2d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go @@ -43,20 +43,22 @@ var flowschemasKind = v1.SchemeGroupVersion.WithKind("FlowSchema") // Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.FlowSchema, err error) { + emptyResult := &v1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(flowschemasResource, name), &v1.FlowSchema{}) + Invokes(testing.NewRootGetActionWithOptions(flowschemasResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.FlowSchema), err } // List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. func (c *FakeFlowSchemas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.FlowSchemaList, err error) { + emptyResult := &v1.FlowSchemaList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(flowschemasResource, flowschemasKind, opts), &v1.FlowSchemaList{}) + Invokes(testing.NewRootListActionWithOptions(flowschemasResource, flowschemasKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeFlowSchemas) List(ctx context.Context, opts metav1.ListOptions) (re // Watch returns a watch.Interface that watches the requested flowSchemas. func (c *FakeFlowSchemas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(flowschemasResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(flowschemasResource, opts)) } // Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.CreateOptions) (result *v1.FlowSchema, err error) { + emptyResult := &v1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1.FlowSchema{}) + Invokes(testing.NewRootCreateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.FlowSchema), err } // Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (result *v1.FlowSchema, err error) { + emptyResult := &v1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1.FlowSchema{}) + Invokes(testing.NewRootUpdateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.FlowSchema), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error) { +func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (result *v1.FlowSchema, err error) { + emptyResult := &v1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1.FlowSchema{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(flowschemasResource, "status", flowSchema, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.FlowSchema), err } @@ -118,7 +123,7 @@ func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts metav1.D // DeleteCollection deletes a collection of objects. func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(flowschemasResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(flowschemasResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.FlowSchemaList{}) return err @@ -126,10 +131,11 @@ func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts metav1.Dele // Patch applies the patch and returns the patched flowSchema. func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FlowSchema, err error) { + emptyResult := &v1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.FlowSchema), err } @@ -147,10 +153,11 @@ func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1.F if name == nil { return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } + emptyResult := &v1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data), &v1.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.FlowSchema), err } @@ -169,10 +176,11 @@ func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontr if name == nil { return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } + emptyResult := &v1.FlowSchema{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data, "status"), &v1.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.FlowSchema), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go index 27d9586748..053de56ed1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go @@ -43,20 +43,22 @@ var prioritylevelconfigurationsKind = v1.SchemeGroupVersion.WithKind("PriorityLe // Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityLevelConfiguration, err error) { + emptyResult := &v1.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootGetActionWithOptions(prioritylevelconfigurationsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PriorityLevelConfiguration), err } // List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityLevelConfigurationList, err error) { + emptyResult := &v1.PriorityLevelConfigurationList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1.PriorityLevelConfigurationList{}) + Invokes(testing.NewRootListActionWithOptions(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts metav1. // Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(prioritylevelconfigurationsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(prioritylevelconfigurationsResource, opts)) } // Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.CreateOptions) (result *v1.PriorityLevelConfiguration, err error) { + emptyResult := &v1.PriorityLevelConfiguration{} obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootCreateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PriorityLevelConfiguration), err } // Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (result *v1.PriorityLevelConfiguration, err error) { + emptyResult := &v1.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootUpdateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PriorityLevelConfiguration), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error) { +func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (result *v1.PriorityLevelConfiguration, err error) { + emptyResult := &v1.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PriorityLevelConfiguration), err } @@ -118,7 +123,7 @@ func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name strin // DeleteCollection deletes a collection of objects. func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(prioritylevelconfigurationsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(prioritylevelconfigurationsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.PriorityLevelConfigurationList{}) return err @@ -126,10 +131,11 @@ func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, // Patch applies the patch and returns the patched priorityLevelConfiguration. func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityLevelConfiguration, err error) { + emptyResult := &v1.PriorityLevelConfiguration{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PriorityLevelConfiguration), err } @@ -147,10 +153,11 @@ func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLev if name == nil { return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } + emptyResult := &v1.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data), &v1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PriorityLevelConfiguration), err } @@ -169,10 +176,11 @@ func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, prior if name == nil { return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } + emptyResult := &v1.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, "status"), &v1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PriorityLevelConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go index bd36c5e6a4..2606cee070 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/flowcontrol/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // FlowSchemasGetter has a method to return a FlowSchemaInterface. @@ -43,6 +40,7 @@ type FlowSchemasGetter interface { type FlowSchemaInterface interface { Create(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.CreateOptions) (*v1.FlowSchema, error) Update(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,193 +49,25 @@ type FlowSchemaInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FlowSchema, err error) Apply(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) FlowSchemaExpansion } // flowSchemas implements FlowSchemaInterface type flowSchemas struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.FlowSchema, *v1.FlowSchemaList, *flowcontrolv1.FlowSchemaApplyConfiguration] } // newFlowSchemas returns a FlowSchemas func newFlowSchemas(c *FlowcontrolV1Client) *flowSchemas { return &flowSchemas{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.FlowSchema, *v1.FlowSchemaList, *flowcontrolv1.FlowSchemaApplyConfiguration]( + "flowschemas", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.FlowSchema { return &v1.FlowSchema{} }, + func() *v1.FlowSchemaList { return &v1.FlowSchemaList{} }), } } - -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *flowSchemas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.FlowSchema, err error) { - result = &v1.FlowSchema{} - err = c.client.Get(). - Resource("flowschemas"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. -func (c *flowSchemas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.FlowSchemaList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.FlowSchemaList{} - err = c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *flowSchemas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.CreateOptions) (result *v1.FlowSchema, err error) { - result = &v1.FlowSchema{} - err = c.client.Post(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (result *v1.FlowSchema, err error) { - result = &v1.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (result *v1.FlowSchema, err error) { - result = &v1.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *flowSchemas) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("flowschemas"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *flowSchemas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("flowschemas"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched flowSchema. -func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FlowSchema, err error) { - result = &v1.FlowSchema{} - err = c.client.Patch(pt). - Resource("flowschemas"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - result = &v1.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - - result = &v1.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go index 797fe94035..64907af606 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/flowcontrol/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface. @@ -43,6 +40,7 @@ type PriorityLevelConfigurationsGetter interface { type PriorityLevelConfigurationInterface interface { Create(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.CreateOptions) (*v1.PriorityLevelConfiguration, error) Update(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,193 +49,25 @@ type PriorityLevelConfigurationInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityLevelConfiguration, err error) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) PriorityLevelConfigurationExpansion } // priorityLevelConfigurations implements PriorityLevelConfigurationInterface type priorityLevelConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.PriorityLevelConfiguration, *v1.PriorityLevelConfigurationList, *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration] } // newPriorityLevelConfigurations returns a PriorityLevelConfigurations func newPriorityLevelConfigurations(c *FlowcontrolV1Client) *priorityLevelConfigurations { return &priorityLevelConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.PriorityLevelConfiguration, *v1.PriorityLevelConfigurationList, *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration]( + "prioritylevelconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.PriorityLevelConfiguration { return &v1.PriorityLevelConfiguration{} }, + func() *v1.PriorityLevelConfigurationList { return &v1.PriorityLevelConfigurationList{} }), } } - -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityLevelConfiguration, err error) { - result = &v1.PriorityLevelConfiguration{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *priorityLevelConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityLevelConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PriorityLevelConfigurationList{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.CreateOptions) (result *v1.PriorityLevelConfiguration, err error) { - result = &v1.PriorityLevelConfiguration{} - err = c.client.Post(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (result *v1.PriorityLevelConfiguration, err error) { - result = &v1.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (result *v1.PriorityLevelConfiguration, err error) { - result = &v1.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityLevelConfiguration, err error) { - result = &v1.PriorityLevelConfiguration{} - err = c.client.Patch(pt). - Resource("prioritylevelconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. 
-func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - result = &v1.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - - result = &v1.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go index be7a7e390f..8b4435a8ad 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go @@ -43,20 +43,22 @@ var flowschemasKind = v1beta1.SchemeGroupVersion.WithKind("FlowSchema") // Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.FlowSchema, err error) { + emptyResult := &v1beta1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(flowschemasResource, name), &v1beta1.FlowSchema{}) + Invokes(testing.NewRootGetActionWithOptions(flowschemasResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.FlowSchema), err } // List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.FlowSchemaList, err error) { + emptyResult := &v1beta1.FlowSchemaList{} obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(flowschemasResource, flowschemasKind, opts), &v1beta1.FlowSchemaList{}) + Invokes(testing.NewRootListActionWithOptions(flowschemasResource, flowschemasKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result // Watch returns a watch.Interface that watches the requested flowSchemas. func (c *FakeFlowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(flowschemasResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(flowschemasResource, opts)) } // Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.CreateOptions) (result *v1beta1.FlowSchema, err error) { + emptyResult := &v1beta1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1beta1.FlowSchema{}) + Invokes(testing.NewRootCreateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.FlowSchema), err } // Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) { + emptyResult := &v1beta1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1beta1.FlowSchema{}) + Invokes(testing.NewRootUpdateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.FlowSchema), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error) { +func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) { + emptyResult := &v1beta1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1beta1.FlowSchema{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(flowschemasResource, "status", flowSchema, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.FlowSchema), err } @@ -118,7 +123,7 @@ func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts v1.Delet // DeleteCollection deletes a collection of objects. 
func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(flowschemasResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(flowschemasResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.FlowSchemaList{}) return err @@ -126,10 +131,11 @@ func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOp // Patch applies the patch and returns the patched flowSchema. func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.FlowSchema, err error) { + emptyResult := &v1beta1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1beta1.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.FlowSchema), err } @@ -147,10 +153,11 @@ func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1be if name == nil { return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } + emptyResult := &v1beta1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data), &v1beta1.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.FlowSchema), err } @@ -169,10 +176,11 @@ func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontr if name == nil { return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } + emptyResult := &v1beta1.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data, "status"), &v1beta1.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.FlowSchema), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go index 698a168b37..e139e4dceb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go @@ -43,20 +43,22 @@ var prioritylevelconfigurationsKind = v1beta1.SchemeGroupVersion.WithKind("Prior // Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta1.PriorityLevelConfiguration{} obj, err := c.Fake. 
- Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1beta1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootGetActionWithOptions(prioritylevelconfigurationsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PriorityLevelConfiguration), err } // List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityLevelConfigurationList, err error) { + emptyResult := &v1beta1.PriorityLevelConfigurationList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1beta1.PriorityLevelConfigurationList{}) + Invokes(testing.NewRootListActionWithOptions(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.List // Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(prioritylevelconfigurationsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(prioritylevelconfigurationsResource, opts)) } // Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta1.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootCreateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PriorityLevelConfiguration), err } // Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta1.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootUpdateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PriorityLevelConfiguration), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error) { +func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta1.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1beta1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PriorityLevelConfiguration), err } @@ -118,7 +123,7 @@ func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name strin // DeleteCollection deletes a collection of objects. func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(prioritylevelconfigurationsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(prioritylevelconfigurationsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.PriorityLevelConfigurationList{}) return err @@ -126,10 +131,11 @@ func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, // Patch applies the patch and returns the patched priorityLevelConfiguration. func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta1.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1beta1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PriorityLevelConfiguration), err } @@ -147,10 +153,11 @@ func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLev if name == nil { return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } + emptyResult := &v1beta1.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data), &v1beta1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PriorityLevelConfiguration), err } @@ -169,10 +176,11 @@ func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, prior if name == nil { return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } + emptyResult := &v1beta1.PriorityLevelConfiguration{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, "status"), &v1beta1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PriorityLevelConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go index a9d38becf9..3c6805b9bc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/flowcontrol/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // FlowSchemasGetter has a method to return a FlowSchemaInterface. @@ -43,6 +40,7 @@ type FlowSchemasGetter interface { type FlowSchemaInterface interface { Create(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.CreateOptions) (*v1beta1.FlowSchema, error) Update(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type FlowSchemaInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.FlowSchema, err error) Apply(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error) FlowSchemaExpansion } // flowSchemas implements FlowSchemaInterface type flowSchemas struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.FlowSchema, *v1beta1.FlowSchemaList, *flowcontrolv1beta1.FlowSchemaApplyConfiguration] } // newFlowSchemas returns a FlowSchemas func newFlowSchemas(c *FlowcontrolV1beta1Client) *flowSchemas { return &flowSchemas{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.FlowSchema, *v1beta1.FlowSchemaList, *flowcontrolv1beta1.FlowSchemaApplyConfiguration]( + "flowschemas", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.FlowSchema { return &v1beta1.FlowSchema{} }, + func() *v1beta1.FlowSchemaList { return &v1beta1.FlowSchemaList{} }), } } - -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.FlowSchema, err error) { - result = &v1beta1.FlowSchema{} - err = c.client.Get(). - Resource("flowschemas"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. -func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.FlowSchemaList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.FlowSchemaList{} - err = c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.CreateOptions) (result *v1beta1.FlowSchema, err error) { - result = &v1beta1.FlowSchema{} - err = c.client.Post(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) { - result = &v1beta1.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) { - result = &v1beta1.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("flowschemas"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("flowschemas"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched flowSchema. -func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.FlowSchema, err error) { - result = &v1beta1.FlowSchema{} - err = c.client.Patch(pt). - Resource("flowschemas"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - result = &v1beta1.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - - result = &v1beta1.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go index 41f35cbccd..049f4049d6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/flowcontrol/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface. @@ -43,6 +40,7 @@ type PriorityLevelConfigurationsGetter interface { type PriorityLevelConfigurationInterface interface { Create(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta1.PriorityLevelConfiguration, error) Update(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type PriorityLevelConfigurationInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityLevelConfiguration, err error) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error) PriorityLevelConfigurationExpansion } // priorityLevelConfigurations implements PriorityLevelConfigurationInterface type priorityLevelConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.PriorityLevelConfiguration, *v1beta1.PriorityLevelConfigurationList, *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration] } // newPriorityLevelConfigurations returns a PriorityLevelConfigurations func newPriorityLevelConfigurations(c *FlowcontrolV1beta1Client) *priorityLevelConfigurations { return &priorityLevelConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.PriorityLevelConfiguration, *v1beta1.PriorityLevelConfigurationList, *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration]( + "prioritylevelconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.PriorityLevelConfiguration { return &v1beta1.PriorityLevelConfiguration{} }, + func() *v1beta1.PriorityLevelConfigurationList { return &v1beta1.PriorityLevelConfigurationList{} }), } } - -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityLevelConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.PriorityLevelConfigurationList{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Post(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityLevelConfiguration, err error) { - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Patch(pt). - Resource("prioritylevelconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. 
-func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go index 7ce6d2116b..41cad9b7a1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go @@ -43,20 +43,22 @@ var flowschemasKind = v1beta2.SchemeGroupVersion.WithKind("FlowSchema") // Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.FlowSchema, err error) { + emptyResult := &v1beta2.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(flowschemasResource, name), &v1beta2.FlowSchema{}) + Invokes(testing.NewRootGetActionWithOptions(flowschemasResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.FlowSchema), err } // List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.FlowSchemaList, err error) { + emptyResult := &v1beta2.FlowSchemaList{} obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(flowschemasResource, flowschemasKind, opts), &v1beta2.FlowSchemaList{}) + Invokes(testing.NewRootListActionWithOptions(flowschemasResource, flowschemasKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result // Watch returns a watch.Interface that watches the requested flowSchemas. func (c *FakeFlowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(flowschemasResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(flowschemasResource, opts)) } // Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.CreateOptions) (result *v1beta2.FlowSchema, err error) { + emptyResult := &v1beta2.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1beta2.FlowSchema{}) + Invokes(testing.NewRootCreateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.FlowSchema), err } // Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (result *v1beta2.FlowSchema, err error) { + emptyResult := &v1beta2.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1beta2.FlowSchema{}) + Invokes(testing.NewRootUpdateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.FlowSchema), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (*v1beta2.FlowSchema, error) { +func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (result *v1beta2.FlowSchema, err error) { + emptyResult := &v1beta2.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1beta2.FlowSchema{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(flowschemasResource, "status", flowSchema, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.FlowSchema), err } @@ -118,7 +123,7 @@ func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts v1.Delet // DeleteCollection deletes a collection of objects. 
func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(flowschemasResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(flowschemasResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta2.FlowSchemaList{}) return err @@ -126,10 +131,11 @@ func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOp // Patch applies the patch and returns the patched flowSchema. func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.FlowSchema, err error) { + emptyResult := &v1beta2.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1beta2.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.FlowSchema), err } @@ -147,10 +153,11 @@ func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1be if name == nil { return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } + emptyResult := &v1beta2.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data), &v1beta2.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.FlowSchema), err } @@ -169,10 +176,11 @@ func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontr if name == nil { return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } + emptyResult := &v1beta2.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data, "status"), &v1beta2.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.FlowSchema), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go index 7340f8a09e..f9eac85d51 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go @@ -43,20 +43,22 @@ var prioritylevelconfigurationsKind = v1beta2.SchemeGroupVersion.WithKind("Prior // Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta2.PriorityLevelConfiguration{} obj, err := c.Fake. 
- Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1beta2.PriorityLevelConfiguration{}) + Invokes(testing.NewRootGetActionWithOptions(prioritylevelconfigurationsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.PriorityLevelConfiguration), err } // List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.PriorityLevelConfigurationList, err error) { + emptyResult := &v1beta2.PriorityLevelConfigurationList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1beta2.PriorityLevelConfigurationList{}) + Invokes(testing.NewRootListActionWithOptions(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.List // Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(prioritylevelconfigurationsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(prioritylevelconfigurationsResource, opts)) } // Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta2.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta2.PriorityLevelConfiguration{}) + Invokes(testing.NewRootCreateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.PriorityLevelConfiguration), err } // Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta2.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta2.PriorityLevelConfiguration{}) + Invokes(testing.NewRootUpdateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.PriorityLevelConfiguration), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta2.PriorityLevelConfiguration, error) { +func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta2.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1beta2.PriorityLevelConfiguration{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.PriorityLevelConfiguration), err } @@ -118,7 +123,7 @@ func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name strin // DeleteCollection deletes a collection of objects. func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(prioritylevelconfigurationsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(prioritylevelconfigurationsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta2.PriorityLevelConfigurationList{}) return err @@ -126,10 +131,11 @@ func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, // Patch applies the patch and returns the patched priorityLevelConfiguration. func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta2.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1beta2.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.PriorityLevelConfiguration), err } @@ -147,10 +153,11 @@ func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLev if name == nil { return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } + emptyResult := &v1beta2.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data), &v1beta2.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.PriorityLevelConfiguration), err } @@ -169,10 +176,11 @@ func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, prior if name == nil { return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } + emptyResult := &v1beta2.PriorityLevelConfiguration{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, "status"), &v1beta2.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta2.PriorityLevelConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go index 3a1f12b6a2..2706157628 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go @@ -20,17 +20,14 @@ package v1beta2 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta2 "k8s.io/api/flowcontrol/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // FlowSchemasGetter has a method to return a FlowSchemaInterface. @@ -43,6 +40,7 @@ type FlowSchemasGetter interface { type FlowSchemaInterface interface { Create(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.CreateOptions) (*v1beta2.FlowSchema, error) Update(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (*v1beta2.FlowSchema, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (*v1beta2.FlowSchema, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type FlowSchemaInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.FlowSchema, err error) Apply(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) FlowSchemaExpansion } // flowSchemas implements FlowSchemaInterface type flowSchemas struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta2.FlowSchema, *v1beta2.FlowSchemaList, *flowcontrolv1beta2.FlowSchemaApplyConfiguration] } // newFlowSchemas returns a FlowSchemas func newFlowSchemas(c *FlowcontrolV1beta2Client) *flowSchemas { return &flowSchemas{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta2.FlowSchema, *v1beta2.FlowSchemaList, *flowcontrolv1beta2.FlowSchemaApplyConfiguration]( + "flowschemas", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta2.FlowSchema { return &v1beta2.FlowSchema{} }, + func() *v1beta2.FlowSchemaList { return &v1beta2.FlowSchemaList{} }), } } - -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.FlowSchema, err error) { - result = &v1beta2.FlowSchema{} - err = c.client.Get(). - Resource("flowschemas"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. -func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.FlowSchemaList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.FlowSchemaList{} - err = c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.CreateOptions) (result *v1beta2.FlowSchema, err error) { - result = &v1beta2.FlowSchema{} - err = c.client.Post(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (result *v1beta2.FlowSchema, err error) { - result = &v1beta2.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (result *v1beta2.FlowSchema, err error) { - result = &v1beta2.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("flowschemas"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("flowschemas"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched flowSchema. -func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.FlowSchema, err error) { - result = &v1beta2.FlowSchema{} - err = c.client.Patch(pt). - Resource("flowschemas"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - result = &v1beta2.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - - result = &v1beta2.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go index f028869f17..00ead4c60d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go @@ -20,17 +20,14 @@ package v1beta2 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta2 "k8s.io/api/flowcontrol/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface. @@ -43,6 +40,7 @@ type PriorityLevelConfigurationsGetter interface { type PriorityLevelConfigurationInterface interface { Create(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta2.PriorityLevelConfiguration, error) Update(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta2.PriorityLevelConfiguration, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta2.PriorityLevelConfiguration, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type PriorityLevelConfigurationInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.PriorityLevelConfiguration, err error) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) PriorityLevelConfigurationExpansion } // priorityLevelConfigurations implements PriorityLevelConfigurationInterface type priorityLevelConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta2.PriorityLevelConfiguration, *v1beta2.PriorityLevelConfigurationList, *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration] } // newPriorityLevelConfigurations returns a PriorityLevelConfigurations func newPriorityLevelConfigurations(c *FlowcontrolV1beta2Client) *priorityLevelConfigurations { return &priorityLevelConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta2.PriorityLevelConfiguration, *v1beta2.PriorityLevelConfigurationList, *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration]( + "prioritylevelconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta2.PriorityLevelConfiguration { return &v1beta2.PriorityLevelConfiguration{} }, + func() *v1beta2.PriorityLevelConfigurationList { return &v1beta2.PriorityLevelConfigurationList{} }), } } - -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.PriorityLevelConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.PriorityLevelConfigurationList{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Post(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.PriorityLevelConfiguration, err error) { - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Patch(pt). - Resource("prioritylevelconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. 
-func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowschema.go index 1371f6ed67..70dca796a4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowschema.go @@ -43,20 +43,22 @@ var flowschemasKind = v1beta3.SchemeGroupVersion.WithKind("FlowSchema") // Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta3.FlowSchema, err error) { + emptyResult := &v1beta3.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(flowschemasResource, name), &v1beta3.FlowSchema{}) + Invokes(testing.NewRootGetActionWithOptions(flowschemasResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.FlowSchema), err } // List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta3.FlowSchemaList, err error) { + emptyResult := &v1beta3.FlowSchemaList{} obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(flowschemasResource, flowschemasKind, opts), &v1beta3.FlowSchemaList{}) + Invokes(testing.NewRootListActionWithOptions(flowschemasResource, flowschemasKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result // Watch returns a watch.Interface that watches the requested flowSchemas. func (c *FakeFlowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(flowschemasResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(flowschemasResource, opts)) } // Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.CreateOptions) (result *v1beta3.FlowSchema, err error) { + emptyResult := &v1beta3.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1beta3.FlowSchema{}) + Invokes(testing.NewRootCreateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.FlowSchema), err } // Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (result *v1beta3.FlowSchema, err error) { + emptyResult := &v1beta3.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1beta3.FlowSchema{}) + Invokes(testing.NewRootUpdateActionWithOptions(flowschemasResource, flowSchema, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.FlowSchema), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (*v1beta3.FlowSchema, error) { +func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (result *v1beta3.FlowSchema, err error) { + emptyResult := &v1beta3.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1beta3.FlowSchema{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(flowschemasResource, "status", flowSchema, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.FlowSchema), err } @@ -118,7 +123,7 @@ func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts v1.Delet // DeleteCollection deletes a collection of objects. 
func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(flowschemasResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(flowschemasResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta3.FlowSchemaList{}) return err @@ -126,10 +131,11 @@ func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOp // Patch applies the patch and returns the patched flowSchema. func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.FlowSchema, err error) { + emptyResult := &v1beta3.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1beta3.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.FlowSchema), err } @@ -147,10 +153,11 @@ func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1be if name == nil { return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } + emptyResult := &v1beta3.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data), &v1beta3.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.FlowSchema), err } @@ -169,10 +176,11 @@ func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontr if name == nil { return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } + emptyResult := &v1beta3.FlowSchema{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data, "status"), &v1beta3.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(flowschemasResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.FlowSchema), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_prioritylevelconfiguration.go index a0e266fecb..45836a6453 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_prioritylevelconfiguration.go @@ -43,20 +43,22 @@ var prioritylevelconfigurationsKind = v1beta3.SchemeGroupVersion.WithKind("Prior // Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta3.PriorityLevelConfiguration{} obj, err := c.Fake. 
- Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1beta3.PriorityLevelConfiguration{}) + Invokes(testing.NewRootGetActionWithOptions(prioritylevelconfigurationsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.PriorityLevelConfiguration), err } // List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta3.PriorityLevelConfigurationList, err error) { + emptyResult := &v1beta3.PriorityLevelConfigurationList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1beta3.PriorityLevelConfigurationList{}) + Invokes(testing.NewRootListActionWithOptions(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.List // Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(prioritylevelconfigurationsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(prioritylevelconfigurationsResource, opts)) } // Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta3.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta3.PriorityLevelConfiguration{}) + Invokes(testing.NewRootCreateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.PriorityLevelConfiguration), err } // Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta3.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta3.PriorityLevelConfiguration{}) + Invokes(testing.NewRootUpdateActionWithOptions(prioritylevelconfigurationsResource, priorityLevelConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.PriorityLevelConfiguration), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta3.PriorityLevelConfiguration, error) { +func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta3.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1beta3.PriorityLevelConfiguration{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.PriorityLevelConfiguration), err } @@ -118,7 +123,7 @@ func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name strin // DeleteCollection deletes a collection of objects. func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(prioritylevelconfigurationsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(prioritylevelconfigurationsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta3.PriorityLevelConfigurationList{}) return err @@ -126,10 +131,11 @@ func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, // Patch applies the patch and returns the patched priorityLevelConfiguration. func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.PriorityLevelConfiguration, err error) { + emptyResult := &v1beta3.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1beta3.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.PriorityLevelConfiguration), err } @@ -147,10 +153,11 @@ func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLev if name == nil { return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } + emptyResult := &v1beta3.PriorityLevelConfiguration{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data), &v1beta3.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.PriorityLevelConfiguration), err } @@ -169,10 +176,11 @@ func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, prior if name == nil { return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } + emptyResult := &v1beta3.PriorityLevelConfiguration{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, "status"), &v1beta3.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta3.PriorityLevelConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go index 5fa39d6baf..35f600cdf4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go @@ -20,17 +20,14 @@ package v1beta3 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta3 "k8s.io/api/flowcontrol/v1beta3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // FlowSchemasGetter has a method to return a FlowSchemaInterface. @@ -43,6 +40,7 @@ type FlowSchemasGetter interface { type FlowSchemaInterface interface { Create(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.CreateOptions) (*v1beta3.FlowSchema, error) Update(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (*v1beta3.FlowSchema, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (*v1beta3.FlowSchema, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type FlowSchemaInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.FlowSchema, err error) Apply(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) FlowSchemaExpansion } // flowSchemas implements FlowSchemaInterface type flowSchemas struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta3.FlowSchema, *v1beta3.FlowSchemaList, *flowcontrolv1beta3.FlowSchemaApplyConfiguration] } // newFlowSchemas returns a FlowSchemas func newFlowSchemas(c *FlowcontrolV1beta3Client) *flowSchemas { return &flowSchemas{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta3.FlowSchema, *v1beta3.FlowSchemaList, *flowcontrolv1beta3.FlowSchemaApplyConfiguration]( + "flowschemas", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta3.FlowSchema { return &v1beta3.FlowSchema{} }, + func() *v1beta3.FlowSchemaList { return &v1beta3.FlowSchemaList{} }), } } - -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta3.FlowSchema, err error) { - result = &v1beta3.FlowSchema{} - err = c.client.Get(). - Resource("flowschemas"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. -func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta3.FlowSchemaList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta3.FlowSchemaList{} - err = c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.CreateOptions) (result *v1beta3.FlowSchema, err error) { - result = &v1beta3.FlowSchema{} - err = c.client.Post(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (result *v1beta3.FlowSchema, err error) { - result = &v1beta3.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (result *v1beta3.FlowSchema, err error) { - result = &v1beta3.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("flowschemas"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("flowschemas"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched flowSchema. -func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.FlowSchema, err error) { - result = &v1beta3.FlowSchema{} - err = c.client.Patch(pt). - Resource("flowschemas"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - result = &v1beta3.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - - result = &v1beta3.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go index 49f05257c9..93842e0cf0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go @@ -20,17 +20,14 @@ package v1beta3 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta3 "k8s.io/api/flowcontrol/v1beta3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface. @@ -43,6 +40,7 @@ type PriorityLevelConfigurationsGetter interface { type PriorityLevelConfigurationInterface interface { Create(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta3.PriorityLevelConfiguration, error) Update(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta3.PriorityLevelConfiguration, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta3.PriorityLevelConfiguration, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type PriorityLevelConfigurationInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.PriorityLevelConfiguration, err error) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) PriorityLevelConfigurationExpansion } // priorityLevelConfigurations implements PriorityLevelConfigurationInterface type priorityLevelConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta3.PriorityLevelConfiguration, *v1beta3.PriorityLevelConfigurationList, *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration] } // newPriorityLevelConfigurations returns a PriorityLevelConfigurations func newPriorityLevelConfigurations(c *FlowcontrolV1beta3Client) *priorityLevelConfigurations { return &priorityLevelConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta3.PriorityLevelConfiguration, *v1beta3.PriorityLevelConfigurationList, *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration]( + "prioritylevelconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta3.PriorityLevelConfiguration { return &v1beta3.PriorityLevelConfiguration{} }, + func() *v1beta3.PriorityLevelConfigurationList { return &v1beta3.PriorityLevelConfigurationList{} }), } } - -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - result = &v1beta3.PriorityLevelConfiguration{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta3.PriorityLevelConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta3.PriorityLevelConfigurationList{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - result = &v1beta3.PriorityLevelConfiguration{} - err = c.client.Post(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - result = &v1beta3.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - result = &v1beta3.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.PriorityLevelConfiguration, err error) { - result = &v1beta3.PriorityLevelConfiguration{} - err = c.client.Patch(pt). - Resource("prioritylevelconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. 
-func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - result = &v1beta3.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - - result = &v1beta3.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go index 002de0dd8a..a9693338b5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go @@ -44,22 +44,24 @@ var ingressesKind = v1.SchemeGroupVersion.WithKind("Ingress") // Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. func (c *FakeIngresses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Ingress, err error) { + emptyResult := &v1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewGetAction(ingressesResource, c.ns, name), &v1.Ingress{}) + Invokes(testing.NewGetActionWithOptions(ingressesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Ingress), err } // List takes label and field selectors, and returns the list of Ingresses that match those selectors. func (c *FakeIngresses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressList, err error) { + emptyResult := &v1.IngressList{} obj, err := c.Fake. 
- Invokes(testing.NewListAction(ingressesResource, ingressesKind, c.ns, opts), &v1.IngressList{}) + Invokes(testing.NewListActionWithOptions(ingressesResource, ingressesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeIngresses) List(ctx context.Context, opts metav1.ListOptions) (resu // Watch returns a watch.Interface that watches the requested ingresses. func (c *FakeIngresses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(ingressesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(ingressesResource, c.ns, opts)) } // Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. func (c *FakeIngresses) Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (result *v1.Ingress, err error) { + emptyResult := &v1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(ingressesResource, c.ns, ingress), &v1.Ingress{}) + Invokes(testing.NewCreateActionWithOptions(ingressesResource, c.ns, ingress, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Ingress), err } // Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. func (c *FakeIngresses) Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) { + emptyResult := &v1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(ingressesResource, c.ns, ingress), &v1.Ingress{}) + Invokes(testing.NewUpdateActionWithOptions(ingressesResource, c.ns, ingress, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Ingress), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error) { +func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) { + emptyResult := &v1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &v1.Ingress{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(ingressesResource, "status", c.ns, ingress, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Ingress), err } @@ -126,7 +131,7 @@ func (c *FakeIngresses) Delete(ctx context.Context, name string, opts metav1.Del // DeleteCollection deletes a collection of objects. func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(ingressesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(ingressesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.IngressList{}) return err @@ -134,11 +139,12 @@ func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts metav1.Delete // Patch applies the patch and returns the patched ingress. 
func (c *FakeIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error) { + emptyResult := &v1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, pt, data, subresources...), &v1.Ingress{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Ingress), err } @@ -156,11 +162,12 @@ func (c *FakeIngresses) Apply(ctx context.Context, ingress *networkingv1.Ingress if name == nil { return nil, fmt.Errorf("ingress.Name must be provided to Apply") } + emptyResult := &v1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data), &v1.Ingress{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Ingress), err } @@ -179,11 +186,12 @@ func (c *FakeIngresses) ApplyStatus(ctx context.Context, ingress *networkingv1.I if name == nil { return nil, fmt.Errorf("ingress.Name must be provided to Apply") } + emptyResult := &v1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.Ingress{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Ingress), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go index 208a975082..cdbd594452 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go @@ -43,20 +43,22 @@ var ingressclassesKind = v1.SchemeGroupVersion.WithKind("IngressClass") // Get takes name of the ingressClass, and returns the corresponding ingressClass object, and an error if there is any. func (c *FakeIngressClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.IngressClass, err error) { + emptyResult := &v1.IngressClass{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(ingressclassesResource, name), &v1.IngressClass{}) + Invokes(testing.NewRootGetActionWithOptions(ingressclassesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.IngressClass), err } // List takes label and field selectors, and returns the list of IngressClasses that match those selectors. func (c *FakeIngressClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressClassList, err error) { + emptyResult := &v1.IngressClassList{} obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(ingressclassesResource, ingressclassesKind, opts), &v1.IngressClassList{}) + Invokes(testing.NewRootListActionWithOptions(ingressclassesResource, ingressclassesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeIngressClasses) List(ctx context.Context, opts metav1.ListOptions) // Watch returns a watch.Interface that watches the requested ingressClasses. func (c *FakeIngressClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(ingressclassesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(ingressclassesResource, opts)) } // Create takes the representation of a ingressClass and creates it. Returns the server's representation of the ingressClass, and an error, if there is any. func (c *FakeIngressClasses) Create(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.CreateOptions) (result *v1.IngressClass, err error) { + emptyResult := &v1.IngressClass{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(ingressclassesResource, ingressClass), &v1.IngressClass{}) + Invokes(testing.NewRootCreateActionWithOptions(ingressclassesResource, ingressClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.IngressClass), err } // Update takes the representation of a ingressClass and updates it. Returns the server's representation of the ingressClass, and an error, if there is any. func (c *FakeIngressClasses) Update(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.UpdateOptions) (result *v1.IngressClass, err error) { + emptyResult := &v1.IngressClass{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(ingressclassesResource, ingressClass), &v1.IngressClass{}) + Invokes(testing.NewRootUpdateActionWithOptions(ingressclassesResource, ingressClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.IngressClass), err } @@ -107,7 +111,7 @@ func (c *FakeIngressClasses) Delete(ctx context.Context, name string, opts metav // DeleteCollection deletes a collection of objects. func (c *FakeIngressClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(ingressclassesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(ingressclassesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.IngressClassList{}) return err @@ -115,10 +119,11 @@ func (c *FakeIngressClasses) DeleteCollection(ctx context.Context, opts metav1.D // Patch applies the patch and returns the patched ingressClass. func (c *FakeIngressClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.IngressClass, err error) { + emptyResult := &v1.IngressClass{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(ingressclassesResource, name, pt, data, subresources...), &v1.IngressClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(ingressclassesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.IngressClass), err } @@ -136,10 +141,11 @@ func (c *FakeIngressClasses) Apply(ctx context.Context, ingressClass *networking if name == nil { return nil, fmt.Errorf("ingressClass.Name must be provided to Apply") } + emptyResult := &v1.IngressClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(ingressclassesResource, *name, types.ApplyPatchType, data), &v1.IngressClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(ingressclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.IngressClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go index dde09774c4..9098bf42e3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go @@ -44,22 +44,24 @@ var networkpoliciesKind = v1.SchemeGroupVersion.WithKind("NetworkPolicy") // Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. func (c *FakeNetworkPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.NetworkPolicy, err error) { + emptyResult := &v1.NetworkPolicy{} obj, err := c.Fake. - Invokes(testing.NewGetAction(networkpoliciesResource, c.ns, name), &v1.NetworkPolicy{}) + Invokes(testing.NewGetActionWithOptions(networkpoliciesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.NetworkPolicy), err } // List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. func (c *FakeNetworkPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) { + emptyResult := &v1.NetworkPolicyList{} obj, err := c.Fake. - Invokes(testing.NewListAction(networkpoliciesResource, networkpoliciesKind, c.ns, opts), &v1.NetworkPolicyList{}) + Invokes(testing.NewListActionWithOptions(networkpoliciesResource, networkpoliciesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeNetworkPolicies) List(ctx context.Context, opts metav1.ListOptions) // Watch returns a watch.Interface that watches the requested networkPolicies. func (c *FakeNetworkPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(networkpoliciesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(networkpoliciesResource, c.ns, opts)) } // Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. 
func (c *FakeNetworkPolicies) Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (result *v1.NetworkPolicy, err error) { + emptyResult := &v1.NetworkPolicy{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(networkpoliciesResource, c.ns, networkPolicy), &v1.NetworkPolicy{}) + Invokes(testing.NewCreateActionWithOptions(networkpoliciesResource, c.ns, networkPolicy, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.NetworkPolicy), err } // Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. func (c *FakeNetworkPolicies) Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (result *v1.NetworkPolicy, err error) { + emptyResult := &v1.NetworkPolicy{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(networkpoliciesResource, c.ns, networkPolicy), &v1.NetworkPolicy{}) + Invokes(testing.NewUpdateActionWithOptions(networkpoliciesResource, c.ns, networkPolicy, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.NetworkPolicy), err } @@ -114,7 +118,7 @@ func (c *FakeNetworkPolicies) Delete(ctx context.Context, name string, opts meta // DeleteCollection deletes a collection of objects. func (c *FakeNetworkPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(networkpoliciesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(networkpoliciesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.NetworkPolicyList{}) return err @@ -122,11 +126,12 @@ func (c *FakeNetworkPolicies) DeleteCollection(ctx context.Context, opts metav1. // Patch applies the patch and returns the patched networkPolicy. func (c *FakeNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error) { + emptyResult := &v1.NetworkPolicy{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, name, pt, data, subresources...), &v1.NetworkPolicy{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(networkpoliciesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.NetworkPolicy), err } @@ -144,11 +149,12 @@ func (c *FakeNetworkPolicies) Apply(ctx context.Context, networkPolicy *networki if name == nil { return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply") } + emptyResult := &v1.NetworkPolicy{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, *name, types.ApplyPatchType, data), &v1.NetworkPolicy{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(networkpoliciesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.NetworkPolicy), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go index 9923d6cbae..afaff4912a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // IngressesGetter has a method to return a IngressInterface. @@ -43,6 +40,7 @@ type IngressesGetter interface { type IngressInterface interface { Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (*v1.Ingress, error) Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,206 +49,25 @@ type IngressInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error) Apply(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) IngressExpansion } // ingresses implements IngressInterface type ingresses struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Ingress, *v1.IngressList, *networkingv1.IngressApplyConfiguration] } // newIngresses returns a Ingresses func newIngresses(c *NetworkingV1Client, namespace string) *ingresses { return &ingresses{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Ingress, *v1.IngressList, *networkingv1.IngressApplyConfiguration]( + "ingresses", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Ingress { return &v1.Ingress{} }, + func() *v1.IngressList { return &v1.IngressList{} }), } } - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *ingresses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Ingress, err error) { - result = &v1.Ingress{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). 
- VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *ingresses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.IngressList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *ingresses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (result *v1.Ingress, err error) { - result = &v1.Ingress{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) { - result = &v1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) { - result = &v1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *ingresses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingresses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ingress. 
-func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error) { - result = &v1.Ingress{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingress. -func (c *ingresses) Apply(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - result = &v1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("ingresses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *ingresses) ApplyStatus(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - - result = &v1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("ingresses"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go index 16c8e48bf0..3301e87994 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // IngressClassesGetter has a method to return a IngressClassInterface. 
@@ -55,143 +52,18 @@ type IngressClassInterface interface { // ingressClasses implements IngressClassInterface type ingressClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.IngressClass, *v1.IngressClassList, *networkingv1.IngressClassApplyConfiguration] } // newIngressClasses returns a IngressClasses func newIngressClasses(c *NetworkingV1Client) *ingressClasses { return &ingressClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.IngressClass, *v1.IngressClassList, *networkingv1.IngressClassApplyConfiguration]( + "ingressclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.IngressClass { return &v1.IngressClass{} }, + func() *v1.IngressClassList { return &v1.IngressClassList{} }), } } - -// Get takes name of the ingressClass, and returns the corresponding ingressClass object, and an error if there is any. -func (c *ingressClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.IngressClass, err error) { - result = &v1.IngressClass{} - err = c.client.Get(). - Resource("ingressclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of IngressClasses that match those selectors. -func (c *ingressClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.IngressClassList{} - err = c.client.Get(). - Resource("ingressclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingressClasses. -func (c *ingressClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("ingressclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ingressClass and creates it. Returns the server's representation of the ingressClass, and an error, if there is any. -func (c *ingressClasses) Create(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.CreateOptions) (result *v1.IngressClass, err error) { - result = &v1.IngressClass{} - err = c.client.Post(). - Resource("ingressclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingressClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ingressClass and updates it. Returns the server's representation of the ingressClass, and an error, if there is any. -func (c *ingressClasses) Update(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.UpdateOptions) (result *v1.IngressClass, err error) { - result = &v1.IngressClass{} - err = c.client.Put(). - Resource("ingressclasses"). - Name(ingressClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingressClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ingressClass and deletes it. Returns an error if one occurs. -func (c *ingressClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("ingressclasses"). - Name(name). - Body(&opts). 
- Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingressClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("ingressclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ingressClass. -func (c *ingressClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.IngressClass, err error) { - result = &v1.IngressClass{} - err = c.client.Patch(pt). - Resource("ingressclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingressClass. -func (c *ingressClasses) Apply(ctx context.Context, ingressClass *networkingv1.IngressClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.IngressClass, err error) { - if ingressClass == nil { - return nil, fmt.Errorf("ingressClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingressClass) - if err != nil { - return nil, err - } - name := ingressClass.Name - if name == nil { - return nil, fmt.Errorf("ingressClass.Name must be provided to Apply") - } - result = &v1.IngressClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("ingressclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go index d7454ce145..ba2ef32dbd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // NetworkPoliciesGetter has a method to return a NetworkPolicyInterface. 
@@ -55,154 +52,18 @@ type NetworkPolicyInterface interface { // networkPolicies implements NetworkPolicyInterface type networkPolicies struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.NetworkPolicy, *v1.NetworkPolicyList, *networkingv1.NetworkPolicyApplyConfiguration] } // newNetworkPolicies returns a NetworkPolicies func newNetworkPolicies(c *NetworkingV1Client, namespace string) *networkPolicies { return &networkPolicies{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.NetworkPolicy, *v1.NetworkPolicyList, *networkingv1.NetworkPolicyApplyConfiguration]( + "networkpolicies", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.NetworkPolicy { return &v1.NetworkPolicy{} }, + func() *v1.NetworkPolicyList { return &v1.NetworkPolicyList{} }), } } - -// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. -func (c *networkPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.NetworkPolicy, err error) { - result = &v1.NetworkPolicy{} - err = c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. -func (c *networkPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.NetworkPolicyList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested networkPolicies. -func (c *networkPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (result *v1.NetworkPolicy, err error) { - result = &v1.NetworkPolicy{} - err = c.client.Post(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(networkPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (result *v1.NetworkPolicy, err error) { - result = &v1.NetworkPolicy{} - err = c.client.Put(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(networkPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(networkPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the networkPolicy and deletes it. 
Returns an error if one occurs. -func (c *networkPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *networkPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched networkPolicy. -func (c *networkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error) { - result = &v1.NetworkPolicy{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied networkPolicy. -func (c *networkPolicies) Apply(ctx context.Context, networkPolicy *networkingv1.NetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.NetworkPolicy, err error) { - if networkPolicy == nil { - return nil, fmt.Errorf("networkPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(networkPolicy) - if err != nil { - return nil, err - } - name := networkPolicy.Name - if name == nil { - return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply") - } - result = &v1.NetworkPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("networkpolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_ipaddress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_ipaddress.go index 4db8df68cb..6ce62b3313 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_ipaddress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_ipaddress.go @@ -43,20 +43,22 @@ var ipaddressesKind = v1alpha1.SchemeGroupVersion.WithKind("IPAddress") // Get takes name of the iPAddress, and returns the corresponding iPAddress object, and an error if there is any. func (c *FakeIPAddresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.IPAddress, err error) { + emptyResult := &v1alpha1.IPAddress{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(ipaddressesResource, name), &v1alpha1.IPAddress{}) + Invokes(testing.NewRootGetActionWithOptions(ipaddressesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.IPAddress), err } // List takes label and field selectors, and returns the list of IPAddresses that match those selectors. func (c *FakeIPAddresses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.IPAddressList, err error) { + emptyResult := &v1alpha1.IPAddressList{} obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(ipaddressesResource, ipaddressesKind, opts), &v1alpha1.IPAddressList{}) + Invokes(testing.NewRootListActionWithOptions(ipaddressesResource, ipaddressesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeIPAddresses) List(ctx context.Context, opts v1.ListOptions) (result // Watch returns a watch.Interface that watches the requested iPAddresses. func (c *FakeIPAddresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(ipaddressesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(ipaddressesResource, opts)) } // Create takes the representation of a iPAddress and creates it. Returns the server's representation of the iPAddress, and an error, if there is any. func (c *FakeIPAddresses) Create(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.CreateOptions) (result *v1alpha1.IPAddress, err error) { + emptyResult := &v1alpha1.IPAddress{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(ipaddressesResource, iPAddress), &v1alpha1.IPAddress{}) + Invokes(testing.NewRootCreateActionWithOptions(ipaddressesResource, iPAddress, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.IPAddress), err } // Update takes the representation of a iPAddress and updates it. Returns the server's representation of the iPAddress, and an error, if there is any. func (c *FakeIPAddresses) Update(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.UpdateOptions) (result *v1alpha1.IPAddress, err error) { + emptyResult := &v1alpha1.IPAddress{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(ipaddressesResource, iPAddress), &v1alpha1.IPAddress{}) + Invokes(testing.NewRootUpdateActionWithOptions(ipaddressesResource, iPAddress, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.IPAddress), err } @@ -107,7 +111,7 @@ func (c *FakeIPAddresses) Delete(ctx context.Context, name string, opts v1.Delet // DeleteCollection deletes a collection of objects. func (c *FakeIPAddresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(ipaddressesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(ipaddressesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.IPAddressList{}) return err @@ -115,10 +119,11 @@ func (c *FakeIPAddresses) DeleteCollection(ctx context.Context, opts v1.DeleteOp // Patch applies the patch and returns the patched iPAddress. func (c *FakeIPAddresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAddress, err error) { + emptyResult := &v1alpha1.IPAddress{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(ipaddressesResource, name, pt, data, subresources...), &v1alpha1.IPAddress{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(ipaddressesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.IPAddress), err } @@ -136,10 +141,11 @@ func (c *FakeIPAddresses) Apply(ctx context.Context, iPAddress *networkingv1alph if name == nil { return nil, fmt.Errorf("iPAddress.Name must be provided to Apply") } + emptyResult := &v1alpha1.IPAddress{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(ipaddressesResource, *name, types.ApplyPatchType, data), &v1alpha1.IPAddress{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(ipaddressesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.IPAddress), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go index 653ef631af..27a78e1ba6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go @@ -43,20 +43,22 @@ var servicecidrsKind = v1alpha1.SchemeGroupVersion.WithKind("ServiceCIDR") // Get takes name of the serviceCIDR, and returns the corresponding serviceCIDR object, and an error if there is any. func (c *FakeServiceCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ServiceCIDR, err error) { + emptyResult := &v1alpha1.ServiceCIDR{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(servicecidrsResource, name), &v1alpha1.ServiceCIDR{}) + Invokes(testing.NewRootGetActionWithOptions(servicecidrsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ServiceCIDR), err } // List takes label and field selectors, and returns the list of ServiceCIDRs that match those selectors. func (c *FakeServiceCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ServiceCIDRList, err error) { + emptyResult := &v1alpha1.ServiceCIDRList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(servicecidrsResource, servicecidrsKind, opts), &v1alpha1.ServiceCIDRList{}) + Invokes(testing.NewRootListActionWithOptions(servicecidrsResource, servicecidrsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeServiceCIDRs) List(ctx context.Context, opts v1.ListOptions) (resul // Watch returns a watch.Interface that watches the requested serviceCIDRs. func (c *FakeServiceCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(servicecidrsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(servicecidrsResource, opts)) } // Create takes the representation of a serviceCIDR and creates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. func (c *FakeServiceCIDRs) Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (result *v1alpha1.ServiceCIDR, err error) { + emptyResult := &v1alpha1.ServiceCIDR{} obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(servicecidrsResource, serviceCIDR), &v1alpha1.ServiceCIDR{}) + Invokes(testing.NewRootCreateActionWithOptions(servicecidrsResource, serviceCIDR, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ServiceCIDR), err } // Update takes the representation of a serviceCIDR and updates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. func (c *FakeServiceCIDRs) Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) { + emptyResult := &v1alpha1.ServiceCIDR{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(servicecidrsResource, serviceCIDR), &v1alpha1.ServiceCIDR{}) + Invokes(testing.NewRootUpdateActionWithOptions(servicecidrsResource, serviceCIDR, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ServiceCIDR), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeServiceCIDRs) UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error) { +func (c *FakeServiceCIDRs) UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) { + emptyResult := &v1alpha1.ServiceCIDR{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(servicecidrsResource, "status", serviceCIDR), &v1alpha1.ServiceCIDR{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(servicecidrsResource, "status", serviceCIDR, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ServiceCIDR), err } @@ -118,7 +123,7 @@ func (c *FakeServiceCIDRs) Delete(ctx context.Context, name string, opts v1.Dele // DeleteCollection deletes a collection of objects. func (c *FakeServiceCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(servicecidrsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(servicecidrsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.ServiceCIDRList{}) return err @@ -126,10 +131,11 @@ func (c *FakeServiceCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteO // Patch applies the patch and returns the patched serviceCIDR. func (c *FakeServiceCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error) { + emptyResult := &v1alpha1.ServiceCIDR{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(servicecidrsResource, name, pt, data, subresources...), &v1alpha1.ServiceCIDR{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(servicecidrsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ServiceCIDR), err } @@ -147,10 +153,11 @@ func (c *FakeServiceCIDRs) Apply(ctx context.Context, serviceCIDR *networkingv1a if name == nil { return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") } + emptyResult := &v1alpha1.ServiceCIDR{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(servicecidrsResource, *name, types.ApplyPatchType, data), &v1alpha1.ServiceCIDR{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(servicecidrsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ServiceCIDR), err } @@ -169,10 +176,11 @@ func (c *FakeServiceCIDRs) ApplyStatus(ctx context.Context, serviceCIDR *network if name == nil { return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") } + emptyResult := &v1alpha1.ServiceCIDR{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(servicecidrsResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.ServiceCIDR{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(servicecidrsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ServiceCIDR), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go index fff193d68d..33e90d18a3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/networking/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // IPAddressesGetter has a method to return a IPAddressInterface. @@ -55,143 +52,18 @@ type IPAddressInterface interface { // iPAddresses implements IPAddressInterface type iPAddresses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.IPAddress, *v1alpha1.IPAddressList, *networkingv1alpha1.IPAddressApplyConfiguration] } // newIPAddresses returns a IPAddresses func newIPAddresses(c *NetworkingV1alpha1Client) *iPAddresses { return &iPAddresses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.IPAddress, *v1alpha1.IPAddressList, *networkingv1alpha1.IPAddressApplyConfiguration]( + "ipaddresses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.IPAddress { return &v1alpha1.IPAddress{} }, + func() *v1alpha1.IPAddressList { return &v1alpha1.IPAddressList{} }), } } - -// Get takes name of the iPAddress, and returns the corresponding iPAddress object, and an error if there is any. -func (c *iPAddresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.IPAddress, err error) { - result = &v1alpha1.IPAddress{} - err = c.client.Get(). - Resource("ipaddresses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of IPAddresses that match those selectors. -func (c *iPAddresses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.IPAddressList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.IPAddressList{} - err = c.client.Get(). 
- Resource("ipaddresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested iPAddresses. -func (c *iPAddresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("ipaddresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a iPAddress and creates it. Returns the server's representation of the iPAddress, and an error, if there is any. -func (c *iPAddresses) Create(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.CreateOptions) (result *v1alpha1.IPAddress, err error) { - result = &v1alpha1.IPAddress{} - err = c.client.Post(). - Resource("ipaddresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(iPAddress). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a iPAddress and updates it. Returns the server's representation of the iPAddress, and an error, if there is any. -func (c *iPAddresses) Update(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.UpdateOptions) (result *v1alpha1.IPAddress, err error) { - result = &v1alpha1.IPAddress{} - err = c.client.Put(). - Resource("ipaddresses"). - Name(iPAddress.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(iPAddress). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the iPAddress and deletes it. Returns an error if one occurs. -func (c *iPAddresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("ipaddresses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *iPAddresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("ipaddresses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched iPAddress. -func (c *iPAddresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAddress, err error) { - result = &v1alpha1.IPAddress{} - err = c.client.Patch(pt). - Resource("ipaddresses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied iPAddress. -func (c *iPAddresses) Apply(ctx context.Context, iPAddress *networkingv1alpha1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.IPAddress, err error) { - if iPAddress == nil { - return nil, fmt.Errorf("iPAddress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(iPAddress) - if err != nil { - return nil, err - } - name := iPAddress.Name - if name == nil { - return nil, fmt.Errorf("iPAddress.Name must be provided to Apply") - } - result = &v1alpha1.IPAddress{} - err = c.client.Patch(types.ApplyPatchType). 
- Resource("ipaddresses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go index 100f290a19..b72fe5b696 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/networking/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ServiceCIDRsGetter has a method to return a ServiceCIDRInterface. @@ -43,6 +40,7 @@ type ServiceCIDRsGetter interface { type ServiceCIDRInterface interface { Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (*v1alpha1.ServiceCIDR, error) Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type ServiceCIDRInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error) Apply(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) ServiceCIDRExpansion } // serviceCIDRs implements ServiceCIDRInterface type serviceCIDRs struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.ServiceCIDR, *v1alpha1.ServiceCIDRList, *networkingv1alpha1.ServiceCIDRApplyConfiguration] } // newServiceCIDRs returns a ServiceCIDRs func newServiceCIDRs(c *NetworkingV1alpha1Client) *serviceCIDRs { return &serviceCIDRs{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.ServiceCIDR, *v1alpha1.ServiceCIDRList, *networkingv1alpha1.ServiceCIDRApplyConfiguration]( + "servicecidrs", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.ServiceCIDR { return &v1alpha1.ServiceCIDR{} }, + func() *v1alpha1.ServiceCIDRList { return &v1alpha1.ServiceCIDRList{} }), } } - -// Get takes name of the serviceCIDR, and returns the corresponding serviceCIDR object, and an error if there is any. -func (c *serviceCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ServiceCIDR, err error) { - result = &v1alpha1.ServiceCIDR{} - err = c.client.Get(). - Resource("servicecidrs"). - Name(name). 
- VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ServiceCIDRs that match those selectors. -func (c *serviceCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ServiceCIDRList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ServiceCIDRList{} - err = c.client.Get(). - Resource("servicecidrs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested serviceCIDRs. -func (c *serviceCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("servicecidrs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a serviceCIDR and creates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. -func (c *serviceCIDRs) Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (result *v1alpha1.ServiceCIDR, err error) { - result = &v1alpha1.ServiceCIDR{} - err = c.client.Post(). - Resource("servicecidrs"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(serviceCIDR). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a serviceCIDR and updates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. -func (c *serviceCIDRs) Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) { - result = &v1alpha1.ServiceCIDR{} - err = c.client.Put(). - Resource("servicecidrs"). - Name(serviceCIDR.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(serviceCIDR). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *serviceCIDRs) UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) { - result = &v1alpha1.ServiceCIDR{} - err = c.client.Put(). - Resource("servicecidrs"). - Name(serviceCIDR.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(serviceCIDR). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the serviceCIDR and deletes it. Returns an error if one occurs. -func (c *serviceCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("servicecidrs"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *serviceCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("servicecidrs"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). 
- Error() -} - -// Patch applies the patch and returns the patched serviceCIDR. -func (c *serviceCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error) { - result = &v1alpha1.ServiceCIDR{} - err = c.client.Patch(pt). - Resource("servicecidrs"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied serviceCIDR. -func (c *serviceCIDRs) Apply(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) { - if serviceCIDR == nil { - return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(serviceCIDR) - if err != nil { - return nil, err - } - name := serviceCIDR.Name - if name == nil { - return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") - } - result = &v1alpha1.ServiceCIDR{} - err = c.client.Patch(types.ApplyPatchType). - Resource("servicecidrs"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *serviceCIDRs) ApplyStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) { - if serviceCIDR == nil { - return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(serviceCIDR) - if err != nil { - return nil, err - } - - name := serviceCIDR.Name - if name == nil { - return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") - } - - result = &v1alpha1.ServiceCIDR{} - err = c.client.Patch(types.ApplyPatchType). - Resource("servicecidrs"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go index 7a3b861be0..59bf762a01 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go @@ -44,22 +44,24 @@ var ingressesKind = v1beta1.SchemeGroupVersion.WithKind("Ingress") // Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. func (c *FakeIngresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewGetAction(ingressesResource, c.ns, name), &v1beta1.Ingress{}) + Invokes(testing.NewGetActionWithOptions(ingressesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } // List takes label and field selectors, and returns the list of Ingresses that match those selectors. 
func (c *FakeIngresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) { + emptyResult := &v1beta1.IngressList{} obj, err := c.Fake. - Invokes(testing.NewListAction(ingressesResource, ingressesKind, c.ns, opts), &v1beta1.IngressList{}) + Invokes(testing.NewListActionWithOptions(ingressesResource, ingressesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakeIngresses) List(ctx context.Context, opts v1.ListOptions) (result * // Watch returns a watch.Interface that watches the requested ingresses. func (c *FakeIngresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(ingressesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(ingressesResource, c.ns, opts)) } // Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. func (c *FakeIngresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) { + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) + Invokes(testing.NewCreateActionWithOptions(ingressesResource, c.ns, ingress, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } // Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. func (c *FakeIngresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) + Invokes(testing.NewUpdateActionWithOptions(ingressesResource, c.ns, ingress, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) { +func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &v1beta1.Ingress{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(ingressesResource, "status", c.ns, ingress, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } @@ -126,7 +131,7 @@ func (c *FakeIngresses) Delete(ctx context.Context, name string, opts v1.DeleteO // DeleteCollection deletes a collection of objects. 
func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(ingressesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(ingressesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.IngressList{}) return err @@ -134,11 +139,12 @@ func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts v1.DeleteOpti // Patch applies the patch and returns the patched ingress. func (c *FakeIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) { + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, pt, data, subresources...), &v1beta1.Ingress{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } @@ -156,11 +162,12 @@ func (c *FakeIngresses) Apply(ctx context.Context, ingress *networkingv1beta1.In if name == nil { return nil, fmt.Errorf("ingress.Name must be provided to Apply") } + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Ingress{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } @@ -179,11 +186,12 @@ func (c *FakeIngresses) ApplyStatus(ctx context.Context, ingress *networkingv1be if name == nil { return nil, fmt.Errorf("ingress.Name must be provided to Apply") } + emptyResult := &v1beta1.Ingress{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.Ingress{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(ingressesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Ingress), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go index 1804e61fc3..3001de8e42 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go @@ -43,20 +43,22 @@ var ingressclassesKind = v1beta1.SchemeGroupVersion.WithKind("IngressClass") // Get takes name of the ingressClass, and returns the corresponding ingressClass object, and an error if there is any. func (c *FakeIngressClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.IngressClass, err error) { + emptyResult := &v1beta1.IngressClass{} obj, err := c.Fake. 
- Invokes(testing.NewRootGetAction(ingressclassesResource, name), &v1beta1.IngressClass{}) + Invokes(testing.NewRootGetActionWithOptions(ingressclassesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.IngressClass), err } // List takes label and field selectors, and returns the list of IngressClasses that match those selectors. func (c *FakeIngressClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressClassList, err error) { + emptyResult := &v1beta1.IngressClassList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(ingressclassesResource, ingressclassesKind, opts), &v1beta1.IngressClassList{}) + Invokes(testing.NewRootListActionWithOptions(ingressclassesResource, ingressclassesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeIngressClasses) List(ctx context.Context, opts v1.ListOptions) (res // Watch returns a watch.Interface that watches the requested ingressClasses. func (c *FakeIngressClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(ingressclassesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(ingressclassesResource, opts)) } // Create takes the representation of a ingressClass and creates it. Returns the server's representation of the ingressClass, and an error, if there is any. func (c *FakeIngressClasses) Create(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.CreateOptions) (result *v1beta1.IngressClass, err error) { + emptyResult := &v1beta1.IngressClass{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(ingressclassesResource, ingressClass), &v1beta1.IngressClass{}) + Invokes(testing.NewRootCreateActionWithOptions(ingressclassesResource, ingressClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.IngressClass), err } // Update takes the representation of a ingressClass and updates it. Returns the server's representation of the ingressClass, and an error, if there is any. func (c *FakeIngressClasses) Update(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.UpdateOptions) (result *v1beta1.IngressClass, err error) { + emptyResult := &v1beta1.IngressClass{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(ingressclassesResource, ingressClass), &v1beta1.IngressClass{}) + Invokes(testing.NewRootUpdateActionWithOptions(ingressclassesResource, ingressClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.IngressClass), err } @@ -107,7 +111,7 @@ func (c *FakeIngressClasses) Delete(ctx context.Context, name string, opts v1.De // DeleteCollection deletes a collection of objects. func (c *FakeIngressClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(ingressclassesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(ingressclassesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.IngressClassList{}) return err @@ -115,10 +119,11 @@ func (c *FakeIngressClasses) DeleteCollection(ctx context.Context, opts v1.Delet // Patch applies the patch and returns the patched ingressClass. 
func (c *FakeIngressClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.IngressClass, err error) { + emptyResult := &v1beta1.IngressClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(ingressclassesResource, name, pt, data, subresources...), &v1beta1.IngressClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(ingressclassesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.IngressClass), err } @@ -136,10 +141,11 @@ func (c *FakeIngressClasses) Apply(ctx context.Context, ingressClass *networking if name == nil { return nil, fmt.Errorf("ingressClass.Name must be provided to Apply") } + emptyResult := &v1beta1.IngressClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(ingressclassesResource, *name, types.ApplyPatchType, data), &v1beta1.IngressClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(ingressclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.IngressClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ipaddress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ipaddress.go new file mode 100644 index 0000000000..d8352bb79f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ipaddress.go @@ -0,0 +1,151 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + testing "k8s.io/client-go/testing" +) + +// FakeIPAddresses implements IPAddressInterface +type FakeIPAddresses struct { + Fake *FakeNetworkingV1beta1 +} + +var ipaddressesResource = v1beta1.SchemeGroupVersion.WithResource("ipaddresses") + +var ipaddressesKind = v1beta1.SchemeGroupVersion.WithKind("IPAddress") + +// Get takes name of the iPAddress, and returns the corresponding iPAddress object, and an error if there is any. +func (c *FakeIPAddresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.IPAddress, err error) { + emptyResult := &v1beta1.IPAddress{} + obj, err := c.Fake. + Invokes(testing.NewRootGetActionWithOptions(ipaddressesResource, name, options), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.IPAddress), err +} + +// List takes label and field selectors, and returns the list of IPAddresses that match those selectors. 
+func (c *FakeIPAddresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IPAddressList, err error) { + emptyResult := &v1beta1.IPAddressList{} + obj, err := c.Fake. + Invokes(testing.NewRootListActionWithOptions(ipaddressesResource, ipaddressesKind, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.IPAddressList{ListMeta: obj.(*v1beta1.IPAddressList).ListMeta} + for _, item := range obj.(*v1beta1.IPAddressList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested iPAddresses. +func (c *FakeIPAddresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchActionWithOptions(ipaddressesResource, opts)) +} + +// Create takes the representation of a iPAddress and creates it. Returns the server's representation of the iPAddress, and an error, if there is any. +func (c *FakeIPAddresses) Create(ctx context.Context, iPAddress *v1beta1.IPAddress, opts v1.CreateOptions) (result *v1beta1.IPAddress, err error) { + emptyResult := &v1beta1.IPAddress{} + obj, err := c.Fake. + Invokes(testing.NewRootCreateActionWithOptions(ipaddressesResource, iPAddress, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.IPAddress), err +} + +// Update takes the representation of a iPAddress and updates it. Returns the server's representation of the iPAddress, and an error, if there is any. +func (c *FakeIPAddresses) Update(ctx context.Context, iPAddress *v1beta1.IPAddress, opts v1.UpdateOptions) (result *v1beta1.IPAddress, err error) { + emptyResult := &v1beta1.IPAddress{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateActionWithOptions(ipaddressesResource, iPAddress, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.IPAddress), err +} + +// Delete takes name of the iPAddress and deletes it. Returns an error if one occurs. +func (c *FakeIPAddresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(ipaddressesResource, name, opts), &v1beta1.IPAddress{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeIPAddresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionActionWithOptions(ipaddressesResource, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1beta1.IPAddressList{}) + return err +} + +// Patch applies the patch and returns the patched iPAddress. +func (c *FakeIPAddresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.IPAddress, err error) { + emptyResult := &v1beta1.IPAddress{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(ipaddressesResource, name, pt, data, opts, subresources...), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.IPAddress), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied iPAddress. 
+func (c *FakeIPAddresses) Apply(ctx context.Context, iPAddress *networkingv1beta1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.IPAddress, err error) { + if iPAddress == nil { + return nil, fmt.Errorf("iPAddress provided to Apply must not be nil") + } + data, err := json.Marshal(iPAddress) + if err != nil { + return nil, err + } + name := iPAddress.Name + if name == nil { + return nil, fmt.Errorf("iPAddress.Name must be provided to Apply") + } + emptyResult := &v1beta1.IPAddress{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(ipaddressesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.IPAddress), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_networking_client.go index b8792a3064..bd72d59297 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_networking_client.go @@ -28,6 +28,10 @@ type FakeNetworkingV1beta1 struct { *testing.Fake } +func (c *FakeNetworkingV1beta1) IPAddresses() v1beta1.IPAddressInterface { + return &FakeIPAddresses{c} +} + func (c *FakeNetworkingV1beta1) Ingresses(namespace string) v1beta1.IngressInterface { return &FakeIngresses{c, namespace} } @@ -36,6 +40,10 @@ func (c *FakeNetworkingV1beta1) IngressClasses() v1beta1.IngressClassInterface { return &FakeIngressClasses{c} } +func (c *FakeNetworkingV1beta1) ServiceCIDRs() v1beta1.ServiceCIDRInterface { + return &FakeServiceCIDRs{c} +} + // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. func (c *FakeNetworkingV1beta1) RESTClient() rest.Interface { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_servicecidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_servicecidr.go new file mode 100644 index 0000000000..0eb5b2f2bb --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_servicecidr.go @@ -0,0 +1,186 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + testing "k8s.io/client-go/testing" +) + +// FakeServiceCIDRs implements ServiceCIDRInterface +type FakeServiceCIDRs struct { + Fake *FakeNetworkingV1beta1 +} + +var servicecidrsResource = v1beta1.SchemeGroupVersion.WithResource("servicecidrs") + +var servicecidrsKind = v1beta1.SchemeGroupVersion.WithKind("ServiceCIDR") + +// Get takes name of the serviceCIDR, and returns the corresponding serviceCIDR object, and an error if there is any. +func (c *FakeServiceCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ServiceCIDR, err error) { + emptyResult := &v1beta1.ServiceCIDR{} + obj, err := c.Fake. + Invokes(testing.NewRootGetActionWithOptions(servicecidrsResource, name, options), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.ServiceCIDR), err +} + +// List takes label and field selectors, and returns the list of ServiceCIDRs that match those selectors. +func (c *FakeServiceCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ServiceCIDRList, err error) { + emptyResult := &v1beta1.ServiceCIDRList{} + obj, err := c.Fake. + Invokes(testing.NewRootListActionWithOptions(servicecidrsResource, servicecidrsKind, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.ServiceCIDRList{ListMeta: obj.(*v1beta1.ServiceCIDRList).ListMeta} + for _, item := range obj.(*v1beta1.ServiceCIDRList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested serviceCIDRs. +func (c *FakeServiceCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchActionWithOptions(servicecidrsResource, opts)) +} + +// Create takes the representation of a serviceCIDR and creates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. +func (c *FakeServiceCIDRs) Create(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.CreateOptions) (result *v1beta1.ServiceCIDR, err error) { + emptyResult := &v1beta1.ServiceCIDR{} + obj, err := c.Fake. + Invokes(testing.NewRootCreateActionWithOptions(servicecidrsResource, serviceCIDR, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.ServiceCIDR), err +} + +// Update takes the representation of a serviceCIDR and updates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. +func (c *FakeServiceCIDRs) Update(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.UpdateOptions) (result *v1beta1.ServiceCIDR, err error) { + emptyResult := &v1beta1.ServiceCIDR{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateActionWithOptions(servicecidrsResource, serviceCIDR, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.ServiceCIDR), err +} + +// UpdateStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeServiceCIDRs) UpdateStatus(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.UpdateOptions) (result *v1beta1.ServiceCIDR, err error) { + emptyResult := &v1beta1.ServiceCIDR{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(servicecidrsResource, "status", serviceCIDR, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.ServiceCIDR), err +} + +// Delete takes name of the serviceCIDR and deletes it. Returns an error if one occurs. +func (c *FakeServiceCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(servicecidrsResource, name, opts), &v1beta1.ServiceCIDR{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeServiceCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionActionWithOptions(servicecidrsResource, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1beta1.ServiceCIDRList{}) + return err +} + +// Patch applies the patch and returns the patched serviceCIDR. +func (c *FakeServiceCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ServiceCIDR, err error) { + emptyResult := &v1beta1.ServiceCIDR{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(servicecidrsResource, name, pt, data, opts, subresources...), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.ServiceCIDR), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied serviceCIDR. +func (c *FakeServiceCIDRs) Apply(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ServiceCIDR, err error) { + if serviceCIDR == nil { + return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") + } + data, err := json.Marshal(serviceCIDR) + if err != nil { + return nil, err + } + name := serviceCIDR.Name + if name == nil { + return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") + } + emptyResult := &v1beta1.ServiceCIDR{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(servicecidrsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.ServiceCIDR), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeServiceCIDRs) ApplyStatus(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ServiceCIDR, err error) { + if serviceCIDR == nil { + return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") + } + data, err := json.Marshal(serviceCIDR) + if err != nil { + return nil, err + } + name := serviceCIDR.Name + if name == nil { + return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") + } + emptyResult := &v1beta1.ServiceCIDR{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceActionWithOptions(servicecidrsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.ServiceCIDR), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go index f74c7257ad..ac1ffbb984 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go @@ -18,6 +18,10 @@ limitations under the License. package v1beta1 +type IPAddressExpansion interface{} + type IngressExpansion interface{} type IngressClassExpansion interface{} + +type ServiceCIDRExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go index b309281afa..90be275adc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/networking/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // IngressesGetter has a method to return a IngressInterface. @@ -43,6 +40,7 @@ type IngressesGetter interface { type IngressInterface interface { Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (*v1beta1.Ingress, error) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type IngressInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) Apply(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) IngressExpansion } // ingresses implements IngressInterface type ingresses struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *networkingv1beta1.IngressApplyConfiguration] } // newIngresses returns a Ingresses func newIngresses(c *NetworkingV1beta1Client, namespace string) *ingresses { return &ingresses{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *networkingv1beta1.IngressApplyConfiguration]( + "ingresses", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.Ingress { return &v1beta1.Ingress{} }, + func() *v1beta1.IngressList { return &v1beta1.IngressList{} }), } } - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *ingresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *ingresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.IngressList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *ingresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *ingresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ingress. -func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingress. -func (c *ingresses) Apply(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - result = &v1beta1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("ingresses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *ingresses) ApplyStatus(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - - result = &v1beta1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("ingresses"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go index 50ccdfdbba..c55da4168f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/networking/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // IngressClassesGetter has a method to return a IngressClassInterface. @@ -55,143 +52,18 @@ type IngressClassInterface interface { // ingressClasses implements IngressClassInterface type ingressClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.IngressClass, *v1beta1.IngressClassList, *networkingv1beta1.IngressClassApplyConfiguration] } // newIngressClasses returns a IngressClasses func newIngressClasses(c *NetworkingV1beta1Client) *ingressClasses { return &ingressClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.IngressClass, *v1beta1.IngressClassList, *networkingv1beta1.IngressClassApplyConfiguration]( + "ingressclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.IngressClass { return &v1beta1.IngressClass{} }, + func() *v1beta1.IngressClassList { return &v1beta1.IngressClassList{} }), } } - -// Get takes name of the ingressClass, and returns the corresponding ingressClass object, and an error if there is any. -func (c *ingressClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.IngressClass, err error) { - result = &v1beta1.IngressClass{} - err = c.client.Get(). - Resource("ingressclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of IngressClasses that match those selectors. -func (c *ingressClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.IngressClassList{} - err = c.client.Get(). - Resource("ingressclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingressClasses. -func (c *ingressClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("ingressclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ingressClass and creates it. Returns the server's representation of the ingressClass, and an error, if there is any. 
-func (c *ingressClasses) Create(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.CreateOptions) (result *v1beta1.IngressClass, err error) { - result = &v1beta1.IngressClass{} - err = c.client.Post(). - Resource("ingressclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingressClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ingressClass and updates it. Returns the server's representation of the ingressClass, and an error, if there is any. -func (c *ingressClasses) Update(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.UpdateOptions) (result *v1beta1.IngressClass, err error) { - result = &v1beta1.IngressClass{} - err = c.client.Put(). - Resource("ingressclasses"). - Name(ingressClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingressClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ingressClass and deletes it. Returns an error if one occurs. -func (c *ingressClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("ingressclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingressClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("ingressclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ingressClass. -func (c *ingressClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.IngressClass, err error) { - result = &v1beta1.IngressClass{} - err = c.client.Patch(pt). - Resource("ingressclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingressClass. -func (c *ingressClasses) Apply(ctx context.Context, ingressClass *networkingv1beta1.IngressClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.IngressClass, err error) { - if ingressClass == nil { - return nil, fmt.Errorf("ingressClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingressClass) - if err != nil { - return nil, err - } - name := ingressClass.Name - if name == nil { - return nil, fmt.Errorf("ingressClass.Name must be provided to Apply") - } - result = &v1beta1.IngressClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("ingressclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ipaddress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ipaddress.go new file mode 100644 index 0000000000..09e4139e74 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ipaddress.go @@ -0,0 +1,69 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + + v1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// IPAddressesGetter has a method to return a IPAddressInterface. +// A group's client should implement this interface. +type IPAddressesGetter interface { + IPAddresses() IPAddressInterface +} + +// IPAddressInterface has methods to work with IPAddress resources. +type IPAddressInterface interface { + Create(ctx context.Context, iPAddress *v1beta1.IPAddress, opts v1.CreateOptions) (*v1beta1.IPAddress, error) + Update(ctx context.Context, iPAddress *v1beta1.IPAddress, opts v1.UpdateOptions) (*v1beta1.IPAddress, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.IPAddress, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.IPAddressList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.IPAddress, err error) + Apply(ctx context.Context, iPAddress *networkingv1beta1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.IPAddress, err error) + IPAddressExpansion +} + +// iPAddresses implements IPAddressInterface +type iPAddresses struct { + *gentype.ClientWithListAndApply[*v1beta1.IPAddress, *v1beta1.IPAddressList, *networkingv1beta1.IPAddressApplyConfiguration] +} + +// newIPAddresses returns a IPAddresses +func newIPAddresses(c *NetworkingV1beta1Client) *iPAddresses { + return &iPAddresses{ + gentype.NewClientWithListAndApply[*v1beta1.IPAddress, *v1beta1.IPAddressList, *networkingv1beta1.IPAddressApplyConfiguration]( + "ipaddresses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.IPAddress { return &v1beta1.IPAddress{} }, + func() *v1beta1.IPAddressList { return &v1beta1.IPAddressList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go index 851634ed0f..d35225abd8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go @@ -28,8 +28,10 @@ import ( type NetworkingV1beta1Interface interface { RESTClient() rest.Interface + IPAddressesGetter IngressesGetter IngressClassesGetter + ServiceCIDRsGetter } // NetworkingV1beta1Client is used to interact with features provided by the networking.k8s.io group. 
@@ -37,6 +39,10 @@ type NetworkingV1beta1Client struct { restClient rest.Interface } +func (c *NetworkingV1beta1Client) IPAddresses() IPAddressInterface { + return newIPAddresses(c) +} + func (c *NetworkingV1beta1Client) Ingresses(namespace string) IngressInterface { return newIngresses(c, namespace) } @@ -45,6 +51,10 @@ func (c *NetworkingV1beta1Client) IngressClasses() IngressClassInterface { return newIngressClasses(c) } +func (c *NetworkingV1beta1Client) ServiceCIDRs() ServiceCIDRInterface { + return newServiceCIDRs(c) +} + // NewForConfig creates a new NetworkingV1beta1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/servicecidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/servicecidr.go new file mode 100644 index 0000000000..d3336f2ec0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/servicecidr.go @@ -0,0 +1,73 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + + v1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ServiceCIDRsGetter has a method to return a ServiceCIDRInterface. +// A group's client should implement this interface. +type ServiceCIDRsGetter interface { + ServiceCIDRs() ServiceCIDRInterface +} + +// ServiceCIDRInterface has methods to work with ServiceCIDR resources. +type ServiceCIDRInterface interface { + Create(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.CreateOptions) (*v1beta1.ServiceCIDR, error) + Update(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.UpdateOptions) (*v1beta1.ServiceCIDR, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.UpdateOptions) (*v1beta1.ServiceCIDR, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ServiceCIDR, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ServiceCIDRList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ServiceCIDR, err error) + Apply(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ServiceCIDR, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ServiceCIDR, err error) + ServiceCIDRExpansion +} + +// serviceCIDRs implements ServiceCIDRInterface +type serviceCIDRs struct { + *gentype.ClientWithListAndApply[*v1beta1.ServiceCIDR, *v1beta1.ServiceCIDRList, *networkingv1beta1.ServiceCIDRApplyConfiguration] +} + +// newServiceCIDRs returns a ServiceCIDRs +func newServiceCIDRs(c *NetworkingV1beta1Client) *serviceCIDRs { + return &serviceCIDRs{ + gentype.NewClientWithListAndApply[*v1beta1.ServiceCIDR, *v1beta1.ServiceCIDRList, *networkingv1beta1.ServiceCIDRApplyConfiguration]( + "servicecidrs", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.ServiceCIDR { return &v1beta1.ServiceCIDR{} }, + func() *v1beta1.ServiceCIDRList { return &v1beta1.ServiceCIDRList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go index 35cfbcae4b..0a52706284 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go @@ -43,20 +43,22 @@ var runtimeclassesKind = v1.SchemeGroupVersion.WithKind("RuntimeClass") // Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. func (c *FakeRuntimeClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RuntimeClass, err error) { + emptyResult := &v1.RuntimeClass{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(runtimeclassesResource, name), &v1.RuntimeClass{}) + Invokes(testing.NewRootGetActionWithOptions(runtimeclassesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.RuntimeClass), err } // List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. func (c *FakeRuntimeClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RuntimeClassList, err error) { + emptyResult := &v1.RuntimeClassList{} obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(runtimeclassesResource, runtimeclassesKind, opts), &v1.RuntimeClassList{}) + Invokes(testing.NewRootListActionWithOptions(runtimeclassesResource, runtimeclassesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeRuntimeClasses) List(ctx context.Context, opts metav1.ListOptions) // Watch returns a watch.Interface that watches the requested runtimeClasses. func (c *FakeRuntimeClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(runtimeclassesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(runtimeclassesResource, opts)) } // Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.CreateOptions) (result *v1.RuntimeClass, err error) { + emptyResult := &v1.RuntimeClass{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(runtimeclassesResource, runtimeClass), &v1.RuntimeClass{}) + Invokes(testing.NewRootCreateActionWithOptions(runtimeclassesResource, runtimeClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.RuntimeClass), err } // Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.UpdateOptions) (result *v1.RuntimeClass, err error) { + emptyResult := &v1.RuntimeClass{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(runtimeclassesResource, runtimeClass), &v1.RuntimeClass{}) + Invokes(testing.NewRootUpdateActionWithOptions(runtimeclassesResource, runtimeClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.RuntimeClass), err } @@ -107,7 +111,7 @@ func (c *FakeRuntimeClasses) Delete(ctx context.Context, name string, opts metav // DeleteCollection deletes a collection of objects. func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(runtimeclassesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(runtimeclassesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.RuntimeClassList{}) return err @@ -115,10 +119,11 @@ func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts metav1.D // Patch applies the patch and returns the patched runtimeClass. func (c *FakeRuntimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RuntimeClass, err error) { + emptyResult := &v1.RuntimeClass{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, name, pt, data, subresources...), &v1.RuntimeClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(runtimeclassesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.RuntimeClass), err } @@ -136,10 +141,11 @@ func (c *FakeRuntimeClasses) Apply(ctx context.Context, runtimeClass *nodev1.Run if name == nil { return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply") } + emptyResult := &v1.RuntimeClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, *name, types.ApplyPatchType, data), &v1.RuntimeClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(runtimeclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.RuntimeClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go index 5ec38b203e..6c8110640d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/node/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" nodev1 "k8s.io/client-go/applyconfigurations/node/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RuntimeClassesGetter has a method to return a RuntimeClassInterface. @@ -55,143 +52,18 @@ type RuntimeClassInterface interface { // runtimeClasses implements RuntimeClassInterface type runtimeClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.RuntimeClass, *v1.RuntimeClassList, *nodev1.RuntimeClassApplyConfiguration] } // newRuntimeClasses returns a RuntimeClasses func newRuntimeClasses(c *NodeV1Client) *runtimeClasses { return &runtimeClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.RuntimeClass, *v1.RuntimeClassList, *nodev1.RuntimeClassApplyConfiguration]( + "runtimeclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.RuntimeClass { return &v1.RuntimeClass{} }, + func() *v1.RuntimeClassList { return &v1.RuntimeClassList{} }), } } - -// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *runtimeClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RuntimeClass, err error) { - result = &v1.RuntimeClass{} - err = c.client.Get(). - Resource("runtimeclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *runtimeClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RuntimeClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.RuntimeClassList{} - err = c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *runtimeClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.CreateOptions) (result *v1.RuntimeClass, err error) { - result = &v1.RuntimeClass{} - err = c.client.Post(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(runtimeClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.UpdateOptions) (result *v1.RuntimeClass, err error) { - result = &v1.RuntimeClass{} - err = c.client.Put(). - Resource("runtimeclasses"). - Name(runtimeClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(runtimeClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. -func (c *runtimeClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("runtimeclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *runtimeClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("runtimeclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched runtimeClass. -func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RuntimeClass, err error) { - result = &v1.RuntimeClass{} - err = c.client.Patch(pt). - Resource("runtimeclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied runtimeClass. -func (c *runtimeClasses) Apply(ctx context.Context, runtimeClass *nodev1.RuntimeClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RuntimeClass, err error) { - if runtimeClass == nil { - return nil, fmt.Errorf("runtimeClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(runtimeClass) - if err != nil { - return nil, err - } - name := runtimeClass.Name - if name == nil { - return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply") - } - result = &v1.RuntimeClass{} - err = c.client.Patch(types.ApplyPatchType). 
- Resource("runtimeclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go index 2ff7d3f973..bcd261d003 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go @@ -43,20 +43,22 @@ var runtimeclassesKind = v1alpha1.SchemeGroupVersion.WithKind("RuntimeClass") // Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. func (c *FakeRuntimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RuntimeClass, err error) { + emptyResult := &v1alpha1.RuntimeClass{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(runtimeclassesResource, name), &v1alpha1.RuntimeClass{}) + Invokes(testing.NewRootGetActionWithOptions(runtimeclassesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.RuntimeClass), err } // List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. func (c *FakeRuntimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RuntimeClassList, err error) { + emptyResult := &v1alpha1.RuntimeClassList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(runtimeclassesResource, runtimeclassesKind, opts), &v1alpha1.RuntimeClassList{}) + Invokes(testing.NewRootListActionWithOptions(runtimeclassesResource, runtimeclassesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeRuntimeClasses) List(ctx context.Context, opts v1.ListOptions) (res // Watch returns a watch.Interface that watches the requested runtimeClasses. func (c *FakeRuntimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(runtimeclassesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(runtimeclassesResource, opts)) } // Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.CreateOptions) (result *v1alpha1.RuntimeClass, err error) { + emptyResult := &v1alpha1.RuntimeClass{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(runtimeclassesResource, runtimeClass), &v1alpha1.RuntimeClass{}) + Invokes(testing.NewRootCreateActionWithOptions(runtimeclassesResource, runtimeClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.RuntimeClass), err } // Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.UpdateOptions) (result *v1alpha1.RuntimeClass, err error) { + emptyResult := &v1alpha1.RuntimeClass{} obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateAction(runtimeclassesResource, runtimeClass), &v1alpha1.RuntimeClass{}) + Invokes(testing.NewRootUpdateActionWithOptions(runtimeclassesResource, runtimeClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.RuntimeClass), err } @@ -107,7 +111,7 @@ func (c *FakeRuntimeClasses) Delete(ctx context.Context, name string, opts v1.De // DeleteCollection deletes a collection of objects. func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(runtimeclassesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(runtimeclassesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.RuntimeClassList{}) return err @@ -115,10 +119,11 @@ func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts v1.Delet // Patch applies the patch and returns the patched runtimeClass. func (c *FakeRuntimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RuntimeClass, err error) { + emptyResult := &v1alpha1.RuntimeClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, name, pt, data, subresources...), &v1alpha1.RuntimeClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(runtimeclassesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.RuntimeClass), err } @@ -136,10 +141,11 @@ func (c *FakeRuntimeClasses) Apply(ctx context.Context, runtimeClass *nodev1alph if name == nil { return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply") } + emptyResult := &v1alpha1.RuntimeClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, *name, types.ApplyPatchType, data), &v1alpha1.RuntimeClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(runtimeclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.RuntimeClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go index 039a7ace15..60aa4a213b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/node/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" nodev1alpha1 "k8s.io/client-go/applyconfigurations/node/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RuntimeClassesGetter has a method to return a RuntimeClassInterface. 
@@ -55,143 +52,18 @@ type RuntimeClassInterface interface { // runtimeClasses implements RuntimeClassInterface type runtimeClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.RuntimeClass, *v1alpha1.RuntimeClassList, *nodev1alpha1.RuntimeClassApplyConfiguration] } // newRuntimeClasses returns a RuntimeClasses func newRuntimeClasses(c *NodeV1alpha1Client) *runtimeClasses { return &runtimeClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.RuntimeClass, *v1alpha1.RuntimeClassList, *nodev1alpha1.RuntimeClassApplyConfiguration]( + "runtimeclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.RuntimeClass { return &v1alpha1.RuntimeClass{} }, + func() *v1alpha1.RuntimeClassList { return &v1alpha1.RuntimeClassList{} }), } } - -// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *runtimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RuntimeClass, err error) { - result = &v1alpha1.RuntimeClass{} - err = c.client.Get(). - Resource("runtimeclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *runtimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RuntimeClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.RuntimeClassList{} - err = c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *runtimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.CreateOptions) (result *v1alpha1.RuntimeClass, err error) { - result = &v1alpha1.RuntimeClass{} - err = c.client.Post(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(runtimeClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.UpdateOptions) (result *v1alpha1.RuntimeClass, err error) { - result = &v1alpha1.RuntimeClass{} - err = c.client.Put(). - Resource("runtimeclasses"). - Name(runtimeClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(runtimeClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. 
-func (c *runtimeClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("runtimeclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *runtimeClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("runtimeclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched runtimeClass. -func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RuntimeClass, err error) { - result = &v1alpha1.RuntimeClass{} - err = c.client.Patch(pt). - Resource("runtimeclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied runtimeClass. -func (c *runtimeClasses) Apply(ctx context.Context, runtimeClass *nodev1alpha1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.RuntimeClass, err error) { - if runtimeClass == nil { - return nil, fmt.Errorf("runtimeClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(runtimeClass) - if err != nil { - return nil, err - } - name := runtimeClass.Name - if name == nil { - return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply") - } - result = &v1alpha1.RuntimeClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("runtimeclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go index e6552f9aca..a3c8c018c5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go @@ -43,20 +43,22 @@ var runtimeclassesKind = v1beta1.SchemeGroupVersion.WithKind("RuntimeClass") // Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. func (c *FakeRuntimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RuntimeClass, err error) { + emptyResult := &v1beta1.RuntimeClass{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(runtimeclassesResource, name), &v1beta1.RuntimeClass{}) + Invokes(testing.NewRootGetActionWithOptions(runtimeclassesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.RuntimeClass), err } // List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. func (c *FakeRuntimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RuntimeClassList, err error) { + emptyResult := &v1beta1.RuntimeClassList{} obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(runtimeclassesResource, runtimeclassesKind, opts), &v1beta1.RuntimeClassList{}) + Invokes(testing.NewRootListActionWithOptions(runtimeclassesResource, runtimeclassesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeRuntimeClasses) List(ctx context.Context, opts v1.ListOptions) (res // Watch returns a watch.Interface that watches the requested runtimeClasses. func (c *FakeRuntimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(runtimeclassesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(runtimeclassesResource, opts)) } // Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.CreateOptions) (result *v1beta1.RuntimeClass, err error) { + emptyResult := &v1beta1.RuntimeClass{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(runtimeclassesResource, runtimeClass), &v1beta1.RuntimeClass{}) + Invokes(testing.NewRootCreateActionWithOptions(runtimeclassesResource, runtimeClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.RuntimeClass), err } // Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.UpdateOptions) (result *v1beta1.RuntimeClass, err error) { + emptyResult := &v1beta1.RuntimeClass{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(runtimeclassesResource, runtimeClass), &v1beta1.RuntimeClass{}) + Invokes(testing.NewRootUpdateActionWithOptions(runtimeclassesResource, runtimeClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.RuntimeClass), err } @@ -107,7 +111,7 @@ func (c *FakeRuntimeClasses) Delete(ctx context.Context, name string, opts v1.De // DeleteCollection deletes a collection of objects. func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(runtimeclassesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(runtimeclassesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.RuntimeClassList{}) return err @@ -115,10 +119,11 @@ func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts v1.Delet // Patch applies the patch and returns the patched runtimeClass. func (c *FakeRuntimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RuntimeClass, err error) { + emptyResult := &v1beta1.RuntimeClass{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, name, pt, data, subresources...), &v1beta1.RuntimeClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(runtimeclassesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.RuntimeClass), err } @@ -136,10 +141,11 @@ func (c *FakeRuntimeClasses) Apply(ctx context.Context, runtimeClass *nodev1beta if name == nil { return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply") } + emptyResult := &v1beta1.RuntimeClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, *name, types.ApplyPatchType, data), &v1beta1.RuntimeClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(runtimeclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.RuntimeClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go index f8990adf1e..8e15d52889 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/node/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" nodev1beta1 "k8s.io/client-go/applyconfigurations/node/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RuntimeClassesGetter has a method to return a RuntimeClassInterface. @@ -55,143 +52,18 @@ type RuntimeClassInterface interface { // runtimeClasses implements RuntimeClassInterface type runtimeClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.RuntimeClass, *v1beta1.RuntimeClassList, *nodev1beta1.RuntimeClassApplyConfiguration] } // newRuntimeClasses returns a RuntimeClasses func newRuntimeClasses(c *NodeV1beta1Client) *runtimeClasses { return &runtimeClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.RuntimeClass, *v1beta1.RuntimeClassList, *nodev1beta1.RuntimeClassApplyConfiguration]( + "runtimeclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.RuntimeClass { return &v1beta1.RuntimeClass{} }, + func() *v1beta1.RuntimeClassList { return &v1beta1.RuntimeClassList{} }), } } - -// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *runtimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RuntimeClass, err error) { - result = &v1beta1.RuntimeClass{} - err = c.client.Get(). - Resource("runtimeclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *runtimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RuntimeClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.RuntimeClassList{} - err = c.client.Get(). 
- Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *runtimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.CreateOptions) (result *v1beta1.RuntimeClass, err error) { - result = &v1beta1.RuntimeClass{} - err = c.client.Post(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(runtimeClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.UpdateOptions) (result *v1beta1.RuntimeClass, err error) { - result = &v1beta1.RuntimeClass{} - err = c.client.Put(). - Resource("runtimeclasses"). - Name(runtimeClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(runtimeClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. -func (c *runtimeClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("runtimeclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *runtimeClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("runtimeclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched runtimeClass. -func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RuntimeClass, err error) { - result = &v1beta1.RuntimeClass{} - err = c.client.Patch(pt). - Resource("runtimeclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied runtimeClass. 
-func (c *runtimeClasses) Apply(ctx context.Context, runtimeClass *nodev1beta1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.RuntimeClass, err error) { - if runtimeClass == nil { - return nil, fmt.Errorf("runtimeClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(runtimeClass) - if err != nil { - return nil, err - } - name := runtimeClass.Name - if name == nil { - return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply") - } - result = &v1beta1.RuntimeClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("runtimeclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go index cd1aac9c29..22173d36de 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go @@ -19,7 +19,9 @@ limitations under the License. package v1 import ( - rest "k8s.io/client-go/rest" + v1 "k8s.io/api/policy/v1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" ) // EvictionsGetter has a method to return a EvictionInterface. @@ -35,14 +37,17 @@ type EvictionInterface interface { // evictions implements EvictionInterface type evictions struct { - client rest.Interface - ns string + *gentype.Client[*v1.Eviction] } // newEvictions returns a Evictions func newEvictions(c *PolicyV1Client, namespace string) *evictions { return &evictions{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClient[*v1.Eviction]( + "evictions", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Eviction { return &v1.Eviction{} }), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go index 853187feb5..2c7e95b720 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go @@ -28,7 +28,7 @@ type EvictionExpansion interface { } func (c *evictions) Evict(ctx context.Context, eviction *policy.Eviction) error { - return c.client.Post(). + return c.GetClient().Post(). AbsPath("/api/v1"). Namespace(eviction.Namespace). Resource("pods"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go index 7b5f51caf4..de2bcc1b09 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go @@ -44,22 +44,24 @@ var poddisruptionbudgetsKind = v1.SchemeGroupVersion.WithKind("PodDisruptionBudg // Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. func (c *FakePodDisruptionBudgets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodDisruptionBudget, err error) { + emptyResult := &v1.PodDisruptionBudget{} obj, err := c.Fake. 
- Invokes(testing.NewGetAction(poddisruptionbudgetsResource, c.ns, name), &v1.PodDisruptionBudget{}) + Invokes(testing.NewGetActionWithOptions(poddisruptionbudgetsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PodDisruptionBudget), err } // List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. func (c *FakePodDisruptionBudgets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodDisruptionBudgetList, err error) { + emptyResult := &v1.PodDisruptionBudgetList{} obj, err := c.Fake. - Invokes(testing.NewListAction(poddisruptionbudgetsResource, poddisruptionbudgetsKind, c.ns, opts), &v1.PodDisruptionBudgetList{}) + Invokes(testing.NewListActionWithOptions(poddisruptionbudgetsResource, poddisruptionbudgetsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakePodDisruptionBudgets) List(ctx context.Context, opts metav1.ListOpt // Watch returns a watch.Interface that watches the requested podDisruptionBudgets. func (c *FakePodDisruptionBudgets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(poddisruptionbudgetsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(poddisruptionbudgetsResource, c.ns, opts)) } // Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. func (c *FakePodDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.CreateOptions) (result *v1.PodDisruptionBudget, err error) { + emptyResult := &v1.PodDisruptionBudget{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1.PodDisruptionBudget{}) + Invokes(testing.NewCreateActionWithOptions(poddisruptionbudgetsResource, c.ns, podDisruptionBudget, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PodDisruptionBudget), err } // Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. func (c *FakePodDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (result *v1.PodDisruptionBudget, err error) { + emptyResult := &v1.PodDisruptionBudget{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1.PodDisruptionBudget{}) + Invokes(testing.NewUpdateActionWithOptions(poddisruptionbudgetsResource, c.ns, podDisruptionBudget, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PodDisruptionBudget), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakePodDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (*v1.PodDisruptionBudget, error) { +func (c *FakePodDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (result *v1.PodDisruptionBudget, err error) { + emptyResult := &v1.PodDisruptionBudget{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget), &v1.PodDisruptionBudget{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PodDisruptionBudget), err } @@ -126,7 +131,7 @@ func (c *FakePodDisruptionBudgets) Delete(ctx context.Context, name string, opts // DeleteCollection deletes a collection of objects. func (c *FakePodDisruptionBudgets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(poddisruptionbudgetsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(poddisruptionbudgetsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.PodDisruptionBudgetList{}) return err @@ -134,11 +139,12 @@ func (c *FakePodDisruptionBudgets) DeleteCollection(ctx context.Context, opts me // Patch applies the patch and returns the patched podDisruptionBudget. func (c *FakePodDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodDisruptionBudget, err error) { + emptyResult := &v1.PodDisruptionBudget{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, name, pt, data, subresources...), &v1.PodDisruptionBudget{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(poddisruptionbudgetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PodDisruptionBudget), err } @@ -156,11 +162,12 @@ func (c *FakePodDisruptionBudgets) Apply(ctx context.Context, podDisruptionBudge if name == nil { return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") } + emptyResult := &v1.PodDisruptionBudget{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data), &v1.PodDisruptionBudget{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PodDisruptionBudget), err } @@ -179,11 +186,12 @@ func (c *FakePodDisruptionBudgets) ApplyStatus(ctx context.Context, podDisruptio if name == nil { return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") } + emptyResult := &v1.PodDisruptionBudget{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.PodDisruptionBudget{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PodDisruptionBudget), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go index 58db3acf9e..6d011cbce2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/policy/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" policyv1 "k8s.io/client-go/applyconfigurations/policy/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PodDisruptionBudgetsGetter has a method to return a PodDisruptionBudgetInterface. @@ -43,6 +40,7 @@ type PodDisruptionBudgetsGetter interface { type PodDisruptionBudgetInterface interface { Create(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.CreateOptions) (*v1.PodDisruptionBudget, error) Update(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (*v1.PodDisruptionBudget, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (*v1.PodDisruptionBudget, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,206 +49,25 @@ type PodDisruptionBudgetInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodDisruptionBudget, err error) Apply(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) PodDisruptionBudgetExpansion } // podDisruptionBudgets implements PodDisruptionBudgetInterface type podDisruptionBudgets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.PodDisruptionBudget, *v1.PodDisruptionBudgetList, *policyv1.PodDisruptionBudgetApplyConfiguration] } // newPodDisruptionBudgets returns a PodDisruptionBudgets func newPodDisruptionBudgets(c *PolicyV1Client, namespace string) *podDisruptionBudgets { return &podDisruptionBudgets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.PodDisruptionBudget, *v1.PodDisruptionBudgetList, *policyv1.PodDisruptionBudgetApplyConfiguration]( + "poddisruptionbudgets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.PodDisruptionBudget { return &v1.PodDisruptionBudget{} }, + func() *v1.PodDisruptionBudgetList { return &v1.PodDisruptionBudgetList{} }), } } - -// Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. -func (c *podDisruptionBudgets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodDisruptionBudget, err error) { - result = &v1.PodDisruptionBudget{} - err = c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. -func (c *podDisruptionBudgets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodDisruptionBudgetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PodDisruptionBudgetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podDisruptionBudgets. -func (c *podDisruptionBudgets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *podDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.CreateOptions) (result *v1.PodDisruptionBudget, err error) { - result = &v1.PodDisruptionBudget{} - err = c.client.Post(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podDisruptionBudget). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. 
-func (c *podDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (result *v1.PodDisruptionBudget, err error) { - result = &v1.PodDisruptionBudget{} - err = c.client.Put(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(podDisruptionBudget.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podDisruptionBudget). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *podDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (result *v1.PodDisruptionBudget, err error) { - result = &v1.PodDisruptionBudget{} - err = c.client.Put(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(podDisruptionBudget.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podDisruptionBudget). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. -func (c *podDisruptionBudgets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podDisruptionBudgets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podDisruptionBudget. -func (c *podDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodDisruptionBudget, err error) { - result = &v1.PodDisruptionBudget{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podDisruptionBudget. -func (c *podDisruptionBudgets) Apply(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) { - if podDisruptionBudget == nil { - return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podDisruptionBudget) - if err != nil { - return nil, err - } - name := podDisruptionBudget.Name - if name == nil { - return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") - } - result = &v1.PodDisruptionBudget{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *podDisruptionBudgets) ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) { - if podDisruptionBudget == nil { - return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podDisruptionBudget) - if err != nil { - return nil, err - } - - name := podDisruptionBudget.Name - if name == nil { - return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") - } - - result = &v1.PodDisruptionBudget{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go index 12e8e76edc..e003ece6bd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go @@ -19,7 +19,9 @@ limitations under the License. package v1beta1 import ( - rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/api/policy/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" ) // EvictionsGetter has a method to return a EvictionInterface. @@ -35,14 +37,17 @@ type EvictionInterface interface { // evictions implements EvictionInterface type evictions struct { - client rest.Interface - ns string + *gentype.Client[*v1beta1.Eviction] } // newEvictions returns a Evictions func newEvictions(c *PolicyV1beta1Client, namespace string) *evictions { return &evictions{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClient[*v1beta1.Eviction]( + "evictions", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.Eviction { return &v1beta1.Eviction{} }), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go index c003671f5d..d7c28987cf 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go @@ -28,7 +28,7 @@ type EvictionExpansion interface { } func (c *evictions) Evict(ctx context.Context, eviction *policy.Eviction) error { - return c.client.Post(). + return c.GetClient().Post(). AbsPath("/api/v1"). Namespace(eviction.Namespace). Resource("pods"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go index bcee8e7774..fbd9d01e07 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go @@ -44,22 +44,24 @@ var poddisruptionbudgetsKind = v1beta1.SchemeGroupVersion.WithKind("PodDisruptio // Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. 
func (c *FakePodDisruptionBudgets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) { + emptyResult := &v1beta1.PodDisruptionBudget{} obj, err := c.Fake. - Invokes(testing.NewGetAction(poddisruptionbudgetsResource, c.ns, name), &v1beta1.PodDisruptionBudget{}) + Invokes(testing.NewGetActionWithOptions(poddisruptionbudgetsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PodDisruptionBudget), err } // List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. func (c *FakePodDisruptionBudgets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { + emptyResult := &v1beta1.PodDisruptionBudgetList{} obj, err := c.Fake. - Invokes(testing.NewListAction(poddisruptionbudgetsResource, poddisruptionbudgetsKind, c.ns, opts), &v1beta1.PodDisruptionBudgetList{}) + Invokes(testing.NewListActionWithOptions(poddisruptionbudgetsResource, poddisruptionbudgetsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,40 +80,43 @@ func (c *FakePodDisruptionBudgets) List(ctx context.Context, opts v1.ListOptions // Watch returns a watch.Interface that watches the requested podDisruptionBudgets. func (c *FakePodDisruptionBudgets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(poddisruptionbudgetsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(poddisruptionbudgetsResource, c.ns, opts)) } // Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. func (c *FakePodDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.CreateOptions) (result *v1beta1.PodDisruptionBudget, err error) { + emptyResult := &v1beta1.PodDisruptionBudget{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) + Invokes(testing.NewCreateActionWithOptions(poddisruptionbudgetsResource, c.ns, podDisruptionBudget, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PodDisruptionBudget), err } // Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. func (c *FakePodDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) { + emptyResult := &v1beta1.PodDisruptionBudget{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) + Invokes(testing.NewUpdateActionWithOptions(poddisruptionbudgetsResource, c.ns, podDisruptionBudget, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PodDisruptionBudget), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakePodDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error) { +func (c *FakePodDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) { + emptyResult := &v1beta1.PodDisruptionBudget{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PodDisruptionBudget), err } @@ -126,7 +131,7 @@ func (c *FakePodDisruptionBudgets) Delete(ctx context.Context, name string, opts // DeleteCollection deletes a collection of objects. func (c *FakePodDisruptionBudgets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(poddisruptionbudgetsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(poddisruptionbudgetsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.PodDisruptionBudgetList{}) return err @@ -134,11 +139,12 @@ func (c *FakePodDisruptionBudgets) DeleteCollection(ctx context.Context, opts v1 // Patch applies the patch and returns the patched podDisruptionBudget. func (c *FakePodDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { + emptyResult := &v1beta1.PodDisruptionBudget{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, name, pt, data, subresources...), &v1beta1.PodDisruptionBudget{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(poddisruptionbudgetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PodDisruptionBudget), err } @@ -156,11 +162,12 @@ func (c *FakePodDisruptionBudgets) Apply(ctx context.Context, podDisruptionBudge if name == nil { return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") } + emptyResult := &v1beta1.PodDisruptionBudget{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.PodDisruptionBudget{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PodDisruptionBudget), err } @@ -179,11 +186,12 @@ func (c *FakePodDisruptionBudgets) ApplyStatus(ctx context.Context, podDisruptio if name == nil { return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") } + emptyResult := &v1beta1.PodDisruptionBudget{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.PodDisruptionBudget{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PodDisruptionBudget), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go index 1687289921..4111812376 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" policyv1beta1 "k8s.io/client-go/applyconfigurations/policy/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PodDisruptionBudgetsGetter has a method to return a PodDisruptionBudgetInterface. @@ -43,6 +40,7 @@ type PodDisruptionBudgetsGetter interface { type PodDisruptionBudgetInterface interface { Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.CreateOptions) (*v1beta1.PodDisruptionBudget, error) Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type PodDisruptionBudgetInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) Apply(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error) PodDisruptionBudgetExpansion } // podDisruptionBudgets implements PodDisruptionBudgetInterface type podDisruptionBudgets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.PodDisruptionBudget, *v1beta1.PodDisruptionBudgetList, *policyv1beta1.PodDisruptionBudgetApplyConfiguration] } // newPodDisruptionBudgets returns a PodDisruptionBudgets func newPodDisruptionBudgets(c *PolicyV1beta1Client, namespace string) *podDisruptionBudgets { return &podDisruptionBudgets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.PodDisruptionBudget, *v1beta1.PodDisruptionBudgetList, *policyv1beta1.PodDisruptionBudgetApplyConfiguration]( + "poddisruptionbudgets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.PodDisruptionBudget { return &v1beta1.PodDisruptionBudget{} }, + func() *v1beta1.PodDisruptionBudgetList { return &v1beta1.PodDisruptionBudgetList{} }), } } - -// Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. -func (c *podDisruptionBudgets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. -func (c *podDisruptionBudgets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.PodDisruptionBudgetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podDisruptionBudgets. -func (c *podDisruptionBudgets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *podDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.CreateOptions) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Post(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podDisruptionBudget). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. 
-func (c *podDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Put(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(podDisruptionBudget.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podDisruptionBudget). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *podDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Put(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(podDisruptionBudget.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podDisruptionBudget). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. -func (c *podDisruptionBudgets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podDisruptionBudgets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podDisruptionBudget. -func (c *podDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podDisruptionBudget. -func (c *podDisruptionBudgets) Apply(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error) { - if podDisruptionBudget == nil { - return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podDisruptionBudget) - if err != nil { - return nil, err - } - name := podDisruptionBudget.Name - if name == nil { - return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") - } - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *podDisruptionBudgets) ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error) { - if podDisruptionBudget == nil { - return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podDisruptionBudget) - if err != nil { - return nil, err - } - - name := podDisruptionBudget.Name - if name == nil { - return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") - } - - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go index 000d737f0f..19fff0ee47 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterRolesGetter has a method to return a ClusterRoleInterface. @@ -55,143 +52,18 @@ type ClusterRoleInterface interface { // clusterRoles implements ClusterRoleInterface type clusterRoles struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.ClusterRole, *v1.ClusterRoleList, *rbacv1.ClusterRoleApplyConfiguration] } // newClusterRoles returns a ClusterRoles func newClusterRoles(c *RbacV1Client) *clusterRoles { return &clusterRoles{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.ClusterRole, *v1.ClusterRoleList, *rbacv1.ClusterRoleApplyConfiguration]( + "clusterroles", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.ClusterRole { return &v1.ClusterRole{} }, + func() *v1.ClusterRoleList { return &v1.ClusterRoleList{} }), } } - -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *clusterRoles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRole, err error) { - result = &v1.ClusterRole{} - err = c.client.Get(). - Resource("clusterroles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *clusterRoles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ClusterRoleList{} - err = c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. 
-func (c *clusterRoles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.CreateOptions) (result *v1.ClusterRole, err error) { - result = &v1.ClusterRole{} - err = c.client.Post(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRole). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.UpdateOptions) (result *v1.ClusterRole, err error) { - result = &v1.ClusterRole{} - err = c.client.Put(). - Resource("clusterroles"). - Name(clusterRole.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRole). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *clusterRoles) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterroles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clusterroles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterRole. -func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRole, err error) { - result = &v1.ClusterRole{} - err = c.client.Patch(pt). - Resource("clusterroles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRole. -func (c *clusterRoles) Apply(ctx context.Context, clusterRole *rbacv1.ClusterRoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterRole, err error) { - if clusterRole == nil { - return nil, fmt.Errorf("clusterRole provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterRole) - if err != nil { - return nil, err - } - name := clusterRole.Name - if name == nil { - return nil, fmt.Errorf("clusterRole.Name must be provided to Apply") - } - result = &v1.ClusterRole{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clusterroles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go index 31db43d984..77fb3785e4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. @@ -55,143 +52,18 @@ type ClusterRoleBindingInterface interface { // clusterRoleBindings implements ClusterRoleBindingInterface type clusterRoleBindings struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.ClusterRoleBinding, *v1.ClusterRoleBindingList, *rbacv1.ClusterRoleBindingApplyConfiguration] } // newClusterRoleBindings returns a ClusterRoleBindings func newClusterRoleBindings(c *RbacV1Client) *clusterRoleBindings { return &clusterRoleBindings{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.ClusterRoleBinding, *v1.ClusterRoleBindingList, *rbacv1.ClusterRoleBindingApplyConfiguration]( + "clusterrolebindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.ClusterRoleBinding { return &v1.ClusterRoleBinding{} }, + func() *v1.ClusterRoleBindingList { return &v1.ClusterRoleBindingList{} }), } } - -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *clusterRoleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRoleBinding, err error) { - result = &v1.ClusterRoleBinding{} - err = c.client.Get(). - Resource("clusterrolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *clusterRoleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ClusterRoleBindingList{} - err = c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *clusterRoleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. 
-func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.CreateOptions) (result *v1.ClusterRoleBinding, err error) { - result = &v1.ClusterRoleBinding{} - err = c.client.Post(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRoleBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.UpdateOptions) (result *v1.ClusterRoleBinding, err error) { - result = &v1.ClusterRoleBinding{} - err = c.client.Put(). - Resource("clusterrolebindings"). - Name(clusterRoleBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRoleBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *clusterRoleBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterrolebindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoleBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clusterrolebindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterRoleBinding. -func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRoleBinding, err error) { - result = &v1.ClusterRoleBinding{} - err = c.client.Patch(pt). - Resource("clusterrolebindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRoleBinding. -func (c *clusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterRoleBinding, err error) { - if clusterRoleBinding == nil { - return nil, fmt.Errorf("clusterRoleBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterRoleBinding) - if err != nil { - return nil, err - } - name := clusterRoleBinding.Name - if name == nil { - return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply") - } - result = &v1.ClusterRoleBinding{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clusterrolebindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go index 5add33ddfb..6df91b1a86 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go @@ -43,20 +43,22 @@ var clusterrolesKind = v1.SchemeGroupVersion.WithKind("ClusterRole") // Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. func (c *FakeClusterRoles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRole, err error) { + emptyResult := &v1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clusterrolesResource, name), &v1.ClusterRole{}) + Invokes(testing.NewRootGetActionWithOptions(clusterrolesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ClusterRole), err } // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. func (c *FakeClusterRoles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleList, err error) { + emptyResult := &v1.ClusterRoleList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(clusterrolesResource, clusterrolesKind, opts), &v1.ClusterRoleList{}) + Invokes(testing.NewRootListActionWithOptions(clusterrolesResource, clusterrolesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeClusterRoles) List(ctx context.Context, opts metav1.ListOptions) (r // Watch returns a watch.Interface that watches the requested clusterRoles. func (c *FakeClusterRoles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(clusterrolesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(clusterrolesResource, opts)) } // Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.CreateOptions) (result *v1.ClusterRole, err error) { + emptyResult := &v1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1.ClusterRole{}) + Invokes(testing.NewRootCreateActionWithOptions(clusterrolesResource, clusterRole, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ClusterRole), err } // Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.UpdateOptions) (result *v1.ClusterRole, err error) { + emptyResult := &v1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1.ClusterRole{}) + Invokes(testing.NewRootUpdateActionWithOptions(clusterrolesResource, clusterRole, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ClusterRole), err } @@ -107,7 +111,7 @@ func (c *FakeClusterRoles) Delete(ctx context.Context, name string, opts metav1. 
// DeleteCollection deletes a collection of objects. func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(clusterrolesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.ClusterRoleList{}) return err @@ -115,10 +119,11 @@ func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts metav1.Del // Patch applies the patch and returns the patched clusterRole. func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRole, err error) { + emptyResult := &v1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &v1.ClusterRole{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ClusterRole), err } @@ -136,10 +141,11 @@ func (c *FakeClusterRoles) Apply(ctx context.Context, clusterRole *rbacv1.Cluste if name == nil { return nil, fmt.Errorf("clusterRole.Name must be provided to Apply") } + emptyResult := &v1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, *name, types.ApplyPatchType, data), &v1.ClusterRole{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ClusterRole), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go index d42e93e653..6f3251408c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go @@ -43,20 +43,22 @@ var clusterrolebindingsKind = v1.SchemeGroupVersion.WithKind("ClusterRoleBinding // Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. func (c *FakeClusterRoleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRoleBinding, err error) { + emptyResult := &v1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &v1.ClusterRoleBinding{}) + Invokes(testing.NewRootGetActionWithOptions(clusterrolebindingsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ClusterRoleBinding), err } // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. func (c *FakeClusterRoleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleBindingList, err error) { + emptyResult := &v1.ClusterRoleBindingList{} obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(clusterrolebindingsResource, clusterrolebindingsKind, opts), &v1.ClusterRoleBindingList{}) + Invokes(testing.NewRootListActionWithOptions(clusterrolebindingsResource, clusterrolebindingsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeClusterRoleBindings) List(ctx context.Context, opts metav1.ListOpti // Watch returns a watch.Interface that watches the requested clusterRoleBindings. func (c *FakeClusterRoleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(clusterrolebindingsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(clusterrolebindingsResource, opts)) } // Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.CreateOptions) (result *v1.ClusterRoleBinding, err error) { + emptyResult := &v1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1.ClusterRoleBinding{}) + Invokes(testing.NewRootCreateActionWithOptions(clusterrolebindingsResource, clusterRoleBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ClusterRoleBinding), err } // Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.UpdateOptions) (result *v1.ClusterRoleBinding, err error) { + emptyResult := &v1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1.ClusterRoleBinding{}) + Invokes(testing.NewRootUpdateActionWithOptions(clusterrolebindingsResource, clusterRoleBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ClusterRoleBinding), err } @@ -107,7 +111,7 @@ func (c *FakeClusterRoleBindings) Delete(ctx context.Context, name string, opts // DeleteCollection deletes a collection of objects. func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(clusterrolebindingsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.ClusterRoleBindingList{}) return err @@ -115,10 +119,11 @@ func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts met // Patch applies the patch and returns the patched clusterRoleBinding. func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRoleBinding, err error) { + emptyResult := &v1.ClusterRoleBinding{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &v1.ClusterRoleBinding{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolebindingsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ClusterRoleBinding), err } @@ -136,10 +141,11 @@ func (c *FakeClusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding if name == nil { return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply") } + emptyResult := &v1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, *name, types.ApplyPatchType, data), &v1.ClusterRoleBinding{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolebindingsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.ClusterRoleBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go index a3bc5da663..ba9161940b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go @@ -44,22 +44,24 @@ var rolesKind = v1.SchemeGroupVersion.WithKind("Role") // Get takes name of the role, and returns the corresponding role object, and an error if there is any. func (c *FakeRoles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Role, err error) { + emptyResult := &v1.Role{} obj, err := c.Fake. - Invokes(testing.NewGetAction(rolesResource, c.ns, name), &v1.Role{}) + Invokes(testing.NewGetActionWithOptions(rolesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Role), err } // List takes label and field selectors, and returns the list of Roles that match those selectors. func (c *FakeRoles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleList, err error) { + emptyResult := &v1.RoleList{} obj, err := c.Fake. - Invokes(testing.NewListAction(rolesResource, rolesKind, c.ns, opts), &v1.RoleList{}) + Invokes(testing.NewListActionWithOptions(rolesResource, rolesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeRoles) List(ctx context.Context, opts metav1.ListOptions) (result * // Watch returns a watch.Interface that watches the requested roles. func (c *FakeRoles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(rolesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(rolesResource, c.ns, opts)) } // Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. func (c *FakeRoles) Create(ctx context.Context, role *v1.Role, opts metav1.CreateOptions) (result *v1.Role, err error) { + emptyResult := &v1.Role{} obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1.Role{}) + Invokes(testing.NewCreateActionWithOptions(rolesResource, c.ns, role, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Role), err } // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. func (c *FakeRoles) Update(ctx context.Context, role *v1.Role, opts metav1.UpdateOptions) (result *v1.Role, err error) { + emptyResult := &v1.Role{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1.Role{}) + Invokes(testing.NewUpdateActionWithOptions(rolesResource, c.ns, role, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Role), err } @@ -114,7 +118,7 @@ func (c *FakeRoles) Delete(ctx context.Context, name string, opts metav1.DeleteO // DeleteCollection deletes a collection of objects. func (c *FakeRoles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(rolesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.RoleList{}) return err @@ -122,11 +126,12 @@ func (c *FakeRoles) DeleteCollection(ctx context.Context, opts metav1.DeleteOpti // Patch applies the patch and returns the patched role. func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Role, err error) { + emptyResult := &v1.Role{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &v1.Role{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(rolesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Role), err } @@ -144,11 +149,12 @@ func (c *FakeRoles) Apply(ctx context.Context, role *rbacv1.RoleApplyConfigurati if name == nil { return nil, fmt.Errorf("role.Name must be provided to Apply") } + emptyResult := &v1.Role{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, *name, types.ApplyPatchType, data), &v1.Role{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(rolesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.Role), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go index eeb37e9db3..6d7d7d1933 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go @@ -44,22 +44,24 @@ var rolebindingsKind = v1.SchemeGroupVersion.WithKind("RoleBinding") // Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. func (c *FakeRoleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RoleBinding, err error) { + emptyResult := &v1.RoleBinding{} obj, err := c.Fake. 
- Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &v1.RoleBinding{}) + Invokes(testing.NewGetActionWithOptions(rolebindingsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.RoleBinding), err } // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. func (c *FakeRoleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleBindingList, err error) { + emptyResult := &v1.RoleBindingList{} obj, err := c.Fake. - Invokes(testing.NewListAction(rolebindingsResource, rolebindingsKind, c.ns, opts), &v1.RoleBindingList{}) + Invokes(testing.NewListActionWithOptions(rolebindingsResource, rolebindingsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeRoleBindings) List(ctx context.Context, opts metav1.ListOptions) (r // Watch returns a watch.Interface that watches the requested roleBindings. func (c *FakeRoleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(rolebindingsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(rolebindingsResource, c.ns, opts)) } // Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.CreateOptions) (result *v1.RoleBinding, err error) { + emptyResult := &v1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1.RoleBinding{}) + Invokes(testing.NewCreateActionWithOptions(rolebindingsResource, c.ns, roleBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.RoleBinding), err } // Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.UpdateOptions) (result *v1.RoleBinding, err error) { + emptyResult := &v1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1.RoleBinding{}) + Invokes(testing.NewUpdateActionWithOptions(rolebindingsResource, c.ns, roleBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.RoleBinding), err } @@ -114,7 +118,7 @@ func (c *FakeRoleBindings) Delete(ctx context.Context, name string, opts metav1. // DeleteCollection deletes a collection of objects. func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(rolebindingsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.RoleBindingList{}) return err @@ -122,11 +126,12 @@ func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts metav1.Del // Patch applies the patch and returns the patched roleBinding. 
func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RoleBinding, err error) { + emptyResult := &v1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &v1.RoleBinding{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(rolebindingsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.RoleBinding), err } @@ -144,11 +149,12 @@ func (c *FakeRoleBindings) Apply(ctx context.Context, roleBinding *rbacv1.RoleBi if name == nil { return nil, fmt.Errorf("roleBinding.Name must be provided to Apply") } + emptyResult := &v1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data), &v1.RoleBinding{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.RoleBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go index 93810a3ffa..b75b055f07 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RolesGetter has a method to return a RoleInterface. @@ -55,154 +52,18 @@ type RoleInterface interface { // roles implements RoleInterface type roles struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Role, *v1.RoleList, *rbacv1.RoleApplyConfiguration] } // newRoles returns a Roles func newRoles(c *RbacV1Client, namespace string) *roles { return &roles{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Role, *v1.RoleList, *rbacv1.RoleApplyConfiguration]( + "roles", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Role { return &v1.Role{} }, + func() *v1.RoleList { return &v1.RoleList{} }), } } - -// Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *roles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Role, err error) { - result = &v1.Role{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *roles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.RoleList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roles. -func (c *roles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Create(ctx context.Context, role *v1.Role, opts metav1.CreateOptions) (result *v1.Role, err error) { - result = &v1.Role{} - err = c.client.Post(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(role). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Update(ctx context.Context, role *v1.Role, opts metav1.UpdateOptions) (result *v1.Role, err error) { - result = &v1.Role{} - err = c.client.Put(). - Namespace(c.ns). - Resource("roles"). - Name(role.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(role). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *roles) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched role. -func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Role, err error) { - result = &v1.Role{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("roles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied role. -func (c *roles) Apply(ctx context.Context, role *rbacv1.RoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Role, err error) { - if role == nil { - return nil, fmt.Errorf("role provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(role) - if err != nil { - return nil, err - } - name := role.Name - if name == nil { - return nil, fmt.Errorf("role.Name must be provided to Apply") - } - result = &v1.Role{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("roles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go index 2ace938604..fcbb1c0e26 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RoleBindingsGetter has a method to return a RoleBindingInterface. @@ -55,154 +52,18 @@ type RoleBindingInterface interface { // roleBindings implements RoleBindingInterface type roleBindings struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.RoleBinding, *v1.RoleBindingList, *rbacv1.RoleBindingApplyConfiguration] } // newRoleBindings returns a RoleBindings func newRoleBindings(c *RbacV1Client, namespace string) *roleBindings { return &roleBindings{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.RoleBinding, *v1.RoleBindingList, *rbacv1.RoleBindingApplyConfiguration]( + "rolebindings", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.RoleBinding { return &v1.RoleBinding{} }, + func() *v1.RoleBindingList { return &v1.RoleBindingList{} }), } } - -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *roleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RoleBinding, err error) { - result = &v1.RoleBinding{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *roleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.RoleBindingList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roleBindings. -func (c *roleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Create(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.CreateOptions) (result *v1.RoleBinding, err error) { - result = &v1.RoleBinding{} - err = c.client.Post(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). 
- Body(roleBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Update(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.UpdateOptions) (result *v1.RoleBinding, err error) { - result = &v1.RoleBinding{} - err = c.client.Put(). - Namespace(c.ns). - Resource("rolebindings"). - Name(roleBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(roleBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *roleBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roleBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched roleBinding. -func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RoleBinding, err error) { - result = &v1.RoleBinding{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied roleBinding. -func (c *roleBindings) Apply(ctx context.Context, roleBinding *rbacv1.RoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RoleBinding, err error) { - if roleBinding == nil { - return nil, fmt.Errorf("roleBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(roleBinding) - if err != nil { - return nil, err - } - name := roleBinding.Name - if name == nil { - return nil, fmt.Errorf("roleBinding.Name must be provided to Apply") - } - result = &v1.RoleBinding{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("rolebindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go index d6d30e99ef..f91e2c50a1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterRolesGetter has a method to return a ClusterRoleInterface. @@ -55,143 +52,18 @@ type ClusterRoleInterface interface { // clusterRoles implements ClusterRoleInterface type clusterRoles struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.ClusterRole, *v1alpha1.ClusterRoleList, *rbacv1alpha1.ClusterRoleApplyConfiguration] } // newClusterRoles returns a ClusterRoles func newClusterRoles(c *RbacV1alpha1Client) *clusterRoles { return &clusterRoles{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.ClusterRole, *v1alpha1.ClusterRoleList, *rbacv1alpha1.ClusterRoleApplyConfiguration]( + "clusterroles", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.ClusterRole { return &v1alpha1.ClusterRole{} }, + func() *v1alpha1.ClusterRoleList { return &v1alpha1.ClusterRoleList{} }), } } - -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *clusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) { - result = &v1alpha1.ClusterRole{} - err = c.client.Get(). - Resource("clusterroles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *clusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ClusterRoleList{} - err = c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *clusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.CreateOptions) (result *v1alpha1.ClusterRole, err error) { - result = &v1alpha1.ClusterRole{} - err = c.client.Post(). - Resource("clusterroles"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRole). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.UpdateOptions) (result *v1alpha1.ClusterRole, err error) { - result = &v1alpha1.ClusterRole{} - err = c.client.Put(). - Resource("clusterroles"). - Name(clusterRole.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRole). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *clusterRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterroles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clusterroles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterRole. -func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRole, err error) { - result = &v1alpha1.ClusterRole{} - err = c.client.Patch(pt). - Resource("clusterroles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRole. -func (c *clusterRoles) Apply(ctx context.Context, clusterRole *rbacv1alpha1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterRole, err error) { - if clusterRole == nil { - return nil, fmt.Errorf("clusterRole provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterRole) - if err != nil { - return nil, err - } - name := clusterRole.Name - if name == nil { - return nil, fmt.Errorf("clusterRole.Name must be provided to Apply") - } - result = &v1alpha1.ClusterRole{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clusterroles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go index 2eded92ac2..3f04526f0b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. @@ -55,143 +52,18 @@ type ClusterRoleBindingInterface interface { // clusterRoleBindings implements ClusterRoleBindingInterface type clusterRoleBindings struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.ClusterRoleBinding, *v1alpha1.ClusterRoleBindingList, *rbacv1alpha1.ClusterRoleBindingApplyConfiguration] } // newClusterRoleBindings returns a ClusterRoleBindings func newClusterRoleBindings(c *RbacV1alpha1Client) *clusterRoleBindings { return &clusterRoleBindings{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.ClusterRoleBinding, *v1alpha1.ClusterRoleBindingList, *rbacv1alpha1.ClusterRoleBindingApplyConfiguration]( + "clusterrolebindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.ClusterRoleBinding { return &v1alpha1.ClusterRoleBinding{} }, + func() *v1alpha1.ClusterRoleBindingList { return &v1alpha1.ClusterRoleBindingList{} }), } } - -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *clusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - result = &v1alpha1.ClusterRoleBinding{} - err = c.client.Get(). - Resource("clusterrolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *clusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ClusterRoleBindingList{} - err = c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *clusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterRoleBinding and creates it. 
Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - result = &v1alpha1.ClusterRoleBinding{} - err = c.client.Post(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRoleBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - result = &v1alpha1.ClusterRoleBinding{} - err = c.client.Put(). - Resource("clusterrolebindings"). - Name(clusterRoleBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRoleBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *clusterRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterrolebindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clusterrolebindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterRoleBinding. -func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) { - result = &v1alpha1.ClusterRoleBinding{} - err = c.client.Patch(pt). - Resource("clusterrolebindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRoleBinding. -func (c *clusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding *rbacv1alpha1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - if clusterRoleBinding == nil { - return nil, fmt.Errorf("clusterRoleBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterRoleBinding) - if err != nil { - return nil, err - } - name := clusterRoleBinding.Name - if name == nil { - return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply") - } - result = &v1alpha1.ClusterRoleBinding{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clusterrolebindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go index 534a1990f5..34c9a853ea 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go @@ -43,20 +43,22 @@ var clusterrolesKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterRole") // Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. func (c *FakeClusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) { + emptyResult := &v1alpha1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clusterrolesResource, name), &v1alpha1.ClusterRole{}) + Invokes(testing.NewRootGetActionWithOptions(clusterrolesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterRole), err } // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. func (c *FakeClusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { + emptyResult := &v1alpha1.ClusterRoleList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(clusterrolesResource, clusterrolesKind, opts), &v1alpha1.ClusterRoleList{}) + Invokes(testing.NewRootListActionWithOptions(clusterrolesResource, clusterrolesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeClusterRoles) List(ctx context.Context, opts v1.ListOptions) (resul // Watch returns a watch.Interface that watches the requested clusterRoles. func (c *FakeClusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(clusterrolesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(clusterrolesResource, opts)) } // Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.CreateOptions) (result *v1alpha1.ClusterRole, err error) { + emptyResult := &v1alpha1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1alpha1.ClusterRole{}) + Invokes(testing.NewRootCreateActionWithOptions(clusterrolesResource, clusterRole, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterRole), err } // Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.UpdateOptions) (result *v1alpha1.ClusterRole, err error) { + emptyResult := &v1alpha1.ClusterRole{} obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1alpha1.ClusterRole{}) + Invokes(testing.NewRootUpdateActionWithOptions(clusterrolesResource, clusterRole, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterRole), err } @@ -107,7 +111,7 @@ func (c *FakeClusterRoles) Delete(ctx context.Context, name string, opts v1.Dele // DeleteCollection deletes a collection of objects. func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(clusterrolesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.ClusterRoleList{}) return err @@ -115,10 +119,11 @@ func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteO // Patch applies the patch and returns the patched clusterRole. func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRole, err error) { + emptyResult := &v1alpha1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &v1alpha1.ClusterRole{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterRole), err } @@ -136,10 +141,11 @@ func (c *FakeClusterRoles) Apply(ctx context.Context, clusterRole *rbacv1alpha1. if name == nil { return nil, fmt.Errorf("clusterRole.Name must be provided to Apply") } + emptyResult := &v1alpha1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, *name, types.ApplyPatchType, data), &v1alpha1.ClusterRole{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterRole), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go index 0a4359392d..d42f763421 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go @@ -43,20 +43,22 @@ var clusterrolebindingsKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterRoleB // Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. func (c *FakeClusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) { + emptyResult := &v1alpha1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &v1alpha1.ClusterRoleBinding{}) + Invokes(testing.NewRootGetActionWithOptions(clusterrolebindingsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterRoleBinding), err } // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. 
func (c *FakeClusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { + emptyResult := &v1alpha1.ClusterRoleBindingList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(clusterrolebindingsResource, clusterrolebindingsKind, opts), &v1alpha1.ClusterRoleBindingList{}) + Invokes(testing.NewRootListActionWithOptions(clusterrolebindingsResource, clusterrolebindingsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeClusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) // Watch returns a watch.Interface that watches the requested clusterRoleBindings. func (c *FakeClusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(clusterrolebindingsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(clusterrolebindingsResource, opts)) } // Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1alpha1.ClusterRoleBinding, err error) { + emptyResult := &v1alpha1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1alpha1.ClusterRoleBinding{}) + Invokes(testing.NewRootCreateActionWithOptions(clusterrolebindingsResource, clusterRoleBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterRoleBinding), err } // Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1alpha1.ClusterRoleBinding, err error) { + emptyResult := &v1alpha1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1alpha1.ClusterRoleBinding{}) + Invokes(testing.NewRootUpdateActionWithOptions(clusterrolebindingsResource, clusterRoleBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterRoleBinding), err } @@ -107,7 +111,7 @@ func (c *FakeClusterRoleBindings) Delete(ctx context.Context, name string, opts // DeleteCollection deletes a collection of objects. func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(clusterrolebindingsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.ClusterRoleBindingList{}) return err @@ -115,10 +119,11 @@ func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts v1. // Patch applies the patch and returns the patched clusterRoleBinding. 
func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) { + emptyResult := &v1alpha1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &v1alpha1.ClusterRoleBinding{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolebindingsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterRoleBinding), err } @@ -136,10 +141,11 @@ func (c *FakeClusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding if name == nil { return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply") } + emptyResult := &v1alpha1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, *name, types.ApplyPatchType, data), &v1alpha1.ClusterRoleBinding{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolebindingsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ClusterRoleBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go index a0e28348ae..9b0ba7cac6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go @@ -44,22 +44,24 @@ var rolesKind = v1alpha1.SchemeGroupVersion.WithKind("Role") // Get takes name of the role, and returns the corresponding role object, and an error if there is any. func (c *FakeRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Role, err error) { + emptyResult := &v1alpha1.Role{} obj, err := c.Fake. - Invokes(testing.NewGetAction(rolesResource, c.ns, name), &v1alpha1.Role{}) + Invokes(testing.NewGetActionWithOptions(rolesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.Role), err } // List takes label and field selectors, and returns the list of Roles that match those selectors. func (c *FakeRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleList, err error) { + emptyResult := &v1alpha1.RoleList{} obj, err := c.Fake. - Invokes(testing.NewListAction(rolesResource, rolesKind, c.ns, opts), &v1alpha1.RoleList{}) + Invokes(testing.NewListActionWithOptions(rolesResource, rolesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1al // Watch returns a watch.Interface that watches the requested roles. func (c *FakeRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(rolesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(rolesResource, c.ns, opts)) } // Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. 
func (c *FakeRoles) Create(ctx context.Context, role *v1alpha1.Role, opts v1.CreateOptions) (result *v1alpha1.Role, err error) { + emptyResult := &v1alpha1.Role{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1alpha1.Role{}) + Invokes(testing.NewCreateActionWithOptions(rolesResource, c.ns, role, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.Role), err } // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. func (c *FakeRoles) Update(ctx context.Context, role *v1alpha1.Role, opts v1.UpdateOptions) (result *v1alpha1.Role, err error) { + emptyResult := &v1alpha1.Role{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1alpha1.Role{}) + Invokes(testing.NewUpdateActionWithOptions(rolesResource, c.ns, role, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.Role), err } @@ -114,7 +118,7 @@ func (c *FakeRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptio // DeleteCollection deletes a collection of objects. func (c *FakeRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(rolesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.RoleList{}) return err @@ -122,11 +126,12 @@ func (c *FakeRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, // Patch applies the patch and returns the patched role. func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Role, err error) { + emptyResult := &v1alpha1.Role{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &v1alpha1.Role{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(rolesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.Role), err } @@ -144,11 +149,12 @@ func (c *FakeRoles) Apply(ctx context.Context, role *rbacv1alpha1.RoleApplyConfi if name == nil { return nil, fmt.Errorf("role.Name must be provided to Apply") } + emptyResult := &v1alpha1.Role{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha1.Role{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(rolesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.Role), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go index 76649f5c2b..f572945aca 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go @@ -44,22 +44,24 @@ var rolebindingsKind = v1alpha1.SchemeGroupVersion.WithKind("RoleBinding") // Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. 
func (c *FakeRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) { + emptyResult := &v1alpha1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &v1alpha1.RoleBinding{}) + Invokes(testing.NewGetActionWithOptions(rolebindingsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.RoleBinding), err } // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. func (c *FakeRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) { + emptyResult := &v1alpha1.RoleBindingList{} obj, err := c.Fake. - Invokes(testing.NewListAction(rolebindingsResource, rolebindingsKind, c.ns, opts), &v1alpha1.RoleBindingList{}) + Invokes(testing.NewListActionWithOptions(rolebindingsResource, rolebindingsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeRoleBindings) List(ctx context.Context, opts v1.ListOptions) (resul // Watch returns a watch.Interface that watches the requested roleBindings. func (c *FakeRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(rolebindingsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(rolebindingsResource, c.ns, opts)) } // Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.CreateOptions) (result *v1alpha1.RoleBinding, err error) { + emptyResult := &v1alpha1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1alpha1.RoleBinding{}) + Invokes(testing.NewCreateActionWithOptions(rolebindingsResource, c.ns, roleBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.RoleBinding), err } // Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.UpdateOptions) (result *v1alpha1.RoleBinding, err error) { + emptyResult := &v1alpha1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1alpha1.RoleBinding{}) + Invokes(testing.NewUpdateActionWithOptions(rolebindingsResource, c.ns, roleBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.RoleBinding), err } @@ -114,7 +118,7 @@ func (c *FakeRoleBindings) Delete(ctx context.Context, name string, opts v1.Dele // DeleteCollection deletes a collection of objects. 
func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(rolebindingsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.RoleBindingList{}) return err @@ -122,11 +126,12 @@ func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteO // Patch applies the patch and returns the patched roleBinding. func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoleBinding, err error) { + emptyResult := &v1alpha1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &v1alpha1.RoleBinding{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(rolebindingsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.RoleBinding), err } @@ -144,11 +149,12 @@ func (c *FakeRoleBindings) Apply(ctx context.Context, roleBinding *rbacv1alpha1. if name == nil { return nil, fmt.Errorf("roleBinding.Name must be provided to Apply") } + emptyResult := &v1alpha1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha1.RoleBinding{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.RoleBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go index 43c16fde74..4a1876a7d5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RolesGetter has a method to return a RoleInterface. @@ -55,154 +52,18 @@ type RoleInterface interface { // roles implements RoleInterface type roles struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1alpha1.Role, *v1alpha1.RoleList, *rbacv1alpha1.RoleApplyConfiguration] } // newRoles returns a Roles func newRoles(c *RbacV1alpha1Client, namespace string) *roles { return &roles{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1alpha1.Role, *v1alpha1.RoleList, *rbacv1alpha1.RoleApplyConfiguration]( + "roles", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha1.Role { return &v1alpha1.Role{} }, + func() *v1alpha1.RoleList { return &v1alpha1.RoleList{} }), } } - -// Get takes name of the role, and returns the corresponding role object, and an error if there is any. 
-func (c *roles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Role, err error) { - result = &v1alpha1.Role{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *roles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.RoleList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roles. -func (c *roles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Create(ctx context.Context, role *v1alpha1.Role, opts v1.CreateOptions) (result *v1alpha1.Role, err error) { - result = &v1alpha1.Role{} - err = c.client.Post(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(role). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Update(ctx context.Context, role *v1alpha1.Role, opts v1.UpdateOptions) (result *v1alpha1.Role, err error) { - result = &v1alpha1.Role{} - err = c.client.Put(). - Namespace(c.ns). - Resource("roles"). - Name(role.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(role). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *roles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched role. -func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Role, err error) { - result = &v1alpha1.Role{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("roles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied role. -func (c *roles) Apply(ctx context.Context, role *rbacv1alpha1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Role, err error) { - if role == nil { - return nil, fmt.Errorf("role provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(role) - if err != nil { - return nil, err - } - name := role.Name - if name == nil { - return nil, fmt.Errorf("role.Name must be provided to Apply") - } - result = &v1alpha1.Role{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("roles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go index 3129c9b4e8..6473132f1c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RoleBindingsGetter has a method to return a RoleBindingInterface. @@ -55,154 +52,18 @@ type RoleBindingInterface interface { // roleBindings implements RoleBindingInterface type roleBindings struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1alpha1.RoleBinding, *v1alpha1.RoleBindingList, *rbacv1alpha1.RoleBindingApplyConfiguration] } // newRoleBindings returns a RoleBindings func newRoleBindings(c *RbacV1alpha1Client, namespace string) *roleBindings { return &roleBindings{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1alpha1.RoleBinding, *v1alpha1.RoleBindingList, *rbacv1alpha1.RoleBindingApplyConfiguration]( + "rolebindings", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha1.RoleBinding { return &v1alpha1.RoleBinding{} }, + func() *v1alpha1.RoleBindingList { return &v1alpha1.RoleBindingList{} }), } } - -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *roleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) { - result = &v1alpha1.RoleBinding{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *roleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.RoleBindingList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roleBindings. -func (c *roleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Create(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.CreateOptions) (result *v1alpha1.RoleBinding, err error) { - result = &v1alpha1.RoleBinding{} - err = c.client.Post(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(roleBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Update(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.UpdateOptions) (result *v1alpha1.RoleBinding, err error) { - result = &v1alpha1.RoleBinding{} - err = c.client.Put(). - Namespace(c.ns). - Resource("rolebindings"). - Name(roleBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(roleBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *roleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched roleBinding. -func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoleBinding, err error) { - result = &v1alpha1.RoleBinding{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied roleBinding. 
-func (c *roleBindings) Apply(ctx context.Context, roleBinding *rbacv1alpha1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.RoleBinding, err error) { - if roleBinding == nil { - return nil, fmt.Errorf("roleBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(roleBinding) - if err != nil { - return nil, err - } - name := roleBinding.Name - if name == nil { - return nil, fmt.Errorf("roleBinding.Name must be provided to Apply") - } - result = &v1alpha1.RoleBinding{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("rolebindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go index a3d67f0315..ed398333a1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterRolesGetter has a method to return a ClusterRoleInterface. @@ -55,143 +52,18 @@ type ClusterRoleInterface interface { // clusterRoles implements ClusterRoleInterface type clusterRoles struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.ClusterRole, *v1beta1.ClusterRoleList, *rbacv1beta1.ClusterRoleApplyConfiguration] } // newClusterRoles returns a ClusterRoles func newClusterRoles(c *RbacV1beta1Client) *clusterRoles { return &clusterRoles{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.ClusterRole, *v1beta1.ClusterRoleList, *rbacv1beta1.ClusterRoleApplyConfiguration]( + "clusterroles", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.ClusterRole { return &v1beta1.ClusterRole{} }, + func() *v1beta1.ClusterRoleList { return &v1beta1.ClusterRoleList{} }), } } - -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *clusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) { - result = &v1beta1.ClusterRole{} - err = c.client.Get(). - Resource("clusterroles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *clusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ClusterRoleList{} - err = c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. 
-func (c *clusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.CreateOptions) (result *v1beta1.ClusterRole, err error) { - result = &v1beta1.ClusterRole{} - err = c.client.Post(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRole). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.UpdateOptions) (result *v1beta1.ClusterRole, err error) { - result = &v1beta1.ClusterRole{} - err = c.client.Put(). - Resource("clusterroles"). - Name(clusterRole.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRole). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *clusterRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterroles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clusterroles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterRole. -func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRole, err error) { - result = &v1beta1.ClusterRole{} - err = c.client.Patch(pt). - Resource("clusterroles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRole. -func (c *clusterRoles) Apply(ctx context.Context, clusterRole *rbacv1beta1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ClusterRole, err error) { - if clusterRole == nil { - return nil, fmt.Errorf("clusterRole provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterRole) - if err != nil { - return nil, err - } - name := clusterRole.Name - if name == nil { - return nil, fmt.Errorf("clusterRole.Name must be provided to Apply") - } - result = &v1beta1.ClusterRole{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clusterroles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go index ae39cbb9ae..3010a99ae2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. @@ -55,143 +52,18 @@ type ClusterRoleBindingInterface interface { // clusterRoleBindings implements ClusterRoleBindingInterface type clusterRoleBindings struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.ClusterRoleBinding, *v1beta1.ClusterRoleBindingList, *rbacv1beta1.ClusterRoleBindingApplyConfiguration] } // newClusterRoleBindings returns a ClusterRoleBindings func newClusterRoleBindings(c *RbacV1beta1Client) *clusterRoleBindings { return &clusterRoleBindings{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.ClusterRoleBinding, *v1beta1.ClusterRoleBindingList, *rbacv1beta1.ClusterRoleBindingApplyConfiguration]( + "clusterrolebindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.ClusterRoleBinding { return &v1beta1.ClusterRoleBinding{} }, + func() *v1beta1.ClusterRoleBindingList { return &v1beta1.ClusterRoleBindingList{} }), } } - -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *clusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) { - result = &v1beta1.ClusterRoleBinding{} - err = c.client.Get(). - Resource("clusterrolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *clusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ClusterRoleBindingList{} - err = c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *clusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. 
-func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1beta1.ClusterRoleBinding, err error) { - result = &v1beta1.ClusterRoleBinding{} - err = c.client.Post(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRoleBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1beta1.ClusterRoleBinding, err error) { - result = &v1beta1.ClusterRoleBinding{} - err = c.client.Put(). - Resource("clusterrolebindings"). - Name(clusterRoleBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRoleBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *clusterRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterrolebindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clusterrolebindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterRoleBinding. -func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) { - result = &v1beta1.ClusterRoleBinding{} - err = c.client.Patch(pt). - Resource("clusterrolebindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRoleBinding. -func (c *clusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding *rbacv1beta1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ClusterRoleBinding, err error) { - if clusterRoleBinding == nil { - return nil, fmt.Errorf("clusterRoleBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterRoleBinding) - if err != nil { - return nil, err - } - name := clusterRoleBinding.Name - if name == nil { - return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply") - } - result = &v1beta1.ClusterRoleBinding{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clusterrolebindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go index 2a94a4315e..b7996c106f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go @@ -43,20 +43,22 @@ var clusterrolesKind = v1beta1.SchemeGroupVersion.WithKind("ClusterRole") // Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. func (c *FakeClusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) { + emptyResult := &v1beta1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clusterrolesResource, name), &v1beta1.ClusterRole{}) + Invokes(testing.NewRootGetActionWithOptions(clusterrolesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ClusterRole), err } // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. func (c *FakeClusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) { + emptyResult := &v1beta1.ClusterRoleList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(clusterrolesResource, clusterrolesKind, opts), &v1beta1.ClusterRoleList{}) + Invokes(testing.NewRootListActionWithOptions(clusterrolesResource, clusterrolesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeClusterRoles) List(ctx context.Context, opts v1.ListOptions) (resul // Watch returns a watch.Interface that watches the requested clusterRoles. func (c *FakeClusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(clusterrolesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(clusterrolesResource, opts)) } // Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.CreateOptions) (result *v1beta1.ClusterRole, err error) { + emptyResult := &v1beta1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1beta1.ClusterRole{}) + Invokes(testing.NewRootCreateActionWithOptions(clusterrolesResource, clusterRole, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ClusterRole), err } // Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.UpdateOptions) (result *v1beta1.ClusterRole, err error) { + emptyResult := &v1beta1.ClusterRole{} obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1beta1.ClusterRole{}) + Invokes(testing.NewRootUpdateActionWithOptions(clusterrolesResource, clusterRole, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ClusterRole), err } @@ -107,7 +111,7 @@ func (c *FakeClusterRoles) Delete(ctx context.Context, name string, opts v1.Dele // DeleteCollection deletes a collection of objects. func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(clusterrolesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.ClusterRoleList{}) return err @@ -115,10 +119,11 @@ func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteO // Patch applies the patch and returns the patched clusterRole. func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRole, err error) { + emptyResult := &v1beta1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &v1beta1.ClusterRole{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ClusterRole), err } @@ -136,10 +141,11 @@ func (c *FakeClusterRoles) Apply(ctx context.Context, clusterRole *rbacv1beta1.C if name == nil { return nil, fmt.Errorf("clusterRole.Name must be provided to Apply") } + emptyResult := &v1beta1.ClusterRole{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, *name, types.ApplyPatchType, data), &v1beta1.ClusterRole{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ClusterRole), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go index c9fd7c0cdd..8843757ace 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go @@ -43,20 +43,22 @@ var clusterrolebindingsKind = v1beta1.SchemeGroupVersion.WithKind("ClusterRoleBi // Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. func (c *FakeClusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) { + emptyResult := &v1beta1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &v1beta1.ClusterRoleBinding{}) + Invokes(testing.NewRootGetActionWithOptions(clusterrolebindingsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ClusterRoleBinding), err } // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. 
func (c *FakeClusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) { + emptyResult := &v1beta1.ClusterRoleBindingList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(clusterrolebindingsResource, clusterrolebindingsKind, opts), &v1beta1.ClusterRoleBindingList{}) + Invokes(testing.NewRootListActionWithOptions(clusterrolebindingsResource, clusterrolebindingsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeClusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) // Watch returns a watch.Interface that watches the requested clusterRoleBindings. func (c *FakeClusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(clusterrolebindingsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(clusterrolebindingsResource, opts)) } // Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1beta1.ClusterRoleBinding, err error) { + emptyResult := &v1beta1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1beta1.ClusterRoleBinding{}) + Invokes(testing.NewRootCreateActionWithOptions(clusterrolebindingsResource, clusterRoleBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ClusterRoleBinding), err } // Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1beta1.ClusterRoleBinding, err error) { + emptyResult := &v1beta1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1beta1.ClusterRoleBinding{}) + Invokes(testing.NewRootUpdateActionWithOptions(clusterrolebindingsResource, clusterRoleBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ClusterRoleBinding), err } @@ -107,7 +111,7 @@ func (c *FakeClusterRoleBindings) Delete(ctx context.Context, name string, opts // DeleteCollection deletes a collection of objects. func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(clusterrolebindingsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.ClusterRoleBindingList{}) return err @@ -115,10 +119,11 @@ func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts v1. // Patch applies the patch and returns the patched clusterRoleBinding. 
func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) { + emptyResult := &v1beta1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &v1beta1.ClusterRoleBinding{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolebindingsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ClusterRoleBinding), err } @@ -136,10 +141,11 @@ func (c *FakeClusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding if name == nil { return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply") } + emptyResult := &v1beta1.ClusterRoleBinding{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, *name, types.ApplyPatchType, data), &v1beta1.ClusterRoleBinding{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterrolebindingsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.ClusterRoleBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go index 4158cf1d55..aa0fe28a13 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go @@ -44,22 +44,24 @@ var rolesKind = v1beta1.SchemeGroupVersion.WithKind("Role") // Get takes name of the role, and returns the corresponding role object, and an error if there is any. func (c *FakeRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Role, err error) { + emptyResult := &v1beta1.Role{} obj, err := c.Fake. - Invokes(testing.NewGetAction(rolesResource, c.ns, name), &v1beta1.Role{}) + Invokes(testing.NewGetActionWithOptions(rolesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Role), err } // List takes label and field selectors, and returns the list of Roles that match those selectors. func (c *FakeRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleList, err error) { + emptyResult := &v1beta1.RoleList{} obj, err := c.Fake. - Invokes(testing.NewListAction(rolesResource, rolesKind, c.ns, opts), &v1beta1.RoleList{}) + Invokes(testing.NewListActionWithOptions(rolesResource, rolesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1be // Watch returns a watch.Interface that watches the requested roles. func (c *FakeRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(rolesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(rolesResource, c.ns, opts)) } // Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. 
func (c *FakeRoles) Create(ctx context.Context, role *v1beta1.Role, opts v1.CreateOptions) (result *v1beta1.Role, err error) { + emptyResult := &v1beta1.Role{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1beta1.Role{}) + Invokes(testing.NewCreateActionWithOptions(rolesResource, c.ns, role, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Role), err } // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. func (c *FakeRoles) Update(ctx context.Context, role *v1beta1.Role, opts v1.UpdateOptions) (result *v1beta1.Role, err error) { + emptyResult := &v1beta1.Role{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1beta1.Role{}) + Invokes(testing.NewUpdateActionWithOptions(rolesResource, c.ns, role, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Role), err } @@ -114,7 +118,7 @@ func (c *FakeRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptio // DeleteCollection deletes a collection of objects. func (c *FakeRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(rolesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.RoleList{}) return err @@ -122,11 +126,12 @@ func (c *FakeRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, // Patch applies the patch and returns the patched role. func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Role, err error) { + emptyResult := &v1beta1.Role{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &v1beta1.Role{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(rolesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Role), err } @@ -144,11 +149,12 @@ func (c *FakeRoles) Apply(ctx context.Context, role *rbacv1beta1.RoleApplyConfig if name == nil { return nil, fmt.Errorf("role.Name must be provided to Apply") } + emptyResult := &v1beta1.Role{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Role{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(rolesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.Role), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go index 4616f0fd10..26c3c80458 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go @@ -44,22 +44,24 @@ var rolebindingsKind = v1beta1.SchemeGroupVersion.WithKind("RoleBinding") // Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. 
func (c *FakeRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) { + emptyResult := &v1beta1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &v1beta1.RoleBinding{}) + Invokes(testing.NewGetActionWithOptions(rolebindingsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.RoleBinding), err } // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. func (c *FakeRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) { + emptyResult := &v1beta1.RoleBindingList{} obj, err := c.Fake. - Invokes(testing.NewListAction(rolebindingsResource, rolebindingsKind, c.ns, opts), &v1beta1.RoleBindingList{}) + Invokes(testing.NewListActionWithOptions(rolebindingsResource, rolebindingsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeRoleBindings) List(ctx context.Context, opts v1.ListOptions) (resul // Watch returns a watch.Interface that watches the requested roleBindings. func (c *FakeRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(rolebindingsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(rolebindingsResource, c.ns, opts)) } // Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.CreateOptions) (result *v1beta1.RoleBinding, err error) { + emptyResult := &v1beta1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1beta1.RoleBinding{}) + Invokes(testing.NewCreateActionWithOptions(rolebindingsResource, c.ns, roleBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.RoleBinding), err } // Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.UpdateOptions) (result *v1beta1.RoleBinding, err error) { + emptyResult := &v1beta1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1beta1.RoleBinding{}) + Invokes(testing.NewUpdateActionWithOptions(rolebindingsResource, c.ns, roleBinding, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.RoleBinding), err } @@ -114,7 +118,7 @@ func (c *FakeRoleBindings) Delete(ctx context.Context, name string, opts v1.Dele // DeleteCollection deletes a collection of objects. 
func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(rolebindingsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.RoleBindingList{}) return err @@ -122,11 +126,12 @@ func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteO // Patch applies the patch and returns the patched roleBinding. func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RoleBinding, err error) { + emptyResult := &v1beta1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &v1beta1.RoleBinding{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(rolebindingsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.RoleBinding), err } @@ -144,11 +149,12 @@ func (c *FakeRoleBindings) Apply(ctx context.Context, roleBinding *rbacv1beta1.R if name == nil { return nil, fmt.Errorf("roleBinding.Name must be provided to Apply") } + emptyResult := &v1beta1.RoleBinding{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.RoleBinding{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.RoleBinding), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go index e789e42fe7..92e51da1b1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RolesGetter has a method to return a RoleInterface. @@ -55,154 +52,18 @@ type RoleInterface interface { // roles implements RoleInterface type roles struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.Role, *v1beta1.RoleList, *rbacv1beta1.RoleApplyConfiguration] } // newRoles returns a Roles func newRoles(c *RbacV1beta1Client, namespace string) *roles { return &roles{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.Role, *v1beta1.RoleList, *rbacv1beta1.RoleApplyConfiguration]( + "roles", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.Role { return &v1beta1.Role{} }, + func() *v1beta1.RoleList { return &v1beta1.RoleList{} }), } } - -// Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *roles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Role, err error) { - result = &v1beta1.Role{} - err = c.client.Get(). 
- Namespace(c.ns). - Resource("roles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *roles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.RoleList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roles. -func (c *roles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Create(ctx context.Context, role *v1beta1.Role, opts v1.CreateOptions) (result *v1beta1.Role, err error) { - result = &v1beta1.Role{} - err = c.client.Post(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(role). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Update(ctx context.Context, role *v1beta1.Role, opts v1.UpdateOptions) (result *v1beta1.Role, err error) { - result = &v1beta1.Role{} - err = c.client.Put(). - Namespace(c.ns). - Resource("roles"). - Name(role.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(role). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *roles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched role. -func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Role, err error) { - result = &v1beta1.Role{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("roles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied role. 
-func (c *roles) Apply(ctx context.Context, role *rbacv1beta1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Role, err error) { - if role == nil { - return nil, fmt.Errorf("role provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(role) - if err != nil { - return nil, err - } - name := role.Name - if name == nil { - return nil, fmt.Errorf("role.Name must be provided to Apply") - } - result = &v1beta1.Role{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("roles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go index 1461ba3b6e..ad31bd051a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RoleBindingsGetter has a method to return a RoleBindingInterface. @@ -55,154 +52,18 @@ type RoleBindingInterface interface { // roleBindings implements RoleBindingInterface type roleBindings struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.RoleBinding, *v1beta1.RoleBindingList, *rbacv1beta1.RoleBindingApplyConfiguration] } // newRoleBindings returns a RoleBindings func newRoleBindings(c *RbacV1beta1Client, namespace string) *roleBindings { return &roleBindings{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.RoleBinding, *v1beta1.RoleBindingList, *rbacv1beta1.RoleBindingApplyConfiguration]( + "rolebindings", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.RoleBinding { return &v1beta1.RoleBinding{} }, + func() *v1beta1.RoleBindingList { return &v1beta1.RoleBindingList{} }), } } - -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *roleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) { - result = &v1beta1.RoleBinding{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *roleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.RoleBindingList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roleBindings. 
-func (c *roleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Create(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.CreateOptions) (result *v1beta1.RoleBinding, err error) { - result = &v1beta1.RoleBinding{} - err = c.client.Post(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(roleBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Update(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.UpdateOptions) (result *v1beta1.RoleBinding, err error) { - result = &v1beta1.RoleBinding{} - err = c.client.Put(). - Namespace(c.ns). - Resource("rolebindings"). - Name(roleBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(roleBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *roleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched roleBinding. -func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RoleBinding, err error) { - result = &v1beta1.RoleBinding{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied roleBinding. -func (c *roleBindings) Apply(ctx context.Context, roleBinding *rbacv1beta1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.RoleBinding, err error) { - if roleBinding == nil { - return nil, fmt.Errorf("roleBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(roleBinding) - if err != nil { - return nil, err - } - name := roleBinding.Name - if name == nil { - return nil, fmt.Errorf("roleBinding.Name must be provided to Apply") - } - result = &v1beta1.RoleBinding{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("rolebindings"). 
- Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclass.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclass.go deleted file mode 100644 index 4d247c5136..0000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclass.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - testing "k8s.io/client-go/testing" -) - -// FakeResourceClasses implements ResourceClassInterface -type FakeResourceClasses struct { - Fake *FakeResourceV1alpha2 -} - -var resourceclassesResource = v1alpha2.SchemeGroupVersion.WithResource("resourceclasses") - -var resourceclassesKind = v1alpha2.SchemeGroupVersion.WithKind("ResourceClass") - -// Get takes name of the resourceClass, and returns the corresponding resourceClass object, and an error if there is any. -func (c *FakeResourceClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(resourceclassesResource, name), &v1alpha2.ResourceClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClass), err -} - -// List takes label and field selectors, and returns the list of ResourceClasses that match those selectors. -func (c *FakeResourceClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClassList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(resourceclassesResource, resourceclassesKind, opts), &v1alpha2.ResourceClassList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha2.ResourceClassList{ListMeta: obj.(*v1alpha2.ResourceClassList).ListMeta} - for _, item := range obj.(*v1alpha2.ResourceClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested resourceClasses. -func (c *FakeResourceClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(resourceclassesResource, opts)) -} - -// Create takes the representation of a resourceClass and creates it. Returns the server's representation of the resourceClass, and an error, if there is any. 
-func (c *FakeResourceClasses) Create(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.CreateOptions) (result *v1alpha2.ResourceClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(resourceclassesResource, resourceClass), &v1alpha2.ResourceClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClass), err -} - -// Update takes the representation of a resourceClass and updates it. Returns the server's representation of the resourceClass, and an error, if there is any. -func (c *FakeResourceClasses) Update(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.UpdateOptions) (result *v1alpha2.ResourceClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(resourceclassesResource, resourceClass), &v1alpha2.ResourceClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClass), err -} - -// Delete takes name of the resourceClass and deletes it. Returns an error if one occurs. -func (c *FakeResourceClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(resourceclassesResource, name, opts), &v1alpha2.ResourceClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeResourceClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(resourceclassesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha2.ResourceClassList{}) - return err -} - -// Patch applies the patch and returns the patched resourceClass. -func (c *FakeResourceClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(resourceclassesResource, name, pt, data, subresources...), &v1alpha2.ResourceClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClass. -func (c *FakeResourceClasses) Apply(ctx context.Context, resourceClass *resourcev1alpha2.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClass, err error) { - if resourceClass == nil { - return nil, fmt.Errorf("resourceClass provided to Apply must not be nil") - } - data, err := json.Marshal(resourceClass) - if err != nil { - return nil, err - } - name := resourceClass.Name - if name == nil { - return nil, fmt.Errorf("resourceClass.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(resourceclassesResource, *name, types.ApplyPatchType, data), &v1alpha2.ResourceClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClass), err -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go deleted file mode 100644 index 72e81a29e3..0000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go +++ /dev/null @@ -1,256 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PodSchedulingContextsGetter has a method to return a PodSchedulingContextInterface. -// A group's client should implement this interface. -type PodSchedulingContextsGetter interface { - PodSchedulingContexts(namespace string) PodSchedulingContextInterface -} - -// PodSchedulingContextInterface has methods to work with PodSchedulingContext resources. -type PodSchedulingContextInterface interface { - Create(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.CreateOptions) (*v1alpha2.PodSchedulingContext, error) - Update(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha2.PodSchedulingContext, error) - UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha2.PodSchedulingContext, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.PodSchedulingContext, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.PodSchedulingContextList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodSchedulingContext, err error) - Apply(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) - ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) - PodSchedulingContextExpansion -} - -// podSchedulingContexts implements PodSchedulingContextInterface -type podSchedulingContexts struct { - client rest.Interface - ns string -} - -// newPodSchedulingContexts returns a PodSchedulingContexts -func newPodSchedulingContexts(c *ResourceV1alpha2Client, namespace string) *podSchedulingContexts { - return &podSchedulingContexts{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the podSchedulingContext, and returns the corresponding podSchedulingContext object, and an error if there is any. -func (c *podSchedulingContexts) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.PodSchedulingContext, err error) { - result = &v1alpha2.PodSchedulingContext{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podschedulingcontexts"). - Name(name). 
- VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodSchedulingContexts that match those selectors. -func (c *podSchedulingContexts) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.PodSchedulingContextList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha2.PodSchedulingContextList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podschedulingcontexts"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podSchedulingContexts. -func (c *podSchedulingContexts) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("podschedulingcontexts"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podSchedulingContext and creates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any. -func (c *podSchedulingContexts) Create(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.CreateOptions) (result *v1alpha2.PodSchedulingContext, err error) { - result = &v1alpha2.PodSchedulingContext{} - err = c.client.Post(). - Namespace(c.ns). - Resource("podschedulingcontexts"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSchedulingContext). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a podSchedulingContext and updates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any. -func (c *podSchedulingContexts) Update(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha2.PodSchedulingContext, err error) { - result = &v1alpha2.PodSchedulingContext{} - err = c.client.Put(). - Namespace(c.ns). - Resource("podschedulingcontexts"). - Name(podSchedulingContext.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSchedulingContext). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *podSchedulingContexts) UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha2.PodSchedulingContext, err error) { - result = &v1alpha2.PodSchedulingContext{} - err = c.client.Put(). - Namespace(c.ns). - Resource("podschedulingcontexts"). - Name(podSchedulingContext.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSchedulingContext). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podSchedulingContext and deletes it. Returns an error if one occurs. -func (c *podSchedulingContexts) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("podschedulingcontexts"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *podSchedulingContexts) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("podschedulingcontexts"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podSchedulingContext. -func (c *podSchedulingContexts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodSchedulingContext, err error) { - result = &v1alpha2.PodSchedulingContext{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("podschedulingcontexts"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podSchedulingContext. -func (c *podSchedulingContexts) Apply(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) { - if podSchedulingContext == nil { - return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podSchedulingContext) - if err != nil { - return nil, err - } - name := podSchedulingContext.Name - if name == nil { - return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply") - } - result = &v1alpha2.PodSchedulingContext{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("podschedulingcontexts"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *podSchedulingContexts) ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) { - if podSchedulingContext == nil { - return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podSchedulingContext) - if err != nil { - return nil, err - } - - name := podSchedulingContext.Name - if name == nil { - return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply") - } - - result = &v1alpha2.PodSchedulingContext{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("podschedulingcontexts"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go deleted file mode 100644 index cfb27c9db6..0000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go +++ /dev/null @@ -1,256 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ResourceClaimsGetter has a method to return a ResourceClaimInterface. -// A group's client should implement this interface. -type ResourceClaimsGetter interface { - ResourceClaims(namespace string) ResourceClaimInterface -} - -// ResourceClaimInterface has methods to work with ResourceClaim resources. -type ResourceClaimInterface interface { - Create(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.CreateOptions) (*v1alpha2.ResourceClaim, error) - Update(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (*v1alpha2.ResourceClaim, error) - UpdateStatus(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (*v1alpha2.ResourceClaim, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClaim, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClaimList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaim, err error) - Apply(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) - ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) - ResourceClaimExpansion -} - -// resourceClaims implements ResourceClaimInterface -type resourceClaims struct { - client rest.Interface - ns string -} - -// newResourceClaims returns a ResourceClaims -func newResourceClaims(c *ResourceV1alpha2Client, namespace string) *resourceClaims { - return &resourceClaims{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the resourceClaim, and returns the corresponding resourceClaim object, and an error if there is any. -func (c *resourceClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaim, err error) { - result = &v1alpha2.ResourceClaim{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourceclaims"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of ResourceClaims that match those selectors. -func (c *resourceClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha2.ResourceClaimList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourceclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resourceClaims. -func (c *resourceClaims) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("resourceclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a resourceClaim and creates it. Returns the server's representation of the resourceClaim, and an error, if there is any. -func (c *resourceClaims) Create(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.CreateOptions) (result *v1alpha2.ResourceClaim, err error) { - result = &v1alpha2.ResourceClaim{} - err = c.client.Post(). - Namespace(c.ns). - Resource("resourceclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClaim). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a resourceClaim and updates it. Returns the server's representation of the resourceClaim, and an error, if there is any. -func (c *resourceClaims) Update(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaim, err error) { - result = &v1alpha2.ResourceClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourceclaims"). - Name(resourceClaim.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClaim). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *resourceClaims) UpdateStatus(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaim, err error) { - result = &v1alpha2.ResourceClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourceclaims"). - Name(resourceClaim.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClaim). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the resourceClaim and deletes it. Returns an error if one occurs. -func (c *resourceClaims) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("resourceclaims"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *resourceClaims) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("resourceclaims"). 
- VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched resourceClaim. -func (c *resourceClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaim, err error) { - result = &v1alpha2.ResourceClaim{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("resourceclaims"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaim. -func (c *resourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) { - if resourceClaim == nil { - return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceClaim) - if err != nil { - return nil, err - } - name := resourceClaim.Name - if name == nil { - return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") - } - result = &v1alpha2.ResourceClaim{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("resourceclaims"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *resourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) { - if resourceClaim == nil { - return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceClaim) - if err != nil { - return nil, err - } - - name := resourceClaim.Name - if name == nil { - return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") - } - - result = &v1alpha2.ResourceClaim{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("resourceclaims"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go deleted file mode 100644 index 3f4e320064..0000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go +++ /dev/null @@ -1,208 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1alpha2 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ResourceClaimTemplatesGetter has a method to return a ResourceClaimTemplateInterface. -// A group's client should implement this interface. -type ResourceClaimTemplatesGetter interface { - ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface -} - -// ResourceClaimTemplateInterface has methods to work with ResourceClaimTemplate resources. -type ResourceClaimTemplateInterface interface { - Create(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.CreateOptions) (*v1alpha2.ResourceClaimTemplate, error) - Update(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.UpdateOptions) (*v1alpha2.ResourceClaimTemplate, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClaimTemplate, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClaimTemplateList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimTemplate, err error) - Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimTemplate, err error) - ResourceClaimTemplateExpansion -} - -// resourceClaimTemplates implements ResourceClaimTemplateInterface -type resourceClaimTemplates struct { - client rest.Interface - ns string -} - -// newResourceClaimTemplates returns a ResourceClaimTemplates -func newResourceClaimTemplates(c *ResourceV1alpha2Client, namespace string) *resourceClaimTemplates { - return &resourceClaimTemplates{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the resourceClaimTemplate, and returns the corresponding resourceClaimTemplate object, and an error if there is any. -func (c *resourceClaimTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { - result = &v1alpha2.ResourceClaimTemplate{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ResourceClaimTemplates that match those selectors. -func (c *resourceClaimTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimTemplateList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha2.ResourceClaimTemplateList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resourceClaimTemplates. -func (c *resourceClaimTemplates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a resourceClaimTemplate and creates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. -func (c *resourceClaimTemplates) Create(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.CreateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { - result = &v1alpha2.ResourceClaimTemplate{} - err = c.client.Post(). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClaimTemplate). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a resourceClaimTemplate and updates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. -func (c *resourceClaimTemplates) Update(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { - result = &v1alpha2.ResourceClaimTemplate{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - Name(resourceClaimTemplate.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClaimTemplate). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the resourceClaimTemplate and deletes it. Returns an error if one occurs. -func (c *resourceClaimTemplates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *resourceClaimTemplates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched resourceClaimTemplate. -func (c *resourceClaimTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimTemplate, err error) { - result = &v1alpha2.ResourceClaimTemplate{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaimTemplate. 
-func (c *resourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { - if resourceClaimTemplate == nil { - return nil, fmt.Errorf("resourceClaimTemplate provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceClaimTemplate) - if err != nil { - return nil, err - } - name := resourceClaimTemplate.Name - if name == nil { - return nil, fmt.Errorf("resourceClaimTemplate.Name must be provided to Apply") - } - result = &v1alpha2.ResourceClaimTemplate{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go deleted file mode 100644 index 95a4ac5668..0000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ResourceClassesGetter has a method to return a ResourceClassInterface. -// A group's client should implement this interface. -type ResourceClassesGetter interface { - ResourceClasses() ResourceClassInterface -} - -// ResourceClassInterface has methods to work with ResourceClass resources. 
-type ResourceClassInterface interface { - Create(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.CreateOptions) (*v1alpha2.ResourceClass, error) - Update(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.UpdateOptions) (*v1alpha2.ResourceClass, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClass, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClassList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClass, err error) - Apply(ctx context.Context, resourceClass *resourcev1alpha2.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClass, err error) - ResourceClassExpansion -} - -// resourceClasses implements ResourceClassInterface -type resourceClasses struct { - client rest.Interface -} - -// newResourceClasses returns a ResourceClasses -func newResourceClasses(c *ResourceV1alpha2Client) *resourceClasses { - return &resourceClasses{ - client: c.RESTClient(), - } -} - -// Get takes name of the resourceClass, and returns the corresponding resourceClass object, and an error if there is any. -func (c *resourceClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClass, err error) { - result = &v1alpha2.ResourceClass{} - err = c.client.Get(). - Resource("resourceclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ResourceClasses that match those selectors. -func (c *resourceClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha2.ResourceClassList{} - err = c.client.Get(). - Resource("resourceclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resourceClasses. -func (c *resourceClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("resourceclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a resourceClass and creates it. Returns the server's representation of the resourceClass, and an error, if there is any. -func (c *resourceClasses) Create(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.CreateOptions) (result *v1alpha2.ResourceClass, err error) { - result = &v1alpha2.ResourceClass{} - err = c.client.Post(). - Resource("resourceclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a resourceClass and updates it. Returns the server's representation of the resourceClass, and an error, if there is any. 
-func (c *resourceClasses) Update(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.UpdateOptions) (result *v1alpha2.ResourceClass, err error) { - result = &v1alpha2.ResourceClass{} - err = c.client.Put(). - Resource("resourceclasses"). - Name(resourceClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the resourceClass and deletes it. Returns an error if one occurs. -func (c *resourceClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("resourceclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *resourceClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("resourceclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched resourceClass. -func (c *resourceClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClass, err error) { - result = &v1alpha2.ResourceClass{} - err = c.client.Patch(pt). - Resource("resourceclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClass. -func (c *resourceClasses) Apply(ctx context.Context, resourceClass *resourcev1alpha2.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClass, err error) { - if resourceClass == nil { - return nil, fmt.Errorf("resourceClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceClass) - if err != nil { - return nil, err - } - name := resourceClass.Name - if name == nil { - return nil, fmt.Errorf("resourceClass.Name must be provided to Apply") - } - result = &v1alpha2.ResourceClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("resourceclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go new file mode 100644 index 0000000000..35455dfa35 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go @@ -0,0 +1,69 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha3 + +import ( + "context" + + v1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// DeviceClassesGetter has a method to return a DeviceClassInterface. +// A group's client should implement this interface. +type DeviceClassesGetter interface { + DeviceClasses() DeviceClassInterface +} + +// DeviceClassInterface has methods to work with DeviceClass resources. +type DeviceClassInterface interface { + Create(ctx context.Context, deviceClass *v1alpha3.DeviceClass, opts v1.CreateOptions) (*v1alpha3.DeviceClass, error) + Update(ctx context.Context, deviceClass *v1alpha3.DeviceClass, opts v1.UpdateOptions) (*v1alpha3.DeviceClass, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.DeviceClass, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.DeviceClassList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.DeviceClass, err error) + Apply(ctx context.Context, deviceClass *resourcev1alpha3.DeviceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.DeviceClass, err error) + DeviceClassExpansion +} + +// deviceClasses implements DeviceClassInterface +type deviceClasses struct { + *gentype.ClientWithListAndApply[*v1alpha3.DeviceClass, *v1alpha3.DeviceClassList, *resourcev1alpha3.DeviceClassApplyConfiguration] +} + +// newDeviceClasses returns a DeviceClasses +func newDeviceClasses(c *ResourceV1alpha3Client) *deviceClasses { + return &deviceClasses{ + gentype.NewClientWithListAndApply[*v1alpha3.DeviceClass, *v1alpha3.DeviceClassList, *resourcev1alpha3.DeviceClassApplyConfiguration]( + "deviceclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha3.DeviceClass { return &v1alpha3.DeviceClass{} }, + func() *v1alpha3.DeviceClassList { return &v1alpha3.DeviceClassList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/doc.go new file mode 100644 index 0000000000..fdb23fd37c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
+package v1alpha3 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/doc.go new file mode 100644 index 0000000000..16f4439906 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_deviceclass.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_deviceclass.go new file mode 100644 index 0000000000..d96cbd2219 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_deviceclass.go @@ -0,0 +1,151 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + testing "k8s.io/client-go/testing" +) + +// FakeDeviceClasses implements DeviceClassInterface +type FakeDeviceClasses struct { + Fake *FakeResourceV1alpha3 +} + +var deviceclassesResource = v1alpha3.SchemeGroupVersion.WithResource("deviceclasses") + +var deviceclassesKind = v1alpha3.SchemeGroupVersion.WithKind("DeviceClass") + +// Get takes name of the deviceClass, and returns the corresponding deviceClass object, and an error if there is any. +func (c *FakeDeviceClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha3.DeviceClass, err error) { + emptyResult := &v1alpha3.DeviceClass{} + obj, err := c.Fake. + Invokes(testing.NewRootGetActionWithOptions(deviceclassesResource, name, options), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha3.DeviceClass), err +} + +// List takes label and field selectors, and returns the list of DeviceClasses that match those selectors. +func (c *FakeDeviceClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha3.DeviceClassList, err error) { + emptyResult := &v1alpha3.DeviceClassList{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootListActionWithOptions(deviceclassesResource, deviceclassesKind, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha3.DeviceClassList{ListMeta: obj.(*v1alpha3.DeviceClassList).ListMeta} + for _, item := range obj.(*v1alpha3.DeviceClassList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested deviceClasses. +func (c *FakeDeviceClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchActionWithOptions(deviceclassesResource, opts)) +} + +// Create takes the representation of a deviceClass and creates it. Returns the server's representation of the deviceClass, and an error, if there is any. +func (c *FakeDeviceClasses) Create(ctx context.Context, deviceClass *v1alpha3.DeviceClass, opts v1.CreateOptions) (result *v1alpha3.DeviceClass, err error) { + emptyResult := &v1alpha3.DeviceClass{} + obj, err := c.Fake. + Invokes(testing.NewRootCreateActionWithOptions(deviceclassesResource, deviceClass, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha3.DeviceClass), err +} + +// Update takes the representation of a deviceClass and updates it. Returns the server's representation of the deviceClass, and an error, if there is any. +func (c *FakeDeviceClasses) Update(ctx context.Context, deviceClass *v1alpha3.DeviceClass, opts v1.UpdateOptions) (result *v1alpha3.DeviceClass, err error) { + emptyResult := &v1alpha3.DeviceClass{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateActionWithOptions(deviceclassesResource, deviceClass, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha3.DeviceClass), err +} + +// Delete takes name of the deviceClass and deletes it. Returns an error if one occurs. +func (c *FakeDeviceClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(deviceclassesResource, name, opts), &v1alpha3.DeviceClass{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeDeviceClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionActionWithOptions(deviceclassesResource, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha3.DeviceClassList{}) + return err +} + +// Patch applies the patch and returns the patched deviceClass. +func (c *FakeDeviceClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.DeviceClass, err error) { + emptyResult := &v1alpha3.DeviceClass{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(deviceclassesResource, name, pt, data, opts, subresources...), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha3.DeviceClass), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied deviceClass. 
+func (c *FakeDeviceClasses) Apply(ctx context.Context, deviceClass *resourcev1alpha3.DeviceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.DeviceClass, err error) { + if deviceClass == nil { + return nil, fmt.Errorf("deviceClass provided to Apply must not be nil") + } + data, err := json.Marshal(deviceClass) + if err != nil { + return nil, err + } + name := deviceClass.Name + if name == nil { + return nil, fmt.Errorf("deviceClass.Name must be provided to Apply") + } + emptyResult := &v1alpha3.DeviceClass{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(deviceclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha3.DeviceClass), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_podschedulingcontext.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_podschedulingcontext.go similarity index 56% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_podschedulingcontext.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_podschedulingcontext.go index 54882f8175..54898993e5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_podschedulingcontext.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_podschedulingcontext.go @@ -23,51 +23,53 @@ import ( json "encoding/json" "fmt" - v1alpha2 "k8s.io/api/resource/v1alpha2" + v1alpha3 "k8s.io/api/resource/v1alpha3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" testing "k8s.io/client-go/testing" ) // FakePodSchedulingContexts implements PodSchedulingContextInterface type FakePodSchedulingContexts struct { - Fake *FakeResourceV1alpha2 + Fake *FakeResourceV1alpha3 ns string } -var podschedulingcontextsResource = v1alpha2.SchemeGroupVersion.WithResource("podschedulingcontexts") +var podschedulingcontextsResource = v1alpha3.SchemeGroupVersion.WithResource("podschedulingcontexts") -var podschedulingcontextsKind = v1alpha2.SchemeGroupVersion.WithKind("PodSchedulingContext") +var podschedulingcontextsKind = v1alpha3.SchemeGroupVersion.WithKind("PodSchedulingContext") // Get takes name of the podSchedulingContext, and returns the corresponding podSchedulingContext object, and an error if there is any. -func (c *FakePodSchedulingContexts) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.PodSchedulingContext, err error) { +func (c *FakePodSchedulingContexts) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha3.PodSchedulingContext, err error) { + emptyResult := &v1alpha3.PodSchedulingContext{} obj, err := c.Fake. - Invokes(testing.NewGetAction(podschedulingcontextsResource, c.ns, name), &v1alpha2.PodSchedulingContext{}) + Invokes(testing.NewGetActionWithOptions(podschedulingcontextsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.PodSchedulingContext), err + return obj.(*v1alpha3.PodSchedulingContext), err } // List takes label and field selectors, and returns the list of PodSchedulingContexts that match those selectors. 
-func (c *FakePodSchedulingContexts) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.PodSchedulingContextList, err error) { +func (c *FakePodSchedulingContexts) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha3.PodSchedulingContextList, err error) { + emptyResult := &v1alpha3.PodSchedulingContextList{} obj, err := c.Fake. - Invokes(testing.NewListAction(podschedulingcontextsResource, podschedulingcontextsKind, c.ns, opts), &v1alpha2.PodSchedulingContextList{}) + Invokes(testing.NewListActionWithOptions(podschedulingcontextsResource, podschedulingcontextsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) if label == nil { label = labels.Everything() } - list := &v1alpha2.PodSchedulingContextList{ListMeta: obj.(*v1alpha2.PodSchedulingContextList).ListMeta} - for _, item := range obj.(*v1alpha2.PodSchedulingContextList).Items { + list := &v1alpha3.PodSchedulingContextList{ListMeta: obj.(*v1alpha3.PodSchedulingContextList).ListMeta} + for _, item := range obj.(*v1alpha3.PodSchedulingContextList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -78,73 +80,77 @@ func (c *FakePodSchedulingContexts) List(ctx context.Context, opts v1.ListOption // Watch returns a watch.Interface that watches the requested podSchedulingContexts. func (c *FakePodSchedulingContexts) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(podschedulingcontextsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(podschedulingcontextsResource, c.ns, opts)) } // Create takes the representation of a podSchedulingContext and creates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any. -func (c *FakePodSchedulingContexts) Create(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.CreateOptions) (result *v1alpha2.PodSchedulingContext, err error) { +func (c *FakePodSchedulingContexts) Create(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.CreateOptions) (result *v1alpha3.PodSchedulingContext, err error) { + emptyResult := &v1alpha3.PodSchedulingContext{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(podschedulingcontextsResource, c.ns, podSchedulingContext), &v1alpha2.PodSchedulingContext{}) + Invokes(testing.NewCreateActionWithOptions(podschedulingcontextsResource, c.ns, podSchedulingContext, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.PodSchedulingContext), err + return obj.(*v1alpha3.PodSchedulingContext), err } // Update takes the representation of a podSchedulingContext and updates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any. -func (c *FakePodSchedulingContexts) Update(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha2.PodSchedulingContext, err error) { +func (c *FakePodSchedulingContexts) Update(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha3.PodSchedulingContext, err error) { + emptyResult := &v1alpha3.PodSchedulingContext{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(podschedulingcontextsResource, c.ns, podSchedulingContext), &v1alpha2.PodSchedulingContext{}) + Invokes(testing.NewUpdateActionWithOptions(podschedulingcontextsResource, c.ns, podSchedulingContext, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.PodSchedulingContext), err + return obj.(*v1alpha3.PodSchedulingContext), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePodSchedulingContexts) UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha2.PodSchedulingContext, error) { +func (c *FakePodSchedulingContexts) UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha3.PodSchedulingContext, err error) { + emptyResult := &v1alpha3.PodSchedulingContext{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(podschedulingcontextsResource, "status", c.ns, podSchedulingContext), &v1alpha2.PodSchedulingContext{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(podschedulingcontextsResource, "status", c.ns, podSchedulingContext, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.PodSchedulingContext), err + return obj.(*v1alpha3.PodSchedulingContext), err } // Delete takes name of the podSchedulingContext and deletes it. Returns an error if one occurs. func (c *FakePodSchedulingContexts) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(podschedulingcontextsResource, c.ns, name, opts), &v1alpha2.PodSchedulingContext{}) + Invokes(testing.NewDeleteActionWithOptions(podschedulingcontextsResource, c.ns, name, opts), &v1alpha3.PodSchedulingContext{}) return err } // DeleteCollection deletes a collection of objects. func (c *FakePodSchedulingContexts) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(podschedulingcontextsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(podschedulingcontextsResource, c.ns, opts, listOpts) - _, err := c.Fake.Invokes(action, &v1alpha2.PodSchedulingContextList{}) + _, err := c.Fake.Invokes(action, &v1alpha3.PodSchedulingContextList{}) return err } // Patch applies the patch and returns the patched podSchedulingContext. -func (c *FakePodSchedulingContexts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodSchedulingContext, err error) { +func (c *FakePodSchedulingContexts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.PodSchedulingContext, err error) { + emptyResult := &v1alpha3.PodSchedulingContext{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(podschedulingcontextsResource, c.ns, name, pt, data, subresources...), &v1alpha2.PodSchedulingContext{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(podschedulingcontextsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.PodSchedulingContext), err + return obj.(*v1alpha3.PodSchedulingContext), err } // Apply takes the given apply declarative configuration, applies it and returns the applied podSchedulingContext. -func (c *FakePodSchedulingContexts) Apply(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) { +func (c *FakePodSchedulingContexts) Apply(ctx context.Context, podSchedulingContext *resourcev1alpha3.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.PodSchedulingContext, err error) { if podSchedulingContext == nil { return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil") } @@ -156,18 +162,19 @@ func (c *FakePodSchedulingContexts) Apply(ctx context.Context, podSchedulingCont if name == nil { return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply") } + emptyResult := &v1alpha3.PodSchedulingContext{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podschedulingcontextsResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha2.PodSchedulingContext{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(podschedulingcontextsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.PodSchedulingContext), err + return obj.(*v1alpha3.PodSchedulingContext), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakePodSchedulingContexts) ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) { +func (c *FakePodSchedulingContexts) ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha3.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.PodSchedulingContext, err error) { if podSchedulingContext == nil { return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil") } @@ -179,11 +186,12 @@ func (c *FakePodSchedulingContexts) ApplyStatus(ctx context.Context, podScheduli if name == nil { return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply") } + emptyResult := &v1alpha3.PodSchedulingContext{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(podschedulingcontextsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1alpha2.PodSchedulingContext{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(podschedulingcontextsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.PodSchedulingContext), err + return obj.(*v1alpha3.PodSchedulingContext), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resource_client.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resource_client.go similarity index 59% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resource_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resource_client.go index 2053877320..4523d9f09c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resource_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resource_client.go @@ -19,34 +19,38 @@ limitations under the License. package fake import ( - v1alpha2 "k8s.io/client-go/kubernetes/typed/resource/v1alpha2" + v1alpha3 "k8s.io/client-go/kubernetes/typed/resource/v1alpha3" rest "k8s.io/client-go/rest" testing "k8s.io/client-go/testing" ) -type FakeResourceV1alpha2 struct { +type FakeResourceV1alpha3 struct { *testing.Fake } -func (c *FakeResourceV1alpha2) PodSchedulingContexts(namespace string) v1alpha2.PodSchedulingContextInterface { +func (c *FakeResourceV1alpha3) DeviceClasses() v1alpha3.DeviceClassInterface { + return &FakeDeviceClasses{c} +} + +func (c *FakeResourceV1alpha3) PodSchedulingContexts(namespace string) v1alpha3.PodSchedulingContextInterface { return &FakePodSchedulingContexts{c, namespace} } -func (c *FakeResourceV1alpha2) ResourceClaims(namespace string) v1alpha2.ResourceClaimInterface { +func (c *FakeResourceV1alpha3) ResourceClaims(namespace string) v1alpha3.ResourceClaimInterface { return &FakeResourceClaims{c, namespace} } -func (c *FakeResourceV1alpha2) ResourceClaimTemplates(namespace string) v1alpha2.ResourceClaimTemplateInterface { +func (c *FakeResourceV1alpha3) ResourceClaimTemplates(namespace string) v1alpha3.ResourceClaimTemplateInterface { return &FakeResourceClaimTemplates{c, namespace} } -func (c *FakeResourceV1alpha2) ResourceClasses() v1alpha2.ResourceClassInterface { - return &FakeResourceClasses{c} +func (c *FakeResourceV1alpha3) ResourceSlices() v1alpha3.ResourceSliceInterface { + return &FakeResourceSlices{c} } // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. 
-func (c *FakeResourceV1alpha2) RESTClient() rest.Interface { +func (c *FakeResourceV1alpha3) RESTClient() rest.Interface { var ret *rest.RESTClient return ret } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaim.go similarity index 57% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaim.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaim.go index 087e51f714..db38b3d60c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaim.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaim.go @@ -23,51 +23,53 @@ import ( json "encoding/json" "fmt" - v1alpha2 "k8s.io/api/resource/v1alpha2" + v1alpha3 "k8s.io/api/resource/v1alpha3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" testing "k8s.io/client-go/testing" ) // FakeResourceClaims implements ResourceClaimInterface type FakeResourceClaims struct { - Fake *FakeResourceV1alpha2 + Fake *FakeResourceV1alpha3 ns string } -var resourceclaimsResource = v1alpha2.SchemeGroupVersion.WithResource("resourceclaims") +var resourceclaimsResource = v1alpha3.SchemeGroupVersion.WithResource("resourceclaims") -var resourceclaimsKind = v1alpha2.SchemeGroupVersion.WithKind("ResourceClaim") +var resourceclaimsKind = v1alpha3.SchemeGroupVersion.WithKind("ResourceClaim") // Get takes name of the resourceClaim, and returns the corresponding resourceClaim object, and an error if there is any. -func (c *FakeResourceClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaim, err error) { +func (c *FakeResourceClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha3.ResourceClaim, err error) { + emptyResult := &v1alpha3.ResourceClaim{} obj, err := c.Fake. - Invokes(testing.NewGetAction(resourceclaimsResource, c.ns, name), &v1alpha2.ResourceClaim{}) + Invokes(testing.NewGetActionWithOptions(resourceclaimsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceClaim), err + return obj.(*v1alpha3.ResourceClaim), err } // List takes label and field selectors, and returns the list of ResourceClaims that match those selectors. -func (c *FakeResourceClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimList, err error) { +func (c *FakeResourceClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha3.ResourceClaimList, err error) { + emptyResult := &v1alpha3.ResourceClaimList{} obj, err := c.Fake. 
- Invokes(testing.NewListAction(resourceclaimsResource, resourceclaimsKind, c.ns, opts), &v1alpha2.ResourceClaimList{}) + Invokes(testing.NewListActionWithOptions(resourceclaimsResource, resourceclaimsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) if label == nil { label = labels.Everything() } - list := &v1alpha2.ResourceClaimList{ListMeta: obj.(*v1alpha2.ResourceClaimList).ListMeta} - for _, item := range obj.(*v1alpha2.ResourceClaimList).Items { + list := &v1alpha3.ResourceClaimList{ListMeta: obj.(*v1alpha3.ResourceClaimList).ListMeta} + for _, item := range obj.(*v1alpha3.ResourceClaimList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -78,73 +80,77 @@ func (c *FakeResourceClaims) List(ctx context.Context, opts v1.ListOptions) (res // Watch returns a watch.Interface that watches the requested resourceClaims. func (c *FakeResourceClaims) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(resourceclaimsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(resourceclaimsResource, c.ns, opts)) } // Create takes the representation of a resourceClaim and creates it. Returns the server's representation of the resourceClaim, and an error, if there is any. -func (c *FakeResourceClaims) Create(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.CreateOptions) (result *v1alpha2.ResourceClaim, err error) { +func (c *FakeResourceClaims) Create(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.CreateOptions) (result *v1alpha3.ResourceClaim, err error) { + emptyResult := &v1alpha3.ResourceClaim{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(resourceclaimsResource, c.ns, resourceClaim), &v1alpha2.ResourceClaim{}) + Invokes(testing.NewCreateActionWithOptions(resourceclaimsResource, c.ns, resourceClaim, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceClaim), err + return obj.(*v1alpha3.ResourceClaim), err } // Update takes the representation of a resourceClaim and updates it. Returns the server's representation of the resourceClaim, and an error, if there is any. -func (c *FakeResourceClaims) Update(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaim, err error) { +func (c *FakeResourceClaims) Update(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha3.ResourceClaim, err error) { + emptyResult := &v1alpha3.ResourceClaim{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(resourceclaimsResource, c.ns, resourceClaim), &v1alpha2.ResourceClaim{}) + Invokes(testing.NewUpdateActionWithOptions(resourceclaimsResource, c.ns, resourceClaim, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceClaim), err + return obj.(*v1alpha3.ResourceClaim), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakeResourceClaims) UpdateStatus(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (*v1alpha2.ResourceClaim, error) { +func (c *FakeResourceClaims) UpdateStatus(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha3.ResourceClaim, err error) { + emptyResult := &v1alpha3.ResourceClaim{} obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(resourceclaimsResource, "status", c.ns, resourceClaim), &v1alpha2.ResourceClaim{}) + Invokes(testing.NewUpdateSubresourceActionWithOptions(resourceclaimsResource, "status", c.ns, resourceClaim, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceClaim), err + return obj.(*v1alpha3.ResourceClaim), err } // Delete takes name of the resourceClaim and deletes it. Returns an error if one occurs. func (c *FakeResourceClaims) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(resourceclaimsResource, c.ns, name, opts), &v1alpha2.ResourceClaim{}) + Invokes(testing.NewDeleteActionWithOptions(resourceclaimsResource, c.ns, name, opts), &v1alpha3.ResourceClaim{}) return err } // DeleteCollection deletes a collection of objects. func (c *FakeResourceClaims) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(resourceclaimsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(resourceclaimsResource, c.ns, opts, listOpts) - _, err := c.Fake.Invokes(action, &v1alpha2.ResourceClaimList{}) + _, err := c.Fake.Invokes(action, &v1alpha3.ResourceClaimList{}) return err } // Patch applies the patch and returns the patched resourceClaim. -func (c *FakeResourceClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaim, err error) { +func (c *FakeResourceClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceClaim, err error) { + emptyResult := &v1alpha3.ResourceClaim{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourceclaimsResource, c.ns, name, pt, data, subresources...), &v1alpha2.ResourceClaim{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(resourceclaimsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceClaim), err + return obj.(*v1alpha3.ResourceClaim), err } // Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaim. 
-func (c *FakeResourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) { +func (c *FakeResourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaim, err error) { if resourceClaim == nil { return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") } @@ -156,18 +162,19 @@ func (c *FakeResourceClaims) Apply(ctx context.Context, resourceClaim *resourcev if name == nil { return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") } + emptyResult := &v1alpha3.ResourceClaim{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourceclaimsResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha2.ResourceClaim{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(resourceclaimsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceClaim), err + return obj.(*v1alpha3.ResourceClaim), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeResourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) { +func (c *FakeResourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaim, err error) { if resourceClaim == nil { return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") } @@ -179,11 +186,12 @@ func (c *FakeResourceClaims) ApplyStatus(ctx context.Context, resourceClaim *res if name == nil { return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") } + emptyResult := &v1alpha3.ResourceClaim{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(resourceclaimsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1alpha2.ResourceClaim{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(resourceclaimsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceClaim), err + return obj.(*v1alpha3.ResourceClaim), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaimtemplate.go similarity index 58% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimtemplate.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaimtemplate.go index 2a1b4554eb..28db7261f9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimtemplate.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaimtemplate.go @@ -23,51 +23,53 @@ import ( json "encoding/json" "fmt" - v1alpha2 "k8s.io/api/resource/v1alpha2" + v1alpha3 "k8s.io/api/resource/v1alpha3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" testing "k8s.io/client-go/testing" ) // FakeResourceClaimTemplates implements ResourceClaimTemplateInterface type FakeResourceClaimTemplates struct { - Fake *FakeResourceV1alpha2 + Fake *FakeResourceV1alpha3 ns string } -var resourceclaimtemplatesResource = v1alpha2.SchemeGroupVersion.WithResource("resourceclaimtemplates") +var resourceclaimtemplatesResource = v1alpha3.SchemeGroupVersion.WithResource("resourceclaimtemplates") -var resourceclaimtemplatesKind = v1alpha2.SchemeGroupVersion.WithKind("ResourceClaimTemplate") +var resourceclaimtemplatesKind = v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimTemplate") // Get takes name of the resourceClaimTemplate, and returns the corresponding resourceClaimTemplate object, and an error if there is any. -func (c *FakeResourceClaimTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { +func (c *FakeResourceClaimTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha3.ResourceClaimTemplate, err error) { + emptyResult := &v1alpha3.ResourceClaimTemplate{} obj, err := c.Fake. - Invokes(testing.NewGetAction(resourceclaimtemplatesResource, c.ns, name), &v1alpha2.ResourceClaimTemplate{}) + Invokes(testing.NewGetActionWithOptions(resourceclaimtemplatesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceClaimTemplate), err + return obj.(*v1alpha3.ResourceClaimTemplate), err } // List takes label and field selectors, and returns the list of ResourceClaimTemplates that match those selectors. 
-func (c *FakeResourceClaimTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimTemplateList, err error) { +func (c *FakeResourceClaimTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha3.ResourceClaimTemplateList, err error) { + emptyResult := &v1alpha3.ResourceClaimTemplateList{} obj, err := c.Fake. - Invokes(testing.NewListAction(resourceclaimtemplatesResource, resourceclaimtemplatesKind, c.ns, opts), &v1alpha2.ResourceClaimTemplateList{}) + Invokes(testing.NewListActionWithOptions(resourceclaimtemplatesResource, resourceclaimtemplatesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) if label == nil { label = labels.Everything() } - list := &v1alpha2.ResourceClaimTemplateList{ListMeta: obj.(*v1alpha2.ResourceClaimTemplateList).ListMeta} - for _, item := range obj.(*v1alpha2.ResourceClaimTemplateList).Items { + list := &v1alpha3.ResourceClaimTemplateList{ListMeta: obj.(*v1alpha3.ResourceClaimTemplateList).ListMeta} + for _, item := range obj.(*v1alpha3.ResourceClaimTemplateList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -78,61 +80,64 @@ func (c *FakeResourceClaimTemplates) List(ctx context.Context, opts v1.ListOptio // Watch returns a watch.Interface that watches the requested resourceClaimTemplates. func (c *FakeResourceClaimTemplates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(resourceclaimtemplatesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(resourceclaimtemplatesResource, c.ns, opts)) } // Create takes the representation of a resourceClaimTemplate and creates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. -func (c *FakeResourceClaimTemplates) Create(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.CreateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { +func (c *FakeResourceClaimTemplates) Create(ctx context.Context, resourceClaimTemplate *v1alpha3.ResourceClaimTemplate, opts v1.CreateOptions) (result *v1alpha3.ResourceClaimTemplate, err error) { + emptyResult := &v1alpha3.ResourceClaimTemplate{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(resourceclaimtemplatesResource, c.ns, resourceClaimTemplate), &v1alpha2.ResourceClaimTemplate{}) + Invokes(testing.NewCreateActionWithOptions(resourceclaimtemplatesResource, c.ns, resourceClaimTemplate, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceClaimTemplate), err + return obj.(*v1alpha3.ResourceClaimTemplate), err } // Update takes the representation of a resourceClaimTemplate and updates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. -func (c *FakeResourceClaimTemplates) Update(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { +func (c *FakeResourceClaimTemplates) Update(ctx context.Context, resourceClaimTemplate *v1alpha3.ResourceClaimTemplate, opts v1.UpdateOptions) (result *v1alpha3.ResourceClaimTemplate, err error) { + emptyResult := &v1alpha3.ResourceClaimTemplate{} obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(resourceclaimtemplatesResource, c.ns, resourceClaimTemplate), &v1alpha2.ResourceClaimTemplate{}) + Invokes(testing.NewUpdateActionWithOptions(resourceclaimtemplatesResource, c.ns, resourceClaimTemplate, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceClaimTemplate), err + return obj.(*v1alpha3.ResourceClaimTemplate), err } // Delete takes name of the resourceClaimTemplate and deletes it. Returns an error if one occurs. func (c *FakeResourceClaimTemplates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(resourceclaimtemplatesResource, c.ns, name, opts), &v1alpha2.ResourceClaimTemplate{}) + Invokes(testing.NewDeleteActionWithOptions(resourceclaimtemplatesResource, c.ns, name, opts), &v1alpha3.ResourceClaimTemplate{}) return err } // DeleteCollection deletes a collection of objects. func (c *FakeResourceClaimTemplates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(resourceclaimtemplatesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(resourceclaimtemplatesResource, c.ns, opts, listOpts) - _, err := c.Fake.Invokes(action, &v1alpha2.ResourceClaimTemplateList{}) + _, err := c.Fake.Invokes(action, &v1alpha3.ResourceClaimTemplateList{}) return err } // Patch applies the patch and returns the patched resourceClaimTemplate. -func (c *FakeResourceClaimTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimTemplate, err error) { +func (c *FakeResourceClaimTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceClaimTemplate, err error) { + emptyResult := &v1alpha3.ResourceClaimTemplate{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourceclaimtemplatesResource, c.ns, name, pt, data, subresources...), &v1alpha2.ResourceClaimTemplate{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(resourceclaimtemplatesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceClaimTemplate), err + return obj.(*v1alpha3.ResourceClaimTemplate), err } // Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaimTemplate. -func (c *FakeResourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { +func (c *FakeResourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaimTemplate, err error) { if resourceClaimTemplate == nil { return nil, fmt.Errorf("resourceClaimTemplate provided to Apply must not be nil") } @@ -144,11 +149,12 @@ func (c *FakeResourceClaimTemplates) Apply(ctx context.Context, resourceClaimTem if name == nil { return nil, fmt.Errorf("resourceClaimTemplate.Name must be provided to Apply") } + emptyResult := &v1alpha3.ResourceClaimTemplate{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(resourceclaimtemplatesResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha2.ResourceClaimTemplate{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(resourceclaimtemplatesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } - return obj.(*v1alpha2.ResourceClaimTemplate), err + return obj.(*v1alpha3.ResourceClaimTemplate), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceslice.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceslice.go new file mode 100644 index 0000000000..c355fc454a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceslice.go @@ -0,0 +1,151 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + testing "k8s.io/client-go/testing" +) + +// FakeResourceSlices implements ResourceSliceInterface +type FakeResourceSlices struct { + Fake *FakeResourceV1alpha3 +} + +var resourceslicesResource = v1alpha3.SchemeGroupVersion.WithResource("resourceslices") + +var resourceslicesKind = v1alpha3.SchemeGroupVersion.WithKind("ResourceSlice") + +// Get takes name of the resourceSlice, and returns the corresponding resourceSlice object, and an error if there is any. +func (c *FakeResourceSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha3.ResourceSlice, err error) { + emptyResult := &v1alpha3.ResourceSlice{} + obj, err := c.Fake. + Invokes(testing.NewRootGetActionWithOptions(resourceslicesResource, name, options), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha3.ResourceSlice), err +} + +// List takes label and field selectors, and returns the list of ResourceSlices that match those selectors. +func (c *FakeResourceSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha3.ResourceSliceList, err error) { + emptyResult := &v1alpha3.ResourceSliceList{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootListActionWithOptions(resourceslicesResource, resourceslicesKind, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha3.ResourceSliceList{ListMeta: obj.(*v1alpha3.ResourceSliceList).ListMeta} + for _, item := range obj.(*v1alpha3.ResourceSliceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested resourceSlices. +func (c *FakeResourceSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchActionWithOptions(resourceslicesResource, opts)) +} + +// Create takes the representation of a resourceSlice and creates it. Returns the server's representation of the resourceSlice, and an error, if there is any. +func (c *FakeResourceSlices) Create(ctx context.Context, resourceSlice *v1alpha3.ResourceSlice, opts v1.CreateOptions) (result *v1alpha3.ResourceSlice, err error) { + emptyResult := &v1alpha3.ResourceSlice{} + obj, err := c.Fake. + Invokes(testing.NewRootCreateActionWithOptions(resourceslicesResource, resourceSlice, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha3.ResourceSlice), err +} + +// Update takes the representation of a resourceSlice and updates it. Returns the server's representation of the resourceSlice, and an error, if there is any. +func (c *FakeResourceSlices) Update(ctx context.Context, resourceSlice *v1alpha3.ResourceSlice, opts v1.UpdateOptions) (result *v1alpha3.ResourceSlice, err error) { + emptyResult := &v1alpha3.ResourceSlice{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateActionWithOptions(resourceslicesResource, resourceSlice, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha3.ResourceSlice), err +} + +// Delete takes name of the resourceSlice and deletes it. Returns an error if one occurs. +func (c *FakeResourceSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(resourceslicesResource, name, opts), &v1alpha3.ResourceSlice{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeResourceSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionActionWithOptions(resourceslicesResource, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha3.ResourceSliceList{}) + return err +} + +// Patch applies the patch and returns the patched resourceSlice. +func (c *FakeResourceSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceSlice, err error) { + emptyResult := &v1alpha3.ResourceSlice{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(resourceslicesResource, name, pt, data, opts, subresources...), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha3.ResourceSlice), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied resourceSlice. 
+func (c *FakeResourceSlices) Apply(ctx context.Context, resourceSlice *resourcev1alpha3.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceSlice, err error) { + if resourceSlice == nil { + return nil, fmt.Errorf("resourceSlice provided to Apply must not be nil") + } + data, err := json.Marshal(resourceSlice) + if err != nil { + return nil, err + } + name := resourceSlice.Name + if name == nil { + return nil, fmt.Errorf("resourceSlice.Name must be provided to Apply") + } + emptyResult := &v1alpha3.ResourceSlice{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(resourceslicesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha3.ResourceSlice), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go similarity index 88% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go index 2c02e9ce74..747e564b76 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go @@ -16,7 +16,9 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 + +type DeviceClassExpansion interface{} type PodSchedulingContextExpansion interface{} @@ -24,4 +26,4 @@ type ResourceClaimExpansion interface{} type ResourceClaimTemplateExpansion interface{} -type ResourceClassExpansion interface{} +type ResourceSliceExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/podschedulingcontext.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/podschedulingcontext.go new file mode 100644 index 0000000000..af59843212 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/podschedulingcontext.go @@ -0,0 +1,73 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + "context" + + v1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// PodSchedulingContextsGetter has a method to return a PodSchedulingContextInterface. +// A group's client should implement this interface. +type PodSchedulingContextsGetter interface { + PodSchedulingContexts(namespace string) PodSchedulingContextInterface +} + +// PodSchedulingContextInterface has methods to work with PodSchedulingContext resources. 
+type PodSchedulingContextInterface interface { + Create(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.CreateOptions) (*v1alpha3.PodSchedulingContext, error) + Update(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha3.PodSchedulingContext, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha3.PodSchedulingContext, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.PodSchedulingContext, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.PodSchedulingContextList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.PodSchedulingContext, err error) + Apply(ctx context.Context, podSchedulingContext *resourcev1alpha3.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.PodSchedulingContext, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha3.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.PodSchedulingContext, err error) + PodSchedulingContextExpansion +} + +// podSchedulingContexts implements PodSchedulingContextInterface +type podSchedulingContexts struct { + *gentype.ClientWithListAndApply[*v1alpha3.PodSchedulingContext, *v1alpha3.PodSchedulingContextList, *resourcev1alpha3.PodSchedulingContextApplyConfiguration] +} + +// newPodSchedulingContexts returns a PodSchedulingContexts +func newPodSchedulingContexts(c *ResourceV1alpha3Client, namespace string) *podSchedulingContexts { + return &podSchedulingContexts{ + gentype.NewClientWithListAndApply[*v1alpha3.PodSchedulingContext, *v1alpha3.PodSchedulingContextList, *resourcev1alpha3.PodSchedulingContextApplyConfiguration]( + "podschedulingcontexts", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha3.PodSchedulingContext { return &v1alpha3.PodSchedulingContext{} }, + func() *v1alpha3.PodSchedulingContextList { return &v1alpha3.PodSchedulingContextList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go similarity index 65% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go index d5795fd628..879f0990d7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go @@ -16,49 +16,54 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. 
-package v1alpha2 +package v1alpha3 import ( "net/http" - v1alpha2 "k8s.io/api/resource/v1alpha2" + v1alpha3 "k8s.io/api/resource/v1alpha3" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) -type ResourceV1alpha2Interface interface { +type ResourceV1alpha3Interface interface { RESTClient() rest.Interface + DeviceClassesGetter PodSchedulingContextsGetter ResourceClaimsGetter ResourceClaimTemplatesGetter - ResourceClassesGetter + ResourceSlicesGetter } -// ResourceV1alpha2Client is used to interact with features provided by the resource.k8s.io group. -type ResourceV1alpha2Client struct { +// ResourceV1alpha3Client is used to interact with features provided by the resource.k8s.io group. +type ResourceV1alpha3Client struct { restClient rest.Interface } -func (c *ResourceV1alpha2Client) PodSchedulingContexts(namespace string) PodSchedulingContextInterface { +func (c *ResourceV1alpha3Client) DeviceClasses() DeviceClassInterface { + return newDeviceClasses(c) +} + +func (c *ResourceV1alpha3Client) PodSchedulingContexts(namespace string) PodSchedulingContextInterface { return newPodSchedulingContexts(c, namespace) } -func (c *ResourceV1alpha2Client) ResourceClaims(namespace string) ResourceClaimInterface { +func (c *ResourceV1alpha3Client) ResourceClaims(namespace string) ResourceClaimInterface { return newResourceClaims(c, namespace) } -func (c *ResourceV1alpha2Client) ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface { +func (c *ResourceV1alpha3Client) ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface { return newResourceClaimTemplates(c, namespace) } -func (c *ResourceV1alpha2Client) ResourceClasses() ResourceClassInterface { - return newResourceClasses(c) +func (c *ResourceV1alpha3Client) ResourceSlices() ResourceSliceInterface { + return newResourceSlices(c) } -// NewForConfig creates a new ResourceV1alpha2Client for the given config. +// NewForConfig creates a new ResourceV1alpha3Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). -func NewForConfig(c *rest.Config) (*ResourceV1alpha2Client, error) { +func NewForConfig(c *rest.Config) (*ResourceV1alpha3Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -70,9 +75,9 @@ func NewForConfig(c *rest.Config) (*ResourceV1alpha2Client, error) { return NewForConfigAndClient(&config, httpClient) } -// NewForConfigAndClient creates a new ResourceV1alpha2Client for the given config and http client. +// NewForConfigAndClient creates a new ResourceV1alpha3Client for the given config and http client. // Note the http client provided takes precedence over the configured transport values. -func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1alpha2Client, error) { +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1alpha3Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -81,12 +86,12 @@ func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1alpha2Cli if err != nil { return nil, err } - return &ResourceV1alpha2Client{client}, nil + return &ResourceV1alpha3Client{client}, nil } -// NewForConfigOrDie creates a new ResourceV1alpha2Client for the given config and +// NewForConfigOrDie creates a new ResourceV1alpha3Client for the given config and // panics if there is an error in the config. 
-func NewForConfigOrDie(c *rest.Config) *ResourceV1alpha2Client { +func NewForConfigOrDie(c *rest.Config) *ResourceV1alpha3Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -94,13 +99,13 @@ func NewForConfigOrDie(c *rest.Config) *ResourceV1alpha2Client { return client } -// New creates a new ResourceV1alpha2Client for the given RESTClient. -func New(c rest.Interface) *ResourceV1alpha2Client { - return &ResourceV1alpha2Client{c} +// New creates a new ResourceV1alpha3Client for the given RESTClient. +func New(c rest.Interface) *ResourceV1alpha3Client { + return &ResourceV1alpha3Client{c} } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha2.SchemeGroupVersion + gv := v1alpha3.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() @@ -114,7 +119,7 @@ func setConfigDefaults(config *rest.Config) error { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (c *ResourceV1alpha2Client) RESTClient() rest.Interface { +func (c *ResourceV1alpha3Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go new file mode 100644 index 0000000000..2ac65c005e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go @@ -0,0 +1,73 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + "context" + + v1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ResourceClaimsGetter has a method to return a ResourceClaimInterface. +// A group's client should implement this interface. +type ResourceClaimsGetter interface { + ResourceClaims(namespace string) ResourceClaimInterface +} + +// ResourceClaimInterface has methods to work with ResourceClaim resources. +type ResourceClaimInterface interface { + Create(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.CreateOptions) (*v1alpha3.ResourceClaim, error) + Update(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.UpdateOptions) (*v1alpha3.ResourceClaim, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.UpdateOptions) (*v1alpha3.ResourceClaim, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.ResourceClaim, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.ResourceClaimList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceClaim, err error) + Apply(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaim, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaim, err error) + ResourceClaimExpansion +} + +// resourceClaims implements ResourceClaimInterface +type resourceClaims struct { + *gentype.ClientWithListAndApply[*v1alpha3.ResourceClaim, *v1alpha3.ResourceClaimList, *resourcev1alpha3.ResourceClaimApplyConfiguration] +} + +// newResourceClaims returns a ResourceClaims +func newResourceClaims(c *ResourceV1alpha3Client, namespace string) *resourceClaims { + return &resourceClaims{ + gentype.NewClientWithListAndApply[*v1alpha3.ResourceClaim, *v1alpha3.ResourceClaimList, *resourcev1alpha3.ResourceClaimApplyConfiguration]( + "resourceclaims", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha3.ResourceClaim { return &v1alpha3.ResourceClaim{} }, + func() *v1alpha3.ResourceClaimList { return &v1alpha3.ResourceClaimList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go new file mode 100644 index 0000000000..87997bfee5 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go @@ -0,0 +1,69 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + "context" + + v1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ResourceClaimTemplatesGetter has a method to return a ResourceClaimTemplateInterface. +// A group's client should implement this interface. 
+type ResourceClaimTemplatesGetter interface { + ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface +} + +// ResourceClaimTemplateInterface has methods to work with ResourceClaimTemplate resources. +type ResourceClaimTemplateInterface interface { + Create(ctx context.Context, resourceClaimTemplate *v1alpha3.ResourceClaimTemplate, opts v1.CreateOptions) (*v1alpha3.ResourceClaimTemplate, error) + Update(ctx context.Context, resourceClaimTemplate *v1alpha3.ResourceClaimTemplate, opts v1.UpdateOptions) (*v1alpha3.ResourceClaimTemplate, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.ResourceClaimTemplate, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.ResourceClaimTemplateList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceClaimTemplate, err error) + Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaimTemplate, err error) + ResourceClaimTemplateExpansion +} + +// resourceClaimTemplates implements ResourceClaimTemplateInterface +type resourceClaimTemplates struct { + *gentype.ClientWithListAndApply[*v1alpha3.ResourceClaimTemplate, *v1alpha3.ResourceClaimTemplateList, *resourcev1alpha3.ResourceClaimTemplateApplyConfiguration] +} + +// newResourceClaimTemplates returns a ResourceClaimTemplates +func newResourceClaimTemplates(c *ResourceV1alpha3Client, namespace string) *resourceClaimTemplates { + return &resourceClaimTemplates{ + gentype.NewClientWithListAndApply[*v1alpha3.ResourceClaimTemplate, *v1alpha3.ResourceClaimTemplateList, *resourcev1alpha3.ResourceClaimTemplateApplyConfiguration]( + "resourceclaimtemplates", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha3.ResourceClaimTemplate { return &v1alpha3.ResourceClaimTemplate{} }, + func() *v1alpha3.ResourceClaimTemplateList { return &v1alpha3.ResourceClaimTemplateList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go new file mode 100644 index 0000000000..0819041408 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go @@ -0,0 +1,69 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha3 + +import ( + "context" + + v1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ResourceSlicesGetter has a method to return a ResourceSliceInterface. +// A group's client should implement this interface. +type ResourceSlicesGetter interface { + ResourceSlices() ResourceSliceInterface +} + +// ResourceSliceInterface has methods to work with ResourceSlice resources. +type ResourceSliceInterface interface { + Create(ctx context.Context, resourceSlice *v1alpha3.ResourceSlice, opts v1.CreateOptions) (*v1alpha3.ResourceSlice, error) + Update(ctx context.Context, resourceSlice *v1alpha3.ResourceSlice, opts v1.UpdateOptions) (*v1alpha3.ResourceSlice, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.ResourceSlice, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.ResourceSliceList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceSlice, err error) + Apply(ctx context.Context, resourceSlice *resourcev1alpha3.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceSlice, err error) + ResourceSliceExpansion +} + +// resourceSlices implements ResourceSliceInterface +type resourceSlices struct { + *gentype.ClientWithListAndApply[*v1alpha3.ResourceSlice, *v1alpha3.ResourceSliceList, *resourcev1alpha3.ResourceSliceApplyConfiguration] +} + +// newResourceSlices returns a ResourceSlices +func newResourceSlices(c *ResourceV1alpha3Client) *resourceSlices { + return &resourceSlices{ + gentype.NewClientWithListAndApply[*v1alpha3.ResourceSlice, *v1alpha3.ResourceSliceList, *resourcev1alpha3.ResourceSliceApplyConfiguration]( + "resourceslices", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha3.ResourceSlice { return &v1alpha3.ResourceSlice{} }, + func() *v1alpha3.ResourceSliceList { return &v1alpha3.ResourceSliceList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go index 40ab9fb407..92847184bc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go @@ -43,20 +43,22 @@ var priorityclassesKind = v1.SchemeGroupVersion.WithKind("PriorityClass") // Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. func (c *FakePriorityClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityClass, err error) { + emptyResult := &v1.PriorityClass{} obj, err := c.Fake. 
- Invokes(testing.NewRootGetAction(priorityclassesResource, name), &v1.PriorityClass{}) + Invokes(testing.NewRootGetActionWithOptions(priorityclassesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PriorityClass), err } // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. func (c *FakePriorityClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityClassList, err error) { + emptyResult := &v1.PriorityClassList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(priorityclassesResource, priorityclassesKind, opts), &v1.PriorityClassList{}) + Invokes(testing.NewRootListActionWithOptions(priorityclassesResource, priorityclassesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakePriorityClasses) List(ctx context.Context, opts metav1.ListOptions) // Watch returns a watch.Interface that watches the requested priorityClasses. func (c *FakePriorityClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(priorityclassesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(priorityclassesResource, opts)) } // Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.CreateOptions) (result *v1.PriorityClass, err error) { + emptyResult := &v1.PriorityClass{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(priorityclassesResource, priorityClass), &v1.PriorityClass{}) + Invokes(testing.NewRootCreateActionWithOptions(priorityclassesResource, priorityClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PriorityClass), err } // Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.UpdateOptions) (result *v1.PriorityClass, err error) { + emptyResult := &v1.PriorityClass{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(priorityclassesResource, priorityClass), &v1.PriorityClass{}) + Invokes(testing.NewRootUpdateActionWithOptions(priorityclassesResource, priorityClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PriorityClass), err } @@ -107,7 +111,7 @@ func (c *FakePriorityClasses) Delete(ctx context.Context, name string, opts meta // DeleteCollection deletes a collection of objects. func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(priorityclassesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(priorityclassesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.PriorityClassList{}) return err @@ -115,10 +119,11 @@ func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts metav1. // Patch applies the patch and returns the patched priorityClass. 
func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityClass, err error) { + emptyResult := &v1.PriorityClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, pt, data, subresources...), &v1.PriorityClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(priorityclassesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PriorityClass), err } @@ -136,10 +141,11 @@ func (c *FakePriorityClasses) Apply(ctx context.Context, priorityClass *scheduli if name == nil { return nil, fmt.Errorf("priorityClass.Name must be provided to Apply") } + emptyResult := &v1.PriorityClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, *name, types.ApplyPatchType, data), &v1.PriorityClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(priorityclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.PriorityClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go index c68ec5da41..a28ef2fd4a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/scheduling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" schedulingv1 "k8s.io/client-go/applyconfigurations/scheduling/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityClassesGetter has a method to return a PriorityClassInterface. @@ -55,143 +52,18 @@ type PriorityClassInterface interface { // priorityClasses implements PriorityClassInterface type priorityClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.PriorityClass, *v1.PriorityClassList, *schedulingv1.PriorityClassApplyConfiguration] } // newPriorityClasses returns a PriorityClasses func newPriorityClasses(c *SchedulingV1Client) *priorityClasses { return &priorityClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.PriorityClass, *v1.PriorityClassList, *schedulingv1.PriorityClassApplyConfiguration]( + "priorityclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.PriorityClass { return &v1.PriorityClass{} }, + func() *v1.PriorityClassList { return &v1.PriorityClassList{} }), } } - -// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *priorityClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityClass, err error) { - result = &v1.PriorityClass{} - err = c.client.Get(). - Resource("priorityclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. 
-func (c *priorityClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PriorityClassList{} - err = c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *priorityClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.CreateOptions) (result *v1.PriorityClass, err error) { - result = &v1.PriorityClass{} - err = c.client.Post(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.UpdateOptions) (result *v1.PriorityClass, err error) { - result = &v1.PriorityClass{} - err = c.client.Put(). - Resource("priorityclasses"). - Name(priorityClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. -func (c *priorityClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("priorityclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("priorityclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityClass. -func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityClass, err error) { - result = &v1.PriorityClass{} - err = c.client.Patch(pt). - Resource("priorityclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityClass. 
-func (c *priorityClasses) Apply(ctx context.Context, priorityClass *schedulingv1.PriorityClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityClass, err error) { - if priorityClass == nil { - return nil, fmt.Errorf("priorityClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityClass) - if err != nil { - return nil, err - } - name := priorityClass.Name - if name == nil { - return nil, fmt.Errorf("priorityClass.Name must be provided to Apply") - } - result = &v1.PriorityClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("priorityclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go index 3c8404a725..055d458a3c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go @@ -43,20 +43,22 @@ var priorityclassesKind = v1alpha1.SchemeGroupVersion.WithKind("PriorityClass") // Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. func (c *FakePriorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PriorityClass, err error) { + emptyResult := &v1alpha1.PriorityClass{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(priorityclassesResource, name), &v1alpha1.PriorityClass{}) + Invokes(testing.NewRootGetActionWithOptions(priorityclassesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.PriorityClass), err } // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. func (c *FakePriorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) { + emptyResult := &v1alpha1.PriorityClassList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(priorityclassesResource, priorityclassesKind, opts), &v1alpha1.PriorityClassList{}) + Invokes(testing.NewRootListActionWithOptions(priorityclassesResource, priorityclassesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakePriorityClasses) List(ctx context.Context, opts v1.ListOptions) (re // Watch returns a watch.Interface that watches the requested priorityClasses. func (c *FakePriorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(priorityclassesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(priorityclassesResource, opts)) } // Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.CreateOptions) (result *v1alpha1.PriorityClass, err error) { + emptyResult := &v1alpha1.PriorityClass{} obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(priorityclassesResource, priorityClass), &v1alpha1.PriorityClass{}) + Invokes(testing.NewRootCreateActionWithOptions(priorityclassesResource, priorityClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.PriorityClass), err } // Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.UpdateOptions) (result *v1alpha1.PriorityClass, err error) { + emptyResult := &v1alpha1.PriorityClass{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(priorityclassesResource, priorityClass), &v1alpha1.PriorityClass{}) + Invokes(testing.NewRootUpdateActionWithOptions(priorityclassesResource, priorityClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.PriorityClass), err } @@ -107,7 +111,7 @@ func (c *FakePriorityClasses) Delete(ctx context.Context, name string, opts v1.D // DeleteCollection deletes a collection of objects. func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(priorityclassesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(priorityclassesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.PriorityClassList{}) return err @@ -115,10 +119,11 @@ func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts v1.Dele // Patch applies the patch and returns the patched priorityClass. func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityClass, err error) { + emptyResult := &v1alpha1.PriorityClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, pt, data, subresources...), &v1alpha1.PriorityClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(priorityclassesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.PriorityClass), err } @@ -136,10 +141,11 @@ func (c *FakePriorityClasses) Apply(ctx context.Context, priorityClass *scheduli if name == nil { return nil, fmt.Errorf("priorityClass.Name must be provided to Apply") } + emptyResult := &v1alpha1.PriorityClass{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, *name, types.ApplyPatchType, data), &v1alpha1.PriorityClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(priorityclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.PriorityClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go index a9b8c19c78..5c78f3de9f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/scheduling/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" schedulingv1alpha1 "k8s.io/client-go/applyconfigurations/scheduling/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityClassesGetter has a method to return a PriorityClassInterface. @@ -55,143 +52,18 @@ type PriorityClassInterface interface { // priorityClasses implements PriorityClassInterface type priorityClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.PriorityClass, *v1alpha1.PriorityClassList, *schedulingv1alpha1.PriorityClassApplyConfiguration] } // newPriorityClasses returns a PriorityClasses func newPriorityClasses(c *SchedulingV1alpha1Client) *priorityClasses { return &priorityClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.PriorityClass, *v1alpha1.PriorityClassList, *schedulingv1alpha1.PriorityClassApplyConfiguration]( + "priorityclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.PriorityClass { return &v1alpha1.PriorityClass{} }, + func() *v1alpha1.PriorityClassList { return &v1alpha1.PriorityClassList{} }), } } - -// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *priorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PriorityClass, err error) { - result = &v1alpha1.PriorityClass{} - err = c.client.Get(). - Resource("priorityclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. -func (c *priorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.PriorityClassList{} - err = c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *priorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). 
- Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.CreateOptions) (result *v1alpha1.PriorityClass, err error) { - result = &v1alpha1.PriorityClass{} - err = c.client.Post(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.UpdateOptions) (result *v1alpha1.PriorityClass, err error) { - result = &v1alpha1.PriorityClass{} - err = c.client.Put(). - Resource("priorityclasses"). - Name(priorityClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. -func (c *priorityClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("priorityclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("priorityclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityClass. -func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityClass, err error) { - result = &v1alpha1.PriorityClass{} - err = c.client.Patch(pt). - Resource("priorityclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityClass. -func (c *priorityClasses) Apply(ctx context.Context, priorityClass *schedulingv1alpha1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityClass, err error) { - if priorityClass == nil { - return nil, fmt.Errorf("priorityClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityClass) - if err != nil { - return nil, err - } - name := priorityClass.Name - if name == nil { - return nil, fmt.Errorf("priorityClass.Name must be provided to Apply") - } - result = &v1alpha1.PriorityClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("priorityclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go index 4cf2e26c77..49d82a7ed9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go @@ -43,20 +43,22 @@ var priorityclassesKind = v1beta1.SchemeGroupVersion.WithKind("PriorityClass") // Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. func (c *FakePriorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityClass, err error) { + emptyResult := &v1beta1.PriorityClass{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(priorityclassesResource, name), &v1beta1.PriorityClass{}) + Invokes(testing.NewRootGetActionWithOptions(priorityclassesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PriorityClass), err } // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. func (c *FakePriorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) { + emptyResult := &v1beta1.PriorityClassList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(priorityclassesResource, priorityclassesKind, opts), &v1beta1.PriorityClassList{}) + Invokes(testing.NewRootListActionWithOptions(priorityclassesResource, priorityclassesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakePriorityClasses) List(ctx context.Context, opts v1.ListOptions) (re // Watch returns a watch.Interface that watches the requested priorityClasses. func (c *FakePriorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(priorityclassesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(priorityclassesResource, opts)) } // Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.CreateOptions) (result *v1beta1.PriorityClass, err error) { + emptyResult := &v1beta1.PriorityClass{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(priorityclassesResource, priorityClass), &v1beta1.PriorityClass{}) + Invokes(testing.NewRootCreateActionWithOptions(priorityclassesResource, priorityClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PriorityClass), err } // Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.UpdateOptions) (result *v1beta1.PriorityClass, err error) { + emptyResult := &v1beta1.PriorityClass{} obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateAction(priorityclassesResource, priorityClass), &v1beta1.PriorityClass{}) + Invokes(testing.NewRootUpdateActionWithOptions(priorityclassesResource, priorityClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PriorityClass), err } @@ -107,7 +111,7 @@ func (c *FakePriorityClasses) Delete(ctx context.Context, name string, opts v1.D // DeleteCollection deletes a collection of objects. func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(priorityclassesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(priorityclassesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.PriorityClassList{}) return err @@ -115,10 +119,11 @@ func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts v1.Dele // Patch applies the patch and returns the patched priorityClass. func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityClass, err error) { + emptyResult := &v1beta1.PriorityClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, pt, data, subresources...), &v1beta1.PriorityClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(priorityclassesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PriorityClass), err } @@ -136,10 +141,11 @@ func (c *FakePriorityClasses) Apply(ctx context.Context, priorityClass *scheduli if name == nil { return nil, fmt.Errorf("priorityClass.Name must be provided to Apply") } + emptyResult := &v1beta1.PriorityClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, *name, types.ApplyPatchType, data), &v1beta1.PriorityClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(priorityclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.PriorityClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go index 155476e4c7..9fef1d7596 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/scheduling/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" schedulingv1beta1 "k8s.io/client-go/applyconfigurations/scheduling/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityClassesGetter has a method to return a PriorityClassInterface. 
@@ -55,143 +52,18 @@ type PriorityClassInterface interface { // priorityClasses implements PriorityClassInterface type priorityClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.PriorityClass, *v1beta1.PriorityClassList, *schedulingv1beta1.PriorityClassApplyConfiguration] } // newPriorityClasses returns a PriorityClasses func newPriorityClasses(c *SchedulingV1beta1Client) *priorityClasses { return &priorityClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.PriorityClass, *v1beta1.PriorityClassList, *schedulingv1beta1.PriorityClassApplyConfiguration]( + "priorityclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.PriorityClass { return &v1beta1.PriorityClass{} }, + func() *v1beta1.PriorityClassList { return &v1beta1.PriorityClassList{} }), } } - -// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *priorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityClass, err error) { - result = &v1beta1.PriorityClass{} - err = c.client.Get(). - Resource("priorityclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. -func (c *priorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.PriorityClassList{} - err = c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *priorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.CreateOptions) (result *v1beta1.PriorityClass, err error) { - result = &v1beta1.PriorityClass{} - err = c.client.Post(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.UpdateOptions) (result *v1beta1.PriorityClass, err error) { - result = &v1beta1.PriorityClass{} - err = c.client.Put(). - Resource("priorityclasses"). - Name(priorityClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. 
-func (c *priorityClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("priorityclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("priorityclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityClass. -func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityClass, err error) { - result = &v1beta1.PriorityClass{} - err = c.client.Patch(pt). - Resource("priorityclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityClass. -func (c *priorityClasses) Apply(ctx context.Context, priorityClass *schedulingv1beta1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityClass, err error) { - if priorityClass == nil { - return nil, fmt.Errorf("priorityClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityClass) - if err != nil { - return nil, err - } - name := priorityClass.Name - if name == nil { - return nil, fmt.Errorf("priorityClass.Name must be provided to Apply") - } - result = &v1beta1.PriorityClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("priorityclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go index d9dc4151e2..2e14db6000 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSIDriversGetter has a method to return a CSIDriverInterface. 
@@ -55,143 +52,18 @@ type CSIDriverInterface interface { // cSIDrivers implements CSIDriverInterface type cSIDrivers struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.CSIDriver, *v1.CSIDriverList, *storagev1.CSIDriverApplyConfiguration] } // newCSIDrivers returns a CSIDrivers func newCSIDrivers(c *StorageV1Client) *cSIDrivers { return &cSIDrivers{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.CSIDriver, *v1.CSIDriverList, *storagev1.CSIDriverApplyConfiguration]( + "csidrivers", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.CSIDriver { return &v1.CSIDriver{} }, + func() *v1.CSIDriverList { return &v1.CSIDriverList{} }), } } - -// Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any. -func (c *cSIDrivers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSIDriver, err error) { - result = &v1.CSIDriver{} - err = c.client.Get(). - Resource("csidrivers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSIDrivers that match those selectors. -func (c *cSIDrivers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSIDriverList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CSIDriverList{} - err = c.client.Get(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSIDrivers. -func (c *cSIDrivers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSIDriver and creates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *cSIDrivers) Create(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.CreateOptions) (result *v1.CSIDriver, err error) { - result = &v1.CSIDriver{} - err = c.client.Post(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIDriver). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *cSIDrivers) Update(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.UpdateOptions) (result *v1.CSIDriver, err error) { - result = &v1.CSIDriver{} - err = c.client.Put(). - Resource("csidrivers"). - Name(cSIDriver.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIDriver). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSIDriver and deletes it. Returns an error if one occurs. -func (c *cSIDrivers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("csidrivers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *cSIDrivers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("csidrivers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSIDriver. -func (c *cSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIDriver, err error) { - result = &v1.CSIDriver{} - err = c.client.Patch(pt). - Resource("csidrivers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIDriver. -func (c *cSIDrivers) Apply(ctx context.Context, cSIDriver *storagev1.CSIDriverApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSIDriver, err error) { - if cSIDriver == nil { - return nil, fmt.Errorf("cSIDriver provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSIDriver) - if err != nil { - return nil, err - } - name := cSIDriver.Name - if name == nil { - return nil, fmt.Errorf("cSIDriver.Name must be provided to Apply") - } - result = &v1.CSIDriver{} - err = c.client.Patch(types.ApplyPatchType). - Resource("csidrivers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go index 17dbc8c1c8..6d28d7ed11 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSINodesGetter has a method to return a CSINodeInterface. @@ -55,143 +52,18 @@ type CSINodeInterface interface { // cSINodes implements CSINodeInterface type cSINodes struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.CSINode, *v1.CSINodeList, *storagev1.CSINodeApplyConfiguration] } // newCSINodes returns a CSINodes func newCSINodes(c *StorageV1Client) *cSINodes { return &cSINodes{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.CSINode, *v1.CSINodeList, *storagev1.CSINodeApplyConfiguration]( + "csinodes", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.CSINode { return &v1.CSINode{} }, + func() *v1.CSINodeList { return &v1.CSINodeList{} }), } } - -// Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. -func (c *cSINodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSINode, err error) { - result = &v1.CSINode{} - err = c.client.Get(). - Resource("csinodes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSINodes that match those selectors. -func (c *cSINodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSINodeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CSINodeList{} - err = c.client.Get(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSINodes. -func (c *cSINodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *cSINodes) Create(ctx context.Context, cSINode *v1.CSINode, opts metav1.CreateOptions) (result *v1.CSINode, err error) { - result = &v1.CSINode{} - err = c.client.Post(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSINode). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *cSINodes) Update(ctx context.Context, cSINode *v1.CSINode, opts metav1.UpdateOptions) (result *v1.CSINode, err error) { - result = &v1.CSINode{} - err = c.client.Put(). - Resource("csinodes"). - Name(cSINode.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSINode). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSINode and deletes it. Returns an error if one occurs. -func (c *cSINodes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("csinodes"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cSINodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("csinodes"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSINode. -func (c *cSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSINode, err error) { - result = &v1.CSINode{} - err = c.client.Patch(pt). - Resource("csinodes"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSINode. 
-func (c *cSINodes) Apply(ctx context.Context, cSINode *storagev1.CSINodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSINode, err error) { - if cSINode == nil { - return nil, fmt.Errorf("cSINode provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSINode) - if err != nil { - return nil, err - } - name := cSINode.Name - if name == nil { - return nil, fmt.Errorf("cSINode.Name must be provided to Apply") - } - result = &v1.CSINode{} - err = c.client.Patch(types.ApplyPatchType). - Resource("csinodes"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go index 6bb50e0da9..8a762b9fff 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSIStorageCapacitiesGetter has a method to return a CSIStorageCapacityInterface. @@ -55,154 +52,18 @@ type CSIStorageCapacityInterface interface { // cSIStorageCapacities implements CSIStorageCapacityInterface type cSIStorageCapacities struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.CSIStorageCapacity, *v1.CSIStorageCapacityList, *storagev1.CSIStorageCapacityApplyConfiguration] } // newCSIStorageCapacities returns a CSIStorageCapacities func newCSIStorageCapacities(c *StorageV1Client, namespace string) *cSIStorageCapacities { return &cSIStorageCapacities{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.CSIStorageCapacity, *v1.CSIStorageCapacityList, *storagev1.CSIStorageCapacityApplyConfiguration]( + "csistoragecapacities", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.CSIStorageCapacity { return &v1.CSIStorageCapacity{} }, + func() *v1.CSIStorageCapacityList { return &v1.CSIStorageCapacityList{} }), } } - -// Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. -func (c *cSIStorageCapacities) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSIStorageCapacity, err error) { - result = &v1.CSIStorageCapacity{} - err = c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors. -func (c *cSIStorageCapacities) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSIStorageCapacityList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CSIStorageCapacityList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSIStorageCapacities. -func (c *cSIStorageCapacities) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSIStorageCapacity and creates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *cSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.CreateOptions) (result *v1.CSIStorageCapacity, err error) { - result = &v1.CSIStorageCapacity{} - err = c.client.Post(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIStorageCapacity). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *cSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.UpdateOptions) (result *v1.CSIStorageCapacity, err error) { - result = &v1.CSIStorageCapacity{} - err = c.client.Put(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(cSIStorageCapacity.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIStorageCapacity). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSIStorageCapacity and deletes it. Returns an error if one occurs. -func (c *cSIStorageCapacities) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cSIStorageCapacities) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSIStorageCapacity. -func (c *cSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIStorageCapacity, err error) { - result = &v1.CSIStorageCapacity{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIStorageCapacity. 
-func (c *cSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity *storagev1.CSIStorageCapacityApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSIStorageCapacity, err error) { - if cSIStorageCapacity == nil { - return nil, fmt.Errorf("cSIStorageCapacity provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSIStorageCapacity) - if err != nil { - return nil, err - } - name := cSIStorageCapacity.Name - if name == nil { - return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply") - } - result = &v1.CSIStorageCapacity{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go index 4983227376..1df7c034bb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go @@ -43,20 +43,22 @@ var csidriversKind = v1.SchemeGroupVersion.WithKind("CSIDriver") // Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any. func (c *FakeCSIDrivers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSIDriver, err error) { + emptyResult := &v1.CSIDriver{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(csidriversResource, name), &v1.CSIDriver{}) + Invokes(testing.NewRootGetActionWithOptions(csidriversResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSIDriver), err } // List takes label and field selectors, and returns the list of CSIDrivers that match those selectors. func (c *FakeCSIDrivers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSIDriverList, err error) { + emptyResult := &v1.CSIDriverList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(csidriversResource, csidriversKind, opts), &v1.CSIDriverList{}) + Invokes(testing.NewRootListActionWithOptions(csidriversResource, csidriversKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeCSIDrivers) List(ctx context.Context, opts metav1.ListOptions) (res // Watch returns a watch.Interface that watches the requested cSIDrivers. func (c *FakeCSIDrivers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(csidriversResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(csidriversResource, opts)) } // Create takes the representation of a cSIDriver and creates it. Returns the server's representation of the cSIDriver, and an error, if there is any. func (c *FakeCSIDrivers) Create(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.CreateOptions) (result *v1.CSIDriver, err error) { + emptyResult := &v1.CSIDriver{} obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(csidriversResource, cSIDriver), &v1.CSIDriver{}) + Invokes(testing.NewRootCreateActionWithOptions(csidriversResource, cSIDriver, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSIDriver), err } // Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any. func (c *FakeCSIDrivers) Update(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.UpdateOptions) (result *v1.CSIDriver, err error) { + emptyResult := &v1.CSIDriver{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(csidriversResource, cSIDriver), &v1.CSIDriver{}) + Invokes(testing.NewRootUpdateActionWithOptions(csidriversResource, cSIDriver, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSIDriver), err } @@ -107,7 +111,7 @@ func (c *FakeCSIDrivers) Delete(ctx context.Context, name string, opts metav1.De // DeleteCollection deletes a collection of objects. func (c *FakeCSIDrivers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(csidriversResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(csidriversResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.CSIDriverList{}) return err @@ -115,10 +119,11 @@ func (c *FakeCSIDrivers) DeleteCollection(ctx context.Context, opts metav1.Delet // Patch applies the patch and returns the patched cSIDriver. func (c *FakeCSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIDriver, err error) { + emptyResult := &v1.CSIDriver{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(csidriversResource, name, pt, data, subresources...), &v1.CSIDriver{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(csidriversResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSIDriver), err } @@ -136,10 +141,11 @@ func (c *FakeCSIDrivers) Apply(ctx context.Context, cSIDriver *storagev1.CSIDriv if name == nil { return nil, fmt.Errorf("cSIDriver.Name must be provided to Apply") } + emptyResult := &v1.CSIDriver{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(csidriversResource, *name, types.ApplyPatchType, data), &v1.CSIDriver{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(csidriversResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSIDriver), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go index 0271a20f3d..e2b8e8cc8d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go @@ -43,20 +43,22 @@ var csinodesKind = v1.SchemeGroupVersion.WithKind("CSINode") // Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. func (c *FakeCSINodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSINode, err error) { + emptyResult := &v1.CSINode{} obj, err := c.Fake. 
- Invokes(testing.NewRootGetAction(csinodesResource, name), &v1.CSINode{}) + Invokes(testing.NewRootGetActionWithOptions(csinodesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSINode), err } // List takes label and field selectors, and returns the list of CSINodes that match those selectors. func (c *FakeCSINodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSINodeList, err error) { + emptyResult := &v1.CSINodeList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(csinodesResource, csinodesKind, opts), &v1.CSINodeList{}) + Invokes(testing.NewRootListActionWithOptions(csinodesResource, csinodesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeCSINodes) List(ctx context.Context, opts metav1.ListOptions) (resul // Watch returns a watch.Interface that watches the requested cSINodes. func (c *FakeCSINodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(csinodesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(csinodesResource, opts)) } // Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. func (c *FakeCSINodes) Create(ctx context.Context, cSINode *v1.CSINode, opts metav1.CreateOptions) (result *v1.CSINode, err error) { + emptyResult := &v1.CSINode{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(csinodesResource, cSINode), &v1.CSINode{}) + Invokes(testing.NewRootCreateActionWithOptions(csinodesResource, cSINode, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSINode), err } // Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. func (c *FakeCSINodes) Update(ctx context.Context, cSINode *v1.CSINode, opts metav1.UpdateOptions) (result *v1.CSINode, err error) { + emptyResult := &v1.CSINode{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(csinodesResource, cSINode), &v1.CSINode{}) + Invokes(testing.NewRootUpdateActionWithOptions(csinodesResource, cSINode, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSINode), err } @@ -107,7 +111,7 @@ func (c *FakeCSINodes) Delete(ctx context.Context, name string, opts metav1.Dele // DeleteCollection deletes a collection of objects. func (c *FakeCSINodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(csinodesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(csinodesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.CSINodeList{}) return err @@ -115,10 +119,11 @@ func (c *FakeCSINodes) DeleteCollection(ctx context.Context, opts metav1.DeleteO // Patch applies the patch and returns the patched cSINode. func (c *FakeCSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSINode, err error) { + emptyResult := &v1.CSINode{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(csinodesResource, name, pt, data, subresources...), &v1.CSINode{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(csinodesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSINode), err } @@ -136,10 +141,11 @@ func (c *FakeCSINodes) Apply(ctx context.Context, cSINode *storagev1.CSINodeAppl if name == nil { return nil, fmt.Errorf("cSINode.Name must be provided to Apply") } + emptyResult := &v1.CSINode{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(csinodesResource, *name, types.ApplyPatchType, data), &v1.CSINode{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(csinodesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSINode), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csistoragecapacity.go index b12bbe3c15..a86014855e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csistoragecapacity.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csistoragecapacity.go @@ -44,22 +44,24 @@ var csistoragecapacitiesKind = v1.SchemeGroupVersion.WithKind("CSIStorageCapacit // Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. func (c *FakeCSIStorageCapacities) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSIStorageCapacity, err error) { + emptyResult := &v1.CSIStorageCapacity{} obj, err := c.Fake. - Invokes(testing.NewGetAction(csistoragecapacitiesResource, c.ns, name), &v1.CSIStorageCapacity{}) + Invokes(testing.NewGetActionWithOptions(csistoragecapacitiesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSIStorageCapacity), err } // List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors. func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSIStorageCapacityList, err error) { + emptyResult := &v1.CSIStorageCapacityList{} obj, err := c.Fake. - Invokes(testing.NewListAction(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), &v1.CSIStorageCapacityList{}) + Invokes(testing.NewListActionWithOptions(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts metav1.ListOpt // Watch returns a watch.Interface that watches the requested cSIStorageCapacities. func (c *FakeCSIStorageCapacities) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(csistoragecapacitiesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(csistoragecapacitiesResource, c.ns, opts)) } // Create takes the representation of a cSIStorageCapacity and creates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. 
func (c *FakeCSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.CreateOptions) (result *v1.CSIStorageCapacity, err error) { + emptyResult := &v1.CSIStorageCapacity{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1.CSIStorageCapacity{}) + Invokes(testing.NewCreateActionWithOptions(csistoragecapacitiesResource, c.ns, cSIStorageCapacity, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSIStorageCapacity), err } // Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. func (c *FakeCSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.UpdateOptions) (result *v1.CSIStorageCapacity, err error) { + emptyResult := &v1.CSIStorageCapacity{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1.CSIStorageCapacity{}) + Invokes(testing.NewUpdateActionWithOptions(csistoragecapacitiesResource, c.ns, cSIStorageCapacity, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSIStorageCapacity), err } @@ -114,7 +118,7 @@ func (c *FakeCSIStorageCapacities) Delete(ctx context.Context, name string, opts // DeleteCollection deletes a collection of objects. func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(csistoragecapacitiesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(csistoragecapacitiesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.CSIStorageCapacityList{}) return err @@ -122,11 +126,12 @@ func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts me // Patch applies the patch and returns the patched cSIStorageCapacity. func (c *FakeCSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIStorageCapacity, err error) { + emptyResult := &v1.CSIStorageCapacity{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, name, pt, data, subresources...), &v1.CSIStorageCapacity{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(csistoragecapacitiesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSIStorageCapacity), err } @@ -144,11 +149,12 @@ func (c *FakeCSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity if name == nil { return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply") } + emptyResult := &v1.CSIStorageCapacity{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data), &v1.CSIStorageCapacity{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.CSIStorageCapacity), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go index e232f4c8d7..8910be1db9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go @@ -43,20 +43,22 @@ var storageclassesKind = v1.SchemeGroupVersion.WithKind("StorageClass") // Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. func (c *FakeStorageClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StorageClass, err error) { + emptyResult := &v1.StorageClass{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(storageclassesResource, name), &v1.StorageClass{}) + Invokes(testing.NewRootGetActionWithOptions(storageclassesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.StorageClass), err } // List takes label and field selectors, and returns the list of StorageClasses that match those selectors. func (c *FakeStorageClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StorageClassList, err error) { + emptyResult := &v1.StorageClassList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(storageclassesResource, storageclassesKind, opts), &v1.StorageClassList{}) + Invokes(testing.NewRootListActionWithOptions(storageclassesResource, storageclassesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeStorageClasses) List(ctx context.Context, opts metav1.ListOptions) // Watch returns a watch.Interface that watches the requested storageClasses. func (c *FakeStorageClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(storageclassesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(storageclassesResource, opts)) } // Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. func (c *FakeStorageClasses) Create(ctx context.Context, storageClass *v1.StorageClass, opts metav1.CreateOptions) (result *v1.StorageClass, err error) { + emptyResult := &v1.StorageClass{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(storageclassesResource, storageClass), &v1.StorageClass{}) + Invokes(testing.NewRootCreateActionWithOptions(storageclassesResource, storageClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.StorageClass), err } // Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. 
func (c *FakeStorageClasses) Update(ctx context.Context, storageClass *v1.StorageClass, opts metav1.UpdateOptions) (result *v1.StorageClass, err error) { + emptyResult := &v1.StorageClass{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(storageclassesResource, storageClass), &v1.StorageClass{}) + Invokes(testing.NewRootUpdateActionWithOptions(storageclassesResource, storageClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.StorageClass), err } @@ -107,7 +111,7 @@ func (c *FakeStorageClasses) Delete(ctx context.Context, name string, opts metav // DeleteCollection deletes a collection of objects. func (c *FakeStorageClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(storageclassesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(storageclassesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.StorageClassList{}) return err @@ -115,10 +119,11 @@ func (c *FakeStorageClasses) DeleteCollection(ctx context.Context, opts metav1.D // Patch applies the patch and returns the patched storageClass. func (c *FakeStorageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StorageClass, err error) { + emptyResult := &v1.StorageClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, pt, data, subresources...), &v1.StorageClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageclassesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.StorageClass), err } @@ -136,10 +141,11 @@ func (c *FakeStorageClasses) Apply(ctx context.Context, storageClass *storagev1. if name == nil { return nil, fmt.Errorf("storageClass.Name must be provided to Apply") } + emptyResult := &v1.StorageClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, *name, types.ApplyPatchType, data), &v1.StorageClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.StorageClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go index 3f5f2aec57..3d3d71ec50 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go @@ -43,20 +43,22 @@ var volumeattachmentsKind = v1.SchemeGroupVersion.WithKind("VolumeAttachment") // Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. func (c *FakeVolumeAttachments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.VolumeAttachment, err error) { + emptyResult := &v1.VolumeAttachment{} obj, err := c.Fake. 
- Invokes(testing.NewRootGetAction(volumeattachmentsResource, name), &v1.VolumeAttachment{}) + Invokes(testing.NewRootGetActionWithOptions(volumeattachmentsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.VolumeAttachment), err } // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. func (c *FakeVolumeAttachments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.VolumeAttachmentList, err error) { + emptyResult := &v1.VolumeAttachmentList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(volumeattachmentsResource, volumeattachmentsKind, opts), &v1.VolumeAttachmentList{}) + Invokes(testing.NewRootListActionWithOptions(volumeattachmentsResource, volumeattachmentsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeVolumeAttachments) List(ctx context.Context, opts metav1.ListOption // Watch returns a watch.Interface that watches the requested volumeAttachments. func (c *FakeVolumeAttachments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(volumeattachmentsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(volumeattachmentsResource, opts)) } // Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.CreateOptions) (result *v1.VolumeAttachment, err error) { + emptyResult := &v1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &v1.VolumeAttachment{}) + Invokes(testing.NewRootCreateActionWithOptions(volumeattachmentsResource, volumeAttachment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.VolumeAttachment), err } // Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) { + emptyResult := &v1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &v1.VolumeAttachment{}) + Invokes(testing.NewRootUpdateActionWithOptions(volumeattachmentsResource, volumeAttachment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.VolumeAttachment), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (*v1.VolumeAttachment, error) { +func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) { + emptyResult := &v1.VolumeAttachment{} obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &v1.VolumeAttachment{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(volumeattachmentsResource, "status", volumeAttachment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.VolumeAttachment), err } @@ -118,7 +123,7 @@ func (c *FakeVolumeAttachments) Delete(ctx context.Context, name string, opts me // DeleteCollection deletes a collection of objects. func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(volumeattachmentsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(volumeattachmentsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1.VolumeAttachmentList{}) return err @@ -126,10 +131,11 @@ func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts metav // Patch applies the patch and returns the patched volumeAttachment. func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeAttachment, err error) { + emptyResult := &v1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &v1.VolumeAttachment{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.VolumeAttachment), err } @@ -147,10 +153,11 @@ func (c *FakeVolumeAttachments) Apply(ctx context.Context, volumeAttachment *sto if name == nil { return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") } + emptyResult := &v1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data), &v1.VolumeAttachment{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.VolumeAttachment), err } @@ -169,10 +176,11 @@ func (c *FakeVolumeAttachments) ApplyStatus(ctx context.Context, volumeAttachmen if name == nil { return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") } + emptyResult := &v1.VolumeAttachment{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data, "status"), &v1.VolumeAttachment{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1.VolumeAttachment), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go index 8e97d90a0f..d7b6ff68aa 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // StorageClassesGetter has a method to return a StorageClassInterface. @@ -55,143 +52,18 @@ type StorageClassInterface interface { // storageClasses implements StorageClassInterface type storageClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.StorageClass, *v1.StorageClassList, *storagev1.StorageClassApplyConfiguration] } // newStorageClasses returns a StorageClasses func newStorageClasses(c *StorageV1Client) *storageClasses { return &storageClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.StorageClass, *v1.StorageClassList, *storagev1.StorageClassApplyConfiguration]( + "storageclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.StorageClass { return &v1.StorageClass{} }, + func() *v1.StorageClassList { return &v1.StorageClassList{} }), } } - -// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. -func (c *storageClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StorageClass, err error) { - result = &v1.StorageClass{} - err = c.client.Get(). - Resource("storageclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. -func (c *storageClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StorageClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.StorageClassList{} - err = c.client.Get(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested storageClasses. -func (c *storageClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a storageClass and creates it. 
Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Create(ctx context.Context, storageClass *v1.StorageClass, opts metav1.CreateOptions) (result *v1.StorageClass, err error) { - result = &v1.StorageClass{} - err = c.client.Post(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Update(ctx context.Context, storageClass *v1.StorageClass, opts metav1.UpdateOptions) (result *v1.StorageClass, err error) { - result = &v1.StorageClass{} - err = c.client.Put(). - Resource("storageclasses"). - Name(storageClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. -func (c *storageClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("storageclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *storageClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("storageclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched storageClass. -func (c *storageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StorageClass, err error) { - result = &v1.StorageClass{} - err = c.client.Patch(pt). - Resource("storageclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied storageClass. -func (c *storageClasses) Apply(ctx context.Context, storageClass *storagev1.StorageClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StorageClass, err error) { - if storageClass == nil { - return nil, fmt.Errorf("storageClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(storageClass) - if err != nil { - return nil, err - } - name := storageClass.Name - if name == nil { - return nil, fmt.Errorf("storageClass.Name must be provided to Apply") - } - result = &v1.StorageClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("storageclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go index c1dbec84f4..3a0404284f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. @@ -43,6 +40,7 @@ type VolumeAttachmentsGetter interface { type VolumeAttachmentInterface interface { Create(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.CreateOptions) (*v1.VolumeAttachment, error) Update(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (*v1.VolumeAttachment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (*v1.VolumeAttachment, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,193 +49,25 @@ type VolumeAttachmentInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeAttachment, err error) Apply(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) VolumeAttachmentExpansion } // volumeAttachments implements VolumeAttachmentInterface type volumeAttachments struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.VolumeAttachment, *v1.VolumeAttachmentList, *storagev1.VolumeAttachmentApplyConfiguration] } // newVolumeAttachments returns a VolumeAttachments func newVolumeAttachments(c *StorageV1Client) *volumeAttachments { return &volumeAttachments{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.VolumeAttachment, *v1.VolumeAttachmentList, *storagev1.VolumeAttachmentApplyConfiguration]( + "volumeattachments", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.VolumeAttachment { return &v1.VolumeAttachment{} }, + func() *v1.VolumeAttachmentList { return &v1.VolumeAttachmentList{} }), } } - -// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. -func (c *volumeAttachments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Get(). - Resource("volumeattachments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). 
- Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *volumeAttachments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.VolumeAttachmentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.VolumeAttachmentList{} - err = c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *volumeAttachments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.CreateOptions) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Post(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *volumeAttachments) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("volumeattachments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *volumeAttachments) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("volumeattachments"). - VersionedParams(&listOpts, scheme.ParameterCodec). 
- Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched volumeAttachment. -func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Patch(pt). - Resource("volumeattachments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttachment. -func (c *volumeAttachments) Apply(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - result = &v1.VolumeAttachment{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattachments"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *volumeAttachments) ApplyStatus(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - - result = &v1.VolumeAttachment{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattachments"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go index bf5d64dddc..6819deff62 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSIStorageCapacitiesGetter has a method to return a CSIStorageCapacityInterface. 
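The hand-written Apply/ApplyStatus bodies deleted above (json.Marshal of the apply configuration, then a PATCH with types.ApplyPatchType) are what the embedded gentype client now performs for each resource. A minimal caller-side sketch of that server-side apply path, not part of the patch; it assumes an in-cluster config and uses the illustrative names "example-sc", "example.com/provisioner", and "example-manager":

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storagev1ac "k8s.io/client-go/applyconfigurations/storage/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumption: running inside a cluster
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Declare only the fields this field manager owns; Apply marshals the
	// configuration and issues a PATCH with types.ApplyPatchType, which is
	// what the removed per-resource code did by hand.
	sc := storagev1ac.StorageClass("example-sc").
		WithProvisioner("example.com/provisioner") // illustrative provisioner

	applied, err := client.StorageV1().StorageClasses().
		Apply(context.TODO(), sc, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	if err != nil {
		panic(err)
	}
	fmt.Println("applied StorageClass:", applied.Name)
}

The caller-visible signature of Apply is unchanged by the refactor; only the per-resource implementation files shrink.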
@@ -55,154 +52,18 @@ type CSIStorageCapacityInterface interface { // cSIStorageCapacities implements CSIStorageCapacityInterface type cSIStorageCapacities struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1alpha1.CSIStorageCapacity, *v1alpha1.CSIStorageCapacityList, *storagev1alpha1.CSIStorageCapacityApplyConfiguration] } // newCSIStorageCapacities returns a CSIStorageCapacities func newCSIStorageCapacities(c *StorageV1alpha1Client, namespace string) *cSIStorageCapacities { return &cSIStorageCapacities{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1alpha1.CSIStorageCapacity, *v1alpha1.CSIStorageCapacityList, *storagev1alpha1.CSIStorageCapacityApplyConfiguration]( + "csistoragecapacities", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha1.CSIStorageCapacity { return &v1alpha1.CSIStorageCapacity{} }, + func() *v1alpha1.CSIStorageCapacityList { return &v1alpha1.CSIStorageCapacityList{} }), } } - -// Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. -func (c *cSIStorageCapacities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CSIStorageCapacity, err error) { - result = &v1alpha1.CSIStorageCapacity{} - err = c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors. -func (c *cSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CSIStorageCapacityList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.CSIStorageCapacityList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSIStorageCapacities. -func (c *cSIStorageCapacities) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSIStorageCapacity and creates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *cSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.CreateOptions) (result *v1alpha1.CSIStorageCapacity, err error) { - result = &v1alpha1.CSIStorageCapacity{} - err = c.client.Post(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIStorageCapacity). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. 
-func (c *cSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.UpdateOptions) (result *v1alpha1.CSIStorageCapacity, err error) { - result = &v1alpha1.CSIStorageCapacity{} - err = c.client.Put(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(cSIStorageCapacity.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIStorageCapacity). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSIStorageCapacity and deletes it. Returns an error if one occurs. -func (c *cSIStorageCapacities) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSIStorageCapacity. -func (c *cSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CSIStorageCapacity, err error) { - result = &v1alpha1.CSIStorageCapacity{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIStorageCapacity. -func (c *cSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity *storagev1alpha1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CSIStorageCapacity, err error) { - if cSIStorageCapacity == nil { - return nil, fmt.Errorf("cSIStorageCapacity provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSIStorageCapacity) - if err != nil { - return nil, err - } - name := cSIStorageCapacity.Name - if name == nil { - return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply") - } - result = &v1alpha1.CSIStorageCapacity{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go index c1614cda7d..0bcaccd208 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go @@ -44,22 +44,24 @@ var csistoragecapacitiesKind = v1alpha1.SchemeGroupVersion.WithKind("CSIStorageC // Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. 
func (c *FakeCSIStorageCapacities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CSIStorageCapacity, err error) { + emptyResult := &v1alpha1.CSIStorageCapacity{} obj, err := c.Fake. - Invokes(testing.NewGetAction(csistoragecapacitiesResource, c.ns, name), &v1alpha1.CSIStorageCapacity{}) + Invokes(testing.NewGetActionWithOptions(csistoragecapacitiesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.CSIStorageCapacity), err } // List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors. func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CSIStorageCapacityList, err error) { + emptyResult := &v1alpha1.CSIStorageCapacityList{} obj, err := c.Fake. - Invokes(testing.NewListAction(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), &v1alpha1.CSIStorageCapacityList{}) + Invokes(testing.NewListActionWithOptions(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions // Watch returns a watch.Interface that watches the requested cSIStorageCapacities. func (c *FakeCSIStorageCapacities) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(csistoragecapacitiesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(csistoragecapacitiesResource, c.ns, opts)) } // Create takes the representation of a cSIStorageCapacity and creates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. func (c *FakeCSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.CreateOptions) (result *v1alpha1.CSIStorageCapacity, err error) { + emptyResult := &v1alpha1.CSIStorageCapacity{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1alpha1.CSIStorageCapacity{}) + Invokes(testing.NewCreateActionWithOptions(csistoragecapacitiesResource, c.ns, cSIStorageCapacity, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.CSIStorageCapacity), err } // Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. func (c *FakeCSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.UpdateOptions) (result *v1alpha1.CSIStorageCapacity, err error) { + emptyResult := &v1alpha1.CSIStorageCapacity{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1alpha1.CSIStorageCapacity{}) + Invokes(testing.NewUpdateActionWithOptions(csistoragecapacitiesResource, c.ns, cSIStorageCapacity, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.CSIStorageCapacity), err } @@ -114,7 +118,7 @@ func (c *FakeCSIStorageCapacities) Delete(ctx context.Context, name string, opts // DeleteCollection deletes a collection of objects. 
func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(csistoragecapacitiesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(csistoragecapacitiesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.CSIStorageCapacityList{}) return err @@ -122,11 +126,12 @@ func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1 // Patch applies the patch and returns the patched cSIStorageCapacity. func (c *FakeCSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CSIStorageCapacity, err error) { + emptyResult := &v1alpha1.CSIStorageCapacity{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, name, pt, data, subresources...), &v1alpha1.CSIStorageCapacity{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(csistoragecapacitiesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.CSIStorageCapacity), err } @@ -144,11 +149,12 @@ func (c *FakeCSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity if name == nil { return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply") } + emptyResult := &v1alpha1.CSIStorageCapacity{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha1.CSIStorageCapacity{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.CSIStorageCapacity), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go index 9725d6d10b..a07247f8f3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go @@ -43,20 +43,22 @@ var volumeattachmentsKind = v1alpha1.SchemeGroupVersion.WithKind("VolumeAttachme // Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. func (c *FakeVolumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) { + emptyResult := &v1alpha1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(volumeattachmentsResource, name), &v1alpha1.VolumeAttachment{}) + Invokes(testing.NewRootGetActionWithOptions(volumeattachmentsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.VolumeAttachment), err } // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. func (c *FakeVolumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) { + emptyResult := &v1alpha1.VolumeAttachmentList{} obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(volumeattachmentsResource, volumeattachmentsKind, opts), &v1alpha1.VolumeAttachmentList{}) + Invokes(testing.NewRootListActionWithOptions(volumeattachmentsResource, volumeattachmentsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeVolumeAttachments) List(ctx context.Context, opts v1.ListOptions) ( // Watch returns a watch.Interface that watches the requested volumeAttachments. func (c *FakeVolumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(volumeattachmentsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(volumeattachmentsResource, opts)) } // Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.CreateOptions) (result *v1alpha1.VolumeAttachment, err error) { + emptyResult := &v1alpha1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &v1alpha1.VolumeAttachment{}) + Invokes(testing.NewRootCreateActionWithOptions(volumeattachmentsResource, volumeAttachment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.VolumeAttachment), err } // Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) { + emptyResult := &v1alpha1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &v1alpha1.VolumeAttachment{}) + Invokes(testing.NewRootUpdateActionWithOptions(volumeattachmentsResource, volumeAttachment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.VolumeAttachment), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error) { +func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) { + emptyResult := &v1alpha1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &v1alpha1.VolumeAttachment{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(volumeattachmentsResource, "status", volumeAttachment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.VolumeAttachment), err } @@ -118,7 +123,7 @@ func (c *FakeVolumeAttachments) Delete(ctx context.Context, name string, opts v1 // DeleteCollection deletes a collection of objects. 
func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(volumeattachmentsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(volumeattachmentsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.VolumeAttachmentList{}) return err @@ -126,10 +131,11 @@ func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts v1.De // Patch applies the patch and returns the patched volumeAttachment. func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) { + emptyResult := &v1alpha1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &v1alpha1.VolumeAttachment{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.VolumeAttachment), err } @@ -147,10 +153,11 @@ func (c *FakeVolumeAttachments) Apply(ctx context.Context, volumeAttachment *sto if name == nil { return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") } + emptyResult := &v1alpha1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data), &v1alpha1.VolumeAttachment{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.VolumeAttachment), err } @@ -169,10 +176,11 @@ func (c *FakeVolumeAttachments) ApplyStatus(ctx context.Context, volumeAttachmen if name == nil { return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") } + emptyResult := &v1alpha1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.VolumeAttachment{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.VolumeAttachment), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go index d25263df48..0d7fe9aa8c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go @@ -43,20 +43,22 @@ var volumeattributesclassesKind = v1alpha1.SchemeGroupVersion.WithKind("VolumeAt // Get takes name of the volumeAttributesClass, and returns the corresponding volumeAttributesClass object, and an error if there is any. func (c *FakeVolumeAttributesClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttributesClass, err error) { + emptyResult := &v1alpha1.VolumeAttributesClass{} obj, err := c.Fake. 
- Invokes(testing.NewRootGetAction(volumeattributesclassesResource, name), &v1alpha1.VolumeAttributesClass{}) + Invokes(testing.NewRootGetActionWithOptions(volumeattributesclassesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.VolumeAttributesClass), err } // List takes label and field selectors, and returns the list of VolumeAttributesClasses that match those selectors. func (c *FakeVolumeAttributesClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttributesClassList, err error) { + emptyResult := &v1alpha1.VolumeAttributesClassList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(volumeattributesclassesResource, volumeattributesclassesKind, opts), &v1alpha1.VolumeAttributesClassList{}) + Invokes(testing.NewRootListActionWithOptions(volumeattributesclassesResource, volumeattributesclassesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeVolumeAttributesClasses) List(ctx context.Context, opts v1.ListOpti // Watch returns a watch.Interface that watches the requested volumeAttributesClasses. func (c *FakeVolumeAttributesClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(volumeattributesclassesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(volumeattributesclassesResource, opts)) } // Create takes the representation of a volumeAttributesClass and creates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. func (c *FakeVolumeAttributesClasses) Create(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.CreateOptions) (result *v1alpha1.VolumeAttributesClass, err error) { + emptyResult := &v1alpha1.VolumeAttributesClass{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(volumeattributesclassesResource, volumeAttributesClass), &v1alpha1.VolumeAttributesClass{}) + Invokes(testing.NewRootCreateActionWithOptions(volumeattributesclassesResource, volumeAttributesClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.VolumeAttributesClass), err } // Update takes the representation of a volumeAttributesClass and updates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. func (c *FakeVolumeAttributesClasses) Update(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttributesClass, err error) { + emptyResult := &v1alpha1.VolumeAttributesClass{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(volumeattributesclassesResource, volumeAttributesClass), &v1alpha1.VolumeAttributesClass{}) + Invokes(testing.NewRootUpdateActionWithOptions(volumeattributesclassesResource, volumeAttributesClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.VolumeAttributesClass), err } @@ -107,7 +111,7 @@ func (c *FakeVolumeAttributesClasses) Delete(ctx context.Context, name string, o // DeleteCollection deletes a collection of objects. 
func (c *FakeVolumeAttributesClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(volumeattributesclassesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(volumeattributesclassesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.VolumeAttributesClassList{}) return err @@ -115,10 +119,11 @@ func (c *FakeVolumeAttributesClasses) DeleteCollection(ctx context.Context, opts // Patch applies the patch and returns the patched volumeAttributesClass. func (c *FakeVolumeAttributesClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttributesClass, err error) { + emptyResult := &v1alpha1.VolumeAttributesClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattributesclassesResource, name, pt, data, subresources...), &v1alpha1.VolumeAttributesClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattributesclassesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.VolumeAttributesClass), err } @@ -136,10 +141,11 @@ func (c *FakeVolumeAttributesClasses) Apply(ctx context.Context, volumeAttribute if name == nil { return nil, fmt.Errorf("volumeAttributesClass.Name must be provided to Apply") } + emptyResult := &v1alpha1.VolumeAttributesClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattributesclassesResource, *name, types.ApplyPatchType, data), &v1alpha1.VolumeAttributesClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattributesclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.VolumeAttributesClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go index 58abb748f9..0982d5568a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. @@ -43,6 +40,7 @@ type VolumeAttachmentsGetter interface { type VolumeAttachmentInterface interface { Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.CreateOptions) (*v1alpha1.VolumeAttachment, error) Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type VolumeAttachmentInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) Apply(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error) VolumeAttachmentExpansion } // volumeAttachments implements VolumeAttachmentInterface type volumeAttachments struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.VolumeAttachment, *v1alpha1.VolumeAttachmentList, *storagev1alpha1.VolumeAttachmentApplyConfiguration] } // newVolumeAttachments returns a VolumeAttachments func newVolumeAttachments(c *StorageV1alpha1Client) *volumeAttachments { return &volumeAttachments{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.VolumeAttachment, *v1alpha1.VolumeAttachmentList, *storagev1alpha1.VolumeAttachmentApplyConfiguration]( + "volumeattachments", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.VolumeAttachment { return &v1alpha1.VolumeAttachment{} }, + func() *v1alpha1.VolumeAttachmentList { return &v1alpha1.VolumeAttachmentList{} }), } } - -// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. -func (c *volumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) { - result = &v1alpha1.VolumeAttachment{} - err = c.client.Get(). - Resource("volumeattachments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *volumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.VolumeAttachmentList{} - err = c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *volumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a volumeAttachment and creates it. 
Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.CreateOptions) (result *v1alpha1.VolumeAttachment, err error) { - result = &v1alpha1.VolumeAttachment{} - err = c.client.Post(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) { - result = &v1alpha1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) { - result = &v1alpha1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *volumeAttachments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("volumeattachments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *volumeAttachments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("volumeattachments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched volumeAttachment. -func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) { - result = &v1alpha1.VolumeAttachment{} - err = c.client.Patch(pt). - Resource("volumeattachments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttachment. 
-func (c *volumeAttachments) Apply(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - result = &v1alpha1.VolumeAttachment{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattachments"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *volumeAttachments) ApplyStatus(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - - result = &v1alpha1.VolumeAttachment{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattachments"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go index 6633a4dc15..40cff75883 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // VolumeAttributesClassesGetter has a method to return a VolumeAttributesClassInterface. 
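The fake_* files in this patch switch every action constructor to its *WithOptions variant and return a typed empty result instead of nil when the reaction yields no object. Tests written against the fake clientset keep working unchanged; a minimal sketch (the name "example-attach" is illustrative, and storage/v1 is used, though the v1alpha1 and v1beta1 fakes behave the same way):

package storage_test

import (
	"context"
	"testing"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestVolumeAttachmentFake(t *testing.T) {
	client := fake.NewSimpleClientset()

	// Create is served by the fake's object tracker, not a real API server.
	va := &storagev1.VolumeAttachment{ObjectMeta: metav1.ObjectMeta{Name: "example-attach"}}
	if _, err := client.StorageV1().VolumeAttachments().Create(context.TODO(), va, metav1.CreateOptions{}); err != nil {
		t.Fatal(err)
	}

	// With the *WithOptions constructors, the GetOptions passed here are now
	// carried on the recorded action as well.
	got, err := client.StorageV1().VolumeAttachments().Get(context.TODO(), "example-attach", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if got.Name != "example-attach" {
		t.Fatalf("unexpected name %q", got.Name)
	}

	// Every call is recorded as a testing.Action that the test can inspect.
	if len(client.Actions()) != 2 {
		t.Fatalf("expected 2 recorded actions, got %d", len(client.Actions()))
	}
}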
@@ -55,143 +52,18 @@ type VolumeAttributesClassInterface interface { // volumeAttributesClasses implements VolumeAttributesClassInterface type volumeAttributesClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.VolumeAttributesClass, *v1alpha1.VolumeAttributesClassList, *storagev1alpha1.VolumeAttributesClassApplyConfiguration] } // newVolumeAttributesClasses returns a VolumeAttributesClasses func newVolumeAttributesClasses(c *StorageV1alpha1Client) *volumeAttributesClasses { return &volumeAttributesClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.VolumeAttributesClass, *v1alpha1.VolumeAttributesClassList, *storagev1alpha1.VolumeAttributesClassApplyConfiguration]( + "volumeattributesclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.VolumeAttributesClass { return &v1alpha1.VolumeAttributesClass{} }, + func() *v1alpha1.VolumeAttributesClassList { return &v1alpha1.VolumeAttributesClassList{} }), } } - -// Get takes name of the volumeAttributesClass, and returns the corresponding volumeAttributesClass object, and an error if there is any. -func (c *volumeAttributesClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttributesClass, err error) { - result = &v1alpha1.VolumeAttributesClass{} - err = c.client.Get(). - Resource("volumeattributesclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of VolumeAttributesClasses that match those selectors. -func (c *volumeAttributesClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttributesClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.VolumeAttributesClassList{} - err = c.client.Get(). - Resource("volumeattributesclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested volumeAttributesClasses. -func (c *volumeAttributesClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("volumeattributesclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a volumeAttributesClass and creates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. -func (c *volumeAttributesClasses) Create(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.CreateOptions) (result *v1alpha1.VolumeAttributesClass, err error) { - result = &v1alpha1.VolumeAttributesClass{} - err = c.client.Post(). - Resource("volumeattributesclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttributesClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a volumeAttributesClass and updates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. 
-func (c *volumeAttributesClasses) Update(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttributesClass, err error) { - result = &v1alpha1.VolumeAttributesClass{} - err = c.client.Put(). - Resource("volumeattributesclasses"). - Name(volumeAttributesClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttributesClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the volumeAttributesClass and deletes it. Returns an error if one occurs. -func (c *volumeAttributesClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("volumeattributesclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *volumeAttributesClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("volumeattributesclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched volumeAttributesClass. -func (c *volumeAttributesClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttributesClass, err error) { - result = &v1alpha1.VolumeAttributesClass{} - err = c.client.Patch(pt). - Resource("volumeattributesclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttributesClass. -func (c *volumeAttributesClasses) Apply(ctx context.Context, volumeAttributesClass *storagev1alpha1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttributesClass, err error) { - if volumeAttributesClass == nil { - return nil, fmt.Errorf("volumeAttributesClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttributesClass) - if err != nil { - return nil, err - } - name := volumeAttributesClass.Name - if name == nil { - return nil, fmt.Errorf("volumeAttributesClass.Name must be provided to Apply") - } - result = &v1alpha1.VolumeAttributesClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattributesclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go index 04e677db05..2748919b40 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSIDriversGetter has a method to return a CSIDriverInterface. @@ -55,143 +52,18 @@ type CSIDriverInterface interface { // cSIDrivers implements CSIDriverInterface type cSIDrivers struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.CSIDriver, *v1beta1.CSIDriverList, *storagev1beta1.CSIDriverApplyConfiguration] } // newCSIDrivers returns a CSIDrivers func newCSIDrivers(c *StorageV1beta1Client) *cSIDrivers { return &cSIDrivers{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.CSIDriver, *v1beta1.CSIDriverList, *storagev1beta1.CSIDriverApplyConfiguration]( + "csidrivers", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.CSIDriver { return &v1beta1.CSIDriver{} }, + func() *v1beta1.CSIDriverList { return &v1beta1.CSIDriverList{} }), } } - -// Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any. -func (c *cSIDrivers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSIDriver, err error) { - result = &v1beta1.CSIDriver{} - err = c.client.Get(). - Resource("csidrivers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSIDrivers that match those selectors. -func (c *cSIDrivers) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSIDriverList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CSIDriverList{} - err = c.client.Get(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSIDrivers. -func (c *cSIDrivers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSIDriver and creates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *cSIDrivers) Create(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.CreateOptions) (result *v1beta1.CSIDriver, err error) { - result = &v1beta1.CSIDriver{} - err = c.client.Post(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIDriver). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *cSIDrivers) Update(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.UpdateOptions) (result *v1beta1.CSIDriver, err error) { - result = &v1beta1.CSIDriver{} - err = c.client.Put(). - Resource("csidrivers"). - Name(cSIDriver.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIDriver). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSIDriver and deletes it. Returns an error if one occurs. -func (c *cSIDrivers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("csidrivers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cSIDrivers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("csidrivers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSIDriver. -func (c *cSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIDriver, err error) { - result = &v1beta1.CSIDriver{} - err = c.client.Patch(pt). - Resource("csidrivers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIDriver. -func (c *cSIDrivers) Apply(ctx context.Context, cSIDriver *storagev1beta1.CSIDriverApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSIDriver, err error) { - if cSIDriver == nil { - return nil, fmt.Errorf("cSIDriver provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSIDriver) - if err != nil { - return nil, err - } - name := cSIDriver.Name - if name == nil { - return nil, fmt.Errorf("cSIDriver.Name must be provided to Apply") - } - result = &v1beta1.CSIDriver{} - err = c.client.Patch(types.ApplyPatchType). - Resource("csidrivers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go index c3760b5ce5..fe6fe228e3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSINodesGetter has a method to return a CSINodeInterface. 
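Context for the hunk that follows: the hand-written REST method bodies are removed because the typed client now embeds the generic gentype.ClientWithListAndApply, which supplies Get, List, Watch, Create, Update, Delete, DeleteCollection, Patch, and Apply. The exported CSINodeInterface is unchanged, so existing call sites keep compiling. A minimal usage sketch, not part of this patch, assuming an in-cluster config and a placeholder node name:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Assumes the program runs inside a cluster; use clientcmd for out-of-cluster setups.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// The call site is unaffected by the gentype migration; "some-node" is a placeholder.
	node, err := cs.StorageV1beta1().CSINodes().Get(context.TODO(), "some-node", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(node.Name)
}
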
@@ -55,143 +52,18 @@ type CSINodeInterface interface { // cSINodes implements CSINodeInterface type cSINodes struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.CSINode, *v1beta1.CSINodeList, *storagev1beta1.CSINodeApplyConfiguration] } // newCSINodes returns a CSINodes func newCSINodes(c *StorageV1beta1Client) *cSINodes { return &cSINodes{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.CSINode, *v1beta1.CSINodeList, *storagev1beta1.CSINodeApplyConfiguration]( + "csinodes", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.CSINode { return &v1beta1.CSINode{} }, + func() *v1beta1.CSINodeList { return &v1beta1.CSINodeList{} }), } } - -// Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. -func (c *cSINodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSINode, err error) { - result = &v1beta1.CSINode{} - err = c.client.Get(). - Resource("csinodes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSINodes that match those selectors. -func (c *cSINodes) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSINodeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CSINodeList{} - err = c.client.Get(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSINodes. -func (c *cSINodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *cSINodes) Create(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.CreateOptions) (result *v1beta1.CSINode, err error) { - result = &v1beta1.CSINode{} - err = c.client.Post(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSINode). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *cSINodes) Update(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.UpdateOptions) (result *v1beta1.CSINode, err error) { - result = &v1beta1.CSINode{} - err = c.client.Put(). - Resource("csinodes"). - Name(cSINode.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSINode). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSINode and deletes it. Returns an error if one occurs. -func (c *cSINodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("csinodes"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *cSINodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("csinodes"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSINode. -func (c *cSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSINode, err error) { - result = &v1beta1.CSINode{} - err = c.client.Patch(pt). - Resource("csinodes"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSINode. -func (c *cSINodes) Apply(ctx context.Context, cSINode *storagev1beta1.CSINodeApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSINode, err error) { - if cSINode == nil { - return nil, fmt.Errorf("cSINode provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSINode) - if err != nil { - return nil, err - } - name := cSINode.Name - if name == nil { - return nil, fmt.Errorf("cSINode.Name must be provided to Apply") - } - result = &v1beta1.CSINode{} - err = c.client.Patch(types.ApplyPatchType). - Resource("csinodes"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go index 98ba936dc4..e9ffc1df92 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSIStorageCapacitiesGetter has a method to return a CSIStorageCapacityInterface. 
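Unlike the cluster-scoped clients above, CSIStorageCapacity is namespaced, so in the hunk that follows the constructor passes the namespace straight into gentype.NewClientWithListAndApply instead of keeping a separate ns field. A hedged usage sketch, not part of this patch; the kubeconfig path and namespace are placeholders:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative out-of-cluster setup; replace the path with a real kubeconfig.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// The namespace chosen here is the one handed to the generic client constructor.
	caps, err := cs.StorageV1beta1().CSIStorageCapacities("kube-system").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, c := range caps.Items {
		fmt.Println(c.Name, c.StorageClassName)
	}
}
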
@@ -55,154 +52,18 @@ type CSIStorageCapacityInterface interface { // cSIStorageCapacities implements CSIStorageCapacityInterface type cSIStorageCapacities struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.CSIStorageCapacity, *v1beta1.CSIStorageCapacityList, *storagev1beta1.CSIStorageCapacityApplyConfiguration] } // newCSIStorageCapacities returns a CSIStorageCapacities func newCSIStorageCapacities(c *StorageV1beta1Client, namespace string) *cSIStorageCapacities { return &cSIStorageCapacities{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.CSIStorageCapacity, *v1beta1.CSIStorageCapacityList, *storagev1beta1.CSIStorageCapacityApplyConfiguration]( + "csistoragecapacities", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.CSIStorageCapacity { return &v1beta1.CSIStorageCapacity{} }, + func() *v1beta1.CSIStorageCapacityList { return &v1beta1.CSIStorageCapacityList{} }), } } - -// Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. -func (c *cSIStorageCapacities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSIStorageCapacity, err error) { - result = &v1beta1.CSIStorageCapacity{} - err = c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors. -func (c *cSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSIStorageCapacityList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CSIStorageCapacityList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSIStorageCapacities. -func (c *cSIStorageCapacities) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSIStorageCapacity and creates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *cSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.CreateOptions) (result *v1beta1.CSIStorageCapacity, err error) { - result = &v1beta1.CSIStorageCapacity{} - err = c.client.Post(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIStorageCapacity). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. 
-func (c *cSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.UpdateOptions) (result *v1beta1.CSIStorageCapacity, err error) { - result = &v1beta1.CSIStorageCapacity{} - err = c.client.Put(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(cSIStorageCapacity.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIStorageCapacity). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSIStorageCapacity and deletes it. Returns an error if one occurs. -func (c *cSIStorageCapacities) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSIStorageCapacity. -func (c *cSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIStorageCapacity, err error) { - result = &v1beta1.CSIStorageCapacity{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIStorageCapacity. -func (c *cSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity *storagev1beta1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSIStorageCapacity, err error) { - if cSIStorageCapacity == nil { - return nil, fmt.Errorf("cSIStorageCapacity provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSIStorageCapacity) - if err != nil { - return nil, err - } - name := cSIStorageCapacity.Name - if name == nil { - return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply") - } - result = &v1beta1.CSIStorageCapacity{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go index 4257aa6183..2b230707fe 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go @@ -43,20 +43,22 @@ var csidriversKind = v1beta1.SchemeGroupVersion.WithKind("CSIDriver") // Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any. 
func (c *FakeCSIDrivers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSIDriver, err error) { + emptyResult := &v1beta1.CSIDriver{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(csidriversResource, name), &v1beta1.CSIDriver{}) + Invokes(testing.NewRootGetActionWithOptions(csidriversResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSIDriver), err } // List takes label and field selectors, and returns the list of CSIDrivers that match those selectors. func (c *FakeCSIDrivers) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSIDriverList, err error) { + emptyResult := &v1beta1.CSIDriverList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(csidriversResource, csidriversKind, opts), &v1beta1.CSIDriverList{}) + Invokes(testing.NewRootListActionWithOptions(csidriversResource, csidriversKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeCSIDrivers) List(ctx context.Context, opts v1.ListOptions) (result // Watch returns a watch.Interface that watches the requested cSIDrivers. func (c *FakeCSIDrivers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(csidriversResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(csidriversResource, opts)) } // Create takes the representation of a cSIDriver and creates it. Returns the server's representation of the cSIDriver, and an error, if there is any. func (c *FakeCSIDrivers) Create(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.CreateOptions) (result *v1beta1.CSIDriver, err error) { + emptyResult := &v1beta1.CSIDriver{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(csidriversResource, cSIDriver), &v1beta1.CSIDriver{}) + Invokes(testing.NewRootCreateActionWithOptions(csidriversResource, cSIDriver, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSIDriver), err } // Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any. func (c *FakeCSIDrivers) Update(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.UpdateOptions) (result *v1beta1.CSIDriver, err error) { + emptyResult := &v1beta1.CSIDriver{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(csidriversResource, cSIDriver), &v1beta1.CSIDriver{}) + Invokes(testing.NewRootUpdateActionWithOptions(csidriversResource, cSIDriver, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSIDriver), err } @@ -107,7 +111,7 @@ func (c *FakeCSIDrivers) Delete(ctx context.Context, name string, opts v1.Delete // DeleteCollection deletes a collection of objects. func (c *FakeCSIDrivers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(csidriversResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(csidriversResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.CSIDriverList{}) return err @@ -115,10 +119,11 @@ func (c *FakeCSIDrivers) DeleteCollection(ctx context.Context, opts v1.DeleteOpt // Patch applies the patch and returns the patched cSIDriver. 
func (c *FakeCSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIDriver, err error) { + emptyResult := &v1beta1.CSIDriver{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(csidriversResource, name, pt, data, subresources...), &v1beta1.CSIDriver{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(csidriversResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSIDriver), err } @@ -136,10 +141,11 @@ func (c *FakeCSIDrivers) Apply(ctx context.Context, cSIDriver *storagev1beta1.CS if name == nil { return nil, fmt.Errorf("cSIDriver.Name must be provided to Apply") } + emptyResult := &v1beta1.CSIDriver{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(csidriversResource, *name, types.ApplyPatchType, data), &v1beta1.CSIDriver{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(csidriversResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSIDriver), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go index d38c104bc1..c5c2b58250 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go @@ -43,20 +43,22 @@ var csinodesKind = v1beta1.SchemeGroupVersion.WithKind("CSINode") // Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. func (c *FakeCSINodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSINode, err error) { + emptyResult := &v1beta1.CSINode{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(csinodesResource, name), &v1beta1.CSINode{}) + Invokes(testing.NewRootGetActionWithOptions(csinodesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSINode), err } // List takes label and field selectors, and returns the list of CSINodes that match those selectors. func (c *FakeCSINodes) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSINodeList, err error) { + emptyResult := &v1beta1.CSINodeList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(csinodesResource, csinodesKind, opts), &v1beta1.CSINodeList{}) + Invokes(testing.NewRootListActionWithOptions(csinodesResource, csinodesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeCSINodes) List(ctx context.Context, opts v1.ListOptions) (result *v // Watch returns a watch.Interface that watches the requested cSINodes. func (c *FakeCSINodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(csinodesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(csinodesResource, opts)) } // Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. 
func (c *FakeCSINodes) Create(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.CreateOptions) (result *v1beta1.CSINode, err error) { + emptyResult := &v1beta1.CSINode{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(csinodesResource, cSINode), &v1beta1.CSINode{}) + Invokes(testing.NewRootCreateActionWithOptions(csinodesResource, cSINode, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSINode), err } // Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. func (c *FakeCSINodes) Update(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.UpdateOptions) (result *v1beta1.CSINode, err error) { + emptyResult := &v1beta1.CSINode{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(csinodesResource, cSINode), &v1beta1.CSINode{}) + Invokes(testing.NewRootUpdateActionWithOptions(csinodesResource, cSINode, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSINode), err } @@ -107,7 +111,7 @@ func (c *FakeCSINodes) Delete(ctx context.Context, name string, opts v1.DeleteOp // DeleteCollection deletes a collection of objects. func (c *FakeCSINodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(csinodesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(csinodesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.CSINodeList{}) return err @@ -115,10 +119,11 @@ func (c *FakeCSINodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptio // Patch applies the patch and returns the patched cSINode. func (c *FakeCSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSINode, err error) { + emptyResult := &v1beta1.CSINode{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(csinodesResource, name, pt, data, subresources...), &v1beta1.CSINode{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(csinodesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSINode), err } @@ -136,10 +141,11 @@ func (c *FakeCSINodes) Apply(ctx context.Context, cSINode *storagev1beta1.CSINod if name == nil { return nil, fmt.Errorf("cSINode.Name must be provided to Apply") } + emptyResult := &v1beta1.CSINode{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(csinodesResource, *name, types.ApplyPatchType, data), &v1beta1.CSINode{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(csinodesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSINode), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go index d7bbb614b2..59a9aaf9df 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go @@ -44,22 +44,24 @@ var csistoragecapacitiesKind = v1beta1.SchemeGroupVersion.WithKind("CSIStorageCa // Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. func (c *FakeCSIStorageCapacities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSIStorageCapacity, err error) { + emptyResult := &v1beta1.CSIStorageCapacity{} obj, err := c.Fake. - Invokes(testing.NewGetAction(csistoragecapacitiesResource, c.ns, name), &v1beta1.CSIStorageCapacity{}) + Invokes(testing.NewGetActionWithOptions(csistoragecapacitiesResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSIStorageCapacity), err } // List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors. func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSIStorageCapacityList, err error) { + emptyResult := &v1beta1.CSIStorageCapacityList{} obj, err := c.Fake. - Invokes(testing.NewListAction(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), &v1beta1.CSIStorageCapacityList{}) + Invokes(testing.NewListActionWithOptions(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -78,28 +80,30 @@ func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions // Watch returns a watch.Interface that watches the requested cSIStorageCapacities. func (c *FakeCSIStorageCapacities) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(csistoragecapacitiesResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(csistoragecapacitiesResource, c.ns, opts)) } // Create takes the representation of a cSIStorageCapacity and creates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. func (c *FakeCSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.CreateOptions) (result *v1beta1.CSIStorageCapacity, err error) { + emptyResult := &v1beta1.CSIStorageCapacity{} obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1beta1.CSIStorageCapacity{}) + Invokes(testing.NewCreateActionWithOptions(csistoragecapacitiesResource, c.ns, cSIStorageCapacity, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSIStorageCapacity), err } // Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. func (c *FakeCSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.UpdateOptions) (result *v1beta1.CSIStorageCapacity, err error) { + emptyResult := &v1beta1.CSIStorageCapacity{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1beta1.CSIStorageCapacity{}) + Invokes(testing.NewUpdateActionWithOptions(csistoragecapacitiesResource, c.ns, cSIStorageCapacity, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSIStorageCapacity), err } @@ -114,7 +118,7 @@ func (c *FakeCSIStorageCapacities) Delete(ctx context.Context, name string, opts // DeleteCollection deletes a collection of objects. func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(csistoragecapacitiesResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(csistoragecapacitiesResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.CSIStorageCapacityList{}) return err @@ -122,11 +126,12 @@ func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1 // Patch applies the patch and returns the patched cSIStorageCapacity. func (c *FakeCSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIStorageCapacity, err error) { + emptyResult := &v1beta1.CSIStorageCapacity{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, name, pt, data, subresources...), &v1beta1.CSIStorageCapacity{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(csistoragecapacitiesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSIStorageCapacity), err } @@ -144,11 +149,12 @@ func (c *FakeCSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity if name == nil { return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply") } + emptyResult := &v1beta1.CSIStorageCapacity{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.CSIStorageCapacity{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.CSIStorageCapacity), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go index 6b5bb02fda..470281607f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go @@ -48,6 +48,10 @@ func (c *FakeStorageV1beta1) VolumeAttachments() v1beta1.VolumeAttachmentInterfa return &FakeVolumeAttachments{c} } +func (c *FakeStorageV1beta1) VolumeAttributesClasses() v1beta1.VolumeAttributesClassInterface { + return &FakeVolumeAttributesClasses{c} +} + // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. func (c *FakeStorageV1beta1) RESTClient() rest.Interface { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go index 869e58b4f7..954a346081 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go @@ -43,20 +43,22 @@ var storageclassesKind = v1beta1.SchemeGroupVersion.WithKind("StorageClass") // Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. func (c *FakeStorageClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) { + emptyResult := &v1beta1.StorageClass{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(storageclassesResource, name), &v1beta1.StorageClass{}) + Invokes(testing.NewRootGetActionWithOptions(storageclassesResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.StorageClass), err } // List takes label and field selectors, and returns the list of StorageClasses that match those selectors. func (c *FakeStorageClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { + emptyResult := &v1beta1.StorageClassList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(storageclassesResource, storageclassesKind, opts), &v1beta1.StorageClassList{}) + Invokes(testing.NewRootListActionWithOptions(storageclassesResource, storageclassesKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,25 +77,27 @@ func (c *FakeStorageClasses) List(ctx context.Context, opts v1.ListOptions) (res // Watch returns a watch.Interface that watches the requested storageClasses. func (c *FakeStorageClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(storageclassesResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(storageclassesResource, opts)) } // Create takes the representation of a storageClass and creates it. 
Returns the server's representation of the storageClass, and an error, if there is any. func (c *FakeStorageClasses) Create(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.CreateOptions) (result *v1beta1.StorageClass, err error) { + emptyResult := &v1beta1.StorageClass{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(storageclassesResource, storageClass), &v1beta1.StorageClass{}) + Invokes(testing.NewRootCreateActionWithOptions(storageclassesResource, storageClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.StorageClass), err } // Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. func (c *FakeStorageClasses) Update(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.UpdateOptions) (result *v1beta1.StorageClass, err error) { + emptyResult := &v1beta1.StorageClass{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(storageclassesResource, storageClass), &v1beta1.StorageClass{}) + Invokes(testing.NewRootUpdateActionWithOptions(storageclassesResource, storageClass, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.StorageClass), err } @@ -107,7 +111,7 @@ func (c *FakeStorageClasses) Delete(ctx context.Context, name string, opts v1.De // DeleteCollection deletes a collection of objects. func (c *FakeStorageClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(storageclassesResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(storageclassesResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.StorageClassList{}) return err @@ -115,10 +119,11 @@ func (c *FakeStorageClasses) DeleteCollection(ctx context.Context, opts v1.Delet // Patch applies the patch and returns the patched storageClass. func (c *FakeStorageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StorageClass, err error) { + emptyResult := &v1beta1.StorageClass{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, pt, data, subresources...), &v1beta1.StorageClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageclassesResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.StorageClass), err } @@ -136,10 +141,11 @@ func (c *FakeStorageClasses) Apply(ctx context.Context, storageClass *storagev1b if name == nil { return nil, fmt.Errorf("storageClass.Name must be provided to Apply") } + emptyResult := &v1beta1.StorageClass{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, *name, types.ApplyPatchType, data), &v1beta1.StorageClass{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.StorageClass), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go index e2b4a2eb1b..247f7ca627 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go @@ -43,20 +43,22 @@ var volumeattachmentsKind = v1beta1.SchemeGroupVersion.WithKind("VolumeAttachmen // Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. func (c *FakeVolumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VolumeAttachment, err error) { + emptyResult := &v1beta1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootGetAction(volumeattachmentsResource, name), &v1beta1.VolumeAttachment{}) + Invokes(testing.NewRootGetActionWithOptions(volumeattachmentsResource, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.VolumeAttachment), err } // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. func (c *FakeVolumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) { + emptyResult := &v1beta1.VolumeAttachmentList{} obj, err := c.Fake. - Invokes(testing.NewRootListAction(volumeattachmentsResource, volumeattachmentsKind, opts), &v1beta1.VolumeAttachmentList{}) + Invokes(testing.NewRootListActionWithOptions(volumeattachmentsResource, volumeattachmentsKind, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -75,36 +77,39 @@ func (c *FakeVolumeAttachments) List(ctx context.Context, opts v1.ListOptions) ( // Watch returns a watch.Interface that watches the requested volumeAttachments. func (c *FakeVolumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewRootWatchAction(volumeattachmentsResource, opts)) + InvokesWatch(testing.NewRootWatchActionWithOptions(volumeattachmentsResource, opts)) } // Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.CreateOptions) (result *v1beta1.VolumeAttachment, err error) { + emptyResult := &v1beta1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &v1beta1.VolumeAttachment{}) + Invokes(testing.NewRootCreateActionWithOptions(volumeattachmentsResource, volumeAttachment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.VolumeAttachment), err } // Update takes the representation of a volumeAttachment and updates it. 
Returns the server's representation of the volumeAttachment, and an error, if there is any. func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) { + emptyResult := &v1beta1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &v1beta1.VolumeAttachment{}) + Invokes(testing.NewRootUpdateActionWithOptions(volumeattachmentsResource, volumeAttachment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.VolumeAttachment), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error) { +func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) { + emptyResult := &v1beta1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &v1beta1.VolumeAttachment{}) + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(volumeattachmentsResource, "status", volumeAttachment, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.VolumeAttachment), err } @@ -118,7 +123,7 @@ func (c *FakeVolumeAttachments) Delete(ctx context.Context, name string, opts v1 // DeleteCollection deletes a collection of objects. func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(volumeattachmentsResource, listOpts) + action := testing.NewRootDeleteCollectionActionWithOptions(volumeattachmentsResource, opts, listOpts) _, err := c.Fake.Invokes(action, &v1beta1.VolumeAttachmentList{}) return err @@ -126,10 +131,11 @@ func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts v1.De // Patch applies the patch and returns the patched volumeAttachment. func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttachment, err error) { + emptyResult := &v1beta1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &v1beta1.VolumeAttachment{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.VolumeAttachment), err } @@ -147,10 +153,11 @@ func (c *FakeVolumeAttachments) Apply(ctx context.Context, volumeAttachment *sto if name == nil { return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") } + emptyResult := &v1beta1.VolumeAttachment{} obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data), &v1beta1.VolumeAttachment{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.VolumeAttachment), err } @@ -169,10 +176,11 @@ func (c *FakeVolumeAttachments) ApplyStatus(ctx context.Context, volumeAttachmen if name == nil { return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") } + emptyResult := &v1beta1.VolumeAttachment{} obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data, "status"), &v1beta1.VolumeAttachment{}) + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattachmentsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1beta1.VolumeAttachment), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattributesclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattributesclass.go new file mode 100644 index 0000000000..3cef7291ab --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattributesclass.go @@ -0,0 +1,151 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1beta1 "k8s.io/api/storage/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + testing "k8s.io/client-go/testing" +) + +// FakeVolumeAttributesClasses implements VolumeAttributesClassInterface +type FakeVolumeAttributesClasses struct { + Fake *FakeStorageV1beta1 +} + +var volumeattributesclassesResource = v1beta1.SchemeGroupVersion.WithResource("volumeattributesclasses") + +var volumeattributesclassesKind = v1beta1.SchemeGroupVersion.WithKind("VolumeAttributesClass") + +// Get takes name of the volumeAttributesClass, and returns the corresponding volumeAttributesClass object, and an error if there is any. +func (c *FakeVolumeAttributesClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VolumeAttributesClass, err error) { + emptyResult := &v1beta1.VolumeAttributesClass{} + obj, err := c.Fake. + Invokes(testing.NewRootGetActionWithOptions(volumeattributesclassesResource, name, options), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.VolumeAttributesClass), err +} + +// List takes label and field selectors, and returns the list of VolumeAttributesClasses that match those selectors. 
+func (c *FakeVolumeAttributesClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VolumeAttributesClassList, err error) { + emptyResult := &v1beta1.VolumeAttributesClassList{} + obj, err := c.Fake. + Invokes(testing.NewRootListActionWithOptions(volumeattributesclassesResource, volumeattributesclassesKind, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.VolumeAttributesClassList{ListMeta: obj.(*v1beta1.VolumeAttributesClassList).ListMeta} + for _, item := range obj.(*v1beta1.VolumeAttributesClassList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested volumeAttributesClasses. +func (c *FakeVolumeAttributesClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchActionWithOptions(volumeattributesclassesResource, opts)) +} + +// Create takes the representation of a volumeAttributesClass and creates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. +func (c *FakeVolumeAttributesClasses) Create(ctx context.Context, volumeAttributesClass *v1beta1.VolumeAttributesClass, opts v1.CreateOptions) (result *v1beta1.VolumeAttributesClass, err error) { + emptyResult := &v1beta1.VolumeAttributesClass{} + obj, err := c.Fake. + Invokes(testing.NewRootCreateActionWithOptions(volumeattributesclassesResource, volumeAttributesClass, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.VolumeAttributesClass), err +} + +// Update takes the representation of a volumeAttributesClass and updates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. +func (c *FakeVolumeAttributesClasses) Update(ctx context.Context, volumeAttributesClass *v1beta1.VolumeAttributesClass, opts v1.UpdateOptions) (result *v1beta1.VolumeAttributesClass, err error) { + emptyResult := &v1beta1.VolumeAttributesClass{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateActionWithOptions(volumeattributesclassesResource, volumeAttributesClass, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.VolumeAttributesClass), err +} + +// Delete takes name of the volumeAttributesClass and deletes it. Returns an error if one occurs. +func (c *FakeVolumeAttributesClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(volumeattributesclassesResource, name, opts), &v1beta1.VolumeAttributesClass{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeVolumeAttributesClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionActionWithOptions(volumeattributesclassesResource, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1beta1.VolumeAttributesClassList{}) + return err +} + +// Patch applies the patch and returns the patched volumeAttributesClass. 
+func (c *FakeVolumeAttributesClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttributesClass, err error) { + emptyResult := &v1beta1.VolumeAttributesClass{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattributesclassesResource, name, pt, data, opts, subresources...), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.VolumeAttributesClass), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttributesClass. +func (c *FakeVolumeAttributesClasses) Apply(ctx context.Context, volumeAttributesClass *storagev1beta1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttributesClass, err error) { + if volumeAttributesClass == nil { + return nil, fmt.Errorf("volumeAttributesClass provided to Apply must not be nil") + } + data, err := json.Marshal(volumeAttributesClass) + if err != nil { + return nil, err + } + name := volumeAttributesClass.Name + if name == nil { + return nil, fmt.Errorf("volumeAttributesClass.Name must be provided to Apply") + } + emptyResult := &v1beta1.VolumeAttributesClass{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(volumeattributesclassesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1beta1.VolumeAttributesClass), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go index 1a202a928e..ebf78e10bc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go @@ -27,3 +27,5 @@ type CSIStorageCapacityExpansion interface{} type StorageClassExpansion interface{} type VolumeAttachmentExpansion interface{} + +type VolumeAttributesClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go index 4c7604bd29..3d1b59e36c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go @@ -33,6 +33,7 @@ type StorageV1beta1Interface interface { CSIStorageCapacitiesGetter StorageClassesGetter VolumeAttachmentsGetter + VolumeAttributesClassesGetter } // StorageV1beta1Client is used to interact with features provided by the storage.k8s.io group. @@ -60,6 +61,10 @@ func (c *StorageV1beta1Client) VolumeAttachments() VolumeAttachmentInterface { return newVolumeAttachments(c) } +func (c *StorageV1beta1Client) VolumeAttributesClasses() VolumeAttributesClassInterface { + return newVolumeAttributesClasses(c) +} + // NewForConfig creates a new StorageV1beta1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). 
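With the VolumeAttributesClasses() getter wired into both StorageV1beta1Client and its fake above, the new v1beta1 client can be exercised end to end in tests. A minimal sketch using the fake clientset, not part of this patch; the object name and driver name are placeholders, and the fields shown follow the vendored storage/v1beta1 API:

package main

import (
	"context"
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// The fake clientset records actions via the *WithOptions constructors used above.
	cs := fake.NewSimpleClientset()

	vac := &storagev1beta1.VolumeAttributesClass{
		ObjectMeta: metav1.ObjectMeta{Name: "fast"},
		DriverName: "example.csi.k8s.io", // placeholder CSI driver name
	}
	if _, err := cs.StorageV1beta1().VolumeAttributesClasses().Create(context.TODO(), vac, metav1.CreateOptions{}); err != nil {
		panic(err)
	}

	got, err := cs.StorageV1beta1().VolumeAttributesClasses().Get(context.TODO(), "fast", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(got.Name, got.DriverName)
}
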
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go index 9b4ef231c8..fed699cc85 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // StorageClassesGetter has a method to return a StorageClassInterface. @@ -55,143 +52,18 @@ type StorageClassInterface interface { // storageClasses implements StorageClassInterface type storageClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.StorageClass, *v1beta1.StorageClassList, *storagev1beta1.StorageClassApplyConfiguration] } // newStorageClasses returns a StorageClasses func newStorageClasses(c *StorageV1beta1Client) *storageClasses { return &storageClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.StorageClass, *v1beta1.StorageClassList, *storagev1beta1.StorageClassApplyConfiguration]( + "storageclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.StorageClass { return &v1beta1.StorageClass{} }, + func() *v1beta1.StorageClassList { return &v1beta1.StorageClassList{} }), } } - -// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. -func (c *storageClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) { - result = &v1beta1.StorageClass{} - err = c.client.Get(). - Resource("storageclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. -func (c *storageClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.StorageClassList{} - err = c.client.Get(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested storageClasses. -func (c *storageClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Create(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.CreateOptions) (result *v1beta1.StorageClass, err error) { - result = &v1beta1.StorageClass{} - err = c.client.Post(). 
- Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Update(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.UpdateOptions) (result *v1beta1.StorageClass, err error) { - result = &v1beta1.StorageClass{} - err = c.client.Put(). - Resource("storageclasses"). - Name(storageClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. -func (c *storageClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("storageclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *storageClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("storageclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched storageClass. -func (c *storageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StorageClass, err error) { - result = &v1beta1.StorageClass{} - err = c.client.Patch(pt). - Resource("storageclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied storageClass. -func (c *storageClasses) Apply(ctx context.Context, storageClass *storagev1beta1.StorageClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StorageClass, err error) { - if storageClass == nil { - return nil, fmt.Errorf("storageClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(storageClass) - if err != nil { - return nil, err - } - name := storageClass.Name - if name == nil { - return nil, fmt.Errorf("storageClass.Name must be provided to Apply") - } - result = &v1beta1.StorageClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("storageclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go index 35a8b64fcc..01024ce485 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. @@ -43,6 +40,7 @@ type VolumeAttachmentsGetter interface { type VolumeAttachmentInterface interface { Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.CreateOptions) (*v1beta1.VolumeAttachment, error) Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type VolumeAttachmentInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttachment, err error) Apply(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error) VolumeAttachmentExpansion } // volumeAttachments implements VolumeAttachmentInterface type volumeAttachments struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.VolumeAttachment, *v1beta1.VolumeAttachmentList, *storagev1beta1.VolumeAttachmentApplyConfiguration] } // newVolumeAttachments returns a VolumeAttachments func newVolumeAttachments(c *StorageV1beta1Client) *volumeAttachments { return &volumeAttachments{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.VolumeAttachment, *v1beta1.VolumeAttachmentList, *storagev1beta1.VolumeAttachmentApplyConfiguration]( + "volumeattachments", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.VolumeAttachment { return &v1beta1.VolumeAttachment{} }, + func() *v1beta1.VolumeAttachmentList { return &v1beta1.VolumeAttachmentList{} }), } } - -// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. 
-func (c *volumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VolumeAttachment, err error) { - result = &v1beta1.VolumeAttachment{} - err = c.client.Get(). - Resource("volumeattachments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *volumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.VolumeAttachmentList{} - err = c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *volumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.CreateOptions) (result *v1beta1.VolumeAttachment, err error) { - result = &v1beta1.VolumeAttachment{} - err = c.client.Post(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) { - result = &v1beta1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) { - result = &v1beta1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *volumeAttachments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("volumeattachments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *volumeAttachments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("volumeattachments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched volumeAttachment. -func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttachment, err error) { - result = &v1beta1.VolumeAttachment{} - err = c.client.Patch(pt). - Resource("volumeattachments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttachment. -func (c *volumeAttachments) Apply(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - result = &v1beta1.VolumeAttachment{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattachments"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *volumeAttachments) ApplyStatus(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - - result = &v1beta1.VolumeAttachment{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattachments"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattributesclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattributesclass.go new file mode 100644 index 0000000000..47eadcac63 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattributesclass.go @@ -0,0 +1,69 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + + v1beta1 "k8s.io/api/storage/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// VolumeAttributesClassesGetter has a method to return a VolumeAttributesClassInterface. +// A group's client should implement this interface. +type VolumeAttributesClassesGetter interface { + VolumeAttributesClasses() VolumeAttributesClassInterface +} + +// VolumeAttributesClassInterface has methods to work with VolumeAttributesClass resources. +type VolumeAttributesClassInterface interface { + Create(ctx context.Context, volumeAttributesClass *v1beta1.VolumeAttributesClass, opts v1.CreateOptions) (*v1beta1.VolumeAttributesClass, error) + Update(ctx context.Context, volumeAttributesClass *v1beta1.VolumeAttributesClass, opts v1.UpdateOptions) (*v1beta1.VolumeAttributesClass, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.VolumeAttributesClass, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.VolumeAttributesClassList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttributesClass, err error) + Apply(ctx context.Context, volumeAttributesClass *storagev1beta1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttributesClass, err error) + VolumeAttributesClassExpansion +} + +// volumeAttributesClasses implements VolumeAttributesClassInterface +type volumeAttributesClasses struct { + *gentype.ClientWithListAndApply[*v1beta1.VolumeAttributesClass, *v1beta1.VolumeAttributesClassList, *storagev1beta1.VolumeAttributesClassApplyConfiguration] +} + +// newVolumeAttributesClasses returns a VolumeAttributesClasses +func newVolumeAttributesClasses(c *StorageV1beta1Client) *volumeAttributesClasses { + return &volumeAttributesClasses{ + gentype.NewClientWithListAndApply[*v1beta1.VolumeAttributesClass, *v1beta1.VolumeAttributesClassList, *storagev1beta1.VolumeAttributesClassApplyConfiguration]( + "volumeattributesclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.VolumeAttributesClass { return &v1beta1.VolumeAttributesClass{} }, + func() *v1beta1.VolumeAttributesClassList { return &v1beta1.VolumeAttributesClassList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/doc.go new file mode 100644 index 0000000000..df51baa4d4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* 
+Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/doc.go new file mode 100644 index 0000000000..16f4439906 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storagemigration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storagemigration_client.go new file mode 100644 index 0000000000..3ae8f4ae56 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storagemigration_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeStoragemigrationV1alpha1 struct { + *testing.Fake +} + +func (c *FakeStoragemigrationV1alpha1) StorageVersionMigrations() v1alpha1.StorageVersionMigrationInterface { + return &FakeStorageVersionMigrations{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeStoragemigrationV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storageversionmigration.go b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storageversionmigration.go new file mode 100644 index 0000000000..c3ff235912 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storageversionmigration.go @@ -0,0 +1,186 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1alpha1 "k8s.io/api/storagemigration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + storagemigrationv1alpha1 "k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1" + testing "k8s.io/client-go/testing" +) + +// FakeStorageVersionMigrations implements StorageVersionMigrationInterface +type FakeStorageVersionMigrations struct { + Fake *FakeStoragemigrationV1alpha1 +} + +var storageversionmigrationsResource = v1alpha1.SchemeGroupVersion.WithResource("storageversionmigrations") + +var storageversionmigrationsKind = v1alpha1.SchemeGroupVersion.WithKind("StorageVersionMigration") + +// Get takes name of the storageVersionMigration, and returns the corresponding storageVersionMigration object, and an error if there is any. +func (c *FakeStorageVersionMigrations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersionMigration, err error) { + emptyResult := &v1alpha1.StorageVersionMigration{} + obj, err := c.Fake. + Invokes(testing.NewRootGetActionWithOptions(storageversionmigrationsResource, name, options), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.StorageVersionMigration), err +} + +// List takes label and field selectors, and returns the list of StorageVersionMigrations that match those selectors. +func (c *FakeStorageVersionMigrations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionMigrationList, err error) { + emptyResult := &v1alpha1.StorageVersionMigrationList{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootListActionWithOptions(storageversionmigrationsResource, storageversionmigrationsKind, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.StorageVersionMigrationList{ListMeta: obj.(*v1alpha1.StorageVersionMigrationList).ListMeta} + for _, item := range obj.(*v1alpha1.StorageVersionMigrationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested storageVersionMigrations. +func (c *FakeStorageVersionMigrations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchActionWithOptions(storageversionmigrationsResource, opts)) +} + +// Create takes the representation of a storageVersionMigration and creates it. Returns the server's representation of the storageVersionMigration, and an error, if there is any. +func (c *FakeStorageVersionMigrations) Create(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.CreateOptions) (result *v1alpha1.StorageVersionMigration, err error) { + emptyResult := &v1alpha1.StorageVersionMigration{} + obj, err := c.Fake. + Invokes(testing.NewRootCreateActionWithOptions(storageversionmigrationsResource, storageVersionMigration, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.StorageVersionMigration), err +} + +// Update takes the representation of a storageVersionMigration and updates it. Returns the server's representation of the storageVersionMigration, and an error, if there is any. +func (c *FakeStorageVersionMigrations) Update(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (result *v1alpha1.StorageVersionMigration, err error) { + emptyResult := &v1alpha1.StorageVersionMigration{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateActionWithOptions(storageversionmigrationsResource, storageVersionMigration, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.StorageVersionMigration), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeStorageVersionMigrations) UpdateStatus(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (result *v1alpha1.StorageVersionMigration, err error) { + emptyResult := &v1alpha1.StorageVersionMigration{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(storageversionmigrationsResource, "status", storageVersionMigration, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.StorageVersionMigration), err +} + +// Delete takes name of the storageVersionMigration and deletes it. Returns an error if one occurs. +func (c *FakeStorageVersionMigrations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(storageversionmigrationsResource, name, opts), &v1alpha1.StorageVersionMigration{}) + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeStorageVersionMigrations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionActionWithOptions(storageversionmigrationsResource, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.StorageVersionMigrationList{}) + return err +} + +// Patch applies the patch and returns the patched storageVersionMigration. +func (c *FakeStorageVersionMigrations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersionMigration, err error) { + emptyResult := &v1alpha1.StorageVersionMigration{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageversionmigrationsResource, name, pt, data, opts, subresources...), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.StorageVersionMigration), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied storageVersionMigration. +func (c *FakeStorageVersionMigrations) Apply(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error) { + if storageVersionMigration == nil { + return nil, fmt.Errorf("storageVersionMigration provided to Apply must not be nil") + } + data, err := json.Marshal(storageVersionMigration) + if err != nil { + return nil, err + } + name := storageVersionMigration.Name + if name == nil { + return nil, fmt.Errorf("storageVersionMigration.Name must be provided to Apply") + } + emptyResult := &v1alpha1.StorageVersionMigration{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageversionmigrationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.StorageVersionMigration), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeStorageVersionMigrations) ApplyStatus(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error) { + if storageVersionMigration == nil { + return nil, fmt.Errorf("storageVersionMigration provided to Apply must not be nil") + } + data, err := json.Marshal(storageVersionMigration) + if err != nil { + return nil, err + } + name := storageVersionMigration.Name + if name == nil { + return nil, fmt.Errorf("storageVersionMigration.Name must be provided to Apply") + } + emptyResult := &v1alpha1.StorageVersionMigration{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceActionWithOptions(storageversionmigrationsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.StorageVersionMigration), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/generated_expansion.go new file mode 100644 index 0000000000..89220c3ce9 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type StorageVersionMigrationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storagemigration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storagemigration_client.go new file mode 100644 index 0000000000..613e453559 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storagemigration_client.go @@ -0,0 +1,107 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "net/http" + + v1alpha1 "k8s.io/api/storagemigration/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type StoragemigrationV1alpha1Interface interface { + RESTClient() rest.Interface + StorageVersionMigrationsGetter +} + +// StoragemigrationV1alpha1Client is used to interact with features provided by the storagemigration.k8s.io group. +type StoragemigrationV1alpha1Client struct { + restClient rest.Interface +} + +func (c *StoragemigrationV1alpha1Client) StorageVersionMigrations() StorageVersionMigrationInterface { + return newStorageVersionMigrations(c) +} + +// NewForConfig creates a new StoragemigrationV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
+func NewForConfig(c *rest.Config) (*StoragemigrationV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new StoragemigrationV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*StoragemigrationV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &StoragemigrationV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new StoragemigrationV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *StoragemigrationV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new StoragemigrationV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *StoragemigrationV1alpha1Client { + return &StoragemigrationV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *StoragemigrationV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go new file mode 100644 index 0000000000..5fc0fd5197 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go @@ -0,0 +1,73 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + + v1alpha1 "k8s.io/api/storagemigration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + storagemigrationv1alpha1 "k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// StorageVersionMigrationsGetter has a method to return a StorageVersionMigrationInterface. +// A group's client should implement this interface. 
+type StorageVersionMigrationsGetter interface { + StorageVersionMigrations() StorageVersionMigrationInterface +} + +// StorageVersionMigrationInterface has methods to work with StorageVersionMigration resources. +type StorageVersionMigrationInterface interface { + Create(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.CreateOptions) (*v1alpha1.StorageVersionMigration, error) + Update(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*v1alpha1.StorageVersionMigration, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*v1alpha1.StorageVersionMigration, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.StorageVersionMigration, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.StorageVersionMigrationList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersionMigration, err error) + Apply(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error) + StorageVersionMigrationExpansion +} + +// storageVersionMigrations implements StorageVersionMigrationInterface +type storageVersionMigrations struct { + *gentype.ClientWithListAndApply[*v1alpha1.StorageVersionMigration, *v1alpha1.StorageVersionMigrationList, *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration] +} + +// newStorageVersionMigrations returns a StorageVersionMigrations +func newStorageVersionMigrations(c *StoragemigrationV1alpha1Client) *storageVersionMigrations { + return &storageVersionMigrations{ + gentype.NewClientWithListAndApply[*v1alpha1.StorageVersionMigration, *v1alpha1.StorageVersionMigrationList, *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration]( + "storageversionmigrations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.StorageVersionMigration { return &v1alpha1.StorageVersionMigration{} }, + func() *v1alpha1.StorageVersionMigrationList { return &v1alpha1.StorageVersionMigrationList{} }), + } +} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/expansion_generated.go index e121ae41a3..9002ad6ea3 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/expansion_generated.go @@ -22,6 +22,14 @@ package v1 // MutatingWebhookConfigurationLister. type MutatingWebhookConfigurationListerExpansion interface{} +// ValidatingAdmissionPolicyListerExpansion allows custom methods to be added to +// ValidatingAdmissionPolicyLister. 
+type ValidatingAdmissionPolicyListerExpansion interface{} + +// ValidatingAdmissionPolicyBindingListerExpansion allows custom methods to be added to +// ValidatingAdmissionPolicyBindingLister. +type ValidatingAdmissionPolicyBindingListerExpansion interface{} + // ValidatingWebhookConfigurationListerExpansion allows custom methods to be added to // ValidatingWebhookConfigurationLister. type ValidatingWebhookConfigurationListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go index fe9e27985d..4ab267e42e 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/admissionregistration/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type MutatingWebhookConfigurationLister interface { // mutatingWebhookConfigurationLister implements the MutatingWebhookConfigurationLister interface. type mutatingWebhookConfigurationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.MutatingWebhookConfiguration] } // NewMutatingWebhookConfigurationLister returns a new MutatingWebhookConfigurationLister. func NewMutatingWebhookConfigurationLister(indexer cache.Indexer) MutatingWebhookConfigurationLister { - return &mutatingWebhookConfigurationLister{indexer: indexer} -} - -// List lists all MutatingWebhookConfigurations in the indexer. -func (s *mutatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*v1.MutatingWebhookConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.MutatingWebhookConfiguration)) - }) - return ret, err -} - -// Get retrieves the MutatingWebhookConfiguration from the index for a given name. -func (s *mutatingWebhookConfigurationLister) Get(name string) (*v1.MutatingWebhookConfiguration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("mutatingwebhookconfiguration"), name) - } - return obj.(*v1.MutatingWebhookConfiguration), nil + return &mutatingWebhookConfigurationLister{listers.New[*v1.MutatingWebhookConfiguration](indexer, v1.Resource("mutatingwebhookconfiguration"))} } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicy.go new file mode 100644 index 0000000000..f233cdbe80 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicy.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. 
DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/admissionregistration/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// ValidatingAdmissionPolicyLister helps list ValidatingAdmissionPolicies. +// All objects returned here must be treated as read-only. +type ValidatingAdmissionPolicyLister interface { + // List lists all ValidatingAdmissionPolicies in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ValidatingAdmissionPolicy, err error) + // Get retrieves the ValidatingAdmissionPolicy from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ValidatingAdmissionPolicy, error) + ValidatingAdmissionPolicyListerExpansion +} + +// validatingAdmissionPolicyLister implements the ValidatingAdmissionPolicyLister interface. +type validatingAdmissionPolicyLister struct { + listers.ResourceIndexer[*v1.ValidatingAdmissionPolicy] +} + +// NewValidatingAdmissionPolicyLister returns a new ValidatingAdmissionPolicyLister. +func NewValidatingAdmissionPolicyLister(indexer cache.Indexer) ValidatingAdmissionPolicyLister { + return &validatingAdmissionPolicyLister{listers.New[*v1.ValidatingAdmissionPolicy](indexer, v1.Resource("validatingadmissionpolicy"))} +} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicybinding.go new file mode 100644 index 0000000000..450a066725 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicybinding.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/admissionregistration/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// ValidatingAdmissionPolicyBindingLister helps list ValidatingAdmissionPolicyBindings. +// All objects returned here must be treated as read-only. +type ValidatingAdmissionPolicyBindingLister interface { + // List lists all ValidatingAdmissionPolicyBindings in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ValidatingAdmissionPolicyBinding, err error) + // Get retrieves the ValidatingAdmissionPolicyBinding from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ValidatingAdmissionPolicyBinding, error) + ValidatingAdmissionPolicyBindingListerExpansion +} + +// validatingAdmissionPolicyBindingLister implements the ValidatingAdmissionPolicyBindingLister interface. +type validatingAdmissionPolicyBindingLister struct { + listers.ResourceIndexer[*v1.ValidatingAdmissionPolicyBinding] +} + +// NewValidatingAdmissionPolicyBindingLister returns a new ValidatingAdmissionPolicyBindingLister. 
+func NewValidatingAdmissionPolicyBindingLister(indexer cache.Indexer) ValidatingAdmissionPolicyBindingLister { + return &validatingAdmissionPolicyBindingLister{listers.New[*v1.ValidatingAdmissionPolicyBinding](indexer, v1.Resource("validatingadmissionpolicybinding"))} +} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go index 1579a0ebb7..99045a6752 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/admissionregistration/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ValidatingWebhookConfigurationLister interface { // validatingWebhookConfigurationLister implements the ValidatingWebhookConfigurationLister interface. type validatingWebhookConfigurationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.ValidatingWebhookConfiguration] } // NewValidatingWebhookConfigurationLister returns a new ValidatingWebhookConfigurationLister. func NewValidatingWebhookConfigurationLister(indexer cache.Indexer) ValidatingWebhookConfigurationLister { - return &validatingWebhookConfigurationLister{indexer: indexer} -} - -// List lists all ValidatingWebhookConfigurations in the indexer. -func (s *validatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*v1.ValidatingWebhookConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ValidatingWebhookConfiguration)) - }) - return ret, err -} - -// Get retrieves the ValidatingWebhookConfiguration from the index for a given name. -func (s *validatingWebhookConfigurationLister) Get(name string) (*v1.ValidatingWebhookConfiguration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("validatingwebhookconfiguration"), name) - } - return obj.(*v1.ValidatingWebhookConfiguration), nil + return &validatingWebhookConfigurationLister{listers.New[*v1.ValidatingWebhookConfiguration](indexer, v1.Resource("validatingwebhookconfiguration"))} } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicy.go index ae500183a7..c3aec2d736 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicy.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ValidatingAdmissionPolicyLister interface { // validatingAdmissionPolicyLister implements the ValidatingAdmissionPolicyLister interface. type validatingAdmissionPolicyLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.ValidatingAdmissionPolicy] } // NewValidatingAdmissionPolicyLister returns a new ValidatingAdmissionPolicyLister. 
func NewValidatingAdmissionPolicyLister(indexer cache.Indexer) ValidatingAdmissionPolicyLister { - return &validatingAdmissionPolicyLister{indexer: indexer} -} - -// List lists all ValidatingAdmissionPolicies in the indexer. -func (s *validatingAdmissionPolicyLister) List(selector labels.Selector) (ret []*v1alpha1.ValidatingAdmissionPolicy, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ValidatingAdmissionPolicy)) - }) - return ret, err -} - -// Get retrieves the ValidatingAdmissionPolicy from the index for a given name. -func (s *validatingAdmissionPolicyLister) Get(name string) (*v1alpha1.ValidatingAdmissionPolicy, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("validatingadmissionpolicy"), name) - } - return obj.(*v1alpha1.ValidatingAdmissionPolicy), nil + return &validatingAdmissionPolicyLister{listers.New[*v1alpha1.ValidatingAdmissionPolicy](indexer, v1alpha1.Resource("validatingadmissionpolicy"))} } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go index 552854daf2..5a2cf79c53 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ValidatingAdmissionPolicyBindingLister interface { // validatingAdmissionPolicyBindingLister implements the ValidatingAdmissionPolicyBindingLister interface. type validatingAdmissionPolicyBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.ValidatingAdmissionPolicyBinding] } // NewValidatingAdmissionPolicyBindingLister returns a new ValidatingAdmissionPolicyBindingLister. func NewValidatingAdmissionPolicyBindingLister(indexer cache.Indexer) ValidatingAdmissionPolicyBindingLister { - return &validatingAdmissionPolicyBindingLister{indexer: indexer} -} - -// List lists all ValidatingAdmissionPolicyBindings in the indexer. -func (s *validatingAdmissionPolicyBindingLister) List(selector labels.Selector) (ret []*v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ValidatingAdmissionPolicyBinding)) - }) - return ret, err -} - -// Get retrieves the ValidatingAdmissionPolicyBinding from the index for a given name. 
-func (s *validatingAdmissionPolicyBindingLister) Get(name string) (*v1alpha1.ValidatingAdmissionPolicyBinding, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("validatingadmissionpolicybinding"), name) - } - return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), nil + return &validatingAdmissionPolicyBindingLister{listers.New[*v1alpha1.ValidatingAdmissionPolicyBinding](indexer, v1alpha1.Resource("validatingadmissionpolicybinding"))} } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index 93c6096ee9..3bad49ac0a 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/admissionregistration/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type MutatingWebhookConfigurationLister interface { // mutatingWebhookConfigurationLister implements the MutatingWebhookConfigurationLister interface. type mutatingWebhookConfigurationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.MutatingWebhookConfiguration] } // NewMutatingWebhookConfigurationLister returns a new MutatingWebhookConfigurationLister. func NewMutatingWebhookConfigurationLister(indexer cache.Indexer) MutatingWebhookConfigurationLister { - return &mutatingWebhookConfigurationLister{indexer: indexer} -} - -// List lists all MutatingWebhookConfigurations in the indexer. -func (s *mutatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*v1beta1.MutatingWebhookConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.MutatingWebhookConfiguration)) - }) - return ret, err -} - -// Get retrieves the MutatingWebhookConfiguration from the index for a given name. 
-func (s *mutatingWebhookConfigurationLister) Get(name string) (*v1beta1.MutatingWebhookConfiguration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("mutatingwebhookconfiguration"), name) - } - return obj.(*v1beta1.MutatingWebhookConfiguration), nil + return &mutatingWebhookConfigurationLister{listers.New[*v1beta1.MutatingWebhookConfiguration](indexer, v1beta1.Resource("mutatingwebhookconfiguration"))} } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go index 7018b3ceec..74d7c6ce3e 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/admissionregistration/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ValidatingAdmissionPolicyLister interface { // validatingAdmissionPolicyLister implements the ValidatingAdmissionPolicyLister interface. type validatingAdmissionPolicyLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.ValidatingAdmissionPolicy] } // NewValidatingAdmissionPolicyLister returns a new ValidatingAdmissionPolicyLister. func NewValidatingAdmissionPolicyLister(indexer cache.Indexer) ValidatingAdmissionPolicyLister { - return &validatingAdmissionPolicyLister{indexer: indexer} -} - -// List lists all ValidatingAdmissionPolicies in the indexer. -func (s *validatingAdmissionPolicyLister) List(selector labels.Selector) (ret []*v1beta1.ValidatingAdmissionPolicy, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ValidatingAdmissionPolicy)) - }) - return ret, err -} - -// Get retrieves the ValidatingAdmissionPolicy from the index for a given name. -func (s *validatingAdmissionPolicyLister) Get(name string) (*v1beta1.ValidatingAdmissionPolicy, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("validatingadmissionpolicy"), name) - } - return obj.(*v1beta1.ValidatingAdmissionPolicy), nil + return &validatingAdmissionPolicyLister{listers.New[*v1beta1.ValidatingAdmissionPolicy](indexer, v1beta1.Resource("validatingadmissionpolicy"))} } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go index 5fcebfd22f..668d652bb7 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/admissionregistration/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ValidatingAdmissionPolicyBindingLister interface { // validatingAdmissionPolicyBindingLister implements the ValidatingAdmissionPolicyBindingLister interface. 
type validatingAdmissionPolicyBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.ValidatingAdmissionPolicyBinding] } // NewValidatingAdmissionPolicyBindingLister returns a new ValidatingAdmissionPolicyBindingLister. func NewValidatingAdmissionPolicyBindingLister(indexer cache.Indexer) ValidatingAdmissionPolicyBindingLister { - return &validatingAdmissionPolicyBindingLister{indexer: indexer} -} - -// List lists all ValidatingAdmissionPolicyBindings in the indexer. -func (s *validatingAdmissionPolicyBindingLister) List(selector labels.Selector) (ret []*v1beta1.ValidatingAdmissionPolicyBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ValidatingAdmissionPolicyBinding)) - }) - return ret, err -} - -// Get retrieves the ValidatingAdmissionPolicyBinding from the index for a given name. -func (s *validatingAdmissionPolicyBindingLister) Get(name string) (*v1beta1.ValidatingAdmissionPolicyBinding, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("validatingadmissionpolicybinding"), name) - } - return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), nil + return &validatingAdmissionPolicyBindingLister{listers.New[*v1beta1.ValidatingAdmissionPolicyBinding](indexer, v1beta1.Resource("validatingadmissionpolicybinding"))} } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go index 7c17fccb2e..16167d5738 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/admissionregistration/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ValidatingWebhookConfigurationLister interface { // validatingWebhookConfigurationLister implements the ValidatingWebhookConfigurationLister interface. type validatingWebhookConfigurationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.ValidatingWebhookConfiguration] } // NewValidatingWebhookConfigurationLister returns a new ValidatingWebhookConfigurationLister. func NewValidatingWebhookConfigurationLister(indexer cache.Indexer) ValidatingWebhookConfigurationLister { - return &validatingWebhookConfigurationLister{indexer: indexer} -} - -// List lists all ValidatingWebhookConfigurations in the indexer. -func (s *validatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*v1beta1.ValidatingWebhookConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ValidatingWebhookConfiguration)) - }) - return ret, err -} - -// Get retrieves the ValidatingWebhookConfiguration from the index for a given name. 
-func (s *validatingWebhookConfigurationLister) Get(name string) (*v1beta1.ValidatingWebhookConfiguration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("validatingwebhookconfiguration"), name) - } - return obj.(*v1beta1.ValidatingWebhookConfiguration), nil + return &validatingWebhookConfigurationLister{listers.New[*v1beta1.ValidatingWebhookConfiguration](indexer, v1beta1.Resource("validatingwebhookconfiguration"))} } diff --git a/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go index 9a6d74b2bf..ce51b88f28 100644 --- a/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go +++ b/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type StorageVersionLister interface { // storageVersionLister implements the StorageVersionLister interface. type storageVersionLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.StorageVersion] } // NewStorageVersionLister returns a new StorageVersionLister. func NewStorageVersionLister(indexer cache.Indexer) StorageVersionLister { - return &storageVersionLister{indexer: indexer} -} - -// List lists all StorageVersions in the indexer. -func (s *storageVersionLister) List(selector labels.Selector) (ret []*v1alpha1.StorageVersion, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.StorageVersion)) - }) - return ret, err -} - -// Get retrieves the StorageVersion from the index for a given name. -func (s *storageVersionLister) Get(name string) (*v1alpha1.StorageVersion, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("storageversion"), name) - } - return obj.(*v1alpha1.StorageVersion), nil + return &storageVersionLister{listers.New[*v1alpha1.StorageVersion](indexer, v1alpha1.Resource("storageversion"))} } diff --git a/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go index 9e2f973746..b9061b159e 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go +++ b/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type ControllerRevisionLister interface { // controllerRevisionLister implements the ControllerRevisionLister interface. type controllerRevisionLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.ControllerRevision] } // NewControllerRevisionLister returns a new ControllerRevisionLister. func NewControllerRevisionLister(indexer cache.Indexer) ControllerRevisionLister { - return &controllerRevisionLister{indexer: indexer} -} - -// List lists all ControllerRevisions in the indexer. 
-func (s *controllerRevisionLister) List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ControllerRevision)) - }) - return ret, err + return &controllerRevisionLister{listers.New[*v1.ControllerRevision](indexer, v1.Resource("controllerrevision"))} } // ControllerRevisions returns an object that can list and get ControllerRevisions. func (s *controllerRevisionLister) ControllerRevisions(namespace string) ControllerRevisionNamespaceLister { - return controllerRevisionNamespaceLister{indexer: s.indexer, namespace: namespace} + return controllerRevisionNamespaceLister{listers.NewNamespaced[*v1.ControllerRevision](s.ResourceIndexer, namespace)} } // ControllerRevisionNamespaceLister helps list and get ControllerRevisions. @@ -74,26 +66,5 @@ type ControllerRevisionNamespaceLister interface { // controllerRevisionNamespaceLister implements the ControllerRevisionNamespaceLister // interface. type controllerRevisionNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ControllerRevisions in the indexer for a given namespace. -func (s controllerRevisionNamespaceLister) List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ControllerRevision)) - }) - return ret, err -} - -// Get retrieves the ControllerRevision from the indexer for a given namespace and name. -func (s controllerRevisionNamespaceLister) Get(name string) (*v1.ControllerRevision, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("controllerrevision"), name) - } - return obj.(*v1.ControllerRevision), nil + listers.ResourceIndexer[*v1.ControllerRevision] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go b/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go index 061959e3da..4240cb6248 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go +++ b/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type DaemonSetLister interface { // daemonSetLister implements the DaemonSetLister interface. type daemonSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.DaemonSet] } // NewDaemonSetLister returns a new DaemonSetLister. func NewDaemonSetLister(indexer cache.Indexer) DaemonSetLister { - return &daemonSetLister{indexer: indexer} -} - -// List lists all DaemonSets in the indexer. -func (s *daemonSetLister) List(selector labels.Selector) (ret []*v1.DaemonSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.DaemonSet)) - }) - return ret, err + return &daemonSetLister{listers.New[*v1.DaemonSet](indexer, v1.Resource("daemonset"))} } // DaemonSets returns an object that can list and get DaemonSets. func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister { - return daemonSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return daemonSetNamespaceLister{listers.NewNamespaced[*v1.DaemonSet](s.ResourceIndexer, namespace)} } // DaemonSetNamespaceLister helps list and get DaemonSets. 
@@ -74,26 +66,5 @@ type DaemonSetNamespaceLister interface { // daemonSetNamespaceLister implements the DaemonSetNamespaceLister // interface. type daemonSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all DaemonSets in the indexer for a given namespace. -func (s daemonSetNamespaceLister) List(selector labels.Selector) (ret []*v1.DaemonSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.DaemonSet)) - }) - return ret, err -} - -// Get retrieves the DaemonSet from the indexer for a given namespace and name. -func (s daemonSetNamespaceLister) Get(name string) (*v1.DaemonSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("daemonset"), name) - } - return obj.(*v1.DaemonSet), nil + listers.ResourceIndexer[*v1.DaemonSet] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1/deployment.go b/vendor/k8s.io/client-go/listers/apps/v1/deployment.go index 7704034172..3337026b73 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1/deployment.go +++ b/vendor/k8s.io/client-go/listers/apps/v1/deployment.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type DeploymentLister interface { // deploymentLister implements the DeploymentLister interface. type deploymentLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Deployment] } // NewDeploymentLister returns a new DeploymentLister. func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { - return &deploymentLister{indexer: indexer} -} - -// List lists all Deployments in the indexer. -func (s *deploymentLister) List(selector labels.Selector) (ret []*v1.Deployment, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Deployment)) - }) - return ret, err + return &deploymentLister{listers.New[*v1.Deployment](indexer, v1.Resource("deployment"))} } // Deployments returns an object that can list and get Deployments. func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { - return deploymentNamespaceLister{indexer: s.indexer, namespace: namespace} + return deploymentNamespaceLister{listers.NewNamespaced[*v1.Deployment](s.ResourceIndexer, namespace)} } // DeploymentNamespaceLister helps list and get Deployments. @@ -74,26 +66,5 @@ type DeploymentNamespaceLister interface { // deploymentNamespaceLister implements the DeploymentNamespaceLister // interface. type deploymentNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Deployments in the indexer for a given namespace. -func (s deploymentNamespaceLister) List(selector labels.Selector) (ret []*v1.Deployment, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Deployment)) - }) - return ret, err -} - -// Get retrieves the Deployment from the indexer for a given namespace and name. 
-func (s deploymentNamespaceLister) Get(name string) (*v1.Deployment, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("deployment"), name) - } - return obj.(*v1.Deployment), nil + listers.ResourceIndexer[*v1.Deployment] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go b/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go index 3ca7757eb9..244df1d33f 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go +++ b/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type ReplicaSetLister interface { // replicaSetLister implements the ReplicaSetLister interface. type replicaSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.ReplicaSet] } // NewReplicaSetLister returns a new ReplicaSetLister. func NewReplicaSetLister(indexer cache.Indexer) ReplicaSetLister { - return &replicaSetLister{indexer: indexer} -} - -// List lists all ReplicaSets in the indexer. -func (s *replicaSetLister) List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ReplicaSet)) - }) - return ret, err + return &replicaSetLister{listers.New[*v1.ReplicaSet](indexer, v1.Resource("replicaset"))} } // ReplicaSets returns an object that can list and get ReplicaSets. func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceLister { - return replicaSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return replicaSetNamespaceLister{listers.NewNamespaced[*v1.ReplicaSet](s.ResourceIndexer, namespace)} } // ReplicaSetNamespaceLister helps list and get ReplicaSets. @@ -74,26 +66,5 @@ type ReplicaSetNamespaceLister interface { // replicaSetNamespaceLister implements the ReplicaSetNamespaceLister // interface. type replicaSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ReplicaSets in the indexer for a given namespace. -func (s replicaSetNamespaceLister) List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ReplicaSet)) - }) - return ret, err -} - -// Get retrieves the ReplicaSet from the indexer for a given namespace and name. 
-func (s replicaSetNamespaceLister) Get(name string) (*v1.ReplicaSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("replicaset"), name) - } - return obj.(*v1.ReplicaSet), nil + listers.ResourceIndexer[*v1.ReplicaSet] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go b/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go index f6899d5ff9..a8dc1b022a 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go +++ b/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type StatefulSetLister interface { // statefulSetLister implements the StatefulSetLister interface. type statefulSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.StatefulSet] } // NewStatefulSetLister returns a new StatefulSetLister. func NewStatefulSetLister(indexer cache.Indexer) StatefulSetLister { - return &statefulSetLister{indexer: indexer} -} - -// List lists all StatefulSets in the indexer. -func (s *statefulSetLister) List(selector labels.Selector) (ret []*v1.StatefulSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.StatefulSet)) - }) - return ret, err + return &statefulSetLister{listers.New[*v1.StatefulSet](indexer, v1.Resource("statefulset"))} } // StatefulSets returns an object that can list and get StatefulSets. func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceLister { - return statefulSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return statefulSetNamespaceLister{listers.NewNamespaced[*v1.StatefulSet](s.ResourceIndexer, namespace)} } // StatefulSetNamespaceLister helps list and get StatefulSets. @@ -74,26 +66,5 @@ type StatefulSetNamespaceLister interface { // statefulSetNamespaceLister implements the StatefulSetNamespaceLister // interface. type statefulSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all StatefulSets in the indexer for a given namespace. -func (s statefulSetNamespaceLister) List(selector labels.Selector) (ret []*v1.StatefulSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.StatefulSet)) - }) - return ret, err -} - -// Get retrieves the StatefulSet from the indexer for a given namespace and name. 
-func (s statefulSetNamespaceLister) Get(name string) (*v1.StatefulSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("statefulset"), name) - } - return obj.(*v1.StatefulSet), nil + listers.ResourceIndexer[*v1.StatefulSet] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go index fc73de723f..c5e8fb3735 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/apps/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type ControllerRevisionLister interface { // controllerRevisionLister implements the ControllerRevisionLister interface. type controllerRevisionLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.ControllerRevision] } // NewControllerRevisionLister returns a new ControllerRevisionLister. func NewControllerRevisionLister(indexer cache.Indexer) ControllerRevisionLister { - return &controllerRevisionLister{indexer: indexer} -} - -// List lists all ControllerRevisions in the indexer. -func (s *controllerRevisionLister) List(selector labels.Selector) (ret []*v1beta1.ControllerRevision, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ControllerRevision)) - }) - return ret, err + return &controllerRevisionLister{listers.New[*v1beta1.ControllerRevision](indexer, v1beta1.Resource("controllerrevision"))} } // ControllerRevisions returns an object that can list and get ControllerRevisions. func (s *controllerRevisionLister) ControllerRevisions(namespace string) ControllerRevisionNamespaceLister { - return controllerRevisionNamespaceLister{indexer: s.indexer, namespace: namespace} + return controllerRevisionNamespaceLister{listers.NewNamespaced[*v1beta1.ControllerRevision](s.ResourceIndexer, namespace)} } // ControllerRevisionNamespaceLister helps list and get ControllerRevisions. @@ -74,26 +66,5 @@ type ControllerRevisionNamespaceLister interface { // controllerRevisionNamespaceLister implements the ControllerRevisionNamespaceLister // interface. type controllerRevisionNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ControllerRevisions in the indexer for a given namespace. -func (s controllerRevisionNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.ControllerRevision, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ControllerRevision)) - }) - return ret, err -} - -// Get retrieves the ControllerRevision from the indexer for a given namespace and name. 
-func (s controllerRevisionNamespaceLister) Get(name string) (*v1beta1.ControllerRevision, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("controllerrevision"), name) - } - return obj.(*v1beta1.ControllerRevision), nil + listers.ResourceIndexer[*v1beta1.ControllerRevision] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go index 3fb70794ca..1bc6d45ad0 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/apps/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type DeploymentLister interface { // deploymentLister implements the DeploymentLister interface. type deploymentLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.Deployment] } // NewDeploymentLister returns a new DeploymentLister. func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { - return &deploymentLister{indexer: indexer} -} - -// List lists all Deployments in the indexer. -func (s *deploymentLister) List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Deployment)) - }) - return ret, err + return &deploymentLister{listers.New[*v1beta1.Deployment](indexer, v1beta1.Resource("deployment"))} } // Deployments returns an object that can list and get Deployments. func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { - return deploymentNamespaceLister{indexer: s.indexer, namespace: namespace} + return deploymentNamespaceLister{listers.NewNamespaced[*v1beta1.Deployment](s.ResourceIndexer, namespace)} } // DeploymentNamespaceLister helps list and get Deployments. @@ -74,26 +66,5 @@ type DeploymentNamespaceLister interface { // deploymentNamespaceLister implements the DeploymentNamespaceLister // interface. type deploymentNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Deployments in the indexer for a given namespace. -func (s deploymentNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Deployment)) - }) - return ret, err -} - -// Get retrieves the Deployment from the indexer for a given namespace and name. 
-func (s deploymentNamespaceLister) Get(name string) (*v1beta1.Deployment, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("deployment"), name) - } - return obj.(*v1beta1.Deployment), nil + listers.ResourceIndexer[*v1beta1.Deployment] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go index e3556bc398..4bf103aef7 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/apps/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type StatefulSetLister interface { // statefulSetLister implements the StatefulSetLister interface. type statefulSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.StatefulSet] } // NewStatefulSetLister returns a new StatefulSetLister. func NewStatefulSetLister(indexer cache.Indexer) StatefulSetLister { - return &statefulSetLister{indexer: indexer} -} - -// List lists all StatefulSets in the indexer. -func (s *statefulSetLister) List(selector labels.Selector) (ret []*v1beta1.StatefulSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.StatefulSet)) - }) - return ret, err + return &statefulSetLister{listers.New[*v1beta1.StatefulSet](indexer, v1beta1.Resource("statefulset"))} } // StatefulSets returns an object that can list and get StatefulSets. func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceLister { - return statefulSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return statefulSetNamespaceLister{listers.NewNamespaced[*v1beta1.StatefulSet](s.ResourceIndexer, namespace)} } // StatefulSetNamespaceLister helps list and get StatefulSets. @@ -74,26 +66,5 @@ type StatefulSetNamespaceLister interface { // statefulSetNamespaceLister implements the StatefulSetNamespaceLister // interface. type statefulSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all StatefulSets in the indexer for a given namespace. -func (s statefulSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.StatefulSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.StatefulSet)) - }) - return ret, err -} - -// Get retrieves the StatefulSet from the indexer for a given namespace and name. 
-func (s statefulSetNamespaceLister) Get(name string) (*v1beta1.StatefulSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("statefulset"), name) - } - return obj.(*v1beta1.StatefulSet), nil + listers.ResourceIndexer[*v1beta1.StatefulSet] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go index da2ce86005..de941bc691 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go @@ -20,8 +20,8 @@ package v1beta2 import ( v1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type ControllerRevisionLister interface { // controllerRevisionLister implements the ControllerRevisionLister interface. type controllerRevisionLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta2.ControllerRevision] } // NewControllerRevisionLister returns a new ControllerRevisionLister. func NewControllerRevisionLister(indexer cache.Indexer) ControllerRevisionLister { - return &controllerRevisionLister{indexer: indexer} -} - -// List lists all ControllerRevisions in the indexer. -func (s *controllerRevisionLister) List(selector labels.Selector) (ret []*v1beta2.ControllerRevision, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.ControllerRevision)) - }) - return ret, err + return &controllerRevisionLister{listers.New[*v1beta2.ControllerRevision](indexer, v1beta2.Resource("controllerrevision"))} } // ControllerRevisions returns an object that can list and get ControllerRevisions. func (s *controllerRevisionLister) ControllerRevisions(namespace string) ControllerRevisionNamespaceLister { - return controllerRevisionNamespaceLister{indexer: s.indexer, namespace: namespace} + return controllerRevisionNamespaceLister{listers.NewNamespaced[*v1beta2.ControllerRevision](s.ResourceIndexer, namespace)} } // ControllerRevisionNamespaceLister helps list and get ControllerRevisions. @@ -74,26 +66,5 @@ type ControllerRevisionNamespaceLister interface { // controllerRevisionNamespaceLister implements the ControllerRevisionNamespaceLister // interface. type controllerRevisionNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ControllerRevisions in the indexer for a given namespace. -func (s controllerRevisionNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.ControllerRevision, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.ControllerRevision)) - }) - return ret, err -} - -// Get retrieves the ControllerRevision from the indexer for a given namespace and name. 
-func (s controllerRevisionNamespaceLister) Get(name string) (*v1beta2.ControllerRevision, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta2.Resource("controllerrevision"), name) - } - return obj.(*v1beta2.ControllerRevision), nil + listers.ResourceIndexer[*v1beta2.ControllerRevision] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go index 4b7aedd758..37784fe888 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go @@ -20,8 +20,8 @@ package v1beta2 import ( v1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type DaemonSetLister interface { // daemonSetLister implements the DaemonSetLister interface. type daemonSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta2.DaemonSet] } // NewDaemonSetLister returns a new DaemonSetLister. func NewDaemonSetLister(indexer cache.Indexer) DaemonSetLister { - return &daemonSetLister{indexer: indexer} -} - -// List lists all DaemonSets in the indexer. -func (s *daemonSetLister) List(selector labels.Selector) (ret []*v1beta2.DaemonSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.DaemonSet)) - }) - return ret, err + return &daemonSetLister{listers.New[*v1beta2.DaemonSet](indexer, v1beta2.Resource("daemonset"))} } // DaemonSets returns an object that can list and get DaemonSets. func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister { - return daemonSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return daemonSetNamespaceLister{listers.NewNamespaced[*v1beta2.DaemonSet](s.ResourceIndexer, namespace)} } // DaemonSetNamespaceLister helps list and get DaemonSets. @@ -74,26 +66,5 @@ type DaemonSetNamespaceLister interface { // daemonSetNamespaceLister implements the DaemonSetNamespaceLister // interface. type daemonSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all DaemonSets in the indexer for a given namespace. -func (s daemonSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.DaemonSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.DaemonSet)) - }) - return ret, err -} - -// Get retrieves the DaemonSet from the indexer for a given namespace and name. 
-func (s daemonSetNamespaceLister) Get(name string) (*v1beta2.DaemonSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta2.Resource("daemonset"), name) - } - return obj.(*v1beta2.DaemonSet), nil + listers.ResourceIndexer[*v1beta2.DaemonSet] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go index c2857bbc36..75acc1693e 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go @@ -20,8 +20,8 @@ package v1beta2 import ( v1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type DeploymentLister interface { // deploymentLister implements the DeploymentLister interface. type deploymentLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta2.Deployment] } // NewDeploymentLister returns a new DeploymentLister. func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { - return &deploymentLister{indexer: indexer} -} - -// List lists all Deployments in the indexer. -func (s *deploymentLister) List(selector labels.Selector) (ret []*v1beta2.Deployment, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.Deployment)) - }) - return ret, err + return &deploymentLister{listers.New[*v1beta2.Deployment](indexer, v1beta2.Resource("deployment"))} } // Deployments returns an object that can list and get Deployments. func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { - return deploymentNamespaceLister{indexer: s.indexer, namespace: namespace} + return deploymentNamespaceLister{listers.NewNamespaced[*v1beta2.Deployment](s.ResourceIndexer, namespace)} } // DeploymentNamespaceLister helps list and get Deployments. @@ -74,26 +66,5 @@ type DeploymentNamespaceLister interface { // deploymentNamespaceLister implements the DeploymentNamespaceLister // interface. type deploymentNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Deployments in the indexer for a given namespace. -func (s deploymentNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.Deployment, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.Deployment)) - }) - return ret, err -} - -// Get retrieves the Deployment from the indexer for a given namespace and name. 
-func (s deploymentNamespaceLister) Get(name string) (*v1beta2.Deployment, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta2.Resource("deployment"), name) - } - return obj.(*v1beta2.Deployment), nil + listers.ResourceIndexer[*v1beta2.Deployment] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go index 26b350ce8f..37ea97630e 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go @@ -20,8 +20,8 @@ package v1beta2 import ( v1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type ReplicaSetLister interface { // replicaSetLister implements the ReplicaSetLister interface. type replicaSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta2.ReplicaSet] } // NewReplicaSetLister returns a new ReplicaSetLister. func NewReplicaSetLister(indexer cache.Indexer) ReplicaSetLister { - return &replicaSetLister{indexer: indexer} -} - -// List lists all ReplicaSets in the indexer. -func (s *replicaSetLister) List(selector labels.Selector) (ret []*v1beta2.ReplicaSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.ReplicaSet)) - }) - return ret, err + return &replicaSetLister{listers.New[*v1beta2.ReplicaSet](indexer, v1beta2.Resource("replicaset"))} } // ReplicaSets returns an object that can list and get ReplicaSets. func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceLister { - return replicaSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return replicaSetNamespaceLister{listers.NewNamespaced[*v1beta2.ReplicaSet](s.ResourceIndexer, namespace)} } // ReplicaSetNamespaceLister helps list and get ReplicaSets. @@ -74,26 +66,5 @@ type ReplicaSetNamespaceLister interface { // replicaSetNamespaceLister implements the ReplicaSetNamespaceLister // interface. type replicaSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ReplicaSets in the indexer for a given namespace. -func (s replicaSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.ReplicaSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.ReplicaSet)) - }) - return ret, err -} - -// Get retrieves the ReplicaSet from the indexer for a given namespace and name. 
-func (s replicaSetNamespaceLister) Get(name string) (*v1beta2.ReplicaSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta2.Resource("replicaset"), name) - } - return obj.(*v1beta2.ReplicaSet), nil + listers.ResourceIndexer[*v1beta2.ReplicaSet] } diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go index fbbaf0133f..cc48a1473c 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go @@ -20,8 +20,8 @@ package v1beta2 import ( v1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type StatefulSetLister interface { // statefulSetLister implements the StatefulSetLister interface. type statefulSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta2.StatefulSet] } // NewStatefulSetLister returns a new StatefulSetLister. func NewStatefulSetLister(indexer cache.Indexer) StatefulSetLister { - return &statefulSetLister{indexer: indexer} -} - -// List lists all StatefulSets in the indexer. -func (s *statefulSetLister) List(selector labels.Selector) (ret []*v1beta2.StatefulSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.StatefulSet)) - }) - return ret, err + return &statefulSetLister{listers.New[*v1beta2.StatefulSet](indexer, v1beta2.Resource("statefulset"))} } // StatefulSets returns an object that can list and get StatefulSets. func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceLister { - return statefulSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return statefulSetNamespaceLister{listers.NewNamespaced[*v1beta2.StatefulSet](s.ResourceIndexer, namespace)} } // StatefulSetNamespaceLister helps list and get StatefulSets. @@ -74,26 +66,5 @@ type StatefulSetNamespaceLister interface { // statefulSetNamespaceLister implements the StatefulSetNamespaceLister // interface. type statefulSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all StatefulSets in the indexer for a given namespace. -func (s statefulSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.StatefulSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.StatefulSet)) - }) - return ret, err -} - -// Get retrieves the StatefulSet from the indexer for a given namespace and name. 
-func (s statefulSetNamespaceLister) Get(name string) (*v1beta2.StatefulSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta2.Resource("statefulset"), name) - } - return obj.(*v1beta2.StatefulSet), nil + listers.ResourceIndexer[*v1beta2.StatefulSet] } diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go index 8447f059d4..2cd4cc87bf 100644 --- a/vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/autoscaling/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type HorizontalPodAutoscalerLister interface { // horizontalPodAutoscalerLister implements the HorizontalPodAutoscalerLister interface. type horizontalPodAutoscalerLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.HorizontalPodAutoscaler] } // NewHorizontalPodAutoscalerLister returns a new HorizontalPodAutoscalerLister. func NewHorizontalPodAutoscalerLister(indexer cache.Indexer) HorizontalPodAutoscalerLister { - return &horizontalPodAutoscalerLister{indexer: indexer} -} - -// List lists all HorizontalPodAutoscalers in the indexer. -func (s *horizontalPodAutoscalerLister) List(selector labels.Selector) (ret []*v1.HorizontalPodAutoscaler, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.HorizontalPodAutoscaler)) - }) - return ret, err + return &horizontalPodAutoscalerLister{listers.New[*v1.HorizontalPodAutoscaler](indexer, v1.Resource("horizontalpodautoscaler"))} } // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister { - return horizontalPodAutoscalerNamespaceLister{indexer: s.indexer, namespace: namespace} + return horizontalPodAutoscalerNamespaceLister{listers.NewNamespaced[*v1.HorizontalPodAutoscaler](s.ResourceIndexer, namespace)} } // HorizontalPodAutoscalerNamespaceLister helps list and get HorizontalPodAutoscalers. @@ -74,26 +66,5 @@ type HorizontalPodAutoscalerNamespaceLister interface { // horizontalPodAutoscalerNamespaceLister implements the HorizontalPodAutoscalerNamespaceLister // interface. type horizontalPodAutoscalerNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all HorizontalPodAutoscalers in the indexer for a given namespace. -func (s horizontalPodAutoscalerNamespaceLister) List(selector labels.Selector) (ret []*v1.HorizontalPodAutoscaler, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.HorizontalPodAutoscaler)) - }) - return ret, err -} - -// Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. 
-func (s horizontalPodAutoscalerNamespaceLister) Get(name string) (*v1.HorizontalPodAutoscaler, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("horizontalpodautoscaler"), name) - } - return obj.(*v1.HorizontalPodAutoscaler), nil + listers.ResourceIndexer[*v1.HorizontalPodAutoscaler] } diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/listers/autoscaling/v2/horizontalpodautoscaler.go index a5cef27725..7c2806af21 100644 --- a/vendor/k8s.io/client-go/listers/autoscaling/v2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/listers/autoscaling/v2/horizontalpodautoscaler.go @@ -20,8 +20,8 @@ package v2 import ( v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type HorizontalPodAutoscalerLister interface { // horizontalPodAutoscalerLister implements the HorizontalPodAutoscalerLister interface. type horizontalPodAutoscalerLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v2.HorizontalPodAutoscaler] } // NewHorizontalPodAutoscalerLister returns a new HorizontalPodAutoscalerLister. func NewHorizontalPodAutoscalerLister(indexer cache.Indexer) HorizontalPodAutoscalerLister { - return &horizontalPodAutoscalerLister{indexer: indexer} -} - -// List lists all HorizontalPodAutoscalers in the indexer. -func (s *horizontalPodAutoscalerLister) List(selector labels.Selector) (ret []*v2.HorizontalPodAutoscaler, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2.HorizontalPodAutoscaler)) - }) - return ret, err + return &horizontalPodAutoscalerLister{listers.New[*v2.HorizontalPodAutoscaler](indexer, v2.Resource("horizontalpodautoscaler"))} } // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister { - return horizontalPodAutoscalerNamespaceLister{indexer: s.indexer, namespace: namespace} + return horizontalPodAutoscalerNamespaceLister{listers.NewNamespaced[*v2.HorizontalPodAutoscaler](s.ResourceIndexer, namespace)} } // HorizontalPodAutoscalerNamespaceLister helps list and get HorizontalPodAutoscalers. @@ -74,26 +66,5 @@ type HorizontalPodAutoscalerNamespaceLister interface { // horizontalPodAutoscalerNamespaceLister implements the HorizontalPodAutoscalerNamespaceLister // interface. type horizontalPodAutoscalerNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all HorizontalPodAutoscalers in the indexer for a given namespace. -func (s horizontalPodAutoscalerNamespaceLister) List(selector labels.Selector) (ret []*v2.HorizontalPodAutoscaler, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v2.HorizontalPodAutoscaler)) - }) - return ret, err -} - -// Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. 
-func (s horizontalPodAutoscalerNamespaceLister) Get(name string) (*v2.HorizontalPodAutoscaler, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2.Resource("horizontalpodautoscaler"), name) - } - return obj.(*v2.HorizontalPodAutoscaler), nil + listers.ResourceIndexer[*v2.HorizontalPodAutoscaler] } diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go index f1804e995b..a2befd6062 100644 --- a/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -20,8 +20,8 @@ package v2beta1 import ( v2beta1 "k8s.io/api/autoscaling/v2beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type HorizontalPodAutoscalerLister interface { // horizontalPodAutoscalerLister implements the HorizontalPodAutoscalerLister interface. type horizontalPodAutoscalerLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v2beta1.HorizontalPodAutoscaler] } // NewHorizontalPodAutoscalerLister returns a new HorizontalPodAutoscalerLister. func NewHorizontalPodAutoscalerLister(indexer cache.Indexer) HorizontalPodAutoscalerLister { - return &horizontalPodAutoscalerLister{indexer: indexer} -} - -// List lists all HorizontalPodAutoscalers in the indexer. -func (s *horizontalPodAutoscalerLister) List(selector labels.Selector) (ret []*v2beta1.HorizontalPodAutoscaler, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2beta1.HorizontalPodAutoscaler)) - }) - return ret, err + return &horizontalPodAutoscalerLister{listers.New[*v2beta1.HorizontalPodAutoscaler](indexer, v2beta1.Resource("horizontalpodautoscaler"))} } // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister { - return horizontalPodAutoscalerNamespaceLister{indexer: s.indexer, namespace: namespace} + return horizontalPodAutoscalerNamespaceLister{listers.NewNamespaced[*v2beta1.HorizontalPodAutoscaler](s.ResourceIndexer, namespace)} } // HorizontalPodAutoscalerNamespaceLister helps list and get HorizontalPodAutoscalers. @@ -74,26 +66,5 @@ type HorizontalPodAutoscalerNamespaceLister interface { // horizontalPodAutoscalerNamespaceLister implements the HorizontalPodAutoscalerNamespaceLister // interface. type horizontalPodAutoscalerNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all HorizontalPodAutoscalers in the indexer for a given namespace. -func (s horizontalPodAutoscalerNamespaceLister) List(selector labels.Selector) (ret []*v2beta1.HorizontalPodAutoscaler, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v2beta1.HorizontalPodAutoscaler)) - }) - return ret, err -} - -// Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. 
-func (s horizontalPodAutoscalerNamespaceLister) Get(name string) (*v2beta1.HorizontalPodAutoscaler, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2beta1.Resource("horizontalpodautoscaler"), name) - } - return obj.(*v2beta1.HorizontalPodAutoscaler), nil + listers.ResourceIndexer[*v2beta1.HorizontalPodAutoscaler] } diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/listers/autoscaling/v2beta2/horizontalpodautoscaler.go index b0dbaf9eb0..52bae849ba 100644 --- a/vendor/k8s.io/client-go/listers/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/listers/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -20,8 +20,8 @@ package v2beta2 import ( v2beta2 "k8s.io/api/autoscaling/v2beta2" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type HorizontalPodAutoscalerLister interface { // horizontalPodAutoscalerLister implements the HorizontalPodAutoscalerLister interface. type horizontalPodAutoscalerLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v2beta2.HorizontalPodAutoscaler] } // NewHorizontalPodAutoscalerLister returns a new HorizontalPodAutoscalerLister. func NewHorizontalPodAutoscalerLister(indexer cache.Indexer) HorizontalPodAutoscalerLister { - return &horizontalPodAutoscalerLister{indexer: indexer} -} - -// List lists all HorizontalPodAutoscalers in the indexer. -func (s *horizontalPodAutoscalerLister) List(selector labels.Selector) (ret []*v2beta2.HorizontalPodAutoscaler, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2beta2.HorizontalPodAutoscaler)) - }) - return ret, err + return &horizontalPodAutoscalerLister{listers.New[*v2beta2.HorizontalPodAutoscaler](indexer, v2beta2.Resource("horizontalpodautoscaler"))} } // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister { - return horizontalPodAutoscalerNamespaceLister{indexer: s.indexer, namespace: namespace} + return horizontalPodAutoscalerNamespaceLister{listers.NewNamespaced[*v2beta2.HorizontalPodAutoscaler](s.ResourceIndexer, namespace)} } // HorizontalPodAutoscalerNamespaceLister helps list and get HorizontalPodAutoscalers. @@ -74,26 +66,5 @@ type HorizontalPodAutoscalerNamespaceLister interface { // horizontalPodAutoscalerNamespaceLister implements the HorizontalPodAutoscalerNamespaceLister // interface. type horizontalPodAutoscalerNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all HorizontalPodAutoscalers in the indexer for a given namespace. -func (s horizontalPodAutoscalerNamespaceLister) List(selector labels.Selector) (ret []*v2beta2.HorizontalPodAutoscaler, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v2beta2.HorizontalPodAutoscaler)) - }) - return ret, err -} - -// Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. 
-func (s horizontalPodAutoscalerNamespaceLister) Get(name string) (*v2beta2.HorizontalPodAutoscaler, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2beta2.Resource("horizontalpodautoscaler"), name) - } - return obj.(*v2beta2.HorizontalPodAutoscaler), nil + listers.ResourceIndexer[*v2beta2.HorizontalPodAutoscaler] } diff --git a/vendor/k8s.io/client-go/listers/batch/v1/cronjob.go b/vendor/k8s.io/client-go/listers/batch/v1/cronjob.go index 8e49ed959f..a7a3abbfa3 100644 --- a/vendor/k8s.io/client-go/listers/batch/v1/cronjob.go +++ b/vendor/k8s.io/client-go/listers/batch/v1/cronjob.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/batch/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type CronJobLister interface { // cronJobLister implements the CronJobLister interface. type cronJobLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.CronJob] } // NewCronJobLister returns a new CronJobLister. func NewCronJobLister(indexer cache.Indexer) CronJobLister { - return &cronJobLister{indexer: indexer} -} - -// List lists all CronJobs in the indexer. -func (s *cronJobLister) List(selector labels.Selector) (ret []*v1.CronJob, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CronJob)) - }) - return ret, err + return &cronJobLister{listers.New[*v1.CronJob](indexer, v1.Resource("cronjob"))} } // CronJobs returns an object that can list and get CronJobs. func (s *cronJobLister) CronJobs(namespace string) CronJobNamespaceLister { - return cronJobNamespaceLister{indexer: s.indexer, namespace: namespace} + return cronJobNamespaceLister{listers.NewNamespaced[*v1.CronJob](s.ResourceIndexer, namespace)} } // CronJobNamespaceLister helps list and get CronJobs. @@ -74,26 +66,5 @@ type CronJobNamespaceLister interface { // cronJobNamespaceLister implements the CronJobNamespaceLister // interface. type cronJobNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CronJobs in the indexer for a given namespace. -func (s cronJobNamespaceLister) List(selector labels.Selector) (ret []*v1.CronJob, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CronJob)) - }) - return ret, err -} - -// Get retrieves the CronJob from the indexer for a given namespace and name. -func (s cronJobNamespaceLister) Get(name string) (*v1.CronJob, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("cronjob"), name) - } - return obj.(*v1.CronJob), nil + listers.ResourceIndexer[*v1.CronJob] } diff --git a/vendor/k8s.io/client-go/listers/batch/v1/job.go b/vendor/k8s.io/client-go/listers/batch/v1/job.go index 3aba6b95fa..4078a9f7d8 100644 --- a/vendor/k8s.io/client-go/listers/batch/v1/job.go +++ b/vendor/k8s.io/client-go/listers/batch/v1/job.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/batch/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type JobLister interface { // jobLister implements the JobLister interface. 
type jobLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Job] } // NewJobLister returns a new JobLister. func NewJobLister(indexer cache.Indexer) JobLister { - return &jobLister{indexer: indexer} -} - -// List lists all Jobs in the indexer. -func (s *jobLister) List(selector labels.Selector) (ret []*v1.Job, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Job)) - }) - return ret, err + return &jobLister{listers.New[*v1.Job](indexer, v1.Resource("job"))} } // Jobs returns an object that can list and get Jobs. func (s *jobLister) Jobs(namespace string) JobNamespaceLister { - return jobNamespaceLister{indexer: s.indexer, namespace: namespace} + return jobNamespaceLister{listers.NewNamespaced[*v1.Job](s.ResourceIndexer, namespace)} } // JobNamespaceLister helps list and get Jobs. @@ -74,26 +66,5 @@ type JobNamespaceLister interface { // jobNamespaceLister implements the JobNamespaceLister // interface. type jobNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Jobs in the indexer for a given namespace. -func (s jobNamespaceLister) List(selector labels.Selector) (ret []*v1.Job, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Job)) - }) - return ret, err -} - -// Get retrieves the Job from the indexer for a given namespace and name. -func (s jobNamespaceLister) Get(name string) (*v1.Job, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("job"), name) - } - return obj.(*v1.Job), nil + listers.ResourceIndexer[*v1.Job] } diff --git a/vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go index 4842d5e5a1..33ed8219e3 100644 --- a/vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go +++ b/vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/batch/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type CronJobLister interface { // cronJobLister implements the CronJobLister interface. type cronJobLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.CronJob] } // NewCronJobLister returns a new CronJobLister. func NewCronJobLister(indexer cache.Indexer) CronJobLister { - return &cronJobLister{indexer: indexer} -} - -// List lists all CronJobs in the indexer. -func (s *cronJobLister) List(selector labels.Selector) (ret []*v1beta1.CronJob, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.CronJob)) - }) - return ret, err + return &cronJobLister{listers.New[*v1beta1.CronJob](indexer, v1beta1.Resource("cronjob"))} } // CronJobs returns an object that can list and get CronJobs. func (s *cronJobLister) CronJobs(namespace string) CronJobNamespaceLister { - return cronJobNamespaceLister{indexer: s.indexer, namespace: namespace} + return cronJobNamespaceLister{listers.NewNamespaced[*v1beta1.CronJob](s.ResourceIndexer, namespace)} } // CronJobNamespaceLister helps list and get CronJobs. @@ -74,26 +66,5 @@ type CronJobNamespaceLister interface { // cronJobNamespaceLister implements the CronJobNamespaceLister // interface. 
type cronJobNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CronJobs in the indexer for a given namespace. -func (s cronJobNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.CronJob, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.CronJob)) - }) - return ret, err -} - -// Get retrieves the CronJob from the indexer for a given namespace and name. -func (s cronJobNamespaceLister) Get(name string) (*v1beta1.CronJob, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("cronjob"), name) - } - return obj.(*v1beta1.CronJob), nil + listers.ResourceIndexer[*v1beta1.CronJob] } diff --git a/vendor/k8s.io/client-go/listers/certificates/v1/certificatesigningrequest.go b/vendor/k8s.io/client-go/listers/certificates/v1/certificatesigningrequest.go index 0d04e118db..38e4a3a658 100644 --- a/vendor/k8s.io/client-go/listers/certificates/v1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/listers/certificates/v1/certificatesigningrequest.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/certificates/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type CertificateSigningRequestLister interface { // certificateSigningRequestLister implements the CertificateSigningRequestLister interface. type certificateSigningRequestLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.CertificateSigningRequest] } // NewCertificateSigningRequestLister returns a new CertificateSigningRequestLister. func NewCertificateSigningRequestLister(indexer cache.Indexer) CertificateSigningRequestLister { - return &certificateSigningRequestLister{indexer: indexer} -} - -// List lists all CertificateSigningRequests in the indexer. -func (s *certificateSigningRequestLister) List(selector labels.Selector) (ret []*v1.CertificateSigningRequest, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CertificateSigningRequest)) - }) - return ret, err -} - -// Get retrieves the CertificateSigningRequest from the index for a given name. 
-func (s *certificateSigningRequestLister) Get(name string) (*v1.CertificateSigningRequest, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("certificatesigningrequest"), name) - } - return obj.(*v1.CertificateSigningRequest), nil + return &certificateSigningRequestLister{listers.New[*v1.CertificateSigningRequest](indexer, v1.Resource("certificatesigningrequest"))} } diff --git a/vendor/k8s.io/client-go/listers/certificates/v1alpha1/clustertrustbundle.go b/vendor/k8s.io/client-go/listers/certificates/v1alpha1/clustertrustbundle.go index b8049a7618..88e5365f40 100644 --- a/vendor/k8s.io/client-go/listers/certificates/v1alpha1/clustertrustbundle.go +++ b/vendor/k8s.io/client-go/listers/certificates/v1alpha1/clustertrustbundle.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/certificates/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ClusterTrustBundleLister interface { // clusterTrustBundleLister implements the ClusterTrustBundleLister interface. type clusterTrustBundleLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.ClusterTrustBundle] } // NewClusterTrustBundleLister returns a new ClusterTrustBundleLister. func NewClusterTrustBundleLister(indexer cache.Indexer) ClusterTrustBundleLister { - return &clusterTrustBundleLister{indexer: indexer} -} - -// List lists all ClusterTrustBundles in the indexer. -func (s *clusterTrustBundleLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterTrustBundle, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ClusterTrustBundle)) - }) - return ret, err -} - -// Get retrieves the ClusterTrustBundle from the index for a given name. -func (s *clusterTrustBundleLister) Get(name string) (*v1alpha1.ClusterTrustBundle, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("clustertrustbundle"), name) - } - return obj.(*v1alpha1.ClusterTrustBundle), nil + return &clusterTrustBundleLister{listers.New[*v1alpha1.ClusterTrustBundle](indexer, v1alpha1.Resource("clustertrustbundle"))} } diff --git a/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go index 471b5629b3..84b5ac4a90 100644 --- a/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/certificates/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type CertificateSigningRequestLister interface { // certificateSigningRequestLister implements the CertificateSigningRequestLister interface. type certificateSigningRequestLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.CertificateSigningRequest] } // NewCertificateSigningRequestLister returns a new CertificateSigningRequestLister. 
func NewCertificateSigningRequestLister(indexer cache.Indexer) CertificateSigningRequestLister { - return &certificateSigningRequestLister{indexer: indexer} -} - -// List lists all CertificateSigningRequests in the indexer. -func (s *certificateSigningRequestLister) List(selector labels.Selector) (ret []*v1beta1.CertificateSigningRequest, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.CertificateSigningRequest)) - }) - return ret, err -} - -// Get retrieves the CertificateSigningRequest from the index for a given name. -func (s *certificateSigningRequestLister) Get(name string) (*v1beta1.CertificateSigningRequest, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("certificatesigningrequest"), name) - } - return obj.(*v1beta1.CertificateSigningRequest), nil + return &certificateSigningRequestLister{listers.New[*v1beta1.CertificateSigningRequest](indexer, v1beta1.Resource("certificatesigningrequest"))} } diff --git a/vendor/k8s.io/client-go/listers/coordination/v1/lease.go b/vendor/k8s.io/client-go/listers/coordination/v1/lease.go index de366d0e11..b36d8800e3 100644 --- a/vendor/k8s.io/client-go/listers/coordination/v1/lease.go +++ b/vendor/k8s.io/client-go/listers/coordination/v1/lease.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/coordination/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type LeaseLister interface { // leaseLister implements the LeaseLister interface. type leaseLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Lease] } // NewLeaseLister returns a new LeaseLister. func NewLeaseLister(indexer cache.Indexer) LeaseLister { - return &leaseLister{indexer: indexer} -} - -// List lists all Leases in the indexer. -func (s *leaseLister) List(selector labels.Selector) (ret []*v1.Lease, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Lease)) - }) - return ret, err + return &leaseLister{listers.New[*v1.Lease](indexer, v1.Resource("lease"))} } // Leases returns an object that can list and get Leases. func (s *leaseLister) Leases(namespace string) LeaseNamespaceLister { - return leaseNamespaceLister{indexer: s.indexer, namespace: namespace} + return leaseNamespaceLister{listers.NewNamespaced[*v1.Lease](s.ResourceIndexer, namespace)} } // LeaseNamespaceLister helps list and get Leases. @@ -74,26 +66,5 @@ type LeaseNamespaceLister interface { // leaseNamespaceLister implements the LeaseNamespaceLister // interface. type leaseNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Leases in the indexer for a given namespace. -func (s leaseNamespaceLister) List(selector labels.Selector) (ret []*v1.Lease, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Lease)) - }) - return ret, err -} - -// Get retrieves the Lease from the indexer for a given namespace and name. 
-func (s leaseNamespaceLister) Get(name string) (*v1.Lease, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("lease"), name) - } - return obj.(*v1.Lease), nil + listers.ResourceIndexer[*v1.Lease] } diff --git a/vendor/k8s.io/client-go/listers/coordination/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/coordination/v1alpha1/expansion_generated.go new file mode 100644 index 0000000000..233bda975b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/coordination/v1alpha1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// LeaseCandidateListerExpansion allows custom methods to be added to +// LeaseCandidateLister. +type LeaseCandidateListerExpansion interface{} + +// LeaseCandidateNamespaceListerExpansion allows custom methods to be added to +// LeaseCandidateNamespaceLister. +type LeaseCandidateNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/coordination/v1alpha1/leasecandidate.go b/vendor/k8s.io/client-go/listers/coordination/v1alpha1/leasecandidate.go new file mode 100644 index 0000000000..b5e5fac9e4 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/coordination/v1alpha1/leasecandidate.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/coordination/v1alpha1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// LeaseCandidateLister helps list LeaseCandidates. +// All objects returned here must be treated as read-only. +type LeaseCandidateLister interface { + // List lists all LeaseCandidates in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.LeaseCandidate, err error) + // LeaseCandidates returns an object that can list and get LeaseCandidates. + LeaseCandidates(namespace string) LeaseCandidateNamespaceLister + LeaseCandidateListerExpansion +} + +// leaseCandidateLister implements the LeaseCandidateLister interface. +type leaseCandidateLister struct { + listers.ResourceIndexer[*v1alpha1.LeaseCandidate] +} + +// NewLeaseCandidateLister returns a new LeaseCandidateLister. 
+func NewLeaseCandidateLister(indexer cache.Indexer) LeaseCandidateLister { + return &leaseCandidateLister{listers.New[*v1alpha1.LeaseCandidate](indexer, v1alpha1.Resource("leasecandidate"))} +} + +// LeaseCandidates returns an object that can list and get LeaseCandidates. +func (s *leaseCandidateLister) LeaseCandidates(namespace string) LeaseCandidateNamespaceLister { + return leaseCandidateNamespaceLister{listers.NewNamespaced[*v1alpha1.LeaseCandidate](s.ResourceIndexer, namespace)} +} + +// LeaseCandidateNamespaceLister helps list and get LeaseCandidates. +// All objects returned here must be treated as read-only. +type LeaseCandidateNamespaceLister interface { + // List lists all LeaseCandidates in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.LeaseCandidate, err error) + // Get retrieves the LeaseCandidate from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.LeaseCandidate, error) + LeaseCandidateNamespaceListerExpansion +} + +// leaseCandidateNamespaceLister implements the LeaseCandidateNamespaceLister +// interface. +type leaseCandidateNamespaceLister struct { + listers.ResourceIndexer[*v1alpha1.LeaseCandidate] +} diff --git a/vendor/k8s.io/client-go/listers/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/listers/coordination/v1beta1/lease.go index 8dfdc1e9bc..dbe132696a 100644 --- a/vendor/k8s.io/client-go/listers/coordination/v1beta1/lease.go +++ b/vendor/k8s.io/client-go/listers/coordination/v1beta1/lease.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/coordination/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type LeaseLister interface { // leaseLister implements the LeaseLister interface. type leaseLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.Lease] } // NewLeaseLister returns a new LeaseLister. func NewLeaseLister(indexer cache.Indexer) LeaseLister { - return &leaseLister{indexer: indexer} -} - -// List lists all Leases in the indexer. -func (s *leaseLister) List(selector labels.Selector) (ret []*v1beta1.Lease, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Lease)) - }) - return ret, err + return &leaseLister{listers.New[*v1beta1.Lease](indexer, v1beta1.Resource("lease"))} } // Leases returns an object that can list and get Leases. func (s *leaseLister) Leases(namespace string) LeaseNamespaceLister { - return leaseNamespaceLister{indexer: s.indexer, namespace: namespace} + return leaseNamespaceLister{listers.NewNamespaced[*v1beta1.Lease](s.ResourceIndexer, namespace)} } // LeaseNamespaceLister helps list and get Leases. @@ -74,26 +66,5 @@ type LeaseNamespaceLister interface { // leaseNamespaceLister implements the LeaseNamespaceLister // interface. type leaseNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Leases in the indexer for a given namespace. -func (s leaseNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Lease, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Lease)) - }) - return ret, err -} - -// Get retrieves the Lease from the indexer for a given namespace and name. 
-func (s leaseNamespaceLister) Get(name string) (*v1beta1.Lease, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("lease"), name) - } - return obj.(*v1beta1.Lease), nil + listers.ResourceIndexer[*v1beta1.Lease] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go b/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go index 5fcdac3c76..9e3274b5a3 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go +++ b/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ComponentStatusLister interface { // componentStatusLister implements the ComponentStatusLister interface. type componentStatusLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.ComponentStatus] } // NewComponentStatusLister returns a new ComponentStatusLister. func NewComponentStatusLister(indexer cache.Indexer) ComponentStatusLister { - return &componentStatusLister{indexer: indexer} -} - -// List lists all ComponentStatuses in the indexer. -func (s *componentStatusLister) List(selector labels.Selector) (ret []*v1.ComponentStatus, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ComponentStatus)) - }) - return ret, err -} - -// Get retrieves the ComponentStatus from the index for a given name. -func (s *componentStatusLister) Get(name string) (*v1.ComponentStatus, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("componentstatus"), name) - } - return obj.(*v1.ComponentStatus), nil + return &componentStatusLister{listers.New[*v1.ComponentStatus](indexer, v1.Resource("componentstatus"))} } diff --git a/vendor/k8s.io/client-go/listers/core/v1/configmap.go b/vendor/k8s.io/client-go/listers/core/v1/configmap.go index 6a410e47c4..0dde404f2f 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/configmap.go +++ b/vendor/k8s.io/client-go/listers/core/v1/configmap.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type ConfigMapLister interface { // configMapLister implements the ConfigMapLister interface. type configMapLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.ConfigMap] } // NewConfigMapLister returns a new ConfigMapLister. func NewConfigMapLister(indexer cache.Indexer) ConfigMapLister { - return &configMapLister{indexer: indexer} -} - -// List lists all ConfigMaps in the indexer. -func (s *configMapLister) List(selector labels.Selector) (ret []*v1.ConfigMap, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ConfigMap)) - }) - return ret, err + return &configMapLister{listers.New[*v1.ConfigMap](indexer, v1.Resource("configmap"))} } // ConfigMaps returns an object that can list and get ConfigMaps. 
func (s *configMapLister) ConfigMaps(namespace string) ConfigMapNamespaceLister { - return configMapNamespaceLister{indexer: s.indexer, namespace: namespace} + return configMapNamespaceLister{listers.NewNamespaced[*v1.ConfigMap](s.ResourceIndexer, namespace)} } // ConfigMapNamespaceLister helps list and get ConfigMaps. @@ -74,26 +66,5 @@ type ConfigMapNamespaceLister interface { // configMapNamespaceLister implements the ConfigMapNamespaceLister // interface. type configMapNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ConfigMaps in the indexer for a given namespace. -func (s configMapNamespaceLister) List(selector labels.Selector) (ret []*v1.ConfigMap, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ConfigMap)) - }) - return ret, err -} - -// Get retrieves the ConfigMap from the indexer for a given namespace and name. -func (s configMapNamespaceLister) Get(name string) (*v1.ConfigMap, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("configmap"), name) - } - return obj.(*v1.ConfigMap), nil + listers.ResourceIndexer[*v1.ConfigMap] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/endpoints.go b/vendor/k8s.io/client-go/listers/core/v1/endpoints.go index 4759ce808f..726b432559 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/endpoints.go +++ b/vendor/k8s.io/client-go/listers/core/v1/endpoints.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type EndpointsLister interface { // endpointsLister implements the EndpointsLister interface. type endpointsLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Endpoints] } // NewEndpointsLister returns a new EndpointsLister. func NewEndpointsLister(indexer cache.Indexer) EndpointsLister { - return &endpointsLister{indexer: indexer} -} - -// List lists all Endpoints in the indexer. -func (s *endpointsLister) List(selector labels.Selector) (ret []*v1.Endpoints, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Endpoints)) - }) - return ret, err + return &endpointsLister{listers.New[*v1.Endpoints](indexer, v1.Resource("endpoints"))} } // Endpoints returns an object that can list and get Endpoints. func (s *endpointsLister) Endpoints(namespace string) EndpointsNamespaceLister { - return endpointsNamespaceLister{indexer: s.indexer, namespace: namespace} + return endpointsNamespaceLister{listers.NewNamespaced[*v1.Endpoints](s.ResourceIndexer, namespace)} } // EndpointsNamespaceLister helps list and get Endpoints. @@ -74,26 +66,5 @@ type EndpointsNamespaceLister interface { // endpointsNamespaceLister implements the EndpointsNamespaceLister // interface. type endpointsNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Endpoints in the indexer for a given namespace. -func (s endpointsNamespaceLister) List(selector labels.Selector) (ret []*v1.Endpoints, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Endpoints)) - }) - return ret, err -} - -// Get retrieves the Endpoints from the indexer for a given namespace and name. 
-func (s endpointsNamespaceLister) Get(name string) (*v1.Endpoints, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("endpoints"), name) - } - return obj.(*v1.Endpoints), nil + listers.ResourceIndexer[*v1.Endpoints] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/event.go b/vendor/k8s.io/client-go/listers/core/v1/event.go index 4416e20120..5ab3a19321 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/event.go +++ b/vendor/k8s.io/client-go/listers/core/v1/event.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type EventLister interface { // eventLister implements the EventLister interface. type eventLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Event] } // NewEventLister returns a new EventLister. func NewEventLister(indexer cache.Indexer) EventLister { - return &eventLister{indexer: indexer} -} - -// List lists all Events in the indexer. -func (s *eventLister) List(selector labels.Selector) (ret []*v1.Event, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Event)) - }) - return ret, err + return &eventLister{listers.New[*v1.Event](indexer, v1.Resource("event"))} } // Events returns an object that can list and get Events. func (s *eventLister) Events(namespace string) EventNamespaceLister { - return eventNamespaceLister{indexer: s.indexer, namespace: namespace} + return eventNamespaceLister{listers.NewNamespaced[*v1.Event](s.ResourceIndexer, namespace)} } // EventNamespaceLister helps list and get Events. @@ -74,26 +66,5 @@ type EventNamespaceLister interface { // eventNamespaceLister implements the EventNamespaceLister // interface. type eventNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Events in the indexer for a given namespace. -func (s eventNamespaceLister) List(selector labels.Selector) (ret []*v1.Event, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Event)) - }) - return ret, err -} - -// Get retrieves the Event from the indexer for a given namespace and name. -func (s eventNamespaceLister) Get(name string) (*v1.Event, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("event"), name) - } - return obj.(*v1.Event), nil + listers.ResourceIndexer[*v1.Event] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/limitrange.go b/vendor/k8s.io/client-go/listers/core/v1/limitrange.go index d8fa569cd3..5c7593cfa9 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/limitrange.go +++ b/vendor/k8s.io/client-go/listers/core/v1/limitrange.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type LimitRangeLister interface { // limitRangeLister implements the LimitRangeLister interface. type limitRangeLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.LimitRange] } // NewLimitRangeLister returns a new LimitRangeLister. 
func NewLimitRangeLister(indexer cache.Indexer) LimitRangeLister { - return &limitRangeLister{indexer: indexer} -} - -// List lists all LimitRanges in the indexer. -func (s *limitRangeLister) List(selector labels.Selector) (ret []*v1.LimitRange, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.LimitRange)) - }) - return ret, err + return &limitRangeLister{listers.New[*v1.LimitRange](indexer, v1.Resource("limitrange"))} } // LimitRanges returns an object that can list and get LimitRanges. func (s *limitRangeLister) LimitRanges(namespace string) LimitRangeNamespaceLister { - return limitRangeNamespaceLister{indexer: s.indexer, namespace: namespace} + return limitRangeNamespaceLister{listers.NewNamespaced[*v1.LimitRange](s.ResourceIndexer, namespace)} } // LimitRangeNamespaceLister helps list and get LimitRanges. @@ -74,26 +66,5 @@ type LimitRangeNamespaceLister interface { // limitRangeNamespaceLister implements the LimitRangeNamespaceLister // interface. type limitRangeNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all LimitRanges in the indexer for a given namespace. -func (s limitRangeNamespaceLister) List(selector labels.Selector) (ret []*v1.LimitRange, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.LimitRange)) - }) - return ret, err -} - -// Get retrieves the LimitRange from the indexer for a given namespace and name. -func (s limitRangeNamespaceLister) Get(name string) (*v1.LimitRange, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("limitrange"), name) - } - return obj.(*v1.LimitRange), nil + listers.ResourceIndexer[*v1.LimitRange] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/namespace.go b/vendor/k8s.io/client-go/listers/core/v1/namespace.go index 454aa1a0a2..a016447cfe 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/namespace.go +++ b/vendor/k8s.io/client-go/listers/core/v1/namespace.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type NamespaceLister interface { // namespaceLister implements the NamespaceLister interface. type namespaceLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Namespace] } // NewNamespaceLister returns a new NamespaceLister. func NewNamespaceLister(indexer cache.Indexer) NamespaceLister { - return &namespaceLister{indexer: indexer} -} - -// List lists all Namespaces in the indexer. -func (s *namespaceLister) List(selector labels.Selector) (ret []*v1.Namespace, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Namespace)) - }) - return ret, err -} - -// Get retrieves the Namespace from the index for a given name. 
-func (s *namespaceLister) Get(name string) (*v1.Namespace, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("namespace"), name) - } - return obj.(*v1.Namespace), nil + return &namespaceLister{listers.New[*v1.Namespace](indexer, v1.Resource("namespace"))} } diff --git a/vendor/k8s.io/client-go/listers/core/v1/node.go b/vendor/k8s.io/client-go/listers/core/v1/node.go index 596049857f..495c6d79d1 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/node.go +++ b/vendor/k8s.io/client-go/listers/core/v1/node.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type NodeLister interface { // nodeLister implements the NodeLister interface. type nodeLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Node] } // NewNodeLister returns a new NodeLister. func NewNodeLister(indexer cache.Indexer) NodeLister { - return &nodeLister{indexer: indexer} -} - -// List lists all Nodes in the indexer. -func (s *nodeLister) List(selector labels.Selector) (ret []*v1.Node, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Node)) - }) - return ret, err -} - -// Get retrieves the Node from the index for a given name. -func (s *nodeLister) Get(name string) (*v1.Node, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("node"), name) - } - return obj.(*v1.Node), nil + return &nodeLister{listers.New[*v1.Node](indexer, v1.Resource("node"))} } diff --git a/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go index e7dfd4ac9f..17f19bb7a4 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go +++ b/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type PersistentVolumeLister interface { // persistentVolumeLister implements the PersistentVolumeLister interface. type persistentVolumeLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.PersistentVolume] } // NewPersistentVolumeLister returns a new PersistentVolumeLister. func NewPersistentVolumeLister(indexer cache.Indexer) PersistentVolumeLister { - return &persistentVolumeLister{indexer: indexer} -} - -// List lists all PersistentVolumes in the indexer. -func (s *persistentVolumeLister) List(selector labels.Selector) (ret []*v1.PersistentVolume, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PersistentVolume)) - }) - return ret, err -} - -// Get retrieves the PersistentVolume from the index for a given name. 
-func (s *persistentVolumeLister) Get(name string) (*v1.PersistentVolume, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("persistentvolume"), name) - } - return obj.(*v1.PersistentVolume), nil + return &persistentVolumeLister{listers.New[*v1.PersistentVolume](indexer, v1.Resource("persistentvolume"))} } diff --git a/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go index fc71bb5a1f..ce9df90314 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go +++ b/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type PersistentVolumeClaimLister interface { // persistentVolumeClaimLister implements the PersistentVolumeClaimLister interface. type persistentVolumeClaimLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.PersistentVolumeClaim] } // NewPersistentVolumeClaimLister returns a new PersistentVolumeClaimLister. func NewPersistentVolumeClaimLister(indexer cache.Indexer) PersistentVolumeClaimLister { - return &persistentVolumeClaimLister{indexer: indexer} -} - -// List lists all PersistentVolumeClaims in the indexer. -func (s *persistentVolumeClaimLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PersistentVolumeClaim)) - }) - return ret, err + return &persistentVolumeClaimLister{listers.New[*v1.PersistentVolumeClaim](indexer, v1.Resource("persistentvolumeclaim"))} } // PersistentVolumeClaims returns an object that can list and get PersistentVolumeClaims. func (s *persistentVolumeClaimLister) PersistentVolumeClaims(namespace string) PersistentVolumeClaimNamespaceLister { - return persistentVolumeClaimNamespaceLister{indexer: s.indexer, namespace: namespace} + return persistentVolumeClaimNamespaceLister{listers.NewNamespaced[*v1.PersistentVolumeClaim](s.ResourceIndexer, namespace)} } // PersistentVolumeClaimNamespaceLister helps list and get PersistentVolumeClaims. @@ -74,26 +66,5 @@ type PersistentVolumeClaimNamespaceLister interface { // persistentVolumeClaimNamespaceLister implements the PersistentVolumeClaimNamespaceLister // interface. type persistentVolumeClaimNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all PersistentVolumeClaims in the indexer for a given namespace. -func (s persistentVolumeClaimNamespaceLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PersistentVolumeClaim)) - }) - return ret, err -} - -// Get retrieves the PersistentVolumeClaim from the indexer for a given namespace and name. 
-func (s persistentVolumeClaimNamespaceLister) Get(name string) (*v1.PersistentVolumeClaim, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("persistentvolumeclaim"), name) - } - return obj.(*v1.PersistentVolumeClaim), nil + listers.ResourceIndexer[*v1.PersistentVolumeClaim] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/pod.go b/vendor/k8s.io/client-go/listers/core/v1/pod.go index ab8f0946c3..b17a8382a0 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/pod.go +++ b/vendor/k8s.io/client-go/listers/core/v1/pod.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type PodLister interface { // podLister implements the PodLister interface. type podLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Pod] } // NewPodLister returns a new PodLister. func NewPodLister(indexer cache.Indexer) PodLister { - return &podLister{indexer: indexer} -} - -// List lists all Pods in the indexer. -func (s *podLister) List(selector labels.Selector) (ret []*v1.Pod, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Pod)) - }) - return ret, err + return &podLister{listers.New[*v1.Pod](indexer, v1.Resource("pod"))} } // Pods returns an object that can list and get Pods. func (s *podLister) Pods(namespace string) PodNamespaceLister { - return podNamespaceLister{indexer: s.indexer, namespace: namespace} + return podNamespaceLister{listers.NewNamespaced[*v1.Pod](s.ResourceIndexer, namespace)} } // PodNamespaceLister helps list and get Pods. @@ -74,26 +66,5 @@ type PodNamespaceLister interface { // podNamespaceLister implements the PodNamespaceLister // interface. type podNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Pods in the indexer for a given namespace. -func (s podNamespaceLister) List(selector labels.Selector) (ret []*v1.Pod, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Pod)) - }) - return ret, err -} - -// Get retrieves the Pod from the indexer for a given namespace and name. -func (s podNamespaceLister) Get(name string) (*v1.Pod, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("pod"), name) - } - return obj.(*v1.Pod), nil + listers.ResourceIndexer[*v1.Pod] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go b/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go index 6c310045b7..8ac93148f5 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go +++ b/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type PodTemplateLister interface { // podTemplateLister implements the PodTemplateLister interface. type podTemplateLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.PodTemplate] } // NewPodTemplateLister returns a new PodTemplateLister. 
func NewPodTemplateLister(indexer cache.Indexer) PodTemplateLister { - return &podTemplateLister{indexer: indexer} -} - -// List lists all PodTemplates in the indexer. -func (s *podTemplateLister) List(selector labels.Selector) (ret []*v1.PodTemplate, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PodTemplate)) - }) - return ret, err + return &podTemplateLister{listers.New[*v1.PodTemplate](indexer, v1.Resource("podtemplate"))} } // PodTemplates returns an object that can list and get PodTemplates. func (s *podTemplateLister) PodTemplates(namespace string) PodTemplateNamespaceLister { - return podTemplateNamespaceLister{indexer: s.indexer, namespace: namespace} + return podTemplateNamespaceLister{listers.NewNamespaced[*v1.PodTemplate](s.ResourceIndexer, namespace)} } // PodTemplateNamespaceLister helps list and get PodTemplates. @@ -74,26 +66,5 @@ type PodTemplateNamespaceLister interface { // podTemplateNamespaceLister implements the PodTemplateNamespaceLister // interface. type podTemplateNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all PodTemplates in the indexer for a given namespace. -func (s podTemplateNamespaceLister) List(selector labels.Selector) (ret []*v1.PodTemplate, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PodTemplate)) - }) - return ret, err -} - -// Get retrieves the PodTemplate from the indexer for a given namespace and name. -func (s podTemplateNamespaceLister) Get(name string) (*v1.PodTemplate, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("podtemplate"), name) - } - return obj.(*v1.PodTemplate), nil + listers.ResourceIndexer[*v1.PodTemplate] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go index e28e2ef768..8ce23fc09a 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go +++ b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type ReplicationControllerLister interface { // replicationControllerLister implements the ReplicationControllerLister interface. type replicationControllerLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.ReplicationController] } // NewReplicationControllerLister returns a new ReplicationControllerLister. func NewReplicationControllerLister(indexer cache.Indexer) ReplicationControllerLister { - return &replicationControllerLister{indexer: indexer} -} - -// List lists all ReplicationControllers in the indexer. -func (s *replicationControllerLister) List(selector labels.Selector) (ret []*v1.ReplicationController, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ReplicationController)) - }) - return ret, err + return &replicationControllerLister{listers.New[*v1.ReplicationController](indexer, v1.Resource("replicationcontroller"))} } // ReplicationControllers returns an object that can list and get ReplicationControllers. 
func (s *replicationControllerLister) ReplicationControllers(namespace string) ReplicationControllerNamespaceLister { - return replicationControllerNamespaceLister{indexer: s.indexer, namespace: namespace} + return replicationControllerNamespaceLister{listers.NewNamespaced[*v1.ReplicationController](s.ResourceIndexer, namespace)} } // ReplicationControllerNamespaceLister helps list and get ReplicationControllers. @@ -74,26 +66,5 @@ type ReplicationControllerNamespaceLister interface { // replicationControllerNamespaceLister implements the ReplicationControllerNamespaceLister // interface. type replicationControllerNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ReplicationControllers in the indexer for a given namespace. -func (s replicationControllerNamespaceLister) List(selector labels.Selector) (ret []*v1.ReplicationController, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ReplicationController)) - }) - return ret, err -} - -// Get retrieves the ReplicationController from the indexer for a given namespace and name. -func (s replicationControllerNamespaceLister) Get(name string) (*v1.ReplicationController, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("replicationcontroller"), name) - } - return obj.(*v1.ReplicationController), nil + listers.ResourceIndexer[*v1.ReplicationController] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go b/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go index 9c00b49d4f..4b46194a25 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go +++ b/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type ResourceQuotaLister interface { // resourceQuotaLister implements the ResourceQuotaLister interface. type resourceQuotaLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.ResourceQuota] } // NewResourceQuotaLister returns a new ResourceQuotaLister. func NewResourceQuotaLister(indexer cache.Indexer) ResourceQuotaLister { - return &resourceQuotaLister{indexer: indexer} -} - -// List lists all ResourceQuotas in the indexer. -func (s *resourceQuotaLister) List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ResourceQuota)) - }) - return ret, err + return &resourceQuotaLister{listers.New[*v1.ResourceQuota](indexer, v1.Resource("resourcequota"))} } // ResourceQuotas returns an object that can list and get ResourceQuotas. func (s *resourceQuotaLister) ResourceQuotas(namespace string) ResourceQuotaNamespaceLister { - return resourceQuotaNamespaceLister{indexer: s.indexer, namespace: namespace} + return resourceQuotaNamespaceLister{listers.NewNamespaced[*v1.ResourceQuota](s.ResourceIndexer, namespace)} } // ResourceQuotaNamespaceLister helps list and get ResourceQuotas. @@ -74,26 +66,5 @@ type ResourceQuotaNamespaceLister interface { // resourceQuotaNamespaceLister implements the ResourceQuotaNamespaceLister // interface. 
type resourceQuotaNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ResourceQuotas in the indexer for a given namespace. -func (s resourceQuotaNamespaceLister) List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ResourceQuota)) - }) - return ret, err -} - -// Get retrieves the ResourceQuota from the indexer for a given namespace and name. -func (s resourceQuotaNamespaceLister) Get(name string) (*v1.ResourceQuota, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("resourcequota"), name) - } - return obj.(*v1.ResourceQuota), nil + listers.ResourceIndexer[*v1.ResourceQuota] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/secret.go b/vendor/k8s.io/client-go/listers/core/v1/secret.go index d386d4d5cb..47a0c9a084 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/secret.go +++ b/vendor/k8s.io/client-go/listers/core/v1/secret.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type SecretLister interface { // secretLister implements the SecretLister interface. type secretLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Secret] } // NewSecretLister returns a new SecretLister. func NewSecretLister(indexer cache.Indexer) SecretLister { - return &secretLister{indexer: indexer} -} - -// List lists all Secrets in the indexer. -func (s *secretLister) List(selector labels.Selector) (ret []*v1.Secret, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Secret)) - }) - return ret, err + return &secretLister{listers.New[*v1.Secret](indexer, v1.Resource("secret"))} } // Secrets returns an object that can list and get Secrets. func (s *secretLister) Secrets(namespace string) SecretNamespaceLister { - return secretNamespaceLister{indexer: s.indexer, namespace: namespace} + return secretNamespaceLister{listers.NewNamespaced[*v1.Secret](s.ResourceIndexer, namespace)} } // SecretNamespaceLister helps list and get Secrets. @@ -74,26 +66,5 @@ type SecretNamespaceLister interface { // secretNamespaceLister implements the SecretNamespaceLister // interface. type secretNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Secrets in the indexer for a given namespace. -func (s secretNamespaceLister) List(selector labels.Selector) (ret []*v1.Secret, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Secret)) - }) - return ret, err -} - -// Get retrieves the Secret from the indexer for a given namespace and name. 
-func (s secretNamespaceLister) Get(name string) (*v1.Secret, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("secret"), name) - } - return obj.(*v1.Secret), nil + listers.ResourceIndexer[*v1.Secret] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/service.go b/vendor/k8s.io/client-go/listers/core/v1/service.go index 51026d7b4b..536fb337f9 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/service.go +++ b/vendor/k8s.io/client-go/listers/core/v1/service.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type ServiceLister interface { // serviceLister implements the ServiceLister interface. type serviceLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Service] } // NewServiceLister returns a new ServiceLister. func NewServiceLister(indexer cache.Indexer) ServiceLister { - return &serviceLister{indexer: indexer} -} - -// List lists all Services in the indexer. -func (s *serviceLister) List(selector labels.Selector) (ret []*v1.Service, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Service)) - }) - return ret, err + return &serviceLister{listers.New[*v1.Service](indexer, v1.Resource("service"))} } // Services returns an object that can list and get Services. func (s *serviceLister) Services(namespace string) ServiceNamespaceLister { - return serviceNamespaceLister{indexer: s.indexer, namespace: namespace} + return serviceNamespaceLister{listers.NewNamespaced[*v1.Service](s.ResourceIndexer, namespace)} } // ServiceNamespaceLister helps list and get Services. @@ -74,26 +66,5 @@ type ServiceNamespaceLister interface { // serviceNamespaceLister implements the ServiceNamespaceLister // interface. type serviceNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Services in the indexer for a given namespace. -func (s serviceNamespaceLister) List(selector labels.Selector) (ret []*v1.Service, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Service)) - }) - return ret, err -} - -// Get retrieves the Service from the indexer for a given namespace and name. -func (s serviceNamespaceLister) Get(name string) (*v1.Service, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("service"), name) - } - return obj.(*v1.Service), nil + listers.ResourceIndexer[*v1.Service] } diff --git a/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go index aa9554d8bb..8a4af4f4cb 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go +++ b/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type ServiceAccountLister interface { // serviceAccountLister implements the ServiceAccountLister interface. 
type serviceAccountLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.ServiceAccount] } // NewServiceAccountLister returns a new ServiceAccountLister. func NewServiceAccountLister(indexer cache.Indexer) ServiceAccountLister { - return &serviceAccountLister{indexer: indexer} -} - -// List lists all ServiceAccounts in the indexer. -func (s *serviceAccountLister) List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ServiceAccount)) - }) - return ret, err + return &serviceAccountLister{listers.New[*v1.ServiceAccount](indexer, v1.Resource("serviceaccount"))} } // ServiceAccounts returns an object that can list and get ServiceAccounts. func (s *serviceAccountLister) ServiceAccounts(namespace string) ServiceAccountNamespaceLister { - return serviceAccountNamespaceLister{indexer: s.indexer, namespace: namespace} + return serviceAccountNamespaceLister{listers.NewNamespaced[*v1.ServiceAccount](s.ResourceIndexer, namespace)} } // ServiceAccountNamespaceLister helps list and get ServiceAccounts. @@ -74,26 +66,5 @@ type ServiceAccountNamespaceLister interface { // serviceAccountNamespaceLister implements the ServiceAccountNamespaceLister // interface. type serviceAccountNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ServiceAccounts in the indexer for a given namespace. -func (s serviceAccountNamespaceLister) List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ServiceAccount)) - }) - return ret, err -} - -// Get retrieves the ServiceAccount from the indexer for a given namespace and name. -func (s serviceAccountNamespaceLister) Get(name string) (*v1.ServiceAccount, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("serviceaccount"), name) - } - return obj.(*v1.ServiceAccount), nil + listers.ResourceIndexer[*v1.ServiceAccount] } diff --git a/vendor/k8s.io/client-go/listers/discovery/v1/endpointslice.go b/vendor/k8s.io/client-go/listers/discovery/v1/endpointslice.go index 4dd46ff1bf..dcb18f19a8 100644 --- a/vendor/k8s.io/client-go/listers/discovery/v1/endpointslice.go +++ b/vendor/k8s.io/client-go/listers/discovery/v1/endpointslice.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/discovery/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type EndpointSliceLister interface { // endpointSliceLister implements the EndpointSliceLister interface. type endpointSliceLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.EndpointSlice] } // NewEndpointSliceLister returns a new EndpointSliceLister. func NewEndpointSliceLister(indexer cache.Indexer) EndpointSliceLister { - return &endpointSliceLister{indexer: indexer} -} - -// List lists all EndpointSlices in the indexer. 
-func (s *endpointSliceLister) List(selector labels.Selector) (ret []*v1.EndpointSlice, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.EndpointSlice)) - }) - return ret, err + return &endpointSliceLister{listers.New[*v1.EndpointSlice](indexer, v1.Resource("endpointslice"))} } // EndpointSlices returns an object that can list and get EndpointSlices. func (s *endpointSliceLister) EndpointSlices(namespace string) EndpointSliceNamespaceLister { - return endpointSliceNamespaceLister{indexer: s.indexer, namespace: namespace} + return endpointSliceNamespaceLister{listers.NewNamespaced[*v1.EndpointSlice](s.ResourceIndexer, namespace)} } // EndpointSliceNamespaceLister helps list and get EndpointSlices. @@ -74,26 +66,5 @@ type EndpointSliceNamespaceLister interface { // endpointSliceNamespaceLister implements the EndpointSliceNamespaceLister // interface. type endpointSliceNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all EndpointSlices in the indexer for a given namespace. -func (s endpointSliceNamespaceLister) List(selector labels.Selector) (ret []*v1.EndpointSlice, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.EndpointSlice)) - }) - return ret, err -} - -// Get retrieves the EndpointSlice from the indexer for a given namespace and name. -func (s endpointSliceNamespaceLister) Get(name string) (*v1.EndpointSlice, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("endpointslice"), name) - } - return obj.(*v1.EndpointSlice), nil + listers.ResourceIndexer[*v1.EndpointSlice] } diff --git a/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go b/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go index e92872d5f4..d3762f5c28 100644 --- a/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go +++ b/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/discovery/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type EndpointSliceLister interface { // endpointSliceLister implements the EndpointSliceLister interface. type endpointSliceLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.EndpointSlice] } // NewEndpointSliceLister returns a new EndpointSliceLister. func NewEndpointSliceLister(indexer cache.Indexer) EndpointSliceLister { - return &endpointSliceLister{indexer: indexer} -} - -// List lists all EndpointSlices in the indexer. -func (s *endpointSliceLister) List(selector labels.Selector) (ret []*v1beta1.EndpointSlice, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.EndpointSlice)) - }) - return ret, err + return &endpointSliceLister{listers.New[*v1beta1.EndpointSlice](indexer, v1beta1.Resource("endpointslice"))} } // EndpointSlices returns an object that can list and get EndpointSlices. 
func (s *endpointSliceLister) EndpointSlices(namespace string) EndpointSliceNamespaceLister { - return endpointSliceNamespaceLister{indexer: s.indexer, namespace: namespace} + return endpointSliceNamespaceLister{listers.NewNamespaced[*v1beta1.EndpointSlice](s.ResourceIndexer, namespace)} } // EndpointSliceNamespaceLister helps list and get EndpointSlices. @@ -74,26 +66,5 @@ type EndpointSliceNamespaceLister interface { // endpointSliceNamespaceLister implements the EndpointSliceNamespaceLister // interface. type endpointSliceNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all EndpointSlices in the indexer for a given namespace. -func (s endpointSliceNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.EndpointSlice, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.EndpointSlice)) - }) - return ret, err -} - -// Get retrieves the EndpointSlice from the indexer for a given namespace and name. -func (s endpointSliceNamespaceLister) Get(name string) (*v1beta1.EndpointSlice, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("endpointslice"), name) - } - return obj.(*v1beta1.EndpointSlice), nil + listers.ResourceIndexer[*v1beta1.EndpointSlice] } diff --git a/vendor/k8s.io/client-go/listers/doc.go b/vendor/k8s.io/client-go/listers/doc.go new file mode 100644 index 0000000000..96c330c931 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package listers provides generated listers for Kubernetes APIs. +package listers // import "k8s.io/client-go/listers" diff --git a/vendor/k8s.io/client-go/listers/events/v1/event.go b/vendor/k8s.io/client-go/listers/events/v1/event.go index 4abe841e26..66e3c64669 100644 --- a/vendor/k8s.io/client-go/listers/events/v1/event.go +++ b/vendor/k8s.io/client-go/listers/events/v1/event.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/events/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type EventLister interface { // eventLister implements the EventLister interface. type eventLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Event] } // NewEventLister returns a new EventLister. func NewEventLister(indexer cache.Indexer) EventLister { - return &eventLister{indexer: indexer} -} - -// List lists all Events in the indexer. -func (s *eventLister) List(selector labels.Selector) (ret []*v1.Event, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Event)) - }) - return ret, err + return &eventLister{listers.New[*v1.Event](indexer, v1.Resource("event"))} } // Events returns an object that can list and get Events. 
func (s *eventLister) Events(namespace string) EventNamespaceLister { - return eventNamespaceLister{indexer: s.indexer, namespace: namespace} + return eventNamespaceLister{listers.NewNamespaced[*v1.Event](s.ResourceIndexer, namespace)} } // EventNamespaceLister helps list and get Events. @@ -74,26 +66,5 @@ type EventNamespaceLister interface { // eventNamespaceLister implements the EventNamespaceLister // interface. type eventNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Events in the indexer for a given namespace. -func (s eventNamespaceLister) List(selector labels.Selector) (ret []*v1.Event, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Event)) - }) - return ret, err -} - -// Get retrieves the Event from the indexer for a given namespace and name. -func (s eventNamespaceLister) Get(name string) (*v1.Event, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("event"), name) - } - return obj.(*v1.Event), nil + listers.ResourceIndexer[*v1.Event] } diff --git a/vendor/k8s.io/client-go/listers/events/v1beta1/event.go b/vendor/k8s.io/client-go/listers/events/v1beta1/event.go index 41a521be6f..3d51bb2656 100644 --- a/vendor/k8s.io/client-go/listers/events/v1beta1/event.go +++ b/vendor/k8s.io/client-go/listers/events/v1beta1/event.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/events/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type EventLister interface { // eventLister implements the EventLister interface. type eventLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.Event] } // NewEventLister returns a new EventLister. func NewEventLister(indexer cache.Indexer) EventLister { - return &eventLister{indexer: indexer} -} - -// List lists all Events in the indexer. -func (s *eventLister) List(selector labels.Selector) (ret []*v1beta1.Event, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Event)) - }) - return ret, err + return &eventLister{listers.New[*v1beta1.Event](indexer, v1beta1.Resource("event"))} } // Events returns an object that can list and get Events. func (s *eventLister) Events(namespace string) EventNamespaceLister { - return eventNamespaceLister{indexer: s.indexer, namespace: namespace} + return eventNamespaceLister{listers.NewNamespaced[*v1beta1.Event](s.ResourceIndexer, namespace)} } // EventNamespaceLister helps list and get Events. @@ -74,26 +66,5 @@ type EventNamespaceLister interface { // eventNamespaceLister implements the EventNamespaceLister // interface. type eventNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Events in the indexer for a given namespace. -func (s eventNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Event, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Event)) - }) - return ret, err -} - -// Get retrieves the Event from the indexer for a given namespace and name. 
-func (s eventNamespaceLister) Get(name string) (*v1beta1.Event, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("event"), name) - } - return obj.(*v1beta1.Event), nil + listers.ResourceIndexer[*v1beta1.Event] } diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go index 900475410b..4510b42360 100644 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type DaemonSetLister interface { // daemonSetLister implements the DaemonSetLister interface. type daemonSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.DaemonSet] } // NewDaemonSetLister returns a new DaemonSetLister. func NewDaemonSetLister(indexer cache.Indexer) DaemonSetLister { - return &daemonSetLister{indexer: indexer} -} - -// List lists all DaemonSets in the indexer. -func (s *daemonSetLister) List(selector labels.Selector) (ret []*v1beta1.DaemonSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.DaemonSet)) - }) - return ret, err + return &daemonSetLister{listers.New[*v1beta1.DaemonSet](indexer, v1beta1.Resource("daemonset"))} } // DaemonSets returns an object that can list and get DaemonSets. func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister { - return daemonSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return daemonSetNamespaceLister{listers.NewNamespaced[*v1beta1.DaemonSet](s.ResourceIndexer, namespace)} } // DaemonSetNamespaceLister helps list and get DaemonSets. @@ -74,26 +66,5 @@ type DaemonSetNamespaceLister interface { // daemonSetNamespaceLister implements the DaemonSetNamespaceLister // interface. type daemonSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all DaemonSets in the indexer for a given namespace. -func (s daemonSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.DaemonSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.DaemonSet)) - }) - return ret, err -} - -// Get retrieves the DaemonSet from the indexer for a given namespace and name. 
-func (s daemonSetNamespaceLister) Get(name string) (*v1beta1.DaemonSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("daemonset"), name) - } - return obj.(*v1beta1.DaemonSet), nil + listers.ResourceIndexer[*v1beta1.DaemonSet] } diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go index 42b5a07231..149047c973 100644 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type DeploymentLister interface { // deploymentLister implements the DeploymentLister interface. type deploymentLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.Deployment] } // NewDeploymentLister returns a new DeploymentLister. func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { - return &deploymentLister{indexer: indexer} -} - -// List lists all Deployments in the indexer. -func (s *deploymentLister) List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Deployment)) - }) - return ret, err + return &deploymentLister{listers.New[*v1beta1.Deployment](indexer, v1beta1.Resource("deployment"))} } // Deployments returns an object that can list and get Deployments. func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { - return deploymentNamespaceLister{indexer: s.indexer, namespace: namespace} + return deploymentNamespaceLister{listers.NewNamespaced[*v1beta1.Deployment](s.ResourceIndexer, namespace)} } // DeploymentNamespaceLister helps list and get Deployments. @@ -74,26 +66,5 @@ type DeploymentNamespaceLister interface { // deploymentNamespaceLister implements the DeploymentNamespaceLister // interface. type deploymentNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Deployments in the indexer for a given namespace. -func (s deploymentNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Deployment)) - }) - return ret, err -} - -// Get retrieves the Deployment from the indexer for a given namespace and name. 
-func (s deploymentNamespaceLister) Get(name string) (*v1beta1.Deployment, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("deployment"), name) - } - return obj.(*v1beta1.Deployment), nil + listers.ResourceIndexer[*v1beta1.Deployment] } diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go index 1cb7677bd8..b714eebb35 100644 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type IngressLister interface { // ingressLister implements the IngressLister interface. type ingressLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.Ingress] } // NewIngressLister returns a new IngressLister. func NewIngressLister(indexer cache.Indexer) IngressLister { - return &ingressLister{indexer: indexer} -} - -// List lists all Ingresses in the indexer. -func (s *ingressLister) List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Ingress)) - }) - return ret, err + return &ingressLister{listers.New[*v1beta1.Ingress](indexer, v1beta1.Resource("ingress"))} } // Ingresses returns an object that can list and get Ingresses. func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister { - return ingressNamespaceLister{indexer: s.indexer, namespace: namespace} + return ingressNamespaceLister{listers.NewNamespaced[*v1beta1.Ingress](s.ResourceIndexer, namespace)} } // IngressNamespaceLister helps list and get Ingresses. @@ -74,26 +66,5 @@ type IngressNamespaceLister interface { // ingressNamespaceLister implements the IngressNamespaceLister // interface. type ingressNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Ingresses in the indexer for a given namespace. -func (s ingressNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Ingress)) - }) - return ret, err -} - -// Get retrieves the Ingress from the indexer for a given namespace and name. 
-func (s ingressNamespaceLister) Get(name string) (*v1beta1.Ingress, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("ingress"), name) - } - return obj.(*v1beta1.Ingress), nil + listers.ResourceIndexer[*v1beta1.Ingress] } diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go index 84419a8e96..b31099c266 100644 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type NetworkPolicyLister interface { // networkPolicyLister implements the NetworkPolicyLister interface. type networkPolicyLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.NetworkPolicy] } // NewNetworkPolicyLister returns a new NetworkPolicyLister. func NewNetworkPolicyLister(indexer cache.Indexer) NetworkPolicyLister { - return &networkPolicyLister{indexer: indexer} -} - -// List lists all NetworkPolicies in the indexer. -func (s *networkPolicyLister) List(selector labels.Selector) (ret []*v1beta1.NetworkPolicy, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.NetworkPolicy)) - }) - return ret, err + return &networkPolicyLister{listers.New[*v1beta1.NetworkPolicy](indexer, v1beta1.Resource("networkpolicy"))} } // NetworkPolicies returns an object that can list and get NetworkPolicies. func (s *networkPolicyLister) NetworkPolicies(namespace string) NetworkPolicyNamespaceLister { - return networkPolicyNamespaceLister{indexer: s.indexer, namespace: namespace} + return networkPolicyNamespaceLister{listers.NewNamespaced[*v1beta1.NetworkPolicy](s.ResourceIndexer, namespace)} } // NetworkPolicyNamespaceLister helps list and get NetworkPolicies. @@ -74,26 +66,5 @@ type NetworkPolicyNamespaceLister interface { // networkPolicyNamespaceLister implements the NetworkPolicyNamespaceLister // interface. type networkPolicyNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all NetworkPolicies in the indexer for a given namespace. -func (s networkPolicyNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.NetworkPolicy, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.NetworkPolicy)) - }) - return ret, err -} - -// Get retrieves the NetworkPolicy from the indexer for a given namespace and name. 
-func (s networkPolicyNamespaceLister) Get(name string) (*v1beta1.NetworkPolicy, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("networkpolicy"), name) - } - return obj.(*v1beta1.NetworkPolicy), nil + listers.ResourceIndexer[*v1beta1.NetworkPolicy] } diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go index a5ec3229bc..604bee80ba 100644 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type ReplicaSetLister interface { // replicaSetLister implements the ReplicaSetLister interface. type replicaSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.ReplicaSet] } // NewReplicaSetLister returns a new ReplicaSetLister. func NewReplicaSetLister(indexer cache.Indexer) ReplicaSetLister { - return &replicaSetLister{indexer: indexer} -} - -// List lists all ReplicaSets in the indexer. -func (s *replicaSetLister) List(selector labels.Selector) (ret []*v1beta1.ReplicaSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ReplicaSet)) - }) - return ret, err + return &replicaSetLister{listers.New[*v1beta1.ReplicaSet](indexer, v1beta1.Resource("replicaset"))} } // ReplicaSets returns an object that can list and get ReplicaSets. func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceLister { - return replicaSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return replicaSetNamespaceLister{listers.NewNamespaced[*v1beta1.ReplicaSet](s.ResourceIndexer, namespace)} } // ReplicaSetNamespaceLister helps list and get ReplicaSets. @@ -74,26 +66,5 @@ type ReplicaSetNamespaceLister interface { // replicaSetNamespaceLister implements the ReplicaSetNamespaceLister // interface. type replicaSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ReplicaSets in the indexer for a given namespace. -func (s replicaSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.ReplicaSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ReplicaSet)) - }) - return ret, err -} - -// Get retrieves the ReplicaSet from the indexer for a given namespace and name. 
-func (s replicaSetNamespaceLister) Get(name string) (*v1beta1.ReplicaSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("replicaset"), name) - } - return obj.(*v1beta1.ReplicaSet), nil + listers.ResourceIndexer[*v1beta1.ReplicaSet] } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1/flowschema.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1/flowschema.go index 43ccd4e5ff..ba7e514874 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1/flowschema.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1/flowschema.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/flowcontrol/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type FlowSchemaLister interface { // flowSchemaLister implements the FlowSchemaLister interface. type flowSchemaLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.FlowSchema] } // NewFlowSchemaLister returns a new FlowSchemaLister. func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister { - return &flowSchemaLister{indexer: indexer} -} - -// List lists all FlowSchemas in the indexer. -func (s *flowSchemaLister) List(selector labels.Selector) (ret []*v1.FlowSchema, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.FlowSchema)) - }) - return ret, err -} - -// Get retrieves the FlowSchema from the index for a given name. -func (s *flowSchemaLister) Get(name string) (*v1.FlowSchema, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("flowschema"), name) - } - return obj.(*v1.FlowSchema), nil + return &flowSchemaLister{listers.New[*v1.FlowSchema](indexer, v1.Resource("flowschema"))} } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1/prioritylevelconfiguration.go index 61189b9cf9..61f5b9fe6d 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1/prioritylevelconfiguration.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/flowcontrol/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type PriorityLevelConfigurationLister interface { // priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface. type priorityLevelConfigurationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.PriorityLevelConfiguration] } // NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister. func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister { - return &priorityLevelConfigurationLister{indexer: indexer} -} - -// List lists all PriorityLevelConfigurations in the indexer. -func (s *priorityLevelConfigurationLister) List(selector labels.Selector) (ret []*v1.PriorityLevelConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PriorityLevelConfiguration)) - }) - return ret, err -} - -// Get retrieves the PriorityLevelConfiguration from the index for a given name. 
-func (s *priorityLevelConfigurationLister) Get(name string) (*v1.PriorityLevelConfiguration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("prioritylevelconfiguration"), name) - } - return obj.(*v1.PriorityLevelConfiguration), nil + return &priorityLevelConfigurationLister{listers.New[*v1.PriorityLevelConfiguration](indexer, v1.Resource("prioritylevelconfiguration"))} } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go index 7927a8411e..59bca6ce4e 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/flowcontrol/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type FlowSchemaLister interface { // flowSchemaLister implements the FlowSchemaLister interface. type flowSchemaLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.FlowSchema] } // NewFlowSchemaLister returns a new FlowSchemaLister. func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister { - return &flowSchemaLister{indexer: indexer} -} - -// List lists all FlowSchemas in the indexer. -func (s *flowSchemaLister) List(selector labels.Selector) (ret []*v1beta1.FlowSchema, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.FlowSchema)) - }) - return ret, err -} - -// Get retrieves the FlowSchema from the index for a given name. -func (s *flowSchemaLister) Get(name string) (*v1beta1.FlowSchema, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("flowschema"), name) - } - return obj.(*v1beta1.FlowSchema), nil + return &flowSchemaLister{listers.New[*v1beta1.FlowSchema](indexer, v1beta1.Resource("flowschema"))} } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go index c94aaa4c1d..902f7cc4bb 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/flowcontrol/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type PriorityLevelConfigurationLister interface { // priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface. type priorityLevelConfigurationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.PriorityLevelConfiguration] } // NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister. func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister { - return &priorityLevelConfigurationLister{indexer: indexer} -} - -// List lists all PriorityLevelConfigurations in the indexer. 
-func (s *priorityLevelConfigurationLister) List(selector labels.Selector) (ret []*v1beta1.PriorityLevelConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.PriorityLevelConfiguration)) - }) - return ret, err -} - -// Get retrieves the PriorityLevelConfiguration from the index for a given name. -func (s *priorityLevelConfigurationLister) Get(name string) (*v1beta1.PriorityLevelConfiguration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("prioritylevelconfiguration"), name) - } - return obj.(*v1beta1.PriorityLevelConfiguration), nil + return &priorityLevelConfigurationLister{listers.New[*v1beta1.PriorityLevelConfiguration](indexer, v1beta1.Resource("prioritylevelconfiguration"))} } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/flowschema.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/flowschema.go index 2710f26306..721c5f6bdd 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/flowschema.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/flowschema.go @@ -20,8 +20,8 @@ package v1beta2 import ( v1beta2 "k8s.io/api/flowcontrol/v1beta2" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type FlowSchemaLister interface { // flowSchemaLister implements the FlowSchemaLister interface. type flowSchemaLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta2.FlowSchema] } // NewFlowSchemaLister returns a new FlowSchemaLister. func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister { - return &flowSchemaLister{indexer: indexer} -} - -// List lists all FlowSchemas in the indexer. -func (s *flowSchemaLister) List(selector labels.Selector) (ret []*v1beta2.FlowSchema, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.FlowSchema)) - }) - return ret, err -} - -// Get retrieves the FlowSchema from the index for a given name. -func (s *flowSchemaLister) Get(name string) (*v1beta2.FlowSchema, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta2.Resource("flowschema"), name) - } - return obj.(*v1beta2.FlowSchema), nil + return &flowSchemaLister{listers.New[*v1beta2.FlowSchema](indexer, v1beta2.Resource("flowschema"))} } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/prioritylevelconfiguration.go index 00ede00709..3e8a2134fc 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/prioritylevelconfiguration.go @@ -20,8 +20,8 @@ package v1beta2 import ( v1beta2 "k8s.io/api/flowcontrol/v1beta2" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type PriorityLevelConfigurationLister interface { // priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface. 
type priorityLevelConfigurationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta2.PriorityLevelConfiguration] } // NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister. func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister { - return &priorityLevelConfigurationLister{indexer: indexer} -} - -// List lists all PriorityLevelConfigurations in the indexer. -func (s *priorityLevelConfigurationLister) List(selector labels.Selector) (ret []*v1beta2.PriorityLevelConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.PriorityLevelConfiguration)) - }) - return ret, err -} - -// Get retrieves the PriorityLevelConfiguration from the index for a given name. -func (s *priorityLevelConfigurationLister) Get(name string) (*v1beta2.PriorityLevelConfiguration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta2.Resource("prioritylevelconfiguration"), name) - } - return obj.(*v1beta2.PriorityLevelConfiguration), nil + return &priorityLevelConfigurationLister{listers.New[*v1beta2.PriorityLevelConfiguration](indexer, v1beta2.Resource("prioritylevelconfiguration"))} } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/flowschema.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/flowschema.go index ef01b5a76e..c5555fd64d 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/flowschema.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/flowschema.go @@ -20,8 +20,8 @@ package v1beta3 import ( v1beta3 "k8s.io/api/flowcontrol/v1beta3" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type FlowSchemaLister interface { // flowSchemaLister implements the FlowSchemaLister interface. type flowSchemaLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta3.FlowSchema] } // NewFlowSchemaLister returns a new FlowSchemaLister. func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister { - return &flowSchemaLister{indexer: indexer} -} - -// List lists all FlowSchemas in the indexer. -func (s *flowSchemaLister) List(selector labels.Selector) (ret []*v1beta3.FlowSchema, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta3.FlowSchema)) - }) - return ret, err -} - -// Get retrieves the FlowSchema from the index for a given name. 
-func (s *flowSchemaLister) Get(name string) (*v1beta3.FlowSchema, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta3.Resource("flowschema"), name) - } - return obj.(*v1beta3.FlowSchema), nil + return &flowSchemaLister{listers.New[*v1beta3.FlowSchema](indexer, v1beta3.Resource("flowschema"))} } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/prioritylevelconfiguration.go index d05613949f..9f7d89c548 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/prioritylevelconfiguration.go @@ -20,8 +20,8 @@ package v1beta3 import ( v1beta3 "k8s.io/api/flowcontrol/v1beta3" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type PriorityLevelConfigurationLister interface { // priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface. type priorityLevelConfigurationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta3.PriorityLevelConfiguration] } // NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister. func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister { - return &priorityLevelConfigurationLister{indexer: indexer} -} - -// List lists all PriorityLevelConfigurations in the indexer. -func (s *priorityLevelConfigurationLister) List(selector labels.Selector) (ret []*v1beta3.PriorityLevelConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta3.PriorityLevelConfiguration)) - }) - return ret, err -} - -// Get retrieves the PriorityLevelConfiguration from the index for a given name. -func (s *priorityLevelConfigurationLister) Get(name string) (*v1beta3.PriorityLevelConfiguration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta3.Resource("prioritylevelconfiguration"), name) - } - return obj.(*v1beta3.PriorityLevelConfiguration), nil + return &priorityLevelConfigurationLister{listers.New[*v1beta3.PriorityLevelConfiguration](indexer, v1beta3.Resource("prioritylevelconfiguration"))} } diff --git a/vendor/k8s.io/client-go/listers/generic_helpers.go b/vendor/k8s.io/client-go/listers/generic_helpers.go new file mode 100644 index 0000000000..c69bb22b11 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/generic_helpers.go @@ -0,0 +1,72 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package listers + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" +) + +// ResourceIndexer wraps an indexer, resource, and optional namespace for a given type. +// This is intended for use by listers (generated by lister-gen) only. +type ResourceIndexer[T runtime.Object] struct { + indexer cache.Indexer + resource schema.GroupResource + namespace string // empty for non-namespaced types +} + +// New returns a new instance of a lister (resource indexer) wrapping the given indexer and resource for the specified type. +// This is intended for use by listers (generated by lister-gen) only. +func New[T runtime.Object](indexer cache.Indexer, resource schema.GroupResource) ResourceIndexer[T] { + return ResourceIndexer[T]{indexer: indexer, resource: resource} +} + +// NewNamespaced returns a new instance of a namespaced lister (resource indexer) wrapping the given parent and namespace for the specified type. +// This is intended for use by listers (generated by lister-gen) only. +func NewNamespaced[T runtime.Object](parent ResourceIndexer[T], namespace string) ResourceIndexer[T] { + return ResourceIndexer[T]{indexer: parent.indexer, resource: parent.resource, namespace: namespace} +} + +// List lists all resources in the indexer matching the given selector. +func (l ResourceIndexer[T]) List(selector labels.Selector) (ret []T, err error) { + // ListAllByNamespace reverts to ListAll on empty namespaces + err = cache.ListAllByNamespace(l.indexer, l.namespace, selector, func(m interface{}) { + ret = append(ret, m.(T)) + }) + return ret, err +} + +// Get retrieves the resource from the index for a given name. +func (l ResourceIndexer[T]) Get(name string) (T, error) { + var key string + if l.namespace == "" { + key = name + } else { + key = l.namespace + "/" + name + } + obj, exists, err := l.indexer.GetByKey(key) + if err != nil { + return *new(T), err + } + if !exists { + return *new(T), errors.NewNotFound(l.resource, name) + } + return obj.(T), nil +} diff --git a/vendor/k8s.io/client-go/listers/networking/v1/ingress.go b/vendor/k8s.io/client-go/listers/networking/v1/ingress.go index 0f49d4f572..3007cd3492 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1/ingress.go +++ b/vendor/k8s.io/client-go/listers/networking/v1/ingress.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type IngressLister interface { // ingressLister implements the IngressLister interface. type ingressLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Ingress] } // NewIngressLister returns a new IngressLister. func NewIngressLister(indexer cache.Indexer) IngressLister { - return &ingressLister{indexer: indexer} -} - -// List lists all Ingresses in the indexer. -func (s *ingressLister) List(selector labels.Selector) (ret []*v1.Ingress, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Ingress)) - }) - return ret, err + return &ingressLister{listers.New[*v1.Ingress](indexer, v1.Resource("ingress"))} } // Ingresses returns an object that can list and get Ingresses. 
func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister { - return ingressNamespaceLister{indexer: s.indexer, namespace: namespace} + return ingressNamespaceLister{listers.NewNamespaced[*v1.Ingress](s.ResourceIndexer, namespace)} } // IngressNamespaceLister helps list and get Ingresses. @@ -74,26 +66,5 @@ type IngressNamespaceLister interface { // ingressNamespaceLister implements the IngressNamespaceLister // interface. type ingressNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Ingresses in the indexer for a given namespace. -func (s ingressNamespaceLister) List(selector labels.Selector) (ret []*v1.Ingress, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Ingress)) - }) - return ret, err -} - -// Get retrieves the Ingress from the indexer for a given namespace and name. -func (s ingressNamespaceLister) Get(name string) (*v1.Ingress, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("ingress"), name) - } - return obj.(*v1.Ingress), nil + listers.ResourceIndexer[*v1.Ingress] } diff --git a/vendor/k8s.io/client-go/listers/networking/v1/ingressclass.go b/vendor/k8s.io/client-go/listers/networking/v1/ingressclass.go index 1480cb13fd..a8efe5c5e3 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1/ingressclass.go +++ b/vendor/k8s.io/client-go/listers/networking/v1/ingressclass.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type IngressClassLister interface { // ingressClassLister implements the IngressClassLister interface. type ingressClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.IngressClass] } // NewIngressClassLister returns a new IngressClassLister. func NewIngressClassLister(indexer cache.Indexer) IngressClassLister { - return &ingressClassLister{indexer: indexer} -} - -// List lists all IngressClasses in the indexer. -func (s *ingressClassLister) List(selector labels.Selector) (ret []*v1.IngressClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.IngressClass)) - }) - return ret, err -} - -// Get retrieves the IngressClass from the index for a given name. 
-func (s *ingressClassLister) Get(name string) (*v1.IngressClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("ingressclass"), name) - } - return obj.(*v1.IngressClass), nil + return &ingressClassLister{listers.New[*v1.IngressClass](indexer, v1.Resource("ingressclass"))} } diff --git a/vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go index 34cabf0577..9a3e3172ef 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go +++ b/vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type NetworkPolicyLister interface { // networkPolicyLister implements the NetworkPolicyLister interface. type networkPolicyLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.NetworkPolicy] } // NewNetworkPolicyLister returns a new NetworkPolicyLister. func NewNetworkPolicyLister(indexer cache.Indexer) NetworkPolicyLister { - return &networkPolicyLister{indexer: indexer} -} - -// List lists all NetworkPolicies in the indexer. -func (s *networkPolicyLister) List(selector labels.Selector) (ret []*v1.NetworkPolicy, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.NetworkPolicy)) - }) - return ret, err + return &networkPolicyLister{listers.New[*v1.NetworkPolicy](indexer, v1.Resource("networkpolicy"))} } // NetworkPolicies returns an object that can list and get NetworkPolicies. func (s *networkPolicyLister) NetworkPolicies(namespace string) NetworkPolicyNamespaceLister { - return networkPolicyNamespaceLister{indexer: s.indexer, namespace: namespace} + return networkPolicyNamespaceLister{listers.NewNamespaced[*v1.NetworkPolicy](s.ResourceIndexer, namespace)} } // NetworkPolicyNamespaceLister helps list and get NetworkPolicies. @@ -74,26 +66,5 @@ type NetworkPolicyNamespaceLister interface { // networkPolicyNamespaceLister implements the NetworkPolicyNamespaceLister // interface. type networkPolicyNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all NetworkPolicies in the indexer for a given namespace. -func (s networkPolicyNamespaceLister) List(selector labels.Selector) (ret []*v1.NetworkPolicy, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.NetworkPolicy)) - }) - return ret, err -} - -// Get retrieves the NetworkPolicy from the indexer for a given namespace and name. 
-func (s networkPolicyNamespaceLister) Get(name string) (*v1.NetworkPolicy, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("networkpolicy"), name) - } - return obj.(*v1.NetworkPolicy), nil + listers.ResourceIndexer[*v1.NetworkPolicy] } diff --git a/vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go b/vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go index b3dfe27971..749affd7b9 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go +++ b/vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/networking/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type IPAddressLister interface { // iPAddressLister implements the IPAddressLister interface. type iPAddressLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.IPAddress] } // NewIPAddressLister returns a new IPAddressLister. func NewIPAddressLister(indexer cache.Indexer) IPAddressLister { - return &iPAddressLister{indexer: indexer} -} - -// List lists all IPAddresses in the indexer. -func (s *iPAddressLister) List(selector labels.Selector) (ret []*v1alpha1.IPAddress, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.IPAddress)) - }) - return ret, err -} - -// Get retrieves the IPAddress from the index for a given name. -func (s *iPAddressLister) Get(name string) (*v1alpha1.IPAddress, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("ipaddress"), name) - } - return obj.(*v1alpha1.IPAddress), nil + return &iPAddressLister{listers.New[*v1alpha1.IPAddress](indexer, v1alpha1.Resource("ipaddress"))} } diff --git a/vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go b/vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go index 8bc2b10e68..8be2d11af4 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go +++ b/vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/networking/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ServiceCIDRLister interface { // serviceCIDRLister implements the ServiceCIDRLister interface. type serviceCIDRLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.ServiceCIDR] } // NewServiceCIDRLister returns a new ServiceCIDRLister. func NewServiceCIDRLister(indexer cache.Indexer) ServiceCIDRLister { - return &serviceCIDRLister{indexer: indexer} -} - -// List lists all ServiceCIDRs in the indexer. -func (s *serviceCIDRLister) List(selector labels.Selector) (ret []*v1alpha1.ServiceCIDR, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ServiceCIDR)) - }) - return ret, err -} - -// Get retrieves the ServiceCIDR from the index for a given name. 
-func (s *serviceCIDRLister) Get(name string) (*v1alpha1.ServiceCIDR, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("servicecidr"), name) - } - return obj.(*v1alpha1.ServiceCIDR), nil + return &serviceCIDRLister{listers.New[*v1alpha1.ServiceCIDR](indexer, v1alpha1.Resource("servicecidr"))} } diff --git a/vendor/k8s.io/client-go/listers/networking/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/networking/v1beta1/expansion_generated.go index d8c99c186e..320af736e6 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1beta1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/networking/v1beta1/expansion_generated.go @@ -18,6 +18,10 @@ limitations under the License. package v1beta1 +// IPAddressListerExpansion allows custom methods to be added to +// IPAddressLister. +type IPAddressListerExpansion interface{} + // IngressListerExpansion allows custom methods to be added to // IngressLister. type IngressListerExpansion interface{} @@ -29,3 +33,7 @@ type IngressNamespaceListerExpansion interface{} // IngressClassListerExpansion allows custom methods to be added to // IngressClassLister. type IngressClassListerExpansion interface{} + +// ServiceCIDRListerExpansion allows custom methods to be added to +// ServiceCIDRLister. +type ServiceCIDRListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go index b8f4d35580..b8fe99e240 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/networking/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type IngressLister interface { // ingressLister implements the IngressLister interface. type ingressLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.Ingress] } // NewIngressLister returns a new IngressLister. func NewIngressLister(indexer cache.Indexer) IngressLister { - return &ingressLister{indexer: indexer} -} - -// List lists all Ingresses in the indexer. -func (s *ingressLister) List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Ingress)) - }) - return ret, err + return &ingressLister{listers.New[*v1beta1.Ingress](indexer, v1beta1.Resource("ingress"))} } // Ingresses returns an object that can list and get Ingresses. func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister { - return ingressNamespaceLister{indexer: s.indexer, namespace: namespace} + return ingressNamespaceLister{listers.NewNamespaced[*v1beta1.Ingress](s.ResourceIndexer, namespace)} } // IngressNamespaceLister helps list and get Ingresses. @@ -74,26 +66,5 @@ type IngressNamespaceLister interface { // ingressNamespaceLister implements the IngressNamespaceLister // interface. type ingressNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Ingresses in the indexer for a given namespace. 
-func (s ingressNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Ingress)) - }) - return ret, err -} - -// Get retrieves the Ingress from the indexer for a given namespace and name. -func (s ingressNamespaceLister) Get(name string) (*v1beta1.Ingress, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("ingress"), name) - } - return obj.(*v1beta1.Ingress), nil + listers.ResourceIndexer[*v1beta1.Ingress] } diff --git a/vendor/k8s.io/client-go/listers/networking/v1beta1/ingressclass.go b/vendor/k8s.io/client-go/listers/networking/v1beta1/ingressclass.go index ebcd6ba85b..a5e33525f6 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1beta1/ingressclass.go +++ b/vendor/k8s.io/client-go/listers/networking/v1beta1/ingressclass.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/networking/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type IngressClassLister interface { // ingressClassLister implements the IngressClassLister interface. type ingressClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.IngressClass] } // NewIngressClassLister returns a new IngressClassLister. func NewIngressClassLister(indexer cache.Indexer) IngressClassLister { - return &ingressClassLister{indexer: indexer} -} - -// List lists all IngressClasses in the indexer. -func (s *ingressClassLister) List(selector labels.Selector) (ret []*v1beta1.IngressClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.IngressClass)) - }) - return ret, err -} - -// Get retrieves the IngressClass from the index for a given name. -func (s *ingressClassLister) Get(name string) (*v1beta1.IngressClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("ingressclass"), name) - } - return obj.(*v1beta1.IngressClass), nil + return &ingressClassLister{listers.New[*v1beta1.IngressClass](indexer, v1beta1.Resource("ingressclass"))} } diff --git a/vendor/k8s.io/client-go/listers/networking/v1beta1/ipaddress.go b/vendor/k8s.io/client-go/listers/networking/v1beta1/ipaddress.go new file mode 100644 index 0000000000..3614066701 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/networking/v1beta1/ipaddress.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + v1beta1 "k8s.io/api/networking/v1beta1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// IPAddressLister helps list IPAddresses. +// All objects returned here must be treated as read-only. +type IPAddressLister interface { + // List lists all IPAddresses in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta1.IPAddress, err error) + // Get retrieves the IPAddress from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1beta1.IPAddress, error) + IPAddressListerExpansion +} + +// iPAddressLister implements the IPAddressLister interface. +type iPAddressLister struct { + listers.ResourceIndexer[*v1beta1.IPAddress] +} + +// NewIPAddressLister returns a new IPAddressLister. +func NewIPAddressLister(indexer cache.Indexer) IPAddressLister { + return &iPAddressLister{listers.New[*v1beta1.IPAddress](indexer, v1beta1.Resource("ipaddress"))} +} diff --git a/vendor/k8s.io/client-go/listers/networking/v1beta1/servicecidr.go b/vendor/k8s.io/client-go/listers/networking/v1beta1/servicecidr.go new file mode 100644 index 0000000000..2902fa7f15 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/networking/v1beta1/servicecidr.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/networking/v1beta1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// ServiceCIDRLister helps list ServiceCIDRs. +// All objects returned here must be treated as read-only. +type ServiceCIDRLister interface { + // List lists all ServiceCIDRs in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta1.ServiceCIDR, err error) + // Get retrieves the ServiceCIDR from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1beta1.ServiceCIDR, error) + ServiceCIDRListerExpansion +} + +// serviceCIDRLister implements the ServiceCIDRLister interface. +type serviceCIDRLister struct { + listers.ResourceIndexer[*v1beta1.ServiceCIDR] +} + +// NewServiceCIDRLister returns a new ServiceCIDRLister. 
+func NewServiceCIDRLister(indexer cache.Indexer) ServiceCIDRLister { + return &serviceCIDRLister{listers.New[*v1beta1.ServiceCIDR](indexer, v1beta1.Resource("servicecidr"))} +} diff --git a/vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go index 6e00cf1a59..17b88687e2 100644 --- a/vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go +++ b/vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/node/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type RuntimeClassLister interface { // runtimeClassLister implements the RuntimeClassLister interface. type runtimeClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.RuntimeClass] } // NewRuntimeClassLister returns a new RuntimeClassLister. func NewRuntimeClassLister(indexer cache.Indexer) RuntimeClassLister { - return &runtimeClassLister{indexer: indexer} -} - -// List lists all RuntimeClasses in the indexer. -func (s *runtimeClassLister) List(selector labels.Selector) (ret []*v1.RuntimeClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.RuntimeClass)) - }) - return ret, err -} - -// Get retrieves the RuntimeClass from the index for a given name. -func (s *runtimeClassLister) Get(name string) (*v1.RuntimeClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("runtimeclass"), name) - } - return obj.(*v1.RuntimeClass), nil + return &runtimeClassLister{listers.New[*v1.RuntimeClass](indexer, v1.Resource("runtimeclass"))} } diff --git a/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go index 31f3357990..1f6e06f489 100644 --- a/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go +++ b/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/node/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type RuntimeClassLister interface { // runtimeClassLister implements the RuntimeClassLister interface. type runtimeClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.RuntimeClass] } // NewRuntimeClassLister returns a new RuntimeClassLister. func NewRuntimeClassLister(indexer cache.Indexer) RuntimeClassLister { - return &runtimeClassLister{indexer: indexer} -} - -// List lists all RuntimeClasses in the indexer. -func (s *runtimeClassLister) List(selector labels.Selector) (ret []*v1alpha1.RuntimeClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.RuntimeClass)) - }) - return ret, err -} - -// Get retrieves the RuntimeClass from the index for a given name. 
-func (s *runtimeClassLister) Get(name string) (*v1alpha1.RuntimeClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("runtimeclass"), name) - } - return obj.(*v1alpha1.RuntimeClass), nil + return &runtimeClassLister{listers.New[*v1alpha1.RuntimeClass](indexer, v1alpha1.Resource("runtimeclass"))} } diff --git a/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go index 7dbd6ab268..cd0cdf3c52 100644 --- a/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go +++ b/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/node/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type RuntimeClassLister interface { // runtimeClassLister implements the RuntimeClassLister interface. type runtimeClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.RuntimeClass] } // NewRuntimeClassLister returns a new RuntimeClassLister. func NewRuntimeClassLister(indexer cache.Indexer) RuntimeClassLister { - return &runtimeClassLister{indexer: indexer} -} - -// List lists all RuntimeClasses in the indexer. -func (s *runtimeClassLister) List(selector labels.Selector) (ret []*v1beta1.RuntimeClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.RuntimeClass)) - }) - return ret, err -} - -// Get retrieves the RuntimeClass from the index for a given name. -func (s *runtimeClassLister) Get(name string) (*v1beta1.RuntimeClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("runtimeclass"), name) - } - return obj.(*v1beta1.RuntimeClass), nil + return &runtimeClassLister{listers.New[*v1beta1.RuntimeClass](indexer, v1beta1.Resource("runtimeclass"))} } diff --git a/vendor/k8s.io/client-go/listers/policy/v1/eviction.go b/vendor/k8s.io/client-go/listers/policy/v1/eviction.go index dc5ffa0740..83695668fa 100644 --- a/vendor/k8s.io/client-go/listers/policy/v1/eviction.go +++ b/vendor/k8s.io/client-go/listers/policy/v1/eviction.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/policy/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type EvictionLister interface { // evictionLister implements the EvictionLister interface. type evictionLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Eviction] } // NewEvictionLister returns a new EvictionLister. func NewEvictionLister(indexer cache.Indexer) EvictionLister { - return &evictionLister{indexer: indexer} -} - -// List lists all Evictions in the indexer. -func (s *evictionLister) List(selector labels.Selector) (ret []*v1.Eviction, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Eviction)) - }) - return ret, err + return &evictionLister{listers.New[*v1.Eviction](indexer, v1.Resource("eviction"))} } // Evictions returns an object that can list and get Evictions. 
func (s *evictionLister) Evictions(namespace string) EvictionNamespaceLister { - return evictionNamespaceLister{indexer: s.indexer, namespace: namespace} + return evictionNamespaceLister{listers.NewNamespaced[*v1.Eviction](s.ResourceIndexer, namespace)} } // EvictionNamespaceLister helps list and get Evictions. @@ -74,26 +66,5 @@ type EvictionNamespaceLister interface { // evictionNamespaceLister implements the EvictionNamespaceLister // interface. type evictionNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Evictions in the indexer for a given namespace. -func (s evictionNamespaceLister) List(selector labels.Selector) (ret []*v1.Eviction, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Eviction)) - }) - return ret, err -} - -// Get retrieves the Eviction from the indexer for a given namespace and name. -func (s evictionNamespaceLister) Get(name string) (*v1.Eviction, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("eviction"), name) - } - return obj.(*v1.Eviction), nil + listers.ResourceIndexer[*v1.Eviction] } diff --git a/vendor/k8s.io/client-go/listers/policy/v1/poddisruptionbudget.go b/vendor/k8s.io/client-go/listers/policy/v1/poddisruptionbudget.go index 8470d38bb2..38ed8144eb 100644 --- a/vendor/k8s.io/client-go/listers/policy/v1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/listers/policy/v1/poddisruptionbudget.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/policy/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type PodDisruptionBudgetLister interface { // podDisruptionBudgetLister implements the PodDisruptionBudgetLister interface. type podDisruptionBudgetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.PodDisruptionBudget] } // NewPodDisruptionBudgetLister returns a new PodDisruptionBudgetLister. func NewPodDisruptionBudgetLister(indexer cache.Indexer) PodDisruptionBudgetLister { - return &podDisruptionBudgetLister{indexer: indexer} -} - -// List lists all PodDisruptionBudgets in the indexer. -func (s *podDisruptionBudgetLister) List(selector labels.Selector) (ret []*v1.PodDisruptionBudget, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PodDisruptionBudget)) - }) - return ret, err + return &podDisruptionBudgetLister{listers.New[*v1.PodDisruptionBudget](indexer, v1.Resource("poddisruptionbudget"))} } // PodDisruptionBudgets returns an object that can list and get PodDisruptionBudgets. func (s *podDisruptionBudgetLister) PodDisruptionBudgets(namespace string) PodDisruptionBudgetNamespaceLister { - return podDisruptionBudgetNamespaceLister{indexer: s.indexer, namespace: namespace} + return podDisruptionBudgetNamespaceLister{listers.NewNamespaced[*v1.PodDisruptionBudget](s.ResourceIndexer, namespace)} } // PodDisruptionBudgetNamespaceLister helps list and get PodDisruptionBudgets. @@ -74,26 +66,5 @@ type PodDisruptionBudgetNamespaceLister interface { // podDisruptionBudgetNamespaceLister implements the PodDisruptionBudgetNamespaceLister // interface. type podDisruptionBudgetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all PodDisruptionBudgets in the indexer for a given namespace. 
-func (s podDisruptionBudgetNamespaceLister) List(selector labels.Selector) (ret []*v1.PodDisruptionBudget, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PodDisruptionBudget)) - }) - return ret, err -} - -// Get retrieves the PodDisruptionBudget from the indexer for a given namespace and name. -func (s podDisruptionBudgetNamespaceLister) Get(name string) (*v1.PodDisruptionBudget, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("poddisruptionbudget"), name) - } - return obj.(*v1.PodDisruptionBudget), nil + listers.ResourceIndexer[*v1.PodDisruptionBudget] } diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go index e1d40d0b32..0aff833524 100644 --- a/vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/policy/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type EvictionLister interface { // evictionLister implements the EvictionLister interface. type evictionLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.Eviction] } // NewEvictionLister returns a new EvictionLister. func NewEvictionLister(indexer cache.Indexer) EvictionLister { - return &evictionLister{indexer: indexer} -} - -// List lists all Evictions in the indexer. -func (s *evictionLister) List(selector labels.Selector) (ret []*v1beta1.Eviction, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Eviction)) - }) - return ret, err + return &evictionLister{listers.New[*v1beta1.Eviction](indexer, v1beta1.Resource("eviction"))} } // Evictions returns an object that can list and get Evictions. func (s *evictionLister) Evictions(namespace string) EvictionNamespaceLister { - return evictionNamespaceLister{indexer: s.indexer, namespace: namespace} + return evictionNamespaceLister{listers.NewNamespaced[*v1beta1.Eviction](s.ResourceIndexer, namespace)} } // EvictionNamespaceLister helps list and get Evictions. @@ -74,26 +66,5 @@ type EvictionNamespaceLister interface { // evictionNamespaceLister implements the EvictionNamespaceLister // interface. type evictionNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Evictions in the indexer for a given namespace. -func (s evictionNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Eviction, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Eviction)) - }) - return ret, err -} - -// Get retrieves the Eviction from the indexer for a given namespace and name. 
-func (s evictionNamespaceLister) Get(name string) (*v1beta1.Eviction, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("eviction"), name) - } - return obj.(*v1beta1.Eviction), nil + listers.ResourceIndexer[*v1beta1.Eviction] } diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go index aa08f813ee..55ae892e2b 100644 --- a/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/policy/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type PodDisruptionBudgetLister interface { // podDisruptionBudgetLister implements the PodDisruptionBudgetLister interface. type podDisruptionBudgetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.PodDisruptionBudget] } // NewPodDisruptionBudgetLister returns a new PodDisruptionBudgetLister. func NewPodDisruptionBudgetLister(indexer cache.Indexer) PodDisruptionBudgetLister { - return &podDisruptionBudgetLister{indexer: indexer} -} - -// List lists all PodDisruptionBudgets in the indexer. -func (s *podDisruptionBudgetLister) List(selector labels.Selector) (ret []*v1beta1.PodDisruptionBudget, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.PodDisruptionBudget)) - }) - return ret, err + return &podDisruptionBudgetLister{listers.New[*v1beta1.PodDisruptionBudget](indexer, v1beta1.Resource("poddisruptionbudget"))} } // PodDisruptionBudgets returns an object that can list and get PodDisruptionBudgets. func (s *podDisruptionBudgetLister) PodDisruptionBudgets(namespace string) PodDisruptionBudgetNamespaceLister { - return podDisruptionBudgetNamespaceLister{indexer: s.indexer, namespace: namespace} + return podDisruptionBudgetNamespaceLister{listers.NewNamespaced[*v1beta1.PodDisruptionBudget](s.ResourceIndexer, namespace)} } // PodDisruptionBudgetNamespaceLister helps list and get PodDisruptionBudgets. @@ -74,26 +66,5 @@ type PodDisruptionBudgetNamespaceLister interface { // podDisruptionBudgetNamespaceLister implements the PodDisruptionBudgetNamespaceLister // interface. type podDisruptionBudgetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all PodDisruptionBudgets in the indexer for a given namespace. -func (s podDisruptionBudgetNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.PodDisruptionBudget, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.PodDisruptionBudget)) - }) - return ret, err -} - -// Get retrieves the PodDisruptionBudget from the indexer for a given namespace and name. 
-func (s podDisruptionBudgetNamespaceLister) Get(name string) (*v1beta1.PodDisruptionBudget, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("poddisruptionbudget"), name) - } - return obj.(*v1beta1.PodDisruptionBudget), nil + listers.ResourceIndexer[*v1beta1.PodDisruptionBudget] } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go index 84dc003ca2..11a4cb4db4 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ClusterRoleLister interface { // clusterRoleLister implements the ClusterRoleLister interface. type clusterRoleLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.ClusterRole] } // NewClusterRoleLister returns a new ClusterRoleLister. func NewClusterRoleLister(indexer cache.Indexer) ClusterRoleLister { - return &clusterRoleLister{indexer: indexer} -} - -// List lists all ClusterRoles in the indexer. -func (s *clusterRoleLister) List(selector labels.Selector) (ret []*v1.ClusterRole, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ClusterRole)) - }) - return ret, err -} - -// Get retrieves the ClusterRole from the index for a given name. -func (s *clusterRoleLister) Get(name string) (*v1.ClusterRole, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("clusterrole"), name) - } - return obj.(*v1.ClusterRole), nil + return &clusterRoleLister{listers.New[*v1.ClusterRole](indexer, v1.Resource("clusterrole"))} } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go index ff061d4b2b..4c3583bb94 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ClusterRoleBindingLister interface { // clusterRoleBindingLister implements the ClusterRoleBindingLister interface. type clusterRoleBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.ClusterRoleBinding] } // NewClusterRoleBindingLister returns a new ClusterRoleBindingLister. func NewClusterRoleBindingLister(indexer cache.Indexer) ClusterRoleBindingLister { - return &clusterRoleBindingLister{indexer: indexer} -} - -// List lists all ClusterRoleBindings in the indexer. -func (s *clusterRoleBindingLister) List(selector labels.Selector) (ret []*v1.ClusterRoleBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ClusterRoleBinding)) - }) - return ret, err -} - -// Get retrieves the ClusterRoleBinding from the index for a given name. 
-func (s *clusterRoleBindingLister) Get(name string) (*v1.ClusterRoleBinding, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("clusterrolebinding"), name) - } - return obj.(*v1.ClusterRoleBinding), nil + return &clusterRoleBindingLister{listers.New[*v1.ClusterRoleBinding](indexer, v1.Resource("clusterrolebinding"))} } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/role.go b/vendor/k8s.io/client-go/listers/rbac/v1/role.go index 503f013b52..3e94253215 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1/role.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1/role.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type RoleLister interface { // roleLister implements the RoleLister interface. type roleLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.Role] } // NewRoleLister returns a new RoleLister. func NewRoleLister(indexer cache.Indexer) RoleLister { - return &roleLister{indexer: indexer} -} - -// List lists all Roles in the indexer. -func (s *roleLister) List(selector labels.Selector) (ret []*v1.Role, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Role)) - }) - return ret, err + return &roleLister{listers.New[*v1.Role](indexer, v1.Resource("role"))} } // Roles returns an object that can list and get Roles. func (s *roleLister) Roles(namespace string) RoleNamespaceLister { - return roleNamespaceLister{indexer: s.indexer, namespace: namespace} + return roleNamespaceLister{listers.NewNamespaced[*v1.Role](s.ResourceIndexer, namespace)} } // RoleNamespaceLister helps list and get Roles. @@ -74,26 +66,5 @@ type RoleNamespaceLister interface { // roleNamespaceLister implements the RoleNamespaceLister // interface. type roleNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Roles in the indexer for a given namespace. -func (s roleNamespaceLister) List(selector labels.Selector) (ret []*v1.Role, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Role)) - }) - return ret, err -} - -// Get retrieves the Role from the indexer for a given namespace and name. -func (s roleNamespaceLister) Get(name string) (*v1.Role, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("role"), name) - } - return obj.(*v1.Role), nil + listers.ResourceIndexer[*v1.Role] } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go index ea50c64136..1b3162a113 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type RoleBindingLister interface { // roleBindingLister implements the RoleBindingLister interface. type roleBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.RoleBinding] } // NewRoleBindingLister returns a new RoleBindingLister. 
func NewRoleBindingLister(indexer cache.Indexer) RoleBindingLister { - return &roleBindingLister{indexer: indexer} -} - -// List lists all RoleBindings in the indexer. -func (s *roleBindingLister) List(selector labels.Selector) (ret []*v1.RoleBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.RoleBinding)) - }) - return ret, err + return &roleBindingLister{listers.New[*v1.RoleBinding](indexer, v1.Resource("rolebinding"))} } // RoleBindings returns an object that can list and get RoleBindings. func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceLister { - return roleBindingNamespaceLister{indexer: s.indexer, namespace: namespace} + return roleBindingNamespaceLister{listers.NewNamespaced[*v1.RoleBinding](s.ResourceIndexer, namespace)} } // RoleBindingNamespaceLister helps list and get RoleBindings. @@ -74,26 +66,5 @@ type RoleBindingNamespaceLister interface { // roleBindingNamespaceLister implements the RoleBindingNamespaceLister // interface. type roleBindingNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all RoleBindings in the indexer for a given namespace. -func (s roleBindingNamespaceLister) List(selector labels.Selector) (ret []*v1.RoleBinding, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.RoleBinding)) - }) - return ret, err -} - -// Get retrieves the RoleBinding from the indexer for a given namespace and name. -func (s roleBindingNamespaceLister) Get(name string) (*v1.RoleBinding, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("rolebinding"), name) - } - return obj.(*v1.RoleBinding), nil + listers.ResourceIndexer[*v1.RoleBinding] } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go index 181ea95a7d..5e5bbaa5a2 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/rbac/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ClusterRoleLister interface { // clusterRoleLister implements the ClusterRoleLister interface. type clusterRoleLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.ClusterRole] } // NewClusterRoleLister returns a new ClusterRoleLister. func NewClusterRoleLister(indexer cache.Indexer) ClusterRoleLister { - return &clusterRoleLister{indexer: indexer} -} - -// List lists all ClusterRoles in the indexer. -func (s *clusterRoleLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterRole, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ClusterRole)) - }) - return ret, err -} - -// Get retrieves the ClusterRole from the index for a given name. 
-func (s *clusterRoleLister) Get(name string) (*v1alpha1.ClusterRole, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("clusterrole"), name) - } - return obj.(*v1alpha1.ClusterRole), nil + return &clusterRoleLister{listers.New[*v1alpha1.ClusterRole](indexer, v1alpha1.Resource("clusterrole"))} } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go index 29d283b6cf..d825d0a2f4 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/rbac/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ClusterRoleBindingLister interface { // clusterRoleBindingLister implements the ClusterRoleBindingLister interface. type clusterRoleBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.ClusterRoleBinding] } // NewClusterRoleBindingLister returns a new ClusterRoleBindingLister. func NewClusterRoleBindingLister(indexer cache.Indexer) ClusterRoleBindingLister { - return &clusterRoleBindingLister{indexer: indexer} -} - -// List lists all ClusterRoleBindings in the indexer. -func (s *clusterRoleBindingLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterRoleBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ClusterRoleBinding)) - }) - return ret, err -} - -// Get retrieves the ClusterRoleBinding from the index for a given name. -func (s *clusterRoleBindingLister) Get(name string) (*v1alpha1.ClusterRoleBinding, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("clusterrolebinding"), name) - } - return obj.(*v1alpha1.ClusterRoleBinding), nil + return &clusterRoleBindingLister{listers.New[*v1alpha1.ClusterRoleBinding](indexer, v1alpha1.Resource("clusterrolebinding"))} } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go index 13a64137ae..f3d2b2838d 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/rbac/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type RoleLister interface { // roleLister implements the RoleLister interface. type roleLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.Role] } // NewRoleLister returns a new RoleLister. func NewRoleLister(indexer cache.Indexer) RoleLister { - return &roleLister{indexer: indexer} -} - -// List lists all Roles in the indexer. -func (s *roleLister) List(selector labels.Selector) (ret []*v1alpha1.Role, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Role)) - }) - return ret, err + return &roleLister{listers.New[*v1alpha1.Role](indexer, v1alpha1.Resource("role"))} } // Roles returns an object that can list and get Roles. 
func (s *roleLister) Roles(namespace string) RoleNamespaceLister { - return roleNamespaceLister{indexer: s.indexer, namespace: namespace} + return roleNamespaceLister{listers.NewNamespaced[*v1alpha1.Role](s.ResourceIndexer, namespace)} } // RoleNamespaceLister helps list and get Roles. @@ -74,26 +66,5 @@ type RoleNamespaceLister interface { // roleNamespaceLister implements the RoleNamespaceLister // interface. type roleNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Roles in the indexer for a given namespace. -func (s roleNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Role, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Role)) - }) - return ret, err -} - -// Get retrieves the Role from the indexer for a given namespace and name. -func (s roleNamespaceLister) Get(name string) (*v1alpha1.Role, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("role"), name) - } - return obj.(*v1alpha1.Role), nil + listers.ResourceIndexer[*v1alpha1.Role] } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go index 0ad3d0eba0..6d6f7b7005 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/rbac/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type RoleBindingLister interface { // roleBindingLister implements the RoleBindingLister interface. type roleBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.RoleBinding] } // NewRoleBindingLister returns a new RoleBindingLister. func NewRoleBindingLister(indexer cache.Indexer) RoleBindingLister { - return &roleBindingLister{indexer: indexer} -} - -// List lists all RoleBindings in the indexer. -func (s *roleBindingLister) List(selector labels.Selector) (ret []*v1alpha1.RoleBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.RoleBinding)) - }) - return ret, err + return &roleBindingLister{listers.New[*v1alpha1.RoleBinding](indexer, v1alpha1.Resource("rolebinding"))} } // RoleBindings returns an object that can list and get RoleBindings. func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceLister { - return roleBindingNamespaceLister{indexer: s.indexer, namespace: namespace} + return roleBindingNamespaceLister{listers.NewNamespaced[*v1alpha1.RoleBinding](s.ResourceIndexer, namespace)} } // RoleBindingNamespaceLister helps list and get RoleBindings. @@ -74,26 +66,5 @@ type RoleBindingNamespaceLister interface { // roleBindingNamespaceLister implements the RoleBindingNamespaceLister // interface. type roleBindingNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all RoleBindings in the indexer for a given namespace. 
-func (s roleBindingNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.RoleBinding, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.RoleBinding)) - }) - return ret, err -} - -// Get retrieves the RoleBinding from the indexer for a given namespace and name. -func (s roleBindingNamespaceLister) Get(name string) (*v1alpha1.RoleBinding, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("rolebinding"), name) - } - return obj.(*v1alpha1.RoleBinding), nil + listers.ResourceIndexer[*v1alpha1.RoleBinding] } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go index bf6cd99cb1..bade032628 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/rbac/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ClusterRoleLister interface { // clusterRoleLister implements the ClusterRoleLister interface. type clusterRoleLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.ClusterRole] } // NewClusterRoleLister returns a new ClusterRoleLister. func NewClusterRoleLister(indexer cache.Indexer) ClusterRoleLister { - return &clusterRoleLister{indexer: indexer} -} - -// List lists all ClusterRoles in the indexer. -func (s *clusterRoleLister) List(selector labels.Selector) (ret []*v1beta1.ClusterRole, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ClusterRole)) - }) - return ret, err -} - -// Get retrieves the ClusterRole from the index for a given name. -func (s *clusterRoleLister) Get(name string) (*v1beta1.ClusterRole, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("clusterrole"), name) - } - return obj.(*v1beta1.ClusterRole), nil + return &clusterRoleLister{listers.New[*v1beta1.ClusterRole](indexer, v1beta1.Resource("clusterrole"))} } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go index 00bab2330b..1f4d391bef 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/rbac/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type ClusterRoleBindingLister interface { // clusterRoleBindingLister implements the ClusterRoleBindingLister interface. type clusterRoleBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.ClusterRoleBinding] } // NewClusterRoleBindingLister returns a new ClusterRoleBindingLister. func NewClusterRoleBindingLister(indexer cache.Indexer) ClusterRoleBindingLister { - return &clusterRoleBindingLister{indexer: indexer} -} - -// List lists all ClusterRoleBindings in the indexer. 
-func (s *clusterRoleBindingLister) List(selector labels.Selector) (ret []*v1beta1.ClusterRoleBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ClusterRoleBinding)) - }) - return ret, err -} - -// Get retrieves the ClusterRoleBinding from the index for a given name. -func (s *clusterRoleBindingLister) Get(name string) (*v1beta1.ClusterRoleBinding, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("clusterrolebinding"), name) - } - return obj.(*v1beta1.ClusterRoleBinding), nil + return &clusterRoleBindingLister{listers.New[*v1beta1.ClusterRoleBinding](indexer, v1beta1.Resource("clusterrolebinding"))} } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go index 9cd9b9042d..71666a9a03 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/rbac/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type RoleLister interface { // roleLister implements the RoleLister interface. type roleLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.Role] } // NewRoleLister returns a new RoleLister. func NewRoleLister(indexer cache.Indexer) RoleLister { - return &roleLister{indexer: indexer} -} - -// List lists all Roles in the indexer. -func (s *roleLister) List(selector labels.Selector) (ret []*v1beta1.Role, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Role)) - }) - return ret, err + return &roleLister{listers.New[*v1beta1.Role](indexer, v1beta1.Resource("role"))} } // Roles returns an object that can list and get Roles. func (s *roleLister) Roles(namespace string) RoleNamespaceLister { - return roleNamespaceLister{indexer: s.indexer, namespace: namespace} + return roleNamespaceLister{listers.NewNamespaced[*v1beta1.Role](s.ResourceIndexer, namespace)} } // RoleNamespaceLister helps list and get Roles. @@ -74,26 +66,5 @@ type RoleNamespaceLister interface { // roleNamespaceLister implements the RoleNamespaceLister // interface. type roleNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Roles in the indexer for a given namespace. -func (s roleNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Role, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Role)) - }) - return ret, err -} - -// Get retrieves the Role from the indexer for a given namespace and name. 
-func (s roleNamespaceLister) Get(name string) (*v1beta1.Role, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("role"), name) - } - return obj.(*v1beta1.Role), nil + listers.ResourceIndexer[*v1beta1.Role] } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go index 7c7c91bf3f..00f8542cbd 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/rbac/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type RoleBindingLister interface { // roleBindingLister implements the RoleBindingLister interface. type roleBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.RoleBinding] } // NewRoleBindingLister returns a new RoleBindingLister. func NewRoleBindingLister(indexer cache.Indexer) RoleBindingLister { - return &roleBindingLister{indexer: indexer} -} - -// List lists all RoleBindings in the indexer. -func (s *roleBindingLister) List(selector labels.Selector) (ret []*v1beta1.RoleBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.RoleBinding)) - }) - return ret, err + return &roleBindingLister{listers.New[*v1beta1.RoleBinding](indexer, v1beta1.Resource("rolebinding"))} } // RoleBindings returns an object that can list and get RoleBindings. func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceLister { - return roleBindingNamespaceLister{indexer: s.indexer, namespace: namespace} + return roleBindingNamespaceLister{listers.NewNamespaced[*v1beta1.RoleBinding](s.ResourceIndexer, namespace)} } // RoleBindingNamespaceLister helps list and get RoleBindings. @@ -74,26 +66,5 @@ type RoleBindingNamespaceLister interface { // roleBindingNamespaceLister implements the RoleBindingNamespaceLister // interface. type roleBindingNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all RoleBindings in the indexer for a given namespace. -func (s roleBindingNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.RoleBinding, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.RoleBinding)) - }) - return ret, err -} - -// Get retrieves the RoleBinding from the indexer for a given namespace and name. -func (s roleBindingNamespaceLister) Get(name string) (*v1beta1.RoleBinding, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("rolebinding"), name) - } - return obj.(*v1beta1.RoleBinding), nil + listers.ResourceIndexer[*v1beta1.RoleBinding] } diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclass.go b/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclass.go deleted file mode 100644 index eeb2fc3379..0000000000 --- a/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclass.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - v1alpha2 "k8s.io/api/resource/v1alpha2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ResourceClassLister helps list ResourceClasses. -// All objects returned here must be treated as read-only. -type ResourceClassLister interface { - // List lists all ResourceClasses in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.ResourceClass, err error) - // Get retrieves the ResourceClass from the index for a given name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha2.ResourceClass, error) - ResourceClassListerExpansion -} - -// resourceClassLister implements the ResourceClassLister interface. -type resourceClassLister struct { - indexer cache.Indexer -} - -// NewResourceClassLister returns a new ResourceClassLister. -func NewResourceClassLister(indexer cache.Indexer) ResourceClassLister { - return &resourceClassLister{indexer: indexer} -} - -// List lists all ResourceClasses in the indexer. -func (s *resourceClassLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.ResourceClass)) - }) - return ret, err -} - -// Get retrieves the ResourceClass from the index for a given name. -func (s *resourceClassLister) Get(name string) (*v1alpha2.ResourceClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha2.Resource("resourceclass"), name) - } - return obj.(*v1alpha2.ResourceClass), nil -} diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha3/deviceclass.go b/vendor/k8s.io/client-go/listers/resource/v1alpha3/deviceclass.go new file mode 100644 index 0000000000..0950691e2b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha3/deviceclass.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "k8s.io/api/resource/v1alpha3" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// DeviceClassLister helps list DeviceClasses. +// All objects returned here must be treated as read-only. 
+type DeviceClassLister interface { + // List lists all DeviceClasses in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha3.DeviceClass, err error) + // Get retrieves the DeviceClass from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha3.DeviceClass, error) + DeviceClassListerExpansion +} + +// deviceClassLister implements the DeviceClassLister interface. +type deviceClassLister struct { + listers.ResourceIndexer[*v1alpha3.DeviceClass] +} + +// NewDeviceClassLister returns a new DeviceClassLister. +func NewDeviceClassLister(indexer cache.Indexer) DeviceClassLister { + return &deviceClassLister{listers.New[*v1alpha3.DeviceClass](indexer, v1alpha3.Resource("deviceclass"))} +} diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha2/expansion_generated.go b/vendor/k8s.io/client-go/listers/resource/v1alpha3/expansion_generated.go similarity index 85% rename from vendor/k8s.io/client-go/listers/resource/v1alpha2/expansion_generated.go rename to vendor/k8s.io/client-go/listers/resource/v1alpha3/expansion_generated.go index 3b16e44290..b6642f635f 100644 --- a/vendor/k8s.io/client-go/listers/resource/v1alpha2/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha3/expansion_generated.go @@ -16,7 +16,11 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 + +// DeviceClassListerExpansion allows custom methods to be added to +// DeviceClassLister. +type DeviceClassListerExpansion interface{} // PodSchedulingContextListerExpansion allows custom methods to be added to // PodSchedulingContextLister. @@ -42,6 +46,6 @@ type ResourceClaimTemplateListerExpansion interface{} // ResourceClaimTemplateNamespaceLister. type ResourceClaimTemplateNamespaceListerExpansion interface{} -// ResourceClassListerExpansion allows custom methods to be added to -// ResourceClassLister. -type ResourceClassListerExpansion interface{} +// ResourceSliceListerExpansion allows custom methods to be added to +// ResourceSliceLister. +type ResourceSliceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha2/podschedulingcontext.go b/vendor/k8s.io/client-go/listers/resource/v1alpha3/podschedulingcontext.go similarity index 59% rename from vendor/k8s.io/client-go/listers/resource/v1alpha2/podschedulingcontext.go rename to vendor/k8s.io/client-go/listers/resource/v1alpha3/podschedulingcontext.go index c50b3f8890..ed9b049432 100644 --- a/vendor/k8s.io/client-go/listers/resource/v1alpha2/podschedulingcontext.go +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha3/podschedulingcontext.go @@ -16,12 +16,12 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( - v1alpha2 "k8s.io/api/resource/v1alpha2" - "k8s.io/apimachinery/pkg/api/errors" + v1alpha3 "k8s.io/api/resource/v1alpha3" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -30,7 +30,7 @@ import ( type PodSchedulingContextLister interface { // List lists all PodSchedulingContexts in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.PodSchedulingContext, err error) + List(selector labels.Selector) (ret []*v1alpha3.PodSchedulingContext, err error) // PodSchedulingContexts returns an object that can list and get PodSchedulingContexts. 
PodSchedulingContexts(namespace string) PodSchedulingContextNamespaceLister PodSchedulingContextListerExpansion @@ -38,25 +38,17 @@ type PodSchedulingContextLister interface { // podSchedulingContextLister implements the PodSchedulingContextLister interface. type podSchedulingContextLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha3.PodSchedulingContext] } // NewPodSchedulingContextLister returns a new PodSchedulingContextLister. func NewPodSchedulingContextLister(indexer cache.Indexer) PodSchedulingContextLister { - return &podSchedulingContextLister{indexer: indexer} -} - -// List lists all PodSchedulingContexts in the indexer. -func (s *podSchedulingContextLister) List(selector labels.Selector) (ret []*v1alpha2.PodSchedulingContext, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.PodSchedulingContext)) - }) - return ret, err + return &podSchedulingContextLister{listers.New[*v1alpha3.PodSchedulingContext](indexer, v1alpha3.Resource("podschedulingcontext"))} } // PodSchedulingContexts returns an object that can list and get PodSchedulingContexts. func (s *podSchedulingContextLister) PodSchedulingContexts(namespace string) PodSchedulingContextNamespaceLister { - return podSchedulingContextNamespaceLister{indexer: s.indexer, namespace: namespace} + return podSchedulingContextNamespaceLister{listers.NewNamespaced[*v1alpha3.PodSchedulingContext](s.ResourceIndexer, namespace)} } // PodSchedulingContextNamespaceLister helps list and get PodSchedulingContexts. @@ -64,36 +56,15 @@ func (s *podSchedulingContextLister) PodSchedulingContexts(namespace string) Pod type PodSchedulingContextNamespaceLister interface { // List lists all PodSchedulingContexts in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.PodSchedulingContext, err error) + List(selector labels.Selector) (ret []*v1alpha3.PodSchedulingContext, err error) // Get retrieves the PodSchedulingContext from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha2.PodSchedulingContext, error) + Get(name string) (*v1alpha3.PodSchedulingContext, error) PodSchedulingContextNamespaceListerExpansion } // podSchedulingContextNamespaceLister implements the PodSchedulingContextNamespaceLister // interface. type podSchedulingContextNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all PodSchedulingContexts in the indexer for a given namespace. -func (s podSchedulingContextNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.PodSchedulingContext, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.PodSchedulingContext)) - }) - return ret, err -} - -// Get retrieves the PodSchedulingContext from the indexer for a given namespace and name. 
-func (s podSchedulingContextNamespaceLister) Get(name string) (*v1alpha2.PodSchedulingContext, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha2.Resource("podschedulingcontext"), name) - } - return obj.(*v1alpha2.PodSchedulingContext), nil + listers.ResourceIndexer[*v1alpha3.PodSchedulingContext] } diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaim.go b/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaim.go similarity index 58% rename from vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaim.go rename to vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaim.go index 273f16af31..ac6a3e1564 100644 --- a/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaim.go +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaim.go @@ -16,12 +16,12 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( - v1alpha2 "k8s.io/api/resource/v1alpha2" - "k8s.io/apimachinery/pkg/api/errors" + v1alpha3 "k8s.io/api/resource/v1alpha3" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -30,7 +30,7 @@ import ( type ResourceClaimLister interface { // List lists all ResourceClaims in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.ResourceClaim, err error) + List(selector labels.Selector) (ret []*v1alpha3.ResourceClaim, err error) // ResourceClaims returns an object that can list and get ResourceClaims. ResourceClaims(namespace string) ResourceClaimNamespaceLister ResourceClaimListerExpansion @@ -38,25 +38,17 @@ type ResourceClaimLister interface { // resourceClaimLister implements the ResourceClaimLister interface. type resourceClaimLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha3.ResourceClaim] } // NewResourceClaimLister returns a new ResourceClaimLister. func NewResourceClaimLister(indexer cache.Indexer) ResourceClaimLister { - return &resourceClaimLister{indexer: indexer} -} - -// List lists all ResourceClaims in the indexer. -func (s *resourceClaimLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClaim, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.ResourceClaim)) - }) - return ret, err + return &resourceClaimLister{listers.New[*v1alpha3.ResourceClaim](indexer, v1alpha3.Resource("resourceclaim"))} } // ResourceClaims returns an object that can list and get ResourceClaims. func (s *resourceClaimLister) ResourceClaims(namespace string) ResourceClaimNamespaceLister { - return resourceClaimNamespaceLister{indexer: s.indexer, namespace: namespace} + return resourceClaimNamespaceLister{listers.NewNamespaced[*v1alpha3.ResourceClaim](s.ResourceIndexer, namespace)} } // ResourceClaimNamespaceLister helps list and get ResourceClaims. @@ -64,36 +56,15 @@ func (s *resourceClaimLister) ResourceClaims(namespace string) ResourceClaimName type ResourceClaimNamespaceLister interface { // List lists all ResourceClaims in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.ResourceClaim, err error) + List(selector labels.Selector) (ret []*v1alpha3.ResourceClaim, err error) // Get retrieves the ResourceClaim from the indexer for a given namespace and name. 
// Objects returned here must be treated as read-only. - Get(name string) (*v1alpha2.ResourceClaim, error) + Get(name string) (*v1alpha3.ResourceClaim, error) ResourceClaimNamespaceListerExpansion } // resourceClaimNamespaceLister implements the ResourceClaimNamespaceLister // interface. type resourceClaimNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ResourceClaims in the indexer for a given namespace. -func (s resourceClaimNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClaim, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.ResourceClaim)) - }) - return ret, err -} - -// Get retrieves the ResourceClaim from the indexer for a given namespace and name. -func (s resourceClaimNamespaceLister) Get(name string) (*v1alpha2.ResourceClaim, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha2.Resource("resourceclaim"), name) - } - return obj.(*v1alpha2.ResourceClaim), nil + listers.ResourceIndexer[*v1alpha3.ResourceClaim] } diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaimtemplate.go b/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaimtemplate.go similarity index 59% rename from vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaimtemplate.go rename to vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaimtemplate.go index 91a488b174..6c15f82bba 100644 --- a/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaimtemplate.go +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaimtemplate.go @@ -16,12 +16,12 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. -package v1alpha2 +package v1alpha3 import ( - v1alpha2 "k8s.io/api/resource/v1alpha2" - "k8s.io/apimachinery/pkg/api/errors" + v1alpha3 "k8s.io/api/resource/v1alpha3" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -30,7 +30,7 @@ import ( type ResourceClaimTemplateLister interface { // List lists all ResourceClaimTemplates in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimTemplate, err error) + List(selector labels.Selector) (ret []*v1alpha3.ResourceClaimTemplate, err error) // ResourceClaimTemplates returns an object that can list and get ResourceClaimTemplates. ResourceClaimTemplates(namespace string) ResourceClaimTemplateNamespaceLister ResourceClaimTemplateListerExpansion @@ -38,25 +38,17 @@ type ResourceClaimTemplateLister interface { // resourceClaimTemplateLister implements the ResourceClaimTemplateLister interface. type resourceClaimTemplateLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha3.ResourceClaimTemplate] } // NewResourceClaimTemplateLister returns a new ResourceClaimTemplateLister. func NewResourceClaimTemplateLister(indexer cache.Indexer) ResourceClaimTemplateLister { - return &resourceClaimTemplateLister{indexer: indexer} -} - -// List lists all ResourceClaimTemplates in the indexer. 
-func (s *resourceClaimTemplateLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimTemplate, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.ResourceClaimTemplate)) - }) - return ret, err + return &resourceClaimTemplateLister{listers.New[*v1alpha3.ResourceClaimTemplate](indexer, v1alpha3.Resource("resourceclaimtemplate"))} } // ResourceClaimTemplates returns an object that can list and get ResourceClaimTemplates. func (s *resourceClaimTemplateLister) ResourceClaimTemplates(namespace string) ResourceClaimTemplateNamespaceLister { - return resourceClaimTemplateNamespaceLister{indexer: s.indexer, namespace: namespace} + return resourceClaimTemplateNamespaceLister{listers.NewNamespaced[*v1alpha3.ResourceClaimTemplate](s.ResourceIndexer, namespace)} } // ResourceClaimTemplateNamespaceLister helps list and get ResourceClaimTemplates. @@ -64,36 +56,15 @@ func (s *resourceClaimTemplateLister) ResourceClaimTemplates(namespace string) R type ResourceClaimTemplateNamespaceLister interface { // List lists all ResourceClaimTemplates in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimTemplate, err error) + List(selector labels.Selector) (ret []*v1alpha3.ResourceClaimTemplate, err error) // Get retrieves the ResourceClaimTemplate from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha2.ResourceClaimTemplate, error) + Get(name string) (*v1alpha3.ResourceClaimTemplate, error) ResourceClaimTemplateNamespaceListerExpansion } // resourceClaimTemplateNamespaceLister implements the ResourceClaimTemplateNamespaceLister // interface. type resourceClaimTemplateNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ResourceClaimTemplates in the indexer for a given namespace. -func (s resourceClaimTemplateNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimTemplate, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.ResourceClaimTemplate)) - }) - return ret, err -} - -// Get retrieves the ResourceClaimTemplate from the indexer for a given namespace and name. -func (s resourceClaimTemplateNamespaceLister) Get(name string) (*v1alpha2.ResourceClaimTemplate, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha2.Resource("resourceclaimtemplate"), name) - } - return obj.(*v1alpha2.ResourceClaimTemplate), nil + listers.ResourceIndexer[*v1alpha3.ResourceClaimTemplate] } diff --git a/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceslice.go b/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceslice.go new file mode 100644 index 0000000000..ae87b8b66d --- /dev/null +++ b/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceslice.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "k8s.io/api/resource/v1alpha3" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// ResourceSliceLister helps list ResourceSlices. +// All objects returned here must be treated as read-only. +type ResourceSliceLister interface { + // List lists all ResourceSlices in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha3.ResourceSlice, err error) + // Get retrieves the ResourceSlice from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha3.ResourceSlice, error) + ResourceSliceListerExpansion +} + +// resourceSliceLister implements the ResourceSliceLister interface. +type resourceSliceLister struct { + listers.ResourceIndexer[*v1alpha3.ResourceSlice] +} + +// NewResourceSliceLister returns a new ResourceSliceLister. +func NewResourceSliceLister(indexer cache.Indexer) ResourceSliceLister { + return &resourceSliceLister{listers.New[*v1alpha3.ResourceSlice](indexer, v1alpha3.Resource("resourceslice"))} +} diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go index 4da84ccf8a..b9179b5685 100644 --- a/vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go +++ b/vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/scheduling/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type PriorityClassLister interface { // priorityClassLister implements the PriorityClassLister interface. type priorityClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.PriorityClass] } // NewPriorityClassLister returns a new PriorityClassLister. func NewPriorityClassLister(indexer cache.Indexer) PriorityClassLister { - return &priorityClassLister{indexer: indexer} -} - -// List lists all PriorityClasses in the indexer. -func (s *priorityClassLister) List(selector labels.Selector) (ret []*v1.PriorityClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PriorityClass)) - }) - return ret, err -} - -// Get retrieves the PriorityClass from the index for a given name. 
-func (s *priorityClassLister) Get(name string) (*v1.PriorityClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("priorityclass"), name) - } - return obj.(*v1.PriorityClass), nil + return &priorityClassLister{listers.New[*v1.PriorityClass](indexer, v1.Resource("priorityclass"))} } diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go index 3d25dc80af..776ad5ae25 100644 --- a/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go +++ b/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/scheduling/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type PriorityClassLister interface { // priorityClassLister implements the PriorityClassLister interface. type priorityClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.PriorityClass] } // NewPriorityClassLister returns a new PriorityClassLister. func NewPriorityClassLister(indexer cache.Indexer) PriorityClassLister { - return &priorityClassLister{indexer: indexer} -} - -// List lists all PriorityClasses in the indexer. -func (s *priorityClassLister) List(selector labels.Selector) (ret []*v1alpha1.PriorityClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.PriorityClass)) - }) - return ret, err -} - -// Get retrieves the PriorityClass from the index for a given name. -func (s *priorityClassLister) Get(name string) (*v1alpha1.PriorityClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("priorityclass"), name) - } - return obj.(*v1alpha1.PriorityClass), nil + return &priorityClassLister{listers.New[*v1alpha1.PriorityClass](indexer, v1alpha1.Resource("priorityclass"))} } diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/listers/scheduling/v1beta1/priorityclass.go index c848d035af..966064e5d6 100644 --- a/vendor/k8s.io/client-go/listers/scheduling/v1beta1/priorityclass.go +++ b/vendor/k8s.io/client-go/listers/scheduling/v1beta1/priorityclass.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/scheduling/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type PriorityClassLister interface { // priorityClassLister implements the PriorityClassLister interface. type priorityClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.PriorityClass] } // NewPriorityClassLister returns a new PriorityClassLister. func NewPriorityClassLister(indexer cache.Indexer) PriorityClassLister { - return &priorityClassLister{indexer: indexer} -} - -// List lists all PriorityClasses in the indexer. -func (s *priorityClassLister) List(selector labels.Selector) (ret []*v1beta1.PriorityClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.PriorityClass)) - }) - return ret, err -} - -// Get retrieves the PriorityClass from the index for a given name. 
-func (s *priorityClassLister) Get(name string) (*v1beta1.PriorityClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("priorityclass"), name) - } - return obj.(*v1beta1.PriorityClass), nil + return &priorityClassLister{listers.New[*v1beta1.PriorityClass](indexer, v1beta1.Resource("priorityclass"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1/csidriver.go b/vendor/k8s.io/client-go/listers/storage/v1/csidriver.go index 4e8ab90900..db64f45887 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1/csidriver.go +++ b/vendor/k8s.io/client-go/listers/storage/v1/csidriver.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type CSIDriverLister interface { // cSIDriverLister implements the CSIDriverLister interface. type cSIDriverLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.CSIDriver] } // NewCSIDriverLister returns a new CSIDriverLister. func NewCSIDriverLister(indexer cache.Indexer) CSIDriverLister { - return &cSIDriverLister{indexer: indexer} -} - -// List lists all CSIDrivers in the indexer. -func (s *cSIDriverLister) List(selector labels.Selector) (ret []*v1.CSIDriver, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CSIDriver)) - }) - return ret, err -} - -// Get retrieves the CSIDriver from the index for a given name. -func (s *cSIDriverLister) Get(name string) (*v1.CSIDriver, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("csidriver"), name) - } - return obj.(*v1.CSIDriver), nil + return &cSIDriverLister{listers.New[*v1.CSIDriver](indexer, v1.Resource("csidriver"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1/csinode.go b/vendor/k8s.io/client-go/listers/storage/v1/csinode.go index 93f869572c..5bfd2a43ae 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1/csinode.go +++ b/vendor/k8s.io/client-go/listers/storage/v1/csinode.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type CSINodeLister interface { // cSINodeLister implements the CSINodeLister interface. type cSINodeLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.CSINode] } // NewCSINodeLister returns a new CSINodeLister. func NewCSINodeLister(indexer cache.Indexer) CSINodeLister { - return &cSINodeLister{indexer: indexer} -} - -// List lists all CSINodes in the indexer. -func (s *cSINodeLister) List(selector labels.Selector) (ret []*v1.CSINode, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CSINode)) - }) - return ret, err -} - -// Get retrieves the CSINode from the index for a given name. 
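The lister changes in this patch all follow one pattern: the hand-written List/Get methods and the errors.NewNotFound plumbing are dropped in favor of embedding the generic listers.ResourceIndexer[T], constructed via listers.New / listers.NewNamespaced, while the public lister interfaces stay the same so callers keep compiling. A minimal consumption sketch, assuming only an in-memory cache.Indexer; the sample object and names are illustrative and not taken from this patch:

package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	storagelisters "k8s.io/client-go/listers/storage/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// The constructor signature is unchanged: a cache.Indexer in, a lister out.
	// Internally it now wraps listers.New[*storagev1.StorageClass](...).
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
	_ = indexer.Add(&storagev1.StorageClass{ObjectMeta: metav1.ObjectMeta{Name: "standard"}})

	lister := storagelisters.NewStorageClassLister(indexer)

	// List and Get behave as before; a cache miss still surfaces as a NotFound error.
	classes, err := lister.List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Println("cached storage classes:", len(classes))

	sc, err := lister.Get("standard")
	if err != nil {
		panic(err)
	}
	fmt.Println("got:", sc.Name)
}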
-func (s *cSINodeLister) Get(name string) (*v1.CSINode, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("csinode"), name) - } - return obj.(*v1.CSINode), nil + return &cSINodeLister{listers.New[*v1.CSINode](indexer, v1.Resource("csinode"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1/csistoragecapacity.go b/vendor/k8s.io/client-go/listers/storage/v1/csistoragecapacity.go index a72328c9a3..c2acfa1153 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/listers/storage/v1/csistoragecapacity.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type CSIStorageCapacityLister interface { // cSIStorageCapacityLister implements the CSIStorageCapacityLister interface. type cSIStorageCapacityLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.CSIStorageCapacity] } // NewCSIStorageCapacityLister returns a new CSIStorageCapacityLister. func NewCSIStorageCapacityLister(indexer cache.Indexer) CSIStorageCapacityLister { - return &cSIStorageCapacityLister{indexer: indexer} -} - -// List lists all CSIStorageCapacities in the indexer. -func (s *cSIStorageCapacityLister) List(selector labels.Selector) (ret []*v1.CSIStorageCapacity, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CSIStorageCapacity)) - }) - return ret, err + return &cSIStorageCapacityLister{listers.New[*v1.CSIStorageCapacity](indexer, v1.Resource("csistoragecapacity"))} } // CSIStorageCapacities returns an object that can list and get CSIStorageCapacities. func (s *cSIStorageCapacityLister) CSIStorageCapacities(namespace string) CSIStorageCapacityNamespaceLister { - return cSIStorageCapacityNamespaceLister{indexer: s.indexer, namespace: namespace} + return cSIStorageCapacityNamespaceLister{listers.NewNamespaced[*v1.CSIStorageCapacity](s.ResourceIndexer, namespace)} } // CSIStorageCapacityNamespaceLister helps list and get CSIStorageCapacities. @@ -74,26 +66,5 @@ type CSIStorageCapacityNamespaceLister interface { // cSIStorageCapacityNamespaceLister implements the CSIStorageCapacityNamespaceLister // interface. type cSIStorageCapacityNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CSIStorageCapacities in the indexer for a given namespace. -func (s cSIStorageCapacityNamespaceLister) List(selector labels.Selector) (ret []*v1.CSIStorageCapacity, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CSIStorageCapacity)) - }) - return ret, err -} - -// Get retrieves the CSIStorageCapacity from the indexer for a given namespace and name. 
-func (s cSIStorageCapacityNamespaceLister) Get(name string) (*v1.CSIStorageCapacity, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("csistoragecapacity"), name) - } - return obj.(*v1.CSIStorageCapacity), nil + listers.ResourceIndexer[*v1.CSIStorageCapacity] } diff --git a/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go b/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go index ffa3d19f50..fc37594446 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go +++ b/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type StorageClassLister interface { // storageClassLister implements the StorageClassLister interface. type storageClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.StorageClass] } // NewStorageClassLister returns a new StorageClassLister. func NewStorageClassLister(indexer cache.Indexer) StorageClassLister { - return &storageClassLister{indexer: indexer} -} - -// List lists all StorageClasses in the indexer. -func (s *storageClassLister) List(selector labels.Selector) (ret []*v1.StorageClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.StorageClass)) - }) - return ret, err -} - -// Get retrieves the StorageClass from the index for a given name. -func (s *storageClassLister) Get(name string) (*v1.StorageClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("storageclass"), name) - } - return obj.(*v1.StorageClass), nil + return &storageClassLister{listers.New[*v1.StorageClass](indexer, v1.Resource("storageclass"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go index fbc735c939..44754d6f25 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go +++ b/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go @@ -20,8 +20,8 @@ package v1 import ( v1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type VolumeAttachmentLister interface { // volumeAttachmentLister implements the VolumeAttachmentLister interface. type volumeAttachmentLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1.VolumeAttachment] } // NewVolumeAttachmentLister returns a new VolumeAttachmentLister. func NewVolumeAttachmentLister(indexer cache.Indexer) VolumeAttachmentLister { - return &volumeAttachmentLister{indexer: indexer} -} - -// List lists all VolumeAttachments in the indexer. -func (s *volumeAttachmentLister) List(selector labels.Selector) (ret []*v1.VolumeAttachment, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.VolumeAttachment)) - }) - return ret, err -} - -// Get retrieves the VolumeAttachment from the index for a given name. 
-func (s *volumeAttachmentLister) Get(name string) (*v1.VolumeAttachment, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("volumeattachment"), name) - } - return obj.(*v1.VolumeAttachment), nil + return &volumeAttachmentLister{listers.New[*v1.VolumeAttachment](indexer, v1.Resource("volumeattachment"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1alpha1/csistoragecapacity.go b/vendor/k8s.io/client-go/listers/storage/v1alpha1/csistoragecapacity.go index 0c1b5f2647..7f75aae2cd 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1alpha1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/listers/storage/v1alpha1/csistoragecapacity.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/storage/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type CSIStorageCapacityLister interface { // cSIStorageCapacityLister implements the CSIStorageCapacityLister interface. type cSIStorageCapacityLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.CSIStorageCapacity] } // NewCSIStorageCapacityLister returns a new CSIStorageCapacityLister. func NewCSIStorageCapacityLister(indexer cache.Indexer) CSIStorageCapacityLister { - return &cSIStorageCapacityLister{indexer: indexer} -} - -// List lists all CSIStorageCapacities in the indexer. -func (s *cSIStorageCapacityLister) List(selector labels.Selector) (ret []*v1alpha1.CSIStorageCapacity, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.CSIStorageCapacity)) - }) - return ret, err + return &cSIStorageCapacityLister{listers.New[*v1alpha1.CSIStorageCapacity](indexer, v1alpha1.Resource("csistoragecapacity"))} } // CSIStorageCapacities returns an object that can list and get CSIStorageCapacities. func (s *cSIStorageCapacityLister) CSIStorageCapacities(namespace string) CSIStorageCapacityNamespaceLister { - return cSIStorageCapacityNamespaceLister{indexer: s.indexer, namespace: namespace} + return cSIStorageCapacityNamespaceLister{listers.NewNamespaced[*v1alpha1.CSIStorageCapacity](s.ResourceIndexer, namespace)} } // CSIStorageCapacityNamespaceLister helps list and get CSIStorageCapacities. @@ -74,26 +66,5 @@ type CSIStorageCapacityNamespaceLister interface { // cSIStorageCapacityNamespaceLister implements the CSIStorageCapacityNamespaceLister // interface. type cSIStorageCapacityNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CSIStorageCapacities in the indexer for a given namespace. -func (s cSIStorageCapacityNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.CSIStorageCapacity, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.CSIStorageCapacity)) - }) - return ret, err -} - -// Get retrieves the CSIStorageCapacity from the indexer for a given namespace and name. 
-func (s cSIStorageCapacityNamespaceLister) Get(name string) (*v1alpha1.CSIStorageCapacity, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("csistoragecapacity"), name) - } - return obj.(*v1alpha1.CSIStorageCapacity), nil + listers.ResourceIndexer[*v1alpha1.CSIStorageCapacity] } diff --git a/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go index 3d5e2b7b71..122864ffef 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go +++ b/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/storage/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type VolumeAttachmentLister interface { // volumeAttachmentLister implements the VolumeAttachmentLister interface. type volumeAttachmentLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.VolumeAttachment] } // NewVolumeAttachmentLister returns a new VolumeAttachmentLister. func NewVolumeAttachmentLister(indexer cache.Indexer) VolumeAttachmentLister { - return &volumeAttachmentLister{indexer: indexer} -} - -// List lists all VolumeAttachments in the indexer. -func (s *volumeAttachmentLister) List(selector labels.Selector) (ret []*v1alpha1.VolumeAttachment, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.VolumeAttachment)) - }) - return ret, err -} - -// Get retrieves the VolumeAttachment from the index for a given name. -func (s *volumeAttachmentLister) Get(name string) (*v1alpha1.VolumeAttachment, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("volumeattachment"), name) - } - return obj.(*v1alpha1.VolumeAttachment), nil + return &volumeAttachmentLister{listers.New[*v1alpha1.VolumeAttachment](indexer, v1alpha1.Resource("volumeattachment"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go index f30b4a89ba..5d8ae09d7c 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go +++ b/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go @@ -20,8 +20,8 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/storage/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type VolumeAttributesClassLister interface { // volumeAttributesClassLister implements the VolumeAttributesClassLister interface. type volumeAttributesClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1alpha1.VolumeAttributesClass] } // NewVolumeAttributesClassLister returns a new VolumeAttributesClassLister. func NewVolumeAttributesClassLister(indexer cache.Indexer) VolumeAttributesClassLister { - return &volumeAttributesClassLister{indexer: indexer} -} - -// List lists all VolumeAttributesClasses in the indexer. 
-func (s *volumeAttributesClassLister) List(selector labels.Selector) (ret []*v1alpha1.VolumeAttributesClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.VolumeAttributesClass)) - }) - return ret, err -} - -// Get retrieves the VolumeAttributesClass from the index for a given name. -func (s *volumeAttributesClassLister) Get(name string) (*v1alpha1.VolumeAttributesClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("volumeattributesclass"), name) - } - return obj.(*v1alpha1.VolumeAttributesClass), nil + return &volumeAttributesClassLister{listers.New[*v1alpha1.VolumeAttributesClass](indexer, v1alpha1.Resource("volumeattributesclass"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go index c6787aa01b..6600386749 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type CSIDriverLister interface { // cSIDriverLister implements the CSIDriverLister interface. type cSIDriverLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.CSIDriver] } // NewCSIDriverLister returns a new CSIDriverLister. func NewCSIDriverLister(indexer cache.Indexer) CSIDriverLister { - return &cSIDriverLister{indexer: indexer} -} - -// List lists all CSIDrivers in the indexer. -func (s *cSIDriverLister) List(selector labels.Selector) (ret []*v1beta1.CSIDriver, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.CSIDriver)) - }) - return ret, err -} - -// Get retrieves the CSIDriver from the index for a given name. -func (s *cSIDriverLister) Get(name string) (*v1beta1.CSIDriver, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("csidriver"), name) - } - return obj.(*v1beta1.CSIDriver), nil + return &cSIDriverLister{listers.New[*v1beta1.CSIDriver](indexer, v1beta1.Resource("csidriver"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go index 809efaa369..2c29ccabf3 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type CSINodeLister interface { // cSINodeLister implements the CSINodeLister interface. type cSINodeLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.CSINode] } // NewCSINodeLister returns a new CSINodeLister. func NewCSINodeLister(indexer cache.Indexer) CSINodeLister { - return &cSINodeLister{indexer: indexer} -} - -// List lists all CSINodes in the indexer. 
-func (s *cSINodeLister) List(selector labels.Selector) (ret []*v1beta1.CSINode, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.CSINode)) - }) - return ret, err -} - -// Get retrieves the CSINode from the index for a given name. -func (s *cSINodeLister) Get(name string) (*v1beta1.CSINode, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("csinode"), name) - } - return obj.(*v1beta1.CSINode), nil + return &cSINodeLister{listers.New[*v1beta1.CSINode](indexer, v1beta1.Resource("csinode"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/csistoragecapacity.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/csistoragecapacity.go index 4680ffb7c8..365304df12 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1beta1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/csistoragecapacity.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -38,25 +38,17 @@ type CSIStorageCapacityLister interface { // cSIStorageCapacityLister implements the CSIStorageCapacityLister interface. type cSIStorageCapacityLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.CSIStorageCapacity] } // NewCSIStorageCapacityLister returns a new CSIStorageCapacityLister. func NewCSIStorageCapacityLister(indexer cache.Indexer) CSIStorageCapacityLister { - return &cSIStorageCapacityLister{indexer: indexer} -} - -// List lists all CSIStorageCapacities in the indexer. -func (s *cSIStorageCapacityLister) List(selector labels.Selector) (ret []*v1beta1.CSIStorageCapacity, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.CSIStorageCapacity)) - }) - return ret, err + return &cSIStorageCapacityLister{listers.New[*v1beta1.CSIStorageCapacity](indexer, v1beta1.Resource("csistoragecapacity"))} } // CSIStorageCapacities returns an object that can list and get CSIStorageCapacities. func (s *cSIStorageCapacityLister) CSIStorageCapacities(namespace string) CSIStorageCapacityNamespaceLister { - return cSIStorageCapacityNamespaceLister{indexer: s.indexer, namespace: namespace} + return cSIStorageCapacityNamespaceLister{listers.NewNamespaced[*v1beta1.CSIStorageCapacity](s.ResourceIndexer, namespace)} } // CSIStorageCapacityNamespaceLister helps list and get CSIStorageCapacities. @@ -74,26 +66,5 @@ type CSIStorageCapacityNamespaceLister interface { // cSIStorageCapacityNamespaceLister implements the CSIStorageCapacityNamespaceLister // interface. type cSIStorageCapacityNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CSIStorageCapacities in the indexer for a given namespace. -func (s cSIStorageCapacityNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.CSIStorageCapacity, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.CSIStorageCapacity)) - }) - return ret, err -} - -// Get retrieves the CSIStorageCapacity from the indexer for a given namespace and name. 
-func (s cSIStorageCapacityNamespaceLister) Get(name string) (*v1beta1.CSIStorageCapacity, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("csistoragecapacity"), name) - } - return obj.(*v1beta1.CSIStorageCapacity), nil + listers.ResourceIndexer[*v1beta1.CSIStorageCapacity] } diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go index c2b0d5b17d..4f56776be1 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go @@ -41,3 +41,7 @@ type StorageClassListerExpansion interface{} // VolumeAttachmentListerExpansion allows custom methods to be added to // VolumeAttachmentLister. type VolumeAttachmentListerExpansion interface{} + +// VolumeAttributesClassListerExpansion allows custom methods to be added to +// VolumeAttributesClassLister. +type VolumeAttributesClassListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go index eb7b8315c6..070c061bc5 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type StorageClassLister interface { // storageClassLister implements the StorageClassLister interface. type storageClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.StorageClass] } // NewStorageClassLister returns a new StorageClassLister. func NewStorageClassLister(indexer cache.Indexer) StorageClassLister { - return &storageClassLister{indexer: indexer} -} - -// List lists all StorageClasses in the indexer. -func (s *storageClassLister) List(selector labels.Selector) (ret []*v1beta1.StorageClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.StorageClass)) - }) - return ret, err -} - -// Get retrieves the StorageClass from the index for a given name. -func (s *storageClassLister) Get(name string) (*v1beta1.StorageClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("storageclass"), name) - } - return obj.(*v1beta1.StorageClass), nil + return &storageClassLister{listers.New[*v1beta1.StorageClass](indexer, v1beta1.Resource("storageclass"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattachment.go index bab2d317c7..d437c1eaeb 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattachment.go +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattachment.go @@ -20,8 +20,8 @@ package v1beta1 import ( v1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" "k8s.io/client-go/tools/cache" ) @@ -39,30 +39,10 @@ type VolumeAttachmentLister interface { // volumeAttachmentLister implements the VolumeAttachmentLister interface. 
type volumeAttachmentLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*v1beta1.VolumeAttachment] } // NewVolumeAttachmentLister returns a new VolumeAttachmentLister. func NewVolumeAttachmentLister(indexer cache.Indexer) VolumeAttachmentLister { - return &volumeAttachmentLister{indexer: indexer} -} - -// List lists all VolumeAttachments in the indexer. -func (s *volumeAttachmentLister) List(selector labels.Selector) (ret []*v1beta1.VolumeAttachment, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.VolumeAttachment)) - }) - return ret, err -} - -// Get retrieves the VolumeAttachment from the index for a given name. -func (s *volumeAttachmentLister) Get(name string) (*v1beta1.VolumeAttachment, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("volumeattachment"), name) - } - return obj.(*v1beta1.VolumeAttachment), nil + return &volumeAttachmentLister{listers.New[*v1beta1.VolumeAttachment](indexer, v1beta1.Resource("volumeattachment"))} } diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattributesclass.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattributesclass.go new file mode 100644 index 0000000000..2ff71e3d7f --- /dev/null +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattributesclass.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/storage/v1beta1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// VolumeAttributesClassLister helps list VolumeAttributesClasses. +// All objects returned here must be treated as read-only. +type VolumeAttributesClassLister interface { + // List lists all VolumeAttributesClasses in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta1.VolumeAttributesClass, err error) + // Get retrieves the VolumeAttributesClass from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1beta1.VolumeAttributesClass, error) + VolumeAttributesClassListerExpansion +} + +// volumeAttributesClassLister implements the VolumeAttributesClassLister interface. +type volumeAttributesClassLister struct { + listers.ResourceIndexer[*v1beta1.VolumeAttributesClass] +} + +// NewVolumeAttributesClassLister returns a new VolumeAttributesClassLister. 
+func NewVolumeAttributesClassLister(indexer cache.Indexer) VolumeAttributesClassLister { + return &volumeAttributesClassLister{listers.New[*v1beta1.VolumeAttributesClass](indexer, v1beta1.Resource("volumeattributesclass"))} +} diff --git a/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/expansion_generated.go new file mode 100644 index 0000000000..92eb5c65b4 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// StorageVersionMigrationListerExpansion allows custom methods to be added to +// StorageVersionMigrationLister. +type StorageVersionMigrationListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/storageversionmigration.go b/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/storageversionmigration.go new file mode 100644 index 0000000000..794dba25c8 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/storageversionmigration.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/storagemigration/v1alpha1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// StorageVersionMigrationLister helps list StorageVersionMigrations. +// All objects returned here must be treated as read-only. +type StorageVersionMigrationLister interface { + // List lists all StorageVersionMigrations in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.StorageVersionMigration, err error) + // Get retrieves the StorageVersionMigration from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.StorageVersionMigration, error) + StorageVersionMigrationListerExpansion +} + +// storageVersionMigrationLister implements the StorageVersionMigrationLister interface. +type storageVersionMigrationLister struct { + listers.ResourceIndexer[*v1alpha1.StorageVersionMigration] +} + +// NewStorageVersionMigrationLister returns a new StorageVersionMigrationLister. 
+func NewStorageVersionMigrationLister(indexer cache.Indexer) StorageVersionMigrationLister { + return &storageVersionMigrationLister{listers.New[*v1alpha1.StorageVersionMigration](indexer, v1alpha1.Resource("storageversionmigration"))} +} diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins_providers.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins_providers.go index 3f0688774e..2d178ce374 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins_providers.go +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins_providers.go @@ -1,6 +1,3 @@ -//go:build !providerless -// +build !providerless - /* Copyright 2016 The Kubernetes Authors. diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index 850e57daeb..f5a9f68ca4 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -37,12 +37,15 @@ import ( "golang.org/x/net/http2" "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/streaming" "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/watch" + clientfeatures "k8s.io/client-go/features" restclientwatch "k8s.io/client-go/rest/watch" "k8s.io/client-go/tools/metrics" "k8s.io/client-go/util/flowcontrol" @@ -768,6 +771,142 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) { } } +type WatchListResult struct { + // err holds any errors we might have received + // during streaming. + err error + + // items hold the collected data + items []runtime.Object + + // initialEventsEndBookmarkRV holds the resource version + // extracted from the bookmark event that marks + // the end of the stream. 
+ initialEventsEndBookmarkRV string + + // gv represents the API version + // it is used to construct the final list response + // normally this information is filled by the server + gv schema.GroupVersion +} + +func (r WatchListResult) Into(obj runtime.Object) error { + if r.err != nil { + return r.err + } + + listPtr, err := meta.GetItemsPtr(obj) + if err != nil { + return err + } + listVal, err := conversion.EnforcePtr(listPtr) + if err != nil { + return err + } + if listVal.Kind() != reflect.Slice { + return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind()) + } + + if len(r.items) == 0 { + listVal.Set(reflect.MakeSlice(listVal.Type(), 0, 0)) + } else { + listVal.Set(reflect.MakeSlice(listVal.Type(), len(r.items), len(r.items))) + for i, o := range r.items { + if listVal.Type().Elem() != reflect.TypeOf(o).Elem() { + return fmt.Errorf("received object type = %v at index = %d, doesn't match the list item type = %v", reflect.TypeOf(o).Elem(), i, listVal.Type().Elem()) + } + listVal.Index(i).Set(reflect.ValueOf(o).Elem()) + } + } + + listMeta, err := meta.ListAccessor(obj) + if err != nil { + return err + } + listMeta.SetResourceVersion(r.initialEventsEndBookmarkRV) + + typeMeta, err := meta.TypeAccessor(obj) + if err != nil { + return err + } + version := r.gv.String() + typeMeta.SetAPIVersion(version) + typeMeta.SetKind(reflect.TypeOf(obj).Elem().Name()) + + return nil +} + +// WatchList establishes a stream to get a consistent snapshot of data +// from the server as described in https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#proposal +// +// Note that the watchlist requires properly setting the ListOptions +// otherwise it just establishes a regular watch with the server. +// Check the documentation https://kubernetes.io/docs/reference/using-api/api-concepts/#streaming-lists +// to see what parameters are currently required. +func (r *Request) WatchList(ctx context.Context) WatchListResult { + if !clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient) { + return WatchListResult{err: fmt.Errorf("%q feature gate is not enabled", clientfeatures.WatchListClient)} + } + // TODO(#115478): consider validating request parameters (i.e sendInitialEvents). + // Most users use the generated client, which handles the proper setting of parameters. + // We don't have validation for other methods (e.g., the Watch) + // thus, for symmetry, we haven't added additional checks for the WatchList method. + w, err := r.Watch(ctx) + if err != nil { + return WatchListResult{err: err} + } + return r.handleWatchList(ctx, w) +} + +// handleWatchList holds the actual logic for easier unit testing. +// Note that this function will close the passed watch. +func (r *Request) handleWatchList(ctx context.Context, w watch.Interface) WatchListResult { + defer w.Stop() + var lastKey string + var items []runtime.Object + + for { + select { + case <-ctx.Done(): + return WatchListResult{err: ctx.Err()} + case event, ok := <-w.ResultChan(): + if !ok { + return WatchListResult{err: fmt.Errorf("unexpected watch close")} + } + if event.Type == watch.Error { + return WatchListResult{err: errors.FromObject(event.Object)} + } + meta, err := meta.Accessor(event.Object) + if err != nil { + return WatchListResult{err: fmt.Errorf("failed to parse watch event: %#v", event)} + } + + switch event.Type { + case watch.Added: + // the following check ensures that the response is ordered. + // earlier servers had a bug that caused them to not sort the output. 
+ // in such cases, return an error which can trigger fallback logic. + key := objectKeyFromMeta(meta) + if len(lastKey) > 0 && lastKey > key { + return WatchListResult{err: fmt.Errorf("cannot add the obj (%#v) with the key = %s, as it violates the ordering guarantees provided by the watchlist feature in beta phase, lastInsertedKey was = %s", event.Object, key, lastKey)} + } + items = append(items, event.Object) + lastKey = key + case watch.Bookmark: + if meta.GetAnnotations()[metav1.InitialEventsAnnotationKey] == "true" { + return WatchListResult{ + items: items, + initialEventsEndBookmarkRV: meta.GetResourceVersion(), + gv: r.c.content.GroupVersion, + } + } + default: + return WatchListResult{err: fmt.Errorf("unexpected watch event %#v, expected to only receive watch.Added and watch.Bookmark events", event)} + } + } + } +} + func (r *Request) newStreamWatcher(resp *http.Response) (watch.Interface, error) { contentType := resp.Header.Get("Content-Type") mediaType, params, err := mime.ParseMediaType(contentType) @@ -1470,3 +1609,10 @@ func ValidatePathSegmentName(name string, prefix bool) []string { } return IsValidPathSegmentName(name) } + +func objectKeyFromMeta(objMeta metav1.Object) string { + if len(objMeta.GetNamespace()) > 0 { + return fmt.Sprintf("%s/%s", objMeta.GetNamespace(), objMeta.GetName()) + } + return objMeta.GetName() +} diff --git a/vendor/k8s.io/client-go/rest/watch/decoder.go b/vendor/k8s.io/client-go/rest/watch/decoder.go index e95c020b2e..9e1e04d14e 100644 --- a/vendor/k8s.io/client-go/rest/watch/decoder.go +++ b/vendor/k8s.io/client-go/rest/watch/decoder.go @@ -51,7 +51,7 @@ func (d *Decoder) Decode() (watch.EventType, runtime.Object, error) { return "", nil, err } if res != &got { - return "", nil, fmt.Errorf("unable to decode to metav1.Event") + return "", nil, fmt.Errorf("unable to decode to metav1.WatchEvent") } switch got.Type { case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error), string(watch.Bookmark): diff --git a/vendor/k8s.io/client-go/restmapper/shortcut.go b/vendor/k8s.io/client-go/restmapper/shortcut.go index ca517a01d4..0afc8689d7 100644 --- a/vendor/k8s.io/client-go/restmapper/shortcut.go +++ b/vendor/k8s.io/client-go/restmapper/shortcut.go @@ -50,7 +50,7 @@ func (e shortcutExpander) KindFor(resource schema.GroupVersionResource) (schema. // In case of new CRDs this means we potentially don't have current state of discovery. // In the current wiring in k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go#toRESTMapper, // we are using DeferredDiscoveryRESTMapper which on KindFor failure will clear the - // cache and fetch all data from a cluster (see vendor/k8s.io/client-go/restmapper/discovery.go#KindFor). + // cache and fetch all data from a cluster (see k8s.io/client-go/restmapper/discovery.go#KindFor). // Thus another call to expandResourceShortcut, after a NoMatchError should successfully // read Kind to the user or an error. 
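The WatchList addition above returns a WatchListResult whose Into method materialises the streamed snapshot into an ordinary list object. A rough usage sketch, assuming the WatchListClient client-go feature gate is enabled (for example via the KUBE_FEATURE_WatchListClient environment variable) and that restClient is a REST client already configured for the core/v1 group; the function and parameter values here are illustrative, not part of the vendored files:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
)

func boolPtr(b bool) *bool { return &b }

// snapshotConfigMaps streams a consistent snapshot of ConfigMaps in one
// namespace via Request.WatchList and decodes it with WatchListResult.Into.
func snapshotConfigMaps(ctx context.Context, restClient rest.Interface, namespace string) (*corev1.ConfigMapList, error) {
	opts := metav1.ListOptions{
		// Without these streaming-list parameters WatchList degrades to a
		// plain watch, as the method's documentation notes.
		Watch:                true,
		AllowWatchBookmarks:  true,
		SendInitialEvents:    boolPtr(true),
		ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
	}

	result := restClient.Get().
		Namespace(namespace).
		Resource("configmaps").
		VersionedParams(&opts, scheme.ParameterCodec).
		WatchList(ctx)

	list := &corev1.ConfigMapList{}
	if err := result.Into(list); err != nil {
		return nil, err
	}
	return list, nil
}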
gvk, err := e.RESTMapper.KindFor(e.expandResourceShortcut(resource)) diff --git a/vendor/k8s.io/client-go/testing/actions.go b/vendor/k8s.io/client-go/testing/actions.go index c8ae0aaf5d..270cc4ddbd 100644 --- a/vendor/k8s.io/client-go/testing/actions.go +++ b/vendor/k8s.io/client-go/testing/actions.go @@ -30,41 +30,61 @@ import ( ) func NewRootGetAction(resource schema.GroupVersionResource, name string) GetActionImpl { + return NewRootGetActionWithOptions(resource, name, metav1.GetOptions{}) +} + +func NewRootGetActionWithOptions(resource schema.GroupVersionResource, name string, opts metav1.GetOptions) GetActionImpl { action := GetActionImpl{} action.Verb = "get" action.Resource = resource action.Name = name + action.GetOptions = opts return action } func NewGetAction(resource schema.GroupVersionResource, namespace, name string) GetActionImpl { + return NewGetActionWithOptions(resource, namespace, name, metav1.GetOptions{}) +} + +func NewGetActionWithOptions(resource schema.GroupVersionResource, namespace, name string, opts metav1.GetOptions) GetActionImpl { action := GetActionImpl{} action.Verb = "get" action.Resource = resource action.Namespace = namespace action.Name = name + action.GetOptions = opts return action } func NewGetSubresourceAction(resource schema.GroupVersionResource, namespace, subresource, name string) GetActionImpl { + return NewGetSubresourceActionWithOptions(resource, namespace, subresource, name, metav1.GetOptions{}) +} + +func NewGetSubresourceActionWithOptions(resource schema.GroupVersionResource, namespace, subresource, name string, opts metav1.GetOptions) GetActionImpl { action := GetActionImpl{} action.Verb = "get" action.Resource = resource action.Subresource = subresource action.Namespace = namespace action.Name = name + action.GetOptions = opts return action } func NewRootGetSubresourceAction(resource schema.GroupVersionResource, subresource, name string) GetActionImpl { + return NewRootGetSubresourceActionWithOptions(resource, subresource, name, metav1.GetOptions{}) +} + +func NewRootGetSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource, name string, opts metav1.GetOptions) GetActionImpl { action := GetActionImpl{} action.Verb = "get" action.Resource = resource action.Subresource = subresource action.Name = name + action.GetOptions = opts return action } @@ -76,6 +96,21 @@ func NewRootListAction(resource schema.GroupVersionResource, kind schema.GroupVe action.Kind = kind labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + action.ListOptions = metav1.ListOptions{LabelSelector: labelSelector.String(), FieldSelector: fieldSelector.String()} + + return action +} + +func NewRootListActionWithOptions(resource schema.GroupVersionResource, kind schema.GroupVersionKind, opts metav1.ListOptions) ListActionImpl { + action := ListActionImpl{} + action.Verb = "list" + action.Resource = resource + action.Kind = kind + action.ListOptions = opts + + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + action.ListOptions = metav1.ListOptions{LabelSelector: labelSelector.String(), FieldSelector: fieldSelector.String()} return action } @@ -86,6 +121,21 @@ func NewListAction(resource schema.GroupVersionResource, kind schema.GroupVersio action.Resource = resource action.Kind = kind action.Namespace = namespace + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + 
action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + action.ListOptions = metav1.ListOptions{LabelSelector: labelSelector.String(), FieldSelector: fieldSelector.String()} + + return action +} + +func NewListActionWithOptions(resource schema.GroupVersionResource, kind schema.GroupVersionKind, namespace string, opts metav1.ListOptions) ListActionImpl { + action := ListActionImpl{} + action.Verb = "list" + action.Resource = resource + action.Kind = kind + action.Namespace = namespace + action.ListOptions = opts + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} @@ -93,36 +143,55 @@ func NewListAction(resource schema.GroupVersionResource, kind schema.GroupVersio } func NewRootCreateAction(resource schema.GroupVersionResource, object runtime.Object) CreateActionImpl { + return NewRootCreateActionWithOptions(resource, object, metav1.CreateOptions{}) +} + +func NewRootCreateActionWithOptions(resource schema.GroupVersionResource, object runtime.Object, opts metav1.CreateOptions) CreateActionImpl { action := CreateActionImpl{} action.Verb = "create" action.Resource = resource action.Object = object + action.CreateOptions = opts return action } func NewCreateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) CreateActionImpl { + return NewCreateActionWithOptions(resource, namespace, object, metav1.CreateOptions{}) +} + +func NewCreateActionWithOptions(resource schema.GroupVersionResource, namespace string, object runtime.Object, opts metav1.CreateOptions) CreateActionImpl { action := CreateActionImpl{} action.Verb = "create" action.Resource = resource action.Namespace = namespace action.Object = object + action.CreateOptions = opts return action } func NewRootCreateSubresourceAction(resource schema.GroupVersionResource, name, subresource string, object runtime.Object) CreateActionImpl { + return NewRootCreateSubresourceActionWithOptions(resource, name, subresource, object, metav1.CreateOptions{}) +} + +func NewRootCreateSubresourceActionWithOptions(resource schema.GroupVersionResource, name, subresource string, object runtime.Object, opts metav1.CreateOptions) CreateActionImpl { action := CreateActionImpl{} action.Verb = "create" action.Resource = resource action.Subresource = subresource action.Name = name action.Object = object + action.CreateOptions = opts return action } func NewCreateSubresourceAction(resource schema.GroupVersionResource, name, subresource, namespace string, object runtime.Object) CreateActionImpl { + return NewCreateSubresourceActionWithOptions(resource, name, subresource, namespace, object, metav1.CreateOptions{}) +} + +func NewCreateSubresourceActionWithOptions(resource schema.GroupVersionResource, name, subresource, namespace string, object runtime.Object, opts metav1.CreateOptions) CreateActionImpl { action := CreateActionImpl{} action.Verb = "create" action.Resource = resource @@ -130,41 +199,61 @@ func NewCreateSubresourceAction(resource schema.GroupVersionResource, name, subr action.Subresource = subresource action.Name = name action.Object = object + action.CreateOptions = opts return action } func NewRootUpdateAction(resource schema.GroupVersionResource, object runtime.Object) UpdateActionImpl { + return NewRootUpdateActionWithOptions(resource, object, metav1.UpdateOptions{}) +} + +func NewRootUpdateActionWithOptions(resource schema.GroupVersionResource, object runtime.Object, opts metav1.UpdateOptions) UpdateActionImpl { 
action := UpdateActionImpl{} action.Verb = "update" action.Resource = resource action.Object = object + action.UpdateOptions = opts return action } func NewUpdateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) UpdateActionImpl { + return NewUpdateActionWithOptions(resource, namespace, object, metav1.UpdateOptions{}) +} + +func NewUpdateActionWithOptions(resource schema.GroupVersionResource, namespace string, object runtime.Object, opts metav1.UpdateOptions) UpdateActionImpl { action := UpdateActionImpl{} action.Verb = "update" action.Resource = resource action.Namespace = namespace action.Object = object + action.UpdateOptions = opts return action } func NewRootPatchAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte) PatchActionImpl { + return NewRootPatchActionWithOptions(resource, name, pt, patch, metav1.PatchOptions{}) +} + +func NewRootPatchActionWithOptions(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource action.Name = name action.PatchType = pt action.Patch = patch + action.PatchOptions = opts return action } func NewPatchAction(resource schema.GroupVersionResource, namespace string, name string, pt types.PatchType, patch []byte) PatchActionImpl { + return NewPatchActionWithOptions(resource, namespace, name, pt, patch, metav1.PatchOptions{}) +} + +func NewPatchActionWithOptions(resource schema.GroupVersionResource, namespace string, name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource @@ -172,11 +261,16 @@ func NewPatchAction(resource schema.GroupVersionResource, namespace string, name action.Name = name action.PatchType = pt action.Patch = patch + action.PatchOptions = opts return action } func NewRootPatchSubresourceAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl { + return NewRootPatchSubresourceActionWithOptions(resource, name, pt, patch, metav1.PatchOptions{}, subresources...) +} + +func NewRootPatchSubresourceActionWithOptions(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions, subresources ...string) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource @@ -184,11 +278,16 @@ func NewRootPatchSubresourceAction(resource schema.GroupVersionResource, name st action.Name = name action.PatchType = pt action.Patch = patch + action.PatchOptions = opts return action } func NewPatchSubresourceAction(resource schema.GroupVersionResource, namespace, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl { + return NewPatchSubresourceActionWithOptions(resource, namespace, name, pt, patch, metav1.PatchOptions{}, subresources...) 
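// Editor's note: a minimal, illustrative sketch of the *WithOptions action constructors
// added in this hunk. Only APIs visible in this patch plus standard apimachinery types
// are used; the GVR/GVK values, namespace, and object names are made up for the example.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	clienttesting "k8s.io/client-go/testing"
)

func main() {
	podsGVR := schema.GroupVersionResource{Version: "v1", Resource: "pods"}

	// The plain NewGetAction constructor now delegates here with empty options; the
	// WithOptions form lets a reactor (or an assertion on recorded actions) see the
	// metav1.GetOptions that were passed.
	get := clienttesting.NewGetActionWithOptions(podsGVR, "default", "web-0",
		metav1.GetOptions{ResourceVersion: "0"})
	fmt.Println(get.GetVerb(), get.GetName(), get.GetGetOptions().ResourceVersion)

	// The list variant records the full metav1.ListOptions alongside the parsed
	// ListRestrictions.
	list := clienttesting.NewListActionWithOptions(podsGVR,
		schema.GroupVersionKind{Version: "v1", Kind: "Pod"}, "default",
		metav1.ListOptions{LabelSelector: "app=demo", Limit: 10})
	fmt.Println(list.GetVerb(), list.GetListOptions().LabelSelector, list.GetListOptions().Limit)
}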
+} + +func NewPatchSubresourceActionWithOptions(resource schema.GroupVersionResource, namespace, name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions, subresources ...string) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource @@ -197,26 +296,38 @@ func NewPatchSubresourceAction(resource schema.GroupVersionResource, namespace, action.Name = name action.PatchType = pt action.Patch = patch + action.PatchOptions = opts return action } func NewRootUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, object runtime.Object) UpdateActionImpl { + return NewRootUpdateSubresourceActionWithOptions(resource, subresource, object, metav1.UpdateOptions{}) +} + +func NewRootUpdateSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource string, object runtime.Object, opts metav1.UpdateOptions) UpdateActionImpl { action := UpdateActionImpl{} action.Verb = "update" action.Resource = resource action.Subresource = subresource action.Object = object + action.UpdateOptions = opts return action } + func NewUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, namespace string, object runtime.Object) UpdateActionImpl { + return NewUpdateSubresourceActionWithOptions(resource, subresource, namespace, object, metav1.UpdateOptions{}) +} + +func NewUpdateSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource string, namespace string, object runtime.Object, opts metav1.UpdateOptions) UpdateActionImpl { action := UpdateActionImpl{} action.Verb = "update" action.Resource = resource action.Subresource = subresource action.Namespace = namespace action.Object = object + action.UpdateOptions = opts return action } @@ -236,11 +347,16 @@ func NewRootDeleteActionWithOptions(resource schema.GroupVersionResource, name s } func NewRootDeleteSubresourceAction(resource schema.GroupVersionResource, subresource string, name string) DeleteActionImpl { + return NewRootDeleteSubresourceActionWithOptions(resource, subresource, name, metav1.DeleteOptions{}) +} + +func NewRootDeleteSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource string, name string, opts metav1.DeleteOptions) DeleteActionImpl { action := DeleteActionImpl{} action.Verb = "delete" action.Resource = resource action.Subresource = subresource action.Name = name + action.DeleteOptions = opts return action } @@ -261,41 +377,69 @@ func NewDeleteActionWithOptions(resource schema.GroupVersionResource, namespace, } func NewDeleteSubresourceAction(resource schema.GroupVersionResource, subresource, namespace, name string) DeleteActionImpl { + return NewDeleteSubresourceActionWithOptions(resource, subresource, namespace, name, metav1.DeleteOptions{}) +} + +func NewDeleteSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource, namespace, name string, opts metav1.DeleteOptions) DeleteActionImpl { action := DeleteActionImpl{} action.Verb = "delete" action.Resource = resource action.Subresource = subresource action.Namespace = namespace action.Name = name + action.DeleteOptions = opts return action } func NewRootDeleteCollectionAction(resource schema.GroupVersionResource, opts interface{}) DeleteCollectionActionImpl { + listOpts, _ := opts.(metav1.ListOptions) + return NewRootDeleteCollectionActionWithOptions(resource, metav1.DeleteOptions{}, listOpts) +} + +func NewRootDeleteCollectionActionWithOptions(resource schema.GroupVersionResource, deleteOpts metav1.DeleteOptions, 
listOpts metav1.ListOptions) DeleteCollectionActionImpl { action := DeleteCollectionActionImpl{} action.Verb = "delete-collection" action.Resource = resource - labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.DeleteOptions = deleteOpts + action.ListOptions = listOpts + + labelSelector, fieldSelector, _ := ExtractFromListOptions(listOpts) action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} return action } func NewDeleteCollectionAction(resource schema.GroupVersionResource, namespace string, opts interface{}) DeleteCollectionActionImpl { + listOpts, _ := opts.(metav1.ListOptions) + return NewDeleteCollectionActionWithOptions(resource, namespace, metav1.DeleteOptions{}, listOpts) +} + +func NewDeleteCollectionActionWithOptions(resource schema.GroupVersionResource, namespace string, deleteOpts metav1.DeleteOptions, listOpts metav1.ListOptions) DeleteCollectionActionImpl { action := DeleteCollectionActionImpl{} action.Verb = "delete-collection" action.Resource = resource action.Namespace = namespace - labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.DeleteOptions = deleteOpts + action.ListOptions = listOpts + + labelSelector, fieldSelector, _ := ExtractFromListOptions(listOpts) action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} return action } func NewRootWatchAction(resource schema.GroupVersionResource, opts interface{}) WatchActionImpl { + listOpts, _ := opts.(metav1.ListOptions) + return NewRootWatchActionWithOptions(resource, listOpts) +} + +func NewRootWatchActionWithOptions(resource schema.GroupVersionResource, opts metav1.ListOptions) WatchActionImpl { action := WatchActionImpl{} action.Verb = "watch" action.Resource = resource + action.ListOptions = opts + labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts) action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion} @@ -328,10 +472,17 @@ func ExtractFromListOptions(opts interface{}) (labelSelector labels.Selector, fi } func NewWatchAction(resource schema.GroupVersionResource, namespace string, opts interface{}) WatchActionImpl { + listOpts, _ := opts.(metav1.ListOptions) + return NewWatchActionWithOptions(resource, namespace, listOpts) +} + +func NewWatchActionWithOptions(resource schema.GroupVersionResource, namespace string, opts metav1.ListOptions) WatchActionImpl { action := WatchActionImpl{} action.Verb = "watch" action.Resource = resource action.Namespace = namespace + action.ListOptions = opts + labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts) action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion} @@ -487,17 +638,23 @@ func (a GenericActionImpl) DeepCopy() Action { type GetActionImpl struct { ActionImpl - Name string + Name string + GetOptions metav1.GetOptions } func (a GetActionImpl) GetName() string { return a.Name } +func (a GetActionImpl) GetGetOptions() metav1.GetOptions { + return a.GetOptions +} + func (a GetActionImpl) DeepCopy() Action { return GetActionImpl{ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), Name: a.Name, + GetOptions: *a.GetOptions.DeepCopy(), } } @@ -506,6 +663,7 @@ type ListActionImpl struct { Kind schema.GroupVersionKind Name string ListRestrictions ListRestrictions + ListOptions metav1.ListOptions } func (a ListActionImpl) GetKind() schema.GroupVersionKind { @@ -516,6 +674,10 @@ func (a ListActionImpl) GetListRestrictions() ListRestrictions { return a.ListRestrictions } +func (a 
ListActionImpl) GetListOptions() metav1.ListOptions { + return a.ListOptions +} + func (a ListActionImpl) DeepCopy() Action { return ListActionImpl{ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), @@ -525,48 +687,62 @@ func (a ListActionImpl) DeepCopy() Action { Labels: a.ListRestrictions.Labels.DeepCopySelector(), Fields: a.ListRestrictions.Fields.DeepCopySelector(), }, + ListOptions: *a.ListOptions.DeepCopy(), } } type CreateActionImpl struct { ActionImpl - Name string - Object runtime.Object + Name string + Object runtime.Object + CreateOptions metav1.CreateOptions } func (a CreateActionImpl) GetObject() runtime.Object { return a.Object } +func (a CreateActionImpl) GetCreateOptions() metav1.CreateOptions { + return a.CreateOptions +} + func (a CreateActionImpl) DeepCopy() Action { return CreateActionImpl{ - ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), - Name: a.Name, - Object: a.Object.DeepCopyObject(), + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + Object: a.Object.DeepCopyObject(), + CreateOptions: *a.CreateOptions.DeepCopy(), } } type UpdateActionImpl struct { ActionImpl - Object runtime.Object + Object runtime.Object + UpdateOptions metav1.UpdateOptions } func (a UpdateActionImpl) GetObject() runtime.Object { return a.Object } +func (a UpdateActionImpl) GetUpdateOptions() metav1.UpdateOptions { + return a.UpdateOptions +} + func (a UpdateActionImpl) DeepCopy() Action { return UpdateActionImpl{ - ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), - Object: a.Object.DeepCopyObject(), + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Object: a.Object.DeepCopyObject(), + UpdateOptions: *a.UpdateOptions.DeepCopy(), } } type PatchActionImpl struct { ActionImpl - Name string - PatchType types.PatchType - Patch []byte + Name string + PatchType types.PatchType + Patch []byte + PatchOptions metav1.PatchOptions } func (a PatchActionImpl) GetName() string { @@ -581,14 +757,19 @@ func (a PatchActionImpl) GetPatchType() types.PatchType { return a.PatchType } +func (a PatchActionImpl) GetPatchOptions() metav1.PatchOptions { + return a.PatchOptions +} + func (a PatchActionImpl) DeepCopy() Action { patch := make([]byte, len(a.Patch)) copy(patch, a.Patch) return PatchActionImpl{ - ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), - Name: a.Name, - PatchType: a.PatchType, - Patch: patch, + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + PatchType: a.PatchType, + Patch: patch, + PatchOptions: *a.PatchOptions.DeepCopy(), } } @@ -617,12 +798,22 @@ func (a DeleteActionImpl) DeepCopy() Action { type DeleteCollectionActionImpl struct { ActionImpl ListRestrictions ListRestrictions + DeleteOptions metav1.DeleteOptions + ListOptions metav1.ListOptions } func (a DeleteCollectionActionImpl) GetListRestrictions() ListRestrictions { return a.ListRestrictions } +func (a DeleteCollectionActionImpl) GetDeleteOptions() metav1.DeleteOptions { + return a.DeleteOptions +} + +func (a DeleteCollectionActionImpl) GetListOptions() metav1.ListOptions { + return a.ListOptions +} + func (a DeleteCollectionActionImpl) DeepCopy() Action { return DeleteCollectionActionImpl{ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), @@ -630,18 +821,25 @@ func (a DeleteCollectionActionImpl) DeepCopy() Action { Labels: a.ListRestrictions.Labels.DeepCopySelector(), Fields: a.ListRestrictions.Fields.DeepCopySelector(), }, + DeleteOptions: *a.DeleteOptions.DeepCopy(), + ListOptions: *a.ListOptions.DeepCopy(), } } type WatchActionImpl struct { ActionImpl WatchRestrictions WatchRestrictions + 
ListOptions metav1.ListOptions } func (a WatchActionImpl) GetWatchRestrictions() WatchRestrictions { return a.WatchRestrictions } +func (a WatchActionImpl) GetListOptions() metav1.ListOptions { + return a.ListOptions +} + func (a WatchActionImpl) DeepCopy() Action { return WatchActionImpl{ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), @@ -650,6 +848,7 @@ func (a WatchActionImpl) DeepCopy() Action { Fields: a.WatchRestrictions.Fields.DeepCopySelector(), ResourceVersion: a.WatchRestrictions.ResourceVersion, }, + ListOptions: *a.ListOptions.DeepCopy(), } } diff --git a/vendor/k8s.io/client-go/testing/fixture.go b/vendor/k8s.io/client-go/testing/fixture.go index 396840670f..d288a3aa45 100644 --- a/vendor/k8s.io/client-go/testing/fixture.go +++ b/vendor/k8s.io/client-go/testing/fixture.go @@ -19,19 +19,24 @@ package testing import ( "fmt" "reflect" + "sigs.k8s.io/structured-merge-diff/v4/typed" + "sigs.k8s.io/yaml" "sort" "strings" "sync" - jsonpatch "github.com/evanphx/json-patch" + jsonpatch "gopkg.in/evanphx/json-patch.v4" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/meta/testrestmapper" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/managedfields" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/watch" restclient "k8s.io/client-go/rest" @@ -46,26 +51,32 @@ type ObjectTracker interface { Add(obj runtime.Object) error // Get retrieves the object by its kind, namespace and name. - Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) + Get(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.GetOptions) (runtime.Object, error) // Create adds an object to the tracker in the specified namespace. - Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error + Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.CreateOptions) error // Update updates an existing object in the tracker in the specified namespace. - Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error + Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.UpdateOptions) error + + // Patch patches an existing object in the tracker in the specified namespace. + Patch(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.PatchOptions) error + + // Apply applies an object in the tracker in the specified namespace. + Apply(gvr schema.GroupVersionResource, applyConfiguration runtime.Object, ns string, opts ...metav1.PatchOptions) error // List retrieves all objects of a given kind in the given // namespace. Only non-List kinds are accepted. - List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) + List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string, opts ...metav1.ListOptions) (runtime.Object, error) // Delete deletes an existing object from the tracker. If object // didn't exist in the tracker prior to deletion, Delete returns // no error. - Delete(gvr schema.GroupVersionResource, ns, name string) error + Delete(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.DeleteOptions) error // Watch watches objects from the tracker. 
Watch returns a channel // which will push added / modified / deleted object. - Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) + Watch(gvr schema.GroupVersionResource, ns string, opts ...metav1.ListOptions) (watch.Interface, error) } // ObjectScheme abstracts the implementation of common operations on objects. @@ -76,133 +87,200 @@ type ObjectScheme interface { // ObjectReaction returns a ReactionFunc that applies core.Action to // the given tracker. +// +// If tracker also implements ManagedFieldObjectTracker, then managed fields +// will be handled by the tracker and apply patch actions will be evaluated +// using the field manager and will take field ownership into consideration. +// Without a ManagedFieldObjectTracker, apply patch actions do not consider +// field ownership. +// +// WARNING: There is no server side defaulting, validation, or conversion handled +// by the fake client and subresources are not handled accurately (fields in the +// root resource are not automatically updated when a scale resource is updated, for example). func ObjectReaction(tracker ObjectTracker) ReactionFunc { + reactor := objectTrackerReact{tracker: tracker} return func(action Action) (bool, runtime.Object, error) { - ns := action.GetNamespace() - gvr := action.GetResource() // Here and below we need to switch on implementation types, // not on interfaces, as some interfaces are identical // (e.g. UpdateAction and CreateAction), so if we use them, // updates and creates end up matching the same case branch. switch action := action.(type) { - case ListActionImpl: - obj, err := tracker.List(gvr, action.GetKind(), ns) + obj, err := reactor.List(action) return true, obj, err - case GetActionImpl: - obj, err := tracker.Get(gvr, ns, action.GetName()) + obj, err := reactor.Get(action) return true, obj, err - case CreateActionImpl: - objMeta, err := meta.Accessor(action.GetObject()) - if err != nil { - return true, nil, err - } - if action.GetSubresource() == "" { - err = tracker.Create(gvr, action.GetObject(), ns) - } else { - oldObj, getOldObjErr := tracker.Get(gvr, ns, objMeta.GetName()) - if getOldObjErr != nil { - return true, nil, getOldObjErr - } - // Check whether the existing historical object type is the same as the current operation object type that needs to be updated, and if it is the same, perform the update operation. - if reflect.TypeOf(oldObj) == reflect.TypeOf(action.GetObject()) { - // TODO: Currently we're handling subresource creation as an update - // on the enclosing resource. This works for some subresources but - // might not be generic enough. - err = tracker.Update(gvr, action.GetObject(), ns) - } else { - // If the historical object type is different from the current object type, need to make sure we return the object submitted,don't persist the submitted object in the tracker. 
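// Editor's note: a minimal sketch of how ObjectReaction is typically wired into a
// testing.Fake; this mirrors what the generated fake clientsets already do for the
// caller. The scheme is assumed to have the relevant API types registered, and the
// helper name newFakeWithTracker is purely illustrative.
package exampletest

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/apimachinery/pkg/watch"
	clienttesting "k8s.io/client-go/testing"
)

func newFakeWithTracker(scheme *runtime.Scheme) (*clienttesting.Fake, clienttesting.ObjectTracker) {
	codecs := serializer.NewCodecFactory(scheme)
	tracker := clienttesting.NewObjectTracker(scheme, codecs.UniversalDecoder())

	f := &clienttesting.Fake{}
	// ObjectReaction turns recorded actions (get/list/create/update/patch/delete, and
	// with this change apply patches as well) into operations on the tracker.
	f.AddReactor("*", "*", clienttesting.ObjectReaction(tracker))
	// Watches are served by a stub here; real fakes plumb the tracker's Watch method in.
	f.AddWatchReactor("*", clienttesting.DefaultWatchReactor(watch.NewFake(), nil))
	return f, tracker
}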
- return true, action.GetObject(), nil - } - } - if err != nil { - return true, nil, err - } - obj, err := tracker.Get(gvr, ns, objMeta.GetName()) + obj, err := reactor.Create(action) return true, obj, err - case UpdateActionImpl: - objMeta, err := meta.Accessor(action.GetObject()) - if err != nil { - return true, nil, err - } - err = tracker.Update(gvr, action.GetObject(), ns) - if err != nil { - return true, nil, err - } - obj, err := tracker.Get(gvr, ns, objMeta.GetName()) + obj, err := reactor.Update(action) return true, obj, err - case DeleteActionImpl: - err := tracker.Delete(gvr, ns, action.GetName()) - if err != nil { - return true, nil, err - } - return true, nil, nil - + obj, err := reactor.Delete(action) + return true, obj, err case PatchActionImpl: - obj, err := tracker.Get(gvr, ns, action.GetName()) - if err != nil { - return true, nil, err + if action.GetPatchType() == types.ApplyPatchType { + obj, err := reactor.Apply(action) + return true, obj, err } + obj, err := reactor.Patch(action) + return true, obj, err + default: + return false, nil, fmt.Errorf("no reaction implemented for %s", action) + } + } +} - old, err := json.Marshal(obj) - if err != nil { - return true, nil, err - } +type objectTrackerReact struct { + tracker ObjectTracker +} - // reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields - // in obj that are removed by patch are cleared - value := reflect.ValueOf(obj) - value.Elem().Set(reflect.New(value.Type().Elem()).Elem()) - - switch action.GetPatchType() { - case types.JSONPatchType: - patch, err := jsonpatch.DecodePatch(action.GetPatch()) - if err != nil { - return true, nil, err - } - modified, err := patch.Apply(old) - if err != nil { - return true, nil, err - } - - if err = json.Unmarshal(modified, obj); err != nil { - return true, nil, err - } - case types.MergePatchType: - modified, err := jsonpatch.MergePatch(old, action.GetPatch()) - if err != nil { - return true, nil, err - } - - if err := json.Unmarshal(modified, obj); err != nil { - return true, nil, err - } - case types.StrategicMergePatchType, types.ApplyPatchType: - mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) - if err != nil { - return true, nil, err - } - if err = json.Unmarshal(mergedByte, obj); err != nil { - return true, nil, err - } - default: - return true, nil, fmt.Errorf("PatchType is not supported") - } +func (o objectTrackerReact) List(action ListActionImpl) (runtime.Object, error) { + return o.tracker.List(action.GetResource(), action.GetKind(), action.GetNamespace(), action.ListOptions) +} - if err = tracker.Update(gvr, obj, ns); err != nil { - return true, nil, err - } +func (o objectTrackerReact) Get(action GetActionImpl) (runtime.Object, error) { + return o.tracker.Get(action.GetResource(), action.GetNamespace(), action.GetName(), action.GetOptions) +} + +func (o objectTrackerReact) Create(action CreateActionImpl) (runtime.Object, error) { + ns := action.GetNamespace() + gvr := action.GetResource() + objMeta, err := meta.Accessor(action.GetObject()) + if err != nil { + return nil, err + } + if action.GetSubresource() == "" { + err = o.tracker.Create(gvr, action.GetObject(), ns, action.CreateOptions) + if err != nil { + return nil, err + } + } else { + oldObj, getOldObjErr := o.tracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{}) + if getOldObjErr != nil { + return nil, getOldObjErr + } + // Check whether the existing historical object type is the same as the current operation object type that 
needs to be updated, and if it is the same, perform the update operation. + if reflect.TypeOf(oldObj) == reflect.TypeOf(action.GetObject()) { + // TODO: Currently we're handling subresource creation as an update + // on the enclosing resource. This works for some subresources but + // might not be generic enough. + err = o.tracker.Update(gvr, action.GetObject(), ns, metav1.UpdateOptions{ + DryRun: action.CreateOptions.DryRun, + FieldManager: action.CreateOptions.FieldManager, + FieldValidation: action.CreateOptions.FieldValidation, + }) + } else { + // If the historical object type is different from the current object type, need to make sure we return the object submitted,don't persist the submitted object in the tracker. + return action.GetObject(), nil + } + } + if err != nil { + return nil, err + } + obj, err := o.tracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{}) + return obj, err +} + +func (o objectTrackerReact) Update(action UpdateActionImpl) (runtime.Object, error) { + ns := action.GetNamespace() + gvr := action.GetResource() + objMeta, err := meta.Accessor(action.GetObject()) + if err != nil { + return nil, err + } - return true, obj, nil + err = o.tracker.Update(gvr, action.GetObject(), ns, action.UpdateOptions) + if err != nil { + return nil, err + } - default: - return false, nil, fmt.Errorf("no reaction implemented for %s", action) + obj, err := o.tracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{}) + return obj, err +} + +func (o objectTrackerReact) Delete(action DeleteActionImpl) (runtime.Object, error) { + err := o.tracker.Delete(action.GetResource(), action.GetNamespace(), action.GetName(), action.DeleteOptions) + return nil, err +} + +func (o objectTrackerReact) Apply(action PatchActionImpl) (runtime.Object, error) { + ns := action.GetNamespace() + gvr := action.GetResource() + + patchObj := &unstructured.Unstructured{Object: map[string]interface{}{}} + if err := yaml.Unmarshal(action.GetPatch(), &patchObj.Object); err != nil { + return nil, err + } + err := o.tracker.Apply(gvr, patchObj, ns, action.PatchOptions) + if err != nil { + return nil, err + } + obj, err := o.tracker.Get(gvr, ns, action.GetName(), metav1.GetOptions{}) + return obj, err +} + +func (o objectTrackerReact) Patch(action PatchActionImpl) (runtime.Object, error) { + ns := action.GetNamespace() + gvr := action.GetResource() + + obj, err := o.tracker.Get(gvr, ns, action.GetName(), metav1.GetOptions{}) + if err != nil { + return nil, err + } + + old, err := json.Marshal(obj) + if err != nil { + return nil, err + } + + // reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields + // in obj that are removed by patch are cleared + value := reflect.ValueOf(obj) + value.Elem().Set(reflect.New(value.Type().Elem()).Elem()) + + switch action.GetPatchType() { + case types.JSONPatchType: + patch, err := jsonpatch.DecodePatch(action.GetPatch()) + if err != nil { + return nil, err + } + modified, err := patch.Apply(old) + if err != nil { + return nil, err + } + + if err = json.Unmarshal(modified, obj); err != nil { + return nil, err } + case types.MergePatchType: + modified, err := jsonpatch.MergePatch(old, action.GetPatch()) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(modified, obj); err != nil { + return nil, err + } + case types.StrategicMergePatchType: + mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) + if err != nil { + return nil, err + } + if err = json.Unmarshal(mergedByte, obj); err != nil { + 
return nil, err + } + default: + return nil, fmt.Errorf("PatchType %s is not supported", action.GetPatchType()) } + + if err = o.tracker.Patch(gvr, obj, ns, action.PatchOptions); err != nil { + return nil, err + } + + return obj, nil } type tracker struct { @@ -231,7 +309,11 @@ func NewObjectTracker(scheme ObjectScheme, decoder runtime.Decoder) ObjectTracke } } -func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) { +func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string, opts ...metav1.ListOptions) (runtime.Object, error) { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return nil, err + } // Heuristic for list kind: original kind + List suffix. Might // not always be true but this tracker has a pretty limited // understanding of the actual API model. @@ -270,7 +352,12 @@ func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionK return list.DeepCopyObject(), nil } -func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) { +func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string, opts ...metav1.ListOptions) (watch.Interface, error) { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return nil, err + } + t.lock.Lock() defer t.lock.Unlock() @@ -283,8 +370,12 @@ func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string) (watch.Inter return fakewatcher, nil } -func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) { - errNotFound := errors.NewNotFound(gvr.GroupResource(), name) +func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.GetOptions) (runtime.Object, error) { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return nil, err + } + errNotFound := apierrors.NewNotFound(gvr.GroupResource(), name) t.lock.RLock() defer t.lock.RUnlock() @@ -305,7 +396,7 @@ func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime obj := matchingObj.DeepCopyObject() if status, ok := obj.(*metav1.Status); ok { if status.Status != metav1.StatusSuccess { - return nil, &errors.StatusError{ErrStatus: *status} + return nil, &apierrors.StatusError{ErrStatus: *status} } } @@ -352,11 +443,70 @@ func (t *tracker) Add(obj runtime.Object) error { return nil } -func (t *tracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { +func (t *tracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.CreateOptions) error { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return err + } return t.add(gvr, obj, ns, false) } -func (t *tracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { +func (t *tracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.UpdateOptions) error { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return err + } + return t.add(gvr, obj, ns, true) +} + +func (t *tracker) Patch(gvr schema.GroupVersionResource, patchedObject runtime.Object, ns string, opts ...metav1.PatchOptions) error { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return err + } + return t.add(gvr, patchedObject, ns, true) +} + +func (t *tracker) Apply(gvr schema.GroupVersionResource, applyConfiguration runtime.Object, ns string, opts ...metav1.PatchOptions) error { + _, err := assertOptionalSingleArgument(opts) + 
if err != nil { + return err + } + applyConfigurationMeta, err := meta.Accessor(applyConfiguration) + if err != nil { + return err + } + + obj, err := t.Get(gvr, ns, applyConfigurationMeta.GetName(), metav1.GetOptions{}) + if err != nil { + return err + } + + old, err := json.Marshal(obj) + if err != nil { + return err + } + + // reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields + // in obj that are removed by patch are cleared + value := reflect.ValueOf(obj) + value.Elem().Set(reflect.New(value.Type().Elem()).Elem()) + + // For backward compatibility with behavior 1.30 and earlier, continue to handle apply + // via strategic merge patch (clients may use fake.NewClientset and ManagedFieldObjectTracker + // for full field manager support). + patch, err := json.Marshal(applyConfiguration) + if err != nil { + return err + } + mergedByte, err := strategicpatch.StrategicMergePatch(old, patch, obj) + if err != nil { + return err + } + if err = json.Unmarshal(mergedByte, obj); err != nil { + return err + } + return t.add(gvr, obj, ns, true) } @@ -398,7 +548,7 @@ func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns st if ns != newMeta.GetNamespace() { msg := fmt.Sprintf("request namespace does not match object namespace, request: %q object: %q", ns, newMeta.GetNamespace()) - return errors.NewBadRequest(msg) + return apierrors.NewBadRequest(msg) } _, ok := t.objects[gvr] @@ -416,12 +566,12 @@ func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns st t.objects[gvr][namespacedName] = obj return nil } - return errors.NewAlreadyExists(gr, newMeta.GetName()) + return apierrors.NewAlreadyExists(gr, newMeta.GetName()) } if replaceExisting { // Tried to update but no matching object was found. - return errors.NewNotFound(gr, newMeta.GetName()) + return apierrors.NewNotFound(gr, newMeta.GetName()) } t.objects[gvr][namespacedName] = obj @@ -451,19 +601,23 @@ func (t *tracker) addList(obj runtime.Object, replaceExisting bool) error { return nil } -func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error { +func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.DeleteOptions) error { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return err + } t.lock.Lock() defer t.lock.Unlock() objs, ok := t.objects[gvr] if !ok { - return errors.NewNotFound(gvr.GroupResource(), name) + return apierrors.NewNotFound(gvr.GroupResource(), name) } namespacedName := types.NamespacedName{Namespace: ns, Name: name} obj, ok := objs[namespacedName] if !ok { - return errors.NewNotFound(gvr.GroupResource(), name) + return apierrors.NewNotFound(gvr.GroupResource(), name) } delete(objs, namespacedName) @@ -473,6 +627,203 @@ func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error return nil } +type managedFieldObjectTracker struct { + ObjectTracker + scheme ObjectScheme + objectConverter runtime.ObjectConvertor + mapper meta.RESTMapper + typeConverter managedfields.TypeConverter +} + +var _ ObjectTracker = &managedFieldObjectTracker{} + +// NewFieldManagedObjectTracker returns an ObjectTracker that can be used to keep track +// of objects and managed fields for the fake clientset. Mostly useful for unit tests. 
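// Editor's note: a minimal sketch of the constructor documented above and declared just
// below, assuming the caller already has a populated *runtime.Scheme. The deduced type
// converter from apimachinery is used only to keep the example self-contained; tests
// with real OpenAPI schemas would supply a schema-backed managedfields.TypeConverter.
package exampletest

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/apimachinery/pkg/util/managedfields"
	clienttesting "k8s.io/client-go/testing"
)

func newManagedFieldsTracker(scheme *runtime.Scheme) clienttesting.ObjectTracker {
	codecs := serializer.NewCodecFactory(scheme)
	// With this tracker behind ObjectReaction, apply-patch actions go through a real
	// field manager, so field ownership and conflicts behave much closer to a live
	// apiserver than the strategic-merge fallback used by the plain tracker.
	return clienttesting.NewFieldManagedObjectTracker(
		scheme,
		codecs.UniversalDecoder(),
		managedfields.NewDeducedTypeConverter(),
	)
}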
+func NewFieldManagedObjectTracker(scheme *runtime.Scheme, decoder runtime.Decoder, typeConverter managedfields.TypeConverter) ObjectTracker { + return &managedFieldObjectTracker{ + ObjectTracker: NewObjectTracker(scheme, decoder), + scheme: scheme, + objectConverter: scheme, + mapper: testrestmapper.TestOnlyStaticRESTMapper(scheme), + typeConverter: typeConverter, + } +} + +func (t *managedFieldObjectTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string, vopts ...metav1.CreateOptions) error { + opts, err := assertOptionalSingleArgument(vopts) + if err != nil { + return err + } + gvk, err := t.mapper.KindFor(gvr) + if err != nil { + return err + } + mgr, err := t.fieldManagerFor(gvk) + if err != nil { + return err + } + + objType, err := meta.TypeAccessor(obj) + if err != nil { + return err + } + // Stamp GVK + apiVersion, kind := gvk.ToAPIVersionAndKind() + objType.SetAPIVersion(apiVersion) + objType.SetKind(kind) + + objMeta, err := meta.Accessor(obj) + if err != nil { + return err + } + liveObject, err := t.ObjectTracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + liveObject, err = t.scheme.New(gvk) + if err != nil { + return err + } + liveObject.GetObjectKind().SetGroupVersionKind(gvk) + } else if err != nil { + return err + } + objWithManagedFields, err := mgr.Update(liveObject, obj, opts.FieldManager) + if err != nil { + return err + } + return t.ObjectTracker.Create(gvr, objWithManagedFields, ns, opts) +} + +func (t *managedFieldObjectTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, vopts ...metav1.UpdateOptions) error { + opts, err := assertOptionalSingleArgument(vopts) + if err != nil { + return err + } + gvk, err := t.mapper.KindFor(gvr) + if err != nil { + return err + } + mgr, err := t.fieldManagerFor(gvk) + if err != nil { + return err + } + + objMeta, err := meta.Accessor(obj) + if err != nil { + return err + } + oldObj, err := t.ObjectTracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{}) + if err != nil { + return err + } + objWithManagedFields, err := mgr.Update(oldObj, obj, opts.FieldManager) + if err != nil { + return err + } + + return t.ObjectTracker.Update(gvr, objWithManagedFields, ns, opts) +} + +func (t *managedFieldObjectTracker) Patch(gvr schema.GroupVersionResource, patchedObject runtime.Object, ns string, vopts ...metav1.PatchOptions) error { + opts, err := assertOptionalSingleArgument(vopts) + if err != nil { + return err + } + gvk, err := t.mapper.KindFor(gvr) + if err != nil { + return err + } + mgr, err := t.fieldManagerFor(gvk) + if err != nil { + return err + } + + objMeta, err := meta.Accessor(patchedObject) + if err != nil { + return err + } + oldObj, err := t.ObjectTracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{}) + if err != nil { + return err + } + objWithManagedFields, err := mgr.Update(oldObj, patchedObject, opts.FieldManager) + if err != nil { + return err + } + return t.ObjectTracker.Patch(gvr, objWithManagedFields, ns, vopts...) 
+} + +func (t *managedFieldObjectTracker) Apply(gvr schema.GroupVersionResource, applyConfiguration runtime.Object, ns string, vopts ...metav1.PatchOptions) error { + opts, err := assertOptionalSingleArgument(vopts) + if err != nil { + return err + } + gvk, err := t.mapper.KindFor(gvr) + if err != nil { + return err + } + applyConfigurationMeta, err := meta.Accessor(applyConfiguration) + if err != nil { + return err + } + + exists := true + liveObject, err := t.ObjectTracker.Get(gvr, ns, applyConfigurationMeta.GetName(), metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + exists = false + liveObject, err = t.scheme.New(gvk) + if err != nil { + return err + } + liveObject.GetObjectKind().SetGroupVersionKind(gvk) + } else if err != nil { + return err + } + mgr, err := t.fieldManagerFor(gvk) + if err != nil { + return err + } + force := false + if opts.Force != nil { + force = *opts.Force + } + objWithManagedFields, err := mgr.Apply(liveObject, applyConfiguration, opts.FieldManager, force) + if err != nil { + return err + } + + if !exists { + return t.ObjectTracker.Create(gvr, objWithManagedFields, ns, metav1.CreateOptions{ + DryRun: opts.DryRun, + FieldManager: opts.FieldManager, + FieldValidation: opts.FieldValidation, + }) + } else { + return t.ObjectTracker.Update(gvr, objWithManagedFields, ns, metav1.UpdateOptions{ + DryRun: opts.DryRun, + FieldManager: opts.FieldManager, + FieldValidation: opts.FieldValidation, + }) + } +} + +func (t *managedFieldObjectTracker) fieldManagerFor(gvk schema.GroupVersionKind) (*managedfields.FieldManager, error) { + return managedfields.NewDefaultFieldManager( + t.typeConverter, + t.objectConverter, + &objectDefaulter{}, + t.scheme, + gvk, + gvk.GroupVersion(), + "", + nil) +} + +// objectDefaulter implements runtime.Defaulter, but it actually +// does nothing. +type objectDefaulter struct{} + +func (d *objectDefaulter) Default(_ runtime.Object) {} + // filterByNamespace returns all objects in the collection that // match provided namespace. Empty namespace matches // non-namespaced objects. @@ -579,3 +930,76 @@ func resourceCovers(resource string, action Action) bool { return false } + +// assertOptionalSingleArgument returns an error if there is more than one variadic argument. +// Otherwise, it returns the first variadic argument, or zero value if there are no arguments. +func assertOptionalSingleArgument[T any](arguments []T) (T, error) { + var a T + switch len(arguments) { + case 0: + return a, nil + case 1: + return arguments[0], nil + default: + return a, fmt.Errorf("expected only one option argument but got %d", len(arguments)) + } +} + +type TypeResolver interface { + Type(openAPIName string) typed.ParseableType +} + +type TypeConverter struct { + Scheme *runtime.Scheme + TypeResolver TypeResolver +} + +func (tc TypeConverter) ObjectToTyped(obj runtime.Object, opts ...typed.ValidationOptions) (*typed.TypedValue, error) { + gvk := obj.GetObjectKind().GroupVersionKind() + name, err := tc.openAPIName(gvk) + if err != nil { + return nil, err + } + t := tc.TypeResolver.Type(name) + switch o := obj.(type) { + case *unstructured.Unstructured: + return t.FromUnstructured(o.UnstructuredContent(), opts...) + default: + return t.FromStructured(obj, opts...) 
+ } +} + +func (tc TypeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) { + vu := value.AsValue().Unstructured() + switch o := vu.(type) { + case map[string]interface{}: + return &unstructured.Unstructured{Object: o}, nil + default: + return nil, fmt.Errorf("failed to convert value to unstructured for type %T", vu) + } +} + +func (tc TypeConverter) openAPIName(kind schema.GroupVersionKind) (string, error) { + example, err := tc.Scheme.New(kind) + if err != nil { + return "", err + } + rtype := reflect.TypeOf(example).Elem() + name := friendlyName(rtype.PkgPath() + "." + rtype.Name()) + return name, nil +} + +// This is a copy of openapi.friendlyName. +// TODO: consider introducing a shared version of this function in apimachinery. +func friendlyName(name string) string { + nameParts := strings.Split(name, "/") + // Reverse first part. e.g., io.k8s... instead of k8s.io... + if len(nameParts) > 0 && strings.Contains(nameParts[0], ".") { + parts := strings.Split(nameParts[0], ".") + for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 { + parts[i], parts[j] = parts[j], parts[i] + } + nameParts[0] = strings.Join(parts, ".") + } + return strings.Join(nameParts, ".") +} diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go index 8a1104bde8..e523a66522 100644 --- a/vendor/k8s.io/client-go/tools/cache/controller.go +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -59,6 +59,12 @@ type Config struct { // FullResyncPeriod is the period at which ShouldResync is considered. FullResyncPeriod time.Duration + // MinWatchTimeout, if set, will define the minimum timeout for watch requests send + // to kube-apiserver. However, values lower than 5m will not be honored to avoid + // negative performance impact on controlplane. + // Optional - if unset a default value of 5m will be used. + MinWatchTimeout time.Duration + // ShouldResync is periodically used by the reflector to determine // whether to Resync the Queue. If ShouldResync is `nil` or // returns true, it means the reflector should proceed with the @@ -138,6 +144,7 @@ func (c *controller) Run(stopCh <-chan struct{}) { c.config.Queue, ReflectorOptions{ ResyncPeriod: c.config.FullResyncPeriod, + MinWatchTimeout: c.config.MinWatchTimeout, TypeDescription: c.config.ObjectDescription, Clock: c.clock, }, @@ -336,6 +343,68 @@ func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) { return MetaNamespaceKeyFunc(obj) } +// DeletionHandlingObjectToName checks for +// DeletedFinalStateUnknown objects before calling +// ObjectToName. +func DeletionHandlingObjectToName(obj interface{}) (ObjectName, error) { + if d, ok := obj.(DeletedFinalStateUnknown); ok { + return ParseObjectName(d.Key) + } + return ObjectToName(obj) +} + +// InformerOptions configure a Reflector. +type InformerOptions struct { + // ListerWatcher implements List and Watch functions for the source of the resource + // the informer will be informing about. + ListerWatcher ListerWatcher + + // ObjectType is an object of the type that informer is expected to receive. + ObjectType runtime.Object + + // Handler defines functions that should called on object mutations. + Handler ResourceEventHandler + + // ResyncPeriod is the underlying Reflector's resync period. If non-zero, the store + // is re-synced with that frequency - Modify events are delivered even if objects + // didn't change. + // This is useful for synchronizing objects that configure external resources + // (e.g. 
configure cloud provider functionalities). + // Optional - if unset, store resyncing is not happening periodically. + ResyncPeriod time.Duration + + // MinWatchTimeout, if set, will define the minimum timeout for watch requests send + // to kube-apiserver. However, values lower than 5m will not be honored to avoid + // negative performance impact on controlplane. + // Optional - if unset a default value of 5m will be used. + MinWatchTimeout time.Duration + + // Indexers, if set, are the indexers for the received objects to optimize + // certain queries. + // Optional - if unset no indexes are maintained. + Indexers Indexers + + // Transform function, if set, will be called on all objects before they will be + // put into the Store and corresponding Add/Modify/Delete handlers will be invoked + // for them. + // Optional - if unset no additional transforming is happening. + Transform TransformFunc +} + +// NewInformerWithOptions returns a Store and a controller for populating the store +// while also providing event notifications. You should only used the returned +// Store for Get/List operations; Add/Modify/Deletes will cause the event +// notifications to be faulty. +func NewInformerWithOptions(options InformerOptions) (Store, Controller) { + var clientState Store + if options.Indexers == nil { + clientState = NewStore(DeletionHandlingMetaNamespaceKeyFunc) + } else { + clientState = NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, options.Indexers) + } + return clientState, newInformer(clientState, options) +} + // NewInformer returns a Store and a controller for populating the store // while also providing event notifications. You should only used the returned // Store for Get/List operations; Add/Modify/Deletes will cause the event @@ -350,6 +419,8 @@ func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) { // long as possible (until the upstream source closes the watch or times out, // or you stop the controller). // - h is the object you want notifications sent to. +// +// Deprecated: Use NewInformerWithOptions instead. func NewInformer( lw ListerWatcher, objType runtime.Object, @@ -359,7 +430,13 @@ func NewInformer( // This will hold the client state, as we know it. clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc) - return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil) + options := InformerOptions{ + ListerWatcher: lw, + ObjectType: objType, + Handler: h, + ResyncPeriod: resyncPeriod, + } + return clientState, newInformer(clientState, options) } // NewIndexerInformer returns an Indexer and a Controller for populating the index @@ -377,6 +454,8 @@ func NewInformer( // or you stop the controller). // - h is the object you want notifications sent to. // - indexers is the indexer for the received object type. +// +// Deprecated: Use NewInformerWithOptions instead. func NewIndexerInformer( lw ListerWatcher, objType runtime.Object, @@ -387,7 +466,14 @@ func NewIndexerInformer( // This will hold the client state, as we know it. 
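// Editor's note: a minimal sketch of the options-based constructor introduced above,
// assuming an existing kubernetes.Interface client; the resync period, the 10-minute
// MinWatchTimeout, and the AddFunc body are illustrative values only (the field's
// documentation notes that values below 5m are not honored).
package exampletest

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

func runPodInformer(client kubernetes.Interface, stopCh <-chan struct{}) cache.Store {
	lw := cache.NewListWatchFromClient(
		client.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())

	store, controller := cache.NewInformerWithOptions(cache.InformerOptions{
		ListerWatcher:   lw,
		ObjectType:      &v1.Pod{},
		ResyncPeriod:    30 * time.Minute,
		MinWatchTimeout: 10 * time.Minute, // forwarded to the reflector via Config.MinWatchTimeout
		Handler: cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				fmt.Println("pod added:", obj.(*v1.Pod).Name)
			},
		},
	})
	go controller.Run(stopCh)
	return store
}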
clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers) - return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil) + options := InformerOptions{ + ListerWatcher: lw, + ObjectType: objType, + Handler: h, + ResyncPeriod: resyncPeriod, + Indexers: indexers, + } + return clientState, newInformer(clientState, options) } // NewTransformingInformer returns a Store and a controller for populating @@ -397,6 +483,8 @@ func NewIndexerInformer( // The given transform function will be called on all objects before they will // put into the Store and corresponding Add/Modify/Delete handlers will // be invoked for them. +// +// Deprecated: Use NewInformerWithOptions instead. func NewTransformingInformer( lw ListerWatcher, objType runtime.Object, @@ -407,7 +495,14 @@ func NewTransformingInformer( // This will hold the client state, as we know it. clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc) - return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, transformer) + options := InformerOptions{ + ListerWatcher: lw, + ObjectType: objType, + Handler: h, + ResyncPeriod: resyncPeriod, + Transform: transformer, + } + return clientState, newInformer(clientState, options) } // NewTransformingIndexerInformer returns an Indexer and a controller for @@ -417,6 +512,8 @@ func NewTransformingInformer( // The given transform function will be called on all objects before they will // be put into the Index and corresponding Add/Modify/Delete handlers will // be invoked for them. +// +// Deprecated: Use NewInformerWithOptions instead. func NewTransformingIndexerInformer( lw ListerWatcher, objType runtime.Object, @@ -428,7 +525,15 @@ func NewTransformingIndexerInformer( // This will hold the client state, as we know it. clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers) - return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, transformer) + options := InformerOptions{ + ListerWatcher: lw, + ObjectType: objType, + Handler: h, + ResyncPeriod: resyncPeriod, + Indexers: indexers, + Transform: transformer, + } + return clientState, newInformer(clientState, options) } // Multiplexes updates in the form of a list of Deltas into a Store, and informs @@ -471,42 +576,29 @@ func processDeltas( // providing event notifications. // // Parameters -// - lw is list and watch functions for the source of the resource you want to -// be informed of. -// - objType is an object of the type that you expect to receive. -// - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate -// calls, even if nothing changed). Otherwise, re-list will be delayed as -// long as possible (until the upstream source closes the watch or times out, -// or you stop the controller). -// - h is the object you want notifications sent to. // - clientState is the store you want to populate -func newInformer( - lw ListerWatcher, - objType runtime.Object, - resyncPeriod time.Duration, - h ResourceEventHandler, - clientState Store, - transformer TransformFunc, -) Controller { +// - options contain the options to configure the controller +func newInformer(clientState Store, options InformerOptions) Controller { // This will hold incoming changes. Note how we pass clientState in as a // KeyLister, that way resync operations will result in the correct set // of update/delete deltas. 
fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ KnownObjects: clientState, EmitDeltaTypeReplaced: true, - Transformer: transformer, + Transformer: options.Transform, }) cfg := &Config{ Queue: fifo, - ListerWatcher: lw, - ObjectType: objType, - FullResyncPeriod: resyncPeriod, + ListerWatcher: options.ListerWatcher, + ObjectType: options.ObjectType, + FullResyncPeriod: options.ResyncPeriod, + MinWatchTimeout: options.MinWatchTimeout, RetryOnError: false, Process: func(obj interface{}, isInInitialList bool) error { if deltas, ok := obj.(Deltas); ok { - return processDeltas(h, clientState, deltas, isInInitialList) + return processDeltas(options.Handler, clientState, deltas, isInInitialList) } return errors.New("object given as Process argument is not Deltas") }, diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index 7160bb1ee7..ce74dfb6f1 100644 --- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -139,20 +139,17 @@ type DeltaFIFO struct { } // TransformFunc allows for transforming an object before it will be processed. -// TransformFunc (similarly to ResourceEventHandler functions) should be able -// to correctly handle the tombstone of type cache.DeletedFinalStateUnknown. -// -// New in v1.27: In such cases, the contained object will already have gone -// through the transform object separately (when it was added / updated prior -// to the delete), so the TransformFunc can likely safely ignore such objects -// (i.e., just return the input object). // // The most common usage pattern is to clean-up some parts of the object to // reduce component memory usage if a given component doesn't care about them. // -// New in v1.27: unless the object is a DeletedFinalStateUnknown, TransformFunc -// sees the object before any other actor, and it is now safe to mutate the -// object in place instead of making a copy. +// New in v1.27: TransformFunc sees the object before any other actor, and it +// is now safe to mutate the object in place instead of making a copy. +// +// It's recommended for the TransformFunc to be idempotent. +// It MUST be idempotent if objects already present in the cache are passed to +// the Replace() to avoid re-mutating them. Default informers do not pass +// existing objects to Replace though. // // Note that TransformFunc is called while inserting objects into the // notification queue and is therefore extremely performance sensitive; please @@ -440,22 +437,38 @@ func isDeletionDup(a, b *Delta) *Delta { // queueActionLocked appends to the delta list for the object. // Caller must lock first. func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error { + return f.queueActionInternalLocked(actionType, actionType, obj) +} + +// queueActionInternalLocked appends to the delta list for the object. +// The actionType is emitted and must honor emitDeltaTypeReplaced. +// The internalActionType is only used within this function and must +// ignore emitDeltaTypeReplaced. +// Caller must lock first. +func (f *DeltaFIFO) queueActionInternalLocked(actionType, internalActionType DeltaType, obj interface{}) error { id, err := f.KeyOf(obj) if err != nil { return KeyError{obj, err} } // Every object comes through this code path once, so this is a good - // place to call the transform func. 
If obj is a - // DeletedFinalStateUnknown tombstone, then the containted inner object - // will already have gone through the transformer, but we document that - // this can happen. In cases involving Replace(), such an object can - // come through multiple times. + // place to call the transform func. + // + // If obj is a DeletedFinalStateUnknown tombstone or the action is a Sync, + // then the object have already gone through the transformer. + // + // If the objects already present in the cache are passed to Replace(), + // the transformer must be idempotent to avoid re-mutating them, + // or coordinate with all readers from the cache to avoid data races. + // Default informers do not pass existing objects to Replace. if f.transformer != nil { - var err error - obj, err = f.transformer(obj) - if err != nil { - return err + _, isTombstone := obj.(DeletedFinalStateUnknown) + if !isTombstone && internalActionType != Sync { + var err error + obj, err = f.transformer(obj) + if err != nil { + return err + } } } @@ -638,7 +651,7 @@ func (f *DeltaFIFO) Replace(list []interface{}, _ string) error { return KeyError{item, err} } keys.Insert(key) - if err := f.queueActionLocked(action, item); err != nil { + if err := f.queueActionInternalLocked(action, Replaced, item); err != nil { return fmt.Errorf("couldn't enqueue object: %v", err) } } diff --git a/vendor/k8s.io/client-go/tools/cache/index.go b/vendor/k8s.io/client-go/tools/cache/index.go index b78d3086b8..c5819fb6f8 100644 --- a/vendor/k8s.io/client-go/tools/cache/index.go +++ b/vendor/k8s.io/client-go/tools/cache/index.go @@ -50,8 +50,7 @@ type Indexer interface { // GetIndexers return the indexers GetIndexers() Indexers - // AddIndexers adds more indexers to this store. If you call this after you already have data - // in the store, the results are undefined. + // AddIndexers adds more indexers to this store. This supports adding indexes after the store already has items. AddIndexers(newIndexers Indexers) error } diff --git a/vendor/k8s.io/client-go/tools/cache/listers.go b/vendor/k8s.io/client-go/tools/cache/listers.go index 420ca7b2ac..a60f44943e 100644 --- a/vendor/k8s.io/client-go/tools/cache/listers.go +++ b/vendor/k8s.io/client-go/tools/cache/listers.go @@ -30,7 +30,7 @@ import ( // AppendFunc is used to add a matching item to whatever list the caller is using type AppendFunc func(interface{}) -// ListAll calls appendFn with each value retrieved from store which matches the selector. +// ListAll lists items in the store matching the given selector, calling appendFn on each one. func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { selectAll := selector.Empty() for _, m := range store.List() { @@ -51,7 +51,9 @@ func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { return nil } -// ListAllByNamespace used to list items belongs to namespace from Indexer. +// ListAllByNamespace lists items in the given namespace in the store matching the given selector, +// calling appendFn on each one. +// If a blank namespace (NamespaceAll) is specified, this delegates to ListAll(). 
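// A minimal sketch (illustrative, not part of the vendored change) of a TransformFunc
// that follows the idempotency guidance documented above: it strips managedFields to
// reduce informer memory, leaves DeletedFinalStateUnknown tombstones alone (their
// inner object was already transformed), and is harmless when applied twice.
func stripManagedFields(obj interface{}) (interface{}, error) {
	if _, isTombstone := obj.(cache.DeletedFinalStateUnknown); isTombstone {
		return obj, nil
	}
	if accessor, err := meta.Accessor(obj); err == nil {
		// Clearing an already-cleared field is a no-op, so re-running the
		// transform on objects passed back through Replace() is safe.
		accessor.SetManagedFields(nil)
	}
	return obj, nil
}
// It can be wired in through DeltaFIFOOptions.Transformer or InformerOptions.Transform
// as shown elsewhere in this patch.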
func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error { if namespace == metav1.NamespaceAll { return ListAll(indexer, selector, appendFn) diff --git a/vendor/k8s.io/client-go/tools/cache/listwatch.go b/vendor/k8s.io/client-go/tools/cache/listwatch.go index 10b7e6512e..f5708ffeb1 100644 --- a/vendor/k8s.io/client-go/tools/cache/listwatch.go +++ b/vendor/k8s.io/client-go/tools/cache/listwatch.go @@ -36,6 +36,10 @@ type Lister interface { // Watcher is any object that knows how to start a watch on a resource. type Watcher interface { // Watch should begin a watch at the specified version. + // + // If Watch returns an error, it should handle its own cleanup, including + // but not limited to calling Stop() on the watch, if one was constructed. + // This allows the caller to ignore the watch, if the error is non-nil. Watch(options metav1.ListOptions) (watch.Interface, error) } diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index c1ea13de57..5e7dd57409 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -22,7 +22,6 @@ import ( "fmt" "io" "math/rand" - "os" "reflect" "strings" "sync" @@ -39,15 +38,23 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" + clientfeatures "k8s.io/client-go/features" "k8s.io/client-go/tools/pager" "k8s.io/klog/v2" "k8s.io/utils/clock" "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "k8s.io/utils/trace" ) const defaultExpectedTypeName = "" +var ( + // We try to spread the load on apiserver by setting timeouts for + // watch requests - it is random in [minWatchTimeout, 2*minWatchTimeout]. + defaultMinWatchTimeout = 5 * time.Minute +) + // Reflector watches a specified resource and causes all changes to be reflected in the given store. type Reflector struct { // name identifies this reflector. By default it will be a file:line if possible. @@ -71,6 +78,8 @@ type Reflector struct { // backoff manages backoff of ListWatch backoffManager wait.BackoffManager resyncPeriod time.Duration + // minWatchTimeout defines the minimum timeout for watch requests. + minWatchTimeout time.Duration // clock allows tests to manipulate time clock clock.Clock // paginatedResult defines whether pagination should be forced for list calls. @@ -107,7 +116,9 @@ type Reflector struct { // might result in an increased memory consumption of the APIServer. // // See https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#design-details - UseWatchList bool + // + // TODO(#115478): Consider making reflector.UseWatchList a private field. Since we implemented "api streaming" on the etcd storage layer it should work. + UseWatchList *bool } // ResourceVersionUpdater is an interface that allows store implementation to @@ -148,12 +159,6 @@ func DefaultWatchErrorHandler(r *Reflector, err error) { } } -var ( - // We try to spread the load on apiserver by setting timeouts for - // watch requests - it is random in [minWatchTimeout, 2*minWatchTimeout]. 
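// A small sketch (illustrative, not part of the vendored change) of a ListerWatcher
// built from closures that honors the Watch cleanup contract documented above.
// The typed clientset value ("client") is an assumption.
lw := &cache.ListWatch{
	ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
		return client.CoreV1().Pods("default").List(context.TODO(), options)
	},
	WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
		w, err := client.CoreV1().Pods("default").Watch(context.TODO(), options)
		if err != nil {
			if w != nil {
				w.Stop() // clean up here so callers may ignore w whenever err != nil
			}
			return nil, err
		}
		return w, nil
	},
}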
- minWatchTimeout = 5 * time.Minute -) - // NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector // The indexer is configured to key on namespace func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interface{}, resyncPeriod time.Duration) (indexer Indexer, reflector *Reflector) { @@ -191,6 +196,10 @@ type ReflectorOptions struct { // (do not resync). ResyncPeriod time.Duration + // MinWatchTimeout, if non-zero, defines the minimum timeout for watch requests send to kube-apiserver. + // However, values lower than 5m will not be honored to avoid negative performance impact on controlplane. + MinWatchTimeout time.Duration + // Clock allows tests to control time. If unset defaults to clock.RealClock{} Clock clock.Clock } @@ -210,9 +219,14 @@ func NewReflectorWithOptions(lw ListerWatcher, expectedType interface{}, store S if reflectorClock == nil { reflectorClock = clock.RealClock{} } + minWatchTimeout := defaultMinWatchTimeout + if options.MinWatchTimeout > defaultMinWatchTimeout { + minWatchTimeout = options.MinWatchTimeout + } r := &Reflector{ name: options.Name, resyncPeriod: options.ResyncPeriod, + minWatchTimeout: minWatchTimeout, typeDescription: options.TypeDescription, listerWatcher: lw, store: store, @@ -237,8 +251,10 @@ func NewReflectorWithOptions(lw ListerWatcher, expectedType interface{}, store S r.expectedGVK = getExpectedGVKFromObject(expectedType) } - if s := os.Getenv("ENABLE_CLIENT_GO_WATCH_LIST_ALPHA"); len(s) > 0 { - r.UseWatchList = true + // don't overwrite UseWatchList if already set + // because the higher layers (e.g. storage/cacher) disabled it on purpose + if r.UseWatchList == nil { + r.UseWatchList = ptr.To(clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient)) } return r @@ -325,9 +341,10 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { klog.V(3).Infof("Listing and watching %v from %s", r.typeDescription, r.name) var err error var w watch.Interface - fallbackToList := !r.UseWatchList + useWatchList := ptr.Deref(r.UseWatchList, false) + fallbackToList := !useWatchList - if r.UseWatchList { + if useWatchList { w, err = r.watchList(stopCh) if w == nil && err == nil { // stopCh was closed @@ -349,12 +366,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { } klog.V(2).Infof("Caches populated for %v from %s", r.typeDescription, r.name) - - resyncerrc := make(chan error, 1) - cancelCh := make(chan struct{}) - defer close(cancelCh) - go r.startResync(stopCh, cancelCh, resyncerrc) - return r.watch(w, stopCh, resyncerrc) + return r.watchWithResync(w, stopCh) } // startResync periodically calls r.store.Resync() method. @@ -385,6 +397,15 @@ func (r *Reflector) startResync(stopCh <-chan struct{}, cancelCh <-chan struct{} } } +// watchWithResync runs watch with startResync in the background. +func (r *Reflector) watchWithResync(w watch.Interface, stopCh <-chan struct{}) error { + resyncerrc := make(chan error, 1) + cancelCh := make(chan struct{}) + defer close(cancelCh) + go r.startResync(stopCh, cancelCh, resyncerrc) + return r.watch(w, stopCh, resyncerrc) +} + // watch simply starts a watch request with the server. 
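// A minimal sketch (illustrative, not part of the vendored change) of the new
// MinWatchTimeout knob. Per the clamping in NewReflectorWithOptions above, only
// values above the 5m default take effect; lw, store and stopCh are placeholders.
r := cache.NewReflectorWithOptions(lw, &v1.Pod{}, store, cache.ReflectorOptions{
	Name:            "pod-reflector",
	ResyncPeriod:    0,                // rely on watch events only
	MinWatchTimeout: 30 * time.Minute, // above the 5m floor, so it is honored
})
go r.Run(stopCh)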
func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc chan error) error { var err error @@ -407,7 +428,7 @@ func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc start := r.clock.Now() if w == nil { - timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) + timeoutSeconds := int64(r.minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) options := metav1.ListOptions{ ResourceVersion: r.LastSyncResourceVersion(), // We want to avoid situations of hanging watchers. Stop any watchers that do not @@ -434,13 +455,14 @@ func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc } } - err = watchHandler(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.typeDescription, r.setLastSyncResourceVersion, nil, r.clock, resyncerrc, stopCh) + err = handleWatch(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.typeDescription, r.setLastSyncResourceVersion, + r.clock, resyncerrc, stopCh) // Ensure that watch will not be reused across iterations. w.Stop() w = nil retry.After(err) if err != nil { - if err != errorStopRequested { + if !errors.Is(err, errorStopRequested) { switch { case isExpiredError(err): // Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already @@ -634,7 +656,7 @@ func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) { // TODO(#115478): large "list", slow clients, slow network, p&f // might slow down streaming and eventually fail. // maybe in such a case we should retry with an increased timeout? - timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) + timeoutSeconds := int64(r.minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) options := metav1.ListOptions{ ResourceVersion: lastKnownRV, AllowWatchBookmarks: true, @@ -651,14 +673,12 @@ func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) { } return nil, err } - bookmarkReceived := pointer.Bool(false) - err = watchHandler(start, w, temporaryStore, r.expectedType, r.expectedGVK, r.name, r.typeDescription, + watchListBookmarkReceived, err := handleListWatch(start, w, temporaryStore, r.expectedType, r.expectedGVK, r.name, r.typeDescription, func(rv string) { resourceVersion = rv }, - bookmarkReceived, r.clock, make(chan error), stopCh) if err != nil { w.Stop() // stop and retry with clean state - if err == errorStopRequested { + if errors.Is(err, errorStopRequested) { return nil, nil } if isErrorRetriableWithSideEffectsFn(err) { @@ -666,7 +686,7 @@ func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) { } return nil, err } - if *bookmarkReceived { + if watchListBookmarkReceived { break } } @@ -678,10 +698,10 @@ func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) { // we utilize the temporaryStore to ensure independence from the current store implementation. // as of today, the store is implemented as a queue and will be drained by the higher-level // component as soon as it finishes replacing the content. 
- checkWatchListConsistencyIfRequested(stopCh, r.name, resourceVersion, r.listerWatcher, temporaryStore) + checkWatchListDataConsistencyIfRequested(wait.ContextForChannel(stopCh), r.name, resourceVersion, wrapListFuncWithContext(r.listerWatcher.List), temporaryStore.List) - if err = r.store.Replace(temporaryStore.List(), resourceVersion); err != nil { - return nil, fmt.Errorf("unable to sync watch-list result: %v", err) + if err := r.store.Replace(temporaryStore.List(), resourceVersion); err != nil { + return nil, fmt.Errorf("unable to sync watch-list result: %w", err) } initTrace.Step("SyncWith done") r.setLastSyncResourceVersion(resourceVersion) @@ -698,8 +718,33 @@ func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) err return r.store.Replace(found, resourceVersion) } -// watchHandler watches w and sets setLastSyncResourceVersion -func watchHandler(start time.Time, +// handleListWatch consumes events from w, updates the Store, and records the +// last seen ResourceVersion, to allow continuing from that ResourceVersion on +// retry. If successful, the watcher will be left open after receiving the +// initial set of objects, to allow watching for future events. +func handleListWatch( + start time.Time, + w watch.Interface, + store Store, + expectedType reflect.Type, + expectedGVK *schema.GroupVersionKind, + name string, + expectedTypeName string, + setLastSyncResourceVersion func(string), + clock clock.Clock, + errCh chan error, + stopCh <-chan struct{}, +) (bool, error) { + exitOnWatchListBookmarkReceived := true + return handleAnyWatch(start, w, store, expectedType, expectedGVK, name, expectedTypeName, + setLastSyncResourceVersion, exitOnWatchListBookmarkReceived, clock, errCh, stopCh) +} + +// handleListWatch consumes events from w, updates the Store, and records the +// last seen ResourceVersion, to allow continuing from that ResourceVersion on +// retry. The watcher will always be stopped on exit. +func handleWatch( + start time.Time, w watch.Interface, store Store, expectedType reflect.Type, @@ -707,31 +752,56 @@ func watchHandler(start time.Time, name string, expectedTypeName string, setLastSyncResourceVersion func(string), - exitOnInitialEventsEndBookmark *bool, clock clock.Clock, - errc chan error, + errCh chan error, stopCh <-chan struct{}, ) error { + exitOnWatchListBookmarkReceived := false + _, err := handleAnyWatch(start, w, store, expectedType, expectedGVK, name, expectedTypeName, + setLastSyncResourceVersion, exitOnWatchListBookmarkReceived, clock, errCh, stopCh) + return err +} + +// handleAnyWatch consumes events from w, updates the Store, and records the last +// seen ResourceVersion, to allow continuing from that ResourceVersion on retry. +// If exitOnWatchListBookmarkReceived is true, the watch events will be consumed +// until a bookmark event is received with the WatchList annotation present. +// Returns true (watchListBookmarkReceived) if the WatchList bookmark was +// received, even if exitOnWatchListBookmarkReceived is false. +// The watcher will always be stopped, unless exitOnWatchListBookmarkReceived is +// true and watchListBookmarkReceived is true. This allows the same watch stream +// to be re-used by the caller to continue watching for new events. 
+func handleAnyWatch(start time.Time, + w watch.Interface, + store Store, + expectedType reflect.Type, + expectedGVK *schema.GroupVersionKind, + name string, + expectedTypeName string, + setLastSyncResourceVersion func(string), + exitOnWatchListBookmarkReceived bool, + clock clock.Clock, + errCh chan error, + stopCh <-chan struct{}, +) (bool, error) { + watchListBookmarkReceived := false eventCount := 0 - if exitOnInitialEventsEndBookmark != nil { - // set it to false just in case somebody - // made it positive - *exitOnInitialEventsEndBookmark = false - } + initialEventsEndBookmarkWarningTicker := newInitialEventsEndBookmarkTicker(name, clock, start, exitOnWatchListBookmarkReceived) + defer initialEventsEndBookmarkWarningTicker.Stop() loop: for { select { case <-stopCh: - return errorStopRequested - case err := <-errc: - return err + return watchListBookmarkReceived, errorStopRequested + case err := <-errCh: + return watchListBookmarkReceived, err case event, ok := <-w.ResultChan(): if !ok { break loop } if event.Type == watch.Error { - return apierrors.FromObject(event.Object) + return watchListBookmarkReceived, apierrors.FromObject(event.Object) } if expectedType != nil { if e, a := expectedType, reflect.TypeOf(event.Object); e != a { @@ -772,10 +842,8 @@ loop: } case watch.Bookmark: // A `Bookmark` means watch has synced here, just update the resourceVersion - if meta.GetAnnotations()["k8s.io/initial-events-end"] == "true" { - if exitOnInitialEventsEndBookmark != nil { - *exitOnInitialEventsEndBookmark = true - } + if meta.GetAnnotations()[metav1.InitialEventsAnnotationKey] == "true" { + watchListBookmarkReceived = true } default: utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", name, event)) @@ -785,20 +853,23 @@ loop: rvu.UpdateResourceVersion(resourceVersion) } eventCount++ - if exitOnInitialEventsEndBookmark != nil && *exitOnInitialEventsEndBookmark { + if exitOnWatchListBookmarkReceived && watchListBookmarkReceived { watchDuration := clock.Since(start) klog.V(4).Infof("exiting %v Watch because received the bookmark that marks the end of initial events stream, total %v items received in %v", name, eventCount, watchDuration) - return nil + return watchListBookmarkReceived, nil } + initialEventsEndBookmarkWarningTicker.observeLastEventTimeStamp(clock.Now()) + case <-initialEventsEndBookmarkWarningTicker.C(): + initialEventsEndBookmarkWarningTicker.warnIfExpired() } } watchDuration := clock.Since(start) if watchDuration < 1*time.Second && eventCount == 0 { - return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", name) + return watchListBookmarkReceived, fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", name) } klog.V(4).Infof("%s: Watch close - %v total %v items received", name, expectedTypeName, eventCount) - return nil + return watchListBookmarkReceived, nil } // LastSyncResourceVersion is the resource version observed when last sync with the underlying store @@ -910,3 +981,95 @@ func isWatchErrorRetriable(err error) bool { } return false } + +// wrapListFuncWithContext simply wraps ListFunction into another function that accepts a context and ignores it. 
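// A hedged sketch (illustrative, not part of the vendored change) of how a consumer
// of a raw watch stream could recognize the same "initial events end" bookmark that
// handleAnyWatch keys off above. Assumes ev was read from a watch started with
// AllowWatchBookmarks and SendInitialEvents, as in watchList.
initialListDone := false
if ev.Type == watch.Bookmark {
	if m, err := meta.Accessor(ev.Object); err == nil &&
		m.GetAnnotations()[metav1.InitialEventsAnnotationKey] == "true" {
		// Initial state has been fully streamed; later events are live updates.
		initialListDone = true
	}
}
_ = initialListDone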
+func wrapListFuncWithContext(listFn ListFunc) func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + return func(_ context.Context, options metav1.ListOptions) (runtime.Object, error) { + return listFn(options) + } +} + +// initialEventsEndBookmarkTicker a ticker that produces a warning if the bookmark event +// which marks the end of the watch stream, has not been received within the defined tick interval. +// +// Note: +// The methods exposed by this type are not thread-safe. +type initialEventsEndBookmarkTicker struct { + clock.Ticker + clock clock.Clock + name string + + watchStart time.Time + tickInterval time.Duration + lastEventObserveTime time.Time +} + +// newInitialEventsEndBookmarkTicker returns a noop ticker if exitOnInitialEventsEndBookmarkRequested is false. +// Otherwise, it returns a ticker that exposes a method producing a warning if the bookmark event, +// which marks the end of the watch stream, has not been received within the defined tick interval. +// +// Note that the caller controls whether to call t.C() and t.Stop(). +// +// In practice, the reflector exits the watchHandler as soon as the bookmark event is received and calls the t.C() method. +func newInitialEventsEndBookmarkTicker(name string, c clock.Clock, watchStart time.Time, exitOnWatchListBookmarkReceived bool) *initialEventsEndBookmarkTicker { + return newInitialEventsEndBookmarkTickerInternal(name, c, watchStart, 10*time.Second, exitOnWatchListBookmarkReceived) +} + +func newInitialEventsEndBookmarkTickerInternal(name string, c clock.Clock, watchStart time.Time, tickInterval time.Duration, exitOnWatchListBookmarkReceived bool) *initialEventsEndBookmarkTicker { + clockWithTicker, ok := c.(clock.WithTicker) + if !ok || !exitOnWatchListBookmarkReceived { + if exitOnWatchListBookmarkReceived { + klog.Warningf("clock does not support WithTicker interface but exitOnInitialEventsEndBookmark was requested") + } + return &initialEventsEndBookmarkTicker{ + Ticker: &noopTicker{}, + } + } + + return &initialEventsEndBookmarkTicker{ + Ticker: clockWithTicker.NewTicker(tickInterval), + clock: c, + name: name, + watchStart: watchStart, + tickInterval: tickInterval, + } +} + +func (t *initialEventsEndBookmarkTicker) observeLastEventTimeStamp(lastEventObserveTime time.Time) { + t.lastEventObserveTime = lastEventObserveTime +} + +func (t *initialEventsEndBookmarkTicker) warnIfExpired() { + if err := t.produceWarningIfExpired(); err != nil { + klog.Warning(err) + } +} + +// produceWarningIfExpired returns an error that represents a warning when +// the time elapsed since the last received event exceeds the tickInterval. +// +// Note that this method should be called when t.C() yields a value. 
+func (t *initialEventsEndBookmarkTicker) produceWarningIfExpired() error { + if _, ok := t.Ticker.(*noopTicker); ok { + return nil /*noop ticker*/ + } + if t.lastEventObserveTime.IsZero() { + return fmt.Errorf("%s: awaiting required bookmark event for initial events stream, no events received for %v", t.name, t.clock.Since(t.watchStart)) + } + elapsedTime := t.clock.Now().Sub(t.lastEventObserveTime) + hasBookmarkTimerExpired := elapsedTime >= t.tickInterval + + if !hasBookmarkTimerExpired { + return nil + } + return fmt.Errorf("%s: hasn't received required bookmark event marking the end of initial events stream, received last event %v ago", t.name, elapsedTime) +} + +var _ clock.Ticker = &noopTicker{} + +// TODO(#115478): move to k8s/utils repo +type noopTicker struct{} + +func (t *noopTicker) C() <-chan time.Time { return nil } + +func (t *noopTicker) Stop() {} diff --git a/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go b/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go index aa3027d714..a7e0d9c436 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go @@ -1,5 +1,5 @@ /* -Copyright 2023 The Kubernetes Authors. +Copyright 2024 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,102 +18,26 @@ package cache import ( "context" - "os" - "sort" - "strconv" - "time" - "github.com/google/go-cmp/cmp" - - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/klog/v2" + "k8s.io/client-go/util/consistencydetector" ) -var dataConsistencyDetectionEnabled = false - -func init() { - dataConsistencyDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_WATCHLIST_INCONSISTENCY_DETECTOR")) -} - -// checkWatchListConsistencyIfRequested performs a data consistency check only when +// checkWatchListDataConsistencyIfRequested performs a data consistency check only when // the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup. // // The consistency check is meant to be enforced only in the CI, not in production. // The check ensures that data retrieved by the watch-list api call -// is exactly the same as data received by the standard list api call. +// is exactly the same as data received by the standard list api call against etcd. // // Note that this function will panic when data inconsistency is detected. // This is intentional because we want to catch it in the CI. -func checkWatchListConsistencyIfRequested(stopCh <-chan struct{}, identity string, lastSyncedResourceVersion string, listerWatcher Lister, store Store) { - if !dataConsistencyDetectionEnabled { - return - } - checkWatchListConsistency(stopCh, identity, lastSyncedResourceVersion, listerWatcher, store) -} - -// checkWatchListConsistency exists solely for testing purposes. -// we cannot use checkWatchListConsistencyIfRequested because -// it is guarded by an environmental variable. -// we cannot manipulate the environmental variable because -// it will affect other tests in this package. 
-func checkWatchListConsistency(stopCh <-chan struct{}, identity string, lastSyncedResourceVersion string, listerWatcher Lister, store Store) { - klog.Warningf("%s: data consistency check for the watch-list feature is enabled, this will result in an additional call to the API server.", identity) - opts := metav1.ListOptions{ - ResourceVersion: lastSyncedResourceVersion, - ResourceVersionMatch: metav1.ResourceVersionMatchExact, - } - var list runtime.Object - err := wait.PollUntilContextCancel(wait.ContextForChannel(stopCh), time.Second, true, func(_ context.Context) (done bool, err error) { - list, err = listerWatcher.List(opts) - if err != nil { - // the consistency check will only be enabled in the CI - // and LIST calls in general will be retired by the client-go library - // if we fail simply log and retry - klog.Errorf("failed to list data from the server, retrying until stopCh is closed, err: %v", err) - return false, nil - } - return true, nil - }) - if err != nil { - klog.Errorf("failed to list data from the server, the watch-list consistency check won't be performed, stopCh was closed, err: %v", err) +func checkWatchListDataConsistencyIfRequested[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn consistencydetector.ListFunc[T], retrieveItemsFn consistencydetector.RetrieveItemsFunc[U]) { + if !consistencydetector.IsDataConsistencyDetectionForWatchListEnabled() { return } - - rawListItems, err := meta.ExtractListWithAlloc(list) - if err != nil { - panic(err) // this should never happen - } - - listItems := toMetaObjectSliceOrDie(rawListItems) - storeItems := toMetaObjectSliceOrDie(store.List()) - - sort.Sort(byUID(listItems)) - sort.Sort(byUID(storeItems)) - - if !cmp.Equal(listItems, storeItems) { - klog.Infof("%s: data received by the new watch-list api call is different than received by the standard list api call, diff: %v", identity, cmp.Diff(listItems, storeItems)) - msg := "data inconsistency detected for the watch-list feature, panicking!" - panic(msg) - } -} - -type byUID []metav1.Object - -func (a byUID) Len() int { return len(a) } -func (a byUID) Less(i, j int) bool { return a[i].GetUID() < a[j].GetUID() } -func (a byUID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -func toMetaObjectSliceOrDie[T any](s []T) []metav1.Object { - result := make([]metav1.Object, len(s)) - for i, v := range s { - m, err := meta.Accessor(v) - if err != nil { - panic(err) - } - result[i] = m - } - return result + // for informers we pass an empty ListOptions because + // listFn might be wrapped for filtering during informer construction. 
+ consistencydetector.CheckDataConsistency(ctx, identity, lastSyncedResourceVersion, listFn, metav1.ListOptions{}, retrieveItemsFn) } diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go index b3f37431d5..c805030bd7 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -31,6 +31,8 @@ import ( "k8s.io/utils/clock" "k8s.io/klog/v2" + + clientgofeaturegate "k8s.io/client-go/features" ) // SharedInformer provides eventually consistent linkage of its @@ -409,6 +411,10 @@ func (v *dummyController) HasSynced() bool { } func (v *dummyController) LastSyncResourceVersion() string { + if clientgofeaturegate.FeatureGates().Enabled(clientgofeaturegate.InformerResourceVersion) { + return v.informer.LastSyncResourceVersion() + } + return "" } @@ -540,8 +546,8 @@ func (s *sharedIndexInformer) AddIndexers(indexers Indexers) error { s.startedLock.Lock() defer s.startedLock.Unlock() - if s.started { - return fmt.Errorf("informer has already started") + if s.stopped { + return fmt.Errorf("indexer was not added because it has stopped already") } return s.indexer.AddIndexers(indexers) diff --git a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go index 145e93ee53..7a4df0e1ba 100644 --- a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go +++ b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go @@ -52,8 +52,7 @@ type ThreadSafeStore interface { ByIndex(indexName, indexedValue string) ([]interface{}, error) GetIndexers() Indexers - // AddIndexers adds more indexers to this store. If you call this after you already have data - // in the store, the results are undefined. + // AddIndexers adds more indexers to this store. This supports adding indexes after the store already has items. AddIndexers(newIndexers Indexers) error // Resync is a no-op and is deprecated Resync() error @@ -135,50 +134,66 @@ func (i *storeIndex) addIndexers(newIndexers Indexers) error { return nil } -// updateIndices modifies the objects location in the managed indexes: +// updateSingleIndex modifies the objects location in the named index: // - for create you must provide only the newObj // - for update you must provide both the oldObj and the newObj // - for delete you must provide only the oldObj -// updateIndices must be called from a function that already has a lock on the cache -func (i *storeIndex) updateIndices(oldObj interface{}, newObj interface{}, key string) { +// updateSingleIndex must be called from a function that already has a lock on the cache +func (i *storeIndex) updateSingleIndex(name string, oldObj interface{}, newObj interface{}, key string) { var oldIndexValues, indexValues []string - var err error - for name, indexFunc := range i.indexers { - if oldObj != nil { - oldIndexValues, err = indexFunc(oldObj) - } else { - oldIndexValues = oldIndexValues[:0] - } + indexFunc, ok := i.indexers[name] + if !ok { + // Should never happen. Caller is responsible for ensuring this exists, and should call with lock + // held to avoid any races. 
+ panic(fmt.Errorf("indexer %q does not exist", name)) + } + if oldObj != nil { + var err error + oldIndexValues, err = indexFunc(oldObj) if err != nil { panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err)) } + } else { + oldIndexValues = oldIndexValues[:0] + } - if newObj != nil { - indexValues, err = indexFunc(newObj) - } else { - indexValues = indexValues[:0] - } + if newObj != nil { + var err error + indexValues, err = indexFunc(newObj) if err != nil { panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err)) } + } else { + indexValues = indexValues[:0] + } - index := i.indices[name] - if index == nil { - index = Index{} - i.indices[name] = index - } + index := i.indices[name] + if index == nil { + index = Index{} + i.indices[name] = index + } - if len(indexValues) == 1 && len(oldIndexValues) == 1 && indexValues[0] == oldIndexValues[0] { - // We optimize for the most common case where indexFunc returns a single value which has not been changed - continue - } + if len(indexValues) == 1 && len(oldIndexValues) == 1 && indexValues[0] == oldIndexValues[0] { + // We optimize for the most common case where indexFunc returns a single value which has not been changed + return + } - for _, value := range oldIndexValues { - i.deleteKeyFromIndex(key, value, index) - } - for _, value := range indexValues { - i.addKeyToIndex(key, value, index) - } + for _, value := range oldIndexValues { + i.deleteKeyFromIndex(key, value, index) + } + for _, value := range indexValues { + i.addKeyToIndex(key, value, index) + } +} + +// updateIndices modifies the objects location in the managed indexes: +// - for create you must provide only the newObj +// - for update you must provide both the oldObj and the newObj +// - for delete you must provide only the oldObj +// updateIndices must be called from a function that already has a lock on the cache +func (i *storeIndex) updateIndices(oldObj interface{}, newObj interface{}, key string) { + for name := range i.indexers { + i.updateSingleIndex(name, oldObj, newObj, key) } } @@ -339,11 +354,18 @@ func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error { c.lock.Lock() defer c.lock.Unlock() - if len(c.items) > 0 { - return fmt.Errorf("cannot add indexers to running index") + if err := c.index.addIndexers(newIndexers); err != nil { + return err + } + + // If there are already items, index them + for key, item := range c.items { + for name := range newIndexers { + c.index.updateSingleIndex(name, nil, item, key) + } } - return c.index.addIndexers(newIndexers) + return nil } func (c *threadSafeMap) Resync() error { diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go index 5871575a66..fd913a3083 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go @@ -16,4 +16,4 @@ limitations under the License. 
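// A minimal sketch (illustrative, not part of the vendored change) of the relaxed
// AddIndexers behavior above: an index can now be added after the store has data,
// and existing items are indexed immediately. The informer variable and the
// pod-specific index function are illustrative assumptions.
err := informer.AddIndexers(cache.Indexers{
	"byNode": func(obj interface{}) ([]string, error) {
		pod, ok := obj.(*v1.Pod)
		if !ok {
			return nil, nil
		}
		return []string{pod.Spec.NodeName}, nil
	},
})
if err != nil {
	// Fails only if the informer has already stopped or the index name clashes.
	klog.ErrorS(err, "adding byNode index")
}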
// +k8s:deepcopy-gen=package -package api +package api // import "k8s.io/client-go/tools/clientcmd/api" diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go b/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go index dd5f918067..261dcacb5b 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go @@ -21,7 +21,6 @@ import ( "errors" "fmt" "os" - "path" "path/filepath" "reflect" "strings" @@ -115,7 +114,7 @@ func ShortenConfig(config *Config) { // FlattenConfig changes the config object into a self-contained config (useful for making secrets) func FlattenConfig(config *Config) error { for key, authInfo := range config.AuthInfos { - baseDir, err := MakeAbs(path.Dir(authInfo.LocationOfOrigin), "") + baseDir, err := MakeAbs(filepath.Dir(authInfo.LocationOfOrigin), "") if err != nil { return err } @@ -130,7 +129,7 @@ func FlattenConfig(config *Config) error { config.AuthInfos[key] = authInfo } for key, cluster := range config.Clusters { - baseDir, err := MakeAbs(path.Dir(cluster.LocationOfOrigin), "") + baseDir, err := MakeAbs(filepath.Dir(cluster.LocationOfOrigin), "") if err != nil { return err } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go index 3ccdebc1c3..9e483e9d75 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go @@ -18,4 +18,4 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:defaulter-gen=Kind -package v1 +package v1 // import "k8s.io/client-go/tools/clientcmd/api/v1" diff --git a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go index ae0f01f325..952f6d7eb6 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go @@ -72,6 +72,13 @@ type ClientConfig interface { ConfigAccess() ConfigAccess } +// OverridingClientConfig is used to enable overrriding the raw KubeConfig +type OverridingClientConfig interface { + ClientConfig + // MergedRawConfig return the RawConfig merged with all overrides. 
+ MergedRawConfig() (clientcmdapi.Config, error) +} + type PersistAuthProviderConfigForUser func(user string) restclient.AuthProviderConfigPersister type promptedCredentials struct { @@ -91,22 +98,22 @@ type DirectClientConfig struct { } // NewDefaultClientConfig creates a DirectClientConfig using the config.CurrentContext as the context name -func NewDefaultClientConfig(config clientcmdapi.Config, overrides *ConfigOverrides) ClientConfig { +func NewDefaultClientConfig(config clientcmdapi.Config, overrides *ConfigOverrides) OverridingClientConfig { return &DirectClientConfig{config, config.CurrentContext, overrides, nil, NewDefaultClientConfigLoadingRules(), promptedCredentials{}} } // NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information -func NewNonInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, configAccess ConfigAccess) ClientConfig { +func NewNonInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, configAccess ConfigAccess) OverridingClientConfig { return &DirectClientConfig{config, contextName, overrides, nil, configAccess, promptedCredentials{}} } // NewInteractiveClientConfig creates a DirectClientConfig using the passed context name and a reader in case auth information is not provided via files or flags -func NewInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, fallbackReader io.Reader, configAccess ConfigAccess) ClientConfig { +func NewInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, fallbackReader io.Reader, configAccess ConfigAccess) OverridingClientConfig { return &DirectClientConfig{config, contextName, overrides, fallbackReader, configAccess, promptedCredentials{}} } // NewClientConfigFromBytes takes your kubeconfig and gives you back a ClientConfig -func NewClientConfigFromBytes(configBytes []byte) (ClientConfig, error) { +func NewClientConfigFromBytes(configBytes []byte) (OverridingClientConfig, error) { config, err := Load(configBytes) if err != nil { return nil, err @@ -129,6 +136,40 @@ func (config *DirectClientConfig) RawConfig() (clientcmdapi.Config, error) { return config.config, nil } +// MergedRawConfig returns the raw kube config merged with the overrides +func (config *DirectClientConfig) MergedRawConfig() (clientcmdapi.Config, error) { + if err := config.ConfirmUsable(); err != nil { + return clientcmdapi.Config{}, err + } + merged := config.config.DeepCopy() + + // set the AuthInfo merged with overrides in the merged config + mergedAuthInfo, err := config.getAuthInfo() + if err != nil { + return clientcmdapi.Config{}, err + } + mergedAuthInfoName, _ := config.getAuthInfoName() + merged.AuthInfos[mergedAuthInfoName] = &mergedAuthInfo + + // set the Context merged with overrides in the merged config + mergedContext, err := config.getContext() + if err != nil { + return clientcmdapi.Config{}, err + } + mergedContextName, _ := config.getContextName() + merged.Contexts[mergedContextName] = &mergedContext + merged.CurrentContext = mergedContextName + + // set the Cluster merged with overrides in the merged config + configClusterInfo, err := config.getCluster() + if err != nil { + return clientcmdapi.Config{}, err + } + configClusterName, _ := config.getClusterName() + merged.Clusters[configClusterName] = &configClusterInfo + return *merged, nil +} + // ClientConfig implements 
ClientConfig func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) { // check that getAuthInfo, getContext, and getCluster do not return an error. diff --git a/vendor/k8s.io/client-go/tools/clientcmd/config.go b/vendor/k8s.io/client-go/tools/clientcmd/config.go index 31f8963160..2cd213ccb3 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/config.go @@ -19,7 +19,6 @@ package clientcmd import ( "errors" "os" - "path" "path/filepath" "reflect" "sort" @@ -148,7 +147,7 @@ func NewDefaultPathOptions() *PathOptions { EnvVar: RecommendedConfigPathEnvVar, ExplicitFileFlag: RecommendedConfigPathFlag, - GlobalFileSubpath: path.Join(RecommendedHomeDir, RecommendedFileName), + GlobalFileSubpath: filepath.Join(RecommendedHomeDir, RecommendedFileName), LoadingRules: NewDefaultClientConfigLoadingRules(), } diff --git a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go index c1151baf20..d9d87d55f2 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go @@ -159,6 +159,10 @@ type LeaderElectionConfig struct { // Name is the name of the resource lock for debugging Name string + + // Coordinated will use the Coordinated Leader Election feature + // WARNING: Coordinated leader election is ALPHA. + Coordinated bool } // LeaderCallbacks are callbacks that are triggered during certain @@ -249,7 +253,11 @@ func (le *LeaderElector) acquire(ctx context.Context) bool { desc := le.config.Lock.Describe() klog.Infof("attempting to acquire leader lease %v...", desc) wait.JitterUntil(func() { - succeeded = le.tryAcquireOrRenew(ctx) + if !le.config.Coordinated { + succeeded = le.tryAcquireOrRenew(ctx) + } else { + succeeded = le.tryCoordinatedRenew(ctx) + } le.maybeReportTransition() if !succeeded { klog.V(4).Infof("failed to acquire lease %v", desc) @@ -272,7 +280,11 @@ func (le *LeaderElector) renew(ctx context.Context) { timeoutCtx, timeoutCancel := context.WithTimeout(ctx, le.config.RenewDeadline) defer timeoutCancel() err := wait.PollImmediateUntil(le.config.RetryPeriod, func() (bool, error) { - return le.tryAcquireOrRenew(timeoutCtx), nil + if !le.config.Coordinated { + return le.tryAcquireOrRenew(timeoutCtx), nil + } else { + return le.tryCoordinatedRenew(timeoutCtx), nil + } }, timeoutCtx.Done()) le.maybeReportTransition() @@ -304,7 +316,9 @@ func (le *LeaderElector) release() bool { RenewTime: now, AcquireTime: now, } - if err := le.config.Lock.Update(context.TODO(), leaderElectionRecord); err != nil { + timeoutCtx, timeoutCancel := context.WithTimeout(context.Background(), le.config.RenewDeadline) + defer timeoutCancel() + if err := le.config.Lock.Update(timeoutCtx, leaderElectionRecord); err != nil { klog.Errorf("Failed to release lock: %v", err) return false } @@ -313,6 +327,81 @@ func (le *LeaderElector) release() bool { return true } +// tryCoordinatedRenew checks if it acquired a lease and tries to renew the +// lease if it has already been acquired. Returns true on success else returns +// false. +func (le *LeaderElector) tryCoordinatedRenew(ctx context.Context) bool { + now := metav1.NewTime(le.clock.Now()) + leaderElectionRecord := rl.LeaderElectionRecord{ + HolderIdentity: le.config.Lock.Identity(), + LeaseDurationSeconds: int(le.config.LeaseDuration / time.Second), + RenewTime: now, + AcquireTime: now, + } + + // 1. 
obtain the electionRecord + oldLeaderElectionRecord, oldLeaderElectionRawRecord, err := le.config.Lock.Get(ctx) + if err != nil { + if !errors.IsNotFound(err) { + klog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err) + return false + } + klog.Infof("lease lock not found: %v", le.config.Lock.Describe()) + return false + } + + // 2. Record obtained, check the Identity & Time + if !bytes.Equal(le.observedRawRecord, oldLeaderElectionRawRecord) { + le.setObservedRecord(oldLeaderElectionRecord) + + le.observedRawRecord = oldLeaderElectionRawRecord + } + + hasExpired := le.observedTime.Add(time.Second * time.Duration(oldLeaderElectionRecord.LeaseDurationSeconds)).Before(now.Time) + if hasExpired { + klog.Infof("lock has expired: %v", le.config.Lock.Describe()) + return false + } + + if !le.IsLeader() { + klog.V(6).Infof("lock is held by %v and has not yet expired: %v", oldLeaderElectionRecord.HolderIdentity, le.config.Lock.Describe()) + return false + } + + // 2b. If the lease has been marked as "end of term", don't renew it + if le.IsLeader() && oldLeaderElectionRecord.PreferredHolder != "" { + klog.V(4).Infof("lock is marked as 'end of term': %v", le.config.Lock.Describe()) + // TODO: Instead of letting lease expire, the holder may deleted it directly + // This will not be compatible with all controllers, so it needs to be opt-in behavior. + // We must ensure all code guarded by this lease has successfully completed + // prior to releasing or there may be two processes + // simultaneously acting on the critical path. + // Usually once this returns false, the process is terminated.. + // xref: OnStoppedLeading + return false + } + + // 3. We're going to try to update. The leaderElectionRecord is set to it's default + // here. Let's correct it before updating. + if le.IsLeader() { + leaderElectionRecord.AcquireTime = oldLeaderElectionRecord.AcquireTime + leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions + leaderElectionRecord.Strategy = oldLeaderElectionRecord.Strategy + le.metrics.slowpathExercised(le.config.Name) + } else { + leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions + 1 + } + + // update the lock itself + if err = le.config.Lock.Update(ctx, leaderElectionRecord); err != nil { + klog.Errorf("Failed to update lock: %v", err) + return false + } + + le.setObservedRecord(&leaderElectionRecord) + return true +} + // tryAcquireOrRenew tries to acquire a leader lease if it is not already acquired, // else it tries to renew the lease if it has already been acquired. Returns true // on success else returns false. @@ -325,7 +414,22 @@ func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool { AcquireTime: now, } - // 1. obtain or create the ElectionRecord + // 1. fast path for the leader to update optimistically assuming that the record observed + // last time is the current version. + if le.IsLeader() && le.isLeaseValid(now.Time) { + oldObservedRecord := le.getObservedRecord() + leaderElectionRecord.AcquireTime = oldObservedRecord.AcquireTime + leaderElectionRecord.LeaderTransitions = oldObservedRecord.LeaderTransitions + + err := le.config.Lock.Update(ctx, leaderElectionRecord) + if err == nil { + le.setObservedRecord(&leaderElectionRecord) + return true + } + klog.Errorf("Failed to update lock optimitically: %v, falling back to slow path", err) + } + + // 2. 
obtain or create the ElectionRecord oldLeaderElectionRecord, oldLeaderElectionRawRecord, err := le.config.Lock.Get(ctx) if err != nil { if !errors.IsNotFound(err) { @@ -342,24 +446,23 @@ func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool { return true } - // 2. Record obtained, check the Identity & Time + // 3. Record obtained, check the Identity & Time if !bytes.Equal(le.observedRawRecord, oldLeaderElectionRawRecord) { le.setObservedRecord(oldLeaderElectionRecord) le.observedRawRecord = oldLeaderElectionRawRecord } - if len(oldLeaderElectionRecord.HolderIdentity) > 0 && - le.observedTime.Add(time.Second*time.Duration(oldLeaderElectionRecord.LeaseDurationSeconds)).After(now.Time) && - !le.IsLeader() { + if len(oldLeaderElectionRecord.HolderIdentity) > 0 && le.isLeaseValid(now.Time) && !le.IsLeader() { klog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity) return false } - // 3. We're going to try to update. The leaderElectionRecord is set to it's default + // 4. We're going to try to update. The leaderElectionRecord is set to it's default // here. Let's correct it before updating. if le.IsLeader() { leaderElectionRecord.AcquireTime = oldLeaderElectionRecord.AcquireTime leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions + le.metrics.slowpathExercised(le.config.Name) } else { leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions + 1 } @@ -400,6 +503,10 @@ func (le *LeaderElector) Check(maxTolerableExpiredLease time.Duration) error { return nil } +func (le *LeaderElector) isLeaseValid(now time.Time) bool { + return le.observedTime.Add(time.Second * time.Duration(le.getObservedRecord().LeaseDurationSeconds)).After(now) +} + // setObservedRecord will set a new observedRecord and update observedTime to the current time. // Protect critical sections with lock. func (le *LeaderElector) setObservedRecord(observedRecord *rl.LeaderElectionRecord) { diff --git a/vendor/k8s.io/client-go/tools/leaderelection/leasecandidate.go b/vendor/k8s.io/client-go/tools/leaderelection/leasecandidate.go new file mode 100644 index 0000000000..74cf5bb5c2 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/leaderelection/leasecandidate.go @@ -0,0 +1,202 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package leaderelection + +import ( + "context" + "reflect" + "time" + + v1 "k8s.io/api/coordination/v1" + v1alpha1 "k8s.io/api/coordination/v1alpha1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + coordinationv1alpha1client "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + "k8s.io/utils/clock" +) + +const requeueInterval = 5 * time.Minute + +type CacheSyncWaiter interface { + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool +} + +type LeaseCandidate struct { + leaseClient coordinationv1alpha1client.LeaseCandidateInterface + leaseCandidateInformer cache.SharedIndexInformer + informerFactory informers.SharedInformerFactory + hasSynced cache.InformerSynced + + // At most there will be one item in this Queue (since we only watch one item) + queue workqueue.TypedRateLimitingInterface[int] + + name string + namespace string + + // controller lease + leaseName string + + clock clock.Clock + + binaryVersion, emulationVersion string + preferredStrategies []v1.CoordinatedLeaseStrategy +} + +// NewCandidate creates new LeaseCandidate controller that creates a +// LeaseCandidate object if it does not exist and watches changes +// to the corresponding object and renews if PingTime is set. +// WARNING: This is an ALPHA feature. Ensure that the CoordinatedLeaderElection +// feature gate is on. +func NewCandidate(clientset kubernetes.Interface, + candidateNamespace string, + candidateName string, + targetLease string, + binaryVersion, emulationVersion string, + preferredStrategies []v1.CoordinatedLeaseStrategy, +) (*LeaseCandidate, CacheSyncWaiter, error) { + fieldSelector := fields.OneTermEqualSelector("metadata.name", candidateName).String() + // A separate informer factory is required because this must start before informerFactories + // are started for leader elected components + informerFactory := informers.NewSharedInformerFactoryWithOptions( + clientset, 5*time.Minute, + informers.WithTweakListOptions(func(options *metav1.ListOptions) { + options.FieldSelector = fieldSelector + }), + ) + leaseCandidateInformer := informerFactory.Coordination().V1alpha1().LeaseCandidates().Informer() + + lc := &LeaseCandidate{ + leaseClient: clientset.CoordinationV1alpha1().LeaseCandidates(candidateNamespace), + leaseCandidateInformer: leaseCandidateInformer, + informerFactory: informerFactory, + name: candidateName, + namespace: candidateNamespace, + leaseName: targetLease, + clock: clock.RealClock{}, + binaryVersion: binaryVersion, + emulationVersion: emulationVersion, + preferredStrategies: preferredStrategies, + } + lc.queue = workqueue.NewTypedRateLimitingQueueWithConfig(workqueue.DefaultTypedControllerRateLimiter[int](), workqueue.TypedRateLimitingQueueConfig[int]{Name: "leasecandidate"}) + + h, err := leaseCandidateInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + UpdateFunc: func(oldObj, newObj interface{}) { + if leasecandidate, ok := newObj.(*v1alpha1.LeaseCandidate); ok { + if leasecandidate.Spec.PingTime != nil && leasecandidate.Spec.PingTime.After(leasecandidate.Spec.RenewTime.Time) { + lc.enqueueLease() + } + } + }, + }) + if err != nil { + return nil, nil, err + } + lc.hasSynced = h.HasSynced + + return lc, informerFactory, nil +} + +func (c *LeaseCandidate) Run(ctx 
context.Context) { + defer c.queue.ShutDown() + + c.informerFactory.Start(ctx.Done()) + if !cache.WaitForNamedCacheSync("leasecandidateclient", ctx.Done(), c.hasSynced) { + return + } + + c.enqueueLease() + go c.runWorker(ctx) + <-ctx.Done() +} + +func (c *LeaseCandidate) runWorker(ctx context.Context) { + for c.processNextWorkItem(ctx) { + } +} + +func (c *LeaseCandidate) processNextWorkItem(ctx context.Context) bool { + key, shutdown := c.queue.Get() + if shutdown { + return false + } + defer c.queue.Done(key) + + err := c.ensureLease(ctx) + if err == nil { + c.queue.AddAfter(key, requeueInterval) + return true + } + + utilruntime.HandleError(err) + c.queue.AddRateLimited(key) + + return true +} + +func (c *LeaseCandidate) enqueueLease() { + c.queue.Add(0) +} + +// ensureLease creates the lease if it does not exist and renew it if it exists. Returns the lease and +// a bool (true if this call created the lease), or any error that occurs. +func (c *LeaseCandidate) ensureLease(ctx context.Context) error { + lease, err := c.leaseClient.Get(ctx, c.name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + klog.V(2).Infof("Creating lease candidate") + // lease does not exist, create it. + leaseToCreate := c.newLeaseCandidate() + if _, err := c.leaseClient.Create(ctx, leaseToCreate, metav1.CreateOptions{}); err != nil { + return err + } + klog.V(2).Infof("Created lease candidate") + return nil + } else if err != nil { + return err + } + klog.V(2).Infof("lease candidate exists. Renewing.") + clone := lease.DeepCopy() + clone.Spec.RenewTime = &metav1.MicroTime{Time: c.clock.Now()} + _, err = c.leaseClient.Update(ctx, clone, metav1.UpdateOptions{}) + if err != nil { + return err + } + return nil +} + +func (c *LeaseCandidate) newLeaseCandidate() *v1alpha1.LeaseCandidate { + lc := &v1alpha1.LeaseCandidate{ + ObjectMeta: metav1.ObjectMeta{ + Name: c.name, + Namespace: c.namespace, + }, + Spec: v1alpha1.LeaseCandidateSpec{ + LeaseName: c.leaseName, + BinaryVersion: c.binaryVersion, + EmulationVersion: c.emulationVersion, + PreferredStrategies: c.preferredStrategies, + }, + } + lc.Spec.RenewTime = &metav1.MicroTime{Time: c.clock.Now()} + return lc +} diff --git a/vendor/k8s.io/client-go/tools/leaderelection/metrics.go b/vendor/k8s.io/client-go/tools/leaderelection/metrics.go index 65917bf88e..7438345fb1 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/metrics.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/metrics.go @@ -26,24 +26,26 @@ import ( type leaderMetricsAdapter interface { leaderOn(name string) leaderOff(name string) + slowpathExercised(name string) } -// GaugeMetric represents a single numerical value that can arbitrarily go up -// and down. -type SwitchMetric interface { +// LeaderMetric instruments metrics used in leader election. +type LeaderMetric interface { On(name string) Off(name string) + SlowpathExercised(name string) } type noopMetric struct{} -func (noopMetric) On(name string) {} -func (noopMetric) Off(name string) {} +func (noopMetric) On(name string) {} +func (noopMetric) Off(name string) {} +func (noopMetric) SlowpathExercised(name string) {} // defaultLeaderMetrics expects the caller to lock before setting any metrics. 
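// A hedged sketch (illustrative, not part of the vendored change) of creating and
// running the LeaseCandidate controller defined above for coordinated leader
// election (alpha). The namespace, names, versions and the OldestEmulationVersion
// strategy constant from coordination/v1 are illustrative assumptions.
candidate, syncWaiter, err := leaderelection.NewCandidate(
	clientset,
	"kube-system",         // namespace for the LeaseCandidate object
	"my-controller-pod-a", // this instance's candidate name
	"my-controller",       // the Lease being contended for
	"1.31.0", "1.31.0",    // binary and emulation versions
	[]coordinationv1.CoordinatedLeaseStrategy{coordinationv1.OldestEmulationVersion},
)
if err != nil {
	klog.Fatalf("creating lease candidate: %v", err)
}
go candidate.Run(ctx)
_ = syncWaiter // may be used to wait for the candidate informer cache to sync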
type defaultLeaderMetrics struct { // leader's value indicates if the current process is the owner of name lease - leader SwitchMetric + leader LeaderMetric } func (m *defaultLeaderMetrics) leaderOn(name string) { @@ -60,19 +62,27 @@ func (m *defaultLeaderMetrics) leaderOff(name string) { m.leader.Off(name) } +func (m *defaultLeaderMetrics) slowpathExercised(name string) { + if m == nil { + return + } + m.leader.SlowpathExercised(name) +} + type noMetrics struct{} -func (noMetrics) leaderOn(name string) {} -func (noMetrics) leaderOff(name string) {} +func (noMetrics) leaderOn(name string) {} +func (noMetrics) leaderOff(name string) {} +func (noMetrics) slowpathExercised(name string) {} // MetricsProvider generates various metrics used by the leader election. type MetricsProvider interface { - NewLeaderMetric() SwitchMetric + NewLeaderMetric() LeaderMetric } type noopMetricsProvider struct{} -func (_ noopMetricsProvider) NewLeaderMetric() SwitchMetric { +func (noopMetricsProvider) NewLeaderMetric() LeaderMetric { return noopMetric{} } diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go index 483753d632..053a7570d7 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go @@ -19,14 +19,15 @@ package resourcelock import ( "context" "fmt" - clientset "k8s.io/client-go/kubernetes" - restclient "k8s.io/client-go/rest" "time" + v1 "k8s.io/api/coordination/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + clientset "k8s.io/client-go/kubernetes" coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + restclient "k8s.io/client-go/rest" ) const ( @@ -114,11 +115,13 @@ type LeaderElectionRecord struct { // attempt to acquire leases with empty identities and will wait for the full lease // interval to expire before attempting to reacquire. This value is set to empty when // a client voluntarily steps down. - HolderIdentity string `json:"holderIdentity"` - LeaseDurationSeconds int `json:"leaseDurationSeconds"` - AcquireTime metav1.Time `json:"acquireTime"` - RenewTime metav1.Time `json:"renewTime"` - LeaderTransitions int `json:"leaderTransitions"` + HolderIdentity string `json:"holderIdentity"` + LeaseDurationSeconds int `json:"leaseDurationSeconds"` + AcquireTime metav1.Time `json:"acquireTime"` + RenewTime metav1.Time `json:"renewTime"` + LeaderTransitions int `json:"leaderTransitions"` + Strategy v1.CoordinatedLeaseStrategy `json:"strategy"` + PreferredHolder string `json:"preferredHolder"` } // EventRecorder records a change in the ResourceLock. 
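// A minimal sketch (illustrative, not part of the vendored change) of opting in to
// the coordinated renewal path added in this patch: setting Coordinated switches the
// LeaderElector from tryAcquireOrRenew to tryCoordinatedRenew. The lock, timings and
// callbacks are illustrative assumptions.
leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
	Lock:          lock, // e.g. a resourcelock.LeaseLock
	Name:          "my-controller",
	Coordinated:   true, // alpha; pairs with a running LeaseCandidate
	LeaseDuration: 15 * time.Second,
	RenewDeadline: 10 * time.Second,
	RetryPeriod:   2 * time.Second,
	Callbacks: leaderelection.LeaderCallbacks{
		OnStartedLeading: func(ctx context.Context) { /* start controllers */ },
		OnStoppedLeading: func() { /* stop work promptly */ },
	},
})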
diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go index 8a9d7d60f2..7cd2a8b9ca 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go @@ -122,6 +122,12 @@ func LeaseSpecToLeaderElectionRecord(spec *coordinationv1.LeaseSpec) *LeaderElec if spec.RenewTime != nil { r.RenewTime = metav1.Time{Time: spec.RenewTime.Time} } + if spec.PreferredHolder != nil { + r.PreferredHolder = *spec.PreferredHolder + } + if spec.Strategy != nil { + r.Strategy = *spec.Strategy + } return &r } @@ -129,11 +135,18 @@ func LeaseSpecToLeaderElectionRecord(spec *coordinationv1.LeaseSpec) *LeaderElec func LeaderElectionRecordToLeaseSpec(ler *LeaderElectionRecord) coordinationv1.LeaseSpec { leaseDurationSeconds := int32(ler.LeaseDurationSeconds) leaseTransitions := int32(ler.LeaderTransitions) - return coordinationv1.LeaseSpec{ + spec := coordinationv1.LeaseSpec{ HolderIdentity: &ler.HolderIdentity, LeaseDurationSeconds: &leaseDurationSeconds, AcquireTime: &metav1.MicroTime{Time: ler.AcquireTime.Time}, RenewTime: &metav1.MicroTime{Time: ler.RenewTime.Time}, LeaseTransitions: &leaseTransitions, } + if ler.PreferredHolder != "" { + spec.PreferredHolder = &ler.PreferredHolder + } + if ler.Strategy != "" { + spec.Strategy = &ler.Strategy + } + return spec } diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go index d1511696d0..55947d2094 100644 --- a/vendor/k8s.io/client-go/tools/record/event.go +++ b/vendor/k8s.io/client-go/tools/record/event.go @@ -198,16 +198,29 @@ func NewBroadcaster(opts ...BroadcasterOption) EventBroadcaster { ctx := c.Context if ctx == nil { ctx = context.Background() - } else { + } + // The are two scenarios where it makes no sense to wait for context cancelation: + // - The context was nil. + // - The context was context.Background() to begin with. + // + // Both cases get checked here: we have cancelation if (and only if) there is a channel. + haveCtxCancelation := ctx.Done() != nil + + eventBroadcaster.cancelationCtx, eventBroadcaster.cancel = context.WithCancel(ctx) + + if haveCtxCancelation { // Calling Shutdown is not required when a context was provided: // when the context is canceled, this goroutine will shut down // the broadcaster. + // + // If Shutdown is called first, then this goroutine will + // also stop. go func() { - <-ctx.Done() + <-eventBroadcaster.cancelationCtx.Done() eventBroadcaster.Broadcaster.Shutdown() }() } - eventBroadcaster.cancelationCtx, eventBroadcaster.cancel = context.WithCancel(ctx) + return eventBroadcaster } @@ -382,7 +395,11 @@ func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) watc func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface { watcher, err := e.Watch() if err != nil { + // This function traditionally returns no error even though it can fail. + // Instead, it logs the error and returns an empty watch. The empty + // watch ensures that callers don't crash when calling Stop. 
klog.FromContext(e.cancelationCtx).Error(err, "Unable start event watcher (will not retry!)") + return watch.NewEmptyWatch() } go func() { defer utilruntime.HandleCrash() diff --git a/vendor/k8s.io/client-go/transport/cert_rotation.go b/vendor/k8s.io/client-go/transport/cert_rotation.go index dc22b6ec4c..e76f65812d 100644 --- a/vendor/k8s.io/client-go/transport/cert_rotation.go +++ b/vendor/k8s.io/client-go/transport/cert_rotation.go @@ -47,14 +47,17 @@ type dynamicClientCert struct { connDialer *connrotation.Dialer // queue only ever has one item, but it has nice error handling backoff/retry semantics - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] } func certRotatingDialer(reload reloadFunc, dial utilnet.DialFunc) *dynamicClientCert { d := &dynamicClientCert{ reload: reload, connDialer: connrotation.NewDialer(connrotation.DialFunc(dial)), - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "DynamicClientCertificate"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "DynamicClientCertificate"}, + ), } return d diff --git a/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go b/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go new file mode 100644 index 0000000000..b33d08032f --- /dev/null +++ b/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go @@ -0,0 +1,146 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package consistencydetector + +import ( + "context" + "fmt" + "sort" + "time" + + "github.com/google/go-cmp/cmp" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" +) + +type RetrieveItemsFunc[U any] func() []U + +type ListFunc[T runtime.Object] func(ctx context.Context, options metav1.ListOptions) (T, error) + +// CheckDataConsistency exists solely for testing purposes. +// we cannot use checkWatchListDataConsistencyIfRequested because +// it is guarded by an environmental variable. +// we cannot manipulate the environmental variable because +// it will affect other tests in this package. +func CheckDataConsistency[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn ListFunc[T], listOptions metav1.ListOptions, retrieveItemsFn RetrieveItemsFunc[U]) { + if !canFormAdditionalListCall(lastSyncedResourceVersion, listOptions) { + klog.V(4).Infof("data consistency check for %s is enabled but the parameters (RV, ListOptions) doesn't allow for creating a valid LIST request. 
Skipping the data consistency check.", identity) + return + } + klog.Warningf("data consistency check for %s is enabled, this will result in an additional call to the API server.", identity) + + retrievedItems := toMetaObjectSliceOrDie(retrieveItemsFn()) + listOptions = prepareListCallOptions(lastSyncedResourceVersion, listOptions, len(retrievedItems)) + var list runtime.Object + err := wait.PollUntilContextCancel(ctx, time.Second, true, func(_ context.Context) (done bool, err error) { + list, err = listFn(ctx, listOptions) + if err != nil { + // the consistency check will only be enabled in the CI + // and LIST calls in general will be retired by the client-go library + // if we fail simply log and retry + klog.Errorf("failed to list data from the server, retrying until stopCh is closed, err: %v", err) + return false, nil + } + return true, nil + }) + if err != nil { + klog.Errorf("failed to list data from the server, the data consistency check for %s won't be performed, stopCh was closed, err: %v", identity, err) + return + } + + rawListItems, err := meta.ExtractListWithAlloc(list) + if err != nil { + panic(err) // this should never happen + } + listItems := toMetaObjectSliceOrDie(rawListItems) + + sort.Sort(byUID(listItems)) + sort.Sort(byUID(retrievedItems)) + + if !cmp.Equal(listItems, retrievedItems) { + klog.Infof("previously received data for %s is different than received by the standard list api call against etcd, diff: %v", identity, cmp.Diff(listItems, retrievedItems)) + msg := fmt.Sprintf("data inconsistency detected for %s, panicking!", identity) + panic(msg) + } +} + +// canFormAdditionalListCall ensures that we can form a valid LIST requests +// for checking data consistency. +func canFormAdditionalListCall(lastSyncedResourceVersion string, listOptions metav1.ListOptions) bool { + // since we are setting ResourceVersionMatch to metav1.ResourceVersionMatchExact + // we need to make sure that the continuation hasn't been set + // https://github.com/kubernetes/kubernetes/blob/be4afb9ef90b19ccb6f7e595cbdb247e088b2347/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go#L38 + if len(listOptions.Continue) > 0 { + return false + } + + // since we are setting ResourceVersionMatch to metav1.ResourceVersionMatchExact + // we need to make sure that the RV is valid because the validation code forbids RV == "0" + // https://github.com/kubernetes/kubernetes/blob/be4afb9ef90b19ccb6f7e595cbdb247e088b2347/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go#L44 + if lastSyncedResourceVersion == "0" { + return false + } + + return true +} + +// prepareListCallOptions changes the input list options so that +// the list call goes directly to etcd +func prepareListCallOptions(lastSyncedResourceVersion string, listOptions metav1.ListOptions, retrievedItemsCount int) metav1.ListOptions { + // this is our legacy case: + // + // the watch cache skips the Limit if the ResourceVersion was set to "0" + // thus, to compare with data retrieved directly from etcd + // we need to skip the limit to for the list call as well. + // + // note that when the number of retrieved items is less than the request limit, + // it means either the watch cache is disabled, or there is not enough data. + // in both cases, we can use the limit because we will be able to compare + // the data with the items retrieved from etcd. 
+ if listOptions.ResourceVersion == "0" && listOptions.Limit > 0 && int64(retrievedItemsCount) > listOptions.Limit { + listOptions.Limit = 0 + } + + // set the RV and RVM so that we get the snapshot of data + // directly from etcd. + listOptions.ResourceVersion = lastSyncedResourceVersion + listOptions.ResourceVersionMatch = metav1.ResourceVersionMatchExact + + return listOptions +} + +type byUID []metav1.Object + +func (a byUID) Len() int { return len(a) } +func (a byUID) Less(i, j int) bool { return a[i].GetUID() < a[j].GetUID() } +func (a byUID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func toMetaObjectSliceOrDie[T any](s []T) []metav1.Object { + result := make([]metav1.Object, len(s)) + for i, v := range s { + m, err := meta.Accessor(v) + if err != nil { + panic(err) + } + result[i] = m + } + return result +} diff --git a/vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go b/vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go new file mode 100644 index 0000000000..7610c05c28 --- /dev/null +++ b/vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go @@ -0,0 +1,70 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package consistencydetector + +import ( + "context" + "os" + "strconv" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +var dataConsistencyDetectionForListFromCacheEnabled = false + +func init() { + dataConsistencyDetectionForListFromCacheEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR")) +} + +// CheckListFromCacheDataConsistencyIfRequested performs a data consistency check only when +// the KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR environment variable was set during a binary startup +// for requests that have a high chance of being served from the watch-cache. +// +// The consistency check is meant to be enforced only in the CI, not in production. +// The check ensures that data retrieved by a list api call from the watch-cache +// is exactly the same as data received by the list api call from etcd. +// +// Note that this function will panic when data inconsistency is detected. +// This is intentional because we want to catch it in the CI. +// +// Note that this function doesn't examine the ListOptions to determine +// if the original request has hit the cache because it would be challenging +// to maintain consistency with the server-side implementation. +// For simplicity, we assume that the first request retrieved data from +// the cache (even though this might not be true for some requests) +// and issue the second call to get data from etcd for comparison. 
+func CheckListFromCacheDataConsistencyIfRequested[T runtime.Object](ctx context.Context, identity string, listItemsFn ListFunc[T], optionsUsedToReceiveList metav1.ListOptions, receivedList runtime.Object) { + if !dataConsistencyDetectionForListFromCacheEnabled { + return + } + checkListFromCacheDataConsistencyIfRequestedInternal(ctx, identity, listItemsFn, optionsUsedToReceiveList, receivedList) +} + +func checkListFromCacheDataConsistencyIfRequestedInternal[T runtime.Object](ctx context.Context, identity string, listItemsFn ListFunc[T], optionsUsedToReceiveList metav1.ListOptions, receivedList runtime.Object) { + receivedListMeta, err := meta.ListAccessor(receivedList) + if err != nil { + panic(err) + } + rawListItems, err := meta.ExtractListWithAlloc(receivedList) + if err != nil { + panic(err) // this should never happen + } + lastSyncedResourceVersion := receivedListMeta.GetResourceVersion() + CheckDataConsistency(ctx, identity, lastSyncedResourceVersion, listItemsFn, optionsUsedToReceiveList, func() []runtime.Object { return rawListItems }) +} diff --git a/vendor/k8s.io/client-go/util/consistencydetector/watch_list_data_consistency_detector.go b/vendor/k8s.io/client-go/util/consistencydetector/watch_list_data_consistency_detector.go new file mode 100644 index 0000000000..cda5fc205f --- /dev/null +++ b/vendor/k8s.io/client-go/util/consistencydetector/watch_list_data_consistency_detector.go @@ -0,0 +1,54 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package consistencydetector + +import ( + "context" + "os" + "strconv" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +var dataConsistencyDetectionForWatchListEnabled = false + +func init() { + dataConsistencyDetectionForWatchListEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_WATCHLIST_INCONSISTENCY_DETECTOR")) +} + +// IsDataConsistencyDetectionForWatchListEnabled returns true when +// the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup. +func IsDataConsistencyDetectionForWatchListEnabled() bool { + return dataConsistencyDetectionForWatchListEnabled +} + +// CheckWatchListFromCacheDataConsistencyIfRequested performs a data consistency check only when +// the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup. +// +// The consistency check is meant to be enforced only in the CI, not in production. +// The check ensures that data retrieved by the watch-list api call +// is exactly the same as data received by the standard list api call against etcd. +// +// Note that this function will panic when data inconsistency is detected. +// This is intentional because we want to catch it in the CI. 
+func CheckWatchListFromCacheDataConsistencyIfRequested[T runtime.Object](ctx context.Context, identity string, listItemsFn ListFunc[T], optionsUsedToReceiveList metav1.ListOptions, receivedList runtime.Object) { + if !IsDataConsistencyDetectionForWatchListEnabled() { + return + } + checkListFromCacheDataConsistencyIfRequestedInternal(ctx, identity, listItemsFn, optionsUsedToReceiveList, receivedList) +} diff --git a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go index 3ef88dbdb8..82e4c4c408 100644 --- a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go +++ b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go @@ -23,7 +23,6 @@ import ( "k8s.io/utils/clock" testingclock "k8s.io/utils/clock/testing" - "k8s.io/utils/integer" ) type backoffEntry struct { @@ -100,7 +99,7 @@ func (p *Backoff) Next(id string, eventTime time.Time) { } else { delay := entry.backoff * 2 // exponential delay += p.jitter(entry.backoff) // add some jitter to the delay - entry.backoff = time.Duration(integer.Int64Min(int64(delay), int64(p.maxDuration))) + entry.backoff = min(delay, p.maxDuration) } entry.lastUpdate = p.Clock.Now() } diff --git a/vendor/k8s.io/client-go/util/watchlist/watch_list.go b/vendor/k8s.io/client-go/util/watchlist/watch_list.go new file mode 100644 index 0000000000..84106458a5 --- /dev/null +++ b/vendor/k8s.io/client-go/util/watchlist/watch_list.go @@ -0,0 +1,82 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watchlist + +import ( + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metainternalversionvalidation "k8s.io/apimachinery/pkg/apis/meta/internalversion/validation" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientfeatures "k8s.io/client-go/features" + "k8s.io/utils/ptr" +) + +var scheme = runtime.NewScheme() + +func init() { + utilruntime.Must(metainternalversion.AddToScheme(scheme)) +} + +// PrepareWatchListOptionsFromListOptions creates a new ListOptions +// that can be used for a watch-list request from the given listOptions. +// +// This function also determines if the given listOptions can be used to form a watch-list request, +// which would result in streaming semantically equivalent data from the server. 
+func PrepareWatchListOptionsFromListOptions(listOptions metav1.ListOptions) (metav1.ListOptions, bool, error) { + if !clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient) { + return metav1.ListOptions{}, false, nil + } + + internalListOptions := &metainternalversion.ListOptions{} + if err := scheme.Convert(&listOptions, internalListOptions, nil); err != nil { + return metav1.ListOptions{}, false, err + } + if errs := metainternalversionvalidation.ValidateListOptions(internalListOptions, true); len(errs) > 0 { + return metav1.ListOptions{}, false, nil + } + + watchListOptions := listOptions + // this is our legacy case, the cache ignores LIMIT for + // ResourceVersion == 0 and RVM=unset|NotOlderThan + if listOptions.Limit > 0 && listOptions.ResourceVersion != "0" { + return metav1.ListOptions{}, false, nil + } + watchListOptions.Limit = 0 + + // to ensure that we can create a watch-list request that returns + // semantically equivalent data for the given listOptions, + // we need to validate that the RVM for the list is supported by watch-list requests. + if listOptions.ResourceVersionMatch == metav1.ResourceVersionMatchExact { + return metav1.ListOptions{}, false, nil + } + watchListOptions.ResourceVersionMatch = metav1.ResourceVersionMatchNotOlderThan + + watchListOptions.Watch = true + watchListOptions.AllowWatchBookmarks = true + watchListOptions.SendInitialEvents = ptr.To(true) + + internalWatchListOptions := &metainternalversion.ListOptions{} + if err := scheme.Convert(&watchListOptions, internalWatchListOptions, nil); err != nil { + return metav1.ListOptions{}, false, err + } + if errs := metainternalversionvalidation.ValidateListOptions(internalWatchListOptions, true); len(errs) > 0 { + return metav1.ListOptions{}, false, nil + } + + return watchListOptions, true, nil +} diff --git a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go index efda7c197f..1f9567881c 100644 --- a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go +++ b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go @@ -24,49 +24,66 @@ import ( "golang.org/x/time/rate" ) -type RateLimiter interface { +// Deprecated: RateLimiter is deprecated, use TypedRateLimiter instead. +type RateLimiter TypedRateLimiter[any] + +type TypedRateLimiter[T comparable] interface { // When gets an item and gets to decide how long that item should wait - When(item interface{}) time.Duration + When(item T) time.Duration // Forget indicates that an item is finished being retried. Doesn't matter whether it's for failing // or for success, we'll stop tracking it - Forget(item interface{}) + Forget(item T) // NumRequeues returns back how many failures the item has had - NumRequeues(item interface{}) int + NumRequeues(item T) int } // DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has // both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential +// +// Deprecated: Use DefaultTypedControllerRateLimiter instead. func DefaultControllerRateLimiter() RateLimiter { - return NewMaxOfRateLimiter( - NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second), + return DefaultTypedControllerRateLimiter[any]() +} + +// DefaultTypedControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has +// both overall and per-item rate limiting. 
The overall is a token bucket and the per-item is exponential +func DefaultTypedControllerRateLimiter[T comparable]() TypedRateLimiter[T] { + return NewTypedMaxOfRateLimiter( + NewTypedItemExponentialFailureRateLimiter[T](5*time.Millisecond, 1000*time.Second), // 10 qps, 100 bucket size. This is only for retry speed and its only the overall factor (not per item) - &BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + &TypedBucketRateLimiter[T]{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, ) } -// BucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API -type BucketRateLimiter struct { +// Deprecated: BucketRateLimiter is deprecated, use TypedBucketRateLimiter instead. +type BucketRateLimiter = TypedBucketRateLimiter[any] + +// TypedBucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API +type TypedBucketRateLimiter[T comparable] struct { *rate.Limiter } var _ RateLimiter = &BucketRateLimiter{} -func (r *BucketRateLimiter) When(item interface{}) time.Duration { +func (r *TypedBucketRateLimiter[T]) When(item T) time.Duration { return r.Limiter.Reserve().Delay() } -func (r *BucketRateLimiter) NumRequeues(item interface{}) int { +func (r *TypedBucketRateLimiter[T]) NumRequeues(item T) int { return 0 } -func (r *BucketRateLimiter) Forget(item interface{}) { +func (r *TypedBucketRateLimiter[T]) Forget(item T) { } -// ItemExponentialFailureRateLimiter does a simple baseDelay*2^ limit +// Deprecated: ItemExponentialFailureRateLimiter is deprecated, use TypedItemExponentialFailureRateLimiter instead. +type ItemExponentialFailureRateLimiter = TypedItemExponentialFailureRateLimiter[any] + +// TypedItemExponentialFailureRateLimiter does a simple baseDelay*2^ limit // dealing with max failures and expiration are up to the caller -type ItemExponentialFailureRateLimiter struct { +type TypedItemExponentialFailureRateLimiter[T comparable] struct { failuresLock sync.Mutex - failures map[interface{}]int + failures map[T]int baseDelay time.Duration maxDelay time.Duration @@ -74,19 +91,29 @@ type ItemExponentialFailureRateLimiter struct { var _ RateLimiter = &ItemExponentialFailureRateLimiter{} +// Deprecated: NewItemExponentialFailureRateLimiter is deprecated, use NewTypedItemExponentialFailureRateLimiter instead. func NewItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) RateLimiter { - return &ItemExponentialFailureRateLimiter{ - failures: map[interface{}]int{}, + return NewTypedItemExponentialFailureRateLimiter[any](baseDelay, maxDelay) +} + +func NewTypedItemExponentialFailureRateLimiter[T comparable](baseDelay time.Duration, maxDelay time.Duration) TypedRateLimiter[T] { + return &TypedItemExponentialFailureRateLimiter[T]{ + failures: map[T]int{}, baseDelay: baseDelay, maxDelay: maxDelay, } } +// Deprecated: DefaultItemBasedRateLimiter is deprecated, use DefaultTypedItemBasedRateLimiter instead. 
func DefaultItemBasedRateLimiter() RateLimiter { - return NewItemExponentialFailureRateLimiter(time.Millisecond, 1000*time.Second) + return DefaultTypedItemBasedRateLimiter[any]() } -func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration { +func DefaultTypedItemBasedRateLimiter[T comparable]() TypedRateLimiter[T] { + return NewTypedItemExponentialFailureRateLimiter[T](time.Millisecond, 1000*time.Second) +} + +func (r *TypedItemExponentialFailureRateLimiter[T]) When(item T) time.Duration { r.failuresLock.Lock() defer r.failuresLock.Unlock() @@ -107,14 +134,14 @@ func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration return calculated } -func (r *ItemExponentialFailureRateLimiter) NumRequeues(item interface{}) int { +func (r *TypedItemExponentialFailureRateLimiter[T]) NumRequeues(item T) int { r.failuresLock.Lock() defer r.failuresLock.Unlock() return r.failures[item] } -func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) { +func (r *TypedItemExponentialFailureRateLimiter[T]) Forget(item T) { r.failuresLock.Lock() defer r.failuresLock.Unlock() @@ -122,9 +149,13 @@ func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) { } // ItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that -type ItemFastSlowRateLimiter struct { +// Deprecated: Use TypedItemFastSlowRateLimiter instead. +type ItemFastSlowRateLimiter = TypedItemFastSlowRateLimiter[any] + +// TypedItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that +type TypedItemFastSlowRateLimiter[T comparable] struct { failuresLock sync.Mutex - failures map[interface{}]int + failures map[T]int maxFastAttempts int fastDelay time.Duration @@ -133,16 +164,21 @@ type ItemFastSlowRateLimiter struct { var _ RateLimiter = &ItemFastSlowRateLimiter{} +// Deprecated: NewItemFastSlowRateLimiter is deprecated, use NewTypedItemFastSlowRateLimiter instead. 
func NewItemFastSlowRateLimiter(fastDelay, slowDelay time.Duration, maxFastAttempts int) RateLimiter { - return &ItemFastSlowRateLimiter{ - failures: map[interface{}]int{}, + return NewTypedItemFastSlowRateLimiter[any](fastDelay, slowDelay, maxFastAttempts) +} + +func NewTypedItemFastSlowRateLimiter[T comparable](fastDelay, slowDelay time.Duration, maxFastAttempts int) TypedRateLimiter[T] { + return &TypedItemFastSlowRateLimiter[T]{ + failures: map[T]int{}, fastDelay: fastDelay, slowDelay: slowDelay, maxFastAttempts: maxFastAttempts, } } -func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration { +func (r *TypedItemFastSlowRateLimiter[T]) When(item T) time.Duration { r.failuresLock.Lock() defer r.failuresLock.Unlock() @@ -155,14 +191,14 @@ func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration { return r.slowDelay } -func (r *ItemFastSlowRateLimiter) NumRequeues(item interface{}) int { +func (r *TypedItemFastSlowRateLimiter[T]) NumRequeues(item T) int { r.failuresLock.Lock() defer r.failuresLock.Unlock() return r.failures[item] } -func (r *ItemFastSlowRateLimiter) Forget(item interface{}) { +func (r *TypedItemFastSlowRateLimiter[T]) Forget(item T) { r.failuresLock.Lock() defer r.failuresLock.Unlock() @@ -172,11 +208,18 @@ func (r *ItemFastSlowRateLimiter) Forget(item interface{}) { // MaxOfRateLimiter calls every RateLimiter and returns the worst case response // When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items // were separately delayed a longer time. -type MaxOfRateLimiter struct { - limiters []RateLimiter +// +// Deprecated: Use TypedMaxOfRateLimiter instead. +type MaxOfRateLimiter = TypedMaxOfRateLimiter[any] + +// TypedMaxOfRateLimiter calls every RateLimiter and returns the worst case response +// When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items +// were separately delayed a longer time. +type TypedMaxOfRateLimiter[T comparable] struct { + limiters []TypedRateLimiter[T] } -func (r *MaxOfRateLimiter) When(item interface{}) time.Duration { +func (r *TypedMaxOfRateLimiter[T]) When(item T) time.Duration { ret := time.Duration(0) for _, limiter := range r.limiters { curr := limiter.When(item) @@ -188,11 +231,16 @@ func (r *MaxOfRateLimiter) When(item interface{}) time.Duration { return ret } -func NewMaxOfRateLimiter(limiters ...RateLimiter) RateLimiter { - return &MaxOfRateLimiter{limiters: limiters} +// Deprecated: NewMaxOfRateLimiter is deprecated, use NewTypedMaxOfRateLimiter instead. +func NewMaxOfRateLimiter(limiters ...TypedRateLimiter[any]) RateLimiter { + return NewTypedMaxOfRateLimiter(limiters...) } -func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int { +func NewTypedMaxOfRateLimiter[T comparable](limiters ...TypedRateLimiter[T]) TypedRateLimiter[T] { + return &TypedMaxOfRateLimiter[T]{limiters: limiters} +} + +func (r *TypedMaxOfRateLimiter[T]) NumRequeues(item T) int { ret := 0 for _, limiter := range r.limiters { curr := limiter.NumRequeues(item) @@ -204,23 +252,32 @@ func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int { return ret } -func (r *MaxOfRateLimiter) Forget(item interface{}) { +func (r *TypedMaxOfRateLimiter[T]) Forget(item T) { for _, limiter := range r.limiters { limiter.Forget(item) } } // WithMaxWaitRateLimiter have maxDelay which avoids waiting too long -type WithMaxWaitRateLimiter struct { - limiter RateLimiter +// Deprecated: Use TypedWithMaxWaitRateLimiter instead. 
+type WithMaxWaitRateLimiter = TypedWithMaxWaitRateLimiter[any] + +// TypedWithMaxWaitRateLimiter have maxDelay which avoids waiting too long +type TypedWithMaxWaitRateLimiter[T comparable] struct { + limiter TypedRateLimiter[T] maxDelay time.Duration } +// Deprecated: NewWithMaxWaitRateLimiter is deprecated, use NewTypedWithMaxWaitRateLimiter instead. func NewWithMaxWaitRateLimiter(limiter RateLimiter, maxDelay time.Duration) RateLimiter { - return &WithMaxWaitRateLimiter{limiter: limiter, maxDelay: maxDelay} + return NewTypedWithMaxWaitRateLimiter[any](limiter, maxDelay) +} + +func NewTypedWithMaxWaitRateLimiter[T comparable](limiter TypedRateLimiter[T], maxDelay time.Duration) TypedRateLimiter[T] { + return &TypedWithMaxWaitRateLimiter[T]{limiter: limiter, maxDelay: maxDelay} } -func (w WithMaxWaitRateLimiter) When(item interface{}) time.Duration { +func (w TypedWithMaxWaitRateLimiter[T]) When(item T) time.Duration { delay := w.limiter.When(item) if delay > w.maxDelay { return w.maxDelay @@ -229,10 +286,10 @@ func (w WithMaxWaitRateLimiter) When(item interface{}) time.Duration { return delay } -func (w WithMaxWaitRateLimiter) Forget(item interface{}) { +func (w TypedWithMaxWaitRateLimiter[T]) Forget(item T) { w.limiter.Forget(item) } -func (w WithMaxWaitRateLimiter) NumRequeues(item interface{}) int { +func (w TypedWithMaxWaitRateLimiter[T]) NumRequeues(item T) int { return w.limiter.NumRequeues(item) } diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go index c1df720302..958b96a80c 100644 --- a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go @@ -27,14 +27,25 @@ import ( // DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to // requeue items after failures without ending up in a hot-loop. -type DelayingInterface interface { - Interface +// +// Deprecated: use TypedDelayingInterface instead. +type DelayingInterface TypedDelayingInterface[any] + +// TypedDelayingInterface is an Interface that can Add an item at a later time. This makes it easier to +// requeue items after failures without ending up in a hot-loop. +type TypedDelayingInterface[T comparable] interface { + TypedInterface[T] // AddAfter adds an item to the workqueue after the indicated duration has passed - AddAfter(item interface{}, duration time.Duration) + AddAfter(item T, duration time.Duration) } // DelayingQueueConfig specifies optional configurations to customize a DelayingInterface. -type DelayingQueueConfig struct { +// +// Deprecated: use TypedDelayingQueueConfig instead. +type DelayingQueueConfig = TypedDelayingQueueConfig[any] + +// TypedDelayingQueueConfig specifies optional configurations to customize a DelayingInterface. +type TypedDelayingQueueConfig[T comparable] struct { // Name for the queue. If unnamed, the metrics will not be registered. Name string @@ -46,25 +57,42 @@ type DelayingQueueConfig struct { Clock clock.WithTicker // Queue optionally allows injecting custom queue Interface instead of the default one. - Queue Interface + Queue TypedInterface[T] } // NewDelayingQueue constructs a new workqueue with delayed queuing ability. // NewDelayingQueue does not emit metrics. For use with a MetricsProvider, please use // NewDelayingQueueWithConfig instead and specify a name. +// +// Deprecated: use TypedNewDelayingQueue instead. 
func NewDelayingQueue() DelayingInterface { return NewDelayingQueueWithConfig(DelayingQueueConfig{}) } +// TypedNewDelayingQueue constructs a new workqueue with delayed queuing ability. +// TypedNewDelayingQueue does not emit metrics. For use with a MetricsProvider, please use +// TypedNewDelayingQueueWithConfig instead and specify a name. +func TypedNewDelayingQueue[T comparable]() TypedDelayingInterface[T] { + return NewTypedDelayingQueueWithConfig(TypedDelayingQueueConfig[T]{}) +} + // NewDelayingQueueWithConfig constructs a new workqueue with options to // customize different properties. +// +// Deprecated: use TypedNewDelayingQueueWithConfig instead. func NewDelayingQueueWithConfig(config DelayingQueueConfig) DelayingInterface { + return NewTypedDelayingQueueWithConfig[any](config) +} + +// NewTypedDelayingQueueWithConfig constructs a new workqueue with options to +// customize different properties. +func NewTypedDelayingQueueWithConfig[T comparable](config TypedDelayingQueueConfig[T]) TypedDelayingInterface[T] { if config.Clock == nil { config.Clock = clock.RealClock{} } if config.Queue == nil { - config.Queue = NewWithConfig(QueueConfig{ + config.Queue = NewTypedWithConfig[T](TypedQueueConfig[T]{ Name: config.Name, MetricsProvider: config.MetricsProvider, Clock: config.Clock, @@ -100,9 +128,9 @@ func NewDelayingQueueWithCustomClock(clock clock.WithTicker, name string) Delayi }) } -func newDelayingQueue(clock clock.WithTicker, q Interface, name string, provider MetricsProvider) *delayingType { - ret := &delayingType{ - Interface: q, +func newDelayingQueue[T comparable](clock clock.WithTicker, q TypedInterface[T], name string, provider MetricsProvider) *delayingType[T] { + ret := &delayingType[T]{ + TypedInterface: q, clock: clock, heartbeat: clock.NewTicker(maxWait), stopCh: make(chan struct{}), @@ -115,8 +143,8 @@ func newDelayingQueue(clock clock.WithTicker, q Interface, name string, provider } // delayingType wraps an Interface and provides delayed re-enquing -type delayingType struct { - Interface +type delayingType[T comparable] struct { + TypedInterface[T] // clock tracks time for delayed firing clock clock.Clock @@ -193,16 +221,16 @@ func (pq waitForPriorityQueue) Peek() interface{} { // ShutDown stops the queue. After the queue drains, the returned shutdown bool // on Get() will be true. This method may be invoked more than once. -func (q *delayingType) ShutDown() { +func (q *delayingType[T]) ShutDown() { q.stopOnce.Do(func() { - q.Interface.ShutDown() + q.TypedInterface.ShutDown() close(q.stopCh) q.heartbeat.Stop() }) } // AddAfter adds the given item to the work queue after the given delay -func (q *delayingType) AddAfter(item interface{}, duration time.Duration) { +func (q *delayingType[T]) AddAfter(item T, duration time.Duration) { // don't add if we're already shutting down if q.ShuttingDown() { return @@ -229,7 +257,7 @@ func (q *delayingType) AddAfter(item interface{}, duration time.Duration) { const maxWait = 10 * time.Second // waitingLoop runs until the workqueue is shutdown and keeps a check on the list of items to be added. 
-func (q *delayingType) waitingLoop() { +func (q *delayingType[T]) waitingLoop() { defer utilruntime.HandleCrash() // Make a placeholder channel to use when there are no items in our list @@ -244,7 +272,7 @@ func (q *delayingType) waitingLoop() { waitingEntryByData := map[t]*waitFor{} for { - if q.Interface.ShuttingDown() { + if q.TypedInterface.ShuttingDown() { return } @@ -258,7 +286,7 @@ func (q *delayingType) waitingLoop() { } entry = heap.Pop(waitingForQueue).(*waitFor) - q.Add(entry.data) + q.Add(entry.data.(T)) delete(waitingEntryByData, entry.data) } @@ -287,7 +315,7 @@ func (q *delayingType) waitingLoop() { if waitEntry.readyAt.After(q.clock.Now()) { insert(waitingForQueue, waitingEntryByData, waitEntry) } else { - q.Add(waitEntry.data) + q.Add(waitEntry.data.(T)) } drained := false @@ -297,7 +325,7 @@ func (q *delayingType) waitingLoop() { if waitEntry.readyAt.After(q.clock.Now()) { insert(waitingForQueue, waitingEntryByData, waitEntry) } else { - q.Add(waitEntry.data) + q.Add(waitEntry.data.(T)) } default: drained = true diff --git a/vendor/k8s.io/client-go/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go index a363d1afb4..ff715482c1 100644 --- a/vendor/k8s.io/client-go/util/workqueue/queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/queue.go @@ -23,18 +23,66 @@ import ( "k8s.io/utils/clock" ) -type Interface interface { - Add(item interface{}) +// Deprecated: Interface is deprecated, use TypedInterface instead. +type Interface TypedInterface[any] + +type TypedInterface[T comparable] interface { + Add(item T) Len() int - Get() (item interface{}, shutdown bool) - Done(item interface{}) + Get() (item T, shutdown bool) + Done(item T) ShutDown() ShutDownWithDrain() ShuttingDown() bool } +// Queue is the underlying storage for items. The functions below are always +// called from the same goroutine. +type Queue[T comparable] interface { + // Touch can be hooked when an existing item is added again. This may be + // useful if the implementation allows priority change for the given item. + Touch(item T) + // Push adds a new item. + Push(item T) + // Len tells the total number of items. + Len() int + // Pop retrieves an item. + Pop() (item T) +} + +// DefaultQueue is a slice based FIFO queue. +func DefaultQueue[T comparable]() Queue[T] { + return new(queue[T]) +} + +// queue is a slice which implements Queue. +type queue[T comparable] []T + +func (q *queue[T]) Touch(item T) {} + +func (q *queue[T]) Push(item T) { + *q = append(*q, item) +} + +func (q *queue[T]) Len() int { + return len(*q) +} + +func (q *queue[T]) Pop() (item T) { + item = (*q)[0] + + // The underlying array still exists and reference this object, so the object will not be garbage collected. + (*q)[0] = *new(T) + *q = (*q)[1:] + + return item +} + // QueueConfig specifies optional configurations to customize an Interface. -type QueueConfig struct { +// Deprecated: use TypedQueueConfig instead. +type QueueConfig = TypedQueueConfig[any] + +type TypedQueueConfig[T comparable] struct { // Name for the queue. If unnamed, the metrics will not be registered. Name string @@ -44,18 +92,38 @@ type QueueConfig struct { // Clock ability to inject real or fake clock for testing purposes. Clock clock.WithTicker + + // Queue provides the underlying queue to use. It is optional and defaults to slice based FIFO queue. + Queue Queue[T] } // New constructs a new work queue (see the package comment). +// +// Deprecated: use NewTyped instead. 
func New() *Type { return NewWithConfig(QueueConfig{ Name: "", }) } +// NewTyped constructs a new work queue (see the package comment). +func NewTyped[T comparable]() *Typed[T] { + return NewTypedWithConfig(TypedQueueConfig[T]{ + Name: "", + }) +} + // NewWithConfig constructs a new workqueue with ability to // customize different properties. +// +// Deprecated: use NewTypedWithConfig instead. func NewWithConfig(config QueueConfig) *Type { + return NewTypedWithConfig(config) +} + +// NewTypedWithConfig constructs a new workqueue with ability to +// customize different properties. +func NewTypedWithConfig[T comparable](config TypedQueueConfig[T]) *Typed[T] { return newQueueWithConfig(config, defaultUnfinishedWorkUpdatePeriod) } @@ -69,7 +137,7 @@ func NewNamed(name string) *Type { // newQueueWithConfig constructs a new named workqueue // with the ability to customize different properties for testing purposes -func newQueueWithConfig(config QueueConfig, updatePeriod time.Duration) *Type { +func newQueueWithConfig[T comparable](config TypedQueueConfig[T], updatePeriod time.Duration) *Typed[T] { var metricsFactory *queueMetricsFactory if config.MetricsProvider != nil { metricsFactory = &queueMetricsFactory{ @@ -83,18 +151,24 @@ func newQueueWithConfig(config QueueConfig, updatePeriod time.Duration) *Type { config.Clock = clock.RealClock{} } + if config.Queue == nil { + config.Queue = DefaultQueue[T]() + } + return newQueue( config.Clock, + config.Queue, metricsFactory.newQueueMetrics(config.Name, config.Clock), updatePeriod, ) } -func newQueue(c clock.WithTicker, metrics queueMetrics, updatePeriod time.Duration) *Type { - t := &Type{ +func newQueue[T comparable](c clock.WithTicker, queue Queue[T], metrics queueMetrics, updatePeriod time.Duration) *Typed[T] { + t := &Typed[T]{ clock: c, - dirty: set{}, - processing: set{}, + queue: queue, + dirty: set[T]{}, + processing: set[T]{}, cond: sync.NewCond(&sync.Mutex{}), metrics: metrics, unfinishedWorkUpdatePeriod: updatePeriod, @@ -112,20 +186,23 @@ func newQueue(c clock.WithTicker, metrics queueMetrics, updatePeriod time.Durati const defaultUnfinishedWorkUpdatePeriod = 500 * time.Millisecond // Type is a work queue (see the package comment). -type Type struct { +// Deprecated: Use Typed instead. +type Type = Typed[any] + +type Typed[t comparable] struct { // queue defines the order in which we will work on items. Every // element of queue should be in the dirty set and not in the // processing set. - queue []t + queue Queue[t] // dirty defines all of the items that need to be processed. - dirty set + dirty set[t] // Things that are currently being processed are in the processing set. // These things may be simultaneously in the dirty set. When we finish // processing something and remove it from this set, we'll check if // it's in the dirty set, and if so, add it to the queue. - processing set + processing set[t] cond *sync.Cond @@ -140,33 +217,38 @@ type Type struct { type empty struct{} type t interface{} -type set map[t]empty +type set[t comparable] map[t]empty -func (s set) has(item t) bool { +func (s set[t]) has(item t) bool { _, exists := s[item] return exists } -func (s set) insert(item t) { +func (s set[t]) insert(item t) { s[item] = empty{} } -func (s set) delete(item t) { +func (s set[t]) delete(item t) { delete(s, item) } -func (s set) len() int { +func (s set[t]) len() int { return len(s) } // Add marks item as needing processing. 
-func (q *Type) Add(item interface{}) { +func (q *Typed[T]) Add(item T) { q.cond.L.Lock() defer q.cond.L.Unlock() if q.shuttingDown { return } if q.dirty.has(item) { + // the same item is added again before it is processed, call the Touch + // function if the queue cares about it (for e.g, reset its priority) + if !q.processing.has(item) { + q.queue.Touch(item) + } return } @@ -177,37 +259,34 @@ func (q *Type) Add(item interface{}) { return } - q.queue = append(q.queue, item) + q.queue.Push(item) q.cond.Signal() } // Len returns the current queue length, for informational purposes only. You // shouldn't e.g. gate a call to Add() or Get() on Len() being a particular // value, that can't be synchronized properly. -func (q *Type) Len() int { +func (q *Typed[T]) Len() int { q.cond.L.Lock() defer q.cond.L.Unlock() - return len(q.queue) + return q.queue.Len() } // Get blocks until it can return an item to be processed. If shutdown = true, // the caller should end their goroutine. You must call Done with item when you // have finished processing it. -func (q *Type) Get() (item interface{}, shutdown bool) { +func (q *Typed[T]) Get() (item T, shutdown bool) { q.cond.L.Lock() defer q.cond.L.Unlock() - for len(q.queue) == 0 && !q.shuttingDown { + for q.queue.Len() == 0 && !q.shuttingDown { q.cond.Wait() } - if len(q.queue) == 0 { + if q.queue.Len() == 0 { // We must be shutting down. - return nil, true + return *new(T), true } - item = q.queue[0] - // The underlying array still exists and reference this object, so the object will not be garbage collected. - q.queue[0] = nil - q.queue = q.queue[1:] + item = q.queue.Pop() q.metrics.get(item) @@ -220,7 +299,7 @@ func (q *Type) Get() (item interface{}, shutdown bool) { // Done marks item as done processing, and if it has been marked as dirty again // while it was being processed, it will be re-added to the queue for // re-processing. -func (q *Type) Done(item interface{}) { +func (q *Typed[T]) Done(item T) { q.cond.L.Lock() defer q.cond.L.Unlock() @@ -228,7 +307,7 @@ func (q *Type) Done(item interface{}) { q.processing.delete(item) if q.dirty.has(item) { - q.queue = append(q.queue, item) + q.queue.Push(item) q.cond.Signal() } else if q.processing.len() == 0 { q.cond.Signal() @@ -237,7 +316,7 @@ func (q *Type) Done(item interface{}) { // ShutDown will cause q to ignore all new items added to it and // immediately instruct the worker goroutines to exit. -func (q *Type) ShutDown() { +func (q *Typed[T]) ShutDown() { q.cond.L.Lock() defer q.cond.L.Unlock() @@ -255,7 +334,7 @@ func (q *Type) ShutDown() { // indefinitely. It is, however, safe to call ShutDown after having called // ShutDownWithDrain, as to force the queue shut down to terminate immediately // without waiting for the drainage. 
-func (q *Type) ShutDownWithDrain() { +func (q *Typed[T]) ShutDownWithDrain() { q.cond.L.Lock() defer q.cond.L.Unlock() @@ -268,14 +347,14 @@ func (q *Type) ShutDownWithDrain() { } } -func (q *Type) ShuttingDown() bool { +func (q *Typed[T]) ShuttingDown() bool { q.cond.L.Lock() defer q.cond.L.Unlock() return q.shuttingDown } -func (q *Type) updateUnfinishedWorkLoop() { +func (q *Typed[T]) updateUnfinishedWorkLoop() { t := q.clock.NewTicker(q.unfinishedWorkUpdatePeriod) defer t.Stop() for range t.C() { diff --git a/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go index 3e4016fb04..fe45afa5a4 100644 --- a/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go @@ -19,24 +19,33 @@ package workqueue import "k8s.io/utils/clock" // RateLimitingInterface is an interface that rate limits items being added to the queue. -type RateLimitingInterface interface { - DelayingInterface +// +// Deprecated: Use TypedRateLimitingInterface instead. +type RateLimitingInterface TypedRateLimitingInterface[any] + +// TypedRateLimitingInterface is an interface that rate limits items being added to the queue. +type TypedRateLimitingInterface[T comparable] interface { + TypedDelayingInterface[T] // AddRateLimited adds an item to the workqueue after the rate limiter says it's ok - AddRateLimited(item interface{}) + AddRateLimited(item T) // Forget indicates that an item is finished being retried. Doesn't matter whether it's for perm failing // or for success, we'll stop the rate limiter from tracking it. This only clears the `rateLimiter`, you // still have to call `Done` on the queue. - Forget(item interface{}) + Forget(item T) // NumRequeues returns back how many times the item was requeued - NumRequeues(item interface{}) int + NumRequeues(item T) int } // RateLimitingQueueConfig specifies optional configurations to customize a RateLimitingInterface. +// +// Deprecated: Use TypedRateLimitingQueueConfig instead. +type RateLimitingQueueConfig = TypedRateLimitingQueueConfig[any] -type RateLimitingQueueConfig struct { +// TypedRateLimitingQueueConfig specifies optional configurations to customize a TypedRateLimitingInterface. +type TypedRateLimitingQueueConfig[T comparable] struct { // Name for the queue. If unnamed, the metrics will not be registered. Name string @@ -48,36 +57,55 @@ type RateLimitingQueueConfig struct { Clock clock.WithTicker // DelayingQueue optionally allows injecting custom delaying queue DelayingInterface instead of the default one. - DelayingQueue DelayingInterface + DelayingQueue TypedDelayingInterface[T] } // NewRateLimitingQueue constructs a new workqueue with rateLimited queuing ability // Remember to call Forget! If you don't, you may end up tracking failures forever. // NewRateLimitingQueue does not emit metrics. For use with a MetricsProvider, please use // NewRateLimitingQueueWithConfig instead and specify a name. +// +// Deprecated: Use NewTypedRateLimitingQueue instead. func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface { return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{}) } +// NewTypedRateLimitingQueue constructs a new workqueue with rateLimited queuing ability +// Remember to call Forget! If you don't, you may end up tracking failures forever. +// NewTypedRateLimitingQueue does not emit metrics. 
For use with a MetricsProvider, please use +// NewTypedRateLimitingQueueWithConfig instead and specify a name. +func NewTypedRateLimitingQueue[T comparable](rateLimiter TypedRateLimiter[T]) TypedRateLimitingInterface[T] { + return NewTypedRateLimitingQueueWithConfig(rateLimiter, TypedRateLimitingQueueConfig[T]{}) +} + // NewRateLimitingQueueWithConfig constructs a new workqueue with rateLimited queuing ability // with options to customize different properties. // Remember to call Forget! If you don't, you may end up tracking failures forever. +// +// Deprecated: Use NewTypedRateLimitingQueueWithConfig instead. func NewRateLimitingQueueWithConfig(rateLimiter RateLimiter, config RateLimitingQueueConfig) RateLimitingInterface { + return NewTypedRateLimitingQueueWithConfig(rateLimiter, config) +} + +// NewTypedRateLimitingQueueWithConfig constructs a new workqueue with rateLimited queuing ability +// with options to customize different properties. +// Remember to call Forget! If you don't, you may end up tracking failures forever. +func NewTypedRateLimitingQueueWithConfig[T comparable](rateLimiter TypedRateLimiter[T], config TypedRateLimitingQueueConfig[T]) TypedRateLimitingInterface[T] { if config.Clock == nil { config.Clock = clock.RealClock{} } if config.DelayingQueue == nil { - config.DelayingQueue = NewDelayingQueueWithConfig(DelayingQueueConfig{ + config.DelayingQueue = NewTypedDelayingQueueWithConfig(TypedDelayingQueueConfig[T]{ Name: config.Name, MetricsProvider: config.MetricsProvider, Clock: config.Clock, }) } - return &rateLimitingType{ - DelayingInterface: config.DelayingQueue, - rateLimiter: rateLimiter, + return &rateLimitingType[T]{ + TypedDelayingInterface: config.DelayingQueue, + rateLimiter: rateLimiter, } } @@ -99,21 +127,21 @@ func NewRateLimitingQueueWithDelayingInterface(di DelayingInterface, rateLimiter } // rateLimitingType wraps an Interface and provides rateLimited re-enquing -type rateLimitingType struct { - DelayingInterface +type rateLimitingType[T comparable] struct { + TypedDelayingInterface[T] - rateLimiter RateLimiter + rateLimiter TypedRateLimiter[T] } // AddRateLimited AddAfter's the item based on the time when the rate limiter says it's ok -func (q *rateLimitingType) AddRateLimited(item interface{}) { - q.DelayingInterface.AddAfter(item, q.rateLimiter.When(item)) +func (q *rateLimitingType[T]) AddRateLimited(item T) { + q.TypedDelayingInterface.AddAfter(item, q.rateLimiter.When(item)) } -func (q *rateLimitingType) NumRequeues(item interface{}) int { +func (q *rateLimitingType[T]) NumRequeues(item T) int { return q.rateLimiter.NumRequeues(item) } -func (q *rateLimitingType) Forget(item interface{}) { +func (q *rateLimitingType[T]) Forget(item T) { q.rateLimiter.Forget(item) } diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index 026be9e3b1..47ec9466a6 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -404,13 +404,6 @@ func (t *traceLocation) Set(value string) error { return nil } -// flushSyncWriter is the interface satisfied by logging destinations. -type flushSyncWriter interface { - Flush() error - Sync() error - io.Writer -} - var logging loggingT var commandLine flag.FlagSet @@ -486,7 +479,7 @@ type settings struct { // Access to all of the following fields must be protected via a mutex. // file holds writer for each of the log types. - file [severity.NumSeverity]flushSyncWriter + file [severity.NumSeverity]io.Writer // flushInterval is the interval for periodic flushing. 
If zero, // the global default will be used. flushInterval time.Duration @@ -831,32 +824,12 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, buffer.PutBuffer(b) } -// redirectBuffer is used to set an alternate destination for the logs -type redirectBuffer struct { - w io.Writer -} - -func (rb *redirectBuffer) Sync() error { - return nil -} - -func (rb *redirectBuffer) Flush() error { - return nil -} - -func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { - return rb.w.Write(bytes) -} - // SetOutput sets the output destination for all severities func SetOutput(w io.Writer) { logging.mu.Lock() defer logging.mu.Unlock() for s := severity.FatalLog; s >= severity.InfoLog; s-- { - rb := &redirectBuffer{ - w: w, - } - logging.file[s] = rb + logging.file[s] = w } } @@ -868,10 +841,7 @@ func SetOutputBySeverity(name string, w io.Writer) { if !ok { panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) } - rb := &redirectBuffer{ - w: w, - } - logging.file[sev] = rb + logging.file[sev] = w } // LogToStderr sets whether to log exclusively to stderr, bypassing outputs @@ -1011,7 +981,8 @@ func (l *loggingT) exit(err error) { logExitFunc(err) return } - l.flushAll() + needToSync := l.flushAll() + l.syncAll(needToSync) OsExit(2) } @@ -1028,10 +999,6 @@ type syncBuffer struct { maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up. } -func (sb *syncBuffer) Sync() error { - return sb.file.Sync() -} - // CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options. func CalculateMaxSize() uint64 { if logging.logFile != "" { @@ -1223,24 +1190,45 @@ func StartFlushDaemon(interval time.Duration) { // lockAndFlushAll is like flushAll but locks l.mu first. func (l *loggingT) lockAndFlushAll() { l.mu.Lock() - l.flushAll() + needToSync := l.flushAll() l.mu.Unlock() + // Some environments are slow when syncing and holding the lock might cause contention. + l.syncAll(needToSync) } -// flushAll flushes all the logs and attempts to "sync" their data to disk. +// flushAll flushes all the logs // l.mu is held. -func (l *loggingT) flushAll() { +// +// The result is the number of files which need to be synced and the pointers to them. +func (l *loggingT) flushAll() fileArray { + var needToSync fileArray + // Flush from fatal down, in case there's trouble flushing. for s := severity.FatalLog; s >= severity.InfoLog; s-- { file := l.file[s] - if file != nil { - _ = file.Flush() // ignore error - _ = file.Sync() // ignore error + if sb, ok := file.(*syncBuffer); ok && sb.file != nil { + _ = sb.Flush() // ignore error + needToSync.files[needToSync.num] = sb.file + needToSync.num++ } } if logging.loggerOptions.flush != nil { logging.loggerOptions.flush() } + return needToSync +} + +type fileArray struct { + num int + files [severity.NumSeverity]*os.File +} + +// syncAll attempts to "sync" their data to disk. +func (l *loggingT) syncAll(needToSync fileArray) { + // Flush from fatal down, in case there's trouble flushing. 
+ for i := 0; i < needToSync.num; i++ { + _ = needToSync.files[i].Sync() // ignore error + } } // CopyStandardLogTo arranges for messages written to the Go "log" package's diff --git a/vendor/k8s.io/kube-openapi/pkg/common/common.go b/vendor/k8s.io/kube-openapi/pkg/common/common.go index 2e15e163c5..e4ce843b0c 100644 --- a/vendor/k8s.io/kube-openapi/pkg/common/common.go +++ b/vendor/k8s.io/kube-openapi/pkg/common/common.go @@ -164,6 +164,9 @@ type OpenAPIV3Config struct { // It is an optional function to customize model names. GetDefinitionName func(name string) (string, spec.Extensions) + // PostProcessSpec runs after the spec is ready to serve. It allows a final modification to the spec before serving. + PostProcessSpec func(*spec3.OpenAPI) (*spec3.OpenAPI, error) + // SecuritySchemes is list of all security schemes for OpenAPI service. SecuritySchemes spec3.SecuritySchemes diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go index 799d866d51..9887d185b2 100644 --- a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go @@ -214,9 +214,6 @@ func makeUnion(extensions map[string]interface{}) (schema.Union, error) { } } - if union.Discriminator != nil && len(union.Fields) == 0 { - return schema.Union{}, fmt.Errorf("discriminator set to %v, but no fields in union", *union.Discriminator) - } return union, nil } diff --git a/vendor/k8s.io/utils/integer/integer.go b/vendor/k8s.io/utils/integer/integer.go deleted file mode 100644 index e0811e8344..0000000000 --- a/vendor/k8s.io/utils/integer/integer.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package integer - -import "math" - -// IntMax returns the maximum of the params -func IntMax(a, b int) int { - if b > a { - return b - } - return a -} - -// IntMin returns the minimum of the params -func IntMin(a, b int) int { - if b < a { - return b - } - return a -} - -// Int32Max returns the maximum of the params -func Int32Max(a, b int32) int32 { - if b > a { - return b - } - return a -} - -// Int32Min returns the minimum of the params -func Int32Min(a, b int32) int32 { - if b < a { - return b - } - return a -} - -// Int64Max returns the maximum of the params -func Int64Max(a, b int64) int64 { - if b > a { - return b - } - return a -} - -// Int64Min returns the minimum of the params -func Int64Min(a, b int64) int64 { - if b < a { - return b - } - return a -} - -// RoundToInt32 rounds floats into integer numbers. -// Deprecated: use math.Round() and a cast directly. -func RoundToInt32(a float64) int32 { - return int32(math.Round(a)) -} diff --git a/vendor/k8s.io/utils/net/multi_listen.go b/vendor/k8s.io/utils/net/multi_listen.go new file mode 100644 index 0000000000..7cb7795bec --- /dev/null +++ b/vendor/k8s.io/utils/net/multi_listen.go @@ -0,0 +1,195 @@ +/* +Copyright 2024 The Kubernetes Authors. 
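On the kube-openapi hunk above: OpenAPIV3Config gains a PostProcessSpec hook that can rewrite the assembled spec just before it is served. A hedged sketch of wiring it up (only the field name and its signature come from the diff; the surrounding config and the Description tweak are hypothetical):

package main

import (
	"fmt"

	"k8s.io/kube-openapi/pkg/common"
	"k8s.io/kube-openapi/pkg/spec3"
)

func main() {
	cfg := &common.OpenAPIV3Config{}
	// PostProcessSpec runs after the v3 spec is ready and may return a
	// modified spec (or an error) before it is served.
	cfg.PostProcessSpec = func(s *spec3.OpenAPI) (*spec3.OpenAPI, error) {
		if s.Info != nil {
			s.Info.Description = "post-processed example"
		}
		return s, nil
	}
	fmt.Println("hook installed:", cfg.PostProcessSpec != nil)
}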
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "context" + "fmt" + "net" + "sync" +) + +// connErrPair pairs conn and error which is returned by accept on sub-listeners. +type connErrPair struct { + conn net.Conn + err error +} + +// multiListener implements net.Listener +type multiListener struct { + listeners []net.Listener + wg sync.WaitGroup + + // connCh passes accepted connections, from child listeners to parent. + connCh chan connErrPair + // stopCh communicates from parent to child listeners. + stopCh chan struct{} +} + +// compile time check to ensure *multiListener implements net.Listener +var _ net.Listener = &multiListener{} + +// MultiListen returns net.Listener which can listen on and accept connections for +// the given network on multiple addresses. Internally it uses stdlib to create +// sub-listener and multiplexes connection requests using go-routines. +// The network must be "tcp", "tcp4" or "tcp6". +// It follows the semantics of net.Listen that primarily means: +// 1. If the host is an unspecified/zero IP address with "tcp" network, MultiListen +// listens on all available unicast and anycast IP addresses of the local system. +// 2. Use "tcp4" or "tcp6" to exclusively listen on IPv4 or IPv6 family, respectively. +// 3. The host can accept names (e.g, localhost) and it will create a listener for at +// most one of the host's IP. +func MultiListen(ctx context.Context, network string, addrs ...string) (net.Listener, error) { + var lc net.ListenConfig + return multiListen( + ctx, + network, + addrs, + func(ctx context.Context, network, address string) (net.Listener, error) { + return lc.Listen(ctx, network, address) + }) +} + +// multiListen implements MultiListen by consuming stdlib functions as dependency allowing +// mocking for unit-testing. +func multiListen( + ctx context.Context, + network string, + addrs []string, + listenFunc func(ctx context.Context, network, address string) (net.Listener, error), +) (net.Listener, error) { + if !(network == "tcp" || network == "tcp4" || network == "tcp6") { + return nil, fmt.Errorf("network %q not supported", network) + } + if len(addrs) == 0 { + return nil, fmt.Errorf("no address provided to listen on") + } + + ml := &multiListener{ + connCh: make(chan connErrPair), + stopCh: make(chan struct{}), + } + for _, addr := range addrs { + l, err := listenFunc(ctx, network, addr) + if err != nil { + // close all the sub-listeners and exit + _ = ml.Close() + return nil, err + } + ml.listeners = append(ml.listeners, l) + } + + for _, l := range ml.listeners { + ml.wg.Add(1) + go func(l net.Listener) { + defer ml.wg.Done() + for { + // Accept() is blocking, unless ml.Close() is called, in which + // case it will return immediately with an error. + conn, err := l.Accept() + // This assumes that ANY error from Accept() will terminate the + // sub-listener. We could maybe be more precise, but it + // doesn't seem necessary. 
+ terminate := err != nil + + select { + case ml.connCh <- connErrPair{conn: conn, err: err}: + case <-ml.stopCh: + // In case we accepted a connection AND were stopped, and + // this select-case was chosen, just throw away the + // connection. This avoids potentially blocking on connCh + // or leaking a connection. + if conn != nil { + _ = conn.Close() + } + terminate = true + } + // Make sure we don't loop on Accept() returning an error and + // the select choosing the channel case. + if terminate { + return + } + } + }(l) + } + return ml, nil +} + +// Accept implements net.Listener. It waits for and returns a connection from +// any of the sub-listener. +func (ml *multiListener) Accept() (net.Conn, error) { + // wait for any sub-listener to enqueue an accepted connection + connErr, ok := <-ml.connCh + if !ok { + // The channel will be closed only when Close() is called on the + // multiListener. Closing of this channel implies that all + // sub-listeners are also closed, which causes a "use of closed + // network connection" error on their Accept() calls. We return the + // same error for multiListener.Accept() if multiListener.Close() + // has already been called. + return nil, fmt.Errorf("use of closed network connection") + } + return connErr.conn, connErr.err +} + +// Close implements net.Listener. It will close all sub-listeners and wait for +// the go-routines to exit. +func (ml *multiListener) Close() error { + // Make sure this can be called repeatedly without explosions. + select { + case <-ml.stopCh: + return fmt.Errorf("use of closed network connection") + default: + } + + // Tell all sub-listeners to stop. + close(ml.stopCh) + + // Closing the listeners causes Accept() to immediately return an error in + // the sub-listener go-routines. + for _, l := range ml.listeners { + _ = l.Close() + } + + // Wait for all the sub-listener go-routines to exit. + ml.wg.Wait() + close(ml.connCh) + + // Drain any already-queued connections. + for connErr := range ml.connCh { + if connErr.conn != nil { + _ = connErr.conn.Close() + } + } + return nil +} + +// Addr is an implementation of the net.Listener interface. It always returns +// the address of the first listener. Callers should use conn.LocalAddr() to +// obtain the actual local address of the sub-listener. +func (ml *multiListener) Addr() net.Addr { + return ml.listeners[0].Addr() +} + +// Addrs is like Addr, but returns the address for all registered listeners. 
+func (ml *multiListener) Addrs() []net.Addr { + var ret []net.Addr + for _, l := range ml.listeners { + ret = append(ret, l.Addr()) + } + return ret +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 8ee325e95b..a0ebf2dc3b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,3 +1,6 @@ +# cel.dev/expr v0.16.1 +## explicit; go 1.18 +cel.dev/expr # cloud.google.com/go v0.116.0 ## explicit; go 1.21 cloud.google.com/go @@ -5,7 +8,7 @@ cloud.google.com/go/internal cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -# cloud.google.com/go/auth v0.9.8 +# cloud.google.com/go/auth v0.10.0 ## explicit; go 1.21 cloud.google.com/go/auth cloud.google.com/go/auth/credentials @@ -22,8 +25,8 @@ cloud.google.com/go/auth/internal/credsfile cloud.google.com/go/auth/internal/jwt cloud.google.com/go/auth/internal/transport cloud.google.com/go/auth/internal/transport/cert -# cloud.google.com/go/auth/oauth2adapt v0.2.4 -## explicit; go 1.20 +# cloud.google.com/go/auth/oauth2adapt v0.2.5 +## explicit; go 1.21 cloud.google.com/go/auth/oauth2adapt # cloud.google.com/go/compute/metadata v0.5.2 ## explicit; go 1.21 @@ -47,9 +50,15 @@ cloud.google.com/go/kms/internal cloud.google.com/go/longrunning cloud.google.com/go/longrunning/autogen cloud.google.com/go/longrunning/autogen/longrunningpb -# cloud.google.com/go/storage v1.43.0 -## explicit; go 1.20 +# cloud.google.com/go/monitoring v1.21.1 +## explicit; go 1.21 +cloud.google.com/go/monitoring/apiv3/v2 +cloud.google.com/go/monitoring/apiv3/v2/monitoringpb +cloud.google.com/go/monitoring/internal +# cloud.google.com/go/storage v1.46.0 +## explicit; go 1.21 cloud.google.com/go/storage +cloud.google.com/go/storage/experimental cloud.google.com/go/storage/internal cloud.google.com/go/storage/internal/apiv2 cloud.google.com/go/storage/internal/apiv2/storagepb @@ -75,7 +84,7 @@ github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider ## explicit github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry github.com/Azure/azure-sdk-for-go/version -# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.15.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azcore github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource @@ -97,7 +106,7 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming github.com/Azure/azure-sdk-for-go/sdk/azcore/to github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing -# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 +# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azidentity github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal @@ -168,7 +177,16 @@ github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version github.com/AzureAD/microsoft-authentication-library-for-go/apps/public -# github.com/IBM/sarama v1.43.0 +# github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 +## explicit; go 1.21 +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp +# github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 +## explicit; go 1.21 
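Returning to the new k8s.io/utils/net MultiListen helper added above: it returns a single net.Listener that multiplexes Accept across several sub-listeners. A usage sketch under the semantics documented in that file (the addresses are placeholders and error handling is abbreviated; this is not part of the patch):

package main

import (
	"context"
	"fmt"
	"log"

	netutils "k8s.io/utils/net"
)

func main() {
	ctx := context.Background()

	// Listen on both loopback families at once behind one listener.
	ln, err := netutils.MultiListen(ctx, "tcp", "127.0.0.1:0", "[::1]:0")
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()

	// Addr() reports the first sub-listener; per the doc comment, use the
	// accepted conn's LocalAddr() for the actual local address.
	fmt.Println("first sub-listener address:", ln.Addr())

	conn, err := ln.Accept() // blocks until any sub-listener accepts
	if err != nil {
		log.Fatal(err) // "use of closed network connection" after Close()
	}
	defer conn.Close()
	_, _ = conn.Write([]byte("hello\n"))
}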
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric +# github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 +## explicit; go 1.21 +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping +# github.com/IBM/sarama v1.43.3 ## explicit; go 1.19 github.com/IBM/sarama # github.com/Microsoft/go-winio v0.6.2 @@ -295,12 +313,11 @@ github.com/aws/aws-sdk-go/service/sso/ssoiface github.com/aws/aws-sdk-go/service/ssooidc github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface -# github.com/aws/aws-sdk-go-v2 v1.30.5 +# github.com/aws/aws-sdk-go-v2 v1.32.2 ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/aws github.com/aws/aws-sdk-go-v2/aws/defaults github.com/aws/aws-sdk-go-v2/aws/middleware -github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics github.com/aws/aws-sdk-go-v2/aws/protocol/query github.com/aws/aws-sdk-go-v2/aws/protocol/restjson github.com/aws/aws-sdk-go-v2/aws/protocol/xml @@ -322,10 +339,10 @@ github.com/aws/aws-sdk-go-v2/internal/shareddefaults github.com/aws/aws-sdk-go-v2/internal/strings github.com/aws/aws-sdk-go-v2/internal/sync/singleflight github.com/aws/aws-sdk-go-v2/internal/timeconv -# github.com/aws/aws-sdk-go-v2/config v1.27.33 +# github.com/aws/aws-sdk-go-v2/config v1.27.43 ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/config -# github.com/aws/aws-sdk-go-v2/credentials v1.17.32 +# github.com/aws/aws-sdk-go-v2/credentials v1.17.41 ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/credentials github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds @@ -334,14 +351,14 @@ github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client github.com/aws/aws-sdk-go-v2/credentials/processcreds github.com/aws/aws-sdk-go-v2/credentials/ssocreds github.com/aws/aws-sdk-go-v2/credentials/stscreds -# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 +# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/feature/ec2/imds github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config -# github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 +# github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/internal/configsources -# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 +# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 # github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 @@ -357,33 +374,33 @@ github.com/aws/aws-sdk-go-v2/service/ecr/types github.com/aws/aws-sdk-go-v2/service/ecrpublic github.com/aws/aws-sdk-go-v2/service/ecrpublic/internal/endpoints github.com/aws/aws-sdk-go-v2/service/ecrpublic/types -# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 +# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding -# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 +# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2 ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url -# github.com/aws/aws-sdk-go-v2/service/kms v1.35.7 +# github.com/aws/aws-sdk-go-v2/service/kms v1.37.2 ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/service/kms github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints github.com/aws/aws-sdk-go-v2/service/kms/types -# 
github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 +# github.com/aws/aws-sdk-go-v2/service/sso v1.24.2 ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/service/sso github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sso/types -# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 +# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2 ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/service/ssooidc github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints github.com/aws/aws-sdk-go-v2/service/ssooidc/types -# github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 +# github.com/aws/aws-sdk-go-v2/service/sts v1.32.2 ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/service/sts github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sts/types -# github.com/aws/smithy-go v1.20.4 +# github.com/aws/smithy-go v1.22.0 ## explicit; go 1.21 github.com/aws/smithy-go github.com/aws/smithy-go/auth @@ -398,11 +415,13 @@ github.com/aws/smithy-go/endpoints github.com/aws/smithy-go/internal/sync/singleflight github.com/aws/smithy-go/io github.com/aws/smithy-go/logging +github.com/aws/smithy-go/metrics github.com/aws/smithy-go/middleware github.com/aws/smithy-go/private/requestcompression github.com/aws/smithy-go/ptr github.com/aws/smithy-go/rand github.com/aws/smithy-go/time +github.com/aws/smithy-go/tracing github.com/aws/smithy-go/transport/http github.com/aws/smithy-go/transport/http/internal/io github.com/aws/smithy-go/waiter @@ -422,9 +441,9 @@ github.com/blang/semver # github.com/blendle/zapdriver v1.3.1 ## explicit; go 1.12 github.com/blendle/zapdriver -# github.com/cenkalti/backoff/v3 v3.2.2 -## explicit; go 1.12 -github.com/cenkalti/backoff/v3 +# github.com/cenkalti/backoff/v4 v4.3.0 +## explicit; go 1.18 +github.com/cenkalti/backoff/v4 # github.com/census-instrumentation/opencensus-proto v0.4.1 ## explicit; go 1.18 github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1 @@ -474,6 +493,16 @@ github.com/cloudflare/circl/math/mlsbset github.com/cloudflare/circl/sign github.com/cloudflare/circl/sign/ed25519 github.com/cloudflare/circl/sign/ed448 +# github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 +## explicit; go 1.19 +github.com/cncf/xds/go/udpa/annotations +github.com/cncf/xds/go/udpa/type/v1 +github.com/cncf/xds/go/xds/annotations/v3 +github.com/cncf/xds/go/xds/core/v3 +github.com/cncf/xds/go/xds/data/orca/v3 +github.com/cncf/xds/go/xds/service/orca/v3 +github.com/cncf/xds/go/xds/type/matcher/v3 +github.com/cncf/xds/go/xds/type/v3 # github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be ## explicit github.com/common-nighthawk/go-figure @@ -524,7 +553,7 @@ github.com/docker/docker-credential-helpers/credentials # github.com/dustin/go-humanize v1.0.1 ## explicit; go 1.16 github.com/dustin/go-humanize -# github.com/eapache/go-resiliency v1.6.0 +# github.com/eapache/go-resiliency v1.7.0 ## explicit; go 1.13 github.com/eapache/go-resiliency/breaker # github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 @@ -537,6 +566,49 @@ github.com/eapache/queue ## explicit; go 1.13 github.com/emicklei/go-restful/v3 github.com/emicklei/go-restful/v3/log +# github.com/envoyproxy/go-control-plane v0.13.0 +## explicit; go 1.21 +github.com/envoyproxy/go-control-plane/envoy/admin/v3 +github.com/envoyproxy/go-control-plane/envoy/annotations +github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3 +github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3 
+github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3 +github.com/envoyproxy/go-control-plane/envoy/config/common/matcher/v3 +github.com/envoyproxy/go-control-plane/envoy/config/core/v3 +github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3 +github.com/envoyproxy/go-control-plane/envoy/config/listener/v3 +github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3 +github.com/envoyproxy/go-control-plane/envoy/config/overload/v3 +github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3 +github.com/envoyproxy/go-control-plane/envoy/config/route/v3 +github.com/envoyproxy/go-control-plane/envoy/config/tap/v3 +github.com/envoyproxy/go-control-plane/envoy/config/trace/v3 +github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/common/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3 +github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3 +github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3 +github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3 +github.com/envoyproxy/go-control-plane/envoy/service/status/v3 +github.com/envoyproxy/go-control-plane/envoy/type/http/v3 +github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3 +github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3 +github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3 +github.com/envoyproxy/go-control-plane/envoy/type/v3 +# github.com/envoyproxy/protoc-gen-validate v1.1.0 +## explicit; go 1.19 +github.com/envoyproxy/protoc-gen-validate/validate # github.com/evanphx/json-patch v4.12.0+incompatible ## explicit github.com/evanphx/json-patch @@ -550,9 +622,13 @@ github.com/fatih/color # github.com/felixge/httpsnoop v1.0.4 ## explicit; go 1.13 github.com/felixge/httpsnoop -# github.com/fsnotify/fsnotify v1.7.0 +# github.com/fsnotify/fsnotify v1.8.0 ## explicit; go 1.17 github.com/fsnotify/fsnotify +github.com/fsnotify/fsnotify/internal +# github.com/fxamacker/cbor/v2 v2.7.0 +## explicit; go 1.17 +github.com/fxamacker/cbor/v2 # github.com/gdamore/encoding v1.0.0 ## explicit; go 1.9 github.com/gdamore/encoding @@ -906,7 +982,7 @@ github.com/hashicorp/hcl/hcl/token github.com/hashicorp/hcl/json/parser github.com/hashicorp/hcl/json/scanner github.com/hashicorp/hcl/json/token -# github.com/hashicorp/vault/api v1.14.0 +# github.com/hashicorp/vault/api v1.15.0 ## explicit; go 1.21 github.com/hashicorp/vault/api # github.com/hinshun/vt10x 
v0.0.0-20220228203356-1ab2cad5fd82 @@ -977,7 +1053,7 @@ github.com/jcmturner/rpc/v2/ndr # github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 ## explicit; go 1.20 github.com/jedisct1/go-minisign -# github.com/jellydator/ttlcache/v3 v3.2.0 +# github.com/jellydator/ttlcache/v3 v3.3.0 ## explicit; go 1.18 github.com/jellydator/ttlcache/v3 # github.com/jmespath/go-jmespath v0.4.0 @@ -1115,8 +1191,8 @@ github.com/opentracing/opentracing-go/log # github.com/openzipkin/zipkin-go v0.4.2 ## explicit; go 1.18 github.com/openzipkin/zipkin-go/model -# github.com/pelletier/go-toml/v2 v2.2.2 -## explicit; go 1.16 +# github.com/pelletier/go-toml/v2 v2.2.3 +## explicit; go 1.21.0 github.com/pelletier/go-toml/v2 github.com/pelletier/go-toml/v2/internal/characters github.com/pelletier/go-toml/v2/internal/danger @@ -1138,6 +1214,15 @@ github.com/pkg/browser # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors +# github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 +## explicit; go 1.20 +github.com/planetscale/vtprotobuf/protohelpers +github.com/planetscale/vtprotobuf/types/known/anypb +github.com/planetscale/vtprotobuf/types/known/durationpb +github.com/planetscale/vtprotobuf/types/known/emptypb +github.com/planetscale/vtprotobuf/types/known/structpb +github.com/planetscale/vtprotobuf/types/known/timestamppb +github.com/planetscale/vtprotobuf/types/known/wrapperspb # github.com/prometheus/client_golang v1.20.2 ## explicit; go 1.20 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil @@ -1270,8 +1355,8 @@ github.com/sigstore/rekor/pkg/types/intoto/v0.0.2 github.com/sigstore/rekor/pkg/types/rekord github.com/sigstore/rekor/pkg/types/rekord/v0.0.1 github.com/sigstore/rekor/pkg/util -# github.com/sigstore/sigstore v1.8.9 -## explicit; go 1.22.5 +# github.com/sigstore/sigstore v1.8.10 +## explicit; go 1.22.8 github.com/sigstore/sigstore/pkg/cryptoutils github.com/sigstore/sigstore/pkg/fulcioroots github.com/sigstore/sigstore/pkg/oauth @@ -1282,17 +1367,17 @@ github.com/sigstore/sigstore/pkg/signature/kms github.com/sigstore/sigstore/pkg/signature/options github.com/sigstore/sigstore/pkg/signature/payload github.com/sigstore/sigstore/pkg/tuf -# github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.8 -## explicit; go 1.22.5 +# github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.10 +## explicit; go 1.22.8 github.com/sigstore/sigstore/pkg/signature/kms/aws -# github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.8 -## explicit; go 1.22.5 +# github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.10 +## explicit; go 1.22.8 github.com/sigstore/sigstore/pkg/signature/kms/azure -# github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.8 -## explicit; go 1.22.5 +# github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.10 +## explicit; go 1.22.8 github.com/sigstore/sigstore/pkg/signature/kms/gcp -# github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.8 -## explicit; go 1.22.5 +# github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.10 +## explicit; go 1.22.8 github.com/sigstore/sigstore/pkg/signature/kms/hashivault # github.com/sigstore/timestamp-authority v1.2.2 ## explicit; go 1.21 @@ -1335,7 +1420,7 @@ github.com/spf13/viper/internal/encoding/json github.com/spf13/viper/internal/encoding/toml github.com/spf13/viper/internal/encoding/yaml github.com/spf13/viper/internal/features -# github.com/spiffe/go-spiffe/v2 v2.3.0 +# github.com/spiffe/go-spiffe/v2 v2.4.0 ## explicit; go 1.21 
github.com/spiffe/go-spiffe/v2/bundle/jwtbundle github.com/spiffe/go-spiffe/v2/bundle/spiffebundle @@ -1370,8 +1455,8 @@ github.com/syndtr/goleveldb/leveldb/opt github.com/syndtr/goleveldb/leveldb/storage github.com/syndtr/goleveldb/leveldb/table github.com/syndtr/goleveldb/leveldb/util -# github.com/tektoncd/chains v0.22.2 -## explicit; go 1.22 +# github.com/tektoncd/chains v0.23.0 +## explicit; go 1.22.8 github.com/tektoncd/chains/internal/backport github.com/tektoncd/chains/pkg/artifacts github.com/tektoncd/chains/pkg/chains @@ -1617,6 +1702,9 @@ github.com/transparency-dev/merkle/rfc6962 # github.com/vbatts/tar-split v0.11.5 ## explicit; go 1.17 github.com/vbatts/tar-split/archive/tar +# github.com/x448/float16 v0.8.4 +## explicit; go 1.11 +github.com/x448/float16 # github.com/xanzy/go-gitlab v0.109.0 ## explicit; go 1.19 github.com/xanzy/go-gitlab @@ -1632,13 +1720,13 @@ github.com/xdg-go/stringprep # github.com/xlab/treeprint v1.2.0 ## explicit; go 1.13 github.com/xlab/treeprint -# github.com/youmark/pkcs8 v0.0.0-20240424034433-3c2c7870ae76 -## explicit; go 1.22 +# github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 +## explicit; go 1.17 github.com/youmark/pkcs8 # github.com/zeebo/errs v1.3.0 ## explicit; go 1.12 github.com/zeebo/errs -# go.mongodb.org/mongo-driver v1.15.0 +# go.mongodb.org/mongo-driver v1.16.1 ## explicit; go 1.18 go.mongodb.org/mongo-driver/bson go.mongodb.org/mongo-driver/bson/bsoncodec @@ -1708,6 +1796,9 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate +# go.opentelemetry.io/contrib/detectors/gcp v1.29.0 +## explicit; go 1.21 +go.opentelemetry.io/contrib/detectors/gcp # go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc @@ -1733,12 +1824,27 @@ go.opentelemetry.io/otel/semconv/internal/v2 go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.17.0/httpconv go.opentelemetry.io/otel/semconv/v1.20.0 +go.opentelemetry.io/otel/semconv/v1.24.0 go.opentelemetry.io/otel/semconv/v1.26.0 # go.opentelemetry.io/otel/metric v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop +# go.opentelemetry.io/otel/sdk v1.29.0 +## explicit; go 1.21 +go.opentelemetry.io/otel/sdk +go.opentelemetry.io/otel/sdk/instrumentation +go.opentelemetry.io/otel/sdk/internal/x +go.opentelemetry.io/otel/sdk/resource +# go.opentelemetry.io/otel/sdk/metric v1.29.0 +## explicit; go 1.21 +go.opentelemetry.io/otel/sdk/metric +go.opentelemetry.io/otel/sdk/metric/internal +go.opentelemetry.io/otel/sdk/metric/internal/aggregate +go.opentelemetry.io/otel/sdk/metric/internal/exemplar +go.opentelemetry.io/otel/sdk/metric/internal/x +go.opentelemetry.io/otel/sdk/metric/metricdata # go.opentelemetry.io/otel/trace v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/otel/trace @@ -1783,8 +1889,8 @@ go.uber.org/zap/zaptest ## explicit; go 1.22.0 goa.design/goa/v3/http goa.design/goa/v3/pkg -# gocloud.dev v0.37.0 -## explicit; go 1.20 +# gocloud.dev v0.40.0 +## explicit; go 1.21.0 gocloud.dev/aws gocloud.dev/docstore gocloud.dev/docstore/awsdynamodb @@ -1802,11 +1908,11 @@ gocloud.dev/pubsub gocloud.dev/pubsub/batcher gocloud.dev/pubsub/driver gocloud.dev/pubsub/mempubsub -# gocloud.dev/docstore/mongodocstore v0.37.1-0.20240501181211-d8b9c9401f18 +# gocloud.dev/docstore/mongodocstore v0.40.0 ## explicit; 
go 1.22 gocloud.dev/docstore/mongodocstore -# gocloud.dev/pubsub/kafkapubsub v0.37.0 -## explicit; go 1.20 +# gocloud.dev/pubsub/kafkapubsub v0.40.0 +## explicit; go 1.21.0 gocloud.dev/pubsub/kafkapubsub # golang.org/x/crypto v0.28.0 ## explicit; go 1.20 @@ -1842,7 +1948,7 @@ golang.org/x/crypto/sha3 golang.org/x/crypto/ssh golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/terminal -# golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 +# golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/maps @@ -1920,14 +2026,14 @@ golang.org/x/text/width # golang.org/x/time v0.7.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 +# golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 ## explicit; go 1.18 golang.org/x/xerrors golang.org/x/xerrors/internal # gomodules.xyz/jsonpatch/v2 v2.4.0 ## explicit; go 1.20 gomodules.xyz/jsonpatch/v2 -# google.golang.org/api v0.201.0 +# google.golang.org/api v0.203.0 ## explicit; go 1.21 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport @@ -1949,6 +2055,7 @@ google.golang.org/api/transport/http/internal/propagation # google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 ## explicit; go 1.21 google.golang.org/genproto/googleapis/cloud/location +google.golang.org/genproto/googleapis/type/calendarperiod google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/googleapis/type/latlng @@ -1957,8 +2064,12 @@ google.golang.org/genproto/protobuf/field_mask ## explicit; go 1.21 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations +google.golang.org/genproto/googleapis/api/distribution google.golang.org/genproto/googleapis/api/expr/v1alpha1 google.golang.org/genproto/googleapis/api/httpbody +google.golang.org/genproto/googleapis/api/label +google.golang.org/genproto/googleapis/api/metric +google.golang.org/genproto/googleapis/api/monitoredres # google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 ## explicit; go 1.21 google.golang.org/genproto/googleapis/rpc/code @@ -1968,14 +2079,24 @@ google.golang.org/genproto/googleapis/rpc/status ## explicit; go 1.21 google.golang.org/grpc google.golang.org/grpc/attributes +google.golang.org/grpc/authz/audit +google.golang.org/grpc/authz/audit/stdout google.golang.org/grpc/backoff google.golang.org/grpc/balancer google.golang.org/grpc/balancer/base google.golang.org/grpc/balancer/grpclb google.golang.org/grpc/balancer/grpclb/grpc_lb_v1 google.golang.org/grpc/balancer/grpclb/state +google.golang.org/grpc/balancer/leastrequest google.golang.org/grpc/balancer/pickfirst +google.golang.org/grpc/balancer/rls +google.golang.org/grpc/balancer/rls/internal/adaptive +google.golang.org/grpc/balancer/rls/internal/keys google.golang.org/grpc/balancer/roundrobin +google.golang.org/grpc/balancer/weightedroundrobin +google.golang.org/grpc/balancer/weightedroundrobin/internal +google.golang.org/grpc/balancer/weightedtarget +google.golang.org/grpc/balancer/weightedtarget/weightedaggregator google.golang.org/grpc/binarylog/grpc_binarylog_v1 google.golang.org/grpc/channelz google.golang.org/grpc/codes @@ -1991,28 +2112,38 @@ google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp google.golang.org/grpc/credentials/google google.golang.org/grpc/credentials/insecure google.golang.org/grpc/credentials/oauth 
+google.golang.org/grpc/credentials/tls/certprovider +google.golang.org/grpc/credentials/tls/certprovider/pemfile google.golang.org/grpc/encoding +google.golang.org/grpc/encoding/gzip google.golang.org/grpc/encoding/proto google.golang.org/grpc/experimental/stats google.golang.org/grpc/grpclog google.golang.org/grpc/grpclog/internal google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/internal +google.golang.org/grpc/internal/admin google.golang.org/grpc/internal/backoff google.golang.org/grpc/internal/balancer/gracefulswitch +google.golang.org/grpc/internal/balancer/nop +google.golang.org/grpc/internal/balancergroup google.golang.org/grpc/internal/balancerload google.golang.org/grpc/internal/binarylog google.golang.org/grpc/internal/buffer +google.golang.org/grpc/internal/cache google.golang.org/grpc/internal/channelz google.golang.org/grpc/internal/credentials +google.golang.org/grpc/internal/credentials/xds google.golang.org/grpc/internal/envconfig google.golang.org/grpc/internal/googlecloud google.golang.org/grpc/internal/grpclog google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil +google.golang.org/grpc/internal/hierarchy google.golang.org/grpc/internal/idle google.golang.org/grpc/internal/metadata google.golang.org/grpc/internal/pretty +google.golang.org/grpc/internal/proto/grpc_lookup_v1 google.golang.org/grpc/internal/resolver google.golang.org/grpc/internal/resolver/dns google.golang.org/grpc/internal/resolver/dns/internal @@ -2024,10 +2155,17 @@ google.golang.org/grpc/internal/status google.golang.org/grpc/internal/syscall google.golang.org/grpc/internal/transport google.golang.org/grpc/internal/transport/networktype +google.golang.org/grpc/internal/wrr google.golang.org/grpc/internal/xds +google.golang.org/grpc/internal/xds/bootstrap +google.golang.org/grpc/internal/xds/bootstrap/tlscreds +google.golang.org/grpc/internal/xds/matcher +google.golang.org/grpc/internal/xds/rbac google.golang.org/grpc/keepalive google.golang.org/grpc/mem google.golang.org/grpc/metadata +google.golang.org/grpc/orca +google.golang.org/grpc/orca/internal google.golang.org/grpc/peer google.golang.org/grpc/resolver google.golang.org/grpc/resolver/dns @@ -2036,6 +2174,43 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap +google.golang.org/grpc/xds +google.golang.org/grpc/xds/bootstrap +google.golang.org/grpc/xds/csds +google.golang.org/grpc/xds/googledirectpath +google.golang.org/grpc/xds/internal +google.golang.org/grpc/xds/internal/balancer +google.golang.org/grpc/xds/internal/balancer/cdsbalancer +google.golang.org/grpc/xds/internal/balancer/clusterimpl +google.golang.org/grpc/xds/internal/balancer/clustermanager +google.golang.org/grpc/xds/internal/balancer/clusterresolver +google.golang.org/grpc/xds/internal/balancer/loadstore +google.golang.org/grpc/xds/internal/balancer/outlierdetection +google.golang.org/grpc/xds/internal/balancer/priority +google.golang.org/grpc/xds/internal/balancer/ringhash +google.golang.org/grpc/xds/internal/balancer/wrrlocality +google.golang.org/grpc/xds/internal/clusterspecifier +google.golang.org/grpc/xds/internal/clusterspecifier/rls +google.golang.org/grpc/xds/internal/httpfilter +google.golang.org/grpc/xds/internal/httpfilter/fault +google.golang.org/grpc/xds/internal/httpfilter/rbac +google.golang.org/grpc/xds/internal/httpfilter/router +google.golang.org/grpc/xds/internal/resolver +google.golang.org/grpc/xds/internal/resolver/internal 
+google.golang.org/grpc/xds/internal/server +google.golang.org/grpc/xds/internal/xdsclient +google.golang.org/grpc/xds/internal/xdsclient/internal +google.golang.org/grpc/xds/internal/xdsclient/load +google.golang.org/grpc/xds/internal/xdsclient/transport +google.golang.org/grpc/xds/internal/xdsclient/transport/internal +google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry +google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter +google.golang.org/grpc/xds/internal/xdsclient/xdsresource +google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version +# google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a +## explicit; go 1.21 +google.golang.org/grpc/stats/opentelemetry +google.golang.org/grpc/stats/opentelemetry/internal # google.golang.org/protobuf v1.35.1 ## explicit; go 1.21 google.golang.org/protobuf/encoding/protodelim @@ -2080,6 +2255,9 @@ google.golang.org/protobuf/types/known/fieldmaskpb google.golang.org/protobuf/types/known/structpb google.golang.org/protobuf/types/known/timestamppb google.golang.org/protobuf/types/known/wrapperspb +# gopkg.in/evanphx/json-patch.v4 v4.12.0 +## explicit +gopkg.in/evanphx/json-patch.v4 # gopkg.in/inf.v0 v0.9.1 ## explicit gopkg.in/inf.v0 @@ -2111,11 +2289,12 @@ gotest.tools/v3/internal/cleanup gotest.tools/v3/internal/difflib gotest.tools/v3/internal/format gotest.tools/v3/internal/source -# k8s.io/api v0.29.10 -## explicit; go 1.21 +# k8s.io/api v0.31.2 +## explicit; go 1.22.0 k8s.io/api/admissionregistration/v1 k8s.io/api/admissionregistration/v1alpha1 k8s.io/api/admissionregistration/v1beta1 +k8s.io/api/apidiscovery/v2 k8s.io/api/apidiscovery/v2beta1 k8s.io/api/apiserverinternal/v1alpha1 k8s.io/api/apps/v1 @@ -2136,6 +2315,7 @@ k8s.io/api/certificates/v1 k8s.io/api/certificates/v1alpha1 k8s.io/api/certificates/v1beta1 k8s.io/api/coordination/v1 +k8s.io/api/coordination/v1alpha1 k8s.io/api/coordination/v1beta1 k8s.io/api/core/v1 k8s.io/api/discovery/v1 @@ -2147,6 +2327,7 @@ k8s.io/api/flowcontrol/v1 k8s.io/api/flowcontrol/v1beta1 k8s.io/api/flowcontrol/v1beta2 k8s.io/api/flowcontrol/v1beta3 +k8s.io/api/imagepolicy/v1alpha1 k8s.io/api/networking/v1 k8s.io/api/networking/v1alpha1 k8s.io/api/networking/v1beta1 @@ -2158,25 +2339,28 @@ k8s.io/api/policy/v1beta1 k8s.io/api/rbac/v1 k8s.io/api/rbac/v1alpha1 k8s.io/api/rbac/v1beta1 -k8s.io/api/resource/v1alpha2 +k8s.io/api/resource/v1alpha3 k8s.io/api/scheduling/v1 k8s.io/api/scheduling/v1alpha1 k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 +k8s.io/api/storagemigration/v1alpha1 # k8s.io/apiextensions-apiserver v0.29.2 ## explicit; go 1.21 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 -# k8s.io/apimachinery v0.29.10 -## explicit; go 1.21 +# k8s.io/apimachinery v0.31.2 +## explicit; go 1.22.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta +k8s.io/apimachinery/pkg/api/meta/testrestmapper k8s.io/apimachinery/pkg/api/resource k8s.io/apimachinery/pkg/api/validation k8s.io/apimachinery/pkg/apis/meta/internalversion +k8s.io/apimachinery/pkg/apis/meta/internalversion/validation k8s.io/apimachinery/pkg/apis/meta/v1 k8s.io/apimachinery/pkg/apis/meta/v1/unstructured k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme @@ -2189,6 +2373,8 @@ k8s.io/apimachinery/pkg/labels k8s.io/apimachinery/pkg/runtime k8s.io/apimachinery/pkg/runtime/schema 
k8s.io/apimachinery/pkg/runtime/serializer +k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct +k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes k8s.io/apimachinery/pkg/runtime/serializer/json k8s.io/apimachinery/pkg/runtime/serializer/protobuf k8s.io/apimachinery/pkg/runtime/serializer/recognizer @@ -2229,8 +2415,9 @@ k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/genericiooptions k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.29.10 -## explicit; go 1.21 +# k8s.io/client-go v0.31.2 +## explicit; go 1.22.0 +k8s.io/client-go/applyconfigurations k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 @@ -2248,6 +2435,7 @@ k8s.io/client-go/applyconfigurations/certificates/v1 k8s.io/client-go/applyconfigurations/certificates/v1alpha1 k8s.io/client-go/applyconfigurations/certificates/v1beta1 k8s.io/client-go/applyconfigurations/coordination/v1 +k8s.io/client-go/applyconfigurations/coordination/v1alpha1 k8s.io/client-go/applyconfigurations/coordination/v1beta1 k8s.io/client-go/applyconfigurations/core/v1 k8s.io/client-go/applyconfigurations/discovery/v1 @@ -2259,6 +2447,7 @@ k8s.io/client-go/applyconfigurations/flowcontrol/v1 k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1 k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2 k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3 +k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1 k8s.io/client-go/applyconfigurations/internal k8s.io/client-go/applyconfigurations/meta/v1 k8s.io/client-go/applyconfigurations/networking/v1 @@ -2272,19 +2461,22 @@ k8s.io/client-go/applyconfigurations/policy/v1beta1 k8s.io/client-go/applyconfigurations/rbac/v1 k8s.io/client-go/applyconfigurations/rbac/v1alpha1 k8s.io/client-go/applyconfigurations/rbac/v1beta1 -k8s.io/client-go/applyconfigurations/resource/v1alpha2 +k8s.io/client-go/applyconfigurations/resource/v1alpha3 k8s.io/client-go/applyconfigurations/scheduling/v1 k8s.io/client-go/applyconfigurations/scheduling/v1alpha1 k8s.io/client-go/applyconfigurations/scheduling/v1beta1 k8s.io/client-go/applyconfigurations/storage/v1 k8s.io/client-go/applyconfigurations/storage/v1alpha1 k8s.io/client-go/applyconfigurations/storage/v1beta1 +k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1 k8s.io/client-go/discovery k8s.io/client-go/discovery/cached/disk k8s.io/client-go/discovery/cached/memory k8s.io/client-go/discovery/fake k8s.io/client-go/dynamic k8s.io/client-go/dynamic/fake +k8s.io/client-go/features +k8s.io/client-go/gentype k8s.io/client-go/informers k8s.io/client-go/informers/admissionregistration k8s.io/client-go/informers/admissionregistration/v1 @@ -2310,6 +2502,7 @@ k8s.io/client-go/informers/certificates/v1alpha1 k8s.io/client-go/informers/certificates/v1beta1 k8s.io/client-go/informers/coordination k8s.io/client-go/informers/coordination/v1 +k8s.io/client-go/informers/coordination/v1alpha1 k8s.io/client-go/informers/coordination/v1beta1 k8s.io/client-go/informers/core k8s.io/client-go/informers/core/v1 @@ -2343,7 +2536,7 @@ k8s.io/client-go/informers/rbac/v1 k8s.io/client-go/informers/rbac/v1alpha1 k8s.io/client-go/informers/rbac/v1beta1 k8s.io/client-go/informers/resource -k8s.io/client-go/informers/resource/v1alpha2 +k8s.io/client-go/informers/resource/v1alpha3 k8s.io/client-go/informers/scheduling k8s.io/client-go/informers/scheduling/v1 
k8s.io/client-go/informers/scheduling/v1alpha1 @@ -2352,6 +2545,8 @@ k8s.io/client-go/informers/storage k8s.io/client-go/informers/storage/v1 k8s.io/client-go/informers/storage/v1alpha1 k8s.io/client-go/informers/storage/v1beta1 +k8s.io/client-go/informers/storagemigration +k8s.io/client-go/informers/storagemigration/v1alpha1 k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/fake k8s.io/client-go/kubernetes/scheme @@ -2399,6 +2594,8 @@ k8s.io/client-go/kubernetes/typed/certificates/v1beta1 k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake k8s.io/client-go/kubernetes/typed/coordination/v1 k8s.io/client-go/kubernetes/typed/coordination/v1/fake +k8s.io/client-go/kubernetes/typed/coordination/v1alpha1 +k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/fake k8s.io/client-go/kubernetes/typed/coordination/v1beta1 k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake k8s.io/client-go/kubernetes/typed/core/v1 @@ -2443,8 +2640,8 @@ k8s.io/client-go/kubernetes/typed/rbac/v1alpha1 k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake k8s.io/client-go/kubernetes/typed/rbac/v1beta1 k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake -k8s.io/client-go/kubernetes/typed/resource/v1alpha2 -k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake +k8s.io/client-go/kubernetes/typed/resource/v1alpha3 +k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake k8s.io/client-go/kubernetes/typed/scheduling/v1 k8s.io/client-go/kubernetes/typed/scheduling/v1/fake k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 @@ -2457,6 +2654,9 @@ k8s.io/client-go/kubernetes/typed/storage/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake k8s.io/client-go/kubernetes/typed/storage/v1beta1 k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake +k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1 +k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake +k8s.io/client-go/listers k8s.io/client-go/listers/admissionregistration/v1 k8s.io/client-go/listers/admissionregistration/v1alpha1 k8s.io/client-go/listers/admissionregistration/v1beta1 @@ -2474,6 +2674,7 @@ k8s.io/client-go/listers/certificates/v1 k8s.io/client-go/listers/certificates/v1alpha1 k8s.io/client-go/listers/certificates/v1beta1 k8s.io/client-go/listers/coordination/v1 +k8s.io/client-go/listers/coordination/v1alpha1 k8s.io/client-go/listers/coordination/v1beta1 k8s.io/client-go/listers/core/v1 k8s.io/client-go/listers/discovery/v1 @@ -2496,13 +2697,14 @@ k8s.io/client-go/listers/policy/v1beta1 k8s.io/client-go/listers/rbac/v1 k8s.io/client-go/listers/rbac/v1alpha1 k8s.io/client-go/listers/rbac/v1beta1 -k8s.io/client-go/listers/resource/v1alpha2 +k8s.io/client-go/listers/resource/v1alpha3 k8s.io/client-go/listers/scheduling/v1 k8s.io/client-go/listers/scheduling/v1alpha1 k8s.io/client-go/listers/scheduling/v1beta1 k8s.io/client-go/listers/storage/v1 k8s.io/client-go/listers/storage/v1alpha1 k8s.io/client-go/listers/storage/v1beta1 +k8s.io/client-go/listers/storagemigration/v1alpha1 k8s.io/client-go/openapi k8s.io/client-go/openapi/cached k8s.io/client-go/openapi3 @@ -2540,13 +2742,15 @@ k8s.io/client-go/tools/reference k8s.io/client-go/transport k8s.io/client-go/util/cert k8s.io/client-go/util/connrotation +k8s.io/client-go/util/consistencydetector k8s.io/client-go/util/flowcontrol k8s.io/client-go/util/homedir k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry +k8s.io/client-go/util/watchlist k8s.io/client-go/util/workqueue -# k8s.io/klog/v2 v2.120.1 +# k8s.io/klog/v2 
v2.130.1 ## explicit; go 1.18 k8s.io/klog/v2 k8s.io/klog/v2/internal/buffer @@ -2555,8 +2759,8 @@ k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity k8s.io/klog/v2/internal/sloghandler -# k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 -## explicit; go 1.19 +# k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 +## explicit; go 1.20 k8s.io/kube-openapi/pkg/cached k8s.io/kube-openapi/pkg/common k8s.io/kube-openapi/pkg/handler3 @@ -2566,12 +2770,11 @@ k8s.io/kube-openapi/pkg/schemaconv k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/validation/spec -# k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 +# k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock k8s.io/utils/clock/testing -k8s.io/utils/integer k8s.io/utils/internal/third_party/forked/golang/golang-lru k8s.io/utils/internal/third_party/forked/golang/net k8s.io/utils/lru